author    Maxime Ripard <maxime@cerno.tech>  2022-09-14 14:22:18 +0300
committer Maxime Ripard <maxime@cerno.tech>  2022-09-14 14:22:18 +0300
commit    a108772d03d8bdb43258218b00bfe43bbe1e8800 (patch)
tree      11b1f413ab384f2b5de0773f01b6886e2c601f8b /drivers
parent    75cebd664d57a78af3e46c14bd2659df0a08847b (diff)
parent    213cb76ddc8b875e772f9f4d173feefa122716af (diff)
download  linux-a108772d03d8bdb43258218b00bfe43bbe1e8800.tar.xz
Merge drm/drm-next into drm-misc-next
We need 6.0-rc1 to merge the backlight rework PR.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/accessibility/braille/braille_console.c2
-rw-r--r--drivers/accessibility/speakup/.gitignore4
-rw-r--r--drivers/accessibility/speakup/Makefile28
-rw-r--r--drivers/accessibility/speakup/genmap.c162
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/accessibility/speakup/makemapdata.c125
-rw-r--r--drivers/accessibility/speakup/serialio.h3
-rw-r--r--drivers/accessibility/speakup/speakupmap.h66
-rw-r--r--drivers/accessibility/speakup/utils.h102
-rw-r--r--drivers/acpi/Kconfig34
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_lpit.c6
-rw-r--r--drivers/acpi/acpi_lpss.c3
-rw-r--r--drivers/acpi/acpi_video.c52
-rw-r--r--drivers/acpi/apei/apei-base.c2
-rw-r--r--drivers/acpi/apei/bert.c31
-rw-r--r--drivers/acpi/apei/einj.c2
-rw-r--r--drivers/acpi/arm64/iort.c360
-rw-r--r--drivers/acpi/bus.c50
-rw-r--r--drivers/acpi/container.c17
-rw-r--r--drivers/acpi/cppc_acpi.c60
-rw-r--r--drivers/acpi/device_pm.c22
-rw-r--r--drivers/acpi/device_sysfs.c2
-rw-r--r--drivers/acpi/ec.c140
-rw-r--r--drivers/acpi/glue.c133
-rw-r--r--drivers/acpi/irq.c58
-rw-r--r--drivers/acpi/pci_link.c8
-rw-r--r--drivers/acpi/pci_mcfg.c13
-rw-r--r--drivers/acpi/pptt.c142
-rw-r--r--drivers/acpi/prmt.c4
-rw-r--r--drivers/acpi/processor_driver.c72
-rw-r--r--drivers/acpi/processor_idle.c11
-rw-r--r--drivers/acpi/processor_thermal.c54
-rw-r--r--drivers/acpi/property.c509
-rw-r--r--drivers/acpi/resource.c10
-rw-r--r--drivers/acpi/scan.c71
-rw-r--r--drivers/acpi/sleep.c11
-rw-r--r--drivers/acpi/utils.c38
-rw-r--r--drivers/acpi/video_detect.c63
-rw-r--r--drivers/acpi/viot.c32
-rw-r--r--drivers/acpi/x86/s2idle.c17
-rw-r--r--drivers/amba/bus.c317
-rw-r--r--drivers/android/Kconfig9
-rw-r--r--drivers/android/binder.c199
-rw-r--r--drivers/android/binder_alloc.c37
-rw-r--r--drivers/android/binder_alloc.h2
-rw-r--r--drivers/android/binder_alloc_selftest.c2
-rw-r--r--drivers/android/binder_internal.h46
-rw-r--r--drivers/android/binder_trace.h4
-rw-r--r--drivers/android/binderfs.c47
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/libata-acpi.c8
-rw-r--r--drivers/ata/libata-core.c68
-rw-r--r--drivers/ata/libata-eh.c31
-rw-r--r--drivers/ata/libata-scsi.c9
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/ata/libata.h8
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_amd.c14
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c119
-rw-r--r--drivers/ata/pata_hpt3x2n.c21
-rw-r--r--drivers/ata/pata_macio.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c4
-rw-r--r--drivers/ata/pata_serverworks.c4
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/atm/he.c9
-rw-r--r--drivers/atm/idt77252.c1
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/base/arch_topology.c100
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/cacheinfo.c145
-rw-r--r--drivers/base/core.c123
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c59
-rw-r--r--drivers/base/devtmpfs.c1
-rw-r--r--drivers/base/firmware_loader/main.c4
-rw-r--r--drivers/base/firmware_loader/sysfs.c10
-rw-r--r--drivers/base/node.c4
-rw-r--r--drivers/base/power/domain.c5
-rw-r--r--drivers/base/power/runtime.c6
-rw-r--r--drivers/base/power/wakeup.c30
-rw-r--r--drivers/base/regmap/regcache.c11
-rw-r--r--drivers/base/regmap/regmap-irq.c432
-rw-r--r--drivers/base/regmap/regmap.c27
-rw-r--r--drivers/base/topology.c32
-rw-r--r--drivers/block/Kconfig18
-rw-r--r--drivers/block/Makefile4
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/ataflop.c5
-rw-r--r--drivers/block/brd.c8
-rw-r--r--drivers/block/drbd/drbd_actlog.c9
-rw-r--r--drivers/block/drbd/drbd_bitmap.c51
-rw-r--r--drivers/block/drbd/drbd_int.h5
-rw-r--r--drivers/block/drbd/drbd_main.c4
-rw-r--r--drivers/block/drbd/drbd_receiver.c24
-rw-r--r--drivers/block/drbd/drbd_req.c8
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c8
-rw-r--r--drivers/block/loop.c3
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c307
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h5
-rw-r--r--drivers/block/n64cart.c2
-rw-r--r--drivers/block/nbd.c15
-rw-r--r--drivers/block/null_blk/main.c123
-rw-r--r--drivers/block/null_blk/null_blk.h14
-rw-r--r--drivers/block/null_blk/trace.h2
-rw-r--r--drivers/block/null_blk/zoned.c12
-rw-r--r--drivers/block/paride/pcd.c4
-rw-r--r--drivers/block/paride/pd.c6
-rw-r--r--drivers/block/paride/pf.c4
-rw-r--r--drivers/block/pktcdvd.c16
-rw-r--r--drivers/block/ps3disk.c4
-rw-r--r--drivers/block/ps3vram.c6
-rw-r--r--drivers/block/rbd.c8
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c207
-rw-r--r--drivers/block/rnbd/rnbd-clt.h18
-rw-r--r--drivers/block/rnbd/rnbd-proto.h7
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c1
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h1
-rw-r--r--drivers/block/rnbd/rnbd-srv-sysfs.c5
-rw-r--r--drivers/block/rnbd/rnbd-srv.c29
-rw-r--r--drivers/block/rnbd/rnbd-srv.h7
-rw-r--r--drivers/block/sunvdc.c4
-rw-r--r--drivers/block/swim.c2
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/sx8.c1582
-rw-r--r--drivers/block/ublk_drv.c1824
-rw-r--r--drivers/block/virtio_blk.c27
-rw-r--r--drivers/block/xen-blkback/blkback.c6
-rw-r--r--drivers/block/xen-blkback/xenbus.c20
-rw-r--r--drivers/block/xen-blkfront.c8
-rw-r--r--drivers/block/z2ram.c3
-rw-r--r--drivers/block/zram/zcomp.c11
-rw-r--r--drivers/block/zram/zram_drv.c14
-rw-r--r--drivers/bluetooth/btbcm.c33
-rw-r--r--drivers/bluetooth/btbcm.h8
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c15
-rw-r--r--drivers/bluetooth/btmrvl_drv.h16
-rw-r--r--drivers/bluetooth/btmrvl_main.c15
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h16
-rw-r--r--drivers/bluetooth/btmtksdio.c15
-rw-r--r--drivers/bluetooth/btrtl.c2
-rw-r--r--drivers/bluetooth/btusb.c45
-rw-r--r--drivers/bluetooth/hci_bcm.c35
-rw-r--r--drivers/bluetooth/hci_intel.c6
-rw-r--r--drivers/bluetooth/hci_qca.c2
-rw-r--r--drivers/bluetooth/hci_serdev.c11
-rw-r--r--drivers/bus/hisi_lpc.c204
-rw-r--r--drivers/bus/mhi/ep/main.c11
-rw-r--r--drivers/bus/mhi/host/init.c17
-rw-r--r--drivers/bus/mhi/host/pci_generic.c8
-rw-r--r--drivers/bus/mhi/host/pm.c19
-rw-r--r--drivers/bus/mvebu-mbus.c11
-rw-r--r--drivers/bus/omap_l3_noc.c10
-rw-r--r--drivers/bus/omap_l3_noc.h10
-rw-r--r--drivers/bus/sunxi-rsb.c6
-rw-r--r--drivers/bus/tegra-gmi.c5
-rw-r--r--drivers/bus/ts-nbus.c5
-rw-r--r--drivers/cdrom/gdrom.c3
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/intel-gtt.c17
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/iproc-rng200.c9
-rw-r--r--drivers/char/hw_random/powernv-rng.c2
-rw-r--r--drivers/char/hw_random/s390-trng.c11
-rw-r--r--drivers/char/hw_random/via-rng.c2
-rw-r--r--drivers/char/random.c57
-rw-r--r--drivers/char/tb0219.c359
-rw-r--r--drivers/char/tpm/Kconfig12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm.h1
-rw-r--r--drivers/char/tpm/tpm1-cmd.c7
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.c14
-rw-r--r--drivers/char/tpm/tpm_tis_core.h10
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c390
-rw-r--r--drivers/clk/.kunitconfig1
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c5
-rw-r--r--drivers/clk/axs10x/pll_clock.c5
-rw-r--r--drivers/clk/bcm/Kconfig4
-rw-r--r--drivers/clk/bcm/clk-bcm21664.c10
-rw-r--r--drivers/clk/bcm/clk-bcm281xx.c10
-rw-r--r--drivers/clk/bcm/clk-bcm63xx.c14
-rw-r--r--drivers/clk/bcm/clk-cygnus.c14
-rw-r--r--drivers/clk/bcm/clk-hr2.c14
-rw-r--r--drivers/clk/bcm/clk-iproc-armpll.c14
-rw-r--r--drivers/clk/bcm/clk-iproc-asiu.c14
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c14
-rw-r--r--drivers/clk/bcm/clk-iproc.h14
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c10
-rw-r--r--drivers/clk/bcm/clk-kona.c10
-rw-r--r--drivers/clk/bcm/clk-kona.h10
-rw-r--r--drivers/clk/bcm/clk-ns2.c14
-rw-r--r--drivers/clk/bcm/clk-nsp.c14
-rw-r--r--drivers/clk/clk-devres.c91
-rw-r--r--drivers/clk/clk-fixed-factor.c56
-rw-r--r--drivers/clk/clk-hsdk-pll.c5
-rw-r--r--drivers/clk/clk-lan966x.c2
-rw-r--r--drivers/clk/clk-moxart.c5
-rw-r--r--drivers/clk/clk.c48
-rw-r--r--drivers/clk/imx/clk-fracn-gppll.c36
-rw-r--r--drivers/clk/imx/clk-imx93.c6
-rw-r--r--drivers/clk/keystone/sci-clk.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-g3d.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-hif.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c22
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c12
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7629-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt7629-hif.c12
-rw-r--r--drivers/clk/mediatek/clk-mt8135.c22
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c22
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c18
-rw-r--r--drivers/clk/mediatek/clk-mt8186-infra_ao.c23
-rw-r--r--drivers/clk/mediatek/clk-mt8192-msdc.c21
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c29
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c24
-rw-r--r--drivers/clk/mediatek/clk-mtk.c7
-rw-r--r--drivers/clk/mediatek/clk-mtk.h9
-rw-r--r--drivers/clk/mediatek/reset.c198
-rw-r--r--drivers/clk/mediatek/reset.h82
-rw-r--r--drivers/clk/meson/axg-audio.c36
-rw-r--r--drivers/clk/mmp/clk-apbc.c5
-rw-r--r--drivers/clk/mmp/clk-apmu.c5
-rw-r--r--drivers/clk/mmp/clk-frac.c5
-rw-r--r--drivers/clk/mmp/clk-gate.c5
-rw-r--r--drivers/clk/mmp/clk-mix.c5
-rw-r--r--drivers/clk/mmp/clk-mmp2.c5
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c5
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c5
-rw-r--r--drivers/clk/mmp/clk-of-pxa1928.c5
-rw-r--r--drivers/clk/mmp/clk-of-pxa910.c5
-rw-r--r--drivers/clk/mmp/clk-pxa168.c5
-rw-r--r--drivers/clk/mmp/clk-pxa910.c5
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c5
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c5
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-creg.c5
-rw-r--r--drivers/clk/qcom/Kconfig22
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c4
-rw-r--r--drivers/clk/qcom/camcc-sm8250.c16
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c2856
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c144
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h11
-rw-r--r--drivers/clk/qcom/clk-hfpll.c15
-rw-r--r--drivers/clk/qcom/clk-krait.c23
-rw-r--r--drivers/clk/qcom/clk-krait.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c16
-rw-r--r--drivers/clk/qcom/clk-regmap-phy-mux.c62
-rw-r--r--drivers/clk/qcom/clk-regmap-phy-mux.h33
-rw-r--r--drivers/clk/qcom/clk-rpm.c24
-rw-r--r--drivers/clk/qcom/clk-rpmh.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c64
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c104
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c35
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c47
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c6
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c8
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c49
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c142
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c2
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c49
-rw-r--r--drivers/clk/qcom/gdsc.c36
-rw-r--r--drivers/clk/qcom/gdsc.h4
-rw-r--r--drivers/clk/qcom/gpucc-sm8350.c637
-rw-r--r--drivers/clk/qcom/krait-cc.c8
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c1052
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c4
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c22
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c20
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c31
-rw-r--r--drivers/clk/renesas/clk-r8a7779.c27
-rw-r--r--drivers/clk/renesas/clk-rz.c33
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c26
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c32
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c32
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c17
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c2
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c2
-rw-r--r--drivers/clk/spear/clk-aux-synth.c5
-rw-r--r--drivers/clk/spear/clk-frac-synth.c5
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c5
-rw-r--r--drivers/clk/spear/clk-vco-pll.c5
-rw-r--r--drivers/clk/spear/clk.c5
-rw-r--r--drivers/clk/spear/clk.h5
-rw-r--r--drivers/clk/spear/spear1310_clock.c5
-rw-r--r--drivers/clk/spear/spear1340_clock.c5
-rw-r--r--drivers/clk/spear/spear3xx_clock.c5
-rw-r--r--drivers/clk/spear/spear6xx_clock.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c77
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c113
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c80
-rw-r--r--drivers/clk/sunxi/Kconfig4
-rw-r--r--drivers/clk/ti/adpll.c11
-rw-r--r--drivers/clk/ti/apll.c10
-rw-r--r--drivers/clk/ti/autoidle.c10
-rw-r--r--drivers/clk/ti/clk-2xxx.c10
-rw-r--r--drivers/clk/ti/clk-33xx.c10
-rw-r--r--drivers/clk/ti/clk-3xxx.c10
-rw-r--r--drivers/clk/ti/clk-43xx.c10
-rw-r--r--drivers/clk/ti/clk-44xx.c210
-rw-r--r--drivers/clk/ti/clk-54xx.c160
-rw-r--r--drivers/clk/ti/clk-816x.c11
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c10
-rw-r--r--drivers/clk/ti/clk.c10
-rw-r--r--drivers/clk/ti/clkctrl.c14
-rw-r--r--drivers/clk/ti/clkt_dflt.c10
-rw-r--r--drivers/clk/ti/clock.h10
-rw-r--r--drivers/clk/ti/clockdomain.c10
-rw-r--r--drivers/clk/ti/composite.c10
-rw-r--r--drivers/clk/ti/divider.c10
-rw-r--r--drivers/clk/ti/dpll.c10
-rw-r--r--drivers/clk/ti/fapll.c11
-rw-r--r--drivers/clk/ti/fixed-factor.c10
-rw-r--r--drivers/clk/ti/gate.c10
-rw-r--r--drivers/clk/ti/interface.c10
-rw-r--r--drivers/clk/ti/mux.c10
-rw-r--r--drivers/clk/x86/Makefile4
-rw-r--r--drivers/clocksource/Kconfig19
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/sh_cmt.c8
-rw-r--r--drivers/clocksource/timer-mediatek.c114
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c64
-rw-r--r--drivers/clocksource/timer-riscv.c40
-rw-r--r--drivers/clocksource/timer-sun4i.c2
-rw-r--r--drivers/clocksource/timer-sun5i.c2
-rw-r--r--drivers/clocksource/timer-tegra186.c514
-rw-r--r--drivers/clocksource/timer-ti-dm.c123
-rw-r--r--drivers/comedi/drivers/comedi_isadma.c2
-rw-r--r--drivers/counter/104-quad-8.c203
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-dt.c19
-rw-r--r--drivers/cpufreq/cpufreq.c43
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c13
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c12
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c7
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c9
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c5
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c14
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c109
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c15
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c10
-rw-r--r--drivers/cpufreq/sti-cpufreq.c27
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c31
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c12
-rw-r--r--drivers/cpufreq/ti-cpufreq.c42
-rw-r--r--drivers/cpuidle/Kconfig.arm3
-rw-r--r--drivers/cpuidle/cpuidle-at91.c5
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c5
-rw-r--r--drivers/cpuidle/cpuidle-psci.c8
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c8
-rw-r--r--drivers/cpuidle/cpuidle.c15
-rw-r--r--drivers/cpuidle/governors/haltpoll.c3
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c16
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c10
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c40
-rw-r--r--drivers/crypto/atmel-aes.c3
-rw-r--r--drivers/crypto/atmel-ecc.c12
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes.c5
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c7
-rw-r--r--drivers/crypto/caam/caamhash_desc.c2
-rw-r--r--drivers/crypto/caam/qi.c6
-rw-r--r--drivers/crypto/cavium/cpt/cpt_hw_types.h2
-rw-r--r--drivers/crypto/ccp/ccp-dev.h2
-rw-r--r--drivers/crypto/ccp/sev-dev.c12
-rw-r--r--drivers/crypto/ccp/sp-pci.c7
-rw-r--r--drivers/crypto/ccree/cc_driver.c13
-rw-r--r--drivers/crypto/ccree/cc_pm.c2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c2
-rw-r--r--drivers/crypto/hisilicon/qm.c203
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c14
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c2
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.h2
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h6
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c26
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h1
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c39
-rw-r--r--drivers/crypto/hisilicon/trng/trng.c2
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c10
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c17
-rw-r--r--drivers/crypto/inside-secure/safexcel.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel.h18
-rw-r--r--drivers/crypto/keembay/keembay-ocs-ecc.c2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c40
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c17
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h3
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c2
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c5
-rw-r--r--drivers/crypto/omap-aes.c4
-rw-r--r--drivers/crypto/omap-des.c4
-rw-r--r--drivers/crypto/omap-sham.c3
-rw-r--r--drivers/crypto/qat/Kconfig14
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c1
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h1
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/Makefile1
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c41
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h8
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c26
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c28
-rw-r--r--drivers/crypto/qat/qat_common/adf_sysfs.c191
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c4
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c4
-rw-r--r--drivers/crypto/s5p-sss.c3
-rw-r--r--drivers/crypto/sa2ul.c25
-rw-r--r--drivers/crypto/sa2ul.h1
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/crypto/vmx/ghash.c1
-rw-r--r--drivers/crypto/vmx/ghashp8-ppc.pl2
-rw-r--r--drivers/cxl/Kconfig9
-rw-r--r--drivers/cxl/acpi.c243
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/core.h51
-rw-r--r--drivers/cxl/core/hdm.c691
-rw-r--r--drivers/cxl/core/mbox.c95
-rw-r--r--drivers/cxl/core/memdev.c4
-rw-r--r--drivers/cxl/core/pci.c181
-rw-r--r--drivers/cxl/core/pmem.c4
-rw-r--r--drivers/cxl/core/port.c738
-rw-r--r--drivers/cxl/core/region.c1896
-rw-r--r--drivers/cxl/cxl.h312
-rw-r--r--drivers/cxl/cxlmem.h42
-rw-r--r--drivers/cxl/cxlpci.h1
-rw-r--r--drivers/cxl/mem.c49
-rw-r--r--drivers/cxl/pci.c46
-rw-r--r--drivers/cxl/pmem.c259
-rw-r--r--drivers/cxl/port.c53
-rw-r--r--drivers/dax/super.c67
-rw-r--r--drivers/devfreq/Kconfig10
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq.c4
-rw-r--r--drivers/devfreq/exynos-bus.c21
-rw-r--r--drivers/devfreq/imx-bus.c3
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c440
-rw-r--r--drivers/devfreq/tegra30-devfreq.c26
-rw-r--r--drivers/dma-buf/dma-buf.c1
-rw-r--r--drivers/dma-buf/dma-resv.c2
-rw-r--r--drivers/dma/Kconfig8
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/altera-msgdma.c4
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/apple-admac.c818
-rw-r--r--drivers/dma/at_xdmac.c2
-rw-r--r--drivers/dma/bcm-sba-raid.c14
-rw-r--r--drivers/dma/bestcomm/ata.c7
-rw-r--r--drivers/dma/bestcomm/bestcomm.c7
-rw-r--r--drivers/dma/bestcomm/fec.c7
-rw-r--r--drivers/dma/bestcomm/sram.c7
-rw-r--r--drivers/dma/dma-axi-dmac.c16
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/dmatest.c45
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c11
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c141
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h31
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c83
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c49
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h4
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c18
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.h8
-rw-r--r--drivers/dma/dw/core.c3
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c3
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/fsl-edma-common.c3
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c38
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c2
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/moxart-dma.c5
-rw-r--r--drivers/dma/mv_xor_v2.c2
-rw-r--r--drivers/dma/owl-dma.c2
-rw-r--r--drivers/dma/ppc4xx/adma.h5
-rw-r--r--drivers/dma/ppc4xx/dma.h5
-rw-r--r--drivers/dma/ppc4xx/xor.h5
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c44
-rw-r--r--drivers/dma/sh/rz-dmac.c17
-rw-r--r--drivers/dma/sprd-dma.c5
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/dma/stm32-mdma.c5
-rw-r--r--drivers/dma/sun4i-dma.c32
-rw-r--r--drivers/dma/tegra186-gpc-dma.c26
-rw-r--r--drivers/dma/ti/edma.c10
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c122
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c6
-rw-r--r--drivers/edac/Kconfig1
-rw-r--r--drivers/edac/edac_pci.c7
-rw-r--r--drivers/edac/fsl_ddr_edac.c6
-rw-r--r--drivers/edac/fsl_ddr_edac.h7
-rw-r--r--drivers/edac/ghes_edac.c11
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.h7
-rw-r--r--drivers/edac/pnd2_edac.c62
-rw-r--r--drivers/edac/ppc4xx_edac.c1
-rw-r--r--drivers/edac/synopsys_edac.c44
-rw-r--r--drivers/extcon/extcon-fsa9480.c6
-rw-r--r--drivers/extcon/extcon-palmas.c2
-rw-r--r--drivers/extcon/extcon-rt8973a.c1
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/extcon/extcon.c12
-rw-r--r--drivers/firewire/net.c14
-rw-r--r--drivers/firmware/arm_scmi/Kconfig12
-rw-r--r--drivers/firmware/arm_scmi/Makefile3
-rw-r--r--drivers/firmware/arm_scmi/driver.c281
-rw-r--r--drivers/firmware/arm_scmi/perf.c243
-rw-r--r--drivers/firmware/arm_scmi/powercap.c866
-rw-r--r--drivers/firmware/arm_scmi/protocols.h23
-rw-r--r--drivers/firmware/arm_scmi/scmi_power_control.c362
-rw-r--r--drivers/firmware/arm_scmi/system.c17
-rw-r--r--drivers/firmware/arm_scpi.c61
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c107
-rw-r--r--drivers/firmware/efi/Kconfig14
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/efi-init.c1
-rw-r--r--drivers/firmware/efi/efi-pstore.c377
-rw-r--r--drivers/firmware/efi/efi.c127
-rw-r--r--drivers/firmware/efi/efibc.c76
-rw-r--r--drivers/firmware/efi/efivars.c671
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c13
-rw-r--r--drivers/firmware/efi/memmap.c5
-rw-r--r--drivers/firmware/efi/reboot.c21
-rw-r--r--drivers/firmware/efi/vars.c1219
-rw-r--r--drivers/firmware/mtk-adsp-ipc.c36
-rw-r--r--drivers/firmware/qcom_scm-legacy.c4
-rw-r--r--drivers/firmware/qcom_scm.c71
-rw-r--r--drivers/firmware/stratix10-rsu.c129
-rw-r--r--drivers/firmware/stratix10-svc.c201
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c10
-rw-r--r--drivers/firmware/tegra/bpmp.c6
-rw-r--r--drivers/firmware/xilinx/zynqmp.c16
-rw-r--r--drivers/fpga/Kconfig20
-rw-r--r--drivers/fpga/Makefile4
-rw-r--r--drivers/fpga/altera-pr-ip-core.c2
-rw-r--r--drivers/fpga/dfl.c4
-rw-r--r--drivers/fpga/fpga-bridge.c6
-rw-r--r--drivers/fpga/fpga-mgr.c229
-rw-r--r--drivers/fpga/fpga-region.c6
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c625
-rw-r--r--drivers/fpga/microchip-spi.c398
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c249
-rw-r--r--drivers/gpio/gpio-104-idi-48.c157
-rw-r--r--drivers/gpio/gpio-104-idio-16.c60
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c19
-rw-r--r--drivers/gpio/gpio-adnp.c19
-rw-r--r--drivers/gpio/gpio-adp5588.c26
-rw-r--r--drivers/gpio/gpio-bcm-kona.c10
-rw-r--r--drivers/gpio/gpio-brcmstb.c23
-rw-r--r--drivers/gpio/gpio-davinci.c83
-rw-r--r--drivers/gpio/gpio-gpio-mm.c202
-rw-r--r--drivers/gpio/gpio-i8255.c287
-rw-r--r--drivers/gpio/gpio-i8255.h46
-rw-r--r--drivers/gpio/gpio-lp3943.c16
-rw-r--r--drivers/gpio/gpio-lp873x.c10
-rw-r--r--drivers/gpio/gpio-lp87565.c10
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c5
-rw-r--r--drivers/gpio/gpio-msc313.c15
-rw-r--r--drivers/gpio/gpio-mvebu.c5
-rw-r--r--drivers/gpio/gpio-pca953x.c22
-rw-r--r--drivers/gpio/gpio-pca9570.c2
-rw-r--r--drivers/gpio/gpio-pch.c43
-rw-r--r--drivers/gpio/gpio-pisosr.c10
-rw-r--r--drivers/gpio/gpio-rockchip.c3
-rw-r--r--drivers/gpio/gpio-sim.c16
-rw-r--r--drivers/gpio/gpio-spear-spics.c5
-rw-r--r--drivers/gpio/gpio-tegra.c15
-rw-r--r--drivers/gpio/gpio-tegra186.c15
-rw-r--r--drivers/gpio/gpio-thunderx.c17
-rw-r--r--drivers/gpio/gpio-tpic2810.c10
-rw-r--r--drivers/gpio/gpio-ts4800.c5
-rw-r--r--drivers/gpio/gpio-twl4030.c18
-rw-r--r--drivers/gpio/gpio-ucb1400.c20
-rw-r--r--drivers/gpio/gpio-visconti.c15
-rw-r--r--drivers/gpio/gpio-vr41xx.c541
-rw-r--r--drivers/gpio/gpio-ws16c48.c120
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c3
-rw-r--r--drivers/gpio/gpiolib-cdev.c295
-rw-r--r--drivers/gpio/gpiolib-devres.c32
-rw-r--r--drivers/gpio/gpiolib-of.c13
-rw-r--r--drivers/gpio/gpiolib.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v3_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c511
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c150
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c387
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c303
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h (renamed from drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c305
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c36
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c139
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c11
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c39
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h46
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c109
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c73
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c66
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c84
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c12
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c251
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c110
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/Makefile25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c348
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c149
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c670
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h1080
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c653
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c295
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c376
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c827
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h107
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c1884
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h63
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h25
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h12086
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h44640
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_offset.h402
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_sh_mask.h595
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c45
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c43
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c55
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c76
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c77
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c50
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h3
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c4
-rw-r--r--drivers/gpu/drm/drm_file.c3
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c1
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c9
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c8
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c77
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c104
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c62
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c85
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c12
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c70
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c113
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c45
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h45
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c252
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c98
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c400
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c426
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h32
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c40
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c159
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c120
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c56
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c81
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h21
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c76
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h19
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c9
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h6
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h42
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c18
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c70
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c3
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c2
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.c12
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c5
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c14
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.c95
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c22
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c17
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c53
-rw-r--r--drivers/hid/.kunitconfig5
-rw-r--r--drivers/hid/Kconfig16
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/amd-sfh-hid/Makefile3
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c117
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h76
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c12
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h12
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c78
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h52
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c17
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h3
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c300
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c324
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h26
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c75
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h154
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-apple.c35
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-cp2112.c5
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-lg-g15.c2
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-mcp2221.c3
-rw-r--r--drivers/hid/hid-multitouch.c13
-rw-r--r--drivers/hid/hid-nintendo.c6
-rw-r--r--drivers/hid/hid-uclogic-core.c2
-rw-r--r--drivers/hid/hid-uclogic-params.c225
-rw-r--r--drivers/hid/hid-uclogic-rdesc-test.c219
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c124
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h24
-rw-r--r--drivers/hid/i2c-hid/Kconfig15
-rw-r--r--drivers/hid/i2c-hid/Makefile1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c130
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c2
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c38
-rw-r--r--drivers/hid/wacom.h3
-rw-r--r--drivers/hid/wacom_sys.c4
-rw-r--r--drivers/hid/wacom_wac.c111
-rw-r--r--drivers/hv/connection.c11
-rw-r--r--drivers/hv/hv_balloon.c135
-rw-r--r--drivers/hv/hyperv_vmbus.h7
-rw-r--r--drivers/hv/vmbus_drv.c27
-rw-r--r--drivers/hwmon/Kconfig27
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c316
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c2
-rw-r--r--drivers/hwmon/asus-ec-sensors.c108
-rw-r--r--drivers/hwmon/asus_wmi_sensors.c12
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c93
-rw-r--r--drivers/hwmon/drivetemp.c1
-rw-r--r--drivers/hwmon/f71882fg.c2
-rw-r--r--drivers/hwmon/gsc-hwmon.c3
-rw-r--r--drivers/hwmon/k10temp.c12
-rw-r--r--drivers/hwmon/lm75.h3
-rw-r--r--drivers/hwmon/lm90.c2526
-rw-r--r--drivers/hwmon/mcp3021.c99
-rw-r--r--drivers/hwmon/nct6775-core.c3
-rw-r--r--drivers/hwmon/nct6775-platform.c15
-rw-r--r--drivers/hwmon/nct6775.h2
-rw-r--r--drivers/hwmon/occ/common.c8
-rw-r--r--drivers/hwmon/occ/p9_sbe.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/lt7182s.c195
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c44
-rw-r--r--drivers/hwmon/pmbus/pmbus.h10
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c446
-rw-r--r--drivers/hwmon/sch56xx-common.c44
-rw-r--r--drivers/hwmon/sht15.c17
-rw-r--r--drivers/hwmon/tps23861.c14
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c6
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-config.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h11
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c295
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.h13
-rw-r--r--drivers/hwtracing/intel_th/msu-sink.c3
-rw-r--r--drivers/hwtracing/intel_th/msu.c14
-rw-r--r--drivers/hwtracing/intel_th/pci.c25
-rw-r--r--drivers/i2c/busses/Kconfig33
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-altera.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-au1550.c2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c14
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c16
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c18
-rw-r--r--drivers/i2c/busses/i2c-cadence.c40
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-hisi.c3
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c46
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-icy.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c22
-rw-r--r--drivers/i2c/busses/i2c-kempld.c1
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c480
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c7
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c45
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c63
-rw-r--r--drivers/i2c/busses/i2c-mxs.c4
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c176
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-opal.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c62
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c29
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-riic.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c532
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c7
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-wmt.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-core-base.c11
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c1
-rw-r--r--drivers/idle/intel_idle.c101
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/accel/adxl313_core.c2
-rw-r--r--drivers/iio/accel/adxl355_core.c2
-rw-r--r--drivers/iio/accel/adxl367.c48
-rw-r--r--drivers/iio/accel/adxl367_spi.c8
-rw-r--r--drivers/iio/accel/bma220_spi.c10
-rw-r--r--drivers/iio/accel/bma400.h50
-rw-r--r--drivers/iio/accel/bma400_core.c710
-rw-r--r--drivers/iio/accel/bma400_i2c.c10
-rw-r--r--drivers/iio/accel/bma400_spi.c8
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c99
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c17
-rw-r--r--drivers/iio/accel/bmi088-accel.h9
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c6
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c2
-rw-r--r--drivers/iio/accel/kxsd9-spi.c2
-rw-r--r--drivers/iio/accel/kxsd9.c11
-rw-r--r--drivers/iio/accel/mc3230.c4
-rw-r--r--drivers/iio/accel/mma7660.c6
-rw-r--r--drivers/iio/accel/sca3000.c6
-rw-r--r--drivers/iio/accel/sca3300.c353
-rw-r--r--drivers/iio/accel/stk8312.c4
-rw-r--r--drivers/iio/accel/stk8ba50.c4
-rw-r--r--drivers/iio/adc/Kconfig17
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7266.c4
-rw-r--r--drivers/iio/adc/ad7280a.c2
-rw-r--r--drivers/iio/adc/ad7292.c2
-rw-r--r--drivers/iio/adc/ad7298.c2
-rw-r--r--drivers/iio/adc/ad7476.c5
-rw-r--r--drivers/iio/adc/ad7606.c1
-rw-r--r--drivers/iio/adc/ad7606.h4
-rw-r--r--drivers/iio/adc/ad7606_par.c1
-rw-r--r--drivers/iio/adc/ad7766.c5
-rw-r--r--drivers/iio/adc/ad7768-1.c6
-rw-r--r--drivers/iio/adc/ad7887.c5
-rw-r--r--drivers/iio/adc/ad7923.c4
-rw-r--r--drivers/iio/adc/ad7949.c4
-rw-r--r--drivers/iio/adc/ad799x.c8
-rw-r--r--drivers/iio/adc/ad9467.c1
-rw-r--r--drivers/iio/adc/adi-axi-adc.c11
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c11
-rw-r--r--drivers/iio/adc/berlin2-adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c2
-rw-r--r--drivers/iio/adc/imx7d_adc.c6
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c14
-rw-r--r--drivers/iio/adc/ina2xx-adc.c10
-rw-r--r--drivers/iio/adc/ingenic-adc.c2
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c1
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c3
-rw-r--r--drivers/iio/adc/ltc2496.c4
-rw-r--r--drivers/iio/adc/ltc2497.c4
-rw-r--r--drivers/iio/adc/max1027.c8
-rw-r--r--drivers/iio/adc/max11100.c4
-rw-r--r--drivers/iio/adc/max1118.c2
-rw-r--r--drivers/iio/adc/max1241.c2
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c187
-rw-r--r--drivers/iio/adc/mp2629_adc.c1
-rw-r--r--drivers/iio/adc/mt6360-adc.c1
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c12
-rw-r--r--drivers/iio/adc/nau7802.c16
-rw-r--r--drivers/iio/adc/npcm_adc.c37
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c1022
-rw-r--r--drivers/iio/adc/rzg2l_adc.c4
-rw-r--r--drivers/iio/adc/sc27xx_adc.c15
-rw-r--r--drivers/iio/adc/stm32-adc-core.c2
-rw-r--r--drivers/iio/adc/stm32-adc.c6
-rw-r--r--drivers/iio/adc/stmpe-adc.c6
-rw-r--r--drivers/iio/adc/stx104.c86
-rw-r--r--drivers/iio/adc/ti-adc0832.c2
-rw-r--r--drivers/iio/adc/ti-adc084s021.c4
-rw-r--r--drivers/iio/adc/ti-adc108s102.c4
-rw-r--r--drivers/iio/adc/ti-adc12138.c2
-rw-r--r--drivers/iio/adc/ti-adc128s052.c2
-rw-r--r--drivers/iio/adc/ti-adc161s626.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c8
-rw-r--r--drivers/iio/adc/ti-ads124s08.c8
-rw-r--r--drivers/iio/adc/ti-ads131e08.c2
-rw-r--r--drivers/iio/adc/ti-ads7950.c4
-rw-r--r--drivers/iio/adc/ti-ads8344.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/adc/ti-tlc4541.c4
-rw-r--r--drivers/iio/adc/ti-tsc2046.c2
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c8
-rw-r--r--drivers/iio/adc/vf610_adc.c15
-rw-r--r--drivers/iio/adc/xilinx-ams.c8
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c69
-rw-r--r--drivers/iio/addac/ad74413r.c12
-rw-r--r--drivers/iio/afe/iio-rescale.c2
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/chemical/atlas-sensor.c8
-rw-r--r--drivers/iio/chemical/bme680_core.c2
-rw-r--r--drivers/iio/chemical/ccs811.c10
-rw-r--r--drivers/iio/chemical/scd4x.c8
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/chemical/sps30_i2c.c1
-rw-r--r--drivers/iio/chemical/sps30_serial.c1
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c5
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c7
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c88
-rw-r--r--drivers/iio/common/ssp_sensors/ssp.h3
-rw-r--r--drivers/iio/dac/Kconfig2
-rw-r--r--drivers/iio/dac/ad5064.c4
-rw-r--r--drivers/iio/dac/ad5360.c4
-rw-r--r--drivers/iio/dac/ad5380.c4
-rw-r--r--drivers/iio/dac/ad5421.c4
-rw-r--r--drivers/iio/dac/ad5449.c4
-rw-r--r--drivers/iio/dac/ad5504.c2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/ad5592r-base.h4
-rw-r--r--drivers/iio/dac/ad5686.h6
-rw-r--r--drivers/iio/dac/ad5755.c4
-rw-r--r--drivers/iio/dac/ad5761.c4
-rw-r--r--drivers/iio/dac/ad5764.c4
-rw-r--r--drivers/iio/dac/ad5766.c2
-rw-r--r--drivers/iio/dac/ad5770r.c2
-rw-r--r--drivers/iio/dac/ad5791.c2
-rw-r--r--drivers/iio/dac/ad7293.c2
-rw-r--r--drivers/iio/dac/ad7303.c4
-rw-r--r--drivers/iio/dac/ad8801.c2
-rw-r--r--drivers/iio/dac/cio-dac.c20
-rw-r--r--drivers/iio/dac/ds4424.c8
-rw-r--r--drivers/iio/dac/ltc1660.c9
-rw-r--r--drivers/iio/dac/ltc2688.c4
-rw-r--r--drivers/iio/dac/max517.c8
-rw-r--r--drivers/iio/dac/max5821.c9
-rw-r--r--drivers/iio/dac/mcp4725.c9
-rw-r--r--drivers/iio/dac/mcp4922.c13
-rw-r--r--drivers/iio/dac/stm32-dac.c6
-rw-r--r--drivers/iio/dac/ti-dac082s085.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c5
-rw-r--r--drivers/iio/dac/ti-dac7311.c2
-rw-r--r--drivers/iio/dac/ti-dac7612.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c1
-rw-r--r--drivers/iio/frequency/ad9523.c6
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/frequency/adf4371.c2
-rw-r--r--drivers/iio/frequency/admv1013.c2
-rw-r--r--drivers/iio/frequency/admv1014.c2
-rw-r--r--drivers/iio/frequency/admv4420.c2
-rw-r--r--drivers/iio/frequency/adrf6780.c2
-rw-r--r--drivers/iio/gyro/adis16080.c2
-rw-r--r--drivers/iio/gyro/adis16130.c2
-rw-r--r--drivers/iio/gyro/adxrs450.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c2
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c6
-rw-r--r--drivers/iio/gyro/itg3200_core.c9
-rw-r--r--drivers/iio/gyro/mpu3050-core.c14
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c2
-rw-r--r--drivers/iio/health/afe4403.c9
-rw-r--r--drivers/iio/health/afe4404.c13
-rw-r--r--drivers/iio/humidity/hts221_buffer.c1
-rw-r--r--drivers/iio/humidity/hts221_core.c12
-rw-r--r--drivers/iio/humidity/hts221_i2c.c3
-rw-r--r--drivers/iio/humidity/hts221_spi.c3
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c6
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c1
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c1
-rw-r--r--drivers/iio/imu/fxos8700_core.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c16
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c3
-rw-r--r--drivers/iio/industrialio-buffer.c66
-rw-r--r--drivers/iio/industrialio-core.c88
-rw-r--r--drivers/iio/industrialio-sw-device.c2
-rw-r--r--drivers/iio/industrialio-sw-trigger.c2
-rw-r--r--drivers/iio/industrialio-trigger.c89
-rw-r--r--drivers/iio/light/al3010.c8
-rw-r--r--drivers/iio/light/al3320a.c9
-rw-r--r--drivers/iio/light/as73211.c9
-rw-r--r--drivers/iio/light/bh1750.c6
-rw-r--r--drivers/iio/light/bh1780.c7
-rw-r--r--drivers/iio/light/cm32181.c22
-rw-r--r--drivers/iio/light/cm3605.c13
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c9
-rw-r--r--drivers/iio/light/gp2ap002.c14
-rw-r--r--drivers/iio/light/isl29028.c19
-rw-r--r--drivers/iio/light/jsa1212.c4
-rw-r--r--drivers/iio/light/opt3001.c3
-rw-r--r--drivers/iio/light/pa12203001.c8
-rw-r--r--drivers/iio/light/stk3310.c4
-rw-r--r--drivers/iio/light/tsl2563.c7
-rw-r--r--drivers/iio/light/tsl2583.c17
-rw-r--r--drivers/iio/light/tsl2591.c12
-rw-r--r--drivers/iio/light/us5182d.c16
-rw-r--r--drivers/iio/light/vcnl4000.c22
-rw-r--r--drivers/iio/light/vcnl4035.c24
-rw-r--r--drivers/iio/light/veml6030.c14
-rw-r--r--drivers/iio/magnetometer/ak8974.c14
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c3
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.h2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c2
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c2
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c14
-rw-r--r--drivers/iio/potentiometer/ad5110.c4
-rw-r--r--drivers/iio/potentiometer/ad5272.c2
-rw-r--r--drivers/iio/potentiometer/max5481.c2
-rw-r--r--drivers/iio/potentiometer/mcp41010.c2
-rw-r--r--drivers/iio/potentiometer/mcp4131.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c2
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c1
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c4
-rw-r--r--drivers/iio/pressure/bmp280-spi.c1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c9
-rw-r--r--drivers/iio/pressure/dlhl60d.c2
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c8
-rw-r--r--drivers/iio/proximity/ping.c2
-rw-r--r--drivers/iio/proximity/srf04.c11
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx9324.c76
-rw-r--r--drivers/iio/proximity/sx9360.c15
-rw-r--r--drivers/iio/proximity/sx_common.c10
-rw-r--r--drivers/iio/proximity/vcnl3020.c4
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c55
-rw-r--r--drivers/iio/resolver/ad2s1200.c2
-rw-r--r--drivers/iio/resolver/ad2s90.c2
-rw-r--r--drivers/iio/temperature/ltc2983.c13
-rw-r--r--drivers/iio/temperature/max31865.c2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/iio/test/Kconfig26
-rw-r--r--drivers/iio/test/Makefile2
-rw-r--r--drivers/iio/test/iio-test-format.c4
-rw-r--r--drivers/iio/test/iio-test-rescale.c5
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c4
-rw-r--r--drivers/infiniband/Kconfig15
-rw-r--r--drivers/infiniband/core/cma.c230
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/core/rdma_core.c2
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c45
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c25
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig12
-rw-r--r--drivers/infiniband/hw/erdma/Makefile4
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h287
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c1430
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.h167
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c493
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c205
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c329
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h508
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c608
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c566
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c1460
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h342
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c4
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c248
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h13
-rw-r--r--drivers/infiniband/hw/irdma/cm.c61
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c8
-rw-r--r--drivers/infiniband/hw/irdma/hw.c33
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c1
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c1
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h1
-rw-r--r--drivers/infiniband/hw/irdma/main.h2
-rw-r--r--drivers/infiniband/hw/irdma/utils.c1
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c53
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c165
-rw-r--r--drivers/infiniband/hw/mlx5/main.c38
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h79
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c515
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c78
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c8
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c23
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c213
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c106
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c36
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c137
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c236
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c78
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h27
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c14
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c50
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h21
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h15
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c156
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h18
-rw-r--r--drivers/input/input-core-private.h16
-rw-r--r--drivers/input/input-mt.c48
-rw-r--r--drivers/input/input.c149
-rw-r--r--drivers/input/joystick/adc-joystick.c15
-rw-r--r--drivers/input/joystick/sensehat-joystick.c4
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c206
-rw-r--r--drivers/input/keyboard/applespi.c42
-rw-r--r--drivers/input/keyboard/bcm-keypad.c14
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c89
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c18
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c98
-rw-r--r--drivers/input/keyboard/omap4-keypad.c26
-rw-r--r--drivers/input/misc/gpio_decoder.c10
-rw-r--r--drivers/input/misc/iqs7222.c178
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c10
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c10
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/gpio_mouse.c2
-rw-r--r--drivers/input/serio/gscps2.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1270
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c9
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c96
-rw-r--r--drivers/input/touchscreen/exc3000.c7
-rw-r--r--drivers/input/touchscreen/goodix.c27
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c4
-rw-r--r--drivers/input/touchscreen/zinitix.c112
-rw-r--r--drivers/interconnect/bulk.c42
-rw-r--r--drivers/interconnect/imx/Kconfig4
-rw-r--r--drivers/interconnect/imx/Makefile2
-rw-r--r--drivers/interconnect/imx/imx.c84
-rw-r--r--drivers/interconnect/imx/imx.h49
-rw-r--r--drivers/interconnect/imx/imx8mm.c2
-rw-r--r--drivers/interconnect/imx/imx8mn.c2
-rw-r--r--drivers/interconnect/imx/imx8mp.c259
-rw-r--r--drivers/interconnect/imx/imx8mq.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile5
-rw-r--r--drivers/interconnect/qcom/icc-common.c34
-rw-r--r--drivers/interconnect/qcom/icc-common.h13
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c168
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h6
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c30
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h1
-rw-r--r--drivers/interconnect/qcom/msm8939.c1
-rw-r--r--drivers/interconnect/qcom/sm6350.c493
-rw-r--r--drivers/interconnect/qcom/sm6350.h139
-rw-r--r--drivers/interconnect/qcom/sm8450.c1
-rw-r--r--drivers/iommu/Kconfig10
-rw-r--r--drivers/iommu/amd/amd_iommu.h18
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h186
-rw-r--r--drivers/iommu/amd/init.c942
-rw-r--r--drivers/iommu/amd/io_pgtable.c6
-rw-r--r--drivers/iommu/amd/iommu.c585
-rw-r--r--drivers/iommu/amd/iommu_v2.c67
-rw-r--r--drivers/iommu/amd/quirks.c4
-rw-r--r--drivers/iommu/apple-dart.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c144
-rw-r--r--drivers/iommu/arm/arm-smmu/Makefile1
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c142
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c34
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h28
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c73
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c18
-rw-r--r--drivers/iommu/dma-iommu.c124
-rw-r--r--drivers/iommu/exynos-iommu.c182
-rw-r--r--drivers/iommu/fsl_pamu_domain.c5
-rw-r--r--drivers/iommu/hyperv-iommu.c6
-rw-r--r--drivers/iommu/intel/cap_audit.c2
-rw-r--r--drivers/iommu/intel/debugfs.c51
-rw-r--r--drivers/iommu/intel/dmar.c41
-rw-r--r--drivers/iommu/intel/iommu.c447
-rw-r--r--drivers/iommu/intel/iommu.h839
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/intel/pasid.c107
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/perf.c2
-rw-r--r--drivers/iommu/intel/svm.c11
-rw-r--r--drivers/iommu/intel/trace.c2
-rw-r--r--drivers/iommu/intel/trace.h99
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c75
-rw-r--r--drivers/iommu/iommu.c59
-rw-r--r--drivers/iommu/iova.c12
-rw-r--r--drivers/iommu/msm_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu.c71
-rw-r--r--drivers/iommu/mtk_iommu_v1.c5
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/sprd-iommu.c11
-rw-r--r--drivers/iommu/sun50i-iommu.c3
-rw-r--r--drivers/iommu/tegra-gart.c5
-rw-r--r--drivers/iommu/tegra-smmu.c3
-rw-r--r--drivers/iommu/virtio-iommu.c31
-rw-r--r--drivers/irqchip/Kconfig69
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c4
-rw-r--r--drivers/irqchip/irq-gic-v3.c20
-rw-r--r--drivers/irqchip/irq-gic.c18
-rw-r--r--drivers/irqchip/irq-keystone.c10
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c148
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c400
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c203
-rw-r--r--drivers/irqchip/irq-loongson-pch-lpc.c205
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c127
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c177
-rw-r--r--drivers/irqchip/irq-mips-gic.c84
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c393
-rw-r--r--drivers/irqchip/irq-riscv-intc.c7
-rw-r--r--drivers/irqchip/irq-sifive-plic.c149
-rw-r--r--drivers/irqchip/irq-sp7021-intc.c278
-rw-r--r--drivers/irqchip/irq-stm32-exti.c250
-rw-r--r--drivers/irqchip/irq-tegra.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c2
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/blink/Kconfig14
-rw-r--r--drivers/leds/blink/Makefile1
-rw-r--r--drivers/leds/blink/leds-bcm63138.c307
-rw-r--r--drivers/leds/leds-is31fl319x.c529
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c8
-rw-r--r--drivers/leds/simple/Kconfig6
-rw-r--r--drivers/leds/simple/Makefile1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c105
-rw-r--r--drivers/leds/simple/simatic-ipc-leds.c80
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c14
-rw-r--r--drivers/mailbox/imx-mailbox.c40
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c11
-rw-r--r--drivers/mcb/mcb-core.c4
-rw-r--r--drivers/md/Makefile3
-rw-r--r--drivers/md/bcache/Kconfig2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/super.c27
-rw-r--r--drivers/md/dm-bufio.c71
-rw-r--r--drivers/md/dm-cache-metadata.h2
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-core.h23
-rw-r--r--drivers/md/dm-ebs-target.c18
-rw-r--r--drivers/md/dm-flakey.c8
-rw-r--r--drivers/md/dm-ima.c5
-rw-r--r--drivers/md/dm-integrity.c78
-rw-r--r--drivers/md/dm-io-rewind.c166
-rw-r--r--drivers/md/dm-io.c38
-rw-r--r--drivers/md/dm-ioctl.c6
-rw-r--r--drivers/md/dm-kcopyd.c28
-rw-r--r--drivers/md/dm-log.c8
-rw-r--r--drivers/md/dm-raid.c10
-rw-r--r--drivers/md/dm-raid1.c12
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-snap-persistent.c27
-rw-r--r--drivers/md/dm-snap.c2
-rw-r--r--drivers/md/dm-table.c324
-rw-r--r--drivers/md/dm-thin-metadata.c7
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-loadpin.c75
-rw-r--r--drivers/md/dm-verity-target.c208
-rw-r--r--drivers/md/dm-verity.h10
-rw-r--r--drivers/md/dm-writecache.c58
-rw-r--r--drivers/md/dm-zone.c95
-rw-r--r--drivers/md/dm-zoned-metadata.c9
-rw-r--r--drivers/md/dm-zoned-target.c25
-rw-r--r--drivers/md/dm-zoned.h2
-rw-r--r--drivers/md/dm.c501
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md-autodetect.c21
-rw-r--r--drivers/md/md-bitmap.c6
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c438
-rw-r--r--drivers/md/md.h22
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c3
-rw-r--r--drivers/md/raid1.c14
-rw-r--r--drivers/md/raid10.c27
-rw-r--r--drivers/md/raid5-cache.c52
-rw-r--r--drivers/md/raid5-log.h77
-rw-r--r--drivers/md/raid5-ppl.c14
-rw-r--r--drivers/md/raid5.c736
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/cec/core/cec-adap.c5
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c4
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c44
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c12
-rw-r--r--drivers/media/i2c/Kconfig18
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7180.c5
-rw-r--r--drivers/media/i2c/adv7343_regs.h10
-rw-r--r--drivers/media/i2c/adv7393_regs.h10
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h2
-rw-r--r--drivers/media/i2c/adv7604.c5
-rw-r--r--drivers/media/i2c/ar0521.c1061
-rw-r--r--drivers/media/i2c/mt9p031.c93
-rw-r--r--drivers/media/i2c/ov5640.c1650
-rw-r--r--drivers/media/i2c/ov5693.c57
-rw-r--r--drivers/media/i2c/ov7251.c7
-rw-r--r--drivers/media/i2c/st-mipid02.c30
-rw-r--r--drivers/media/i2c/tda1997x.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/mc/mc-entity.c96
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-core.c22
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-ci.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-ci.h9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.h11
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.h9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.h11
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.h9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-sx8.c9
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h11
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig2
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c30
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c21
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c4
-rw-r--r--drivers/media/platform/amphion/vdec.c50
-rw-r--r--drivers/media/platform/amphion/venc.c3
-rw-r--r--drivers/media/platform/amphion/vpu.h1
-rw-r--r--drivers/media/platform/amphion/vpu_cmds.c3
-rw-r--r--drivers/media/platform/amphion/vpu_core.c18
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c2
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c20
-rw-r--r--drivers/media/platform/amphion/vpu_malone.h1
-rw-r--r--drivers/media/platform/amphion/vpu_msgs.c7
-rw-r--r--drivers/media/platform/amphion/vpu_rpc.c2
-rw-r--r--drivers/media/platform/amphion/vpu_rpc.h7
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c6
-rw-r--r--drivers/media/platform/atmel/Kconfig4
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c20
-rw-r--r--drivers/media/platform/atmel/atmel-sama7g5-isc.c2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c3
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c133
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c13
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c12
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c50
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c29
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c30
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h36
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c37
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c7
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c25
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c7
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c210
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_drv_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c5
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c9
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c16
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h10
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c328
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h4
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c41
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c2
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c43
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c36
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h3
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss.c73
-rw-r--r--drivers/media/platform/qcom/camss/camss.h7
-rw-r--r--drivers/media/platform/qcom/venus/core.c20
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/dbgfs.c9
-rw-r--r--drivers/media/platform/qcom/venus/dbgfs.h13
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c9
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h1
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h20
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c22
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c26
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c10
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c4
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-csi2.c2
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c2
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c2
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c7
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/Kconfig2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/Makefile18
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c181
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.c143
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h157
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c536
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.h28
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c243
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c504
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c691
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c713
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h190
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c218
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c17
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.c3
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/common.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c2
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c3
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c33
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c24
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c91
-rw-r--r--drivers/media/platform/sunxi/Kconfig2
-rw-r--r--drivers/media/platform/sunxi/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c4
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig15
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/Makefile4
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c750
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.h52
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2_reg.h76
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig13
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Makefile4
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.c72
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.h39
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c816
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.h55
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2_reg.h151
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c4
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c2
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c1
-rw-r--r--drivers/media/platform/ti/davinci/vpif.h11
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.h10
-rw-r--r--drivers/media/platform/ti/omap/omap_voutlib.c4
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccp2.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispcsi2.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c4
-rw-r--r--drivers/media/platform/video-mux.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h4
-rw-r--r--drivers/media/rc/ati_remote.c11
-rw-r--r--drivers/media/rc/igorplugusb.c23
-rw-r--r--drivers/media/rc/iguanair.c5
-rw-r--r--drivers/media/rc/imon_raw.c16
-rw-r--r--drivers/media/rc/lirc_dev.c6
-rw-r--r--drivers/media/rc/rc-main.c9
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/ttusbir.c4
-rw-r--r--drivers/media/rc/xbox_remote.c10
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c8
-rw-r--r--drivers/media/test-drivers/vimc/Makefile2
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c270
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.h9
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c142
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c393
-rw-r--r--drivers/media/test-drivers/vimc/vimc-lens.c102
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c216
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c307
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c29
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c15
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/airspy/airspy.c17
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h9
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx.h10
-rw-r--r--drivers/media/usb/gspca/spca501.c2
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c3
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c5
-rw-r--r--drivers/media/usb/usbtv/usbtv.h3
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c120
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c143
-rw-r--r--drivers/media/usb/uvc/uvc_isight.c13
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c6
-rw-r--r--drivers/media/usb/uvc/uvc_status.c6
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c18
-rw-r--r--drivers/media/usb/uvc/uvc_video.c96
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h16
-rw-r--r--drivers/media/v4l2-core/Kconfig6
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c103
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c212
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h3
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c71
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c2
-rw-r--r--drivers/memory/mtk-smi.c17
-rw-r--r--drivers/memory/tegra/tegra124-emc.c11
-rw-r--r--drivers/memory/tegra/tegra234.c80
-rw-r--r--drivers/memory/ti-emif-sram-pm.S10
-rw-r--r--drivers/memstick/core/ms_block.c18
-rw-r--r--drivers/memstick/core/mspro_block.c3
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/mfd/Kconfig6
-rw-r--r--drivers/mfd/asic3.c9
-rw-r--r--drivers/mfd/atmel-smc.c4
-rw-r--r--drivers/mfd/axp20x.c9
-rw-r--r--drivers/mfd/bcm2835-pm.c74
-rw-r--r--drivers/mfd/cros_ec_dev.c9
-rw-r--r--drivers/mfd/db8500-prcmu.c2
-rw-r--r--drivers/mfd/dln2.c17
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel-m10-bmc.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c194
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c27
-rw-r--r--drivers/mfd/lp873x.c10
-rw-r--r--drivers/mfd/lpc_ich.c161
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77714.c4
-rw-r--r--drivers/mfd/mfd-core.c31
-rw-r--r--drivers/mfd/mt6358-irq.c24
-rw-r--r--drivers/mfd/mt6397-core.c91
-rw-r--r--drivers/mfd/mt6397-irq.c9
-rw-r--r--drivers/mfd/qcom-pm8008.c53
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c272
-rw-r--r--drivers/mfd/syscon.c3
-rw-r--r--drivers/mfd/t7l66xb.c6
-rw-r--r--drivers/mfd/tc6393xb.c5
-rw-r--r--drivers/mfd/tps65086.c10
-rw-r--r--drivers/mfd/tps65217.c10
-rw-r--r--drivers/mfd/tps65218.c10
-rw-r--r--drivers/mfd/tps65912-core.c10
-rw-r--r--drivers/mfd/tps65912-i2c.c10
-rw-r--r--drivers/mfd/tps65912-spi.c10
-rw-r--r--drivers/mfd/twl-core.c323
-rw-r--r--drivers/mfd/ucb1400_core.c6
-rw-r--r--drivers/misc/Kconfig13
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/atmel-ssc.c4
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c8
-rw-r--r--drivers/misc/cxl/context.c2
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/cxl/irq.c4
-rw-r--r--drivers/misc/cxl/of.c5
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c14
-rw-r--r--drivers/misc/habanalabs/Makefile3
-rw-r--r--drivers/misc/habanalabs/common/Makefile3
-rw-r--r--drivers/misc/habanalabs/common/asid.c5
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c12
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c296
-rw-r--r--drivers/misc/habanalabs/common/context.c73
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c221
-rw-r--r--drivers/misc/habanalabs/common/decoder.c133
-rw-r--r--drivers/misc/habanalabs/common/device.c242
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c211
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h756
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c82
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c54
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c45
-rw-r--r--drivers/misc/habanalabs/common/irq.c160
-rw-r--r--drivers/misc/habanalabs/common/memory.c115
-rw-r--r--drivers/misc/habanalabs/common/memory_mgr.c2
-rw-r--r--drivers/misc/habanalabs/common/mmu/Makefile3
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c496
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c9
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c399
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c40
-rw-r--r--drivers/misc/habanalabs/common/security.c600
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c10
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c681
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c2
-rw-r--r--drivers/misc/habanalabs/gaudi2/Makefile4
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2.c9986
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2P.h566
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c2720
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h1063
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_masks.h141
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_security.c3849
-rw-r--r--drivers/misc/habanalabs/goya/goya.c160
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h6
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c2
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h297
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h7
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h213
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h567
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h819
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h575
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h135
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h221
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h95
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h415
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h157
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h777
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h229
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h85
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h95
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h415
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h157
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h1165
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h1057
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h294
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h237
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h348
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h141
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h73
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h33
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h39
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h73
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h35
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h67
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h67
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h67
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h468
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h163
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h567
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h575
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h1057
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h107
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h291
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h213
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h189
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h213
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h189
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h135
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h1203
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h135
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h87
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h43543
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h129
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h63
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h509
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h129
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h27
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h63
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h229
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h185
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h163
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h113
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h75
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h151
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h131
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h1057
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h581
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h245
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h185
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h163
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h163
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h45067
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h550
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h1057
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h905
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h27
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h31
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h293
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h422
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h229
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h85
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h580
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h245
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h185
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h601
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h95
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h415
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h157
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h135
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h1165
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h1057
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h334
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h141
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h135
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h311
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h115
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h1406
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h1337
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h2321
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h989
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h57
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h155
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h313
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h591
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h29
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h1057
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h111
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h199
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h199
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2.h123
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h963
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h2668
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h57
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h984
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h99
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h197
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h59
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_packets.h12
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h14
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h51
-rw-r--r--drivers/misc/lkdtm/Makefile9
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/lkdtm/cfi.c2
-rw-r--r--drivers/misc/mei/hw-me.c2
-rw-r--r--drivers/misc/sgi-gru/grukservices.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c13
-rw-r--r--drivers/misc/sram-exec.c10
-rw-r--r--drivers/misc/uacce/uacce.c133
-rw-r--r--drivers/misc/vcpu_stall_detector.c223
-rw-r--r--drivers/misc/vmw_balloon.c63
-rw-r--r--drivers/mmc/core/block.c38
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/core/debugfs.c80
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/queue.c4
-rw-r--r--drivers/mmc/core/quirks.h4
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sdio.c30
-rw-r--r--drivers/mmc/host/Kconfig5
-rw-r--r--drivers/mmc/host/cavium-octeon.c1
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/cqhci-core.c9
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c4
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c4
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c6
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c94
-rw-r--r--drivers/mmc/host/mxcmmc.c4
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi.h1
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c42
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c11
-rw-r--r--drivers/mmc/host/sdhci-acpi.c7
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c14
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c76
-rw-r--r--drivers/mmc/host/sdhci-iproc.c14
-rw-r--r--drivers/mmc/host/sdhci-msm.c29
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c5
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed-test.c8
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c34
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c9
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c209
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c7
-rw-r--r--drivers/mmc/host/sdhci-omap.c14
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c11
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c34
-rw-r--r--drivers/mmc/host/sdhci-st.c5
-rw-r--r--drivers/mmc/host/sdhci.c59
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c28
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c8
-rw-r--r--drivers/mtd/devices/powernv_flash.c4
-rw-r--r--drivers/mtd/devices/spear_smi.c10
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c23
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c6
-rw-r--r--drivers/mtd/hyperbus/hyperbus-core.c8
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c13
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c4
-rw-r--r--drivers/mtd/maps/physmap-core.c13
-rw-r--r--drivers/mtd/maps/physmap-versatile.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/mtdchar.c13
-rw-r--r--drivers/mtd/mtdcore.c63
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c9
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c6
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c17
-rw-r--r--drivers/mtd/nand/raw/omap2.c6
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c306
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c5
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/ato.c86
-rw-r--r--drivers/mtd/nand/spi/core.c1
-rw-r--r--drivers/mtd/parsers/Kconfig9
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/ofpart_bcm4908.c3
-rw-r--r--drivers/mtd/parsers/redboot.c1
-rw-r--r--drivers/mtd/parsers/scpart.c249
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c8
-rw-r--r--drivers/mtd/spi-nor/core.c70
-rw-r--r--drivers/mtd/spi-nor/core.h21
-rw-r--r--drivers/mtd/spi-nor/debugfs.c2
-rw-r--r--drivers/mtd/spi-nor/esmt.c2
-rw-r--r--drivers/mtd/spi-nor/issi.c31
-rw-r--r--drivers/mtd/spi-nor/micron-st.c12
-rw-r--r--drivers/mtd/spi-nor/otp.c12
-rw-r--r--drivers/mtd/spi-nor/sfdp.c34
-rw-r--r--drivers/mtd/spi-nor/spansion.c185
-rw-r--r--drivers/mtd/spi-nor/xilinx.c2
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/amt.c253
-rw-r--r--drivers/net/bonding/bond_alb.c10
-rw-r--r--drivers/net/bonding/bond_main.c96
-rw-r--r--drivers/net/bonding/bond_netlink.c116
-rw-r--r--drivers/net/bonding/bond_options.c65
-rw-r--r--drivers/net/can/Kconfig111
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c6
-rw-r--r--drivers/net/can/c_can/c_can.h2
-rw-r--r--drivers/net/can/c_can/c_can_ethtool.c8
-rw-r--r--drivers/net/can/c_can/c_can_main.c9
-rw-r--r--drivers/net/can/can327.c1144
-rw-r--r--drivers/net/can/cc770/cc770.c7
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c13
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_kregs.h32
-rw-r--r--drivers/net/can/dev/Makefile17
-rw-r--r--drivers/net/can/dev/bittiming.c197
-rw-r--r--drivers/net/can/dev/calc_bittiming.c202
-rw-r--r--drivers/net/can/dev/dev.c59
-rw-r--r--drivers/net/can/dev/netlink.c9
-rw-r--r--drivers/net/can/dev/skb.c78
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c2
-rw-r--r--drivers/net/can/flexcan/flexcan-ethtool.c8
-rw-r--r--drivers/net/can/flexcan/flexcan.h2
-rw-r--r--drivers/net/can/grcan.c7
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c10
-rw-r--r--drivers/net/can/janz-ican3.c12
-rw-r--r--drivers/net/can/kvaser_pciefd.c9
-rw-r--r--drivers/net/can/m_can/Kconfig1
-rw-r--r--drivers/net/can/m_can/m_can.c14
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c5
-rw-r--r--drivers/net/can/pch_can.c15
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c54
-rw-r--r--drivers/net/can/rcar/rcar_can.c15
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c11
-rw-r--r--drivers/net/can/sja1000/sja1000.c22
-rw-r--r--drivers/net/can/sja1000/sja1000.h3
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c20
-rw-r--r--drivers/net/can/slcan.c793
-rw-r--r--drivers/net/can/slcan/Makefile7
-rw-r--r--drivers/net/can/slcan/slcan-core.c939
-rw-r--r--drivers/net/can/slcan/slcan-ethtool.c61
-rw-r--r--drivers/net/can/slcan/slcan.h19
-rw-r--r--drivers/net/can/softing/softing_main.c10
-rw-r--r--drivers/net/can/spi/hi311x.c12
-rw-r--r--drivers/net/can/spi/mcp251x.c24
-rw-r--r--drivers/net/can/spi/mcp251xfd/Kconfig1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c20
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c1
-rw-r--r--drivers/net/can/sun4i_can.c16
-rw-r--r--drivers/net/can/ti_hecc.c18
-rw-r--r--drivers/net/can/usb/Kconfig15
-rw-r--r--drivers/net/can/usb/Makefile2
-rw-r--r--drivers/net/can/usb/ems_usb.c12
-rw-r--r--drivers/net/can/usb/esd_usb.c (renamed from drivers/net/can/usb/esd_usb2.c)259
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c39
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.h6
-rw-r--r--drivers/net/can/usb/gs_usb.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h1
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c29
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c14
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c7
-rw-r--r--drivers/net/can/usb/mcba_usb.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c43
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c69
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h2
-rw-r--r--drivers/net/can/usb/ucan.c6
-rw-r--r--drivers/net/can/usb/usb_8dev.c18
-rw-r--r--drivers/net/can/vcan.c8
-rw-r--r--drivers/net/can/vxcan.c8
-rw-r--r--drivers/net/can/xilinx_can.c79
-rw-r--r--drivers/net/dsa/Kconfig17
-rw-r--r--drivers/net/dsa/Makefile2
-rw-r--r--drivers/net/dsa/b53/b53_spi.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c7
-rw-r--r--drivers/net/dsa/microchip/Kconfig42
-rw-r--r--drivers/net/dsa/microchip/Makefile11
-rw-r--r--drivers/net/dsa/microchip/ksz8.h105
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c623
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h37
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c19
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c521
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h60
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c6
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h46
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c150
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c1132
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h251
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c (renamed from drivers/net/dsa/microchip/ksz8795_spi.c)125
-rw-r--r--drivers/net/dsa/microchip/lan937x.h21
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c443
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h184
-rw-r--r--drivers/net/dsa/mt7530.c82
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c44
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c36
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h2
-rw-r--r--drivers/net/dsa/ocelot/Kconfig1
-rw-r--r--drivers/net/dsa/ocelot/felix.c12
-rw-r--r--drivers/net/dsa/ocelot/felix.h1
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c860
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c553
-rw-r--r--drivers/net/dsa/qca/Kconfig8
-rw-r--r--drivers/net/dsa/qca/Makefile2
-rw-r--r--drivers/net/dsa/qca/ar9331.c34
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c (renamed from drivers/net/dsa/qca8k.c)1259
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c1210
-rw-r--r--drivers/net/dsa/qca/qca8k.h (renamed from drivers/net/dsa/qca8k.h)100
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c299
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c1064
-rw-r--r--drivers/net/dsa/rzn1_a5psw.h259
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c18
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c10
-rw-r--r--drivers/net/eql.c4
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h5
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c16
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c10
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.h5
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c143
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c7
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c14
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/rq_enet_desc.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_nic.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_resource.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c16
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rss.h14
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_stats.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/wq_enet_desc.h15
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c8
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c10
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c6
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_hci.h40
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c36
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c3
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_rx.c5
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_tx.c174
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h7
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_trace.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c18
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c103
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c180
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c113
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c17
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h27
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c263
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c130
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c59
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c253
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c81
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c117
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c698
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c137
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c104
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c232
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c135
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c15
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c26
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h8
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c91
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c74
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c69
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h45
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c51
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c179
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c41
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c164
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c2009
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h233
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c40
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h2
-rw-r--r--drivers/net/ethernet/marvell/prestera/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h60
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c47
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c28
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c52
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c42
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c256
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h22
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c547
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c706
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c668
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h34
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c29
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c529
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.c722
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c813
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c153
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c579
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c526
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c554
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c318
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c210
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c408
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c433
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c405
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1298
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c300
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c1072
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c734
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1061
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c812
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c842
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c63
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h26
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c378
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h106
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c112
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c55
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h10
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c39
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c18
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h5
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h70
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c64
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c148
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c12
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c33
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c63
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c17
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c55
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c468
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c84
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig24
-rw-r--r--drivers/net/ethernet/neterion/Makefile1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c10
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c5099
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2086
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c1154
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h48
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4808
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h516
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h4636
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c2428
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h2290
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h49
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c155
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c153
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c222
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c30
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c33
-rw-r--r--drivers/net/ethernet/sfc/ef100.c70
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c148
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.h9
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c510
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h13
-rw-r--r--drivers/net/ethernet/sfc/ef100_regs.h83
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c435
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h69
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c46
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.c32
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.h2
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c84
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c26
-rw-r--r--drivers/net/ethernet/sfc/efx.c73
-rw-r--r--drivers/net/ethernet/sfc/efx.h9
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c115
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h19
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c22
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c51
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/filter.h18
-rw-r--r--drivers/net/ethernet/sfc/mae.c346
-rw-r--r--drivers/net/ethernet/sfc/mae.h42
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c63
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h5
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h8136
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol_mae.h24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h79
-rw-r--r--drivers/net/ethernet/sfc/ptp.c22
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c8
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h10
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/sriov.c10
-rw-r--r--drivers/net/ethernet/sfc/tc.c252
-rw-r--r--drivers/net/ethernet/sfc/tc.h85
-rw-r--r--drivers/net/ethernet/sfc/tx.c10
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c35
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c60
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c759
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c6
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.h2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c43
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c17
-rw-r--r--drivers/net/ethernet/via/via-velocity.h2
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig32
-rw-r--r--drivers/net/ethernet/wangxun/Makefile6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h24
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c166
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h57
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c15
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/ipa/Makefile10
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c (renamed from drivers/net/ipa/ipa_data-v3.1.c)8
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-v3.5.1.c)8
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c (renamed from drivers/net/ipa/ipa_data-v4.11.c)8
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-v4.2.c)8
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c (renamed from drivers/net/ipa/ipa_data-v4.5.c)8
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c (renamed from drivers/net/ipa/ipa_data-v4.9.c)8
-rw-r--r--drivers/net/ipa/gsi.c252
-rw-r--r--drivers/net/ipa/gsi.h26
-rw-r--r--drivers/net/ipa/gsi_private.h24
-rw-r--r--drivers/net/ipa/gsi_trans.c197
-rw-r--r--drivers/net/ipa/gsi_trans.h15
-rw-r--r--drivers/net/ipa/ipa_cmd.c8
-rw-r--r--drivers/net/ipa/ipa_endpoint.c27
-rw-r--r--drivers/net/ipa/ipa_endpoint.h4
-rw-r--r--drivers/net/ipa/ipa_main.c3
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h2
-rw-r--r--drivers/net/ipa/ipa_reg.h2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c69
-rw-r--r--drivers/net/ipa/ipa_sysfs.h1
-rw-r--r--drivers/net/ipvlan/ipvlan.h10
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c18
-rw-r--r--drivers/net/macsec.c103
-rw-r--r--drivers/net/macvlan.c22
-rw-r--r--drivers/net/mdio/fwnode_mdio.c4
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/netdevsim/bpf.c8
-rw-r--r--drivers/net/netdevsim/bus.c19
-rw-r--r--drivers/net/netdevsim/dev.c128
-rw-r--r--drivers/net/netdevsim/fib.c103
-rw-r--r--drivers/net/netdevsim/netdevsim.h3
-rw-r--r--drivers/net/pcs/Kconfig12
-rw-r--r--drivers/net/pcs/Makefile1
-rw-r--r--drivers/net/pcs/pcs-lynx.c80
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c531
-rw-r--r--drivers/net/pcs/pcs-xpcs.c170
-rw-r--r--drivers/net/pcs/pcs-xpcs.h1
-rw-r--r--drivers/net/phy/Kconfig7
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/aquantia_main.c20
-rw-r--r--drivers/net/phy/bcm-phy-lib.h19
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c944
-rw-r--r--drivers/net/phy/broadcom.c48
-rw-r--r--drivers/net/phy/dp83867.c55
-rw-r--r--drivers/net/phy/dp83td510.c49
-rw-r--r--drivers/net/phy/fixed_phy.c1
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c10
-rw-r--r--drivers/net/phy/micrel.c73
-rw-r--r--drivers/net/phy/mxl-gpy.c162
-rw-r--r--drivers/net/phy/nxp-tja11xx.c11
-rw-r--r--drivers/net/phy/phy-c45.c34
-rw-r--r--drivers/net/phy/phy_device.c24
-rw-r--r--drivers/net/phy/phylink.c74
-rw-r--r--drivers/net/phy/sfp.c10
-rw-r--r--drivers/net/phy/smsc.c13
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/sungem_phy.c1
-rw-r--r--drivers/net/tap.c20
-rw-r--r--drivers/net/team/team.c26
-rw-r--r--drivers/net/usb/Kconfig3
-rw-r--r--drivers/net/usb/asix.h3
-rw-r--r--drivers/net/usb/asix_common.c40
-rw-r--r--drivers/net/usb/ax88179_178a.c371
-rw-r--r--drivers/net/usb/catc.c46
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c25
-rw-r--r--drivers/net/usb/cdc_subset.c10
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c30
-rw-r--r--drivers/net/usb/smsc95xx.c207
-rw-r--r--drivers/net/usb/usbnet.c21
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c329
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h80
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c290
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c151
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h24
-rw-r--r--drivers/net/vrf.c10
-rw-r--r--drivers/net/vxlan/vxlan_core.c21
-rw-r--r--drivers/net/wan/farsync.h2
-rw-r--r--drivers/net/wireguard/allowedips.c9
-rw-r--r--drivers/net/wireguard/device.c3
-rw-r--r--drivers/net/wireguard/receive.c9
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c6
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c25
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c61
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c118
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c56
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c103
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c88
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h39
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c128
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c72
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c57
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c50
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h25
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c15
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c2
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/trace.h7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.c125
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.h84
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h74
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c110
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c59
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c11
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c49
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c41
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c21
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c23
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c6
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c6
-rw-r--r--drivers/net/wireless/intersil/p54/main.c15
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c525
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c10
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Makefile13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c32
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ethtool.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.h18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h14
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c121
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c109
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h323
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c920
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c315
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h156
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c915
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h333
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c415
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c279
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c716
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h340
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c424
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c252
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h21
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c228
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h16
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c20
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h15
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c13
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c14
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c12
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h20
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c8
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/ray_cs.c20
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c7
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c7
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c26
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c38
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c204
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h80
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c29
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c896
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c140
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c15
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c11
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c36
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.c3
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c12
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h13
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c10
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c47
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c15
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c54
-rw-r--r--drivers/net/wireless/virt_wifi.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/xen-netback/common.h12
-rw-r--r--drivers/net/xen-netback/interface.c16
-rw-r--r--drivers/net/xen-netback/netback.c8
-rw-r--r--drivers/net/xen-netback/rx.c3
-rw-r--r--drivers/net/xen-netfront.c24
-rw-r--r--drivers/nfc/nxp-nci/core.c34
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c48
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c6
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c12
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h7
-rw-r--r--drivers/ntb/test/ntb_tool.c8
-rw-r--r--drivers/nvdimm/btt.c8
-rw-r--r--drivers/nvdimm/pmem.c23
-rw-r--r--drivers/nvdimm/region_devs.c28
-rw-r--r--drivers/nvdimm/virtio_pmem.c9
-rw-r--r--drivers/nvme/Kconfig1
-rw-r--r--drivers/nvme/Makefile1
-rw-r--r--drivers/nvme/common/Kconfig4
-rw-r--r--drivers/nvme/common/Makefile7
-rw-r--r--drivers/nvme/common/auth.c483
-rw-r--r--drivers/nvme/host/Kconfig15
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/apple.c33
-rw-r--r--drivers/nvme/host/auth.c1017
-rw-r--r--drivers/nvme/host/constants.c3
-rw-r--r--drivers/nvme/host/core.c498
-rw-r--r--drivers/nvme/host/fabrics.c102
-rw-r--r--drivers/nvme/host/fabrics.h7
-rw-r--r--drivers/nvme/host/fc.c23
-rw-r--r--drivers/nvme/host/ioctl.c4
-rw-r--r--drivers/nvme/host/multipath.c13
-rw-r--r--drivers/nvme/host/nvme.h45
-rw-r--r--drivers/nvme/host/pci.c243
-rw-r--r--drivers/nvme/host/rdma.c121
-rw-r--r--drivers/nvme/host/tcp.c113
-rw-r--r--drivers/nvme/host/trace.c32
-rw-r--r--drivers/nvme/host/trace.h2
-rw-r--r--drivers/nvme/host/zns.c6
-rw-r--r--drivers/nvme/target/Kconfig15
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c4
-rw-r--r--drivers/nvme/target/auth.c525
-rw-r--r--drivers/nvme/target/configfs.c136
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c544
-rw-r--r--drivers/nvme/target/fabrics-cmd.c55
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c17
-rw-r--r--drivers/nvme/target/io-cmd-file.c2
-rw-r--r--drivers/nvme/target/loop.c20
-rw-r--r--drivers/nvme/target/nvmet.h75
-rw-r--r--drivers/nvme/target/rdma.c2
-rw-r--r--drivers/nvme/target/tcp.c3
-rw-r--r--drivers/nvme/target/zns.c24
-rw-r--r--drivers/nvmem/Kconfig7
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/bcm-ocotp.c14
-rw-r--r--drivers/nvmem/microchip-otpc.c288
-rw-r--r--drivers/nvmem/mtk-efuse.c3
-rw-r--r--drivers/of/address.c17
-rw-r--r--drivers/of/base.c4
-rw-r--r--drivers/of/device.c5
-rw-r--r--drivers/of/fdt.c25
-rw-r--r--drivers/of/kexec.c30
-rw-r--r--drivers/of/of_reserved_mem.c3
-rw-r--r--drivers/of/overlay.c20
-rw-r--r--drivers/of/unittest.c17
-rw-r--r--drivers/opp/core.c1571
-rw-r--r--drivers/opp/cpu.c12
-rw-r--r--drivers/opp/debugfs.c27
-rw-r--r--drivers/opp/of.c165
-rw-r--r--drivers/opp/opp.h56
-rw-r--r--drivers/opp/ti-opp-supply.c77
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c6
-rw-r--r--drivers/pci/Kconfig8
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c6
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c22
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c19
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c670
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c34
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c12
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c92
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c404
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c472
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h178
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-fu740.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c36
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c431
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194-acpi.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c684
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c6
-rw-r--r--drivers/pci/controller/pci-aardvark.c112
-rw-r--r--drivers/pci/controller/pci-hyperv.c12
-rw-r--r--drivers/pci/controller/pci-loongson.c206
-rw-r--r--drivers/pci/controller/pci-mvebu.c4
-rw-r--r--drivers/pci/controller/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/controller/pci-tegra.c9
-rw-r--r--drivers/pci/controller/pci-xgene.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c443
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c62
-rw-r--r--drivers/pci/controller/pcie-mediatek.c8
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c8
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c60
-rw-r--r--drivers/pci/controller/vmd.c13
-rw-r--r--drivers/pci/doe.c536
-rw-r--r--drivers/pci/endpoint/functions/Kconfig12
-rw-r--r--drivers/pci/endpoint/functions/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c117
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c1442
-rw-r--r--drivers/pci/mmap.c44
-rw-r--r--drivers/pci/p2pdma.c93
-rw-r--r--drivers/pci/pci-acpi.c5
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer.c15
-rw-r--r--drivers/pci/pcie/aspm.c20
-rw-r--r--drivers/pci/pcie/err.c12
-rw-r--r--drivers/pci/pcie/portdrv_core.c9
-rw-r--r--drivers/pci/probe.c92
-rw-r--r--drivers/pci/proc.c7
-rw-r--r--drivers/pci/quirks.c24
-rw-r--r--drivers/pci/switch/switchtec.c7
-rw-r--r--drivers/perf/arm-cci.c11
-rw-r--r--drivers/perf/arm-ccn.c6
-rw-r--r--drivers/perf/arm_spe_pmu.c22
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c6
-rw-r--r--drivers/perf/hisilicon/Kconfig10
-rw-r--r--drivers/perf/hisilicon/Makefile1
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c18
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c15
-rw-r--r--drivers/perf/hisilicon/hns3_pmu.c1671
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c12
-rw-r--r--drivers/perf/riscv_pmu.c5
-rw-r--r--drivers/perf/riscv_pmu_legacy.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c132
-rw-r--r--drivers/phy/amlogic/Kconfig12
-rw-r--r--drivers/phy/amlogic/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c171
-rw-r--r--drivers/phy/broadcom/Kconfig2
-rw-r--r--drivers/phy/broadcom/phy-bcm-cygnus-pcie.c14
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c14
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c14
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c2
-rw-r--r--drivers/phy/cadence/cdns-dphy.c101
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1
-rw-r--r--drivers/phy/freescale/Kconfig9
-rw-r--r--drivers/phy/freescale/Makefile1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c450
-rw-r--r--drivers/phy/mediatek/Kconfig19
-rw-r--r--drivers/phy/mediatek/Makefile2
-rw-r--r--drivers/phy/mediatek/phy-mtk-dp.c202
-rw-r--r--drivers/phy/mediatek/phy-mtk-pcie.c267
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c10
-rw-r--r--drivers/phy/qualcomm/Makefile8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c2621
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c1054
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h123
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c2556
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h72
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h18
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h31
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h27
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h34
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h36
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h46
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h145
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h135
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h111
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h123
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h124
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h140
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h66
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h68
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h233
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h43
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h231
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h60
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h205
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c1383
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c2765
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c6350
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h1242
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c12
-rw-r--r--drivers/phy/samsung/Makefile1
-rw-r--r--drivers/phy/samsung/phy-exynos-pcie.c25
-rw-r--r--drivers/phy/samsung/phy-exynos7-ufs.c12
-rw-r--r--drivers/phy/samsung/phy-exynosautov9-ufs.c29
-rw-r--r--drivers/phy/samsung/phy-fsd-ufs.c63
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c138
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h34
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c4
-rw-r--r--drivers/phy/tegra/phy-tegra194-p2u.c48
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c11
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c75
-rw-r--r--drivers/phy/ti/phy-tusb1210.c5
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c21
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c10
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c10
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/devicetree.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx93.c1
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c28
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h25
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c14
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c417
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c296
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c65
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c10
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c242
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1376
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c15
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c7
-rw-r--r--drivers/pinctrl/pinctrl-at91.c10
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c14
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c64
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c228
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c15
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c5
-rw-r--r--drivers/pinctrl/pinctrl-utils.c15
-rw-r--r--drivers/pinctrl/pinctrl-utils.h15
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c11
-rw-r--r--drivers/pinctrl/qcom/Kconfig19
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c956
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c1544
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c18
-rw-r--r--drivers/pinctrl/ralink/pinctrl-ralink.c2
-rw-r--r--drivers/pinctrl/renesas/Kconfig18
-rw-r--r--drivers/pinctrl/renesas/Makefile2
-rw-r--r--drivers/pinctrl/renesas/core.c6
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c4262
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c235
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c1119
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h9
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h8
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c3
-rw-r--r--drivers/pinctrl/sunxi/Kconfig8
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c840
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c22
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c25
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c156
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h109
-rw-r--r--drivers/platform/Kconfig5
-rw-r--r--drivers/platform/chrome/Kconfig11
-rw-r--r--drivers/platform/chrome/Makefile5
-rw-r--r--drivers/platform/chrome/cros_ec.c11
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c473
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c2753
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h8
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c93
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c196
-rw-r--r--drivers/platform/chrome/cros_kunit_util.c130
-rw-r--r--drivers/platform/chrome/cros_kunit_util.h48
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c23
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c82
-rw-r--r--drivers/platform/olpc/olpc-ec.c2
-rw-r--r--drivers/platform/surface/Kconfig58
-rw-r--r--drivers/platform/surface/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/Kconfig2
-rw-r--r--drivers/platform/surface/aggregator/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/bus.c151
-rw-r--r--drivers/platform/surface/aggregator/bus.h2
-rw-r--r--drivers/platform/surface/aggregator/controller.c55
-rw-r--r--drivers/platform/surface/aggregator/controller.h2
-rw-r--r--drivers/platform/surface/aggregator/core.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/trace.h82
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c29
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_hub.c371
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c362
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c533
-rw-r--r--drivers/platform/surface/surface_dtx.c2
-rw-r--r--drivers/platform/surface/surface_gpe.c14
-rw-r--r--drivers/platform/surface/surface_hotplug.c2
-rw-r--r--drivers/platform/surface/surface_platform_profile.c2
-rw-r--r--drivers/platform/x86/Kconfig52
-rw-r--r--drivers/platform/x86/Makefile9
-rw-r--r--drivers/platform/x86/acer-wmi.c7
-rw-r--r--drivers/platform/x86/amd/Kconfig31
-rw-r--r--drivers/platform/x86/amd/Makefile10
-rw-r--r--drivers/platform/x86/amd/hsmp.c (renamed from drivers/platform/x86/amd_hsmp.c)0
-rw-r--r--drivers/platform/x86/amd/pmc.c (renamed from drivers/platform/x86/amd-pmc.c)14
-rw-r--r--drivers/platform/x86/apple-gmux.c5
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c25
-rw-r--r--drivers/platform/x86/compal-laptop.c4
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c1
-rw-r--r--drivers/platform/x86/intel/atomisp2/led.c3
-rw-r--r--drivers/platform/x86/intel/ifs/Kconfig3
-rw-r--r--drivers/platform/x86/intel/pmt/class.c23
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c18
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c39
-rw-r--r--drivers/platform/x86/intel/vsec.c130
-rw-r--r--drivers/platform/x86/intel/vsec.h11
-rw-r--r--drivers/platform/x86/mlx-platform.c491
-rw-r--r--drivers/platform/x86/p2sb.c133
-rw-r--r--drivers/platform/x86/panasonic-laptop.c28
-rw-r--r--drivers/platform/x86/pmc_atom.c19
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c70
-rw-r--r--drivers/platform/x86/simatic-ipc.c43
-rw-r--r--drivers/platform/x86/sony-laptop.c7
-rw-r--r--drivers/platform/x86/system76_acpi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c221
-rw-r--r--drivers/platform/x86/x86-android-tablets.c17
-rw-r--r--drivers/pnp/resource.c5
-rw-r--r--drivers/power/reset/Kconfig6
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/arm-versatile-reboot.c1
-rw-r--r--drivers/power/reset/at91-reset.c184
-rw-r--r--drivers/power/reset/brcm-kona-reset.c14
-rw-r--r--drivers/power/reset/brcmstb-reboot.c14
-rw-r--r--drivers/power/reset/pwr-mlxbf.c97
-rw-r--r--drivers/power/supply/ab8500-chargalg.h4
-rw-r--r--drivers/power/supply/ab8500_btemp.c1
-rw-r--r--drivers/power/supply/ab8500_chargalg.c70
-rw-r--r--drivers/power/supply/ab8500_charger.c48
-rw-r--r--drivers/power/supply/ab8500_fg.c12
-rw-r--r--drivers/power/supply/bq24257_charger.c2
-rw-r--r--drivers/power/supply/cpcap-battery.c10
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c2
-rw-r--r--drivers/power/supply/goldfish_battery.c4
-rw-r--r--drivers/power/supply/lp8788-charger.c2
-rw-r--r--drivers/power/supply/max77976_charger.c4
-rw-r--r--drivers/power/supply/olpc_battery.c5
-rw-r--r--drivers/power/supply/pm2301_charger.h492
-rw-r--r--drivers/power/supply/power_supply_core.c28
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/surface_charger.c4
-rw-r--r--drivers/powercap/dtpm_cpu.c38
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/powercap/intel_rapl_msr.c2
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/ptp/ptp_dte.c14
-rw-r--r--drivers/ptp/ptp_ocp.c23
-rw-r--r--drivers/pwm/Kconfig10
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c82
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c2
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c14
-rw-r--r--drivers/pwm/pwm-bcm-kona.c14
-rw-r--r--drivers/pwm/pwm-clk.c148
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c67
-rw-r--r--drivers/pwm/pwm-mediatek.c7
-rw-r--r--drivers/pwm/pwm-sifive.c117
-rw-r--r--drivers/pwm/pwm-twl-led.c16
-rw-r--r--drivers/regulator/Kconfig28
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/core.c25
-rw-r--r--drivers/regulator/cpcap-regulator.c10
-rw-r--r--drivers/regulator/cros-ec-regulator.c36
-rw-r--r--drivers/regulator/devres.c28
-rw-r--r--drivers/regulator/isl6271a-regulator.c10
-rw-r--r--drivers/regulator/lp873x-regulator.c10
-rw-r--r--drivers/regulator/max597x-regulator.c502
-rw-r--r--drivers/regulator/max8973-regulator.c15
-rw-r--r--drivers/regulator/mp5416.c30
-rw-r--r--drivers/regulator/mt6370-regulator.c390
-rw-r--r--drivers/regulator/mt6380-regulator.c2
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/qcom_smd-regulator.c29
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c37
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c6
-rw-r--r--drivers/regulator/rt5120-regulator.c420
-rw-r--r--drivers/regulator/rt5190a-regulator.c3
-rw-r--r--drivers/regulator/scmi-regulator.c1
-rw-r--r--drivers/regulator/ti-abb-regulator.c14
-rw-r--r--drivers/regulator/tps51632-regulator.c15
-rw-r--r--drivers/regulator/tps62360-regulator.c15
-rw-r--r--drivers/regulator/tps65023-regulator.c10
-rw-r--r--drivers/regulator/tps6507x-regulator.c10
-rw-r--r--drivers/regulator/tps65086-regulator.c10
-rw-r--r--drivers/regulator/tps65217-regulator.c10
-rw-r--r--drivers/regulator/tps65218-regulator.c10
-rw-r--r--drivers/regulator/tps65912-regulator.c10
-rw-r--r--drivers/remoteproc/imx_rproc.c7
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c3
-rw-r--r--drivers/remoteproc/mtk_scp.c23
-rw-r--r--drivers/remoteproc/omap_remoteproc.c6
-rw-r--r--drivers/remoteproc/pru_rproc.c1
-rw-r--r--drivers/remoteproc/qcom_common.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c54
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c105
-rw-r--r--drivers/remoteproc/qcom_sysmon.c16
-rw-r--r--drivers/remoteproc/qcom_wcnss.c10
-rw-r--r--drivers/remoteproc/remoteproc_core.c28
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c12
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/reset/Kconfig22
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-npcm.c207
-rw-r--r--drivers/reset/reset-sunplus.c212
-rw-r--r--drivers/reset/reset-ti-sci.c10
-rw-r--r--drivers/reset/reset-ti-syscon.c10
-rw-r--r--drivers/reset/reset-tps380x.c126
-rw-r--r--drivers/rpmsg/mtk_rpmsg.c2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c10
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c9
-rw-r--r--drivers/rpmsg/rpmsg_char.c7
-rw-r--r--drivers/rpmsg/rpmsg_core.c3
-rw-r--r--drivers/rpmsg/rpmsg_internal.h4
-rw-r--r--drivers/rtc/Kconfig41
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/class.c6
-rw-r--r--drivers/rtc/dev.c8
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c5
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c5
-rw-r--r--drivers/rtc/rtc-bq32k.c5
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-core.h5
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-ds1374.c5
-rw-r--r--drivers/rtc/rtc-ds1672.c5
-rw-r--r--drivers/rtc/rtc-ds3232.c5
-rw-r--r--drivers/rtc/rtc-em3027.c5
-rw-r--r--drivers/rtc/rtc-fm3130.c5
-rw-r--r--drivers/rtc/rtc-hym8563.c5
-rw-r--r--drivers/rtc/rtc-isl12022.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c10
-rw-r--r--drivers/rtc/rtc-max6900.c5
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c8
-rw-r--r--drivers/rtc/rtc-mpfs.c323
-rw-r--r--drivers/rtc/rtc-nct3018y.c553
-rw-r--r--drivers/rtc/rtc-pcf8523.c5
-rw-r--r--drivers/rtc/rtc-pcf85363.c5
-rw-r--r--drivers/rtc/rtc-pcf8563.c5
-rw-r--r--drivers/rtc/rtc-pcf8583.c5
-rw-r--r--drivers/rtc/rtc-rv3029c2.c5
-rw-r--r--drivers/rtc/rtc-rv8803.c98
-rw-r--r--drivers/rtc/rtc-rx6110.c5
-rw-r--r--drivers/rtc/rtc-rx8025.c22
-rw-r--r--drivers/rtc/rtc-rx8581.c5
-rw-r--r--drivers/rtc/rtc-s35390a.c5
-rw-r--r--drivers/rtc/rtc-sd3078.c5
-rw-r--r--drivers/rtc/rtc-spear.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c680
-rw-r--r--drivers/rtc/rtc-vr41xx.c363
-rw-r--r--drivers/rtc/rtc-x1205.c5
-rw-r--r--drivers/rtc/rtc-zynqmp.c115
-rw-r--r--drivers/s390/block/dasd.c6
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c1
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c10
-rw-r--r--drivers/s390/block/scm_blk.c4
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/sclp_early.c4
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/uvdevice.c5
-rw-r--r--drivers/s390/char/zcore.c55
-rw-r--r--drivers/s390/cio/airq.c12
-rw-r--r--drivers/s390/cio/qdio_thinint.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c205
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h12
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c58
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c99
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c114
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h13
-rw-r--r--drivers/s390/crypto/ap_bus.c45
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/ap_queue.c2
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c124
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c1544
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h54
-rw-r--r--drivers/s390/net/ism_drv.c15
-rw-r--r--drivers/s390/net/qeth_core_main.c170
-rw-r--r--drivers/s390/net/qeth_ethtool.c12
-rw-r--r--drivers/s390/scsi/zfcp_diag.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c29
-rw-r--r--drivers/s390/scsi/zfcp_fc.h6
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c9
-rw-r--r--drivers/scsi/BusLogic.c35
-rw-r--r--drivers/scsi/FlashPoint.c4
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c63
-rw-r--r--drivers/scsi/a3000.c53
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aacraid/comminit.c2
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c21
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c4
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h441
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h136
-rw-r--r--drivers/scsi/dpt/dptsig.h336
-rw-r--r--drivers/scsi/dpt/osd_defs.h79
-rw-r--r--drivers/scsi/dpt/osd_util.h358
-rw-r--r--drivers/scsi/dpt/sys_info.h417
-rw-r--r--drivers/scsi/dpt_i2o.c3545
-rw-r--r--drivers/scsi/dpti.h331
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/fnic/cq_desc.h14
-rw-r--r--drivers/scsi/fnic/cq_enet_desc.h14
-rw-r--r--drivers/scsi/fnic/cq_exch_desc.h14
-rw-r--r--drivers/scsi/fnic/fcpio.h14
-rw-r--r--drivers/scsi/fnic/fnic.h14
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c14
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c18
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c14
-rw-r--r--drivers/scsi/fnic/fnic_fip.h14
-rw-r--r--drivers/scsi/fnic/fnic_io.h14
-rw-r--r--drivers/scsi/fnic/fnic_isr.c15
-rw-r--r--drivers/scsi/fnic/fnic_main.c60
-rw-r--r--drivers/scsi/fnic/fnic_res.c14
-rw-r--r--drivers/scsi/fnic/fnic_res.h14
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c28
-rw-r--r--drivers/scsi/fnic/fnic_stats.h18
-rw-r--r--drivers/scsi/fnic/fnic_trace.c18
-rw-r--r--drivers/scsi/fnic/fnic_trace.h18
-rw-r--r--drivers/scsi/fnic/rq_enet_desc.h14
-rw-r--r--drivers/scsi/fnic/vnic_cq.c14
-rw-r--r--drivers/scsi/fnic/vnic_cq.h14
-rw-r--r--drivers/scsi/fnic/vnic_cq_copy.h14
-rw-r--r--drivers/scsi/fnic/vnic_dev.c14
-rw-r--r--drivers/scsi/fnic/vnic_dev.h14
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h14
-rw-r--r--drivers/scsi/fnic/vnic_intr.c14
-rw-r--r--drivers/scsi/fnic/vnic_intr.h14
-rw-r--r--drivers/scsi/fnic/vnic_nic.h14
-rw-r--r--drivers/scsi/fnic/vnic_resource.h14
-rw-r--r--drivers/scsi/fnic/vnic_rq.c15
-rw-r--r--drivers/scsi/fnic/vnic_rq.h14
-rw-r--r--drivers/scsi/fnic/vnic_scsi.h14
-rw-r--r--drivers/scsi/fnic/vnic_stats.h14
-rw-r--r--drivers/scsi/fnic/vnic_wq.c14
-rw-r--r--drivers/scsi/fnic/vnic_wq.h14
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.c15
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.h14
-rw-r--r--drivers/scsi/fnic/wq_enet_desc.h14
-rw-r--r--drivers/scsi/gvp11.c95
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c49
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c16
-rw-r--r--drivers/scsi/hosts.c41
-rw-r--r--drivers/scsi/iscsi_tcp.c74
-rw-r--r--drivers/scsi/iscsi_tcp.h2
-rw-r--r--drivers/scsi/libiscsi.c313
-rw-r--r--drivers/scsi/libiscsi_tcp.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c67
-rw-r--r--drivers/scsi/libsas/sas_init.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/lpfc/lpfc.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c324
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c7
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mesh.c7
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h73
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c67
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c307
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c26
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c75
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c10
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c52
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h36
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c585
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h106
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c131
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c138
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c67
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_error.c26
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c44
-rw-r--r--drivers/scsi/scsi_priv.h4
-rw-r--r--drivers/scsi/scsi_scan.c9
-rw-r--r--drivers/scsi/scsi_sysfs.c31
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c98
-rw-r--r--drivers/scsi/scsi_transport_sas.c6
-rw-r--r--drivers/scsi/sd.c101
-rw-r--r--drivers/scsi/sd.h5
-rw-r--r--drivers/scsi/sd_zbc.c12
-rw-r--r--drivers/scsi/sg.c53
-rw-r--r--drivers/scsi/smartpqi/Kconfig2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h27
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c405
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c11
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h4
-rw-r--r--drivers/scsi/snic/cq_desc.h18
-rw-r--r--drivers/scsi/snic/cq_enet_desc.h18
-rw-r--r--drivers/scsi/snic/snic.h18
-rw-r--r--drivers/scsi/snic/snic_attrs.c18
-rw-r--r--drivers/scsi/snic/snic_ctl.c18
-rw-r--r--drivers/scsi/snic/snic_debugfs.c18
-rw-r--r--drivers/scsi/snic/snic_disc.c18
-rw-r--r--drivers/scsi/snic/snic_disc.h18
-rw-r--r--drivers/scsi/snic/snic_fwint.h20
-rw-r--r--drivers/scsi/snic/snic_io.c18
-rw-r--r--drivers/scsi/snic/snic_io.h18
-rw-r--r--drivers/scsi/snic/snic_isr.c18
-rw-r--r--drivers/scsi/snic/snic_main.c18
-rw-r--r--drivers/scsi/snic/snic_res.c18
-rw-r--r--drivers/scsi/snic/snic_res.h18
-rw-r--r--drivers/scsi/snic/snic_scsi.c18
-rw-r--r--drivers/scsi/snic/snic_stats.h18
-rw-r--r--drivers/scsi/snic/snic_trc.c18
-rw-r--r--drivers/scsi/snic/snic_trc.h18
-rw-r--r--drivers/scsi/snic/vnic_cq.c18
-rw-r--r--drivers/scsi/snic/vnic_cq.h18
-rw-r--r--drivers/scsi/snic/vnic_cq_fw.h18
-rw-r--r--drivers/scsi/snic/vnic_dev.c18
-rw-r--r--drivers/scsi/snic/vnic_dev.h18
-rw-r--r--drivers/scsi/snic/vnic_devcmd.h18
-rw-r--r--drivers/scsi/snic/vnic_intr.c18
-rw-r--r--drivers/scsi/snic/vnic_intr.h18
-rw-r--r--drivers/scsi/snic/vnic_resource.h18
-rw-r--r--drivers/scsi/snic/vnic_snic.h18
-rw-r--r--drivers/scsi/snic/vnic_stats.h18
-rw-r--r--drivers/scsi/snic/vnic_wq.c18
-rw-r--r--drivers/scsi/snic/vnic_wq.h18
-rw-r--r--drivers/scsi/snic/wq_enet_desc.h18
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/sh/intc/chip.c2
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/messaging.c4
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-mx-socinfo.c1
-rw-r--r--drivers/soc/amlogic/meson-secure-pwrc.c4
-rw-r--r--drivers/soc/bcm/bcm2835-power.c72
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c9
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c2
-rw-r--r--drivers/soc/fsl/guts.c221
-rw-r--r--drivers/soc/fujitsu/Kconfig16
-rw-r--r--drivers/soc/fujitsu/Makefile3
-rw-r--r--drivers/soc/fujitsu/a64fx-diag.c154
-rw-r--r--drivers/soc/imx/gpcv2.c8
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c9
-rw-r--r--drivers/soc/mediatek/Kconfig10
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mt6795-pm-domains.h112
-rw-r--r--drivers/soc/mediatek/mt8183-pm-domains.h1
-rw-r--r--drivers/soc/mediatek/mt8186-pm-domains.h2
-rw-r--r--drivers/soc/mediatek/mt8192-pm-domains.h2
-rw-r--r--drivers/soc/mediatek/mt8195-pm-domains.h4
-rw-r--r--drivers/soc/mediatek/mt8365-mmsys.h22
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c45
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c155
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c8
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c225
-rw-r--r--drivers/soc/mediatek/mtk-svs.c2403
-rw-r--r--drivers/soc/qcom/Kconfig18
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/apr.c15
-rw-r--r--drivers/soc/qcom/cmd-db.c8
-rw-r--r--drivers/soc/qcom/icc-bwmon.c419
-rw-r--r--drivers/soc/qcom/llcc-qcom.c2
-rw-r--r--drivers/soc/qcom/mdt_loader.c4
-rw-r--r--drivers/soc/qcom/ocmem.c3
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c3
-rw-r--r--drivers/soc/qcom/qcom_aoss.c4
-rw-r--r--drivers/soc/qcom/rpmhpd.c4
-rw-r--r--drivers/soc/qcom/rpmpd.c1
-rw-r--r--drivers/soc/qcom/smd-rpm.c1
-rw-r--r--drivers/soc/qcom/smp2p.c3
-rw-r--r--drivers/soc/qcom/socinfo.c4
-rw-r--r--drivers/soc/qcom/spm.c14
-rw-r--r--drivers/soc/renesas/r8a779a0-sysc.c10
-rw-r--r--drivers/soc/renesas/rcar-gen4-sysc.h4
-rw-r--r--drivers/soc/renesas/rcar-sysc.h4
-rw-r--r--drivers/soc/sunxi/Kconfig1
-rw-r--r--drivers/soc/tegra/common.c49
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soc/ti/knav_dma.c10
-rw-r--r--drivers/soc/ti/pruss.c1
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c2
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c5
-rw-r--r--drivers/soundwire/bus.c75
-rw-r--r--drivers/soundwire/bus_type.c38
-rw-r--r--drivers/soundwire/intel.c32
-rw-r--r--drivers/soundwire/qcom.c32
-rw-r--r--drivers/soundwire/slave.c120
-rw-r--r--drivers/soundwire/stream.c53
-rw-r--r--drivers/spi/Kconfig18
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c104
-rw-r--r--drivers/spi/spi-altera-dfl.c14
-rw-r--r--drivers/spi/spi-amd.c53
-rw-r--r--drivers/spi/spi-armada-3700.c4
-rw-r--r--drivers/spi/spi-aspeed-smc.c12
-rw-r--r--drivers/spi/spi-atmel.c15
-rw-r--r--drivers/spi/spi-bcm2835.c20
-rw-r--r--drivers/spi/spi-cadence-quadspi.c19
-rw-r--r--drivers/spi/spi-cadence.c2
-rw-r--r--drivers/spi/spi-dw-core.c10
-rw-r--r--drivers/spi/spi-dw-dma.c25
-rw-r--r--drivers/spi/spi-dw-mmio.c8
-rw-r--r--drivers/spi/spi-dw.h13
-rw-r--r--drivers/spi/spi-fsi.c19
-rw-r--r--drivers/spi/spi-gxp.c325
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-intel.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c129
-rw-r--r--drivers/spi/spi-microchip-core.c617
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c116
-rw-r--r--drivers/spi/spi-mpc52xx.c2
-rw-r--r--drivers/spi/spi-npcm-fiu.c28
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-rspi.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c123
-rw-r--r--drivers/spi/spi-sh.c94
-rw-r--r--drivers/spi/spi-sifive.c39
-rw-r--r--drivers/spi/spi-stm32-qspi.c18
-rw-r--r--drivers/spi/spi-synquacer.c1
-rw-r--r--drivers/spi/spi-tegra20-slink.c3
-rw-r--r--drivers/spi/spi-tegra210-quad.c33
-rw-r--r--drivers/spi/spi-ti-qspi.c75
-rw-r--r--drivers/spi/spi-topcliff-pch.c30
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c25
-rw-r--r--drivers/spi/spi.c577
-rw-r--r--drivers/spmi/spmi.c17
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c3
-rw-r--r--drivers/staging/fbtft/fbtft-core.c5
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c6
-rw-r--r--drivers/staging/greybus/audio_helper.c14
-rw-r--r--drivers/staging/greybus/fw-management.c6
-rw-r--r--drivers/staging/greybus/loopback.c2
-rw-r--r--drivers/staging/media/Kconfig12
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/Makefile3
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c20
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h2
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm.h32
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_bo.h37
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_common.h26
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_pool.h116
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h146
-rw-r--r--drivers/staging/media/atomisp/notes.txt30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.c625
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.h120
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c92
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c365
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c32
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c202
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c261
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c234
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c253
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_public.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h6
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h7
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c110
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c23
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c3
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c47
-rw-r--r--drivers/staging/media/av7110/av7110.c2
-rw-r--r--drivers/staging/media/hantro/hantro.h4
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c58
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c44
-rw-r--r--drivers/staging/media/hantro/hantro_g2_regs.h2
-rw-r--r--drivers/staging/media/hantro/hantro_g2_vp9_dec.c18
-rw-r--r--drivers/staging/media/hantro/hantro_hevc.c33
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h18
-rw-r--r--drivers/staging/media/hantro/hantro_postproc.c38
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c52
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.h3
-rw-r--r--drivers/staging/media/hantro/imx8m_vpu_hw.c80
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu_hw.c189
-rw-r--r--drivers/staging/media/hantro/sama5d4_vdec_hw.c40
-rw-r--r--drivers/staging/media/hantro/sunxi_vpu_hw.c51
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c2
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c2
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c1375
-rw-r--r--drivers/staging/media/omap4iss/iss.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c41
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-vp9.c12
-rw-r--r--drivers/staging/media/stkwebcam/Kconfig (renamed from drivers/media/usb/stkwebcam/Kconfig)8
-rw-r--r--drivers/staging/media/stkwebcam/Makefile (renamed from drivers/media/usb/stkwebcam/Makefile)2
-rw-r--r--drivers/staging/media/stkwebcam/TODO12
-rw-r--r--drivers/staging/media/stkwebcam/stk-sensor.c (renamed from drivers/media/usb/stkwebcam/stk-sensor.c)0
-rw-r--r--drivers/staging/media/stkwebcam/stk-webcam.c (renamed from drivers/media/usb/stkwebcam/stk-webcam.c)0
-rw-r--r--drivers/staging/media/stkwebcam/stk-webcam.h (renamed from drivers/media/usb/stkwebcam/stk-webcam.h)0
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c54
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h7
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c37
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c180
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_vp8.c5
-rw-r--r--drivers/staging/media/tegra-video/vi.c4
-rw-r--r--drivers/staging/media/zoran/videocodec.c93
-rw-r--r--drivers/staging/media/zoran/videocodec.h15
-rw-r--r--drivers/staging/media/zoran/zoran.h14
-rw-r--r--drivers/staging/media/zoran/zr36016.c91
-rw-r--r--drivers/staging/media/zoran/zr36050.c144
-rw-r--r--drivers/staging/media/zoran/zr36060.c97
-rw-r--r--drivers/staging/octeon-usb/Kconfig11
-rw-r--r--drivers/staging/octeon-usb/Makefile2
-rw-r--r--drivers/staging/octeon-usb/TODO8
-rw-r--r--drivers/staging/octeon/ethernet-rx.c4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c7
-rw-r--r--drivers/staging/pi433/pi433_if.c2
-rw-r--r--drivers/staging/qlge/qlge_main.c42
-rw-r--r--drivers/staging/r8188eu/Makefile1
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c23
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c15
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c61
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c78
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c1
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c8
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c43
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c6
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c464
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c13
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c25
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c80
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c145
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c34
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c45
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c33
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c21
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c118
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c66
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c37
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c6
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c204
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c30
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_recv.c2
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c476
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c33
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPwrSeq.h13
-rw-r--r--drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h13
-rw-r--r--drivers/staging/r8188eu/include/HalPwrSeqCmd.h51
-rw-r--r--drivers/staging/r8188eu/include/basic_types.h52
-rw-r--r--drivers/staging/r8188eu/include/hal_com.h3
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h20
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h7
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h3
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h5
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h143
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h10
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_iol.h24
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h9
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h19
-rw-r--r--drivers/staging/r8188eu/include/usb_ops_linux.h2
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c83
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c19
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c8
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c2
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c2
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c24
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c37
-rw-r--r--drivers/staging/rtl8192u/r8192U.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c40
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c233
-rw-r--r--drivers/staging/rts5208/spi.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c40
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h48
-rw-r--r--drivers/staging/vc04_services/interface/TESTING82
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c96
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c106
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h38
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c40
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c63
-rw-r--r--drivers/staging/vme_user/Kconfig27
-rw-r--r--drivers/staging/vme_user/Makefile3
-rw-r--r--drivers/staging/vme_user/vme.c (renamed from drivers/vme/vme.c)2
-rw-r--r--drivers/staging/vme_user/vme.h190
-rw-r--r--drivers/staging/vme_user/vme_bridge.h (renamed from drivers/vme/vme_bridge.h)2
-rw-r--r--drivers/staging/vme_user/vme_fake.c (renamed from drivers/vme/bridges/vme_fake.c)4
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c (renamed from drivers/vme/bridges/vme_tsi148.c)4
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h (renamed from drivers/vme/bridges/vme_tsi148.h)0
-rw-r--r--drivers/staging/vme_user/vme_user.c2
-rw-r--r--drivers/staging/vt6655/baseband.c10
-rw-r--r--drivers/staging/vt6655/card.c103
-rw-r--r--drivers/staging/vt6655/channel.c2
-rw-r--r--drivers/staging/vt6655/device_main.c96
-rw-r--r--drivers/staging/vt6655/mac.c55
-rw-r--r--drivers/staging/vt6655/mac.h120
-rw-r--r--drivers/staging/vt6655/power.c25
-rw-r--r--drivers/staging/vt6655/rf.c20
-rw-r--r--drivers/staging/vt6655/rxtx.c8
-rw-r--r--drivers/staging/vt6655/srom.c2
-rw-r--r--drivers/staging/vt6655/upc.h35
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c57
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c122
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c113
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c160
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_configfs.c27
-rw-r--r--drivers/target/target_core_device.c38
-rw-r--r--drivers/target/target_core_file.c40
-rw-r--r--drivers/target/target_core_iblock.c21
-rw-r--r--drivers/target/target_core_pr.c28
-rw-r--r--drivers/target/target_core_sbc.c105
-rw-r--r--drivers/target/target_core_stat.c10
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/tee/optee/optee_smc.h2
-rw-r--r--drivers/tee/optee/smc_abi.c4
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/tee/tee_shm.c3
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/cpufreq_cooling.c96
-rw-r--r--drivers/thermal/db8500_thermal.c34
-rw-r--r--drivers/thermal/devfreq_cooling.c46
-rw-r--r--drivers/thermal/gov_fair_share.c6
-rw-r--r--drivers/thermal/gov_power_allocator.c4
-rw-r--r--drivers/thermal/gov_step_wise.c26
-rw-r--r--drivers/thermal/hisi_thermal.c10
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c8
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c2
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c13
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c5
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c5
-rw-r--r--drivers/thermal/qcom/tsens.c12
-rw-r--r--drivers/thermal/qcom/tsens.h2
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c8
-rw-r--r--drivers/thermal/rzg2l_thermal.c18
-rw-r--r--drivers/thermal/sun8i_thermal.c2
-rw-r--r--drivers/thermal/tegra/soctherm.c32
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c2
-rw-r--r--drivers/thermal/thermal_core.c79
-rw-r--r--drivers/thermal/thermal_core.h15
-rw-r--r--drivers/thermal/thermal_helpers.c13
-rw-r--r--drivers/thermal/thermal_netlink.c2
-rw-r--r--drivers/thermal/thermal_of.c201
-rw-r--r--drivers/thermal/thermal_sysfs.c32
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thunderbolt/Kconfig6
-rw-r--r--drivers/thunderbolt/acpi.c29
-rw-r--r--drivers/thunderbolt/ctl.c6
-rw-r--r--drivers/thunderbolt/ctl.h2
-rw-r--r--drivers/thunderbolt/domain.c3
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/thunderbolt/nhi.c4
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c91
-rw-r--r--drivers/thunderbolt/tb.c68
-rw-r--r--drivers/thunderbolt/tb.h56
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/test.c12
-rw-r--r--drivers/thunderbolt/tmu.c221
-rw-r--r--drivers/tty/amiserial.c20
-rw-r--r--drivers/tty/mips_ejtag_fdc.c4
-rw-r--r--drivers/tty/n_gsm.c757
-rw-r--r--drivers/tty/n_tty.c92
-rw-r--r--drivers/tty/pty.c14
-rw-r--r--drivers/tty/serial/8250/8250.h24
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c7
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c24
-rw-r--r--drivers/tty/serial/8250/8250_core.c12
-rw-r--r--drivers/tty/serial/8250/8250_dma.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c94
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c152
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c25
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c31
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c28
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c135
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c163
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig22
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl011.c38
-rw-r--r--drivers/tty/serial/ar933x_uart.c27
-rw-r--r--drivers/tty/serial/atmel_serial.c103
-rw-r--r--drivers/tty/serial/earlycon.c3
-rw-r--r--drivers/tty/serial/fsl_lpuart.c67
-rw-r--r--drivers/tty/serial/imx.c21
-rw-r--r--drivers/tty/serial/kgdboc.c2
-rw-r--r--drivers/tty/serial/max310x.c272
-rw-r--r--drivers/tty/serial/mcf.c10
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c4
-rw-r--r--drivers/tty/serial/msm_serial.c550
-rw-r--r--drivers/tty/serial/mux.c6
-rw-r--r--drivers/tty/serial/mvebu-uart.c36
-rw-r--r--drivers/tty/serial/omap-serial.c18
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c7
-rw-r--r--drivers/tty/serial/pic32_uart.c4
-rw-r--r--drivers/tty/serial/pmac_zilog.c1
-rw-r--r--drivers/tty/serial/pxa.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c91
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c95
-rw-r--r--drivers/tty/serial/sc16is7xx.c10
-rw-r--r--drivers/tty/serial/serial-tegra.c5
-rw-r--r--drivers/tty/serial/serial_core.c457
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c48
-rw-r--r--drivers/tty/serial/sifive.c10
-rw-r--r--drivers/tty/serial/st-asc.c1
-rw-r--r--drivers/tty/serial/stm32-usart.c81
-rw-r--r--drivers/tty/serial/stm32-usart.h68
-rw-r--r--drivers/tty/serial/sunsu.c4
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/vr41xx_siu.c934
-rw-r--r--drivers/tty/tty.h3
-rw-r--r--drivers/tty/tty_buffer.c105
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_port.c21
-rw-r--r--drivers/tty/vt/Makefile2
-rw-r--r--drivers/tty/vt/consolemap.c684
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped6
-rw-r--r--drivers/tty/vt/selection.c3
-rw-r--r--drivers/tty/vt/vt.c18
-rw-r--r--drivers/ufs/core/ufshcd-priv.h6
-rw-r--r--drivers/ufs/core/ufshcd.c159
-rw-r--r--drivers/ufs/core/ufshpb.c7
-rw-r--r--drivers/ufs/host/Kconfig12
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c182
-rw-r--r--drivers/ufs/host/ufs-exynos.h1
-rw-r--r--drivers/ufs/host/ufs-mediatek.c324
-rw-r--r--drivers/ufs/host/ufs-mediatek.h74
-rw-r--r--drivers/ufs/host/ufs-qcom.c23
-rw-r--r--drivers/ufs/host/ufs-renesas.c412
-rw-r--r--drivers/ufs/host/ufshcd-pci.c18
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c28
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h6
-rw-r--r--drivers/uio/uio_pruss.c10
-rw-r--r--drivers/usb/atm/ueagle-atm.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c15
-rw-r--r--drivers/usb/chipidea/ci.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c23
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/otg_fsm.c2
-rw-r--r--drivers/usb/chipidea/trace.h4
-rw-r--r--drivers/usb/chipidea/udc.c14
-rw-r--r--drivers/usb/class/cdc-acm.c44
-rw-r--r--drivers/usb/class/cdc-acm.h20
-rw-r--r--drivers/usb/common/usb-conn-gpio.c17
-rw-r--r--drivers/usb/core/Makefile4
-rw-r--r--drivers/usb/core/driver.c2
-rw-r--r--drivers/usb/core/hcd.c51
-rw-r--r--drivers/usb/core/hub.c108
-rw-r--r--drivers/usb/core/hub.h4
-rw-r--r--drivers/usb/core/port.c83
-rw-r--r--drivers/usb/core/sysfs.c79
-rw-r--r--drivers/usb/core/usb-acpi.c18
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/dwc2/gadget.c3
-rw-r--r--drivers/usb/dwc2/hcd.c5
-rw-r--r--drivers/usb/dwc3/Kconfig4
-rw-r--r--drivers/usb/dwc3/core.c56
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c8
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c144
-rw-r--r--drivers/usb/dwc3/ep0.c9
-rw-r--r--drivers/usb/dwc3/gadget.c97
-rw-r--r--drivers/usb/gadget/function/f_acm.c20
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c36
-rw-r--r--drivers/usb/gadget/function/f_uvc.c30
-rw-r--r--drivers/usb/gadget/function/storage_common.c15
-rw-r--r--drivers/usb/gadget/function/storage_common.h2
-rw-r--r--drivers/usb/gadget/function/u_ether.c1
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c6
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c23
-rw-r--r--drivers/usb/gadget/function/uvc_video.c14
-rw-r--r--drivers/usb/gadget/legacy/inode.c1
-rw-r--r--drivers/usb/gadget/udc/Kconfig19
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c4
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c1597
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c10
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c2
-rw-r--r--drivers/usb/gadget/udc/core.c11
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c8
-rw-r--r--drivers/usb/gadget/udc/trace.h8
-rw-r--r--drivers/usb/host/Kconfig10
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-fsl.c11
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/octeon-hcd.c (renamed from drivers/staging/octeon-usb/octeon-hcd.c)0
-rw-r--r--drivers/usb/host/octeon-hcd.h (renamed from drivers/staging/octeon-usb/octeon-hcd.h)0
-rw-r--r--drivers/usb/host/ohci-at91.c69
-rw-r--r--drivers/usb/host/ohci-nxp.c1
-rw-r--r--drivers/usb/host/ohci-platform.c3
-rw-r--r--drivers/usb/host/ohci-ppc-of.c1
-rw-r--r--drivers/usb/host/ohci-sa1111.c25
-rw-r--r--drivers/usb/host/ohci-sm501.c2
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/uhci-hcd.h2
-rw-r--r--drivers/usb/host/xhci-mtk.c7
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c4
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c8
-rw-r--r--drivers/usb/host/xhci-trace.h4
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/image/mdc800.c2
-rw-r--r--drivers/usb/misc/Kconfig16
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c458
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h36
-rw-r--r--drivers/usb/misc/onboard_usb_hub_pdevs.c143
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c2
-rw-r--r--drivers/usb/misc/usbtest.c2
-rw-r--r--drivers/usb/mtu3/mtu3.h5
-rw-r--r--drivers/usb/mtu3/mtu3_core.c35
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c8
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c38
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c10
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h16
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c43
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h10
-rw-r--r--drivers/usb/musb/Kconfig13
-rw-r--r--drivers/usb/musb/Makefile1
-rw-r--r--drivers/usb/musb/mpfs.c269
-rw-r--r--drivers/usb/musb/musb_core.c16
-rw-r--r--drivers/usb/musb/musb_cppi41.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/musb/musb_trace.h4
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/renesas_usbhs/rza.c4
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/garmin_gps.c4
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/mos7720.c2
-rw-r--r--drivers/usb/serial/opticon.c4
-rw-r--r--drivers/usb/serial/sierra.c7
-rw-r--r--drivers/usb/serial/usb-serial.c2
-rw-r--r--drivers/usb/serial/usb_wwan.c13
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/typec/Kconfig11
-rw-r--r--drivers/usb/typec/Makefile3
-rw-r--r--drivers/usb/typec/anx7411.c1601
-rw-r--r--drivers/usb/typec/class.c168
-rw-r--r--drivers/usb/typec/class.h6
-rw-r--r--drivers/usb/typec/mux.c8
-rw-r--r--drivers/usb/typec/pd.c708
-rw-r--r--drivers/usb/typec/pd.h30
-rw-r--r--drivers/usb/typec/retimer.c173
-rw-r--r--drivers/usb/typec/retimer.h15
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h209
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c146
-rw-r--r--drivers/usb/typec/ucsi/Kconfig10
-rw-r--r--drivers/usb/typec/ucsi/Makefile1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c28
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c777
-rw-r--r--drivers/usb/usbip/vudc_rx.c6
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c14
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c14
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c144
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h11
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c173
-rw-r--r--drivers/vdpa/vdpa.c14
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c18
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c176
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c3
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c102
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c180
-rw-r--r--drivers/vfio/Makefile2
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_private.h2
-rw-r--r--drivers/vfio/pci/Kconfig11
-rw-r--r--drivers/vfio/pci/Makefile2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c11
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c14
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h4
-rw-r--r--drivers/vfio/pci/mlx5/main.c11
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c17
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c35
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h21
-rw-r--r--drivers/vfio/vfio.h17
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c14
-rw-r--r--drivers/vfio/vfio_iommu_type1.c197
-rw-r--r--drivers/vfio/vfio_main.c (renamed from drivers/vfio/vfio.c)209
-rw-r--r--drivers/vhost/scsi.c89
-rw-r--r--drivers/vhost/vdpa.c38
-rw-r--r--drivers/vhost/vringh.c78
-rw-r--r--drivers/video/backlight/lp855x_bl.c21
-rw-r--r--drivers/video/backlight/ltv350qv.c3
-rw-r--r--drivers/video/backlight/platform_lcd.c10
-rw-r--r--drivers/video/backlight/rt4831-backlight.c33
-rw-r--r--drivers/video/backlight/tps65217_bl.c10
-rw-r--r--drivers/video/console/vgacon.c12
-rw-r--r--drivers/video/fbdev/68328fb.c7
-rw-r--r--drivers/video/fbdev/amba-clcd.c24
-rw-r--r--drivers/video/fbdev/amifb.c15
-rw-r--r--drivers/video/fbdev/arkfb.c9
-rw-r--r--drivers/video/fbdev/atafb.c103
-rw-r--r--drivers/video/fbdev/cirrusfb.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c8
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/fm2fb.c4
-rw-r--r--drivers/video/fbdev/hpfb.c4
-rw-r--r--drivers/video/fbdev/i740fb.c9
-rw-r--r--drivers/video/fbdev/imxfb.c134
-rw-r--r--drivers/video/fbdev/offb.c1
-rw-r--r--drivers/video/fbdev/omap/hwa742.c3
-rw-r--r--drivers/video/fbdev/omap/omapfb.h9
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c3
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/sa1100fb.c41
-rw-r--r--drivers/video/fbdev/sis/init.c4
-rw-r--r--drivers/video/fbdev/skeletonfb.c6
-rw-r--r--drivers/video/fbdev/valkyriefb.c10
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/virt/acrn/ioreq.c6
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c9
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig5
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c27
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev_test.c5
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c9
-rw-r--r--drivers/virtio/Kconfig15
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c8
-rw-r--r--drivers/virtio/virtio_anchor.c18
-rw-r--r--drivers/virtio/virtio_balloon.c49
-rw-r--r--drivers/virtio/virtio_mem.c6
-rw-r--r--drivers/virtio/virtio_mmio.c5
-rw-r--r--drivers/virtio/virtio_pci_common.c12
-rw-r--r--drivers/virtio/virtio_pci_legacy.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c136
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c39
-rw-r--r--drivers/virtio/virtio_ring.c778
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/vme/Kconfig18
-rw-r--r--drivers/vme/Makefile8
-rw-r--r--drivers/vme/boards/Kconfig10
-rw-r--r--drivers/vme/boards/Makefile6
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c106
-rw-r--r--drivers/vme/boards/vme_vmivme7805.h33
-rw-r--r--drivers/vme/bridges/Kconfig24
-rw-r--r--drivers/vme/bridges/Makefile4
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c1928
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.h579
-rw-r--r--drivers/watchdog/Kconfig9
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm7038_wdt.c8
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/dw_wdt.c8
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/max77620_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c10
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c41
-rw-r--r--drivers/watchdog/pseries-wdt.c239
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/sama5d4_wdt.c8
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c15
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/st_lpc_wdt.c9
-rw-r--r--drivers/watchdog/tegra_wdt.c14
-rw-r--r--drivers/watchdog/wdat_wdt.c7
-rw-r--r--drivers/xen/Kconfig9
-rw-r--r--drivers/xen/events/events_base.c60
-rw-r--r--drivers/xen/gntdev.c6
-rw-r--r--drivers/xen/grant-dma-ops.c10
-rw-r--r--drivers/xen/manage.c2
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c4
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
4794 files changed, 426787 insertions, 117568 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b6a172d32a7d..19ee995bd0ae 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -183,8 +183,6 @@ source "drivers/iio/Kconfig"
source "drivers/ntb/Kconfig"
-source "drivers/vme/Kconfig"
-
source "drivers/pwm/Kconfig"
source "drivers/irqchip/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 9a30842b22c5..057857258bfd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -165,7 +165,6 @@ obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_POWERCAP) += powercap/
@@ -176,7 +175,7 @@ obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/
-obj-$(CONFIG_ANDROID) += android/
+obj-y += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index fdc6b593f500..c4d54a5326b1 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -131,7 +131,7 @@ static void vc_refresh(struct vc_data *vc)
for (i = 0; i < WIDTH; i++) {
u16 glyph = screen_glyph(vc,
2 * (vc_x + i) + vc_y * vc->vc_size_row);
- buf[i] = inverse_translate(vc, glyph, 1);
+ buf[i] = inverse_translate(vc, glyph, true);
}
braille_write(buf);
}
diff --git a/drivers/accessibility/speakup/.gitignore b/drivers/accessibility/speakup/.gitignore
new file mode 100644
index 000000000000..ac084679fea7
--- /dev/null
+++ b/drivers/accessibility/speakup/.gitignore
@@ -0,0 +1,4 @@
+/makemapdata
+/mapdata.h
+/genmap
+/speakupmap.h
diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile
index 6e4bfac8af65..ba69b0803d42 100644
--- a/drivers/accessibility/speakup/Makefile
+++ b/drivers/accessibility/speakup/Makefile
@@ -30,3 +30,31 @@ speakup-y := \
thread.o \
varhandlers.o
speakup-$(CONFIG_SPEAKUP_SERIALIO) += serialio.o
+
+
+clean-files := mapdata.h speakupmap.h
+
+
+# Generate mapdata.h from headers
+hostprogs += makemapdata
+makemapdata-objs := makemapdata.o
+
+quiet_cmd_mkmap = MKMAP $@
+ cmd_mkmap = TOPDIR=$(srctree) $(obj)/makemapdata > $@
+
+$(obj)/mapdata.h: $(obj)/makemapdata
+ $(call cmd,mkmap)
+
+
+# Generate speakupmap.h from mapdata.h
+hostprogs += genmap
+genmap-objs := genmap.o
+$(obj)/genmap.o: $(obj)/mapdata.h
+
+quiet_cmd_genmap = GENMAP $@
+ cmd_genmap = $(obj)/genmap $< > $@
+
+$(obj)/speakupmap.h: $(src)/speakupmap.map $(obj)/genmap
+ $(call cmd,genmap)
+
+$(obj)/main.o: $(obj)/speakupmap.h
diff --git a/drivers/accessibility/speakup/genmap.c b/drivers/accessibility/speakup/genmap.c
new file mode 100644
index 000000000000..0125000e00d9
--- /dev/null
+++ b/drivers/accessibility/speakup/genmap.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* genmap.c
+ * originally written by: Kirk Reiser.
+ *
+ ** Copyright (C) 2002 Kirk Reiser.
+ * Copyright (C) 2003 David Borowski.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <libgen.h>
+#include <string.h>
+#include <linux/version.h>
+#include <ctype.h>
+#include "utils.h"
+
+struct st_key_init {
+ char *name;
+ int value, shift;
+};
+
+static unsigned char key_data[MAXKEYVAL][16], *kp;
+
+#include "mapdata.h"
+
+static const char delims[] = "\t\n ";
+static char *cp;
+static int map_ver = 119; /* an arbitrary number so speakup can check */
+static int shift_table[17];
+static int max_states = 1, flags;
+/* flags reserved for later, maybe for individual console maps */
+
+static int get_shift_value(int state)
+{
+ int i;
+
+ for (i = 0; shift_table[i] != state; i++) {
+ if (shift_table[i] == -1) {
+ if (i >= 16)
+ oops("too many shift states", NULL);
+ shift_table[i] = state;
+ max_states = i+1;
+ break;
+ }
+ }
+ return i;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int value, shift_state, i, spk_val = 0, lock_val = 0;
+ int max_key_used = 0, num_keys_used = 0;
+ struct st_key *this;
+ struct st_key_init *p_init;
+ char buffer[256];
+
+ bzero(key_table, sizeof(key_table));
+ bzero(key_data, sizeof(key_data));
+
+ shift_table[0] = 0;
+ for (i = 1; i <= 16; i++)
+ shift_table[i] = -1;
+
+ if (argc < 2) {
+ fputs("usage: genmap filename\n", stderr);
+ exit(1);
+ }
+
+ for (p_init = init_key_data; p_init->name[0] != '.'; p_init++)
+ add_key(p_init->name, p_init->value, p_init->shift);
+
+ open_input(NULL, argv[1]);
+ while (fgets(buffer, sizeof(buffer), infile)) {
+ lc++;
+ value = shift_state = 0;
+
+ cp = strtok(buffer, delims);
+ if (*cp == '#')
+ continue;
+
+ while (cp) {
+ if (*cp == '=')
+ break;
+ this = find_key(cp);
+ if (this == NULL)
+ oops("unknown key/modifier", cp);
+ if (this->shift == is_shift) {
+ if (value)
+ oops("modifiers must come first", cp);
+ shift_state += this->value;
+ } else if (this->shift == is_input)
+ value = this->value;
+ else
+ oops("bad modifier or key", cp);
+ cp = strtok(0, delims);
+ }
+ if (!cp)
+ oops("no = found", NULL);
+
+ cp = strtok(0, delims);
+ if (!cp)
+ oops("no speakup function after =", NULL);
+
+ this = find_key(cp);
+ if (this == NULL || this->shift != is_spk)
+ oops("invalid speakup function", cp);
+
+ i = get_shift_value(shift_state);
+ if (key_data[value][i]) {
+ while (--cp > buffer)
+ if (!*cp)
+ *cp = ' ';
+ oops("two functions on same key combination", cp);
+ }
+ key_data[value][i] = (char)this->value;
+ if (value > max_key_used)
+ max_key_used = value;
+ }
+ fclose(infile);
+
+ this = find_key("spk_key");
+ if (this)
+ spk_val = this->value;
+
+ this = find_key("spk_lock");
+ if (this)
+ lock_val = this->value;
+
+ for (lc = 1; lc <= max_key_used; lc++) {
+ kp = key_data[lc];
+ if (!memcmp(key_data[0], kp, 16))
+ continue;
+ num_keys_used++;
+ for (i = 0; i < max_states; i++) {
+ if (kp[i] != spk_val && kp[i] != lock_val)
+ continue;
+ shift_state = shift_table[i];
+ if (shift_state&16)
+ continue;
+ shift_state = get_shift_value(shift_state+16);
+ kp[shift_state] = kp[i];
+ /* fill in so we can process the key up, as spk bit will be set */
+ }
+ }
+
+ printf("\t%d, %d, %d,\n\t", map_ver, num_keys_used, max_states);
+ for (i = 0; i < max_states; i++)
+ printf("%d, ", shift_table[i]);
+ printf("%d,", flags);
+ for (lc = 1; lc <= max_key_used; lc++) {
+ kp = key_data[lc];
+ if (!memcmp(key_data[0], kp, 16))
+ continue;
+ printf("\n\t%d,", lc);
+ for (i = 0; i < max_states; i++)
+ printf(" %d,", (unsigned int)kp[i]);
+ }
+ printf("\n\t0, %d\n", map_ver);
+
+ exit(0);
+}
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index d726537fa16c..f52265293482 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -470,7 +470,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
c |= 0x100;
}
- ch = inverse_translate(vc, c, 1);
+ ch = inverse_translate(vc, c, true);
*attribs = (w & 0xff00) >> 8;
}
return ch;
diff --git a/drivers/accessibility/speakup/makemapdata.c b/drivers/accessibility/speakup/makemapdata.c
new file mode 100644
index 000000000000..81db9ebf1fff
--- /dev/null
+++ b/drivers/accessibility/speakup/makemapdata.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* makemapdata.c
+ * originally written by: Kirk Reiser.
+ *
+ ** Copyright (C) 2002 Kirk Reiser.
+ * Copyright (C) 2003 David Borowski.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <libgen.h>
+#include <string.h>
+#include <linux/version.h>
+#include <ctype.h>
+#include "utils.h"
+
+static char buffer[256];
+
+static int get_define(void)
+{
+ char *c;
+
+ while (fgets(buffer, sizeof(buffer)-1, infile)) {
+ lc++;
+ if (strncmp(buffer, "#define", 7))
+ continue;
+ c = buffer + 7;
+ while (*c == ' ' || *c == '\t')
+ c++;
+ def_name = c;
+ while (*c && *c != ' ' && *c != '\t' && *c != '\n')
+ c++;
+ if (!*c || *c == '\n')
+ continue;
+ *c++ = '\0';
+ while (*c == ' ' || *c == '\t' || *c == '(')
+ c++;
+ def_val = c;
+ while (*c && *c != '\n' && *c != ')')
+ c++;
+ *c++ = '\0';
+ return 1;
+ }
+ fclose(infile);
+ infile = 0;
+ return 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int value, i;
+ struct st_key *this;
+ const char *dir_name;
+ char *cp;
+
+ dir_name = getenv("TOPDIR");
+ if (!dir_name)
+ dir_name = ".";
+ bzero(key_table, sizeof(key_table));
+ add_key("shift", 1, is_shift);
+ add_key("altgr", 2, is_shift);
+ add_key("ctrl", 4, is_shift);
+ add_key("alt", 8, is_shift);
+ add_key("spk", 16, is_shift);
+ add_key("double", 32, is_shift);
+
+ open_input(dir_name, "include/linux/input.h");
+ while (get_define()) {
+ if (strncmp(def_name, "KEY_", 4))
+ continue;
+ value = atoi(def_val);
+ if (value > 0 && value < MAXKEYVAL)
+ add_key(def_name, value, is_input);
+ }
+
+ open_input(dir_name, "include/uapi/linux/input-event-codes.h");
+ while (get_define()) {
+ if (strncmp(def_name, "KEY_", 4))
+ continue;
+ value = atoi(def_val);
+ if (value > 0 && value < MAXKEYVAL)
+ add_key(def_name, value, is_input);
+ }
+
+ open_input(dir_name, "drivers/accessibility/speakup/spk_priv_keyinfo.h");
+ while (get_define()) {
+ if (strlen(def_val) > 5) {
+ //if (def_val[0] == '(')
+ // def_val++;
+ cp = strchr(def_val, '+');
+ if (!cp)
+ continue;
+ if (cp[-1] == ' ')
+ cp[-1] = '\0';
+ *cp++ = '\0';
+ this = find_key(def_val);
+ while (*cp == ' ')
+ cp++;
+ if (!this || *cp < '0' || *cp > '9')
+ continue;
+ value = this->value+atoi(cp);
+ } else if (!strncmp(def_val, "0x", 2))
+ sscanf(def_val+2, "%x", &value);
+ else if (*def_val >= '0' && *def_val <= '9')
+ value = atoi(def_val);
+ else
+ continue;
+ add_key(def_name, value, is_spk);
+ }
+
+ printf("struct st_key_init init_key_data[] = {\n");
+ for (i = 0; i < HASHSIZE; i++) {
+ this = &key_table[i];
+ if (!this->name)
+ continue;
+ do {
+ printf("\t{ \"%s\", %d, %d, },\n", this->name, this->value, this->shift);
+ this = this->next;
+ } while (this);
+ }
+ printf("\t{ \".\", 0, 0 }\n};\n");
+
+ exit(0);
+}
diff --git a/drivers/accessibility/speakup/serialio.h b/drivers/accessibility/speakup/serialio.h
index 6f8f86f161bb..b4f9a1925b81 100644
--- a/drivers/accessibility/speakup/serialio.h
+++ b/drivers/accessibility/speakup/serialio.h
@@ -33,9 +33,8 @@ struct old_serial_port {
#define NUM_DISABLE_TIMEOUTS 3
/* buffer timeout in ms */
#define SPK_TIMEOUT 100
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#define spk_serial_tx_busy() \
- ((inb(speakup_info.port_tts + UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ (!uart_lsr_tx_empty(inb(speakup_info.port_tts + UART_LSR)))
#endif
diff --git a/drivers/accessibility/speakup/speakupmap.h b/drivers/accessibility/speakup/speakupmap.h
deleted file mode 100644
index c60d7339b89a..000000000000
--- a/drivers/accessibility/speakup/speakupmap.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- 119, 62, 6,
- 0, 16, 20, 17, 32, 48, 0,
- 2, 0, 78, 0, 0, 0, 0,
- 3, 0, 79, 0, 0, 0, 0,
- 4, 0, 76, 0, 0, 0, 0,
- 5, 0, 77, 0, 0, 0, 0,
- 6, 0, 74, 0, 0, 0, 0,
- 7, 0, 75, 0, 0, 0, 0,
- 9, 0, 5, 46, 0, 0, 0,
- 10, 0, 4, 0, 0, 0, 0,
- 11, 0, 0, 1, 0, 0, 0,
- 12, 0, 27, 0, 33, 0, 0,
- 19, 0, 47, 0, 0, 0, 0,
- 21, 0, 29, 17, 0, 0, 0,
- 22, 0, 15, 0, 0, 0, 0,
- 23, 0, 14, 0, 0, 0, 28,
- 24, 0, 16, 0, 0, 0, 0,
- 25, 0, 30, 18, 0, 0, 0,
- 28, 0, 3, 26, 0, 0, 0,
- 35, 0, 31, 0, 0, 0, 0,
- 36, 0, 12, 0, 0, 0, 0,
- 37, 0, 11, 0, 0, 0, 22,
- 38, 0, 13, 0, 0, 0, 0,
- 39, 0, 32, 7, 0, 0, 0,
- 40, 0, 23, 0, 0, 0, 0,
- 44, 0, 44, 0, 0, 0, 0,
- 49, 0, 24, 0, 0, 0, 0,
- 50, 0, 9, 19, 6, 0, 0,
- 51, 0, 8, 0, 0, 0, 36,
- 52, 0, 10, 20, 0, 0, 0,
- 53, 0, 25, 0, 0, 0, 0,
- 55, 46, 1, 0, 0, 0, 0,
- 58, 128, 128, 0, 0, 0, 0,
- 59, 0, 45, 0, 0, 0, 0,
- 60, 0, 40, 0, 0, 0, 0,
- 61, 0, 41, 0, 0, 0, 0,
- 62, 0, 42, 0, 0, 0, 0,
- 63, 0, 34, 0, 0, 0, 0,
- 64, 0, 35, 0, 0, 0, 0,
- 65, 0, 37, 0, 0, 0, 0,
- 66, 0, 38, 0, 0, 0, 0,
- 67, 0, 66, 0, 39, 0, 0,
- 68, 0, 67, 0, 0, 0, 0,
- 71, 15, 19, 0, 0, 0, 0,
- 72, 14, 29, 0, 0, 28, 0,
- 73, 16, 17, 0, 0, 0, 0,
- 74, 27, 33, 0, 0, 0, 0,
- 75, 12, 31, 0, 0, 0, 0,
- 76, 11, 21, 0, 0, 22, 0,
- 77, 13, 32, 0, 0, 0, 0,
- 78, 23, 43, 0, 0, 0, 0,
- 79, 9, 20, 0, 0, 0, 0,
- 80, 8, 30, 0, 0, 36, 0,
- 81, 10, 18, 0, 0, 0, 0,
- 82, 128, 128, 0, 0, 0, 0,
- 83, 24, 25, 0, 0, 0, 0,
- 87, 0, 68, 0, 0, 0, 0,
- 88, 0, 69, 0, 0, 0, 0,
- 96, 3, 26, 0, 0, 0, 0,
- 98, 4, 5, 0, 0, 0, 0,
- 99, 2, 0, 0, 0, 0, 0,
- 104, 0, 6, 0, 0, 0, 0,
- 109, 0, 7, 0, 0, 0, 0,
- 125, 128, 128, 0, 0, 0, 0,
- 0, 119
diff --git a/drivers/accessibility/speakup/utils.h b/drivers/accessibility/speakup/utils.h
new file mode 100644
index 000000000000..4bf2ee8ac246
--- /dev/null
+++ b/drivers/accessibility/speakup/utils.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* utils.h
+ * originally written by: Kirk Reiser.
+ *
+ ** Copyright (C) 2002 Kirk Reiser.
+ * Copyright (C) 2003 David Borowski.
+ */
+
+#include <stdio.h>
+
+#define MAXKEYS 512
+#define MAXKEYVAL 160
+#define HASHSIZE 101
+#define is_shift -3
+#define is_spk -2
+#define is_input -1
+
+struct st_key {
+ char *name;
+ struct st_key *next;
+ int value, shift;
+};
+
+struct st_key key_table[MAXKEYS];
+struct st_key *extra_keys = key_table+HASHSIZE;
+char *def_name, *def_val;
+FILE *infile;
+int lc;
+
+char filename[256];
+
+static inline void open_input(const char *dir_name, const char *name)
+{
+ if (dir_name)
+ snprintf(filename, sizeof(filename), "%s/%s", dir_name, name);
+ else
+ snprintf(filename, sizeof(filename), "%s", name);
+ infile = fopen(filename, "r");
+ if (infile == 0) {
+ fprintf(stderr, "can't open %s\n", filename);
+ exit(1);
+ }
+ lc = 0;
+}
+
+static inline int oops(const char *msg, const char *info)
+{
+ if (info == NULL)
+ info = "";
+ fprintf(stderr, "error: file %s line %d\n", filename, lc);
+ fprintf(stderr, "%s %s\n", msg, info);
+ exit(1);
+}
+
+static inline struct st_key *hash_name(char *name)
+{
+ u_char *pn = (u_char *)name;
+ int hash = 0;
+
+ while (*pn) {
+ hash = (hash * 17) & 0xfffffff;
+ if (isupper(*pn))
+ *pn = tolower(*pn);
+ hash += (int)*pn;
+ pn++;
+ }
+ hash %= HASHSIZE;
+ return &key_table[hash];
+}
+
+static inline struct st_key *find_key(char *name)
+{
+ struct st_key *this = hash_name(name);
+
+ while (this) {
+ if (this->name && !strcmp(name, this->name))
+ return this;
+ this = this->next;
+ }
+ return this;
+}
+
+static inline struct st_key *add_key(char *name, int value, int shift)
+{
+ struct st_key *this = hash_name(name);
+
+ if (extra_keys-key_table >= MAXKEYS)
+ oops("out of key table space, enlarge MAXKEYS", NULL);
+ if (this->name != NULL) {
+ while (this->next) {
+ if (!strcmp(name, this->name))
+ oops("attempt to add duplicate key", name);
+ this = this->next;
+ }
+ this->next = extra_keys++;
+ this = this->next;
+ }
+ this->name = strdup(name);
+ this->value = value;
+ this->shift = shift;
+ return this;
+}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1e34f846508f..7802d8846a8d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -210,7 +210,7 @@ config ACPI_TINY_POWER_BUTTON_SIGNAL
config ACPI_VIDEO
tristate "Video"
- depends on X86 && BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
select THERMAL
help
@@ -255,7 +255,6 @@ config ACPI_DOCK
config ACPI_CPU_FREQ_PSS
bool
- select THERMAL
config ACPI_PROCESSOR_CSTATE
def_bool y
@@ -287,6 +286,7 @@ config ACPI_PROCESSOR
depends on X86 || IA64 || ARM64 || LOONGARCH
select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || IA64 || LOONGARCH
+ select THERMAL
default y
help
This driver adds support for the ACPI Processor package. It is required
@@ -572,6 +572,21 @@ source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
bool
+config ACPI_PRMT
+ bool "Platform Runtime Mechanism Support"
+ depends on EFI && (X86_64 || ARM64)
+ default y
+ help
+ Platform Runtime Mechanism (PRM) is a firmware interface exposing a
+ set of binary executables that can be called from the AML interpreter
+ or directly from device drivers.
+
+ Say Y to enable the AML interpreter to execute the PRM code.
+
+ While this feature is optional in principle, leaving it out may
+ substantially increase computational overhead related to the
+ initialization of some server systems.
+
endif # ACPI
config X86_PM_TIMER
@@ -589,18 +604,3 @@ config X86_PM_TIMER
You should nearly always say Y here because many modern
systems require this timer.
-
-config ACPI_PRMT
- bool "Platform Runtime Mechanism Support"
- depends on EFI && X86_64
- default y
- help
- Platform Runtime Mechanism (PRM) is a firmware interface exposing a
- set of binary executables that can be called from the AML interpreter
- or directly from device drivers.
-
- Say Y to enable the AML interpreter to execute the PRM code.
-
- While this feature is optional in principle, leaving it out may
- substantially increase computational overhead related to the
- initialization of some server systems.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b5a8d3e00a52..0002eecbf870 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -109,10 +109,9 @@ obj-$(CONFIG_ACPI_PPTT) += pptt.o
obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o
# processor has its own "processor." module_param namespace
-processor-y := processor_driver.o
+processor-y := processor_driver.o processor_thermal.o
processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o
-processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o \
- processor_thermal.o
+processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 48e5059d67ca..50540d4d4948 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -109,17 +109,11 @@ static void lpit_update_residency(struct lpit_residency_info *info,
if (!info->iomem_addr)
return;
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return;
-
/* Silently fail, if cpuidle attribute group is not present */
sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
&dev_attr_low_power_idle_system_residency_us.attr,
"cpuidle");
} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return;
-
/* Silently fail, if cpuidle attribute group is not present */
sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
&dev_attr_low_power_idle_cpu_residency_us.attr,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fbe0756259c5..c4d4d21391d7 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -422,6 +422,9 @@ static int register_device_clock(struct acpi_device *adev,
if (!lpss_clk_dev)
lpt_register_clock_device();
+ if (IS_ERR(lpss_clk_dev))
+ return PTR_ERR(lpss_clk_dev);
+
clk_data = platform_get_drvdata(lpss_clk_dev);
if (!clk_data)
return -ENODEV;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 43177c20ce4f..5cbe2196176d 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -73,7 +73,7 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
-static bool has_backlight;
+static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
static DEFINE_MUTEX(video_list_lock);
@@ -1150,24 +1150,25 @@ acpi_video_get_device_type(struct acpi_video_bus *video,
return 0;
}
-static int
-acpi_video_bus_get_one_device(struct acpi_device *device,
- struct acpi_video_bus *video)
+static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
{
- unsigned long long device_id;
- int status, device_type;
- struct acpi_video_device *data;
+ struct acpi_video_bus *video = arg;
struct acpi_video_device_attrib *attribute;
+ struct acpi_video_device *data;
+ unsigned long long device_id;
+ acpi_status status;
+ int device_type;
- status =
- acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
- /* Some device omits _ADR, we skip them instead of fail */
+ status = acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
+ /* Skip devices without _ADR instead of failing. */
if (ACPI_FAILURE(status))
- return 0;
+ goto exit;
data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ dev_dbg(&device->dev, "Cannot attach\n");
return -ENOMEM;
+ }
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
@@ -1224,13 +1225,15 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_find_cap(data);
if (data->cap._BCM && data->cap._BCL)
- has_backlight = true;
+ may_report_brightness_keys = true;
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
- return status;
+exit:
+ video->child_count++;
+ return 0;
}
/*
@@ -1542,9 +1545,6 @@ static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
{
- int status = 0;
- struct acpi_device *dev;
-
/*
* There are systems where video module known to work fine regardless
* of broken _DOD and ignoring returned value here doesn't cause
@@ -1552,16 +1552,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
*/
acpi_video_device_enumerate(video);
- list_for_each_entry(dev, &device->children, node) {
-
- status = acpi_video_bus_get_one_device(dev, video);
- if (status) {
- dev_err(&dev->dev, "Can't attach device\n");
- break;
- }
- video->child_count++;
- }
- return status;
+ return acpi_dev_for_each_child(device, acpi_video_bus_get_one_device, video);
}
/* acpi_video interface */
@@ -1693,6 +1684,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
+ if (keycode)
+ may_report_brightness_keys = true;
+
acpi_notifier_call_chain(device, event, 0);
if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) {
@@ -2253,7 +2247,7 @@ void acpi_video_unregister(void)
if (register_count) {
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
- has_backlight = false;
+ may_report_brightness_keys = false;
}
mutex_unlock(&register_count_mutex);
}
@@ -2275,7 +2269,7 @@ void acpi_video_unregister_backlight(void)
bool acpi_video_handles_brightness_key_presses(void)
{
- return has_backlight &&
+ return may_report_brightness_keys &&
(report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
}
EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 33b7fbbeda82..9f49272cad39 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -3,7 +3,7 @@
* apei-base.c - ACPI Platform Error Interface (APEI) supporting
* infrastructure
*
- * APEI allows to report errors (for example from the chipset) to the
+ * APEI allows to report errors (for example from the chipset) to
* the operating system. This improves NMI handling especially. In
* addition it supports error serialization and error injection.
*
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 598fd19b65fa..45973aa6e06d 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -29,16 +29,26 @@
#undef pr_fmt
#define pr_fmt(fmt) "BERT: " fmt
+
+#define ACPI_BERT_PRINT_MAX_RECORDS 5
#define ACPI_BERT_PRINT_MAX_LEN 1024
static int bert_disable;
+/*
+ * Print "all" the error records in the BERT table, but avoid huge spam to
+ * the console if the BIOS included oversize records, or too many records.
+ * Skipping some records here does not lose anything because the full
+ * data is available to user tools in:
+ * /sys/firmware/acpi/tables/data/BERT
+ */
static void __init bert_print_all(struct acpi_bert_region *region,
unsigned int region_len)
{
struct acpi_hest_generic_status *estatus =
(struct acpi_hest_generic_status *)region;
int remain = region_len;
+ int printed = 0, skipped = 0;
u32 estatus_len;
while (remain >= sizeof(struct acpi_bert_region)) {
@@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region,
if (remain < estatus_len) {
pr_err(FW_BUG "Truncated status block (length: %u).\n",
estatus_len);
- return;
+ break;
}
/* No more error records. */
if (!estatus->block_status)
- return;
+ break;
if (cper_estatus_check(estatus)) {
pr_err(FW_BUG "Invalid error record.\n");
- return;
+ break;
}
- pr_info_once("Error records from previous boot:\n");
- if (region_len < ACPI_BERT_PRINT_MAX_LEN)
+ if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
+ printed < ACPI_BERT_PRINT_MAX_RECORDS) {
+ pr_info_once("Error records from previous boot:\n");
cper_estatus_print(KERN_INFO HW_ERR, estatus);
- else
- pr_info_once("Max print length exceeded, table data is available at:\n"
- "/sys/firmware/acpi/tables/data/BERT");
+ printed++;
+ } else {
+ skipped++;
+ }
/*
* Because the boot error source is "one-time polled" type,
@@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
estatus = (void *)estatus + estatus_len;
remain -= estatus_len;
}
+
+ if (skipped)
+ pr_info(HW_ERR "Skipped %d error records\n", skipped);
}
static int __init setup_bert_disable(char *str)
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index d4326ec12d29..6b583373c58a 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -546,6 +546,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
!= REGION_INTERSECTS) &&
(region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
!= REGION_INTERSECTS) &&
+ (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
+ != REGION_INTERSECTS) &&
!arch_is_platform_page(base_addr)))
return -EINVAL;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index f2f8f05662de..ca2aed86b540 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -788,6 +788,294 @@ void acpi_configure_pmsi_domain(struct device *dev)
}
#ifdef CONFIG_IOMMU_API
+static void iort_rmr_free(struct device *dev,
+ struct iommu_resv_region *region)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+
+ rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
+ kfree(rmr_data->sids);
+ kfree(rmr_data);
+}
+
+static struct iommu_iort_rmr_data *iort_rmr_alloc(
+ struct acpi_iort_rmr_desc *rmr_desc,
+ int prot, enum iommu_resv_type type,
+ u32 *sids, u32 num_sids)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+ struct iommu_resv_region *region;
+ u32 *sids_copy;
+ u64 addr = rmr_desc->base_address, size = rmr_desc->length;
+
+ rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
+ if (!rmr_data)
+ return NULL;
+
+ /* Create a copy of SIDs array to associate with this rmr_data */
+ sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
+ if (!sids_copy) {
+ kfree(rmr_data);
+ return NULL;
+ }
+ rmr_data->sids = sids_copy;
+ rmr_data->num_sids = num_sids;
+
+ if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
+ /* PAGE align base addr and size */
+ addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
+
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
+ rmr_desc->base_address,
+ rmr_desc->base_address + rmr_desc->length - 1,
+ addr, addr + size - 1);
+ }
+
+ region = &rmr_data->rr;
+ INIT_LIST_HEAD(&region->list);
+ region->start = addr;
+ region->length = size;
+ region->prot = prot;
+ region->type = type;
+ region->free = iort_rmr_free;
+
+ return rmr_data;
+}
+
+static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
+ u32 count)
+{
+ int i, j;
+
+ for (i = 0; i < count; i++) {
+ u64 end, start = desc[i].base_address, length = desc[i].length;
+
+ if (!length) {
+ pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
+ start);
+ continue;
+ }
+
+ end = start + length - 1;
+
+ /* Check for address overlap */
+ for (j = i + 1; j < count; j++) {
+ u64 e_start = desc[j].base_address;
+ u64 e_end = e_start + desc[j].length - 1;
+
+ if (start <= e_end && end >= e_start)
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
+ start, end);
+ }
+ }
+}
+
+/*
+ * Please note, we will keep the already allocated RMR reserve
+ * regions in case of a memory allocation failure.
+ */
+static void iort_get_rmrs(struct acpi_iort_node *node,
+ struct acpi_iort_node *smmu,
+ u32 *sids, u32 num_sids,
+ struct list_head *head)
+{
+ struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
+ struct acpi_iort_rmr_desc *rmr_desc;
+ int i;
+
+ rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
+ rmr->rmr_offset);
+
+ iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
+
+ for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
+ struct iommu_iort_rmr_data *rmr_data;
+ enum iommu_resv_type type;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
+ type = IOMMU_RESV_DIRECT_RELAXABLE;
+ else
+ type = IOMMU_RESV_DIRECT;
+
+ if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
+ prot |= IOMMU_PRIV;
+
+ /* Attributes 0x00 - 0x03 represents device memory */
+ if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
+ ACPI_IORT_RMR_ATTR_DEVICE_GRE)
+ prot |= IOMMU_MMIO;
+ else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
+ ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
+ prot |= IOMMU_CACHE;
+
+ rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
+ sids, num_sids);
+ if (!rmr_data)
+ return;
+
+ list_add_tail(&rmr_data->rr.list, head);
+ }
+}
+
+static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
+ u32 new_count)
+{
+ u32 *new_sids;
+ u32 total_count = count + new_count;
+ int i;
+
+ new_sids = krealloc_array(sids, count + new_count,
+ sizeof(*new_sids), GFP_KERNEL);
+ if (!new_sids)
+ return NULL;
+
+ for (i = count; i < total_count; i++)
+ new_sids[i] = id_start++;
+
+ return new_sids;
+}
+
+static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
+ u32 id_count)
+{
+ int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ /*
+ * Make sure the kernel has preserved the boot firmware PCIe
+ * configuration. This is required to ensure that the RMR PCIe
+ * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
+ */
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+
+ if (!host->preserve_config)
+ return false;
+ }
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ if (fwspec->ids[i] >= id_start &&
+ fwspec->ids[i] <= id_start + id_count)
+ return true;
+ }
+
+ return false;
+}
+
+static void iort_node_get_rmr_info(struct acpi_iort_node *node,
+ struct acpi_iort_node *iommu,
+ struct device *dev, struct list_head *head)
+{
+ struct acpi_iort_node *smmu = NULL;
+ struct acpi_iort_rmr *rmr;
+ struct acpi_iort_id_mapping *map;
+ u32 *sids = NULL;
+ u32 num_sids = 0;
+ int i;
+
+ if (!node->mapping_offset || !node->mapping_count) {
+ pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
+ node);
+ return;
+ }
+
+ rmr = (struct acpi_iort_rmr *)node->node_data;
+ if (!rmr->rmr_offset || !rmr->rmr_count)
+ return;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset);
+
+ /*
+ * Go through the ID mappings and see if we have a match for SMMU
+ * and dev(if !NULL). If found, get the sids for the Node.
+ * Please note, id_count is equal to the number of IDs in the
+ * range minus one.
+ */
+ for (i = 0; i < node->mapping_count; i++, map++) {
+ struct acpi_iort_node *parent;
+
+ if (!map->id_count)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+ if (parent != iommu)
+ continue;
+
+ /* If dev is valid, check RMR node corresponds to the dev SID */
+ if (dev && !iort_rmr_has_dev(dev, map->output_base,
+ map->id_count))
+ continue;
+
+ /* Retrieve SIDs associated with the Node. */
+ sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
+ map->id_count + 1);
+ if (!sids)
+ return;
+
+ num_sids += map->id_count + 1;
+ }
+
+ if (!sids)
+ return;
+
+ iort_get_rmrs(node, smmu, sids, num_sids, head);
+ kfree(sids);
+}
+
+static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_table_iort *iort;
+ struct acpi_iort_node *iort_node, *iort_end;
+ int i;
+
+ /* Only supports ARM DEN 0049E.d onwards */
+ if (iort_table->revision < 5)
+ return;
+
+ iort = (struct acpi_table_iort *)iort_table;
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort->node_offset);
+ iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort_table->length);
+
+ for (i = 0; i < iort->node_count; i++) {
+ if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
+ "IORT node pointer overflows, bad table!\n"))
+ return;
+
+ if (iort_node->type == ACPI_IORT_NODE_RMR)
+ iort_node_get_rmr_info(iort_node, iommu, dev, head);
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+ iort_node->length);
+ }
+}
+
+/*
+ * Populate the RMR list associated with a given IOMMU and dev(if provided).
+ * If dev is NULL, the function populates all the RMRs associated with the
+ * given IOMMU.
+ */
+static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
+ struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_iort_node *iommu;
+
+ iommu = iort_get_iort_node(iommu_fwnode);
+ if (!iommu)
+ return;
+
+ iort_find_rmrs(iommu, dev, head);
+}
+
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
struct acpi_iort_node *iommu;
@@ -806,27 +1094,22 @@ static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
return NULL;
}
-/**
- * iort_iommu_msi_get_resv_regions - Reserved region driver helper
- * @dev: Device from iommu_get_resv_regions()
- * @head: Reserved region list from iommu_get_resv_regions()
- *
- * Returns: Number of msi reserved regions on success (0 if platform
- * doesn't require the reservation or no associated msi regions),
- * appropriate error value otherwise. The ITS interrupt translation
- * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
- * are the msi reserved regions.
+/*
+ * Retrieve platform specific HW MSI reserve regions.
+ * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
+ * associated with the device are the HW MSI reserved regions.
*/
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+static void iort_iommu_msi_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct acpi_iort_its_group *its;
struct acpi_iort_node *iommu_node, *its_node = NULL;
- int i, resv = 0;
+ int i;
iommu_node = iort_get_msi_resv_iommu(dev);
if (!iommu_node)
- return 0;
+ return;
/*
* Current logic to reserve ITS regions relies on HW topologies
@@ -846,7 +1129,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
}
if (!its_node)
- return 0;
+ return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)its_node->node_data;
@@ -860,15 +1143,52 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
prot, IOMMU_RESV_MSI);
- if (region) {
+ if (region)
list_add_tail(&region->list, head);
- resv++;
- }
}
}
+}
+
+/**
+ * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
+ * @dev: Device from iommu_get_resv_regions()
+ * @head: Reserved region list from iommu_get_resv_regions()
+ */
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ iort_iommu_msi_get_resv_regions(dev, head);
+ iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
+}
+
+/**
+ * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
+ * associated StreamIDs information.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
+}
+EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
+
+/**
+ * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
- return (resv == its->its_count) ? resv : -ENODEV;
+ list_for_each_entry_safe(entry, next, head, list)
+ entry->free(NULL, entry);
}
+EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
static inline bool iort_iommu_driver_enabled(u8 type)
{
@@ -1034,8 +1354,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
}
#else
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e2db1bdd9dd2..c0d20d997891 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -464,7 +464,6 @@ out_free:
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
- struct acpi_driver *driver;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
bool hotplug_event = false;
@@ -516,10 +515,13 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
if (!adev)
goto err;
- driver = adev->driver;
- if (driver && driver->ops.notify &&
- (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
- driver->ops.notify(adev, type);
+ if (adev->dev.driver) {
+ struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
+
+ if (driver && driver->ops.notify &&
+ (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+ driver->ops.notify(adev, type);
+ }
if (!hotplug_event) {
acpi_bus_put_acpi_device(adev);
@@ -538,8 +540,9 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
+ struct acpi_driver *acpi_drv = to_acpi_driver(device->dev.driver);
- device->driver->ops.notify(device, event);
+ acpi_drv->ops.notify(device, event);
}
static void acpi_notify_device_fixed(void *data)
@@ -1032,8 +1035,6 @@ static int acpi_device_probe(struct device *dev)
if (ret)
return ret;
- acpi_dev->driver = acpi_drv;
-
pr_debug("Driver [%s] successfully bound to device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id);
@@ -1043,7 +1044,6 @@ static int acpi_device_probe(struct device *dev)
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
- acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
return ret;
}
@@ -1059,15 +1059,14 @@ static int acpi_device_probe(struct device *dev)
static void acpi_device_remove(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = acpi_dev->driver;
+ struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+
+ if (acpi_drv->ops.notify)
+ acpi_device_remove_notify_handler(acpi_dev);
+
+ if (acpi_drv->ops.remove)
+ acpi_drv->ops.remove(acpi_dev);
- if (acpi_drv) {
- if (acpi_drv->ops.notify)
- acpi_device_remove_notify_handler(acpi_dev);
- if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev);
- }
- acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
put_device(dev);
@@ -1101,6 +1100,7 @@ static int acpi_dev_for_one_check(struct device *dev, void *context)
return adwc->fn(to_acpi_device(dev), adwc->data);
}
+EXPORT_SYMBOL_GPL(acpi_dev_for_each_child);
int acpi_dev_for_each_child(struct acpi_device *adev,
int (*fn)(struct acpi_device *, void *), void *data)
@@ -1113,6 +1113,18 @@ int acpi_dev_for_each_child(struct acpi_device *adev,
return device_for_each_child(&adev->dev, &adwc, acpi_dev_for_one_check);
}
+int acpi_dev_for_each_child_reverse(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *),
+ void *data)
+{
+ struct acpi_dev_walk_context adwc = {
+ .fn = fn,
+ .data = data,
+ };
+
+ return device_for_each_child_reverse(&adev->dev, &adwc, acpi_dev_for_one_check);
+}
+
/* --------------------------------------------------------------------------
Initialization/Cleanup
-------------------------------------------------------------------------- */
@@ -1144,6 +1156,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_PLATFORM:
message = "platform specific model";
break;
+ case ACPI_IRQ_MODEL_LPIC:
+ message = "LPIC";
+ break;
default:
pr_info("Unknown interrupt routing model\n");
return -ENODEV;
@@ -1399,6 +1414,7 @@ static int __init acpi_init(void)
pci_mmcfg_late_init();
acpi_iort_init();
+ acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
acpi_scan_init();
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index ccaa647ac3d4..5b7e3b9ae370 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -23,17 +23,18 @@ static const struct acpi_device_id container_device_ids[] = {
#ifdef CONFIG_ACPI_CONTAINER
-static int acpi_container_offline(struct container_dev *cdev)
+static int check_offline(struct acpi_device *adev, void *not_used)
{
- struct acpi_device *adev = ACPI_COMPANION(&cdev->dev);
- struct acpi_device *child;
+ if (acpi_scan_is_offline(adev, false))
+ return 0;
- /* Check all of the dependent devices' physical companions. */
- list_for_each_entry(child, &adev->children, node)
- if (!acpi_scan_is_offline(child, false))
- return -EBUSY;
+ return -EBUSY;
+}
- return 0;
+static int acpi_container_offline(struct container_dev *cdev)
+{
+ /* Check all of the dependent devices' physical companions. */
+ return acpi_dev_for_each_child(ACPI_COMPANION(&cdev->dev), check_offline, NULL);
}
static void acpi_container_release(struct device *dev)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6ff1901d7d43..1e15a9f25ae9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -618,33 +618,6 @@ static int pcc_data_alloc(int pcc_ss_id)
return 0;
}
-/* Check if CPPC revision + num_ent combination is supported */
-static bool is_cppc_supported(int revision, int num_ent)
-{
- int expected_num_ent;
-
- switch (revision) {
- case CPPC_V2_REV:
- expected_num_ent = CPPC_V2_NUM_ENT;
- break;
- case CPPC_V3_REV:
- expected_num_ent = CPPC_V3_NUM_ENT;
- break;
- default:
- pr_debug("Firmware exports unsupported CPPC revision: %d\n",
- revision);
- return false;
- }
-
- if (expected_num_ent != num_ent) {
- pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
- num_ent, expected_num_ent, revision);
- return false;
- }
-
- return true;
-}
-
/*
* An example CPC table looks like the following.
*
@@ -733,7 +706,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
cpc_obj->type, pr->id);
goto out_free;
}
- cpc_ptr->num_entries = num_ent;
/* Second entry should be revision. */
cpc_obj = &out_obj->package.elements[1];
@@ -744,10 +716,32 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
cpc_obj->type, pr->id);
goto out_free;
}
- cpc_ptr->version = cpc_rev;
- if (!is_cppc_supported(cpc_rev, num_ent))
+ if (cpc_rev < CPPC_V2_REV) {
+ pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
+ pr->id);
goto out_free;
+ }
+
+ /*
+	 * Disregard _CPC if the number of entries in the return package is not
+	 * as expected, but allow future revisions that are proper supersets of
+	 * v3 and only cause more entries to be returned by _CPC.
+ */
+ if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
+ (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
+ (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
+ pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
+ num_ent, pr->id);
+ goto out_free;
+ }
+ if (cpc_rev > CPPC_V3_REV) {
+ num_ent = CPPC_V3_NUM_ENT;
+ cpc_rev = CPPC_V3_REV;
+ }
+
+ cpc_ptr->num_entries = num_ent;
+ cpc_ptr->version = cpc_rev;
/* Iterate through remaining entries in _CPC */
for (i = 2; i < num_ent; i++) {
@@ -782,7 +776,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
- goto out_free;
+ if (!cpc_supported_by_cpu())
+ goto out_free;
}
addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -809,7 +804,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
- goto out_free;
+ if (!cpc_supported_by_cpu())
+ goto out_free;
}
} else {
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
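
The open-coded validation above accepts exactly CPPC_V2_NUM_ENT entries for revision 2, exactly CPPC_V3_NUM_ENT for revision 3, and strictly more than CPPC_V3_NUM_ENT for any later revision, which is then treated as a v3 superset by clamping num_ent and cpc_rev back to the v3 values. A standalone restatement of that acceptance rule, as a sketch assuming the same CPPC_V*_REV/CPPC_V*_NUM_ENT macros are in scope (this helper does not exist in the patch):

/* Sketch of the _CPC layout check applied inline above; illustrative only. */
static bool cpc_layout_ok(int rev, int num_ent)
{
	if (rev < CPPC_V2_REV)
		return false;
	if (rev == CPPC_V2_REV)
		return num_ent == CPPC_V2_NUM_ENT;
	if (rev == CPPC_V3_REV)
		return num_ent == CPPC_V3_NUM_ENT;
	/* Future revisions must be proper supersets of v3. */
	return num_ent > CPPC_V3_NUM_ENT;
}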
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 130b5f4a50a3..9dce1245689c 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -369,6 +369,28 @@ int acpi_device_fix_up_power(struct acpi_device *device)
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
+static int fix_up_power_if_applicable(struct acpi_device *adev, void *not_used)
+{
+ if (adev->status.present && adev->status.enabled)
+ acpi_device_fix_up_power(adev);
+
+ return 0;
+}
+
+/**
+ * acpi_device_fix_up_power_extended - Force device and its children into D0.
+ * @adev: Parent device object whose power state is to be fixed up.
+ *
+ * Call acpi_device_fix_up_power() for @adev and for each of its children that
+ * is reported as present and enabled.
+ */
+void acpi_device_fix_up_power_extended(struct acpi_device *adev)
+{
+ acpi_device_fix_up_power(adev);
+ acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
+
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
int state;
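
acpi_device_fix_up_power_extended() is meant for drivers whose ACPI companion has child objects that must also be powered up before the device is usable. A hedged sketch of a caller, from a hypothetical platform driver's probe path (the driver itself is illustrative, not from this patch):

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	if (!adev)
		return -ENODEV;

	/* Force the device and all present, enabled children into D0. */
	acpi_device_fix_up_power_extended(adev);

	return 0;
}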
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index d5d6403ba07b..120873dad2cc 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -376,7 +376,7 @@ eject_store(struct device *d, struct device_attribute *attr,
return -EINVAL;
if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
- && !acpi_device->driver)
+ && !d->driver)
return -ENODEV;
status = acpi_get_type(acpi_device->handle, &not_used);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a1b871a418f8..c95e535035a0 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -180,7 +180,6 @@ static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
-static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
@@ -1407,24 +1406,16 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
if (ec->data_addr == 0 || ec->command_addr == 0)
return AE_OK;
- if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
- /*
- * Always inherit the GPE number setting from the ECDT
- * EC.
- */
- ec->gpe = boot_ec->gpe;
- } else {
- /* Get GPE bit assignment (EC events). */
- /* TODO: Add support for _GPE returning a package */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_SUCCESS(status))
- ec->gpe = tmp;
+ /* Get GPE bit assignment (EC events). */
+ /* TODO: Add support for _GPE returning a package */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status))
+ ec->gpe = tmp;
+ /*
+ * Errors are non-fatal, allowing for ACPI Reduced Hardware
+ * platforms which use GpioInt instead of GPE.
+ */
- /*
- * Errors are non-fatal, allowing for ACPI Reduced Hardware
- * platforms which use GpioInt instead of GPE.
- */
- }
/* Use the global lock for all EC transactions? */
tmp = 0;
acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
@@ -1626,15 +1617,18 @@ static int acpi_ec_add(struct acpi_device *device)
}
if (boot_ec && ec->command_addr == boot_ec->command_addr &&
- ec->data_addr == boot_ec->data_addr &&
- !EC_FLAGS_TRUST_DSDT_GPE) {
+ ec->data_addr == boot_ec->data_addr) {
/*
- * Trust PNP0C09 namespace location rather than
- * ECDT ID. But trust ECDT GPE rather than _GPE
- * because of ASUS quirks, so do not change
- * boot_ec->gpe to ec->gpe.
+ * Trust PNP0C09 namespace location rather than ECDT ID.
+ * But trust ECDT GPE rather than _GPE because of ASUS
+ * quirks. So do not change boot_ec->gpe to ec->gpe,
+ * except when the TRUST_DSDT_GPE quirk is set.
*/
boot_ec->handle = ec->handle;
+
+ if (EC_FLAGS_TRUST_DSDT_GPE)
+ boot_ec->gpe = ec->gpe;
+
acpi_handle_debug(ec->handle, "duplicated.\n");
acpi_ec_free(ec);
ec = boot_ec;
@@ -1862,68 +1856,40 @@ static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
return 0;
}
-/*
- * Some DSDTs contain wrong GPE setting.
- * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
- * https://bugzilla.kernel.org/show_bug.cgi?id=195651
- */
-static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
-{
- pr_debug("Detected system needing ignore DSDT GPE setting.\n");
- EC_FLAGS_IGNORE_DSDT_GPE = 1;
- return 0;
-}
-
static const struct dmi_system_id ec_dmi_table[] __initconst = {
{
- ec_correct_ecdt, "MSI MS-171F", {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS FX502VD", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS FX502VE", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS GL702VMK", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS X550VXK", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS X580VD", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
+ /*
+ * MSI MS-171F
+ * https://bugzilla.kernel.org/show_bug.cgi?id=12461
+ */
+ .callback = ec_correct_ecdt,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),
+ },
+ },
{
- /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
- ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
+ /*
+ * HP Pavilion Gaming Laptop 15-cx0xxx
+ * https://bugzilla.kernel.org/show_bug.cgi?id=209989
+ */
+ .callback = ec_honor_dsdt_gpe,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),
+ },
+ },
{
- ec_clear_on_resume, "Samsung hardware", {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
- {},
+ /*
+ * Samsung hardware
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ */
+ .callback = ec_clear_on_resume,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ },
+ },
+ {}
};
void __init acpi_ec_ecdt_probe(void)
@@ -2201,28 +2167,18 @@ static int acpi_ec_init_workqueues(void)
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
{
- .ident = "Thinkpad X1 Carbon 6th",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
},
},
{
- .ident = "ThinkPad X1 Carbon 6th",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
- },
- },
- {
- .ident = "ThinkPad X1 Yoga 3rd",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
},
},
{
- .ident = "HP ZHAN 66 Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8d769114a048..204fe94c7e45 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -77,12 +77,22 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
#define FIND_CHILD_MIN_SCORE 1
#define FIND_CHILD_MAX_SCORE 2
+static int match_any(struct acpi_device *adev, void *not_used)
+{
+ return 1;
+}
+
+static bool acpi_dev_has_children(struct acpi_device *adev)
+{
+ return acpi_dev_for_each_child(adev, match_any, NULL) > 0;
+}
+
static int find_child_checks(struct acpi_device *adev, bool check_children)
{
unsigned long long sta;
acpi_status status;
- if (check_children && list_empty(&adev->children))
+ if (check_children && !acpi_dev_has_children(adev))
return -ENODEV;
status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
@@ -105,54 +115,97 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
return FIND_CHILD_MAX_SCORE;
}
-struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
- u64 address, bool check_children)
-{
- struct acpi_device *adev, *ret = NULL;
- int ret_score = 0;
-
- if (!parent)
- return NULL;
+struct find_child_walk_data {
+ struct acpi_device *adev;
+ u64 address;
+ int score;
+ bool check_sta;
+ bool check_children;
+};
- list_for_each_entry(adev, &parent->children, node) {
- acpi_bus_address addr = acpi_device_adr(adev);
- int score;
+static int check_one_child(struct acpi_device *adev, void *data)
+{
+ struct find_child_walk_data *wd = data;
+ int score;
- if (!adev->pnp.type.bus_address || addr != address)
- continue;
+ if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address)
+ return 0;
- if (!ret) {
- /* This is the first matching object. Save it. */
- ret = adev;
- continue;
- }
+ if (!wd->adev) {
/*
- * There is more than one matching device object with the same
- * _ADR value. That really is unexpected, so we are kind of
- * beyond the scope of the spec here. We have to choose which
- * one to return, though.
- *
- * First, check if the previously found object is good enough
- * and return it if so. Second, do the same for the object that
- * we've just found.
+ * This is the first matching object, so save it. If it is not
+ * necessary to look for any other matching objects, stop the
+ * search.
*/
- if (!ret_score) {
- ret_score = find_child_checks(ret, check_children);
- if (ret_score == FIND_CHILD_MAX_SCORE)
- return ret;
- }
- score = find_child_checks(adev, check_children);
- if (score == FIND_CHILD_MAX_SCORE) {
- return adev;
- } else if (score > ret_score) {
- ret = adev;
- ret_score = score;
- }
+ wd->adev = adev;
+ return !(wd->check_sta || wd->check_children);
}
- return ret;
+
+ /*
+ * There is more than one matching device object with the same _ADR
+ * value. That really is unexpected, so we are kind of beyond the scope
+ * of the spec here. We have to choose which one to return, though.
+ *
+ * First, get the score for the previously found object and terminate
+ * the walk if it is maximum.
+ */
+ if (!wd->score) {
+ score = find_child_checks(wd->adev, wd->check_children);
+ if (score == FIND_CHILD_MAX_SCORE)
+ return 1;
+
+ wd->score = score;
+ }
+ /*
+ * Second, if the object that has just been found has a better score,
+ * replace the previously found one with it and terminate the walk if
+ * the new score is maximum.
+ */
+ score = find_child_checks(adev, wd->check_children);
+ if (score > wd->score) {
+ wd->adev = adev;
+ if (score == FIND_CHILD_MAX_SCORE)
+ return 1;
+
+ wd->score = score;
+ }
+
+ /* Continue, because there may be better matches. */
+ return 0;
+}
+
+static struct acpi_device *acpi_find_child(struct acpi_device *parent,
+ u64 address, bool check_children,
+ bool check_sta)
+{
+ struct find_child_walk_data wd = {
+ .address = address,
+ .check_children = check_children,
+ .check_sta = check_sta,
+ .adev = NULL,
+ .score = 0,
+ };
+
+ if (parent)
+ acpi_dev_for_each_child(parent, check_one_child, &wd);
+
+ return wd.adev;
+}
+
+struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
+ u64 address, bool check_children)
+{
+ return acpi_find_child(parent, address, check_children, true);
}
EXPORT_SYMBOL_GPL(acpi_find_child_device);
+struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
+ acpi_bus_address adr)
+{
+ return acpi_find_child(adev, adr, false, false);
+}
+EXPORT_SYMBOL_GPL(acpi_find_child_by_adr);
+
static void acpi_physnode_link_name(char *buf, unsigned int node_id)
{
if (node_id > 0)
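
acpi_find_child_by_adr() reuses the new walk-based lookup but skips both the _STA evaluation and the children check, so it returns the first child whose _ADR matches the given address and stops. A short usage sketch (the parent object and address value are illustrative):

#include <linux/acpi.h>

static struct acpi_device *example_lookup(struct acpi_device *parent)
{
	/* Full lookup: candidates are scored via _STA and child checks. */
	struct acpi_device *adev = acpi_find_child_device(parent, 0x0001, false);

	if (adev)
		return adev;

	/* Address-only lookup added by this change: no _STA or child scoring. */
	return acpi_find_child_by_adr(parent, 0x0001);
}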
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index c68e694fca26..dabe45eba055 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,8 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *acpi_gsi_domain_id;
+static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
* acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
@@ -26,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id;
*/
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
- struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
- DOMAIN_BUS_ANY);
+ struct irq_domain *d;
+ d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
+ DOMAIN_BUS_ANY);
*irq = irq_find_mapping(d, gsi);
/*
- * *irq == 0 means no mapping, that should
- * be reported as a failure
+	 * *irq == 0 means no mapping, which should be reported as a
+ * failure, unless there is an arch-specific fallback handler.
*/
+ if (!*irq && acpi_gsi_to_irq_fallback)
+ *irq = acpi_gsi_to_irq_fallback(gsi);
+
return (*irq > 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
@@ -53,12 +58,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
{
struct irq_fwspec fwspec;
- if (WARN_ON(!acpi_gsi_domain_id)) {
+ fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
+ if (WARN_ON(!fwspec.fwnode)) {
pr_warn("GSI: No registered irqchip, giving up\n");
return -EINVAL;
}
- fwspec.fwnode = acpi_gsi_domain_id;
fwspec.param[0] = gsi;
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
@@ -73,13 +78,14 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi);
*/
void acpi_unregister_gsi(u32 gsi)
{
- struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
- DOMAIN_BUS_ANY);
+ struct irq_domain *d;
int irq;
if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16))
return;
+ d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
+ DOMAIN_BUS_ANY);
irq = irq_find_mapping(d, gsi);
irq_dispose_mapping(irq);
}
@@ -97,7 +103,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
* The referenced device fwhandle or NULL on failure
*/
static struct fwnode_handle *
-acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
+acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
+ u32 gsi)
{
struct fwnode_handle *result;
struct acpi_device *device;
@@ -105,7 +112,7 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
acpi_status status;
if (!source->string_length)
- return acpi_gsi_domain_id;
+ return acpi_get_gsi_domain_id(gsi);
status = acpi_get_handle(NULL, source->string_ptr, &handle);
if (WARN_ON(ACPI_FAILURE(status)))
@@ -194,7 +201,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
ctx->index -= irq->interrupt_count;
return AE_OK;
}
- fwnode = acpi_gsi_domain_id;
+ fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity,
irq->shareable, ctx);
@@ -207,7 +214,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
ctx->index -= eirq->interrupt_count;
return AE_OK;
}
- fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source);
+ fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source,
+ eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity,
eirq->shareable, ctx);
@@ -291,10 +299,20 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* GSI interrupts
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode)
+ struct fwnode_handle *(*fn)(u32))
{
acpi_irq_model = model;
- acpi_gsi_domain_id = fwnode;
+ acpi_get_gsi_domain_id = fn;
+}
+
+/**
+ * acpi_set_gsi_to_irq_fallback - Register a GSI transfer
+ * callback to fallback to arch specified implementation.
+ * @fn: arch-specific fallback handler
+ */
+void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32))
+{
+ acpi_gsi_to_irq_fallback = fn;
}
/**
@@ -312,8 +330,14 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
const struct irq_domain_ops *ops,
void *host_data)
{
- struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
- DOMAIN_BUS_ANY);
+ struct irq_domain *d;
+
+ /* This only works for the GIC model... */
+ if (acpi_irq_model != ACPI_IRQ_MODEL_GIC)
+ return NULL;
+
+ d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0),
+ DOMAIN_BUS_ANY);
if (!d)
return NULL;
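
With this rework the GSI-to-irqdomain association is supplied as a callback rather than a single fwnode, and an optional GSI-to-IRQ fallback can be registered for firmware with no irqdomain mapping. A hedged sketch of what an irqchip driver's registration might look like (the irqchip fwnode, both callbacks and the use of the new LPIC model are all hypothetical here):

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irqdomain.h>

static struct fwnode_handle *example_irqchip_fwnode;

/* Pick the irqdomain identifier for a GSI; a real driver may key off GSI ranges. */
static struct fwnode_handle *example_gsi_domain_id(u32 gsi)
{
	return example_irqchip_fwnode;
}

/* Arch-specific translation used when irq_find_mapping() returns 0. */
static u32 example_gsi_to_irq_fallback(u32 gsi)
{
	return gsi;
}

static void __init example_irqchip_acpi_init(void)
{
	acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, example_gsi_domain_id);
	acpi_set_gsi_to_irq_fallback(example_gsi_to_irq_fallback);
}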
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 58647051c948..aa1038b8aec4 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -95,7 +95,7 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
case ACPI_RESOURCE_TYPE_IRQ:
{
struct acpi_resource_irq *p = &resource->data.irq;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
acpi_handle_debug(handle,
"Blank _PRS IRQ resource\n");
return AE_OK;
@@ -121,7 +121,7 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
{
struct acpi_resource_extended_irq *p =
&resource->data.extended_irq;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
acpi_handle_debug(handle,
"Blank _PRS EXT IRQ resource\n");
return AE_OK;
@@ -182,7 +182,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
case ACPI_RESOURCE_TYPE_IRQ:
{
struct acpi_resource_irq *p = &resource->data.irq;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
* particularly those w/ _STA disabled
@@ -197,7 +197,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
{
struct acpi_resource_extended_irq *p =
&resource->data.extended_irq;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
/*
* extended IRQ descriptors must
* return at least 1 IRQ
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 53cab975f612..860014b89b8e 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -41,6 +41,8 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#ifdef CONFIG_ARM64
+
#define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
@@ -169,6 +171,17 @@ static struct mcfg_fixup mcfg_quirks[] = {
ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15),
+#endif /* ARM64 */
+
+#ifdef CONFIG_LOONGARCH
+#define LOONGSON_ECAM_MCFG(table_id, seg) \
+ { "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops }
+
+ LOONGSON_ECAM_MCFG("\0", 0),
+ LOONGSON_ECAM_MCFG("LOONGSON", 0),
+ LOONGSON_ECAM_MCFG("\0", 1),
+ LOONGSON_ECAM_MCFG("LOONGSON", 1),
+#endif /* LOONGARCH */
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
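
For reference, the LOONGSON_ECAM_MCFG() helper fills a struct mcfg_fixup entry that matches OEM ID "LOONGS", the given OEM table ID and segment, and any bus range, pointing at the LoongArch ECAM ops (loongson_pci_ecam_ops is assumed to be provided by the LoongArch PCI code, as the quirk table expects):

/* LOONGSON_ECAM_MCFG("LOONGSON", 0) expands roughly to the initializer: */
{ "LOONGS", "LOONGSON", 1, 0, MCFG_BUS_ANY, &loongson_pci_ecam_ops },
/* i.e. { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops } with cfgres left zeroed */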
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 701f61c01359..c91342dcbcd6 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -437,7 +437,8 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
pr_debug("found = %p %p\n", found_cache, cpu_node);
if (found_cache)
update_cache_properties(this_leaf, found_cache,
- cpu_node, table->revision);
+ ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)),
+ table->revision);
index++;
}
@@ -532,21 +533,37 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
return -ENOENT;
}
+
+static struct acpi_table_header *acpi_get_pptt(void)
+{
+ static struct acpi_table_header *pptt;
+ acpi_status status;
+
+ /*
+	 * PPTT will be used at runtime in every CPU hotplug-in path, so we
+ * don't need to call acpi_put_table() to release the table mapping.
+ */
+ if (!pptt) {
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
+ if (ACPI_FAILURE(status))
+ acpi_pptt_warn_missing();
+ }
+
+ return pptt;
+}
+
static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
{
struct acpi_table_header *table;
- acpi_status status;
int retval;
- status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
- if (ACPI_FAILURE(status)) {
- acpi_pptt_warn_missing();
+ table = acpi_get_pptt();
+ if (!table)
return -ENOENT;
- }
+
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
cpu, level, retval);
- acpi_put_table(table);
return retval;
}
@@ -567,16 +584,13 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
{
struct acpi_table_header *table;
- acpi_status status;
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
struct acpi_pptt_processor *cpu_node = NULL;
int ret = -ENOENT;
- status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
- if (ACPI_FAILURE(status)) {
- acpi_pptt_warn_missing();
- return ret;
- }
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
if (table->revision >= rev)
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
@@ -584,8 +598,6 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
if (cpu_node)
ret = (cpu_node->flags & flag) != 0;
- acpi_put_table(table);
-
return ret;
}
@@ -604,18 +616,15 @@ int acpi_find_last_cache_level(unsigned int cpu)
u32 acpi_cpu_id;
struct acpi_table_header *table;
int number_of_levels = 0;
- acpi_status status;
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
pr_debug("Cache Setup find last level CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
- status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
- if (ACPI_FAILURE(status)) {
- acpi_pptt_warn_missing();
- } else {
- number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
- acpi_put_table(table);
- }
+ number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
return number_of_levels;
@@ -637,20 +646,16 @@ int acpi_find_last_cache_level(unsigned int cpu)
int cache_setup_acpi(unsigned int cpu)
{
struct acpi_table_header *table;
- acpi_status status;
- pr_debug("Cache Setup ACPI CPU %d\n", cpu);
-
- status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
- if (ACPI_FAILURE(status)) {
- acpi_pptt_warn_missing();
+ table = acpi_get_pptt();
+ if (!table)
return -ENOENT;
- }
+
+ pr_debug("Cache Setup ACPI CPU %d\n", cpu);
cache_setup_acpi_cpu(table, cpu);
- acpi_put_table(table);
- return status;
+ return 0;
}
/**
@@ -691,43 +696,6 @@ int find_acpi_cpu_topology(unsigned int cpu, int level)
}
/**
- * find_acpi_cpu_cache_topology() - Determine a unique cache topology value
- * @cpu: Kernel logical CPU number
- * @level: The cache level for which we would like a unique ID
- *
- * Determine a unique ID for each unified cache in the system
- *
- * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
- * Otherwise returns a value which represents a unique topological feature.
- */
-int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
-{
- struct acpi_table_header *table;
- struct acpi_pptt_cache *found_cache;
- acpi_status status;
- u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
- struct acpi_pptt_processor *cpu_node = NULL;
- int ret = -1;
-
- status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
- if (ACPI_FAILURE(status)) {
- acpi_pptt_warn_missing();
- return -ENOENT;
- }
-
- found_cache = acpi_find_cache_node(table, acpi_cpu_id,
- CACHE_TYPE_UNIFIED,
- level,
- &cpu_node);
- if (found_cache)
- ret = ACPI_PTR_DIFF(cpu_node, table);
-
- acpi_put_table(table);
-
- return ret;
-}
-
-/**
* find_acpi_cpu_topology_package() - Determine a unique CPU package value
* @cpu: Kernel logical CPU number
*
@@ -766,50 +734,38 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
int find_acpi_cpu_topology_cluster(unsigned int cpu)
{
struct acpi_table_header *table;
- acpi_status status;
struct acpi_pptt_processor *cpu_node, *cluster_node;
u32 acpi_cpu_id;
int retval;
int is_thread;
- status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
- if (ACPI_FAILURE(status)) {
- acpi_pptt_warn_missing();
+ table = acpi_get_pptt();
+ if (!table)
return -ENOENT;
- }
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
- if (cpu_node == NULL || !cpu_node->parent) {
- retval = -ENOENT;
- goto put_table;
- }
+ if (!cpu_node || !cpu_node->parent)
+ return -ENOENT;
is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD;
cluster_node = fetch_pptt_node(table, cpu_node->parent);
- if (cluster_node == NULL) {
- retval = -ENOENT;
- goto put_table;
- }
+ if (!cluster_node)
+ return -ENOENT;
+
if (is_thread) {
- if (!cluster_node->parent) {
- retval = -ENOENT;
- goto put_table;
- }
+ if (!cluster_node->parent)
+ return -ENOENT;
+
cluster_node = fetch_pptt_node(table, cluster_node->parent);
- if (cluster_node == NULL) {
- retval = -ENOENT;
- goto put_table;
- }
+ if (!cluster_node)
+ return -ENOENT;
}
if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
retval = cluster_node->acpi_processor_id;
else
retval = ACPI_PTR_DIFF(cluster_node, table);
-put_table:
- acpi_put_table(table);
-
return retval;
}
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 4d3a219c67f8..998101cf16e4 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -53,7 +53,7 @@ static LIST_HEAD(prm_module_list);
struct prm_handler_info {
guid_t guid;
- u64 handler_addr;
+ void *handler_addr;
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
@@ -148,7 +148,7 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
- th->handler_addr = efi_pa_va_lookup(handler_info->handler_address);
+ th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 368a9edefd0c..1278969eec1f 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -139,75 +139,17 @@ static int acpi_soft_cpu_dead(unsigned int cpu)
}
#ifdef CONFIG_ACPI_CPU_FREQ_PSS
-static int acpi_pss_perf_init(struct acpi_processor *pr,
- struct acpi_device *device)
+static void acpi_pss_perf_init(struct acpi_processor *pr)
{
- int result = 0;
-
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_get_throttling_info(pr);
if (pr->flags.throttling)
pr->flags.limit = 1;
-
- pr->cdev = thermal_cooling_device_register("Processor", device,
- &processor_cooling_ops);
- if (IS_ERR(pr->cdev)) {
- result = PTR_ERR(pr->cdev);
- return result;
- }
-
- dev_dbg(&device->dev, "registered as cooling_device%d\n",
- pr->cdev->id);
-
- result = sysfs_create_link(&device->dev.kobj,
- &pr->cdev->device.kobj,
- "thermal_cooling");
- if (result) {
- dev_err(&device->dev,
- "Failed to create sysfs link 'thermal_cooling'\n");
- goto err_thermal_unregister;
- }
-
- result = sysfs_create_link(&pr->cdev->device.kobj,
- &device->dev.kobj,
- "device");
- if (result) {
- dev_err(&pr->cdev->device,
- "Failed to create sysfs link 'device'\n");
- goto err_remove_sysfs_thermal;
- }
-
- return 0;
-
- err_remove_sysfs_thermal:
- sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
- err_thermal_unregister:
- thermal_cooling_device_unregister(pr->cdev);
-
- return result;
-}
-
-static void acpi_pss_perf_exit(struct acpi_processor *pr,
- struct acpi_device *device)
-{
- if (pr->cdev) {
- sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
- sysfs_remove_link(&pr->cdev->device.kobj, "device");
- thermal_cooling_device_unregister(pr->cdev);
- pr->cdev = NULL;
- }
}
#else
-static inline int acpi_pss_perf_init(struct acpi_processor *pr,
- struct acpi_device *device)
-{
- return 0;
-}
-
-static inline void acpi_pss_perf_exit(struct acpi_processor *pr,
- struct acpi_device *device) {}
+static inline void acpi_pss_perf_init(struct acpi_processor *pr) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
static int __acpi_processor_start(struct acpi_device *device)
@@ -229,7 +171,9 @@ static int __acpi_processor_start(struct acpi_device *device)
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
- result = acpi_pss_perf_init(pr, device);
+ acpi_pss_perf_init(pr);
+
+ result = acpi_processor_thermal_init(pr, device);
if (result)
goto err_power_exit;
@@ -239,7 +183,7 @@ static int __acpi_processor_start(struct acpi_device *device)
return 0;
result = -ENODEV;
- acpi_pss_perf_exit(pr, device);
+ acpi_processor_thermal_exit(pr, device);
err_power_exit:
acpi_processor_power_exit(pr);
@@ -277,10 +221,10 @@ static int acpi_processor_stop(struct device *dev)
return 0;
acpi_processor_power_exit(pr);
- acpi_pss_perf_exit(pr, device);
-
acpi_cppc_processor_exit(pr);
+ acpi_processor_thermal_exit(pr, device);
+
return 0;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 6a5572a1a80c..16a1663d02d4 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -23,6 +23,7 @@
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
+#include <linux/context_tracking.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
@@ -607,7 +608,7 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
* @cx: Target state context
* @index: index of target state
*/
-static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
+static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int index)
@@ -647,11 +648,11 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
- rcu_idle_enter();
+ ct_idle_enter();
acpi_idle_do_entry(cx);
- rcu_idle_exit();
+ ct_idle_exit();
/* Re-enable bus master arbitration */
if (dis_bm) {
@@ -664,7 +665,7 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
return index;
}
-static int acpi_idle_enter(struct cpuidle_device *dev,
+static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -693,7 +694,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
-static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
+static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index d8b2dfcd59b5..db6ac540e924 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -266,3 +266,57 @@ const struct thermal_cooling_device_ops processor_cooling_ops = {
.get_cur_state = processor_get_cur_state,
.set_cur_state = processor_set_cur_state,
};
+
+int acpi_processor_thermal_init(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ int result = 0;
+
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (IS_ERR(pr->cdev)) {
+ result = PTR_ERR(pr->cdev);
+ return result;
+ }
+
+ dev_dbg(&device->dev, "registered as cooling_device%d\n",
+ pr->cdev->id);
+
+ result = sysfs_create_link(&device->dev.kobj,
+ &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result) {
+ dev_err(&device->dev,
+ "Failed to create sysfs link 'thermal_cooling'\n");
+ goto err_thermal_unregister;
+ }
+
+ result = sysfs_create_link(&pr->cdev->device.kobj,
+ &device->dev.kobj,
+ "device");
+ if (result) {
+ dev_err(&pr->cdev->device,
+ "Failed to create sysfs link 'device'\n");
+ goto err_remove_sysfs_thermal;
+ }
+
+ return 0;
+
+err_remove_sysfs_thermal:
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+err_thermal_unregister:
+ thermal_cooling_device_unregister(pr->cdev);
+
+ return result;
+}
+
+void acpi_processor_thermal_exit(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ if (pr->cdev) {
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&pr->cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(pr->cdev);
+ pr->cdev = NULL;
+ }
+}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index d3173811614e..7b3ad8ed2f4e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -55,14 +55,19 @@ static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
+static const guid_t buffer_prop_guid =
+ GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
+ 0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
+
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent);
-static bool acpi_extract_properties(const union acpi_object *desc,
+static bool acpi_extract_properties(acpi_handle handle,
+ union acpi_object *desc,
struct acpi_device_data *data);
-static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+static bool acpi_nondev_subnode_extract(union acpi_object *desc,
acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
@@ -81,7 +86,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(desc, &dn->data);
+ result = acpi_extract_properties(handle, desc, &dn->data);
if (handle) {
acpi_handle scope;
@@ -155,16 +160,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
-static int acpi_add_nondev_subnodes(acpi_handle scope,
- const union acpi_object *links,
- struct list_head *list,
- struct fwnode_handle *parent)
+static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ union acpi_object *links,
+ struct list_head *list,
+ struct fwnode_handle *parent)
{
bool ret = false;
int i;
for (i = 0; i < links->package.count; i++) {
- const union acpi_object *link, *desc;
+ union acpi_object *link, *desc;
acpi_handle handle;
bool result;
@@ -204,7 +209,7 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
}
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent)
{
@@ -212,7 +217,8 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
/* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *links;
+ const union acpi_object *guid;
+ union acpi_object *links;
guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
@@ -325,7 +331,7 @@ static bool acpi_is_property_guid(const guid_t *guid)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties)
+ union acpi_object *properties)
{
struct acpi_device_properties *props;
@@ -340,7 +346,141 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
return props;
}
-static bool acpi_extract_properties(const union acpi_object *desc,
+static void acpi_nondev_subnode_tag(acpi_handle handle, void *context)
+{
+}
+
+static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+
+ acpi_untie_nondev_subnodes(&dn->data);
+ }
+}
+
+static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_status status;
+ bool ret;
+
+ status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(dn->handle, "Can't tag data node\n");
+ return false;
+ }
+
+ ret = acpi_tie_nondev_subnodes(&dn->data);
+ if (!ret)
+ return ret;
+ }
+
+ return true;
+}
+
+static void acpi_data_add_buffer_props(acpi_handle handle,
+ struct acpi_device_data *data,
+ union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+ union acpi_object *package;
+ size_t alloc_size;
+ unsigned int i;
+ u32 *count;
+
+ if (check_mul_overflow((size_t)properties->package.count,
+ sizeof(*package) + sizeof(void *),
+ &alloc_size) ||
+ check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size,
+ &alloc_size)) {
+ acpi_handle_warn(handle,
+ "can't allocate memory for %u buffer props",
+ properties->package.count);
+ return;
+ }
+
+ props = kvzalloc(alloc_size, GFP_KERNEL);
+ if (!props)
+ return;
+
+ props->guid = &buffer_prop_guid;
+ props->bufs = (void *)(props + 1);
+ props->properties = (void *)(props->bufs + properties->package.count);
+
+ /* Outer package */
+ package = props->properties;
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = package + 1;
+ count = &package->package.count;
+ *count = 0;
+
+ /* Inner packages */
+ package++;
+
+ for (i = 0; i < properties->package.count; i++) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object *property = &properties->package.elements[i];
+ union acpi_object *prop, *obj, *buf_obj;
+ acpi_status status;
+
+ if (property->type != ACPI_TYPE_PACKAGE ||
+ property->package.count != 2) {
+ acpi_handle_warn(handle,
+ "buffer property %u has %u entries\n",
+ i, property->package.count);
+ continue;
+ }
+
+ prop = &property->package.elements[0];
+ obj = &property->package.elements[1];
+
+ if (prop->type != ACPI_TYPE_STRING ||
+ obj->type != ACPI_TYPE_STRING) {
+ acpi_handle_warn(handle,
+ "wrong object types %u and %u\n",
+ prop->type, obj->type);
+ continue;
+ }
+
+ status = acpi_evaluate_object_typed(handle, obj->string.pointer,
+ NULL, &buf,
+ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle,
+ "can't evaluate \"%*pE\" as buffer\n",
+ obj->string.length,
+ obj->string.pointer);
+ continue;
+ }
+
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = prop;
+ package->package.count = 2;
+
+ buf_obj = buf.pointer;
+
+ /* Replace the string object with a buffer object */
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = buf_obj->buffer.length;
+ obj->buffer.pointer = buf_obj->buffer.pointer;
+
+ props->bufs[i] = buf.pointer;
+ package++;
+ (*count)++;
+ }
+
+ if (*count)
+ list_add(&props->list, &data->properties);
+ else
+ kvfree(props);
+}
+
+static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc,
struct acpi_device_data *data)
{
int i;
@@ -350,7 +490,8 @@ static bool acpi_extract_properties(const union acpi_object *desc,
/* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *properties;
+ const union acpi_object *guid;
+ union acpi_object *properties;
guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
@@ -364,6 +505,12 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
+ if (guid_equal((guid_t *)guid->buffer.pointer,
+ &buffer_prop_guid)) {
+ acpi_data_add_buffer_props(scope, data, properties);
+ continue;
+ }
+
if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
@@ -410,7 +557,7 @@ void acpi_init_properties(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
- if (acpi_extract_properties(buf.pointer, &adev->data)) {
+ if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) {
adev->data.pointer = buf.pointer;
if (acpi_of)
acpi_init_of_compatible(adev);
@@ -422,6 +569,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (!adev->data.pointer) {
acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n");
ACPI_FREE(buf.pointer);
+ } else {
+ if (!acpi_tie_nondev_subnodes(&adev->data))
+ acpi_untie_nondev_subnodes(&adev->data);
}
out:
@@ -438,8 +588,14 @@ static void acpi_free_device_properties(struct list_head *list)
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
+ u32 i;
+
list_del(&props->list);
- kfree(props);
+ /* Buffer data properties were separately allocated */
+ if (props->bufs)
+ for (i = 0; i < props->properties->package.count; i++)
+ ACPI_FREE(props->bufs[i]);
+ kvfree(props);
}
}
@@ -462,6 +618,7 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ acpi_untie_nondev_subnodes(&adev->data);
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
@@ -633,6 +790,58 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static int acpi_get_ref_args(struct fwnode_reference_args *args,
+ struct fwnode_handle *ref_fwnode,
+ const union acpi_object **element,
+ const union acpi_object *end, size_t num_args)
+{
+ u32 nargs = 0, i;
+
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
+ (*element)++) {
+ const char *child_name = (*element)->string.pointer;
+
+ ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
+ /*
+ * Assume the following integer elements are all args. Stop counting on
+	 * the first reference or at the end of the package arguments. If an
+	 * element is neither a reference nor an integer, return an error; we
+	 * can't parse it.
+ */
+ for (i = 0; (*element) + i < end && i < num_args; i++) {
+ acpi_object_type type = (*element)[i].type;
+
+ if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else
+ return -EINVAL;
+ }
+
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
+ if (args) {
+ args->fwnode = ref_fwnode;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = (*element)[i].integer.value;
+ }
+
+ (*element) += nargs;
+
+ return 0;
+}
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -686,11 +895,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -EINVAL ? -ENOENT : -EINVAL;
- /*
- * The simplest case is when the value is a single reference. Just
- * return that reference then.
- */
- if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ switch (obj->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /* Plain single reference without arguments. */
if (index)
return -ENOENT;
@@ -701,19 +908,21 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * If it is not a single reference, then it is a package of
+ * references followed by number of ints as follows:
+ *
+ * Package () { REF, INT, REF, INT, INT }
+ *
+ * The index argument is then used to determine which reference
+ * the caller wants (along with the arguments).
+ */
+ break;
+ default:
+ return -EINVAL;
}
- /*
- * If it is not a single reference, then it is a package of
- * references followed by number of ints as follows:
- *
- * Package () { REF, INT, REF, INT, INT }
- *
- * The index argument is then used to determine which reference
- * the caller wants (along with the arguments).
- */
- if (obj->type != ACPI_TYPE_PACKAGE)
- return -EINVAL;
if (index >= obj->package.count)
return -ENOENT;
@@ -721,66 +930,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
end = element + obj->package.count;
while (element < end) {
- u32 nargs, i;
-
- if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
- struct fwnode_handle *ref_fwnode;
-
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
device = acpi_fetch_acpi_dev(element->reference.handle);
if (!device)
return -EINVAL;
- nargs = 0;
element++;
- /*
- * Find the referred data extension node under the
- * referred device node.
- */
- for (ref_fwnode = acpi_fwnode_handle(device);
- element < end && element->type == ACPI_TYPE_STRING;
- element++) {
- ref_fwnode = acpi_fwnode_get_named_child_node(
- ref_fwnode, element->string.pointer);
- if (!ref_fwnode)
- return -EINVAL;
- }
-
- /*
- * Assume the following integer elements are all args.
- * Stop counting on the first reference or end of the
- * package arguments. In case of neither reference,
- * nor integer, return an error, we can't parse it.
- */
- for (i = 0; element + i < end && i < num_args; i++) {
- int type = element[i].type;
-
- if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else
- return -EINVAL;
- }
-
- if (nargs > NR_FWNODE_REFERENCE_ARGS)
- return -EINVAL;
-
- if (idx == index) {
- args->fwnode = ref_fwnode;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ ret = acpi_get_ref_args(idx == index ? args : NULL,
+ acpi_fwnode_handle(device),
+ &element, end, num_args);
+ if (ret < 0)
+ return ret;
+ if (idx == index)
return 0;
- }
- element += nargs;
- } else if (element->type == ACPI_TYPE_INTEGER) {
+ break;
+ case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
element++;
- } else {
+ break;
+ default:
return -EINVAL;
}
@@ -852,67 +1025,37 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
return ret;
}
-static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
- size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U8_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u16(const union acpi_object *items,
- u16 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U16_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u32(const union acpi_object *items,
- u32 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U32_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u64(const union acpi_object *items,
- u64 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
+#define acpi_copy_property_array_uint(items, val, nval) \
+ ({ \
+ typeof(items) __items = items; \
+ typeof(val) __val = val; \
+ typeof(nval) __nval = nval; \
+ size_t i; \
+ int ret = 0; \
+ \
+ for (i = 0; i < __nval; i++) { \
+ if (__items->type == ACPI_TYPE_BUFFER) { \
+ __val[i] = __items->buffer.pointer[i]; \
+ continue; \
+ } \
+ if (__items[i].type != ACPI_TYPE_INTEGER) { \
+ ret = -EPROTO; \
+ break; \
+ } \
+ if (__items[i].integer.value > _Generic(__val, \
+ u8: U8_MAX, \
+ u16: U16_MAX, \
+ u32: U32_MAX, \
+ u64: U64_MAX, \
+ default: 0U)) { \
+ ret = -EOVERFLOW; \
+ break; \
+ } \
+ \
+ __val[i] = __items[i].integer.value; \
+ } \
+ ret; \
+ })
static int acpi_copy_property_array_string(const union acpi_object *items,
char **val, size_t nval)
@@ -954,31 +1097,54 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
+ if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER,
+ &obj);
if (ret)
return ret;
- if (!val)
+ if (!val) {
+ if (obj->type == ACPI_TYPE_BUFFER)
+ return obj->buffer.length;
+
return obj->package.count;
+ }
- if (proptype != DEV_PROP_STRING && nval > obj->package.count)
- return -EOVERFLOW;
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+ case DEV_PROP_U8 ... DEV_PROP_U64:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+ break;
+ }
+ fallthrough;
+ default:
+ if (nval > obj->package.count)
+ return -EOVERFLOW;
+ break;
+ }
if (nval == 0)
return -EINVAL;
- items = obj->package.elements;
+ if (obj->type != ACPI_TYPE_BUFFER)
+ items = obj->package.elements;
+ else
+ items = obj;
switch (proptype) {
case DEV_PROP_U8:
- ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u8 *)val, nval);
break;
case DEV_PROP_U16:
- ret = acpi_copy_property_array_u16(items, (u16 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u16 *)val, nval);
break;
case DEV_PROP_U32:
- ret = acpi_copy_property_array_u32(items, (u32 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u32 *)val, nval);
break;
case DEV_PROP_U64:
- ret = acpi_copy_property_array_u64(items, (u64 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
ret = acpi_copy_property_array_string(
@@ -1012,6 +1178,22 @@ static int acpi_node_prop_read(const struct fwnode_handle *fwnode,
propname, proptype, val, nval);
}
+static int stop_on_next(struct acpi_device *adev, void *data)
+{
+ struct acpi_device **ret_p = data;
+
+ if (!*ret_p) {
+ *ret_p = adev;
+ return 1;
+ }
+
+ /* Skip until the "previous" object is found. */
+ if (*ret_p == adev)
+ *ret_p = NULL;
+
+ return 0;
+}
+
/**
* acpi_get_next_subnode - Return the next child node handle for a fwnode
* @fwnode: Firmware node to find the next child node for.
@@ -1020,35 +1202,22 @@ static int acpi_node_prop_read(const struct fwnode_handle *fwnode,
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
- const struct acpi_device *adev = to_acpi_device_node(fwnode);
- const struct list_head *head;
- struct list_head *next;
+ struct acpi_device *adev = to_acpi_device_node(fwnode);
if ((!child || is_acpi_device_node(child)) && adev) {
- struct acpi_device *child_adev;
+ struct acpi_device *child_adev = to_acpi_device_node(child);
- head = &adev->children;
- if (list_empty(head))
- goto nondev;
+ acpi_dev_for_each_child(adev, stop_on_next, &child_adev);
+ if (child_adev)
+ return acpi_fwnode_handle(child_adev);
- if (child) {
- adev = to_acpi_device_node(child);
- next = adev->node.next;
- if (next == head) {
- child = NULL;
- goto nondev;
- }
- child_adev = list_entry(next, struct acpi_device, node);
- } else {
- child_adev = list_first_entry(head, struct acpi_device,
- node);
- }
- return acpi_fwnode_handle(child_adev);
+ child = NULL;
}
- nondev:
if (!child || is_acpi_data_node(child)) {
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
+ const struct list_head *head;
+ struct list_head *next;
struct acpi_data_node *dn;
/*
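
On the consumer side, a buffer-backed _DSD property registered under the new GUID above becomes readable through the generic device property API just like an integer package, with the count helper reporting the buffer length. A hedged sketch of a reader (the "vendor-blob" property name is made up for illustration):

#include <linux/property.h>

/* Read a buffer-backed u8 array property; the property name is illustrative. */
static int example_read_blob(struct device *dev, u8 *buf, size_t len)
{
	int n = device_property_count_u8(dev, "vendor-blob");

	if (n < 0)
		return n;
	if ((size_t)n > len)
		return -EINVAL;

	return device_property_read_u8_array(dev, "vendor-blob", buf, n);
}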
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index c2d494784425..510cdec375c4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -416,6 +416,16 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
{
int i;
+#ifdef CONFIG_X86
+ /*
+	 * The IRQ override isn't needed on modern AMD Zen systems, and it
+	 * breaks active-low IRQs on AMD Ryzen 6000 and newer systems, so
+	 * skip it.
+ */
+ if (boot_cpu_has(X86_FEATURE_ZEN))
+ return false;
+#endif
+
for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
const struct irq_override_cmp *entry = &skip_override_table[i];
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 762b61f67e6c..42cec8120f18 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -334,10 +334,9 @@ static int acpi_scan_device_check(struct acpi_device *adev)
return error;
}
-static int acpi_scan_bus_check(struct acpi_device *adev)
+static int acpi_scan_bus_check(struct acpi_device *adev, void *not_used)
{
struct acpi_scan_handler *handler = adev->handler;
- struct acpi_device *child;
int error;
acpi_bus_get_status(adev);
@@ -353,19 +352,14 @@ static int acpi_scan_bus_check(struct acpi_device *adev)
dev_warn(&adev->dev, "Namespace scan failure\n");
return error;
}
- list_for_each_entry(child, &adev->children, node) {
- error = acpi_scan_bus_check(child);
- if (error)
- return error;
- }
- return 0;
+ return acpi_dev_for_each_child(adev, acpi_scan_bus_check, NULL);
}
static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
{
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- return acpi_scan_bus_check(adev);
+ return acpi_scan_bus_check(adev, NULL);
case ACPI_NOTIFY_DEVICE_CHECK:
return acpi_scan_device_check(adev);
case ACPI_NOTIFY_EJECT_REQUEST:
@@ -471,8 +465,6 @@ static void acpi_device_del(struct acpi_device *device)
struct acpi_device_bus_id *acpi_device_bus_id;
mutex_lock(&acpi_device_lock);
- if (device->parent)
- list_del(&device->node);
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id,
@@ -488,6 +480,7 @@ static void acpi_device_del(struct acpi_device *device)
}
list_del(&device->wakeup_list);
+
mutex_unlock(&acpi_device_lock);
acpi_power_add_remove_device(device, false);
@@ -680,8 +673,6 @@ static int __acpi_device_add(struct acpi_device *device,
* -------
* Link this device to its parent and siblings.
*/
- INIT_LIST_HEAD(&device->children);
- INIT_LIST_HEAD(&device->node);
INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list);
INIT_LIST_HEAD(&device->del_list);
@@ -721,9 +712,6 @@ static int __acpi_device_add(struct acpi_device *device,
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
- if (device->parent)
- list_add_tail(&device->node, &device->parent->children);
-
if (device->wakeup.flags.valid)
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
@@ -752,9 +740,6 @@ static int __acpi_device_add(struct acpi_device *device,
err:
mutex_lock(&acpi_device_lock);
- if (device->parent)
- list_del(&device->node);
-
list_del(&device->wakeup_list);
err_unlock:
@@ -1737,6 +1722,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
+ {"CLSA0101", },
/*
* Some ACPI devs contain SerialBus resources even though they are not
* attached to a serial bus at all.
@@ -2187,9 +2173,8 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
return ret;
}
-static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
+static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
{
- struct acpi_device *child;
bool skip = !first_pass && device->flags.visited;
acpi_handle ejd;
int ret;
@@ -2206,7 +2191,7 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
device->flags.initialized = false;
acpi_device_clear_enumerated(device);
device->flags.power_manageable = 0;
- return;
+ return 0;
}
if (device->handler)
goto ok;
@@ -2224,7 +2209,7 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
ret = acpi_scan_attach_handler(device);
if (ret < 0)
- return;
+ return 0;
device->flags.match_driver = true;
if (ret > 0 && !device->flags.enumeration_by_parent) {
@@ -2234,19 +2219,20 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
ret = device_attach(&device->dev);
if (ret < 0)
- return;
+ return 0;
if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
acpi_default_enumeration(device);
else
acpi_device_set_enumerated(device);
- ok:
- list_for_each_entry(child, &device->children, node)
- acpi_bus_attach(child, first_pass);
+ok:
+ acpi_dev_for_each_child(device, acpi_bus_attach, first_pass);
if (!skip && device->handler && device->handler->hotplug.notify_online)
device->handler->hotplug.notify_online(device);
+
+ return 0;
}
static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
@@ -2274,7 +2260,7 @@ static void acpi_scan_clear_dep_fn(struct work_struct *work)
cdw = container_of(work, struct acpi_scan_clear_dep_work, work);
acpi_scan_lock_acquire();
- acpi_bus_attach(cdw->adev, true);
+ acpi_bus_attach(cdw->adev, (void *)true);
acpi_scan_lock_release();
acpi_dev_put(cdw->adev);
@@ -2432,7 +2418,7 @@ int acpi_bus_scan(acpi_handle handle)
if (!device)
return -ENODEV;
- acpi_bus_attach(device, true);
+ acpi_bus_attach(device, (void *)true);
if (!acpi_bus_scan_second_pass)
return 0;
@@ -2446,25 +2432,17 @@ int acpi_bus_scan(acpi_handle handle)
acpi_bus_check_add_2, NULL, NULL,
(void **)&device);
- acpi_bus_attach(device, false);
+ acpi_bus_attach(device, NULL);
return 0;
}
EXPORT_SYMBOL(acpi_bus_scan);
-/**
- * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
- * @adev: Root of the ACPI namespace scope to walk.
- *
- * Must be called under acpi_scan_lock.
- */
-void acpi_bus_trim(struct acpi_device *adev)
+static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used)
{
struct acpi_scan_handler *handler = adev->handler;
- struct acpi_device *child;
- list_for_each_entry_reverse(child, &adev->children, node)
- acpi_bus_trim(child);
+ acpi_dev_for_each_child_reverse(adev, acpi_bus_trim_one, NULL);
adev->flags.match_driver = false;
if (handler) {
@@ -2482,6 +2460,19 @@ void acpi_bus_trim(struct acpi_device *adev)
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
acpi_device_clear_enumerated(adev);
+
+ return 0;
+}
+
+/**
+ * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
+ * @adev: Root of the ACPI namespace scope to walk.
+ *
+ * Must be called under acpi_scan_lock.
+ */
+void acpi_bus_trim(struct acpi_device *adev)
+{
+ acpi_bus_trim_one(adev, NULL);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 04ea1569df78..ad4b2987b3d6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -360,6 +360,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G40-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
+ },
+ },
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
@@ -816,6 +824,9 @@ static const struct platform_s2idle_ops acpi_s2idle_ops = {
void __weak acpi_s2idle_setup(void)
{
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
+ pr_info("Efficient low-power S0 idle declared\n");
+
s2idle_set_ops(&acpi_s2idle_ops);
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3a9773a09e19..5a7b8065e77f 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -291,6 +291,44 @@ int acpi_get_local_address(acpi_handle handle, u32 *addr)
}
EXPORT_SYMBOL(acpi_get_local_address);
+#define ACPI_MAX_SUB_BUF_SIZE 9
+
+const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ const char *sub;
+ size_t len;
+
+ status = acpi_evaluate_object(handle, METHOD_NAME__SUB, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Reading ACPI _SUB failed: %#x\n", status);
+ return ERR_PTR(-ENODATA);
+ }
+
+ obj = buffer.pointer;
+ if (obj->type == ACPI_TYPE_STRING) {
+ len = strlen(obj->string.pointer);
+ if (len < ACPI_MAX_SUB_BUF_SIZE && len > 0) {
+ sub = kstrdup(obj->string.pointer, GFP_KERNEL);
+ if (!sub)
+ sub = ERR_PTR(-ENOMEM);
+ } else {
+ acpi_handle_err(handle, "ACPI _SUB Length %zu is Invalid\n", len);
+ sub = ERR_PTR(-ENODATA);
+ }
+ } else {
+ acpi_handle_warn(handle, "Warning ACPI _SUB did not return a string\n");
+ sub = ERR_PTR(-ENODATA);
+ }
+
+ acpi_os_free(buffer.pointer);
+
+ return sub;
+}
+EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
+
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index becc198e4c22..5d7f38016a24 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -349,6 +349,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Dell Inspiron N4010 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N4010"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Dell Vostro V131 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -430,7 +438,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
},
@@ -438,59 +445,75 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
},
{
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
+ .ident = "Clevo NL5xNU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
},
},
+ /*
+ * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
+ * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
+ * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
+ * above.
+ */
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
+ .ident = "TongFang PF5PU1G",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF4NU1F",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF4NU1F",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5NU1G",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5NU1G",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF5LUXG",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
},
},
-
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index d2256326c73a..6132092dab2a 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -88,7 +88,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
return -ENODEV;
}
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (!fwnode) {
/*
* PCI devices aren't necessarily described by ACPI. Create a
@@ -101,7 +101,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
}
set_primary_fwnode(&pdev->dev, fwnode);
}
- viommu->fwnode = pdev->dev.fwnode;
+ viommu->fwnode = dev_fwnode(&pdev->dev);
pci_dev_put(pdev);
return 0;
}
@@ -249,6 +249,26 @@ err_free:
}
/**
+ * acpi_viot_early_init - Test the presence of VIOT and enable ACS
+ *
+ * If the VIOT does exist, ACS must be enabled. This cannot be
+ * done in acpi_viot_init(), which is called after the bus scan.
+ */
+void __init acpi_viot_early_init(void)
+{
+#ifdef CONFIG_PCI
+ acpi_status status;
+ struct acpi_table_header *hdr;
+
+ status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
+ if (ACPI_FAILURE(status))
+ return;
+ pci_request_acs();
+ acpi_put_table(hdr);
+#endif
+}
+
+/**
* acpi_viot_init - Parse the VIOT table
*
* Parse the VIOT table, prepare the list of endpoints to be used during DMA
@@ -294,7 +314,7 @@ static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
return -ENODEV;
/* We're not translating ourself */
- if (viommu->fwnode == dev->fwnode)
+ if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
ops = iommu_ops_from_fwnode(viommu->fwnode);
@@ -319,12 +339,6 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
epid = ((domain_nr - ep->segment_start) << 16) +
dev_id - ep->bdf_start + ep->endpoint_id;
- /*
- * If we found a PCI range managed by the viommu, we're
- * the one that has to request ACS.
- */
- pci_request_acs();
-
return viot_dev_iommu_init(&pdev->dev, ep->viommu,
epid);
}
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 2963229062f8..f9ac12b778e6 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -369,9 +369,6 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle)
return 0;
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return 0;
-
if (acpi_s2idle_vendor_amd()) {
/* AMD0004, AMD0005, AMDI0005:
* - Should use rev_id 0x0
@@ -397,7 +394,9 @@ static int lps0_device_attach(struct acpi_device *adev,
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
- } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) {
+ } else if (lps0_dsm_func_mask_microsoft > 0 &&
+ (!strcmp(hid, "AMDI0007") ||
+ !strcmp(hid, "AMDI0008"))) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
@@ -419,11 +418,15 @@ static int lps0_device_attach(struct acpi_device *adev,
lpi_device_get_constraints();
/*
- * Use suspend-to-idle by default if the default suspend mode was not
- * set from the command line.
+ * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in
+ * the FADT and the default suspend mode was not set from the command
+ * line.
*/
- if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) &&
+ mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3) {
mem_sleep_current = PM_SUSPEND_TO_IDLE;
+ pr_info("Low-power S0 idle used by default for system suspend\n");
+ }
/*
* Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 0e3ed5eb367b..32b0e0b930c1 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -130,11 +130,100 @@ static struct attribute *amba_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(amba_dev);
+static int amba_read_periphid(struct amba_device *dev)
+{
+ struct reset_control *rstc;
+ u32 size, pid, cid;
+ void __iomem *tmp;
+ int i, ret;
+
+ ret = dev_pm_domain_attach(&dev->dev, true);
+ if (ret) {
+ dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = amba_get_enable_pclk(dev);
+ if (ret) {
+ dev_dbg(&dev->dev, "can't get pclk: %d\n", ret);
+ goto err_pm;
+ }
+
+ /*
+ * Find reset control(s) of the amba bus and de-assert them.
+ */
+ rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "can't get reset: %d\n", ret);
+ goto err_clk;
+ }
+ reset_control_deassert(rstc);
+ reset_control_put(rstc);
+
+ size = resource_size(&dev->res);
+ tmp = ioremap(dev->res.start, size);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
+
+ /*
+ * Read pid and cid based on size of resource
+ * they are located at end of region
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
+
+ if (cid == CORESIGHT_CID) {
+ /* set the base to the start of the last 4k block */
+ void __iomem *csbase = tmp + size - 4096;
+
+ dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
+ dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
+ }
+
+ if (cid == AMBA_CID || cid == CORESIGHT_CID) {
+ dev->periphid = pid;
+ dev->cid = cid;
+ }
+
+ if (!dev->periphid)
+ ret = -ENODEV;
+
+ iounmap(tmp);
+
+err_clk:
+ amba_put_disable_pclk(dev);
+err_pm:
+ dev_pm_domain_detach(&dev->dev, true);
+err_out:
+ return ret;
+}
+
static int amba_match(struct device *dev, struct device_driver *drv)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
+ if (!pcdev->periphid) {
+ int ret = amba_read_periphid(pcdev);
+
+ /*
+ * Returning any error other than -EPROBE_DEFER from bus match
+ * can cause driver registration failure. So, if there's a
+ * permanent failure in reading pid and cid, simply map it to
+ * -EPROBE_DEFER.
+ */
+ if (ret)
+ return -EPROBE_DEFER;
+ dev_set_uevent_suppress(dev, false);
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+ }
+
/* When driver_override is set, only bind to the matching driver */
if (pcdev->driver_override)
return !strcmp(pcdev->driver_override, drv->name);
@@ -368,6 +457,42 @@ static int __init amba_init(void)
postcore_initcall(amba_init);
+static int amba_proxy_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ WARN(1, "Stub driver should never match any device.\n");
+ return -ENODEV;
+}
+
+static const struct amba_id amba_stub_drv_ids[] = {
+ { 0, 0 },
+};
+
+static struct amba_driver amba_proxy_drv = {
+ .drv = {
+ .name = "amba-proxy",
+ },
+ .probe = amba_proxy_probe,
+ .id_table = amba_stub_drv_ids,
+};
+
+static int __init amba_stub_drv_init(void)
+{
+ if (!IS_ENABLED(CONFIG_MODULES))
+ return 0;
+
+ /*
+ * The amba_match() function will get called only if there is at least
+ * one amba driver registered. If all amba drivers are modules and are
+ * only loaded based on uevents, then we'll hit a chicken-and-egg
+ * situation where amba_match() is waiting on drivers and drivers are
+ * waiting on amba_match(). So, register a stub driver to make sure
+ * amba_match() is called even if no amba driver has been registered.
+ */
+ return amba_driver_register(&amba_proxy_drv);
+}
+late_initcall_sync(amba_stub_drv_init);
+
/**
* amba_driver_register - register an AMBA device driver
* @drv: amba device driver structure
@@ -410,160 +535,6 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
-static int amba_read_periphid(struct amba_device *dev)
-{
- struct reset_control *rstc;
- u32 size, pid, cid;
- void __iomem *tmp;
- int i, ret;
-
- ret = dev_pm_domain_attach(&dev->dev, true);
- if (ret)
- goto err_out;
-
- ret = amba_get_enable_pclk(dev);
- if (ret)
- goto err_pm;
-
- /*
- * Find reset control(s) of the amba bus and de-assert them.
- */
- rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret != -EPROBE_DEFER)
- dev_err(&dev->dev, "can't get reset: %d\n", ret);
- goto err_clk;
- }
- reset_control_deassert(rstc);
- reset_control_put(rstc);
-
- size = resource_size(&dev->res);
- tmp = ioremap(dev->res.start, size);
- if (!tmp) {
- ret = -ENOMEM;
- goto err_clk;
- }
-
- /*
- * Read pid and cid based on size of resource
- * they are located at end of region
- */
- for (pid = 0, i = 0; i < 4; i++)
- pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
- for (cid = 0, i = 0; i < 4; i++)
- cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
-
- if (cid == CORESIGHT_CID) {
- /* set the base to the start of the last 4k block */
- void __iomem *csbase = tmp + size - 4096;
-
- dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
- dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
- }
-
- if (cid == AMBA_CID || cid == CORESIGHT_CID) {
- dev->periphid = pid;
- dev->cid = cid;
- }
-
- if (!dev->periphid)
- ret = -ENODEV;
-
- iounmap(tmp);
-
-err_clk:
- amba_put_disable_pclk(dev);
-err_pm:
- dev_pm_domain_detach(&dev->dev, true);
-err_out:
- return ret;
-}
-
-static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
-{
- int ret;
-
- ret = request_resource(parent, &dev->res);
- if (ret)
- goto err_out;
-
- /* Hard-coded primecell ID instead of plug-n-play */
- if (dev->periphid != 0)
- goto skip_probe;
-
- ret = amba_read_periphid(dev);
- if (ret) {
- if (ret != -EPROBE_DEFER) {
- amba_device_put(dev);
- goto err_out;
- }
- goto err_release;
- }
-
-skip_probe:
- ret = device_add(&dev->dev);
-err_release:
- if (ret)
- release_resource(&dev->res);
-err_out:
- return ret;
-}
-
-/*
- * Registration of AMBA device require reading its pid and cid registers.
- * To do this, the device must be turned on (if it is a part of power domain)
- * and have clocks enabled. However in some cases those resources might not be
- * yet available. Returning EPROBE_DEFER is not a solution in such case,
- * because callers don't handle this special error code. Instead such devices
- * are added to the special list and their registration is retried from
- * periodic worker, until all resources are available and registration succeeds.
- */
-struct deferred_device {
- struct amba_device *dev;
- struct resource *parent;
- struct list_head node;
-};
-
-static LIST_HEAD(deferred_devices);
-static DEFINE_MUTEX(deferred_devices_lock);
-
-static void amba_deferred_retry_func(struct work_struct *dummy);
-static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
-
-#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
-
-static int amba_deferred_retry(void)
-{
- struct deferred_device *ddev, *tmp;
-
- mutex_lock(&deferred_devices_lock);
-
- list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
- int ret = amba_device_try_add(ddev->dev, ddev->parent);
-
- if (ret == -EPROBE_DEFER)
- continue;
-
- list_del_init(&ddev->node);
- kfree(ddev);
- }
-
- mutex_unlock(&deferred_devices_lock);
-
- return 0;
-}
-late_initcall(amba_deferred_retry);
-
-static void amba_deferred_retry_func(struct work_struct *dummy)
-{
- amba_deferred_retry();
-
- if (!list_empty(&deferred_devices))
- schedule_delayed_work(&deferred_retry_work,
- DEFERRED_DEVICE_TIMEOUT);
-}
-
/**
* amba_device_add - add a previously allocated AMBA device structure
* @dev: AMBA device allocated by amba_device_alloc
@@ -575,28 +546,30 @@ static void amba_deferred_retry_func(struct work_struct *dummy)
*/
int amba_device_add(struct amba_device *dev, struct resource *parent)
{
- int ret = amba_device_try_add(dev, parent);
-
- if (ret == -EPROBE_DEFER) {
- struct deferred_device *ddev;
-
- ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
- if (!ddev)
- return -ENOMEM;
+ int ret;
- ddev->dev = dev;
- ddev->parent = parent;
- ret = 0;
+ ret = request_resource(parent, &dev->res);
+ if (ret)
+ return ret;
- mutex_lock(&deferred_devices_lock);
+ /* If primecell ID isn't hard-coded, figure it out */
+ if (!dev->periphid) {
+ /*
+ * AMBA device uevents require reading its pid and cid
+ * registers. To do this, the device must be on, clocked and
+ * out of reset. However in some cases those resources might
+ * not yet be available. If that's the case, we suppress the
+ * generation of uevents until we can read the pid and cid
+ * registers. See also amba_match().
+ */
+ if (amba_read_periphid(dev))
+ dev_set_uevent_suppress(&dev->dev, true);
+ }
- if (list_empty(&deferred_devices))
- schedule_delayed_work(&deferred_retry_work,
- DEFERRED_DEVICE_TIMEOUT);
- list_add_tail(&ddev->node, &deferred_devices);
+ ret = device_add(&dev->dev);
+ if (ret)
+ release_resource(&dev->res);
- mutex_unlock(&deferred_devices_lock);
- }
return ret;
}
EXPORT_SYMBOL_GPL(amba_device_add);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 53b22e26266c..07aa8ae0a058 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -1,13 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
menu "Android"
-config ANDROID
- bool "Android Drivers"
- help
- Enable support for various drivers needed on the Android platform
-
-if ANDROID
-
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
@@ -54,6 +47,4 @@ config ANDROID_BINDER_IPC_SELFTEST
exhaustively with combinations of various buffer sizes and
alignments.
-endif # if ANDROID
-
endmenu
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 362c0deb65f1..c964d7c8c384 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -197,8 +197,32 @@ static inline void binder_stats_created(enum binder_stat_types type)
atomic_inc(&binder_stats.obj_created[type]);
}
-struct binder_transaction_log binder_transaction_log;
-struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log_entry {
+ int debug_id;
+ int debug_id_done;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
+ char context_name[BINDERFS_MAX_NAME + 1];
+};
+
+struct binder_transaction_log {
+ atomic_t cur;
+ bool full;
+ struct binder_transaction_log_entry entry[32];
+};
+
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
@@ -2627,6 +2651,56 @@ static int binder_fixup_parent(struct list_head *pf_head,
}
/**
+ * binder_can_update_transaction() - Can a txn be superseded by an updated one?
+ * @t1: the pending async txn in the frozen process
+ * @t2: the new async txn to supersede the outdated pending one
+ *
+ * Return: true if t2 can supersede t1
+ * false if t2 can not supersede t1
+ */
+static bool binder_can_update_transaction(struct binder_transaction *t1,
+ struct binder_transaction *t2)
+{
+ if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
+ (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
+ return false;
+ if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
+ t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
+ t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
+ t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
+ return true;
+ return false;
+}
+
+/**
+ * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
+ * @t: new async transaction
+ * @target_list: list to find outdated transaction
+ *
+ * Return: the outdated transaction if found
+ * NULL if no outdated transaction can be found
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static struct binder_transaction *
+binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
+ struct list_head *target_list)
+{
+ struct binder_work *w;
+
+ list_for_each_entry(w, target_list, entry) {
+ struct binder_transaction *t_queued;
+
+ if (w->type != BINDER_WORK_TRANSACTION)
+ continue;
+ t_queued = container_of(w, struct binder_transaction, work);
+ if (binder_can_update_transaction(t_queued, t))
+ return t_queued;
+ }
+ return NULL;
+}
+
+/**
* binder_proc_transaction() - sends a transaction to a process and wakes it up
* @t: transaction to send
* @proc: process to send the transaction to
@@ -2651,6 +2725,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
struct binder_node *node = t->buffer->target_node;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
+ struct binder_transaction *t_outdated = NULL;
BUG_ON(!node);
binder_node_lock(node);
@@ -2678,12 +2753,24 @@ static int binder_proc_transaction(struct binder_transaction *t,
if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
- if (thread)
+ if (thread) {
binder_enqueue_thread_work_ilocked(thread, &t->work);
- else if (!pending_async)
+ } else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
- else
+ } else {
+ if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
+ t_outdated = binder_find_outdated_transaction_ilocked(t,
+ &node->async_todo);
+ if (t_outdated) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "txn %d supersedes %d\n",
+ t->debug_id, t_outdated->debug_id);
+ list_del_init(&t_outdated->work.entry);
+ proc->outstanding_txns--;
+ }
+ }
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
+ }
if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
@@ -2692,6 +2779,22 @@ static int binder_proc_transaction(struct binder_transaction *t,
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
+ /*
+ * To reduce potential contention, free the outdated transaction and
+ * buffer after releasing the locks.
+ */
+ if (t_outdated) {
+ struct binder_buffer *buffer = t_outdated->buffer;
+
+ t_outdated->buffer = NULL;
+ buffer->transaction = NULL;
+ trace_binder_transaction_update_buffer_release(buffer);
+ binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
+ binder_alloc_free_buf(&proc->alloc, buffer);
+ kfree(t_outdated);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
+
return 0;
}
@@ -6197,8 +6300,7 @@ static void print_binder_proc_stats(struct seq_file *m,
print_binder_stats(m, " ", &proc->stats);
}
-
-int binder_state_show(struct seq_file *m, void *unused)
+static int state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -6237,7 +6339,7 @@ int binder_state_show(struct seq_file *m, void *unused)
return 0;
}
-int binder_stats_show(struct seq_file *m, void *unused)
+static int stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -6253,7 +6355,7 @@ int binder_stats_show(struct seq_file *m, void *unused)
return 0;
}
-int binder_transactions_show(struct seq_file *m, void *unused)
+static int transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -6309,7 +6411,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
"\n" : " (incomplete)\n");
}
-int binder_transaction_log_show(struct seq_file *m, void *unused)
+static int transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
unsigned int log_cur = atomic_read(&log->cur);
@@ -6341,6 +6443,45 @@ const struct file_operations binder_fops = {
.release = binder_release,
};
+DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(stats);
+DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transaction_log);
+
+const struct binder_debugfs_entry binder_debugfs_entries[] = {
+ {
+ .name = "state",
+ .mode = 0444,
+ .fops = &state_fops,
+ .data = NULL,
+ },
+ {
+ .name = "stats",
+ .mode = 0444,
+ .fops = &stats_fops,
+ .data = NULL,
+ },
+ {
+ .name = "transactions",
+ .mode = 0444,
+ .fops = &transactions_fops,
+ .data = NULL,
+ },
+ {
+ .name = "transaction_log",
+ .mode = 0444,
+ .fops = &transaction_log_fops,
+ .data = &binder_transaction_log,
+ },
+ {
+ .name = "failed_transaction_log",
+ .mode = 0444,
+ .fops = &transaction_log_fops,
+ .data = &binder_transaction_log_failed,
+ },
+ {} /* terminator */
+};
+
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6386,36 +6527,18 @@ static int __init binder_init(void)
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
- if (binder_debugfs_dir_entry_root)
+ if (binder_debugfs_dir_entry_root) {
+ const struct binder_debugfs_entry *db_entry;
+
+ binder_for_each_debugfs_entry(db_entry)
+ debugfs_create_file(db_entry->name,
+ db_entry->mode,
+ binder_debugfs_dir_entry_root,
+ db_entry->data,
+ db_entry->fops);
+
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
-
- if (binder_debugfs_dir_entry_root) {
- debugfs_create_file("state",
- 0444,
- binder_debugfs_dir_entry_root,
- NULL,
- &binder_state_fops);
- debugfs_create_file("stats",
- 0444,
- binder_debugfs_dir_entry_root,
- NULL,
- &binder_stats_fops);
- debugfs_create_file("transactions",
- 0444,
- binder_debugfs_dir_entry_root,
- NULL,
- &binder_transactions_fops);
- debugfs_create_file("transaction_log",
- 0444,
- binder_debugfs_dir_entry_root,
- &binder_transaction_log,
- &binder_transaction_log_fops);
- debugfs_create_file("failed_transaction_log",
- 0444,
- binder_debugfs_dir_entry_root,
- &binder_transaction_log_failed,
- &binder_transaction_log_fops);
}
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5649a0371a1f..1014beb12802 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
if (mm) {
mmap_read_lock(mm);
- vma = alloc->vma;
+ vma = vma_lookup(mm, alloc->vma_addr);
}
if (!vma && need_mm) {
@@ -313,16 +313,22 @@ err_no_vma:
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
- if (vma)
- alloc->vma_vm_mm = vma->vm_mm;
+ unsigned long vm_start = 0;
+
/*
- * If we see alloc->vma is not NULL, buffer data structures set up
- * completely. Look at smp_rmb side binder_alloc_get_vma.
- * We also want to guarantee new alloc->vma_vm_mm is always visible
- * if alloc->vma is set.
+ * Allow clearing the vma while holding just the read lock, so that
+ * munmap can downgrade the write lock before freeing and closing the
+ * file using binder_alloc_vma_close().
*/
- smp_wmb();
- alloc->vma = vma;
+ if (vma) {
+ vm_start = vma->vm_start;
+ alloc->vma_vm_mm = vma->vm_mm;
+ mmap_assert_write_locked(alloc->vma_vm_mm);
+ } else {
+ mmap_assert_locked(alloc->vma_vm_mm);
+ }
+
+ alloc->vma_addr = vm_start;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
@@ -330,11 +336,9 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
{
struct vm_area_struct *vma = NULL;
- if (alloc->vma) {
- /* Look at description in binder_alloc_set_vma */
- smp_rmb();
- vma = alloc->vma;
- }
+ if (alloc->vma_addr)
+ vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+
return vma;
}
@@ -817,7 +821,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma);
+ BUG_ON(alloc->vma_addr &&
+ vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -1084,7 +1089,7 @@ int binder_alloc_shrinker_init(void)
int ret = list_lru_init(&binder_alloc_lru);
if (ret == 0) {
- ret = register_shrinker(&binder_shrinker);
+ ret = register_shrinker(&binder_shrinker, "android-binder");
if (ret)
list_lru_destroy(&binder_alloc_lru);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 7dea57a84c79..1e4fd37af5e0 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct vm_area_struct *vma;
+ unsigned long vma_addr;
struct mm_struct *vma_vm_mm;
void __user *buffer;
struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c2b323bc3b3a..43a881073a42 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->vma_addr)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 8dc0bccf8513..abe19d88c6ec 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -107,41 +107,19 @@ static inline int __init init_binderfs(void)
}
#endif
-int binder_stats_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_stats);
-
-int binder_state_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_state);
-
-int binder_transactions_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_transactions);
-
-int binder_transaction_log_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
-
-struct binder_transaction_log_entry {
- int debug_id;
- int debug_id_done;
- int call_type;
- int from_proc;
- int from_thread;
- int target_handle;
- int to_proc;
- int to_thread;
- int to_node;
- int data_size;
- int offsets_size;
- int return_error_line;
- uint32_t return_error;
- uint32_t return_error_param;
- char context_name[BINDERFS_MAX_NAME + 1];
+struct binder_debugfs_entry {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+ void *data;
};
-struct binder_transaction_log {
- atomic_t cur;
- bool full;
- struct binder_transaction_log_entry entry[32];
-};
+extern const struct binder_debugfs_entry binder_debugfs_entries[];
+
+#define binder_for_each_debugfs_entry(entry) \
+ for ((entry) = binder_debugfs_entries; \
+ (entry)->name; \
+ (entry)++)
enum binder_stat_types {
BINDER_STAT_PROC,
@@ -580,6 +558,4 @@ struct binder_object {
};
};
-extern struct binder_transaction_log binder_transaction_log;
-extern struct binder_transaction_log binder_transaction_log_failed;
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 8eeccdc64724..8cc07e6a4273 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -311,6 +311,10 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
+DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release,
+ TP_PROTO(struct binder_buffer *buffer),
+ TP_ARGS(buffer));
+
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_alloc *alloc, bool allocate,
void __user *start, void __user *end),
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 6c5e94f6cb3a..588d753a7a19 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -629,6 +629,7 @@ static int init_binder_features(struct super_block *sb)
static int init_binder_logs(struct super_block *sb)
{
struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ const struct binder_debugfs_entry *db_entry;
struct binderfs_info *info;
int ret = 0;
@@ -639,43 +640,15 @@ static int init_binder_logs(struct super_block *sb)
goto out;
}
- dentry = binderfs_create_file(binder_logs_root_dir, "stats",
- &binder_stats_fops, NULL);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto out;
- }
-
- dentry = binderfs_create_file(binder_logs_root_dir, "state",
- &binder_state_fops, NULL);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto out;
- }
-
- dentry = binderfs_create_file(binder_logs_root_dir, "transactions",
- &binder_transactions_fops, NULL);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto out;
- }
-
- dentry = binderfs_create_file(binder_logs_root_dir,
- "transaction_log",
- &binder_transaction_log_fops,
- &binder_transaction_log);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto out;
- }
-
- dentry = binderfs_create_file(binder_logs_root_dir,
- "failed_transaction_log",
- &binder_transaction_log_fops,
- &binder_transaction_log_failed);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto out;
+ binder_for_each_debugfs_entry(db_entry) {
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ db_entry->name,
+ db_entry->fops,
+ db_entry->data);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
}
proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index bb45a9c00514..1c9f4fb2595d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -148,7 +148,7 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
- ARCH_BCM_63XX || COMPILE_TEST
+ ARCH_BCMBCA || COMPILE_TEST
select SATA_HOST
help
This option enables support for the AHCI SATA3 controller found on
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 3d345d173556..61b4ccf88bf1 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -480,10 +480,10 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
* RETURNS:
* Determined xfermask.
*/
-unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
- const struct ata_acpi_gtm *gtm)
+unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm)
{
- unsigned long xfer_mask = 0;
+ unsigned int xfer_mask = 0;
unsigned int type;
int unit;
u8 mode;
@@ -525,7 +525,7 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
struct ata_device *dev;
ata_for_each_dev(dev, &ap->link, ENABLED) {
- unsigned long xfer_mask, udma_mask;
+ unsigned int xfer_mask, udma_mask;
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9601fa92950a..826d41f341e4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -93,7 +93,7 @@ struct ata_force_param {
const char *name;
u8 cbl;
u8 spd_limit;
- unsigned long xfer_mask;
+ unsigned int xfer_mask;
unsigned int horkage_on;
unsigned int horkage_off;
u16 lflags_on;
@@ -425,7 +425,7 @@ static void ata_force_xfermask(struct ata_device *dev)
for (i = ata_force_tbl_size - 1; i >= 0; i--) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
- unsigned long pio_mask, mwdma_mask, udma_mask;
+ unsigned int pio_mask, mwdma_mask, udma_mask;
if (fe->port != -1 && fe->port != dev->link->ap->print_id)
continue;
@@ -803,11 +803,11 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
* RETURNS:
* Packed xfer_mask.
*/
-unsigned long ata_pack_xfermask(unsigned long pio_mask,
- unsigned long mwdma_mask,
- unsigned long udma_mask)
+unsigned int ata_pack_xfermask(unsigned int pio_mask,
+ unsigned int mwdma_mask,
+ unsigned int udma_mask)
{
- return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
+ return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
@@ -823,8 +823,8 @@ EXPORT_SYMBOL_GPL(ata_pack_xfermask);
* Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
* Any NULL destination masks will be ignored.
*/
-void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
- unsigned long *mwdma_mask, unsigned long *udma_mask)
+void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
+ unsigned int *mwdma_mask, unsigned int *udma_mask)
{
if (pio_mask)
*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
@@ -857,7 +857,7 @@ static const struct ata_xfer_ent {
* RETURNS:
* Matching XFER_* value, 0xff if no match found.
*/
-u8 ata_xfer_mask2mode(unsigned long xfer_mask)
+u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
int highbit = fls(xfer_mask) - 1;
const struct ata_xfer_ent *ent;
@@ -881,7 +881,7 @@ EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
* RETURNS:
* Matching xfer_mask, 0 if no match found.
*/
-unsigned long ata_xfer_mode2mask(u8 xfer_mode)
+unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
const struct ata_xfer_ent *ent;
@@ -930,7 +930,7 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
* Constant C string representing highest speed listed in
* @mode_mask, or the constant C string "<n/a>".
*/
-const char *ata_mode_string(unsigned long xfer_mask)
+const char *ata_mode_string(unsigned int xfer_mask)
{
static const char * const xfer_mode_str[] = {
"PIO0",
@@ -1103,16 +1103,16 @@ static u64 ata_id_n_sectors(const u16 *id)
if (ata_id_has_lba(id)) {
if (ata_id_has_lba48(id))
return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
- else
- return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- } else {
- if (ata_id_current_chs_valid(id))
- return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
- id[ATA_ID_CUR_SECTORS];
- else
- return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
- id[ATA_ID_SECTORS];
+
+ return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
}
+
+ if (ata_id_current_chs_valid(id))
+ return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
+ (u32)id[ATA_ID_CUR_SECTORS];
+
+ return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
+ (u32)id[ATA_ID_SECTORS];
}
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
@@ -1383,9 +1383,9 @@ static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
* RETURNS:
* Computed xfermask
*/
-unsigned long ata_id_xfermask(const u16 *id)
+unsigned int ata_id_xfermask(const u16 *id)
{
- unsigned long pio_mask, mwdma_mask, udma_mask;
+ unsigned int pio_mask, mwdma_mask, udma_mask;
/* Usual case. Word 53 indicates word 64 is valid */
if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
@@ -1467,10 +1467,10 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
-unsigned ata_exec_internal_sg(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, struct scatterlist *sgl,
- unsigned int n_elem, unsigned long timeout)
+static unsigned ata_exec_internal_sg(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, struct scatterlist *sgl,
+ unsigned int n_elem, unsigned int timeout)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
@@ -1645,7 +1645,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
- unsigned long timeout)
+ unsigned int timeout)
{
struct scatterlist *psg = NULL, sg;
unsigned int n_elem = 0;
@@ -2534,7 +2534,7 @@ int ata_dev_configure(struct ata_device *dev)
struct ata_port *ap = dev->link->ap;
bool print_info = ata_dev_print_info(dev);
const u16 *id = dev->id;
- unsigned long xfer_mask;
+ unsigned int xfer_mask;
unsigned int err_mask;
char revbuf[7]; /* XYZ-99\0 */
char fwrevbuf[ATA_ID_FW_REV_LEN+1];
@@ -3202,8 +3202,8 @@ u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
char buf[32];
- unsigned long orig_mask, xfer_mask;
- unsigned long pio_mask, mwdma_mask, udma_mask;
+ unsigned int orig_mask, xfer_mask;
+ unsigned int pio_mask, mwdma_mask, udma_mask;
int quiet, highbit;
quiet = !!(sel & ATA_DNXFER_QUIET);
@@ -3381,7 +3381,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
/* step 1: calculate xfer_mask */
ata_for_each_dev(dev, link, ENABLED) {
- unsigned long pio_mask, dma_mask;
+ unsigned int pio_mask, dma_mask;
unsigned int mode_mask;
mode_mask = ATA_DMA_MASK_ATA;
@@ -4217,7 +4217,7 @@ static void ata_dev_xfermask(struct ata_device *dev)
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ata_host *host = ap->host;
- unsigned long xfer_mask;
+ unsigned int xfer_mask;
/* controller modes available */
xfer_mask = ata_pack_xfermask(ap->pio_mask,
@@ -4342,7 +4342,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
struct ata_taskfile tf;
unsigned int err_mask;
- unsigned long timeout = 0;
+ unsigned int timeout = 0;
/* set up set-features taskfile */
ata_dev_dbg(dev, "set features - SATA features\n");
@@ -5776,7 +5776,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* set cable, sata_spd_limit and report */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- unsigned long xfer_mask;
+ unsigned int xfer_mask;
/* set SATA cable type if still unset */
if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3307ed45fe4d..7c128c89b454 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -86,36 +86,36 @@ static const unsigned long ata_eh_reset_timeouts[] = {
ULONG_MAX, /* > 1 min has elapsed, give up */
};
-static const unsigned long ata_eh_identify_timeouts[] = {
+static const unsigned int ata_eh_identify_timeouts[] = {
5000, /* covers > 99% of successes and not too boring on failures */
10000, /* combined time till here is enough even for media access */
30000, /* for true idiots */
- ULONG_MAX,
+ UINT_MAX,
};
-static const unsigned long ata_eh_revalidate_timeouts[] = {
+static const unsigned int ata_eh_revalidate_timeouts[] = {
15000, /* Some drives are slow to read log pages when waking-up */
15000, /* combined time till here is enough even for media access */
- ULONG_MAX,
+ UINT_MAX,
};
-static const unsigned long ata_eh_flush_timeouts[] = {
+static const unsigned int ata_eh_flush_timeouts[] = {
15000, /* be generous with flush */
15000, /* ditto */
30000, /* and even more generous */
- ULONG_MAX,
+ UINT_MAX,
};
-static const unsigned long ata_eh_other_timeouts[] = {
+static const unsigned int ata_eh_other_timeouts[] = {
5000, /* same rationale as identify timeout */
10000, /* ditto */
/* but no merciful 30sec for other commands, it just isn't worth it */
- ULONG_MAX,
+ UINT_MAX,
};
struct ata_eh_cmd_timeout_ent {
const u8 *commands;
- const unsigned long *timeouts;
+ const unsigned int *timeouts;
};
/* The following table determines timeouts to use for EH internal
@@ -326,7 +326,7 @@ static int ata_lookup_timeout_table(u8 cmd)
* RETURNS:
* Determined timeout.
*/
-unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
+unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
int ent = ata_lookup_timeout_table(cmd);
@@ -361,7 +361,7 @@ void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
return;
idx = ehc->cmd_timeout_idx[dev->devno][ent];
- if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
+ if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
ehc->cmd_timeout_idx[dev->devno][ent]++;
}
@@ -802,11 +802,11 @@ void ata_port_wait_eh(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);
-static int ata_eh_nr_in_flight(struct ata_port *ap)
+static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
unsigned int tag;
- int nr = 0;
+ unsigned int nr = 0;
/* count only non-internal commands */
ata_qc_for_each(ap, qc, tag) {
@@ -821,7 +821,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
unsigned long flags;
- int cnt;
+ unsigned int cnt;
spin_lock_irqsave(ap->lock, flags);
@@ -870,7 +870,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
*/
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
- int cnt;
+ unsigned int cnt;
/* already scheduled? */
if (ap->pflags & ATA_PFLAG_EH_PENDING)
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 86dbb1cdfabd..29e2f55c6faa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -539,13 +539,13 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
return rc;
}
-static int ata_ioc32(struct ata_port *ap)
+static bool ata_ioc32(struct ata_port *ap)
{
if (ap->flags & ATA_FLAG_PIO_DMA)
- return 1;
+ return true;
if (ap->pflags & ATA_PFLAG_PIO32)
- return 1;
- return 0;
+ return true;
+ return false;
}
/*
@@ -1060,6 +1060,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
+ dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index c38027887499..a7e9a75410a3 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -9,7 +9,7 @@
* and various sysfs attributes to expose these topologies and management
* interfaces to user-space.
*
- * There are 3 objects defined in in this class:
+ * There are 3 objects defined in this class:
* - ata_port
* - ata_link
* - ata_device
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 926a7f41303d..98bc8649c63f 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -52,11 +52,7 @@ extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
- unsigned long timeout);
-extern unsigned ata_exec_internal_sg(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, struct scatterlist *sg,
- unsigned int n_elem, unsigned long timeout);
+ unsigned int timeout);
extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
@@ -136,7 +132,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
-extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
+extern unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
extern void ata_eh_acquire(struct ata_port *ap);
extern void ata_eh_release(struct ata_port *ap);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index ade4c3eee230..f8706ee427d2 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -97,7 +97,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
* this case the list of discovered valid modes obtained by ACPI probing
*/
-static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int pacpi_mode_filter(struct ata_device *adev, unsigned int mask)
{
struct pata_acpi *acpi = adev->link->ap->private_data;
return mask & acpi->mask[adev->devno];
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 1b90cda27246..76ad0e73fe2a 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -115,7 +115,7 @@ static int ali_c2_cable_detect(struct ata_port *ap)
* fix that later on. Also ensure we do not do UDMA on WDC drives
*/
-static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int ali_20_filter(struct ata_device *adev, unsigned int mask)
{
char model_num[ATA_ID_PROD_LEN + 1];
/* No DMA on anything but a disk for now */
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 154748cfcc79..f216f9d7b9ec 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -264,8 +264,8 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* cached during driver attach and are consulted to select transfer
* mode.
*/
-static unsigned long nv_mode_filter(struct ata_device *dev,
- unsigned long xfer_mask)
+static unsigned int nv_mode_filter(struct ata_device *dev,
+ unsigned int xfer_mask)
{
static const unsigned int udma_mask_map[] =
{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
@@ -274,7 +274,7 @@ static unsigned long nv_mode_filter(struct ata_device *dev,
char acpi_str[32] = "";
u32 saved_udma, udma;
const struct ata_acpi_gtm *gtm;
- unsigned long bios_limit = 0, acpi_limit = 0, limit;
+ unsigned int bios_limit = 0, acpi_limit = 0, limit;
/* find out what BIOS configured */
udma = saved_udma = (unsigned long)ap->host->private_data;
@@ -310,10 +310,10 @@ static unsigned long nv_mode_filter(struct ata_device *dev,
cable detection result */
limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);
- ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
- "BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
- xfer_mask, limit, xfer_mask & limit, bios_limit,
- saved_udma, acpi_limit, acpi_str);
+ ata_port_dbg(ap,
+ "nv_mode_filter: 0x%x&0x%x->0x%x, BIOS=0x%x (0x%x) ACPI=0x%x%s\n",
+ xfer_mask, limit, xfer_mask & limit, bios_limit,
+ saved_udma, acpi_limit, acpi_str);
return xfer_mask & limit;
}
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index c99e8f0708b3..7e441fb304d3 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -194,7 +194,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
* Block UDMA on devices that cause trouble with this controller.
*/
-static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 156f304ef051..ce3c5eaa7e76 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -23,7 +23,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.25"
+#define DRV_VERSION "0.6.30"
struct hpt_clock {
u8 xfer_speed;
@@ -278,7 +278,7 @@ static const char * const bad_ata100_5[] = {
* Block UDMA on devices that cause trouble with this controller.
*/
-static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
@@ -297,7 +297,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
* Block UDMA on devices that cause trouble with this controller.
*/
-static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
@@ -314,7 +314,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
* The Marvell bridge chips used on the HighPoint SATA cards do not seem
* to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes...
*/
-static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int hpt372_filter(struct ata_device *adev, unsigned int mask)
{
if (ata_id_is_sata(adev->id))
mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
@@ -592,21 +592,19 @@ static struct ata_port_operations hpt374_fn1_port_ops = {
/**
* hpt37x_clock_slot - Turn timing to PC clock entry
- * @freq: Reported frequency timing
- * @base: Base timing
+ * @freq: Reported frequency in MHz
*
- * Turn the timing data intoa clock slot (0 for 33, 1 for 40, 2 for 50
+ * Turn the timing data into a clock slot (0 for 33, 1 for 40, 2 for 50
* and 3 for 66Mhz)
*/
-static int hpt37x_clock_slot(unsigned int freq, unsigned int base)
+static int hpt37x_clock_slot(unsigned int freq)
{
- unsigned int f = (base * freq) / 192; /* Mhz */
- if (f < 40)
+ if (freq < 40)
return 0; /* 33Mhz slot */
- if (f < 45)
+ if (freq < 45)
return 1; /* 40Mhz slot */
- if (f < 55)
+ if (freq < 55)
return 2; /* 50Mhz slot */
return 3; /* 66Mhz slot */
}
@@ -646,24 +644,57 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev)
return 0;
}
-static u32 hpt374_read_freq(struct pci_dev *pdev)
+static int hpt37x_pci_clock(struct pci_dev *pdev, unsigned int base)
{
- u32 freq;
- unsigned long io_base = pci_resource_start(pdev, 4);
+ unsigned int freq;
+ u32 fcnt;
- if (PCI_FUNC(pdev->devfn) & 1) {
- struct pci_dev *pdev_0;
+ /*
+ * Some devices do not let this value be accessed via PCI space
+ * according to the old driver. In addition we must use the value
+ * from FN 0 on the HPT374.
+ */
+ if (pdev->device == PCI_DEVICE_ID_TTI_HPT374 &&
+ (PCI_FUNC(pdev->devfn) & 1)) {
+ struct pci_dev *pdev_fn0;
- pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
- /* Someone hot plugged the controller on us ? */
- if (pdev_0 == NULL)
+ pdev_fn0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
+ /* Someone hot plugged the controller on us? */
+ if (!pdev_fn0)
return 0;
- io_base = pci_resource_start(pdev_0, 4);
- freq = inl(io_base + 0x90);
- pci_dev_put(pdev_0);
- } else
- freq = inl(io_base + 0x90);
- return freq;
+ fcnt = inl(pci_resource_start(pdev_fn0, 4) + 0x90);
+ pci_dev_put(pdev_fn0);
+ } else {
+ fcnt = inl(pci_resource_start(pdev, 4) + 0x90);
+ }
+
+ if ((fcnt >> 12) != 0xABCDE) {
+ u32 total = 0;
+ int i;
+ u16 sr;
+
+ dev_warn(&pdev->dev, "BIOS clock data not set\n");
+
+ /* This is the process the HPT371 BIOS is reported to use */
+ for (i = 0; i < 128; i++) {
+ pci_read_config_word(pdev, 0x78, &sr);
+ total += sr & 0x1FF;
+ udelay(15);
+ }
+ fcnt = total / 128;
+ }
+ fcnt &= 0x1FF;
+
+ freq = (fcnt * base) / 192; /* in MHz */
+
+ /* Clamp to bands */
+ if (freq < 40)
+ return 33;
+ if (freq < 45)
+ return 40;
+ if (freq < 55)
+ return 50;
+ return 66;
}
/**
@@ -770,7 +801,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
u8 rev = dev->revision;
u8 irqmask;
u8 mcr1;
- u32 freq;
+ unsigned int freq; /* MHz */
int prefer_dpll = 1;
unsigned long iobase = pci_resource_start(dev, 4);
@@ -896,42 +927,16 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (chip_table == &hpt372a)
outb(0x0e, iobase + 0x9c);
- /*
- * Some devices do not let this value be accessed via PCI space
- * according to the old driver. In addition we must use the value
- * from FN 0 on the HPT374.
- */
-
- if (chip_table == &hpt374) {
- freq = hpt374_read_freq(dev);
- if (freq == 0)
- return -ENODEV;
- } else
- freq = inl(iobase + 0x90);
-
- if ((freq >> 12) != 0xABCDE) {
- int i;
- u16 sr;
- u32 total = 0;
-
- dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
-
- /* This is the process the HPT371 BIOS is reported to use */
- for (i = 0; i < 128; i++) {
- pci_read_config_word(dev, 0x78, &sr);
- total += sr & 0x1FF;
- udelay(15);
- }
- freq = total / 128;
- }
- freq &= 0x1FF;
+ freq = hpt37x_pci_clock(dev, chip_table->base);
+ if (!freq)
+ return -ENODEV;
/*
* Turn the frequency check into a band and then find a timing
* table to match it.
*/
- clock_slot = hpt37x_clock_slot(freq, chip_table->base);
+ clock_slot = hpt37x_clock_slot(freq);
if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) {
/*
* We need to try PLL mode instead
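Illustration only, not part of the patch: the refactored hpt37x_pci_clock() above clamps the measured PCI clock to a 33/40/50/66 MHz band, and hpt37x_clock_slot() then picks the timing-table slot for that band. A standalone sketch of how the two steps compose, assuming the 9-bit clock count and per-chip 'base' multiplier used above:

/* Sketch only: raw clock count -> MHz -> band -> timing slot. */
static unsigned int sketch_hpt_clock_band(unsigned int fcnt, unsigned int base)
{
	unsigned int mhz = ((fcnt & 0x1FF) * base) / 192;

	if (mhz < 40)
		return 33;
	if (mhz < 45)
		return 40;
	if (mhz < 55)
		return 50;
	return 66;
}

static int sketch_hpt_clock_slot(unsigned int band)
{
	if (band < 40)
		return 0;	/* 33 MHz timings */
	if (band < 45)
		return 1;	/* 40 MHz timings */
	if (band < 55)
		return 2;	/* 50 MHz timings */
	return 3;		/* 66 MHz timings */
}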
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 1f6afd8ee29b..617c95522f43 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -24,7 +24,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.18"
+#define DRV_VERSION "0.3.19"
enum {
PCI66 = (1 << 1),
@@ -113,7 +113,7 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
* The Marvell bridge chips used on the HighPoint SATA cards do not seem
* to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes...
*/
-static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int hpt372n_filter(struct ata_device *adev, unsigned int mask)
{
if (ata_id_is_sata(adev->id))
mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
@@ -403,17 +403,20 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
return 0;
}
-static int hpt3x2n_pci_clock(struct pci_dev *pdev)
+static int hpt3x2n_pci_clock(struct pci_dev *pdev, unsigned int base)
{
- unsigned long freq;
+ unsigned int freq;
u32 fcnt;
- unsigned long iobase = pci_resource_start(pdev, 4);
- fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */
+ /*
+ * Some devices do not let this value be accessed via PCI space
+ * according to the old driver.
+ */
+ fcnt = inl(pci_resource_start(pdev, 4) + 0x90);
if ((fcnt >> 12) != 0xABCDE) {
+ u32 total = 0;
int i;
u16 sr;
- u32 total = 0;
dev_warn(&pdev->dev, "BIOS clock data not set\n");
@@ -427,7 +430,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
}
fcnt &= 0x1FF;
- freq = (fcnt * 77) / 192;
+ freq = (fcnt * base) / 192; /* in MHz */
/* Clamp to bands */
if (freq < 40)
@@ -559,7 +562,7 @@ hpt372n:
* 50 for UDMA100. Right now we always use 66
*/
- pci_mhz = hpt3x2n_pci_clock(dev);
+ pci_mhz = hpt3x2n_pci_clock(dev, 77);
f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */
f_high = f_low + 2; /* Tolerance */
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 42798402cf63..bfea2be2959a 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1028,7 +1028,7 @@ static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
}
i++;
}
- dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
+ dev_dbg(priv->dev, "Supported masks: PIO=%x, MWDMA=%x, UDMA=%x\n",
pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
}
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 03b6ae37a578..6559b606736d 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -683,7 +683,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
struct bcom_task *dmatsk;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 4fbb3eed8b0b..4191aa61c8e4 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -57,7 +57,7 @@ static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
-static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask);
+static unsigned int pdc2027x_mode_filter(struct ata_device *adev, unsigned int mask);
static int pdc2027x_cable_detect(struct ata_port *ap);
static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed);
@@ -251,7 +251,7 @@ static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
* Block UDMA on devices that cause trouble with this controller.
*/
-static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int pdc2027x_mode_filter(struct ata_device *adev, unsigned int mask)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
struct ata_device *pair = ata_dev_pair(adev);
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index e410fe44177f..c0bc4af0d196 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -150,7 +150,7 @@ static u8 serverworks_is_csb(struct pci_dev *pdev)
* bug we hit.
*/
-static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA)
mask &= ~ATA_MASK_UDMA;
@@ -166,7 +166,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
* Check the blacklist and disable UDMA5 if matched
*/
-static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask)
{
const char *p;
char model_num[ATA_ID_PROD_LEN + 1];
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b5b764e18adf..92e4cf05de2c 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -525,7 +525,7 @@ static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
* Block UDMA6 on devices that do not support it.
*/
-static unsigned long sis_133_mode_filter(struct ata_device *adev, unsigned long mask)
+static unsigned int sis_133_mode_filter(struct ata_device *adev, unsigned int mask)
{
struct ata_port *ap = adev->link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 215c02d4056a..34f00f389932 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -352,7 +352,7 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* one breed of Transcend SSD. Return the updated mask.
*/
-static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
+static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
{
struct ata_host *host = dev->link->ap->host;
const struct via_isa_bridge *config = host->private_data;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index de5bd02cad44..e3cff01201b8 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4057,7 +4057,7 @@ static int mv_platform_probe(struct platform_device *pdev)
/*
* Simple resource validation ..
*/
- if (unlikely(pdev->num_resources != 2)) {
+ if (unlikely(pdev->num_resources != 1)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 17f44abc9418..ad91cc6a34fc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -780,14 +780,11 @@ static int he_init_group(struct he_dev *he_dev, int group)
G0_RBPS_BS + (group * 32));
/* bitmap table */
- he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
- sizeof(*he_dev->rbpl_table),
- GFP_KERNEL);
+ he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL);
if (!he_dev->rbpl_table) {
hprintk("unable to allocate rbpl bitmap table\n");
return -ENOMEM;
}
- bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
/* rbpl_virt 64-bit pointers */
he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
@@ -902,7 +899,7 @@ out_destroy_rbpl_pool:
out_free_rbpl_virt:
kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
- kfree(he_dev->rbpl_table);
+ bitmap_free(he_dev->rbpl_table);
return -ENOMEM;
}
@@ -1578,7 +1575,7 @@ he_stop(struct he_dev *he_dev)
}
kfree(he_dev->rbpl_virt);
- kfree(he_dev->rbpl_table);
+ bitmap_free(he_dev->rbpl_table);
dma_pool_destroy(he_dev->rbpl_pool);
if (he_dev->rbrq_base)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 81ce81a75fc6..681cb3786794 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3752,6 +3752,7 @@ static void __exit idt77252_exit(void)
card = idt77252_chain;
dev = card->atmdev;
idt77252_chain = card->next;
+ del_timer_sync(&card->tst_timer);
if (dev->phy->stop)
dev->phy->stop(dev);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 3e726ee91fdc..324148686953 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -739,7 +739,7 @@ static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
u32 t;
int i;
/*
- * Read the first bit that was clocked with the falling edge of the
+ * Read the first bit that was clocked with the falling edge of
* the last command data clock
*/
NVRAM_CMD(IAREAD + addr);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 579c851a2bd7..0424b59b695e 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
@@ -496,7 +497,7 @@ static int __init get_cpu_for_node(struct device_node *node)
}
static int __init parse_core(struct device_node *core, int package_id,
- int core_id)
+ int cluster_id, int core_id)
{
char name[20];
bool leaf = true;
@@ -512,6 +513,7 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu = get_cpu_for_node(t);
if (cpu >= 0) {
cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else if (cpu != -ENODEV) {
@@ -533,6 +535,7 @@ static int __init parse_core(struct device_node *core, int package_id,
}
cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf && cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
@@ -542,13 +545,13 @@ static int __init parse_core(struct device_node *core, int package_id,
return 0;
}
-static int __init parse_cluster(struct device_node *cluster, int depth)
+static int __init parse_cluster(struct device_node *cluster, int package_id,
+ int cluster_id, int depth)
{
char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
- static int package_id __initdata;
int core_id = 0;
int i, ret;
@@ -563,7 +566,9 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
c = of_get_child_by_name(cluster, name);
if (c) {
leaf = false;
- ret = parse_cluster(c, depth + 1);
+ ret = parse_cluster(c, package_id, i, depth + 1);
+ if (depth > 0)
+ pr_warn("Topology for clusters of clusters not yet supported\n");
of_node_put(c);
if (ret != 0)
return ret;
@@ -587,7 +592,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
}
if (leaf) {
- ret = parse_core(c, package_id, core_id++);
+ ret = parse_core(c, package_id, cluster_id,
+ core_id++);
} else {
pr_err("%pOF: Non-leaf cluster with core %s\n",
cluster, name);
@@ -604,10 +610,33 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
- if (leaf)
+ return 0;
+}
+
+static int __init parse_socket(struct device_node *socket)
+{
+ char name[20];
+ struct device_node *c;
+ bool has_socket = false;
+ int package_id = 0, ret;
+
+ do {
+ snprintf(name, sizeof(name), "socket%d", package_id);
+ c = of_get_child_by_name(socket, name);
+ if (c) {
+ has_socket = true;
+ ret = parse_cluster(c, package_id, -1, 0);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
package_id++;
+ } while (c);
- return 0;
+ if (!has_socket)
+ ret = parse_cluster(socket, 0, -1, 0);
+
+ return ret;
}
static int __init parse_dt_topology(void)
@@ -630,7 +659,7 @@ static int __init parse_dt_topology(void)
if (!map)
goto out;
- ret = parse_cluster(map, 0);
+ ret = parse_socket(map);
if (ret != 0)
goto out_map;
@@ -641,8 +670,10 @@ static int __init parse_dt_topology(void)
* only mark cores described in the DT as possible.
*/
for_each_possible_cpu(cpu)
- if (cpu_topology[cpu].package_id == -1)
+ if (cpu_topology[cpu].package_id < 0) {
ret = -EINVAL;
+ break;
+ }
out_map:
of_node_put(map);
@@ -667,7 +698,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
/* not numa in package, lets use the package siblings */
core_mask = &cpu_topology[cpu].core_sibling;
}
- if (cpu_topology[cpu].llc_id != -1) {
+
+ if (last_level_cache_is_valid(cpu)) {
if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
core_mask = &cpu_topology[cpu].llc_sibling;
}
@@ -686,19 +718,31 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
const struct cpumask *cpu_clustergroup_mask(int cpu)
{
+ /*
+ * Prevent cpu_clustergroup_mask() from spanning more CPUs than, or the
+ * same CPUs as, cpu_coregroup_mask().
+ */
+ if (cpumask_subset(cpu_coregroup_mask(cpu),
+ &cpu_topology[cpu].cluster_sibling))
+ return get_cpu_mask(cpu);
+
return &cpu_topology[cpu].cluster_sibling;
}
void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
+ int cpu, ret;
+
+ ret = detect_cache_attributes(cpuid);
+ if (ret)
+ pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */
for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
+ if (last_level_cache_is_shared(cpu, cpuid)) {
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
@@ -706,15 +750,17 @@ void update_siblings_masks(unsigned int cpuid)
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
- if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
- cpuid_topo->cluster_id != -1) {
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ continue;
+
+ if (cpuid_topo->cluster_id >= 0) {
cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
}
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
@@ -750,7 +796,6 @@ void __init reset_cpu_topology(void)
cpu_topo->core_id = -1;
cpu_topo->cluster_id = -1;
cpu_topo->package_id = -1;
- cpu_topo->llc_id = -1;
clear_cpu_topology(cpu);
}
@@ -780,15 +825,20 @@ __weak int __init parse_acpi_topology(void)
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
+ int ret;
+
reset_cpu_topology();
+ ret = parse_acpi_topology();
+ if (!ret)
+ ret = of_have_populated_dt() && parse_dt_topology();
- /*
- * Discard anything that was parsed if we hit an error so we
- * don't use partial information.
- */
- if (parse_acpi_topology())
- reset_cpu_topology();
- else if (of_have_populated_dt() && parse_dt_topology())
+ if (ret) {
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
reset_cpu_topology();
+ return;
+ }
}
#endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index ab71403d102f..b3a43a164dcd 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -160,6 +160,7 @@ extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
extern void deferred_probe_extend_timeout(void);
+extern void driver_deferred_probe_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index dad296229161..4b5cd08c5a65 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -14,7 +14,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -25,19 +25,60 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
+#define per_cpu_cacheinfo_idx(cpu, idx) \
+ (per_cpu_cacheinfo(cpu) + (idx))
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
return ci_cacheinfo(cpu);
}
-#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
+ /*
+ * For non-DT/ACPI systems, assume unique level 1 caches,
+ * system-wide shared caches for all other levels. This will be used
+ * only if arch specific code has not populated shared_cpu_map
+ */
+ if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
+ return !(this_leaf->level == 1);
+
+ if ((sib_leaf->attributes & CACHE_ID) &&
+ (this_leaf->attributes & CACHE_ID))
+ return sib_leaf->id == this_leaf->id;
+
return sib_leaf->fw_token == this_leaf->fw_token;
}
+bool last_level_cache_is_valid(unsigned int cpu)
+{
+ struct cacheinfo *llc;
+
+ if (!cache_leaves(cpu))
+ return false;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ return (llc->attributes & CACHE_ID) || !!llc->fw_token;
+
+}
+
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
+{
+ struct cacheinfo *llc_x, *llc_y;
+
+ if (!last_level_cache_is_valid(cpu_x) ||
+ !last_level_cache_is_valid(cpu_y))
+ return false;
+
+ llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
+ llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
+
+ return cache_leaves_are_shared(llc_x, llc_y);
+}
+
+#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
@@ -157,27 +198,16 @@ static int cache_setup_of_node(unsigned int cpu)
{
struct device_node *np;
struct cacheinfo *this_leaf;
- struct device *cpu_dev = get_cpu_device(cpu);
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
unsigned int index = 0;
- /* skip if fw_token is already populated */
- if (this_cpu_ci->info_list->fw_token) {
- return 0;
- }
-
- if (!cpu_dev) {
- pr_err("No cpu device for CPU %d\n", cpu);
- return -ENODEV;
- }
- np = cpu_dev->of_node;
+ np = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1)
np = of_find_next_cache_node(np);
else
@@ -196,16 +226,6 @@ static int cache_setup_of_node(unsigned int cpu)
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
-static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
- struct cacheinfo *sib_leaf)
-{
- /*
- * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
- * shared caches for all other levels. This will be used only if
- * arch specific code has not populated shared_cpu_map
- */
- return !(this_leaf->level == 1);
-}
#endif
int __weak cache_setup_acpi(unsigned int cpu)
@@ -215,6 +235,18 @@ int __weak cache_setup_acpi(unsigned int cpu)
unsigned int coherency_max_size;
+static int cache_setup_properties(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ ret = cache_setup_acpi(cpu);
+
+ return ret;
+}
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -225,21 +257,21 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (this_cpu_ci->cpu_map_populated)
return 0;
- if (of_have_populated_dt())
- ret = cache_setup_of_node(cpu);
- else if (!acpi_disabled)
- ret = cache_setup_acpi(cpu);
-
- if (ret)
- return ret;
+ /*
+ * skip setting up cache properties if LLC is valid, just need
+ * to update the shared cpu_map if the cache attributes were
+ * populated early before all the cpus are brought online
+ */
+ if (!last_level_cache_is_valid(cpu)) {
+ ret = cache_setup_properties(cpu);
+ if (ret)
+ return ret;
+ }
for (index = 0; index < cache_leaves(cpu); index++) {
unsigned int i;
- this_leaf = this_cpu_ci->info_list + index;
- /* skip if shared_cpu_map is already populated */
- if (!cpumask_empty(&this_leaf->shared_cpu_map))
- continue;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
@@ -247,7 +279,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
- sib_leaf = sib_cpu_ci->info_list + index;
+
+ sib_leaf = per_cpu_cacheinfo_idx(i, index);
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -263,23 +296,19 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index;
for (index = 0; index < cache_leaves(cpu); index++) {
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
- struct cpu_cacheinfo *sib_cpu_ci;
-
- if (sibling == cpu) /* skip itself */
- continue;
+ struct cpu_cacheinfo *sib_cpu_ci =
+ get_cpu_cacheinfo(sibling);
- sib_cpu_ci = get_cpu_cacheinfo(sibling);
- if (!sib_cpu_ci->info_list)
- continue;
+ if (sibling == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
- sib_leaf = sib_cpu_ci->info_list + index;
+ sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
}
@@ -310,17 +339,28 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static int detect_cache_attributes(unsigned int cpu)
+int detect_cache_attributes(unsigned int cpu)
{
int ret;
+ /* Since early detection of the cacheinfo is allowed via this
+ * function and this also gets called as CPU hotplug callbacks via
+ * cacheinfo_cpu_online, the initialisation can be skipped and only
+ * CPU maps can be updated as the CPU online status would be updated
+ * if called via cacheinfo_cpu_online path.
+ */
+ if (per_cpu_cacheinfo(cpu))
+ goto update_cpu_map;
+
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_KERNEL);
- if (per_cpu_cacheinfo(cpu) == NULL)
+ sizeof(struct cacheinfo), GFP_ATOMIC);
+ if (per_cpu_cacheinfo(cpu) == NULL) {
+ cache_leaves(cpu) = 0;
return -ENOMEM;
+ }
/*
* populate_cache_leaves() may completely setup the cache leaves and
@@ -329,6 +369,8 @@ static int detect_cache_attributes(unsigned int cpu)
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
+
+update_cpu_map:
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are
@@ -614,7 +656,6 @@ static int cache_add_dev(unsigned int cpu)
int rc;
struct device *ci_dev, *parent;
struct cacheinfo *this_leaf;
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
const struct attribute_group **cache_groups;
rc = cpu_cache_sysfs_init(cpu);
@@ -623,7 +664,7 @@ static int cache_add_dev(unsigned int cpu)
parent = per_cpu_cache_dev(cpu);
for (i = 0; i < cache_leaves(cpu); i++) {
- this_leaf = this_cpu_ci->info_list + i;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, i);
if (this_leaf->disable_sysfs)
continue;
if (this_leaf->type == CACHE_TYPE_NOCACHE)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 460d6f163e41..753e7cca0f40 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -54,6 +54,7 @@ static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;
+static bool fw_devlink_best_effort;
/**
* fwnode_link_add - Create a link between two fwnode_handles.
@@ -976,6 +977,12 @@ static void device_links_missing_supplier(struct device *dev)
}
}
+static bool dev_is_best_effort(struct device *dev)
+{
+ return (fw_devlink_best_effort && dev->can_match) ||
+ (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
+}
+
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
@@ -995,7 +1002,7 @@ static void device_links_missing_supplier(struct device *dev)
int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
- int ret = 0;
+ int ret = 0, fwnode_ret = 0;
struct fwnode_handle *sup_fw;
/*
@@ -1008,12 +1015,17 @@ int device_links_check_suppliers(struct device *dev)
sup_fw = list_first_entry(&dev->fwnode->suppliers,
struct fwnode_link,
c_hook)->supplier;
- dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
- sup_fw);
- mutex_unlock(&fwnode_link_lock);
- return -EPROBE_DEFER;
+ if (!dev_is_best_effort(dev)) {
+ fwnode_ret = -EPROBE_DEFER;
+ dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwP\n", sup_fw);
+ } else {
+ fwnode_ret = -EAGAIN;
+ }
}
mutex_unlock(&fwnode_link_lock);
+ if (fwnode_ret == -EPROBE_DEFER)
+ return fwnode_ret;
device_links_write_lock();
@@ -1023,6 +1035,14 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+
+ if (dev_is_best_effort(dev) &&
+ link->flags & DL_FLAG_INFERRED &&
+ !link->supplier->can_match) {
+ ret = -EAGAIN;
+ continue;
+ }
+
device_links_missing_supplier(dev);
dev_err_probe(dev, -EPROBE_DEFER,
"supplier %s not ready\n",
@@ -1035,7 +1055,8 @@ int device_links_check_suppliers(struct device *dev)
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
- return ret;
+
+ return ret ? ret : fwnode_ret;
}
/**
@@ -1300,6 +1321,18 @@ void device_links_driver_bound(struct device *dev)
* safe to drop the managed link completely.
*/
device_link_drop_managed(link);
+ } else if (dev_is_best_effort(dev) &&
+ link->flags & DL_FLAG_INFERRED &&
+ link->status != DL_STATE_CONSUMER_PROBE &&
+ !link->supplier->can_match) {
+ /*
+ * When dev_is_best_effort() is true, we ignore device
+ * links to suppliers that don't have a driver. If the
+ * consumer device still managed to probe, there's no
+ * point in maintaining a device link in a weird state
+ * (consumer probed before supplier). So delete it.
+ */
+ device_link_drop_managed(link);
} else {
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
@@ -1592,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
}
early_param("fw_devlink", fw_devlink_setup);
-static bool fw_devlink_strict;
+static bool fw_devlink_strict = true;
static int __init fw_devlink_strict_setup(char *arg)
{
return strtobool(arg, &fw_devlink_strict);
@@ -1666,6 +1699,62 @@ void fw_devlink_drivers_done(void)
device_links_write_unlock();
}
+/**
+ * wait_for_init_devices_probe - Try to probe any device needed for init
+ *
+ * Some devices might need to be probed and bound successfully before the kernel
+ * boot sequence can finish and move on to init/userspace. For example, a
+ * network interface might need to be bound to be able to mount an NFS rootfs.
+ *
+ * With fw_devlink=on by default, some of these devices might be blocked from
+ * probing because they are waiting on an optional supplier that doesn't have a
+ * driver. While fw_devlink will eventually identify such devices and unblock
+ * the probing automatically, it might be too late by the time it unblocks the
+ * probing of devices. For example, the IP4 autoconfig might time out before
+ * fw_devlink unblocks probing of the network interface.
+ *
+ * This function is available to temporarily try and probe all devices that have
+ * a driver even if some of their suppliers haven't been added or don't have
+ * drivers.
+ *
+ * The drivers can then decide which of the suppliers are optional vs mandatory
+ * and probe the device if possible. By the time this function returns, all such
+ * "best effort" probes are guaranteed to be completed. If a device successfully
+ * probes in this mode, we delete all fw_devlink discovered dependencies of that
+ * device where the supplier hasn't yet probed successfully because they have to
+ * be optional dependencies.
+ *
+ * Any devices that didn't successfully probe go back to being treated as if
+ * this function was never called.
+ *
+ * This also means that some devices that aren't needed for init and could have
+ * waited for their optional supplier to probe (when the supplier's module is
+ * loaded later on) would end up probing prematurely with limited functionality.
+ * So call this function only when boot would fail without it.
+ */
+void __init wait_for_init_devices_probe(void)
+{
+ if (!fw_devlink_flags || fw_devlink_is_permissive())
+ return;
+
+ /*
+ * Wait for all ongoing probes to finish so that the "best effort" is
+ * only applied to devices that can't probe otherwise.
+ */
+ wait_for_device_probe();
+
+ pr_info("Trying to probe devices needed for running init ...\n");
+ fw_devlink_best_effort = true;
+ driver_deferred_probe_trigger();
+
+ /*
+ * Wait for all "best effort" probes to finish before going back to
+ * normal enforcement.
+ */
+ wait_for_device_probe();
+ fw_devlink_best_effort = false;
+}
+
static void fw_devlink_unblock_consumers(struct device *dev)
{
struct device_link *link;
@@ -3843,6 +3932,26 @@ struct device *device_find_child_by_name(struct device *parent,
}
EXPORT_SYMBOL_GPL(device_find_child_by_name);
+static int match_any(struct device *dev, void *unused)
+{
+ return 1;
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, match_any);
+}
+EXPORT_SYMBOL_GPL(device_find_any_child);
+
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
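Illustration only, not part of the patch: a hypothetical early-boot user of the new wait_for_init_devices_probe() helper, assuming its declaration is exported through <linux/device.h> as in the rest of this series; the initcall name is invented.

#include <linux/device.h>
#include <linux/init.h>

/* Hypothetical sketch: a boot path that must have its devices bound before
 * init can continue (e.g. an NFS-root style setup) opts into the temporary
 * "best effort" probing pass added above and waits for it to finish.
 */
static int __init sketch_wait_for_boot_devices(void)
{
	wait_for_init_devices_probe();
	return 0;
}
late_initcall(sketch_wait_for_boot_devices);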
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index a97776ea9d99..4c98849577d4 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -570,6 +570,12 @@ ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -580,6 +586,7 @@ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -592,6 +599,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 11b0fb6414d3..70f79fc71539 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -172,7 +172,7 @@ static bool driver_deferred_probe_enable;
* changes in the midst of a probe, then deferred processing should be triggered
* again.
*/
-static void driver_deferred_probe_trigger(void)
+void driver_deferred_probe_trigger(void)
{
if (!driver_deferred_probe_enable)
return;
@@ -256,7 +256,12 @@ static int deferred_devs_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
+#ifdef CONFIG_MODULES
+int driver_deferred_probe_timeout = 10;
+#else
int driver_deferred_probe_timeout;
+#endif
+
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
static int __init deferred_probe_timeout_setup(char *str)
@@ -269,42 +274,12 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
-/**
- * driver_deferred_probe_check_state() - Check deferred probe state
- * @dev: device to check
- *
- * Return:
- * * -ENODEV if initcalls have completed and modules are disabled.
- * * -ETIMEDOUT if the deferred probe timeout was set and has expired
- * and modules are enabled.
- * * -EPROBE_DEFER in other cases.
- *
- * Drivers or subsystems can opt-in to calling this function instead of directly
- * returning -EPROBE_DEFER.
- */
-int driver_deferred_probe_check_state(struct device *dev)
-{
- if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
- dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
- return -ENODEV;
- }
-
- if (!driver_deferred_probe_timeout && initcalls_done) {
- dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
- return -ETIMEDOUT;
- }
-
- return -EPROBE_DEFER;
-}
-EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
-
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
- driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
@@ -580,7 +555,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
- int ret;
+ int ret, link_ret;
if (defer_all_probes) {
/*
@@ -592,9 +567,9 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return -EPROBE_DEFER;
}
- ret = device_links_check_suppliers(dev);
- if (ret)
- return ret;
+ link_ret = device_links_check_suppliers(dev);
+ if (link_ret == -EPROBE_DEFER)
+ return link_ret;
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -634,6 +609,15 @@ re_probe:
ret = call_driver_probe(dev, drv);
if (ret) {
/*
+ * If fw_devlink_best_effort is active (denoted by -EAGAIN), the
+ * device might actually probe properly once some of its missing
+ * suppliers have probed. So, treat this as if the driver
+ * returned -EPROBE_DEFER.
+ */
+ if (link_ret == -EAGAIN)
+ ret = -EPROBE_DEFER;
+
+ /*
* Return probe errors as positive values so that the callers
* can distinguish them from other errors.
*/
@@ -1115,6 +1099,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
+ bool async = false;
int ret;
/*
@@ -1153,9 +1138,11 @@ static int __driver_attach(struct device *dev, void *data)
if (!dev->driver && !dev->p->async_driver) {
get_device(dev);
dev->p->async_driver = drv;
- async_schedule_dev(__driver_attach_async_helper, dev);
+ async = true;
}
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__driver_attach_async_helper, dev);
return 0;
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 8a3ddbae3b70..e4bffeabf344 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -482,6 +482,7 @@ int __init devtmpfs_init(void)
if (err) {
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
unregister_filesystem(&dev_fs_type);
+ thread = NULL;
return err;
}
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index ac3f34e80194..7c3590fd97c2 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -435,11 +435,11 @@ static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
/* decompress onto the new allocated page */
page = fw_priv->pages[fw_priv->nr_pages - 1];
- xz_buf.out = kmap(page);
+ xz_buf.out = kmap_local_page(page);
xz_buf.out_pos = 0;
xz_buf.out_size = PAGE_SIZE;
xz_ret = xz_dec_run(xz_dec, &xz_buf);
- kunmap(page);
+ kunmap_local(xz_buf.out);
fw_priv->size += xz_buf.out_pos;
/* partial decompression means either end or error */
if (xz_buf.out_pos != PAGE_SIZE)
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index 5b0b85b70b6f..77bad32c481a 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -242,19 +242,17 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
loff_t offset, size_t count, bool read)
{
while (count) {
- void *page_data;
int page_nr = offset >> PAGE_SHIFT;
int page_ofs = offset & (PAGE_SIZE - 1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
- page_data = kmap(fw_priv->pages[page_nr]);
-
if (read)
- memcpy(buffer, page_data + page_ofs, page_cnt);
+ memcpy_from_page(buffer, fw_priv->pages[page_nr],
+ page_ofs, page_cnt);
else
- memcpy(page_data + page_ofs, buffer, page_cnt);
+ memcpy_to_page(fw_priv->pages[page_nr], page_ofs,
+ buffer, page_cnt);
- kunmap(fw_priv->pages[page_nr]);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
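Illustration only, not part of the patch: memcpy_from_page()/memcpy_to_page() used above bracket a plain memcpy() with a short-lived local mapping; a rough sketch of the read direction under the usual <linux/highmem.h> semantics (not the exact helper implementation):

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: roughly what memcpy_from_page() does for the read path above. */
static void sketch_copy_from_page(char *to, struct page *page,
				  size_t offset, size_t len)
{
	char *from = kmap_local_page(page);	/* CPU-local, short-lived map */

	memcpy(to, from + offset, len);
	kunmap_local(from);
}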
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 0ac6376ef7a1..eb0f43784c2b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -45,7 +45,7 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpumap, 0);
+static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
@@ -66,7 +66,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpulist, 0);
+static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 739e52cd4aba..5a2e0232862e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -222,6 +222,9 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
struct dentry *d;
+ if (!genpd_debugfs_dir)
+ return;
+
d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
debugfs_remove(d);
}
@@ -2730,7 +2733,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return driver_deferred_probe_check_state(base_dev);
+ return -ENODEV;
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 949907e2e242..997be3ac20a7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1862,10 +1862,13 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
if (ret)
goto err;
+ dev_pm_enable_wake_irq_complete(dev);
+
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
@@ -1882,6 +1885,7 @@ int pm_runtime_force_suspend(struct device *dev)
return 0;
err:
+ dev_pm_disable_wake_irq_check(dev, true);
pm_runtime_enable(dev);
return ret;
}
@@ -1915,9 +1919,11 @@ int pm_runtime_force_resume(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
goto out;
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 11a4ffe91367..e3befa2c1b66 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -501,36 +501,6 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
/**
- * device_init_wakeup - Device wakeup initialization.
- * @dev: Device to handle.
- * @enable: Whether or not to enable @dev as a wakeup device.
- *
- * By default, most devices should leave wakeup disabled. The exceptions are
- * devices that everyone expects to be wakeup sources: keyboards, power buttons,
- * possibly network interfaces, etc. Also, devices that don't generate their
- * own wakeup requests but merely forward requests from one bus to another
- * (like PCI bridges) should have wakeup enabled by default.
- */
-int device_init_wakeup(struct device *dev, bool enable)
-{
- int ret = 0;
-
- if (!dev)
- return -EINVAL;
-
- if (enable) {
- device_set_wakeup_capable(dev, true);
- ret = device_wakeup_enable(dev);
- } else {
- device_wakeup_disable(dev);
- device_set_wakeup_capable(dev, false);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_init_wakeup);
-
-/**
* device_set_wakeup_enable - Enable or disable a device to wake up the system.
* @dev: Device to handle.
* @enable: enable/disable flag
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d0f5bc827978..362e043e26d8 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -133,6 +133,12 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return -EINVAL;
}
+ if (config->num_reg_defaults && !config->reg_defaults) {
+ dev_err(map->dev,
+ "Register defaults number are set without the reg!\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < config->num_reg_defaults; i++)
if (config->reg_defaults[i].reg % map->reg_stride)
return -EINVAL;
@@ -495,7 +501,8 @@ EXPORT_SYMBOL_GPL(regcache_drop_region);
void regcache_cache_only(struct regmap *map, bool enable)
{
map->lock(map->lock_arg);
- WARN_ON(map->cache_bypass && enable);
+ WARN_ON(map->cache_type != REGCACHE_NONE &&
+ map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map, enable);
map->unlock(map->lock_arg);
@@ -531,7 +538,7 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
* @enable: flag if changes should not be written to the cache
*
* When a register map is marked with the cache bypass option, writes
- * to the register map API will only update the hardware and not the
+ * to the register map API will only update the hardware and not
* the cache directly. This is useful when syncing the cache back to
* the hardware.
*/
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index a6db605707b0..4ef9488d05cd 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -30,6 +30,9 @@ struct regmap_irq_chip_data {
int irq;
int wake_count;
+ unsigned int mask_base;
+ unsigned int unmask_base;
+
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
@@ -39,33 +42,15 @@ struct regmap_irq_chip_data {
unsigned int *type_buf;
unsigned int *type_buf_def;
unsigned int **virt_buf;
+ unsigned int **config_buf;
unsigned int irq_reg_stride;
- unsigned int type_reg_stride;
- bool clear_status:1;
-};
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
-static int sub_irq_reg(struct regmap_irq_chip_data *data,
- unsigned int base_reg, int i)
-{
- const struct regmap_irq_chip *chip = data->chip;
- struct regmap *map = data->map;
- struct regmap_irq_sub_irq_map *subreg;
- unsigned int offset;
- int reg = 0;
-
- if (!chip->sub_reg_offsets || !chip->not_fixed_stride) {
- /* Assume linear mapping */
- reg = base_reg + (i * map->reg_stride * data->irq_reg_stride);
- } else {
- subreg = &chip->sub_reg_offsets[i];
- offset = subreg->offset[0];
- reg = base_reg + offset;
- }
-
- return reg;
-}
+ unsigned int clear_status:1;
+};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
@@ -74,21 +59,25 @@ struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
return &data->chip->irqs[irq];
}
-static void regmap_irq_lock(struct irq_data *data)
+static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = data->map;
- mutex_lock(&d->lock);
+ /*
+ * While a user-defined ->get_irq_reg() callback might
+ * be linear enough to support bulk reads, most of the time it won't.
+ * Therefore only allow them if the default callback is being used.
+ */
+ return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
+ data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
+ !map->use_single_read;
}
-static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
- unsigned int reg, unsigned int mask,
- unsigned int val)
+static void regmap_irq_lock(struct irq_data *data)
{
- if (d->chip->mask_writeonly)
- return regmap_write_bits(d->map, reg, mask, val);
- else
- return regmap_update_bits(d->map, reg, mask, val);
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->lock);
}
static void regmap_irq_sync_unlock(struct irq_data *data)
@@ -97,7 +86,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
struct regmap *map = d->map;
int i, j, ret;
u32 reg;
- u32 unmask_offset;
u32 val;
if (d->chip->runtime_pm) {
@@ -109,7 +97,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (d->clear_status) {
for (i = 0; i < d->chip->num_regs; i++) {
- reg = sub_irq_reg(d, d->chip->status_base, i);
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &val);
if (ret)
@@ -126,44 +114,32 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
- if (!d->chip->mask_base)
- continue;
+ if (d->mask_base) {
+ reg = d->get_irq_reg(d, d->mask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ reg);
+ }
- reg = sub_irq_reg(d, d->chip->mask_base, i);
- if (d->chip->mask_invert) {
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf_def[i], ~d->mask_buf[i]);
- } else if (d->chip->unmask_base) {
- /* set mask with mask_base register */
- ret = regmap_irq_update_bits(d, reg,
+ if (d->unmask_base) {
+ reg = d->get_irq_reg(d, d->unmask_base, i);
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
- if (ret < 0)
- dev_err(d->map->dev,
- "Failed to sync unmasks in %x\n",
+ if (ret)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
- unmask_offset = d->chip->unmask_base -
- d->chip->mask_base;
- /* clear mask with unmask_base register */
- ret = regmap_irq_update_bits(d,
- reg + unmask_offset,
- d->mask_buf_def[i],
- d->mask_buf[i]);
- } else {
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf_def[i], d->mask_buf[i]);
}
- if (ret != 0)
- dev_err(d->map->dev, "Failed to sync masks in %x\n",
- reg);
- reg = sub_irq_reg(d, d->chip->wake_base, i);
+ reg = d->get_irq_reg(d, d->chip->wake_base, i);
if (d->wake_buf) {
if (d->chip->wake_invert)
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
~d->wake_buf[i]);
else
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->wake_buf[i]);
if (ret != 0)
@@ -180,7 +156,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* it'll be ignored in irq handler, then may introduce irq storm
*/
if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
- reg = sub_irq_reg(d, d->chip->ack_base, i);
+ reg = d->get_irq_reg(d, d->chip->ack_base, i);
/* some chips ack by write 0 */
if (d->chip->ack_invert)
@@ -204,12 +180,12 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < d->chip->num_type_reg; i++) {
if (!d->type_buf_def[i])
continue;
- reg = sub_irq_reg(d, d->chip->type_base, i);
+ reg = d->get_irq_reg(d, d->chip->type_base, i);
if (d->chip->type_invert)
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->type_buf_def[i], ~d->type_buf[i]);
else
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->type_buf_def[i], d->type_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to sync type in %x\n",
@@ -220,8 +196,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (d->chip->num_virt_regs) {
for (i = 0; i < d->chip->num_virt_regs; i++) {
for (j = 0; j < d->chip->num_regs; j++) {
- reg = sub_irq_reg(d, d->chip->virt_reg_base[i],
- j);
+ reg = d->get_irq_reg(d, d->chip->virt_reg_base[i],
+ j);
ret = regmap_write(map, reg, d->virt_buf[i][j]);
if (ret != 0)
dev_err(d->map->dev,
@@ -231,6 +207,17 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
}
}
+ for (i = 0; i < d->chip->num_config_bases; i++) {
+ for (j = 0; j < d->chip->num_config_regs; j++) {
+ reg = d->get_irq_reg(d, d->chip->config_base[i], j);
+ ret = regmap_write(map, reg, d->config_buf[i][j]);
+ if (ret)
+ dev_err(d->map->dev,
+ "Failed to write config %x: %d\n",
+ reg, ret);
+ }
+ }
+
if (d->chip->runtime_pm)
pm_runtime_put(map->dev);
@@ -253,22 +240,19 @@ static void regmap_irq_enable(struct irq_data *data)
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
unsigned int reg = irq_data->reg_offset / map->reg_stride;
- unsigned int mask, type;
-
- type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
+ unsigned int mask;
/*
* The type_in_mask flag means that the underlying hardware uses
- * separate mask bits for rising and falling edge interrupts, but
- * we want to make them into a single virtual interrupt with
- * configurable edge.
+ * separate mask bits for each interrupt trigger type, but we want
+ * to have a single logical interrupt with a configurable type.
*
- * If the interrupt we're enabling defines the falling or rising
- * masks then instead of using the regular mask bits for this
- * interrupt, use the value previously written to the type buffer
- * at the corresponding offset in regmap_irq_set_type().
+ * If the interrupt we're enabling defines any supported types
+ * then instead of using the regular mask bits for this interrupt,
+ * use the value previously written to the type buffer at the
+ * corresponding offset in regmap_irq_set_type().
*/
- if (d->chip->type_in_mask && type)
+ if (d->chip->type_in_mask && irq_data->type.types_supported)
mask = d->type_buf[reg] & irq_data->mask;
else
mask = irq_data->mask;
@@ -293,7 +277,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- int reg;
+ int reg, ret;
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
@@ -333,9 +317,19 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
- if (d->chip->set_type_virt)
- return d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
- reg);
+ if (d->chip->set_type_virt) {
+ ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
+ reg);
+ if (ret)
+ return ret;
+ }
+
+ if (d->chip->set_type_config) {
+ ret = d->chip->set_type_config(d->config_buf, type,
+ irq_data, reg);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -376,14 +370,17 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
struct regmap_irq_sub_irq_map *subreg;
+ unsigned int reg;
int i, ret = 0;
if (!chip->sub_reg_offsets) {
- /* Assume linear mapping */
- ret = regmap_read(map, chip->status_base +
- (b * map->reg_stride * data->irq_reg_stride),
- &data->status_buf[b]);
+ reg = data->get_irq_reg(data, chip->status_base, b);
+ ret = regmap_read(map, reg, &data->status_buf[b]);
} else {
+ /*
+ * Note we can't use ->get_irq_reg() here because the offsets
+ * in 'subreg' are *not* interchangeable with indices.
+ */
subreg = &chip->sub_reg_offsets[b];
for (i = 0; i < subreg->num_regs; i++) {
unsigned int offset = subreg->offset[i];
@@ -449,10 +446,18 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* sake of simplicity. and add bulk reads only if needed
*/
for (i = 0; i < chip->num_main_regs; i++) {
- ret = regmap_read(map, chip->main_status +
- (i * map->reg_stride
- * data->irq_reg_stride),
- &data->main_status_buf[i]);
+ /*
+ * For not_fixed_stride, don't use ->get_irq_reg().
+ * It would produce an incorrect result.
+ */
+ if (data->chip->not_fixed_stride)
+ reg = chip->main_status +
+ i * map->reg_stride * data->irq_reg_stride;
+ else
+ reg = data->get_irq_reg(data,
+ chip->main_status, i);
+
+ ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
dev_err(map->dev,
"Failed to read IRQ status %d\n",
@@ -481,8 +486,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
}
- } else if (!map->use_single_read && map->reg_stride == 1 &&
- data->irq_reg_stride == 1) {
+ } else if (regmap_irq_can_bulk_read_status(data)) {
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
@@ -518,7 +522,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
} else {
for (i = 0; i < data->chip->num_regs; i++) {
- unsigned int reg = sub_irq_reg(data,
+ unsigned int reg = data->get_irq_reg(data,
data->chip->status_base, i);
ret = regmap_read(map, reg, &data->status_buf[i]);
@@ -546,7 +550,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
- reg = sub_irq_reg(data, data->chip->ack_base, i);
+ reg = data->get_irq_reg(data, data->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
@@ -607,6 +611,91 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
+ * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
+ * @data: Data for the &struct regmap_irq_chip
+ * @base: Base register
+ * @index: Register index
+ *
+ * Returns the register address corresponding to the given @base and @index
+ * by the formula ``base + index * regmap_stride * irq_reg_stride``.
+ */
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+
+ /*
+ * FIXME: This is for backward compatibility and should be removed
+ * when not_fixed_stride is dropped (it's only used by qcom-pm8008).
+ */
+ if (chip->not_fixed_stride && chip->sub_reg_offsets) {
+ struct regmap_irq_sub_irq_map *subreg;
+
+ subreg = &chip->sub_reg_offsets[0];
+ return base + subreg->offset[0];
+ }
+
+ return base + index * map->reg_stride * data->irq_reg_stride;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
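As a purely illustrative sketch (the chip name, register addresses and strides below are invented for this note, not taken from any driver in the series), the default linear callback resolves per-index registers like this:

/*
 * Hypothetical chip: four status/mask register banks laid out one address
 * apart, i.e. reg_stride == 1 and irq_reg_stride == 1 (other required
 * fields omitted for brevity).
 */
static const struct regmap_irq_chip example_linear_chip = {
	.name		= "example-linear",
	.status_base	= 0x10,
	.mask_base	= 0x20,
	.num_regs	= 4,
	/* .get_irq_reg defaults to regmap_irq_get_irq_reg_linear */
};

/*
 * With this layout:
 *   get_irq_reg(d, 0x10, 0) == 0x10   (first status register)
 *   get_irq_reg(d, 0x10, 3) == 0x13   (fourth status register)
 * and with irq_reg_stride == 4 the same calls would return 0x10 and 0x1c.
 */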
+
+/**
+ * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
+ * @buf: Buffer containing configuration register values; this is a 2D array of
+ * `num_config_bases` rows, each with `num_config_regs` elements.
+ * @type: The requested IRQ type.
+ * @irq_data: The IRQ being configured.
+ * @idx: Index of the irq's config registers within each array `buf[i]`
+ *
+ * This is a &struct regmap_irq_chip->set_type_config callback suitable for
+ * chips with one config register. Register values are updated according to
+ * the &struct regmap_irq_type data associated with an IRQ.
+ */
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx)
+{
+ const struct regmap_irq_type *t = &irq_data->type;
+
+ if (t->type_reg_mask)
+ buf[0][idx] &= ~t->type_reg_mask;
+ else
+ buf[0][idx] &= ~(t->type_falling_val |
+ t->type_rising_val |
+ t->type_level_low_val |
+ t->type_level_high_val);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ buf[0][idx] |= t->type_falling_val;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ buf[0][idx] |= t->type_rising_val;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ buf[0][idx] |= (t->type_falling_val |
+ t->type_rising_val);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ buf[0][idx] |= t->type_level_high_val;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ buf[0][idx] |= t->type_level_low_val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
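A minimal sketch of a chip description wired up to the new helper. All names, addresses and bit encodings here are assumptions made for illustration, not an existing driver:

static const struct regmap_irq example_config_irqs[] = {
	{
		.reg_offset = 0,
		.mask = BIT(0),
		.type = {
			.types_supported	= IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK,
			.type_reg_mask		= 0xf,	/* assumed encodings below */
			.type_rising_val	= 0x1,
			.type_falling_val	= 0x2,
			.type_level_high_val	= 0x4,
			.type_level_low_val	= 0x8,
		},
	},
};

static const unsigned int example_config_base[] = { 0x40 };

static const struct regmap_irq_chip example_config_chip = {
	.name			= "example-config",
	.irqs			= example_config_irqs,
	.num_irqs		= ARRAY_SIZE(example_config_irqs),
	.num_regs		= 1,
	.status_base		= 0x10,
	.mask_base		= 0x20,
	.config_base		= example_config_base,
	.num_config_bases	= ARRAY_SIZE(example_config_base),
	.num_config_regs	= 1,
	.set_type_config	= regmap_irq_set_type_config_simple,
};

With this description, a request for IRQ_TYPE_EDGE_BOTH would leave 0x3 in config_buf[0][idx], to be written back to the config register on sync.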
+
+/**
* regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
* @fwnode: The firmware node where the IRQ domain should be added to.
@@ -634,7 +723,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
int ret = -ENOMEM;
int num_type_reg;
u32 reg;
- u32 unmask_offset;
if (chip->num_regs <= 0)
return -EINVAL;
@@ -651,11 +739,19 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
if (chip->not_fixed_stride) {
+ dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");
+
for (i = 0; i < chip->num_regs; i++)
if (chip->sub_reg_offsets[i].num_regs != 1)
return -EINVAL;
}
+ if (chip->num_type_reg)
+ dev_warn(map->dev, "type registers are deprecated; use config registers instead");
+
+ if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt)
+ dev_warn(map->dev, "virtual registers are deprecated; use config registers instead");
+
if (irq_base) {
irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
if (irq_base < 0) {
@@ -671,30 +767,30 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->num_main_regs) {
d->main_status_buf = kcalloc(chip->num_main_regs,
- sizeof(unsigned int),
+ sizeof(*d->main_status_buf),
GFP_KERNEL);
if (!d->main_status_buf)
goto err_alloc;
}
- d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
GFP_KERNEL);
if (!d->status_buf)
goto err_alloc;
- d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
goto err_alloc;
- d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
GFP_KERNEL);
if (!d->mask_buf_def)
goto err_alloc;
if (chip->wake_base) {
- d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+ d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
GFP_KERNEL);
if (!d->wake_buf)
goto err_alloc;
@@ -703,11 +799,11 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
if (num_type_reg) {
d->type_buf_def = kcalloc(num_type_reg,
- sizeof(unsigned int), GFP_KERNEL);
+ sizeof(*d->type_buf_def), GFP_KERNEL);
if (!d->type_buf_def)
goto err_alloc;
- d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
+ d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf),
GFP_KERNEL);
if (!d->type_buf)
goto err_alloc;
@@ -724,13 +820,31 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
for (i = 0; i < chip->num_virt_regs; i++) {
d->virt_buf[i] = kcalloc(chip->num_regs,
- sizeof(unsigned int),
+ sizeof(**d->virt_buf),
GFP_KERNEL);
if (!d->virt_buf[i])
goto err_alloc;
}
}
+ if (chip->num_config_bases && chip->num_config_regs) {
+ /*
+ * Create config_buf[num_config_bases][num_config_regs]
+ */
+ d->config_buf = kcalloc(chip->num_config_bases,
+ sizeof(*d->config_buf), GFP_KERNEL);
+ if (!d->config_buf)
+ goto err_alloc;
+
+ for (i = 0; i < chip->num_config_bases; i++) {
+ d->config_buf[i] = kcalloc(chip->num_config_regs,
+ sizeof(**d->config_buf),
+ GFP_KERNEL);
+ if (!d->config_buf[i])
+ goto err_alloc;
+ }
+ }
+
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
d->irq = irq;
@@ -738,18 +852,53 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->chip = chip;
d->irq_base = irq_base;
+ if (chip->mask_base && chip->unmask_base &&
+ !chip->mask_unmask_non_inverted) {
+ /*
+ * Chips that specify both mask_base and unmask_base used to
+ * get inverted mask behavior by default, with no way to ask
+ * for the normal, non-inverted behavior. This "inverted by
+ * default" behavior is deprecated, but we have to support it
+ * until existing drivers have been fixed.
+ *
+ * Existing drivers should be updated by swapping mask_base
+ * and unmask_base and setting mask_unmask_non_inverted=true.
+ * New drivers should always set the flag.
+ */
+ dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
+
+ /* Might as well warn about mask_invert while we're at it... */
+ if (chip->mask_invert)
+ dev_warn(map->dev, "mask_invert=true ignored");
+
+ d->mask_base = chip->unmask_base;
+ d->unmask_base = chip->mask_base;
+ } else if (chip->mask_invert) {
+ /*
+ * Swap the roles of mask_base and unmask_base if the bits are
+ * inverted. This is deprecated, drivers should use unmask_base
+ * directly.
+ */
+ dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");
+
+ d->mask_base = chip->unmask_base;
+ d->unmask_base = chip->mask_base;
+ } else {
+ d->mask_base = chip->mask_base;
+ d->unmask_base = chip->unmask_base;
+ }
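For drivers being converted away from the deprecated behaviours, a minimal sketch of the preferred layout might look like this (register addresses are invented for illustration):

/*
 * Separate set/clear mask registers: writing 1 to mask_base masks an
 * interrupt, writing 1 to unmask_base unmasks it.  With the new flag set,
 * no implicit inversion or swapping takes place.
 */
static const struct regmap_irq_chip example_mask_chip = {
	.name			= "example-mask",
	.status_base		= 0x10,
	.mask_base		= 0x20,	/* "mask set" registers */
	.unmask_base		= 0x30,	/* "mask clear" registers */
	.mask_unmask_non_inverted = true,
	.num_regs		= 2,
};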
+
if (chip->irq_reg_stride)
d->irq_reg_stride = chip->irq_reg_stride;
else
d->irq_reg_stride = 1;
- if (chip->type_reg_stride)
- d->type_reg_stride = chip->type_reg_stride;
+ if (chip->get_irq_reg)
+ d->get_irq_reg = chip->get_irq_reg;
else
- d->type_reg_stride = 1;
+ d->get_irq_reg = regmap_irq_get_irq_reg_linear;
- if (!map->use_single_read && map->reg_stride == 1 &&
- d->irq_reg_stride == 1) {
+ if (regmap_irq_can_bulk_read_status(d)) {
d->status_reg_buf = kmalloc_array(chip->num_regs,
map->format.val_bytes,
GFP_KERNEL);
@@ -766,35 +915,34 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
- if (!chip->mask_base)
- continue;
- reg = sub_irq_reg(d, d->chip->mask_base, i);
+ if (d->mask_base) {
+ reg = d->get_irq_reg(d, d->mask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
- if (chip->mask_invert)
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf[i], ~d->mask_buf[i]);
- else if (d->chip->unmask_base) {
- unmask_offset = d->chip->unmask_base -
- d->chip->mask_base;
- ret = regmap_irq_update_bits(d,
- reg + unmask_offset,
- d->mask_buf[i],
- d->mask_buf[i]);
- } else
- ret = regmap_irq_update_bits(d, reg,
- d->mask_buf[i], d->mask_buf[i]);
- if (ret != 0) {
- dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
- reg, ret);
- goto err_alloc;
+ if (d->unmask_base) {
+ reg = d->get_irq_reg(d, d->unmask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], ~d->mask_buf[i]);
+ if (ret) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
}
if (!chip->init_ack_masked)
continue;
/* Ack masked but set interrupts */
- reg = sub_irq_reg(d, d->chip->status_base, i);
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
@@ -806,7 +954,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->status_buf[i] = ~d->status_buf[i];
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
- reg = sub_irq_reg(d, d->chip->ack_base, i);
+ reg = d->get_irq_reg(d, d->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
~(d->status_buf[i] & d->mask_buf[i]));
@@ -831,14 +979,14 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (d->wake_buf) {
for (i = 0; i < chip->num_regs; i++) {
d->wake_buf[i] = d->mask_buf_def[i];
- reg = sub_irq_reg(d, d->chip->wake_base, i);
+ reg = d->get_irq_reg(d, d->chip->wake_base, i);
if (chip->wake_invert)
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
0);
else
- ret = regmap_irq_update_bits(d, reg,
+ ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->wake_buf[i]);
if (ret != 0) {
@@ -851,7 +999,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
- reg = sub_irq_reg(d, d->chip->type_base, i);
+ reg = d->get_irq_reg(d, d->chip->type_base, i);
ret = regmap_read(map, reg, &d->type_buf_def[i]);
@@ -907,6 +1055,11 @@ err_alloc:
kfree(d->virt_buf[i]);
kfree(d->virt_buf);
}
+ if (d->config_buf) {
+ for (i = 0; i < chip->num_config_bases; i++)
+ kfree(d->config_buf[i]);
+ kfree(d->config_buf);
+ }
kfree(d);
return ret;
}
@@ -947,7 +1100,7 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
unsigned int virq;
- int hwirq;
+ int i, hwirq;
if (!d)
return;
@@ -977,6 +1130,11 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->mask_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ if (d->config_buf) {
+ for (i = 0; i < d->chip->num_config_bases; i++)
+ kfree(d->config_buf[i]);
+ kfree(d->config_buf);
+ }
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c3517ccc3159..fee221c5008c 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -882,6 +882,8 @@ struct regmap *__regmap_init(struct device *dev,
if (config && config->read && config->write) {
map->reg_read = _regmap_bus_read;
+ if (config->reg_update_bits)
+ map->reg_update_bits = config->reg_update_bits;
/* Bulk read/write */
map->read = config->read;
@@ -1298,6 +1300,9 @@ static void regmap_field_init(struct regmap_field *rm_field,
rm_field->reg = reg_field.reg;
rm_field->shift = reg_field.lsb;
rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
+
+ WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
+
rm_field->id_size = reg_field.id_size;
rm_field->id_offset = reg_field.id_offset;
}
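A field definition that satisfies the new sanity check could look like this sketch (the register address and bit positions are illustrative only):

/* A 3-bit field covering bits [6:4] of register 0x08: mask = GENMASK(6, 4). */
static const struct reg_field example_field = REG_FIELD(0x08, 4, 6);

/*
 * Swapping lsb and msb, e.g. REG_FIELD(0x08, 6, 4), yields an empty mask
 * and now triggers the WARN_ONCE() added above.
 */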
@@ -2219,6 +2224,28 @@ int regmap_field_update_bits_base(struct regmap_field *field,
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
/**
+ * regmap_field_test_bits() - Check if all specified bits are set in a
+ * register field.
+ *
+ * @field: Register field to operate on
+ * @bits: Bits to test
+ *
+ * Returns a negative error code if the underlying regmap_field_read() fails,
+ * 0 if at least one of the tested bits is not set and 1 if all tested bits
+ * are set.
+ */
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_field_read(field, &val);
+ if (ret)
+ return ret;
+
+ return (val & bits) == bits;
+}
+EXPORT_SYMBOL_GPL(regmap_field_test_bits);
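A hedged usage sketch for the new helper; the field and bit positions are illustrative, not from an in-tree driver:

/*
 * Returns 0 when both bits 0 and 1 are set, -EBUSY when either is clear,
 * or the error from the underlying read.
 */
static int example_check_ready(struct regmap_field *status_field)
{
	int ret = regmap_field_test_bits(status_field, BIT(0) | BIT(1));

	if (ret < 0)
		return ret;

	return ret ? 0 : -EBUSY;
}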
+
+/**
* regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
* register field with port ID
*
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index ac6ad9ab67f9..89f98be5c5b9 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -62,47 +62,47 @@ define_id_show_func(ppin, "0x%llx");
static DEVICE_ATTR_ADMIN_RO(ppin);
define_siblings_read_func(thread_siblings, sibling_cpumask);
-static BIN_ATTR_RO(thread_siblings, 0);
-static BIN_ATTR_RO(thread_siblings_list, 0);
+static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
-static BIN_ATTR_RO(core_cpus, 0);
-static BIN_ATTR_RO(core_cpus_list, 0);
+static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
-static BIN_ATTR_RO(core_siblings, 0);
-static BIN_ATTR_RO(core_siblings_list, 0);
+static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
-static BIN_ATTR_RO(cluster_cpus, 0);
-static BIN_ATTR_RO(cluster_cpus_list, 0);
+static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
-static BIN_ATTR_RO(die_cpus, 0);
-static BIN_ATTR_RO(die_cpus_list, 0);
+static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
define_siblings_read_func(package_cpus, core_cpumask);
-static BIN_ATTR_RO(package_cpus, 0);
-static BIN_ATTR_RO(package_cpus_list, 0);
+static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
-static BIN_ATTR_RO(book_siblings, 0);
-static BIN_ATTR_RO(book_siblings_list, 0);
+static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
-static BIN_ATTR_RO(drawer_siblings, 0);
-static BIN_ATTR_RO(drawer_siblings_list, 0);
+static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
static struct bin_attribute *bin_attrs[] = {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index fdb81f2794cd..db1b4b202646 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -248,15 +248,6 @@ config BLK_DEV_NBD
If unsure, say N.
-config BLK_DEV_SX8
- tristate "Promise SATA SX8 support"
- depends on PCI
- help
- Saying Y or M here will enable support for the
- Promise SATA SX8 controllers.
-
- Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
-
config BLK_DEV_RAM
tristate "RAM block device support"
help
@@ -408,6 +399,15 @@ config BLK_DEV_RBD
If unsure, say N.
+config BLK_DEV_UBLK
+ tristate "Userspace block driver (Experimental)"
+ select IO_URING
+ help
+ io_uring based userspace block driver. Together with the ublk server, ublk
+ has been working well, but the interface with userspace and the command
+ data definition aren't finalized yet and might change according to future
+ requirements, so mark it as experimental for now.
+
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 934a9c7c3a7c..101612cba303 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -26,8 +26,6 @@ obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
-obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
-
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
@@ -39,4 +37,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 5a566f2fd533..4c8b2ba579ee 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1802,7 +1802,7 @@ static int fd_alloc_disk(int drive, int system)
unit[drive].gendisk[system] = disk;
err = add_disk(disk);
if (err)
- blk_cleanup_disk(disk);
+ put_disk(disk);
return err;
}
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 348adf335217..12b3ca8f6f4a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -427,7 +427,7 @@ aoeblk_gdalloc(void *vp)
return;
out_disk_cleanup:
- blk_cleanup_disk(gd);
+ put_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index b381d1c3ef32..3523dd82d7a0 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -277,7 +277,7 @@ freedev(struct aoedev *d)
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
- blk_cleanup_disk(d->gd);
+ put_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
}
t = d->targets;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index e232cc4fd444..9deb4df6bdb8 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2031,7 +2031,7 @@ static void ataflop_probe(dev_t dev)
return;
cleanup_disk:
- blk_cleanup_disk(unit[drive].disk[type]);
+ put_disk(unit[drive].disk[type]);
unit[drive].disk[type] = NULL;
}
@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
if (!unit[i].disk[type])
continue;
del_gendisk(unit[i].disk[type]);
- blk_cleanup_queue(unit[i].disk[type]->queue);
put_disk(unit[i].disk[type]);
}
blk_mq_free_tag_set(&unit[i].tag_set);
@@ -2064,7 +2063,7 @@ static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
continue;
if (fs->registered[type])
del_gendisk(fs->disk[type]);
- blk_cleanup_disk(fs->disk[type]);
+ put_disk(fs->disk[type]);
}
blk_mq_free_tag_set(&fs->tag_set);
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6e3f2f0d2352..859499cd1ff8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -256,7 +256,7 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, unsigned int op,
+ unsigned int len, unsigned int off, enum req_op op,
sector_t sector)
{
void *mem;
@@ -310,7 +310,7 @@ static void brd_submit_bio(struct bio *bio)
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err;
@@ -419,7 +419,7 @@ static int brd_alloc(int i)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_dev:
list_del(&brd->brd_list);
kfree(brd);
@@ -439,7 +439,7 @@ static void brd_cleanup(void)
list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
del_gendisk(brd->brd_disk);
- blk_cleanup_disk(brd->brd_disk);
+ put_disk(brd->brd_disk);
brd_free_pages(brd);
list_del(&brd->brd_list);
kfree(brd);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index f5bcded3640d..e27478ae579c 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -124,12 +124,13 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
- sector_t sector, int op)
+ sector_t sector, enum req_op op)
{
struct bio *bio;
/* we do all our meta data IO in aligned 4k blocks. */
const int size = 4096;
- int err, op_flags = 0;
+ int err;
+ blk_opf_t op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
@@ -174,7 +175,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
- sector_t sector, int op)
+ sector_t sector, enum req_op op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@@ -385,7 +386,7 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact
write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
- if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
err = -EIO;
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 9e060e49b3f8..7d9db33363de 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -974,25 +974,58 @@ static void drbd_bm_endio(struct bio *bio)
}
}
+/* For the layout, see comment above drbd_md_set_sector_offsets(). */
+static inline sector_t drbd_md_last_bitmap_sector(struct drbd_backing_dev *bdev)
+{
+ switch (bdev->md.meta_dev_idx) {
+ case DRBD_MD_INDEX_INTERNAL:
+ case DRBD_MD_INDEX_FLEX_INT:
+ return bdev->md.md_offset + bdev->md.al_offset -1;
+ case DRBD_MD_INDEX_FLEX_EXT:
+ default:
+ return bdev->md.md_offset + bdev->md.md_size_sect -1;
+ }
+}
+
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
- unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
- struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
- GFP_NOIO, &drbd_md_io_bio_set);
+ enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
struct drbd_bitmap *b = device->bitmap;
+ struct bio *bio;
struct page *page;
+ sector_t last_bm_sect;
+ sector_t first_bm_sect;
+ sector_t on_disk_sector;
unsigned int len;
- sector_t on_disk_sector =
- device->ldev->md.md_offset + device->ldev->md.bm_offset;
- on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
+ first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset;
+ on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT));
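	/*
	 * Worked example (illustrative numbers only): with 4 KiB pages and
	 * 512-byte sectors, PAGE_SHIFT - SECTOR_SHIFT == 3, so bitmap page
	 * page_nr starts page_nr * 8 sectors into the on-disk bitmap; e.g.
	 * page 3 of a bitmap starting at sector 1000 begins at sector 1024.
	 */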
/* this might happen with very small
* flexible external meta data device,
* or with PAGE_SIZE > 4k */
- len = min_t(unsigned int, PAGE_SIZE,
- (drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
+ last_bm_sect = drbd_md_last_bitmap_sector(device->ldev);
+ if (first_bm_sect <= on_disk_sector && last_bm_sect >= on_disk_sector) {
+ sector_t len_sect = last_bm_sect - on_disk_sector + 1;
+ if (len_sect < PAGE_SIZE/SECTOR_SIZE)
+ len = (unsigned int)len_sect*SECTOR_SIZE;
+ else
+ len = PAGE_SIZE;
+ } else {
+ if (__ratelimit(&drbd_ratelimit_state)) {
+ drbd_err(device, "Invalid offset during on-disk bitmap access: "
+ "page idx %u, sector %llu\n", page_nr, on_disk_sector);
+ }
+ ctx->error = -EIO;
+ bm_set_page_io_err(b->bm_pages[page_nr]);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&device->misc_wait);
+ kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
+ }
+ return;
+ }
/* serialize IO on this page */
bm_page_lock_io(device, page_nr);
@@ -1007,6 +1040,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
+ bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 4d3efaa20b7b..f15f2f041596 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1495,7 +1495,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
- struct drbd_backing_dev *bdev, sector_t sector, int op);
+ struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
@@ -1547,8 +1547,7 @@ extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
- struct drbd_peer_request *, const unsigned,
- const unsigned, const int);
+ struct drbd_peer_request *, blk_opf_t, int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2887350ae010..f3e4db16fd07 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2207,7 +2207,7 @@ void drbd_destroy_device(struct kref *kref)
if (device->bitmap) /* should no longer be there. */
drbd_bm_cleanup(device);
__free_page(device->md_io.page);
- blk_cleanup_disk(device->vdisk);
+ put_disk(device->vdisk);
kfree(device->rs_plan_s);
/* not for_each_connection(connection, resource):
@@ -2807,7 +2807,7 @@ out_no_minor_idr:
out_no_bitmap:
__free_page(device->md_io.page);
out_no_io_page:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_no_disk:
kref_put(&resource->kref, drbd_destroy_resource);
kfree(device);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6762be53f409..af4c7d65490b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1621,8 +1621,7 @@ static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, stru
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
- const unsigned op, const unsigned op_flags,
- const int fault_type)
+ const blk_opf_t opf, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
@@ -1668,8 +1667,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
- bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
- GFP_NOIO);
+ bio = bio_alloc(device->ldev->backing_bdev, nr_pages, opf, GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio->bi_private = peer_req;
@@ -2060,7 +2058,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE,
DRBD_FAULT_RS_WR) == 0)
return 0;
@@ -2383,14 +2381,14 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio_flags(u32 dpf)
+static blk_opf_t wire_flags_to_bio_flags(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
}
-static unsigned long wire_flags_to_bio_op(u32 dpf)
+static enum req_op wire_flags_to_bio_op(u32 dpf)
{
if (dpf & DP_ZEROES)
return REQ_OP_WRITE_ZEROES;
@@ -2543,7 +2541,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
- int op, op_flags;
+ enum req_op op;
+ blk_opf_t op_flags;
u32 dp_flags;
int err, tp;
@@ -2681,7 +2680,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
}
- err = drbd_submit_peer_request(device, peer_req, op, op_flags,
+ err = drbd_submit_peer_request(device, peer_req, op | op_flags,
DRBD_FAULT_DT_WR);
if (!err)
return 0;
@@ -2979,7 +2978,7 @@ submit_for_resync:
submit:
update_receiver_timing_details(connection, drbd_submit_peer_request);
inc_unacked(device);
- if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
fault_type) == 0)
return 0;
@@ -4951,7 +4950,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
if (get_ldev(device)) {
struct drbd_peer_request *peer_req;
- const int op = REQ_OP_WRITE_ZEROES;
+ const enum req_op op = REQ_OP_WRITE_ZEROES;
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
size, 0, GFP_NOIO);
@@ -4969,7 +4968,8 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
- err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
+ err = drbd_submit_peer_request(device, peer_req, op,
+ DRBD_FAULT_RS_WR);
if (err) {
spin_lock_irq(&device->resource->req_lock);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index e64bcfba30ef..8f7f144e54f3 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -523,16 +523,14 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
- char b[BDEVNAME_SIZE];
-
if (!__ratelimit(&drbd_ratelimit_state))
return;
- drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
+ drbd_warn(device, "local %s IO error sector %llu+%u on %pg\n",
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
- bdevname(device->ldev->backing_bdev, b));
+ device->ldev->backing_bdev);
}
/* Helper for HANDED_OVER_TO_NETWORK.
@@ -1610,7 +1608,7 @@ void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
/*
* what we "blindly" assume:
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index af3051dd8912..0bb1a900c2d5 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -405,7 +405,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
spin_unlock_irq(&device->resource->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
DRBD_FAULT_RS_RD) == 0)
return 0;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 015841f50f4e..ccad3d7b3ddd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2859,7 +2859,7 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
- (unsigned long long) current_req->cmd_flags))
+ (__force unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
if (test_and_set_bit(0, &fdc_busy)) {
@@ -4557,7 +4557,7 @@ out:
return;
cleanup_disk:
- blk_cleanup_disk(disks[drive][type]);
+ put_disk(disks[drive][type]);
disks[drive][type] = NULL;
mutex_unlock(&floppy_probe_lock);
}
@@ -4753,7 +4753,7 @@ out_put_disk:
if (!disks[drive][0])
break;
del_timer_sync(&motor_off_timer[drive]);
- blk_cleanup_disk(disks[drive][0]);
+ put_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
}
return err;
@@ -4985,7 +4985,7 @@ static void __exit floppy_module_exit(void)
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
- blk_cleanup_disk(disks[drive][i]);
+ put_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 084f9b8a0ba3..e3c0ba93c1a3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2040,7 +2040,7 @@ static int loop_add(int i)
return i;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
@@ -2057,7 +2057,6 @@ static void loop_remove(struct loop_device *lo)
{
/* Make this loop device unreachable from pathname. */
del_gendisk(lo->lo_disk);
- blk_cleanup_queue(lo->lo_disk->queue);
blk_mq_free_tag_set(&lo->tag_set);
mutex_lock(&loop_ctl_mutex);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 27386a572ba4..562725d222a7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -94,17 +94,12 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
-static LIST_HEAD(online_list);
-static LIST_HEAD(removing_list);
-static DEFINE_SPINLOCK(dev_lock);
-
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
-static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS];
@@ -146,11 +141,8 @@ static bool mtip_check_surprise_removal(struct driver_data *dd)
pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) {
dd->sr = true;
- if (dd->queue)
- blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
- else
- dev_warn(&dd->pdev->dev,
- "%s: dd->queue is NULL\n", __func__);
+ if (dd->disk)
+ blk_mark_disk_dead(dd->disk);
return true; /* device removed */
}
@@ -2170,106 +2162,6 @@ static const struct attribute_group *mtip_disk_attr_groups[] = {
NULL,
};
-/* debugsfs entries */
-
-static ssize_t show_device_status(struct device_driver *drv, char *buf)
-{
- int size = 0;
- struct driver_data *dd, *tmp;
- unsigned long flags;
- char id_buf[42];
- u16 status = 0;
-
- spin_lock_irqsave(&dev_lock, flags);
- size += sprintf(&buf[size], "Devices Present:\n");
- list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify + 10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
-
- size += sprintf(&buf[size], "Devices Being Removed:\n");
- list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify+10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
- spin_unlock_irqrestore(&dev_lock, flags);
-
- return size;
-}
-
-static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
- size_t len, loff_t *offset)
-{
- int size = *offset;
- char *buf;
- int rv = 0;
-
- if (!len || *offset)
- return 0;
-
- buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- size += show_device_status(NULL, buf);
-
- *offset = size <= len ? size : len;
- size = copy_to_user(ubuf, buf, *offset);
- if (size)
- rv = -EFAULT;
-
- kfree(buf);
- return rv ? rv : *offset;
-}
-
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
@@ -2363,13 +2255,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return rv ? rv : *offset;
}
-static const struct file_operations mtip_device_status_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = mtip_hw_read_device_status,
- .llseek = no_llseek,
-};
-
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -2556,7 +2441,7 @@ static void mtip_softirq_done_fn(struct request *rq)
blk_mq_end_request(rq, cmd->status);
}
-static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_abort_cmd(struct request *req, void *data)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
struct driver_data *dd = data;
@@ -2569,7 +2454,7 @@ static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
return true;
}
-static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_queue_cmd(struct request *req, void *data)
{
struct driver_data *dd = data;
@@ -3297,26 +3182,12 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
-static int mtip_block_open(struct block_device *dev, fmode_t mode)
+static void mtip_block_free_disk(struct gendisk *disk)
{
- struct driver_data *dd;
+ struct driver_data *dd = disk->private_data;
- if (dev && dev->bd_disk) {
- dd = (struct driver_data *) dev->bd_disk->private_data;
-
- if (dd) {
- if (test_bit(MTIP_DDF_REMOVAL_BIT,
- &dd->dd_flag)) {
- return -ENODEV;
- }
- return 0;
- }
- }
- return -ENODEV;
-}
-
-static void mtip_block_release(struct gendisk *disk, fmode_t mode)
-{
+ ida_free(&rssd_index_ida, dd->index);
+ kfree(dd);
}
/*
@@ -3326,13 +3197,12 @@ static void mtip_block_release(struct gendisk *disk, fmode_t mode)
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
- .open = mtip_block_open,
- .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
#endif
.getgeo = mtip_block_getgeo,
+ .free_disk = mtip_block_free_disk,
.owner = THIS_MODULE
};
@@ -3487,12 +3357,11 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
return 0;
}
-static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req)
{
struct driver_data *dd = req->q->queuedata;
- if (reserved) {
+ if (blk_mq_is_reserved_rq(req)) {
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_TIMEOUT;
@@ -3664,7 +3533,7 @@ init_hw_cmds_error:
disk_index_error:
ida_free(&rssd_index_ida, index);
ida_get_error:
- blk_cleanup_disk(dd->disk);
+ put_disk(dd->disk);
block_queue_alloc_init_error:
blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
@@ -3673,72 +3542,6 @@ protocol_init_error:
return rv;
}
-static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
-{
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
- cmd->status = BLK_STS_IOERR;
- blk_mq_complete_request(rq);
- return true;
-}
-
-/*
- * Block layer deinitialization function.
- *
- * Called by the PCI layer as each P320 device is removed.
- *
- * @dd Pointer to the driver data structure.
- *
- * return value
- * 0
- */
-static int mtip_block_remove(struct driver_data *dd)
-{
- mtip_hw_debugfs_exit(dd);
-
- if (dd->mtip_svc_handler) {
- set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
- wake_up_interruptible(&dd->port->svc_wait);
- kthread_stop(dd->mtip_svc_handler);
- }
-
- if (!dd->sr) {
- /*
- * Explicitly wait here for IOs to quiesce,
- * as mtip_standby_drive usually won't wait for IOs.
- */
- if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
- mtip_standby_drive(dd);
- }
- else
- dev_info(&dd->pdev->dev, "device %s surprise removal\n",
- dd->disk->disk_name);
-
- blk_freeze_queue_start(dd->queue);
- blk_mq_quiesce_queue(dd->queue);
- blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
- blk_mq_unquiesce_queue(dd->queue);
-
- if (dd->disk) {
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
- del_gendisk(dd->disk);
- if (dd->disk->queue) {
- blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
- dd->queue = NULL;
- }
- put_disk(dd->disk);
- }
- dd->disk = NULL;
-
- ida_free(&rssd_index_ida, dd->index);
-
- /* De-initialize the protocol layer. */
- mtip_hw_exit(dd);
-
- return 0;
-}
-
/*
* Function called by the PCI layer when just before the
* machine shuts down.
@@ -3755,23 +3558,14 @@ static int mtip_block_shutdown(struct driver_data *dd)
{
mtip_hw_shutdown(dd);
- /* Delete our gendisk structure, and cleanup the blk queue. */
- if (dd->disk) {
- dev_info(&dd->pdev->dev,
- "Shutting down %s ...\n", dd->disk->disk_name);
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
- del_gendisk(dd->disk);
- if (dd->disk->queue) {
- blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
- }
- put_disk(dd->disk);
- dd->disk = NULL;
- dd->queue = NULL;
- }
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
- ida_free(&rssd_index_ida, dd->index);
+ blk_mq_free_tag_set(&dd->tags);
+ put_disk(dd->disk);
return 0;
}
@@ -3905,7 +3699,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
- unsigned long flags;
/* Allocate memory for this devices private data. */
my_node = pcibus_to_node(pdev->bus);
@@ -3952,9 +3745,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->pdev = pdev;
dd->numa_node = my_node;
- INIT_LIST_HEAD(&dd->online_list);
- INIT_LIST_HEAD(&dd->remove_list);
-
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
@@ -4047,11 +3837,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
else
rv = 0; /* device in rebuild state, return 0 from probe */
- /* Add to online list even if in ftl rebuild */
- spin_lock_irqsave(&dev_lock, flags);
- list_add(&dd->online_list, &online_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
goto done;
block_initialize_err:
@@ -4085,14 +3870,7 @@ done:
static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
- unsigned long flags, to;
-
- set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
-
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->online_list);
- list_add(&dd->remove_list, &removing_list);
- spin_unlock_irqrestore(&dev_lock, flags);
+ unsigned long to;
mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq);
@@ -4109,11 +3887,35 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
- blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
- /* Clean up the block layer. */
- mtip_block_remove(dd);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
+
+ mtip_hw_debugfs_exit(dd);
+
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ kthread_stop(dd->mtip_svc_handler);
+ }
+
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive usually won't wait for IOs.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
+ mtip_standby_drive(dd);
+ }
+ else
+ dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ dd->disk->disk_name);
+
+ blk_mq_free_tag_set(&dd->tags);
+
+ /* De-initialize the protocol layer. */
+ mtip_hw_exit(dd);
if (dd->isr_workq) {
destroy_workqueue(dd->isr_workq);
@@ -4124,14 +3926,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->remove_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
- kfree(dd);
-
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
+
+ put_disk(dd->disk);
}
/*
@@ -4250,15 +4048,6 @@ static int __init mtip_init(void)
pr_warn("Error creating debugfs parent\n");
dfs_parent = NULL;
}
- if (dfs_parent) {
- dfs_device_status = debugfs_create_file("device_status",
- 0444, dfs_parent, NULL,
- &mtip_device_status_fops);
- if (IS_ERR_OR_NULL(dfs_device_status)) {
- pr_err("Error creating device_status node\n");
- dfs_device_status = NULL;
- }
- }
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 6816beb45352..f7328f19ac5c 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -149,7 +149,6 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
- MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
@@ -462,10 +461,6 @@ struct driver_data {
int isr_binding;
- struct list_head online_list; /* linkage for online list */
-
- struct list_head remove_list; /* linkage for removing list */
-
int unal_qdepth; /* qdepth of unaligned IO queue */
};
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index e094d2b8b5a9..d914156db2d8 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -157,7 +157,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out:
return err;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 07f3c139a3d7..2a709daefbc4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -11,6 +11,8 @@
* (part of code stolen from loop.c)
*/
+#define pr_fmt(fmt) "nbd: " fmt
+
#include <linux/major.h>
#include <linux/blkdev.h>
@@ -250,7 +252,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
struct gendisk *disk = nbd->disk;
del_gendisk(disk);
- blk_cleanup_disk(disk);
+ put_disk(disk);
blk_mq_free_tag_set(&nbd->tag_set);
/*
@@ -393,8 +395,7 @@ static u32 req_to_nbd_cmd_type(struct request *req)
}
}
-static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
@@ -880,7 +881,7 @@ static void recv_work(struct work_struct *work)
kfree(args);
}
-static bool nbd_clear_req(struct request *req, void *data, bool reserved)
+static bool nbd_clear_req(struct request *req, void *data)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -1833,7 +1834,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
out_free_work:
destroy_workqueue(nbd->recv_workq);
out_err_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
@@ -1951,7 +1952,7 @@ again:
test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- pr_err("nbd: device at index %d is going down\n",
+ pr_err("device at index %d is going down\n",
index);
return -EINVAL;
}
@@ -1962,7 +1963,7 @@ again:
if (!nbd) {
nbd = nbd_dev_add(index, 2);
if (IS_ERR(nbd)) {
- pr_err("nbd: failed to add new device\n");
+ pr_err("failed to add new device\n");
return PTR_ERR(nbd);
}
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 6b67088f4ea7..c451c477978f 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -201,6 +201,22 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_memory_backed;
+module_param_named(memory_backed, g_memory_backed, bool, 0444);
+MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
+
+static bool g_discard;
+module_param_named(discard, g_discard, bool, 0444);
+MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
+
+static unsigned long g_cache_size;
+module_param_named(cache_size, g_cache_size, ulong, 0444);
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+
+static unsigned int g_mbps;
+module_param_named(mbps, g_mbps, uint, 0444);
+MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
+
static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");
@@ -409,6 +425,8 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
+NULLB_DEVICE_ATTR(no_sched, bool, NULL);
+NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -532,6 +550,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
&nullb_device_attr_virt_boundary,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_shared_tag_bitmap,
NULL,
};
@@ -588,7 +608,13 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n");
+ "badblocks,blocking,blocksize,cache_size,"
+ "completion_nsec,discard,home_node,hw_queue_depth,"
+ "irqmode,max_sectors,mbps,memory_backed,no_sched,"
+ "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
+ "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
+ "zone_capacity,zone_max_active,zone_max_open,"
+ "zone_nr_conv,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -650,6 +676,10 @@ static struct nullb_device *null_alloc_dev(void)
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
+ dev->memory_backed = g_memory_backed;
+ dev->discard = g_discard;
+ dev->cache_size = g_cache_size;
+ dev->mbps = g_mbps;
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
@@ -658,6 +688,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
+ dev->no_sched = g_no_sched;
+ dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
@@ -1310,7 +1342,7 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
}
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_opf op,
+ enum req_op op,
sector_t sector,
sector_t nr_sectors)
{
@@ -1381,9 +1413,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
}
}
-blk_status_t null_process_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- unsigned int nr_sectors)
+blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
blk_status_t ret;
@@ -1401,7 +1432,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd,
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
- sector_t nr_sectors, enum req_opf op)
+ sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
@@ -1578,7 +1609,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return nr;
}
-static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1656,7 +1687,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
static void cleanup_queue(struct nullb_queue *nq)
{
- kfree(nq->tag_map);
+ bitmap_free(nq->tag_map);
kfree(nq->cmds);
}
@@ -1737,7 +1768,7 @@ static void null_del_dev(struct nullb *nullb)
null_restart_queue_async(nullb);
}
- blk_cleanup_disk(nullb->disk);
+ put_disk(nullb->disk);
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
@@ -1783,14 +1814,13 @@ static const struct block_device_operations null_rq_ops = {
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
- int i, tag_size;
+ int i;
nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
return -ENOMEM;
- tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
- nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
+ nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
return -ENOMEM;
@@ -1867,31 +1897,48 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
+ int hw_queues, numa_node;
+ unsigned int queue_depth;
int poll_queues;
+ if (nullb) {
+ hw_queues = nullb->dev->submit_queues;
+ poll_queues = nullb->dev->poll_queues;
+ queue_depth = nullb->dev->hw_queue_depth;
+ numa_node = nullb->dev->home_node;
+ if (nullb->dev->no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (nullb->dev->shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (nullb->dev->blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ } else {
+ hw_queues = g_submit_queues;
+ poll_queues = g_poll_queues;
+ queue_depth = g_hw_queue_depth;
+ numa_node = g_home_node;
+ if (g_no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (g_blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ }
+
set->ops = &null_mq_ops;
- set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
- g_submit_queues;
- poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
- if (poll_queues)
- set->nr_hw_queues += poll_queues;
- set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
- g_hw_queue_depth;
- set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- if (g_no_sched)
- set->flags |= BLK_MQ_F_NO_SCHED;
- if (g_shared_tag_bitmap)
- set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ set->flags = flags;
set->driver_data = nullb;
- if (poll_queues)
+ set->nr_hw_queues = hw_queues;
+ set->queue_depth = queue_depth;
+ set->numa_node = numa_node;
+ if (poll_queues) {
+ set->nr_hw_queues += poll_queues;
set->nr_maps = 3;
- else
+ } else {
set->nr_maps = 1;
-
- if ((nullb && nullb->dev->blocking) || g_blocking)
- set->flags |= BLK_MQ_F_BLOCKING;
+ }
return blk_mq_alloc_tag_set(set);
}
@@ -2043,8 +2090,13 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
- dev->index = nullb->index;
+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ mutex_unlock(&lock);
+ goto out_cleanup_zone;
+ }
+ nullb->index = rv;
+ dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
@@ -2070,7 +2122,7 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_zone;
+ goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -2079,10 +2131,13 @@ static int null_add_dev(struct nullb_device *dev)
pr_info("disk %s created\n", nullb->disk_name);
return 0;
+
+out_ida_free:
+ ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
- blk_cleanup_disk(nullb->disk);
+ put_disk(nullb->disk);
out_cleanup_tags:
if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
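The null_blk hunks above replace the driver's hand-rolled tag bitmap (an open-coded kcalloc() of longs) with the generic bitmap helpers, and make ida_simple_get() failures unwind through a new out_ida_free label. A minimal sketch of the bitmap pattern, with hypothetical names (depth stands in for nq->queue_depth):

	/* illustrative only, not part of the patch */
	#include <linux/bitmap.h>
	#include <linux/slab.h>

	static int example_alloc_tag_map(unsigned int depth, unsigned long **map)
	{
		*map = bitmap_zalloc(depth, GFP_KERNEL);	/* one bit per tag */
		if (!*map)
			return -ENOMEM;
		return 0;
	}

	static void example_free_tag_map(unsigned long *map)
	{
		bitmap_free(map);	/* pairs with bitmap_zalloc() */
	}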
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 8359b43842f2..94ff68052b1e 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -113,6 +113,8 @@ struct nullb_device {
bool discard; /* if support discard */
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
+ bool no_sched; /* no IO scheduler for the device */
+ bool shared_tag_bitmap; /* use hostwide shared tags */
};
struct nullb {
@@ -136,9 +138,8 @@ struct nullb {
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
-blk_status_t null_process_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- unsigned int nr_sectors);
+blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, unsigned int nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
@@ -146,9 +147,8 @@ int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- sector_t nr_sectors);
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len);
#else
@@ -164,7 +164,7 @@ static inline int null_register_zoned_dev(struct nullb *nullb)
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector, sector_t nr_sectors)
+ enum req_op op, sector_t sector, sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
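The header change above is purely a rename: the block layer's enum req_opf became enum req_op in v6.0, so prototypes taking an operation code switch type without any behavioural change. A small sketch of code using the renamed enum (the function name is hypothetical):

	/* illustrative only */
	#include <linux/blk_types.h>

	static const char *example_op_name(enum req_op op)
	{
		switch (op) {
		case REQ_OP_READ:
			return "read";
		case REQ_OP_WRITE:
			return "write";
		case REQ_OP_FLUSH:
			return "flush";
		default:
			return "other";
		}
	}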
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index 86d6c12c603c..6b2b370e786f 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -36,7 +36,7 @@ TRACE_EVENT(nullb_zone_op,
TP_ARGS(cmd, zone_no, zone_cond),
TP_STRUCT__entry(
__array(char, disk, DISK_NAME_LEN)
- __field(enum req_opf, op)
+ __field(enum req_op, op)
__field(unsigned int, zone_no)
__field(unsigned int, zone_cond)
),
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 2fdd7b20c224..55a69e48ef8b 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
+ disk_set_zoned(nullb->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
@@ -170,12 +170,12 @@ int null_register_zoned_dev(struct nullb *nullb)
return ret;
} else {
blk_queue_chunk_sectors(q, dev->zone_size_sects);
- q->nr_zones = blkdev_nr_zones(nullb->disk);
+ nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
}
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
- blk_queue_max_open_zones(q, dev->zone_max_open);
- blk_queue_max_active_zones(q, dev->zone_max_active);
+ disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
+ disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
return 0;
}
@@ -600,7 +600,7 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
return BLK_STS_OK;
}
-static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
@@ -653,7 +653,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
return ret;
}
-blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev;
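The zoned.c hunks above move zone limits from request_queue helpers to the gendisk-based helpers introduced in v6.0. A hedged sketch of that registration pattern, assuming names that mirror the hunks (max_open/max_active stand in for dev->zone_max_open/dev->zone_max_active):

	/* illustrative only */
	#include <linux/blkdev.h>

	static void example_register_zoned(struct gendisk *disk,
					   unsigned int max_open,
					   unsigned int max_active)
	{
		disk_set_zoned(disk, BLK_ZONED_HM);		/* host-managed model */
		disk_set_max_open_zones(disk, max_open);
		disk_set_max_active_zones(disk, max_active);
	}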
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f462ad67931a..a5ab40784119 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -956,7 +956,7 @@ out_unreg_cdrom:
out_pi_release:
pi_release(cd->pi);
out_free_disk:
- blk_cleanup_disk(cd->disk);
+ put_disk(cd->disk);
out_free_tag_set:
blk_mq_free_tag_set(&cd->tag_set);
return ret;
@@ -1029,7 +1029,7 @@ static void __exit pcd_exit(void)
unregister_cdrom(&cd->info);
del_gendisk(cd->disk);
pi_release(cd->pi);
- blk_cleanup_disk(cd->disk);
+ put_disk(cd->disk);
blk_mq_free_tag_set(&cd->tag_set);
}
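The pcd.c conversion above is the teardown pattern repeated across this series: blk_cleanup_disk() no longer exists in v6.0, so exit and error paths drop their last reference with put_disk() after del_gendisk(). A minimal sketch with illustrative names:

	/* illustrative only */
	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	static void example_teardown(struct gendisk *disk, struct blk_mq_tag_set *set)
	{
		del_gendisk(disk);		/* unregister from userspace first */
		put_disk(disk);			/* releases the disk and its queue */
		blk_mq_free_tag_set(set);	/* free tags once the queue is gone */
	}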
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 3637c38c72f9..f8a75bc90f70 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -501,6 +501,8 @@ static enum action do_pd_io_start(void)
return do_pd_read_start();
else
return do_pd_write_start();
+ default:
+ break;
}
return Fail;
}
@@ -943,7 +945,7 @@ static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
goto cleanup_disk;
return 0;
cleanup_disk:
- blk_cleanup_disk(disk->gd);
+ put_disk(disk->gd);
put_disk:
put_disk(p);
disk->gd = NULL;
@@ -1018,7 +1020,7 @@ static void __exit pd_exit(void)
if (p) {
disk->gd = NULL;
del_gendisk(p);
- blk_cleanup_disk(p);
+ put_disk(p);
blk_mq_free_tag_set(&disk->tag_set);
pi_release(disk->pi);
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 292e9a4ce1b9..eec1b9fde245 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -975,7 +975,7 @@ static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
out_pi_release:
pi_release(pf->pi);
out_free_disk:
- blk_cleanup_disk(pf->disk);
+ put_disk(pf->disk);
out_free_tag_set:
blk_mq_free_tag_set(&pf->tag_set);
return ret;
@@ -1044,7 +1044,7 @@ static void __exit pf_exit(void)
if (!pf->present)
continue;
del_gendisk(pf->disk);
- blk_cleanup_disk(pf->disk);
+ put_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
pi_release(pf->pi);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 789093375344..4cea3b08087e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2399,7 +2399,7 @@ static void pkt_submit_bio(struct bio *bio)
struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
struct bio *split;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,
@@ -2460,11 +2460,9 @@ static int pkt_seq_show(struct seq_file *m, void *p)
{
struct pktcdvd_device *pd = m->private;
char *msg;
- char bdev_buf[BDEVNAME_SIZE];
int states[PACKET_NUM_STATES];
- seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
- bdevname(pd->bdev, bdev_buf));
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@@ -2521,7 +2519,6 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
int i;
- char b[BDEVNAME_SIZE];
struct block_device *bdev;
struct scsi_device *sdev;
@@ -2534,8 +2531,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
- pkt_err(pd, "%s already setup\n",
- bdevname(pd2->bdev, b));
+ pkt_err(pd, "%pg already setup\n", pd2->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@@ -2570,7 +2566,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
- pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
+ pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
return 0;
out_mem:
@@ -2733,7 +2729,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
return 0;
out_mem2:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_mem:
mempool_exit(&pd->rb_pool);
kfree(pd);
@@ -2783,7 +2779,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
- blk_cleanup_disk(pd->disk);
+ put_disk(pd->disk);
mempool_exit(&pd->rb_pool);
kfree(pd);
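Besides the put_disk() conversion, the pktcdvd hunks above drop bdevname() in favour of the "%pg" printk specifier for block devices and swap blk_queue_split() for bio_split_to_limits(). A short sketch of both, with hypothetical names:

	/* illustrative only */
	#include <linux/blkdev.h>

	static void example_submit(struct bio *bio, struct block_device *bdev)
	{
		bio = bio_split_to_limits(bio);		/* may hand back a smaller bio */
		pr_info("queuing bio on %pg\n", bdev);	/* prints e.g. "sda1", no buffer needed */
	}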
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 3054adf77460..36d7b36c60c7 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -473,7 +473,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
return 0;
fail_cleanup_disk:
- blk_cleanup_disk(gendisk);
+ put_disk(gendisk);
fail_free_tag_set:
blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
@@ -500,7 +500,7 @@ static void ps3disk_remove(struct ps3_system_bus_device *_dev)
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
- blk_cleanup_disk(priv->gendisk);
+ put_disk(priv->gendisk);
blk_mq_free_tag_set(&priv->tag_set);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 4f90819e245e..e1d080f680ed 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -586,7 +586,7 @@ static void ps3vram_submit_bio(struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
@@ -761,7 +761,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(gendisk);
+ put_disk(gendisk);
out_cache_cleanup:
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
@@ -792,7 +792,7 @@ static void ps3vram_remove(struct ps3_system_bus_device *dev)
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
del_gendisk(priv->gendisk);
- blk_cleanup_disk(priv->gendisk);
+ put_disk(priv->gendisk);
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ef9bc62e9afd..f9e39301c4af 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1297,7 +1297,7 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
__func__, osd_req, obj_req, obj_req->ex.oe_objno,
obj_req->ex.oe_off, obj_req->ex.oe_len);
- ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
+ ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}
/*
@@ -2081,7 +2081,7 @@ static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
if (ret)
return ret;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
return 0;
}
@@ -4729,7 +4729,7 @@ static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
- blk_cleanup_disk(rbd_dev->disk);
+ put_disk(rbd_dev->disk);
blk_mq_free_tag_set(&rbd_dev->tag_set);
rbd_dev->disk = NULL;
}
@@ -4768,7 +4768,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
if (ret)
goto out_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0)
ceph_copy_from_page_vector(pages, buf, 0, ret);
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 2be5d87a3ca6..e7c7d9a68168 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -376,7 +376,7 @@ static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
if (ret)
return ret;
- ret = rnbd_clt_resize_disk(dev, (size_t)sectors);
+ ret = rnbd_clt_resize_disk(dev, sectors);
if (ret)
return ret;
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 409c76b81aed..04da33a22ef4 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -68,39 +68,18 @@ static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
return refcount_inc_not_zero(&dev->refcount);
}
-static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
- const struct rnbd_msg_open_rsp *rsp)
+static void rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
+ sector_t new_nsectors)
{
- struct rnbd_clt_session *sess = dev->sess;
-
- if (!rsp->logical_block_size)
- return -EINVAL;
-
- dev->device_id = le32_to_cpu(rsp->device_id);
- dev->nsectors = le64_to_cpu(rsp->nsectors);
- dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
- dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
- dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
- dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
- dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
- dev->secure_discard = le16_to_cpu(rsp->secure_discard);
- dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
- dev->fua = !!(rsp->cache_policy & RNBD_FUA);
-
- dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
- dev->max_segments = sess->max_segments;
-
- return 0;
-}
+ if (get_capacity(dev->gd) == new_nsectors)
+ return;
-static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
- size_t new_nsectors)
-{
- rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
- dev->nsectors, new_nsectors);
- dev->nsectors = new_nsectors;
- set_capacity_and_notify(dev->gd, dev->nsectors);
- return 0;
+ /*
+ * If the size changed, we need to revalidate it
+ */
+ rnbd_clt_info(dev, "Device size changed from %llu to %llu sectors\n",
+ get_capacity(dev->gd), new_nsectors);
+ set_capacity_and_notify(dev->gd, new_nsectors);
}
static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
@@ -119,19 +98,16 @@ static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
u64 nsectors = le64_to_cpu(rsp->nsectors);
- /*
- * If the device was remapped and the size changed in the
- * meantime we need to revalidate it
- */
- if (dev->nsectors != nsectors)
- rnbd_clt_change_capacity(dev, nsectors);
+ rnbd_clt_change_capacity(dev, nsectors);
gd_kobj = &disk_to_dev(dev->gd)->kobj;
kobject_uevent(gd_kobj, KOBJ_ONLINE);
rnbd_clt_info(dev, "Device online, device remapped successfully\n");
}
- err = rnbd_clt_set_dev_attr(dev, rsp);
- if (err)
+ if (!rsp->logical_block_size) {
+ err = -EINVAL;
goto out;
+ }
+ dev->device_id = le32_to_cpu(rsp->device_id);
dev->dev_state = DEV_STATE_MAPPED;
out:
@@ -140,7 +116,7 @@ out:
return err;
}
-int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize)
{
int ret = 0;
@@ -150,7 +126,7 @@ int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
ret = -ENOENT;
goto out;
}
- ret = rnbd_clt_change_capacity(dev, newsize);
+ rnbd_clt_change_capacity(dev, newsize);
out:
mutex_unlock(&dev->lock);
@@ -507,6 +483,11 @@ static void msg_open_conf(struct work_struct *work)
struct rnbd_msg_open_rsp *rsp = iu->buf;
struct rnbd_clt_dev *dev = iu->dev;
int errno = iu->errno;
+ bool from_map = false;
+
+ /* INIT state is only triggered from rnbd_clt_map_device */
+ if (dev->dev_state == DEV_STATE_INIT)
+ from_map = true;
if (errno) {
rnbd_clt_err(dev,
@@ -523,7 +504,9 @@ static void msg_open_conf(struct work_struct *work)
send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
}
}
- kfree(rsp);
+ /* We free rsp in rnbd_clt_map_device for map scenario */
+ if (!from_map)
+ kfree(rsp);
wake_up_iu_comp(iu, errno);
rnbd_put_iu(dev->sess, iu);
rnbd_clt_put_dev(dev);
@@ -942,7 +925,7 @@ static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
{
struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
- if (dev->read_only && (mode & FMODE_WRITE))
+ if (get_disk_ro(dev->gd) && (mode & FMODE_WRITE))
return -EPERM;
if (dev->dev_state == DEV_STATE_UNMAPPED ||
@@ -963,10 +946,10 @@ static int rnbd_client_getgeo(struct block_device *block_device,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev;
+ struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct queue_limits *limit = &dev->queue->limits;
- dev = block_device->bd_disk->private_data;
- size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
+ size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
geo->cylinders = size >> 6; /* size/64 */
geo->heads = 4;
geo->sectors = 16;
@@ -1350,11 +1333,15 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
}
}
-static void setup_request_queue(struct rnbd_clt_dev *dev)
+static void setup_request_queue(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
{
- blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
- blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
- blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
+ blk_queue_logical_block_size(dev->queue,
+ le16_to_cpu(rsp->logical_block_size));
+ blk_queue_physical_block_size(dev->queue,
+ le16_to_cpu(rsp->physical_block_size));
+ blk_queue_max_hw_sectors(dev->queue,
+ dev->sess->max_io_size / SECTOR_SIZE);
/*
* we don't support discards to "discontiguous" segments
@@ -1362,21 +1349,27 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
*/
blk_queue_max_discard_segments(dev->queue, 1);
- blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
- dev->queue->limits.discard_granularity = dev->discard_granularity;
- dev->queue->limits.discard_alignment = dev->discard_alignment;
- if (dev->secure_discard)
+ blk_queue_max_discard_sectors(dev->queue,
+ le32_to_cpu(rsp->max_discard_sectors));
+ dev->queue->limits.discard_granularity =
+ le32_to_cpu(rsp->discard_granularity);
+ dev->queue->limits.discard_alignment =
+ le32_to_cpu(rsp->discard_alignment);
+ if (le16_to_cpu(rsp->secure_discard))
blk_queue_max_secure_erase_sectors(dev->queue,
- dev->max_discard_sectors);
+ le32_to_cpu(rsp->max_discard_sectors));
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_max_segments(dev->queue, dev->max_segments);
+ blk_queue_max_segments(dev->queue, dev->sess->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
+ blk_queue_write_cache(dev->queue,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
}
-static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp, int idx)
{
int err;
@@ -1388,19 +1381,15 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
idx);
- pr_debug("disk_name=%s, capacity=%zu\n",
+ pr_debug("disk_name=%s, capacity=%llu\n",
dev->gd->disk_name,
- dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
- );
+ le64_to_cpu(rsp->nsectors) *
+ (le16_to_cpu(rsp->logical_block_size) / SECTOR_SIZE));
- set_capacity(dev->gd, dev->nsectors);
+ set_capacity(dev->gd, le64_to_cpu(rsp->nsectors));
- if (dev->access_mode == RNBD_ACCESS_RO) {
- dev->read_only = true;
+ if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
- } else {
- dev->read_only = false;
- }
/*
* Network device does not need rotational
@@ -1408,16 +1397,18 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
return err;
}
-static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
+static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
{
int idx = dev->clt_device_id;
- dev->size = dev->nsectors * dev->logical_block_size;
+ dev->size = le64_to_cpu(rsp->nsectors) *
+ le16_to_cpu(rsp->logical_block_size);
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
if (IS_ERR(dev->gd))
@@ -1425,8 +1416,8 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- setup_request_queue(dev);
- return rnbd_clt_setup_gen_disk(dev, idx);
+ setup_request_queue(dev, rsp);
+ return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
@@ -1562,7 +1553,14 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
{
struct rnbd_clt_session *sess;
struct rnbd_clt_dev *dev;
- int ret;
+ int ret, errno;
+ struct rnbd_msg_open_rsp *rsp;
+ struct rnbd_msg_open msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
if (exists_devpath(pathname, sessname))
return ERR_PTR(-EEXIST);
@@ -1582,17 +1580,47 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
ret = -EEXIST;
goto put_dev;
}
- ret = send_msg_open(dev, RTRS_PERMIT_WAIT);
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp) {
+ ret = -ENOMEM;
+ goto del_dev;
+ }
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ ret = -ENOMEM;
+ kfree(rsp);
+ goto del_dev;
+ }
+ iu->buf = rsp;
+ iu->dev = dev;
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
+ msg.access_mode = dev->access_mode;
+ strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ ret = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
+ msg_open_conf, &errno, RTRS_PERMIT_WAIT);
+ if (ret) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ } else {
+ ret = errno;
+ }
if (ret) {
rnbd_clt_err(dev,
"map_device: failed, can't open remote device, err: %d\n",
ret);
- goto del_dev;
+ goto put_iu;
}
mutex_lock(&dev->lock);
pr_debug("Opened remote device: session=%s, path='%s'\n",
sess->sessname, pathname);
- ret = rnbd_client_setup_device(dev);
+ ret = rnbd_client_setup_device(dev, rsp);
if (ret) {
rnbd_clt_err(dev,
"map_device: Failed to configure device, err: %d\n",
@@ -1602,21 +1630,30 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
- dev->gd->disk_name, dev->nsectors,
- dev->logical_block_size, dev->physical_block_size,
- dev->max_discard_sectors,
- dev->discard_granularity, dev->discard_alignment,
- dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->wc, dev->fua);
+ "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+ dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
+ le16_to_cpu(rsp->logical_block_size),
+ le16_to_cpu(rsp->physical_block_size),
+ le32_to_cpu(rsp->max_discard_sectors),
+ le32_to_cpu(rsp->discard_granularity),
+ le32_to_cpu(rsp->discard_alignment),
+ le16_to_cpu(rsp->secure_discard),
+ sess->max_segments, sess->max_io_size / SECTOR_SIZE,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
mutex_unlock(&dev->lock);
+ kfree(rsp);
+ rnbd_put_iu(sess, iu);
rnbd_clt_put_sess(sess);
return dev;
send_close:
send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
+put_iu:
+ kfree(rsp);
+ rnbd_put_iu(sess, iu);
del_dev:
delete_dev(dev);
put_dev:
@@ -1630,7 +1667,7 @@ put_sess:
static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
del_gendisk(dev->gd);
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
}
static void destroy_sysfs(struct rnbd_clt_dev *dev,
@@ -1755,7 +1792,7 @@ static void rnbd_destroy_sessions(void)
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
* Here unmap happens in parallel for only one reason:
- * blk_cleanup_queue() takes around half a second, so
+ * del_gendisk() takes around half a second, so
* on huge amount of devices the whole module unload
* procedure takes minutes.
*/
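The rnbd-clt.c rework above stops caching geometry in struct rnbd_clt_dev and reads it from the open response instead; capacity changes now compare against the gendisk itself. A hedged sketch of that capacity helper (names are illustrative, mirroring rnbd_clt_change_capacity):

	/* illustrative only */
	#include <linux/blkdev.h>

	static void example_change_capacity(struct gendisk *gd, sector_t new_nsectors)
	{
		if (get_capacity(gd) == new_nsectors)
			return;					/* nothing changed */
		set_capacity_and_notify(gd, new_nsectors);	/* emits a resize uevent */
	}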
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 2e2e8c4a85c1..a48e040abe63 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -106,6 +106,7 @@ struct rnbd_queue {
};
struct rnbd_clt_dev {
+ struct kobject kobj;
struct rnbd_clt_session *sess;
struct request_queue *queue;
struct rnbd_queue *hw_queues;
@@ -114,27 +115,14 @@ struct rnbd_clt_dev {
u32 clt_device_id;
struct mutex lock;
enum rnbd_clt_dev_state dev_state;
+ refcount_t refcount;
char *pathname;
enum rnbd_access_mode access_mode;
u32 nr_poll_queues;
- bool read_only;
- bool wc;
- bool fua;
- u32 max_hw_sectors;
- u32 max_discard_sectors;
- u32 discard_granularity;
- u32 discard_alignment;
- u16 secure_discard;
- u16 physical_block_size;
- u16 logical_block_size;
- u16 max_segments;
- size_t nsectors;
u64 size; /* device size in bytes */
struct list_head list;
struct gendisk *gd;
- struct kobject kobj;
char *blk_symlink_name;
- refcount_t refcount;
struct work_struct unmap_on_rmmod_work;
};
@@ -150,7 +138,7 @@ int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
const struct attribute *sysfs_self);
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
-int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize);
/* rnbd-clt-sysfs.c */
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index bfb08dd434d1..ea7ac8bca63c 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -229,9 +229,9 @@ static inline bool rnbd_flags_supported(u32 flags)
return true;
}
-static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
+static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
{
- u32 bio_opf;
+ blk_opf_t bio_opf;
switch (rnbd_op(rnbd_opf)) {
case RNBD_OP_READ:
@@ -286,7 +286,8 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
break;
default:
WARN(1, "Unknown request type %d (flags %llu)\n",
- req_op(rq), (unsigned long long)rq->cmd_flags);
+ (__force u32)req_op(rq),
+ (__force unsigned long long)rq->cmd_flags);
rnbd_opf = 0;
}
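The rnbd-proto.h hunk above adopts blk_opf_t, the bitwise type v6.0 introduced for bio/request operation-and-flag words; req_op() and op_is_write() operate on it. A tiny sketch (function name is hypothetical):

	/* illustrative only */
	#include <linux/blk_types.h>

	static bool example_is_write(blk_opf_t opf)
	{
		return op_is_write(opf);	/* true for REQ_OP_WRITE and friends */
	}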
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index c5d0a0391165..c63017f6e421 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -28,7 +28,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
goto err;
dev->blk_open_flags = flags;
- bdevname(dev->bdev, dev->name);
return dev;
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 4309e5252469..8407d12f70af 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -15,7 +15,6 @@
struct rnbd_dev {
struct block_device *bdev;
fmode_t blk_open_flags;
- char name[BDEVNAME_SIZE];
};
/**
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index feaa76c5a342..297a6924ff4e 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -38,14 +38,13 @@ static struct kobj_type dev_ktype = {
};
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
- struct block_device *bdev,
- const char *dev_name)
+ struct block_device *bdev)
{
struct kobject *bdev_kobj;
int ret;
ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
- rnbd_devs_kobj, dev_name);
+ rnbd_devs_kobj, "%pg", bdev);
if (ret) {
kobject_put(&dev->dev_kobj);
return ret;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index beaef43a67b9..5e08da277ddf 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -224,7 +224,6 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
wait_for_completion(&dc); /* wait for inflights to drop to zero */
rnbd_dev_close(sess_dev->rnbd_dev);
- list_del(&sess_dev->sess_list);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (sess_dev->open_flags & FMODE_WRITE)
@@ -239,14 +238,14 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
static void destroy_sess(struct rnbd_srv_session *srv_sess)
{
- struct rnbd_srv_sess_dev *sess_dev, *tmp;
+ struct rnbd_srv_sess_dev *sess_dev;
+ unsigned long index;
- if (list_empty(&srv_sess->sess_dev_list))
+ if (xa_empty(&srv_sess->index_idr))
goto out;
mutex_lock(&srv_sess->lock);
- list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
- sess_list)
+ xa_for_each(&srv_sess->index_idr, index, sess_dev)
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
@@ -281,7 +280,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
- INIT_LIST_HEAD(&srv_sess->sess_dev_list);
mutex_init(&srv_sess->lock);
mutex_lock(&sess_lock);
list_add(&srv_sess->list, &sess_list);
@@ -323,10 +321,11 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
{
struct rnbd_srv_session *sess = sess_dev->sess;
- sess_dev->keep_id = true;
/* It is already started to close by client's close message. */
if (!mutex_trylock(&sess->lock))
return;
+
+ sess_dev->keep_id = true;
/* first remove sysfs itself to avoid deadlock */
sysfs_remove_file_self(&sess_dev->kobj, &attr->attr);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
@@ -419,7 +418,7 @@ static struct rnbd_srv_sess_dev
return sess_dev;
}
-static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
+static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev)
{
struct rnbd_srv_dev *dev;
@@ -427,7 +426,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
if (!dev)
return ERR_PTR(-ENOMEM);
- strscpy(dev->id, id, sizeof(dev->id));
+ snprintf(dev->id, sizeof(dev->id), "%pg", bdev);
kref_init(&dev->kref);
INIT_LIST_HEAD(&dev->sess_dev_list);
mutex_init(&dev->lock);
@@ -512,7 +511,7 @@ rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
int ret;
struct rnbd_srv_dev *new_dev, *dev;
- new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
+ new_dev = rnbd_srv_init_srv_dev(rnbd_dev->bdev);
if (IS_ERR(new_dev))
return new_dev;
@@ -666,11 +665,12 @@ static struct rnbd_srv_sess_dev *
find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
{
struct rnbd_srv_sess_dev *sess_dev;
+ unsigned long index;
- if (list_empty(&srv_sess->sess_dev_list))
+ if (xa_empty(&srv_sess->index_idr))
return NULL;
- list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
+ xa_for_each(&srv_sess->index_idr, index, sess_dev)
if (!strcmp(sess_dev->pathname, dev_name))
return sess_dev;
@@ -758,8 +758,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
- rnbd_dev->name);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -781,8 +780,6 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
mutex_unlock(&srv_dev->lock);
- list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);
-
rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
kfree(full_path);
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index be2ae486d407..081bceaf4ae9 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -25,8 +25,6 @@ struct rnbd_srv_session {
int queue_depth;
struct xarray index_idr;
- /* List of struct rnbd_srv_sess_dev */
- struct list_head sess_dev_list;
struct mutex lock;
u8 ver;
};
@@ -48,8 +46,6 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- /* Entry inside rnbd_srv_session struct */
- struct list_head sess_list;
struct rnbd_dev *rnbd_dev;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
@@ -68,8 +64,7 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
/* rnbd-srv-sysfs.c */
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
- struct block_device *bdev,
- const char *dir_name);
+ struct block_device *bdev);
void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev);
int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
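With the per-session sess_dev_list removed above, the server code walks the session's existing index_idr xarray instead. A minimal sketch of that lookup pattern, with hypothetical names:

	/* illustrative only */
	#include <linux/xarray.h>

	static void *example_find(struct xarray *xa, bool (*match)(void *entry))
	{
		unsigned long index;
		void *entry;

		xa_for_each(xa, index, entry)	/* visits every present entry */
			if (match(entry))
				return entry;
		return NULL;
	}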
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index dd0a1a6fed29..fb855da971ee 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -886,7 +886,7 @@ static int probe_disk(struct vdc_port *port)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(g);
+ put_disk(g);
out_free_tag:
blk_mq_free_tag_set(&port->tag_set);
return err;
@@ -1070,7 +1070,7 @@ static void vdc_port_remove(struct vio_dev *vdev)
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
- blk_cleanup_disk(port->disk);
+ put_disk(port->disk);
blk_mq_free_tag_set(&port->tag_set);
vdc_free_tx_ring(port);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index fef65a18d56f..42b4b6828690 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -783,7 +783,7 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
if (fs->registered)
del_gendisk(fs->disk);
- blk_cleanup_disk(disk);
+ put_disk(disk);
blk_mq_free_tag_set(&fs->tag_set);
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 6c39f2c9f806..da811a7da03f 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1238,7 +1238,7 @@ static int swim3_attach(struct macio_dev *mdev,
return 0;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_tag_set:
blk_mq_free_tag_set(&fs->tag_set);
out_unregister:
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
deleted file mode 100644
index 63b4f6431d2e..000000000000
--- a/drivers/block/sx8.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
- *
- * Copyright 2004-2005 Red Hat, Inc.
- *
- * Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/ktime.h>
-#include <linux/hdreg.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#if 0
-#define CARM_DEBUG
-#define CARM_VERBOSE_DEBUG
-#else
-#undef CARM_DEBUG
-#undef CARM_VERBOSE_DEBUG
-#endif
-#undef CARM_NDEBUG
-
-#define DRV_NAME "sx8"
-#define DRV_VERSION "1.0"
-#define PFX DRV_NAME ": "
-
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Promise SATA SX8 block driver");
-MODULE_VERSION(DRV_VERSION);
-
-/*
- * SX8 hardware has a single message queue for all ATA ports.
- * When this driver was written, the hardware (firmware?) would
- * corrupt data eventually, if more than one request was outstanding.
- * As one can imagine, having 8 ports bottlenecking on a single
- * command hurts performance.
- *
- * Based on user reports, later versions of the hardware (firmware?)
- * seem to be able to survive with more than one command queued.
- *
- * Therefore, we default to the safe option -- 1 command -- but
- * allow the user to increase this.
- *
- * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
- * but problems seem to occur when you exceed ~30, even on newer hardware.
- */
-static int max_queue = 1;
-module_param(max_queue, int, 0444);
-MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
-
-
-#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
-
-/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
-#define TAG_ENCODE(tag) (((tag) << 16) | 0xf)
-#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f)
-#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
-
-/* note: prints function name for you */
-#ifdef CARM_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef CARM_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* CARM_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* CARM_DEBUG */
-
-#ifdef CARM_NDEBUG
-#define assert(expr)
-#else
-#define assert(expr) \
- if(unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#endif
-
-/* defines only for the constants which don't work well as enums */
-struct carm_host;
-
-enum {
- /* adapter-wide limits */
- CARM_MAX_PORTS = 8,
- CARM_SHM_SIZE = (4096 << 7),
- CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS,
- CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1,
-
- /* command message queue limits */
- CARM_MAX_REQ = 64, /* max command msgs per host */
- CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */
-
- /* S/G limits, host-wide and per-request */
- CARM_MAX_REQ_SG = 32, /* max s/g entries per request */
- CARM_MAX_HOST_SG = 600, /* max s/g entries per host */
- CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */
-
- /* hardware registers */
- CARM_IHQP = 0x1c,
- CARM_INT_STAT = 0x10, /* interrupt status */
- CARM_INT_MASK = 0x14, /* interrupt mask */
- CARM_HMUC = 0x18, /* host message unit control */
- RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */
- RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */
- RBUF_BYTE_SZ = 0x28,
- CARM_RESP_IDX = 0x2c,
- CARM_CMS0 = 0x30, /* command message size reg 0 */
- CARM_LMUC = 0x48,
- CARM_HMPHA = 0x6c,
- CARM_INITC = 0xb5,
-
- /* bits in CARM_INT_{STAT,MASK} */
- INT_RESERVED = 0xfffffff0,
- INT_WATCHDOG = (1 << 3), /* watchdog timer */
- INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */
- INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */
- INT_RESPONSE = (1 << 0), /* response msg available */
- INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW,
- INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW |
- INT_RESPONSE,
-
- /* command messages, and related register bits */
- CARM_HAVE_RESP = 0x01,
- CARM_MSG_READ = 1,
- CARM_MSG_WRITE = 2,
- CARM_MSG_VERIFY = 3,
- CARM_MSG_GET_CAPACITY = 4,
- CARM_MSG_FLUSH = 5,
- CARM_MSG_IOCTL = 6,
- CARM_MSG_ARRAY = 8,
- CARM_MSG_MISC = 9,
- CARM_CME = (1 << 2),
- CARM_RME = (1 << 1),
- CARM_WZBC = (1 << 0),
- CARM_RMI = (1 << 0),
- CARM_Q_FULL = (1 << 3),
- CARM_MSG_SIZE = 288,
- CARM_Q_LEN = 48,
-
- /* CARM_MSG_IOCTL messages */
- CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */
- CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */
- CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */
-
- IOC_SCAN_CHAN_NODEV = 0x1f,
- IOC_SCAN_CHAN_OFFSET = 0x40,
-
- /* CARM_MSG_ARRAY messages */
- CARM_ARRAY_INFO = 0,
-
- ARRAY_NO_EXIST = (1 << 31),
-
- /* response messages */
- RMSG_SZ = 8, /* sizeof(struct carm_response) */
- RMSG_Q_LEN = 48, /* resp. msg list length */
- RMSG_OK = 1, /* bit indicating msg was successful */
- /* length of entire resp. msg buffer */
- RBUF_LEN = RMSG_SZ * RMSG_Q_LEN,
-
- PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */
-
- /* CARM_MSG_MISC messages */
- MISC_GET_FW_VER = 2,
- MISC_ALLOC_MEM = 3,
- MISC_SET_TIME = 5,
-
- /* MISC_GET_FW_VER feature bits */
- FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */
- FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */
- FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */
-
- /* carm_host flags */
- FL_NON_RAID = FW_VER_NON_RAID,
- FL_4PORT = FW_VER_4PORT,
- FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
- FL_DYN_MAJOR = (1 << 17),
-};
-
-enum {
- CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
-};
-
-enum scatter_gather_types {
- SGT_32BIT = 0,
- SGT_64BIT = 1,
-};
-
-enum host_states {
- HST_INVALID, /* invalid state; never used */
- HST_ALLOC_BUF, /* setting up master SHM area */
- HST_ERROR, /* we never leave here */
- HST_PORT_SCAN, /* start dev scan */
- HST_DEV_SCAN_START, /* start per-device probe */
- HST_DEV_SCAN, /* continue per-device probe */
- HST_DEV_ACTIVATE, /* activate devices we found */
- HST_PROBE_FINISHED, /* probe is complete */
- HST_PROBE_START, /* initiate probe */
- HST_SYNC_TIME, /* tell firmware what time it is */
- HST_GET_FW_VER, /* get firmware version, adapter port cnt */
-};
-
-#ifdef CARM_DEBUG
-static const char *state_name[] = {
- "HST_INVALID",
- "HST_ALLOC_BUF",
- "HST_ERROR",
- "HST_PORT_SCAN",
- "HST_DEV_SCAN_START",
- "HST_DEV_SCAN",
- "HST_DEV_ACTIVATE",
- "HST_PROBE_FINISHED",
- "HST_PROBE_START",
- "HST_SYNC_TIME",
- "HST_GET_FW_VER",
-};
-#endif
-
-struct carm_port {
- unsigned int port_no;
- struct gendisk *disk;
- struct carm_host *host;
-
- /* attached device characteristics */
- u64 capacity;
- char name[41];
- u16 dev_geom_head;
- u16 dev_geom_sect;
- u16 dev_geom_cyl;
-};
-
-struct carm_request {
- int n_elem;
- unsigned int msg_type;
- unsigned int msg_subtype;
- unsigned int msg_bucket;
- struct scatterlist sg[CARM_MAX_REQ_SG];
-};
-
-struct carm_host {
- unsigned long flags;
- void __iomem *mmio;
- void *shm;
- dma_addr_t shm_dma;
-
- int major;
- int id;
- char name[32];
-
- spinlock_t lock;
- struct pci_dev *pdev;
- unsigned int state;
- u32 fw_ver;
-
- struct blk_mq_tag_set tag_set;
- struct request_queue *oob_q;
- unsigned int n_oob;
-
- unsigned int hw_sg_used;
-
- unsigned int resp_idx;
-
- unsigned int wait_q_prod;
- unsigned int wait_q_cons;
- struct request_queue *wait_q[CARM_MAX_WAIT_Q];
-
- void *msg_base;
- dma_addr_t msg_dma;
-
- int cur_scan_dev;
- unsigned long dev_active;
- unsigned long dev_present;
- struct carm_port port[CARM_MAX_PORTS];
-
- struct work_struct fsm_task;
-
- int probe_err;
- struct completion probe_comp;
-};
-
-struct carm_response {
- __le32 ret_handle;
- __le32 status;
-} __attribute__((packed));
-
-struct carm_msg_sg {
- __le32 start;
- __le32 len;
-} __attribute__((packed));
-
-struct carm_msg_rw {
- u8 type;
- u8 id;
- u8 sg_count;
- u8 sg_type;
- __le32 handle;
- __le32 lba;
- __le16 lba_count;
- __le16 lba_high;
- struct carm_msg_sg sg[32];
-} __attribute__((packed));
-
-struct carm_msg_allocbuf {
- u8 type;
- u8 subtype;
- u8 n_sg;
- u8 sg_type;
- __le32 handle;
- __le32 addr;
- __le32 len;
- __le32 evt_pool;
- __le32 n_evt;
- __le32 rbuf_pool;
- __le32 n_rbuf;
- __le32 msg_pool;
- __le32 n_msg;
- struct carm_msg_sg sg[8];
-} __attribute__((packed));
-
-struct carm_msg_ioctl {
- u8 type;
- u8 subtype;
- u8 array_id;
- u8 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_msg_sync_time {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- u32 reserved2;
- __le32 timestamp;
-} __attribute__((packed));
-
-struct carm_msg_get_fw_ver {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_fw_ver {
- __le32 version;
- u8 features;
- u8 reserved1;
- u16 reserved2;
-} __attribute__((packed));
-
-struct carm_array_info {
- __le32 size;
-
- __le16 size_hi;
- __le16 stripe_size;
-
- __le32 mode;
-
- __le16 stripe_blk_sz;
- __le16 reserved1;
-
- __le16 cyl;
- __le16 head;
-
- __le16 sect;
- u8 array_id;
- u8 reserved2;
-
- char name[40];
-
- __le32 array_status;
-
- /* device list continues beyond this point? */
-} __attribute__((packed));
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void carm_remove_one (struct pci_dev *pdev);
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct pci_device_id carm_pci_tbl[] = {
- { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, carm_pci_tbl);
-
-static struct pci_driver carm_driver = {
- .name = DRV_NAME,
- .id_table = carm_pci_tbl,
- .probe = carm_init_one,
- .remove = carm_remove_one,
-};
-
-static const struct block_device_operations carm_bd_ops = {
- .owner = THIS_MODULE,
- .getgeo = carm_bdev_getgeo,
-};
-
-static unsigned int carm_host_id;
-static unsigned long carm_major_alloc;
-
-
-
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct carm_port *port = bdev->bd_disk->private_data;
-
- geo->heads = (u8) port->dev_geom_head;
- geo->sectors = (u8) port->dev_geom_sect;
- geo->cylinders = port->dev_geom_cyl;
- return 0;
-}
-
-static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
-
-static inline int carm_lookup_bucket(u32 msg_size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- if (msg_size <= msg_sizes[i])
- return i;
-
- return -ENOENT;
-}
-
-static void carm_init_buckets(void __iomem *mmio)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
-}
-
-static inline void *carm_ref_msg(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_base + (msg_idx * CARM_MSG_SIZE);
-}
-
-static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
-}
-
-static int carm_send_msg(struct carm_host *host,
- struct carm_request *crq, unsigned tag)
-{
- void __iomem *mmio = host->mmio;
- u32 msg = (u32) carm_ref_msg_dma(host, tag);
- u32 cm_bucket = crq->msg_bucket;
- u32 tmp;
- int rc = 0;
-
- VPRINTK("ENTER\n");
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_Q_FULL) {
-#if 0
- tmp = readl(mmio + CARM_INT_MASK);
- tmp |= INT_Q_AVAILABLE;
- writel(tmp, mmio + CARM_INT_MASK);
- readl(mmio + CARM_INT_MASK); /* flush */
-#endif
- DPRINTK("host msg queue full\n");
- rc = -EBUSY;
- } else {
- writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
- readl(mmio + CARM_IHQP); /* flush */
- }
-
- return rc;
-}
-
-static int carm_array_info (struct carm_host *host, unsigned int array_idx)
-{
- struct carm_msg_ioctl *ioc;
- u32 msg_data;
- dma_addr_t msg_dma;
- struct carm_request *crq;
- struct request *rq;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq)) {
- rc = -ENOMEM;
- goto err_out;
- }
- crq = blk_mq_rq_to_pdu(rq);
-
- ioc = carm_ref_msg(host, rq->tag);
- msg_dma = carm_ref_msg_dma(host, rq->tag);
- msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
-
- crq->msg_type = CARM_MSG_ARRAY;
- crq->msg_subtype = CARM_ARRAY_INFO;
- rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
- sizeof(struct carm_array_info));
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_ARRAY;
- ioc->subtype = CARM_ARRAY_INFO;
- ioc->array_id = (u8) array_idx;
- ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- spin_lock_irq(&host->lock);
- assert(host->state == HST_DEV_SCAN_START ||
- host->state == HST_DEV_SCAN);
- spin_unlock_irq(&host->lock);
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true);
-
- return 0;
-
-err_out:
- spin_lock_irq(&host->lock);
- host->state = HST_ERROR;
- spin_unlock_irq(&host->lock);
- return rc;
-}
-
-typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
-
-static int carm_send_special (struct carm_host *host, carm_sspc_t func)
-{
- struct request *rq;
- struct carm_request *crq;
- struct carm_msg_ioctl *ioc;
- void *mem;
- unsigned int msg_size;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
- crq = blk_mq_rq_to_pdu(rq);
-
- mem = carm_ref_msg(host, rq->tag);
-
- msg_size = func(host, rq->tag, mem);
-
- ioc = mem;
- crq->msg_type = ioc->type;
- crq->msg_subtype = ioc->subtype;
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true);
-
- return 0;
-}
-
-static unsigned int carm_fill_sync_time(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_sync_time *st = mem;
-
- time64_t tv = ktime_get_real_seconds();
-
- memset(st, 0, sizeof(*st));
- st->type = CARM_MSG_MISC;
- st->subtype = MISC_SET_TIME;
- st->handle = cpu_to_le32(TAG_ENCODE(idx));
- st->timestamp = cpu_to_le32(tv);
-
- return sizeof(struct carm_msg_sync_time);
-}
-
-static unsigned int carm_fill_alloc_buf(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_allocbuf *ab = mem;
-
- memset(ab, 0, sizeof(*ab));
- ab->type = CARM_MSG_MISC;
- ab->subtype = MISC_ALLOC_MEM;
- ab->handle = cpu_to_le32(TAG_ENCODE(idx));
- ab->n_sg = 1;
- ab->sg_type = SGT_32BIT;
- ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1);
- ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024));
- ab->n_evt = cpu_to_le32(1024);
- ab->rbuf_pool = cpu_to_le32(host->shm_dma);
- ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN);
- ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN);
- ab->n_msg = cpu_to_le32(CARM_Q_LEN);
- ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->sg[0].len = cpu_to_le32(65536);
-
- return sizeof(struct carm_msg_allocbuf);
-}
-
-static unsigned int carm_fill_scan_channels(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_ioctl *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
- IOC_SCAN_CHAN_OFFSET);
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_IOCTL;
- ioc->subtype = CARM_IOC_SCAN_CHAN;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- /* fill output data area with "no device" default values */
- mem += IOC_SCAN_CHAN_OFFSET;
- memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);
-
- return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
-}
-
-static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_get_fw_ver *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_MISC;
- ioc->subtype = MISC_GET_FW_VER;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- return sizeof(struct carm_msg_get_fw_ver) +
- sizeof(struct carm_fw_ver);
-}
-
-static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
-{
- unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
-
- blk_mq_stop_hw_queues(q);
- VPRINTK("STOPPED QUEUE %p\n", q);
-
- host->wait_q[idx] = q;
- host->wait_q_prod++;
- BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
-}
-
-static inline struct request_queue *carm_pop_q(struct carm_host *host)
-{
- unsigned int idx;
-
- if (host->wait_q_prod == host->wait_q_cons)
- return NULL;
-
- idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
- host->wait_q_cons++;
-
- return host->wait_q[idx];
-}
-
-static inline void carm_round_robin(struct carm_host *host)
-{
- struct request_queue *q = carm_pop_q(host);
- if (q) {
- blk_mq_start_hw_queues(q);
- VPRINTK("STARTED QUEUE %p\n", q);
- }
-}
-
-static inline enum dma_data_direction carm_rq_dir(struct request *rq)
-{
- return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-}
-
-static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct request_queue *q = hctx->queue;
- struct request *rq = bd->rq;
- struct carm_port *port = q->queuedata;
- struct carm_host *host = port->host;
- struct carm_request *crq = blk_mq_rq_to_pdu(rq);
- struct carm_msg_rw *msg;
- struct scatterlist *sg;
- int i, n_elem = 0, rc;
- unsigned int msg_size;
- u32 tmp;
-
- crq->n_elem = 0;
- sg_init_table(crq->sg, CARM_MAX_REQ_SG);
-
- blk_mq_start_request(rq);
-
- spin_lock_irq(&host->lock);
- if (req_op(rq) == REQ_OP_DRV_OUT)
- goto send_msg;
-
- /* get scatterlist from block layer */
- sg = &crq->sg[0];
- n_elem = blk_rq_map_sg(q, rq, sg);
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* map scatterlist to PCI bus addresses */
- n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* obey global hardware limit on S/G entries */
- if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
- goto out_resource;
-
- crq->n_elem = n_elem;
- host->hw_sg_used += n_elem;
-
- /*
- * build read/write message
- */
-
- VPRINTK("build msg\n");
- msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);
-
- if (rq_data_dir(rq) == WRITE) {
- msg->type = CARM_MSG_WRITE;
- crq->msg_type = CARM_MSG_WRITE;
- } else {
- msg->type = CARM_MSG_READ;
- crq->msg_type = CARM_MSG_READ;
- }
-
- msg->id = port->port_no;
- msg->sg_count = n_elem;
- msg->sg_type = SGT_32BIT;
- msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
- tmp = (blk_rq_pos(rq) >> 16) >> 16;
- msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
-
- msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
- for (i = 0; i < n_elem; i++) {
- struct carm_msg_sg *carm_sg = &msg->sg[i];
- carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
- carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
- msg_size += sizeof(struct carm_msg_sg);
- }
-
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-send_msg:
- /*
- * queue read/write message to hardware
- */
- VPRINTK("send msg, tag == %u\n", rq->tag);
- rc = carm_send_msg(host, crq, rq->tag);
- if (rc) {
- host->hw_sg_used -= n_elem;
- goto out_resource;
- }
-
- spin_unlock_irq(&host->lock);
- return BLK_STS_OK;
-out_resource:
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
-out_ioerr:
- carm_round_robin(host);
- spin_unlock_irq(&host->lock);
- return BLK_STS_IOERR;
-}
-
-static void carm_handle_array_info(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- struct carm_port *port;
- u8 *msg_data = mem + sizeof(struct carm_array_info);
- struct carm_array_info *desc = (struct carm_array_info *) msg_data;
- u64 lo, hi;
- int cur_port;
- size_t slen;
-
- DPRINTK("ENTER\n");
-
- if (error)
- goto out;
- if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
- goto out;
-
- cur_port = host->cur_scan_dev;
-
- /* should never occur */
- if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
- printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
- cur_port, (int) desc->array_id);
- goto out;
- }
-
- port = &host->port[cur_port];
-
- lo = (u64) le32_to_cpu(desc->size);
- hi = (u64) le16_to_cpu(desc->size_hi);
-
- port->capacity = lo | (hi << 32);
- port->dev_geom_head = le16_to_cpu(desc->head);
- port->dev_geom_sect = le16_to_cpu(desc->sect);
- port->dev_geom_cyl = le16_to_cpu(desc->cyl);
-
- host->dev_active |= (1 << cur_port);
-
- strncpy(port->name, desc->name, sizeof(port->name));
- port->name[sizeof(port->name) - 1] = 0;
- slen = strlen(port->name);
- while (slen && (port->name[slen - 1] == ' ')) {
- port->name[slen - 1] = 0;
- slen--;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
- pci_name(host->pdev), port->port_no,
- (unsigned long long) port->capacity);
- printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
- pci_name(host->pdev), port->port_no, port->name);
-
-out:
- assert(host->state == HST_DEV_SCAN);
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_scan_chan(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
- unsigned int i, dev_count = 0;
- int new_state = HST_DEV_SCAN_START;
-
- DPRINTK("ENTER\n");
-
- if (error) {
- new_state = HST_ERROR;
- goto out;
- }
-
- /* TODO: scan and support non-disk devices */
- for (i = 0; i < 8; i++)
- if (msg_data[i] == 0) { /* direct-access device (disk) */
- host->dev_present |= (1 << i);
- dev_count++;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
- pci_name(host->pdev), dev_count);
-
-out:
- assert(host->state == HST_PORT_SCAN);
- host->state = new_state;
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_generic(struct carm_host *host,
- struct carm_request *crq, blk_status_t error,
- int cur_state, int next_state)
-{
- DPRINTK("ENTER\n");
-
- assert(host->state == cur_state);
- if (error)
- host->state = HST_ERROR;
- else
- host->state = next_state;
- schedule_work(&host->fsm_task);
-}
-
-static inline void carm_handle_resp(struct carm_host *host,
- __le32 ret_handle_le, u32 status)
-{
- u32 handle = le32_to_cpu(ret_handle_le);
- unsigned int msg_idx;
- struct request *rq;
- struct carm_request *crq;
- blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
- u8 *mem;
-
- VPRINTK("ENTER, handle == 0x%x\n", handle);
-
- if (unlikely(!TAG_VALID(handle))) {
- printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
- pci_name(host->pdev), handle);
- return;
- }
-
- msg_idx = TAG_DECODE(handle);
- VPRINTK("tag == %u\n", msg_idx);
-
- rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
- crq = blk_mq_rq_to_pdu(rq);
-
- /* fast path */
- if (likely(crq->msg_type == CARM_MSG_READ ||
- crq->msg_type == CARM_MSG_WRITE)) {
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
- carm_rq_dir(rq));
- goto done;
- }
-
- mem = carm_ref_msg(host, msg_idx);
-
- switch (crq->msg_type) {
- case CARM_MSG_IOCTL: {
- switch (crq->msg_subtype) {
- case CARM_IOC_SCAN_CHAN:
- carm_handle_scan_chan(host, crq, mem, error);
- goto done;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_MISC: {
- switch (crq->msg_subtype) {
- case MISC_ALLOC_MEM:
- carm_handle_generic(host, crq, error,
- HST_ALLOC_BUF, HST_SYNC_TIME);
- goto done;
- case MISC_SET_TIME:
- carm_handle_generic(host, crq, error,
- HST_SYNC_TIME, HST_GET_FW_VER);
- goto done;
- case MISC_GET_FW_VER: {
- struct carm_fw_ver *ver = (struct carm_fw_ver *)
- (mem + sizeof(struct carm_msg_get_fw_ver));
- if (!error) {
- host->fw_ver = le32_to_cpu(ver->version);
- host->flags |= (ver->features & FL_FW_VER_MASK);
- }
- carm_handle_generic(host, crq, error,
- HST_GET_FW_VER, HST_PORT_SCAN);
- goto done;
- }
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_ARRAY: {
- switch (crq->msg_subtype) {
- case CARM_ARRAY_INFO:
- carm_handle_array_info(host, crq, mem, error);
- break;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- default:
- /* unknown / invalid response */
- goto err_out;
- }
-
- return;
-
-err_out:
- printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
- pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
- error = BLK_STS_IOERR;
-done:
- host->hw_sg_used -= crq->n_elem;
- blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);
-
- if (host->hw_sg_used <= CARM_SG_LOW_WATER)
- carm_round_robin(host);
-}
-
-static inline void carm_handle_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- struct carm_response *resp = (struct carm_response *) host->shm;
- unsigned int work = 0;
- unsigned int idx = host->resp_idx % RMSG_Q_LEN;
-
- while (1) {
- u32 status = le32_to_cpu(resp[idx].status);
-
- if (status == 0xffffffff) {
- VPRINTK("ending response on index %u\n", idx);
- writel(idx << 3, mmio + CARM_RESP_IDX);
- break;
- }
-
- /* response to a message we sent */
- else if ((status & (1 << 31)) == 0) {
- VPRINTK("handling msg response on index %u\n", idx);
- carm_handle_resp(host, resp[idx].ret_handle, status);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- /* asynchronous events the hardware throws our way */
- else if ((status & 0xff000000) == (1 << 31)) {
- u8 *evt_type_ptr = (u8 *) &resp[idx];
- u8 evt_type = *evt_type_ptr;
- printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
- pci_name(host->pdev), (int) evt_type);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- idx = NEXT_RESP(idx);
- work++;
- }
-
- VPRINTK("EXIT, work==%u\n", work);
- host->resp_idx += work;
-}
-
-static irqreturn_t carm_interrupt(int irq, void *__host)
-{
- struct carm_host *host = __host;
- void __iomem *mmio;
- u32 mask;
- int handled = 0;
- unsigned long flags;
-
- if (!host) {
- VPRINTK("no host\n");
- return IRQ_NONE;
- }
-
- spin_lock_irqsave(&host->lock, flags);
-
- mmio = host->mmio;
-
- /* reading should also clear interrupts */
- mask = readl(mmio + CARM_INT_STAT);
-
- if (mask == 0 || mask == 0xffffffff) {
- VPRINTK("no work, mask == 0x%x\n", mask);
- goto out;
- }
-
- if (mask & INT_ACK_MASK)
- writel(mask, mmio + CARM_INT_STAT);
-
- if (unlikely(host->state == HST_INVALID)) {
- VPRINTK("not initialized yet, mask = 0x%x\n", mask);
- goto out;
- }
-
- if (mask & CARM_HAVE_RESP) {
- handled = 1;
- carm_handle_responses(host);
- }
-
-out:
- spin_unlock_irqrestore(&host->lock, flags);
- VPRINTK("EXIT\n");
- return IRQ_RETVAL(handled);
-}
-
-static void carm_fsm_task (struct work_struct *work)
-{
- struct carm_host *host =
- container_of(work, struct carm_host, fsm_task);
- unsigned long flags;
- unsigned int state;
- int rc, i, next_dev;
- int reschedule = 0;
- int new_state = HST_INVALID;
-
- spin_lock_irqsave(&host->lock, flags);
- state = host->state;
- spin_unlock_irqrestore(&host->lock, flags);
-
- DPRINTK("ENTER, state == %s\n", state_name[state]);
-
- switch (state) {
- case HST_PROBE_START:
- new_state = HST_ALLOC_BUF;
- reschedule = 1;
- break;
-
- case HST_ALLOC_BUF:
- rc = carm_send_special(host, carm_fill_alloc_buf);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_SYNC_TIME:
- rc = carm_send_special(host, carm_fill_sync_time);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_GET_FW_VER:
- rc = carm_send_special(host, carm_fill_get_fw_ver);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_PORT_SCAN:
- rc = carm_send_special(host, carm_fill_scan_channels);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_SCAN_START:
- host->cur_scan_dev = -1;
- new_state = HST_DEV_SCAN;
- reschedule = 1;
- break;
-
- case HST_DEV_SCAN:
- next_dev = -1;
- for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
- if (host->dev_present & (1 << i)) {
- next_dev = i;
- break;
- }
-
- if (next_dev >= 0) {
- host->cur_scan_dev = next_dev;
- rc = carm_array_info(host, next_dev);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- } else {
- new_state = HST_DEV_ACTIVATE;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_ACTIVATE: {
- int activated = 0;
- for (i = 0; i < CARM_MAX_PORTS; i++)
- if (host->dev_active & (1 << i)) {
- struct carm_port *port = &host->port[i];
- struct gendisk *disk = port->disk;
-
- set_capacity(disk, port->capacity);
- host->probe_err = add_disk(disk);
- if (!host->probe_err)
- activated++;
- else
- break;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
- pci_name(host->pdev), activated);
-
- new_state = HST_PROBE_FINISHED;
- reschedule = 1;
- break;
- }
- case HST_PROBE_FINISHED:
- complete(&host->probe_comp);
- break;
- case HST_ERROR:
- /* FIXME: TODO */
- break;
-
- default:
- /* should never occur */
- printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
- assert(0);
- break;
- }
-
- if (new_state != HST_INVALID) {
- spin_lock_irqsave(&host->lock, flags);
- host->state = new_state;
- spin_unlock_irqrestore(&host->lock, flags);
- }
- if (reschedule)
- schedule_work(&host->fsm_task);
-}
-
-static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
-{
- unsigned int i;
-
- for (i = 0; i < 50000; i++) {
- u32 tmp = readl(mmio + CARM_LMUC);
- udelay(100);
-
- if (test_bit) {
- if ((tmp & bits) == bits)
- return 0;
- } else {
- if ((tmp & bits) == 0)
- return 0;
- }
-
- cond_resched();
- }
-
- printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
- bits, test_bit ? "yes" : "no");
- return -EBUSY;
-}
-
-static void carm_init_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- unsigned int i;
- struct carm_response *resp = (struct carm_response *) host->shm;
-
- for (i = 0; i < RMSG_Q_LEN; i++)
- resp[i].status = cpu_to_le32(0xffffffff);
-
- writel(0, mmio + CARM_RESP_IDX);
-}
-
-static int carm_init_host(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- u32 tmp;
- u8 tmp8;
- int rc;
-
- DPRINTK("ENTER\n");
-
- writel(0, mmio + CARM_INT_MASK);
-
- tmp8 = readb(mmio + CARM_INITC);
- if (tmp8 & 0x01) {
- tmp8 &= ~0x01;
- writeb(tmp8, mmio + CARM_INITC);
- readb(mmio + CARM_INITC); /* flush */
-
- DPRINTK("snooze...\n");
- msleep(5000);
- }
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_CME) {
- DPRINTK("CME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 1 failed\n");
- return rc;
- }
- }
- if (tmp & CARM_RME) {
- DPRINTK("RME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_RME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 2 failed\n");
- return rc;
- }
- }
-
- tmp &= ~(CARM_RME | CARM_CME);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 3 failed\n");
- return rc;
- }
-
- carm_init_buckets(mmio);
-
- writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
- writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
- writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
-
- tmp = readl(mmio + CARM_HMUC);
- tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 4 failed\n");
- return rc;
- }
-
- writel(0, mmio + CARM_HMPHA);
- writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
-
- carm_init_responses(host);
-
- /* start initialization, probing state machine */
- spin_lock_irq(&host->lock);
- assert(host->state == HST_INVALID);
- host->state = HST_PROBE_START;
- spin_unlock_irq(&host->lock);
- schedule_work(&host->fsm_task);
-
- DPRINTK("EXIT\n");
- return 0;
-}
-
-static const struct blk_mq_ops carm_mq_ops = {
- .queue_rq = carm_queue_rq,
-};
-
-static int carm_init_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk;
-
- port->host = host;
- port->port_no = port_no;
-
- disk = blk_mq_alloc_disk(&host->tag_set, port);
- if (IS_ERR(disk))
- return PTR_ERR(disk);
-
- port->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "/%u",
- (unsigned int)host->id * CARM_MAX_PORTS + port_no);
- disk->major = host->major;
- disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
- disk->minors = CARM_MINORS_PER_MAJOR;
- disk->fops = &carm_bd_ops;
- disk->private_data = port;
-
- blk_queue_max_segments(disk->queue, CARM_MAX_REQ_SG);
- blk_queue_segment_boundary(disk->queue, CARM_SG_BOUNDARY);
- return 0;
-}
-
-static void carm_free_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk = port->disk;
-
- if (!disk)
- return;
-
- if (host->state > HST_DEV_ACTIVATE)
- del_gendisk(disk);
- blk_cleanup_disk(disk);
-}
-
-static int carm_init_shm(struct carm_host *host)
-{
- host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
- &host->shm_dma, GFP_KERNEL);
- if (!host->shm)
- return -ENOMEM;
-
- host->msg_base = host->shm + RBUF_LEN;
- host->msg_dma = host->shm_dma + RBUF_LEN;
-
- memset(host->shm, 0xff, RBUF_LEN);
- memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
-
- return 0;
-}
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct carm_host *host;
- int rc;
- struct request_queue *q;
- unsigned int i;
-
- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
-
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- host->pdev = pdev;
- spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task);
- init_completion(&host->probe_comp);
-
- host->mmio = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!host->mmio) {
- printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
- pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_kfree;
- }
-
- rc = carm_init_shm(host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
- pci_name(pdev));
- goto err_out_iounmap;
- }
-
- memset(&host->tag_set, 0, sizeof(host->tag_set));
- host->tag_set.ops = &carm_mq_ops;
- host->tag_set.cmd_size = sizeof(struct carm_request);
- host->tag_set.nr_hw_queues = 1;
- host->tag_set.nr_maps = 1;
- host->tag_set.queue_depth = max_queue;
- host->tag_set.numa_node = NUMA_NO_NODE;
- host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-
- rc = blk_mq_alloc_tag_set(&host->tag_set);
- if (rc)
- goto err_out_dma_free;
-
- q = blk_mq_init_queue(&host->tag_set);
- if (IS_ERR(q)) {
- rc = PTR_ERR(q);
- blk_mq_free_tag_set(&host->tag_set);
- goto err_out_dma_free;
- }
-
- host->oob_q = q;
- q->queuedata = host;
-
- /*
- * Figure out which major to use: 160, 161, or dynamic
- */
- if (!test_and_set_bit(0, &carm_major_alloc))
- host->major = 160;
- else if (!test_and_set_bit(1, &carm_major_alloc))
- host->major = 161;
- else
- host->flags |= FL_DYN_MAJOR;
-
- host->id = carm_host_id;
- sprintf(host->name, DRV_NAME "%d", carm_host_id);
-
- rc = register_blkdev(host->major, host->name);
- if (rc < 0)
- goto err_out_free_majors;
- if (host->flags & FL_DYN_MAJOR)
- host->major = rc;
-
- for (i = 0; i < CARM_MAX_PORTS; i++) {
- rc = carm_init_disk(host, i);
- if (rc)
- goto err_out_blkdev_disks;
- }
-
- pci_set_master(pdev);
-
- rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
- pci_name(pdev));
- goto err_out_blkdev_disks;
- }
-
- rc = carm_init_host(host);
- if (rc)
- goto err_out_free_irq;
-
- DPRINTK("waiting for probe_comp\n");
- host->probe_err = -ENODEV;
- wait_for_completion(&host->probe_comp);
- if (host->probe_err) {
- rc = host->probe_err;
- goto err_out_free_irq;
- }
-
- printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
- host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
- (unsigned long long)pci_resource_start(pdev, 0),
- pdev->irq, host->major);
-
- carm_host_id++;
- pci_set_drvdata(pdev, host);
- return 0;
-
-err_out_free_irq:
- free_irq(pdev->irq, host);
-err_out_blkdev_disks:
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
-err_out_free_majors:
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_cleanup_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
-err_out_dma_free:
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
-err_out_iounmap:
- iounmap(host->mmio);
-err_out_kfree:
- kfree(host);
-err_out_regions:
- pci_release_regions(pdev);
-err_out:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void carm_remove_one (struct pci_dev *pdev)
-{
- struct carm_host *host = pci_get_drvdata(pdev);
- unsigned int i;
-
- if (!host) {
- printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
- pci_name(pdev));
- return;
- }
-
- free_irq(pdev->irq, host);
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_cleanup_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
- iounmap(host->mmio);
- kfree(host);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-module_pci_driver(carm_driver);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
new file mode 100644
index 000000000000..6a4a94b4cdf4
--- /dev/null
+++ b/drivers/block/ublk_drv.c
@@ -0,0 +1,1824 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Userspace block device - a block device whose IO is handled from userspace
+ *
+ * Makes full use of the io_uring passthrough command for communicating with
+ * the ublk userspace daemon (ublksrvd) to handle basic IO requests.
+ *
+ * Copyright 2022 Ming Lei <ming.lei@redhat.com>
+ *
+ * (part of code stolen from loop.c)
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/mutex.h>
+#include <linux/writeback.h>
+#include <linux/completion.h>
+#include <linux/highmem.h>
+#include <linux/sysfs.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/uio.h>
+#include <linux/ioprio.h>
+#include <linux/sched/mm.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/io_uring.h>
+#include <linux/blk-mq.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <linux/task_work.h>
+#include <uapi/linux/ublk_cmd.h>
+
+#define UBLK_MINORS (1U << MINORBITS)
+
+/* All UBLK_F_* have to be included into UBLK_F_ALL */
+#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
+ | UBLK_F_URING_CMD_COMP_IN_TASK \
+ | UBLK_F_NEED_GET_DATA)
+
+/* All UBLK_PARAM_TYPE_* should be included here */
+#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
+
+struct ublk_rq_data {
+ struct callback_head work;
+};
+
+struct ublk_uring_cmd_pdu {
+ struct request *req;
+};
+
+/*
+ * io command is active: sqe cmd is received, and its cqe isn't done
+ *
+ * If the flag is set, the io command is owned by the ublk driver, which is
+ * waiting for an incoming blk-mq request from the ublk block device.
+ *
+ * If the flag is cleared, the io command has been completed and is owned by
+ * the ublk server.
+ */
+#define UBLK_IO_FLAG_ACTIVE 0x01
+
+/*
+ * IO command is completed via cqe, and it is being handled by ublksrv, and
+ * not committed yet
+ *
+ * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
+ * for cross verification
+ */
+#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
+
+/*
+ * IO command is aborted, so this flag is set in case of
+ * !UBLK_IO_FLAG_ACTIVE.
+ *
+ * After this flag is observed, any pending or new incoming request
+ * associated with this io command will be failed immediately
+ */
+#define UBLK_IO_FLAG_ABORTED 0x04
+
+/*
+ * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command needs to get the
+ * data buffer address from ublksrv.
+ *
+ * Then, bio data can be copied into this data buffer for a WRITE request
+ * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is
+ * unset.
+ */
+#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
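The four flags above encode a small per-tag handshake between the driver and the ublk server. A minimal sketch of the intended lifecycle, written as standalone C rather than driver code (the enum and function names are illustrative only):

	/* illustrative model of the per-tag handshake, not part of the driver */
	enum io_slot_state { SLOT_IDLE, SLOT_ACTIVE, SLOT_OWNED_BY_SRV };

	/* server queues FETCH_REQ / COMMIT_AND_FETCH_REQ: the slot becomes ACTIVE */
	static enum io_slot_state server_fetch(enum io_slot_state s)
	{
		return (s == SLOT_IDLE || s == SLOT_OWNED_BY_SRV) ? SLOT_ACTIVE : s;
	}

	/* a blk-mq request arrives: the driver completes the cqe, the server owns the slot */
	static enum io_slot_state driver_dispatch(enum io_slot_state s)
	{
		return (s == SLOT_ACTIVE) ? SLOT_OWNED_BY_SRV : s;
	}

ABORTED and NEED_GET_DATA are refinements layered on top of this basic ACTIVE/OWNED_BY_SRV alternation.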
+
+struct ublk_io {
+ /* userspace buffer address from io cmd */
+ __u64 addr;
+ unsigned int flags;
+ int res;
+
+ struct io_uring_cmd *cmd;
+};
+
+struct ublk_queue {
+ int q_id;
+ int q_depth;
+
+ unsigned long flags;
+ struct task_struct *ubq_daemon;
+ char *io_cmd_buf;
+
+ unsigned long io_addr; /* mapped vm address */
+ unsigned int max_io_sz;
+ bool abort_work_pending;
+ unsigned short nr_io_ready; /* how many ios setup */
+ struct ublk_device *dev;
+ struct ublk_io ios[0];
+};
+
+#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
+
+struct ublk_device {
+ struct gendisk *ub_disk;
+
+ char *__queues;
+
+ unsigned short queue_size;
+ struct ublksrv_ctrl_dev_info dev_info;
+
+ struct blk_mq_tag_set tag_set;
+
+ struct cdev cdev;
+ struct device cdev_dev;
+
+#define UB_STATE_OPEN 0
+#define UB_STATE_USED 1
+ unsigned long state;
+ int ub_number;
+
+ struct mutex mutex;
+
+ spinlock_t mm_lock;
+ struct mm_struct *mm;
+
+ struct ublk_params params;
+
+ struct completion completion;
+ unsigned int nr_queues_ready;
+ atomic_t nr_aborted_queues;
+
+	/*
+	 * Our ubq->ubq_daemon may be killed without any notification, so
+	 * monitor each queue's daemon periodically
+	 */
+ struct delayed_work monitor_work;
+ struct work_struct stop_work;
+};
+
+/* header of ublk_params */
+struct ublk_params_header {
+ __u32 len;
+ __u32 types;
+};
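The two fields above form the negotiation header for the GET/SET_PARAMS control commands handled further below: userspace states how many bytes it passed (len) and which parameter groups are valid (types), and the driver consumes only what it understands. A minimal sketch of how a server might fill the buffer for UBLK_CMD_SET_PARAMS with only the basic group, using fields this file is shown to consume (the values are made up for illustration):

	struct ublk_params p = {
		.len   = sizeof(p),                /* driver caps this at sizeof(struct ublk_params) */
		.types = UBLK_PARAM_TYPE_BASIC,
		.basic = {
			.logical_bs_shift  = 9,    /* 512-byte logical blocks */
			.physical_bs_shift = 12,   /* 4K physical blocks */
			.max_sectors       = 1024, /* must fit within max_io_buf_bytes >> 9 */
			.dev_sectors       = 2097152,
		},
	};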
+
+static dev_t ublk_chr_devt;
+static struct class *ublk_chr_class;
+
+static DEFINE_IDR(ublk_index_idr);
+static DEFINE_SPINLOCK(ublk_idr_lock);
+static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
+
+static DEFINE_MUTEX(ublk_ctl_mutex);
+
+static struct miscdevice ublk_misc;
+
+static void ublk_dev_param_basic_apply(struct ublk_device *ub)
+{
+ struct request_queue *q = ub->ub_disk->queue;
+ const struct ublk_param_basic *p = &ub->params.basic;
+
+ blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
+ blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
+ blk_queue_io_min(q, 1 << p->io_min_shift);
+ blk_queue_io_opt(q, 1 << p->io_opt_shift);
+
+ blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
+ p->attrs & UBLK_ATTR_FUA);
+ if (p->attrs & UBLK_ATTR_ROTATIONAL)
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+
+ blk_queue_max_hw_sectors(q, p->max_sectors);
+ blk_queue_chunk_sectors(q, p->chunk_sectors);
+ blk_queue_virt_boundary(q, p->virt_boundary_mask);
+
+ if (p->attrs & UBLK_ATTR_READ_ONLY)
+ set_disk_ro(ub->ub_disk, true);
+
+ set_capacity(ub->ub_disk, p->dev_sectors);
+}
+
+static void ublk_dev_param_discard_apply(struct ublk_device *ub)
+{
+ struct request_queue *q = ub->ub_disk->queue;
+ const struct ublk_param_discard *p = &ub->params.discard;
+
+ q->limits.discard_alignment = p->discard_alignment;
+ q->limits.discard_granularity = p->discard_granularity;
+ blk_queue_max_discard_sectors(q, p->max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q,
+ p->max_write_zeroes_sectors);
+ blk_queue_max_discard_segments(q, p->max_discard_segments);
+}
+
+static int ublk_validate_params(const struct ublk_device *ub)
+{
+ /* basic param is the only one which must be set */
+ if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
+ const struct ublk_param_basic *p = &ub->params.basic;
+
+ if (p->logical_bs_shift > PAGE_SHIFT)
+ return -EINVAL;
+
+ if (p->logical_bs_shift > p->physical_bs_shift)
+ return -EINVAL;
+
+ if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
+ const struct ublk_param_discard *p = &ub->params.discard;
+
+ /* So far, only support single segment discard */
+ if (p->max_discard_sectors && p->max_discard_segments != 1)
+ return -EINVAL;
+
+ if (!p->discard_granularity)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ublk_apply_params(struct ublk_device *ub)
+{
+ if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
+ return -EINVAL;
+
+ ublk_dev_param_basic_apply(ub);
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
+ ublk_dev_param_discard_apply(ub);
+
+ return 0;
+}
+
+static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
+{
+ if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
+ !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
+ return true;
+ return false;
+}
+
+static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
+{
+ if (ubq->flags & UBLK_F_NEED_GET_DATA)
+ return true;
+ return false;
+}
+
+static struct ublk_device *ublk_get_device(struct ublk_device *ub)
+{
+ if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
+ return ub;
+ return NULL;
+}
+
+static void ublk_put_device(struct ublk_device *ub)
+{
+ put_device(&ub->cdev_dev);
+}
+
+static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
+ int qid)
+{
+ return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+}
+
+static inline bool ublk_rq_has_data(const struct request *rq)
+{
+ return rq->bio && bio_has_data(rq->bio);
+}
+
+static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
+ int tag)
+{
+ return (struct ublksrv_io_desc *)
+ &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+}
+
+static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+{
+ return ublk_get_queue(ub, q_id)->io_cmd_buf;
+}
+
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+
+ return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
+ PAGE_SIZE);
+}
+
+static void ublk_free_disk(struct gendisk *disk)
+{
+ struct ublk_device *ub = disk->private_data;
+
+ clear_bit(UB_STATE_USED, &ub->state);
+ put_device(&ub->cdev_dev);
+}
+
+static const struct block_device_operations ub_fops = {
+ .owner = THIS_MODULE,
+ .free_disk = ublk_free_disk,
+};
+
+#define UBLK_MAX_PIN_PAGES 32
+
+struct ublk_map_data {
+ const struct ublk_queue *ubq;
+ const struct request *rq;
+ const struct ublk_io *io;
+ unsigned max_bytes;
+};
+
+struct ublk_io_iter {
+ struct page *pages[UBLK_MAX_PIN_PAGES];
+ unsigned pg_off; /* offset in the 1st page in pages */
+ int nr_pages; /* how many page pointers in pages */
+ struct bio *bio;
+ struct bvec_iter iter;
+};
+
+static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
+ unsigned max_bytes, bool to_vm)
+{
+ const unsigned total = min_t(unsigned, max_bytes,
+ PAGE_SIZE - data->pg_off +
+ ((data->nr_pages - 1) << PAGE_SHIFT));
+ unsigned done = 0;
+ unsigned pg_idx = 0;
+
+ while (done < total) {
+ struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
+ const unsigned int bytes = min3(bv.bv_len, total - done,
+ (unsigned)(PAGE_SIZE - data->pg_off));
+ void *bv_buf = bvec_kmap_local(&bv);
+ void *pg_buf = kmap_local_page(data->pages[pg_idx]);
+
+ if (to_vm)
+ memcpy(pg_buf + data->pg_off, bv_buf, bytes);
+ else
+ memcpy(bv_buf, pg_buf + data->pg_off, bytes);
+
+ kunmap_local(pg_buf);
+ kunmap_local(bv_buf);
+
+ /* advance page array */
+ data->pg_off += bytes;
+ if (data->pg_off == PAGE_SIZE) {
+ pg_idx += 1;
+ data->pg_off = 0;
+ }
+
+ done += bytes;
+
+ /* advance bio */
+ bio_advance_iter_single(data->bio, &data->iter, bytes);
+ if (!data->iter.bi_size) {
+ data->bio = data->bio->bi_next;
+ if (data->bio == NULL)
+ break;
+ data->iter = data->bio->bi_iter;
+ }
+ }
+
+ return done;
+}
+
+static inline int ublk_copy_user_pages(struct ublk_map_data *data,
+ bool to_vm)
+{
+ const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
+ const unsigned long start_vm = data->io->addr;
+ unsigned int done = 0;
+ struct ublk_io_iter iter = {
+ .pg_off = start_vm & (PAGE_SIZE - 1),
+ .bio = data->rq->bio,
+ .iter = data->rq->bio->bi_iter,
+ };
+ const unsigned int nr_pages = round_up(data->max_bytes +
+ (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
+
+ while (done < nr_pages) {
+ const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
+ nr_pages - done);
+ unsigned i, len;
+
+ iter.nr_pages = get_user_pages_fast(start_vm +
+ (done << PAGE_SHIFT), to_pin, gup_flags,
+ iter.pages);
+ if (iter.nr_pages <= 0)
+ return done == 0 ? iter.nr_pages : done;
+ len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+ for (i = 0; i < iter.nr_pages; i++) {
+ if (to_vm)
+ set_page_dirty(iter.pages[i]);
+ put_page(iter.pages[i]);
+ }
+ data->max_bytes -= len;
+ done += iter.nr_pages;
+ }
+
+ return done;
+}
+
+static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
+ struct ublk_io *io)
+{
+ const unsigned int rq_bytes = blk_rq_bytes(req);
+	/*
+	 * No zero copy: copying of WRITE request data is deferred until this
+	 * runs in the ublksrv task context, and the big benefit is that
+	 * pinning pages in the current context is pretty fast, see
+	 * ublk_copy_user_pages().
+	 */
+ if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
+ return rq_bytes;
+
+ if (ublk_rq_has_data(req)) {
+ struct ublk_map_data data = {
+ .ubq = ubq,
+ .rq = req,
+ .io = io,
+ .max_bytes = rq_bytes,
+ };
+
+ ublk_copy_user_pages(&data, true);
+
+ return rq_bytes - data.max_bytes;
+ }
+ return rq_bytes;
+}
+
+static int ublk_unmap_io(const struct ublk_queue *ubq,
+ const struct request *req,
+ struct ublk_io *io)
+{
+ const unsigned int rq_bytes = blk_rq_bytes(req);
+
+ if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+ struct ublk_map_data data = {
+ .ubq = ubq,
+ .rq = req,
+ .io = io,
+ .max_bytes = io->res,
+ };
+
+ WARN_ON_ONCE(io->res > rq_bytes);
+
+ ublk_copy_user_pages(&data, false);
+
+ return io->res - data.max_bytes;
+ }
+ return rq_bytes;
+}
+
+static inline unsigned int ublk_req_build_flags(struct request *req)
+{
+ unsigned flags = 0;
+
+ if (req->cmd_flags & REQ_FAILFAST_DEV)
+ flags |= UBLK_IO_F_FAILFAST_DEV;
+
+ if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
+ flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
+
+ if (req->cmd_flags & REQ_FAILFAST_DRIVER)
+ flags |= UBLK_IO_F_FAILFAST_DRIVER;
+
+ if (req->cmd_flags & REQ_META)
+ flags |= UBLK_IO_F_META;
+
+ if (req->cmd_flags & REQ_FUA)
+ flags |= UBLK_IO_F_FUA;
+
+ if (req->cmd_flags & REQ_NOUNMAP)
+ flags |= UBLK_IO_F_NOUNMAP;
+
+ if (req->cmd_flags & REQ_SWAP)
+ flags |= UBLK_IO_F_SWAP;
+
+ return flags;
+}
+
+static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
+{
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_io *io = &ubq->ios[req->tag];
+ u32 ublk_op;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ ublk_op = UBLK_IO_OP_READ;
+ break;
+ case REQ_OP_WRITE:
+ ublk_op = UBLK_IO_OP_WRITE;
+ break;
+ case REQ_OP_FLUSH:
+ ublk_op = UBLK_IO_OP_FLUSH;
+ break;
+ case REQ_OP_DISCARD:
+ ublk_op = UBLK_IO_OP_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ ublk_op = UBLK_IO_OP_WRITE_ZEROES;
+ break;
+ default:
+ return BLK_STS_IOERR;
+ }
+
+	/* need to translate since kernel REQ_OP_* numbering may change */
+ iod->op_flags = ublk_op | ublk_req_build_flags(req);
+ iod->nr_sectors = blk_rq_sectors(req);
+ iod->start_sector = blk_rq_pos(req);
+ iod->addr = io->addr;
+
+ return BLK_STS_OK;
+}
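For reference, the other end of this descriptor is the ublk server: after it receives a cqe for a tag it reads the iod filled in above from the mmap()ed per-queue command buffer. A minimal sketch of that consumer side, where cmd_buf and tag are assumed inputs and the op is taken from the low byte of op_flags as written by ublk_setup_iod():

	/* cmd_buf points at the queue's mmap()ed array of struct ublksrv_io_desc */
	const struct ublksrv_io_desc *iod =
			(const struct ublksrv_io_desc *)cmd_buf + tag;
	unsigned int op  = iod->op_flags & 0xff;           /* UBLK_IO_OP_* value */
	__u64 dev_offset = iod->start_sector << 9;         /* byte offset on the device */
	__u32 len        = iod->nr_sectors << 9;           /* transfer length in bytes */
	void *buf        = (void *)(uintptr_t)iod->addr;   /* server-provided buffer */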
+
+static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
+}
+
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+{
+ return ubq->ubq_daemon->flags & PF_EXITING;
+}
+
+/* todo: handle partial completion */
+static void ublk_complete_rq(struct request *req)
+{
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[req->tag];
+ unsigned int unmapped_bytes;
+
+	/* fail the read IO if nothing was read */
+ if (!io->res && req_op(req) == REQ_OP_READ)
+ io->res = -EIO;
+
+ if (io->res < 0) {
+ blk_mq_end_request(req, errno_to_blk_status(io->res));
+ return;
+ }
+
+	/*
+	 * FLUSH or DISCARD usually won't return a meaningful byte count, so
+	 * end them directly.
+	 *
+	 * Neither of the two needs unmapping.
+	 */
+ if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
+ blk_mq_end_request(req, BLK_STS_OK);
+ return;
+ }
+
+ /* for READ request, writing data in iod->addr to rq buffers */
+ unmapped_bytes = ublk_unmap_io(ubq, req, io);
+
+	/*
+	 * Extremely unlikely, since the data was filled in just above.
+	 *
+	 * Simply clamp the result for this unlikely case.
+	 */
+ if (unlikely(unmapped_bytes < io->res))
+ io->res = unmapped_bytes;
+
+ if (blk_update_request(req, BLK_STS_OK, io->res))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+}
+
+/*
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So lock is unnecessary.
+ *
+ * Also aborting may not be started yet, keep in mind that one failed
+ * request may be issued by block layer again.
+ */
+static void __ublk_fail_req(struct ublk_io *io, struct request *req)
+{
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+ if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
+ io->flags |= UBLK_IO_FLAG_ABORTED;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+{
+ /* mark this cmd owned by ublksrv */
+ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ /*
+ * clear ACTIVE since we are done with this sqe/cmd slot
+ * We can only accept io cmd in case of being not active.
+ */
+ io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+
+ /* tell ublksrv one io request is coming */
+ io_uring_cmd_done(io->cmd, res, 0);
+}
+
+#define UBLK_REQUEUE_DELAY_MS 3
+
+static inline void __ublk_rq_task_work(struct request *req)
+{
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublk_device *ub = ubq->dev;
+ int tag = req->tag;
+ struct ublk_io *io = &ubq->ios[tag];
+ bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
+ unsigned int mapped_bytes;
+
+ pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ if (unlikely(task_exiting)) {
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ mod_delayed_work(system_wq, &ub->monitor_work, 0);
+ return;
+ }
+
+ if (ublk_need_get_data(ubq) &&
+ (req_op(req) == REQ_OP_WRITE ||
+ req_op(req) == REQ_OP_FLUSH)) {
+ /*
+ * We have not handled UBLK_IO_NEED_GET_DATA command yet,
+		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * and notify it.
+ */
+ if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+ __func__, io->cmd->cmd_op, ubq->q_id,
+ req->tag, io->flags);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+ return;
+ }
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+ }
+
+ mapped_bytes = ublk_map_io(ubq, req, io);
+
+ /* partially mapped, update io descriptor */
+ if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
+ /*
+ * Nothing mapped, retry until we succeed.
+ *
+ * We may never succeed in mapping any bytes here because
+ * of OOM. TODO: reserve one buffer with single page pinned
+ * for providing forward progress guarantee.
+ */
+ if (unlikely(!mapped_bytes)) {
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q,
+ UBLK_REQUEUE_DELAY_MS);
+ return;
+ }
+
+ ublk_get_iod(ubq, req->tag)->nr_sectors =
+ mapped_bytes >> 9;
+ }
+
+ ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
+}
+
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ __ublk_rq_task_work(pdu->req);
+}
+
+static void ublk_rq_task_work_fn(struct callback_head *work)
+{
+ struct ublk_rq_data *data = container_of(work,
+ struct ublk_rq_data, work);
+ struct request *req = blk_mq_rq_from_pdu(data);
+
+ __ublk_rq_task_work(req);
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(bd->rq);
+
+ if (unlikely(ubq_daemon_is_dying(ubq))) {
+ fail:
+ mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+ return BLK_STS_IOERR;
+ }
+
+ if (ublk_can_use_task_work(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ enum task_work_notify_mode notify_mode = bd->last ?
+ TWA_SIGNAL_NO_IPI : TWA_NONE;
+
+ if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
+ goto fail;
+ } else {
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = io->cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+		/*
+		 * If this check passes, we know that this is a re-issued request aborted
+		 * previously in monitor_work because the ubq_daemon (cmd's task) is
+		 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+		 * because this ioucmd's io_uring context may be freed now if no inflight
+		 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
+		 *
+		 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
+		 * (releasing the tag). Then the request is re-started (allocating the tag)
+		 * and we are here. Since releasing/allocating a tag implies smp_mb(),
+		 * finding UBLK_IO_FLAG_ABORTED guarantees that here is a re-issued request
+		 * aborted previously.
+		 */
+ if ((io->flags & UBLK_IO_FLAG_ABORTED))
+ goto fail;
+
+ pdu->req = rq;
+ io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+ }
+
+ return BLK_STS_OK;
+}
+
+static void ublk_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+
+ if (ublk_can_use_task_work(ubq))
+ __set_notify_signal(ubq->ubq_daemon);
+}
+
+static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
+ unsigned int hctx_idx)
+{
+ struct ublk_device *ub = driver_data;
+ struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
+
+ hctx->driver_data = ubq;
+ return 0;
+}
+
+static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ init_task_work(&data->work, ublk_rq_task_work_fn);
+ return 0;
+}
+
+static const struct blk_mq_ops ublk_mq_ops = {
+ .queue_rq = ublk_queue_rq,
+ .commit_rqs = ublk_commit_rqs,
+ .init_hctx = ublk_init_hctx,
+ .init_request = ublk_init_rq,
+};
+
+static int ublk_ch_open(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = container_of(inode->i_cdev,
+ struct ublk_device, cdev);
+
+ if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
+ return -EBUSY;
+ filp->private_data = ub;
+ return 0;
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = filp->private_data;
+
+ clear_bit(UB_STATE_OPEN, &ub->state);
+ return 0;
+}
+
+/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
+static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct ublk_device *ub = filp->private_data;
+ size_t sz = vma->vm_end - vma->vm_start;
+ unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
+ int q_id, ret = 0;
+
+ spin_lock(&ub->mm_lock);
+ if (!ub->mm)
+ ub->mm = current->mm;
+ if (current->mm != ub->mm)
+ ret = -EINVAL;
+ spin_unlock(&ub->mm_lock);
+
+ if (ret)
+ return ret;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
+ if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
+ return -EINVAL;
+
+ q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
+ pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
+ __func__, q_id, current->pid, vma->vm_start,
+ phys_off, (unsigned long)sz);
+
+ if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ return -EINVAL;
+
+ pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
+ return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
+}
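Seen from the server side, the pgoff encoding above means each queue's descriptor array sits at a fixed offset in the ublk char device. A minimal sketch of the matching mmap() call, where ublkc_fd, q_id, queue_depth and page_size are assumed inputs and the uapi <linux/ublk_cmd.h> plus <sys/mman.h> are assumed to be included:

	const size_t slot_sz = sizeof(struct ublksrv_io_desc);
	const size_t max_sz  = UBLK_MAX_QUEUE_DEPTH * slot_sz;
	/* length must equal the driver's page-rounded per-queue buffer size */
	const size_t map_sz  = (queue_depth * slot_sz + page_size - 1) & ~(page_size - 1);
	const off_t  offset  = UBLKSRV_CMD_BUF_OFFSET + (off_t)q_id * max_sz;

	/* read-only mapping: ublk_ch_mmap() above rejects VM_WRITE */
	void *cmd_buf = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, ublkc_fd, offset);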
+
+static void ublk_commit_completion(struct ublk_device *ub,
+ struct ublksrv_io_cmd *ub_cmd)
+{
+ u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
+ struct ublk_queue *ubq = ublk_get_queue(ub, qid);
+ struct ublk_io *io = &ubq->ios[tag];
+ struct request *req;
+
+	/* now this cmd slot is owned by the ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ io->res = ub_cmd->result;
+
+ /* find the io request and complete */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
+
+ if (req && likely(!blk_should_fake_timeout(req->q)))
+ ublk_complete_rq(req);
+}
+
+/*
+ * When ->ubq_daemon is exiting, either new requests are ended immediately,
+ * or any queued io command is drained, so it is safe to abort the queue
+ * locklessly
+ */
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ if (!ublk_get_device(ub))
+ return;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
+ struct request *rq;
+
+ /*
+ * Either we fail the request or ublk_rq_task_work_fn
+ * will do it
+ */
+ rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
+ if (rq)
+ __ublk_fail_req(io, rq);
+ }
+ }
+ ublk_put_device(ub);
+}
+
+static void ublk_daemon_monitor_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, monitor_work.work);
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ if (ubq_daemon_is_dying(ubq)) {
+ schedule_work(&ub->stop_work);
+
+ /* abort queue is for making forward progress */
+ ublk_abort_queue(ub, ubq);
+ }
+ }
+
+ /*
+ * We can't schedule monitor work after ublk_remove() is started.
+ *
+	 * No need for ub->mutex: monitor work is canceled after the state is
+	 * marked as DEAD, so the DEAD state is observed reliably.
+ */
+ if (ub->dev_info.state != UBLK_S_DEV_DEAD)
+ schedule_delayed_work(&ub->monitor_work,
+ UBLK_DAEMON_MONITOR_PERIOD);
+}
+
+static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+{
+ return ubq->nr_io_ready == ubq->q_depth;
+}
+
+static void ublk_cancel_queue(struct ublk_queue *ubq)
+{
+ int i;
+
+ if (!ublk_queue_ready(ubq))
+ return;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ if (io->flags & UBLK_IO_FLAG_ACTIVE)
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
+ }
+
+ /* all io commands are canceled */
+ ubq->nr_io_ready = 0;
+}
+
+/* Cancel all pending commands, must be called after del_gendisk() returns */
+static void ublk_cancel_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_cancel_queue(ublk_get_queue(ub, i));
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto unlock;
+
+ del_gendisk(ub->ub_disk);
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ub->dev_info.ublksrv_pid = -1;
+ put_disk(ub->ub_disk);
+ ub->ub_disk = NULL;
+ unlock:
+ ublk_cancel_dev(ub);
+ mutex_unlock(&ub->mutex);
+ cancel_delayed_work_sync(&ub->monitor_work);
+}
+
+/* device can only be started after all IOs are ready */
+static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ mutex_lock(&ub->mutex);
+ ubq->nr_io_ready++;
+ if (ublk_queue_ready(ubq)) {
+ ubq->ubq_daemon = current;
+ get_task_struct(ubq->ubq_daemon);
+ ub->nr_queues_ready++;
+ }
+ if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+ complete_all(&ub->completion);
+ mutex_unlock(&ub->mutex);
+}
+
+static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
+ int tag, struct io_uring_cmd *cmd)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+
+ if (ublk_can_use_task_work(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		/* should not fail since we only call it from ubq->ubq_daemon */
+ task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
+ } else {
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->req = req;
+ io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+ }
+}
+
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
+ struct ublk_device *ub = cmd->file->private_data;
+ struct ublk_queue *ubq;
+ struct ublk_io *io;
+ u32 cmd_op = cmd->cmd_op;
+ unsigned tag = ub_cmd->tag;
+ int ret = -EINVAL;
+
+ pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
+ __func__, cmd->cmd_op, ub_cmd->q_id, tag,
+ ub_cmd->result);
+
+ if (!(issue_flags & IO_URING_F_SQE128))
+ goto out;
+
+ if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ goto out;
+
+ ubq = ublk_get_queue(ub, ub_cmd->q_id);
+ if (!ubq || ub_cmd->q_id != ubq->q_id)
+ goto out;
+
+ if (ubq->ubq_daemon && ubq->ubq_daemon != current)
+ goto out;
+
+ if (tag >= ubq->q_depth)
+ goto out;
+
+ io = &ubq->ios[tag];
+
+ /* there is pending io cmd, something must be wrong */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * ensure that the user issues UBLK_IO_NEED_GET_DATA
+	 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
+ */
+ if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
+ ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+ goto out;
+
+ switch (cmd_op) {
+ case UBLK_IO_FETCH_REQ:
+		/* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
+ if (ublk_queue_ready(ubq)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+		 * The io is being handled by the server, so COMMIT_AND_FETCH_REQ is
+		 * expected instead of FETCH_REQ
+ */
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ goto out;
+		/* FETCH_REQ has to provide the IO buffer */
+ if (!ub_cmd->addr)
+ goto out;
+ io->cmd = cmd;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ io->addr = ub_cmd->addr;
+
+ ublk_mark_io_ready(ub, ubq);
+ break;
+ case UBLK_IO_COMMIT_AND_FETCH_REQ:
+		/* COMMIT_AND_FETCH_REQ has to provide the IO buffer */
+ if (!ub_cmd->addr)
+ goto out;
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ goto out;
+ io->addr = ub_cmd->addr;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ io->cmd = cmd;
+ ublk_commit_completion(ub, ub_cmd);
+ break;
+ case UBLK_IO_NEED_GET_DATA:
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ goto out;
+ io->addr = ub_cmd->addr;
+ io->cmd = cmd;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd);
+ break;
+ default:
+ goto out;
+ }
+ return -EIOCBQUEUED;
+
+ out:
+ io_uring_cmd_done(cmd, ret, 0);
+ pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
+ __func__, cmd_op, tag, ret, io->flags);
+ return -EIOCBQUEUED;
+}
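The commands handled above arrive as io_uring passthrough SQEs from the server. A minimal sketch of how a server might prepare the initial FETCH_REQ for one tag, assuming liburing, a ring created with IORING_SETUP_SQE128, an open /dev/ublkcN descriptor ublkc_fd, and a per-tag buffer io_buf (submission via io_uring_submit() is omitted):

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd     = ublkc_fd;                    /* the /dev/ublkcN char device */
	sqe->cmd_op = UBLK_IO_FETCH_REQ;

	ub_cmd->q_id = q_id;
	ub_cmd->tag  = tag;
	ub_cmd->addr = (__u64)(uintptr_t)io_buf;   /* buffer the driver copies to/from */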
+
+static const struct file_operations ublk_ch_fops = {
+ .owner = THIS_MODULE,
+ .open = ublk_ch_open,
+ .release = ublk_ch_release,
+ .llseek = no_llseek,
+ .uring_cmd = ublk_ch_uring_cmd,
+ .mmap = ublk_ch_mmap,
+};
+
+static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
+{
+ int size = ublk_queue_cmd_buf_size(ub, q_id);
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+
+ if (ubq->ubq_daemon)
+ put_task_struct(ubq->ubq_daemon);
+ if (ubq->io_cmd_buf)
+ free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+}
+
+static int ublk_init_queue(struct ublk_device *ub, int q_id)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ void *ptr;
+ int size;
+
+ ubq->flags = ub->dev_info.flags;
+ ubq->q_id = q_id;
+ ubq->q_depth = ub->dev_info.queue_depth;
+ size = ublk_queue_cmd_buf_size(ub, q_id);
+
+ ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
+ if (!ptr)
+ return -ENOMEM;
+
+ ubq->io_cmd_buf = ptr;
+ ubq->dev = ub;
+ return 0;
+}
+
+static void ublk_deinit_queues(struct ublk_device *ub)
+{
+ int nr_queues = ub->dev_info.nr_hw_queues;
+ int i;
+
+ if (!ub->__queues)
+ return;
+
+ for (i = 0; i < nr_queues; i++)
+ ublk_deinit_queue(ub, i);
+ kfree(ub->__queues);
+}
+
+static int ublk_init_queues(struct ublk_device *ub)
+{
+ int nr_queues = ub->dev_info.nr_hw_queues;
+ int depth = ub->dev_info.queue_depth;
+ int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
+ int i, ret = -ENOMEM;
+
+ ub->queue_size = ubq_size;
+ ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
+ if (!ub->__queues)
+ return ret;
+
+ for (i = 0; i < nr_queues; i++) {
+ if (ublk_init_queue(ub, i))
+ goto fail;
+ }
+
+ init_completion(&ub->completion);
+ return 0;
+
+ fail:
+ ublk_deinit_queues(ub);
+ return ret;
+}
+
+static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
+{
+ int i = idx;
+ int err;
+
+ spin_lock(&ublk_idr_lock);
+	/* allocate id; if @idx >= 0, we're requesting that specific id */
+ if (i >= 0) {
+ err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ } else {
+ err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
+ }
+ spin_unlock(&ublk_idr_lock);
+
+ if (err >= 0)
+ ub->ub_number = err;
+
+ return err;
+}
+
+static void ublk_free_dev_number(struct ublk_device *ub)
+{
+ spin_lock(&ublk_idr_lock);
+ idr_remove(&ublk_index_idr, ub->ub_number);
+ wake_up_all(&ublk_idr_wq);
+ spin_unlock(&ublk_idr_lock);
+}
+
+static void ublk_cdev_rel(struct device *dev)
+{
+ struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
+
+ blk_mq_free_tag_set(&ub->tag_set);
+ ublk_deinit_queues(ub);
+ ublk_free_dev_number(ub);
+ mutex_destroy(&ub->mutex);
+ kfree(ub);
+}
+
+static int ublk_add_chdev(struct ublk_device *ub)
+{
+ struct device *dev = &ub->cdev_dev;
+ int minor = ub->ub_number;
+ int ret;
+
+ dev->parent = ublk_misc.this_device;
+ dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
+ dev->class = ublk_chr_class;
+ dev->release = ublk_cdev_rel;
+ device_initialize(dev);
+
+ ret = dev_set_name(dev, "ublkc%d", minor);
+ if (ret)
+ goto fail;
+
+ cdev_init(&ub->cdev, &ublk_ch_fops);
+ ret = cdev_device_add(&ub->cdev, dev);
+ if (ret)
+ goto fail;
+ return 0;
+ fail:
+ put_device(dev);
+ return ret;
+}
+
+static void ublk_stop_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, stop_work);
+
+ ublk_stop_dev(ub);
+}
+
+/* align max io buffer size with PAGE_SIZE */
+static void ublk_align_max_io_size(struct ublk_device *ub)
+{
+ unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
+
+ ub->dev_info.max_io_buf_bytes =
+ round_down(max_io_bytes, PAGE_SIZE);
+}
+
+static int ublk_add_tag_set(struct ublk_device *ub)
+{
+ ub->tag_set.ops = &ublk_mq_ops;
+ ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
+ ub->tag_set.queue_depth = ub->dev_info.queue_depth;
+ ub->tag_set.numa_node = NUMA_NO_NODE;
+ ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
+ ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ub->tag_set.driver_data = ub;
+ return blk_mq_alloc_tag_set(&ub->tag_set);
+}
+
+static void ublk_remove(struct ublk_device *ub)
+{
+ ublk_stop_dev(ub);
+ cancel_work_sync(&ub->stop_work);
+ cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ put_device(&ub->cdev_dev);
+}
+
+static struct ublk_device *ublk_get_device_from_id(int idx)
+{
+ struct ublk_device *ub = NULL;
+
+ if (idx < 0)
+ return NULL;
+
+ spin_lock(&ublk_idr_lock);
+ ub = idr_find(&ublk_index_idr, idx);
+ if (ub)
+ ub = ublk_get_device(ub);
+ spin_unlock(&ublk_idr_lock);
+
+ return ub;
+}
+
+static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ int ublksrv_pid = (int)header->data[0];
+ struct ublk_device *ub;
+ struct gendisk *disk;
+ int ret = -EINVAL;
+
+ if (ublksrv_pid <= 0)
+ return -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ wait_for_completion_interruptible(&ub->completion);
+
+ schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
+
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
+ test_bit(UB_STATE_USED, &ub->state)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ disk = blk_mq_alloc_disk(&ub->tag_set, ub);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_unlock;
+ }
+ sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
+ disk->fops = &ub_fops;
+ disk->private_data = ub;
+
+ ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->ub_disk = disk;
+
+ ret = ublk_apply_params(ub);
+ if (ret)
+ goto out_put_disk;
+
+ get_device(&ub->cdev_dev);
+ ret = add_disk(disk);
+ if (ret) {
+ /*
+ * Has to drop the reference since ->free_disk won't be
+ * called in case of add_disk failure.
+ */
+ ublk_put_device(ub);
+ goto out_put_disk;
+ }
+ set_bit(UB_STATE_USED, &ub->state);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+out_put_disk:
+ if (ret)
+ put_disk(disk);
+out_unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_device *ub;
+ cpumask_var_t cpumask;
+ unsigned long queue;
+ unsigned int retlen;
+ unsigned int i;
+ int ret = -EINVAL;
+
+ if (header->len * BITS_PER_BYTE < nr_cpu_ids)
+ return -EINVAL;
+ if (header->len & (sizeof(unsigned long)-1))
+ return -EINVAL;
+ if (!header->addr)
+ return -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ queue = header->data[0];
+ if (queue >= ub->dev_info.nr_hw_queues)
+ goto out_put_device;
+
+ ret = -ENOMEM;
+ if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
+ goto out_put_device;
+
+ for_each_possible_cpu(i) {
+ if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
+ cpumask_set_cpu(i, cpumask);
+ }
+
+ ret = -EFAULT;
+ retlen = min_t(unsigned short, header->len, cpumask_size());
+ if (copy_to_user(argp, cpumask, retlen))
+ goto out_free_cpumask;
+ if (retlen != header->len &&
+ clear_user(argp + retlen, header->len - retlen))
+ goto out_free_cpumask;
+
+ ret = 0;
+out_free_cpumask:
+ free_cpumask_var(cpumask);
+out_put_device:
+ ublk_put_device(ub);
+ return ret;
+}
+
+static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
+{
+ pr_devel("%s: dev id %d flags %llx\n", __func__,
+ info->dev_id, info->flags);
+ pr_devel("\t nr_hw_queues %d queue_depth %d\n",
+ info->nr_hw_queues, info->queue_depth);
+}
+
+static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublksrv_ctrl_dev_info info;
+ struct ublk_device *ub;
+ int ret = -EINVAL;
+
+ if (header->len < sizeof(info) || !header->addr)
+ return -EINVAL;
+ if (header->queue_id != (u16)-1) {
+ pr_warn("%s: queue_id is wrong %x\n",
+ __func__, header->queue_id);
+ return -EINVAL;
+ }
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+ ublk_dump_dev_info(&info);
+ if (header->dev_id != info.dev_id) {
+ pr_warn("%s: dev id not match %u %u\n",
+ __func__, header->dev_id, info.dev_id);
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&ublk_ctl_mutex);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+ if (!ub)
+ goto out_unlock;
+ mutex_init(&ub->mutex);
+ spin_lock_init(&ub->mm_lock);
+ INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
+
+ ret = ublk_alloc_dev_number(ub, header->dev_id);
+ if (ret < 0)
+ goto out_free_ub;
+
+ memcpy(&ub->dev_info, &info, sizeof(info));
+
+ /* update device id */
+ ub->dev_info.dev_id = ub->ub_number;
+
+	/*
+	 * The 64-bit flags will be copied back to userspace as the feature
+	 * negotiation result, so clear any flags the driver doesn't support
+	 * yet; userspace then sees the correct set of flags (features) to
+	 * handle.
+	 */
+ ub->dev_info.flags &= UBLK_F_ALL;
+
+ /* We are not ready to support zero copy */
+ ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
+
+ ub->dev_info.nr_hw_queues = min_t(unsigned int,
+ ub->dev_info.nr_hw_queues, nr_cpu_ids);
+ ublk_align_max_io_size(ub);
+
+ ret = ublk_init_queues(ub);
+ if (ret)
+ goto out_free_dev_number;
+
+ ret = ublk_add_tag_set(ub);
+ if (ret)
+ goto out_deinit_queues;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
+ goto out_free_tag_set;
+
+ /*
+	 * Add the char dev so that the ublksrv daemon can be set up.
+	 * ublk_add_chdev() will clean up everything if it fails.
+ */
+ ret = ublk_add_chdev(ub);
+ goto out_unlock;
+
+out_free_tag_set:
+ blk_mq_free_tag_set(&ub->tag_set);
+out_deinit_queues:
+ ublk_deinit_queues(ub);
+out_free_dev_number:
+ ublk_free_dev_number(ub);
+out_free_ub:
+ mutex_destroy(&ub->mutex);
+ kfree(ub);
+out_unlock:
+ mutex_unlock(&ublk_ctl_mutex);
+ return ret;
+}
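The dev_info copied in above is the server's half of the same negotiation. A minimal sketch of how a server might fill it before issuing UBLK_CMD_ADD_DEV on the control device, restricted to fields this function is shown to consume (the values are illustrative only):

	struct ublksrv_ctrl_dev_info info = {
		.dev_id           = (__u32)-1,    /* -1: let the driver pick an index */
		.nr_hw_queues     = 1,
		.queue_depth      = 64,
		.max_io_buf_bytes = 512 << 10,
		.flags            = UBLK_F_URING_CMD_COMP_IN_TASK,
	};

On return the same buffer is copied back with the allocated dev_id and the negotiated flags masked by UBLK_F_ALL.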
+
+static inline bool ublk_idr_freed(int id)
+{
+ void *ptr;
+
+ spin_lock(&ublk_idr_lock);
+ ptr = idr_find(&ublk_index_idr, id);
+ spin_unlock(&ublk_idr_lock);
+
+ return ptr == NULL;
+}
+
+static int ublk_ctrl_del_dev(int idx)
+{
+ struct ublk_device *ub;
+ int ret;
+
+ ret = mutex_lock_killable(&ublk_ctl_mutex);
+ if (ret)
+ return ret;
+
+ ub = ublk_get_device_from_id(idx);
+ if (ub) {
+ ublk_remove(ub);
+ ublk_put_device(ub);
+ ret = 0;
+ } else {
+ ret = -ENODEV;
+ }
+
+ /*
+	 * Wait until the idr entry is removed, so the index can be reused
+	 * once the DEL_DEV command returns.
+ */
+ if (!ret)
+ wait_event(ublk_idr_wq, ublk_idr_freed(idx));
+ mutex_unlock(&ublk_ctl_mutex);
+
+ return ret;
+}
+
+static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+
+ pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
+ __func__, cmd->cmd_op, header->dev_id, header->queue_id,
+ header->data[0], header->addr, header->len);
+}
+
+static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ struct ublk_device *ub;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ ublk_stop_dev(ub);
+ cancel_work_sync(&ub->stop_work);
+
+ ublk_put_device(ub);
+ return 0;
+}
+
+static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_device *ub;
+ int ret = 0;
+
+ if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
+ return -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
+ ret = -EFAULT;
+ ublk_put_device(ub);
+
+ return ret;
+}
+
+static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_params_header ph;
+ struct ublk_device *ub;
+ int ret;
+
+ if (header->len <= sizeof(ph) || !header->addr)
+ return -EINVAL;
+
+ if (copy_from_user(&ph, argp, sizeof(ph)))
+ return -EFAULT;
+
+ if (ph.len > header->len || !ph.len)
+ return -EINVAL;
+
+ if (ph.len > sizeof(struct ublk_params))
+ ph.len = sizeof(struct ublk_params);
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ mutex_lock(&ub->mutex);
+ if (copy_to_user(argp, &ub->params, ph.len))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ mutex_unlock(&ub->mutex);
+
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_params_header ph;
+ struct ublk_device *ub;
+ int ret = -EFAULT;
+
+ if (header->len <= sizeof(ph) || !header->addr)
+ return -EINVAL;
+
+ if (copy_from_user(&ph, argp, sizeof(ph)))
+ return -EFAULT;
+
+ if (ph.len > header->len || !ph.len || !ph.types)
+ return -EINVAL;
+
+ if (ph.len > sizeof(struct ublk_params))
+ ph.len = sizeof(struct ublk_params);
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ /* parameters can only be changed when device isn't live */
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ ret = -EACCES;
+ } else if (copy_from_user(&ub->params, argp, ph.len)) {
+ ret = -EFAULT;
+ } else {
+ /* clear all we don't support yet */
+ ub->params.types &= UBLK_PARAM_TYPE_ALL;
+ ret = ublk_validate_params(ub);
+ }
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+
+ return ret;
+}
+
+static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ int ret = -EINVAL;
+
+ ublk_ctrl_cmd_dump(cmd);
+
+ if (!(issue_flags & IO_URING_F_SQE128))
+ goto out;
+
+ ret = -EPERM;
+ if (!capable(CAP_SYS_ADMIN))
+ goto out;
+
+ ret = -ENODEV;
+ switch (cmd->cmd_op) {
+ case UBLK_CMD_START_DEV:
+ ret = ublk_ctrl_start_dev(cmd);
+ break;
+ case UBLK_CMD_STOP_DEV:
+ ret = ublk_ctrl_stop_dev(cmd);
+ break;
+ case UBLK_CMD_GET_DEV_INFO:
+ ret = ublk_ctrl_get_dev_info(cmd);
+ break;
+ case UBLK_CMD_ADD_DEV:
+ ret = ublk_ctrl_add_dev(cmd);
+ break;
+ case UBLK_CMD_DEL_DEV:
+ ret = ublk_ctrl_del_dev(header->dev_id);
+ break;
+ case UBLK_CMD_GET_QUEUE_AFFINITY:
+ ret = ublk_ctrl_get_queue_affinity(cmd);
+ break;
+ case UBLK_CMD_GET_PARAMS:
+ ret = ublk_ctrl_get_params(cmd);
+ break;
+ case UBLK_CMD_SET_PARAMS:
+ ret = ublk_ctrl_set_params(cmd);
+ break;
+ default:
+ break;
+ }
+ out:
+ io_uring_cmd_done(cmd, ret, 0);
+ pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+ __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
+ return -EIOCBQUEUED;
+}
+
+static const struct file_operations ublk_ctl_fops = {
+ .open = nonseekable_open,
+ .uring_cmd = ublk_ctrl_uring_cmd,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice ublk_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ublk-control",
+ .fops = &ublk_ctl_fops,
+};
+
+static int __init ublk_init(void)
+{
+ int ret;
+
+ init_waitqueue_head(&ublk_idr_wq);
+
+ ret = misc_register(&ublk_misc);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
+ if (ret)
+ goto unregister_mis;
+
+ ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
+ if (IS_ERR(ublk_chr_class)) {
+ ret = PTR_ERR(ublk_chr_class);
+ goto free_chrdev_region;
+ }
+ return 0;
+
+free_chrdev_region:
+ unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
+unregister_mis:
+ misc_deregister(&ublk_misc);
+ return ret;
+}
+
+static void __exit ublk_exit(void)
+{
+ struct ublk_device *ub;
+ int id;
+
+ class_destroy(ublk_chr_class);
+
+ misc_deregister(&ublk_misc);
+
+ idr_for_each_entry(&ublk_index_idr, ub, id)
+ ublk_remove(ub);
+
+ idr_destroy(&ublk_index_idr);
+ unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
+}
+
+module_init(ublk_init);
+module_exit(ublk_exit);
+
+MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
+MODULE_LICENSE("GPL");
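Editorial note: the whole ublk control plane above is driven through io_uring passthrough commands on /dev/ublk-control; the ring must be created with 128-byte SQEs (the handler rejects anything without IO_URING_F_SQE128) and the caller needs CAP_SYS_ADMIN. The following is a rough userspace sketch of issuing UBLK_CMD_ADD_DEV. It is an assumption-laden illustration, not the reference ublksrv daemon; the SQE field names follow the 5.19/6.0-era big-SQE uAPI and liburing >= 2.2, and error handling is minimal.

/*
 * Hedged sketch: add a ublk device through /dev/ublk-control.
 * Assumes <linux/ublk_cmd.h> from a 6.0-era kernel and liburing >= 2.2.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

int main(void)
{
	struct ublksrv_ctrl_dev_info info = {
		.dev_id = -1,				/* let the driver pick an index */
		.nr_hw_queues = 1,
		.queue_depth = 64,
		.flags = UBLK_F_SUPPORT_ZERO_COPY,	/* driver clears what it can't do */
	};
	struct io_uring_params p = { .flags = IORING_SETUP_SQE128 };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct ublksrv_ctrl_cmd *cmd;
	int fd;

	fd = open("/dev/ublk-control", O_RDWR);		/* needs CAP_SYS_ADMIN */
	if (fd < 0 || io_uring_queue_init_params(4, &ring, &p))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));		/* 128-byte SQE slot */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = UBLK_CMD_ADD_DEV;
	cmd = (struct ublksrv_ctrl_cmd *)sqe->cmd;	/* payload lives in the big-SQE area */
	cmd->dev_id = info.dev_id;
	cmd->queue_id = (__u16)-1;			/* must be -1 for control commands */
	cmd->addr = (__u64)(uintptr_t)&info;
	cmd->len = sizeof(info);

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		if (cqe->res >= 0)
			/* the driver wrote the negotiated dev_id/flags back into 'info' */
			printf("added dev %u, negotiated flags 0x%llx\n",
			       info.dev_id, info.flags);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

After a successful ADD_DEV the daemon would still, roughly, issue UBLK_CMD_SET_PARAMS, queue UBLK_IO_FETCH_REQ commands on the per-device char device, and only then UBLK_CMD_START_DEV before the block device goes live.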
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6fc7850c2b0a..30255fcaf181 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -101,6 +101,14 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
+static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+
+ return vq;
+}
+
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
@@ -416,7 +424,7 @@ static void virtio_queue_rqs(struct request **rqlist)
struct request *requeue_list = NULL;
rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
bool kick;
if (!virtblk_prep_rq_batch(req)) {
@@ -837,7 +845,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
- struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
@@ -862,22 +870,10 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return found;
}
-static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
-{
- struct virtio_blk *vblk = data;
- struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
-
- WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
- hctx->driver_data = vq;
- return 0;
-}
-
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
- .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
.poll = virtblk_poll,
@@ -1089,7 +1085,7 @@ static int virtblk_probe(struct virtio_device *vdev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(vblk->disk);
+ put_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
@@ -1111,7 +1107,6 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
- blk_cleanup_queue(vblk->disk->queue);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index a97f2bf5b01b..a5cf7f1e871c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -442,7 +442,7 @@ static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
* Routines for managing virtual block devices (vbds).
*/
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
- int operation)
+ enum req_op operation)
{
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;
@@ -1193,8 +1193,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct bio *bio = NULL;
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
- int operation;
- int operation_flags = 0;
+ enum req_op operation;
+ blk_opf_t operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 97de13b14175..ee7ad2fb432d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -157,6 +157,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
return 0;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
+
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
@@ -472,12 +477,6 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -520,8 +519,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (bdev_max_secure_erase_sectors(bdev))
vbd->discard_secure = true;
- vbd->feature_gnt_persistent = feature_persistent;
-
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -1087,10 +1084,9 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- if (blkif->vbd.feature_gnt_persistent)
- blkif->vbd.feature_gnt_persistent =
- xenbus_read_unsigned(dev->otherend,
- "feature-persistent", 0);
+
+ blkif->vbd.feature_gnt_persistent = feature_persistent &&
+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3646c0cae672..8e56e69fb4c4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1988,8 +1988,6 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
- info->feature_persistent = feature_persistent;
-
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2283,7 +2281,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (info->feature_persistent)
+ if (feature_persistent)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
@@ -2397,7 +2395,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
if (err) {
- blk_cleanup_disk(info->gd);
+ put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;
goto fail;
@@ -2482,7 +2480,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)
blkif_free(info, 0);
if (info->gd) {
xlbd_release_minors(info->gd->first_minor, info->gd->minors);
- blk_cleanup_disk(info->gd);
+ put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
}
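Editorial note: the xen-blkback and xen-blkfront hunks above address the same latching problem. The writable feature_persistent module parameter used to be copied into per-device state when the device was created, so later changes to the parameter, or a reconnect, could be silently ignored. After the change the decision is redone at connect time from the parameter and the peer's xenstore advertisement together. A small illustrative helper (not actual kernel code) capturing that negotiation:

/* Illustrative only: persistent grants are used when the local module
 * parameter is still enabled at connect time AND the peer advertises
 * "feature-persistent" in xenstore. */
#include <xen/xenbus.h>

static bool negotiate_persistent_grants(struct xenbus_device *dev,
					bool feature_persistent_param)
{
	return feature_persistent_param &&
	       xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
}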
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7a6ed83481b8..c1e85f356e4d 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -337,7 +337,7 @@ static int z2ram_register_disk(int minor)
z2ram_gendisk[minor] = disk;
err = add_disk(disk);
if (err)
- blk_cleanup_disk(disk);
+ put_disk(disk);
return err;
}
@@ -384,7 +384,6 @@ static void __exit z2_exit(void)
for (i = 0; i < Z2MINOR_COUNT; i++) {
del_gendisk(z2ram_gendisk[i]);
- blk_cleanup_queue(z2ram_gendisk[i]->queue);
put_disk(z2ram_gendisk[i]);
}
blk_mq_free_tag_set(&tag_set);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 052aa3f65514..0916de952e09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -63,12 +63,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
bool zcomp_available_algorithm(const char *comp)
{
- int i;
-
- i = sysfs_match_string(backends, comp);
- if (i >= 0)
- return true;
-
/*
* Crypto does not ignore a trailing new line symbol,
* so make sure you don't supply a string containing
@@ -217,6 +211,11 @@ struct zcomp *zcomp_create(const char *compress)
struct zcomp *comp;
int error;
+ /*
+ * The crypto API will execute /sbin/modprobe if the compression module
+ * is not loaded yet. Do the availability check here, otherwise
+ * /sbin/modprobe would be called under the CPU hot-plug lock.
+ */
if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
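Editorial note: with the sysfs_match_string() fast path removed above, every availability check goes through the crypto API, which can synchronously load the backend module. A minimal sketch of the resulting shape, assuming the remaining body of zcomp_available_algorithm() is the crypto_has_comp() check that already followed the removed lines:

/* Sketch, not the verbatim kernel source: availability now always asks the
 * crypto API, which may call request_module() -> /sbin/modprobe. That is
 * why zcomp_create() checks it up front, before cpuhp_state_add_instance()
 * takes the CPU hot-plug lock. */
#include <linux/crypto.h>

bool zcomp_available_algorithm(const char *comp)
{
	return crypto_has_comp(comp, 0, 0) == 1;
}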
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index b8549c61ff2c..92cb929a45b7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,7 +52,9 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
+#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops;
+#endif
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -1387,9 +1389,9 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
__GFP_HIGHMEM |
__GFP_MOVABLE);
- if (unlikely(!handle)) {
+ if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
- return -ENOMEM;
+ return PTR_ERR((void *)handle);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1523,7 +1525,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, unsigned int op, struct bio *bio)
+ int offset, enum req_op op, struct bio *bio)
{
int ret;
@@ -1631,7 +1633,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
int offset, ret;
u32 index;
@@ -1957,7 +1959,7 @@ static int zram_add(void)
return device_id;
out_cleanup_disk:
- blk_cleanup_disk(zram->disk);
+ put_disk(zram->disk);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
@@ -2008,7 +2010,7 @@ static int zram_remove(struct zram *zram)
*/
zram_reset_device(zram);
- blk_cleanup_disk(zram->disk);
+ put_disk(zram->disk);
kfree(zram);
return 0;
}
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 76fbb046bdbe..3006e2a0f37e 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -403,6 +403,13 @@ static int btbcm_read_info(struct hci_dev *hdev)
bt_dev_info(hdev, "BCM: chip id %u", skb->data[1]);
kfree_skb(skb);
+ return 0;
+}
+
+static int btbcm_print_controller_features(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
/* Read Controller Features */
skb = btbcm_read_controller_features(hdev);
if (IS_ERR(skb))
@@ -454,6 +461,8 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x6606, "BCM4345C5" }, /* 003.006.006 */
{ 0x230f, "BCM4356A2" }, /* 001.003.015 */
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
+ { 0x420d, "BCM4349B1" }, /* 002.002.013 */
+ { 0x420e, "BCM4349B1" }, /* 002.002.014 */
{ 0x4217, "BCM4329B1" }, /* 002.002.023 */
{ 0x6106, "BCM4359C0" }, /* 003.001.006 */
{ 0x4106, "BCM4335A0" }, /* 002.001.006 */
@@ -514,7 +523,7 @@ static const char *btbcm_get_board_name(struct device *dev)
#endif
}
-int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode)
{
u16 subver, rev, pid, vid;
struct sk_buff *skb;
@@ -551,9 +560,16 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
if (err)
return err;
}
- err = btbcm_print_local_name(hdev);
- if (err)
- return err;
+
+ if (!use_autobaud_mode) {
+ err = btbcm_print_controller_features(hdev);
+ if (err)
+ return err;
+
+ err = btbcm_print_local_name(hdev);
+ if (err)
+ return err;
+ }
bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table :
bcm_uart_subver_table;
@@ -636,13 +652,13 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
}
EXPORT_SYMBOL_GPL(btbcm_initialize);
-int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode)
{
int err;
/* Re-initialize if necessary */
if (*fw_load_done) {
- err = btbcm_initialize(hdev, fw_load_done);
+ err = btbcm_initialize(hdev, fw_load_done, use_autobaud_mode);
if (err)
return err;
}
@@ -658,15 +674,16 @@ EXPORT_SYMBOL_GPL(btbcm_finalize);
int btbcm_setup_patchram(struct hci_dev *hdev)
{
bool fw_load_done = false;
+ bool use_autobaud_mode = false;
int err;
/* Initialize */
- err = btbcm_initialize(hdev, &fw_load_done);
+ err = btbcm_initialize(hdev, &fw_load_done, use_autobaud_mode);
if (err)
return err;
/* Re-initialize after loading Patch */
- return btbcm_finalize(hdev, &fw_load_done);
+ return btbcm_finalize(hdev, &fw_load_done, use_autobaud_mode);
}
EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index 8bf01565fdfc..b4cb24231a20 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -62,8 +62,8 @@ int btbcm_write_pcm_int_params(struct hci_dev *hdev,
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
-int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done);
-int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done);
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode);
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode);
#else
@@ -104,12 +104,12 @@ static inline int btbcm_setup_apple(struct hci_dev *hdev)
return 0;
}
-static inline int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
+static inline int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode)
{
return 0;
}
-static inline int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
+static inline int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode)
{
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index db35b917aecf..32329a2e526f 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell Bluetooth driver: debugfs related functions
*
* Copyright (C) 2009, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
**/
#include <linux/debugfs.h>
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index fb7729779166..d7df05c56b28 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Marvell Bluetooth driver: global definitions & declarations
*
* Copyright (C) 2009, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- *
*/
#include <linux/kthread.h>
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 181338f60530..9658b33c824a 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell Bluetooth driver
*
* Copyright (C) 2009, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
**/
#include <linux/module.h>
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index b8ef66f89fc1..ba057ebfda5c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell BT-over-SDIO driver: SDIO interface related functions.
*
* Copyright (C) 2009, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
**/
#include <linux/firmware.h>
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 3a522d23ee6e..72dd3b7d82aa 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/**
* Marvell BT-over-SDIO driver: SDIO interface related definitions
*
* Copyright (C) 2009, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- *
**/
#define SDIO_HEADER_LEN 4
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index d6700efcfe8c..f9a3444753c2 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1282,6 +1282,13 @@ err:
hci_reset_dev(hdev);
}
+static bool btmtksdio_sdio_inband_wakeup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(bdev->dev);
+}
+
static bool btmtksdio_sdio_wakeup(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
@@ -1349,6 +1356,14 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->shutdown = btmtksdio_shutdown;
hdev->send = btmtksdio_send_frame;
hdev->wakeup = btmtksdio_sdio_wakeup;
+ /*
+ * If the SDIO controller supports wake on Bluetooth, sending a wakeup
+ * command is not necessary.
+ */
+ if (device_can_wakeup(func->card->host->parent))
+ hdev->wakeup = btmtksdio_sdio_inband_wakeup;
+ else
+ hdev->wakeup = btmtksdio_sdio_wakeup;
hdev->set_bdaddr = btmtk_set_bdaddr;
SET_HCIDEV_DEV(hdev, &func->dev);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 47c28fd8f006..fb52313a1d45 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -330,7 +330,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Loop from the end of the firmware parsing instructions, until
* we find an instruction that identifies the "project ID" for the
* hardware supported by this firmware file.
- * Once we have that, we double-check that that project_id is suitable
+ * Once we have that, we double-check that project_id is suitable
* for the hardware we are working with.
*/
while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e25fcd49db70..15caa6469538 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -427,6 +427,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8852CE Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04c5, 0x1675), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cb8, 0xc558), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3587), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -477,6 +489,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -893,11 +911,21 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
hci_skb_expect(skb) -= len;
if (skb->len == HCI_ACL_HDR_SIZE) {
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
__le16 dlen = hci_acl_hdr(skb)->dlen;
+ __u8 type;
/* Complete ACL header */
hci_skb_expect(skb) = __le16_to_cpu(dlen);
+ /* Detect if ISO packet has been sent over bulk */
+ if (hci_conn_num(data->hdev, ISO_LINK)) {
+ type = hci_conn_lookup_type(data->hdev,
+ hci_handle(handle));
+ if (type == ISO_LINK)
+ hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+ }
+
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
kfree_skb(skb);
skb = NULL;
@@ -1762,6 +1790,13 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.sco_tx++;
return submit_tx_urb(hdev, urb);
+
+ case HCI_ISODATA_PKT:
+ urb = alloc_bulk_urb(hdev, skb);
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
+
+ return submit_or_queue_tx_urb(hdev, urb);
}
return -EILSEQ;
@@ -2069,7 +2104,6 @@ static int btusb_setup_csr(struct hci_dev *hdev)
* without these the controller will lock up.
*/
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
@@ -2255,6 +2289,13 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.sco_tx++;
return submit_tx_urb(hdev, urb);
+
+ case HCI_ISODATA_PKT:
+ urb = alloc_bulk_urb(hdev, skb);
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
+
+ return submit_or_queue_tx_urb(hdev, urb);
}
return -EILSEQ;
@@ -3352,7 +3393,6 @@ static int btusb_setup_qca(struct hci_dev *hdev)
* work with the likes of HSP/HFP mSBC.
*/
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
return 0;
}
@@ -3795,6 +3835,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
hdev->set_bdaddr = btmtk_set_bdaddr;
+ set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 785f445dd60d..d7e0b75db8a6 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -53,10 +53,12 @@
* struct bcm_device_data - device specific data
* @no_early_set_baudrate: Disallow set baudrate before driver setup()
* @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
+ * @max_autobaud_speed: max baudrate supported by device in autobaud mode
*/
struct bcm_device_data {
bool no_early_set_baudrate;
bool drive_rts_on_open;
+ u32 max_autobaud_speed;
};
/**
@@ -99,6 +101,8 @@ struct bcm_device_data {
* @no_early_set_baudrate: don't set_baudrate before setup()
* @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
* @pcm_int_params: keep the initial PCM configuration
+ * @use_autobaud_mode: start Bluetooth device in autobaud mode
+ * @max_autobaud_speed: max baudrate supported by device in autobaud mode
*/
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
@@ -136,7 +140,9 @@ struct bcm_device {
#endif
bool no_early_set_baudrate;
bool drive_rts_on_open;
+ bool use_autobaud_mode;
u8 pcm_int_params[5];
+ u32 max_autobaud_speed;
};
/* generic bcm uart resources */
@@ -472,15 +478,20 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
- if (bcm->dev->drive_rts_on_open)
+ if (bcm->dev->use_autobaud_mode)
+ hci_uart_set_flow_control(hu, false); /* Assert BT_UART_CTS_N */
+ else if (bcm->dev->drive_rts_on_open)
hci_uart_set_flow_control(hu, true);
- hu->init_speed = bcm->dev->init_speed;
+ if (bcm->dev->use_autobaud_mode && bcm->dev->max_autobaud_speed)
+ hu->init_speed = min(bcm->dev->oper_speed, bcm->dev->max_autobaud_speed);
+ else
+ hu->init_speed = bcm->dev->init_speed;
/* If oper_speed is set, ldisc/serdev will set the baudrate
* before calling setup()
*/
- if (!bcm->dev->no_early_set_baudrate)
+ if (!bcm->dev->no_early_set_baudrate && !bcm->dev->use_autobaud_mode)
hu->oper_speed = bcm->dev->oper_speed;
err = bcm_gpio_set_power(bcm->dev, true);
@@ -564,6 +575,7 @@ static int bcm_setup(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
bool fw_load_done = false;
+ bool use_autobaud_mode = (bcm->dev ? bcm->dev->use_autobaud_mode : 0);
unsigned int speed;
int err;
@@ -572,7 +584,7 @@ static int bcm_setup(struct hci_uart *hu)
hu->hdev->set_diag = bcm_set_diag;
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- err = btbcm_initialize(hu->hdev, &fw_load_done);
+ err = btbcm_initialize(hu->hdev, &fw_load_done, use_autobaud_mode);
if (err)
return err;
@@ -580,8 +592,8 @@ static int bcm_setup(struct hci_uart *hu)
return 0;
/* Init speed if any */
- if (hu->init_speed)
- speed = hu->init_speed;
+ if (bcm->dev && bcm->dev->init_speed)
+ speed = bcm->dev->init_speed;
else if (hu->proto->init_speed)
speed = hu->proto->init_speed;
else
@@ -616,7 +628,7 @@ static int bcm_setup(struct hci_uart *hu)
btbcm_write_pcm_int_params(hu->hdev, &params);
}
- err = btbcm_finalize(hu->hdev, &fw_load_done);
+ err = btbcm_finalize(hu->hdev, &fw_load_done, use_autobaud_mode);
if (err)
return err;
@@ -1197,6 +1209,8 @@ static int bcm_acpi_probe(struct bcm_device *dev)
static int bcm_of_probe(struct bcm_device *bdev)
{
+ bdev->use_autobaud_mode = device_property_read_bool(bdev->dev,
+ "brcm,requires-autobaud-mode");
device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed);
device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
bdev->pcm_int_params, 5);
@@ -1512,6 +1526,7 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
data = device_get_match_data(bcmdev->dev);
if (data) {
+ bcmdev->max_autobaud_speed = data->max_autobaud_speed;
bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
bcmdev->drive_rts_on_open = data->drive_rts_on_open;
}
@@ -1535,6 +1550,10 @@ static struct bcm_device_data bcm43438_device_data = {
.drive_rts_on_open = true,
};
+static struct bcm_device_data cyw55572_device_data = {
+ .max_autobaud_speed = 921600,
+};
+
static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm20702a1" },
{ .compatible = "brcm,bcm4329-bt" },
@@ -1544,8 +1563,10 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm43430a0-bt" },
{ .compatible = "brcm,bcm43430a1-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
+ { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data },
{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
+ { .compatible = "infineon,cyw55572-bt", .data = &cyw55572_device_data },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 7249b91d9b91..78afb9a348e7 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1217,7 +1217,11 @@ static struct platform_driver intel_driver = {
int __init intel_init(void)
{
- platform_driver_register(&intel_driver);
+ int err;
+
+ err = platform_driver_register(&intel_driver);
+ if (err)
+ return err;
return hci_uart_register_proto(&intel_proto);
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index eab34e24d944..8df11016fd51 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1588,7 +1588,7 @@ static bool qca_wakeup(struct hci_dev *hdev)
wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
- return !wakeup;
+ return wakeup;
}
static int qca_regulator_init(struct hci_uart *hu)
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 4cda890ce647..c0e5f42ec6b7 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -231,6 +231,15 @@ static int hci_uart_setup(struct hci_dev *hdev)
return 0;
}
+/* Check if the device is wakeable */
+static bool hci_uart_wakeup(struct hci_dev *hdev)
+{
+ /* HCI UART devices are assumed to be wakeable by default.
+ * Implement wakeup callback to override this behavior.
+ */
+ return true;
+}
+
/** hci_uart_write_wakeup - transmit buffer wakeup
* @serdev: serial device
*
@@ -342,6 +351,8 @@ int hci_uart_register_device(struct hci_uart *hu,
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
hdev->setup = hci_uart_setup;
+ if (!hdev->wakeup)
+ hdev->wakeup = hci_uart_wakeup;
SET_HCIDEV_DEV(hdev, &hu->serdev->dev);
if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags))
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 378f5d62a991..2e564803e786 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -379,7 +379,7 @@ static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
- * @child: the device node to be updated the I/O resource
+ * @adev: ACPI companion of the device node whose I/O resource is to be updated
* @hostdev: the device node associated with host controller
* @res: double pointer to be set to the address of translated resources
* @num_res: pointer to variable to hold the number of translated resources
@@ -390,31 +390,24 @@ static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
* host-relative address resource. This function will return the translated
* logical PIO addresses for each child devices resources.
*/
-static int hisi_lpc_acpi_set_io_res(struct device *child,
+static int hisi_lpc_acpi_set_io_res(struct acpi_device *adev,
struct device *hostdev,
const struct resource **res, int *num_res)
{
- struct acpi_device *adev;
- struct acpi_device *host;
+ struct acpi_device *host = to_acpi_device(adev->dev.parent);
struct resource_entry *rentry;
LIST_HEAD(resource_list);
struct resource *resources;
int count;
int i;
- if (!child || !hostdev)
- return -EINVAL;
-
- host = to_acpi_device(hostdev);
- adev = to_acpi_device(child);
-
if (!adev->status.present) {
- dev_dbg(child, "device is not present\n");
+ dev_dbg(&adev->dev, "device is not present\n");
return -EIO;
}
if (acpi_device_enumerated(adev)) {
- dev_dbg(child, "has been enumerated\n");
+ dev_dbg(&adev->dev, "has been enumerated\n");
return -EIO;
}
@@ -425,7 +418,7 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
*/
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count <= 0) {
- dev_dbg(child, "failed to get resources\n");
+ dev_dbg(&adev->dev, "failed to get resources\n");
return count ? count : -EIO;
}
@@ -454,7 +447,7 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
continue;
ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]);
if (ret) {
- dev_err(child, "translate IO range %pR failed (%d)\n",
+ dev_err(&adev->dev, "translate IO range %pR failed (%d)\n",
&resources[i], ret);
return ret;
}
@@ -471,6 +464,12 @@ static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused)
return 0;
}
+static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_used)
+{
+ acpi_device_clear_enumerated(adev);
+ return 0;
+}
+
struct hisi_lpc_acpi_cell {
const char *hid;
const char *name;
@@ -480,115 +479,114 @@ struct hisi_lpc_acpi_cell {
static void hisi_lpc_acpi_remove(struct device *hostdev)
{
- struct acpi_device *adev = ACPI_COMPANION(hostdev);
- struct acpi_device *child;
-
device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
-
- list_for_each_entry(child, &adev->children, node)
- acpi_device_clear_enumerated(child);
+ acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
+ hisi_lpc_acpi_clear_enumerated, NULL);
}
-/*
- * hisi_lpc_acpi_probe - probe children for ACPI FW
- * @hostdev: LPC host device pointer
- *
- * Returns 0 when successful, and a negative value for failure.
- *
- * Create a platform device per child, fixing up the resources
- * from bus addresses to Logical PIO addresses.
- *
- */
-static int hisi_lpc_acpi_probe(struct device *hostdev)
+static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
{
- struct acpi_device *adev = ACPI_COMPANION(hostdev);
- struct acpi_device *child;
+ const char *hid = acpi_device_hid(child);
+ struct device *hostdev = data;
+ const struct hisi_lpc_acpi_cell *cell;
+ struct platform_device *pdev;
+ const struct resource *res;
+ bool found = false;
+ int num_res;
int ret;
- /* Only consider the children of the host */
- list_for_each_entry(child, &adev->children, node) {
- const char *hid = acpi_device_hid(child);
- const struct hisi_lpc_acpi_cell *cell;
- struct platform_device *pdev;
- const struct resource *res;
- bool found = false;
- int num_res;
-
- ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev, &res,
- &num_res);
- if (ret) {
- dev_warn(hostdev, "set resource fail (%d)\n", ret);
- goto fail;
- }
+ ret = hisi_lpc_acpi_set_io_res(child, hostdev, &res, &num_res);
+ if (ret) {
+ dev_warn(hostdev, "set resource fail (%d)\n", ret);
+ return ret;
+ }
- cell = (struct hisi_lpc_acpi_cell []){
- /* ipmi */
- {
- .hid = "IPI0001",
- .name = "hisi-lpc-ipmi",
- },
- /* 8250-compatible uart */
- {
- .hid = "HISI1031",
- .name = "serial8250",
- .pdata = (struct plat_serial8250_port []) {
- {
- .iobase = res->start,
- .uartclk = 1843200,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF,
- },
- {}
+ cell = (struct hisi_lpc_acpi_cell []){
+ /* ipmi */
+ {
+ .hid = "IPI0001",
+ .name = "hisi-lpc-ipmi",
+ },
+ /* 8250-compatible uart */
+ {
+ .hid = "HISI1031",
+ .name = "serial8250",
+ .pdata = (struct plat_serial8250_port []) {
+ {
+ .iobase = res->start,
+ .uartclk = 1843200,
+ .iotype = UPIO_PORT,
+ .flags = UPF_BOOT_AUTOCONF,
},
- .pdata_size = 2 *
- sizeof(struct plat_serial8250_port),
+ {}
},
- {}
- };
-
- for (; cell && cell->name; cell++) {
- if (!strcmp(cell->hid, hid)) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_warn(hostdev,
- "could not find cell for child device (%s), discarding\n",
- hid);
- continue;
+ .pdata_size = 2 *
+ sizeof(struct plat_serial8250_port),
+ },
+ {}
+ };
+
+ for (; cell && cell->name; cell++) {
+ if (!strcmp(cell->hid, hid)) {
+ found = true;
+ break;
}
+ }
- pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
- if (!pdev) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!found) {
+ dev_warn(hostdev,
+ "could not find cell for child device (%s), discarding\n",
+ hid);
+ return 0;
+ }
- pdev->dev.parent = hostdev;
- ACPI_COMPANION_SET(&pdev->dev, child);
+ pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
- ret = platform_device_add_resources(pdev, res, num_res);
- if (ret)
- goto fail;
+ pdev->dev.parent = hostdev;
+ ACPI_COMPANION_SET(&pdev->dev, child);
- ret = platform_device_add_data(pdev, cell->pdata,
- cell->pdata_size);
- if (ret)
- goto fail;
+ ret = platform_device_add_resources(pdev, res, num_res);
+ if (ret)
+ goto fail;
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
+ ret = platform_device_add_data(pdev, cell->pdata, cell->pdata_size);
+ if (ret)
+ goto fail;
- acpi_device_set_enumerated(child);
- }
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail;
+ acpi_device_set_enumerated(child);
return 0;
fail:
- hisi_lpc_acpi_remove(hostdev);
+ platform_device_put(pdev);
+ return ret;
+}
+
+/*
+ * hisi_lpc_acpi_probe - probe children for ACPI FW
+ * @hostdev: LPC host device pointer
+ *
+ * Returns 0 when successful, and a negative value for failure.
+ *
+ * Create a platform device per child, fixing up the resources
+ * from bus addresses to Logical PIO addresses.
+ *
+ */
+static int hisi_lpc_acpi_probe(struct device *hostdev)
+{
+ int ret;
+
+ /* Only consider the children of the host */
+ ret = acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
+ hisi_lpc_acpi_add_child, hostdev);
+ if (ret)
+ hisi_lpc_acpi_remove(hostdev);
+
return ret;
}
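Editorial note: the hisi_lpc rework above is the usual conversion from walking acpi_device->children directly to acpi_dev_for_each_child(); the per-child body becomes an int callback taking (struct acpi_device *, void *), and a non-zero return is assumed here to stop the walk and be propagated, which is how the new hisi_lpc_acpi_probe() treats the return value. A minimal sketch of the pattern with hypothetical names:

/* Hypothetical example of the acpi_dev_for_each_child() callback pattern
 * used above; my_add_child() and the host pointer are illustrative only. */
#include <linux/acpi.h>
#include <linux/device.h>

static int my_add_child(struct acpi_device *child, void *data)
{
	struct device *host = data;

	dev_dbg(host, "found child %s\n", acpi_device_hid(child));
	return 0;	/* returning non-zero stops the iteration */
}

static int my_probe_children(struct device *host)
{
	return acpi_dev_for_each_child(ACPI_COMPANION(host),
				       my_add_child, host);
}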
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 40109a79017a..1dc8a3557a46 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1242,9 +1242,13 @@ static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
/* Channel name is same for both UL and DL */
mhi_dev->name = mhi_chan->name;
- dev_set_name(&mhi_dev->dev, "%s_%s",
+ ret = dev_set_name(&mhi_dev->dev, "%s_%s",
dev_name(&mhi_cntrl->mhi_dev->dev),
mhi_dev->name);
+ if (ret) {
+ put_device(&mhi_dev->dev);
+ return ret;
+ }
ret = device_add(&mhi_dev->dev);
if (ret)
@@ -1408,7 +1412,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_irq;
}
- dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+ ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+ if (ret)
+ goto err_put_dev;
+
mhi_dev->name = dev_name(&mhi_dev->dev);
mhi_cntrl->mhi_dev = mhi_dev;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index c137d55ccfa0..bf672de35131 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -178,6 +178,12 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
"bhi", mhi_cntrl);
if (ret)
return ret;
+ /*
+ * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
+ * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
+ * IRQ_NOAUTOEN is not applicable.
+ */
+ disable_irq(mhi_cntrl->irq[0]);
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
@@ -199,6 +205,8 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
+
+ disable_irq(mhi_cntrl->irq[mhi_event->irq]);
}
return 0;
@@ -978,12 +986,16 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
goto err_destroy_wq;
}
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto err_ida_free;
+
/* Register controller with MHI bus */
mhi_dev = mhi_alloc_device(mhi_cntrl);
if (IS_ERR(mhi_dev)) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
ret = PTR_ERR(mhi_dev);
- goto err_ida_free;
+ goto error_setup_irq;
}
mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
@@ -1006,6 +1018,8 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
err_release_dev:
put_device(&mhi_dev->dev);
+error_setup_irq:
+ mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
@@ -1026,6 +1040,7 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
unsigned int i;
+ mhi_deinit_free_irq(mhi_cntrl);
mhi_destroy_debugfs(mhi_cntrl);
destroy_workqueue(mhi_cntrl->hiprio_wq);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 841626727f6b..9e545f2a5a26 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -557,6 +557,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
@@ -569,6 +571,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* T99W368 (sdx65) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
@@ -578,6 +583,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* MV31-W (Cinterion) */
{ PCI_DEVICE(0x1269, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV31-W (Cinterion), based on new baseline */
+ { PCI_DEVICE(0x1269, 0x00b4),
+ .driver_data = (kernel_ulong_t) &mhi_mv31_info },
/* MV32-WA (Cinterion) */
{ PCI_DEVICE(0x1269, 0x00ba),
.driver_data = (kernel_ulong_t) &mhi_mv32_info },
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index dc2e8ff3bff2..4a42186ff111 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -500,7 +500,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
- free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ disable_irq(mhi_cntrl->irq[mhi_event->irq]);
tasklet_kill(&mhi_event->task);
}
@@ -1060,12 +1060,13 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
enum mhi_state state;
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 interval_us = 25000; /* poll register field every 25 milliseconds */
- int ret;
+ int ret, i;
dev_info(dev, "Requested to power ON\n");
@@ -1117,9 +1118,15 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
- ret = mhi_init_irq_setup(mhi_cntrl);
- if (ret)
- goto error_exit;
+ /* IRQs have been requested during probe, so we just need to enable them. */
+ enable_irq(mhi_cntrl->irq[0]);
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ enable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ }
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
@@ -1182,7 +1189,7 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
/* Wait for shutdown to complete */
flush_work(&mhi_cntrl->st_worker);
- free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+ disable_irq(mhi_cntrl->irq[0]);
}
EXPORT_SYMBOL_GPL(mhi_power_down);
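Editorial note: taken together, the mhi/host/init.c and pm.c hunks move MHI from requesting and freeing its IRQs on every power cycle to a request-once model: IRQs are requested and immediately disabled at controller registration, enable_irq()/disable_irq() toggles them across power up/down, and free_irq() only happens at unregistration. A condensed, driver-agnostic sketch of that lifecycle:

/* Condensed sketch of the IRQ lifecycle the MHI rework adopts; the demo_*
 * names are placeholders, not MHI symbols. */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* At registration: request once, then park the line. IRQF_SHARED is used
 * by default, which rules out IRQ_NOAUTOEN, hence the explicit disable. */
static int demo_register(struct device *dev, int irq)
{
	int ret = request_irq(irq, demo_handler, IRQF_SHARED, "demo", dev);

	if (ret)
		return ret;
	disable_irq(irq);
	return 0;
}

/* Power transitions only toggle the line; free_irq() waits for teardown. */
static void demo_power_up(int irq)	{ enable_irq(irq); }
static void demo_power_down(int irq)	{ disable_irq(irq); }
static void demo_unregister(struct device *dev, int irq)
{
	free_irq(irq, dev);
}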
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index db612045616f..5dc2669432ba 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Address map functions for Marvell EBU SoCs (Kirkwood, Armada
* 370/XP, Dove, Orion5x and MV78xx0)
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* The Marvell EBU SoCs have a configurable physical address space:
* the physical address at which certain devices (PCIe, NOR, NAND,
* etc.) sit can be configured. The configuration takes place through
@@ -25,8 +22,8 @@
*
* - Reads out the SDRAM address decoding windows at initialization
* time, and fills the mvebu_mbus_dram_info structure with these
- * informations. The exported function mv_mbus_dram_info() allow
- * device drivers to get those informations related to the SDRAM
+ * information. The exported function mv_mbus_dram_info() allows
+ * device drivers to get this information related to the SDRAM
* address decoding windows. This is because devices also have their
* own windows (configured through registers that are part of each
* device register space), and therefore the drivers for Marvell
@@ -123,7 +120,7 @@ struct mvebu_mbus_soc_data {
};
/*
- * Used to store the state of one MBus window accross suspend/resume.
+ * Used to store the state of one MBus window across suspend/resume.
*/
struct mvebu_mbus_win_data {
u32 ctrl;
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index dcfb32ee5cb6..eb1ba6319fda 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP L3 Interconnect error handling driver
*
* Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index 73431f81da28..bb3eebd3465d 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP L3 Interconnect error handling driver header
*
* Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* sricharan <r.sricharan@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __OMAP_L3_NOC_H
#define __OMAP_L3_NOC_H
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 60b082fe2ed0..4cd2e127946e 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* RSB (Reduced Serial Bus) driver.
*
* Author: Chen-Yu Tsai <wens@csie.org>
*
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
* The RSB controller looks like an SMBus controller which only supports
* byte and word data transfers. But, it differs from standard SMBus
* protocol on several aspects:
@@ -31,7 +28,6 @@
* This document is officially released by Allwinner.
*
* This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index 35b59f92fa66..662266719682 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for NVIDIA Generic Memory Interface
*
* Copyright (C) 2016 Host Mobility AB. All rights reserved.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 9989ce904a37..38c886dc2ed6 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NBUS driver for TS-4600 based boards
*
* Copyright (c) 2016 - Savoir-faire Linux
* Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* This driver implements a GPIOs bit-banged bus, called the NBUS by Technologic
* Systems. It is used to communicate with the peripherals in the FPGA on the
* TS-4600 SoM.
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 8e78b37d0f6a..ceded5772aac 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -817,7 +817,7 @@ probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
probe_fail_cleanup_disk:
- blk_cleanup_disk(gd.disk);
+ put_disk(gd.disk);
probe_fail_free_tag_set:
blk_mq_free_tag_set(&gd.tag_set);
probe_fail_free_cd_info:
@@ -831,7 +831,6 @@ probe_fail_no_mem:
static int remove_gdrom(struct platform_device *devptr)
{
- blk_cleanup_queue(gd.gdrom_rq);
blk_mq_free_tag_set(&gd.tag_set);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0b6c03643ddc..0f378d29dab0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -247,11 +247,6 @@ config SONYPI
To compile this driver as a module, choose M here: the
module will be called sonypi.
-config GPIO_TB0219
- tristate "TANBAC TB0219 GPIO support"
- depends on TANBAC_TB022X
- select GPIO_VR41XX
-
source "drivers/char/pcmcia/Kconfig"
config MWAVE
@@ -431,7 +426,6 @@ config ADI
config RANDOM_TRUST_CPU
bool "Initialize RNG using CPU RNG instructions"
default y
- depends on ARCH_RANDOM
help
Initialize the RNG using random numbers supplied by the CPU's
RNG instructions (e.g. RDRAND), if supported and available. These
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 264eb398fdd4..1b35d1724565 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
-obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
obj-$(CONFIG_MWAVE) += mwave/
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fe7e2105e766..bf6716ff863b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -20,7 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
-#include <linux/intel-iommu.h>
+#include <linux/iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
@@ -573,18 +573,15 @@ static void intel_gtt_cleanup(void)
*/
static inline int needs_ilk_vtd_wa(void)
{
-#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
+ /*
+ * Query iommu subsystem to see if we need the workaround. Presumably
+ * that was loaded first.
*/
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
+ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ device_iommu_mapped(&intel_private.pcidev->dev));
}
static bool intel_gtt_can_wc(void)
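The hunk above replaces the CONFIG_INTEL_IOMMU-only intel_iommu_gfx_mapped flag with the generic device_iommu_mapped() helper, so the workaround check no longer depends on driver-internal IOMMU state. As a hedged sketch, the same style of per-device check in an unrelated PCI driver (the function and quirk are invented for illustration) could look like:

#include <linux/iommu.h>
#include <linux/pci.h>

/* Ask the generic IOMMU core whether this device sits behind an active IOMMU. */
static bool foo_needs_iommu_quirk(struct pci_dev *pdev)
{
	return device_iommu_mapped(&pdev->dev);
}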
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 230cf852fa9c..e795390b070f 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -544,7 +544,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
wake_up_interruptible(&apm_waitqueue);
/*
- * Wait for the the suspend_acks_pending variable to drop to
+ * Wait for the suspend_acks_pending variable to drop to
* zero, meaning everybody acked the suspend event (or the
* process was killed.)
*
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b3f2d55dc551..3da8e85f8aae 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -87,7 +87,7 @@ config HW_RANDOM_BA431
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
- ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index a43743887db1..06bc060534d8 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom Corporation
*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation version 2.
-*
-* This program is distributed "as is" WITHOUT ANY WARRANTY of any
-* kind, whether express or implied; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
*/
/*
* DESCRIPTION: The Broadcom iProc RNG200 Driver
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 8da1d7917bdc..429e956f34e1 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -23,7 +23,7 @@ static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
buf = (unsigned long *)data;
for (i = 0; i < len; i++)
- powernv_get_random_long(buf++);
+ pnv_get_random_long(buf++);
return len * sizeof(unsigned long);
}
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 2beaa35c0d74..795853dfc46b 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -108,7 +108,6 @@ static ssize_t trng_counter_show(struct device *dev,
{
u64 dev_counter = atomic64_read(&trng_dev_counter);
u64 hwrng_counter = atomic64_read(&trng_hwrng_counter);
-#if IS_ENABLED(CONFIG_ARCH_RANDOM)
u64 arch_counter = atomic64_read(&s390_arch_random_counter);
return sysfs_emit(buf,
@@ -118,14 +117,6 @@ static ssize_t trng_counter_show(struct device *dev,
"total: %llu\n",
dev_counter, hwrng_counter, arch_counter,
dev_counter + hwrng_counter + arch_counter);
-#else
- return sysfs_emit(buf,
- "trng: %llu\n"
- "hwrng: %llu\n"
- "total: %llu\n",
- dev_counter, hwrng_counter,
- dev_counter + hwrng_counter);
-#endif
}
static DEVICE_ATTR(byte_counter, 0444, trng_counter_show, NULL);
@@ -261,5 +252,5 @@ static void __exit trng_exit(void)
trng_debug_exit();
}
-module_cpu_feature_match(MSA, trng_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init);
module_exit(trng_exit);
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 7444cc146e86..a9a0a3b09c8b 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -145,7 +145,7 @@ static int via_rng_init(struct hwrng *rng)
}
/* Control the RNG via MSR. Tread lightly and pay very close
- * close attention to values written, as the reserved fields
+ * attention to values written, as the reserved fields
* are documented to be "undefined and unpredictable"; but it
* does not say to write them as zero, so I make a guess that
* we restore the values we find in the register.
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e3dd1dd3dd22..79d7d4e4e582 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -229,7 +229,7 @@ static void crng_reseed(void)
/*
* This generates a ChaCha block using the provided key, and then
- * immediately overwites that key with half the block. It returns
+ * immediately overwrites that key with half the block. It returns
* the resultant ChaCha state to the user, along with the second
* half of the block containing 32 bytes of random data that may
* be used; random_data_len may not be greater than 32.
@@ -596,12 +596,20 @@ static void extract_entropy(void *buf, size_t len)
unsigned long rdseed[32 / sizeof(long)];
size_t counter;
} block;
- size_t i;
+ size_t i, longs;
- for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
- if (!arch_get_random_seed_long(&block.rdseed[i]) &&
- !arch_get_random_long(&block.rdseed[i]))
- block.rdseed[i] = random_get_entropy();
+ for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
+ longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ block.rdseed[i++] = random_get_entropy();
}
spin_lock_irqsave(&input_pool.lock, flags);
@@ -643,10 +651,10 @@ static void __cold _credit_init_bits(size_t bits)
add = min_t(size_t, bits, POOL_BITS);
+ orig = READ_ONCE(input_pool.init_bits);
do {
- orig = READ_ONCE(input_pool.init_bits);
new = min_t(unsigned int, POOL_BITS, orig + add);
- } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
+ } while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
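The _credit_init_bits() change above can hoist the READ_ONCE() out of the loop because try_cmpxchg() writes the observed value back into orig when the exchange fails. A minimal, self-contained sketch of that retry pattern, using the GCC/Clang __atomic builtins as userspace stand-ins for the kernel primitives (the saturating counter itself is purely illustrative):

#include <stdio.h>

#define POOL_BITS 256

static unsigned int init_bits;

/* Saturating add on a shared counter: loop only if another thread raced us;
 * the builtin refreshes 'orig' with the value it actually found. */
static void credit_bits(unsigned int add)
{
	unsigned int orig = __atomic_load_n(&init_bits, __ATOMIC_RELAXED);
	unsigned int new;

	do {
		new = orig + add;
		if (new > POOL_BITS)
			new = POOL_BITS;
	} while (!__atomic_compare_exchange_n(&init_bits, &orig, new, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
	credit_bits(100);
	credit_bits(200);
	printf("init_bits = %u\n", init_bits);	/* saturates at 256 */
	return 0;
}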
@@ -755,8 +763,8 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
spin_unlock_irqrestore(&input_pool.lock, flags);
if (crng_ready() && (action == PM_RESTORE_PREPARE ||
- (action == PM_POST_SUSPEND &&
- !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) {
+ (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
+ !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
crng_reseed();
pr_notice("crng reseeded on system resumption\n");
}
@@ -776,22 +784,31 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica
int __init random_init(const char *command_line)
{
ktime_t now = ktime_get_real();
- unsigned int i, arch_bits;
- unsigned long entropy;
+ size_t i, longs, arch_bits;
+ unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
- for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
- i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
- if (!arch_get_random_seed_long_early(&entropy) &&
- !arch_get_random_long_early(&entropy)) {
- entropy = random_get_entropy();
- arch_bits -= sizeof(entropy) * 8;
+ for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
}
- _mix_pool_bytes(&entropy, sizeof(entropy));
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
+ }
+ entropy[0] = random_get_entropy();
+ _mix_pool_bytes(entropy, sizeof(*entropy));
+ arch_bits -= sizeof(*entropy) * 8;
+ ++i;
}
_mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(utsname(), sizeof(*(utsname())));
@@ -1174,7 +1191,7 @@ static void __cold entropy_timer(struct timer_list *timer)
*/
static void __cold try_to_generate_entropy(void)
{
- enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
struct entropy_timer_state stack;
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
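Both reworked loops above follow the same fill strategy: take as many longs as the strongest source will give, fall back to the next source, and only sample the cycle counter one word at a time as a last resort. A hedged userspace sketch of that strategy, with hypothetical stand-ins for arch_get_random_seed_longs() and arch_get_random_longs():

#include <stddef.h>
#include <stdio.h>
#include <time.h>

/* Stand-ins for the kernel helpers: fill at most 'max' longs, return the count. */
static size_t seed_longs(unsigned long *buf, size_t max)
{
	(void)buf; (void)max;
	return 0;			/* pretend a seed instruction is unavailable */
}

static size_t rand_longs(unsigned long *buf, size_t max)
{
	size_t n = max < 2 ? max : 2;	/* pretend the CPU yields two longs per call */
	for (size_t i = 0; i < n; i++)
		buf[i] = 0x9e3779b97f4a7c15UL + i;
	return n;
}

int main(void)
{
	unsigned long block[8];
	const size_t n = sizeof(block) / sizeof(block[0]);
	size_t i = 0, longs;

	while (i < n) {
		longs = seed_longs(&block[i], n - i);
		if (longs) { i += longs; continue; }
		longs = rand_longs(&block[i], n - i);
		if (longs) { i += longs; continue; }
		block[i++] = (unsigned long)clock();	/* weakest fallback, one word */
	}

	for (i = 0; i < n; i++)
		printf("%zu: %lx\n", i, block[i]);
	return 0;
}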
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
deleted file mode 100644
index 1f36be14978f..000000000000
--- a/drivers/char/tb0219.c
+++ /dev/null
@@ -1,359 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for TANBAC TB0219 base board.
- *
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/platform_device.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-#include <asm/io.h>
-#include <asm/reboot.h>
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/tb0219.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
-MODULE_LICENSE("GPL");
-
-static int major; /* default is dynamic major device number */
-module_param(major, int, 0);
-MODULE_PARM_DESC(major, "Major device number");
-
-static void (*old_machine_restart)(char *command);
-static void __iomem *tb0219_base;
-static DEFINE_SPINLOCK(tb0219_lock);
-
-#define tb0219_read(offset) readw(tb0219_base + (offset))
-#define tb0219_write(offset, value) writew((value), tb0219_base + (offset))
-
-#define TB0219_START 0x0a000000UL
-#define TB0219_SIZE 0x20UL
-
-#define TB0219_LED 0x00
-#define TB0219_GPIO_INPUT 0x02
-#define TB0219_GPIO_OUTPUT 0x04
-#define TB0219_DIP_SWITCH 0x06
-#define TB0219_MISC 0x08
-#define TB0219_RESET 0x0e
-#define TB0219_PCI_SLOT1_IRQ_STATUS 0x10
-#define TB0219_PCI_SLOT2_IRQ_STATUS 0x12
-#define TB0219_PCI_SLOT3_IRQ_STATUS 0x14
-
-typedef enum {
- TYPE_LED,
- TYPE_GPIO_OUTPUT,
-} tb0219_type_t;
-
-/*
- * Minor device number
- * 0 = 7 segment LED
- *
- * 16 = GPIO IN 0
- * 17 = GPIO IN 1
- * 18 = GPIO IN 2
- * 19 = GPIO IN 3
- * 20 = GPIO IN 4
- * 21 = GPIO IN 5
- * 22 = GPIO IN 6
- * 23 = GPIO IN 7
- *
- * 32 = GPIO OUT 0
- * 33 = GPIO OUT 1
- * 34 = GPIO OUT 2
- * 35 = GPIO OUT 3
- * 36 = GPIO OUT 4
- * 37 = GPIO OUT 5
- * 38 = GPIO OUT 6
- * 39 = GPIO OUT 7
- *
- * 48 = DIP switch 1
- * 49 = DIP switch 2
- * 50 = DIP switch 3
- * 51 = DIP switch 4
- * 52 = DIP switch 5
- * 53 = DIP switch 6
- * 54 = DIP switch 7
- * 55 = DIP switch 8
- */
-
-static inline char get_led(void)
-{
- return (char)tb0219_read(TB0219_LED);
-}
-
-static inline char get_gpio_input_pin(unsigned int pin)
-{
- uint16_t values;
-
- values = tb0219_read(TB0219_GPIO_INPUT);
- if (values & (1 << pin))
- return '1';
-
- return '0';
-}
-
-static inline char get_gpio_output_pin(unsigned int pin)
-{
- uint16_t values;
-
- values = tb0219_read(TB0219_GPIO_OUTPUT);
- if (values & (1 << pin))
- return '1';
-
- return '0';
-}
-
-static inline char get_dip_switch(unsigned int pin)
-{
- uint16_t values;
-
- values = tb0219_read(TB0219_DIP_SWITCH);
- if (values & (1 << pin))
- return '1';
-
- return '0';
-}
-
-static inline int set_led(char command)
-{
- tb0219_write(TB0219_LED, command);
-
- return 0;
-}
-
-static inline int set_gpio_output_pin(unsigned int pin, char command)
-{
- unsigned long flags;
- uint16_t value;
-
- if (command != '0' && command != '1')
- return -EINVAL;
-
- spin_lock_irqsave(&tb0219_lock, flags);
- value = tb0219_read(TB0219_GPIO_OUTPUT);
- if (command == '0')
- value &= ~(1 << pin);
- else
- value |= 1 << pin;
- tb0219_write(TB0219_GPIO_OUTPUT, value);
- spin_unlock_irqrestore(&tb0219_lock, flags);
-
- return 0;
-
-}
-
-static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
-{
- unsigned int minor;
- char value;
-
- minor = iminor(file_inode(file));
- switch (minor) {
- case 0:
- value = get_led();
- break;
- case 16 ... 23:
- value = get_gpio_input_pin(minor - 16);
- break;
- case 32 ... 39:
- value = get_gpio_output_pin(minor - 32);
- break;
- case 48 ... 55:
- value = get_dip_switch(minor - 48);
- break;
- default:
- return -EBADF;
- }
-
- if (len <= 0)
- return -EFAULT;
-
- if (put_user(value, buf))
- return -EFAULT;
-
- return 1;
-}
-
-static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- unsigned int minor;
- tb0219_type_t type;
- size_t i;
- int retval = 0;
- char c;
-
- minor = iminor(file_inode(file));
- switch (minor) {
- case 0:
- type = TYPE_LED;
- break;
- case 32 ... 39:
- type = TYPE_GPIO_OUTPUT;
- break;
- default:
- return -EBADF;
- }
-
- for (i = 0; i < len; i++) {
- if (get_user(c, data + i))
- return -EFAULT;
-
- switch (type) {
- case TYPE_LED:
- retval = set_led(c);
- break;
- case TYPE_GPIO_OUTPUT:
- retval = set_gpio_output_pin(minor - 32, c);
- break;
- }
-
- if (retval < 0)
- break;
- }
-
- return i;
-}
-
-static int tanbac_tb0219_open(struct inode *inode, struct file *file)
-{
- unsigned int minor;
-
- minor = iminor(inode);
- switch (minor) {
- case 0:
- case 16 ... 23:
- case 32 ... 39:
- case 48 ... 55:
- return stream_open(inode, file);
- default:
- break;
- }
-
- return -EBADF;
-}
-
-static int tanbac_tb0219_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static const struct file_operations tb0219_fops = {
- .owner = THIS_MODULE,
- .read = tanbac_tb0219_read,
- .write = tanbac_tb0219_write,
- .open = tanbac_tb0219_open,
- .release = tanbac_tb0219_release,
- .llseek = no_llseek,
-};
-
-static void tb0219_restart(char *command)
-{
- tb0219_write(TB0219_RESET, 0);
-}
-
-static void tb0219_pci_irq_init(void)
-{
- /* PCI Slot 1 */
- vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW);
-
- /* PCI Slot 2 */
- vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW);
-
- /* PCI Slot 3 */
- vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW);
-}
-
-static int tb0219_probe(struct platform_device *dev)
-{
- int retval;
-
- if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL)
- return -EBUSY;
-
- tb0219_base = ioremap(TB0219_START, TB0219_SIZE);
- if (tb0219_base == NULL) {
- release_mem_region(TB0219_START, TB0219_SIZE);
- return -ENOMEM;
- }
-
- retval = register_chrdev(major, "TB0219", &tb0219_fops);
- if (retval < 0) {
- iounmap(tb0219_base);
- tb0219_base = NULL;
- release_mem_region(TB0219_START, TB0219_SIZE);
- return retval;
- }
-
- old_machine_restart = _machine_restart;
- _machine_restart = tb0219_restart;
-
- tb0219_pci_irq_init();
-
- if (major == 0) {
- major = retval;
- printk(KERN_INFO "TB0219: major number %d\n", major);
- }
-
- return 0;
-}
-
-static int tb0219_remove(struct platform_device *dev)
-{
- _machine_restart = old_machine_restart;
-
- iounmap(tb0219_base);
- tb0219_base = NULL;
-
- release_mem_region(TB0219_START, TB0219_SIZE);
-
- return 0;
-}
-
-static struct platform_device *tb0219_platform_device;
-
-static struct platform_driver tb0219_device_driver = {
- .probe = tb0219_probe,
- .remove = tb0219_remove,
- .driver = {
- .name = "TB0219",
- },
-};
-
-static int __init tanbac_tb0219_init(void)
-{
- int retval;
-
- tb0219_platform_device = platform_device_alloc("TB0219", -1);
- if (!tb0219_platform_device)
- return -ENOMEM;
-
- retval = platform_device_add(tb0219_platform_device);
- if (retval < 0) {
- platform_device_put(tb0219_platform_device);
- return retval;
- }
-
- retval = platform_driver_register(&tb0219_device_driver);
- if (retval < 0)
- platform_device_unregister(tb0219_platform_device);
-
- return retval;
-}
-
-static void __exit tanbac_tb0219_exit(void)
-{
- platform_driver_unregister(&tb0219_device_driver);
- platform_device_unregister(tb0219_platform_device);
-}
-
-module_init(tanbac_tb0219_init);
-module_exit(tanbac_tb0219_exit);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 4a5516406c22..927088b2c3d3 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,6 +74,18 @@ config TCG_TIS_SPI_CR50
If you have a H1 secure module running Cr50 firmware on SPI bus,
say Yes and it will be accessible from within Linux.
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
depends on ARCH_SYNQUACER || COMPILE_TEST
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 66d39ea6bd10..0222b1ddb310 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -29,6 +29,7 @@ tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2163c6ee0d36..24ee4e1cc452 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -55,6 +55,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_TAG_RQU_COMMAND 193
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index f7dc986fa4a0..cf64c7385105 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -709,7 +709,12 @@ int tpm1_auto_startup(struct tpm_chip *chip)
if (rc)
goto out;
rc = tpm1_do_selftest(chip);
- if (rc) {
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
dev_err(&chip->dev, "TPM self test failed\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c1eb5d223839..65d03867e114 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -752,6 +752,12 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
out:
/*
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index dc56b976d816..757623bacfd5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -289,6 +289,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
+ int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -328,6 +329,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
out:
tpm_tis_ready(chip);
return size;
@@ -443,6 +451,12 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc < 0)
return rc;
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ return rc;
+ }
+
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6c203f36b8a1..66a5a13cd1df 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -121,6 +121,8 @@ struct tpm_tis_phy_ops {
u8 *result, enum tpm_tis_io_mode mode);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
};
static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
@@ -188,6 +190,14 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return rc;
}
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
+}
+
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 000000000000..ba0911b1d1ff
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF60
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to be mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result;
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ /* write register and data in one go */
+ phy->io_buf[0] = reg;
+ memcpy(phy->io_buf + sizeof(reg), value, len);
+
+ msg.len = sizeof(reg) + len;
+ msg.buf = phy->io_buf;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM failed to send an I2C NACK),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ const struct i2c_device_id *id)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static int tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+ return 0;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
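The driver above enables the TPM's checksum register (TPM_I2C_DATA_CSUM_ENABLE) and then compares TPM_DATA_CSUM against a locally computed CRC; as the comment in tpm_tis_i2c_verify_crc() notes, the CRC-CCITT result is byte-swapped so the comparison works regardless of host endianness. A small self-contained sketch of that host-side check, with a bitwise CRC routine assumed to match the kernel's crc_ccitt() (LSB-first, polynomial 0x8408) and example bytes that are illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC, assumed equivalent to the kernel's table-based crc_ccitt(). */
static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	const uint8_t cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
				0x00, 0x00, 0x01, 0x7b };	/* arbitrary example */
	uint16_t crc_host = swab16(crc_ccitt(0, cmd, sizeof(cmd)));
	uint16_t crc_tpm = crc_host;	/* pretend TPM_DATA_CSUM returned this */

	printf("host 0x%04x, tpm 0x%04x -> %s\n", crc_host, crc_tpm,
	       crc_host == crc_tpm ? "match" : "mismatch (-EIO)");
	return 0;
}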
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
index cdbc7d7deba9..2fbeb71316f8 100644
--- a/drivers/clk/.kunitconfig
+++ b/drivers/clk/.kunitconfig
@@ -2,3 +2,4 @@ CONFIG_KUNIT=y
CONFIG_COMMON_CLK=y
CONFIG_CLK_KUNIT_TEST=y
CONFIG_CLK_GATE_KUNIT_TEST=y
+CONFIG_UML_PCI_OVER_VIRTIO=n
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index e9da0e69bf6c..e1fda6ad5cd5 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys AXS10X SDP I2S PLL clock driver
*
* Copyright (C) 2016 Synopsys
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/platform_device.h>
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 500345d99adb..90fb0e6ff573 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys AXS10X SDP Generic PLL clock driver
*
* Copyright (C) 2017 Synopsys
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/platform_device.h>
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index ec738f74a026..77266afb1c79 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -22,9 +22,9 @@ config CLK_BCM2835
config CLK_BCM_63XX
bool "Broadcom BCM63xx clock support"
- depends on ARCH_BCM_63XX || COMPILE_TEST
+ depends on ARCH_BCMBCA || COMPILE_TEST
select COMMON_CLK_IPROC
- default ARCH_BCM_63XX
+ default ARCH_BCMBCA
help
Enable common clock framework support for Broadcom BCM63xx DSL SoCs
based on the ARM architecture
diff --git a/drivers/clk/bcm/clk-bcm21664.c b/drivers/clk/bcm/clk-bcm21664.c
index eeae4cad2281..520c3aeb4ea9 100644
--- a/drivers/clk/bcm/clk-bcm21664.c
+++ b/drivers/clk/bcm/clk-bcm21664.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Broadcom Corporation
* Copyright 2014 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "clk-kona.h"
diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c
index 502a487d62c5..823d5dfa31b8 100644
--- a/drivers/clk/bcm/clk-bcm281xx.c
+++ b/drivers/clk/bcm/clk-bcm281xx.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "clk-kona.h"
diff --git a/drivers/clk/bcm/clk-bcm63xx.c b/drivers/clk/bcm/clk-bcm63xx.c
index fbc17ae5ff2b..c8383834fb39 100644
--- a/drivers/clk/bcm/clk-bcm63xx.c
+++ b/drivers/clk/bcm/clk-bcm63xx.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
#include <linux/init.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c
index b8d073e4855f..43b04fc4c493 100644
--- a/drivers/clk/bcm/clk-cygnus.c
+++ b/drivers/clk/bcm/clk-cygnus.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/clk/bcm/clk-hr2.c b/drivers/clk/bcm/clk-hr2.c
index f7c5b7379475..9f6318f3752b 100644
--- a/drivers/clk/bcm/clk-hr2.c
+++ b/drivers/clk/bcm/clk-hr2.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c
index d7d628214b85..9e86c0c10b57 100644
--- a/drivers/clk/bcm/clk-iproc-armpll.c
+++ b/drivers/clk/bcm/clk-iproc-armpll.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index e062dd4992ea..dcacf55c55ae 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 33da30f99c79..1a098db12062 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h
index d7e5b94bed45..0151d6ae1661 100644
--- a/drivers/clk/bcm/clk-iproc.h
+++ b/drivers/clk/bcm/clk-iproc.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2014 Broadcom Corporation */
#ifndef _CLK_IPROC_H
#define _CLK_IPROC_H
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index 5dd65164c8b1..338558f6fbae 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index cc3b1e1bc087..ec5749e301ba 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "clk-kona.h"
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index f4b39bb5558a..e09655024ac2 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLK_KONA_H
diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c
index adc14145861a..065f4290aaad 100644
--- a/drivers/clk/bcm/clk-ns2.c
+++ b/drivers/clk/bcm/clk-ns2.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/clk/bcm/clk-nsp.c b/drivers/clk/bcm/clk-nsp.c
index cf66f640a47d..c24c9adbc6f3 100644
--- a/drivers/clk/bcm/clk-nsp.c
+++ b/drivers/clk/bcm/clk-nsp.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index f9d5b7334341..4fb4fd4b06bd 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -4,42 +4,101 @@
#include <linux/export.h>
#include <linux/gfp.h>
+struct devm_clk_state {
+ struct clk *clk;
+ void (*exit)(struct clk *clk);
+};
+
static void devm_clk_release(struct device *dev, void *res)
{
- clk_put(*(struct clk **)res);
+ struct devm_clk_state *state = res;
+
+ if (state->exit)
+ state->exit(state->clk);
+
+ clk_put(state->clk);
}
-struct clk *devm_clk_get(struct device *dev, const char *id)
+static struct clk *__devm_clk_get(struct device *dev, const char *id,
+ struct clk *(*get)(struct device *dev, const char *id),
+ int (*init)(struct clk *clk),
+ void (*exit)(struct clk *clk))
{
- struct clk **ptr, *clk;
+ struct devm_clk_state *state;
+ struct clk *clk;
+ int ret;
- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
- clk = clk_get(dev, id);
- if (!IS_ERR(clk)) {
- *ptr = clk;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
+ clk = get(dev, id);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
}
+ if (init) {
+ ret = init(clk);
+ if (ret)
+ goto err_clk_init;
+ }
+
+ state->clk = clk;
+ state->exit = exit;
+
+ devres_add(dev, state);
+
return clk;
+
+err_clk_init:
+
+ clk_put(clk);
+err_clk_get:
+
+ devres_free(state);
+ return ERR_PTR(ret);
+}
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
-struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
- struct clk *clk = devm_clk_get(dev, id);
+ return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
- if (clk == ERR_PTR(-ENOENT))
- return NULL;
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
- return clk;
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
+
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
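The new devm_clk_get_prepared() and devm_clk_get_enabled() variants let a consumer drop both its clk_prepare_enable()/clk_disable_unprepare() calls and the matching error-unwind and remove() paths. A hedged sketch of a platform driver probe using the enabled variant (the device, structure and clock names are invented for illustration):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct clk *bus_clk;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * Get, prepare and enable the clock in one step; devres disables,
	 * unprepares and puts it again on unbind, so the clock needs no
	 * explicit cleanup in a remove() callback.
	 */
	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(priv->bus_clk))
		return PTR_ERR(priv->bus_clk);

	platform_set_drvdata(pdev, priv);
	return 0;
}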
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 54942d758ee6..f734e34735a9 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -78,7 +78,8 @@ static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
- const char *name, const char *parent_name, int index,
+ const char *name, const char *parent_name,
+ const struct clk_hw *parent_hw, int index,
unsigned long flags, unsigned int mult, unsigned int div,
bool devm)
{
@@ -110,6 +111,8 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
init.flags = flags;
if (parent_name)
init.parent_names = &parent_name;
+ else if (parent_hw)
+ init.parent_hws = &parent_hw;
else
init.parent_data = &pdata;
init.num_parents = 1;
@@ -148,16 +151,48 @@ struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
const char *name, unsigned int index, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, index,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, index,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index);
+/**
+ * devm_clk_hw_register_fixed_factor_parent_hw - Register a fixed factor clock with
+ * pointer to parent clock
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: fixed factor flags
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Return: Pointer to fixed factor clk_hw structure that was registered or
+ * an error pointer.
+ */
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, parent_hw,
+ -1, flags, mult, div, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_parent_hw);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL,
+ parent_hw, -1, flags, mult, div,
+ false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_parent_hw);
+
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
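The new *_parent_hw variants take a struct clk_hw pointer instead of a parent name string, which avoids a name lookup when the provider already holds the parent it just registered. A hedged sketch of a caller (clock and function names are illustrative):

#include <linux/clk-provider.h>

/* Register a fixed /2 child of a PLL this driver registered earlier. */
static struct clk_hw *foo_register_pll_div2(struct device *dev,
					     struct clk_hw *pll_hw)
{
	return devm_clk_hw_register_fixed_factor_parent_hw(dev, "foo_pll_div2",
							    pll_hw, 0 /* flags */,
							    1 /* mult */,
							    2 /* div */);
}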
@@ -204,22 +239,16 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
#ifdef CONFIG_OF
-static const struct of_device_id set_rate_parent_matches[] = {
- { .compatible = "allwinner,sun4i-a10-pll3-2x-clk" },
- { /* Sentinel */ },
-};
-
static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
- unsigned long flags = 0;
u32 div, mult;
int ret;
@@ -237,11 +266,8 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
- if (of_match_node(set_rate_parent_matches, node))
- flags |= CLK_SET_RATE_PARENT;
-
- hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
- flags, mult, div, false);
+ hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL, 0,
+ 0, mult, div, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index b4f8852201cb..60007b508590 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys HSDK SDP Generic PLL clock driver
*
* Copyright (C) 2017 Synopsys
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index d1535ac13e89..81cb90955d68 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
hw_data->hws[i] =
devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
- "lan966x", 0, base,
+ "lan966x", 0, gate_base,
clk_gate_desc[idx].bit_idx,
0, &clk_gate_lock);
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
index 58428d0043fd..3786a0153ad1 100644
--- a/drivers/clk/clk-moxart.c
+++ b/drivers/clk/clk-moxart.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* MOXA ART SoCs clock driver.
*
* Copyright (C) 2013 Jonas Jensen
*
* Jonas Jensen <jonas.jensen@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f00d4c1158d7..7fc191c15507 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4279,54 +4279,6 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
-static int devm_clk_match(struct device *dev, void *res, void *data)
-{
- struct clk *c = res;
- if (WARN_ON(!c))
- return 0;
- return c == data;
-}
-
-static int devm_clk_hw_match(struct device *dev, void *res, void *data)
-{
- struct clk_hw *hw = res;
-
- if (WARN_ON(!hw))
- return 0;
- return hw == data;
-}
-
-/**
- * devm_clk_unregister - resource managed clk_unregister()
- * @dev: device that is unregistering the clock data
- * @clk: clock to unregister
- *
- * Deallocate a clock allocated with devm_clk_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_unregister(struct device *dev, struct clk *clk)
-{
- WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
-}
-EXPORT_SYMBOL_GPL(devm_clk_unregister);
-
-/**
- * devm_clk_hw_unregister - resource managed clk_hw_unregister()
- * @dev: device that is unregistering the hardware-specific clock data
- * @hw: link to hardware-specific clock data
- *
- * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
-{
- WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
- hw));
-}
-EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
-
static void devm_clk_release(struct device *dev, void *res)
{
clk_put(*(struct clk **)res);
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 71c102d950ab..a2aaa14fc1ae 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -64,10 +64,13 @@ struct clk_fracn_gppll {
* Fout = Fvco / (rdiv * odiv)
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
- PLL_FRACN_GP(650000000U, 81, 0, 0, 0, 3),
- PLL_FRACN_GP(594000000U, 198, 0, 0, 0, 8),
- PLL_FRACN_GP(560000000U, 70, 0, 0, 0, 3),
- PLL_FRACN_GP(400000000U, 50, 0, 0, 0, 3),
+ PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
+ PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
+ PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
+ PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
+ PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
+ PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
+ PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
};
@@ -131,18 +134,7 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
mfi = FIELD_GET(PLL_MFI_MASK, pll_div);
rdiv = FIELD_GET(PLL_RDIV_MASK, pll_div);
- rdiv = rdiv + 1;
odiv = FIELD_GET(PLL_ODIV_MASK, pll_div);
- switch (odiv) {
- case 0:
- odiv = 2;
- break;
- case 1:
- odiv = 3;
- break;
- default:
- break;
- }
/*
* Sometimes, the recalculated rate has deviation due to
@@ -160,6 +152,20 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
if (rate)
return (unsigned long)rate;
+ if (!rdiv)
+ rdiv = rdiv + 1;
+
+ switch (odiv) {
+ case 0:
+ odiv = 2;
+ break;
+ case 1:
+ odiv = 3;
+ break;
+ default:
+ break;
+ }
+
/* Fvco = Fref * (MFI + MFN / MFD) */
fvco = fvco * mfi * mfd + fvco * mfn;
do_div(fvco, mfd * rdiv * odiv);
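As a worked check of the corrected decode above (assuming the usual 24 MHz oscillator reference on i.MX93, which is not stated in this hunk): the 400 MHz table entry has mfi = 50, mfn = 0, mfd = 1, rdiv = 0, odiv = 3. A raw rdiv of 0 is now treated as 1 and only odiv field values 0 and 1 are remapped (to 2 and 3), so Fvco = 24 MHz * (50 + 0/1) = 1200 MHz and Fout = 1200 MHz / (1 * 3) = 400 MHz, matching the table entry.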
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index edcc87661d1f..f5c9fa40491c 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -150,7 +150,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_A55_GATE, "a55", "a55_root", 0x8000, },
/* M33 critical clk for system run */
{ IMX93_CLK_CM33_GATE, "cm33", "m33_root", 0x8040, CLK_IS_CRITICAL },
- { IMX93_CLK_ADC1_GATE, "adc1", "osc_24m", 0x82c0, },
+ { IMX93_CLK_ADC1_GATE, "adc1", "adc_root", 0x82c0, },
{ IMX93_CLK_WDOG1_GATE, "wdog1", "osc_24m", 0x8300, },
{ IMX93_CLK_WDOG2_GATE, "wdog2", "osc_24m", 0x8340, },
{ IMX93_CLK_WDOG3_GATE, "wdog3", "osc_24m", 0x8380, },
@@ -160,7 +160,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
{ IMX93_CLK_MU_A_GATE, "mu_a", "bus_aon_root", 0x84c0, },
{ IMX93_CLK_MU_B_GATE, "mu_b", "bus_aon_root", 0x8500, },
- { IMX93_CLK_EDMA1_GATE, "edma1", "wakeup_axi_root", 0x8540, },
+ { IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, },
{ IMX93_CLK_GPIO1_GATE, "gpio1", "m33_root", 0x8880, },
@@ -219,7 +219,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
{ IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
- { IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_apb_root", 0x9700, },
+ { IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
{ IMX93_CLK_USB_TEST_60M_GATE, "usb_test_60m", "hsio_usb_test_60m_root", 0x9a40, },
{ IMX93_CLK_HSIO_TROUT_24M_GATE, "hsio_trout_24m", "osc_24m", 0x9a80, },
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 7e1b136e71ae..d4b4e74e22da 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SCI Clock driver for keystone based devices
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
#include <linux/err.h>
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index 47c2289f3d1d..edf1e2ed2b59 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -36,6 +36,14 @@ static const struct mtk_gate eth_clks[] = {
GATE_ETH(CLK_ETHSYS_CRYPTO, "crypto_clk", "ethif_sel", 29),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static const struct of_device_id of_match_clk_mt2701_eth[] = {
{ .compatible = "mediatek,mt2701-ethsys", },
{}
@@ -58,7 +66,7 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 79929ed37f83..1458109d99d9 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -35,6 +35,14 @@ static const struct mtk_gate g3d_clks[] = {
GATE_G3D(CLK_G3DSYS_CORE, "g3d_core", "mfg_sel", 0),
};
+static u16 rst_ofs[] = { 0xc, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -52,7 +60,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0xc);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index 1aa36cb93ad0..434cbbe8c037 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -33,6 +33,14 @@ static const struct mtk_gate hif_clks[] = {
GATE_HIF(CLK_HIFSYS_PCIE2, "pcie2_clk", "ethpll_500m_ck", 26),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static const struct of_device_id of_match_clk_mt2701_hif[] = {
{ .compatible = "mediatek,mt2701-hifsys", },
{}
@@ -57,7 +65,7 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev)
return r;
}
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 04ba356db2d7..9b442af37e67 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -735,6 +735,24 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static struct clk_hw_onecell_data *infra_clk_data;
static void __init mtk_infrasys_init_early(struct device_node *node)
@@ -787,7 +805,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return 0;
}
@@ -910,7 +928,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 2, 0x0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 410b059727ea..56980dd6c2ea 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1258,6 +1258,24 @@ static const struct mtk_pll_data plls[] = {
0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infra */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* peri */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -1361,7 +1379,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return r;
}
@@ -1383,7 +1401,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index b12d48705496..43de0477d5d9 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -65,6 +65,14 @@ static const struct mtk_gate sgmii_clks[] = {
"ssusb_cdr_fb", 5),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7622_ethsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -82,7 +90,7 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 58728e35e80a..67e96231dd25 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -76,6 +76,14 @@ static const struct mtk_gate pcie_clks[] = {
GATE_PCIE(CLK_SATA_PM_EN, "sata_pm_en", "univpll2_d4", 30),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -93,7 +101,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
@@ -115,7 +123,7 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index e4a5e5230861..3b55f8641fae 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -610,6 +610,24 @@ static struct mtk_composite peri_muxes[] = {
MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
};
+static u16 infrasys_rst_ofs[] = { 0x30, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static int mtk_topckgen_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -663,7 +681,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 1, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return 0;
}
@@ -714,7 +732,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
- mtk_register_reset_controller(node, 2, 0x0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index c49fd732c9b2..282dd6559465 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -76,6 +76,14 @@ static const struct mtk_gate sgmii_clks[2][4] = {
}
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7629_ethsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -92,7 +100,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index acaa97fda331..0c8b9e139789 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -71,6 +71,14 @@ static const struct mtk_gate pcie_clks[] = {
GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -88,7 +96,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
@@ -110,7 +118,7 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 9ef524b44862..b68888a034c4 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -514,6 +514,24 @@ static const struct mtk_composite peri_clks[] __initconst = {
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ }
+};
+
static void __init mtk_topckgen_init(struct device_node *node)
{
struct clk_hw_onecell_data *clk_data;
@@ -559,7 +577,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller(node, &clk_rst_desc[0]);
}
CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init);
@@ -587,7 +605,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller(node, &clk_rst_desc[1]);
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 0929db330852..b8529ee7199d 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -819,6 +819,24 @@ static const struct mtk_gate venclt_clks[] __initconst = {
GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ }
+};
+
static struct clk_hw_onecell_data *mt8173_top_clk_data __initdata;
static struct clk_hw_onecell_data *mt8173_pll_clk_data __initdata;
@@ -882,7 +900,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller(node, &clk_rst_desc[0]);
}
CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init);
@@ -910,7 +928,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller(node, &clk_rst_desc[1]);
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index b5c17988c337..8512101e1189 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -18,9 +18,6 @@
#include <dt-bindings/clock/mt8183-clk.h>
-/* Infra global controller reset set register */
-#define INFRA_RST0_SET_OFFSET 0x120
-
static DEFINE_SPINLOCK(mt8183_clk_lock);
static const struct mtk_fixed_clk top_fixed_clks[] = {
@@ -1153,6 +1150,19 @@ static const struct mtk_pll_data plls[] = {
0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
};
+static u16 infra_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_rst_ofs),
+};
+
static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -1240,7 +1250,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
return r;
}
- mtk_register_reset_controller_set_clr(node, 4, INFRA_RST0_SET_OFFSET);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index 2a7adc25abaa..df2a6bd1aefa 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt8186-clk.h>
+#include <dt-bindings/reset/mt8186-resets.h>
#include "clk-gate.h"
#include "clk-mtk.h"
@@ -191,9 +192,31 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_66M, "infra_ao_flashif_66m", "top_axi", 29),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8186_INFRA_THERMAL_CTRL_RST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8186_INFRA_PTP_CTRL_RST] = 1 * RST_NR_PER_BANK + 0,
+};
+
+static struct mtk_clk_rst_desc infra_ao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
static const struct mtk_clk_desc infra_ao_desc = {
.clks = infra_ao_clks,
.num_clks = ARRAY_SIZE(infra_ao_clks),
+ .rst_desc = &infra_ao_rst_desc,
};
static const struct of_device_id of_match_clk_mt8186_infra_ao[] = {
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index 87c3b79b79cf..635f7a0b629a 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -12,28 +12,15 @@
#include <dt-bindings/clock/mt8192-clk.h>
-static const struct mtk_gate_regs msdc_cg_regs = {
- .set_ofs = 0xb4,
- .clr_ofs = 0xb4,
- .sta_ofs = 0xb4,
-};
-
static const struct mtk_gate_regs msdc_top_cg_regs = {
.set_ofs = 0x0,
.clr_ofs = 0x0,
.sta_ofs = 0x0,
};
-#define GATE_MSDC(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &msdc_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-
#define GATE_MSDC_TOP(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &msdc_top_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-static const struct mtk_gate msdc_clks[] = {
- GATE_MSDC(CLK_MSDC_AXI_WRAP, "msdc_axi_wrap", "axi_sel", 22),
-};
-
static const struct mtk_gate msdc_top_clks[] = {
GATE_MSDC_TOP(CLK_MSDC_TOP_AES_0P, "msdc_top_aes_0p", "aes_msdcfde_sel", 0),
GATE_MSDC_TOP(CLK_MSDC_TOP_SRC_0P, "msdc_top_src_0p", "infra_msdc0_src", 1),
@@ -52,11 +39,6 @@ static const struct mtk_gate msdc_top_clks[] = {
GATE_MSDC_TOP(CLK_MSDC_TOP_AHB2AXI_BRG_AXI, "msdc_top_ahb2axi_brg_axi", "axi_sel", 14),
};
-static const struct mtk_clk_desc msdc_desc = {
- .clks = msdc_clks,
- .num_clks = ARRAY_SIZE(msdc_clks),
-};
-
static const struct mtk_clk_desc msdc_top_desc = {
.clks = msdc_top_clks,
.num_clks = ARRAY_SIZE(msdc_top_clks),
@@ -64,9 +46,6 @@ static const struct mtk_clk_desc msdc_top_desc = {
static const struct of_device_id of_match_clk_mt8192_msdc[] = {
{
- .compatible = "mediatek,mt8192-msdc",
- .data = &msdc_desc,
- }, {
.compatible = "mediatek,mt8192-msdc_top",
.data = &msdc_top_desc,
}, {
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index dda211b7a745..ebbd2798d9a3 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -18,6 +18,7 @@
#include "clk-pll.h"
#include <dt-bindings/clock/mt8192-clk.h>
+#include <dt-bindings/reset/mt8192-resets.h>
static DEFINE_SPINLOCK(mt8192_clk_lock);
@@ -1114,6 +1115,30 @@ static const struct mtk_gate top_clks[] = {
GATE_TOP(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 25),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8192_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8192_INFRA_RST2_PEXTP_PHY_SWRST] = 2 * RST_NR_PER_BANK + 15,
+ [MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
+ [MT8192_INFRA_RST4_PCIE_TOP_SWRST] = 4 * RST_NR_PER_BANK + 1,
+ [MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 12,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
#define MT8192_PLL_FMAX (3800UL * MHZ)
#define MT8192_PLL_FMIN (1500UL * MHZ)
#define MT8192_INTEGER_BITS 8
@@ -1240,6 +1265,10 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
if (r)
goto free_clk_data;
+ r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (r)
+ goto free_clk_data;
+
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 8ebe3b9415c4..97657f255618 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -7,6 +7,7 @@
#include "clk-mtk.h"
#include <dt-bindings/clock/mt8195-clk.h>
+#include <dt-bindings/reset/mt8195-resets.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -182,9 +183,32 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO4(CLK_INFRA_AO_PERI_UFS_MEM_SUB, "infra_ao_peri_ufs_mem_sub", "mem_466m", 31),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8195_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
+ [MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 10,
+};
+
+static struct mtk_clk_rst_desc infra_ao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
static const struct mtk_clk_desc infra_ao_desc = {
.clks = infra_ao_clks,
.num_clks = ARRAY_SIZE(infra_ao_clks),
+ .rst_desc = &infra_ao_rst_desc,
};
static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index b9188000ab3c..05a188c62119 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -444,6 +444,13 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, clk_data);
+ if (mcd->rst_desc) {
+ r = mtk_register_reset_controller_with_dev(&pdev->dev,
+ mcd->rst_desc);
+ if (r)
+ goto unregister_clks;
+ }
+
return r;
unregister_clks:
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index adb1304d35d4..1b95c484d5aa 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -13,6 +13,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "reset.h"
+
#define MAX_MUX_GATE_BIT 31
#define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1)
@@ -187,15 +189,10 @@ void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data);
struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
-void mtk_register_reset_controller(struct device_node *np,
- unsigned int num_regs, int regofs);
-
-void mtk_register_reset_controller_set_clr(struct device_node *np,
- unsigned int num_regs, int regofs);
-
struct mtk_clk_desc {
const struct mtk_gate *clks;
size_t num_clks;
+ const struct mtk_clk_rst_desc *rst_desc;
};
int mtk_clk_simple_probe(struct platform_device *pdev);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index bcec4b89f449..179505549a7c 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -8,55 +8,39 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <linux/slab.h>
-#include "clk-mtk.h"
+#include "reset.h"
-struct mtk_reset {
- struct regmap *regmap;
- int regofs;
- struct reset_controller_dev rcdev;
-};
-
-static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+static inline struct mtk_clk_rst_data *to_mtk_clk_rst_data(struct reset_controller_dev *rcdev)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
- unsigned int reg = data->regofs + ((id / 32) << 4);
-
- return regmap_write(data->regmap, reg, 1);
+ return container_of(rcdev, struct mtk_clk_rst_data, rcdev);
}
-static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int mtk_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool deassert)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
- unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4;
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+ unsigned int val = deassert ? 0 : ~0;
- return regmap_write(data->regmap, reg, 1);
+ return regmap_update_bits(data->regmap,
+ data->desc->rst_bank_ofs[id / RST_NR_PER_BANK],
+ BIT(id % RST_NR_PER_BANK), val);
}
static int mtk_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
-
- return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
- BIT(id % 32), ~0);
+ return mtk_reset_update(rcdev, id, false);
}
static int mtk_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
-
- return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
- BIT(id % 32), 0);
+ return mtk_reset_update(rcdev, id, true);
}
-static int mtk_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int mtk_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
int ret;
@@ -67,8 +51,32 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
return mtk_reset_deassert(rcdev, id);
}
+static int mtk_reset_update_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id, bool deassert)
+{
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+ unsigned int deassert_ofs = deassert ? 0x4 : 0;
+
+ return regmap_write(data->regmap,
+ data->desc->rst_bank_ofs[id / RST_NR_PER_BANK] +
+ deassert_ofs,
+ BIT(id % RST_NR_PER_BANK));
+}
+
+static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return mtk_reset_update_set_clr(rcdev, id, false);
+}
+
+static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return mtk_reset_update_set_clr(rcdev, id, true);
+}
+
static int mtk_reset_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
int ret;
@@ -90,51 +98,135 @@ static const struct reset_control_ops mtk_reset_ops_set_clr = {
.reset = mtk_reset_set_clr,
};
-static void mtk_register_reset_controller_common(struct device_node *np,
- unsigned int num_regs, int regofs,
- const struct reset_control_ops *reset_ops)
+static int reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+
+ if (reset_spec->args[0] >= rcdev->nr_resets ||
+ reset_spec->args[0] >= data->desc->rst_idx_map_nr)
+ return -EINVAL;
+
+ return data->desc->rst_idx_map[reset_spec->args[0]];
+}
+
+int mtk_register_reset_controller(struct device_node *np,
+ const struct mtk_clk_rst_desc *desc)
{
- struct mtk_reset *data;
- int ret;
struct regmap *regmap;
+ const struct reset_control_ops *rcops = NULL;
+ struct mtk_clk_rst_data *data;
+ int ret;
+
+ if (!desc) {
+ pr_err("mtk clock reset desc is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (desc->version) {
+ case MTK_RST_SIMPLE:
+ rcops = &mtk_reset_ops;
+ break;
+ case MTK_RST_SET_CLR:
+ rcops = &mtk_reset_ops_set_clr;
+ break;
+ default:
+ pr_err("Unknown reset version %d\n", desc->version);
+ return -EINVAL;
+ }
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
- return;
+ return -EINVAL;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return;
+ return -ENOMEM;
+ data->desc = desc;
data->regmap = regmap;
- data->regofs = regofs;
data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = num_regs * 32;
- data->rcdev.ops = reset_ops;
+ data->rcdev.ops = rcops;
data->rcdev.of_node = np;
+ if (data->desc->rst_idx_map_nr > 0) {
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.nr_resets = desc->rst_idx_map_nr;
+ data->rcdev.of_xlate = reset_xlate;
+ } else {
+ data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
+ }
+
ret = reset_controller_register(&data->rcdev);
if (ret) {
pr_err("could not register reset controller: %d\n", ret);
kfree(data);
- return;
+ return ret;
}
-}
-void mtk_register_reset_controller(struct device_node *np,
- unsigned int num_regs, int regofs)
-{
- mtk_register_reset_controller_common(np, num_regs, regofs,
- &mtk_reset_ops);
+ return 0;
}
-void mtk_register_reset_controller_set_clr(struct device_node *np,
- unsigned int num_regs, int regofs)
+int mtk_register_reset_controller_with_dev(struct device *dev,
+ const struct mtk_clk_rst_desc *desc)
{
- mtk_register_reset_controller_common(np, num_regs, regofs,
- &mtk_reset_ops_set_clr);
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ const struct reset_control_ops *rcops = NULL;
+ struct mtk_clk_rst_data *data;
+ int ret;
+
+ if (!desc) {
+ dev_err(dev, "mtk clock reset desc is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (desc->version) {
+ case MTK_RST_SIMPLE:
+ rcops = &mtk_reset_ops;
+ break;
+ case MTK_RST_SET_CLR:
+ rcops = &mtk_reset_ops_set_clr;
+ break;
+ default:
+ dev_err(dev, "Unknown reset version %d\n", desc->version);
+ return -EINVAL;
+ }
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Cannot find regmap %pe\n", regmap);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->desc = desc;
+ data->regmap = regmap;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.ops = rcops;
+ data->rcdev.of_node = np;
+ data->rcdev.dev = dev;
+
+ if (data->desc->rst_idx_map_nr > 0) {
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.nr_resets = desc->rst_idx_map_nr;
+ data->rcdev.of_xlate = reset_xlate;
+ } else {
+ data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
+ }
+
+ ret = devm_reset_controller_register(dev, &data->rcdev);
+ if (ret) {
+ dev_err(dev, "could not register reset controller: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/reset.h b/drivers/clk/mediatek/reset.h
new file mode 100644
index 000000000000..6a58a3d59165
--- /dev/null
+++ b/drivers/clk/mediatek/reset.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __DRV_CLK_MTK_RESET_H
+#define __DRV_CLK_MTK_RESET_H
+
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define RST_NR_PER_BANK 32
+
+/* Infra global controller reset set register */
+#define INFRA_RST0_SET_OFFSET 0x120
+#define INFRA_RST1_SET_OFFSET 0x130
+#define INFRA_RST2_SET_OFFSET 0x140
+#define INFRA_RST3_SET_OFFSET 0x150
+#define INFRA_RST4_SET_OFFSET 0x730
+
+/**
+ * enum mtk_reset_version - Version of MediaTek clock reset controller.
+ * @MTK_RST_SIMPLE: Use the same registers for bit set and clear.
+ * @MTK_RST_SET_CLR: Use separate registers for bit set and clear.
+ * @MTK_RST_MAX: Total number of MediaTek clock reset controller versions.
+ */
+enum mtk_reset_version {
+ MTK_RST_SIMPLE = 0,
+ MTK_RST_SET_CLR,
+ MTK_RST_MAX,
+};
+
+/**
+ * struct mtk_clk_rst_desc - Description of MediaTek clock reset.
+ * @version: Reset version which is defined in enum mtk_reset_version.
+ * @rst_bank_ofs: Pointer to an array containing the base offsets of the reset registers.
+ * @rst_bank_nr: Number of reset banks.
+ * @rst_idx_map: Pointer to an array mapping reset indices to register bit ids.
+ *               Only needed when the consumer argument is an index rather than
+ *               a raw bank/bit position.
+ * @rst_idx_map_nr: Number of entries in the reset index map.
+ */
+struct mtk_clk_rst_desc {
+ enum mtk_reset_version version;
+ u16 *rst_bank_ofs;
+ u32 rst_bank_nr;
+ u16 *rst_idx_map;
+ u32 rst_idx_map_nr;
+};
+
+/**
+ * struct mtk_clk_rst_data - Data of MediaTek clock reset controller.
+ * @regmap: Pointer to the regmap used to access the reset registers.
+ * @rcdev: Reset controller device.
+ * @desc: Pointer to description of the reset controller.
+ */
+struct mtk_clk_rst_data {
+ struct regmap *regmap;
+ struct reset_controller_dev rcdev;
+ const struct mtk_clk_rst_desc *desc;
+};
+
+/**
+ * mtk_register_reset_controller - Register MediaTek clock reset controller
+ * @np: Pointer to device node.
+ * @desc: Constant pointer to description of clock reset.
+ *
+ * Return: 0 on success, or a negative errno otherwise.
+ */
+int mtk_register_reset_controller(struct device_node *np,
+ const struct mtk_clk_rst_desc *desc);
+
+/**
+ * mtk_register_reset_controller_with_dev - Register MediaTek clock reset controller with device
+ * @dev: Pointer to the device.
+ * @desc: Constant pointer to description of clock reset.
+ *
+ * Return: 0 on success, or a negative errno otherwise.
+ */
+int mtk_register_reset_controller_with_dev(struct device *dev,
+ const struct mtk_clk_rst_desc *desc);
+
+#endif /* __DRV_CLK_MTK_RESET_H */
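For context, this is roughly how a clock driver consumes the new interface, mirroring the conversions earlier in this patch; the bank offset (0x34), the "example" names and the probe function are illustrative placeholders, not part of the patch:

#include <linux/platform_device.h>
#include "reset.h"

static u16 example_rst_ofs[] = { 0x34 };

static const struct mtk_clk_rst_desc example_rst_desc = {
	.version = MTK_RST_SIMPLE,
	.rst_bank_ofs = example_rst_ofs,
	.rst_bank_nr = ARRAY_SIZE(example_rst_ofs),
};

static int example_clk_probe(struct platform_device *pdev)
{
	/* Register the clocks first, then hook up the reset controller. */
	return mtk_register_reset_controller_with_dev(&pdev->dev,
						      &example_rst_desc);
}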
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index bfe36bd41339..5016682e47c8 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -1657,35 +1657,6 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&sm1_sysclk_b_en,
};
-static int devm_clk_get_enable(struct device *dev, char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get(dev, id);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err_probe(dev, ret, "failed to get %s", id);
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable %s", id);
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret) {
- dev_err(dev, "failed to add reset action on %s", id);
- return ret;
- }
-
- return 0;
-}
-
struct axg_audio_reset_data {
struct reset_controller_dev rstc;
struct regmap *map;
@@ -1787,6 +1758,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
+ struct clk *clk;
int ret, i;
data = of_device_get_match_data(dev);
@@ -1804,9 +1776,9 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Get the mandatory peripheral clock */
- ret = devm_clk_get_enable(dev, "pclk");
- if (ret)
- return ret;
+ clk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
ret = device_reset(dev);
if (ret) {
diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c
index fb294ada0b03..23c43a46604e 100644
--- a/drivers/clk/mmp/clk-apbc.c
+++ b/drivers/clk/mmp/clk-apbc.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp APB clock operation source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c
index b7ce8f52026e..9313428b083a 100644
--- a/drivers/clk/mmp/clk-apmu.c
+++ b/drivers/clk/mmp/clk-apmu.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp AXI peripheral clock operation source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 48f592bd633d..1b90867b60c4 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp factor clock operation source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index 1755916ddef2..350eeb3e9e25 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp gate clock operation source file
*
* Copyright (C) 2014 Marvell
* Chao Xie <chao.xie@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 7a351ec65564..454d131f475e 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp mix(div and mux) clock operation source file
*
* Copyright (C) 2014 Marvell
* Chao Xie <chao.xie@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 7460031714da..aabacfa10158 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp2 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 50a780274ba0..bcf60f43aa13 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmp2 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
* Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index f110c02e83cb..48dfb18b490e 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* pxa168 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index 998fc4207b0e..2508a0d795f8 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* pxa1928 clock framework source file
*
@@ -7,10 +8,6 @@
* Based on drivers/clk/mmp/clk-of-mmp2.c:
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/io.h>
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index 1dcabe95cb67..4d15bac987eb 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* pxa910 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index b351039cac09..8a9b8fb3a465 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* pxa168 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index f254ceff3ea7..9fcd76316d7e 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* pxa910 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index f2e171a01fb4..ddb28b38f549 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Clk driver for NXP LPC18xx/LPC43xx Clock Control Unit (CCU)
*
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index c23ac463ab0f..f253ef1996b1 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Clk driver for NXP LPC18xx/LPC43xx Clock Generation Unit (CGU)
*
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-creg.c b/drivers/clk/nxp/clk-lpc18xx-creg.c
index c6e802e7e6ec..3d3982e9c661 100644
--- a/drivers/clk/nxp/clk-lpc18xx-creg.c
+++ b/drivers/clk/nxp/clk-lpc18xx-creg.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Clk driver for NXP LPC18xx/43xx Configuration Registers (CREG)
*
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index bc4dcf356d82..1cf1ef70e347 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -166,6 +166,7 @@ config IPQ_LCC_806X
config IPQ_GCC_8074
tristate "IPQ8074 Global Clock Controller"
+ select QCOM_GDSC
help
Support for global clock controller on ipq8074 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -608,6 +609,13 @@ config SM_CAMCC_8250
Support for the camera clock controller on SM8250 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_8450
+ tristate "SM8450 Camera Clock Controller"
+ select SM_GCC_8450
+ help
+ Support for the camera clock controller on SM8450 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_DISPCC_6125
tristate "SM6125 Display Clock Controller"
depends on SM_GCC_6125
@@ -618,11 +626,11 @@ config SM_DISPCC_6125
splash screen
config SM_DISPCC_8250
- tristate "SM8150 and SM8250 Display Clock Controller"
- depends on SM_GCC_8150 || SM_GCC_8250
+ tristate "SM8150/SM8250/SM8350 Display Clock Controller"
+ depends on SM_GCC_8150 || SM_GCC_8250 || SM_GCC_8350
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8150 and SM8250 devices.
+ SM8150/SM8250/SM8350 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -712,6 +720,14 @@ config SM_GPUCC_8250
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_8350
+ tristate "SM8350 Graphics Clock Controller"
+ select SM_GCC_8350
+ help
+ Support for the graphics clock controller on SM8350 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 36789f5233ef..fbcf04073f07 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -11,6 +11,7 @@ clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += clk-regmap-phy-mux.o
clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
@@ -101,6 +103,7 @@ obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
+obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index be3f95326965..27d44188a7ab 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1534,6 +1534,8 @@ static struct clk_branch cam_cc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.pd = {
@@ -1567,6 +1569,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1576,6 +1579,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 439eaafdcc86..9b32c56a5bc5 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -2205,6 +2205,8 @@ static struct clk_branch cam_cc_sleep_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x7004,
.pd = {
@@ -2238,6 +2240,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2247,6 +2250,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2440,17 +2444,7 @@ static struct platform_driver cam_cc_sm8250_driver = {
},
};
-static int __init cam_cc_sm8250_init(void)
-{
- return platform_driver_register(&cam_cc_sm8250_driver);
-}
-subsys_initcall(cam_cc_sm8250_init);
-
-static void __exit cam_cc_sm8250_exit(void)
-{
- platform_driver_unregister(&cam_cc_sm8250_driver);
-}
-module_exit(cam_cc_sm8250_exit);
+module_platform_driver(cam_cc_sm8250_driver);
MODULE_DESCRIPTION("QTI CAMCC SM8250 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
new file mode 100644
index 000000000000..e3c09471dadf
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -0,0 +1,2856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8450-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL7_OUT_EVEN,
+ P_CAM_CC_PLL8_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
+static const struct clk_parent_data pll_parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x25,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000217,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll7_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll7 = {
+ .offset = 0x7000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
+ .offset = 0x7000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll7_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll7_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll8_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL8_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll8_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL7_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll7_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9_ao[] = {
+ { .index = DT_BI_TCXO_AO, .name = "bi_tcxo_ao" },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x10050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13194,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x1312c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x13148,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1104c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x150e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x15124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x1514c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0x1516c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
+ .cmd_rcgr = 0x1518c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csid_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x13174,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x13108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0x12064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = {
+ F(364000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(500000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_nps_clk_src = {
+ .cmd_rcgr = 0x1008c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0x130dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x1501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x15038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x15070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x1508c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x150a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x150c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_qdss_debug_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_qdss_debug_clk_src = {
+ .cmd_rcgr = 0x131bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_qdss_debug_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_0_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_0_clk_src = {
+ .cmd_rcgr = 0x13064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_1_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_1_clk_src = {
+ .cmd_rcgr = 0x130ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_sfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x13210,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x10034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x131f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_9_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+ .halt_reg = 0x1320c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1320c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_gdsc_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_fast_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x131ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x131b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x13144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x13160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x13164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_bps_clk = {
+ .halt_reg = 0x10070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_bps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x1316c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1316c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x11038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_2_clk = {
+ .halt_reg = 0x12084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x13020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_nps_clk = {
+ .halt_reg = 0x100ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_nps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sbi_clk = {
+ .halt_reg = 0x100ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sbi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_0_clk = {
+ .halt_reg = 0x13084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_1_clk = {
+ .halt_reg = 0x130cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x150f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x1511c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1511c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x1513c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1513c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x15164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0x15184,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15184,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi5phytimer_clk = {
+ .halt_reg = 0x151a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi5phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x1318c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1318c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x150fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x15120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x15140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x15168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0x15188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15188,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy4_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy5_clk = {
+ .halt_reg = 0x151a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy5_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x13128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x13120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x11030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x1103c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x12048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0x1207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0x12088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_fast_ahb_clk = {
+ .halt_reg = 0x12094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x13044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x1303c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1303c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_ahb_clk = {
+ .halt_reg = 0x100c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_clk = {
+ .halt_reg = 0x100a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = {
+ .halt_reg = 0x100c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_clk = {
+ .halt_reg = 0x100b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = {
+ .halt_reg = 0x100c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0x130f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x15018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1506c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x15088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x150a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x150c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x150dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_clk = {
+ .halt_reg = 0x131d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_qdss_debug_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_xo_clk = {
+ .halt_reg = 0x131d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_xo_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_ahb_clk = {
+ .halt_reg = 0x100f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_clk = {
+ .halt_reg = 0x100e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_clk = {
+ .halt_reg = 0x1307c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1307c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ .halt_reg = 0x13090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_clk = {
+ .halt_reg = 0x130c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
+ .halt_reg = 0x130d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+ .halt_reg = 0x13228,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13228,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *cam_cc_sm8450_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_2_CLK] = &cam_cc_cpas_ife_2_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_NPS_CLK] = &cam_cc_cpas_ipe_nps_clk.clkr,
+ [CAM_CC_CPAS_SBI_CLK] = &cam_cc_cpas_sbi_clk.clkr,
+ [CAM_CC_CPAS_SFE_0_CLK] = &cam_cc_cpas_sfe_0_clk.clkr,
+ [CAM_CC_CPAS_SFE_1_CLK] = &cam_cc_cpas_sfe_1_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK] = &cam_cc_csi5phytimer_clk.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK_SRC] = &cam_cc_csi5phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_2_FAST_AHB_CLK] = &cam_cc_ife_2_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr,
+ [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr,
+ [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr,
+ [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL7] = &cam_cc_pll7.clkr,
+ [CAM_CC_PLL7_OUT_EVEN] = &cam_cc_pll7_out_even.clkr,
+ [CAM_CC_PLL8] = &cam_cc_pll8.clkr,
+ [CAM_CC_PLL8_OUT_EVEN] = &cam_cc_pll8_out_even.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK] = &cam_cc_qdss_debug_clk.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK_SRC] = &cam_cc_qdss_debug_clk_src.clkr,
+ [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
+ [CAM_CC_SBI_AHB_CLK] = &cam_cc_sbi_ahb_clk.clkr,
+ [CAM_CC_SBI_CLK] = &cam_cc_sbi_clk.clkr,
+ [CAM_CC_SFE_0_CLK] = &cam_cc_sfe_0_clk.clkr,
+ [CAM_CC_SFE_0_CLK_SRC] = &cam_cc_sfe_0_clk_src.clkr,
+ [CAM_CC_SFE_0_FAST_AHB_CLK] = &cam_cc_sfe_0_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_1_CLK] = &cam_cc_sfe_1_clk.clkr,
+ [CAM_CC_SFE_1_CLK_SRC] = &cam_cc_sfe_1_clk_src.clkr,
+ [CAM_CC_SFE_1_FAST_AHB_CLK] = &cam_cc_sfe_1_fast_ahb_clk.clkr,
+ [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map cam_cc_sm8450_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x10000 },
+ [CAM_CC_ICP_BCR] = { 0x13104 },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IFE_2_BCR] = { 0x1204c },
+ [CAM_CC_IPE_0_BCR] = { 0x10074 },
+ [CAM_CC_QDSS_DEBUG_BCR] = { 0x131b8 },
+ [CAM_CC_SBI_BCR] = { 0x100cc },
+ [CAM_CC_SFE_0_BCR] = { 0x1304c },
+ [CAM_CC_SFE_1_BCR] = { 0x13094 },
+};
+
+static const struct regmap_config cam_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1601c,
+ .fast_io = true,
+};
+
+static struct gdsc titan_top_gdsc;
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x10078,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sbi_gdsc = {
+ .gdscr = 0x100d0,
+ .pd = {
+ .name = "sbi_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x11004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0x12004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0x12050,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sfe_0_gdsc = {
+ .gdscr = 0x13050,
+ .pd = {
+ .name = "sfe_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sfe_1_gdsc = {
+ .gdscr = 0x13098,
+ .pd = {
+ .name = "sfe_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0x131dc,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc *cam_cc_sm8450_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [SBI_GDSC] = &sbi_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [SFE_0_GDSC] = &sfe_0_gdsc,
+ [SFE_1_GDSC] = &sfe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_cc_desc cam_cc_sm8450_desc = {
+ .config = &cam_cc_sm8450_regmap_config,
+ .clks = cam_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm8450_clocks),
+ .resets = cam_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm8450_resets),
+ .gdscs = cam_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
+
+static int cam_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+
+ return qcom_cc_really_probe(pdev, &cam_cc_sm8450_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sm8450_driver = {
+ .probe = cam_cc_sm8450_probe,
+ .driver = {
+ .name = "camcc-sm8450",
+ .of_match_table = cam_cc_sm8450_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm8450_driver);
+
+MODULE_DESCRIPTION("QCOM CAMCC SM8450 Driver");
+MODULE_LICENSE("GPL");
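For context on how the clocks registered above get used: a consumer driver (say, a camera sensor) picks them up through the common clk framework. A minimal sketch, assuming a DT clock-names entry of "cam_mclk0" and an illustrative 24 MHz rate; neither is taken from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer; the clock name and rate are assumptions. */
static int example_sensor_clk_setup(struct device *dev)
{
	struct clk *mclk;
	int ret;

	mclk = devm_clk_get(dev, "cam_mclk0");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	ret = clk_set_rate(mclk, 24000000);
	if (ret)
		return ret;

	return clk_prepare_enable(mclk);
}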
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4406cf609aae..b42684703fbb 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -154,6 +154,18 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x30,
[PLL_OFF_TEST_CTL_U1] = 0x34,
},
+ [CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_TEST_CTL] = 0x28,
+ [PLL_OFF_TEST_CTL_U] = 0x2c,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -191,8 +203,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
/* LUCID EVO PLL specific settings and offsets */
+#define LUCID_EVO_PCAL_NOT_DONE BIT(8)
#define LUCID_EVO_ENABLE_VOTE_RUN BIT(25)
#define LUCID_EVO_PLL_L_VAL_MASK GENMASK(15, 0)
+#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
@@ -1439,7 +1453,7 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
/**
- * clk_lucid_pll_configure - configure the lucid pll
+ * clk_trion_pll_configure - configure the trion pll
*
* @pll: clk alpha pll
* @regmap: register map
@@ -1823,7 +1837,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.round_rate = clk_alpha_pll_round_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.enable = alpha_pll_lucid_5lpe_enable,
@@ -1832,14 +1846,14 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.recalc_rate = clk_trion_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_fixed_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
.round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
@@ -1992,7 +2006,33 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_zonda_pll_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_zonda_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
+
+void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 lval = config->l;
+
+ lval |= TRION_PLL_CAL_VAL << LUCID_EVO_PLL_CAL_L_VAL_SHIFT;
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), lval);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+ /* Set operation mode to STANDBY and de-assert the reset */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_evo_pll_configure);
static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
{
@@ -2079,6 +2119,31 @@ static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
}
+static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *p;
+ u32 val = 0;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (!(val & LUCID_EVO_PCAL_NOT_DONE))
+ return 0;
+
+ p = clk_hw_get_parent(hw);
+ if (!p)
+ return -EINVAL;
+
+ ret = alpha_pll_lucid_evo_enable(hw);
+ if (ret)
+ return ret;
+
+ alpha_pll_lucid_evo_disable(hw);
+
+ return 0;
+}
+
static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -2114,3 +2179,72 @@ const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
+
+const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
+ .prepare = alpha_pll_lucid_evo_prepare,
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
+
+void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ regmap_update_bits(regmap, PLL_MODE(pll),
+ PLL_RESET_N | PLL_BYPASSNL | PLL_OUTCTRL,
+ PLL_RESET_N | PLL_BYPASSNL);
+}
+EXPORT_SYMBOL_GPL(clk_rivian_evo_pll_configure);
+
+static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ return parent_rate * l;
+}
+
+static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long min_freq, max_freq;
+ u32 l;
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_rivian_evo_pll_recalc_rate,
+ .round_rate = clk_rivian_evo_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
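A platform driver wires a Rivian EVO PLL up to these new ops in the usual clk_alpha_pll way. A hedged sketch, with the offset, VCO range, parent and name all illustrative rather than taken from this patch:

static const struct pll_vco example_rivian_evo_vco[] = {
	{ 864000000, 1056000000, 0 },
};

static struct clk_alpha_pll example_rivian_evo_pll = {
	.offset = 0x2000,
	.vco_table = example_rivian_evo_vco,
	.num_vco = ARRAY_SIZE(example_rivian_evo_vco),
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
	.clkr = {
		.hw.init = &(const struct clk_init_data){
			.name = "example_rivian_evo_pll",
			.parent_data = &(const struct clk_parent_data){
				.fw_name = "bi_tcxo",
			},
			.num_parents = 1,
			.ops = &clk_alpha_pll_rivian_evo_ops,
		},
	},
};

At probe time the PLL is then programmed once with clk_rivian_evo_pll_configure(), as the camcc-sm8450 probe above does for cam_cc_pll2.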
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 6e9907deaf30..447efb82fe59 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -18,6 +18,7 @@ enum {
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
+ CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -152,9 +153,14 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
+
+extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
+#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -168,6 +174,9 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
-
+void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
index e847d586a73a..7dd17c184b69 100644
--- a/drivers/clk/qcom/clk-hfpll.c
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -72,13 +72,16 @@ static void __clk_hfpll_enable(struct clk_hw *hw)
regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
/* Wait for PLL to lock. */
- if (hd->status_reg) {
- do {
- regmap_read(regmap, hd->status_reg, &val);
- } while (!(val & BIT(hd->lock_bit)));
- } else {
+ if (hd->status_reg)
+ /*
+ * Busy wait. This should never time out, but we add a timeout
+ * to guard against any possibility of a stall.
+ */
+ regmap_read_poll_timeout(regmap, hd->status_reg, val,
+ !(val & BIT(hd->lock_bit)), 0,
+ 100 * USEC_PER_MSEC);
+ else
udelay(60);
- }
/* Enable PLL output. */
regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
index 59f1af415b58..45da736bd5f4 100644
--- a/drivers/clk/qcom/clk-krait.c
+++ b/drivers/clk/qcom/clk-krait.c
@@ -18,13 +18,23 @@
static DEFINE_SPINLOCK(krait_clock_reg_lock);
#define LPL_SHIFT 8
+#define SECCLKAGD BIT(4)
+
static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
{
unsigned long flags;
u32 regval;
spin_lock_irqsave(&krait_clock_reg_lock, flags);
+
regval = krait_get_l2_indirect_reg(mux->offset);
+
+ /* apq/ipq8064 Errata: disable sec_src clock gating during switch. */
+ if (mux->disable_sec_src_gating) {
+ regval |= SECCLKAGD;
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ }
+
regval &= ~(mux->mask << mux->shift);
regval |= (sel & mux->mask) << mux->shift;
if (mux->lpl) {
@@ -32,11 +42,22 @@ static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
}
krait_set_l2_indirect_reg(mux->offset, regval);
- spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* apq/ipq8064 Errata: re-enable sec_src clock gating. */
+ if (mux->disable_sec_src_gating) {
+ regval &= ~SECCLKAGD;
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ }
/* Wait for switch to complete. */
mb();
udelay(1);
+
+ /*
+ * Unlock now to make sure the mux register is not
+ * modified while switching to the new parent.
+ */
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
}
static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
index 9120bd2f5297..f930538c539e 100644
--- a/drivers/clk/qcom/clk-krait.h
+++ b/drivers/clk/qcom/clk-krait.h
@@ -15,6 +15,7 @@ struct krait_mux_clk {
u8 safe_sel;
u8 old_index;
bool reparent;
+ bool disable_sec_src_gating;
struct clk_hw hw;
struct notifier_block clk_nb;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8e5dce09d162..28019edd2a50 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -13,6 +13,7 @@
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/slab.h>
#include <asm/div64.h>
@@ -437,7 +438,7 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- u32 notn_m, n, m, d, not2d, mask, duty_per;
+ u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
int ret;
/* Duty-cycle cannot be modified for non-MND RCGs */
@@ -448,6 +449,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+
+ /* Duty-cycle cannot be modified if MND divider is in bypass mode. */
+ if (!(cfg & CFG_MODE_MASK))
+ return -EINVAL;
n = (~(notn_m) + m) & mask;
@@ -456,9 +462,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
/* Calculate 2d value */
d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
- /* Check bit widths of 2d. If D is too big reduce duty cycle. */
- if (d > mask)
- d = mask;
+ /*
+ * Check bit widths of 2d. If D is too big reduce duty cycle.
+ * Also make sure it is never zero.
+ */
+ d = clamp_val(d, 1, mask);
if ((d / 2) > (n - m))
d = (n - m) * 2;
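The bounds the hunk above adds are easier to see outside the driver. A simplified, self-contained model of the 2*D computation in clk_rcg2_set_duty_cycle() follows; the helper name is made up and register access is omitted:

#include <linux/bits.h>
#include <linux/math.h>
#include <linux/minmax.h>

/* Model only: derive the 2*D value for a requested duty cycle in percent. */
static u32 example_rcg2_duty_to_2d(u32 n, u32 m, u32 duty_per, u32 mnd_width)
{
	u32 mask = BIT(mnd_width) - 1;
	u32 d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/* Same limits as the patched code: never zero, never above the mask. */
	d = clamp_val(d, 1, mask);

	/* And never a duty cycle beyond what the M/N counters allow. */
	if ((d / 2) > (n - m))
		d = (n - m) * 2;

	return d;
}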
diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.c b/drivers/clk/qcom/clk-regmap-phy-mux.c
new file mode 100644
index 000000000000..7b7243b7107d
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-phy-mux.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap.h"
+#include "clk-regmap-phy-mux.h"
+
+#define PHY_MUX_MASK GENMASK(1, 0)
+#define PHY_MUX_PHY_SRC 0
+#define PHY_MUX_REF_SRC 2
+
+static inline struct clk_regmap_phy_mux *to_clk_regmap_phy_mux(struct clk_regmap *clkr)
+{
+ return container_of(clkr, struct clk_regmap_phy_mux, clkr);
+}
+
+static int phy_mux_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+ unsigned int val;
+
+ regmap_read(clkr->regmap, phy_mux->reg, &val);
+ val = FIELD_GET(PHY_MUX_MASK, val);
+
+ WARN_ON(val != PHY_MUX_PHY_SRC && val != PHY_MUX_REF_SRC);
+
+ return val == PHY_MUX_PHY_SRC;
+}
+
+static int phy_mux_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+
+ return regmap_update_bits(clkr->regmap, phy_mux->reg,
+ PHY_MUX_MASK,
+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_PHY_SRC));
+}
+
+static void phy_mux_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+
+ regmap_update_bits(clkr->regmap, phy_mux->reg,
+ PHY_MUX_MASK,
+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_REF_SRC));
+}
+
+const struct clk_ops clk_regmap_phy_mux_ops = {
+ .enable = phy_mux_enable,
+ .disable = phy_mux_disable,
+ .is_enabled = phy_mux_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_phy_mux_ops);
diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.h b/drivers/clk/qcom/clk-regmap-phy-mux.h
new file mode 100644
index 000000000000..614dd384695c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-phy-mux.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_PHY_MUX_H__
+#define __QCOM_CLK_REGMAP_PHY_MUX_H__
+
+#include "clk-regmap.h"
+
+/*
+ * A clock implementation for PHY pipe and symbols clock muxes.
+ *
+ * If the clock is running off the source provided by the PHY, report it
+ * as enabled; otherwise (i.e. when it uses the reference source) report
+ * it as disabled.
+ *
+ * This way the PHY disables the pipe clock before turning off the GDSC,
+ * which in turn leads to the corresponding pipe_clk_src being disabled
+ * (and thus parked to a safe reference clock source). And vice versa:
+ * after enabling the GDSC, the PHY enables the pipe clock, which causes
+ * pipe_clk_src to be switched from the safe source to the working one.
+ *
+ * For some platforms this should be used for the UFS symbol_clk_src clocks
+ * too.
+ */
+struct clk_regmap_phy_mux {
+ u32 reg;
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_regmap_phy_mux_ops;
+
+#endif
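As the gcc-sc7280 conversion later in this patch shows, a provider only fills in the mux register and points the clock at clk_regmap_phy_mux_ops. A minimal sketch along those lines (the register offset and names here are illustrative):

static struct clk_regmap_phy_mux example_pcie_pipe_clk_src = {
	.reg = 0x6b054,
	.clkr = {
		.hw.init = &(struct clk_init_data){
			.name = "example_pcie_pipe_clk_src",
			.parent_data = &(const struct clk_parent_data){
				.fw_name = "pcie_0_pipe_clk",
			},
			.num_parents = 1,
			.ops = &clk_regmap_phy_mux_ops,
		},
	},
};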
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index a18811c38018..747c473b0b5e 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -23,6 +23,14 @@
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
#define QCOM_RPM_XO_MODE_ON 0x2
+static const struct clk_parent_data gcc_pxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+};
+
+static const struct clk_parent_data gcc_cxo[] = {
+ { .fw_name = "cxo", .name = "cxo_board" },
+};
+
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
static struct clk_rpm _platform##_##_active; \
static struct clk_rpm _platform##_##_name = { \
@@ -32,8 +40,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}; \
static struct clk_rpm _platform##_##_active = { \
@@ -44,8 +52,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_ops, \
.name = #_active, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}
@@ -56,8 +64,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_xo_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_cxo, \
+ .num_parents = ARRAY_SIZE(gcc_cxo), \
}, \
}
@@ -68,8 +76,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_fixed_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "pxo" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index aed907982344..c07cab6905cb 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -274,6 +274,11 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
cmd.addr = c->res_addr;
cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
+ /*
+ * Send an active-only state request. RPMh continues to use the
+ * active state while we are in the sleep/wake state, as long as
+ * the sleep/wake state has never been set.
+ */
ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
if (ret) {
dev_err(c->dev, "set active state of %s failed: (%d)\n",
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index db9379634fb2..709076f0f9d7 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -43,6 +43,10 @@ static struct pll_vco vco_table[] = {
{ 249600000, 2000000000, 0 },
};
+static struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
static struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x47,
.alpha = 0xE000,
@@ -1134,7 +1138,6 @@ static struct gdsc mdss_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
- .supply = "mmcx",
};
static struct clk_regmap *disp_cc_sm8250_clocks[] = {
@@ -1228,6 +1231,7 @@ static const struct of_device_id disp_cc_sm8250_match_table[] = {
{ .compatible = "qcom,sc8180x-dispcc" },
{ .compatible = "qcom,sm8150-dispcc" },
{ .compatible = "qcom,sm8250-dispcc" },
+ { .compatible = "qcom,sm8350-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
@@ -1258,7 +1262,7 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- /* note: trion == lucid, except for the prepare() op */
+ /* Apply differences for SM8150 and SM8350 */
BUILD_BUG_ON(CLK_ALPHA_PLL_TYPE_TRION != CLK_ALPHA_PLL_TYPE_LUCID);
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sc8180x-dispcc") ||
of_device_is_compatible(pdev->dev.of_node, "qcom,sm8150-dispcc")) {
@@ -1270,6 +1274,62 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
disp_cc_pll1_config.config_ctl_hi1_val = 0x00000024;
disp_cc_pll1_config.user_ctl_hi1_val = 0x000000D0;
disp_cc_pll1_init.ops = &clk_alpha_pll_trion_ops;
+ } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
+ static struct clk_rcg2 * const rcgs[] = {
+ &disp_cc_mdss_byte0_clk_src,
+ &disp_cc_mdss_byte1_clk_src,
+ &disp_cc_mdss_dp_aux1_clk_src,
+ &disp_cc_mdss_dp_aux_clk_src,
+ &disp_cc_mdss_dp_link1_clk_src,
+ &disp_cc_mdss_dp_link_clk_src,
+ &disp_cc_mdss_dp_pixel1_clk_src,
+ &disp_cc_mdss_dp_pixel2_clk_src,
+ &disp_cc_mdss_dp_pixel_clk_src,
+ &disp_cc_mdss_esc0_clk_src,
+ &disp_cc_mdss_mdp_clk_src,
+ &disp_cc_mdss_pclk0_clk_src,
+ &disp_cc_mdss_pclk1_clk_src,
+ &disp_cc_mdss_rot_clk_src,
+ &disp_cc_mdss_vsync_clk_src,
+ };
+ static struct clk_regmap_div * const divs[] = {
+ &disp_cc_mdss_byte0_div_clk_src,
+ &disp_cc_mdss_byte1_div_clk_src,
+ &disp_cc_mdss_dp_link1_div_clk_src,
+ &disp_cc_mdss_dp_link_div_clk_src,
+ };
+ unsigned int i;
+ static bool offset_applied;
+
+ /*
+ * Note: trion == lucid, except for the prepare() op.
+ * Only apply the offsets once (in case of deferred probe).
+ */
+ if (!offset_applied) {
+ for (i = 0; i < ARRAY_SIZE(rcgs); i++)
+ rcgs[i]->cmd_rcgr -= 4;
+
+ for (i = 0; i < ARRAY_SIZE(divs); i++) {
+ divs[i]->reg -= 4;
+ divs[i]->width = 4;
+ }
+
+ disp_cc_mdss_ahb_clk.halt_reg -= 4;
+ disp_cc_mdss_ahb_clk.clkr.enable_reg -= 4;
+
+ offset_applied = true;
+ }
+
+ disp_cc_mdss_ahb_clk_src.cmd_rcgr = 0x22a0;
+
+ disp_cc_pll0_config.config_ctl_hi1_val = 0x2a9a699c;
+ disp_cc_pll0_config.test_ctl_hi1_val = 0x01800000;
+ disp_cc_pll0_init.ops = &clk_alpha_pll_lucid_5lpe_ops;
+ disp_cc_pll0.vco_table = lucid_5lpe_vco;
+ disp_cc_pll1_config.config_ctl_hi1_val = 0x2a9a699c;
+ disp_cc_pll1_config.test_ctl_hi1_val = 0x01800000;
+ disp_cc_pll1_init.ops = &clk_alpha_pll_lucid_5lpe_ops;
+ disp_cc_pll1.vco_table = lucid_5lpe_vco;
}
clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 541016db3c4b..42d185fe19c8 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -22,6 +22,7 @@
#include "clk-alpha-pll.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "gdsc.h"
#include "reset.h"
enum {
@@ -662,6 +663,7 @@ static struct clk_branch gcc_sleep_clk_src = {
},
.num_parents = 1,
.ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -1788,8 +1790,10 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
F(78125000, P_UNIPHY1_RX, 4, 0, 0),
F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
F(156250000, P_UNIPHY1_RX, 2, 0, 0),
F(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
@@ -1828,8 +1832,10 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
F(78125000, P_UNIPHY1_TX, 4, 0, 0),
F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
F(156250000, P_UNIPHY1_TX, 2, 0, 0),
F(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
@@ -1867,8 +1873,10 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_RX, 5, 0, 0),
F(25000000, P_UNIPHY2_RX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_RX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_RX, 1, 0, 0),
F(125000000, P_UNIPHY2_RX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_RX, 2, 0, 0),
F(312500000, P_UNIPHY2_RX, 1, 0, 0),
@@ -1907,8 +1915,10 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_TX, 5, 0, 0),
F(25000000, P_UNIPHY2_TX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_TX, 1, 0, 0),
F(125000000, P_UNIPHY2_TX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_TX, 2, 0, 0),
F(312500000, P_UNIPHY2_TX, 1, 0, 0),
@@ -3174,6 +3184,24 @@ static struct clk_branch gcc_nss_ptp_ref_clk = {
},
};
+static struct clk_branch gcc_crypto_ppe_clk = {
+ .halt_reg = 0x68310,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x68310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ppe_clk",
+ .parent_names = (const char *[]){
+ "nss_ppe_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_nssnoc_ce_apb_clk = {
.halt_reg = 0x6830c,
.clkr = {
@@ -3346,6 +3374,7 @@ static struct clk_branch gcc_nssnoc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi0_ahb_clk = {
.halt_reg = 0x6820c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6820c,
.enable_mask = BIT(0),
@@ -3363,6 +3392,7 @@ static struct clk_branch gcc_ubi0_ahb_clk = {
static struct clk_branch gcc_ubi0_axi_clk = {
.halt_reg = 0x68200,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68200,
.enable_mask = BIT(0),
@@ -3380,6 +3410,7 @@ static struct clk_branch gcc_ubi0_axi_clk = {
static struct clk_branch gcc_ubi0_nc_axi_clk = {
.halt_reg = 0x68204,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68204,
.enable_mask = BIT(0),
@@ -3397,6 +3428,7 @@ static struct clk_branch gcc_ubi0_nc_axi_clk = {
static struct clk_branch gcc_ubi0_core_clk = {
.halt_reg = 0x68210,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68210,
.enable_mask = BIT(0),
@@ -3414,6 +3446,7 @@ static struct clk_branch gcc_ubi0_core_clk = {
static struct clk_branch gcc_ubi0_mpt_clk = {
.halt_reg = 0x68208,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68208,
.enable_mask = BIT(0),
@@ -3431,6 +3464,7 @@ static struct clk_branch gcc_ubi0_mpt_clk = {
static struct clk_branch gcc_ubi1_ahb_clk = {
.halt_reg = 0x6822c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6822c,
.enable_mask = BIT(0),
@@ -3448,6 +3482,7 @@ static struct clk_branch gcc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi1_axi_clk = {
.halt_reg = 0x68220,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68220,
.enable_mask = BIT(0),
@@ -3465,6 +3500,7 @@ static struct clk_branch gcc_ubi1_axi_clk = {
static struct clk_branch gcc_ubi1_nc_axi_clk = {
.halt_reg = 0x68224,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68224,
.enable_mask = BIT(0),
@@ -3482,6 +3518,7 @@ static struct clk_branch gcc_ubi1_nc_axi_clk = {
static struct clk_branch gcc_ubi1_core_clk = {
.halt_reg = 0x68230,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68230,
.enable_mask = BIT(0),
@@ -3499,6 +3536,7 @@ static struct clk_branch gcc_ubi1_core_clk = {
static struct clk_branch gcc_ubi1_mpt_clk = {
.halt_reg = 0x68228,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68228,
.enable_mask = BIT(0),
@@ -4371,6 +4409,49 @@ static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
},
};
+static struct gdsc usb0_gdsc = {
+ .gdscr = 0x3e078,
+ .pd = {
+ .name = "usb0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb1_gdsc = {
+ .gdscr = 0x3f078,
+ .pd = {
+ .name = "usb1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x4e,
+ .config_ctl_val = 0x200d4aa8,
+ .config_ctl_hi_val = 0x3c2,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x0,
+ .alpha_hi = 0x80,
+ .config_ctl_val = 0x4001055b,
+ .main_output_mask = BIT(0),
+ .pre_div_val = 0x0,
+ .pre_div_mask = GENMASK(14, 12),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .vco_mask = GENMASK(21, 20),
+ .vco_val = 0x0,
+ .alpha_en_mask = BIT(24),
+};
+
static struct clk_hw *gcc_ipq8074_hws[] = {
&gpll0_out_main_div2.hw,
&gpll6_out_main_div2.hw,
@@ -4609,6 +4690,7 @@ static struct clk_regmap *gcc_ipq8074_clks[] = {
[GCC_PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
[GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
[GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq8074_resets[] = {
@@ -4746,6 +4828,11 @@ static const struct qcom_reset_map gcc_ipq8074_resets[] = {
[GCC_PCIE1_AXI_MASTER_STICKY_ARES] = { 0x76040, 6 },
};
+static struct gdsc *gcc_ipq8074_gdscs[] = {
+ [USB0_GDSC] = &usb0_gdsc,
+ [USB1_GDSC] = &usb1_gdsc,
+};
+
static const struct of_device_id gcc_ipq8074_match_table[] = {
{ .compatible = "qcom,gcc-ipq8074" },
{ }
@@ -4768,11 +4855,26 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
.clk_hws = gcc_ipq8074_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws),
+ .gdscs = gcc_ipq8074_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_ipq8074_gdscs),
};
static int gcc_ipq8074_probe(struct platform_device *pdev)
{
- return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_ipq8074_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* SW Workaround for UBI32 Huayra PLL */
+ regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+ clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+ &nss_crypto_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap);
}
static struct platform_driver gcc_ipq8074_driver = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 17e4a5a2a9fd..9a46794f6eb8 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -765,7 +765,20 @@ static struct clk_rcg2 cci_clk_src = {
},
};
+/*
+ * This is a frequency table for "General Purpose" clocks.
+ * These clocks can be muxed to the SoC pins and may be used by
+ * external devices. They are often used as a PWM source.
+ *
+ * See comment at ftbl_gcc_gp1_3_clk.
+ */
static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(100000, P_XO, 16, 1, 12),
+ F(500000, P_GPLL0, 16, 1, 100),
+ F(1000000, P_GPLL0, 16, 1, 50),
+ F(2500000, P_GPLL0, 16, 1, 20),
+ F(5000000, P_GPLL0, 16, 1, 10),
F(100000000, P_GPLL0, 8, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
{ }
@@ -927,7 +940,29 @@ static struct clk_rcg2 crypto_clk_src = {
},
};
+/*
+ * This is a frequency table for "General Purpose" clocks.
+ * These clocks can be muxed to the SoC pins and may be used by
+ * external devices. They are often used as a PWM source.
+ *
+ * Please note that the MND divider must be enabled (M != N) for
+ * duty-cycle control to be possible. Also, since the D register is
+ * configured with a value multiplied by 2, the duty cycle works out as
+ *
+ *                  (2 * D) % 2^W
+ *   DutyCycle = -------------------
+ *                 2 * (N % 2^W)
+ *
+ * (where W = .mnd_width). N must be half the maximum register value or
+ * less, otherwise duty-cycle control is limited
+ * (e.g. for an 8-bit MND, N should be less than 128).
+ */
static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(100000, P_XO, 16, 1, 12),
+ F(500000, P_GPLL0, 16, 1, 100),
+ F(1000000, P_GPLL0, 16, 1, 50),
+ F(2500000, P_GPLL0, 16, 1, 20),
+ F(5000000, P_GPLL0, 16, 1, 10),
F(19200000, P_XO, 1, 0, 0),
{ }
};
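A quick sanity check of the new low-frequency rows, taking the usual 19.2 MHz XO and 800 MHz GPLL0 on this SoC: F(100000, P_XO, 16, 1, 12) gives 19.2 MHz / 16 * (1/12) = 100 kHz, and F(500000, P_GPLL0, 16, 1, 100) gives 800 MHz / 16 * (1/100) = 500 kHz; the remaining entries scale the same way.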
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 39ebb443ae3d..8e2d9fb98ad5 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -632,7 +632,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
};
static struct clk_rcg2 bimc_ddr_clk_src = {
- .cmd_rcgr = 0x32004,
+ .cmd_rcgr = 0x32024,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
@@ -644,6 +644,18 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
},
};
+static struct clk_rcg2 system_mm_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6a_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_mm_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = {
F(40000000, P_GPLL0, 10, 1, 2),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -1002,7 +1014,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = {
- F(19200000, P_XO, 1, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 1, 3, 64),
{ }
};
@@ -1142,6 +1155,9 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = {
F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
F(320000000, P_GPLL0, 2.5, 0, 0),
F(465000000, P_GPLL2, 2, 0, 0),
{ }
@@ -1290,6 +1306,8 @@ static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
F(50000000, P_GPLL0_AUX, 16, 0, 0),
F(80000000, P_GPLL0_AUX, 10, 0, 0),
F(100000000, P_GPLL0_AUX, 8, 0, 0),
+ F(145500000, P_GPLL0_AUX, 5.5, 0, 0),
+ F(153600000, P_GPLL0, 4, 0, 0),
F(160000000, P_GPLL0_AUX, 5, 0, 0),
F(177780000, P_GPLL0_AUX, 4.5, 0, 0),
F(200000000, P_GPLL0_AUX, 4, 0, 0),
@@ -1462,7 +1480,9 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(57140000, P_GPLL0, 14, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
{ }
};
@@ -1823,9 +1843,9 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
};
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
- F(100000000, P_GPLL0, 8, 0, 0),
- F(160000000, P_GPLL0, 5, 0, 0),
- F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
{ }
};
@@ -2441,7 +2461,7 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2645,7 +2665,7 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2801,7 +2821,7 @@ static struct clk_branch gcc_mdss_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3193,7 +3213,7 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3211,7 +3231,7 @@ static struct clk_branch gcc_venus_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3229,7 +3249,7 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3247,7 +3267,7 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3484,7 +3504,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3623,6 +3643,7 @@ static struct clk_regmap *gcc_msm8939_clocks[] = {
[GPLL2_VOTE] = &gpll2_vote,
[PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
[SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [SYSTEM_MM_NOC_BFDCD_CLK_SRC] = &system_mm_noc_bfdcd_clk_src.clkr,
[CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr,
[APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 051745ef99c8..a6e13b91e4c8 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3641,6 +3641,9 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
hfpll_l2.d = &hfpll_l2_8064_data;
}
+ if (of_get_available_child_count(pdev->dev.of_node) != 0)
+ return devm_of_platform_populate(&pdev->dev);
+
tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
NULL, 0);
if (IS_ERR(tsens))
@@ -3655,7 +3658,8 @@ static int gcc_msm8960_remove(struct platform_device *pdev)
{
struct platform_device *tsens = platform_get_drvdata(pdev);
- platform_device_unregister(tsens);
+ if (tsens)
+ platform_device_unregister(tsens);
return 0;
}
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 6b702cdacbf2..0f52c48e89d8 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -52,7 +52,9 @@ static struct clk_alpha_pll_postdiv gpll0 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]) { "gpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
@@ -81,7 +83,9 @@ static struct clk_alpha_pll_postdiv gpll4 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
- .parent_names = (const char *[]) { "gpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 423627d49719..7ff64d4d5920 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -17,6 +17,7 @@
#include "clk-rcg.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "common.h"
#include "gdsc.h"
#include "reset.h"
@@ -255,26 +256,6 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct parent_map gcc_parent_map_6[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_6[] = {
- { .fw_name = "pcie_0_pipe_clk", .name = "pcie_0_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
-static const struct parent_map gcc_parent_map_7[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_7[] = {
- { .fw_name = "pcie_1_pipe_clk", .name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_8[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -369,32 +350,32 @@ static const struct clk_parent_data gcc_parent_data_15[] = {
{ .hw = &gcc_mss_gpll0_main_div_clk_src.clkr.hw },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x6b054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_6,
- .num_parents = ARRAY_SIZE(gcc_parent_data_6),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ .name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x8d054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_7,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_7,
- .num_parents = ARRAY_SIZE(gcc_parent_data_7),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ .name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index 4b894442fdf5..a2f3ffcc5849 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -20,6 +20,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "common.h"
#include "gdsc.h"
#include "reset.h"
@@ -82,11 +83,6 @@ enum {
P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC,
P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC,
P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC,
- P_PCIE_2A_PIPE_CLK,
- P_PCIE_2B_PIPE_CLK,
- P_PCIE_3A_PIPE_CLK,
- P_PCIE_3B_PIPE_CLK,
- P_PCIE_4_PIPE_CLK,
P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
P_QUSB4PHY_GCC_USB4_RX0_CLK,
@@ -351,56 +347,6 @@ static const struct clk_parent_data gcc_parent_data_9[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct parent_map gcc_parent_map_10[] = {
- { P_PCIE_2A_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_10[] = {
- { .index = DT_PCIE_2A_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_11[] = {
- { P_PCIE_2B_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_11[] = {
- { .index = DT_PCIE_2B_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_12[] = {
- { P_PCIE_3A_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_12[] = {
- { .index = DT_PCIE_3A_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_13[] = {
- { P_PCIE_3B_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_13[] = {
- { .index = DT_PCIE_3B_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_14[] = {
- { P_PCIE_4_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_14[] = {
- { .index = DT_PCIE_4_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
static const struct parent_map gcc_parent_map_15[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -741,77 +687,72 @@ static const struct clk_parent_data gcc_parent_data_41[] = {
{ .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK },
};
-static struct clk_regmap_mux gcc_pcie_2a_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_2a_pipe_clk_src = {
.reg = 0x9d05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_10,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2a_pipe_clk_src",
- .parent_data = gcc_parent_data_10,
- .num_parents = ARRAY_SIZE(gcc_parent_data_10),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_2A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_2b_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_2b_pipe_clk_src = {
.reg = 0x9e05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_11,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2b_pipe_clk_src",
- .parent_data = gcc_parent_data_11,
- .num_parents = ARRAY_SIZE(gcc_parent_data_11),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_2B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_3a_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
.reg = 0xa005c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_12,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_3a_pipe_clk_src",
- .parent_data = gcc_parent_data_12,
- .num_parents = ARRAY_SIZE(gcc_parent_data_12),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_3b_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
.reg = 0xa205c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_13,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_3b_pipe_clk_src",
- .parent_data = gcc_parent_data_13,
- .num_parents = ARRAY_SIZE(gcc_parent_data_13),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_4_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
.reg = 0x6b05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_14,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_4_pipe_clk_src",
- .parent_data = gcc_parent_data_14,
- .num_parents = ARRAY_SIZE(gcc_parent_data_14),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
@@ -6807,58 +6748,79 @@ static struct clk_branch gcc_video_vcodec_throttle_clk = {
static struct gdsc pcie_0_tunnel_gdsc = {
.gdscr = 0xa4004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(0),
.pd = {
.name = "pcie_0_tunnel_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_1_tunnel_gdsc = {
.gdscr = 0x8d004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(1),
.pd = {
.name = "pcie_1_tunnel_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(2),
.pd = {
.name = "pcie_2a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_2b_gdsc = {
.gdscr = 0x9e004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(3),
.pd = {
.name = "pcie_2b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_3a_gdsc = {
.gdscr = 0xa0004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(4),
.pd = {
.name = "pcie_3a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_3b_gdsc = {
.gdscr = 0xa2004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(5),
.pd = {
.name = "pcie_3b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_4_gdsc = {
.gdscr = 0x6b004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(6),
.pd = {
.name = "pcie_4_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc ufs_card_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a4f7fba70393..69412400efa4 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2558,7 +2558,7 @@ static int gcc_sm6350_probe(struct platform_device *pdev)
if (ret)
return ret;
- return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);;
+ return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);
}
static struct platform_driver gcc_sm6350_driver = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 593a195467ff..666efa5ff978 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -17,6 +17,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "gdsc.h"
#include "reset.h"
@@ -26,9 +27,7 @@ enum {
P_GCC_GPLL0_OUT_MAIN,
P_GCC_GPLL4_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
- P_PCIE_0_PIPE_CLK,
P_PCIE_1_PHY_AUX_CLK,
- P_PCIE_1_PIPE_CLK,
P_SLEEP_CLK,
P_UFS_PHY_RX_SYMBOL_0_CLK,
P_UFS_PHY_RX_SYMBOL_1_CLK,
@@ -153,16 +152,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
};
-static const struct parent_map gcc_parent_map_4[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_4[] = {
- { .fw_name = "pcie_0_pipe_clk", },
- { .fw_name = "bi_tcxo", },
-};
-
static const struct parent_map gcc_parent_map_5[] = {
{ P_PCIE_1_PHY_AUX_CLK, 0 },
{ P_BI_TCXO, 2 },
@@ -173,16 +162,6 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .fw_name = "bi_tcxo" },
};
-static const struct parent_map gcc_parent_map_6[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_6[] = {
- { .fw_name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_7[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -239,17 +218,16 @@ static const struct clk_parent_data gcc_parent_data_11[] = {
{ .fw_name = "bi_tcxo" },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x7b060,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_4,
- .num_parents = ARRAY_SIZE(gcc_parent_data_4),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
@@ -269,17 +247,16 @@ static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x9d064,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_6,
- .num_parents = ARRAY_SIZE(gcc_parent_data_6),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 44520efc6c72..d3244006c661 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -132,10 +132,29 @@ static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
return -ETIMEDOUT;
}
+static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
+{
+ u32 reg, mask;
+ int ret;
+
+ if (sc->collapse_mask) {
+ reg = sc->collapse_ctrl;
+ mask = sc->collapse_mask;
+ } else {
+ reg = sc->gdscr;
+ mask = SW_COLLAPSE_MASK;
+ }
+
+ ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
int ret;
- u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;
if (status == GDSC_ON && sc->rsupply) {
ret = regulator_enable(sc->rsupply);
@@ -143,9 +162,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
return ret;
}
- ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
- if (ret)
- return ret;
+ ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);
/* If disabling votable gdscs, don't poll on status */
if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
@@ -420,13 +437,20 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
+ /* ...and the power-domain */
+ ret = gdsc_pm_runtime_get(sc);
+ if (ret) {
+ if (sc->rsupply)
+ regulator_disable(sc->rsupply);
+ return ret;
+ }
+
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
*/
if (sc->flags & VOTABLE) {
- ret = regmap_update_bits(sc->regmap, sc->gdscr,
- SW_COLLAPSE_MASK, val);
+ ret = gdsc_update_collapse_bit(sc, false);
if (ret)
return ret;
}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index ad313d7210bd..5de48c9439b2 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -18,6 +18,8 @@ struct reset_controller_dev;
* @pd: generic power domain
* @regmap: regmap for MMIO accesses
 * @gdscr: gdsc control register
+ * @collapse_ctrl: APCS collapse-vote register
+ * @collapse_mask: APCS collapse-vote mask
* @gds_hw_ctrl: gds_hw_ctrl register
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
@@ -35,6 +37,8 @@ struct gdsc {
struct generic_pm_domain *parent;
struct regmap *regmap;
unsigned int gdscr;
+ unsigned int collapse_ctrl;
+ unsigned int collapse_mask;
unsigned int gds_hw_ctrl;
unsigned int clamp_io_ctrl;
unsigned int *cxcs;
diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
new file mode 100644
index 000000000000..5367ce654ac9
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8350.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-divider.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x18,
+ .alpha = 0x6000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static const struct clk_parent_data gpu_cc_parent = {
+ .fw_name = "bi_tcxo",
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &gpu_cc_parent,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &gpu_cc_parent,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x117c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x11c0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x11bc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cb_clk = {
+ .halt_reg = 0x1170,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1170,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_at_clk = {
+ .halt_reg = 0x1080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_trig_clk = {
+ .halt_reg = 0x1094,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_tsctr_clk = {
+ .halt_reg = 0x1084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x120c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x120c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_qdss_tsctr_clk = {
+ .halt_reg = 0x105c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x105c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_qdss_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x1058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x1178,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x1204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8350_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CB_CLK] = &gpu_cc_cb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_QDSS_AT_CLK] = &gpu_cc_cx_qdss_at_clk.clkr,
+ [GPU_CC_CX_QDSS_TRIG_CLK] = &gpu_cc_cx_qdss_trig_clk.clkr,
+ [GPU_CC_CX_QDSS_TSCTR_CLK] = &gpu_cc_cx_qdss_tsctr_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_QDSS_TSCTR_CLK] = &gpu_cc_gx_qdss_tsctr_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8350_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+ [GPUCC_GPU_CC_CB_BCR] = { 0x116c },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x1174 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8350_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8350_desc = {
+ .config = &gpu_cc_sm8350_regmap_config,
+ .clks = gpu_cc_sm8350_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8350_clocks),
+ .resets = gpu_cc_sm8350_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8350_resets),
+ .gdscs = gpu_cc_sm8350_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8350_gdscs),
+};
+
+static int gpu_cc_sm8350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8350_desc);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to map gpu cc registers\n");
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8350_desc, regmap);
+}
+
+static const struct of_device_id gpu_cc_sm8350_match_table[] = {
+ { .compatible = "qcom,sm8350-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8350_match_table);
+
+static struct platform_driver gpu_cc_sm8350_driver = {
+ .probe = gpu_cc_sm8350_probe,
+ .driver = {
+ .name = "sm8350-gpucc",
+ .of_match_table = gpu_cc_sm8350_match_table,
+ },
+};
+
+static int __init gpu_cc_sm8350_init(void)
+{
+ return platform_driver_register(&gpu_cc_sm8350_driver);
+}
+subsys_initcall(gpu_cc_sm8350_init);
+
+static void __exit gpu_cc_sm8350_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sm8350_driver);
+}
+module_exit(gpu_cc_sm8350_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8350 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
index 4d4b657d33c3..cfd961d5cc45 100644
--- a/drivers/clk/qcom/krait-cc.c
+++ b/drivers/clk/qcom/krait-cc.c
@@ -139,6 +139,14 @@ krait_add_sec_mux(struct device *dev, int id, const char *s,
mux->hw.init = &init;
mux->safe_sel = 0;
+ /* Checking for qcom,krait-cc-v1 or qcom,krait-cc-v2 is not
+ * enough to limit this to apq/ipq8064. Directly check machine
+ * compatible to correctly handle this errata.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064") ||
+ of_machine_is_compatible("qcom,apq8064"))
+ mux->disable_sec_src_gating = true;
+
init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
if (!init.name)
return -ENOMEM;
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 24843e4f2599..80330dab4d81 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -45,194 +45,14 @@ enum {
P_MMPLL4,
};
-static const struct parent_map mmss_xo_hdmi_map[] = {
- { P_XO, 0 },
- { P_HDMIPLL, 1 }
-};
-
-static const char * const mmss_xo_hdmi[] = {
- "xo",
- "hdmipll"
-};
-
-static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
- { P_XO, 0 },
- { P_DSI0PLL, 1 },
- { P_DSI1PLL, 2 }
-};
-
-static const char * const mmss_xo_dsi0pll_dsi1pll[] = {
- "xo",
- "dsi0pll",
- "dsi1pll"
-};
-
-static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_gpll0_gpll0_div[] = {
- "xo",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_dsibyte_map[] = {
- { P_XO, 0 },
- { P_DSI0PLL_BYTE, 1 },
- { P_DSI1PLL_BYTE, 2 }
-};
-
-static const char * const mmss_xo_dsibyte[] = {
- "xo",
- "dsi0pllbyte",
- "dsi1pllbyte"
-};
-
-static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL3, 3 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll3",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL5, 2 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll5",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL4, 3 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll4",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL9, 2 },
- { P_MMPLL2, 3 },
- { P_MMPLL8, 4 },
- { P_GPLL0, 5 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
- "xo",
- "mmpll0",
- "mmpll9",
- "mmpll2",
- "mmpll8",
- "gpll0"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL9, 2 },
- { P_MMPLL2, 3 },
- { P_MMPLL8, 4 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll9",
- "mmpll2",
- "mmpll8",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_MMPLL4, 3 },
- { P_MMPLL3, 4 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "mmpll4",
- "mmpll3",
- "gpll0",
- "gpll0_div"
-};
-
static struct clk_fixed_factor gpll0_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "gpll0_div",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gpll0", .name = "gpll0" },
+ },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
@@ -265,7 +85,9 @@ static struct clk_alpha_pll mmpll0_early = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmpll0_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -278,7 +100,9 @@ static struct clk_alpha_pll_postdiv mmpll0 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll0",
- .parent_names = (const char *[]){ "mmpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll0_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -295,7 +119,9 @@ static struct clk_alpha_pll mmpll1_early = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "mmpll1_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
}
@@ -308,7 +134,9 @@ static struct clk_alpha_pll_postdiv mmpll1 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll1",
- .parent_names = (const char *[]){ "mmpll1_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll1_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -322,7 +150,9 @@ static struct clk_alpha_pll mmpll2_early = {
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -334,7 +164,9 @@ static struct clk_alpha_pll_postdiv mmpll2 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2",
- .parent_names = (const char *[]){ "mmpll2_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll2_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -348,7 +180,9 @@ static struct clk_alpha_pll mmpll3_early = {
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -360,7 +194,9 @@ static struct clk_alpha_pll_postdiv mmpll3 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
- .parent_names = (const char *[]){ "mmpll3_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll3_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -374,7 +210,9 @@ static struct clk_alpha_pll mmpll4_early = {
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -386,7 +224,9 @@ static struct clk_alpha_pll_postdiv mmpll4 = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
- .parent_names = (const char *[]){ "mmpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll4_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -400,7 +240,9 @@ static struct clk_alpha_pll mmpll5_early = {
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -412,7 +254,9 @@ static struct clk_alpha_pll_postdiv mmpll5 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
- .parent_names = (const char *[]){ "mmpll5_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll5_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -426,7 +270,9 @@ static struct clk_alpha_pll mmpll8_early = {
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -438,7 +284,9 @@ static struct clk_alpha_pll_postdiv mmpll8 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8",
- .parent_names = (const char *[]){ "mmpll8_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll8_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -452,7 +300,9 @@ static struct clk_alpha_pll mmpll9_early = {
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -464,13 +314,197 @@ static struct clk_alpha_pll_postdiv mmpll9 = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9",
- .parent_names = (const char *[]){ "mmpll9_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll9_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct parent_map mmss_xo_hdmi_map[] = {
+ { P_XO, 0 },
+ { P_HDMIPLL, 1 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "hdmipll", .name = "hdmipll" }
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
+};
+
+static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" }
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
+};
+
+static const struct clk_parent_data mmss_xo_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" }
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL3, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL5, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll5.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll9.clkr.hw },
+ { .hw = &mmpll2.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll9.clkr.hw },
+ { .hw = &mmpll2.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_MMPLL4, 3 },
+ { P_MMPLL3, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
static const struct freq_tbl ftbl_ahb_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
@@ -485,8 +519,8 @@ static struct clk_rcg2 ahb_clk_src = {
.freq_tbl = ftbl_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -509,8 +543,8 @@ static struct clk_rcg2 axi_clk_src = {
.freq_tbl = ftbl_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -522,8 +556,8 @@ static struct clk_rcg2 maxi_clk_src = {
.freq_tbl = ftbl_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "maxi_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -535,8 +569,8 @@ static struct clk_rcg2_gfx3d gfx3d_clk_src = {
.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
- .num_parents = 6,
+ .parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0),
.ops = &clk_gfx3d_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -560,8 +594,8 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.freq_tbl = ftbl_rbbmtimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -572,8 +606,8 @@ static struct clk_rcg2 isense_clk_src = {
.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "isense_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -591,8 +625,8 @@ static struct clk_rcg2 rbcpr_clk_src = {
.freq_tbl = ftbl_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -613,8 +647,8 @@ static struct clk_rcg2 video_core_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -627,8 +661,8 @@ static struct clk_rcg2 video_subcore0_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -641,8 +675,8 @@ static struct clk_rcg2 video_subcore1_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -654,8 +688,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -668,8 +702,8 @@ static struct clk_rcg2 pclk1_clk_src = {
.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
- .parent_names = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -695,8 +729,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_mdp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -713,8 +747,8 @@ static struct clk_rcg2 extpclk_clk_src = {
.freq_tbl = extpclk_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "extpclk_clk_src",
- .parent_names = mmss_xo_hdmi,
- .num_parents = 2,
+ .parent_data = mmss_xo_hdmi,
+ .num_parents = ARRAY_SIZE(mmss_xo_hdmi),
.ops = &clk_byte_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -732,8 +766,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_mdss_vsync_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = mmss_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -750,8 +784,8 @@ static struct clk_rcg2 hdmi_clk_src = {
.freq_tbl = ftbl_mdss_hdmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_clk_src",
- .parent_names = mmss_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -762,8 +796,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = mmss_xo_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -775,8 +809,8 @@ static struct clk_rcg2 byte1_clk_src = {
.parent_map = mmss_xo_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -794,8 +828,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -807,8 +841,8 @@ static struct clk_rcg2 esc1_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -831,8 +865,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -845,8 +879,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -873,8 +907,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -887,8 +921,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -901,8 +935,8 @@ static struct clk_rcg2 mclk2_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -915,8 +949,8 @@ static struct clk_rcg2 mclk3_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -937,8 +971,8 @@ static struct clk_rcg2 cci_clk_src = {
.freq_tbl = ftbl_cci_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -957,8 +991,8 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -970,8 +1004,8 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -983,8 +1017,8 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1004,8 +1038,8 @@ static struct clk_rcg2 csiphy0_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy0_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1017,8 +1051,8 @@ static struct clk_rcg2 csiphy1_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy1_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1030,8 +1064,8 @@ static struct clk_rcg2 csiphy2_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy2_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1053,8 +1087,8 @@ static struct clk_rcg2 jpeg0_clk_src = {
.freq_tbl = ftbl_jpeg0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1075,8 +1109,8 @@ static struct clk_rcg2 jpeg2_clk_src = {
.freq_tbl = ftbl_jpeg2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1088,8 +1122,8 @@ static struct clk_rcg2 jpeg_dma_clk_src = {
.freq_tbl = ftbl_jpeg0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg_dma_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1111,8 +1145,8 @@ static struct clk_rcg2 vfe0_clk_src = {
.freq_tbl = ftbl_vfe0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1124,8 +1158,8 @@ static struct clk_rcg2 vfe1_clk_src = {
.freq_tbl = ftbl_vfe0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1146,8 +1180,8 @@ static struct clk_rcg2 cpp_clk_src = {
.freq_tbl = ftbl_cpp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1168,8 +1202,8 @@ static struct clk_rcg2 csi0_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1181,8 +1215,8 @@ static struct clk_rcg2 csi1_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1194,8 +1228,8 @@ static struct clk_rcg2 csi2_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1207,8 +1241,8 @@ static struct clk_rcg2 csi3_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1227,8 +1261,8 @@ static struct clk_rcg2 fd_core_clk_src = {
.freq_tbl = ftbl_fd_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "fd_core_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1240,7 +1274,9 @@ static struct clk_branch mmss_mmagic_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1255,7 +1291,9 @@ static struct clk_branch mmss_mmagic_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_cfg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1270,7 +1308,9 @@ static struct clk_branch mmss_misc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1285,7 +1325,9 @@ static struct clk_branch mmss_misc_cxo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_cxo_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -1299,7 +1341,9 @@ static struct clk_branch mmss_mmagic_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1314,7 +1358,9 @@ static struct clk_branch mmagic_camss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_camss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1329,7 +1375,9 @@ static struct clk_branch mmagic_camss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_camss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1344,7 +1392,9 @@ static struct clk_branch smmu_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_vfe_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1359,7 +1409,9 @@ static struct clk_branch smmu_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_vfe_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1374,7 +1426,9 @@ static struct clk_branch smmu_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_cpp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1389,7 +1443,9 @@ static struct clk_branch smmu_cpp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_cpp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1404,7 +1460,9 @@ static struct clk_branch smmu_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_jpeg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1419,7 +1477,9 @@ static struct clk_branch smmu_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_jpeg_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1434,7 +1494,9 @@ static struct clk_branch mmagic_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_mdss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1449,7 +1511,9 @@ static struct clk_branch mmagic_mdss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_mdss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1464,7 +1528,9 @@ static struct clk_branch smmu_rot_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_rot_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1479,7 +1545,9 @@ static struct clk_branch smmu_rot_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_rot_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1494,7 +1562,9 @@ static struct clk_branch smmu_mdp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_mdp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1509,7 +1579,9 @@ static struct clk_branch smmu_mdp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_mdp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1524,7 +1596,9 @@ static struct clk_branch mmagic_video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1539,7 +1613,9 @@ static struct clk_branch mmagic_video_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_video_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1554,7 +1630,9 @@ static struct clk_branch smmu_video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_video_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1569,7 +1647,9 @@ static struct clk_branch smmu_video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1584,7 +1664,9 @@ static struct clk_branch mmagic_bimc_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_bimc_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1599,7 +1681,9 @@ static struct clk_branch gpu_gx_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_gx_gfx3d_clk",
- .parent_names = (const char *[]){ "gfx3d_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.rcg.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1614,7 +1698,9 @@ static struct clk_branch gpu_gx_rbbmtimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_gx_rbbmtimer_clk",
- .parent_names = (const char *[]){ "rbbmtimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rbbmtimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1629,7 +1715,9 @@ static struct clk_branch gpu_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1644,7 +1732,9 @@ static struct clk_branch gpu_aon_isense_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_aon_isense_clk",
- .parent_names = (const char *[]){ "isense_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &isense_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1659,7 +1749,9 @@ static struct clk_branch vmem_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vmem_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1674,7 +1766,9 @@ static struct clk_branch vmem_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vmem_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1689,7 +1783,9 @@ static struct clk_branch mmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_clk",
- .parent_names = (const char *[]){ "rbcpr_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rbcpr_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1704,7 +1800,9 @@ static struct clk_branch mmss_rbcpr_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1719,7 +1817,9 @@ static struct clk_branch video_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_core_clk",
- .parent_names = (const char *[]){ "video_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1734,7 +1834,9 @@ static struct clk_branch video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1749,7 +1851,9 @@ static struct clk_branch video_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1764,7 +1868,9 @@ static struct clk_branch video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1779,7 +1885,9 @@ static struct clk_branch video_subcore0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk",
- .parent_names = (const char *[]){ "video_subcore0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_subcore0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1794,7 +1902,9 @@ static struct clk_branch video_subcore1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk",
- .parent_names = (const char *[]){ "video_subcore1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_subcore1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1809,7 +1919,9 @@ static struct clk_branch mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1824,7 +1936,9 @@ static struct clk_branch mdss_hdmi_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1839,7 +1953,9 @@ static struct clk_branch mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1854,7 +1970,9 @@ static struct clk_branch mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk0_clk",
- .parent_names = (const char *[]){ "pclk0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1869,7 +1987,9 @@ static struct clk_branch mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk1_clk",
- .parent_names = (const char *[]){ "pclk1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1884,7 +2004,9 @@ static struct clk_branch mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_mdp_clk",
- .parent_names = (const char *[]){ "mdp_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1899,7 +2021,9 @@ static struct clk_branch mdss_extpclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_extpclk_clk",
- .parent_names = (const char *[]){ "extpclk_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &extpclk_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1914,7 +2038,9 @@ static struct clk_branch mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_vsync_clk",
- .parent_names = (const char *[]){ "vsync_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1929,7 +2055,9 @@ static struct clk_branch mdss_hdmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_clk",
- .parent_names = (const char *[]){ "hdmi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &hdmi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1944,7 +2072,9 @@ static struct clk_branch mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte0_clk",
- .parent_names = (const char *[]){ "byte0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1959,7 +2089,9 @@ static struct clk_branch mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte1_clk",
- .parent_names = (const char *[]){ "byte1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1974,7 +2106,9 @@ static struct clk_branch mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc0_clk",
- .parent_names = (const char *[]){ "esc0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1989,7 +2123,9 @@ static struct clk_branch mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc1_clk",
- .parent_names = (const char *[]){ "esc1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2004,7 +2140,9 @@ static struct clk_branch camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_top_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2019,7 +2157,9 @@ static struct clk_branch camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2034,7 +2174,9 @@ static struct clk_branch camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_micro_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2049,7 +2191,9 @@ static struct clk_branch camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk",
- .parent_names = (const char *[]){ "camss_gp0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2064,7 +2208,9 @@ static struct clk_branch camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk",
- .parent_names = (const char *[]){ "camss_gp1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2079,7 +2225,9 @@ static struct clk_branch camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk0_clk",
- .parent_names = (const char *[]){ "mclk0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2094,7 +2242,9 @@ static struct clk_branch camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk1_clk",
- .parent_names = (const char *[]){ "mclk1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2109,7 +2259,9 @@ static struct clk_branch camss_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk2_clk",
- .parent_names = (const char *[]){ "mclk2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2124,7 +2276,9 @@ static struct clk_branch camss_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk3_clk",
- .parent_names = (const char *[]){ "mclk3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2139,7 +2293,9 @@ static struct clk_branch camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_clk",
- .parent_names = (const char *[]){ "cci_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2154,7 +2310,9 @@ static struct clk_branch camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2169,7 +2327,9 @@ static struct clk_branch camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phytimer_clk",
- .parent_names = (const char *[]){ "csi0phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2184,7 +2344,9 @@ static struct clk_branch camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phytimer_clk",
- .parent_names = (const char *[]){ "csi1phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2199,7 +2361,9 @@ static struct clk_branch camss_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phytimer_clk",
- .parent_names = (const char *[]){ "csi2phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2214,7 +2378,9 @@ static struct clk_branch camss_csiphy0_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy0_3p_clk",
- .parent_names = (const char *[]){ "csiphy0_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy0_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2229,7 +2395,9 @@ static struct clk_branch camss_csiphy1_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy1_3p_clk",
- .parent_names = (const char *[]){ "csiphy1_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy1_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2244,7 +2412,9 @@ static struct clk_branch camss_csiphy2_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy2_3p_clk",
- .parent_names = (const char *[]){ "csiphy2_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy2_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2259,7 +2429,9 @@ static struct clk_branch camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg0_clk",
- .parent_names = (const char *[]){ "jpeg0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2274,7 +2446,9 @@ static struct clk_branch camss_jpeg2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg2_clk",
- .parent_names = (const char *[]){ "jpeg2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2289,7 +2463,9 @@ static struct clk_branch camss_jpeg_dma_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_dma_clk",
- .parent_names = (const char *[]){ "jpeg_dma_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg_dma_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2304,7 +2480,9 @@ static struct clk_branch camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2319,7 +2497,9 @@ static struct clk_branch camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2334,7 +2514,9 @@ static struct clk_branch camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2349,7 +2531,9 @@ static struct clk_branch camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2364,7 +2548,9 @@ static struct clk_branch camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2379,7 +2565,9 @@ static struct clk_branch camss_vfe0_stream_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_stream_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2394,7 +2582,9 @@ static struct clk_branch camss_vfe0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2409,7 +2599,9 @@ static struct clk_branch camss_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2424,7 +2616,9 @@ static struct clk_branch camss_vfe1_stream_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_stream_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2439,7 +2633,9 @@ static struct clk_branch camss_vfe1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2454,7 +2650,9 @@ static struct clk_branch camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe0_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2469,7 +2667,9 @@ static struct clk_branch camss_csi_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe1_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2484,7 +2684,9 @@ static struct clk_branch camss_cpp_vbif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_vbif_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2499,7 +2701,9 @@ static struct clk_branch camss_cpp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2514,7 +2718,9 @@ static struct clk_branch camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_clk",
- .parent_names = (const char *[]){ "cpp_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2529,7 +2735,9 @@ static struct clk_branch camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2544,7 +2752,9 @@ static struct clk_branch camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2559,7 +2769,9 @@ static struct clk_branch camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2574,7 +2786,9 @@ static struct clk_branch camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phy_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2589,7 +2803,9 @@ static struct clk_branch camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0rdi_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2604,7 +2820,9 @@ static struct clk_branch camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0pix_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2619,7 +2837,9 @@ static struct clk_branch camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2634,7 +2854,9 @@ static struct clk_branch camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2649,7 +2871,9 @@ static struct clk_branch camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phy_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2664,7 +2888,9 @@ static struct clk_branch camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1rdi_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2679,7 +2905,9 @@ static struct clk_branch camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1pix_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2694,7 +2922,9 @@ static struct clk_branch camss_csi2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2709,7 +2939,9 @@ static struct clk_branch camss_csi2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2724,7 +2956,9 @@ static struct clk_branch camss_csi2phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phy_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2739,7 +2973,9 @@ static struct clk_branch camss_csi2rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2rdi_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2754,7 +2990,9 @@ static struct clk_branch camss_csi2pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2pix_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2769,7 +3007,9 @@ static struct clk_branch camss_csi3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2784,7 +3024,9 @@ static struct clk_branch camss_csi3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2799,7 +3041,9 @@ static struct clk_branch camss_csi3phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3phy_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2814,7 +3058,9 @@ static struct clk_branch camss_csi3rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3rdi_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2829,7 +3075,9 @@ static struct clk_branch camss_csi3pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3pix_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2844,7 +3092,9 @@ static struct clk_branch camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ispif_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2859,7 +3109,9 @@ static struct clk_branch fd_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_core_clk",
- .parent_names = (const char *[]){ "fd_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &fd_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2874,7 +3126,9 @@ static struct clk_branch fd_core_uar_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_core_uar_clk",
- .parent_names = (const char *[]){ "fd_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &fd_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2889,7 +3143,9 @@ static struct clk_branch fd_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
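The mmcc-msm8996 hunks above all apply the same conversion: string-based .parent_names tables become either .parent_data (for parents coming from outside the driver, matched by DT clock-names with a legacy global-name fallback) or .parent_hws (for parents registered by this driver, referenced directly through their struct clk_hw), and the hand-counted .num_parents values are replaced by ARRAY_SIZE() over the table. A minimal sketch of the pattern, with purely illustrative clock names (it assumes <linux/clk-provider.h> plus the driver's own clk-rcg.h/clk-branch.h for the ops symbols):

	/* External parents: matched by DT clock-names, with a global-name fallback. */
	static const struct clk_parent_data demo_parents[] = {
		{ .fw_name = "xo", .name = "xo_board" },
		{ .fw_name = "pll0", .name = "demo_pll0" },
	};

	static struct clk_hw demo_parent_hw;	/* stand-in for &some_clk_src.clkr.hw */

	static const struct clk_init_data demo_rcg_init = {
		.name = "demo_clk_src",
		.parent_data = demo_parents,
		/* ARRAY_SIZE() keeps num_parents in sync with the table. */
		.num_parents = ARRAY_SIZE(demo_parents),
		.ops = &clk_rcg2_ops,
	};

	static const struct clk_init_data demo_branch_init = {
		.name = "demo_branch_clk",
		/* Parent owned by the same driver: point straight at its clk_hw. */
		.parent_hws = (const struct clk_hw*[]){ &demo_parent_hw },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_branch2_ops,
	};

Using clk_hw pointers and firmware names avoids relying on globally unique clock name strings, which is why the single-parent branch clocks switch to .parent_hws while the externally supplied "xo" and "gcc_mmss_noc_cfg_ahb_clk" parents switch to .parent_data.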
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 8617454e4a77..f28f2cb051d7 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -277,7 +277,6 @@ static struct gdsc mvs0c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs1c_gdsc = {
@@ -287,7 +286,6 @@ static struct gdsc mvs1c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs0_gdsc = {
@@ -297,7 +295,6 @@ static struct gdsc mvs0_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs1_gdsc = {
@@ -307,7 +304,6 @@ static struct gdsc mvs1_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct clk_regmap *video_cc_sm8250_clocks[] = {
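The videocc-sm8250 hunks drop the per-GDSC .supply = "mmcx" entries, so these video GDSCs no longer claim the MMCX regulator themselves; presumably that rail is handled elsewhere (the corresponding probe-side change is outside this excerpt, so this is an assumption). As a hedged sketch, a GDSC definition in the new form simply omits the field; the offset and names below are hypothetical:

	static struct gdsc demo_gdsc = {
		.gdscr = 0x1000,		/* illustrative register offset */
		.pd = {
			.name = "demo_gdsc",
		},
		.flags = 0,
		.pwrsts = PWRSTS_OFF_ON,
		/* no .supply: the parent power rail is no longer claimed here */
	};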
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index cfed11c659d9..f45c2c45808b 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -18,7 +18,6 @@
struct r8a73a4_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_CKSCR 0xc0
@@ -59,7 +58,7 @@ static const struct clk_div_table div4_div_table[] = {
static struct clk * __init
r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
const char *parent_name;
@@ -69,7 +68,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
if (!strcmp(name, "main")) {
- u32 ckscr = readl(cpg->reg + CPG_CKSCR);
+ u32 ckscr = readl(base + CPG_CKSCR);
switch ((ckscr >> 28) & 3) {
case 0: /* extal1 */
@@ -93,14 +92,14 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(base + CPG_PLL0CR);
parent_name = "main";
mult = ((value >> 24) & 0x7f) + 1;
if (value & BIT(20))
div = 2;
} else if (!strcmp(name, "pll1")) {
- u32 value = readl(cpg->reg + CPG_PLL1CR);
+ u32 value = readl(base + CPG_PLL1CR);
parent_name = "main";
/* XXX: enable bit? */
@@ -123,7 +122,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- value = readl(cpg->reg + cr);
+ value = readl(base + cr);
switch ((value >> 5) & 7) {
case 0:
parent_name = "main";
@@ -159,7 +158,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
shift = 0;
}
div *= 32;
- mult = 0x20 - ((readl(cpg->reg + CPG_FRQCRC) >> shift) & 0x1f);
+ mult = 0x20 - ((readl(base + CPG_FRQCRC) >> shift) & 0x1f);
} else {
struct div4_clk *c;
@@ -181,7 +180,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, 4, 0,
+ base + reg, shift, 4, 0,
table, &cpg->lock);
}
}
@@ -189,6 +188,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
{
struct r8a73a4_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -213,8 +213,8 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
for (i = 0; i < num_clks; ++i) {
@@ -224,7 +224,7 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a73a4_cpg_register_clock(np, cpg, name);
+ clk = r8a73a4_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
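In clk-r8a73a4.c (and clk-r8a7740.c just below) the ioremapped register base is only needed while the clocks are registered at init time, so the reg member is dropped from the per-driver state and the base is threaded through as a function argument instead. Roughly, with illustrative names and a placeholder register read:

	struct demo_cpg {
		struct clk_onecell_data data;
		spinlock_t lock;
		/* no void __iomem *reg member any more */
	};

	static struct clk * __init
	demo_cpg_register_clock(struct device_node *np, struct demo_cpg *cpg,
				void __iomem *base, const char *name)
	{
		/* illustrative: derive a fixed factor from a hypothetical register */
		u32 val = readl(base + 0x10);

		return clk_register_fixed_factor(NULL, name,
						 of_clk_get_parent_name(np, 0),
						 0, (val & 0x3f) + 1, 1);
	}

	static void __init demo_cpg_clocks_init(struct device_node *np)
	{
		struct demo_cpg *cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
		void __iomem *base = of_iomap(np, 0);	/* local, never stored */

		if (!cpg || WARN_ON(!base))
			return;

		spin_lock_init(&cpg->lock);
		/* ...register each named clock via demo_cpg_register_clock(np, cpg, base, name)... */
	}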
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index d8190f007a81..3ee3f57e4e9a 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -18,7 +18,6 @@
struct r8a7740_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_FRQCRA 0x00
@@ -61,7 +60,7 @@ static u32 cpg_mode __initdata;
static struct clk * __init
r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
const char *parent_name;
@@ -96,20 +95,20 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = readl(cpg->reg + CPG_FRQCRC);
+ u32 value = readl(base + CPG_FRQCRC);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
} else if (!strcmp(name, "pllc1")) {
- u32 value = readl(cpg->reg + CPG_FRQCRA);
+ u32 value = readl(base + CPG_FRQCRA);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
div = 2;
} else if (!strcmp(name, "pllc2")) {
- u32 value = readl(cpg->reg + CPG_PLLC2CR);
+ u32 value = readl(base + CPG_PLLC2CR);
parent_name = "system";
mult = ((value >> 24) & 0x3f) + 1;
} else if (!strcmp(name, "usb24s")) {
- u32 value = readl(cpg->reg + CPG_USBCKCR);
+ u32 value = readl(base + CPG_USBCKCR);
if (value & BIT(7))
/* extal2 */
parent_name = of_clk_get_parent_name(np, 1);
@@ -137,7 +136,7 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, 4, 0,
+ base + reg, shift, 4, 0,
table, &cpg->lock);
}
}
@@ -145,6 +144,7 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
static void __init r8a7740_cpg_clocks_init(struct device_node *np)
{
struct r8a7740_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -172,8 +172,8 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
for (i = 0; i < num_clks; ++i) {
@@ -183,7 +183,7 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7740_cpg_register_clock(np, cpg, name);
+ clk = r8a7740_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 3ccc53685bdd..797556259370 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -11,12 +11,6 @@
#include <linux/slab.h>
#include <linux/soc/renesas/rcar-rst.h>
-struct r8a7778_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
/* PLL multipliers per bits 11, 12, and 18 of MODEMR */
static const struct {
unsigned long plla_mult;
@@ -47,8 +41,7 @@ static u32 cpg_mode_rates __initdata;
static u32 cpg_mode_divs __initdata;
static struct clk * __init
-r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
- const char *name)
+r8a7778_cpg_register_clock(struct device_node *np, const char *name)
{
if (!strcmp(name, "plla")) {
return clk_register_fixed_factor(NULL, "plla",
@@ -77,7 +70,7 @@ r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
static void __init r8a7778_cpg_clocks_init(struct device_node *np)
{
- struct r8a7778_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -100,23 +93,17 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
return;
}
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
+ if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
-
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
- return;
+ data->clks = clks;
+ data->clk_num = num_clks;
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -125,15 +112,15 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7778_cpg_register_clock(np, cpg, name);
+ clk = r8a7778_cpg_register_clock(np, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
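In clk-r8a7778.c (and clk-r8a7779.c below) the cleanup goes further: nothing in these drivers used the spinlock or the mapped registers, so the wrapper struct disappears and a bare struct clk_onecell_data is enough to back the provider. The resulting shape, sketched with a placeholder fixed-rate registration standing in for the real per-clock logic:

	static void __init demo_cpg_clocks_init(struct device_node *np)
	{
		struct clk_onecell_data *data;
		struct clk **clks;
		int i, num_clks;

		num_clks = of_property_count_strings(np, "clock-output-names");
		if (num_clks < 1)
			return;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
		if (!data || !clks)
			return;		/* leak on purpose, as in the originals */

		data->clks = clks;
		data->clk_num = num_clks;

		for (i = 0; i < num_clks; i++) {
			const char *name;

			of_property_read_string_index(np, "clock-output-names",
						      i, &name);
			/* placeholder: the real drivers compute mult/div here */
			data->clks[i] = clk_register_fixed_rate(NULL, name, NULL,
								0, 19200000);
		}

		of_clk_add_provider(np, of_clk_src_onecell_get, data);
	}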
diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index 9f3b5522eef5..9a2fea8cf4d7 100644
--- a/drivers/clk/renesas/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -21,12 +21,6 @@
#define CPG_NUM_CLOCKS (R8A7779_CLK_OUT + 1)
-struct r8a7779_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
/* -----------------------------------------------------------------------------
* CPG Clock Data
*/
@@ -87,7 +81,7 @@ static const unsigned int cpg_plla_mult[4] __initconst = { 42, 48, 56, 64 };
*/
static struct clk * __init
-r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
+r8a7779_cpg_register_clock(struct device_node *np,
const struct cpg_clk_config *config,
unsigned int plla_mult, const char *name)
{
@@ -119,7 +113,7 @@ r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
static void __init r8a7779_cpg_clocks_init(struct device_node *np)
{
const struct cpg_clk_config *config;
- struct r8a7779_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
unsigned int i, plla_mult;
int num_clks;
@@ -134,19 +128,17 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
return;
}
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(CPG_NUM_CLOCKS, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
+ if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
+ data->clks = clks;
+ data->clk_num = num_clks;
config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(mode)];
plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(mode)];
@@ -158,16 +150,15 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7779_cpg_register_clock(np, cpg, config,
- plla_mult, name);
+ clk = r8a7779_cpg_register_clock(np, config, plla_mult, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 7b703f14e20b..e770f09a27ed 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -15,11 +15,6 @@
#include <linux/of_address.h>
#include <linux/slab.h>
-struct rz_cpg {
- struct clk_onecell_data data;
- void __iomem *reg;
-};
-
#define CPG_FRQCR 0x10
#define CPG_FRQCR2 0x14
@@ -49,7 +44,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
}
static struct clk * __init
-rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
+rz_cpg_register_clock(struct device_node *np, void __iomem *base,
+ const char *name)
{
u32 val;
unsigned mult;
@@ -65,7 +61,7 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
}
/* If mapping regs failed, skip non-pll clocks. System will boot anyhow */
- if (!cpg->reg)
+ if (!base)
return ERR_PTR(-ENXIO);
/* FIXME:"i" and "g" are variable clocks with non-integer dividers (e.g. 2/3)
@@ -73,9 +69,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
* let them run at fixed current speed and implement the details later.
*/
if (strcmp(name, "i") == 0)
- val = (readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
+ val = (readl(base + CPG_FRQCR) >> 8) & 3;
else if (strcmp(name, "g") == 0)
- val = readl(cpg->reg + CPG_FRQCR2) & 3;
+ val = readl(base + CPG_FRQCR2) & 3;
else
return ERR_PTR(-EINVAL);
@@ -85,8 +81,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
static void __init rz_cpg_clocks_init(struct device_node *np)
{
- struct rz_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
+ void __iomem *base;
unsigned i;
int num_clks;
@@ -94,14 +91,14 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
if (WARN(num_clks <= 0, "can't count CPG clocks\n"))
return;
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- BUG_ON(!cpg || !clks);
+ BUG_ON(!data || !clks);
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
+ data->clks = clks;
+ data->clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
+ base = of_iomap(np, 0);
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -109,15 +106,15 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i, &name);
- clk = rz_cpg_register_clock(np, cpg, name);
+ clk = rz_cpg_register_clock(np, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 4146c1d717b9..8c51090f13e1 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -18,7 +18,6 @@
struct sh73a0_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_FRQCRA 0x00
@@ -73,7 +72,7 @@ static const struct clk_div_table z_div_table[] = {
static struct clk * __init
sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
unsigned int shift, reg, width;
@@ -83,12 +82,12 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
if (!strcmp(name, "main")) {
/* extal1, extal1_div2, extal2, extal2_div2 */
- u32 parent_idx = (readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
+ u32 parent_idx = (readl(base + CPG_CKSCR) >> 28) & 3;
parent_name = of_clk_get_parent_name(np, parent_idx >> 1);
div = (parent_idx & 1) + 1;
} else if (!strncmp(name, "pll", 3)) {
- void __iomem *enable_reg = cpg->reg;
+ void __iomem *enable_reg = base;
u32 enable_bit = name[3] - '0';
parent_name = "main";
@@ -108,7 +107,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- if (readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
+ if (readl(base + CPG_PLLECR) & BIT(enable_bit)) {
mult = ((readl(enable_reg) >> 24) & 0x3f) + 1;
/* handle CFG bit for PLL1 and PLL2 */
if (enable_bit == 1 || enable_bit == 2)
@@ -117,7 +116,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
}
} else if (!strcmp(name, "dsi0phy") || !strcmp(name, "dsi1phy")) {
u32 phy_no = name[3] - '0';
- void __iomem *dsi_reg = cpg->reg +
+ void __iomem *dsi_reg = base +
(phy_no ? CPG_DSI1PHYCR : CPG_DSI0PHYCR);
parent_name = phy_no ? "dsi1pck" : "dsi0pck";
@@ -154,7 +153,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, width, 0,
+ base + reg, shift, width, 0,
table, &cpg->lock);
}
}
@@ -162,6 +161,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
static void __init sh73a0_cpg_clocks_init(struct device_node *np)
{
struct sh73a0_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -186,14 +186,14 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
/* Set SDHI clocks to a known state */
- writel(0x108, cpg->reg + CPG_SD0CKCR);
- writel(0x108, cpg->reg + CPG_SD1CKCR);
- writel(0x108, cpg->reg + CPG_SD2CKCR);
+ writel(0x108, base + CPG_SD0CKCR);
+ writel(0x108, base + CPG_SD1CKCR);
+ writel(0x108, base + CPG_SD2CKCR);
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -202,7 +202,7 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = sh73a0_cpg_register_clock(np, cpg, name);
+ clk = sh73a0_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index c17ebe6b5992..cd80b6084ece 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -77,6 +77,8 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
/* Core Clock Outputs */
+ DEF_GEN4_Z("z0", R8A779F0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 0),
+ DEF_GEN4_Z("z1", R8A779F0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 8),
DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1),
DEF_FIXED("s0d3", R8A779F0_CLK_S0D3, CLK_S0, 3, 1),
DEF_FIXED("s0d4", R8A779F0_CLK_S0D4, CLK_S0, 4, 1),
@@ -118,20 +120,28 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("hscif0", 514, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif1", 515, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif2", 516, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif3", 517, R8A779F0_CLK_S0D3),
DEF_MOD("i2c0", 518, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c1", 519, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c2", 520, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c3", 521, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c4", 522, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c5", 523, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2),
+ DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2),
DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0),
DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER),
DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
DEF_MOD("wdt", 907, R8A779F0_CLK_R),
DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
+ DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
};
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 35ffc462af1a..1488c9d6e639 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -51,11 +51,9 @@ struct r9a06g032_clkdesc {
struct {
u16 div, mul;
};
- unsigned int factor;
- unsigned int frequency;
/* for dual gate */
struct {
- uint16_t group : 1, index: 3;
+ uint16_t group : 1;
u16 sel, g1, r1, g2, r2;
} dual;
};
@@ -85,10 +83,10 @@ struct r9a06g032_clkdesc {
.source = 1 + R9A06G032_##_src, .name = _n, \
.reg = _reg, .div_min = _min, .div_max = _max, \
.div_table = { __VA_ARGS__ } }
-#define D_UGATE(_idx, _n, _src, _g, _gi, _g1, _r1, _g2, _r2) \
+#define D_UGATE(_idx, _n, _src, _g, _g1, _r1, _g2, _r2) \
{ .type = K_DUALGATE, .index = R9A06G032_##_idx, \
.source = 1 + R9A06G032_##_src, .name = _n, \
- .dual = { .group = _g, .index = _gi, \
+ .dual = { .group = _g, \
.g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
@@ -290,8 +288,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_012",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_UART,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
- .dual.sel = ((0xec / 4) << 5) | 24,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
.dual.group = 0,
},
{
@@ -299,18 +297,18 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_34567",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_P2_PG,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
- .dual.sel = ((0x34 / 4) << 5) | 30,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
.dual.group = 1,
},
- D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
- D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 1, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
- D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 2, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
- D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0, 0x760, 0x761, 0x762, 0x763),
- D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 1, 0x764, 0x765, 0x766, 0x767),
- D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 2, 0x768, 0x769, 0x76a, 0x76b),
- D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 3, 0x76c, 0x76d, 0x76e, 0x76f),
- D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 4, 0x770, 0x771, 0x772, 0x773),
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0x760, 0x761, 0x762, 0x763),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 0x764, 0x765, 0x766, 0x767),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 0x768, 0x769, 0x76a, 0x76b),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 0x76c, 0x76d, 0x76e, 0x76f),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 0x770, 0x771, 0x772, 0x773),
};
struct r9a06g032_priv {
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 33c2bd8df2e5..37475465100d 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -36,9 +36,11 @@ enum clk_ids {
CLK_PLL3_DIV2_4_2,
CLK_SEL_PLL3_3,
CLK_DIV_PLL3_C,
+#ifdef CONFIG_ARM64
CLK_PLL5,
CLK_PLL5_500,
CLK_PLL5_250,
+#endif
CLK_PLL6,
CLK_PLL6_250,
CLK_P1_DIV2,
@@ -100,9 +102,11 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3),
DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32),
+#ifdef CONFIG_ARM64
DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6),
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2),
+#endif
DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
@@ -126,12 +130,20 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
};
static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
0x514, 0),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
0x518, 0),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
0x518, 1),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
+ 0x518, 0),
+ DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
+ 0x518, 1),
+#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
@@ -243,9 +255,14 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
};
static struct rzg2l_reset r9a07g043_resets[] = {
+#ifdef CONFIG_ARM64
DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A07G043_IA55_RESETN, 0x818, 0),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_RST(R9A07G043_IAX45_RESETN, 0x818, 0),
+#endif
DEF_RST(R9A07G043_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G043_DMAC_RST_ASYNC, 0x82c, 1),
DEF_RST(R9A07G043_OSTM0_PRESETZ, 0x834, 0),
@@ -291,8 +308,13 @@ static struct rzg2l_reset r9a07g043_resets[] = {
};
static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+#ifdef CONFIG_ARM64
MOD_CLK_BASE + R9A07G043_GIC600_GICCLK,
MOD_CLK_BASE + R9A07G043_IA55_CLK,
+#endif
+#ifdef CONFIG_RISCV
+ MOD_CLK_BASE + R9A07G043_IAX45_CLK,
+#endif
MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
};
@@ -310,11 +332,21 @@ const struct rzg2l_cpg_info r9a07g043_cpg_info = {
/* Module Clocks */
.mod_clks = r9a07g043_mod_clks,
.num_mod_clks = ARRAY_SIZE(r9a07g043_mod_clks),
+#ifdef CONFIG_ARM64
.num_hw_mod_clks = R9A07G043_TSU_PCLK + 1,
+#endif
+#ifdef CONFIG_RISCV
+ .num_hw_mod_clks = R9A07G043_IAX45_PCLK + 1,
+#endif
/* Resets */
.resets = r9a07g043_resets,
+#ifdef CONFIG_ARM64
.num_resets = R9A07G043_TSU_PRESETN + 1, /* Last reset ID + 1 */
+#endif
+#ifdef CONFIG_RISCV
+ .num_resets = R9A07G043_IAX45_RESETN + 1, /* Last reset ID + 1 */
+#endif
.has_clk_mon_regs = true,
};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index b288897852c7..fd7c4eecd398 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -182,7 +182,7 @@ static const struct {
};
static const struct {
- struct rzg2l_mod_clk common[71];
+ struct rzg2l_mod_clk common[76];
#ifdef CONFIG_CLK_R9A07G054
struct rzg2l_mod_clk drp[0];
#endif
@@ -204,6 +204,16 @@ static const struct {
0x534, 1),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
0x534, 2),
+ DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
+ 0x540, 0),
+ DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
+ 0x544, 0),
+ DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
+ 0x544, 1),
+ DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
+ 0x544, 2),
+ DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
+ 0x544, 3),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
0x548, 0),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
@@ -346,6 +356,11 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_OSTM0_PRESETZ, 0x834, 0),
DEF_RST(R9A07G044_OSTM1_PRESETZ, 0x834, 1),
DEF_RST(R9A07G044_OSTM2_PRESETZ, 0x834, 2),
+ DEF_RST(R9A07G044_GPT_RST_C, 0x840, 0),
+ DEF_RST(R9A07G044_POEG_A_RST, 0x844, 0),
+ DEF_RST(R9A07G044_POEG_B_RST, 0x844, 1),
+ DEF_RST(R9A07G044_POEG_C_RST, 0x844, 2),
+ DEF_RST(R9A07G044_POEG_D_RST, 0x844, 3),
DEF_RST(R9A07G044_WDT0_PRESETN, 0x848, 0),
DEF_RST(R9A07G044_WDT1_PRESETN, 0x848, 1),
DEF_RST(R9A07G044_WDT2_PRESETN, 0x848, 2),
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index 40693bb85b80..b21915cf6648 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -126,19 +126,24 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
+ DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
+ DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
+ DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
};
static const struct rzg2l_reset r9a09g011_resets[] = {
+ DEF_RST(R9A09G011_PFC_PRESETN, 0x600, 2),
DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+ DEF_RST_MON(R9A09G011_WDT0_PRESETN, 0x614, 12, 19),
};
static const unsigned int r9a09g011_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index c7ed43d6aa67..e27832e5114f 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -23,7 +23,7 @@
#include "rcar-gen4-cpg.h"
#include "rcar-cpg-lib.h"
-static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initconst;
+static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index e2999ab2b53c..3ff6ecd61756 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1180,7 +1180,7 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
s8 monbit = info->resets[id].monbit;
if (info->has_clk_mon_regs) {
- return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
+ return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
} else if (monbit >= 0) {
u32 monbitmask = BIT(monbit);
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index 906410413bc1..637938e804f8 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* Auxiliary Synthesizer clock implementation
*/
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index f5be02205ac6..2380df293a2c 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* Fractional Synthesizer clock implementation
*/
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 6ed406d943ba..4ef747c2abbb 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* General Purpose Timer Synthesizer clock implementation
*/
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index fed194169666..348eeab0a906 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* VCO-PLL clock implementation
*/
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 157fe099ea6a..50847cccdf58 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* SPEAr clk - Common routines
*/
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index af0e25f496c1..3d580d1bdadd 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Clock framework definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __SPEAR_CLK_H
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 8c8974866789..9d5959a4251a 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mach-spear13xx/spear1310_clock.c
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clkdev.h>
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index c0dc94355c87..8b51229d0471 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mach-spear13xx/spear1340_clock.c
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clkdev.h>
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index c403c66b6583..41717ff707f6 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SPEAr3xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 47810be7f15c..490701ac9e93 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SPEAr6xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <vireshk@kernel.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clkdev.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 29a8c710ae06..001582ea71ba 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -138,19 +138,9 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
&r_apb2_rsb_clk.common,
&r_apb1_ir_clk.common,
&r_apb1_w1_clk.common,
- &ir_clk.common,
- &w1_clk.common,
-};
-
-static struct ccu_common *sun50i_h616_r_ccu_clks[] = {
- &r_apb1_clk.common,
- &r_apb2_clk.common,
- &r_apb1_twd_clk.common,
- &r_apb2_i2c_clk.common,
- &r_apb2_rsb_clk.common,
- &r_apb1_ir_clk.common,
&r_apb1_rtc_clk.common,
&ir_clk.common,
+ &w1_clk.common,
};
static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
@@ -218,8 +208,8 @@ static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
- .ccu_clks = sun50i_h616_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h616_r_ccu_clks),
+ .ccu_clks = sun50i_h6_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_r_ccu_clks),
.hw_clks = &sun50i_h616_r_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 1a5e418923f6..30056da3e0af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -95,13 +95,13 @@ static struct ccu_nkmp pll_periph1_clk = {
},
};
+/* For GPU PLL, using an output divider for DFS causes system to fail */
#define SUN50I_H6_PLL_GPU_REG 0x030
static struct ccu_nkmp pll_gpu_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
- .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
.common = {
.reg = 0x030,
.hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
@@ -294,9 +294,9 @@ static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
0x62c, BIT(0), 0);
+/* Keep GPU_CLK divider const to avoid DFS instability. */
static const char * const gpu_parents[] = { "pll-gpu" };
-static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
- 0, 3, /* M */
+static SUNXI_CCU_MUX_WITH_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
24, 1, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
@@ -1191,6 +1191,16 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
+ /* Force PLL_GPU output divider bits to 0 */
+ val = readl(reg + SUN50I_H6_PLL_GPU_REG);
+ val &= ~BIT(0);
+ writel(val, reg + SUN50I_H6_PLL_GPU_REG);
+
+ /* Force GPU_CLK divider bits to 0 */
+ val = readl(reg + gpu_clk.common.reg);
+ val &= ~GENMASK(3, 0);
+ writel(val, reg + gpu_clk.common.reg);
+
/* Enable the lock bits on all PLLs */
for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
val = readl(reg + pll_regs[i]);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index e7e3ddf4a227..2f6f02f00be2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -53,65 +53,26 @@ static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
static SUNXI_CCU_M(rot_div_a83_clk, "rot-div", "pll-de", 0x0c, 0x0c, 4,
CLK_SET_RATE_PARENT);
-static struct ccu_common *sun8i_a83t_de2_clks[] = {
+static struct ccu_common *sun8i_de2_ccu_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_a83_clk.common,
- &mixer1_div_a83_clk.common,
- &wb_div_a83_clk.common,
-
- &bus_rot_clk.common,
&rot_clk.common,
- &rot_div_a83_clk.common,
-};
-
-static struct ccu_common *sun8i_h3_de2_clks[] = {
- &mixer0_clk.common,
- &mixer1_clk.common,
- &wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
-};
-
-static struct ccu_common *sun8i_v3s_de2_clks[] = {
- &mixer0_clk.common,
- &wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_clk.common,
- &wb_div_clk.common,
-};
-
-static struct ccu_common *sun50i_a64_de2_clks[] = {
- &mixer0_clk.common,
- &mixer1_clk.common,
- &wb_clk.common,
&bus_mixer0_clk.common,
&bus_mixer1_clk.common,
&bus_wb_clk.common,
+ &bus_rot_clk.common,
&mixer0_div_clk.common,
&mixer1_div_clk.common,
&wb_div_clk.common,
-
- &bus_rot_clk.common,
- &rot_clk.common,
&rot_div_clk.common,
+
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
+ &rot_div_a83_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
@@ -219,8 +180,8 @@ static struct ccu_reset_map sun50i_h5_de2_resets[] = {
};
static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
- .ccu_clks = sun8i_a83t_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_a83t_de2_hw_clks,
@@ -229,8 +190,8 @@ static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_h3_de2_hw_clks,
@@ -239,8 +200,8 @@ static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
- .ccu_clks = sun50i_a64_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun50i_a64_de2_hw_clks,
@@ -249,8 +210,8 @@ static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
- .ccu_clks = sun8i_v3s_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_v3s_de2_hw_clks,
@@ -259,8 +220,8 @@ static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
- .ccu_clks = sun50i_a64_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun50i_a64_de2_hw_clks,
@@ -269,8 +230,8 @@ static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_h3_de2_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e058cf691aea..d3fcb983c17c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -562,6 +562,7 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&bus_uart2_clk.common,
&bus_uart3_clk.common,
&bus_scr0_clk.common,
+ &bus_scr1_clk.common,
&bus_ephy_clk.common,
&bus_dbg_clk.common,
&ths_clk.common,
@@ -612,114 +613,6 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&gpu_clk.common,
};
-static struct ccu_common *sun50i_h5_ccu_clks[] = {
- &pll_cpux_clk.common,
- &pll_audio_base_clk.common,
- &pll_video_clk.common,
- &pll_ve_clk.common,
- &pll_ddr_clk.common,
- &pll_periph0_clk.common,
- &pll_gpu_clk.common,
- &pll_periph1_clk.common,
- &pll_de_clk.common,
- &cpux_clk.common,
- &axi_clk.common,
- &ahb1_clk.common,
- &apb1_clk.common,
- &apb2_clk.common,
- &ahb2_clk.common,
- &bus_ce_clk.common,
- &bus_dma_clk.common,
- &bus_mmc0_clk.common,
- &bus_mmc1_clk.common,
- &bus_mmc2_clk.common,
- &bus_nand_clk.common,
- &bus_dram_clk.common,
- &bus_emac_clk.common,
- &bus_ts_clk.common,
- &bus_hstimer_clk.common,
- &bus_spi0_clk.common,
- &bus_spi1_clk.common,
- &bus_otg_clk.common,
- &bus_ehci0_clk.common,
- &bus_ehci1_clk.common,
- &bus_ehci2_clk.common,
- &bus_ehci3_clk.common,
- &bus_ohci0_clk.common,
- &bus_ohci1_clk.common,
- &bus_ohci2_clk.common,
- &bus_ohci3_clk.common,
- &bus_ve_clk.common,
- &bus_tcon0_clk.common,
- &bus_tcon1_clk.common,
- &bus_deinterlace_clk.common,
- &bus_csi_clk.common,
- &bus_tve_clk.common,
- &bus_hdmi_clk.common,
- &bus_de_clk.common,
- &bus_gpu_clk.common,
- &bus_msgbox_clk.common,
- &bus_spinlock_clk.common,
- &bus_codec_clk.common,
- &bus_spdif_clk.common,
- &bus_pio_clk.common,
- &bus_ths_clk.common,
- &bus_i2s0_clk.common,
- &bus_i2s1_clk.common,
- &bus_i2s2_clk.common,
- &bus_i2c0_clk.common,
- &bus_i2c1_clk.common,
- &bus_i2c2_clk.common,
- &bus_uart0_clk.common,
- &bus_uart1_clk.common,
- &bus_uart2_clk.common,
- &bus_uart3_clk.common,
- &bus_scr0_clk.common,
- &bus_scr1_clk.common,
- &bus_ephy_clk.common,
- &bus_dbg_clk.common,
- &ths_clk.common,
- &nand_clk.common,
- &mmc0_clk.common,
- &mmc1_clk.common,
- &mmc2_clk.common,
- &ts_clk.common,
- &ce_clk.common,
- &spi0_clk.common,
- &spi1_clk.common,
- &i2s0_clk.common,
- &i2s1_clk.common,
- &i2s2_clk.common,
- &spdif_clk.common,
- &usb_phy0_clk.common,
- &usb_phy1_clk.common,
- &usb_phy2_clk.common,
- &usb_phy3_clk.common,
- &usb_ohci0_clk.common,
- &usb_ohci1_clk.common,
- &usb_ohci2_clk.common,
- &usb_ohci3_clk.common,
- &dram_clk.common,
- &dram_ve_clk.common,
- &dram_csi_clk.common,
- &dram_deinterlace_clk.common,
- &dram_ts_clk.common,
- &de_clk.common,
- &tcon_clk.common,
- &tve_clk.common,
- &deinterlace_clk.common,
- &csi_misc_clk.common,
- &csi_sclk_clk.common,
- &csi_mclk_clk.common,
- &ve_clk.common,
- &ac_dig_clk.common,
- &avs_clk.common,
- &hdmi_clk.common,
- &hdmi_ddc_clk.common,
- &mbus_clk.common,
- &gpu_clk.common,
-};
-
static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
@@ -1116,8 +1009,8 @@ static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_h5_ccu_desc = {
- .ccu_clks = sun50i_h5_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h5_ccu_clks),
+ .ccu_clks = sun8i_h3_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_h3_ccu_clks),
.hw_clks = &sun50i_h5_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 5b7fab832a52..4221649b311f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -114,32 +114,7 @@ static struct ccu_mp a83t_ir_clk = {
},
};
-static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
- &ar100_clk.common,
- &apb0_clk.common,
- &apb0_pio_clk.common,
- &apb0_ir_clk.common,
- &apb0_timer_clk.common,
- &apb0_rsb_clk.common,
- &apb0_uart_clk.common,
- &apb0_i2c_clk.common,
- &apb0_twd_clk.common,
- &a83t_ir_clk.common,
-};
-
-static struct ccu_common *sun8i_h3_r_ccu_clks[] = {
- &ar100_clk.common,
- &apb0_clk.common,
- &apb0_pio_clk.common,
- &apb0_ir_clk.common,
- &apb0_timer_clk.common,
- &apb0_uart_clk.common,
- &apb0_i2c_clk.common,
- &apb0_twd_clk.common,
- &ir_clk.common,
-};
-
-static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
+static struct ccu_common *sun8i_r_ccu_clks[] = {
&ar100_clk.common,
&apb0_clk.common,
&apb0_pio_clk.common,
@@ -150,6 +125,7 @@ static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
&apb0_i2c_clk.common,
&apb0_twd_clk.common,
&ir_clk.common,
+ &a83t_ir_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
@@ -226,8 +202,8 @@ static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
};
static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
- .ccu_clks = sun8i_a83t_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun8i_a83t_r_hw_clks,
@@ -236,8 +212,8 @@ static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
- .ccu_clks = sun8i_h3_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun8i_h3_r_hw_clks,
@@ -246,8 +222,8 @@ static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = {
- .ccu_clks = sun50i_a64_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun50i_a64_r_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 87f87d6ea3ad..fbb3529f0d3e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -421,6 +421,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&bus_de_clk.common,
&bus_codec_clk.common,
&bus_pio_clk.common,
+ &bus_i2s0_clk.common,
&bus_i2c0_clk.common,
&bus_i2c1_clk.common,
&bus_uart0_clk.common,
@@ -439,6 +440,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&mmc2_output_clk.common,
&ce_clk.common,
&spi0_clk.common,
+ &i2s0_clk.common,
&usb_phy0_clk.common,
&usb_ohci0_clk.common,
&dram_clk.common,
@@ -463,80 +465,6 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
-static struct ccu_common *sun8i_v3_ccu_clks[] = {
- &pll_cpu_clk.common,
- &pll_audio_base_clk.common,
- &pll_video_clk.common,
- &pll_ve_clk.common,
- &pll_ddr0_clk.common,
- &pll_periph0_clk.common,
- &pll_isp_clk.common,
- &pll_periph1_clk.common,
- &pll_ddr1_clk.common,
- &cpu_clk.common,
- &axi_clk.common,
- &ahb1_clk.common,
- &apb1_clk.common,
- &apb2_clk.common,
- &ahb2_clk.common,
- &bus_ce_clk.common,
- &bus_dma_clk.common,
- &bus_mmc0_clk.common,
- &bus_mmc1_clk.common,
- &bus_mmc2_clk.common,
- &bus_dram_clk.common,
- &bus_emac_clk.common,
- &bus_hstimer_clk.common,
- &bus_spi0_clk.common,
- &bus_otg_clk.common,
- &bus_ehci0_clk.common,
- &bus_ohci0_clk.common,
- &bus_ve_clk.common,
- &bus_tcon0_clk.common,
- &bus_csi_clk.common,
- &bus_de_clk.common,
- &bus_codec_clk.common,
- &bus_pio_clk.common,
- &bus_i2s0_clk.common,
- &bus_i2c0_clk.common,
- &bus_i2c1_clk.common,
- &bus_uart0_clk.common,
- &bus_uart1_clk.common,
- &bus_uart2_clk.common,
- &bus_ephy_clk.common,
- &bus_dbg_clk.common,
- &mmc0_clk.common,
- &mmc0_sample_clk.common,
- &mmc0_output_clk.common,
- &mmc1_clk.common,
- &mmc1_sample_clk.common,
- &mmc1_output_clk.common,
- &mmc2_clk.common,
- &mmc2_sample_clk.common,
- &mmc2_output_clk.common,
- &ce_clk.common,
- &spi0_clk.common,
- &i2s0_clk.common,
- &usb_phy0_clk.common,
- &usb_ohci0_clk.common,
- &dram_clk.common,
- &dram_ve_clk.common,
- &dram_csi_clk.common,
- &dram_ohci_clk.common,
- &dram_ehci_clk.common,
- &de_clk.common,
- &tcon_clk.common,
- &csi_misc_clk.common,
- &csi0_mclk_clk.common,
- &csi1_sclk_clk.common,
- &csi1_mclk_clk.common,
- &ve_clk.common,
- &ac_dig_clk.common,
- &avs_clk.common,
- &mbus_clk.common,
- &mipi_csi_clk.common,
-};
-
/* We hardcode the divider to 1 for SDM support */
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
@@ -798,8 +726,8 @@ static const struct sunxi_ccu_desc sun8i_v3s_ccu_desc = {
};
static const struct sunxi_ccu_desc sun8i_v3_ccu_desc = {
- .ccu_clks = sun8i_v3_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_v3_ccu_clks),
+ .ccu_clks = sun8i_v3s_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_ccu_clks),
.hw_clks = &sun8i_v3_hw_clks,
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 3fba3d3ac9a2..1c4e543366dd 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
- depends on ARCH_SUNXI || COMPILE_TEST
+ depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
default y
if CLK_SUNXI
@@ -19,7 +19,6 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- select MFD_SUN6I_PRCM
default y
help
Legacy clock driver for the A31 PRCM clocks. Those are
@@ -27,7 +26,6 @@ config CLK_SUNXI_PRCM_SUN6I
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- select MFD_SUN6I_PRCM
default y
help
Legacy clock driver for the sun8i family PRCM clocks.
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 962502ca7ff0..f5e7e2049241 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -1,13 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clk.h>
#include <linux/clkdev.h>
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index e4db6b9a55c6..dd0709c9c249 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP APLL clock support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* J Keerthy <j-keerthy@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index d6e5f1511ace..27e6b9cb1881 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI clock autoidle support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
index 657c4fe07a95..363c4fdbe01f 100644
--- a/drivers/clk/ti/clk-2xxx.c
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP2 Clock init
*
* Copyright (C) 2013 Texas Instruments, Inc
* Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index b4d142adede4..85c50ea39e6d 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* AM33XX Clock init
*
* Copyright (C) 2013 Texas Instruments, Inc
* Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 8aa5f5793835..ae943ea63c6c 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP3 Clock init
*
* Copyright (C) 2013 Texas Instruments, Inc
* Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 2ff4ff3d95d5..f24f6eb2157a 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* AM43XX Clock init
*
* Copyright (C) 2013 Texas Instruments, Inc
* Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index d078e5d73ed9..868bc7af21b0 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -56,7 +56,7 @@ static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = {
};
static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = {
- "abe_cm:clk:0018:26",
+ "abe-clkctrl:0018:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -76,7 +76,7 @@ static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = {
};
static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = {
- "abe_cm:clk:0020:26",
+ "abe-clkctrl:0020:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -89,7 +89,7 @@ static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = {
};
static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = {
- "abe_cm:clk:0028:26",
+ "abe-clkctrl:0028:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -102,7 +102,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst =
};
static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = {
- "abe_cm:clk:0030:26",
+ "abe-clkctrl:0030:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -115,7 +115,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst =
};
static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = {
- "abe_cm:clk:0038:26",
+ "abe-clkctrl:0038:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -183,18 +183,18 @@ static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = {
{ OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" },
- { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
+ { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
{ OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
- { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
- { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe_cm:clk:0020:24" },
- { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
- { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
- { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
- { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0040:8" },
- { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
- { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
- { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
- { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
+ { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe-clkctrl:0020:24" },
+ { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0040:8" },
+ { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
{ OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
};
@@ -287,7 +287,7 @@ static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = {
static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = {
{ OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" },
- { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss_cm:clk:0008:24" },
+ { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss-clkctrl:0008:24" },
{ 0 },
};
@@ -320,7 +320,7 @@ static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = {
- { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3_dss_cm:clk:0000:8" },
+ { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3-dss-clkctrl:0000:8" },
{ 0 },
};
@@ -336,7 +336,7 @@ static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = {
- { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3_gfx_cm:clk:0000:24" },
+ { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3-gfx-clkctrl:0000:24" },
{ 0 },
};
@@ -372,12 +372,12 @@ static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = {
};
static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
- "l3_init_cm:clk:0038:24",
+ "l3-init-clkctrl:0038:24",
NULL,
};
static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
- "l3_init_cm:clk:0038:25",
+ "l3-init-clkctrl:0038:25",
NULL,
};
@@ -418,7 +418,7 @@ static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initcon
};
static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = {
- "l3_init_cm:clk:0040:24",
+ "l3-init-clkctrl:0040:24",
NULL,
};
@@ -452,14 +452,14 @@ static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __ini
};
static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = {
- { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0008:24" },
- { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0010:24" },
- { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:0018:24" },
+ { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0008:24" },
+ { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0010:24" },
+ { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:0018:24" },
{ OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" },
{ OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" },
{ OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" },
{ OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" },
- { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:00c0:8" },
+ { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:00c0:8" },
{ 0 },
};
@@ -530,7 +530,7 @@ static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = {
};
static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = {
- "l4_per_cm:clk:00c0:26",
+ "l4-per-clkctrl:00c0:26",
"pad_clks_ck",
NULL,
};
@@ -570,12 +570,12 @@ static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = {
- { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0008:24" },
- { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0010:24" },
- { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0018:24" },
- { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0020:24" },
- { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0028:24" },
- { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0030:24" },
+ { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0008:24" },
+ { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0010:24" },
+ { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0018:24" },
+ { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0020:24" },
+ { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0028:24" },
+ { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0030:24" },
{ OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" },
{ OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" },
{ OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" },
@@ -588,14 +588,14 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
{ OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
{ OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
{ OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" },
- { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:00c0:24" },
+ { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:00c0:24" },
{ OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0118:8" },
+ { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0118:8" },
{ OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
@@ -630,7 +630,7 @@ static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initcon
{ OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" },
{ OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" },
- { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0020:24" },
+ { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4-wkup-clkctrl:0020:24" },
{ OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" },
{ OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
@@ -644,7 +644,7 @@ static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = {
};
static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = {
- "emu_sys_cm:clk:0000:22",
+ "emu-sys-clkctrl:0000:22",
NULL,
};
@@ -662,7 +662,7 @@ static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __init
};
static const char * const omap4_stm_clk_div_ck_parents[] __initconst = {
- "emu_sys_cm:clk:0000:20",
+ "emu-sys-clkctrl:0000:20",
NULL,
};
@@ -716,73 +716,73 @@ static struct ti_dt_clk omap44xx_clks[] = {
* hwmod support. Once hwmod is removed, these can be removed
* also.
*/
- DT_CLK(NULL, "aess_fclk", "abe_cm:0008:24"),
- DT_CLK(NULL, "cm2_dm10_mux", "l4_per_cm:0008:24"),
- DT_CLK(NULL, "cm2_dm11_mux", "l4_per_cm:0010:24"),
- DT_CLK(NULL, "cm2_dm2_mux", "l4_per_cm:0018:24"),
- DT_CLK(NULL, "cm2_dm3_mux", "l4_per_cm:0020:24"),
- DT_CLK(NULL, "cm2_dm4_mux", "l4_per_cm:0028:24"),
- DT_CLK(NULL, "cm2_dm9_mux", "l4_per_cm:0030:24"),
- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
- DT_CLK(NULL, "dmt1_clk_mux", "l4_wkup_cm:0020:24"),
- DT_CLK(NULL, "dss_48mhz_clk", "l3_dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "l3_dss_cm:0000:8"),
- DT_CLK(NULL, "dss_sys_clk", "l3_dss_cm:0000:10"),
- DT_CLK(NULL, "dss_tv_clk", "l3_dss_cm:0000:11"),
- DT_CLK(NULL, "fdif_fck", "iss_cm:0008:24"),
- DT_CLK(NULL, "func_dmic_abe_gfclk", "abe_cm:0018:24"),
- DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe_cm:0020:24"),
- DT_CLK(NULL, "func_mcbsp1_gfclk", "abe_cm:0028:24"),
- DT_CLK(NULL, "func_mcbsp2_gfclk", "abe_cm:0030:24"),
- DT_CLK(NULL, "func_mcbsp3_gfclk", "abe_cm:0038:24"),
- DT_CLK(NULL, "gpio1_dbclk", "l4_wkup_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0040:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0048:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0050:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0058:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4_per_cm:0060:8"),
- DT_CLK(NULL, "hsi_fck", "l3_init_cm:0018:24"),
- DT_CLK(NULL, "hsmmc1_fclk", "l3_init_cm:0008:24"),
- DT_CLK(NULL, "hsmmc2_fclk", "l3_init_cm:0010:24"),
- DT_CLK(NULL, "iss_ctrlclk", "iss_cm:0000:8"),
- DT_CLK(NULL, "mcasp_sync_mux_ck", "abe_cm:0020:26"),
- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
- DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4_per_cm:00c0:26"),
- DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3_init_cm:00c0:8"),
- DT_CLK(NULL, "otg_60m_gfclk", "l3_init_cm:0040:24"),
- DT_CLK(NULL, "per_mcbsp4_gfclk", "l4_per_cm:00c0:24"),
- DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu_sys_cm:0000:20"),
- DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu_sys_cm:0000:22"),
- DT_CLK(NULL, "sgx_clk_mux", "l3_gfx_cm:0000:24"),
- DT_CLK(NULL, "slimbus1_fclk_0", "abe_cm:0040:8"),
- DT_CLK(NULL, "slimbus1_fclk_1", "abe_cm:0040:9"),
- DT_CLK(NULL, "slimbus1_fclk_2", "abe_cm:0040:10"),
- DT_CLK(NULL, "slimbus1_slimbus_clk", "abe_cm:0040:11"),
- DT_CLK(NULL, "slimbus2_fclk_0", "l4_per_cm:0118:8"),
- DT_CLK(NULL, "slimbus2_fclk_1", "l4_per_cm:0118:9"),
- DT_CLK(NULL, "slimbus2_slimbus_clk", "l4_per_cm:0118:10"),
- DT_CLK(NULL, "stm_clk_div_ck", "emu_sys_cm:0000:27"),
- DT_CLK(NULL, "timer5_sync_mux", "abe_cm:0048:24"),
- DT_CLK(NULL, "timer6_sync_mux", "abe_cm:0050:24"),
- DT_CLK(NULL, "timer7_sync_mux", "abe_cm:0058:24"),
- DT_CLK(NULL, "timer8_sync_mux", "abe_cm:0060:24"),
- DT_CLK(NULL, "trace_clk_div_div_ck", "emu_sys_cm:0000:24"),
- DT_CLK(NULL, "usb_host_hs_func48mclk", "l3_init_cm:0038:15"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3_init_cm:0038:13"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3_init_cm:0038:14"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3_init_cm:0038:11"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3_init_cm:0038:12"),
- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3_init_cm:0038:8"),
- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3_init_cm:0038:9"),
- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init_cm:0038:10"),
- DT_CLK(NULL, "usb_otg_hs_xclk", "l3_init_cm:0040:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3_init_cm:0048:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3_init_cm:0048:9"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3_init_cm:0048:10"),
- DT_CLK(NULL, "utmi_p1_gfclk", "l3_init_cm:0038:24"),
- DT_CLK(NULL, "utmi_p2_gfclk", "l3_init_cm:0038:25"),
+ DT_CLK(NULL, "aess_fclk", "abe-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm10_mux", "l4-per-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm11_mux", "l4-per-clkctrl:0010:24"),
+ DT_CLK(NULL, "cm2_dm2_mux", "l4-per-clkctrl:0018:24"),
+ DT_CLK(NULL, "cm2_dm3_mux", "l4-per-clkctrl:0020:24"),
+ DT_CLK(NULL, "cm2_dm4_mux", "l4-per-clkctrl:0028:24"),
+ DT_CLK(NULL, "cm2_dm9_mux", "l4-per-clkctrl:0030:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dmt1_clk_mux", "l4-wkup-clkctrl:0020:24"),
+ DT_CLK(NULL, "dss_48mhz_clk", "l3-dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "l3-dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "l3-dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "dss_tv_clk", "l3-dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "fdif_fck", "iss-clkctrl:0008:24"),
+ DT_CLK(NULL, "func_dmic_abe_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe-clkctrl:0020:24"),
+ DT_CLK(NULL, "func_mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "func_mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "func_mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4-wkup-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4-per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4-per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4-per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4-per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4-per-clkctrl:0060:8"),
+ DT_CLK(NULL, "hsi_fck", "l3-init-clkctrl:0018:24"),
+ DT_CLK(NULL, "hsmmc1_fclk", "l3-init-clkctrl:0008:24"),
+ DT_CLK(NULL, "hsmmc2_fclk", "l3-init-clkctrl:0010:24"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss-clkctrl:0000:8"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "abe-clkctrl:0020:26"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
+ DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+ DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
+ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
+ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
+ DT_CLK(NULL, "sgx_clk_mux", "l3-gfx-clkctrl:0000:24"),
+ DT_CLK(NULL, "slimbus1_fclk_0", "abe-clkctrl:0040:8"),
+ DT_CLK(NULL, "slimbus1_fclk_1", "abe-clkctrl:0040:9"),
+ DT_CLK(NULL, "slimbus1_fclk_2", "abe-clkctrl:0040:10"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "abe-clkctrl:0040:11"),
+ DT_CLK(NULL, "slimbus2_fclk_0", "l4-per-clkctrl:0118:8"),
+ DT_CLK(NULL, "slimbus2_fclk_1", "l4-per-clkctrl:0118:9"),
+ DT_CLK(NULL, "slimbus2_slimbus_clk", "l4-per-clkctrl:0118:10"),
+ DT_CLK(NULL, "stm_clk_div_ck", "emu-sys-clkctrl:0000:27"),
+ DT_CLK(NULL, "timer5_sync_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_sync_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_sync_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_sync_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "trace_clk_div_div_ck", "emu-sys-clkctrl:0000:24"),
+ DT_CLK(NULL, "usb_host_hs_func48mclk", "l3-init-clkctrl:0038:15"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3-init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3-init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3-init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3-init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3-init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3-init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_hs_xclk", "l3-init-clkctrl:0040:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3-init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3-init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3-init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3-init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3-init-clkctrl:0038:25"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 90e0a9ea6351..b4aff76eb373 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -50,7 +50,7 @@ static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
};
static const char * const omap5_dmic_gfclk_parents[] __initconst = {
- "abe_cm:clk:0018:26",
+ "abe-clkctrl:0018:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -70,7 +70,7 @@ static const struct omap_clkctrl_bit_data omap5_dmic_bit_data[] __initconst = {
};
static const char * const omap5_mcbsp1_gfclk_parents[] __initconst = {
- "abe_cm:clk:0028:26",
+ "abe-clkctrl:0028:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -83,7 +83,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp1_bit_data[] __initconst =
};
static const char * const omap5_mcbsp2_gfclk_parents[] __initconst = {
- "abe_cm:clk:0030:26",
+ "abe-clkctrl:0030:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -96,7 +96,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp2_bit_data[] __initconst =
};
static const char * const omap5_mcbsp3_gfclk_parents[] __initconst = {
- "abe_cm:clk:0038:26",
+ "abe-clkctrl:0038:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -136,16 +136,16 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
{ OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
- { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
+ { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
{ OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
- { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
- { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
- { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
- { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
- { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
- { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
- { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
- { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
+ { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
{ 0 },
};
@@ -268,12 +268,12 @@ static const struct omap_clkctrl_bit_data omap5_gpio8_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst = {
- { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0008:24" },
- { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0010:24" },
- { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0018:24" },
- { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0020:24" },
- { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
- { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
+ { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
+ { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
+ { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
+ { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
+ { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
+ { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0030:24" },
{ OMAP5_GPIO2_CLKCTRL, omap5_gpio2_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_GPIO3_CLKCTRL, omap5_gpio3_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_GPIO4_CLKCTRL, omap5_gpio4_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
@@ -345,7 +345,7 @@ static const struct omap_clkctrl_bit_data omap5_dss_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = {
- { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
+ { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
{ 0 },
};
@@ -378,7 +378,7 @@ static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = {
- { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24" },
+ { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24" },
{ 0 },
};
@@ -389,7 +389,7 @@ static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = {
};
static const char * const omap5_mmc1_fclk_parents[] __initconst = {
- "l3init_cm:clk:0008:24",
+ "l3init-clkctrl:0008:24",
NULL,
};
@@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data omap5_mmc1_bit_data[] __initconst = {
};
static const char * const omap5_mmc2_fclk_parents[] __initconst = {
- "l3init_cm:clk:0010:24",
+ "l3init-clkctrl:0010:24",
NULL,
};
@@ -430,12 +430,12 @@ static const char * const omap5_usb_host_hs_hsic480m_p3_clk_parents[] __initcons
};
static const char * const omap5_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
- "l3init_cm:clk:0038:24",
+ "l3init-clkctrl:0038:24",
NULL,
};
static const char * const omap5_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
- "l3init_cm:clk:0038:25",
+ "l3init-clkctrl:0038:25",
NULL,
};
@@ -494,8 +494,8 @@ static const struct omap_clkctrl_bit_data omap5_usb_otg_ss_bit_data[] __initcons
};
static const struct omap_clkctrl_reg_data omap5_l3init_clkctrl_regs[] __initconst = {
- { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
- { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
+ { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
+ { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
{ OMAP5_USB_HOST_HS_CLKCTRL, omap5_usb_host_hs_bit_data, CLKF_SW_SUP, "l3init_60m_fclk" },
{ OMAP5_USB_TLL_HS_CLKCTRL, omap5_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_SATA_CLKCTRL, omap5_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
@@ -519,7 +519,7 @@ static const struct omap_clkctrl_reg_data omap5_wkupaon_clkctrl_regs[] __initcon
{ OMAP5_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ OMAP5_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ OMAP5_GPIO1_CLKCTRL, omap5_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
- { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
+ { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
{ OMAP5_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ OMAP5_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
@@ -549,58 +549,58 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"),
- DT_CLK(NULL, "dmic_gfclk", "abe_cm:0018:24"),
- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
- DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
- DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
- DT_CLK(NULL, "dss_sys_clk", "dss_cm:0000:10"),
- DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0040:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0048:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0050:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0058:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0060:8"),
- DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:00f0:8"),
- DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:00f8:8"),
- DT_CLK(NULL, "mcbsp1_gfclk", "abe_cm:0028:24"),
- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
- DT_CLK(NULL, "mcbsp2_gfclk", "abe_cm:0030:24"),
- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
- DT_CLK(NULL, "mcbsp3_gfclk", "abe_cm:0038:24"),
- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
- DT_CLK(NULL, "mmc1_32khz_clk", "l3init_cm:0008:8"),
- DT_CLK(NULL, "mmc1_fclk", "l3init_cm:0008:25"),
- DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
- DT_CLK(NULL, "mmc2_fclk", "l3init_cm:0010:25"),
- DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
- DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
- DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0008:24"),
- DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0010:24"),
- DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
- DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0018:24"),
- DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0020:24"),
- DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0028:24"),
- DT_CLK(NULL, "timer5_gfclk_mux", "abe_cm:0048:24"),
- DT_CLK(NULL, "timer6_gfclk_mux", "abe_cm:0050:24"),
- DT_CLK(NULL, "timer7_gfclk_mux", "abe_cm:0058:24"),
- DT_CLK(NULL, "timer8_gfclk_mux", "abe_cm:0060:24"),
- DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0030:24"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init_cm:0038:13"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init_cm:0038:14"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init_cm:0038:7"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init_cm:0038:11"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init_cm:0038:12"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init_cm:0038:6"),
- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init_cm:0038:8"),
- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init_cm:0038:9"),
- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init_cm:0038:10"),
- DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init_cm:00d0:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init_cm:0048:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init_cm:0048:9"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init_cm:0048:10"),
- DT_CLK(NULL, "utmi_p1_gfclk", "l3init_cm:0038:24"),
- DT_CLK(NULL, "utmi_p2_gfclk", "l3init_cm:0038:25"),
+ DT_CLK(NULL, "dmic_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0060:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00f0:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
+ DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0028:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0030:24"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init-clkctrl:0038:7"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init-clkctrl:0038:6"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init-clkctrl:00d0:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3init-clkctrl:0038:25"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 9daf3825f289..3b8e483aec92 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -1,13 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/list.h>
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index aa0950c4f498..f0f5bf68b6d2 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* DRA7 ATL (Audio Tracking Logic) clock driver
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 3463579220b5..ef2a445c63a3 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI clock support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 617360e20d86..ae5862879417 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP clkctrl clock support
*
* Copyright (C) 2017 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
@@ -528,10 +520,6 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
char *c;
u16 soc_mask = 0;
- if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
- of_node_name_eq(node, "clk"))
- ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
-
addrp = of_get_address(node, 0, NULL, NULL);
addr = (u32)of_translate_address(node, addrp);
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
index 91751dd26b16..a756ab1a5856 100644
--- a/drivers/clk/ti/clkt_dflt.c
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Default clock type
*
@@ -8,15 +9,6 @@
* Richard Woodruff <r-woodruff2@ti.com>
* Paul Walmsley
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index c841d2d28111..37ab53339a9b 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Clock driver internal definitions
*
* Copyright (C) 2014 Texas Instruments, Inc
* Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_CLK_TI_CLOCK__
#define __DRIVERS_CLK_TI_CLOCK__
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 24179c907774..c897ad7e681e 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP clockdomain support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 779b9900f636..77b771dd050a 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI composite clock support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 9fbea0997b43..488d3da60c31 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI Divider Clock
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 7c6dc8449b22..8ed43bc6b7cc 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP DPLL clock support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 749c6b73abff..2db3fc4a443e 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -1,13 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
index 8cb00d0af966..c80cee0f5d3d 100644
--- a/drivers/clk/ti/fixed-factor.c
+++ b/drivers/clk/ti/fixed-factor.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI Fixed Factor Clock
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 0033de9beb4c..307702921431 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP gate clock support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index dd2b455183a9..f47beeea211e 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP interface clock support
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 15de513d2d81..46b45b3e8319 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI Multiplexer Clock
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Tero Kristo <t-kristo@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index 1244c4e568ff..c2088b3c4081 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-fch.o
-clk-x86-lpss-y := clk-lpss-atom.o
-obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
+obj-$(CONFIG_X86_INTEL_LPSS) += clk-lpss-atom.o clk-pmc-atom.o
obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 3c0ee102fe73..4f2bb7315b67 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -22,7 +22,7 @@ config CLKEVT_I8253
config I8253_LOCK
bool
-config OMAP_DM_TIMER
+config OMAP_DM_SYSTIMER
bool
select TIMER_OF
@@ -56,6 +56,13 @@ config DIGICOLOR_TIMER
help
Enables the support for the digicolor timer driver.
+config OMAP_DM_TIMER
+ bool "OMAP dual-mode timer driver" if ARCH_K3 || COMPILE_TEST
+ default y if ARCH_K3
+ select TIMER_OF
+ help
+ Enables the support for the TI dual-mode timer driver.
+
config DW_APB_TIMER
bool "DW APB timer driver" if COMPILE_TEST
help
@@ -150,6 +157,14 @@ config TEGRA_TIMER
help
Enables support for the Tegra driver.
+config TEGRA186_TIMER
+ bool "NVIDIA Tegra186 timer driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on WATCHDOG && WATCHDOG_CORE
+ help
+ Enables support for the timers and watchdogs found on NVIDIA
+ Tegra186 and later SoCs.
+
config VT8500_TIMER
bool "VT8500 timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -367,7 +382,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL
depends on ARM_GLOBAL_TIMER
help
When the ARM global timer initializes, its current rate is declared
- to the kernel and maintained forever. Should it's parent clock
+ to the kernel and maintained forever. Should its parent clock
change, the driver tries to fix the timer's internal prescaler.
On some machs (i.e. Zynq) the initial prescaler value thus poses
bounds about how much the parent clock is allowed to decrease or
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 6ca640019e10..64ab547de97b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
-obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm-systimer.o
+obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
@@ -36,6 +36,7 @@ obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
obj-$(CONFIG_TEGRA_TIMER) += timer-tegra.o
+obj-$(CONFIG_TEGRA186_TIMER) += timer-tegra186.o
obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dd0956ad969c..64dcb082d4cf 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -981,6 +981,14 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
.compatible = "renesas,rcar-gen3-cmt1",
.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
},
+ {
+ .compatible = "renesas,rcar-gen4-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen4-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index 7bcb4a3f26fb..d5b29fd03ca2 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -22,6 +22,19 @@
#define TIMER_SYNC_TICKS (3)
+/* cpux mcusys wrapper */
+#define CPUX_CON_REG 0x0
+#define CPUX_IDX_REG 0x4
+
+/* cpux */
+#define CPUX_IDX_GLOBAL_CTRL 0x0
+ #define CPUX_ENABLE BIT(0)
+ #define CPUX_CLK_DIV_MASK GENMASK(10, 8)
+ #define CPUX_CLK_DIV1 BIT(8)
+ #define CPUX_CLK_DIV2 BIT(9)
+ #define CPUX_CLK_DIV4 BIT(10)
+#define CPUX_IDX_GLOBAL_IRQ 0x30
+
/* gpt */
#define GPT_IRQ_EN_REG 0x00
#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
@@ -72,6 +85,52 @@
static void __iomem *gpt_sched_reg __read_mostly;
+static u32 mtk_cpux_readl(u32 reg_idx, struct timer_of *to)
+{
+ writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
+ return readl(timer_of_base(to) + CPUX_CON_REG);
+}
+
+static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to)
+{
+ writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
+ writel(val, timer_of_base(to) + CPUX_CON_REG);
+}
+
+static void mtk_cpux_set_irq(struct timer_of *to, bool enable)
+{
+ const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask);
+ u32 val;
+
+ val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to);
+
+ if (enable)
+ val |= *irq_mask;
+ else
+ val &= ~(*irq_mask);
+
+ mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to);
+}
+
+static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+ /* Clear any irq */
+ mtk_cpux_set_irq(to_timer_of(clkevt), false);
+
+ /*
+ * Disabling CPUXGPT timer will crash the platform, especially
+ * if Trusted Firmware is using it (usually, for sleep states),
+ * so we only mask the IRQ and call it a day.
+ */
+ return 0;
+}
+
+static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt)
+{
+ mtk_cpux_set_irq(to_timer_of(clkevt), true);
+ return 0;
+}
+
static void mtk_syst_ack_irq(struct timer_of *to)
{
/* Clear and disable interrupt */
@@ -281,6 +340,60 @@ static struct timer_of to = {
},
};
+static int __init mtk_cpux_init(struct device_node *node)
+{
+ static struct timer_of to_cpux;
+ u32 freq, val;
+ int ret;
+
+ /*
+ * There are per-cpu interrupts for the CPUX General Purpose Timer
+ * but since this timer feeds the AArch64 System Timer we can rely
+ * on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ.
+ */
+ to_cpux.flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+ to_cpux.clkevt.name = "mtk-cpuxgpt";
+ to_cpux.clkevt.rating = 10;
+ to_cpux.clkevt.cpumask = cpu_possible_mask;
+ to_cpux.clkevt.set_state_shutdown = mtk_cpux_clkevt_shutdown;
+ to_cpux.clkevt.tick_resume = mtk_cpux_clkevt_resume;
+
+ /* If this fails, bad things are about to happen... */
+ ret = timer_of_init(node, &to_cpux);
+ if (ret) {
+ WARN(1, "Cannot start CPUX timers.\n");
+ return ret;
+ }
+
+ /*
+ * Check if we're given a clock with the right frequency for this
+ * timer, otherwise warn but keep going with the setup anyway, as
+ * that makes it possible to still boot the kernel, even though
+ * it may not work correctly (random lockups, etc).
+ * The reason behind this is that having an early UART may not be
+ * possible for everyone and this gives a chance to retrieve kmsg
+ * for eventual debugging even on consumer devices.
+ */
+ freq = timer_of_rate(&to_cpux);
+ if (freq > 13000000)
+ WARN(1, "Requested unsupported timer frequency %u\n", freq);
+
+ /* Clock input is 26MHz, set DIV2 to achieve 13MHz clock */
+ val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux);
+ val &= ~CPUX_CLK_DIV_MASK;
+ val |= CPUX_CLK_DIV2;
+ mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to_cpux);
+
+ /* Enable all CPUXGPT timers */
+ val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux);
+ mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to_cpux);
+
+ clockevents_config_and_register(&to_cpux.clkevt, timer_of_rate(&to_cpux),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ return 0;
+}
+
static int __init mtk_syst_init(struct device_node *node)
{
int ret;
@@ -339,3 +452,4 @@ static int __init mtk_gpt_init(struct device_node *node)
}
TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
+TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index abce83d2f00b..d5f1436f33d9 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -61,7 +61,7 @@ struct mchp_pit64b_timer {
};
/**
- * mchp_pit64b_clkevt - PIT64B clockevent data structure
+ * struct mchp_pit64b_clkevt - PIT64B clockevent data structure
* @timer: PIT64B timer
* @clkevt: clockevent
*/
@@ -75,7 +75,7 @@ struct mchp_pit64b_clkevt {
struct mchp_pit64b_clkevt, clkevt))
/**
- * mchp_pit64b_clksrc - PIT64B clocksource data structure
+ * struct mchp_pit64b_clksrc - PIT64B clocksource data structure
* @timer: PIT64B timer
* @clksrc: clocksource
*/
@@ -173,7 +173,8 @@ static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ if (!clockevent_state_detached(cedev))
+ mchp_pit64b_suspend(timer);
return 0;
}
@@ -182,35 +183,37 @@ static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
+ if (clockevent_state_shutdown(cedev))
+ mchp_pit64b_resume(timer);
+
mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
MCHP_PIT64B_IER_PERIOD);
return 0;
}
-static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
- struct clock_event_device *cedev)
+static int mchp_pit64b_clkevt_set_oneshot(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
+ if (clockevent_state_shutdown(cedev))
+ mchp_pit64b_resume(timer);
+
+ mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_ONE_SHOT,
MCHP_PIT64B_IER_PERIOD);
return 0;
}
-static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
+static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
+ struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- mchp_pit64b_suspend(timer);
-}
-
-static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
-{
- struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
+ mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
+ MCHP_PIT64B_IER_PERIOD);
- mchp_pit64b_resume(timer);
+ return 0;
}
static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
@@ -242,8 +245,10 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
}
/**
- * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at
- * runtime; this includes prescaler and SGCLK bit
+ * mchp_pit64b_init_mode() - prepare PIT64B mode register value to be used at
+ * runtime; this includes prescaler and SGCLK bit
+ * @timer: pointer to pit64b timer to init
+ * @max_rate: maximum rate that timer's clock could use
*
* PIT64B timer may be fed by gclk or pclk. When gclk is used its rate has to
* be at least 3 times lower that pclk's rate. pclk rate is fixed, gclk rate
@@ -341,6 +346,7 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
if (!cs)
return -ENOMEM;
+ mchp_pit64b_resume(timer);
mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
mchp_pit64b_cs_base = timer->base;
@@ -362,8 +368,7 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
/* Stop timer. */
- writel_relaxed(MCHP_PIT64B_CR_SWRST,
- timer->base + MCHP_PIT64B_CR);
+ mchp_pit64b_suspend(timer);
kfree(cs);
return ret;
@@ -395,9 +400,8 @@ static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
ce->clkevt.rating = 150;
ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown;
ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic;
+ ce->clkevt.set_state_oneshot = mchp_pit64b_clkevt_set_oneshot;
ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event;
- ce->clkevt.suspend = mchp_pit64b_clkevt_suspend;
- ce->clkevt.resume = mchp_pit64b_clkevt_resume;
ce->clkevt.cpumask = cpumask_of(0);
ce->clkevt.irq = irq;
@@ -448,19 +452,10 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
if (ret)
goto irq_unmap;
- ret = clk_prepare_enable(timer.pclk);
- if (ret)
- goto irq_unmap;
-
- if (timer.mode & MCHP_PIT64B_MR_SGCLK) {
- ret = clk_prepare_enable(timer.gclk);
- if (ret)
- goto pclk_unprepare;
-
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK)
clk_rate = clk_get_rate(timer.gclk);
- } else {
+ else
clk_rate = clk_get_rate(timer.pclk);
- }
clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1);
if (clkevt)
@@ -469,15 +464,10 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
ret = mchp_pit64b_init_clksrc(&timer, clk_rate);
if (ret)
- goto gclk_unprepare;
+ goto irq_unmap;
return 0;
-gclk_unprepare:
- if (timer.mode & MCHP_PIT64B_MR_SGCLK)
- clk_disable_unprepare(timer.gclk);
-pclk_unprepare:
- clk_disable_unprepare(timer.pclk);
irq_unmap:
irq_dispose_mapping(irq);
io_unmap:
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 593d5a957b69..969a552da8d2 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -7,6 +7,9 @@
* either be read from the "time" and "timeh" CSRs, and can use the SBI to
* setup events, or directly accessed using MMIO registers.
*/
+
+#define pr_fmt(fmt) "riscv-timer: " fmt
+
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -20,14 +23,28 @@
#include <linux/of_irq.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
+#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/timex.h>
+static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
+ u64 next_tval = get_cycles64() + delta;
+
csr_set(CSR_IE, IE_TIE);
- sbi_set_timer(get_cycles64() + delta);
+ if (static_branch_likely(&riscv_sstc_available)) {
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
+ csr_write(CSR_STIMECMPH, next_tval >> 32);
+#else
+ csr_write(CSR_STIMECMP, next_tval);
+#endif
+ } else
+ sbi_set_timer(next_tval);
+
return 0;
}
@@ -101,20 +118,21 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpuid, hartid, error;
+ int cpuid, error;
+ unsigned long hartid;
struct device_node *child;
struct irq_domain *domain;
- hartid = riscv_of_processor_hartid(n);
- if (hartid < 0) {
- pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
+ error = riscv_of_processor_hartid(n, &hartid);
+ if (error < 0) {
+ pr_warn("Not valid hartid for node [%pOF] error = [%lu]\n",
n, hartid);
- return hartid;
+ return error;
}
cpuid = riscv_hartid_to_cpuid(hartid);
if (cpuid < 0) {
- pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
+ pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
return cpuid;
}
@@ -140,7 +158,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return -ENODEV;
}
- pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
+ pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
if (error) {
@@ -165,6 +183,12 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (error)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
+
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+ static_branch_enable(&riscv_sstc_available);
+ }
+
return error;
}
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index bb6ea6c19829..94dc6e42e983 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -128,7 +128,7 @@ static void sun4i_timer_clear_interrupt(void __iomem *base)
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ struct clock_event_device *evt = dev_id;
struct timer_of *to = to_timer_of(evt);
sun4i_timer_clear_interrupt(timer_of_base(to));
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 85900f7fc69f..7d5fa9069906 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -142,7 +142,7 @@ static int sun5i_clkevt_next_event(unsigned long evt,
static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
{
- struct sun5i_timer_clkevt *ce = (struct sun5i_timer_clkevt *)dev_id;
+ struct sun5i_timer_clkevt *ce = dev_id;
writel(0x1, ce->timer.base + TIMER_IRQ_ST_REG);
ce->clkevt.event_handler(&ce->clkevt);
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
new file mode 100644
index 000000000000..ea742889ee06
--- /dev/null
+++ b/drivers/clocksource/timer-tegra186.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/watchdog.h>
+
+/* shared registers */
+#define TKETSC0 0x000
+#define TKETSC1 0x004
+#define TKEUSEC 0x008
+#define TKEOSC 0x00c
+
+#define TKEIE(x) (0x100 + ((x) * 4))
+#define TKEIE_WDT_MASK(x, y) ((y) << (16 + 4 * (x)))
+
+/* timer registers */
+#define TMRCR 0x000
+#define TMRCR_ENABLE BIT(31)
+#define TMRCR_PERIODIC BIT(30)
+#define TMRCR_PTV(x) ((x) & 0x0fffffff)
+
+#define TMRSR 0x004
+#define TMRSR_INTR_CLR BIT(30)
+
+#define TMRCSSR 0x008
+#define TMRCSSR_SRC_USEC (0 << 0)
+
+/* watchdog registers */
+#define WDTCR 0x000
+#define WDTCR_SYSTEM_POR_RESET_ENABLE BIT(16)
+#define WDTCR_SYSTEM_DEBUG_RESET_ENABLE BIT(15)
+#define WDTCR_REMOTE_INT_ENABLE BIT(14)
+#define WDTCR_LOCAL_FIQ_ENABLE BIT(13)
+#define WDTCR_LOCAL_INT_ENABLE BIT(12)
+#define WDTCR_PERIOD_MASK (0xff << 4)
+#define WDTCR_PERIOD(x) (((x) & 0xff) << 4)
+#define WDTCR_TIMER_SOURCE_MASK 0xf
+#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf)
+
+#define WDTCMDR 0x008
+#define WDTCMDR_DISABLE_COUNTER BIT(1)
+#define WDTCMDR_START_COUNTER BIT(0)
+
+#define WDTUR 0x00c
+#define WDTUR_UNLOCK_PATTERN 0x0000c45a
+
+struct tegra186_timer_soc {
+ unsigned int num_timers;
+ unsigned int num_wdts;
+};
+
+struct tegra186_tmr {
+ struct tegra186_timer *parent;
+ void __iomem *regs;
+ unsigned int index;
+ unsigned int hwirq;
+};
+
+struct tegra186_wdt {
+ struct watchdog_device base;
+
+ void __iomem *regs;
+ unsigned int index;
+ bool locked;
+
+ struct tegra186_tmr *tmr;
+};
+
+static inline struct tegra186_wdt *to_tegra186_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct tegra186_wdt, base);
+}
+
+struct tegra186_timer {
+ const struct tegra186_timer_soc *soc;
+ struct device *dev;
+ void __iomem *regs;
+
+ struct tegra186_wdt *wdt;
+ struct clocksource usec;
+ struct clocksource tsc;
+ struct clocksource osc;
+};
+
+static void tmr_writel(struct tegra186_tmr *tmr, u32 value, unsigned int offset)
+{
+ writel_relaxed(value, tmr->regs + offset);
+}
+
+static void wdt_writel(struct tegra186_wdt *wdt, u32 value, unsigned int offset)
+{
+ writel_relaxed(value, wdt->regs + offset);
+}
+
+static u32 wdt_readl(struct tegra186_wdt *wdt, unsigned int offset)
+{
+ return readl_relaxed(wdt->regs + offset);
+}
+
+static struct tegra186_tmr *tegra186_tmr_create(struct tegra186_timer *tegra,
+ unsigned int index)
+{
+ unsigned int offset = 0x10000 + index * 0x10000;
+ struct tegra186_tmr *tmr;
+
+ tmr = devm_kzalloc(tegra->dev, sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return ERR_PTR(-ENOMEM);
+
+ tmr->parent = tegra;
+ tmr->regs = tegra->regs + offset;
+ tmr->index = index;
+ tmr->hwirq = 0;
+
+ return tmr;
+}
+
+static const struct watchdog_info tegra186_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "NVIDIA Tegra186 WDT",
+};
+
+static void tegra186_wdt_disable(struct tegra186_wdt *wdt)
+{
+ /* unlock and disable the watchdog */
+ wdt_writel(wdt, WDTUR_UNLOCK_PATTERN, WDTUR);
+ wdt_writel(wdt, WDTCMDR_DISABLE_COUNTER, WDTCMDR);
+
+ /* disable timer */
+ tmr_writel(wdt->tmr, 0, TMRCR);
+}
+
+static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
+{
+ struct tegra186_timer *tegra = wdt->tmr->parent;
+ u32 value;
+
+ /* unmask the hardware IRQ; this may have been lost across a powergate cycle */
+ value = TKEIE_WDT_MASK(wdt->index, 1);
+ writel(value, tegra->regs + TKEIE(wdt->tmr->hwirq));
+
+ /* clear interrupt */
+ tmr_writel(wdt->tmr, TMRSR_INTR_CLR, TMRSR);
+
+ /* select microsecond source */
+ tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
+
+ /* configure timer (system reset happens on the fifth expiration) */
+ value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ TMRCR_PERIODIC | TMRCR_ENABLE;
+ tmr_writel(wdt->tmr, value, TMRCR);
+
+ if (!wdt->locked) {
+ value = wdt_readl(wdt, WDTCR);
+
+ /* select the proper timer source */
+ value &= ~WDTCR_TIMER_SOURCE_MASK;
+ value |= WDTCR_TIMER_SOURCE(wdt->tmr->index);
+
+ /* single timer period since that's already configured */
+ value &= ~WDTCR_PERIOD_MASK;
+ value |= WDTCR_PERIOD(1);
+
+ /* enable local interrupt for WDT petting */
+ value |= WDTCR_LOCAL_INT_ENABLE;
+
+ /* enable local FIQ and remote interrupt for debug dump */
+ if (0)
+ value |= WDTCR_REMOTE_INT_ENABLE |
+ WDTCR_LOCAL_FIQ_ENABLE;
+
+ /* enable system debug reset (doesn't properly reboot) */
+ if (0)
+ value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE;
+
+ /* enable system POR reset */
+ value |= WDTCR_SYSTEM_POR_RESET_ENABLE;
+
+ wdt_writel(wdt, value, WDTCR);
+ }
+
+ wdt_writel(wdt, WDTCMDR_START_COUNTER, WDTCMDR);
+}
+
+static int tegra186_wdt_start(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+
+ tegra186_wdt_enable(wdt);
+
+ return 0;
+}
+
+static int tegra186_wdt_stop(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+
+ tegra186_wdt_disable(wdt);
+
+ return 0;
+}
+
+static int tegra186_wdt_ping(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+
+ tegra186_wdt_disable(wdt);
+ tegra186_wdt_enable(wdt);
+
+ return 0;
+}
+
+static int tegra186_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+
+ if (watchdog_active(&wdt->base))
+ tegra186_wdt_disable(wdt);
+
+ wdt->base.timeout = timeout;
+
+ if (watchdog_active(&wdt->base))
+ tegra186_wdt_enable(wdt);
+
+ return 0;
+}
+
+static const struct watchdog_ops tegra186_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = tegra186_wdt_start,
+ .stop = tegra186_wdt_stop,
+ .ping = tegra186_wdt_ping,
+ .set_timeout = tegra186_wdt_set_timeout,
+};
+
+static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
+ unsigned int index)
+{
+ unsigned int offset = 0x10000, source;
+ struct tegra186_wdt *wdt;
+ u32 value;
+ int err;
+
+ offset += tegra->soc->num_timers * 0x10000 + index * 0x10000;
+
+ wdt = devm_kzalloc(tegra->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return ERR_PTR(-ENOMEM);
+
+ wdt->regs = tegra->regs + offset;
+ wdt->index = index;
+
+ /* read the watchdog configuration since it might be locked down */
+ value = wdt_readl(wdt, WDTCR);
+
+ if (value & WDTCR_LOCAL_INT_ENABLE)
+ wdt->locked = true;
+
+ source = value & WDTCR_TIMER_SOURCE_MASK;
+
+ wdt->tmr = tegra186_tmr_create(tegra, source);
+ if (IS_ERR(wdt->tmr))
+ return ERR_CAST(wdt->tmr);
+
+ wdt->base.info = &tegra186_wdt_info;
+ wdt->base.ops = &tegra186_wdt_ops;
+ wdt->base.min_timeout = 1;
+ wdt->base.max_timeout = 255;
+ wdt->base.parent = tegra->dev;
+
+ err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ err = devm_watchdog_register_device(tegra->dev, &wdt->base);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to register WDT: %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ return wdt;
+}
+
+static u64 tegra186_timer_tsc_read(struct clocksource *cs)
+{
+ struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer,
+ tsc);
+ u32 hi, lo, ss;
+
+ hi = readl_relaxed(tegra->regs + TKETSC1);
+
+ /*
+ * The 56-bit value of the TSC is spread across two registers that are
+ * not synchronized. In order to read them atomically, ensure that the
+ * high 24 bits match before and after reading the low 32 bits.
+ */
+ do {
+ /* snapshot the high 24 bits */
+ ss = hi;
+
+ lo = readl_relaxed(tegra->regs + TKETSC0);
+ hi = readl_relaxed(tegra->regs + TKETSC1);
+ } while (hi != ss);
+
+ return (u64)hi << 32 | lo;
+}
+
+static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
+{
+ tegra->tsc.name = "tsc";
+ tegra->tsc.rating = 300;
+ tegra->tsc.read = tegra186_timer_tsc_read;
+ tegra->tsc.mask = CLOCKSOURCE_MASK(56);
+ tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ return clocksource_register_hz(&tegra->tsc, 31250000);
+}
+
+static u64 tegra186_timer_osc_read(struct clocksource *cs)
+{
+ struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer,
+ osc);
+
+ return readl_relaxed(tegra->regs + TKEOSC);
+}
+
+static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
+{
+ tegra->osc.name = "osc";
+ tegra->osc.rating = 300;
+ tegra->osc.read = tegra186_timer_osc_read;
+ tegra->osc.mask = CLOCKSOURCE_MASK(32);
+ tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ return clocksource_register_hz(&tegra->osc, 38400000);
+}
+
+static u64 tegra186_timer_usec_read(struct clocksource *cs)
+{
+ struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer,
+ usec);
+
+ return readl_relaxed(tegra->regs + TKEUSEC);
+}
+
+static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
+{
+ tegra->usec.name = "usec";
+ tegra->usec.rating = 300;
+ tegra->usec.read = tegra186_timer_usec_read;
+ tegra->usec.mask = CLOCKSOURCE_MASK(32);
+ tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
+}
+
+static irqreturn_t tegra186_timer_irq(int irq, void *data)
+{
+ struct tegra186_timer *tegra = data;
+
+ if (watchdog_active(&tegra->wdt->base)) {
+ tegra186_wdt_disable(tegra->wdt);
+ tegra186_wdt_enable(tegra->wdt);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra186_timer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra186_timer *tegra;
+ unsigned int irq;
+ int err;
+
+ tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->soc = of_device_get_match_data(dev);
+ dev_set_drvdata(dev, tegra);
+ tegra->dev = dev;
+
+ tegra->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tegra->regs))
+ return PTR_ERR(tegra->regs);
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ irq = err;
+
+ /* create a watchdog using a preconfigured timer */
+ tegra->wdt = tegra186_wdt_create(tegra, 0);
+ if (IS_ERR(tegra->wdt)) {
+ err = PTR_ERR(tegra->wdt);
+ dev_err(dev, "failed to create WDT: %d\n", err);
+ return err;
+ }
+
+ err = tegra186_timer_tsc_init(tegra);
+ if (err < 0) {
+ dev_err(dev, "failed to register TSC counter: %d\n", err);
+ return err;
+ }
+
+ err = tegra186_timer_osc_init(tegra);
+ if (err < 0) {
+ dev_err(dev, "failed to register OSC counter: %d\n", err);
+ goto unregister_tsc;
+ }
+
+ err = tegra186_timer_usec_init(tegra);
+ if (err < 0) {
+ dev_err(dev, "failed to register USEC counter: %d\n", err);
+ goto unregister_osc;
+ }
+
+ err = devm_request_irq(dev, irq, tegra186_timer_irq, 0,
+ "tegra186-timer", tegra);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err);
+ goto unregister_usec;
+ }
+
+ return 0;
+
+unregister_usec:
+ clocksource_unregister(&tegra->usec);
+unregister_osc:
+ clocksource_unregister(&tegra->osc);
+unregister_tsc:
+ clocksource_unregister(&tegra->tsc);
+ return err;
+}
+
+static int tegra186_timer_remove(struct platform_device *pdev)
+{
+ struct tegra186_timer *tegra = platform_get_drvdata(pdev);
+
+ clocksource_unregister(&tegra->usec);
+ clocksource_unregister(&tegra->osc);
+ clocksource_unregister(&tegra->tsc);
+
+ return 0;
+}
+
+static int __maybe_unused tegra186_timer_suspend(struct device *dev)
+{
+ struct tegra186_timer *tegra = dev_get_drvdata(dev);
+
+ if (watchdog_active(&tegra->wdt->base))
+ tegra186_wdt_disable(tegra->wdt);
+
+ return 0;
+}
+
+static int __maybe_unused tegra186_timer_resume(struct device *dev)
+{
+ struct tegra186_timer *tegra = dev_get_drvdata(dev);
+
+ if (watchdog_active(&tegra->wdt->base))
+ tegra186_wdt_enable(tegra->wdt);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra186_timer_pm_ops, tegra186_timer_suspend,
+ tegra186_timer_resume);
+
+static const struct tegra186_timer_soc tegra186_timer = {
+ .num_timers = 10,
+ .num_wdts = 3,
+};
+
+static const struct tegra186_timer_soc tegra234_timer = {
+ .num_timers = 16,
+ .num_wdts = 3,
+};
+
+static const struct of_device_id tegra186_timer_of_match[] = {
+ { .compatible = "nvidia,tegra186-timer", .data = &tegra186_timer },
+ { .compatible = "nvidia,tegra234-timer", .data = &tegra234_timer },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra186_timer_of_match);
+
+static struct platform_driver tegra186_wdt_driver = {
+ .driver = {
+ .name = "tegra186-timer",
+ .pm = &tegra186_timer_pm_ops,
+ .of_match_table = tegra186_timer_of_match,
+ },
+ .probe = tegra186_timer_probe,
+ .remove = tegra186_timer_remove,
+};
+module_platform_driver(tegra186_wdt_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 timers driver");
+MODULE_LICENSE("GPL v2");
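The tegra186_timer_tsc_read() helper above shows the usual technique for sampling a wide free-running counter that is split across two unsynchronized 32-bit registers: keep re-reading the high word until it is stable around the low-word read. A minimal user-space sketch of the same loop, with hypothetical read_hi()/read_lo() accessors standing in for the TKETSC1/TKETSC0 reads:

	#include <stdint.h>

	/* hypothetical accessors for the two halves of a 56-bit counter */
	extern uint32_t read_hi(void);	/* bits 55:32 */
	extern uint32_t read_lo(void);	/* bits 31:0 */

	static uint64_t read_counter56(void)
	{
		uint32_t hi = read_hi(), lo, snapshot;

		do {
			/* remember the high word, then sample low and high again */
			snapshot = hi;
			lo = read_lo();
			hi = read_hi();
			/* retry if the high word changed during the low-word read */
		} while (hi != snapshot);

		return ((uint64_t)hi << 32) | lo;
	}

The loop terminates quickly in practice because the high word only changes once every 2^32 ticks of the source clock.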
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index c194e8f74e1d..469f7c91564b 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -44,6 +44,121 @@ enum {
REQUEST_BY_NODE,
};
+static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
+ int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ return readl_relaxed(timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
+ u32 reg, u32 val, int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ writel_relaxed(val, timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+{
+ u32 tidr;
+
+ /* Assume v1 IP if bits [31:16] are zero */
+ tidr = readl_relaxed(timer->io_base);
+ if (!(tidr >> 16)) {
+ timer->revision = 1;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
+ timer->func_base = timer->io_base;
+ } else {
+ timer->revision = 2;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->pend = timer->io_base +
+ _OMAP_TIMER_WRITE_PEND_OFFSET +
+ OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
+ }
+}
+
+/*
+ * __omap_dm_timer_enable_posted - enables write posted mode
+ * @timer: pointer to timer instance handle
+ *
+ * Enables the write posted mode for the timer. When posted mode is enabled,
+ * writes to certain timer registers are immediately acknowledged by the
+ * internal bus, which avoids stalling the CPU while it waits for the write to
+ * complete. Enabling this feature can improve performance when writing to the
+ * timer registers.
+ */
+static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+{
+ if (timer->posted)
+ return;
+
+ if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
+ timer->posted = OMAP_TIMER_NONPOSTED;
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ return;
+ }
+
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
+ OMAP_TIMER_CTRL_POSTED, 0);
+ timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
+ timer->posted = OMAP_TIMER_POSTED;
+}
+
+static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
+ int posted, unsigned long rate)
+{
+ u32 l;
+
+ l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~0x1;
+ __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ /* Readback to make sure write has completed */
+ __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ /*
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+ udelay(3500000 / rate + 1);
+#endif
+ }
+
+ /* Ack possibly pending interrupt */
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+}
+
+static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_ena);
+ __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int
+__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+{
+ return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_stat);
+}
+
/**
* omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
* @timer: timer pointer over which read operation to perform
@@ -921,6 +1036,10 @@ static const struct dmtimer_platform_data omap3plus_pdata = {
.timer_ops = &dmtimer_ops,
};
+static const struct dmtimer_platform_data am6_pdata = {
+ .timer_ops = &dmtimer_ops,
+};
+
static const struct of_device_id omap_timer_match[] = {
{
.compatible = "ti,omap2420-timer",
@@ -949,6 +1068,10 @@ static const struct of_device_id omap_timer_match[] = {
.compatible = "ti,dm816-timer",
.data = &omap3plus_pdata,
},
+ {
+ .compatible = "ti,am654-timer",
+ .data = &am6_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_timer_match);
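Every helper moved in above follows the same write-posting rule: in posted mode, a register access must first wait for that register's write-pending flag to clear so the previous posted write has drained. A stripped-down sketch of the rule for a hypothetical device with one pending bit per posted register, using readl_relaxed_poll_timeout() in place of the open-coded cpu_relax() loop:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	/* hypothetical MMIO layout: pending-status register plus a register file */
	struct posted_regs {
		void __iomem *pend;	/* one write-pending bit per register */
		void __iomem *base;	/* 32-bit registers, 4 bytes apart */
	};

	static int posted_write(struct posted_regs *r, unsigned int reg, u32 val)
	{
		u32 pending;
		int err;

		/* wait (up to 1 ms) for the previous posted write to this register */
		err = readl_relaxed_poll_timeout(r->pend, pending,
						 !(pending & BIT(reg)), 1, 1000);
		if (err)
			return err;

		writel_relaxed(val, r->base + reg * 4);
		return 0;
	}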
diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c
index 700982464c53..020b3d1e1ac0 100644
--- a/drivers/comedi/drivers/comedi_isadma.c
+++ b/drivers/comedi/drivers/comedi_isadma.c
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <asm/dma.h>
+#include <linux/isa-dma.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h>
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index a17e51d65aca..62c2b7ac4339 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -34,6 +34,36 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
#define QUAD8_NUM_COUNTERS 8
/**
+ * struct channel_reg - channel register structure
+ * @data: Count data
+ * @control: Channel flags and control
+ */
+struct channel_reg {
+ u8 data;
+ u8 control;
+};
+
+/**
+ * struct quad8_reg - device register structure
+ * @channel: quadrature counter data and control
+ * @interrupt_status: channel interrupt status
+ * @channel_oper: enable/reset counters and interrupt functions
+ * @index_interrupt: enable channel interrupts
+ * @reserved: reserved for Factory Use
+ * @index_input_levels: index signal logical input level
+ * @cable_status: differential encoder cable status
+ */
+struct quad8_reg {
+ struct channel_reg channel[QUAD8_NUM_COUNTERS];
+ u8 interrupt_status;
+ u8 channel_oper;
+ u8 index_interrupt;
+ u8 reserved[3];
+ u8 index_input_levels;
+ u8 cable_status;
+};
+
+/**
* struct quad8 - device private data structure
* @lock: lock to prevent clobbering device states during R/W ops
* @counter: instance of the counter_device
@@ -48,7 +78,7 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
* @synchronous_mode: array of index function synchronous mode configurations
* @index_polarity: array of index function polarity configurations
* @cable_fault_enable: differential encoder cable status enable configurations
- * @base: base port address of the device
+ * @reg: I/O address offset for the device registers
*/
struct quad8 {
spinlock_t lock;
@@ -63,14 +93,9 @@ struct quad8 {
unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
unsigned int index_polarity[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
- unsigned int base;
+ struct quad8_reg __iomem *reg;
};
-#define QUAD8_REG_INTERRUPT_STATUS 0x10
-#define QUAD8_REG_CHAN_OP 0x11
-#define QUAD8_REG_INDEX_INTERRUPT 0x12
-#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
-#define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17
/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
@@ -118,8 +143,7 @@ static int quad8_signal_read(struct counter_device *counter,
if (signal->id < 16)
return -EINVAL;
- state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
- & BIT(signal->id - 16);
+ state = ioread8(&priv->reg->index_input_levels) & BIT(signal->id - 16);
*level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
@@ -130,14 +154,14 @@ static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct quad8 *const priv = counter_priv(counter);
- const int base_offset = priv->base + 2 * count->id;
+ struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
unsigned long irqflags;
int i;
- flags = inb(base_offset + 1);
+ flags = ioread8(&chan->control);
borrow = flags & QUAD8_FLAG_BT;
carry = !!(flags & QUAD8_FLAG_CT);
@@ -147,11 +171,11 @@ static int quad8_count_read(struct counter_device *counter,
spin_lock_irqsave(&priv->lock, irqflags);
/* Reset Byte Pointer; transfer Counter to Output Latch */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
- base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ &chan->control);
for (i = 0; i < 3; i++)
- *val |= (unsigned long)inb(base_offset) << (8 * i);
+ *val |= (unsigned long)ioread8(&chan->data) << (8 * i);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -162,7 +186,7 @@ static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
struct quad8 *const priv = counter_priv(counter);
- const int base_offset = priv->base + 2 * count->id;
+ struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned long irqflags;
int i;
@@ -173,27 +197,27 @@ static int quad8_count_write(struct counter_device *counter,
spin_lock_irqsave(&priv->lock, irqflags);
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
+ iowrite8(val >> (8 * i), &chan->data);
/* Transfer Preset Register to Counter */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, &chan->control);
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Set Preset Register back to original value */
val = priv->preset[count->id];
for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
+ iowrite8(val >> (8 * i), &chan->data);
/* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, &chan->control);
/* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, &chan->control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -246,7 +270,7 @@ static int quad8_function_write(struct counter_device *counter,
unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id;
unsigned int *const synchronous_mode = priv->synchronous_mode + id;
- const int base_offset = priv->base + 2 * id + 1;
+ u8 __iomem *const control = &priv->reg->channel[id].control;
unsigned long irqflags;
unsigned int mode_cfg;
unsigned int idr_cfg;
@@ -266,7 +290,7 @@ static int quad8_function_write(struct counter_device *counter,
if (*synchronous_mode) {
*synchronous_mode = 0;
/* Disable synchronous function mode */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
}
} else {
*quadrature_mode = 1;
@@ -292,7 +316,7 @@ static int quad8_function_write(struct counter_device *counter,
}
/* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ iowrite8(QUAD8_CTR_CMR | mode_cfg, control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -305,10 +329,10 @@ static int quad8_direction_read(struct counter_device *counter,
{
const struct quad8 *const priv = counter_priv(counter);
unsigned int ud_flag;
- const unsigned int flag_addr = priv->base + 2 * count->id + 1;
+ u8 __iomem *const flag_addr = &priv->reg->channel[count->id].control;
/* U/D flag: nonzero = up, zero = down */
- ud_flag = inb(flag_addr) & QUAD8_FLAG_UD;
+ ud_flag = ioread8(flag_addr) & QUAD8_FLAG_UD;
*direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD :
COUNTER_COUNT_DIRECTION_BACKWARD;
@@ -402,7 +426,6 @@ static int quad8_events_configure(struct counter_device *counter)
struct counter_event_node *event_node;
unsigned int next_irq_trigger;
unsigned long ior_cfg;
- unsigned long base_offset;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -437,14 +460,14 @@ static int quad8_events_configure(struct counter_device *counter)
ior_cfg = priv->ab_enable[event_node->channel] |
priv->preset_enable[event_node->channel] << 1 |
priv->irq_trigger[event_node->channel] << 3;
- base_offset = priv->base + 2 * event_node->channel + 1;
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IOR | ior_cfg,
+ &priv->reg->channel[event_node->channel].control);
/* Enable IRQ line */
irq_enabled |= BIT(event_node->channel);
}
- outb(irq_enabled, priv->base + QUAD8_REG_INDEX_INTERRUPT);
+ iowrite8(irq_enabled, &priv->reg->index_interrupt);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -508,7 +531,7 @@ static int quad8_index_polarity_set(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- const int base_offset = priv->base + 2 * channel_id + 1;
+ u8 __iomem *const control = &priv->reg->channel[channel_id].control;
unsigned long irqflags;
unsigned int idr_cfg = index_polarity << 1;
@@ -519,7 +542,7 @@ static int quad8_index_polarity_set(struct counter_device *counter,
priv->index_polarity[channel_id] = index_polarity;
/* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -549,7 +572,7 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- const int base_offset = priv->base + 2 * channel_id + 1;
+ u8 __iomem *const control = &priv->reg->channel[channel_id].control;
unsigned long irqflags;
unsigned int idr_cfg = synchronous_mode;
@@ -566,7 +589,7 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
priv->synchronous_mode[channel_id] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -614,7 +637,7 @@ static int quad8_count_mode_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned int count_mode;
unsigned int mode_cfg;
- const int base_offset = priv->base + 2 * count->id + 1;
+ u8 __iomem *const control = &priv->reg->channel[count->id].control;
unsigned long irqflags;
/* Map Generic Counter count mode to 104-QUAD-8 count mode */
@@ -648,7 +671,7 @@ static int quad8_count_mode_write(struct counter_device *counter,
mode_cfg |= (priv->quadrature_scale[count->id] + 1) << 3;
/* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ iowrite8(QUAD8_CTR_CMR | mode_cfg, control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -669,7 +692,7 @@ static int quad8_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct quad8 *const priv = counter_priv(counter);
- const int base_offset = priv->base + 2 * count->id;
+ u8 __iomem *const control = &priv->reg->channel[count->id].control;
unsigned long irqflags;
unsigned int ior_cfg;
@@ -681,7 +704,7 @@ static int quad8_count_enable_write(struct counter_device *counter,
priv->irq_trigger[count->id] << 3;
/* Load I/O control configuration */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+ iowrite8(QUAD8_CTR_IOR | ior_cfg, control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -697,9 +720,9 @@ static int quad8_error_noise_get(struct counter_device *counter,
struct counter_count *count, u32 *noise_error)
{
const struct quad8 *const priv = counter_priv(counter);
- const int base_offset = priv->base + 2 * count->id + 1;
+ u8 __iomem *const flag_addr = &priv->reg->channel[count->id].control;
- *noise_error = !!(inb(base_offset) & QUAD8_FLAG_E);
+ *noise_error = !!(ioread8(flag_addr) & QUAD8_FLAG_E);
return 0;
}
@@ -717,17 +740,17 @@ static int quad8_count_preset_read(struct counter_device *counter,
static void quad8_preset_register_set(struct quad8 *const priv, const int id,
const unsigned int preset)
{
- const unsigned int base_offset = priv->base + 2 * id;
+ struct channel_reg __iomem *const chan = priv->reg->channel + id;
int i;
priv->preset[id] = preset;
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Set Preset Register */
for (i = 0; i < 3; i++)
- outb(preset >> (8 * i), base_offset);
+ iowrite8(preset >> (8 * i), &chan->data);
}
static int quad8_count_preset_write(struct counter_device *counter,
@@ -816,7 +839,7 @@ static int quad8_count_preset_enable_write(struct counter_device *counter,
u8 preset_enable)
{
struct quad8 *const priv = counter_priv(counter);
- const int base_offset = priv->base + 2 * count->id + 1;
+ u8 __iomem *const control = &priv->reg->channel[count->id].control;
unsigned long irqflags;
unsigned int ior_cfg;
@@ -831,7 +854,7 @@ static int quad8_count_preset_enable_write(struct counter_device *counter,
priv->irq_trigger[count->id] << 3;
/* Load I/O control configuration to Input / Output Control Register */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IOR | ior_cfg, control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -858,7 +881,7 @@ static int quad8_signal_cable_fault_read(struct counter_device *counter,
}
/* Logic 0 = cable fault */
- status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ status = ioread8(&priv->reg->cable_status);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -899,7 +922,7 @@ static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
/* Enable is active low in Differential Encoder Cable Status register */
cable_fault_enable = ~priv->cable_fault_enable;
- outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ iowrite8(cable_fault_enable, &priv->reg->cable_status);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -923,7 +946,7 @@ static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
- const int base_offset = priv->base + 2 * channel_id;
+ struct channel_reg __iomem *const chan = priv->reg->channel + channel_id;
unsigned long irqflags;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -931,12 +954,12 @@ static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
priv->fck_prescaler[channel_id] = prescaler;
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Set filter clock factor */
- outb(prescaler, base_offset);
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
- base_offset + 1);
+ iowrite8(prescaler, &chan->data);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
+ &chan->control);
spin_unlock_irqrestore(&priv->lock, irqflags);
@@ -1084,12 +1107,11 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
{
struct counter_device *counter = private;
struct quad8 *const priv = counter_priv(counter);
- const unsigned long base = priv->base;
unsigned long irq_status;
unsigned long channel;
u8 event;
- irq_status = inb(base + QUAD8_REG_INTERRUPT_STATUS);
+ irq_status = ioread8(&priv->reg->interrupt_status);
if (!irq_status)
return IRQ_NONE;
@@ -1118,17 +1140,43 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
}
/* Clear pending interrupts on device */
- outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base + QUAD8_REG_CHAN_OP);
+ iowrite8(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, &priv->reg->channel_oper);
return IRQ_HANDLED;
}
+static void quad8_init_counter(struct channel_reg __iomem *const chan)
+{
+ unsigned long i;
+
+ /* Reset Byte Pointer */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
+ /* Reset filter clock factor */
+ iowrite8(0, &chan->data);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
+ &chan->control);
+ /* Reset Byte Pointer */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
+ /* Reset Preset Register */
+ for (i = 0; i < 3; i++)
+ iowrite8(0x00, &chan->data);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, &chan->control);
+ /* Reset Error flag */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, &chan->control);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ iowrite8(QUAD8_CTR_CMR, &chan->control);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ iowrite8(QUAD8_CTR_IOR, &chan->control);
+ /* Disable index function; negative index polarity */
+ iowrite8(QUAD8_CTR_IDR, &chan->control);
+}
+
static int quad8_probe(struct device *dev, unsigned int id)
{
struct counter_device *counter;
struct quad8 *priv;
- int i, j;
- unsigned int base_offset;
+ unsigned long i;
int err;
if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
@@ -1142,6 +1190,10 @@ static int quad8_probe(struct device *dev, unsigned int id)
return -ENOMEM;
priv = counter_priv(counter);
+ priv->reg = devm_ioport_map(dev, base[id], QUAD8_EXTENT);
+ if (!priv->reg)
+ return -ENOMEM;
+
/* Initialize Counter device and driver data */
counter->name = dev_name(dev);
counter->parent = dev;
@@ -1150,43 +1202,20 @@ static int quad8_probe(struct device *dev, unsigned int id)
counter->num_counts = ARRAY_SIZE(quad8_counts);
counter->signals = quad8_signals;
counter->num_signals = ARRAY_SIZE(quad8_signals);
- priv->base = base[id];
spin_lock_init(&priv->lock);
/* Reset Index/Interrupt Register */
- outb(0x00, base[id] + QUAD8_REG_INDEX_INTERRUPT);
+ iowrite8(0x00, &priv->reg->index_interrupt);
/* Reset all counters and disable interrupt function */
- outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ iowrite8(QUAD8_CHAN_OP_RESET_COUNTERS, &priv->reg->channel_oper);
/* Set initial configuration for all counters */
- for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
- base_offset = base[id] + 2 * i;
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
- /* Reset filter clock factor */
- outb(0, base_offset);
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
- base_offset + 1);
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
- /* Reset Preset Register */
- for (j = 0; j < 3; j++)
- outb(0x00, base_offset);
- /* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
- /* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
- /* Binary encoding; Normal count; non-quadrature mode */
- outb(QUAD8_CTR_CMR, base_offset + 1);
- /* Disable A and B inputs; preset on index; FLG1 as Carry */
- outb(QUAD8_CTR_IOR, base_offset + 1);
- /* Disable index function; negative index polarity */
- outb(QUAD8_CTR_IDR, base_offset + 1);
- }
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++)
+ quad8_init_counter(priv->reg->channel + i);
/* Disable Differential Encoder Cable Status for all channels */
- outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ iowrite8(0xFF, &priv->reg->cable_status);
/* Enable all counters and enable interrupt function */
- outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base[id] + QUAD8_REG_CHAN_OP);
+ iowrite8(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, &priv->reg->channel_oper);
err = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
IRQF_SHARED, counter->name, counter);
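The 104-QUAD-8 conversion replaces inb()/outb() plus offset arithmetic with a struct overlay on an ioport_map() cookie, so every access names the register it touches. A minimal sketch of the same pattern for a hypothetical two-register device, assuming the standard devm_request_region()/devm_ioport_map() helpers:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/ioport.h>

	/* hypothetical device: data register at offset 0, status at offset 1 */
	struct foo_reg {
		u8 data;
		u8 status;
	};

	static int foo_setup(struct device *dev, unsigned long port)
	{
		struct foo_reg __iomem *reg;

		if (!devm_request_region(dev, port, sizeof(*reg), dev_name(dev)))
			return -EBUSY;

		/* map the port range so ioread8()/iowrite8() can be used on it */
		reg = devm_ioport_map(dev, port, sizeof(*reg));
		if (!reg)
			return -ENOMEM;

		iowrite8(0x00, &reg->data);
		return ioread8(&reg->status) ? -EIO : 0;
	}

Field order in the overlay must match the hardware register map exactly, which is why quad8_reg spells out the three reserved bytes.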
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c3038cdc6865..2a84fc63371e 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -268,7 +268,7 @@ config LOONGSON2_CPUFREQ
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
- Loongson2F and it's successors support this feature.
+ Loongson2F and its successors support this feature.
If in doubt, say N.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3d514b82d055..1bb2b90ebb21 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -78,6 +78,8 @@ static bool boost_state(unsigned int cpu)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_ZHAOXIN:
rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
msr = lo | ((u64)hi << 32);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -97,6 +99,8 @@ static int boost_set_msr(bool enable)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_ZHAOXIN:
msr_addr = MSR_IA32_MISC_ENABLE;
msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
break;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8fcaba541539..d69d13a26414 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -29,9 +29,9 @@ struct private_data {
cpumask_var_t cpus;
struct device *cpu_dev;
- struct opp_table *opp_table;
struct cpufreq_frequency_table *freq_table;
bool have_static_opps;
+ int opp_token;
};
static LIST_HEAD(priv_list);
@@ -193,7 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
struct private_data *priv;
struct device *cpu_dev;
bool fallback = false;
- const char *reg_name;
+ const char *reg_name[] = { NULL, NULL };
int ret;
/* Check if this CPU is already covered by some other policy */
@@ -218,12 +218,11 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
* OPP layer will be taking care of regulators now, but it needs to know
* the name of the regulator first.
*/
- reg_name = find_supply_name(cpu_dev);
- if (reg_name) {
- priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
- 1);
- if (IS_ERR(priv->opp_table)) {
- ret = PTR_ERR(priv->opp_table);
+ reg_name[0] = find_supply_name(cpu_dev);
+ if (reg_name[0]) {
+ priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
+ if (priv->opp_token < 0) {
+ ret = priv->opp_token;
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to set regulators: %d\n",
ret);
@@ -295,7 +294,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
out:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
free_cpumask_var(priv->cpus);
return ret;
@@ -309,7 +308,7 @@ static void dt_cpufreq_release(void)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask_var(priv->cpus);
list_del(&priv->node);
}
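cpufreq-dt now keeps the plain integer token returned by dev_pm_opp_set_regulators() rather than an opp_table pointer; the supply-name list is NULL-terminated and the token is later handed back to dev_pm_opp_put_regulators(). A reduced sketch of the call sequence, assuming a single hypothetical "cpu" supply:

	#include <linux/device.h>
	#include <linux/pm_opp.h>

	static int example_set_cpu_supply(struct device *cpu_dev)
	{
		/* NULL-terminated list of supply names */
		const char *names[] = { "cpu", NULL };
		int token;

		token = dev_pm_opp_set_regulators(cpu_dev, names);
		if (token < 0)
			return token;	/* may be -EPROBE_DEFER */

		/* ... build the OPP table and register cpufreq here ... */

		dev_pm_opp_put_regulators(token);
		return 0;
	}

A negative return is the error code and anything else is the token, which is why the driver stores opp_token as a plain int.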
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2cad42774164..7820c4e74289 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -843,12 +843,14 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
unsigned int cpu;
for_each_cpu(cpu, mask) {
- if (i)
- i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
- i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
+ i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
if (i >= (PAGE_SIZE - 5))
break;
}
+
+ /* Remove the extra space at the end */
+ i--;
+
i += sprintf(&buf[i], "\n");
return i;
}
@@ -971,21 +973,10 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!fattr->store)
return -EIO;
- /*
- * cpus_read_trylock() is used here to work around a circular lock
- * dependency problem with respect to the cpufreq_register_driver().
- */
- if (!cpus_read_trylock())
- return -EBUSY;
-
- if (cpu_online(policy->cpu)) {
- down_write(&policy->rwsem);
- if (likely(!policy_is_inactive(policy)))
- ret = fattr->store(policy, buf, count);
- up_write(&policy->rwsem);
- }
-
- cpus_read_unlock();
+ down_write(&policy->rwsem);
+ if (likely(!policy_is_inactive(policy)))
+ ret = fattr->store(policy, buf, count);
+ up_write(&policy->rwsem);
return ret;
}
@@ -1282,6 +1273,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
unsigned long flags;
int cpu;
+ /*
+ * The callers must ensure the policy is inactive by now, to avoid any
+ * races with show()/store() callbacks.
+ */
+ if (unlikely(!policy_is_inactive(policy)))
+ pr_warn("%s: Freeing active policy\n", __func__);
+
/* Remove policy from list */
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_del(&policy->policy_list);
@@ -1350,15 +1348,15 @@ static int cpufreq_online(unsigned int cpu)
}
if (!new_policy && cpufreq_driver->online) {
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+
ret = cpufreq_driver->online(policy);
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_exit_policy;
}
-
- /* Recover policy->cpus using related_cpus */
- cpumask_copy(policy->cpus, policy->related_cpus);
} else {
cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1536,8 +1534,6 @@ out_destroy_policy:
for_each_cpu(j, policy->real_cpus)
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
- cpumask_clear(policy->cpus);
-
out_offline_policy:
if (cpufreq_driver->offline)
cpufreq_driver->offline(policy);
@@ -1547,6 +1543,7 @@ out_exit_policy:
cpufreq_driver->exit(policy);
out_free_policy:
+ cpumask_clear(policy->cpus);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e8fbf970ff07..c52d19d67557 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -416,10 +416,13 @@ static struct dbs_governor od_dbs_gov = {
static void od_set_powersave_bias(unsigned int powersave_bias)
{
unsigned int cpu;
- cpumask_t done;
+ cpumask_var_t done;
+
+ if (!alloc_cpumask_var(&done, GFP_KERNEL))
+ return;
default_powersave_bias = powersave_bias;
- cpumask_clear(&done);
+ cpumask_clear(done);
cpus_read_lock();
for_each_online_cpu(cpu) {
@@ -428,7 +431,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
struct dbs_data *dbs_data;
struct od_dbs_tuners *od_tuners;
- if (cpumask_test_cpu(cpu, &done))
+ if (cpumask_test_cpu(cpu, done))
continue;
policy = cpufreq_cpu_get_raw(cpu);
@@ -439,13 +442,15 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
if (!policy_dbs)
continue;
- cpumask_or(&done, &done, policy->cpus);
+ cpumask_or(done, done, policy->cpus);
dbs_data = policy_dbs->dbs_data;
od_tuners = dbs_data->tuners;
od_tuners->powersave_bias = default_powersave_bias;
}
cpus_read_unlock();
+
+ free_cpumask_var(done);
}
void od_register_powersave_bias_handler(unsigned int (*f)
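od_set_powersave_bias() now uses an off-stack cpumask so the governor no longer places a potentially large cpumask_t on the stack on big-NR_CPUS builds. The alloc/free pairing it relies on, shown in a self-contained sketch (a hypothetical helper, not part of the governor):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int count_online_in(const struct cpumask *set)
	{
		cpumask_var_t tmp;
		int n;

		/* heap-backed when CONFIG_CPUMASK_OFFSTACK=y; otherwise on-stack */
		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmp, set, cpu_online_mask);
		n = cpumask_weight(tmp);

		free_cpumask_var(tmp);
		return n;
	}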
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 3fe9125156b4..76e553af2071 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -31,8 +31,8 @@
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
-static struct opp_table *cpufreq_opp_table;
static struct device *cpu_dev;
+static int cpufreq_opp_token;
enum IMX7ULP_CPUFREQ_CLKS {
ARM,
@@ -153,9 +153,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
- cpufreq_opp_table = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
- if (IS_ERR(cpufreq_opp_table)) {
- ret = PTR_ERR(cpufreq_opp_table);
+ cpufreq_opp_token = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
+ if (cpufreq_opp_token < 0) {
+ ret = cpufreq_opp_token;
dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
return ret;
}
@@ -163,7 +163,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_data(
&pdev->dev, "cpufreq-dt", -1, NULL, 0);
if (IS_ERR(cpufreq_dt_pdev)) {
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
return ret;
@@ -176,7 +176,7 @@ static int imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 813cccbfe934..f0e0a35c7f21 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -51,7 +51,7 @@ static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
};
static int __maybe_unused
-mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *mW,
+mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
struct mtk_cpufreq_data *data;
@@ -71,8 +71,9 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *mW,
i--;
*KHz = data->table[i].frequency;
- *mW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
- i * LUT_ROW_SIZE) / 1000;
+ /* Provide micro-Watts value to the Energy Model */
+ *uW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
+ i * LUT_ROW_SIZE);
return 0;
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 37a1eb20f5ba..7f2680bc9a0f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -439,9 +439,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
/* Both presence and absence of sram regulator are valid cases. */
info->sram_reg = regulator_get_optional(cpu_dev, "sram");
- if (IS_ERR(info->sram_reg))
+ if (IS_ERR(info->sram_reg)) {
+ ret = PTR_ERR(info->sram_reg);
+ if (ret == -EPROBE_DEFER)
+ goto out_free_resources;
+
info->sram_reg = NULL;
- else {
+ } else {
ret = regulator_enable(info->sram_reg);
if (ret) {
dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
@@ -474,6 +478,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
+ ret = info->vproc_on_boot;
dev_err(info->cpu_dev,
"invalid Vproc value: %d\n", info->vproc_on_boot);
goto out_disable_inter_clock;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 6d33a639f902..7f3cfe668f30 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* CPUFreq support for Armada 370/XP platforms.
*
@@ -6,10 +7,6 @@
* Yehuda Yitschak <yehuday@marvell.com>
* Gregory Clement <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) "mvebu-pmsu: " fmt
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 36c79580fba2..d5ef3c66c762 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -15,6 +15,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -26,8 +27,6 @@
#define GT_IRQ_STATUS BIT(2)
-#define HZ_PER_KHZ 1000
-
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
@@ -428,7 +427,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -445,7 +444,11 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
if (data->throttle_irq <= 0)
return 0;
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = false;
+ mutex_unlock(&data->throttle_lock);
+
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -465,7 +468,8 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
- irq_set_affinity_hint(data->throttle_irq, NULL);
+ irq_set_affinity_and_hint(data->throttle_irq, NULL);
+ disable_irq_nosync(data->throttle_irq);
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 6dfa86971a75..863548f59c3e 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -55,9 +55,7 @@ struct qcom_cpufreq_match_data {
};
struct qcom_cpufreq_drv {
- struct opp_table **names_opp_tables;
- struct opp_table **hw_opp_tables;
- struct opp_table **genpd_opp_tables;
+ int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
};
@@ -315,72 +313,43 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- drv->names_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->names_opp_tables),
+ drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
GFP_KERNEL);
- if (!drv->names_opp_tables) {
+ if (!drv->opp_tokens) {
ret = -ENOMEM;
goto free_drv;
}
- drv->hw_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->hw_opp_tables),
- GFP_KERNEL);
- if (!drv->hw_opp_tables) {
- ret = -ENOMEM;
- goto free_opp_names;
- }
-
- drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->genpd_opp_tables),
- GFP_KERNEL);
- if (!drv->genpd_opp_tables) {
- ret = -ENOMEM;
- goto free_opp;
- }
for_each_possible_cpu(cpu) {
+ struct dev_pm_opp_config config = {
+ .supported_hw = NULL,
+ };
+
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
ret = -ENODEV;
- goto free_genpd_opp;
+ goto free_opp;
}
if (drv->data->get_version) {
+ config.supported_hw = &drv->versions;
+ config.supported_hw_count = 1;
- if (pvs_name) {
- drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name(
- cpu_dev,
- pvs_name);
- if (IS_ERR(drv->names_opp_tables[cpu])) {
- ret = PTR_ERR(drv->names_opp_tables[cpu]);
- dev_err(cpu_dev, "Failed to add OPP name %s\n",
- pvs_name);
- goto free_opp;
- }
- }
-
- drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw(
- cpu_dev, &drv->versions, 1);
- if (IS_ERR(drv->hw_opp_tables[cpu])) {
- ret = PTR_ERR(drv->hw_opp_tables[cpu]);
- dev_err(cpu_dev,
- "Failed to set supported hardware\n");
- goto free_genpd_opp;
- }
+ if (pvs_name)
+ config.prop_name = pvs_name;
}
if (drv->data->genpd_names) {
- drv->genpd_opp_tables[cpu] =
- dev_pm_opp_attach_genpd(cpu_dev,
- drv->data->genpd_names,
- NULL);
- if (IS_ERR(drv->genpd_opp_tables[cpu])) {
- ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
- if (ret != -EPROBE_DEFER)
- dev_err(cpu_dev,
- "Could not attach to pm_domain: %d\n",
- ret);
- goto free_genpd_opp;
+ config.genpd_names = drv->data->genpd_names;
+ config.virt_devs = NULL;
+ }
+
+ if (config.supported_hw || config.genpd_names) {
+ drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->opp_tokens[cpu] < 0) {
+ ret = drv->opp_tokens[cpu];
+ dev_err(cpu_dev, "Failed to set OPP config\n");
+ goto free_opp;
}
}
}
@@ -395,27 +364,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
-free_genpd_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->genpd_opp_tables[cpu]))
- break;
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
- kfree(drv->genpd_opp_tables);
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->names_opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
- }
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->hw_opp_tables[cpu]))
- break;
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- }
- kfree(drv->hw_opp_tables);
-free_opp_names:
- kfree(drv->names_opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+ kfree(drv->opp_tokens);
free_drv:
kfree(drv);
@@ -429,15 +381,10 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
- dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
- kfree(drv->names_opp_tables);
- kfree(drv->hw_opp_tables);
- kfree(drv->genpd_opp_tables);
+ kfree(drv->opp_tokens);
kfree(drv);
return 0;
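The qcom-cpufreq-nvmem rework folds the separate prop-name, supported-hw and genpd-attach calls into a single dev_pm_opp_set_config() whose integer token is later released with dev_pm_opp_clear_config(). A compact sketch of that interface, assuming one supported-hw version word and a hypothetical "speed0" property name:

	#include <linux/device.h>
	#include <linux/pm_opp.h>

	static int example_opp_config(struct device *cpu_dev, u32 version)
	{
		struct dev_pm_opp_config config = {
			.supported_hw = &version,
			.supported_hw_count = 1,
			.prop_name = "speed0",	/* hypothetical name suffix */
		};
		int token;

		token = dev_pm_opp_set_config(cpu_dev, &config);
		if (token < 0)
			return token;

		/* ... register the cpufreq-dt platform device here ... */

		dev_pm_opp_clear_config(token);
		return 0;
	}

Clearing one token undoes every piece of configuration set in the single call, which is what lets the error paths above collapse into one loop.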
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 6d2a4cf46db7..513a071845c2 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
+#include <linux/units.h>
struct scmi_data {
int domain_id;
@@ -99,6 +100,7 @@ static int __maybe_unused
scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
unsigned long *KHz)
{
+ enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
unsigned long Hz;
int ret, domain;
@@ -112,6 +114,10 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
if (ret)
return ret;
+ /* Convert the power to uW if it is mW (ignore bogoW) */
+ if (power_scale == SCMI_POWER_MILLIWATTS)
+ *power *= MICROWATT_PER_MILLIWATT;
+
/* The EM framework specifies the frequency in KHz. */
*KHz = Hz / 1000;
@@ -249,8 +255,9 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
- bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+ enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
struct scmi_data *priv = policy->driver_data;
+ bool em_power_scale = false;
/*
* This callback will be called for each policy, but we don't need to
@@ -262,9 +269,13 @@ static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
if (!priv->nr_opp)
return;
+ if (power_scale == SCMI_POWER_MILLIWATTS
+ || power_scale == SCMI_POWER_MICROWATTS)
+ em_power_scale = true;
+
em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
&em_cb, priv->opp_shared_cpus,
- power_scale_mw);
+ em_power_scale);
}
static struct cpufreq_driver scmi_cpufreq_driver = {
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index bda3e7d42964..fd2c16821d54 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* System Control and Power Interface (SCPI) based CPUFreq Interface driver
*
* Copyright (C) 2015 ARM Ltd.
* Sudeep Holla <sudeep.holla@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index fdb0a722d881..a67df90848c2 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -156,9 +156,13 @@ static int sti_cpufreq_set_opp_info(void)
unsigned int hw_info_offset;
unsigned int version[VERSION_ELEMENTS];
int pcode, substrate, major, minor;
- int ret;
+ int opp_token, ret;
char name[MAX_PCODE_NAME_LEN];
- struct opp_table *opp_table;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ .prop_name = name,
+ };
reg_fields = sti_cpufreq_match();
if (!reg_fields) {
@@ -210,21 +214,14 @@ use_defaults:
snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
- opp_table = dev_pm_opp_set_prop_name(dev, name);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set prop name\n");
- return PTR_ERR(opp_table);
- }
-
version[0] = BIT(major);
version[1] = BIT(minor);
version[2] = BIT(substrate);
- opp_table = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set supported hardware\n");
- ret = PTR_ERR(opp_table);
- goto err_put_prop_name;
+ opp_token = dev_pm_opp_set_config(dev, &config);
+ if (opp_token < 0) {
+ dev_err(dev, "Failed to set OPP config\n");
+ return opp_token;
}
dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
@@ -233,10 +230,6 @@ use_defaults:
version[0], version[1], version[2]);
return 0;
-
-err_put_prop_name:
- dev_pm_opp_put_prop_name(opp_table);
- return ret;
}
static int sti_cpufreq_fetch_syscon_registers(void)
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 75e1bf3a08f7..a4922580ce06 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -86,20 +86,20 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
- struct opp_table **opp_tables;
+ int *opp_tokens;
char name[MAX_NAME_LEN];
unsigned int cpu;
u32 speed = 0;
int ret;
- opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
+ opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
GFP_KERNEL);
- if (!opp_tables)
+ if (!opp_tokens)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
if (ret) {
- kfree(opp_tables);
+ kfree(opp_tokens);
return ret;
}
@@ -113,9 +113,9 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (IS_ERR(opp_tables[cpu])) {
- ret = PTR_ERR(opp_tables[cpu]);
+ opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
+ if (opp_tokens[cpu] < 0) {
+ ret = opp_tokens[cpu];
pr_err("Failed to set prop name\n");
goto free_opp;
}
@@ -124,7 +124,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
if (!IS_ERR(cpufreq_dt_pdev)) {
- platform_set_drvdata(pdev, opp_tables);
+ platform_set_drvdata(pdev, opp_tokens);
return 0;
}
@@ -132,27 +132,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
- }
- kfree(opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ kfree(opp_tokens);
return ret;
}
static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
- struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ int *opp_tokens = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
- kfree(opp_tables);
+ kfree(opp_tokens);
return 0;
}
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 2a6a98764a8c..1216046cf4c2 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -162,7 +162,7 @@ static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
@@ -430,7 +430,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
};
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index e8db3d75be25..ab7ac7df9e62 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,9 +32,9 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
-static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+static void tegra20_cpufreq_put_supported_hw(void *opp_token)
{
- dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_supported_hw((unsigned long) opp_token);
}
static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
@@ -45,7 +45,6 @@ static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
struct device *cpu_dev;
u32 versions[2];
int err;
@@ -71,16 +70,15 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
if (WARN_ON(!cpu_dev))
return -ENODEV;
- opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
- err = PTR_ERR_OR_ZERO(opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ if (err < 0) {
dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
return err;
}
err = devm_add_action_or_reset(&pdev->dev,
tegra20_cpufreq_put_supported_hw,
- opp_table);
+ (void *)((unsigned long) err));
if (err)
return err;
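A hedged sketch of the pointer-cookie pattern in the tegra20 hunk: the positive token returned by dev_pm_opp_set_supported_hw() is widened to unsigned long so it can ride through the void * argument of a devm action. Helper names here are illustrative.

#include <linux/device.h>
#include <linux/pm_opp.h>

static void example_put_supported_hw(void *token)
{
	dev_pm_opp_put_supported_hw((unsigned long)token);
}

static int example_register_put(struct device *dev, int token)
{
	/* Widen to unsigned long first so the int round-trips a pointer cleanly. */
	return devm_add_action_or_reset(dev, example_put_supported_hw,
					(void *)(unsigned long)token);
}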
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 8f9fdd864391..df85a77d476b 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -60,7 +60,6 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
- struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -173,7 +172,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
* seems to always read as 0).
*/
-static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+static const char * const omap3_reg_names[] = {"cpu0", "vbb", NULL};
static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.reg_names = omap3_reg_names,
@@ -324,10 +323,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
const struct of_device_id *match;
- struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const default_reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb", NULL};
int ret;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ };
match = dev_get_platdata(&pdev->dev);
if (!match)
@@ -370,33 +372,21 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto fail_put_node;
- ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT);
- if (IS_ERR(ti_opp_table)) {
- dev_err(opp_data->cpu_dev,
- "Failed to set supported hardware\n");
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
-
- opp_data->opp_table = ti_opp_table;
-
if (opp_data->soc_data->multi_regulator) {
- const char * const *reg_names = default_reg_names;
-
if (opp_data->soc_data->reg_names)
- reg_names = opp_data->soc_data->reg_names;
- ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
- reg_names,
- ARRAY_SIZE(default_reg_names));
- if (IS_ERR(ti_opp_table)) {
- dev_pm_opp_put_supported_hw(opp_data->opp_table);
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
+ config.regulator_names = opp_data->soc_data->reg_names;
+ else
+ config.regulator_names = default_reg_names;
+ }
+
+ ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ if (ret < 0) {
+ dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ goto fail_put_node;
}
of_node_put(opp_data->opp_node);
+
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
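A short sketch of why the diff appends NULL to the regulator name arrays: the regulator_names member of struct dev_pm_opp_config is assumed here to be a NULL-terminated list that the OPP core walks itself. The device, array, and helper names are placeholders.

#include <linux/device.h>
#include <linux/pm_opp.h>

static const char * const example_reg_names[] = { "vdd", "vbb", NULL };

static int example_opp_regulators(struct device *cpu_dev)
{
	struct dev_pm_opp_config config = {
		/* The terminating NULL lets the core find the end of the list. */
		.regulator_names = example_reg_names,
	};

	return dev_pm_opp_set_config(cpu_dev, &config);
}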
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index be7f512109f7..747aa537389b 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -3,7 +3,8 @@
# ARM CPU Idle drivers
#
config ARM_CPUIDLE
- bool "Generic ARM/ARM64 CPU idle Driver"
+ bool "Generic ARM CPU idle Driver"
+ depends on ARM
select DT_IDLE_STATES
select CPU_IDLE_MULTIPLE_DRIVERS
help
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c
index 9c5853b6ca4a..45ee8e1e71ae 100644
--- a/drivers/cpuidle/cpuidle-at91.c
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* based on arch/arm/mach-kirkwood/cpuidle.c
*
* CPU idle support for AT91 SoC
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* The cpu idle uses wait-for-interrupt and RAM self refresh in order
* to implement two idle states -
* #1 wait-for-interrupt
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 511c4f46027a..13bf743f885b 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU idle Marvell Kirkwood SoCs
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* The cpu idle uses wait-for-interrupt and DDR self refresh in order
* to implement two idle states -
* #1 wait-for-interrupt
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 540105ca0781..57bc3e3ae391 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -69,12 +69,12 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
- rcu_irq_enter_irqson();
+ ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- rcu_irq_exit_irqson();
+ ct_irq_exit_irqson();
state = psci_get_domain_state();
if (!state)
@@ -82,12 +82,12 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
- rcu_irq_enter_irqson();
+ ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- rcu_irq_exit_irqson();
+ ct_irq_exit_irqson();
cpu_pm_exit();
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 1151e5e2ba82..862a2876f1c9 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -116,12 +116,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
- rcu_irq_enter_irqson();
+ ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- rcu_irq_exit_irqson();
+ ct_irq_exit_irqson();
if (sbi_is_domain_state_available())
state = sbi_get_domain_state();
@@ -130,12 +130,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
ret = sbi_suspend(state) ? -1 : idx;
- rcu_irq_enter_irqson();
+ ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- rcu_irq_exit_irqson();
+ ct_irq_exit_irqson();
cpu_pm_exit();
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index ef2ea1b12cd8..6eceb1988243 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
* This code is licenced under the GPL.
*/
+#include "linux/percpu-defs.h"
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -23,6 +24,7 @@
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/mmu_context.h>
+#include <linux/context_tracking.h>
#include <trace/events/power.h>
#include "cpuidle.h"
@@ -150,12 +152,12 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
*/
stop_critical_timings();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- rcu_idle_enter();
+ ct_idle_enter();
target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
local_irq_disable();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- rcu_idle_exit();
+ ct_idle_exit();
tick_unfreeze();
start_critical_timings();
@@ -233,10 +235,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
stop_critical_timings();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- rcu_idle_enter();
+ ct_idle_enter();
entered_state = target_state->enter(dev, drv, index);
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- rcu_idle_exit();
+ ct_idle_exit();
start_critical_timings();
sched_clock_idle_wakeup_event();
@@ -278,6 +280,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* Shallower states are enabled, so update. */
dev->states_usage[entered_state].above++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, false);
break;
}
} else if (diff > delay) {
@@ -289,8 +292,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency_ns)
+ if (diff - delay >= drv->states[i].target_residency_ns) {
dev->states_usage[entered_state].below++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, true);
+ }
break;
}
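For context, a hedged sketch of the context-tracking calls that replace the old RCU idle hooks in the cpuidle hunks above; the driver, state, and function names are placeholders.

#include <linux/context_tracking.h>
#include <linux/cpuidle.h>

static int example_enter_idle(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];
	int ret;

	/*
	 * States flagged CPUIDLE_FLAG_RCU_IDLE handle this inside their
	 * enter callback, so the core only brackets the remaining ones.
	 */
	if (!(state->flags & CPUIDLE_FLAG_RCU_IDLE))
		ct_idle_enter();

	ret = state->enter(dev, drv, index);

	if (!(state->flags & CPUIDLE_FLAG_RCU_IDLE))
		ct_idle_exit();

	return ret;
}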
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
index cb2a96eafc02..1dff3a52917d 100644
--- a/drivers/cpuidle/governors/haltpoll.c
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kvm_para.h>
+#include <trace/events/power.h>
static unsigned int guest_halt_poll_ns __read_mostly = 200000;
module_param(guest_halt_poll_ns, uint, 0644);
@@ -90,6 +91,7 @@ static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
if (val > guest_halt_poll_ns)
val = guest_halt_poll_ns;
+ trace_guest_halt_poll_ns_grow(val, dev->poll_limit_ns);
dev->poll_limit_ns = val;
} else if (block_ns > guest_halt_poll_ns &&
guest_halt_poll_allow_shrink) {
@@ -100,6 +102,7 @@ static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
val = 0;
else
val /= shrink;
+ trace_guest_halt_poll_ns_shrink(val, dev->poll_limit_ns);
dev->poll_limit_ns = val;
}
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 5bb950182026..910d6751644c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -170,6 +170,7 @@ dma_iv_error:
while (i >= 0) {
dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
memzero_explicit(sf->iv[i], ivsize);
+ i--;
}
return err;
}
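A small illustrative version of the unwind loop being fixed above: without the decrement the loop keeps unmapping the same entry forever. The device pointer, arrays, and sizes are placeholders.

#include <linux/dma-mapping.h>
#include <linux/string.h>

static void example_unmap_ivs(struct device *dev, dma_addr_t *p_iv,
			      u8 **iv, unsigned int ivsize, int i)
{
	while (i >= 0) {
		dma_unmap_single(dev, p_iv[i], ivsize, DMA_TO_DEVICE);
		memzero_explicit(iv[i], ivsize);
		i--;	/* step back toward the first mapped entry */
	}
}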
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 98593a0cff69..ac2329e2b0e5 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -528,25 +528,33 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
GFP_KERNEL | GFP_DMA);
- if (!ss->flows[i].biv)
+ if (!ss->flows[i].biv) {
+ err = -ENOMEM;
goto error_engine;
+ }
for (j = 0; j < MAX_SG; j++) {
ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
GFP_KERNEL | GFP_DMA);
- if (!ss->flows[i].iv[j])
+ if (!ss->flows[i].iv[j]) {
+ err = -ENOMEM;
goto error_engine;
+ }
}
/* the padding could be up to two block. */
ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE,
GFP_KERNEL | GFP_DMA);
- if (!ss->flows[i].pad)
+ if (!ss->flows[i].pad) {
+ err = -ENOMEM;
goto error_engine;
+ }
ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE,
GFP_KERNEL | GFP_DMA);
- if (!ss->flows[i].result)
+ if (!ss->flows[i].result) {
+ err = -ENOMEM;
goto error_engine;
+ }
ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
if (!ss->flows[i].engine) {
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index ac417a6b39e5..36a82b22953c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -30,8 +30,8 @@ static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
int ret = 0;
xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
- if (!xtfm)
- return -ENOMEM;
+ if (IS_ERR(xtfm))
+ return PTR_ERR(xtfm);
len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
sdesc = kmalloc(len, GFP_KERNEL);
@@ -586,7 +586,8 @@ retry:
rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
}
addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
- if (dma_mapping_error(ss->dev, addr_xpad)) {
+ err = dma_mapping_error(ss->dev, addr_xpad);
+ if (err) {
dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
goto err_dma_xpad;
}
@@ -612,7 +613,8 @@ retry:
goto err_dma_result;
}
addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
- if (dma_mapping_error(ss->dev, addr_xpad)) {
+ err = dma_mapping_error(ss->dev, addr_xpad);
+ if (err) {
dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
goto err_dma_xpad;
}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 8278d98074e9..280f4b0e7133 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1378,6 +1378,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ struct device_node *np;
u32 pvr;
bool is_revb = true;
@@ -1385,29 +1386,36 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (rc)
return -ENODEV;
- if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
+ if (np) {
mtdcri(SDR0, PPC460EX_SDR0_SRST,
mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
mtdcri(SDR0, PPC460EX_SDR0_SRST,
mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
- } else if (of_find_compatible_node(NULL, NULL,
- "amcc,ppc405ex-crypto")) {
- mtdcri(SDR0, PPC405EX_SDR0_SRST,
- mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
- mtdcri(SDR0, PPC405EX_SDR0_SRST,
- mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
- is_revb = false;
- } else if (of_find_compatible_node(NULL, NULL,
- "amcc,ppc460sx-crypto")) {
- mtdcri(SDR0, PPC460SX_SDR0_SRST,
- mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
- mtdcri(SDR0, PPC460SX_SDR0_SRST,
- mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
} else {
- printk(KERN_ERR "Crypto Function Not supported!\n");
- return -EINVAL;
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto");
+ if (np) {
+ mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
+ mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
+ } else {
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto");
+ if (np) {
+ mtdcri(SDR0, PPC460SX_SDR0_SRST,
+ mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
+ mtdcri(SDR0, PPC460SX_SDR0_SRST,
+ mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
+ } else {
+ printk(KERN_ERR "Crypto Function Not supported!\n");
+ return -EINVAL;
+ }
+ }
}
+ of_node_put(np);
+
core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
if (!core_dev)
return -ENOMEM;
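A hedged sketch of the reference-count rule the crypto4xx change enforces: of_find_compatible_node() hands back a node with its refcount elevated, so any matching path must drop it with of_node_put(). The helper name and compatible string are illustrative.

#include <linux/of.h>

static bool example_has_compatible(const char *compat)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return false;

	/* Use np as needed, then drop the reference the lookup took. */
	of_node_put(np);
	return true;
}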
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index f72c6b3e4ad8..886bf258544c 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2669,8 +2669,7 @@ static int atmel_aes_remove(struct platform_device *pdev)
struct atmel_aes_dev *aes_dd;
aes_dd = platform_get_drvdata(pdev);
- if (!aes_dd)
- return -ENODEV;
+
spin_lock(&atmel_aes.lock);
list_del(&aes_dd->list);
spin_unlock(&atmel_aes.lock);
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 59a57279e77b..a4b13d326cfc 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -349,8 +349,16 @@ static int atmel_ecc_remove(struct i2c_client *client)
/* Return EBUSY if i2c client already allocated. */
if (atomic_read(&i2c_priv->tfm_count)) {
- dev_err(&client->dev, "Device is busy\n");
- return -EBUSY;
+ /*
+ * After we return here, the memory backing the device is freed.
+ * That happens no matter what the return value of this function
+ * is because in the Linux device model there is no error
+ * handling for unbinding a driver.
+ * If there is still some action pending, it probably involves
+ * accessing the freed memory.
+ */
+ dev_emerg(&client->dev, "Device is busy, expect memory corruption.\n");
+ return 0;
}
crypto_unregister_kpp(&atmel_ecdh_nist_p256);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index d1628112dacc..ca4b01926d1b 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2666,11 +2666,8 @@ err_tasklet_kill:
static int atmel_sha_remove(struct platform_device *pdev)
{
- struct atmel_sha_dev *sha_dd;
+ struct atmel_sha_dev *sha_dd = platform_get_drvdata(pdev);
- sha_dd = platform_get_drvdata(pdev);
- if (!sha_dd)
- return -ENODEV;
spin_lock(&atmel_sha.lock);
list_del(&sha_dd->list);
spin_unlock(&atmel_sha.lock);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 9fd7b8e439d2..8b7bc1076e0d 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1263,11 +1263,8 @@ err_tasklet_kill:
static int atmel_tdes_remove(struct platform_device *pdev)
{
- struct atmel_tdes_dev *tdes_dd;
+ struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);
- tdes_dd = platform_get_drvdata(pdev);
- if (!tdes_dd)
- return -ENODEV;
spin_lock(&atmel_tdes.lock);
list_del(&tdes_dd->list);
spin_unlock(&atmel_tdes.lock);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 6753f0e6e55d..4482cb145d05 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -29,7 +29,7 @@
SHA512_DIGEST_SIZE * 2)
/*
- * This is a a cache of buffers, from which the users of CAAM QI driver
+ * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
* NOTE: A more elegant solution would be to have some headroom in the frames
* being processed. This can be added by the dpaa2-eth driver. This would
@@ -5083,8 +5083,9 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
ppriv->net_dev.dev = *dev;
INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
- netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
- DPAA2_CAAM_NAPI_WEIGHT);
+ netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
+ dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
}
return 0;
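For reference, a sketch of the renamed NAPI registration used above, assuming netif_napi_add_tx_weight() keeps the old explicit-weight signature; the poll stub and names are placeholders.

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	/* Nothing to do in this stub; report completion so polling stops. */
	napi_complete_done(napi, 0);
	return 0;
}

static void example_register_napi(struct net_device *net_dev,
				  struct napi_struct *napi, int weight)
{
	netif_napi_add_tx_weight(net_dev, napi, example_poll, weight);
	napi_enable(napi);
}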
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index 78383d77da99..619564509936 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -22,7 +22,7 @@
* @ctx_len: size of Context Register
* @import_ctx: true if previous Context Register needs to be restored
* must be true for ahash update and final
- * must be false for for ahash first and digest
+ * must be false for ahash first and digest
* @era: SEC Era
*/
void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 8163f5df8ebf..c36f27376d7e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -75,7 +75,7 @@ bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
/*
- * This is a a cache of buffers, from which the users of CAAM QI driver
+ * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
* NOTE: A more elegant solution would be to have some headroom in the frames
@@ -749,8 +749,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
net_dev->dev = *qidev;
INIT_LIST_HEAD(&net_dev->napi_list);
- netif_napi_add(net_dev, irqtask, caam_qi_poll,
- CAAM_NAPI_WEIGHT);
+ netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
+ CAAM_NAPI_WEIGHT);
napi_enable(irqtask);
}
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
index 96bc963bb804..8ec6edc69f3f 100644
--- a/drivers/crypto/cavium/cpt/cpt_hw_types.h
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -265,7 +265,7 @@ union cptx_pf_exe_bist_status {
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
- * which if the line hits and is is dirty will cause the line to be
+ * which if the line hits and it is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a5d9123a22ea..83350e2d9821 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -366,7 +366,7 @@ struct ccp_device {
/* Master lists that all cmds are queued on. Because there can be
* more than one CCP command queue that can process a cmd a separate
- * backlog list is neeeded so that the backlog completion call
+ * backlog list is needed so that the backlog completion call
* completes before the cmd is available for execution.
*/
spinlock_t cmd_lock ____cacheline_aligned;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 799b476fc3e8..9f588c9728f8 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -503,7 +503,7 @@ static int __sev_platform_shutdown_locked(int *error)
struct sev_device *sev = psp_master->sev_data;
int ret;
- if (sev->state == SEV_STATE_UNINIT)
+ if (!sev || sev->state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
@@ -577,6 +577,8 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
struct sev_user_data_status data;
int ret;
+ memset(&data, 0, sizeof(data));
+
ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
if (ret)
return ret;
@@ -630,7 +632,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
if (input.length > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
- blob = kmalloc(input.length, GFP_KERNEL);
+ blob = kzalloc(input.length, GFP_KERNEL);
if (!blob)
return -ENOMEM;
@@ -854,7 +856,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
input_address = (void __user *)input.address;
if (input.address && input.length) {
- id_blob = kmalloc(input.length, GFP_KERNEL);
+ id_blob = kzalloc(input.length, GFP_KERNEL);
if (!id_blob)
return -ENOMEM;
@@ -973,14 +975,14 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL);
if (!pdh_blob)
return -ENOMEM;
data.pdh_cert_address = __psp_pa(pdh_blob);
data.pdh_cert_len = input.pdh_cert_len;
- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL);
if (!cert_blob) {
ret = -ENOMEM;
goto e_free_pdh;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b5970ae54d0e..792d6da7f0c0 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -429,6 +429,12 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv2,
#endif
},
+ { /* 6 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv3,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -438,6 +444,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
+ { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 7d1bee86d581..cadead18b59e 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -372,17 +372,10 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev->dma_mask = &dev->coherent_dma_mask;
dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
- while (dma_mask > 0x7fffffffUL) {
- if (dma_supported(dev, dma_mask)) {
- rc = dma_set_coherent_mask(dev, dma_mask);
- if (!rc)
- break;
- }
- dma_mask >>= 1;
- }
-
+ rc = dma_set_coherent_mask(dev, dma_mask);
if (rc) {
- dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
+ dev_err(dev, "Failed in dma_set_coherent_mask, mask=%llx\n",
+ dma_mask);
return rc;
}
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d5421b0c6831..6124fbbbed94 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -41,6 +41,7 @@ static int cc_pm_resume(struct device *dev)
/* wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
+ clk_disable_unprepare(drvdata->clk);
return -EBUSY;
}
@@ -48,6 +49,7 @@ static int cc_pm_resume(struct device *dev)
rc = init_cc_regs(drvdata);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
+ clk_disable_unprepare(drvdata->clk);
return rc;
}
/* check if tee fips error occurred during power down */
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 97d54c1465c2..3ba6f15deafc 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -252,7 +252,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
if (unlikely(shift < 0))
return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
if (unlikely(!ptr))
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index b4ca2eb034d7..ad83c194d664 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -877,13 +877,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
pm_runtime_put_autosuspend(dev);
}
-static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
-{
- u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
-
- return &qm->qp_array[cqn];
-}
-
static void qm_cq_head_update(struct hisi_qp *qp)
{
if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
@@ -894,47 +887,37 @@ static void qm_cq_head_update(struct hisi_qp *qp)
}
}
-static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
+static void qm_poll_req_cb(struct hisi_qp *qp)
{
- if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
- return;
-
- if (qp->event_cb) {
- qp->event_cb(qp);
- return;
- }
-
- if (qp->req_cb) {
- struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
-
- while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
- dma_rmb();
- qp->req_cb(qp, qp->sqe + qm->sqe_size *
- le16_to_cpu(cqe->sq_head));
- qm_cq_head_update(qp);
- cqe = qp->cqe + qp->qp_status.cq_head;
- qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
- qp->qp_status.cq_head, 0);
- atomic_dec(&qp->qp_status.used);
- }
+ struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+ struct hisi_qm *qm = qp->qm;
- /* set c_flag */
+ while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
+ dma_rmb();
+ qp->req_cb(qp, qp->sqe + qm->sqe_size *
+ le16_to_cpu(cqe->sq_head));
+ qm_cq_head_update(qp);
+ cqe = qp->cqe + qp->qp_status.cq_head;
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
- qp->qp_status.cq_head, 1);
+ qp->qp_status.cq_head, 0);
+ atomic_dec(&qp->qp_status.used);
}
+
+ /* set c_flag */
+ qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}
-static void qm_work_process(struct work_struct *work)
+static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
- struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
+ struct hisi_qm *qm = poll_data->qm;
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
- struct hisi_qp *qp;
int eqe_num = 0;
+ u16 cqn;
while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+ cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+ poll_data->qp_finish_id[eqe_num] = cqn;
eqe_num++;
- qp = qm_to_hisi_qp(qm, eqe);
- qm_poll_qp(qp, qm);
if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -945,37 +928,70 @@ static void qm_work_process(struct work_struct *work)
qm->status.eq_head++;
}
- if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
- eqe_num = 0;
- qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
- }
+ if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+ break;
}
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+
+ return eqe_num;
}
-static irqreturn_t do_qm_irq(int irq, void *data)
+static void qm_work_process(struct work_struct *work)
{
- struct hisi_qm *qm = (struct hisi_qm *)data;
+ struct hisi_qm_poll_data *poll_data =
+ container_of(work, struct hisi_qm_poll_data, work);
+ struct hisi_qm *qm = poll_data->qm;
+ struct hisi_qp *qp;
+ int eqe_num, i;
- /* the workqueue created by device driver of QM */
- if (qm->wq)
- queue_work(qm->wq, &qm->work);
- else
- schedule_work(&qm->work);
+ /* Get qp id of completed tasks and re-enable the interrupt. */
+ eqe_num = qm_get_complete_eqe_num(poll_data);
+ for (i = eqe_num - 1; i >= 0; i--) {
+ qp = &qm->qp_array[poll_data->qp_finish_id[i]];
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
+ continue;
- return IRQ_HANDLED;
+ if (qp->event_cb) {
+ qp->event_cb(qp);
+ continue;
+ }
+
+ if (likely(qp->req_cb))
+ qm_poll_req_cb(qp);
+ }
+}
+
+static bool do_qm_irq(struct hisi_qm *qm)
+{
+ struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+ struct hisi_qm_poll_data *poll_data;
+ u16 cqn;
+
+ if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
+ return false;
+
+ if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+ cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+ poll_data = &qm->poll_data[cqn];
+ queue_work(qm->wq, &poll_data->work);
+
+ return true;
+ }
+
+ return false;
}
static irqreturn_t qm_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
+ bool ret;
- if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
- return do_qm_irq(irq, data);
+ ret = do_qm_irq(qm);
+ if (ret)
+ return IRQ_HANDLED;
atomic64_inc(&qm->debug.dfx.err_irq_cnt);
- dev_err(&qm->pdev->dev, "invalid int source\n");
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
return IRQ_NONE;
@@ -3134,11 +3150,8 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
if (ret)
dev_err(dev, "Failed to drain out data for stopping!\n");
- if (qp->qm->wq)
- flush_workqueue(qp->qm->wq);
- else
- flush_work(&qp->qm->work);
+ flush_workqueue(qp->qm->wq);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
@@ -3557,8 +3570,10 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
for (i = num - 1; i >= 0; i--) {
qdma = &qm->qp_array[i].qdma;
dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+ kfree(qm->poll_data[i].qp_finish_id);
}
+ kfree(qm->poll_data);
kfree(qm->qp_array);
}
@@ -3567,12 +3582,18 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
struct device *dev = &qm->pdev->dev;
size_t off = qm->sqe_size * QM_Q_DEPTH;
struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
+ qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
+ GFP_KERNEL);
+ if (!qm->poll_data[id].qp_finish_id)
+ return -ENOMEM;
qp = &qm->qp_array[id];
qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
GFP_KERNEL);
if (!qp->qdma.va)
- return -ENOMEM;
+ goto err_free_qp_finish_id;
qp->sqe = qp->qdma.va;
qp->sqe_dma = qp->qdma.dma;
@@ -3583,6 +3604,10 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
qp->qp_id = id;
return 0;
+
+err_free_qp_finish_id:
+ kfree(qm->poll_data[id].qp_finish_id);
+ return ret;
}
static void hisi_qm_pre_init(struct hisi_qm *qm)
@@ -3672,6 +3697,26 @@ static void qm_last_regs_uninit(struct hisi_qm *qm)
debug->qm_last_words = NULL;
}
+static void hisi_qm_unint_work(struct hisi_qm *qm)
+{
+ destroy_workqueue(qm->wq);
+}
+
+static void hisi_qm_memory_uninit(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ hisi_qp_memory_uninit(qm, qm->qp_num);
+ if (qm->qdma.va) {
+ hisi_qm_cache_wb(qm);
+ dma_free_coherent(dev, qm->qdma.size,
+ qm->qdma.va, qm->qdma.dma);
+ }
+
+ idr_destroy(&qm->qp_idr);
+ kfree(qm->factor);
+}
+
/**
* hisi_qm_uninit() - Uninitialize qm.
* @qm: The qm needed uninit.
@@ -3680,13 +3725,10 @@ static void qm_last_regs_uninit(struct hisi_qm *qm)
*/
void hisi_qm_uninit(struct hisi_qm *qm)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
-
qm_last_regs_uninit(qm);
qm_cmd_uninit(qm);
- kfree(qm->factor);
+ hisi_qm_unint_work(qm);
down_write(&qm->qps_lock);
if (!qm_avail_state(qm, QM_CLOSE)) {
@@ -3694,14 +3736,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
return;
}
- hisi_qp_memory_uninit(qm, qm->qp_num);
- idr_destroy(&qm->qp_idr);
-
- if (qm->qdma.va) {
- hisi_qm_cache_wb(qm);
- dma_free_coherent(dev, qm->qdma.size,
- qm->qdma.va, qm->qdma.dma);
- }
+ hisi_qm_memory_uninit(qm);
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
@@ -6018,14 +6053,28 @@ err_disable_pcidev:
return ret;
}
-static void hisi_qm_init_work(struct hisi_qm *qm)
+static int hisi_qm_init_work(struct hisi_qm *qm)
{
- INIT_WORK(&qm->work, qm_work_process);
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++)
+ INIT_WORK(&qm->poll_data[i].work, qm_work_process);
+
if (qm->fun_type == QM_HW_PF)
INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
if (qm->ver > QM_HW_V2)
INIT_WORK(&qm->cmd_process, qm_cmd_process);
+
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+ pci_err(qm->pdev, "failed to alloc workqueue!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
@@ -6038,11 +6087,18 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
if (!qm->qp_array)
return -ENOMEM;
+ qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
+ if (!qm->poll_data) {
+ kfree(qm->qp_array);
+ return -ENOMEM;
+ }
+
/* one more page for device or qp statuses */
qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
sizeof(struct qm_cqe) * QM_Q_DEPTH;
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
for (i = 0; i < qm->qp_num; i++) {
+ qm->poll_data[i].qm = qm;
ret = hisi_qp_memory_init(qm, qp_dma_size, i);
if (ret)
goto err_init_qp_mem;
@@ -6176,7 +6232,10 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
goto err_alloc_uacce;
- hisi_qm_init_work(qm);
+ ret = hisi_qm_init_work(qm);
+ if (ret)
+ goto err_free_qm_memory;
+
qm_cmd_init(qm);
atomic_set(&qm->status.flags, QM_INIT);
@@ -6184,6 +6243,8 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;
+err_free_qm_memory:
+ hisi_qm_memory_uninit(qm);
err_alloc_uacce:
if (qm->use_sva) {
uacce_remove(qm->uacce);
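A minimal sketch of the workqueue the QM core now allocates itself, mirroring the flags in the hunk above; the wrapper name and arguments are illustrative.

#include <linux/workqueue.h>

static struct workqueue_struct *example_alloc_qm_wq(const char *name,
						     unsigned int nr_cpus)
{
	/*
	 * WQ_HIGHPRI keeps completion latency low, WQ_MEM_RECLAIM lets the
	 * queue make progress under memory pressure, and WQ_UNBOUND suits
	 * long-running, CPU-intensive callbacks.
	 */
	return alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND,
			       nr_cpus, name);
}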
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 0a3c8f019b02..490e1542305e 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
*/
}
- mutex_lock(&ctx->queue->queuelock);
+ spin_lock_bh(&ctx->queue->queuelock);
/* Put the IV in place for chained cases */
switch (ctx->cipher_alg) {
case SEC_C_AES_CBC_128:
@@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
list_del(&backlog_req->backlog_head);
}
}
- mutex_unlock(&ctx->queue->queuelock);
+ spin_unlock_bh(&ctx->queue->queuelock);
mutex_lock(&sec_req->lock);
list_del(&sec_req_el->head);
@@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
*/
/* Grab a big lock for a long time to avoid concurrency issues */
- mutex_lock(&queue->queuelock);
+ spin_lock_bh(&queue->queuelock);
/*
* Can go on to queue if we have space in either:
@@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
ret = -EBUSY;
if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
list_add_tail(&sec_req->backlog_head, &ctx->backlog);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto out;
}
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto err_free_elements;
}
ret = sec_send_request(sec_req, queue);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
if (ret)
goto err_free_elements;
@@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
if (IS_ERR(ctx->queue))
return PTR_ERR(ctx->queue);
- mutex_init(&ctx->queue->queuelock);
+ spin_lock_init(&ctx->queue->queuelock);
ctx->queue->havesoftqueue = false;
return 0;
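A small sketch of the locking swap above: the queue lock becomes a spinlock taken with the _bh variants so it can also be taken from the completion path, where a sleeping mutex must not be used. The structure and field names here are illustrative.

#include <linux/spinlock.h>

struct example_queue {
	spinlock_t lock;
	unsigned int pending;
};

static void example_queue_init(struct example_queue *q)
{
	spin_lock_init(&q->lock);
	q->pending = 0;
}

static void example_queue_add(struct example_queue *q)
{
	spin_lock_bh(&q->lock);		/* also masks local softirqs */
	q->pending++;
	spin_unlock_bh(&q->lock);
}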
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c8de1b51c843..e75851326c1e 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -892,7 +892,7 @@ bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
static void sec_queue_hw_init(struct sec_queue *queue)
{
sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
- sec_queue_aw_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+ sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
sec_queue_ar_pkgattr(queue, 1);
sec_queue_aw_pkgattr(queue, 1);
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 179a8250d691..e2a50bf2234b 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -347,7 +347,7 @@ struct sec_queue {
DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
bool havesoftqueue;
- struct mutex queuelock;
+ spinlock_t queuelock;
void *shadow[SEC_QUEUE_LEN];
};
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index c2e9b01187a7..d2a0bc93e752 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -119,7 +119,7 @@ struct sec_qp_ctx {
struct idr req_idr;
struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
- struct mutex req_lock;
+ spinlock_t req_lock;
struct list_head backlog;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
@@ -143,10 +143,10 @@ struct sec_ctx {
/* Threshold for fake busy, trigger to return -EBUSY to user */
u32 fake_req_limit;
- /* Currrent cyclic index to select a queue for encipher */
+ /* Current cyclic index to select a queue for encipher */
atomic_t enc_qcyclic;
- /* Currrent cyclic index to select a queue for decipher */
+ /* Current cyclic index to select a queue for decipher */
atomic_t dec_qcyclic;
enum sec_alg_type alg_type;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 6eebe739893c..77c9f13cf69a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -127,11 +127,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
int req_id;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
dev_err(req->ctx->dev, "alloc req id fail!\n");
return req_id;
@@ -156,9 +156,9 @@ static void sec_free_req_id(struct sec_req *req)
qp_ctx->req_list[req_id] = NULL;
req->qp_ctx = NULL;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
@@ -273,7 +273,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
!(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
if (ctx->fake_req_limit <=
@@ -281,10 +281,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
list_add_tail(&req->backlog_head, &qp_ctx->backlog);
atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
return -EBUSY;
}
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(ret == -EBUSY))
return -ENOBUFS;
@@ -487,7 +487,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp->req_cb = sec_req_cb;
- mutex_init(&qp_ctx->req_lock);
+ spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
INIT_LIST_HEAD(&qp_ctx->backlog);
@@ -620,7 +620,7 @@ static int sec_auth_init(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
+ a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
&a_ctx->a_key_dma, GFP_KERNEL);
if (!a_ctx->a_key)
return -ENOMEM;
@@ -632,8 +632,8 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
+ memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
+ dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
}
@@ -1382,7 +1382,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
{
struct sec_req *backlog_req = NULL;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
if (ctx->fake_req_limit >=
atomic_read(&qp_ctx->qp->qp_status.used) &&
!list_empty(&qp_ctx->backlog)) {
@@ -1390,7 +1390,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
typeof(*backlog_req), backlog_head);
list_del(&backlog_req->backlog_head);
}
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
return backlog_req;
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 5e039b50e9d4..d033f63b583f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -7,6 +7,7 @@
#define SEC_AIV_SIZE 12
#define SEC_IV_SIZE 24
#define SEC_MAX_KEY_SIZE 64
+#define SEC_MAX_AKEY_SIZE 128
#define SEC_COMM_SCENE 0
#define SEC_MIN_BLOCK_SZ 1
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 4d85d2cbf376..2c0be91c0b09 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -508,16 +508,17 @@ static int sec_engine_init(struct hisi_qm *qm)
writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
- /* HW V2 enable sm4 extra mode, as ctr/ecb */
- if (qm->ver < QM_HW_V3)
+ if (qm->ver < QM_HW_V3) {
+ /* HW V2 enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
- /* Enable sm4 xts mode multiple iv */
- writel_relaxed(SEC_BD_ERR_CHK_EN1,
- qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
- writel_relaxed(SEC_BD_ERR_CHK_EN3,
- qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
+ /* HW V2 enable sm4 xts mode multiple iv */
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
+ writel_relaxed(SEC_BD_ERR_CHK_EN3,
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
+ }
/* config endian */
sec_set_endian(qm);
@@ -1002,8 +1003,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- int ret;
-
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->algs = "cipher\ndigest\naead";
@@ -1029,25 +1028,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
}
- /*
- * WQ_HIGHPRI: SEC request must be low delayed,
- * so need a high priority workqueue.
- * WQ_UNBOUND: SEC task is likely with long
- * running CPU intensive workloads.
- */
- qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
- WQ_UNBOUND, num_online_cpus(),
- pci_name(qm->pdev));
- if (!qm->wq) {
- pci_err(qm->pdev, "fail to alloc workqueue\n");
- return -ENOMEM;
- }
-
- ret = hisi_qm_init(qm);
- if (ret)
- destroy_workqueue(qm->wq);
-
- return ret;
+ return hisi_qm_init(qm);
}
static void sec_qm_uninit(struct hisi_qm *qm)
@@ -1078,8 +1059,6 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm)
{
hisi_qm_dev_err_uninit(qm);
-
- destroy_workqueue(qm->wq);
}
static void sec_iommu_used_check(struct sec_dev *sec)
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 829f2caf0f67..97e500db0a82 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -185,7 +185,7 @@ static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct hisi_trng *trng;
int currsize = 0;
u32 val = 0;
- u32 ret;
+ int ret;
trng = container_of(rng, struct hisi_trng, rng);
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 67869513e48c..ad35434a3fdb 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/internal/acompress.h>
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "zip.h"
@@ -606,8 +607,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
req_q = &ctx->qp_ctx[i].req_q;
req_q->size = QM_Q_DEPTH;
- req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size),
- sizeof(long), GFP_KERNEL);
+ req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
if (!req_q->req_bitmap) {
ret = -ENOMEM;
if (i == 0)
@@ -631,11 +631,11 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
return 0;
err_free_loop1:
- kfree(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
+ bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_loop0:
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_bitmap:
- kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
+ bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -645,7 +645,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
kfree(ctx->qp_ctx[i].req_q.q);
- kfree(ctx->qp_ctx[i].req_q.req_bitmap);
+ bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
}
}
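A hedged sketch of the bitmap helper pairing the zip conversion above relies on; the size and function name are placeholders.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_bitmap_roundtrip(unsigned int nbits)
{
	unsigned long *map;

	/* Replaces kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL). */
	map = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	bitmap_free(map);	/* pairs with bitmap_zalloc() */
	return 0;
}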
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 9c925e9c0a2d..c3303d99acac 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -990,8 +990,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- int ret;
-
qm->pdev = pdev;
qm->ver = pdev->revision;
if (pdev->revision >= QM_HW_V3)
@@ -1021,25 +1019,12 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
- WQ_UNBOUND, num_online_cpus(),
- pci_name(qm->pdev));
- if (!qm->wq) {
- pci_err(qm->pdev, "fail to alloc workqueue\n");
- return -ENOMEM;
- }
-
- ret = hisi_qm_init(qm);
- if (ret)
- destroy_workqueue(qm->wq);
-
- return ret;
+ return hisi_qm_init(qm);
}
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
{
hisi_qm_uninit(qm);
- destroy_workqueue(qm->wq);
}
static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9b1a158aec29..ad0d8c4a71ac 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1831,6 +1831,8 @@ static const struct of_device_id safexcel_of_match_table[] = {
{},
};
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
+
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
.remove = safexcel_remove,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index ce1e611a163e..797ff91512e0 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -497,15 +497,15 @@ struct result_data_desc {
u32 packet_length:17;
u32 error_code:15;
- u8 bypass_length:4;
- u8 e15:1;
- u16 rsvd0;
- u8 hash_bytes:1;
- u8 hash_length:6;
- u8 generic_bytes:1;
- u8 checksum:1;
- u8 next_header:1;
- u8 length:1;
+ u32 bypass_length:4;
+ u32 e15:1;
+ u32 rsvd0:16;
+ u32 hash_bytes:1;
+ u32 hash_length:6;
+ u32 generic_bytes:1;
+ u32 checksum:1;
+ u32 next_header:1;
+ u32 length:1;
u16 application_id;
u16 rsvd1;
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c
index 5d0785d3f1b5..2269df17514c 100644
--- a/drivers/crypto/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/keembay/keembay-ocs-ecc.c
@@ -976,8 +976,6 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev)
struct ocs_ecc_dev *ecc_dev;
ecc_dev = platform_get_drvdata(pdev);
- if (!ecc_dev)
- return -ENODEV;
crypto_unregister_kpp(&ocs_ecdh_p384);
crypto_unregister_kpp(&ocs_ecdh_p256);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index bb02e0db3615..7503f6b18ac5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -51,11 +51,47 @@ static const struct devlink_param otx2_cpt_dl_params[] = {
NULL),
};
-static int otx2_cpt_devlink_info_get(struct devlink *devlink,
+static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req,
+ struct otx2_cpt_eng_grp_info grp[],
+ const char *ver_name, int eng_type)
+{
+ struct otx2_cpt_engs_rsvd *eng;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ eng = find_engines_by_type(&grp[i], eng_type);
+ if (eng)
+ return devlink_info_version_running_put(req, ver_name,
+ eng->ucode->ver_str);
+ }
+
+ return 0;
+}
+
+static int otx2_cpt_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
- return devlink_info_driver_name_put(req, "rvu_cptpf");
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+ int err;
+
+ err = devlink_info_driver_name_put(req, "rvu_cptpf");
+ if (err)
+ return err;
+
+ err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
+ "fw.ae", OTX2_CPT_AE_TYPES);
+ if (err)
+ return err;
+
+ err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
+ "fw.se", OTX2_CPT_SE_TYPES);
+ if (err)
+ return err;
+
+ return otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
+ "fw.ie", OTX2_CPT_IE_TYPES);
}
static const struct devlink_ops otx2_cpt_devlink_ops = {
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 9cba2f714c7e..f10050fead16 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -476,7 +476,7 @@ release_fw:
return ret;
}
-static struct otx2_cpt_engs_rsvd *find_engines_by_type(
+struct otx2_cpt_engs_rsvd *find_engines_by_type(
struct otx2_cpt_eng_grp_info *eng_grp,
int eng_type)
{
@@ -1605,7 +1605,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
if (has_se || ucode_idx)
goto err_print;
- tmp = strim(strsep(&val, ":"));
+ tmp = strsep(&val, ":");
+ if (!tmp)
+ goto err_print;
+ tmp = strim(tmp);
if (!val)
goto err_print;
if (strlen(tmp) != 2)
@@ -1617,7 +1620,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
if (has_ae || ucode_idx)
goto err_print;
- tmp = strim(strsep(&val, ":"));
+ tmp = strsep(&val, ":");
+ if (!tmp)
+ goto err_print;
+ tmp = strim(tmp);
if (!val)
goto err_print;
if (strlen(tmp) != 2)
@@ -1629,7 +1635,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
if (has_ie || ucode_idx)
goto err_print;
- tmp = strim(strsep(&val, ":"));
+ tmp = strsep(&val, ":");
+ if (!tmp)
+ goto err_print;
+ tmp = strim(tmp);
if (!val)
goto err_print;
if (strlen(tmp) != 2)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index 8f4d4e5f531a..e69320a54b5d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -166,4 +166,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx);
void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf);
+struct otx2_cpt_engs_rsvd *find_engines_by_type(
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type);
#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index f418817c0f43..f34c75a862f2 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -75,7 +75,7 @@ static int (*nx842_powernv_exec)(const unsigned char *in,
/**
* setup_indirect_dde - Setup an indirect DDE
*
- * The DDE is setup with the the DDE count, byte count, and address of
+ * The DDE is setup with the DDE count, byte count, and address of
* first direct DDE in the list.
*/
static void setup_indirect_dde(struct data_descriptor_entry *dde,
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 7584a34ba88c..3ea334b7f820 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -1208,10 +1208,13 @@ static struct vio_driver nx842_vio_driver = {
static int __init nx842_pseries_init(void)
{
struct nx842_devdata *new_devdata;
+ struct device_node *np;
int ret;
- if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
+ np = of_find_compatible_node(NULL, NULL, "ibm,compression");
+ if (!np)
return -ENODEV;
+ of_node_put(np);
RCU_INIT_POINTER(devdata, NULL);
new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 581211a92628..67a99c760bc4 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1261,9 +1261,6 @@ static int omap_aes_remove(struct platform_device *pdev)
struct aead_alg *aalg;
int i, j;
- if (!dd)
- return -ENODEV;
-
spin_lock_bh(&list_lock);
list_del(&dd->list);
spin_unlock_bh(&list_lock);
@@ -1279,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev)
aalg = &dd->pdata->aead_algs_info->algs_list[i];
crypto_unregister_aead(aalg);
dd->pdata->aead_algs_info->registered--;
-
}
crypto_engine_exit(dd->engine);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 538aff80869f..f783769ea110 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1091,9 +1091,6 @@ static int omap_des_remove(struct platform_device *pdev)
struct omap_des_dev *dd = platform_get_drvdata(pdev);
int i, j;
- if (!dd)
- return -ENODEV;
-
spin_lock_bh(&list_lock);
list_del(&dd->list);
spin_unlock_bh(&list_lock);
@@ -1106,7 +1103,6 @@ static int omap_des_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
return 0;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 4b37dc69a50c..655a7f5a406a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2197,8 +2197,7 @@ static int omap_sham_remove(struct platform_device *pdev)
int i, j;
dd = platform_get_drvdata(pdev);
- if (!dd)
- return -ENODEV;
+
spin_lock_bh(&sham.lock);
list_del(&dd->list);
spin_unlock_bh(&sham.lock);
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 4b90c0f22b03..1220cc86f910 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -17,7 +17,7 @@ config CRYPTO_DEV_QAT
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select CRYPTO_DEV_QAT
help
Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
@@ -28,7 +28,7 @@ config CRYPTO_DEV_QAT_DH895xCC
config CRYPTO_DEV_QAT_C3XXX
tristate "Support for Intel(R) C3XXX"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select CRYPTO_DEV_QAT
help
Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
@@ -39,7 +39,7 @@ config CRYPTO_DEV_QAT_C3XXX
config CRYPTO_DEV_QAT_C62X
tristate "Support for Intel(R) C62X"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select CRYPTO_DEV_QAT
help
Support for Intel(R) C62x with Intel(R) QuickAssist Technology
@@ -50,7 +50,7 @@ config CRYPTO_DEV_QAT_C62X
config CRYPTO_DEV_QAT_4XXX
tristate "Support for Intel(R) QAT_4XXX"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select CRYPTO_DEV_QAT
help
Support for Intel(R) QuickAssist Technology QAT_4xxx
@@ -61,7 +61,7 @@ config CRYPTO_DEV_QAT_4XXX
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select PCI_IOV
select CRYPTO_DEV_QAT
@@ -74,7 +74,7 @@ config CRYPTO_DEV_QAT_DH895xCCVF
config CRYPTO_DEV_QAT_C3XXXVF
tristate "Support for Intel(R) C3XXX Virtual Function"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select PCI_IOV
select CRYPTO_DEV_QAT
help
@@ -86,7 +86,7 @@ config CRYPTO_DEV_QAT_C3XXXVF
config CRYPTO_DEV_QAT_C62XVF
tristate "Support for Intel(R) C62X Virtual Function"
- depends on X86 && PCI
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
select PCI_IOV
select CRYPTO_DEV_QAT
help
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fb5970a68484..fda5f699ff57 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -357,6 +357,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+ hw_data->dev_config = adf_crypto_dev_config;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 1034752845ca..9d49248931f6 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -70,5 +70,6 @@ enum icp_qat_4xxx_slice_mask {
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index 181fa1c8b3c7..2f212561acc4 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -53,7 +53,7 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
return 0;
}
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -289,6 +289,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_disable_aer;
}
+ ret = adf_sysfs_init(accel_dev);
+ if (ret)
+ goto out_err_disable_aer;
+
ret = adf_crypto_dev_config(accel_dev);
if (ret)
goto out_err_disable_aer;
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 04f058acc4d3..80919cfcc29d 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -10,6 +10,7 @@ intel_qat-objs := adf_cfg.o \
adf_transport.o \
adf_admin.o \
adf_hw_arbiter.o \
+ adf_sysfs.o \
adf_gen2_hw_data.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index ede6458c9dbf..0a55a4f34dcf 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -199,6 +199,7 @@ struct adf_hw_device_data {
char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(void);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ int (*dev_config)(struct adf_accel_dev *accel_dev);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
const char *fw_name;
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index b5b208cbe5a1..e61b3e13db3b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -128,6 +128,24 @@ static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
list_add_tail(&new->list, &sec->param_head);
}
+static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
+{
+ struct list_head *head = &sec->param_head;
+ struct list_head *list_ptr, *tmp;
+
+ list_for_each_prev_safe(list_ptr, tmp, head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list_ptr, struct adf_cfg_key_val, list);
+
+ if (strncmp(ptr->key, key, sizeof(ptr->key)))
+ continue;
+
+ list_del(list_ptr);
+ kfree(ptr);
+ break;
+ }
+}
+
static void adf_cfg_keyval_del_all(struct list_head *head)
{
struct list_head *list_ptr, *tmp;
@@ -208,7 +226,8 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
* @type: Type - string, int or address
*
* Function adds configuration key - value entry in the appropriate section
- * in the given acceleration device
+ * in the given acceleration device. If the key exists already, the value
+ * is updated.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
@@ -222,6 +241,8 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
struct adf_cfg_key_val *key_val;
struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
section_name);
+ char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
if (!section)
return -EFAULT;
@@ -246,6 +267,24 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
return -EINVAL;
}
key_val->type = type;
+
+ /* Add the key-value pair according to the following policy:
+ * 1. if the key doesn't exist, add it;
+ * 2. if the key already exists with a different value then update it
+ * to the new value (the key is deleted and the newly created
+ * key_val containing the new value is added to the database);
+ * 3. if the key exists with the same value, then return without doing
+ * anything (the newly created key_val is freed).
+ */
+ if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
+ if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
+ adf_cfg_keyval_remove(key, section);
+ } else {
+ kfree(key_val);
+ return 0;
+ }
+ }
+
down_write(&cfg->lock);
adf_cfg_keyval_add(key_val, section);
up_write(&cfg->lock);
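The change above gives adf_cfg_add_key_value_param() add-or-update semantics: a key that already exists with a different value is removed before the new entry is added, and a key that already exists with the same value is left alone. A standalone sketch of that policy follows; the struct, helper name, and example key/values are hypothetical, not the real adf_cfg types.

/* Illustrative add-or-update policy over a plain singly linked list. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kv {
	char key[64];
	char val[64];
	struct kv *next;
};

static int kv_add_or_update(struct kv **head, const char *key, const char *val)
{
	struct kv **pp, *cur, *new;

	for (pp = head; (cur = *pp); pp = &cur->next) {
		if (strcmp(cur->key, key))
			continue;
		if (!strcmp(cur->val, val))
			return 0;	/* same value: nothing to do */
		*pp = cur->next;	/* different value: drop the old entry */
		free(cur);
		break;
	}

	new = calloc(1, sizeof(*new));
	if (!new)
		return -1;
	snprintf(new->key, sizeof(new->key), "%s", key);
	snprintf(new->val, sizeof(new->val), "%s", val);
	new->next = *head;
	*head = new;
	return 0;
}

int main(void)
{
	struct kv *cfg = NULL;

	kv_add_or_update(&cfg, "ServicesEnabled", "sym;asym");
	kv_add_or_update(&cfg, "ServicesEnabled", "dc");	/* replaces the value */
	printf("%s = %s\n", cfg->key, cfg->val);
	return 0;
}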
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 0464fa257929..7bb477c3ce25 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -49,11 +49,6 @@ struct service_hndl {
struct list_head list;
};
-static inline int get_current_node(void)
-{
- return topology_physical_package_id(raw_smp_processor_id());
-}
-
int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
@@ -61,6 +56,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev);
int adf_dev_start(struct adf_accel_dev *accel_dev);
void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
@@ -132,6 +128,8 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
+int adf_sysfs_init(struct adf_accel_dev *accel_dev);
+
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index c2c718f1b489..33a9a46d6949 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -363,3 +363,29 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
}
return 0;
}
+
+int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ if (!ret) {
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED,
+ services, ADF_STR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index f38b2ffde146..b2db1d70d71f 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -120,32 +120,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
-static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
-
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
-
- if (!ret) {
- ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
- if (ret)
- return ret;
-
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED,
- services, ADF_STR);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
/**
* adf_sriov_configure() - Enable SRIOV for the device
* @pdev: Pointer to PCI device.
@@ -185,7 +159,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- ret = adf_sriov_prepare_restart(accel_dev);
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
if (ret)
return ret;
}
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
new file mode 100644
index 000000000000..e8b078e719c2
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_sysfs.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static const char * const state_operations[] = {
+ [DEV_DOWN] = "down",
+ [DEV_UP] = "up",
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ char *state;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ state = adf_dev_started(accel_dev) ? "up" : "down";
+ return sysfs_emit(buf, "%s\n", state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ u32 accel_id;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ accel_id = accel_dev->accel_id;
+
+ if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
+ return -EBUSY;
+ }
+
+ ret = sysfs_match_string(state_operations, buf);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case DEV_DOWN:
+ if (!adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d already down\n",
+ accel_id);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ if (ret < 0)
+ return -EINVAL;
+
+ break;
+ case DEV_UP:
+ if (adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d already up\n",
+ accel_id);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+ ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+ if (!ret)
+ ret = adf_dev_init(accel_dev);
+ if (!ret)
+ ret = adf_dev_start(accel_dev);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to start device qat_dev%d\n",
+ accel_id);
+ adf_dev_shutdown_cache_cfg(accel_dev);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const char * const services_operations[] = {
+ ADF_CFG_CY,
+ ADF_CFG_DC,
+};
+
+static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", services);
+}
+
+static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
+ const char *services)
+{
+ return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services,
+ ADF_STR);
+}
+
+static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_hw_device_data *hw_data;
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ ret = sysfs_match_string(services_operations, buf);
+ if (ret < 0)
+ return ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
+ accel_dev->accel_id);
+ return -EINVAL;
+ }
+
+ ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
+ if (ret < 0)
+ return ret;
+
+ hw_data = GET_HW_DATA(accel_dev);
+
+ /* Update capabilities mask after change in configuration.
+ * A call to this function is required as capabilities are, at the
+ * moment, tied to configuration
+ */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask)
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(state);
+static DEVICE_ATTR_RW(cfg_services);
+
+static struct attribute *qat_attrs[] = {
+ &dev_attr_state.attr,
+ &dev_attr_cfg_services.attr,
+ NULL,
+};
+
+static struct attribute_group qat_group = {
+ .attrs = qat_attrs,
+ .name = "qat",
+};
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create qat attribute group: %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_sysfs_init);
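The new adf_sysfs.c registers a "qat" attribute group on the accelerator's PCI device with writable "state" and "cfg_services" files. A short userspace sketch of driving the "state" attribute follows; the sysfs path layout and the example PCI address are assumptions derived from the attribute group name, not taken from the patch.

/* Hypothetical userspace example: bounce a QAT device through the new
 * sysfs "state" attribute. The device path is an assumption. */
#include <stdio.h>

static int qat_set_state(const char *bdf, const char *state)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/qat/state", bdf);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* state_store() matches "up"/"down" via sysfs_match_string() */
	if (fputs(state, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	const char *bdf = "0000:6b:00.0";	/* example address only */

	if (qat_set_state(bdf, "down"))
		perror("down");
	if (qat_set_state(bdf, "up"))
		perror("up");
	return 0;
}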
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 148edbe379e3..fb45fa83841c 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -605,7 +605,7 @@ static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = NULL;
- int node = get_current_node();
+ int node = numa_node_id();
struct device *dev;
int ret;
@@ -1065,7 +1065,7 @@ static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
- int node = get_current_node();
+ int node = numa_node_id();
int ret;
inst = qat_crypto_get_instance_node(node);
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 16d97db9ea15..095ed2a404d2 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -489,7 +489,7 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(get_current_node());
+ qat_crypto_get_instance_node(numa_node_id());
if (!inst)
return -EINVAL;
@@ -1225,7 +1225,7 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(get_current_node());
+ qat_crypto_get_instance_node(numa_node_id());
if (!inst)
return -EINVAL;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 7717e9e5977b..b79e49aa724f 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2321,9 +2321,6 @@ static int s5p_aes_remove(struct platform_device *pdev)
struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
int i;
- if (!pdata)
- return -ENODEV;
-
for (i = 0; i < ARRAY_SIZE(algs); i++)
crypto_unregister_skcipher(&algs[i]);
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 6957a125b447..f4bc06c24ad8 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -86,7 +86,6 @@ struct sa_match_data {
u8 priv;
u8 priv_id;
u32 supported_algos;
- bool skip_engine_control;
};
static struct device *sa_k3_dev;
@@ -2361,7 +2360,15 @@ static int sa_link_child(struct device *dev, void *data)
static struct sa_match_data am654_match_data = {
.priv = 1,
.priv_id = 1,
- .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+ .supported_algos = BIT(SA_ALG_CBC_AES) |
+ BIT(SA_ALG_EBC_AES) |
+ BIT(SA_ALG_CBC_DES3) |
+ BIT(SA_ALG_ECB_DES3) |
+ BIT(SA_ALG_SHA1) |
+ BIT(SA_ALG_SHA256) |
+ BIT(SA_ALG_SHA512) |
+ BIT(SA_ALG_AUTHENC_SHA1_AES) |
+ BIT(SA_ALG_AUTHENC_SHA256_AES),
};
static struct sa_match_data am64_match_data = {
@@ -2372,7 +2379,6 @@ static struct sa_match_data am64_match_data = {
BIT(SA_ALG_SHA256) |
BIT(SA_ALG_SHA512) |
BIT(SA_ALG_AUTHENC_SHA256_AES),
- .skip_engine_control = true,
};
static const struct of_device_id of_match[] = {
@@ -2390,6 +2396,7 @@ static int sa_ul_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
+ u32 status, val;
int ret;
dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
@@ -2426,13 +2433,13 @@ static int sa_ul_probe(struct platform_device *pdev)
spin_lock_init(&dev_data->scid_lock);
- if (!dev_data->match_data->skip_engine_control) {
- u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
- SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
- SA_EEC_TRNG_EN;
-
+ val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
+ status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
+ /* Only enable the engines if some are not already enabled */
+ if (val & ~status)
writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
- }
sa_register_algos(dev_data);
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index ed66d1f111db..92bf97232a29 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -16,6 +16,7 @@
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#define SA_ENGINE_STATUS 0x0008
#define SA_ENGINE_ENABLE_CONTROL 0x1000
struct sa_tfm_ctx;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 265ef3e96fdd..f104e8a43036 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -421,7 +421,7 @@ static int hash_get_device_data(struct hash_ctx *ctx,
* @keylen: The length of the key.
*
* Note! This function DOES NOT write to the NBLW registry, even though
- * specified in the the hw design spec. Either due to incorrect info in the
+ * specified in the hw design spec. Either due to incorrect info in the
* spec or due to a bug in the hw.
*/
static void hash_hw_write_key(struct hash_device_data *device_data,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 5bc5710a6de0..77eca20bc7ac 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -23,6 +23,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/b128ops.h>
+#include "aesp8-ppc.h"
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index 09bba1852eec..041e633c214f 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -16,7 +16,7 @@
# details see https://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
-# GHASH for for PowerISA v2.07.
+# GHASH for PowerISA v2.07.
#
# July 2014
#
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index f64e3984689f..768ced3d6fe8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -2,6 +2,7 @@
menuconfig CXL_BUS
tristate "CXL (Compute Express Link) Devices Support"
depends on PCI
+ select PCI_DOE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -102,4 +103,12 @@ config CXL_SUSPEND
def_bool y
depends on SUSPEND && CXL_MEM
+config CXL_REGION
+ bool
+ default CXL_BUS
+ # For MAX_PHYSMEM_BITS
+ depends on SPARSEMEM
+ select MEMREGION
+ select GET_FREE_REGION
+
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 40286f5df812..fb649683dd3a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -9,10 +9,6 @@
#include "cxlpci.h"
#include "cxl.h"
-/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
-#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
-#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
-
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
@@ -34,7 +30,8 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
static int cxl_acpi_cfmws_verify(struct device *dev,
struct acpi_cedt_cfmws *cfmws)
{
- int expected_len;
+ int rc, expected_len;
+ unsigned int ways;
if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
@@ -51,14 +48,14 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return -EINVAL;
}
- if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
- dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc) {
+ dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
+ cfmws->interleave_ways);
return -EINVAL;
}
- expected_len = struct_size((cfmws), interleave_targets,
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ expected_len = struct_size(cfmws, interleave_targets, ways);
if (cfmws->header.length < expected_len) {
dev_err(dev, "CFMWS length %d less than expected %d\n",
@@ -76,6 +73,8 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
struct cxl_cfmws_context {
struct device *dev;
struct cxl_port *root_port;
+ struct resource *cxl_res;
+ int id;
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -84,10 +83,14 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
+ struct resource *cxl_res = ctx->cxl_res;
+ struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
- int rc, i;
+ unsigned int ways, i, ig;
+ struct resource *res;
+ int rc;
cfmws = (struct acpi_cedt_cfmws *) header;
@@ -99,19 +102,51 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
return 0;
}
- for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc)
+ return rc;
+ rc = cxl_to_granularity(cfmws->granularity, &ig);
+ if (rc)
+ return rc;
+ for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
- cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
- if (IS_ERR(cxld))
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
+ if (!res->name)
+ goto err_name;
+
+ res->start = cfmws->base_hpa;
+ res->end = cfmws->base_hpa + cfmws->window_size - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* add to the local resource tracking to establish a sort order */
+ rc = insert_resource(cxl_res, res);
+ if (rc)
+ goto err_insert;
+
+ cxlrd = cxl_root_decoder_alloc(root_port, ways);
+ if (IS_ERR(cxlrd))
return 0;
+ cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(cfmws->base_hpa,
- cfmws->window_size);
- cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
- cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+ cxld->hpa_range = (struct range) {
+ .start = res->start,
+ .end = res->end,
+ };
+ cxld->interleave_ways = ways;
+ /*
+ * Minimize the x1 granularity to advertise support for any
+ * valid region granularity
+ */
+ if (ways == 1)
+ ig = CXL_DECODER_MIN_GRANULARITY;
+ cxld->interleave_granularity = ig;
rc = cxl_decoder_add(cxld, target_map);
if (rc)
@@ -119,15 +154,22 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
- dev_err(dev, "Failed to add decoder for %pr\n",
- &cxld->platform_res);
+ dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
}
- dev_dbg(dev, "add: %s node: %d range %pr\n", dev_name(&cxld->dev),
- phys_to_target_node(cxld->platform_res.start),
- &cxld->platform_res);
+ dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+ dev_name(&cxld->dev),
+ phys_to_target_node(cxld->hpa_range.start),
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
+
+err_insert:
+ kfree(res->name);
+err_name:
+ kfree(res);
+ return -ENOMEM;
}
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
@@ -175,8 +217,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (rc)
return rc;
- port = devm_cxl_add_port(host, match, dport->component_reg_phys,
- root_port);
+ port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
if (IS_ERR(port))
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
@@ -282,9 +323,127 @@ static void cxl_acpi_lock_reset_class(void *dev)
device_lock_reset_class(dev);
}
+static void del_cxl_resource(struct resource *res)
+{
+ kfree(res->name);
+ kfree(res);
+}
+
+static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
+{
+ priv->desc = (unsigned long) pub;
+}
+
+static struct resource *cxl_get_public_resource(struct resource *priv)
+{
+ return (struct resource *) priv->desc;
+}
+
+static void remove_cxl_resources(void *data)
+{
+ struct resource *res, *next, *cxl = data;
+
+ for (res = cxl->child; res; res = next) {
+ struct resource *victim = cxl_get_public_resource(res);
+
+ next = res->sibling;
+ remove_resource(res);
+
+ if (victim) {
+ remove_resource(victim);
+ kfree(victim);
+ }
+
+ del_cxl_resource(res);
+ }
+}
+
+/**
+ * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
+ * @cxl_res: A standalone resource tree where each CXL window is a sibling
+ *
+ * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
+ * expanding its boundaries to ensure that any conflicting resources become
+ * children. If a window is expanded it may then conflict with a another window
+ * entry and require the window to be truncated or trimmed. Consider this
+ * situation:
+ *
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
+ *
+ * ...where platform firmware has established a "System RAM" resource across 2
+ * windows, but has left some portion of window 1 for dynamic CXL region
+ * provisioning. In this case "CXL Window 0" will be expanded to cover the
+ * entirety of the "System RAM" span, and "CXL Window 1" is truncated to the
+ * remaining tail past the end
+ * of that "System RAM" resource.
+ */
+static int add_cxl_resources(struct resource *cxl_res)
+{
+ struct resource *res, *new, *next;
+
+ for (res = cxl_res->child; res; res = next) {
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->name = res->name;
+ new->start = res->start;
+ new->end = res->end;
+ new->flags = IORESOURCE_MEM;
+ new->desc = IORES_DESC_CXL;
+
+ /*
+ * Record the public resource in the private cxl_res tree for
+ * later removal.
+ */
+ cxl_set_public_resource(res, new);
+
+ insert_resource_expand_to_fit(&iomem_resource, new);
+
+ next = res->sibling;
+ while (next && resource_overlaps(new, next)) {
+ if (resource_contains(new, next)) {
+ struct resource *_next = next->sibling;
+
+ remove_resource(next);
+ del_cxl_resource(next);
+ next = _next;
+ } else
+ next->start = new->end + 1;
+ }
+ }
+ return 0;
+}
+
+static int pair_cxl_resource(struct device *dev, void *data)
+{
+ struct resource *cxl_res = data;
+ struct resource *p;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ for (p = cxl_res->child; p; p = p->sibling) {
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct resource res = {
+ .start = cxld->hpa_range.start,
+ .end = cxld->hpa_range.end,
+ .flags = IORESOURCE_MEM,
+ };
+
+ if (resource_contains(p, &res)) {
+ cxlrd->res = cxl_get_public_resource(p);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
+ struct resource *cxl_res;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@@ -296,6 +455,14 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc)
return rc;
+ cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
+ if (!cxl_res)
+ return -ENOMEM;
+ cxl_res->name = "CXL mem";
+ cxl_res->start = 0;
+ cxl_res->end = -1;
+ cxl_res->flags = IORESOURCE_MEM;
+
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
@@ -306,11 +473,28 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+ rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
+ if (rc)
+ return rc;
+
ctx = (struct cxl_cfmws_context) {
.dev = host,
.root_port = root_port,
+ .cxl_res = cxl_res,
};
- acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ if (rc < 0)
+ return -ENXIO;
+
+ rc = add_cxl_resources(cxl_res);
+ if (rc)
+ return rc;
+
+ /*
+ * Populate the root decoders with their related iomem resource,
+ * if present
+ */
+ device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
@@ -337,12 +521,19 @@ static const struct acpi_device_id cxl_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
+static const struct platform_device_id cxl_test_ids[] = {
+ { "cxl_acpi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, cxl_test_ids);
+
static struct platform_driver cxl_acpi_driver = {
.probe = cxl_acpi_probe,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = cxl_acpi_ids,
},
+ .id_table = cxl_test_ids,
};
module_platform_driver(cxl_acpi_driver);
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9d35085d25af..79c7257f4107 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -10,3 +10,4 @@ cxl_core-y += memdev.o
cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
+cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 1a50c0fc399c..1d8f87be283f 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -9,6 +9,36 @@ extern const struct device_type cxl_nvdimm_type;
extern struct attribute_group cxl_base_attribute_group;
+#ifdef CONFIG_CXL_REGION
+extern struct device_attribute dev_attr_create_pmem_region;
+extern struct device_attribute dev_attr_delete_region;
+extern struct device_attribute dev_attr_region;
+extern const struct device_type cxl_pmem_region_type;
+extern const struct device_type cxl_region_type;
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
+#define CXL_REGION_TYPE(x) (&cxl_region_type)
+#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
+#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
+int cxl_region_init(void);
+void cxl_region_exit(void);
+#else
+static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+}
+static inline int cxl_region_init(void)
+{
+ return 0;
+}
+static inline void cxl_region_exit(void)
+{
+}
+#define CXL_REGION_ATTR(x) NULL
+#define CXL_REGION_TYPE(x) NULL
+#define SET_CXL_REGION_ATTR(x)
+#define CXL_PMEM_REGION_TYPE(x) NULL
+#endif
+
struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_memdev *cxlmd,
@@ -17,9 +47,28 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+extern struct rw_semaphore cxl_dpa_rwsem;
+
+bool is_switch_decoder(struct device *dev);
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
+ struct cxl_memdev *cxlmd)
+{
+ if (!port)
+ return NULL;
+
+ return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
+}
+
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);
-void cxl_mbox_exit(void);
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index bfc8ee876278..d1d2caea5c62 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -16,6 +17,8 @@
* for enumerating these registers and capabilities.
*/
+DECLARE_RWSEM(cxl_dpa_rwsem);
+
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map)
{
@@ -46,20 +49,22 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
*/
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
- struct cxl_decoder *cxld;
- struct cxl_dport *dport;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_dport *dport = NULL;
int single_port_map[1];
+ unsigned long index;
- cxld = cxl_switch_decoder_alloc(port, 1);
- if (IS_ERR(cxld))
- return PTR_ERR(cxld);
+ cxlsd = cxl_switch_decoder_alloc(port, 1);
+ if (IS_ERR(cxlsd))
+ return PTR_ERR(cxlsd);
device_lock_assert(&port->dev);
- dport = list_first_entry(&port->dports, typeof(*dport), list);
+ xa_for_each(&port->dports, index, dport)
+ break;
single_port_map[0] = dport->port_id;
- return add_hdm_decoder(port, cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
@@ -124,47 +129,577 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
return ERR_PTR(-ENXIO);
}
+ dev_set_drvdata(dev, cxlhdm);
+
return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
-static int to_interleave_granularity(u32 ctrl)
+static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
+{
+ unsigned long long start = r->start, end = r->end;
+
+ seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
+ r->name);
+}
+
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
+{
+ struct resource *p1, *p2;
+
+ down_read(&cxl_dpa_rwsem);
+ for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
+ __cxl_dpa_debug(file, p1, 0);
+ for (p2 = p1->child; p2; p2 = p2->sibling)
+ __cxl_dpa_debug(file, p2, 1);
+ }
+ up_read(&cxl_dpa_rwsem);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+
+/*
+ * Must be called in a context that synchronizes against this decoder's
+ * port ->remove() callback (like an endpoint decoder sysfs attribute)
+ */
+static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct resource *res = cxled->dpa_res;
+ resource_size_t skip_start;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ /* save @skip_start, before @res is released */
+ skip_start = res->start - cxled->skip;
+ __release_region(&cxlds->dpa_res, res->start, resource_size(res));
+ if (cxled->skip)
+ __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ cxled->skip = 0;
+ cxled->dpa_res = NULL;
+ put_device(&cxled->cxld.dev);
+ port->hdm_end--;
+}
+
+static void cxl_dpa_release(void *cxled)
+{
+ down_write(&cxl_dpa_rwsem);
+ __cxl_dpa_release(cxled);
+ up_write(&cxl_dpa_rwsem);
+}
+
+/*
+ * Must be called from context that will not race port device
+ * unregistration, like decoder sysfs attribute methods
+ */
+static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+ devm_remove_action(&port->dev, cxl_dpa_release, cxled);
+ __cxl_dpa_release(cxled);
+}
+
+static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &port->dev;
+ struct resource *res;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ if (!len)
+ goto success;
+
+ if (cxled->dpa_res) {
+ dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
+ return -EBUSY;
+ }
+
+ if (port->hdm_end + 1 != cxled->cxld.id) {
+ /*
+ * Assumes alloc and commit order is always in hardware instance
+ * order per expectations from 8.2.5.12.20 Committing Decoder
+ * Programming that enforce decoder[m] committed before
+ * decoder[m+1] commit start.
+ */
+ dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+ cxled->cxld.id, port->id, port->hdm_end + 1);
+ return -EBUSY;
+ }
+
+ if (skipped) {
+ res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev,
+ "decoder%d.%d: failed to reserve skipped space\n",
+ port->id, cxled->cxld.id);
+ return -EBUSY;
+ }
+ }
+ res = __request_region(&cxlds->dpa_res, base, len,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
+ port->id, cxled->cxld.id);
+ if (skipped)
+ __release_region(&cxlds->dpa_res, base - skipped,
+ skipped);
+ return -EBUSY;
+ }
+ cxled->dpa_res = res;
+ cxled->skip = skipped;
+
+ if (resource_contains(&cxlds->pmem_res, res))
+ cxled->mode = CXL_DECODER_PMEM;
+ else if (resource_contains(&cxlds->ram_res, res))
+ cxled->mode = CXL_DECODER_RAM;
+ else {
+ dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
+ cxled->cxld.id, cxled->dpa_res);
+ cxled->mode = CXL_DECODER_MIXED;
+ }
+
+success:
+ port->hdm_end++;
+ get_device(&cxled->cxld.dev);
+ return 0;
+}
+
+static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
- return 256 << val;
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
-static int to_interleave_ways(u32 ctrl)
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);
+ resource_size_t size = 0;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ size = resource_size(cxled->dpa_res);
+ up_read(&cxl_dpa_rwsem);
- switch (val) {
- case 0 ... 4:
- return 1 << val;
- case 8 ... 10:
- return 3 << (val - 8);
+ return size;
+}
+
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
+{
+ resource_size_t base = -1;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ base = cxled->dpa_res->start;
+ up_read(&cxl_dpa_rwsem);
+
+ return base;
+}
+
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (!cxled->dpa_res) {
+ rc = 0;
+ goto out;
+ }
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder assigned to: %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.id != port->hdm_end) {
+ dev_dbg(dev, "expected decoder%d.%d\n", port->id,
+ port->hdm_end);
+ rc = -EBUSY;
+ goto out;
+ }
+ devm_cxl_dpa_release(cxled);
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+ return rc;
+}
+
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
default:
+ dev_dbg(dev, "unsupported mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Only allow modes that are supported by the current partition
+ * configuration
+ */
+ if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
+ dev_dbg(dev, "no available pmem capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
+ dev_dbg(dev, "no available ram capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled->mode = mode;
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ return rc;
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ resource_size_t free_ram_start, free_pmem_start;
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ resource_size_t start, avail, skip;
+ struct resource *p, *last;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder attached to %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_ram_start = last->end + 1;
+ else
+ free_ram_start = cxlds->ram_res.start;
+
+ for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_pmem_start = last->end + 1;
+ else
+ free_pmem_start = cxlds->pmem_res.start;
+
+ if (cxled->mode == CXL_DECODER_RAM) {
+ start = free_ram_start;
+ avail = cxlds->ram_res.end - start + 1;
+ skip = 0;
+ } else if (cxled->mode == CXL_DECODER_PMEM) {
+ resource_size_t skip_start, skip_end;
+
+ start = free_pmem_start;
+ avail = cxlds->pmem_res.end - start + 1;
+ skip_start = free_ram_start;
+
+ /*
+ * If some pmem is already allocated, then that allocation
+ * already handled the skip.
+ */
+ if (cxlds->pmem_res.child &&
+ skip_start == cxlds->pmem_res.child->start)
+ skip_end = skip_start - 1;
+ else
+ skip_end = start - 1;
+ skip = skip_end - skip_start + 1;
+ } else {
+ dev_dbg(dev, "mode not set\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (size > avail) {
+ dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
+ cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
+ &avail);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ rc = __cxl_dpa_reserve(cxled, start, size, skip);
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
+}
+
+static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u16 eig;
+ u8 eiw;
+
+ /*
+ * Input validation ensures these warns never fire, but otherwise
+ * suppress uninitialized variable usage warnings.
+ */
+ if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
+ "invalid interleave_ways: %d\n", cxld->interleave_ways))
+ return;
+ if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
+ "invalid interleave_granularity: %d\n",
+ cxld->interleave_granularity))
+ return;
+
+ u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
+ u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
+ *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
+}
+
+static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
+ CXL_HDM_DECODER0_CTRL_TYPE);
+}
+
+static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+{
+ struct cxl_dport **t = &cxlsd->target[0];
+ int ways = cxlsd->cxld.interleave_ways;
+
+ if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+ ways > 8 || ways > cxlsd->nr_targets,
+ "ways: %d overflows targets: %d\n", ways,
+ cxlsd->nr_targets))
+ return -ENXIO;
+
+ *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ if (ways > 1)
+ *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+ if (ways > 2)
+ *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
+ if (ways > 3)
+ *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
+ if (ways > 4)
+ *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
+ if (ways > 5)
+ *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
+ if (ways > 6)
+ *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ if (ways > 7)
+ *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+
+ return 0;
+}
+
+/*
+ * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
+ * committed or error within 10ms, but just be generous with 20ms to account for
+ * clock skew and other marginal behavior
+ */
+#define COMMIT_TIMEOUT_MS 20
+static int cxld_await_commit(void __iomem *hdm, int id)
+{
+ u32 ctrl;
+ int i;
+
+ for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ return -EIO;
+ }
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return 0;
+ fsleep(1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+ u64 base, size;
+ u32 ctrl;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (port->commit_end + 1 != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ return -EBUSY;
+ }
+
+ down_read(&cxl_dpa_rwsem);
+ /* common decoder settings */
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+ cxld_set_interleave(cxld, &ctrl);
+ cxld_set_type(cxld, &ctrl);
+ base = cxld->hpa_range.start;
+ size = range_len(&cxld->hpa_range);
+
+ writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+ void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
+ void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ u64 targets;
+
+ rc = cxlsd_set_targets(cxlsd, &targets);
+ if (rc) {
+ dev_dbg(&port->dev, "%s: target configuration error\n",
+ dev_name(&cxld->dev));
+ goto err;
+ }
+
+ writel(upper_32_bits(targets), tl_hi);
+ writel(lower_32_bits(targets), tl_lo);
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
+ void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
+
+ writel(upper_32_bits(cxled->skip), sk_hi);
+ writel(lower_32_bits(cxled->skip), sk_lo);
+ }
+
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end++;
+ rc = cxld_await_commit(hdm, cxld->id);
+err:
+ if (rc) {
+ dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ dev_name(&cxld->dev), rc);
+ cxld->reset(cxld);
+ return rc;
+ }
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static int cxl_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id;
+ u32 ctrl;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return 0;
+
+ if (port->commit_end != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ return -EBUSY;
}
+
+ down_read(&cxl_dpa_rwsem);
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end--;
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which)
+ int *target_map, void __iomem *hdm, int which,
+ u64 *dpa_base)
{
- u64 size, base;
+ struct cxl_endpoint_decoder *cxled = NULL;
+ u64 size, base, skip, dpa_size;
+ bool committed;
+ u32 remainder;
+ int i, rc;
u32 ctrl;
- int i;
union {
u64 value;
unsigned char target_id[8];
} target_list;
+ if (is_endpoint_decoder(&cxld->dev))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
+ cxld->commit = cxl_decoder_commit;
+ cxld->reset = cxl_decoder_reset;
- if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
+ if (!committed)
size = 0;
if (base == U64_MAX || size == U64_MAX) {
dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
@@ -172,39 +707,77 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
- cxld->decoder_range = (struct range) {
+ cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
};
- /* switch decoders are always enabled if committed */
- if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
+ /* decoders are enabled if committed */
+ if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ else
+ cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (cxld->id != port->commit_end + 1) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed out of order\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
+ port->commit_end = cxld->id;
+ } else {
+ /* unless / until type-2 drivers arrive, assume type-3 */
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
+ ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
+ }
+ cxld->target_type = CXL_DECODER_EXPANDER;
}
- cxld->interleave_ways = to_interleave_ways(ctrl);
- if (!cxld->interleave_ways) {
+ rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
+ &cxld->interleave_ways);
+ if (rc) {
dev_warn(&port->dev,
"decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
port->id, cxld->id, ctrl);
- return -ENXIO;
+ return rc;
}
- cxld->interleave_granularity = to_interleave_granularity(ctrl);
+ rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
+ &cxld->interleave_granularity);
+ if (rc)
+ return rc;
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
- cxld->target_type = CXL_DECODER_EXPANDER;
- else
- cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (!cxled) {
+ target_list.value =
+ ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ for (i = 0; i < cxld->interleave_ways; i++)
+ target_map[i] = target_list.target_id[i];
- if (is_endpoint_decoder(&cxld->dev))
return 0;
+ }
- target_list.value =
- ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
- for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ if (!committed)
+ return 0;
+ dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
+ if (remainder) {
+ dev_err(&port->dev,
+ "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
+ port->id, cxld->id, size, cxld->interleave_ways);
+ return -ENXIO;
+ }
+ skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
+ port->id, cxld->id, *dpa_base,
+ *dpa_base + dpa_size + skip - 1, rc);
+ return rc;
+ }
+ *dpa_base += dpa_size + skip;
return 0;
}
@@ -216,7 +789,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
- int i, committed, failed;
+ int i, committed;
+ u64 dpa_base = 0;
u32 ctrl;
/*
@@ -236,27 +810,37 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
if (committed != cxlhdm->decoder_count)
msleep(20);
- for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
- if (is_cxl_endpoint(port))
- cxld = cxl_endpoint_decoder_alloc(port);
- else
- cxld = cxl_switch_decoder_alloc(port, target_count);
- if (IS_ERR(cxld)) {
- dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
- return PTR_ERR(cxld);
+ if (is_cxl_endpoint(port)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
+ } else {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map,
- cxlhdm->regs.hdm_decoder, i);
+ rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
if (rc) {
put_device(&cxld->dev);
- failed++;
- continue;
+ return rc;
}
rc = add_hdm_decoder(port, cxld, target_map);
if (rc) {
@@ -266,11 +850,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
}
}
- if (failed == cxlhdm->decoder_count) {
- dev_err(&port->dev, "No valid decoders found\n");
- return -ENXIO;
- }
-
return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index cbf23beebebe..16176b9278b4 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -718,12 +718,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
*/
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
- struct cxl_mbox_get_partition_info {
- __le64 active_volatile_cap;
- __le64 active_persistent_cap;
- __le64 next_volatile_cap;
- __le64 next_persistent_cap;
- } __packed pi;
+ struct cxl_mbox_get_partition_info pi;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
@@ -773,15 +768,6 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlds->dev,
- "Identify Memory Device\n"
- " total_bytes = %#llx\n"
- " volatile_only_bytes = %#llx\n"
- " persistent_only_bytes = %#llx\n"
- " partition_align_bytes = %#llx\n",
- cxlds->total_bytes, cxlds->volatile_only_bytes,
- cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
-
cxlds->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -789,42 +775,63 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
{
int rc;
- if (cxlds->partition_align_bytes == 0) {
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
- cxlds->pmem_range.start = cxlds->volatile_only_bytes;
- cxlds->pmem_range.end = cxlds->volatile_only_bytes +
- cxlds->persistent_only_bytes - 1;
+ res->name = type;
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = IORESOURCE_MEM;
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
return 0;
}
-
- rc = cxl_mem_get_partition_info(cxlds);
+ rc = request_resource(parent, res);
if (rc) {
- dev_err(cxlds->dev, "Failed to query partition information\n");
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
return rc;
}
- dev_dbg(cxlds->dev,
- "Get Partition Info\n"
- " active_volatile_bytes = %#llx\n"
- " active_persistent_bytes = %#llx\n"
- " next_volatile_bytes = %#llx\n"
- " next_persistent_bytes = %#llx\n",
- cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
- cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ int rc;
- cxlds->pmem_range.start = cxlds->active_volatile_bytes;
- cxlds->pmem_range.end =
- cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
+ cxlds->dpa_res =
+ (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
- return 0;
+ if (cxlds->partition_align_bytes == 0) {
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->volatile_only_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, "pmem");
+ }
+
+ rc = cxl_mem_get_partition_info(cxlds);
+ if (rc) {
+ dev_err(dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->active_volatile_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->active_volatile_bytes,
+ cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
@@ -845,19 +852,11 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
-static struct dentry *cxl_debugfs;
-
void __init cxl_mbox_init(void)
{
struct dentry *mbox_debugfs;
- cxl_debugfs = debugfs_create_dir("cxl", NULL);
- mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
+ mbox_debugfs = cxl_debugfs_create_dir("mbox");
debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
&cxl_raw_allow_all);
}
-
-void cxl_mbox_exit(void)
-{
- debugfs_remove_recursive(cxl_debugfs);
-}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f7cdcd33504a..20ce488a7754 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -68,7 +68,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->ram_range);
+ unsigned long long len = resource_size(&cxlds->ram_res);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -81,7 +81,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->pmem_range);
+ unsigned long long len = resource_size(&cxlds->pmem_res);
return sysfs_emit(buf, "%#llx\n", len);
}
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c4c99ff7b55e..9240df53ed87 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -225,7 +226,6 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
{
struct range *dev_range = arg;
struct cxl_decoder *cxld;
- struct range root_range;
if (!is_root_decoder(dev))
return 0;
@@ -237,12 +237,7 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
if (!(cxld->flags & CXL_DECODER_F_RAM))
return 0;
- root_range = (struct range) {
- .start = cxld->platform_res.start,
- .end = cxld->platform_res.end,
- };
-
- return range_contains(&root_range, dev_range);
+ return range_contains(&cxld->hpa_range, dev_range);
}
static void disable_hdm(void *_cxlhdm)
@@ -458,3 +453,175 @@ hdm_init:
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE 0x0000ff00
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA 0
+#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE 0xffff0000
+#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff
+#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
+
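+/*
+ * Locate the DOE mailbox on the endpoint's memdev (the port's uport) that
+ * advertises the CXL Table Access protocol; the CDAT is read through that
+ * mailbox.
+ */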
+static struct pci_doe_mb *find_cdat_doe(struct device *uport)
+{
+ struct cxl_memdev *cxlmd;
+ struct cxl_dev_state *cxlds;
+ unsigned long index;
+ void *entry;
+
+ cxlmd = to_cxl_memdev(uport);
+ cxlds = cxlmd->cxlds;
+
+ xa_for_each(&cxlds->doe_mbs, index, entry) {
+ struct pci_doe_mb *cur = entry;
+
+ if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS))
+ return cur;
+ }
+
+ return NULL;
+}
+
+#define CDAT_DOE_REQ(entry_handle) \
+ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
+ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
+ CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
+
+static void cxl_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+struct cdat_doe_task {
+ u32 request_pl;
+ u32 response_pl[32];
+ struct completion c;
+ struct pci_doe_task task;
+};
+
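+/*
+ * Declare an on-stack CDAT table access task: one request dword plus a
+ * response buffer, with completion signalled through @c once the DOE
+ * exchange finishes.
+ */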
+#define DECLARE_CDAT_DOE_TASK(req, cdt) \
+struct cdat_doe_task cdt = { \
+ .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \
+ .request_pl = req, \
+ .task = { \
+ .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \
+ .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \
+ .request_pl = &cdt.request_pl, \
+ .request_pl_sz = sizeof(cdt.request_pl), \
+ .response_pl = cdt.response_pl, \
+ .response_pl_sz = sizeof(cdt.response_pl), \
+ .complete = cxl_doe_task_complete, \
+ .private = &cdt.c, \
+ } \
+}
+
+static int cxl_cdat_get_length(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ size_t *length)
+{
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ if (t.task.rv < sizeof(u32))
+ return -EIO;
+
+ *length = t.response_pl[1];
+ dev_dbg(dev, "CDAT length %zu\n", *length);
+
+ return 0;
+}
+
+static int cxl_cdat_read_table(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ struct cxl_cdat *cdat)
+{
+ size_t length = cdat->length;
+ u32 *data = cdat->table;
+ int entry_handle = 0;
+
+ do {
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ size_t entry_dw;
+ u32 *entry;
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ /* 1 DW header + 1 DW data min */
+ if (t.task.rv < (2 * sizeof(u32)))
+ return -EIO;
+
+ /* Get the CXL table access header entry handle */
+ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
+ t.response_pl[0]);
+ entry = t.response_pl + 1;
+ entry_dw = t.task.rv / sizeof(u32);
+ /* Skip Header */
+ entry_dw -= 1;
+ entry_dw = min(length / sizeof(u32), entry_dw);
+ /* Prevent length < 1 DW from causing a buffer overflow */
+ if (entry_dw) {
+ memcpy(data, entry, entry_dw * sizeof(u32));
+ length -= entry_dw * sizeof(u32);
+ data += entry_dw;
+ }
+ } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+
+ return 0;
+}
+
+/**
+ * read_cdat_data - Read the CDAT data on this port
+ * @port: Port to read data from
+ *
+ * This call will sleep waiting for responses from the DOE mailbox.
+ */
+void read_cdat_data(struct cxl_port *port)
+{
+ struct pci_doe_mb *cdat_doe;
+ struct device *dev = &port->dev;
+ struct device *uport = port->uport;
+ size_t cdat_length;
+ int rc;
+
+ cdat_doe = find_cdat_doe(uport);
+ if (!cdat_doe) {
+ dev_dbg(dev, "No CDAT mailbox\n");
+ return;
+ }
+
+ port->cdat_available = true;
+
+ if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
+ dev_dbg(dev, "No CDAT length\n");
+ return;
+ }
+
+ port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
+ if (!port->cdat.table)
+ return;
+
+ port->cdat.length = cdat_length;
+ rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
+ if (rc) {
+ /* Don't leave table data allocated on error */
+ devm_kfree(dev, port->cdat.table);
+ port->cdat.table = NULL;
+ port->cdat.length = 0;
+ dev_err(dev, "CDAT data read error\n");
+ }
+}
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
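+
+/*
+ * Illustrative caller sketch (hypothetical function name, not part of this
+ * file): an endpoint port driver would typically invoke read_cdat_data()
+ * once during probe, after the memdev's DOE mailboxes exist, and treat a
+ * missing table as non-fatal:
+ *
+ *	static int example_endpoint_probe(struct cxl_port *port)
+ *	{
+ *		if (is_cxl_endpoint(port))
+ *			read_cdat_data(port);
+ *		if (!port->cdat.table)
+ *			dev_dbg(&port->dev, "CDAT unavailable\n");
+ *		return 0;
+ *	}
+ */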
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index bec7cfb54ebf..1d12a8206444 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
{
- struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
+ struct cxl_port *port = find_cxl_root(start);
struct device *dev;
if (!port)
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index dbce99bdffab..bffde862de0b 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/memregion.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,6 +44,8 @@ static int cxl_device_id(struct device *dev)
return CXL_DEVICE_NVDIMM_BRIDGE;
if (dev->type == &cxl_nvdimm_type)
return CXL_DEVICE_NVDIMM;
+ if (dev->type == CXL_PMEM_REGION_TYPE())
+ return CXL_DEVICE_PMEM_REGION;
if (is_cxl_port(dev)) {
if (is_cxl_root(to_cxl_port(dev)))
return CXL_DEVICE_ROOT;
@@ -49,6 +53,8 @@ static int cxl_device_id(struct device *dev)
}
if (is_cxl_memdev(dev))
return CXL_DEVICE_MEMORY_EXPANDER;
+ if (dev->type == CXL_REGION_TYPE())
+ return CXL_DEVICE_REGION;
return 0;
}
@@ -73,14 +79,8 @@ static ssize_t start_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 start;
- if (is_root_decoder(dev))
- start = cxld->platform_res.start;
- else
- start = cxld->decoder_range.start;
-
- return sysfs_emit(buf, "%#llx\n", start);
+ return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);
@@ -88,14 +88,8 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 size;
-
- if (is_root_decoder(dev))
- size = resource_size(&cxld->platform_res);
- else
- size = range_len(&cxld->decoder_range);
- return sysfs_emit(buf, "%#llx\n", size);
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
@@ -131,20 +125,21 @@ static ssize_t target_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_type);
-static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
+static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
ssize_t offset = 0;
int i, rc = 0;
for (i = 0; i < cxld->interleave_ways; i++) {
- struct cxl_dport *dport = cxld->target[i];
+ struct cxl_dport *dport = cxlsd->target[i];
struct cxl_dport *next = NULL;
if (!dport)
break;
if (i + 1 < cxld->interleave_ways)
- next = cxld->target[i + 1];
+ next = cxlsd->target[i + 1];
rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
next ? "," : "");
if (rc < 0)
@@ -158,15 +153,15 @@ static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
static ssize_t target_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
ssize_t offset;
unsigned int seq;
int rc;
do {
- seq = read_seqbegin(&cxld->target_lock);
- rc = emit_target_list(cxld, buf);
- } while (read_seqretry(&cxld->target_lock, seq));
+ seq = read_seqbegin(&cxlsd->target_lock);
+ rc = emit_target_list(cxlsd, buf);
+ } while (read_seqretry(&cxlsd->target_lock, seq));
if (rc < 0)
return rc;
@@ -180,10 +175,121 @@ static ssize_t target_list_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_list);
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ switch (cxled->mode) {
+ case CXL_DECODER_RAM:
+ return sysfs_emit(buf, "ram\n");
+ case CXL_DECODER_PMEM:
+ return sysfs_emit(buf, "pmem\n");
+ case CXL_DECODER_NONE:
+ return sysfs_emit(buf, "none\n");
+ case CXL_DECODER_MIXED:
+ default:
+ return sysfs_emit(buf, "mixed\n");
+ }
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ enum cxl_decoder_mode mode;
+ ssize_t rc;
+
+ if (sysfs_streq(buf, "pmem"))
+ mode = CXL_DECODER_PMEM;
+ else if (sysfs_streq(buf, "ram"))
+ mode = CXL_DECODER_RAM;
+ else
+ return -EINVAL;
+
+ rc = cxl_dpa_set_mode(cxled, mode);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ u64 base = cxl_dpa_resource_start(cxled);
+
+ return sysfs_emit(buf, "%#llx\n", base);
+}
+static DEVICE_ATTR_RO(dpa_resource);
+
+static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ resource_size_t size = cxl_dpa_size(cxled);
+
+ return sysfs_emit(buf, "%pa\n", &size);
+}
+
+static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ unsigned long long size;
+ ssize_t rc;
+
+ rc = kstrtoull(buf, 0, &size);
+ if (rc)
+ return rc;
+
+ if (!IS_ALIGNED(size, SZ_256M))
+ return -EINVAL;
+
+ rc = cxl_dpa_free(cxled);
+ if (rc)
+ return rc;
+
+ if (size == 0)
+ return len;
+
+ rc = cxl_dpa_alloc(cxled, size);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(dpa_size);
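+
+/*
+ * Illustrative usage (hypothetical decoder name): pick a partition mode
+ * before sizing, and keep allocations 256MiB-aligned as enforced by
+ * dpa_size_store() above:
+ *
+ *   echo pmem > /sys/bus/cxl/devices/decoder3.0/mode
+ *   echo 0x10000000 > /sys/bus/cxl/devices/decoder3.0/dpa_size
+ */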
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
+}
+
+static DEVICE_ATTR_RO(interleave_granularity);
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
+}
+
+static DEVICE_ATTR_RO(interleave_ways);
+
static struct attribute *cxl_decoder_base_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
&dev_attr_locked.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_interleave_ways.attr,
NULL,
};
@@ -197,11 +303,35 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type2.attr,
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(create_pmem_region)
+ SET_CXL_REGION_ATTR(delete_region)
NULL,
};
+static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
+{
+ unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;
+
+ return (cxlrd->cxlsd.cxld.flags & flags) == flags;
+}
+
+static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_decoder_root_attribute_group = {
.attrs = cxl_decoder_root_attrs,
+ .is_visible = cxl_root_decoder_visible,
};
static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
@@ -214,6 +344,7 @@ static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
static struct attribute *cxl_decoder_switch_attrs[] = {
&dev_attr_target_type.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -230,6 +361,10 @@ static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
static struct attribute *cxl_decoder_endpoint_attrs[] = {
&dev_attr_target_type.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_dpa_size.attr,
+ &dev_attr_dpa_resource.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -244,31 +379,64 @@ static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
NULL,
};
-static void cxl_decoder_release(struct device *dev)
+static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
- struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
ida_free(&port->decoder_ida, cxld->id);
- kfree(cxld);
put_device(&port->dev);
}
+static void cxl_endpoint_decoder_release(struct device *dev)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ __cxl_decoder_release(&cxled->cxld);
+ kfree(cxled);
+}
+
+static void cxl_switch_decoder_release(struct device *dev)
+{
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ __cxl_decoder_release(&cxlsd->cxld);
+ kfree(cxlsd);
+}
+
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
+ "not a cxl_root_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
+
+static void cxl_root_decoder_release(struct device *dev)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (atomic_read(&cxlrd->region_id) >= 0)
+ memregion_free(atomic_read(&cxlrd->region_id));
+ __cxl_decoder_release(&cxlrd->cxlsd.cxld);
+ kfree(cxlrd);
+}
+
static const struct device_type cxl_decoder_endpoint_type = {
.name = "cxl_decoder_endpoint",
- .release = cxl_decoder_release,
+ .release = cxl_endpoint_decoder_release,
.groups = cxl_decoder_endpoint_attribute_groups,
};
static const struct device_type cxl_decoder_switch_type = {
.name = "cxl_decoder_switch",
- .release = cxl_decoder_release,
+ .release = cxl_switch_decoder_release,
.groups = cxl_decoder_switch_attribute_groups,
};
static const struct device_type cxl_decoder_root_type = {
.name = "cxl_decoder_root",
- .release = cxl_decoder_release,
+ .release = cxl_root_decoder_release,
.groups = cxl_decoder_root_attribute_groups,
};
@@ -283,39 +451,63 @@ bool is_root_decoder(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
-bool is_cxl_decoder(struct device *dev)
+bool is_switch_decoder(struct device *dev)
{
- return dev->type && dev->type->release == cxl_decoder_release;
+ return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
- if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+ if (dev_WARN_ONCE(dev,
+ !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
"not a cxl_decoder device\n"))
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
+ "not a cxl_endpoint_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
+
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
+ "not a cxl_switch_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_switch_decoder, cxld.dev);
+}
+
static void cxl_ep_release(struct cxl_ep *ep)
{
- if (!ep)
- return;
- list_del(&ep->list);
put_device(ep->ep);
kfree(ep);
}
+static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
+{
+ if (!ep)
+ return;
+ xa_erase(&port->endpoints, (unsigned long) ep->ep);
+ cxl_ep_release(ep);
+}
+
static void cxl_port_release(struct device *dev)
{
struct cxl_port *port = to_cxl_port(dev);
- struct cxl_ep *ep, *_e;
+ unsigned long index;
+ struct cxl_ep *ep;
- device_lock(dev);
- list_for_each_entry_safe(ep, _e, &port->endpoints, list)
- cxl_ep_release(ep);
- device_unlock(dev);
+ xa_for_each(&port->endpoints, index, ep)
+ cxl_ep_remove(port, ep);
+ xa_destroy(&port->endpoints);
+ xa_destroy(&port->dports);
+ xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
kfree(port);
}
@@ -370,7 +562,7 @@ static void unregister_port(void *_port)
lock_dev = &parent->dev;
device_lock_assert(lock_dev);
- port->uport = NULL;
+ port->dead = true;
device_unregister(&port->dev);
}
@@ -395,7 +587,7 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
@@ -409,6 +601,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
if (rc < 0)
goto err;
port->id = rc;
+ port->uport = uport;
/*
* The top-level cxl_port "cxl_root" does not have a cxl_port as
@@ -417,17 +610,37 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
* description.
*/
dev = &port->dev;
- if (parent_port) {
+ if (parent_dport) {
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_port *iter;
+
dev->parent = &parent_port->dev;
port->depth = parent_port->depth + 1;
+ port->parent_dport = parent_dport;
+
+ /*
+ * walk to the host bridge, or the first ancestor that knows
+ * the host bridge
+ */
+ iter = port;
+ while (!iter->host_bridge &&
+ !is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+ if (iter->host_bridge)
+ port->host_bridge = iter->host_bridge;
+ else
+ port->host_bridge = iter->uport;
+ dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
} else
dev->parent = uport;
- port->uport = uport;
port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
- INIT_LIST_HEAD(&port->dports);
- INIT_LIST_HEAD(&port->endpoints);
+ port->hdm_end = -1;
+ port->commit_end = -1;
+ xa_init(&port->dports);
+ xa_init(&port->endpoints);
+ xa_init(&port->regions);
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -447,24 +660,24 @@ err:
* @host: host device for devm operations
* @uport: "physical" device implementing this upstream port
* @component_reg_phys: (optional) for configurable cxl_port instances
- * @parent_port: next hop up in the CXL memory decode hierarchy
+ * @parent_dport: next hop up in the CXL memory decode hierarchy
*/
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
int rc;
- port = cxl_port_alloc(uport, component_reg_phys, parent_port);
+ port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
if (is_cxl_memdev(uport))
rc = dev_set_name(dev, "endpoint%d", port->id);
- else if (parent_port)
+ else if (parent_dport)
rc = dev_set_name(dev, "port%d", port->id);
else
rc = dev_set_name(dev, "root%d", port->id);
@@ -556,17 +769,13 @@ static int match_root_child(struct device *dev, const void *match)
return 0;
port = to_cxl_port(dev);
- device_lock(dev);
- list_for_each_entry(dport, &port->dports, list) {
- iter = match;
- while (iter) {
- if (iter == dport->dport)
- goto out;
- iter = iter->parent;
- }
+ iter = match;
+ while (iter) {
+ dport = cxl_find_dport_by_dev(port, iter);
+ if (dport)
+ break;
+ iter = iter->parent;
}
-out:
- device_unlock(dev);
return !!iter;
}
@@ -590,9 +799,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
+ unsigned long index;
device_lock_assert(&port->dev);
- list_for_each_entry (dport, &port->dports, list)
+ xa_for_each(&port->dports, index, dport)
if (dport->port_id == id)
return dport;
return NULL;
@@ -604,15 +814,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new)
device_lock_assert(&port->dev);
dup = find_dport(port, new->port_id);
- if (dup)
+ if (dup) {
dev_err(&port->dev,
"unable to add dport%d-%s non-unique port id (%s)\n",
new->port_id, dev_name(new->dport),
dev_name(dup->dport));
- else
- list_add_tail(&new->list, &port->dports);
-
- return dup ? -EEXIST : 0;
+ return -EBUSY;
+ }
+ return xa_insert(&port->dports, (unsigned long)new->dport, new,
+ GFP_KERNEL);
}
/*
@@ -639,10 +849,8 @@ static void cxl_dport_remove(void *data)
struct cxl_dport *dport = data;
struct cxl_port *port = dport->port;
+ xa_erase(&port->dports, (unsigned long) dport->dport);
put_device(dport->dport);
- cond_cxl_root_lock(port);
- list_del(&dport->list);
- cond_cxl_root_unlock(port);
}
static void cxl_dport_unlink(void *data)
@@ -694,7 +902,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
if (!dport)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dport->list);
dport->dport = dport_dev;
dport->port_id = port_id;
dport->component_reg_phys = component_reg_phys;
@@ -723,44 +930,33 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
-static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
-{
- struct cxl_ep *ep;
-
- device_lock_assert(&port->dev);
- list_for_each_entry(ep, &port->endpoints, list)
- if (ep->ep == ep_dev)
- return ep;
- return NULL;
-}
-
-static int add_ep(struct cxl_port *port, struct cxl_ep *new)
+static int add_ep(struct cxl_ep *new)
{
- struct cxl_ep *dup;
+ struct cxl_port *port = new->dport->port;
+ int rc;
device_lock(&port->dev);
if (port->dead) {
device_unlock(&port->dev);
return -ENXIO;
}
- dup = find_ep(port, new->ep);
- if (!dup)
- list_add_tail(&new->list, &port->endpoints);
+ rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
+ GFP_KERNEL);
device_unlock(&port->dev);
- return dup ? -EEXIST : 0;
+ return rc;
}
/**
* cxl_add_ep - register an endpoint's interest in a port
- * @port: a port in the endpoint's topology ancestry
+ * @dport: the dport that routes to @ep_dev
* @ep_dev: device representing the endpoint
*
* Intermediate CXL ports are scanned based on the arrival of endpoints.
* When those endpoints depart the port can be destroyed once all
* endpoints that care about that port have been removed.
*/
-static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
+static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
struct cxl_ep *ep;
int rc;
@@ -769,10 +965,10 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
if (!ep)
return -ENOMEM;
- INIT_LIST_HEAD(&ep->list);
ep->ep = get_device(ep_dev);
+ ep->dport = dport;
- rc = add_ep(port, ep);
+ rc = add_ep(ep);
if (rc)
cxl_ep_release(ep);
return rc;
@@ -781,11 +977,13 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
struct cxl_find_port_ctx {
const struct device *dport_dev;
const struct cxl_port *parent_port;
+ struct cxl_dport **dport;
};
static int match_port_by_dport(struct device *dev, const void *data)
{
const struct cxl_find_port_ctx *ctx = data;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!is_cxl_port(dev))
@@ -794,7 +992,10 @@ static int match_port_by_dport(struct device *dev, const void *data)
return 0;
port = to_cxl_port(dev);
- return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
+ dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
+ if (ctx->dport)
+ *ctx->dport = dport;
+ return dport != NULL;
}
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
@@ -810,24 +1011,32 @@ static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
return NULL;
}
-static struct cxl_port *find_cxl_port(struct device *dport_dev)
+static struct cxl_port *find_cxl_port(struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev)
+ struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
.parent_port = parent_port,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
/*
@@ -851,13 +1060,13 @@ static void delete_endpoint(void *data)
struct cxl_port *parent_port;
struct device *parent;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, NULL);
if (!parent_port)
goto out;
parent = &parent_port->dev;
device_lock(parent);
- if (parent->driver && endpoint->uport) {
+ if (parent->driver && !endpoint->dead) {
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
@@ -883,21 +1092,70 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
* for a port to be unregistered is when all memdevs beneath that port have gone
* through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
- * devm action registration order.
+ * devm action registration order, and for dports to have already been
+ * destroyed by reap_dports().
*/
-static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
+static void delete_switch_port(struct cxl_port *port)
+{
+ devm_release_action(port->dev.parent, cxl_unlink_uport, port);
+ devm_release_action(port->dev.parent, unregister_port, port);
+}
+
+static void reap_dports(struct cxl_port *port)
{
- struct cxl_dport *dport, *_d;
+ struct cxl_dport *dport;
+ unsigned long index;
+
+ device_lock_assert(&port->dev);
- list_for_each_entry_safe(dport, _d, dports, list) {
+ xa_for_each(&port->dports, index, dport) {
devm_release_action(&port->dev, cxl_dport_unlink, dport);
devm_release_action(&port->dev, cxl_dport_remove, dport);
devm_kfree(&port->dev, dport);
}
- devm_release_action(port->dev.parent, cxl_unlink_uport, port);
- devm_release_action(port->dev.parent, unregister_port, port);
}
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport)
+{
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_port *endpoint, *iter, *down;
+ int rc;
+
+ /*
+ * Now that the path to the root is established record all the
+ * intervening ports in the chain.
+ */
+ for (iter = parent_port, down = NULL; !is_cxl_root(iter);
+ down = iter, iter = to_cxl_port(iter->dev.parent)) {
+ struct cxl_ep *ep;
+
+ ep = cxl_ep_load(iter, cxlmd);
+ ep->next = down;
+ }
+
+ endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
+ cxlds->component_reg_phys, parent_dport);
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
+
+ dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+
+ rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+ if (rc)
+ return rc;
+
+ if (!endpoint->dev.driver) {
+ dev_err(&cxlmd->dev, "%s failed probe\n",
+ dev_name(&endpoint->dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
+
static void cxl_detach_ep(void *data)
{
struct cxl_memdev *cxlmd = data;
@@ -906,13 +1164,13 @@ static void cxl_detach_ep(void *data)
for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct cxl_port *port, *parent_port;
- LIST_HEAD(reap_dports);
struct cxl_ep *ep;
+ bool died = false;
if (!dport_dev)
break;
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, NULL);
if (!port)
continue;
@@ -936,26 +1194,27 @@ static void cxl_detach_ep(void *data)
}
device_lock(&port->dev);
- ep = find_ep(port, &cxlmd->dev);
+ ep = cxl_ep_load(port, cxlmd);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
- cxl_ep_release(ep);
- if (ep && !port->dead && list_empty(&port->endpoints) &&
+ cxl_ep_remove(port, ep);
+ if (ep && !port->dead && xa_empty(&port->endpoints) &&
!is_cxl_root(parent_port)) {
/*
* This was the last ep attached to a dynamically
* enumerated port. Block new cxl_add_ep() and garbage
* collect the port.
*/
+ died = true;
port->dead = true;
- list_splice_init(&port->dports, &reap_dports);
+ reap_dports(port);
}
device_unlock(&port->dev);
- if (!list_empty(&reap_dports)) {
+ if (died) {
dev_dbg(&cxlmd->dev, "delete %s\n",
dev_name(&port->dev));
- delete_switch_port(port, &reap_dports);
+ delete_switch_port(port);
}
put_device(&port->dev);
device_unlock(&parent_port->dev);
@@ -986,6 +1245,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
{
struct device *dparent = grandparent(dport_dev);
struct cxl_port *port, *parent_port = NULL;
+ struct cxl_dport *dport, *parent_dport;
resource_size_t component_reg_phys;
int rc;
@@ -1000,7 +1260,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -ENXIO;
}
- parent_port = find_cxl_port(dparent);
+ parent_port = find_cxl_port(dparent, &parent_dport);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
@@ -1015,13 +1275,14 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
goto out;
}
- port = find_cxl_port_at(parent_port, dport_dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
if (!port) {
component_reg_phys = find_component_registers(uport_dev);
port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_port);
+ component_reg_phys, parent_dport);
+ /* retry find to pick up the new dport information */
if (!IS_ERR(port))
- get_device(&port->dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
}
out:
device_unlock(&parent_port->dev);
@@ -1031,8 +1292,8 @@ out:
else {
dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
- if (rc == -EEXIST) {
+ rc = cxl_add_ep(dport, &cxlmd->dev);
+ if (rc == -EBUSY) {
/*
* "can't" happen, but this error code means
* something to the caller, so translate it.
@@ -1065,6 +1326,7 @@ retry:
for (iter = dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct device *uport_dev;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!dport_dev)
@@ -1080,12 +1342,12 @@ retry:
dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, &dport);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
+ rc = cxl_add_ep(dport, &cxlmd->dev);
/*
* If the endpoint already exists in the port's list,
@@ -1094,7 +1356,7 @@ retry:
* the parent_port lock as the current port may be being
* reaped.
*/
- if (rc && rc != -EEXIST) {
+ if (rc && rc != -EBUSY) {
put_device(&port->dev);
return rc;
}
@@ -1124,30 +1386,14 @@ retry:
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport)
{
- return find_cxl_port(grandparent(&cxlmd->dev));
+ return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev)
-{
- struct cxl_dport *dport;
-
- device_lock(&port->dev);
- list_for_each_entry(dport, &port->dports, list)
- if (dport->dport == dev) {
- device_unlock(&port->dev);
- return dport;
- }
-
- device_unlock(&port->dev);
- return NULL;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
-
-static int decoder_populate_targets(struct cxl_decoder *cxld,
+static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
{
int i, rc = 0;
@@ -1157,88 +1403,92 @@ static int decoder_populate_targets(struct cxl_decoder *cxld,
device_lock_assert(&port->dev);
- if (list_empty(&port->dports))
+ if (xa_empty(&port->dports))
return -EINVAL;
- write_seqlock(&cxld->target_lock);
- for (i = 0; i < cxld->nr_targets; i++) {
+ write_seqlock(&cxlsd->target_lock);
+ for (i = 0; i < cxlsd->nr_targets; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
if (!dport) {
rc = -ENXIO;
break;
}
- cxld->target[i] = dport;
+ cxlsd->target[i] = dport;
}
- write_sequnlock(&cxld->target_lock);
+ write_sequnlock(&cxlsd->target_lock);
return rc;
}
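+/*
+ * Resolve a region position to a host-bridge target: with the root decoder
+ * interleaved x N, position @pos maps to target[pos % N]. For example, a x2
+ * host-bridge interleave sends even positions to target[0] and odd positions
+ * to target[1].
+ */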
+static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
+{
+ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
+ struct cxl_decoder *cxld = &cxlsd->cxld;
+ int iw;
+
+ iw = cxld->interleave_ways;
+ if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
+ "misconfigured root decoder\n"))
+ return NULL;
+
+ return cxlrd->cxlsd.target[pos % iw];
+}
+
static struct lock_class_key cxl_decoder_key;
/**
- * cxl_decoder_alloc - Allocate a new CXL decoder
+ * cxl_decoder_init - Common decoder setup / initialization
* @port: owning port of this decoder
- * @nr_targets: downstream targets accessible by this decoder. All upstream
- * ports and root ports must have at least 1 target. Endpoint
- * devices will have 0 targets. Callers wishing to register an
- * endpoint device should specify 0.
- *
- * A port should contain one or more decoders. Each of those decoders enable
- * some address space for CXL.mem utilization. A decoder is expected to be
- * configured by the caller before registering.
+ * @cxld: common decoder properties to initialize
*
- * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
- * is initialized to be a "passthrough" decoder.
+ * A port may contain one or more decoders. Each of those decoders
+ * enables some address space for CXL.mem utilization. A decoder is
+ * expected to be configured by the caller before registering via
+ * cxl_decoder_add().
*/
-static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
struct device *dev;
- int rc = 0;
-
- if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
- return ERR_PTR(-EINVAL);
-
- cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
- if (!cxld)
- return ERR_PTR(-ENOMEM);
+ int rc;
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
- goto err;
+ return rc;
/* need parent to stick around to release the id */
get_device(&port->dev);
cxld->id = rc;
- cxld->nr_targets = nr_targets;
- seqlock_init(&cxld->target_lock);
dev = &cxld->dev;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_decoder_key);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
- if (is_cxl_root(port))
- cxld->dev.type = &cxl_decoder_root_type;
- else if (is_cxl_endpoint(port))
- cxld->dev.type = &cxl_decoder_endpoint_type;
- else
- cxld->dev.type = &cxl_decoder_switch_type;
/* Pre initialize an "empty" decoder */
cxld->interleave_ways = 1;
cxld->interleave_granularity = PAGE_SIZE;
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
- return cxld;
-err:
- kfree(cxld);
- return ERR_PTR(rc);
+ return 0;
+}
+
+static int cxl_switch_decoder_init(struct cxl_port *port,
+ struct cxl_switch_decoder *cxlsd,
+ int nr_targets)
+{
+ if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
+ return -EINVAL;
+
+ cxlsd->nr_targets = nr_targets;
+ seqlock_init(&cxlsd->target_lock);
+ return cxl_decoder_init(port, &cxlsd->cxld);
}
/**
@@ -1251,13 +1501,46 @@ err:
* firmware description of CXL resources into a CXL standard decode
* topology.
*/
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_root_decoder *cxlrd;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_root(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
+ GFP_KERNEL);
+ if (!cxlrd)
+ return ERR_PTR(-ENOMEM);
+
+ cxlsd = &cxlrd->cxlsd;
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlrd);
+ return ERR_PTR(rc);
+ }
+
+ cxlrd->calc_hb = cxl_hb_modulo;
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_root_type;
+ /*
+ * cxl_root_decoder_release() special cases negative ids to
+ * detect memregion_alloc() failures.
+ */
+ atomic_set(&cxlrd->region_id, -1);
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0) {
+ put_device(&cxld->dev);
+ return ERR_PTR(rc);
+ }
+
+ atomic_set(&cxlrd->region_id, rc);
+ return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -1272,13 +1555,29 @@ EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
* that sit between Switch Upstream Ports / Switch Downstream Ports and
* Host Bridges / Root Ports.
*/
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (is_cxl_root(port) || is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
+ if (!cxlsd)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlsd);
+ return ERR_PTR(rc);
+ }
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_switch_type;
+ return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
@@ -1288,18 +1587,35 @@ EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
*
* Return: A new cxl decoder to be registered by cxl_decoder_add()
*/
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, 0);
+ cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
+ if (!cxled)
+ return ERR_PTR(-ENOMEM);
+
+ cxled->pos = -1;
+ cxld = &cxled->cxld;
+ rc = cxl_decoder_init(port, cxld);
+ if (rc) {
+ kfree(cxled);
+ return ERR_PTR(rc);
+ }
+
+ cxld->dev.type = &cxl_decoder_endpoint_type;
+ return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
* cxl_decoder_add_locked - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1335,7 +1651,9 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
if (!is_endpoint_decoder(dev)) {
- rc = decoder_populate_targets(cxld, port, target_map);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ rc = decoder_populate_targets(cxlsd, port, target_map);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1347,20 +1665,13 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (rc)
return rc;
- /*
- * Platform decoder resources should show up with a reasonable name. All
- * other resources are just sub ranges within the main decoder resource.
- */
- if (is_root_decoder(dev))
- cxld->platform_res.name = dev_name(dev);
-
return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
/**
* cxl_decoder_add - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1394,6 +1705,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
+ struct cxl_endpoint_decoder *cxled;
+
+ if (is_endpoint_decoder(dev)) {
+ cxled = to_cxl_endpoint_decoder(dev);
+ cxl_decoder_kill_region(cxled);
+ }
+
device_unregister(dev);
}
@@ -1521,10 +1839,20 @@ struct bus_type cxl_bus_type = {
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
+static struct dentry *cxl_debugfs;
+
+struct dentry *cxl_debugfs_create_dir(const char *dir)
+{
+ return debugfs_create_dir(dir, cxl_debugfs);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
+
static __init int cxl_core_init(void)
{
int rc;
+ cxl_debugfs = debugfs_create_dir("cxl", NULL);
+
cxl_mbox_init();
rc = cxl_memdev_init();
@@ -1541,22 +1869,28 @@ static __init int cxl_core_init(void)
if (rc)
goto err_bus;
+ rc = cxl_region_init();
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ bus_unregister(&cxl_bus_type);
err_bus:
destroy_workqueue(cxl_bus_wq);
err_wq:
cxl_memdev_exit();
- cxl_mbox_exit();
return rc;
}
static void cxl_core_exit(void)
{
+ cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
cxl_memdev_exit();
- cxl_mbox_exit();
+ debugfs_remove_recursive(cxl_debugfs);
}
module_init(cxl_core_init);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
new file mode 100644
index 000000000000..401148016978
--- /dev/null
+++ b/drivers/cxl/core/region.c
@@ -0,0 +1,1896 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/memregion.h>
+#include <linux/genalloc.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/idr.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl core region
+ *
+ * CXL Regions represent mapped memory capacity in system physical address
+ * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
+ * Memory ranges, Regions represent the active mapped capacity by the HDM
+ * Decoder Capability structures throughout the Host Bridges, Switches, and
+ * Endpoints in the topology.
+ *
+ * Region configuration has ordering constraints. UUID may be set at any time
+ * but is only visible for persistent regions.
+ * 1. Interleave granularity
+ * 2. Interleave size
+ * 3. Decoder targets
+ */
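+
+/*
+ * One illustrative configuration sequence (hypothetical device and decoder
+ * names) that honors the ordering above for a persistent region:
+ *
+ *   region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
+ *   echo "$region" > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
+ *   echo 256 > /sys/bus/cxl/devices/"$region"/interleave_granularity
+ *   echo 1 > /sys/bus/cxl/devices/"$region"/interleave_ways
+ *   echo "$(uuidgen)" > /sys/bus/cxl/devices/"$region"/uuid
+ *   echo $((256 << 20)) > /sys/bus/cxl/devices/"$region"/size
+ *   echo decoder3.0 > /sys/bus/cxl/devices/"$region"/target0
+ *   echo 1 > /sys/bus/cxl/devices/"$region"/commit
+ */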
+
+/*
+ * All changes to the interleave configuration occur with this lock held
+ * for write.
+ */
+static DECLARE_RWSEM(cxl_region_rwsem);
+
+static struct cxl_region *to_cxl_region(struct device *dev);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int is_dup(struct device *match, void *data)
+{
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ uuid_t *uuid = data;
+
+ if (!is_cxl_region(match))
+ return 0;
+
+ lockdep_assert_held(&cxl_region_rwsem);
+ cxlr = to_cxl_region(match);
+ p = &cxlr->params;
+
+ if (uuid_equal(&p->uuid, uuid)) {
+ dev_dbg(match, "already has uuid: %pUb\n", uuid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ uuid_t temp;
+ ssize_t rc;
+
+ if (len != UUID_STRING_LEN + 1)
+ return -EINVAL;
+
+ rc = uuid_parse(buf, &temp);
+ if (rc)
+ return rc;
+
+ if (uuid_is_null(&temp))
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (uuid_equal(&p->uuid, &temp))
+ goto out;
+
+ rc = -EBUSY;
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ goto out;
+
+ rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
+ if (rc < 0)
+ goto out;
+
+ uuid_copy(&p->uuid, &temp);
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(uuid);
+
+static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ return xa_load(&port->regions, (unsigned long)cxlr);
+}
+
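+/*
+ * Tear down HDM decoder programming for the first @count targets of @cxlr:
+ * for each endpoint decoder, walk from the host-bridge port back down toward
+ * the endpoint resetting each port's decoder, then reset the endpoint
+ * decoder itself (the reverse of the bottom-up commit below).
+ */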
+static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i;
+
+ for (i = count - 1; i >= 0; i--) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_ep *ep;
+ int rc;
+
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->reset(cxld);
+ if (rc)
+ return rc;
+ }
+
+ rc = cxled->cxld.reset(&cxled->cxld);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cxl_region_decode_commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+
+ /* commit bottom up */
+ for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->commit(cxld);
+ if (rc)
+ break;
+ }
+
+ if (rc) {
+ /* programming @iter failed, teardown */
+ for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ cxld->reset(cxld);
+ }
+
+ cxled->cxld.reset(&cxled->cxld);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ /* undo the targets that were successfully committed */
+ cxl_region_decode_reset(cxlr, i);
+ return rc;
+}
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ /* Already in the requested state? */
+ if (commit && p->state >= CXL_CONFIG_COMMIT)
+ goto out;
+ if (!commit && p->state < CXL_CONFIG_COMMIT)
+ goto out;
+
+ /* Not ready to commit? */
+ if (commit && p->state < CXL_CONFIG_ACTIVE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (commit)
+ rc = cxl_region_decode_commit(cxlr);
+ else {
+ p->state = CXL_CONFIG_RESET_PENDING;
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+
+ /*
+ * The lock was dropped, so need to revalidate that the reset is
+ * still pending.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING)
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ }
+
+ if (rc)
+ goto out;
+
+ if (commit)
+ p->state = CXL_CONFIG_COMMIT;
+ else if (p->state == CXL_CONFIG_RESET_PENDING)
+ p->state = CXL_CONFIG_ACTIVE;
+
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+
+static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(commit);
+
+static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ return 0;
+ return a->mode;
+}
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static const struct attribute_group *get_cxl_region_target_group(void);
+
+static ssize_t interleave_ways_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ unsigned int val, save;
+ int rc;
+ u8 iw;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = ways_to_cxl(val, &iw);
+ if (rc)
+ return rc;
+
+ /*
+ * Even for x3, x6, and x12 interleaves the region interleave must be a
+ * power of 2 multiple of the host bridge interleave.
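+ *
+ * E.g. a host bridge interleaved x2 admits region interleave_ways of
+ * 2, 4, 8 or 16, while a host bridge interleaved x3 admits 3, 6 or 12.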
+ */
+ if (!is_power_of_2(val / cxld->interleave_ways) ||
+ (val % cxld->interleave_ways)) {
+ dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ save = p->interleave_ways;
+ p->interleave_ways = val;
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
+ if (rc)
+ p->interleave_ways = save;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_ways);
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static ssize_t interleave_granularity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc, val;
+ u16 ig;
+
+ rc = kstrtoint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = granularity_to_cxl(val, &ig);
+ if (rc)
+ return rc;
+
+ /*
+ * When the host-bridge is interleaved, disallow region granularity !=
+ * root granularity. Regions with a granularity less than the root
+ * interleave result in needing multiple endpoints to support a single
+ * slot in the interleave (possible to support in the future). Regions
+ * with a granularity greater than the root interleave result in invalid
+ * DPA translations (invalid to support).
+ */
+ if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ p->interleave_granularity = val;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_granularity);
+
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 resource = -1ULL;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ resource = p->res->start;
+ rc = sysfs_emit(buf, "%#llx\n", resource);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct resource *res;
+ u32 remainder = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ /* Nothing to do... */
+ if (p->res && resource_size(p->res) == size)
+ return 0;
+
+ /* To change size the old size must be freed first */
+ if (p->res)
+ return -EBUSY;
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
+
+ /* ways, granularity and uuid (if PMEM) need to be set before HPA */
+ if (!p->interleave_ways || !p->interleave_granularity ||
+ (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ return -ENXIO;
+
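+ /*
+ * The size must be a whole multiple of SZ_256M per interleave way,
+ * e.g. a 2-way region is sized in multiples of 512M and a 4-way
+ * region in multiples of 1G.
+ */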
+ div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+ if (remainder)
+ return -EINVAL;
+
+ res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
+ dev_name(&cxlr->dev));
+ if (IS_ERR(res)) {
+ dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
+ PTR_ERR(res));
+ return PTR_ERR(res);
+ }
+
+ p->res = res;
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+
+ return 0;
+}
+
+static void cxl_region_iomem_release(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (device_is_registered(&cxlr->dev))
+ lockdep_assert_held_write(&cxl_region_rwsem);
+ if (p->res) {
+ remove_resource(p->res);
+ kfree(p->res);
+ p->res = NULL;
+ }
+}
+
+static int free_hpa(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!p->res)
+ return 0;
+
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ return -EBUSY;
+
+ cxl_region_iomem_release(cxlr);
+ p->state = CXL_CONFIG_IDLE;
+ return 0;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ u64 val;
+ int rc;
+
+ rc = kstrtou64(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (val)
+ rc = alloc_hpa(cxlr, val);
+ else
+ rc = free_hpa(cxlr);
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 size = 0;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ size = resource_size(p->res);
+ rc = sysfs_emit(buf, "%#llx\n", size);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(size);
+
+static struct attribute *cxl_region_attrs[] = {
+ &dev_attr_uuid.attr,
+ &dev_attr_commit.attr,
+ &dev_attr_interleave_ways.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group cxl_region_group = {
+ .attrs = cxl_region_attrs,
+ .is_visible = cxl_region_visible,
+};
+
+static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled = p->targets[pos];
+ if (!cxled)
+ rc = sysfs_emit(buf, "\n");
+ else
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
+out:
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int match_free_decoder(struct device *dev, void *data)
+{
+ struct cxl_decoder *cxld;
+ int *id = data;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+
+ /* enforce ordered allocation */
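+ /*
+ * E.g. decoder N of a port only becomes eligible once decoders
+ * 0..N-1 are each already associated with a region.
+ */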
+ if (cxld->id != *id)
+ return 0;
+
+ if (!cxld->region)
+ return 1;
+
+ (*id)++;
+
+ return 0;
+}
+
+static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct device *dev;
+ int id = 0;
+
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (!dev)
+ return NULL;
+ /*
+ * This decoder is pinned as long as the endpoint decoder is
+ * registered, and endpoint decoder unregistration holds the
+ * cxl_region_rwsem over unregister events, so there is no need to
+ * hold on to this extra reference.
+ */
+ put_device(dev);
+ return to_cxl_decoder(dev);
+}
+
+static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_region_ref *cxl_rr, *iter;
+ unsigned long index;
+ int rc;
+
+ xa_for_each(&port->regions, index, iter) {
+ struct cxl_region_params *ip = &iter->region->params;
+
+ if (ip->res->start > p->res->start) {
+ dev_dbg(&cxlr->dev,
+ "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
+ cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
+ if (!cxl_rr)
+ return ERR_PTR(-ENOMEM);
+ cxl_rr->port = port;
+ cxl_rr->region = cxlr;
+ cxl_rr->nr_targets = 1;
+ xa_init(&cxl_rr->endpoints);
+
+ rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track region reference: %d\n",
+ dev_name(&port->dev), rc);
+ kfree(cxl_rr);
+ return ERR_PTR(rc);
+ }
+
+ return cxl_rr;
+}
+
+static void free_region_ref(struct cxl_region_ref *cxl_rr)
+{
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+
+ dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
+ if (cxld->region == cxlr) {
+ cxld->region = NULL;
+ put_device(&cxlr->dev);
+ }
+
+ xa_erase(&port->regions, (unsigned long)cxlr);
+ xa_destroy(&cxl_rr->endpoints);
+ kfree(cxl_rr);
+}
+
+static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ int rc;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
+
+ if (ep) {
+ rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+ }
+ cxl_rr->nr_eps++;
+
+ if (!cxld->region) {
+ cxld->region = cxlr;
+ get_device(&cxlr->dev);
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_port_attach_region() - track a region's interest in a port by endpoint
+ * @port: port to add a new region reference 'struct cxl_region_ref'
+ * @cxlr: region to attach to @port
+ * @cxled: endpoint decoder used to create or further pin a region reference
+ * @pos: interleave position of @cxled in @cxlr
+ *
+ * The attach event is an opportunity to validate CXL decode setup
+ * constraints and record metadata needed for programming HDM decoders,
+ * in particular decoder target lists.
+ *
+ * The steps are:
+ *
+ * - validate that there are no other regions with a higher HPA already
+ * associated with @port
+ * - establish a region reference if one is not already present
+ *
+ * - additionally allocate a decoder instance that will host @cxlr on
+ * @port
+ *
+ * - pin the region reference by the endpoint
+ * - account for how many entries in @port's target list are needed to
+ * cover all of the added endpoints.
+ */
+static int cxl_port_attach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_ref *cxl_rr;
+ bool nr_targets_inc = false;
+ struct cxl_decoder *cxld;
+ unsigned long index;
+ int rc = -EBUSY;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (cxl_rr) {
+ struct cxl_ep *ep_iter;
+ int found = 0;
+
+ /*
+ * Walk the existing endpoints that have been attached to
+ * @cxlr at @port and see if they share the same 'next' port
+ * in the downstream direction, i.e. endpoints that share a common
+ * upstream switch.
+ */
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter == ep)
+ continue;
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+
+ /*
+ * New target port, or @port is an endpoint port that always
+ * accounts its own local decode as a target.
+ */
+ if (!found || !ep->next) {
+ cxl_rr->nr_targets++;
+ nr_targets_inc = true;
+ }
+
+ /*
+ * The decoder for @cxlr was allocated when the region was first
+ * attached to @port.
+ */
+ cxld = cxl_rr->decoder;
+ } else {
+ cxl_rr = alloc_region_ref(port, cxlr);
+ if (IS_ERR(cxl_rr)) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to allocate region reference\n",
+ dev_name(&port->dev));
+ return PTR_ERR(cxl_rr);
+ }
+ nr_targets_inc = true;
+
+ if (port == cxled_to_port(cxled))
+ cxld = &cxled->cxld;
+ else
+ cxld = cxl_region_find_decoder(port, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ goto out_erase;
+ }
+
+ if (cxld->region) {
+ dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
+ dev_name(&port->dev), dev_name(&cxld->dev),
+ dev_name(&cxld->region->dev));
+ rc = -EBUSY;
+ goto out_erase;
+ }
+
+ cxl_rr->decoder = cxld;
+ }
+
+ rc = cxl_rr_ep_add(cxl_rr, cxled);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track endpoint %s:%s reference\n",
+ dev_name(&port->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxld->dev));
+ goto out_erase;
+ }
+
+ dev_dbg(&cxlr->dev,
+ "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxled->cxld.dev), pos,
+ ep ? ep->next ? dev_name(ep->next->uport) :
+ dev_name(&cxlmd->dev) :
+ "none",
+ cxl_rr->nr_eps, cxl_rr->nr_targets);
+
+ return 0;
+out_erase:
+ if (nr_targets_inc)
+ cxl_rr->nr_targets--;
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+ return rc;
+}
+
+static void cxl_port_detach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_ep *ep = NULL;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (!cxl_rr)
+ return;
+
+ /*
+ * Endpoint ports do not carry cxl_ep references, and they
+ * never target more than one endpoint by definition
+ */
+ if (cxl_rr->decoder == &cxled->cxld)
+ cxl_rr->nr_eps--;
+ else
+ ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
+ if (ep) {
+ struct cxl_ep *ep_iter;
+ unsigned long index;
+ int found = 0;
+
+ cxl_rr->nr_eps--;
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+ if (!found)
+ cxl_rr->nr_targets--;
+ }
+
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+}
+
+static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+ struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
+ int distance)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled_peer;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_memdev *cxlmd_peer;
+ struct cxl_ep *ep_peer;
+ int pos = cxled->pos;
+
+ /*
+ * If this position wants to share a dport with the last endpoint mapped
+ * then that endpoint, at index 'position - distance', must also be
+ * mapped by this dport.
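+ *
+ * E.g. in a 4-way region routed through a switch with 2 targets the
+ * distance is 2, so position 2 must use the same dport as position 0,
+ * and position 3 the same dport as position 1.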
+ */
+ if (pos < distance) {
+ dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxled_peer = p->targets[pos - distance];
+ cxlmd_peer = cxled_to_memdev(cxled_peer);
+ ep_peer = cxl_ep_load(port, cxlmd_peer);
+ if (ep->dport != ep_peer->dport) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
+ dev_name(&cxlmd_peer->dev),
+ dev_name(&cxled_peer->cxld.dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cxl_port_setup_targets(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_switch_decoder *cxlsd;
+ u16 eig, peig;
+ u8 eiw, peiw;
+
+ /*
+ * While root level decoders support x3, x6, x12, switch level
+ * decoders only support powers of 2 up to x16.
+ */
+ if (!is_power_of_2(cxl_rr->nr_targets)) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets);
+ return -EINVAL;
+ }
+
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ if (cxl_rr->nr_targets_set) {
+ int i, distance;
+
+ distance = p->nr_targets / cxl_rr->nr_targets;
+ for (i = 0; i < cxl_rr->nr_targets_set; i++)
+ if (ep->dport == cxlsd->target[i]) {
+ rc = check_last_peer(cxled, ep, cxl_rr,
+ distance);
+ if (rc)
+ return rc;
+ goto out_target_set;
+ }
+ goto add_target;
+ }
+
+ if (is_cxl_root(parent_port)) {
+ parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+ * switch ports.
+ */
+ if (!is_power_of_2(parent_iw))
+ parent_iw /= 3;
+ } else {
+ struct cxl_region_ref *parent_rr;
+ struct cxl_decoder *parent_cxld;
+
+ parent_rr = cxl_rr_load(parent_port, cxlr);
+ parent_cxld = parent_rr->decoder;
+ parent_ig = parent_cxld->interleave_granularity;
+ parent_iw = parent_cxld->interleave_ways;
+ }
+
+ rc = granularity_to_cxl(parent_ig, &peig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_ig);
+ return rc;
+ }
+
+ rc = ways_to_cxl(parent_iw, &peiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_iw);
+ return rc;
+ }
+
+ iw = cxl_rr->nr_targets;
+ rc = ways_to_cxl(iw, &eiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev), iw);
+ return rc;
+ }
+
+ /*
+ * If @parent_port is masking address bits, pick the next unused address
+ * bit to route @port's targets.
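+ *
+ * E.g. a root decoder interleaving x2 at 256B granularity consumes
+ * address bit 8, so a switch below it with 2 targets routes on the
+ * next bit up and ends up with a 512B granularity.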
+ */
+ if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
+ u32 address_bit = max(peig + peiw, eiw + peig);
+
+ eig = address_bit - eiw + 1;
+ } else {
+ eiw = peiw;
+ eig = peig;
+ }
+
+ rc = cxl_to_granularity(eig, &ig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ 256 << eig);
+ return rc;
+ }
+
+ cxld->interleave_ways = iw;
+ cxld->interleave_granularity = ig;
+ cxld->hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+ dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+ dev_name(&port->dev), iw, ig);
+add_target:
+ if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: targets full trying to add %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ inc = 1;
+out_target_set:
+ cxl_rr->nr_targets_set += inc;
+ dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+
+ return 0;
+}
+
+static void cxl_port_reset_targets(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_decoder *cxld;
+
+ /*
+ * After the last endpoint has been detached the entire cxl_rr may now
+ * be gone.
+ */
+ if (!cxl_rr)
+ return;
+ cxl_rr->nr_targets_set = 0;
+
+ cxld = cxl_rr->decoder;
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+}
+
+static void cxl_region_teardown_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
+ cxl_port_reset_targets(iter, cxlr);
+ }
+}
+
+static int cxl_region_setup_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i, rc;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ /*
+ * Descend the topology tree programming targets while
+ * looking for conflicts.
+ */
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ rc = cxl_port_setup_targets(iter, cxlr, cxled);
+ if (rc) {
+ cxl_region_teardown_targets(cxlr);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_region_attach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *ep_port, *root_port, *iter;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_dport *dport;
+ int i, rc = -ENXIO;
+
+ if (cxled->mode == CXL_DECODER_DEAD) {
+ dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
+ return -ENODEV;
+ }
+
+ /* region already full of members, or interleave config not established? */
+ if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "region already active\n");
+ return -EBUSY;
+ } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "interleave config missing\n");
+ return -ENXIO;
+ }
+
+ if (pos < 0 || pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ return -ENXIO;
+ }
+
+ if (p->targets[pos] == cxled)
+ return 0;
+
+ if (p->targets[pos]) {
+ struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
+ struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
+
+ dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
+ pos, dev_name(&cxlmd_target->dev),
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+
+ for (i = 0; i < p->interleave_ways; i++) {
+ struct cxl_endpoint_decoder *cxled_target;
+ struct cxl_memdev *cxlmd_target;
+
+ cxled_target = p->targets[i];
+ if (!cxled_target)
+ continue;
+
+ cxlmd_target = cxled_to_memdev(cxled_target);
+ if (cxlmd_target == cxlmd) {
+ dev_dbg(&cxlr->dev,
+ "%s already specified at position %d via: %s\n",
+ dev_name(&cxlmd->dev), pos,
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+ }
+
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+ if (!dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(cxlr->dev.parent));
+ return -ENXIO;
+ }
+
+ if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+ return -ENXIO;
+ }
+
+ if (cxled->cxld.target_type != cxlr->type) {
+ dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->cxld.target_type, cxlr->type);
+ return -ENXIO;
+ }
+
+ if (!cxled->dpa_res) {
+ dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
+ return -ENXIO;
+ }
+
+ if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ resource_size(p->res)) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ (u64)resource_size(cxled->dpa_res), p->interleave_ways,
+ (u64)resource_size(p->res));
+ return -EINVAL;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+ if (rc)
+ goto err;
+ }
+
+ p->targets[pos] = cxled;
+ cxled->pos = pos;
+ p->nr_targets++;
+
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+ goto err_decrement;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ cxled->cxld.interleave_ways = p->interleave_ways;
+ cxled->cxld.interleave_granularity = p->interleave_granularity;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+
+ return 0;
+
+err_decrement:
+ p->nr_targets--;
+err:
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+ return rc;
+}
+
+static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
+ struct cxl_region *cxlr = cxled->cxld.region;
+ struct cxl_region_params *p;
+ int rc = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!cxlr)
+ return 0;
+
+ p = &cxlr->params;
+ get_device(&cxlr->dev);
+
+ if (p->state > CXL_CONFIG_ACTIVE) {
+ /*
+ * TODO: tear down all impacted regions if a device is
+ * removed out of order
+ */
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ if (rc)
+ goto out;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+
+ if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
+ p->targets[cxled->pos] != cxled) {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->pos);
+ goto out;
+ }
+
+ if (p->state == CXL_CONFIG_ACTIVE) {
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+ cxl_region_teardown_targets(cxlr);
+ }
+ p->targets[cxled->pos] = NULL;
+ p->nr_targets--;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+
+ /* notify the region driver that one of its targets has departed */
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+out:
+ put_device(&cxlr->dev);
+ return rc;
+}
+
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+ down_write(&cxl_region_rwsem);
+ cxled->mode = CXL_DECODER_DEAD;
+ cxl_region_detach(cxled);
+ up_write(&cxl_region_rwsem);
+}
+
+static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
+{
+ struct device *dev;
+ int rc;
+
+ dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
+ if (!dev)
+ return -ENODEV;
+
+ if (!is_endpoint_decoder(dev)) {
+ put_device(dev);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ goto out;
+ down_read(&cxl_dpa_rwsem);
+ rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
+ up_read(&cxl_dpa_rwsem);
+ up_write(&cxl_region_rwsem);
+out:
+ put_device(dev);
+ return rc;
+}
+
+static int detach_target(struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!p->targets[pos]) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = cxl_region_detach(p->targets[pos]);
+out:
+ up_write(&cxl_region_rwsem);
+ return rc;
+}
+
+static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
+ size_t len)
+{
+ int rc;
+
+ if (sysfs_streq(buf, "\n"))
+ rc = detach_target(cxlr, pos);
+ else
+ rc = attach_target(cxlr, buf, pos);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
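+
+/*
+ * Sysfs usage: writing the name of an endpoint decoder (e.g.
+ * "decoder5.0") to a region's targetN attribute attaches that decoder
+ * at interleave position N, while writing an empty string detaches it.
+ */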
+
+#define TARGET_ATTR_RW(n) \
+static ssize_t target##n##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ return show_targetN(to_cxl_region(dev), buf, (n)); \
+} \
+static ssize_t target##n##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_targetN(to_cxl_region(dev), buf, (n), len); \
+} \
+static DEVICE_ATTR_RW(target##n)
+
+TARGET_ATTR_RW(0);
+TARGET_ATTR_RW(1);
+TARGET_ATTR_RW(2);
+TARGET_ATTR_RW(3);
+TARGET_ATTR_RW(4);
+TARGET_ATTR_RW(5);
+TARGET_ATTR_RW(6);
+TARGET_ATTR_RW(7);
+TARGET_ATTR_RW(8);
+TARGET_ATTR_RW(9);
+TARGET_ATTR_RW(10);
+TARGET_ATTR_RW(11);
+TARGET_ATTR_RW(12);
+TARGET_ATTR_RW(13);
+TARGET_ATTR_RW(14);
+TARGET_ATTR_RW(15);
+
+static struct attribute *target_attrs[] = {
+ &dev_attr_target0.attr,
+ &dev_attr_target1.attr,
+ &dev_attr_target2.attr,
+ &dev_attr_target3.attr,
+ &dev_attr_target4.attr,
+ &dev_attr_target5.attr,
+ &dev_attr_target6.attr,
+ &dev_attr_target7.attr,
+ &dev_attr_target8.attr,
+ &dev_attr_target9.attr,
+ &dev_attr_target10.attr,
+ &dev_attr_target11.attr,
+ &dev_attr_target12.attr,
+ &dev_attr_target13.attr,
+ &dev_attr_target14.attr,
+ &dev_attr_target15.attr,
+ NULL,
+};
+
+static umode_t cxl_region_target_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+
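+ /*
+ * Only expose as many targetN attributes as the region has interleave
+ * positions, e.g. a x4 region shows target0 through target3.
+ */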
+ if (n < p->interleave_ways)
+ return a->mode;
+ return 0;
+}
+
+static const struct attribute_group cxl_region_target_group = {
+ .attrs = target_attrs,
+ .is_visible = cxl_region_target_visible,
+};
+
+static const struct attribute_group *get_cxl_region_target_group(void)
+{
+ return &cxl_region_target_group;
+}
+
+static const struct attribute_group *region_groups[] = {
+ &cxl_base_attribute_group,
+ &cxl_region_group,
+ &cxl_region_target_group,
+ NULL,
+};
+
+static void cxl_region_release(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ memregion_free(cxlr->id);
+ kfree(cxlr);
+}
+
+const struct device_type cxl_region_type = {
+ .name = "cxl_region",
+ .release = cxl_region_release,
+ .groups = region_groups
+};
+
+bool is_cxl_region(struct device *dev)
+{
+ return dev->type == &cxl_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
+
+static struct cxl_region *to_cxl_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
+ "not a cxl_region device\n"))
+ return NULL;
+
+ return container_of(dev, struct cxl_region, dev);
+}
+
+static void unregister_region(void *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ device_del(dev);
+ cxl_region_iomem_release(cxlr);
+ put_device(dev);
+}
+
+static struct lock_class_key cxl_region_key;
+
+static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
+{
+ struct cxl_region *cxlr;
+ struct device *dev;
+
+ cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
+ if (!cxlr) {
+ memregion_free(id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = &cxlr->dev;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_region_key);
+ dev->parent = &cxlrd->cxlsd.cxld.dev;
+ device_set_pm_not_required(dev);
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_region_type;
+ cxlr->id = id;
+
+ return cxlr;
+}
+
+/**
+ * devm_cxl_add_region - Adds a region to a decoder
+ * @cxlrd: root decoder
+ * @id: memregion id to create, or memregion_free() on failure
+ * @mode: mode for the endpoint decoders of this region
+ * @type: select whether this is an expander or accelerator (type-2 or type-3)
+ *
+ * This is the second step of region initialization. Regions exist within an
+ * address space which is mapped by a @cxlrd.
+ *
+ * Return: a valid cxl_region pointer if the region was added to @cxlrd, else an
+ * ERR_PTR(). The region will be named "regionZ" where Z is the unique region number.
+ */
+static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
+ int id,
+ enum cxl_decoder_mode mode,
+ enum cxl_decoder_type type)
+{
+ struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+ struct cxl_region *cxlr;
+ struct device *dev;
+ int rc;
+
+ cxlr = cxl_region_alloc(cxlrd, id);
+ if (IS_ERR(cxlr))
+ return cxlr;
+ cxlr->mode = mode;
+ cxlr->type = type;
+
+ dev = &cxlr->dev;
+ rc = dev_set_name(dev, "region%d", id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_dbg(port->uport, "%s: created %s\n",
+ dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
+ return cxlr;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+
+static ssize_t create_pmem_region_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
+}
+
+static ssize_t create_pmem_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_region *cxlr;
+ int id, rc;
+
+ rc = sscanf(buf, "region%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+
+ if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
+ memregion_free(rc);
+ return -EBUSY;
+ }
+
+ cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
+ CXL_DECODER_EXPANDER);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ return len;
+}
+DEVICE_ATTR_RW(create_pmem_region);
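+
+/*
+ * Usage: reading create_pmem_region returns the name of the next region
+ * to be created (e.g. "region0"); writing that same name back claims
+ * the id and instantiates the region device, while a stale name fails
+ * with -EBUSY.
+ */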
+
+static ssize_t region_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (cxld->region)
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ else
+ rc = sysfs_emit(buf, "\n");
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+DEVICE_ATTR_RO(region);
+
+static struct cxl_region *
+cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct device *region_dev;
+
+ region_dev = device_find_child_by_name(&cxld->dev, name);
+ if (!region_dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_cxl_region(region_dev);
+}
+
+static ssize_t delete_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_region *cxlr;
+
+ cxlr = cxl_find_region_by_name(cxlrd, buf);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ devm_release_action(port->uport, unregister_region, cxlr);
+ put_device(&cxlr->dev);
+
+ return len;
+}
+DEVICE_ATTR_WO(delete_region);
+
+static void cxl_pmem_region_release(struct device *dev)
+{
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ int i;
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
+
+ put_device(&cxlmd->dev);
+ }
+
+ kfree(cxlr_pmem);
+}
+
+static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_pmem_region_type = {
+ .name = "cxl_pmem_region",
+ .release = cxl_pmem_region_release,
+ .groups = cxl_pmem_region_attribute_groups,
+};
+
+bool is_cxl_pmem_region(struct device *dev)
+{
+ return dev->type == &cxl_pmem_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
+
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
+ "not a cxl_pmem_region device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_pmem_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+
+static struct lock_class_key cxl_pmem_region_key;
+
+static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int i;
+
+ down_read(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT) {
+ cxlr_pmem = ERR_PTR(-ENXIO);
+ goto out;
+ }
+
+ cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
+ GFP_KERNEL);
+ if (!cxlr_pmem) {
+ cxlr_pmem = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ cxlr_pmem->hpa_range.start = p->res->start;
+ cxlr_pmem->hpa_range.end = p->res->end;
+
+ /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ cxlr_pmem->nr_mappings = p->nr_targets;
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+
+ m->cxlmd = cxlmd;
+ get_device(&cxlmd->dev);
+ m->start = cxled->dpa_res->start;
+ m->size = resource_size(cxled->dpa_res);
+ m->position = i;
+ }
+
+ dev = &cxlr_pmem->dev;
+ cxlr_pmem->cxlr = cxlr;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlr->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_pmem_region_type;
+out:
+ up_read(&cxl_region_rwsem);
+
+ return cxlr_pmem;
+}
+
+static void cxlr_pmem_unregister(void *dev)
+{
+ device_unregister(dev);
+}
+
+/**
+ * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
+ * @cxlr: parent CXL region for this pmem region bridge device
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int rc;
+
+ cxlr_pmem = cxl_pmem_region_alloc(cxlr);
+ if (IS_ERR(cxlr_pmem))
+ return PTR_ERR(cxlr_pmem);
+
+ dev = &cxlr_pmem->dev;
+ rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "probe interrupted\n");
+ return rc;
+ }
+
+ if (p->state < CXL_CONFIG_COMMIT) {
+ dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
+ rc = -ENXIO;
+ }
+
+ /*
+ * From this point on any path that changes the region's state away from
+ * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
+ */
+ up_read(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ switch (cxlr->mode) {
+ case CXL_DECODER_PMEM:
+ return devm_cxl_add_pmem_region(cxlr);
+ default:
+ dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
+ cxlr->mode);
+ return -ENXIO;
+ }
+}
+
+static struct cxl_driver cxl_region_driver = {
+ .name = "cxl_region",
+ .probe = cxl_region_probe,
+ .id = CXL_DEVICE_REGION,
+};
+
+int cxl_region_init(void)
+{
+ return cxl_driver_register(&cxl_region_driver);
+}
+
+void cxl_region_exit(void)
+{
+ cxl_driver_unregister(&cxl_region_driver);
+}
+
+MODULE_IMPORT_NS(CXL);
+MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6799b27c7db2..f680450f0b16 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -7,6 +7,7 @@
#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/log2.h>
#include <linux/io.h>
/**
@@ -53,9 +54,12 @@
#define CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
+#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
+#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
+#define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i)
static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
@@ -64,6 +68,57 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
return val ? val * 2 : 1;
}
+/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
+static inline int cxl_to_granularity(u16 ig, unsigned int *val)
+{
+ if (ig > 6)
+ return -EINVAL;
+ *val = 256 << ig;
+ return 0;
+}
+
+/* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */
+static inline int cxl_to_ways(u8 eniw, unsigned int *val)
+{
+ switch (eniw) {
+ case 0 ... 4:
+ *val = 1 << eniw;
+ break;
+ case 8 ... 10:
+ *val = 3 << (eniw - 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int granularity_to_cxl(int g, u16 *ig)
+{
+ if (g > SZ_16K || g < 256 || !is_power_of_2(g))
+ return -EINVAL;
+ *ig = ilog2(g) - 8;
+ return 0;
+}
+
+static inline int ways_to_cxl(unsigned int ways, u8 *iw)
+{
+ if (ways > 16)
+ return -EINVAL;
+ if (is_power_of_2(ways)) {
+ *iw = ilog2(ways);
+ return 0;
+ }
+ if (ways % 3)
+ return -EINVAL;
+ ways /= 3;
+ if (!is_power_of_2(ways))
+ return -EINVAL;
+ *iw = ilog2(ways) + 8;
+ return 0;
+}
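+
+/*
+ * Example encodings: interleave ways 1, 2, 4, 8 and 16 encode to iw
+ * 0..4 and ways 3, 6 and 12 encode to iw 8..10; granularities of 256B
+ * through 16K encode to ig 0..6 (granularity = 256 << ig).
+ */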
+
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -193,31 +248,77 @@ enum cxl_decoder_type {
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
+#define CXL_DECODER_MIN_GRANULARITY 256
+
/**
- * struct cxl_decoder - CXL address range decode configuration
+ * struct cxl_decoder - Common CXL HDM Decoder Attributes
* @dev: this decoder's device
* @id: kernel device name id
- * @platform_res: address space resources considered by root decoder
- * @decoder_range: address space resources considered by midlevel decoder
+ * @hpa_range: Host physical address range mapped by this decoder
* @interleave_ways: number of cxl_dports in this decode
* @interleave_granularity: data stride per dport
* @target_type: accelerator vs expander (type2 vs type3) selector
+ * @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
- * @target_lock: coordinate coherent reads of the target list
- * @nr_targets: number of elements in @target
- * @target: active ordered target list in current decoder configuration
- */
+ * @commit: device/decoder-type specific callback to commit settings to hw
+ * @reset: device/decoder-type specific callback to reset hw settings
+ */
struct cxl_decoder {
struct device dev;
int id;
- union {
- struct resource platform_res;
- struct range decoder_range;
- };
+ struct range hpa_range;
int interleave_ways;
int interleave_granularity;
enum cxl_decoder_type target_type;
+ struct cxl_region *region;
unsigned long flags;
+ int (*commit)(struct cxl_decoder *cxld);
+ int (*reset)(struct cxl_decoder *cxld);
+};
+
+/*
+ * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
+ * while cxld_unregister() is running
+ */
+enum cxl_decoder_mode {
+ CXL_DECODER_NONE,
+ CXL_DECODER_RAM,
+ CXL_DECODER_PMEM,
+ CXL_DECODER_MIXED,
+ CXL_DECODER_DEAD,
+};
+
+/**
+ * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
+ * @cxld: base cxl_decoder_object
+ * @dpa_res: actively claimed DPA span of this decoder
+ * @skip: offset into @dpa_res where @cxld.hpa_range maps
+ * @mode: which memory type / access-mode-partition this decoder targets
+ * @pos: interleave position in @cxld.region
+ */
+struct cxl_endpoint_decoder {
+ struct cxl_decoder cxld;
+ struct resource *dpa_res;
+ resource_size_t skip;
+ enum cxl_decoder_mode mode;
+ int pos;
+};
+
+/**
+ * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
+ * @cxld: base cxl_decoder object
+ * @target_lock: coordinate coherent reads of the target list
+ * @nr_targets: number of elements in @target
+ * @target: active ordered target list in current decoder configuration
+ *
+ * The 'switch' decoder type represents the decoder instances of cxl_ports that
+ * route from the root of a CXL memory decode topology to the endpoints. They
+ * come in two flavors, root-level decoders, statically defined by platform
+ * firmware, and mid-level decoders, where interleave-granularity,
+ * interleave-width, and the target list are mutable.
+ */
+struct cxl_switch_decoder {
+ struct cxl_decoder cxld;
seqlock_t target_lock;
int nr_targets;
struct cxl_dport *target[];
@@ -225,6 +326,76 @@ struct cxl_decoder {
/**
+ * struct cxl_root_decoder - Static platform CXL address decoder
+ * @res: host / parent resource for region allocations
+ * @region_id: region id for next region provisioning event
+ * @calc_hb: which host bridge covers the n'th position by granularity
+ * @cxlsd: base cxl switch decoder
+ */
+struct cxl_root_decoder {
+ struct resource *res;
+ atomic_t region_id;
+ struct cxl_dport *(*calc_hb)(struct cxl_root_decoder *cxlrd, int pos);
+ struct cxl_switch_decoder cxlsd;
+};
+
+/*
+ * enum cxl_config_state - State machine for region configuration
+ * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
+ * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
+ * changes to interleave_ways or interleave_granularity
+ * @CXL_CONFIG_ACTIVE: All targets have been added and the region is
+ * now active
+ * @CXL_CONFIG_RESET_PENDING: see commit_store()
+ * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware
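+ *
+ * Typical flow: IDLE -> INTERLEAVE_ACTIVE once an HPA allocation is
+ * made, -> ACTIVE once every interleave position has an endpoint
+ * decoder, -> COMMIT once the decode is programmed to hardware, and
+ * back through RESET_PENDING to ACTIVE on decommit.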
+ */
+enum cxl_config_state {
+ CXL_CONFIG_IDLE,
+ CXL_CONFIG_INTERLEAVE_ACTIVE,
+ CXL_CONFIG_ACTIVE,
+ CXL_CONFIG_RESET_PENDING,
+ CXL_CONFIG_COMMIT,
+};
+
+/**
+ * struct cxl_region_params - region settings
+ * @state: allow the driver to lockdown further parameter changes
+ * @uuid: unique id for persistent regions
+ * @interleave_ways: number of endpoints in the region
+ * @interleave_granularity: capacity each endpoint contributes to a stripe
+ * @res: allocated iomem capacity for this region
+ * @targets: active ordered targets in current decoder configuration
+ * @nr_targets: number of targets
+ *
+ * State transitions are protected by the cxl_region_rwsem
+ */
+struct cxl_region_params {
+ enum cxl_config_state state;
+ uuid_t uuid;
+ int interleave_ways;
+ int interleave_granularity;
+ struct resource *res;
+ struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
+ int nr_targets;
+};
+
+/**
+ * struct cxl_region - CXL region
+ * @dev: This region's device
+ * @id: This region's id. Id is globally unique across all regions
+ * @mode: Endpoint decoder allocation / access mode
+ * @type: Endpoint decoder target type
+ * @params: active + config params for the region
+ */
+struct cxl_region {
+ struct device dev;
+ int id;
+ enum cxl_decoder_mode mode;
+ enum cxl_decoder_type type;
+ struct cxl_region_params params;
+};
+
+/**
* enum cxl_nvdimm_brige_state - state machine for managing bus rescans
* @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
* @CXL_NVB_DEAD: Set at brige unregistration to preclude async probing
@@ -251,7 +422,26 @@ struct cxl_nvdimm_bridge {
struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
- struct nvdimm *nvdimm;
+ struct cxl_nvdimm_bridge *bridge;
+ struct cxl_pmem_region *region;
+};
+
+struct cxl_pmem_region_mapping {
+ struct cxl_memdev *cxlmd;
+ struct cxl_nvdimm *cxl_nvd;
+ u64 start;
+ u64 size;
+ int position;
+};
+
+struct cxl_pmem_region {
+ struct device dev;
+ struct cxl_region *cxlr;
+ struct nd_region *nd_region;
+ struct cxl_nvdimm_bridge *bridge;
+ struct range hpa_range;
+ int nr_mappings;
+ struct cxl_pmem_region_mapping mapping[];
};
/**
@@ -260,50 +450,94 @@ struct cxl_nvdimm {
* decode hierarchy.
* @dev: this port's device
* @uport: PCI or platform device implementing the upstream port capability
+ * @host_bridge: Shortcut to the platform attach point for this port
* @id: id for port device-name
* @dports: cxl_dport instances referenced by decoders
* @endpoints: cxl_ep instances, endpoints that are a descendant of this port
+ * @regions: cxl_region_ref instances, regions mapped by this port
+ * @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
+ * @hdm_end: track last allocated HDM decoder instance for allocation ordering
+ * @commit_end: cursor to track highest committed decoder for commit ordering
* @component_reg_phys: component register capability base address (optional)
* @dead: last ep has been removed, force port re-creation
* @depth: How deep this port is relative to the root. depth 0 is the root.
+ * @cdat: Cached CDAT data
+ * @cdat_available: Should a CDAT attribute be available in sysfs
*/
struct cxl_port {
struct device dev;
struct device *uport;
+ struct device *host_bridge;
int id;
- struct list_head dports;
- struct list_head endpoints;
+ struct xarray dports;
+ struct xarray endpoints;
+ struct xarray regions;
+ struct cxl_dport *parent_dport;
struct ida decoder_ida;
+ int hdm_end;
+ int commit_end;
resource_size_t component_reg_phys;
bool dead;
unsigned int depth;
+ struct cxl_cdat {
+ void *table;
+ size_t length;
+ } cdat;
+ bool cdat_available;
};
+static inline struct cxl_dport *
+cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
+{
+ return xa_load(&port->dports, (unsigned long)dport_dev);
+}
+
/**
* struct cxl_dport - CXL downstream port
* @dport: PCI bridge or firmware device representing the downstream link
* @port_id: unique hardware identifier for dport in decoder target list
* @component_reg_phys: downstream port component registers
* @port: reference to cxl_port that contains this downstream port
- * @list: node for a cxl_port's list of cxl_dport instances
*/
struct cxl_dport {
struct device *dport;
int port_id;
resource_size_t component_reg_phys;
struct cxl_port *port;
- struct list_head list;
};
/**
* struct cxl_ep - track an endpoint's interest in a port
* @ep: device that hosts a generic CXL endpoint (expander or accelerator)
- * @list: node on port->endpoints list
+ * @dport: which dport routes to this endpoint on @port
+ * @next: cxl switch port across the link attached to @dport, NULL if
+ * attached to an endpoint
*/
struct cxl_ep {
struct device *ep;
- struct list_head list;
+ struct cxl_dport *dport;
+ struct cxl_port *next;
+};
+
+/**
+ * struct cxl_region_ref - track a region's interest in a port
+ * @port: point in topology to install this reference
+ * @decoder: decoder assigned for @region in @port
+ * @region: region for this reference
+ * @endpoints: cxl_ep references for region members beneath @port
+ * @nr_targets_set: track how many targets have been programmed during setup
+ * @nr_eps: number of endpoints beneath @port
+ * @nr_targets: number of distinct targets needed to reach @nr_eps
+ */
+struct cxl_region_ref {
+ struct cxl_port *port;
+ struct cxl_decoder *decoder;
+ struct cxl_region *region;
+ struct xarray endpoints;
+ int nr_targets_set;
+ int nr_eps;
+ int nr_targets;
};
/*
@@ -325,29 +559,31 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port);
+ struct cxl_dport *parent_dport);
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct device *dev);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
int cxl_bus_rescan(void);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd);
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
struct device *dport, int port_id,
resource_size_t component_reg_phys);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev);
struct cxl_decoder *to_cxl_decoder(struct device *dev);
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
-bool is_cxl_decoder(struct device *dev);
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
@@ -357,6 +593,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+bool is_cxl_region(struct device *dev);
+
extern struct bus_type cxl_bus_type;
struct cxl_driver {
@@ -385,6 +623,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
#define CXL_DEVICE_PORT 3
#define CXL_DEVICE_ROOT 4
#define CXL_DEVICE_MEMORY_EXPANDER 5
+#define CXL_DEVICE_REGION 6
+#define CXL_DEVICE_PMEM_REGION 7
#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
@@ -396,7 +636,21 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+
+#ifdef CONFIG_CXL_REGION
+bool is_cxl_pmem_region(struct device *dev);
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
+#else
+static inline bool is_cxl_pmem_region(struct device *dev)
+{
+ return false;
+}
+static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ return NULL;
+}
+#endif
/*
* Unit test builds overrides this to __weak, find the 'strong' version
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 7df0b053373a..88e3a8e54b6a 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -50,6 +50,24 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
+static inline struct cxl_port *cxled_to_port(struct cxl_endpoint_decoder *cxled)
+{
+ return to_cxl_port(cxled->cxld.dev.parent);
+}
+
+static inline struct cxl_port *cxlrd_to_port(struct cxl_root_decoder *cxlrd)
+{
+ return to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+}
+
+static inline struct cxl_memdev *
+cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = to_cxl_port(cxled->cxld.dev.parent);
+
+ return to_cxl_memdev(port->uport);
+}
+
bool is_cxl_memdev(struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
@@ -178,8 +196,9 @@ struct cxl_endpoint_dvsec_info {
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
- * @pmem_range: Active Persistent memory capacity configuration
- * @ram_range: Active Volatile memory capacity configuration
+ * @dpa_res: Overall DPA resource tree for the device
+ * @pmem_res: Active Persistent memory capacity configuration
+ * @ram_res: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
@@ -191,6 +210,7 @@ struct cxl_endpoint_dvsec_info {
* @component_reg_phys: register base of component registers
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
+ * @doe_mbs: PCI DOE mailbox array
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
@@ -209,8 +229,9 @@ struct cxl_dev_state {
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
- struct range pmem_range;
- struct range ram_range;
+ struct resource dpa_res;
+ struct resource pmem_res;
+ struct resource ram_res;
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
@@ -224,6 +245,8 @@ struct cxl_dev_state {
resource_size_t component_reg_phys;
u64 serial;
+ struct xarray doe_mbs;
+
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
@@ -299,6 +322,13 @@ struct cxl_mbox_identify {
u8 qos_telemetry_caps;
} __packed;
+struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+} __packed;
+
struct cxl_mbox_get_lsa {
__le32 offset;
__le32 length;
@@ -370,4 +400,8 @@ struct cxl_hdm {
unsigned int interleave_mask;
struct cxl_port *port;
};
+
+struct seq_file;
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds);
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index fce1c11729c2..eec597dbe763 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -74,4 +74,5 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
+void read_cdat_data(struct cxl_port *port);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index a979d0b484d5..64ccf053d32c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -24,42 +25,32 @@
* in higher level operations.
*/
-static int create_endpoint(struct cxl_memdev *cxlmd,
- struct cxl_port *parent_port)
+static void enable_suspend(void *data)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_port *endpoint;
- int rc;
+ cxl_mem_active_dec();
+}
- endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
- cxlds->component_reg_phys, parent_port);
- if (IS_ERR(endpoint))
- return PTR_ERR(endpoint);
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
- dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+static int cxl_mem_dpa_show(struct seq_file *file, void *data)
+{
+ struct device *dev = file->private;
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- rc = cxl_endpoint_autoremove(cxlmd, endpoint);
- if (rc)
- return rc;
-
- if (!endpoint->dev.driver) {
- dev_err(&cxlmd->dev, "%s failed probe\n",
- dev_name(&endpoint->dev));
- return -ENXIO;
- }
+ cxl_dpa_debug(file, cxlmd->cxlds);
return 0;
}
-static void enable_suspend(void *data)
-{
- cxl_mem_active_dec();
-}
-
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_port *parent_port;
+ struct cxl_dport *dport;
+ struct dentry *dentry;
int rc;
/*
@@ -73,11 +64,17 @@ static int cxl_mem_probe(struct device *dev)
if (work_pending(&cxlmd->detach_work))
return -EBUSY;
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+
rc = devm_cxl_enumerate_ports(cxlmd);
if (rc)
return rc;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, &dport);
if (!parent_port) {
dev_err(dev, "CXL port topology not found\n");
return -ENXIO;
@@ -91,7 +88,7 @@ static int cxl_mem_probe(struct device *dev)
goto unlock;
}
- rc = create_endpoint(cxlmd, parent_port);
+ rc = devm_cxl_add_endpoint(cxlmd, dport);
unlock:
device_unlock(&parent_port->dev);
put_device(&parent_port->dev);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 5a0ae46d4989..faeb5d9d7a7a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
@@ -386,6 +387,47 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
+static void cxl_pci_destroy_doe(void *mbs)
+{
+ xa_destroy(mbs);
+}
+
+static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 off = 0;
+
+ xa_init(&cxlds->doe_mbs);
+ if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
+ dev_err(dev, "Failed to create XArray for DOE's\n");
+ return;
+ }
+
+ /*
+ * Mailbox creation is best effort. Higher layers must determine if
+ * the lack of a mailbox for their protocol is a device failure or not.
+ */
+ pci_doe_for_each_off(pdev, off) {
+ struct pci_doe_mb *doe_mb;
+
+ doe_mb = pcim_doe_create_mb(pdev, off);
+ if (IS_ERR(doe_mb)) {
+ dev_err(dev, "Failed to create MB object for MB @ %x\n",
+ off);
+ continue;
+ }
+
+ if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
+ dev_err(dev, "xa_insert failed to insert MB @ %x\n",
+ off);
+ continue;
+ }
+
+ dev_dbg(dev, "Created DOE mailbox @%x\n", off);
+ }
+}
+
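The xarray populated above is keyed by the DOE capability offset. To illustrate the "best effort" policy noted in the comment, a consumer could walk the array and treat an empty result as non-fatal; the helper below is a hypothetical sketch, not part of this patch:

static struct pci_doe_mb *cxl_find_any_doe_mb(struct cxl_dev_state *cxlds)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	/* entries were inserted with xa_insert(&cxlds->doe_mbs, off, ...) */
	xa_for_each(&cxlds->doe_mbs, index, doe_mb)
		return doe_mb;		/* first mailbox found */

	return NULL;			/* absence is for the caller to judge */
}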
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -434,6 +476,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);
+ devm_cxl_pci_create_doe(cxlds);
+
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
@@ -454,7 +498,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0aaa70b4e0f7..7dc0a2fa1a6b 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -7,6 +7,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"
@@ -26,7 +27,23 @@ static void clear_exclusive(void *cxlds)
static void unregister_nvdimm(void *nvdimm)
{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
+ struct cxl_pmem_region *cxlr_pmem;
+
+ device_lock(&cxl_nvb->dev);
+ cxlr_pmem = cxl_nvd->region;
+ dev_set_drvdata(&cxl_nvd->dev, NULL);
+ cxl_nvd->region = NULL;
+ device_unlock(&cxl_nvb->dev);
+
+ if (cxlr_pmem) {
+ device_release_driver(&cxlr_pmem->dev);
+ put_device(&cxlr_pmem->dev);
+ }
+
nvdimm_delete(nvdimm);
+ cxl_nvd->bridge = NULL;
}
static int cxl_nvdimm_probe(struct device *dev)
@@ -39,7 +56,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct nvdimm *nvdimm;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
+ cxl_nvb = cxl_find_nvdimm_bridge(dev);
if (!cxl_nvb)
return -ENXIO;
@@ -66,6 +83,7 @@ static int cxl_nvdimm_probe(struct device *dev)
}
dev_set_drvdata(dev, nvdimm);
+ cxl_nvd->bridge = cxl_nvb;
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
device_unlock(&cxl_nvb->dev);
@@ -204,15 +222,38 @@ static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
return cxl_nvb->nvdimm_bus != NULL;
}
-static int cxl_nvdimm_release_driver(struct device *dev, void *data)
+static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
+ struct cxl_nvdimm *cxl_nvd;
+
if (!is_cxl_nvdimm(dev))
return 0;
+
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->bridge != cxl_nvb)
+ return 0;
+
device_release_driver(dev);
return 0;
}
-static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
+static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+
+ if (!is_cxl_pmem_region(dev))
+ return 0;
+
+ cxlr_pmem = to_cxl_pmem_region(dev);
+ if (cxlr_pmem->bridge != cxl_nvb)
+ return 0;
+
+ device_release_driver(dev);
+ return 0;
+}
+
+static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
+ struct nvdimm_bus *nvdimm_bus)
{
if (!nvdimm_bus)
return;
@@ -222,7 +263,10 @@ static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
* nvdimm_bus_unregister() rips the nvdimm objects out from
* underneath them.
*/
- bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_pmem_region_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_nvdimm_release_driver);
nvdimm_bus_unregister(nvdimm_bus);
}
@@ -260,7 +304,7 @@ static void cxl_nvb_update_state(struct work_struct *work)
dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
}
- offline_nvdimm_bus(victim_bus);
+ offline_nvdimm_bus(cxl_nvb, victim_bus);
put_device(&cxl_nvb->dev);
}
@@ -315,6 +359,203 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
+static int match_cxl_nvdimm(struct device *dev, void *data)
+{
+ return is_cxl_nvdimm(dev);
+}
+
+static void unregister_nvdimm_region(void *nd_region)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct cxl_pmem_region *cxlr_pmem;
+ int i;
+
+ cxlr_pmem = nd_region_provider_data(nd_region);
+ cxl_nvb = cxlr_pmem->bridge;
+ device_lock(&cxl_nvb->dev);
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;
+
+ if (cxl_nvd->region) {
+ put_device(&cxlr_pmem->dev);
+ cxl_nvd->region = NULL;
+ }
+ }
+ device_unlock(&cxl_nvb->dev);
+
+ nvdimm_region_delete(nd_region);
+}
+
+static void cxlr_pmem_remove_resource(void *res)
+{
+ remove_resource(res);
+}
+
+struct cxl_pmem_region_info {
+ u64 offset;
+ u64 serial;
+};
+
+static int cxl_pmem_region_probe(struct device *dev)
+{
+ struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ struct cxl_region *cxlr = cxlr_pmem->cxlr;
+ struct cxl_pmem_region_info *info = NULL;
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct nd_interleave_set *nd_set;
+ struct nd_region_desc ndr_desc;
+ struct cxl_nvdimm *cxl_nvd;
+ struct nvdimm *nvdimm;
+ struct resource *res;
+ int rc, i = 0;
+
+ cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
+ if (!cxl_nvb) {
+ dev_dbg(dev, "bridge not found\n");
+ return -ENXIO;
+ }
+ cxlr_pmem->bridge = cxl_nvb;
+
+ device_lock(&cxl_nvb->dev);
+ if (!cxl_nvb->nvdimm_bus) {
+ dev_dbg(dev, "nvdimm bus not found\n");
+ rc = -ENXIO;
+ goto err;
+ }
+
+ memset(&mappings, 0, sizeof(mappings));
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ res->name = "Persistent Memory";
+ res->start = cxlr_pmem->hpa_range.start;
+ res->end = cxlr_pmem->hpa_range.end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
+
+ rc = insert_resource(&iomem_resource, res);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
+ if (rc)
+ goto err;
+
+ ndr_desc.res = res;
+ ndr_desc.provider_data = cxlr_pmem;
+
+ ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
+ ndr_desc.target_node = phys_to_target_node(res->start);
+ if (ndr_desc.target_node == NUMA_NO_NODE) {
+ ndr_desc.target_node = ndr_desc.numa_node;
+ dev_dbg(&cxlr->dev, "changing target node from %d to %d",
+ NUMA_NO_NODE, ndr_desc.target_node);
+ }
+
+ nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ndr_desc.memregion = cxlr->id;
+ set_bit(ND_REGION_CXL, &ndr_desc.flags);
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
+
+ info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_memdev *cxlmd = m->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *d;
+
+ d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
+ if (!d) {
+ dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* safe to drop ref now with bridge lock held */
+ put_device(d);
+
+ cxl_nvd = to_cxl_nvdimm(d);
+ nvdimm = dev_get_drvdata(&cxl_nvd->dev);
+ if (!nvdimm) {
+ dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+ cxl_nvd->region = cxlr_pmem;
+ get_device(&cxlr_pmem->dev);
+ m->cxl_nvd = cxl_nvd;
+ mappings[i] = (struct nd_mapping_desc) {
+ .nvdimm = nvdimm,
+ .start = m->start,
+ .size = m->size,
+ .position = i,
+ };
+ info[i].offset = m->start;
+ info[i].serial = cxlds->serial;
+ }
+ ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
+ ndr_desc.mapping = mappings;
+
+ /*
+ * TODO enable CXL labels which skip the need for 'interleave-set cookie'
+ */
+ nd_set->cookie1 =
+ nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
+ nd_set->cookie2 = nd_set->cookie1;
+ ndr_desc.nd_set = nd_set;
+
+ cxlr_pmem->nd_region =
+ nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
+ if (!cxlr_pmem->nd_region) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
+ cxlr_pmem->nd_region);
+out:
+ kfree(info);
+ device_unlock(&cxl_nvb->dev);
+ put_device(&cxl_nvb->dev);
+
+ return rc;
+
+err:
+ dev_dbg(dev, "failed to create nvdimm region\n");
+ for (i--; i >= 0; i--) {
+ nvdimm = mappings[i].nvdimm;
+ cxl_nvd = nvdimm_provider_data(nvdimm);
+ put_device(&cxl_nvd->region->dev);
+ cxl_nvd->region = NULL;
+ }
+ goto out;
+}
+
+static struct cxl_driver cxl_pmem_region_driver = {
+ .name = "cxl_pmem_region",
+ .probe = cxl_pmem_region_probe,
+ .id = CXL_DEVICE_PMEM_REGION,
+};
+
/*
* Return all bridges to the CXL_NVB_NEW state to invalidate any
* ->state_work referring to the now destroyed cxl_pmem_wq.
@@ -359,8 +600,14 @@ static __init int cxl_pmem_init(void)
if (rc)
goto err_nvdimm;
+ rc = cxl_driver_register(&cxl_pmem_region_driver);
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
@@ -370,6 +617,7 @@ err_bridge:
static __exit void cxl_pmem_exit(void)
{
+ cxl_driver_unregister(&cxl_pmem_region_driver);
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
destroy_cxl_pmem_wq();
@@ -381,3 +629,4 @@ module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
+MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
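For reference, since CXL_DEVICE_PMEM_REGION is defined as 7 in cxl.h above, this alias expands to MODULE_ALIAS("cxl:t7*"), which matches the "cxl:t7" modalias (CXL_MODALIAS_FMT) emitted for cxl_pmem_region devices, so udev can autoload the module when such a device appears.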
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 3cf308f114c4..5453771bf330 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -53,6 +53,9 @@ static int cxl_port_probe(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* Cache the data early to ensure is_visible() works */
+ read_cdat_data(port);
+
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
if (rc)
@@ -78,10 +81,60 @@ static int cxl_port_probe(struct device *dev)
return 0;
}
+static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if (!port->cdat_available)
+ return -ENXIO;
+
+ if (!port->cdat.table)
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &offset,
+ port->cdat.table,
+ port->cdat.length);
+}
+
+static BIN_ATTR_ADMIN_RO(CDAT, 0);
+
+static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if ((attr == &bin_attr_CDAT) && port->cdat_available)
+ return attr->attr.mode;
+
+ return 0;
+}
+
+static struct bin_attribute *cxl_cdat_bin_attributes[] = {
+ &bin_attr_CDAT,
+ NULL,
+};
+
+static struct attribute_group cxl_cdat_attribute_group = {
+ .bin_attrs = cxl_cdat_bin_attributes,
+ .is_bin_visible = cxl_port_bin_attr_is_visible,
+};
+
+static const struct attribute_group *cxl_port_attribute_groups[] = {
+ &cxl_cdat_attribute_group,
+ NULL,
+};
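Once a port publishes the CDAT attribute, userspace can read the raw table through sysfs. A minimal sketch, assuming a hypothetical /sys/bus/cxl/devices/port1/ path (the actual port name depends on enumeration order):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/bus/cxl/devices/port1/CDAT", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* dump the raw CDAT blob */
	close(fd);
	return 0;
}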
+
static struct cxl_driver cxl_port_driver = {
.name = "cxl_port",
.probe = cxl_port_probe,
.id = CXL_DEVICE_PORT,
+ .drv = {
+ .dev_groups = cxl_port_attribute_groups,
+ },
};
module_cxl_driver(cxl_port_driver);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 50a08b2ec247..9b5e2a5eb0ae 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -22,6 +22,8 @@
* @private: dax driver private data
* @flags: state and boolean properties
* @ops: operations for this device
+ * @holder_data: holder of the dax_device: a filesystem or a mapped device
+ * @holder_ops: operations for the inner holder
*/
struct dax_device {
struct inode inode;
@@ -29,6 +31,8 @@ struct dax_device {
void *private;
unsigned long flags;
const struct dax_operations *ops;
+ void *holder_data;
+ const struct dax_holder_operations *holder_ops;
};
static dev_t dax_devt;
@@ -71,8 +75,11 @@ EXPORT_SYMBOL_GPL(dax_remove_host);
* fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
* @bdev: block device to find a dax_device for
* @start_off: returns the byte offset into the dax_device that @bdev starts
+ * @holder: filesystem or mapped device that holds the dax_device
+ * @ops: operations for the inner holder
*/
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops)
{
struct dax_device *dax_dev;
u64 part_size;
@@ -92,11 +99,26 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
dax_dev = NULL;
+ else if (holder) {
+ if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
+ dax_dev->holder_ops = ops;
+ else
+ dax_dev = NULL;
+ }
dax_read_unlock(id);
return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+
+void fs_put_dax(struct dax_device *dax_dev, void *holder)
+{
+ if (dax_dev && holder &&
+ cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
+ dax_dev->holder_ops = NULL;
+ put_dax(dax_dev);
+}
+EXPORT_SYMBOL_GPL(fs_put_dax);
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
@@ -204,6 +226,29 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_recovery_write);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
+ u64 len, int mf_flags)
+{
+ int rc, id;
+
+ id = dax_read_lock();
+ if (!dax_alive(dax_dev)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!dax_dev->holder_ops) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
+out:
+ dax_read_unlock(id);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -277,8 +322,15 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev)
return;
+ if (dax_dev->holder_data != NULL)
+ dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);
+
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
+
+ /* clear holder data */
+ dax_dev->holder_ops = NULL;
+ dax_dev->holder_data = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -421,6 +473,19 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
+ * dax_holder() - obtain the holder of a dax device
+ * @dax_dev: a dax_device instance
+ *
+ * Return: the holder's data if a holder is registered, otherwise NULL.
+ */
+void *dax_holder(struct dax_device *dax_dev)
+{
+ return dax_dev->holder_data;
+}
+EXPORT_SYMBOL_GPL(dax_holder);
+
+/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
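The holder registration added above pairs fs_dax_get_by_bdev()/fs_put_dax() with a dax_holder_operations table whose ->notify_failure() is invoked by dax_holder_notify_failure(). A minimal sketch of a holder; the myfs_* names and the sb-info structure are illustrative assumptions:

static int myfs_dax_notify_failure(struct dax_device *dax_dev, u64 offset,
				   u64 len, int mf_flags)
{
	struct myfs_sb_info *sbi = dax_holder(dax_dev);

	/* translate [offset, offset + len) back to file mappings here */
	return 0;
}

static const struct dax_holder_operations myfs_dax_holder_ops = {
	.notify_failure = myfs_dax_notify_failure,
};

/* at mount:
 *	sbi->dax_dev = fs_dax_get_by_bdev(bdev, &sbi->dax_part_off,
 *					  sbi, &myfs_dax_holder_ops);
 * at unmount:
 *	fs_put_dax(sbi->dax_dev, sbi);
 */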
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 87eb2b837e68..9754d8b31621 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -120,6 +120,16 @@ config ARM_TEGRA_DEVFREQ
It reads ACTMON counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
+config ARM_MEDIATEK_CCI_DEVFREQ
+ tristate "MEDIATEK CCI DEVFREQ Driver"
+ depends on ARM_MEDIATEK_CPUFREQ || COMPILE_TEST
+ select DEVFREQ_GOV_PASSIVE
+ help
+ This adds a devfreq driver for the MediaTek Cache Coherent Interconnect
+ (CCI), which shares its regulators with the CPU cluster. The driver tracks
+ the buck voltages and sets a suitable CCI frequency, using regulator
+ notifications to follow the regulator status.
+
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 0b6be92a25d9..bf40d04928d0 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
+obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ) += sun8i-a33-mbus.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 9602141bb8ec..63347a5ae599 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -696,6 +696,8 @@ static int qos_notifier_call(struct devfreq *devfreq)
/**
* qos_min_notifier_call() - Callback for QoS min_freq changes.
* @nb: Should be devfreq->nb_min
+ * @val: not used
+ * @ptr: not used
*/
static int qos_min_notifier_call(struct notifier_block *nb,
unsigned long val, void *ptr)
@@ -706,6 +708,8 @@ static int qos_min_notifier_call(struct notifier_block *nb,
/**
* qos_max_notifier_call() - Callback for QoS max_freq changes.
* @nb: Should be devfreq->nb_max
+ * @val: not used
+ * @ptr: not used
*/
static int qos_max_notifier_call(struct notifier_block *nb,
unsigned long val, void *ptr)
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index f7dcc44f9414..027e8f336acc 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -33,7 +33,7 @@ struct exynos_bus {
unsigned long curr_freq;
- struct opp_table *opp_table;
+ int opp_token;
struct clk *clk;
unsigned int ratio;
};
@@ -161,8 +161,7 @@ static void exynos_bus_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -179,18 +178,16 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
- struct opp_table *opp_table;
- const char *vdd = "vdd";
+ const char *supplies[] = { "vdd", NULL };
int i, ret, count, size;
- opp_table = dev_pm_opp_set_regulators(dev, &vdd, 1);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
+ ret = dev_pm_opp_set_regulators(dev, supplies);
+ if (ret < 0) {
dev_err(dev, "failed to set regulators %d\n", ret);
return ret;
}
- bus->opp_table = opp_table;
+ bus->opp_token = ret;
/*
* Get the devfreq-event devices to get the current utilization of
@@ -236,8 +233,7 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
return 0;
err_regulator:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
@@ -459,8 +455,7 @@ err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
index f3f6e25053ed..a727067980fb 100644
--- a/drivers/devfreq/imx-bus.c
+++ b/drivers/devfreq/imx-bus.c
@@ -59,7 +59,7 @@ static int imx_bus_init_icc(struct device *dev)
struct imx_bus *priv = dev_get_drvdata(dev);
const char *icc_driver_name;
- if (!of_get_property(dev->of_node, "#interconnect-cells", 0))
+ if (!of_get_property(dev->of_node, "#interconnect-cells", NULL))
return 0;
if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
dev_warn(dev, "imx interconnect drivers disabled\n");
@@ -145,6 +145,7 @@ static const struct of_device_id imx_bus_of_match[] = {
{ .compatible = "fsl,imx8mq-noc", .data = "imx8mq-interconnect", },
{ .compatible = "fsl,imx8mm-noc", .data = "imx8mm-interconnect", },
{ .compatible = "fsl,imx8mn-noc", .data = "imx8mn-interconnect", },
+ { .compatible = "fsl,imx8mp-noc", .data = "imx8mp-interconnect", },
{ .compatible = "fsl,imx8m-noc", },
{ .compatible = "fsl,imx8m-nic", },
{ /* sentinel */ },
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
new file mode 100644
index 000000000000..71abb3fbd042
--- /dev/null
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+
+struct mtk_ccifreq_platform_data {
+ int min_volt_shift;
+ int max_volt_shift;
+ int proc_max_volt;
+ int sram_min_volt;
+ int sram_max_volt;
+};
+
+struct mtk_ccifreq_drv {
+ struct device *dev;
+ struct devfreq *devfreq;
+ struct regulator *proc_reg;
+ struct regulator *sram_reg;
+ struct clk *cci_clk;
+ struct clk *inter_clk;
+ int inter_voltage;
+ unsigned long pre_freq;
+ /* Avoid race condition for regulators between notify and policy */
+ struct mutex reg_lock;
+ struct notifier_block opp_nb;
+ const struct mtk_ccifreq_platform_data *soc_data;
+ int vtrack_max;
+};
+
+static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
+{
+ const struct mtk_ccifreq_platform_data *soc_data = drv->soc_data;
+ struct device *dev = drv->dev;
+ int pre_voltage, pre_vsram, new_vsram, vsram, voltage, ret;
+ int retry_max = drv->vtrack_max;
+
+ if (!drv->sram_reg) {
+ ret = regulator_set_voltage(drv->proc_reg, new_voltage,
+ drv->soc_data->proc_max_volt);
+ return ret;
+ }
+
+ pre_voltage = regulator_get_voltage(drv->proc_reg);
+ if (pre_voltage < 0) {
+ dev_err(dev, "invalid vproc value: %d\n", pre_voltage);
+ return pre_voltage;
+ }
+
+ pre_vsram = regulator_get_voltage(drv->sram_reg);
+ if (pre_vsram < 0) {
+ dev_err(dev, "invalid vsram value: %d\n", pre_vsram);
+ return pre_vsram;
+ }
+
+ new_vsram = clamp(new_voltage + soc_data->min_volt_shift,
+ soc_data->sram_min_volt, soc_data->sram_max_volt);
+
+ do {
+ if (pre_voltage <= new_voltage) {
+ vsram = clamp(pre_voltage + soc_data->max_volt_shift,
+ soc_data->sram_min_volt, new_vsram);
+ ret = regulator_set_voltage(drv->sram_reg, vsram,
+ soc_data->sram_max_volt);
+ if (ret)
+ return ret;
+
+ if (vsram == soc_data->sram_max_volt ||
+ new_vsram == soc_data->sram_min_volt)
+ voltage = new_voltage;
+ else
+ voltage = vsram - soc_data->min_volt_shift;
+
+ ret = regulator_set_voltage(drv->proc_reg, voltage,
+ soc_data->proc_max_volt);
+ if (ret) {
+ regulator_set_voltage(drv->sram_reg, pre_vsram,
+ soc_data->sram_max_volt);
+ return ret;
+ }
+ } else if (pre_voltage > new_voltage) {
+ voltage = max(new_voltage,
+ pre_vsram - soc_data->max_volt_shift);
+ ret = regulator_set_voltage(drv->proc_reg, voltage,
+ soc_data->proc_max_volt);
+ if (ret)
+ return ret;
+
+ if (voltage == new_voltage)
+ vsram = new_vsram;
+ else
+ vsram = max(new_vsram,
+ voltage + soc_data->min_volt_shift);
+
+ ret = regulator_set_voltage(drv->sram_reg, vsram,
+ soc_data->sram_max_volt);
+ if (ret) {
+ regulator_set_voltage(drv->proc_reg, pre_voltage,
+ soc_data->proc_max_volt);
+ return ret;
+ }
+ }
+
+ pre_voltage = voltage;
+ pre_vsram = vsram;
+
+ if (--retry_max < 0) {
+ dev_err(dev,
+ "over loop count, failed to set voltage\n");
+ return -EINVAL;
+ }
+ } while (voltage != new_voltage || vsram != new_vsram);
+
+ return 0;
+}
+
+static int mtk_ccifreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev);
+ struct clk *cci_pll = clk_get_parent(drv->cci_clk);
+ struct dev_pm_opp *opp;
+ unsigned long opp_rate;
+ int voltage, pre_voltage, inter_voltage, target_voltage, ret;
+
+ if (!drv)
+ return -EINVAL;
+
+ if (drv->pre_freq == *freq)
+ return 0;
+
+ inter_voltage = drv->inter_voltage;
+
+ opp_rate = *freq;
+ opp = devfreq_recommended_opp(dev, &opp_rate, 1);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for freq: %ld\n", opp_rate);
+ return PTR_ERR(opp);
+ }
+
+ mutex_lock(&drv->reg_lock);
+
+ voltage = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ pre_voltage = regulator_get_voltage(drv->proc_reg);
+ if (pre_voltage < 0) {
+ dev_err(dev, "invalid vproc value: %d\n", pre_voltage);
+ ret = pre_voltage;
+ goto out_unlock;
+ }
+
+ /* scale up: set voltage first then freq. */
+ target_voltage = max(inter_voltage, voltage);
+ if (pre_voltage <= target_voltage) {
+ ret = mtk_ccifreq_set_voltage(drv, target_voltage);
+ if (ret) {
+ dev_err(dev, "failed to scale up voltage\n");
+ goto out_restore_voltage;
+ }
+ }
+
+ /* switch the cci clock to intermediate clock source. */
+ ret = clk_set_parent(drv->cci_clk, drv->inter_clk);
+ if (ret) {
+ dev_err(dev, "failed to re-parent cci clock\n");
+ goto out_restore_voltage;
+ }
+
+ /* set the original clock to target rate. */
+ ret = clk_set_rate(cci_pll, *freq);
+ if (ret) {
+ dev_err(dev, "failed to set cci pll rate: %d\n", ret);
+ clk_set_parent(drv->cci_clk, cci_pll);
+ goto out_restore_voltage;
+ }
+
+ /* switch the cci clock back to the original clock source. */
+ ret = clk_set_parent(drv->cci_clk, cci_pll);
+ if (ret) {
+ dev_err(dev, "failed to re-parent cci clock\n");
+ mtk_ccifreq_set_voltage(drv, inter_voltage);
+ goto out_unlock;
+ }
+
+ /*
+ * If the new voltage is lower than the intermediate voltage or the
+ * original voltage, scale down to the new voltage.
+ */
+ if (voltage < inter_voltage || voltage < pre_voltage) {
+ ret = mtk_ccifreq_set_voltage(drv, voltage);
+ if (ret) {
+ dev_err(dev, "failed to scale down voltage\n");
+ goto out_unlock;
+ }
+ }
+
+ drv->pre_freq = *freq;
+ mutex_unlock(&drv->reg_lock);
+
+ return 0;
+
+out_restore_voltage:
+ mtk_ccifreq_set_voltage(drv, pre_voltage);
+
+out_unlock:
+ mutex_unlock(&drv->reg_lock);
+ return ret;
+}
+
+static int mtk_ccifreq_opp_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct mtk_ccifreq_drv *drv;
+ unsigned long freq, volt;
+
+ drv = container_of(nb, struct mtk_ccifreq_drv, opp_nb);
+
+ if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+ freq = dev_pm_opp_get_freq(opp);
+
+ mutex_lock(&drv->reg_lock);
+ /* current opp item is changed */
+ if (freq == drv->pre_freq) {
+ volt = dev_pm_opp_get_voltage(opp);
+ mtk_ccifreq_set_voltage(drv, volt);
+ }
+ mutex_unlock(&drv->reg_lock);
+ }
+
+ return 0;
+}
+
+static struct devfreq_dev_profile mtk_ccifreq_profile = {
+ .target = mtk_ccifreq_target,
+};
+
+static int mtk_ccifreq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ccifreq_drv *drv;
+ struct devfreq_passive_data *passive_data;
+ struct dev_pm_opp *opp;
+ unsigned long rate, opp_volt;
+ int ret;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->dev = dev;
+ drv->soc_data = (const struct mtk_ccifreq_platform_data *)
+ of_device_get_match_data(&pdev->dev);
+ mutex_init(&drv->reg_lock);
+ platform_set_drvdata(pdev, drv);
+
+ drv->cci_clk = devm_clk_get(dev, "cci");
+ if (IS_ERR(drv->cci_clk)) {
+ ret = PTR_ERR(drv->cci_clk);
+ return dev_err_probe(dev, ret, "failed to get cci clk\n");
+ }
+
+ drv->inter_clk = devm_clk_get(dev, "intermediate");
+ if (IS_ERR(drv->inter_clk)) {
+ ret = PTR_ERR(drv->inter_clk);
+ return dev_err_probe(dev, ret,
+ "failed to get intermediate clk\n");
+ }
+
+ drv->proc_reg = devm_regulator_get_optional(dev, "proc");
+ if (IS_ERR(drv->proc_reg)) {
+ ret = PTR_ERR(drv->proc_reg);
+ return dev_err_probe(dev, ret,
+ "failed to get proc regulator\n");
+ }
+
+ ret = regulator_enable(drv->proc_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable proc regulator\n");
+ return ret;
+ }
+
+ drv->sram_reg = devm_regulator_get_optional(dev, "sram");
+ if (IS_ERR(drv->sram_reg))
+ drv->sram_reg = NULL;
+ else {
+ ret = regulator_enable(drv->sram_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable sram regulator\n");
+ goto out_free_resources;
+ }
+ }
+
+ /*
+ * We assume the minimum voltage is 0 and that the target voltage is
+ * tracked in steps of min_volt_shift on each iteration.
+ * retry_max is three times the expected iteration count.
+ */
+ drv->vtrack_max = 3 * DIV_ROUND_UP(max(drv->soc_data->sram_max_volt,
+ drv->soc_data->proc_max_volt),
+ drv->soc_data->min_volt_shift);
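+ /*
+ * For example, taking the mt8186 numbers defined below (proc_max_volt =
+ * sram_max_volt = 1118750 uV, min_volt_shift = 100000 uV):
+ * DIV_ROUND_UP(1118750, 100000) = 12 expected steps, so
+ * vtrack_max = 3 * 12 = 36 retries.
+ */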
+
+ ret = clk_prepare_enable(drv->cci_clk);
+ if (ret)
+ goto out_free_resources;
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret) {
+ dev_err(dev, "failed to add opp table: %d\n", ret);
+ goto out_disable_cci_clk;
+ }
+
+ rate = clk_get_rate(drv->inter_clk);
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "failed to get intermediate opp: %d\n", ret);
+ goto out_remove_opp_table;
+ }
+ drv->inter_voltage = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ rate = U32_MAX;
+ opp = dev_pm_opp_find_freq_floor(drv->dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to get opp\n");
+ ret = PTR_ERR(opp);
+ goto out_remove_opp_table;
+ }
+
+ opp_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+ ret = mtk_ccifreq_set_voltage(drv, opp_volt);
+ if (ret) {
+ dev_err(dev, "failed to scale to highest voltage %lu in proc_reg\n",
+ opp_volt);
+ goto out_remove_opp_table;
+ }
+
+ passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+ if (!passive_data) {
+ ret = -ENOMEM;
+ goto out_remove_opp_table;
+ }
+
+ passive_data->parent_type = CPUFREQ_PARENT_DEV;
+ drv->devfreq = devm_devfreq_add_device(dev, &mtk_ccifreq_profile,
+ DEVFREQ_GOV_PASSIVE,
+ passive_data);
+ if (IS_ERR(drv->devfreq)) {
+ ret = -EPROBE_DEFER;
+ dev_err(dev, "failed to add devfreq device: %ld\n",
+ PTR_ERR(drv->devfreq));
+ goto out_remove_opp_table;
+ }
+
+ drv->opp_nb.notifier_call = mtk_ccifreq_opp_notifier;
+ ret = dev_pm_opp_register_notifier(dev, &drv->opp_nb);
+ if (ret) {
+ dev_err(dev, "failed to register opp notifier: %d\n", ret);
+ goto out_remove_opp_table;
+ }
+ return 0;
+
+out_remove_opp_table:
+ dev_pm_opp_of_remove_table(dev);
+
+out_disable_cci_clk:
+ clk_disable_unprepare(drv->cci_clk);
+
+out_free_resources:
+ if (regulator_is_enabled(drv->proc_reg))
+ regulator_disable(drv->proc_reg);
+ if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ regulator_disable(drv->sram_reg);
+
+ return ret;
+}
+
+static int mtk_ccifreq_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ccifreq_drv *drv;
+
+ drv = platform_get_drvdata(pdev);
+
+ dev_pm_opp_unregister_notifier(dev, &drv->opp_nb);
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(drv->cci_clk);
+ regulator_disable(drv->proc_reg);
+ if (drv->sram_reg)
+ regulator_disable(drv->sram_reg);
+
+ return 0;
+}
+
+static const struct mtk_ccifreq_platform_data mt8183_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1150000,
+};
+
+static const struct mtk_ccifreq_platform_data mt8186_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 250000,
+ .proc_max_volt = 1118750,
+ .sram_min_volt = 850000,
+ .sram_max_volt = 1118750,
+};
+
+static const struct of_device_id mtk_ccifreq_machines[] = {
+ { .compatible = "mediatek,mt8183-cci", .data = &mt8183_platform_data },
+ { .compatible = "mediatek,mt8186-cci", .data = &mt8186_platform_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
+
+static struct platform_driver mtk_ccifreq_platdrv = {
+ .probe = mtk_ccifreq_probe,
+ .remove = mtk_ccifreq_remove,
+ .driver = {
+ .name = "mtk-ccifreq",
+ .of_match_table = mtk_ccifreq_machines,
+ },
+};
+module_platform_driver(mtk_ccifreq_platdrv);
+
+MODULE_DESCRIPTION("MediaTek CCI devfreq driver");
+MODULE_AUTHOR("Jia-Wei Chang <jia-wei.chang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 65ecf17a36f4..503376b894b6 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -821,6 +821,15 @@ static int devm_tegra_devfreq_init_hw(struct device *dev,
return err;
}
+static int tegra_devfreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
+{
+ /* We want to skip clk configuration via dev_pm_opp_set_opp() */
+ return 0;
+}
+
static int tegra_devfreq_probe(struct platform_device *pdev)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
@@ -830,6 +839,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
unsigned int i;
long rate;
int err;
+ const char *clk_names[] = { "actmon", NULL };
+ struct dev_pm_opp_config config = {
+ .supported_hw = &hw_version,
+ .supported_hw_count = 1,
+ .clk_names = clk_names,
+ .config_clks = tegra_devfreq_config_clks_nop,
+ };
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
@@ -874,13 +890,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = devm_pm_opp_set_supported_hw(&pdev->dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(&pdev->dev, &config);
if (err) {
- dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
+ dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
return err;
}
- err = devm_pm_opp_of_add_table_noclk(&pdev->dev, 0);
+ err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
return err;
@@ -922,8 +938,10 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
"tegra_actmon", NULL);
- if (IS_ERR(devfreq))
+ if (IS_ERR(devfreq)) {
+ dev_err(&pdev->dev, "Failed to add device: %pe\n", devfreq);
return PTR_ERR(devfreq);
+ }
return 0;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 630133284e2b..efb4990b29e1 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -655,7 +655,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
goto err_dmabuf;
}
- file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
mutex_init(&dmabuf->lock);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 0cce6e4ec946..205acb2c744d 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -343,7 +343,7 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
if (old->context != context)
continue;
- dma_resv_list_set(list, i, replacement, usage);
+ dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
dma_fence_put(old);
}
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 487ed4ddc3be..a06d2a7627aa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -85,6 +85,14 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config APPLE_ADMAC
+ tristate "Apple ADMAC support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DMA_ENGINE
+ default ARCH_APPLE
+ help
+ Enable support for the Audio DMA Controller found on Apple Silicon SoCs.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2f1b87ffd7ab..10f7d4241001 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AMD_PTDMA) += ptdma/
+obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 6f56dfd375e3..4153c2edb049 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -749,7 +749,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
}
/**
- * msgdma_chan_remove - Channel remove function
+ * msgdma_dev_remove() - Device remove function
* @mdev: Pointer to the Altera mSGDMA device structure
*/
static void msgdma_dev_remove(struct msgdma_device *mdev)
@@ -918,7 +918,7 @@ fail:
}
/**
- * msgdma_dma_remove - Driver remove function
+ * msgdma_remove() - Driver remove function
* @pdev: Pointer to the platform_device structure
*
* Return: Always '0'
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a4a794e62ac2..487a01aa207d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -231,7 +231,7 @@ enum pl08x_dma_chan_state {
/**
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
new file mode 100644
index 000000000000..d1f74a3aa999
--- /dev/null
+++ b/drivers/dma/apple-admac.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+#define NCHANNELS_MAX 64
+#define IRQ_NOUTPUTS 4
+
+#define RING_WRITE_SLOT GENMASK(1, 0)
+#define RING_READ_SLOT GENMASK(5, 4)
+#define RING_FULL BIT(9)
+#define RING_EMPTY BIT(8)
+#define RING_ERR BIT(10)
+
+#define STATUS_DESC_DONE BIT(0)
+#define STATUS_ERR BIT(6)
+
+#define FLAG_DESC_NOTIFY BIT(16)
+
+#define REG_TX_START 0x0000
+#define REG_TX_STOP 0x0004
+#define REG_RX_START 0x0008
+#define REG_RX_STOP 0x000c
+
+#define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200)
+#define REG_CHAN_CTL_RST_RINGS BIT(0)
+
+#define REG_DESC_RING(ch) (0x8070 + (ch) * 0x200)
+#define REG_REPORT_RING(ch) (0x8074 + (ch) * 0x200)
+
+#define REG_RESIDUE(ch) (0x8064 + (ch) * 0x200)
+
+#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+
+#define BUS_WIDTH_8BIT 0x00
+#define BUS_WIDTH_16BIT 0x01
+#define BUS_WIDTH_32BIT 0x02
+#define BUS_WIDTH_FRAME_2_WORDS 0x10
+#define BUS_WIDTH_FRAME_4_WORDS 0x20
+
+#define CHAN_BUFSIZE 0x8000
+
+#define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200)
+#define CHAN_FIFOCTL_LIMIT GENMASK(31, 16)
+#define CHAN_FIFOCTL_THRESHOLD GENMASK(15, 0)
+
+#define REG_DESC_WRITE(ch) (0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+#define REG_REPORT_READ(ch) (0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+
+#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
+#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
+#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
+
+struct admac_data;
+struct admac_tx;
+
+struct admac_chan {
+ unsigned int no;
+ struct admac_data *host;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct admac_tx *current_tx;
+ int nperiod_acks;
+
+ /*
+ * We maintain a 'submitted' and 'issued' list mainly for interface
+ * correctness. Typical use of the driver (per channel) will be
+ * prepping, submitting and issuing a single cyclic transaction which
+ * will stay current until terminate_all is called.
+ */
+ struct list_head submitted;
+ struct list_head issued;
+
+ struct list_head to_free;
+};
+
+struct admac_data {
+ struct dma_device dma;
+ struct device *dev;
+ __iomem void *base;
+
+ int irq_index;
+ int nchannels;
+ struct admac_chan channels[];
+};
+
+struct admac_tx {
+ struct dma_async_tx_descriptor tx;
+ bool cyclic;
+ dma_addr_t buf_addr;
+ dma_addr_t buf_end;
+ size_t buf_len;
+ size_t period_len;
+
+ size_t submitted_pos;
+ size_t reclaimed_pos;
+
+ struct list_head node;
+};
+
+static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
+{
+ void __iomem *addr = ad->base + reg;
+ u32 curr = readl_relaxed(addr);
+
+ writel_relaxed((curr & ~mask) | (val & mask), addr);
+}
+
+static struct admac_chan *to_admac_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct admac_chan, chan);
+}
+
+static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct admac_tx, tx);
+}
+
+static enum dma_transfer_direction admac_chan_direction(int channo)
+{
+ /* Channel directions are hardwired */
+ return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+}
+
+static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct admac_tx *adtx = to_admac_tx(tx);
+ struct admac_chan *adchan = to_admac_chan(tx->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&adtx->node, &adchan->submitted);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return cookie;
+}
+
+static int admac_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ kfree(to_admac_tx(tx));
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
+ struct admac_tx *adtx;
+
+ if (direction != admac_chan_direction(adchan->no))
+ return NULL;
+
+ adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
+ if (!adtx)
+ return NULL;
+
+ adtx->cyclic = true;
+
+ adtx->buf_addr = buf_addr;
+ adtx->buf_len = buf_len;
+ adtx->buf_end = buf_addr + buf_len;
+ adtx->period_len = period_len;
+
+ adtx->submitted_pos = 0;
+ adtx->reclaimed_pos = 0;
+
+ dma_async_tx_descriptor_init(&adtx->tx, chan);
+ adtx->tx.tx_submit = admac_tx_submit;
+ adtx->tx.desc_free = admac_desc_free;
+
+ return &adtx->tx;
+}
+
+/*
+ * Write one hardware descriptor for a dmaengine cyclic transaction.
+ */
+static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ dma_addr_t addr;
+
+ addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);
+
+ /* If this happens, we have buggy code */
+ WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);
+
+ dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
+ channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);
+
+ writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(tx->period_len, ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(FLAG_DESC_NOTIFY, ad->base + REG_DESC_WRITE(channo));
+
+ tx->submitted_pos += tx->period_len;
+ tx->submitted_pos %= 2 * tx->buf_len;
+}
+
+/*
+ * Write all the hardware descriptors for a dmaengine cyclic
+ * transaction there is space for.
+ */
+static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
+ break;
+ admac_cyclic_write_one_desc(ad, channo, tx);
+ }
+}
+
+static int admac_ring_noccupied_slots(int ringval)
+{
+ int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
+ int rdslot = FIELD_GET(RING_READ_SLOT, ringval);
+
+ if (wrslot != rdslot) {
+ return (wrslot + 4 - rdslot) % 4;
+ } else {
+ WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);
+
+ if (ringval & RING_FULL)
+ return 4;
+ else
+ return 0;
+ }
+}
+
+/*
+ * Read from hardware the residue of a cyclic dmaengine transaction.
+ */
+static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
+ struct admac_tx *adtx)
+{
+ u32 ring1, ring2;
+ u32 residue1, residue2;
+ int nreports;
+ size_t pos;
+
+ ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+ ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+
+ if (residue2 > residue1) {
+ /*
+ * The controller must have loaded the next descriptor
+ * between the two residue reads.
+ */
+ nreports = admac_ring_noccupied_slots(ring1) + 1;
+ } else {
+ /* No descriptor load between the two reads, ring2 is safe to use */
+ nreports = admac_ring_noccupied_slots(ring2);
+ }
+
+ pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;
+
+ return adtx->buf_len - pos % adtx->buf_len;
+}
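To make the arithmetic concrete with assumed numbers (not taken from the driver): for buf_len = 8192, period_len = 2048, reclaimed_pos = 0, one occupied report slot (nreports = 1) and residue2 = 512, pos = 0 + 2048 * (1 + 1) - 512 = 3584, so the reported residue is 8192 - (3584 % 8192) = 4608 bytes left in the current pass over the buffer.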
+
+static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ struct admac_tx *adtx;
+
+ enum dma_status ret;
+ size_t residue;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ adtx = adchan->current_tx;
+
+ if (adtx && adtx->tx.cookie == cookie) {
+ ret = DMA_IN_PROGRESS;
+ residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
+ } else {
+ ret = DMA_IN_PROGRESS;
+ residue = 0;
+ list_for_each_entry(adtx, &adchan->issued, node) {
+ if (adtx->tx.cookie == cookie) {
+ residue = adtx->buf_len;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ dma_set_residue(txstate, residue);
+ return ret;
+}
+
+static void admac_start_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 startbit = 1 << (adchan->no / 2);
+
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(startbit, ad->base + REG_TX_START);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(startbit, ad->base + REG_RX_START);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
+}
+
+static void admac_stop_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 stopbit = 1 << (adchan->no / 2);
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(stopbit, ad->base + REG_TX_STOP);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(stopbit, ad->base + REG_RX_STOP);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
+}
+
+static void admac_reset_rings(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+
+ writel_relaxed(REG_CHAN_CTL_RST_RINGS,
+ ad->base + REG_CHAN_CTL(adchan->no));
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
+}
+
+static void admac_start_current_tx(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ int ch = adchan->no;
+
+ admac_reset_rings(adchan);
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));
+
+ admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
+ admac_start_chan(adchan);
+ admac_cyclic_write_desc(ad, ch, adchan->current_tx);
+}
+
+static void admac_issue_pending(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->submitted, &adchan->issued);
+ if (!list_empty(&adchan->issued) && !adchan->current_tx) {
+ tx = list_first_entry(&adchan->issued, struct admac_tx, node);
+ list_del(&tx->node);
+
+ adchan->current_tx = tx;
+ adchan->nperiod_acks = 0;
+ admac_start_current_tx(adchan);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static int admac_pause(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_stop_chan(adchan);
+
+ return 0;
+}
+
+static int admac_resume(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_start_chan(adchan);
+
+ return 0;
+}
+
+static int admac_terminate_all(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ admac_stop_chan(adchan);
+ admac_reset_rings(adchan);
+
+ adchan->current_tx = NULL;
+ /*
+ * Descriptors can only be freed after the tasklet
+ * has been killed (in admac_synchronize).
+ */
+ list_splice_tail_init(&adchan->submitted, &adchan->to_free);
+ list_splice_tail_init(&adchan->issued, &adchan->to_free);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return 0;
+}
+
+static void admac_synchronize(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *adtx, *_adtx;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->to_free, &head);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ tasklet_kill(&adchan->tasklet);
+
+ list_for_each_entry_safe(adtx, _adtx, &head, node) {
+ list_del(&adtx->node);
+ admac_desc_free(&adtx->tx);
+ }
+}
+
+static int admac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ dma_cookie_init(&adchan->chan);
+ return 0;
+}
+
+static void admac_free_chan_resources(struct dma_chan *chan)
+{
+ admac_terminate_all(chan);
+ admac_synchronize(chan);
+}
+
+static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
+ unsigned int index;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ index = dma_spec->args[0];
+
+ if (index >= ad->nchannels) {
+ dev_err(ad->dev, "channel index %u out of bounds\n", index);
+ return NULL;
+ }
+
+ return &ad->channels[index].chan;
+}
+
+static int admac_drain_reports(struct admac_data *ad, int channo)
+{
+ int count;
+
+ for (count = 0; count < 4; count++) {
+ u32 countval_hi, countval_lo, unk1, flags;
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
+ break;
+
+ countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+
+ dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
+ channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
+ }
+
+ return count;
+}
+
+static void admac_handle_status_err(struct admac_data *ad, int channo)
+{
+ bool handled = false;
+
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
+ handled = true;
+ }
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
+ handled = true;
+ }
+
+ if (unlikely(!handled)) {
+ dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
+ admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
+ STATUS_ERR, 0);
+ }
+}
+
+static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
+{
+ struct admac_chan *adchan = &ad->channels[channo];
+ unsigned long flags;
+ int nreports;
+
+ writel_relaxed(STATUS_DESC_DONE,
+ ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ nreports = admac_drain_reports(ad, channo);
+
+ if (adchan->current_tx) {
+ struct admac_tx *tx = adchan->current_tx;
+
+ adchan->nperiod_acks += nreports;
+ tx->reclaimed_pos += nreports * tx->period_len;
+ tx->reclaimed_pos %= 2 * tx->buf_len;
+
+ admac_cyclic_write_desc(ad, channo, tx);
+ tasklet_schedule(&adchan->tasklet);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static void admac_handle_chan_int(struct admac_data *ad, int no)
+{
+ u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));
+
+ if (cause & STATUS_ERR)
+ admac_handle_status_err(ad, no);
+
+ if (cause & STATUS_DESC_DONE)
+ admac_handle_status_desc_done(ad, no);
+}
+
+static irqreturn_t admac_interrupt(int irq, void *devid)
+{
+ struct admac_data *ad = devid;
+ u32 rx_intstate, tx_intstate;
+ int i;
+
+ rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
+ tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+
+ if (!tx_intstate && !rx_intstate)
+ return IRQ_NONE;
+
+ for (i = 0; i < ad->nchannels; i += 2) {
+ if (tx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ tx_intstate >>= 1;
+ }
+
+ for (i = 1; i < ad->nchannels; i += 2) {
+ if (rx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ rx_intstate >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void admac_chan_tasklet(struct tasklet_struct *t)
+{
+ struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
+ struct admac_tx *adtx;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result tx_result;
+ int nacks;
+
+ spin_lock_irq(&adchan->lock);
+ adtx = adchan->current_tx;
+ nacks = adchan->nperiod_acks;
+ adchan->nperiod_acks = 0;
+ spin_unlock_irq(&adchan->lock);
+
+ if (!adtx || !nacks)
+ return;
+
+ tx_result.result = DMA_TRANS_NOERROR;
+ tx_result.residue = 0;
+
+ dmaengine_desc_get_callback(&adtx->tx, &cb);
+ while (nacks--)
+ dmaengine_desc_callback_invoke(&cb, &tx_result);
+}
+
+static int admac_device_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ int wordsize = 0;
+ u32 bus_width = 0;
+
+ switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ wordsize = 1;
+ bus_width |= BUS_WIDTH_8BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ wordsize = 2;
+ bus_width |= BUS_WIDTH_16BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ wordsize = 4;
+ bus_width |= BUS_WIDTH_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * We take port_window_size to be the number of words in a frame.
+ *
+ * The controller has some means of out-of-band signalling, to the peripheral,
+ * of a word's position within a frame. That's where the importance of this
+ * control comes from.
+ */
+ switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
+ case 0 ... 1:
+ break;
+ case 2:
+ bus_width |= BUS_WIDTH_FRAME_2_WORDS;
+ break;
+ case 4:
+ bus_width |= BUS_WIDTH_FRAME_4_WORDS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));
+
+ /*
+ * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed to be
+ * held in the controller's per-channel FIFO. Transfers seem to be triggered
+ * around the time the FIFO occupancy touches FIFOCTL_THRESHOLD.
+ *
+ * The numbers we set are more or less arbitrary.
+ */
+ writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
+ | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
+ ad->base + REG_CHAN_FIFOCTL(adchan->no));
+
+ return 0;
+}
+
+static int admac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct admac_data *ad;
+ struct dma_device *dma;
+ int nchannels;
+ int err, irq, i;
+
+ err = of_property_read_u32(np, "dma-channels", &nchannels);
+ if (err || nchannels > NCHANNELS_MAX) {
+ dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
+ return -EINVAL;
+ }
+
+ ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ad);
+ ad->dev = &pdev->dev;
+ ad->nchannels = nchannels;
+
+ /*
+ * The controller has 4 IRQ outputs. Try them all until
+ * we find one we can use.
+ */
+ for (i = 0; i < IRQ_NOUTPUTS; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq >= 0) {
+ ad->irq_index = i;
+ break;
+ }
+ }
+
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
+
+ err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
+ 0, dev_name(&pdev->dev), ad);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "unable to register interrupt\n");
+
+ ad->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ad->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
+ "unable to obtain MMIO resource\n");
+
+ dma = &ad->dma;
+
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+
+ dma->dev = &pdev->dev;
+ dma->device_alloc_chan_resources = admac_alloc_chan_resources;
+ dma->device_free_chan_resources = admac_free_chan_resources;
+ dma->device_tx_status = admac_tx_status;
+ dma->device_issue_pending = admac_issue_pending;
+ dma->device_terminate_all = admac_terminate_all;
+ dma->device_synchronize = admac_synchronize;
+ dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
+ dma->device_config = admac_device_config;
+ dma->device_pause = admac_pause;
+ dma->device_resume = admac_resume;
+
+ dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+
+ INIT_LIST_HEAD(&dma->channels);
+ for (i = 0; i < nchannels; i++) {
+ struct admac_chan *adchan = &ad->channels[i];
+
+ adchan->host = ad;
+ adchan->no = i;
+ adchan->chan.device = &ad->dma;
+ spin_lock_init(&adchan->lock);
+ INIT_LIST_HEAD(&adchan->submitted);
+ INIT_LIST_HEAD(&adchan->issued);
+ INIT_LIST_HEAD(&adchan->to_free);
+ list_add_tail(&adchan->chan.device_node, &dma->channels);
+ tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
+ }
+
+ err = dma_async_device_register(&ad->dma);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+
+ err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
+ if (err) {
+ dma_async_device_unregister(&ad->dma);
+ return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ }
+
+ return 0;
+}
+
+static int admac_remove(struct platform_device *pdev)
+{
+ struct admac_data *ad = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&ad->dma);
+
+ return 0;
+}
+
+static const struct of_device_id admac_of_match[] = {
+ { .compatible = "apple,admac", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, admac_of_match);
+
+static struct platform_driver apple_admac_driver = {
+ .driver = {
+ .name = "apple-admac",
+ .of_match_table = admac_of_match,
+ },
+ .probe = admac_probe,
+ .remove = admac_remove,
+};
+module_platform_driver(apple_admac_driver);
+
+MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7b3e6030f7b4..b102d8eb5d83 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -649,7 +649,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
}
/*
- * Only check that maxburst and addr width values are supported by the
+ * Only check that maxburst and addr width values are supported by
* the controller but not that the configuration is good to perform the
* transfer since we don't know the direction at this stage.
*/
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 64239da02e74..064761289a73 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
/*
* Broadcom SBA RAID Driver
diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c
index e169f18da551..502a45d76adc 100644
--- a/drivers/dma/bestcomm/ata.c
+++ b/drivers/dma/bestcomm/ata.c
@@ -1,16 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Bestcomm ATA task driver
*
- *
* Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
* 2003-2004 (c) MontaVista, Software, Inc.
*
* Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2006 Freescale - John Rigby
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -154,4 +150,3 @@ EXPORT_SYMBOL_GPL(bcom_ata_release);
MODULE_DESCRIPTION("BestComm ATA task driver");
MODULE_AUTHOR("John Rigby");
MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 1822a7034630..eabbcfcaa7cb 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -1,16 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for MPC52xx processor BestComm peripheral controller
*
- *
* Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2005 Varma Electronics Oy,
* ( by Andrey Volkov <avolkov@varma-el.com> )
* Copyright (C) 2003-2004 MontaVista, Software, Inc.
* ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -528,4 +524,3 @@ MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/dma/bestcomm/fec.c b/drivers/dma/bestcomm/fec.c
index d203618ac11f..3a4a2f7910c6 100644
--- a/drivers/dma/bestcomm/fec.c
+++ b/drivers/dma/bestcomm/fec.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Bestcomm FEC tasks driver
*
- *
* Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003-2004 MontaVista, Software, Inc.
* ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -267,4 +263,3 @@ EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
MODULE_DESCRIPTION("BestComm FEC tasks driver");
MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index 2074e0e3fa21..c465758e7193 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Simple memory allocator for on-board SRAM
*
- *
* Maintainer : Sylvain Munaut <tnt@246tNt.com>
*
* Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/err.h>
@@ -176,4 +172,3 @@ void bcom_sram_free(void *ptr)
spin_unlock(&bcom_sram->lock);
}
EXPORT_SYMBOL_GPL(bcom_sram_free);
-
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5161b73c30c4..f30dabc99795 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -55,6 +56,9 @@
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+#define AXI_DMAC_REG_COHERENCY_DESC 0x14
+#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
+#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
@@ -979,6 +983,18 @@ static int axi_dmac_probe(struct platform_device *pdev)
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
+
+ if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
+ !AXI_DMAC_DST_COHERENT_GET(ret)) {
+ dev_err(dmac->dma_dev.dev,
+ "Coherent DMA not supported in hardware");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+ }
+
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_clk_disable;
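The coherency probe added above follows the usual <linux/bitfield.h> idiom: declare the field mask once with BIT()/GENMASK() and let FIELD_GET()/FIELD_PREP() do the shifting. A standalone sketch with assumed field names mirroring the definitions above:

/* Illustration only; the EXAMPLE_* masks are stand-ins for the
 * AXI_DMAC_* definitions added above. */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_DST_COHERENT	BIT(0)		/* cf. AXI_DMAC_DST_COHERENT_MSK */
#define EXAMPLE_DST_WIDTH	GENMASK(3, 0)	/* cf. AXI_DMAC_DMA_DST_WIDTH_MSK */

static bool example_dst_is_coherent(u32 desc_reg)
{
	/* FIELD_GET() masks out the field and shifts it down to bit 0. */
	return FIELD_GET(EXAMPLE_DST_COHERENT, desc_reg);
}

static u32 example_encode_dst_width(u32 width)
{
	/* FIELD_PREP() shifts the value up into the field's position. */
	return FIELD_PREP(EXAMPLE_DST_WIDTH, width);
}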
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index e2ec540e6519..2a483802d9ee 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -388,7 +388,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
if (i != (sg_len - 1) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
- /* Automatically proceeed to the next descriptor. */
+ /* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e80feeea0e01..c741b6431958 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1153,13 +1153,6 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
- if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY_SG");
- return -EIO;
- }
-
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f696246f57fd..9fe2ae794316 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -22,51 +22,50 @@
#include <linux/wait.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
+module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_device[32];
-module_param_string(device, test_device, sizeof(test_device),
- S_IRUGO | S_IWUSR);
+module_param_string(device, test_device, sizeof(test_device), 0644);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
+module_param(threads_per_chan, uint, 0644);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO | S_IWUSR);
+module_param(max_channels, uint, 0644);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO | S_IWUSR);
+module_param(iterations, uint, 0644);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int dmatest;
-module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+module_param(dmatest, uint, 0644);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
+module_param(xor_sources, uint, 0644);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
+module_param(pq_sources, uint, 0644);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, int, S_IRUGO | S_IWUSR);
+module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
static bool noverify;
-module_param(noverify, bool, S_IRUGO | S_IWUSR);
+module_param(noverify, bool, 0644);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
static bool norandom;
@@ -74,7 +73,7 @@ module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
static bool verbose;
-module_param(verbose, bool, S_IRUGO | S_IWUSR);
+module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
static int alignment = -1;
@@ -86,7 +85,7 @@ module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
+module_param(polled, bool, 0644);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
@@ -154,7 +153,7 @@ static const struct kernel_param_ops run_ops = {
.get = dmatest_run_get,
};
static bool dmatest_run;
-module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+module_param_cb(run, &run_ops, &dmatest_run, 0644);
MODULE_PARM_DESC(run, "Run the test (default: false)");
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
@@ -290,7 +289,7 @@ static const struct kernel_param_ops wait_ops = {
.get = dmatest_wait_get,
.set = param_set_bool,
};
-module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+module_param_cb(wait, &wait_ops, &wait, 0444);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
static bool dmatest_match_channel(struct dmatest_params *params,
@@ -579,10 +578,10 @@ static int dmatest_func(void *data)
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
- enum dma_ctrl_flags flags;
+ enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- unsigned int buf_size;
+ unsigned int buf_size;
struct dmatest_data *src;
struct dmatest_data *dst;
int i;
@@ -1095,8 +1094,8 @@ static void add_threaded_test(struct dmatest_info *info)
/* Copy test parameters */
params->buf_size = test_buf_size;
- strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
- strlcpy(params->device, strim(test_device), sizeof(params->device));
+ strscpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strscpy(params->device, strim(test_device), sizeof(params->device));
params->threads_per_chan = threads_per_chan;
params->max_channels = max_channels;
params->iterations = iterations;
@@ -1240,7 +1239,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
dtc = list_last_entry(&info->channels,
struct dmatest_chan,
node);
- strlcpy(chan_reset_val,
+ strscpy(chan_reset_val,
dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
ret = -EBUSY;
@@ -1263,14 +1262,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;
- strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+ strscpy(chan_reset_val, dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
goto add_chan_err;
}
} else {
/* Clear test_channel if no channels were added successfully */
- strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
+ strscpy(chan_reset_val, "", sizeof(chan_reset_val));
ret = -EBUSY;
goto add_chan_err;
}
@@ -1295,7 +1294,7 @@ static int dmatest_chan_get(char *val, const struct kernel_param *kp)
mutex_lock(&info->lock);
if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
stop_threaded_test(info);
- strlcpy(test_channel, "", sizeof(test_channel));
+ strscpy(test_channel, "", sizeof(test_channel));
}
mutex_unlock(&info->lock);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c741da02b67e..a183d93bd7e2 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -982,6 +982,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_hw_desc *desc)
{
+ if (!desc->lli) {
+ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
+ return;
+ }
+
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
le64_to_cpu(desc->lli->sar),
@@ -1049,6 +1054,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
@@ -1078,6 +1088,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
axi_chan_start_first_queued(chan);
}
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 468d1097a1ec..07f756479663 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -64,8 +64,8 @@ static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
+ struct dw_edma_chip *chip = desc->chan->dw->chip;
struct dw_edma_chan *chan = desc->chan;
- struct dw_edma *dw = chan->chip->dw;
struct dw_edma_chunk *chunk;
chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
@@ -82,11 +82,11 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
*/
chunk->cb = !(desc->chunks_alloc % 2);
if (chan->dir == EDMA_DIR_WRITE) {
- chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
- chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+ chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
+ chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
} else {
- chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
- chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+ chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
+ chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
}
if (desc->chunk) {
@@ -339,21 +339,40 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (!chan->configured)
return NULL;
- switch (chan->config.direction) {
- case DMA_DEV_TO_MEM: /* local DMA */
- if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
- break;
- return NULL;
- case DMA_MEM_TO_DEV: /* local DMA */
- if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
- break;
- return NULL;
- default: /* remote DMA */
- if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
- break;
- if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
- break;
- return NULL;
+ /*
+ * Local Root Port/End-point Remote End-point
+ * +-----------------------+ PCIe bus +----------------------+
+ * | | +-+ | |
+ * | DEV_TO_MEM Rx Ch <----+ +---+ Tx Ch DEV_TO_MEM |
+ * | | | | | |
+ * | MEM_TO_DEV Tx Ch +----+ +---> Rx Ch MEM_TO_DEV |
+ * | | +-+ | |
+ * +-----------------------+ +----------------------+
+ *
+ * 1. Normal logic:
+ * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
+ * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
+ * for the device read operations (DEV_TO_MEM) and the Tx channel
+ * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
+ *
+ * 2. Inverted logic:
+ * If eDMA is embedded into a Remote PCIe EP and is controlled by the
+ * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
+ * channel (EDMA_DIR_WRITE) will be used for the device read operations
+ * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
+ * operations (MEM_TO_DEV).
+ *
+ * It is the client driver's responsibility to choose a proper channel
+ * for the DMA transfers.
+ */
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
+ (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
+ return NULL;
+ } else {
+ if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
+ (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
+ return NULL;
}
if (xfer->type == EDMA_XFER_CYCLIC) {
@@ -423,7 +442,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (chan->dir == EDMA_DIR_WRITE) {
+ if (dir == DMA_DEV_TO_MEM) {
burst->sar = src_addr;
if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
@@ -663,7 +682,7 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->status != EDMA_ST_IDLE)
return -EBUSY;
- pm_runtime_get(chan->chip->dev);
+ pm_runtime_get(chan->dw->chip->dev);
return 0;
}
@@ -685,15 +704,15 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
cpu_relax();
}
- pm_runtime_put(chan->chip->dev);
+ pm_runtime_put(chan->dw->chip->dev);
}
-static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
u32 wr_alloc, u32 rd_alloc)
{
+ struct dw_edma_chip *chip = dw->chip;
struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
@@ -726,7 +745,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->vc.chan.private = dt_region;
- chan->chip = chip;
+ chan->dw = dw;
chan->id = j;
chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
@@ -734,9 +753,9 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->status = EDMA_ST_IDLE;
if (write)
- chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
else
- chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
@@ -766,13 +785,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
vchan_init(&chan->vc, dma);
if (write) {
- dt_region->paddr = dw->dt_region_wr[j].paddr;
- dt_region->vaddr = dw->dt_region_wr[j].vaddr;
- dt_region->sz = dw->dt_region_wr[j].sz;
+ dt_region->paddr = chip->dt_region_wr[j].paddr;
+ dt_region->vaddr = chip->dt_region_wr[j].vaddr;
+ dt_region->sz = chip->dt_region_wr[j].sz;
} else {
- dt_region->paddr = dw->dt_region_rd[j].paddr;
- dt_region->vaddr = dw->dt_region_rd[j].vaddr;
- dt_region->sz = dw->dt_region_rd[j].sz;
+ dt_region->paddr = chip->dt_region_rd[j].paddr;
+ dt_region->vaddr = chip->dt_region_rd[j].vaddr;
+ dt_region->sz = chip->dt_region_rd[j].sz;
}
dw_edma_v0_core_device_config(chan);
@@ -826,11 +845,11 @@ static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
(*mask)++;
}
-static int dw_edma_irq_request(struct dw_edma_chip *chip,
+static int dw_edma_irq_request(struct dw_edma *dw,
u32 *wr_alloc, u32 *rd_alloc)
{
- struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
+ struct dw_edma_chip *chip = dw->chip;
+ struct device *dev = dw->chip->dev;
u32 wr_mask = 1;
u32 rd_mask = 1;
int i, err = 0;
@@ -839,12 +858,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
- if (dw->nr_irqs < 1)
+ if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
return -EINVAL;
- if (dw->nr_irqs == 1) {
+ dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
+ if (!dw->irq)
+ return -ENOMEM;
+
+ if (chip->nr_irqs == 1) {
/* Common IRQ shared among all channels */
- irq = dw->ops->irq_vector(dev, 0);
+ irq = chip->ops->irq_vector(dev, 0);
err = request_irq(irq, dw_edma_interrupt_common,
IRQF_SHARED, dw->name, &dw->irq[0]);
if (err) {
@@ -854,9 +877,11 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[0].msi);
+
+ dw->nr_irqs = 1;
} else {
/* Distribute IRQs equally among all channels */
- int tmp = dw->nr_irqs;
+ int tmp = chip->nr_irqs;
while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
@@ -867,7 +892,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
- irq = dw->ops->irq_vector(dev, i);
+ irq = chip->ops->irq_vector(dev, i);
err = request_irq(irq,
i < *wr_alloc ?
dw_edma_interrupt_write :
@@ -901,20 +926,22 @@ int dw_edma_probe(struct dw_edma_chip *chip)
return -EINVAL;
dev = chip->dev;
- if (!dev)
+ if (!dev || !chip->ops)
return -EINVAL;
- dw = chip->dw;
- if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
- return -EINVAL;
+ dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw->chip = chip;
raw_spin_lock_init(&dw->lock);
- dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+ dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
- dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+ dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
@@ -936,17 +963,17 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dw_edma_v0_core_off(dw);
/* Request IRQs */
- err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+ err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
if (err)
return err;
/* Setup write channels */
- err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+ err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
/* Setup read channels */
- err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+ err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@@ -954,15 +981,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
pm_runtime_enable(dev);
/* Turn debugfs on */
- dw_edma_v0_core_debugfs_on(chip);
+ dw_edma_v0_core_debugfs_on(dw);
+
+ chip->dw = dw;
return 0;
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
-
- dw->nr_irqs = 0;
+ free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
return err;
}
@@ -980,7 +1007,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
+ free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
@@ -1001,7 +1028,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
}
/* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(chip);
+ dw_edma_v0_core_debugfs_off(dw);
return 0;
}
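After this rework, dw_edma_probe() no longer receives a pre-populated struct dw_edma; the glue driver only describes the hardware in struct dw_edma_chip (device, ops providing ->irq_vector, IRQ count, map format, register base, channel counts, regions, flags) and the core allocates its private state itself. A condensed, hypothetical glue-driver sketch loosely following the PCIe glue changed below; the numeric values and example_* names are placeholders:

/* Sketch of a dw-edma glue driver under the chip-based probe contract.
 * Values and example_* names are placeholders, not from the patch. */
#include <linux/device.h>
#include <linux/dma/edma.h>

static int example_irq_vector(struct device *dev, unsigned int nr)
{
	return 0;	/* would map @nr to a platform or MSI vector */
}

static const struct dw_edma_core_ops example_ops = {
	.irq_vector = example_irq_vector,
};

static int example_glue_probe(struct device *dev, void __iomem *regs)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->ops = &example_ops;		/* ->irq_vector is now mandatory */
	chip->nr_irqs = 1;			/* single shared IRQ path */
	chip->mf = EDMA_MF_EDMA_LEGACY;
	chip->reg_base = regs;
	chip->ll_wr_cnt = 1;
	chip->ll_rd_cnt = 1;
	chip->flags = DW_EDMA_CHIP_LOCAL;	/* eDMA driven from the local CPU */
	/* chip->ll_region_wr[0], chip->dt_region_wr[0], etc. would be
	 * filled in from device resources here. */

	return dw_edma_probe(chip);	/* the core allocates struct dw_edma */
}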
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 60316d408c3e..85df2d511907 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -15,20 +15,12 @@
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
-#define EDMA_MAX_WR_CH 8
-#define EDMA_MAX_RD_CH 8
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
-enum dw_edma_map_format {
- EDMA_MF_EDMA_LEGACY = 0x0,
- EDMA_MF_EDMA_UNROLL = 0x1,
- EDMA_MF_HDMA_COMPAT = 0x5
-};
-
enum dw_edma_request {
EDMA_REQ_NONE = 0,
EDMA_REQ_STOP,
@@ -57,12 +49,6 @@ struct dw_edma_burst {
u32 sz;
};
-struct dw_edma_region {
- phys_addr_t paddr;
- void __iomem *vaddr;
- size_t sz;
-};
-
struct dw_edma_chunk {
struct list_head list;
struct dw_edma_chan *chan;
@@ -87,7 +73,7 @@ struct dw_edma_desc {
struct dw_edma_chan {
struct virt_dma_chan vc;
- struct dw_edma_chip *chip;
+ struct dw_edma *dw;
int id;
enum dw_edma_dir dir;
@@ -109,10 +95,6 @@ struct dw_edma_irq {
struct dw_edma *dw;
};
-struct dw_edma_core_ops {
- int (*irq_vector)(struct device *dev, unsigned int nr);
-};
-
struct dw_edma {
char name[20];
@@ -122,21 +104,14 @@ struct dw_edma {
struct dma_device rd_edma;
u16 rd_ch_cnt;
- struct dw_edma_region rg_region; /* Registers */
- struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
- struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
- struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
- struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
-
struct dw_edma_irq *irq;
int nr_irqs;
- enum dw_edma_map_format mf;
-
struct dw_edma_chan *chan;
- const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
+
+ struct dw_edma_chip *chip;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index cee7aa231d7b..d6b5e2463884 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -148,7 +148,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_pcie_data vsec_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
- struct dw_edma *dw;
int err, nr_irqs;
int i, mask;
@@ -197,10 +196,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
if (!chip)
return -ENOMEM;
- dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
- if (!dw)
- return -ENOMEM;
-
/* IRQs allocation */
nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
@@ -211,29 +206,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
}
/* Data structure initialization */
- chip->dw = dw;
chip->dev = dev;
chip->id = pdev->devfn;
- chip->irq = pdev->irq;
- dw->mf = vsec_data.mf;
- dw->nr_irqs = nr_irqs;
- dw->ops = &dw_edma_pcie_core_ops;
- dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
- dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
+ chip->mf = vsec_data.mf;
+ chip->nr_irqs = nr_irqs;
+ chip->ops = &dw_edma_pcie_core_ops;
- dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
- if (!dw->rg_region.vaddr)
- return -ENOMEM;
+ chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
- dw->rg_region.vaddr += vsec_data.rg.off;
- dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
- dw->rg_region.paddr += vsec_data.rg.off;
- dw->rg_region.sz = vsec_data.rg.sz;
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ if (!chip->reg_base)
+ return -ENOMEM;
- for (i = 0; i < dw->wr_ch_cnt; i++) {
- struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
- struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+ for (i = 0; i < chip->ll_wr_cnt; i++) {
+ struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
+ struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
@@ -256,9 +245,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
dt_region->sz = dt_block->sz;
}
- for (i = 0; i < dw->rd_ch_cnt; i++) {
- struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
- struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+ for (i = 0; i < chip->ll_rd_cnt; i++) {
+ struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
+ struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
@@ -282,45 +271,45 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
}
/* Debug info */
- if (dw->mf == EDMA_MF_EDMA_LEGACY)
- pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
- else if (dw->mf == EDMA_MF_EDMA_UNROLL)
- pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
- else if (dw->mf == EDMA_MF_HDMA_COMPAT)
- pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+ if (chip->mf == EDMA_MF_EDMA_LEGACY)
+ pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_EDMA_UNROLL)
+ pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_COMPAT)
+ pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
else
- pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
+ pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
- dw->rg_region.vaddr, &dw->rg_region.paddr);
+ chip->reg_base);
- for (i = 0; i < dw->wr_ch_cnt; i++) {
+ for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
- dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+ vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
- dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+ vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
}
- for (i = 0; i < dw->rd_ch_cnt; i++) {
+ for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
- dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+ vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
- dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+ vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
}
- pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
+ pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
/* Validating if PCI interrupts were enabled */
if (!pci_dev_msi_enabled(pdev)) {
@@ -328,10 +317,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -EPERM;
}
- dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
- if (!dw->irq)
- return -ENOMEM;
-
/* Starting eDMA driver */
err = dw_edma_probe(chip);
if (err) {
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 33bc1e6c4cf2..77e6cfe52e0a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return dw->rg_region.vaddr;
+ return dw->chip->reg_base;
}
#define SET_32(dw, name, value) \
@@ -96,7 +96,7 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY)
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
@@ -108,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -133,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -169,7 +169,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -194,7 +194,7 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -256,7 +256,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
@@ -272,7 +272,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
SET_RW_32(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
@@ -280,7 +280,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
SET_RW_32(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
@@ -301,6 +301,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
+ struct dw_edma_chan *chan = chunk->chan;
struct dw_edma_v0_lli __iomem *lli;
struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
@@ -314,9 +315,11 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
j = chunk->bursts_alloc;
list_for_each_entry(child, &chunk->burst->list, list) {
j--;
- if (!j)
- control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
-
+ if (!j) {
+ control |= DW_EDMA_V0_LIE;
+ if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ control |= DW_EDMA_V0_RIE;
+ }
/* Channel control */
SET_LL_32(&lli[i].control, control);
/* Transfer size */
@@ -357,7 +360,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp;
dw_edma_v0_core_write_chunk(chunk);
@@ -365,7 +368,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
SET_RW_32(dw, chan->dir, engine_en, BIT(0));
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
switch (chan->id) {
case 0:
SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
@@ -414,19 +417,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
-
- #ifdef CONFIG_64BIT
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
- #else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
}
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
@@ -435,7 +430,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp = 0;
/* MSI done addr - low, high */
@@ -505,12 +500,12 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
}
/* eDMA debugfs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
- dw_edma_v0_debugfs_on(chip);
+ dw_edma_v0_debugfs_on(dw);
}
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
- dw_edma_v0_debugfs_off(chip);
+ dw_edma_v0_debugfs_off(dw);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 2afa626b8300..75aec6d31b21 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -22,7 +22,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir)
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 4b3bcffd15ef..5226c9014703 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -54,7 +54,7 @@ struct debugfs_entries {
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
- if (dw->mf == EDMA_MF_EDMA_LEGACY &&
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
@@ -173,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -242,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -282,13 +282,13 @@ static void dw_edma_debugfs_regs(void)
dw_edma_debugfs_regs_rd(regs_dir);
}
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
{
- dw = chip->dw;
+ dw = _dw;
if (!dw)
return;
- regs = dw->rg_region.vaddr;
+ regs = dw->chip->reg_base;
if (!regs)
return;
@@ -296,16 +296,16 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw->debugfs)
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+ debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
{
- dw = chip->dw;
+ dw = _dw;
if (!dw)
return;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index d0ff25a9ea5c..3391b86edf5a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -12,14 +12,14 @@
#include <linux/dma/edma.h>
#ifdef CONFIG_DEBUG_FS
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
-static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7ab83fe601ed..97ba3bfc10b1 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -29,9 +29,6 @@
* (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
* of which use ARM any more). See the "Databook" from Synopsys for
* information beyond what licensees probably provide.
- *
- * The driver has been tested with the Atmel AT32AP7000, which does not
- * support descriptor writeback.
*/
/* The set of bus widths supported by the DMA controller */
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 11d254e450b0..f9912c3dd4d7 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -102,10 +102,12 @@ free_map:
return ERR_PTR(ret);
}
+#ifdef CONFIG_OF
static const struct of_device_id rzn1_dmac_match[] = {
{ .compatible = "renesas,rzn1-dma" },
{}
};
+#endif
static int rzn1_dmamux_probe(struct platform_device *pdev)
{
@@ -140,6 +142,7 @@ static const struct of_device_id rzn1_dmamux_match[] = {
{ .compatible = "renesas,rzn1-dmamux" },
{}
};
+MODULE_DEVICE_TABLE(of, rzn1_dmamux_match);
static struct platform_driver rzn1_dmamux_driver = {
.driver = {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 971ff5f9ae84..d19ea885c63e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1183,7 +1183,7 @@ fail:
*
* Synchronizes the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory associated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 3ae05d1446a5..a06a1575a2a5 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -559,9 +559,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
}
for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->dma_dev_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 3bffe3ecbd1b..65c6094ce063 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1047,7 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
return -ENOMEM;
imxdma->dev = &pdev->dev;
- imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
+ imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f37a276f519e..fbea5f62dd98 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -183,12 +183,14 @@
BIT(DMA_DEV_TO_DEV))
#define SDMA_WATERMARK_LEVEL_N_FIFOS GENMASK(15, 12)
+#define SDMA_WATERMARK_LEVEL_OFF_FIFOS GENMASK(19, 16)
+#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO GENMASK(31, 28)
#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
#define SDMA_DONE0_CONFIG_DONE_SEL BIT(7)
#define SDMA_DONE0_CONFIG_DONE_DIS BIT(6)
-/**
+/*
* struct sdma_script_start_addrs - SDMA script start pointers
*
* start addresses of the different functions in the physical
@@ -424,6 +426,14 @@ struct sdma_desc {
* @data: specific sdma interface structure
* @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
+ * @terminated: terminated list
+ * @is_ram_script: flag for script in ram
+ * @n_fifos_src: number of source device fifos
+ * @n_fifos_dst: number of destination device fifos
+ * @sw_done: software done flag
+ * @stride_fifos_src: stride for source device FIFOs
+ * @stride_fifos_dst: stride for destination device FIFOs
+ * @words_per_fifo: number of words copied at a time per FIFO
*/
struct sdma_channel {
struct virt_dma_chan vc;
@@ -451,6 +461,9 @@ struct sdma_channel {
bool is_ram_script;
unsigned int n_fifos_src;
unsigned int n_fifos_dst;
+ unsigned int stride_fifos_src;
+ unsigned int stride_fifos_dst;
+ unsigned int words_per_fifo;
bool sw_done;
};
@@ -1240,17 +1253,29 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
{
unsigned int n_fifos;
+ unsigned int stride_fifos;
+ unsigned int words_per_fifo;
if (sdmac->sw_done)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
- if (sdmac->direction == DMA_DEV_TO_MEM)
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
n_fifos = sdmac->n_fifos_src;
- else
+ stride_fifos = sdmac->stride_fifos_src;
+ } else {
n_fifos = sdmac->n_fifos_dst;
+ stride_fifos = sdmac->stride_fifos_dst;
+ }
+
+ words_per_fifo = sdmac->words_per_fifo;
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
+ if (words_per_fifo)
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
}
static int sdma_config_channel(struct dma_chan *chan)
@@ -1764,6 +1789,9 @@ static int sdma_config(struct dma_chan *chan,
}
sdmac->n_fifos_src = sdmacfg->n_fifos_src;
sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
+ sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
+ sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
+ sdmac->words_per_fifo = sdmacfg->words_per_fifo;
sdmac->sw_done = sdmacfg->sw_done;
}
@@ -2183,8 +2211,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
- sdma);
+ ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
+ dev_name(&pdev->dev), sdma);
if (ret)
goto err_irq;
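The new stride_fifos_* and words_per_fifo values reach sdma_config() through the peripheral_config pointer of struct dma_slave_config, as the hunk above shows. A hedged client-side sketch; the sdma_peripheral_config struct and <linux/dma/imx-sdma.h> header are inferred from the sdmacfg accesses above, and the numbers are purely illustrative:

/* Hypothetical multi-FIFO peripheral setup (e.g. an audio interface).
 * Struct/header names and values are assumptions for illustration. */
#include <linux/dmaengine.h>
#include <linux/dma/imx-sdma.h>

static int example_config_multi_fifo(struct dma_chan *chan, phys_addr_t fifo_addr)
{
	struct sdma_peripheral_config sdmacfg = {
		.n_fifos_src = 4,	/* four device FIFOs feed this channel */
		.stride_fifos_src = 2,	/* -> SDMA_WATERMARK_LEVEL_OFF_FIFOS */
		.words_per_fifo = 1,	/* -> WORDS_PER_FIFO field, stored as N - 1 */
	};
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.peripheral_config = &sdmacfg,
		.peripheral_size = sizeof(sdmacfg),
	};

	return dmaengine_slave_config(chan, &cfg);
}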
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index f8847c48ba03..9ae92b8940ef 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -373,7 +373,7 @@ static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
/*
* free child CVD after completion.
- * the parent CVD would be freeed with desc_free by user.
+ * the parent CVD would be freed with desc_free by user.
*/
if (cvd->parent != cvd)
kfree(cvd);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 9ebd9231f62f..f7717c44b887 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -138,7 +138,7 @@ struct mtk_hsdma_vdesc {
/**
* struct mtk_hsdma_cb - This is the struct holding extra info required for RX
- * ring to know what relevant VD the the PD is being
+ * ring to know what relevant VD the PD is being
* mapped to.
* @vd: Pointer to the relevant VD.
* @flag: Flag indicating what action should be taken when VD
@@ -761,7 +761,7 @@ static void mtk_hsdma_free_active_desc(struct dma_chan *c)
/*
* Once issue_synchronize is being set, which means once the hardware
* consumes all descriptors for the channel in the ring, the
- * synchronization must be be notified immediately it is completed.
+ * synchronization must be notified immediately it is completed.
*/
spin_lock(&hvc->vc.lock);
if (!list_empty(&hvc->desc_hw_processing)) {
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 74755093e14b..7459382a8353 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* MOXA ART SoCs DMA Engine support.
*
* Copyright (C) 2013 Jonas Jensen
*
* Jonas Jensen <jonas.jensen@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f10b29034da1..f629ef6fd3c2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -313,7 +313,7 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
- /* assign coookie */
+ /* assign cookie */
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 1f0bbaed4643..95a462a1f511 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -193,7 +193,7 @@ struct owl_dma_pchan {
/**
* struct owl_dma_pchan - Wrapper for DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
* @cfg: slave configuration for this channel
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
index 26b7a5ed9ac7..f8a5d7c1fb40 100644
--- a/drivers/dma/ppc4xx/adma.h
+++ b/drivers/dma/ppc4xx/adma.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* 2006-2009 (C) DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of
- * any kind, whether express or implied.
*/
#ifndef _PPC440SPE_ADMA_H
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
index bcde2df2f373..1ff4be23db0f 100644
--- a/drivers/dma/ppc4xx/dma.h
+++ b/drivers/dma/ppc4xx/dma.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* 440SPe's DMA engines support header file
*
* 2006-2009 (C) DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
- *
- * This file is licensed under the term of the GNU General Public License
- * version 2. The program licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#ifndef _PPC440SPE_DMA_H
diff --git a/drivers/dma/ppc4xx/xor.h b/drivers/dma/ppc4xx/xor.h
index daed7384daac..da1230df2817 100644
--- a/drivers/dma/ppc4xx/xor.h
+++ b/drivers/dma/ppc4xx/xor.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* 440SPe's XOR engines support header file
*
* 2006-2009 (C) DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
- *
- * This file is licensed under the term of the GNU General Public License
- * version 2. The program licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#ifndef _PPC440SPE_XOR_H
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 8e14c72d03f0..f6ed7e889781 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -202,7 +202,7 @@ struct s3c24xx_dma_phy {
* struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
* @id: the id of the channel
* @name: name of the channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phy: the physical channel utilized by this channel, if there is one
* @runtime_addr: address for RX/TX according to the runtime config
* @at: active transaction on this channel
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index db5a4ef76077..4f8b8498c5c6 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->desc && !chan->desc->in_use) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return chan->desc;
- }
-
- spin_unlock_irqrestore(&chan->lock, flags);
desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
@@ -111,7 +101,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
spin_lock_irqsave(&chan->vchan.lock, iflags);
- chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
spin_unlock_irqrestore(&chan->vchan.lock, iflags);
@@ -170,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
unsigned long flags;
u64 residue = 0;
struct sf_pdma_desc *desc;
- struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *tx = NULL;
spin_lock_irqsave(&chan->vchan.lock, flags);
- tx = &chan->desc->vdesc.tx;
+ list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
+ if (vd->tx.cookie == cookie)
+ tx = &vd->tx;
+
+ if (!tx)
+ goto out;
+
if (cookie == tx->chan->completed_cookie)
goto out;
@@ -241,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
writel(v, regs->ctrl);
}
+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
+{
+ struct virt_dma_chan *vchan = &chan->vchan;
+ struct virt_dma_desc *vdesc;
+
+ if (list_empty(&vchan->desc_issued))
+ return NULL;
+
+ vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
+
+ return container_of(vdesc, struct sf_pdma_desc, vdesc);
+}
+
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc = chan->desc;
@@ -268,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
+ /* vchan_issue_pending has made a check that desc is not NULL */
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
sf_pdma_xfer_desc(chan);
+ }
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
@@ -298,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&chan->vchan.lock, flags);
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
+ if (chan->desc)
+ sf_pdma_xfer_desc(chan);
+
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index ee2872e7d64c..476847a4916b 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -630,6 +631,21 @@ static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
*/
}
+static void rz_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat;
+ int ret;
+
+ ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
+ 100, 100000, false, channel, CHSTAT, 1);
+ if (ret < 0)
+ dev_warn(dmac->dev, "DMA Timeout\n");
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+}
+
/*
* -----------------------------------------------------------------------------
* IRQ handling
@@ -909,6 +925,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
+ engine->device_synchronize = rz_dmac_device_synchronize;
engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
dma_set_max_seg_size(engine->dev, U32_MAX);
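The rz_dmac_device_synchronize() callback added above leans on read_poll_timeout() from <linux/iopoll.h>, which keeps re-reading an accessor into a variable until a condition holds or a timeout expires. A minimal sketch, assuming a hypothetical channel type, status accessor and BUSY flag (these are illustrative stand-ins, not the real rz-dmac symbols):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Hypothetical channel and status accessor, standing in for the driver's own. */
struct example_chan {
	void __iomem *status;
};

static u32 example_read_status(struct example_chan *ch)
{
	return readl(ch->status);
}

#define EXAMPLE_BUSY	BIT(0)

static int example_wait_idle(struct example_chan *ch)
{
	u32 status;

	/* Re-read every 100 us, give up after 100 ms; returns -ETIMEDOUT on expiry. */
	return read_poll_timeout(example_read_status, status,
				 !(status & EXAMPLE_BUSY),
				 100, 100000, false, ch);
}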
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2138b80435ab..474d3ba8ec9f 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1237,11 +1237,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
- int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e1827393143f..f093e08c23b1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1970,7 +1970,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = dma40_memcpy_conf_phy;
- /* Generate interrrupt at end of transfer or relink. */
+ /* Generate interrupt at end of transfer or relink. */
d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
/* Generate interrupt on error. */
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index caf0cce8f528..b11927ed4367 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1328,12 +1328,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
return IRQ_NONE;
}
id = __ffs(status);
-
chan = &dmadev->chan[id];
- if (!chan) {
- dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
- return IRQ_NONE;
- }
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 93f1645ae928..f291b1b4db32 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -122,6 +123,15 @@
SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+/*
+ * Normal DMA supports individual transfers (segments) up to 128k.
+ * Dedicated DMA supports transfers up to 16M. We can only report
+ * one size limit, so we have to use the smaller value.
+ */
+#define SUN4I_NDMA_MAX_SEG_SIZE SZ_128K
+#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
+#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -155,7 +165,8 @@ struct sun4i_dma_contract {
struct virt_dma_desc vd;
struct list_head demands;
struct list_head completed_demands;
- int is_cyclic;
+ bool is_cyclic : 1;
+ bool use_half_int : 1;
};
struct sun4i_dma_dev {
@@ -372,7 +383,7 @@ static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
if (promise) {
vchan->contract = contract;
vchan->pchan = pchan;
- set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+ set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
configure_pchan(pchan, promise);
}
@@ -735,12 +746,21 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
*
* Which requires half the engine programming for the same
* functionality.
+ *
+ * This only works if two periods fit in a single promise. That will
+ * always be the case for dedicated DMA, where the hardware has a much
+ * larger maximum transfer size than advertised to clients.
*/
- nr_periods = DIV_ROUND_UP(len / period_len, 2);
+ if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
+ period_len *= 2;
+ contract->use_half_int = 1;
+ }
+
+ nr_periods = DIV_ROUND_UP(len, period_len);
for (i = 0; i < nr_periods; i++) {
/* Calculate the offset in the buffer and the length needed */
- offset = i * period_len * 2;
- plength = min((len - offset), (period_len * 2));
+ offset = i * period_len;
+ plength = min((len - offset), period_len);
if (dir == DMA_MEM_TO_DEV)
src = buf + offset;
else
@@ -1149,6 +1169,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
+ dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);
+
dma_cap_zero(priv->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
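A worked example of the period-merging arithmetic in sun4i_dma_prep_dma_cyclic() above, using assumed sizes rather than anything taken from the driver: with len = 480 KiB and period_len = 60 KiB on normal DMA, the period fits within SUN4I_NDMA_MAX_SEG_SIZE / 2, so it is doubled and use_half_int is set; the half-completion interrupt then still delivers a callback at every original 60 KiB boundary while only 4 promises are programmed instead of 8.

#include <linux/kernel.h>
#include <linux/sizes.h>

/* Assumed sizes, not taken from the driver: a normal-DMA cyclic transfer. */
static size_t example_nr_promises(void)
{
	size_t len = 480 * SZ_1K;
	size_t period_len = 60 * SZ_1K;

	if (period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {	/* 60K <= 64K */
		period_len *= 2;	/* one promise now covers two periods */
		/* use_half_int: interrupt at half and at full completion */
	}

	return DIV_ROUND_UP(len, period_len);	/* 480K / 120K = 4 promises */
}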
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 05cd451f541d..fa9bda4a2bc6 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -157,8 +157,8 @@
* If any burst is in flight and DMA paused then this is the time to complete
* on-flight burst and update DMA status register.
*/
-#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
-#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
+#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 10
+#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */
/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
@@ -432,6 +432,17 @@ static int tegra_dma_device_resume(struct dma_chan *dc)
return 0;
}
+static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
+{
+ /* Return 0 irrespective of PAUSE status.
+ * This is useful to recover channels that can exit out of flush
+ * state when the channel is disabled.
+ */
+
+ tegra_dma_pause(tdc);
+ return 0;
+}
+
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
u32 csr, status;
@@ -1292,6 +1303,14 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
.terminate = tegra_dma_pause,
};
+static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+ .terminate = tegra_dma_pause_noerr,
+};
+
static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra186-gpcdma",
@@ -1300,6 +1319,9 @@ static const struct of_device_id tegra_dma_of_match[] = {
.compatible = "nvidia,tegra194-gpcdma",
.data = &tegra194_dma_chip_data,
}, {
+ .compatible = "nvidia,tegra234-gpcdma",
+ .data = &tegra234_dma_chip_data,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 3ea8ef7f57df..4cbca80ee16e 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI EDMA DMA engine driver
*
* Copyright 2012 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 4c4172a4d271..a488c2250623 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -112,6 +112,11 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
/* CPSW0 */
PSIL_ETHERNET(0x7000),
/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
@@ -144,6 +149,9 @@ static struct psil_ep j721s2_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j721s2_dst_ep_map[] = {
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
/* CPSW0 */
PSIL_ETHERNET(0xf000),
PSIL_ETHERNET(0xf001),
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cd62bbb50e8b..6276934d4d2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2128,126 +2128,6 @@ error:
}
/**
- * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
- struct dma_chan *dchan, struct scatterlist *dst_sg,
- unsigned int dst_sg_len, struct scatterlist *src_sg,
- unsigned int src_sg_len, unsigned long flags)
-{
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_cdma_tx_segment *segment, *prev = NULL;
- struct xilinx_cdma_desc_hw *hw;
- size_t len, dst_avail, src_avail;
- dma_addr_t dma_dst, dma_src;
-
- if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
- return NULL;
-
- if (unlikely(!dst_sg || !src_sg))
- return NULL;
-
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
-
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
- /*
- * loop until there is either no more source or no more destination
- * scatterlist entry
- */
- while (true) {
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, chan->xdev->max_buffer_len);
- if (len == 0)
- goto fetch;
-
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_cdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
-
- dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
- dst_avail;
- dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
- src_avail;
- hw = &segment->hw;
- hw->control = len;
- hw->src_addr = dma_src;
- hw->dest_addr = dma_dst;
- if (chan->ext_addr) {
- hw->src_addr_msb = upper_32_bits(dma_src);
- hw->dest_addr_msb = upper_32_bits(dma_dst);
- }
-
- if (prev) {
- prev->hw.next_desc = segment->phys;
- if (chan->ext_addr)
- prev->hw.next_desc_msb =
- upper_32_bits(segment->phys);
- }
-
- prev = segment;
- dst_avail -= len;
- src_avail -= len;
- list_add_tail(&segment->node, &desc->segments);
-
-fetch:
- /* Fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- if (dst_sg_len == 0)
- break;
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
- dst_sg_len--;
- dst_avail = sg_dma_len(dst_sg);
- }
- /* Fetch the next src scatterlist entry */
- if (src_avail == 0) {
- if (src_sg_len == 0)
- break;
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
- src_sg_len--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- if (list_empty(&desc->segments)) {
- dev_err(chan->xdev->dev,
- "%s: Zero-size SG transfer requested\n", __func__);
- goto error;
- }
-
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_cdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
-
- return &desc->async_tx;
-
-error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
-}
-
-/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3240,9 +3120,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
- dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
- xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b0f4948b00a5..84dc5240a807 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -376,7 +376,7 @@ static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
if (ret < 0)
goto done;
} else {
- strlcpy(kern_buff, "No testcase executed",
+ strscpy(kern_buff, "No testcase executed",
XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
}
@@ -1652,10 +1652,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dpdma_hw_init(xdev);
xdev->irq = platform_get_irq(pdev, 0);
- if (xdev->irq < 0) {
- dev_err(xdev->dev, "failed to get platform irq\n");
+ if (xdev->irq < 0)
return xdev->irq;
- }
ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
dev_name(xdev->dev), xdev);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d3e2477948c8..17562cf1fe97 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,6 +263,7 @@ config EDAC_I10NM
config EDAC_PND2
tristate "Intel Pondicherry2"
depends on PCI && X86_64 && X86_MCE_INTEL
+ select P2SB if X86
help
Support for error detection and correction on the Intel
Pondicherry2 Integrated Memory Controller. This SoC IP is
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 2205d7e731db..64c142aecca7 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* EDAC PCI component
*
* Author: Dave Jiang <djiang@mvista.com>
*
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
+ * 2007 (c) MontaVista Software, Inc.
*/
#include <asm/page.h>
#include <linux/uaccess.h>
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index 6d8ea226010d..ac2102b25706 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale Memory Controller kernel module
*
@@ -9,10 +10,7 @@
*
* Author: Dave Jiang <djiang@mvista.com>
*
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2006-2007 (c) MontaVista Software, Inc.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
index 589b9b4a5e8a..332439d7b2d9 100644
--- a/drivers/edac/fsl_ddr_edac.h
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Freescale Memory Controller kernel module
*
@@ -7,11 +8,7 @@
*
* Author: Dave Jiang <djiang@mvista.com>
*
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
+ * 2006-2007 (c) MontaVista Software, Inc.
*/
#ifndef _FSL_DDR_EDAC_H_
#define _FSL_DDR_EDAC_H_
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 59b0bedc9c24..c8fa7dcfdbd0 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -103,9 +103,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle)
dmi_memdev_name(handle, &bank, &device);
- /* both strings must be non-zero */
- if (bank && *bank && device && *device)
- snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
+ /*
+ * Set to an empty string when both bank and device are empty. In this case,
+ * the label assigned by default will be preserved.
+ */
+ snprintf(dimm->label, sizeof(dimm->label), "%s%s%s",
+ (bank && *bank) ? bank : "",
+ (bank && *bank && device && *device) ? " " : "",
+ (device && *device) ? device : "");
}
static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
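Spelled out, the three cases the new format string handles are (the strings below are made up for illustration):

/* Illustrative outcomes of the snprintf() format above:
 *   bank = "BANK 0", device = "DIMM A"  ->  dimm->label = "BANK 0 DIMM A"
 *   bank = "",       device = "DIMM A"  ->  dimm->label = "DIMM A"
 *   bank = "",       device = ""        ->  dimm->label = "" (empty string)
 */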
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 5bf92298554d..e50d7928bf8f 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -24,6 +24,8 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "edac_module.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 3f6fb16ad34f..66a046ae33ee 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Freescale MPC85xx Memory Controller kernel module
* Author: Dave Jiang <djiang@mvista.com>
*
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
+ * 2006-2007 (c) MontaVista Software, Inc.
*/
#ifndef _MPC85XX_EDAC_H_
#define _MPC85XX_EDAC_H_
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index c94ca1f790c4..a20b299f1202 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -28,6 +28,8 @@
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
+#include <linux/platform_data/x86/p2sb.h>
+
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
@@ -232,42 +234,14 @@ static u64 get_mem_ctrl_hub_base_addr(void)
return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
-static u64 get_sideband_reg_base_addr(void)
-{
- struct pci_dev *pdev;
- u32 hi, lo;
- u8 hidden;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
- if (pdev) {
- /* Unhide the P2SB device, if it's hidden */
- pci_read_config_byte(pdev, 0xe1, &hidden);
- if (hidden)
- pci_write_config_byte(pdev, 0xe1, 0);
-
- pci_read_config_dword(pdev, 0x10, &lo);
- pci_read_config_dword(pdev, 0x14, &hi);
- lo &= 0xfffffff0;
-
- /* Hide the P2SB device, if it was hidden before */
- if (hidden)
- pci_write_config_byte(pdev, 0xe1, hidden);
-
- pci_dev_put(pdev);
- return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
- } else {
- return 0xfd000000;
- }
-}
-
#define DNV_MCHBAR_SIZE 0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
- char *base;
- u64 addr;
- unsigned long size;
+ void __iomem *base;
+ struct resource r;
+ int ret;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -279,26 +253,30 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
} else {
/* MMIO via memory controller hub base address */
if (op == 0 && port == 0x4c) {
- addr = get_mem_ctrl_hub_base_addr();
- if (!addr)
+ memset(&r, 0, sizeof(r));
+
+ r.start = get_mem_ctrl_hub_base_addr();
+ if (!r.start)
return -ENODEV;
- size = DNV_MCHBAR_SIZE;
+ r.end = r.start + DNV_MCHBAR_SIZE - 1;
} else {
/* MMIO via sideband register base address */
- addr = get_sideband_reg_base_addr();
- if (!addr)
- return -ENODEV;
- addr += (port << 16);
- size = DNV_SB_PORT_SIZE;
+ ret = p2sb_bar(NULL, 0, &r);
+ if (ret)
+ return ret;
+
+ r.start += (port << 16);
+ r.end = r.start + DNV_SB_PORT_SIZE - 1;
}
- base = ioremap((resource_size_t)addr, size);
+ base = ioremap(r.start, resource_size(&r));
if (!base)
return -ENODEV;
if (sz == 8)
- *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
- *(u32 *)data = *(u32 *)(base + off);
+ *(u64 *)data = readq(base + off);
+ else
+ *(u32 *)data = readl(base + off);
iounmap(base);
}
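The dnv_rd_reg() rework above delegates P2SB discovery to the common p2sb_bar() helper, which unhides the P2SB device long enough to report its BAR in a struct resource. A minimal usage sketch, assuming a caller that only needs to map one sideband port window (the function name is illustrative):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_data/x86/p2sb.h>
#include <linux/sizes.h>

static void __iomem *example_map_sb_port(unsigned int port)
{
	struct resource r;

	/* NULL bus and devfn 0 select the default (hidden) P2SB device. */
	if (p2sb_bar(NULL, 0, &r))
		return NULL;

	/* Each sideband port occupies a 64 KiB window inside the P2SB BAR. */
	return ioremap(r.start + (port << 16), SZ_64K);
}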
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 6793f6d799e7..0bc670778c99 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/types.h>
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 1cee64b80a7e..f7d37c282819 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -514,6 +514,28 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
memset(p, 0, sizeof(*p));
}
+static void enable_intr(struct synps_edac_priv *priv)
+{
+ /* Enable UE/CE Interrupts */
+ if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+ writel(DDR_UE_MASK | DDR_CE_MASK,
+ priv->baseaddr + ECC_CLR_OFST);
+ else
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+
+}
+
+static void disable_intr(struct synps_edac_priv *priv)
+{
+ /* Disable UE/CE Interrupts */
+ if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+ writel(0x0, priv->baseaddr + ECC_CLR_OFST);
+ else
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+}
+
/**
* intr_handler - Interrupt Handler for ECC interrupts.
* @irq: IRQ number.
@@ -555,6 +577,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
/* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+ else
+ enable_intr(priv);
+
return IRQ_HANDLED;
}
@@ -837,25 +862,6 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
init_csrows(mci);
}
-static void enable_intr(struct synps_edac_priv *priv)
-{
- /* Enable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(DDR_UE_MASK | DDR_CE_MASK,
- priv->baseaddr + ECC_CLR_OFST);
- else
- writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
- priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
-
-}
-
-static void disable_intr(struct synps_edac_priv *priv)
-{
- /* Disable UE/CE Interrupts */
- writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
- priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
-}
-
static int setup_irq(struct mem_ctl_info *mci,
struct platform_device *pdev)
{
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index 08bdedbcdb0d..7cff66c29907 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -324,11 +324,6 @@ static int fsa9480_probe(struct i2c_client *client,
return 0;
}
-static int fsa9480_remove(struct i2c_client *client)
-{
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int fsa9480_suspend(struct device *dev)
{
@@ -376,7 +371,6 @@ static struct i2c_driver fsa9480_i2c_driver = {
.of_match_table = fsa9480_of_match,
},
.probe = fsa9480_probe,
- .remove = fsa9480_remove,
.id_table = fsa9480_id,
};
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index d2c1a8b89c08..32f8b541770b 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -107,7 +107,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
- dev_dbg(palmas_usb->dev, " USB-HOST cable is attached\n");
+ dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
}
return IRQ_HANDLED;
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 40c07f4d656e..02ba770acb27 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -192,7 +192,6 @@ static const struct regmap_irq_chip rt8973a_muic_irq_chip = {
.name = "rt8973a",
.status_base = RT8973A_REG_INT1,
.mask_base = RT8973A_REG_INTM1,
- .mask_invert = false,
.num_regs = 2,
.irqs = rt8973a_irqs,
.num_irqs = ARRAY_SIZE(rt8973a_irqs),
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index f706f5288257..8401e8b27788 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -227,7 +227,6 @@ static const struct regmap_irq_chip sm5502_muic_irq_chip = {
.name = "sm5502",
.status_base = SM5502_REG_INT1,
.mask_base = SM5502_REG_INTMASK1,
- .mask_invert = false,
.num_regs = 2,
.irqs = sm5502_irqs,
.num_irqs = ARRAY_SIZE(sm5502_irqs),
@@ -276,7 +275,6 @@ static const struct regmap_irq_chip sm5504_muic_irq_chip = {
.name = "sm5504",
.status_base = SM5502_REG_INT1,
.mask_base = SM5502_REG_INTMASK1,
- .mask_invert = false,
.num_regs = 2,
.irqs = sm5504_irqs,
.num_irqs = ARRAY_SIZE(sm5504_irqs),
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index d3a32b806499..e1c71359b605 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -167,6 +167,16 @@ static const struct __extcon_info {
.id = EXTCON_DISP_HMD,
.name = "HMD",
},
+ [EXTCON_DISP_CVBS] = {
+ .type = EXTCON_TYPE_DISP,
+ .id = EXTCON_DISP_CVBS,
+ .name = "CVBS",
+ },
+ [EXTCON_DISP_EDP] = {
+ .type = EXTCON_TYPE_DISP,
+ .id = EXTCON_DISP_EDP,
+ .name = "EDP",
+ },
/* Miscellaneous external connector */
[EXTCON_DOCK] = {
@@ -247,7 +257,7 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
{
int i;
- /* Find the the index of extcon cable in edev->supported_cable */
+ /* Find the index of extcon cable in edev->supported_cable */
for (i = 0; i < edev->max_supported; i++) {
if (edev->supported_cable[i] == id)
return i;
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index dcc141068128..af22be84034b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -202,15 +202,6 @@ struct fwnet_packet_task {
};
/*
- * Get fifo address embedded in hwaddr
- */
-static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
-{
- return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
- | get_unaligned_be32(&ha->uc.fifo_lo);
-}
-
-/*
* saddr == NULL means use device source address.
* daddr == NULL means leave destination address (eg unresolved arp).
*/
@@ -1306,7 +1297,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
- ptask->fifo_addr = fwnet_hwaddr_fifo(ha);
+ ptask->fifo_addr = get_unaligned_be48(ha->uc.fifo);
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
@@ -1494,8 +1485,7 @@ static int fwnet_probe(struct fw_unit *unit,
ha.uc.uniq_id = cpu_to_be64(card->guid);
ha.uc.max_rec = dev->card->max_receive;
ha.uc.sspd = dev->card->link_speed;
- ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32);
- ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff);
+ put_unaligned_be48(dev->local_fifo, ha.uc.fifo);
dev_addr_set(net, ha.u);
memset(net->broadcast, -1, net->addr_len);
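The fwnet change above swaps the open-coded fifo_hi/fifo_lo packing for the generic 48-bit big-endian accessors. A minimal round-trip sketch, assuming a throwaway buffer and address rather than fwnet's own structures:

#include <linux/bug.h>
#include <linux/types.h>
#include <asm/unaligned.h>

static void example_be48_roundtrip(void)
{
	u8 fifo[6];			/* 48-bit field, most significant byte first */
	u64 addr = 0xfffff0000000ULL;	/* assumed FIFO address, fits in 48 bits */

	put_unaligned_be48(addr, fifo);	/* store the low 48 bits of addr */
	WARN_ON(get_unaligned_be48(fifo) != addr);
}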
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 1e7b7fec97d9..a14f65444b35 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -149,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN
will be called scmi_pm_domain. Note this may needed early in boot
before rootfs may be available.
+config ARM_SCMI_POWER_CONTROL
+ tristate "SCMI system power control driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ help
+ This enables System Power control logic which binds system shutdown or
+ reboot actions to SCMI System Power notifications generated by SCP
+ firmware.
+
+ This driver can also be built as a module. If so, the module will be
+ called scmi_power_control. Note this may be needed early in boot to catch
+ early shutdown/reboot SCMI requests.
+
endmenu
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 8d4afadda38c..9ea86f8cc8f7 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -7,11 +7,12 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8b7ac6663d57..609ebedee9cb 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -19,6 +19,7 @@
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
@@ -60,6 +61,11 @@ static atomic_t transfer_last_id;
static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);
+/* Track globally the creation of SCMI SystemPower related devices */
+static bool scmi_syspower_registered;
+/* Protect access to scmi_syspower_registered */
+static DEFINE_MUTEX(scmi_syspower_mtx);
+
struct scmi_requested_dev {
const struct scmi_device_id *id_table;
struct list_head node;
@@ -660,6 +666,11 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
+
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -694,6 +705,12 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
+ "DLYD" : "RESP",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.type);
@@ -827,6 +844,12 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* Trace polled replies. */
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ "RESP",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
}
} else {
/* And we wait for the response. */
@@ -903,6 +926,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
@@ -1259,10 +1286,174 @@ out:
return ret;
}
+struct scmi_msg_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+static void
+scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id, u32 valid_size,
+ u32 domain, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db = NULL;
+ struct scmi_msg_get_fc_info *info;
+ struct scmi_msg_resp_desc_fc *resp;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ if (!p_addr) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, describe_id,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ goto err_out;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ /*
+ * Bail out on error leaving fc_info addresses zeroed; this includes
+ * the case in which the requested domain/message_id does NOT support
+ * fastchannels at all.
+ */
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (size != valid_size) {
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_xfer;
+ }
+
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
+ if (!db) {
+ ret = -ENOMEM;
+ goto err_db;
+ }
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_db_mem;
+ }
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+
+ *p_db = db;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ dev_dbg(ph->dev,
+ "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
+ pi->proto->id, message_id, domain);
+
+ return;
+
+err_db_mem:
+ devm_kfree(ph->dev, db);
+
+err_db:
+ *p_addr = NULL;
+
+err_xfer:
+ ph->xops->xfer_put(ph, t);
+
+err_out:
+ dev_warn(ph->dev,
+ "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
+ pi->proto->id, message_id, domain, ret);
+}
+
+#define SCMI_PROTO_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
+static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PROTO_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PROTO_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PROTO_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PROTO_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
+
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
+ .fastchannel_init = scmi_common_fastchannel_init,
+ .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
/**
@@ -1497,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res)
scmi_protocol_release(dres->handle, dres->protocol_id);
}
+static struct scmi_protocol_instance __must_check *
+scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_protocol_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_protocol,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return ERR_PTR(-ENOMEM);
+
+ pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
+ if (IS_ERR(pi)) {
+ devres_free(dres);
+ return pi;
+ }
+
+ dres->handle = sdev->handle;
+ dres->protocol_id = protocol_id;
+ devres_add(&sdev->dev, dres);
+
+ return pi;
+}
+
/**
* scmi_devm_protocol_get - Devres managed get protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
@@ -1520,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
struct scmi_protocol_handle **ph)
{
struct scmi_protocol_instance *pi;
- struct scmi_protocol_devres *dres;
- struct scmi_handle *handle = sdev->handle;
if (!ph)
return ERR_PTR(-EINVAL);
- dres = devres_alloc(scmi_devm_release_protocol,
- sizeof(*dres), GFP_KERNEL);
- if (!dres)
- return ERR_PTR(-ENOMEM);
-
- pi = scmi_get_protocol_instance(handle, protocol_id);
- if (IS_ERR(pi)) {
- devres_free(dres);
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
return pi;
- }
-
- dres->handle = handle;
- dres->protocol_id = protocol_id;
- devres_add(&sdev->dev, dres);
*ph = &pi->ph;
return pi->proto->ops;
}
+/**
+ * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Get hold of a protocol accounting for its usage, possibly triggering its
+ * initialization but without getting access to its protocol specific operations
+ * and handle.
+ *
+ * Being a devres based managed method, protocol hold will be automatically
+ * released, and possibly de-initialized on last user, once the SCMI driver
+ * owning the scmi_device is unbound from it.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
+ u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
+ return PTR_ERR(pi);
+
+ return 0;
+}
+
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
struct scmi_protocol_devres *dres = res;
@@ -1849,21 +2079,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
if (sdev)
return sdev;
+ mutex_lock(&scmi_syspower_mtx);
+ if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
+ dev_warn(info->dev,
+ "SCMI SystemPower protocol device must be unique !\n");
+ mutex_unlock(&scmi_syspower_mtx);
+
+ return NULL;
+ }
+
pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
+ mutex_unlock(&scmi_syspower_mtx);
+
return NULL;
}
if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
+ mutex_unlock(&scmi_syspower_mtx);
+
return NULL;
}
+ if (prot_id == SCMI_PROTOCOL_SYSTEM)
+ scmi_syspower_registered = true;
+
+ mutex_unlock(&scmi_syspower_mtx);
+
return sdev;
}
@@ -2132,6 +2380,7 @@ static int scmi_probe(struct platform_device *pdev)
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
+ handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
@@ -2401,6 +2650,7 @@ static int __init scmi_driver_init(void)
scmi_sensors_register();
scmi_voltage_register();
scmi_system_register();
+ scmi_powercap_register();
return platform_driver_register(&scmi_driver);
}
@@ -2417,6 +2667,7 @@ static void __exit scmi_driver_exit(void)
scmi_sensors_unregister();
scmi_voltage_unregister();
scmi_system_unregister();
+ scmi_powercap_unregister();
scmi_bus_exit();
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index bbb0331801ff..ecf5c4de851b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -10,13 +10,14 @@
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
+#include <trace/events/scmi.h>
+
#include "protocols.h"
#include "notify.h"
@@ -35,6 +36,12 @@ enum scmi_performance_protocol_cmd {
PERF_DOMAIN_NAME_GET = 0xc,
};
+enum {
+ PERF_FC_LEVEL,
+ PERF_FC_LIMIT,
+ PERF_FC_MAX,
+};
+
struct scmi_opp {
u32 perf;
u32 power;
@@ -115,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels {
} opp[];
};
-struct scmi_perf_get_fc_info {
- __le32 domain;
- __le32 message_id;
-};
-
-struct scmi_msg_resp_perf_desc_fc {
- __le32 attr;
-#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
-#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
- __le32 rate_limit;
- __le32 chan_addr_low;
- __le32 chan_addr_high;
- __le32 chan_size;
- __le32 db_addr_low;
- __le32 db_addr_high;
- __le32 db_set_lmask;
- __le32 db_set_hmask;
- __le32 db_preserve_lmask;
- __le32 db_preserve_hmask;
-};
-
-struct scmi_fc_db_info {
- int width;
- u64 set;
- u64 mask;
- void __iomem *addr;
-};
-
-struct scmi_fc_info {
- void __iomem *level_set_addr;
- void __iomem *limit_set_addr;
- void __iomem *level_get_addr;
- void __iomem *limit_get_addr;
- struct scmi_fc_db_info *level_set_db;
- struct scmi_fc_db_info *limit_set_db;
-};
-
struct perf_dom_info {
bool set_limits;
bool set_perf;
@@ -170,8 +140,7 @@ struct perf_dom_info {
struct scmi_perf_info {
u32 version;
int num_domains;
- bool power_scale_mw;
- bool power_scale_uw;
+ enum scmi_power_scale power_scale;
u64 stats_addr;
u32 stats_size;
struct perf_dom_info *dom_info;
@@ -201,9 +170,13 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
u16 flags = le16_to_cpu(attr->flags);
pi->num_domains = le16_to_cpu(attr->num_domains);
- pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+
+ if (POWER_SCALE_IN_MILLIWATT(flags))
+ pi->power_scale = SCMI_POWER_MILLIWATTS;
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
- pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags);
+ if (POWER_SCALE_IN_MICROWATT(flags))
+ pi->power_scale = SCMI_POWER_MICROWATTS;
+
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
@@ -360,40 +333,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
return ret;
}
-#define SCMI_PERF_FC_RING_DB(w) \
-do { \
- u##w val = 0; \
- \
- if (db->mask) \
- val = ioread##w(db->addr) & db->mask; \
- iowrite##w((u##w)db->set | val, db->addr); \
-} while (0)
-
-static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
-{
- if (!db || !db->addr)
- return;
-
- if (db->width == 1)
- SCMI_PERF_FC_RING_DB(8);
- else if (db->width == 2)
- SCMI_PERF_FC_RING_DB(16);
- else if (db->width == 4)
- SCMI_PERF_FC_RING_DB(32);
- else /* db->width == 8 */
-#ifdef CONFIG_64BIT
- SCMI_PERF_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
-}
-
static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 max_perf, u32 min_perf)
{
@@ -426,10 +365,14 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
return -EINVAL;
- if (dom->fc_info && dom->fc_info->limit_set_addr) {
- iowrite32(max_perf, dom->fc_info->limit_set_addr);
- iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
- scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
+ domain, min_perf, max_perf);
+ iowrite32(max_perf, fci->set_addr);
+ iowrite32(min_perf, fci->set_addr + 4);
+ ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
@@ -468,9 +411,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->limit_get_addr) {
- *max_perf = ioread32(dom->fc_info->limit_get_addr);
- *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ *max_perf = ioread32(fci->get_addr);
+ *min_perf = ioread32(fci->get_addr + 4);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
+ domain, *min_perf, *max_perf);
return 0;
}
@@ -505,9 +452,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->level_set_addr) {
- iowrite32(level, dom->fc_info->level_set_addr);
- scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
+ domain, level, 0);
+ iowrite32(level, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
@@ -542,8 +493,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->level_get_addr) {
- *level = ioread32(dom->fc_info->level_get_addr);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
+ domain, *level, 0);
return 0;
}
@@ -572,100 +525,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
return ret;
}
-static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
-{
- if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
- return true;
- if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
- return true;
- return false;
-}
-
-static void
-scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
- u32 message_id, void __iomem **p_addr,
- struct scmi_fc_db_info **p_db)
-{
- int ret;
- u32 flags;
- u64 phys_addr;
- u8 size;
- void __iomem *addr;
- struct scmi_xfer *t;
- struct scmi_fc_db_info *db;
- struct scmi_perf_get_fc_info *info;
- struct scmi_msg_resp_perf_desc_fc *resp;
-
- if (!p_addr)
- return;
-
- ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- sizeof(*info), sizeof(*resp), &t);
- if (ret)
- return;
-
- info = t->tx.buf;
- info->domain = cpu_to_le32(domain);
- info->message_id = cpu_to_le32(message_id);
-
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- goto err_xfer;
-
- resp = t->rx.buf;
- flags = le32_to_cpu(resp->attr);
- size = le32_to_cpu(resp->chan_size);
- if (!scmi_perf_fc_size_is_valid(message_id, size))
- goto err_xfer;
-
- phys_addr = le32_to_cpu(resp->chan_addr_low);
- phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
- addr = devm_ioremap(ph->dev, phys_addr, size);
- if (!addr)
- goto err_xfer;
- *p_addr = addr;
-
- if (p_db && SUPPORTS_DOORBELL(flags)) {
- db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- goto err_xfer;
-
- size = 1 << DOORBELL_REG_WIDTH(flags);
- phys_addr = le32_to_cpu(resp->db_addr_low);
- phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
- addr = devm_ioremap(ph->dev, phys_addr, size);
- if (!addr)
- goto err_xfer;
-
- db->addr = addr;
- db->width = size;
- db->set = le32_to_cpu(resp->db_set_lmask);
- db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
- db->mask = le32_to_cpu(resp->db_preserve_lmask);
- db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
- *p_db = db;
- }
-err_xfer:
- ph->xops->xfer_put(ph, t);
-}
-
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
- fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
+ fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
- scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
- &fc->level_set_addr, &fc->level_set_db);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
- &fc->level_get_addr, NULL);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
- &fc->limit_set_addr, &fc->limit_set_db);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
- &fc->limit_get_addr, NULL);
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_SET, 4, domain,
+ &fc[PERF_FC_LEVEL].set_addr,
+ &fc[PERF_FC_LEVEL].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_GET, 4, domain,
+ &fc[PERF_FC_LEVEL].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_SET, 8, domain,
+ &fc[PERF_FC_LIMIT].set_addr,
+ &fc[PERF_FC_LIMIT].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_GET, 8, domain,
+ &fc[PERF_FC_LIMIT].get_addr, NULL);
+
*p_fc = fc;
}
@@ -789,14 +675,15 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
dom = pi->dom_info + scmi_dev_domain_id(dev);
- return dom->fc_info && dom->fc_info->level_set_addr;
+ return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}
-static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
+static enum scmi_power_scale
+scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
- return pi->power_scale_mw;
+ return pi->power_scale;
}
static const struct scmi_perf_proto_ops perf_proto_ops = {
@@ -811,7 +698,7 @@ static const struct scmi_perf_proto_ops perf_proto_ops = {
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
.fast_switch_possible = scmi_fast_switch_possible,
- .power_scale_mw_get = scmi_power_scale_mw_get,
+ .power_scale_get = scmi_power_scale_get,
};
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
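For a 32-bit doorbell, the common SCMI_PROTO_FC_RING_DB() helper that perf now reaches through ph->hops->fastchannel_db_ring() boils down to a read-modify-write; a sketch of that expansion (illustrative, not the literal macro output):

#include <linux/io.h>

static void example_ring_db32(struct scmi_fc_db_info *db)
{
	u32 val = 0;

	if (db->mask)
		val = ioread32(db->addr) & db->mask;	/* keep the preserved bits */

	iowrite32((u32)db->set | val, db->addr);	/* OR in the doorbell set mask */
}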
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
new file mode 100644
index 000000000000..83b90bde755c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Powercap Protocol
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include <trace/events/scmi.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_powercap_protocol_cmd {
+ POWERCAP_DOMAIN_ATTRIBUTES = 0x3,
+ POWERCAP_CAP_GET = 0x4,
+ POWERCAP_CAP_SET = 0x5,
+ POWERCAP_PAI_GET = 0x6,
+ POWERCAP_PAI_SET = 0x7,
+ POWERCAP_DOMAIN_NAME_GET = 0x8,
+ POWERCAP_MEASUREMENTS_GET = 0x9,
+ POWERCAP_CAP_NOTIFY = 0xa,
+ POWERCAP_MEASUREMENTS_NOTIFY = 0xb,
+ POWERCAP_DESCRIBE_FASTCHANNEL = 0xc,
+};
+
+enum {
+ POWERCAP_FC_CAP,
+ POWERCAP_FC_PAI,
+ POWERCAP_FC_MAX,
+};
+
+struct scmi_msg_resp_powercap_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31))
+#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30))
+#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28))
+#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27))
+#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26))
+#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25))
+#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22))
+#define POWERCAP_POWER_UNIT(x) \
+ (FIELD_GET(GENMASK(24, 23), (x)))
+#define SUPPORTS_POWER_UNITS_MW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x2)
+#define SUPPORTS_POWER_UNITS_UW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x1)
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 min_pai;
+ __le32 max_pai;
+ __le32 pai_step;
+ __le32 min_power_cap;
+ __le32 max_power_cap;
+ __le32 power_cap_step;
+ __le32 sustainable_power;
+ __le32 accuracy;
+ __le32 parent_id;
+};
+
+struct scmi_msg_powercap_set_cap_or_pai {
+ __le32 domain;
+ __le32 flags;
+#define CAP_SET_ASYNC BIT(1)
+#define CAP_SET_IGNORE_DRESP BIT(0)
+ __le32 value;
+};
+
+struct scmi_msg_resp_powercap_cap_set_complete {
+ __le32 domain;
+ __le32 power_cap;
+};
+
+struct scmi_msg_resp_powercap_meas_get {
+ __le32 power;
+ __le32 pai;
+};
+
+struct scmi_msg_powercap_notify_cap {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_msg_powercap_notify_thresh {
+ __le32 domain;
+ __le32 notify_enable;
+ __le32 power_thresh_low;
+ __le32 power_thresh_high;
+};
+
+struct scmi_powercap_cap_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_cap;
+ __le32 pai;
+};
+
+struct scmi_powercap_meas_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power;
+};
+
+struct scmi_powercap_state {
+ bool meas_notif_enabled;
+ u64 thresholds;
+#define THRESH_LOW(p, id) \
+ (lower_32_bits((p)->states[(id)].thresholds))
+#define THRESH_HIGH(p, id) \
+ (upper_32_bits((p)->states[(id)].thresholds))
+};
+
+struct powercap_info {
+ u32 version;
+ int num_domains;
+ struct scmi_powercap_state *states;
+ struct scmi_powercap_info *powercaps;
+};
+
+static enum scmi_powercap_protocol_cmd evt_2_cmd[] = {
+ POWERCAP_CAP_NOTIFY,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+};
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable);
+
+static int
+scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 attributes;
+
+ attributes = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static inline int
+scmi_powercap_validate(unsigned int min_val, unsigned int max_val,
+ unsigned int step_val, bool configurable)
+{
+ if (!min_val || !max_val)
+ return -EPROTO;
+
+ if ((configurable && min_val == max_val) ||
+ (!configurable && min_val != max_val))
+ return -EPROTO;
+
+ if (min_val != max_val && !step_val)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pinfo, u32 domain)
+{
+ int ret;
+ u32 flags;
+ struct scmi_xfer *t;
+ struct scmi_powercap_info *dom_info = pinfo->powercaps + domain;
+ struct scmi_msg_resp_powercap_domain_attributes *resp;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ flags = le32_to_cpu(resp->attributes);
+
+ dom_info->id = domain;
+ dom_info->notify_powercap_cap_change =
+ SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
+ dom_info->notify_powercap_measurement_change =
+ SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
+ dom_info->async_powercap_cap_set =
+ SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
+ dom_info->powercap_cap_config =
+ SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags);
+ dom_info->powercap_monitoring =
+ SUPPORTS_POWERCAP_MONITORING(flags);
+ dom_info->powercap_pai_config =
+ SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags);
+ dom_info->powercap_scale_mw =
+ SUPPORTS_POWER_UNITS_MW(flags);
+ dom_info->powercap_scale_uw =
+ SUPPORTS_POWER_UNITS_UW(flags);
+ dom_info->fastchannels =
+ SUPPORTS_POWERCAP_FASTCHANNELS(flags);
+
+ strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ dom_info->min_pai = le32_to_cpu(resp->min_pai);
+ dom_info->max_pai = le32_to_cpu(resp->max_pai);
+ dom_info->pai_step = le32_to_cpu(resp->pai_step);
+ ret = scmi_powercap_validate(dom_info->min_pai,
+ dom_info->max_pai,
+ dom_info->pai_step,
+ dom_info->powercap_pai_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent PAI config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap);
+ dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap);
+ dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step);
+ ret = scmi_powercap_validate(dom_info->min_power_cap,
+ dom_info->max_power_cap,
+ dom_info->power_cap_step,
+ dom_info->powercap_cap_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent CAP config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->sustainable_power =
+ le32_to_cpu(resp->sustainable_power);
+ dom_info->accuracy = le32_to_cpu(resp->accuracy);
+
+ dom_info->parent_id = le32_to_cpu(resp->parent_id);
+ if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID &&
+ (dom_info->parent_id >= pinfo->num_domains ||
+ dom_info->parent_id == dom_info->id)) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent parent ID for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ ret = -ENODEV;
+ }
+ }
+
+clean:
+ ph->xops->xfer_put(ph, t);
+
+ /*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error just carry on and use the already provided short name.
+ */
+ if (!ret && SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,
+ domain, dom_info->name,
+ SCMI_MAX_STR_SIZE);
+
+ return ret;
+}
+
+static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static const struct scmi_powercap_info *
+scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains)
+ return NULL;
+
+ return pi->powercaps + domain_id;
+}
+
+static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *power_cap = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_cap || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) {
+ *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET,
+ domain_id, *power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap);
+}
+
+static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
+ const struct scmi_powercap_info *pc,
+ u32 power_cap, bool ignore_dresp)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(pc->id);
+ msg->flags =
+ cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) |
+ FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp));
+ msg->value = cpu_to_le32(power_cap);
+
+ if (!pc->async_powercap_cap_set || ignore_dresp) {
+ ret = ph->xops->do_xfer(ph, t);
+ } else {
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_msg_resp_powercap_cap_set_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->domain) == pc->id)
+ dev_dbg(ph->dev,
+ "Powercap ID %d CAP set async to %u\n",
+ pc->id,
+ get_unaligned_le32(&resp->power_cap));
+ else
+ ret = -EPROTO;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_cap,
+ bool ignore_dresp)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_cap_config || !power_cap ||
+ power_cap < pc->min_power_cap ||
+ power_cap > pc->max_power_cap)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP];
+
+ iowrite32(power_cap, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET,
+ domain_id, power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp);
+}
+
+static int scmi_powercap_xfer_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *pai = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pai || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) {
+ *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET,
+ domain_id, *pai, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_get(ph, domain_id, pai);
+}
+
+static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(domain_id);
+ msg->flags = cpu_to_le32(0);
+ msg->value = cpu_to_le32(pai);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_pai_config || !pai ||
+ pai < pc->min_pai || pai > pc->max_pai)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET,
+ domain_id, pai, 0);
+ iowrite32(pai, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_set(ph, domain_id, pai);
+}
+
+static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power,
+ u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_powercap_meas_get *resp;
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_monitoring || !pai || !average_power)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET,
+ sizeof(u32), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ resp = t->rx.buf;
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ *average_power = le32_to_cpu(resp->power);
+ *pai = le32_to_cpu(resp->pai);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_thresh_low || !power_thresh_high ||
+ domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ *power_thresh_low = THRESH_LOW(pi, domain_id);
+ *power_thresh_high = THRESH_HIGH(pi, domain_id);
+
+ return 0;
+}
+
+static int
+scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high)
+{
+ int ret = 0;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains ||
+ power_thresh_low > power_thresh_high)
+ return -EINVAL;
+
+ /* Anything to do ? */
+ if (THRESH_LOW(pi, domain_id) == power_thresh_low &&
+ THRESH_HIGH(pi, domain_id) == power_thresh_high)
+ return ret;
+
+ pi->states[domain_id].thresholds =
+ (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) |
+ FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high));
+
+ /* Update thresholds if notification already enabled */
+ if (pi->states[domain_id].meas_notif_enabled)
+ ret = scmi_powercap_notify(ph, domain_id,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ true);
+
+ return ret;
+}
+
+static const struct scmi_powercap_proto_ops powercap_proto_ops = {
+ .num_domains_get = scmi_powercap_num_domains_get,
+ .info_get = scmi_powercap_dom_info_get,
+ .cap_get = scmi_powercap_cap_get,
+ .cap_set = scmi_powercap_cap_set,
+ .pai_get = scmi_powercap_pai_get,
+ .pai_set = scmi_powercap_pai_set,
+ .measurements_get = scmi_powercap_measurements_get,
+ .measurements_threshold_set = scmi_powercap_measurements_threshold_set,
+ .measurements_threshold_get = scmi_powercap_measurements_threshold_get,
+};
+
+static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_SET, 4, domain,
+ &fc[POWERCAP_FC_CAP].set_addr,
+ &fc[POWERCAP_FC_CAP].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_GET, 4, domain,
+ &fc[POWERCAP_FC_CAP].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_SET, 4, domain,
+ &fc[POWERCAP_FC_PAI].set_addr,
+ &fc[POWERCAP_FC_PAI].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_GET, 4, domain,
+ &fc[POWERCAP_FC_PAI].get_addr, NULL);
+
+ *p_fc = fc;
+}
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ switch (message_id) {
+ case POWERCAP_CAP_NOTIFY:
+ {
+ struct scmi_msg_powercap_notify_cap *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ break;
+ }
+ case POWERCAP_MEASUREMENTS_NOTIFY:
+ {
+ u32 low, high;
+ struct scmi_msg_powercap_notify_thresh *notify;
+
+ /*
+		 * Note that we have to pick the most recently configured
+		 * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY
+		 * enable request, and we fail, with an explicit error, if no
+		 * thresholds were ever set, since that is an indication the
+		 * API has been used incorrectly.
+ */
+ ret = scmi_powercap_measurements_threshold_get(ph, domain,
+ &low, &high);
+ if (ret)
+ return ret;
+
+ if (enable && !low && !high) {
+ dev_err(ph->dev,
+ "Invalid Measurements Notify thresholds: %u/%u\n",
+ low, high);
+ return -EINVAL;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ notify->power_thresh_low = cpu_to_le32(low);
+ notify->power_thresh_high = cpu_to_le32(high);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_powercap_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+ else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY)
+ /*
+		 * On success, save the current notification-enabled state, so
+		 * as to be able to properly update the notification thresholds
+		 * when they are modified on a domain for which measurement
+		 * notifications are currently enabled.
+ *
+ * This is needed because the SCMI Notification core machinery
+ * and API does not support passing per-notification custom
+ * arguments at callback registration time.
+ *
+		 * Note that this can be done here with a simple flag since the
+		 * SCMI core Notifications code takes care of keeping proper
+		 * per-domain enable refcounting, so that this helper function
+		 * will be called only once (to enable) when the first user
+		 * registers a callback on this domain and once more (to
+		 * disable) when the last user de-registers its callback.
+ */
+ pi->states[src_id].meas_notif_enabled = enable;
+
+ return ret;
+}
+
+static void *
+scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_POWERCAP_CAP_CHANGED:
+ {
+ const struct scmi_powercap_cap_changed_notify_payld *p = payld;
+ struct scmi_powercap_cap_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_cap = le32_to_cpu(p->power_cap);
+ r->pai = le32_to_cpu(p->pai);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED:
+ {
+ const struct scmi_powercap_meas_changed_notify_payld *p = payld;
+ struct scmi_powercap_meas_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power = le32_to_cpu(p->power);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int
+scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event powercap_events[] = {
+ {
+ .id = SCMI_EVENT_POWERCAP_CAP_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_cap_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_cap_changed_report),
+ },
+ {
+ .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_meas_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_meas_changed_report),
+ },
+};
+
+static const struct scmi_event_ops powercap_event_ops = {
+ .get_num_sources = scmi_powercap_get_num_sources,
+ .set_notify_enabled = scmi_powercap_set_notify_enabled,
+ .fill_custom_report = scmi_powercap_fill_custom_report,
+};
+
+static const struct scmi_protocol_events powercap_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &powercap_event_ops,
+ .evts = powercap_events,
+ .num_events = ARRAY_SIZE(powercap_events),
+};
+
+static int
+scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct powercap_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Powercap Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_powercap_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->powercaps),
+ GFP_KERNEL);
+ if (!pinfo->powercaps)
+ return -ENOMEM;
+
+ /*
+	 * Note that any failure in retrieving any domain attribute leads to
+	 * the whole Powercap protocol initialization failing: this way the
+	 * reported Powercap domains are all assured, when accessed, to be well
+	 * formed and related by a sane parent-child relationship (if any).
+ */
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain);
+ if (ret)
+ return ret;
+
+ if (pinfo->powercaps[domain].fastchannels)
+ scmi_powercap_domain_init_fc(ph, domain,
+ &pinfo->powercaps[domain].fc_info);
+ }
+
+ pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->states), GFP_KERNEL);
+ if (!pinfo->states)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_powercap = {
+ .id = SCMI_PROTOCOL_POWERCAP,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_powercap_protocol_init,
+ .ops = &powercap_proto_ops,
+ .events = &powercap_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
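A hypothetical consumer of the new Powercap protocol ops (not part of this patch) could acquire and use them roughly as in the sketch below; the probe function name, the chosen domain and the halved cap value are assumptions made purely to show the call flow exposed by powercap_proto_ops:

	static int example_powercap_probe(struct scmi_device *sdev)
	{
		const struct scmi_powercap_proto_ops *powercap_ops;
		struct scmi_protocol_handle *ph;
		u32 cap;
		int ret;

		/* Acquire the Powercap protocol ops from the SCMI handle. */
		powercap_ops = sdev->handle->devm_protocol_get(sdev,
						SCMI_PROTOCOL_POWERCAP, &ph);
		if (IS_ERR(powercap_ops))
			return PTR_ERR(powercap_ops);

		if (powercap_ops->num_domains_get(ph) <= 0)
			return -ENODEV;

		/* Read the current cap of domain 0 (fastchannel if available). */
		ret = powercap_ops->cap_get(ph, 0, &cap);
		if (ret)
			return ret;

		/* Halve it synchronously; cap_set() still enforces min/max. */
		return powercap_ops->cap_set(ph, 0, cap / 2, false);
	}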
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 51c31379f9b3..2f3bf691db7c 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -215,6 +215,19 @@ struct scmi_iterator_ops {
struct scmi_iterator_state *st, void *priv);
};
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *set_addr;
+ void __iomem *get_addr;
+ struct scmi_fc_db_info *set_db;
+};
+
/**
* struct scmi_proto_helpers_ops - References to common protocol helpers
* @extended_name_get: A common helper function to retrieve extended naming
@@ -230,6 +243,9 @@ struct scmi_iterator_ops {
* provided in @ops.
* @iter_response_run: A common helper to trigger the run of a previously
* initialized iterator.
+ * @fastchannel_init: A common helper used to initialize FC descriptors by
+ * gathering FC descriptions from the SCMI platform server.
+ * @fastchannel_db_ring: A common helper to ring a FC doorbell.
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
@@ -239,6 +255,12 @@ struct scmi_proto_helpers_ops {
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv);
int (*iter_response_run)(void *iter);
+ void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id,
+ u32 valid_size, u32 domain,
+ void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db);
+ void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
};
/**
@@ -315,5 +337,6 @@ DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
+DECLARE_SCMI_REGISTER_UNREGISTER(powercap);
#endif /* _SCMI_PROTOCOLS_H */
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
new file mode 100644
index 000000000000..6eb7d2a4b6b1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic SystemPower Control driver.
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+/*
+ * In order to handle platform originated SCMI SystemPower requests (like
+ * shutdowns or cold/warm resets) we register an SCMI Notification notifier
+ * block to react when such SCMI SystemPower events are emitted by platform.
+ *
+ * Once such a notification is received we act accordingly to perform the
+ * required system transition depending on the kind of request.
+ *
+ * Graceful requests are routed to userspace through the same API methods
+ * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus
+ * events.
+ *
+ * Direct forceful requests are not supported since they are not meant to be
+ * sent by the SCMI platform to an OSPM like Linux.
+ *
+ * Additionally, graceful request notifications can carry an optional timeout
+ * field stating the maximum amount of time allowed by the platform for
+ * completion, after which they are converted to forceful ones: the assumption
+ * here is that even graceful requests can be upper-bound by a maximum final
+ * timeout strictly enforced by the platform itself, which can ultimately cut
+ * the power off at will at any time; to avoid such an extreme scenario, we
+ * track the progress of graceful requests by means of a reboot notifier
+ * converting timed-out graceful requests to forceful ones, so at least we
+ * try to perform a clean sync and shutdown/restart before the power is cut.
+ *
+ * Given the peculiar nature of SCMI SystemPower protocol, that is being in
+ * charge of triggering system wide shutdown/reboot events, there should be
+ * only one SCMI platform actively emitting SystemPower events.
+ * For this reason the SCMI core takes care to enforce the creation of one
+ * single unique device associated to the SCMI System Power protocol; no matter
+ * how many SCMI platforms are defined on the system, only one can be designated
+ * to support System Power: as a consequence this driver will never be probed
+ * more than once.
+ *
+ * For similar reasons, as soon as the first valid SystemPower notification is
+ * received by this driver and the shutdown/reboot is started, any further
+ * notification possibly emitted by the platform will be ignored.
+ */
+
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifndef MODULE
+#include <linux/fs.h>
+#endif
+
+enum scmi_syspower_state {
+ SCMI_SYSPOWER_IDLE,
+ SCMI_SYSPOWER_IN_PROGRESS,
+ SCMI_SYSPOWER_REBOOTING
+};
+
+/**
+ * struct scmi_syspower_conf - Common configuration
+ *
+ * @dev: A reference device
+ * @state: Current SystemPower state
+ * @state_mtx: @state related mutex
+ * @required_transition: The requested transition as described in the received
+ * SCMI SystemPower notification
+ * @userspace_nb: The notifier_block registered against the SCMI SystemPower
+ * notification to start the needed userspace interactions.
+ * @reboot_nb: A notifier_block optionally used to track reboot progress
+ * @forceful_work: A worker used to trigger a forceful transition once a
+ * graceful has timed out.
+ */
+struct scmi_syspower_conf {
+ struct device *dev;
+ enum scmi_syspower_state state;
+ /* Protect access to state */
+ struct mutex state_mtx;
+ enum scmi_system_events required_transition;
+
+ struct notifier_block userspace_nb;
+ struct notifier_block reboot_nb;
+
+ struct delayed_work forceful_work;
+};
+
+#define userspace_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, userspace_nb)
+
+#define reboot_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, reboot_nb)
+
+#define dwork_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, forceful_work)
+
+/**
+ * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful
+ * system transition
+ * @nb: Reference to the related notifier block
+ * @reason: The reason for the ongoing reboot
+ * @__unused: The cmd being executed on a restart request (unused)
+ *
+ * When an ongoing system transition is detected, compatible with the one
+ * requested by SCMI, cancel the delayed work.
+ *
+ * Return: NOTIFY_OK in any case
+ */
+static int scmi_reboot_notifier(struct notifier_block *nb,
+ unsigned long reason, void *__unused)
+{
+ struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
+
+ mutex_lock(&sc->state_mtx);
+ switch (reason) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ case SYS_RESTART:
+ if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
+ sc->required_transition == SCMI_SYSTEM_WARMRESET)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ default:
+ break;
+ }
+
+ if (sc->state == SCMI_SYSPOWER_REBOOTING) {
+ dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
+ cancel_delayed_work_sync(&sc->forceful_work);
+ }
+ mutex_unlock(&sc->state_mtx);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * scmi_request_forceful_transition - Request forceful SystemPower transition
+ * @sc: A reference to the configuration data
+ *
+ * Initiates the required SystemPower transition without involving userspace:
+ * just trigger the action at the kernel level after issuing an emergency
+ * sync (if possible at all).
+ */
+static inline void
+scmi_request_forceful_transition(struct scmi_syspower_conf *sc)
+{
+ dev_dbg(sc->dev, "Serving forceful request:%d\n",
+ sc->required_transition);
+
+#ifndef MODULE
+ emergency_sync();
+#endif
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ kernel_power_off();
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ kernel_restart(NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void scmi_forceful_work_func(struct work_struct *work)
+{
+ struct scmi_syspower_conf *sc;
+ struct delayed_work *dwork;
+
+ if (system_state > SYSTEM_RUNNING)
+ return;
+
+ dwork = to_delayed_work(work);
+ sc = dwork_to_sconf(dwork);
+
+ dev_dbg(sc->dev, "Graceful request timed out...forcing !\n");
+ mutex_lock(&sc->state_mtx);
+ /* avoid deadlock by unregistering reboot notifier first */
+ unregister_reboot_notifier(&sc->reboot_nb);
+ if (sc->state == SCMI_SYSPOWER_IN_PROGRESS)
+ scmi_request_forceful_transition(sc);
+ mutex_unlock(&sc->state_mtx);
+}
+
+/**
+ * scmi_request_graceful_transition - Request graceful SystemPower transition
+ * @sc: A reference to the configuration data
+ * @timeout_ms: The desired timeout to wait for the shutdown to complete before
+ * the system is forcibly shut down.
+ *
+ * Initiates the required SystemPower transition, requesting userspace
+ * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event
+ * processing.
+ *
+ * It also takes care to register a reboot notifier and to schedule a delayed
+ * work in order to detect whether userspace actions are taking too long and,
+ * in such a case, to trigger a forceful transition.
+ */
+static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
+ unsigned int timeout_ms)
+{
+ unsigned int adj_timeout_ms = 0;
+
+ if (timeout_ms) {
+ int ret;
+
+ sc->reboot_nb.notifier_call = &scmi_reboot_notifier;
+ ret = register_reboot_notifier(&sc->reboot_nb);
+ if (!ret) {
+ /* Wait only up to 75% of the advertised timeout */
+ adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
+ INIT_DELAYED_WORK(&sc->forceful_work,
+ scmi_forceful_work_func);
+ schedule_delayed_work(&sc->forceful_work,
+ msecs_to_jiffies(adj_timeout_ms));
+ } else {
+ /* Carry on best effort even without a reboot notifier */
+ dev_warn(sc->dev,
+ "Cannot register reboot notifier !\n");
+ }
+ }
+
+ dev_dbg(sc->dev,
+ "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n",
+ sc->required_transition, timeout_ms, adj_timeout_ms);
+
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ /*
+ * When triggered early at boot-time the 'orderly' call will
+ * partially fail due to the lack of userspace itself, but
+		 * the force=true argument will nevertheless start a successful
+ * forced shutdown.
+ */
+ orderly_poweroff(true);
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ orderly_reboot();
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * scmi_userspace_notifier - Notifier callback to act on SystemPower
+ * Notifications
+ * @nb: Reference to the related notifier block
+ * @event: The SystemPower notification event id
+ * @data: The SystemPower event report
+ *
+ * This callback is in charge of decoding the received SystemPower report
+ * and acting accordingly, triggering a graceful or forceful system transition.
+ *
+ * Note that once a valid SCMI SystemPower event starts being served, any
+ * other following SystemPower notification received from the same SCMI
+ * instance (handle) will be ignored.
+ *
+ * Return: NOTIFY_OK once a valid SystemPower event has been successfully
+ * processed.
+ */
+static int scmi_userspace_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct scmi_system_power_state_notifier_report *er = data;
+ struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
+
+ if (er->system_state >= SCMI_SYSTEM_POWERUP) {
+ dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
+ er->system_state);
+ return NOTIFY_DONE;
+ }
+
+ if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) {
+ dev_err(sc->dev, "Ignoring forceful notification.\n");
+ return NOTIFY_DONE;
+ }
+
+ /*
+	 * Bail out if the system is already shutting down or an SCMI
+	 * SystemPower request is already being served.
+ */
+ if (system_state > SYSTEM_RUNNING)
+ return NOTIFY_DONE;
+ mutex_lock(&sc->state_mtx);
+ if (sc->state != SCMI_SYSPOWER_IDLE) {
+ dev_dbg(sc->dev,
+ "Transition already in progress...ignore.\n");
+ mutex_unlock(&sc->state_mtx);
+ return NOTIFY_DONE;
+ }
+ sc->state = SCMI_SYSPOWER_IN_PROGRESS;
+ mutex_unlock(&sc->state_mtx);
+
+ sc->required_transition = er->system_state;
+
+	/* Leave a trace in the logs of who triggered the shutdown/reboot. */
+ dev_info(sc->dev, "Serving shutdown/reboot request: %d\n",
+ sc->required_transition);
+
+ scmi_request_graceful_transition(sc, er->timeout);
+
+ return NOTIFY_OK;
+}
+
+static int scmi_syspower_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct scmi_syspower_conf *sc;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
+ if (ret)
+ return ret;
+
+ sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+ mutex_init(&sc->state_mtx);
+ sc->required_transition = SCMI_SYSTEM_MAX;
+ sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
+ sc->dev = &sdev->dev;
+
+ return handle->notify_ops->devm_event_notifier_register(sdev,
+ SCMI_PROTOCOL_SYSTEM,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ NULL, &sc->userspace_nb);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_SYSTEM, "syspower" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_system_power_driver = {
+ .name = "scmi-system-power",
+ .probe = scmi_syspower_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_system_power_driver);
+
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver");
+MODULE_LICENSE("GPL");
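As a worked example of the graceful-to-forceful conversion described above (the 30000 ms figure is an assumption, not taken from this patch): with a platform-advertised graceful timeout of 30 seconds, the driver arms the forceful fallback at 75% of that budget:

	unsigned int timeout_ms = 30000;
	/* mult_frac(30000, 3, 4) == 22500: the delayed work fires after 22.5 s */
	unsigned int adj_timeout_ms = mult_frac(timeout_ms, 3, 4);

	schedule_delayed_work(&sc->forceful_work, msecs_to_jiffies(adj_timeout_ms));

If userspace completes the orderly transition earlier, scmi_reboot_notifier() cancels the delayed work before it can force the transition.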
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 220e399118ad..9383d7584539 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld {
__le32 agent_id;
__le32 flags;
__le32 system_state;
+ __le32 timeout;
};
struct scmi_system_info {
u32 version;
+ bool graceful_timeout_supported;
};
static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
@@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
+ size_t expected_sz;
const struct scmi_system_power_state_notifier_payld *p = payld;
struct scmi_system_power_state_notifier_report *r = report;
+ struct scmi_system_info *pinfo = ph->get_priv(ph);
+ expected_sz = pinfo->graceful_timeout_supported ?
+ sizeof(*p) : sizeof(*p) - sizeof(__le32);
if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
- sizeof(*p) != payld_sz)
+ payld_sz != expected_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->flags = le32_to_cpu(p->flags);
r->system_state = le32_to_cpu(p->system_state);
+ if (pinfo->graceful_timeout_supported &&
+ r->system_state == SCMI_SYSTEM_SHUTDOWN &&
+ SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags))
+ r->timeout = le32_to_cpu(p->timeout);
+ else
+ r->timeout = 0x00;
*src_id = 0;
return r;
@@ -129,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
return -ENOMEM;
pinfo->version = version;
+ if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
+ pinfo->graceful_timeout_supported = true;
+
return ph->set_priv(ph, pinfo);
}
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ddf0b9ff9e15..435d0e2658a4 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
info->firmware_version = le32_to_cpu(caps.platform_version);
}
/* Ignore error if not implemented */
- if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ if (info->is_legacy && ret == -EOPNOTSUPP)
return 0;
return ret;
@@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev)
struct resource res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
- scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
- if (!scpi_info)
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
- scpi_info->is_legacy = true;
+ scpi_drvinfo->is_legacy = true;
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
@@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
- GFP_KERNEL);
- if (!scpi_info->channels)
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
return -ENOMEM;
- ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
if (ret)
return ret;
- for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
resource_size_t size;
- int idx = scpi_info->num_chans;
- struct scpi_chan *pchan = scpi_info->channels + idx;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev)
return ret;
}
- scpi_info->commands = scpi_std_commands;
+ scpi_drvinfo->commands = scpi_std_commands;
- platform_set_drvdata(pdev, scpi_info);
+ platform_set_drvdata(pdev, scpi_drvinfo);
- if (scpi_info->is_legacy) {
+ if (scpi_drvinfo->is_legacy) {
/* Replace with legacy variants */
scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
- scpi_info->commands = scpi_legacy_commands;
+ scpi_drvinfo->commands = scpi_legacy_commands;
/* Fill priority bitmap */
for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
set_bit(legacy_hpriority_cmds[idx],
- scpi_info->cmd_priority);
+ scpi_drvinfo->cmd_priority);
}
- ret = scpi_init_versions(scpi_info);
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
return ret;
}
- if (scpi_info->is_legacy && !scpi_info->protocol_version &&
- !scpi_info->firmware_version)
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
- scpi_info->firmware_version));
- scpi_info->scpi_ops = &scpi_ops;
+ scpi_drvinfo->firmware_version));
+
+ scpi_drvinfo->scpi_ops = &scpi_ops;
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
}
static const struct of_device_id scpi_of_match[] = {
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 7dad6f57d970..81cc3d0f6eec 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -2725,6 +2725,9 @@ void cs_dsp_stop(struct cs_dsp *dsp)
mutex_lock(&dsp->pwr_lock);
+ if (dsp->client_ops->pre_stop)
+ dsp->client_ops->pre_stop(dsp);
+
dsp->running = false;
if (dsp->ops->stop_core)
@@ -3177,6 +3180,110 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = {
.stop_core = cs_dsp_halo_stop_core,
};
+/**
+ * cs_dsp_chunk_write() - Format data to a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to write
+ * @val: Value to write
+ *
+ * This function sequentially writes values into the format required for DSP
+ * memory; it handles both insertion of the padding bytes and conversion to
+ * big endian. Note that data is only committed to the chunk when a whole DSP
+ * word's worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+ int nwrite, i;
+
+ nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+ ch->cache <<= nwrite;
+ ch->cache |= val >> (nbits - nwrite);
+ ch->cachebits += nwrite;
+ nbits -= nwrite;
+
+ if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache &= 0xFFFFFF;
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ *ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+ ch->bytes += sizeof(ch->cache);
+ ch->cachebits = 0;
+ }
+
+ if (nbits)
+ return cs_dsp_chunk_write(ch, nbits, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_write);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write() only writes data when a whole DSP word is ready to
+ * be written out, it is possible that some data will remain in the cache; this
+ * function will pad that data with zeros up to a whole DSP word and write it
+ * out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+ if (!ch->cachebits)
+ return 0;
+
+ return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_flush);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted buffer;
+ * it handles both removal of the padding bytes and conversion from big endian.
+ *
+ * Return: A negative number is returned on error, otherwise the read value.
+ */
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
+{
+ int nread, i;
+ u32 result;
+
+ if (!ch->cachebits) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache = 0;
+ ch->cachebits = CS_DSP_DATA_WORD_BITS;
+
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ ch->cache |= *ch->data++;
+
+ ch->bytes += sizeof(ch->cache);
+ }
+
+ nread = min(ch->cachebits, nbits);
+ nbits -= nread;
+
+ result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
+ ch->cache <<= nread;
+ ch->cachebits -= nread;
+
+ if (nbits)
+ result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_read);
+
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
MODULE_LICENSE("GPL v2");
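A minimal round trip through the new chunk helpers could look like the sketch below; it assumes the cs_dsp_chunk() initializer from the companion header, and the buffer size and values are arbitrary:

	u8 buf[8] = { 0 };
	struct cs_dsp_chunk wr = cs_dsp_chunk(buf, sizeof(buf));
	struct cs_dsp_chunk rd = cs_dsp_chunk(buf, sizeof(buf));

	/* 16 + 8 bits complete one 24-bit DSP word, stored as 00 12 34 56. */
	cs_dsp_chunk_write(&wr, 16, 0x1234);
	cs_dsp_chunk_write(&wr, 8, 0x56);
	cs_dsp_chunk_flush(&wr);	/* cache already empty, returns 0 */

	/* Reads back 0x123456 from the big-endian, zero-padded buffer. */
	int val = cs_dsp_chunk_read(&rd, 24);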
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 7aa4717cdcac..6cb7384ad2ac 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -2,18 +2,6 @@
menu "EFI (Extensible Firmware Interface) Support"
depends on EFI
-config EFI_VARS
- tristate "EFI Variable Support via sysfs"
- depends on EFI && (X86 || IA64)
- default n
- help
- If you say Y here, you are able to get EFI (Extensible Firmware
- Interface) variable information via sysfs. You may read,
- write, create, and destroy EFI variables through this interface.
- Note that this driver is only retained for compatibility with
- legacy users: new users should use the efivarfs filesystem
- instead.
-
config EFI_ESRT
bool
depends on EFI && !IA64
@@ -22,6 +10,7 @@ config EFI_ESRT
config EFI_VARS_PSTORE
tristate "Register efivars backend for pstore"
depends on PSTORE
+ select UCS2_STRING
default y
help
	  Say Y here to enable use of efivars as a backend to pstore. This
@@ -145,6 +134,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
+ select UCS2_STRING
default n
help
This module installs a reboot hook, such that if reboot() is
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index c02ff25dd477..8d151e332584 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -17,7 +17,6 @@ ifneq ($(CONFIG_EFI_CAPSULE_LOADER),)
obj-$(CONFIG_EFI) += capsule.o
endif
obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
-obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index b2c829e95bd1..3928dbff76d0 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -240,6 +240,7 @@ void __init efi_init(void)
* And now, memblock is fully populated, it is time to do capping.
*/
early_init_dt_check_for_usable_mem_range();
+ efi_find_mirror();
efi_esrt_init();
efi_mokvar_table_init();
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 7e771c56c13c..3bddc152fcd4 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -6,6 +6,8 @@
#include <linux/slab.h>
#include <linux/ucs2_string.h>
+MODULE_IMPORT_NS(EFIVAR);
+
#define DUMP_NAME_LEN 66
#define EFIVARS_DATA_SIZE_MAX 1024
@@ -20,18 +22,25 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS)
-static LIST_HEAD(efi_pstore_list);
-static DECLARE_WORK(efivar_work, NULL);
-
static int efi_pstore_open(struct pstore_info *psi)
{
- psi->data = NULL;
+ int err;
+
+ err = efivar_lock();
+ if (err)
+ return err;
+
+ psi->data = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
+ if (!psi->data)
+ return -ENOMEM;
+
return 0;
}
static int efi_pstore_close(struct pstore_info *psi)
{
- psi->data = NULL;
+ efivar_unlock();
+ kfree(psi->data);
return 0;
}
@@ -40,22 +49,17 @@ static inline u64 generic_id(u64 timestamp, unsigned int part, int count)
return (timestamp * 100 + part) * 1000 + count;
}
-static int efi_pstore_read_func(struct efivar_entry *entry,
- struct pstore_record *record)
+static int efi_pstore_read_func(struct pstore_record *record,
+ efi_char16_t *varname)
{
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ unsigned long wlen, size = EFIVARS_DATA_SIZE_MAX;
char name[DUMP_NAME_LEN], data_type;
- int i;
+ efi_status_t status;
int cnt;
unsigned int part;
- unsigned long size;
u64 time;
- if (efi_guidcmp(entry->var.VendorGuid, vendor))
- return 0;
-
- for (i = 0; i < DUMP_NAME_LEN; i++)
- name[i] = entry->var.VariableName[i];
+ ucs2_as_utf8(name, varname, DUMP_NAME_LEN);
if (sscanf(name, "dump-type%u-%u-%d-%llu-%c",
&record->type, &part, &cnt, &time, &data_type) == 5) {
@@ -95,161 +99,75 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
} else
return 0;
- entry->var.DataSize = 1024;
- __efivar_entry_get(entry, &entry->var.Attributes,
- &entry->var.DataSize, entry->var.Data);
- size = entry->var.DataSize;
- memcpy(record->buf, entry->var.Data,
- (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
-
- return size;
-}
-
-/**
- * efi_pstore_scan_sysfs_enter
- * @pos: scanning entry
- * @next: next entry
- * @head: list head
- */
-static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
- struct efivar_entry *next,
- struct list_head *head)
-{
- pos->scanning = true;
- if (&next->list != head)
- next->scanning = true;
-}
-
-/**
- * __efi_pstore_scan_sysfs_exit
- * @entry: deleting entry
- * @turn_off_scanning: Check if a scanning flag should be turned off
- */
-static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
- bool turn_off_scanning)
-{
- if (entry->deleting) {
- list_del(&entry->list);
- efivar_entry_iter_end();
- kfree(entry);
- if (efivar_entry_iter_begin())
- return -EINTR;
- } else if (turn_off_scanning)
- entry->scanning = false;
-
- return 0;
-}
-
-/**
- * efi_pstore_scan_sysfs_exit
- * @pos: scanning entry
- * @next: next entry
- * @head: list head
- * @stop: a flag checking if scanning will stop
- */
-static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
- struct efivar_entry *next,
- struct list_head *head, bool stop)
-{
- int ret = __efi_pstore_scan_sysfs_exit(pos, true);
-
- if (ret)
- return ret;
-
- if (stop)
- ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head);
- return ret;
-}
+ record->buf = kmalloc(size, GFP_KERNEL);
+ if (!record->buf)
+ return -ENOMEM;
-/**
- * efi_pstore_sysfs_entry_iter
- *
- * @record: pstore record to pass to callback
- *
- * You MUST call efivar_entry_iter_begin() before this function, and
- * efivar_entry_iter_end() afterwards.
- *
- */
-static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
-{
- struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
- struct efivar_entry *entry, *n;
- struct list_head *head = &efi_pstore_list;
- int size = 0;
- int ret;
-
- if (!*pos) {
- list_for_each_entry_safe(entry, n, head, list) {
- efi_pstore_scan_sysfs_enter(entry, n, head);
-
- size = efi_pstore_read_func(entry, record);
- ret = efi_pstore_scan_sysfs_exit(entry, n, head,
- size < 0);
- if (ret)
- return ret;
- if (size)
- break;
- }
- *pos = n;
- return size;
+ status = efivar_get_variable(varname, &LINUX_EFI_CRASH_GUID, NULL,
+ &size, record->buf);
+ if (status != EFI_SUCCESS) {
+ kfree(record->buf);
+ return -EIO;
}
- list_for_each_entry_safe_from((*pos), n, head, list) {
- efi_pstore_scan_sysfs_enter((*pos), n, head);
-
- size = efi_pstore_read_func((*pos), record);
- ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
- if (ret)
- return ret;
- if (size)
- break;
+ /*
+ * Store the name of the variable in the pstore_record priv field, so
+ * we can reuse it later if we need to delete the EFI variable from the
+ * variable store.
+ */
+ wlen = (ucs2_strnlen(varname, DUMP_NAME_LEN) + 1) * sizeof(efi_char16_t);
+ record->priv = kmemdup(varname, wlen, GFP_KERNEL);
+ if (!record->priv) {
+ kfree(record->buf);
+ return -ENOMEM;
}
- *pos = n;
+
return size;
}
-/**
- * efi_pstore_read
- *
- * This function returns a size of NVRAM entry logged via efi_pstore_write().
- * The meaning and behavior of efi_pstore/pstore are as below.
- *
- * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
- * and pstore filesystem will continue reading subsequent entries.
- * size == 0: Entry was not logged via efi_pstore_write(),
- * and efi_pstore driver will continue reading subsequent entries.
- * size < 0: Failed to get data of entry logging via efi_pstore_write(),
- * and pstore will stop reading entry.
- */
static ssize_t efi_pstore_read(struct pstore_record *record)
{
- ssize_t size;
+ efi_char16_t *varname = record->psi->data;
+ efi_guid_t guid = LINUX_EFI_CRASH_GUID;
+ unsigned long varname_size;
+ efi_status_t status;
- record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
- if (!record->buf)
- return -ENOMEM;
+ for (;;) {
+ varname_size = EFIVARS_DATA_SIZE_MAX;
- if (efivar_entry_iter_begin()) {
- size = -EINTR;
- goto out;
- }
- size = efi_pstore_sysfs_entry_iter(record);
- efivar_entry_iter_end();
+ /*
+ * If this is the first read() call in the pstore enumeration,
+ * varname will be the empty string, and the GetNextVariable()
+ * runtime service call will return the first EFI variable in
+ * its own enumeration order, ignoring the guid argument.
+ *
+ * Subsequent calls to GetNextVariable() must pass the name and
+ * guid values returned by the previous call, which is why we
+ * store varname in record->psi->data. Given that we only
+ * enumerate variables with the efi-pstore GUID, there is no
+ * need to record the guid return value.
+ */
+ status = efivar_get_next_variable(&varname_size, varname, &guid);
+ if (status == EFI_NOT_FOUND)
+ return 0;
-out:
- if (size <= 0) {
- kfree(record->buf);
- record->buf = NULL;
+ if (status != EFI_SUCCESS)
+ return -EIO;
+
+ /* skip variables that don't concern us */
+ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID))
+ continue;
+
+ return efi_pstore_read_func(record, varname);
}
- return size;
}
static int efi_pstore_write(struct pstore_record *record)
{
char name[DUMP_NAME_LEN];
efi_char16_t efi_name[DUMP_NAME_LEN];
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- int i, ret = 0;
+ efi_status_t status;
+ int i;
record->id = generic_id(record->time.tv_sec, record->part,
record->count);
@@ -265,88 +183,26 @@ static int efi_pstore_write(struct pstore_record *record)
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
- ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
- false, record->size, record->psi->buf);
-
- if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
- if (!schedule_work(&efivar_work))
- module_put(THIS_MODULE);
-
- return ret;
+ if (efivar_trylock())
+ return -EBUSY;
+ status = efivar_set_variable_locked(efi_name, &LINUX_EFI_CRASH_GUID,
+ PSTORE_EFI_ATTRIBUTES,
+ record->size, record->psi->buf,
+ true);
+ efivar_unlock();
+ return status == EFI_SUCCESS ? 0 : -EIO;
};
-/*
- * Clean up an entry with the same name
- */
-static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
-{
- efi_char16_t *efi_name = data;
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- unsigned long ucs2_len = ucs2_strlen(efi_name);
-
- if (efi_guidcmp(entry->var.VendorGuid, vendor))
- return 0;
-
- if (ucs2_strncmp(entry->var.VariableName, efi_name, (size_t)ucs2_len))
- return 0;
-
- if (entry->scanning) {
- /*
- * Skip deletion because this entry will be deleted
- * after scanning is completed.
- */
- entry->deleting = true;
- } else
- list_del(&entry->list);
-
- /* found */
- __efivar_entry_delete(entry);
-
- return 1;
-}
-
-static int efi_pstore_erase_name(const char *name)
-{
- struct efivar_entry *entry = NULL;
- efi_char16_t efi_name[DUMP_NAME_LEN];
- int found, i;
-
- for (i = 0; i < DUMP_NAME_LEN; i++) {
- efi_name[i] = name[i];
- if (name[i] == '\0')
- break;
- }
-
- if (efivar_entry_iter_begin())
- return -EINTR;
-
- found = __efivar_entry_iter(efi_pstore_erase_func, &efi_pstore_list,
- efi_name, &entry);
- efivar_entry_iter_end();
-
- if (found && !entry->scanning)
- kfree(entry);
-
- return found ? 0 : -ENOENT;
-}
-
static int efi_pstore_erase(struct pstore_record *record)
{
- char name[DUMP_NAME_LEN];
- int ret;
-
- snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld",
- record->type, record->part, record->count,
- (long long)record->time.tv_sec);
- ret = efi_pstore_erase_name(name);
- if (ret != -ENOENT)
- return ret;
+ efi_status_t status;
- snprintf(name, sizeof(name), "dump-type%u-%u-%lld",
- record->type, record->part, (long long)record->time.tv_sec);
- ret = efi_pstore_erase_name(name);
+ status = efivar_set_variable(record->priv, &LINUX_EFI_CRASH_GUID,
+ PSTORE_EFI_ATTRIBUTES, 0, NULL);
- return ret;
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ return -EIO;
+ return 0;
}
static struct pstore_info efi_pstore_info = {
@@ -360,77 +216,14 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
-static int efi_pstore_callback(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry;
- int ret;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memcpy(entry->var.VariableName, name, name_size);
- entry->var.VendorGuid = vendor;
-
- ret = efivar_entry_add(entry, &efi_pstore_list);
- if (ret)
- kfree(entry);
-
- return ret;
-}
-
-static int efi_pstore_update_entry(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry = data;
-
- if (efivar_entry_find(name, vendor, &efi_pstore_list, false))
- return 0;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- return 1;
-}
-
-static void efi_pstore_update_entries(struct work_struct *work)
-{
- struct efivar_entry *entry;
- int err;
-
- /* Add new sysfs entries */
- while (1) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
-
- err = efivar_init(efi_pstore_update_entry, entry,
- false, &efi_pstore_list);
- if (!err)
- break;
-
- efivar_entry_add(entry, &efi_pstore_list);
- }
-
- kfree(entry);
- module_put(THIS_MODULE);
-}
-
static __init int efivars_pstore_init(void)
{
- int ret;
-
- if (!efivars_kobject() || !efivar_supports_writes())
+ if (!efivar_supports_writes())
return 0;
if (efivars_pstore_disable)
return 0;
- ret = efivar_init(efi_pstore_callback, NULL, true, &efi_pstore_list);
- if (ret)
- return ret;
-
efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
if (!efi_pstore_info.buf)
return -ENOMEM;
@@ -443,8 +236,6 @@ static __init int efivars_pstore_init(void)
efi_pstore_info.bufsize = 0;
}
- INIT_WORK(&efivar_work, efi_pstore_update_entries);
-
return 0;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 860534bcfdac..e4080ad96089 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -202,7 +202,7 @@ static void generic_ops_unregister(void)
}
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
-#define EFIVAR_SSDT_NAME_MAX 16
+#define EFIVAR_SSDT_NAME_MAX 16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
@@ -219,83 +219,62 @@ static int __init efivar_ssdt_setup(char *str)
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
-static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry;
- struct list_head *list = data;
- char utf8_name[EFIVAR_SSDT_NAME_MAX];
- int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
-
- ucs2_as_utf8(utf8_name, name, limit - 1);
- if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
- return 0;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return 0;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
-
- efivar_entry_add(entry, list);
-
- return 0;
-}
-
static __init int efivar_ssdt_load(void)
{
- LIST_HEAD(entries);
- struct efivar_entry *entry, *aux;
- unsigned long size;
- void *data;
- int ret;
+ unsigned long name_size = 256;
+ efi_char16_t *name = NULL;
+ efi_status_t status;
+ efi_guid_t guid;
if (!efivar_ssdt[0])
return 0;
- ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
-
- list_for_each_entry_safe(entry, aux, &entries, list) {
- pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
- &entry->var.VendorGuid);
+ name = kzalloc(name_size, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
- list_del(&entry->list);
+ for (;;) {
+ char utf8_name[EFIVAR_SSDT_NAME_MAX];
+ unsigned long data_size = 0;
+ void *data;
+ int limit;
- ret = efivar_entry_size(entry, &size);
- if (ret) {
- pr_err("failed to get var size\n");
- goto free_entry;
+ status = efi.get_next_variable(&name_size, name, &guid);
+ if (status == EFI_NOT_FOUND) {
+ break;
+ } else if (status == EFI_BUFFER_TOO_SMALL) {
+ name = krealloc(name, name_size, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ continue;
}
- data = kmalloc(size, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto free_entry;
- }
+ limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
+ ucs2_as_utf8(utf8_name, name, limit - 1);
+ if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+ continue;
- ret = efivar_entry_get(entry, NULL, &size, data);
- if (ret) {
- pr_err("failed to get var data\n");
- goto free_data;
- }
+ pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
- ret = acpi_load_table(data, NULL);
- if (ret) {
- pr_err("failed to load table: %d\n", ret);
- goto free_data;
- }
+ status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size)
+ return -EIO;
- goto free_entry;
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
-free_data:
+ status = efi.get_variable(name, &guid, NULL, &data_size, data);
+ if (status == EFI_SUCCESS) {
+ acpi_status ret = acpi_load_table(data, NULL);
+ if (ret)
+ pr_err("failed to load table: %u\n", ret);
+ } else {
+ pr_err("failed to get var data: 0x%lx\n", status);
+ }
kfree(data);
-
-free_entry:
- kfree(entry);
}
-
- return ret;
+ return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
@@ -446,6 +425,29 @@ err_put:
subsys_initcall(efisubsys_init);
+void __init efi_find_mirror(void)
+{
+ efi_memory_desc_t *md;
+ u64 mirror_size = 0, total_size = 0;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ total_size += size;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
+ memblock_mark_mirror(start, size);
+ mirror_size += size;
+ }
+ }
+ if (mirror_size)
+ pr_info("Memory: %lldM/%lldM mirrored memory\n",
+ mirror_size>>20, total_size>>20);
+}
+
/*
* Find the efi memory descriptor for a given physical address. Given a
* physical address, determine if it exists within an EFI Memory Map entry,
@@ -897,6 +899,7 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+EXPORT_SYMBOL_GPL(efi_status_to_err);
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 15a47539dc56..8ced7af8e56d 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -10,69 +10,51 @@
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/ucs2_string.h>
-static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
-{
- size_t i;
-
- for (i = 0; i < strlen(str); i++)
- str16[i] = str[i];
-
- str16[i] = '\0';
-}
+#define MAX_DATA_LEN 512
-static int efibc_set_variable(const char *name, const char *value)
+static int efibc_set_variable(efi_char16_t *name, efi_char16_t *value,
+ unsigned long len)
{
- int ret;
- efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
- struct efivar_entry *entry;
- size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+ efi_status_t status;
- if (size > sizeof(entry->var.Data)) {
- pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
- return -EINVAL;
- }
+ status = efi.set_variable(name, &LINUX_EFI_LOADER_ENTRY_GUID,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ len * sizeof(efi_char16_t), value);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
- return -ENOMEM;
+ if (status != EFI_SUCCESS) {
+ pr_err("failed to set EFI variable: 0x%lx\n", status);
+ return -EIO;
}
-
- efibc_str_to_str16(name, entry->var.VariableName);
- efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
- memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
-
- ret = efivar_entry_set_safe(entry->var.VariableName,
- entry->var.VendorGuid,
- EFI_VARIABLE_NON_VOLATILE
- | EFI_VARIABLE_BOOTSERVICE_ACCESS
- | EFI_VARIABLE_RUNTIME_ACCESS,
- false, size, entry->var.Data);
-
- if (ret)
- pr_err("failed to set %s EFI variable: 0x%x\n",
- name, ret);
-
- kfree(entry);
- return ret;
+ return 0;
}
static int efibc_reboot_notifier_call(struct notifier_block *notifier,
unsigned long event, void *data)
{
- const char *reason = "shutdown";
+ efi_char16_t *reason = event == SYS_RESTART ? L"reboot"
+ : L"shutdown";
+ const u8 *str = data;
+ efi_char16_t *wdata;
+ unsigned long l;
int ret;
- if (event == SYS_RESTART)
- reason = "reboot";
-
- ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+ ret = efibc_set_variable(L"LoaderEntryRebootReason", reason,
+ ucs2_strlen(reason));
if (ret || !data)
return NOTIFY_DONE;
- efibc_set_variable("LoaderEntryOneShot", (char *)data);
+ wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ if (!wdata)
+ return NOTIFY_DONE;
+
+ for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
+ wdata[l] = str[l];
+ wdata[l] = L'\0';
+
+ efibc_set_variable(L"LoaderEntryOneShot", wdata, l);
+ kfree(wdata);
return NOTIFY_DONE;
}
@@ -84,7 +66,7 @@ static int __init efibc_init(void)
{
int ret;
- if (!efivars_kobject() || !efivar_supports_writes())
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
return -ENODEV;
ret = register_reboot_notifier(&efibc_reboot_notifier);
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
deleted file mode 100644
index ea0bc39dc965..000000000000
--- a/drivers/firmware/efi/efivars.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Originally from efivars.c,
- *
- * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
- * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
- *
- * This code takes all variables accessible from EFI runtime and
- * exports them via sysfs
- */
-
-#include <linux/efi.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ucs2_string.h>
-#include <linux/compat.h>
-
-#define EFIVARS_VERSION "0.08"
-#define EFIVARS_DATE "2004-May-17"
-
-MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
-MODULE_DESCRIPTION("sysfs interface to EFI Variables");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(EFIVARS_VERSION);
-
-static LIST_HEAD(efivar_sysfs_list);
-
-static struct kset *efivars_kset;
-
-static struct bin_attribute *efivars_new_var;
-static struct bin_attribute *efivars_del_var;
-
-struct compat_efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- __u32 DataSize;
- __u8 Data[1024];
- __u32 Status;
- __u32 Attributes;
-} __packed;
-
-struct efivar_attribute {
- struct attribute attr;
- ssize_t (*show) (struct efivar_entry *entry, char *buf);
- ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
-};
-
-#define EFIVAR_ATTR(_name, _mode, _show, _store) \
-struct efivar_attribute efivar_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode}, \
- .show = _show, \
- .store = _store, \
-};
-
-#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
-#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj)
-
-/*
- * Prototype for sysfs creation function
- */
-static int
-efivar_create_sysfs_entry(struct efivar_entry *new_var);
-
-static ssize_t
-efivar_guid_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- char *str = buf;
-
- if (!entry || !buf)
- return 0;
-
- efi_guid_to_str(&var->VendorGuid, str);
- str += strlen(str);
- str += sprintf(str, "\n");
-
- return str - buf;
-}
-
-static ssize_t
-efivar_attr_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- unsigned long size = sizeof(var->Data);
- char *str = buf;
- int ret;
-
- if (!entry || !buf)
- return -EINVAL;
-
- ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
- var->DataSize = size;
- if (ret)
- return -EIO;
-
- if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
- str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
- if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
- str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
- str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
- str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
- if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
- str += sprintf(str,
- "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
- if (var->Attributes &
- EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
- str += sprintf(str,
- "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
- str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
- return str - buf;
-}
-
-static ssize_t
-efivar_size_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- unsigned long size = sizeof(var->Data);
- char *str = buf;
- int ret;
-
- if (!entry || !buf)
- return -EINVAL;
-
- ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
- var->DataSize = size;
- if (ret)
- return -EIO;
-
- str += sprintf(str, "0x%lx\n", var->DataSize);
- return str - buf;
-}
-
-static ssize_t
-efivar_data_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- unsigned long size = sizeof(var->Data);
- int ret;
-
- if (!entry || !buf)
- return -EINVAL;
-
- ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
- var->DataSize = size;
- if (ret)
- return -EIO;
-
- memcpy(buf, var->Data, var->DataSize);
- return var->DataSize;
-}
-
-static inline int
-sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
- unsigned long size, u32 attributes, u8 *data)
-{
- /*
- * If only updating the variable data, then the name
- * and guid should remain the same
- */
- if (memcmp(name, var->VariableName, sizeof(var->VariableName)) ||
- efi_guidcmp(vendor, var->VendorGuid)) {
- printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
- return -EINVAL;
- }
-
- if ((size <= 0) || (attributes == 0)){
- printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
- return -EINVAL;
- }
-
- if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(vendor, name, data, size) == false) {
- printk(KERN_ERR "efivars: Malformed variable content\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void
-copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
-{
- memcpy(dst->VariableName, src->VariableName, EFI_VAR_NAME_LEN);
- memcpy(dst->Data, src->Data, sizeof(src->Data));
-
- dst->VendorGuid = src->VendorGuid;
- dst->DataSize = src->DataSize;
- dst->Attributes = src->Attributes;
-}
-
-/*
- * We allow each variable to be edited via rewriting the
- * entire efi variable structure.
- */
-static ssize_t
-efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
-{
- struct efi_variable *new_var, *var = &entry->var;
- efi_char16_t *name;
- unsigned long size;
- efi_guid_t vendor;
- u32 attributes;
- u8 *data;
- int err;
-
- if (!entry || !buf)
- return -EINVAL;
-
- if (in_compat_syscall()) {
- struct compat_efi_variable *compat;
-
- if (count != sizeof(*compat))
- return -EINVAL;
-
- compat = (struct compat_efi_variable *)buf;
- attributes = compat->Attributes;
- vendor = compat->VendorGuid;
- name = compat->VariableName;
- size = compat->DataSize;
- data = compat->Data;
-
- err = sanity_check(var, name, vendor, size, attributes, data);
- if (err)
- return err;
-
- copy_out_compat(&entry->var, compat);
- } else {
- if (count != sizeof(struct efi_variable))
- return -EINVAL;
-
- new_var = (struct efi_variable *)buf;
-
- attributes = new_var->Attributes;
- vendor = new_var->VendorGuid;
- name = new_var->VariableName;
- size = new_var->DataSize;
- data = new_var->Data;
-
- err = sanity_check(var, name, vendor, size, attributes, data);
- if (err)
- return err;
-
- memcpy(&entry->var, new_var, count);
- }
-
- err = efivar_entry_set(entry, attributes, size, data, NULL);
- if (err) {
- printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
- return -EIO;
- }
-
- return count;
-}
-
-static ssize_t
-efivar_show_raw(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- struct compat_efi_variable *compat;
- unsigned long datasize = sizeof(var->Data);
- size_t size;
- int ret;
-
- if (!entry || !buf)
- return 0;
-
- ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
- var->DataSize = datasize;
- if (ret)
- return -EIO;
-
- if (in_compat_syscall()) {
- compat = (struct compat_efi_variable *)buf;
-
- size = sizeof(*compat);
- memcpy(compat->VariableName, var->VariableName,
- EFI_VAR_NAME_LEN);
- memcpy(compat->Data, var->Data, sizeof(compat->Data));
-
- compat->VendorGuid = var->VendorGuid;
- compat->DataSize = var->DataSize;
- compat->Attributes = var->Attributes;
- } else {
- size = sizeof(*var);
- memcpy(buf, var, size);
- }
-
- return size;
-}
-
-/*
- * Generic read/write functions that call the specific functions of
- * the attributes...
- */
-static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
- ssize_t ret = -EIO;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (efivar_attr->show) {
- ret = efivar_attr->show(var, buf);
- }
- return ret;
-}
-
-static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
- ssize_t ret = -EIO;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (efivar_attr->store)
- ret = efivar_attr->store(var, buf, count);
-
- return ret;
-}
-
-static const struct sysfs_ops efivar_attr_ops = {
- .show = efivar_attr_show,
- .store = efivar_attr_store,
-};
-
-static void efivar_release(struct kobject *kobj)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- kfree(var);
-}
-
-static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
-static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
-static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
-static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
-static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
-
-static struct attribute *def_attrs[] = {
- &efivar_attr_guid.attr,
- &efivar_attr_size.attr,
- &efivar_attr_attributes.attr,
- &efivar_attr_data.attr,
- &efivar_attr_raw_var.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(def);
-
-static struct kobj_type efivar_ktype = {
- .release = efivar_release,
- .sysfs_ops = &efivar_attr_ops,
- .default_groups = def_groups,
-};
-
-static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
- struct efi_variable *new_var = (struct efi_variable *)buf;
- struct efivar_entry *new_entry;
- bool need_compat = in_compat_syscall();
- efi_char16_t *name;
- unsigned long size;
- u32 attributes;
- u8 *data;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (need_compat) {
- if (count != sizeof(*compat))
- return -EINVAL;
-
- attributes = compat->Attributes;
- name = compat->VariableName;
- size = compat->DataSize;
- data = compat->Data;
- } else {
- if (count != sizeof(*new_var))
- return -EINVAL;
-
- attributes = new_var->Attributes;
- name = new_var->VariableName;
- size = new_var->DataSize;
- data = new_var->Data;
- }
-
- if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(new_var->VendorGuid, name, data,
- size) == false) {
- printk(KERN_ERR "efivars: Malformed variable content\n");
- return -EINVAL;
- }
-
- new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
- if (!new_entry)
- return -ENOMEM;
-
- if (need_compat)
- copy_out_compat(&new_entry->var, compat);
- else
- memcpy(&new_entry->var, new_var, sizeof(*new_var));
-
- err = efivar_entry_set(new_entry, attributes, size,
- data, &efivar_sysfs_list);
- if (err) {
- if (err == -EEXIST)
- err = -EINVAL;
- goto out;
- }
-
- if (efivar_create_sysfs_entry(new_entry)) {
- printk(KERN_WARNING "efivars: failed to create sysfs entry.\n");
- kfree(new_entry);
- }
- return count;
-
-out:
- kfree(new_entry);
- return err;
-}
-
-static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct efi_variable *del_var = (struct efi_variable *)buf;
- struct compat_efi_variable *compat;
- struct efivar_entry *entry;
- efi_char16_t *name;
- efi_guid_t vendor;
- int err = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (in_compat_syscall()) {
- if (count != sizeof(*compat))
- return -EINVAL;
-
- compat = (struct compat_efi_variable *)buf;
- name = compat->VariableName;
- vendor = compat->VendorGuid;
- } else {
- if (count != sizeof(*del_var))
- return -EINVAL;
-
- name = del_var->VariableName;
- vendor = del_var->VendorGuid;
- }
-
- if (efivar_entry_iter_begin())
- return -EINTR;
- entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true);
- if (!entry)
- err = -EINVAL;
- else if (__efivar_entry_delete(entry))
- err = -EIO;
-
- if (err) {
- efivar_entry_iter_end();
- return err;
- }
-
- if (!entry->scanning) {
- efivar_entry_iter_end();
- efivar_unregister(entry);
- } else
- efivar_entry_iter_end();
-
- /* It's dead Jim.... */
- return count;
-}
-
-/**
- * efivar_create_sysfs_entry - create a new entry in sysfs
- * @new_var: efivar entry to create
- *
- * Returns 0 on success, negative error code on failure
- */
-static int
-efivar_create_sysfs_entry(struct efivar_entry *new_var)
-{
- int short_name_size;
- char *short_name;
- unsigned long utf8_name_size;
- efi_char16_t *variable_name = new_var->var.VariableName;
- int ret;
-
- /*
- * Length of the variable bytes in UTF8, plus the '-' separator,
- * plus the GUID, plus trailing NUL
- */
- utf8_name_size = ucs2_utf8size(variable_name);
- short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
-
- short_name = kmalloc(short_name_size, GFP_KERNEL);
- if (!short_name)
- return -ENOMEM;
-
- ucs2_as_utf8(short_name, variable_name, short_name_size);
-
- /* This is ugly, but necessary to separate one vendor's
- private variables from another's. */
- short_name[utf8_name_size] = '-';
- efi_guid_to_str(&new_var->var.VendorGuid,
- short_name + utf8_name_size + 1);
-
- new_var->kobj.kset = efivars_kset;
-
- ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
- NULL, "%s", short_name);
- kfree(short_name);
- if (ret) {
- kobject_put(&new_var->kobj);
- return ret;
- }
-
- kobject_uevent(&new_var->kobj, KOBJ_ADD);
- if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
- efivar_unregister(new_var);
- return -EINTR;
- }
-
- return 0;
-}
-
-static int
-create_efivars_bin_attributes(void)
-{
- struct bin_attribute *attr;
- int error;
-
- /* new_var */
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr)
- return -ENOMEM;
-
- attr->attr.name = "new_var";
- attr->attr.mode = 0200;
- attr->write = efivar_create;
- efivars_new_var = attr;
-
- /* del_var */
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr) {
- error = -ENOMEM;
- goto out_free;
- }
- attr->attr.name = "del_var";
- attr->attr.mode = 0200;
- attr->write = efivar_delete;
- efivars_del_var = attr;
-
- sysfs_bin_attr_init(efivars_new_var);
- sysfs_bin_attr_init(efivars_del_var);
-
- /* Register */
- error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var);
- if (error) {
- printk(KERN_ERR "efivars: unable to create new_var sysfs file"
- " due to error %d\n", error);
- goto out_free;
- }
-
- error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var);
- if (error) {
- printk(KERN_ERR "efivars: unable to create del_var sysfs file"
- " due to error %d\n", error);
- sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
- goto out_free;
- }
-
- return 0;
-out_free:
- kfree(efivars_del_var);
- efivars_del_var = NULL;
- kfree(efivars_new_var);
- efivars_new_var = NULL;
- return error;
-}
-
-static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- efivar_create_sysfs_entry(entry);
-
- return 0;
-}
-
-static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
-{
- int err = efivar_entry_remove(entry);
-
- if (err)
- return err;
- efivar_unregister(entry);
- return 0;
-}
-
-static void efivars_sysfs_exit(void)
-{
- /* Remove all entries and destroy */
- int err;
-
- err = __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list,
- NULL, NULL);
- if (err) {
- pr_err("efivars: Failed to destroy sysfs entries\n");
- return;
- }
-
- if (efivars_new_var)
- sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
- if (efivars_del_var)
- sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var);
- kfree(efivars_new_var);
- kfree(efivars_del_var);
- kset_unregister(efivars_kset);
-}
-
-static int efivars_sysfs_init(void)
-{
- struct kobject *parent_kobj = efivars_kobject();
- int error = 0;
-
- /* No efivars has been registered yet */
- if (!parent_kobj || !efivar_supports_writes())
- return 0;
-
- printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
- EFIVARS_DATE);
-
- efivars_kset = kset_create_and_add("vars", NULL, parent_kobj);
- if (!efivars_kset) {
- printk(KERN_ERR "efivars: Subsystem registration failed.\n");
- return -ENOMEM;
- }
-
- efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
-
- error = create_efivars_bin_attributes();
- if (error) {
- efivars_sysfs_exit();
- return error;
- }
-
- return 0;
-}
-
-module_init(efivars_sysfs_init);
-module_exit(efivars_sysfs_exit);
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 9e85e58d1f27..b450ebf95977 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -8,6 +8,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/unaligned.h>
#include "efistub.h"
@@ -29,7 +30,7 @@ static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
- const fdt32_t *prop;
+ const void *prop;
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
@@ -40,10 +41,16 @@ static int get_boot_hartid_from_fdt(void)
return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
- if (!prop || len != sizeof(u32))
+ if (!prop)
+ return -EINVAL;
+
+ if (len == sizeof(u32))
+ hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop);
+ else if (len == sizeof(u64))
+ hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
+ else
return -EINVAL;
- hartid = fdt32_to_cpu(*prop);
return 0;
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 4df55a55da84..6ec7970dbd40 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -59,8 +59,7 @@ static void __init efi_memmap_free(void)
* Depending on whether mm_init() has already been invoked or not,
* either memblock or "normal" page allocation is used.
*
- * Returns the physical address of the allocated memory map on
- * success, zero on failure.
+ * Returns zero on success, a negative error code on failure.
*/
int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data)
@@ -245,7 +244,7 @@ int __init efi_memmap_install(struct efi_memory_map_data *data)
* @range: Address range (start, end) to split around
*
* Returns the number of additional EFI memmap entries required to
- * accomodate @range.
+ * accommodate @range.
*/
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 73089a24f04b..ceae84c19d22 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -6,7 +6,7 @@
#include <linux/efi.h>
#include <linux/reboot.h>
-static void (*orig_pm_power_off)(void);
+static struct sys_off_handler *efi_sys_off_handler;
int efi_reboot_quirk_mode = -1;
@@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void)
return false;
}
-static void efi_power_off(void)
+static int efi_power_off(struct sys_off_data *data)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
- /*
- * The above call should not return, if it does fall back to
- * the original power off method (typically ACPI poweroff).
- */
- if (orig_pm_power_off)
- orig_pm_power_off();
+
+ return NOTIFY_DONE;
}
static int __init efi_shutdown_init(void)
@@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void)
return -ENODEV;
if (efi_poweroff_required()) {
- orig_pm_power_off = pm_power_off;
- pm_power_off = efi_power_off;
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ efi_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 1,
+ efi_power_off, NULL);
+ if (IS_ERR(efi_sys_off_handler))
+ return PTR_ERR(efi_sys_off_handler);
}
return 0;
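
For context, register_sys_off_handler() replaces the old trick of saving and overriding the pm_power_off function pointer: handlers are registered with an explicit priority and the core invokes them in order at poweroff. A minimal, hypothetical user of the same API — everything except the register_sys_off_handler()/NOTIFY_DONE pieces is made-up scaffolding:

        static int example_power_off(struct sys_off_data *data)
        {
                /* attempt the platform-specific poweroff here */
                return NOTIFY_DONE;     /* returning lets lower-priority handlers run */
        }

        static int __init example_init(void)
        {
                struct sys_off_handler *handler;

                handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                                   SYS_OFF_PRIO_FIRMWARE + 1,
                                                   example_power_off, NULL);
                return PTR_ERR_OR_ZERO(handler);
        }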
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index cae590bd08f2..dd74d2ad3184 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -6,306 +6,24 @@
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*/
-#include <linux/capability.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/efi.h>
-#include <linux/sysfs.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
#include <linux/ucs2_string.h>
/* Private pointer to registered efivars */
static struct efivars *__efivars;
-/*
- * efivars_lock protects three things:
- * 1) efivarfs_list and efivars_sysfs_list
- * 2) ->ops calls
- * 3) (un)registration of __efivars
- */
static DEFINE_SEMAPHORE(efivars_lock);
-static bool
-validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- struct efi_generic_dev_path *node;
- int offset = 0;
-
- node = (struct efi_generic_dev_path *)buffer;
-
- if (len < sizeof(*node))
- return false;
-
- while (offset <= len - sizeof(*node) &&
- node->length >= sizeof(*node) &&
- node->length <= len - offset) {
- offset += node->length;
-
- if ((node->type == EFI_DEV_END_PATH ||
- node->type == EFI_DEV_END_PATH2) &&
- node->sub_type == EFI_DEV_END_ENTIRE)
- return true;
-
- node = (struct efi_generic_dev_path *)(buffer + offset);
- }
-
- /*
- * If we're here then either node->length pointed past the end
- * of the buffer or we reached the end of the buffer without
- * finding a device path end node.
- */
- return false;
-}
-
-static bool
-validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- /* An array of 16-bit integers */
- if ((len % 2) != 0)
- return false;
-
- return true;
-}
-
-static bool
-validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- u16 filepathlength;
- int i, desclength = 0, namelen;
-
- namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
-
- /* Either "Boot" or "Driver" followed by four digits of hex */
- for (i = match; i < match+4; i++) {
- if (var_name[i] > 127 ||
- hex_to_bin(var_name[i] & 0xff) < 0)
- return true;
- }
-
- /* Reject it if there's 4 digits of hex and then further content */
- if (namelen > match + 4)
- return false;
-
- /* A valid entry must be at least 8 bytes */
- if (len < 8)
- return false;
-
- filepathlength = buffer[4] | buffer[5] << 8;
-
- /*
- * There's no stored length for the description, so it has to be
- * found by hand
- */
- desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
-
- /* Each boot entry must have a descriptor */
- if (!desclength)
- return false;
-
- /*
- * If the sum of the length of the description, the claimed filepath
- * length and the original header are greater than the length of the
- * variable, it's malformed
- */
- if ((desclength + filepathlength + 6) > len)
- return false;
-
- /*
- * And, finally, check the filepath
- */
- return validate_device_path(var_name, match, buffer + desclength + 6,
- filepathlength);
-}
-
-static bool
-validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- /* A single 16-bit integer */
- if (len != 2)
- return false;
-
- return true;
-}
-
-static bool
-validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (buffer[i] > 127)
- return false;
-
- if (buffer[i] == 0)
- return true;
- }
-
- return false;
-}
-
-struct variable_validate {
- efi_guid_t vendor;
- char *name;
- bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
- unsigned long len);
-};
-
-/*
- * This is the list of variables we need to validate, as well as the
- * whitelist for what we think is safe not to default to immutable.
- *
- * If it has a validate() method that's not NULL, it'll go into the
- * validation routine. If not, it is assumed valid, but still used for
- * whitelisting.
- *
- * Note that it's sorted by {vendor,name}, but globbed names must come after
- * any other name with the same prefix.
- */
-static const struct variable_validate variable_validate[] = {
- { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
- { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
- { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
- { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
- { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
- { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
- { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
- { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
- { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
- { LINUX_EFI_CRASH_GUID, "*", NULL },
- { NULL_GUID, "", NULL },
-};
-
-/*
- * Check if @var_name matches the pattern given in @match_name.
- *
- * @var_name: an array of @len non-NUL characters.
- * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
- * final "*" character matches any trailing characters @var_name,
- * including the case when there are none left in @var_name.
- * @match: on output, the number of non-wildcard characters in @match_name
- * that @var_name matches, regardless of the return value.
- * @return: whether @var_name fully matches @match_name.
- */
-static bool
-variable_matches(const char *var_name, size_t len, const char *match_name,
- int *match)
-{
- for (*match = 0; ; (*match)++) {
- char c = match_name[*match];
-
- switch (c) {
- case '*':
- /* Wildcard in @match_name means we've matched. */
- return true;
-
- case '\0':
- /* @match_name has ended. Has @var_name too? */
- return (*match == len);
-
- default:
- /*
- * We've reached a non-wildcard char in @match_name.
- * Continue only if there's an identical character in
- * @var_name.
- */
- if (*match < len && c == var_name[*match])
- continue;
- return false;
- }
- }
-}
-
-bool
-efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size)
-{
- int i;
- unsigned long utf8_size;
- u8 *utf8_name;
-
- utf8_size = ucs2_utf8size(var_name);
- utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
- if (!utf8_name)
- return false;
-
- ucs2_as_utf8(utf8_name, var_name, utf8_size);
- utf8_name[utf8_size] = '\0';
-
- for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
- const char *name = variable_validate[i].name;
- int match = 0;
-
- if (efi_guidcmp(vendor, variable_validate[i].vendor))
- continue;
-
- if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
- if (variable_validate[i].validate == NULL)
- break;
- kfree(utf8_name);
- return variable_validate[i].validate(var_name, match,
- data, data_size);
- }
- }
- kfree(utf8_name);
- return true;
-}
-EXPORT_SYMBOL_GPL(efivar_validate);
-
-bool
-efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
- size_t len)
-{
- int i;
- bool found = false;
- int match = 0;
-
- /*
- * Check if our variable is in the validated variables list
- */
- for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
- if (efi_guidcmp(variable_validate[i].vendor, vendor))
- continue;
-
- if (variable_matches(var_name, len,
- variable_validate[i].name, &match)) {
- found = true;
- break;
- }
- }
-
- /*
- * If it's in our list, it is removable.
- */
- return found;
-}
-EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
-
-static efi_status_t
-check_var_size(u32 attributes, unsigned long size)
+efi_status_t check_var_size(u32 attributes, unsigned long size)
{
const struct efivar_operations *fops;
- if (!__efivars)
- return EFI_UNSUPPORTED;
-
fops = __efivars->ops;
if (!fops->query_variable_store)
@@ -313,15 +31,12 @@ check_var_size(u32 attributes, unsigned long size)
return fops->query_variable_store(attributes, size, false);
}
+EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR);
-static efi_status_t
-check_var_size_nonblocking(u32 attributes, unsigned long size)
+efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size)
{
const struct efivar_operations *fops;
- if (!__efivars)
- return EFI_UNSUPPORTED;
-
fops = __efivars->ops;
if (!fops->query_variable_store)
@@ -329,894 +44,228 @@ check_var_size_nonblocking(u32 attributes, unsigned long size)
return fops->query_variable_store(attributes, size, true);
}
-
-static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
- struct list_head *head)
-{
- struct efivar_entry *entry, *n;
- unsigned long strsize1, strsize2;
- bool found = false;
-
- strsize1 = ucs2_strsize(variable_name, 1024);
- list_for_each_entry_safe(entry, n, head, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(variable_name, &(entry->var.VariableName),
- strsize2) &&
- !efi_guidcmp(entry->var.VendorGuid,
- *vendor)) {
- found = true;
- break;
- }
- }
- return found;
-}
-
-/*
- * Returns the size of variable_name, in bytes, including the
- * terminating NULL character, or variable_name_size if no NULL
- * character is found among the first variable_name_size bytes.
- */
-static unsigned long var_name_strnsize(efi_char16_t *variable_name,
- unsigned long variable_name_size)
-{
- unsigned long len;
- efi_char16_t c;
-
- /*
- * The variable name is, by definition, a NULL-terminated
- * string, so make absolutely sure that variable_name_size is
- * the value we expect it to be. If not, return the real size.
- */
- for (len = 2; len <= variable_name_size; len += sizeof(c)) {
- c = variable_name[(len / sizeof(c)) - 1];
- if (!c)
- break;
- }
-
- return min(len, variable_name_size);
-}
-
-/*
- * Print a warning when duplicate EFI variables are encountered and
- * disable the sysfs workqueue since the firmware is buggy.
- */
-static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
- unsigned long len16)
-{
- size_t i, len8 = len16 / sizeof(efi_char16_t);
- char *str8;
-
- str8 = kzalloc(len8, GFP_KERNEL);
- if (!str8)
- return;
-
- for (i = 0; i < len8; i++)
- str8[i] = str16[i];
-
- printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
- str8, vendor_guid);
- kfree(str8);
-}
+EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR);
/**
- * efivar_init - build the initial list of EFI variables
- * @func: callback function to invoke for every variable
- * @data: function-specific data to pass to @func
- * @duplicates: error if we encounter duplicates on @head?
- * @head: initialised head of variable list
- *
- * Get every EFI variable from the firmware and invoke @func. @func
- * should call efivar_entry_add() to build the list of variables.
+ * efivars_kobject - get the kobject for the registered efivars
*
- * Returns 0 on success, or a kernel error code on failure.
+ * If efivars_register() has not been called we return NULL,
+ * otherwise return the kobject used at registration time.
*/
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head)
+struct kobject *efivars_kobject(void)
{
- const struct efivar_operations *ops;
- unsigned long variable_name_size = 1024;
- efi_char16_t *variable_name;
- efi_status_t status;
- efi_guid_t vendor_guid;
- int err = 0;
-
if (!__efivars)
- return -EFAULT;
-
- ops = __efivars->ops;
-
- variable_name = kzalloc(variable_name_size, GFP_KERNEL);
- if (!variable_name) {
- printk(KERN_ERR "efivars: Memory allocation failed.\n");
- return -ENOMEM;
- }
-
- if (down_interruptible(&efivars_lock)) {
- err = -EINTR;
- goto free;
- }
-
- /*
- * Per EFI spec, the maximum storage allocated for both
- * the variable name and variable data is 1024 bytes.
- */
-
- do {
- variable_name_size = 1024;
-
- status = ops->get_next_variable(&variable_name_size,
- variable_name,
- &vendor_guid);
- switch (status) {
- case EFI_SUCCESS:
- if (duplicates)
- up(&efivars_lock);
-
- variable_name_size = var_name_strnsize(variable_name,
- variable_name_size);
-
- /*
- * Some firmware implementations return the
- * same variable name on multiple calls to
- * get_next_variable(). Terminate the loop
- * immediately as there is no guarantee that
- * we'll ever see a different variable name,
- * and may end up looping here forever.
- */
- if (duplicates &&
- variable_is_present(variable_name, &vendor_guid,
- head)) {
- dup_variable_bug(variable_name, &vendor_guid,
- variable_name_size);
- status = EFI_NOT_FOUND;
- } else {
- err = func(variable_name, vendor_guid,
- variable_name_size, data);
- if (err)
- status = EFI_NOT_FOUND;
- }
-
- if (duplicates) {
- if (down_interruptible(&efivars_lock)) {
- err = -EINTR;
- goto free;
- }
- }
-
- break;
- case EFI_UNSUPPORTED:
- err = -EOPNOTSUPP;
- status = EFI_NOT_FOUND;
- break;
- case EFI_NOT_FOUND:
- break;
- default:
- printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
- status);
- status = EFI_NOT_FOUND;
- break;
- }
-
- } while (status != EFI_NOT_FOUND);
-
- up(&efivars_lock);
-free:
- kfree(variable_name);
+ return NULL;
- return err;
+ return __efivars->kobject;
}
-EXPORT_SYMBOL_GPL(efivar_init);
+EXPORT_SYMBOL_GPL(efivars_kobject);
/**
- * efivar_entry_add - add entry to variable list
- * @entry: entry to add to list
- * @head: list head
+ * efivars_register - register an efivars
+ * @efivars: efivars to register
+ * @ops: efivars operations
+ * @kobject: @efivars-specific kobject
*
- * Returns 0 on success, or a kernel error code on failure.
+ * Only a single efivars can be registered at any time.
*/
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
+int efivars_register(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *kobject)
{
if (down_interruptible(&efivars_lock))
return -EINTR;
- list_add(&entry->list, head);
- up(&efivars_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(efivar_entry_add);
+ efivars->ops = ops;
+ efivars->kobject = kobject;
-/**
- * efivar_entry_remove - remove entry from variable list
- * @entry: entry to remove from list
- *
- * Returns 0 on success, or a kernel error code on failure.
- */
-int efivar_entry_remove(struct efivar_entry *entry)
-{
- if (down_interruptible(&efivars_lock))
- return -EINTR;
- list_del(&entry->list);
- up(&efivars_lock);
+ __efivars = efivars;
- return 0;
-}
-EXPORT_SYMBOL_GPL(efivar_entry_remove);
+ pr_info("Registered efivars operations\n");
-/*
- * efivar_entry_list_del_unlock - remove entry from variable list
- * @entry: entry to remove
- *
- * Remove @entry from the variable list and release the list lock.
- *
- * NOTE: slightly weird locking semantics here - we expect to be
- * called with the efivars lock already held, and we release it before
- * returning. This is because this function is usually called after
- * set_variable() while the lock is still held.
- */
-static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
-{
- list_del(&entry->list);
up(&efivars_lock);
-}
-/**
- * __efivar_entry_delete - delete an EFI variable
- * @entry: entry containing EFI variable to delete
- *
- * Delete the variable from the firmware but leave @entry on the
- * variable list.
- *
- * This function differs from efivar_entry_delete() because it does
- * not remove @entry from the variable list. Also, it is safe to be
- * called from within a efivar_entry_iter_begin() and
- * efivar_entry_iter_end() region, unlike efivar_entry_delete().
- *
- * Returns 0 on success, or a converted EFI status code if
- * set_variable() fails.
- */
-int __efivar_entry_delete(struct efivar_entry *entry)
-{
- efi_status_t status;
-
- if (!__efivars)
- return -EINVAL;
-
- status = __efivars->ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- 0, 0, NULL);
-
- return efi_status_to_err(status);
-}
-EXPORT_SYMBOL_GPL(__efivar_entry_delete);
-
-/**
- * efivar_entry_delete - delete variable and remove entry from list
- * @entry: entry containing variable to delete
- *
- * Delete the variable from the firmware and remove @entry from the
- * variable list. It is the caller's responsibility to free @entry
- * once we return.
- *
- * Returns 0 on success, -EINTR if we can't grab the semaphore,
- * converted EFI status code if set_variable() fails.
- */
-int efivar_entry_delete(struct efivar_entry *entry)
-{
- const struct efivar_operations *ops;
- efi_status_t status;
-
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
- up(&efivars_lock);
- return -EINVAL;
- }
- ops = __efivars->ops;
- status = ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- 0, 0, NULL);
- if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
- up(&efivars_lock);
- return efi_status_to_err(status);
- }
-
- efivar_entry_list_del_unlock(entry);
return 0;
}
-EXPORT_SYMBOL_GPL(efivar_entry_delete);
+EXPORT_SYMBOL_GPL(efivars_register);
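
The registration model is deliberately simple: a single backend (the generic EFI runtime-services layer, or a firmware-specific driver) supplies one efivar_operations table, and every later efivar_* call dispatches through it. A rough sketch of a backend registering itself, with placeholder names for everything except efivars_register() and the ops struct layout:

        static struct efivars example_efivars;

        static const struct efivar_operations example_ops = {
                .get_variable           = example_get_variable,
                .get_next_variable      = example_get_next_variable,
                .set_variable           = example_set_variable,
        };

        static int __init example_backend_init(void)
        {
                /* the kobject passed here is what efivars_kobject() later returns */
                return efivars_register(&example_efivars, &example_ops,
                                        example_parent_kobject);
        }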
/**
- * efivar_entry_set - call set_variable()
- * @entry: entry containing the EFI variable to write
- * @attributes: variable attributes
- * @size: size of @data buffer
- * @data: buffer containing variable data
- * @head: head of variable list
- *
- * Calls set_variable() for an EFI variable. If creating a new EFI
- * variable, this function is usually followed by efivar_entry_add().
- *
- * Before writing the variable, the remaining EFI variable storage
- * space is checked to ensure there is enough room available.
- *
- * If @head is not NULL a lookup is performed to determine whether
- * the entry is already on the list.
+ * efivars_unregister - unregister an efivars
+ * @efivars: efivars to unregister
*
- * Returns 0 on success, -EINTR if we can't grab the semaphore,
- * -EEXIST if a lookup is performed and the entry already exists on
- * the list, or a converted EFI status code if set_variable() fails.
+ * The caller must have already removed every entry from the list;
+ * failure to do so is an error.
*/
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head)
+int efivars_unregister(struct efivars *efivars)
{
- const struct efivar_operations *ops;
- efi_status_t status;
- efi_char16_t *name = entry->var.VariableName;
- efi_guid_t vendor = entry->var.VendorGuid;
+ int rv;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
- up(&efivars_lock);
- return -EINVAL;
- }
- ops = __efivars->ops;
- if (head && efivar_entry_find(name, vendor, head, false)) {
- up(&efivars_lock);
- return -EEXIST;
- }
-
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
- if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
- status = ops->set_variable(name, &vendor,
- attributes, size, data);
-
- up(&efivars_lock);
-
- return efi_status_to_err(status);
-
-}
-EXPORT_SYMBOL_GPL(efivar_entry_set);
-
-/*
- * efivar_entry_set_nonblocking - call set_variable_nonblocking()
- *
- * This function is guaranteed to not block and is suitable for calling
- * from crash/panic handlers.
- *
- * Crucially, this function will not block if it cannot acquire
- * efivars_lock. Instead, it returns -EBUSY.
- */
-static int
-efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
- u32 attributes, unsigned long size, void *data)
-{
- const struct efivar_operations *ops;
- efi_status_t status;
-
- if (down_trylock(&efivars_lock))
- return -EBUSY;
-
- if (!__efivars) {
- up(&efivars_lock);
- return -EINVAL;
- }
-
- status = check_var_size_nonblocking(attributes,
- size + ucs2_strsize(name, 1024));
- if (status != EFI_SUCCESS) {
- up(&efivars_lock);
- return -ENOSPC;
- }
-
- ops = __efivars->ops;
- status = ops->set_variable_nonblocking(name, &vendor, attributes,
- size, data);
-
- up(&efivars_lock);
- return efi_status_to_err(status);
-}
-
-/**
- * efivar_entry_set_safe - call set_variable() if enough space in firmware
- * @name: buffer containing the variable name
- * @vendor: variable vendor guid
- * @attributes: variable attributes
- * @block: can we block in this context?
- * @size: size of @data buffer
- * @data: buffer containing variable data
- *
- * Ensures there is enough free storage in the firmware for this variable, and
- * if so, calls set_variable(). If creating a new EFI variable, this function
- * is usually followed by efivar_entry_add().
- *
- * Returns 0 on success, -ENOSPC if the firmware does not have enough
- * space for set_variable() to succeed, or a converted EFI status code
- * if set_variable() fails.
- */
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data)
-{
- const struct efivar_operations *ops;
- efi_status_t status;
- unsigned long varsize;
-
- if (!__efivars)
- return -EINVAL;
-
- ops = __efivars->ops;
- if (!ops->query_variable_store)
- return -ENOSYS;
-
- /*
- * If the EFI variable backend provides a non-blocking
- * ->set_variable() operation and we're in a context where we
- * cannot block, then we need to use it to avoid live-locks,
- * since the implication is that the regular ->set_variable()
- * will block.
- *
- * If no ->set_variable_nonblocking() is provided then
- * ->set_variable() is assumed to be non-blocking.
- */
- if (!block && ops->set_variable_nonblocking)
- return efivar_entry_set_nonblocking(name, vendor, attributes,
- size, data);
-
- varsize = size + ucs2_strsize(name, 1024);
- if (!block) {
- if (down_trylock(&efivars_lock))
- return -EBUSY;
- status = check_var_size_nonblocking(attributes, varsize);
- } else {
- if (down_interruptible(&efivars_lock))
- return -EINTR;
- status = check_var_size(attributes, varsize);
+ printk(KERN_ERR "efivars not registered\n");
+ rv = -EINVAL;
+ goto out;
}
- if (status != EFI_SUCCESS) {
- up(&efivars_lock);
- return -ENOSPC;
+ if (__efivars != efivars) {
+ rv = -EINVAL;
+ goto out;
}
- status = ops->set_variable(name, &vendor, attributes, size, data);
+ pr_info("Unregistered efivars operations\n");
+ __efivars = NULL;
+ rv = 0;
+out:
up(&efivars_lock);
-
- return efi_status_to_err(status);
+ return rv;
}
-EXPORT_SYMBOL_GPL(efivar_entry_set_safe);
+EXPORT_SYMBOL_GPL(efivars_unregister);
-/**
- * efivar_entry_find - search for an entry
- * @name: the EFI variable name
- * @guid: the EFI variable vendor's guid
- * @head: head of the variable list
- * @remove: should we remove the entry from the list?
- *
- * Search for an entry on the variable list that has the EFI variable
- * name @name and vendor guid @guid. If an entry is found on the list
- * and @remove is true, the entry is removed from the list.
- *
- * The caller MUST call efivar_entry_iter_begin() and
- * efivar_entry_iter_end() before and after the invocation of this
- * function, respectively.
- *
- * Returns the entry if found on the list, %NULL otherwise.
- */
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove)
+int efivar_supports_writes(void)
{
- struct efivar_entry *entry, *n;
- int strsize1, strsize2;
- bool found = false;
-
- list_for_each_entry_safe(entry, n, head, list) {
- strsize1 = ucs2_strsize(name, 1024);
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(name, &(entry->var.VariableName), strsize1) &&
- !efi_guidcmp(guid, entry->var.VendorGuid)) {
- found = true;
- break;
- }
- }
-
- if (!found)
- return NULL;
-
- if (remove) {
- if (entry->scanning) {
- /*
- * The entry will be deleted
- * after scanning is completed.
- */
- entry->deleting = true;
- } else
- list_del(&entry->list);
- }
-
- return entry;
+ return __efivars && __efivars->ops->set_variable;
}
-EXPORT_SYMBOL_GPL(efivar_entry_find);
+EXPORT_SYMBOL_GPL(efivar_supports_writes);
-/**
- * efivar_entry_size - obtain the size of a variable
- * @entry: entry for this variable
- * @size: location to store the variable's size
+/*
+ * efivar_lock() - obtain the efivar lock, wait for it if needed
+ * @return 0 on success, error code on failure
*/
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+int efivar_lock(void)
{
- const struct efivar_operations *ops;
- efi_status_t status;
-
- *size = 0;
-
if (down_interruptible(&efivars_lock))
return -EINTR;
- if (!__efivars) {
+ if (!__efivars->ops) {
up(&efivars_lock);
- return -EINVAL;
+ return -ENODEV;
}
- ops = __efivars->ops;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid, NULL, size, NULL);
- up(&efivars_lock);
-
- if (status != EFI_BUFFER_TOO_SMALL)
- return efi_status_to_err(status);
-
return 0;
}
-EXPORT_SYMBOL_GPL(efivar_entry_size);
+EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR);
-/**
- * __efivar_entry_get - call get_variable()
- * @entry: read data for this variable
- * @attributes: variable attributes
- * @size: size of @data buffer
- * @data: buffer to store variable data
- *
- * The caller MUST call efivar_entry_iter_begin() and
- * efivar_entry_iter_end() before and after the invocation of this
- * function, respectively.
- */
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data)
-{
- efi_status_t status;
-
- if (!__efivars)
- return -EINVAL;
-
- status = __efivars->ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
-
- return efi_status_to_err(status);
-}
-EXPORT_SYMBOL_GPL(__efivar_entry_get);
-
-/**
- * efivar_entry_get - call get_variable()
- * @entry: read data for this variable
- * @attributes: variable attributes
- * @size: size of @data buffer
- * @data: buffer to store variable data
+/*
+ * efivar_trylock() - obtain the efivar lock if it is free
+ * @return 0 on success, error code on failure
*/
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data)
+int efivar_trylock(void)
{
- efi_status_t status;
-
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
+ if (down_trylock(&efivars_lock))
+ return -EBUSY;
+ if (!__efivars->ops) {
up(&efivars_lock);
- return -EINVAL;
- }
-
- status = __efivars->ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
- up(&efivars_lock);
-
- return efi_status_to_err(status);
-}
-EXPORT_SYMBOL_GPL(efivar_entry_get);
-
-/**
- * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
- * @entry: entry containing variable to set and get
- * @attributes: attributes of variable to be written
- * @size: size of data buffer
- * @data: buffer containing data to write
- * @set: did the set_variable() call succeed?
- *
- * This is a pretty special (complex) function. See efivarfs_file_write().
- *
- * Atomically call set_variable() for @entry and if the call is
- * successful, return the new size of the variable from get_variable()
- * in @size. The success of set_variable() is indicated by @set.
- *
- * Returns 0 on success, -EINVAL if the variable data is invalid,
- * -ENOSPC if the firmware does not have enough available space, or a
- * converted EFI status code if either of set_variable() or
- * get_variable() fail.
- *
- * If the EFI variable does not exist when calling set_variable()
- * (EFI_NOT_FOUND), @entry is removed from the variable list.
- */
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set)
-{
- const struct efivar_operations *ops;
- efi_char16_t *name = entry->var.VariableName;
- efi_guid_t *vendor = &entry->var.VendorGuid;
- efi_status_t status;
- int err;
-
- *set = false;
-
- if (efivar_validate(*vendor, name, data, *size) == false)
- return -EINVAL;
-
- /*
- * The lock here protects the get_variable call, the conditional
- * set_variable call, and removal of the variable from the efivars
- * list (in the case of an authenticated delete).
- */
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Ensure that the available space hasn't shrunk below the safe level
- */
- status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
- if (status != EFI_SUCCESS) {
- if (status != EFI_UNSUPPORTED) {
- err = efi_status_to_err(status);
- goto out;
- }
-
- if (*size > 65536) {
- err = -ENOSPC;
- goto out;
- }
- }
-
- ops = __efivars->ops;
-
- status = ops->set_variable(name, vendor, attributes, *size, data);
- if (status != EFI_SUCCESS) {
- err = efi_status_to_err(status);
- goto out;
+ return -ENODEV;
}
-
- *set = true;
-
- /*
- * Writing to the variable may have caused a change in size (which
- * could either be an append or an overwrite), or the variable to be
- * deleted. Perform a GetVariable() so we can tell what actually
- * happened.
- */
- *size = 0;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- NULL, size, NULL);
-
- if (status == EFI_NOT_FOUND)
- efivar_entry_list_del_unlock(entry);
- else
- up(&efivars_lock);
-
- if (status && status != EFI_BUFFER_TOO_SMALL)
- return efi_status_to_err(status);
-
return 0;
-
-out:
- up(&efivars_lock);
- return err;
-
}
-EXPORT_SYMBOL_GPL(efivar_entry_set_get_size);
+EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR);
-/**
- * efivar_entry_iter_begin - begin iterating the variable list
- *
- * Lock the variable list to prevent entry insertion and removal until
- * efivar_entry_iter_end() is called. This function is usually used in
- * conjunction with __efivar_entry_iter() or efivar_entry_iter().
- */
-int efivar_entry_iter_begin(void)
-{
- return down_interruptible(&efivars_lock);
-}
-EXPORT_SYMBOL_GPL(efivar_entry_iter_begin);
-
-/**
- * efivar_entry_iter_end - finish iterating the variable list
- *
- * Unlock the variable list and allow modifications to the list again.
+/*
+ * efivar_unlock() - release the efivar lock
*/
-void efivar_entry_iter_end(void)
+void efivar_unlock(void)
{
up(&efivars_lock);
}
-EXPORT_SYMBOL_GPL(efivar_entry_iter_end);
+EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR);
-/**
- * __efivar_entry_iter - iterate over variable list
- * @func: callback function
- * @head: head of the variable list
- * @data: function-specific data to pass to callback
- * @prev: entry to begin iterating from
- *
- * Iterate over the list of EFI variables and call @func with every
- * entry on the list. It is safe for @func to remove entries in the
- * list via efivar_entry_delete().
- *
- * You MUST call efivar_entry_iter_begin() before this function, and
- * efivar_entry_iter_end() afterwards.
- *
- * It is possible to begin iteration from an arbitrary entry within
- * the list by passing @prev. @prev is updated on return to point to
- * the last entry passed to @func. To begin iterating from the
- * beginning of the list @prev must be %NULL.
+/*
+ * efivar_get_variable() - retrieve a variable identified by name/vendor
*
- * The restrictions for @func are the same as documented for
- * efivar_entry_iter().
+ * Must be called with efivars_lock held.
*/
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev)
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data)
{
- struct efivar_entry *entry, *n;
- int err = 0;
-
- if (!prev || !*prev) {
- list_for_each_entry_safe(entry, n, head, list) {
- err = func(entry, data);
- if (err)
- break;
- }
-
- if (prev)
- *prev = entry;
-
- return err;
- }
-
-
- list_for_each_entry_safe_continue((*prev), n, head, list) {
- err = func(*prev, data);
- if (err)
- break;
- }
-
- return err;
+ return __efivars->ops->get_variable(name, vendor, attr, size, data);
}
-EXPORT_SYMBOL_GPL(__efivar_entry_iter);
+EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR);
-/**
- * efivar_entry_iter - iterate over variable list
- * @func: callback function
- * @head: head of variable list
- * @data: function-specific data to pass to callback
- *
- * Iterate over the list of EFI variables and call @func with every
- * entry on the list. It is safe for @func to remove entries in the
- * list via efivar_entry_delete() while iterating.
+/*
+ * efivar_get_next_variable() - enumerate the next name/vendor pair
*
- * Some notes for the callback function:
- * - a non-zero return value indicates an error and terminates the loop
- * - @func is called from atomic context
+ * Must be called with efivars_lock held.
*/
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data)
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor)
{
- int err = 0;
-
- err = efivar_entry_iter_begin();
- if (err)
- return err;
- err = __efivar_entry_iter(func, head, data, NULL);
- efivar_entry_iter_end();
-
- return err;
+ return __efivars->ops->get_next_variable(name_size, name, vendor);
}
-EXPORT_SYMBOL_GPL(efivar_entry_iter);
+EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR);
-/**
- * efivars_kobject - get the kobject for the registered efivars
+/*
+ * efivar_set_variable_blocking() - local helper function for set_variable
*
- * If efivars_register() has not been called we return NULL,
- * otherwise return the kobject used at registration time.
+ * Must be called with efivars_lock held.
*/
-struct kobject *efivars_kobject(void)
+static efi_status_t
+efivar_set_variable_blocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
{
- if (!__efivars)
- return NULL;
+ efi_status_t status;
- return __efivars->kobject;
+ if (data_size > 0) {
+ status = check_var_size(attr, data_size +
+ ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+ return __efivars->ops->set_variable(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_GPL(efivars_kobject);
-/**
- * efivars_register - register an efivars
- * @efivars: efivars to register
- * @ops: efivars operations
- * @kobject: @efivars-specific kobject
+/*
+ * efivar_set_variable_locked() - set a variable identified by name/vendor
*
- * Only a single efivars can be registered at any time.
+ * Must be called with efivars_lock held. If @nonblocking is set, it will use
+ * non-blocking primitives so it is guaranteed not to sleep.
*/
-int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject)
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking)
{
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- efivars->ops = ops;
- efivars->kobject = kobject;
-
- __efivars = efivars;
+ efi_set_variable_t *setvar;
+ efi_status_t status;
- pr_info("Registered efivars operations\n");
+ if (!nonblocking)
+ return efivar_set_variable_blocking(name, vendor, attr,
+ data_size, data);
- up(&efivars_lock);
-
- return 0;
+ /*
+ * If no _nonblocking variant exists, the ordinary one
+ * is assumed to be non-blocking.
+ */
+ setvar = __efivars->ops->set_variable_nonblocking ?:
+ __efivars->ops->set_variable;
+
+ if (data_size > 0) {
+ status = check_var_size_nonblocking(attr, data_size +
+ ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+ return setvar(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_GPL(efivars_register);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR);
-/**
- * efivars_unregister - unregister an efivars
- * @efivars: efivars to unregister
+/*
+ * efivar_set_variable() - set a variable identified by name/vendor
*
- * The caller must have already removed every entry from the list,
- * failure to do so is an error.
+ * Can be called without holding the efivars_lock. Will sleep on obtaining the
+ * lock, or on obtaining other locks that are needed in order to complete the
+ * call.
*/
-int efivars_unregister(struct efivars *efivars)
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
{
- int rv;
-
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
- printk(KERN_ERR "efivars not registered\n");
- rv = -EINVAL;
- goto out;
- }
-
- if (__efivars != efivars) {
- rv = -EINVAL;
- goto out;
- }
-
- pr_info("Unregistered efivars operations\n");
- __efivars = NULL;
+ efi_status_t status;
- rv = 0;
-out:
- up(&efivars_lock);
- return rv;
-}
-EXPORT_SYMBOL_GPL(efivars_unregister);
+ if (efivar_lock())
+ return EFI_ABORTED;
-int efivar_supports_writes(void)
-{
- return __efivars && __efivars->ops->set_variable;
+ status = efivar_set_variable_blocking(name, vendor, attr, data_size, data);
+ efivar_unlock();
+ return status;
}
-EXPORT_SYMBOL_GPL(efivar_supports_writes);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
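A minimal usage sketch for the relocated efivar helpers above (illustrative only; the variable name and caller are made up, and a module consumer additionally needs to import the EFIVAR symbol namespace):

/* Hypothetical consumer of the new helpers; needs <linux/efi.h>. */
static efi_char16_t foo_name[] = { 'F', 'o', 'o', 0 };

static efi_status_t read_foo(efi_guid_t *vendor, void *buf, unsigned long *size)
{
	efi_status_t status;

	if (efivar_lock())	/* may sleep; fails with -EINTR or -ENODEV */
		return EFI_ABORTED;

	status = efivar_get_variable(foo_name, vendor, NULL, size, buf);
	efivar_unlock();

	return status;
}

MODULE_IMPORT_NS(EFIVAR);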
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index cb255a99170c..3c071f814455 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
+
/*
* mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
*
@@ -72,7 +74,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
struct mtk_adsp_ipc *adsp_ipc;
struct mtk_adsp_chan *adsp_chan;
struct mbox_client *cl;
- char *chan_name;
int ret;
int i, j;
@@ -83,12 +84,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
- chan_name = kasprintf(GFP_KERNEL, "mbox%d", i);
- if (!chan_name) {
- ret = -ENOMEM;
- goto out;
- }
-
adsp_chan = &adsp_ipc->chans[i];
cl = &adsp_chan->cl;
cl->dev = dev->parent;
@@ -99,17 +94,20 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->ipc = adsp_ipc;
adsp_chan->idx = i;
- adsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
ret = PTR_ERR(adsp_chan->ch);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %d ret %d\n",
- i, ret);
- goto out_free;
- }
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ adsp_mbox_ch_names[i], ret);
+
+ for (j = 0; j < i; j++) {
+ adsp_chan = &adsp_ipc->chans[j];
+ mbox_free_channel(adsp_chan->ch);
+ }
- dev_dbg(dev, "request mbox chan %s\n", chan_name);
- kfree(chan_name);
+ return ret;
+ }
}
adsp_ipc->dev = dev;
@@ -117,16 +115,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
dev_dbg(dev, "MTK ADSP IPC initialized\n");
return 0;
-
-out_free:
- kfree(chan_name);
-out:
- for (j = 0; j < i; j++) {
- adsp_chan = &adsp_ipc->chans[j];
- mbox_free_channel(adsp_chan->ch);
- }
-
- return ret;
}
static int mtk_adsp_ipc_remove(struct platform_device *pdev)
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 1829ba220576..9f918b9e6f8f 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -120,6 +120,9 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc,
/**
* scm_legacy_call() - Sends a command to the SCM and waits for the command to
* finish processing.
+ * @dev: device
+ * @desc: descriptor structure containing arguments and return values
+ * @res: results from SMC call
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
@@ -211,6 +214,7 @@ out:
/**
* scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
+ * @unused: device, legacy argument, not used, can be NULL
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
*
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 3163660fa8e2..cdbfe54c8146 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -7,6 +7,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
@@ -31,8 +32,13 @@ struct qcom_scm {
struct clk *core_clk;
struct clk *iface_clk;
struct clk *bus_clk;
+ struct icc_path *path;
struct reset_controller_dev reset;
+ /* control access to the interconnect path */
+ struct mutex scm_bw_lock;
+ int scm_vote_count;
+
u64 dload_mode_addr;
};
@@ -99,6 +105,42 @@ static void qcom_scm_clk_disable(void)
clk_disable_unprepare(__scm->bus_clk);
}
+static int qcom_scm_bw_enable(void)
+{
+ int ret = 0;
+
+ if (!__scm->path)
+ return 0;
+
+ if (IS_ERR(__scm->path))
+ return -EINVAL;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (!__scm->scm_vote_count) {
+ ret = icc_set_bw(__scm->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(__scm->dev, "failed to set bandwidth request\n");
+ goto err_bw;
+ }
+ }
+ __scm->scm_vote_count++;
+err_bw:
+ mutex_unlock(&__scm->scm_bw_lock);
+
+ return ret;
+}
+
+static void qcom_scm_bw_disable(void)
+{
+ if (IS_ERR_OR_NULL(__scm->path))
+ return;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (__scm->scm_vote_count-- == 1)
+ icc_set_bw(__scm->path, 0, 0);
+ mutex_unlock(&__scm->scm_bw_lock);
+}
+
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
@@ -444,10 +486,15 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
if (ret)
goto out;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
out:
@@ -507,7 +554,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -537,7 +589,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -566,8 +623,13 @@ int qcom_scm_pas_shutdown(u32 peripheral)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -1277,8 +1339,15 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ mutex_init(&scm->scm_bw_lock);
+
clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ scm->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(scm->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
+ "failed to acquire interconnect path\n");
+
scm->core_clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(scm->core_clk)) {
if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
@@ -1337,7 +1406,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
/*
* If requested enable "download mode", from this point on warmboot
- * will cause the the boot stages to enter download mode, unless
+ * will cause the boot stages to enter download mode, unless
* disabled below by a clean shutdown/reboot.
*/
if (download_mode)
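The interconnect vote introduced above is reference counted through scm_vote_count, so concurrent PAS operations share a single icc_set_bw() request. A hedged sketch of the clock/bandwidth bracketing that the PAS hunks apply around qcom_scm_call() (hypothetical helper name; error unwinding condensed):

static int qcom_scm_pas_call(struct qcom_scm_desc *desc, struct qcom_scm_res *res)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();	/* first caller votes 0/UINT_MAX */
	if (ret) {
		qcom_scm_clk_disable();
		return ret;
	}

	ret = qcom_scm_call(__scm->dev, desc, res);

	qcom_scm_bw_disable();		/* last caller drops the vote back to 0/0 */
	qcom_scm_clk_disable();

	return ret;
}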
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 9378075d04e9..e51c95f8d445 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -24,12 +24,16 @@
#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_STATUS_MASK GENMASK_ULL(15, 0)
+#define RSU_DCMF1_STATUS_MASK GENMASK_ULL(31, 16)
+#define RSU_DCMF2_STATUS_MASK GENMASK_ULL(47, 32)
+#define RSU_DCMF3_STATUS_MASK GENMASK_ULL(63, 48)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
#define INVALID_RETRY_COUNTER 0xFF
#define INVALID_DCMF_VERSION 0xFF
-
+#define INVALID_DCMF_STATUS 0xFFFFFFFF
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data);
@@ -49,6 +53,10 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @dcmf_version.dcmf1: Quartus dcmf1 version
* @dcmf_version.dcmf2: Quartus dcmf2 version
* @dcmf_version.dcmf3: Quartus dcmf3 version
+ * @dcmf_status.dcmf0: dcmf0 status
+ * @dcmf_status.dcmf1: dcmf1 status
+ * @dcmf_status.dcmf2: dcmf2 status
+ * @dcmf_status.dcmf3: dcmf3 status
* @retry_counter: the current image's retry counter
* @max_retry: the preset max retry value
*/
@@ -73,6 +81,13 @@ struct stratix10_rsu_priv {
unsigned int dcmf3;
} dcmf_version;
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_status;
+
unsigned int retry_counter;
unsigned int max_retry;
};
@@ -129,7 +144,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "FW doesn't support notify\n");
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -156,7 +171,7 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "FW doesn't support retry\n");
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
BIT(data->status));
@@ -181,7 +196,7 @@ static void rsu_max_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_OK))
priv->max_retry = *max_retry;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "FW doesn't support max retry\n");
+ dev_warn(client->dev, "Secure FW doesn't support max retry\n");
else
dev_err(client->dev, "Failed to get max retry %lu\n",
BIT(data->status));
@@ -216,6 +231,35 @@ static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
}
/**
+ * rsu_dcmf_status_callback() - Callback from Intel service layer for getting
+ * the DCMF status
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for DCMF status
+ */
+static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value = (unsigned long long *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_status.dcmf0 = FIELD_GET(RSU_DCMF0_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf1 = FIELD_GET(RSU_DCMF1_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf2 = FIELD_GET(RSU_DCMF2_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf3 = FIELD_GET(RSU_DCMF3_STATUS_MASK,
+ *value);
+ } else
+ dev_err(client->dev, "failed to get DCMF status\n");
+
+ complete(&priv->completion);
+}
+
+/**
* rsu_send_msg() - send a message to Intel service layer
* @priv: pointer to rsu private data
* @command: RSU status or update command
@@ -361,7 +405,8 @@ static ssize_t max_retry_show(struct device *dev,
if (!priv)
return -ENODEV;
- return sprintf(buf, "0x%08x\n", priv->max_retry);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x\n", priv->max_retry);
}
static ssize_t dcmf0_show(struct device *dev,
@@ -408,6 +453,61 @@ static ssize_t dcmf3_show(struct device *dev,
return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
}
+static ssize_t dcmf0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf0 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf0);
+}
+
+static ssize_t dcmf1_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf1 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf1);
+}
+
+static ssize_t dcmf2_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf2 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf2);
+}
+
+static ssize_t dcmf3_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf3 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf3);
+}
static ssize_t reboot_image_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -484,6 +584,10 @@ static DEVICE_ATTR_RO(dcmf0);
static DEVICE_ATTR_RO(dcmf1);
static DEVICE_ATTR_RO(dcmf2);
static DEVICE_ATTR_RO(dcmf3);
+static DEVICE_ATTR_RO(dcmf0_status);
+static DEVICE_ATTR_RO(dcmf1_status);
+static DEVICE_ATTR_RO(dcmf2_status);
+static DEVICE_ATTR_RO(dcmf3_status);
static DEVICE_ATTR_WO(reboot_image);
static DEVICE_ATTR_WO(notify);
@@ -500,6 +604,10 @@ static struct attribute *rsu_attrs[] = {
&dev_attr_dcmf1.attr,
&dev_attr_dcmf2.attr,
&dev_attr_dcmf3.attr,
+ &dev_attr_dcmf0_status.attr,
+ &dev_attr_dcmf1_status.attr,
+ &dev_attr_dcmf2_status.attr,
+ &dev_attr_dcmf3_status.attr,
&dev_attr_reboot_image.attr,
&dev_attr_notify.attr,
NULL
@@ -532,6 +640,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
priv->max_retry = INVALID_RETRY_COUNTER;
+ priv->dcmf_status.dcmf0 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf1 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf2 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf3 = INVALID_DCMF_STATUS;
mutex_init(&priv->lock);
priv->chan = stratix10_svc_request_channel_byname(&priv->client,
@@ -561,6 +673,13 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_STATUS,
+ 0, rsu_dcmf_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU retry %i\n", ret);
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 14663f671323..b4081f4d88a3 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -34,12 +34,13 @@
* timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC.
*/
#define SVC_NUM_DATA_IN_FIFO 32
-#define SVC_NUM_CHANNEL 2
+#define SVC_NUM_CHANNEL 3
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
/* stratix10 service layer clients */
#define STRATIX10_RSU "stratix10-rsu"
+#define INTEL_FCS "intel-fcs"
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
@@ -53,6 +54,7 @@ struct stratix10_svc_chan;
*/
struct stratix10_svc {
struct platform_device *stratix10_svc_rsu;
+ struct platform_device *intel_svc_fcs;
};
/**
@@ -97,8 +99,10 @@ struct stratix10_svc_data_mem {
/**
* struct stratix10_svc_data - service data structure
* @chan: service channel
- * @paddr: playload physical address
- * @size: playload size
+ * @paddr: physical address of the payload to be processed
+ * @size: size of the payload to be processed
+ * @paddr_output: physical address of processed payload
+ * @size_output: processed payload size
* @command: service command requested by client
* @flag: configuration type (full or partial)
* @arg: args to be passed via registers and not physically mapped buffers
@@ -109,6 +113,8 @@ struct stratix10_svc_data {
struct stratix10_svc_chan *chan;
phys_addr_t paddr;
size_t size;
+ phys_addr_t paddr_output;
+ size_t size_output;
u32 command;
u32 flag;
u64 arg[3];
@@ -246,6 +252,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
{
struct arm_smccc_res res;
int count_in_sec;
+ unsigned long a0, a1, a2;
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
@@ -254,24 +261,45 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
pr_debug("%s: polling config status\n", __func__);
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
+ a1 = (unsigned long)p_data->paddr;
+ a2 = (unsigned long)p_data->size;
+
+ if (p_data->command == COMMAND_POLL_SERVICE_STATUS)
+ a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
+
count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
while (count_in_sec) {
- ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
- 0, 0, 0, 0, 0, 0, 0, &res);
+ ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
- (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR) ||
+ (res.a0 == INTEL_SIP_SMC_STATUS_REJECTED))
break;
/*
- * configuration is still in progress, wait one second then
+ * request is still in progress, wait one second then
* poll again
*/
msleep(1000);
count_in_sec--;
}
- if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
+ if (!count_in_sec) {
+ pr_err("%s: poll status timeout\n", __func__);
+ cb_data->status = BIT(SVC_STATUS_BUSY);
+ } else if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
cb_data->status = BIT(SVC_STATUS_COMPLETED);
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ } else {
+ pr_err("%s: poll status error\n", __func__);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ cb_data->status = BIT(SVC_STATUS_ERROR);
+ }
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
@@ -296,6 +324,10 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
case COMMAND_RECONFIG:
case COMMAND_RSU_UPDATE:
case COMMAND_RSU_NOTIFY:
+ case COMMAND_FCS_REQUEST_SERVICE:
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ case COMMAND_FCS_DATA_DECRYPTION:
cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
@@ -306,15 +338,29 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
break;
case COMMAND_RSU_RETRY:
case COMMAND_RSU_MAX_RETRY:
+ case COMMAND_RSU_DCMF_STATUS:
case COMMAND_FIRMWARE_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
+ case COMMAND_SMC_SVC_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
case COMMAND_RSU_DCMF_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
cb_data->kaddr2 = &res.a2;
break;
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ case COMMAND_POLL_SERVICE_STATUS:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = svc_pa_to_va(res.a2);
+ cb_data->kaddr3 = &res.a3;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -341,7 +387,7 @@ static int svc_normal_to_secure_thread(void *data)
struct stratix10_svc_data *pdata;
struct stratix10_svc_cb_data *cbdata;
struct arm_smccc_res res;
- unsigned long a0, a1, a2;
+ unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
int ret_fifo = 0;
pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
@@ -358,6 +404,11 @@ static int svc_normal_to_secure_thread(void *data)
a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
a1 = 0;
a2 = 0;
+ a3 = 0;
+ a4 = 0;
+ a5 = 0;
+ a6 = 0;
+ a7 = 0;
pr_debug("smc_hvc_shm_thread is running\n");
@@ -428,15 +479,74 @@ static int svc_normal_to_secure_thread(void *data)
a1 = 0;
a2 = 0;
break;
+
+ /* for FCS */
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ a0 = INTEL_SIP_SMC_FCS_CRYPTION;
+ a1 = 1;
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size;
+ a4 = (unsigned long)pdata->paddr_output;
+ a5 = (unsigned long)pdata->size_output;
+ break;
+ case COMMAND_FCS_DATA_DECRYPTION:
+ a0 = INTEL_SIP_SMC_FCS_CRYPTION;
+ a1 = 0;
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size;
+ a4 = (unsigned long)pdata->paddr_output;
+ a5 = (unsigned long)pdata->size_output;
+ break;
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ a0 = INTEL_SIP_SMC_FCS_RANDOM_NUMBER;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = 0;
+ break;
+ case COMMAND_FCS_REQUEST_SERVICE:
+ a0 = INTEL_SIP_SMC_FCS_SERVICE_REQUEST;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ a0 = INTEL_SIP_SMC_FCS_SEND_CERTIFICATE;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ a0 = INTEL_SIP_SMC_FCS_GET_PROVISION_DATA;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = 0;
+ break;
+
+ /* for polling */
+ case COMMAND_POLL_SERVICE_STATUS:
+ a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_RSU_DCMF_STATUS:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_SMC_SVC_VERSION:
+ a0 = INTEL_SIP_SMC_SVC_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
}
pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
- __func__, (unsigned int)a0, (unsigned int)a1);
+ __func__,
+ (unsigned int)a0,
+ (unsigned int)a1);
pr_debug(" a2=0x%016x\n", (unsigned int)a2);
-
- ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+ pr_debug(" a3=0x%016x\n", (unsigned int)a3);
+ pr_debug(" a4=0x%016x\n", (unsigned int)a4);
+ pr_debug(" a5=0x%016x\n", (unsigned int)a5);
+ ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res);
pr_debug("%s: after SMC call -- res.a0=0x%016x",
__func__, (unsigned int)res.a0);
@@ -468,6 +578,7 @@ static int svc_normal_to_secure_thread(void *data)
pdata, cbdata);
break;
case COMMAND_RECONFIG_STATUS:
+ case COMMAND_POLL_SERVICE_STATUS:
svc_thread_cmd_config_status(ctrl,
pdata, cbdata);
break;
@@ -478,14 +589,31 @@ static int svc_normal_to_secure_thread(void *data)
break;
case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
+ /* for FCS */
+ switch (pdata->command) {
+ case COMMAND_FCS_REQUEST_SERVICE:
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ case COMMAND_FCS_DATA_DECRYPTION:
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ cbdata->status = BIT(SVC_STATUS_INVALID_PARAM);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl,
+ cbdata);
+ break;
+ }
break;
case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = &res.a1;
- cbdata->kaddr2 = NULL;
- cbdata->kaddr3 = NULL;
+ cbdata->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cbdata->kaddr3 = (res.a3) ? &res.a3 : NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
@@ -493,12 +621,10 @@ static int svc_normal_to_secure_thread(void *data)
/*
* be compatible with older version firmware which
- * doesn't support RSU notify or retry
+ * doesn't support newer RSU commands
*/
- if ((pdata->command == COMMAND_RSU_RETRY) ||
- (pdata->command == COMMAND_RSU_MAX_RETRY) ||
- (pdata->command == COMMAND_RSU_NOTIFY) ||
- (pdata->command == COMMAND_FIRMWARE_VERSION)) {
+ if ((pdata->command != COMMAND_RSU_UPDATE) &&
+ (pdata->command != COMMAND_RSU_STATUS)) {
cbdata->status =
BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
@@ -852,8 +978,19 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
list_for_each_entry(p_mem, &svc_data_mem, node)
if (p_mem->vaddr == p_msg->payload) {
p_data->paddr = p_mem->paddr;
+ p_data->size = p_msg->payload_length;
break;
}
+ if (p_msg->payload_output) {
+ list_for_each_entry(p_mem, &svc_data_mem, node)
+ if (p_mem->vaddr == p_msg->payload_output) {
+ p_data->paddr_output =
+ p_mem->paddr;
+ p_data->size_output =
+ p_msg->payload_length_output;
+ break;
+ }
+ }
}
p_data->command = p_msg->command;
@@ -1036,6 +1173,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
chans[1].name = SVC_CLIENT_RSU;
spin_lock_init(&chans[1].lock);
+ chans[2].scl = NULL;
+ chans[2].ctrl = controller;
+ chans[2].name = SVC_CLIENT_FCS;
+ spin_lock_init(&chans[2].lock);
+
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
@@ -1054,8 +1196,22 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
}
ret = platform_device_add(svc->stratix10_svc_rsu);
- if (ret)
- goto err_put_device;
+ if (ret) {
+ platform_device_put(svc->stratix10_svc_rsu);
+ return ret;
+ }
+
+ svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
+ if (!svc->intel_svc_fcs) {
+ dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add(svc->intel_svc_fcs);
+ if (ret) {
+ platform_device_put(svc->intel_svc_fcs);
+ return ret;
+ }
dev_set_drvdata(dev, svc);
@@ -1063,8 +1219,6 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return 0;
-err_put_device:
- platform_device_put(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
return ret;
@@ -1075,6 +1229,7 @@ static int stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
kfifo_free(&ctrl->svc_fifo);
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index fd89899aeeed..0c440afd5224 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -474,7 +474,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
dentry = debugfs_create_file(name, mode, parent, bpmp,
&bpmp_debug_fops);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
err = -ENOMEM;
goto out;
}
@@ -725,7 +725,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
if (t & DEBUGFS_S_ISDIR) {
dentry = debugfs_create_dir(name, parent);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
if (err < 0)
@@ -738,7 +738,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
dentry = debugfs_create_file(name, mode,
parent, bpmp,
&debugfs_fops);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
}
}
@@ -788,11 +788,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
return 0;
root = debugfs_create_dir("bpmp", NULL);
- if (!root)
+ if (IS_ERR(root))
return -ENOMEM;
bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror) {
+ if (IS_ERR(bpmp->debugfs_mirror)) {
err = -ENOMEM;
goto out;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 5654c5e9862b..037db21de510 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
int err;
if (data && size > 0)
- memcpy(data, channel->ib->data, size);
+ memcpy_fromio(data, channel->ib->data, size);
err = tegra_bpmp_ack_response(channel);
if (err < 0)
@@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
channel->ob->flags = flags;
if (data && size > 0)
- memcpy(channel->ob->data, data, size);
+ memcpy_toio(channel->ob->data, data, size);
return tegra_bpmp_post_request(channel);
}
@@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
channel->ob->code = code;
if (data && size > 0)
- memcpy(channel->ob->data, data, size);
+ memcpy_toio(channel->ob->data, data, size);
err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 7977a494a651..d1f652802181 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2021 Xilinx, Inc.
+ * Copyright (C) 2014-2022 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -340,6 +340,20 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
static u32 pm_api_version;
static u32 pm_tz_version;
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
+ NULL);
+ if (!ret)
+ return ret;
+
+ /* try old implementation as fallback strategy if above fails */
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
+ reset, NULL);
+}
+
/**
* zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
* @version: Returned version value
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 991b3f361ec9..6c416955da53 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -243,4 +243,24 @@ config FPGA_MGR_VERSAL_FPGA
configure the programmable logic(PL).
To compile this as a module, choose M here.
+
+config FPGA_M10_BMC_SEC_UPDATE
+ tristate "Intel MAX10 BMC Secure Update driver"
+ depends on MFD_INTEL_M10_BMC && FW_UPLOAD
+ help
+ Secure update support for the Intel MAX10 board management
+ controller.
+
+ This is a subdriver of the Intel MAX10 board management controller
+ (BMC) and provides support for secure updates for the BMC image,
+ the FPGA image, the Root Entry Hashes, etc.
+
+config FPGA_MGR_MICROCHIP_SPI
+ tristate "Microchip Polarfire SPI FPGA manager"
+ depends on SPI
+ help
+ FPGA manager driver support for programming Microchip Polarfire
+ FPGAs over the slave SPI interface with a .dat formatted
+ bitstream image.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 5935b3d0abd5..42ae8b58abce 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -19,9 +19,13 @@ obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_FPGA_MGR_MICROCHIP_SPI) += microchip-spi.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
+# FPGA Secure Update Drivers
+obj-$(CONFIG_FPGA_M10_BMC_SEC_UPDATE) += intel-m10-bmc-sec-update.o
+
# FPGA Bridge Drivers
obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index be0667968d33..df8671af4a92 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -108,7 +108,7 @@ static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf,
u32 *buffer_32 = (u32 *)buf;
size_t i = 0;
- if (count <= 0)
+ if (!count)
return -EINVAL;
/* Write out the complete 32-bit chunks */
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 6bff39ff21a0..5498bc337f8b 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -342,7 +342,7 @@ static void release_dfl_dev(struct device *dev)
if (ddev->mmio_res.parent)
release_resource(&ddev->mmio_res);
- ida_simple_remove(&dfl_device_ida, ddev->id);
+ ida_free(&dfl_device_ida, ddev->id);
kfree(ddev->irqs);
kfree(ddev);
}
@@ -360,7 +360,7 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (!ddev)
return ERR_PTR(-ENOMEM);
- id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&dfl_device_ida, GFP_KERNEL);
if (id < 0) {
dev_err(&pdev->dev, "unable to get id\n");
kfree(ddev);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 16f2b164a178..727704431f61 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -342,7 +342,7 @@ fpga_bridge_register(struct device *parent, const char *name,
if (!bridge)
return ERR_PTR(-ENOMEM);
- id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&fpga_bridge_ida, GFP_KERNEL);
if (id < 0) {
ret = id;
goto error_kfree;
@@ -375,7 +375,7 @@ fpga_bridge_register(struct device *parent, const char *name,
return bridge;
error_device:
- ida_simple_remove(&fpga_bridge_ida, id);
+ ida_free(&fpga_bridge_ida, id);
error_kfree:
kfree(bridge);
@@ -407,7 +407,7 @@ static void fpga_bridge_dev_release(struct device *dev)
{
struct fpga_bridge *bridge = to_fpga_bridge(dev);
- ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+ ida_free(&fpga_bridge_ida, bridge->dev.id);
kfree(bridge);
}
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a3595ecc3f79..8efa67620e21 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -74,6 +74,15 @@ static inline int fpga_mgr_write_complete(struct fpga_manager *mgr,
return 0;
}
+static inline int fpga_mgr_parse_header(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ if (mgr->mops->parse_header)
+ return mgr->mops->parse_header(mgr, info, buf, count);
+ return 0;
+}
+
static inline int fpga_mgr_write_init(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count)
@@ -136,24 +145,141 @@ void fpga_image_info_free(struct fpga_image_info *info)
EXPORT_SYMBOL_GPL(fpga_image_info_free);
/*
- * Call the low level driver's write_init function. This will do the
+ * Call the low level driver's parse_header function with entire FPGA image
+ * buffer on the input. This will set info->header_size and info->data_size.
+ */
+static int fpga_mgr_parse_header_mapped(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER;
+ ret = fpga_mgr_parse_header(mgr, info, buf, count);
+
+ if (info->header_size + info->data_size > count) {
+ dev_err(&mgr->dev, "Bitstream data outruns FPGA image\n");
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(&mgr->dev, "Error while parsing FPGA image header\n");
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR;
+ }
+
+ return ret;
+}
+
+/*
+ * Call the low level driver's parse_header function with first fragment of
+ * scattered FPGA image on the input. If header fits first fragment,
+ * parse_header will set info->header_size and info->data_size. If it does
+ * not fit, parse_header will set the required size in info->header_size and
+ * return -EAGAIN.
+ */
+static int fpga_mgr_parse_header_sg_first(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
+{
+ struct sg_mapping_iter miter;
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER;
+
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+ if (sg_miter_next(&miter) &&
+ miter.length >= info->header_size)
+ ret = fpga_mgr_parse_header(mgr, info, miter.addr, miter.length);
+ else
+ ret = -EAGAIN;
+ sg_miter_stop(&miter);
+
+ if (ret && ret != -EAGAIN) {
+ dev_err(&mgr->dev, "Error while parsing FPGA image header\n");
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR;
+ }
+
+ return ret;
+}
+
+/*
+ * Copy scattered FPGA image fragments to temporary buffer and call the
+ * low level driver's parse_header function. This should be called after
+ * fpga_mgr_parse_header_sg_first() returned -EAGAIN. In case of success,
+ * pointer to the newly allocated image header copy will be returned and
+ * its size will be set into *ret_size. Returned buffer needs to be freed.
+ */
+static void *fpga_mgr_parse_header_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt, size_t *ret_size)
+{
+ size_t len, new_header_size, header_size = 0;
+ char *new_buf, *buf = NULL;
+ int ret;
+
+ do {
+ new_header_size = info->header_size;
+ if (new_header_size <= header_size) {
+ dev_err(&mgr->dev, "Requested invalid header size\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ new_buf = krealloc(buf, new_header_size, GFP_KERNEL);
+ if (!new_buf) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ buf = new_buf;
+
+ len = sg_pcopy_to_buffer(sgt->sgl, sgt->nents,
+ buf + header_size,
+ new_header_size - header_size,
+ header_size);
+ if (len != new_header_size - header_size) {
+ ret = -EFAULT;
+ break;
+ }
+
+ header_size = new_header_size;
+ ret = fpga_mgr_parse_header(mgr, info, buf, header_size);
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ dev_err(&mgr->dev, "Error while parsing FPGA image header\n");
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR;
+ kfree(buf);
+ buf = ERR_PTR(ret);
+ }
+
+ *ret_size = header_size;
+
+ return buf;
+}
+
+/*
+ * Call the low level driver's write_init function. This will do the
* device-specific things to get the FPGA into the state where it is ready to
- * receive an FPGA image. The low level driver only gets to see the first
- * initial_header_size bytes in the buffer.
+ * receive an FPGA image. The low level driver gets to see at least the first
+ * info->header_size bytes in the buffer. If info->header_size is 0,
+ * write_init does not get any bytes of the image buffer.
*/
static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count)
{
+ size_t header_size = info->header_size;
int ret;
mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- if (!mgr->mops->initial_header_size) {
+
+ if (header_size > count)
+ ret = -EINVAL;
+ else if (!header_size)
ret = fpga_mgr_write_init(mgr, info, NULL, 0);
- } else {
- count = min(mgr->mops->initial_header_size, count);
+ else
ret = fpga_mgr_write_init(mgr, info, buf, count);
- }
if (ret) {
dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
@@ -164,39 +290,50 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
return 0;
}
-static int fpga_mgr_write_init_sg(struct fpga_manager *mgr,
- struct fpga_image_info *info,
- struct sg_table *sgt)
+static int fpga_mgr_prepare_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
{
struct sg_mapping_iter miter;
size_t len;
char *buf;
int ret;
- if (!mgr->mops->initial_header_size)
+ /* Short path. The low level driver doesn't care about the image header. */
+ if (!mgr->mops->initial_header_size && !mgr->mops->parse_header)
return fpga_mgr_write_init_buf(mgr, info, NULL, 0);
/*
* First try to use miter to map the first fragment to access the
* header, this is the typical path.
*/
- sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
- if (sg_miter_next(&miter) &&
- miter.length >= mgr->mops->initial_header_size) {
- ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
- miter.length);
+ ret = fpga_mgr_parse_header_sg_first(mgr, info, sgt);
+ /* If 0, header fits first fragment, call write_init on it */
+ if (!ret) {
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+ if (sg_miter_next(&miter)) {
+ ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
+ miter.length);
+ sg_miter_stop(&miter);
+ return ret;
+ }
sg_miter_stop(&miter);
+ /*
+ * If -EAGAIN, more sg buffer is needed,
+ * otherwise an error has occurred.
+ */
+ } else if (ret != -EAGAIN) {
return ret;
}
- sg_miter_stop(&miter);
- /* Otherwise copy the fragments into temporary memory. */
- buf = kmalloc(mgr->mops->initial_header_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ /*
+ * Copy the fragments into temporary memory.
+ * Copying is done inside fpga_mgr_parse_header_sg().
+ */
+ buf = fpga_mgr_parse_header_sg(mgr, info, sgt, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
- len = sg_copy_to_buffer(sgt->sgl, sgt->nents, buf,
- mgr->mops->initial_header_size);
ret = fpga_mgr_write_init_buf(mgr, info, buf, len);
kfree(buf);
@@ -227,7 +364,7 @@ static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
{
int ret;
- ret = fpga_mgr_write_init_sg(mgr, info, sgt);
+ ret = fpga_mgr_prepare_sg(mgr, info, sgt);
if (ret)
return ret;
@@ -236,17 +373,35 @@ static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
if (mgr->mops->write_sg) {
ret = fpga_mgr_write_sg(mgr, sgt);
} else {
+ size_t length, count = 0, data_size = info->data_size;
struct sg_mapping_iter miter;
sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+
+ if (mgr->mops->skip_header &&
+ !sg_miter_skip(&miter, info->header_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
while (sg_miter_next(&miter)) {
- ret = fpga_mgr_write(mgr, miter.addr, miter.length);
+ if (data_size)
+ length = min(miter.length, data_size - count);
+ else
+ length = miter.length;
+
+ ret = fpga_mgr_write(mgr, miter.addr, length);
if (ret)
break;
+
+ count += length;
+ if (data_size && count >= data_size)
+ break;
}
sg_miter_stop(&miter);
}
+out:
if (ret) {
dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
mgr->state = FPGA_MGR_STATE_WRITE_ERR;
@@ -262,10 +417,22 @@ static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
{
int ret;
+ ret = fpga_mgr_parse_header_mapped(mgr, info, buf, count);
+ if (ret)
+ return ret;
+
ret = fpga_mgr_write_init_buf(mgr, info, buf, count);
if (ret)
return ret;
+ if (mgr->mops->skip_header) {
+ buf += info->header_size;
+ count -= info->header_size;
+ }
+
+ if (info->data_size)
+ count = info->data_size;
+
/*
* Write the FPGA image to the FPGA.
*/
@@ -404,6 +571,8 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
*/
int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info)
{
+ info->header_size = mgr->mops->initial_header_size;
+
if (info->sgt)
return fpga_mgr_buf_load_sg(mgr, info, info->sgt);
if (info->buf && info->count)
@@ -424,6 +593,10 @@ static const char * const state_str[] = {
[FPGA_MGR_STATE_FIRMWARE_REQ] = "firmware request",
[FPGA_MGR_STATE_FIRMWARE_REQ_ERR] = "firmware request error",
+ /* Parse FPGA image header */
+ [FPGA_MGR_STATE_PARSE_HEADER] = "parse header",
+ [FPGA_MGR_STATE_PARSE_HEADER_ERR] = "parse header error",
+
/* Preparing FPGA to receive image */
[FPGA_MGR_STATE_WRITE_INIT] = "write init",
[FPGA_MGR_STATE_WRITE_INIT_ERR] = "write init error",
@@ -623,7 +796,7 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
if (!mgr)
return ERR_PTR(-ENOMEM);
- id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&fpga_mgr_ida, GFP_KERNEL);
if (id < 0) {
ret = id;
goto error_kfree;
@@ -662,7 +835,7 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
return mgr;
error_device:
- ida_simple_remove(&fpga_mgr_ida, id);
+ ida_free(&fpga_mgr_ida, id);
error_kfree:
kfree(mgr);
@@ -790,7 +963,7 @@ static void fpga_mgr_dev_release(struct device *dev)
{
struct fpga_manager *mgr = to_fpga_manager(dev);
- ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
+ ida_free(&fpga_mgr_ida, mgr->dev.id);
kfree(mgr);
}
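To show the new hook from the driver side: a hypothetical parse_header callback with an invented 16-byte header whose bytes 8..11 carry the little-endian payload length (sketch only; the layout, names and sizes are made up):

/* Needs <linux/fpga/fpga-mgr.h> and <asm/unaligned.h>. */
static int example_parse_header(struct fpga_manager *mgr,
				struct fpga_image_info *info,
				const char *buf, size_t count)
{
	const size_t hdr_len = 16;	/* invented fixed header size */

	if (count < hdr_len) {
		/* ask the core to provide at least this many bytes */
		info->header_size = hdr_len;
		return -EAGAIN;
	}

	info->header_size = hdr_len;
	info->data_size = get_unaligned_le32(buf + 8);	/* assumed length field */

	return 0;
}

static const struct fpga_manager_ops example_mgr_ops = {
	.initial_header_size = 16,	/* sg path maps at least this much up front */
	.skip_header = true,		/* core strips the header before write() */
	.parse_header = example_parse_header,
	/* .write_init, .write, .write_complete as usual */
};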
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 485948e3c0db..27ff9dea04ae 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -202,7 +202,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
if (!region)
return ERR_PTR(-ENOMEM);
- id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&fpga_region_ida, GFP_KERNEL);
if (id < 0) {
ret = id;
goto err_free;
@@ -234,7 +234,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
return region;
err_remove:
- ida_simple_remove(&fpga_region_ida, id);
+ ida_free(&fpga_region_ida, id);
err_free:
kfree(region);
@@ -283,7 +283,7 @@ static void fpga_region_dev_release(struct device *dev)
{
struct fpga_region *region = to_fpga_region(dev);
- ida_simple_remove(&fpga_region_ida, region->dev.id);
+ ida_free(&fpga_region_ida, region->dev.id);
kfree(region);
}
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
new file mode 100644
index 000000000000..72c677c910de
--- /dev/null
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel MAX10 Board Management Controller Secure Update Driver
+ *
+ * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
+ *
+ */
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct m10bmc_sec {
+ struct device *dev;
+ struct intel_m10bmc *m10bmc;
+ struct fw_upload *fwl;
+ char *fw_name;
+ u32 fw_name_id;
+ bool cancel_request;
+};
+
+static DEFINE_XARRAY_ALLOC(fw_upload_xa);
+
+/* Root Entry Hash (REH) support */
+#define REH_SHA256_SIZE 32
+#define REH_SHA384_SIZE 48
+#define REH_MAGIC GENMASK(15, 0)
+#define REH_SHA_NUM_BYTES GENMASK(31, 16)
+
+static ssize_t
+show_root_entry_hash(struct device *dev, u32 exp_magic,
+ u32 prog_addr, u32 reh_addr, char *buf)
+{
+ struct m10bmc_sec *sec = dev_get_drvdata(dev);
+ int sha_num_bytes, i, ret, cnt = 0;
+ u8 hash[REH_SHA384_SIZE];
+ unsigned int stride;
+ u32 magic;
+
+ stride = regmap_get_reg_stride(sec->m10bmc->regmap);
+ ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
+ return sysfs_emit(buf, "hash not programmed\n");
+
+ sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
+ if ((sha_num_bytes % stride) ||
+ (sha_num_bytes != REH_SHA256_SIZE &&
+ sha_num_bytes != REH_SHA384_SIZE)) {
+ dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
+ sha_num_bytes);
+ return -EINVAL;
+ }
+
+ ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
+ hash, sha_num_bytes / stride);
+ if (ret) {
+ dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
+ reh_addr, sha_num_bytes / stride, ret);
+ return ret;
+ }
+
+ for (i = 0; i < sha_num_bytes; i++)
+ cnt += sprintf(buf + cnt, "%02x", hash[i]);
+ cnt += sprintf(buf + cnt, "\n");
+
+ return cnt;
+}
+
+#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
+static ssize_t _name##_root_entry_hash_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
+static DEVICE_ATTR_RO(_name##_root_entry_hash)
+
+DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
+DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
+DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
+
+#define CSK_BIT_LEN 128U
+#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
+
+static ssize_t
+show_canceled_csk(struct device *dev, u32 addr, char *buf)
+{
+ unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
+ struct m10bmc_sec *sec = dev_get_drvdata(dev);
+ DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
+ __le32 csk_le32[CSK_32ARRAY_SIZE];
+ u32 csk32[CSK_32ARRAY_SIZE];
+ int ret;
+
+ stride = regmap_get_reg_stride(sec->m10bmc->regmap);
+ if (size % stride) {
+ dev_err(sec->dev,
+ "CSK vector size (0x%x) not aligned to stride (0x%x)\n",
+ size, stride);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
+ size / stride);
+ if (ret) {
+ dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
+ addr, size / stride, ret);
+ return ret;
+ }
+
+ for (i = 0; i < CSK_32ARRAY_SIZE; i++)
+ csk32[i] = le32_to_cpu(csk_le32[i]);
+
+ bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
+ bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
+ return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
+}
+
+#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
+static ssize_t _name##_canceled_csks_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ return show_canceled_csk(dev, _addr, buf); } \
+static DEVICE_ATTR_RO(_name##_canceled_csks)
+
+#define CSK_VEC_OFFSET 0x34
+
+DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
+DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
+DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
+
+#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
+
+static ssize_t flash_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct m10bmc_sec *sec = dev_get_drvdata(dev);
+ unsigned int stride, num_bits;
+ u8 *flash_buf;
+ int cnt, ret;
+
+ stride = regmap_get_reg_stride(sec->m10bmc->regmap);
+ num_bits = FLASH_COUNT_SIZE * 8;
+
+ if (FLASH_COUNT_SIZE % stride) {
+ dev_err(sec->dev,
+ "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
+ FLASH_COUNT_SIZE, stride);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+ if (!flash_buf)
+ return -ENOMEM;
+
+ ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
+ flash_buf, FLASH_COUNT_SIZE / stride);
+ if (ret) {
+ dev_err(sec->dev,
+ "failed to read flash count: %x cnt %x: %d\n",
+ STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
+ goto exit_free;
+ }
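+ /* The update count is the number of cleared bits in the inverted vector */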
+ cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
+
+exit_free:
+ kfree(flash_buf);
+
+ return ret ? : sysfs_emit(buf, "%u\n", cnt);
+}
+static DEVICE_ATTR_RO(flash_count);
+
+static struct attribute *m10bmc_security_attrs[] = {
+ &dev_attr_flash_count.attr,
+ &dev_attr_bmc_root_entry_hash.attr,
+ &dev_attr_sr_root_entry_hash.attr,
+ &dev_attr_pr_root_entry_hash.attr,
+ &dev_attr_sr_canceled_csks.attr,
+ &dev_attr_pr_canceled_csks.attr,
+ &dev_attr_bmc_canceled_csks.attr,
+ NULL,
+};
+
+static struct attribute_group m10bmc_security_attr_group = {
+ .name = "security",
+ .attrs = m10bmc_security_attrs,
+};
+
+static const struct attribute_group *m10bmc_sec_attr_groups[] = {
+ &m10bmc_security_attr_group,
+ NULL,
+};
+
+static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
+{
+ u32 auth_result;
+
+ dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
+
+ if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
+ dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
+}
+
+static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
+{
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
+ rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_BUSY;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
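+/*
+ * The handshake is pending while the BMC still has DRBL_RSU_REQUEST set;
+ * once cleared, an error status or a progress change signals completion.
+ */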
+static inline bool rsu_start_done(u32 doorbell)
+{
+ u32 status, progress;
+
+ if (doorbell & DRBL_RSU_REQUEST)
+ return false;
+
+ status = rsu_stat(doorbell);
+ if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
+ return true;
+
+ progress = rsu_prog(doorbell);
+ if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
+ return true;
+
+ return false;
+}
+
+static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
+{
+ u32 doorbell, status;
+ int ret;
+
+ ret = regmap_update_bits(sec->m10bmc->regmap,
+ M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
+ DRBL_RSU_REQUEST |
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_IDLE));
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
+ M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ doorbell,
+ rsu_start_done(doorbell),
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US);
+
+ if (ret == -ETIMEDOUT) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (ret) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ status = rsu_stat(doorbell);
+ if (status == RSU_STAT_WEAROUT) {
+ dev_warn(sec->dev, "Excessive flash update count detected\n");
+ return FW_UPLOAD_ERR_WEAROUT;
+ } else if (status == RSU_STAT_ERASE_FAIL) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
+{
+ unsigned long poll_timeout;
+ u32 doorbell, progress;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
+ while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
+ msleep(RSU_PREP_INTERVAL_MS);
+ if (time_after(jiffies, poll_timeout))
+ break;
+
+ ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ progress = rsu_prog(doorbell);
+ if (progress == RSU_PROG_PREPARE) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (progress != RSU_PROG_READY) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
+{
+ u32 doorbell;
+ int ret;
+
+ ret = regmap_update_bits(sec->m10bmc->regmap,
+ M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_WRITE_DONE));
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
+ M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ doorbell,
+ rsu_prog(doorbell) != RSU_PROG_READY,
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US);
+
+ if (ret == -ETIMEDOUT) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (ret) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ switch (rsu_stat(doorbell)) {
+ case RSU_STAT_NORMAL:
+ case RSU_STAT_NIOS_OK:
+ case RSU_STAT_USER_OK:
+ case RSU_STAT_FACTORY_OK:
+ break;
+ default:
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
+{
+ if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
+ return -EIO;
+
+ switch (rsu_stat(*doorbell)) {
+ case RSU_STAT_NORMAL:
+ case RSU_STAT_NIOS_OK:
+ case RSU_STAT_USER_OK:
+ case RSU_STAT_FACTORY_OK:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (rsu_prog(*doorbell)) {
+ case RSU_PROG_IDLE:
+ case RSU_PROG_RSU_DONE:
+ return 0;
+ case RSU_PROG_AUTHENTICATING:
+ case RSU_PROG_COPYING:
+ case RSU_PROG_UPDATE_CANCEL:
+ case RSU_PROG_PROGRAM_KEY_HASH:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
+{
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ if (rsu_prog(doorbell) != RSU_PROG_READY)
+ return FW_UPLOAD_ERR_BUSY;
+
+ ret = regmap_update_bits(sec->m10bmc->regmap,
+ M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_ABORT_RSU));
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ return FW_UPLOAD_ERR_CANCELED;
+}
+
+static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+ u32 ret;
+
+ sec->cancel_request = false;
+
+ if (!size || size > M10BMC_STAGING_SIZE)
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+
+ ret = rsu_check_idle(sec);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = rsu_update_init(sec);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = rsu_prog_ready(sec);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ if (sec->cancel_request)
+ return rsu_cancel(sec);
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
+
+static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
+ u32 offset, u32 size, u32 *written)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+ u32 blk_size, doorbell, extra_offset;
+ unsigned int stride, extra = 0;
+ int ret;
+
+ stride = regmap_get_reg_stride(sec->m10bmc->regmap);
+ if (sec->cancel_request)
+ return rsu_cancel(sec);
+
+ ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ if (ret) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ } else if (rsu_prog(doorbell) != RSU_PROG_READY) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
+ blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
+ ret = regmap_bulk_write(sec->m10bmc->regmap,
+ M10BMC_STAGING_BASE + offset,
+ (void *)data + offset,
+ blk_size / stride);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ /*
+ * If blk_size is not aligned to stride, then handle the extra
+ * bytes with regmap_write.
+ */
+ if (blk_size % stride) {
+ extra_offset = offset + ALIGN_DOWN(blk_size, stride);
+ memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
+ ret = regmap_write(sec->m10bmc->regmap,
+ M10BMC_STAGING_BASE + extra_offset, extra);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ *written = blk_size;
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+ unsigned long poll_timeout;
+ u32 doorbell, result;
+ int ret;
+
+ if (sec->cancel_request)
+ return rsu_cancel(sec);
+
+ result = rsu_send_data(sec);
+ if (result != FW_UPLOAD_ERR_NONE)
+ return result;
+
+ poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
+ do {
+ msleep(RSU_COMPLETE_INTERVAL_MS);
+ ret = rsu_check_complete(sec, &doorbell);
+ } while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));
+
+ if (ret == -EAGAIN) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (ret == -EIO) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ } else if (ret) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+/*
+ * m10bmc_sec_cancel() may be called asynchronously with an on-going update.
+ * All other functions are called sequentially in a single thread. To avoid
+ * contention on register accesses, m10bmc_sec_cancel() must only update
+ * the cancel_request flag. Other functions will check this flag and handle
+ * the cancel request synchronously.
+ */
+static void m10bmc_sec_cancel(struct fw_upload *fwl)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+
+ sec->cancel_request = true;
+}
+
+static void m10bmc_sec_cleanup(struct fw_upload *fwl)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+
+ (void)rsu_cancel(sec);
+}
+
+static const struct fw_upload_ops m10bmc_ops = {
+ .prepare = m10bmc_sec_prepare,
+ .write = m10bmc_sec_write,
+ .poll_complete = m10bmc_sec_poll_complete,
+ .cancel = m10bmc_sec_cancel,
+ .cleanup = m10bmc_sec_cleanup,
+};
+
+#define SEC_UPDATE_LEN_MAX 32
+static int m10bmc_sec_probe(struct platform_device *pdev)
+{
+ char buf[SEC_UPDATE_LEN_MAX];
+ struct m10bmc_sec *sec;
+ struct fw_upload *fwl;
+ unsigned int len;
+ int ret;
+
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->dev = &pdev->dev;
+ sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+ dev_set_drvdata(&pdev->dev, sec);
+
+ ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
+ xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
+ sec->fw_name_id);
+ sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
+ if (!sec->fw_name) {
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return -ENOMEM;
+ }
+
+ fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
+ &m10bmc_ops, sec);
+ if (IS_ERR(fwl)) {
+ dev_err(sec->dev, "Firmware Upload driver failed to start\n");
+ kfree(sec->fw_name);
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return PTR_ERR(fwl);
+ }
+
+ sec->fwl = fwl;
+ return 0;
+}
+
+static int m10bmc_sec_remove(struct platform_device *pdev)
+{
+ struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
+
+ firmware_upload_unregister(sec->fwl);
+ kfree(sec->fw_name);
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+
+ return 0;
+}
+
+static const struct platform_device_id intel_m10bmc_sec_ids[] = {
+ {
+ .name = "n3000bmc-sec-update",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
+
+static struct platform_driver intel_m10bmc_sec_driver = {
+ .probe = m10bmc_sec_probe,
+ .remove = m10bmc_sec_remove,
+ .driver = {
+ .name = "intel-m10bmc-sec-update",
+ .dev_groups = m10bmc_sec_attr_groups,
+ },
+ .id_table = intel_m10bmc_sec_ids,
+};
+module_platform_driver(intel_m10bmc_sec_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
new file mode 100644
index 000000000000..bd284c7b8dc9
--- /dev/null
+++ b/drivers/fpga/microchip-spi.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Polarfire FPGA programming over slave SPI interface.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define MPF_SPI_ISC_ENABLE 0x0B
+#define MPF_SPI_ISC_DISABLE 0x0C
+#define MPF_SPI_READ_STATUS 0x00
+#define MPF_SPI_READ_DATA 0x01
+#define MPF_SPI_FRAME_INIT 0xAE
+#define MPF_SPI_FRAME 0xEE
+#define MPF_SPI_PRG_MODE 0x01
+#define MPF_SPI_RELEASE 0x23
+
+#define MPF_SPI_FRAME_SIZE 16
+
+#define MPF_HEADER_SIZE_OFFSET 24
+#define MPF_DATA_SIZE_OFFSET 55
+
+#define MPF_LOOKUP_TABLE_RECORD_SIZE 9
+#define MPF_LOOKUP_TABLE_BLOCK_ID_OFFSET 0
+#define MPF_LOOKUP_TABLE_BLOCK_START_OFFSET 1
+
+#define MPF_COMPONENTS_SIZE_ID 5
+#define MPF_BITSTREAM_ID 8
+
+#define MPF_BITS_PER_COMPONENT_SIZE 22
+
+#define MPF_STATUS_POLL_RETRIES 10000
+#define MPF_STATUS_BUSY BIT(0)
+#define MPF_STATUS_READY BIT(1)
+#define MPF_STATUS_SPI_VIOLATION BIT(2)
+#define MPF_STATUS_SPI_ERROR BIT(3)
+
+struct mpf_priv {
+ struct spi_device *spi;
+ bool program_mode;
+};
+
+static int mpf_read_status(struct spi_device *spi)
+{
+ u8 status = 0, status_command = MPF_SPI_READ_STATUS;
+ struct spi_transfer xfers[2] = { 0 };
+ int ret;
+
+ /*
+ * HW status is returned on MISO in the first byte after CS went
+ * active. However, the first reading can be inadequate, so we submit
+ * two identical SPI transfers and use the result of the second one.
+ */
+ xfers[0].tx_buf = &status_command;
+ xfers[1].tx_buf = &status_command;
+ xfers[0].rx_buf = &status;
+ xfers[1].rx_buf = &status;
+ xfers[0].len = 1;
+ xfers[1].len = 1;
+ xfers[0].cs_change = 1;
+
+ ret = spi_sync_transfer(spi, xfers, 2);
+
+ if ((status & MPF_STATUS_SPI_VIOLATION) ||
+ (status & MPF_STATUS_SPI_ERROR))
+ ret = -EIO;
+
+ return ret ? : status;
+}
+
+static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
+{
+ struct mpf_priv *priv = mgr->priv;
+ struct spi_device *spi;
+ bool program_mode;
+ int status;
+
+ spi = priv->spi;
+ program_mode = priv->program_mode;
+ status = mpf_read_status(spi);
+
+ if (!program_mode && !status)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int mpf_ops_parse_header(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ size_t component_size_byte_num, component_size_byte_off,
+ components_size_start, bitstream_start,
+ block_id_offset, block_start_offset;
+ u8 header_size, blocks_num, block_id;
+ u32 block_start, component_size;
+ u16 components_num, i;
+
+ if (!buf) {
+ dev_err(&mgr->dev, "Image buffer is not provided\n");
+ return -EINVAL;
+ }
+
+ header_size = *(buf + MPF_HEADER_SIZE_OFFSET);
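+ /* Header incomplete: record the size needed and request more data via -EAGAIN */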
+ if (header_size > count) {
+ info->header_size = header_size;
+ return -EAGAIN;
+ }
+
+ /*
+ * Go through the look-up table to find out where the actual bitstream
+ * starts and where the sizes of the bitstream components lie.
+ */
+ blocks_num = *(buf + header_size - 1);
+ block_id_offset = header_size + MPF_LOOKUP_TABLE_BLOCK_ID_OFFSET;
+ block_start_offset = header_size + MPF_LOOKUP_TABLE_BLOCK_START_OFFSET;
+
+ header_size += blocks_num * MPF_LOOKUP_TABLE_RECORD_SIZE;
+ if (header_size > count) {
+ info->header_size = header_size;
+ return -EAGAIN;
+ }
+
+ components_size_start = 0;
+ bitstream_start = 0;
+
+ while (blocks_num--) {
+ block_id = *(buf + block_id_offset);
+ block_start = get_unaligned_le32(buf + block_start_offset);
+
+ switch (block_id) {
+ case MPF_BITSTREAM_ID:
+ bitstream_start = block_start;
+ info->header_size = block_start;
+ if (block_start > count)
+ return -EAGAIN;
+
+ break;
+ case MPF_COMPONENTS_SIZE_ID:
+ components_size_start = block_start;
+ break;
+ default:
+ break;
+ }
+
+ if (bitstream_start && components_size_start)
+ break;
+
+ block_id_offset += MPF_LOOKUP_TABLE_RECORD_SIZE;
+ block_start_offset += MPF_LOOKUP_TABLE_RECORD_SIZE;
+ }
+
+ if (!bitstream_start || !components_size_start) {
+ dev_err(&mgr->dev, "Failed to parse header look-up table\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Parse bitstream size.
+ * Sizes of the bitstream components are 22 bits long and packed next
+ * to each other. The image header has already been extended to cover
+ * everything up to the bitstream start, so no overflow check is needed.
+ */
+ components_num = get_unaligned_le16(buf + MPF_DATA_SIZE_OFFSET);
+
+ for (i = 0; i < components_num; i++) {
+ component_size_byte_num =
+ (i * MPF_BITS_PER_COMPONENT_SIZE) / BITS_PER_BYTE;
+ component_size_byte_off =
+ (i * MPF_BITS_PER_COMPONENT_SIZE) % BITS_PER_BYTE;
+
+ component_size = get_unaligned_le32(buf +
+ components_size_start +
+ component_size_byte_num);
+ component_size >>= component_size_byte_off;
+ component_size &= GENMASK(MPF_BITS_PER_COMPONENT_SIZE - 1, 0);
+
+ info->data_size += component_size * MPF_SPI_FRAME_SIZE;
+ }
+
+ return 0;
+}
+
+/* Poll HW status until busy bit is cleared and mask bits are set. */
+static int mpf_poll_status(struct spi_device *spi, u8 mask)
+{
+ int status, retries = MPF_STATUS_POLL_RETRIES;
+
+ while (retries--) {
+ status = mpf_read_status(spi);
+ if (status < 0)
+ return status;
+
+ if (status & MPF_STATUS_BUSY)
+ continue;
+
+ if (!mask || (status & mask))
+ return status;
+ }
+
+ return -EBUSY;
+}
+
+static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
+{
+ int status = mpf_poll_status(spi, 0);
+
+ if (status < 0)
+ return status;
+
+ return spi_write(spi, buf, buf_size);
+}
+
+static int mpf_spi_write_then_read(struct spi_device *spi,
+ const void *txbuf, size_t txbuf_size,
+ void *rxbuf, size_t rxbuf_size)
+{
+ const u8 read_command[] = { MPF_SPI_READ_DATA };
+ int ret;
+
+ ret = mpf_spi_write(spi, txbuf, txbuf_size);
+ if (ret)
+ return ret;
+
+ ret = mpf_poll_status(spi, MPF_STATUS_READY);
+ if (ret < 0)
+ return ret;
+
+ return spi_write_then_read(spi, read_command, sizeof(read_command),
+ rxbuf, rxbuf_size);
+}
+
+static int mpf_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info, const char *buf,
+ size_t count)
+{
+ const u8 program_mode[] = { MPF_SPI_FRAME_INIT, MPF_SPI_PRG_MODE };
+ const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
+ struct mpf_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ struct spi_device *spi;
+ u32 isc_ret = 0;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(dev, "Partial reconfiguration is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ spi = priv->spi;
+
+ ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
+ &isc_ret, sizeof(isc_ret));
+ if (ret || isc_ret) {
+ dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
+ ret, isc_ret);
+ return -EFAULT;
+ }
+
+ ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
+ if (ret) {
+ dev_err(dev, "Failed to enter program mode: %d\n", ret);
+ return ret;
+ }
+
+ priv->program_mode = true;
+
+ return 0;
+}
+
+static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ u8 spi_frame_command[] = { MPF_SPI_FRAME };
+ struct spi_transfer xfers[2] = { 0 };
+ struct mpf_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ struct spi_device *spi;
+ int ret, i;
+
+ if (count % MPF_SPI_FRAME_SIZE) {
+ dev_err(dev, "Bitstream size is not a multiple of %d\n",
+ MPF_SPI_FRAME_SIZE);
+ return -EINVAL;
+ }
+
+ spi = priv->spi;
+
+ xfers[0].tx_buf = spi_frame_command;
+ xfers[0].len = sizeof(spi_frame_command);
+
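+ /* Each transfer is the FRAME command byte followed by 16 bytes of bitstream */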
+ for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
+ xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
+ xfers[1].len = MPF_SPI_FRAME_SIZE;
+
+ ret = mpf_poll_status(spi, 0);
+ if (ret >= 0)
+ ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+
+ if (ret) {
+ dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
+ i, count / MPF_SPI_FRAME_SIZE);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mpf_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ const u8 isc_dis_command[] = { MPF_SPI_ISC_DISABLE };
+ const u8 release_command[] = { MPF_SPI_RELEASE };
+ struct mpf_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ struct spi_device *spi;
+ int ret;
+
+ spi = priv->spi;
+
+ ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
+ if (ret) {
+ dev_err(dev, "Failed to disable ISC: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(1000, 2000);
+
+ ret = mpf_spi_write(spi, release_command, sizeof(release_command));
+ if (ret) {
+ dev_err(dev, "Failed to exit program mode: %d\n", ret);
+ return ret;
+ }
+
+ priv->program_mode = false;
+
+ return 0;
+}
+
+static const struct fpga_manager_ops mpf_ops = {
+ .state = mpf_ops_state,
+ .initial_header_size = 71,
+ .skip_header = true,
+ .parse_header = mpf_ops_parse_header,
+ .write_init = mpf_ops_write_init,
+ .write = mpf_ops_write,
+ .write_complete = mpf_ops_write_complete,
+};
+
+static int mpf_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct fpga_manager *mgr;
+ struct mpf_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+
+ mgr = devm_fpga_mgr_register(dev, "Microchip Polarfire SPI FPGA Manager",
+ &mpf_ops, priv);
+
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static const struct spi_device_id mpf_spi_ids[] = {
+ { .name = "mpf-spi-fpga-mgr", },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, mpf_spi_ids);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id mpf_of_ids[] = {
+ { .compatible = "microchip,mpf-spi-fpga-mgr" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpf_of_ids);
+#endif /* IS_ENABLED(CONFIG_OF) */
+
+static struct spi_driver mpf_driver = {
+ .probe = mpf_probe,
+ .id_table = mpf_spi_ids,
+ .driver = {
+ .name = "microchip_mpf_spi_fpga_mgr",
+ .of_match_table = of_match_ptr(mpf_of_ids),
+ },
+};
+
+module_spi_driver(mpf_driver);
+
+MODULE_DESCRIPTION("Microchip Polarfire SPI FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b01961999ced..0642f579196f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -544,6 +544,7 @@ config GPIO_SAMA5D2_PIOBU
tristate "SAMA5D2 PIOBU GPIO support"
depends on MFD_SYSCON
depends on OF_GPIO
+ depends on ARCH_AT91 || COMPILE_TEST
select GPIO_SYSCON
help
Say yes here to use the PIOBU pins as GPIOs.
@@ -690,12 +691,6 @@ config GPIO_VISCONTI
help
 	  Say yes here to support GPIO on Toshiba Visconti.
-config GPIO_VR41XX
- tristate "NEC VR4100 series General-purpose I/O Unit support"
- depends on CPU_VR41XX
- help
- Say yes here to support the NEC VR4100 series General-purpose I/O Unit.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on (X86 || COMPILE_TEST) && PCI
@@ -829,11 +824,24 @@ endmenu
menu "Port-mapped I/O GPIO drivers"
depends on X86 # Unconditional I/O space access
+config GPIO_I8255
+ tristate
+ help
+ Enables support for the i8255 interface library functions. The i8255
+ interface library provides functions to facilitate communication with
+ interfaces compatible with the venerable Intel 8255 Programmable
+ Peripheral Interface (PPI). The Intel 8255 PPI chip was first released
+ in the early 1970s but compatible interfaces are nowadays typically
+ found embedded in larger VLSI processing chips and FPGA components.
+
+ If built as a module its name will be gpio-i8255.
+
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
depends on PC104
select ISA_BUS_API
select GPIOLIB_IRQCHIP
+ select GPIO_I8255
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
104-DIO-24E). The base port addresses for the devices may be
@@ -857,6 +865,7 @@ config GPIO_104_IDI_48
depends on PC104
select ISA_BUS_API
select GPIOLIB_IRQCHIP
+ select GPIO_I8255
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
104-IDI-48AC, 104-IDI-48B, 104-IDI-48BC). The base port addresses for
@@ -877,6 +886,7 @@ config GPIO_GPIO_MM
tristate "Diamond Systems GPIO-MM GPIO support"
depends on PC104
select ISA_BUS_API
+ select GPIO_I8255
help
Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 14352f6dfe8e..a0985d30f51b 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
+obj-$(CONFIG_GPIO_I8255) += gpio-i8255.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
@@ -169,7 +170,6 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o
obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
-obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index f118ad9bcd33..a41551870759 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -6,8 +6,7 @@
* This driver supports the following ACCES devices: 104-DIO-48E and
* 104-DIO-24E.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -20,6 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define DIO48E_EXTENT 16
#define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT)
@@ -33,34 +37,54 @@ static unsigned int irq[MAX_NUM_DIO48E];
module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
+#define DIO48E_NUM_PPI 2
+
+/**
+ * struct dio48e_reg - device register structure
+ * @ppi: Programmable Peripheral Interface groups
+ * @enable_buffer: Enable/Disable Buffer groups
+ * @unused1: Unused
+ * @enable_interrupt: Write: Enable Interrupt
+ * Read: Disable Interrupt
+ * @unused2: Unused
+ * @enable_counter: Write: Enable Counter/Timer Addressing
+ * Read: Disable Counter/Timer Addressing
+ * @unused3: Unused
+ * @clear_interrupt: Clear Interrupt
+ */
+struct dio48e_reg {
+ struct i8255 ppi[DIO48E_NUM_PPI];
+ u8 enable_buffer[DIO48E_NUM_PPI];
+ u8 unused1;
+ u8 enable_interrupt;
+ u8 unused2;
+ u8 enable_counter;
+ u8 unused3;
+ u8 clear_interrupt;
+};
+
/**
* struct dio48e_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
- * @control: Control registers state
- * @lock: synchronization lock to prevent I/O race conditions
- * @base: base port address of the GPIO device
- * @irq_mask: I/O bits affected by interrupts
+ * @chip: instance of the gpio_chip
+ * @ppi_state: PPI device states
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the device registers
+ * @irq_mask: I/O bits affected by interrupts
*/
struct dio48e_gpio {
struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
- unsigned char control[2];
+ struct i8255_state ppi_state[DIO48E_NUM_PPI];
raw_spinlock_t lock;
- void __iomem *base;
+ struct dio48e_reg __iomem *reg;
unsigned char irq_mask;
};
static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- if (dio48egpio->io_state[port] & mask)
- return GPIO_LINE_DIRECTION_IN;
+ if (i8255_get_direction(dio48egpio->ppi_state, offset))
+ return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -68,38 +92,9 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset
static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- void __iomem *const control_addr = dio48egpio->base + 3 + control_port * 4;
- unsigned long flags;
- unsigned int control;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- dio48egpio->io_state[io_port] |= 0xF0;
- dio48egpio->control[control_port] |= BIT(3);
- } else {
- dio48egpio->io_state[io_port] |= 0x0F;
- dio48egpio->control[control_port] |= BIT(0);
- }
- } else {
- dio48egpio->io_state[io_port] |= 0xFF;
- if (io_port == 0 || io_port == 3)
- dio48egpio->control[control_port] |= BIT(4);
- else
- dio48egpio->control[control_port] |= BIT(1);
- }
-
- control = BIT(7) | dio48egpio->control[control_port];
- iowrite8(control, control_addr);
- control &= ~BIT(7);
- iowrite8(control, control_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_direction_input(dio48egpio->reg->ppi, dio48egpio->ppi_state,
+ offset);
return 0;
}
@@ -108,48 +103,9 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int off
int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- const unsigned int mask = BIT(offset % 8);
- void __iomem *const control_addr = dio48egpio->base + 3 + control_port * 4;
- const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
- unsigned long flags;
- unsigned int control;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- dio48egpio->io_state[io_port] &= 0x0F;
- dio48egpio->control[control_port] &= ~BIT(3);
- } else {
- dio48egpio->io_state[io_port] &= 0xF0;
- dio48egpio->control[control_port] &= ~BIT(0);
- }
- } else {
- dio48egpio->io_state[io_port] &= 0x00;
- if (io_port == 0 || io_port == 3)
- dio48egpio->control[control_port] &= ~BIT(4);
- else
- dio48egpio->control[control_port] &= ~BIT(1);
- }
-
- if (value)
- dio48egpio->out_state[io_port] |= mask;
- else
- dio48egpio->out_state[io_port] &= ~mask;
- control = BIT(7) | dio48egpio->control[control_port];
- iowrite8(control, control_addr);
-
- iowrite8(dio48egpio->out_state[io_port], dio48egpio->base + out_port);
-
- control &= ~BIT(7);
- iowrite8(control, control_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_direction_output(dio48egpio->reg->ppi, dio48egpio->ppi_state,
+ offset, value);
return 0;
}
@@ -157,47 +113,16 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int off
static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int in_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- unsigned int port_state;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* ensure that GPIO is set for input */
- if (!(dio48egpio->io_state[port] & mask)) {
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(dio48egpio->base + in_port);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-
- return !!(port_state & mask);
+ return i8255_get(dio48egpio->reg->ppi, offset);
}
-static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
-
static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *port_addr;
- unsigned long port_state;
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = dio48egpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(dio48egpio->reg->ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -205,49 +130,17 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (value)
- dio48egpio->out_state[port] |= mask;
- else
- dio48egpio->out_state[port] &= ~mask;
-
- iowrite8(dio48egpio->out_state[port], dio48egpio->base + out_port);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_set(dio48egpio->reg->ppi, dio48egpio->ppi_state, offset, value);
}
static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- void __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
- port_addr = dio48egpio->base + ports[index];
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* update output state data and set device gpio register */
- dio48egpio->out_state[index] &= ~gpio_mask;
- dio48egpio->out_state[index] |= bitmask;
- iowrite8(dio48egpio->out_state[index], port_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
- }
+ i8255_set_multiple(dio48egpio->reg->ppi, dio48egpio->ppi_state, mask,
+ bits, chip->ngpio);
}
static void dio48e_irq_ack(struct irq_data *data)
@@ -274,7 +167,7 @@ static void dio48e_irq_mask(struct irq_data *data)
if (!dio48egpio->irq_mask)
/* disable interrupts */
- ioread8(dio48egpio->base + 0xB);
+ ioread8(&dio48egpio->reg->enable_interrupt);
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
@@ -294,8 +187,8 @@ static void dio48e_irq_unmask(struct irq_data *data)
if (!dio48egpio->irq_mask) {
/* enable interrupts */
- iowrite8(0x00, dio48egpio->base + 0xF);
- iowrite8(0x00, dio48egpio->base + 0xB);
+ iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
+ iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
if (offset == 19)
@@ -341,7 +234,7 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
raw_spin_lock(&dio48egpio->lock);
- iowrite8(0x00, dio48egpio->base + 0xF);
+ iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
raw_spin_unlock(&dio48egpio->lock);
@@ -373,11 +266,26 @@ static int dio48e_irq_init_hw(struct gpio_chip *gc)
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- ioread8(dio48egpio->base + 0xB);
+ ioread8(&dio48egpio->reg->enable_interrupt);
return 0;
}
+static void dio48e_init_ppi(struct i8255 __iomem *const ppi,
+ struct i8255_state *const ppi_state)
+{
+ const unsigned long ngpio = 24;
+ const unsigned long mask = GENMASK(ngpio - 1, 0);
+ const unsigned long bits = 0;
+ unsigned long i;
+
+ /* Initialize all GPIO to output 0 */
+ for (i = 0; i < DIO48E_NUM_PPI; i++) {
+ i8255_mode0_output(&ppi[i]);
+ i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
+ }
+}
+
static int dio48e_probe(struct device *dev, unsigned int id)
{
struct dio48e_gpio *dio48egpio;
@@ -395,8 +303,8 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- dio48egpio->base = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
- if (!dio48egpio->base)
+ dio48egpio->reg = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
+ if (!dio48egpio->reg)
return -ENOMEM;
dio48egpio->chip.label = name;
@@ -425,17 +333,8 @@ static int dio48e_probe(struct device *dev, unsigned int id)
raw_spin_lock_init(&dio48egpio->lock);
- /* initialize all GPIO as output */
- iowrite8(0x80, dio48egpio->base + 3);
- iowrite8(0x00, dio48egpio->base);
- iowrite8(0x00, dio48egpio->base + 1);
- iowrite8(0x00, dio48egpio->base + 2);
- iowrite8(0x00, dio48egpio->base + 3);
- iowrite8(0x80, dio48egpio->base + 7);
- iowrite8(0x00, dio48egpio->base + 4);
- iowrite8(0x00, dio48egpio->base + 5);
- iowrite8(0x00, dio48egpio->base + 6);
- iowrite8(0x00, dio48egpio->base + 7);
+ i8255_state_init(dio48egpio->ppi_state, DIO48E_NUM_PPI);
+ dio48e_init_ppi(dio48egpio->reg->ppi, dio48egpio->ppi_state);
err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
if (err) {
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 9521ece3ebef..40be76efeed7 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -6,8 +6,7 @@
* This driver supports the following ACCES devices: 104-IDI-48A,
* 104-IDI-48AC, 104-IDI-48B, and 104-IDI-48BC.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -20,6 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define IDI_48_EXTENT 8
#define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT)
@@ -34,72 +38,61 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
/**
+ * struct idi_48_reg - device register structure
+ * @port0: Port 0 Inputs
+ * @unused: Unused
+ * @port1: Port 1 Inputs
+ * @irq: Read: IRQ Status Register/IRQ Clear
+ * Write: IRQ Enable/Disable
+ */
+struct idi_48_reg {
+ u8 port0[3];
+ u8 unused;
+ u8 port1[3];
+ u8 irq;
+};
+
+/**
* struct idi_48_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @lock: synchronization lock to prevent I/O race conditions
- * @ack_lock: synchronization lock to prevent IRQ handler race conditions
* @irq_mask: input bits affected by interrupts
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
* @cos_enb: Change-Of-State IRQ enable boundaries mask
*/
struct idi_48_gpio {
struct gpio_chip chip;
- raw_spinlock_t lock;
- spinlock_t ack_lock;
+ spinlock_t lock;
unsigned char irq_mask[6];
- void __iomem *base;
+ struct idi_48_reg __iomem *reg;
unsigned char cos_enb;
};
-static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
return GPIO_LINE_DIRECTION_IN;
}
-static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
return 0;
}
-static int idi_48_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- unsigned i;
- static const unsigned int register_offset[6] = { 0, 1, 2, 4, 5, 6 };
- void __iomem *port_addr;
- unsigned mask;
-
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- port_addr = idi48gpio->base + register_offset[i / 8];
- mask = BIT(offset - i);
-
- return !!(ioread8(port_addr) & mask);
- }
+ void __iomem *const ppi = idi48gpio->reg;
- /* The following line should never execute since offset < 48 */
- return 0;
+ return i8255_get(ppi, offset);
}
static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
- void __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
+ void __iomem *const ppi = idi48gpio->reg;
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = idi48gpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -112,67 +105,56 @@ static void idi_48_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned offset = irqd_to_hwirq(data);
- unsigned i;
- unsigned mask;
- unsigned boundary;
+ const unsigned int offset = irqd_to_hwirq(data);
+ const unsigned long boundary = offset / 8;
+ const unsigned long mask = BIT(offset % 8);
unsigned long flags;
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- mask = BIT(offset - i);
- boundary = i / 8;
-
- idi48gpio->irq_mask[boundary] &= ~mask;
+ spin_lock_irqsave(&idi48gpio->lock, flags);
- if (!idi48gpio->irq_mask[boundary]) {
- idi48gpio->cos_enb &= ~BIT(boundary);
+ idi48gpio->irq_mask[boundary] &= ~mask;
- raw_spin_lock_irqsave(&idi48gpio->lock, flags);
+ /* Exit early if there are still input lines with IRQ unmasked */
+ if (idi48gpio->irq_mask[boundary])
+ goto exit;
- iowrite8(idi48gpio->cos_enb, idi48gpio->base + 7);
+ idi48gpio->cos_enb &= ~BIT(boundary);
- raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
- }
+ iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
- return;
- }
+exit:
+ spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
static void idi_48_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned offset = irqd_to_hwirq(data);
- unsigned i;
- unsigned mask;
- unsigned boundary;
- unsigned prev_irq_mask;
+ const unsigned int offset = irqd_to_hwirq(data);
+ const unsigned long boundary = offset / 8;
+ const unsigned long mask = BIT(offset % 8);
+ unsigned int prev_irq_mask;
unsigned long flags;
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- mask = BIT(offset - i);
- boundary = i / 8;
- prev_irq_mask = idi48gpio->irq_mask[boundary];
+ spin_lock_irqsave(&idi48gpio->lock, flags);
- idi48gpio->irq_mask[boundary] |= mask;
+ prev_irq_mask = idi48gpio->irq_mask[boundary];
- if (!prev_irq_mask) {
- idi48gpio->cos_enb |= BIT(boundary);
+ idi48gpio->irq_mask[boundary] |= mask;
- raw_spin_lock_irqsave(&idi48gpio->lock, flags);
+ /* Exit early if IRQ was already unmasked for this boundary */
+ if (prev_irq_mask)
+ goto exit;
- iowrite8(idi48gpio->cos_enb, idi48gpio->base + 7);
+ idi48gpio->cos_enb |= BIT(boundary);
- raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
- }
+ iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
- return;
- }
+exit:
+ spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
-static int idi_48_irq_set_type(struct irq_data *data, unsigned flow_type)
+static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
/* The only valid irq types are none and both-edges */
if (flow_type != IRQ_TYPE_NONE &&
@@ -200,17 +182,13 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
unsigned long gpio;
struct gpio_chip *const chip = &idi48gpio->chip;
- spin_lock(&idi48gpio->ack_lock);
-
- raw_spin_lock(&idi48gpio->lock);
-
- cos_status = ioread8(idi48gpio->base + 7);
+ spin_lock(&idi48gpio->lock);
- raw_spin_unlock(&idi48gpio->lock);
+ cos_status = ioread8(&idi48gpio->reg->irq);
/* IRQ Status (bit 6) is active low (0 = IRQ generated by device) */
if (cos_status & BIT(6)) {
- spin_unlock(&idi48gpio->ack_lock);
+ spin_unlock(&idi48gpio->lock);
return IRQ_NONE;
}
@@ -228,7 +206,7 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
}
}
- spin_unlock(&idi48gpio->ack_lock);
+ spin_unlock(&idi48gpio->lock);
return IRQ_HANDLED;
}
@@ -250,8 +228,8 @@ static int idi_48_irq_init_hw(struct gpio_chip *gc)
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- iowrite8(0, idi48gpio->base + 7);
- ioread8(idi48gpio->base + 7);
+ iowrite8(0, &idi48gpio->reg->irq);
+ ioread8(&idi48gpio->reg->irq);
return 0;
}
@@ -273,8 +251,8 @@ static int idi_48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idi48gpio->base = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
- if (!idi48gpio->base)
+ idi48gpio->reg = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
+ if (!idi48gpio->reg)
return -ENOMEM;
idi48gpio->chip.label = name;
@@ -298,8 +276,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
girq->handler = handle_edge_irq;
girq->init_hw = idi_48_irq_init_hw;
- raw_spin_lock_init(&idi48gpio->lock);
- spin_lock_init(&idi48gpio->ack_lock);
+ spin_lock_init(&idi48gpio->lock);
err = devm_gpiochip_add_data(dev, &idi48gpio->chip, idi48gpio);
if (err) {
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 45f7ad8573e1..65a5f581d981 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -6,7 +6,7 @@
* This driver supports the following ACCES devices: 104-IDIO-16,
* 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#define IDIO_16_EXTENT 8
#define MAX_NUM_IDIO_16 max_num_isa_dev(IDIO_16_EXTENT)
@@ -33,18 +34,41 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
/**
+ * struct idio_16_reg - device registers structure
+ * @out0_7: Read: N/A
+ * Write: FET Drive Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Clear Interrupt
+ * @irq_ctl: Read: Enable IRQ
+ * Write: Disable IRQ
+ * @unused: N/A
+ * @out8_15: Read: N/A
+ * Write: FET Drive Outputs 8-15
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: N/A
+ */
+struct idio_16_reg {
+ u8 out0_7;
+ u8 in0_7;
+ u8 irq_ctl;
+ u8 unused;
+ u8 out8_15;
+ u8 in8_15;
+};
+
+/**
* struct idio_16_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
* @out_state: output bits state
*/
struct idio_16_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
unsigned long irq_mask;
- void __iomem *base;
+ struct idio_16_reg __iomem *reg;
unsigned int out_state;
};
@@ -79,9 +103,9 @@ static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset)
return -EINVAL;
if (offset < 24)
- return !!(ioread8(idio16gpio->base + 1) & mask);
+ return !!(ioread8(&idio16gpio->reg->in0_7) & mask);
- return !!(ioread8(idio16gpio->base + 5) & (mask>>8));
+ return !!(ioread8(&idio16gpio->reg->in8_15) & (mask>>8));
}
static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
@@ -91,9 +115,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
*bits = 0;
if (*mask & GENMASK(23, 16))
- *bits |= (unsigned long)ioread8(idio16gpio->base + 1) << 16;
+ *bits |= (unsigned long)ioread8(&idio16gpio->reg->in0_7) << 16;
if (*mask & GENMASK(31, 24))
- *bits |= (unsigned long)ioread8(idio16gpio->base + 5) << 24;
+ *bits |= (unsigned long)ioread8(&idio16gpio->reg->in8_15) << 24;
return 0;
}
@@ -116,9 +140,9 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
idio16gpio->out_state &= ~mask;
if (offset > 7)
- iowrite8(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+ iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15);
else
- iowrite8(idio16gpio->out_state, idio16gpio->base);
+ iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -135,9 +159,9 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
idio16gpio->out_state |= *mask & *bits;
if (*mask & 0xFF)
- iowrite8(idio16gpio->out_state, idio16gpio->base);
+ iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7);
if ((*mask >> 8) & 0xFF)
- iowrite8(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+ iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -158,7 +182,7 @@ static void idio_16_irq_mask(struct irq_data *data)
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- iowrite8(0, idio16gpio->base + 2);
+ iowrite8(0, &idio16gpio->reg->irq_ctl);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -177,7 +201,7 @@ static void idio_16_irq_unmask(struct irq_data *data)
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- ioread8(idio16gpio->base + 2);
+ ioread8(&idio16gpio->reg->irq_ctl);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -212,7 +236,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
raw_spin_lock(&idio16gpio->lock);
- iowrite8(0, idio16gpio->base + 1);
+ iowrite8(0, &idio16gpio->reg->in0_7);
raw_spin_unlock(&idio16gpio->lock);
@@ -232,8 +256,8 @@ static int idio_16_irq_init_hw(struct gpio_chip *gc)
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- iowrite8(0, idio16gpio->base + 2);
- iowrite8(0, idio16gpio->base + 1);
+ iowrite8(0, &idio16gpio->reg->irq_ctl);
+ iowrite8(0, &idio16gpio->reg->in0_7);
return 0;
}
@@ -255,8 +279,8 @@ static int idio_16_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idio16gpio->base = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
- if (!idio16gpio->base)
+ idio16gpio->reg = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
+ if (!idio16gpio->reg)
return -ENOMEM;
idio16gpio->chip.label = name;
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 173e06758e6c..0464f1ecd20d 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -5,15 +5,17 @@
* Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
*/
+#include <linux/bits.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
-#define MMIO_74XX_DIR_IN (0 << 8)
-#define MMIO_74XX_DIR_OUT (1 << 8)
-#define MMIO_74XX_BIT_CNT(x) ((x) & 0xff)
+#define MMIO_74XX_DIR_IN BIT(8)
+#define MMIO_74XX_DIR_OUT BIT(9)
+#define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0))
struct mmio_74xx_gpio_priv {
struct gpio_chip gc;
@@ -87,7 +89,10 @@ static int mmio_74xx_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- return (priv->flags & MMIO_74XX_DIR_OUT) ? -ENOTSUPP : 0;
+ if (priv->flags & MMIO_74XX_DIR_IN)
+ return 0;
+
+ return -ENOTSUPP;
}
static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -112,7 +117,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ priv->flags = (uintptr_t)device_get_match_data(&pdev->dev);
dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index cc349d4e4973..a6439e3daff0 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -6,8 +6,9 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -485,22 +486,17 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
return 0;
}
-static int adnp_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adnp_i2c_probe(struct i2c_client *client)
{
- struct device_node *np = client->dev.of_node;
+ struct device *dev = &client->dev;
struct adnp *adnp;
u32 num_gpios;
int err;
- err = of_property_read_u32(np, "nr-gpios", &num_gpios);
+ err = device_property_read_u32(dev, "nr-gpios", &num_gpios);
if (err < 0)
return err;
- client->irq = irq_of_parse_and_map(np, 0);
- if (!client->irq)
- return -EPROBE_DEFER;
-
adnp = devm_kzalloc(&client->dev, sizeof(*adnp), GFP_KERNEL);
if (!adnp)
return -ENOMEM;
@@ -508,8 +504,7 @@ static int adnp_i2c_probe(struct i2c_client *client,
mutex_init(&adnp->i2c_lock);
adnp->client = client;
- err = adnp_gpio_setup(adnp, num_gpios,
- of_property_read_bool(np, "interrupt-controller"));
+ err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller"));
if (err)
return err;
@@ -535,7 +530,7 @@ static struct i2c_driver adnp_i2c_driver = {
.name = "gpio-adnp",
.of_match_table = adnp_of_match,
},
- .probe = adnp_i2c_probe,
+ .probe_new = adnp_i2c_probe,
.id_table = adnp_i2c_id,
};
module_i2c_driver(adnp_i2c_driver);
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e388e75103f4..d49f12560cde 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -6,20 +6,18 @@
* Copyright 2009-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/platform_data/adp5588.h>
-#define DRV_NAME "adp5588-gpio"
-
/*
* Early pre 4.0 Silicon required to delay readout by at least 25ms,
* since the Event Counter Register updated 25ms after the interrupt
@@ -422,23 +420,21 @@ static int adp5588_gpio_remove(struct i2c_client *client)
}
static const struct i2c_device_id adp5588_gpio_id[] = {
- {DRV_NAME, 0},
+ { "adp5588-gpio" },
{}
};
MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
-#ifdef CONFIG_OF
static const struct of_device_id adp5588_gpio_of_id[] = {
- { .compatible = "adi," DRV_NAME, },
- {},
+ { .compatible = "adi,adp5588-gpio" },
+ {}
};
MODULE_DEVICE_TABLE(of, adp5588_gpio_of_id);
-#endif
static struct i2c_driver adp5588_gpio_driver = {
.driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(adp5588_gpio_of_id),
+ .name = "adp5588-gpio",
+ .of_match_table = adp5588_gpio_of_id,
},
.probe_new = adp5588_gpio_probe,
.remove = adp5588_gpio_remove,
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index e84474494429..70770429ba48 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Broadcom Kona GPIO Driver
*
* Author: Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>
* Copyright (C) 2012-2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/bitops.h>
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 6b7439b44690..c55b35da61a0 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015-2017 Broadcom
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
@@ -385,12 +375,7 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
struct brcmstb_gpio_bank *bank;
- int offset, ret = 0, virq;
-
- if (!priv) {
- dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
- return -EFAULT;
- }
+ int offset, virq;
if (priv->parent_irq > 0)
irq_set_chained_handler_and_data(priv->parent_irq, NULL, NULL);
@@ -411,7 +396,7 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- return ret;
+ return 0;
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index f960587f86a3..59c4c48d8296 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -22,6 +22,7 @@
#include <linux/platform_data/gpio-davinci.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
#include <asm-generic/gpio.h>
@@ -62,6 +63,8 @@ struct davinci_gpio_controller {
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
int irqs[MAX_INT_PER_BANK];
+ struct davinci_gpio_regs context[MAX_REGS_BANKS];
+ u32 binten_context;
};
static inline u32 __gpio_mask(unsigned gpio)
@@ -622,6 +625,85 @@ done:
return 0;
}
+static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+ chips->binten_context = readl_relaxed(base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ context->dir = readl_relaxed(&g->dir);
+ context->set_data = readl_relaxed(&g->set_data);
+ context->set_rising = readl_relaxed(&g->set_rising);
+ context->set_falling = readl_relaxed(&g->set_falling);
+ }
+
+ /* Clear Bank interrupt enable bit */
+ writel_relaxed(0, base + BINTEN);
+
+ /* Clear all interrupt status registers */
+ writel_relaxed(GENMASK(31, 0), &g->intstat);
+}
+
+static void davinci_gpio_restore_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+
+ if (readl_relaxed(base + BINTEN) != chips->binten_context)
+ writel_relaxed(chips->binten_context, base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ if (readl_relaxed(&g->dir) != context->dir)
+ writel_relaxed(context->dir, &g->dir);
+ if (readl_relaxed(&g->set_data) != context->set_data)
+ writel_relaxed(context->set_data, &g->set_data);
+ if (readl_relaxed(&g->set_rising) != context->set_rising)
+ writel_relaxed(context->set_rising, &g->set_rising);
+ if (readl_relaxed(&g->set_falling) != context->set_falling)
+ writel_relaxed(context->set_falling, &g->set_falling);
+ }
+}
+
+static int davinci_gpio_suspend(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_save_context(chips, nbank);
+
+ return 0;
+}
+
+static int davinci_gpio_resume(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_restore_context(chips, nbank);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(davinci_gpio_dev_pm_ops, davinci_gpio_suspend,
+ davinci_gpio_resume);
+
static const struct of_device_id davinci_gpio_ids[] = {
{ .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
{ .compatible = "ti,am654-gpio", keystone_gpio_get_irq_chip},
@@ -634,6 +716,7 @@ static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
.name = "davinci_gpio",
+ .pm = pm_sleep_ptr(&davinci_gpio_dev_pm_ops),
.of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
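
A side note on the PM wiring added in the davinci hunks above: DEFINE_SIMPLE_DEV_PM_OPS() together with pm_sleep_ptr() lets the suspend/resume callbacks stay outside any #ifdef CONFIG_PM_SLEEP block, because the ops pointer itself collapses to NULL when sleep support is disabled. A minimal sketch of the same pattern, using hypothetical foo_* names rather than the davinci ones:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* save the controller context here, as davinci_gpio_suspend() does */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore the saved context here */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	/* .probe/.remove omitted for brevity */
	.driver = {
		.name	= "foo",
		/* evaluates to NULL when CONFIG_PM_SLEEP is not set */
		.pm	= pm_sleep_ptr(&foo_pm_ops),
	},
};
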
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 097a06463d01..2689671b6b01 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -6,8 +6,6 @@
* This driver supports the following Diamond Systems devices: GPIO-MM and
* GPIO-MM-12.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -17,7 +15,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define GPIOMM_EXTENT 8
#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
@@ -27,32 +28,26 @@ static unsigned int num_gpiomm;
module_param_hw_array(base, uint, ioport, &num_gpiomm, 0);
MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
+#define GPIOMM_NUM_PPI 2
+
/**
* struct gpiomm_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
- * @control: Control registers state
- * @lock: synchronization lock to prevent I/O race conditions
- * @base: base port address of the GPIO device
+ * @chip: instance of the gpio_chip
+ * @ppi_state: Programmable Peripheral Interface group states
+ * @ppi: Programmable Peripheral Interface groups
*/
struct gpiomm_gpio {
struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
- unsigned char control[2];
- spinlock_t lock;
- void __iomem *base;
+ struct i8255_state ppi_state[GPIOMM_NUM_PPI];
+ struct i8255 __iomem *ppi;
};
static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- if (gpiommgpio->io_state[port] & mask)
+ if (i8255_get_direction(gpiommgpio->ppi_state, offset))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -62,35 +57,8 @@ static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- unsigned long flags;
- unsigned int control;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- gpiommgpio->io_state[io_port] |= 0xF0;
- gpiommgpio->control[control_port] |= BIT(3);
- } else {
- gpiommgpio->io_state[io_port] |= 0x0F;
- gpiommgpio->control[control_port] |= BIT(0);
- }
- } else {
- gpiommgpio->io_state[io_port] |= 0xFF;
- if (io_port == 0 || io_port == 3)
- gpiommgpio->control[control_port] |= BIT(4);
- else
- gpiommgpio->control[control_port] |= BIT(1);
- }
- control = BIT(7) | gpiommgpio->control[control_port];
- iowrite8(control, gpiommgpio->base + 3 + control_port*4);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_direction_input(gpiommgpio->ppi, gpiommgpio->ppi_state, offset);
return 0;
}
@@ -99,44 +67,9 @@ static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
- unsigned long flags;
- unsigned int control;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- gpiommgpio->io_state[io_port] &= 0x0F;
- gpiommgpio->control[control_port] &= ~BIT(3);
- } else {
- gpiommgpio->io_state[io_port] &= 0xF0;
- gpiommgpio->control[control_port] &= ~BIT(0);
- }
- } else {
- gpiommgpio->io_state[io_port] &= 0x00;
- if (io_port == 0 || io_port == 3)
- gpiommgpio->control[control_port] &= ~BIT(4);
- else
- gpiommgpio->control[control_port] &= ~BIT(1);
- }
-
- if (value)
- gpiommgpio->out_state[io_port] |= mask;
- else
- gpiommgpio->out_state[io_port] &= ~mask;
-
- control = BIT(7) | gpiommgpio->control[control_port];
- iowrite8(control, gpiommgpio->base + 3 + control_port*4);
- iowrite8(gpiommgpio->out_state[io_port], gpiommgpio->base + out_port);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_direction_output(gpiommgpio->ppi, gpiommgpio->ppi_state, offset,
+ value);
return 0;
}
@@ -144,47 +77,16 @@ static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int in_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- unsigned int port_state;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* ensure that GPIO is set for input */
- if (!(gpiommgpio->io_state[port] & mask)) {
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(gpiommgpio->base + in_port);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- return !!(port_state & mask);
+ return i8255_get(gpiommgpio->ppi, offset);
}
-static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
-
static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = gpiommgpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(gpiommgpio->ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -193,49 +95,17 @@ static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- if (value)
- gpiommgpio->out_state[port] |= mask;
- else
- gpiommgpio->out_state[port] &= ~mask;
-
- iowrite8(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_set(gpiommgpio->ppi, gpiommgpio->ppi_state, offset, value);
}
static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- void __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
- port_addr = gpiommgpio->base + ports[index];
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
- /* update output state data and set device gpio register */
- gpiommgpio->out_state[index] &= ~gpio_mask;
- gpiommgpio->out_state[index] |= bitmask;
- iowrite8(gpiommgpio->out_state[index], port_addr);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- }
+ i8255_set_multiple(gpiommgpio->ppi, gpiommgpio->ppi_state, mask, bits,
+ chip->ngpio);
}
#define GPIOMM_NGPIO 48
@@ -250,6 +120,21 @@ static const char *gpiomm_names[GPIOMM_NGPIO] = {
"Port 2C2", "Port 2C3", "Port 2C4", "Port 2C5", "Port 2C6", "Port 2C7",
};
+static void gpiomm_init_dio(struct i8255 __iomem *const ppi,
+ struct i8255_state *const ppi_state)
+{
+ const unsigned long ngpio = 24;
+ const unsigned long mask = GENMASK(ngpio - 1, 0);
+ const unsigned long bits = 0;
+ unsigned long i;
+
+ /* Initialize all GPIO to output 0 */
+ for (i = 0; i < GPIOMM_NUM_PPI; i++) {
+ i8255_mode0_output(&ppi[i]);
+ i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
+ }
+}
+
static int gpiomm_probe(struct device *dev, unsigned int id)
{
struct gpiomm_gpio *gpiommgpio;
@@ -266,8 +151,8 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- gpiommgpio->base = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
- if (!gpiommgpio->base)
+ gpiommgpio->ppi = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
+ if (!gpiommgpio->ppi)
return -ENOMEM;
gpiommgpio->chip.label = name;
@@ -284,7 +169,8 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
gpiommgpio->chip.set = gpiomm_gpio_set;
gpiommgpio->chip.set_multiple = gpiomm_gpio_set_multiple;
- spin_lock_init(&gpiommgpio->lock);
+ i8255_state_init(gpiommgpio->ppi_state, GPIOMM_NUM_PPI);
+ gpiomm_init_dio(gpiommgpio->ppi, gpiommgpio->ppi_state);
err = devm_gpiochip_add_data(dev, &gpiommgpio->chip, gpiommgpio);
if (err) {
@@ -292,16 +178,6 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return err;
}
- /* initialize all GPIO as output */
- iowrite8(0x80, gpiommgpio->base + 3);
- iowrite8(0x00, gpiommgpio->base);
- iowrite8(0x00, gpiommgpio->base + 1);
- iowrite8(0x00, gpiommgpio->base + 2);
- iowrite8(0x80, gpiommgpio->base + 7);
- iowrite8(0x00, gpiommgpio->base + 4);
- iowrite8(0x00, gpiommgpio->base + 5);
- iowrite8(0x00, gpiommgpio->base + 6);
-
return 0;
}
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
new file mode 100644
index 000000000000..9b97db418df1
--- /dev/null
+++ b/drivers/gpio/gpio-i8255.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel 8255 Programmable Peripheral Interface
+ * Copyright (C) 2022 William Breathitt Gray
+ */
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+#define I8255_CONTROL_PORTC_LOWER_DIRECTION BIT(0)
+#define I8255_CONTROL_PORTB_DIRECTION BIT(1)
+#define I8255_CONTROL_PORTC_UPPER_DIRECTION BIT(3)
+#define I8255_CONTROL_PORTA_DIRECTION BIT(4)
+#define I8255_CONTROL_MODE_SET BIT(7)
+#define I8255_PORTA 0
+#define I8255_PORTB 1
+#define I8255_PORTC 2
+
+static int i8255_get_port(struct i8255 __iomem *const ppi,
+ const unsigned long io_port, const unsigned long mask)
+{
+ const unsigned long bank = io_port / 3;
+ const unsigned long ppi_port = io_port % 3;
+
+ return ioread8(&ppi[bank].port[ppi_port]) & mask;
+}
+
+static u8 i8255_direction_mask(const unsigned long offset)
+{
+ const unsigned long port_offset = offset % 8;
+ const unsigned long io_port = offset / 8;
+ const unsigned long ppi_port = io_port % 3;
+
+ switch (ppi_port) {
+ case I8255_PORTA:
+ return I8255_CONTROL_PORTA_DIRECTION;
+ case I8255_PORTB:
+ return I8255_CONTROL_PORTB_DIRECTION;
+ case I8255_PORTC:
+ /* Port C can be configured by nibble */
+ if (port_offset >= 4)
+ return I8255_CONTROL_PORTC_UPPER_DIRECTION;
+ return I8255_CONTROL_PORTC_LOWER_DIRECTION;
+ default:
+ /* Should never reach this path */
+ return 0;
+ }
+}
+
+static void i8255_set_port(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long io_port,
+ const unsigned long mask, const unsigned long bits)
+{
+ const unsigned long bank = io_port / 3;
+ const unsigned long ppi_port = io_port % 3;
+ unsigned long flags;
+ unsigned long out_state;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ out_state = ioread8(&ppi[bank].port[ppi_port]);
+ out_state = (out_state & ~mask) | (bits & mask);
+ iowrite8(out_state, &ppi[bank].port[ppi_port]);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+}
+
+/**
+ * i8255_direction_input - configure signal offset as input
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @offset: signal offset to configure as input
+ *
+ * Configures a signal @offset as input for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks. The @state control_state
+ * values are updated to reflect the new configuration.
+ */
+void i8255_direction_input(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ state[bank].control_state |= I8255_CONTROL_MODE_SET;
+ state[bank].control_state |= i8255_direction_mask(offset);
+
+ iowrite8(state[bank].control_state, &ppi[bank].control);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_direction_input, I8255);
+
+/**
+ * i8255_direction_output - configure signal offset as output
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @offset: signal offset to configure as output
+ * @value: signal value to output
+ *
+ * Configures a signal @offset as output for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks and sets the respective signal
+ * output to the desired @value. The @state control_state values are updated to
+ * reflect the new configuration.
+ */
+void i8255_direction_output(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long offset,
+ const unsigned long value)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ state[bank].control_state |= I8255_CONTROL_MODE_SET;
+ state[bank].control_state &= ~i8255_direction_mask(offset);
+
+ iowrite8(state[bank].control_state, &ppi[bank].control);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+
+ i8255_set(ppi, state, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_direction_output, I8255);
+
+/**
+ * i8255_get - get signal value at signal offset
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @offset: offset of signal to get
+ *
+ * Returns the signal value (0=low, 1=high) for the signal at @offset for the
+ * respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+int i8255_get(struct i8255 __iomem *const ppi, const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long offset_mask = BIT(offset % 8);
+
+ return !!i8255_get_port(ppi, io_port, offset_mask);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get, I8255);
+
+/**
+ * i8255_get_direction - get the I/O direction for a signal offset
+ * @state: devices states of the respective PPI banks
+ * @offset: offset of signal to get direction
+ *
+ * Returns the signal direction (0=output, 1=input) for the signal at @offset.
+ */
+int i8255_get_direction(const struct i8255_state *const state,
+ const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+
+ return !!(state[bank].control_state & i8255_direction_mask(offset));
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get_direction, I8255);
+
+/**
+ * i8255_get_multiple - get multiple signal values at multiple signal offsets
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @mask: mask of signals to get
+ * @bits: bitmap to store signal values
+ * @ngpio: number of GPIO signals of the respective PPI banks
+ *
+ * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask
+ * for the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_get_multiple(struct i8255 __iomem *const ppi,
+ const unsigned long *const mask,
+ unsigned long *const bits, const unsigned long ngpio)
+{
+ unsigned long offset;
+ unsigned long port_mask;
+ unsigned long io_port;
+ unsigned long port_state;
+
+ bitmap_zero(bits, ngpio);
+
+ for_each_set_clump8(offset, port_mask, mask, ngpio) {
+ io_port = offset / 8;
+ port_state = i8255_get_port(ppi, io_port, port_mask);
+
+ bitmap_set_value8(bits, port_state, offset);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get_multiple, I8255);
+
+/**
+ * i8255_mode0_output - configure all PPI ports to MODE 0 output mode
+ * @ppi: Intel 8255 Programmable Peripheral Interface bank
+ *
+ * Configures all Intel 8255 Programmable Peripheral Interface (@ppi) ports to
+ * MODE 0 (Basic Input/Output) output mode.
+ */
+void i8255_mode0_output(struct i8255 __iomem *const ppi)
+{
+ iowrite8(I8255_CONTROL_MODE_SET, &ppi->control);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_mode0_output, I8255);
+
+/**
+ * i8255_set - set signal value at signal offset
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @offset: offset of signal to set
+ * @value: value of signal to set
+ *
+ * Assigns output @value for the signal at @offset for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_set(struct i8255 __iomem *const ppi, struct i8255_state *const state,
+ const unsigned long offset, const unsigned long value)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long port_offset = offset % 8;
+ const unsigned long mask = BIT(port_offset);
+ const unsigned long bits = value << port_offset;
+
+ i8255_set_port(ppi, state, io_port, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_set, I8255);
+
+/**
+ * i8255_set_multiple - set signal values at multiple signal offsets
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @mask: mask of signals to set
+ * @bits: bitmap of signal output values
+ * @ngpio: number of GPIO signals of the respective PPI banks
+ *
+ * Assigns output values defined by @bits for the signals defined by @mask for
+ * the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_set_multiple(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long *const mask,
+ const unsigned long *const bits,
+ const unsigned long ngpio)
+{
+ unsigned long offset;
+ unsigned long port_mask;
+ unsigned long io_port;
+ unsigned long value;
+
+ for_each_set_clump8(offset, port_mask, mask, ngpio) {
+ io_port = offset / 8;
+ value = bitmap_get_value8(bits, offset);
+ i8255_set_port(ppi, state, io_port, port_mask, value);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(i8255_set_multiple, I8255);
+
+/**
+ * i8255_state_init - initialize i8255_state structure
+ * @state: devices states of the respective PPI banks
+ * @nbanks: number of Intel 8255 Programmable Peripheral Interface banks
+ *
+ * Initializes the @state of each Intel 8255 Programmable Peripheral Interface
+ * bank for use in i8255 library functions.
+ */
+void i8255_state_init(struct i8255_state *const state,
+ const unsigned long nbanks)
+{
+ unsigned long bank;
+
+ for (bank = 0; bank < nbanks; bank++)
+ spin_lock_init(&state[bank].lock);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_state_init, I8255);
+
+MODULE_AUTHOR("William Breathitt Gray");
+MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-i8255.h b/drivers/gpio/gpio-i8255.h
new file mode 100644
index 000000000000..d9084aae9446
--- /dev/null
+++ b/drivers/gpio/gpio-i8255.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2022 William Breathitt Gray */
+#ifndef _I8255_H_
+#define _I8255_H_
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct i8255 - Intel 8255 register structure
+ * @port: Port A, B, and C
+ * @control: Control register
+ */
+struct i8255 {
+ u8 port[3];
+ u8 control;
+};
+
+/**
+ * struct i8255_state - Intel 8255 state structure
+ * @lock: synchronization lock for accessing device state
+ * @control_state: Control register state
+ */
+struct i8255_state {
+ spinlock_t lock;
+ u8 control_state;
+};
+
+void i8255_direction_input(struct i8255 __iomem *ppi, struct i8255_state *state,
+ unsigned long offset);
+void i8255_direction_output(struct i8255 __iomem *ppi,
+ struct i8255_state *state, unsigned long offset,
+ unsigned long value);
+int i8255_get(struct i8255 __iomem *ppi, unsigned long offset);
+int i8255_get_direction(const struct i8255_state *state, unsigned long offset);
+void i8255_get_multiple(struct i8255 __iomem *ppi, const unsigned long *mask,
+ unsigned long *bits, unsigned long ngpio);
+void i8255_mode0_output(struct i8255 __iomem *const ppi);
+void i8255_set(struct i8255 __iomem *ppi, struct i8255_state *state,
+ unsigned long offset, unsigned long value);
+void i8255_set_multiple(struct i8255 __iomem *ppi, struct i8255_state *state,
+ const unsigned long *mask, const unsigned long *bits,
+ unsigned long ngpio);
+void i8255_state_init(struct i8255_state *const state, unsigned long nbanks);
+
+#endif /* _I8255_H_ */
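
For orientation, here is a rough consumer-side sketch of this new helper library, modeled on the gpio-gpio-mm.c conversion earlier in this series; the foo_* names and the single-bank layout are illustrative assumptions, not part of the patch:

#include <linux/io.h>
#include <linux/module.h>

#include "gpio-i8255.h"

MODULE_IMPORT_NS(I8255);	/* the helpers are exported in the I8255 namespace */

#define FOO_NUM_PPI 1		/* one 8255 bank: ports A, B, C plus control */

struct foo_gpio {
	struct i8255_state ppi_state[FOO_NUM_PPI];
	struct i8255 __iomem *ppi;	/* ioport/iomem-mapped device registers */
};

static int foo_hw_init(struct foo_gpio *foo)
{
	/* initialize the per-bank spinlocks before calling any other helper */
	i8255_state_init(foo->ppi_state, FOO_NUM_PPI);

	/* MODE 0 with all ports as outputs, then drive line 0 high */
	i8255_mode0_output(&foo->ppi[0]);
	i8255_direction_output(foo->ppi, foo->ppi_state, 0, 1);

	/* read line 0 back (0 = low, 1 = high) */
	return i8255_get(foo->ppi, 0);
}
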
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8a30fb185aab..79edd5db49d2 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -42,7 +42,7 @@ struct lp3943_gpio {
u16 input_mask; /* 1 = GPIO is input direction, 0 = output */
};
-static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
@@ -54,7 +54,7 @@ static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void lp3943_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void lp3943_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
@@ -72,7 +72,7 @@ static int lp3943_gpio_set_mode(struct lp3943_gpio *lp3943_gpio, u8 offset,
val << mux[offset].shift);
}
-static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
@@ -82,7 +82,7 @@ static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
- struct gpio_chip *chip, unsigned offset)
+ struct gpio_chip *chip, unsigned int offset)
{
u8 addr, read;
int err;
@@ -107,7 +107,7 @@ static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
}
static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
- struct gpio_chip *chip, unsigned offset)
+ struct gpio_chip *chip, unsigned int offset)
{
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
@@ -128,7 +128,7 @@ static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
return -EINVAL;
}
-static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
@@ -147,7 +147,7 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -160,7 +160,7 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
-static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 70fad87ff2db..5c79ba1f229c 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
* Keerthy <j-keerthy@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver
*/
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index fcde6708b5df..d3ce027de081 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
* Keerthy <j-keerthy@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the LP873X driver
*/
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index a964e25ea620..15049822937a 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIOs on MPC512x/8349/8572/8610/QorIQ and compatible
*
* Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
* Copyright (C) 2016 Freescale Semiconductor Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/acpi.h>
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index b2c90bdd39d0..52d7b8d99170 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -550,15 +550,12 @@ static struct irq_chip msc313_gpio_irqchip = {
* so we need to provide the fwspec. Essentially gpiochip_populate_parent_fwspec_twocell
* that puts GIC_SPI into the first cell.
*/
-static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+static int msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
- struct irq_fwspec *fwspec;
-
- fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 3;
@@ -566,7 +563,7 @@ static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc,
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
- return fwspec;
+ return 0;
}
static int msc313e_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 2db19cd640a4..aa126ab80f0c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO driver for Marvell SoCs
*
@@ -7,10 +8,6 @@
* Andrew Lunn <andrew@lunn.ch>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* This driver is a fairly straightforward GPIO driver for the
* complete family of Marvell EBU SoC platforms (Orion, Dove,
* Kirkwood, Discovery, Armada 370/XP). The only complexity of this
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 08bc52c3cdcb..ecd7d169470b 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+
.readable_reg = pca953x_readable_register,
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
+ u8 regaddr;
int ret;
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
* sync these registers first and only then sync the rest.
*/
regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
- ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
- ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
if (chip->driver_data & PCA_PCAL) {
regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
- regaddr + NBANK(chip));
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
- regaddr + NBANK(chip));
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
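
The "- 1" adjustments in this file follow from regcache_sync_region() taking an inclusive [min, max] register range, so syncing NBANK(chip) consecutive registers starting at regaddr has to end at regaddr + NBANK(chip) - 1. A minimal sketch of that call, with placeholder names:

#include <linux/regmap.h>

/* Sync n consecutive cached registers starting at base_reg back to hardware. */
static int foo_sync_regs(struct regmap *map, unsigned int base_reg, unsigned int n)
{
	/* both bounds are inclusive, hence the trailing "- 1" */
	return regcache_sync_region(map, base_reg, base_reg + n - 1);
}
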
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index cb2b2f735c15..ab2a652964ec 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -121,12 +121,14 @@ static int pca9570_probe(struct i2c_client *client)
static const struct i2c_device_id pca9570_id_table[] = {
{ "pca9570", 4 },
+ { "pca9571", 8 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, pca9570_id_table);
static const struct of_device_id pca9570_of_match_table[] = {
{ .compatible = "nxp,pca9570", .data = (void *)4 },
+ { .compatible = "nxp,pca9571", .data = (void *)8 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9570_of_match_table);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 3a0bd8795741..ee37ecb615cb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -37,6 +37,11 @@ struct pch_regs {
u32 reset;
};
+#define PCI_DEVICE_ID_INTEL_EG20T_PCH 0x8803
+#define PCI_DEVICE_ID_ROHM_ML7223m_IOH 0x8014
+#define PCI_DEVICE_ID_ROHM_ML7223n_IOH 0x8043
+#define PCI_DEVICE_ID_ROHM_EG20T_PCH 0x8803
+
enum pch_type_t {
INTEL_EG20T_PCH,
OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
@@ -357,16 +362,12 @@ static int pch_gpio_probe(struct pci_dev *pdev,
chip->dev = dev;
ret = pcim_enable_device(pdev);
- if (ret) {
- dev_err(dev, "pci_enable_device FAILED");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PCI device\n");
ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
- if (ret) {
- dev_err(dev, "pci_request_regions FAILED-%d", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request and map PCI regions\n");
chip->base = pcim_iomap_table(pdev)[1];
chip->ioh = id->driver_data;
@@ -376,10 +377,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
pch_gpio_setup(chip);
ret = devm_gpiochip_add_data(dev, &chip->gpio, chip);
- if (ret) {
- dev_err(dev, "PCH gpio: Failed to register GPIO\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register GPIO\n");
irq_base = devm_irq_alloc_descs(dev, -1, 0,
gpio_pins[chip->ioh], NUMA_NO_NODE);
@@ -396,10 +395,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
ret = devm_request_irq(dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
- if (ret) {
- dev_err(dev, "request_irq failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ\n");
return pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
}
@@ -433,15 +430,11 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
static const struct pci_device_id pch_gpio_pcidev_id[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803),
- .driver_data = INTEL_EG20T_PCH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014),
- .driver_data = OKISEMI_ML7223m_IOH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043),
- .driver_data = OKISEMI_ML7223n_IOH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803),
- .driver_data = INTEL_EG20T_PCH },
- { 0, }
+ { PCI_DEVICE_DATA(INTEL, EG20T_PCH, INTEL_EG20T_PCH) },
+ { PCI_DEVICE_DATA(ROHM, ML7223m_IOH, OKISEMI_ML7223m_IOH) },
+ { PCI_DEVICE_DATA(ROHM, ML7223n_IOH, OKISEMI_ML7223n_IOH) },
+ { PCI_DEVICE_DATA(ROHM, EG20T_PCH, INTEL_EG20T_PCH) },
+ { }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
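
The new PCI_DEVICE_ID_* defines near the top of this file exist to serve PCI_DEVICE_DATA(): that macro token-pastes PCI_VENDOR_ID_<vend> and PCI_DEVICE_ID_<vend>_<dev>, so both identifiers must be visible where the ID table is built. A small sketch with a hypothetical device (FOO and 0x1234 are made up):

#include <linux/module.h>
#include <linux/pci.h>

#define PCI_DEVICE_ID_INTEL_FOO	0x1234	/* hypothetical device ID */

static const struct pci_device_id foo_pci_ids[] = {
	/* expands to .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_FOO,
	 * .subvendor/.subdevice = PCI_ANY_ID, .driver_data = 0
	 */
	{ PCI_DEVICE_DATA(INTEL, FOO, 0) },
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_pci_ids);
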
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index 81a47ae09ff8..67071bea08c2 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#include <linux/bitmap.h>
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index e342a6dc4c6c..f91e876fd969 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -27,6 +27,7 @@
#define GPIO_TYPE_V1 (0) /* GPIO Version ID reserved */
#define GPIO_TYPE_V2 (0x01000C2B) /* GPIO Version ID 0x01000C2B */
+#define GPIO_TYPE_V2_1 (0x0101157C) /* GPIO Version ID 0x0101157C */
static const struct rockchip_gpio_regs gpio_regs_v1 = {
.port_dr = 0x00,
@@ -664,7 +665,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
id = readl(bank->reg_base + gpio_regs_v2.version_id);
/* If not gpio v2, that is default to v1. */
- if (id == GPIO_TYPE_V2) {
+ if (id == GPIO_TYPE_V2 || id == GPIO_TYPE_V2_1) {
bank->gpio_regs = &gpio_regs_v2;
bank->gpio_type = GPIO_TYPE_V2;
bank->db_clk = of_clk_get(bank->of_node, 1);
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 98109839102f..1020c2feb249 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -991,28 +991,22 @@ static struct configfs_attribute *gpio_sim_device_config_attrs[] = {
};
struct gpio_sim_chip_name_ctx {
- struct gpio_sim_device *dev;
+ struct fwnode_handle *swnode;
char *page;
};
static int gpio_sim_emit_chip_name(struct device *dev, void *data)
{
struct gpio_sim_chip_name_ctx *ctx = data;
- struct fwnode_handle *swnode;
- struct gpio_sim_bank *bank;
/* This would be the sysfs device exported in /sys/class/gpio. */
if (dev->class)
return 0;
- swnode = dev_fwnode(dev);
+ if (device_match_fwnode(dev, ctx->swnode))
+ return sprintf(ctx->page, "%s\n", dev_name(dev));
- list_for_each_entry(bank, &ctx->dev->bank_list, siblings) {
- if (bank->swnode == swnode)
- return sprintf(ctx->page, "%s\n", dev_name(dev));
- }
-
- return -ENODATA;
+ return 0;
}
static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
@@ -1020,7 +1014,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
{
struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
- struct gpio_sim_chip_name_ctx ctx = { dev, page };
+ struct gpio_sim_chip_name_ctx ctx = { bank->swnode, page };
int ret;
mutex_lock(&dev->lock);
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 49aac2bb8d2c..51539185400d 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SPEAr platform SPI chipselect abstraction over gpiolib
*
* Copyright (C) 2012 ST Microelectronics
* Shiraz Hashim <shiraz.linux.kernel@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/err.h>
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index ff2d2a1f9c73..e4fb4cb38a0f 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -443,15 +443,12 @@ static int tegra_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
return 0;
}
-static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+static int tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
- struct irq_fwspec *fwspec;
-
- fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 3;
@@ -459,7 +456,7 @@ static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip,
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
- return fwspec;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index de28a68daea0..54d9fa7da9c1 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -621,16 +621,13 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
return 0;
}
-static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+static int tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
struct tegra_gpio *gpio = gpiochip_get_data(chip);
- struct irq_fwspec *fwspec;
-
- fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 3;
@@ -638,7 +635,7 @@ static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
- return fwspec;
+ return 0;
}
static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 9f66deab46ea..cc62c6e64103 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -15,8 +15,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <asm-generic/msi.h>
-
#define GPIO_RX_DAT 0x0
#define GPIO_TX_SET 0x8
@@ -408,18 +406,15 @@ static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
return 0;
}
-static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+static int thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
- msi_alloc_info_t *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return NULL;
+ msi_alloc_info_t *info = &gfwspec->msiinfo;
info->hwirq = parent_hwirq;
- return info;
+ return 0;
}
static int thunderx_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index 99d5a84a9129..a09b1e69b072 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#include <linux/gpio/driver.h>
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 8c0d82d926dd..95d80ba14bee 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO driver for the TS-4800 board
*
* Copyright (c) 2016 - Savoir-faire Linux
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/gpio/driver.h>
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index de249726230e..5046e51af8df 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -593,27 +593,13 @@ out:
/* Cannot use as gpio_twl4030_probe() calls us */
static int gpio_twl4030_remove(struct platform_device *pdev)
{
- struct twl4030_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
- int status;
-
- if (pdata && pdata->teardown) {
- status = pdata->teardown(&pdev->dev, priv->gpio_chip.base,
- TWL4030_GPIO_MAX);
- if (status) {
- dev_dbg(&pdev->dev, "teardown --> %d\n", status);
- return status;
- }
- }
gpiochip_remove(&priv->gpio_chip);
- if (is_module())
- return 0;
-
/* REVISIT no support yet for deregistering all the IRQs */
- WARN_ON(1);
- return -EIO;
+ WARN_ON(!is_module());
+ return 0;
}
static const struct of_device_id twl_gpio_match[] = {
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index d2a8644864c3..386e69300332 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -64,34 +64,14 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
ucb->gc.can_sleep = true;
err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
- if (err)
- goto err;
-
- if (ucb->gpio_setup)
- err = ucb->gpio_setup(&dev->dev, ucb->gc.ngpio);
err:
return err;
}
-static int ucb1400_gpio_remove(struct platform_device *dev)
-{
- int err = 0;
- struct ucb1400_gpio *ucb = platform_get_drvdata(dev);
-
- if (ucb && ucb->gpio_teardown) {
- err = ucb->gpio_teardown(&dev->dev, ucb->gc.ngpio);
- if (err)
- return err;
- }
-
- return err;
-}
-
static struct platform_driver ucb1400_gpio_driver = {
.probe = ucb1400_gpio_probe,
- .remove = ucb1400_gpio_remove,
.driver = {
.name = "ucb1400_gpio"
},
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index e6534ea1eaa7..5e108ba9956a 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -103,15 +103,12 @@ static int visconti_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
return -EINVAL;
}
-static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+static int visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
- struct irq_fwspec *fwspec;
-
- fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 3;
@@ -119,7 +116,7 @@ static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip,
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
- return fwspec;
+ return 0;
}
static int visconti_gpio_probe(struct platform_device *pdev)
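
The same conversion recurs in gpio-msc313, gpio-tegra, gpio-tegra186, gpio-thunderx and here: the populate_parent_fwspec callback now fills a caller-provided union gpio_irq_fwspec and returns 0 or a negative errno, instead of kmalloc()ing an irq_fwspec and returning the pointer. A generic sketch of the new callback shape (foo_* is a placeholder driver):

#include <linux/gpio/driver.h>

static int foo_populate_parent_fwspec(struct gpio_chip *gc,
				      union gpio_irq_fwspec *gfwspec,
				      unsigned int parent_hwirq,
				      unsigned int parent_type)
{
	/* no allocation: the gpiolib core owns the union and passes it in */
	struct irq_fwspec *fwspec = &gfwspec->fwspec;

	fwspec->fwnode = gc->irq.parent_domain->fwnode;
	fwspec->param_count = 3;
	fwspec->param[0] = 0;		/* e.g. GIC_SPI on the SoCs above */
	fwspec->param[1] = parent_hwirq;
	fwspec->param[2] = parent_type;

	return 0;
}
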
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
deleted file mode 100644
index 8d09b619c166..000000000000
--- a/drivers/gpio/gpio-vr41xx.c
+++ /dev/null
@@ -1,541 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series General-purpose I/O Unit.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
-MODULE_LICENSE("GPL");
-
-#define GIUIOSELL 0x00
-#define GIUIOSELH 0x02
-#define GIUPIODL 0x04
-#define GIUPIODH 0x06
-#define GIUINTSTATL 0x08
-#define GIUINTSTATH 0x0a
-#define GIUINTENL 0x0c
-#define GIUINTENH 0x0e
-#define GIUINTTYPL 0x10
-#define GIUINTTYPH 0x12
-#define GIUINTALSELL 0x14
-#define GIUINTALSELH 0x16
-#define GIUINTHTSELL 0x18
-#define GIUINTHTSELH 0x1a
-#define GIUPODATL 0x1c
-#define GIUPODATEN 0x1c
-#define GIUPODATH 0x1e
- #define PIOEN0 0x0100
- #define PIOEN1 0x0200
-#define GIUPODAT 0x1e
-#define GIUFEDGEINHL 0x20
-#define GIUFEDGEINHH 0x22
-#define GIUREDGEINHL 0x24
-#define GIUREDGEINHH 0x26
-
-#define GIUUSEUPDN 0x1e0
-#define GIUTERMUPDN 0x1e2
-
-#define GPIO_HAS_PULLUPDOWN_IO 0x0001
-#define GPIO_HAS_OUTPUT_ENABLE 0x0002
-#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
-
-enum {
- GPIO_INPUT,
- GPIO_OUTPUT,
-};
-
-static DEFINE_SPINLOCK(giu_lock);
-static unsigned long giu_flags;
-
-static void __iomem *giu_base;
-static struct gpio_chip vr41xx_gpio_chip;
-
-#define giu_read(offset) readw(giu_base + (offset))
-#define giu_write(offset, value) writew((value), giu_base + (offset))
-
-#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
-#define GIUINT_HIGH_OFFSET 16
-#define GIUINT_HIGH_MAX 32
-
-static inline u16 giu_set(u16 offset, u16 set)
-{
- u16 data;
-
- data = giu_read(offset);
- data |= set;
- giu_write(offset, data);
-
- return data;
-}
-
-static inline u16 giu_clear(u16 offset, u16 clear)
-{
- u16 data;
-
- data = giu_read(offset);
- data &= ~clear;
- giu_write(offset, data);
-
- return data;
-}
-
-static void ack_giuint_low(struct irq_data *d)
-{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static void mask_giuint_low(struct irq_data *d)
-{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static void mask_ack_giuint_low(struct irq_data *d)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(d->irq);
- giu_clear(GIUINTENL, 1 << pin);
- giu_write(GIUINTSTATL, 1 << pin);
-}
-
-static void unmask_giuint_low(struct irq_data *d)
-{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static unsigned int startup_giuint(struct irq_data *data)
-{
- int ret;
-
- ret = gpiochip_lock_as_irq(&vr41xx_gpio_chip, irqd_to_hwirq(data));
- if (ret) {
- dev_err(vr41xx_gpio_chip.parent,
- "unable to lock HW IRQ %lu for IRQ\n",
- data->hwirq);
- return ret;
- }
-
- /* Satisfy the .enable semantics by unmasking the line */
- unmask_giuint_low(data);
- return 0;
-}
-
-static void shutdown_giuint(struct irq_data *data)
-{
- mask_giuint_low(data);
- gpiochip_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
-}
-
-static struct irq_chip giuint_low_irq_chip = {
- .name = "GIUINTL",
- .irq_ack = ack_giuint_low,
- .irq_mask = mask_giuint_low,
- .irq_mask_ack = mask_ack_giuint_low,
- .irq_unmask = unmask_giuint_low,
- .irq_startup = startup_giuint,
- .irq_shutdown = shutdown_giuint,
-};
-
-static void ack_giuint_high(struct irq_data *d)
-{
- giu_write(GIUINTSTATH,
- 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_giuint_high(struct irq_data *d)
-{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_ack_giuint_high(struct irq_data *d)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
- giu_clear(GIUINTENH, 1 << pin);
- giu_write(GIUINTSTATH, 1 << pin);
-}
-
-static void unmask_giuint_high(struct irq_data *d)
-{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static struct irq_chip giuint_high_irq_chip = {
- .name = "GIUINTH",
- .irq_ack = ack_giuint_high,
- .irq_mask = mask_giuint_high,
- .irq_mask_ack = mask_ack_giuint_high,
- .irq_unmask = unmask_giuint_high,
-};
-
-static int giu_get_irq(unsigned int irq)
-{
- u16 pendl, pendh, maskl, maskh;
- int i;
-
- pendl = giu_read(GIUINTSTATL);
- pendh = giu_read(GIUINTSTATH);
- maskl = giu_read(GIUINTENL);
- maskh = giu_read(GIUINTENH);
-
- maskl &= pendl;
- maskh &= pendh;
-
- if (maskl) {
- for (i = 0; i < 16; i++) {
- if (maskl & (1 << i))
- return GIU_IRQ(i);
- }
- } else if (maskh) {
- for (i = 0; i < 16; i++) {
- if (maskh & (1 << i))
- return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
- }
- }
-
- printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
- maskl, pendl, maskh, pendh);
-
- return -EINVAL;
-}
-
-void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
- irq_signal_t signal)
-{
- u16 mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPL, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELL, mask);
- else
- giu_clear(GIUINTHTSELL, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHL, mask);
- giu_clear(GIUREDGEINHL, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- default:
- giu_set(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- }
- }
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPL, mask);
- giu_clear(GIUINTHTSELL, mask);
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPH, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELH, mask);
- else
- giu_clear(GIUINTHTSELH, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHH, mask);
- giu_clear(GIUREDGEINHH, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- default:
- giu_set(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- }
- }
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPH, mask);
- giu_clear(GIUINTHTSELH, mask);
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
-
-void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
-{
- u16 mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELL, mask);
- else
- giu_clear(GIUINTALSELL, mask);
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELH, mask);
- else
- giu_clear(GIUINTALSELH, mask);
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
-
-static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
-{
- u16 offset, mask, reg;
- unsigned long flags;
-
- if (pin >= chip->ngpio)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUIOSELL;
- mask = 1 << pin;
- } else if (pin < 32) {
- offset = GIUIOSELH;
- mask = 1 << (pin - 16);
- } else {
- if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
- offset = GIUPODATEN;
- mask = 1 << (pin - 32);
- } else {
- switch (pin) {
- case 48:
- offset = GIUPODATH;
- mask = PIOEN0;
- break;
- case 49:
- offset = GIUPODATH;
- mask = PIOEN1;
- break;
- default:
- return -EINVAL;
- }
- }
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (dir == GPIO_OUTPUT)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-
-static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
-{
- u16 reg, mask;
-
- if (pin >= chip->ngpio)
- return -EINVAL;
-
- if (pin < 16) {
- reg = giu_read(GIUPIODL);
- mask = 1 << pin;
- } else if (pin < 32) {
- reg = giu_read(GIUPIODH);
- mask = 1 << (pin - 16);
- } else if (pin < 48) {
- reg = giu_read(GIUPODATL);
- mask = 1 << (pin - 32);
- } else {
- reg = giu_read(GIUPODATH);
- mask = 1 << (pin - 48);
- }
-
- if (reg & mask)
- return 1;
-
- return 0;
-}
-
-static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
- int value)
-{
- u16 offset, mask, reg;
- unsigned long flags;
-
- if (pin >= chip->ngpio)
- return;
-
- if (pin < 16) {
- offset = GIUPIODL;
- mask = 1 << pin;
- } else if (pin < 32) {
- offset = GIUPIODH;
- mask = 1 << (pin - 16);
- } else if (pin < 48) {
- offset = GIUPODATL;
- mask = 1 << (pin - 32);
- } else {
- offset = GIUPODATH;
- mask = 1 << (pin - 48);
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (value)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-}
-
-
-static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return giu_set_direction(chip, offset, GPIO_INPUT);
-}
-
-static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- vr41xx_gpio_set(chip, offset, value);
-
- return giu_set_direction(chip, offset, GPIO_OUTPUT);
-}
-
-static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- return GIU_IRQ_BASE + offset;
-}
-
-static struct gpio_chip vr41xx_gpio_chip = {
- .label = "vr41xx",
- .owner = THIS_MODULE,
- .direction_input = vr41xx_gpio_direction_input,
- .get = vr41xx_gpio_get,
- .direction_output = vr41xx_gpio_direction_output,
- .set = vr41xx_gpio_set,
- .to_irq = vr41xx_gpio_to_irq,
-};
-
-static int giu_probe(struct platform_device *pdev)
-{
- unsigned int trigger, i, pin;
- struct irq_chip *chip;
- int irq;
-
- switch (pdev->id) {
- case GPIO_50PINS_PULLUPDOWN:
- giu_flags = GPIO_HAS_PULLUPDOWN_IO;
- vr41xx_gpio_chip.ngpio = 50;
- break;
- case GPIO_36PINS:
- vr41xx_gpio_chip.ngpio = 36;
- break;
- case GPIO_48PINS_EDGE_SELECT:
- giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
- vr41xx_gpio_chip.ngpio = 48;
- break;
- default:
- dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
- return -ENODEV;
- }
-
- giu_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(giu_base))
- return PTR_ERR(giu_base);
-
- vr41xx_gpio_chip.parent = &pdev->dev;
-
- if (gpiochip_add_data(&vr41xx_gpio_chip, NULL))
- return -ENODEV;
-
- giu_write(GIUINTENL, 0);
- giu_write(GIUINTENH, 0);
-
- trigger = giu_read(GIUINTTYPH) << 16;
- trigger |= giu_read(GIUINTTYPL);
- for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
- pin = GPIO_PIN_OF_IRQ(i);
- if (pin < GIUINT_HIGH_OFFSET)
- chip = &giuint_low_irq_chip;
- else
- chip = &giuint_high_irq_chip;
-
- if (trigger & (1 << pin))
- irq_set_chip_and_handler(i, chip, handle_edge_irq);
- else
- irq_set_chip_and_handler(i, chip, handle_level_irq);
-
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq >= nr_irqs)
- return -EBUSY;
-
- return cascade_irq(irq, giu_get_irq);
-}
-
-static int giu_remove(struct platform_device *pdev)
-{
- if (giu_base) {
- giu_base = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver giu_device_driver = {
- .probe = giu_probe,
- .remove = giu_remove,
- .driver = {
- .name = "GIU",
- },
-};
-
-module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 5078631d8014..b098f2dc196b 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -4,7 +4,6 @@
* Copyright (C) 2016 William Breathitt Gray
*/
#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -17,8 +16,9 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
-#define WS16C48_EXTENT 16
+#define WS16C48_EXTENT 11
#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
static unsigned int base[MAX_NUM_WS16C48];
@@ -31,6 +31,20 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
/**
+ * struct ws16c48_reg - device register structure
+ * @port: Port 0 through 5 I/O
+ * @int_pending: Interrupt Pending
+ * @page_lock: Register page (Bits 7-6) and I/O port lock (Bits 5-0)
+ * @pol_enab_int_id: Interrupt polarity, enable, and ID
+ */
+struct ws16c48_reg {
+ u8 port[6];
+ u8 int_pending;
+ u8 page_lock;
+ u8 pol_enab_int_id[3];
+};
+
+/**
* struct ws16c48_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @io_state: bit I/O state (whether bit is set to input or output)
@@ -38,7 +52,7 @@ MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
* @flow_mask: IRQ flow type mask for the respective I/O bits
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
*/
struct ws16c48_gpio {
struct gpio_chip chip;
@@ -47,7 +61,7 @@ struct ws16c48_gpio {
raw_spinlock_t lock;
unsigned long irq_mask;
unsigned long flow_mask;
- void __iomem *base;
+ struct ws16c48_reg __iomem *reg;
};
static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -73,7 +87,7 @@ static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ws16c48gpio->io_state[port] |= mask;
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -95,7 +109,7 @@ static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
ws16c48gpio->out_state[port] |= mask;
else
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -118,7 +132,7 @@ static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- port_state = ioread8(ws16c48gpio->base + port);
+ port_state = ioread8(ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -131,14 +145,16 @@ static int ws16c48_gpio_get_multiple(struct gpio_chip *chip,
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
unsigned long offset;
unsigned long gpio_mask;
- void __iomem *port_addr;
+ size_t index;
+ u8 __iomem *port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
- port_addr = ws16c48gpio->base + offset / 8;
+ index = offset / 8;
+ port_addr = ws16c48gpio->reg->port + index;
port_state = ioread8(port_addr) & gpio_mask;
bitmap_set_value8(bits, port_state, offset);
@@ -166,7 +182,7 @@ static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ws16c48gpio->out_state[port] |= mask;
else
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -178,13 +194,13 @@ static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
unsigned long offset;
unsigned long gpio_mask;
size_t index;
- void __iomem *port_addr;
+ u8 __iomem *port_addr;
unsigned long bitmask;
unsigned long flags;
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
index = offset / 8;
- port_addr = ws16c48gpio->base + index;
+ port_addr = ws16c48gpio->reg->port + index;
/* mask out GPIO configured for input */
gpio_mask &= ~ws16c48gpio->io_state[index];
@@ -219,10 +235,15 @@ static void ws16c48_irq_ack(struct irq_data *data)
port_state = ws16c48gpio->irq_mask >> (8*port);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(port_state & ~mask, ws16c48gpio->base + 8 + port);
- iowrite8(port_state | mask, ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+
+ /* Clear pending interrupt */
+ iowrite8(port_state & ~mask, ws16c48gpio->reg->pol_enab_int_id + port);
+ iowrite8(port_state | mask, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -235,6 +256,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -243,10 +265,16 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ port_state = ws16c48gpio->irq_mask >> (8 * port);
+
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Disable interrupt */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -259,6 +287,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -267,10 +296,16 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask |= mask;
+ port_state = ws16c48gpio->irq_mask >> (8 * port);
+
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Enable interrupt */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -283,6 +318,7 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -304,9 +340,16 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return -EINVAL;
}
- iowrite8(0x40, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->flow_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ port_state = ws16c48gpio->flow_mask >> (8 * port);
+
+ /* Select Register Page 1; Unlock all I/O ports */
+ iowrite8(0x40, &ws16c48gpio->reg->page_lock);
+
+ /* Set interrupt polarity */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -325,25 +368,26 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
{
struct ws16c48_gpio *const ws16c48gpio = dev_id;
struct gpio_chip *const chip = &ws16c48gpio->chip;
+ struct ws16c48_reg __iomem *const reg = ws16c48gpio->reg;
unsigned long int_pending;
unsigned long port;
unsigned long int_id;
unsigned long gpio;
- int_pending = ioread8(ws16c48gpio->base + 6) & 0x7;
+ int_pending = ioread8(&reg->int_pending) & 0x7;
if (!int_pending)
return IRQ_NONE;
/* loop until all pending interrupts are handled */
do {
for_each_set_bit(port, &int_pending, 3) {
- int_id = ioread8(ws16c48gpio->base + 8 + port);
+ int_id = ioread8(reg->pol_enab_int_id + port);
for_each_set_bit(gpio, &int_id, 8)
generic_handle_domain_irq(chip->irq.domain,
gpio + 8*port);
}
- int_pending = ioread8(ws16c48gpio->base + 6) & 0x7;
+ int_pending = ioread8(&reg->int_pending) & 0x7;
} while (int_pending);
return IRQ_HANDLED;
@@ -369,12 +413,16 @@ static int ws16c48_irq_init_hw(struct gpio_chip *gc)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(gc);
- /* Disable IRQ by default */
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(0, ws16c48gpio->base + 8);
- iowrite8(0, ws16c48gpio->base + 9);
- iowrite8(0, ws16c48gpio->base + 10);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+
+ /* Disable interrupts for all lines */
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[0]);
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[1]);
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[2]);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
return 0;
}
@@ -396,8 +444,8 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- ws16c48gpio->base = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
- if (!ws16c48gpio->base)
+ ws16c48gpio->reg = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
+ if (!ws16c48gpio->reg)
return -ENOMEM;
ws16c48gpio->chip.label = name;
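The ws16c48 conversion above replaces raw base-plus-offset arithmetic with a struct ws16c48_reg overlay, and the page_lock writes (0x80 selects register page 2, 0xC0 selects page 3, both leaving all I/O ports unlocked) bracket every access to the page-dependent pol_enab_int_id window. A minimal sketch of that layout and access pattern - the size check and the helper name are illustrative, not part of the patch:

    #include <linux/build_bug.h>
    #include <linux/io.h>
    #include <linux/types.h>

    struct ws16c48_reg {
            u8 port[6];             /* 0x0-0x5: I/O port data            */
            u8 int_pending;         /* 0x6:     interrupt pending        */
            u8 page_lock;           /* 0x7:     page select + port lock  */
            u8 pol_enab_int_id[3];  /* 0x8-0xA: page-dependent registers */
    };

    /* the overlay spans the 11 consecutive I/O addresses the card decodes */
    static_assert(sizeof(struct ws16c48_reg) == 11);

    /* illustrative helper: write a page-2 register, then restore page 3 */
    static void ws16c48_write_page2(struct ws16c48_reg __iomem *reg,
                                    unsigned int idx, u8 val)
    {
            iowrite8(0x80, &reg->page_lock);        /* page 2, ports unlocked */
            iowrite8(val, reg->pol_enab_int_id + idx);
            iowrite8(0xC0, &reg->page_lock);        /* page 3, ports unlocked */
    }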
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 43ca52fa6f9a..fd88500399c6 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -281,11 +281,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
static int iproc_gpio_remove(struct platform_device *pdev)
{
- struct iproc_gpio_chip *chip;
-
- chip = platform_get_drvdata(pdev);
- if (!chip)
- return -ENODEV;
+ struct iproc_gpio_chip *chip = platform_get_drvdata(pdev);
if (chip->intr) {
u32 val;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index b6d3a57e27ed..2fc6b6ff7f16 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
map[index] &= ~(0xFFFFFFFFul << offset);
- map[index] |= v << offset;
+ map[index] |= (unsigned long)v << offset;
}
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
@@ -117,12 +117,14 @@ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+
xgpio_set_value32(a, bit, xgpio_readreg(addr));
}
static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+
xgpio_writereg(addr, xgpio_get_value32(a, bit));
}
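The cast added in xgpio_set_value32() matters on 64-bit kernels: offset is either 0 or 32, and shifting the 32-bit value v left by 32 would be a shift by the operand's full width, so the upper half of the 64-bit map word could never be set. Promoting to unsigned long first performs the shift in 64-bit arithmetic. A stand-alone illustration with made-up values (not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long map = 0;
            unsigned int v = 0xDEADBEEF;
            unsigned int offset = 32;       /* upper word of a 64-bit map entry */

            /* map |= v << offset;            32-bit shift by 32: undefined      */
            map |= (unsigned long)v << offset;      /* 64-bit shift: well defined */

            printf("map = %#lx\n", map);    /* prints 0xdeadbeef00000000 on LP64 */
            return 0;
    }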
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c2523ac26fac..9be1376f9a62 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -687,6 +687,9 @@ int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
case ACPI_PIN_CONFIG_PULLDOWN:
*lookupflags |= GPIO_PULL_DOWN;
break;
+ case ACPI_PIN_CONFIG_NOPULL:
+ *lookupflags |= GPIO_PULL_DISABLE;
+ break;
default:
break;
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 0c9a63becfef..f8041d4898d1 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -421,6 +421,10 @@ out_free_lh:
* @work: the worker that implements software debouncing
* @sw_debounced: flag indicating if the software debouncer is active
* @level: the current debounced physical level of the line
+ * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
+ * @raw_level: the line level at the time of event
+ * @total_discard_seq: the running counter of the discarded events
+ * @last_seqno: the last sequence number before debounce period expires
*/
struct line {
struct gpio_desc *desc;
@@ -430,12 +434,15 @@ struct line {
struct linereq *req;
unsigned int irq;
/*
- * eflags is set by edge_detector_setup(), edge_detector_stop() and
- * edge_detector_update(), which are themselves mutually exclusive,
- * and is accessed by edge_irq_thread() and debounce_work_func(),
- * which can both live with a slightly stale value.
+ * The flags for the active edge detector configuration.
+ *
+ * edflags is set by linereq_create(), linereq_free(), and
+ * linereq_set_config_unlocked(), which are themselves mutually
+ * exclusive, and is accessed by edge_irq_thread(),
+ * process_hw_ts_thread() and debounce_work_func(),
+ * which can all live with a slightly stale value.
*/
- u64 eflags;
+ u64 edflags;
/*
* timestamp_ns and req_seqno are accessed only by
* edge_irq_handler() and edge_irq_thread(), which are themselves
@@ -465,9 +472,7 @@ struct line {
* stale value.
*/
unsigned int level;
- /*
- * -- hte specific fields --
- */
+#ifdef CONFIG_HTE
struct hte_ts_desc hdesc;
/*
* HTE provider sets line level at the time of event. The valid
@@ -484,6 +489,7 @@ struct line {
* last sequence number before debounce period expires.
*/
u32 last_seqno;
+#endif /* CONFIG_HTE */
};
/**
@@ -541,6 +547,12 @@ struct linereq {
GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_BIAS_FLAGS)
+/* subset of flags relevant for edge detector configuration */
+#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
+ (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
+ GPIO_V2_LINE_EDGE_FLAGS)
+
static void linereq_put_event(struct linereq *lr,
struct gpio_v2_line_event *le)
{
@@ -563,19 +575,28 @@ static u64 line_event_timestamp(struct line *line)
{
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ else if (IS_ENABLED(CONFIG_HTE) &&
+ test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
}
+static u32 line_event_id(int level)
+{
+ return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
+ GPIO_V2_LINE_EVENT_FALLING_EDGE;
+}
+
+#ifdef CONFIG_HTE
+
static enum hte_return process_hw_ts_thread(void *p)
{
struct line *line;
struct linereq *lr;
struct gpio_v2_line_event le;
+ u64 edflags;
int level;
- u64 eflags;
if (!p)
return HTE_CB_HANDLED;
@@ -586,29 +607,26 @@ static enum hte_return process_hw_ts_thread(void *p)
memset(&le, 0, sizeof(le));
le.timestamp_ns = line->timestamp_ns;
- eflags = READ_ONCE(line->eflags);
+ edflags = READ_ONCE(line->edflags);
- if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
- if (line->raw_level >= 0) {
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
- level = !line->raw_level;
- else
- level = line->raw_level;
- } else {
- level = gpiod_get_value_cansleep(line->desc);
- }
+ switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
+ case GPIO_V2_LINE_FLAG_EDGE_BOTH:
+ level = (line->raw_level >= 0) ?
+ line->raw_level :
+ gpiod_get_raw_value_cansleep(line->desc);
- if (level)
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
- /* Emit low-to-high event */
+ if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
+ level = !level;
+
+ le.id = line_event_id(level);
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
- /* Emit high-to-low event */
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else {
+ break;
+ default:
return HTE_CB_HANDLED;
}
le.line_seqno = line->line_seqno;
@@ -655,12 +673,47 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
return HTE_CB_HANDLED;
}
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ int ret;
+ unsigned long flags = 0;
+ struct hte_ts_desc *hdesc = &line->hdesc;
+
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_FALLING_EDGE_TS :
+ HTE_RISING_EDGE_TS;
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_RISING_EDGE_TS :
+ HTE_FALLING_EDGE_TS;
+
+ line->total_discard_seq = 0;
+
+ hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
+ line->desc);
+
+ ret = hte_ts_get(NULL, hdesc, 0);
+ if (ret)
+ return ret;
+
+ return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
+ line);
+}
+
+#else
+
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ return 0;
+}
+#endif /* CONFIG_HTE */
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
struct gpio_v2_line_event le;
- u64 eflags;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
@@ -679,23 +732,17 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
}
line->timestamp_ns = 0;
- eflags = READ_ONCE(line->eflags);
- if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
- int level = gpiod_get_value_cansleep(line->desc);
-
- if (level)
- /* Emit low-to-high event */
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
- /* Emit low-to-high event */
+ switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
+ case GPIO_V2_LINE_FLAG_EDGE_BOTH:
+ le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
- /* Emit high-to-low event */
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else {
+ break;
+ default:
return IRQ_NONE;
}
line->line_seqno++;
@@ -760,16 +807,16 @@ static void debounce_work_func(struct work_struct *work)
struct gpio_v2_line_event le;
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
- int level, diff_seqno;
- u64 eflags;
+ u64 eflags, edflags = READ_ONCE(line->edflags);
+ int level = -1;
+#ifdef CONFIG_HTE
+ int diff_seqno;
- if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
level = line->raw_level;
- if (level < 0)
- level = gpiod_get_raw_value_cansleep(line->desc);
- } else {
+#endif
+ if (level < 0)
level = gpiod_get_raw_value_cansleep(line->desc);
- }
if (level < 0) {
pr_debug_ratelimited("debouncer failed to read line value\n");
return;
@@ -781,12 +828,12 @@ static void debounce_work_func(struct work_struct *work)
WRITE_ONCE(line->level, level);
/* -- edge detection -- */
- eflags = READ_ONCE(line->eflags);
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (!eflags)
return;
/* switch from physical level to logical - if they differ */
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
level = !level;
/* ignore edges that are not being monitored */
@@ -800,7 +847,8 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
- if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+#ifdef CONFIG_HTE
+ if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
/* discard events except the last one */
line->total_discard_seq -= 1;
diff_seqno = line->last_seqno - line->total_discard_seq -
@@ -809,51 +857,21 @@ static void debounce_work_func(struct work_struct *work)
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
- } else {
+ } else
+#endif /* CONFIG_HTE */
+ {
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_inc_return(&lr->seqno);
}
- if (level)
- /* Emit low-to-high event */
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ le.id = line_event_id(level);
linereq_put_event(lr, &le);
}
-static int hte_edge_setup(struct line *line, u64 eflags)
-{
- int ret;
- unsigned long flags = 0;
- struct hte_ts_desc *hdesc = &line->hdesc;
-
- if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
- HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS;
- if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
- HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS;
-
- line->total_discard_seq = 0;
-
- hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags,
- NULL, line->desc);
-
- ret = hte_ts_get(NULL, hdesc, 0);
- if (ret)
- return ret;
-
- return hte_request_ts_ns(hdesc, process_hw_ts,
- process_hw_ts_thread, line);
-}
-
-static int debounce_setup(struct line *line,
- unsigned int debounce_period_us, bool hte_req)
+static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
@@ -873,7 +891,8 @@ static int debounce_setup(struct line *line,
if (level < 0)
return level;
- if (!hte_req) {
+ if (!(IS_ENABLED(CONFIG_HTE) &&
+ test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -885,9 +904,7 @@ static int debounce_setup(struct line *line,
return ret;
line->irq = irq;
} else {
- ret = hte_edge_setup(line,
- GPIO_V2_LINE_FLAG_EDGE_RISING |
- GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
if (ret)
return ret;
}
@@ -926,19 +943,21 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
return 0;
}
-static void edge_detector_stop(struct line *line, bool hte_en)
+static void edge_detector_stop(struct line *line)
{
- if (line->irq && !hte_en) {
+ if (line->irq) {
free_irq(line->irq, line);
line->irq = 0;
}
- if (hte_en)
+#ifdef CONFIG_HTE
+ if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
hte_ts_put(&line->hdesc);
+#endif
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
- WRITE_ONCE(line->eflags, 0);
+ WRITE_ONCE(line->edflags, 0);
if (line->desc)
WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
@@ -946,23 +965,23 @@ static void edge_detector_stop(struct line *line, bool hte_en)
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
- unsigned int line_idx,
- u64 eflags, bool hte_req)
+ unsigned int line_idx, u64 edflags)
{
u32 debounce_period_us;
unsigned long irqflags = 0;
+ u64 eflags;
int irq, ret;
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
ret = kfifo_alloc(&line->req->events,
line->req->event_buffer_size, GFP_KERNEL);
if (ret)
return ret;
}
- WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
- ret = debounce_setup(line, debounce_period_us, hte_req);
+ ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
@@ -972,8 +991,9 @@ static int edge_detector_setup(struct line *line,
if (!eflags || READ_ONCE(line->sw_debounced))
return 0;
- if (hte_req)
- return hte_edge_setup(line, eflags);
+ if (IS_ENABLED(CONFIG_HTE) &&
+ (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return hte_edge_setup(line, edflags);
irq = gpiod_to_irq(line->desc);
if (irq < 0)
@@ -999,35 +1019,29 @@ static int edge_detector_setup(struct line *line,
static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
- unsigned int line_idx,
- u64 flags, bool polarity_change,
- bool prev_hte_flag)
+ unsigned int line_idx, u64 edflags)
{
- u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS;
+ u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
- bool hte_change = (prev_hte_flag !=
- ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0));
- if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)
- && !hte_change)
+ if ((active_edflags == edflags) &&
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- WRITE_ONCE(line->eflags, eflags);
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return 0;
}
/* reconfiguring edge detection or sw debounce being disabled */
- if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag ||
+ if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+ (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
(!debounce_period_us && READ_ONCE(line->sw_debounced)))
- edge_detector_stop(line, prev_hte_flag);
+ edge_detector_stop(line);
- return edge_detector_setup(line, lc, line_idx, eflags,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ return edge_detector_setup(line, lc, line_idx, edflags);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
@@ -1063,6 +1077,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
/* Return an error if an unknown flag is set */
if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_HTE) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return -EOPNOTSUPP;
+
/*
* Do not allow both INPUT and OUTPUT flags to be set as they are
* contradictory.
@@ -1072,7 +1091,8 @@ static int gpio_v2_line_flags_validate(u64 flags)
return -EINVAL;
/* Only allow one event clock source */
- if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+ if (IS_ENABLED(CONFIG_HTE) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
(flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
return -EINVAL;
@@ -1296,22 +1316,17 @@ static long linereq_set_config_unlocked(struct linereq *lr,
struct gpio_v2_line_config *lc)
{
struct gpio_desc *desc;
+ struct line *line;
unsigned int i;
- u64 flags;
- bool polarity_change;
- bool prev_hte_flag;
+ u64 flags, edflags;
int ret;
for (i = 0; i < lr->num_lines; i++) {
+ line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(lc, i);
- polarity_change =
- (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
- ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
-
- prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags);
-
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -1319,7 +1334,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- edge_detector_stop(&lr->lines[i], prev_hte_flag);
+ edge_detector_stop(line);
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
@@ -1328,12 +1343,13 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (ret)
return ret;
- ret = edge_detector_update(&lr->lines[i], lc, i,
- flags, polarity_change, prev_hte_flag);
+ ret = edge_detector_update(line, lc, i, edflags);
if (ret)
return ret;
}
+ WRITE_ONCE(line->edflags, edflags);
+
blocking_notifier_call_chain(&desc->gdev->notifier,
GPIO_V2_LINE_CHANGED_CONFIG,
desc);
@@ -1460,15 +1476,12 @@ static ssize_t linereq_read(struct file *file,
static void linereq_free(struct linereq *lr)
{
unsigned int i;
- bool hte = false;
for (i = 0; i < lr->num_lines; i++) {
- if (lr->lines[i].desc)
- hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
- &lr->lines[i].desc->flags);
- edge_detector_stop(&lr->lines[i], hte);
- if (lr->lines[i].desc)
+ if (lr->lines[i].desc) {
+ edge_detector_stop(&lr->lines[i]);
gpiod_free(lr->lines[i].desc);
+ }
}
kfifo_free(&lr->events);
kfree(lr->label);
@@ -1502,7 +1515,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
struct gpio_v2_line_config *lc;
struct linereq *lr;
struct file *file;
- u64 flags;
+ u64 flags, edflags;
unsigned int i;
int fd, ret;
@@ -1576,6 +1589,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (ret < 0)
goto out_free_linereq;
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -1592,12 +1606,13 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
ret = edge_detector_setup(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ edflags);
if (ret)
goto out_free_linereq;
}
+ lr->lines[i].edflags = edflags;
+
blocking_notifier_call_chain(&desc->gdev->notifier,
GPIO_V2_LINE_CHANGED_REQUESTED, desc);
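The gpiolib-cdev rework above folds edge selection, polarity and the HTE event clock into a single edflags word (masked by GPIO_V2_LINE_EDGE_DETECTOR_FLAGS) and routes the repeated rising/falling decision through line_event_id(). A condensed, stand-alone sketch of that dispatch shape - the flag values and names below are stand-ins, not the uAPI constants:

    #include <stdio.h>

    #define EDGE_RISING     0x1ULL          /* stand-ins for GPIO_V2_LINE_FLAG_* */
    #define EDGE_FALLING    0x2ULL
    #define EDGE_BOTH       (EDGE_RISING | EDGE_FALLING)
    #define ACTIVE_LOW      0x4ULL

    static char event_id(int level)
    {
            return level ? 'R' : 'F';       /* rising vs. falling event */
    }

    static char pick_event(unsigned long long edflags, int raw_level)
    {
            int level;

            switch (edflags & EDGE_BOTH) {
            case EDGE_BOTH:
                    level = raw_level;
                    if (edflags & ACTIVE_LOW)       /* physical -> logical */
                            level = !level;
                    return event_id(level);
            case EDGE_RISING:
                    return 'R';
            case EDGE_FALLING:
                    return 'F';
            default:
                    return 0;               /* edge detection not armed */
            }
    }

    int main(void)
    {
            printf("%c\n", pick_event(EDGE_BOTH | ACTIVE_LOW, 1));  /* prints F */
            return 0;
    }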
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 79da85d17b71..16a696249229 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -375,9 +375,6 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
-
-
-
static void devm_gpio_release(struct device *dev, void *res)
{
unsigned *gpio = res;
@@ -385,13 +382,6 @@ static void devm_gpio_release(struct device *dev, void *res)
gpio_free(*gpio);
}
-static int devm_gpio_match(struct device *dev, void *res, void *data)
-{
- unsigned *this = res, *gpio = data;
-
- return *this == *gpio;
-}
-
/**
* devm_gpio_request - request a GPIO for a managed device
* @dev: device to request the GPIO for
@@ -402,11 +392,7 @@ static int devm_gpio_match(struct device *dev, void *res, void *data)
* same arguments and performs the same function as
* gpio_request(). GPIOs requested with this function will be
* automatically freed on driver detach.
- *
- * If an GPIO allocated with this function needs to be freed
- * separately, devm_gpio_free() must be used.
*/
-
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
{
unsigned *dr;
@@ -459,24 +445,6 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
}
EXPORT_SYMBOL_GPL(devm_gpio_request_one);
-/**
- * devm_gpio_free - free a GPIO
- * @dev: device to free GPIO for
- * @gpio: GPIO to free
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as gpio_free().
- * This function instead of gpio_free() should be used to manually
- * free GPIOs allocated with devm_gpio_request().
- */
-void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
-
- WARN_ON(devres_release(dev, devm_gpio_release, devm_gpio_match,
- &gpio));
-}
-EXPORT_SYMBOL_GPL(devm_gpio_free);
-
static void devm_gpio_chip_release(void *data)
{
struct gpio_chip *gc = data;
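With devm_gpio_free() and its devres match helper removed, the remaining kernel-doc describes the only supported pattern: request the line with devm_gpio_request()/devm_gpio_request_one() and let devres release it on driver detach. A minimal, hypothetical probe showing that usage (driver name and GPIO number are made up):

    #include <linux/gpio.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            int ret;

            /* hypothetical legacy GPIO number; freed automatically on detach */
            ret = devm_gpio_request_one(&pdev->dev, 42,
                                        GPIOF_OUT_INIT_LOW, "foo-reset");
            if (ret)
                    return ret;

            /* ... rest of probe; no matching gpio_free() is needed ... */
            return 0;
    }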
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 3d6c3ffd5576..a037b50bef33 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -354,6 +354,9 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
if (flags & OF_GPIO_PULL_DOWN)
lflags |= GPIO_PULL_DOWN;
+ if (flags & OF_GPIO_PULL_DISABLE)
+ lflags |= GPIO_PULL_DISABLE;
+
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
@@ -556,6 +559,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_PULL_UP;
if (of_flags & OF_GPIO_PULL_DOWN)
*flags |= GPIO_PULL_DOWN;
+ if (of_flags & OF_GPIO_PULL_DISABLE)
+ *flags |= GPIO_PULL_DISABLE;
return desc;
}
@@ -621,6 +626,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*lflags |= GPIO_PULL_UP;
if (xlate_flags & OF_GPIO_PULL_DOWN)
*lflags |= GPIO_PULL_DOWN;
+ if (xlate_flags & OF_GPIO_PULL_DISABLE)
+ *lflags |= GPIO_PULL_DISABLE;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
@@ -720,7 +727,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ return device_match_of_node(&chip->gpiodev->dev, data);
}
static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
@@ -860,7 +867,8 @@ int of_mm_gpiochip_add_data(struct device_node *np,
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
- mm_gc->gc.of_node = np;
+ of_node_put(mm_gc->gc.of_node);
+ mm_gc->gc.of_node = of_node_get(np);
ret = gpiochip_add_data(gc, data);
if (ret)
@@ -868,6 +876,7 @@ int of_mm_gpiochip_add_data(struct device_node *np,
return 0;
err2:
+ of_node_put(np);
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
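The of_mm_gpiochip_add_data() hunk above is a reference-count balance on the device_node: drop whatever node may already be stored in gc.of_node, take a reference on the node being registered, and give that reference back on the error path. Reduced to its essentials (illustrative sketch, assuming a caller-supplied registration result):

    #include <linux/errno.h>
    #include <linux/of.h>

    /* illustrative only: keep get/put balanced around a stored of_node */
    static int store_of_node(struct device_node **slot, struct device_node *np,
                             int register_ret)
    {
            of_node_put(*slot);             /* drop any node stored earlier      */
            *slot = of_node_get(np);        /* hold a reference while stored     */

            if (register_ret) {             /* stands in for gpiochip_add_data() */
                    of_node_put(np);        /* error path: release the reference */
                    *slot = NULL;
                    return register_ret;
            }

            return 0;
    }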
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9535f48e18d1..cc9c0a12259e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1107,7 +1107,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = data;
- void *parent_arg;
+ union gpio_irq_fwspec gpio_parent_fwspec = {};
unsigned int parent_hwirq;
unsigned int parent_type;
struct gpio_irq_chip *girq = &gc->irq;
@@ -1147,14 +1147,15 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
irq_set_probe(irq);
/* This parent only handles asserted level IRQs */
- parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type);
- if (!parent_arg)
- return -ENOMEM;
+ ret = girq->populate_parent_alloc_arg(gc, &gpio_parent_fwspec,
+ parent_hwirq, parent_type);
+ if (ret)
+ return ret;
chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
irq, parent_hwirq);
irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
- ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg);
+ ret = irq_domain_alloc_irqs_parent(d, irq, 1, &gpio_parent_fwspec);
/*
* If the parent irqdomain is msi, the interrupts have already
* been allocated, so the EEXIST is good.
@@ -1166,7 +1167,6 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
"failed to allocate parent hwirq %d for hwirq %lu\n",
parent_hwirq, hwirq);
- kfree(parent_arg);
return ret;
}
@@ -1181,15 +1181,18 @@ static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops)
ops->activate = gpiochip_irq_domain_activate;
ops->deactivate = gpiochip_irq_domain_deactivate;
ops->alloc = gpiochip_hierarchy_irq_domain_alloc;
- ops->free = irq_domain_free_irqs_common;
/*
- * We only allow overriding the translate() function for
+ * We only allow overriding the translate() and free() functions for
* hierarchical chips, and this should only be done if the user
- * really need something other than 1:1 translation.
+ * really need something other than 1:1 translation for translate()
+ * callback and free if user wants to free up any resources which
+ * were allocated during callbacks, for example populate_parent_alloc_arg.
*/
if (!ops->translate)
ops->translate = gpiochip_hierarchy_irq_domain_translate;
+ if (!ops->free)
+ ops->free = irq_domain_free_irqs_common;
}
static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
@@ -1230,34 +1233,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
return !!gc->irq.parent_domain;
}
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
- struct irq_fwspec *fwspec;
-
- fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 2;
fwspec->param[0] = parent_hwirq;
fwspec->param[1] = parent_type;
- return fwspec;
+ return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
- struct irq_fwspec *fwspec;
-
- fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 4;
@@ -1266,7 +1263,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
fwspec->param[2] = 0;
fwspec->param[3] = parent_type;
- return fwspec;
+ return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
@@ -3945,9 +3942,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) {
+ if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
+ ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
+ ((lflags & GPIO_PULL_DOWN) && (lflags & GPIO_PULL_DISABLE))) {
gpiod_err(desc,
- "both pull-up and pull-down enabled, invalid configuration\n");
+ "multiple pull-up, pull-down or pull-disable enabled, invalid configuration\n");
return -EINVAL;
}
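With GPIO_PULL_DISABLE joining GPIO_PULL_UP and GPIO_PULL_DOWN, the bias lookup flags form a three-way mutually exclusive set, which the expanded check above enforces pair by pair. An equivalent, more compact way to state 'at most one bias flag set' (stand-in bit values, not the gpiolib macros):

    #include <stdio.h>

    #define PULL_UP         (1u << 0)       /* stand-ins for GPIO_PULL_* */
    #define PULL_DOWN       (1u << 1)
    #define PULL_DISABLE    (1u << 2)
    #define BIAS_MASK       (PULL_UP | PULL_DOWN | PULL_DISABLE)

    static int bias_flags_valid(unsigned int lflags)
    {
            /* valid iff zero or exactly one bias bit is set */
            return __builtin_popcount(lflags & BIAS_MASK) <= 1;
    }

    int main(void)
    {
            printf("%d\n", bias_flags_valid(PULL_UP));                  /* 1 */
            printf("%d\n", bias_flags_valid(PULL_UP | PULL_DISABLE));   /* 0 */
            return 0;
    }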
@@ -3955,6 +3954,8 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
set_bit(FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
set_bit(FLAG_PULL_DOWN, &desc->flags);
+ else if (lflags & GPIO_PULL_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7d0cd15b5ef..6ad39cf71bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -75,7 +75,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
- nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
+ sienna_cichlid.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
# add DF block
amdgpu-y += \
@@ -89,7 +89,7 @@ amdgpu-y += \
gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
- mmhub_v3_0_1.o
+ mmhub_v3_0_1.o gfxhub_v3_0_3.o
# add UMC block
amdgpu-y += \
@@ -115,7 +115,8 @@ amdgpu-y += \
psp_v11_0.o \
psp_v11_0_8.o \
psp_v12_0.o \
- psp_v13_0.o
+ psp_v13_0.o \
+ psp_v13_0_4.o
# add DCE block
amdgpu-y += \
@@ -133,7 +134,8 @@ amdgpu-y += \
gfx_v9_4_2.o \
gfx_v10_0.o \
imu_v11_0.o \
- gfx_v11_0.o
+ gfx_v11_0.o \
+ imu_v11_0_3.o
# add async DMA block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index c6cc493a5486..2b97b8a96fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r;
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
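Both aldebaran mode-2 helpers above stop rebuilding the XGMI device list themselves; the caller assembles it once into reset_context->reset_device_list and the helpers only walk it with list_for_each_entry(), bailing out if no list was supplied. The division of labour, reduced to a kernel-style sketch (structure and function names are placeholders):

    #include <linux/errno.h>
    #include <linux/list.h>

    struct dev_entry {
            struct list_head reset_list;
            /* ... per-device state ... */
    };

    /* caller: build the list once, hand it down through the reset context */
    static void build_reset_list(struct list_head *head,
                                 struct dev_entry *devs, int n)
    {
            int i;

            INIT_LIST_HEAD(head);
            for (i = 0; i < n; i++)
                    list_add_tail(&devs[i].reset_list, head);
    }

    /* callee: consume the prebuilt list; no local INIT_LIST_HEAD needed */
    static int perform_reset(struct list_head *reset_device_list)
    {
            struct dev_entry *tmp;

            if (!reset_device_list)
                    return -EINVAL;

            list_for_each_entry(tmp, reset_device_list, reset_list) {
                    /* lock, reset and unlock tmp here */
            }

            return 0;
    }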
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b075845a5328..79bb6fd83094 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,6 +274,9 @@ extern int amdgpu_vcnfw_log;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
+#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
+#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
+
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -317,7 +320,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -1020,7 +1023,7 @@ struct amdgpu_device {
bool psp_sysfs_en;
/* Chip product information */
- char product_number[16];
+ char product_number[20];
char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
@@ -1060,6 +1063,9 @@ struct amdgpu_device {
uint32_t scpm_status;
struct work_struct reset_work;
+
+ uint32_t amdgpu_reset_level_mask;
+ bool job_hang;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index bcc7ee02e0fc..6d72355ac492 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -390,14 +390,6 @@ static int acp_hw_init(void *handle)
i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
- i2s_pdata[3].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
- switch (adev->asic_type) {
- case CHIP_STONEY:
- i2s_pdata[3].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
- break;
- default:
- break;
- }
adev->acp.acp_res[0].name = "acp2x_dma";
adev->acp.acp_res[0].flags = IORESOURCE_MEM;
adev->acp.acp_res[0].start = acp_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 130060834b4e..55402d238919 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1066,6 +1066,12 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
+ /*
+ * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
+ * risky to do any special firmware-related preparations for entering
+ * S0ix even though the system is suspending to idle, so return false
+ * in that case.
+ */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
dev_warn_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5e53a5293935..091415a4abf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -135,6 +135,7 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 73bf8b5f2aa9..647220a8762d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
@@ -172,6 +173,9 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm,
struct svm_range_bo *svm_bo);
+#if defined(CONFIG_DEBUG_FS)
+int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
+#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
@@ -305,6 +309,10 @@ bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
+int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 alloc_flag);
+void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 alloc_flag);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 1d0dbff87d3f..469785d33791 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -159,11 +159,14 @@ static void amdkfd_fence_release(struct dma_fence *f)
}
/**
- * amdkfd_fence_check_mm - Check if @mm is same as that of the fence @f
- * if same return TRUE else return FALSE.
+ * amdkfd_fence_check_mm - Check whether to prevent eviction of @f by @mm
*
* @f: [IN] fence
* @mm: [IN] mm that needs to be verified
+ *
+ * Check if @mm is same as that of the fence @f, if same return TRUE else
+ * return FALSE.
+ * For svm bo, which support vram overcommitment, always return FALSE.
*/
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
@@ -171,7 +174,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
if (!fence)
return false;
- else if (fence->mm == mm)
+ else if (fence->mm == mm && !fence->svm_bo)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 08997092e7f7..2170db83e41d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -40,10 +40,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct {
@@ -129,7 +129,7 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
*
* Return: returns -ENOMEM in case of error, ZERO otherwise
*/
-static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag)
{
uint64_t reserved_for_pt =
@@ -149,7 +149,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
- vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -169,7 +169,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
- (adev->kfd.vram_used + vram_needed >
+ (adev && adev->kfd.vram_used + vram_needed >
adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
reserved_for_pt)) {
@@ -180,7 +180,12 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
- adev->kfd.vram_used += vram_needed;
+ WARN_ONCE(vram_needed && !adev,
+ "adev reference can't be null when vram is used");
+ if (adev) {
+ adev->kfd.vram_used += vram_needed;
+ adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -189,7 +194,7 @@ release:
return ret;
}
-static void unreserve_mem_limit(struct amdgpu_device *adev,
+void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag)
{
spin_lock(&kfd_mem_limit.mem_limit_lock);
@@ -198,7 +203,12 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ WARN_ONCE(!adev,
+ "adev reference can't be null when alloc mem flags vram is set");
+ if (adev) {
+ adev->kfd.vram_used -= size;
+ adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -207,8 +217,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
goto release;
}
-
- WARN_ONCE(adev->kfd.vram_used < 0,
+ WARN_ONCE(adev && adev->kfd.vram_used < 0,
"KFD VRAM memory accounting unbalanced");
WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
"KFD TTM memory accounting unbalanced");
@@ -225,7 +234,7 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
u32 alloc_flags = bo->kfd_bo->alloc_flags;
u64 size = amdgpu_bo_size(bo);
- unreserve_mem_limit(adev, size, alloc_flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags);
kfree(bo->kfd_bo);
}
@@ -773,7 +782,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
- (*bo)->parent = amdgpu_bo_ref(mem->bo);
return 0;
}
@@ -1604,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
-
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- - adev->kfd.vram_used
+ - adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
- return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+ return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
@@ -1721,7 +1728,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+ pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
@@ -1749,7 +1756,7 @@ err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- unreserve_mem_limit(adev, size, flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
@@ -1770,6 +1777,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
+ bool use_release_notifier = (mem->bo->kfd_bo == mem);
struct kfd_mem_attachment *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
@@ -1861,6 +1869,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
*/
drm_gem_object_put(&mem->bo->tbo.base);
+ /*
+ * kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf() is not freed
+ * through the release notifier, so free it explicitly here.
+ */
+ if (!use_release_notifier)
+ kfree(mem);
+
return ret;
}
@@ -2883,3 +2898,22 @@ bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *
}
return false;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
+{
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+ seq_printf(m, "System mem used %lldM out of %lluM\n",
+ (kfd_mem_limit.system_mem_used >> 20),
+ (kfd_mem_limit.max_system_mem_limit >> 20));
+ seq_printf(m, "TTM mem used %lldM out of %lluM\n",
+ (kfd_mem_limit.ttm_mem_used >> 20),
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+
+ return 0;
+}
+
+#endif
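The accounting change above splits KFD VRAM tracking into an exact counter (vram_used) and a 2 MB-aligned counter (vram_used_aligned) that only feeds the availability report. A minimal sketch, using the macros from this file, of how the two counters diverge for a small allocation:

/* Illustrative only: how the exact and aligned counters diverge. */
static void kfd_vram_accounting_example(struct amdgpu_device *adev, uint64_t size)
{
	adev->kfd.vram_used += size;                                        /* exact usage       */
	adev->kfd.vram_used_aligned += ALIGN(size, VRAM_AVAILABLITY_ALIGN); /* 2 MiB granularity */
	/* amdgpu_amdkfd_get_available_memory() subtracts vram_used_aligned,
	 * so a 4 KiB allocation still consumes a full 2 MiB of reported
	 * availability while vram_used stays exact for the accounting checks. */
}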
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fd8f3731758e..b81b77a9efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
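The vram_info v3.0 fix above treats channel_width as a power-of-two exponent rather than a width in bits. A worked example with illustrative values:

/* Worked example of the corrected width math (values are illustrative). */
uint32_t mem_channel_number = 16;  /* e.g. 16 channels              */
uint32_t mem_channel_width  = 5;   /* exponent: per-channel 32 bits */
uint32_t vram_width = mem_channel_number * (1 << mem_channel_width); /* 16 * 32 = 512 bits */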
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d8f1335bc68f..b7bae833c804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
}
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index afe22f83d4a6..3ea48385fab3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -402,7 +402,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
}
}
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
drm_dev_exit(idx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e2eec985adb3..6066aebf491c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1043,6 +1043,157 @@ err:
}
/**
+ * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * Read the last residency value logged. It doesn't auto-update; logging must
+ * be stopped before the current value can be read.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = amdgpu_get_gfx_off_residency(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit non-zero value to start logging; write a 32-bit zero to stop.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u32 value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ amdgpu_set_gfx_off_residency(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+
+/**
+ * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u64 value = 0;
+
+ r = amdgpu_get_gfx_off_entrycount(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (u64 *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
* amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
*
* @f: open file handle
@@ -1249,6 +1400,19 @@ static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_count_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_residency_read,
+ .write = amdgpu_debugfs_gfxoff_residency_write,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs2_fops,
@@ -1261,6 +1425,8 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gpr_fops,
&amdgpu_debugfs_gfxoff_fops,
&amdgpu_debugfs_gfxoff_status_fops,
+ &amdgpu_debugfs_gfxoff_count_fops,
+ &amdgpu_debugfs_gfxoff_residency_fops,
};
static const char *debugfs_regs_names[] = {
@@ -1275,6 +1441,8 @@ static const char *debugfs_regs_names[] = {
"amdgpu_gpr",
"amdgpu_gfxoff",
"amdgpu_gfxoff_status",
+ "amdgpu_gfxoff_count",
+ "amdgpu_gfxoff_residency",
};
/**
@@ -1705,7 +1873,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
- uint32_t *new, *tmp = NULL;
+ uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
do {
@@ -1747,7 +1915,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;
error_free:
- kfree(tmp);
+ if (tmp != new)
+ kfree(tmp);
kfree(new);
return ret;
}
@@ -1785,6 +1954,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return PTR_ERR(ent);
}
+ debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
+
/* Register debugfs entries for amdgpu_ttm */
amdgpu_ttm_debugfs_init(adev);
amdgpu_debugfs_pm_init(adev);
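The new amdgpu_gfxoff_residency file requires 32-bit aligned accesses: a non-zero write starts logging, a zero write stops it, and a subsequent read returns the last logged value. A minimal userspace sketch of that sequence; the debugfs path and card index are assumptions, not taken from this patch:

/* Minimal userspace sketch (path and card index are assumptions). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_gfxoff_residency";
	uint32_t start = 1, stop = 0, residency = 0;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, &start, sizeof(start));  /* non-zero: start logging     */
	sleep(1);                          /* let some GFXOFF time elapse */
	write(fd, &stop, sizeof(stop));    /* zero: stop logging          */
	pread(fd, &residency, sizeof(residency), 0);
	printf("GFXOFF residency: %u\n", residency);
	close(fd);
	return 0;
}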
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 041bd906449d..62b26f0e37b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2456,12 +2456,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!hive->reset_domain ||
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
goto init_failed;
}
/* Drop the early temporary reset domain we created for device */
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
}
}
@@ -3577,6 +3579,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
adev->gfx.gfx_off_req_count = 1;
+ adev->gfx.gfx_off_residency = 0;
+ adev->gfx.gfx_off_entrycount = 0;
adev->pm.ac_power = power_supply_is_system_supplied() > 0;
atomic_set(&adev->throttling_logging_enabled, 1);
@@ -3965,8 +3969,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_gart_dummy_page_fini(adev);
- if (drm_dev_is_unplugged(adev_to_drm(adev)))
- amdgpu_device_unmap_mmio(adev);
+ amdgpu_device_unmap_mmio(adev);
}
@@ -4413,8 +4416,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
- amdgpu_amdkfd_pre_reset(adev);
-
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4509,14 +4510,15 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
*/
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
- if (!amdgpu_device_ip_check_soft_reset(adev)) {
- dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
- return false;
- }
if (amdgpu_gpu_recovery == 0)
goto disabled;
+ if (!amdgpu_device_ip_check_soft_reset(adev)) {
+ dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
+ return false;
+ }
+
if (amdgpu_sriov_vf(adev))
return true;
@@ -4641,7 +4643,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!need_full_reset)
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- if (!need_full_reset) {
+ if (!need_full_reset && amdgpu_gpu_recovery) {
amdgpu_device_ip_pre_soft_reset(adev);
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
@@ -4668,7 +4670,6 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
int i;
lockdep_assert_held(&adev->reset_domain->sem);
- dump_stack();
for (i = 0; i < adev->num_regs; i++) {
adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
@@ -4743,6 +4744,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
+
+ reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
@@ -5038,6 +5041,7 @@ static void amdgpu_device_recheck_guilty_jobs(
/* set guilty */
drm_sched_increase_karma(s_job);
+ amdgpu_reset_prepare_hwcontext(adev, reset_context);
retry:
/* do hw reset */
if (amdgpu_sriov_vf(adev)) {
@@ -5147,6 +5151,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->job = job;
reset_context->hive = hive;
+
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
@@ -5266,8 +5271,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
- if (r && r == -EAGAIN)
+ if (r && r == -EAGAIN) {
+ set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
+ adev->asic_reset_res = 0;
goto retry;
+ }
}
skip_hw_reset:
@@ -5300,6 +5308,9 @@ skip_hw_reset:
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}
+ if (adev->enable_mes)
+ amdgpu_mes_self_test(tmp_adev);
+
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
@@ -5520,7 +5531,8 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
+ bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance_many(adev->pdev,
&peer_adev->dev, 1, true) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
@@ -5695,6 +5707,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 242d1847c4aa..9fa2a5ceb77d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1506,6 +1506,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
default:
@@ -1549,6 +1550,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
default:
@@ -1630,12 +1632,15 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
- case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
+ case IP_VERSION(13, 0, 4):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
@@ -1680,6 +1685,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
@@ -1778,6 +1784,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
default:
@@ -1821,6 +1828,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -1901,7 +1909,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
break;
default:
dev_err(adev->dev,
@@ -1938,6 +1947,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2163,6 +2173,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->family = AMDGPU_FAMILY_GC_11_0_0;
break;
case IP_VERSION(11, 0, 1):
@@ -2232,7 +2243,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 3, 0):
case IP_VERSION(4, 3, 1):
- adev->nbio.funcs = &nbio_v4_3_funcs;
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
break;
case IP_VERSION(7, 7, 0):
@@ -2330,6 +2344,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 429fcdf28836..728a0933ea6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2181,8 +2181,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- drm_dev_unplug(dev);
-
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -2190,6 +2188,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
+ drm_dev_unplug(dev);
+
/*
* Flush any in flight DMA operations from device.
* Clear the Bus Master Enable bit and then wait on the PCIe Device
@@ -2563,8 +2563,11 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- if (ret)
+ if (ret) {
+ if (amdgpu_device_supports_px(drm_dev))
+ pci_disable_device(pdev);
return ret;
+ }
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ecada5eadfe3..e325150879df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
+ sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
- return true;
- else
+ return false;
+ else
+ return true;
+ } else {
return false;
+ }
default:
return false;
}
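The Sienna Cichlid FRU check now reports support only for "D603" VBIOS builds that are not the "D603GLXE" variant. The nested strnstr() calls are logically equivalent to the single expression below (a sketch, not part of the patch):

/* Equivalent formulation of the new check (sketch, not part of the patch). */
static bool sienna_cichlid_has_fru(struct atom_context *atom_ctx)
{
	return strnstr(atom_ctx->vbios_version, "D603",
		       sizeof(atom_ctx->vbios_version)) &&
	       !strnstr(atom_ctx->vbios_version, "D603GLXE",
			sizeof(atom_ctx->vbios_version));
}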
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 222d3d7ea076..ceb91469958a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -477,7 +477,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- if (adev->gfx.kiq.ring.sched.ready)
+ if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
@@ -610,6 +610,45 @@ unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_set_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 23a696d38390..8abdf41d0f83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -332,10 +332,12 @@ struct amdgpu_gfx {
uint32_t srbm_soft_reset;
/* gfx off */
- bool gfx_off_state; /* true: enabled, false: disabled */
- struct mutex gfx_off_mutex;
- uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
- struct delayed_work gfx_off_delay_work;
+ bool gfx_off_state; /* true: enabled, false: disabled */
+ struct mutex gfx_off_mutex; /* mutex to change gfxoff state */
+ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
+ struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */
+ uint32_t gfx_off_residency; /* last logged residency */
+ uint64_t gfx_off_entrycount; /* count of times GPU has entered the GFXOFF state */
/* pipe reservation */
struct mutex pipe_reserve_mutex;
@@ -407,6 +409,10 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value);
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency);
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index beabab515836..c7b44aeb671b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -35,6 +35,9 @@ struct amdgpu_gfxhub_funcs {
void (*init)(struct amdgpu_device *adev);
int (*get_xgmi_info)(struct amdgpu_device *adev);
void (*utcl2_harvest)(struct amdgpu_device *adev);
+ void (*mode2_save_regs)(struct amdgpu_device *adev);
+ void (*mode2_restore_regs)(struct amdgpu_device *adev);
+ void (*halt)(struct amdgpu_device *adev);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 008eaca27151..0305b660cd17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -264,6 +264,32 @@ struct amdgpu_gmc {
u64 mall_size;
/* number of UMC instances */
int num_umc;
+ /* mode2 save restore */
+ u64 VM_L2_CNTL;
+ u64 VM_L2_CNTL2;
+ u64 VM_DUMMY_PAGE_FAULT_CNTL;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_LO32;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_HI32;
+ u64 VM_L2_PROTECTION_FAULT_CNTL;
+ u64 VM_L2_PROTECTION_FAULT_CNTL2;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL3;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL4;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_LO32;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_HI32;
+ u64 VM_DEBUG;
+ u64 VM_L2_MM_GROUP_RT_CLASSES;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID2;
+ u64 VM_L2_CACHE_PARITY_CNTL;
+ u64 VM_L2_IH_LOG_CNTL;
+ u64 VM_CONTEXT_CNTL[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16];
+ u64 MC_VM_MX_L1_TLB_CNTL;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5071b96be982..1062b7ed74ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -49,6 +49,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
memset(&ti, 0, sizeof(struct amdgpu_task_info));
+ adev->job_hang = true;
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
@@ -71,6 +72,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
@@ -82,6 +84,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ adev->job_hang = false;
drm_dev_exit(idx);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -159,7 +162,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
- dma_fence_put(&job->hw_fence);
+ if (!job->hw_fence.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -272,10 +278,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
-
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fe82b8b19a4e..0c546245793b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+ /* zero sdma_hqd_mask for non-existent engine */
+ else if (adev->sdma.num_instances == 1)
+ adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
else
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3ee363bfbac2..cfcaf890a6a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -37,6 +37,7 @@
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
+#include "psp_v13_0_4.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
@@ -137,6 +138,7 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
@@ -151,6 +153,10 @@ static int psp_early_init(void *handle)
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
+ case IP_VERSION(13, 0, 4):
+ psp_v13_0_4_set_psp_funcs(psp);
+ psp->autoload_supported = true;
+ break;
default:
return -EINVAL;
}
@@ -322,23 +328,32 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "vega10");
break;
case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "navi12");
break;
case IP_VERSION(11, 0, 7):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "sienna_cichlid");
break;
case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "aldebaran");
ret &= psp_init_ta_microcode(psp, "aldebaran");
break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ break;
default:
BUG();
break;
}
-
return ret;
}
@@ -481,11 +496,14 @@ static int psp_sw_fini(void *handle)
release_firmware(psp->ta_fw);
psp->ta_fw = NULL;
}
- if (adev->psp.cap_fw) {
+ if (psp->cap_fw) {
release_firmware(psp->cap_fw);
psp->cap_fw = NULL;
}
-
+ if (psp->toc_fw) {
+ release_firmware(psp->toc_fw);
+ psp->toc_fw = NULL;
+ }
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
@@ -761,6 +779,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 10):
return true;
default:
return false;
@@ -807,7 +826,7 @@ static int psp_tmr_unload(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_unload_cmd_buf(psp, cmd);
- DRM_INFO("free PSP TMR buffer\n");
+ dev_info(psp->adev->dev, "free PSP TMR buffer\n");
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -1292,6 +1311,8 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
break;
}
+
+ amdgpu_put_xgmi_hive(hive);
}
int psp_xgmi_get_topology_info(struct psp_context *psp,
@@ -2394,7 +2415,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
- if (!ucode->fw)
+ if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
@@ -2404,20 +2425,7 @@ static bool fw_load_skip_check(struct psp_context *psp,
return true;
if (amdgpu_sriov_vf(psp->adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
- /*skip ucode loading in SRIOV VF */
+ amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
return true;
if (psp->autoload_supported &&
@@ -2491,7 +2499,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
- AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
+ adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -2634,6 +2642,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
@@ -3692,3 +3703,11 @@ const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
.rev = 0,
.funcs = &psp_ip_funcs,
};
+
+const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 13,
+ .minor = 0,
+ .rev = 4,
+ .funcs = &psp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 180634616b0f..c32b74bd970f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -439,6 +439,7 @@ extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ff5361f5c2d4..ab9ba5a9c33d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1949,6 +1949,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 32c86a0b145c..9da5ead50c90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -23,6 +23,7 @@
#include "amdgpu_reset.h"
#include "aldebaran.h"
+#include "sienna_cichlid.h"
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_handler *handler)
@@ -36,10 +37,15 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
+ adev->amdgpu_reset_level_mask = 0x1;
+
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_init(adev);
break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_init(adev);
+ break;
default:
break;
}
@@ -55,6 +61,9 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_fini(adev);
break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_fini(adev);
+ break;
default:
break;
}
@@ -67,6 +76,12 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
{
struct amdgpu_reset_handler *reset_handler = NULL;
+ if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
+ return -ENOSYS;
+
+ if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
+ return -ENOSYS;
+
if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
@@ -83,6 +98,12 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
int ret;
struct amdgpu_reset_handler *reset_handler = NULL;
+ if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
+ return -ENOSYS;
+
+ if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
+ return -ENOSYS;
+
if (adev->reset_cntl)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 9e55a5d7a825..f71b83c42590 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -30,6 +30,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_MODE2_RESET = 2,
};
struct amdgpu_reset_context {
@@ -37,6 +38,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
unsigned long flags;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d3558c34d406..3e316b013fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -405,6 +405,9 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
+ if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
+ return false;
+
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7d25a10395c0..b1c455329023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -638,6 +638,8 @@ struct amdgpu_ttm_tt {
#endif
};
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -649,7 +651,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -703,7 +705,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
@@ -752,7 +754,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -789,7 +791,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -823,7 +825,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -861,7 +863,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;
@@ -928,7 +930,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -999,7 +1001,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1026,7 +1028,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1080,7 +1082,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;
@@ -1114,7 +1116,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;
@@ -1183,7 +1185,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
- gtt = (void *)bo->ttm;
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1200,7 +1202,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
@@ -1219,7 +1221,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1242,7 +1244,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL || !gtt->userptr)
return false;
@@ -1255,7 +1257,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
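The ttm_to_amdgpu_ttm_tt() macro replaces the open-coded (void *) casts with container_of(), which recovers the enclosing amdgpu_ttm_tt from the address of its embedded ttm member and is type-checked by the compiler. A standalone sketch of the same pattern with illustrative types:

/* Generic container_of pattern the new macro relies on (illustrative types). */
#include <stddef.h>

struct ttm_tt_example { int page_flags; };
struct amdgpu_ttm_tt_example {
	struct ttm_tt_example ttm; /* embedded base object */
	unsigned long userptr;     /* driver-private state */
};

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct amdgpu_ttm_tt_example *to_example(struct ttm_tt_example *ttm)
{
	return example_container_of(ttm, struct amdgpu_ttm_tt_example, ttm);
}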
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index ebed3f5226db..96b6cf4c4d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -390,6 +390,7 @@ union amdgpu_firmware_header {
struct rlc_firmware_header_v2_1 rlc_v2_1;
struct rlc_firmware_header_v2_2 rlc_v2_2;
struct rlc_firmware_header_v2_3 rlc_v2_3;
+ struct rlc_firmware_header_v2_4 rlc_v2_4;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct sdma_firmware_header_v2_0 sdma_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1bfdfb9207ac..f36e4f08db6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -54,6 +54,7 @@
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
+#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -74,6 +75,7 @@ MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -185,6 +187,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
+ case IP_VERSION(4, 0, 2):
+ fw_name = FIRMWARE_VCN4_0_2;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = false;
+ break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 60c608144480..80b7a6cfd026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,6 +161,7 @@
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 12)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -317,12 +318,24 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
+struct amdgpu_fw_shared_rb_setup {
+ uint32_t is_rb_enabled_flags;
+ uint32_t rb_addr_lo;
+ uint32_t rb_addr_hi;
+ uint32_t rb_size;
+ uint32_t rb4_addr_lo;
+ uint32_t rb4_addr_hi;
+ uint32_t rb4_size;
+ uint32_t reserved[6];
+};
+
struct amdgpu_vcn4_fw_shared {
uint32_t present_flag_0;
uint8_t pad[12];
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ struct amdgpu_fw_shared_rb_setup rb_setup;
};
struct amdgpu_vcn_fwlog {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9be57389301b..e4af40b9a8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -690,7 +690,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -707,6 +706,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
+ case CHIP_IP_DISCOVERY:
reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
break;
default: /* other chip doesn't support SRIOV */
@@ -750,6 +750,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_IP_DISCOVERY:
nv_set_virt_ops(adev);
/* try send GPU_INIT_DATA request to host */
amdgpu_virt_request_init_data(adev);
@@ -807,6 +808,60 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
+{
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ /* no vf autoload, white list */
+ if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
+ ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ case IP_VERSION(13, 0, 10):
+ /* white list */
+ if (ucode_id == AMDGPU_UCODE_ID_CAP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
+ || ucode_id == AMDGPU_UCODE_ID_VCN1
+ || ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ default:
+ /* legacy black list */
+ if (ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
+ }
+}
+
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 239f232f9c02..d94c31e68a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -253,6 +253,9 @@ struct amdgpu_virt {
uint32_t decode_max_frame_pixels;
uint32_t encode_max_dimension_pixels;
uint32_t encode_max_frame_pixels;
+
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
};
struct amdgpu_video_codec_info;
@@ -343,4 +346,6 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
u32 acc_flags, u32 hwip);
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
u32 offset, u32 acc_flags, u32 hwip);
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
+ uint32_t ucode_id);
#endif
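amdgpu_virt_fw_load_skip_check() centralizes the per-ASIC SRIOV firmware lists: MP0 13.0.0 and 13.0.10 keep a white list of ucode IDs that the VF still loads itself, while other ASICs fall back to the legacy black list that previously lived in fw_load_skip_check(). An illustrative expectation of the contract for MP0 13.0.0 under SRIOV, based on the table in the diff:

/* Illustrative expectation for MP0 13.0.0 under SRIOV (white-list behaviour). */
WARN_ON(amdgpu_virt_fw_load_skip_check(adev, AMDGPU_UCODE_ID_VCN));    /* loaded by the VF */
WARN_ON(!amdgpu_virt_fw_load_skip_check(adev, AMDGPU_UCODE_ID_RLC_G)); /* host-loaded, skipped */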
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index a60cb6d84b79..f4b5301ea2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1b108d03e785..d3b483aa81f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -504,6 +504,9 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
{
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
/* Each psp need to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
atomic_read(&hive->number_devices),
@@ -742,7 +745,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index 33a8a7365aef..f0e235f98afb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
- data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9c964cd3b5d4..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e0ad9f27dc3f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 77f5e998a120..b1c44fab074f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 802e5c753271..a22b45c92792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fafbad3cf08d..e4dde41f2f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
@@ -4846,7 +4856,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -5971,6 +5981,9 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
}
+ if (adev->job_hang && !enable)
+ return 0;
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
break;
@@ -7569,8 +7582,10 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
-
- return amdgpu_ring_test_helper(kiq_ring);
+ if (!adev->job_hang)
+ return amdgpu_ring_test_helper(kiq_ring);
+ else
+ return 0;
}
#endif
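The adev->job_hang checks added above skip the KIQ ring test while a hung job is being torn down. A simplified, self-contained sketch of that guard; the struct and function names are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool job_hang;	/* set while a hung job is being recovered */
};

/* placeholder for a real ring test submission */
static int ring_test(void)
{
	return 0;
}

/* When a job hang triggered the teardown, skip the ring test and
 * report success, as gfx_v10_0_kiq_disable_kgq() now does. */
static int disable_queues(struct dev_state *dev)
{
	if (!dev->job_hang)
		return ring_test();
	return 0;
}

int main(void)
{
	struct dev_state dev = { .job_hang = true };

	printf("result: %d\n", disable_queues(&dev));
	return 0;
}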
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 0d8193b30fc5..fa718318568e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -53,9 +53,12 @@
#define GFX11_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388
#define regCGTT_WD_CLK_CTRL 0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1 0x4e7e
+#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX 1
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
@@ -70,21 +73,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
-
-static const struct soc15_reg_golden golden_settings_gc_11_0[] =
-{
- /* Pending on emulation bring up */
-};
-
-static const struct soc15_reg_golden golden_settings_gc_11_0_0[] =
-{
- /* Pending on emulation bring up */
-};
-
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] =
-{
- /* Pending on emulation bring up */
-};
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
@@ -128,6 +120,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+ bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -264,42 +258,17 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
}
-static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0));
- break;
- default:
- break;
- }
-}
-
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_0_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0_0));
- break;
case IP_VERSION(11, 0, 1):
soc15_program_register_sequence(adev,
- golden_settings_gc_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
- soc15_program_register_sequence(adev,
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
default:
break;
}
- gfx_v11_0_init_spm_golden_registers(adev);
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@@ -1135,7 +1104,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
- .init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+ .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1145,6 +1114,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1580,6 +1550,7 @@ static int gfx_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -2754,6 +2725,21 @@ static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
mec_hdr->ucode_start_addr_hi >> 2);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
+
+ /* reset mec pipe */
+ tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
+
+ /* clear mec pipe reset */
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
@@ -2765,7 +2751,13 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
for (i = 0; i < adev->usec_timeout; i++) {
cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
- bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
+
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1))
+ bootload_status = RREG32_SOC15(GC, 0,
+ regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
+ else
+ bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
+
if ((cp_status == 0) &&
(REG_GET_FIELD(bootload_status,
RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
@@ -5173,9 +5165,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs have only one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5204,9 +5199,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs have only one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
}
}
@@ -5271,6 +5269,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+ /* Program RLC_PG_DELAY_3 for CGPG hysteresis */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 1):
+ WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v11_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static int gfx_v11_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -5285,6 +5315,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 2):
amdgpu_gfx_off_ctrl(adev, enable);
break;
+ case IP_VERSION(11, 0, 1):
+ gfx_v11_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
default:
break;
}
@@ -5302,6 +5336,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c6e0f9313a7f..1d6d3a852a0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -126,6 +126,8 @@ MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
@@ -1496,7 +1498,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -1509,7 +1515,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
@@ -2587,7 +2597,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index d8c531581116..8cf53e039c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -576,6 +576,111 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
}
}
+static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
+{
+ int i;
+ adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
+ adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_LO32 = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_LO32);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_HI32 = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_HI32);
+ adev->gmc.VM_L2_PROTECTION_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
+ adev->gmc.VM_L2_PROTECTION_FAULT_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL2);
+ adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL3 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL3);
+ adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL4 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL4);
+ adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_LO32 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_LO32);
+ adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_HI32 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_HI32);
+ adev->gmc.VM_DEBUG = RREG32_SOC15(GC, 0, mmGCVM_DEBUG);
+ adev->gmc.VM_L2_MM_GROUP_RT_CLASSES = RREG32_SOC15(GC, 0, mmGCVM_L2_MM_GROUP_RT_CLASSES);
+ adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID = RREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID);
+ adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID2 = RREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID2);
+ adev->gmc.VM_L2_CACHE_PARITY_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CACHE_PARITY_CNTL);
+ adev->gmc.VM_L2_IH_LOG_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_IH_LOG_CNTL);
+
+ for (i = 0; i <= 15; i++) {
+ adev->gmc.VM_CONTEXT_CNTL[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, i * 2);
+ }
+
+ adev->gmc.MC_VM_MX_L1_TLB_CNTL = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
+}
+
+static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
+{
+ int i;
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_LO32, adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_LO32);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_HI32, adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_HI32);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, adev->gmc.VM_L2_PROTECTION_FAULT_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL2, adev->gmc.VM_L2_PROTECTION_FAULT_CNTL2);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL3, adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL3);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL4, adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL4);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_LO32, adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_LO32);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_HI32, adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_HI32);
+ WREG32_SOC15(GC, 0, mmGCVM_DEBUG, adev->gmc.VM_DEBUG);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_MM_GROUP_RT_CLASSES, adev->gmc.VM_L2_MM_GROUP_RT_CLASSES);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID, adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID2, adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID2);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CACHE_PARITY_CNTL, adev->gmc.VM_L2_CACHE_PARITY_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_IH_LOG_CNTL, adev->gmc.VM_L2_IH_LOG_CNTL);
+
+ for (i = 0; i <= 15; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i, adev->gmc.VM_CONTEXT_CNTL[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[i]);
+ }
+
+ WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, adev->gmc.vram_start >> 24);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, adev->gmc.vram_end >> 24);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, adev->gmc.MC_VM_MX_L1_TLB_CNTL);
+}
+
+static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
+ uint32_t tmp;
+ int time = 1000;
+
+ gfxhub_v2_1_set_fault_enable_default(adev, false);
+
+ for (i = 0; i <= 14; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, ~0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, ~0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ 0);
+ }
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+ while ((tmp & (GRBM_STATUS2__EA_BUSY_MASK |
+ GRBM_STATUS2__EA_LINK_BUSY_MASK)) != 0 &&
+ time) {
+ udelay(100);
+ time--;
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+ }
+
+ if (!time) {
+ DRM_WARN("failed to wait for GRBM(EA) idle\n");
+ }
+}
+
const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.get_fb_location = gfxhub_v2_1_get_fb_location,
.get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
@@ -586,4 +691,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.init = gfxhub_v2_1_init,
.get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
+ .mode2_save_regs = gfxhub_v2_1_save_regs,
+ .mode2_restore_regs = gfxhub_v2_1_restore_regs,
+ .halt = gfxhub_v2_1_halt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index 5eccaa2c7ca0..0e13370c2057 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -26,13 +26,10 @@
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
+#include "gc/gc_11_0_0_default.h"
#include "navi10_enum.h"
#include "soc15_common.h"
-#define regGCVM_L2_CNTL3_DEFAULT 0x80100007
-#define regGCVM_L2_CNTL4_DEFAULT 0x000000c1
-#define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0
-
static const char *gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
@@ -414,12 +411,39 @@ static void gfxhub_v3_0_set_fault_enable_default(struct amdgpu_device *adev,
{
u32 tmp;
+ /* Do not halt the CP on a page fault */
+ tmp = RREG32_SOC15(GC, 0, regCP_DEBUG);
+ tmp = REG_SET_FIELD(tmp, CP_DEBUG, CPG_UTCL1_ERROR_HALT_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regCP_DEBUG, tmp);
+
+ /**
+ * Set GRBM_GFX_INDEX in broadcast mode
+ * before programming GL1C_UTCL0_CNTL1 and SQG_CONFIG
+ */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, regGRBM_GFX_INDEX_DEFAULT);
+
+ /**
+ * Retry response mode: RETRY
+ * Error (no retry) response mode: SUCCESS
+ */
+ tmp = RREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1);
+ tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_MODE, 0);
+ tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_FAULT_MODE, 0x2);
+ WREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1, tmp);
+
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
+ /* Disable SQ XNACK interrupt for all VMIDs */
+ tmp = RREG32_SOC15(GC, 0, regSQG_CONFIG);
+ tmp = REG_SET_FIELD(tmp, SQG_CONFIG, XNACK_INTR_MASK,
+ SQG_CONFIG__XNACK_INTR_MASK_MASK >>
+ SQG_CONFIG__XNACK_INTR_MASK__SHIFT);
+ WREG32_SOC15(GC, 0, regSQG_CONFIG, tmp);
+
tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
new file mode 100644
index 000000000000..5d3fffd4929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "gfxhub_v3_0_3.h"
+
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+#include "navi10_enum.h"
+#include "soc15_common.h"
+
+#define regGCVM_L2_CNTL3_DEFAULT 0x80100007
+#define regGCVM_L2_CNTL4_DEFAULT 0x000000c1
+#define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0
+
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v3_0_3_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v3_0_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v3_0_3_get_fb_location(struct amdgpu_device *adev)
+{
+ u64 base = RREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE);
+
+ base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ return base;
+}
+
+static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev)
+{
+ return (u64)RREG32_SOC15(GC, 0, regGCMC_VM_FB_OFFSET) << 24;
+}
+
+static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+}
+
+static void gfxhub_v3_0_3_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v3_0_3_setup_vm_pt_regs(adev, 0, pt_base);
+
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+}
+
+static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+}
+
+
+static void gfxhub_v3_0_3_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC); /* UC, uncached */
+
+ WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void gfxhub_v3_0_3_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
+ ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
+ /* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
+ L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL2, tmp);
+
+ tmp = regGCVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, tmp);
+
+ tmp = regGCVM_L2_CNTL4_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL4, tmp);
+
+ tmp = regGCVM_L2_CNTL5_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL5, tmp);
+}
+
+static void gfxhub_v3_0_3_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL, tmp);
+}
+
+static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0xFFFFFFFF);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+
+}
+
+static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ adev->vm_manager.num_level);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+
+ hub->vm_cntx_cntl = tmp;
+}
+
+static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ unsigned i;
+
+ for (i = 0 ; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ /*
+ * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for a VF because they are
+ * VF copy registers, so the vbios post doesn't program them; the
+ * SRIOV driver needs to program them instead
+ */
+ WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ gfxhub_v3_0_3_init_gart_aperture_regs(adev);
+ gfxhub_v3_0_3_init_system_aperture_regs(adev);
+ gfxhub_v3_0_3_init_tlb_regs(adev);
+ gfxhub_v3_0_3_init_cache_regs(adev);
+
+ gfxhub_v3_0_3_enable_system_domain(adev);
+ gfxhub_v3_0_3_disable_identity_aperture(adev);
+ gfxhub_v3_0_3_setup_vmid_config(adev);
+ gfxhub_v3_0_3_program_invalidation(adev);
+
+ return 0;
+}
+
+static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ /* Setup L2 cache */
+ WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, 0);
+}
+
+/**
+ * gfxhub_v3_0_3_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gfxhub_v3_0_3_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value)
+{
+ u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v3_0_3_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v3_0_3_get_invalidate_req,
+};
+
+static void gfxhub_v3_0_3_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_SEM);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regGCVM_CONTEXT1_CNTL - regGCVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regGCVM_INVALIDATE_ENG1_REQ -
+ regGCVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v3_0_3_vmhub_funcs;
+}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v3_0_3_funcs = {
+ .get_fb_location = gfxhub_v3_0_3_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v3_0_3_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v3_0_3_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v3_0_3_gart_enable,
+ .gart_disable = gfxhub_v3_0_3_gart_disable,
+ .set_fault_enable_default = gfxhub_v3_0_3_set_fault_enable_default,
+ .init = gfxhub_v3_0_3_init,
+};
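The GART aperture registers programmed in gfxhub_v3_0_3_init_gart_aperture_regs() take a 48-bit, page-aligned address split across a LO32/HI32 pair: the address is shifted right by 12 for the 4 KiB page frame, the low 32 bits land in *_LO32 and the bits above (address >> 44) in *_HI32. A self-contained sketch of that split, with the helper name invented here:

#include <stdint.h>
#include <stdio.h>

/* Split a page-aligned GPU address into the two 32-bit values written
 * to the page-table start/end address register pair. */
static void split_gart_addr(uint64_t addr, uint32_t *lo32, uint32_t *hi32)
{
	*lo32 = (uint32_t)(addr >> 12);	/* 4 KiB page frame, low 32 bits */
	*hi32 = (uint32_t)(addr >> 44);	/* remaining high bits */
}

int main(void)
{
	uint32_t lo, hi;

	split_gart_addr(0x0000810000000000ULL, &lo, &hi);
	printf("lo32=0x%08x hi32=0x%08x\n", lo, hi);
	return 0;
}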
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h
new file mode 100644
index 000000000000..6153bd5e3083
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFXHUB_V3_0_3_H__
+#define __GFXHUB_V3_0_3_H__
+
+extern const struct amdgpu_gfxhub_funcs gfxhub_v3_0_3_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9ae8cdaa033e..f513e2c2e964 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 1471bfb9ae38..846ccb6cf07d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -39,6 +39,7 @@
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
+#include "gfxhub_v3_0_3.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
@@ -233,7 +234,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Issue additional private vm invalidation to MMHUB */
if ((vmhub != AMDGPU_GFXHUB_0) &&
- (hub->vm_l2_bank_select_reserved_cid2)) {
+ (hub->vm_l2_bank_select_reserved_cid2) &&
+ !amdgpu_sriov_vf(adev)) {
inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
+ /* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
inv_req |= (1 << 25);
@@ -590,7 +592,14 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 3):
+ adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
+ break;
+ default:
+ adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
+ break;
+ }
}
static int gmc_v11_0_early_init(void *handle)
@@ -640,7 +649,10 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
/* base offset of vram pages */
- adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
+ if (amdgpu_sriov_vf(adev))
+ adev->vm_manager.vram_base_offset = 0;
+ else
+ adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}
/**
@@ -732,6 +744,7 @@ static int gmc_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 22761a3bb818..4603653916f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_domain->sem);
@@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 39a696cd45b5..29c3484ae1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
0);
}
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Force on the MEM clock before switching the clock/power mode */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* disable clock and power gating before making any changes */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* disable the MEM clock override after the clock/power mode change */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_2_update_mem_power_gating(adev, enable);
+ hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
.flush_hdp = hdp_v5_2_flush_hdp,
+ .update_clock_gating = hdp_v5_2_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
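The HDP clock/power-gating code above is built entirely from read-modify-write field updates. A generic, stand-alone re-implementation of that pattern; the mask and shift constants below are invented, not the kernel's generated <REG>__<FIELD>_MASK/__SHIFT macros:

#include <stdint.h>
#include <stdio.h>

/* Clear the field, then insert the new value — the same operation
 * REG_SET_FIELD performs with generated mask/shift constants. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

#define MEM_POWER_LS_EN_MASK	0x00000400u
#define MEM_POWER_LS_EN_SHIFT	10

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, MEM_POWER_LS_EN_MASK, MEM_POWER_LS_EN_SHIFT, 1);
	printf("reg = 0x%08x\n", reg);	/* 0x00000400 */
	return 0;
}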
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 92dc60a9d209..7cd79a3844b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -105,7 +105,13 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_USED_INT_THRESHOLD, threshold);
- WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
+ return;
+ } else {
+ WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+
WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}
@@ -132,7 +138,13 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
- WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
if (enable) {
ih->enabled = true;
@@ -242,7 +254,15 @@ static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
- WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
if (ih == &adev->irq.ih) {
/* set the ih ring 0 writeback address whether it's enabled or not */
@@ -727,6 +747,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
.get_wptr = ih_v6_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_0_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 76383baa3929..95548c512f4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -26,12 +26,15 @@
#include "amdgpu_imu.h"
#include "amdgpu_dpm.h"
+#include "imu_v11_0_3.h"
+
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
@@ -360,6 +363,9 @@ static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)
program_imu_rlc_ram(adev, imu_rlc_ram_golden_11_0_2,
(const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_2));
break;
+ case IP_VERSION(11, 0, 3):
+ imu_v11_0_3_program_rlc_ram(adev);
+ break;
default:
BUG();
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
new file mode 100644
index 000000000000..536dafb57ee0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_imu.h"
+
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+
+static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_3[] = {
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_RD_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_WR_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_DRAM_COMBINE_FLUSH, 0x00555555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC2, 0x00001ffe, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_CREDITS, 0x003f3fff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_TAG_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE0, 0x00041000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE0, 0x00040000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC, 0x00000017, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_ENABLE, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_CREDITS, 0x003f3fbf, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE0, 0x10200800, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE1, 0x00000088, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE0, 0x1d041040, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE1, 0x80000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_IO_PRIORITY, 0x88888888, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MAM_CTRL, 0x0000d800, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ARB_FINAL, 0x000007ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_DRAM_PAGE_BURST, 0x20080200, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ENABLE, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0x000fffff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MISC, 0x0c48bff0, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SA_UNIT_DISABLE, 0x00fffc01, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_PRIM_CONFIG, 0x000fffe1, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_RB_BACKEND_DISABLE, 0xffffff01, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0x40000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0x42000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x44000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x46000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x48000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x4A000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCGTS_TCC_DISABLE, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_RATE_CONFIG, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_EDC_CONFIG, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000500, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_START, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_END, 0x000005ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_BASE, 0x00006000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_TOP, 0x000065ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_TOP_OF_DRAM_SLOT1, 0xff800000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_LOWER_TOP_OF_DRAM2, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_UPPER_TOP_OF_DRAM2, 0x00000fff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, 0x00001ffc, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000551, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL, 0x00080603, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL2, 0x00000003, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL3, 0x00100003, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL5, 0x00003fe0, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000444, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_0, 0x54105410, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_2, 0x76323276, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000244, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCUTCL2_HARVEST_BYPASS_GROUPS, 0x00000006, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BASE, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BOT, 0x00000002, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_TOP, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA0_UCODE_SELFLOAD_CONTROL, 0x00000210, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA1_UCODE_SELFLOAD_CONTROL, 0x00000210, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPC_PSP_DEBUG, CPC_PSP_DEBUG__GPA_OVERRIDE_MASK, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0xe0000000),
+};
+
+static void program_rlc_ram_register_setting(struct amdgpu_device *adev,
+ const struct imu_rlc_ram_golden *regs,
+ const u32 array_size)
+{
+ const struct imu_rlc_ram_golden *entry;
+ u32 reg, data;
+ int i;
+
+ for (i = 0; i < array_size; ++i) {
+ entry = &regs[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ reg |= entry->addr_mask;
+
+ data = entry->data;
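+ /* A few entries are overridden at runtime (AGP range and FB location
+ * taken from the current VRAM layout).
+ */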
+ if (entry->reg == regGCMC_VM_AGP_BASE)
+ data = 0x00ffffff;
+ else if (entry->reg == regGCMC_VM_AGP_TOP)
+ data = 0x0;
+ else if (entry->reg == regGCMC_VM_FB_LOCATION_BASE)
+ data = adev->gmc.vram_start >> 24;
+ else if (entry->reg == regGCMC_VM_FB_LOCATION_TOP)
+ data = adev->gmc.vram_end >> 24;
+
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, reg);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, data);
+ }
+ /* Indicate the latest entry */
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, 0);
+}
+
+void imu_v11_0_3_program_rlc_ram(struct amdgpu_device *adev)
+{
+ program_rlc_ram_register_setting(adev,
+ imu_rlc_ram_golden_11_0_3,
+ (const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_3));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h
new file mode 100644
index 000000000000..702be568f26b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __IMU_V11_0_3_H__
+#define __IMU_V11_0_3_H__
+
+void imu_v11_0_3_program_rlc_ram(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 0082e2e1e0b4..067d10073a56 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -1233,7 +1233,8 @@ static int mes_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_mes_self_test(adev);
+ if (!amdgpu_in_reset(adev))
+ amdgpu_mes_self_test(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 777f9268d92d..b64cd46a159a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -38,6 +38,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -183,6 +185,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+ mes_add_queue_pkt.trap_en = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
@@ -1280,7 +1283,8 @@ static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_mes_self_test(adev);
+ if (!amdgpu_in_reset(adev))
+ amdgpu_mes_self_test(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3f44a099c52a..3e51e773f92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index bc11b2de37ae..a1d26c4d80b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -169,17 +169,17 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- /* Disable AGP. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
-
if (!amdgpu_sriov_vf(adev)) {
/*
+ * the new L1 policy will block the SRIOV guest from writing
+ * these regs; they will be programmed on the host side,
+ * so skip programming them here.
*/
+ /* Disable AGP. */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index cac72ced94c8..e8058edc1d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- //TODO
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
}
const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 6e0145b2b408..445cb06b9d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
new file mode 100644
index 000000000000..f772bb499f3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V4_0_H__
+#define __MMSCH_V4_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 4
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+#define MMSCH_DOORBELL_OFFSET 0x8
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+
+enum mmsch_v4_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v4_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v4_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v4_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct mmsch_v4_0_table_info jpegdec;
+};
+
+struct mmsch_v4_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v4_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v4_0_cmd_direct_write {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v4_0_cmd_direct_read_modify_write {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v4_0_cmd_direct_polling {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v4_0_cmd_end {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v4_0_cmd_indirect_write {
+ struct mmsch_v4_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
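+/* The INSERT_* helpers below append one command to the MMSCH init table.
+ * They assume the caller declares size, size_dw, table_loc, table_size and
+ * the matching command struct (direct_wt, direct_rd_mod_wt, ...) locally.
+ */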
+#define MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v4_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 12906ba74462..a2f04b249132 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -290,6 +290,7 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e07757eea7ad..a977f0027928 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -317,6 +317,7 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 288c414babdf..fd14fa9b9cd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -529,6 +529,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4b5396d3e60f..eec13cb5bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 982a89f841d5..15eb3658d70e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -488,3 +488,47 @@ const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
.get_rom_offset = nbio_v4_3_get_rom_offset,
.program_aspm = nbio_v4_3_program_aspm,
};
+
+
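+/* Under SR-IOV the doorbell ranges are expected to be programmed by the host,
+ * so the VF variants below are intentionally left empty.
+ */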
+static void nbio_v4_3_sriov_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
+{
+}
+
+static void nbio_v4_3_sriov_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index,
+ int doorbell_size)
+{
+}
+
+static void nbio_v4_3_sriov_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+}
+
+static void nbio_v4_3_sriov_gc_doorbell_init(struct amdgpu_device *adev)
+{
+}
+
+const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = {
+ .get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset,
+ .get_rev_id = nbio_v4_3_get_rev_id,
+ .mc_access_enable = nbio_v4_3_mc_access_enable,
+ .get_memsize = nbio_v4_3_get_memsize,
+ .sdma_doorbell_range = nbio_v4_3_sriov_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v4_3_sriov_vcn_doorbell_range,
+ .gc_doorbell_init = nbio_v4_3_sriov_gc_doorbell_init,
+ .enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v4_3_sriov_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v4_3_get_clockgating_state,
+ .ih_control = nbio_v4_3_ih_control,
+ .init_registers = nbio_v4_3_init_registers,
+ .remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
+ .get_rom_offset = nbio_v4_3_get_rom_offset,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
index ade43661d7a9..711999ceedf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
@@ -28,5 +28,6 @@
extern const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v4_3_funcs;
+extern const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index e786b825cea9..f30bc826a878 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -68,16 +68,30 @@ static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instan
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
SIZE, doorbell_size);
+ } else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_SDMA0_DOORBELL_RANGE,
- OFFSET, doorbell_index);
+ SIZE, 0);
+ }
+
+ WREG32_PCIE_PORT(reg, doorbell_range);
+}
+
+static void nbio_v7_7_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+ u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
+ u32 doorbell_range = RREG32_PCIE_PORT(reg);
+
+ if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- SIZE, doorbell_size);
+ GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET,
+ doorbell_index);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- SIZE, 0);
+ GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0);
}
WREG32_PCIE_PORT(reg, doorbell_range);
@@ -227,6 +241,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
}
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (enable) {
+ data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ } else {
+ data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (enable)
+ data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+ else
+ data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+ if (enable) {
+ data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -238,9 +327,13 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.mc_access_enable = nbio_v7_7_mc_access_enable,
.get_memsize = nbio_v7_7_get_memsize,
.sdma_doorbell_range = nbio_v7_7_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v7_7_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index a2588200ea58..0b2ac418e4ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 30386d34d0d6..5b5b1ef0c2b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -20,6 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <drm/drm_drv.h>
+#include <linux/vmalloc.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
@@ -42,6 +44,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -58,6 +61,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -104,6 +110,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -419,6 +426,159 @@ static void psp_v13_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value);
}
+static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
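+ /* Poll bit 31 of C2PMSG_35 for completion, for at most
+ * MEM_TRAIN_SEND_MSG_TIMEOUT_US in total.
+ */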
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ dev_dbg(adev->dev, "training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeeded" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+
+static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t *)ctx->sys_cache;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t p2c_header[4];
+ uint32_t sz;
+ void *buf;
+ int ret, idx;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ dev_dbg(adev->dev, "Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ dev_err(adev->dev, "Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v13_0_is_sos_alive(psp)) {
+ dev_dbg(adev->dev, "SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ dev_dbg(adev->dev, "sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ dev_dbg(adev->dev, "Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ dev_dbg(adev->dev, "sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ dev_dbg(adev->dev, "sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ dev_dbg(adev->dev, "p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ dev_dbg(adev->dev, "Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ /*
+ * Long training will encroach a certain amount on the bottom of VRAM;
+ * save the content from the bottom of VRAM to system memory
+ * before training, and restore it after training to avoid
+ * VRAM corruption.
+ */
+ sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+
+ if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+ dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+ adev->gmc.visible_vram_size,
+ adev->mman.aper_base_kaddr);
+ return -EINVAL;
+ }
+
+ buf = vmalloc(sz);
+ if (!buf) {
+ dev_err(adev->dev, "failed to allocate system memory.\n");
+ return -ENOMEM;
+ }
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
+ ret = psp_v13_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
+ drm_dev_exit(idx);
+ return ret;
+ }
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
+ vfree(buf);
+ drm_dev_exit(idx);
+ } else {
+ vfree(buf);
+ return -ENODEV;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v13_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ dev_err(adev->dev, "send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
{
struct amdgpu_device *adev = psp->adev;
@@ -567,6 +727,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.ring_destroy = psp_v13_0_ring_destroy,
.ring_get_wptr = psp_v13_0_ring_get_wptr,
.ring_set_wptr = psp_v13_0_ring_set_wptr,
+ .mem_training = psp_v13_0_memory_training,
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
new file mode 100644
index 000000000000..321089dfa7db
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v13_0_4.h"
+
+#include "mp/mp_13_0_4_offset.h"
+#include "mp/mp_13_0_4_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/psp_13_0_4_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_4_ta.bin");
+
+static int psp_v13_0_4_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const char *chip_name;
+ char ucode_prefix[30];
+ int err = 0;
+
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ chip_name = ucode_prefix;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ err = psp_init_toc_microcode(psp, chip_name);
+ if (err)
+ return err;
+ err = psp_init_ta_microcode(psp, chip_name);
+ if (err)
+ return err;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static bool psp_v13_0_4_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
+static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* Wait for the bootloader to signify that it is
+ * ready, with bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_4_bootloader_load_component(struct psp_context *psp,
+ struct psp_bin_desc *bin_desc,
+ enum psp_bootloader_cmd bl_cmd)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_4_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_4_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy the PSP component binary to memory */
+ memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes);
+
+ /* Provide the PSP component to the bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = bl_cmd;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ ret = psp_v13_0_4_wait_for_bootloader(psp);
+
+ return ret;
+}
+
+static int psp_v13_0_4_bootloader_load_kdb(struct psp_context *psp)
+{
+ return psp_v13_0_4_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
+}
+
+static int psp_v13_0_4_bootloader_load_spl(struct psp_context *psp)
+{
+ return psp_v13_0_4_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_TOS_SPL_TABLE);
+}
+
+static int psp_v13_0_4_bootloader_load_sysdrv(struct psp_context *psp)
+{
+ return psp_v13_0_4_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
+}
+
+static int psp_v13_0_4_bootloader_load_soc_drv(struct psp_context *psp)
+{
+ return psp_v13_0_4_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV);
+}
+
+static int psp_v13_0_4_bootloader_load_intf_drv(struct psp_context *psp)
+{
+ return psp_v13_0_4_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV);
+}
+
+static int psp_v13_0_4_bootloader_load_dbg_drv(struct psp_context *psp)
+{
+ return psp_v13_0_4_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
+}
+
+static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
+{
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_4_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_4_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy Secure OS binary to PSP memory */
+ memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes);
+
+ /* Provide the PSP secure OS to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
+ 0, true);
+
+ return ret;
+}
+
+static int psp_v13_0_4_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ ring->ring_type = ring_type;
+
+ /* allocate a 4K page of local frame buffer memory for the ring */
+ ring->ring_size = 0x1000;
+ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ if (ret) {
+ ring->ring_size = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int psp_v13_0_4_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* Write the ring destroy command */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ } else {
+ /* Write the ring destroy command */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_4_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ ret = psp_v13_0_4_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v13_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_4_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v13_0_4_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
+}
+
+static uint32_t psp_v13_0_4_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev))
+ data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v13_0_4_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value);
+}
+
+static const struct psp_funcs psp_v13_0_4_funcs = {
+ .init_microcode = psp_v13_0_4_init_microcode,
+ .bootloader_load_kdb = psp_v13_0_4_bootloader_load_kdb,
+ .bootloader_load_spl = psp_v13_0_4_bootloader_load_spl,
+ .bootloader_load_sysdrv = psp_v13_0_4_bootloader_load_sysdrv,
+ .bootloader_load_soc_drv = psp_v13_0_4_bootloader_load_soc_drv,
+ .bootloader_load_intf_drv = psp_v13_0_4_bootloader_load_intf_drv,
+ .bootloader_load_dbg_drv = psp_v13_0_4_bootloader_load_dbg_drv,
+ .bootloader_load_sos = psp_v13_0_4_bootloader_load_sos,
+ .ring_init = psp_v13_0_4_ring_init,
+ .ring_create = psp_v13_0_4_ring_create,
+ .ring_stop = psp_v13_0_4_ring_stop,
+ .ring_destroy = psp_v13_0_4_ring_destroy,
+ .ring_get_wptr = psp_v13_0_4_ring_get_wptr,
+ .ring_set_wptr = psp_v13_0_4_ring_set_wptr,
+};
+
+void psp_v13_0_4_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v13_0_4_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.h b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.h
new file mode 100644
index 000000000000..8547b8d514d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __PSP_V13_0_4_H__
+#define __PSP_V13_0_4_H__
+
+#include "amdgpu_psp.h"
+
+void psp_v13_0_4_set_psp_funcs(struct psp_context *psp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 65181efba50e..0cf9d3b486b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2002,7 +2002,6 @@ static int sdma_v4_0_sw_fini(void *handle)
static int sdma_v4_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
@@ -2011,9 +2010,7 @@ static int sdma_v4_0_hw_init(void *handle)
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
- r = sdma_v4_0_start(adev);
-
- return r;
+ return sdma_v4_0_start(adev);
}
static int sdma_v4_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 83c6ccaaa9e4..95689ef4be10 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1413,12 +1413,9 @@ static int sdma_v5_2_sw_fini(void *handle)
static int sdma_v5_2_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = sdma_v5_2_start(adev);
-
- return r;
+ return sdma_v5_2_start(adev);
}
static int sdma_v5_2_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 0200cb3a31a4..7ae572a08cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -47,6 +47,7 @@
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -559,7 +560,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -593,7 +595,10 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev))
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
+ else
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
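The hunk above flips WPTR_POLL_ENABLE per SR-IOV mode with REG_SET_FIELD(). As a rough illustration of the read-modify-write that macro performs, here is a small self-contained sketch; the field name, shift and values are invented for the example and are not the real SDMA0_QUEUE0_RB_CNTL layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only; not the real SDMA register definition. */
#define RB_CNTL_WPTR_POLL_ENABLE_SHIFT  12
#define RB_CNTL_WPTR_POLL_ENABLE_MASK   (1u << RB_CNTL_WPTR_POLL_ENABLE_SHIFT)

/* Read-modify-write of one field, the operation REG_SET_FIELD() expresses. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t rb_cntl = 0x00000400;	/* pretend value read from the register */
	int is_vf = 1;			/* SR-IOV virtual function? */

	/* VFs rely on wptr polling; bare metal keeps it disabled. */
	rb_cntl = set_field(rb_cntl, RB_CNTL_WPTR_POLL_ENABLE_MASK,
			    RB_CNTL_WPTR_POLL_ENABLE_SHIFT, is_vf ? 1 : 0);

	printf("rb_cntl = 0x%08x\n", rb_cntl);
	return 0;
}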
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
new file mode 100644
index 000000000000..7aa570c1ce4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "sienna_cichlid.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ } else {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
+ adev->pm.fw_version >= 0x3a5500 &&
+ !amdgpu_sriov_vf(adev)) {
+ reset_context->method = AMD_RESET_METHOD_MODE2;
+ return handler;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+sienna_cichlid_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->gfxhub.funcs->mode2_save_regs)
+ adev->gfxhub.funcs->mode2_save_regs(adev);
+ if (adev->gfxhub.funcs->halt)
+ adev->gfxhub.funcs->halt(adev);
+ r = sienna_cichlid_mode2_suspend_ip(adev);
+ }
+
+ return r;
+}
+
+static void sienna_cichlid_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+
+static int sienna_cichlid_mode2_reset(struct amdgpu_device *adev)
+{
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ return amdgpu_dpm_mode2_reset(adev);
+}
+
+static int
+sienna_cichlid_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r;
+
+ r = sienna_cichlid_mode2_reset(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "ASIC reset failed with error, %d ", r);
+ }
+ return r;
+}
+
+static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ int i, r;
+ struct psp_context *psp = &adev->psp;
+
+ r = psp_rlc_autoload_start(psp);
+ if (r) {
+ dev_err(adev->dev, "Failed to start rlc autoload\n");
+ return r;
+ }
+
+ /* Reinit GFXHUB */
+ if (adev->gfxhub.funcs->mode2_restore_regs)
+ adev->gfxhub.funcs->mode2_restore_regs(adev);
+ adev->gfxhub.funcs->init(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r) {
+ dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = sienna_cichlid_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ /*
+ * Register this ASIC as tracked again, since the reset has
+ * already completed successfully.
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ goto end;
+ }
+
+end:
+ if (r)
+ return -EAGAIN;
+ else
+ return r;
+}
+
+static struct amdgpu_reset_handler sienna_cichlid_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = sienna_cichlid_mode2_prepare_hwcontext,
+ .perform_reset = sienna_cichlid_mode2_perform_reset,
+ .restore_hwcontext = sienna_cichlid_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = sienna_cichlid_mode2_reset,
+};
+
+int sienna_cichlid_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = sienna_cichlid_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = sienna_cichlid_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &sienna_cichlid_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int sienna_cichlid_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
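For context, a minimal user-space model of the selection logic in sienna_cichlid_get_reset_handler() above; struct reset_handler and pick_handler() here are stand-ins, and only the decision itself (honor an explicitly requested method, otherwise fall back to MODE2 when the PMFW version is at least 0x3a5500 and the device is not a VF) mirrors the new code.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the driver structures; names are illustrative. */
enum reset_method { RESET_NONE, RESET_MODE1, RESET_MODE2 };

struct reset_handler {
	enum reset_method method;
	const char *name;
};

static const struct reset_handler *
pick_handler(const struct reset_handler *tbl, size_t n,
	     enum reset_method requested, unsigned int fw_ver, int is_vf)
{
	for (size_t i = 0; i < n; i++) {
		if (requested != RESET_NONE) {
			/* An explicitly requested method wins. */
			if (tbl[i].method == requested)
				return &tbl[i];
		} else if (tbl[i].method == RESET_MODE2 &&
			   fw_ver >= 0x3a5500 && !is_vf) {
			/* Otherwise prefer MODE2 on new enough PMFW, bare metal only. */
			return &tbl[i];
		}
	}
	return NULL;
}

int main(void)
{
	const struct reset_handler tbl[] = { { RESET_MODE2, "mode2" } };
	const struct reset_handler *h =
		pick_handler(tbl, 1, RESET_NONE, 0x3a5800, 0);

	printf("selected: %s\n", h ? h->name : "none");
	return 0;
}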
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h
index 5dcfbd8e2697..5213b162dacd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef DML_WRAPPER_H_
-#define DML_WRAPPER_H_
+#ifndef __SIENNA_CICHLID_H__
+#define __SIENNA_CICHLID_H__
-#include "dc.h"
-#include "dml/display_mode_vba.h"
+#include "amdgpu.h"
-bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate);
+int sienna_cichlid_reset_init(struct amdgpu_device *adev);
+int sienna_cichlid_reset_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 00e9b7089feb..a26c5723c46e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -80,6 +80,7 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 2):
if (encode)
*codecs = &vcn_4_0_0_video_codecs_encode;
else
@@ -178,7 +179,7 @@ void soc21_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL), grbm_gfx_cntl);
+ WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -493,6 +494,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+ bool enter)
+{
+ if (enter)
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ else
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (adev->gfx.funcs->update_perfmon_mgcg)
+ adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+ return 0;
+}
+
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -512,6 +527,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
+ .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
static int soc21_common_early_init(void *handle)
@@ -545,8 +561,10 @@ static int soc21_common_early_init(void *handle)
case IP_VERSION(11, 0, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_FGCG |
@@ -564,6 +582,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB |
AMD_PG_SUPPORT_MMHUB;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ }
adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
break;
case IP_VERSION(11, 0, 2):
@@ -574,7 +596,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_ATHUB_MGCG |
- AMD_CG_SUPPORT_ATHUB_LS;
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -584,10 +608,42 @@ static int soc21_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 0, 1):
- adev->cg_flags = 0;
- adev->pg_flags = 0;
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
+ case IP_VERSION(11, 0, 3):
+ adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ if (amdgpu_sriov_vf(adev)) {
+ /* hypervisor control CG and PG enablement */
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ }
+ adev->external_rev_id = adev->rev_id + 0x20;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -679,6 +735,8 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
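The soc21_common_early_init() hunks above build the adev->cg_flags/pg_flags masks per IP version and clear them under SR-IOV. Below is a tiny sketch of how such a mask is typically consumed later; the CG_SUPPORT_* bit values are made up for the example and do not match the real AMD_CG_SUPPORT_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of clock-gating support bits; real values differ. */
#define CG_SUPPORT_GFX_CGCG  (1u << 0)
#define CG_SUPPORT_GFX_MGCG  (1u << 1)
#define CG_SUPPORT_VCN_MGCG  (1u << 2)

int main(void)
{
	uint32_t cg_flags = CG_SUPPORT_GFX_CGCG | CG_SUPPORT_VCN_MGCG;
	int is_vf = 0;

	/* Under SR-IOV the hypervisor owns CG/PG, so the guest clears the
	 * mask, as the early_init hunks above do. */
	if (is_vf)
		cg_flags = 0;

	if (cg_flags & CG_SUPPORT_GFX_CGCG)
		printf("coarse grain clock gating enabled for GFX\n");
	if (!(cg_flags & CG_SUPPORT_GFX_MGCG))
		printf("medium grain clock gating left disabled for GFX\n");
	return 0;
}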
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index a91ffbf902d4..09c89faa8c27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -30,6 +30,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "mmsch_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@@ -45,6 +46,8 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN_HARVEST_MMSCH 0
+
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
@@ -53,12 +56,14 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
};
+static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
+static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v4_0_early_init - set function pointers
@@ -71,6 +76,9 @@ static int vcn_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
+
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -92,6 +100,7 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ int vcn_doorbell_index = 0;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -103,6 +112,12 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_sriov_vf(adev)) {
+ vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
+ /* get DWORD offset */
+ vcn_doorbell_index = vcn_doorbell_index << 1;
+ }
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -119,7 +134,10 @@ static int vcn_v4_0_sw_init(void *handle)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
+ if (amdgpu_sriov_vf(adev))
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
sprintf(ring->name, "vcn_unified_%d", i);
@@ -132,10 +150,19 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
+ if (amdgpu_sriov_vf(adev))
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
@@ -154,7 +181,7 @@ static int vcn_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r, idx;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -169,6 +196,9 @@ static int vcn_v4_0_sw_fini(void *handle)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -191,18 +221,42 @@ static int vcn_v4_0_hw_init(void *handle)
struct amdgpu_ring *ring;
int i, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v4_0_start_sriov(adev);
+ if (r)
+ goto done;
- ring = &adev->vcn.inst[i].ring_enc[0];
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
+ ring->sched.ready = false;
+ ring->no_scheduler = true;
+ dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
+ } else {
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ } else {
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+
+ }
}
done:
@@ -230,12 +284,14 @@ static int vcn_v4_0_hw_fini(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
}
+
}
return 0;
@@ -1041,6 +1097,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode==1) {
+ r = -1;
if (status & 2) {
r = 0;
break;
@@ -1106,6 +1163,214 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
+{
+ int i;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v4_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v4_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v4_0_cmd_end end = { {0} };
+ struct mmsch_v4_0_init_header header;
+
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
+ for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
+ header.inst[i].init_status = 0;
+ header.inst[i].table_offset = 0;
+ header.inst[i].table_size = 0;
+ }
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ table_size = 0;
+
+ MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+ offset = 0;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ } else {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[i].gpu_addr + offset;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[i].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+
+ /* add end packet */
+ MMSCH_V4_0_INSERT_END();
+
+ /* refine header */
+ header.inst[i].init_status = 0;
+ header.inst[i].table_offset = header.total_size;
+ header.inst[i].table_size = table_size;
+ header.total_size += table_size;
+ }
+
+ /* Update init table header in memory */
+ size = sizeof(struct mmsch_v4_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ /* message MMSCH (in VCN[0]) to initialize this client
+ * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ /* 2, update vmid of descriptor */
+ tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ size = header.total_size;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ /* 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ param = 0x00000001;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+
+ return 0;
+}
+
/**
* vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
*
@@ -1114,7 +1379,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
@@ -1132,7 +1397,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
- return 0;
}
/**
@@ -1153,7 +1417,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_stop_dpg_mode(adev, i);
+ vcn_v4_0_stop_dpg_mode(adev, i);
continue;
}
@@ -1596,6 +1860,15 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+	/* For SRIOV, the guest should not control VCN power-gating; the
+	 * MMSCH firmware controls both power-gating and clock-gating, so
+	 * the guest must avoid touching CGC and PG.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if(state == adev->vcn.cur_state)
return 0;
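vcn_v4_0_start_sriov() above ends with a poll-until-response loop on regMMSCH_VF_MAILBOX_RESP. Here is a self-contained model of that loop shape, assuming a fake read_mailbox_resp() in place of the RREG32_SOC15() read; the 10us step and 1000us budget follow the driver loop.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the mailbox register read; pretends the firmware answers on
 * the third poll. */
static uint32_t read_mailbox_resp(void)
{
	static int calls;
	return ++calls >= 3 ? 1 /* MMSCH_VF_MAILBOX_RESP__OK */ : 0;
}

int main(void)
{
	const uint32_t expected = 1;
	uint32_t resp = 0, waited_us = 0, timeout_us = 1000;

	/* Poll every 10us until the response register goes non-zero or the
	 * budget is exhausted, the same shape as the wait loop above. */
	while (resp != expected) {
		resp = read_mailbox_resp();
		if (resp)
			break;
		waited_us += 10;	/* udelay(10) in the driver */
		if (waited_us >= timeout_us) {
			fprintf(stderr, "MMSCH init timed out\n");
			return 1;
		}
	}
	printf("MMSCH responded 0x%08x after ~%uus\n", resp, waited_us);
	return 0;
}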
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cdd599a08125..03b7066471f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 3b4eb8285943..2022ffbb8dba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
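Both the vega10 and vega20 IH hunks special-case ih_soft because the software ring has no backing hardware registers. The toy below, with made-up types, only illustrates why set_rptr() becomes a pure memory update for the soft ring.

#include <stdint.h>
#include <stdio.h>

struct ih_ring {
	const char *name;
	int has_hw_regs;	/* the soft ring has no backing registers */
	uint32_t rptr;
};

/* Mirrors the early return added to vega10/vega20 *_ih_set_rptr(): the soft
 * ring only updates its in-memory read pointer. */
static void ih_set_rptr(struct ih_ring *ih, uint32_t rptr)
{
	ih->rptr = rptr;
	if (!ih->has_hw_regs)
		return;
	/* ...a doorbell or register write would go here for hardware rings... */
	printf("%s: wrote rptr 0x%x to hardware\n", ih->name, rptr);
}

int main(void)
{
	struct ih_ring soft = { "ih_soft", 0, 0 }, ring0 = { "ih_ring0", 1, 0 };

	ih_set_rptr(&soft, 32);
	ih_set_rptr(&ring0, 64);
	return 0;
}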
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2b3d8bc8f0aa..84da1a9ce37c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -327,6 +327,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+ err = -ENOMEM;
+ goto err_alloc_doorbells;
+ }
+
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
@@ -404,6 +410,7 @@ err_create_queue:
if (wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
+err_alloc_doorbells:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
@@ -869,14 +876,11 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_wait_events_args *args = data;
- int err;
- err = kfd_wait_on_events(p, args->num_events,
+ return kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &args->wait_result);
-
- return err;
+ &args->timeout, &args->wait_result);
}
static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
struct kfd_process *p, void *data)
@@ -1092,6 +1096,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
offset = kfd_get_process_doorbells(pdd);
+ if (!offset) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
if (args->size != PAGE_SIZE) {
err = -EINVAL;
@@ -2173,6 +2181,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
+ if (!offset)
+ return -ENOMEM;
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
/* MMIO BOs need remapped bus address */
if (bo_bucket->size != PAGE_SIZE) {
@@ -2847,7 +2857,6 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
- int ret;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2867,12 +2876,11 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
process->pasid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
- ret = io_remap_pfn_range(vma,
+ return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
PAGE_SIZE,
vma->vm_page_prot);
- return ret;
}
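The new err_alloc_doorbells label extends the usual goto-unwind ladder in kfd_ioctl_create_queue(). Below is a generic, self-contained sketch of that idiom (acquire in order, unwind in reverse, one label per step); the resources and label names are stand-ins, not the real KFD objects, and in the real ioctl the doorbells persist with the process on success.

#include <stdio.h>
#include <stdlib.h>

/* Toy resources; in the ioctl these would be the bound process, the process
 * doorbells and the GART-mapped wptr BO. */
static int take(const char *what) { printf("take %s\n", what); return 0; }
static void drop(const char *what) { printf("drop %s\n", what); }

int main(void)
{
	int err;

	err = take("process");
	if (err)
		goto err_bind;
	err = take("doorbells");
	if (err)
		goto err_doorbells;
	err = take("wptr bo");
	if (err)
		goto err_wptr;

	printf("queue created\n");
	return 0;

	/* Unwind strictly in reverse order of acquisition; each label only
	 * undoes the steps that definitely succeeded before the failure. */
err_wptr:
	drop("doorbells");
err_doorbells:
	drop("process");
err_bind:
	return EXIT_FAILURE;
}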
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index a5409531a2fd..24b414cff3ec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1522,6 +1522,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
pcache_info = cache_info;
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev, pcache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 581c3a30fee1..ad5a40a685ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -101,6 +101,8 @@ void kfd_debugfs_init(void)
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
+ debugfs_create_file("mem_limit", S_IFREG | 0200, debugfs_root,
+ kfd_debugfs_kfd_mem_limits, &kfd_debugfs_fops);
}
void kfd_debugfs_fini(void)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f5853835f03a..65a1d4f9004b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -91,6 +91,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
kfd->device_info.num_sdma_queues_per_engine = 8;
break;
default:
@@ -102,13 +103,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
- case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
break;
+ case IP_VERSION(6, 0, 1):
+ /* Reserve 1 for paging and 1 for gfx */
+ kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+ /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+ kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ break;
default:
break;
}
@@ -145,6 +152,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
default:
@@ -377,12 +385,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
- gfx_target_version = 100306;
- if (!vf)
- f2g = &gfx_v10_3_kfd2kgd;
- break;
case IP_VERSION(10, 3, 7):
- gfx_target_version = 100307;
+ gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
@@ -398,6 +402,11 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110002;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 0, 3):
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
default:
break;
}
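The reserved_sdma_queues_bitmap comments above describe an interleaved layout: with n engines, the bit for (engine e, queue q) is q * n + e. A small standalone check of that mapping, assuming two SDMA engines for the 0xF case and one engine for the 6.0.1 / 0x3 case; the helper name is invented for the example.

#include <stdint.h>
#include <stdio.h>

/* bit index = queue * num_engines + engine, per the comments above. */
static int sdma_queue_is_reserved(uint64_t bitmap, unsigned int num_engines,
				  unsigned int engine, unsigned int queue)
{
	return (int)((bitmap >> (queue * num_engines + engine)) & 1);
}

int main(void)
{
	/* 0xF with two engines: queues 0 and 1 reserved on both engines. */
	printf("e1 q1 reserved: %d\n", sdma_queue_is_reserved(0xF, 2, 1, 1));
	/* 0x3 with one engine (SDMA 6.0.1): queues 0 and 1 reserved. */
	printf("e0 q1 reserved: %d\n", sdma_queue_is_reserved(0x3, 1, 0, 1));
	return 0;
}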
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cb3d2ccc5100..b33798f89ef0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -157,6 +157,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(pdd);
+ if (!address)
+ return -ENOMEM;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
@@ -275,6 +277,13 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
+ if (!pdd->doorbell_index) {
+ int r = kfd_alloc_process_doorbells(pdd->dev,
+ &pdd->doorbell_index);
+ if (r)
+ return 0;
+ }
+
return pdd->dev->doorbell_base +
pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
}
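kfd_get_process_doorbells() now allocates the doorbell index lazily on first use and signals failure with a 0 address, which is what the new callers check. A compact stand-alone sketch of that allocate-on-first-use pattern; the types and the fake allocator are illustrative only.

#include <stdio.h>

struct pdd {
	unsigned int doorbell_index;	/* 0 means "not allocated yet" */
};

static int alloc_doorbells(unsigned int *index)
{
	*index = 7;	/* pretend index handed out by the doorbell bitmap */
	return 0;
}

/* Allocate on first use; return 0 on failure so callers can bail out. */
static unsigned long get_process_doorbells(struct pdd *pdd,
					    unsigned long base,
					    unsigned long slice)
{
	if (!pdd->doorbell_index && alloc_doorbells(&pdd->doorbell_index))
		return 0;
	return base + pdd->doorbell_index * slice;
}

int main(void)
{
	struct pdd pdd = { 0 };

	printf("doorbell offset: %#lx\n",
	       get_process_doorbells(&pdd, 0x100000, 0x1000));
	return 0;
}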
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3942a56c28bb..83e3ce9f6049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
return msecs_to_jiffies(user_timeout_ms) + 1;
}
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+ bool undo_auto_reset)
{
uint32_t i;
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
+ if (undo_auto_reset && waiters[i].activated &&
+ waiters[i].event && waiters[i].event->auto_reset)
+ set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
- long timeout = user_timeout_to_jiffies(user_timeout_ms);
+ long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
}
if (signal_pending(current)) {
- /*
- * This is wrong when a nonzero, non-infinite timeout
- * is specified. We need to use
- * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
- * contains a union with data for each user and it's
- * in generic kernel code that I don't want to
- * touch yet.
- */
ret = -ERESTARTSYS;
+ if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+ *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+ *user_timeout_ms = jiffies_to_msecs(
+ max(0l, timeout-1));
break;
}
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
event_waiters, events);
out_unlock:
- free_waiters(num_events, event_waiters);
+ free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index a9466d154395..34772fe74296 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -146,7 +146,7 @@ static void interrupt_wq(struct work_struct *work)
struct kfd_dev *dev = container_of(work, struct kfd_dev,
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
- long start_jiffies = jiffies;
+ unsigned long start_jiffies = jiffies;
if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(dev->adev->dev, "Ring entry too small\n");
@@ -156,7 +156,7 @@ static void interrupt_wq(struct work_struct *work)
while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
dev->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
- if (jiffies - start_jiffies > HZ) {
+ if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
* reschedule the worker to avoid soft-lockup warnings
*/
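The interrupt worker now uses time_is_before_jiffies() instead of a raw subtraction so the one-second budget stays correct across jiffies wraparound. Below is a small demo of the signed-difference trick behind those helpers, using a plain 32-bit counter.

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has the deadline passed?" test: the signed difference trick
 * behind time_after()/time_is_before_jiffies() in <linux/jiffies.h>. */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
	return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
	uint32_t hz = 250;
	uint32_t start = 0xFFFFFF80;	/* counter about to wrap */
	uint32_t deadline = start + hz;	/* wraps to a small value */
	uint32_t now = start + 0x70;	/* 0x70 ticks later, before the deadline */

	/* Comparing absolute values breaks once the deadline wraps. */
	printf("naive now > deadline: %d (wrongly claims expired)\n",
	       now > deadline);
	printf("wrap-safe comparison: %d\n", deadline_passed(now, deadline));
	return 0;
}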
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index eecb262270e2..b059a77b6081 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -685,13 +685,15 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
- migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
+ if (adev->gmc.xgmi.connected_to_cpu)
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+ else
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
-
if (!buf)
goto out;
@@ -974,7 +976,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
{
struct kfd_dev *kfddev = adev->kfd.dev;
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res = NULL;
unsigned long size;
void *r;
@@ -989,28 +991,34 @@ int svm_migrate_init(struct amdgpu_device *adev)
* should remove reserved size
*/
size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
- res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
- if (IS_ERR(res))
- return -ENOMEM;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ pgmap->range.start = adev->gmc.aper_base;
+ pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+ pgmap->type = MEMORY_DEVICE_COHERENT;
+ } else {
+ res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+ if (IS_ERR(res))
+ return -ENOMEM;
+ pgmap->range.start = res->start;
+ pgmap->range.end = res->end;
+ pgmap->type = MEMORY_DEVICE_PRIVATE;
+ }
- pgmap->type = MEMORY_DEVICE_PRIVATE;
pgmap->nr_range = 1;
- pgmap->range.start = res->start;
- pgmap->range.end = res->end;
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
- pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
-
+ pgmap->flags = 0;
/* Device manager releases device-specific resources, memory region and
* pgmap when driver disconnects from device.
*/
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
-
/* Disable SVM support capability */
pgmap->type = 0;
- devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+ devm_release_mem_region(adev->dev, res->start,
+ res->end - res->start + 1);
return PTR_ERR(r);
}
@@ -1019,6 +1027,8 @@ int svm_migrate_init(struct amdgpu_device *adev)
amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));
+ svm_range_set_max_pages(adev);
+
pr_info("HMM registered %ldMB device memory\n", size >> 20);
return 0;
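svm_migrate_init() pads real_vram_size to whole 2MB chunks with ALIGN() before registering it, whether the pagemap ends up DEVICE_COHERENT (aperture-backed on XGMI-connected parts) or DEVICE_PRIVATE. A tiny standalone demo of that align-up arithmetic; ALIGN_POW2 is a local stand-in for the kernel's ALIGN().

#include <stdint.h>
#include <stdio.h>

/* Round up to a power-of-two boundary, like the kernel's ALIGN(). */
#define ALIGN_POW2(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t vram = (16ULL << 30) - 512 * 1024;	/* 16GB minus 512KB reserved */
	uint64_t size = ALIGN_POW2(vram, 2ULL << 20);	/* pad to 2MB chunks */

	printf("registering %llu MB of device memory\n",
	       (unsigned long long)(size >> 20));
	return 0;
}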
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d03a3b9c9c5d..bf610e3b683b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 6c83a519b3a1..951b63677248 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1499,11 +1499,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
- if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
- pr_err("Failed to alloc doorbell for pdd\n");
- goto err_free_pdd;
- }
-
if (init_doorbell_bitmap(&pdd->qpd, dev)) {
pr_err("Failed to init doorbell for process\n");
goto err_free_pdd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index b0c1c7172a45..11074cc8c333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -46,6 +46,12 @@
*/
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
+/* A giant svm range is split into smaller ranges based on this value, which is
+ * the minimum over all dGPUs/APUs of 1/32 of VRAM size, clamped between 2MB and
+ * 1GB and rounded down to a power of two.
+ */
+static uint64_t max_svm_range_pages;
+
struct criu_svm_metadata {
struct list_head list;
struct kfd_criu_svm_range_priv_data data;
@@ -260,13 +266,22 @@ void svm_range_free_dma_mappings(struct svm_range *prange)
}
}
-static void svm_range_free(struct svm_range *prange)
+static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
{
+ uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
+ struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
prange->start, prange->last);
svm_range_vram_node_free(prange);
svm_range_free_dma_mappings(prange);
+
+ if (update_mem_usage && !p->xnack_enabled) {
+ pr_debug("unreserve mem limit: %lld\n", size);
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ }
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
kfree(prange);
@@ -285,7 +300,7 @@ svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
- uint64_t last)
+ uint64_t last, bool update_mem_usage)
{
uint64_t size = last - start + 1;
struct svm_range *prange;
@@ -294,6 +309,15 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
prange = kzalloc(sizeof(*prange), GFP_KERNEL);
if (!prange)
return NULL;
+
+ p = container_of(svms, struct kfd_process, svms);
+ if (!p->xnack_enabled && update_mem_usage &&
+ amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
+ pr_info("SVM mapping failed, exceeds resident system memory limit\n");
+ kfree(prange);
+ return NULL;
+ }
prange->npages = size;
prange->svms = svms;
prange->start = start;
@@ -308,7 +332,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
- p = container_of(svms, struct kfd_process, svms);
if (p->xnack_enabled)
bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
MAX_GPU_INSTANCE);
@@ -518,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
- svm_bo->svms = prange->svms;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -1001,9 +1023,9 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
svms = prange->svms;
if (old_start == start)
- *new = svm_range_new(svms, last + 1, old_last);
+ *new = svm_range_new(svms, last + 1, old_last, false);
else
- *new = svm_range_new(svms, old_start, start - 1);
+ *new = svm_range_new(svms, old_start, start - 1, false);
if (!*new)
return -ENOMEM;
@@ -1011,7 +1033,7 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
if (r) {
pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
r, old_start, old_last, start, last);
- svm_range_free(*new);
+ svm_range_free(*new, false);
*new = NULL;
}
@@ -1846,7 +1868,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
{
struct svm_range *new;
- new = svm_range_new(old->svms, old->start, old->last);
+ new = svm_range_new(old->svms, old->start, old->last, false);
if (!new)
return NULL;
@@ -1870,6 +1892,46 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
return new;
}
+void svm_range_set_max_pages(struct amdgpu_device *adev)
+{
+ uint64_t max_pages;
+ uint64_t pages, _pages;
+
+ /* 1/32 VRAM size in pages */
+ pages = adev->gmc.real_vram_size >> 17;
+ pages = clamp(pages, 1ULL << 9, 1ULL << 18);
+ pages = rounddown_pow_of_two(pages);
+ do {
+ max_pages = READ_ONCE(max_svm_range_pages);
+ _pages = min_not_zero(max_pages, pages);
+ } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
+}
+
+static int
+svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
+ uint64_t max_pages, struct list_head *insert_list,
+ struct list_head *update_list)
+{
+ struct svm_range *prange;
+ uint64_t l;
+
+ pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
+ max_pages, start, last);
+
+ while (last >= start) {
+ l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
+
+ prange = svm_range_new(svms, start, l, true);
+ if (!prange)
+ return -ENOMEM;
+ list_add(&prange->list, insert_list);
+ list_add(&prange->update_list, update_list);
+
+ start = l + 1;
+ }
+ return 0;
+}
+
/**
* svm_range_add - add svm range and handle overlap
* @p: the range add to this process svms
@@ -1910,6 +1972,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
struct interval_tree_node *node;
struct svm_range *prange;
struct svm_range *tmp;
+ struct list_head new_list;
int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
@@ -1917,6 +1980,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
INIT_LIST_HEAD(update_list);
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
+ INIT_LIST_HEAD(&new_list);
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
@@ -1972,14 +2036,11 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
/* insert a new node if needed */
if (node->start > start) {
- prange = svm_range_new(svms, start, node->start - 1);
- if (!prange) {
- r = -ENOMEM;
+ r = svm_range_split_new(svms, start, node->start - 1,
+ READ_ONCE(max_svm_range_pages),
+ &new_list, update_list);
+ if (r)
goto out;
- }
-
- list_add(&prange->list, insert_list);
- list_add(&prange->update_list, update_list);
}
node = next;
@@ -1987,20 +2048,20 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
}
/* add a final range at the end if needed */
- if (start <= last) {
- prange = svm_range_new(svms, start, last);
- if (!prange) {
- r = -ENOMEM;
- goto out;
- }
- list_add(&prange->list, insert_list);
- list_add(&prange->update_list, update_list);
- }
+ if (start <= last)
+ r = svm_range_split_new(svms, start, last,
+ READ_ONCE(max_svm_range_pages),
+ &new_list, update_list);
out:
- if (r)
+ if (r) {
list_for_each_entry_safe(prange, tmp, insert_list, list)
- svm_range_free(prange);
+ svm_range_free(prange, false);
+ list_for_each_entry_safe(prange, tmp, &new_list, list)
+ svm_range_free(prange, true);
+ } else {
+ list_splice(&new_list, insert_list);
+ }
return r;
}
@@ -2047,7 +2108,7 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
svms, prange, prange->start, prange->last);
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
- svm_range_free(prange);
+ svm_range_free(prange, true);
break;
case SVM_OP_UPDATE_RANGE_NOTIFIER:
pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
@@ -2610,14 +2671,14 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
last = addr;
}
- prange = svm_range_new(&p->svms, start, last);
+ prange = svm_range_new(&p->svms, start, last, true);
if (!prange) {
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
- svm_range_free(prange);
+ svm_range_free(prange, true);
return NULL;
}
@@ -2917,7 +2978,7 @@ void svm_range_list_fini(struct kfd_process *p)
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
- svm_range_free(prange);
+ svm_range_free(prange, true);
}
mutex_destroy(&p->svms.lock);
@@ -3211,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
- struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
@@ -3219,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
- /* svm_range_bo_release destroys this worker thread. So during
- * the lifetime of this thread, kfd_process and mm will be valid.
- */
- p = container_of(svm_bo->svms, struct kfd_process, svms);
- mm = p->mm;
- if (!mm)
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+ } else {
+ svm_range_bo_unref(svm_bo);
return;
+ }
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
@@ -3243,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
- r = svm_migrate_vram_to_ram(prange,
- svm_bo->eviction_fence->mm,
+ r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION);
} while (!r && prange->actual_loc && --retries);
@@ -3262,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
+ mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
@@ -3333,7 +3392,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
prange->last);
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
- svm_range_free(prange);
+ svm_range_free(prange, false);
}
mmap_write_downgrade(mm);
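svm_range_split_new() above caps each new range at max_svm_range_pages by cutting on max_pages-aligned boundaries. A self-contained demo of that chunking arithmetic; the values are arbitrary and ALIGN_DOWN_POW2 stands in for the kernel's ALIGN_DOWN().

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN_POW2(x, a)	((x) & ~((uint64_t)(a) - 1))

/* Chunk [start, last] into pieces that never cross a max_pages-aligned
 * boundary, so no single range exceeds max_pages pages. */
static void split_range(uint64_t start, uint64_t last, uint64_t max_pages)
{
	while (last >= start) {
		uint64_t l = ALIGN_DOWN_POW2(start + max_pages, max_pages) - 1;

		if (l > last)
			l = last;
		printf("  [0x%llx 0x%llx]\n",
		       (unsigned long long)start, (unsigned long long)l);
		start = l + 1;
	}
}

int main(void)
{
	/* e.g. a 0x900-page request with a 0x400-page cap splits into three. */
	split_range(0x1100, 0x19ff, 0x400);
	return 0;
}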
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index eab7f6d3b13c..cfac13ad06ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -46,7 +46,6 @@ struct svm_range_bo {
spinlock_t list_lock;
struct amdgpu_amdkfd_fence *eviction_fence;
struct work_struct eviction_work;
- struct svm_range_list *svms;
uint32_t evicting;
struct work_struct release_work;
};
@@ -204,6 +203,9 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
+
+void svm_range_set_max_pages(struct amdgpu_device *adev);
+
#else
struct kfd_process;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 25990bec600d..3f0a4a415907 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
+ struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
- struct kfd_iolink_properties *gpu_link, *cpu_link;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
continue;
/* find CPU <--> CPU links */
+ cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
- list_for_each_entry(cpu_link,
+ list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
- if (cpu_link->node_to == gpu_link->node_to)
+ if (tmp_link->node_to == gpu_link->node_to) {
+ cpu_link = tmp_link;
break;
+ }
}
}
- if (cpu_link->node_to != gpu_link->node_to)
+ if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node*/
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 96cbc87f7b6b..413d8c6d592f 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64)
+ select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9ab01c58bedb..7a93162633ae 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1530,7 +1530,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
- adev->dm.dc->debug.disable_dsc_edp = true;
}
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
@@ -3800,8 +3799,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4110,6 +4112,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
/*
* In this architecture, the association
@@ -4301,6 +4304,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
+ amdgpu_set_panel_orientation(&aconnector->base);
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -5599,7 +5603,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
@@ -6667,6 +6672,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
@@ -6711,8 +6720,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
-
- amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -9297,6 +9304,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ *
* @dev: The DRM device
* @state: The atomic state to commit
*
@@ -9353,9 +9361,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
/* Skip connectors that are disabled or part of modeset already. */
- if (!old_con_state->crtc && !new_con_state->crtc)
- continue;
-
if (!new_con_state->crtc)
continue;
@@ -9882,8 +9887,19 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
return valid_vsdb_found ? i : -ENODEV;
}
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
+ * track of some of the display information in the internal data struct used by
+ * amdgpu_dm. This function checks the connector type to determine how the
+ * FreeSync parameters should be set.
+ */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid)
{
int i = 0;
struct detailed_timing *timing;
@@ -9896,8 +9912,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
- bool freesync_capable = false;
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
@@ -9926,7 +9942,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
-
if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
|| sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 90b306a1dd68..b44faaad9b0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -598,6 +598,10 @@ struct amdgpu_dm_connector {
* The 'current' sink is in dc_link->sink. */
struct dc_sink *dc_sink;
struct dc_link *dc_link;
+
+ /**
+ * @dc_em_sink: Reference to the emulated (virtual) sink.
+ */
struct dc_sink *dc_em_sink;
/* DM only */
@@ -610,7 +614,16 @@ struct amdgpu_dm_connector {
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
- int min_vfreq ;
+ /**
+ * @min_vfreq: Minimal frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
+ int min_vfreq;
+
+ /**
+ * @max_vfreq: Maximum frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
int max_vfreq ;
int pixel_clock_mhz;
@@ -705,11 +718,34 @@ struct dm_connector_state {
uint64_t pbn;
};
+/**
+ * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
+ *
+ * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
+ * struct is useful to keep track of the display-specific information about
+ * FreeSync.
+ */
struct amdgpu_hdmi_vsdb_info {
- unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */
- bool freesync_supported; /* FreeSync Supported */
- unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */
- unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */
+ /**
+ * @amd_vsdb_version: Vendor Specific Data Block Version, should be
+ * used to determine which Vendor Specific InfoFrame (VSIF) to send.
+ */
+ unsigned int amd_vsdb_version;
+
+ /**
+ * @freesync_supported: FreeSync Supported.
+ */
+ bool freesync_supported;
+
+ /**
+ * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
+ */
+ unsigned int min_refresh_rate_hz;
+
+ /**
+ * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
+ */
+ unsigned int max_refresh_rate_hz;
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a71177305bcd..a4cb23d059bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -29,7 +29,9 @@
#include "modules/color/color_gamma.h"
#include "basics/conversion.h"
-/*
+/**
+ * DOC: overview
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -71,8 +73,8 @@
#define MAX_DRM_LUT_VALUE 0xFFFF
-/*
- * Initialize the color module.
+/**
+ * amdgpu_dm_init_color_mod - Initialize the color module.
*
* We're not using the full color module, only certain components.
* Only call setup functions for components that we need.
@@ -82,7 +84,14 @@ void amdgpu_dm_init_color_mod(void)
setup_x_points_distribution();
}
-/* Extracts the DRM lut and lut size from a blob. */
+/**
+ * __extract_blob_lut - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
static const struct drm_color_lut *
__extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
{
@@ -90,13 +99,18 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
return blob ? (struct drm_color_lut *)blob->data : NULL;
}
-/*
- * Return true if the given lut is a linear mapping of values, i.e. it acts
- * like a bypass LUT.
+/**
+ * __is_lut_linear - check if the given lut is a linear mapping of values
+ * @lut: lut to check for linearity
+ * @size: lut size
*
* It is considered linear if the lut represents:
- * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in
- * [0, MAX_COLOR_LUT_ENTRIES)
+ * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in [0,
+ * MAX_COLOR_LUT_ENTRIES)
+ *
+ * Returns:
+ * True if the given lut is a linear mapping of values, i.e. it acts like a
+ * bypass LUT. Otherwise, false.
*/
static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
{
@@ -119,9 +133,13 @@ static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
return true;
}
-/*
- * Convert the drm_color_lut to dc_gamma. The conversion depends on the size
- * of the lut - whether or not it's legacy.
+/**
+ * __drm_lut_to_dc_gamma - convert the drm_color_lut to dc_gamma.
+ * @lut: DRM lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ * @is_legacy: legacy or atomic gamma
+ *
+ * The conversion depends on the size of the lut - whether or not it's legacy.
*/
static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
struct dc_gamma *gamma, bool is_legacy)
@@ -154,8 +172,11 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
}
-/*
- * Converts a DRM CTM to a DC CSC float matrix.
+/**
+ * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
+ * @ctm: DRM color transformation matrix
+ * @matrix: DC CSC float matrix
+ *
* The matrix needs to be a 3x4 (12 entry) matrix.
*/
static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
@@ -189,7 +210,18 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
}
}
-/* Calculates the legacy transfer function - only for sRGB input space. */
+/**
+ * __set_legacy_tf - Calculates the legacy transfer function
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Only for sRGB input space
+ *
+ * Returns:
+ * 0 in case of success, -ENOMEM if it fails
+ */
static int __set_legacy_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size,
bool has_rom)
@@ -218,7 +250,16 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-/* Calculates the output transfer function based on expected input space. */
+/**
+ * __set_output_tf - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if it fails.
+ */
static int __set_output_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size,
bool has_rom)
@@ -262,7 +303,16 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-/* Caculates the input transfer function based on expected input space. */
+/**
+ * __set_input_tf - calculates the input transfer function based on expected
+ * input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if it fails.
+ */
static int __set_input_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size)
{
@@ -285,13 +335,14 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
- * amdgpu_dm_verify_lut_sizes
+ * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes
* @crtc_state: the DRM CRTC state
*
- * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
- * the expected size.
+ * Verifies that the Degamma and Gamma LUTs attached to the &crtc_state
+ * are of the expected size.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. -EINVAL if any lut sizes are invalid.
*/
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
{
@@ -327,9 +378,9 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
* of the HW blocks as long as the CRTC CTM always comes before the
* CRTC RGM and after the CRTC DGM.
*
- * The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
@@ -338,7 +389,8 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
* management at once we have to either restrict the usage of CRTC properties
* or blend adjustments together.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. Error code if setup fails.
*/
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
{
@@ -393,7 +445,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
if (r)
return r;
} else if (has_regamma) {
- /* CRTC RGM goes into RGM LUT. */
+ /* If atomic regamma, CRTC RGM goes into RGM LUT. */
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
@@ -450,9 +502,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* Update the underlying dc_stream_state's input transfer function (ITF) in
* preparation for hardware commit. The transfer function used depends on
- * the prepartion done on the stream for color management.
+ * the preparation done on the stream for color management.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. -ENOMEM if mem allocation fails.
*/
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state)
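As a side note on the linearity formula documented for __is_lut_linear() above: a minimal standalone sketch of that check, assuming the formula is read as f(a) = a * 0xFF00 / (size - 1) with an exact integer match (the in-kernel check may tolerate rounding), could look like this:

/*
 * Sketch of the documented linearity check; struct lut_entry stands in
 * for struct drm_color_lut and is not part of this patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct lut_entry {
	uint16_t red, green, blue;
};

static bool lut_is_linear_sketch(const struct lut_entry *lut, uint32_t size)
{
	uint32_t i;

	if (size < 2)
		return true;

	for (i = 0; i < size; i++) {
		uint32_t expected = i * 0xFF00u / (size - 1);

		if (lut[i].red != expected || lut[i].green != expected ||
		    lut[i].blue != expected)
			return false;
	}
	return true;
}

int main(void)
{
	/* A two-entry ramp from 0 to 0xFF00 is linear under this formula. */
	struct lut_entry ramp[2] = { { 0, 0, 0 }, { 0xFF00, 0xFF00, 0xFF00 } };

	printf("linear: %d\n", lut_is_linear_sketch(ramp, 2));
	return 0;
}

Under this reading, a linear 256-entry LUT holds i * 0xFF00 / 255 in each entry, i.e. 0, 256, 512, ..., 65280.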
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index aa4edf182095..ee242d9d8b06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1369,9 +1369,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -1475,9 +1475,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1560,9 +1560,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -1664,9 +1664,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1749,9 +1749,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -1853,9 +1853,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1934,9 +1934,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -2035,9 +2035,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx || !pipe_ctx->stream)
@@ -2114,9 +2114,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -2175,9 +2175,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -2251,9 +2251,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -2327,9 +2327,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
if (!pipe_ctx) {
@@ -3288,6 +3288,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
+ dput(dir);
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 988b9bf34c93..b8077fcd4651 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -531,7 +531,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
unsigned char rc_cmd = 0;
unsigned char rc_result = 0xFF;
unsigned char i = 0;
- uint8_t ret = 0;
+ int ret;
if (is_write_cmd) {
// write rc data
@@ -691,8 +691,14 @@ bool dm_helpers_dp_write_dsc_enable(
const struct dc_stream_state *stream,
bool enable)
{
- uint8_t enable_dsc = enable ? 1 : 0;
+ static const uint8_t DSC_DISABLE;
+ static const uint8_t DSC_DECODING = 0x01;
+ static const uint8_t DSC_PASSTHROUGH = 0x02;
+
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_port *port;
+ uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
+ uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
uint8_t ret = 0;
if (!stream)
@@ -712,8 +718,39 @@ bool dm_helpers_dp_write_dsc_enable(
aconnector->dsc_aux, stream, enable_dsc);
#endif
- ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
- DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable");
+ port = aconnector->port;
+
+ if (enable) {
+ if (port->passthrough_aux) {
+ ret = drm_dp_dpcd_write(port->passthrough_aux,
+ DP_DSC_ENABLE,
+ &enable_passthrough, 1);
+ DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
+ ret);
+ }
+
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux,
+ DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n",
+ (port->passthrough_aux) ? "remote RX" :
+ "virtual dpcd",
+ ret);
+ } else {
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux,
+ DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n",
+ (port->passthrough_aux) ? "remote RX" :
+ "virtual dpcd",
+ ret);
+
+ if (port->passthrough_aux) {
+ ret = drm_dp_dpcd_write(port->passthrough_aux,
+ DP_DSC_ENABLE,
+ &enable_passthrough, 1);
+ DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
+ ret);
+ }
+ }
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
@@ -730,7 +767,7 @@ bool dm_helpers_dp_write_dsc_enable(
#endif
}
- return (ret > 0);
+ return ret;
}
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
@@ -841,6 +878,25 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
+void dm_helpers_init_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *panel_config)
+{
+ // Feature DSC
+ panel_config->dsc.disable_dsc_edp = false;
+ panel_config->dsc.force_dsc_edp_policy = 0;
+}
+
+void dm_helpers_override_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *panel_config)
+{
+ // Feature DSC
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ panel_config->dsc.disable_dsc_edp = true;
+ }
+}
+
void *dm_helpers_allocate_gpu_mem(
struct dc_context *ctx,
enum dc_gpu_mem_alloc_type type,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index bd9606307dc7..6ff96b4bdda5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,6 +36,7 @@
#include "dm_helpers.h"
#include "dc_link_ddc.h"
+#include "dc_link_dp.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"
@@ -1351,19 +1352,90 @@ clean_exit:
return (ret == 0);
}
-#endif
+static unsigned int kbps_from_pbn(unsigned int pbn)
+{
+ unsigned int kbps = pbn;
+
+ kbps *= (1000000 / PEAK_FACTOR_X1000);
+ kbps *= 8;
+ kbps *= 54;
+ kbps /= 64;
+
+ return kbps;
+}
+
+static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+ struct dc_dsc_bw_range *bw_range)
+{
+ struct dc_dsc_policy dsc_policy = {0};
+
+ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
+ dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, bw_range);
+
+ return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
int bpp, pbn, branch_max_throughput_mps = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings cur_link_settings;
+ unsigned int end_to_end_bw_in_kbps = 0;
+ unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ unsigned int max_compressed_bw_in_kbps = 0;
+ struct dc_dsc_bw_range bw_range = {0};
- /* check if mode could be supported within fUll_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
- if (pbn > aconnector->port->full_pbn)
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ /*
+ * check if the mode could be supported if DSC pass-through is supported
+ * AND check if there is enough bandwidth available to support the mode
+ * with DSC enabled.
+ */
+ if (is_dsc_common_config_possible(stream, &bw_range) &&
+ aconnector->port->passthrough_aux) {
+ mutex_lock(&aconnector->mst_mgr.lock);
+
+ cur_link_settings = stream->link->verified_link_cap;
+
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ &cur_link_settings
+ );
+ down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn);
+
+ /* pick the bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ down_link_bw_in_kbps);
+
+ mutex_unlock(&aconnector->mst_mgr.lock);
+
+ /*
+ * use the maximum dsc compression bandwidth as the required
+ * bandwidth for the mode
+ */
+ max_compressed_bw_in_kbps = bw_range.min_kbps;
+
+ if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ } else {
+#endif
+ /* check if mode could be supported within full_pbn */
+ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+
+ if (pbn > aconnector->port->full_pbn)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+#endif
/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
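For orientation, kbps_from_pbn() above converts a DisplayPort MST Payload Bandwidth Number (PBN) into kbps. A standalone sketch of the same arithmetic, where the PEAK_FACTOR_X1000 value is an assumption for illustration (1.006 peak factor scaled by 1000) and is not taken from this patch:

#include <stdio.h>

#define PEAK_FACTOR_X1000 1006

static unsigned int kbps_from_pbn_sketch(unsigned int pbn)
{
	unsigned int kbps = pbn;

	kbps *= (1000000 / PEAK_FACTOR_X1000);	/* integer division: 994 */
	kbps *= 8;				/* bytes to bits */
	kbps *= 54;
	kbps /= 64;				/* 54/64 scaling from the PBN unit */

	return kbps;
}

int main(void)
{
	/* Each PBN works out to roughly 6.7 Mbps with this integer math. */
	printf("full_pbn = 1000 -> %u kbps\n", kbps_from_pbn_sketch(1000));
	return 0;
}

With these numbers, a port reporting full_pbn = 1000 corresponds to about 6.7 Gbps of downstream bandwidth, which is the figure the new pass-through path compares against the compressed stream bandwidth.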
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 5eb5d31e591d..dfd3be49eac8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
@@ -1591,6 +1592,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
if (res)
return res;
+ if (modifiers == NULL)
+ adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
+
res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
&dm_plane_funcs, formats, num_formats,
modifiers, plane->type, NULL);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 95168c2cfa6f..286981a2dd40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
- struct drm_framebuffer *fb,
- int *min_downscale, int *max_upscale);
-
int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 6767fab55c26..352e9afb85c6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -100,3 +100,24 @@ void convert_float_matrix(
matrix[i] = (uint16_t)reg_value;
}
}
+
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t remainder = 0;
+ while (b != 0) {
+ remainder = a % b;
+ a = b;
+ b = remainder;
+ }
+ return a;
+}
+
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den)
+{
+ uint32_t gcd = 0;
+
+ gcd = find_gcd(num, den);
+ *out_num = num / gcd;
+ *out_den = den / gcd;
+}
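reduce_fraction() above is a straightforward Euclidean-GCD reduction. A small hypothetical usage sketch (standalone names, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Same Euclidean GCD as the helper above, shown standalone. */
static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
	while (b != 0) {
		uint32_t r = a % b;

		a = b;
		b = r;
	}
	return a;
}

int main(void)
{
	uint32_t num = 48000, den = 60000;
	uint32_t g = gcd_u32(num, den);

	/* 48000/60000 reduces to 4/5 (the gcd is 12000). */
	printf("%u/%u -> %u/%u\n", num, den, num / g, den / g);
	return 0;
}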
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index ade785c4fdc7..81da4e6f7a1a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -38,6 +38,9 @@ void convert_float_matrix(
struct fixed31_32 *flt,
uint32_t buffer_size);
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 09fbb7ad5362..ead4da11a992 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -44,25 +44,6 @@
#include "bios_parser_common.h"
-/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
-#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
-#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
-#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
-
-#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
-#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
-#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
-
-#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
-#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
-#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
-
#define DC_LOGGER \
bp->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c76091fd1f2..f276abb63bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- case AMDGPU_FAMILY_GC_11_0_2: {
+ case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn32_clk_mgr_destroy(clk_mgr);
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 451e8d6cd8bd..f0577dcd1af6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -101,8 +101,8 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
return;
if (clk_mgr_base->clks.dispclk_khz == 0 ||
- dc->debug.force_clock_mode & 0x1) {
- force_reset = true;
+ dc->debug.force_clock_mode & 0x1) {
+ force_reset = true;
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0202dc682682..ca6dfd2d7561 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -24,10 +24,9 @@
*/
#include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index 2e088c5171b2..f1319957e400 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -28,6 +28,7 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index bca5f01da763..d43258e3cd4f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -48,6 +48,11 @@
#include "dc_dmub_srv.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
#include "yellow_carp_offset.h"
#define regCLK1_CLK_PLL_REQ 0x0237
@@ -723,7 +728,8 @@ void dcn31_clk_mgr_construct(
dcn31_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
}
@@ -736,8 +742,49 @@ void dcn31_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn31_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn31_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index df6dd8465272..171c38fac6a3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -51,6 +51,13 @@
#include "dc_link_dp.h"
#include "dcn314_smu.h"
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
+
#define MAX_INSTANCE 7
#define MAX_SEGMENT 8
@@ -307,16 +314,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn314_smu_enable_pme_wa(clk_mgr);
}
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -425,7 +422,7 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
@@ -510,7 +507,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
- DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+ DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
@@ -527,6 +524,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+
+ default:
+ break;
+ }
+ return 1;
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -540,89 +557,127 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
- int max_voltage = 0;
- int clock = 0;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage) {
- return clocks[i];
- } else if (clock_table->SocVoltage[i] >= max_voltage &&
- clock_table->SocVoltage[i] < voltage) {
- max_voltage = clock_table->SocVoltage[i];
- clock = clocks[i];
- }
- }
-
- ASSERT(clock);
- return clock;
-}
-
static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+ const DpmClocks314_t *clock_table)
{
- int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ int i;
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ /* Find highest valid fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+ clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
-
- bw_params->clk_table.num_entries = j + 1;
+ /* We expect the table to contain at least one valid fclk entry. */
+ ASSERT(is_valid_clock_value(max_fclk));
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
- switch (clock_table->DfPstateTable[j].WckRatio) {
- case WCK_RATIO_1_2:
- bw_params->clk_table.entries[i].wck_ratio = 2;
- break;
- case WCK_RATIO_1_4:
- bw_params->clk_table.entries[i].wck_ratio = 4;
- break;
- default:
- bw_params->clk_table.entries[i].wck_ratio = 1;
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+ int j;
+
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+ clock_table->DfPstateTable[j].FClk < min_fclk &&
+ clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
}
- bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[min_pstate].WckRatio);
}
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[max_pstate].WckRatio);
+ i++;
+ }
+ bw_params->clk_table.num_entries = i--;
+
+ /* Make sure all highest clocks are included*/
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -641,7 +696,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn314_init_clocks,
+ .init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +736,10 @@ void dcn314_clk_mgr_construct(
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
- smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+ smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- sizeof(DpmClocks_t),
+ sizeof(DpmClocks314_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
@@ -713,7 +768,8 @@ void dcn314_clk_mgr_construct(
dcn314_bw_params.wm_table = ddr5_wm_table;
/* Saved clocks configured at boot for debug purposes */
- dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
}
@@ -726,9 +782,50 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn314_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index c695a4498c50..171f84340eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index a7958dc96581..047d19ea919c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -36,6 +36,37 @@ typedef enum {
WCK_RATIO_MAX
} WCK_RATIO_e;
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
struct dcn314_watermarks {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
};
struct dcn314_smu_dpm_clks {
- DpmClocks_t *dpm_clks;
+ DpmClocks314_t *dpm_clks;
union large_integer mc_address;
};
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 36b0cd47c1c7..14071aef5eab 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -41,6 +41,11 @@
#include "dc_dmub_srv.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
#include "dc_link_dp.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
@@ -507,7 +512,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i];
bw_params->clk_table.entries[i].dppclk_mhz = clock_table->DppClocks[i];
bw_params->clk_table.entries[i].wck_ratio = 1;
- };
+ }
/* Make sure to include at least one entry and highest pstate */
if (max_pstate != min_pstate || i == 0) {
@@ -652,7 +657,8 @@ void dcn315_clk_mgr_construct(
dcn315_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
}
@@ -665,7 +671,48 @@ void dcn315_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn315_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn315_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn315_clk_mgr_helper_populate_bw_params(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index e4bb9c6193b5..0cd3d2eb7ac7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -680,7 +680,8 @@ void dcn316_clk_mgr_construct(
dcn316_bw_params.wm_table = ddr4_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e42f44fc1c08..9860bf38c547 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -638,14 +638,17 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
/**
* dc_stream_get_crc() - Get CRC values for the given stream.
- * @dc: DC object
+ *
+ * @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr: CRC value for the first of the 3 channels stored here.
- * @g_y: CRC value for the second of the 3 channels stored here.
- * @b_cb: CRC value for the third of the 3 channels stored here.
+ * @r_cr: CRC value for the red component.
+ * @g_y: CRC value for the green component.
+ * @b_cb: CRC value for the blue component.
*
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
- * Return false if stream is not found, or if CRCs are not enabled.
+ *
+ * Return:
+ * false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
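
The reworked kernel-doc above makes the calling contract explicit: dc_stream_configure_crc() must have enabled capture before dc_stream_get_crc() returns anything. A minimal sketch of a caller, assuming only the signature shown in this hunk (the helper name and the DC_LOG_DEBUG call are illustrative, not from the patch):

/* Illustrative only: read back per-channel CRCs for a stream whose CRC
 * capture was previously enabled via dc_stream_configure_crc(). */
static bool example_read_stream_crc(struct dc *dc, struct dc_stream_state *stream)
{
	uint32_t r_cr = 0, g_y = 0, b_cb = 0;

	/* Returns false if the stream is unknown or CRCs were never enabled. */
	if (!dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
		return false;

	DC_LOG_DEBUG("CRC r_cr=%#x g_y=%#x b_cb=%#x\n", r_cr, g_y, b_cb);
	return true;
}
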
@@ -1074,8 +1077,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
- bool pipe_split_change =
- context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -1087,7 +1097,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->current_state->stream_count != context->stream_count)
should_disable = true;
- if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *old_pipe, *new_pipe;
old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1170,7 +1181,11 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- core_link_disable_stream(pipe);
+ if (dc->hwss.update_phy_state)
+ dc->hwss.update_phy_state(dc->current_state,
+ pipe, TX_OFF_SYMCLK_OFF);
+ else
+ core_link_disable_stream(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -3053,7 +3068,11 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- core_link_disable_stream(pipe_ctx);
+ if (dc->hwss.update_phy_state)
+ dc->hwss.update_phy_state(dc->current_state,
+ pipe_ctx, TX_OFF_SYMCLK_ON);
+ else
+ core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3064,7 +3083,11 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
- core_link_enable_stream(dc->current_state, pipe_ctx);
+ if (dc->hwss.update_phy_state)
+ dc->hwss.update_phy_state(dc->current_state,
+ pipe_ctx, TX_ON_SYMCLK_ON);
+ else
+ core_link_enable_stream(dc->current_state, pipe_ctx);
}
}
@@ -3091,11 +3114,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
-
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
- dc->debug.enable_sw_cntl_psr)
+ if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
+ || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
return false;
@@ -3229,7 +3250,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3268,6 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
}
- }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -3312,10 +3332,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
} else {
@@ -3323,16 +3339,15 @@ static void commit_planes_for_stream(struct dc *dc,
}
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
return;
}
@@ -3456,17 +3471,13 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
} else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3504,23 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
- }
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- /* Since phantom pipe programming is moved to post_unlock_program_front_end,
- * move the SubVP lock to after the phantom pipes have been setup
- */
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- }
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
}
// Fire manual trigger only when bottom plane is flipped
@@ -3539,8 +3552,10 @@ static bool commit_minimal_transition_state(struct dc *dc,
if (!transition_context)
return false;
- tmp_policy = dc->debug.pipe_split_policy;
- dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ if (!dc->config.is_vmin_only_asic) {
+ tmp_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
dc_resource_state_copy_construct(transition_base_context, transition_context);
@@ -3566,7 +3581,8 @@ static bool commit_minimal_transition_state(struct dc *dc,
dc_release_state(transition_context);
//restore previous pipe split policy
- dc->debug.pipe_split_policy = tmp_policy;
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = tmp_policy;
if (ret != DC_OK) {
//this should never happen
@@ -4271,8 +4287,8 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
/*
*****************************************************************************
* Function: dc_is_dmub_outbox_supported -
- *
- * @brief
+ *
+ * @brief
* Checks whether DMUB FW supports outbox notifications, if supported
* DM should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
@@ -4292,7 +4308,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
@@ -4340,6 +4356,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
struct dc_context *dc_ctx = dc->ctx;
dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 2a8007928210..9dd705b985b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -402,6 +402,44 @@ void get_hdr_visual_confirm_color(
}
}
+void get_subvp_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ bool enable_subvp = false;
+ int i;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ /* SubVP enable - red */
+ color->color_r_cr = color_value;
+ enable_subvp = true;
+
+ if (pipe_ctx->stream == pipe->stream)
+ return;
+ break;
+ }
+ }
+
+ if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
+ color->color_r_cr = 0;
+ if (pipe_ctx->stream->ignore_msa_timing_param == 1)
+ /* SubVP enable and DRR on - green */
+ color->color_g_y = color_value;
+ else
+ /* SubVP enable and No DRR - blue */
+ color->color_b_cb = color_value;
+ }
+}
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index e01424fb02ba..4ab27e231337 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1311,6 +1311,14 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink->edid_caps.audio_modes[i].sample_rate,
sink->edid_caps.audio_modes[i].sample_size);
}
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ // Init dc_panel_config
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
+ }
+
} else {
/* From Connected-to-Disconnected. */
link->type = dc_connection_none;
@@ -2069,11 +2077,7 @@ static enum dc_status enable_link_edp(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- enum dc_status status;
-
- status = enable_link_dp(state, pipe_ctx);
-
- return status;
+ return enable_link_dp(state, pipe_ctx);
}
static enum dc_status enable_link_dp_mst(
@@ -3372,7 +3376,7 @@ bool dc_link_setup_psr(struct dc_link *link,
switch(link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
if(!dc->debug.disable_z10)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
break;
@@ -4295,18 +4299,6 @@ void core_link_enable_stream(
if (pipe_ctx->stream->dpms_off)
return;
- /* Have to setup DSC before DIG FE and BE are connected (which happens before the
- * link training). This is to make sure the bandwidth sent to DIG BE won't be
- * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
- * will be automatically set at a later time when the video is enabled
- * (DP_VID_STREAM_EN = 1).
- */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
- }
-
status = enable_link(state, pipe_ctx);
if (status != DC_OK) {
@@ -4736,7 +4728,7 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
else if (link->connector_signal == SIGNAL_TYPE_EDP
&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
dsc_support.DSC_SUPPORT == false
- || link->dc->debug.disable_dsc_edp
+ || link->panel_config.dsc.disable_dsc_edp
|| !link->dc->caps.edp_dsc_support))
force_disable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 48dad093ae8b..e2413d2908c9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3743,7 +3743,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
unsigned int policy = 0;
- policy = link->ctx->dc->debug.force_dsc_edp_policy;
+ policy = link->panel_config.dsc.force_dsc_edp_policy;
if (max_link_rate == LINK_RATE_UNKNOWN)
max_link_rate = link->verified_link_cap.link_rate;
/*
@@ -3909,7 +3909,7 @@ bool decide_link_settings(struct dc_stream_state *stream,
if (stream->timing.flags.DSC) {
enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
- if (link->ctx->dc->debug.force_dsc_edp_policy) {
+ if (link->panel_config.dsc.force_dsc_edp_policy) {
/* calculate link max link rate cap*/
struct dc_link_settings tmp_link_setting;
struct dc_crtc_timing tmp_timing = stream->timing;
@@ -4519,7 +4519,11 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- core_link_disable_stream(pipe_ctx);
+ if (link->dc->hwss.update_phy_state)
+ link->dc->hwss.update_phy_state(link->dc->current_state,
+ pipe_ctx, TX_OFF_SYMCLK_OFF);
+ else
+ core_link_disable_stream(pipe_ctx);
}
}
@@ -4527,7 +4531,11 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
+ if (link->dc->hwss.update_phy_state)
+ link->dc->hwss.update_phy_state(link->dc->current_state,
+ pipe_ctx, TX_ON_SYMCLK_ON);
+ else
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
}
}
@@ -5024,6 +5032,10 @@ static void determine_lttpr_mode(struct dc_link *link)
bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
+ if (link->ctx->dc->debug.lttpr_mode_override != 0) {
+ link->lttpr_mode = link->ctx->dc->debug.lttpr_mode_override;
+ return;
+ }
if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
@@ -5267,6 +5279,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t aux_channel_retry_cnt = 0;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -5294,21 +5307,43 @@ static bool retrieve_link_cap(struct dc_link *link)
status = wa_try_to_wake_dprx(link, timeout_ms);
}
+ while (status != DC_OK && aux_channel_retry_cnt < 10) {
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ /* Delay 1 ms if AUX CH is in power down state. Based on spec
+ * section 2.3.1.2, the AUX CH may be powered down due to a
+ * write of 2 to DPCD 600h. The sink AUX CH monitors the differential
+ * signal and may need up to 1 ms before being able to reply.
+ */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
+ udelay(1000);
+ aux_channel_retry_cnt++;
+ }
+ }
+
+ /* If aux channel is not active, return false and trigger another detect*/
+ if (status != DC_OK) {
+ dpcd_power_state = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+
+ dpcd_power_state = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+ return false;
+ }
+
is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
- status = core_link_read_dpcd(link, DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- /* Delay 1 ms if AUX CH is in power down state. Based on spec
- * section 2.3.1.2, if AUX CH may be powered down due to
- * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
- * signal and may need up to 1 ms before being able to reply.
- */
- if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
- udelay(1000);
-
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
* time before proceeding with possibly vendor specific transactions
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ffc0f1c0ea93..29f27e3fe3ac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_21;
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dc_version = DCN_VERSION_3_14;
break;
default:
@@ -1904,9 +1904,6 @@ bool dc_is_stream_unchanged(
if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
- if (old_stream->odm_2to1_policy_applied != stream->odm_2to1_policy_applied)
- return false;
-
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8e1e40083ec8..75dbc665f435 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.201"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -118,7 +118,26 @@ struct dc_plane_cap {
uint32_t min_height;
};
-// Color management caps (DPP and MPC)
+/**
+ * DOC: color-management-caps
+ *
+ * **Color management caps (DPP and MPC)**
+ *
+ * Modules/color calculates various color operations which are translated to
+ * abstracted HW. DCE 5-12 had almost no important changes, but starting with
+ * DCN1, every new generation comes with fairly major differences in color
+ * pipeline. Therefore, we abstract color pipe capabilities so modules/DM can
+ * decide mapping to HW block based on logical capabilities.
+ */
+
+/**
+ * struct rom_curve_caps - predefined transfer function caps for degamma and regamma
+ * @srgb: RGB color space transfer func
+ * @bt2020: BT.2020 transfer func
+ * @gamma2_2: standard gamma
+ * @pq: perceptual quantizer transfer function
+ * @hlg: hybrid log-gamma transfer function
+ */
struct rom_curve_caps {
uint16_t srgb : 1;
uint16_t bt2020 : 1;
@@ -127,36 +146,68 @@ struct rom_curve_caps {
uint16_t hlg : 1;
};
+/**
+ * struct dpp_color_caps - color pipeline capabilities for display pipe and
+ * plane blocks
+ *
+ * @dcn_arch: all DCE generations treated the same
+ * @input_lut_shared: shared with DGAM. Input LUT is different than most LUTs,
+ * just plain 256-entry lookup
+ * @icsc: input color space conversion
+ * @dgam_ram: programmable degamma LUT
+ * @post_csc: post color space conversion, before gamut remap
+ * @gamma_corr: degamma correction
+ * @hw_3d_lut: 3D LUT support. It implies a shaper LUT before. It may be shared
+ * with MPC by setting mpc:shared_3d_lut flag
+ * @ogam_ram: programmable out/blend gamma LUT
+ * @ocsc: output color space conversion
+ * @dgam_rom_for_yuv: pre-defined degamma LUT for YUV planes
+ * @dgam_rom_caps: pre-defined curve caps for degamma 1D LUT
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ *
+ * Note: hdr_mult and gamut remap (CTM) are always available in DPP (in that order)
+ */
struct dpp_color_caps {
- uint16_t dcn_arch : 1; // all DCE generations treated the same
- // input lut is different than most LUTs, just plain 256-entry lookup
- uint16_t input_lut_shared : 1; // shared with DGAM
+ uint16_t dcn_arch : 1;
+ uint16_t input_lut_shared : 1;
uint16_t icsc : 1;
uint16_t dgam_ram : 1;
- uint16_t post_csc : 1; // before gamut remap
+ uint16_t post_csc : 1;
uint16_t gamma_corr : 1;
-
- // hdr_mult and gamut remap always available in DPP (in that order)
- // 3d lut implies shaper LUT,
- // it may be shared with MPC - check MPC:shared_3d_lut flag
uint16_t hw_3d_lut : 1;
- uint16_t ogam_ram : 1; // blnd gam
+ uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t dgam_rom_for_yuv : 1;
struct rom_curve_caps dgam_rom_caps;
struct rom_curve_caps ogam_rom_caps;
};
+/**
+ * struct mpc_color_caps - color pipeline capabilities for multiple pipe and
+ * plane combined blocks
+ *
+ * @gamut_remap: color transformation matrix
+ * @ogam_ram: programmable out gamma LUT
+ * @ocsc: output color space conversion matrix
+ * @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
+ * instance
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ */
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
- uint16_t num_3dluts : 3; //3d lut always assumes a preceding shaper LUT
- uint16_t shared_3d_lut:1; //can be in either DPP or MPC, but single instance
-
+ uint16_t num_3dluts : 3;
+ uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
};
+/**
+ * struct dc_color_caps - color pipes capabilities for DPP and MPC hw blocks
+ * @dpp: color pipes caps for DPP
+ * @mpc: color pipes caps for MPC
+ */
struct dc_color_caps {
struct dpp_color_caps dpp;
struct mpc_color_caps mpc;
@@ -213,6 +264,7 @@ struct dc_caps {
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
+ uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
@@ -349,9 +401,11 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool is_vmin_only_asic;
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
+ bool use_default_clock_table;
};
enum visual_confirm {
@@ -363,6 +417,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SUBVP = 14,
};
enum dc_psr_power_opts {
@@ -384,9 +439,31 @@ enum dcc_option {
DCC_HALF_REQ_DISALBE = 2,
};
+/**
+ * enum pipe_split_policy - Pipe split strategy supported by DCN
+ *
+ * This enum is used to define the pipe split policy supported by DCN. By
+ * default, DC favors MPC_SPLIT_DYNAMIC.
+ */
enum pipe_split_policy {
+ /**
+ * @MPC_SPLIT_DYNAMIC: DC will automatically decide how to split the
+ * pipe in order to bring the best trade-off between performance and
+ * power consumption. This is the recommended option.
+ */
MPC_SPLIT_DYNAMIC = 0,
+
+ /**
+ * @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not
+ * try any sort of split optimization.
+ */
MPC_SPLIT_AVOID = 1,
+
+ /**
+ * @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to optimize
+ * the pipe utilization when using a single display; if the user
+ * connects to a second display, DC will avoid pipe split.
+ */
MPC_SPLIT_AVOID_MULT_DISP = 2,
};
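
A sketch of how a debug path might use these enumerators; only dc->debug.pipe_split_policy and MPC_SPLIT_AVOID are taken from this patch, the helper itself is hypothetical. commit_minimal_transition_state() in dc.c applies the same idea temporarily around a transition commit (and now skips it on vmin-only ASICs).

/* Hypothetical debug helper: pin the most conservative policy while
 * isolating pipe-split related underflow or sync problems. */
static void example_force_no_pipe_split(struct dc *dc)
{
	dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
}
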
@@ -609,6 +686,7 @@ struct dc_bounding_box_overrides {
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
+ int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -620,6 +698,14 @@ struct dc_state;
struct resource_pool;
struct dce_hwseq;
+/**
+ * struct dc_debug_options - DC debug struct
+ *
+ * This struct provides a simple mechanism for developers to change some
+ * configurations, enable/disable features, and activate extra debug options.
+ * This can be very handy to narrow down whether some specific feature is
+ * causing an issue or not.
+ */
struct dc_debug_options {
bool native422_support;
bool disable_dsc;
@@ -639,6 +725,11 @@ struct dc_debug_options {
bool disable_stutter;
bool use_max_lb;
enum dcc_option disable_dcc;
+
+ /**
+ * @pipe_split_policy: Define which pipe split policy is used by the
+ * display core.
+ */
enum pipe_split_policy pipe_split_policy;
bool force_single_disp_pipe_split;
bool voltage_align_fclk;
@@ -712,8 +803,6 @@ struct dc_debug_options {
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
- bool disable_dsc_edp;
- unsigned int force_dsc_edp_policy;
bool enable_dram_clock_change_one_display_vactive;
/* TODO - remove once tested */
bool legacy_dp2_lt;
@@ -737,7 +826,6 @@ struct dc_debug_options {
int crb_alloc_policy_min_disp_count;
bool disable_z10;
bool enable_z9_disable_interface;
- bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;
@@ -751,10 +839,12 @@ struct dc_debug_options {
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
+ bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
bool enable_dp_dig_pixel_rate_div_policy;
+ enum lttpr_mode lttpr_mode_override;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -810,6 +900,17 @@ struct dc {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
+
+ /* Scratch memory */
+ struct {
+ struct {
+ /*
+ * For matching clock_limits table in driver with table
+ * from PMFW.
+ */
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ } update_bw_bounding_box;
+ } scratch;
};
enum frame_buffer_mode {
@@ -1081,6 +1182,7 @@ struct dc_plane_state {
/* private to dc_surface.c */
enum dc_irq_source irq_source;
struct kref refcount;
+ struct tg_color visual_confirm_color;
};
struct dc_plane_info {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2d61c2a91cee..7b765efe0825 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
+#include "../basics/conversion.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
union dmub_rb_cmd cmd = { 0 };
cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- // TODO: Uncomment once FW headers are promoted
- //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+ cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -323,11 +323,13 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
int i = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
- uint8_t visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
+ uint8_t visual_confirm_enabled;
if (dc == NULL)
return false;
+ visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
+
// Format command.
cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
@@ -387,6 +389,37 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
}
}
+void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ enum dmub_status status;
+ unsigned int panel_inst = 0;
+
+ dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ // Prepare fw command
+ cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
+ cmd.visual_confirm_color.header.sub_type = 0;
+ cmd.visual_confirm_color.header.ret_status = 1;
+ cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
+ cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
+
+ // Send command to fw
+ status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd);
+
+ ASSERT(status == DMUB_STATUS_OK);
+
+ // If command was processed, copy feature caps to dmub srv
+ if (status == DMUB_STATUS_OK &&
+ cmd.visual_confirm_color.header.ret_status == 0) {
+ memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
+ &cmd.visual_confirm_color.visual_confirm_color_data,
+ sizeof(struct dmub_visual_confirm_color));
+ }
+}
+
#ifdef CONFIG_DRM_AMD_DC_DCN
/**
* ***********************************************************************************************
@@ -601,6 +634,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +646,21 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+ /* Calculate the scaling factor from the src and dst height.
+ * e.g. if 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
+ * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+ *
+ * Make sure to combine stream and plane scaling together.
+ */
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
+ &out_num_stream, &out_den_stream);
+ reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
+ &out_num_plane, &out_den_plane);
+ reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
+ pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
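
The scaling-factor comment in this hunk folds two reductions into one step; a standalone sketch may make the arithmetic easier to follow. reduce_fraction() is the helper from dc/basics/conversion.h whose include this patch adds; the 1080/2160 ordering follows the comment's worked example (the real call passes the stream's src and dst heights), and the wrapper function is illustrative only.

/* Sketch: combine stream and plane downscale ratios as the patch does.
 * 2160 -> 1080 on the stream (1/2) and no plane scaling (1/1) give 1/2. */
static void example_subvp_scale_factor(void)
{
	uint32_t num_stream, den_stream, num_plane, den_plane, num, den;

	reduce_fraction(1080, 2160, &num_stream, &den_stream);		/* 1/2 */
	reduce_fraction(1080, 1080, &num_plane, &den_plane);		/* 1/1 */
	reduce_fraction(num_stream * num_plane,
			den_stream * den_plane, &num, &den);		/* 1/2 */
}
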
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 159782cd6659..9f5b47b9a83d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -78,6 +78,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst);
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);
void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);
+void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 584aaf6967fd..848db8676adf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -417,19 +417,43 @@ enum dc_scan_direction {
SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
};
+/**
+ * struct dc_cursor_position - Hardware cursor data.
+ *
+ * This struct keeps the cursor position information that will be
+ * sent to and received from the DC core.
+ */
struct dc_cursor_position {
+ /**
+ * @x: It represents the top left abscissa coordinate of the cursor.
+ */
uint32_t x;
+
+ /**
+ * @y: It represents the top left ordinate coordinate of the cursor.
+ */
uint32_t y;
+ /**
+ * @x_hotspot: Define the abscissa point where mouse click happens.
+ */
uint32_t x_hotspot;
+
+ /**
+ * @y_hotspot: Define the ordinate point where mouse click happens.
+ */
uint32_t y_hotspot;
- /*
- * This parameter indicates whether HW cursor should be enabled
+ /**
+ * @enable: This parameter indicates whether hardware cursor should be
+ * enabled.
*/
bool enable;
- /* Translate cursor x/y by the source rectangle for each plane. */
+ /**
+ * @translate_by_source: Translate cursor x/y by the source rectangle
+ * for each plane.
+ */
bool translate_by_source;
};
@@ -494,7 +518,9 @@ struct dc_gamma {
/* Used by both ipp amd opp functions*/
/* TODO: to be consolidated with enum color_space */
-/*
+/**
+ * enum dc_cursor_color_format - DC cursor programming mode
+ *
* This enum is for programming CURSOR_MODE register field. What this register
* should be programmed to depends on OS requested cursor shape flags and what
* we stored in the cursor surface.
@@ -530,17 +556,39 @@ union dc_cursor_attribute_flags {
};
struct dc_cursor_attributes {
+ /**
+ * @address: This field represents the framebuffer address associated
+ * with the cursor. It is important to highlight that this address is
+ * divided into high and low parts.
+ */
PHYSICAL_ADDRESS_LOC address;
+
+ /**
+ * @pitch: Cursor line stride.
+ */
uint32_t pitch;
- /* Width and height should correspond to cursor surface width x heigh */
+ /**
+ * @width: Width should correspond to cursor surface width.
+ */
uint32_t width;
+ /**
+ * @height: Height should correspond to cursor surface height.
+ */
uint32_t height;
+ /**
+ * @color_format: DC cursor programming mode.
+ */
enum dc_cursor_color_format color_format;
- uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
+ /**
+ * @sdr_white_level: Boosting (SDR) cursor in HDR mode.
+ */
+ uint32_t sdr_white_level;
- /* In case we support HW Cursor rotation in the future */
+ /**
+ * @rotation_angle: In case we support HW Cursor rotation in the future
+ */
enum dc_rotation_angle rotation_angle;
union dc_cursor_attribute_flags attribute_flags;
@@ -764,22 +812,108 @@ struct dc_dsc_config {
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
};
+
+/**
+ * struct dc_crtc_timing - Timing parameters used to configure DCN blocks
+ *
+ * DCN provides multiple signals and parameters that can be used to adjust
+ * timing parameters; this struct aggregates several of these values for easy
+ * access. In this struct, fields prefixed with h_* are related to horizontal
+ * timing, and v_* to vertical timing. Keep in mind that when we talk about
+ * vertical timings, the values, in general, are described in the number of
+ * lines; on the other hand, the horizontal values are in pixels.
+ */
struct dc_crtc_timing {
+ /**
+ * @h_total: The total number of pixels from the rising edge of the previous
+ * HSync until the rising edge of the current HSync.
+ */
uint32_t h_total;
+
+ /**
+ * @h_border_left: The black pixels related to the left border
+ */
uint32_t h_border_left;
+
+ /**
+ * @h_addressable: It is the range of pixels displayed horizontally.
+ * For example, if the display resolution is 3840x2160, the horizontal
+ * addressable area is 3840.
+ */
uint32_t h_addressable;
+
+ /**
+ * @h_border_right: The black pixels related to the right border
+ */
uint32_t h_border_right;
+
+ /**
+ * @h_front_porch: Period (in pixels) between HBlank start and the
+ * rising edge of HSync.
+ */
uint32_t h_front_porch;
+
+ /**
+ * @h_sync_width: HSync duration in pixels.
+ */
uint32_t h_sync_width;
+ /**
+ * @v_total: It is the total number of lines from the rising edge of
+ * the previous VSync until the rising edge of the current VSync.
+ *
+ * |--------------------------|
+ * +-+ V_TOTAL +-+
+ * | | | |
+ * VSync ---+ +--------- // -----------+ +---
+ */
uint32_t v_total;
+
+ /**
+ * @v_border_top: The black border on the top.
+ */
uint32_t v_border_top;
+
+ /**
+ * @v_addressable: It is the range of the scanout at which the
+ * framebuffer is displayed. For example, if the display resolution is
+ * 3840@2160, the addressable area is 2160 lines, or if the resolution
+ * is 1920x1080, the addressable area is 1080 lines.
+ */
uint32_t v_addressable;
+
+ /**
+ * @v_border_bottom: The black border on the bottom.
+ */
uint32_t v_border_bottom;
+
+ /**
+ * @v_front_porch: Period (in lines) between VBlank start and rising
+ * edge of VSync.
+ * +-+
+ * VSync | |
+ * ----------+ +--------...
+ * +------------------...
+ * VBlank |
+ * --+
+ * |-------|
+ * v_front_porch
+ */
uint32_t v_front_porch;
+
+ /**
+ * @v_sync_width: VSync signal width in lines.
+ */
uint32_t v_sync_width;
+ /**
+ * @pix_clk_100hz: Pipe pixel precision
+ *
+ * This field is used to communicate pixel clocks with 100 Hz accuracy
+ * from dc_crtc_timing to BIOS command table.
+ */
uint32_t pix_clk_100hz;
+
uint32_t min_refresh_in_uhz;
uint32_t vic;
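
The new kernel-doc defines h_total, v_total and pix_clk_100hz together, so one derived quantity ties them up nicely: the nominal refresh rate is the pixel clock divided by the pixels per frame. The helper below is a sketch of that relationship only; it is not in the patch, and div64_u64() is assumed to come from linux/math64.h.

/* Sketch: nominal refresh rate in millihertz from a dc_crtc_timing.
 * pix_clk_100hz is in 100 Hz units; one frame is h_total * v_total pixels,
 * e.g. 1485000 x 100 Hz over 2200 * 1125 pixels yields 60000 mHz (60 Hz). */
static unsigned int example_refresh_millihz(const struct dc_crtc_timing *t)
{
	u64 pix_clk_hz = (u64)t->pix_clk_100hz * 100;

	return (unsigned int)div64_u64(pix_clk_hz * 1000,
				       (u64)t->h_total * t->v_total);
}
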
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index a0af0f6afeef..3f64b3092692 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -113,6 +113,16 @@ struct psr_settings {
unsigned int psr_power_opt;
};
+/* To split out "global" and "per-panel" config settings.
+ * Add a struct dc_panel_config under dc_link
+ */
+struct dc_panel_config {
+ // edp DSC
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+ } dsc;
+};
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
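
struct dc_panel_config moves the eDP DSC knobs from the global dc->debug flags onto each link, which is what lets dm_helpers_init_panel_settings()/dm_helpers_override_panel_settings() in the detection path differ per panel. A hedged sketch of a DM-side override follows; the function is hypothetical, while the fields and the SIGNAL_TYPE_EDP check mirror what the patch adds.

/* Hypothetical DM-side override, not part of the patch. */
static void example_override_edp_dsc(struct dc_link *link, bool disable_dsc)
{
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	link->panel_config.dsc.disable_dsc_edp = disable_dsc;
	/* 0 keeps the default eDP DSC link-settings policy. */
	link->panel_config.dsc.force_dsc_edp_policy = 0;
}
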
@@ -224,6 +234,7 @@ struct dc_link {
bool dpia_mst_dsc_always_on;
/* Forced DPIA into TBT3 compatibility mode. */
bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -232,6 +243,8 @@ struct dc_link {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ struct dc_panel_config panel_config;
+ enum phy_state phy_state;
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
@@ -344,6 +357,7 @@ enum dc_detect_reason {
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c3d97206ed89..9fcf9dc5bce4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -48,11 +48,6 @@ struct dc_stream_status {
bool is_abm_supported;
};
-// TODO: References to this needs to be removed..
-struct freesync_context {
- bool dummy;
-};
-
enum hubp_dmdata_mode {
DMDATA_SW_MODE,
DMDATA_HW_MODE
@@ -184,9 +179,6 @@ struct dc_stream_state {
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
- // TODO: References to this needs to be removed..
- struct freesync_context freesync_ctx;
-
struct audio_info audio_info;
struct dc_info_packet hdr_static_metadata;
@@ -276,8 +268,6 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
struct mall_stream_config mall_stream_config;
-
- bool odm_2to1_policy_applied;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index eb5a7fe88971..bdb6bac8dd97 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -320,7 +320,7 @@ static void set_video_latency(
value);
}
-/* set audio latency in in ms/2+1 */
+/* set audio latency in ms/2+1 */
static void set_audio_latency(
struct audio *audio,
int latency_in_ms)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 919c2c2ba84b..32782ef9ef77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -814,12 +814,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER");
retry_on_defer = true;
- fallthrough;
- case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
- if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK)
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
&& defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
@@ -848,7 +842,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
}
}
break;
-
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: FAILURE: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
+ goto fail;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 213de8cabfad..165392380842 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
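
The two new '% 10' lines round the deep-color pixel clock down to a whole kHz, since the value is kept in 100 Hz units. A worked sketch of the 10 bpc branch, using a 25.175 MHz clock purely as an example value:

/* Sketch of the 10 bpc path: 251750 (x100 Hz) * 5 / 4 = 314687, and the
 * modulo-10 trim gives 314680, i.e. 31.468 MHz. */
static unsigned int example_10bpc_clock_100hz(unsigned int pix_clk_100hz)
{
	unsigned int actual = (pix_clk_100hz * 5) >> 2;	/* 1.25x for 30 bpp */

	actual -= actual % 10;	/* keep a whole multiple of 1 kHz */
	return actual;
}
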
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 38a67051d470..fe346e96c2d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1577,8 +1577,25 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
- if (!stream->dpms_off)
- core_link_enable_stream(context, pipe_ctx);
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+
+ }
+
+ if (!stream->dpms_off) {
+ if (dc->hwss.update_phy_state)
+ dc->hwss.update_phy_state(context, pipe_ctx, TX_ON_SYMCLK_ON);
+ else
+ core_link_enable_stream(context, pipe_ctx);
+ }
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d4a6504dfe00..897f412f539e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
@@ -450,11 +448,12 @@ void dpp1_set_cursor_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
+
src_y_offset = pos->y - param->viewport.y;
}
-
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b54c12400323..52e201e9b091 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
@@ -1211,13 +1208,10 @@ void hubp1_cursor_set_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ src_y_offset = pos->y - param->viewport.y;
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bed783747f16..f26e08032da0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
@@ -2150,8 +2151,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
grouped_pipes[i]->stream_res.tg->inst, &pclk);
- grouped_pipes[i]->stream->timing.pix_clk_100hz =
- pclk*get_clock_divider(grouped_pipes[i], false);
+ grouped_pipes[i]->stream->timing.pix_clk_100hz =
+ pclk*get_clock_divider(grouped_pipes[i], false);
if (master == -1)
master = i;
}
@@ -2198,14 +2199,14 @@ void dcn10_enable_vblanks_synchronization(
if (master >= 0) {
for (i = 0; i < group_size; i++) {
if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
- grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
- grouped_pipes[master]->stream_res.tg,
- grouped_pipes[i]->stream_res.tg,
- grouped_pipes[master]->stream->timing.pix_clk_100hz,
- grouped_pipes[i]->stream->timing.pix_clk_100hz,
- get_clock_divider(grouped_pipes[master], false),
- get_clock_divider(grouped_pipes[i], false));
- grouped_pipes[i]->stream->vblank_synchronized = true;
+ grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
+ grouped_pipes[master]->stream_res.tg,
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[master]->stream->timing.pix_clk_100hz,
+ grouped_pipes[i]->stream->timing.pix_clk_100hz,
+ get_clock_divider(grouped_pipes[master], false),
+ get_clock_divider(grouped_pipes[i], false));
+ grouped_pipes[i]->stream->vblank_synchronized = true;
}
grouped_pipes[master]->stream->vblank_synchronized = true;
DC_SYNC_INFO("Sync complete\n");
@@ -2538,8 +2539,10 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
color_space_to_black_color(
dc, pipe_ctx->stream->output_color_space, color);
- if (mpc->funcs->set_bg_color)
+ if (mpc->funcs->set_bg_color) {
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ }
}
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -3339,11 +3342,11 @@ static bool dcn10_dmub_should_update_cursor_data(
if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
return false;
- if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
+ if (dcn10_can_pipe_disable_cursor(pipe_ctx))
+ return false;
- if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
- debug->enable_sw_cntl_psr)
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
return false;
@@ -3467,8 +3470,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
- (pipe_ctx->bottom_pipe != NULL);
+ bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
@@ -3477,6 +3479,13 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
+ if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
+ (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
+ pipe_split_on = true;
+ }
+ }
+
/**
* DC cursor is stream space, HW cursor is plane space and drawn
* as part of the framebuffer.
@@ -3548,8 +3557,36 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
+
+ if (param.rotation == ROTATION_ANGLE_0) {
+ int viewport_width =
+ pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_x =
+ pipe_ctx->plane_res.scl_data.viewport.x;
+
+ if (param.mirror) {
+ if (pipe_split_on || odm_combine_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
+ }
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
+ }
+ }
+ }
// Swap axis and mirror horizontally
- if (param.rotation == ROTATION_ANGLE_90) {
+ else if (param.rotation == ROTATION_ANGLE_90) {
uint32_t temp_x = pos_cpy.x;
pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
@@ -3620,23 +3657,25 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int viewport_x =
pipe_ctx->plane_res.scl_data.viewport.x;
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ if (!param.mirror) {
+ if (pipe_split_on || odm_combine_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
}
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
/**
@@ -3737,7 +3776,6 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
int vesa_sync_start;
int asic_blank_end;
int interlace_factor;
- int vertical_line_start;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
@@ -3753,10 +3791,8 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
patched_crtc_timing.v_border_top)
* interlace_factor;
- vertical_line_start = asic_blank_end -
+ return asic_blank_end -
pipe_ctx->pipe_dlg_param.vstartup_start + 1;
-
- return vertical_line_start;
}
void dcn10_calc_vupdate_position(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 769974375b4b..8e9384094f6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e1a9a45b03b6..294827906c69 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -312,6 +312,20 @@ void optc1_program_timing(
}
}
+/**
+ * optc1_set_vtg_params - Set Vertical Timing Generator (VTG) parameters
+ *
+ * @optc: timing_generator struct used to extract the optc parameters
+ * @dc_crtc_timing: Timing parameters configured
+ * @program_fp2: Boolean value indicating if FP2 will be programmed or not
+ *
+ * OTG is responsible for generating the global sync signals, including
+ * vertical timing information for each HUBP in the dcfclk domain. Each VTG is
+ * associated with one OTG that provides HUBP with vertical timing information
+ * (i.e., there is 1:1 correspondence between OTG and VTG). This function is
+ * responsible for setting the OTG parameters to the VTG during the pipe
+ * programming.
+ */
void optc1_set_vtg_params(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
@@ -465,6 +479,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+ // Last chance to clear underflow; otherwise it will persist because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
@@ -1067,7 +1086,7 @@ static void optc1_set_test_pattern(
src_color[index] >> (src_bpc - dst_bpc);
/* CRTC_TEST_PATTERN_DATA has 16 bits,
* lowest 6 are hardwired to ZERO
- * color bits should be left aligned aligned to MSB
+ * color bits should be left aligned to MSB
* XXXXXXXXXX000000 for 10 bit,
* XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
*/
@@ -1493,8 +1512,23 @@ bool optc1_configure_crc(struct timing_generator *optc,
return true;
}
+/**
+ * optc1_get_crc - Capture CRC result per component
+ *
+ * @optc: timing_generator instance.
+ * @r_cr: 16-bit primary CRC signature for red data.
+ * @g_y: 16-bit primary CRC signature for green data.
+ * @b_cb: 16-bit primary CRC signature for blue data.
+ *
+ * This function reads the CRC signature from the OPTC registers. Notice that
+ * we have three registers to keep the CRC result per color component (RGB).
+ *
+ * Returns:
+ * If CRC is disabled, return false; otherwise, return true and store the
+ * CRC results in the output parameters.
+ */
bool optc1_get_crc(struct timing_generator *optc,
- uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1505,12 +1539,14 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ CRC0_B_CB, b_cb);
return true;
}
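A hedged usage sketch for a CRC getter with the same contract as optc1_get_crc(): false means capture is disabled, otherwise one 16-bit signature per colour component is stored through the out parameters (the helper below is a stand-in, not the DC call):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for optc1_get_crc(); a real implementation reads the
 * OTG_CRC0_DATA_RG and OTG_CRC0_DATA_B registers. */
static bool example_get_crc(uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	*r_cr = 0x1234;
	*g_y  = 0x5678;
	*b_cb = 0x9abc;
	return true;
}

static void dump_crc(void)
{
	uint32_t r_cr, g_y, b_cb;

	if (!example_get_crc(&r_cr, &g_y, &b_cb)) {
		printf("CRC capture disabled\n");
		return;
	}
	printf("CRC R/Cr=0x%04x G/Y=0x%04x B/Cb=0x%04x\n", r_cr, g_y, b_cb);
}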
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 174eebbe8b4f..831080b9eb87 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1495,6 +1495,24 @@ static bool dcn10_resource_construct(
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
+ if (!dc->config.is_vmin_only_asic)
+ if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev))
+ switch (dc->ctx->asic_id.pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ dc->config.is_vmin_only_asic = true;
+ break;
+ default:
+ break;
+ }
+
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index ea1f14af0db7..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 2feb051a2002..598caa508d43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -444,24 +444,24 @@ static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base)
uint32_t state_mode;
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
- REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK,
- CM_BLNDGAM_CONFIG_STATUS, &state_mode);
+ REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, &state_mode);
- switch (state_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
bool dpp20_program_blnd_lut(
@@ -537,24 +537,24 @@ static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base)
uint32_t state_mode;
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
- REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK,
- CM_SHAPER_CONFIG_STATUS, &state_mode);
+ REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode);
- switch (state_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
static void dpp20_configure_shaper_lut(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index 8d3884b306dd..f1490e97b6ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -101,8 +101,8 @@ static bool dwb2_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
/* Only chroma scaling (sub-sampling) is supported in DCN2 */
-if ((params->cnv_params.src_width != params->dest_width) ||
- (params->cnv_params.src_height != params->dest_height)) {
+ if ((params->cnv_params.src_width != params->dest_width) ||
+ (params->cnv_params.src_height != params->dest_height)) {
DC_LOG_DWB("%s inst = %d, FAILED!LUMA SCALING NOT SUPPORTED", __func__, dwbc20->base.inst);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 936af65381ef..b1ec0e6f7f58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
@@ -990,13 +987,10 @@ void hubp2_cursor_set_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ src_y_offset = pos->y - param->viewport.y;
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 3e44b7998429..6271caca4d9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1716,15 +1716,13 @@ void dcn20_program_front_end_for_ctx(
DC_LOGGER_INIT(dc->ctx->logger);
/* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream == old_pipe_ctx->stream)
- pipe_ctx->stream_res.gsl_group =
- old_pipe_ctx->stream_res.gsl_group;
- }
+ if (pipe_ctx->stream == old_pipe_ctx->stream)
+ pipe_ctx->stream_res.gsl_group = old_pipe_ctx->stream_res.gsl_group;
+ }
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2363,9 +2361,12 @@ static void dcn20_reset_back_end_for_pipe(
* screen only, the dpms_off would be true but
* VBIOS lit up eDP, so check link status too.
*/
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
- else if (pipe_ctx->stream_res.audio)
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) {
+ if (dc->hwss.update_phy_state)
+ dc->hwss.update_phy_state(dc->current_state, pipe_ctx, TX_OFF_SYMCLK_OFF);
+ else
+ core_link_disable_stream(pipe_ctx);
+ } else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
/* free acquired resources */
@@ -2464,9 +2465,13 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
get_mpctree_visual_confirm_color(pipe_ctx, color);
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
get_surface_tile_visual_confirm_color(pipe_ctx, color);
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
+ get_subvp_visual_confirm_color(dc, pipe_ctx, color);
- if (mpc->funcs->set_bg_color)
+ if (mpc->funcs->set_bg_color) {
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ }
}
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
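The backend-reset hunk above follows a "prefer the new hook, fall back to the legacy call" shape; a minimal sketch under hypothetical names (this is not the real hwss function table):

#include <stdio.h>

enum phy_target { TX_OFF_SYMCLK_OFF_SKETCH };

struct hw_hooks {
	/* optional newer hook; NULL on ASICs that do not implement it */
	void (*update_phy_state)(enum phy_target target);
};

static void legacy_disable_stream(void)
{
	printf("legacy disable path\n");
}

/* Use the dedicated PHY-state hook when present, otherwise keep the
 * historical disable-stream behaviour. */
static void disable_stream(const struct hw_hooks *hooks)
{
	if (hooks->update_phy_state)
		hooks->update_phy_state(TX_OFF_SYMCLK_OFF_SKETCH);
	else
		legacy_disable_stream();
}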
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 15734db0cdea..116f67a0b989 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -299,24 +299,24 @@ static enum dc_lut_mode mpc20_get_ogam_current(struct mpc *mpc, int mpcc_id)
uint32_t state_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
- REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id],
- MPCC_OGAM_CONFIG_STATUS, &state_mode);
+ REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], MPCC_OGAM_CONFIG_STATUS, &state_mode);
- switch (state_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
static void mpc2_program_lutb(struct mpc *mpc, int mpcc_id,
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..5752271f22df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active;
+ uint32_t riommu_active, prefetch_done;
int i;
+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+ if (prefetch_done) {
+ hubbub->riommu_active = true;
+ return;
+ }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
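The early return added above is an idempotent-init guard: when firmware already finished host-VM prefetch, the block is left alone. The same shape, reduced to a sketch with hypothetical names:

#include <stdbool.h>

struct hvm_state {
	bool riommu_active;
};

/* Stand-in for reading HOSTVM_PREFETCH_DONE. */
static bool prefetch_already_done(void)
{
	return true;
}

static void hvm_init(struct hvm_state *h)
{
	/* Skip the full sequence when an earlier boot stage already
	 * brought the block up. */
	if (prefetch_already_done()) {
		h->riommu_active = true;
		return;
	}

	/* ... full DCHVM init sequence would follow here ... */
}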
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 787b852eeaf2..4a668d6563df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
@@ -716,28 +714,27 @@ static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base)
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
- REG_GET(CM_BLNDGAM_CONTROL,
- CM_BLNDGAM_MODE_CURRENT, &mode_current);
- REG_GET(CM_BLNDGAM_CONTROL,
- CM_BLNDGAM_SELECT_CURRENT, &in_use);
-
- switch (mode_current) {
- case 0:
- case 1:
- mode = LUT_BYPASS;
- break;
-
- case 2:
- if (in_use == 0)
- mode = LUT_RAM_A;
- else
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &mode_current);
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &in_use);
+
+ switch (mode_current) {
+ case 0:
+ case 1:
+ mode = LUT_BYPASS;
+ break;
+
+ case 2:
+ if (in_use == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
@@ -817,24 +814,24 @@ static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base)
uint32_t state_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
- REG_GET(CM_SHAPER_CONTROL,
- CM_SHAPER_MODE_CURRENT, &state_mode);
+ REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &state_mode);
- switch (state_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
static void dpp3_configure_shaper_lut(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 87dbeca18984..e43f77c11c00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -61,23 +61,20 @@ static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
- REG_GET(CM_GAMCOR_CONTROL,
- CM_GAMCOR_MODE_CURRENT, &state_mode);
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
- if (state_mode == 0)
- mode = LUT_BYPASS;
+ if (state_mode == 0)
+ mode = LUT_BYPASS;
- if (state_mode == 2) {//Programmable RAM LUT
- REG_GET(CM_GAMCOR_CONTROL,
- CM_GAMCOR_SELECT_CURRENT, &lut_mode);
-
- if (lut_mode == 0)
- mode = LUT_RAM_A;
- else
- mode = LUT_RAM_B;
- }
+ if (state_mode == 2) {//Programmable RAM LUT
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
+ if (lut_mode == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ }
- return mode;
+ return mode;
}
static void dpp3_program_gammcor_lut(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 6a4dcafb9bba..dc3e8df706b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index 1981a71b961b..ad1c1b703874 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -109,32 +109,32 @@ enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
uint32_t state_ram_lut_in_use;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
- REG_GET_2(MPCC_OGAM_CONTROL[mpcc_id],
- MPCC_OGAM_MODE_CURRENT, &state_mode,
- MPCC_OGAM_SELECT_CURRENT, &state_ram_lut_in_use);
+ REG_GET_2(MPCC_OGAM_CONTROL[mpcc_id], MPCC_OGAM_MODE_CURRENT, &state_mode,
+ MPCC_OGAM_SELECT_CURRENT, &state_ram_lut_in_use);
- switch (state_mode) {
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 2:
+ switch (state_ram_lut_in_use) {
case 0:
- mode = LUT_BYPASS;
+ mode = LUT_RAM_A;
break;
- case 2:
- switch (state_ram_lut_in_use) {
- case 0:
- mode = LUT_RAM_A;
- break;
- case 1:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
+ case 1:
+ mode = LUT_RAM_B;
break;
default:
mode = LUT_BYPASS;
break;
}
- return mode;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
void mpc3_power_on_ogam_lut(
@@ -439,24 +439,24 @@ static enum dc_lut_mode mpc3_get_shaper_current(struct mpc *mpc, uint32_t rmu_id
uint32_t state_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
- REG_GET(SHAPER_CONTROL[rmu_idx],
- MPC_RMU_SHAPER_LUT_MODE_CURRENT, &state_mode);
+ REG_GET(SHAPER_CONTROL[rmu_idx], MPC_RMU_SHAPER_LUT_MODE_CURRENT, &state_mode);
- switch (state_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
static void mpc3_configure_shaper_lut(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 80136b5d7e48..1782b9c26cf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -180,19 +180,8 @@ void optc3_set_dsc_config(struct timing_generator *optc,
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- optc2_set_dsc_config(optc, dsc_mode, dsc_bytes_per_pixel,
- dsc_slice_width);
-
- REG_UPDATE(OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, 0);
-
-}
-
-void optc3_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg)
-{
- DC_FP_START();
- optc3_fpu_set_vrr_m_const(optc, vtotal_avg);
- DC_FP_END();
+ optc2_set_dsc_config(optc, dsc_mode, dsc_bytes_per_pixel, dsc_slice_width);
+ REG_UPDATE(OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, 0);
}
void optc3_set_odm_bypass(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index 33bd12f5dc17..dd45a5499b07 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -329,9 +329,6 @@ void optc3_lock_doublebuffer_enable(struct timing_generator *optc);
void optc3_lock_doublebuffer_disable(struct timing_generator *optc);
-void optc3_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg);
-
void optc3_set_drr_trigger_window(struct timing_generator *optc,
uint32_t window_start, uint32_t window_end);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 0a67f8a5656d..d97076648acb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -372,7 +372,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGE) {
+ if (eng_id <= ENGINE_ID_DIGB) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index a788d160953b..ab70ebd8f223 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+ return true;
+
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
index 7c77c71591a0..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -162,7 +162,8 @@
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
- SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 1ed1404e969d..8d83b611507a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -553,9 +553,12 @@ static void dcn31_reset_back_end_for_pipe(
* screen only, the dpms_off would be true but
* VBIOS lit up eDP, so check link status too.
*/
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
- else if (pipe_ctx->stream_res.audio)
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) {
+ if (dc->hwss.update_phy_state)
+ dc->hwss.update_phy_state(dc->current_state, pipe_ctx, TX_OFF_SYMCLK_OFF);
+ else
+ core_link_disable_stream(pipe_ctx);
+ } else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
/* free acquired resources */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 468a893ff785..8745132d6374 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -889,7 +889,6 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.disable_z10 = true,
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
@@ -2153,7 +2152,7 @@ static bool dcn31_resource_construct(
pool->base.usb4_dpia_count = 4;
}
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 41f8ec99da6b..901436591ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn31_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
struct dcn31_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index e3b5a95e03b1..702c28c2560e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -13,31 +13,6 @@
DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index b384f30395d3..e3351ddc566c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -317,6 +317,7 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 755c715ad8dc..39931d48f385 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,7 +343,10 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ bool two_pix_per_container = false;
+ two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (is_dp_128b_132b_signal(pipe_ctx)) {
@@ -355,16 +358,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
- } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- *k1_div = PIXEL_RATE_DIV_BY_2;
- *k2_div = PIXEL_RATE_DIV_BY_2;
} else {
- if (odm_combine_factor == 1)
- *k2_div = PIXEL_RATE_DIV_BY_4;
- else if (odm_combine_factor == 2)
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+ if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -374,3 +374,31 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
return odm_combine_factor;
}
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t pix_per_cycle = 1;
+ uint32_t odm_combine_factor = 1;
+
+ if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
+ || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ pix_per_cycle = 2;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+ pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ pix_per_cycle);
+}
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+ return false;
+}
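Condensed from the two helpers above, a standalone sketch of the divider choice for an 8b/10b DP stream (a simplification; the real code also covers 128b/132b DP and HDMI signals):

#include <stdbool.h>

enum px_div { DIV_BY_1 = 1, DIV_BY_2 = 2, DIV_BY_4 = 4 };

/* Two pixels per container (e.g. YCbCr 4:2:0) forces k2 = /2; otherwise
 * k2 starts at /4 and drops to /2 for ODM 2:1 combine or when the debug
 * pixel-rate-divider policy is enabled. */
static void pick_k_dividers(bool two_pix_per_container, int odm_combine_factor,
			    bool div_policy, enum px_div *k1, enum px_div *k2)
{
	*k1 = DIV_BY_1;

	if (two_pix_per_container) {
		*k2 = DIV_BY_2;
	} else {
		*k2 = DIV_BY_4;
		if (odm_combine_factor == 2 || div_policy)
			*k2 = DIV_BY_2;
	}
}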
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index be0f5e4d48e1..d014580592ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -39,4 +39,8 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index b9debeb081fd..fcf67eb3478f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -145,6 +145,8 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 0c7980266b85..38aa28ec6b13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
- REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 63861cdfb09f..49b7e256d4ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -70,6 +70,7 @@
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -132,155 +133,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
#define DC_LOGGER_INIT(logger)
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 668,
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -602,6 +454,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
@@ -1059,7 +912,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.seamless_boot_odm_combine = true
};
@@ -1402,7 +1254,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGF) {
+ if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
@@ -1447,7 +1299,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
- vpg_inst = hpo_dp_inst + 6;
+ //Uses offset index 5-8, but actually maps to vpg_inst 6-9
+ vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
@@ -1790,112 +1643,20 @@ static struct clock_source *dcn31_clock_source_create(
}
BREAK_TO_DEBUGGER();
+ kfree(clk_src);
return NULL;
}
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
- bool upscaled = false;
-
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
- if (pipe->plane_state &&
- (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
- pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
- upscaled = true;
-
- /*
- * Immediate flip can be set dynamically after enabling the plane.
- * We need to require support for immediate flip or underflow can be
- * intermittently experienced depending on peak b/w requirements.
- */
- pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
- pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.src.dcc_rate = 3;
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
+ int pipe_cnt;
- pipe_cnt++;
- }
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
- /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
- && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
- } else if (context->stream_count >= 3 && upscaled) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->stream)
- continue;
-
- if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
- pipe->stream->apply_seamless_boot_optimization) {
-
- if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
- context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
- break;
- }
- }
- }
+ DC_FP_START();
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ DC_FP_END();
return pipe_cnt;
}
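The populate and bounding-box helpers now simply bracket their floating-point work with DC_FP_START()/DC_FP_END(), which in the kernel hand off to dc_fpu_begin()/dc_fpu_end() to make FPU use legal in that region. A self-contained sketch of the shape, using local stand-ins for the macros and the FPU helper:

#include <stdio.h>

/* Local stand-ins for DC_FP_START()/DC_FP_END(). */
#define FP_START() printf("enter FPU-safe region\n")
#define FP_END()   printf("leave FPU-safe region\n")

/* Hypothetical helper standing in for the *_fpu() routines that hold
 * the actual float/double DML math. */
static int populate_pipes_fpu_stub(void)
{
	double ratio = 1920.0 / 1080.0;
	return ratio > 1.0 ? 1 : 0;
}

/* Same shape as dcn314_populate_dml_pipes_from_context() above: the
 * integer-only wrapper owns the FPU bracketing. */
static int populate_pipes_example(void)
{
	int pipe_cnt;

	FP_START();
	pipe_cnt = populate_pipes_fpu_stub();
	FP_END();

	return pipe_cnt;
}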
@@ -1906,88 +1667,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
- if (bw_params->num_channels > 0)
- dcn3_14_soc.num_chans = bw_params->num_channels;
-
- ASSERT(dcn3_14_soc.num_chans);
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_14_soc.num_states - 1;
- }
-
- clock_tmp[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
- if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
- clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_14_soc.clock_limits[i] = clock_tmp[i];
- if (clk_table->num_entries)
- dcn3_14_soc.num_states = clk_table->num_entries;
- }
-
- if (max_dispclk_mhz) {
- dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ DC_FP_START();
+ dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2034,6 +1716,7 @@ static struct clock_source *dcn30_clock_source_create(
}
BREAK_TO_DEBUGGER();
+ kfree(clk_src);
return NULL;
}
@@ -2069,6 +1752,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
@@ -2132,8 +1816,6 @@ static bool dcn314_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
- dc->debug = debug_defaults_diags;
else
dc->debug = debug_defaults_diags;
// Init the vm_helper
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index c41108847ce0..0dd3153aa5c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -29,6 +29,9 @@
#include "core_types.h"
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
#define TO_DCN314_RES_POOL(pool)\
container_of(pool, struct dcn314_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 7463b12ae4a3..eebb42c9ddd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -886,7 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.psr_power_use_phy_fsm = 0,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index 39929fa67a51..22849eaa6f24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn315_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
struct dcn315_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index d56a212e065c..f4b52a35ad84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -886,7 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 0dc5a6c13ae7..aba6d634131b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn316_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
struct dcn316_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index a31c64b50410..0d5e8a441512 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -225,19 +225,19 @@ void dccg32_set_dpstreamclk(
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 26648ce772da..38a48983f663 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
+	/* Read start level = 0 will bring underflow / overflow and DIG_FIFO_ERROR = 1,
+	 * so set it to 1/2 full = 7 before reset, as suggested by the hardware team.
+ */
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index be2e3b9e971e..769171ab8ef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -250,6 +250,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
uint32_t num_ways = 0;
+ uint32_t prev_addr_low = 0;
for (i = 0; i < ctx->stream_count; i++) {
stream = ctx->streams[i];
@@ -267,10 +268,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];
// Calculate total surface size
- surface_size = plane->plane_size.surface_pitch *
+ if (prev_addr_low != plane->address.grph.addr.u.low_part) {
+				/* if plane addresses differ from the previous FB, userspace allocated separate FBs */
+ surface_size += plane->plane_size.surface_pitch *
plane->plane_size.surface_size.height *
(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ prev_addr_low = plane->address.grph.addr.u.low_part;
+ } else {
+ /* We have the same fb for all the planes.
+ * Xorg always creates one giant fb that holds all surfaces,
+ * so allocating it once is sufficient.
+ * */
+ continue;
+ }
// Convert surface size + starting address to number of cache lines required
// (alignment accounted for)
cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
@@ -284,24 +295,38 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
}
// Include cursor size for CAB allocation
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
- cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- break;
- }
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
+ if (pipe->stream && pipe->plane_state && hubp)
+ /* Find the cursor plane and use the exact size instead of
+ * using the max for calculation
+ */
+ if (hubp->curs_attr.width > 0) {
+ cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
+ break;
+ }
+ }
+
+ switch (stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
+ cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
plane->address.grph.cursor_cache_addr.quad_part);
}
}
@@ -314,13 +339,36 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ for (i = 0; i < ctx->stream_count; i++) {
+ stream = ctx->streams[i];
+ for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+ plane = ctx->stream_status[i].plane_states[j];
+
+ if (stream->cursor_position.enable && plane &&
+ !plane->address.grph.cursor_cache_addr.quad_part &&
+ cursor_size > 16384) {
+ /* Cursor caching is not supported since it won't be on the same line.
+ * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
+				 * this case triggers corruption. If we're at the edge, then don't trigger display refresh
+				 * from MALL. We only need to cache the cursor if it is larger than 64x64 at 4 bpp.
+ */
+ num_ways++;
+ /* We only expect one cursor plane */
+ break;
+ }
+ }
+ }
+
return num_ways;
}
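A worked, standalone version of the way-count rounding above (the line totals and way count are made-up numbers, not DCN32 capabilities):

#include <stdint.h>
#include <stdio.h>

/* Convert used cache lines into MALL ways, rounding up so a partially
 * used way still costs a whole way. */
static uint32_t ways_for_cache_lines(uint32_t cache_lines_used,
				     uint32_t total_lines, uint32_t total_ways)
{
	uint32_t lines_per_way = total_lines / total_ways;
	uint32_t num_ways = cache_lines_used / lines_per_way;

	if (cache_lines_used % lines_per_way > 0)
		num_ways++;

	return num_ways;
}

int main(void)
{
	/* 10000 lines used, 65536 lines total, 16 ways ->
	 * 4096 lines per way -> 2 full ways + 1 partial = 3 ways. */
	printf("%u\n", ways_for_cache_lines(10000, 65536, 16));
	return 0;
}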
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- uint8_t ways;
+ uint8_t ways, i;
+ int j;
+ bool stereo_in_use = false;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -349,7 +397,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
* and configure HUBP's to fetch from MALL
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- if (ways <= dc->caps.cache_num_ways) {
+
+ /* MALL not supported with Stereo3D. If any plane is using stereo,
+ * don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
+
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ stereo_in_use = true;
+ break;
+ }
+ }
+ if (stereo_in_use)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -611,9 +675,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
- /* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
- BREAK_TO_DEBUGGER();
+ /* there are no ROM LUTs in OUTGAM */
+ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ BREAK_TO_DEBUGGER();
}
}
@@ -683,9 +747,11 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
+ // MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
- pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
cache_cursor);
}
}
@@ -909,10 +975,10 @@ void dcn32_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
// Get DMCUB capabilities
- if (dc->ctx->dmub_srv) {
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
- dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- }
+ if (dc->ctx->dmub_srv) {
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ }
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
@@ -1186,3 +1252,30 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return true;
return false;
}
+
+void dcn32_update_phy_state(struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ enum phy_state target_state)
+{
+ enum phy_state current_state = pipe_ctx->stream->link->phy_state;
+
+ if (target_state == TX_OFF_SYMCLK_OFF) {
+ core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_OFF;
+ } else if (target_state == TX_ON_SYMCLK_ON) {
+ core_link_enable_stream(state, pipe_ctx);
+ pipe_ctx->stream->link->phy_state = TX_ON_SYMCLK_ON;
+ } else if (target_state == TX_OFF_SYMCLK_ON) {
+ if (current_state == TX_ON_SYMCLK_ON) {
+ core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_OFF;
+ }
+
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings);
+ pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_ON;
+ } else
+ BREAK_TO_DEBUGGER();
+}
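A minimal sketch of what the handler above does per target state, assuming only the enum phy_state values used in this patch (the helper name and action flags below are illustrative, not part of DC):

	enum phy_action_sketch {
		SKETCH_DISABLE_STREAM = 1 << 0,
		SKETCH_ENABLE_STREAM  = 1 << 1,
		SKETCH_PROGRAM_PIXCLK = 1 << 2,
	};

	/* Maps a requested PHY state to the side effects dcn32_update_phy_state()
	 * performs: full off disables the stream, full on enables it, and the
	 * intermediate TX-off/SYMCLK-on state drops TX only if it is still on,
	 * then keeps the pixel clock programmed.
	 */
	static unsigned int phy_actions_for_target_sketch(enum phy_state cur,
							  enum phy_state target)
	{
		switch (target) {
		case TX_OFF_SYMCLK_OFF:
			return SKETCH_DISABLE_STREAM;
		case TX_ON_SYMCLK_ON:
			return SKETCH_ENABLE_STREAM;
		case TX_OFF_SYMCLK_ON:
			return (cur == TX_ON_SYMCLK_ON ? SKETCH_DISABLE_STREAM : 0) |
			       SKETCH_PROGRAM_PIXCLK;
		default:
			return 0;	/* unknown target, handler traps to debugger */
		}
	}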
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 083f3aeb54f0..221e31144d50 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -84,4 +84,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn32_update_phy_state(struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ enum phy_state target_state);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index c279a25ea293..28d220218133 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -104,6 +104,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.commit_subvp_config = dcn32_commit_subvp_config,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_phy_state = dcn32_update_phy_state,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 94141f5e6994..357bd2461bc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -300,24 +300,24 @@ static enum dc_lut_mode mpc32_get_shaper_current(struct mpc *mpc, uint32_t mpcc_
uint32_t state_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
- REG_GET(MPCC_MCM_SHAPER_CONTROL[mpcc_id],
- MPCC_MCM_SHAPER_MODE_CURRENT, &state_mode);
-
- switch (state_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- return mode;
+ REG_GET(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_MODE_CURRENT, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index 992e56c6907e..1fad7b48bd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -281,8 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_vrr_m_const = optc3_set_vrr_m_const,
- .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+ .set_drr = optc32_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9a26d24b579f..ef0a6d468a10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -131,79 +131,96 @@ enum dcn32_clk_src_array_id {
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
-#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
#define SRI(reg_name, block, id)\
- .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## temp_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
#define DCCG_SRII(reg_name, block, id)\
- .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
- reg ## reg_name ## _ ## block ## id
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
/* NBIO */
-#define NBIO_BASE_INNER(seg) \
- NBIO_BASE__INST0_SEG ## seg
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX0_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
#undef CTX
#define CTX ctx
#define REG(reg_name) \
- (DCN_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-static const struct bios_registers bios_regs = {
- NBIO_SR(BIOS_SCRATCH_3),
- NBIO_SR(BIOS_SCRATCH_6)
-};
+static struct bios_registers bios_regs;
-#define clk_src_regs(index, pllid)\
-[index] = {\
- CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
-}
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
-static const struct dce110_clk_src_regs clk_src_regs[] = {
- clk_src_regs(0, A),
- clk_src_regs(1, B),
- clk_src_regs(2, C),
- clk_src_regs(3, D),
- clk_src_regs(4, E)
-};
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -213,17 +230,10 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define abm_regs(id)\
-[id] = {\
- ABM_DCN32_REG_LIST(id)\
-}
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
-static const struct dce_abm_registers abm_regs[] = {
- abm_regs(0),
- abm_regs(1),
- abm_regs(2),
- abm_regs(3),
-};
+static struct dce_abm_registers abm_regs[4];
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN32(__SHIFT)
@@ -233,18 +243,10 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN32(_MASK)
};
-#define audio_regs(id)\
-[id] = {\
- AUD_COMMON_REG_LIST(id)\
-}
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
-static const struct dce_audio_registers audio_regs[] = {
- audio_regs(0),
- audio_regs(1),
- audio_regs(2),
- audio_regs(3),
- audio_regs(4)
-};
+static struct dce_audio_registers audio_regs[5];
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
@@ -259,23 +261,10 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
-#define vpg_regs(id)\
-[id] = {\
- VPG_DCN3_REG_LIST(id)\
-}
+#define vpg_regs_init(id)\
+ VPG_DCN3_REG_LIST_RI(id)
-static const struct dcn30_vpg_registers vpg_regs[] = {
- vpg_regs(0),
- vpg_regs(1),
- vpg_regs(2),
- vpg_regs(3),
- vpg_regs(4),
- vpg_regs(5),
- vpg_regs(6),
- vpg_regs(7),
- vpg_regs(8),
- vpg_regs(9),
-};
+static struct dcn30_vpg_registers vpg_regs[10];
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
@@ -285,19 +274,10 @@ static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
-#define afmt_regs(id)\
-[id] = {\
- AFMT_DCN3_REG_LIST(id)\
-}
+#define afmt_regs_init(id)\
+ AFMT_DCN3_REG_LIST_RI(id)
-static const struct dcn30_afmt_registers afmt_regs[] = {
- afmt_regs(0),
- afmt_regs(1),
- afmt_regs(2),
- afmt_regs(3),
- afmt_regs(4),
- afmt_regs(5)
-};
+static struct dcn30_afmt_registers afmt_regs[6];
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
@@ -307,17 +287,10 @@ static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
-#define apg_regs(id)\
-[id] = {\
- APG_DCN31_REG_LIST(id)\
-}
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
-static const struct dcn31_apg_registers apg_regs[] = {
- apg_regs(0),
- apg_regs(1),
- apg_regs(2),
- apg_regs(3)
-};
+static struct dcn31_apg_registers apg_regs[4];
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
@@ -327,18 +300,10 @@ static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
-#define stream_enc_regs(id)\
-[id] = {\
- SE_DCN32_REG_LIST(id)\
-}
+#define stream_enc_regs_init(id)\
+ SE_DCN32_REG_LIST_RI(id)
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
- stream_enc_regs(0),
- stream_enc_regs(1),
- stream_enc_regs(2),
- stream_enc_regs(3),
- stream_enc_regs(4)
-};
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -349,46 +314,24 @@ static const struct dcn10_stream_encoder_mask se_mask = {
};
-#define aux_regs(id)\
-[id] = {\
- DCN2_AUX_REG_LIST(id)\
-}
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
-static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
- aux_regs(0),
- aux_regs(1),
- aux_regs(2),
- aux_regs(3),
- aux_regs(4)
-};
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
-#define hpd_regs(id)\
-[id] = {\
- HPD_REG_LIST(id)\
-}
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
-static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
- hpd_regs(0),
- hpd_regs(1),
- hpd_regs(2),
- hpd_regs(3),
- hpd_regs(4)
-};
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
-#define link_regs(id, phyid)\
-[id] = {\
- LE_DCN31_REG_LIST(id), \
- UNIPHY_DCN2_REG_LIST(phyid), \
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN31_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
/*DPCS_DCN31_REG_LIST(id),*/ \
-}
-static const struct dcn10_link_enc_registers link_enc_regs[] = {
- link_regs(0, A),
- link_regs(1, B),
- link_regs(2, C),
- link_regs(3, D),
- link_regs(4, E)
-};
+static struct dcn10_link_enc_registers link_enc_regs[5];
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
@@ -401,17 +344,10 @@ static const struct dcn10_link_enc_mask le_mask = {
//DPCS_DCN31_MASK_SH_LIST(_MASK)
};
-#define hpo_dp_stream_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
-}
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
-static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
- hpo_dp_stream_encoder_reg_list(0),
- hpo_dp_stream_encoder_reg_list(1),
- hpo_dp_stream_encoder_reg_list(2),
- hpo_dp_stream_encoder_reg_list(3),
-};
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
@@ -422,20 +358,14 @@ static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
};
-#define hpo_dp_link_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
- /*DCN3_1_RDPCSTX_REG_LIST(0),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(1),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(2),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(3),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(4)*/\
-}
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
-static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
- hpo_dp_link_encoder_reg_list(0),
- hpo_dp_link_encoder_reg_list(1),
-};
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
@@ -445,17 +375,10 @@ static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
-#define dpp_regs(id)\
-[id] = {\
- DPP_REG_LIST_DCN30_COMMON(id),\
-}
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN30_COMMON_RI(id)
-static const struct dcn3_dpp_registers dpp_regs[] = {
- dpp_regs(0),
- dpp_regs(1),
- dpp_regs(2),
- dpp_regs(3)
-};
+static struct dcn3_dpp_registers dpp_regs[4];
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
@@ -466,17 +389,10 @@ static const struct dcn3_dpp_mask tf_mask = {
};
-#define opp_regs(id)\
-[id] = {\
- OPP_REG_LIST_DCN30(id),\
-}
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN30_RI(id)
-static const struct dcn20_opp_registers opp_regs[] = {
- opp_regs(0),
- opp_regs(1),
- opp_regs(2),
- opp_regs(3)
-};
+static struct dcn20_opp_registers opp_regs[4];
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
@@ -486,21 +402,16 @@ static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
-#define aux_engine_regs(id)\
-[id] = {\
- AUX_COMMON_REG_LIST0(id), \
- .AUXN_IMPCAL = 0, \
- .AUXP_IMPCAL = 0, \
- .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
-}
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
+ )
-static const struct dce110_aux_registers aux_engine_regs[] = {
- aux_engine_regs(0),
- aux_engine_regs(1),
- aux_engine_regs(2),
- aux_engine_regs(3),
- aux_engine_regs(4)
-};
+static struct dce110_aux_registers aux_engine_regs[5];
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
@@ -510,15 +421,10 @@ static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
-#define dwbc_regs_dcn3(id)\
-[id] = {\
- DWBC_COMMON_REG_LIST_DCN30(id),\
-}
-
-static const struct dcn30_dwbc_registers dwbc30_regs[] = {
- dwbc_regs_dcn3(0),
-};
+static struct dcn30_dwbc_registers dwbc30_regs[1];
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -528,14 +434,10 @@ static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
-#define mcif_wb_regs_dcn3(id)\
-[id] = {\
- MCIF_WB_COMMON_REG_LIST_DCN32(id),\
-}
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)
-static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
- mcif_wb_regs_dcn3(0)
-};
+static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -545,17 +447,10 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define dsc_regsDCN20(id)\
-[id] = {\
- DSC_REG_LIST_DCN20(id)\
-}
+#define dsc_regsDCN20_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
-static const struct dcn20_dsc_registers dsc_regs[] = {
- dsc_regsDCN20(0),
- dsc_regsDCN20(1),
- dsc_regsDCN20(2),
- dsc_regsDCN20(3)
-};
+static struct dcn20_dsc_registers dsc_regs[4];
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
@@ -565,17 +460,23 @@ static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-static const struct dcn30_mpc_registers mpc_regs = {
- MPC_REG_LIST_DCN3_2(0),
- MPC_REG_LIST_DCN3_2(1),
- MPC_REG_LIST_DCN3_2(2),
- MPC_REG_LIST_DCN3_2(3),
- MPC_OUT_MUX_REG_LIST_DCN3_0(0),
- MPC_OUT_MUX_REG_LIST_DCN3_0(1),
- MPC_OUT_MUX_REG_LIST_DCN3_0(2),
- MPC_OUT_MUX_REG_LIST_DCN3_0(3),
- MPC_DWB_MUX_REG_LIST_DCN3_0(0),
-};
+static struct dcn30_mpc_registers mpc_regs;
+#define dcn_mpc_regs_init()\
+ ( \
+ MPC_REG_LIST_DCN3_0_RI(0),\
+ MPC_REG_LIST_DCN3_0_RI(1),\
+ MPC_REG_LIST_DCN3_0_RI(2),\
+ MPC_REG_LIST_DCN3_0_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_MCM_REG_LIST_DCN32_RI(0),\
+ MPC_MCM_REG_LIST_DCN32_RI(1),\
+ MPC_MCM_REG_LIST_DCN32_RI(2),\
+ MPC_MCM_REG_LIST_DCN32_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)\
+ )
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -585,19 +486,10 @@ static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define optc_regs(id)\
-[id] = {OPTC_COMMON_REG_LIST_DCN3_2(id)}
-
-//#ifdef DIAGS_BUILD
-//static struct dcn_optc_registers optc_regs[] = {
-//#else
-static const struct dcn_optc_registers optc_regs[] = {
-//#endif
- optc_regs(0),
- optc_regs(1),
- optc_regs(2),
- optc_regs(3)
-};
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_2_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -607,17 +499,10 @@ static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define hubp_regs(id)\
-[id] = {\
- HUBP_REG_LIST_DCN32(id)\
-}
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN32_RI(id)
-static const struct dcn_hubp2_registers hubp_regs[] = {
- hubp_regs(0),
- hubp_regs(1),
- hubp_regs(2),
- hubp_regs(3)
-};
+static struct dcn_hubp2_registers hubp_regs[4];
static const struct dcn_hubp2_shift hubp_shift = {
@@ -627,9 +512,10 @@ static const struct dcn_hubp2_shift hubp_shift = {
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dcn_hubbub_registers hubbub_reg = {
- HUBBUB_REG_LIST_DCN32(0)
-};
+
+static struct dcn_hubbub_registers hubbub_reg;
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN32_RI(0)
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
@@ -639,9 +525,10 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dccg_registers dccg_regs = {
- DCCG_REG_LIST_DCN32()
-};
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN32_RI()
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN32(__SHIFT)
@@ -714,9 +601,10 @@ static const struct dccg_mask dccg_mask = {
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)
-static const struct dce_hwseq_registers hwseq_reg = {
- HWSEQ_DCN32_REG_LIST()
-};
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN32_REG_LIST()
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -759,29 +647,10 @@ static const struct dce_hwseq_shift hwseq_shift = {
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
-#define vmid_regs(id)\
-[id] = {\
- DCN20_VMID_REG_LIST(id)\
-}
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
-static const struct dcn_vmid_registers vmid_regs[] = {
- vmid_regs(0),
- vmid_regs(1),
- vmid_regs(2),
- vmid_regs(3),
- vmid_regs(4),
- vmid_regs(5),
- vmid_regs(6),
- vmid_regs(7),
- vmid_regs(8),
- vmid_regs(9),
- vmid_regs(10),
- vmid_regs(11),
- vmid_regs(12),
- vmid_regs(13),
- vmid_regs(14),
- vmid_regs(15)
-};
+static struct dcn_vmid_registers vmid_regs[16];
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
@@ -867,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -903,6 +772,14 @@ static struct dce_aux *dcn32_aux_engine_create(
if (!aux_engine)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
@@ -912,15 +789,10 @@ static struct dce_aux *dcn32_aux_engine_create(
return &aux_engine->base;
}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[6];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -940,6 +812,14 @@ static struct dce_i2c_hw *dcn32_i2c_hw_create(
if (!dce_i2c_hw)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -979,6 +859,29 @@ static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx)
if (!hubbub2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
hubbub32_construct(hubbub2, ctx,
&hubbub_reg,
&hubbub_shift,
@@ -1011,6 +914,13 @@ static struct hubp *dcn32_hubp_create(
if (!hubp2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
@@ -1036,6 +946,13 @@ static struct dpp *dcn32_dpp_create(
if (!dpp3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
if (dpp32_construct(dpp3, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp3->base;
@@ -1056,6 +973,10 @@ static struct mpc *dcn32_mpc_create(
if (!mpc30)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
dcn32_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
@@ -1077,6 +998,13 @@ static struct output_pixel_processor *dcn32_opp_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
dcn20_opp_construct(opp2, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp2->base;
@@ -1093,6 +1021,13 @@ static struct timing_generator *dcn32_timing_generator_create(
if (!tgn10)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
@@ -1127,6 +1062,30 @@ static struct link_encoder *dcn32_link_encoder_create(
if (!enc20)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
dcn32_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
@@ -1156,7 +1115,7 @@ static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
- generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
@@ -1164,6 +1123,15 @@ static void read_dce_straps(
static struct audio *dcn32_create_audio(
struct dc_context *ctx, unsigned int inst)
{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
@@ -1177,6 +1145,19 @@ static struct vpg *dcn32_vpg_create(
if (!vpg3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
@@ -1194,6 +1175,15 @@ static struct afmt *dcn32_afmt_create(
if (!afmt3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
@@ -1211,6 +1201,13 @@ static struct apg *dcn31_apg_create(
if (!apg31)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
@@ -1247,6 +1244,14 @@ static struct stream_encoder *dcn32_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1297,6 +1302,13 @@ static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
@@ -1314,6 +1326,11 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
@@ -1326,6 +1343,10 @@ static struct dce_hwseq *dcn32_hwseq_create(
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
@@ -1517,6 +1538,10 @@ static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT dwbc30_regs
+ dwbc_regs_dcn3_init(0);
+
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
@@ -1542,6 +1567,10 @@ static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb30_regs
+ mcif_wb_regs_dcn3_init(0);
+
dcn32_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
@@ -1564,6 +1593,13 @@ static struct display_stream_compressor *dcn32_dsc_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN20_init(0),
+ dsc_regsDCN20_init(1),
+ dsc_regsDCN20_init(2),
+ dsc_regsDCN20_init(3);
+
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
dsc->max_image_width = 6016;
@@ -1701,13 +1737,26 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
{
int i;
bool removed_pipe = false;
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_stream_state *phantom_stream = NULL;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// build scaling params for phantom pipes
if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ phantom_plane = pipe->plane_state;
+ phantom_stream = pipe->stream;
+
dc_rem_all_planes_for_stream(dc, pipe->stream, context);
dc_remove_stream_from_ctx(dc, context, pipe->stream);
+
+ /* Ref count is incremented on allocation and also when added to the context.
+ * Therefore we must call release for the the phantom plane and stream once
+ * they are removed from the ctx to finally decrement the refcount to 0 to free.
+ */
+ dc_plane_state_release(phantom_plane);
+ dc_stream_release(phantom_stream);
+
removed_pipe = true;
}
@@ -1904,13 +1953,11 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
- res_ctx->pipe_ctx[i].stream->odm_2to1_policy_applied = false;
- if (context->stream_count == 1 && timing->dsc_cfg.num_slices_h != 1) {
+ if (context->stream_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal)) {
if (dc->debug.enable_single_display_2to1_odm_policy) {
if (!((plane_count > 2) && pipe->top_pipe))
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
- res_ctx->pipe_ctx[i].stream->odm_2to1_policy_applied = true;
}
pipe_cnt++;
}
@@ -2002,6 +2049,28 @@ static bool dcn32_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
+ #undef REG_STRUCT
+ #define REG_STRUCT bios_regs
+ bios_regs_init();
+
+ #undef REG_STRUCT
+ #define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+ #undef REG_STRUCT
+ #define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+ #undef REG_STRUCT
+ #define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
DC_FP_START();
ctx->dc_bios->regs = &bios_regs;
@@ -2051,6 +2120,7 @@ static bool dcn32_resource_construct(
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 1e7e6201c880..60d8fad16eee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -109,4 +109,1084 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
bool *is_pipe_split_expected, int pipe_cnt);
+/* definitions for run time init of reg offsets */
+
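The _RI ("runtime init") macros below all expand into assignments against whatever the including file has currently bound to REG_STRUCT, so register addresses are computed at probe time from ctx->dcn_reg_offsets[] / ctx->nbio_reg_offsets[] rather than baked into const initializers. A minimal standalone sketch of the same pattern, with illustrative names only:

	#include <stdio.h>

	struct sketch_regs { unsigned int CTRL; };

	/* per-ASIC segment bases, normally filled from the IP offset tables */
	static const unsigned int sketch_seg_base[] = { 0x1000, 0x2000 };

	#define regCTRL          0x04
	#define regCTRL_BASE_IDX 1

	/* same shape as SRI_ARR(): REG_STRUCT[id].reg = segment base + offset */
	#define SKETCH_SRI_ARR(reg_name, id) \
		REG_STRUCT[id].reg_name = \
			sketch_seg_base[reg ## reg_name ## _BASE_IDX] + reg ## reg_name

	static struct sketch_regs sketch_hubp_regs[2];

	int main(void)
	{
	#define REG_STRUCT sketch_hubp_regs
		SKETCH_SRI_ARR(CTRL, 0), SKETCH_SRI_ARR(CTRL, 1);
	#undef REG_STRUCT

		printf("%#x %#x\n", sketch_hubp_regs[0].CTRL, sketch_hubp_regs[1].CTRL);
		return 0;
	}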
+/* CLK SRC */
+#define CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) \
+ ( \
+ SRI_ARR_ALPHABET(PIXCLK_RESYNC_CNTL, PHYPLL, index, pllid), \
+ SRII_ARR_2(PHASE, DP_DTO, 0, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 1, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 2, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 3, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 0, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 1, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 2, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 3, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 0, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 1, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 2, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index) \
+ )
+
+/* ABM */
+#define ABM_DCN32_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI_ARR(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI_ARR(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI_ARR(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI_ARR(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id) \
+ )
+
+/* Audio */
+#define AUD_COMMON_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id), \
+ SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, id), \
+ SR_ARR(DCCG_AUDIO_DTO_SOURCE, id), SR_ARR(DCCG_AUDIO_DTO0_MODULE, id), \
+ SR_ARR(DCCG_AUDIO_DTO0_PHASE, id), SR_ARR(DCCG_AUDIO_DTO1_MODULE, id), \
+ SR_ARR(DCCG_AUDIO_DTO1_PHASE, id) \
+ )
+
+/* VPG */
+
+#define VPG_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \
+ SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
+ SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id) \
+ )
+
+/* AFMT */
+#define AFMT_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_60958_0, AFMT, id), SRI_ARR(AFMT_60958_1, AFMT, id), \
+ SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id) \
+ )
+
+/* APG */
+#define APG_DCN31_REG_LIST_RI(id) \
+ (\
+ SRI_ARR(APG_CONTROL, APG, id), SRI_ARR(APG_CONTROL2, APG, id), \
+ SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id) \
+ )
+
+/* Stream encoder */
+#define SE_DCN32_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \
+ SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_GC, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
+ SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_ACR_32_0, DIG, id), SRI_ARR(HDMI_ACR_32_1, DIG, id), \
+ SRI_ARR(HDMI_ACR_44_0, DIG, id), SRI_ARR(HDMI_ACR_44_1, DIG, id), \
+ SRI_ARR(HDMI_ACR_48_0, DIG, id), SRI_ARR(HDMI_ACR_48_1, DIG, id), \
+ SRI_ARR(DP_DB_CNTL, DP, id), SRI_ARR(DP_MSA_MISC, DP, id), \
+ SRI_ARR(DP_MSA_VBID_MISC, DP, id), SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \
+ SRI_ARR(DP_SEC_CNTL2, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \
+ SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \
+ SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
+ SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \
+ SRI_ARR(DP_SEC_TIMESTAMP, DP, id), SRI_ARR(DP_DSC_CNTL, DP, id), \
+ SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(DP_SEC_FRAMING4, DP, id), SRI_ARR(DP_GSP11_CNTL, DP, id), \
+ SRI_ARR(DME_CONTROL, DME, id), \
+ SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(DIG_FE_CNTL, DIG, id), SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \
+ SRI_ARR(DIG_FIFO_CTRL0, DIG, id) \
+ )
+
+/* Aux regs */
+
+#define AUX_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+ SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id) \
+ )
+
+#define DCN2_AUX_REG_LIST_RI(id) \
+ ( \
+ AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id) \
+ )
+
+/* HDP */
+#define HPD_REG_LIST_RI(id) SRI_ARR(DC_HPD_CONTROL, HPD, id)
+
+/* Link encoder */
+#define LE_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DIG_BE_CNTL, DIG, id), SRI_ARR(DIG_BE_EN_CNTL, DIG, id), \
+ SRI_ARR(TMDS_CTL_BITS, DIG, id), \
+ SRI_ARR(TMDS_DCBALANCER_CONTROL, DIG, id), SRI_ARR(DP_CONFIG, DP, id), \
+ SRI_ARR(DP_DPHY_CNTL, DP, id), SRI_ARR(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_SCRAM_CNTL, DP, id), SRI_ARR(DP_DPHY_SYM0, DP, id), \
+ SRI_ARR(DP_DPHY_SYM1, DP, id), SRI_ARR(DP_DPHY_SYM2, DP, id), \
+ SRI_ARR(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI_ARR(DP_LINK_CNTL, DP, id), SRI_ARR(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI_ARR(DP_MSE_SAT0, DP, id), SRI_ARR(DP_MSE_SAT1, DP, id), \
+ SRI_ARR(DP_MSE_SAT2, DP, id), SRI_ARR(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI_ARR(DP_SEC_CNTL, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_FAST_TRAINING, DP, id), SRI_ARR(DP_SEC_CNTL1, DP, id), \
+ SRI_ARR(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) \
+ )
+
+#define LE_DCN31_REG_LIST_RI(id) \
+ ( \
+ LE_DCN3_REG_LIST_RI(id), SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR_ARR(DIO_LINKA_CNTL, id), SR_ARR(DIO_LINKB_CNTL, id), \
+ SR_ARR(DIO_LINKC_CNTL, id), SR_ARR(DIO_LINKD_CNTL, id), \
+ SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id) \
+ )
+
+#define UNIPHY_DCN2_REG_LIST_RI(id, phyid) \
+ ( \
+ SRI_ARR_ALPHABET(CLOCK_ENABLE, SYMCLK, id, phyid), \
+ SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid) \
+ )
+
+/* HPO DP stream encoder */
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) \
+ ( \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL0, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL1, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL2, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL3, id), \
+ SRI_ARR(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) \
+ )
+
+/* HPO DP link encoder regs */
+#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
+ SRI_ARR(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) \
+ )
+
+/* DPP */
+#define DPP_REG_LIST_DCN30_COMMON_RI(id) \
+ ( \
+ SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \
+ SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \
+ SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_DATA, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_REGION_0_1, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_REGION_32_33, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_REGION_0_1, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_REGION_32_33, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C11_C12, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C13_C14, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C21_C22, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C23_C24, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C31_C32, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C33_C34, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C11_C12, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C13_C14, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C21_C22, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C23_C24, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C31_C32, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C33_C34, CM, id), \
+ SRI_ARR(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI_ARR(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \
+ SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \
+ SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \
+ SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \
+ SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI_ARR(DSCL_2TAP_CONTROL, DSCL, id), SRI_ARR(MPC_SIZE, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI_ARR(RECOUT_START, DSCL, id), SRI_ARR(RECOUT_SIZE, DSCL, id), \
+ SRI_ARR(PRE_DEALPHA, CNVC_CFG, id), SRI_ARR(PRE_REALPHA, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_MODE, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_C11_C12, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_C33_C34, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_B_C11_C12, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_B_C33_C34, CNVC_CFG, id), \
+ SRI_ARR(CM_POST_CSC_CONTROL, CM, id), \
+ SRI_ARR(CM_POST_CSC_C11_C12, CM, id), \
+ SRI_ARR(CM_POST_CSC_C33_C34, CM, id), \
+ SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \
+ SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \
+ SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \
+ SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
+ SRI_ARR(DPP_CONTROL, DPP_TOP, id), SRI_ARR(CM_HDR_MULT_COEF, CM, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(ALPHA_2BIT_LUT, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_R, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_G, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_B, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_R, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_G, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_B, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_RED, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_GREEN, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_BLUE, CNVC_CFG, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \
+ SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \
+ SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id) \
+ )
+
+/* OPP */
+#define OPP_REG_LIST_DCN_RI(id) \
+ ( \
+ SRI_ARR(FMT_BIT_DEPTH_CONTROL, FMT, id), SRI_ARR(FMT_CONTROL, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI_ARR(FMT_CLAMP_CNTL, FMT, id), \
+ SRI_ARR(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI_ARR(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
+ SRI_ARR(OPPBUF_CONTROL, OPPBUF, id), \
+ SRI_ARR(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
+ SRI_ARR(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \
+ SRI_ARR(OPP_PIPE_CONTROL, OPP_PIPE, id) \
+ )
+
+#define OPP_REG_LIST_DCN10_RI(id) OPP_REG_LIST_DCN_RI(id)
+
+#define OPP_DPG_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DPG_CONTROL, DPG, id), SRI_ARR(DPG_DIMENSIONS, DPG, id), \
+ SRI_ARR(DPG_OFFSET_SEGMENT, DPG, id), SRI_ARR(DPG_COLOUR_B_CB, DPG, id), \
+ SRI_ARR(DPG_COLOUR_G_Y, DPG, id), SRI_ARR(DPG_COLOUR_R_CR, DPG, id), \
+ SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id) \
+ )
+
+#define OPP_REG_LIST_DCN30_RI(id) \
+ ( \
+ OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \
+ SRI_ARR(FMT_422_CONTROL, FMT, id) \
+ )
+
+/* Aux engine regs */
+#define AUX_COMMON_REG_LIST0_RI(id) \
+ ( \
+ SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_SW_DATA, DP_AUX, id), SRI_ARR(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
+ SRI_ARR(AUX_SW_STATUS, DP_AUX, id) \
+ )
+
+/* DWBC */
+#define DWBC_COMMON_REG_LIST_DCN30_RI(id) \
+ ( \
+ SR_ARR(DWB_ENABLE_CLK_CTRL, id), SR_ARR(DWB_MEM_PWR_CTRL, id), \
+ SR_ARR(FC_MODE_CTRL, id), SR_ARR(FC_FLOW_CTRL, id), \
+ SR_ARR(FC_WINDOW_START, id), SR_ARR(FC_WINDOW_SIZE, id), \
+ SR_ARR(FC_SOURCE_SIZE, id), SR_ARR(DWB_UPDATE_CTRL, id), \
+ SR_ARR(DWB_CRC_CTRL, id), SR_ARR(DWB_CRC_MASK_R_G, id), \
+ SR_ARR(DWB_CRC_MASK_B_A, id), SR_ARR(DWB_CRC_VAL_R_G, id), \
+ SR_ARR(DWB_CRC_VAL_B_A, id), SR_ARR(DWB_OUT_CTRL, id), \
+ SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, id), \
+ SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT, id), \
+ SR_ARR(DWB_HOST_READ_CONTROL, id), SR_ARR(DWB_SOFT_RESET, id), \
+ SR_ARR(DWB_HDR_MULT_COEF, id), SR_ARR(DWB_GAMUT_REMAP_MODE, id), \
+ SR_ARR(DWB_GAMUT_REMAP_COEF_FORMAT, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C11_C12, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C13_C14, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C21_C22, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C23_C24, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C31_C32, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C33_C34, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C11_C12, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C13_C14, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C21_C22, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C23_C24, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C31_C32, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C33_C34, id), SR_ARR(DWB_OGAM_CONTROL, id), \
+ SR_ARR(DWB_OGAM_LUT_INDEX, id), SR_ARR(DWB_OGAM_LUT_DATA, id), \
+ SR_ARR(DWB_OGAM_LUT_CONTROL, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMA_OFFSET_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_OFFSET_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_0_1, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_2_3, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_4_5, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_6_7, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_8_9, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_10_11, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_12_13, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_14_15, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_16_17, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_18_19, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_20_21, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_22_23, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_24_25, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_26_27, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_28_29, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_30_31, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_32_33, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMB_OFFSET_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_OFFSET_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_0_1, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_2_3, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_4_5, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_6_7, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_8_9, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_10_11, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_12_13, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_14_15, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_16_17, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_18_19, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_20_21, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_22_23, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_24_25, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_26_27, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_28_29, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_30_31, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id) \
+ )
+
+/* MCIF */
+
+#define MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst) \
+ ( \
+ SRI2_ARR(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_PITCH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst), \
+ SRI2_ARR(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_WATERMARK, MMHUBBUB, inst), \
+ SRI2_ARR(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst) \
+ )
+
+/* DSC */
+
+#define DSC_REG_LIST_DCN20_RI(id) \
+ ( \
+ SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \
+ SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \
+ SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \
+ SRI_ARR(DSCC_STATUS, DSCC, id), \
+ SRI_ARR(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG0, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG1, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG2, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG3, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG4, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG5, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG6, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG7, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG8, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG9, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG10, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG11, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG12, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG13, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG14, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG15, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG16, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG17, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG18, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG19, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG20, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG21, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG22, DSCC, id), \
+ SRI_ARR(DSCC_MEM_POWER_CONTROL, DSCC, id), \
+ SRI_ARR(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_MAX_ABS_ERROR0, DSCC, id), \
+ SRI_ARR(DSCC_MAX_ABS_ERROR1, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
+ SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \
+ SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) \
+ )
+
+/* MPC */
+
+#define MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst) \
+ SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst)
+
+#define MPC_MCM_REG_LIST_DCN32_RI(inst) \
+ ( \
+ SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst), \
+ SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst) \
+ )
+
+#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \
+ ( \
+ SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) \
+ )
+
+#define MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst) \
+ ( \
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(CSC_MODE, MPC_OUT, inst), \
+ SRII(CSC_C11_C12_A, MPC_OUT, inst), SRII(CSC_C33_C34_A, MPC_OUT, inst), \
+ SRII(CSC_C11_C12_B, MPC_OUT, inst), SRII(CSC_C33_C34_B, MPC_OUT, inst), \
+ SRII(DENORM_CONTROL, MPC_OUT, inst), \
+ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst), \
+ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT) \
+ )
+
+#define MPC_COMMON_REG_LIST_DCN1_0_RI(inst) \
+ ( \
+ SRII(MPCC_TOP_SEL, MPCC, inst), SRII(MPCC_BOT_SEL, MPCC, inst), \
+ SRII(MPCC_CONTROL, MPCC, inst), SRII(MPCC_STATUS, MPCC, inst), \
+ SRII(MPCC_OPP_ID, MPCC, inst), SRII(MPCC_BG_G_Y, MPCC, inst), \
+ SRII(MPCC_BG_R_CR, MPCC, inst), SRII(MPCC_BG_B_CB, MPCC, inst), \
+ SRII(MPCC_SM_CONTROL, MPCC, inst), \
+ SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) \
+ )
+
+#define MPC_REG_LIST_DCN3_0_RI(inst) \
+ ( \
+ MPC_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(MPCC_TOP_GAIN, MPCC, inst), \
+ SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst), \
+ SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst), \
+ SRII(MPCC_MEM_PWR_CTRL, MPCC, inst), \
+ SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) \
+ )
+
+/* OPTC */
+
+#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \
+ ( \
+ SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \
+ SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \
+ SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \
+ SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst), \
+ SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_H_TOTAL, OTG, inst), \
+ SRI_ARR(OTG_H_BLANK_START_END, OTG, inst), \
+ SRI_ARR(OTG_H_SYNC_A, OTG, inst), SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst), \
+ SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst), SRI_ARR(OTG_V_TOTAL, OTG, inst), \
+ SRI_ARR(OTG_V_BLANK_START_END, OTG, inst), \
+ SRI_ARR(OTG_V_SYNC_A, OTG, inst), SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst), \
+ SRI_ARR(OTG_CONTROL, OTG, inst), SRI_ARR(OTG_STEREO_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_STEREO_STATUS, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_TRIGA_CNTL, OTG, inst), \
+ SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst), \
+ SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst), \
+ SRI_ARR(OTG_STATUS, OTG, inst), SRI_ARR(OTG_STATUS_POSITION, OTG, inst), \
+ SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst), \
+ SRI_ARR(OTG_M_CONST_DTO0, OTG, inst), \
+ SRI_ARR(OTG_M_CONST_DTO1, OTG, inst), \
+ SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst), \
+ SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \
+ SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \
+ SRI_ARR(CONTROL, VTG, inst), SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_GSL_CONTROL, OTG, inst), SRI_ARR(OTG_CRC_CNTL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst), \
+ SRI_ARR(OTG_CRC0_DATA_B, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst), \
+ SR_ARR(GSL_SOURCE_SELECT, inst), \
+ SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
+ SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst), \
+ SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst), \
+ SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst), \
+ SRI_ARR(OTG_DSC_START_POSITION, OTG, inst), \
+ SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst), \
+ SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst), \
+ SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
+ SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst) \
+ )
+
+/* HUBP */
+
+#define HUBP_REG_LIST_DCN_VM_RI(id) \
+ ( \
+ SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN_RI(id) \
+ ( \
+ SRI_ARR(DCHUBP_CNTL, HUBP, id), SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \
+ SRI_ARR(HUBPREQ_DEBUG, HUBP, id), SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_SURFACE_PITCH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_PITCH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_FLIP_CONTROL, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_CONTROL, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id), \
+ SRI_ARR(HUBPRET_CONTROL, HUBPRET, id), \
+ SRI_ARR(HUBPRET_READ_LINE_STATUS, HUBPRET, id), \
+ SRI_ARR(DCN_EXPANSION_MODE, HUBPREQ, id), \
+ SRI_ARR(DCHUBP_REQ_SIZE_CONFIG, HUBP, id), \
+ SRI_ARR(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id), \
+ SRI_ARR(BLANK_OFFSET_0, HUBPREQ, id), \
+ SRI_ARR(BLANK_OFFSET_1, HUBPREQ, id), \
+ SRI_ARR(DST_DIMENSIONS, HUBPREQ, id), \
+ SRI_ARR(DST_AFTER_SCALER, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(PER_LINE_DELIVERY_PRE, HUBPREQ, id), \
+ SRI_ARR(PER_LINE_DELIVERY, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_6, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_7, HUBPREQ, id), \
+ SRI_ARR(DCN_TTU_QOS_WM, HUBPREQ, id), \
+ SRI_ARR(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF0_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF0_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF1_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id) \
+ )
+
+#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
+ SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
+ SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id), \
+ SRI_ARR(CURSOR_SETTINGS, HUBPREQ, id), \
+ SRI_ARR(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI_ARR(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI_ARR(CURSOR_SIZE, CURSOR0_, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(CURSOR_POSITION, CURSOR0_, id), \
+ SRI_ARR(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI_ARR(CURSOR_DST_OFFSET, CURSOR0_, id), \
+ SRI_ARR(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI_ARR(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
+ SRI_ARR(DMDATA_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_SW_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_QOS_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_SW_DATA, CURSOR0_, id), \
+ SRI_ARR(DMDATA_STATUS, CURSOR0_, id), \
+ SRI_ARR(FLIP_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
+ SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN21_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN2_COMMON_RI(id), SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN30_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN32_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN30_RI(id), SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
+ SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
+ SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id) \
+ )
+
+/* HUBBUB */
+
+#define HUBBUB_REG_LIST_DCN32_RI(id) \
+ ( \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \
+ SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \
+ SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \
+ SR(DCN_VM_AGP_BASE), HUBBUB_SR_WATERMARK_REG_LIST(), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D), SR(DCHUBBUB_DET0_CTRL), \
+ SR(DCHUBBUB_DET1_CTRL), SR(DCHUBBUB_DET2_CTRL), SR(DCHUBBUB_DET3_CTRL), \
+ SR(DCHUBBUB_COMPBUF_CTRL), SR(COMPBUF_RESERVED_SPACE), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D), \
+ SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
+ SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS) \
+ )
+
+/* DCCG */
+
+#define DCCG_REG_LIST_DCN32_RI() \
+ ( \
+ SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \
+ DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \
+ DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \
+ SR(PHYASYMCLK_CLOCK_CNTL), SR(PHYBSYMCLK_CLOCK_CNTL), \
+ SR(PHYCSYMCLK_CLOCK_CNTL), SR(PHYDSYMCLK_CLOCK_CNTL), \
+ SR(PHYESYMCLK_CLOCK_CNTL), SR(DPSTREAMCLK_CNTL), SR(HDMISTREAMCLK_CNTL), \
+ SR(SYMCLK32_SE_CNTL), SR(SYMCLK32_LE_CNTL), \
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1), \
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3), \
+ DCCG_SRII(MODULO, DTBCLK_DTO, 0), DCCG_SRII(MODULO, DTBCLK_DTO, 1), \
+ DCCG_SRII(MODULO, DTBCLK_DTO, 2), DCCG_SRII(MODULO, DTBCLK_DTO, 3), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
+ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
+ SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE) \
+ )
+
+/* VMID */
+#define DCN20_VMID_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(CNTL, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id) \
+ )
+
+/* I2C HW */
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(SETUP, DC_I2C_DDC, id), SRI_ARR(SPEED, DC_I2C_DDC, id), \
+ SRI_ARR(HW_STATUS, DC_I2C_DDC, id), SR_ARR(DC_I2C_ARBITRATION, id), \
+ SR_ARR(DC_I2C_CONTROL, id), SR_ARR(DC_I2C_SW_STATUS, id), \
+ SR_ARR(DC_I2C_TRANSACTION0, id), SR_ARR(DC_I2C_TRANSACTION1, id), \
+ SR_ARR(DC_I2C_TRANSACTION2, id), SR_ARR(DC_I2C_TRANSACTION3, id), \
+ SR_ARR(DC_I2C_DATA, id), SR_ARR(MICROSECOND_TIME_BASE_DIV, id) \
+ )
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \
+ ( \
+ I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR(DIO_MEM_PWR_CTRL, id), \
+ SR_ARR(DIO_MEM_PWR_STATUS, id) \
+ )
+
#endif /* _DCN32_RESOURCE_H_ */
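Note on the header above: every register list gains an *_RI ("register init") variant in which each SR/SRI/SRI_ARR macro expands to an assignment into REG_STRUCT rather than a designated initializer, so callers can point REG_STRUCT at a runtime-filled array and expand the list as ordinary statements (the dcn321_resource.c hunks further down do exactly that after redirecting REG_STRUCT). The following is a minimal standalone sketch of that pattern only; the demo_* names are hypothetical stand-ins, not the real DCN registers or offset tables.

/*
 * Minimal sketch of the REG_STRUCT / *_RI pattern; demo_* names are
 * illustrative assumptions, not part of the patch.
 */
#include <stdio.h>

struct demo_regs {
	unsigned int CTRL;
	unsigned int STATUS;
};

/* stand-in for the per-ASIC offset table (ctx->dcn_reg_offsets in the patch) */
static const unsigned int demo_seg_base[2] = { 0x1000, 0x2000 };

#define DEMO_BASE(seg) demo_seg_base[seg]

/* like SRI_ARR(): an assignment into REG_STRUCT, not an initializer */
#define DEMO_SRI_ARR(reg_name, seg, id) \
	REG_STRUCT[id].reg_name = DEMO_BASE(seg) + 0x10 * (id)

#define DEMO_REG_LIST_RI(id) \
	( \
	  DEMO_SRI_ARR(CTRL, 0, id), \
	  DEMO_SRI_ARR(STATUS, 1, id) \
	)

static struct demo_regs demo_regs_arr[2];

int main(void)
{
/* a create/construct function redirects REG_STRUCT, then expands the lists */
#define REG_STRUCT demo_regs_arr
	DEMO_REG_LIST_RI(0),
	DEMO_REG_LIST_RI(1);
#undef REG_STRUCT

	printf("CTRL[1]=%#x STATUS[1]=%#x\n",
	       demo_regs_arr[1].CTRL, demo_regs_arr[1].STATUS);
	return 0;
}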
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b3f8503cea9c..ab918fe38f6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -63,7 +63,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+ mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
// For bytes required in MALL, calculate based on number of MBlks required
num_mblks = (mall_region_pixels * bytes_per_pixel +
@@ -144,7 +144,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
- continue;
+ return false;
if (!pipe->plane_state)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 8157e40d2c7e..a93dc00ebfb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -138,78 +138,95 @@ enum dcn321_clk_src_array_id {
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
-#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SR_ARR_INIT(reg_name, id, value)\
+ REG_STRUCT[id].reg_name = value
#define SRI(reg_name, block, id)\
- .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ reg ## reg_name
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## temp_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
#define DCCG_SRII(reg_name, block, id)\
- .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
- reg ## reg_name ## _ ## block ## id
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
/* NBIO */
-#define NBIO_BASE_INNER(seg) \
- NBIO_BASE__INST0_SEG ## seg
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX0_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
#define CTX ctx
#define REG(reg_name) \
- (DCN_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-static const struct bios_registers bios_regs = {
- NBIO_SR(BIOS_SCRATCH_3),
- NBIO_SR(BIOS_SCRATCH_6)
-};
+static struct bios_registers bios_regs;
-#define clk_src_regs(index, pllid)\
-[index] = {\
- CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
-}
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
-static const struct dce110_clk_src_regs clk_src_regs[] = {
- clk_src_regs(0, A),
- clk_src_regs(1, B),
- clk_src_regs(2, C),
- clk_src_regs(3, D),
- clk_src_regs(4, E)
-};
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -219,17 +236,10 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define abm_regs(id)\
-[id] = {\
- ABM_DCN32_REG_LIST(id)\
-}
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
-static const struct dce_abm_registers abm_regs[] = {
- abm_regs(0),
- abm_regs(1),
- abm_regs(2),
- abm_regs(3),
-};
+static struct dce_abm_registers abm_regs[4];
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN32(__SHIFT)
@@ -239,18 +249,10 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN32(_MASK)
};
-#define audio_regs(id)\
-[id] = {\
- AUD_COMMON_REG_LIST(id)\
-}
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
-static const struct dce_audio_registers audio_regs[] = {
- audio_regs(0),
- audio_regs(1),
- audio_regs(2),
- audio_regs(3),
- audio_regs(4)
-};
+static struct dce_audio_registers audio_regs[5];
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
@@ -265,23 +267,10 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
-#define vpg_regs(id)\
-[id] = {\
- VPG_DCN3_REG_LIST(id)\
-}
+#define vpg_regs_init(id)\
+ VPG_DCN3_REG_LIST_RI(id)
-static const struct dcn30_vpg_registers vpg_regs[] = {
- vpg_regs(0),
- vpg_regs(1),
- vpg_regs(2),
- vpg_regs(3),
- vpg_regs(4),
- vpg_regs(5),
- vpg_regs(6),
- vpg_regs(7),
- vpg_regs(8),
- vpg_regs(9),
-};
+static struct dcn30_vpg_registers vpg_regs[10];
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
@@ -291,19 +280,10 @@ static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
-#define afmt_regs(id)\
-[id] = {\
- AFMT_DCN3_REG_LIST(id)\
-}
+#define afmt_regs_init(id)\
+ AFMT_DCN3_REG_LIST_RI(id)
-static const struct dcn30_afmt_registers afmt_regs[] = {
- afmt_regs(0),
- afmt_regs(1),
- afmt_regs(2),
- afmt_regs(3),
- afmt_regs(4),
- afmt_regs(5)
-};
+static struct dcn30_afmt_registers afmt_regs[6];
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
@@ -313,17 +293,10 @@ static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
-#define apg_regs(id)\
-[id] = {\
- APG_DCN31_REG_LIST(id)\
-}
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
-static const struct dcn31_apg_registers apg_regs[] = {
- apg_regs(0),
- apg_regs(1),
- apg_regs(2),
- apg_regs(3)
-};
+static struct dcn31_apg_registers apg_regs[4];
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
@@ -333,18 +306,10 @@ static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
-#define stream_enc_regs(id)\
-[id] = {\
- SE_DCN32_REG_LIST(id)\
-}
+#define stream_enc_regs_init(id)\
+ SE_DCN32_REG_LIST_RI(id)
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
- stream_enc_regs(0),
- stream_enc_regs(1),
- stream_enc_regs(2),
- stream_enc_regs(3),
- stream_enc_regs(4)
-};
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -355,46 +320,24 @@ static const struct dcn10_stream_encoder_mask se_mask = {
};
-#define aux_regs(id)\
-[id] = {\
- DCN2_AUX_REG_LIST(id)\
-}
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
-static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
- aux_regs(0),
- aux_regs(1),
- aux_regs(2),
- aux_regs(3),
- aux_regs(4)
-};
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
-#define hpd_regs(id)\
-[id] = {\
- HPD_REG_LIST(id)\
-}
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
-static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
- hpd_regs(0),
- hpd_regs(1),
- hpd_regs(2),
- hpd_regs(3),
- hpd_regs(4)
-};
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
-#define link_regs(id, phyid)\
-[id] = {\
- LE_DCN31_REG_LIST(id), \
- UNIPHY_DCN2_REG_LIST(phyid), \
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN31_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
/*DPCS_DCN31_REG_LIST(id),*/ \
-}
-static const struct dcn10_link_enc_registers link_enc_regs[] = {
- link_regs(0, A),
- link_regs(1, B),
- link_regs(2, C),
- link_regs(3, D),
- link_regs(4, E)
-};
+static struct dcn10_link_enc_registers link_enc_regs[5];
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
@@ -406,17 +349,10 @@ static const struct dcn10_link_enc_mask le_mask = {
// DPCS_DCN31_MASK_SH_LIST(_MASK)
};
-#define hpo_dp_stream_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
-}
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
-static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
- hpo_dp_stream_encoder_reg_list(0),
- hpo_dp_stream_encoder_reg_list(1),
- hpo_dp_stream_encoder_reg_list(2),
- hpo_dp_stream_encoder_reg_list(3),
-};
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
@@ -427,20 +363,14 @@ static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
};
-#define hpo_dp_link_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
- /*DCN3_1_RDPCSTX_REG_LIST(0),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(1),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(2),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(3),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(4)*/\
-}
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
-static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
- hpo_dp_link_encoder_reg_list(0),
- hpo_dp_link_encoder_reg_list(1),
-};
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
@@ -450,17 +380,10 @@ static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
-#define dpp_regs(id)\
-[id] = {\
- DPP_REG_LIST_DCN30_COMMON(id),\
-}
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN30_COMMON_RI(id)
-static const struct dcn3_dpp_registers dpp_regs[] = {
- dpp_regs(0),
- dpp_regs(1),
- dpp_regs(2),
- dpp_regs(3)
-};
+static struct dcn3_dpp_registers dpp_regs[4];
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
@@ -471,17 +394,10 @@ static const struct dcn3_dpp_mask tf_mask = {
};
-#define opp_regs(id)\
-[id] = {\
- OPP_REG_LIST_DCN30(id),\
-}
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN30_RI(id)
-static const struct dcn20_opp_registers opp_regs[] = {
- opp_regs(0),
- opp_regs(1),
- opp_regs(2),
- opp_regs(3)
-};
+static struct dcn20_opp_registers opp_regs[4];
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
@@ -491,21 +407,15 @@ static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
-#define aux_engine_regs(id)\
-[id] = {\
- AUX_COMMON_REG_LIST0(id), \
- .AUXN_IMPCAL = 0, \
- .AUXP_IMPCAL = 0, \
- .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
-}
+#define aux_engine_regs_init(id) \
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
+ )
-static const struct dce110_aux_registers aux_engine_regs[] = {
- aux_engine_regs(0),
- aux_engine_regs(1),
- aux_engine_regs(2),
- aux_engine_regs(3),
- aux_engine_regs(4)
-};
+static struct dce110_aux_registers aux_engine_regs[5];
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
@@ -515,15 +425,10 @@ static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
-#define dwbc_regs_dcn3(id)\
-[id] = {\
- DWBC_COMMON_REG_LIST_DCN30(id),\
-}
-
-static const struct dcn30_dwbc_registers dwbc30_regs[] = {
- dwbc_regs_dcn3(0),
-};
+static struct dcn30_dwbc_registers dwbc30_regs[1];
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -533,14 +438,10 @@ static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
-#define mcif_wb_regs_dcn3(id)\
-[id] = {\
- MCIF_WB_COMMON_REG_LIST_DCN32(id),\
-}
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)
-static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
- mcif_wb_regs_dcn3(0)
-};
+static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -550,17 +451,10 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define dsc_regsDCN20(id)\
-[id] = {\
- DSC_REG_LIST_DCN20(id)\
-}
+#define dsc_regsDCN20_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
-static const struct dcn20_dsc_registers dsc_regs[] = {
- dsc_regsDCN20(0),
- dsc_regsDCN20(1),
- dsc_regsDCN20(2),
- dsc_regsDCN20(3)
-};
+static struct dcn20_dsc_registers dsc_regs[4];
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
@@ -570,17 +464,23 @@ static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-static const struct dcn30_mpc_registers mpc_regs = {
- MPC_REG_LIST_DCN3_2(0),
- MPC_REG_LIST_DCN3_2(1),
- MPC_REG_LIST_DCN3_2(2),
- MPC_REG_LIST_DCN3_2(3),
- MPC_OUT_MUX_REG_LIST_DCN3_0(0),
- MPC_OUT_MUX_REG_LIST_DCN3_0(1),
- MPC_OUT_MUX_REG_LIST_DCN3_0(2),
- MPC_OUT_MUX_REG_LIST_DCN3_0(3),
- MPC_DWB_MUX_REG_LIST_DCN3_0(0),
-};
+static struct dcn30_mpc_registers mpc_regs;
+#define dcn_mpc_regs_init()\
+ ( \
+ MPC_REG_LIST_DCN3_0_RI(0),\
+ MPC_REG_LIST_DCN3_0_RI(1),\
+ MPC_REG_LIST_DCN3_0_RI(2),\
+ MPC_REG_LIST_DCN3_0_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_MCM_REG_LIST_DCN32_RI(0),\
+ MPC_MCM_REG_LIST_DCN32_RI(1),\
+ MPC_MCM_REG_LIST_DCN32_RI(2),\
+ MPC_MCM_REG_LIST_DCN32_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)\
+ )
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -590,15 +490,10 @@ static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define optc_regs(id)\
-[id] = {OPTC_COMMON_REG_LIST_DCN3_2(id)}
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_2_RI(id)
-static const struct dcn_optc_registers optc_regs[] = {
- optc_regs(0),
- optc_regs(1),
- optc_regs(2),
- optc_regs(3)
-};
+static struct dcn_optc_registers optc_regs[4];
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -608,18 +503,10 @@ static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define hubp_regs(id)\
-[id] = {\
- HUBP_REG_LIST_DCN32(id)\
-}
-
-static const struct dcn_hubp2_registers hubp_regs[] = {
- hubp_regs(0),
- hubp_regs(1),
- hubp_regs(2),
- hubp_regs(3)
-};
+#define hubp_regs_init(id) \
+ HUBP_REG_LIST_DCN32_RI(id)
+static struct dcn_hubp2_registers hubp_regs[4];
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN32(__SHIFT)
@@ -628,9 +515,10 @@ static const struct dcn_hubp2_shift hubp_shift = {
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dcn_hubbub_registers hubbub_reg = {
- HUBBUB_REG_LIST_DCN32(0)
-};
+
+static struct dcn_hubbub_registers hubbub_reg;
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN32_RI(0)
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
@@ -640,9 +528,10 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dccg_registers dccg_regs = {
- DCCG_REG_LIST_DCN32()
-};
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN32_RI()
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN32(__SHIFT)
@@ -715,9 +604,10 @@ static const struct dccg_mask dccg_mask = {
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)
-static const struct dce_hwseq_registers hwseq_reg = {
- HWSEQ_DCN32_REG_LIST()
-};
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN32_REG_LIST()
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -760,29 +650,10 @@ static const struct dce_hwseq_shift hwseq_shift = {
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
-#define vmid_regs(id)\
-[id] = {\
- DCN20_VMID_REG_LIST(id)\
-}
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
-static const struct dcn_vmid_registers vmid_regs[] = {
- vmid_regs(0),
- vmid_regs(1),
- vmid_regs(2),
- vmid_regs(3),
- vmid_regs(4),
- vmid_regs(5),
- vmid_regs(6),
- vmid_regs(7),
- vmid_regs(8),
- vmid_regs(9),
- vmid_regs(10),
- vmid_regs(11),
- vmid_regs(12),
- vmid_regs(13),
- vmid_regs(14),
- vmid_regs(15)
-};
+static struct dcn_vmid_registers vmid_regs[16];
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
@@ -868,7 +739,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -905,6 +776,14 @@ static struct dce_aux *dcn321_aux_engine_create(
if (!aux_engine)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
@@ -914,15 +793,10 @@ static struct dce_aux *dcn321_aux_engine_create(
return &aux_engine->base;
}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[6];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -942,6 +816,14 @@ static struct dce_i2c_hw *dcn321_i2c_hw_create(
if (!dce_i2c_hw)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -981,6 +863,29 @@ static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx)
if (!hubbub2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
hubbub32_construct(hubbub2, ctx,
&hubbub_reg,
&hubbub_shift,
@@ -1013,6 +918,13 @@ static struct hubp *dcn321_hubp_create(
if (!hubp2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
@@ -1038,6 +950,13 @@ static struct dpp *dcn321_dpp_create(
if (!dpp3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
if (dpp32_construct(dpp3, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp3->base;
@@ -1058,6 +977,10 @@ static struct mpc *dcn321_mpc_create(
if (!mpc30)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
dcn32_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
@@ -1079,6 +1002,13 @@ static struct output_pixel_processor *dcn321_opp_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
dcn20_opp_construct(opp2, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp2->base;
@@ -1095,6 +1025,13 @@ static struct timing_generator *dcn321_timing_generator_create(
if (!tgn10)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
@@ -1129,6 +1066,30 @@ static struct link_encoder *dcn321_link_encoder_create(
if (!enc20)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
dcn321_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
@@ -1145,7 +1106,7 @@ static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
- generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
@@ -1153,6 +1114,15 @@ static void read_dce_straps(
static struct audio *dcn321_create_audio(
struct dc_context *ctx, unsigned int inst)
{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
@@ -1166,6 +1136,19 @@ static struct vpg *dcn321_vpg_create(
if (!vpg3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
@@ -1183,6 +1166,15 @@ static struct afmt *dcn321_afmt_create(
if (!afmt3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
@@ -1200,6 +1192,13 @@ static struct apg *dcn321_apg_create(
if (!apg31)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
@@ -1236,6 +1235,14 @@ static struct stream_encoder *dcn321_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1286,6 +1293,13 @@ static struct hpo_dp_stream_encoder *dcn321_hpo_dp_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
@@ -1303,6 +1317,11 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
@@ -1315,6 +1334,10 @@ static struct dce_hwseq *dcn321_hwseq_create(
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
@@ -1505,6 +1528,10 @@ static bool dcn321_dwbc_create(struct dc_context *ctx, struct resource_pool *poo
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT dwbc30_regs
+ dwbc_regs_dcn3_init(0);
+
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
@@ -1530,6 +1557,10 @@ static bool dcn321_mmhubbub_create(struct dc_context *ctx, struct resource_pool
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb30_regs
+ mcif_wb_regs_dcn3_init(0);
+
dcn32_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
@@ -1552,6 +1583,13 @@ static struct display_stream_compressor *dcn321_dsc_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN20_init(0),
+ dsc_regsDCN20_init(1),
+ dsc_regsDCN20_init(2),
+ dsc_regsDCN20_init(3);
+
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
dsc->max_image_width = 6016;
@@ -1616,6 +1654,30 @@ static bool dcn321_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn321;
@@ -1662,8 +1724,9 @@ static bool dcn321_resource_construct(
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
-
+ dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 8173f4b80424..e93187c06648 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -171,7 +171,12 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd, 0x0 = Status_Busy
#define IS_SMU_TIMEOUT(result) \
(result == 0x0)
-
+void dm_helpers_init_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *config);
+void dm_helpers_override_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *config);
int dm_helper_dmub_aux_transfer_sync(
struct dc_context *ctx,
const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 359f6e9a1da0..86a3b5bfd699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +82,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -131,6 +130,7 @@ DML += dcn321/dcn321_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
index 6ca288fb5fb9..3aa8dd0acd5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
@@ -25,12 +25,11 @@
#include "dm_services.h"
#include "bw_fixed.h"
+#define MAX_I64 \
+ ((int64_t)((1ULL << 63) - 1))
#define MIN_I64 \
- (int64_t)(-(1LL << 63))
-
-#define MAX_I64 \
- (int64_t)((1ULL << 63) - 1)
+ (-MAX_I64 - 1)
#define FRACTIONAL_PART_MASK \
((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
@@ -49,6 +48,7 @@ static uint64_t abs_i64(int64_t arg)
struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
{
struct bw_fixed res;
+
ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
return res;
@@ -78,14 +78,12 @@ struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
{
uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
- do
- {
+ do {
remainder <<= 1;
res_value <<= 1;
- if (remainder >= arg2_value)
- {
+ if (remainder >= arg2_value) {
res_value |= 1;
remainder -= arg2_value;
}
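
The reshuffled macros also change how the minimum is derived: MIN_I64 is now built from MAX_I64 rather than from -(1LL << 63), which left-shifts a signed 1 into the sign bit (undefined behaviour in C). A small user-space check of the equivalent definitions, assuming only <stdint.h>:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_I64 \
	((int64_t)((1ULL << 63) - 1))
#define MIN_I64 \
	(-MAX_I64 - 1)

int main(void)
{
	printf("max = %" PRId64 "\nmin = %" PRId64 "\n", MAX_I64, MIN_I64);
	printf("equal to stdint limits: %d %d\n",
	       MAX_I64 == INT64_MAX, MIN_I64 == INT64_MIN);
	return 0;
}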
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index db3b16b77034..d46adc849d2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -736,30 +736,13 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
- uint32_t hw_internal_rev,
- uint32_t pci_revision_id)
+static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
- if ((chip_family == FAMILY_RV) &&
- ASICREV_IS_RAVEN2(hw_internal_rev))
- switch (pci_revision_id) {
- case PRID_DALI_DE:
- case PRID_DALI_DF:
- case PRID_DALI_E3:
- case PRID_DALI_E4:
- case PRID_POLLOCK_94:
- case PRID_POLLOCK_95:
- case PRID_POLLOCK_E9:
- case PRID_POLLOCK_EA:
- case PRID_POLLOCK_EB:
- return 0;
- default:
- break;
- }
-
- /* we are ok with all levels */
- return 4;
+ if (is_vmin_only_asic)
+ return 0;
+ else /* we are ok with all levels */
+ return 4;
}
bool dcn_validate_bandwidth(
@@ -1323,10 +1306,7 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
- dc->ctx->asic_id.chip_family,
- dc->ctx->asic_id.hw_internal_rev,
- dc->ctx->asic_id.pci_revision_id))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->config.is_vmin_only_asic))
return true;
else
return false;
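
The get_highest_allowed_voltage_level() change replaces an open-coded chip-family and PCI-revision switch with a single is_vmin_only_asic flag that is assumed to be computed once during early init and cached in dc->config. A hedged sketch of the resulting call shape, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

struct demo_config {
	bool is_vmin_only_asic; /* assumed to be filled in from PCI IDs at init */
};

static unsigned int highest_allowed_voltage_level(bool is_vmin_only_asic)
{
	/* low-power variants are capped at level 0, everything else at 4 */
	return is_vmin_only_asic ? 0 : 4;
}

static bool bandwidth_ok(const struct demo_config *cfg,
			 unsigned int voltage_level, bool bw_limit_pass)
{
	return bw_limit_pass &&
	       voltage_level <= highest_allowed_voltage_level(cfg->is_vmin_only_asic);
}

int main(void)
{
	struct demo_config cfg = { .is_vmin_only_asic = true };

	printf("%d\n", bandwidth_ok(&cfg, 0, true)); /* 1: level 0 allowed */
	printf("%d\n", bandwidth_ok(&cfg, 3, true)); /* 0: capped at level 0 */
	return 0;
}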
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 39428488a052..d680f1c5b69f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,6 +30,7 @@
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "dcn20_fpu.h"
@@ -2233,6 +2234,7 @@ static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_li
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl = 0, k = 0;
@@ -2246,8 +2248,7 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
ASSERT(clk_table->num_entries);
/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
- memcpy(&dcn2_1_soc._clock_tmp, &dcn2_1_soc.clock_limits,
- sizeof(dcn2_1_soc.clock_limits));
+ memcpy(s, dcn2_1_soc.clock_limits, sizeof(dcn2_1_soc.clock_limits));
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
@@ -2262,25 +2263,25 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
if (i == 1)
k++;
- dcn2_1_soc._clock_tmp[k].state = k;
- dcn2_1_soc._clock_tmp[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc._clock_tmp[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc._clock_tmp[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc._clock_tmp[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn2_1_soc._clock_tmp[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn2_1_soc._clock_tmp[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn2_1_soc._clock_tmp[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn2_1_soc._clock_tmp[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn2_1_soc._clock_tmp[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn2_1_soc._clock_tmp[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn2_1_soc._clock_tmp[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[k].state = k;
+ s[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[k].dram_bw_per_chan_gbps =
+ dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
k++;
}
- memcpy(&dcn2_1_soc.clock_limits, &dcn2_1_soc._clock_tmp,
- sizeof(dcn2_1_soc.clock_limits));
+ memcpy(dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
@@ -2341,7 +2342,7 @@ void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc,
dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
- dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;;
+ dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
dout_wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
dout_wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
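
The dcn21 bounding-box hunk drops the _clock_tmp array embedded in the soc struct and builds the new clock table in caller-owned scratch storage (dc->scratch.update_bw_bounding_box.clock_limits) before copying it over the live table. A compact sketch of that scratch-and-publish pattern, using placeholder types:

#include <stdio.h>
#include <string.h>

#define DEMO_NUM_STATES 9

struct demo_state {
	int dcfclk_mhz;
};

struct demo_soc {
	struct demo_state clock_limits[DEMO_NUM_STATES];
};

struct demo_scratch {
	struct demo_state clock_limits[DEMO_NUM_STATES];
};

static void demo_update(struct demo_soc *soc, struct demo_scratch *scratch,
			const int *new_dcfclk, int n)
{
	struct demo_state *s = scratch->clock_limits;
	int i;

	/* start from the live table so untouched states keep valid values */
	memcpy(s, soc->clock_limits, sizeof(soc->clock_limits));

	for (i = 0; i < n && i < DEMO_NUM_STATES; i++)
		s[i].dcfclk_mhz = new_dcfclk[i];

	/* publish the rebuilt table in one shot */
	memcpy(soc->clock_limits, s, sizeof(soc->clock_limits));
}

int main(void)
{
	static struct demo_soc soc;
	static struct demo_scratch scratch;
	int clocks[3] = { 400, 600, 800 };

	demo_update(&soc, &scratch, clocks, 3);
	printf("state 2 dcfclk: %d MHz\n", soc.clock_limits[2].dcfclk_mhz);
	return 0;
}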
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index 6dd9a70314c0..e1e92daba668 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
double vtotal_avg)
{
-struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
double vtotal_min, vtotal_max;
double ratio, modulo, phase;
uint32_t vblank_start;
@@ -350,24 +350,24 @@ void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
int pipe_cnt,
int cur_pipe)
{
- int i;
+ int i;
dc_assert_fp_enabled();
- for (i = 0; i < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(wb_arb_params->cli_watermark); i++) {
wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- }
+ }
- wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+ wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
}
void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
-dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
-if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
@@ -380,12 +380,12 @@ void dcn30_fpu_calculate_wm_and_dlg(
int pipe_cnt,
int vlevel)
{
-int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
int i, pipe_idx;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
-dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
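
One of the dcn30 cleanups replaces the open-coded sizeof(x)/sizeof(x[0]) loop bound with ARRAY_SIZE(). A user-space equivalent of the idiom (the kernel macro additionally rejects non-array arguments at compile time):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned int cli_watermark[4] = { 0 };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(cli_watermark); i++)
		cli_watermark[i] = 1000 * (unsigned int)(i + 1);

	printf("%zu entries, last = %u\n",
	       ARRAY_SIZE(cli_watermark),
	       cli_watermark[ARRAY_SIZE(cli_watermark) - 1]);
	return 0;
}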
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 876b321b30ca..b7fa003ffe06 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -396,64 +396,10 @@ static void CalculateUrgentBurstFactor(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
+ struct vba_vars_st *v,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2]);
+ int ReorderingBytes);
+
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
@@ -4692,66 +4638,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (v->UseMinimumRequiredDCFCLK == true) {
- UseMinimumDCFCLK(
- mode_lib,
- v->MaxInterDCNTileRepeaters,
- MaxPrefetchMode,
- v->FinalDRAMClockChangeLatency,
- v->SREnterPlusExitTime,
- v->ReturnBusWidth,
- v->RoundTripPingLatencyCycles,
- ReorderingBytes,
- v->PixelChunkSizeInKByte,
- v->MetaChunkSize,
- v->GPUVMEnable,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->NumberOfActivePlanes,
- v->HostVMMinPageSize,
- v->HostVMMaxNonCachedPageTableLevels,
- v->DynamicMetadataVMEnabled,
- v->ImmediateFlipRequirement[0],
- v->ProgressiveToInterlaceUnitInOPP,
- v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- v->VTotal,
- v->VActive,
- v->DynamicMetadataTransmittedBytes,
- v->DynamicMetadataLinesBeforeActiveRequired,
- v->Interlace,
- v->RequiredDPPCLK,
- v->RequiredDISPCLK,
- v->UrgLatency,
- v->NoOfDPP,
- v->ProjectedDCFCLKDeepSleep,
- v->MaximumVStartup,
- v->TotalVActivePixelBandwidth,
- v->TotalVActiveCursorBandwidth,
- v->TotalMetaRowBandwidth,
- v->TotalDPTERowBandwidth,
- v->TotalNumberOfActiveDPP,
- v->TotalNumberOfDCCActiveDPP,
- v->dpte_group_bytes,
- v->PrefetchLinesY,
- v->PrefetchLinesC,
- v->swath_width_luma_ub_all_states,
- v->swath_width_chroma_ub_all_states,
- v->BytePerPixelY,
- v->BytePerPixelC,
- v->HTotal,
- v->PixelClock,
- v->PDEAndMetaPTEBytesPerFrame,
- v->DPTEBytesPerRow,
- v->MetaRowBytes,
- v->DynamicMetadataEnable,
- v->VActivePixelBandwidth,
- v->VActiveCursorBandwidth,
- v->ReadBandwidthLuma,
- v->ReadBandwidthChroma,
- v->DCFCLKPerState,
- v->DCFCLKState);
+ UseMinimumDCFCLK(mode_lib, v, MaxPrefetchMode, ReorderingBytes);
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
@@ -6610,77 +6497,21 @@ static double CalculateUrgentLatency(
return ret;
}
-
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
+ struct vba_vars_st *v,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2])
+ int ReorderingBytes)
{
double NormalEfficiency = 0;
double PTEEfficiency = 0;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2] = { { 0 } };
unsigned int i, j, k;
- NormalEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
- : PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
- PTEEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
- / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
+ NormalEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ : v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
+ PTEEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
+ / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX] = { 0 };
@@ -6698,58 +6529,58 @@ static void UseMinimumDCFCLK(
double MinimumTvmPlus2Tr0 = 0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
- + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]);
+ + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
- NoOfDPPState[k] = NoOfDPP[i][j][k];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
+ NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
- MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime);
- NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j];
- DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ?
- TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j];
- DCFCLKRequiredForAverageBandwidth = dml_max3(ProjectedDCFCLKDeepSleep[i][j],
- (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth / (MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
- (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / ReturnBusWidth);
-
- ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, TotalNumberOfActiveDPP[i][j], PixelChunkSizeInKByte, TotalNumberOfDCCActiveDPP[i][j],
- MetaChunkSize, GPUVMEnable, HostVMEnable, NumberOfActivePlanes, NoOfDPPState, dpte_group_bytes,
- PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- HostVMMinPageSize, HostVMMaxNonCachedPageTableLevels);
- ExtraLatencyCycles = RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
+ NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
+ DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
+ DCFCLKRequiredForAverageBandwidth = dml_max3(v->ProjectedDCFCLKDeepSleep[i][j],
+ (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth / (v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
+ (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / v->ReturnBusWidth);
+
+ ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, v->TotalNumberOfActiveDPP[i][j], v->PixelChunkSizeInKByte, v->TotalNumberOfDCCActiveDPP[i][j],
+ v->MetaChunkSize, v->GPUVMEnable, v->HostVMEnable, v->NumberOfActivePlanes, NoOfDPPState, v->dpte_group_bytes,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->HostVMMinPageSize, v->HostVMMaxNonCachedPageTableLevels);
+ ExtraLatencyCycles = v->RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch = { 0 };
double ExpectedPrefetchBWAcceleration = { 0 };
double PrefetchTime = { 0 };
- PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
- + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth;
- DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
- / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * DPTEBytesPerRow[i][j][k] / PTEEfficiency
- / NormalEfficiency / ReturnBusWidth + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
- PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k];
- ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
- DynamicMetadataVMExtraLatency[k] = (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
- UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
- PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait - UrgLatency[i] * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels
- : GPUVMMaxPageTableLevels - 2) * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
+ PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
+ DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
+ / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * v->DPTEBytesPerRow[i][j][k] / PTEEfficiency
+ / NormalEfficiency / v->ReturnBusWidth + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
+ PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
+ ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
+ DynamicMetadataVMExtraLatency[k] = (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
+ v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
+ PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - v->UrgLatency[i] * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels
+ : v->GPUVMMaxPageTableLevels - 2) * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch = { 0 };
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k] / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
- if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) {
+ if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
- + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / ReturnBusWidth;
+ + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
- if (DynamicMetadataEnable[k] == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
double TsetupPipe = { 0 };
double TdmbfPipe = { 0 };
double TdmsksPipe = { 0 };
@@ -6757,49 +6588,49 @@ static void UseMinimumDCFCLK(
double AllowedTimeForUrgentExtraLatency = { 0 };
CalculateDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
- RequiredDPPCLK[i][j][k],
- RequiredDISPCLK[i][j],
- ProjectedDCFCLKDeepSleep[i][j],
- PixelClock[k],
- HTotal[k],
- VTotal[k] - VActive[k],
- DynamicMetadataTransmittedBytes[k],
- DynamicMetadataLinesBeforeActiveRequired[k],
- Interlace[k],
- ProgressiveToInterlaceUnitInOPP,
+ v->MaxInterDCNTileRepeaters,
+ v->RequiredDPPCLK[i][j][k],
+ v->RequiredDISPCLK[i][j],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->PixelClock[k],
+ v->HTotal[k],
+ v->VTotal[k] - v->VActive[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
&TsetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe);
- AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TsetupPipe
+ AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TsetupPipe
- TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
- MinimumTvmPlus2Tr0 = UrgLatency[i] * (GPUVMEnable == true ? (HostVMEnable == true ?
- (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : 0);
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ MinimumTvmPlus2Tr0 = v->UrgLatency[i] * (v->GPUVMEnable == true ? (v->HostVMEnable == true ?
+ (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) : 0);
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw = { 0 };
- MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
+ MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
- DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(DCFCLKRequiredForPeakBandwidth, 2 * ExtraLatencyCycles
/ (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
- DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
+ v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
* dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
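
The UseMinimumDCFCLK() refactor collapses roughly fifty scalar and array parameters into one struct vba_vars_st pointer, shrinking the call frame in this already stack-heavy DML code and keeping the prototype, definition, and call site from drifting apart. A minimal sketch of the same refactor shape, with invented field names:

#include <stdio.h>

struct demo_vars {
	int	return_bus_width;
	double	final_dram_clock_change_latency;
};

static double demo_min_dcfclk(const struct demo_vars *v, int reordering_bytes)
{
	/* long-lived inputs come from *v instead of a ~50-entry argument list */
	return v->final_dram_clock_change_latency +
	       (double)reordering_bytes / v->return_bus_width;
}

int main(void)
{
	struct demo_vars vars = {
		.return_bus_width = 64,
		.final_dram_clock_change_latency = 11.72,
	};

	printf("%.2f\n", demo_min_dcfclk(&vars, 256));
	return 0;
}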
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 7ef66e511ec8..241d28d0b7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -26,6 +26,7 @@
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
@@ -321,6 +322,7 @@ static void calculate_wm_set_for_vlevel(int vlevel,
void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
@@ -328,8 +330,7 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dc_assert_fp_enabled();
- memcpy(&dcn3_01_soc._clock_tmp, &dcn3_01_soc.clock_limits,
- sizeof(dcn3_01_soc.clock_limits));
+ memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits));
/* Default clock levels are used for diags, which may lead to overclocking. */
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -347,31 +348,33 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
}
- dcn3_01_soc._clock_tmp[i].state = i;
- dcn3_01_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn3_01_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_01_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_01_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn3_01_soc._clock_tmp[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_01_soc._clock_tmp[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_01_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_01_soc._clock_tmp[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_01_soc._clock_tmp[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_01_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_01_soc._clock_tmp[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].state = i;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_01_soc.num_states = clk_table->num_entries;
/* duplicate last level */
- dcn3_01_soc._clock_tmp[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- dcn3_01_soc._clock_tmp[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+ s[dcn3_01_soc.num_states] =
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
}
}
- memcpy(&dcn3_01_soc.clock_limits, &dcn3_01_soc._clock_tmp,
- sizeof(dcn3_01_soc.clock_limits));
+ memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits));
dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index 8fb14baed208..3eb3a021ab7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -202,7 +202,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
- dc_assert_fp_enabled();
+ dc_assert_fp_enabled();
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
@@ -349,14 +349,11 @@ void dcn303_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
dc_assert_fp_enabled();
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_03_soc.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_03_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_03_soc.sr_enter_plus_exit_time_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_03_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_03_soc.sr_exit_time_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_03_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index e36cfa5985ea..0e62eb823e34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -25,6 +25,9 @@
#include "resource.h"
#include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@@ -355,7 +358,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -594,14 +597,14 @@ void dcn31_calculate_wm_and_dlg_fp(
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int j;
dc_assert_fp_enabled();
- memcpy(&dcn3_1_soc._clock_tmp, &dcn3_1_soc.clock_limits,
- sizeof(dcn3_1_soc.clock_limits));
+ memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -630,34 +633,36 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
}
- dcn3_1_soc._clock_tmp[i].state = i;
+ s[i].state = i;
/* Clocks dependent on voltage level. */
- dcn3_1_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn3_1_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_1_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_1_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
- dcn3_1_soc._clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_1_soc._clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_1_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_1_soc._clock_tmp[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_1_soc._clock_tmp[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_1_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_1_soc._clock_tmp[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_1_soc.num_states = clk_table->num_entries;
}
}
- memcpy(&dcn3_1_soc.clock_limits, &dcn3_1_soc._clock_tmp,
- sizeof(dcn3_1_soc.clock_limits));
+ memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits));
dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
@@ -724,6 +729,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
@@ -731,8 +737,7 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dc_assert_fp_enabled();
- memcpy(&dcn3_16_soc._clock_tmp, &dcn3_16_soc.clock_limits,
- sizeof(dcn3_16_soc.clock_limits));
+ memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -754,7 +759,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
@@ -765,39 +771,43 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
closest_clk_lvl = dcn3_16_soc.num_states - 1;
}
- dcn3_16_soc._clock_tmp[i].state = i;
+ s[i].state = i;
/* Clocks dependent on voltage level. */
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
if (clk_table->num_entries == 1 &&
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ s[i].dcfclk_mhz <
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
/*SMU fix not released yet*/
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ s[i].dcfclk_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
- dcn3_16_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_16_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_16_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
- dcn3_16_soc._clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_16_soc._clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_16_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_16_soc._clock_tmp[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_16_soc._clock_tmp[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_16_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_16_soc._clock_tmp[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_16_soc.num_states = clk_table->num_entries;
}
}
- memcpy(&dcn3_16_soc.clock_limits, &dcn3_16_soc._clock_tmp,
- sizeof(dcn3_16_soc.clock_limits));
+ memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits));
if (max_dispclk_mhz) {
dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 3fab19134480..d63b4209b14c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -26,7 +26,7 @@
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 66b82e4f05c6..35d10b4d018b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -27,7 +27,7 @@
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644
index 000000000000..34a5d0f87b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dcn3_14_soc.clock_limits;
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+
+ dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+ if (bw_params->num_channels > 0)
+ dcn3_14_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(dcn3_14_soc.num_chans);
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_14_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_14_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_14_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ dc_assert_fp_enabled();
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+ && pipe->stream->adjust.v_total_min > timing->v_total)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
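+ /* dsc_input_bpc of 0 means unset; it is refined from the timing's color depth below when DSC is enabled. */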
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+ dc->config.enable_4to1MPC = false;
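+ /* Pick the MPC/DET policy: a single-pipe config may enable 4:1 MPC or
+  * unbounded requesting; otherwise the DET allocation below is sized in
+  * 64 KB CRB segments via dc->debug.crb_alloc_policy.
+  */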
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644
index 000000000000..d32c5bb99f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
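+/* Both helpers assert dc_assert_fp_enabled(), so callers are expected to
+ * protect them with DC_FP_START()/DC_FP_END().
+ */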
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 66453546e24f..7f6c977c4981 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -473,8 +473,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
+ /* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
+ * to 2 swaths (i.e. 16 lines)
+ */
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
- pstate_width_fw_delay_lines;
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -490,6 +493,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
+ phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
/**
@@ -556,6 +560,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
bool valid_assignment_found = false;
unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
bool current_assignment_freesync = false;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -569,8 +574,15 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ /* SubVP pipe candidate requirements:
+ * - Refresh rate < 120hz
+ * - Not able to switch in vactive naturally (switching in active means the
+ * DET provides enough buffer to hide the P-State switch latency -- trying
+ * to combine this with SubVP can cause issues with the scheduling).
+ */
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120) {
+ pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+ vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
@@ -983,13 +995,21 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
+ * Override present for testing.
*/
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ if (dc->debug.dml_disallow_alternate_prefetch_modes)
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
+ else
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
- if (*vlevel < context->bw_ctx.dml.soc.num_states)
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
+ }
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
@@ -1004,6 +1024,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
+ // to re-initialize viewport after the pipe merge
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1014,7 +1043,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
* enough to support MCLK switching.
*/
- if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+ if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+ dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_stutter;
/* There are params (such as FabricClock) that need to be recalculated
@@ -1070,11 +1101,19 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->remove_phantom_pipes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* This may adjust vlevel and maxMpcComb */
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
+ }
} else {
// only call dcn20_validate_apply_pipe_split_flags if we found a supported config
memset(split, 0, MAX_PIPES * sizeof(int));
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
// Must populate phantom DLG params before programming hardware / timing for phantom pipe
DC_FP_START();
@@ -1344,7 +1383,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
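+ /* Treat soc.num_states as the "no supported voltage level" sentinel until validation finds one. */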
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1373,17 +1413,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
DC_FP_END();
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
*
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
- * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favouring voltage.
+ *
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_fclk_and_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1398,6 +1443,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(split, 0, sizeof(split));
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
+ vba->VoltageLevel = vlevel;
}
}
@@ -1758,7 +1805,11 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ /* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state.
+ * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM
+ * value.
+ */
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
@@ -2098,6 +2149,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
@@ -2111,13 +2169,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_2_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_2_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_2_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_2_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 890612db08dc..d8014bfbc3fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
@@ -679,9 +677,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
dml_ceil((double) v->WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1));
- // Clamp to max OTG vstartup register limit
- if (v->MaxVStartupLines[k] > 1023)
- v->MaxVStartupLines[k] = 1023;
+ // Clamp to max OTG vstartup register limit
+ if (v->MaxVStartupLines[k] > 1023)
+ v->MaxVStartupLines[k] = 1023;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
@@ -757,9 +755,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
- v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelaySCL,
@@ -1167,7 +1163,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
@@ -1952,7 +1947,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2051,6 +2045,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
+ mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportNoDSC,
@@ -2072,6 +2067,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
+ mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportDSC,
@@ -2549,7 +2545,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2749,7 +2744,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,7 +3260,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
mode_lib->vba.DSCDelayPerState[i][k],
@@ -3566,7 +3559,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[i][j],
@@ -3625,7 +3617,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMarginPerState[i][j]);
}
}
} // End of Prefetch Check
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 4b010b1b8aed..dc501ee7d01a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -391,7 +391,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
} // CalculateBytePerPixelAndBlockSizes
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +455,18 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpSwathSizeBytesY;
+ unsigned int RoundedUpSwathSizeBytesC;
+ double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
-
- st_vars->TotalActiveDPP = 0;
- st_vars->NoChromaSurfaces = true;
+ unsigned int TotalActiveDPP = 0;
+ bool NoChromaSurfaces = true;
+ unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +501,43 @@ void dml32_CalculateSwathAndDETConfiguration(
DPPPerSurface,
/* Output */
- st_vars->SwathWidthdoubleDPP,
- st_vars->SwathWidthdoubleDPPChroma,
+ SwathWidthdoubleDPP,
+ SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
- st_vars->MaximumSwathHeightY,
- st_vars->MaximumSwathHeightC,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+ RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+ RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+ RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+ RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+ RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+ TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
- st_vars->NoChromaSurfaces = false;
+ NoChromaSurfaces = false;
}
}
@@ -540,10 +547,10 @@ void dml32_CalculateSwathAndDETConfiguration(
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
// - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
- *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+ *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
- *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+ *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
#ifdef __DML_VBA_DEBUG__
@@ -551,7 +558,7 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
- *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+ *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
@@ -566,8 +573,8 @@ void dml32_CalculateSwathAndDETConfiguration(
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
- st_vars->RoundedUpMaxSwathSizeBytesY,
- st_vars->RoundedUpMaxSwathSizeBytesC,
+ RoundedUpMaxSwathSizeBytesY,
+ RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
@@ -575,7 +582,7 @@ void dml32_CalculateSwathAndDETConfiguration(
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+ dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +593,42 @@ void dml32_CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+ DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
- st_vars->DETBufferSizeInKByteForSwathCalculation);
+ DETBufferSizeInKByteForSwathCalculation);
#endif
- if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
- if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+ if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
@@ -636,7 +643,7 @@ void dml32_CalculateSwathAndDETConfiguration(
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
- } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
@@ -654,11 +661,11 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+ k, RoundedUpMaxSwathSizeBytesC[k]);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -710,10 +717,10 @@ void dml32_CalculateSwathWidth(
unsigned int k, j;
enum odm_combine_mode MainSurfaceODMMode;
- unsigned int surface_width_ub_l;
- unsigned int surface_height_ub_l;
- unsigned int surface_width_ub_c;
- unsigned int surface_height_ub_c;
+ unsigned int surface_width_ub_l;
+ unsigned int surface_height_ub_l;
+ unsigned int surface_width_ub_c;
+ unsigned int surface_height_ub_c;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -1186,6 +1193,7 @@ void dml32_CalculateODMMode(
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
+ unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
@@ -1221,7 +1229,8 @@ void dml32_CalculateODMMode(
if (!(Output == dm_hdmi || Output == dm_dp || Output == dm_edp) && (ODMUse == dm_odm_combine_policy_4to1 ||
((SurfaceRequiredDISPCLKWithODMCombineTwoToOne > StateDispclk ||
- (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit))
+ || NumberOfDSCSlices > 8)))) {
if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
@@ -1232,7 +1241,8 @@ void dml32_CalculateODMMode(
} else if (Output != dm_hdmi && (ODMUse == dm_odm_combine_policy_2to1 ||
(((SurfaceRequiredDISPCLKWithoutODMCombine > StateDispclk &&
SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= StateDispclk) ||
- (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit))
+ || (NumberOfDSCSlices <= 8 && NumberOfDSCSlices > 4))))) {
if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
@@ -1867,7 +1877,6 @@ void dml32_CalculateSurfaceSizeInMall(
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1942,21 @@ void dml32_CalculateVMRowAndSwath(
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
+ unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+ unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+ bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
@@ -1954,15 +1978,15 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
- st_vars->PTEBufferSizeInRequestsForLuma[k] =
+ PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
- st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -1982,21 +2006,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForChroma[k],
+ PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
- &st_vars->MetaRowByteC[k],
- &st_vars->PixelPTEBytesPerRowC[k],
+ &MetaRowByteC[k],
+ &PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
- &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
- &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowC_one_row_per_frame[k],
+ &dpte_row_width_chroma_ub_one_row_per_frame[k],
+ &dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
@@ -2024,19 +2048,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
- st_vars->PixelPTEBytesPerRowC[k] = 0;
- st_vars->PDEAndMetaPTEBytesFrameC = 0;
- st_vars->MetaRowByteC[k] = 0;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForChroma[k] = 0;
+ PixelPTEBytesPerRowC[k] = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
- st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+ dpte_row_height_chroma_one_row_per_frame[k] = 0;
+ dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+ PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
- st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -2056,21 +2080,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForLuma[k],
+ PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
- &st_vars->MetaRowByteY[k],
- &st_vars->PixelPTEBytesPerRowY[k],
+ &MetaRowByteY[k],
+ &PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
- &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
- &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowY_one_row_per_frame[k],
+ &dpte_row_width_luma_ub_one_row_per_frame[k],
+ &dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
@@ -2098,19 +2122,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillY[k],
&MaxNumSwathY[k]);
- PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
- MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+ PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+ MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
- if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+ if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
- st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
- st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+ one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+ PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2142,7 @@ void dml32_CalculateVMRowAndSwath(
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
- st_vars->one_row_per_frame_fits_in_buffer,
+ one_row_per_frame_fits_in_buffer,
/* Output */
UsesMALLForStaticScreen); // boolean
@@ -2144,13 +2168,13 @@ void dml32_CalculateVMRowAndSwath(
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
- dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
- dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
- dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
- dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
- PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+ dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+ dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+ dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+ dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+ PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2182,7 @@ void dml32_CalculateVMRowAndSwath(
else
DCCMetaBufferSizeNotExceeded[k] = false;
- PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+ PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
@@ -2169,11 +2193,11 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
- st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+ MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
- st_vars->PixelPTEBytesPerRowY[k],
- st_vars->PixelPTEBytesPerRowC[k],
+ PixelPTEBytesPerRowY[k],
+ PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
@@ -2189,12 +2213,12 @@ void dml32_CalculateVMRowAndSwath(
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
@@ -3342,7 +3366,6 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -3406,18 +3429,45 @@ bool dml32_CalculatePrefetchSchedule(
double *VReadyOffsetPix)
{
bool MyError = false;
-
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
- st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
- st_vars->Tsw_est1 = 0;
- st_vars->Tsw_est3 = 0;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler;
+ double LineTime;
+ double dst_y_prefetch_equ;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double trip_to_mem;
+ double Tvm_trips;
+ double Tr0_trips;
+ double Tvm_trips_rounded;
+ double Tr0_trips_rounded;
+ double Lsw_oto;
+ double Tpre_rounded;
+ double prefetch_bw_equ;
+ double Tvm_equ;
+ double Tr0_equ;
+ double Tdmbf;
+ double Tdmec;
+ double Tdmsks;
+ double prefetch_sw_bytes;
+ double bytes_pp;
+ double dep_bytes;
+ unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ double min_Lsw;
+ double Tsw_est1 = 0;
+ double Tsw_est3 = 0;
if (GPUVMEnable == true && HostVMEnable == true)
- st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
else
- st_vars->HostVMDynamicLevelsTrips = 0;
+ HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
@@ -3440,19 +3490,19 @@ bool dml32_CalculatePrefetchSchedule(
TSetup,
/* output */
- &st_vars->Tdmbf,
- &st_vars->Tdmec,
- &st_vars->Tdmsks,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
- st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
- st_vars->trip_to_mem = UrgentLatency;
- st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (DynamicMetadataVMEnabled == true)
- *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
@@ -3462,15 +3512,15 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (DynamicMetadataEnable == true) {
- if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+ if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
- __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
- __func__, st_vars->Tdmsks);
+ __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
@@ -3482,21 +3532,21 @@ bool dml32_CalculatePrefetchSchedule(
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+ GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
- st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
- st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
- *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3556,10 @@ bool dml32_CalculatePrefetchSchedule(
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+ dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
- dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+ dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
@@ -3522,9 +3572,9 @@ bool dml32_CalculatePrefetchSchedule(
else
*DSTYAfterScaler = 0;
- st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3582,132 @@ bool dml32_CalculatePrefetchSchedule(
MyError = false;
- st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
if (GPUVMEnable == true) {
- st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ *Tno_bw = UrgentExtraLatency + trip_to_mem *
+ (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
} else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
- 4.0 * st_vars->LineTime; // VBA_ERROR
+ Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+ 4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
- st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
- st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+ Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+ Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
- st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
- st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
+ prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+ prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
- st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
- st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
+ min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+ min_Lsw = dml_max(min_Lsw, 1.0);
+ Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
if (GPUVMEnable == true) {
- st_vars->Tvm_oto = dml_max3(
- st_vars->Tvm_trips,
- *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
- st_vars->LineTime / 4.0);
+ Tvm_oto = dml_max3(
+ Tvm_trips,
+ *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ LineTime / 4.0);
} else
- st_vars->Tvm_oto = st_vars->LineTime / 4.0;
+ Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_oto = dml_max4(
- st_vars->Tr0_trips,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
- (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
- st_vars->LineTime / 4.0);
+ Tr0_oto = dml_max4(
+ Tr0_trips,
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ (LineTime - Tvm_oto)/2.0,
+ LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
- dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+ dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
- st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
- st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
- st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+ dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
- dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+ dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
- dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+ dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
- dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
- dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+ dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+ dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
- dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
- dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
- dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
- dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
- dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
- dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
- dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+ dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+ dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+ dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+ dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+ dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+ dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+ dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+ dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
- st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
- dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+ dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
- __func__, VStartup * st_vars->LineTime);
+ __func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
- dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
- st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+ dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
- if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
- st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+ if (prefetch_sw_bytes < dep_bytes)
+ prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -3665,61 +3715,61 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (st_vars->dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
- if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+ if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
- st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+ + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+ Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
- && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+ if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+ && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
- if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
- PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
- (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+ if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
- st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+ + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+ Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
- (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
- st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+ (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+ LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
- PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
- (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+ PrefetchBandwidth4 = prefetch_sw_bytes /
+ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+ dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
- dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
- dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
- dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+ dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+ dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+ dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3782,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
@@ -3745,9 +3795,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
@@ -3758,9 +3808,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
- st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+ Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
- st_vars->Tr0_trips_rounded) {
+ Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
@@ -3770,80 +3820,80 @@ bool dml32_CalculatePrefetchSchedule(
}
if (Case1OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+ prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+ prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+ prefetch_bw_equ = PrefetchBandwidth3;
else
- st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+ prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
- dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+ dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
- if (st_vars->prefetch_bw_equ > 0) {
+ if (prefetch_bw_equ > 0) {
if (GPUVMEnable == true) {
- st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
- HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
- st_vars->Tvm_trips, st_vars->LineTime / 4);
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor / prefetch_bw_equ,
+ Tvm_trips, LineTime / 4);
} else {
- st_vars->Tvm_equ = st_vars->LineTime / 4;
+ Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
- HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
- (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+ Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+ (LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
- st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
- st_vars->Tvm_equ = 0;
- st_vars->Tr0_equ = 0;
+ Tvm_equ = 0;
+ Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
- if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
- *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
} else {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
- *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
}
- *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
- dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+ dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
- dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+ dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
- if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
- *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+ if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3901,12 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
- st_vars->LinesToRequestPrefetchPixelData,
+ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
@@ -3870,7 +3920,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
}
- *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3929,11 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
@@ -3898,25 +3948,25 @@ bool dml32_CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
- / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
- / st_vars->LineTime;
+ / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+ / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
- st_vars->LinesToRequestPrefetchPixelData
+ LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
- * swath_width_chroma_ub / st_vars->LineTime;
+ * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
- __func__, st_vars->LinesToRequestPrefetchPixelData);
+ __func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
@@ -3925,15 +3975,15 @@ bool dml32_CalculatePrefetchSchedule(
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
- (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
- 2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
- dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+ (double)LinesToRequestPrefetchPixelData * LineTime +
+ 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+ (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
- st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
- ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+ TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+ ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
@@ -3941,7 +3991,7 @@ bool dml32_CalculatePrefetchSchedule(
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
- __func__, st_vars->dst_y_prefetch_equ);
+ __func__, dst_y_prefetch_equ);
#endif
}
@@ -3957,10 +4007,10 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
- (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -3977,7 +4027,7 @@ bool dml32_CalculatePrefetchSchedule(
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
- (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4050,12 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
+ LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -4159,7 +4209,6 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
@@ -4221,15 +4270,37 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
-
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
- st_vars->DRAMClockChangeSupportNumber = 0;
- st_vars->DRAMClockChangeMethod = 0;
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
- st_vars->MinActiveFCLKChangeMargin = 0.;
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
- st_vars->TotalPixelBW = 0.0;
- st_vars->TotalActiveWriteback = 0;
+ unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+ unsigned int DRAMClockChangeSupportNumber = 0;
+ unsigned int LastSurfaceWithoutMargin;
+ unsigned int DRAMClockChangeMethod = 0;
+ bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+ double MinActiveFCLKChangeMargin = 0.;
+ double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+ double ActiveClockChangeLatencyHidingY;
+ double ActiveClockChangeLatencyHidingC;
+ double ActiveClockChangeLatencyHiding;
+ double EffectiveDETBufferSizeY;
+ double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+ double TotalPixelBW = 0.0;
+ bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeY;
+ double FullDETBufferingTimeC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double WritebackFCLKChangeLatencyMargin;
+ double WritebackLatencyHiding;
+ bool SameTimingForFCLKChange;
+
+ unsigned int TotalActiveWriteback = 0;
+ unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+ unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
@@ -4261,13 +4332,13 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
- st_vars->TotalActiveWriteback = 0;
+ TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (WritebackEnable[k] == true)
- st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+ TotalActiveWriteback = TotalActiveWriteback + 1;
}
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
@@ -4277,7 +4348,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
@@ -4307,14 +4378,14 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
@@ -4325,72 +4396,72 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
#endif
- st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
- st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
* (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+ / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
}
- st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
- st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
- st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
+ ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
+ ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
/ PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
- st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
- st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
+ LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
/ VRatioChroma[k];
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
+ ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
/ PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
+ ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
/ PixelClock[k] / VRatioChroma[k];
}
- st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
- st_vars->ActiveClockChangeLatencyHidingC);
+ ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+ ActiveClockChangeLatencyHidingC);
} else {
- st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+ ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->DRAMClockChangeWatermark;
- st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->FCLKChangeWatermark;
- st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
if (WritebackEnable[k]) {
- st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
+ WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
/ (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
/ (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64)
- st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+ WritebackLatencyHiding = WritebackLatencyHiding / 2;
- st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackDRAMClockChangeWatermark;
- st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
- st_vars->WritebackFCLKChangeLatencyMargin);
- st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
- st_vars->WritebackDRAMClockChangeLatencyMargin);
+ WritebackFCLKChangeLatencyMargin);
+ ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
@@ -4409,41 +4480,41 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
(DRRDisplay[i] || DRRDisplay[j]))) {
- st_vars->SynchronizedSurfaces[i][j] = true;
+ SynchronizedSurfaces[i][j] = true;
} else {
- st_vars->SynchronizedSurfaces[i][j] = false;
+ SynchronizedSurfaces[i][j] = false;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
- st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+ (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+ ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+ FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+ MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+ SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
- *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+ *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
- st_vars->SameTimingForFCLKChange = true;
+ SameTimingForFCLKChange = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
+ if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->SameTimingForFCLKChange ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] <
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+ (SameTimingForFCLKChange ||
+ ActiveFCLKChangeLatencyMargin[k] <
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
- st_vars->SameTimingForFCLKChange = false;
+ SameTimingForFCLKChange = false;
}
}
- if (st_vars->MinActiveFCLKChangeMargin > 0) {
+ if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
- } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+ } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
@@ -4453,7 +4524,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*USRRetrainingSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+ (USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
@@ -4464,42 +4535,42 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
- st_vars->DRAMClockChangeSupportNumber = 2;
- } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
- st_vars->DRAMClockChangeSupportNumber = 1;
- st_vars->LastSurfaceWithoutMargin = k;
- } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
- !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
- st_vars->DRAMClockChangeSupportNumber = 2;
+ DRAMClockChangeSupportNumber = 2;
+ } else if (DRAMClockChangeSupportNumber == 0) {
+ DRAMClockChangeSupportNumber = 1;
+ LastSurfaceWithoutMargin = k;
+ } else if (DRAMClockChangeSupportNumber == 1 &&
+ !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+ DRAMClockChangeSupportNumber = 2;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
- st_vars->DRAMClockChangeMethod = 1;
+ DRAMClockChangeMethod = 1;
else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
- st_vars->DRAMClockChangeMethod = 2;
+ DRAMClockChangeMethod = 2;
}
- if (st_vars->DRAMClockChangeMethod == 0) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeMethod == 0) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- } else if (st_vars->DRAMClockChangeMethod == 1) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ } else if (DRAMClockChangeMethod == 1) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
@@ -4513,7 +4584,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
- src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
+ src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
@@ -4521,7 +4592,7 @@ dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DET
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
@@ -4532,7 +4603,7 @@ dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l
if (BytePerPixelDETC[k] > 0) {
src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
- src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
+ src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
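The hunks above repeatedly apply one pattern: temporaries that previously lived in a caller-provided dml32_* scratch struct (st_vars) become ordinary function-local variables. A minimal stand-alone sketch of the two styles, using hypothetical names (scratch_vars, compute_margin_*) rather than the real DML types:

#include <stdio.h>

#define NUM_SURFACES 4

/* Old style: per-call temporaries parked in a long-lived scratch struct
 * that the caller must allocate and pass in (a hypothetical stand-in for
 * the removed dml32_* structs). */
struct scratch_vars {
	double margin[NUM_SURFACES];
	double min_margin;
};

static double compute_margin_scratch(struct scratch_vars *st_vars,
				     const double latency[], int n)
{
	st_vars->min_margin = latency[0] - 1.0;
	for (int k = 0; k < n; ++k) {
		st_vars->margin[k] = latency[k] - 1.0;
		if (st_vars->margin[k] < st_vars->min_margin)
			st_vars->min_margin = st_vars->margin[k];
	}
	return st_vars->min_margin;
}

/* New style, as in this patch: the same temporaries are plain locals, so
 * the function no longer depends on the scratch struct's layout. */
static double compute_margin_local(const double latency[], int n)
{
	double margin[NUM_SURFACES];
	double min_margin = latency[0] - 1.0;

	for (int k = 0; k < n; ++k) {
		margin[k] = latency[k] - 1.0;
		if (margin[k] < min_margin)
			min_margin = margin[k];
	}
	return min_margin;
}

int main(void)
{
	const double latency[NUM_SURFACES] = { 3.0, 2.5, 4.0, 3.5 };
	struct scratch_vars st;

	printf("scratch: %f\n", compute_margin_scratch(&st, latency, NUM_SURFACES));
	printf("local:   %f\n", compute_margin_local(latency, NUM_SURFACES));
	return 0;
}

The local-variable form keeps each helper independent of the shared scratch layout, at the cost of more stack usage per call.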
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 37a314ce284b..626f6605e2d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,7 +30,6 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -82,7 +81,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double *DPPCLKUsingSingleDPP);
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -228,6 +226,7 @@ void dml32_CalculateODMMode(
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
+ unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
@@ -362,7 +361,6 @@ void dml32_CalculateSurfaceSizeInMall(
bool *ExceededMALLSize);
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -715,7 +713,6 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -811,7 +808,6 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 269bdfc4bc40..a1276f6b9581 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -48,9 +48,9 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
bool dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
- double stored_swath_l_bytes;
- double stored_swath_c_bytes;
- bool is_phantom_pipe;
+ double stored_swath_l_bytes;
+ double stored_swath_c_bytes;
+ bool is_phantom_pipe;
uint32_t pixel_chunk_bytes = 0;
uint32_t min_pixel_chunk_bytes = 0;
uint32_t meta_chunk_bytes = 0;
@@ -65,9 +65,9 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
uint32_t p1_dpte_group_bytes = 0;
uint32_t p1_mpte_group_bytes = 0;
- unsigned int detile_buf_size_in_bytes;
- unsigned int detile_buf_plane1_addr;
- unsigned int pte_row_height_linear;
+ unsigned int detile_buf_size_in_bytes;
+ unsigned int detile_buf_plane1_addr;
+ unsigned int pte_row_height_linear;
memset(rq_regs, 0, sizeof(*rq_regs));
@@ -217,52 +217,51 @@ void dml32_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
double refcyc_per_req_delivery_cur0 = 0.;
double refcyc_per_req_delivery_pre_c = 0.;
double refcyc_per_req_delivery_c = 0.;
- double refcyc_per_req_delivery_pre_l;
- double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_l;
double refcyc_per_line_delivery_pre_c = 0.;
double refcyc_per_line_delivery_c = 0.;
- double refcyc_per_line_delivery_pre_l;
- double refcyc_per_line_delivery_l;
- double min_ttu_vblank;
- double vratio_pre_l;
- double vratio_pre_c;
- unsigned int min_dst_y_next_start;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_l;
+ double min_ttu_vblank;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int min_dst_y_next_start;
unsigned int htotal = dst->htotal;
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_end = dst->vblank_end;
bool interlaced = dst->interlaced;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
- unsigned int vready_after_vcount0;
+ unsigned int vready_after_vcount0;
double refclk_freq_in_mhz = clks->refclk_mhz;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
bool dual_plane = 0;
unsigned int pipe_index_in_combine[DC__NUM_PIPES__MAX];
- int unsigned dst_x_after_scaler;
- int unsigned dst_y_after_scaler;
- double dst_y_prefetch;
- double dst_y_per_vm_vblank;
- double dst_y_per_row_vblank;
- double dst_y_per_vm_flip;
- double dst_y_per_row_flip;
- double max_dst_y_per_vm_vblank = 32.0;
- double max_dst_y_per_row_vblank = 16.0;
-
- double dst_y_per_pte_row_nom_l;
- double dst_y_per_pte_row_nom_c;
- double dst_y_per_meta_row_nom_l;
- double dst_y_per_meta_row_nom_c;
- double refcyc_per_pte_group_nom_l;
- double refcyc_per_pte_group_nom_c;
- double refcyc_per_pte_group_vblank_l;
- double refcyc_per_pte_group_vblank_c;
- double refcyc_per_pte_group_flip_l;
- double refcyc_per_pte_group_flip_c;
- double refcyc_per_meta_chunk_nom_l;
- double refcyc_per_meta_chunk_nom_c;
- double refcyc_per_meta_chunk_vblank_l;
- double refcyc_per_meta_chunk_vblank_c;
- double refcyc_per_meta_chunk_flip_l;
- double refcyc_per_meta_chunk_flip_c;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double dst_y_prefetch;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double dst_y_per_vm_flip;
+ double dst_y_per_row_flip;
+ double max_dst_y_per_vm_vblank = 32.0;
+ double max_dst_y_per_row_vblank = 16.0;
+ double dst_y_per_pte_row_nom_l;
+ double dst_y_per_pte_row_nom_c;
+ double dst_y_per_meta_row_nom_l;
+ double dst_y_per_meta_row_nom_c;
+ double refcyc_per_pte_group_nom_l;
+ double refcyc_per_pte_group_nom_c;
+ double refcyc_per_pte_group_vblank_l;
+ double refcyc_per_pte_group_vblank_c;
+ double refcyc_per_pte_group_flip_l;
+ double refcyc_per_pte_group_flip_c;
+ double refcyc_per_meta_chunk_nom_l;
+ double refcyc_per_meta_chunk_nom_c;
+ double refcyc_per_meta_chunk_vblank_l;
+ double refcyc_per_meta_chunk_vblank_c;
+ double refcyc_per_meta_chunk_flip_l;
+ double refcyc_per_meta_chunk_flip_c;
memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 84b4b00f29cb..7ebf25e87933 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -498,6 +498,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
@@ -511,13 +518,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_21_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_21_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_21_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_21_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
}
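The added fclk override above mirrors the existing dram and dummy-pstate overrides: the nanosecond debug override is applied only when it is non-zero and actually differs from the current microsecond value. A small stand-alone sketch of that guard, with a hypothetical apply_latency_override() helper:

#include <stdio.h>

/* Hypothetical helper mirroring the override pattern in
 * dcn321_update_bw_bounding_box_fpu(): the override is in ns, the
 * bounding-box field is in us, and 0 means "no override". */
static void apply_latency_override(double *field_us, int override_ns)
{
	if ((int)(*field_us * 1000) != override_ns && override_ns)
		*field_us = override_ns / 1000.0;
}

int main(void)
{
	double fclk_change_latency_us = 7.0;

	apply_latency_override(&fclk_change_latency_us, 0);      /* no override set */
	printf("%.2f us\n", fclk_change_latency_us);             /* still 7.00 */

	apply_latency_override(&fclk_change_latency_us, 12000);  /* 12000 ns = 12 us */
	printf("%.2f us\n", fclk_change_latency_us);             /* now 12.00 */
	return 0;
}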
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index e8b094006d95..c596187a1e09 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -26,6 +26,16 @@
#include "dc_features.h"
#include "display_mode_enums.h"
+/**
+ * DOC: overview
+ *
+ * Most of the DML code is automatically generated and tested via a hardware
+ * description language. Usually, we use the prefix _vcs_dpi in the code,
+ * where VCS means "Verilog Compiled Simulator" and DPI stands for "Direct
+ * Programming Interface". In other words, those structs can be used to
+ * interface Verilog with other languages such as C.
+ */
+
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
@@ -159,13 +169,20 @@ struct _vcs_dpi_voltage_scaling_st {
double dtbclk_mhz;
};
+/**
+ * struct _vcs_dpi_soc_bounding_box_st - SOC definitions
+ *
+ * This struct maintains the SOC Bounding Box information for the ASIC; it
+ * defines things such as clock, voltage, performance, etc. Usually, we load
+ * these values from VBIOS; if something goes wrong, we use some hard-coded
+ * values, which will enable the ASIC to light up with limitations.
+ */
struct _vcs_dpi_soc_bounding_box_st {
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- /*
- * This is a temporary stash for updating @clock_limits with the PMFW
- * clock table. Do not use outside of *update_bw_boudning_box functions.
+ /**
+ * @num_states: It represents the total number of Display Power Management
+ * (DPM) states supported by the specific ASIC.
*/
- struct _vcs_dpi_voltage_scaling_st _clock_tmp[DC__VOLTAGE_STATES];
unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
@@ -231,6 +248,14 @@ struct _vcs_dpi_soc_bounding_box_st {
enum self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank;
};
+/**
+ * struct _vcs_dpi_ip_params_st - IP configuration for DCN blocks
+ *
+ * In this struct you can find the DCN configuration associated with the
+ * specific ASIC. For example, here we can store how many DPPs the ASIC
+ * supports and how many of them are available.
+ *
+ */
struct _vcs_dpi_ip_params_st {
bool use_min_dcfclk;
bool clamp_min_dcfclk;
@@ -283,6 +308,9 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_line_buffer_chroma_buffer_size;
unsigned int max_page_table_levels;
+ /**
+ * @max_num_dpp: Maximum number of DPPs supported by the target ASIC.
+ */
unsigned int max_num_dpp;
unsigned int max_num_otg;
unsigned int cursor_chunk_size;
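The kernel-doc added above describes @num_states as the number of DPM states carried in @clock_limits. A minimal user-space sketch, using trimmed-down stand-ins for the documented structs (only a couple of the fields mentioned here, plus an assumed DC__VOLTAGE_STATES bound), of how a consumer might walk those states:

#include <stdio.h>

#define DC__VOLTAGE_STATES 20   /* assumed array bound, stand-in for the real header */

/* Trimmed stand-ins for the documented structs: only fields referenced by
 * the kernel-doc above are reproduced here. */
struct voltage_scaling {
	double dtbclk_mhz;
};

struct soc_bounding_box {
	struct voltage_scaling clock_limits[DC__VOLTAGE_STATES];
	unsigned int num_states;   /* number of DPM states actually populated */
	double sr_exit_time_us;
};

int main(void)
{
	struct soc_bounding_box soc = {
		.clock_limits = { { .dtbclk_mhz = 600.0 }, { .dtbclk_mhz = 1200.0 } },
		.num_states = 2,
		.sr_exit_time_us = 9.0,
	};

	/* Only the first num_states entries of clock_limits are meaningful. */
	for (unsigned int i = 0; i < soc.num_states; i++)
		printf("state %u: dtbclk %.1f MHz\n", i, soc.clock_limits[i].dtbclk_mhz);

	printf("sr_exit_time: %.1f us\n", soc.sr_exit_time_us);
	return 0;
}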
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 8460aefe7b6d..da8acf59ccac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
-struct dml32_CalculateSwathAndDETConfiguration {
- unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
- unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpSwathSizeBytesY;
- unsigned int RoundedUpSwathSizeBytesC;
- double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
- double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- bool NoChromaSurfaces;
- unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
- unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
- unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
- bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
- unsigned int SurfaceWithMinActiveFCLKChangeMargin;
- unsigned int DRAMClockChangeSupportNumber;
- unsigned int LastSurfaceWithoutMargin;
- unsigned int DRAMClockChangeMethod;
- bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
- double MinActiveFCLKChangeMargin;
- double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
- double ActiveClockChangeLatencyHidingY;
- double ActiveClockChangeLatencyHidingC;
- double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
- double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
- double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
- double TotalPixelBW;
- bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double LinesInDETY[DC__NUM_DPP__MAX];
- double LinesInDETC[DC__NUM_DPP__MAX];
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double FullDETBufferingTimeY;
- double FullDETBufferingTimeC;
- double WritebackDRAMClockChangeLatencyMargin;
- double WritebackFCLKChangeLatencyMargin;
- double WritebackLatencyHiding;
- bool SameTimingForFCLKChange;
- unsigned int TotalActiveWriteback;
- unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
- unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler;
- double LineTime;
- double dst_y_prefetch_equ;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tvm_oto_lines;
- double Tr0_oto_lines;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE;
- double TimeForFetchingRowInVBlank;
- double LinesToRequestPrefetchPixelData;
- unsigned int HostVMDynamicLevelsTrips;
- double trip_to_mem;
- double Tvm_trips;
- double Tr0_trips;
- double Tvm_trips_rounded;
- double Tr0_trips_rounded;
- double Lsw_oto;
- double Tpre_rounded;
- double prefetch_bw_equ;
- double Tvm_equ;
- double Tr0_equ;
- double Tdmbf;
- double Tdmec;
- double Tdmsks;
- double prefetch_sw_bytes;
- double bytes_pp;
- double dep_bytes;
- unsigned int max_vratio_pre;
- double min_Lsw;
- double Tsw_est1;
- double Tsw_est3;
-};
-
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
- struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
- struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
- struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
};
struct vba_vars_st {
@@ -418,6 +312,7 @@ struct vba_vars_st {
unsigned int ActiveDPPs;
unsigned int LBLatencyHidingSourceLinesY;
unsigned int LBLatencyHidingSourceLinesC;
+ double ActiveDRAMClockChangeLatencyMarginPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];// DML doesn't save active margin per state
double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
double CachedActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; // Cache in dml_get_voltage_level for debug purposes only
double MinActiveDRAMClockChangeMargin;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
deleted file mode 100644
index b4b51e51fc25..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
+++ /dev/null
@@ -1,1884 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "resource.h"
-#include "core_types.h"
-#include "dsc.h"
-#include "clk_mgr.h"
-
-#ifndef DC_LOGGER_INIT
-#define DC_LOGGER_INIT
-#undef DC_LOG_WARNING
-#define DC_LOG_WARNING
-#endif
-
-#define DML_WRAPPER_TRANSLATION_
-#include "dml_wrapper_translation.c"
-#undef DML_WRAPPER_TRANSLATION_
-
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
-static void build_clamping_params(struct dc_stream_state *stream)
-{
- stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
- stream->clamping.c_depth = stream->timing.display_color_depth;
- stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
-}
-
-static void get_pixel_clock_parameters(
- const struct pipe_ctx *pipe_ctx,
- struct pixel_clk_params *pixel_clk_params)
-{
- const struct dc_stream_state *stream = pipe_ctx->stream;
-
- /*TODO: is this halved for YCbCr 420? in that case we might want to move
- * the pixel clock normalization for hdmi up to here instead of doing it
- * in pll_adjust_pix_clk
- */
- pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
- pixel_clk_params->signal_type = pipe_ctx->stream->signal;
- pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
- /* TODO: un-hardcode*/
- pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
- LINK_RATE_REF_FREQ_IN_KHZ;
- pixel_clk_params->flags.ENABLE_SS = 0;
- pixel_clk_params->color_depth =
- stream->timing.display_color_depth;
- pixel_clk_params->flags.DISPLAY_BLANKED = 1;
- pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
- PIXEL_ENCODING_YCBCR420);
- pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- pixel_clk_params->color_depth = COLOR_DEPTH_888;
- }
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
- }
- if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- pixel_clk_params->requested_pix_clk_100hz *= 2;
-
-}
-
-static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
- struct bit_depth_reduction_params *fmt_bit_depth)
-{
- enum dc_dither_option option = stream->dither_option;
- enum dc_pixel_encoding pixel_encoding =
- stream->timing.pixel_encoding;
-
- memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
-
- if (option == DITHER_OPTION_DEFAULT) {
- switch (stream->timing.display_color_depth) {
- case COLOR_DEPTH_666:
- option = DITHER_OPTION_SPATIAL6;
- break;
- case COLOR_DEPTH_888:
- option = DITHER_OPTION_SPATIAL8;
- break;
- case COLOR_DEPTH_101010:
- option = DITHER_OPTION_SPATIAL10;
- break;
- default:
- option = DITHER_OPTION_DISABLE;
- }
- }
-
- if (option == DITHER_OPTION_DISABLE)
- return;
-
- if (option == DITHER_OPTION_TRUN6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
- } else if (option == DITHER_OPTION_TRUN8 ||
- option == DITHER_OPTION_TRUN8_SPATIAL6 ||
- option == DITHER_OPTION_TRUN8_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
- } else if (option == DITHER_OPTION_TRUN10 ||
- option == DITHER_OPTION_TRUN10_SPATIAL6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8 ||
- option == DITHER_OPTION_TRUN10_FM8 ||
- option == DITHER_OPTION_TRUN10_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
- }
-
- /* special case - Formatter can only reduce by 4 bits at most.
- * When reducing from 12 to 6 bits,
- * HW recommends we use trunc with round mode
- * (if we did nothing, trunc to 10 bits would be used)
- * note that any 12->10 bit reduction is ignored prior to DCE8,
- * as the input was 10 bits.
- */
- if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
- fmt_bit_depth->flags.TRUNCATE_MODE = 1;
- }
-
- /* spatial dither
- * note that spatial modes 1-3 are never used
- */
- if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL6 ||
- option == DITHER_OPTION_TRUN8_SPATIAL6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL8 ||
- option == DITHER_OPTION_SPATIAL8_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL10 ||
- option == DITHER_OPTION_SPATIAL10_FM8 ||
- option == DITHER_OPTION_SPATIAL10_FM6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- }
-
- if (option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_SPATIAL8 ||
- option == DITHER_OPTION_SPATIAL10) {
- fmt_bit_depth->flags.FRAME_RANDOM = 0;
- } else {
- fmt_bit_depth->flags.FRAME_RANDOM = 1;
- }
-
- //////////////////////
- //// temporal dither
- //////////////////////
- if (option == DITHER_OPTION_FM6 ||
- option == DITHER_OPTION_SPATIAL8_FM6 ||
- option == DITHER_OPTION_SPATIAL10_FM6 ||
- option == DITHER_OPTION_TRUN10_FM6 ||
- option == DITHER_OPTION_TRUN8_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
- } else if (option == DITHER_OPTION_FM8 ||
- option == DITHER_OPTION_SPATIAL10_FM8 ||
- option == DITHER_OPTION_TRUN10_FM8) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
- } else if (option == DITHER_OPTION_FM10) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
- }
-
- fmt_bit_depth->pixel_encoding = pixel_encoding;
-}
-
-/* Move this after the above function as VS complains about
- * declaration issues for resource_build_bit_depth_reduction_params.
- */
-
-static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
-{
-
- get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
-
- if (pipe_ctx->clock_source)
- pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings);
-
- pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &pipe_ctx->stream->bit_depth_params);
- build_clamping_params(pipe_ctx->stream);
-
- return DC_OK;
-}
-
-bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
-{
- int i;
-
- /* Validate DSC config, dsc count validation is already done */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dsc_config dsc_cfg;
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- /* Only need to validate top pipe */
- if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
- continue;
-
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
- + stream->timing.h_border_right) / opp_cnt;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
- + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
- dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
-
- if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
- return false;
- }
- return true;
-}
-
-enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
-{
- enum dc_status status = DC_OK;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
-
- if (!pipe_ctx)
- return DC_ERROR_UNEXPECTED;
-
-
- status = build_pipe_hw_param(pipe_ctx);
-
- return status;
-}
-
-void dml_acquire_dsc(const struct dc *dc,
- struct resource_context *res_ctx,
- struct display_stream_compressor **dsc,
- int pipe_idx)
-{
- int i;
- const struct resource_pool *pool = dc->res_pool;
- struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
-
- ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
- *dsc = NULL;
-
- /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
- if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
- *dsc = pool->dscs[pipe_idx];
- res_ctx->is_dsc_acquired[pipe_idx] = true;
- return;
- }
-
- /* Return old DSC to avoid the need for redo it */
- if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
- *dsc = dsc_old;
- res_ctx->is_dsc_acquired[dsc_old->inst] = true;
- return ;
- }
-
- /* Find first free DSC */
- for (i = 0; i < pool->res_cap->num_dsc; i++)
- if (!res_ctx->is_dsc_acquired[i]) {
- *dsc = pool->dscs[i];
- res_ctx->is_dsc_acquired[i] = true;
- break;
- }
-}
-
-static bool dml_split_stream_for_mpc_or_odm(
- const struct dc *dc,
- struct resource_context *res_ctx,
- struct pipe_ctx *pri_pipe,
- struct pipe_ctx *sec_pipe,
- bool odm)
-{
- int pipe_idx = sec_pipe->pipe_idx;
- const struct resource_pool *pool = dc->res_pool;
-
- *sec_pipe = *pri_pipe;
-
- sec_pipe->pipe_idx = pipe_idx;
- sec_pipe->plane_res.mi = pool->mis[pipe_idx];
- sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
- sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
- sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
- sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
- sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
- sec_pipe->stream_res.dsc = NULL;
- if (odm) {
- if (pri_pipe->next_odm_pipe) {
- ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
- sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
- sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
- }
- if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
- pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
- sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
- }
- if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
- pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
- sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
- }
- pri_pipe->next_odm_pipe = sec_pipe;
- sec_pipe->prev_odm_pipe = pri_pipe;
- ASSERT(sec_pipe->top_pipe == NULL);
-
- if (!sec_pipe->top_pipe)
- sec_pipe->stream_res.opp = pool->opps[pipe_idx];
- else
- sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
- if (sec_pipe->stream->timing.flags.DSC == 1) {
- dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
- ASSERT(sec_pipe->stream_res.dsc);
- if (sec_pipe->stream_res.dsc == NULL)
- return false;
- }
- } else {
- if (pri_pipe->bottom_pipe) {
- ASSERT(pri_pipe->bottom_pipe != sec_pipe);
- sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
- sec_pipe->bottom_pipe->top_pipe = sec_pipe;
- }
- pri_pipe->bottom_pipe = sec_pipe;
- sec_pipe->top_pipe = pri_pipe;
-
- ASSERT(pri_pipe->plane_state);
- }
-
- return true;
-}
-
-static struct pipe_ctx *dml_find_split_pipe(
- struct dc *dc,
- struct dc_state *context,
- int old_index)
-{
- struct pipe_ctx *pipe = NULL;
- int i;
-
- if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[old_index];
- pipe->pipe_idx = old_index;
- }
-
- if (!pipe)
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
- && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
- if (context->res_ctx.pipe_ctx[i].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[i];
- pipe->pipe_idx = i;
- break;
- }
- }
- }
-
- /*
- * May need to fix pipes getting tossed from 1 opp to another on flip
- * Add for debugging transient underflow during topology updates:
- * ASSERT(pipe);
- */
- if (!pipe)
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- if (context->res_ctx.pipe_ctx[i].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[i];
- pipe->pipe_idx = i;
- break;
- }
- }
-
- return pipe;
-}
-
-static void dml_release_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
- struct display_stream_compressor **dsc)
-{
- int i;
-
- for (i = 0; i < pool->res_cap->num_dsc; i++)
- if (pool->dscs[i] == *dsc) {
- res_ctx->is_dsc_acquired[i] = false;
- *dsc = NULL;
- break;
- }
-}
-
-static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
-{
- int mpc_split_count = 0;
- struct pipe_ctx *other_pipe = pipe->bottom_pipe;
-
- while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
- mpc_split_count++;
- other_pipe = other_pipe->bottom_pipe;
- }
- other_pipe = pipe->top_pipe;
- while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
- mpc_split_count++;
- other_pipe = other_pipe->top_pipe;
- }
-
- return mpc_split_count;
-}
-
-static bool dml_enough_pipes_for_subvp(struct dc *dc,
- struct dc_state *context)
-{
- int i = 0;
- int num_pipes = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream && pipe->plane_state)
- num_pipes++;
- }
-
- // Sub-VP only possible if the number of "real" pipes is
- // less than or equal to half the number of available pipes
- if (num_pipes * 2 > dc->res_pool->pipe_count)
- return false;
-
- return true;
-}
-
-static int dml_validate_apply_pipe_split_flags(
- struct dc *dc,
- struct dc_state *context,
- int vlevel,
- int *split,
- bool *merge)
-{
- int i, pipe_idx, vlevel_split;
- int plane_count = 0;
- bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- struct vba_vars_st *v = &context->bw_ctx.dml.vba;
- int max_mpc_comb = v->maxMpcComb;
-
- if (context->stream_count > 1) {
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
- avoid_split = true;
- } else if (dc->debug.force_single_disp_pipe_split)
- force_split = true;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /**
- * Workaround for avoiding pipe-split in cases where we'd split
- * planes that are too small, resulting in splits that aren't
- * valid for the scaler.
- */
- if (pipe->plane_state &&
- (pipe->plane_state->dst_rect.width <= 16 ||
- pipe->plane_state->dst_rect.height <= 16 ||
- pipe->plane_state->src_rect.width <= 16 ||
- pipe->plane_state->src_rect.height <= 16))
- avoid_split = true;
-
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
- }
- if (plane_count > dc->res_pool->pipe_count / 2)
- avoid_split = true;
-
- /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct dc_crtc_timing timing;
-
- if (!pipe->stream)
- continue;
- else {
- timing = pipe->stream->timing;
- if (timing.h_border_left + timing.h_border_right
- + timing.v_border_top + timing.v_border_bottom > 0) {
- avoid_split = true;
- break;
- }
- }
- }
-
- /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
- if (avoid_split) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
- if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
- v->ModeSupport[vlevel][0])
- break;
- /* Impossible to not split this pipe */
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- vlevel = vlevel_split;
- else
- max_mpc_comb = 0;
- pipe_idx++;
- }
- v->maxMpcComb = max_mpc_comb;
- }
-
- /* Split loop sets which pipe should be split based on dml outputs and dc flags */
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- int pipe_plane = v->pipe_plane[pipe_idx];
- bool split4mpc = context->stream_count == 1 && plane_count == 1
- && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
-
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
- split[i] = 4;
- else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
- split[i] = 2;
-
- if ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE))
- split[i] = 2;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- split[i] = 2;
- v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
- }
- if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
- split[i] = 4;
- v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
- }
- /*420 format workaround*/
- if (pipe->stream->timing.h_addressable > 7680 &&
- pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- split[i] = 4;
- }
-
- v->ODMCombineEnabled[pipe_plane] =
- v->ODMCombineEnablePerState[vlevel][pipe_plane];
-
- if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- if (dml_get_num_mpc_splits(pipe) == 1) {
- /*If need split for mpc but 2 way split already*/
- if (split[i] == 4)
- split[i] = 2; /* 2 -> 4 MPC */
- else if (split[i] == 2)
- split[i] = 0; /* 2 -> 2 MPC */
- else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
- merge[i] = true; /* 2 -> 1 MPC */
- } else if (dml_get_num_mpc_splits(pipe) == 3) {
- /*If need split for mpc but 4 way split already*/
- if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
- || !pipe->bottom_pipe)) {
- merge[i] = true; /* 4 -> 2 MPC */
- } else if (split[i] == 0 && pipe->top_pipe &&
- pipe->top_pipe->plane_state == pipe->plane_state)
- merge[i] = true; /* 4 -> 1 MPC */
- split[i] = 0;
- } else if (dml_get_num_mpc_splits(pipe)) {
- /* ODM -> MPC transition */
- if (pipe->prev_odm_pipe) {
- split[i] = 0;
- merge[i] = true;
- }
- }
- } else {
- if (dml_get_num_mpc_splits(pipe) == 1) {
- /*If need split for odm but 2 way split already*/
- if (split[i] == 4)
- split[i] = 2; /* 2 -> 4 ODM */
- else if (split[i] == 2)
- split[i] = 0; /* 2 -> 2 ODM */
- else if (pipe->prev_odm_pipe) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* exit ODM */
- }
- } else if (dml_get_num_mpc_splits(pipe) == 3) {
- /*If need split for odm but 4 way split already*/
- if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
- || !pipe->next_odm_pipe)) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* 4 -> 2 ODM */
- } else if (split[i] == 0 && pipe->prev_odm_pipe) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* exit ODM */
- }
- split[i] = 0;
- } else if (dml_get_num_mpc_splits(pipe)) {
- /* MPC -> ODM transition */
- ASSERT(0); /* NOT expected yet */
- if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
- split[i] = 0;
- merge[i] = true;
- }
- }
- }
-
- /* Adjust dppclk when split is forced, do not bother with dispclk */
- if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
- v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
- pipe_idx++;
- }
-
- return vlevel;
-}
-
-static void dml_set_phantom_stream_timing(struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *ref_pipe,
- struct dc_stream_state *phantom_stream)
-{
- // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
- uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
- dc->caps.subvp_fw_processing_delay_us +
- dc->caps.subvp_pstate_allow_width_us;
- uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
- (ref_pipe->stream->timing.pix_clk_100hz * 100) /
- (double)ref_pipe->stream->timing.h_total;
- uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;
-
- phantom_stream->dst.y = 0;
- phantom_stream->dst.height = phantom_vactive;
- phantom_stream->src.y = 0;
- phantom_stream->src.height = phantom_vactive;
-
- phantom_stream->timing.v_addressable = phantom_vactive;
- phantom_stream->timing.v_front_porch = 1;
- phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
- phantom_stream->timing.v_front_porch +
- phantom_stream->timing.v_sync_width +
- phantom_bp;
-}
-
-static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *ref_pipe)
-{
- struct dc_stream_state *phantom_stream = NULL;
-
- phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
-
- /* stream has limited viewport and small timing */
- memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
- memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
- memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
- dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);
-
- dc_add_stream_to_ctx(dc, context, phantom_stream);
- dc->hwss.apply_ctx_to_hw(dc, context);
- return phantom_stream;
-}
-
-static void dml_enable_phantom_plane(struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *phantom_stream,
- struct pipe_ctx *main_pipe)
-{
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_plane_state *prev_phantom_plane = NULL;
- struct pipe_ctx *curr_pipe = main_pipe;
-
- while (curr_pipe) {
- if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
- phantom_plane = prev_phantom_plane;
- else
- phantom_plane = dc_create_plane_state(dc);
-
- memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
- memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
- sizeof(phantom_plane->scaling_quality));
- memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
- memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
- memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
- memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
- sizeof(phantom_plane->plane_size));
- memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
- sizeof(phantom_plane->tiling_info));
- memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
- /* Currently compat_level is undefined in dc_state
- * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
- */
- phantom_plane->format = curr_pipe->plane_state->format;
- phantom_plane->rotation = curr_pipe->plane_state->rotation;
- phantom_plane->visible = curr_pipe->plane_state->visible;
-
- /* Shadow pipe has small viewport. */
- phantom_plane->clip_rect.y = 0;
- phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
-
- dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
-
- curr_pipe = curr_pipe->bottom_pipe;
- prev_phantom_plane = phantom_plane;
- }
-}
-
-static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct dc_stream_state *ref_stream = pipe->stream;
- // Only construct phantom stream for top pipes that have plane enabled
- if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
- struct dc_stream_state *phantom_stream = NULL;
-
- phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
- dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state && pipe->stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- pipe->stream->use_dynamic_meta = false;
- pipe->plane_state->flip_immediate = false;
- if (!resource_build_scaling_params(pipe)) {
- // Log / remove phantom pipes since failed to build scaling params
- }
- }
- }
-}
-
-static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i;
- bool removed_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- dc_rem_all_planes_for_stream(dc, pipe->stream, context);
- dc_remove_stream_from_ctx(dc, context, pipe->stream);
- removed_pipe = true;
- }
-
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
- }
- if (removed_pipe)
- dc->hwss.apply_ctx_to_hw(dc, context);
-}
-
-/*
- * If the input state contains no upstream planes for a particular pipe (i.e. only timing)
- * we need to populate some "conservative" plane information as DML cannot handle "no planes"
- */
-static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
-{
- pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
- pipe->src.source_scan = dm_horz;
- pipe->src.sw_mode = dm_sw_4kb_s;
- pipe->src.macro_tile_size = dm_64k_tile;
- pipe->src.viewport_width = timing->h_addressable;
- if (pipe->src.viewport_width > 1920)
- pipe->src.viewport_width = 1920;
- pipe->src.viewport_height = timing->v_addressable;
- if (pipe->src.viewport_height > 1080)
- pipe->src.viewport_height = 1080;
- pipe->src.surface_height_y = pipe->src.viewport_height;
- pipe->src.surface_width_y = pipe->src.viewport_width;
- pipe->src.surface_height_c = pipe->src.viewport_height;
- pipe->src.surface_width_c = pipe->src.viewport_width;
- pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
- pipe->src.source_format = dm_444_32;
- pipe->dest.recout_width = pipe->src.viewport_width;
- pipe->dest.recout_height = pipe->src.viewport_height;
- pipe->dest.full_recout_width = pipe->dest.recout_width;
- pipe->dest.full_recout_height = pipe->dest.recout_height;
- pipe->scale_ratio_depth.lb_depth = dm_lb_16;
- pipe->scale_ratio_depth.hscl_ratio = 1.0;
- pipe->scale_ratio_depth.vscl_ratio = 1.0;
- pipe->scale_ratio_depth.scl_enable = 0;
- pipe->scale_taps.htaps = 1;
- pipe->scale_taps.vtaps = 1;
- pipe->dest.vtotal_min = timing->v_total;
- pipe->dest.vtotal_max = timing->v_total;
-
- if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
- pipe->src.viewport_width /= 2;
- pipe->dest.recout_width /= 2;
- } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
- pipe->src.viewport_width /= 4;
- pipe->dest.recout_width /= 4;
- }
-
- pipe->src.dcc = false;
- pipe->src.dcc_rate = 1;
-}
-
-/*
- * If the pipe is not blending (i.e. pipe_ctx->top pipe == null) then its
- * hsplit group is equal to its own pipe ID
- * Otherwise, all pipes part of the same blending tree have the same hsplit group
- * ID as the top most pipe
- *
- * If the pipe ctx is ODM combined, then similar logic follows
- */
-static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
-{
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
-
- if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
- == dc_pipe_ctx->plane_state) {
- struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
- int split_idx = 0;
-
- while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
- == dc_pipe_ctx->plane_state) {
- first_pipe = first_pipe->top_pipe;
- split_idx++;
- }
-
- /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */
- if (split_idx == 0)
- e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
- else if (split_idx == 1)
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
- else if (split_idx == 2)
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;
-
- } else if (dc_pipe_ctx->prev_odm_pipe) {
- struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;
-
- while (first_pipe->prev_odm_pipe)
- first_pipe = first_pipe->prev_odm_pipe;
- e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
- }
-}
-
-static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
-{
- const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
- const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;
-
- e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
- e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
- || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
- || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
-
- /* stereo is not split */
- if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
- pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
- e2e_pipe->pipe.src.is_hsplit = false;
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
- }
-
- e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
- || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
- e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
- e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
- e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
- e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
- e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
- e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
- e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
- e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
- e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
- e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
- e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
-
- if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
- || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
- e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
- e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
- e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
- } else {
- e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
- e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
- }
- e2e_pipe->pipe.src.dcc = pln->dcc.enable;
- e2e_pipe->pipe.src.dcc_rate = 1;
- e2e_pipe->pipe.dest.recout_width = scl->recout.width;
- e2e_pipe->pipe.dest.recout_height = scl->recout.height;
- e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
- e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
- if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
- e2e_pipe->pipe.dest.full_recout_width *= 2;
- else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
- e2e_pipe->pipe.dest.full_recout_width *= 4;
- else {
- struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;
-
- while (split_pipe && split_pipe->plane_state == pln) {
- e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
- split_pipe = split_pipe->bottom_pipe;
- }
- split_pipe = dc_pipe_ctx->top_pipe;
- while (split_pipe && split_pipe->plane_state == pln) {
- e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
- split_pipe = split_pipe->top_pipe;
- }
- }
-
- e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
- e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.scl_enable =
- scl->ratios.vert.value != dc_fixpt_one.value
- || scl->ratios.horz.value != dc_fixpt_one.value
- || scl->ratios.vert_c.value != dc_fixpt_one.value
- || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
- || always_scale; /*support always scale*/
- e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
- e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
- e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
- e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
-
- /* Currently compat_level is not defined. Commenting it until further resolution
- * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
- swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
- &e2e_pipe->pipe.src.sw_mode);
- e2e_pipe->pipe.src.macro_tile_size =
- swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
- } else {
- gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
- pln->compat_level,
- &e2e_pipe->pipe.src.sw_mode);
- e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
- }*/
-
- e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
-}
-
-static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
-{
- /*
- * For graphic plane, cursor number is 1, nv12 is 0
- * bw calculations due to cursor on/off
- */
- if (dc_pipe_ctx->plane_state &&
- (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
- e2e_pipe->pipe.src.num_cursors = 0;
- else
- e2e_pipe->pipe.src.num_cursors = 1;
-
- e2e_pipe->pipe.src.cur0_src_width = 256;
- e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
-}
-
-static int populate_dml_pipes_from_context_base(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
-{
- int pipe_cnt, i;
- bool synchronized_vblank = true;
- struct resource_context *res_ctx = &context->res_ctx;
-
- for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (pipe_cnt < 0) {
- pipe_cnt = i;
- continue;
- }
-
- if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (dc->debug.disable_timing_sync ||
- (!resource_are_streams_timing_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream) &&
- !resource_are_vblanks_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream))) {
- synchronized_vblank = false;
- break;
- }
- }
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
-
- struct audio_check aud_check = {0};
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- /* todo:
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
- pipes[pipe_cnt].pipe.src.dcc = 0;
- pipes[pipe_cnt].pipe.src.vm = 0;*/
-
- pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
-
- pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
- /* todo: rotation?*/
- pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
- if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
- /* 1/2 vblank */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
- - timing->v_border_top - timing->v_border_bottom) / 2;
- /* 36 bytes dp, 32 hdmi */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
- dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
- }
- pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
-
- dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
- pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
- pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
-
- pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
-
- pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);
-
- populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
-
- pipes[pipe_cnt].dout.dp_lanes = 4;
- pipes[pipe_cnt].dout.is_virtual = 0;
- pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
- if (pipes[pipe_cnt].dout.output_type < 0) {
- pipes[pipe_cnt].dout.output_type = dm_dp;
- pipes[pipe_cnt].dout.is_virtual = 1;
- }
-
- populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);
-
- if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
- pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
-
- /* todo: default max for now, until there is logic reflecting this in dc*/
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- /*fill up the audio sample rate (unit in kHz)*/
- get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
- pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
-
- populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
-
- if (!res_ctx->pipe_ctx[i].plane_state) {
- populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
- } else {
- populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
- }
-
- pipe_cnt++;
- }
-
- /* populate writeback information */
- if (dc->res_pool)
- dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
-
- return pipe_cnt;
-}
-
-static int dml_populate_dml_pipes_from_context(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
-{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe = NULL; // Fix potentially uninitialized error from VS
-
- populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
-
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
- pipe_cnt++;
- }
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format)) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- }
-
- return pipe_cnt;
-}
-
-static void dml_full_validate_bw_helper(struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *vlevel,
- int *split,
- bool *merge,
- int *pipe_cnt)
-{
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- /*
- * DML favors voltage over p-state, but we're more interested in
- * supporting p-state over voltage. We can't support p-state in
- * prefetch mode > 0 so try capping the prefetch mode to start.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh_and_mclk_switch;
- *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
- /* This may adjust vlevel and maxMpcComb */
- if (*vlevel < context->bw_ctx.dml.soc.num_states)
- *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
-
- /* Conditions for setting up phantom pipes for SubVP:
- * 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
- * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
- * 4. Display configuration passes validation
- * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
- */
- if (!dc->debug.force_disable_subvp &&
- dml_enough_pipes_for_subvp(dc, context) &&
- *vlevel < context->bw_ctx.dml.soc.num_states &&
- (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
- dc->debug.force_subvp_mclk_switch)) {
-
- dml_add_phantom_pipes(dc, context);
-
- /* Create input to DML based on new context which includes phantom pipes
- * TODO: Input to DML should mark which pipes are phantom
- */
- *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
- *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
- if (*vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, MAX_PIPES * sizeof(*split));
- memset(merge, 0, MAX_PIPES * sizeof(*merge));
- *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
- }
-
- // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
- // remove phantom pipes and repopulate dml pipes
- if (*vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- dml_remove_phantom_pipes(dc, context);
- *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
- }
- }
-}
-
-static void dcn20_adjust_adaptive_sync_v_startup(
- const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
-{
- struct dc_crtc_timing patched_crtc_timing;
- uint32_t asic_blank_end = 0;
- uint32_t asic_blank_start = 0;
- uint32_t newVstartup = 0;
-
- patched_crtc_timing = *dc_crtc_timing;
-
- if (patched_crtc_timing.flags.INTERLACE == 1) {
- if (patched_crtc_timing.v_front_porch < 2)
- patched_crtc_timing.v_front_porch = 2;
- } else {
- if (patched_crtc_timing.v_front_porch < 1)
- patched_crtc_timing.v_front_porch = 1;
- }
-
- /* blank_start = frame end - front porch */
- asic_blank_start = patched_crtc_timing.v_total -
- patched_crtc_timing.v_front_porch;
-
- /* blank_end = blank_start - active */
- asic_blank_end = asic_blank_start -
- patched_crtc_timing.v_border_bottom -
- patched_crtc_timing.v_addressable -
- patched_crtc_timing.v_border_top;
-
- newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
-
- *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
-}
-
-static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
-{
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal));
-}
-
-static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
-{
- int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
- return true;
- }
- return false;
-}
-
-static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- }
-}
-
-static bool dml_internal_validate(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *vlevel_out,
- bool fast_validate)
-{
- bool out = false;
- bool repopulate_pipes = false;
- int split[MAX_PIPES] = { 0 };
- bool merge[MAX_PIPES] = { false };
- bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- ASSERT(pipes);
- if (!pipes)
- return false;
-
- // For each full update, remove all existing phantom pipes first
- dml_remove_phantom_pipes(dc, context);
-
- dml_update_soc_for_wm_a(dc, context);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state) {
- // On initial pass through DML, we intend to use MALL for SS on all
- // (non-PSR) surfaces with none using MALL for P-State
- // 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out till mall_plane_config gets supported in dc_plant_state
- //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
- // pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
- }
- }
- pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (!fast_validate) {
- dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
- }
-
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- /*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
- *
- * We don't actually support prefetch mode 2, so require that we
- * at least support prefetch mode 1.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, sizeof(split));
- memset(merge, 0, sizeof(merge));
- vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
- }
- }
-
- dml_log_mode_support_params(&context->bw_ctx.dml);
-
- if (vlevel == context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
- if (!pipe->stream)
- continue;
-
- /* We only support full screen mpo with ODM */
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
- }
- pipe_idx++;
- }
-
- /* merge pipes if necessary */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /*skip pipes that don't need merging*/
- if (!merge[i])
- continue;
-
- /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
- if (pipe->prev_odm_pipe) {
- /*split off odm pipe*/
- pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
- if (pipe->next_odm_pipe)
- pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
-
- pipe->bottom_pipe = NULL;
- pipe->next_odm_pipe = NULL;
- pipe->plane_state = NULL;
- pipe->stream = NULL;
- pipe->top_pipe = NULL;
- pipe->prev_odm_pipe = NULL;
- if (pipe->stream_res.dsc)
- dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
- memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
- memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
- repopulate_pipes = true;
- } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
- struct pipe_ctx *top_pipe = pipe->top_pipe;
- struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
-
- top_pipe->bottom_pipe = bottom_pipe;
- if (bottom_pipe)
- bottom_pipe->top_pipe = top_pipe;
-
- pipe->top_pipe = NULL;
- pipe->bottom_pipe = NULL;
- pipe->plane_state = NULL;
- pipe->stream = NULL;
- memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
- memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
- repopulate_pipes = true;
- } else
- ASSERT(0); /* Should never try to merge master pipe */
-
- }
-
- for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *hsplit_pipe = NULL;
- bool odm;
- int old_index = -1;
-
- if (!pipe->stream || newly_split[i])
- continue;
-
- pipe_idx++;
- odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
-
- if (!pipe->plane_state && !odm)
- continue;
-
- if (split[i]) {
- if (odm) {
- if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (old_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->pipe_idx;
- } else {
- if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
- else if (old_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->pipe_idx;
- }
- hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
- ASSERT(hsplit_pipe);
- if (!hsplit_pipe)
- goto validate_fail;
-
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- pipe, hsplit_pipe, odm))
- goto validate_fail;
-
- newly_split[hsplit_pipe->pipe_idx] = true;
- repopulate_pipes = true;
- }
- if (split[i] == 4) {
- struct pipe_ctx *pipe_4to1;
-
- if (odm && old_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->pipe_idx;
- else
- old_index = -1;
- pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
- ASSERT(pipe_4to1);
- if (!pipe_4to1)
- goto validate_fail;
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- pipe, pipe_4to1, odm))
- goto validate_fail;
- newly_split[pipe_4to1->pipe_idx] = true;
-
- if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
- && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
- else
- old_index = -1;
- pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
- ASSERT(pipe_4to1);
- if (!pipe_4to1)
- goto validate_fail;
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- hsplit_pipe, pipe_4to1, odm))
- goto validate_fail;
- newly_split[pipe_4to1->pipe_idx] = true;
- }
- if (odm)
- dml_build_mapped_resource(dc, context, pipe->stream);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state) {
- if (!resource_build_scaling_params(pipe))
- goto validate_fail;
- }
- }
-
- /* Actual dsc count per stream dsc validation*/
- if (!dml_validate_dsc(dc, context)) {
- vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
- goto validate_fail;
- }
-
- if (repopulate_pipes)
- pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
- *vlevel_out = vlevel;
- *pipe_cnt_out = pipe_cnt;
-
- out = true;
- goto validate_out;
-
-validate_fail:
- out = false;
-
-validate_out:
- return out;
-}
-
-static void dml_calculate_dlg_params(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx;
- int plane_count;
-
- /* Writeback MCIF_WB arbitration parameters */
- if (dc->res_pool)
- dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
-
- context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
- context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
- context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
- context->bw_ctx.bw.dcn.clk.p_state_change_support =
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
- != dm_dram_clock_change_unsupported;
-
- context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- /* 'z9_support': is not a member of 'dc_clocks' - Commenting out till we have this support in dc_clocks
- * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
- DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
- */
- plane_count = 0;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].plane_state)
- plane_count++;
- }
-
- /* Commented out as per above error for now.
- if (plane_count == 0)
- context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
- */
- context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
- context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support =
- context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
- context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
- // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
- context->res_ctx.pipe_ctx[i].unbounded_req = false;
- } else {
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
- context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
- }
-
- if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
- pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
- pipe_idx++;
- }
- /*save a original dppclock copy*/
- context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
- context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
- context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
- context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
- context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
- - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
-
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].dlg_regs,
- &context->res_ctx.pipe_ctx[i].ttu_regs,
- pipes,
- pipe_cnt,
- pipe_idx,
- cstate_en,
- context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, true);
-
- context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].rq_regs,
- &pipes[pipe_idx].pipe);
- pipe_idx++;
- }
-}
-
-static void dml_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx, vlevel_temp = 0;
-
- double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
- dm_dram_clock_change_unsupported;
-
- /* Set B:
- * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
- * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark
- * calculations to cover bootup clocks.
- * DCFCLK: soc.clock_limits[2] when available
- * UCLK: soc.clock_limits[2] when available
- */
- if (context->bw_ctx.dml.soc.num_states > 2) {
- vlevel_temp = 2;
- dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
- } else
- dcfclk = 615; //DCFCLK Vmin_lv
-
- pipes[0].clks_cfg.voltage = vlevel_temp;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
-
- /* Set D:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW when available
- * UCLK : Min, as reported by PM FW when available
- * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
- */
-
- if (context->bw_ctx.dml.soc.num_states > 2) {
- vlevel_temp = 0;
- dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- } else
- dcfclk = 615; //DCFCLK Vmin_lv
-
- pipes[0].clks_cfg.voltage = vlevel_temp;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
- /* Set C, for Dummy P-State:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW, when available
- * UCLK : Min, as reported by PM FW, when available
- * pstate latency as per UCLK state dummy pstate latency
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts_margin = 160;
-
- if ((!pstate_en))
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
- if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
- /* The only difference between A and C is p-state latency, if p-state is not supported
- * with full p-state latency we want to calculate DLG based on dummy p-state latency,
- * Set A p-state watermark set to 0 previously, when p-state unsupported, for now keep as previous implementation.
- */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW, when available
- * UCLK: Min, as reported by PM FW, when available
- */
- dml_update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-bool dml_validate(struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
-{
- bool out = false;
-
- BW_VAL_TRACE_SETUP();
-
- int vlevel = 0;
- int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- BW_VAL_TRACE_COUNT();
-
- out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
-
- if (pipe_cnt == 0)
- goto validate_out;
-
- if (!out)
- goto validate_fail;
-
- BW_VAL_TRACE_END_VOLTAGE_LEVEL();
-
- if (fast_validate) {
- BW_VAL_TRACE_SKIP(fast);
- goto validate_out;
- }
-
- dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
-
- BW_VAL_TRACE_END_WATERMARKS();
-
- goto validate_out;
-
-validate_fail:
- DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
- dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
-
- BW_VAL_TRACE_SKIP(fail);
- out = false;
-
-validate_out:
- BW_VAL_TRACE_FINISH();
-
- return out;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
deleted file mode 100644
index 4ec5310a2962..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifdef DML_WRAPPER_TRANSLATION_
-
-static void gfx10array_mode_to_dml_params(
- enum array_mode_values array_mode,
- enum legacy_tiling_compat_level compat_level,
- unsigned int *sw_mode)
-{
- switch (array_mode) {
- case DC_ARRAY_LINEAR_ALLIGNED:
- case DC_ARRAY_LINEAR_GENERAL:
- *sw_mode = dm_sw_linear;
- break;
- case DC_ARRAY_2D_TILED_THIN1:
-// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed
-#if 0
- if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO)
- *sw_mode = dm_sw_gfx7_2d_thin_l_vp;
- else
- *sw_mode = dm_sw_gfx7_2d_thin_gl;
-#endif
- break;
- default:
- ASSERT(0); /* Not supported */
- break;
- }
-}
-
-static void swizzle_to_dml_params(
- enum swizzle_mode_values swizzle,
- unsigned int *sw_mode)
-{
- switch (swizzle) {
- case DC_SW_LINEAR:
- *sw_mode = dm_sw_linear;
- break;
- case DC_SW_4KB_S:
- *sw_mode = dm_sw_4kb_s;
- break;
- case DC_SW_4KB_S_X:
- *sw_mode = dm_sw_4kb_s_x;
- break;
- case DC_SW_4KB_D:
- *sw_mode = dm_sw_4kb_d;
- break;
- case DC_SW_4KB_D_X:
- *sw_mode = dm_sw_4kb_d_x;
- break;
- case DC_SW_64KB_S:
- *sw_mode = dm_sw_64kb_s;
- break;
- case DC_SW_64KB_S_X:
- *sw_mode = dm_sw_64kb_s_x;
- break;
- case DC_SW_64KB_S_T:
- *sw_mode = dm_sw_64kb_s_t;
- break;
- case DC_SW_64KB_D:
- *sw_mode = dm_sw_64kb_d;
- break;
- case DC_SW_64KB_D_X:
- *sw_mode = dm_sw_64kb_d_x;
- break;
- case DC_SW_64KB_D_T:
- *sw_mode = dm_sw_64kb_d_t;
- break;
- case DC_SW_64KB_R_X:
- *sw_mode = dm_sw_64kb_r_x;
- break;
- case DC_SW_VAR_S:
- *sw_mode = dm_sw_var_s;
- break;
- case DC_SW_VAR_S_X:
- *sw_mode = dm_sw_var_s_x;
- break;
- case DC_SW_VAR_D:
- *sw_mode = dm_sw_var_d;
- break;
- case DC_SW_VAR_D_X:
- *sw_mode = dm_sw_var_d_x;
- break;
-
- default:
- ASSERT(0); /* Not supported */
- break;
- }
-}
-
-static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest)
-{
- dest->hblank_start = timing->h_total - timing->h_front_porch;
- dest->hblank_end = dest->hblank_start
- - timing->h_addressable
- - timing->h_border_left
- - timing->h_border_right;
- dest->vblank_start = timing->v_total - timing->v_front_porch;
- dest->vblank_end = dest->vblank_start
- - timing->v_addressable
- - timing->v_border_top
- - timing->v_border_bottom;
- dest->htotal = timing->h_total;
- dest->vtotal = timing->v_total;
- dest->hactive = timing->h_addressable;
- dest->vactive = timing->v_addressable;
- dest->interlaced = timing->flags.INTERLACE;
- dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
- if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- dest->pixel_rate_mhz *= 2;
-}
-
-static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe)
-{
- int odm_split_count = 0;
- enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-
- // Traverse pipe tree to determine odm split count
- while (next_pipe) {
- odm_split_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_split_count++;
- pipe = pipe->prev_odm_pipe;
- }
-
- // Translate split to DML odm combine factor
- switch (odm_split_count) {
- case 1:
- combine_mode = dm_odm_combine_mode_2to1;
- break;
- case 3:
- combine_mode = dm_odm_combine_mode_4to1;
- break;
- default:
- combine_mode = dm_odm_combine_mode_disabled;
- }
-
- return combine_mode;
-}
-
-static int get_dml_output_type(enum signal_type dc_signal)
-{
- int dml_output_type = -1;
-
- switch (dc_signal) {
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- case SIGNAL_TYPE_DISPLAY_PORT:
- dml_output_type = dm_dp;
- break;
- case SIGNAL_TYPE_EDP:
- dml_output_type = dm_edp;
- break;
- case SIGNAL_TYPE_HDMI_TYPE_A:
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- dml_output_type = dm_hdmi;
- break;
- default:
- break;
- }
-
- return dml_output_type;
-}
-
-static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout)
-{
- int output_bpc = 0;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- output_bpc = 6;
- break;
- case COLOR_DEPTH_888:
- output_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- output_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- output_bpc = 12;
- break;
- case COLOR_DEPTH_141414:
- output_bpc = 14;
- break;
- case COLOR_DEPTH_161616:
- output_bpc = 16;
- break;
- case COLOR_DEPTH_999:
- output_bpc = 9;
- break;
- case COLOR_DEPTH_111111:
- output_bpc = 11;
- break;
- default:
- output_bpc = 8;
- break;
- }
-
- switch (timing->pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- dout->output_format = dm_444;
- dout->output_bpp = output_bpc * 3;
- break;
- case PIXEL_ENCODING_YCBCR420:
- dout->output_format = dm_420;
- dout->output_bpp = (output_bpc * 3.0) / 2;
- break;
- case PIXEL_ENCODING_YCBCR422:
- if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple)
- dout->output_format = dm_n422;
- else
- dout->output_format = dm_s422;
- dout->output_bpp = output_bpc * 2;
- break;
- default:
- dout->output_format = dm_444;
- dout->output_bpp = output_bpc * 3;
- }
-}
-
-static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format)
-{
- enum source_format_class dml_format = dm_444_32;
-
- switch (dc_format) {
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
- dml_format = dm_420_8;
- break;
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
- dml_format = dm_420_10;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
- dml_format = dm_444_64;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
- dml_format = dm_444_16;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
- dml_format = dm_444_8;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
- dml_format = dm_rgbe_alpha;
- break;
- default:
- dml_format = dm_444_32;
- break;
- }
-
- return dml_format;
-}
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b3d0a4ea2446..8919a2092ac5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -399,6 +399,10 @@ struct pipe_ctx {
struct dc_stream_state *stream;
struct plane_resource plane_res;
+
+ /**
+ * @stream_res: Reference to DCN resource components such as OPP and DSC.
+ */
struct stream_resource stream_res;
struct link_resource link_res;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index d89bd55f110f..437b64e87377 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -268,6 +268,12 @@ enum dc_lut_mode {
LUT_RAM_B
};
+enum phy_state {
+ TX_OFF_SYMCLK_OFF,
+ TX_ON_SYMCLK_ON,
+ TX_OFF_SYMCLK_ON
+};
+
/**
* speakersToChannels
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 5097037e3962..8d86159d9de0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -22,6 +22,16 @@
*
*/
+/**
+ * DOC: mpc-overview
+ *
+ * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * that performs blending of multiple planes, using global and per-pixel alpha.
+ * It also performs post-blending color correction operations according to the
+ * hardware capabilities, such as color transformation matrix and gamma 1D and
+ * 3D LUT.
+ */
+
#ifndef __DC_MPCC_H__
#define __DC_MPCC_H__
@@ -48,14 +58,39 @@ enum mpcc_blend_mode {
MPCC_BLEND_MODE_TOP_BOT_BLENDING
};
+/**
+ * enum mpcc_alpha_blend_mode - define the alpha blend mode regarding pixel
+ * alpha and plane alpha values
+ */
enum mpcc_alpha_blend_mode {
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA: per pixel alpha using DPP
+ * alpha value
+ */
MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN: per
+ * pixel alpha using DPP alpha value multiplied by a global gain (plane
+ * alpha)
+ */
MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA: global alpha value, ignores
+ * pixel alpha and considers only plane alpha
+ */
MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
};
-/*
- * MPCC blending configuration
+/**
+ * struct mpcc_blnd_cfg - MPCC blending configuration
+ *
+ * @black_color: background color
+ * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
+ * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
+ * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
+ * @global_gain: used when the blend mode considers both pixel alpha and plane
+ * alpha; set to the global (plane) alpha value
+ * @global_alpha: plane alpha value
*/
struct mpcc_blnd_cfg {
struct tg_color black_color; /* background color */
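Editor's note, not part of the patch: as a rough illustration of the three alpha blend modes documented in the kerneldoc added in the hunk above, the stand-alone C sketch below blends one colour component under each mode. The enum, helper name and 8-bit value range are stand-ins for illustration only, not the DC types or the hardware's exact arithmetic.

enum blend_mode_example {
	EXAMPLE_PER_PIXEL_ALPHA,           /* MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA */
	EXAMPLE_PER_PIXEL_ALPHA_WITH_GAIN, /* ..._PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN */
	EXAMPLE_GLOBAL_ALPHA               /* ..._GLOBAL_ALPHA */
};

/* Blend one 8-bit colour component of a foreground plane over the layer below. */
static unsigned int blend_component(unsigned int fg, unsigned int bg,
				    unsigned int pixel_alpha, /* per-pixel alpha from the DPP, 0..255 */
				    unsigned int plane_alpha, /* global (plane) alpha, 0..255 */
				    enum blend_mode_example mode)
{
	unsigned int alpha;

	switch (mode) {
	case EXAMPLE_PER_PIXEL_ALPHA:
		alpha = pixel_alpha;                     /* pixel alpha only */
		break;
	case EXAMPLE_PER_PIXEL_ALPHA_WITH_GAIN:
		alpha = pixel_alpha * plane_alpha / 255; /* pixel alpha scaled by the global gain */
		break;
	default: /* EXAMPLE_GLOBAL_ALPHA */
		alpha = plane_alpha;                     /* pixel alpha ignored */
		break;
	}

	return (fg * alpha + bg * (255 - alpha)) / 255;
}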
@@ -107,8 +142,15 @@ struct mpc_dwb_flow_control {
int flow_ctrl_cnt1;
};
-/*
- * MPCC connection and blending configuration for a single MPCC instance.
+/**
+ * struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
+ * @mpcc_id: MPCC physical instance
+ * @dpp_id: DPP input to this MPCC
+ * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
+ * @blnd_cfg: the blending configuration for this MPCC
+ * @sm_cfg: stereo mix setting for this MPCC
+ * @shared_bottom: true if the MPCC outputs to both OPP and DWB endpoints, false otherwise
+ *
* This struct is used as a node in an MPC tree.
*/
struct mpcc {
@@ -120,8 +162,12 @@ struct mpcc {
bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
};
-/*
- * MPC tree represents all MPCC connections for a pipe.
+/**
+ * struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
+ *
+ * @opp_id: the OPP instance that owns this MPC tree
+ * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
+ *
*/
struct mpc_tree {
int opp_id; /* The OPP instance that owns this MPC tree */
@@ -149,13 +195,18 @@ struct mpcc_state {
uint32_t busy;
};
+/**
+ * struct mpc_funcs - MPC component function pointers
+ */
struct mpc_funcs {
void (*read_mpcc_state)(
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
- /*
+ /**
+ * @insert_plane:
+ *
* Insert DPP into MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for OPP output
*
@@ -180,7 +231,9 @@ struct mpc_funcs {
int dpp_id,
int mpcc_id);
- /*
+ /**
+ * @remove_mpcc:
+ *
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
@@ -195,7 +248,9 @@ struct mpc_funcs {
struct mpc_tree *tree,
struct mpcc *mpcc);
- /*
+ /**
+ * @mpc_init:
+ *
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
@@ -208,7 +263,9 @@ struct mpc_funcs {
struct mpc *mpc,
unsigned int mpcc_id);
- /*
+ /**
+ * @update_blending:
+ *
* Update the blending configuration for a specified MPCC.
*
* Parameters:
@@ -223,7 +280,9 @@ struct mpc_funcs {
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id);
- /*
+ /**
+ * @cursor_lock:
+ *
* Lock cursor updates for the specified OPP.
* OPP defines the set of MPCC that are locked together for cursor.
*
@@ -239,8 +298,10 @@ struct mpc_funcs {
int opp_id,
bool lock);
- /*
- * Add DPP into 'secondary' MPC tree based on specified blending position.
+ /**
+ * @insert_plane_to_secondary:
+ *
+ * Add DPP into secondary MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for DWB output
*
* Parameters:
@@ -264,7 +325,9 @@ struct mpc_funcs {
int dpp_id,
int mpcc_id);
- /*
+ /**
+ * @remove_mpcc_from_secondary:
+ *
* Remove a specified DPP from the 'secondary' MPC tree.
*
* Parameters:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 62d4683f17a2..72eef7a5ed83 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -137,7 +137,13 @@ struct crc_params {
bool enable;
};
+/**
+ * struct timing_generator - Entry point to Output Timing Generator feature.
+ */
struct timing_generator {
+ /**
+ * @funcs: Timing generator control functions
+ */
const struct timing_generator_funcs *funcs;
struct dc_bios *bp;
struct dc_context *ctx;
@@ -148,7 +154,9 @@ struct dc_crtc_timing;
struct drr_params;
-
+/**
+ * struct timing_generator_funcs - Control timing generator on a given device.
+ */
struct timing_generator_funcs {
bool (*validate_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
@@ -273,8 +281,8 @@ struct timing_generator_funcs {
const struct crc_params *params);
/**
- * Get CRCs for the given timing generator. Return false if CRCs are
- * not enabled (via configure_crc).
+ * @get_crc: Get CRCs for the given timing generator. Return false if
+ * CRCs are not enabled (via configure_crc).
*/
bool (*get_crc)(struct timing_generator *tg,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
@@ -302,8 +310,6 @@ struct timing_generator_funcs {
int group_idx,
uint32_t gsl_ready_signal);
void (*set_out_mux)(struct timing_generator *tg, enum otg_out_mux_dest dest);
- void (*set_vrr_m_const)(struct timing_generator *optc,
- double vtotal_avg);
void (*set_drr_trigger_window)(struct timing_generator *optc,
uint32_t window_start, uint32_t window_end);
void (*set_vtotal_change_limit)(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index ccb3c719fc4d..52b4350c9cd8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -245,6 +245,8 @@ struct hw_sequencer_funcs {
struct tg_color *color,
int mpcc_id);
+ void (*update_phy_state)(struct dc_state *state, struct pipe_ctx *pipe_ctx, enum phy_state target_state);
+
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
@@ -271,6 +273,11 @@ void get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_subvp_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index db7b0b155374..226af06278ce 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->disable(stream_enc);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index ced176d17bae..f34c45b19fcb 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -441,6 +441,7 @@ struct dmub_srv {
/* Feature capabilities reported by fw */
struct dmub_feature_caps feature_caps;
+ struct dmub_visual_confirm_color visual_confirm_color;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index d7f3619352f0..7cddbc431b57 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -234,8 +234,7 @@ union dmub_psr_debug_flags {
};
/**
- * DMUB feature capabilities.
- * After DMUB init, driver will query FW capabilities prior to enabling certain features.
+ * DMUB feature capabilities.
*/
struct dmub_feature_caps {
/**
@@ -246,6 +245,16 @@ struct dmub_feature_caps {
uint8_t reserved[6];
};
+struct dmub_visual_confirm_color {
+ /**
+ * Maximum 10 bits color value
+ */
+ uint16_t color_r_cr;
+ uint16_t color_g_y;
+ uint16_t color_b_cb;
+ uint16_t panel_inst;
+};
+
#if defined(__cplusplus)
}
#endif
@@ -645,6 +654,10 @@ enum dmub_cmd_type {
*/
DMUB_CMD__QUERY_FEATURE_CAPS = 6,
/**
+ * Command type used to get visual confirm color.
+ */
+ DMUB_CMD__GET_VISUAL_CONFIRM_COLOR = 8,
+ /**
* Command type used for all PSR commands.
*/
DMUB_CMD__PSR = 64,
@@ -976,8 +989,16 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
uint16_t vtotal;
uint8_t main_pipe_index;
uint8_t phantom_pipe_index;
+ /* Since the microschedule is calculated in terms of OTG lines,
+ * include any scaling factors to make sure we get an accurate
+ * conversion when programming MALL_START_LINE (which is in terms
+ * of HUBP lines). If 4K is being downscaled to 1080p, scale factor
+ * is 1/2 (numerator = 1, denominator = 2).
+ */
+ uint8_t scale_factor_numerator;
+ uint8_t scale_factor_denominator;
uint8_t is_drr;
- uint8_t padding;
+ uint8_t pad[2];
} subvp_data;
struct {
@@ -999,7 +1020,11 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
} vblank_data;
} pipe_config;
- enum mclk_switch_mode mode;
+ /* - subvp_data in the union (pipe_config) takes up 27 bytes.
+ * - Make the "mode" field a uint8_t instead of enum so we only use 1 byte (only
+ * for the DMCUB command, cast to enum once we populate the DMCUB subvp state).
+ */
+ uint8_t mode; // enum mclk_switch_mode
};
/**
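Editor's note, not part of the patch: the comment in the hunk above trims the DMCUB command struct by storing the mode in a uint8_t rather than an enum. A stand-alone sketch (names are illustrative) of the size difference that motivates it, assuming the common case where an enum occupies an int:

#include <stdint.h>
#include <stdio.h>

enum example_mclk_switch_mode { EXAMPLE_SUBVP = 0, EXAMPLE_VBLANK = 1 };

struct example_with_enum  { enum example_mclk_switch_mode mode; }; /* typically 4 bytes */
struct example_with_uint8 { uint8_t mode; };                       /* 1 byte */

int main(void)
{
	printf("enum-sized mode: %zu bytes, uint8_t mode: %zu bytes\n",
	       sizeof(struct example_with_enum), sizeof(struct example_with_uint8));
	return 0;
}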
@@ -2766,6 +2791,31 @@ struct dmub_rb_cmd_query_feature_caps {
struct dmub_cmd_query_feature_caps_data query_feature_caps_data;
};
+/**
+ * Data passed from driver to FW in a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+struct dmub_cmd_visual_confirm_color_data {
+ /**
+ * Visual confirm color for a given panel instance.
+ * See struct dmub_visual_confirm_color.
+ */
+struct dmub_visual_confirm_color visual_confirm_color;
+};
+
+/**
+ * Definition of a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+struct dmub_rb_cmd_get_visual_confirm_color {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+ struct dmub_cmd_visual_confirm_color_data visual_confirm_color_data;
+};
+
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
@@ -3138,6 +3188,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_rb_cmd_query_feature_caps query_feature_caps;
+
+ /**
+ * Definition of a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+ struct dmub_rb_cmd_get_visual_confirm_color visual_confirm_color;
struct dmub_rb_cmd_drr_update drr_update;
struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index ab06c7fc7452..9f3558c0ef11 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -244,13 +244,15 @@ enum {
#define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
#define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f093b49c5e6e..3bf08a60c45c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -119,13 +119,15 @@ enum dc_log_type {
LOG_HDMI_RETIMER_REDRIVER,
LOG_DSC,
LOG_SMU_MSG,
+ LOG_DC2RESERVED4,
+ LOG_DC2RESERVED5,
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
LOG_DP2,
- LOG_SECTION_TOTAL_COUNT
+ LOG_DC2RESERVED12,
};
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index da09ba7589f7..0f39ab9dc5b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
- /* FreeSync HDR */
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
- *payload_size = 0x0A;
-
+ *payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
- *payload_size = 0x10;
- infopacket->hb2 = *payload_size - 1; //-1 for checksum
+ infopacket->hb2 = 0x10;
+ *payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index f21554a1c86c..1115dfc6ae1f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -3129,6 +3129,8 @@
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x15cc
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define mmGCVM_DEBUG 0x15cd
+#define mmGCVM_DEBUG_BASE_IDX 0
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x15ce
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x15cf
@@ -3151,6 +3153,8 @@
#define mmGCVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
#define mmGCVM_L2_CACHE_PARITY_CNTL 0x15d8
#define mmGCVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define mmGCVM_L2_IH_LOG_CNTL 0x15d9
+#define mmGCVM_L2_IH_LOG_CNTL_BASE_IDX 0
#define mmGCVM_L2_CNTL5 0x15dc
#define mmGCVM_L2_CNTL5_BASE_IDX 0
#define mmGCVM_L2_GCR_CNTL 0x15dd
@@ -9796,6 +9800,10 @@
// addressBlock: gc_pwrdec
// base address: 0x3c000
+#define mmCGTS_TCC_DISABLE 0x5006
+#define mmCGTS_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE 0x5007
+#define mmCGTS_USER_TCC_DISABLE_BASE_IDX 1
#define mmSQ_ALU_CLK_CTRL 0x508e
#define mmSQ_ALU_CLK_CTRL_BASE_IDX 1
#define mmSQ_TEX_CLK_CTRL 0x508f
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index a827b0ff8905..83faa276523f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -34547,6 +34547,16 @@
// addressBlock: gc_pwrdec
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
//SQ_ALU_CLK_CTRL
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
index e5b85bf1d7dc..c92c4b83253f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
@@ -4221,6 +4221,7 @@
#define regCP_ECC_FIRSTOCCURRENCE_RING1_BASE_IDX 0
#define regGB_EDC_MODE 0x1e1e
#define regGB_EDC_MODE_BASE_IDX 0
+#define regCP_DEBUG 0x1e1f
#define regCP_DEBUG_BASE_IDX 0
#define regCP_CPC_DEBUG 0x1e21
#define regCP_CPC_DEBUG_BASE_IDX 0
@@ -8306,6 +8307,8 @@
#define regGL1I_GL1R_REP_FGCG_OVERRIDE_BASE_IDX 1
#define regGL1C_STATUS 0x2d41
#define regGL1C_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_CNTL1 0x2d42
+#define regGL1C_UTCL0_CNTL1_BASE_IDX 1
#define regGL1C_UTCL0_CNTL2 0x2d43
#define regGL1C_UTCL0_CNTL2_BASE_IDX 1
#define regGL1C_UTCL0_STATUS 0x2d44
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
index 119c97b28fea..4f08f90856fc 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
@@ -29424,6 +29424,31 @@
#define GL1C_STATUS__TAG_EVICT_MASK 0x04000000L
#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION_MASK 0x78000000L
#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET_MASK 0x80000000L
+//GL1C_UTCL0_CNTL1
+#define GL1C_UTCL0_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define GL1C_UTCL0_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define GL1C_UTCL0_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define GL1C_UTCL0_CNTL1__RESP_MODE__SHIFT 0x3
+#define GL1C_UTCL0_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define GL1C_UTCL0_CNTL1__CLIENTID__SHIFT 0x7
+#define GL1C_UTCL0_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define GL1C_UTCL0_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define GL1C_UTCL0_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define GL1C_UTCL0_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define GL1C_UTCL0_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define GL1C_UTCL0_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define GL1C_UTCL0_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define GL1C_UTCL0_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define GL1C_UTCL0_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define GL1C_UTCL0_CNTL1__RESP_MODE_MASK 0x00000018L
+#define GL1C_UTCL0_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define GL1C_UTCL0_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define GL1C_UTCL0_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define GL1C_UTCL0_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define GL1C_UTCL0_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define GL1C_UTCL0_CNTL1__FORCE_IN_ORDER_MASK 0x06000000L
+#define GL1C_UTCL0_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define GL1C_UTCL0_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
//GL1C_UTCL0_CNTL2
#define GL1C_UTCL0_CNTL2__SPARE__SHIFT 0x0
#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
new file mode 100644
index 000000000000..3b95a59b196c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
@@ -0,0 +1,12086 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_11_0_3_OFFSET_HEADER
+#define _gc_11_0_3_OFFSET_HEADER
+
+
+
+// addressBlock: gc_sdma0_sdma0dec
+// base address: 0x4980
+#define regSDMA0_DEC_START 0x0000
+#define regSDMA0_DEC_START_BASE_IDX 0
+#define regSDMA0_F32_MISC_CNTL 0x000b
+#define regSDMA0_F32_MISC_CNTL_BASE_IDX 0
+#define regSDMA0_GLOBAL_TIMESTAMP_LO 0x000f
+#define regSDMA0_GLOBAL_TIMESTAMP_LO_BASE_IDX 0
+#define regSDMA0_GLOBAL_TIMESTAMP_HI 0x0010
+#define regSDMA0_GLOBAL_TIMESTAMP_HI_BASE_IDX 0
+#define regSDMA0_POWER_CNTL 0x001a
+#define regSDMA0_POWER_CNTL_BASE_IDX 0
+#define regSDMA0_CNTL 0x001c
+#define regSDMA0_CNTL_BASE_IDX 0
+#define regSDMA0_CHICKEN_BITS 0x001d
+#define regSDMA0_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA0_GB_ADDR_CONFIG 0x001e
+#define regSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA0_GB_ADDR_CONFIG_READ 0x001f
+#define regSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA0_RB_RPTR_FETCH 0x0020
+#define regSDMA0_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA0_RB_RPTR_FETCH_HI 0x0021
+#define regSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0022
+#define regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA0_IB_OFFSET_FETCH 0x0023
+#define regSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA0_PROGRAM 0x0024
+#define regSDMA0_PROGRAM_BASE_IDX 0
+#define regSDMA0_STATUS_REG 0x0025
+#define regSDMA0_STATUS_REG_BASE_IDX 0
+#define regSDMA0_STATUS1_REG 0x0026
+#define regSDMA0_STATUS1_REG_BASE_IDX 0
+#define regSDMA0_CNTL1 0x0027
+#define regSDMA0_CNTL1_BASE_IDX 0
+#define regSDMA0_HBM_PAGE_CONFIG 0x0028
+#define regSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA0_UCODE_CHECKSUM 0x0029
+#define regSDMA0_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA0_FREEZE 0x002b
+#define regSDMA0_FREEZE_BASE_IDX 0
+#define regSDMA0_PROCESS_QUANTUM0 0x002c
+#define regSDMA0_PROCESS_QUANTUM0_BASE_IDX 0
+#define regSDMA0_PROCESS_QUANTUM1 0x002d
+#define regSDMA0_PROCESS_QUANTUM1_BASE_IDX 0
+#define regSDMA0_WATCHDOG_CNTL 0x002e
+#define regSDMA0_WATCHDOG_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE_STATUS0 0x002f
+#define regSDMA0_QUEUE_STATUS0_BASE_IDX 0
+#define regSDMA0_EDC_CONFIG 0x0032
+#define regSDMA0_EDC_CONFIG_BASE_IDX 0
+#define regSDMA0_BA_THRESHOLD 0x0033
+#define regSDMA0_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA0_ID 0x0034
+#define regSDMA0_ID_BASE_IDX 0
+#define regSDMA0_VERSION 0x0035
+#define regSDMA0_VERSION_BASE_IDX 0
+#define regSDMA0_EDC_COUNTER 0x0036
+#define regSDMA0_EDC_COUNTER_BASE_IDX 0
+#define regSDMA0_EDC_COUNTER_CLEAR 0x0037
+#define regSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define regSDMA0_STATUS2_REG 0x0038
+#define regSDMA0_STATUS2_REG_BASE_IDX 0
+#define regSDMA0_ATOMIC_CNTL 0x0039
+#define regSDMA0_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA0_ATOMIC_PREOP_LO 0x003a
+#define regSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA0_ATOMIC_PREOP_HI 0x003b
+#define regSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA0_UTCL1_CNTL 0x003c
+#define regSDMA0_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA0_UTCL1_WATERMK 0x003d
+#define regSDMA0_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA0_UTCL1_TIMEOUT 0x003e
+#define regSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA0_UTCL1_PAGE 0x003f
+#define regSDMA0_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_STATUS 0x0040
+#define regSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_STATUS 0x0041
+#define regSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA0_UTCL1_INV0 0x0042
+#define regSDMA0_UTCL1_INV0_BASE_IDX 0
+#define regSDMA0_UTCL1_INV1 0x0043
+#define regSDMA0_UTCL1_INV1_BASE_IDX 0
+#define regSDMA0_UTCL1_INV2 0x0044
+#define regSDMA0_UTCL1_INV2_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_XNACK0 0x0045
+#define regSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_XNACK1 0x0046
+#define regSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_XNACK0 0x0047
+#define regSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_XNACK1 0x0048
+#define regSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA0_RELAX_ORDERING_LUT 0x004a
+#define regSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA0_CHICKEN_BITS_2 0x004b
+#define regSDMA0_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA0_STATUS3_REG 0x004c
+#define regSDMA0_STATUS3_REG_BASE_IDX 0
+#define regSDMA0_PHYSICAL_ADDR_LO 0x004d
+#define regSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_PHYSICAL_ADDR_HI 0x004e
+#define regSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_GLOBAL_QUANTUM 0x004f
+#define regSDMA0_GLOBAL_QUANTUM_BASE_IDX 0
+#define regSDMA0_ERROR_LOG 0x0050
+#define regSDMA0_ERROR_LOG_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG0 0x0051
+#define regSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG1 0x0052
+#define regSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG2 0x0053
+#define regSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG3 0x0054
+#define regSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA0_F32_COUNTER 0x0055
+#define regSDMA0_F32_COUNTER_BASE_IDX 0
+#define regSDMA0_CRD_CNTL 0x005b
+#define regSDMA0_CRD_CNTL_BASE_IDX 0
+#define regSDMA0_RLC_CGCG_CTRL 0x005c
+#define regSDMA0_RLC_CGCG_CTRL_BASE_IDX 0
+#define regSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
+#define regSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA0_AQL_STATUS 0x005f
+#define regSDMA0_AQL_STATUS_BASE_IDX 0
+#define regSDMA0_EA_DBIT_ADDR_DATA 0x0060
+#define regSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA0_EA_DBIT_ADDR_INDEX 0x0061
+#define regSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA0_TLBI_GCR_CNTL 0x0062
+#define regSDMA0_TLBI_GCR_CNTL_BASE_IDX 0
+#define regSDMA0_TILING_CONFIG 0x0063
+#define regSDMA0_TILING_CONFIG_BASE_IDX 0
+#define regSDMA0_HASH 0x0064
+#define regSDMA0_HASH_BASE_IDX 0
+#define regSDMA0_INT_STATUS 0x0070
+#define regSDMA0_INT_STATUS_BASE_IDX 0
+#define regSDMA0_GPU_IOV_VIOLATION_LOG2 0x0071
+#define regSDMA0_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA0_HOLE_ADDR_LO 0x0072
+#define regSDMA0_HOLE_ADDR_LO_BASE_IDX 0
+#define regSDMA0_HOLE_ADDR_HI 0x0073
+#define regSDMA0_HOLE_ADDR_HI_BASE_IDX 0
+#define regSDMA0_CLOCK_GATING_STATUS 0x0075
+#define regSDMA0_CLOCK_GATING_STATUS_BASE_IDX 0
+#define regSDMA0_STATUS4_REG 0x0076
+#define regSDMA0_STATUS4_REG_BASE_IDX 0
+#define regSDMA0_SCRATCH_RAM_DATA 0x0077
+#define regSDMA0_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA0_SCRATCH_RAM_ADDR 0x0078
+#define regSDMA0_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA0_TIMESTAMP_CNTL 0x0079
+#define regSDMA0_TIMESTAMP_CNTL_BASE_IDX 0
+#define regSDMA0_STATUS5_REG 0x007a
+#define regSDMA0_STATUS5_REG_BASE_IDX 0
+#define regSDMA0_QUEUE_RESET_REQ 0x007b
+#define regSDMA0_QUEUE_RESET_REQ_BASE_IDX 0
+#define regSDMA0_STATUS6_REG 0x007c
+#define regSDMA0_STATUS6_REG_BASE_IDX 0
+#define regSDMA0_UCODE1_CHECKSUM 0x007d
+#define regSDMA0_UCODE1_CHECKSUM_BASE_IDX 0
+#define regSDMA0_CE_CTRL 0x007e
+#define regSDMA0_CE_CTRL_BASE_IDX 0
+#define regSDMA0_FED_STATUS 0x007f
+#define regSDMA0_FED_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_CNTL 0x0080
+#define regSDMA0_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_BASE 0x0081
+#define regSDMA0_QUEUE0_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_BASE_HI 0x0082
+#define regSDMA0_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR 0x0083
+#define regSDMA0_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_HI 0x0084
+#define regSDMA0_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR 0x0085
+#define regSDMA0_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_HI 0x0086
+#define regSDMA0_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_HI 0x0088
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_LO 0x0089
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_CNTL 0x008a
+#define regSDMA0_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_RPTR 0x008b
+#define regSDMA0_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_OFFSET 0x008c
+#define regSDMA0_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_BASE_LO 0x008d
+#define regSDMA0_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_BASE_HI 0x008e
+#define regSDMA0_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_SIZE 0x008f
+#define regSDMA0_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE0_SKIP_CNTL 0x0090
+#define regSDMA0_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_CONTEXT_STATUS 0x0091
+#define regSDMA0_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL 0x0092
+#define regSDMA0_QUEUE0_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL_LOG 0x00a9
+#define regSDMA0_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL_OFFSET 0x00ab
+#define regSDMA0_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE0_CSA_ADDR_LO 0x00ac
+#define regSDMA0_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_CSA_ADDR_HI 0x00ad
+#define regSDMA0_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_SCHEDULE_CNTL 0x00ae
+#define regSDMA0_QUEUE0_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_SUB_REMAIN 0x00af
+#define regSDMA0_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE0_PREEMPT 0x00b0
+#define regSDMA0_QUEUE0_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE0_DUMMY_REG 0x00b1
+#define regSDMA0_QUEUE0_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_AQL_CNTL 0x00b4
+#define regSDMA0_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_MINOR_PTR_UPDATE 0x00b5
+#define regSDMA0_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_PREEMPT 0x00b6
+#define regSDMA0_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA0 0x00c0
+#define regSDMA0_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA1 0x00c1
+#define regSDMA0_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA2 0x00c2
+#define regSDMA0_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA3 0x00c3
+#define regSDMA0_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA4 0x00c4
+#define regSDMA0_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA5 0x00c5
+#define regSDMA0_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA6 0x00c6
+#define regSDMA0_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA7 0x00c7
+#define regSDMA0_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA8 0x00c8
+#define regSDMA0_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA9 0x00c9
+#define regSDMA0_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA10 0x00ca
+#define regSDMA0_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_CNTL 0x00cb
+#define regSDMA0_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_CNTL 0x00d8
+#define regSDMA0_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_BASE 0x00d9
+#define regSDMA0_QUEUE1_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_BASE_HI 0x00da
+#define regSDMA0_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR 0x00db
+#define regSDMA0_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_HI 0x00dc
+#define regSDMA0_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR 0x00dd
+#define regSDMA0_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_HI 0x00de
+#define regSDMA0_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_HI 0x00e0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_LO 0x00e1
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_CNTL 0x00e2
+#define regSDMA0_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_RPTR 0x00e3
+#define regSDMA0_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_OFFSET 0x00e4
+#define regSDMA0_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_BASE_LO 0x00e5
+#define regSDMA0_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_BASE_HI 0x00e6
+#define regSDMA0_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_SIZE 0x00e7
+#define regSDMA0_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE1_SKIP_CNTL 0x00e8
+#define regSDMA0_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_CONTEXT_STATUS 0x00e9
+#define regSDMA0_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL 0x00ea
+#define regSDMA0_QUEUE1_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL_LOG 0x0101
+#define regSDMA0_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL_OFFSET 0x0103
+#define regSDMA0_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE1_CSA_ADDR_LO 0x0104
+#define regSDMA0_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_CSA_ADDR_HI 0x0105
+#define regSDMA0_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_SCHEDULE_CNTL 0x0106
+#define regSDMA0_QUEUE1_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_SUB_REMAIN 0x0107
+#define regSDMA0_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE1_PREEMPT 0x0108
+#define regSDMA0_QUEUE1_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE1_DUMMY_REG 0x0109
+#define regSDMA0_QUEUE1_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x010a
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x010b
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_AQL_CNTL 0x010c
+#define regSDMA0_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_MINOR_PTR_UPDATE 0x010d
+#define regSDMA0_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_PREEMPT 0x010e
+#define regSDMA0_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA0 0x0118
+#define regSDMA0_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA1 0x0119
+#define regSDMA0_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA2 0x011a
+#define regSDMA0_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA3 0x011b
+#define regSDMA0_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA4 0x011c
+#define regSDMA0_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA5 0x011d
+#define regSDMA0_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA6 0x011e
+#define regSDMA0_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA7 0x011f
+#define regSDMA0_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA8 0x0120
+#define regSDMA0_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA9 0x0121
+#define regSDMA0_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA10 0x0122
+#define regSDMA0_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_CNTL 0x0123
+#define regSDMA0_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_CNTL 0x0130
+#define regSDMA0_QUEUE2_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_BASE 0x0131
+#define regSDMA0_QUEUE2_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_BASE_HI 0x0132
+#define regSDMA0_QUEUE2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR 0x0133
+#define regSDMA0_QUEUE2_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_HI 0x0134
+#define regSDMA0_QUEUE2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR 0x0135
+#define regSDMA0_QUEUE2_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_HI 0x0136
+#define regSDMA0_QUEUE2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_HI 0x0138
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_LO 0x0139
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_CNTL 0x013a
+#define regSDMA0_QUEUE2_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_RPTR 0x013b
+#define regSDMA0_QUEUE2_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_OFFSET 0x013c
+#define regSDMA0_QUEUE2_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_BASE_LO 0x013d
+#define regSDMA0_QUEUE2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_BASE_HI 0x013e
+#define regSDMA0_QUEUE2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_SIZE 0x013f
+#define regSDMA0_QUEUE2_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE2_SKIP_CNTL 0x0140
+#define regSDMA0_QUEUE2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_CONTEXT_STATUS 0x0141
+#define regSDMA0_QUEUE2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL 0x0142
+#define regSDMA0_QUEUE2_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL_LOG 0x0159
+#define regSDMA0_QUEUE2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL_OFFSET 0x015b
+#define regSDMA0_QUEUE2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE2_CSA_ADDR_LO 0x015c
+#define regSDMA0_QUEUE2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_CSA_ADDR_HI 0x015d
+#define regSDMA0_QUEUE2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_SCHEDULE_CNTL 0x015e
+#define regSDMA0_QUEUE2_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_SUB_REMAIN 0x015f
+#define regSDMA0_QUEUE2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE2_PREEMPT 0x0160
+#define regSDMA0_QUEUE2_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE2_DUMMY_REG 0x0161
+#define regSDMA0_QUEUE2_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI 0x0162
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO 0x0163
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_AQL_CNTL 0x0164
+#define regSDMA0_QUEUE2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_MINOR_PTR_UPDATE 0x0165
+#define regSDMA0_QUEUE2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_PREEMPT 0x0166
+#define regSDMA0_QUEUE2_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA0 0x0170
+#define regSDMA0_QUEUE2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA1 0x0171
+#define regSDMA0_QUEUE2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA2 0x0172
+#define regSDMA0_QUEUE2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA3 0x0173
+#define regSDMA0_QUEUE2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA4 0x0174
+#define regSDMA0_QUEUE2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA5 0x0175
+#define regSDMA0_QUEUE2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA6 0x0176
+#define regSDMA0_QUEUE2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA7 0x0177
+#define regSDMA0_QUEUE2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA8 0x0178
+#define regSDMA0_QUEUE2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA9 0x0179
+#define regSDMA0_QUEUE2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA10 0x017a
+#define regSDMA0_QUEUE2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_CNTL 0x017b
+#define regSDMA0_QUEUE2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_CNTL 0x0188
+#define regSDMA0_QUEUE3_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_BASE 0x0189
+#define regSDMA0_QUEUE3_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_BASE_HI 0x018a
+#define regSDMA0_QUEUE3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR 0x018b
+#define regSDMA0_QUEUE3_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_HI 0x018c
+#define regSDMA0_QUEUE3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR 0x018d
+#define regSDMA0_QUEUE3_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_HI 0x018e
+#define regSDMA0_QUEUE3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_HI 0x0190
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_LO 0x0191
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_CNTL 0x0192
+#define regSDMA0_QUEUE3_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_RPTR 0x0193
+#define regSDMA0_QUEUE3_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_OFFSET 0x0194
+#define regSDMA0_QUEUE3_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_BASE_LO 0x0195
+#define regSDMA0_QUEUE3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_BASE_HI 0x0196
+#define regSDMA0_QUEUE3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_SIZE 0x0197
+#define regSDMA0_QUEUE3_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE3_SKIP_CNTL 0x0198
+#define regSDMA0_QUEUE3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_CONTEXT_STATUS 0x0199
+#define regSDMA0_QUEUE3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL 0x019a
+#define regSDMA0_QUEUE3_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL_LOG 0x01b1
+#define regSDMA0_QUEUE3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL_OFFSET 0x01b3
+#define regSDMA0_QUEUE3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE3_CSA_ADDR_LO 0x01b4
+#define regSDMA0_QUEUE3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_CSA_ADDR_HI 0x01b5
+#define regSDMA0_QUEUE3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_SCHEDULE_CNTL 0x01b6
+#define regSDMA0_QUEUE3_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_SUB_REMAIN 0x01b7
+#define regSDMA0_QUEUE3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE3_PREEMPT 0x01b8
+#define regSDMA0_QUEUE3_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE3_DUMMY_REG 0x01b9
+#define regSDMA0_QUEUE3_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_AQL_CNTL 0x01bc
+#define regSDMA0_QUEUE3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_MINOR_PTR_UPDATE 0x01bd
+#define regSDMA0_QUEUE3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_PREEMPT 0x01be
+#define regSDMA0_QUEUE3_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA0 0x01c8
+#define regSDMA0_QUEUE3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA1 0x01c9
+#define regSDMA0_QUEUE3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA2 0x01ca
+#define regSDMA0_QUEUE3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA3 0x01cb
+#define regSDMA0_QUEUE3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA4 0x01cc
+#define regSDMA0_QUEUE3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA5 0x01cd
+#define regSDMA0_QUEUE3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA6 0x01ce
+#define regSDMA0_QUEUE3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA7 0x01cf
+#define regSDMA0_QUEUE3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA8 0x01d0
+#define regSDMA0_QUEUE3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA9 0x01d1
+#define regSDMA0_QUEUE3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA10 0x01d2
+#define regSDMA0_QUEUE3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_CNTL 0x01d3
+#define regSDMA0_QUEUE3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_CNTL 0x01e0
+#define regSDMA0_QUEUE4_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_BASE 0x01e1
+#define regSDMA0_QUEUE4_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_BASE_HI 0x01e2
+#define regSDMA0_QUEUE4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR 0x01e3
+#define regSDMA0_QUEUE4_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_HI 0x01e4
+#define regSDMA0_QUEUE4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR 0x01e5
+#define regSDMA0_QUEUE4_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_HI 0x01e6
+#define regSDMA0_QUEUE4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_HI 0x01e8
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_LO 0x01e9
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_CNTL 0x01ea
+#define regSDMA0_QUEUE4_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_RPTR 0x01eb
+#define regSDMA0_QUEUE4_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_OFFSET 0x01ec
+#define regSDMA0_QUEUE4_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_BASE_LO 0x01ed
+#define regSDMA0_QUEUE4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_BASE_HI 0x01ee
+#define regSDMA0_QUEUE4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_SIZE 0x01ef
+#define regSDMA0_QUEUE4_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE4_SKIP_CNTL 0x01f0
+#define regSDMA0_QUEUE4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_CONTEXT_STATUS 0x01f1
+#define regSDMA0_QUEUE4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL 0x01f2
+#define regSDMA0_QUEUE4_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL_LOG 0x0209
+#define regSDMA0_QUEUE4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL_OFFSET 0x020b
+#define regSDMA0_QUEUE4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE4_CSA_ADDR_LO 0x020c
+#define regSDMA0_QUEUE4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_CSA_ADDR_HI 0x020d
+#define regSDMA0_QUEUE4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_SCHEDULE_CNTL 0x020e
+#define regSDMA0_QUEUE4_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_SUB_REMAIN 0x020f
+#define regSDMA0_QUEUE4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE4_PREEMPT 0x0210
+#define regSDMA0_QUEUE4_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE4_DUMMY_REG 0x0211
+#define regSDMA0_QUEUE4_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI 0x0212
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO 0x0213
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_AQL_CNTL 0x0214
+#define regSDMA0_QUEUE4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_MINOR_PTR_UPDATE 0x0215
+#define regSDMA0_QUEUE4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_PREEMPT 0x0216
+#define regSDMA0_QUEUE4_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA0 0x0220
+#define regSDMA0_QUEUE4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA1 0x0221
+#define regSDMA0_QUEUE4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA2 0x0222
+#define regSDMA0_QUEUE4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA3 0x0223
+#define regSDMA0_QUEUE4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA4 0x0224
+#define regSDMA0_QUEUE4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA5 0x0225
+#define regSDMA0_QUEUE4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA6 0x0226
+#define regSDMA0_QUEUE4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA7 0x0227
+#define regSDMA0_QUEUE4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA8 0x0228
+#define regSDMA0_QUEUE4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA9 0x0229
+#define regSDMA0_QUEUE4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA10 0x022a
+#define regSDMA0_QUEUE4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_CNTL 0x022b
+#define regSDMA0_QUEUE4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_CNTL 0x0238
+#define regSDMA0_QUEUE5_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_BASE 0x0239
+#define regSDMA0_QUEUE5_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_BASE_HI 0x023a
+#define regSDMA0_QUEUE5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR 0x023b
+#define regSDMA0_QUEUE5_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_HI 0x023c
+#define regSDMA0_QUEUE5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR 0x023d
+#define regSDMA0_QUEUE5_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_HI 0x023e
+#define regSDMA0_QUEUE5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_HI 0x0240
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_LO 0x0241
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_CNTL 0x0242
+#define regSDMA0_QUEUE5_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_RPTR 0x0243
+#define regSDMA0_QUEUE5_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_OFFSET 0x0244
+#define regSDMA0_QUEUE5_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_BASE_LO 0x0245
+#define regSDMA0_QUEUE5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_BASE_HI 0x0246
+#define regSDMA0_QUEUE5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_SIZE 0x0247
+#define regSDMA0_QUEUE5_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE5_SKIP_CNTL 0x0248
+#define regSDMA0_QUEUE5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_CONTEXT_STATUS 0x0249
+#define regSDMA0_QUEUE5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL 0x024a
+#define regSDMA0_QUEUE5_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL_LOG 0x0261
+#define regSDMA0_QUEUE5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL_OFFSET 0x0263
+#define regSDMA0_QUEUE5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE5_CSA_ADDR_LO 0x0264
+#define regSDMA0_QUEUE5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_CSA_ADDR_HI 0x0265
+#define regSDMA0_QUEUE5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_SCHEDULE_CNTL 0x0266
+#define regSDMA0_QUEUE5_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_SUB_REMAIN 0x0267
+#define regSDMA0_QUEUE5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE5_PREEMPT 0x0268
+#define regSDMA0_QUEUE5_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE5_DUMMY_REG 0x0269
+#define regSDMA0_QUEUE5_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI 0x026a
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO 0x026b
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_AQL_CNTL 0x026c
+#define regSDMA0_QUEUE5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_MINOR_PTR_UPDATE 0x026d
+#define regSDMA0_QUEUE5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_PREEMPT 0x026e
+#define regSDMA0_QUEUE5_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA0 0x0278
+#define regSDMA0_QUEUE5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA1 0x0279
+#define regSDMA0_QUEUE5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA2 0x027a
+#define regSDMA0_QUEUE5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA3 0x027b
+#define regSDMA0_QUEUE5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA4 0x027c
+#define regSDMA0_QUEUE5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA5 0x027d
+#define regSDMA0_QUEUE5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA6 0x027e
+#define regSDMA0_QUEUE5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA7 0x027f
+#define regSDMA0_QUEUE5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA8 0x0280
+#define regSDMA0_QUEUE5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA9 0x0281
+#define regSDMA0_QUEUE5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA10 0x0282
+#define regSDMA0_QUEUE5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_CNTL 0x0283
+#define regSDMA0_QUEUE5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_CNTL 0x0290
+#define regSDMA0_QUEUE6_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_BASE 0x0291
+#define regSDMA0_QUEUE6_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_BASE_HI 0x0292
+#define regSDMA0_QUEUE6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR 0x0293
+#define regSDMA0_QUEUE6_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_HI 0x0294
+#define regSDMA0_QUEUE6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR 0x0295
+#define regSDMA0_QUEUE6_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_HI 0x0296
+#define regSDMA0_QUEUE6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_HI 0x0298
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_LO 0x0299
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_CNTL 0x029a
+#define regSDMA0_QUEUE6_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_RPTR 0x029b
+#define regSDMA0_QUEUE6_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_OFFSET 0x029c
+#define regSDMA0_QUEUE6_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_BASE_LO 0x029d
+#define regSDMA0_QUEUE6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_BASE_HI 0x029e
+#define regSDMA0_QUEUE6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_SIZE 0x029f
+#define regSDMA0_QUEUE6_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE6_SKIP_CNTL 0x02a0
+#define regSDMA0_QUEUE6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_CONTEXT_STATUS 0x02a1
+#define regSDMA0_QUEUE6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL 0x02a2
+#define regSDMA0_QUEUE6_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL_LOG 0x02b9
+#define regSDMA0_QUEUE6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL_OFFSET 0x02bb
+#define regSDMA0_QUEUE6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE6_CSA_ADDR_LO 0x02bc
+#define regSDMA0_QUEUE6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_CSA_ADDR_HI 0x02bd
+#define regSDMA0_QUEUE6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_SCHEDULE_CNTL 0x02be
+#define regSDMA0_QUEUE6_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_SUB_REMAIN 0x02bf
+#define regSDMA0_QUEUE6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE6_PREEMPT 0x02c0
+#define regSDMA0_QUEUE6_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE6_DUMMY_REG 0x02c1
+#define regSDMA0_QUEUE6_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_AQL_CNTL 0x02c4
+#define regSDMA0_QUEUE6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_MINOR_PTR_UPDATE 0x02c5
+#define regSDMA0_QUEUE6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_PREEMPT 0x02c6
+#define regSDMA0_QUEUE6_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA0 0x02d0
+#define regSDMA0_QUEUE6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA1 0x02d1
+#define regSDMA0_QUEUE6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA2 0x02d2
+#define regSDMA0_QUEUE6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA3 0x02d3
+#define regSDMA0_QUEUE6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA4 0x02d4
+#define regSDMA0_QUEUE6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA5 0x02d5
+#define regSDMA0_QUEUE6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA6 0x02d6
+#define regSDMA0_QUEUE6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA7 0x02d7
+#define regSDMA0_QUEUE6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA8 0x02d8
+#define regSDMA0_QUEUE6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA9 0x02d9
+#define regSDMA0_QUEUE6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA10 0x02da
+#define regSDMA0_QUEUE6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_CNTL 0x02db
+#define regSDMA0_QUEUE6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_CNTL 0x02e8
+#define regSDMA0_QUEUE7_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_BASE 0x02e9
+#define regSDMA0_QUEUE7_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_BASE_HI 0x02ea
+#define regSDMA0_QUEUE7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR 0x02eb
+#define regSDMA0_QUEUE7_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_HI 0x02ec
+#define regSDMA0_QUEUE7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR 0x02ed
+#define regSDMA0_QUEUE7_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_HI 0x02ee
+#define regSDMA0_QUEUE7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_HI 0x02f0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_LO 0x02f1
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_CNTL 0x02f2
+#define regSDMA0_QUEUE7_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_RPTR 0x02f3
+#define regSDMA0_QUEUE7_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_OFFSET 0x02f4
+#define regSDMA0_QUEUE7_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_BASE_LO 0x02f5
+#define regSDMA0_QUEUE7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_BASE_HI 0x02f6
+#define regSDMA0_QUEUE7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_SIZE 0x02f7
+#define regSDMA0_QUEUE7_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE7_SKIP_CNTL 0x02f8
+#define regSDMA0_QUEUE7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_CONTEXT_STATUS 0x02f9
+#define regSDMA0_QUEUE7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL 0x02fa
+#define regSDMA0_QUEUE7_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL_LOG 0x0311
+#define regSDMA0_QUEUE7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL_OFFSET 0x0313
+#define regSDMA0_QUEUE7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE7_CSA_ADDR_LO 0x0314
+#define regSDMA0_QUEUE7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_CSA_ADDR_HI 0x0315
+#define regSDMA0_QUEUE7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_SCHEDULE_CNTL 0x0316
+#define regSDMA0_QUEUE7_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_SUB_REMAIN 0x0317
+#define regSDMA0_QUEUE7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE7_PREEMPT 0x0318
+#define regSDMA0_QUEUE7_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE7_DUMMY_REG 0x0319
+#define regSDMA0_QUEUE7_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI 0x031a
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO 0x031b
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_AQL_CNTL 0x031c
+#define regSDMA0_QUEUE7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_MINOR_PTR_UPDATE 0x031d
+#define regSDMA0_QUEUE7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_PREEMPT 0x031e
+#define regSDMA0_QUEUE7_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA0 0x0328
+#define regSDMA0_QUEUE7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA1 0x0329
+#define regSDMA0_QUEUE7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA2 0x032a
+#define regSDMA0_QUEUE7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA3 0x032b
+#define regSDMA0_QUEUE7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA4 0x032c
+#define regSDMA0_QUEUE7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA5 0x032d
+#define regSDMA0_QUEUE7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA6 0x032e
+#define regSDMA0_QUEUE7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA7 0x032f
+#define regSDMA0_QUEUE7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA8 0x0330
+#define regSDMA0_QUEUE7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA9 0x0331
+#define regSDMA0_QUEUE7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA10 0x0332
+#define regSDMA0_QUEUE7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_CNTL 0x0333
+#define regSDMA0_QUEUE7_MIDCMD_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sdma0_sdma1dec
+// base address: 0x6180
+#define regSDMA1_DEC_START 0x0600
+#define regSDMA1_DEC_START_BASE_IDX 0
+#define regSDMA1_F32_MISC_CNTL 0x060b
+#define regSDMA1_F32_MISC_CNTL_BASE_IDX 0
+#define regSDMA1_GLOBAL_TIMESTAMP_LO 0x060f
+#define regSDMA1_GLOBAL_TIMESTAMP_LO_BASE_IDX 0
+#define regSDMA1_GLOBAL_TIMESTAMP_HI 0x0610
+#define regSDMA1_GLOBAL_TIMESTAMP_HI_BASE_IDX 0
+#define regSDMA1_POWER_CNTL 0x061a
+#define regSDMA1_POWER_CNTL_BASE_IDX 0
+#define regSDMA1_CNTL 0x061c
+#define regSDMA1_CNTL_BASE_IDX 0
+#define regSDMA1_CHICKEN_BITS 0x061d
+#define regSDMA1_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA1_GB_ADDR_CONFIG 0x061e
+#define regSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA1_GB_ADDR_CONFIG_READ 0x061f
+#define regSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA1_RB_RPTR_FETCH 0x0620
+#define regSDMA1_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA1_RB_RPTR_FETCH_HI 0x0621
+#define regSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0622
+#define regSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA1_IB_OFFSET_FETCH 0x0623
+#define regSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA1_PROGRAM 0x0624
+#define regSDMA1_PROGRAM_BASE_IDX 0
+#define regSDMA1_STATUS_REG 0x0625
+#define regSDMA1_STATUS_REG_BASE_IDX 0
+#define regSDMA1_STATUS1_REG 0x0626
+#define regSDMA1_STATUS1_REG_BASE_IDX 0
+#define regSDMA1_CNTL1 0x0627
+#define regSDMA1_CNTL1_BASE_IDX 0
+#define regSDMA1_HBM_PAGE_CONFIG 0x0628
+#define regSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA1_UCODE_CHECKSUM 0x0629
+#define regSDMA1_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA1_FREEZE 0x062b
+#define regSDMA1_FREEZE_BASE_IDX 0
+#define regSDMA1_PROCESS_QUANTUM0 0x062c
+#define regSDMA1_PROCESS_QUANTUM0_BASE_IDX 0
+#define regSDMA1_PROCESS_QUANTUM1 0x062d
+#define regSDMA1_PROCESS_QUANTUM1_BASE_IDX 0
+#define regSDMA1_WATCHDOG_CNTL 0x062e
+#define regSDMA1_WATCHDOG_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE_STATUS0 0x062f
+#define regSDMA1_QUEUE_STATUS0_BASE_IDX 0
+#define regSDMA1_EDC_CONFIG 0x0632
+#define regSDMA1_EDC_CONFIG_BASE_IDX 0
+#define regSDMA1_BA_THRESHOLD 0x0633
+#define regSDMA1_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA1_ID 0x0634
+#define regSDMA1_ID_BASE_IDX 0
+#define regSDMA1_VERSION 0x0635
+#define regSDMA1_VERSION_BASE_IDX 0
+#define regSDMA1_EDC_COUNTER 0x0636
+#define regSDMA1_EDC_COUNTER_BASE_IDX 0
+#define regSDMA1_EDC_COUNTER_CLEAR 0x0637
+#define regSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define regSDMA1_STATUS2_REG 0x0638
+#define regSDMA1_STATUS2_REG_BASE_IDX 0
+#define regSDMA1_ATOMIC_CNTL 0x0639
+#define regSDMA1_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA1_ATOMIC_PREOP_LO 0x063a
+#define regSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA1_ATOMIC_PREOP_HI 0x063b
+#define regSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA1_UTCL1_CNTL 0x063c
+#define regSDMA1_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA1_UTCL1_WATERMK 0x063d
+#define regSDMA1_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA1_UTCL1_TIMEOUT 0x063e
+#define regSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA1_UTCL1_PAGE 0x063f
+#define regSDMA1_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_STATUS 0x0640
+#define regSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_STATUS 0x0641
+#define regSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA1_UTCL1_INV0 0x0642
+#define regSDMA1_UTCL1_INV0_BASE_IDX 0
+#define regSDMA1_UTCL1_INV1 0x0643
+#define regSDMA1_UTCL1_INV1_BASE_IDX 0
+#define regSDMA1_UTCL1_INV2 0x0644
+#define regSDMA1_UTCL1_INV2_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_XNACK0 0x0645
+#define regSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_XNACK1 0x0646
+#define regSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_XNACK0 0x0647
+#define regSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_XNACK1 0x0648
+#define regSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA1_RELAX_ORDERING_LUT 0x064a
+#define regSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA1_CHICKEN_BITS_2 0x064b
+#define regSDMA1_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA1_STATUS3_REG 0x064c
+#define regSDMA1_STATUS3_REG_BASE_IDX 0
+#define regSDMA1_PHYSICAL_ADDR_LO 0x064d
+#define regSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_PHYSICAL_ADDR_HI 0x064e
+#define regSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_GLOBAL_QUANTUM 0x064f
+#define regSDMA1_GLOBAL_QUANTUM_BASE_IDX 0
+#define regSDMA1_ERROR_LOG 0x0650
+#define regSDMA1_ERROR_LOG_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG0 0x0651
+#define regSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG1 0x0652
+#define regSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG2 0x0653
+#define regSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG3 0x0654
+#define regSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA1_F32_COUNTER 0x0655
+#define regSDMA1_F32_COUNTER_BASE_IDX 0
+#define regSDMA1_CRD_CNTL 0x065b
+#define regSDMA1_CRD_CNTL_BASE_IDX 0
+#define regSDMA1_RLC_CGCG_CTRL 0x065c
+#define regSDMA1_RLC_CGCG_CTRL_BASE_IDX 0
+#define regSDMA1_GPU_IOV_VIOLATION_LOG 0x065d
+#define regSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA1_AQL_STATUS 0x065f
+#define regSDMA1_AQL_STATUS_BASE_IDX 0
+#define regSDMA1_EA_DBIT_ADDR_DATA 0x0660
+#define regSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA1_EA_DBIT_ADDR_INDEX 0x0661
+#define regSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA1_TLBI_GCR_CNTL 0x0662
+#define regSDMA1_TLBI_GCR_CNTL_BASE_IDX 0
+#define regSDMA1_TILING_CONFIG 0x0663
+#define regSDMA1_TILING_CONFIG_BASE_IDX 0
+#define regSDMA1_HASH 0x0664
+#define regSDMA1_HASH_BASE_IDX 0
+#define regSDMA1_INT_STATUS 0x0670
+#define regSDMA1_INT_STATUS_BASE_IDX 0
+#define regSDMA1_GPU_IOV_VIOLATION_LOG2 0x0671
+#define regSDMA1_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA1_HOLE_ADDR_LO 0x0672
+#define regSDMA1_HOLE_ADDR_LO_BASE_IDX 0
+#define regSDMA1_HOLE_ADDR_HI 0x0673
+#define regSDMA1_HOLE_ADDR_HI_BASE_IDX 0
+#define regSDMA1_CLOCK_GATING_STATUS 0x0675
+#define regSDMA1_CLOCK_GATING_STATUS_BASE_IDX 0
+#define regSDMA1_STATUS4_REG 0x0676
+#define regSDMA1_STATUS4_REG_BASE_IDX 0
+#define regSDMA1_SCRATCH_RAM_DATA 0x0677
+#define regSDMA1_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA1_SCRATCH_RAM_ADDR 0x0678
+#define regSDMA1_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA1_TIMESTAMP_CNTL 0x0679
+#define regSDMA1_TIMESTAMP_CNTL_BASE_IDX 0
+#define regSDMA1_STATUS5_REG 0x067a
+#define regSDMA1_STATUS5_REG_BASE_IDX 0
+#define regSDMA1_QUEUE_RESET_REQ 0x067b
+#define regSDMA1_QUEUE_RESET_REQ_BASE_IDX 0
+#define regSDMA1_STATUS6_REG 0x067c
+#define regSDMA1_STATUS6_REG_BASE_IDX 0
+#define regSDMA1_UCODE1_CHECKSUM 0x067d
+#define regSDMA1_UCODE1_CHECKSUM_BASE_IDX 0
+#define regSDMA1_CE_CTRL 0x067e
+#define regSDMA1_CE_CTRL_BASE_IDX 0
+#define regSDMA1_FED_STATUS 0x067f
+#define regSDMA1_FED_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_CNTL 0x0680
+#define regSDMA1_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_BASE 0x0681
+#define regSDMA1_QUEUE0_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_BASE_HI 0x0682
+#define regSDMA1_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR 0x0683
+#define regSDMA1_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_HI 0x0684
+#define regSDMA1_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR 0x0685
+#define regSDMA1_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_HI 0x0686
+#define regSDMA1_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_HI 0x0688
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_LO 0x0689
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_CNTL 0x068a
+#define regSDMA1_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_RPTR 0x068b
+#define regSDMA1_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_OFFSET 0x068c
+#define regSDMA1_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_BASE_LO 0x068d
+#define regSDMA1_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_BASE_HI 0x068e
+#define regSDMA1_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_SIZE 0x068f
+#define regSDMA1_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE0_SKIP_CNTL 0x0690
+#define regSDMA1_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_CONTEXT_STATUS 0x0691
+#define regSDMA1_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL 0x0692
+#define regSDMA1_QUEUE0_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL_LOG 0x06a9
+#define regSDMA1_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL_OFFSET 0x06ab
+#define regSDMA1_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE0_CSA_ADDR_LO 0x06ac
+#define regSDMA1_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_CSA_ADDR_HI 0x06ad
+#define regSDMA1_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_SCHEDULE_CNTL 0x06ae
+#define regSDMA1_QUEUE0_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_SUB_REMAIN 0x06af
+#define regSDMA1_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE0_PREEMPT 0x06b0
+#define regSDMA1_QUEUE0_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE0_DUMMY_REG 0x06b1
+#define regSDMA1_QUEUE0_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x06b2
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x06b3
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_AQL_CNTL 0x06b4
+#define regSDMA1_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_MINOR_PTR_UPDATE 0x06b5
+#define regSDMA1_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_PREEMPT 0x06b6
+#define regSDMA1_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA0 0x06c0
+#define regSDMA1_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA1 0x06c1
+#define regSDMA1_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA2 0x06c2
+#define regSDMA1_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA3 0x06c3
+#define regSDMA1_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA4 0x06c4
+#define regSDMA1_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA5 0x06c5
+#define regSDMA1_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA6 0x06c6
+#define regSDMA1_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA7 0x06c7
+#define regSDMA1_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA8 0x06c8
+#define regSDMA1_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA9 0x06c9
+#define regSDMA1_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA10 0x06ca
+#define regSDMA1_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_CNTL 0x06cb
+#define regSDMA1_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_CNTL 0x06d8
+#define regSDMA1_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_BASE 0x06d9
+#define regSDMA1_QUEUE1_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_BASE_HI 0x06da
+#define regSDMA1_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR 0x06db
+#define regSDMA1_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_HI 0x06dc
+#define regSDMA1_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR 0x06dd
+#define regSDMA1_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_HI 0x06de
+#define regSDMA1_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_HI 0x06e0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_LO 0x06e1
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_CNTL 0x06e2
+#define regSDMA1_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_RPTR 0x06e3
+#define regSDMA1_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_OFFSET 0x06e4
+#define regSDMA1_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_BASE_LO 0x06e5
+#define regSDMA1_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_BASE_HI 0x06e6
+#define regSDMA1_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_SIZE 0x06e7
+#define regSDMA1_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE1_SKIP_CNTL 0x06e8
+#define regSDMA1_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_CONTEXT_STATUS 0x06e9
+#define regSDMA1_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL 0x06ea
+#define regSDMA1_QUEUE1_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL_LOG 0x0701
+#define regSDMA1_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL_OFFSET 0x0703
+#define regSDMA1_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE1_CSA_ADDR_LO 0x0704
+#define regSDMA1_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_CSA_ADDR_HI 0x0705
+#define regSDMA1_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_SCHEDULE_CNTL 0x0706
+#define regSDMA1_QUEUE1_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_SUB_REMAIN 0x0707
+#define regSDMA1_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE1_PREEMPT 0x0708
+#define regSDMA1_QUEUE1_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE1_DUMMY_REG 0x0709
+#define regSDMA1_QUEUE1_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x070a
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x070b
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_AQL_CNTL 0x070c
+#define regSDMA1_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_MINOR_PTR_UPDATE 0x070d
+#define regSDMA1_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_PREEMPT 0x070e
+#define regSDMA1_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA0 0x0718
+#define regSDMA1_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA1 0x0719
+#define regSDMA1_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA2 0x071a
+#define regSDMA1_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA3 0x071b
+#define regSDMA1_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA4 0x071c
+#define regSDMA1_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA5 0x071d
+#define regSDMA1_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA6 0x071e
+#define regSDMA1_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA7 0x071f
+#define regSDMA1_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA8 0x0720
+#define regSDMA1_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA9 0x0721
+#define regSDMA1_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA10 0x0722
+#define regSDMA1_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_CNTL 0x0723
+#define regSDMA1_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_CNTL 0x0730
+#define regSDMA1_QUEUE2_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_BASE 0x0731
+#define regSDMA1_QUEUE2_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_BASE_HI 0x0732
+#define regSDMA1_QUEUE2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR 0x0733
+#define regSDMA1_QUEUE2_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_HI 0x0734
+#define regSDMA1_QUEUE2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR 0x0735
+#define regSDMA1_QUEUE2_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_HI 0x0736
+#define regSDMA1_QUEUE2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_HI 0x0738
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_LO 0x0739
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_CNTL 0x073a
+#define regSDMA1_QUEUE2_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_RPTR 0x073b
+#define regSDMA1_QUEUE2_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_OFFSET 0x073c
+#define regSDMA1_QUEUE2_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_BASE_LO 0x073d
+#define regSDMA1_QUEUE2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_BASE_HI 0x073e
+#define regSDMA1_QUEUE2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_SIZE 0x073f
+#define regSDMA1_QUEUE2_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE2_SKIP_CNTL 0x0740
+#define regSDMA1_QUEUE2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_CONTEXT_STATUS 0x0741
+#define regSDMA1_QUEUE2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL 0x0742
+#define regSDMA1_QUEUE2_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL_LOG 0x0759
+#define regSDMA1_QUEUE2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL_OFFSET 0x075b
+#define regSDMA1_QUEUE2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE2_CSA_ADDR_LO 0x075c
+#define regSDMA1_QUEUE2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_CSA_ADDR_HI 0x075d
+#define regSDMA1_QUEUE2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_SCHEDULE_CNTL 0x075e
+#define regSDMA1_QUEUE2_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_SUB_REMAIN 0x075f
+#define regSDMA1_QUEUE2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE2_PREEMPT 0x0760
+#define regSDMA1_QUEUE2_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE2_DUMMY_REG 0x0761
+#define regSDMA1_QUEUE2_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI 0x0762
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO 0x0763
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_AQL_CNTL 0x0764
+#define regSDMA1_QUEUE2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_MINOR_PTR_UPDATE 0x0765
+#define regSDMA1_QUEUE2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_PREEMPT 0x0766
+#define regSDMA1_QUEUE2_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA0 0x0770
+#define regSDMA1_QUEUE2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA1 0x0771
+#define regSDMA1_QUEUE2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA2 0x0772
+#define regSDMA1_QUEUE2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA3 0x0773
+#define regSDMA1_QUEUE2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA4 0x0774
+#define regSDMA1_QUEUE2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA5 0x0775
+#define regSDMA1_QUEUE2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA6 0x0776
+#define regSDMA1_QUEUE2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA7 0x0777
+#define regSDMA1_QUEUE2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA8 0x0778
+#define regSDMA1_QUEUE2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA9 0x0779
+#define regSDMA1_QUEUE2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA10 0x077a
+#define regSDMA1_QUEUE2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_CNTL 0x077b
+#define regSDMA1_QUEUE2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_CNTL 0x0788
+#define regSDMA1_QUEUE3_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_BASE 0x0789
+#define regSDMA1_QUEUE3_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_BASE_HI 0x078a
+#define regSDMA1_QUEUE3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR 0x078b
+#define regSDMA1_QUEUE3_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_HI 0x078c
+#define regSDMA1_QUEUE3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR 0x078d
+#define regSDMA1_QUEUE3_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_HI 0x078e
+#define regSDMA1_QUEUE3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_HI 0x0790
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_LO 0x0791
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_CNTL 0x0792
+#define regSDMA1_QUEUE3_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_RPTR 0x0793
+#define regSDMA1_QUEUE3_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_OFFSET 0x0794
+#define regSDMA1_QUEUE3_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_BASE_LO 0x0795
+#define regSDMA1_QUEUE3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_BASE_HI 0x0796
+#define regSDMA1_QUEUE3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_SIZE 0x0797
+#define regSDMA1_QUEUE3_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE3_SKIP_CNTL 0x0798
+#define regSDMA1_QUEUE3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_CONTEXT_STATUS 0x0799
+#define regSDMA1_QUEUE3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL 0x079a
+#define regSDMA1_QUEUE3_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL_LOG 0x07b1
+#define regSDMA1_QUEUE3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL_OFFSET 0x07b3
+#define regSDMA1_QUEUE3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE3_CSA_ADDR_LO 0x07b4
+#define regSDMA1_QUEUE3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_CSA_ADDR_HI 0x07b5
+#define regSDMA1_QUEUE3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_SCHEDULE_CNTL 0x07b6
+#define regSDMA1_QUEUE3_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_SUB_REMAIN 0x07b7
+#define regSDMA1_QUEUE3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE3_PREEMPT 0x07b8
+#define regSDMA1_QUEUE3_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE3_DUMMY_REG 0x07b9
+#define regSDMA1_QUEUE3_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI 0x07ba
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO 0x07bb
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_AQL_CNTL 0x07bc
+#define regSDMA1_QUEUE3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_MINOR_PTR_UPDATE 0x07bd
+#define regSDMA1_QUEUE3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_PREEMPT 0x07be
+#define regSDMA1_QUEUE3_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA0 0x07c8
+#define regSDMA1_QUEUE3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA1 0x07c9
+#define regSDMA1_QUEUE3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA2 0x07ca
+#define regSDMA1_QUEUE3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA3 0x07cb
+#define regSDMA1_QUEUE3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA4 0x07cc
+#define regSDMA1_QUEUE3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA5 0x07cd
+#define regSDMA1_QUEUE3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA6 0x07ce
+#define regSDMA1_QUEUE3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA7 0x07cf
+#define regSDMA1_QUEUE3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA8 0x07d0
+#define regSDMA1_QUEUE3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA9 0x07d1
+#define regSDMA1_QUEUE3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA10 0x07d2
+#define regSDMA1_QUEUE3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_CNTL 0x07d3
+#define regSDMA1_QUEUE3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_CNTL 0x07e0
+#define regSDMA1_QUEUE4_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_BASE 0x07e1
+#define regSDMA1_QUEUE4_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_BASE_HI 0x07e2
+#define regSDMA1_QUEUE4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR 0x07e3
+#define regSDMA1_QUEUE4_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_HI 0x07e4
+#define regSDMA1_QUEUE4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR 0x07e5
+#define regSDMA1_QUEUE4_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_HI 0x07e6
+#define regSDMA1_QUEUE4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_HI 0x07e8
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_LO 0x07e9
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_CNTL 0x07ea
+#define regSDMA1_QUEUE4_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_RPTR 0x07eb
+#define regSDMA1_QUEUE4_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_OFFSET 0x07ec
+#define regSDMA1_QUEUE4_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_BASE_LO 0x07ed
+#define regSDMA1_QUEUE4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_BASE_HI 0x07ee
+#define regSDMA1_QUEUE4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_SIZE 0x07ef
+#define regSDMA1_QUEUE4_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE4_SKIP_CNTL 0x07f0
+#define regSDMA1_QUEUE4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_CONTEXT_STATUS 0x07f1
+#define regSDMA1_QUEUE4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL 0x07f2
+#define regSDMA1_QUEUE4_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL_LOG 0x0809
+#define regSDMA1_QUEUE4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL_OFFSET 0x080b
+#define regSDMA1_QUEUE4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE4_CSA_ADDR_LO 0x080c
+#define regSDMA1_QUEUE4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_CSA_ADDR_HI 0x080d
+#define regSDMA1_QUEUE4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_SCHEDULE_CNTL 0x080e
+#define regSDMA1_QUEUE4_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_SUB_REMAIN 0x080f
+#define regSDMA1_QUEUE4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE4_PREEMPT 0x0810
+#define regSDMA1_QUEUE4_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE4_DUMMY_REG 0x0811
+#define regSDMA1_QUEUE4_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI 0x0812
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO 0x0813
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_AQL_CNTL 0x0814
+#define regSDMA1_QUEUE4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_MINOR_PTR_UPDATE 0x0815
+#define regSDMA1_QUEUE4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_PREEMPT 0x0816
+#define regSDMA1_QUEUE4_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA0 0x0820
+#define regSDMA1_QUEUE4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA1 0x0821
+#define regSDMA1_QUEUE4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA2 0x0822
+#define regSDMA1_QUEUE4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA3 0x0823
+#define regSDMA1_QUEUE4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA4 0x0824
+#define regSDMA1_QUEUE4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA5 0x0825
+#define regSDMA1_QUEUE4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA6 0x0826
+#define regSDMA1_QUEUE4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA7 0x0827
+#define regSDMA1_QUEUE4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA8 0x0828
+#define regSDMA1_QUEUE4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA9 0x0829
+#define regSDMA1_QUEUE4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA10 0x082a
+#define regSDMA1_QUEUE4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_CNTL 0x082b
+#define regSDMA1_QUEUE4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_CNTL 0x0838
+#define regSDMA1_QUEUE5_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_BASE 0x0839
+#define regSDMA1_QUEUE5_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_BASE_HI 0x083a
+#define regSDMA1_QUEUE5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR 0x083b
+#define regSDMA1_QUEUE5_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_HI 0x083c
+#define regSDMA1_QUEUE5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR 0x083d
+#define regSDMA1_QUEUE5_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_HI 0x083e
+#define regSDMA1_QUEUE5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_HI 0x0840
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_LO 0x0841
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_CNTL 0x0842
+#define regSDMA1_QUEUE5_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_RPTR 0x0843
+#define regSDMA1_QUEUE5_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_OFFSET 0x0844
+#define regSDMA1_QUEUE5_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_BASE_LO 0x0845
+#define regSDMA1_QUEUE5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_BASE_HI 0x0846
+#define regSDMA1_QUEUE5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_SIZE 0x0847
+#define regSDMA1_QUEUE5_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE5_SKIP_CNTL 0x0848
+#define regSDMA1_QUEUE5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_CONTEXT_STATUS 0x0849
+#define regSDMA1_QUEUE5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL 0x084a
+#define regSDMA1_QUEUE5_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL_LOG 0x0861
+#define regSDMA1_QUEUE5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL_OFFSET 0x0863
+#define regSDMA1_QUEUE5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE5_CSA_ADDR_LO 0x0864
+#define regSDMA1_QUEUE5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_CSA_ADDR_HI 0x0865
+#define regSDMA1_QUEUE5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_SCHEDULE_CNTL 0x0866
+#define regSDMA1_QUEUE5_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_SUB_REMAIN 0x0867
+#define regSDMA1_QUEUE5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE5_PREEMPT 0x0868
+#define regSDMA1_QUEUE5_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE5_DUMMY_REG 0x0869
+#define regSDMA1_QUEUE5_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI 0x086a
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO 0x086b
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_AQL_CNTL 0x086c
+#define regSDMA1_QUEUE5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_MINOR_PTR_UPDATE 0x086d
+#define regSDMA1_QUEUE5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_PREEMPT 0x086e
+#define regSDMA1_QUEUE5_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA0 0x0878
+#define regSDMA1_QUEUE5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA1 0x0879
+#define regSDMA1_QUEUE5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA2 0x087a
+#define regSDMA1_QUEUE5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA3 0x087b
+#define regSDMA1_QUEUE5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA4 0x087c
+#define regSDMA1_QUEUE5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA5 0x087d
+#define regSDMA1_QUEUE5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA6 0x087e
+#define regSDMA1_QUEUE5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA7 0x087f
+#define regSDMA1_QUEUE5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA8 0x0880
+#define regSDMA1_QUEUE5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA9 0x0881
+#define regSDMA1_QUEUE5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA10 0x0882
+#define regSDMA1_QUEUE5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_CNTL 0x0883
+#define regSDMA1_QUEUE5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_CNTL 0x0890
+#define regSDMA1_QUEUE6_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_BASE 0x0891
+#define regSDMA1_QUEUE6_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_BASE_HI 0x0892
+#define regSDMA1_QUEUE6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR 0x0893
+#define regSDMA1_QUEUE6_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_HI 0x0894
+#define regSDMA1_QUEUE6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR 0x0895
+#define regSDMA1_QUEUE6_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_HI 0x0896
+#define regSDMA1_QUEUE6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_HI 0x0898
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_LO 0x0899
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_CNTL 0x089a
+#define regSDMA1_QUEUE6_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_RPTR 0x089b
+#define regSDMA1_QUEUE6_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_OFFSET 0x089c
+#define regSDMA1_QUEUE6_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_BASE_LO 0x089d
+#define regSDMA1_QUEUE6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_BASE_HI 0x089e
+#define regSDMA1_QUEUE6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_SIZE 0x089f
+#define regSDMA1_QUEUE6_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE6_SKIP_CNTL 0x08a0
+#define regSDMA1_QUEUE6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_CONTEXT_STATUS 0x08a1
+#define regSDMA1_QUEUE6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL 0x08a2
+#define regSDMA1_QUEUE6_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL_LOG 0x08b9
+#define regSDMA1_QUEUE6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL_OFFSET 0x08bb
+#define regSDMA1_QUEUE6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE6_CSA_ADDR_LO 0x08bc
+#define regSDMA1_QUEUE6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_CSA_ADDR_HI 0x08bd
+#define regSDMA1_QUEUE6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_SCHEDULE_CNTL 0x08be
+#define regSDMA1_QUEUE6_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_SUB_REMAIN 0x08bf
+#define regSDMA1_QUEUE6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE6_PREEMPT 0x08c0
+#define regSDMA1_QUEUE6_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE6_DUMMY_REG 0x08c1
+#define regSDMA1_QUEUE6_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI 0x08c2
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO 0x08c3
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_AQL_CNTL 0x08c4
+#define regSDMA1_QUEUE6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_MINOR_PTR_UPDATE 0x08c5
+#define regSDMA1_QUEUE6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_PREEMPT 0x08c6
+#define regSDMA1_QUEUE6_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA0 0x08d0
+#define regSDMA1_QUEUE6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA1 0x08d1
+#define regSDMA1_QUEUE6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA2 0x08d2
+#define regSDMA1_QUEUE6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA3 0x08d3
+#define regSDMA1_QUEUE6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA4 0x08d4
+#define regSDMA1_QUEUE6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA5 0x08d5
+#define regSDMA1_QUEUE6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA6 0x08d6
+#define regSDMA1_QUEUE6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA7 0x08d7
+#define regSDMA1_QUEUE6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA8 0x08d8
+#define regSDMA1_QUEUE6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA9 0x08d9
+#define regSDMA1_QUEUE6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA10 0x08da
+#define regSDMA1_QUEUE6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_CNTL 0x08db
+#define regSDMA1_QUEUE6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_CNTL 0x08e8
+#define regSDMA1_QUEUE7_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_BASE 0x08e9
+#define regSDMA1_QUEUE7_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_BASE_HI 0x08ea
+#define regSDMA1_QUEUE7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR 0x08eb
+#define regSDMA1_QUEUE7_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_HI 0x08ec
+#define regSDMA1_QUEUE7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR 0x08ed
+#define regSDMA1_QUEUE7_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_HI 0x08ee
+#define regSDMA1_QUEUE7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_HI 0x08f0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_LO 0x08f1
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_CNTL 0x08f2
+#define regSDMA1_QUEUE7_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_RPTR 0x08f3
+#define regSDMA1_QUEUE7_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_OFFSET 0x08f4
+#define regSDMA1_QUEUE7_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_BASE_LO 0x08f5
+#define regSDMA1_QUEUE7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_BASE_HI 0x08f6
+#define regSDMA1_QUEUE7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_SIZE 0x08f7
+#define regSDMA1_QUEUE7_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE7_SKIP_CNTL 0x08f8
+#define regSDMA1_QUEUE7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_CONTEXT_STATUS 0x08f9
+#define regSDMA1_QUEUE7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL 0x08fa
+#define regSDMA1_QUEUE7_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL_LOG 0x0911
+#define regSDMA1_QUEUE7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL_OFFSET 0x0913
+#define regSDMA1_QUEUE7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE7_CSA_ADDR_LO 0x0914
+#define regSDMA1_QUEUE7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_CSA_ADDR_HI 0x0915
+#define regSDMA1_QUEUE7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_SCHEDULE_CNTL 0x0916
+#define regSDMA1_QUEUE7_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_SUB_REMAIN 0x0917
+#define regSDMA1_QUEUE7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE7_PREEMPT 0x0918
+#define regSDMA1_QUEUE7_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE7_DUMMY_REG 0x0919
+#define regSDMA1_QUEUE7_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI 0x091a
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO 0x091b
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_AQL_CNTL 0x091c
+#define regSDMA1_QUEUE7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_MINOR_PTR_UPDATE 0x091d
+#define regSDMA1_QUEUE7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_PREEMPT 0x091e
+#define regSDMA1_QUEUE7_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA0 0x0928
+#define regSDMA1_QUEUE7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA1 0x0929
+#define regSDMA1_QUEUE7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA2 0x092a
+#define regSDMA1_QUEUE7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA3 0x092b
+#define regSDMA1_QUEUE7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA4 0x092c
+#define regSDMA1_QUEUE7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA5 0x092d
+#define regSDMA1_QUEUE7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA6 0x092e
+#define regSDMA1_QUEUE7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA7 0x092f
+#define regSDMA1_QUEUE7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA8 0x0930
+#define regSDMA1_QUEUE7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA9 0x0931
+#define regSDMA1_QUEUE7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA10 0x0932
+#define regSDMA1_QUEUE7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_CNTL 0x0933
+#define regSDMA1_QUEUE7_MIDCMD_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sdma0_sdma0hypdec
+// base address: 0x3e200
+#define regSDMA0_UCODE_ADDR 0x5880
+#define regSDMA0_UCODE_ADDR_BASE_IDX 1
+#define regSDMA0_UCODE_DATA 0x5881
+#define regSDMA0_UCODE_DATA_BASE_IDX 1
+#define regSDMA0_UCODE_SELFLOAD_CONTROL 0x5882
+#define regSDMA0_UCODE_SELFLOAD_CONTROL_BASE_IDX 1
+#define regSDMA0_BROADCAST_UCODE_ADDR 0x5886
+#define regSDMA0_BROADCAST_UCODE_ADDR_BASE_IDX 1
+#define regSDMA0_BROADCAST_UCODE_DATA 0x5887
+#define regSDMA0_BROADCAST_UCODE_DATA_BASE_IDX 1
+#define regSDMA0_VM_CTX_LO 0x588c
+#define regSDMA0_VM_CTX_LO_BASE_IDX 1
+#define regSDMA0_VM_CTX_HI 0x588d
+#define regSDMA0_VM_CTX_HI_BASE_IDX 1
+#define regSDMA0_ACTIVE_FCN_ID 0x588e
+#define regSDMA0_ACTIVE_FCN_ID_BASE_IDX 1
+#define regSDMA0_VM_CTX_CNTL 0x588f
+#define regSDMA0_VM_CTX_CNTL_BASE_IDX 1
+#define regSDMA0_VIRT_RESET_REQ 0x5890
+#define regSDMA0_VIRT_RESET_REQ_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE0 0x5891
+#define regSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE1 0x5892
+#define regSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE2 0x5893
+#define regSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE0 0x5894
+#define regSDMA0_PUB_REG_TYPE0_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE1 0x5895
+#define regSDMA0_PUB_REG_TYPE1_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE2 0x5896
+#define regSDMA0_PUB_REG_TYPE2_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE3 0x5897
+#define regSDMA0_PUB_REG_TYPE3_BASE_IDX 1
+#define regSDMA0_VM_CNTL 0x5899
+#define regSDMA0_VM_CNTL_BASE_IDX 1
+#define regSDMA0_F32_CNTL 0x589a
+#define regSDMA0_F32_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1hypdec
+// base address: 0x3e280
+#define regSDMA1_UCODE_ADDR 0x58a0
+#define regSDMA1_UCODE_ADDR_BASE_IDX 1
+#define regSDMA1_UCODE_DATA 0x58a1
+#define regSDMA1_UCODE_DATA_BASE_IDX 1
+#define regSDMA1_UCODE_SELFLOAD_CONTROL 0x58a2
+#define regSDMA1_UCODE_SELFLOAD_CONTROL_BASE_IDX 1
+#define regSDMA1_BROADCAST_UCODE_ADDR 0x58a6
+#define regSDMA1_BROADCAST_UCODE_ADDR_BASE_IDX 1
+#define regSDMA1_BROADCAST_UCODE_DATA 0x58a7
+#define regSDMA1_BROADCAST_UCODE_DATA_BASE_IDX 1
+#define regSDMA1_VM_CTX_LO 0x58ac
+#define regSDMA1_VM_CTX_LO_BASE_IDX 1
+#define regSDMA1_VM_CTX_HI 0x58ad
+#define regSDMA1_VM_CTX_HI_BASE_IDX 1
+#define regSDMA1_ACTIVE_FCN_ID 0x58ae
+#define regSDMA1_ACTIVE_FCN_ID_BASE_IDX 1
+#define regSDMA1_VM_CTX_CNTL 0x58af
+#define regSDMA1_VM_CTX_CNTL_BASE_IDX 1
+#define regSDMA1_VIRT_RESET_REQ 0x58b0
+#define regSDMA1_VIRT_RESET_REQ_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE0 0x58b1
+#define regSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE1 0x58b2
+#define regSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE2 0x58b3
+#define regSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE0 0x58b4
+#define regSDMA1_PUB_REG_TYPE0_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE1 0x58b5
+#define regSDMA1_PUB_REG_TYPE1_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE2 0x58b6
+#define regSDMA1_PUB_REG_TYPE2_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE3 0x58b7
+#define regSDMA1_PUB_REG_TYPE3_BASE_IDX 1
+#define regSDMA1_VM_CNTL 0x58b9
+#define regSDMA1_VM_CNTL_BASE_IDX 1
+#define regSDMA1_F32_CNTL 0x58ba
+#define regSDMA1_F32_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma0perfsdec
+// base address: 0x37880
+#define regSDMA0_PERFCNT_PERFCOUNTER0_CFG 0x3e20
+#define regSDMA0_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER1_CFG 0x3e21
+#define regSDMA0_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x3e22
+#define regSDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSDMA0_PERFCNT_MISC_CNTL 0x3e23
+#define regSDMA0_PERFCNT_MISC_CNTL_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_SELECT 0x3e24
+#define regSDMA0_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_SELECT1 0x3e25
+#define regSDMA0_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_SELECT 0x3e26
+#define regSDMA0_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_SELECT1 0x3e27
+#define regSDMA0_PERFCOUNTER1_SELECT1_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1perfsdec
+// base address: 0x378b0
+#define regSDMA1_PERFCNT_PERFCOUNTER0_CFG 0x3e2c
+#define regSDMA1_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER1_CFG 0x3e2d
+#define regSDMA1_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x3e2e
+#define regSDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSDMA1_PERFCNT_MISC_CNTL 0x3e2f
+#define regSDMA1_PERFCNT_MISC_CNTL_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_SELECT 0x3e30
+#define regSDMA1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_SELECT1 0x3e31
+#define regSDMA1_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_SELECT 0x3e32
+#define regSDMA1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_SELECT1 0x3e33
+#define regSDMA1_PERFCOUNTER1_SELECT1_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma0perfddec
+// base address: 0x35980
+#define regSDMA0_PERFCNT_PERFCOUNTER_LO 0x3660
+#define regSDMA0_PERFCNT_PERFCOUNTER_LO_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER_HI 0x3661
+#define regSDMA0_PERFCNT_PERFCOUNTER_HI_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_LO 0x3662
+#define regSDMA0_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_HI 0x3663
+#define regSDMA0_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_LO 0x3664
+#define regSDMA0_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_HI 0x3665
+#define regSDMA0_PERFCOUNTER1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1perfddec
+// base address: 0x359b0
+#define regSDMA1_PERFCNT_PERFCOUNTER_LO 0x366c
+#define regSDMA1_PERFCNT_PERFCOUNTER_LO_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER_HI 0x366d
+#define regSDMA1_PERFCNT_PERFCOUNTER_HI_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_LO 0x366e
+#define regSDMA1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_HI 0x366f
+#define regSDMA1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_LO 0x3670
+#define regSDMA1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_HI 0x3671
+#define regSDMA1_PERFCOUNTER1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define regGRBM_CNTL 0x0da0
+#define regGRBM_CNTL_BASE_IDX 0
+#define regGRBM_SKEW_CNTL 0x0da1
+#define regGRBM_SKEW_CNTL_BASE_IDX 0
+#define regGRBM_STATUS2 0x0da2
+#define regGRBM_STATUS2_BASE_IDX 0
+#define regGRBM_PWR_CNTL 0x0da3
+#define regGRBM_PWR_CNTL_BASE_IDX 0
+#define regGRBM_STATUS 0x0da4
+#define regGRBM_STATUS_BASE_IDX 0
+#define regGRBM_STATUS_SE0 0x0da5
+#define regGRBM_STATUS_SE0_BASE_IDX 0
+#define regGRBM_STATUS_SE1 0x0da6
+#define regGRBM_STATUS_SE1_BASE_IDX 0
+#define regGRBM_STATUS3 0x0da7
+#define regGRBM_STATUS3_BASE_IDX 0
+#define regGRBM_SOFT_RESET 0x0da8
+#define regGRBM_SOFT_RESET_BASE_IDX 0
+#define regGRBM_GFX_CLKEN_CNTL 0x0dac
+#define regGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define regGRBM_WAIT_IDLE_CLOCKS 0x0dad
+#define regGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define regGRBM_STATUS_SE2 0x0dae
+#define regGRBM_STATUS_SE2_BASE_IDX 0
+#define regGRBM_READ_ERROR 0x0db6
+#define regGRBM_READ_ERROR_BASE_IDX 0
+#define regGRBM_READ_ERROR2 0x0db7
+#define regGRBM_READ_ERROR2_BASE_IDX 0
+#define regGRBM_INT_CNTL 0x0db8
+#define regGRBM_INT_CNTL_BASE_IDX 0
+#define regGRBM_TRAP_OP 0x0db9
+#define regGRBM_TRAP_OP_BASE_IDX 0
+#define regGRBM_TRAP_ADDR 0x0dba
+#define regGRBM_TRAP_ADDR_BASE_IDX 0
+#define regGRBM_TRAP_ADDR_MSK 0x0dbb
+#define regGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define regGRBM_TRAP_WD 0x0dbc
+#define regGRBM_TRAP_WD_BASE_IDX 0
+#define regGRBM_TRAP_WD_MSK 0x0dbd
+#define regGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define regGRBM_DSM_BYPASS 0x0dbe
+#define regGRBM_DSM_BYPASS_BASE_IDX 0
+#define regGRBM_WRITE_ERROR 0x0dbf
+#define regGRBM_WRITE_ERROR_BASE_IDX 0
+#define regGRBM_CHIP_REVISION 0x0dc1
+#define regGRBM_CHIP_REVISION_BASE_IDX 0
+#define regGRBM_RSMU_CFG 0x0dc3
+#define regGRBM_RSMU_CFG_BASE_IDX 0
+#define regGRBM_IH_CREDIT 0x0dc4
+#define regGRBM_IH_CREDIT_BASE_IDX 0
+#define regGRBM_PWR_CNTL2 0x0dc5
+#define regGRBM_PWR_CNTL2_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_START 0x0dc6
+#define regGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_END 0x0dc7
+#define regGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define regGRBM_RSMU_READ_ERROR 0x0dc8
+#define regGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define regGRBM_INVALID_PIPE 0x0dc9
+#define regGRBM_INVALID_PIPE_BASE_IDX 0
+#define regGRBM_FENCE_RANGE0 0x0dca
+#define regGRBM_FENCE_RANGE0_BASE_IDX 0
+#define regGRBM_FENCE_RANGE1 0x0dcb
+#define regGRBM_FENCE_RANGE1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG0 0x0de0
+#define regGRBM_SCRATCH_REG0_BASE_IDX 0
+#define regGRBM_SCRATCH_REG1 0x0de1
+#define regGRBM_SCRATCH_REG1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG2 0x0de2
+#define regGRBM_SCRATCH_REG2_BASE_IDX 0
+#define regGRBM_SCRATCH_REG3 0x0de3
+#define regGRBM_SCRATCH_REG3_BASE_IDX 0
+#define regGRBM_SCRATCH_REG4 0x0de4
+#define regGRBM_SCRATCH_REG4_BASE_IDX 0
+#define regGRBM_SCRATCH_REG5 0x0de5
+#define regGRBM_SCRATCH_REG5_BASE_IDX 0
+#define regGRBM_SCRATCH_REG6 0x0de6
+#define regGRBM_SCRATCH_REG6_BASE_IDX 0
+#define regGRBM_SCRATCH_REG7 0x0de7
+#define regGRBM_SCRATCH_REG7_BASE_IDX 0
+#define regVIOLATION_DATA_ASYNC_VF_PROG 0x0df1
+#define regVIOLATION_DATA_ASYNC_VF_PROG_BASE_IDX 0
+
+
+// addressBlock: gc_cpdec
+// base address: 0x8200
+#define regCP_CPC_DEBUG_CNTL 0x0e20
+#define regCP_CPC_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPF_DEBUG_CNTL 0x0e22
+#define regCP_CPF_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPC_STATUS 0x0e24
+#define regCP_CPC_STATUS_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT 0x0e25
+#define regCP_CPC_BUSY_STAT_BASE_IDX 0
+#define regCP_CPC_STALLED_STAT1 0x0e26
+#define regCP_CPC_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPF_STATUS 0x0e27
+#define regCP_CPF_STATUS_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT 0x0e28
+#define regCP_CPF_BUSY_STAT_BASE_IDX 0
+#define regCP_CPF_STALLED_STAT1 0x0e29
+#define regCP_CPF_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT2 0x0e2a
+#define regCP_CPC_BUSY_STAT2_BASE_IDX 0
+#define regCP_CPC_GRBM_FREE_COUNT 0x0e2b
+#define regCP_CPC_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPC_PRIV_VIOLATION_ADDR 0x0e2c
+#define regCP_CPC_PRIV_VIOLATION_ADDR_BASE_IDX 0
+#define regCP_MEC_ME1_HEADER_DUMP 0x0e2e
+#define regCP_MEC_ME1_HEADER_DUMP_BASE_IDX 0
+#define regCP_MEC_ME2_HEADER_DUMP 0x0e2f
+#define regCP_MEC_ME2_HEADER_DUMP_BASE_IDX 0
+#define regCP_CPC_SCRATCH_INDEX 0x0e30
+#define regCP_CPC_SCRATCH_INDEX_BASE_IDX 0
+#define regCP_CPC_SCRATCH_DATA 0x0e31
+#define regCP_CPC_SCRATCH_DATA_BASE_IDX 0
+#define regCP_CPF_GRBM_FREE_COUNT 0x0e32
+#define regCP_CPF_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT2 0x0e33
+#define regCP_CPF_BUSY_STAT2_BASE_IDX 0
+#define regCP_CPC_HALT_HYST_COUNT 0x0e47
+#define regCP_CPC_HALT_HYST_COUNT_BASE_IDX 0
+#define regCP_STALLED_STAT3 0x0f3c
+#define regCP_STALLED_STAT3_BASE_IDX 0
+#define regCP_STALLED_STAT1 0x0f3d
+#define regCP_STALLED_STAT1_BASE_IDX 0
+#define regCP_STALLED_STAT2 0x0f3e
+#define regCP_STALLED_STAT2_BASE_IDX 0
+#define regCP_BUSY_STAT 0x0f3f
+#define regCP_BUSY_STAT_BASE_IDX 0
+#define regCP_STAT 0x0f40
+#define regCP_STAT_BASE_IDX 0
+#define regCP_ME_HEADER_DUMP 0x0f41
+#define regCP_ME_HEADER_DUMP_BASE_IDX 0
+#define regCP_PFP_HEADER_DUMP 0x0f42
+#define regCP_PFP_HEADER_DUMP_BASE_IDX 0
+#define regCP_GRBM_FREE_COUNT 0x0f43
+#define regCP_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_PFP_INSTR_PNTR 0x0f45
+#define regCP_PFP_INSTR_PNTR_BASE_IDX 0
+#define regCP_ME_INSTR_PNTR 0x0f46
+#define regCP_ME_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC1_INSTR_PNTR 0x0f48
+#define regCP_MEC1_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC2_INSTR_PNTR 0x0f49
+#define regCP_MEC2_INSTR_PNTR_BASE_IDX 0
+#define regCP_CSF_STAT 0x0f54
+#define regCP_CSF_STAT_BASE_IDX 0
+#define regCP_CNTX_STAT 0x0f58
+#define regCP_CNTX_STAT_BASE_IDX 0
+#define regCP_ME_PREEMPTION 0x0f59
+#define regCP_ME_PREEMPTION_BASE_IDX 0
+#define regCP_RB1_RPTR 0x0f5f
+#define regCP_RB1_RPTR_BASE_IDX 0
+#define regCP_RB0_RPTR 0x0f60
+#define regCP_RB0_RPTR_BASE_IDX 0
+#define regCP_RB_RPTR 0x0f60
+#define regCP_RB_RPTR_BASE_IDX 0
+#define regCP_RB_WPTR_DELAY 0x0f61
+#define regCP_RB_WPTR_DELAY_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_CNTL 0x0f62
+#define regCP_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_ROQ1_THRESHOLDS 0x0f75
+#define regCP_ROQ1_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ2_THRESHOLDS 0x0f76
+#define regCP_ROQ2_THRESHOLDS_BASE_IDX 0
+#define regCP_STQ_THRESHOLDS 0x0f77
+#define regCP_STQ_THRESHOLDS_BASE_IDX 0
+#define regCP_MEQ_THRESHOLDS 0x0f79
+#define regCP_MEQ_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_AVAIL 0x0f7a
+#define regCP_ROQ_AVAIL_BASE_IDX 0
+#define regCP_STQ_AVAIL 0x0f7b
+#define regCP_STQ_AVAIL_BASE_IDX 0
+#define regCP_ROQ2_AVAIL 0x0f7c
+#define regCP_ROQ2_AVAIL_BASE_IDX 0
+#define regCP_MEQ_AVAIL 0x0f7d
+#define regCP_MEQ_AVAIL_BASE_IDX 0
+#define regCP_CMD_INDEX 0x0f7e
+#define regCP_CMD_INDEX_BASE_IDX 0
+#define regCP_CMD_DATA 0x0f7f
+#define regCP_CMD_DATA_BASE_IDX 0
+#define regCP_ROQ_RB_STAT 0x0f80
+#define regCP_ROQ_RB_STAT_BASE_IDX 0
+#define regCP_ROQ_IB1_STAT 0x0f81
+#define regCP_ROQ_IB1_STAT_BASE_IDX 0
+#define regCP_ROQ_IB2_STAT 0x0f82
+#define regCP_ROQ_IB2_STAT_BASE_IDX 0
+#define regCP_STQ_STAT 0x0f83
+#define regCP_STQ_STAT_BASE_IDX 0
+#define regCP_STQ_WR_STAT 0x0f84
+#define regCP_STQ_WR_STAT_BASE_IDX 0
+#define regCP_MEQ_STAT 0x0f85
+#define regCP_MEQ_STAT_BASE_IDX 0
+#define regCP_ROQ3_THRESHOLDS 0x0f8c
+#define regCP_ROQ3_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_DB_STAT 0x0f8d
+#define regCP_ROQ_DB_STAT_BASE_IDX 0
+#define regCP_INT_STAT_DEBUG 0x0f97
+#define regCP_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_DEBUG_CNTL 0x0f98
+#define regCP_DEBUG_CNTL_BASE_IDX 0
+#define regCP_PRIV_VIOLATION_ADDR 0x0f9a
+#define regCP_PRIV_VIOLATION_ADDR_BASE_IDX 0
+
+
+// addressBlock: gc_padec
+// base address: 0x8800
+#define regVGT_DMA_DATA_FIFO_DEPTH 0x0fcd
+#define regVGT_DMA_DATA_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DMA_REQ_FIFO_DEPTH 0x0fce
+#define regVGT_DMA_REQ_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DRAW_INIT_FIFO_DEPTH 0x0fcf
+#define regVGT_DRAW_INIT_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_MC_LAT_CNTL 0x0fd6
+#define regVGT_MC_LAT_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS_2 0x0fd7
+#define regIA_UTCL1_STATUS_2_BASE_IDX 0
+#define regWD_CNTL_STATUS 0x0fdf
+#define regWD_CNTL_STATUS_BASE_IDX 0
+#define regCC_GC_PRIM_CONFIG 0x0fe0
+#define regCC_GC_PRIM_CONFIG_BASE_IDX 0
+#define regWD_QOS 0x0fe2
+#define regWD_QOS_BASE_IDX 0
+#define regWD_UTCL1_CNTL 0x0fe3
+#define regWD_UTCL1_CNTL_BASE_IDX 0
+#define regWD_UTCL1_STATUS 0x0fe4
+#define regWD_UTCL1_STATUS_BASE_IDX 0
+#define regIA_UTCL1_CNTL 0x0fe6
+#define regIA_UTCL1_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS 0x0fe7
+#define regIA_UTCL1_STATUS_BASE_IDX 0
+#define regCC_GC_SA_UNIT_DISABLE 0x0fe9
+#define regCC_GC_SA_UNIT_DISABLE_BASE_IDX 0
+#define regGE_RATE_CNTL_1 0x0ff4
+#define regGE_RATE_CNTL_1_BASE_IDX 0
+#define regGE_RATE_CNTL_2 0x0ff5
+#define regGE_RATE_CNTL_2_BASE_IDX 0
+#define regVGT_SYS_CONFIG 0x1003
+#define regVGT_SYS_CONFIG_BASE_IDX 0
+#define regGE_PRIV_CONTROL 0x1004
+#define regGE_PRIV_CONTROL_BASE_IDX 0
+#define regGE_STATUS 0x1005
+#define regGE_STATUS_BASE_IDX 0
+#define regVGT_GS_MAX_WAVE_ID 0x1009
+#define regVGT_GS_MAX_WAVE_ID_BASE_IDX 0
+#define regGFX_PIPE_CONTROL 0x100d
+#define regGFX_PIPE_CONTROL_BASE_IDX 0
+#define regCC_GC_SHADER_ARRAY_CONFIG 0x100f
+#define regCC_GC_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define regGE2_SE_CNTL_STATUS 0x1011
+#define regGE2_SE_CNTL_STATUS_BASE_IDX 0
+#define regVGT_RESET_DEBUG 0x1014
+#define regVGT_RESET_DEBUG_BASE_IDX 0
+#define regGE_SPI_IF_SAFE_REG 0x1018
+#define regGE_SPI_IF_SAFE_REG_BASE_IDX 0
+#define regGE_PA_IF_SAFE_REG 0x1019
+#define regGE_PA_IF_SAFE_REG_BASE_IDX 0
+#define regPA_CL_CNTL_STATUS 0x1024
+#define regPA_CL_CNTL_STATUS_BASE_IDX 0
+#define regPA_CL_ENHANCE 0x1025
+#define regPA_CL_ENHANCE_BASE_IDX 0
+#define regPA_CL_RESET_DEBUG 0x1026
+#define regPA_CL_RESET_DEBUG_BASE_IDX 0
+#define regPA_SU_CNTL_STATUS 0x1034
+#define regPA_SU_CNTL_STATUS_BASE_IDX 0
+#define regPA_SC_FIFO_DEPTH_CNTL 0x1035
+#define regPA_SC_FIFO_DEPTH_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define regSQ_CONFIG 0x10a0
+#define regSQ_CONFIG_BASE_IDX 0
+#define regSQC_CONFIG 0x10a1
+#define regSQC_CONFIG_BASE_IDX 0
+#define regLDS_CONFIG 0x10a2
+#define regLDS_CONFIG_BASE_IDX 0
+#define regSQ_RANDOM_WAVE_PRI 0x10a3
+#define regSQ_RANDOM_WAVE_PRI_BASE_IDX 0
+#define regSQG_STATUS 0x10a4
+#define regSQG_STATUS_BASE_IDX 0
+#define regSQ_FIFO_SIZES 0x10a5
+#define regSQ_FIFO_SIZES_BASE_IDX 0
+#define regSQ_DSM_CNTL 0x10a6
+#define regSQ_DSM_CNTL_BASE_IDX 0
+#define regSQ_DSM_CNTL2 0x10a7
+#define regSQ_DSM_CNTL2_BASE_IDX 0
+#define regSP_CONFIG 0x10ab
+#define regSP_CONFIG_BASE_IDX 0
+#define regSQ_ARB_CONFIG 0x10ac
+#define regSQ_ARB_CONFIG_BASE_IDX 0
+#define regSQ_DEBUG_HOST_TRAP_STATUS 0x10b6
+#define regSQ_DEBUG_HOST_TRAP_STATUS_BASE_IDX 0
+#define regSQG_GL1H_STATUS 0x10b9
+#define regSQG_GL1H_STATUS_BASE_IDX 0
+#define regSQG_CONFIG 0x10ba
+#define regSQG_CONFIG_BASE_IDX 0
+#define regSQ_PERF_SNAPSHOT_CTRL 0x10bb
+#define regSQ_PERF_SNAPSHOT_CTRL_BASE_IDX 0
+#define regCC_GC_SHADER_RATE_CONFIG 0x10bc
+#define regCC_GC_SHADER_RATE_CONFIG_BASE_IDX 0
+#define regSQ_INTERRUPT_AUTO_MASK 0x10be
+#define regSQ_INTERRUPT_AUTO_MASK_BASE_IDX 0
+#define regSQ_INTERRUPT_MSG_CTRL 0x10bf
+#define regSQ_INTERRUPT_MSG_CTRL_BASE_IDX 0
+#define regSQ_WATCH0_ADDR_H 0x10d0
+#define regSQ_WATCH0_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH0_ADDR_L 0x10d1
+#define regSQ_WATCH0_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH0_CNTL 0x10d2
+#define regSQ_WATCH0_CNTL_BASE_IDX 0
+#define regSQ_WATCH1_ADDR_H 0x10d3
+#define regSQ_WATCH1_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH1_ADDR_L 0x10d4
+#define regSQ_WATCH1_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH1_CNTL 0x10d5
+#define regSQ_WATCH1_CNTL_BASE_IDX 0
+#define regSQ_WATCH2_ADDR_H 0x10d6
+#define regSQ_WATCH2_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH2_ADDR_L 0x10d7
+#define regSQ_WATCH2_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH2_CNTL 0x10d8
+#define regSQ_WATCH2_CNTL_BASE_IDX 0
+#define regSQ_WATCH3_ADDR_H 0x10d9
+#define regSQ_WATCH3_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH3_ADDR_L 0x10da
+#define regSQ_WATCH3_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH3_CNTL 0x10db
+#define regSQ_WATCH3_CNTL_BASE_IDX 0
+#define regSQ_IND_INDEX 0x1118
+#define regSQ_IND_INDEX_BASE_IDX 0
+#define regSQ_IND_DATA 0x1119
+#define regSQ_IND_DATA_BASE_IDX 0
+#define regSQ_CMD 0x111b
+#define regSQ_CMD_BASE_IDX 0
+#define regSQC_MISC_CONFIG 0x1179
+#define regSQC_MISC_CONFIG_BASE_IDX 0
+
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define regSX_DEBUG_BUSY 0x11b4
+#define regSX_DEBUG_BUSY_BASE_IDX 0
+#define regSX_DEBUG_BUSY_2 0x11b5
+#define regSX_DEBUG_BUSY_2_BASE_IDX 0
+#define regSX_DEBUG_BUSY_3 0x11b6
+#define regSX_DEBUG_BUSY_3_BASE_IDX 0
+#define regSX_DEBUG_BUSY_4 0x11b7
+#define regSX_DEBUG_BUSY_4_BASE_IDX 0
+#define regSX_DEBUG_1 0x11b8
+#define regSX_DEBUG_1_BASE_IDX 0
+#define regSX_DEBUG_BUSY_5 0x11b9
+#define regSX_DEBUG_BUSY_5_BASE_IDX 0
+#define regSX_DEBUG_BUSY_6 0x11ba
+#define regSX_DEBUG_BUSY_6_BASE_IDX 0
+#define regSX_DEBUG_BUSY_7 0x11bb
+#define regSX_DEBUG_BUSY_7_BASE_IDX 0
+#define regSX_DEBUG_BUSY_8 0x11bc
+#define regSX_DEBUG_BUSY_8_BASE_IDX 0
+#define regSX_DEBUG_BUSY_9 0x11bd
+#define regSX_DEBUG_BUSY_9_BASE_IDX 0
+#define regSX_DEBUG_BUSY_10 0x11be
+#define regSX_DEBUG_BUSY_10_BASE_IDX 0
+#define regSPI_PS_MAX_WAVE_ID 0x11da
+#define regSPI_PS_MAX_WAVE_ID_BASE_IDX 0
+#define regSPI_GFX_CNTL 0x11dc
+#define regSPI_GFX_CNTL_BASE_IDX 0
+#define regSPI_DEBUG_READ 0x11e2
+#define regSPI_DEBUG_READ_BASE_IDX 0
+#define regSPI_DSM_CNTL 0x11e3
+#define regSPI_DSM_CNTL_BASE_IDX 0
+#define regSPI_DSM_CNTL2 0x11e4
+#define regSPI_DSM_CNTL2_BASE_IDX 0
+#define regSPI_EDC_CNT 0x11e5
+#define regSPI_EDC_CNT_BASE_IDX 0
+#define regSPI_DEBUG_BUSY 0x11f0
+#define regSPI_DEBUG_BUSY_BASE_IDX 0
+#define regSPI_CONFIG_PS_CU_EN 0x11f2
+#define regSPI_CONFIG_PS_CU_EN_BASE_IDX 0
+#define regSPI_WF_LIFETIME_CNTL 0x124a
+#define regSPI_WF_LIFETIME_CNTL_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_0 0x124b
+#define regSPI_WF_LIFETIME_LIMIT_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_1 0x124c
+#define regSPI_WF_LIFETIME_LIMIT_1_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_2 0x124d
+#define regSPI_WF_LIFETIME_LIMIT_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_3 0x124e
+#define regSPI_WF_LIFETIME_LIMIT_3_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_4 0x124f
+#define regSPI_WF_LIFETIME_LIMIT_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_5 0x1250
+#define regSPI_WF_LIFETIME_LIMIT_5_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_0 0x1255
+#define regSPI_WF_LIFETIME_STATUS_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_2 0x1257
+#define regSPI_WF_LIFETIME_STATUS_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_4 0x1259
+#define regSPI_WF_LIFETIME_STATUS_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_6 0x125b
+#define regSPI_WF_LIFETIME_STATUS_6_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_7 0x125c
+#define regSPI_WF_LIFETIME_STATUS_7_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_9 0x125e
+#define regSPI_WF_LIFETIME_STATUS_9_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_11 0x1260
+#define regSPI_WF_LIFETIME_STATUS_11_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_13 0x1262
+#define regSPI_WF_LIFETIME_STATUS_13_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_14 0x1263
+#define regSPI_WF_LIFETIME_STATUS_14_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_15 0x1264
+#define regSPI_WF_LIFETIME_STATUS_15_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_16 0x1265
+#define regSPI_WF_LIFETIME_STATUS_16_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_17 0x1266
+#define regSPI_WF_LIFETIME_STATUS_17_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_18 0x1267
+#define regSPI_WF_LIFETIME_STATUS_18_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_19 0x1268
+#define regSPI_WF_LIFETIME_STATUS_19_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_20 0x1269
+#define regSPI_WF_LIFETIME_STATUS_20_BASE_IDX 0
+#define regSPI_WF_LIFETIME_DEBUG 0x126a
+#define regSPI_WF_LIFETIME_DEBUG_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_21 0x126b
+#define regSPI_WF_LIFETIME_STATUS_21_BASE_IDX 0
+#define regSPI_LB_CTR_CTRL 0x1274
+#define regSPI_LB_CTR_CTRL_BASE_IDX 0
+#define regSPI_LB_WGP_MASK 0x1275
+#define regSPI_LB_WGP_MASK_BASE_IDX 0
+#define regSPI_LB_DATA_REG 0x1276
+#define regSPI_LB_DATA_REG_BASE_IDX 0
+#define regSPI_PG_ENABLE_STATIC_WGP_MASK 0x1277
+#define regSPI_PG_ENABLE_STATIC_WGP_MASK_BASE_IDX 0
+#define regSPI_GDS_CREDITS 0x1278
+#define regSPI_GDS_CREDITS_BASE_IDX 0
+#define regSPI_SX_EXPORT_BUFFER_SIZES 0x1279
+#define regSPI_SX_EXPORT_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES 0x127a
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_STATUS 0x127b
+#define regSPI_CSQ_WF_ACTIVE_STATUS_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0 0x127c
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1 0x127d
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2 0x127e
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3 0x127f
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3_BASE_IDX 0
+#define regSPI_LB_DATA_WAVES 0x1284
+#define regSPI_LB_DATA_WAVES_BASE_IDX 0
+#define regSPI_LB_DATA_PERWGP_WAVE_HSGS 0x1285
+#define regSPI_LB_DATA_PERWGP_WAVE_HSGS_BASE_IDX 0
+#define regSPI_LB_DATA_PERWGP_WAVE_CS 0x1287
+#define regSPI_LB_DATA_PERWGP_WAVE_CS_BASE_IDX 0
+#define regSPIS_DEBUG_READ 0x128a
+#define regSPIS_DEBUG_READ_BASE_IDX 0
+#define regBCI_DEBUG_READ 0x128b
+#define regBCI_DEBUG_READ_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO 0x128c
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI 0x128d
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO 0x128e
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI 0x128f
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN 0x1290
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO 0x1291
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI 0x1292
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO 0x1293
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI 0x1294
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN 0x1295
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define regTD_CNTL 0x12c5
+#define regTD_CNTL_BASE_IDX 0
+#define regTD_STATUS 0x12c6
+#define regTD_STATUS_BASE_IDX 0
+#define regTD_POWER_CNTL 0x12ca
+#define regTD_POWER_CNTL_BASE_IDX 0
+#define regTD_CNTL2 0x12cb
+#define regTD_CNTL2_BASE_IDX 0
+#define regTD_DSM_CNTL 0x12cf
+#define regTD_DSM_CNTL_BASE_IDX 0
+#define regTD_DSM_CNTL2 0x12d0
+#define regTD_DSM_CNTL2_BASE_IDX 0
+#define regTD_SCRATCH 0x12d3
+#define regTD_SCRATCH_BASE_IDX 0
+#define regTA_CNTL 0x12e1
+#define regTA_CNTL_BASE_IDX 0
+#define regTA_CNTL_AUX 0x12e2
+#define regTA_CNTL_AUX_BASE_IDX 0
+#define regTA_CNTL2 0x12e5
+#define regTA_CNTL2_BASE_IDX 0
+#define regTA_STATUS 0x12e8
+#define regTA_STATUS_BASE_IDX 0
+#define regTA_SCRATCH 0x1304
+#define regTA_SCRATCH_BASE_IDX 0
+
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define regGDS_CONFIG 0x1360
+#define regGDS_CONFIG_BASE_IDX 0
+#define regGDS_CNTL_STATUS 0x1361
+#define regGDS_CNTL_STATUS_BASE_IDX 0
+#define regGDS_ENHANCE 0x1362
+#define regGDS_ENHANCE_BASE_IDX 0
+#define regGDS_PROTECTION_FAULT 0x1363
+#define regGDS_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_VM_PROTECTION_FAULT 0x1364
+#define regGDS_VM_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_EDC_CNT 0x1365
+#define regGDS_EDC_CNT_BASE_IDX 0
+#define regGDS_EDC_GRBM_CNT 0x1366
+#define regGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_DED 0x1367
+#define regGDS_EDC_OA_DED_BASE_IDX 0
+#define regGDS_DSM_CNTL 0x136a
+#define regGDS_DSM_CNTL_BASE_IDX 0
+#define regGDS_EDC_OA_PHY_CNT 0x136b
+#define regGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_PIPE_CNT 0x136c
+#define regGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+#define regGDS_DSM_CNTL2 0x136d
+#define regGDS_DSM_CNTL2_BASE_IDX 0
+
+
+// addressBlock: gc_rbdec
+// base address: 0x9800
+#define regDB_DEBUG 0x13ac
+#define regDB_DEBUG_BASE_IDX 0
+#define regDB_DEBUG2 0x13ad
+#define regDB_DEBUG2_BASE_IDX 0
+#define regDB_DEBUG3 0x13ae
+#define regDB_DEBUG3_BASE_IDX 0
+#define regDB_DEBUG4 0x13af
+#define regDB_DEBUG4_BASE_IDX 0
+#define regDB_ETILE_STUTTER_CONTROL 0x13b0
+#define regDB_ETILE_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_LTILE_STUTTER_CONTROL 0x13b1
+#define regDB_LTILE_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_EQUAD_STUTTER_CONTROL 0x13b2
+#define regDB_EQUAD_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_LQUAD_STUTTER_CONTROL 0x13b3
+#define regDB_LQUAD_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_CREDIT_LIMIT 0x13b4
+#define regDB_CREDIT_LIMIT_BASE_IDX 0
+#define regDB_WATERMARKS 0x13b5
+#define regDB_WATERMARKS_BASE_IDX 0
+#define regDB_SUBTILE_CONTROL 0x13b6
+#define regDB_SUBTILE_CONTROL_BASE_IDX 0
+#define regDB_FREE_CACHELINES 0x13b7
+#define regDB_FREE_CACHELINES_BASE_IDX 0
+#define regDB_FIFO_DEPTH1 0x13b8
+#define regDB_FIFO_DEPTH1_BASE_IDX 0
+#define regDB_FIFO_DEPTH2 0x13b9
+#define regDB_FIFO_DEPTH2_BASE_IDX 0
+#define regDB_LAST_OF_BURST_CONFIG 0x13ba
+#define regDB_LAST_OF_BURST_CONFIG_BASE_IDX 0
+#define regDB_RING_CONTROL 0x13bb
+#define regDB_RING_CONTROL_BASE_IDX 0
+#define regDB_MEM_ARB_WATERMARKS 0x13bc
+#define regDB_MEM_ARB_WATERMARKS_BASE_IDX 0
+#define regDB_FIFO_DEPTH3 0x13bd
+#define regDB_FIFO_DEPTH3_BASE_IDX 0
+#define regDB_DEBUG6 0x13be
+#define regDB_DEBUG6_BASE_IDX 0
+#define regDB_EXCEPTION_CONTROL 0x13bf
+#define regDB_EXCEPTION_CONTROL_BASE_IDX 0
+#define regDB_DEBUG7 0x13d0
+#define regDB_DEBUG7_BASE_IDX 0
+#define regDB_DEBUG5 0x13d1
+#define regDB_DEBUG5_BASE_IDX 0
+#define regDB_FGCG_SRAMS_CLK_CTRL 0x13d7
+#define regDB_FGCG_SRAMS_CLK_CTRL_BASE_IDX 0
+#define regDB_FGCG_INTERFACES_CLK_CTRL 0x13d8
+#define regDB_FGCG_INTERFACES_CLK_CTRL_BASE_IDX 0
+#define regDB_FIFO_DEPTH4 0x13d9
+#define regDB_FIFO_DEPTH4_BASE_IDX 0
+#define regCC_RB_REDUNDANCY 0x13dc
+#define regCC_RB_REDUNDANCY_BASE_IDX 0
+#define regCC_RB_BACKEND_DISABLE 0x13dd
+#define regCC_RB_BACKEND_DISABLE_BASE_IDX 0
+#define regGB_ADDR_CONFIG 0x13de
+#define regGB_ADDR_CONFIG_BASE_IDX 0
+#define regGB_BACKEND_MAP 0x13df
+#define regGB_BACKEND_MAP_BASE_IDX 0
+#define regGB_GPU_ID 0x13e0
+#define regGB_GPU_ID_BASE_IDX 0
+#define regCC_RB_DAISY_CHAIN 0x13e1
+#define regCC_RB_DAISY_CHAIN_BASE_IDX 0
+#define regGB_ADDR_CONFIG_READ 0x13e2
+#define regGB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regCB_HW_CONTROL_4 0x1422
+#define regCB_HW_CONTROL_4_BASE_IDX 0
+#define regCB_HW_CONTROL_3 0x1423
+#define regCB_HW_CONTROL_3_BASE_IDX 0
+#define regCB_HW_CONTROL 0x1424
+#define regCB_HW_CONTROL_BASE_IDX 0
+#define regCB_HW_CONTROL_1 0x1425
+#define regCB_HW_CONTROL_1_BASE_IDX 0
+#define regCB_HW_CONTROL_2 0x1426
+#define regCB_HW_CONTROL_2_BASE_IDX 0
+#define regCB_DCC_CONFIG 0x1427
+#define regCB_DCC_CONFIG_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_RD 0x1428
+#define regCB_HW_MEM_ARBITER_RD_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_WR 0x1429
+#define regCB_HW_MEM_ARBITER_WR_BASE_IDX 0
+#define regCB_FGCG_SRAM_OVERRIDE 0x142a
+#define regCB_FGCG_SRAM_OVERRIDE_BASE_IDX 0
+#define regCB_DCC_CONFIG2 0x142b
+#define regCB_DCC_CONFIG2_BASE_IDX 0
+#define regCHICKEN_BITS 0x142d
+#define regCHICKEN_BITS_BASE_IDX 0
+#define regCB_CACHE_EVICT_POINTS 0x142e
+#define regCB_CACHE_EVICT_POINTS_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec
+// base address: 0xa800
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0 0x17a0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1 0x17a1
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0 0x17a2
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1 0x17a3
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_RD_GRP2VC_MAP 0x17a4
+#define regGCEA_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_WR_GRP2VC_MAP 0x17a5
+#define regGCEA_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_RD_LAZY 0x17a6
+#define regGCEA_DRAM_RD_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_WR_LAZY 0x17a7
+#define regGCEA_DRAM_WR_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_RD_CAM_CNTL 0x17a8
+#define regGCEA_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_WR_CAM_CNTL 0x17a9
+#define regGCEA_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_PAGE_BURST 0x17aa
+#define regGCEA_DRAM_PAGE_BURST_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_AGE 0x17ab
+#define regGCEA_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_AGE 0x17ac
+#define regGCEA_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUEUING 0x17ad
+#define regGCEA_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUEUING 0x17ae
+#define regGCEA_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_FIXED 0x17af
+#define regGCEA_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_FIXED 0x17b0
+#define regGCEA_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_URGENCY 0x17b1
+#define regGCEA_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_URGENCY 0x17b2
+#define regGCEA_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1 0x17b3
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2 0x17b4
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3 0x17b5
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1 0x17b6
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2 0x17b7
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3 0x17b8
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP0 0x187d
+#define regGCEA_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP1 0x187e
+#define regGCEA_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP0 0x187f
+#define regGCEA_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP1 0x1880
+#define regGCEA_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_RD_COMBINE_FLUSH 0x1881
+#define regGCEA_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_WR_COMBINE_FLUSH 0x1882
+#define regGCEA_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_GROUP_BURST 0x1883
+#define regGCEA_IO_GROUP_BURST_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_AGE 0x1884
+#define regGCEA_IO_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_AGE 0x1885
+#define regGCEA_IO_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUEUING 0x1886
+#define regGCEA_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUEUING 0x1887
+#define regGCEA_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_FIXED 0x1888
+#define regGCEA_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_FIXED 0x1889
+#define regGCEA_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY 0x188a
+#define regGCEA_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY 0x188b
+#define regGCEA_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING 0x188c
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING 0x188d
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI1 0x188e
+#define regGCEA_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI2 0x188f
+#define regGCEA_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI3 0x1890
+#define regGCEA_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI1 0x1891
+#define regGCEA_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI2 0x1892
+#define regGCEA_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI3 0x1893
+#define regGCEA_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_SDP_ARB_DRAM 0x1894
+#define regGCEA_SDP_ARB_DRAM_BASE_IDX 0
+#define regGCEA_SDP_ARB_FINAL 0x1896
+#define regGCEA_SDP_ARB_FINAL_BASE_IDX 0
+#define regGCEA_SDP_DRAM_PRIORITY 0x1897
+#define regGCEA_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_IO_PRIORITY 0x1899
+#define regGCEA_SDP_IO_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_CREDITS 0x189a
+#define regGCEA_SDP_CREDITS_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE0 0x189b
+#define regGCEA_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE1 0x189c
+#define regGCEA_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE0 0x189d
+#define regGCEA_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE1 0x189e
+#define regGCEA_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCD_RESERVE0 0x189f
+#define regGCEA_SDP_VCD_RESERVE0_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec2
+// base address: 0x9c00
+#define regGCEA_SDP_VCD_RESERVE1 0x14a0
+#define regGCEA_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_REQ_CNTL 0x14a1
+#define regGCEA_SDP_REQ_CNTL_BASE_IDX 0
+#define regGCEA_MISC 0x14a2
+#define regGCEA_MISC_BASE_IDX 0
+#define regGCEA_LATENCY_SAMPLING 0x14a3
+#define regGCEA_LATENCY_SAMPLING_BASE_IDX 0
+#define regGCEA_MAM_CTRL2 0x14a9
+#define regGCEA_MAM_CTRL2_BASE_IDX 0
+#define regGCEA_MAM_CTRL 0x14ab
+#define regGCEA_MAM_CTRL_BASE_IDX 0
+#define regGCEA_EDC_CNT 0x14b2
+#define regGCEA_EDC_CNT_BASE_IDX 0
+#define regGCEA_EDC_CNT2 0x14b3
+#define regGCEA_EDC_CNT2_BASE_IDX 0
+#define regGCEA_DSM_CNTL 0x14b4
+#define regGCEA_DSM_CNTL_BASE_IDX 0
+#define regGCEA_DSM_CNTLA 0x14b5
+#define regGCEA_DSM_CNTLA_BASE_IDX 0
+#define regGCEA_DSM_CNTLB 0x14b6
+#define regGCEA_DSM_CNTLB_BASE_IDX 0
+#define regGCEA_DSM_CNTL2 0x14b7
+#define regGCEA_DSM_CNTL2_BASE_IDX 0
+#define regGCEA_DSM_CNTL2A 0x14b8
+#define regGCEA_DSM_CNTL2A_BASE_IDX 0
+#define regGCEA_DSM_CNTL2B 0x14b9
+#define regGCEA_DSM_CNTL2B_BASE_IDX 0
+#define regGCEA_GL2C_XBR_CREDITS 0x14ba
+#define regGCEA_GL2C_XBR_CREDITS_BASE_IDX 0
+#define regGCEA_GL2C_XBR_MAXBURST 0x14bb
+#define regGCEA_GL2C_XBR_MAXBURST_BASE_IDX 0
+#define regGCEA_PROBE_CNTL 0x14bc
+#define regGCEA_PROBE_CNTL_BASE_IDX 0
+#define regGCEA_PROBE_MAP 0x14bd
+#define regGCEA_PROBE_MAP_BASE_IDX 0
+#define regGCEA_ERR_STATUS 0x14be
+#define regGCEA_ERR_STATUS_BASE_IDX 0
+#define regGCEA_MISC2 0x14bf
+#define regGCEA_MISC2_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec3
+// base address: 0x9dc0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0 0x1512
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1 0x1513
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0 0x1514
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1 0x1515
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS 0x1516
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0
+#define regGCEA_RRET_MEM_RESERVE 0x1518
+#define regGCEA_RRET_MEM_RESERVE_BASE_IDX 0
+#define regGCEA_EDC_CNT3 0x151a
+#define regGCEA_EDC_CNT3_BASE_IDX 0
+#define regGCEA_SDP_ENABLE 0x151e
+#define regGCEA_SDP_ENABLE_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec2
+// base address: 0x9c80
+#define regSPI_PQEV_CTRL 0x14c0
+#define regSPI_PQEV_CTRL_BASE_IDX 0
+#define regSPI_EXP_THROTTLE_CTRL 0x14c3
+#define regSPI_EXP_THROTTLE_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_rmi_rmidec
+// base address: 0x2e200
+#define regRMI_GENERAL_CNTL 0x1880
+#define regRMI_GENERAL_CNTL_BASE_IDX 1
+#define regRMI_GENERAL_CNTL1 0x1881
+#define regRMI_GENERAL_CNTL1_BASE_IDX 1
+#define regRMI_GENERAL_STATUS 0x1882
+#define regRMI_GENERAL_STATUS_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS0 0x1883
+#define regRMI_SUBBLOCK_STATUS0_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS1 0x1884
+#define regRMI_SUBBLOCK_STATUS1_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS2 0x1885
+#define regRMI_SUBBLOCK_STATUS2_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS3 0x1886
+#define regRMI_SUBBLOCK_STATUS3_BASE_IDX 1
+#define regRMI_XBAR_CONFIG 0x1887
+#define regRMI_XBAR_CONFIG_BASE_IDX 1
+#define regRMI_PROBE_POP_LOGIC_CNTL 0x1888
+#define regRMI_PROBE_POP_LOGIC_CNTL_BASE_IDX 1
+#define regRMI_UTC_XNACK_N_MISC_CNTL 0x1889
+#define regRMI_UTC_XNACK_N_MISC_CNTL_BASE_IDX 1
+#define regRMI_DEMUX_CNTL 0x188a
+#define regRMI_DEMUX_CNTL_BASE_IDX 1
+#define regRMI_UTCL1_CNTL1 0x188b
+#define regRMI_UTCL1_CNTL1_BASE_IDX 1
+#define regRMI_UTCL1_CNTL2 0x188c
+#define regRMI_UTCL1_CNTL2_BASE_IDX 1
+#define regRMI_UTC_UNIT_CONFIG 0x188d
+#define regRMI_UTC_UNIT_CONFIG_BASE_IDX 1
+#define regRMI_TCIW_FORMATTER0_CNTL 0x188e
+#define regRMI_TCIW_FORMATTER0_CNTL_BASE_IDX 1
+#define regRMI_TCIW_FORMATTER1_CNTL 0x188f
+#define regRMI_TCIW_FORMATTER1_CNTL_BASE_IDX 1
+#define regRMI_SCOREBOARD_CNTL 0x1890
+#define regRMI_SCOREBOARD_CNTL_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS0 0x1891
+#define regRMI_SCOREBOARD_STATUS0_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS1 0x1892
+#define regRMI_SCOREBOARD_STATUS1_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS2 0x1893
+#define regRMI_SCOREBOARD_STATUS2_BASE_IDX 1
+#define regRMI_XBAR_ARBITER_CONFIG 0x1894
+#define regRMI_XBAR_ARBITER_CONFIG_BASE_IDX 1
+#define regRMI_XBAR_ARBITER_CONFIG_1 0x1895
+#define regRMI_XBAR_ARBITER_CONFIG_1_BASE_IDX 1
+#define regRMI_CLOCK_CNTRL 0x1896
+#define regRMI_CLOCK_CNTRL_BASE_IDX 1
+#define regRMI_UTCL1_STATUS 0x1897
+#define regRMI_UTCL1_STATUS_BASE_IDX 1
+#define regRMI_RB_GLX_CID_MAP 0x1898
+#define regRMI_RB_GLX_CID_MAP_BASE_IDX 1
+#define regRMI_XNACK_DEBUG 0x189e
+#define regRMI_XNACK_DEBUG_BASE_IDX 1
+#define regRMI_SPARE 0x189f
+#define regRMI_SPARE_BASE_IDX 1
+#define regRMI_SPARE_1 0x18a0
+#define regRMI_SPARE_1_BASE_IDX 1
+#define regRMI_SPARE_2 0x18a1
+#define regRMI_SPARE_2_BASE_IDX 1
+#define regCC_RMI_REDUNDANCY 0x18a2
+#define regCC_RMI_REDUNDANCY_BASE_IDX 1
+
+
+// addressBlock: gc_pmmdec
+// base address: 0x9f80
+#define regGCR_PIO_CNTL 0x1580
+#define regGCR_PIO_CNTL_BASE_IDX 0
+#define regGCR_PIO_DATA 0x1581
+#define regGCR_PIO_DATA_BASE_IDX 0
+#define regPMM_CNTL 0x1582
+#define regPMM_CNTL_BASE_IDX 0
+#define regPMM_STATUS 0x1583
+#define regPMM_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_utcl1dec
+// base address: 0x9fb0
+#define regUTCL1_CTRL_1 0x158c
+#define regUTCL1_CTRL_1_BASE_IDX 0
+#define regUTCL1_ALOG 0x158f
+#define regUTCL1_ALOG_BASE_IDX 0
+#define regUTCL1_STATUS 0x1594
+#define regUTCL1_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_gcvmsharedpfdec
+// base address: 0xa000
+#define regGCMC_VM_NB_MMIOBASE 0x15a0
+#define regGCMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define regGCMC_VM_NB_MMIOLIMIT 0x15a1
+#define regGCMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define regGCMC_VM_NB_PCI_CTRL 0x15a2
+#define regGCMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define regGCMC_VM_NB_PCI_ARB 0x15a3
+#define regGCMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define regGCMC_VM_NB_TOP_OF_DRAM_SLOT1 0x15a4
+#define regGCMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define regGCMC_VM_NB_LOWER_TOP_OF_DRAM2 0x15a5
+#define regGCMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define regGCMC_VM_NB_UPPER_TOP_OF_DRAM2 0x15a6
+#define regGCMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define regGCMC_VM_FB_OFFSET 0x15a7
+#define regGCMC_VM_FB_OFFSET_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x15a8
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x15a9
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define regGCMC_VM_STEERING 0x15aa
+#define regGCMC_VM_STEERING_BASE_IDX 0
+#define regGCMC_SHARED_VIRT_RESET_REQ 0x15ab
+#define regGCMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regGCMC_MEM_POWER_LS 0x15ac
+#define regGCMC_MEM_POWER_LS_BASE_IDX 0
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x15ad
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x15ae
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_START 0x15af
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_END 0x15b0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_APT_CNTL 0x15b1
+#define regGCMC_VM_APT_CNTL_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_START 0x15b2
+#define regGCMC_VM_LOCAL_FB_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_END 0x15b3
+#define regGCMC_VM_LOCAL_FB_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL 0x15b4
+#define regGCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define regGCUTCL2_ICG_CTRL 0x15b5
+#define regGCUTCL2_ICG_CTRL_BASE_IDX 0
+#define regGCMC_SHARED_ACTIVE_FCN_ID 0x15b6
+#define regGCMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define regGCUTCL2_CGTT_BUSY_CTRL 0x15b7
+#define regGCUTCL2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regGCMC_VM_FB_NOALLOC_CNTL 0x15b8
+#define regGCMC_VM_FB_NOALLOC_CNTL_BASE_IDX 0
+#define regGCUTCL2_HARVEST_BYPASS_GROUPS 0x15b9
+#define regGCUTCL2_HARVEST_BYPASS_GROUPS_BASE_IDX 0
+#define regGCUTCL2_GROUP_RET_FAULT_STATUS 0x15bb
+#define regGCUTCL2_GROUP_RET_FAULT_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2pfdec
+// base address: 0xa080
+#define regGCVM_L2_CNTL 0x15c0
+#define regGCVM_L2_CNTL_BASE_IDX 0
+#define regGCVM_L2_CNTL2 0x15c1
+#define regGCVM_L2_CNTL2_BASE_IDX 0
+#define regGCVM_L2_CNTL3 0x15c2
+#define regGCVM_L2_CNTL3_BASE_IDX 0
+#define regGCVM_L2_STATUS 0x15c3
+#define regGCVM_L2_STATUS_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_CNTL 0x15c4
+#define regGCVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x15c5
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x15c6
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_CNTL 0x15c7
+#define regGCVM_INVALIDATE_CNTL_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_CNTL 0x15c8
+#define regGCVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_CNTL2 0x15c9
+#define regGCVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL3 0x15ca
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL4 0x15cb
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_STATUS 0x15cc
+#define regGCVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_LO32 0x15cd
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_HI32 0x15ce
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x15cf
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x15d0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x15d2
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x15d3
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x15d4
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x15d5
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x15d6
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x15d7
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define regGCVM_L2_CNTL4 0x15d8
+#define regGCVM_L2_CNTL4_BASE_IDX 0
+#define regGCVM_L2_MM_GROUP_RT_CLASSES 0x15d9
+#define regGCVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID 0x15da
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID2 0x15db
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define regGCVM_L2_CACHE_PARITY_CNTL 0x15dc
+#define regGCVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define regGCVM_L2_ICG_CTRL 0x15dd
+#define regGCVM_L2_ICG_CTRL_BASE_IDX 0
+#define regGCVM_L2_CNTL5 0x15de
+#define regGCVM_L2_CNTL5_BASE_IDX 0
+#define regGCVM_L2_GCR_CNTL 0x15df
+#define regGCVM_L2_GCR_CNTL_BASE_IDX 0
+#define regGCVML2_WALKER_MACRO_THROTTLE_TIME 0x15e0
+#define regGCVML2_WALKER_MACRO_THROTTLE_TIME_BASE_IDX 0
+#define regGCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT 0x15e1
+#define regGCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT_BASE_IDX 0
+#define regGCVML2_WALKER_MICRO_THROTTLE_TIME 0x15e2
+#define regGCVML2_WALKER_MICRO_THROTTLE_TIME_BASE_IDX 0
+#define regGCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT 0x15e3
+#define regGCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT_BASE_IDX 0
+#define regGCVM_L2_CGTT_BUSY_CTRL 0x15e4
+#define regGCVM_L2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regGCVM_L2_PTE_CACHE_DUMP_CNTL 0x15e5
+#define regGCVM_L2_PTE_CACHE_DUMP_CNTL_BASE_IDX 0
+#define regGCVM_L2_PTE_CACHE_DUMP_READ 0x15e6
+#define regGCVM_L2_PTE_CACHE_DUMP_READ_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_MASKS 0x15e9
+#define regGCVM_L2_BANK_SELECT_MASKS_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC 0x15ea
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC 0x15eb
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC 0x15ec
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC_BASE_IDX 0
+#define regGCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT 0x15ed
+#define regGCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT_BASE_IDX 0
+#define regGCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ 0x15ee
+#define regGCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ_BASE_IDX 0
+
+
+// addressBlock: gc_gcatcl2dec
+// base address: 0xa300
+#define regGC_ATC_L2_CNTL 0x1660
+#define regGC_ATC_L2_CNTL_BASE_IDX 0
+#define regGC_ATC_L2_CNTL2 0x1661
+#define regGC_ATC_L2_CNTL2_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA0 0x1664
+#define regGC_ATC_L2_CACHE_DATA0_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA1 0x1665
+#define regGC_ATC_L2_CACHE_DATA1_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA2 0x1666
+#define regGC_ATC_L2_CACHE_DATA2_BASE_IDX 0
+#define regGC_ATC_L2_CNTL3 0x1667
+#define regGC_ATC_L2_CNTL3_BASE_IDX 0
+#define regGC_ATC_L2_STATUS 0x1668
+#define regGC_ATC_L2_STATUS_BASE_IDX 0
+#define regGC_ATC_L2_STATUS2 0x1669
+#define regGC_ATC_L2_STATUS2_BASE_IDX 0
+#define regGC_ATC_L2_MISC_CG 0x166a
+#define regGC_ATC_L2_MISC_CG_BASE_IDX 0
+#define regGC_ATC_L2_MEM_POWER_LS 0x166b
+#define regGC_ATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define regGC_ATC_L2_SDPPORT_CTRL 0x166f
+#define regGC_ATC_L2_SDPPORT_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_gcl2tlbpfdec
+// base address: 0xa380
+#define regGCL2TLB_TLB0_STATUS 0x1681
+#define regGCL2TLB_TLB0_STATUS_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO 0x1683
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI 0x1684
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO 0x1685
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI 0x1686
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI_BASE_IDX 0
+
+
+// addressBlock: gc_gcvmsharedvcdec
+// base address: 0xa3a0
+#define regGCMC_VM_FB_LOCATION_BASE 0x1688
+#define regGCMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regGCMC_VM_FB_LOCATION_TOP 0x1689
+#define regGCMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define regGCMC_VM_AGP_TOP 0x168a
+#define regGCMC_VM_AGP_TOP_BASE_IDX 0
+#define regGCMC_VM_AGP_BOT 0x168b
+#define regGCMC_VM_AGP_BOT_BASE_IDX 0
+#define regGCMC_VM_AGP_BASE 0x168c
+#define regGCMC_VM_AGP_BASE_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x168d
+#define regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x168e
+#define regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define regGCMC_VM_MX_L1_TLB_CNTL 0x168f
+#define regGCMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2vcdec
+// base address: 0xa3e0
+#define regGCVM_CONTEXT0_CNTL 0x1698
+#define regGCVM_CONTEXT0_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT1_CNTL 0x1699
+#define regGCVM_CONTEXT1_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT2_CNTL 0x169a
+#define regGCVM_CONTEXT2_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT3_CNTL 0x169b
+#define regGCVM_CONTEXT3_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT4_CNTL 0x169c
+#define regGCVM_CONTEXT4_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT5_CNTL 0x169d
+#define regGCVM_CONTEXT5_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT6_CNTL 0x169e
+#define regGCVM_CONTEXT6_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT7_CNTL 0x169f
+#define regGCVM_CONTEXT7_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT8_CNTL 0x16a0
+#define regGCVM_CONTEXT8_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT9_CNTL 0x16a1
+#define regGCVM_CONTEXT9_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT10_CNTL 0x16a2
+#define regGCVM_CONTEXT10_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT11_CNTL 0x16a3
+#define regGCVM_CONTEXT11_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT12_CNTL 0x16a4
+#define regGCVM_CONTEXT12_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT13_CNTL 0x16a5
+#define regGCVM_CONTEXT13_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT14_CNTL 0x16a6
+#define regGCVM_CONTEXT14_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT15_CNTL 0x16a7
+#define regGCVM_CONTEXT15_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXTS_DISABLE 0x16a8
+#define regGCVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_SEM 0x16a9
+#define regGCVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_SEM 0x16aa
+#define regGCVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_SEM 0x16ab
+#define regGCVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_SEM 0x16ac
+#define regGCVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_SEM 0x16ad
+#define regGCVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_SEM 0x16ae
+#define regGCVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_SEM 0x16af
+#define regGCVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_SEM 0x16b0
+#define regGCVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_SEM 0x16b1
+#define regGCVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_SEM 0x16b2
+#define regGCVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_SEM 0x16b3
+#define regGCVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_SEM 0x16b4
+#define regGCVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_SEM 0x16b5
+#define regGCVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_SEM 0x16b6
+#define regGCVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_SEM 0x16b7
+#define regGCVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_SEM 0x16b8
+#define regGCVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_SEM 0x16b9
+#define regGCVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_SEM 0x16ba
+#define regGCVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_REQ 0x16bb
+#define regGCVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_REQ 0x16bc
+#define regGCVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_REQ 0x16bd
+#define regGCVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_REQ 0x16be
+#define regGCVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_REQ 0x16bf
+#define regGCVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_REQ 0x16c0
+#define regGCVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_REQ 0x16c1
+#define regGCVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_REQ 0x16c2
+#define regGCVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_REQ 0x16c3
+#define regGCVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_REQ 0x16c4
+#define regGCVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_REQ 0x16c5
+#define regGCVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_REQ 0x16c6
+#define regGCVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_REQ 0x16c7
+#define regGCVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_REQ 0x16c8
+#define regGCVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_REQ 0x16c9
+#define regGCVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_REQ 0x16ca
+#define regGCVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_REQ 0x16cb
+#define regGCVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_REQ 0x16cc
+#define regGCVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ACK 0x16cd
+#define regGCVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ACK 0x16ce
+#define regGCVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ACK 0x16cf
+#define regGCVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ACK 0x16d0
+#define regGCVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ACK 0x16d1
+#define regGCVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ACK 0x16d2
+#define regGCVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ACK 0x16d3
+#define regGCVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ACK 0x16d4
+#define regGCVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ACK 0x16d5
+#define regGCVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ACK 0x16d6
+#define regGCVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ACK 0x16d7
+#define regGCVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ACK 0x16d8
+#define regGCVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ACK 0x16d9
+#define regGCVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ACK 0x16da
+#define regGCVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ACK 0x16db
+#define regGCVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ACK 0x16dc
+#define regGCVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ACK 0x16dd
+#define regGCVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ACK 0x16de
+#define regGCVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x16df
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x16e0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x16e1
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x16e2
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x16e3
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x16e4
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x16e5
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x16e6
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x16e7
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x16e8
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x16e9
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x16ea
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x16eb
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x16ec
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x16ed
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x16ee
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x16ef
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x16f0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x16f1
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x16f2
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x16f3
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x16f4
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x16f5
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x16f6
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x16f7
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x16f8
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x16f9
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x16fa
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x16fb
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x16fc
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x16fd
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x16fe
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x16ff
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x1700
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x1701
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x1702
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x1703
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x1704
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x1705
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x1706
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x1707
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x1708
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x1709
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x170a
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x170b
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x170c
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x170d
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x170e
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x170f
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x1710
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x1711
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x1712
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x1713
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x1714
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x1715
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x1716
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x1717
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x1718
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x1719
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x171a
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x171b
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x171c
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x171d
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x171e
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x171f
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x1720
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x1721
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x1722
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x1723
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x1724
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x1725
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x1726
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x1727
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x1728
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x1729
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x172a
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x172b
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x172c
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x172d
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x172e
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x172f
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x1730
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x1731
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x1732
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x1733
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x1734
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x1735
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x1736
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x1737
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x1738
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x1739
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x173a
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x173b
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x173c
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x173d
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x173e
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x173f
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x1740
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x1741
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x1742
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x1743
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x1744
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x1745
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x1746
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x1747
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x1748
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x1749
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x174a
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x174b
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x174c
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x174d
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x174e
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x174f
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x1750
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x1751
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x1752
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x1753
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x1754
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x1755
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x1756
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x1757
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x1758
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x1759
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x175a
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x175b
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x175c
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x175d
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x175e
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x175f
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x1760
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x1761
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x1762
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1763
+#define regGCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1764
+#define regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1765
+#define regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1766
+#define regGCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1767
+#define regGCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1768
+#define regGCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1769
+#define regGCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176a
+#define regGCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176b
+#define regGCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176c
+#define regGCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176d
+#define regGCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176e
+#define regGCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176f
+#define regGCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1770
+#define regGCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1771
+#define regGCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1772
+#define regGCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1773
+#define regGCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2perfddec
+// base address: 0x35380
+#define regGCVML2_PERFCOUNTER2_0_LO 0x34e0
+#define regGCVML2_PERFCOUNTER2_0_LO_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_LO 0x34e1
+#define regGCVML2_PERFCOUNTER2_1_LO_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_HI 0x34e2
+#define regGCVML2_PERFCOUNTER2_0_HI_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_HI 0x34e3
+#define regGCVML2_PERFCOUNTER2_1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2prdec
+// base address: 0x35390
+#define regGCMC_VM_L2_PERFCOUNTER_LO 0x34e4
+#define regGCMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER_HI 0x34e5
+#define regGCMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_LO 0x34e6
+#define regGCUTCL2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_HI 0x34e7
+#define regGCUTCL2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2perfddec
+// base address: 0x353d0
+#define regGC_ATC_L2_PERFCOUNTER2_LO 0x34f4
+#define regGC_ATC_L2_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_HI 0x34f5
+#define regGC_ATC_L2_PERFCOUNTER2_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2pfcntrdec
+// base address: 0x353e0
+#define regGC_ATC_L2_PERFCOUNTER_LO 0x34f8
+#define regGC_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER_HI 0x34f9
+#define regGC_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbprdec
+// base address: 0x353e8
+#define regGCL2TLB_PERFCOUNTER_LO 0x34fa
+#define regGCL2TLB_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER_HI 0x34fb
+#define regGCL2TLB_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2perfsdec
+// base address: 0x37480
+#define regGCVML2_PERFCOUNTER2_0_SELECT 0x3d20
+#define regGCVML2_PERFCOUNTER2_0_SELECT_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_SELECT 0x3d21
+#define regGCVML2_PERFCOUNTER2_1_SELECT_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_SELECT1 0x3d22
+#define regGCVML2_PERFCOUNTER2_0_SELECT1_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_SELECT1 0x3d23
+#define regGCVML2_PERFCOUNTER2_1_SELECT1_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_MODE 0x3d24
+#define regGCVML2_PERFCOUNTER2_0_MODE_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_MODE 0x3d25
+#define regGCVML2_PERFCOUNTER2_1_MODE_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2pldec
+// base address: 0x374c0
+#define regGCMC_VM_L2_PERFCOUNTER0_CFG 0x3d30
+#define regGCMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER1_CFG 0x3d31
+#define regGCMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER2_CFG 0x3d32
+#define regGCMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER3_CFG 0x3d33
+#define regGCMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER4_CFG 0x3d34
+#define regGCMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER5_CFG 0x3d35
+#define regGCMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER6_CFG 0x3d36
+#define regGCMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER7_CFG 0x3d37
+#define regGCMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3d38
+#define regGCMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER0_CFG 0x3d39
+#define regGCUTCL2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER1_CFG 0x3d3a
+#define regGCUTCL2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER2_CFG 0x3d3b
+#define regGCUTCL2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER3_CFG 0x3d3c
+#define regGCUTCL2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_RSLT_CNTL 0x3d3d
+#define regGCUTCL2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2perfsdec
+// base address: 0x37500
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT 0x3d40
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT1 0x3d41
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_MODE 0x3d42
+#define regGC_ATC_L2_PERFCOUNTER2_MODE_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2pfcntldec
+// base address: 0x37510
+#define regGC_ATC_L2_PERFCOUNTER0_CFG 0x3d44
+#define regGC_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER1_CFG 0x3d45
+#define regGC_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x3d46
+#define regGC_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbpldec
+// base address: 0x37528
+#define regGCL2TLB_PERFCOUNTER0_CFG 0x3d4a
+#define regGCL2TLB_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER1_CFG 0x3d4b
+#define regGCL2TLB_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER2_CFG 0x3d4c
+#define regGCL2TLB_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER3_CFG 0x3d4d
+#define regGCL2TLB_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL 0x3d4e
+#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2pspdec
+// base address: 0x3f900
+#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41
+#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID_BASE_IDX 1
+#define regGCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE 0x5e43
+#define regGCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE_BASE_IDX 1
+#define regGCVM_IOMMU_CONTROL_REGISTER 0x5e44
+#define regGCVM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define regGCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x5e45
+#define regGCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define regGCVM_IOMMU_MMIO_CNTRL_1 0x5e46
+#define regGCVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_0 0x5e47
+#define regGCMC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_1 0x5e48
+#define regGCMC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_2 0x5e49
+#define regGCMC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_3 0x5e4a
+#define regGCMC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_4 0x5e4b
+#define regGCMC_VM_MARC_BASE_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_5 0x5e4c
+#define regGCMC_VM_MARC_BASE_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_6 0x5e4d
+#define regGCMC_VM_MARC_BASE_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_7 0x5e4e
+#define regGCMC_VM_MARC_BASE_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_8 0x5e4f
+#define regGCMC_VM_MARC_BASE_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_9 0x5e50
+#define regGCMC_VM_MARC_BASE_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_10 0x5e51
+#define regGCMC_VM_MARC_BASE_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_11 0x5e52
+#define regGCMC_VM_MARC_BASE_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_12 0x5e53
+#define regGCMC_VM_MARC_BASE_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_13 0x5e54
+#define regGCMC_VM_MARC_BASE_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_14 0x5e55
+#define regGCMC_VM_MARC_BASE_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_15 0x5e56
+#define regGCMC_VM_MARC_BASE_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_0 0x5e57
+#define regGCMC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_1 0x5e58
+#define regGCMC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_2 0x5e59
+#define regGCMC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_3 0x5e5a
+#define regGCMC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_4 0x5e5b
+#define regGCMC_VM_MARC_BASE_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_5 0x5e5c
+#define regGCMC_VM_MARC_BASE_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_6 0x5e5d
+#define regGCMC_VM_MARC_BASE_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_7 0x5e5e
+#define regGCMC_VM_MARC_BASE_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_8 0x5e5f
+#define regGCMC_VM_MARC_BASE_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_9 0x5e60
+#define regGCMC_VM_MARC_BASE_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_10 0x5e61
+#define regGCMC_VM_MARC_BASE_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_11 0x5e62
+#define regGCMC_VM_MARC_BASE_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_12 0x5e63
+#define regGCMC_VM_MARC_BASE_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_13 0x5e64
+#define regGCMC_VM_MARC_BASE_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_14 0x5e65
+#define regGCMC_VM_MARC_BASE_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_15 0x5e66
+#define regGCMC_VM_MARC_BASE_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_0 0x5e67
+#define regGCMC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_1 0x5e68
+#define regGCMC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_2 0x5e69
+#define regGCMC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_3 0x5e6a
+#define regGCMC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_4 0x5e6b
+#define regGCMC_VM_MARC_RELOC_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_5 0x5e6c
+#define regGCMC_VM_MARC_RELOC_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_6 0x5e6d
+#define regGCMC_VM_MARC_RELOC_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_7 0x5e6e
+#define regGCMC_VM_MARC_RELOC_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_8 0x5e6f
+#define regGCMC_VM_MARC_RELOC_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_9 0x5e70
+#define regGCMC_VM_MARC_RELOC_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_10 0x5e71
+#define regGCMC_VM_MARC_RELOC_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_11 0x5e72
+#define regGCMC_VM_MARC_RELOC_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_12 0x5e73
+#define regGCMC_VM_MARC_RELOC_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_13 0x5e74
+#define regGCMC_VM_MARC_RELOC_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_14 0x5e75
+#define regGCMC_VM_MARC_RELOC_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_15 0x5e76
+#define regGCMC_VM_MARC_RELOC_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_0 0x5e77
+#define regGCMC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_1 0x5e78
+#define regGCMC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_2 0x5e79
+#define regGCMC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_3 0x5e7a
+#define regGCMC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_4 0x5e7b
+#define regGCMC_VM_MARC_RELOC_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_5 0x5e7c
+#define regGCMC_VM_MARC_RELOC_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_6 0x5e7d
+#define regGCMC_VM_MARC_RELOC_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_7 0x5e7e
+#define regGCMC_VM_MARC_RELOC_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_8 0x5e7f
+#define regGCMC_VM_MARC_RELOC_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_9 0x5e80
+#define regGCMC_VM_MARC_RELOC_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_10 0x5e81
+#define regGCMC_VM_MARC_RELOC_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_11 0x5e82
+#define regGCMC_VM_MARC_RELOC_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_12 0x5e83
+#define regGCMC_VM_MARC_RELOC_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_13 0x5e84
+#define regGCMC_VM_MARC_RELOC_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_14 0x5e85
+#define regGCMC_VM_MARC_RELOC_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_15 0x5e86
+#define regGCMC_VM_MARC_RELOC_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_0 0x5e87
+#define regGCMC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_1 0x5e88
+#define regGCMC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_2 0x5e89
+#define regGCMC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_3 0x5e8a
+#define regGCMC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_4 0x5e8b
+#define regGCMC_VM_MARC_LEN_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_5 0x5e8c
+#define regGCMC_VM_MARC_LEN_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_6 0x5e8d
+#define regGCMC_VM_MARC_LEN_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_7 0x5e8e
+#define regGCMC_VM_MARC_LEN_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_8 0x5e8f
+#define regGCMC_VM_MARC_LEN_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_9 0x5e90
+#define regGCMC_VM_MARC_LEN_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_10 0x5e91
+#define regGCMC_VM_MARC_LEN_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_11 0x5e92
+#define regGCMC_VM_MARC_LEN_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_12 0x5e93
+#define regGCMC_VM_MARC_LEN_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_13 0x5e94
+#define regGCMC_VM_MARC_LEN_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_14 0x5e95
+#define regGCMC_VM_MARC_LEN_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_15 0x5e96
+#define regGCMC_VM_MARC_LEN_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_0 0x5e97
+#define regGCMC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_1 0x5e98
+#define regGCMC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_2 0x5e99
+#define regGCMC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_3 0x5e9a
+#define regGCMC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_4 0x5e9b
+#define regGCMC_VM_MARC_LEN_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_5 0x5e9c
+#define regGCMC_VM_MARC_LEN_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_6 0x5e9d
+#define regGCMC_VM_MARC_LEN_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_7 0x5e9e
+#define regGCMC_VM_MARC_LEN_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_8 0x5e9f
+#define regGCMC_VM_MARC_LEN_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_9 0x5ea0
+#define regGCMC_VM_MARC_LEN_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_10 0x5ea1
+#define regGCMC_VM_MARC_LEN_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_11 0x5ea2
+#define regGCMC_VM_MARC_LEN_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_12 0x5ea3
+#define regGCMC_VM_MARC_LEN_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_13 0x5ea4
+#define regGCMC_VM_MARC_LEN_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_14 0x5ea5
+#define regGCMC_VM_MARC_LEN_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_15 0x5ea6
+#define regGCMC_VM_MARC_LEN_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_0 0x5ea7
+#define regGCMC_VM_MARC_PFVF_MAPPING_0_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_1 0x5ea8
+#define regGCMC_VM_MARC_PFVF_MAPPING_1_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_2 0x5ea9
+#define regGCMC_VM_MARC_PFVF_MAPPING_2_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_3 0x5eaa
+#define regGCMC_VM_MARC_PFVF_MAPPING_3_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_4 0x5eab
+#define regGCMC_VM_MARC_PFVF_MAPPING_4_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_5 0x5eac
+#define regGCMC_VM_MARC_PFVF_MAPPING_5_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_6 0x5ead
+#define regGCMC_VM_MARC_PFVF_MAPPING_6_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_7 0x5eae
+#define regGCMC_VM_MARC_PFVF_MAPPING_7_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_8 0x5eaf
+#define regGCMC_VM_MARC_PFVF_MAPPING_8_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_9 0x5eb0
+#define regGCMC_VM_MARC_PFVF_MAPPING_9_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_10 0x5eb1
+#define regGCMC_VM_MARC_PFVF_MAPPING_10_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_11 0x5eb2
+#define regGCMC_VM_MARC_PFVF_MAPPING_11_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_12 0x5eb3
+#define regGCMC_VM_MARC_PFVF_MAPPING_12_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_13 0x5eb4
+#define regGCMC_VM_MARC_PFVF_MAPPING_13_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_14 0x5eb5
+#define regGCMC_VM_MARC_PFVF_MAPPING_14_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_15 0x5eb6
+#define regGCMC_VM_MARC_PFVF_MAPPING_15_BASE_IDX 1
+#define regGCUTC_TRANSLATION_FAULT_CNTL0 0x5eb7
+#define regGCUTC_TRANSLATION_FAULT_CNTL0_BASE_IDX 1
+#define regGCUTC_TRANSLATION_FAULT_CNTL1 0x5eb8
+#define regGCUTC_TRANSLATION_FAULT_CNTL1_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbpspdec
+// base address: 0x3fb10
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL 0x5ec4
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_shdec
+// base address: 0xb000
+#define regSPI_SHADER_PGM_RSRC4_PS 0x19a1
+#define regSPI_SHADER_PGM_RSRC4_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_PS 0x19a6
+#define regSPI_SHADER_PGM_CHKSUM_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_PS 0x19a7
+#define regSPI_SHADER_PGM_RSRC3_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_PS 0x19a8
+#define regSPI_SHADER_PGM_LO_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_PS 0x19a9
+#define regSPI_SHADER_PGM_HI_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_PS 0x19aa
+#define regSPI_SHADER_PGM_RSRC1_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_PS 0x19ab
+#define regSPI_SHADER_PGM_RSRC2_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_0 0x19ac
+#define regSPI_SHADER_USER_DATA_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_1 0x19ad
+#define regSPI_SHADER_USER_DATA_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_2 0x19ae
+#define regSPI_SHADER_USER_DATA_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_3 0x19af
+#define regSPI_SHADER_USER_DATA_PS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_4 0x19b0
+#define regSPI_SHADER_USER_DATA_PS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_5 0x19b1
+#define regSPI_SHADER_USER_DATA_PS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_6 0x19b2
+#define regSPI_SHADER_USER_DATA_PS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_7 0x19b3
+#define regSPI_SHADER_USER_DATA_PS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_8 0x19b4
+#define regSPI_SHADER_USER_DATA_PS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_9 0x19b5
+#define regSPI_SHADER_USER_DATA_PS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_10 0x19b6
+#define regSPI_SHADER_USER_DATA_PS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_11 0x19b7
+#define regSPI_SHADER_USER_DATA_PS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_12 0x19b8
+#define regSPI_SHADER_USER_DATA_PS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_13 0x19b9
+#define regSPI_SHADER_USER_DATA_PS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_14 0x19ba
+#define regSPI_SHADER_USER_DATA_PS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_15 0x19bb
+#define regSPI_SHADER_USER_DATA_PS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_16 0x19bc
+#define regSPI_SHADER_USER_DATA_PS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_17 0x19bd
+#define regSPI_SHADER_USER_DATA_PS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_18 0x19be
+#define regSPI_SHADER_USER_DATA_PS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_19 0x19bf
+#define regSPI_SHADER_USER_DATA_PS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_20 0x19c0
+#define regSPI_SHADER_USER_DATA_PS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_21 0x19c1
+#define regSPI_SHADER_USER_DATA_PS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_22 0x19c2
+#define regSPI_SHADER_USER_DATA_PS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_23 0x19c3
+#define regSPI_SHADER_USER_DATA_PS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_24 0x19c4
+#define regSPI_SHADER_USER_DATA_PS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_25 0x19c5
+#define regSPI_SHADER_USER_DATA_PS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_26 0x19c6
+#define regSPI_SHADER_USER_DATA_PS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_27 0x19c7
+#define regSPI_SHADER_USER_DATA_PS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_28 0x19c8
+#define regSPI_SHADER_USER_DATA_PS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_29 0x19c9
+#define regSPI_SHADER_USER_DATA_PS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_30 0x19ca
+#define regSPI_SHADER_USER_DATA_PS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_31 0x19cb
+#define regSPI_SHADER_USER_DATA_PS_31_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_PS 0x19d0
+#define regSPI_SHADER_REQ_CTRL_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_0 0x19d2
+#define regSPI_SHADER_USER_ACCUM_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_1 0x19d3
+#define regSPI_SHADER_USER_ACCUM_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_2 0x19d4
+#define regSPI_SHADER_USER_ACCUM_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_3 0x19d5
+#define regSPI_SHADER_USER_ACCUM_PS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_GS 0x1a20
+#define regSPI_SHADER_PGM_CHKSUM_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_GS 0x1a21
+#define regSPI_SHADER_PGM_RSRC4_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS 0x1a22
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS 0x1a23
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES_GS 0x1a24
+#define regSPI_SHADER_PGM_LO_ES_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES_GS 0x1a25
+#define regSPI_SHADER_PGM_HI_ES_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_GS 0x1a27
+#define regSPI_SHADER_PGM_RSRC3_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_GS 0x1a28
+#define regSPI_SHADER_PGM_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_GS 0x1a29
+#define regSPI_SHADER_PGM_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_GS 0x1a2a
+#define regSPI_SHADER_PGM_RSRC1_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_GS 0x1a2b
+#define regSPI_SHADER_PGM_RSRC2_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_0 0x1a2c
+#define regSPI_SHADER_USER_DATA_GS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_1 0x1a2d
+#define regSPI_SHADER_USER_DATA_GS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_2 0x1a2e
+#define regSPI_SHADER_USER_DATA_GS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_3 0x1a2f
+#define regSPI_SHADER_USER_DATA_GS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_4 0x1a30
+#define regSPI_SHADER_USER_DATA_GS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_5 0x1a31
+#define regSPI_SHADER_USER_DATA_GS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_6 0x1a32
+#define regSPI_SHADER_USER_DATA_GS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_7 0x1a33
+#define regSPI_SHADER_USER_DATA_GS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_8 0x1a34
+#define regSPI_SHADER_USER_DATA_GS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_9 0x1a35
+#define regSPI_SHADER_USER_DATA_GS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_10 0x1a36
+#define regSPI_SHADER_USER_DATA_GS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_11 0x1a37
+#define regSPI_SHADER_USER_DATA_GS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_12 0x1a38
+#define regSPI_SHADER_USER_DATA_GS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_13 0x1a39
+#define regSPI_SHADER_USER_DATA_GS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_14 0x1a3a
+#define regSPI_SHADER_USER_DATA_GS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_15 0x1a3b
+#define regSPI_SHADER_USER_DATA_GS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_16 0x1a3c
+#define regSPI_SHADER_USER_DATA_GS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_17 0x1a3d
+#define regSPI_SHADER_USER_DATA_GS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_18 0x1a3e
+#define regSPI_SHADER_USER_DATA_GS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_19 0x1a3f
+#define regSPI_SHADER_USER_DATA_GS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_20 0x1a40
+#define regSPI_SHADER_USER_DATA_GS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_21 0x1a41
+#define regSPI_SHADER_USER_DATA_GS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_22 0x1a42
+#define regSPI_SHADER_USER_DATA_GS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_23 0x1a43
+#define regSPI_SHADER_USER_DATA_GS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_24 0x1a44
+#define regSPI_SHADER_USER_DATA_GS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_25 0x1a45
+#define regSPI_SHADER_USER_DATA_GS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_26 0x1a46
+#define regSPI_SHADER_USER_DATA_GS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_27 0x1a47
+#define regSPI_SHADER_USER_DATA_GS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_28 0x1a48
+#define regSPI_SHADER_USER_DATA_GS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_29 0x1a49
+#define regSPI_SHADER_USER_DATA_GS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_30 0x1a4a
+#define regSPI_SHADER_USER_DATA_GS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_31 0x1a4b
+#define regSPI_SHADER_USER_DATA_GS_31_BASE_IDX 0
+#define regSPI_SHADER_GS_MESHLET_DIM 0x1a4c
+#define regSPI_SHADER_GS_MESHLET_DIM_BASE_IDX 0
+#define regSPI_SHADER_GS_MESHLET_EXP_ALLOC 0x1a4d
+#define regSPI_SHADER_GS_MESHLET_EXP_ALLOC_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_ESGS 0x1a50
+#define regSPI_SHADER_REQ_CTRL_ESGS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_0 0x1a52
+#define regSPI_SHADER_USER_ACCUM_ESGS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_1 0x1a53
+#define regSPI_SHADER_USER_ACCUM_ESGS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_2 0x1a54
+#define regSPI_SHADER_USER_ACCUM_ESGS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_3 0x1a55
+#define regSPI_SHADER_USER_ACCUM_ESGS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES 0x1a68
+#define regSPI_SHADER_PGM_LO_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES 0x1a69
+#define regSPI_SHADER_PGM_HI_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_HS 0x1aa0
+#define regSPI_SHADER_PGM_CHKSUM_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_HS 0x1aa1
+#define regSPI_SHADER_PGM_RSRC4_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS 0x1aa2
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS 0x1aa3
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS_HS 0x1aa4
+#define regSPI_SHADER_PGM_LO_LS_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS_HS 0x1aa5
+#define regSPI_SHADER_PGM_HI_LS_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_HS 0x1aa7
+#define regSPI_SHADER_PGM_RSRC3_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_HS 0x1aa8
+#define regSPI_SHADER_PGM_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_HS 0x1aa9
+#define regSPI_SHADER_PGM_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_HS 0x1aaa
+#define regSPI_SHADER_PGM_RSRC1_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_HS 0x1aab
+#define regSPI_SHADER_PGM_RSRC2_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_0 0x1aac
+#define regSPI_SHADER_USER_DATA_HS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_1 0x1aad
+#define regSPI_SHADER_USER_DATA_HS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_2 0x1aae
+#define regSPI_SHADER_USER_DATA_HS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_3 0x1aaf
+#define regSPI_SHADER_USER_DATA_HS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_4 0x1ab0
+#define regSPI_SHADER_USER_DATA_HS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_5 0x1ab1
+#define regSPI_SHADER_USER_DATA_HS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_6 0x1ab2
+#define regSPI_SHADER_USER_DATA_HS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_7 0x1ab3
+#define regSPI_SHADER_USER_DATA_HS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_8 0x1ab4
+#define regSPI_SHADER_USER_DATA_HS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_9 0x1ab5
+#define regSPI_SHADER_USER_DATA_HS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_10 0x1ab6
+#define regSPI_SHADER_USER_DATA_HS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_11 0x1ab7
+#define regSPI_SHADER_USER_DATA_HS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_12 0x1ab8
+#define regSPI_SHADER_USER_DATA_HS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_13 0x1ab9
+#define regSPI_SHADER_USER_DATA_HS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_14 0x1aba
+#define regSPI_SHADER_USER_DATA_HS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_15 0x1abb
+#define regSPI_SHADER_USER_DATA_HS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_16 0x1abc
+#define regSPI_SHADER_USER_DATA_HS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_17 0x1abd
+#define regSPI_SHADER_USER_DATA_HS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_18 0x1abe
+#define regSPI_SHADER_USER_DATA_HS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_19 0x1abf
+#define regSPI_SHADER_USER_DATA_HS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_20 0x1ac0
+#define regSPI_SHADER_USER_DATA_HS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_21 0x1ac1
+#define regSPI_SHADER_USER_DATA_HS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_22 0x1ac2
+#define regSPI_SHADER_USER_DATA_HS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_23 0x1ac3
+#define regSPI_SHADER_USER_DATA_HS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_24 0x1ac4
+#define regSPI_SHADER_USER_DATA_HS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_25 0x1ac5
+#define regSPI_SHADER_USER_DATA_HS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_26 0x1ac6
+#define regSPI_SHADER_USER_DATA_HS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_27 0x1ac7
+#define regSPI_SHADER_USER_DATA_HS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_28 0x1ac8
+#define regSPI_SHADER_USER_DATA_HS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_29 0x1ac9
+#define regSPI_SHADER_USER_DATA_HS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_30 0x1aca
+#define regSPI_SHADER_USER_DATA_HS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_31 0x1acb
+#define regSPI_SHADER_USER_DATA_HS_31_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_LSHS 0x1ad0
+#define regSPI_SHADER_REQ_CTRL_LSHS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_0 0x1ad2
+#define regSPI_SHADER_USER_ACCUM_LSHS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_1 0x1ad3
+#define regSPI_SHADER_USER_ACCUM_LSHS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_2 0x1ad4
+#define regSPI_SHADER_USER_ACCUM_LSHS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_3 0x1ad5
+#define regSPI_SHADER_USER_ACCUM_LSHS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS 0x1ae8
+#define regSPI_SHADER_PGM_LO_LS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS 0x1ae9
+#define regSPI_SHADER_PGM_HI_LS_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INITIATOR 0x1ba0
+#define regCOMPUTE_DISPATCH_INITIATOR_BASE_IDX 0
+#define regCOMPUTE_DIM_X 0x1ba1
+#define regCOMPUTE_DIM_X_BASE_IDX 0
+#define regCOMPUTE_DIM_Y 0x1ba2
+#define regCOMPUTE_DIM_Y_BASE_IDX 0
+#define regCOMPUTE_DIM_Z 0x1ba3
+#define regCOMPUTE_DIM_Z_BASE_IDX 0
+#define regCOMPUTE_START_X 0x1ba4
+#define regCOMPUTE_START_X_BASE_IDX 0
+#define regCOMPUTE_START_Y 0x1ba5
+#define regCOMPUTE_START_Y_BASE_IDX 0
+#define regCOMPUTE_START_Z 0x1ba6
+#define regCOMPUTE_START_Z_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_X 0x1ba7
+#define regCOMPUTE_NUM_THREAD_X_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Y 0x1ba8
+#define regCOMPUTE_NUM_THREAD_Y_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Z 0x1ba9
+#define regCOMPUTE_NUM_THREAD_Z_BASE_IDX 0
+#define regCOMPUTE_PIPELINESTAT_ENABLE 0x1baa
+#define regCOMPUTE_PIPELINESTAT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PERFCOUNT_ENABLE 0x1bab
+#define regCOMPUTE_PERFCOUNT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PGM_LO 0x1bac
+#define regCOMPUTE_PGM_LO_BASE_IDX 0
+#define regCOMPUTE_PGM_HI 0x1bad
+#define regCOMPUTE_PGM_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO 0x1bae
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI 0x1baf
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO 0x1bb0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI 0x1bb1
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC1 0x1bb2
+#define regCOMPUTE_PGM_RSRC1_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC2 0x1bb3
+#define regCOMPUTE_PGM_RSRC2_BASE_IDX 0
+#define regCOMPUTE_VMID 0x1bb4
+#define regCOMPUTE_VMID_BASE_IDX 0
+#define regCOMPUTE_RESOURCE_LIMITS 0x1bb5
+#define regCOMPUTE_RESOURCE_LIMITS_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE0 0x1bb6
+#define regCOMPUTE_DESTINATION_EN_SE0_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0 0x1bb6
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE1 0x1bb7
+#define regCOMPUTE_DESTINATION_EN_SE1_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1 0x1bb7
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1_BASE_IDX 0
+#define regCOMPUTE_TMPRING_SIZE 0x1bb8
+#define regCOMPUTE_TMPRING_SIZE_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE2 0x1bb9
+#define regCOMPUTE_DESTINATION_EN_SE2_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2 0x1bb9
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE3 0x1bba
+#define regCOMPUTE_DESTINATION_EN_SE3_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3 0x1bba
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define regCOMPUTE_RESTART_X 0x1bbb
+#define regCOMPUTE_RESTART_X_BASE_IDX 0
+#define regCOMPUTE_RESTART_Y 0x1bbc
+#define regCOMPUTE_RESTART_Y_BASE_IDX 0
+#define regCOMPUTE_RESTART_Z 0x1bbd
+#define regCOMPUTE_RESTART_Z_BASE_IDX 0
+#define regCOMPUTE_THREAD_TRACE_ENABLE 0x1bbe
+#define regCOMPUTE_THREAD_TRACE_ENABLE_BASE_IDX 0
+#define regCOMPUTE_MISC_RESERVED 0x1bbf
+#define regCOMPUTE_MISC_RESERVED_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_ID 0x1bc0
+#define regCOMPUTE_DISPATCH_ID_BASE_IDX 0
+#define regCOMPUTE_THREADGROUP_ID 0x1bc1
+#define regCOMPUTE_THREADGROUP_ID_BASE_IDX 0
+#define regCOMPUTE_REQ_CTRL 0x1bc2
+#define regCOMPUTE_REQ_CTRL_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_0 0x1bc4
+#define regCOMPUTE_USER_ACCUM_0_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_1 0x1bc5
+#define regCOMPUTE_USER_ACCUM_1_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_2 0x1bc6
+#define regCOMPUTE_USER_ACCUM_2_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_3 0x1bc7
+#define regCOMPUTE_USER_ACCUM_3_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC3 0x1bc8
+#define regCOMPUTE_PGM_RSRC3_BASE_IDX 0
+#define regCOMPUTE_DDID_INDEX 0x1bc9
+#define regCOMPUTE_DDID_INDEX_BASE_IDX 0
+#define regCOMPUTE_SHADER_CHKSUM 0x1bca
+#define regCOMPUTE_SHADER_CHKSUM_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE4 0x1bcb
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE5 0x1bcc
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE6 0x1bcd
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE7 0x1bce
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INTERLEAVE 0x1bcf
+#define regCOMPUTE_DISPATCH_INTERLEAVE_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH 0x1bd0
+#define regCOMPUTE_RELAUNCH_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO 0x1bd1
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI 0x1bd2
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH2 0x1bd3
+#define regCOMPUTE_RELAUNCH2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_0 0x1be0
+#define regCOMPUTE_USER_DATA_0_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_1 0x1be1
+#define regCOMPUTE_USER_DATA_1_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_2 0x1be2
+#define regCOMPUTE_USER_DATA_2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_3 0x1be3
+#define regCOMPUTE_USER_DATA_3_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_4 0x1be4
+#define regCOMPUTE_USER_DATA_4_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_5 0x1be5
+#define regCOMPUTE_USER_DATA_5_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_6 0x1be6
+#define regCOMPUTE_USER_DATA_6_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_7 0x1be7
+#define regCOMPUTE_USER_DATA_7_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_8 0x1be8
+#define regCOMPUTE_USER_DATA_8_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_9 0x1be9
+#define regCOMPUTE_USER_DATA_9_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_10 0x1bea
+#define regCOMPUTE_USER_DATA_10_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_11 0x1beb
+#define regCOMPUTE_USER_DATA_11_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_12 0x1bec
+#define regCOMPUTE_USER_DATA_12_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_13 0x1bed
+#define regCOMPUTE_USER_DATA_13_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_14 0x1bee
+#define regCOMPUTE_USER_DATA_14_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_15 0x1bef
+#define regCOMPUTE_USER_DATA_15_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_TUNNEL 0x1c1d
+#define regCOMPUTE_DISPATCH_TUNNEL_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_END 0x1c1e
+#define regCOMPUTE_DISPATCH_END_BASE_IDX 0
+#define regCOMPUTE_NOWHERE 0x1c1f
+#define regCOMPUTE_NOWHERE_BASE_IDX 0
+#define regSH_RESERVED_REG0 0x1c20
+#define regSH_RESERVED_REG0_BASE_IDX 0
+#define regSH_RESERVED_REG1 0x1c21
+#define regSH_RESERVED_REG1_BASE_IDX 0
+
+
+// addressBlock: gc_cppdec
+// base address: 0xc080
+#define regCP_CU_MASK_ADDR_LO 0x1dd2
+#define regCP_CU_MASK_ADDR_LO_BASE_IDX 0
+#define regCP_CU_MASK_ADDR_HI 0x1dd3
+#define regCP_CU_MASK_ADDR_HI_BASE_IDX 0
+#define regCP_CU_MASK_CNTL 0x1dd4
+#define regCP_CU_MASK_CNTL_BASE_IDX 0
+#define regCP_EOPQ_WAIT_TIME 0x1dd5
+#define regCP_EOPQ_WAIT_TIME_BASE_IDX 0
+#define regCP_CPC_MGCG_SYNC_CNTL 0x1dd6
+#define regCP_CPC_MGCG_SYNC_CNTL_BASE_IDX 0
+#define regCPC_INT_INFO 0x1dd7
+#define regCPC_INT_INFO_BASE_IDX 0
+#define regCP_VIRT_STATUS 0x1dd8
+#define regCP_VIRT_STATUS_BASE_IDX 0
+#define regCPC_INT_ADDR 0x1dd9
+#define regCPC_INT_ADDR_BASE_IDX 0
+#define regCPC_INT_PASID 0x1dda
+#define regCPC_INT_PASID_BASE_IDX 0
+#define regCP_GFX_ERROR 0x1ddb
+#define regCP_GFX_ERROR_BASE_IDX 0
+#define regCPG_UTCL1_CNTL 0x1ddc
+#define regCPG_UTCL1_CNTL_BASE_IDX 0
+#define regCPC_UTCL1_CNTL 0x1ddd
+#define regCPC_UTCL1_CNTL_BASE_IDX 0
+#define regCPF_UTCL1_CNTL 0x1dde
+#define regCPF_UTCL1_CNTL_BASE_IDX 0
+#define regCP_AQL_SMM_STATUS 0x1ddf
+#define regCP_AQL_SMM_STATUS_BASE_IDX 0
+#define regCP_RB0_BASE 0x1de0
+#define regCP_RB0_BASE_BASE_IDX 0
+#define regCP_RB_BASE 0x1de0
+#define regCP_RB_BASE_BASE_IDX 0
+#define regCP_RB0_CNTL 0x1de1
+#define regCP_RB0_CNTL_BASE_IDX 0
+#define regCP_RB_CNTL 0x1de1
+#define regCP_RB_CNTL_BASE_IDX 0
+#define regCP_RB_RPTR_WR 0x1de2
+#define regCP_RB_RPTR_WR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR 0x1de3
+#define regCP_RB0_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR 0x1de3
+#define regCP_RB_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR_HI 0x1de4
+#define regCP_RB0_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR_HI 0x1de4
+#define regCP_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB0_BUFSZ_MASK 0x1de5
+#define regCP_RB0_BUFSZ_MASK_BASE_IDX 0
+#define regCP_RB_BUFSZ_MASK 0x1de5
+#define regCP_RB_BUFSZ_MASK_BASE_IDX 0
+#define regGC_PRIV_MODE 0x1de8
+#define regGC_PRIV_MODE_BASE_IDX 0
+#define regCP_INT_CNTL 0x1de9
+#define regCP_INT_CNTL_BASE_IDX 0
+#define regCP_INT_STATUS 0x1dea
+#define regCP_INT_STATUS_BASE_IDX 0
+#define regCP_DEVICE_ID 0x1deb
+#define regCP_DEVICE_ID_BASE_IDX 0
+#define regCP_ME0_PIPE_PRIORITY_CNTS 0x1dec
+#define regCP_ME0_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_RING_PRIORITY_CNTS 0x1dec
+#define regCP_RING_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME0_PIPE0_PRIORITY 0x1ded
+#define regCP_ME0_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_RING0_PRIORITY 0x1ded
+#define regCP_RING0_PRIORITY_BASE_IDX 0
+#define regCP_ME0_PIPE1_PRIORITY 0x1dee
+#define regCP_ME0_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_RING1_PRIORITY 0x1dee
+#define regCP_RING1_PRIORITY_BASE_IDX 0
+#define regCP_FATAL_ERROR 0x1df0
+#define regCP_FATAL_ERROR_BASE_IDX 0
+#define regCP_RB_VMID 0x1df1
+#define regCP_RB_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE0_VMID 0x1df2
+#define regCP_ME0_PIPE0_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE1_VMID 0x1df3
+#define regCP_ME0_PIPE1_VMID_BASE_IDX 0
+#define regCP_RB0_WPTR 0x1df4
+#define regCP_RB0_WPTR_BASE_IDX 0
+#define regCP_RB_WPTR 0x1df4
+#define regCP_RB_WPTR_BASE_IDX 0
+#define regCP_RB0_WPTR_HI 0x1df5
+#define regCP_RB0_WPTR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_HI 0x1df5
+#define regCP_RB_WPTR_HI_BASE_IDX 0
+#define regCP_RB1_WPTR 0x1df6
+#define regCP_RB1_WPTR_BASE_IDX 0
+#define regCP_RB1_WPTR_HI 0x1df7
+#define regCP_RB1_WPTR_HI_BASE_IDX 0
+#define regCP_PROCESS_QUANTUM 0x1df9
+#define regCP_PROCESS_QUANTUM_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_LOWER 0x1dfa
+#define regCP_RB_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_UPPER 0x1dfb
+#define regCP_RB_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_LOWER 0x1dfc
+#define regCP_MEC_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_UPPER 0x1dfd
+#define regCP_MEC_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCPG_UTCL1_ERROR 0x1dfe
+#define regCPG_UTCL1_ERROR_BASE_IDX 0
+#define regCPC_UTCL1_ERROR 0x1dff
+#define regCPC_UTCL1_ERROR_BASE_IDX 0
+#define regCP_RB1_BASE 0x1e00
+#define regCP_RB1_BASE_BASE_IDX 0
+#define regCP_RB1_CNTL 0x1e01
+#define regCP_RB1_CNTL_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR 0x1e02
+#define regCP_RB1_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR_HI 0x1e03
+#define regCP_RB1_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB1_BUFSZ_MASK 0x1e04
+#define regCP_RB1_BUFSZ_MASK_BASE_IDX 0
+#define regCP_INT_CNTL_RING0 0x1e0a
+#define regCP_INT_CNTL_RING0_BASE_IDX 0
+#define regCP_INT_CNTL_RING1 0x1e0b
+#define regCP_INT_CNTL_RING1_BASE_IDX 0
+#define regCP_INT_STATUS_RING0 0x1e0d
+#define regCP_INT_STATUS_RING0_BASE_IDX 0
+#define regCP_INT_STATUS_RING1 0x1e0e
+#define regCP_INT_STATUS_RING1_BASE_IDX 0
+#define regCP_ME_F32_INTERRUPT 0x1e13
+#define regCP_ME_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PFP_F32_INTERRUPT 0x1e14
+#define regCP_PFP_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC1_F32_INTERRUPT 0x1e16
+#define regCP_MEC1_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC2_F32_INTERRUPT 0x1e17
+#define regCP_MEC2_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PWR_CNTL 0x1e18
+#define regCP_PWR_CNTL_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE 0x1e1a
+#define regCP_ECC_FIRSTOCCURRENCE_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING0 0x1e1b
+#define regCP_ECC_FIRSTOCCURRENCE_RING0_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING1 0x1e1c
+#define regCP_ECC_FIRSTOCCURRENCE_RING1_BASE_IDX 0
+#define regGB_EDC_MODE 0x1e1e
+#define regGB_EDC_MODE_BASE_IDX 0
+#define regCP_DEBUG 0x1e1f
+#define regCP_DEBUG_BASE_IDX 0
+#define regCP_CPF_DEBUG 0x1e20
+#define regCP_CPF_DEBUG_BASE_IDX 0
+#define regCP_CPC_DEBUG 0x1e21
+#define regCP_CPC_DEBUG_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL 0x1e23
+#define regCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL1 0x1e24
+#define regCP_PQ_WPTR_POLL_CNTL1_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_CNTL 0x1e25
+#define regCP_ME1_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_CNTL 0x1e26
+#define regCP_ME1_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_CNTL 0x1e27
+#define regCP_ME1_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_CNTL 0x1e28
+#define regCP_ME1_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_CNTL 0x1e29
+#define regCP_ME2_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_CNTL 0x1e2a
+#define regCP_ME2_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_CNTL 0x1e2b
+#define regCP_ME2_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_CNTL 0x1e2c
+#define regCP_ME2_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_STATUS 0x1e2d
+#define regCP_ME1_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_STATUS 0x1e2e
+#define regCP_ME1_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_STATUS 0x1e2f
+#define regCP_ME1_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_STATUS 0x1e30
+#define regCP_ME1_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_STATUS 0x1e31
+#define regCP_ME2_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_STATUS 0x1e32
+#define regCP_ME2_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_STATUS 0x1e33
+#define regCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_STATUS 0x1e34
+#define regCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_INT_STAT_DEBUG 0x1e35
+#define regCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_ME2_INT_STAT_DEBUG 0x1e36
+#define regCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_GFX_QUEUE_INDEX 0x1e37
+#define regCP_GFX_QUEUE_INDEX_BASE_IDX 0
+#define regCC_GC_EDC_CONFIG 0x1e38
+#define regCC_GC_EDC_CONFIG_BASE_IDX 0
+#define regCP_ME1_PIPE_PRIORITY_CNTS 0x1e39
+#define regCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME1_PIPE0_PRIORITY 0x1e3a
+#define regCP_ME1_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE1_PRIORITY 0x1e3b
+#define regCP_ME1_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE2_PRIORITY 0x1e3c
+#define regCP_ME1_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE3_PRIORITY 0x1e3d
+#define regCP_ME1_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE_PRIORITY_CNTS 0x1e3e
+#define regCP_ME2_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME2_PIPE0_PRIORITY 0x1e3f
+#define regCP_ME2_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE1_PRIORITY 0x1e40
+#define regCP_ME2_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE2_PRIORITY 0x1e41
+#define regCP_ME2_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE3_PRIORITY 0x1e42
+#define regCP_ME2_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START 0x1e44
+#define regCP_PFP_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START 0x1e45
+#define regCP_ME_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC1_PRGRM_CNTR_START 0x1e46
+#define regCP_MEC1_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC2_PRGRM_CNTR_START 0x1e47
+#define regCP_MEC2_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START 0x1e49
+#define regCP_PFP_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START 0x1e4a
+#define regCP_ME_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC1_INTR_ROUTINE_START 0x1e4b
+#define regCP_MEC1_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC2_INTR_ROUTINE_START 0x1e4c
+#define regCP_MEC2_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_CONTEXT_CNTL 0x1e4d
+#define regCP_CONTEXT_CNTL_BASE_IDX 0
+#define regCP_MAX_CONTEXT 0x1e4e
+#define regCP_MAX_CONTEXT_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME1 0x1e4f
+#define regCP_IQ_WAIT_TIME1_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME2 0x1e50
+#define regCP_IQ_WAIT_TIME2_BASE_IDX 0
+#define regCP_RB0_BASE_HI 0x1e51
+#define regCP_RB0_BASE_HI_BASE_IDX 0
+#define regCP_RB1_BASE_HI 0x1e52
+#define regCP_RB1_BASE_HI_BASE_IDX 0
+#define regCP_VMID_RESET 0x1e53
+#define regCP_VMID_RESET_BASE_IDX 0
+#define regCPC_INT_CNTL 0x1e54
+#define regCPC_INT_CNTL_BASE_IDX 0
+#define regCPC_INT_STATUS 0x1e55
+#define regCPC_INT_STATUS_BASE_IDX 0
+#define regCP_VMID_PREEMPT 0x1e56
+#define regCP_VMID_PREEMPT_BASE_IDX 0
+#define regCPC_INT_CNTX_ID 0x1e57
+#define regCPC_INT_CNTX_ID_BASE_IDX 0
+#define regCP_PQ_STATUS 0x1e58
+#define regCP_PQ_STATUS_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START_HI 0x1e59
+#define regCP_PFP_PRGRM_CNTR_START_HI_BASE_IDX 0
+#define regCP_MAX_DRAW_COUNT 0x1e5c
+#define regCP_MAX_DRAW_COUNT_BASE_IDX 0
+#define regCP_MEC1_F32_INT_DIS 0x1e5d
+#define regCP_MEC1_F32_INT_DIS_BASE_IDX 0
+#define regCP_MEC2_F32_INT_DIS 0x1e5e
+#define regCP_MEC2_F32_INT_DIS_BASE_IDX 0
+#define regCP_VMID_STATUS 0x1e5f
+#define regCP_VMID_STATUS_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO 0x1e60
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI 0x1e61
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_CONTROL 0x1e62
+#define regCPC_SUSPEND_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCPC_SUSPEND_CNTL_STACK_OFFSET 0x1e63
+#define regCPC_SUSPEND_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCPC_SUSPEND_CNTL_STACK_SIZE 0x1e64
+#define regCPC_SUSPEND_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCPC_SUSPEND_WG_STATE_OFFSET 0x1e65
+#define regCPC_SUSPEND_WG_STATE_OFFSET_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_SIZE 0x1e66
+#define regCPC_SUSPEND_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCPC_OS_PIPES 0x1e67
+#define regCPC_OS_PIPES_BASE_IDX 0
+#define regCP_SUSPEND_RESUME_REQ 0x1e68
+#define regCP_SUSPEND_RESUME_REQ_BASE_IDX 0
+#define regCP_SUSPEND_CNTL 0x1e69
+#define regCP_SUSPEND_CNTL_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME3 0x1e6a
+#define regCP_IQ_WAIT_TIME3_BASE_IDX 0
+#define regCPC_DDID_BASE_ADDR_LO 0x1e6b
+#define regCPC_DDID_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_DDID_BASE_ADDR_LO 0x1e6b
+#define regCP_DDID_BASE_ADDR_LO_BASE_IDX 0
+#define regCPC_DDID_BASE_ADDR_HI 0x1e6c
+#define regCPC_DDID_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_DDID_BASE_ADDR_HI 0x1e6c
+#define regCP_DDID_BASE_ADDR_HI_BASE_IDX 0
+#define regCPC_DDID_CNTL 0x1e6d
+#define regCPC_DDID_CNTL_BASE_IDX 0
+#define regCP_DDID_CNTL 0x1e6d
+#define regCP_DDID_CNTL_BASE_IDX 0
+#define regCP_GFX_DDID_INFLIGHT_COUNT 0x1e6e
+#define regCP_GFX_DDID_INFLIGHT_COUNT_BASE_IDX 0
+#define regCP_GFX_DDID_WPTR 0x1e6f
+#define regCP_GFX_DDID_WPTR_BASE_IDX 0
+#define regCP_GFX_DDID_RPTR 0x1e70
+#define regCP_GFX_DDID_RPTR_BASE_IDX 0
+#define regCP_GFX_DDID_DELTA_RPT_COUNT 0x1e71
+#define regCP_GFX_DDID_DELTA_RPT_COUNT_BASE_IDX 0
+#define regCP_GFX_HPD_STATUS0 0x1e72
+#define regCP_GFX_HPD_STATUS0_BASE_IDX 0
+#define regCP_GFX_HPD_CONTROL0 0x1e73
+#define regCP_GFX_HPD_CONTROL0_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_LO 0x1e74
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_LO_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_HI 0x1e75
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_HI_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_LO 0x1e76
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_LO_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_HI 0x1e77
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_HI_BASE_IDX 0
+#define regCP_GFX_INDEX_MUTEX 0x1e78
+#define regCP_GFX_INDEX_MUTEX_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START_HI 0x1e79
+#define regCP_ME_PRGRM_CNTR_START_HI_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START_HI 0x1e7a
+#define regCP_PFP_INTR_ROUTINE_START_HI_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START_HI 0x1e7b
+#define regCP_ME_INTR_ROUTINE_START_HI_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR 0x1e7e
+#define regCP_GFX_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR_HI 0x1e7f
+#define regCP_GFX_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_GFX_HQD_ACTIVE 0x1e80
+#define regCP_GFX_HQD_ACTIVE_BASE_IDX 0
+#define regCP_GFX_HQD_VMID 0x1e81
+#define regCP_GFX_HQD_VMID_BASE_IDX 0
+#define regCP_GFX_HQD_QUEUE_PRIORITY 0x1e84
+#define regCP_GFX_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_GFX_HQD_QUANTUM 0x1e85
+#define regCP_GFX_HQD_QUANTUM_BASE_IDX 0
+#define regCP_GFX_HQD_BASE 0x1e86
+#define regCP_GFX_HQD_BASE_BASE_IDX 0
+#define regCP_GFX_HQD_BASE_HI 0x1e87
+#define regCP_GFX_HQD_BASE_HI_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR 0x1e88
+#define regCP_GFX_HQD_RPTR_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR_ADDR 0x1e89
+#define regCP_GFX_HQD_RPTR_ADDR_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR_ADDR_HI 0x1e8a
+#define regCP_GFX_HQD_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_LO 0x1e8b
+#define regCP_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_HI 0x1e8c
+#define regCP_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL 0x1e8d
+#define regCP_RB_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_GFX_HQD_OFFSET 0x1e8e
+#define regCP_GFX_HQD_OFFSET_BASE_IDX 0
+#define regCP_GFX_HQD_CNTL 0x1e8f
+#define regCP_GFX_HQD_CNTL_BASE_IDX 0
+#define regCP_GFX_HQD_CSMD_RPTR 0x1e90
+#define regCP_GFX_HQD_CSMD_RPTR_BASE_IDX 0
+#define regCP_GFX_HQD_WPTR 0x1e91
+#define regCP_GFX_HQD_WPTR_BASE_IDX 0
+#define regCP_GFX_HQD_WPTR_HI 0x1e92
+#define regCP_GFX_HQD_WPTR_HI_BASE_IDX 0
+#define regCP_GFX_HQD_DEQUEUE_REQUEST 0x1e93
+#define regCP_GFX_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_GFX_HQD_MAPPED 0x1e94
+#define regCP_GFX_HQD_MAPPED_BASE_IDX 0
+#define regCP_GFX_HQD_QUE_MGR_CONTROL 0x1e95
+#define regCP_GFX_HQD_QUE_MGR_CONTROL_BASE_IDX 0
+#define regCP_GFX_HQD_IQ_TIMER 0x1e96
+#define regCP_GFX_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_GFX_HQD_HQ_STATUS0 0x1e98
+#define regCP_GFX_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_GFX_HQD_HQ_CONTROL0 0x1e99
+#define regCP_GFX_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_GFX_MQD_CONTROL 0x1e9a
+#define regCP_GFX_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_CONTROL 0x1e9f
+#define regCP_HQD_GFX_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_STATUS 0x1ea0
+#define regCP_HQD_GFX_STATUS_BASE_IDX 0
+#define regCP_DMA_WATCH0_ADDR_LO 0x1ec0
+#define regCP_DMA_WATCH0_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH0_ADDR_HI 0x1ec1
+#define regCP_DMA_WATCH0_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH0_MASK 0x1ec2
+#define regCP_DMA_WATCH0_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH0_CNTL 0x1ec3
+#define regCP_DMA_WATCH0_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH1_ADDR_LO 0x1ec4
+#define regCP_DMA_WATCH1_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH1_ADDR_HI 0x1ec5
+#define regCP_DMA_WATCH1_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH1_MASK 0x1ec6
+#define regCP_DMA_WATCH1_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH1_CNTL 0x1ec7
+#define regCP_DMA_WATCH1_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH2_ADDR_LO 0x1ec8
+#define regCP_DMA_WATCH2_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH2_ADDR_HI 0x1ec9
+#define regCP_DMA_WATCH2_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH2_MASK 0x1eca
+#define regCP_DMA_WATCH2_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH2_CNTL 0x1ecb
+#define regCP_DMA_WATCH2_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH3_ADDR_LO 0x1ecc
+#define regCP_DMA_WATCH3_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH3_ADDR_HI 0x1ecd
+#define regCP_DMA_WATCH3_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH3_MASK 0x1ece
+#define regCP_DMA_WATCH3_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH3_CNTL 0x1ecf
+#define regCP_DMA_WATCH3_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT_ADDR_LO 0x1ed0
+#define regCP_DMA_WATCH_STAT_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT_ADDR_HI 0x1ed1
+#define regCP_DMA_WATCH_STAT_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT 0x1ed2
+#define regCP_DMA_WATCH_STAT_BASE_IDX 0
+#define regCP_PFP_JT_STAT 0x1ed3
+#define regCP_PFP_JT_STAT_BASE_IDX 0
+#define regCP_MEC_JT_STAT 0x1ed5
+#define regCP_MEC_JT_STAT_BASE_IDX 0
+#define regCP_CPC_BUSY_HYSTERESIS 0x1edb
+#define regCP_CPC_BUSY_HYSTERESIS_BASE_IDX 0
+#define regCP_CPF_BUSY_HYSTERESIS1 0x1edc
+#define regCP_CPF_BUSY_HYSTERESIS1_BASE_IDX 0
+#define regCP_CPF_BUSY_HYSTERESIS2 0x1edd
+#define regCP_CPF_BUSY_HYSTERESIS2_BASE_IDX 0
+#define regCP_CPG_BUSY_HYSTERESIS1 0x1ede
+#define regCP_CPG_BUSY_HYSTERESIS1_BASE_IDX 0
+#define regCP_CPG_BUSY_HYSTERESIS2 0x1edf
+#define regCP_CPG_BUSY_HYSTERESIS2_BASE_IDX 0
+#define regCP_RB_DOORBELL_CLEAR 0x1f28
+#define regCP_RB_DOORBELL_CLEAR_BASE_IDX 0
+#define regCP_RB0_ACTIVE 0x1f40
+#define regCP_RB0_ACTIVE_BASE_IDX 0
+#define regCP_RB_ACTIVE 0x1f40
+#define regCP_RB_ACTIVE_BASE_IDX 0
+#define regCP_RB1_ACTIVE 0x1f41
+#define regCP_RB1_ACTIVE_BASE_IDX 0
+#define regCP_RB_STATUS 0x1f43
+#define regCP_RB_STATUS_BASE_IDX 0
+#define regCPG_RCIU_CAM_INDEX 0x1f44
+#define regCPG_RCIU_CAM_INDEX_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA 0x1f45
+#define regCPG_RCIU_CAM_DATA_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE0 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE0_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE1 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE1_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE2 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE2_BASE_IDX 0
+#define regCP_GPU_TIMESTAMP_OFFSET_LO 0x1f4c
+#define regCP_GPU_TIMESTAMP_OFFSET_LO_BASE_IDX 0
+#define regCP_GPU_TIMESTAMP_OFFSET_HI 0x1f4d
+#define regCP_GPU_TIMESTAMP_OFFSET_HI_BASE_IDX 0
+#define regCP_SDMA_DMA_DONE 0x1f4e
+#define regCP_SDMA_DMA_DONE_BASE_IDX 0
+#define regCP_PFP_SDMA_CS 0x1f4f
+#define regCP_PFP_SDMA_CS_BASE_IDX 0
+#define regCP_ME_SDMA_CS 0x1f50
+#define regCP_ME_SDMA_CS_BASE_IDX 0
+#define regCPF_GCR_CNTL 0x1f53
+#define regCPF_GCR_CNTL_BASE_IDX 0
+#define regCPG_UTCL1_STATUS 0x1f54
+#define regCPG_UTCL1_STATUS_BASE_IDX 0
+#define regCPC_UTCL1_STATUS 0x1f55
+#define regCPC_UTCL1_STATUS_BASE_IDX 0
+#define regCPF_UTCL1_STATUS 0x1f56
+#define regCPF_UTCL1_STATUS_BASE_IDX 0
+#define regCP_SD_CNTL 0x1f57
+#define regCP_SD_CNTL_BASE_IDX 0
+#define regCP_SOFT_RESET_CNTL 0x1f59
+#define regCP_SOFT_RESET_CNTL_BASE_IDX 0
+#define regCP_CPC_GFX_CNTL 0x1f5a
+#define regCP_CPC_GFX_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec
+// base address: 0xc700
+#define regSPI_ARB_PRIORITY 0x1f60
+#define regSPI_ARB_PRIORITY_BASE_IDX 0
+#define regSPI_ARB_CYCLES_0 0x1f61
+#define regSPI_ARB_CYCLES_0_BASE_IDX 0
+#define regSPI_ARB_CYCLES_1 0x1f62
+#define regSPI_ARB_CYCLES_1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_GFX 0x1f67
+#define regSPI_WCL_PIPE_PERCENT_GFX_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_HP3D 0x1f68
+#define regSPI_WCL_PIPE_PERCENT_HP3D_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS0 0x1f69
+#define regSPI_WCL_PIPE_PERCENT_CS0_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS1 0x1f6a
+#define regSPI_WCL_PIPE_PERCENT_CS1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS2 0x1f6b
+#define regSPI_WCL_PIPE_PERCENT_CS2_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS3 0x1f6c
+#define regSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS4 0x1f6d
+#define regSPI_WCL_PIPE_PERCENT_CS4_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS5 0x1f6e
+#define regSPI_WCL_PIPE_PERCENT_CS5_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS6 0x1f6f
+#define regSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS7 0x1f70
+#define regSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0
+#define regSPI_USER_ACCUM_VMID_CNTL 0x1f71
+#define regSPI_USER_ACCUM_VMID_CNTL_BASE_IDX 0
+#define regSPI_GDBG_PER_VMID_CNTL 0x1f72
+#define regSPI_GDBG_PER_VMID_CNTL_BASE_IDX 0
+#define regSPI_COMPUTE_QUEUE_RESET 0x1f73
+#define regSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
+#define regSPI_COMPUTE_WF_CTX_SAVE 0x1f74
+#define regSPI_COMPUTE_WF_CTX_SAVE_BASE_IDX 0
+
+
+// addressBlock: gc_cpphqddec
+// base address: 0xc800
+#define regCP_HPD_UTCL1_CNTL 0x1fa3
+#define regCP_HPD_UTCL1_CNTL_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR 0x1fa7
+#define regCP_HPD_UTCL1_ERROR_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR_ADDR 0x1fa8
+#define regCP_HPD_UTCL1_ERROR_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR 0x1fa9
+#define regCP_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR_HI 0x1faa
+#define regCP_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_ACTIVE 0x1fab
+#define regCP_HQD_ACTIVE_BASE_IDX 0
+#define regCP_HQD_VMID 0x1fac
+#define regCP_HQD_VMID_BASE_IDX 0
+#define regCP_HQD_PERSISTENT_STATE 0x1fad
+#define regCP_HQD_PERSISTENT_STATE_BASE_IDX 0
+#define regCP_HQD_PIPE_PRIORITY 0x1fae
+#define regCP_HQD_PIPE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUEUE_PRIORITY 0x1faf
+#define regCP_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUANTUM 0x1fb0
+#define regCP_HQD_QUANTUM_BASE_IDX 0
+#define regCP_HQD_PQ_BASE 0x1fb1
+#define regCP_HQD_PQ_BASE_BASE_IDX 0
+#define regCP_HQD_PQ_BASE_HI 0x1fb2
+#define regCP_HQD_PQ_BASE_HI_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR 0x1fb3
+#define regCP_HQD_PQ_RPTR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR 0x1fb4
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x1fb5
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR 0x1fb6
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x1fb7
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_DOORBELL_CONTROL 0x1fb8
+#define regCP_HQD_PQ_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_CONTROL 0x1fba
+#define regCP_HQD_PQ_CONTROL_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR 0x1fbb
+#define regCP_HQD_IB_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR_HI 0x1fbc
+#define regCP_HQD_IB_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_IB_RPTR 0x1fbd
+#define regCP_HQD_IB_RPTR_BASE_IDX 0
+#define regCP_HQD_IB_CONTROL 0x1fbe
+#define regCP_HQD_IB_CONTROL_BASE_IDX 0
+#define regCP_HQD_IQ_TIMER 0x1fbf
+#define regCP_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_HQD_IQ_RPTR 0x1fc0
+#define regCP_HQD_IQ_RPTR_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_REQUEST 0x1fc1
+#define regCP_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_HQD_DMA_OFFLOAD 0x1fc2
+#define regCP_HQD_DMA_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_OFFLOAD 0x1fc2
+#define regCP_HQD_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_SEMA_CMD 0x1fc3
+#define regCP_HQD_SEMA_CMD_BASE_IDX 0
+#define regCP_HQD_MSG_TYPE 0x1fc4
+#define regCP_HQD_MSG_TYPE_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_LO 0x1fc5
+#define regCP_HQD_ATOMIC0_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_HI 0x1fc6
+#define regCP_HQD_ATOMIC0_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_LO 0x1fc7
+#define regCP_HQD_ATOMIC1_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_HI 0x1fc8
+#define regCP_HQD_ATOMIC1_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER0 0x1fc9
+#define regCP_HQD_HQ_SCHEDULER0_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS0 0x1fc9
+#define regCP_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL0 0x1fca
+#define regCP_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER1 0x1fca
+#define regCP_HQD_HQ_SCHEDULER1_BASE_IDX 0
+#define regCP_MQD_CONTROL 0x1fcb
+#define regCP_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS1 0x1fcc
+#define regCP_HQD_HQ_STATUS1_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL1 0x1fcd
+#define regCP_HQD_HQ_CONTROL1_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR 0x1fce
+#define regCP_HQD_EOP_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR_HI 0x1fcf
+#define regCP_HQD_EOP_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_EOP_CONTROL 0x1fd0
+#define regCP_HQD_EOP_CONTROL_BASE_IDX 0
+#define regCP_HQD_EOP_RPTR 0x1fd1
+#define regCP_HQD_EOP_RPTR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR 0x1fd2
+#define regCP_HQD_EOP_WPTR_BASE_IDX 0
+#define regCP_HQD_EOP_EVENTS 0x1fd3
+#define regCP_HQD_EOP_EVENTS_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO 0x1fd4
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI 0x1fd5
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_CONTROL 0x1fd6
+#define regCP_HQD_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_OFFSET 0x1fd7
+#define regCP_HQD_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_SIZE 0x1fd8
+#define regCP_HQD_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCP_HQD_WG_STATE_OFFSET 0x1fd9
+#define regCP_HQD_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_SIZE 0x1fda
+#define regCP_HQD_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCP_HQD_GDS_RESOURCE_STATE 0x1fdb
+#define regCP_HQD_GDS_RESOURCE_STATE_BASE_IDX 0
+#define regCP_HQD_ERROR 0x1fdc
+#define regCP_HQD_ERROR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR_MEM 0x1fdd
+#define regCP_HQD_EOP_WPTR_MEM_BASE_IDX 0
+#define regCP_HQD_AQL_CONTROL 0x1fde
+#define regCP_HQD_AQL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_LO 0x1fdf
+#define regCP_HQD_PQ_WPTR_LO_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_HI 0x1fe0
+#define regCP_HQD_PQ_WPTR_HI_BASE_IDX 0
+#define regCP_HQD_SUSPEND_CNTL_STACK_OFFSET 0x1fe1
+#define regCP_HQD_SUSPEND_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT 0x1fe2
+#define regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT_BASE_IDX 0
+#define regCP_HQD_SUSPEND_WG_STATE_OFFSET 0x1fe3
+#define regCP_HQD_SUSPEND_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_DDID_RPTR 0x1fe4
+#define regCP_HQD_DDID_RPTR_BASE_IDX 0
+#define regCP_HQD_DDID_WPTR 0x1fe5
+#define regCP_HQD_DDID_WPTR_BASE_IDX 0
+#define regCP_HQD_DDID_INFLIGHT_COUNT 0x1fe6
+#define regCP_HQD_DDID_INFLIGHT_COUNT_BASE_IDX 0
+#define regCP_HQD_DDID_DELTA_RPT_COUNT 0x1fe7
+#define regCP_HQD_DDID_DELTA_RPT_COUNT_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_STATUS 0x1fe8
+#define regCP_HQD_DEQUEUE_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_tcpdec
+// base address: 0xca80
+#define regTCP_WATCH0_ADDR_H 0x2048
+#define regTCP_WATCH0_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH0_ADDR_L 0x2049
+#define regTCP_WATCH0_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH0_CNTL 0x204a
+#define regTCP_WATCH0_CNTL_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_H 0x204b
+#define regTCP_WATCH1_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_L 0x204c
+#define regTCP_WATCH1_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH1_CNTL 0x204d
+#define regTCP_WATCH1_CNTL_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_H 0x204e
+#define regTCP_WATCH2_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_L 0x204f
+#define regTCP_WATCH2_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH2_CNTL 0x2050
+#define regTCP_WATCH2_CNTL_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_H 0x2051
+#define regTCP_WATCH3_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_L 0x2052
+#define regTCP_WATCH3_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH3_CNTL 0x2053
+#define regTCP_WATCH3_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_gdspdec
+// base address: 0xcc00
+#define regGDS_VMID0_BASE 0x20a0
+#define regGDS_VMID0_BASE_BASE_IDX 0
+#define regGDS_VMID0_SIZE 0x20a1
+#define regGDS_VMID0_SIZE_BASE_IDX 0
+#define regGDS_VMID1_BASE 0x20a2
+#define regGDS_VMID1_BASE_BASE_IDX 0
+#define regGDS_VMID1_SIZE 0x20a3
+#define regGDS_VMID1_SIZE_BASE_IDX 0
+#define regGDS_VMID2_BASE 0x20a4
+#define regGDS_VMID2_BASE_BASE_IDX 0
+#define regGDS_VMID2_SIZE 0x20a5
+#define regGDS_VMID2_SIZE_BASE_IDX 0
+#define regGDS_VMID3_BASE 0x20a6
+#define regGDS_VMID3_BASE_BASE_IDX 0
+#define regGDS_VMID3_SIZE 0x20a7
+#define regGDS_VMID3_SIZE_BASE_IDX 0
+#define regGDS_VMID4_BASE 0x20a8
+#define regGDS_VMID4_BASE_BASE_IDX 0
+#define regGDS_VMID4_SIZE 0x20a9
+#define regGDS_VMID4_SIZE_BASE_IDX 0
+#define regGDS_VMID5_BASE 0x20aa
+#define regGDS_VMID5_BASE_BASE_IDX 0
+#define regGDS_VMID5_SIZE 0x20ab
+#define regGDS_VMID5_SIZE_BASE_IDX 0
+#define regGDS_VMID6_BASE 0x20ac
+#define regGDS_VMID6_BASE_BASE_IDX 0
+#define regGDS_VMID6_SIZE 0x20ad
+#define regGDS_VMID6_SIZE_BASE_IDX 0
+#define regGDS_VMID7_BASE 0x20ae
+#define regGDS_VMID7_BASE_BASE_IDX 0
+#define regGDS_VMID7_SIZE 0x20af
+#define regGDS_VMID7_SIZE_BASE_IDX 0
+#define regGDS_VMID8_BASE 0x20b0
+#define regGDS_VMID8_BASE_BASE_IDX 0
+#define regGDS_VMID8_SIZE 0x20b1
+#define regGDS_VMID8_SIZE_BASE_IDX 0
+#define regGDS_VMID9_BASE 0x20b2
+#define regGDS_VMID9_BASE_BASE_IDX 0
+#define regGDS_VMID9_SIZE 0x20b3
+#define regGDS_VMID9_SIZE_BASE_IDX 0
+#define regGDS_VMID10_BASE 0x20b4
+#define regGDS_VMID10_BASE_BASE_IDX 0
+#define regGDS_VMID10_SIZE 0x20b5
+#define regGDS_VMID10_SIZE_BASE_IDX 0
+#define regGDS_VMID11_BASE 0x20b6
+#define regGDS_VMID11_BASE_BASE_IDX 0
+#define regGDS_VMID11_SIZE 0x20b7
+#define regGDS_VMID11_SIZE_BASE_IDX 0
+#define regGDS_VMID12_BASE 0x20b8
+#define regGDS_VMID12_BASE_BASE_IDX 0
+#define regGDS_VMID12_SIZE 0x20b9
+#define regGDS_VMID12_SIZE_BASE_IDX 0
+#define regGDS_VMID13_BASE 0x20ba
+#define regGDS_VMID13_BASE_BASE_IDX 0
+#define regGDS_VMID13_SIZE 0x20bb
+#define regGDS_VMID13_SIZE_BASE_IDX 0
+#define regGDS_VMID14_BASE 0x20bc
+#define regGDS_VMID14_BASE_BASE_IDX 0
+#define regGDS_VMID14_SIZE 0x20bd
+#define regGDS_VMID14_SIZE_BASE_IDX 0
+#define regGDS_VMID15_BASE 0x20be
+#define regGDS_VMID15_BASE_BASE_IDX 0
+#define regGDS_VMID15_SIZE 0x20bf
+#define regGDS_VMID15_SIZE_BASE_IDX 0
+#define regGDS_GWS_VMID0 0x20c0
+#define regGDS_GWS_VMID0_BASE_IDX 0
+#define regGDS_GWS_VMID1 0x20c1
+#define regGDS_GWS_VMID1_BASE_IDX 0
+#define regGDS_GWS_VMID2 0x20c2
+#define regGDS_GWS_VMID2_BASE_IDX 0
+#define regGDS_GWS_VMID3 0x20c3
+#define regGDS_GWS_VMID3_BASE_IDX 0
+#define regGDS_GWS_VMID4 0x20c4
+#define regGDS_GWS_VMID4_BASE_IDX 0
+#define regGDS_GWS_VMID5 0x20c5
+#define regGDS_GWS_VMID5_BASE_IDX 0
+#define regGDS_GWS_VMID6 0x20c6
+#define regGDS_GWS_VMID6_BASE_IDX 0
+#define regGDS_GWS_VMID7 0x20c7
+#define regGDS_GWS_VMID7_BASE_IDX 0
+#define regGDS_GWS_VMID8 0x20c8
+#define regGDS_GWS_VMID8_BASE_IDX 0
+#define regGDS_GWS_VMID9 0x20c9
+#define regGDS_GWS_VMID9_BASE_IDX 0
+#define regGDS_GWS_VMID10 0x20ca
+#define regGDS_GWS_VMID10_BASE_IDX 0
+#define regGDS_GWS_VMID11 0x20cb
+#define regGDS_GWS_VMID11_BASE_IDX 0
+#define regGDS_GWS_VMID12 0x20cc
+#define regGDS_GWS_VMID12_BASE_IDX 0
+#define regGDS_GWS_VMID13 0x20cd
+#define regGDS_GWS_VMID13_BASE_IDX 0
+#define regGDS_GWS_VMID14 0x20ce
+#define regGDS_GWS_VMID14_BASE_IDX 0
+#define regGDS_GWS_VMID15 0x20cf
+#define regGDS_GWS_VMID15_BASE_IDX 0
+#define regGDS_OA_VMID0 0x20d0
+#define regGDS_OA_VMID0_BASE_IDX 0
+#define regGDS_OA_VMID1 0x20d1
+#define regGDS_OA_VMID1_BASE_IDX 0
+#define regGDS_OA_VMID2 0x20d2
+#define regGDS_OA_VMID2_BASE_IDX 0
+#define regGDS_OA_VMID3 0x20d3
+#define regGDS_OA_VMID3_BASE_IDX 0
+#define regGDS_OA_VMID4 0x20d4
+#define regGDS_OA_VMID4_BASE_IDX 0
+#define regGDS_OA_VMID5 0x20d5
+#define regGDS_OA_VMID5_BASE_IDX 0
+#define regGDS_OA_VMID6 0x20d6
+#define regGDS_OA_VMID6_BASE_IDX 0
+#define regGDS_OA_VMID7 0x20d7
+#define regGDS_OA_VMID7_BASE_IDX 0
+#define regGDS_OA_VMID8 0x20d8
+#define regGDS_OA_VMID8_BASE_IDX 0
+#define regGDS_OA_VMID9 0x20d9
+#define regGDS_OA_VMID9_BASE_IDX 0
+#define regGDS_OA_VMID10 0x20da
+#define regGDS_OA_VMID10_BASE_IDX 0
+#define regGDS_OA_VMID11 0x20db
+#define regGDS_OA_VMID11_BASE_IDX 0
+#define regGDS_OA_VMID12 0x20dc
+#define regGDS_OA_VMID12_BASE_IDX 0
+#define regGDS_OA_VMID13 0x20dd
+#define regGDS_OA_VMID13_BASE_IDX 0
+#define regGDS_OA_VMID14 0x20de
+#define regGDS_OA_VMID14_BASE_IDX 0
+#define regGDS_OA_VMID15 0x20df
+#define regGDS_OA_VMID15_BASE_IDX 0
+#define regGDS_GWS_RESET0 0x20e4
+#define regGDS_GWS_RESET0_BASE_IDX 0
+#define regGDS_GWS_RESET1 0x20e5
+#define regGDS_GWS_RESET1_BASE_IDX 0
+#define regGDS_GWS_RESOURCE_RESET 0x20e6
+#define regGDS_GWS_RESOURCE_RESET_BASE_IDX 0
+#define regGDS_COMPUTE_MAX_WAVE_ID 0x20e8
+#define regGDS_COMPUTE_MAX_WAVE_ID_BASE_IDX 0
+#define regGDS_OA_RESET_MASK 0x20e9
+#define regGDS_OA_RESET_MASK_BASE_IDX 0
+#define regGDS_OA_RESET 0x20ea
+#define regGDS_OA_RESET_BASE_IDX 0
+#define regGDS_CS_CTXSW_STATUS 0x20ed
+#define regGDS_CS_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT0 0x20ee
+#define regGDS_CS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT1 0x20ef
+#define regGDS_CS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT2 0x20f0
+#define regGDS_CS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT3 0x20f1
+#define regGDS_CS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_GFX_CTXSW_STATUS 0x20f2
+#define regGDS_GFX_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT0 0x20f7
+#define regGDS_PS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT1 0x20f8
+#define regGDS_PS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT2 0x20f9
+#define regGDS_PS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT3 0x20fa
+#define regGDS_PS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS_CTXSW_IDX 0x20fb
+#define regGDS_PS_CTXSW_IDX_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT0 0x2117
+#define regGDS_GS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT1 0x2118
+#define regGDS_GS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT2 0x2119
+#define regGDS_GS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT3 0x211a
+#define regGDS_GS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_MEMORY_CLEAN 0x211f
+#define regGDS_MEMORY_CLEAN_BASE_IDX 0
+
+
+// addressBlock: gc_rasdec
+// base address: 0xce00
+#define regRAS_SIGNATURE_CONTROL 0x2120
+#define regRAS_SIGNATURE_CONTROL_BASE_IDX 0
+#define regRAS_SIGNATURE_MASK 0x2121
+#define regRAS_SIGNATURE_MASK_BASE_IDX 0
+#define regRAS_SX_SIGNATURE0 0x2122
+#define regRAS_SX_SIGNATURE0_BASE_IDX 0
+#define regRAS_SX_SIGNATURE1 0x2123
+#define regRAS_SX_SIGNATURE1_BASE_IDX 0
+#define regRAS_SX_SIGNATURE2 0x2124
+#define regRAS_SX_SIGNATURE2_BASE_IDX 0
+#define regRAS_SX_SIGNATURE3 0x2125
+#define regRAS_SX_SIGNATURE3_BASE_IDX 0
+#define regRAS_DB_SIGNATURE0 0x212b
+#define regRAS_DB_SIGNATURE0_BASE_IDX 0
+#define regRAS_PA_SIGNATURE0 0x212c
+#define regRAS_PA_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE0 0x212f
+#define regRAS_SC_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE1 0x2130
+#define regRAS_SC_SIGNATURE1_BASE_IDX 0
+#define regRAS_SC_SIGNATURE2 0x2131
+#define regRAS_SC_SIGNATURE2_BASE_IDX 0
+#define regRAS_SC_SIGNATURE3 0x2132
+#define regRAS_SC_SIGNATURE3_BASE_IDX 0
+#define regRAS_SC_SIGNATURE4 0x2133
+#define regRAS_SC_SIGNATURE4_BASE_IDX 0
+#define regRAS_SC_SIGNATURE5 0x2134
+#define regRAS_SC_SIGNATURE5_BASE_IDX 0
+#define regRAS_SC_SIGNATURE6 0x2135
+#define regRAS_SC_SIGNATURE6_BASE_IDX 0
+#define regRAS_SC_SIGNATURE7 0x2136
+#define regRAS_SC_SIGNATURE7_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE0 0x2139
+#define regRAS_SPI_SIGNATURE0_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE1 0x213a
+#define regRAS_SPI_SIGNATURE1_BASE_IDX 0
+#define regRAS_CB_SIGNATURE0 0x213d
+#define regRAS_CB_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE0 0x213e
+#define regRAS_BCI_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE1 0x213f
+#define regRAS_BCI_SIGNATURE1_BASE_IDX 0
+
+
+// addressBlock: gc_gusdec
+// base address: 0x33000
+#define regGUS_IO_RD_COMBINE_FLUSH 0x2c00
+#define regGUS_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_IO_WR_COMBINE_FLUSH 0x2c01
+#define regGUS_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_IO_RD_PRI_AGE_RATE 0x2c02
+#define regGUS_IO_RD_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_IO_WR_PRI_AGE_RATE 0x2c03
+#define regGUS_IO_WR_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_IO_RD_PRI_AGE_COEFF 0x2c04
+#define regGUS_IO_RD_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_IO_WR_PRI_AGE_COEFF 0x2c05
+#define regGUS_IO_WR_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUEUING 0x2c06
+#define regGUS_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUEUING 0x2c07
+#define regGUS_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define regGUS_IO_RD_PRI_FIXED 0x2c08
+#define regGUS_IO_RD_PRI_FIXED_BASE_IDX 1
+#define regGUS_IO_WR_PRI_FIXED 0x2c09
+#define regGUS_IO_WR_PRI_FIXED_BASE_IDX 1
+#define regGUS_IO_RD_PRI_URGENCY_COEFF 0x2c0a
+#define regGUS_IO_RD_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_IO_WR_PRI_URGENCY_COEFF 0x2c0b
+#define regGUS_IO_WR_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_IO_RD_PRI_URGENCY_MODE 0x2c0c
+#define regGUS_IO_RD_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_IO_WR_PRI_URGENCY_MODE 0x2c0d
+#define regGUS_IO_WR_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI1 0x2c0e
+#define regGUS_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI2 0x2c0f
+#define regGUS_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI3 0x2c10
+#define regGUS_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI4 0x2c11
+#define regGUS_IO_RD_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI1 0x2c12
+#define regGUS_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI2 0x2c13
+#define regGUS_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI3 0x2c14
+#define regGUS_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI4 0x2c15
+#define regGUS_IO_WR_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI1 0x2c16
+#define regGUS_IO_RD_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI2 0x2c17
+#define regGUS_IO_RD_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI3 0x2c18
+#define regGUS_IO_RD_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI4 0x2c19
+#define regGUS_IO_RD_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI1 0x2c1a
+#define regGUS_IO_WR_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI2 0x2c1b
+#define regGUS_IO_WR_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI3 0x2c1c
+#define regGUS_IO_WR_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI4 0x2c1d
+#define regGUS_IO_WR_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_DRAM_COMBINE_FLUSH 0x2c1e
+#define regGUS_DRAM_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_DRAM_COMBINE_RD_WR_EN 0x2c1f
+#define regGUS_DRAM_COMBINE_RD_WR_EN_BASE_IDX 1
+#define regGUS_DRAM_PRI_AGE_RATE 0x2c20
+#define regGUS_DRAM_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_DRAM_PRI_AGE_COEFF 0x2c21
+#define regGUS_DRAM_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUEUING 0x2c22
+#define regGUS_DRAM_PRI_QUEUING_BASE_IDX 1
+#define regGUS_DRAM_PRI_FIXED 0x2c23
+#define regGUS_DRAM_PRI_FIXED_BASE_IDX 1
+#define regGUS_DRAM_PRI_URGENCY_COEFF 0x2c24
+#define regGUS_DRAM_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_DRAM_PRI_URGENCY_MODE 0x2c25
+#define regGUS_DRAM_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI1 0x2c26
+#define regGUS_DRAM_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI2 0x2c27
+#define regGUS_DRAM_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI3 0x2c28
+#define regGUS_DRAM_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI4 0x2c29
+#define regGUS_DRAM_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI5 0x2c2a
+#define regGUS_DRAM_PRI_QUANT_PRI5_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI1 0x2c2b
+#define regGUS_DRAM_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI2 0x2c2c
+#define regGUS_DRAM_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI3 0x2c2d
+#define regGUS_DRAM_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI4 0x2c2e
+#define regGUS_DRAM_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI5 0x2c2f
+#define regGUS_DRAM_PRI_QUANT1_PRI5_BASE_IDX 1
+#define regGUS_IO_GROUP_BURST 0x2c30
+#define regGUS_IO_GROUP_BURST_BASE_IDX 1
+#define regGUS_DRAM_GROUP_BURST 0x2c31
+#define regGUS_DRAM_GROUP_BURST_BASE_IDX 1
+#define regGUS_SDP_ARB_FINAL 0x2c32
+#define regGUS_SDP_ARB_FINAL_BASE_IDX 1
+#define regGUS_SDP_QOS_VC_PRIORITY 0x2c33
+#define regGUS_SDP_QOS_VC_PRIORITY_BASE_IDX 1
+#define regGUS_SDP_CREDITS 0x2c34
+#define regGUS_SDP_CREDITS_BASE_IDX 1
+#define regGUS_SDP_TAG_RESERVE0 0x2c35
+#define regGUS_SDP_TAG_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_TAG_RESERVE1 0x2c36
+#define regGUS_SDP_TAG_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_VCC_RESERVE0 0x2c37
+#define regGUS_SDP_VCC_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_VCC_RESERVE1 0x2c38
+#define regGUS_SDP_VCC_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_VCD_RESERVE0 0x2c39
+#define regGUS_SDP_VCD_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_VCD_RESERVE1 0x2c3a
+#define regGUS_SDP_VCD_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_REQ_CNTL 0x2c3b
+#define regGUS_SDP_REQ_CNTL_BASE_IDX 1
+#define regGUS_MISC 0x2c3c
+#define regGUS_MISC_BASE_IDX 1
+#define regGUS_LATENCY_SAMPLING 0x2c3d
+#define regGUS_LATENCY_SAMPLING_BASE_IDX 1
+#define regGUS_ERR_STATUS 0x2c3e
+#define regGUS_ERR_STATUS_BASE_IDX 1
+#define regGUS_MISC2 0x2c3f
+#define regGUS_MISC2_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_CMDCREDITS0 0x2c40
+#define regGUS_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_CMDCREDITS1 0x2c41
+#define regGUS_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_DATACREDITS0 0x2c42
+#define regGUS_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_DATACREDITS1 0x2c43
+#define regGUS_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_MISCCREDITS 0x2c44
+#define regGUS_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 1
+#define regGUS_SDP_ENABLE 0x2c45
+#define regGUS_SDP_ENABLE_BASE_IDX 1
+#define regGUS_L1_CH0_CMD_IN 0x2c46
+#define regGUS_L1_CH0_CMD_IN_BASE_IDX 1
+#define regGUS_L1_CH0_CMD_OUT 0x2c47
+#define regGUS_L1_CH0_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_IN 0x2c48
+#define regGUS_L1_CH0_DATA_IN_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_OUT 0x2c49
+#define regGUS_L1_CH0_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_U_IN 0x2c4a
+#define regGUS_L1_CH0_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_U_OUT 0x2c4b
+#define regGUS_L1_CH0_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_CMD_IN 0x2c4c
+#define regGUS_L1_CH1_CMD_IN_BASE_IDX 1
+#define regGUS_L1_CH1_CMD_OUT 0x2c4d
+#define regGUS_L1_CH1_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_IN 0x2c4e
+#define regGUS_L1_CH1_DATA_IN_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_OUT 0x2c4f
+#define regGUS_L1_CH1_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_U_IN 0x2c50
+#define regGUS_L1_CH1_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_U_OUT 0x2c51
+#define regGUS_L1_CH1_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_CMD_IN 0x2c52
+#define regGUS_L1_SA0_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA0_CMD_OUT 0x2c53
+#define regGUS_L1_SA0_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_IN 0x2c54
+#define regGUS_L1_SA0_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_OUT 0x2c55
+#define regGUS_L1_SA0_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_U_IN 0x2c56
+#define regGUS_L1_SA0_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_U_OUT 0x2c57
+#define regGUS_L1_SA0_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_CMD_IN 0x2c58
+#define regGUS_L1_SA1_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA1_CMD_OUT 0x2c59
+#define regGUS_L1_SA1_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_IN 0x2c5a
+#define regGUS_L1_SA1_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_OUT 0x2c5b
+#define regGUS_L1_SA1_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_U_IN 0x2c5c
+#define regGUS_L1_SA1_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_U_OUT 0x2c5d
+#define regGUS_L1_SA1_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_CMD_IN 0x2c5e
+#define regGUS_L1_SA2_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA2_CMD_OUT 0x2c5f
+#define regGUS_L1_SA2_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_IN 0x2c60
+#define regGUS_L1_SA2_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_OUT 0x2c61
+#define regGUS_L1_SA2_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_U_IN 0x2c62
+#define regGUS_L1_SA2_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_U_OUT 0x2c63
+#define regGUS_L1_SA2_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_CMD_IN 0x2c64
+#define regGUS_L1_SA3_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA3_CMD_OUT 0x2c65
+#define regGUS_L1_SA3_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_IN 0x2c66
+#define regGUS_L1_SA3_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_OUT 0x2c67
+#define regGUS_L1_SA3_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_U_IN 0x2c68
+#define regGUS_L1_SA3_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_U_OUT 0x2c69
+#define regGUS_L1_SA3_DATA_U_OUT_BASE_IDX 1
+#define regGUS_MISC3 0x2c6a
+#define regGUS_MISC3_BASE_IDX 1
+#define regGUS_WRRSP_FIFO_CNTL 0x2c6b
+#define regGUS_WRRSP_FIFO_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gfxdec0
+// base address: 0x28000
+#define regDB_RENDER_CONTROL 0x0000
+#define regDB_RENDER_CONTROL_BASE_IDX 1
+#define regDB_COUNT_CONTROL 0x0001
+#define regDB_COUNT_CONTROL_BASE_IDX 1
+#define regDB_DEPTH_VIEW 0x0002
+#define regDB_DEPTH_VIEW_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE 0x0003
+#define regDB_RENDER_OVERRIDE_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE2 0x0004
+#define regDB_RENDER_OVERRIDE2_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE 0x0005
+#define regDB_HTILE_DATA_BASE_BASE_IDX 1
+#define regDB_DEPTH_SIZE_XY 0x0007
+#define regDB_DEPTH_SIZE_XY_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MIN 0x0008
+#define regDB_DEPTH_BOUNDS_MIN_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MAX 0x0009
+#define regDB_DEPTH_BOUNDS_MAX_BASE_IDX 1
+#define regDB_STENCIL_CLEAR 0x000a
+#define regDB_STENCIL_CLEAR_BASE_IDX 1
+#define regDB_DEPTH_CLEAR 0x000b
+#define regDB_DEPTH_CLEAR_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_TL 0x000c
+#define regPA_SC_SCREEN_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_BR 0x000d
+#define regPA_SC_SCREEN_SCISSOR_BR_BASE_IDX 1
+#define regDB_RESERVED_REG_2 0x000f
+#define regDB_RESERVED_REG_2_BASE_IDX 1
+#define regDB_Z_INFO 0x0010
+#define regDB_Z_INFO_BASE_IDX 1
+#define regDB_STENCIL_INFO 0x0011
+#define regDB_STENCIL_INFO_BASE_IDX 1
+#define regDB_Z_READ_BASE 0x0012
+#define regDB_Z_READ_BASE_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE 0x0013
+#define regDB_STENCIL_READ_BASE_BASE_IDX 1
+#define regDB_Z_WRITE_BASE 0x0014
+#define regDB_Z_WRITE_BASE_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE 0x0015
+#define regDB_STENCIL_WRITE_BASE_BASE_IDX 1
+#define regDB_RESERVED_REG_1 0x0016
+#define regDB_RESERVED_REG_1_BASE_IDX 1
+#define regDB_RESERVED_REG_3 0x0017
+#define regDB_RESERVED_REG_3_BASE_IDX 1
+#define regDB_Z_READ_BASE_HI 0x001a
+#define regDB_Z_READ_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE_HI 0x001b
+#define regDB_STENCIL_READ_BASE_HI_BASE_IDX 1
+#define regDB_Z_WRITE_BASE_HI 0x001c
+#define regDB_Z_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE_HI 0x001d
+#define regDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE_HI 0x001e
+#define regDB_HTILE_DATA_BASE_HI_BASE_IDX 1
+#define regDB_RMI_L2_CACHE_CONTROL 0x001f
+#define regDB_RMI_L2_CACHE_CONTROL_BASE_IDX 1
+#define regTA_BC_BASE_ADDR 0x0020
+#define regTA_BC_BASE_ADDR_BASE_IDX 1
+#define regTA_BC_BASE_ADDR_HI 0x0021
+#define regTA_BC_BASE_ADDR_HI_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_0 0x007a
+#define regCOHER_DEST_BASE_HI_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_1 0x007b
+#define regCOHER_DEST_BASE_HI_1_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_2 0x007c
+#define regCOHER_DEST_BASE_HI_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_3 0x007d
+#define regCOHER_DEST_BASE_HI_3_BASE_IDX 1
+#define regCOHER_DEST_BASE_2 0x007e
+#define regCOHER_DEST_BASE_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_3 0x007f
+#define regCOHER_DEST_BASE_3_BASE_IDX 1
+#define regPA_SC_WINDOW_OFFSET 0x0080
+#define regPA_SC_WINDOW_OFFSET_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_TL 0x0081
+#define regPA_SC_WINDOW_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_BR 0x0082
+#define regPA_SC_WINDOW_SCISSOR_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_RULE 0x0083
+#define regPA_SC_CLIPRECT_RULE_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_TL 0x0084
+#define regPA_SC_CLIPRECT_0_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_BR 0x0085
+#define regPA_SC_CLIPRECT_0_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_TL 0x0086
+#define regPA_SC_CLIPRECT_1_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_BR 0x0087
+#define regPA_SC_CLIPRECT_1_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_TL 0x0088
+#define regPA_SC_CLIPRECT_2_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_BR 0x0089
+#define regPA_SC_CLIPRECT_2_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_TL 0x008a
+#define regPA_SC_CLIPRECT_3_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_BR 0x008b
+#define regPA_SC_CLIPRECT_3_BR_BASE_IDX 1
+#define regPA_SC_EDGERULE 0x008c
+#define regPA_SC_EDGERULE_BASE_IDX 1
+#define regPA_SU_HARDWARE_SCREEN_OFFSET 0x008d
+#define regPA_SU_HARDWARE_SCREEN_OFFSET_BASE_IDX 1
+#define regCB_TARGET_MASK 0x008e
+#define regCB_TARGET_MASK_BASE_IDX 1
+#define regCB_SHADER_MASK 0x008f
+#define regCB_SHADER_MASK_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_TL 0x0090
+#define regPA_SC_GENERIC_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_BR 0x0091
+#define regPA_SC_GENERIC_SCISSOR_BR_BASE_IDX 1
+#define regCOHER_DEST_BASE_0 0x0092
+#define regCOHER_DEST_BASE_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_1 0x0093
+#define regCOHER_DEST_BASE_1_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_TL 0x0094
+#define regPA_SC_VPORT_SCISSOR_0_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_BR 0x0095
+#define regPA_SC_VPORT_SCISSOR_0_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_TL 0x0096
+#define regPA_SC_VPORT_SCISSOR_1_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_BR 0x0097
+#define regPA_SC_VPORT_SCISSOR_1_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_TL 0x0098
+#define regPA_SC_VPORT_SCISSOR_2_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_BR 0x0099
+#define regPA_SC_VPORT_SCISSOR_2_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_TL 0x009a
+#define regPA_SC_VPORT_SCISSOR_3_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_BR 0x009b
+#define regPA_SC_VPORT_SCISSOR_3_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_TL 0x009c
+#define regPA_SC_VPORT_SCISSOR_4_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_BR 0x009d
+#define regPA_SC_VPORT_SCISSOR_4_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_TL 0x009e
+#define regPA_SC_VPORT_SCISSOR_5_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_BR 0x009f
+#define regPA_SC_VPORT_SCISSOR_5_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_TL 0x00a0
+#define regPA_SC_VPORT_SCISSOR_6_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_BR 0x00a1
+#define regPA_SC_VPORT_SCISSOR_6_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_TL 0x00a2
+#define regPA_SC_VPORT_SCISSOR_7_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_BR 0x00a3
+#define regPA_SC_VPORT_SCISSOR_7_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_TL 0x00a4
+#define regPA_SC_VPORT_SCISSOR_8_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_BR 0x00a5
+#define regPA_SC_VPORT_SCISSOR_8_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_TL 0x00a6
+#define regPA_SC_VPORT_SCISSOR_9_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_BR 0x00a7
+#define regPA_SC_VPORT_SCISSOR_9_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_TL 0x00a8
+#define regPA_SC_VPORT_SCISSOR_10_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_BR 0x00a9
+#define regPA_SC_VPORT_SCISSOR_10_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_TL 0x00aa
+#define regPA_SC_VPORT_SCISSOR_11_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_BR 0x00ab
+#define regPA_SC_VPORT_SCISSOR_11_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_TL 0x00ac
+#define regPA_SC_VPORT_SCISSOR_12_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_BR 0x00ad
+#define regPA_SC_VPORT_SCISSOR_12_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_TL 0x00ae
+#define regPA_SC_VPORT_SCISSOR_13_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_BR 0x00af
+#define regPA_SC_VPORT_SCISSOR_13_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_TL 0x00b0
+#define regPA_SC_VPORT_SCISSOR_14_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_BR 0x00b1
+#define regPA_SC_VPORT_SCISSOR_14_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_TL 0x00b2
+#define regPA_SC_VPORT_SCISSOR_15_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_BR 0x00b3
+#define regPA_SC_VPORT_SCISSOR_15_BR_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_0 0x00b4
+#define regPA_SC_VPORT_ZMIN_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_0 0x00b5
+#define regPA_SC_VPORT_ZMAX_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_1 0x00b6
+#define regPA_SC_VPORT_ZMIN_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_1 0x00b7
+#define regPA_SC_VPORT_ZMAX_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_2 0x00b8
+#define regPA_SC_VPORT_ZMIN_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_2 0x00b9
+#define regPA_SC_VPORT_ZMAX_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_3 0x00ba
+#define regPA_SC_VPORT_ZMIN_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_3 0x00bb
+#define regPA_SC_VPORT_ZMAX_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_4 0x00bc
+#define regPA_SC_VPORT_ZMIN_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_4 0x00bd
+#define regPA_SC_VPORT_ZMAX_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_5 0x00be
+#define regPA_SC_VPORT_ZMIN_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_5 0x00bf
+#define regPA_SC_VPORT_ZMAX_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_6 0x00c0
+#define regPA_SC_VPORT_ZMIN_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_6 0x00c1
+#define regPA_SC_VPORT_ZMAX_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_7 0x00c2
+#define regPA_SC_VPORT_ZMIN_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_7 0x00c3
+#define regPA_SC_VPORT_ZMAX_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_8 0x00c4
+#define regPA_SC_VPORT_ZMIN_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_8 0x00c5
+#define regPA_SC_VPORT_ZMAX_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_9 0x00c6
+#define regPA_SC_VPORT_ZMIN_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_9 0x00c7
+#define regPA_SC_VPORT_ZMAX_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_10 0x00c8
+#define regPA_SC_VPORT_ZMIN_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_10 0x00c9
+#define regPA_SC_VPORT_ZMAX_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_11 0x00ca
+#define regPA_SC_VPORT_ZMIN_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_11 0x00cb
+#define regPA_SC_VPORT_ZMAX_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_12 0x00cc
+#define regPA_SC_VPORT_ZMIN_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_12 0x00cd
+#define regPA_SC_VPORT_ZMAX_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_13 0x00ce
+#define regPA_SC_VPORT_ZMIN_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_13 0x00cf
+#define regPA_SC_VPORT_ZMAX_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_14 0x00d0
+#define regPA_SC_VPORT_ZMIN_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_14 0x00d1
+#define regPA_SC_VPORT_ZMAX_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_15 0x00d2
+#define regPA_SC_VPORT_ZMIN_15_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_15 0x00d3
+#define regPA_SC_VPORT_ZMAX_15_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG 0x00d4
+#define regPA_SC_RASTER_CONFIG_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG_1 0x00d5
+#define regPA_SC_RASTER_CONFIG_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_CONTROL 0x00d6
+#define regPA_SC_SCREEN_EXTENT_CONTROL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_OVERRIDE 0x00d7
+#define regPA_SC_TILE_STEERING_OVERRIDE_BASE_IDX 1
+#define regCP_PERFMON_CNTX_CNTL 0x00d8
+#define regCP_PERFMON_CNTX_CNTL_BASE_IDX 1
+#define regCP_PIPEID 0x00d9
+#define regCP_PIPEID_BASE_IDX 1
+#define regCP_RINGID 0x00d9
+#define regCP_RINGID_BASE_IDX 1
+#define regCP_VMID 0x00da
+#define regCP_VMID_BASE_IDX 1
+#define regCONTEXT_RESERVED_REG0 0x00db
+#define regCONTEXT_RESERVED_REG0_BASE_IDX 1
+#define regCONTEXT_RESERVED_REG1 0x00dc
+#define regCONTEXT_RESERVED_REG1_BASE_IDX 1
+#define regPA_SC_VRS_OVERRIDE_CNTL 0x00f4
+#define regPA_SC_VRS_OVERRIDE_CNTL_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE 0x00f5
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_EXT 0x00f6
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_EXT_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_SIZE_XY 0x00f7
+#define regPA_SC_VRS_RATE_FEEDBACK_SIZE_XY_BASE_IDX 1
+#define regPA_SC_VRS_RATE_CACHE_CNTL 0x00f9
+#define regPA_SC_VRS_RATE_CACHE_CNTL_BASE_IDX 1
+#define regPA_SC_VRS_RATE_BASE 0x00fc
+#define regPA_SC_VRS_RATE_BASE_BASE_IDX 1
+#define regPA_SC_VRS_RATE_BASE_EXT 0x00fd
+#define regPA_SC_VRS_RATE_BASE_EXT_BASE_IDX 1
+#define regPA_SC_VRS_RATE_SIZE_XY 0x00fe
+#define regPA_SC_VRS_RATE_SIZE_XY_BASE_IDX 1
+#define regVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
+#define regVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
+#define regCB_RMI_GL2_CACHE_CONTROL 0x0104
+#define regCB_RMI_GL2_CACHE_CONTROL_BASE_IDX 1
+#define regCB_BLEND_RED 0x0105
+#define regCB_BLEND_RED_BASE_IDX 1
+#define regCB_BLEND_GREEN 0x0106
+#define regCB_BLEND_GREEN_BASE_IDX 1
+#define regCB_BLEND_BLUE 0x0107
+#define regCB_BLEND_BLUE_BASE_IDX 1
+#define regCB_BLEND_ALPHA 0x0108
+#define regCB_BLEND_ALPHA_BASE_IDX 1
+#define regCB_FDCC_CONTROL 0x0109
+#define regCB_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COVERAGE_OUT_CONTROL 0x010a
+#define regCB_COVERAGE_OUT_CONTROL_BASE_IDX 1
+#define regDB_STENCIL_CONTROL 0x010b
+#define regDB_STENCIL_CONTROL_BASE_IDX 1
+#define regDB_STENCILREFMASK 0x010c
+#define regDB_STENCILREFMASK_BASE_IDX 1
+#define regDB_STENCILREFMASK_BF 0x010d
+#define regDB_STENCILREFMASK_BF_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE 0x010f
+#define regPA_CL_VPORT_XSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET 0x0110
+#define regPA_CL_VPORT_XOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE 0x0111
+#define regPA_CL_VPORT_YSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET 0x0112
+#define regPA_CL_VPORT_YOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE 0x0113
+#define regPA_CL_VPORT_ZSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET 0x0114
+#define regPA_CL_VPORT_ZOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_1 0x0115
+#define regPA_CL_VPORT_XSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_1 0x0116
+#define regPA_CL_VPORT_XOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_1 0x0117
+#define regPA_CL_VPORT_YSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_1 0x0118
+#define regPA_CL_VPORT_YOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_1 0x0119
+#define regPA_CL_VPORT_ZSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_1 0x011a
+#define regPA_CL_VPORT_ZOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_2 0x011b
+#define regPA_CL_VPORT_XSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_2 0x011c
+#define regPA_CL_VPORT_XOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_2 0x011d
+#define regPA_CL_VPORT_YSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_2 0x011e
+#define regPA_CL_VPORT_YOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_2 0x011f
+#define regPA_CL_VPORT_ZSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_2 0x0120
+#define regPA_CL_VPORT_ZOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_3 0x0121
+#define regPA_CL_VPORT_XSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_3 0x0122
+#define regPA_CL_VPORT_XOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_3 0x0123
+#define regPA_CL_VPORT_YSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_3 0x0124
+#define regPA_CL_VPORT_YOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_3 0x0125
+#define regPA_CL_VPORT_ZSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_3 0x0126
+#define regPA_CL_VPORT_ZOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_4 0x0127
+#define regPA_CL_VPORT_XSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_4 0x0128
+#define regPA_CL_VPORT_XOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_4 0x0129
+#define regPA_CL_VPORT_YSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_4 0x012a
+#define regPA_CL_VPORT_YOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_4 0x012b
+#define regPA_CL_VPORT_ZSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_4 0x012c
+#define regPA_CL_VPORT_ZOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_5 0x012d
+#define regPA_CL_VPORT_XSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_5 0x012e
+#define regPA_CL_VPORT_XOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_5 0x012f
+#define regPA_CL_VPORT_YSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_5 0x0130
+#define regPA_CL_VPORT_YOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_5 0x0131
+#define regPA_CL_VPORT_ZSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_5 0x0132
+#define regPA_CL_VPORT_ZOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_6 0x0133
+#define regPA_CL_VPORT_XSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_6 0x0134
+#define regPA_CL_VPORT_XOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_6 0x0135
+#define regPA_CL_VPORT_YSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_6 0x0136
+#define regPA_CL_VPORT_YOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_6 0x0137
+#define regPA_CL_VPORT_ZSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_6 0x0138
+#define regPA_CL_VPORT_ZOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_7 0x0139
+#define regPA_CL_VPORT_XSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_7 0x013a
+#define regPA_CL_VPORT_XOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_7 0x013b
+#define regPA_CL_VPORT_YSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_7 0x013c
+#define regPA_CL_VPORT_YOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_7 0x013d
+#define regPA_CL_VPORT_ZSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_7 0x013e
+#define regPA_CL_VPORT_ZOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_8 0x013f
+#define regPA_CL_VPORT_XSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_8 0x0140
+#define regPA_CL_VPORT_XOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_8 0x0141
+#define regPA_CL_VPORT_YSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_8 0x0142
+#define regPA_CL_VPORT_YOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_8 0x0143
+#define regPA_CL_VPORT_ZSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_8 0x0144
+#define regPA_CL_VPORT_ZOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_9 0x0145
+#define regPA_CL_VPORT_XSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_9 0x0146
+#define regPA_CL_VPORT_XOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_9 0x0147
+#define regPA_CL_VPORT_YSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_9 0x0148
+#define regPA_CL_VPORT_YOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_9 0x0149
+#define regPA_CL_VPORT_ZSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_9 0x014a
+#define regPA_CL_VPORT_ZOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_10 0x014b
+#define regPA_CL_VPORT_XSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_10 0x014c
+#define regPA_CL_VPORT_XOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_10 0x014d
+#define regPA_CL_VPORT_YSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_10 0x014e
+#define regPA_CL_VPORT_YOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_10 0x014f
+#define regPA_CL_VPORT_ZSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_10 0x0150
+#define regPA_CL_VPORT_ZOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_11 0x0151
+#define regPA_CL_VPORT_XSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_11 0x0152
+#define regPA_CL_VPORT_XOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_11 0x0153
+#define regPA_CL_VPORT_YSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_11 0x0154
+#define regPA_CL_VPORT_YOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_11 0x0155
+#define regPA_CL_VPORT_ZSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_11 0x0156
+#define regPA_CL_VPORT_ZOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_12 0x0157
+#define regPA_CL_VPORT_XSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_12 0x0158
+#define regPA_CL_VPORT_XOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_12 0x0159
+#define regPA_CL_VPORT_YSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_12 0x015a
+#define regPA_CL_VPORT_YOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_12 0x015b
+#define regPA_CL_VPORT_ZSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_12 0x015c
+#define regPA_CL_VPORT_ZOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_13 0x015d
+#define regPA_CL_VPORT_XSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_13 0x015e
+#define regPA_CL_VPORT_XOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_13 0x015f
+#define regPA_CL_VPORT_YSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_13 0x0160
+#define regPA_CL_VPORT_YOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_13 0x0161
+#define regPA_CL_VPORT_ZSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_13 0x0162
+#define regPA_CL_VPORT_ZOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_14 0x0163
+#define regPA_CL_VPORT_XSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_14 0x0164
+#define regPA_CL_VPORT_XOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_14 0x0165
+#define regPA_CL_VPORT_YSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_14 0x0166
+#define regPA_CL_VPORT_YOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_14 0x0167
+#define regPA_CL_VPORT_ZSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_14 0x0168
+#define regPA_CL_VPORT_ZOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_15 0x0169
+#define regPA_CL_VPORT_XSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_15 0x016a
+#define regPA_CL_VPORT_XOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_15 0x016b
+#define regPA_CL_VPORT_YSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_15 0x016c
+#define regPA_CL_VPORT_YOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_15 0x016d
+#define regPA_CL_VPORT_ZSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_15 0x016e
+#define regPA_CL_VPORT_ZOFFSET_15_BASE_IDX 1
+#define regPA_CL_UCP_0_X 0x016f
+#define regPA_CL_UCP_0_X_BASE_IDX 1
+#define regPA_CL_UCP_0_Y 0x0170
+#define regPA_CL_UCP_0_Y_BASE_IDX 1
+#define regPA_CL_UCP_0_Z 0x0171
+#define regPA_CL_UCP_0_Z_BASE_IDX 1
+#define regPA_CL_UCP_0_W 0x0172
+#define regPA_CL_UCP_0_W_BASE_IDX 1
+#define regPA_CL_UCP_1_X 0x0173
+#define regPA_CL_UCP_1_X_BASE_IDX 1
+#define regPA_CL_UCP_1_Y 0x0174
+#define regPA_CL_UCP_1_Y_BASE_IDX 1
+#define regPA_CL_UCP_1_Z 0x0175
+#define regPA_CL_UCP_1_Z_BASE_IDX 1
+#define regPA_CL_UCP_1_W 0x0176
+#define regPA_CL_UCP_1_W_BASE_IDX 1
+#define regPA_CL_UCP_2_X 0x0177
+#define regPA_CL_UCP_2_X_BASE_IDX 1
+#define regPA_CL_UCP_2_Y 0x0178
+#define regPA_CL_UCP_2_Y_BASE_IDX 1
+#define regPA_CL_UCP_2_Z 0x0179
+#define regPA_CL_UCP_2_Z_BASE_IDX 1
+#define regPA_CL_UCP_2_W 0x017a
+#define regPA_CL_UCP_2_W_BASE_IDX 1
+#define regPA_CL_UCP_3_X 0x017b
+#define regPA_CL_UCP_3_X_BASE_IDX 1
+#define regPA_CL_UCP_3_Y 0x017c
+#define regPA_CL_UCP_3_Y_BASE_IDX 1
+#define regPA_CL_UCP_3_Z 0x017d
+#define regPA_CL_UCP_3_Z_BASE_IDX 1
+#define regPA_CL_UCP_3_W 0x017e
+#define regPA_CL_UCP_3_W_BASE_IDX 1
+#define regPA_CL_UCP_4_X 0x017f
+#define regPA_CL_UCP_4_X_BASE_IDX 1
+#define regPA_CL_UCP_4_Y 0x0180
+#define regPA_CL_UCP_4_Y_BASE_IDX 1
+#define regPA_CL_UCP_4_Z 0x0181
+#define regPA_CL_UCP_4_Z_BASE_IDX 1
+#define regPA_CL_UCP_4_W 0x0182
+#define regPA_CL_UCP_4_W_BASE_IDX 1
+#define regPA_CL_UCP_5_X 0x0183
+#define regPA_CL_UCP_5_X_BASE_IDX 1
+#define regPA_CL_UCP_5_Y 0x0184
+#define regPA_CL_UCP_5_Y_BASE_IDX 1
+#define regPA_CL_UCP_5_Z 0x0185
+#define regPA_CL_UCP_5_Z_BASE_IDX 1
+#define regPA_CL_UCP_5_W 0x0186
+#define regPA_CL_UCP_5_W_BASE_IDX 1
+#define regPA_CL_PROG_NEAR_CLIP_Z 0x0187
+#define regPA_CL_PROG_NEAR_CLIP_Z_BASE_IDX 1
+#define regPA_RATE_CNTL 0x0188
+#define regPA_RATE_CNTL_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_0 0x0191
+#define regSPI_PS_INPUT_CNTL_0_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_1 0x0192
+#define regSPI_PS_INPUT_CNTL_1_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_2 0x0193
+#define regSPI_PS_INPUT_CNTL_2_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_3 0x0194
+#define regSPI_PS_INPUT_CNTL_3_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_4 0x0195
+#define regSPI_PS_INPUT_CNTL_4_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_5 0x0196
+#define regSPI_PS_INPUT_CNTL_5_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_6 0x0197
+#define regSPI_PS_INPUT_CNTL_6_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_7 0x0198
+#define regSPI_PS_INPUT_CNTL_7_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_8 0x0199
+#define regSPI_PS_INPUT_CNTL_8_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_9 0x019a
+#define regSPI_PS_INPUT_CNTL_9_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_10 0x019b
+#define regSPI_PS_INPUT_CNTL_10_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_11 0x019c
+#define regSPI_PS_INPUT_CNTL_11_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_12 0x019d
+#define regSPI_PS_INPUT_CNTL_12_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_13 0x019e
+#define regSPI_PS_INPUT_CNTL_13_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_14 0x019f
+#define regSPI_PS_INPUT_CNTL_14_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_15 0x01a0
+#define regSPI_PS_INPUT_CNTL_15_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_16 0x01a1
+#define regSPI_PS_INPUT_CNTL_16_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_17 0x01a2
+#define regSPI_PS_INPUT_CNTL_17_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_18 0x01a3
+#define regSPI_PS_INPUT_CNTL_18_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_19 0x01a4
+#define regSPI_PS_INPUT_CNTL_19_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_20 0x01a5
+#define regSPI_PS_INPUT_CNTL_20_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_21 0x01a6
+#define regSPI_PS_INPUT_CNTL_21_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_22 0x01a7
+#define regSPI_PS_INPUT_CNTL_22_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_23 0x01a8
+#define regSPI_PS_INPUT_CNTL_23_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_24 0x01a9
+#define regSPI_PS_INPUT_CNTL_24_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_25 0x01aa
+#define regSPI_PS_INPUT_CNTL_25_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_26 0x01ab
+#define regSPI_PS_INPUT_CNTL_26_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_27 0x01ac
+#define regSPI_PS_INPUT_CNTL_27_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_28 0x01ad
+#define regSPI_PS_INPUT_CNTL_28_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_29 0x01ae
+#define regSPI_PS_INPUT_CNTL_29_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_30 0x01af
+#define regSPI_PS_INPUT_CNTL_30_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_31 0x01b0
+#define regSPI_PS_INPUT_CNTL_31_BASE_IDX 1
+#define regSPI_VS_OUT_CONFIG 0x01b1
+#define regSPI_VS_OUT_CONFIG_BASE_IDX 1
+#define regSPI_PS_INPUT_ENA 0x01b3
+#define regSPI_PS_INPUT_ENA_BASE_IDX 1
+#define regSPI_PS_INPUT_ADDR 0x01b4
+#define regSPI_PS_INPUT_ADDR_BASE_IDX 1
+#define regSPI_INTERP_CONTROL_0 0x01b5
+#define regSPI_INTERP_CONTROL_0_BASE_IDX 1
+#define regSPI_PS_IN_CONTROL 0x01b6
+#define regSPI_PS_IN_CONTROL_BASE_IDX 1
+#define regSPI_BARYC_CNTL 0x01b8
+#define regSPI_BARYC_CNTL_BASE_IDX 1
+#define regSPI_TMPRING_SIZE 0x01ba
+#define regSPI_TMPRING_SIZE_BASE_IDX 1
+#define regSPI_GFX_SCRATCH_BASE_LO 0x01bb
+#define regSPI_GFX_SCRATCH_BASE_LO_BASE_IDX 1
+#define regSPI_GFX_SCRATCH_BASE_HI 0x01bc
+#define regSPI_GFX_SCRATCH_BASE_HI_BASE_IDX 1
+#define regSPI_SHADER_IDX_FORMAT 0x01c2
+#define regSPI_SHADER_IDX_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_POS_FORMAT 0x01c3
+#define regSPI_SHADER_POS_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_Z_FORMAT 0x01c4
+#define regSPI_SHADER_Z_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_COL_FORMAT 0x01c5
+#define regSPI_SHADER_COL_FORMAT_BASE_IDX 1
+#define regSX_PS_DOWNCONVERT_CONTROL 0x01d4
+#define regSX_PS_DOWNCONVERT_CONTROL_BASE_IDX 1
+#define regSX_PS_DOWNCONVERT 0x01d5
+#define regSX_PS_DOWNCONVERT_BASE_IDX 1
+#define regSX_BLEND_OPT_EPSILON 0x01d6
+#define regSX_BLEND_OPT_EPSILON_BASE_IDX 1
+#define regSX_BLEND_OPT_CONTROL 0x01d7
+#define regSX_BLEND_OPT_CONTROL_BASE_IDX 1
+#define regSX_MRT0_BLEND_OPT 0x01d8
+#define regSX_MRT0_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT1_BLEND_OPT 0x01d9
+#define regSX_MRT1_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT2_BLEND_OPT 0x01da
+#define regSX_MRT2_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT3_BLEND_OPT 0x01db
+#define regSX_MRT3_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT4_BLEND_OPT 0x01dc
+#define regSX_MRT4_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT5_BLEND_OPT 0x01dd
+#define regSX_MRT5_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT6_BLEND_OPT 0x01de
+#define regSX_MRT6_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT7_BLEND_OPT 0x01df
+#define regSX_MRT7_BLEND_OPT_BASE_IDX 1
+#define regCB_BLEND0_CONTROL 0x01e0
+#define regCB_BLEND0_CONTROL_BASE_IDX 1
+#define regCB_BLEND1_CONTROL 0x01e1
+#define regCB_BLEND1_CONTROL_BASE_IDX 1
+#define regCB_BLEND2_CONTROL 0x01e2
+#define regCB_BLEND2_CONTROL_BASE_IDX 1
+#define regCB_BLEND3_CONTROL 0x01e3
+#define regCB_BLEND3_CONTROL_BASE_IDX 1
+#define regCB_BLEND4_CONTROL 0x01e4
+#define regCB_BLEND4_CONTROL_BASE_IDX 1
+#define regCB_BLEND5_CONTROL 0x01e5
+#define regCB_BLEND5_CONTROL_BASE_IDX 1
+#define regCB_BLEND6_CONTROL 0x01e6
+#define regCB_BLEND6_CONTROL_BASE_IDX 1
+#define regCB_BLEND7_CONTROL 0x01e7
+#define regCB_BLEND7_CONTROL_BASE_IDX 1
+#define regGFX_COPY_STATE 0x01f4
+#define regGFX_COPY_STATE_BASE_IDX 1
+#define regPA_CL_POINT_X_RAD 0x01f5
+#define regPA_CL_POINT_X_RAD_BASE_IDX 1
+#define regPA_CL_POINT_Y_RAD 0x01f6
+#define regPA_CL_POINT_Y_RAD_BASE_IDX 1
+#define regPA_CL_POINT_SIZE 0x01f7
+#define regPA_CL_POINT_SIZE_BASE_IDX 1
+#define regPA_CL_POINT_CULL_RAD 0x01f8
+#define regPA_CL_POINT_CULL_RAD_BASE_IDX 1
+#define regVGT_DMA_BASE_HI 0x01f9
+#define regVGT_DMA_BASE_HI_BASE_IDX 1
+#define regVGT_DMA_BASE 0x01fa
+#define regVGT_DMA_BASE_BASE_IDX 1
+#define regVGT_DRAW_INITIATOR 0x01fc
+#define regVGT_DRAW_INITIATOR_BASE_IDX 1
+#define regVGT_EVENT_ADDRESS_REG 0x01fe
+#define regVGT_EVENT_ADDRESS_REG_BASE_IDX 1
+#define regGE_MAX_OUTPUT_PER_SUBGROUP 0x01ff
+#define regGE_MAX_OUTPUT_PER_SUBGROUP_BASE_IDX 1
+#define regDB_DEPTH_CONTROL 0x0200
+#define regDB_DEPTH_CONTROL_BASE_IDX 1
+#define regDB_EQAA 0x0201
+#define regDB_EQAA_BASE_IDX 1
+#define regCB_COLOR_CONTROL 0x0202
+#define regCB_COLOR_CONTROL_BASE_IDX 1
+#define regDB_SHADER_CONTROL 0x0203
+#define regDB_SHADER_CONTROL_BASE_IDX 1
+#define regPA_CL_CLIP_CNTL 0x0204
+#define regPA_CL_CLIP_CNTL_BASE_IDX 1
+#define regPA_SU_SC_MODE_CNTL 0x0205
+#define regPA_SU_SC_MODE_CNTL_BASE_IDX 1
+#define regPA_CL_VTE_CNTL 0x0206
+#define regPA_CL_VTE_CNTL_BASE_IDX 1
+#define regPA_CL_VS_OUT_CNTL 0x0207
+#define regPA_CL_VS_OUT_CNTL_BASE_IDX 1
+#define regPA_CL_NANINF_CNTL 0x0208
+#define regPA_CL_NANINF_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_CNTL 0x0209
+#define regPA_SU_LINE_STIPPLE_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_SCALE 0x020a
+#define regPA_SU_LINE_STIPPLE_SCALE_BASE_IDX 1
+#define regPA_SU_PRIM_FILTER_CNTL 0x020b
+#define regPA_SU_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL 0x020c
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_CL_NGG_CNTL 0x020e
+#define regPA_CL_NGG_CNTL_BASE_IDX 1
+#define regPA_SU_OVER_RASTERIZATION_CNTL 0x020f
+#define regPA_SU_OVER_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_STEREO_CNTL 0x0210
+#define regPA_STEREO_CNTL_BASE_IDX 1
+#define regPA_STATE_STEREO_X 0x0211
+#define regPA_STATE_STEREO_X_BASE_IDX 1
+#define regPA_CL_VRS_CNTL 0x0212
+#define regPA_CL_VRS_CNTL_BASE_IDX 1
+#define regPA_SU_POINT_SIZE 0x0280
+#define regPA_SU_POINT_SIZE_BASE_IDX 1
+#define regPA_SU_POINT_MINMAX 0x0281
+#define regPA_SU_POINT_MINMAX_BASE_IDX 1
+#define regPA_SU_LINE_CNTL 0x0282
+#define regPA_SU_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE 0x0283
+#define regPA_SC_LINE_STIPPLE_BASE_IDX 1
+#define regVGT_HOS_MAX_TESS_LEVEL 0x0286
+#define regVGT_HOS_MAX_TESS_LEVEL_BASE_IDX 1
+#define regVGT_HOS_MIN_TESS_LEVEL 0x0287
+#define regVGT_HOS_MIN_TESS_LEVEL_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_0 0x0292
+#define regPA_SC_MODE_CNTL_0_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_1 0x0293
+#define regPA_SC_MODE_CNTL_1_BASE_IDX 1
+#define regVGT_ENHANCE 0x0294
+#define regVGT_ENHANCE_BASE_IDX 1
+#define regIA_ENHANCE 0x029c
+#define regIA_ENHANCE_BASE_IDX 1
+#define regVGT_DMA_SIZE 0x029d
+#define regVGT_DMA_SIZE_BASE_IDX 1
+#define regVGT_DMA_MAX_SIZE 0x029e
+#define regVGT_DMA_MAX_SIZE_BASE_IDX 1
+#define regVGT_DMA_INDEX_TYPE 0x029f
+#define regVGT_DMA_INDEX_TYPE_BASE_IDX 1
+#define regWD_ENHANCE 0x02a0
+#define regWD_ENHANCE_BASE_IDX 1
+#define regVGT_PRIMITIVEID_EN 0x02a1
+#define regVGT_PRIMITIVEID_EN_BASE_IDX 1
+#define regVGT_DMA_NUM_INSTANCES 0x02a2
+#define regVGT_DMA_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_PRIMITIVEID_RESET 0x02a3
+#define regVGT_PRIMITIVEID_RESET_BASE_IDX 1
+#define regVGT_EVENT_INITIATOR 0x02a4
+#define regVGT_EVENT_INITIATOR_BASE_IDX 1
+#define regVGT_DRAW_PAYLOAD_CNTL 0x02a6
+#define regVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
+#define regVGT_ESGS_RING_ITEMSIZE 0x02ab
+#define regVGT_ESGS_RING_ITEMSIZE_BASE_IDX 1
+#define regVGT_REUSE_OFF 0x02ad
+#define regVGT_REUSE_OFF_BASE_IDX 1
+#define regDB_HTILE_SURFACE 0x02af
+#define regDB_HTILE_SURFACE_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE0 0x02b0
+#define regDB_SRESULTS_COMPARE_STATE0_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE1 0x02b1
+#define regDB_SRESULTS_COMPARE_STATE1_BASE_IDX 1
+#define regDB_PRELOAD_CONTROL 0x02b2
+#define regDB_PRELOAD_CONTROL_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0x02ca
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0x02cb
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0x02cc
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE_BASE_IDX 1
+#define regVGT_GS_MAX_VERT_OUT 0x02ce
+#define regVGT_GS_MAX_VERT_OUT_BASE_IDX 1
+#define regGE_NGG_SUBGRP_CNTL 0x02d3
+#define regGE_NGG_SUBGRP_CNTL_BASE_IDX 1
+#define regVGT_TESS_DISTRIBUTION 0x02d4
+#define regVGT_TESS_DISTRIBUTION_BASE_IDX 1
+#define regVGT_SHADER_STAGES_EN 0x02d5
+#define regVGT_SHADER_STAGES_EN_BASE_IDX 1
+#define regVGT_LS_HS_CONFIG 0x02d6
+#define regVGT_LS_HS_CONFIG_BASE_IDX 1
+#define regVGT_TF_PARAM 0x02db
+#define regVGT_TF_PARAM_BASE_IDX 1
+#define regDB_ALPHA_TO_MASK 0x02dc
+#define regDB_ALPHA_TO_MASK_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL 0x02de
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_CLAMP 0x02df
+#define regPA_SU_POLY_OFFSET_CLAMP_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE 0x02e0
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET 0x02e1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_SCALE 0x02e2
+#define regPA_SU_POLY_OFFSET_BACK_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET 0x02e3
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET_BASE_IDX 1
+#define regVGT_GS_INSTANCE_CNT 0x02e4
+#define regVGT_GS_INSTANCE_CNT_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_0 0x02f5
+#define regPA_SC_CENTROID_PRIORITY_0_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_1 0x02f6
+#define regPA_SC_CENTROID_PRIORITY_1_BASE_IDX 1
+#define regPA_SC_LINE_CNTL 0x02f7
+#define regPA_SC_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_AA_CONFIG 0x02f8
+#define regPA_SC_AA_CONFIG_BASE_IDX 1
+#define regPA_SU_VTX_CNTL 0x02f9
+#define regPA_SU_VTX_CNTL_BASE_IDX 1
+#define regPA_CL_GB_VERT_CLIP_ADJ 0x02fa
+#define regPA_CL_GB_VERT_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_VERT_DISC_ADJ 0x02fb
+#define regPA_CL_GB_VERT_DISC_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_CLIP_ADJ 0x02fc
+#define regPA_CL_GB_HORZ_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_DISC_ADJ 0x02fd
+#define regPA_CL_GB_HORZ_DISC_ADJ_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0x02fe
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0x02ff
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0x0300
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0x0301
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0x0302
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0x0303
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0x0304
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0x0305
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0x0306
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0x0307
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0x0308
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0x0309
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0x030a
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0x030b
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0x030c
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0x030d
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y0_X1Y0 0x030e
+#define regPA_SC_AA_MASK_X0Y0_X1Y0_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y1_X1Y1 0x030f
+#define regPA_SC_AA_MASK_X0Y1_X1Y1_BASE_IDX 1
+#define regPA_SC_SHADER_CONTROL 0x0310
+#define regPA_SC_SHADER_CONTROL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_0 0x0311
+#define regPA_SC_BINNER_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_1 0x0312
+#define regPA_SC_BINNER_CNTL_1_BASE_IDX 1
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL 0x0313
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_SC_NGG_MODE_CNTL 0x0314
+#define regPA_SC_NGG_MODE_CNTL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_2 0x0315
+#define regPA_SC_BINNER_CNTL_2_BASE_IDX 1
+#define regCB_COLOR0_BASE 0x0318
+#define regCB_COLOR0_BASE_BASE_IDX 1
+#define regCB_COLOR0_VIEW 0x031b
+#define regCB_COLOR0_VIEW_BASE_IDX 1
+#define regCB_COLOR0_INFO 0x031c
+#define regCB_COLOR0_INFO_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB 0x031d
+#define regCB_COLOR0_ATTRIB_BASE_IDX 1
+#define regCB_COLOR0_FDCC_CONTROL 0x031e
+#define regCB_COLOR0_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE 0x0325
+#define regCB_COLOR0_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR1_BASE 0x0327
+#define regCB_COLOR1_BASE_BASE_IDX 1
+#define regCB_COLOR1_VIEW 0x032a
+#define regCB_COLOR1_VIEW_BASE_IDX 1
+#define regCB_COLOR1_INFO 0x032b
+#define regCB_COLOR1_INFO_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB 0x032c
+#define regCB_COLOR1_ATTRIB_BASE_IDX 1
+#define regCB_COLOR1_FDCC_CONTROL 0x032d
+#define regCB_COLOR1_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE 0x0334
+#define regCB_COLOR1_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR2_BASE 0x0336
+#define regCB_COLOR2_BASE_BASE_IDX 1
+#define regCB_COLOR2_VIEW 0x0339
+#define regCB_COLOR2_VIEW_BASE_IDX 1
+#define regCB_COLOR2_INFO 0x033a
+#define regCB_COLOR2_INFO_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB 0x033b
+#define regCB_COLOR2_ATTRIB_BASE_IDX 1
+#define regCB_COLOR2_FDCC_CONTROL 0x033c
+#define regCB_COLOR2_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE 0x0343
+#define regCB_COLOR2_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR3_BASE 0x0345
+#define regCB_COLOR3_BASE_BASE_IDX 1
+#define regCB_COLOR3_VIEW 0x0348
+#define regCB_COLOR3_VIEW_BASE_IDX 1
+#define regCB_COLOR3_INFO 0x0349
+#define regCB_COLOR3_INFO_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB 0x034a
+#define regCB_COLOR3_ATTRIB_BASE_IDX 1
+#define regCB_COLOR3_FDCC_CONTROL 0x034b
+#define regCB_COLOR3_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE 0x0352
+#define regCB_COLOR3_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR4_BASE 0x0354
+#define regCB_COLOR4_BASE_BASE_IDX 1
+#define regCB_COLOR4_VIEW 0x0357
+#define regCB_COLOR4_VIEW_BASE_IDX 1
+#define regCB_COLOR4_INFO 0x0358
+#define regCB_COLOR4_INFO_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB 0x0359
+#define regCB_COLOR4_ATTRIB_BASE_IDX 1
+#define regCB_COLOR4_FDCC_CONTROL 0x035a
+#define regCB_COLOR4_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE 0x0361
+#define regCB_COLOR4_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR5_BASE 0x0363
+#define regCB_COLOR5_BASE_BASE_IDX 1
+#define regCB_COLOR5_VIEW 0x0366
+#define regCB_COLOR5_VIEW_BASE_IDX 1
+#define regCB_COLOR5_INFO 0x0367
+#define regCB_COLOR5_INFO_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB 0x0368
+#define regCB_COLOR5_ATTRIB_BASE_IDX 1
+#define regCB_COLOR5_FDCC_CONTROL 0x0369
+#define regCB_COLOR5_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE 0x0370
+#define regCB_COLOR5_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR6_BASE 0x0372
+#define regCB_COLOR6_BASE_BASE_IDX 1
+#define regCB_COLOR6_VIEW 0x0375
+#define regCB_COLOR6_VIEW_BASE_IDX 1
+#define regCB_COLOR6_INFO 0x0376
+#define regCB_COLOR6_INFO_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB 0x0377
+#define regCB_COLOR6_ATTRIB_BASE_IDX 1
+#define regCB_COLOR6_FDCC_CONTROL 0x0378
+#define regCB_COLOR6_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE 0x037f
+#define regCB_COLOR6_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR7_BASE 0x0381
+#define regCB_COLOR7_BASE_BASE_IDX 1
+#define regCB_COLOR7_VIEW 0x0384
+#define regCB_COLOR7_VIEW_BASE_IDX 1
+#define regCB_COLOR7_INFO 0x0385
+#define regCB_COLOR7_INFO_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB 0x0386
+#define regCB_COLOR7_ATTRIB_BASE_IDX 1
+#define regCB_COLOR7_FDCC_CONTROL 0x0387
+#define regCB_COLOR7_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE 0x038e
+#define regCB_COLOR7_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR0_BASE_EXT 0x0390
+#define regCB_COLOR0_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_BASE_EXT 0x0391
+#define regCB_COLOR1_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_BASE_EXT 0x0392
+#define regCB_COLOR2_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_BASE_EXT 0x0393
+#define regCB_COLOR3_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_BASE_EXT 0x0394
+#define regCB_COLOR4_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_BASE_EXT 0x0395
+#define regCB_COLOR5_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_BASE_EXT 0x0396
+#define regCB_COLOR6_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_BASE_EXT 0x0397
+#define regCB_COLOR7_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE_EXT 0x03a8
+#define regCB_COLOR0_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE_EXT 0x03a9
+#define regCB_COLOR1_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE_EXT 0x03aa
+#define regCB_COLOR2_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE_EXT 0x03ab
+#define regCB_COLOR3_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE_EXT 0x03ac
+#define regCB_COLOR4_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE_EXT 0x03ad
+#define regCB_COLOR5_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE_EXT 0x03ae
+#define regCB_COLOR6_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE_EXT 0x03af
+#define regCB_COLOR7_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB2 0x03b0
+#define regCB_COLOR0_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB2 0x03b1
+#define regCB_COLOR1_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB2 0x03b2
+#define regCB_COLOR2_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB2 0x03b3
+#define regCB_COLOR3_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB2 0x03b4
+#define regCB_COLOR4_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB2 0x03b5
+#define regCB_COLOR5_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB2 0x03b6
+#define regCB_COLOR6_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB2 0x03b7
+#define regCB_COLOR7_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB3 0x03b8
+#define regCB_COLOR0_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB3 0x03b9
+#define regCB_COLOR1_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB3 0x03ba
+#define regCB_COLOR2_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB3 0x03bb
+#define regCB_COLOR3_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB3 0x03bc
+#define regCB_COLOR4_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB3 0x03bd
+#define regCB_COLOR5_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB3 0x03be
+#define regCB_COLOR6_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB3 0x03bf
+#define regCB_COLOR7_ATTRIB3_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_cpdec
+// base address: 0x2a000
+#define regCONFIG_RESERVED_REG0 0x0800
+#define regCONFIG_RESERVED_REG0_BASE_IDX 1
+#define regCONFIG_RESERVED_REG1 0x0801
+#define regCONFIG_RESERVED_REG1_BASE_IDX 1
+#define regCP_MEC_CNTL 0x0802
+#define regCP_MEC_CNTL_BASE_IDX 1
+#define regCP_ME_CNTL 0x0803
+#define regCP_ME_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_grbmdec
+// base address: 0x2a400
+#define regGRBM_GFX_CNTL 0x0900
+#define regGRBM_GFX_CNTL_BASE_IDX 1
+#define regGRBM_NOWHERE 0x0901
+#define regGRBM_NOWHERE_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_padec
+// base address: 0x2a500
+#define regPA_SC_VRS_SURFACE_CNTL 0x0940
+#define regPA_SC_VRS_SURFACE_CNTL_BASE_IDX 1
+#define regPA_SC_ENHANCE 0x0941
+#define regPA_SC_ENHANCE_BASE_IDX 1
+#define regPA_SC_ENHANCE_1 0x0942
+#define regPA_SC_ENHANCE_1_BASE_IDX 1
+#define regPA_SC_ENHANCE_2 0x0943
+#define regPA_SC_ENHANCE_2_BASE_IDX 1
+#define regPA_SC_ENHANCE_3 0x0944
+#define regPA_SC_ENHANCE_3_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_OVERRIDE 0x0946
+#define regPA_SC_BINNER_CNTL_OVERRIDE_BASE_IDX 1
+#define regPA_SC_PBB_OVERRIDE_FLAG 0x0947
+#define regPA_SC_PBB_OVERRIDE_FLAG_BASE_IDX 1
+#define regPA_SC_DSM_CNTL 0x0948
+#define regPA_SC_DSM_CNTL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE 0x0949
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE_BASE_IDX 1
+#define regPA_SC_FIFO_SIZE 0x094a
+#define regPA_SC_FIFO_SIZE_BASE_IDX 1
+#define regPA_SC_IF_FIFO_SIZE 0x094b
+#define regPA_SC_IF_FIFO_SIZE_BASE_IDX 1
+#define regPA_SC_PACKER_WAVE_ID_CNTL 0x094c
+#define regPA_SC_PACKER_WAVE_ID_CNTL_BASE_IDX 1
+#define regPA_SC_ATM_CNTL 0x094d
+#define regPA_SC_ATM_CNTL_BASE_IDX 1
+#define regPA_SC_PKR_WAVE_TABLE_CNTL 0x094e
+#define regPA_SC_PKR_WAVE_TABLE_CNTL_BASE_IDX 1
+#define regPA_SC_FORCE_EOV_MAX_CNTS 0x094f
+#define regPA_SC_FORCE_EOV_MAX_CNTS_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_0 0x0950
+#define regPA_SC_BINNER_EVENT_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_1 0x0951
+#define regPA_SC_BINNER_EVENT_CNTL_1_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_2 0x0952
+#define regPA_SC_BINNER_EVENT_CNTL_2_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_3 0x0953
+#define regPA_SC_BINNER_EVENT_CNTL_3_BASE_IDX 1
+#define regPA_SC_BINNER_TIMEOUT_COUNTER 0x0954
+#define regPA_SC_BINNER_TIMEOUT_COUNTER_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_0 0x0955
+#define regPA_SC_BINNER_PERF_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_1 0x0956
+#define regPA_SC_BINNER_PERF_CNTL_1_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_2 0x0957
+#define regPA_SC_BINNER_PERF_CNTL_2_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_3 0x0958
+#define regPA_SC_BINNER_PERF_CNTL_3_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK 0x095b
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK 0x095c
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_LOCK 0x095d
+#define regPA_SC_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_PH_INTERFACE_FIFO_SIZE 0x095e
+#define regPA_PH_INTERFACE_FIFO_SIZE_BASE_IDX 1
+#define regPA_PH_ENHANCE 0x095f
+#define regPA_PH_ENHANCE_BASE_IDX 1
+#define regPA_SC_VRS_SURFACE_CNTL_1 0x0960
+#define regPA_SC_VRS_SURFACE_CNTL_1_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_sqdec
+// base address: 0x2a780
+#define regSQ_RUNTIME_CONFIG 0x09e0
+#define regSQ_RUNTIME_CONFIG_BASE_IDX 1
+#define regSQ_DEBUG_STS_GLOBAL 0x09e1
+#define regSQ_DEBUG_STS_GLOBAL_BASE_IDX 1
+#define regSQ_DEBUG_STS_GLOBAL2 0x09e2
+#define regSQ_DEBUG_STS_GLOBAL2_BASE_IDX 1
+#define regSH_MEM_BASES 0x09e3
+#define regSH_MEM_BASES_BASE_IDX 1
+#define regSH_MEM_CONFIG 0x09e4
+#define regSH_MEM_CONFIG_BASE_IDX 1
+#define regSQ_DEBUG 0x09e5
+#define regSQ_DEBUG_BASE_IDX 1
+#define regSQ_SHADER_TBA_LO 0x09e6
+#define regSQ_SHADER_TBA_LO_BASE_IDX 1
+#define regSQ_SHADER_TBA_HI 0x09e7
+#define regSQ_SHADER_TBA_HI_BASE_IDX 1
+#define regSQ_SHADER_TMA_LO 0x09e8
+#define regSQ_SHADER_TMA_LO_BASE_IDX 1
+#define regSQ_SHADER_TMA_HI 0x09e9
+#define regSQ_SHADER_TMA_HI_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_cpdec
+// base address: 0x2e000
+#define regCP_DEBUG_2 0x1800
+#define regCP_DEBUG_2_BASE_IDX 1
+#define regCP_FETCHER_SOURCE 0x1801
+#define regCP_FETCHER_SOURCE_BASE_IDX 1
+#define regCP_DFY_CNTL 0x1804
+#define regCP_DFY_CNTL_BASE_IDX 1
+#define regCP_DFY_STAT 0x1805
+#define regCP_DFY_STAT_BASE_IDX 1
+#define regCP_DFY_ADDR_HI 0x1806
+#define regCP_DFY_ADDR_HI_BASE_IDX 1
+#define regCP_DFY_ADDR_LO 0x1807
+#define regCP_DFY_ADDR_LO_BASE_IDX 1
+#define regCP_DFY_DATA_0 0x1808
+#define regCP_DFY_DATA_0_BASE_IDX 1
+#define regCP_DFY_DATA_1 0x1809
+#define regCP_DFY_DATA_1_BASE_IDX 1
+#define regCP_DFY_DATA_2 0x180a
+#define regCP_DFY_DATA_2_BASE_IDX 1
+#define regCP_DFY_DATA_3 0x180b
+#define regCP_DFY_DATA_3_BASE_IDX 1
+#define regCP_DFY_DATA_4 0x180c
+#define regCP_DFY_DATA_4_BASE_IDX 1
+#define regCP_DFY_DATA_5 0x180d
+#define regCP_DFY_DATA_5_BASE_IDX 1
+#define regCP_DFY_DATA_6 0x180e
+#define regCP_DFY_DATA_6_BASE_IDX 1
+#define regCP_DFY_DATA_7 0x180f
+#define regCP_DFY_DATA_7_BASE_IDX 1
+#define regCP_DFY_DATA_8 0x1810
+#define regCP_DFY_DATA_8_BASE_IDX 1
+#define regCP_DFY_DATA_9 0x1811
+#define regCP_DFY_DATA_9_BASE_IDX 1
+#define regCP_DFY_DATA_10 0x1812
+#define regCP_DFY_DATA_10_BASE_IDX 1
+#define regCP_DFY_DATA_11 0x1813
+#define regCP_DFY_DATA_11_BASE_IDX 1
+#define regCP_DFY_DATA_12 0x1814
+#define regCP_DFY_DATA_12_BASE_IDX 1
+#define regCP_DFY_DATA_13 0x1815
+#define regCP_DFY_DATA_13_BASE_IDX 1
+#define regCP_DFY_DATA_14 0x1816
+#define regCP_DFY_DATA_14_BASE_IDX 1
+#define regCP_DFY_DATA_15 0x1817
+#define regCP_DFY_DATA_15_BASE_IDX 1
+#define regCP_DFY_CMD 0x1818
+#define regCP_DFY_CMD_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_cpphqddec
+// base address: 0x2e080
+#define regCP_HPD_MES_ROQ_OFFSETS 0x1821
+#define regCP_HPD_MES_ROQ_OFFSETS_BASE_IDX 1
+#define regCP_HPD_ROQ_OFFSETS 0x1821
+#define regCP_HPD_ROQ_OFFSETS_BASE_IDX 1
+#define regCP_HPD_STATUS0 0x1822
+#define regCP_HPD_STATUS0_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_didtdec
+// base address: 0x2e400
+#define regDIDT_INDEX_AUTO_INCR_EN 0x1900
+#define regDIDT_INDEX_AUTO_INCR_EN_BASE_IDX 1
+#define regDIDT_EDC_CTRL 0x1901
+#define regDIDT_EDC_CTRL_BASE_IDX 1
+#define regDIDT_EDC_THROTTLE_CTRL 0x1902
+#define regDIDT_EDC_THROTTLE_CTRL_BASE_IDX 1
+#define regDIDT_EDC_THRESHOLD 0x1903
+#define regDIDT_EDC_THRESHOLD_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_1_2 0x1904
+#define regDIDT_EDC_STALL_PATTERN_1_2_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_3_4 0x1905
+#define regDIDT_EDC_STALL_PATTERN_3_4_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_5_6 0x1906
+#define regDIDT_EDC_STALL_PATTERN_5_6_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_7 0x1907
+#define regDIDT_EDC_STALL_PATTERN_7_BASE_IDX 1
+#define regDIDT_EDC_STATUS 0x1908
+#define regDIDT_EDC_STATUS_BASE_IDX 1
+#define regDIDT_EDC_DYNAMIC_THRESHOLD_RO 0x1909
+#define regDIDT_EDC_DYNAMIC_THRESHOLD_RO_BASE_IDX 1
+#define regDIDT_EDC_OVERFLOW 0x190a
+#define regDIDT_EDC_OVERFLOW_BASE_IDX 1
+#define regDIDT_EDC_ROLLING_POWER_DELTA 0x190b
+#define regDIDT_EDC_ROLLING_POWER_DELTA_BASE_IDX 1
+#define regDIDT_IND_INDEX 0x190c
+#define regDIDT_IND_INDEX_BASE_IDX 1
+#define regDIDT_IND_DATA 0x190d
+#define regDIDT_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_spidec
+// base address: 0x2e500
+#define regSPI_CDBG_SYS_GFX 0x1940
+#define regSPI_CDBG_SYS_GFX_BASE_IDX 1
+#define regSPI_CDBG_SYS_HP3D 0x1941
+#define regSPI_CDBG_SYS_HP3D_BASE_IDX 1
+#define regSPI_CDBG_SYS_CS0 0x1942
+#define regSPI_CDBG_SYS_CS0_BASE_IDX 1
+#define regSPI_GDBG_WAVE_CNTL 0x1943
+#define regSPI_GDBG_WAVE_CNTL_BASE_IDX 1
+#define regSPI_GDBG_TRAP_CONFIG 0x1944
+#define regSPI_GDBG_TRAP_CONFIG_BASE_IDX 1
+#define regSPI_GDBG_WAVE_CNTL3 0x1945
+#define regSPI_GDBG_WAVE_CNTL3_BASE_IDX 1
+#define regSPI_RESET_DEBUG 0x1946
+#define regSPI_RESET_DEBUG_BASE_IDX 1
+#define regSPI_ARB_CNTL_0 0x1949
+#define regSPI_ARB_CNTL_0_BASE_IDX 1
+#define regSPI_FEATURE_CTRL 0x194a
+#define regSPI_FEATURE_CTRL_BASE_IDX 1
+#define regSPI_SHADER_RSRC_LIMIT_CTRL 0x194b
+#define regSPI_SHADER_RSRC_LIMIT_CTRL_BASE_IDX 1
+#define regSPI_COMPUTE_WF_CTX_SAVE_STATUS 0x194e
+#define regSPI_COMPUTE_WF_CTX_SAVE_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_tcpdec
+// base address: 0x2e680
+#define regTCP_INVALIDATE 0x19a0
+#define regTCP_INVALIDATE_BASE_IDX 1
+#define regTCP_STATUS 0x19a1
+#define regTCP_STATUS_BASE_IDX 1
+#define regTCP_CNTL 0x19a2
+#define regTCP_CNTL_BASE_IDX 1
+#define regTCP_CNTL2 0x19a3
+#define regTCP_CNTL2_BASE_IDX 1
+#define regTCP_CREDIT 0x19a4
+#define regTCP_CREDIT_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_gdsdec
+// base address: 0x2e6c0
+#define regGDS_ENHANCE2 0x19b0
+#define regGDS_ENHANCE2_BASE_IDX 1
+#define regGDS_OA_CGPG_RESTORE 0x19b1
+#define regGDS_OA_CGPG_RESTORE_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_utcl1dec
+// base address: 0x2e600
+#define regUTCL1_CTRL_0 0x1980
+#define regUTCL1_CTRL_0_BASE_IDX 1
+#define regUTCL1_UTCL0_INVREQ_DISABLE 0x1984
+#define regUTCL1_UTCL0_INVREQ_DISABLE_BASE_IDX 1
+#define regUTCL1_CTRL_2 0x1985
+#define regUTCL1_CTRL_2_BASE_IDX 1
+#define regUTCL1_FIFO_SIZING 0x1986
+#define regUTCL1_FIFO_SIZING_BASE_IDX 1
+#define regGCRD_SA0_TARGETS_DISABLE 0x1987
+#define regGCRD_SA0_TARGETS_DISABLE_BASE_IDX 1
+#define regGCRD_SA1_TARGETS_DISABLE 0x1989
+#define regGCRD_SA1_TARGETS_DISABLE_BASE_IDX 1
+#define regGCRD_CREDIT_SAFE 0x198a
+#define regGCRD_CREDIT_SAFE_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_pmmdec
+// base address: 0x2e640
+#define regGCR_GENERAL_CNTL 0x1990
+#define regGCR_GENERAL_CNTL_BASE_IDX 1
+#define regGCR_TARGET_DISABLE 0x1991
+#define regGCR_TARGET_DISABLE_BASE_IDX 1
+#define regGCR_CMD_STATUS 0x1992
+#define regGCR_CMD_STATUS_BASE_IDX 1
+#define regGCR_SPARE 0x1993
+#define regGCR_SPARE_BASE_IDX 1
+#define regPMM_CNTL2 0x1999
+#define regPMM_CNTL2_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_gccacdec
+// base address: 0x2eb40
+#define regGC_CAC_CTRL_1 0x1ad0
+#define regGC_CAC_CTRL_1_BASE_IDX 1
+#define regGC_CAC_CTRL_2 0x1ad1
+#define regGC_CAC_CTRL_2_BASE_IDX 1
+#define regGC_CAC_AGGR_LOWER 0x1ad2
+#define regGC_CAC_AGGR_LOWER_BASE_IDX 1
+#define regGC_CAC_AGGR_UPPER 0x1ad3
+#define regGC_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE0_CAC_AGGR_LOWER 0x1ad4
+#define regSE0_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE0_CAC_AGGR_UPPER 0x1ad5
+#define regSE0_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE1_CAC_AGGR_LOWER 0x1ad6
+#define regSE1_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE1_CAC_AGGR_UPPER 0x1ad7
+#define regSE1_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE2_CAC_AGGR_LOWER 0x1ad8
+#define regSE2_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE2_CAC_AGGR_UPPER 0x1ad9
+#define regSE2_CAC_AGGR_UPPER_BASE_IDX 1
+#define regGC_CAC_AGGR_GFXCLK_CYCLE 0x1ae4
+#define regGC_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE0_CAC_AGGR_GFXCLK_CYCLE 0x1ae5
+#define regSE0_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE1_CAC_AGGR_GFXCLK_CYCLE 0x1ae6
+#define regSE1_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE2_CAC_AGGR_GFXCLK_CYCLE 0x1ae7
+#define regSE2_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regGC_EDC_CTRL 0x1aed
+#define regGC_EDC_CTRL_BASE_IDX 1
+#define regGC_EDC_THRESHOLD 0x1aee
+#define regGC_EDC_THRESHOLD_BASE_IDX 1
+#define regGC_EDC_STRETCH_CTRL 0x1aef
+#define regGC_EDC_STRETCH_CTRL_BASE_IDX 1
+#define regGC_EDC_STRETCH_THRESHOLD 0x1af0
+#define regGC_EDC_STRETCH_THRESHOLD_BASE_IDX 1
+#define regEDC_HYSTERESIS_CNTL 0x1af1
+#define regEDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGC_THROTTLE_CTRL 0x1af2
+#define regGC_THROTTLE_CTRL_BASE_IDX 1
+#define regGC_THROTTLE_CTRL1 0x1af3
+#define regGC_THROTTLE_CTRL1_BASE_IDX 1
+#define regPCC_STALL_PATTERN_CTRL 0x1af4
+#define regPCC_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_CTRL 0x1af5
+#define regPWRBRK_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regPCC_STALL_PATTERN_1_2 0x1af6
+#define regPCC_STALL_PATTERN_1_2_BASE_IDX 1
+#define regPCC_STALL_PATTERN_3_4 0x1af7
+#define regPCC_STALL_PATTERN_3_4_BASE_IDX 1
+#define regPCC_STALL_PATTERN_5_6 0x1af8
+#define regPCC_STALL_PATTERN_5_6_BASE_IDX 1
+#define regPCC_STALL_PATTERN_7 0x1af9
+#define regPCC_STALL_PATTERN_7_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_1_2 0x1afa
+#define regPWRBRK_STALL_PATTERN_1_2_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_3_4 0x1afb
+#define regPWRBRK_STALL_PATTERN_3_4_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_5_6 0x1afc
+#define regPWRBRK_STALL_PATTERN_5_6_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_7 0x1afd
+#define regPWRBRK_STALL_PATTERN_7_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_CTRL 0x1afe
+#define regDIDT_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_1_2 0x1aff
+#define regDIDT_STALL_PATTERN_1_2_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_3_4 0x1b00
+#define regDIDT_STALL_PATTERN_3_4_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_5_6 0x1b01
+#define regDIDT_STALL_PATTERN_5_6_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_7 0x1b02
+#define regDIDT_STALL_PATTERN_7_BASE_IDX 1
+#define regPCC_PWRBRK_HYSTERESIS_CTRL 0x1b03
+#define regPCC_PWRBRK_HYSTERESIS_CTRL_BASE_IDX 1
+#define regEDC_STRETCH_PERF_COUNTER 0x1b04
+#define regEDC_STRETCH_PERF_COUNTER_BASE_IDX 1
+#define regEDC_UNSTRETCH_PERF_COUNTER 0x1b05
+#define regEDC_UNSTRETCH_PERF_COUNTER_BASE_IDX 1
+#define regEDC_STRETCH_NUM_PERF_COUNTER 0x1b06
+#define regEDC_STRETCH_NUM_PERF_COUNTER_BASE_IDX 1
+#define regGC_EDC_STATUS 0x1b07
+#define regGC_EDC_STATUS_BASE_IDX 1
+#define regGC_EDC_OVERFLOW 0x1b08
+#define regGC_EDC_OVERFLOW_BASE_IDX 1
+#define regGC_EDC_ROLLING_POWER_DELTA 0x1b09
+#define regGC_EDC_ROLLING_POWER_DELTA_BASE_IDX 1
+#define regGC_THROTTLE_STATUS 0x1b0a
+#define regGC_THROTTLE_STATUS_BASE_IDX 1
+#define regEDC_PERF_COUNTER 0x1b0b
+#define regEDC_PERF_COUNTER_BASE_IDX 1
+#define regPCC_PERF_COUNTER 0x1b0c
+#define regPCC_PERF_COUNTER_BASE_IDX 1
+#define regPWRBRK_PERF_COUNTER 0x1b0d
+#define regPWRBRK_PERF_COUNTER_BASE_IDX 1
+#define regEDC_HYSTERESIS_STAT 0x1b0e
+#define regEDC_HYSTERESIS_STAT_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CP_0 0x1b10
+#define regGC_CAC_WEIGHT_CP_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CP_1 0x1b11
+#define regGC_CAC_WEIGHT_CP_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_0 0x1b12
+#define regGC_CAC_WEIGHT_EA_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_1 0x1b13
+#define regGC_CAC_WEIGHT_EA_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_2 0x1b14
+#define regGC_CAC_WEIGHT_EA_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_0 0x1b15
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_1 0x1b16
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_2 0x1b17
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_3 0x1b18
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_4 0x1b19
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_4_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_0 0x1b1a
+#define regGC_CAC_WEIGHT_UTCL2_VML2_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_1 0x1b1b
+#define regGC_CAC_WEIGHT_UTCL2_VML2_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_2 0x1b1c
+#define regGC_CAC_WEIGHT_UTCL2_VML2_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_0 0x1b1d
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_1 0x1b1e
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_2 0x1b1f
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_0 0x1b20
+#define regGC_CAC_WEIGHT_GDS_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_1 0x1b21
+#define regGC_CAC_WEIGHT_GDS_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_2 0x1b22
+#define regGC_CAC_WEIGHT_GDS_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_0 0x1b23
+#define regGC_CAC_WEIGHT_GE_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_1 0x1b24
+#define regGC_CAC_WEIGHT_GE_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_2 0x1b25
+#define regGC_CAC_WEIGHT_GE_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_3 0x1b26
+#define regGC_CAC_WEIGHT_GE_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PMM_0 0x1b2e
+#define regGC_CAC_WEIGHT_PMM_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_0 0x1b2f
+#define regGC_CAC_WEIGHT_GL2C_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_1 0x1b30
+#define regGC_CAC_WEIGHT_GL2C_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_2 0x1b31
+#define regGC_CAC_WEIGHT_GL2C_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_0 0x1b32
+#define regGC_CAC_WEIGHT_PH_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_1 0x1b33
+#define regGC_CAC_WEIGHT_PH_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_2 0x1b34
+#define regGC_CAC_WEIGHT_PH_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_3 0x1b35
+#define regGC_CAC_WEIGHT_PH_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_0 0x1b36
+#define regGC_CAC_WEIGHT_SDMA_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_1 0x1b37
+#define regGC_CAC_WEIGHT_SDMA_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_2 0x1b38
+#define regGC_CAC_WEIGHT_SDMA_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_3 0x1b39
+#define regGC_CAC_WEIGHT_SDMA_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_4 0x1b3a
+#define regGC_CAC_WEIGHT_SDMA_4_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_5 0x1b3b
+#define regGC_CAC_WEIGHT_SDMA_5_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CHC_0 0x1b3c
+#define regGC_CAC_WEIGHT_CHC_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CHC_1 0x1b3d
+#define regGC_CAC_WEIGHT_CHC_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GUS_0 0x1b3e
+#define regGC_CAC_WEIGHT_GUS_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GUS_1 0x1b3f
+#define regGC_CAC_WEIGHT_GUS_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_RLC_0 0x1b40
+#define regGC_CAC_WEIGHT_RLC_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GRBM_0 0x1b44
+#define regGC_CAC_WEIGHT_GRBM_0_BASE_IDX 1
+#define regGC_EDC_CLK_MONITOR_CTRL 0x1b56
+#define regGC_EDC_CLK_MONITOR_CTRL_BASE_IDX 1
+#define regGC_CAC_IND_INDEX 0x1b58
+#define regGC_CAC_IND_INDEX_BASE_IDX 1
+#define regGC_CAC_IND_DATA 0x1b59
+#define regGC_CAC_IND_DATA_BASE_IDX 1
+#define regSE_CAC_CTRL_1 0x1b70
+#define regSE_CAC_CTRL_1_BASE_IDX 1
+#define regSE_CAC_CTRL_2 0x1b71
+#define regSE_CAC_CTRL_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TA_0 0x1b72
+#define regSE_CAC_WEIGHT_TA_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_0 0x1b73
+#define regSE_CAC_WEIGHT_TD_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_1 0x1b74
+#define regSE_CAC_WEIGHT_TD_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_2 0x1b75
+#define regSE_CAC_WEIGHT_TD_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_3 0x1b76
+#define regSE_CAC_WEIGHT_TD_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_4 0x1b77
+#define regSE_CAC_WEIGHT_TD_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_5 0x1b78
+#define regSE_CAC_WEIGHT_TD_5_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_0 0x1b79
+#define regSE_CAC_WEIGHT_TCP_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_1 0x1b7a
+#define regSE_CAC_WEIGHT_TCP_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_2 0x1b7b
+#define regSE_CAC_WEIGHT_TCP_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_3 0x1b7c
+#define regSE_CAC_WEIGHT_TCP_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_0 0x1b7d
+#define regSE_CAC_WEIGHT_SQ_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_1 0x1b7e
+#define regSE_CAC_WEIGHT_SQ_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_2 0x1b7f
+#define regSE_CAC_WEIGHT_SQ_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SP_0 0x1b80
+#define regSE_CAC_WEIGHT_SP_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SP_1 0x1b81
+#define regSE_CAC_WEIGHT_SP_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_0 0x1b82
+#define regSE_CAC_WEIGHT_LDS_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_1 0x1b83
+#define regSE_CAC_WEIGHT_LDS_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_2 0x1b84
+#define regSE_CAC_WEIGHT_LDS_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_3 0x1b85
+#define regSE_CAC_WEIGHT_LDS_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQC_0 0x1b87
+#define regSE_CAC_WEIGHT_SQC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQC_1 0x1b88
+#define regSE_CAC_WEIGHT_SQC_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CU_0 0x1b89
+#define regSE_CAC_WEIGHT_CU_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_BCI_0 0x1b8a
+#define regSE_CAC_WEIGHT_BCI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_0 0x1b8b
+#define regSE_CAC_WEIGHT_CB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_1 0x1b8c
+#define regSE_CAC_WEIGHT_CB_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_2 0x1b8d
+#define regSE_CAC_WEIGHT_CB_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_3 0x1b8e
+#define regSE_CAC_WEIGHT_CB_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_4 0x1b8f
+#define regSE_CAC_WEIGHT_CB_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_5 0x1b90
+#define regSE_CAC_WEIGHT_CB_5_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_6 0x1b91
+#define regSE_CAC_WEIGHT_CB_6_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_7 0x1b92
+#define regSE_CAC_WEIGHT_CB_7_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_8 0x1b93
+#define regSE_CAC_WEIGHT_CB_8_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_9 0x1b94
+#define regSE_CAC_WEIGHT_CB_9_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_10 0x1b95
+#define regSE_CAC_WEIGHT_CB_10_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_11 0x1b96
+#define regSE_CAC_WEIGHT_CB_11_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_0 0x1b97
+#define regSE_CAC_WEIGHT_DB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_1 0x1b98
+#define regSE_CAC_WEIGHT_DB_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_2 0x1b99
+#define regSE_CAC_WEIGHT_DB_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_3 0x1b9a
+#define regSE_CAC_WEIGHT_DB_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_4 0x1b9b
+#define regSE_CAC_WEIGHT_DB_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_RMI_0 0x1b9c
+#define regSE_CAC_WEIGHT_RMI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_RMI_1 0x1b9d
+#define regSE_CAC_WEIGHT_RMI_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SX_0 0x1b9e
+#define regSE_CAC_WEIGHT_SX_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SXRB_0 0x1b9f
+#define regSE_CAC_WEIGHT_SXRB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_UTCL1_0 0x1ba0
+#define regSE_CAC_WEIGHT_UTCL1_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_0 0x1ba1
+#define regSE_CAC_WEIGHT_GL1C_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_1 0x1ba2
+#define regSE_CAC_WEIGHT_GL1C_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_2 0x1ba3
+#define regSE_CAC_WEIGHT_GL1C_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_0 0x1ba4
+#define regSE_CAC_WEIGHT_SPI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_1 0x1ba5
+#define regSE_CAC_WEIGHT_SPI_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_2 0x1ba6
+#define regSE_CAC_WEIGHT_SPI_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PC_0 0x1ba7
+#define regSE_CAC_WEIGHT_PC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_0 0x1ba8
+#define regSE_CAC_WEIGHT_PA_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_1 0x1ba9
+#define regSE_CAC_WEIGHT_PA_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_2 0x1baa
+#define regSE_CAC_WEIGHT_PA_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_3 0x1bab
+#define regSE_CAC_WEIGHT_PA_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_0 0x1bac
+#define regSE_CAC_WEIGHT_SC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_1 0x1bad
+#define regSE_CAC_WEIGHT_SC_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_2 0x1bae
+#define regSE_CAC_WEIGHT_SC_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_3 0x1baf
+#define regSE_CAC_WEIGHT_SC_3_BASE_IDX 1
+#define regSE_CAC_WINDOW_AGGR_VALUE 0x1bb0
+#define regSE_CAC_WINDOW_AGGR_VALUE_BASE_IDX 1
+#define regSE_CAC_WINDOW_GFXCLK_CYCLE 0x1bb1
+#define regSE_CAC_WINDOW_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE_CAC_IND_INDEX 0x1bce
+#define regSE_CAC_IND_INDEX_BASE_IDX 1
+#define regSE_CAC_IND_DATA 0x1bcf
+#define regSE_CAC_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly2_spidec
+// base address: 0x2f000
+#define regSPI_RESOURCE_RESERVE_CU_0 0x1c00
+#define regSPI_RESOURCE_RESERVE_CU_0_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_1 0x1c01
+#define regSPI_RESOURCE_RESERVE_CU_1_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_2 0x1c02
+#define regSPI_RESOURCE_RESERVE_CU_2_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_3 0x1c03
+#define regSPI_RESOURCE_RESERVE_CU_3_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_4 0x1c04
+#define regSPI_RESOURCE_RESERVE_CU_4_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_5 0x1c05
+#define regSPI_RESOURCE_RESERVE_CU_5_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_6 0x1c06
+#define regSPI_RESOURCE_RESERVE_CU_6_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_7 0x1c07
+#define regSPI_RESOURCE_RESERVE_CU_7_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_8 0x1c08
+#define regSPI_RESOURCE_RESERVE_CU_8_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_9 0x1c09
+#define regSPI_RESOURCE_RESERVE_CU_9_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_10 0x1c0a
+#define regSPI_RESOURCE_RESERVE_CU_10_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_11 0x1c0b
+#define regSPI_RESOURCE_RESERVE_CU_11_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_12 0x1c0c
+#define regSPI_RESOURCE_RESERVE_CU_12_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_13 0x1c0d
+#define regSPI_RESOURCE_RESERVE_CU_13_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_14 0x1c0e
+#define regSPI_RESOURCE_RESERVE_CU_14_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_15 0x1c0f
+#define regSPI_RESOURCE_RESERVE_CU_15_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_0 0x1c10
+#define regSPI_RESOURCE_RESERVE_EN_CU_0_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_1 0x1c11
+#define regSPI_RESOURCE_RESERVE_EN_CU_1_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_2 0x1c12
+#define regSPI_RESOURCE_RESERVE_EN_CU_2_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_3 0x1c13
+#define regSPI_RESOURCE_RESERVE_EN_CU_3_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_4 0x1c14
+#define regSPI_RESOURCE_RESERVE_EN_CU_4_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_5 0x1c15
+#define regSPI_RESOURCE_RESERVE_EN_CU_5_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_6 0x1c16
+#define regSPI_RESOURCE_RESERVE_EN_CU_6_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_7 0x1c17
+#define regSPI_RESOURCE_RESERVE_EN_CU_7_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_8 0x1c18
+#define regSPI_RESOURCE_RESERVE_EN_CU_8_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_9 0x1c19
+#define regSPI_RESOURCE_RESERVE_EN_CU_9_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_10 0x1c1a
+#define regSPI_RESOURCE_RESERVE_EN_CU_10_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_11 0x1c1b
+#define regSPI_RESOURCE_RESERVE_EN_CU_11_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_12 0x1c1c
+#define regSPI_RESOURCE_RESERVE_EN_CU_12_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_13 0x1c1d
+#define regSPI_RESOURCE_RESERVE_EN_CU_13_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_14 0x1c1e
+#define regSPI_RESOURCE_RESERVE_EN_CU_14_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_15 0x1c1f
+#define regSPI_RESOURCE_RESERVE_EN_CU_15_BASE_IDX 1
+
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define regCP_EOP_DONE_ADDR_LO 0x2000
+#define regCP_EOP_DONE_ADDR_LO_BASE_IDX 1
+#define regCP_EOP_DONE_ADDR_HI 0x2001
+#define regCP_EOP_DONE_ADDR_HI_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_LO 0x2002
+#define regCP_EOP_DONE_DATA_LO_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_HI 0x2003
+#define regCP_EOP_DONE_DATA_HI_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_LO 0x2004
+#define regCP_EOP_LAST_FENCE_LO_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_HI 0x2005
+#define regCP_EOP_LAST_FENCE_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_LO 0x2018
+#define regCP_PIPE_STATS_ADDR_LO_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_HI 0x2019
+#define regCP_PIPE_STATS_ADDR_HI_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_LO 0x201a
+#define regCP_VGT_IAVERT_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_HI 0x201b
+#define regCP_VGT_IAVERT_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_LO 0x201c
+#define regCP_VGT_IAPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_HI 0x201d
+#define regCP_VGT_IAPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_LO 0x201e
+#define regCP_VGT_GSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_HI 0x201f
+#define regCP_VGT_GSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_LO 0x2020
+#define regCP_VGT_VSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_HI 0x2021
+#define regCP_VGT_VSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_LO 0x2022
+#define regCP_VGT_GSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_HI 0x2023
+#define regCP_VGT_GSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_LO 0x2024
+#define regCP_VGT_HSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_HI 0x2025
+#define regCP_VGT_HSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_LO 0x2026
+#define regCP_VGT_DSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_HI 0x2027
+#define regCP_VGT_DSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_LO 0x2028
+#define regCP_PA_CINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_HI 0x2029
+#define regCP_PA_CINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_LO 0x202a
+#define regCP_PA_CPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_HI 0x202b
+#define regCP_PA_CPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_LO 0x202c
+#define regCP_SC_PSINVOC_COUNT0_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_HI 0x202d
+#define regCP_SC_PSINVOC_COUNT0_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_LO 0x202e
+#define regCP_SC_PSINVOC_COUNT1_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_HI 0x202f
+#define regCP_SC_PSINVOC_COUNT1_HI_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_LO 0x2030
+#define regCP_VGT_CSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_HI 0x2031
+#define regCP_VGT_CSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_ASINVOC_COUNT_LO 0x2032
+#define regCP_VGT_ASINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_ASINVOC_COUNT_HI 0x2033
+#define regCP_VGT_ASINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_CONTROL 0x203d
+#define regCP_PIPE_STATS_CONTROL_BASE_IDX 1
+#define regSCRATCH_REG0 0x2040
+#define regSCRATCH_REG0_BASE_IDX 1
+#define regSCRATCH_REG1 0x2041
+#define regSCRATCH_REG1_BASE_IDX 1
+#define regSCRATCH_REG2 0x2042
+#define regSCRATCH_REG2_BASE_IDX 1
+#define regSCRATCH_REG3 0x2043
+#define regSCRATCH_REG3_BASE_IDX 1
+#define regSCRATCH_REG4 0x2044
+#define regSCRATCH_REG4_BASE_IDX 1
+#define regSCRATCH_REG5 0x2045
+#define regSCRATCH_REG5_BASE_IDX 1
+#define regSCRATCH_REG6 0x2046
+#define regSCRATCH_REG6_BASE_IDX 1
+#define regSCRATCH_REG7 0x2047
+#define regSCRATCH_REG7_BASE_IDX 1
+#define regSCRATCH_REG_ATOMIC 0x2048
+#define regSCRATCH_REG_ATOMIC_BASE_IDX 1
+#define regSCRATCH_REG_CMPSWAP_ATOMIC 0x2048
+#define regSCRATCH_REG_CMPSWAP_ATOMIC_BASE_IDX 1
+#define regCP_APPEND_DDID_CNT 0x204b
+#define regCP_APPEND_DDID_CNT_BASE_IDX 1
+#define regCP_APPEND_DATA_HI 0x204c
+#define regCP_APPEND_DATA_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_HI 0x204d
+#define regCP_APPEND_LAST_CS_FENCE_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_HI 0x204e
+#define regCP_APPEND_LAST_PS_FENCE_HI_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_LO 0x2052
+#define regCP_PFP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_HI 0x2053
+#define regCP_PFP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO 0x2054
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI 0x2055
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO 0x2056
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI 0x2057
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_APPEND_ADDR_LO 0x2058
+#define regCP_APPEND_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_ADDR_HI 0x2059
+#define regCP_APPEND_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_DATA 0x205a
+#define regCP_APPEND_DATA_BASE_IDX 1
+#define regCP_APPEND_DATA_LO 0x205a
+#define regCP_APPEND_DATA_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_LO 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_LO 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_LO 0x205d
+#define regCP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_LO 0x205d
+#define regCP_ME_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_HI 0x205e
+#define regCP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_HI 0x205e
+#define regCP_ME_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_MC_WADDR_LO 0x2069
+#define regCP_ME_MC_WADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_WADDR_HI 0x206a
+#define regCP_ME_MC_WADDR_HI_BASE_IDX 1
+#define regCP_ME_MC_WDATA_LO 0x206b
+#define regCP_ME_MC_WDATA_LO_BASE_IDX 1
+#define regCP_ME_MC_WDATA_HI 0x206c
+#define regCP_ME_MC_WDATA_HI_BASE_IDX 1
+#define regCP_ME_MC_RADDR_LO 0x206d
+#define regCP_ME_MC_RADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_RADDR_HI 0x206e
+#define regCP_ME_MC_RADDR_HI_BASE_IDX 1
+#define regCP_SEM_WAIT_TIMER 0x206f
+#define regCP_SEM_WAIT_TIMER_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_LO 0x2070
+#define regCP_SIG_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_HI 0x2071
+#define regCP_SIG_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_WAIT_REG_MEM_TIMEOUT 0x2074
+#define regCP_WAIT_REG_MEM_TIMEOUT_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_LO 0x2075
+#define regCP_WAIT_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_HI 0x2076
+#define regCP_WAIT_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CONTROL 0x2077
+#define regCP_DMA_PFP_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_CONTROL 0x2078
+#define regCP_DMA_ME_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR 0x2080
+#define regCP_DMA_ME_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR_HI 0x2081
+#define regCP_DMA_ME_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR 0x2082
+#define regCP_DMA_ME_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR_HI 0x2083
+#define regCP_DMA_ME_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_COMMAND 0x2084
+#define regCP_DMA_ME_COMMAND_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR 0x2085
+#define regCP_DMA_PFP_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR_HI 0x2086
+#define regCP_DMA_PFP_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR 0x2087
+#define regCP_DMA_PFP_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR_HI 0x2088
+#define regCP_DMA_PFP_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_COMMAND 0x2089
+#define regCP_DMA_PFP_COMMAND_BASE_IDX 1
+#define regCP_DMA_CNTL 0x208a
+#define regCP_DMA_CNTL_BASE_IDX 1
+#define regCP_DMA_READ_TAGS 0x208b
+#define regCP_DMA_READ_TAGS_BASE_IDX 1
+#define regCP_PFP_IB_CONTROL 0x208d
+#define regCP_PFP_IB_CONTROL_BASE_IDX 1
+#define regCP_PFP_LOAD_CONTROL 0x208e
+#define regCP_PFP_LOAD_CONTROL_BASE_IDX 1
+#define regCP_SCRATCH_INDEX 0x208f
+#define regCP_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_SCRATCH_DATA 0x2090
+#define regCP_SCRATCH_DATA_BASE_IDX 1
+#define regCP_RB_OFFSET 0x2091
+#define regCP_RB_OFFSET_BASE_IDX 1
+#define regCP_IB1_OFFSET 0x2092
+#define regCP_IB1_OFFSET_BASE_IDX 1
+#define regCP_IB2_OFFSET 0x2093
+#define regCP_IB2_OFFSET_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_BEGIN 0x2094
+#define regCP_IB1_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_END 0x2095
+#define regCP_IB1_PREAMBLE_END_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_BEGIN 0x2096
+#define regCP_IB2_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_END 0x2097
+#define regCP_IB2_PREAMBLE_END_BASE_IDX 1
+#define regCP_DMA_ME_CMD_ADDR_LO 0x209c
+#define regCP_DMA_ME_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_DMA_ME_CMD_ADDR_HI 0x209d
+#define regCP_DMA_ME_CMD_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CMD_ADDR_LO 0x209e
+#define regCP_DMA_PFP_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_DMA_PFP_CMD_ADDR_HI 0x209f
+#define regCP_DMA_PFP_CMD_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_CMD_ADDR_LO 0x20a0
+#define regCP_APPEND_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_CMD_ADDR_HI 0x20a1
+#define regCP_APPEND_CMD_ADDR_HI_BASE_IDX 1
+#define regUCONFIG_RESERVED_REG0 0x20a2
+#define regUCONFIG_RESERVED_REG0_BASE_IDX 1
+#define regUCONFIG_RESERVED_REG1 0x20a3
+#define regUCONFIG_RESERVED_REG1_BASE_IDX 1
+#define regCP_PA_MSPRIM_COUNT_LO 0x20a4
+#define regCP_PA_MSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_MSPRIM_COUNT_HI 0x20a5
+#define regCP_PA_MSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_GE_MSINVOC_COUNT_LO 0x20a6
+#define regCP_GE_MSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_GE_MSINVOC_COUNT_HI 0x20a7
+#define regCP_GE_MSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_IB1_CMD_BUFSZ 0x20c0
+#define regCP_IB1_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB2_CMD_BUFSZ 0x20c1
+#define regCP_IB2_CMD_BUFSZ_BASE_IDX 1
+#define regCP_ST_CMD_BUFSZ 0x20c2
+#define regCP_ST_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB1_BASE_LO 0x20cc
+#define regCP_IB1_BASE_LO_BASE_IDX 1
+#define regCP_IB1_BASE_HI 0x20cd
+#define regCP_IB1_BASE_HI_BASE_IDX 1
+#define regCP_IB1_BUFSZ 0x20ce
+#define regCP_IB1_BUFSZ_BASE_IDX 1
+#define regCP_IB2_BASE_LO 0x20cf
+#define regCP_IB2_BASE_LO_BASE_IDX 1
+#define regCP_IB2_BASE_HI 0x20d0
+#define regCP_IB2_BASE_HI_BASE_IDX 1
+#define regCP_IB2_BUFSZ 0x20d1
+#define regCP_IB2_BUFSZ_BASE_IDX 1
+#define regCP_ST_BASE_LO 0x20d2
+#define regCP_ST_BASE_LO_BASE_IDX 1
+#define regCP_ST_BASE_HI 0x20d3
+#define regCP_ST_BASE_HI_BASE_IDX 1
+#define regCP_ST_BUFSZ 0x20d4
+#define regCP_ST_BUFSZ_BASE_IDX 1
+#define regCP_EOP_DONE_EVENT_CNTL 0x20d5
+#define regCP_EOP_DONE_EVENT_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_CNTL 0x20d6
+#define regCP_EOP_DONE_DATA_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_CNTX_ID 0x20d7
+#define regCP_EOP_DONE_CNTX_ID_BASE_IDX 1
+#define regCP_DB_BASE_LO 0x20d8
+#define regCP_DB_BASE_LO_BASE_IDX 1
+#define regCP_DB_BASE_HI 0x20d9
+#define regCP_DB_BASE_HI_BASE_IDX 1
+#define regCP_DB_BUFSZ 0x20da
+#define regCP_DB_BUFSZ_BASE_IDX 1
+#define regCP_DB_CMD_BUFSZ 0x20db
+#define regCP_DB_CMD_BUFSZ_BASE_IDX 1
+#define regCP_PFP_COMPLETION_STATUS 0x20ec
+#define regCP_PFP_COMPLETION_STATUS_BASE_IDX 1
+#define regCP_PRED_NOT_VISIBLE 0x20ee
+#define regCP_PRED_NOT_VISIBLE_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR 0x20f0
+#define regCP_PFP_METADATA_BASE_ADDR_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR_HI 0x20f1
+#define regCP_PFP_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR 0x20f4
+#define regCP_DRAW_INDX_INDR_ADDR_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR_HI 0x20f5
+#define regCP_DRAW_INDX_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR 0x20f6
+#define regCP_DISPATCH_INDR_ADDR_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR_HI 0x20f7
+#define regCP_DISPATCH_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR 0x20f8
+#define regCP_INDEX_BASE_ADDR_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR_HI 0x20f9
+#define regCP_INDEX_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_TYPE 0x20fa
+#define regCP_INDEX_TYPE_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR 0x20fb
+#define regCP_GDS_BKUP_ADDR_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR_HI 0x20fc
+#define regCP_GDS_BKUP_ADDR_HI_BASE_IDX 1
+#define regCP_SAMPLE_STATUS 0x20fd
+#define regCP_SAMPLE_STATUS_BASE_IDX 1
+#define regCP_ME_COHER_CNTL 0x20fe
+#define regCP_ME_COHER_CNTL_BASE_IDX 1
+#define regCP_ME_COHER_SIZE 0x20ff
+#define regCP_ME_COHER_SIZE_BASE_IDX 1
+#define regCP_ME_COHER_SIZE_HI 0x2100
+#define regCP_ME_COHER_SIZE_HI_BASE_IDX 1
+#define regCP_ME_COHER_BASE 0x2101
+#define regCP_ME_COHER_BASE_BASE_IDX 1
+#define regCP_ME_COHER_BASE_HI 0x2102
+#define regCP_ME_COHER_BASE_HI_BASE_IDX 1
+#define regCP_ME_COHER_STATUS 0x2103
+#define regCP_ME_COHER_STATUS_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_0 0x2140
+#define regRLC_GPM_PERF_COUNT_0_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_1 0x2141
+#define regRLC_GPM_PERF_COUNT_1_BASE_IDX 1
+#define regGRBM_GFX_INDEX 0x2200
+#define regGRBM_GFX_INDEX_BASE_IDX 1
+#define regVGT_PRIMITIVE_TYPE 0x2242
+#define regVGT_PRIMITIVE_TYPE_BASE_IDX 1
+#define regVGT_INDEX_TYPE 0x2243
+#define regVGT_INDEX_TYPE_BASE_IDX 1
+#define regGE_MIN_VTX_INDX 0x2249
+#define regGE_MIN_VTX_INDX_BASE_IDX 1
+#define regGE_INDX_OFFSET 0x224a
+#define regGE_INDX_OFFSET_BASE_IDX 1
+#define regGE_MULTI_PRIM_IB_RESET_EN 0x224b
+#define regGE_MULTI_PRIM_IB_RESET_EN_BASE_IDX 1
+#define regVGT_NUM_INDICES 0x224c
+#define regVGT_NUM_INDICES_BASE_IDX 1
+#define regVGT_NUM_INSTANCES 0x224d
+#define regVGT_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_TF_RING_SIZE 0x224e
+#define regVGT_TF_RING_SIZE_BASE_IDX 1
+#define regVGT_HS_OFFCHIP_PARAM 0x224f
+#define regVGT_HS_OFFCHIP_PARAM_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE 0x2250
+#define regVGT_TF_MEMORY_BASE_BASE_IDX 1
+#define regGE_MAX_VTX_INDX 0x2259
+#define regGE_MAX_VTX_INDX_BASE_IDX 1
+#define regVGT_INSTANCE_BASE_ID 0x225a
+#define regVGT_INSTANCE_BASE_ID_BASE_IDX 1
+#define regGE_CNTL 0x225b
+#define regGE_CNTL_BASE_IDX 1
+#define regGE_USER_VGPR1 0x225c
+#define regGE_USER_VGPR1_BASE_IDX 1
+#define regGE_USER_VGPR2 0x225d
+#define regGE_USER_VGPR2_BASE_IDX 1
+#define regGE_USER_VGPR3 0x225e
+#define regGE_USER_VGPR3_BASE_IDX 1
+#define regGE_STEREO_CNTL 0x225f
+#define regGE_STEREO_CNTL_BASE_IDX 1
+#define regGE_PC_ALLOC 0x2260
+#define regGE_PC_ALLOC_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE_HI 0x2261
+#define regVGT_TF_MEMORY_BASE_HI_BASE_IDX 1
+#define regGE_USER_VGPR_EN 0x2262
+#define regGE_USER_VGPR_EN_BASE_IDX 1
+#define regGE_GS_FAST_LAUNCH_WG_DIM 0x2264
+#define regGE_GS_FAST_LAUNCH_WG_DIM_BASE_IDX 1
+#define regGE_GS_FAST_LAUNCH_WG_DIM_1 0x2265
+#define regGE_GS_FAST_LAUNCH_WG_DIM_1_BASE_IDX 1
+#define regVGT_GS_OUT_PRIM_TYPE 0x2266
+#define regVGT_GS_OUT_PRIM_TYPE_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_VALUE 0x2280
+#define regPA_SU_LINE_STIPPLE_VALUE_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE_STATE 0x2281
+#define regPA_SC_LINE_STIPPLE_STATE_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_0 0x2284
+#define regPA_SC_SCREEN_EXTENT_MIN_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_0 0x2285
+#define regPA_SC_SCREEN_EXTENT_MAX_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_1 0x2286
+#define regPA_SC_SCREEN_EXTENT_MIN_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_1 0x228b
+#define regPA_SC_SCREEN_EXTENT_MAX_1_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN 0x22a0
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_H 0x22a1
+#define regPA_SC_P3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_V 0x22a2
+#define regPA_SC_P3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE 0x22a3
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT 0x22a4
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN 0x22a8
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_H 0x22a9
+#define regPA_SC_HP3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_V 0x22aa
+#define regPA_SC_HP3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE 0x22ab
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT 0x22ac
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_EN 0x22b0
+#define regPA_SC_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_H 0x22b1
+#define regPA_SC_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_V 0x22b2
+#define regPA_SC_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE 0x22b3
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_COUNT 0x22b4
+#define regPA_SC_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_0 0x2340
+#define regSQ_THREAD_TRACE_USERDATA_0_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_1 0x2341
+#define regSQ_THREAD_TRACE_USERDATA_1_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_2 0x2342
+#define regSQ_THREAD_TRACE_USERDATA_2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_3 0x2343
+#define regSQ_THREAD_TRACE_USERDATA_3_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_4 0x2344
+#define regSQ_THREAD_TRACE_USERDATA_4_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_5 0x2345
+#define regSQ_THREAD_TRACE_USERDATA_5_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_6 0x2346
+#define regSQ_THREAD_TRACE_USERDATA_6_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_7 0x2347
+#define regSQ_THREAD_TRACE_USERDATA_7_BASE_IDX 1
+#define regSQC_CACHES 0x2348
+#define regSQC_CACHES_BASE_IDX 1
+#define regTA_CS_BC_BASE_ADDR 0x2380
+#define regTA_CS_BC_BASE_ADDR_BASE_IDX 1
+#define regTA_CS_BC_BASE_ADDR_HI 0x2381
+#define regTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_LOW 0x23c0
+#define regDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_HI 0x23c1
+#define regDB_OCCLUSION_COUNT0_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_LOW 0x23c2
+#define regDB_OCCLUSION_COUNT1_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_HI 0x23c3
+#define regDB_OCCLUSION_COUNT1_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_LOW 0x23c4
+#define regDB_OCCLUSION_COUNT2_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_HI 0x23c5
+#define regDB_OCCLUSION_COUNT2_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_LOW 0x23c6
+#define regDB_OCCLUSION_COUNT3_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_HI 0x23c7
+#define regDB_OCCLUSION_COUNT3_HI_BASE_IDX 1
+#define regGDS_RD_ADDR 0x2400
+#define regGDS_RD_ADDR_BASE_IDX 1
+#define regGDS_RD_DATA 0x2401
+#define regGDS_RD_DATA_BASE_IDX 1
+#define regGDS_RD_BURST_ADDR 0x2402
+#define regGDS_RD_BURST_ADDR_BASE_IDX 1
+#define regGDS_RD_BURST_COUNT 0x2403
+#define regGDS_RD_BURST_COUNT_BASE_IDX 1
+#define regGDS_RD_BURST_DATA 0x2404
+#define regGDS_RD_BURST_DATA_BASE_IDX 1
+#define regGDS_WR_ADDR 0x2405
+#define regGDS_WR_ADDR_BASE_IDX 1
+#define regGDS_WR_DATA 0x2406
+#define regGDS_WR_DATA_BASE_IDX 1
+#define regGDS_WR_BURST_ADDR 0x2407
+#define regGDS_WR_BURST_ADDR_BASE_IDX 1
+#define regGDS_WR_BURST_DATA 0x2408
+#define regGDS_WR_BURST_DATA_BASE_IDX 1
+#define regGDS_WRITE_COMPLETE 0x2409
+#define regGDS_WRITE_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_CNTL 0x240a
+#define regGDS_ATOM_CNTL_BASE_IDX 1
+#define regGDS_ATOM_COMPLETE 0x240b
+#define regGDS_ATOM_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_BASE 0x240c
+#define regGDS_ATOM_BASE_BASE_IDX 1
+#define regGDS_ATOM_SIZE 0x240d
+#define regGDS_ATOM_SIZE_BASE_IDX 1
+#define regGDS_ATOM_OFFSET0 0x240e
+#define regGDS_ATOM_OFFSET0_BASE_IDX 1
+#define regGDS_ATOM_OFFSET1 0x240f
+#define regGDS_ATOM_OFFSET1_BASE_IDX 1
+#define regGDS_ATOM_DST 0x2410
+#define regGDS_ATOM_DST_BASE_IDX 1
+#define regGDS_ATOM_OP 0x2411
+#define regGDS_ATOM_OP_BASE_IDX 1
+#define regGDS_ATOM_SRC0 0x2412
+#define regGDS_ATOM_SRC0_BASE_IDX 1
+#define regGDS_ATOM_SRC0_U 0x2413
+#define regGDS_ATOM_SRC0_U_BASE_IDX 1
+#define regGDS_ATOM_SRC1 0x2414
+#define regGDS_ATOM_SRC1_BASE_IDX 1
+#define regGDS_ATOM_SRC1_U 0x2415
+#define regGDS_ATOM_SRC1_U_BASE_IDX 1
+#define regGDS_ATOM_READ0 0x2416
+#define regGDS_ATOM_READ0_BASE_IDX 1
+#define regGDS_ATOM_READ0_U 0x2417
+#define regGDS_ATOM_READ0_U_BASE_IDX 1
+#define regGDS_ATOM_READ1 0x2418
+#define regGDS_ATOM_READ1_BASE_IDX 1
+#define regGDS_ATOM_READ1_U 0x2419
+#define regGDS_ATOM_READ1_U_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNTL 0x241a
+#define regGDS_GWS_RESOURCE_CNTL_BASE_IDX 1
+#define regGDS_GWS_RESOURCE 0x241b
+#define regGDS_GWS_RESOURCE_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNT 0x241c
+#define regGDS_GWS_RESOURCE_CNT_BASE_IDX 1
+#define regGDS_OA_CNTL 0x241d
+#define regGDS_OA_CNTL_BASE_IDX 1
+#define regGDS_OA_COUNTER 0x241e
+#define regGDS_OA_COUNTER_BASE_IDX 1
+#define regGDS_OA_ADDRESS 0x241f
+#define regGDS_OA_ADDRESS_BASE_IDX 1
+#define regGDS_OA_INCDEC 0x2420
+#define regGDS_OA_INCDEC_BASE_IDX 1
+#define regGDS_OA_RING_SIZE 0x2421
+#define regGDS_OA_RING_SIZE_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_0 0x2422
+#define regGDS_STRMOUT_DWORDS_WRITTEN_0_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_1 0x2423
+#define regGDS_STRMOUT_DWORDS_WRITTEN_1_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_2 0x2424
+#define regGDS_STRMOUT_DWORDS_WRITTEN_2_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_3 0x2425
+#define regGDS_STRMOUT_DWORDS_WRITTEN_3_BASE_IDX 1
+#define regGDS_GS_0 0x2426
+#define regGDS_GS_0_BASE_IDX 1
+#define regGDS_GS_1 0x2427
+#define regGDS_GS_1_BASE_IDX 1
+#define regGDS_GS_2 0x2428
+#define regGDS_GS_2_BASE_IDX 1
+#define regGDS_GS_3 0x2429
+#define regGDS_GS_3_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_LO 0x242a
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_HI 0x242b
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_LO 0x242c
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_HI 0x242d
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_LO 0x242e
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_HI 0x242f
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_LO 0x2430
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_HI 0x2431
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_LO 0x2432
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_HI 0x2433
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_LO 0x2434
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_HI 0x2435
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_LO 0x2436
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_HI 0x2437
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_LO 0x2438
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_HI 0x2439
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_HI_BASE_IDX 1
+#define regSPI_CONFIG_CNTL 0x2440
+#define regSPI_CONFIG_CNTL_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_1 0x2441
+#define regSPI_CONFIG_CNTL_1_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_2 0x2442
+#define regSPI_CONFIG_CNTL_2_BASE_IDX 1
+#define regSPI_WAVE_LIMIT_CNTL 0x2443
+#define regSPI_WAVE_LIMIT_CNTL_BASE_IDX 1
+#define regSPI_GS_THROTTLE_CNTL1 0x2444
+#define regSPI_GS_THROTTLE_CNTL1_BASE_IDX 1
+#define regSPI_GS_THROTTLE_CNTL2 0x2445
+#define regSPI_GS_THROTTLE_CNTL2_BASE_IDX 1
+#define regSPI_ATTRIBUTE_RING_BASE 0x2446
+#define regSPI_ATTRIBUTE_RING_BASE_BASE_IDX 1
+#define regSPI_ATTRIBUTE_RING_SIZE 0x2447
+#define regSPI_ATTRIBUTE_RING_SIZE_BASE_IDX 1
+
+
+// addressBlock: gc_cprs64dec
+// base address: 0x32000
+#define regCP_MES_PRGRM_CNTR_START 0x2800
+#define regCP_MES_PRGRM_CNTR_START_BASE_IDX 1
+#define regCP_MES_INTR_ROUTINE_START 0x2801
+#define regCP_MES_INTR_ROUTINE_START_BASE_IDX 1
+#define regCP_MES_MTVEC_LO 0x2801
+#define regCP_MES_MTVEC_LO_BASE_IDX 1
+#define regCP_MES_INTR_ROUTINE_START_HI 0x2802
+#define regCP_MES_INTR_ROUTINE_START_HI_BASE_IDX 1
+#define regCP_MES_MTVEC_HI 0x2802
+#define regCP_MES_MTVEC_HI_BASE_IDX 1
+#define regCP_MES_CNTL 0x2807
+#define regCP_MES_CNTL_BASE_IDX 1
+#define regCP_MES_PIPE_PRIORITY_CNTS 0x2808
+#define regCP_MES_PIPE_PRIORITY_CNTS_BASE_IDX 1
+#define regCP_MES_PIPE0_PRIORITY 0x2809
+#define regCP_MES_PIPE0_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE1_PRIORITY 0x280a
+#define regCP_MES_PIPE1_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE2_PRIORITY 0x280b
+#define regCP_MES_PIPE2_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE3_PRIORITY 0x280c
+#define regCP_MES_PIPE3_PRIORITY_BASE_IDX 1
+#define regCP_MES_HEADER_DUMP 0x280d
+#define regCP_MES_HEADER_DUMP_BASE_IDX 1
+#define regCP_MES_MIE_LO 0x280e
+#define regCP_MES_MIE_LO_BASE_IDX 1
+#define regCP_MES_MIE_HI 0x280f
+#define regCP_MES_MIE_HI_BASE_IDX 1
+#define regCP_MES_INTERRUPT 0x2810
+#define regCP_MES_INTERRUPT_BASE_IDX 1
+#define regCP_MES_SCRATCH_INDEX 0x2811
+#define regCP_MES_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_MES_SCRATCH_DATA 0x2812
+#define regCP_MES_SCRATCH_DATA_BASE_IDX 1
+#define regCP_MES_INSTR_PNTR 0x2813
+#define regCP_MES_INSTR_PNTR_BASE_IDX 1
+#define regCP_MES_MSCRATCH_HI 0x2814
+#define regCP_MES_MSCRATCH_HI_BASE_IDX 1
+#define regCP_MES_MSCRATCH_LO 0x2815
+#define regCP_MES_MSCRATCH_LO_BASE_IDX 1
+#define regCP_MES_MSTATUS_LO 0x2816
+#define regCP_MES_MSTATUS_LO_BASE_IDX 1
+#define regCP_MES_MSTATUS_HI 0x2817
+#define regCP_MES_MSTATUS_HI_BASE_IDX 1
+#define regCP_MES_MEPC_LO 0x2818
+#define regCP_MES_MEPC_LO_BASE_IDX 1
+#define regCP_MES_MEPC_HI 0x2819
+#define regCP_MES_MEPC_HI_BASE_IDX 1
+#define regCP_MES_MCAUSE_LO 0x281a
+#define regCP_MES_MCAUSE_LO_BASE_IDX 1
+#define regCP_MES_MCAUSE_HI 0x281b
+#define regCP_MES_MCAUSE_HI_BASE_IDX 1
+#define regCP_MES_MBADADDR_LO 0x281c
+#define regCP_MES_MBADADDR_LO_BASE_IDX 1
+#define regCP_MES_MBADADDR_HI 0x281d
+#define regCP_MES_MBADADDR_HI_BASE_IDX 1
+#define regCP_MES_MIP_LO 0x281e
+#define regCP_MES_MIP_LO_BASE_IDX 1
+#define regCP_MES_MIP_HI 0x281f
+#define regCP_MES_MIP_HI_BASE_IDX 1
+#define regCP_MES_IC_OP_CNTL 0x2820
+#define regCP_MES_IC_OP_CNTL_BASE_IDX 1
+#define regCP_MES_MCYCLE_LO 0x2826
+#define regCP_MES_MCYCLE_LO_BASE_IDX 1
+#define regCP_MES_MCYCLE_HI 0x2827
+#define regCP_MES_MCYCLE_HI_BASE_IDX 1
+#define regCP_MES_MTIME_LO 0x2828
+#define regCP_MES_MTIME_LO_BASE_IDX 1
+#define regCP_MES_MTIME_HI 0x2829
+#define regCP_MES_MTIME_HI_BASE_IDX 1
+#define regCP_MES_MINSTRET_LO 0x282a
+#define regCP_MES_MINSTRET_LO_BASE_IDX 1
+#define regCP_MES_MINSTRET_HI 0x282b
+#define regCP_MES_MINSTRET_HI_BASE_IDX 1
+#define regCP_MES_MISA_LO 0x282c
+#define regCP_MES_MISA_LO_BASE_IDX 1
+#define regCP_MES_MISA_HI 0x282d
+#define regCP_MES_MISA_HI_BASE_IDX 1
+#define regCP_MES_MVENDORID_LO 0x282e
+#define regCP_MES_MVENDORID_LO_BASE_IDX 1
+#define regCP_MES_MVENDORID_HI 0x282f
+#define regCP_MES_MVENDORID_HI_BASE_IDX 1
+#define regCP_MES_MARCHID_LO 0x2830
+#define regCP_MES_MARCHID_LO_BASE_IDX 1
+#define regCP_MES_MARCHID_HI 0x2831
+#define regCP_MES_MARCHID_HI_BASE_IDX 1
+#define regCP_MES_MIMPID_LO 0x2832
+#define regCP_MES_MIMPID_LO_BASE_IDX 1
+#define regCP_MES_MIMPID_HI 0x2833
+#define regCP_MES_MIMPID_HI_BASE_IDX 1
+#define regCP_MES_MHARTID_LO 0x2834
+#define regCP_MES_MHARTID_LO_BASE_IDX 1
+#define regCP_MES_MHARTID_HI 0x2835
+#define regCP_MES_MHARTID_HI_BASE_IDX 1
+#define regCP_MES_DC_BASE_CNTL 0x2836
+#define regCP_MES_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_DC_OP_CNTL 0x2837
+#define regCP_MES_DC_OP_CNTL_BASE_IDX 1
+#define regCP_MES_MTIMECMP_LO 0x2838
+#define regCP_MES_MTIMECMP_LO_BASE_IDX 1
+#define regCP_MES_MTIMECMP_HI 0x2839
+#define regCP_MES_MTIMECMP_HI_BASE_IDX 1
+#define regCP_MES_PROCESS_QUANTUM_PIPE0 0x283a
+#define regCP_MES_PROCESS_QUANTUM_PIPE0_BASE_IDX 1
+#define regCP_MES_PROCESS_QUANTUM_PIPE1 0x283b
+#define regCP_MES_PROCESS_QUANTUM_PIPE1_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL1 0x283c
+#define regCP_MES_DOORBELL_CONTROL1_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL2 0x283d
+#define regCP_MES_DOORBELL_CONTROL2_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL3 0x283e
+#define regCP_MES_DOORBELL_CONTROL3_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL4 0x283f
+#define regCP_MES_DOORBELL_CONTROL4_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL5 0x2840
+#define regCP_MES_DOORBELL_CONTROL5_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL6 0x2841
+#define regCP_MES_DOORBELL_CONTROL6_BASE_IDX 1
+#define regCP_MES_GP0_LO 0x2843
+#define regCP_MES_GP0_LO_BASE_IDX 1
+#define regCP_MES_GP0_HI 0x2844
+#define regCP_MES_GP0_HI_BASE_IDX 1
+#define regCP_MES_GP1_LO 0x2845
+#define regCP_MES_GP1_LO_BASE_IDX 1
+#define regCP_MES_GP1_HI 0x2846
+#define regCP_MES_GP1_HI_BASE_IDX 1
+#define regCP_MES_GP2_LO 0x2847
+#define regCP_MES_GP2_LO_BASE_IDX 1
+#define regCP_MES_GP2_HI 0x2848
+#define regCP_MES_GP2_HI_BASE_IDX 1
+#define regCP_MES_GP3_LO 0x2849
+#define regCP_MES_GP3_LO_BASE_IDX 1
+#define regCP_MES_GP3_HI 0x284a
+#define regCP_MES_GP3_HI_BASE_IDX 1
+#define regCP_MES_GP4_LO 0x284b
+#define regCP_MES_GP4_LO_BASE_IDX 1
+#define regCP_MES_GP4_HI 0x284c
+#define regCP_MES_GP4_HI_BASE_IDX 1
+#define regCP_MES_GP5_LO 0x284d
+#define regCP_MES_GP5_LO_BASE_IDX 1
+#define regCP_MES_GP5_HI 0x284e
+#define regCP_MES_GP5_HI_BASE_IDX 1
+#define regCP_MES_GP6_LO 0x284f
+#define regCP_MES_GP6_LO_BASE_IDX 1
+#define regCP_MES_GP6_HI 0x2850
+#define regCP_MES_GP6_HI_BASE_IDX 1
+#define regCP_MES_GP7_LO 0x2851
+#define regCP_MES_GP7_LO_BASE_IDX 1
+#define regCP_MES_GP7_HI 0x2852
+#define regCP_MES_GP7_HI_BASE_IDX 1
+#define regCP_MES_GP8_LO 0x2853
+#define regCP_MES_GP8_LO_BASE_IDX 1
+#define regCP_MES_GP8_HI 0x2854
+#define regCP_MES_GP8_HI_BASE_IDX 1
+#define regCP_MES_GP9_LO 0x2855
+#define regCP_MES_GP9_LO_BASE_IDX 1
+#define regCP_MES_GP9_HI 0x2856
+#define regCP_MES_GP9_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_BASE0_LO 0x2883
+#define regCP_MES_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_BASE0_HI 0x2884
+#define regCP_MES_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_MASK0_LO 0x2885
+#define regCP_MES_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_MASK0_HI 0x2886
+#define regCP_MES_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_APERTURE 0x2887
+#define regCP_MES_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_BASE_LO 0x2888
+#define regCP_MES_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_BASE_HI 0x2889
+#define regCP_MES_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_MASK_LO 0x288a
+#define regCP_MES_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_MASK_HI 0x288b
+#define regCP_MES_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_APERTURE 0x288c
+#define regCP_MES_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_APERTURE 0x288d
+#define regCP_MES_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_BASE_LO 0x288e
+#define regCP_MES_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_BASE_HI 0x288f
+#define regCP_MES_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_MES_PERFCOUNT_CNTL 0x2899
+#define regCP_MES_PERFCOUNT_CNTL_BASE_IDX 1
+#define regCP_MES_PENDING_INTERRUPT 0x289a
+#define regCP_MES_PENDING_INTERRUPT_BASE_IDX 1
+#define regCP_MES_PRGRM_CNTR_START_HI 0x289d
+#define regCP_MES_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_16 0x289f
+#define regCP_MES_INTERRUPT_DATA_16_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_17 0x28a0
+#define regCP_MES_INTERRUPT_DATA_17_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_18 0x28a1
+#define regCP_MES_INTERRUPT_DATA_18_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_19 0x28a2
+#define regCP_MES_INTERRUPT_DATA_19_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_20 0x28a3
+#define regCP_MES_INTERRUPT_DATA_20_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_21 0x28a4
+#define regCP_MES_INTERRUPT_DATA_21_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_22 0x28a5
+#define regCP_MES_INTERRUPT_DATA_22_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_23 0x28a6
+#define regCP_MES_INTERRUPT_DATA_23_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_24 0x28a7
+#define regCP_MES_INTERRUPT_DATA_24_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_25 0x28a8
+#define regCP_MES_INTERRUPT_DATA_25_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_26 0x28a9
+#define regCP_MES_INTERRUPT_DATA_26_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_27 0x28aa
+#define regCP_MES_INTERRUPT_DATA_27_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_28 0x28ab
+#define regCP_MES_INTERRUPT_DATA_28_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_29 0x28ac
+#define regCP_MES_INTERRUPT_DATA_29_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_30 0x28ad
+#define regCP_MES_INTERRUPT_DATA_30_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_31 0x28ae
+#define regCP_MES_INTERRUPT_DATA_31_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_BASE 0x28af
+#define regCP_MES_DC_APERTURE0_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_MASK 0x28b0
+#define regCP_MES_DC_APERTURE0_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_CNTL 0x28b1
+#define regCP_MES_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_BASE 0x28b2
+#define regCP_MES_DC_APERTURE1_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_MASK 0x28b3
+#define regCP_MES_DC_APERTURE1_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_CNTL 0x28b4
+#define regCP_MES_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_BASE 0x28b5
+#define regCP_MES_DC_APERTURE2_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_MASK 0x28b6
+#define regCP_MES_DC_APERTURE2_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_CNTL 0x28b7
+#define regCP_MES_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_BASE 0x28b8
+#define regCP_MES_DC_APERTURE3_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_MASK 0x28b9
+#define regCP_MES_DC_APERTURE3_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_CNTL 0x28ba
+#define regCP_MES_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_BASE 0x28bb
+#define regCP_MES_DC_APERTURE4_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_MASK 0x28bc
+#define regCP_MES_DC_APERTURE4_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_CNTL 0x28bd
+#define regCP_MES_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_BASE 0x28be
+#define regCP_MES_DC_APERTURE5_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_MASK 0x28bf
+#define regCP_MES_DC_APERTURE5_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_CNTL 0x28c0
+#define regCP_MES_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_BASE 0x28c1
+#define regCP_MES_DC_APERTURE6_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_MASK 0x28c2
+#define regCP_MES_DC_APERTURE6_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_CNTL 0x28c3
+#define regCP_MES_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_BASE 0x28c4
+#define regCP_MES_DC_APERTURE7_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_MASK 0x28c5
+#define regCP_MES_DC_APERTURE7_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_CNTL 0x28c6
+#define regCP_MES_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_BASE 0x28c7
+#define regCP_MES_DC_APERTURE8_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_MASK 0x28c8
+#define regCP_MES_DC_APERTURE8_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_CNTL 0x28c9
+#define regCP_MES_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_BASE 0x28ca
+#define regCP_MES_DC_APERTURE9_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_MASK 0x28cb
+#define regCP_MES_DC_APERTURE9_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_CNTL 0x28cc
+#define regCP_MES_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_BASE 0x28cd
+#define regCP_MES_DC_APERTURE10_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_MASK 0x28ce
+#define regCP_MES_DC_APERTURE10_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_CNTL 0x28cf
+#define regCP_MES_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_BASE 0x28d0
+#define regCP_MES_DC_APERTURE11_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_MASK 0x28d1
+#define regCP_MES_DC_APERTURE11_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_CNTL 0x28d2
+#define regCP_MES_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_BASE 0x28d3
+#define regCP_MES_DC_APERTURE12_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_MASK 0x28d4
+#define regCP_MES_DC_APERTURE12_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_CNTL 0x28d5
+#define regCP_MES_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_BASE 0x28d6
+#define regCP_MES_DC_APERTURE13_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_MASK 0x28d7
+#define regCP_MES_DC_APERTURE13_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_CNTL 0x28d8
+#define regCP_MES_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_BASE 0x28d9
+#define regCP_MES_DC_APERTURE14_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_MASK 0x28da
+#define regCP_MES_DC_APERTURE14_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_CNTL 0x28db
+#define regCP_MES_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_BASE 0x28dc
+#define regCP_MES_DC_APERTURE15_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_MASK 0x28dd
+#define regCP_MES_DC_APERTURE15_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_CNTL 0x28de
+#define regCP_MES_DC_APERTURE15_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_PRGRM_CNTR_START 0x2900
+#define regCP_MEC_RS64_PRGRM_CNTR_START_BASE_IDX 1
+#define regCP_MEC_MTVEC_LO 0x2901
+#define regCP_MEC_MTVEC_LO_BASE_IDX 1
+#define regCP_MEC_MTVEC_HI 0x2902
+#define regCP_MEC_MTVEC_HI_BASE_IDX 1
+#define regCP_MEC_ISA_CNTL 0x2903
+#define regCP_MEC_ISA_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_CNTL 0x2904
+#define regCP_MEC_RS64_CNTL_BASE_IDX 1
+#define regCP_MEC_MIE_LO 0x2905
+#define regCP_MEC_MIE_LO_BASE_IDX 1
+#define regCP_MEC_MIE_HI 0x2906
+#define regCP_MEC_MIE_HI_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT 0x2907
+#define regCP_MEC_RS64_INTERRUPT_BASE_IDX 1
+#define regCP_MEC_RS64_INSTR_PNTR 0x2908
+#define regCP_MEC_RS64_INSTR_PNTR_BASE_IDX 1
+#define regCP_MEC_MIP_LO 0x2909
+#define regCP_MEC_MIP_LO_BASE_IDX 1
+#define regCP_MEC_MIP_HI 0x290a
+#define regCP_MEC_MIP_HI_BASE_IDX 1
+#define regCP_MEC_DC_BASE_CNTL 0x290b
+#define regCP_MEC_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_OP_CNTL 0x290c
+#define regCP_MEC_DC_OP_CNTL_BASE_IDX 1
+#define regCP_MEC_MTIMECMP_LO 0x290d
+#define regCP_MEC_MTIMECMP_LO_BASE_IDX 1
+#define regCP_MEC_MTIMECMP_HI 0x290e
+#define regCP_MEC_MTIMECMP_HI_BASE_IDX 1
+#define regCP_MEC_GP0_LO 0x2910
+#define regCP_MEC_GP0_LO_BASE_IDX 1
+#define regCP_MEC_GP0_HI 0x2911
+#define regCP_MEC_GP0_HI_BASE_IDX 1
+#define regCP_MEC_GP1_LO 0x2912
+#define regCP_MEC_GP1_LO_BASE_IDX 1
+#define regCP_MEC_GP1_HI 0x2913
+#define regCP_MEC_GP1_HI_BASE_IDX 1
+#define regCP_MEC_GP2_LO 0x2914
+#define regCP_MEC_GP2_LO_BASE_IDX 1
+#define regCP_MEC_GP2_HI 0x2915
+#define regCP_MEC_GP2_HI_BASE_IDX 1
+#define regCP_MEC_GP3_LO 0x2916
+#define regCP_MEC_GP3_LO_BASE_IDX 1
+#define regCP_MEC_GP3_HI 0x2917
+#define regCP_MEC_GP3_HI_BASE_IDX 1
+#define regCP_MEC_GP4_LO 0x2918
+#define regCP_MEC_GP4_LO_BASE_IDX 1
+#define regCP_MEC_GP4_HI 0x2919
+#define regCP_MEC_GP4_HI_BASE_IDX 1
+#define regCP_MEC_GP5_LO 0x291a
+#define regCP_MEC_GP5_LO_BASE_IDX 1
+#define regCP_MEC_GP5_HI 0x291b
+#define regCP_MEC_GP5_HI_BASE_IDX 1
+#define regCP_MEC_GP6_LO 0x291c
+#define regCP_MEC_GP6_LO_BASE_IDX 1
+#define regCP_MEC_GP6_HI 0x291d
+#define regCP_MEC_GP6_HI_BASE_IDX 1
+#define regCP_MEC_GP7_LO 0x291e
+#define regCP_MEC_GP7_LO_BASE_IDX 1
+#define regCP_MEC_GP7_HI 0x291f
+#define regCP_MEC_GP7_HI_BASE_IDX 1
+#define regCP_MEC_GP8_LO 0x2920
+#define regCP_MEC_GP8_LO_BASE_IDX 1
+#define regCP_MEC_GP8_HI 0x2921
+#define regCP_MEC_GP8_HI_BASE_IDX 1
+#define regCP_MEC_GP9_LO 0x2922
+#define regCP_MEC_GP9_LO_BASE_IDX 1
+#define regCP_MEC_GP9_HI 0x2923
+#define regCP_MEC_GP9_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_BASE0_LO 0x2927
+#define regCP_MEC_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_BASE0_HI 0x2928
+#define regCP_MEC_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_MASK0_LO 0x2929
+#define regCP_MEC_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_MASK0_HI 0x292a
+#define regCP_MEC_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_APERTURE 0x292b
+#define regCP_MEC_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_BASE_LO 0x292c
+#define regCP_MEC_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_BASE_HI 0x292d
+#define regCP_MEC_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_MASK_LO 0x292e
+#define regCP_MEC_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_MASK_HI 0x292f
+#define regCP_MEC_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_APERTURE 0x2930
+#define regCP_MEC_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_APERTURE 0x2931
+#define regCP_MEC_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_BASE_LO 0x2932
+#define regCP_MEC_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_BASE_HI 0x2933
+#define regCP_MEC_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_MEC_RS64_PERFCOUNT_CNTL 0x2934
+#define regCP_MEC_RS64_PERFCOUNT_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_PENDING_INTERRUPT 0x2935
+#define regCP_MEC_RS64_PENDING_INTERRUPT_BASE_IDX 1
+#define regCP_MEC_RS64_PRGRM_CNTR_START_HI 0x2938
+#define regCP_MEC_RS64_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_16 0x293a
+#define regCP_MEC_RS64_INTERRUPT_DATA_16_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_17 0x293b
+#define regCP_MEC_RS64_INTERRUPT_DATA_17_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_18 0x293c
+#define regCP_MEC_RS64_INTERRUPT_DATA_18_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_19 0x293d
+#define regCP_MEC_RS64_INTERRUPT_DATA_19_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_20 0x293e
+#define regCP_MEC_RS64_INTERRUPT_DATA_20_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_21 0x293f
+#define regCP_MEC_RS64_INTERRUPT_DATA_21_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_22 0x2940
+#define regCP_MEC_RS64_INTERRUPT_DATA_22_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_23 0x2941
+#define regCP_MEC_RS64_INTERRUPT_DATA_23_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_24 0x2942
+#define regCP_MEC_RS64_INTERRUPT_DATA_24_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_25 0x2943
+#define regCP_MEC_RS64_INTERRUPT_DATA_25_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_26 0x2944
+#define regCP_MEC_RS64_INTERRUPT_DATA_26_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_27 0x2945
+#define regCP_MEC_RS64_INTERRUPT_DATA_27_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_28 0x2946
+#define regCP_MEC_RS64_INTERRUPT_DATA_28_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_29 0x2947
+#define regCP_MEC_RS64_INTERRUPT_DATA_29_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_30 0x2948
+#define regCP_MEC_RS64_INTERRUPT_DATA_30_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_31 0x2949
+#define regCP_MEC_RS64_INTERRUPT_DATA_31_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_BASE 0x294a
+#define regCP_MEC_DC_APERTURE0_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_MASK 0x294b
+#define regCP_MEC_DC_APERTURE0_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_CNTL 0x294c
+#define regCP_MEC_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_BASE 0x294d
+#define regCP_MEC_DC_APERTURE1_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_MASK 0x294e
+#define regCP_MEC_DC_APERTURE1_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_CNTL 0x294f
+#define regCP_MEC_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_BASE 0x2950
+#define regCP_MEC_DC_APERTURE2_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_MASK 0x2951
+#define regCP_MEC_DC_APERTURE2_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_CNTL 0x2952
+#define regCP_MEC_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_BASE 0x2953
+#define regCP_MEC_DC_APERTURE3_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_MASK 0x2954
+#define regCP_MEC_DC_APERTURE3_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_CNTL 0x2955
+#define regCP_MEC_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_BASE 0x2956
+#define regCP_MEC_DC_APERTURE4_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_MASK 0x2957
+#define regCP_MEC_DC_APERTURE4_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_CNTL 0x2958
+#define regCP_MEC_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_BASE 0x2959
+#define regCP_MEC_DC_APERTURE5_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_MASK 0x295a
+#define regCP_MEC_DC_APERTURE5_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_CNTL 0x295b
+#define regCP_MEC_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_BASE 0x295c
+#define regCP_MEC_DC_APERTURE6_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_MASK 0x295d
+#define regCP_MEC_DC_APERTURE6_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_CNTL 0x295e
+#define regCP_MEC_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_BASE 0x295f
+#define regCP_MEC_DC_APERTURE7_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_MASK 0x2960
+#define regCP_MEC_DC_APERTURE7_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_CNTL 0x2961
+#define regCP_MEC_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_BASE 0x2962
+#define regCP_MEC_DC_APERTURE8_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_MASK 0x2963
+#define regCP_MEC_DC_APERTURE8_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_CNTL 0x2964
+#define regCP_MEC_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_BASE 0x2965
+#define regCP_MEC_DC_APERTURE9_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_MASK 0x2966
+#define regCP_MEC_DC_APERTURE9_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_CNTL 0x2967
+#define regCP_MEC_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_BASE 0x2968
+#define regCP_MEC_DC_APERTURE10_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_MASK 0x2969
+#define regCP_MEC_DC_APERTURE10_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_CNTL 0x296a
+#define regCP_MEC_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_BASE 0x296b
+#define regCP_MEC_DC_APERTURE11_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_MASK 0x296c
+#define regCP_MEC_DC_APERTURE11_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_CNTL 0x296d
+#define regCP_MEC_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_BASE 0x296e
+#define regCP_MEC_DC_APERTURE12_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_MASK 0x296f
+#define regCP_MEC_DC_APERTURE12_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_CNTL 0x2970
+#define regCP_MEC_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_BASE 0x2971
+#define regCP_MEC_DC_APERTURE13_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_MASK 0x2972
+#define regCP_MEC_DC_APERTURE13_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_CNTL 0x2973
+#define regCP_MEC_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_BASE 0x2974
+#define regCP_MEC_DC_APERTURE14_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_MASK 0x2975
+#define regCP_MEC_DC_APERTURE14_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_CNTL 0x2976
+#define regCP_MEC_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_BASE 0x2977
+#define regCP_MEC_DC_APERTURE15_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_MASK 0x2978
+#define regCP_MEC_DC_APERTURE15_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_CNTL 0x2979
+#define regCP_MEC_DC_APERTURE15_CNTL_BASE_IDX 1
+#define regCP_CPC_IC_OP_CNTL 0x297a
+#define regCP_CPC_IC_OP_CNTL_BASE_IDX 1
+#define regCP_GFX_CNTL 0x2a00
+#define regCP_GFX_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_INTERRUPT0 0x2a01
+#define regCP_GFX_RS64_INTERRUPT0_BASE_IDX 1
+#define regCP_GFX_RS64_INTR_EN0 0x2a02
+#define regCP_GFX_RS64_INTR_EN0_BASE_IDX 1
+#define regCP_GFX_RS64_INTR_EN1 0x2a03
+#define regCP_GFX_RS64_INTR_EN1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE_CNTL 0x2a08
+#define regCP_GFX_RS64_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_DC_OP_CNTL 0x2a09
+#define regCP_GFX_RS64_DC_OP_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_BASE0_LO 0x2a0a
+#define regCP_GFX_RS64_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_BASE0_HI 0x2a0b
+#define regCP_GFX_RS64_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_MASK0_LO 0x2a0c
+#define regCP_GFX_RS64_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_MASK0_HI 0x2a0d
+#define regCP_GFX_RS64_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_APERTURE 0x2a0e
+#define regCP_GFX_RS64_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_LO 0x2a0f
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_HI 0x2a10
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_LO 0x2a11
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_HI 0x2a12
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_APERTURE 0x2a13
+#define regCP_GFX_RS64_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_APERTURE 0x2a14
+#define regCP_GFX_RS64_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_LO 0x2a15
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_HI 0x2a16
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_GFX_RS64_PERFCOUNT_CNTL0 0x2a1a
+#define regCP_GFX_RS64_PERFCOUNT_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_PERFCOUNT_CNTL1 0x2a1b
+#define regCP_GFX_RS64_PERFCOUNT_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_LO0 0x2a1c
+#define regCP_GFX_RS64_MIP_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_LO1 0x2a1d
+#define regCP_GFX_RS64_MIP_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_HI0 0x2a1e
+#define regCP_GFX_RS64_MIP_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_HI1 0x2a1f
+#define regCP_GFX_RS64_MIP_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_LO0 0x2a20
+#define regCP_GFX_RS64_MTIMECMP_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_LO1 0x2a21
+#define regCP_GFX_RS64_MTIMECMP_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_HI0 0x2a22
+#define regCP_GFX_RS64_MTIMECMP_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_HI1 0x2a23
+#define regCP_GFX_RS64_MTIMECMP_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_LO0 0x2a24
+#define regCP_GFX_RS64_GP0_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_LO1 0x2a25
+#define regCP_GFX_RS64_GP0_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_HI0 0x2a26
+#define regCP_GFX_RS64_GP0_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_HI1 0x2a27
+#define regCP_GFX_RS64_GP0_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_LO0 0x2a28
+#define regCP_GFX_RS64_GP1_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_LO1 0x2a29
+#define regCP_GFX_RS64_GP1_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_HI0 0x2a2a
+#define regCP_GFX_RS64_GP1_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_HI1 0x2a2b
+#define regCP_GFX_RS64_GP1_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_LO0 0x2a2c
+#define regCP_GFX_RS64_GP2_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_LO1 0x2a2d
+#define regCP_GFX_RS64_GP2_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_HI0 0x2a2e
+#define regCP_GFX_RS64_GP2_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_HI1 0x2a2f
+#define regCP_GFX_RS64_GP2_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_LO0 0x2a30
+#define regCP_GFX_RS64_GP3_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_LO1 0x2a31
+#define regCP_GFX_RS64_GP3_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_HI0 0x2a32
+#define regCP_GFX_RS64_GP3_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_HI1 0x2a33
+#define regCP_GFX_RS64_GP3_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_LO0 0x2a34
+#define regCP_GFX_RS64_GP4_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_LO1 0x2a35
+#define regCP_GFX_RS64_GP4_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_HI0 0x2a36
+#define regCP_GFX_RS64_GP4_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_HI1 0x2a37
+#define regCP_GFX_RS64_GP4_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_LO0 0x2a38
+#define regCP_GFX_RS64_GP5_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_LO1 0x2a39
+#define regCP_GFX_RS64_GP5_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_HI0 0x2a3a
+#define regCP_GFX_RS64_GP5_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_HI1 0x2a3b
+#define regCP_GFX_RS64_GP5_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP6_LO 0x2a3c
+#define regCP_GFX_RS64_GP6_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP6_HI 0x2a3d
+#define regCP_GFX_RS64_GP6_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP7_LO 0x2a3e
+#define regCP_GFX_RS64_GP7_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP7_HI 0x2a3f
+#define regCP_GFX_RS64_GP7_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP8_LO 0x2a40
+#define regCP_GFX_RS64_GP8_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP8_HI 0x2a41
+#define regCP_GFX_RS64_GP8_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP9_LO 0x2a42
+#define regCP_GFX_RS64_GP9_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP9_HI 0x2a43
+#define regCP_GFX_RS64_GP9_HI_BASE_IDX 1
+#define regCP_GFX_RS64_INSTR_PNTR0 0x2a44
+#define regCP_GFX_RS64_INSTR_PNTR0_BASE_IDX 1
+#define regCP_GFX_RS64_INSTR_PNTR1 0x2a45
+#define regCP_GFX_RS64_INSTR_PNTR1_BASE_IDX 1
+#define regCP_GFX_RS64_PENDING_INTERRUPT0 0x2a46
+#define regCP_GFX_RS64_PENDING_INTERRUPT0_BASE_IDX 1
+#define regCP_GFX_RS64_PENDING_INTERRUPT1 0x2a47
+#define regCP_GFX_RS64_PENDING_INTERRUPT1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_BASE0 0x2a49
+#define regCP_GFX_RS64_DC_APERTURE0_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_MASK0 0x2a4a
+#define regCP_GFX_RS64_DC_APERTURE0_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL0 0x2a4b
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_BASE0 0x2a4c
+#define regCP_GFX_RS64_DC_APERTURE1_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_MASK0 0x2a4d
+#define regCP_GFX_RS64_DC_APERTURE1_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL0 0x2a4e
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_BASE0 0x2a4f
+#define regCP_GFX_RS64_DC_APERTURE2_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_MASK0 0x2a50
+#define regCP_GFX_RS64_DC_APERTURE2_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL0 0x2a51
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_BASE0 0x2a52
+#define regCP_GFX_RS64_DC_APERTURE3_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_MASK0 0x2a53
+#define regCP_GFX_RS64_DC_APERTURE3_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL0 0x2a54
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_BASE0 0x2a55
+#define regCP_GFX_RS64_DC_APERTURE4_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_MASK0 0x2a56
+#define regCP_GFX_RS64_DC_APERTURE4_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL0 0x2a57
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_BASE0 0x2a58
+#define regCP_GFX_RS64_DC_APERTURE5_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_MASK0 0x2a59
+#define regCP_GFX_RS64_DC_APERTURE5_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL0 0x2a5a
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_BASE0 0x2a5b
+#define regCP_GFX_RS64_DC_APERTURE6_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_MASK0 0x2a5c
+#define regCP_GFX_RS64_DC_APERTURE6_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL0 0x2a5d
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_BASE0 0x2a5e
+#define regCP_GFX_RS64_DC_APERTURE7_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_MASK0 0x2a5f
+#define regCP_GFX_RS64_DC_APERTURE7_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL0 0x2a60
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_BASE0 0x2a61
+#define regCP_GFX_RS64_DC_APERTURE8_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_MASK0 0x2a62
+#define regCP_GFX_RS64_DC_APERTURE8_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL0 0x2a63
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_BASE0 0x2a64
+#define regCP_GFX_RS64_DC_APERTURE9_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_MASK0 0x2a65
+#define regCP_GFX_RS64_DC_APERTURE9_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL0 0x2a66
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_BASE0 0x2a67
+#define regCP_GFX_RS64_DC_APERTURE10_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_MASK0 0x2a68
+#define regCP_GFX_RS64_DC_APERTURE10_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL0 0x2a69
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_BASE0 0x2a6a
+#define regCP_GFX_RS64_DC_APERTURE11_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_MASK0 0x2a6b
+#define regCP_GFX_RS64_DC_APERTURE11_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL0 0x2a6c
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_BASE0 0x2a6d
+#define regCP_GFX_RS64_DC_APERTURE12_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_MASK0 0x2a6e
+#define regCP_GFX_RS64_DC_APERTURE12_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL0 0x2a6f
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_BASE0 0x2a70
+#define regCP_GFX_RS64_DC_APERTURE13_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK0 0x2a71
+#define regCP_GFX_RS64_DC_APERTURE13_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL0 0x2a72
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_BASE0 0x2a73
+#define regCP_GFX_RS64_DC_APERTURE14_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_MASK0 0x2a74
+#define regCP_GFX_RS64_DC_APERTURE14_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL0 0x2a75
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_BASE0 0x2a76
+#define regCP_GFX_RS64_DC_APERTURE15_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_MASK0 0x2a77
+#define regCP_GFX_RS64_DC_APERTURE15_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL0 0x2a78
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_BASE1 0x2a79
+#define regCP_GFX_RS64_DC_APERTURE0_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_MASK1 0x2a7a
+#define regCP_GFX_RS64_DC_APERTURE0_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL1 0x2a7b
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_BASE1 0x2a7c
+#define regCP_GFX_RS64_DC_APERTURE1_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_MASK1 0x2a7d
+#define regCP_GFX_RS64_DC_APERTURE1_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL1 0x2a7e
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_BASE1 0x2a7f
+#define regCP_GFX_RS64_DC_APERTURE2_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_MASK1 0x2a80
+#define regCP_GFX_RS64_DC_APERTURE2_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL1 0x2a81
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_BASE1 0x2a82
+#define regCP_GFX_RS64_DC_APERTURE3_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_MASK1 0x2a83
+#define regCP_GFX_RS64_DC_APERTURE3_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL1 0x2a84
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_BASE1 0x2a85
+#define regCP_GFX_RS64_DC_APERTURE4_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_MASK1 0x2a86
+#define regCP_GFX_RS64_DC_APERTURE4_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL1 0x2a87
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_BASE1 0x2a88
+#define regCP_GFX_RS64_DC_APERTURE5_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_MASK1 0x2a89
+#define regCP_GFX_RS64_DC_APERTURE5_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL1 0x2a8a
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_BASE1 0x2a8b
+#define regCP_GFX_RS64_DC_APERTURE6_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_MASK1 0x2a8c
+#define regCP_GFX_RS64_DC_APERTURE6_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL1 0x2a8d
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_BASE1 0x2a8e
+#define regCP_GFX_RS64_DC_APERTURE7_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_MASK1 0x2a8f
+#define regCP_GFX_RS64_DC_APERTURE7_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL1 0x2a90
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_BASE1 0x2a91
+#define regCP_GFX_RS64_DC_APERTURE8_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_MASK1 0x2a92
+#define regCP_GFX_RS64_DC_APERTURE8_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL1 0x2a93
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_BASE1 0x2a94
+#define regCP_GFX_RS64_DC_APERTURE9_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_MASK1 0x2a95
+#define regCP_GFX_RS64_DC_APERTURE9_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL1 0x2a96
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_BASE1 0x2a97
+#define regCP_GFX_RS64_DC_APERTURE10_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_MASK1 0x2a98
+#define regCP_GFX_RS64_DC_APERTURE10_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL1 0x2a99
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_BASE1 0x2a9a
+#define regCP_GFX_RS64_DC_APERTURE11_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_MASK1 0x2a9b
+#define regCP_GFX_RS64_DC_APERTURE11_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL1 0x2a9c
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_BASE1 0x2a9d
+#define regCP_GFX_RS64_DC_APERTURE12_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_MASK1 0x2a9e
+#define regCP_GFX_RS64_DC_APERTURE12_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL1 0x2a9f
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_BASE1 0x2aa0
+#define regCP_GFX_RS64_DC_APERTURE13_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK1 0x2aa1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL1 0x2aa2
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_BASE1 0x2aa3
+#define regCP_GFX_RS64_DC_APERTURE14_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_MASK1 0x2aa4
+#define regCP_GFX_RS64_DC_APERTURE14_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL1 0x2aa5
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_BASE1 0x2aa6
+#define regCP_GFX_RS64_DC_APERTURE15_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_MASK1 0x2aa7
+#define regCP_GFX_RS64_DC_APERTURE15_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL1 0x2aa8
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_INTERRUPT1 0x2aac
+#define regCP_GFX_RS64_INTERRUPT1_BASE_IDX 1
+
+
+// addressBlock: gc_gl1dec
+// base address: 0x33400
+#define regGL1_ARB_CTRL 0x2d00
+#define regGL1_ARB_CTRL_BASE_IDX 1
+#define regGL1_DRAM_BURST_MASK 0x2d02
+#define regGL1_DRAM_BURST_MASK_BASE_IDX 1
+#define regGL1_ARB_STATUS 0x2d03
+#define regGL1_ARB_STATUS_BASE_IDX 1
+#define regGL1_DRAM_BURST_CTRL 0x2d04
+#define regGL1_DRAM_BURST_CTRL_BASE_IDX 1
+#define regGL1I_GL1R_REP_FGCG_OVERRIDE 0x2d05
+#define regGL1I_GL1R_REP_FGCG_OVERRIDE_BASE_IDX 1
+#define regGL1C_CTRL 0x2d40
+#define regGL1C_CTRL_BASE_IDX 1
+#define regGL1C_STATUS 0x2d41
+#define regGL1C_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_CNTL2 0x2d43
+#define regGL1C_UTCL0_CNTL2_BASE_IDX 1
+#define regGL1C_UTCL0_STATUS 0x2d44
+#define regGL1C_UTCL0_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_RETRY 0x2d45
+#define regGL1C_UTCL0_RETRY_BASE_IDX 1
+#define regGL1C_CTRL2 0x2d46
+#define regGL1C_CTRL2_BASE_IDX 1
+
+
+// addressBlock: gc_chdec
+// base address: 0x33600
+#define regCH_ARB_CTRL 0x2d80
+#define regCH_ARB_CTRL_BASE_IDX 1
+#define regCH_DRAM_BURST_MASK 0x2d82
+#define regCH_DRAM_BURST_MASK_BASE_IDX 1
+#define regCH_ARB_STATUS 0x2d83
+#define regCH_ARB_STATUS_BASE_IDX 1
+#define regCH_DRAM_BURST_CTRL 0x2d84
+#define regCH_DRAM_BURST_CTRL_BASE_IDX 1
+#define regCHA_CHC_CREDITS 0x2d88
+#define regCHA_CHC_CREDITS_BASE_IDX 1
+#define regCHA_CLIENT_FREE_DELAY 0x2d89
+#define regCHA_CLIENT_FREE_DELAY_BASE_IDX 1
+#define regCHI_CHR_REP_FGCG_OVERRIDE 0x2d8c
+#define regCHI_CHR_REP_FGCG_OVERRIDE_BASE_IDX 1
+#define regCH_VC5_ENABLE 0x2d94
+#define regCH_VC5_ENABLE_BASE_IDX 1
+#define regCHC_CTRL 0x2dc0
+#define regCHC_CTRL_BASE_IDX 1
+#define regCHC_STATUS 0x2dc1
+#define regCHC_STATUS_BASE_IDX 1
+#define regCHCG_CTRL 0x2dc2
+#define regCHCG_CTRL_BASE_IDX 1
+#define regCHCG_STATUS 0x2dc3
+#define regCHCG_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_gl2dec
+// base address: 0x33800
+#define regGL2C_CTRL 0x2e00
+#define regGL2C_CTRL_BASE_IDX 1
+#define regGL2C_CTRL2 0x2e01
+#define regGL2C_CTRL2_BASE_IDX 1
+#define regGL2C_STATUS 0x2e02
+#define regGL2C_STATUS_BASE_IDX 1
+#define regGL2C_ADDR_MATCH_MASK 0x2e03
+#define regGL2C_ADDR_MATCH_MASK_BASE_IDX 1
+#define regGL2C_ADDR_MATCH_SIZE 0x2e04
+#define regGL2C_ADDR_MATCH_SIZE_BASE_IDX 1
+#define regGL2C_WBINVL2 0x2e05
+#define regGL2C_WBINVL2_BASE_IDX 1
+#define regGL2C_SOFT_RESET 0x2e06
+#define regGL2C_SOFT_RESET_BASE_IDX 1
+#define regGL2C_CM_CTRL0 0x2e07
+#define regGL2C_CM_CTRL0_BASE_IDX 1
+#define regGL2C_CM_CTRL1 0x2e08
+#define regGL2C_CM_CTRL1_BASE_IDX 1
+#define regGL2C_CM_STALL 0x2e09
+#define regGL2C_CM_STALL_BASE_IDX 1
+#define regGL2C_CM_CTRL2 0x2e0b
+#define regGL2C_CM_CTRL2_BASE_IDX 1
+#define regGL2C_CTRL3 0x2e0c
+#define regGL2C_CTRL3_BASE_IDX 1
+#define regGL2C_LB_CTR_CTRL 0x2e0d
+#define regGL2C_LB_CTR_CTRL_BASE_IDX 1
+#define regGL2C_LB_DATA0 0x2e0e
+#define regGL2C_LB_DATA0_BASE_IDX 1
+#define regGL2C_LB_DATA1 0x2e0f
+#define regGL2C_LB_DATA1_BASE_IDX 1
+#define regGL2C_LB_DATA2 0x2e10
+#define regGL2C_LB_DATA2_BASE_IDX 1
+#define regGL2C_LB_DATA3 0x2e11
+#define regGL2C_LB_DATA3_BASE_IDX 1
+#define regGL2C_LB_CTR_SEL0 0x2e12
+#define regGL2C_LB_CTR_SEL0_BASE_IDX 1
+#define regGL2C_LB_CTR_SEL1 0x2e13
+#define regGL2C_LB_CTR_SEL1_BASE_IDX 1
+#define regGL2C_CTRL4 0x2e17
+#define regGL2C_CTRL4_BASE_IDX 1
+#define regGL2C_DISCARD_STALL_CTRL 0x2e18
+#define regGL2C_DISCARD_STALL_CTRL_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_CTRL 0x2e20
+#define regGL2A_ADDR_MATCH_CTRL_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_MASK 0x2e21
+#define regGL2A_ADDR_MATCH_MASK_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_SIZE 0x2e22
+#define regGL2A_ADDR_MATCH_SIZE_BASE_IDX 1
+#define regGL2A_PRIORITY_CTRL 0x2e23
+#define regGL2A_PRIORITY_CTRL_BASE_IDX 1
+#define regGL2A_CTRL 0x2e24
+#define regGL2A_CTRL_BASE_IDX 1
+#define regGL2A_RESP_THROTTLE_CTRL 0x2e2a
+#define regGL2A_RESP_THROTTLE_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_gl1hdec
+// base address: 0x33900
+#define regGL1H_ARB_CTRL 0x2e40
+#define regGL1H_ARB_CTRL_BASE_IDX 1
+#define regGL1H_GL1_CREDITS 0x2e41
+#define regGL1H_GL1_CREDITS_BASE_IDX 1
+#define regGL1H_BURST_MASK 0x2e42
+#define regGL1H_BURST_MASK_BASE_IDX 1
+#define regGL1H_BURST_CTRL 0x2e43
+#define regGL1H_BURST_CTRL_BASE_IDX 1
+#define regGL1H_ARB_STATUS 0x2e44
+#define regGL1H_ARB_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_perfddec
+// base address: 0x34000
+#define regCPG_PERFCOUNTER1_LO 0x3000
+#define regCPG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER1_HI 0x3001
+#define regCPG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_LO 0x3002
+#define regCPG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_HI 0x3003
+#define regCPG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_LO 0x3004
+#define regCPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_HI 0x3005
+#define regCPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_LO 0x3006
+#define regCPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_HI 0x3007
+#define regCPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_LO 0x3008
+#define regCPF_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_HI 0x3009
+#define regCPF_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_LO 0x300a
+#define regCPF_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_HI 0x300b
+#define regCPF_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_LATENCY_STATS_DATA 0x300c
+#define regCPF_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPG_LATENCY_STATS_DATA 0x300d
+#define regCPG_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPC_LATENCY_STATS_DATA 0x300e
+#define regCPC_LATENCY_STATS_DATA_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_LO 0x3040
+#define regGRBM_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_HI 0x3041
+#define regGRBM_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_LO 0x3043
+#define regGRBM_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_HI 0x3044
+#define regGRBM_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_LO 0x3045
+#define regGRBM_SE0_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_HI 0x3046
+#define regGRBM_SE0_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_LO 0x3047
+#define regGRBM_SE1_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_HI 0x3048
+#define regGRBM_SE1_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_LO 0x3049
+#define regGRBM_SE2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_HI 0x304a
+#define regGRBM_SE2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_LO 0x304b
+#define regGRBM_SE3_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_HI 0x304c
+#define regGRBM_SE3_PERFCOUNTER_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_LO 0x30a4
+#define regGE1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_HI 0x30a5
+#define regGE1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_LO 0x30a6
+#define regGE1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_HI 0x30a7
+#define regGE1_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_LO 0x30a8
+#define regGE1_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_HI 0x30a9
+#define regGE1_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_LO 0x30aa
+#define regGE1_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_HI 0x30ab
+#define regGE1_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_LO 0x30ac
+#define regGE2_DIST_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_HI 0x30ad
+#define regGE2_DIST_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_LO 0x30ae
+#define regGE2_DIST_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_HI 0x30af
+#define regGE2_DIST_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_LO 0x30b0
+#define regGE2_DIST_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_HI 0x30b1
+#define regGE2_DIST_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_LO 0x30b2
+#define regGE2_DIST_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_HI 0x30b3
+#define regGE2_DIST_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_LO 0x30b4
+#define regGE2_SE_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_HI 0x30b5
+#define regGE2_SE_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_LO 0x30b6
+#define regGE2_SE_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_HI 0x30b7
+#define regGE2_SE_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_LO 0x30b8
+#define regGE2_SE_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_HI 0x30b9
+#define regGE2_SE_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_LO 0x30ba
+#define regGE2_SE_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_HI 0x30bb
+#define regGE2_SE_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_LO 0x3100
+#define regPA_SU_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_HI 0x3101
+#define regPA_SU_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_LO 0x3102
+#define regPA_SU_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_HI 0x3103
+#define regPA_SU_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_LO 0x3104
+#define regPA_SU_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_HI 0x3105
+#define regPA_SU_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_LO 0x3106
+#define regPA_SU_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_HI 0x3107
+#define regPA_SU_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_LO 0x3140
+#define regPA_SC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_HI 0x3141
+#define regPA_SC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_LO 0x3142
+#define regPA_SC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_HI 0x3143
+#define regPA_SC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_LO 0x3144
+#define regPA_SC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_HI 0x3145
+#define regPA_SC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_LO 0x3146
+#define regPA_SC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_HI 0x3147
+#define regPA_SC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_LO 0x3148
+#define regPA_SC_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_HI 0x3149
+#define regPA_SC_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_LO 0x314a
+#define regPA_SC_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_HI 0x314b
+#define regPA_SC_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_LO 0x314c
+#define regPA_SC_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_HI 0x314d
+#define regPA_SC_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_LO 0x314e
+#define regPA_SC_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_HI 0x314f
+#define regPA_SC_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_HI 0x3180
+#define regSPI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_LO 0x3181
+#define regSPI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_HI 0x3182
+#define regSPI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_LO 0x3183
+#define regSPI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_HI 0x3184
+#define regSPI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_LO 0x3185
+#define regSPI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_HI 0x3186
+#define regSPI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_LO 0x3187
+#define regSPI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_HI 0x3188
+#define regSPI_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_LO 0x3189
+#define regSPI_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_HI 0x318a
+#define regSPI_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_LO 0x318b
+#define regSPI_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER0_HI 0x318c
+#define regPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER0_LO 0x318d
+#define regPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER1_HI 0x318e
+#define regPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER1_LO 0x318f
+#define regPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER2_HI 0x3190
+#define regPC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER2_LO 0x3191
+#define regPC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER3_HI 0x3192
+#define regPC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER3_LO 0x3193
+#define regPC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_LO 0x31c0
+#define regSQ_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_LO 0x31c2
+#define regSQ_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_LO 0x31c4
+#define regSQ_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_LO 0x31c6
+#define regSQ_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_LO 0x31c8
+#define regSQ_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_LO 0x31ca
+#define regSQ_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_LO 0x31cc
+#define regSQ_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_LO 0x31ce
+#define regSQ_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_LO 0x31e4
+#define regSQG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_HI 0x31e5
+#define regSQG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_LO 0x31e6
+#define regSQG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_HI 0x31e7
+#define regSQG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_LO 0x31e8
+#define regSQG_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_HI 0x31e9
+#define regSQG_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_LO 0x31ea
+#define regSQG_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_HI 0x31eb
+#define regSQG_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_LO 0x31ec
+#define regSQG_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_HI 0x31ed
+#define regSQG_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_LO 0x31ee
+#define regSQG_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_HI 0x31ef
+#define regSQG_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_LO 0x31f0
+#define regSQG_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_HI 0x31f1
+#define regSQG_PERFCOUNTER6_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_LO 0x31f2
+#define regSQG_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_HI 0x31f3
+#define regSQG_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER0_LO 0x3240
+#define regSX_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER0_HI 0x3241
+#define regSX_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER1_LO 0x3242
+#define regSX_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER1_HI 0x3243
+#define regSX_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER2_LO 0x3244
+#define regSX_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER2_HI 0x3245
+#define regSX_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER3_LO 0x3246
+#define regSX_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER3_HI 0x3247
+#define regSX_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_LO 0x3260
+#define regGCEA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_HI 0x3261
+#define regGCEA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_LO 0x3262
+#define regGCEA_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_HI 0x3263
+#define regGCEA_PERFCOUNTER_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_LO 0x3280
+#define regGDS_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_HI 0x3281
+#define regGDS_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_LO 0x3282
+#define regGDS_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_HI 0x3283
+#define regGDS_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_LO 0x3284
+#define regGDS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_HI 0x3285
+#define regGDS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_LO 0x3286
+#define regGDS_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_HI 0x3287
+#define regGDS_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER0_LO 0x32c0
+#define regTA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER0_HI 0x32c1
+#define regTA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER1_LO 0x32c2
+#define regTA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER1_HI 0x32c3
+#define regTA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER0_LO 0x3300
+#define regTD_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER0_HI 0x3301
+#define regTD_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER1_LO 0x3302
+#define regTD_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER1_HI 0x3303
+#define regTD_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_LO 0x3340
+#define regTCP_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_HI 0x3341
+#define regTCP_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_LO 0x3342
+#define regTCP_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_HI 0x3343
+#define regTCP_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_LO 0x3344
+#define regTCP_PERFCOUNTER2_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_HI 0x3345
+#define regTCP_PERFCOUNTER2_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_LO 0x3346
+#define regTCP_PERFCOUNTER3_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_HI 0x3347
+#define regTCP_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER 0x3348
+#define regTCP_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER2 0x3349
+#define regTCP_PERFCOUNTER_FILTER2_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER_EN 0x334a
+#define regTCP_PERFCOUNTER_FILTER_EN_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_LO 0x3380
+#define regGL2C_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_HI 0x3381
+#define regGL2C_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_LO 0x3382
+#define regGL2C_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_HI 0x3383
+#define regGL2C_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_LO 0x3384
+#define regGL2C_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_HI 0x3385
+#define regGL2C_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_LO 0x3386
+#define regGL2C_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_HI 0x3387
+#define regGL2C_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_LO 0x3390
+#define regGL2A_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_HI 0x3391
+#define regGL2A_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_LO 0x3392
+#define regGL2A_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_HI 0x3393
+#define regGL2A_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_LO 0x3394
+#define regGL2A_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_HI 0x3395
+#define regGL2A_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_LO 0x3396
+#define regGL2A_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_HI 0x3397
+#define regGL2A_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_LO 0x33a0
+#define regGL1C_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_HI 0x33a1
+#define regGL1C_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_LO 0x33a2
+#define regGL1C_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_HI 0x33a3
+#define regGL1C_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_LO 0x33a4
+#define regGL1C_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_HI 0x33a5
+#define regGL1C_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_LO 0x33a6
+#define regGL1C_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_HI 0x33a7
+#define regGL1C_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_LO 0x33c0
+#define regCHC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_HI 0x33c1
+#define regCHC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_LO 0x33c2
+#define regCHC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_HI 0x33c3
+#define regCHC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_LO 0x33c4
+#define regCHC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_HI 0x33c5
+#define regCHC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_LO 0x33c6
+#define regCHC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_HI 0x33c7
+#define regCHC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_LO 0x33c8
+#define regCHCG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_HI 0x33c9
+#define regCHCG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_LO 0x33ca
+#define regCHCG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_HI 0x33cb
+#define regCHCG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_LO 0x33cc
+#define regCHCG_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_HI 0x33cd
+#define regCHCG_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_LO 0x33ce
+#define regCHCG_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_HI 0x33cf
+#define regCHCG_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER0_LO 0x3406
+#define regCB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER0_HI 0x3407
+#define regCB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER1_LO 0x3408
+#define regCB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER1_HI 0x3409
+#define regCB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER2_LO 0x340a
+#define regCB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER2_HI 0x340b
+#define regCB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER3_LO 0x340c
+#define regCB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER3_HI 0x340d
+#define regCB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER0_LO 0x3440
+#define regDB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER0_HI 0x3441
+#define regDB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER1_LO 0x3442
+#define regDB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER1_HI 0x3443
+#define regDB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER2_LO 0x3444
+#define regDB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER2_HI 0x3445
+#define regDB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER3_LO 0x3446
+#define regDB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER3_HI 0x3447
+#define regDB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_LO 0x3480
+#define regRLC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_HI 0x3481
+#define regRLC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_LO 0x3482
+#define regRLC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_HI 0x3483
+#define regRLC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_LO 0x34c0
+#define regRMI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_HI 0x34c1
+#define regRMI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_LO 0x34c2
+#define regRMI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_HI 0x34c3
+#define regRMI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_LO 0x34c4
+#define regRMI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_HI 0x34c5
+#define regRMI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_LO 0x34c6
+#define regRMI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_HI 0x34c7
+#define regRMI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_LO 0x3520
+#define regGCR_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_HI 0x3521
+#define regGCR_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_LO 0x3522
+#define regGCR_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_HI 0x3523
+#define regGCR_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_LO 0x3580
+#define regPA_PH_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_HI 0x3581
+#define regPA_PH_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_LO 0x3582
+#define regPA_PH_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_HI 0x3583
+#define regPA_PH_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_LO 0x3584
+#define regPA_PH_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_HI 0x3585
+#define regPA_PH_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_LO 0x3586
+#define regPA_PH_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_HI 0x3587
+#define regPA_PH_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_LO 0x3588
+#define regPA_PH_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_HI 0x3589
+#define regPA_PH_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_LO 0x358a
+#define regPA_PH_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_HI 0x358b
+#define regPA_PH_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_LO 0x358c
+#define regPA_PH_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_HI 0x358d
+#define regPA_PH_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_LO 0x358e
+#define regPA_PH_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_HI 0x358f
+#define regPA_PH_PERFCOUNTER7_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_LO 0x35a0
+#define regUTCL1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_HI 0x35a1
+#define regUTCL1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_LO 0x35a2
+#define regUTCL1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_HI 0x35a3
+#define regUTCL1_PERFCOUNTER1_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_LO 0x35a4
+#define regUTCL1_PERFCOUNTER2_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_HI 0x35a5
+#define regUTCL1_PERFCOUNTER2_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_LO 0x35a6
+#define regUTCL1_PERFCOUNTER3_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_HI 0x35a7
+#define regUTCL1_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_LO 0x35c0
+#define regGL1A_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_HI 0x35c1
+#define regGL1A_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_LO 0x35c2
+#define regGL1A_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_HI 0x35c3
+#define regGL1A_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_LO 0x35c4
+#define regGL1A_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_HI 0x35c5
+#define regGL1A_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_LO 0x35c6
+#define regGL1A_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_HI 0x35c7
+#define regGL1A_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_LO 0x35d0
+#define regGL1H_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_HI 0x35d1
+#define regGL1H_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_LO 0x35d2
+#define regGL1H_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_HI 0x35d3
+#define regGL1H_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_LO 0x35d4
+#define regGL1H_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_HI 0x35d5
+#define regGL1H_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_LO 0x35d6
+#define regGL1H_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_HI 0x35d7
+#define regGL1H_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_LO 0x3600
+#define regCHA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_HI 0x3601
+#define regCHA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_LO 0x3602
+#define regCHA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_HI 0x3603
+#define regCHA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_LO 0x3604
+#define regCHA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_HI 0x3605
+#define regCHA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_LO 0x3606
+#define regCHA_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_HI 0x3607
+#define regCHA_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_LO 0x3640
+#define regGUS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_HI 0x3641
+#define regGUS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGUS_PERFCOUNTER_LO 0x3642
+#define regGUS_PERFCOUNTER_LO_BASE_IDX 1
+#define regGUS_PERFCOUNTER_HI 0x3643
+#define regGUS_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_perfsdec
+// base address: 0x36000
+#define regCPG_PERFCOUNTER1_SELECT 0x3800
+#define regCPG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT1 0x3801
+#define regCPG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT 0x3802
+#define regCPG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_SELECT 0x3803
+#define regCPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT1 0x3804
+#define regCPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_SELECT 0x3805
+#define regCPF_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT1 0x3806
+#define regCPF_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT 0x3807
+#define regCPF_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCP_PERFMON_CNTL 0x3808
+#define regCP_PERFMON_CNTL_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT 0x3809
+#define regCPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT 0x380a
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT 0x380b
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPF_LATENCY_STATS_SELECT 0x380c
+#define regCPF_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPG_LATENCY_STATS_SELECT 0x380d
+#define regCPG_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_LATENCY_STATS_SELECT 0x380e
+#define regCPC_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_TC_PERF_COUNTER_WINDOW_SELECT 0x380f
+#define regCPC_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT 0x3810
+#define regCP_DRAW_OBJECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT_COUNTER 0x3811
+#define regCP_DRAW_OBJECT_COUNTER_BASE_IDX 1
+#define regCP_DRAW_WINDOW_MASK_HI 0x3812
+#define regCP_DRAW_WINDOW_MASK_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_HI 0x3813
+#define regCP_DRAW_WINDOW_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_LO 0x3814
+#define regCP_DRAW_WINDOW_LO_BASE_IDX 1
+#define regCP_DRAW_WINDOW_CNTL 0x3815
+#define regCP_DRAW_WINDOW_CNTL_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT 0x3840
+#define regGRBM_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT 0x3841
+#define regGRBM_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_SELECT 0x3842
+#define regGRBM_SE0_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_SELECT 0x3843
+#define regGRBM_SE1_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_SELECT 0x3844
+#define regGRBM_SE2_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_SELECT 0x3845
+#define regGRBM_SE3_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT_HI 0x384d
+#define regGRBM_PERFCOUNTER0_SELECT_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT_HI 0x384e
+#define regGRBM_PERFCOUNTER1_SELECT_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_SELECT 0x38a4
+#define regGE1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_SELECT1 0x38a5
+#define regGE1_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_SELECT 0x38a6
+#define regGE1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_SELECT1 0x38a7
+#define regGE1_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_SELECT 0x38a8
+#define regGE1_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_SELECT1 0x38a9
+#define regGE1_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_SELECT 0x38aa
+#define regGE1_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_SELECT1 0x38ab
+#define regGE1_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_SELECT 0x38ac
+#define regGE2_DIST_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_SELECT1 0x38ad
+#define regGE2_DIST_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_SELECT 0x38ae
+#define regGE2_DIST_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_SELECT1 0x38af
+#define regGE2_DIST_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_SELECT 0x38b0
+#define regGE2_DIST_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_SELECT1 0x38b1
+#define regGE2_DIST_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_SELECT 0x38b2
+#define regGE2_DIST_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_SELECT1 0x38b3
+#define regGE2_DIST_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_SELECT 0x38b4
+#define regGE2_SE_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_SELECT1 0x38b5
+#define regGE2_SE_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_SELECT 0x38b6
+#define regGE2_SE_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_SELECT1 0x38b7
+#define regGE2_SE_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_SELECT 0x38b8
+#define regGE2_SE_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_SELECT1 0x38b9
+#define regGE2_SE_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_SELECT 0x38ba
+#define regGE2_SE_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_SELECT1 0x38bb
+#define regGE2_SE_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT 0x3900
+#define regPA_SU_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT1 0x3901
+#define regPA_SU_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT 0x3902
+#define regPA_SU_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT1 0x3903
+#define regPA_SU_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT 0x3904
+#define regPA_SU_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT1 0x3905
+#define regPA_SU_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT 0x3906
+#define regPA_SU_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT1 0x3907
+#define regPA_SU_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT 0x3940
+#define regPA_SC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT1 0x3941
+#define regPA_SC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_SELECT 0x3942
+#define regPA_SC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_SELECT 0x3943
+#define regPA_SC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_SELECT 0x3944
+#define regPA_SC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_SELECT 0x3945
+#define regPA_SC_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_SELECT 0x3946
+#define regPA_SC_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_SELECT 0x3947
+#define regPA_SC_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_SELECT 0x3948
+#define regPA_SC_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT 0x3980
+#define regSPI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT 0x3981
+#define regSPI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT 0x3982
+#define regSPI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT 0x3983
+#define regSPI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT1 0x3984
+#define regSPI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT1 0x3985
+#define regSPI_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT1 0x3986
+#define regSPI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT1 0x3987
+#define regSPI_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_SELECT 0x3988
+#define regSPI_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_SELECT 0x3989
+#define regSPI_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER_BINS 0x398a
+#define regSPI_PERFCOUNTER_BINS_BASE_IDX 1
+#define regPC_PERFCOUNTER0_SELECT 0x398c
+#define regPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER1_SELECT 0x398d
+#define regPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER2_SELECT 0x398e
+#define regPC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER3_SELECT 0x398f
+#define regPC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER0_SELECT1 0x3990
+#define regPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER1_SELECT1 0x3991
+#define regPC_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER2_SELECT1 0x3992
+#define regPC_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER3_SELECT1 0x3993
+#define regPC_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_SELECT 0x39c0
+#define regSQ_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_SELECT 0x39c1
+#define regSQ_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_SELECT 0x39c2
+#define regSQ_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_SELECT 0x39c3
+#define regSQ_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_SELECT 0x39c4
+#define regSQ_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_SELECT 0x39c5
+#define regSQ_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_SELECT 0x39c6
+#define regSQ_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_SELECT 0x39c7
+#define regSQ_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER8_SELECT 0x39c8
+#define regSQ_PERFCOUNTER8_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER9_SELECT 0x39c9
+#define regSQ_PERFCOUNTER9_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER10_SELECT 0x39ca
+#define regSQ_PERFCOUNTER10_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER11_SELECT 0x39cb
+#define regSQ_PERFCOUNTER11_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER12_SELECT 0x39cc
+#define regSQ_PERFCOUNTER12_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER13_SELECT 0x39cd
+#define regSQ_PERFCOUNTER13_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER14_SELECT 0x39ce
+#define regSQ_PERFCOUNTER14_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER15_SELECT 0x39cf
+#define regSQ_PERFCOUNTER15_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_SELECT 0x39d0
+#define regSQG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_SELECT 0x39d1
+#define regSQG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_SELECT 0x39d2
+#define regSQG_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_SELECT 0x39d3
+#define regSQG_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_SELECT 0x39d4
+#define regSQG_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_SELECT 0x39d5
+#define regSQG_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_SELECT 0x39d6
+#define regSQG_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_SELECT 0x39d7
+#define regSQG_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER_CTRL 0x39d8
+#define regSQG_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQG_PERFCOUNTER_CTRL2 0x39da
+#define regSQG_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSQG_PERF_SAMPLE_FINISH 0x39db
+#define regSQG_PERF_SAMPLE_FINISH_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL 0x39e0
+#define regSQ_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL2 0x39e2
+#define regSQ_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF0_BASE 0x39e8
+#define regSQ_THREAD_TRACE_BUF0_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF0_SIZE 0x39e9
+#define regSQ_THREAD_TRACE_BUF0_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF1_BASE 0x39ea
+#define regSQ_THREAD_TRACE_BUF1_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF1_SIZE 0x39eb
+#define regSQ_THREAD_TRACE_BUF1_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_CTRL 0x39ec
+#define regSQ_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regSQ_THREAD_TRACE_MASK 0x39ed
+#define regSQ_THREAD_TRACE_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_TOKEN_MASK 0x39ee
+#define regSQ_THREAD_TRACE_TOKEN_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_WPTR 0x39ef
+#define regSQ_THREAD_TRACE_WPTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS 0x39f4
+#define regSQ_THREAD_TRACE_STATUS_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS2 0x39f5
+#define regSQ_THREAD_TRACE_STATUS2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_GFX_DRAW_CNTR 0x39f6
+#define regSQ_THREAD_TRACE_GFX_DRAW_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_GFX_MARKER_CNTR 0x39f7
+#define regSQ_THREAD_TRACE_GFX_MARKER_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HP3D_DRAW_CNTR 0x39f8
+#define regSQ_THREAD_TRACE_HP3D_DRAW_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HP3D_MARKER_CNTR 0x39f9
+#define regSQ_THREAD_TRACE_HP3D_MARKER_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_DROPPED_CNTR 0x39fa
+#define regSQ_THREAD_TRACE_DROPPED_CNTR_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_SELECT 0x3a00
+#define regGCEA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_SELECT1 0x3a01
+#define regGCEA_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_MODE 0x3a02
+#define regGCEA_PERFCOUNTER2_MODE_BASE_IDX 1
+#define regGCEA_PERFCOUNTER0_CFG 0x3a03
+#define regGCEA_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCEA_PERFCOUNTER1_CFG 0x3a04
+#define regGCEA_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_RSLT_CNTL 0x3a05
+#define regGCEA_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT 0x3a40
+#define regSX_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT 0x3a41
+#define regSX_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER2_SELECT 0x3a42
+#define regSX_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER3_SELECT 0x3a43
+#define regSX_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT1 0x3a44
+#define regSX_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT1 0x3a45
+#define regSX_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT 0x3a80
+#define regGDS_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT 0x3a81
+#define regGDS_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT 0x3a82
+#define regGDS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT 0x3a83
+#define regGDS_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT1 0x3a84
+#define regGDS_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT1 0x3a85
+#define regGDS_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT1 0x3a86
+#define regGDS_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT1 0x3a87
+#define regGDS_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT 0x3ac0
+#define regTA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT1 0x3ac1
+#define regTA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER1_SELECT 0x3ac2
+#define regTA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT 0x3b00
+#define regTD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT1 0x3b01
+#define regTD_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTD_PERFCOUNTER1_SELECT 0x3b02
+#define regTD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT 0x3b40
+#define regTCP_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT1 0x3b41
+#define regTCP_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT 0x3b42
+#define regTCP_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT1 0x3b43
+#define regTCP_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_SELECT 0x3b44
+#define regTCP_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_SELECT 0x3b45
+#define regTCP_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_SELECT 0x3b80
+#define regGL2C_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_SELECT1 0x3b81
+#define regGL2C_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_SELECT 0x3b82
+#define regGL2C_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_SELECT1 0x3b83
+#define regGL2C_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_SELECT 0x3b84
+#define regGL2C_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_SELECT 0x3b85
+#define regGL2C_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_SELECT 0x3b90
+#define regGL2A_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_SELECT1 0x3b91
+#define regGL2A_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_SELECT 0x3b92
+#define regGL2A_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_SELECT1 0x3b93
+#define regGL2A_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_SELECT 0x3b94
+#define regGL2A_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_SELECT 0x3b95
+#define regGL2A_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_SELECT 0x3ba0
+#define regGL1C_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_SELECT1 0x3ba1
+#define regGL1C_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_SELECT 0x3ba2
+#define regGL1C_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_SELECT 0x3ba3
+#define regGL1C_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_SELECT 0x3ba4
+#define regGL1C_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_SELECT 0x3bc0
+#define regCHC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_SELECT1 0x3bc1
+#define regCHC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_SELECT 0x3bc2
+#define regCHC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_SELECT 0x3bc3
+#define regCHC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_SELECT 0x3bc4
+#define regCHC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_SELECT 0x3bc6
+#define regCHCG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_SELECT1 0x3bc7
+#define regCHCG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_SELECT 0x3bc8
+#define regCHCG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_SELECT 0x3bc9
+#define regCHCG_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_SELECT 0x3bca
+#define regCHCG_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER_FILTER 0x3c00
+#define regCB_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT 0x3c01
+#define regCB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT1 0x3c02
+#define regCB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCB_PERFCOUNTER1_SELECT 0x3c03
+#define regCB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER2_SELECT 0x3c04
+#define regCB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER3_SELECT 0x3c05
+#define regCB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT 0x3c40
+#define regDB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT1 0x3c41
+#define regDB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT 0x3c42
+#define regDB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT1 0x3c43
+#define regDB_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER2_SELECT 0x3c44
+#define regDB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER3_SELECT 0x3c46
+#define regDB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRLC_SPM_PERFMON_CNTL 0x3c80
+#define regRLC_SPM_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_LO 0x3c81
+#define regRLC_SPM_PERFMON_RING_BASE_LO_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_HI 0x3c82
+#define regRLC_SPM_PERFMON_RING_BASE_HI_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_SIZE 0x3c83
+#define regRLC_SPM_PERFMON_RING_SIZE_BASE_IDX 1
+#define regRLC_SPM_RING_WRPTR 0x3c84
+#define regRLC_SPM_RING_WRPTR_BASE_IDX 1
+#define regRLC_SPM_RING_RDPTR 0x3c85
+#define regRLC_SPM_RING_RDPTR_BASE_IDX 1
+#define regRLC_SPM_SEGMENT_THRESHOLD 0x3c86
+#define regRLC_SPM_SEGMENT_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE 0x3c87
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR 0x3c88
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA 0x3c89
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_ADDR 0x3c8a
+#define regRLC_SPM_SE_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_DATA 0x3c8b
+#define regRLC_SPM_SE_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_ADDR 0x3c92
+#define regRLC_SPM_ACCUM_DATARAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_DATA 0x3c93
+#define regRLC_SPM_ACCUM_DATARAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SWA_DATARAM_ADDR 0x3c94
+#define regRLC_SPM_ACCUM_SWA_DATARAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SWA_DATARAM_DATA 0x3c95
+#define regRLC_SPM_ACCUM_SWA_DATARAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR 0x3c96
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_DATA 0x3c97
+#define regRLC_SPM_ACCUM_CTRLRAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET 0x3c98
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET_BASE_IDX 1
+#define regRLC_SPM_ACCUM_STATUS 0x3c99
+#define regRLC_SPM_ACCUM_STATUS_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRL 0x3c9a
+#define regRLC_SPM_ACCUM_CTRL_BASE_IDX 1
+#define regRLC_SPM_ACCUM_MODE 0x3c9b
+#define regRLC_SPM_ACCUM_MODE_BASE_IDX 1
+#define regRLC_SPM_ACCUM_THRESHOLD 0x3c9c
+#define regRLC_SPM_ACCUM_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SAMPLES_REQUESTED 0x3c9d
+#define regRLC_SPM_ACCUM_SAMPLES_REQUESTED_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_WRCOUNT 0x3c9e
+#define regRLC_SPM_ACCUM_DATARAM_WRCOUNT_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS 0x3c9f
+#define regRLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS_BASE_IDX 1
+#define regRLC_SPM_PAUSE 0x3ca2
+#define regRLC_SPM_PAUSE_BASE_IDX 1
+#define regRLC_SPM_STATUS 0x3ca3
+#define regRLC_SPM_STATUS_BASE_IDX 1
+#define regRLC_SPM_GFXCLOCK_LOWCOUNT 0x3ca4
+#define regRLC_SPM_GFXCLOCK_LOWCOUNT_BASE_IDX 1
+#define regRLC_SPM_GFXCLOCK_HIGHCOUNT 0x3ca5
+#define regRLC_SPM_GFXCLOCK_HIGHCOUNT_BASE_IDX 1
+#define regRLC_SPM_MODE 0x3cad
+#define regRLC_SPM_MODE_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_DATA_LO 0x3cae
+#define regRLC_SPM_RSPM_REQ_DATA_LO_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_DATA_HI 0x3caf
+#define regRLC_SPM_RSPM_REQ_DATA_HI_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_OP 0x3cb0
+#define regRLC_SPM_RSPM_REQ_OP_BASE_IDX 1
+#define regRLC_SPM_RSPM_RET_DATA 0x3cb1
+#define regRLC_SPM_RSPM_RET_DATA_BASE_IDX 1
+#define regRLC_SPM_RSPM_RET_OP 0x3cb2
+#define regRLC_SPM_RSPM_RET_OP_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_DATA_LO 0x3cb3
+#define regRLC_SPM_SE_RSPM_REQ_DATA_LO_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_DATA_HI 0x3cb4
+#define regRLC_SPM_SE_RSPM_REQ_DATA_HI_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_OP 0x3cb5
+#define regRLC_SPM_SE_RSPM_REQ_OP_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_RET_DATA 0x3cb6
+#define regRLC_SPM_SE_RSPM_RET_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_RET_OP 0x3cb7
+#define regRLC_SPM_SE_RSPM_RET_OP_BASE_IDX 1
+#define regRLC_SPM_RSPM_CMD 0x3cb8
+#define regRLC_SPM_RSPM_CMD_BASE_IDX 1
+#define regRLC_SPM_RSPM_CMD_ACK 0x3cb9
+#define regRLC_SPM_RSPM_CMD_ACK_BASE_IDX 1
+#define regRLC_SPM_SPARE 0x3cbf
+#define regRLC_SPM_SPARE_BASE_IDX 1
+#define regRLC_PERFMON_CNTL 0x3cc0
+#define regRLC_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_SELECT 0x3cc1
+#define regRLC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_SELECT 0x3cc2
+#define regRLC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_CNTL 0x3cc3
+#define regRLC_GPU_IOV_PERF_CNT_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR 0x3cc4
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA 0x3cc5
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR 0x3cc6
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA 0x3cc7
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT 0x3d00
+#define regRMI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT1 0x3d01
+#define regRMI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_SELECT 0x3d02
+#define regRMI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT 0x3d03
+#define regRMI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT1 0x3d04
+#define regRMI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_SELECT 0x3d05
+#define regRMI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRMI_PERF_COUNTER_CNTL 0x3d06
+#define regRMI_PERF_COUNTER_CNTL_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_SELECT 0x3d60
+#define regGCR_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_SELECT1 0x3d61
+#define regGCR_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_SELECT 0x3d62
+#define regGCR_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_SELECT 0x3d80
+#define regPA_PH_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_SELECT1 0x3d81
+#define regPA_PH_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_SELECT 0x3d82
+#define regPA_PH_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_SELECT 0x3d83
+#define regPA_PH_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_SELECT 0x3d84
+#define regPA_PH_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_SELECT 0x3d85
+#define regPA_PH_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_SELECT 0x3d86
+#define regPA_PH_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_SELECT 0x3d87
+#define regPA_PH_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_SELECT 0x3d88
+#define regPA_PH_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_SELECT1 0x3d90
+#define regPA_PH_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_SELECT1 0x3d91
+#define regPA_PH_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_SELECT1 0x3d92
+#define regPA_PH_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_SELECT 0x3da0
+#define regUTCL1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_SELECT 0x3da1
+#define regUTCL1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_SELECT 0x3da2
+#define regUTCL1_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_SELECT 0x3da3
+#define regUTCL1_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_SELECT 0x3dc0
+#define regGL1A_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_SELECT1 0x3dc1
+#define regGL1A_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_SELECT 0x3dc2
+#define regGL1A_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_SELECT 0x3dc3
+#define regGL1A_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_SELECT 0x3dc4
+#define regGL1A_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_SELECT 0x3dd0
+#define regGL1H_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_SELECT1 0x3dd1
+#define regGL1H_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_SELECT 0x3dd2
+#define regGL1H_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_SELECT 0x3dd3
+#define regGL1H_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_SELECT 0x3dd4
+#define regGL1H_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_SELECT 0x3de0
+#define regCHA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_SELECT1 0x3de1
+#define regCHA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_SELECT 0x3de2
+#define regCHA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_SELECT 0x3de3
+#define regCHA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_SELECT 0x3de4
+#define regCHA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_SELECT 0x3e00
+#define regGUS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_SELECT1 0x3e01
+#define regGUS_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_MODE 0x3e02
+#define regGUS_PERFCOUNTER2_MODE_BASE_IDX 1
+#define regGUS_PERFCOUNTER0_CFG 0x3e03
+#define regGUS_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGUS_PERFCOUNTER1_CFG 0x3e04
+#define regGUS_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGUS_PERFCOUNTER_RSLT_CNTL 0x3e05
+#define regGUS_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gdfll_gdfll_dec
+// base address: 0x3a000
+#define regGDFLL_EDC_HYSTERESIS_CNTL 0x4828
+#define regGDFLL_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_EDC_HYSTERESIS_STAT 0x4829
+#define regGDFLL_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: gc_gdfll_se_gdfll_dec
+// base address: 0x3a300
+#define regGDFLL_SE_EDC_HYSTERESIS_CNTL 0x48e8
+#define regGDFLL_SE_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_SE_EDC_HYSTERESIS_STAT 0x48e9
+#define regGDFLL_SE_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfs_grtavfs_dec
+// base address: 0x3ac00
+#define regGRTAVFS_RTAVFS_REG_ADDR 0x4b00
+#define regGRTAVFS_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_WR_DATA 0x4b01
+#define regGRTAVFS_RTAVFS_WR_DATA_BASE_IDX 1
+#define regGRTAVFS_GENERAL_0 0x4b02
+#define regGRTAVFS_GENERAL_0_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_RD_DATA 0x4b03
+#define regGRTAVFS_RTAVFS_RD_DATA_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_REG_CTRL 0x4b04
+#define regGRTAVFS_RTAVFS_REG_CTRL_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_REG_STATUS 0x4b05
+#define regGRTAVFS_RTAVFS_REG_STATUS_BASE_IDX 1
+#define regGRTAVFS_TARG_FREQ 0x4b06
+#define regGRTAVFS_TARG_FREQ_BASE_IDX 1
+#define regGRTAVFS_TARG_VOLT 0x4b07
+#define regGRTAVFS_TARG_VOLT_BASE_IDX 1
+#define regGRTAVFS_SOFT_RESET 0x4b0c
+#define regGRTAVFS_SOFT_RESET_BASE_IDX 1
+#define regGRTAVFS_PSM_CNTL 0x4b0d
+#define regGRTAVFS_PSM_CNTL_BASE_IDX 1
+#define regGRTAVFS_CLK_CNTL 0x4b0e
+#define regGRTAVFS_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfs_se_grtavfs_dec
+// base address: 0x3ad00
+#define regGRTAVFS_SE_RTAVFS_REG_ADDR 0x4b40
+#define regGRTAVFS_SE_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_WR_DATA 0x4b41
+#define regGRTAVFS_SE_RTAVFS_WR_DATA_BASE_IDX 1
+#define regGRTAVFS_SE_GENERAL_0 0x4b42
+#define regGRTAVFS_SE_GENERAL_0_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_RD_DATA 0x4b43
+#define regGRTAVFS_SE_RTAVFS_RD_DATA_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_REG_CTRL 0x4b44
+#define regGRTAVFS_SE_RTAVFS_REG_CTRL_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_REG_STATUS 0x4b45
+#define regGRTAVFS_SE_RTAVFS_REG_STATUS_BASE_IDX 1
+#define regGRTAVFS_SE_TARG_FREQ 0x4b46
+#define regGRTAVFS_SE_TARG_FREQ_BASE_IDX 1
+#define regGRTAVFS_SE_TARG_VOLT 0x4b47
+#define regGRTAVFS_SE_TARG_VOLT_BASE_IDX 1
+#define regGRTAVFS_SE_SOFT_RESET 0x4b4c
+#define regGRTAVFS_SE_SOFT_RESET_BASE_IDX 1
+#define regGRTAVFS_SE_PSM_CNTL 0x4b4d
+#define regGRTAVFS_SE_PSM_CNTL_BASE_IDX 1
+#define regGRTAVFS_SE_CLK_CNTL 0x4b4e
+#define regGRTAVFS_SE_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfsdec
+// base address: 0x3ac00
+#define regRTAVFS_RTAVFS_REG_ADDR 0x4b00
+#define regRTAVFS_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regRTAVFS_RTAVFS_WR_DATA 0x4b01
+#define regRTAVFS_RTAVFS_WR_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_hypdec
+// base address: 0x3e000
+#define regGFX_PIPE_PRIORITY 0x587f
+#define regGFX_PIPE_PRIORITY_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_ENABLE 0x5b00
+#define regRLC_GPU_IOV_VF_ENABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG6 0x5b06
+#define regRLC_GPU_IOV_CFG_REG6_BASE_IDX 1
+#define regRLC_SDMA0_STATUS 0x5b18
+#define regRLC_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_SDMA1_STATUS 0x5b19
+#define regRLC_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_SDMA2_STATUS 0x5b1a
+#define regRLC_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_SDMA3_STATUS 0x5b1b
+#define regRLC_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_SDMA0_BUSY_STATUS 0x5b1c
+#define regRLC_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA1_BUSY_STATUS 0x5b1d
+#define regRLC_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA2_BUSY_STATUS 0x5b1e
+#define regRLC_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA3_BUSY_STATUS 0x5b1f
+#define regRLC_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG8 0x5b20
+#define regRLC_GPU_IOV_CFG_REG8_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_0 0x5b25
+#define regRLC_RLCV_TIMER_INT_0_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_1 0x5b26
+#define regRLC_RLCV_TIMER_INT_1_BASE_IDX 1
+#define regRLC_RLCV_TIMER_CTRL 0x5b27
+#define regRLC_RLCV_TIMER_CTRL_BASE_IDX 1
+#define regRLC_RLCV_TIMER_STAT 0x5b28
+#define regRLC_RLCV_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS 0x5b2a
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET 0x5b2b
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR 0x5b2c
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_MASK 0x5b2d
+#define regRLC_GPU_IOV_VF_MASK_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_0 0x5b2e
+#define regRLC_HYP_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_1 0x5b2f
+#define regRLC_HYP_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_BUSY_CLK_CNTL 0x5b30
+#define regRLC_BUSY_CLK_CNTL_BASE_IDX 1
+#define regRLC_CLK_CNTL 0x5b31
+#define regRLC_CLK_CNTL_BASE_IDX 1
+#define regRLC_PACE_TIMER_STAT 0x5b33
+#define regRLC_PACE_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_BLOCK 0x5b34
+#define regRLC_GPU_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG1 0x5b35
+#define regRLC_GPU_IOV_CFG_REG1_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG2 0x5b36
+#define regRLC_GPU_IOV_CFG_REG2_BASE_IDX 1
+#define regRLC_GPU_IOV_VM_BUSY_STATUS 0x5b37
+#define regRLC_GPU_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_0 0x5b38
+#define regRLC_GPU_IOV_SCH_0_BASE_IDX 1
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID 0x5b39
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_3 0x5b3a
+#define regRLC_GPU_IOV_SCH_3_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_1 0x5b3b
+#define regRLC_GPU_IOV_SCH_1_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_2 0x5b3c
+#define regRLC_GPU_IOV_SCH_2_BASE_IDX 1
+#define regRLC_PACE_INT_FORCE 0x5b3d
+#define regRLC_PACE_INT_FORCE_BASE_IDX 1
+#define regRLC_PACE_INT_CLEAR 0x5b3e
+#define regRLC_PACE_INT_CLEAR_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_STAT 0x5b3f
+#define regRLC_GPU_IOV_INT_STAT_BASE_IDX 1
+#define regRLC_IH_COOKIE 0x5b41
+#define regRLC_IH_COOKIE_BASE_IDX 1
+#define regRLC_IH_COOKIE_CNTL 0x5b42
+#define regRLC_IH_COOKIE_CNTL_BASE_IDX 1
+#define regRLC_HYP_RLCG_UCODE_CHKSUM 0x5b43
+#define regRLC_HYP_RLCG_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_HYP_RLCP_UCODE_CHKSUM 0x5b44
+#define regRLC_HYP_RLCP_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_HYP_RLCV_UCODE_CHKSUM 0x5b45
+#define regRLC_HYP_RLCV_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_CNTL 0x5b46
+#define regRLC_GPU_IOV_F32_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_RESET 0x5b47
+#define regRLC_GPU_IOV_F32_RESET_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_ADDR 0x5b48
+#define regRLC_GPU_IOV_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_DATA 0x5b49
+#define regRLC_GPU_IOV_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_SMU_RESPONSE 0x5b4a
+#define regRLC_GPU_IOV_SMU_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_INVALIDATE_CACHE 0x5b4b
+#define regRLC_GPU_IOV_F32_INVALIDATE_CACHE_BASE_IDX 1
+#define regRLC_GPU_IOV_VIRT_RESET_REQ 0x5b4c
+#define regRLC_GPU_IOV_VIRT_RESET_REQ_BASE_IDX 1
+#define regRLC_GPU_IOV_RLC_RESPONSE 0x5b4d
+#define regRLC_GPU_IOV_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_DISABLE 0x5b4e
+#define regRLC_GPU_IOV_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_FORCE 0x5b4f
+#define regRLC_GPU_IOV_INT_FORCE_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_ADDR 0x5b50
+#define regRLC_GPU_IOV_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_DATA 0x5b51
+#define regRLC_GPU_IOV_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_2 0x5b52
+#define regRLC_HYP_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_3 0x5b53
+#define regRLC_HYP_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_LX6_SCRATCH_ADDR 0x5b59
+#define regRLC_LX6_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_LX6_CORE1_SCRATCH_ADDR 0x5b5b
+#define regRLC_LX6_CORE1_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_ADDR 0x5b60
+#define regRLC_GPM_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_DATA 0x5b61
+#define regRLC_GPM_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPM_IRAM_ADDR 0x5b62
+#define regRLC_GPM_IRAM_ADDR_BASE_IDX 1
+#define regRLC_GPM_IRAM_DATA 0x5b63
+#define regRLC_GPM_IRAM_DATA_BASE_IDX 1
+#define regRLC_RLCP_IRAM_ADDR 0x5b64
+#define regRLC_RLCP_IRAM_ADDR_BASE_IDX 1
+#define regRLC_RLCP_IRAM_DATA 0x5b65
+#define regRLC_RLCP_IRAM_DATA_BASE_IDX 1
+#define regRLC_RLCV_IRAM_ADDR 0x5b66
+#define regRLC_RLCV_IRAM_ADDR_BASE_IDX 1
+#define regRLC_RLCV_IRAM_DATA 0x5b67
+#define regRLC_RLCV_IRAM_DATA_BASE_IDX 1
+#define regRLC_LX6_DRAM_ADDR 0x5b68
+#define regRLC_LX6_DRAM_ADDR_BASE_IDX 1
+#define regRLC_LX6_DRAM_DATA 0x5b69
+#define regRLC_LX6_DRAM_DATA_BASE_IDX 1
+#define regRLC_LX6_IRAM_ADDR 0x5b6a
+#define regRLC_LX6_IRAM_ADDR_BASE_IDX 1
+#define regRLC_LX6_IRAM_DATA 0x5b6b
+#define regRLC_LX6_IRAM_DATA_BASE_IDX 1
+#define regRLC_PACE_UCODE_ADDR 0x5b6c
+#define regRLC_PACE_UCODE_ADDR_BASE_IDX 1
+#define regRLC_PACE_UCODE_DATA 0x5b6d
+#define regRLC_PACE_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_ADDR 0x5b6e
+#define regRLC_GPM_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_DATA 0x5b6f
+#define regRLC_GPM_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_SRM_DRAM_ADDR 0x5b71
+#define regRLC_SRM_DRAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_DRAM_DATA 0x5b72
+#define regRLC_SRM_DRAM_DATA_BASE_IDX 1
+#define regRLC_SRM_ARAM_ADDR 0x5b73
+#define regRLC_SRM_ARAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_ARAM_DATA 0x5b74
+#define regRLC_SRM_ARAM_DATA_BASE_IDX 1
+#define regRLC_PACE_SCRATCH_ADDR 0x5b77
+#define regRLC_PACE_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_PACE_SCRATCH_DATA 0x5b78
+#define regRLC_PACE_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_GTS_OFFSET_LSB 0x5b79
+#define regRLC_GTS_OFFSET_LSB_BASE_IDX 1
+#define regRLC_GTS_OFFSET_MSB 0x5b7a
+#define regRLC_GTS_OFFSET_MSB_BASE_IDX 1
+#define regGL2_PIPE_STEER_0 0x5b80
+#define regGL2_PIPE_STEER_0_BASE_IDX 1
+#define regGL2_PIPE_STEER_1 0x5b81
+#define regGL2_PIPE_STEER_1_BASE_IDX 1
+#define regGL2_PIPE_STEER_2 0x5b82
+#define regGL2_PIPE_STEER_2_BASE_IDX 1
+#define regGL2_PIPE_STEER_3 0x5b83
+#define regGL2_PIPE_STEER_3_BASE_IDX 1
+#define regGL1_PIPE_STEER 0x5b84
+#define regGL1_PIPE_STEER_BASE_IDX 1
+#define regCH_PIPE_STEER 0x5b88
+#define regCH_PIPE_STEER_BASE_IDX 1
+#define regGC_USER_SHADER_ARRAY_CONFIG 0x5b90
+#define regGC_USER_SHADER_ARRAY_CONFIG_BASE_IDX 1
+#define regGC_USER_PRIM_CONFIG 0x5b91
+#define regGC_USER_PRIM_CONFIG_BASE_IDX 1
+#define regGC_USER_SA_UNIT_DISABLE 0x5b92
+#define regGC_USER_SA_UNIT_DISABLE_BASE_IDX 1
+#define regGC_USER_RB_REDUNDANCY 0x5b93
+#define regGC_USER_RB_REDUNDANCY_BASE_IDX 1
+#define regGC_USER_RB_BACKEND_DISABLE 0x5b94
+#define regGC_USER_RB_BACKEND_DISABLE_BASE_IDX 1
+#define regGC_USER_RMI_REDUNDANCY 0x5b95
+#define regGC_USER_RMI_REDUNDANCY_BASE_IDX 1
+#define regCGTS_USER_TCC_DISABLE 0x5b96
+#define regCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define regGC_USER_SHADER_RATE_CONFIG 0x5b97
+#define regGC_USER_SHADER_RATE_CONFIG_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_STATUS 0x5bc0
+#define regRLC_GPU_IOV_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_STATUS 0x5bc1
+#define regRLC_GPU_IOV_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_STATUS 0x5bc2
+#define regRLC_GPU_IOV_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_STATUS 0x5bc3
+#define regRLC_GPU_IOV_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_STATUS 0x5bc4
+#define regRLC_GPU_IOV_SDMA4_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_STATUS 0x5bc5
+#define regRLC_GPU_IOV_SDMA5_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_STATUS 0x5bc6
+#define regRLC_GPU_IOV_SDMA6_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_STATUS 0x5bc7
+#define regRLC_GPU_IOV_SDMA7_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS 0x5bc8
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS 0x5bc9
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS 0x5bca
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS 0x5bcb
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS 0x5bcc
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS 0x5bcd
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS 0x5bce
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS 0x5bcf
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_cphypdec
+// base address: 0x3e000
+#define regCP_HYP_PFP_UCODE_ADDR 0x5814
+#define regCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_PFP_UCODE_ADDR 0x5814
+#define regCP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_DATA 0x5815
+#define regCP_HYP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_PFP_UCODE_DATA 0x5815
+#define regCP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_ADDR 0x5816
+#define regCP_HYP_ME_UCODE_ADDR_BASE_IDX 1
+#define regCP_ME_RAM_RADDR 0x5816
+#define regCP_ME_RAM_RADDR_BASE_IDX 1
+#define regCP_ME_RAM_WADDR 0x5816
+#define regCP_ME_RAM_WADDR_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_DATA 0x5817
+#define regCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+#define regCP_ME_RAM_DATA 0x5817
+#define regCP_ME_RAM_DATA_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_ADDR 0x581a
+#define regCP_HYP_MEC1_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_ADDR 0x581a
+#define regCP_MEC_ME1_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_DATA 0x581b
+#define regCP_HYP_MEC1_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_DATA 0x581b
+#define regCP_MEC_ME1_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_ADDR 0x581c
+#define regCP_HYP_MEC2_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_ADDR 0x581c
+#define regCP_MEC_ME2_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_DATA 0x581d
+#define regCP_HYP_MEC2_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_DATA 0x581d
+#define regCP_MEC_ME2_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_CHKSUM 0x581e
+#define regCP_HYP_PFP_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_CHKSUM 0x5820
+#define regCP_HYP_ME_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM 0x5821
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM 0x5822
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_PFP_IC_BASE_LO 0x5840
+#define regCP_PFP_IC_BASE_LO_BASE_IDX 1
+#define regCP_PFP_IC_BASE_HI 0x5841
+#define regCP_PFP_IC_BASE_HI_BASE_IDX 1
+#define regCP_PFP_IC_BASE_CNTL 0x5842
+#define regCP_PFP_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_PFP_IC_OP_CNTL 0x5843
+#define regCP_PFP_IC_OP_CNTL_BASE_IDX 1
+#define regCP_ME_IC_BASE_LO 0x5844
+#define regCP_ME_IC_BASE_LO_BASE_IDX 1
+#define regCP_ME_IC_BASE_HI 0x5845
+#define regCP_ME_IC_BASE_HI_BASE_IDX 1
+#define regCP_ME_IC_BASE_CNTL 0x5846
+#define regCP_ME_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_ME_IC_OP_CNTL 0x5847
+#define regCP_ME_IC_OP_CNTL_BASE_IDX 1
+#define regCP_CPC_IC_BASE_LO 0x584c
+#define regCP_CPC_IC_BASE_LO_BASE_IDX 1
+#define regCP_CPC_IC_BASE_HI 0x584d
+#define regCP_CPC_IC_BASE_HI_BASE_IDX 1
+#define regCP_CPC_IC_BASE_CNTL 0x584e
+#define regCP_CPC_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_IC_BASE_LO 0x5850
+#define regCP_MES_IC_BASE_LO_BASE_IDX 1
+#define regCP_MES_MIBASE_LO 0x5850
+#define regCP_MES_MIBASE_LO_BASE_IDX 1
+#define regCP_MES_IC_BASE_HI 0x5851
+#define regCP_MES_IC_BASE_HI_BASE_IDX 1
+#define regCP_MES_MIBASE_HI 0x5851
+#define regCP_MES_MIBASE_HI_BASE_IDX 1
+#define regCP_MES_IC_BASE_CNTL 0x5852
+#define regCP_MES_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_DC_BASE_LO 0x5854
+#define regCP_MES_DC_BASE_LO_BASE_IDX 1
+#define regCP_MES_MDBASE_LO 0x5854
+#define regCP_MES_MDBASE_LO_BASE_IDX 1
+#define regCP_MES_DC_BASE_HI 0x5855
+#define regCP_MES_DC_BASE_HI_BASE_IDX 1
+#define regCP_MES_MDBASE_HI 0x5855
+#define regCP_MES_MDBASE_HI_BASE_IDX 1
+#define regCP_MES_MIBOUND_LO 0x585b
+#define regCP_MES_MIBOUND_LO_BASE_IDX 1
+#define regCP_MES_MIBOUND_HI 0x585c
+#define regCP_MES_MIBOUND_HI_BASE_IDX 1
+#define regCP_MES_MDBOUND_LO 0x585d
+#define regCP_MES_MDBOUND_LO_BASE_IDX 1
+#define regCP_MES_MDBOUND_HI 0x585e
+#define regCP_MES_MDBOUND_HI_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE0_LO 0x5863
+#define regCP_GFX_RS64_DC_BASE0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE1_LO 0x5864
+#define regCP_GFX_RS64_DC_BASE1_LO_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE0_HI 0x5865
+#define regCP_GFX_RS64_DC_BASE0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE1_HI 0x5866
+#define regCP_GFX_RS64_DC_BASE1_HI_BASE_IDX 1
+#define regCP_GFX_RS64_MIBOUND_LO 0x586c
+#define regCP_GFX_RS64_MIBOUND_LO_BASE_IDX 1
+#define regCP_GFX_RS64_MIBOUND_HI 0x586d
+#define regCP_GFX_RS64_MIBOUND_HI_BASE_IDX 1
+#define regCP_MEC_DC_BASE_LO 0x5870
+#define regCP_MEC_DC_BASE_LO_BASE_IDX 1
+#define regCP_MEC_MDBASE_LO 0x5870
+#define regCP_MEC_MDBASE_LO_BASE_IDX 1
+#define regCP_MEC_DC_BASE_HI 0x5871
+#define regCP_MEC_DC_BASE_HI_BASE_IDX 1
+#define regCP_MEC_MDBASE_HI 0x5871
+#define regCP_MEC_MDBASE_HI_BASE_IDX 1
+#define regCP_MEC_MIBOUND_LO 0x5872
+#define regCP_MEC_MIBOUND_LO_BASE_IDX 1
+#define regCP_MEC_MIBOUND_HI 0x5873
+#define regCP_MEC_MIBOUND_HI_BASE_IDX 1
+#define regCP_MEC_MDBOUND_LO 0x5874
+#define regCP_MEC_MDBOUND_LO_BASE_IDX 1
+#define regCP_MEC_MDBOUND_HI 0x5875
+#define regCP_MEC_MDBOUND_HI_BASE_IDX 1
+
+
+// addressBlock: gc_grbm_hypdec
+// base address: 0x3e800
+#define regGRBM_GFX_INDEX_SR_SELECT 0x5a00
+#define regGRBM_GFX_INDEX_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_INDEX_SR_DATA 0x5a01
+#define regGRBM_GFX_INDEX_SR_DATA_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_SELECT 0x5a02
+#define regGRBM_GFX_CNTL_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_DATA 0x5a03
+#define regGRBM_GFX_CNTL_SR_DATA_BASE_IDX 1
+#define regGC_IH_COOKIE_0_PTR 0x5a07
+#define regGC_IH_COOKIE_0_PTR_BASE_IDX 1
+#define regGRBM_SE_REMAP_CNTL 0x5a08
+#define regGRBM_SE_REMAP_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcvmsharedhvdec
+// base address: 0x3ea00
+#define regGCMC_VM_FB_SIZE_OFFSET_VF0 0x5a80
+#define regGCMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF1 0x5a81
+#define regGCMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF2 0x5a82
+#define regGCMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF3 0x5a83
+#define regGCMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF4 0x5a84
+#define regGCMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF5 0x5a85
+#define regGCMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF6 0x5a86
+#define regGCMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF7 0x5a87
+#define regGCMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF8 0x5a88
+#define regGCMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF9 0x5a89
+#define regGCMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF10 0x5a8a
+#define regGCMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF11 0x5a8b
+#define regGCMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF12 0x5a8c
+#define regGCMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF13 0x5a8d
+#define regGCMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF14 0x5a8e
+#define regGCMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF15 0x5a8f
+#define regGCMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+
+
+// addressBlock: gc_rlcdec
+// base address: 0x3b000
+#define regRLC_CNTL 0x4c00
+#define regRLC_CNTL_BASE_IDX 1
+#define regRLC_F32_UCODE_VERSION 0x4c03
+#define regRLC_F32_UCODE_VERSION_BASE_IDX 1
+#define regRLC_STAT 0x4c04
+#define regRLC_STAT_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_LSB 0x4c0c
+#define regRLC_REFCLOCK_TIMESTAMP_LSB_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_MSB 0x4c0d
+#define regRLC_REFCLOCK_TIMESTAMP_MSB_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_0 0x4c0e
+#define regRLC_GPM_TIMER_INT_0_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_1 0x4c0f
+#define regRLC_GPM_TIMER_INT_1_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_2 0x4c10
+#define regRLC_GPM_TIMER_INT_2_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_3 0x4c11
+#define regRLC_GPM_TIMER_INT_3_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_4 0x4c12
+#define regRLC_GPM_TIMER_INT_4_BASE_IDX 1
+#define regRLC_GPM_TIMER_CTRL 0x4c13
+#define regRLC_GPM_TIMER_CTRL_BASE_IDX 1
+#define regRLC_GPM_TIMER_STAT 0x4c14
+#define regRLC_GPM_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_STAT 0x4c16
+#define regRLC_GPM_LEGACY_INT_STAT_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_CLEAR 0x4c17
+#define regRLC_GPM_LEGACY_INT_CLEAR_BASE_IDX 1
+#define regRLC_INT_STAT 0x4c18
+#define regRLC_INT_STAT_BASE_IDX 1
+#define regRLC_MGCG_CTRL 0x4c1a
+#define regRLC_MGCG_CTRL_BASE_IDX 1
+#define regRLC_JUMP_TABLE_RESTORE 0x4c1e
+#define regRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
+#define regRLC_PG_DELAY_2 0x4c1f
+#define regRLC_PG_DELAY_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB 0x4c24
+#define regRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB 0x4c25
+#define regRLC_GPU_CLOCK_COUNT_MSB_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT 0x4c26
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_BASE_IDX 1
+#define regRLC_UCODE_CNTL 0x4c27
+#define regRLC_UCODE_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_RESET 0x4c28
+#define regRLC_GPM_THREAD_RESET_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T0 0x4c29
+#define regRLC_GPM_CP_DMA_COMPLETE_T0_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T1 0x4c2a
+#define regRLC_GPM_CP_DMA_COMPLETE_T1_BASE_IDX 1
+#define regRLC_GPM_THREAD_INVALIDATE_CACHE 0x4c2b
+#define regRLC_GPM_THREAD_INVALIDATE_CACHE_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_LSB 0x4c30
+#define regRLC_CLK_COUNT_GFXCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_MSB 0x4c31
+#define regRLC_CLK_COUNT_GFXCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_LSB 0x4c32
+#define regRLC_CLK_COUNT_REFCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_MSB 0x4c33
+#define regRLC_CLK_COUNT_REFCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_CTRL 0x4c34
+#define regRLC_CLK_COUNT_CTRL_BASE_IDX 1
+#define regRLC_CLK_COUNT_STAT 0x4c35
+#define regRLC_CLK_COUNT_STAT_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_CNTL 0x4c36
+#define regRLC_RLCG_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_STAT 0x4c37
+#define regRLC_RLCG_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_0_DATA_LO 0x4c38
+#define regRLC_RLCG_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_0_DATA_HI 0x4c39
+#define regRLC_RLCG_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_1_DATA_LO 0x4c3a
+#define regRLC_RLCG_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_1_DATA_HI 0x4c3b
+#define regRLC_RLCG_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_2_DATA_LO 0x4c3c
+#define regRLC_RLCG_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_2_DATA_HI 0x4c3d
+#define regRLC_RLCG_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_3_DATA_LO 0x4c3e
+#define regRLC_RLCG_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_3_DATA_HI 0x4c3f
+#define regRLC_RLCG_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32_RES_SEL 0x4c41
+#define regRLC_GPU_CLOCK_32_RES_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32 0x4c42
+#define regRLC_GPU_CLOCK_32_BASE_IDX 1
+#define regRLC_PG_CNTL 0x4c43
+#define regRLC_PG_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_PRIORITY 0x4c44
+#define regRLC_GPM_THREAD_PRIORITY_BASE_IDX 1
+#define regRLC_GPM_THREAD_ENABLE 0x4c45
+#define regRLC_GPM_THREAD_ENABLE_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_RANGE 0x4c47
+#define regRLC_RLCG_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_CGTT_MGCG_OVERRIDE 0x4c48
+#define regRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL 0x4c49
+#define regRLC_CGCG_CGLS_CTRL_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL 0x4c4a
+#define regRLC_CGCG_RAMP_CTRL_BASE_IDX 1
+#define regRLC_DYN_PG_STATUS 0x4c4b
+#define regRLC_DYN_PG_STATUS_BASE_IDX 1
+#define regRLC_DYN_PG_REQUEST 0x4c4c
+#define regRLC_DYN_PG_REQUEST_BASE_IDX 1
+#define regRLC_PG_DELAY 0x4c4d
+#define regRLC_PG_DELAY_BASE_IDX 1
+#define regRLC_WGP_STATUS 0x4c4e
+#define regRLC_WGP_STATUS_BASE_IDX 1
+#define regRLC_PG_ALWAYS_ON_WGP_MASK 0x4c53
+#define regRLC_PG_ALWAYS_ON_WGP_MASK_BASE_IDX 1
+#define regRLC_MAX_PG_WGP 0x4c54
+#define regRLC_MAX_PG_WGP_BASE_IDX 1
+#define regRLC_AUTO_PG_CTRL 0x4c55
+#define regRLC_AUTO_PG_CTRL_BASE_IDX 1
+#define regRLC_SERDES_RD_INDEX 0x4c59
+#define regRLC_SERDES_RD_INDEX_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_0 0x4c5a
+#define regRLC_SERDES_RD_DATA_0_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_1 0x4c5b
+#define regRLC_SERDES_RD_DATA_1_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_2 0x4c5c
+#define regRLC_SERDES_RD_DATA_2_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_3 0x4c5d
+#define regRLC_SERDES_RD_DATA_3_BASE_IDX 1
+#define regRLC_SERDES_MASK 0x4c5e
+#define regRLC_SERDES_MASK_BASE_IDX 1
+#define regRLC_SERDES_CTRL 0x4c5f
+#define regRLC_SERDES_CTRL_BASE_IDX 1
+#define regRLC_SERDES_DATA 0x4c60
+#define regRLC_SERDES_DATA_BASE_IDX 1
+#define regRLC_SERDES_BUSY 0x4c61
+#define regRLC_SERDES_BUSY_BASE_IDX 1
+#define regRLC_GPM_GENERAL_0 0x4c63
+#define regRLC_GPM_GENERAL_0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_1 0x4c64
+#define regRLC_GPM_GENERAL_1_BASE_IDX 1
+#define regRLC_GPM_GENERAL_2 0x4c65
+#define regRLC_GPM_GENERAL_2_BASE_IDX 1
+#define regRLC_GPM_GENERAL_3 0x4c66
+#define regRLC_GPM_GENERAL_3_BASE_IDX 1
+#define regRLC_GPM_GENERAL_4 0x4c67
+#define regRLC_GPM_GENERAL_4_BASE_IDX 1
+#define regRLC_GPM_GENERAL_5 0x4c68
+#define regRLC_GPM_GENERAL_5_BASE_IDX 1
+#define regRLC_GPM_GENERAL_6 0x4c69
+#define regRLC_GPM_GENERAL_6_BASE_IDX 1
+#define regRLC_GPM_GENERAL_7 0x4c6a
+#define regRLC_GPM_GENERAL_7_BASE_IDX 1
+#define regRLC_STATIC_PG_STATUS 0x4c6e
+#define regRLC_STATIC_PG_STATUS_BASE_IDX 1
+#define regRLC_GPM_GENERAL_16 0x4c76
+#define regRLC_GPM_GENERAL_16_BASE_IDX 1
+#define regRLC_PG_DELAY_3 0x4c78
+#define regRLC_PG_DELAY_3_BASE_IDX 1
+#define regRLC_GPR_REG1 0x4c79
+#define regRLC_GPR_REG1_BASE_IDX 1
+#define regRLC_GPR_REG2 0x4c7a
+#define regRLC_GPR_REG2_BASE_IDX 1
+#define regRLC_GPM_INT_DISABLE_TH0 0x4c7c
+#define regRLC_GPM_INT_DISABLE_TH0_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_DISABLE 0x4c7d
+#define regRLC_GPM_LEGACY_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPM_INT_FORCE_TH0 0x4c7e
+#define regRLC_GPM_INT_FORCE_TH0_BASE_IDX 1
+#define regRLC_SRM_CNTL 0x4c80
+#define regRLC_SRM_CNTL_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND_STATUS 0x4c88
+#define regRLC_SRM_GPM_COMMAND_STATUS_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_0 0x4c8b
+#define regRLC_SRM_INDEX_CNTL_ADDR_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_1 0x4c8c
+#define regRLC_SRM_INDEX_CNTL_ADDR_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_2 0x4c8d
+#define regRLC_SRM_INDEX_CNTL_ADDR_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_3 0x4c8e
+#define regRLC_SRM_INDEX_CNTL_ADDR_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_4 0x4c8f
+#define regRLC_SRM_INDEX_CNTL_ADDR_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_5 0x4c90
+#define regRLC_SRM_INDEX_CNTL_ADDR_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_6 0x4c91
+#define regRLC_SRM_INDEX_CNTL_ADDR_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_7 0x4c92
+#define regRLC_SRM_INDEX_CNTL_ADDR_7_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_0 0x4c93
+#define regRLC_SRM_INDEX_CNTL_DATA_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_1 0x4c94
+#define regRLC_SRM_INDEX_CNTL_DATA_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_2 0x4c95
+#define regRLC_SRM_INDEX_CNTL_DATA_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_3 0x4c96
+#define regRLC_SRM_INDEX_CNTL_DATA_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_4 0x4c97
+#define regRLC_SRM_INDEX_CNTL_DATA_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_5 0x4c98
+#define regRLC_SRM_INDEX_CNTL_DATA_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_6 0x4c99
+#define regRLC_SRM_INDEX_CNTL_DATA_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_7 0x4c9a
+#define regRLC_SRM_INDEX_CNTL_DATA_7_BASE_IDX 1
+#define regRLC_SRM_STAT 0x4c9b
+#define regRLC_SRM_STAT_BASE_IDX 1
+#define regRLC_GPM_GENERAL_8 0x4cad
+#define regRLC_GPM_GENERAL_8_BASE_IDX 1
+#define regRLC_GPM_GENERAL_9 0x4cae
+#define regRLC_GPM_GENERAL_9_BASE_IDX 1
+#define regRLC_GPM_GENERAL_10 0x4caf
+#define regRLC_GPM_GENERAL_10_BASE_IDX 1
+#define regRLC_GPM_GENERAL_11 0x4cb0
+#define regRLC_GPM_GENERAL_11_BASE_IDX 1
+#define regRLC_GPM_GENERAL_12 0x4cb1
+#define regRLC_GPM_GENERAL_12_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_0 0x4cb2
+#define regRLC_GPM_UTCL1_CNTL_0_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_1 0x4cb3
+#define regRLC_GPM_UTCL1_CNTL_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_2 0x4cb4
+#define regRLC_GPM_UTCL1_CNTL_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_CNTL 0x4cb5
+#define regRLC_SPM_UTCL1_CNTL_BASE_IDX 1
+#define regRLC_UTCL1_STATUS_2 0x4cb6
+#define regRLC_UTCL1_STATUS_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_1 0x4cbc
+#define regRLC_SPM_UTCL1_ERROR_1_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_2 0x4cbd
+#define regRLC_SPM_UTCL1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_1 0x4cbe
+#define regRLC_GPM_UTCL1_TH0_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_2 0x4cc0
+#define regRLC_GPM_UTCL1_TH0_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1 0x4cc1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_2 0x4cc2
+#define regRLC_GPM_UTCL1_TH1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_1 0x4cc3
+#define regRLC_GPM_UTCL1_TH2_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_2 0x4cc4
+#define regRLC_GPM_UTCL1_TH2_ERROR_2_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL_3D 0x4cc5
+#define regRLC_CGCG_CGLS_CTRL_3D_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL_3D 0x4cc6
+#define regRLC_CGCG_RAMP_CTRL_3D_BASE_IDX 1
+#define regRLC_SEMAPHORE_0 0x4cc7
+#define regRLC_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_SEMAPHORE_1 0x4cc8
+#define regRLC_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_SEMAPHORE_2 0x4cc9
+#define regRLC_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_SEMAPHORE_3 0x4cca
+#define regRLC_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_PACE_INT_STAT 0x4ccc
+#define regRLC_PACE_INT_STAT_BASE_IDX 1
+#define regRLC_UTCL1_STATUS 0x4cd4
+#define regRLC_UTCL1_STATUS_BASE_IDX 1
+#define regRLC_R2I_CNTL_0 0x4cd5
+#define regRLC_R2I_CNTL_0_BASE_IDX 1
+#define regRLC_R2I_CNTL_1 0x4cd6
+#define regRLC_R2I_CNTL_1_BASE_IDX 1
+#define regRLC_R2I_CNTL_2 0x4cd7
+#define regRLC_R2I_CNTL_2_BASE_IDX 1
+#define regRLC_R2I_CNTL_3 0x4cd8
+#define regRLC_R2I_CNTL_3_BASE_IDX 1
+#define regRLC_GPM_INT_STAT_TH0 0x4cdc
+#define regRLC_GPM_INT_STAT_TH0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_13 0x4cdd
+#define regRLC_GPM_GENERAL_13_BASE_IDX 1
+#define regRLC_GPM_GENERAL_14 0x4cde
+#define regRLC_GPM_GENERAL_14_BASE_IDX 1
+#define regRLC_GPM_GENERAL_15 0x4cdf
+#define regRLC_GPM_GENERAL_15_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1 0x4cea
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_2 0x4ceb
+#define regRLC_GPU_CLOCK_COUNT_LSB_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_2 0x4cec
+#define regRLC_GPU_CLOCK_COUNT_MSB_2_BASE_IDX 1
+#define regRLC_PACE_INT_DISABLE 0x4ced
+#define regRLC_PACE_INT_DISABLE_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2 0x4cef
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_RANGE 0x4cf0
+#define regRLC_RLCV_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_CNTL 0x4cf1
+#define regRLC_RLCV_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_STAT 0x4cf2
+#define regRLC_RLCV_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_0_DATA_LO 0x4cf3
+#define regRLC_RLCV_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_0_DATA_HI 0x4cf4
+#define regRLC_RLCV_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_1_DATA_LO 0x4cf5
+#define regRLC_RLCV_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_1_DATA_HI 0x4cf6
+#define regRLC_RLCV_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_2_DATA_LO 0x4cf7
+#define regRLC_RLCV_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_2_DATA_HI 0x4cf8
+#define regRLC_RLCV_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_3_DATA_LO 0x4cf9
+#define regRLC_RLCV_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_3_DATA_HI 0x4cfa
+#define regRLC_RLCV_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_1 0x4cfb
+#define regRLC_GPU_CLOCK_COUNT_LSB_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_1 0x4cfc
+#define regRLC_GPU_CLOCK_COUNT_MSB_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT 0x4d00
+#define regRLC_RLCV_SPARE_INT_BASE_IDX 1
+#define regRLC_FIREWALL_VIOLATION 0x4d02
+#define regRLC_FIREWALL_VIOLATION_BASE_IDX 1
+#define regRLC_PACE_TIMER_INT_0 0x4d04
+#define regRLC_PACE_TIMER_INT_0_BASE_IDX 1
+#define regRLC_PACE_TIMER_INT_1 0x4d05
+#define regRLC_PACE_TIMER_INT_1_BASE_IDX 1
+#define regRLC_PACE_TIMER_CTRL 0x4d06
+#define regRLC_PACE_TIMER_CTRL_BASE_IDX 1
+#define regRLC_SMU_CLK_REQ 0x4d08
+#define regRLC_SMU_CLK_REQ_BASE_IDX 1
+#define regRLC_CP_STAT_INVAL_STAT 0x4d09
+#define regRLC_CP_STAT_INVAL_STAT_BASE_IDX 1
+#define regRLC_CP_STAT_INVAL_CTRL 0x4d0a
+#define regRLC_CP_STAT_INVAL_CTRL_BASE_IDX 1
+#define regRLC_SPARE 0x4d0b
+#define regRLC_SPARE_BASE_IDX 1
+#define regRLC_SPP_CTRL 0x4d0c
+#define regRLC_SPP_CTRL_BASE_IDX 1
+#define regRLC_SPP_SHADER_PROFILE_EN 0x4d0d
+#define regRLC_SPP_SHADER_PROFILE_EN_BASE_IDX 1
+#define regRLC_SPP_SSF_CAPTURE_EN 0x4d0e
+#define regRLC_SPP_SSF_CAPTURE_EN_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_0 0x4d0f
+#define regRLC_SPP_SSF_THRESHOLD_0_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_1 0x4d10
+#define regRLC_SPP_SSF_THRESHOLD_1_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_2 0x4d11
+#define regRLC_SPP_SSF_THRESHOLD_2_BASE_IDX 1
+#define regRLC_SPP_INFLIGHT_RD_ADDR 0x4d12
+#define regRLC_SPP_INFLIGHT_RD_ADDR_BASE_IDX 1
+#define regRLC_SPP_INFLIGHT_RD_DATA 0x4d13
+#define regRLC_SPP_INFLIGHT_RD_DATA_BASE_IDX 1
+#define regRLC_SPP_PROF_INFO_1 0x4d18
+#define regRLC_SPP_PROF_INFO_1_BASE_IDX 1
+#define regRLC_SPP_PROF_INFO_2 0x4d19
+#define regRLC_SPP_PROF_INFO_2_BASE_IDX 1
+#define regRLC_SPP_GLOBAL_SH_ID 0x4d1a
+#define regRLC_SPP_GLOBAL_SH_ID_BASE_IDX 1
+#define regRLC_SPP_GLOBAL_SH_ID_VALID 0x4d1b
+#define regRLC_SPP_GLOBAL_SH_ID_VALID_BASE_IDX 1
+#define regRLC_SPP_STATUS 0x4d1c
+#define regRLC_SPP_STATUS_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_0 0x4d1d
+#define regRLC_SPP_PVT_STAT_0_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_1 0x4d1e
+#define regRLC_SPP_PVT_STAT_1_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_2 0x4d1f
+#define regRLC_SPP_PVT_STAT_2_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_3 0x4d20
+#define regRLC_SPP_PVT_STAT_3_BASE_IDX 1
+#define regRLC_SPP_PVT_LEVEL_MAX 0x4d21
+#define regRLC_SPP_PVT_LEVEL_MAX_BASE_IDX 1
+#define regRLC_SPP_STALL_STATE_UPDATE 0x4d22
+#define regRLC_SPP_STALL_STATE_UPDATE_BASE_IDX 1
+#define regRLC_SPP_PBB_INFO 0x4d23
+#define regRLC_SPP_PBB_INFO_BASE_IDX 1
+#define regRLC_SPP_RESET 0x4d24
+#define regRLC_SPP_RESET_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_RANGE 0x4d26
+#define regRLC_RLCP_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_CNTL 0x4d27
+#define regRLC_RLCP_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_STAT 0x4d28
+#define regRLC_RLCP_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_0_DATA_LO 0x4d29
+#define regRLC_RLCP_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_0_DATA_HI 0x4d2a
+#define regRLC_RLCP_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_1_DATA_LO 0x4d2b
+#define regRLC_RLCP_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_1_DATA_HI 0x4d2c
+#define regRLC_RLCP_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_2_DATA_LO 0x4d2d
+#define regRLC_RLCP_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_2_DATA_HI 0x4d2e
+#define regRLC_RLCP_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_3_DATA_LO 0x4d2f
+#define regRLC_RLCP_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_3_DATA_HI 0x4d30
+#define regRLC_RLCP_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_CAC_MASK_CNTL 0x4d45
+#define regRLC_CAC_MASK_CNTL_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_CNTR_CTRL 0x4d48
+#define regRLC_POWER_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_CNTR_CTRL 0x4d49
+#define regRLC_CLK_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_CNTR_CTRL 0x4d4a
+#define regRLC_DS_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_CNTR_CTRL 0x4d4b
+#define regRLC_ULV_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_CNTR_CTRL 0x4d4c
+#define regRLC_PCC_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_CNTR_CTRL 0x4d4d
+#define regRLC_GENERAL_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_EVENT_CNTR 0x4d50
+#define regRLC_POWER_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_EVENT_CNTR 0x4d51
+#define regRLC_CLK_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_EVENT_CNTR 0x4d52
+#define regRLC_DS_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_EVENT_CNTR 0x4d53
+#define regRLC_ULV_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_EVENT_CNTR 0x4d54
+#define regRLC_PCC_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_EVENT_CNTR 0x4d55
+#define regRLC_GENERAL_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_REF_CNTR 0x4d58
+#define regRLC_POWER_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_REF_CNTR 0x4d59
+#define regRLC_CLK_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_REF_CNTR 0x4d5a
+#define regRLC_DS_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_REF_CNTR 0x4d5b
+#define regRLC_ULV_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_REF_CNTR 0x4d5c
+#define regRLC_PCC_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_REF_CNTR 0x4d5d
+#define regRLC_GENERAL_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_CTRL 0x4d5e
+#define regRLC_GFX_IH_CLIENT_CTRL_BASE_IDX 1
+#define regRLC_GFX_IH_ARBITER_STAT 0x4d5f
+#define regRLC_GFX_IH_ARBITER_STAT_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SE_STAT_L 0x4d60
+#define regRLC_GFX_IH_CLIENT_SE_STAT_L_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SE_STAT_H 0x4d61
+#define regRLC_GFX_IH_CLIENT_SE_STAT_H_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SDMA_STAT 0x4d62
+#define regRLC_GFX_IH_CLIENT_SDMA_STAT_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_OTHER_STAT 0x4d63
+#define regRLC_GFX_IH_CLIENT_OTHER_STAT_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_DELAY_IND_ADDR 0x4d64
+#define regRLC_SPM_GLOBAL_DELAY_IND_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_DELAY_IND_DATA 0x4d65
+#define regRLC_SPM_GLOBAL_DELAY_IND_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_DELAY_IND_ADDR 0x4d66
+#define regRLC_SPM_SE_DELAY_IND_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_DELAY_IND_DATA 0x4d67
+#define regRLC_SPM_SE_DELAY_IND_DATA_BASE_IDX 1
+#define regRLC_LX6_CNTL 0x4d80
+#define regRLC_LX6_CNTL_BASE_IDX 1
+#define regRLC_XT_CORE_STATUS 0x4dd4
+#define regRLC_XT_CORE_STATUS_BASE_IDX 1
+#define regRLC_XT_CORE_INTERRUPT 0x4dd5
+#define regRLC_XT_CORE_INTERRUPT_BASE_IDX 1
+#define regRLC_XT_CORE_FAULT_INFO 0x4dd6
+#define regRLC_XT_CORE_FAULT_INFO_BASE_IDX 1
+#define regRLC_XT_CORE_ALT_RESET_VEC 0x4dd7
+#define regRLC_XT_CORE_ALT_RESET_VEC_BASE_IDX 1
+#define regRLC_XT_CORE_RESERVED 0x4dd8
+#define regRLC_XT_CORE_RESERVED_BASE_IDX 1
+#define regRLC_XT_INT_VEC_FORCE 0x4dd9
+#define regRLC_XT_INT_VEC_FORCE_BASE_IDX 1
+#define regRLC_XT_INT_VEC_CLEAR 0x4dda
+#define regRLC_XT_INT_VEC_CLEAR_BASE_IDX 1
+#define regRLC_XT_INT_VEC_MUX_SEL 0x4ddb
+#define regRLC_XT_INT_VEC_MUX_SEL_BASE_IDX 1
+#define regRLC_XT_INT_VEC_MUX_INT_SEL 0x4ddc
+#define regRLC_XT_INT_VEC_MUX_INT_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_SPM_LSB 0x4de4
+#define regRLC_GPU_CLOCK_COUNT_SPM_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_SPM_MSB 0x4de5
+#define regRLC_GPU_CLOCK_COUNT_SPM_MSB_BASE_IDX 1
+#define regRLC_SPM_THREAD_TRACE_CTRL 0x4de6
+#define regRLC_SPM_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regRLC_SPP_CAM_ADDR 0x4de8
+#define regRLC_SPP_CAM_ADDR_BASE_IDX 1
+#define regRLC_SPP_CAM_DATA 0x4de9
+#define regRLC_SPP_CAM_DATA_BASE_IDX 1
+#define regRLC_SPP_CAM_EXT_ADDR 0x4dea
+#define regRLC_SPP_CAM_EXT_ADDR_BASE_IDX 1
+#define regRLC_SPP_CAM_EXT_DATA 0x4deb
+#define regRLC_SPP_CAM_EXT_DATA_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_CTRL 0x4df1
+#define regRLC_CPAXI_DOORBELL_MON_CTRL_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_STAT 0x4df2
+#define regRLC_CPAXI_DOORBELL_MON_STAT_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_DATA_LSB 0x4df3
+#define regRLC_CPAXI_DOORBELL_MON_DATA_LSB_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_DATA_MSB 0x4df4
+#define regRLC_CPAXI_DOORBELL_MON_DATA_MSB_BASE_IDX 1
+#define regRLC_XT_DOORBELL_RANGE 0x4df5
+#define regRLC_XT_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_XT_DOORBELL_CNTL 0x4df6
+#define regRLC_XT_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_XT_DOORBELL_STAT 0x4df7
+#define regRLC_XT_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_XT_DOORBELL_0_DATA_LO 0x4df8
+#define regRLC_XT_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_0_DATA_HI 0x4df9
+#define regRLC_XT_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_1_DATA_LO 0x4dfa
+#define regRLC_XT_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_1_DATA_HI 0x4dfb
+#define regRLC_XT_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_2_DATA_LO 0x4dfc
+#define regRLC_XT_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_2_DATA_HI 0x4dfd
+#define regRLC_XT_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_3_DATA_LO 0x4dfe
+#define regRLC_XT_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_3_DATA_HI 0x4dff
+#define regRLC_XT_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_MEM_SLP_CNTL 0x4e00
+#define regRLC_MEM_SLP_CNTL_BASE_IDX 1
+#define regSMU_RLC_RESPONSE 0x4e01
+#define regSMU_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_RLCV_SAFE_MODE 0x4e02
+#define regRLC_RLCV_SAFE_MODE_BASE_IDX 1
+#define regRLC_SMU_SAFE_MODE 0x4e03
+#define regRLC_SMU_SAFE_MODE_BASE_IDX 1
+#define regRLC_RLCV_COMMAND 0x4e04
+#define regRLC_RLCV_COMMAND_BASE_IDX 1
+#define regRLC_SMU_MESSAGE 0x4e05
+#define regRLC_SMU_MESSAGE_BASE_IDX 1
+#define regRLC_SMU_MESSAGE_1 0x4e06
+#define regRLC_SMU_MESSAGE_1_BASE_IDX 1
+#define regRLC_SMU_MESSAGE_2 0x4e07
+#define regRLC_SMU_MESSAGE_2_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND 0x4e08
+#define regRLC_SRM_GPM_COMMAND_BASE_IDX 1
+#define regRLC_SRM_GPM_ABORT 0x4e09
+#define regRLC_SRM_GPM_ABORT_BASE_IDX 1
+#define regRLC_SMU_COMMAND 0x4e0a
+#define regRLC_SMU_COMMAND_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_1 0x4e0b
+#define regRLC_SMU_ARGUMENT_1_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_2 0x4e0c
+#define regRLC_SMU_ARGUMENT_2_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_3 0x4e0d
+#define regRLC_SMU_ARGUMENT_3_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_4 0x4e0e
+#define regRLC_SMU_ARGUMENT_4_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_5 0x4e0f
+#define regRLC_SMU_ARGUMENT_5_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_ADDR_HI 0x4e10
+#define regRLC_IMU_BOOTLOAD_ADDR_HI_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_ADDR_LO 0x4e11
+#define regRLC_IMU_BOOTLOAD_ADDR_LO_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_SIZE 0x4e12
+#define regRLC_IMU_BOOTLOAD_SIZE_BASE_IDX 1
+#define regRLC_IMU_MISC 0x4e16
+#define regRLC_IMU_MISC_BASE_IDX 1
+#define regRLC_IMU_RESET_VECTOR 0x4e17
+#define regRLC_IMU_RESET_VECTOR_BASE_IDX 1
+
+
+// addressBlock: gc_rlcsdec
+// base address: 0x3b980
+#define regRLC_RLCS_DEC_START 0x4e60
+#define regRLC_RLCS_DEC_START_BASE_IDX 1
+#define regRLC_RLCS_DEC_DUMP_ADDR 0x4e61
+#define regRLC_RLCS_DEC_DUMP_ADDR_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_1 0x4e62
+#define regRLC_RLCS_EXCEPTION_REG_1_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_2 0x4e63
+#define regRLC_RLCS_EXCEPTION_REG_2_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_3 0x4e64
+#define regRLC_RLCS_EXCEPTION_REG_3_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_4 0x4e65
+#define regRLC_RLCS_EXCEPTION_REG_4_BASE_IDX 1
+#define regRLC_RLCS_CGCG_REQUEST 0x4e66
+#define regRLC_RLCS_CGCG_REQUEST_BASE_IDX 1
+#define regRLC_RLCS_CGCG_STATUS 0x4e67
+#define regRLC_RLCS_CGCG_STATUS_BASE_IDX 1
+#define regRLC_RLCS_SOC_DS_CNTL 0x4e68
+#define regRLC_RLCS_SOC_DS_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_DS_CNTL 0x4e69
+#define regRLC_RLCS_GFX_DS_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_DS_ALLOW_MASK_CNTL 0x4e6a
+#define regRLC_RLCS_GFX_DS_ALLOW_MASK_CNTL_BASE_IDX 1
+#define regRLC_GPM_STAT 0x4e6b
+#define regRLC_GPM_STAT_BASE_IDX 1
+#define regRLC_RLCS_GPM_STAT 0x4e6b
+#define regRLC_RLCS_GPM_STAT_BASE_IDX 1
+#define regRLC_RLCS_ABORTED_PD_SEQUENCE 0x4e6c
+#define regRLC_RLCS_ABORTED_PD_SEQUENCE_BASE_IDX 1
+#define regRLC_RLCS_DIDT_FORCE_STALL 0x4e6d
+#define regRLC_RLCS_DIDT_FORCE_STALL_BASE_IDX 1
+#define regRLC_RLCS_IOV_CMD_STATUS 0x4e6e
+#define regRLC_RLCS_IOV_CMD_STATUS_BASE_IDX 1
+#define regRLC_RLCS_IOV_CNTX_LOC_SIZE 0x4e6f
+#define regRLC_RLCS_IOV_CNTX_LOC_SIZE_BASE_IDX 1
+#define regRLC_RLCS_IOV_SCH_BLOCK 0x4e70
+#define regRLC_RLCS_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_RLCS_IOV_VM_BUSY_STATUS 0x4e71
+#define regRLC_RLCS_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_RLCS_GPM_STAT_2 0x4e72
+#define regRLC_RLCS_GPM_STAT_2_BASE_IDX 1
+#define regRLC_RLCS_GRBM_SOFT_RESET 0x4e73
+#define regRLC_RLCS_GRBM_SOFT_RESET_BASE_IDX 1
+#define regRLC_RLCS_PG_CHANGE_STATUS 0x4e74
+#define regRLC_RLCS_PG_CHANGE_STATUS_BASE_IDX 1
+#define regRLC_RLCS_PG_CHANGE_READ 0x4e75
+#define regRLC_RLCS_PG_CHANGE_READ_BASE_IDX 1
+#define regRLC_RLCS_IH_SEMAPHORE 0x4e76
+#define regRLC_RLCS_IH_SEMAPHORE_BASE_IDX 1
+#define regRLC_RLCS_IH_COOKIE_SEMAPHORE 0x4e77
+#define regRLC_RLCS_IH_COOKIE_SEMAPHORE_BASE_IDX 1
+#define regRLC_RLCS_WGP_STATUS 0x4e78
+#define regRLC_RLCS_WGP_STATUS_BASE_IDX 1
+#define regRLC_RLCS_WGP_READ 0x4e79
+#define regRLC_RLCS_WGP_READ_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_CTRL_1 0x4e7a
+#define regRLC_RLCS_CP_INT_CTRL_1_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_CTRL_2 0x4e7b
+#define regRLC_RLCS_CP_INT_CTRL_2_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_INFO_1 0x4e7c
+#define regRLC_RLCS_CP_INT_INFO_1_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_INFO_2 0x4e7d
+#define regRLC_RLCS_CP_INT_INFO_2_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_CTRL 0x4e7e
+#define regRLC_RLCS_SPM_INT_CTRL_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_INFO_1 0x4e7f
+#define regRLC_RLCS_SPM_INT_INFO_1_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_INFO_2 0x4e80
+#define regRLC_RLCS_SPM_INT_INFO_2_BASE_IDX 1
+#define regRLC_RLCS_DSM_TRIG 0x4e81
+#define regRLC_RLCS_DSM_TRIG_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_STATUS 0x4e82
+#define regRLC_RLCS_BOOTLOAD_STATUS_BASE_IDX 1
+#define regRLC_RLCS_POWER_BRAKE_CNTL 0x4e83
+#define regRLC_RLCS_POWER_BRAKE_CNTL_BASE_IDX 1
+#define regRLC_RLCS_POWER_BRAKE_CNTL_TH1 0x4e84
+#define regRLC_RLCS_POWER_BRAKE_CNTL_TH1_BASE_IDX 1
+#define regRLC_RLCS_GRBM_IDLE_BUSY_STAT 0x4e85
+#define regRLC_RLCS_GRBM_IDLE_BUSY_STAT_BASE_IDX 1
+#define regRLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL 0x4e86
+#define regRLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL_BASE_IDX 1
+#define regRLC_RLCS_CMP_IDLE_CNTL 0x4e87
+#define regRLC_RLCS_CMP_IDLE_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_0 0x4e88
+#define regRLC_RLCS_GENERAL_0_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_1 0x4e89
+#define regRLC_RLCS_GENERAL_1_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_2 0x4e8a
+#define regRLC_RLCS_GENERAL_2_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_3 0x4e8b
+#define regRLC_RLCS_GENERAL_3_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_4 0x4e8c
+#define regRLC_RLCS_GENERAL_4_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_5 0x4e8d
+#define regRLC_RLCS_GENERAL_5_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_6 0x4e8e
+#define regRLC_RLCS_GENERAL_6_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_7 0x4e8f
+#define regRLC_RLCS_GENERAL_7_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_8 0x4e90
+#define regRLC_RLCS_GENERAL_8_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_9 0x4e91
+#define regRLC_RLCS_GENERAL_9_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_10 0x4e92
+#define regRLC_RLCS_GENERAL_10_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_11 0x4e93
+#define regRLC_RLCS_GENERAL_11_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_12 0x4e94
+#define regRLC_RLCS_GENERAL_12_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_13 0x4e95
+#define regRLC_RLCS_GENERAL_13_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_14 0x4e96
+#define regRLC_RLCS_GENERAL_14_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_15 0x4e97
+#define regRLC_RLCS_GENERAL_15_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_16 0x4e98
+#define regRLC_RLCS_GENERAL_16_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_1 0x4ec5
+#define regRLC_RLCS_AUXILIARY_REG_1_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_2 0x4ec6
+#define regRLC_RLCS_AUXILIARY_REG_2_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_3 0x4ec7
+#define regRLC_RLCS_AUXILIARY_REG_3_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_4 0x4ec8
+#define regRLC_RLCS_AUXILIARY_REG_4_BASE_IDX 1
+#define regRLC_RLCS_SPM_SQTT_MODE 0x4ec9
+#define regRLC_RLCS_SPM_SQTT_MODE_BASE_IDX 1
+#define regRLC_RLCS_CP_DMA_SRCID_OVER 0x4eca
+#define regRLC_RLCS_CP_DMA_SRCID_OVER_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS1 0x4ecb
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS1_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS2 0x4ecc
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS2_BASE_IDX 1
+#define regRLC_RLCS_IMU_VIDCHG_CNTL 0x4ecd
+#define regRLC_RLCS_IMU_VIDCHG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_EDC_INT_CNTL 0x4ece
+#define regRLC_RLCS_EDC_INT_CNTL_BASE_IDX 1
+#define regRLC_RLCS_KMD_LOG_CNTL1 0x4ecf
+#define regRLC_RLCS_KMD_LOG_CNTL1_BASE_IDX 1
+#define regRLC_RLCS_KMD_LOG_CNTL2 0x4ed0
+#define regRLC_RLCS_KMD_LOG_CNTL2_BASE_IDX 1
+#define regRLC_RLCS_GPM_LEGACY_INT_STAT 0x4ed1
+#define regRLC_RLCS_GPM_LEGACY_INT_STAT_BASE_IDX 1
+#define regRLC_RLCS_GPM_LEGACY_INT_DISABLE 0x4ed2
+#define regRLC_RLCS_GPM_LEGACY_INT_DISABLE_BASE_IDX 1
+#define regRLC_RLCS_SRM_SRCID_CNTL 0x4ed3
+#define regRLC_RLCS_SRM_SRCID_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_0 0x4ed4
+#define regRLC_RLCS_GCR_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_1 0x4ed5
+#define regRLC_RLCS_GCR_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_2 0x4ed6
+#define regRLC_RLCS_GCR_DATA_2_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_3 0x4ed7
+#define regRLC_RLCS_GCR_DATA_3_BASE_IDX 1
+#define regRLC_RLCS_GCR_STATUS 0x4ed8
+#define regRLC_RLCS_GCR_STATUS_BASE_IDX 1
+#define regRLC_RLCS_PERFMON_CLK_CNTL_UCODE 0x4ed9
+#define regRLC_RLCS_PERFMON_CLK_CNTL_UCODE_BASE_IDX 1
+#define regRLC_RLCS_UTCL2_CNTL 0x4eda
+#define regRLC_RLCS_UTCL2_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA0 0x4edb
+#define regRLC_RLCS_IMU_RLC_MSG_DATA0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA1 0x4edc
+#define regRLC_RLCS_IMU_RLC_MSG_DATA1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA2 0x4edd
+#define regRLC_RLCS_IMU_RLC_MSG_DATA2_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA3 0x4ede
+#define regRLC_RLCS_IMU_RLC_MSG_DATA3_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA4 0x4edf
+#define regRLC_RLCS_IMU_RLC_MSG_DATA4_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_CONTROL 0x4ee0
+#define regRLC_RLCS_IMU_RLC_MSG_CONTROL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_CNTL 0x4ee1
+#define regRLC_RLCS_IMU_RLC_MSG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_DATA0 0x4ee2
+#define regRLC_RLCS_RLC_IMU_MSG_DATA0_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_CONTROL 0x4ee3
+#define regRLC_RLCS_RLC_IMU_MSG_CONTROL_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_CNTL 0x4ee4
+#define regRLC_RLCS_RLC_IMU_MSG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_0 0x4ee5
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_1 0x4ee6
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MUTEX_CNTL 0x4ee7
+#define regRLC_RLCS_IMU_RLC_MUTEX_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_STATUS 0x4ee8
+#define regRLC_RLCS_IMU_RLC_STATUS_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_STATUS 0x4ee9
+#define regRLC_RLCS_RLC_IMU_STATUS_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_DATA_1 0x4eea
+#define regRLC_RLCS_IMU_RAM_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_1_LSB 0x4eeb
+#define regRLC_RLCS_IMU_RAM_ADDR_1_LSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_1_MSB 0x4eec
+#define regRLC_RLCS_IMU_RAM_ADDR_1_MSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_DATA_0 0x4eed
+#define regRLC_RLCS_IMU_RAM_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_0_LSB 0x4eee
+#define regRLC_RLCS_IMU_RAM_ADDR_0_LSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_0_MSB 0x4eef
+#define regRLC_RLCS_IMU_RAM_ADDR_0_MSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_CNTL 0x4ef0
+#define regRLC_RLCS_IMU_RAM_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_GFX_DOORBELL_FENCE 0x4ef1
+#define regRLC_RLCS_IMU_GFX_DOORBELL_FENCE_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_CNTL_1 0x4ef3
+#define regRLC_RLCS_SDMA_INT_CNTL_1_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_CNTL_2 0x4ef4
+#define regRLC_RLCS_SDMA_INT_CNTL_2_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_STAT 0x4ef5
+#define regRLC_RLCS_SDMA_INT_STAT_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_INFO 0x4ef6
+#define regRLC_RLCS_SDMA_INT_INFO_BASE_IDX 1
+#define regRLC_RLCS_PMM_CGCG_CNTL 0x4ef7
+#define regRLC_RLCS_PMM_CGCG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_MEM_POWER_CTRL_LO 0x4ef8
+#define regRLC_RLCS_GFX_MEM_POWER_CTRL_LO_BASE_IDX 1
+#define regRLC_RLCS_GFX_RM_CNTL 0x4efa
+#define regRLC_RLCS_GFX_RM_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_1 0x4efb
+#define regRLC_RLCS_IH_CTRL_1_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_2 0x4efc
+#define regRLC_RLCS_IH_CTRL_2_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_3 0x4efd
+#define regRLC_RLCS_IH_CTRL_3_BASE_IDX 1
+#define regRLC_RLCS_IH_STATUS 0x4efe
+#define regRLC_RLCS_IH_STATUS_BASE_IDX 1
+#define regRLC_RLCS_DEC_END 0x4fff
+#define regRLC_RLCS_DEC_END_BASE_IDX 1
+
+
+// addressBlock: gc_pfvfdec_rlc
+// base address: 0x2a600
+#define regRLC_SAFE_MODE 0x0980
+#define regRLC_SAFE_MODE_BASE_IDX 1
+#define regRLC_SPM_SAMPLE_CNT 0x0981
+#define regRLC_SPM_SAMPLE_CNT_BASE_IDX 1
+#define regRLC_SPM_MC_CNTL 0x0982
+#define regRLC_SPM_MC_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_CNTL 0x0983
+#define regRLC_SPM_INT_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_STATUS 0x0984
+#define regRLC_SPM_INT_STATUS_BASE_IDX 1
+#define regRLC_SPM_INT_INFO_1 0x0985
+#define regRLC_SPM_INT_INFO_1_BASE_IDX 1
+#define regRLC_SPM_INT_INFO_2 0x0986
+#define regRLC_SPM_INT_INFO_2_BASE_IDX 1
+#define regRLC_CSIB_ADDR_LO 0x0987
+#define regRLC_CSIB_ADDR_LO_BASE_IDX 1
+#define regRLC_CSIB_ADDR_HI 0x0988
+#define regRLC_CSIB_ADDR_HI_BASE_IDX 1
+#define regRLC_CSIB_LENGTH 0x0989
+#define regRLC_CSIB_LENGTH_BASE_IDX 1
+#define regRLC_CP_SCHEDULERS 0x098a
+#define regRLC_CP_SCHEDULERS_BASE_IDX 1
+#define regRLC_CP_EOF_INT 0x098b
+#define regRLC_CP_EOF_INT_BASE_IDX 1
+#define regRLC_CP_EOF_INT_CNT 0x098c
+#define regRLC_CP_EOF_INT_CNT_BASE_IDX 1
+#define regRLC_SPARE_INT_0 0x098d
+#define regRLC_SPARE_INT_0_BASE_IDX 1
+#define regRLC_SPARE_INT_1 0x098e
+#define regRLC_SPARE_INT_1_BASE_IDX 1
+#define regRLC_SPARE_INT_2 0x098f
+#define regRLC_SPARE_INT_2_BASE_IDX 1
+#define regRLC_PACE_SPARE_INT 0x0990
+#define regRLC_PACE_SPARE_INT_BASE_IDX 1
+#define regRLC_PACE_SPARE_INT_1 0x0991
+#define regRLC_PACE_SPARE_INT_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT_1 0x0992
+#define regRLC_RLCV_SPARE_INT_1_BASE_IDX 1
+
+
+// addressBlock: gc_pwrdec
+// base address: 0x3c000
+#define regCGTS_TCC_DISABLE 0x5006
+#define regCGTS_TCC_DISABLE_BASE_IDX 1
+#define regCGTX_SPI_DEBUG_CLK_CTRL 0x507f
+#define regCGTX_SPI_DEBUG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_VGT_CLK_CTRL 0x5084
+#define regCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define regCGTT_IA_CLK_CTRL 0x5085
+#define regCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_WD_CLK_CTRL 0x5086
+#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define regCGTT_GS_NGG_CLK_CTRL 0x5087
+#define regCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_PA_CLK_CTRL 0x5088
+#define regCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL0 0x5089
+#define regCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL1 0x508a
+#define regCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL2 0x508b
+#define regCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_SQG_CLK_CTRL 0x508d
+#define regCGTT_SQG_CLK_CTRL_BASE_IDX 1
+#define regSQ_ALU_CLK_CTRL 0x508e
+#define regSQ_ALU_CLK_CTRL_BASE_IDX 1
+#define regSQ_TEX_CLK_CTRL 0x508f
+#define regSQ_TEX_CLK_CTRL_BASE_IDX 1
+#define regSQ_LDS_CLK_CTRL 0x5090
+#define regSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define regICG_SP_CLK_CTRL 0x5093
+#define regICG_SP_CLK_CTRL_BASE_IDX 1
+#define regTA_CGTT_CTRL 0x509d
+#define regTA_CGTT_CTRL_BASE_IDX 1
+#define regDB_CGTT_CLK_CTRL_0 0x50a4
+#define regDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define regCB_CGTT_SCLK_CTRL 0x50a8
+#define regCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regGFX_ICG_GL2A_CTRL 0x50ac
+#define regGFX_ICG_GL2A_CTRL_BASE_IDX 1
+#define regCGTT_CP_CLK_CTRL 0x50b0
+#define regCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPF_CLK_CTRL 0x50b1
+#define regCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPC_CLK_CTRL 0x50b2
+#define regCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_RLC_CLK_CTRL 0x50b5
+#define regCGTT_RLC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL3 0x50bc
+#define regCGTT_SC_CLK_CTRL3_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL4 0x50bd
+#define regCGTT_SC_CLK_CTRL4_BASE_IDX 1
+#define regGCEA_ICG_CTRL 0x50c4
+#define regGCEA_ICG_CTRL_BASE_IDX 1
+#define regGL1I_GL1R_MGCG_OVERRIDE 0x50e4
+#define regGL1I_GL1R_MGCG_OVERRIDE_BASE_IDX 1
+#define regGL1H_ICG_CTRL 0x50e8
+#define regGL1H_ICG_CTRL_BASE_IDX 1
+#define regCHI_CHR_MGCG_OVERRIDE 0x50e9
+#define regCHI_CHR_MGCG_OVERRIDE_BASE_IDX 1
+#define regICG_GL1C_CLK_CTRL 0x50ec
+#define regICG_GL1C_CLK_CTRL_BASE_IDX 1
+#define regICG_GL1A_CTRL 0x50f0
+#define regICG_GL1A_CTRL_BASE_IDX 1
+#define regICG_CHA_CTRL 0x50f1
+#define regICG_CHA_CTRL_BASE_IDX 1
+#define regGUS_ICG_CTRL 0x50f4
+#define regGUS_ICG_CTRL_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL0 0x50f8
+#define regCGTT_PH_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL1 0x50f9
+#define regCGTT_PH_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL2 0x50fa
+#define regCGTT_PH_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL3 0x50fb
+#define regCGTT_PH_CLK_CTRL3_BASE_IDX 1
+#define regGFX_ICG_GL2C_CTRL 0x50fc
+#define regGFX_ICG_GL2C_CTRL_BASE_IDX 1
+#define regGFX_ICG_GL2C_CTRL1 0x50fd
+#define regGFX_ICG_GL2C_CTRL1_BASE_IDX 1
+#define regICG_LDS_CLK_CTRL 0x5114
+#define regICG_LDS_CLK_CTRL_BASE_IDX 1
+#define regGFX_ICG_UTCL1_CTRL 0x511c
+#define regGFX_ICG_UTCL1_CTRL_BASE_IDX 1
+#define regICG_CHC_CLK_CTRL 0x5140
+#define regICG_CHC_CLK_CTRL_BASE_IDX 1
+#define regICG_CHCG_CLK_CTRL 0x5144
+#define regICG_CHCG_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_pspdec
+// base address: 0x3f000
+#define regCP_MES_DM_INDEX_ADDR 0x5c00
+#define regCP_MES_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_MES_DM_INDEX_DATA 0x5c01
+#define regCP_MES_DM_INDEX_DATA_BASE_IDX 1
+#define regCP_MEC_DM_INDEX_ADDR 0x5c02
+#define regCP_MEC_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_MEC_DM_INDEX_DATA 0x5c03
+#define regCP_MEC_DM_INDEX_DATA_BASE_IDX 1
+#define regCP_GFX_RS64_DM_INDEX_ADDR 0x5c04
+#define regCP_GFX_RS64_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_GFX_RS64_DM_INDEX_DATA 0x5c05
+#define regCP_GFX_RS64_DM_INDEX_DATA_BASE_IDX 1
+#define regCPG_PSP_DEBUG 0x5c10
+#define regCPG_PSP_DEBUG_BASE_IDX 1
+#define regCPC_PSP_DEBUG 0x5c11
+#define regCPC_PSP_DEBUG_BASE_IDX 1
+#define regGRBM_IOV_ERROR_FIFO 0x5e07
+#define regGRBM_IOV_ERROR_FIFO_BASE_IDX 1
+#define regGRBM_SEC_CNTL 0x5e0d
+#define regGRBM_SEC_CNTL_BASE_IDX 1
+#define regGRBM_CAM_INDEX 0x5e10
+#define regGRBM_CAM_INDEX_BASE_IDX 1
+#define regGRBM_HYP_CAM_INDEX 0x5e10
+#define regGRBM_HYP_CAM_INDEX_BASE_IDX 1
+#define regGRBM_CAM_DATA 0x5e11
+#define regGRBM_CAM_DATA_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA 0x5e11
+#define regGRBM_HYP_CAM_DATA_BASE_IDX 1
+#define regGRBM_CAM_DATA_UPPER 0x5e12
+#define regGRBM_CAM_DATA_UPPER_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA_UPPER 0x5e12
+#define regGRBM_HYP_CAM_DATA_UPPER_BASE_IDX 1
+#define regRLC_FWL_FIRST_VIOL_ADDR 0x5f26
+#define regRLC_FWL_FIRST_VIOL_ADDR_BASE_IDX 1
+
+
+// addressBlock: gc_gfx_imu_gfx_imudec
+// base address: 0x38000
+#define regGFX_IMU_C2PMSG_0 0x4000
+#define regGFX_IMU_C2PMSG_0_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_1 0x4001
+#define regGFX_IMU_C2PMSG_1_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_2 0x4002
+#define regGFX_IMU_C2PMSG_2_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_3 0x4003
+#define regGFX_IMU_C2PMSG_3_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_4 0x4004
+#define regGFX_IMU_C2PMSG_4_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_5 0x4005
+#define regGFX_IMU_C2PMSG_5_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_6 0x4006
+#define regGFX_IMU_C2PMSG_6_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_7 0x4007
+#define regGFX_IMU_C2PMSG_7_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_8 0x4008
+#define regGFX_IMU_C2PMSG_8_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_9 0x4009
+#define regGFX_IMU_C2PMSG_9_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_10 0x400a
+#define regGFX_IMU_C2PMSG_10_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_11 0x400b
+#define regGFX_IMU_C2PMSG_11_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_12 0x400c
+#define regGFX_IMU_C2PMSG_12_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_13 0x400d
+#define regGFX_IMU_C2PMSG_13_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_14 0x400e
+#define regGFX_IMU_C2PMSG_14_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_15 0x400f
+#define regGFX_IMU_C2PMSG_15_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_16 0x4010
+#define regGFX_IMU_C2PMSG_16_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_17 0x4011
+#define regGFX_IMU_C2PMSG_17_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_18 0x4012
+#define regGFX_IMU_C2PMSG_18_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_19 0x4013
+#define regGFX_IMU_C2PMSG_19_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_20 0x4014
+#define regGFX_IMU_C2PMSG_20_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_21 0x4015
+#define regGFX_IMU_C2PMSG_21_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_22 0x4016
+#define regGFX_IMU_C2PMSG_22_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_23 0x4017
+#define regGFX_IMU_C2PMSG_23_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_24 0x4018
+#define regGFX_IMU_C2PMSG_24_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_25 0x4019
+#define regGFX_IMU_C2PMSG_25_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_26 0x401a
+#define regGFX_IMU_C2PMSG_26_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_27 0x401b
+#define regGFX_IMU_C2PMSG_27_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_28 0x401c
+#define regGFX_IMU_C2PMSG_28_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_29 0x401d
+#define regGFX_IMU_C2PMSG_29_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_30 0x401e
+#define regGFX_IMU_C2PMSG_30_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_31 0x401f
+#define regGFX_IMU_C2PMSG_31_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_32 0x4020
+#define regGFX_IMU_C2PMSG_32_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_33 0x4021
+#define regGFX_IMU_C2PMSG_33_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_34 0x4022
+#define regGFX_IMU_C2PMSG_34_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_35 0x4023
+#define regGFX_IMU_C2PMSG_35_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_36 0x4024
+#define regGFX_IMU_C2PMSG_36_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_37 0x4025
+#define regGFX_IMU_C2PMSG_37_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_38 0x4026
+#define regGFX_IMU_C2PMSG_38_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_39 0x4027
+#define regGFX_IMU_C2PMSG_39_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_40 0x4028
+#define regGFX_IMU_C2PMSG_40_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_41 0x4029
+#define regGFX_IMU_C2PMSG_41_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_42 0x402a
+#define regGFX_IMU_C2PMSG_42_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_43 0x402b
+#define regGFX_IMU_C2PMSG_43_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_44 0x402c
+#define regGFX_IMU_C2PMSG_44_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_45 0x402d
+#define regGFX_IMU_C2PMSG_45_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_46 0x402e
+#define regGFX_IMU_C2PMSG_46_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_47 0x402f
+#define regGFX_IMU_C2PMSG_47_BASE_IDX 1
+#define regGFX_IMU_MSG_FLAGS 0x403f
+#define regGFX_IMU_MSG_FLAGS_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL0 0x4040
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL0_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL1 0x4041
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL1_BASE_IDX 1
+#define regGFX_IMU_PWRMGT_IRQ_CTRL 0x4042
+#define regGFX_IMU_PWRMGT_IRQ_CTRL_BASE_IDX 1
+#define regGFX_IMU_MP1_MUTEX 0x4043
+#define regGFX_IMU_MP1_MUTEX_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_4 0x4046
+#define regGFX_IMU_RLC_DATA_4_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_3 0x4047
+#define regGFX_IMU_RLC_DATA_3_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_2 0x4048
+#define regGFX_IMU_RLC_DATA_2_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_1 0x4049
+#define regGFX_IMU_RLC_DATA_1_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_0 0x404a
+#define regGFX_IMU_RLC_DATA_0_BASE_IDX 1
+#define regGFX_IMU_RLC_CMD 0x404b
+#define regGFX_IMU_RLC_CMD_BASE_IDX 1
+#define regGFX_IMU_RLC_MUTEX 0x404c
+#define regGFX_IMU_RLC_MUTEX_BASE_IDX 1
+#define regGFX_IMU_RLC_MSG_STATUS 0x404f
+#define regGFX_IMU_RLC_MSG_STATUS_BASE_IDX 1
+#define regRLC_GFX_IMU_DATA_0 0x4052
+#define regRLC_GFX_IMU_DATA_0_BASE_IDX 1
+#define regRLC_GFX_IMU_CMD 0x4053
+#define regRLC_GFX_IMU_CMD_BASE_IDX 1
+#define regGFX_IMU_RLC_STATUS 0x4054
+#define regGFX_IMU_RLC_STATUS_BASE_IDX 1
+#define regGFX_IMU_STATUS 0x4055
+#define regGFX_IMU_STATUS_BASE_IDX 1
+#define regGFX_IMU_SOC_DATA 0x4059
+#define regGFX_IMU_SOC_DATA_BASE_IDX 1
+#define regGFX_IMU_SOC_ADDR 0x405a
+#define regGFX_IMU_SOC_ADDR_BASE_IDX 1
+#define regGFX_IMU_SOC_REQ 0x405b
+#define regGFX_IMU_SOC_REQ_BASE_IDX 1
+#define regGFX_IMU_VF_CTRL 0x405c
+#define regGFX_IMU_VF_CTRL_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY 0x4060
+#define regGFX_IMU_TELEMETRY_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY_DATA 0x4061
+#define regGFX_IMU_TELEMETRY_DATA_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY_TEMPERATURE 0x4062
+#define regGFX_IMU_TELEMETRY_TEMPERATURE_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_0 0x4068
+#define regGFX_IMU_SCRATCH_0_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_1 0x4069
+#define regGFX_IMU_SCRATCH_1_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_2 0x406a
+#define regGFX_IMU_SCRATCH_2_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_3 0x406b
+#define regGFX_IMU_SCRATCH_3_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_4 0x406c
+#define regGFX_IMU_SCRATCH_4_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_5 0x406d
+#define regGFX_IMU_SCRATCH_5_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_6 0x406e
+#define regGFX_IMU_SCRATCH_6_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_7 0x406f
+#define regGFX_IMU_SCRATCH_7_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_8 0x4070
+#define regGFX_IMU_SCRATCH_8_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_9 0x4071
+#define regGFX_IMU_SCRATCH_9_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_10 0x4072
+#define regGFX_IMU_SCRATCH_10_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_11 0x4073
+#define regGFX_IMU_SCRATCH_11_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_12 0x4074
+#define regGFX_IMU_SCRATCH_12_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_13 0x4075
+#define regGFX_IMU_SCRATCH_13_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_14 0x4076
+#define regGFX_IMU_SCRATCH_14_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_15 0x4077
+#define regGFX_IMU_SCRATCH_15_BASE_IDX 1
+#define regGFX_IMU_FW_GTS_LO 0x4078
+#define regGFX_IMU_FW_GTS_LO_BASE_IDX 1
+#define regGFX_IMU_FW_GTS_HI 0x4079
+#define regGFX_IMU_FW_GTS_HI_BASE_IDX 1
+#define regGFX_IMU_GTS_OFFSET_LO 0x407a
+#define regGFX_IMU_GTS_OFFSET_LO_BASE_IDX 1
+#define regGFX_IMU_GTS_OFFSET_HI 0x407b
+#define regGFX_IMU_GTS_OFFSET_HI_BASE_IDX 1
+#define regGFX_IMU_RLC_GTS_OFFSET_LO 0x407c
+#define regGFX_IMU_RLC_GTS_OFFSET_LO_BASE_IDX 1
+#define regGFX_IMU_RLC_GTS_OFFSET_HI 0x407d
+#define regGFX_IMU_RLC_GTS_OFFSET_HI_BASE_IDX 1
+#define regGFX_IMU_CORE_INT_STATUS 0x407f
+#define regGFX_IMU_CORE_INT_STATUS_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_MASK 0x4080
+#define regGFX_IMU_PIC_INT_MASK_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_LVL 0x4081
+#define regGFX_IMU_PIC_INT_LVL_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_EDGE 0x4082
+#define regGFX_IMU_PIC_INT_EDGE_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_0 0x4083
+#define regGFX_IMU_PIC_INT_PRI_0_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_1 0x4084
+#define regGFX_IMU_PIC_INT_PRI_1_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_2 0x4085
+#define regGFX_IMU_PIC_INT_PRI_2_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_3 0x4086
+#define regGFX_IMU_PIC_INT_PRI_3_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_4 0x4087
+#define regGFX_IMU_PIC_INT_PRI_4_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_5 0x4088
+#define regGFX_IMU_PIC_INT_PRI_5_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_6 0x4089
+#define regGFX_IMU_PIC_INT_PRI_6_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_7 0x408a
+#define regGFX_IMU_PIC_INT_PRI_7_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_STATUS 0x408b
+#define regGFX_IMU_PIC_INT_STATUS_BASE_IDX 1
+#define regGFX_IMU_PIC_INTR 0x408c
+#define regGFX_IMU_PIC_INTR_BASE_IDX 1
+#define regGFX_IMU_PIC_INTR_ID 0x408d
+#define regGFX_IMU_PIC_INTR_ID_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_1 0x4090
+#define regGFX_IMU_IH_CTRL_1_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_2 0x4091
+#define regGFX_IMU_IH_CTRL_2_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_3 0x4092
+#define regGFX_IMU_IH_CTRL_3_BASE_IDX 1
+#define regGFX_IMU_IH_STATUS 0x4093
+#define regGFX_IMU_IH_STATUS_BASE_IDX 1
+#define regGFX_IMU_FUSESTRAP 0x4094
+#define regGFX_IMU_FUSESTRAP_BASE_IDX 1
+#define regGFX_IMU_SMUIO_VIDCHG_CTRL 0x4098
+#define regGFX_IMU_SMUIO_VIDCHG_CTRL_BASE_IDX 1
+#define regGFX_IMU_GFXCLK_BYPASS_CTRL 0x409c
+#define regGFX_IMU_GFXCLK_BYPASS_CTRL_BASE_IDX 1
+#define regGFX_IMU_CLK_CTRL 0x409d
+#define regGFX_IMU_CLK_CTRL_BASE_IDX 1
+#define regGFX_IMU_DOORBELL_CONTROL 0x409e
+#define regGFX_IMU_DOORBELL_CONTROL_BASE_IDX 1
+#define regGFX_IMU_RLC_CG_CTRL 0x40a0
+#define regGFX_IMU_RLC_CG_CTRL_BASE_IDX 1
+#define regGFX_IMU_RLC_THROTTLE_GFX 0x40a1
+#define regGFX_IMU_RLC_THROTTLE_GFX_BASE_IDX 1
+#define regGFX_IMU_RLC_RESET_VECTOR 0x40a2
+#define regGFX_IMU_RLC_RESET_VECTOR_BASE_IDX 1
+#define regGFX_IMU_RLC_OVERRIDE 0x40a3
+#define regGFX_IMU_RLC_OVERRIDE_BASE_IDX 1
+#define regGFX_IMU_DPM_CONTROL 0x40a8
+#define regGFX_IMU_DPM_CONTROL_BASE_IDX 1
+#define regGFX_IMU_DPM_ACC 0x40a9
+#define regGFX_IMU_DPM_ACC_BASE_IDX 1
+#define regGFX_IMU_DPM_REF_COUNTER 0x40aa
+#define regGFX_IMU_DPM_REF_COUNTER_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_INDEX 0x40ac
+#define regGFX_IMU_RLC_RAM_INDEX_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_ADDR_HIGH 0x40ad
+#define regGFX_IMU_RLC_RAM_ADDR_HIGH_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_ADDR_LOW 0x40ae
+#define regGFX_IMU_RLC_RAM_ADDR_LOW_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_DATA 0x40af
+#define regGFX_IMU_RLC_RAM_DATA_BASE_IDX 1
+#define regGFX_IMU_FENCE_CTRL 0x40b0
+#define regGFX_IMU_FENCE_CTRL_BASE_IDX 1
+#define regGFX_IMU_FENCE_LOG_INIT 0x40b1
+#define regGFX_IMU_FENCE_LOG_INIT_BASE_IDX 1
+#define regGFX_IMU_FENCE_LOG_ADDR 0x40b2
+#define regGFX_IMU_FENCE_LOG_ADDR_BASE_IDX 1
+#define regGFX_IMU_PROGRAM_CTR 0x40b5
+#define regGFX_IMU_PROGRAM_CTR_BASE_IDX 1
+#define regGFX_IMU_CORE_CTRL 0x40b6
+#define regGFX_IMU_CORE_CTRL_BASE_IDX 1
+#define regGFX_IMU_CORE_STATUS 0x40b7
+#define regGFX_IMU_CORE_STATUS_BASE_IDX 1
+#define regGFX_IMU_PWROKRAW 0x40b8
+#define regGFX_IMU_PWROKRAW_BASE_IDX 1
+#define regGFX_IMU_PWROK 0x40b9
+#define regGFX_IMU_PWROK_BASE_IDX 1
+#define regGFX_IMU_GAP_PWROK 0x40ba
+#define regGFX_IMU_GAP_PWROK_BASE_IDX 1
+#define regGFX_IMU_RESETn 0x40bb
+#define regGFX_IMU_RESETn_BASE_IDX 1
+#define regGFX_IMU_GFX_RESET_CTRL 0x40bc
+#define regGFX_IMU_GFX_RESET_CTRL_BASE_IDX 1
+#define regGFX_IMU_AEB_OVERRIDE 0x40bd
+#define regGFX_IMU_AEB_OVERRIDE_BASE_IDX 1
+#define regGFX_IMU_VDCI_RESET_CTRL 0x40be
+#define regGFX_IMU_VDCI_RESET_CTRL_BASE_IDX 1
+#define regGFX_IMU_GFX_ISO_CTRL 0x40bf
+#define regGFX_IMU_GFX_ISO_CTRL_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CTRL0 0x40c0
+#define regGFX_IMU_TIMER0_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CTRL1 0x40c1
+#define regGFX_IMU_TIMER0_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP_AUTOINC 0x40c2
+#define regGFX_IMU_TIMER0_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP_INTEN 0x40c3
+#define regGFX_IMU_TIMER0_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP0 0x40c4
+#define regGFX_IMU_TIMER0_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP1 0x40c5
+#define regGFX_IMU_TIMER0_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP3 0x40c7
+#define regGFX_IMU_TIMER0_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER0_VALUE 0x40c8
+#define regGFX_IMU_TIMER0_VALUE_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CTRL0 0x40c9
+#define regGFX_IMU_TIMER1_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CTRL1 0x40ca
+#define regGFX_IMU_TIMER1_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP_AUTOINC 0x40cb
+#define regGFX_IMU_TIMER1_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP_INTEN 0x40cc
+#define regGFX_IMU_TIMER1_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP0 0x40cd
+#define regGFX_IMU_TIMER1_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP1 0x40ce
+#define regGFX_IMU_TIMER1_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP3 0x40d0
+#define regGFX_IMU_TIMER1_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER1_VALUE 0x40d1
+#define regGFX_IMU_TIMER1_VALUE_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CTRL0 0x40d2
+#define regGFX_IMU_TIMER2_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CTRL1 0x40d3
+#define regGFX_IMU_TIMER2_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP_AUTOINC 0x40d4
+#define regGFX_IMU_TIMER2_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP_INTEN 0x40d5
+#define regGFX_IMU_TIMER2_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP0 0x40d6
+#define regGFX_IMU_TIMER2_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP1 0x40d7
+#define regGFX_IMU_TIMER2_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP3 0x40d9
+#define regGFX_IMU_TIMER2_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER2_VALUE 0x40da
+#define regGFX_IMU_TIMER2_VALUE_BASE_IDX 1
+#define regGFX_IMU_FUSE_CTRL 0x40e0
+#define regGFX_IMU_FUSE_CTRL_BASE_IDX 1
+#define regGFX_IMU_D_RAM_ADDR 0x40fc
+#define regGFX_IMU_D_RAM_ADDR_BASE_IDX 1
+#define regGFX_IMU_D_RAM_DATA 0x40fd
+#define regGFX_IMU_D_RAM_DATA_BASE_IDX 1
+#define regGFX_IMU_GFX_IH_GASKET_CTRL 0x40ff
+#define regGFX_IMU_GFX_IH_GASKET_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_gfx_imu_gfx_imu_pspdec
+// base address: 0x3fe00
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_HI 0x5f81
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_HI_BASE_IDX 1
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_LO 0x5f82
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_LO_BASE_IDX 1
+#define regGFX_IMU_RLC_BOOTLOADER_SIZE 0x5f83
+#define regGFX_IMU_RLC_BOOTLOADER_SIZE_BASE_IDX 1
+#define regGFX_IMU_I_RAM_ADDR 0x5f90
+#define regGFX_IMU_I_RAM_ADDR_BASE_IDX 1
+#define regGFX_IMU_I_RAM_DATA 0x5f91
+#define regGFX_IMU_I_RAM_DATA_BASE_IDX 1
+
+
+// addressBlock: gccacind
+// base address: 0x0
+#define ixGC_CAC_ID 0x0000
+#define ixGC_CAC_CNTL 0x0001
+#define ixGC_CAC_ACC_CP0 0x0010
+#define ixGC_CAC_ACC_CP1 0x0011
+#define ixGC_CAC_ACC_CP2 0x0012
+#define ixGC_CAC_ACC_EA0 0x0013
+#define ixGC_CAC_ACC_EA1 0x0014
+#define ixGC_CAC_ACC_EA2 0x0015
+#define ixGC_CAC_ACC_EA3 0x0016
+#define ixGC_CAC_ACC_EA4 0x0017
+#define ixGC_CAC_ACC_EA5 0x0018
+#define ixGC_CAC_ACC_UTCL2_ROUTER0 0x0019
+#define ixGC_CAC_ACC_UTCL2_ROUTER1 0x001a
+#define ixGC_CAC_ACC_UTCL2_ROUTER2 0x001b
+#define ixGC_CAC_ACC_UTCL2_ROUTER3 0x001c
+#define ixGC_CAC_ACC_UTCL2_ROUTER4 0x001d
+#define ixGC_CAC_ACC_UTCL2_ROUTER5 0x001e
+#define ixGC_CAC_ACC_UTCL2_ROUTER6 0x001f
+#define ixGC_CAC_ACC_UTCL2_ROUTER7 0x0020
+#define ixGC_CAC_ACC_UTCL2_ROUTER8 0x0021
+#define ixGC_CAC_ACC_UTCL2_ROUTER9 0x0022
+#define ixGC_CAC_ACC_UTCL2_VML20 0x0023
+#define ixGC_CAC_ACC_UTCL2_VML21 0x0024
+#define ixGC_CAC_ACC_UTCL2_VML22 0x0025
+#define ixGC_CAC_ACC_UTCL2_VML23 0x0026
+#define ixGC_CAC_ACC_UTCL2_VML24 0x0027
+#define ixGC_CAC_ACC_UTCL2_WALKER0 0x0028
+#define ixGC_CAC_ACC_UTCL2_WALKER1 0x0029
+#define ixGC_CAC_ACC_UTCL2_WALKER2 0x002a
+#define ixGC_CAC_ACC_UTCL2_WALKER3 0x002b
+#define ixGC_CAC_ACC_UTCL2_WALKER4 0x002c
+#define ixGC_CAC_ACC_GDS0 0x002d
+#define ixGC_CAC_ACC_GDS1 0x002e
+#define ixGC_CAC_ACC_GDS2 0x002f
+#define ixGC_CAC_ACC_GDS3 0x0030
+#define ixGC_CAC_ACC_GDS4 0x0031
+#define ixGC_CAC_ACC_GE0 0x0032
+#define ixGC_CAC_ACC_GE1 0x0033
+#define ixGC_CAC_ACC_GE2 0x0034
+#define ixGC_CAC_ACC_GE3 0x0035
+#define ixGC_CAC_ACC_GE4 0x0036
+#define ixGC_CAC_ACC_GE5 0x0037
+#define ixGC_CAC_ACC_GE6 0x0038
+#define ixGC_CAC_ACC_GE7 0x0039
+#define ixGC_CAC_ACC_GE8 0x003a
+#define ixGC_CAC_ACC_GE9 0x003b
+#define ixGC_CAC_ACC_GE10 0x003c
+#define ixGC_CAC_ACC_GE11 0x003d
+#define ixGC_CAC_ACC_GE12 0x003e
+#define ixGC_CAC_ACC_GE13 0x003f
+#define ixGC_CAC_ACC_GE14 0x0040
+#define ixGC_CAC_ACC_GE15 0x0041
+#define ixGC_CAC_ACC_GE16 0x0042
+#define ixGC_CAC_ACC_GE17 0x0043
+#define ixGC_CAC_ACC_GE18 0x0044
+#define ixGC_CAC_ACC_GE19 0x0045
+#define ixGC_CAC_ACC_GE20 0x0046
+#define ixGC_CAC_ACC_PMM0 0x0047
+#define ixGC_CAC_ACC_GL2C0 0x0048
+#define ixGC_CAC_ACC_GL2C1 0x0049
+#define ixGC_CAC_ACC_GL2C2 0x004a
+#define ixGC_CAC_ACC_GL2C3 0x004b
+#define ixGC_CAC_ACC_GL2C4 0x004c
+#define ixGC_CAC_ACC_PH0 0x004d
+#define ixGC_CAC_ACC_PH1 0x004e
+#define ixGC_CAC_ACC_PH2 0x004f
+#define ixGC_CAC_ACC_PH3 0x0050
+#define ixGC_CAC_ACC_PH4 0x0051
+#define ixGC_CAC_ACC_PH5 0x0052
+#define ixGC_CAC_ACC_PH6 0x0053
+#define ixGC_CAC_ACC_PH7 0x0054
+#define ixGC_CAC_ACC_SDMA0 0x0055
+#define ixGC_CAC_ACC_SDMA1 0x0056
+#define ixGC_CAC_ACC_SDMA2 0x0057
+#define ixGC_CAC_ACC_SDMA3 0x0058
+#define ixGC_CAC_ACC_SDMA4 0x0059
+#define ixGC_CAC_ACC_SDMA5 0x005a
+#define ixGC_CAC_ACC_SDMA6 0x005b
+#define ixGC_CAC_ACC_SDMA7 0x005c
+#define ixGC_CAC_ACC_SDMA8 0x005d
+#define ixGC_CAC_ACC_SDMA9 0x005e
+#define ixGC_CAC_ACC_SDMA10 0x005f
+#define ixGC_CAC_ACC_SDMA11 0x0060
+#define ixGC_CAC_ACC_CHC0 0x0061
+#define ixGC_CAC_ACC_CHC1 0x0062
+#define ixGC_CAC_ACC_CHC2 0x0063
+#define ixGC_CAC_ACC_GUS0 0x0064
+#define ixGC_CAC_ACC_GUS1 0x0065
+#define ixGC_CAC_ACC_GUS2 0x0066
+#define ixGC_CAC_ACC_RLC0 0x0067
+#define ixGC_CAC_ACC_UTCL2_ATCL20 0x0068
+#define ixGC_CAC_ACC_UTCL2_ATCL21 0x0069
+#define ixGC_CAC_ACC_UTCL2_ATCL22 0x006a
+#define ixGC_CAC_ACC_UTCL2_ATCL23 0x006b
+#define ixGC_CAC_ACC_UTCL2_ATCL24 0x006c
+#define ixRELEASE_TO_STALL_LUT_1_8 0x0100
+#define ixRELEASE_TO_STALL_LUT_9_16 0x0101
+#define ixRELEASE_TO_STALL_LUT_17_20 0x0102
+#define ixSTALL_TO_RELEASE_LUT_1_4 0x0103
+#define ixSTALL_TO_RELEASE_LUT_5_7 0x0104
+#define ixSTALL_TO_PWRBRK_LUT_1_4 0x0105
+#define ixSTALL_TO_PWRBRK_LUT_5_7 0x0106
+#define ixPWRBRK_STALL_TO_RELEASE_LUT_1_4 0x0107
+#define ixPWRBRK_STALL_TO_RELEASE_LUT_5_7 0x0108
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_1_8 0x0109
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_9_16 0x010a
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_17_20 0x010b
+#define ixFIXED_PATTERN_PERF_COUNTER_1 0x010c
+#define ixFIXED_PATTERN_PERF_COUNTER_2 0x010d
+#define ixFIXED_PATTERN_PERF_COUNTER_3 0x010e
+#define ixFIXED_PATTERN_PERF_COUNTER_4 0x010f
+#define ixFIXED_PATTERN_PERF_COUNTER_5 0x0110
+#define ixFIXED_PATTERN_PERF_COUNTER_6 0x0111
+#define ixFIXED_PATTERN_PERF_COUNTER_7 0x0112
+#define ixFIXED_PATTERN_PERF_COUNTER_8 0x0113
+#define ixFIXED_PATTERN_PERF_COUNTER_9 0x0114
+#define ixFIXED_PATTERN_PERF_COUNTER_10 0x0115
+#define ixHW_LUT_UPDATE_STATUS 0x0116
+
+
+// addressBlock: secacind
+// base address: 0x0
+#define ixSE_CAC_ID 0x0000
+#define ixSE_CAC_CNTL 0x0001
+
+
+// addressBlock: grtavfsind
+// base address: 0x0
+#define ixRTAVFS_REG0 0x0000
+#define ixRTAVFS_REG1 0x0001
+#define ixRTAVFS_REG2 0x0002
+#define ixRTAVFS_REG3 0x0003
+#define ixRTAVFS_REG4 0x0004
+#define ixRTAVFS_REG5 0x0005
+#define ixRTAVFS_REG6 0x0006
+#define ixRTAVFS_REG7 0x0007
+#define ixRTAVFS_REG8 0x0008
+#define ixRTAVFS_REG9 0x0009
+#define ixRTAVFS_REG10 0x000a
+#define ixRTAVFS_REG11 0x000b
+#define ixRTAVFS_REG12 0x000c
+#define ixRTAVFS_REG13 0x000d
+#define ixRTAVFS_REG14 0x000e
+#define ixRTAVFS_REG15 0x000f
+#define ixRTAVFS_REG16 0x0010
+#define ixRTAVFS_REG17 0x0011
+#define ixRTAVFS_REG18 0x0012
+#define ixRTAVFS_REG19 0x0013
+#define ixRTAVFS_REG20 0x0014
+#define ixRTAVFS_REG21 0x0015
+#define ixRTAVFS_REG22 0x0016
+#define ixRTAVFS_REG23 0x0017
+#define ixRTAVFS_REG24 0x0018
+#define ixRTAVFS_REG25 0x0019
+#define ixRTAVFS_REG26 0x001a
+#define ixRTAVFS_REG27 0x001b
+#define ixRTAVFS_REG28 0x001c
+#define ixRTAVFS_REG29 0x001d
+#define ixRTAVFS_REG30 0x001e
+#define ixRTAVFS_REG31 0x001f
+#define ixRTAVFS_REG32 0x0020
+#define ixRTAVFS_REG33 0x0021
+#define ixRTAVFS_REG34 0x0022
+#define ixRTAVFS_REG35 0x0023
+#define ixRTAVFS_REG36 0x0024
+#define ixRTAVFS_REG37 0x0025
+#define ixRTAVFS_REG38 0x0026
+#define ixRTAVFS_REG39 0x0027
+#define ixRTAVFS_REG40 0x0028
+#define ixRTAVFS_REG41 0x0029
+#define ixRTAVFS_REG42 0x002a
+#define ixRTAVFS_REG43 0x002b
+#define ixRTAVFS_REG44 0x002c
+#define ixRTAVFS_REG45 0x002d
+#define ixRTAVFS_REG46 0x002e
+#define ixRTAVFS_REG47 0x002f
+#define ixRTAVFS_REG48 0x0030
+#define ixRTAVFS_REG49 0x0031
+#define ixRTAVFS_REG50 0x0032
+#define ixRTAVFS_REG51 0x0033
+#define ixRTAVFS_REG52 0x0034
+#define ixRTAVFS_REG53 0x0035
+#define ixRTAVFS_REG54 0x0036
+#define ixRTAVFS_REG55 0x0037
+#define ixRTAVFS_REG56 0x0038
+#define ixRTAVFS_REG57 0x0039
+#define ixRTAVFS_REG58 0x003a
+#define ixRTAVFS_REG59 0x003b
+#define ixRTAVFS_REG60 0x003c
+#define ixRTAVFS_REG61 0x003d
+#define ixRTAVFS_REG62 0x003e
+#define ixRTAVFS_REG63 0x003f
+#define ixRTAVFS_REG64 0x0040
+#define ixRTAVFS_REG65 0x0041
+#define ixRTAVFS_REG66 0x0042
+#define ixRTAVFS_REG67 0x0043
+#define ixRTAVFS_REG68 0x0044
+#define ixRTAVFS_REG69 0x0045
+#define ixRTAVFS_REG70 0x0046
+#define ixRTAVFS_REG71 0x0047
+#define ixRTAVFS_REG72 0x0048
+#define ixRTAVFS_REG73 0x0049
+#define ixRTAVFS_REG74 0x004a
+#define ixRTAVFS_REG75 0x004b
+#define ixRTAVFS_REG76 0x004c
+#define ixRTAVFS_REG77 0x004d
+#define ixRTAVFS_REG78 0x004e
+#define ixRTAVFS_REG79 0x004f
+#define ixRTAVFS_REG80 0x0050
+#define ixRTAVFS_REG81 0x0051
+#define ixRTAVFS_REG82 0x0052
+#define ixRTAVFS_REG83 0x0053
+#define ixRTAVFS_REG84 0x0054
+#define ixRTAVFS_REG85 0x0055
+#define ixRTAVFS_REG86 0x0056
+#define ixRTAVFS_REG87 0x0057
+#define ixRTAVFS_REG88 0x0058
+#define ixRTAVFS_REG89 0x0059
+#define ixRTAVFS_REG90 0x005a
+#define ixRTAVFS_REG91 0x005b
+#define ixRTAVFS_REG92 0x005c
+#define ixRTAVFS_REG93 0x005d
+#define ixRTAVFS_REG94 0x005e
+#define ixRTAVFS_REG95 0x005f
+#define ixRTAVFS_REG96 0x0060
+#define ixRTAVFS_REG97 0x0061
+#define ixRTAVFS_REG98 0x0062
+#define ixRTAVFS_REG99 0x0063
+#define ixRTAVFS_REG100 0x0064
+#define ixRTAVFS_REG101 0x0065
+#define ixRTAVFS_REG102 0x0066
+#define ixRTAVFS_REG103 0x0067
+#define ixRTAVFS_REG104 0x0068
+#define ixRTAVFS_REG105 0x0069
+#define ixRTAVFS_REG106 0x006a
+#define ixRTAVFS_REG107 0x006b
+#define ixRTAVFS_REG108 0x006c
+#define ixRTAVFS_REG109 0x006d
+#define ixRTAVFS_REG110 0x006e
+#define ixRTAVFS_REG111 0x006f
+#define ixRTAVFS_REG112 0x0070
+#define ixRTAVFS_REG113 0x0071
+#define ixRTAVFS_REG114 0x0072
+#define ixRTAVFS_REG115 0x0073
+#define ixRTAVFS_REG116 0x0074
+#define ixRTAVFS_REG117 0x0075
+#define ixRTAVFS_REG118 0x0076
+#define ixRTAVFS_REG119 0x0077
+#define ixRTAVFS_REG120 0x0078
+#define ixRTAVFS_REG121 0x0079
+#define ixRTAVFS_REG122 0x007a
+#define ixRTAVFS_REG123 0x007b
+#define ixRTAVFS_REG124 0x007c
+#define ixRTAVFS_REG125 0x007d
+#define ixRTAVFS_REG126 0x007e
+#define ixRTAVFS_REG127 0x007f
+#define ixRTAVFS_REG128 0x0080
+#define ixRTAVFS_REG129 0x0081
+#define ixRTAVFS_REG130 0x0082
+#define ixRTAVFS_REG131 0x0083
+#define ixRTAVFS_REG132 0x0084
+#define ixRTAVFS_REG133 0x0085
+#define ixRTAVFS_REG134 0x0086
+#define ixRTAVFS_REG135 0x0087
+#define ixRTAVFS_REG136 0x0088
+#define ixRTAVFS_REG137 0x0089
+#define ixRTAVFS_REG138 0x008a
+#define ixRTAVFS_REG139 0x008b
+#define ixRTAVFS_REG140 0x008c
+#define ixRTAVFS_REG141 0x008d
+#define ixRTAVFS_REG142 0x008e
+#define ixRTAVFS_REG143 0x008f
+#define ixRTAVFS_REG144 0x0090
+#define ixRTAVFS_REG145 0x0091
+#define ixRTAVFS_REG146 0x0092
+#define ixRTAVFS_REG147 0x0093
+#define ixRTAVFS_REG148 0x0094
+#define ixRTAVFS_REG149 0x0095
+#define ixRTAVFS_REG150 0x0096
+#define ixRTAVFS_REG151 0x0097
+#define ixRTAVFS_REG152 0x0098
+#define ixRTAVFS_REG153 0x0099
+#define ixRTAVFS_REG154 0x009a
+#define ixRTAVFS_REG155 0x009b
+#define ixRTAVFS_REG156 0x009c
+#define ixRTAVFS_REG157 0x009d
+#define ixRTAVFS_REG158 0x009e
+#define ixRTAVFS_REG159 0x009f
+#define ixRTAVFS_REG160 0x00a0
+#define ixRTAVFS_REG161 0x00a1
+#define ixRTAVFS_REG162 0x00a2
+#define ixRTAVFS_REG163 0x00a3
+#define ixRTAVFS_REG164 0x00a4
+#define ixRTAVFS_REG165 0x00a5
+#define ixRTAVFS_REG166 0x00a6
+#define ixRTAVFS_REG167 0x00a7
+#define ixRTAVFS_REG168 0x00a8
+#define ixRTAVFS_REG169 0x00a9
+#define ixRTAVFS_REG170 0x00aa
+#define ixRTAVFS_REG171 0x00ab
+#define ixRTAVFS_REG172 0x00ac
+#define ixRTAVFS_REG173 0x00ad
+#define ixRTAVFS_REG174 0x00ae
+#define ixRTAVFS_REG175 0x00af
+#define ixRTAVFS_REG176 0x00b0
+#define ixRTAVFS_REG177 0x00b1
+#define ixRTAVFS_REG178 0x00b2
+#define ixRTAVFS_REG179 0x00b3
+#define ixRTAVFS_REG180 0x00b4
+#define ixRTAVFS_REG181 0x00b5
+#define ixRTAVFS_REG182 0x00b6
+#define ixRTAVFS_REG183 0x00b7
+#define ixRTAVFS_REG184 0x00b8
+#define ixRTAVFS_REG185 0x00b9
+#define ixRTAVFS_REG186 0x00ba
+#define ixRTAVFS_REG187 0x00bb
+#define ixRTAVFS_REG189 0x00bd
+#define ixRTAVFS_REG190 0x00be
+#define ixRTAVFS_REG191 0x00bf
+#define ixRTAVFS_REG192 0x00c0
+#define ixRTAVFS_REG193 0x00c1
+#define ixRTAVFS_REG194 0x00c2
+
+
+// addressBlock: sqind
+// base address: 0x0
+#define ixSQ_DEBUG_STS_LOCAL 0x0008
+#define ixSQ_DEBUG_CTRL_LOCAL 0x0009
+#define ixSQ_WAVE_ACTIVE 0x000a
+#define ixSQ_WAVE_VALID_AND_IDLE 0x000b
+#define ixSQ_WAVE_MODE 0x0101
+#define ixSQ_WAVE_STATUS 0x0102
+#define ixSQ_WAVE_TRAPSTS 0x0103
+#define ixSQ_WAVE_GPR_ALLOC 0x0105
+#define ixSQ_WAVE_LDS_ALLOC 0x0106
+#define ixSQ_WAVE_IB_STS 0x0107
+#define ixSQ_WAVE_PC_LO 0x0108
+#define ixSQ_WAVE_PC_HI 0x0109
+#define ixSQ_WAVE_IB_DBG1 0x010d
+#define ixSQ_WAVE_FLUSH_IB 0x010e
+#define ixSQ_WAVE_FLAT_SCRATCH_LO 0x0114
+#define ixSQ_WAVE_FLAT_SCRATCH_HI 0x0115
+#define ixSQ_WAVE_HW_ID1 0x0117
+#define ixSQ_WAVE_HW_ID2 0x0118
+#define ixSQ_WAVE_POPS_PACKER 0x0119
+#define ixSQ_WAVE_SCHED_MODE 0x011a
+#define ixSQ_WAVE_IB_STS2 0x011c
+#define ixSQ_WAVE_SHADER_CYCLES 0x011d
+#define ixSQ_WAVE_TTMP0 0x026c
+#define ixSQ_WAVE_TTMP1 0x026d
+#define ixSQ_WAVE_TTMP2 0x026e
+#define ixSQ_WAVE_TTMP3 0x026f
+#define ixSQ_WAVE_TTMP4 0x0270
+#define ixSQ_WAVE_TTMP5 0x0271
+#define ixSQ_WAVE_TTMP6 0x0272
+#define ixSQ_WAVE_TTMP7 0x0273
+#define ixSQ_WAVE_TTMP8 0x0274
+#define ixSQ_WAVE_TTMP9 0x0275
+#define ixSQ_WAVE_TTMP10 0x0276
+#define ixSQ_WAVE_TTMP11 0x0277
+#define ixSQ_WAVE_TTMP12 0x0278
+#define ixSQ_WAVE_TTMP13 0x0279
+#define ixSQ_WAVE_TTMP14 0x027a
+#define ixSQ_WAVE_TTMP15 0x027b
+#define ixSQ_WAVE_M0 0x027d
+#define ixSQ_WAVE_EXEC_LO 0x027e
+#define ixSQ_WAVE_EXEC_HI 0x027f
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
new file mode 100644
index 000000000000..ae3ef8a9e702
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
@@ -0,0 +1,44640 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_11_0_3_SH_MASK_HEADER
+#define _gc_11_0_3_SH_MASK_HEADER
+
+
+// addressBlock: gc_sdma0_sdma0dec
+//SDMA0_DEC_START
+#define SDMA0_DEC_START__START__SHIFT 0x0
+#define SDMA0_DEC_START__START_MASK 0xFFFFFFFFL
+//SDMA0_F32_MISC_CNTL
+#define SDMA0_F32_MISC_CNTL__F32_WAKEUP__SHIFT 0x0
+#define SDMA0_F32_MISC_CNTL__F32_WAKEUP_MASK 0x00000001L
+//SDMA0_GLOBAL_TIMESTAMP_LO
+#define SDMA0_GLOBAL_TIMESTAMP_LO__DATA__SHIFT 0x0
+#define SDMA0_GLOBAL_TIMESTAMP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_GLOBAL_TIMESTAMP_HI
+#define SDMA0_GLOBAL_TIMESTAMP_HI__DATA__SHIFT 0x0
+#define SDMA0_GLOBAL_TIMESTAMP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_POWER_CNTL
+#define SDMA0_POWER_CNTL__LS_ENABLE__SHIFT 0x8
+#define SDMA0_POWER_CNTL__LS_ENABLE_MASK 0x00000100L
+//SDMA0_CNTL
+#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA0_CNTL__PIO_DONE_ACK_ENABLE__SHIFT 0x6
+#define SDMA0_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE__SHIFT 0x8
+#define SDMA0_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x9
+#define SDMA0_CNTL__CP_MES_INT_ENABLE__SHIFT 0xa
+#define SDMA0_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA0_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA0_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA0_CNTL__CH_PERFCNT_ENABLE__SHIFT 0x10
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA0_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA0_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA0_CNTL__PIO_DONE_ACK_ENABLE_MASK 0x00000040L
+#define SDMA0_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE_MASK 0x00000100L
+#define SDMA0_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000200L
+#define SDMA0_CNTL__CP_MES_INT_ENABLE_MASK 0x00000400L
+#define SDMA0_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA0_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA0_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA0_CNTL__CH_PERFCNT_ENABLE_MASK 0x00010000L
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA0_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA0_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA0_CHICKEN_BITS__BACK_COMPAT_ENABLE__SHIFT 0x3
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x5
+#define SDMA0_CHICKEN_BITS__RD_BURST__SHIFT 0x6
+#define SDMA0_CHICKEN_BITS__WR_BURST__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE__SHIFT 0xa
+#define SDMA0_CHICKEN_BITS__WR_COMBINE_256B_ENABLE__SHIFT 0xe
+#define SDMA0_CHICKEN_BITS__RD_COMBINE_256B_ENABLE__SHIFT 0xf
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA0_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG__SHIFT 0x13
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG__SHIFT 0x15
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG__SHIFT 0x16
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x18
+#define SDMA0_CHICKEN_BITS__SW_FREEZE_ENABLE__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x1a
+#define SDMA0_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA0_CHICKEN_BITS__BACK_COMPAT_ENABLE_MASK 0x00000008L
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00000020L
+#define SDMA0_CHICKEN_BITS__RD_BURST_MASK 0x000000C0L
+#define SDMA0_CHICKEN_BITS__WR_BURST_MASK 0x00000300L
+#define SDMA0_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE_MASK 0x00003C00L
+#define SDMA0_CHICKEN_BITS__WR_COMBINE_256B_ENABLE_MASK 0x00004000L
+#define SDMA0_CHICKEN_BITS__RD_COMBINE_256B_ENABLE_MASK 0x00008000L
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA0_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG_MASK 0x00080000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG_MASK 0x00100000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG_MASK 0x00200000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG_MASK 0x00400000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG_MASK 0x00800000L
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x01000000L
+#define SDMA0_CHICKEN_BITS__SW_FREEZE_ENABLE_MASK 0x02000000L
+#define SDMA0_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL_MASK 0x04000000L
+#define SDMA0_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA0_GB_ADDR_CONFIG
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA0_GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA0_GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA0_GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA0_GB_ADDR_CONFIG_READ
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA0_RB_RPTR_FETCH
+#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA0_RB_RPTR_FETCH_HI
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA0_IB_OFFSET_FETCH
+#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PROGRAM
+#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA0_STATUS_REG
+#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA0_STATUS_REG__CGCG_FENCE__SHIFT 0xb
+#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA0_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA0_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA0_STATUS_REG__CGCG_FENCE_MASK 0x00000800L
+#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA0_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA0_STATUS1_REG
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA0_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA0_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xd
+#define SDMA0_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define SDMA0_STATUS1_REG__SEC_INTR_STATUS__SHIFT 0x11
+#define SDMA0_STATUS1_REG__WPTR_POLL_IDLE__SHIFT 0x12
+#define SDMA0_STATUS1_REG__SDMA_IDLE__SHIFT 0x13
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA0_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define SDMA0_STATUS1_REG__EX_START_MASK 0x00002000L
+#define SDMA0_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00004000L
+#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+#define SDMA0_STATUS1_REG__SEC_INTR_STATUS_MASK 0x00020000L
+#define SDMA0_STATUS1_REG__WPTR_POLL_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS1_REG__SDMA_IDLE_MASK 0x00080000L
+//SDMA0_CNTL1
+#define SDMA0_CNTL1__WPTR_POLL_FREQUENCY__SHIFT 0x2
+#define SDMA0_CNTL1__WPTR_POLL_FREQUENCY_MASK 0x0000FFFCL
+//SDMA0_HBM_PAGE_CONFIG
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA0_UCODE_CHECKSUM
+#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_FREEZE
+#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA0_PROCESS_QUANTUM0
+#define SDMA0_PROCESS_QUANTUM0__PROCESS0_QUANTUM__SHIFT 0x0
+#define SDMA0_PROCESS_QUANTUM0__PROCESS1_QUANTUM__SHIFT 0x8
+#define SDMA0_PROCESS_QUANTUM0__PROCESS2_QUANTUM__SHIFT 0x10
+#define SDMA0_PROCESS_QUANTUM0__PROCESS3_QUANTUM__SHIFT 0x18
+#define SDMA0_PROCESS_QUANTUM0__PROCESS0_QUANTUM_MASK 0x000000FFL
+#define SDMA0_PROCESS_QUANTUM0__PROCESS1_QUANTUM_MASK 0x0000FF00L
+#define SDMA0_PROCESS_QUANTUM0__PROCESS2_QUANTUM_MASK 0x00FF0000L
+#define SDMA0_PROCESS_QUANTUM0__PROCESS3_QUANTUM_MASK 0xFF000000L
+//SDMA0_PROCESS_QUANTUM1
+#define SDMA0_PROCESS_QUANTUM1__PROCESS4_QUANTUM__SHIFT 0x0
+#define SDMA0_PROCESS_QUANTUM1__PROCESS5_QUANTUM__SHIFT 0x8
+#define SDMA0_PROCESS_QUANTUM1__PROCESS6_QUANTUM__SHIFT 0x10
+#define SDMA0_PROCESS_QUANTUM1__PROCESS7_QUANTUM__SHIFT 0x18
+#define SDMA0_PROCESS_QUANTUM1__PROCESS4_QUANTUM_MASK 0x000000FFL
+#define SDMA0_PROCESS_QUANTUM1__PROCESS5_QUANTUM_MASK 0x0000FF00L
+#define SDMA0_PROCESS_QUANTUM1__PROCESS6_QUANTUM_MASK 0x00FF0000L
+#define SDMA0_PROCESS_QUANTUM1__PROCESS7_QUANTUM_MASK 0xFF000000L
+//SDMA0_WATCHDOG_CNTL
+#define SDMA0_WATCHDOG_CNTL__QUEUE_HANG_COUNT__SHIFT 0x0
+#define SDMA0_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT__SHIFT 0x8
+#define SDMA0_WATCHDOG_CNTL__QUEUE_HANG_COUNT_MASK 0x000000FFL
+#define SDMA0_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT_MASK 0x0000FF00L
+//SDMA0_QUEUE_STATUS0
+#define SDMA0_QUEUE_STATUS0__QUEUE0_STATUS__SHIFT 0x0
+#define SDMA0_QUEUE_STATUS0__QUEUE1_STATUS__SHIFT 0x4
+#define SDMA0_QUEUE_STATUS0__QUEUE2_STATUS__SHIFT 0x8
+#define SDMA0_QUEUE_STATUS0__QUEUE3_STATUS__SHIFT 0xc
+#define SDMA0_QUEUE_STATUS0__QUEUE4_STATUS__SHIFT 0x10
+#define SDMA0_QUEUE_STATUS0__QUEUE5_STATUS__SHIFT 0x14
+#define SDMA0_QUEUE_STATUS0__QUEUE6_STATUS__SHIFT 0x18
+#define SDMA0_QUEUE_STATUS0__QUEUE7_STATUS__SHIFT 0x1c
+#define SDMA0_QUEUE_STATUS0__QUEUE0_STATUS_MASK 0x0000000FL
+#define SDMA0_QUEUE_STATUS0__QUEUE1_STATUS_MASK 0x000000F0L
+#define SDMA0_QUEUE_STATUS0__QUEUE2_STATUS_MASK 0x00000F00L
+#define SDMA0_QUEUE_STATUS0__QUEUE3_STATUS_MASK 0x0000F000L
+#define SDMA0_QUEUE_STATUS0__QUEUE4_STATUS_MASK 0x000F0000L
+#define SDMA0_QUEUE_STATUS0__QUEUE5_STATUS_MASK 0x00F00000L
+#define SDMA0_QUEUE_STATUS0__QUEUE6_STATUS_MASK 0x0F000000L
+#define SDMA0_QUEUE_STATUS0__QUEUE7_STATUS_MASK 0xF0000000L
+//SDMA0_EDC_CONFIG
+#define SDMA0_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA0_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA0_BA_THRESHOLD
+#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA0_ID
+#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA0_VERSION
+#define SDMA0_VERSION__MINVER__SHIFT 0x0
+#define SDMA0_VERSION__MAJVER__SHIFT 0x8
+#define SDMA0_VERSION__REV__SHIFT 0x10
+#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA0_VERSION__REV_MASK 0x003F0000L
+//SDMA0_EDC_COUNTER
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA0_EDC_COUNTER_CLEAR
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA0_STATUS2_REG
+#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS2_REG__TH0F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS2_REG__TH0F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA0_ATOMIC_CNTL
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA0_ATOMIC_PREOP_LO
+#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_ATOMIC_PREOP_HI
+#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_CNTL
+#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x0
+#define SDMA0_UTCL1_CNTL__PAGE_WAIT_DELAY__SHIFT 0x5
+#define SDMA0_UTCL1_CNTL__RESP_MODE__SHIFT 0x9
+#define SDMA0_UTCL1_CNTL__FORCE_INVALIDATION__SHIFT 0xe
+#define SDMA0_UTCL1_CNTL__FORCE_INVREQ_HEAVY__SHIFT 0xf
+#define SDMA0_UTCL1_CNTL__WR_EXE_PERMS_CTRL__SHIFT 0x10
+#define SDMA0_UTCL1_CNTL__RD_EXE_PERMS_CTRL__SHIFT 0x11
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0x12
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x0000001FL
+#define SDMA0_UTCL1_CNTL__PAGE_WAIT_DELAY_MASK 0x000001E0L
+#define SDMA0_UTCL1_CNTL__RESP_MODE_MASK 0x00000600L
+#define SDMA0_UTCL1_CNTL__FORCE_INVALIDATION_MASK 0x00004000L
+#define SDMA0_UTCL1_CNTL__FORCE_INVREQ_HEAVY_MASK 0x00008000L
+#define SDMA0_UTCL1_CNTL__WR_EXE_PERMS_CTRL_MASK 0x00010000L
+#define SDMA0_UTCL1_CNTL__RD_EXE_PERMS_CTRL_MASK 0x00020000L
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x003C0000L
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x3F000000L
+//SDMA0_UTCL1_WATERMK
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK__SHIFT 0x0
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP__SHIFT 0x4
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK__SHIFT 0x6
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP__SHIFT 0xa
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK__SHIFT 0xc
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP__SHIFT 0x10
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK__SHIFT 0x12
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP__SHIFT 0x16
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK_MASK 0x0000000FL
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP_MASK 0x00000030L
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK_MASK 0x000003C0L
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP_MASK 0x00000C00L
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK_MASK 0x0000F000L
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP_MASK 0x00030000L
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK_MASK 0x003C0000L
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP_MASK 0x00C00000L
+//SDMA0_UTCL1_TIMEOUT
+#define SDMA0_UTCL1_TIMEOUT__XNACK_LIMIT__SHIFT 0x0
+#define SDMA0_UTCL1_TIMEOUT__XNACK_LIMIT_MASK 0x0000FFFFL
+//SDMA0_UTCL1_PAGE
+#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA0_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0xa
+#define SDMA0_UTCL1_PAGE__USE_IO__SHIFT 0xb
+#define SDMA0_UTCL1_PAGE__RD_L2_POLICY__SHIFT 0xc
+#define SDMA0_UTCL1_PAGE__WR_L2_POLICY__SHIFT 0xe
+#define SDMA0_UTCL1_PAGE__DMA_PAGE_SIZE__SHIFT 0x10
+#define SDMA0_UTCL1_PAGE__USE_BC__SHIFT 0x16
+#define SDMA0_UTCL1_PAGE__ADDR_IS_PA__SHIFT 0x17
+#define SDMA0_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0x18
+#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA0_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000003C0L
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000400L
+#define SDMA0_UTCL1_PAGE__USE_IO_MASK 0x00000800L
+#define SDMA0_UTCL1_PAGE__RD_L2_POLICY_MASK 0x00003000L
+#define SDMA0_UTCL1_PAGE__WR_L2_POLICY_MASK 0x0000C000L
+#define SDMA0_UTCL1_PAGE__DMA_PAGE_SIZE_MASK 0x003F0000L
+#define SDMA0_UTCL1_PAGE__USE_BC_MASK 0x00400000L
+#define SDMA0_UTCL1_PAGE__ADDR_IS_PA_MASK 0x00800000L
+#define SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK 0x01000000L
+//SDMA0_UTCL1_RD_STATUS
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_RD_STATUS__RESERVED0__SHIFT 0x5
+#define SDMA0_UTCL1_RD_STATUS__RESERVED1__SHIFT 0x6
+#define SDMA0_UTCL1_RD_STATUS__META_Q_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_RD_STATUS__RESERVED2__SHIFT 0xd
+#define SDMA0_UTCL1_RD_STATUS__RESERVED3__SHIFT 0xe
+#define SDMA0_UTCL1_RD_STATUS__META_Q_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_RD_STATUS__RD_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA0_UTCL1_RD_STATUS__RD_REQRET_IDLE__SHIFT 0x11
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_IDLE__SHIFT 0x12
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_TYPE__SHIFT 0x13
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REG_READY__SHIFT 0x17
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA0_UTCL1_RD_STATUS__RESERVED4__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_IN_RTR__SHIFT 0x1c
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_RD_STATUS__INV_BUSY__SHIFT 0x1e
+#define SDMA0_UTCL1_RD_STATUS__DBIT_REQ_IDLE__SHIFT 0x1f
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED0_MASK 0x00000020L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED1_MASK 0x00000040L
+#define SDMA0_UTCL1_RD_STATUS__META_Q_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED2_MASK 0x00002000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED3_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_STATUS__META_Q_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_STATUS__RD_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQRET_IDLE_MASK 0x00020000L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_IDLE_MASK 0x00040000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_TYPE_MASK 0x00180000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED4_MASK 0x04000000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_IN_RTR_MASK 0x10000000L
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_RD_STATUS__INV_BUSY_MASK 0x40000000L
+#define SDMA0_UTCL1_RD_STATUS__DBIT_REQ_IDLE_MASK 0x80000000L
+//SDMA0_UTCL1_WR_STATUS
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_WR_STATUS__RESERVED0__SHIFT 0x7
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0xf
+#define SDMA0_UTCL1_WR_STATUS__WR_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA0_UTCL1_WR_STATUS__WR_REQRET_IDLE__SHIFT 0x11
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_IDLE__SHIFT 0x12
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_TYPE__SHIFT 0x13
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REG_READY__SHIFT 0x17
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL__SHIFT 0x1a
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_RTR__SHIFT 0x1c
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR__SHIFT 0x1e
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR__SHIFT 0x1f
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_WR_STATUS__RESERVED0_MASK 0x00000080L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_STATUS__WR_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQRET_IDLE_MASK 0x00020000L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_IDLE_MASK 0x00040000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_TYPE_MASK 0x00180000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL_MASK 0x04000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_RTR_MASK 0x10000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR_MASK 0x40000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR_MASK 0x80000000L
+//SDMA0_UTCL1_INV0
+#define SDMA0_UTCL1_INV0__INV_PROC_BUSY__SHIFT 0x0
+#define SDMA0_UTCL1_INV0__GPUVM_FRAG_SIZE__SHIFT 0x1
+#define SDMA0_UTCL1_INV0__GPUVM_VMID__SHIFT 0x7
+#define SDMA0_UTCL1_INV0__GPUVM_MODE__SHIFT 0xb
+#define SDMA0_UTCL1_INV0__GPUVM_HIGH__SHIFT 0xd
+#define SDMA0_UTCL1_INV0__GPUVM_TAG__SHIFT 0xe
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_HIGH__SHIFT 0x12
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_LOW__SHIFT 0x16
+#define SDMA0_UTCL1_INV0__INV_TYPE__SHIFT 0x1a
+#define SDMA0_UTCL1_INV0__INV_PROC_BUSY_MASK 0x00000001L
+#define SDMA0_UTCL1_INV0__GPUVM_FRAG_SIZE_MASK 0x0000007EL
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_MASK 0x00000780L
+#define SDMA0_UTCL1_INV0__GPUVM_MODE_MASK 0x00001800L
+#define SDMA0_UTCL1_INV0__GPUVM_HIGH_MASK 0x00002000L
+#define SDMA0_UTCL1_INV0__GPUVM_TAG_MASK 0x0003C000L
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_HIGH_MASK 0x003C0000L
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_LOW_MASK 0x03C00000L
+#define SDMA0_UTCL1_INV0__INV_TYPE_MASK 0x0C000000L
+//SDMA0_UTCL1_INV1
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_INV2
+#define SDMA0_UTCL1_INV2__CPF_VMID__SHIFT 0x0
+#define SDMA0_UTCL1_INV2__CPF_FLUSH_TYPE__SHIFT 0x10
+#define SDMA0_UTCL1_INV2__CPF_FRAG_SIZE__SHIFT 0x11
+#define SDMA0_UTCL1_INV2__CPF_VMID_MASK 0x0000FFFFL
+#define SDMA0_UTCL1_INV2__CPF_FLUSH_TYPE_MASK 0x00010000L
+#define SDMA0_UTCL1_INV2__CPF_FRAG_SIZE_MASK 0x007E0000L
+//SDMA0_UTCL1_RD_XNACK0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK1
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA0_UTCL1_WR_XNACK0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_WR_XNACK1
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA0_RELAX_ORDERING_LUT
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS_2
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA0_CHICKEN_BITS_2__UCODE_BUF_DS_EN__SHIFT 0x6
+#define SDMA0_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP__SHIFT 0x7
+#define SDMA0_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS_2__RESERVED_14_12__SHIFT 0xc
+#define SDMA0_CHICKEN_BITS_2__RESERVED_15__SHIFT 0xf
+#define SDMA0_CHICKEN_BITS_2__RB_FIFO_WATERMARK__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS_2__IB_FIFO_WATERMARK__SHIFT 0x12
+#define SDMA0_CHICKEN_BITS_2__RESERVED_22_20__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS_2__CH_RD_WATERMARK__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB__SHIFT 0x1e
+#define SDMA0_CHICKEN_BITS_2__PIO_VFID_SOURCE__SHIFT 0x1f
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA0_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+#define SDMA0_CHICKEN_BITS_2__UCODE_BUF_DS_EN_MASK 0x00000040L
+#define SDMA0_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP_MASK 0x00000080L
+#define SDMA0_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING_MASK 0x00000F00L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_14_12_MASK 0x00007000L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_15_MASK 0x00008000L
+#define SDMA0_CHICKEN_BITS_2__RB_FIFO_WATERMARK_MASK 0x00030000L
+#define SDMA0_CHICKEN_BITS_2__IB_FIFO_WATERMARK_MASK 0x000C0000L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_22_20_MASK 0x00700000L
+#define SDMA0_CHICKEN_BITS_2__CH_RD_WATERMARK_MASK 0x01800000L
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_MASK 0x3E000000L
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB_MASK 0x40000000L
+#define SDMA0_CHICKEN_BITS_2__PIO_VFID_SOURCE_MASK 0x80000000L
+//SDMA0_STATUS3_REG
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA0_STATUS3_REG__AQL_PREV_CMD_IDLE__SHIFT 0x15
+#define SDMA0_STATUS3_REG__TLBI_IDLE__SHIFT 0x16
+#define SDMA0_STATUS3_REG__GCR_IDLE__SHIFT 0x17
+#define SDMA0_STATUS3_REG__INVREQ_IDLE__SHIFT 0x18
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x19
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x1a
+#define SDMA0_STATUS3_REG__TMZ_MTYPE_STATUS__SHIFT 0x1e
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA0_STATUS3_REG__AQL_PREV_CMD_IDLE_MASK 0x00200000L
+#define SDMA0_STATUS3_REG__TLBI_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS3_REG__GCR_IDLE_MASK 0x00800000L
+#define SDMA0_STATUS3_REG__INVREQ_IDLE_MASK 0x01000000L
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x02000000L
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x3C000000L
+#define SDMA0_STATUS3_REG__TMZ_MTYPE_STATUS_MASK 0xC0000000L
+//SDMA0_PHYSICAL_ADDR_LO
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA0_PHYSICAL_ADDR_HI
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA0_GLOBAL_QUANTUM
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM__SHIFT 0x0
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM__SHIFT 0x8
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM_MASK 0x000000FFL
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM_MASK 0x0000FF00L
+//SDMA0_ERROR_LOG
+#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA0_PUB_DUMMY_REG0
+#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG1
+#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG2
+#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG3
+#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_F32_COUNTER
+#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_CRD_CNTL
+#define SDMA0_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA0_CRD_CNTL__CH_WRREQ_CREDIT__SHIFT 0x13
+#define SDMA0_CRD_CNTL__CH_RDREQ_CREDIT__SHIFT 0x19
+#define SDMA0_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+#define SDMA0_CRD_CNTL__CH_WRREQ_CREDIT_MASK 0x01F80000L
+#define SDMA0_CRD_CNTL__CH_RDREQ_CREDIT_MASK 0x7E000000L
+//SDMA0_RLC_CGCG_CTRL
+#define SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE__SHIFT 0x1
+#define SDMA0_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS__SHIFT 0x10
+#define SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS_MASK 0xFFFF0000L
+//SDMA0_GPU_IOV_VIOLATION_LOG
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA0_AQL_STATUS
+#define SDMA0_AQL_STATUS__COMPLETE_SIGNAL_EMPTY__SHIFT 0x0
+#define SDMA0_AQL_STATUS__INVALID_CMD_EMPTY__SHIFT 0x1
+#define SDMA0_AQL_STATUS__COMPLETE_SIGNAL_EMPTY_MASK 0x00000001L
+#define SDMA0_AQL_STATUS__INVALID_CMD_EMPTY_MASK 0x00000002L
+//SDMA0_EA_DBIT_ADDR_DATA
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_EA_DBIT_ADDR_INDEX
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA0_TLBI_GCR_CNTL
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CMD_DW__SHIFT 0x0
+#define SDMA0_TLBI_GCR_CNTL__GCR_CMD_DW__SHIFT 0x4
+#define SDMA0_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE__SHIFT 0x8
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CREDIT__SHIFT 0x10
+#define SDMA0_TLBI_GCR_CNTL__GCR_CREDIT__SHIFT 0x18
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CMD_DW_MASK 0x0000000FL
+#define SDMA0_TLBI_GCR_CNTL__GCR_CMD_DW_MASK 0x000000F0L
+#define SDMA0_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE_MASK 0x00000F00L
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CREDIT_MASK 0x00FF0000L
+#define SDMA0_TLBI_GCR_CNTL__GCR_CREDIT_MASK 0xFF000000L
+//SDMA0_TILING_CONFIG
+#define SDMA0_TILING_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define SDMA0_TILING_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//SDMA0_HASH
+#define SDMA0_HASH__CHANNEL_BITS__SHIFT 0x0
+#define SDMA0_HASH__BANK_BITS__SHIFT 0x4
+#define SDMA0_HASH__CHANNEL_XOR_COUNT__SHIFT 0x8
+#define SDMA0_HASH__BANK_XOR_COUNT__SHIFT 0xc
+#define SDMA0_HASH__CHANNEL_BITS_MASK 0x00000007L
+#define SDMA0_HASH__BANK_BITS_MASK 0x00000070L
+#define SDMA0_HASH__CHANNEL_XOR_COUNT_MASK 0x00000700L
+#define SDMA0_HASH__BANK_XOR_COUNT_MASK 0x00007000L
+//SDMA0_INT_STATUS
+#define SDMA0_INT_STATUS__DATA__SHIFT 0x0
+#define SDMA0_INT_STATUS__DATA_MASK 0xFFFFFFFFL
+//SDMA0_GPU_IOV_VIOLATION_LOG2
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA0_HOLE_ADDR_LO
+#define SDMA0_HOLE_ADDR_LO__VALUE__SHIFT 0x0
+#define SDMA0_HOLE_ADDR_LO__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_HOLE_ADDR_HI
+#define SDMA0_HOLE_ADDR_HI__VALUE__SHIFT 0x0
+#define SDMA0_HOLE_ADDR_HI__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_CLOCK_GATING_STATUS
+#define SDMA0_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS__SHIFT 0x0
+#define SDMA0_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS__SHIFT 0x2
+#define SDMA0_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS__SHIFT 0x3
+#define SDMA0_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS__SHIFT 0x4
+#define SDMA0_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS__SHIFT 0x5
+#define SDMA0_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS__SHIFT 0x6
+#define SDMA0_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS_MASK 0x00000001L
+#define SDMA0_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS_MASK 0x00000004L
+#define SDMA0_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS_MASK 0x00000008L
+#define SDMA0_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS_MASK 0x00000010L
+#define SDMA0_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS_MASK 0x00000020L
+#define SDMA0_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS_MASK 0x00000040L
+//SDMA0_STATUS4_REG
+#define SDMA0_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA0_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA0_STATUS4_REG__CH_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA0_STATUS4_REG__CH_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA0_STATUS4_REG__GCR_OUTSTANDING__SHIFT 0x6
+#define SDMA0_STATUS4_REG__TLBI_OUTSTANDING__SHIFT 0x7
+#define SDMA0_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x8
+#define SDMA0_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x9
+#define SDMA0_STATUS4_REG__REG_POLLING__SHIFT 0xa
+#define SDMA0_STATUS4_REG__MEM_POLLING__SHIFT 0xb
+#define SDMA0_STATUS4_REG__RESERVED_13_12__SHIFT 0xc
+#define SDMA0_STATUS4_REG__RESERVED_15_14__SHIFT 0xe
+#define SDMA0_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA0_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x14
+#define SDMA0_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x15
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_FAULT__SHIFT 0x16
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_NULL__SHIFT 0x17
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT__SHIFT 0x18
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_FAULT__SHIFT 0x19
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_NULL__SHIFT 0x1a
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT__SHIFT 0x1b
+#define SDMA0_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA0_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA0_STATUS4_REG__CH_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA0_STATUS4_REG__CH_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA0_STATUS4_REG__GCR_OUTSTANDING_MASK 0x00000040L
+#define SDMA0_STATUS4_REG__TLBI_OUTSTANDING_MASK 0x00000080L
+#define SDMA0_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000100L
+#define SDMA0_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000200L
+#define SDMA0_STATUS4_REG__REG_POLLING_MASK 0x00000400L
+#define SDMA0_STATUS4_REG__MEM_POLLING_MASK 0x00000800L
+#define SDMA0_STATUS4_REG__RESERVED_13_12_MASK 0x00003000L
+#define SDMA0_STATUS4_REG__RESERVED_15_14_MASK 0x0000C000L
+#define SDMA0_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA0_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00100000L
+#define SDMA0_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00200000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_FAULT_MASK 0x00400000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_NULL_MASK 0x00800000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT_MASK 0x01000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_FAULT_MASK 0x02000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_NULL_MASK 0x04000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT_MASK 0x08000000L
+//SDMA0_SCRATCH_RAM_DATA
+#define SDMA0_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA0_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA0_SCRATCH_RAM_ADDR
+#define SDMA0_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA0_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA0_TIMESTAMP_CNTL
+#define SDMA0_TIMESTAMP_CNTL__CAPTURE__SHIFT 0x0
+#define SDMA0_TIMESTAMP_CNTL__CAPTURE_MASK 0x00000001L
+//SDMA0_STATUS5_REG
+#define SDMA0_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS__SHIFT 0x0
+#define SDMA0_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS__SHIFT 0x1
+#define SDMA0_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS__SHIFT 0x2
+#define SDMA0_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS__SHIFT 0x3
+#define SDMA0_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS__SHIFT 0x4
+#define SDMA0_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS__SHIFT 0x5
+#define SDMA0_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS__SHIFT 0x6
+#define SDMA0_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS__SHIFT 0x7
+#define SDMA0_STATUS5_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA0_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x14
+#define SDMA0_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x15
+#define SDMA0_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x16
+#define SDMA0_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x17
+#define SDMA0_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x18
+#define SDMA0_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x19
+#define SDMA0_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1a
+#define SDMA0_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1b
+#define SDMA0_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS_MASK 0x00000001L
+#define SDMA0_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS_MASK 0x00000002L
+#define SDMA0_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS_MASK 0x00000004L
+#define SDMA0_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS_MASK 0x00000008L
+#define SDMA0_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS_MASK 0x00000010L
+#define SDMA0_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS_MASK 0x00000020L
+#define SDMA0_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS_MASK 0x00000040L
+#define SDMA0_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS_MASK 0x00000080L
+#define SDMA0_STATUS5_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA0_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00100000L
+#define SDMA0_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00200000L
+#define SDMA0_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00400000L
+#define SDMA0_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00800000L
+#define SDMA0_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION_MASK 0x01000000L
+#define SDMA0_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION_MASK 0x02000000L
+#define SDMA0_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION_MASK 0x04000000L
+#define SDMA0_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION_MASK 0x08000000L
+//SDMA0_QUEUE_RESET_REQ
+#define SDMA0_QUEUE_RESET_REQ__QUEUE0_RESET__SHIFT 0x0
+#define SDMA0_QUEUE_RESET_REQ__QUEUE1_RESET__SHIFT 0x1
+#define SDMA0_QUEUE_RESET_REQ__QUEUE2_RESET__SHIFT 0x2
+#define SDMA0_QUEUE_RESET_REQ__QUEUE3_RESET__SHIFT 0x3
+#define SDMA0_QUEUE_RESET_REQ__QUEUE4_RESET__SHIFT 0x4
+#define SDMA0_QUEUE_RESET_REQ__QUEUE5_RESET__SHIFT 0x5
+#define SDMA0_QUEUE_RESET_REQ__QUEUE6_RESET__SHIFT 0x6
+#define SDMA0_QUEUE_RESET_REQ__QUEUE7_RESET__SHIFT 0x7
+#define SDMA0_QUEUE_RESET_REQ__RESERVED__SHIFT 0x8
+#define SDMA0_QUEUE_RESET_REQ__QUEUE0_RESET_MASK 0x00000001L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE1_RESET_MASK 0x00000002L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE2_RESET_MASK 0x00000004L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE3_RESET_MASK 0x00000008L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE4_RESET_MASK 0x00000010L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE5_RESET_MASK 0x00000020L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE6_RESET_MASK 0x00000040L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE7_RESET_MASK 0x00000080L
+#define SDMA0_QUEUE_RESET_REQ__RESERVED_MASK 0xFFFFFF00L
+//SDMA0_STATUS6_REG
+#define SDMA0_STATUS6_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS6_REG__TH1F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS6_REG__TH1_EXCEPTION__SHIFT 0x10
+#define SDMA0_STATUS6_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS6_REG__TH1F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA0_STATUS6_REG__TH1_EXCEPTION_MASK 0xFFFF0000L
+//SDMA0_UCODE1_CHECKSUM
+#define SDMA0_UCODE1_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE1_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_CE_CTRL
+#define SDMA0_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA0_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA0_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA0_CE_CTRL__CE_DCC_READ_128B_ENABLE__SHIFT 0x8
+#define SDMA0_CE_CTRL__RESERVED__SHIFT 0x9
+#define SDMA0_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA0_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA0_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA0_CE_CTRL__CE_DCC_READ_128B_ENABLE_MASK 0x00000100L
+#define SDMA0_CE_CTRL__RESERVED_MASK 0xFFFFFE00L
+//SDMA0_FED_STATUS
+#define SDMA0_FED_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA0_FED_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA0_FED_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA0_FED_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA0_FED_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA0_FED_STATUS__COPY_METADATA_ECC__SHIFT 0x5
+#define SDMA0_FED_STATUS__SELFLOAD_UCODE_ECC__SHIFT 0x6
+#define SDMA0_FED_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA0_FED_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA0_FED_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA0_FED_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA0_FED_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA0_FED_STATUS__COPY_METADATA_ECC_MASK 0x00000020L
+#define SDMA0_FED_STATUS__SELFLOAD_UCODE_ECC_MASK 0x00000040L
+//SDMA0_QUEUE0_RB_CNTL
+#define SDMA0_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE0_RB_BASE
+#define SDMA0_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_BASE_HI
+#define SDMA0_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE0_RB_RPTR
+#define SDMA0_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_HI
+#define SDMA0_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR
+#define SDMA0_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_HI
+#define SDMA0_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_IB_CNTL
+#define SDMA0_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE0_IB_RPTR
+#define SDMA0_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE0_IB_OFFSET
+#define SDMA0_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE0_IB_BASE_LO
+#define SDMA0_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE0_IB_BASE_HI
+#define SDMA0_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_IB_SIZE
+#define SDMA0_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE0_SKIP_CNTL
+#define SDMA0_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE0_CONTEXT_STATUS
+#define SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE0_CONTEXT_STATUS__USE_IB__SHIFT 0x1
+#define SDMA0_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__USE_IB_MASK 0x00000002L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE0_DOORBELL
+#define SDMA0_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE0_DOORBELL_LOG
+#define SDMA0_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_DOORBELL_OFFSET
+#define SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE0_CSA_ADDR_LO
+#define SDMA0_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_CSA_ADDR_HI
+#define SDMA0_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_SCHEDULE_CNTL
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE0_IB_SUB_REMAIN
+#define SDMA0_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE0_PREEMPT
+#define SDMA0_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE0_DUMMY_REG
+#define SDMA0_QUEUE0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_RB_AQL_CNTL
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE0_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE0_RB_PREEMPT
+#define SDMA0_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE0_MIDCMD_DATA0
+#define SDMA0_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA1
+#define SDMA0_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA2
+#define SDMA0_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA3
+#define SDMA0_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA4
+#define SDMA0_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA5
+#define SDMA0_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA6
+#define SDMA0_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA7
+#define SDMA0_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA8
+#define SDMA0_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA9
+#define SDMA0_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA10
+#define SDMA0_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_CNTL
+#define SDMA0_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE1_RB_CNTL
+#define SDMA0_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE1_RB_BASE
+#define SDMA0_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_BASE_HI
+#define SDMA0_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE1_RB_RPTR
+#define SDMA0_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_HI
+#define SDMA0_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR
+#define SDMA0_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_HI
+#define SDMA0_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_IB_CNTL
+#define SDMA0_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE1_IB_RPTR
+#define SDMA0_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE1_IB_OFFSET
+#define SDMA0_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE1_IB_BASE_LO
+#define SDMA0_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE1_IB_BASE_HI
+#define SDMA0_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_IB_SIZE
+#define SDMA0_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE1_SKIP_CNTL
+#define SDMA0_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE1_CONTEXT_STATUS
+#define SDMA0_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE1_DOORBELL
+#define SDMA0_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE1_DOORBELL_LOG
+#define SDMA0_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_DOORBELL_OFFSET
+#define SDMA0_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE1_CSA_ADDR_LO
+#define SDMA0_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_CSA_ADDR_HI
+#define SDMA0_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_SCHEDULE_CNTL
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE1_IB_SUB_REMAIN
+#define SDMA0_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE1_PREEMPT
+#define SDMA0_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE1_DUMMY_REG
+#define SDMA0_QUEUE1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_RB_AQL_CNTL
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE1_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE1_RB_PREEMPT
+#define SDMA0_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE1_MIDCMD_DATA0
+#define SDMA0_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA1
+#define SDMA0_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA2
+#define SDMA0_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA3
+#define SDMA0_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA4
+#define SDMA0_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA5
+#define SDMA0_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA6
+#define SDMA0_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA7
+#define SDMA0_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA8
+#define SDMA0_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA9
+#define SDMA0_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA10
+#define SDMA0_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_CNTL
+#define SDMA0_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE2_RB_CNTL
+#define SDMA0_QUEUE2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE2_RB_BASE
+#define SDMA0_QUEUE2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_BASE_HI
+#define SDMA0_QUEUE2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE2_RB_RPTR
+#define SDMA0_QUEUE2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_HI
+#define SDMA0_QUEUE2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR
+#define SDMA0_QUEUE2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_HI
+#define SDMA0_QUEUE2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_IB_CNTL
+#define SDMA0_QUEUE2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE2_IB_RPTR
+#define SDMA0_QUEUE2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE2_IB_OFFSET
+#define SDMA0_QUEUE2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE2_IB_BASE_LO
+#define SDMA0_QUEUE2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE2_IB_BASE_HI
+#define SDMA0_QUEUE2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_IB_SIZE
+#define SDMA0_QUEUE2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE2_SKIP_CNTL
+#define SDMA0_QUEUE2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE2_CONTEXT_STATUS
+#define SDMA0_QUEUE2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE2_DOORBELL
+#define SDMA0_QUEUE2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE2_DOORBELL_LOG
+#define SDMA0_QUEUE2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_DOORBELL_OFFSET
+#define SDMA0_QUEUE2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE2_CSA_ADDR_LO
+#define SDMA0_QUEUE2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_CSA_ADDR_HI
+#define SDMA0_QUEUE2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_SCHEDULE_CNTL
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE2_IB_SUB_REMAIN
+#define SDMA0_QUEUE2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE2_PREEMPT
+#define SDMA0_QUEUE2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE2_DUMMY_REG
+#define SDMA0_QUEUE2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_RB_AQL_CNTL
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE2_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE2_RB_PREEMPT
+#define SDMA0_QUEUE2_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE2_MIDCMD_DATA0
+#define SDMA0_QUEUE2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA1
+#define SDMA0_QUEUE2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA2
+#define SDMA0_QUEUE2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA3
+#define SDMA0_QUEUE2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA4
+#define SDMA0_QUEUE2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA5
+#define SDMA0_QUEUE2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA6
+#define SDMA0_QUEUE2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA7
+#define SDMA0_QUEUE2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA8
+#define SDMA0_QUEUE2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA9
+#define SDMA0_QUEUE2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA10
+#define SDMA0_QUEUE2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_CNTL
+#define SDMA0_QUEUE2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE3_RB_CNTL
+#define SDMA0_QUEUE3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE3_RB_BASE
+#define SDMA0_QUEUE3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_BASE_HI
+#define SDMA0_QUEUE3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE3_RB_RPTR
+#define SDMA0_QUEUE3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_HI
+#define SDMA0_QUEUE3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR
+#define SDMA0_QUEUE3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_HI
+#define SDMA0_QUEUE3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_IB_CNTL
+#define SDMA0_QUEUE3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE3_IB_RPTR
+#define SDMA0_QUEUE3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE3_IB_OFFSET
+#define SDMA0_QUEUE3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE3_IB_BASE_LO
+#define SDMA0_QUEUE3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE3_IB_BASE_HI
+#define SDMA0_QUEUE3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_IB_SIZE
+#define SDMA0_QUEUE3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE3_SKIP_CNTL
+#define SDMA0_QUEUE3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE3_CONTEXT_STATUS
+#define SDMA0_QUEUE3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE3_DOORBELL
+#define SDMA0_QUEUE3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE3_DOORBELL_LOG
+#define SDMA0_QUEUE3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_DOORBELL_OFFSET
+#define SDMA0_QUEUE3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE3_CSA_ADDR_LO
+#define SDMA0_QUEUE3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_CSA_ADDR_HI
+#define SDMA0_QUEUE3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_SCHEDULE_CNTL
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE3_IB_SUB_REMAIN
+#define SDMA0_QUEUE3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE3_PREEMPT
+#define SDMA0_QUEUE3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE3_DUMMY_REG
+#define SDMA0_QUEUE3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_RB_AQL_CNTL
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE3_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE3_RB_PREEMPT
+#define SDMA0_QUEUE3_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE3_MIDCMD_DATA0
+#define SDMA0_QUEUE3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA1
+#define SDMA0_QUEUE3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA2
+#define SDMA0_QUEUE3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA3
+#define SDMA0_QUEUE3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA4
+#define SDMA0_QUEUE3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA5
+#define SDMA0_QUEUE3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA6
+#define SDMA0_QUEUE3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA7
+#define SDMA0_QUEUE3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA8
+#define SDMA0_QUEUE3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA9
+#define SDMA0_QUEUE3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA10
+#define SDMA0_QUEUE3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_CNTL
+#define SDMA0_QUEUE3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE4_RB_CNTL
+#define SDMA0_QUEUE4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE4_RB_BASE
+#define SDMA0_QUEUE4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_BASE_HI
+#define SDMA0_QUEUE4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE4_RB_RPTR
+#define SDMA0_QUEUE4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_HI
+#define SDMA0_QUEUE4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR
+#define SDMA0_QUEUE4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_HI
+#define SDMA0_QUEUE4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_IB_CNTL
+#define SDMA0_QUEUE4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE4_IB_RPTR
+#define SDMA0_QUEUE4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE4_IB_OFFSET
+#define SDMA0_QUEUE4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE4_IB_BASE_LO
+#define SDMA0_QUEUE4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE4_IB_BASE_HI
+#define SDMA0_QUEUE4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_IB_SIZE
+#define SDMA0_QUEUE4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE4_SKIP_CNTL
+#define SDMA0_QUEUE4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE4_CONTEXT_STATUS
+#define SDMA0_QUEUE4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE4_DOORBELL
+#define SDMA0_QUEUE4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE4_DOORBELL_LOG
+#define SDMA0_QUEUE4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_DOORBELL_OFFSET
+#define SDMA0_QUEUE4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE4_CSA_ADDR_LO
+#define SDMA0_QUEUE4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_CSA_ADDR_HI
+#define SDMA0_QUEUE4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_SCHEDULE_CNTL
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE4_IB_SUB_REMAIN
+#define SDMA0_QUEUE4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE4_PREEMPT
+#define SDMA0_QUEUE4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE4_DUMMY_REG
+#define SDMA0_QUEUE4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_RB_AQL_CNTL
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE4_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE4_RB_PREEMPT
+#define SDMA0_QUEUE4_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE4_MIDCMD_DATA0
+#define SDMA0_QUEUE4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA1
+#define SDMA0_QUEUE4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA2
+#define SDMA0_QUEUE4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA3
+#define SDMA0_QUEUE4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA4
+#define SDMA0_QUEUE4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA5
+#define SDMA0_QUEUE4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA6
+#define SDMA0_QUEUE4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA7
+#define SDMA0_QUEUE4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA8
+#define SDMA0_QUEUE4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA9
+#define SDMA0_QUEUE4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA10
+#define SDMA0_QUEUE4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_CNTL
+#define SDMA0_QUEUE4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE5_RB_CNTL
+#define SDMA0_QUEUE5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE5_RB_BASE
+#define SDMA0_QUEUE5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_BASE_HI
+#define SDMA0_QUEUE5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE5_RB_RPTR
+#define SDMA0_QUEUE5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_HI
+#define SDMA0_QUEUE5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR
+#define SDMA0_QUEUE5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_HI
+#define SDMA0_QUEUE5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_IB_CNTL
+#define SDMA0_QUEUE5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE5_IB_RPTR
+#define SDMA0_QUEUE5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE5_IB_OFFSET
+#define SDMA0_QUEUE5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE5_IB_BASE_LO
+#define SDMA0_QUEUE5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE5_IB_BASE_HI
+#define SDMA0_QUEUE5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_IB_SIZE
+#define SDMA0_QUEUE5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE5_SKIP_CNTL
+#define SDMA0_QUEUE5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE5_CONTEXT_STATUS
+#define SDMA0_QUEUE5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE5_DOORBELL
+#define SDMA0_QUEUE5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE5_DOORBELL_LOG
+#define SDMA0_QUEUE5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_DOORBELL_OFFSET
+#define SDMA0_QUEUE5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE5_CSA_ADDR_LO
+#define SDMA0_QUEUE5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_CSA_ADDR_HI
+#define SDMA0_QUEUE5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_SCHEDULE_CNTL
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE5_IB_SUB_REMAIN
+#define SDMA0_QUEUE5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE5_PREEMPT
+#define SDMA0_QUEUE5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE5_DUMMY_REG
+#define SDMA0_QUEUE5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_RB_AQL_CNTL
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE5_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE5_RB_PREEMPT
+#define SDMA0_QUEUE5_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE5_MIDCMD_DATA0
+#define SDMA0_QUEUE5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA1
+#define SDMA0_QUEUE5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA2
+#define SDMA0_QUEUE5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA3
+#define SDMA0_QUEUE5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA4
+#define SDMA0_QUEUE5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA5
+#define SDMA0_QUEUE5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA6
+#define SDMA0_QUEUE5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA7
+#define SDMA0_QUEUE5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA8
+#define SDMA0_QUEUE5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA9
+#define SDMA0_QUEUE5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA10
+#define SDMA0_QUEUE5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_CNTL
+#define SDMA0_QUEUE5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE6_RB_CNTL
+#define SDMA0_QUEUE6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE6_RB_BASE
+#define SDMA0_QUEUE6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_BASE_HI
+#define SDMA0_QUEUE6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE6_RB_RPTR
+#define SDMA0_QUEUE6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_HI
+#define SDMA0_QUEUE6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR
+#define SDMA0_QUEUE6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_HI
+#define SDMA0_QUEUE6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_IB_CNTL
+#define SDMA0_QUEUE6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE6_IB_RPTR
+#define SDMA0_QUEUE6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE6_IB_OFFSET
+#define SDMA0_QUEUE6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE6_IB_BASE_LO
+#define SDMA0_QUEUE6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE6_IB_BASE_HI
+#define SDMA0_QUEUE6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_IB_SIZE
+#define SDMA0_QUEUE6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE6_SKIP_CNTL
+#define SDMA0_QUEUE6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE6_CONTEXT_STATUS
+#define SDMA0_QUEUE6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE6_DOORBELL
+#define SDMA0_QUEUE6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE6_DOORBELL_LOG
+#define SDMA0_QUEUE6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_DOORBELL_OFFSET
+#define SDMA0_QUEUE6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE6_CSA_ADDR_LO
+#define SDMA0_QUEUE6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_CSA_ADDR_HI
+#define SDMA0_QUEUE6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_SCHEDULE_CNTL
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE6_IB_SUB_REMAIN
+#define SDMA0_QUEUE6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE6_PREEMPT
+#define SDMA0_QUEUE6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE6_DUMMY_REG
+#define SDMA0_QUEUE6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_RB_AQL_CNTL
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE6_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE6_RB_PREEMPT
+#define SDMA0_QUEUE6_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE6_MIDCMD_DATA0
+#define SDMA0_QUEUE6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA1
+#define SDMA0_QUEUE6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA2
+#define SDMA0_QUEUE6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA3
+#define SDMA0_QUEUE6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA4
+#define SDMA0_QUEUE6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA5
+#define SDMA0_QUEUE6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA6
+#define SDMA0_QUEUE6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA7
+#define SDMA0_QUEUE6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA8
+#define SDMA0_QUEUE6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA9
+#define SDMA0_QUEUE6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA10
+#define SDMA0_QUEUE6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_CNTL
+#define SDMA0_QUEUE6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE7_RB_CNTL
+#define SDMA0_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE7_RB_BASE
+#define SDMA0_QUEUE7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_BASE_HI
+#define SDMA0_QUEUE7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE7_RB_RPTR
+#define SDMA0_QUEUE7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_HI
+#define SDMA0_QUEUE7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR
+#define SDMA0_QUEUE7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_HI
+#define SDMA0_QUEUE7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_IB_CNTL
+#define SDMA0_QUEUE7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE7_IB_RPTR
+#define SDMA0_QUEUE7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE7_IB_OFFSET
+#define SDMA0_QUEUE7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE7_IB_BASE_LO
+#define SDMA0_QUEUE7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE7_IB_BASE_HI
+#define SDMA0_QUEUE7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_IB_SIZE
+#define SDMA0_QUEUE7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE7_SKIP_CNTL
+#define SDMA0_QUEUE7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE7_CONTEXT_STATUS
+#define SDMA0_QUEUE7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE7_DOORBELL
+#define SDMA0_QUEUE7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE7_DOORBELL_LOG
+#define SDMA0_QUEUE7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_DOORBELL_OFFSET
+#define SDMA0_QUEUE7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE7_CSA_ADDR_LO
+#define SDMA0_QUEUE7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_CSA_ADDR_HI
+#define SDMA0_QUEUE7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_SCHEDULE_CNTL
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE7_IB_SUB_REMAIN
+#define SDMA0_QUEUE7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE7_PREEMPT
+#define SDMA0_QUEUE7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE7_DUMMY_REG
+#define SDMA0_QUEUE7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_RB_AQL_CNTL
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE7_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE7_RB_PREEMPT
+#define SDMA0_QUEUE7_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE7_MIDCMD_DATA0
+#define SDMA0_QUEUE7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA1
+#define SDMA0_QUEUE7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA2
+#define SDMA0_QUEUE7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA3
+#define SDMA0_QUEUE7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA4
+#define SDMA0_QUEUE7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA5
+#define SDMA0_QUEUE7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA6
+#define SDMA0_QUEUE7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA7
+#define SDMA0_QUEUE7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA8
+#define SDMA0_QUEUE7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA9
+#define SDMA0_QUEUE7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA10
+#define SDMA0_QUEUE7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_CNTL
+#define SDMA0_QUEUE7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+
+// addressBlock: gc_sdma0_sdma1dec
+//SDMA1_DEC_START
+#define SDMA1_DEC_START__START__SHIFT 0x0
+#define SDMA1_DEC_START__START_MASK 0xFFFFFFFFL
+//SDMA1_F32_MISC_CNTL
+#define SDMA1_F32_MISC_CNTL__F32_WAKEUP__SHIFT 0x0
+#define SDMA1_F32_MISC_CNTL__F32_WAKEUP_MASK 0x00000001L
+//SDMA1_GLOBAL_TIMESTAMP_LO
+#define SDMA1_GLOBAL_TIMESTAMP_LO__DATA__SHIFT 0x0
+#define SDMA1_GLOBAL_TIMESTAMP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_GLOBAL_TIMESTAMP_HI
+#define SDMA1_GLOBAL_TIMESTAMP_HI__DATA__SHIFT 0x0
+#define SDMA1_GLOBAL_TIMESTAMP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_POWER_CNTL
+#define SDMA1_POWER_CNTL__LS_ENABLE__SHIFT 0x8
+#define SDMA1_POWER_CNTL__LS_ENABLE_MASK 0x00000100L
+//SDMA1_CNTL
+#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA1_CNTL__PIO_DONE_ACK_ENABLE__SHIFT 0x6
+#define SDMA1_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE__SHIFT 0x8
+#define SDMA1_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x9
+#define SDMA1_CNTL__CP_MES_INT_ENABLE__SHIFT 0xa
+#define SDMA1_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA1_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA1_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA1_CNTL__CH_PERFCNT_ENABLE__SHIFT 0x10
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA1_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA1_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA1_CNTL__PIO_DONE_ACK_ENABLE_MASK 0x00000040L
+#define SDMA1_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE_MASK 0x00000100L
+#define SDMA1_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000200L
+#define SDMA1_CNTL__CP_MES_INT_ENABLE_MASK 0x00000400L
+#define SDMA1_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA1_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA1_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA1_CNTL__CH_PERFCNT_ENABLE_MASK 0x00010000L
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA1_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA1_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA1_CHICKEN_BITS__BACK_COMPAT_ENABLE__SHIFT 0x3
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x5
+#define SDMA1_CHICKEN_BITS__RD_BURST__SHIFT 0x6
+#define SDMA1_CHICKEN_BITS__WR_BURST__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE__SHIFT 0xa
+#define SDMA1_CHICKEN_BITS__WR_COMBINE_256B_ENABLE__SHIFT 0xe
+#define SDMA1_CHICKEN_BITS__RD_COMBINE_256B_ENABLE__SHIFT 0xf
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA1_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG__SHIFT 0x13
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG__SHIFT 0x15
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG__SHIFT 0x16
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x18
+#define SDMA1_CHICKEN_BITS__SW_FREEZE_ENABLE__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x1a
+#define SDMA1_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA1_CHICKEN_BITS__BACK_COMPAT_ENABLE_MASK 0x00000008L
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00000020L
+#define SDMA1_CHICKEN_BITS__RD_BURST_MASK 0x000000C0L
+#define SDMA1_CHICKEN_BITS__WR_BURST_MASK 0x00000300L
+#define SDMA1_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE_MASK 0x00003C00L
+#define SDMA1_CHICKEN_BITS__WR_COMBINE_256B_ENABLE_MASK 0x00004000L
+#define SDMA1_CHICKEN_BITS__RD_COMBINE_256B_ENABLE_MASK 0x00008000L
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA1_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG_MASK 0x00080000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG_MASK 0x00100000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG_MASK 0x00200000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG_MASK 0x00400000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG_MASK 0x00800000L
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x01000000L
+#define SDMA1_CHICKEN_BITS__SW_FREEZE_ENABLE_MASK 0x02000000L
+#define SDMA1_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL_MASK 0x04000000L
+#define SDMA1_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA1_GB_ADDR_CONFIG
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA1_GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA1_GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA1_GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA1_GB_ADDR_CONFIG_READ
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA1_RB_RPTR_FETCH
+#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA1_RB_RPTR_FETCH_HI
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA1_IB_OFFSET_FETCH
+#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PROGRAM
+#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA1_STATUS_REG
+#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA1_STATUS_REG__CGCG_FENCE__SHIFT 0xb
+#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA1_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA1_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA1_STATUS_REG__CGCG_FENCE_MASK 0x00000800L
+#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA1_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA1_STATUS1_REG
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA1_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA1_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xd
+#define SDMA1_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define SDMA1_STATUS1_REG__SEC_INTR_STATUS__SHIFT 0x11
+#define SDMA1_STATUS1_REG__WPTR_POLL_IDLE__SHIFT 0x12
+#define SDMA1_STATUS1_REG__SDMA_IDLE__SHIFT 0x13
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA1_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define SDMA1_STATUS1_REG__EX_START_MASK 0x00002000L
+#define SDMA1_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00004000L
+#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+#define SDMA1_STATUS1_REG__SEC_INTR_STATUS_MASK 0x00020000L
+#define SDMA1_STATUS1_REG__WPTR_POLL_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS1_REG__SDMA_IDLE_MASK 0x00080000L
+//SDMA1_CNTL1
+#define SDMA1_CNTL1__WPTR_POLL_FREQUENCY__SHIFT 0x2
+#define SDMA1_CNTL1__WPTR_POLL_FREQUENCY_MASK 0x0000FFFCL
+//SDMA1_HBM_PAGE_CONFIG
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA1_UCODE_CHECKSUM
+#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_FREEZE
+#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA1_PROCESS_QUANTUM0
+#define SDMA1_PROCESS_QUANTUM0__PROCESS0_QUANTUM__SHIFT 0x0
+#define SDMA1_PROCESS_QUANTUM0__PROCESS1_QUANTUM__SHIFT 0x8
+#define SDMA1_PROCESS_QUANTUM0__PROCESS2_QUANTUM__SHIFT 0x10
+#define SDMA1_PROCESS_QUANTUM0__PROCESS3_QUANTUM__SHIFT 0x18
+#define SDMA1_PROCESS_QUANTUM0__PROCESS0_QUANTUM_MASK 0x000000FFL
+#define SDMA1_PROCESS_QUANTUM0__PROCESS1_QUANTUM_MASK 0x0000FF00L
+#define SDMA1_PROCESS_QUANTUM0__PROCESS2_QUANTUM_MASK 0x00FF0000L
+#define SDMA1_PROCESS_QUANTUM0__PROCESS3_QUANTUM_MASK 0xFF000000L
+//SDMA1_PROCESS_QUANTUM1
+#define SDMA1_PROCESS_QUANTUM1__PROCESS4_QUANTUM__SHIFT 0x0
+#define SDMA1_PROCESS_QUANTUM1__PROCESS5_QUANTUM__SHIFT 0x8
+#define SDMA1_PROCESS_QUANTUM1__PROCESS6_QUANTUM__SHIFT 0x10
+#define SDMA1_PROCESS_QUANTUM1__PROCESS7_QUANTUM__SHIFT 0x18
+#define SDMA1_PROCESS_QUANTUM1__PROCESS4_QUANTUM_MASK 0x000000FFL
+#define SDMA1_PROCESS_QUANTUM1__PROCESS5_QUANTUM_MASK 0x0000FF00L
+#define SDMA1_PROCESS_QUANTUM1__PROCESS6_QUANTUM_MASK 0x00FF0000L
+#define SDMA1_PROCESS_QUANTUM1__PROCESS7_QUANTUM_MASK 0xFF000000L
+//SDMA1_WATCHDOG_CNTL
+#define SDMA1_WATCHDOG_CNTL__QUEUE_HANG_COUNT__SHIFT 0x0
+#define SDMA1_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT__SHIFT 0x8
+#define SDMA1_WATCHDOG_CNTL__QUEUE_HANG_COUNT_MASK 0x000000FFL
+#define SDMA1_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT_MASK 0x0000FF00L
+//SDMA1_QUEUE_STATUS0
+#define SDMA1_QUEUE_STATUS0__QUEUE0_STATUS__SHIFT 0x0
+#define SDMA1_QUEUE_STATUS0__QUEUE1_STATUS__SHIFT 0x4
+#define SDMA1_QUEUE_STATUS0__QUEUE2_STATUS__SHIFT 0x8
+#define SDMA1_QUEUE_STATUS0__QUEUE3_STATUS__SHIFT 0xc
+#define SDMA1_QUEUE_STATUS0__QUEUE4_STATUS__SHIFT 0x10
+#define SDMA1_QUEUE_STATUS0__QUEUE5_STATUS__SHIFT 0x14
+#define SDMA1_QUEUE_STATUS0__QUEUE6_STATUS__SHIFT 0x18
+#define SDMA1_QUEUE_STATUS0__QUEUE7_STATUS__SHIFT 0x1c
+#define SDMA1_QUEUE_STATUS0__QUEUE0_STATUS_MASK 0x0000000FL
+#define SDMA1_QUEUE_STATUS0__QUEUE1_STATUS_MASK 0x000000F0L
+#define SDMA1_QUEUE_STATUS0__QUEUE2_STATUS_MASK 0x00000F00L
+#define SDMA1_QUEUE_STATUS0__QUEUE3_STATUS_MASK 0x0000F000L
+#define SDMA1_QUEUE_STATUS0__QUEUE4_STATUS_MASK 0x000F0000L
+#define SDMA1_QUEUE_STATUS0__QUEUE5_STATUS_MASK 0x00F00000L
+#define SDMA1_QUEUE_STATUS0__QUEUE6_STATUS_MASK 0x0F000000L
+#define SDMA1_QUEUE_STATUS0__QUEUE7_STATUS_MASK 0xF0000000L
+//SDMA1_EDC_CONFIG
+#define SDMA1_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA1_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA1_BA_THRESHOLD
+#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA1_ID
+#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA1_VERSION
+#define SDMA1_VERSION__MINVER__SHIFT 0x0
+#define SDMA1_VERSION__MAJVER__SHIFT 0x8
+#define SDMA1_VERSION__REV__SHIFT 0x10
+#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA1_VERSION__REV_MASK 0x003F0000L
+//SDMA1_EDC_COUNTER
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA1_EDC_COUNTER_CLEAR
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA1_STATUS2_REG
+#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS2_REG__TH0F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS2_REG__TH0F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA1_ATOMIC_CNTL
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA1_ATOMIC_PREOP_LO
+#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_ATOMIC_PREOP_HI
+#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_CNTL
+#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x0
+#define SDMA1_UTCL1_CNTL__PAGE_WAIT_DELAY__SHIFT 0x5
+#define SDMA1_UTCL1_CNTL__RESP_MODE__SHIFT 0x9
+#define SDMA1_UTCL1_CNTL__FORCE_INVALIDATION__SHIFT 0xe
+#define SDMA1_UTCL1_CNTL__FORCE_INVREQ_HEAVY__SHIFT 0xf
+#define SDMA1_UTCL1_CNTL__WR_EXE_PERMS_CTRL__SHIFT 0x10
+#define SDMA1_UTCL1_CNTL__RD_EXE_PERMS_CTRL__SHIFT 0x11
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0x12
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x0000001FL
+#define SDMA1_UTCL1_CNTL__PAGE_WAIT_DELAY_MASK 0x000001E0L
+#define SDMA1_UTCL1_CNTL__RESP_MODE_MASK 0x00000600L
+#define SDMA1_UTCL1_CNTL__FORCE_INVALIDATION_MASK 0x00004000L
+#define SDMA1_UTCL1_CNTL__FORCE_INVREQ_HEAVY_MASK 0x00008000L
+#define SDMA1_UTCL1_CNTL__WR_EXE_PERMS_CTRL_MASK 0x00010000L
+#define SDMA1_UTCL1_CNTL__RD_EXE_PERMS_CTRL_MASK 0x00020000L
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x003C0000L
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x3F000000L
+//SDMA1_UTCL1_WATERMK
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK__SHIFT 0x0
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP__SHIFT 0x4
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK__SHIFT 0x6
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP__SHIFT 0xa
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK__SHIFT 0xc
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP__SHIFT 0x10
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK__SHIFT 0x12
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP__SHIFT 0x16
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK_MASK 0x0000000FL
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP_MASK 0x00000030L
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK_MASK 0x000003C0L
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP_MASK 0x00000C00L
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK_MASK 0x0000F000L
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP_MASK 0x00030000L
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK_MASK 0x003C0000L
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP_MASK 0x00C00000L
+//SDMA1_UTCL1_TIMEOUT
+#define SDMA1_UTCL1_TIMEOUT__XNACK_LIMIT__SHIFT 0x0
+#define SDMA1_UTCL1_TIMEOUT__XNACK_LIMIT_MASK 0x0000FFFFL
+//SDMA1_UTCL1_PAGE
+#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA1_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0xa
+#define SDMA1_UTCL1_PAGE__USE_IO__SHIFT 0xb
+#define SDMA1_UTCL1_PAGE__RD_L2_POLICY__SHIFT 0xc
+#define SDMA1_UTCL1_PAGE__WR_L2_POLICY__SHIFT 0xe
+#define SDMA1_UTCL1_PAGE__DMA_PAGE_SIZE__SHIFT 0x10
+#define SDMA1_UTCL1_PAGE__USE_BC__SHIFT 0x16
+#define SDMA1_UTCL1_PAGE__ADDR_IS_PA__SHIFT 0x17
+#define SDMA1_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0x18
+#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA1_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000003C0L
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000400L
+#define SDMA1_UTCL1_PAGE__USE_IO_MASK 0x00000800L
+#define SDMA1_UTCL1_PAGE__RD_L2_POLICY_MASK 0x00003000L
+#define SDMA1_UTCL1_PAGE__WR_L2_POLICY_MASK 0x0000C000L
+#define SDMA1_UTCL1_PAGE__DMA_PAGE_SIZE_MASK 0x003F0000L
+#define SDMA1_UTCL1_PAGE__USE_BC_MASK 0x00400000L
+#define SDMA1_UTCL1_PAGE__ADDR_IS_PA_MASK 0x00800000L
+#define SDMA1_UTCL1_PAGE__LLC_NOALLOC_MASK 0x01000000L
+//SDMA1_UTCL1_RD_STATUS
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_RD_STATUS__RESERVED0__SHIFT 0x5
+#define SDMA1_UTCL1_RD_STATUS__RESERVED1__SHIFT 0x6
+#define SDMA1_UTCL1_RD_STATUS__META_Q_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_RD_STATUS__RESERVED2__SHIFT 0xd
+#define SDMA1_UTCL1_RD_STATUS__RESERVED3__SHIFT 0xe
+#define SDMA1_UTCL1_RD_STATUS__META_Q_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_RD_STATUS__RD_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA1_UTCL1_RD_STATUS__RD_REQRET_IDLE__SHIFT 0x11
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_IDLE__SHIFT 0x12
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_TYPE__SHIFT 0x13
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REG_READY__SHIFT 0x17
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA1_UTCL1_RD_STATUS__RESERVED4__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_IN_RTR__SHIFT 0x1c
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_RD_STATUS__INV_BUSY__SHIFT 0x1e
+#define SDMA1_UTCL1_RD_STATUS__DBIT_REQ_IDLE__SHIFT 0x1f
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED0_MASK 0x00000020L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED1_MASK 0x00000040L
+#define SDMA1_UTCL1_RD_STATUS__META_Q_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED2_MASK 0x00002000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED3_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_STATUS__META_Q_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_STATUS__RD_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQRET_IDLE_MASK 0x00020000L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_IDLE_MASK 0x00040000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_TYPE_MASK 0x00180000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED4_MASK 0x04000000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_IN_RTR_MASK 0x10000000L
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_RD_STATUS__INV_BUSY_MASK 0x40000000L
+#define SDMA1_UTCL1_RD_STATUS__DBIT_REQ_IDLE_MASK 0x80000000L
+//SDMA1_UTCL1_WR_STATUS
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_WR_STATUS__RESERVED0__SHIFT 0x7
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0xf
+#define SDMA1_UTCL1_WR_STATUS__WR_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA1_UTCL1_WR_STATUS__WR_REQRET_IDLE__SHIFT 0x11
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_IDLE__SHIFT 0x12
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_TYPE__SHIFT 0x13
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REG_READY__SHIFT 0x17
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL__SHIFT 0x1a
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_RTR__SHIFT 0x1c
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR__SHIFT 0x1e
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR__SHIFT 0x1f
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_WR_STATUS__RESERVED0_MASK 0x00000080L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_STATUS__WR_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQRET_IDLE_MASK 0x00020000L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_IDLE_MASK 0x00040000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_TYPE_MASK 0x00180000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL_MASK 0x04000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_RTR_MASK 0x10000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR_MASK 0x40000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR_MASK 0x80000000L
+//SDMA1_UTCL1_INV0
+#define SDMA1_UTCL1_INV0__INV_PROC_BUSY__SHIFT 0x0
+#define SDMA1_UTCL1_INV0__GPUVM_FRAG_SIZE__SHIFT 0x1
+#define SDMA1_UTCL1_INV0__GPUVM_VMID__SHIFT 0x7
+#define SDMA1_UTCL1_INV0__GPUVM_MODE__SHIFT 0xb
+#define SDMA1_UTCL1_INV0__GPUVM_HIGH__SHIFT 0xd
+#define SDMA1_UTCL1_INV0__GPUVM_TAG__SHIFT 0xe
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_HIGH__SHIFT 0x12
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_LOW__SHIFT 0x16
+#define SDMA1_UTCL1_INV0__INV_TYPE__SHIFT 0x1a
+#define SDMA1_UTCL1_INV0__INV_PROC_BUSY_MASK 0x00000001L
+#define SDMA1_UTCL1_INV0__GPUVM_FRAG_SIZE_MASK 0x0000007EL
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_MASK 0x00000780L
+#define SDMA1_UTCL1_INV0__GPUVM_MODE_MASK 0x00001800L
+#define SDMA1_UTCL1_INV0__GPUVM_HIGH_MASK 0x00002000L
+#define SDMA1_UTCL1_INV0__GPUVM_TAG_MASK 0x0003C000L
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_HIGH_MASK 0x003C0000L
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_LOW_MASK 0x03C00000L
+#define SDMA1_UTCL1_INV0__INV_TYPE_MASK 0x0C000000L
+//SDMA1_UTCL1_INV1
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_INV2
+#define SDMA1_UTCL1_INV2__CPF_VMID__SHIFT 0x0
+#define SDMA1_UTCL1_INV2__CPF_FLUSH_TYPE__SHIFT 0x10
+#define SDMA1_UTCL1_INV2__CPF_FRAG_SIZE__SHIFT 0x11
+#define SDMA1_UTCL1_INV2__CPF_VMID_MASK 0x0000FFFFL
+#define SDMA1_UTCL1_INV2__CPF_FLUSH_TYPE_MASK 0x00010000L
+#define SDMA1_UTCL1_INV2__CPF_FRAG_SIZE_MASK 0x007E0000L
+//SDMA1_UTCL1_RD_XNACK0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK1
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA1_UTCL1_WR_XNACK0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_WR_XNACK1
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA1_RELAX_ORDERING_LUT
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS_2
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA1_CHICKEN_BITS_2__UCODE_BUF_DS_EN__SHIFT 0x6
+#define SDMA1_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP__SHIFT 0x7
+#define SDMA1_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS_2__RESERVED_14_12__SHIFT 0xc
+#define SDMA1_CHICKEN_BITS_2__RESERVED_15__SHIFT 0xf
+#define SDMA1_CHICKEN_BITS_2__RB_FIFO_WATERMARK__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS_2__IB_FIFO_WATERMARK__SHIFT 0x12
+#define SDMA1_CHICKEN_BITS_2__RESERVED_22_20__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS_2__CH_RD_WATERMARK__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB__SHIFT 0x1e
+#define SDMA1_CHICKEN_BITS_2__PIO_VFID_SOURCE__SHIFT 0x1f
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA1_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+#define SDMA1_CHICKEN_BITS_2__UCODE_BUF_DS_EN_MASK 0x00000040L
+#define SDMA1_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP_MASK 0x00000080L
+#define SDMA1_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING_MASK 0x00000F00L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_14_12_MASK 0x00007000L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_15_MASK 0x00008000L
+#define SDMA1_CHICKEN_BITS_2__RB_FIFO_WATERMARK_MASK 0x00030000L
+#define SDMA1_CHICKEN_BITS_2__IB_FIFO_WATERMARK_MASK 0x000C0000L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_22_20_MASK 0x00700000L
+#define SDMA1_CHICKEN_BITS_2__CH_RD_WATERMARK_MASK 0x01800000L
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_MASK 0x3E000000L
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB_MASK 0x40000000L
+#define SDMA1_CHICKEN_BITS_2__PIO_VFID_SOURCE_MASK 0x80000000L
+//SDMA1_STATUS3_REG
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA1_STATUS3_REG__AQL_PREV_CMD_IDLE__SHIFT 0x15
+#define SDMA1_STATUS3_REG__TLBI_IDLE__SHIFT 0x16
+#define SDMA1_STATUS3_REG__GCR_IDLE__SHIFT 0x17
+#define SDMA1_STATUS3_REG__INVREQ_IDLE__SHIFT 0x18
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x19
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x1a
+#define SDMA1_STATUS3_REG__TMZ_MTYPE_STATUS__SHIFT 0x1e
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA1_STATUS3_REG__AQL_PREV_CMD_IDLE_MASK 0x00200000L
+#define SDMA1_STATUS3_REG__TLBI_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS3_REG__GCR_IDLE_MASK 0x00800000L
+#define SDMA1_STATUS3_REG__INVREQ_IDLE_MASK 0x01000000L
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x02000000L
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x3C000000L
+#define SDMA1_STATUS3_REG__TMZ_MTYPE_STATUS_MASK 0xC0000000L
+//SDMA1_PHYSICAL_ADDR_LO
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA1_PHYSICAL_ADDR_HI
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA1_GLOBAL_QUANTUM
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM__SHIFT 0x0
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM__SHIFT 0x8
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM_MASK 0x000000FFL
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM_MASK 0x0000FF00L
+//SDMA1_ERROR_LOG
+#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA1_PUB_DUMMY_REG0
+#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG1
+#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG2
+#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG3
+#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_F32_COUNTER
+#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_CRD_CNTL
+#define SDMA1_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA1_CRD_CNTL__CH_WRREQ_CREDIT__SHIFT 0x13
+#define SDMA1_CRD_CNTL__CH_RDREQ_CREDIT__SHIFT 0x19
+#define SDMA1_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+#define SDMA1_CRD_CNTL__CH_WRREQ_CREDIT_MASK 0x01F80000L
+#define SDMA1_CRD_CNTL__CH_RDREQ_CREDIT_MASK 0x7E000000L
+//SDMA1_RLC_CGCG_CTRL
+#define SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE__SHIFT 0x1
+#define SDMA1_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS__SHIFT 0x10
+#define SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS_MASK 0xFFFF0000L
+//SDMA1_GPU_IOV_VIOLATION_LOG
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA1_AQL_STATUS
+#define SDMA1_AQL_STATUS__COMPLETE_SIGNAL_EMPTY__SHIFT 0x0
+#define SDMA1_AQL_STATUS__INVALID_CMD_EMPTY__SHIFT 0x1
+#define SDMA1_AQL_STATUS__COMPLETE_SIGNAL_EMPTY_MASK 0x00000001L
+#define SDMA1_AQL_STATUS__INVALID_CMD_EMPTY_MASK 0x00000002L
+//SDMA1_EA_DBIT_ADDR_DATA
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_EA_DBIT_ADDR_INDEX
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA1_TLBI_GCR_CNTL
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CMD_DW__SHIFT 0x0
+#define SDMA1_TLBI_GCR_CNTL__GCR_CMD_DW__SHIFT 0x4
+#define SDMA1_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE__SHIFT 0x8
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CREDIT__SHIFT 0x10
+#define SDMA1_TLBI_GCR_CNTL__GCR_CREDIT__SHIFT 0x18
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CMD_DW_MASK 0x0000000FL
+#define SDMA1_TLBI_GCR_CNTL__GCR_CMD_DW_MASK 0x000000F0L
+#define SDMA1_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE_MASK 0x00000F00L
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CREDIT_MASK 0x00FF0000L
+#define SDMA1_TLBI_GCR_CNTL__GCR_CREDIT_MASK 0xFF000000L
+//SDMA1_TILING_CONFIG
+#define SDMA1_TILING_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define SDMA1_TILING_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//SDMA1_HASH
+#define SDMA1_HASH__CHANNEL_BITS__SHIFT 0x0
+#define SDMA1_HASH__BANK_BITS__SHIFT 0x4
+#define SDMA1_HASH__CHANNEL_XOR_COUNT__SHIFT 0x8
+#define SDMA1_HASH__BANK_XOR_COUNT__SHIFT 0xc
+#define SDMA1_HASH__CHANNEL_BITS_MASK 0x00000007L
+#define SDMA1_HASH__BANK_BITS_MASK 0x00000070L
+#define SDMA1_HASH__CHANNEL_XOR_COUNT_MASK 0x00000700L
+#define SDMA1_HASH__BANK_XOR_COUNT_MASK 0x00007000L
+//SDMA1_INT_STATUS
+#define SDMA1_INT_STATUS__DATA__SHIFT 0x0
+#define SDMA1_INT_STATUS__DATA_MASK 0xFFFFFFFFL
+//SDMA1_GPU_IOV_VIOLATION_LOG2
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA1_HOLE_ADDR_LO
+#define SDMA1_HOLE_ADDR_LO__VALUE__SHIFT 0x0
+#define SDMA1_HOLE_ADDR_LO__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_HOLE_ADDR_HI
+#define SDMA1_HOLE_ADDR_HI__VALUE__SHIFT 0x0
+#define SDMA1_HOLE_ADDR_HI__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_CLOCK_GATING_STATUS
+#define SDMA1_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS__SHIFT 0x0
+#define SDMA1_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS__SHIFT 0x2
+#define SDMA1_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS__SHIFT 0x3
+#define SDMA1_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS__SHIFT 0x4
+#define SDMA1_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS__SHIFT 0x5
+#define SDMA1_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS__SHIFT 0x6
+#define SDMA1_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS_MASK 0x00000001L
+#define SDMA1_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS_MASK 0x00000004L
+#define SDMA1_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS_MASK 0x00000008L
+#define SDMA1_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS_MASK 0x00000010L
+#define SDMA1_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS_MASK 0x00000020L
+#define SDMA1_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS_MASK 0x00000040L
+//SDMA1_STATUS4_REG
+#define SDMA1_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA1_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA1_STATUS4_REG__CH_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA1_STATUS4_REG__CH_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA1_STATUS4_REG__GCR_OUTSTANDING__SHIFT 0x6
+#define SDMA1_STATUS4_REG__TLBI_OUTSTANDING__SHIFT 0x7
+#define SDMA1_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x8
+#define SDMA1_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x9
+#define SDMA1_STATUS4_REG__REG_POLLING__SHIFT 0xa
+#define SDMA1_STATUS4_REG__MEM_POLLING__SHIFT 0xb
+#define SDMA1_STATUS4_REG__RESERVED_13_12__SHIFT 0xc
+#define SDMA1_STATUS4_REG__RESERVED_15_14__SHIFT 0xe
+#define SDMA1_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA1_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x14
+#define SDMA1_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x15
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_FAULT__SHIFT 0x16
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_NULL__SHIFT 0x17
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT__SHIFT 0x18
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_FAULT__SHIFT 0x19
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_NULL__SHIFT 0x1a
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT__SHIFT 0x1b
+#define SDMA1_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA1_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA1_STATUS4_REG__CH_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA1_STATUS4_REG__CH_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA1_STATUS4_REG__GCR_OUTSTANDING_MASK 0x00000040L
+#define SDMA1_STATUS4_REG__TLBI_OUTSTANDING_MASK 0x00000080L
+#define SDMA1_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000100L
+#define SDMA1_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000200L
+#define SDMA1_STATUS4_REG__REG_POLLING_MASK 0x00000400L
+#define SDMA1_STATUS4_REG__MEM_POLLING_MASK 0x00000800L
+#define SDMA1_STATUS4_REG__RESERVED_13_12_MASK 0x00003000L
+#define SDMA1_STATUS4_REG__RESERVED_15_14_MASK 0x0000C000L
+#define SDMA1_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA1_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00100000L
+#define SDMA1_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00200000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_FAULT_MASK 0x00400000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_NULL_MASK 0x00800000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT_MASK 0x01000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_FAULT_MASK 0x02000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_NULL_MASK 0x04000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT_MASK 0x08000000L
+//SDMA1_SCRATCH_RAM_DATA
+#define SDMA1_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA1_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA1_SCRATCH_RAM_ADDR
+#define SDMA1_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA1_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA1_TIMESTAMP_CNTL
+#define SDMA1_TIMESTAMP_CNTL__CAPTURE__SHIFT 0x0
+#define SDMA1_TIMESTAMP_CNTL__CAPTURE_MASK 0x00000001L
+//SDMA1_STATUS5_REG
+#define SDMA1_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS__SHIFT 0x0
+#define SDMA1_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS__SHIFT 0x1
+#define SDMA1_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS__SHIFT 0x2
+#define SDMA1_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS__SHIFT 0x3
+#define SDMA1_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS__SHIFT 0x4
+#define SDMA1_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS__SHIFT 0x5
+#define SDMA1_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS__SHIFT 0x6
+#define SDMA1_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS__SHIFT 0x7
+#define SDMA1_STATUS5_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA1_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x14
+#define SDMA1_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x15
+#define SDMA1_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x16
+#define SDMA1_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x17
+#define SDMA1_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x18
+#define SDMA1_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x19
+#define SDMA1_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1a
+#define SDMA1_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1b
+#define SDMA1_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS_MASK 0x00000001L
+#define SDMA1_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS_MASK 0x00000002L
+#define SDMA1_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS_MASK 0x00000004L
+#define SDMA1_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS_MASK 0x00000008L
+#define SDMA1_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS_MASK 0x00000010L
+#define SDMA1_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS_MASK 0x00000020L
+#define SDMA1_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS_MASK 0x00000040L
+#define SDMA1_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS_MASK 0x00000080L
+#define SDMA1_STATUS5_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA1_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00100000L
+#define SDMA1_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00200000L
+#define SDMA1_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00400000L
+#define SDMA1_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00800000L
+#define SDMA1_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION_MASK 0x01000000L
+#define SDMA1_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION_MASK 0x02000000L
+#define SDMA1_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION_MASK 0x04000000L
+#define SDMA1_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION_MASK 0x08000000L
+//SDMA1_QUEUE_RESET_REQ
+#define SDMA1_QUEUE_RESET_REQ__QUEUE0_RESET__SHIFT 0x0
+#define SDMA1_QUEUE_RESET_REQ__QUEUE1_RESET__SHIFT 0x1
+#define SDMA1_QUEUE_RESET_REQ__QUEUE2_RESET__SHIFT 0x2
+#define SDMA1_QUEUE_RESET_REQ__QUEUE3_RESET__SHIFT 0x3
+#define SDMA1_QUEUE_RESET_REQ__QUEUE4_RESET__SHIFT 0x4
+#define SDMA1_QUEUE_RESET_REQ__QUEUE5_RESET__SHIFT 0x5
+#define SDMA1_QUEUE_RESET_REQ__QUEUE6_RESET__SHIFT 0x6
+#define SDMA1_QUEUE_RESET_REQ__QUEUE7_RESET__SHIFT 0x7
+#define SDMA1_QUEUE_RESET_REQ__RESERVED__SHIFT 0x8
+#define SDMA1_QUEUE_RESET_REQ__QUEUE0_RESET_MASK 0x00000001L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE1_RESET_MASK 0x00000002L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE2_RESET_MASK 0x00000004L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE3_RESET_MASK 0x00000008L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE4_RESET_MASK 0x00000010L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE5_RESET_MASK 0x00000020L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE6_RESET_MASK 0x00000040L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE7_RESET_MASK 0x00000080L
+#define SDMA1_QUEUE_RESET_REQ__RESERVED_MASK 0xFFFFFF00L
+//SDMA1_STATUS6_REG
+#define SDMA1_STATUS6_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS6_REG__TH1F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS6_REG__TH1_EXCEPTION__SHIFT 0x10
+#define SDMA1_STATUS6_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS6_REG__TH1F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA1_STATUS6_REG__TH1_EXCEPTION_MASK 0xFFFF0000L
+//SDMA1_UCODE1_CHECKSUM
+#define SDMA1_UCODE1_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE1_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_CE_CTRL
+#define SDMA1_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA1_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA1_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA1_CE_CTRL__CE_DCC_READ_128B_ENABLE__SHIFT 0x8
+#define SDMA1_CE_CTRL__RESERVED__SHIFT 0x9
+#define SDMA1_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA1_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA1_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA1_CE_CTRL__CE_DCC_READ_128B_ENABLE_MASK 0x00000100L
+#define SDMA1_CE_CTRL__RESERVED_MASK 0xFFFFFE00L
+//SDMA1_FED_STATUS
+#define SDMA1_FED_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA1_FED_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA1_FED_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA1_FED_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA1_FED_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA1_FED_STATUS__COPY_METADATA_ECC__SHIFT 0x5
+#define SDMA1_FED_STATUS__SELFLOAD_UCODE_ECC__SHIFT 0x6
+#define SDMA1_FED_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA1_FED_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA1_FED_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA1_FED_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA1_FED_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA1_FED_STATUS__COPY_METADATA_ECC_MASK 0x00000020L
+#define SDMA1_FED_STATUS__SELFLOAD_UCODE_ECC_MASK 0x00000040L
+//SDMA1_QUEUE0_RB_CNTL
+#define SDMA1_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE0_RB_BASE
+#define SDMA1_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_BASE_HI
+#define SDMA1_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE0_RB_RPTR
+#define SDMA1_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_HI
+#define SDMA1_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR
+#define SDMA1_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_HI
+#define SDMA1_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_IB_CNTL
+#define SDMA1_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE0_IB_RPTR
+#define SDMA1_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE0_IB_OFFSET
+#define SDMA1_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE0_IB_BASE_LO
+#define SDMA1_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE0_IB_BASE_HI
+#define SDMA1_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_IB_SIZE
+#define SDMA1_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE0_SKIP_CNTL
+#define SDMA1_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE0_CONTEXT_STATUS
+#define SDMA1_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE0_CONTEXT_STATUS__USE_IB__SHIFT 0x1
+#define SDMA1_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__USE_IB_MASK 0x00000002L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE0_DOORBELL
+#define SDMA1_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE0_DOORBELL_LOG
+#define SDMA1_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_DOORBELL_OFFSET
+#define SDMA1_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE0_CSA_ADDR_LO
+#define SDMA1_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_CSA_ADDR_HI
+#define SDMA1_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_SCHEDULE_CNTL
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE0_IB_SUB_REMAIN
+#define SDMA1_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE0_PREEMPT
+#define SDMA1_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE0_DUMMY_REG
+#define SDMA1_QUEUE0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_RB_AQL_CNTL
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE0_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE0_RB_PREEMPT
+#define SDMA1_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE0_MIDCMD_DATA0
+#define SDMA1_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA1
+#define SDMA1_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA2
+#define SDMA1_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA3
+#define SDMA1_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA4
+#define SDMA1_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA5
+#define SDMA1_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA6
+#define SDMA1_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA7
+#define SDMA1_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA8
+#define SDMA1_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA9
+#define SDMA1_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA10
+#define SDMA1_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_CNTL
+#define SDMA1_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE1_RB_CNTL
+#define SDMA1_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE1_RB_BASE
+#define SDMA1_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_BASE_HI
+#define SDMA1_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE1_RB_RPTR
+#define SDMA1_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_HI
+#define SDMA1_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR
+#define SDMA1_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_HI
+#define SDMA1_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_IB_CNTL
+#define SDMA1_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE1_IB_RPTR
+#define SDMA1_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE1_IB_OFFSET
+#define SDMA1_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE1_IB_BASE_LO
+#define SDMA1_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE1_IB_BASE_HI
+#define SDMA1_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_IB_SIZE
+#define SDMA1_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE1_SKIP_CNTL
+#define SDMA1_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE1_CONTEXT_STATUS
+#define SDMA1_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE1_DOORBELL
+#define SDMA1_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE1_DOORBELL_LOG
+#define SDMA1_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_DOORBELL_OFFSET
+#define SDMA1_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE1_CSA_ADDR_LO
+#define SDMA1_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_CSA_ADDR_HI
+#define SDMA1_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_SCHEDULE_CNTL
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE1_IB_SUB_REMAIN
+#define SDMA1_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE1_PREEMPT
+#define SDMA1_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE1_DUMMY_REG
+#define SDMA1_QUEUE1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_RB_AQL_CNTL
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE1_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE1_RB_PREEMPT
+#define SDMA1_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE1_MIDCMD_DATA0
+#define SDMA1_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA1
+#define SDMA1_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA2
+#define SDMA1_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA3
+#define SDMA1_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA4
+#define SDMA1_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA5
+#define SDMA1_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA6
+#define SDMA1_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA7
+#define SDMA1_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA8
+#define SDMA1_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA9
+#define SDMA1_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA10
+#define SDMA1_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_CNTL
+#define SDMA1_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE2_RB_CNTL
+#define SDMA1_QUEUE2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE2_RB_BASE
+#define SDMA1_QUEUE2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_BASE_HI
+#define SDMA1_QUEUE2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE2_RB_RPTR
+#define SDMA1_QUEUE2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_HI
+#define SDMA1_QUEUE2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR
+#define SDMA1_QUEUE2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_HI
+#define SDMA1_QUEUE2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_IB_CNTL
+#define SDMA1_QUEUE2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE2_IB_RPTR
+#define SDMA1_QUEUE2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE2_IB_OFFSET
+#define SDMA1_QUEUE2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE2_IB_BASE_LO
+#define SDMA1_QUEUE2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE2_IB_BASE_HI
+#define SDMA1_QUEUE2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_IB_SIZE
+#define SDMA1_QUEUE2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE2_SKIP_CNTL
+#define SDMA1_QUEUE2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE2_CONTEXT_STATUS
+#define SDMA1_QUEUE2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE2_DOORBELL
+#define SDMA1_QUEUE2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE2_DOORBELL_LOG
+#define SDMA1_QUEUE2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_DOORBELL_OFFSET
+#define SDMA1_QUEUE2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE2_CSA_ADDR_LO
+#define SDMA1_QUEUE2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_CSA_ADDR_HI
+#define SDMA1_QUEUE2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_SCHEDULE_CNTL
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE2_IB_SUB_REMAIN
+#define SDMA1_QUEUE2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE2_PREEMPT
+#define SDMA1_QUEUE2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE2_DUMMY_REG
+#define SDMA1_QUEUE2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_RB_AQL_CNTL
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE2_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE2_RB_PREEMPT
+#define SDMA1_QUEUE2_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE2_MIDCMD_DATA0
+#define SDMA1_QUEUE2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA1
+#define SDMA1_QUEUE2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA2
+#define SDMA1_QUEUE2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA3
+#define SDMA1_QUEUE2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA4
+#define SDMA1_QUEUE2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA5
+#define SDMA1_QUEUE2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA6
+#define SDMA1_QUEUE2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA7
+#define SDMA1_QUEUE2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA8
+#define SDMA1_QUEUE2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA9
+#define SDMA1_QUEUE2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA10
+#define SDMA1_QUEUE2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_CNTL
+#define SDMA1_QUEUE2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE3_RB_CNTL
+#define SDMA1_QUEUE3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE3_RB_BASE
+#define SDMA1_QUEUE3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_BASE_HI
+#define SDMA1_QUEUE3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE3_RB_RPTR
+#define SDMA1_QUEUE3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_HI
+#define SDMA1_QUEUE3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR
+#define SDMA1_QUEUE3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_HI
+#define SDMA1_QUEUE3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_IB_CNTL
+#define SDMA1_QUEUE3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE3_IB_RPTR
+#define SDMA1_QUEUE3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE3_IB_OFFSET
+#define SDMA1_QUEUE3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE3_IB_BASE_LO
+#define SDMA1_QUEUE3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE3_IB_BASE_HI
+#define SDMA1_QUEUE3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_IB_SIZE
+#define SDMA1_QUEUE3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE3_SKIP_CNTL
+#define SDMA1_QUEUE3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE3_CONTEXT_STATUS
+#define SDMA1_QUEUE3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE3_DOORBELL
+#define SDMA1_QUEUE3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE3_DOORBELL_LOG
+#define SDMA1_QUEUE3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_DOORBELL_OFFSET
+#define SDMA1_QUEUE3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE3_CSA_ADDR_LO
+#define SDMA1_QUEUE3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_CSA_ADDR_HI
+#define SDMA1_QUEUE3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_SCHEDULE_CNTL
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE3_IB_SUB_REMAIN
+#define SDMA1_QUEUE3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE3_PREEMPT
+#define SDMA1_QUEUE3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE3_DUMMY_REG
+#define SDMA1_QUEUE3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_RB_AQL_CNTL
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE3_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE3_RB_PREEMPT
+#define SDMA1_QUEUE3_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE3_MIDCMD_DATA0
+#define SDMA1_QUEUE3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA1
+#define SDMA1_QUEUE3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA2
+#define SDMA1_QUEUE3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA3
+#define SDMA1_QUEUE3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA4
+#define SDMA1_QUEUE3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA5
+#define SDMA1_QUEUE3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA6
+#define SDMA1_QUEUE3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA7
+#define SDMA1_QUEUE3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA8
+#define SDMA1_QUEUE3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA9
+#define SDMA1_QUEUE3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA10
+#define SDMA1_QUEUE3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_CNTL
+#define SDMA1_QUEUE3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE4_RB_CNTL
+#define SDMA1_QUEUE4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE4_RB_BASE
+#define SDMA1_QUEUE4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_BASE_HI
+#define SDMA1_QUEUE4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE4_RB_RPTR
+#define SDMA1_QUEUE4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_HI
+#define SDMA1_QUEUE4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR
+#define SDMA1_QUEUE4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_HI
+#define SDMA1_QUEUE4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_IB_CNTL
+#define SDMA1_QUEUE4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE4_IB_RPTR
+#define SDMA1_QUEUE4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE4_IB_OFFSET
+#define SDMA1_QUEUE4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE4_IB_BASE_LO
+#define SDMA1_QUEUE4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE4_IB_BASE_HI
+#define SDMA1_QUEUE4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_IB_SIZE
+#define SDMA1_QUEUE4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE4_SKIP_CNTL
+#define SDMA1_QUEUE4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE4_CONTEXT_STATUS
+#define SDMA1_QUEUE4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE4_DOORBELL
+#define SDMA1_QUEUE4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE4_DOORBELL_LOG
+#define SDMA1_QUEUE4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_DOORBELL_OFFSET
+#define SDMA1_QUEUE4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE4_CSA_ADDR_LO
+#define SDMA1_QUEUE4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_CSA_ADDR_HI
+#define SDMA1_QUEUE4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_SCHEDULE_CNTL
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE4_IB_SUB_REMAIN
+#define SDMA1_QUEUE4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE4_PREEMPT
+#define SDMA1_QUEUE4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE4_DUMMY_REG
+#define SDMA1_QUEUE4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_RB_AQL_CNTL
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE4_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE4_RB_PREEMPT
+#define SDMA1_QUEUE4_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE4_MIDCMD_DATA0
+#define SDMA1_QUEUE4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA1
+#define SDMA1_QUEUE4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA2
+#define SDMA1_QUEUE4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA3
+#define SDMA1_QUEUE4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA4
+#define SDMA1_QUEUE4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA5
+#define SDMA1_QUEUE4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA6
+#define SDMA1_QUEUE4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA7
+#define SDMA1_QUEUE4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA8
+#define SDMA1_QUEUE4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA9
+#define SDMA1_QUEUE4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA10
+#define SDMA1_QUEUE4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_CNTL
+#define SDMA1_QUEUE4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE5_RB_CNTL
+#define SDMA1_QUEUE5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE5_RB_BASE
+#define SDMA1_QUEUE5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_BASE_HI
+#define SDMA1_QUEUE5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE5_RB_RPTR
+#define SDMA1_QUEUE5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_HI
+#define SDMA1_QUEUE5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR
+#define SDMA1_QUEUE5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_HI
+#define SDMA1_QUEUE5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_IB_CNTL
+#define SDMA1_QUEUE5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE5_IB_RPTR
+#define SDMA1_QUEUE5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE5_IB_OFFSET
+#define SDMA1_QUEUE5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE5_IB_BASE_LO
+#define SDMA1_QUEUE5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE5_IB_BASE_HI
+#define SDMA1_QUEUE5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_IB_SIZE
+#define SDMA1_QUEUE5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE5_SKIP_CNTL
+#define SDMA1_QUEUE5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE5_CONTEXT_STATUS
+#define SDMA1_QUEUE5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE5_DOORBELL
+#define SDMA1_QUEUE5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE5_DOORBELL_LOG
+#define SDMA1_QUEUE5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_DOORBELL_OFFSET
+#define SDMA1_QUEUE5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE5_CSA_ADDR_LO
+#define SDMA1_QUEUE5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_CSA_ADDR_HI
+#define SDMA1_QUEUE5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_SCHEDULE_CNTL
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE5_IB_SUB_REMAIN
+#define SDMA1_QUEUE5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE5_PREEMPT
+#define SDMA1_QUEUE5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE5_DUMMY_REG
+#define SDMA1_QUEUE5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_RB_AQL_CNTL
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE5_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE5_RB_PREEMPT
+#define SDMA1_QUEUE5_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE5_MIDCMD_DATA0
+#define SDMA1_QUEUE5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA1
+#define SDMA1_QUEUE5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA2
+#define SDMA1_QUEUE5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA3
+#define SDMA1_QUEUE5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA4
+#define SDMA1_QUEUE5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA5
+#define SDMA1_QUEUE5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA6
+#define SDMA1_QUEUE5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA7
+#define SDMA1_QUEUE5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA8
+#define SDMA1_QUEUE5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA9
+#define SDMA1_QUEUE5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA10
+#define SDMA1_QUEUE5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_CNTL
+#define SDMA1_QUEUE5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE6_RB_CNTL
+#define SDMA1_QUEUE6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE6_RB_BASE
+#define SDMA1_QUEUE6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_BASE_HI
+#define SDMA1_QUEUE6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE6_RB_RPTR
+#define SDMA1_QUEUE6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_HI
+#define SDMA1_QUEUE6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR
+#define SDMA1_QUEUE6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_HI
+#define SDMA1_QUEUE6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_IB_CNTL
+#define SDMA1_QUEUE6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE6_IB_RPTR
+#define SDMA1_QUEUE6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE6_IB_OFFSET
+#define SDMA1_QUEUE6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE6_IB_BASE_LO
+#define SDMA1_QUEUE6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE6_IB_BASE_HI
+#define SDMA1_QUEUE6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_IB_SIZE
+#define SDMA1_QUEUE6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE6_SKIP_CNTL
+#define SDMA1_QUEUE6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE6_CONTEXT_STATUS
+#define SDMA1_QUEUE6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE6_DOORBELL
+#define SDMA1_QUEUE6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE6_DOORBELL_LOG
+#define SDMA1_QUEUE6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_DOORBELL_OFFSET
+#define SDMA1_QUEUE6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE6_CSA_ADDR_LO
+#define SDMA1_QUEUE6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_CSA_ADDR_HI
+#define SDMA1_QUEUE6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_SCHEDULE_CNTL
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE6_IB_SUB_REMAIN
+#define SDMA1_QUEUE6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE6_PREEMPT
+#define SDMA1_QUEUE6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE6_DUMMY_REG
+#define SDMA1_QUEUE6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_RB_AQL_CNTL
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE6_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE6_RB_PREEMPT
+#define SDMA1_QUEUE6_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE6_MIDCMD_DATA0
+#define SDMA1_QUEUE6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA1
+#define SDMA1_QUEUE6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA2
+#define SDMA1_QUEUE6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA3
+#define SDMA1_QUEUE6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA4
+#define SDMA1_QUEUE6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA5
+#define SDMA1_QUEUE6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA6
+#define SDMA1_QUEUE6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA7
+#define SDMA1_QUEUE6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA8
+#define SDMA1_QUEUE6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA9
+#define SDMA1_QUEUE6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA10
+#define SDMA1_QUEUE6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_CNTL
+#define SDMA1_QUEUE6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE7_RB_CNTL
+#define SDMA1_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE7_RB_BASE
+#define SDMA1_QUEUE7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_BASE_HI
+#define SDMA1_QUEUE7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE7_RB_RPTR
+#define SDMA1_QUEUE7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_HI
+#define SDMA1_QUEUE7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR
+#define SDMA1_QUEUE7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_HI
+#define SDMA1_QUEUE7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_IB_CNTL
+#define SDMA1_QUEUE7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE7_IB_RPTR
+#define SDMA1_QUEUE7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE7_IB_OFFSET
+#define SDMA1_QUEUE7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE7_IB_BASE_LO
+#define SDMA1_QUEUE7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE7_IB_BASE_HI
+#define SDMA1_QUEUE7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_IB_SIZE
+#define SDMA1_QUEUE7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE7_SKIP_CNTL
+#define SDMA1_QUEUE7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE7_CONTEXT_STATUS
+#define SDMA1_QUEUE7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE7_DOORBELL
+#define SDMA1_QUEUE7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE7_DOORBELL_LOG
+#define SDMA1_QUEUE7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_DOORBELL_OFFSET
+#define SDMA1_QUEUE7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE7_CSA_ADDR_LO
+#define SDMA1_QUEUE7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_CSA_ADDR_HI
+#define SDMA1_QUEUE7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_SCHEDULE_CNTL
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE7_IB_SUB_REMAIN
+#define SDMA1_QUEUE7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE7_PREEMPT
+#define SDMA1_QUEUE7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE7_DUMMY_REG
+#define SDMA1_QUEUE7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_RB_AQL_CNTL
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE7_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE7_RB_PREEMPT
+#define SDMA1_QUEUE7_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE7_MIDCMD_DATA0
+#define SDMA1_QUEUE7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA1
+#define SDMA1_QUEUE7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA2
+#define SDMA1_QUEUE7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA3
+#define SDMA1_QUEUE7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA4
+#define SDMA1_QUEUE7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA5
+#define SDMA1_QUEUE7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA6
+#define SDMA1_QUEUE7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA7
+#define SDMA1_QUEUE7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA8
+#define SDMA1_QUEUE7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA9
+#define SDMA1_QUEUE7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA10
+#define SDMA1_QUEUE7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_CNTL
+#define SDMA1_QUEUE7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+
+// addressBlock: gc_sdma0_sdma0hypdec
+//SDMA0_UCODE_ADDR
+#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA0_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA0_UCODE_DATA
+#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_UCODE_SELFLOAD_CONTROL
+#define SDMA0_UCODE_SELFLOAD_CONTROL__GPA__SHIFT 0x0
+#define SDMA0_UCODE_SELFLOAD_CONTROL__SYS__SHIFT 0x1
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CID__SHIFT 0x4
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CACHE_POLICY__SHIFT 0x8
+#define SDMA0_UCODE_SELFLOAD_CONTROL__GPA_MASK 0x00000001L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__SYS_MASK 0x00000002L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CID_MASK 0x000000F0L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CACHE_POLICY_MASK 0x00000300L
+//SDMA0_BROADCAST_UCODE_ADDR
+#define SDMA0_BROADCAST_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_BROADCAST_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA0_BROADCAST_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA0_BROADCAST_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA0_BROADCAST_UCODE_DATA
+#define SDMA0_BROADCAST_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_BROADCAST_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_VM_CTX_LO
+#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_VM_CTX_HI
+#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_ACTIVE_FCN_ID
+#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA0_VM_CTX_CNTL
+#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA0_VM_CTX_CNTL__MEM_PHY__SHIFT 0x8
+#define SDMA0_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE__SHIFT 0x10
+#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+#define SDMA0_VM_CTX_CNTL__MEM_PHY_MASK 0x00000300L
+#define SDMA0_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE_MASK 0x00010000L
+//SDMA0_VIRT_RESET_REQ
+#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA0_CONTEXT_REG_TYPE0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_CNTL__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_HI__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_HI__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_HI__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_CNTL__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_RPTR__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_OFFSET__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_LO__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_HI__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_SIZE__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_SKIP_CNTL__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_DOORBELL__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED31_19__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_CNTL_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_HI_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_CNTL_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_RPTR_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_OFFSET_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_LO_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_HI_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_SIZE_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_SKIP_CNTL_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_DOORBELL_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED31_19_MASK 0xFFF80000L
+//SDMA0_CONTEXT_REG_TYPE1
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED8_0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_LOG__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_SCHEDULE_CNTL__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_PREEMPT__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DUMMY_REG__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_PREEMPT__SHIFT 0x16
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x17
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED8_0_MASK 0x000001FFL
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_SCHEDULE_CNTL_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_PREEMPT_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DUMMY_REG_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_PREEMPT_MASK 0x00400000L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFF800000L
+//SDMA0_CONTEXT_REG_TYPE2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA0_PUB_REG_TYPE0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_DEC_START__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE0__RESERVED_10_1__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE0__SDMA0_F32_MISC_CNTL__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_LO__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_HI__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE0__RESERVED22__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE0__RESERVED23__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE0__RESERVED24__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE0__RESERVED25__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE0__RESERVED27__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE0__SDMA0_DEC_START_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE0__RESERVED_10_1_MASK 0x000007FEL
+#define SDMA0_PUB_REG_TYPE0__SDMA0_F32_MISC_CNTL_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_LO_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_HI_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED22_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED23_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED24_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED25_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED27_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE1__SDMA0_CNTL1__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM0__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM1__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE1__SDMA0_WATCHDOG_CNTL__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE1__RESERVED15__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE1__RESERVED16__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE1__RESERVED17__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_TIMEOUT__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_PAGE__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_CNTL1_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM0_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM1_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_WATCHDOG_CNTL_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED15_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED16_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED17_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_TIMEOUT_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_PAGE_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_STATUS__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GLOBAL_QUANTUM__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE2__RESERVE_22_22__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE2__RESERVED23__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE2__RESERVED24__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE2__RESERVED25__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE2__RESERVED26__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RLC_CGCG_CTRL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE2__SDMA0_AQL_STATUS__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_STATUS_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_STATUS_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GLOBAL_QUANTUM_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE2__RESERVE_22_22_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED23_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED24_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED25_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED26_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RLC_CGCG_CTRL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_AQL_STATUS_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TLBI_GCR_CNTL__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TILING_CONFIG__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HASH__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE3__RESERVED5__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE3__RESERVED7__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CE_CTRL__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE3__SDMA0_FED_STATUS__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE3__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE3__RESERVED11__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE3__RESERVED12__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE3__RESERVED13__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE3__RESERVED14__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE3__RESERVED15__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE3__SDMA0_INT_STATUS__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_LO__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_HI__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE3__RESERVED20__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CLOCK_GATING_STATUS__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS4_REG__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_DATA__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_ADDR__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TIMESTAMP_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE3__RESERVED26__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE3__RESERVED27__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS5_REG__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE3__SDMA0_QUEUE_RESET_REQ__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS6_REG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE3__SDMA0_UCODE1_CHECKSUM__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TLBI_GCR_CNTL_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TILING_CONFIG_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HASH_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE3__RESERVED5_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE3__RESERVED7_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CE_CTRL_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_FED_STATUS_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE3__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE3__RESERVED11_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE3__RESERVED12_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED13_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED14_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED15_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_INT_STATUS_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_LO_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_HI_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED20_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CLOCK_GATING_STATUS_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS4_REG_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_DATA_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_ADDR_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TIMESTAMP_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED26_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED27_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS5_REG_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_QUEUE_RESET_REQ_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS6_REG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_UCODE1_CHECKSUM_MASK 0x80000000L
+//SDMA0_VM_CNTL
+#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA0_F32_CNTL
+#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA0_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA0_F32_CNTL__TH0_CHECKSUM_CLR__SHIFT 0x8
+#define SDMA0_F32_CNTL__TH0_RESET__SHIFT 0x9
+#define SDMA0_F32_CNTL__TH0_ENABLE__SHIFT 0xa
+#define SDMA0_F32_CNTL__TH1_CHECKSUM_CLR__SHIFT 0xc
+#define SDMA0_F32_CNTL__TH1_RESET__SHIFT 0xd
+#define SDMA0_F32_CNTL__TH1_ENABLE__SHIFT 0xe
+#define SDMA0_F32_CNTL__TH0_PRIORITY__SHIFT 0x10
+#define SDMA0_F32_CNTL__TH1_PRIORITY__SHIFT 0x18
+#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA0_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA0_F32_CNTL__TH0_CHECKSUM_CLR_MASK 0x00000100L
+#define SDMA0_F32_CNTL__TH0_RESET_MASK 0x00000200L
+#define SDMA0_F32_CNTL__TH0_ENABLE_MASK 0x00000400L
+#define SDMA0_F32_CNTL__TH1_CHECKSUM_CLR_MASK 0x00001000L
+#define SDMA0_F32_CNTL__TH1_RESET_MASK 0x00002000L
+#define SDMA0_F32_CNTL__TH1_ENABLE_MASK 0x00004000L
+#define SDMA0_F32_CNTL__TH0_PRIORITY_MASK 0x00FF0000L
+#define SDMA0_F32_CNTL__TH1_PRIORITY_MASK 0xFF000000L
+
+
+// addressBlock: gc_sdma0_sdma1hypdec
+//SDMA1_UCODE_ADDR
+#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA1_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA1_UCODE_DATA
+#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_UCODE_SELFLOAD_CONTROL
+#define SDMA1_UCODE_SELFLOAD_CONTROL__GPA__SHIFT 0x0
+#define SDMA1_UCODE_SELFLOAD_CONTROL__SYS__SHIFT 0x1
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CID__SHIFT 0x4
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CACHE_POLICY__SHIFT 0x8
+#define SDMA1_UCODE_SELFLOAD_CONTROL__GPA_MASK 0x00000001L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__SYS_MASK 0x00000002L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CID_MASK 0x000000F0L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CACHE_POLICY_MASK 0x00000300L
+//SDMA1_BROADCAST_UCODE_ADDR
+#define SDMA1_BROADCAST_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_BROADCAST_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA1_BROADCAST_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA1_BROADCAST_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA1_BROADCAST_UCODE_DATA
+#define SDMA1_BROADCAST_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_BROADCAST_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_VM_CTX_LO
+#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_VM_CTX_HI
+#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_ACTIVE_FCN_ID
+#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA1_VM_CTX_CNTL
+#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA1_VM_CTX_CNTL__MEM_PHY__SHIFT 0x8
+#define SDMA1_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE__SHIFT 0x10
+#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+#define SDMA1_VM_CTX_CNTL__MEM_PHY_MASK 0x00000300L
+#define SDMA1_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE_MASK 0x00010000L
+//SDMA1_VIRT_RESET_REQ
+#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA1_CONTEXT_REG_TYPE0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_CNTL__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_HI__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_HI__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_HI__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_CNTL__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_RPTR__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_OFFSET__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_LO__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_HI__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_SIZE__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_SKIP_CNTL__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_DOORBELL__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED31_19__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_CNTL_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_HI_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_CNTL_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_RPTR_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_OFFSET_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_LO_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_HI_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_SIZE_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_SKIP_CNTL_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_DOORBELL_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED31_19_MASK 0xFFF80000L
+//SDMA1_CONTEXT_REG_TYPE1
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED8_0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_LOG__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_SCHEDULE_CNTL__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_PREEMPT__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DUMMY_REG__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_PREEMPT__SHIFT 0x16
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x17
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED8_0_MASK 0x000001FFL
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_SCHEDULE_CNTL_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_PREEMPT_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DUMMY_REG_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_PREEMPT_MASK 0x00400000L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFF800000L
+//SDMA1_CONTEXT_REG_TYPE2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA1_PUB_REG_TYPE0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_DEC_START__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE0__RESERVED_10_1__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE0__SDMA1_F32_MISC_CNTL__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_LO__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_HI__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE0__RESERVED22__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE0__RESERVED23__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE0__RESERVED24__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE0__RESERVED25__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE0__RESERVED27__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE0__SDMA1_DEC_START_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE0__RESERVED_10_1_MASK 0x000007FEL
+#define SDMA1_PUB_REG_TYPE0__SDMA1_F32_MISC_CNTL_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_LO_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_HI_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED22_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED23_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED24_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED25_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED27_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE1__SDMA1_CNTL1__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM0__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM1__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE1__SDMA1_WATCHDOG_CNTL__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE1__RESERVED15__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE1__RESERVED16__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE1__RESERVED17__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_TIMEOUT__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_PAGE__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_CNTL1_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM0_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM1_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_WATCHDOG_CNTL_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED15_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED16_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED17_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_TIMEOUT_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_PAGE_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_STATUS__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GLOBAL_QUANTUM__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE2__RESERVE_22_22__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE2__RESERVED23__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE2__RESERVED24__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE2__RESERVED25__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE2__RESERVED26__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RLC_CGCG_CTRL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE2__SDMA1_AQL_STATUS__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_STATUS_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_STATUS_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GLOBAL_QUANTUM_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE2__RESERVE_22_22_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED23_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED24_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED25_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED26_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RLC_CGCG_CTRL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_AQL_STATUS_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TLBI_GCR_CNTL__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TILING_CONFIG__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HASH__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE3__RESERVED5__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE3__RESERVED7__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CE_CTRL__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE3__SDMA1_FED_STATUS__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE3__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE3__RESERVED11__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE3__RESERVED12__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE3__RESERVED13__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE3__RESERVED14__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE3__RESERVED15__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE3__SDMA1_INT_STATUS__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_LO__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_HI__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE3__RESERVED20__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CLOCK_GATING_STATUS__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS4_REG__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_DATA__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_ADDR__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TIMESTAMP_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE3__RESERVED26__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE3__RESERVED27__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS5_REG__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE3__SDMA1_QUEUE_RESET_REQ__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS6_REG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE3__SDMA1_UCODE1_CHECKSUM__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TLBI_GCR_CNTL_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TILING_CONFIG_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HASH_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE3__RESERVED5_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE3__RESERVED7_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CE_CTRL_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_FED_STATUS_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE3__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE3__RESERVED11_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE3__RESERVED12_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED13_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED14_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED15_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_INT_STATUS_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_LO_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_HI_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED20_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CLOCK_GATING_STATUS_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS4_REG_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_DATA_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_ADDR_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TIMESTAMP_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED26_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED27_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS5_REG_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_QUEUE_RESET_REQ_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS6_REG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_UCODE1_CHECKSUM_MASK 0x80000000L
+//SDMA1_VM_CNTL
+#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA1_F32_CNTL
+#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA1_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA1_F32_CNTL__TH0_CHECKSUM_CLR__SHIFT 0x8
+#define SDMA1_F32_CNTL__TH0_RESET__SHIFT 0x9
+#define SDMA1_F32_CNTL__TH0_ENABLE__SHIFT 0xa
+#define SDMA1_F32_CNTL__TH1_CHECKSUM_CLR__SHIFT 0xc
+#define SDMA1_F32_CNTL__TH1_RESET__SHIFT 0xd
+#define SDMA1_F32_CNTL__TH1_ENABLE__SHIFT 0xe
+#define SDMA1_F32_CNTL__TH0_PRIORITY__SHIFT 0x10
+#define SDMA1_F32_CNTL__TH1_PRIORITY__SHIFT 0x18
+#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA1_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA1_F32_CNTL__TH0_CHECKSUM_CLR_MASK 0x00000100L
+#define SDMA1_F32_CNTL__TH0_RESET_MASK 0x00000200L
+#define SDMA1_F32_CNTL__TH0_ENABLE_MASK 0x00000400L
+#define SDMA1_F32_CNTL__TH1_CHECKSUM_CLR_MASK 0x00001000L
+#define SDMA1_F32_CNTL__TH1_RESET_MASK 0x00002000L
+#define SDMA1_F32_CNTL__TH1_ENABLE_MASK 0x00004000L
+#define SDMA1_F32_CNTL__TH0_PRIORITY_MASK 0x00FF0000L
+#define SDMA1_F32_CNTL__TH1_PRIORITY_MASK 0xFF000000L
+
+
+// addressBlock: gc_sdma0_sdma0perfsdec
+//SDMA0_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA0_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA0_PERFCNT_MISC_CNTL
+#define SDMA0_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA0_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+//SDMA0_PERFCOUNTER0_SELECT
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA0_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER0_SELECT1
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER1_SELECT
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA0_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER1_SELECT1
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+
+
+// addressBlock: gc_sdma0_sdma1perfsdec
+//SDMA1_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA1_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA1_PERFCNT_MISC_CNTL
+#define SDMA1_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA1_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+//SDMA1_PERFCOUNTER0_SELECT
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA1_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER0_SELECT1
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER1_SELECT
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA1_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER1_SELECT1
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+
+
+// addressBlock: gc_sdma0_sdma0perfddec
+//SDMA0_PERFCNT_PERFCOUNTER_LO
+#define SDMA0_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCNT_PERFCOUNTER_HI
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA0_PERFCOUNTER0_LO
+#define SDMA0_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER0_HI
+#define SDMA0_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_LO
+#define SDMA0_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_HI
+#define SDMA0_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_sdma0_sdma1perfddec
+//SDMA1_PERFCNT_PERFCOUNTER_LO
+#define SDMA1_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCNT_PERFCOUNTER_HI
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA1_PERFCOUNTER0_LO
+#define SDMA1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER0_HI
+#define SDMA1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_LO
+#define SDMA1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_HI
+#define SDMA1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_grbmdec
+//GRBM_CNTL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
+#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
+#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
+//GRBM_SKEW_CNTL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
+//GRBM_STATUS2
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
+#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
+#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
+#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
+#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
+#define GRBM_STATUS2__SDMA_SCH_RQ_PENDING__SHIFT 0x13
+#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
+#define GRBM_STATUS2__SDMA_BUSY__SHIFT 0x15
+#define GRBM_STATUS2__SDMA0_RQ_PENDING__SHIFT 0x16
+#define GRBM_STATUS2__SDMA1_RQ_PENDING__SHIFT 0x17
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x1a
+#define GRBM_STATUS2__TCP_BUSY__SHIFT 0x1b
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
+#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
+#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
+#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
+#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
+#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
+#define GRBM_STATUS2__SDMA_SCH_RQ_PENDING_MASK 0x00080000L
+#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
+#define GRBM_STATUS2__SDMA_BUSY_MASK 0x00200000L
+#define GRBM_STATUS2__SDMA0_RQ_PENDING_MASK 0x00400000L
+#define GRBM_STATUS2__SDMA1_RQ_PENDING_MASK 0x00800000L
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x04000000L
+#define GRBM_STATUS2__TCP_BUSY_MASK 0x08000000L
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
+//GRBM_PWR_CNTL
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
+#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
+#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
+#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
+#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
+//GRBM_STATUS
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS__SDMA_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
+#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
+#define GRBM_STATUS__GE_BUSY_NO_DMA__SHIFT 0x10
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
+#define GRBM_STATUS__GE_BUSY__SHIFT 0x15
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
+#define GRBM_STATUS__ANY_ACTIVE__SHIFT 0x1b
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__SDMA_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__GE_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__GE_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__ANY_ACTIVE_MASK 0x08000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+//GRBM_STATUS_SE0
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE0__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE0__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE0__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE0__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE0__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE0__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE0__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE0__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE0__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE0__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE0__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE1
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE1__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE1__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE1__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE1__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE1__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE1__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE1__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE1__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE1__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE1__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE1__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS3
+#define GRBM_STATUS3__GRBM_RLC_INTR_CREDIT_PENDING__SHIFT 0x5
+#define GRBM_STATUS3__GRBM_CPF_INTR_CREDIT_PENDING__SHIFT 0x7
+#define GRBM_STATUS3__MESPIPE0_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS3__MESPIPE1_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS3__PH_BUSY__SHIFT 0xd
+#define GRBM_STATUS3__CH_BUSY__SHIFT 0xe
+#define GRBM_STATUS3__GL2CC_BUSY__SHIFT 0xf
+#define GRBM_STATUS3__GL1CC_BUSY__SHIFT 0x10
+#define GRBM_STATUS3__SEDC_BUSY__SHIFT 0x19
+#define GRBM_STATUS3__PC_BUSY__SHIFT 0x1a
+#define GRBM_STATUS3__GL1H_BUSY__SHIFT 0x1b
+#define GRBM_STATUS3__GUS_LINK_BUSY__SHIFT 0x1c
+#define GRBM_STATUS3__GUS_BUSY__SHIFT 0x1d
+#define GRBM_STATUS3__UTCL1_BUSY__SHIFT 0x1e
+#define GRBM_STATUS3__PMM_BUSY__SHIFT 0x1f
+#define GRBM_STATUS3__GRBM_RLC_INTR_CREDIT_PENDING_MASK 0x00000020L
+#define GRBM_STATUS3__GRBM_CPF_INTR_CREDIT_PENDING_MASK 0x00000080L
+#define GRBM_STATUS3__MESPIPE0_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS3__MESPIPE1_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS3__PH_BUSY_MASK 0x00002000L
+#define GRBM_STATUS3__CH_BUSY_MASK 0x00004000L
+#define GRBM_STATUS3__GL2CC_BUSY_MASK 0x00008000L
+#define GRBM_STATUS3__GL1CC_BUSY_MASK 0x00010000L
+#define GRBM_STATUS3__SEDC_BUSY_MASK 0x02000000L
+#define GRBM_STATUS3__PC_BUSY_MASK 0x04000000L
+#define GRBM_STATUS3__GL1H_BUSY_MASK 0x08000000L
+#define GRBM_STATUS3__GUS_LINK_BUSY_MASK 0x10000000L
+#define GRBM_STATUS3__GUS_BUSY_MASK 0x20000000L
+#define GRBM_STATUS3__UTCL1_BUSY_MASK 0x40000000L
+#define GRBM_STATUS3__PMM_BUSY_MASK 0x80000000L
+//GRBM_SOFT_RESET
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2__SHIFT 0xf
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI__SHIFT 0x15
+#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT 0x17
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA1__SHIFT 0x18
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2_MASK 0x00008000L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI_MASK 0x00200000L
+#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK 0x00800000L
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK 0x01000000L
+//GRBM_GFX_CLKEN_CNTL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
+//GRBM_WAIT_IDLE_CLOCKS
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
+//GRBM_STATUS_SE2
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE2__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE2__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE2__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE2__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE2__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE2__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE2__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE2__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE2__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE2__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE2__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+//GRBM_READ_ERROR
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+//GRBM_READ_ERROR2
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE0__SHIFT 0x9
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE1__SHIFT 0xa
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE2__SHIFT 0xb
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE3__SHIFT 0xc
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA0__SHIFT 0xd
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA1__SHIFT 0xe
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE0_MASK 0x00000200L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE1_MASK 0x00000400L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE2_MASK 0x00000800L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE3_MASK 0x00001000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA0_MASK 0x00002000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA1_MASK 0x00004000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+//GRBM_INT_CNTL
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+//GRBM_TRAP_OP
+#define GRBM_TRAP_OP__RW__SHIFT 0x0
+#define GRBM_TRAP_OP__RW_MASK 0x00000001L
+//GRBM_TRAP_ADDR
+#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_ADDR_MSK
+#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_WD
+#define GRBM_TRAP_WD__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
+//GRBM_TRAP_WD_MSK
+#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
+//GRBM_DSM_BYPASS
+#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
+#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
+#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
+#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
+//GRBM_WRITE_ERROR
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
+#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
+#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x8
+#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
+#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
+#define GRBM_WRITE_ERROR__TMZ__SHIFT 0x11
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL__SHIFT 0x12
+#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
+#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
+#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
+#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000003CL
+#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x00000F00L
+#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
+#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
+#define GRBM_WRITE_ERROR__TMZ_MASK 0x00020000L
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL_MASK 0x00040000L
+#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
+#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
+#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
+//GRBM_CHIP_REVISION
+#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
+#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
+//GRBM_RSMU_CFG
+#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
+#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
+#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
+#define GRBM_RSMU_CFG__DEBUG_MASK__SHIFT 0x11
+#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
+#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
+#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
+#define GRBM_RSMU_CFG__DEBUG_MASK_MASK 0x00020000L
+//GRBM_IH_CREDIT
+#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//GRBM_PWR_CNTL2
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
+//GRBM_UTCL2_INVAL_RANGE_START
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
+//GRBM_UTCL2_INVAL_RANGE_END
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
+//GRBM_RSMU_READ_ERROR
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
+//GRBM_INVALID_PIPE
+#define GRBM_INVALID_PIPE__ADDR__SHIFT 0x2
+#define GRBM_INVALID_PIPE__PIPEID__SHIFT 0x14
+#define GRBM_INVALID_PIPE__MEID__SHIFT 0x16
+#define GRBM_INVALID_PIPE__QUEUEID__SHIFT 0x18
+#define GRBM_INVALID_PIPE__SSRCID__SHIFT 0x1b
+#define GRBM_INVALID_PIPE__INVALID_PIPE__SHIFT 0x1f
+#define GRBM_INVALID_PIPE__ADDR_MASK 0x000FFFFCL
+#define GRBM_INVALID_PIPE__PIPEID_MASK 0x00300000L
+#define GRBM_INVALID_PIPE__MEID_MASK 0x00C00000L
+#define GRBM_INVALID_PIPE__QUEUEID_MASK 0x07000000L
+#define GRBM_INVALID_PIPE__SSRCID_MASK 0x78000000L
+#define GRBM_INVALID_PIPE__INVALID_PIPE_MASK 0x80000000L
+//GRBM_FENCE_RANGE0
+#define GRBM_FENCE_RANGE0__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE0__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE0__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE0__END_MASK 0xFFFF0000L
+//GRBM_FENCE_RANGE1
+#define GRBM_FENCE_RANGE1__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE1__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE1__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE1__END_MASK 0xFFFF0000L
+//GRBM_SCRATCH_REG0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG1
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG2
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG3
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG4
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG5
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG6
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG7
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//VIOLATION_DATA_ASYNC_VF_PROG
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID__SHIFT 0x0
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID__SHIFT 0x4
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR__SHIFT 0x1f
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID_MASK 0x0000000FL
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID_MASK 0x000003F0L
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR_MASK 0x80000000L
+
+
+// addressBlock: gc_cpdec
+//CP_CPC_DEBUG_CNTL
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_CPF_DEBUG_CNTL
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_CPC_STATUS
+#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
+#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
+#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
+#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
+#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
+#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
+#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
+#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
+#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
+#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
+#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
+#define CP_CPC_STATUS__GCRIU_BUSY__SHIFT 0xf
+#define CP_CPC_STATUS__MES_BUSY__SHIFT 0x10
+#define CP_CPC_STATUS__MES_SCRATCH_RAM_BUSY__SHIFT 0x11
+#define CP_CPC_STATUS__RCIU3_BUSY__SHIFT 0x12
+#define CP_CPC_STATUS__MES_INSTRUCTION_CACHE_BUSY__SHIFT 0x13
+#define CP_CPC_STATUS__MES_DATA_CACHE_BUSY__SHIFT 0x14
+#define CP_CPC_STATUS__MEC_DATA_CACHE_BUSY__SHIFT 0x15
+#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
+#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
+#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
+#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
+#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
+#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
+#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
+#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
+#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
+#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
+#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
+#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
+#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
+#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
+#define CP_CPC_STATUS__GCRIU_BUSY_MASK 0x00008000L
+#define CP_CPC_STATUS__MES_BUSY_MASK 0x00010000L
+#define CP_CPC_STATUS__MES_SCRATCH_RAM_BUSY_MASK 0x00020000L
+#define CP_CPC_STATUS__RCIU3_BUSY_MASK 0x00040000L
+#define CP_CPC_STATUS__MES_INSTRUCTION_CACHE_BUSY_MASK 0x00080000L
+#define CP_CPC_STATUS__MES_DATA_CACHE_BUSY_MASK 0x00100000L
+#define CP_CPC_STATUS__MEC_DATA_CACHE_BUSY_MASK 0x00200000L
+#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
+#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
+#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
+//CP_CPC_BUSY_STAT
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPHORE_BUSY__SHIFT 0x1
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPHORE_BUSY__SHIFT 0x11
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPHORE_BUSY_MASK 0x00000002L
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPHORE_BUSY_MASK 0x00020000L
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
+//CP_CPC_STALLED_STAT1
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
+#define CP_CPC_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x7
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
+#define CP_CPC_STALLED_STAT1__GCRIU_WAITING_ON_FREE__SHIFT 0x19
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
+#define CP_CPC_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000080L
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
+#define CP_CPC_STALLED_STAT1__GCRIU_WAITING_ON_FREE_MASK 0x02000000L
+//CP_CPF_STATUS
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
+#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
+#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
+#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
+#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
+#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
+#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
+#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
+#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
+#define CP_CPF_STATUS__RCIU_BUSY__SHIFT 0x12
+#define CP_CPF_STATUS__RCIU_GFX_BUSY__SHIFT 0x13
+#define CP_CPF_STATUS__RCIU_CMP_BUSY__SHIFT 0x14
+#define CP_CPF_STATUS__ROQ_DATA_BUSY__SHIFT 0x15
+#define CP_CPF_STATUS__ROQ_CE_DATA_BUSY__SHIFT 0x16
+#define CP_CPF_STATUS__GCRIU_BUSY__SHIFT 0x17
+#define CP_CPF_STATUS__MES_HQD_BUSY__SHIFT 0x18
+#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
+#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
+#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
+#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
+#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
+#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
+#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
+#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
+#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
+#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
+#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
+#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
+#define CP_CPF_STATUS__RCIU_BUSY_MASK 0x00040000L
+#define CP_CPF_STATUS__RCIU_GFX_BUSY_MASK 0x00080000L
+#define CP_CPF_STATUS__RCIU_CMP_BUSY_MASK 0x00100000L
+#define CP_CPF_STATUS__ROQ_DATA_BUSY_MASK 0x00200000L
+#define CP_CPF_STATUS__ROQ_CE_DATA_BUSY_MASK 0x00400000L
+#define CP_CPF_STATUS__GCRIU_BUSY_MASK 0x00800000L
+#define CP_CPF_STATUS__MES_HQD_BUSY_MASK 0x01000000L
+#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
+#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
+#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
+#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
+//CP_CPF_BUSY_STAT
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_STAT__CSF_DATA_BUSY__SHIFT 0x9
+#define CP_CPF_BUSY_STAT__CSF_CE_DATA_BUSY__SHIFT 0xa
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
+#define CP_CPF_BUSY_STAT__CSF_DATA_BUSY_MASK 0x00000200L
+#define CP_CPF_BUSY_STAT__CSF_CE_DATA_BUSY_MASK 0x00000400L
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
+//CP_CPF_STALLED_STAT1
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
+#define CP_CPF_STALLED_STAT1__DATA_FETCHING_DATA__SHIFT 0xc
+#define CP_CPF_STALLED_STAT1__GCRIU_WAIT_ON_FREE__SHIFT 0xd
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
+#define CP_CPF_STALLED_STAT1__DATA_FETCHING_DATA_MASK 0x00001000L
+#define CP_CPF_STALLED_STAT1__GCRIU_WAIT_ON_FREE_MASK 0x00002000L
+//CP_CPC_BUSY_STAT2
+#define CP_CPC_BUSY_STAT2__MES_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT2__MES_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT2__MES_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT2__MES_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT2__MES_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT2__MES_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT2__MES_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT2__MES_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT2__MES_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT2__MES_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT2__MES_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT2__MES_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT2__MES_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT2__MES_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT2__MES_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT2__MES_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT2__MES_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT2__MES_PIPE3_BUSY_MASK 0x00002000L
+//CP_CPC_GRBM_FREE_COUNT
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+//CP_CPC_PRIV_VIOLATION_ADDR
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0003FFFFL
+//CP_MEC_ME1_HEADER_DUMP
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_HEADER_DUMP
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_CPC_SCRATCH_INDEX
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_CPC_SCRATCH_DATA
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_CPF_GRBM_FREE_COUNT
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
+//CP_CPF_BUSY_STAT2
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPG_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPC_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT2__MES_HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT2__MES_HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT2__MES_HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT2__MES_HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT2__MES_HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPG_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPC_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT2__MES_HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_BUSY_MASK 0x40000000L
+//CP_CPC_HALT_HYST_COUNT
+#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
+#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
+//CP_STALLED_STAT3
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
+#define CP_STALLED_STAT3__GCRIU_WAITING_ON_FREE__SHIFT 0x15
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
+#define CP_STALLED_STAT3__GCRIU_WAITING_ON_FREE_MASK 0x00200000L
+//CP_STALLED_STAT1
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R0__SHIFT 0x2
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R1__SHIFT 0x3
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R0__SHIFT 0x4
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R1__SHIFT 0x5
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R0_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R1_MASK 0x00000008L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R0_MASK 0x00000010L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R1_MASK 0x00000020L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
+//CP_STALLED_STAT2
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
+#define CP_STALLED_STAT2__PFP_TO_MEQ_DDID_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_PULSE__SHIFT 0x15
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_WR_CONFIRM__SHIFT 0x16
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_DDID_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_PULSE_MASK 0x00200000L
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+//CP_BUSY_STAT
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+//CP_STAT
+#define CP_STAT__ROQ_DB_BUSY__SHIFT 0x5
+#define CP_STAT__ROQ_CE_DB_BUSY__SHIFT 0x6
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
+#define CP_STAT__DC_BUSY__SHIFT 0xd
+#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
+#define CP_STAT__PFP_BUSY__SHIFT 0xf
+#define CP_STAT__MEQ_BUSY__SHIFT 0x10
+#define CP_STAT__ME_BUSY__SHIFT 0x11
+#define CP_STAT__QUERY_BUSY__SHIFT 0x12
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
+#define CP_STAT__DMA_BUSY__SHIFT 0x16
+#define CP_STAT__RCIU_BUSY__SHIFT 0x17
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
+#define CP_STAT__GCRIU_BUSY__SHIFT 0x19
+#define CP_STAT__CE_BUSY__SHIFT 0x1a
+#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
+#define CP_STAT__CP_BUSY__SHIFT 0x1f
+#define CP_STAT__ROQ_DB_BUSY_MASK 0x00000020L
+#define CP_STAT__ROQ_CE_DB_BUSY_MASK 0x00000040L
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__GCRIU_BUSY_MASK 0x02000000L
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+//CP_ME_HEADER_DUMP
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_HEADER_DUMP
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_GRBM_FREE_COUNT
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
+//CP_PFP_INSTR_PNTR
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_ME_INSTR_PNTR
+#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC1_INSTR_PNTR
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC2_INSTR_PNTR
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CSF_STAT
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
+//CP_CNTX_STAT
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+//CP_ME_PREEMPTION
+#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
+#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
+//CP_RB1_RPTR
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB0_RPTR
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_RPTR
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_WPTR_DELAY
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
+//CP_RB_WPTR_POLL_CNTL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//CP_ROQ1_THRESHOLDS
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0xa
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x14
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000003FFL
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x000FFC00L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0x3FF00000L
+//CP_ROQ2_THRESHOLDS
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x0
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0xa
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x000003FFL
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x000FFC00L
+//CP_STQ_THRESHOLDS
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
+//CP_MEQ_THRESHOLDS
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
+//CP_ROQ_AVAIL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x00000FFFL
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x0FFF0000L
+//CP_STQ_AVAIL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
+//CP_ROQ2_AVAIL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
+#define CP_ROQ2_AVAIL__ROQ_CNT_DB__SHIFT 0x10
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x00000FFFL
+#define CP_ROQ2_AVAIL__ROQ_CNT_DB_MASK 0x0FFF0000L
+//CP_MEQ_AVAIL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
+//CP_CMD_INDEX
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
+//CP_CMD_DATA
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
+#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
+//CP_ROQ_RB_STAT
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x00000FFFL
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x0FFF0000L
+//CP_ROQ_IB1_STAT
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x00000FFFL
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x0FFF0000L
+//CP_ROQ_IB2_STAT
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x00000FFFL
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x0FFF0000L
+//CP_STQ_STAT
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
+//CP_STQ_WR_STAT
+#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
+#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
+//CP_MEQ_STAT
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
+//CP_ROQ3_THRESHOLDS
+#define CP_ROQ3_THRESHOLDS__R0_DB_START__SHIFT 0x0
+#define CP_ROQ3_THRESHOLDS__R1_DB_START__SHIFT 0xa
+#define CP_ROQ3_THRESHOLDS__R0_DB_START_MASK 0x000003FFL
+#define CP_ROQ3_THRESHOLDS__R1_DB_START_MASK 0x000FFC00L
+//CP_ROQ_DB_STAT
+#define CP_ROQ_DB_STAT__ROQ_RPTR_DB__SHIFT 0x0
+#define CP_ROQ_DB_STAT__ROQ_WPTR_DB__SHIFT 0x10
+#define CP_ROQ_DB_STAT__ROQ_RPTR_DB_MASK 0x00000FFFL
+#define CP_ROQ_DB_STAT__ROQ_WPTR_DB_MASK 0x0FFF0000L
+//CP_INT_STAT_DEBUG
+#define CP_INT_STAT_DEBUG__RESUME_INT_ASSERTED__SHIFT 0x8
+#define CP_INT_STAT_DEBUG__SUSPEND_INT_ASSERTED__SHIFT 0x9
+#define CP_INT_STAT_DEBUG__DMA_WATCH_INT_ASSERTED__SHIFT 0xa
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED__SHIFT 0xb
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_INT_STAT_DEBUG__FUE_INT_STATUS_DEBUG__SHIFT 0xf
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED__SHIFT 0x12
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x13
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x14
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED__SHIFT 0x15
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_INT_STAT_DEBUG__RESUME_INT_ASSERTED_MASK 0x00000100L
+#define CP_INT_STAT_DEBUG__SUSPEND_INT_ASSERTED_MASK 0x00000200L
+#define CP_INT_STAT_DEBUG__DMA_WATCH_INT_ASSERTED_MASK 0x00000400L
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED_MASK 0x00000800L
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_INT_STAT_DEBUG__FUE_INT_STATUS_DEBUG_MASK 0x00008000L
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED_MASK 0x00040000L
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x00080000L
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x00100000L
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED_MASK 0x00200000L
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_DEBUG_CNTL
+#define CP_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_PRIV_VIOLATION_ADDR
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0003FFFFL
+
+
+// addressBlock: gc_padec
+//VGT_DMA_DATA_FIFO_DEPTH
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000003FFL
+//VGT_DMA_REQ_FIFO_DEPTH
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_DRAW_INIT_FIFO_DEPTH
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_MC_LAT_CNTL
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
+//IA_UTCL1_STATUS_2
+#define IA_UTCL1_STATUS_2__IA_BUSY__SHIFT 0x0
+#define IA_UTCL1_STATUS_2__IA_DMA_BUSY__SHIFT 0x1
+#define IA_UTCL1_STATUS_2__IA_DMA_REQ_BUSY__SHIFT 0x2
+#define IA_UTCL1_STATUS_2__IA_GRP_BUSY__SHIFT 0x3
+#define IA_UTCL1_STATUS_2__IA_ADC_BUSY__SHIFT 0x4
+#define IA_UTCL1_STATUS_2__FAULT_DETECTED__SHIFT 0x5
+#define IA_UTCL1_STATUS_2__RETRY_DETECTED__SHIFT 0x6
+#define IA_UTCL1_STATUS_2__PRT_DETECTED__SHIFT 0x7
+#define IA_UTCL1_STATUS_2__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS_2__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS_2__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS_2__IA_BUSY_MASK 0x00000001L
+#define IA_UTCL1_STATUS_2__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_UTCL1_STATUS_2__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_UTCL1_STATUS_2__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_UTCL1_STATUS_2__IA_ADC_BUSY_MASK 0x00000010L
+#define IA_UTCL1_STATUS_2__FAULT_DETECTED_MASK 0x00000020L
+#define IA_UTCL1_STATUS_2__RETRY_DETECTED_MASK 0x00000040L
+#define IA_UTCL1_STATUS_2__PRT_DETECTED_MASK 0x00000080L
+#define IA_UTCL1_STATUS_2__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS_2__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS_2__PRT_UTCL1ID_MASK 0x3F000000L
+//WD_CNTL_STATUS
+#define WD_CNTL_STATUS__DIST_BUSY__SHIFT 0x0
+#define WD_CNTL_STATUS__DIST_BE_BUSY__SHIFT 0x1
+#define WD_CNTL_STATUS__GE_UTCL1_BUSY__SHIFT 0x2
+#define WD_CNTL_STATUS__WD_TE11_BUSY__SHIFT 0x3
+#define WD_CNTL_STATUS__PC_MANAGER_BUSY__SHIFT 0x4
+#define WD_CNTL_STATUS__WLC_BUSY__SHIFT 0x5
+#define WD_CNTL_STATUS__DIST_BUSY_MASK 0x00000001L
+#define WD_CNTL_STATUS__DIST_BE_BUSY_MASK 0x00000002L
+#define WD_CNTL_STATUS__GE_UTCL1_BUSY_MASK 0x00000004L
+#define WD_CNTL_STATUS__WD_TE11_BUSY_MASK 0x00000008L
+#define WD_CNTL_STATUS__PC_MANAGER_BUSY_MASK 0x00000010L
+#define WD_CNTL_STATUS__WLC_BUSY_MASK 0x00000020L
+//CC_GC_PRIM_CONFIG
+#define CC_GC_PRIM_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_PRIM_CONFIG__INACTIVE_PA__SHIFT 0x4
+#define CC_GC_PRIM_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_PRIM_CONFIG__INACTIVE_PA_MASK 0x000FFFF0L
+//WD_QOS
+#define WD_QOS__DRAW_STALL__SHIFT 0x0
+#define WD_QOS__DRAW_STALL_MASK 0x00000001L
+//WD_UTCL1_CNTL
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define WD_UTCL1_CNTL__MTYPE_OVERRIDE__SHIFT 0x1d
+#define WD_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE__SHIFT 0x1e
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define WD_UTCL1_CNTL__MTYPE_OVERRIDE_MASK 0x20000000L
+#define WD_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE_MASK 0x40000000L
+//WD_UTCL1_STATUS
+#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//IA_UTCL1_CNTL
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define IA_UTCL1_CNTL__MTYPE_OVERRIDE__SHIFT 0x1d
+#define IA_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE__SHIFT 0x1e
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define IA_UTCL1_CNTL__MTYPE_OVERRIDE_MASK 0x20000000L
+#define IA_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE_MASK 0x40000000L
+//IA_UTCL1_STATUS
+#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CC_GC_SA_UNIT_DISABLE
+#define CC_GC_SA_UNIT_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define CC_GC_SA_UNIT_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x00FFFF00L
+//GE_RATE_CNTL_1
+#define GE_RATE_CNTL_1__ADD_X_CLKS_LS_VERT__SHIFT 0x0
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_LS_VERT__SHIFT 0x4
+#define GE_RATE_CNTL_1__ADD_X_CLKS_HS_VERT__SHIFT 0x8
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_HS_VERT__SHIFT 0xc
+#define GE_RATE_CNTL_1__ADD_X_CLKS_ES_VERT__SHIFT 0x10
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_ES_VERT__SHIFT 0x14
+#define GE_RATE_CNTL_1__ADD_X_CLKS_GS_PRIM__SHIFT 0x18
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_GS_PRIM__SHIFT 0x1c
+#define GE_RATE_CNTL_1__ADD_X_CLKS_LS_VERT_MASK 0x0000000FL
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_LS_VERT_MASK 0x000000F0L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_HS_VERT_MASK 0x00000F00L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_HS_VERT_MASK 0x0000F000L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_ES_VERT_MASK 0x000F0000L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_ES_VERT_MASK 0x00F00000L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_GS_PRIM_MASK 0x0F000000L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_GS_PRIM_MASK 0xF0000000L
+//GE_RATE_CNTL_2
+#define GE_RATE_CNTL_2__ADD_X_CLKS_VS_VERT__SHIFT 0x0
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_VS_VERT__SHIFT 0x4
+#define GE_RATE_CNTL_2__ADD_X_CLKS_PA_PRIM__SHIFT 0x8
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_PA_PRIM__SHIFT 0xc
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_HS_GS__SHIFT 0x10
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_LS_ES__SHIFT 0x14
+#define GE_RATE_CNTL_2__MERGED_HS_GS_MODE__SHIFT 0x18
+#define GE_RATE_CNTL_2__MERGED_LS_ES_MODE__SHIFT 0x19
+#define GE_RATE_CNTL_2__ENABLE_RATE_CNTL__SHIFT 0x1a
+#define GE_RATE_CNTL_2__SWAP_PRIORITY__SHIFT 0x1b
+#define GE_RATE_CNTL_2__ADD_X_CLKS_VS_VERT_MASK 0x0000000FL
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_VS_VERT_MASK 0x000000F0L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_PA_PRIM_MASK 0x00000F00L
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_PA_PRIM_MASK 0x0000F000L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_HS_GS_MASK 0x000F0000L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_LS_ES_MASK 0x00F00000L
+#define GE_RATE_CNTL_2__MERGED_HS_GS_MODE_MASK 0x01000000L
+#define GE_RATE_CNTL_2__MERGED_LS_ES_MODE_MASK 0x02000000L
+#define GE_RATE_CNTL_2__ENABLE_RATE_CNTL_MASK 0x04000000L
+#define GE_RATE_CNTL_2__SWAP_PRIORITY_MASK 0x08000000L
+//VGT_SYS_CONFIG
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
+#define VGT_SYS_CONFIG__NUM_SUBGROUPS_IN_FLIGHT__SHIFT 0x8
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+#define VGT_SYS_CONFIG__NUM_SUBGROUPS_IN_FLIGHT_MASK 0x0007FF00L
+//GE_PRIV_CONTROL
+#define GE_PRIV_CONTROL__RESERVED__SHIFT 0x0
+#define GE_PRIV_CONTROL__CLAMP_PRIMGRP_SIZE__SHIFT 0x1
+#define GE_PRIV_CONTROL__RESET_ON_PIPELINE_CHANGE__SHIFT 0xa
+#define GE_PRIV_CONTROL__FGCG_OVERRIDE__SHIFT 0xf
+#define GE_PRIV_CONTROL__CLAMP_HS_OFFCHIP_PER_SE_OVERRIDE__SHIFT 0x10
+#define GE_PRIV_CONTROL__DISABLE_ACCUM_AGM__SHIFT 0x11
+#define GE_PRIV_CONTROL__RESERVED_MASK 0x00000001L
+#define GE_PRIV_CONTROL__CLAMP_PRIMGRP_SIZE_MASK 0x000003FEL
+#define GE_PRIV_CONTROL__RESET_ON_PIPELINE_CHANGE_MASK 0x00000400L
+#define GE_PRIV_CONTROL__FGCG_OVERRIDE_MASK 0x00008000L
+#define GE_PRIV_CONTROL__CLAMP_HS_OFFCHIP_PER_SE_OVERRIDE_MASK 0x00010000L
+#define GE_PRIV_CONTROL__DISABLE_ACCUM_AGM_MASK 0x00020000L
+//GE_STATUS
+#define GE_STATUS__PERFCOUNTER_STATUS__SHIFT 0x0
+#define GE_STATUS__THREAD_TRACE_STATUS__SHIFT 0x1
+#define GE_STATUS__PERFCOUNTER_STATUS_MASK 0x00000001L
+#define GE_STATUS__THREAD_TRACE_STATUS_MASK 0x00000002L
+//VGT_GS_MAX_WAVE_ID
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GFX_PIPE_CONTROL
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
+#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_STALL_EN__SHIFT 0x11
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
+#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_STALL_EN_MASK 0x00020000L
+//CC_GC_SHADER_ARRAY_CONFIG
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT 0x10
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK 0xFFFF0000L
+//GE2_SE_CNTL_STATUS
+#define GE2_SE_CNTL_STATUS__TE_BUSY__SHIFT 0x0
+#define GE2_SE_CNTL_STATUS__NGG_BUSY__SHIFT 0x1
+#define GE2_SE_CNTL_STATUS__HS_BUSY__SHIFT 0x2
+#define GE2_SE_CNTL_STATUS__TE_BUSY_MASK 0x00000001L
+#define GE2_SE_CNTL_STATUS__NGG_BUSY_MASK 0x00000002L
+#define GE2_SE_CNTL_STATUS__HS_BUSY_MASK 0x00000004L
+//VGT_RESET_DEBUG
+#define VGT_RESET_DEBUG__GS_DISABLE__SHIFT 0x0
+#define VGT_RESET_DEBUG__TESS_DISABLE__SHIFT 0x1
+#define VGT_RESET_DEBUG__WD_DISABLE__SHIFT 0x2
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE0__SHIFT 0x3
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE1__SHIFT 0x4
+#define VGT_RESET_DEBUG__ENABLE_VMID_RESET_UTCL1__SHIFT 0x5
+#define VGT_RESET_DEBUG__DISABLE_PREFETCH__SHIFT 0x6
+#define VGT_RESET_DEBUG__DISABLE_SWITCH_MODE_STALL_FIX__SHIFT 0x7
+#define VGT_RESET_DEBUG__DISABLE_SENDING_MULTIPLE_SE_IN_PD__SHIFT 0x8
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_ON_OFF__SHIFT 0x9
+#define VGT_RESET_DEBUG__DISABLE_PATCH_OPTIMIZATION__SHIFT 0xa
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_OFF_ON__SHIFT 0xb
+#define VGT_RESET_DEBUG__DISABLE_MERGE_GRP_PERF_FIX__SHIFT 0xc
+#define VGT_RESET_DEBUG__DISABLE_MESH_SHADER_ATTR_PACKING__SHIFT 0xd
+#define VGT_RESET_DEBUG__ENABLE_SMALL_INST_PACK_ADJ_GS_OFF__SHIFT 0xe
+#define VGT_RESET_DEBUG__DISABLE_PATCH_DIST_LAST_DONUT_SE_SWITCH_LOGIC__SHIFT 0xf
+#define VGT_RESET_DEBUG__SPARE__SHIFT 0x10
+#define VGT_RESET_DEBUG__GS_DISABLE_MASK 0x00000001L
+#define VGT_RESET_DEBUG__TESS_DISABLE_MASK 0x00000002L
+#define VGT_RESET_DEBUG__WD_DISABLE_MASK 0x00000004L
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE0_MASK 0x00000008L
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE1_MASK 0x00000010L
+#define VGT_RESET_DEBUG__ENABLE_VMID_RESET_UTCL1_MASK 0x00000020L
+#define VGT_RESET_DEBUG__DISABLE_PREFETCH_MASK 0x00000040L
+#define VGT_RESET_DEBUG__DISABLE_SWITCH_MODE_STALL_FIX_MASK 0x00000080L
+#define VGT_RESET_DEBUG__DISABLE_SENDING_MULTIPLE_SE_IN_PD_MASK 0x00000100L
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_ON_OFF_MASK 0x00000200L
+#define VGT_RESET_DEBUG__DISABLE_PATCH_OPTIMIZATION_MASK 0x00000400L
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_OFF_ON_MASK 0x00000800L
+#define VGT_RESET_DEBUG__DISABLE_MERGE_GRP_PERF_FIX_MASK 0x00001000L
+#define VGT_RESET_DEBUG__DISABLE_MESH_SHADER_ATTR_PACKING_MASK 0x00002000L
+#define VGT_RESET_DEBUG__ENABLE_SMALL_INST_PACK_ADJ_GS_OFF_MASK 0x00004000L
+#define VGT_RESET_DEBUG__DISABLE_PATCH_DIST_LAST_DONUT_SE_SWITCH_LOGIC_MASK 0x00008000L
+#define VGT_RESET_DEBUG__SPARE_MASK 0xFFFF0000L
+//GE_SPI_IF_SAFE_REG
+#define GE_SPI_IF_SAFE_REG__GE_SPI_LS_ES_DATA__SHIFT 0x0
+#define GE_SPI_IF_SAFE_REG__GE_SPI_HS_GS_DATA__SHIFT 0x6
+#define GE_SPI_IF_SAFE_REG__GE_SPI_GRP__SHIFT 0xc
+#define GE_SPI_IF_SAFE_REG__GE_SPI_LS_ES_DATA_MASK 0x0000003FL
+#define GE_SPI_IF_SAFE_REG__GE_SPI_HS_GS_DATA_MASK 0x00000FC0L
+#define GE_SPI_IF_SAFE_REG__GE_SPI_GRP_MASK 0x0003F000L
+//GE_PA_IF_SAFE_REG
+#define GE_PA_IF_SAFE_REG__GE_PA_CSB__SHIFT 0x0
+#define GE_PA_IF_SAFE_REG__GE_PA_PAYLOAD__SHIFT 0xa
+#define GE_PA_IF_SAFE_REG__GE_PA_CSB_MASK 0x000003FFL
+#define GE_PA_IF_SAFE_REG__GE_PA_PAYLOAD_MASK 0x000FFC00L
+//PA_CL_CNTL_STATUS
+#define PA_CL_CNTL_STATUS__CL_BUSY__SHIFT 0x1f
+#define PA_CL_CNTL_STATUS__CL_BUSY_MASK 0x80000000L
+//PA_CL_ENHANCE
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x5
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE__SHIFT 0x11
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE__SHIFT 0x12
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE__SHIFT 0x13
+#define PA_CL_ENHANCE__DISABLE_PA_PH_INTF_FINE_CLOCK_GATE__SHIFT 0x14
+#define PA_CL_ENHANCE__DISABLE_PA_SX_REQ_INTF_FINE_CLOCK_GATE__SHIFT 0x15
+#define PA_CL_ENHANCE__ENABLE_PA_RATE_CNTL__SHIFT 0x16
+#define PA_CL_ENHANCE__CLAMP_NEGATIVE_BB_TO_ZERO__SHIFT 0x17
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE_MASK 0x00020000L
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE_MASK 0x00040000L
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE_MASK 0x00080000L
+#define PA_CL_ENHANCE__DISABLE_PA_PH_INTF_FINE_CLOCK_GATE_MASK 0x00100000L
+#define PA_CL_ENHANCE__DISABLE_PA_SX_REQ_INTF_FINE_CLOCK_GATE_MASK 0x00200000L
+#define PA_CL_ENHANCE__ENABLE_PA_RATE_CNTL_MASK 0x00400000L
+#define PA_CL_ENHANCE__CLAMP_NEGATIVE_BB_TO_ZERO_MASK 0x00800000L
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+//PA_CL_RESET_DEBUG
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE__SHIFT 0x0
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE_MASK 0x00000001L
+//PA_SU_CNTL_STATUS
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+//PA_SC_FIFO_DEPTH_CNTL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
+
+
+// addressBlock: gc_sqdec
+//SQ_CONFIG
+#define SQ_CONFIG__ECO_SPARE__SHIFT 0x0
+#define SQ_CONFIG__NEW_TRANS_ARB_SCHEME__SHIFT 0x8
+#define SQ_CONFIG__DISABLE_VMEM_EXEC_ZERO_SKIP__SHIFT 0x9
+#define SQ_CONFIG__DISABLE_SGPR_RD_KILL__SHIFT 0xa
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_GS__SHIFT 0x12
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_GS__SHIFT 0x13
+#define SQ_CONFIG__WCLK_HYSTERESIS_CNT__SHIFT 0x15
+#define SQ_CONFIG__DISABLE_END_CLAUSE_TX__SHIFT 0x1b
+#define SQ_CONFIG__ECO_SPARE_MASK 0x000000FFL
+#define SQ_CONFIG__NEW_TRANS_ARB_SCHEME_MASK 0x00000100L
+#define SQ_CONFIG__DISABLE_VMEM_EXEC_ZERO_SKIP_MASK 0x00000200L
+#define SQ_CONFIG__DISABLE_SGPR_RD_KILL_MASK 0x00000400L
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_GS_MASK 0x00040000L
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_GS_MASK 0x00180000L
+#define SQ_CONFIG__WCLK_HYSTERESIS_CNT_MASK 0x00600000L
+#define SQ_CONFIG__DISABLE_END_CLAUSE_TX_MASK 0x08000000L
+//SQC_CONFIG
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0x9
+#define SQC_CONFIG__EVICT_LRU__SHIFT 0xa
+#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xc
+#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xd
+#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0xe
+#define SQC_CONFIG__CACHE_CTRL_GCR_FIX_DISABLE__SHIFT 0x16
+#define SQC_CONFIG__CACHE_CTRL_ALMOST_MAX_INFLIGHT_CONFIG__SHIFT 0x17
+#define SQC_CONFIG__SPARE__SHIFT 0x1a
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000200L
+#define SQC_CONFIG__EVICT_LRU_MASK 0x00000C00L
+#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00001000L
+#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00002000L
+#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x003FC000L
+#define SQC_CONFIG__CACHE_CTRL_GCR_FIX_DISABLE_MASK 0x00400000L
+#define SQC_CONFIG__CACHE_CTRL_ALMOST_MAX_INFLIGHT_CONFIG_MASK 0x03800000L
+#define SQC_CONFIG__SPARE_MASK 0xFC000000L
+//LDS_CONFIG
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
+#define LDS_CONFIG__CONF_BIT_1__SHIFT 0x1
+#define LDS_CONFIG__WAVE32_INTERP_DUAL_ISSUE_DISABLE__SHIFT 0x2
+#define LDS_CONFIG__SP_TDDATA_FGCG_OVERRIDE__SHIFT 0x3
+#define LDS_CONFIG__SQC_PERF_FGCG_OVERRIDE__SHIFT 0x4
+#define LDS_CONFIG__CONF_BIT_5__SHIFT 0x5
+#define LDS_CONFIG__CONF_BIT_6__SHIFT 0x6
+#define LDS_CONFIG__CONF_BIT_7__SHIFT 0x7
+#define LDS_CONFIG__CONF_BIT_8__SHIFT 0x8
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
+#define LDS_CONFIG__CONF_BIT_1_MASK 0x00000002L
+#define LDS_CONFIG__WAVE32_INTERP_DUAL_ISSUE_DISABLE_MASK 0x00000004L
+#define LDS_CONFIG__SP_TDDATA_FGCG_OVERRIDE_MASK 0x00000008L
+#define LDS_CONFIG__SQC_PERF_FGCG_OVERRIDE_MASK 0x00000010L
+#define LDS_CONFIG__CONF_BIT_5_MASK 0x00000020L
+#define LDS_CONFIG__CONF_BIT_6_MASK 0x00000040L
+#define LDS_CONFIG__CONF_BIT_7_MASK 0x00000080L
+#define LDS_CONFIG__CONF_BIT_8_MASK 0x00000100L
+//SQ_RANDOM_WAVE_PRI
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
+#define SQ_RANDOM_WAVE_PRI__FORCE_IB_ARB_PRIO_MSK_VALID__SHIFT 0x1f
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x00FFFC00L
+#define SQ_RANDOM_WAVE_PRI__FORCE_IB_ARB_PRIO_MSK_VALID_MASK 0x80000000L
+//SQG_STATUS
+#define SQG_STATUS__REG_BUSY__SHIFT 0x0
+#define SQG_STATUS__REG_BUSY_MASK 0x00000001L
+//SQ_FIFO_SIZES
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
+#define SQ_FIFO_SIZES__EXPORT_BUF_GS_RESERVED__SHIFT 0xc
+#define SQ_FIFO_SIZES__EXPORT_BUF_PS_RESERVED__SHIFT 0xe
+#define SQ_FIFO_SIZES__EXPORT_BUF_REDUCE__SHIFT 0x10
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
+#define SQ_FIFO_SIZES__EXPORT_BUF_PRIMPOS_LIMIT__SHIFT 0x14
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000300L
+#define SQ_FIFO_SIZES__EXPORT_BUF_GS_RESERVED_MASK 0x00003000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_PS_RESERVED_MASK 0x0000C000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_REDUCE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_PRIMPOS_LIMIT_MASK 0x00300000L
+//SQ_DSM_CNTL
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQ_DSM_CNTL2
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
+//SP_CONFIG
+#define SP_CONFIG__DEST_CACHE_EVICT_COUNTER__SHIFT 0x0
+#define SP_CONFIG__ALU_BUSY_MGCG_OVERRIDE__SHIFT 0x2
+#define SP_CONFIG__DISABLE_TRANS_COEXEC__SHIFT 0x3
+#define SP_CONFIG__CAC_COUNTER_OVERRIDE__SHIFT 0x4
+#define SP_CONFIG__SP_SX_EXPVDATA_FGCG_OVERRIDE__SHIFT 0x5
+#define SP_CONFIG__DEST_CACHE_EVICT_COUNTER_MASK 0x00000003L
+#define SP_CONFIG__ALU_BUSY_MGCG_OVERRIDE_MASK 0x00000004L
+#define SP_CONFIG__DISABLE_TRANS_COEXEC_MASK 0x00000008L
+#define SP_CONFIG__CAC_COUNTER_OVERRIDE_MASK 0x00000010L
+#define SP_CONFIG__SP_SX_EXPVDATA_FGCG_OVERRIDE_MASK 0x00000020L
+//SQ_ARB_CONFIG
+#define SQ_ARB_CONFIG__WG_RR_INTERVAL__SHIFT 0x0
+#define SQ_ARB_CONFIG__FWD_PROG_INTERVAL__SHIFT 0x4
+#define SQ_ARB_CONFIG__WG_RR_INTERVAL_MASK 0x00000003L
+#define SQ_ARB_CONFIG__FWD_PROG_INTERVAL_MASK 0x00000030L
+//SQ_DEBUG_HOST_TRAP_STATUS
+#define SQ_DEBUG_HOST_TRAP_STATUS__PENDING_COUNT__SHIFT 0x0
+#define SQ_DEBUG_HOST_TRAP_STATUS__PENDING_COUNT_MASK 0x0000007FL
+//SQG_GL1H_STATUS
+#define SQG_GL1H_STATUS__R0_ACK_ERR_DETECTED__SHIFT 0x0
+#define SQG_GL1H_STATUS__R0_XNACK_ERR_DETECTED__SHIFT 0x1
+#define SQG_GL1H_STATUS__R1_ACK_ERR_DETECTED__SHIFT 0x2
+#define SQG_GL1H_STATUS__R1_XNACK_ERR_DETECTED__SHIFT 0x3
+#define SQG_GL1H_STATUS__R0_ACK_ERR_DETECTED_MASK 0x00000001L
+#define SQG_GL1H_STATUS__R0_XNACK_ERR_DETECTED_MASK 0x00000002L
+#define SQG_GL1H_STATUS__R1_ACK_ERR_DETECTED_MASK 0x00000004L
+#define SQG_GL1H_STATUS__R1_XNACK_ERR_DETECTED_MASK 0x00000008L
+//SQG_CONFIG
+#define SQG_CONFIG__GL1H_PREFETCH_PAGE__SHIFT 0x0
+#define SQG_CONFIG__SQG_ICPFT_EN__SHIFT 0xd
+#define SQG_CONFIG__SQG_ICPFT_CLR__SHIFT 0xe
+#define SQG_CONFIG__XNACK_INTR_MASK__SHIFT 0x10
+#define SQG_CONFIG__GL1H_PREFETCH_PAGE_MASK 0x0000000FL
+#define SQG_CONFIG__SQG_ICPFT_EN_MASK 0x00002000L
+#define SQG_CONFIG__SQG_ICPFT_CLR_MASK 0x00004000L
+#define SQG_CONFIG__XNACK_INTR_MASK_MASK 0xFFFF0000L
+//SQ_PERF_SNAPSHOT_CTRL
+#define SQ_PERF_SNAPSHOT_CTRL__TIMER_ON_OFF__SHIFT 0x0
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK__SHIFT 0x1
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL__SHIFT 0x11
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTERVAL__SHIFT 0x12
+#define SQ_PERF_SNAPSHOT_CTRL__TIMER_ON_OFF_MASK 0x00000001L
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK_MASK 0x0001FFFEL
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL_MASK 0x00020000L
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTERVAL_MASK 0x003C0000L
+//CC_GC_SHADER_RATE_CONFIG
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+//SQ_INTERRUPT_AUTO_MASK
+#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
+#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
+//SQ_INTERRUPT_MSG_CTRL
+#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
+#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
+//SQ_WATCH0_ADDR_H
+#define SQ_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH0_ADDR_L
+#define SQ_WATCH0_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH0_CNTL
+#define SQ_WATCH0_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH0_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH0_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH1_ADDR_H
+#define SQ_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH1_ADDR_L
+#define SQ_WATCH1_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH1_CNTL
+#define SQ_WATCH1_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH1_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH1_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH2_ADDR_H
+#define SQ_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH2_ADDR_L
+#define SQ_WATCH2_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH2_CNTL
+#define SQ_WATCH2_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH2_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH2_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH3_ADDR_H
+#define SQ_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH3_ADDR_L
+#define SQ_WATCH3_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH3_CNTL
+#define SQ_WATCH3_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH3_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH3_CNTL__VALID_MASK 0x80000000L
+//SQ_IND_INDEX
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
+#define SQ_IND_INDEX__WORKITEM_ID__SHIFT 0x5
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xb
+#define SQ_IND_INDEX__INDEX__SHIFT 0x10
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000001FL
+#define SQ_IND_INDEX__WORKITEM_ID_MASK 0x000007E0L
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00000800L
+#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
+//SQ_IND_DATA
+#define SQ_IND_DATA__DATA__SHIFT 0x0
+#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//SQ_CMD
+#define SQ_CMD__CMD__SHIFT 0x0
+#define SQ_CMD__MODE__SHIFT 0x4
+#define SQ_CMD__CHECK_VMID__SHIFT 0x7
+#define SQ_CMD__DATA__SHIFT 0x8
+#define SQ_CMD__WAVE_ID__SHIFT 0x10
+#define SQ_CMD__QUEUE_ID__SHIFT 0x18
+#define SQ_CMD__VM_ID__SHIFT 0x1c
+#define SQ_CMD__CMD_MASK 0x0000000FL
+#define SQ_CMD__MODE_MASK 0x00000070L
+#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
+#define SQ_CMD__DATA_MASK 0x00000F00L
+#define SQ_CMD__WAVE_ID_MASK 0x001F0000L
+#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
+#define SQ_CMD__VM_ID_MASK 0xF0000000L
+//SQC_MISC_CONFIG
+#define SQC_MISC_CONFIG__UNUSED__SHIFT 0x0
+#define SQC_MISC_CONFIG__SQC_SPI_TTRACE_FGCG_OVERRIDE__SHIFT 0x5
+#define SQC_MISC_CONFIG__SQ_SPI_MSG_FGCG_OVERRIDE__SHIFT 0x6
+#define SQC_MISC_CONFIG__SPI_SQ_EXPALLOC_FGCG_OVERRIDE__SHIFT 0x7
+#define SQC_MISC_CONFIG__SQC_SQ_DATA_RET_FGCG_OVERRIDE__SHIFT 0x8
+#define SQC_MISC_CONFIG__SQC_SQ_INST_RET_FGCG_OVERRIDE__SHIFT 0x9
+#define SQC_MISC_CONFIG__SQC_GCR_RSP_FGCG_OVERRIDE__SHIFT 0xa
+#define SQC_MISC_CONFIG__ICLK_MGCG_DISABLE__SHIFT 0xb
+#define SQC_MISC_CONFIG__ICLK_BANK_MGCG_DISABLE__SHIFT 0xc
+#define SQC_MISC_CONFIG__DCLK_MGCG_DISABLE__SHIFT 0xd
+#define SQC_MISC_CONFIG__GCLK_MGCG_DISABLE__SHIFT 0xe
+#define SQC_MISC_CONFIG__MCLK_MGCG_DISABLE__SHIFT 0xf
+#define SQC_MISC_CONFIG__PCLK_MGCG_DISABLE__SHIFT 0x10
+#define SQC_MISC_CONFIG__BCLK_MGCG_DISABLE__SHIFT 0x11
+#define SQC_MISC_CONFIG__SQC_TA_RESET_FGCG_OVERRIDE__SHIFT 0x12
+#define SQC_MISC_CONFIG__SQC_LDS_CONFIG_FGCG_OVERRIDE__SHIFT 0x13
+#define SQC_MISC_CONFIG__DCLK_BANK_MGCG_DISABLE__SHIFT 0x14
+#define SQC_MISC_CONFIG__SQC_SQ_BARRIER_DONE_FGCG_OVERRIDE__SHIFT 0x15
+#define SQC_MISC_CONFIG__SQC_SQ_MSGDONE_FGCG_OVERRIDE__SHIFT 0x16
+#define SQC_MISC_CONFIG__CMCLK_MGCG_DISABLE__SHIFT 0x17
+#define SQC_MISC_CONFIG__SQC_GL1_CLKEN_OVERRIDE__SHIFT 0x18
+#define SQC_MISC_CONFIG__SQC_CORE_OVERRIDE__SHIFT 0x19
+#define SQC_MISC_CONFIG__ICLK_HMF_BS_MGCG_DISABLE__SHIFT 0x1a
+#define SQC_MISC_CONFIG__ICLK_CC_MGCG_DISABLE__SHIFT 0x1b
+#define SQC_MISC_CONFIG__DCLK_HMF_BS_MGCG_DISABLE__SHIFT 0x1c
+#define SQC_MISC_CONFIG__DCLK_CC_MGCG_DISABLE__SHIFT 0x1d
+#define SQC_MISC_CONFIG__UNUSED_MASK 0x0000001FL
+#define SQC_MISC_CONFIG__SQC_SPI_TTRACE_FGCG_OVERRIDE_MASK 0x00000020L
+#define SQC_MISC_CONFIG__SQ_SPI_MSG_FGCG_OVERRIDE_MASK 0x00000040L
+#define SQC_MISC_CONFIG__SPI_SQ_EXPALLOC_FGCG_OVERRIDE_MASK 0x00000080L
+#define SQC_MISC_CONFIG__SQC_SQ_DATA_RET_FGCG_OVERRIDE_MASK 0x00000100L
+#define SQC_MISC_CONFIG__SQC_SQ_INST_RET_FGCG_OVERRIDE_MASK 0x00000200L
+#define SQC_MISC_CONFIG__SQC_GCR_RSP_FGCG_OVERRIDE_MASK 0x00000400L
+#define SQC_MISC_CONFIG__ICLK_MGCG_DISABLE_MASK 0x00000800L
+#define SQC_MISC_CONFIG__ICLK_BANK_MGCG_DISABLE_MASK 0x00001000L
+#define SQC_MISC_CONFIG__DCLK_MGCG_DISABLE_MASK 0x00002000L
+#define SQC_MISC_CONFIG__GCLK_MGCG_DISABLE_MASK 0x00004000L
+#define SQC_MISC_CONFIG__MCLK_MGCG_DISABLE_MASK 0x00008000L
+#define SQC_MISC_CONFIG__PCLK_MGCG_DISABLE_MASK 0x00010000L
+#define SQC_MISC_CONFIG__BCLK_MGCG_DISABLE_MASK 0x00020000L
+#define SQC_MISC_CONFIG__SQC_TA_RESET_FGCG_OVERRIDE_MASK 0x00040000L
+#define SQC_MISC_CONFIG__SQC_LDS_CONFIG_FGCG_OVERRIDE_MASK 0x00080000L
+#define SQC_MISC_CONFIG__DCLK_BANK_MGCG_DISABLE_MASK 0x00100000L
+#define SQC_MISC_CONFIG__SQC_SQ_BARRIER_DONE_FGCG_OVERRIDE_MASK 0x00200000L
+#define SQC_MISC_CONFIG__SQC_SQ_MSGDONE_FGCG_OVERRIDE_MASK 0x00400000L
+#define SQC_MISC_CONFIG__CMCLK_MGCG_DISABLE_MASK 0x00800000L
+#define SQC_MISC_CONFIG__SQC_GL1_CLKEN_OVERRIDE_MASK 0x01000000L
+#define SQC_MISC_CONFIG__SQC_CORE_OVERRIDE_MASK 0x02000000L
+#define SQC_MISC_CONFIG__ICLK_HMF_BS_MGCG_DISABLE_MASK 0x04000000L
+#define SQC_MISC_CONFIG__ICLK_CC_MGCG_DISABLE_MASK 0x08000000L
+#define SQC_MISC_CONFIG__DCLK_HMF_BS_MGCG_DISABLE_MASK 0x10000000L
+#define SQC_MISC_CONFIG__DCLK_CC_MGCG_DISABLE_MASK 0x20000000L
+
+
+// addressBlock: gc_shsdec
+//SX_DEBUG_BUSY
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ3__SHIFT 0x0
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ2__SHIFT 0x1
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ1__SHIFT 0x2
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ3__SHIFT 0x3
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ2__SHIFT 0x4
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ1__SHIFT 0x5
+#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x6
+#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x7
+#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x8
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x9
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL__SHIFT 0xa
+#define SX_DEBUG_BUSY__SX_SX_IN_VALID__SHIFT 0xb
+#define SX_DEBUG_BUSY__SX_SX_OUT_VALID__SHIFT 0xc
+#define SX_DEBUG_BUSY__RESERVED__SHIFT 0xd
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ3_MASK 0x00000001L
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ2_MASK 0x00000002L
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ1_MASK 0x00000004L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ3_MASK 0x00000008L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ2_MASK 0x00000010L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ1_MASK 0x00000020L
+#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x00000040L
+#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x00000080L
+#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x00000100L
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x00000200L
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL_MASK 0x00000400L
+#define SX_DEBUG_BUSY__SX_SX_IN_VALID_MASK 0x00000800L
+#define SX_DEBUG_BUSY__SX_SX_OUT_VALID_MASK 0x00001000L
+#define SX_DEBUG_BUSY__RESERVED_MASK 0xFFFFE000L
+//SX_DEBUG_BUSY_2
+#define SX_DEBUG_BUSY_2__COL_SCBD0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0__SHIFT 0x1
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE__SHIFT 0x2
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0__SHIFT 0x4
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE__SHIFT 0x5
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0__SHIFT 0x7
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE__SHIFT 0x8
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0__SHIFT 0xa
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE__SHIFT 0xb
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_2__COL_DBIF3_QUAD_FREE__SHIFT 0xf
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_2__COL_DBIF2_QUAD_FREE__SHIFT 0x12
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_2__COL_DBIF1_QUAD_FREE__SHIFT 0x15
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_2__COL_DBIF0_QUAD_FREE__SHIFT 0x18
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_2__COL_SCBD0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0_MASK 0x00000002L
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE_MASK 0x00000004L
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0_MASK 0x00000010L
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE_MASK 0x00000020L
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0_MASK 0x00000080L
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE_MASK 0x00000100L
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0_MASK 0x00000400L
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE_MASK 0x00000800L
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_QUAD_FREE_MASK 0x00008000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_QUAD_FREE_MASK 0x00040000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_QUAD_FREE_MASK 0x00200000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_QUAD_FREE_MASK 0x01000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_3
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_4
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_1
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
+#define SX_DEBUG_1__ENABLE_FIFO_DEBUG_WRITE__SHIFT 0x7
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
+#define SX_DEBUG_1__DISABLE_REP_FGCG__SHIFT 0xd
+#define SX_DEBUG_1__ENABLE_SAME_PC_GDS_CGTS__SHIFT 0xe
+#define SX_DEBUG_1__DISABLE_RAM_FGCG__SHIFT 0xf
+#define SX_DEBUG_1__PC_DISABLE_SAME_ADDR_OPT__SHIFT 0x10
+#define SX_DEBUG_1__DISABLE_COL_VAL_READ_OPT__SHIFT 0x11
+#define SX_DEBUG_1__DISABLE_BC_RB_PLUS__SHIFT 0x12
+#define SX_DEBUG_1__DISABLE_NATIVE_DOWNCVT_FMT_MAPPING__SHIFT 0x13
+#define SX_DEBUG_1__DISABLE_SCBD_READ_PWR_OPT__SHIFT 0x14
+#define SX_DEBUG_1__DISABLE_GDS_CGTS_OPT__SHIFT 0x15
+#define SX_DEBUG_1__DISABLE_DOWNCVT_PWR_OPT__SHIFT 0x16
+#define SX_DEBUG_1__DISABLE_POS_BUFF_REUSE_OPT__SHIFT 0x17
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007FL
+#define SX_DEBUG_1__ENABLE_FIFO_DEBUG_WRITE_MASK 0x00000080L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x00000100L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x00000200L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x00000400L
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x00000800L
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x00001000L
+#define SX_DEBUG_1__DISABLE_REP_FGCG_MASK 0x00002000L
+#define SX_DEBUG_1__ENABLE_SAME_PC_GDS_CGTS_MASK 0x00004000L
+#define SX_DEBUG_1__DISABLE_RAM_FGCG_MASK 0x00008000L
+#define SX_DEBUG_1__PC_DISABLE_SAME_ADDR_OPT_MASK 0x00010000L
+#define SX_DEBUG_1__DISABLE_COL_VAL_READ_OPT_MASK 0x00020000L
+#define SX_DEBUG_1__DISABLE_BC_RB_PLUS_MASK 0x00040000L
+#define SX_DEBUG_1__DISABLE_NATIVE_DOWNCVT_FMT_MAPPING_MASK 0x00080000L
+#define SX_DEBUG_1__DISABLE_SCBD_READ_PWR_OPT_MASK 0x00100000L
+#define SX_DEBUG_1__DISABLE_GDS_CGTS_OPT_MASK 0x00200000L
+#define SX_DEBUG_1__DISABLE_DOWNCVT_PWR_OPT_MASK 0x00400000L
+#define SX_DEBUG_1__DISABLE_POS_BUFF_REUSE_OPT_MASK 0x00800000L
+//SX_DEBUG_BUSY_5
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK6_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK6_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_6
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK6_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_6__COL_REQ3_CREDIT_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_6__COL_REQ3_FLOP_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_6__COL_REQ2_CREDIT_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_6__COL_REQ2_FLOP_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_6__COL_REQ1_CREDIT_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_6__COL_REQ1_FLOP_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_6__COL_REQ0_CREDIT_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK6_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_6__COL_REQ3_CREDIT_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_6__COL_REQ3_FLOP_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_6__COL_REQ2_CREDIT_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_6__COL_REQ2_FLOP_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_6__COL_REQ1_CREDIT_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_6__COL_REQ1_FLOP_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_6__COL_REQ0_CREDIT_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_7
+#define SX_DEBUG_BUSY_7__COL_REQ0_FLOP_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_7__COL_SCBD1_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1__SHIFT 0x2
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_ADJ__SHIFT 0x3
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ2__SHIFT 0x4
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ3__SHIFT 0x5
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ4__SHIFT 0x6
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ5__SHIFT 0x7
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALID_OUT__SHIFT 0x8
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1__SHIFT 0x9
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_ADJ__SHIFT 0xa
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ2__SHIFT 0xb
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ3__SHIFT 0xc
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ4__SHIFT 0xd
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ5__SHIFT 0xe
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALID_OUT__SHIFT 0xf
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1__SHIFT 0x10
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_ADJ__SHIFT 0x11
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ2__SHIFT 0x12
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ3__SHIFT 0x13
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ4__SHIFT 0x14
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ5__SHIFT 0x15
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALID_OUT__SHIFT 0x16
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1__SHIFT 0x17
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_ADJ__SHIFT 0x18
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ2__SHIFT 0x19
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ3__SHIFT 0x1a
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ4__SHIFT 0x1b
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ5__SHIFT 0x1c
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALID_OUT__SHIFT 0x1d
+#define SX_DEBUG_BUSY_7__RESERVED__SHIFT 0x1e
+#define SX_DEBUG_BUSY_7__COL_REQ0_FLOP_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_7__COL_SCBD1_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_MASK 0x00000004L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_ADJ_MASK 0x00000008L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ2_MASK 0x00000010L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ3_MASK 0x00000020L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ4_MASK 0x00000040L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ5_MASK 0x00000080L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALID_OUT_MASK 0x00000100L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_MASK 0x00000200L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_ADJ_MASK 0x00000400L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ2_MASK 0x00000800L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ3_MASK 0x00001000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ4_MASK 0x00002000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ5_MASK 0x00004000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALID_OUT_MASK 0x00008000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_MASK 0x00010000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_ADJ_MASK 0x00020000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ2_MASK 0x00040000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ3_MASK 0x00080000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ4_MASK 0x00100000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ5_MASK 0x00200000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALID_OUT_MASK 0x00400000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_MASK 0x00800000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_ADJ_MASK 0x01000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ2_MASK 0x02000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ3_MASK 0x04000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ4_MASK 0x08000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ5_MASK 0x10000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALID_OUT_MASK 0x20000000L
+#define SX_DEBUG_BUSY_7__RESERVED_MASK 0xC0000000L
+//SX_DEBUG_BUSY_8
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL3_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL2_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL1_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL0_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL3_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL2_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL1_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL0_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL3_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL2_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL1_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL0_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL3_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL2_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL1_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL0_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL3_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL2_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL1_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL0_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL3_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL2_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL1_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL0_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL3_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL2_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL1_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL0_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL3_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL2_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL1_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL0_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL3_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL2_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL1_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL0_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL3_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL2_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL1_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL0_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL3_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL2_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL1_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL0_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL3_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL2_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL1_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL0_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_9
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL3_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL2_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL1_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL0_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL3_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL2_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL1_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL0_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL3_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL2_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL1_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL0_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL3_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL2_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL1_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL0_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL3_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL2_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL1_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL0_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL3_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL2_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL1_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL0_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL3_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL2_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL1_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL0_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL3_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL2_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL1_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL0_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL3_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL2_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL1_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL0_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL3_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL2_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL1_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL0_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL3_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL2_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL1_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL0_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL3_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL2_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL1_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL0_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_10
+#define SX_DEBUG_BUSY_10__POS_SCBD_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_10__POS_FREE_OR_VALIDS__SHIFT 0x1
+#define SX_DEBUG_BUSY_10__POS_REQUESTER_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_10__PA_SX_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ3__SHIFT 0x4
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ2__SHIFT 0x5
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ1__SHIFT 0x6
+#define SX_DEBUG_BUSY_10__IDX_SCBD_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_10__IDX_FREE_OR_VALIDS__SHIFT 0x8
+#define SX_DEBUG_BUSY_10__IDX_REQUESTER_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_10__PA_SX_IDX_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ3__SHIFT 0xb
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ2__SHIFT 0xc
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ1__SHIFT 0xd
+#define SX_DEBUG_BUSY_10__RESERVED__SHIFT 0xe
+#define SX_DEBUG_BUSY_10__POS_SCBD_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_10__POS_FREE_OR_VALIDS_MASK 0x00000002L
+#define SX_DEBUG_BUSY_10__POS_REQUESTER_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_10__PA_SX_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ3_MASK 0x00000010L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ2_MASK 0x00000020L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ1_MASK 0x00000040L
+#define SX_DEBUG_BUSY_10__IDX_SCBD_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_10__IDX_FREE_OR_VALIDS_MASK 0x00000100L
+#define SX_DEBUG_BUSY_10__IDX_REQUESTER_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_10__PA_SX_IDX_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ3_MASK 0x00000800L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ2_MASK 0x00001000L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ1_MASK 0x00002000L
+#define SX_DEBUG_BUSY_10__RESERVED_MASK 0xFFFFC000L
+//SPI_PS_MAX_WAVE_ID
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
+//SPI_GFX_CNTL
+#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
+#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
+//SPI_DEBUG_READ
+#define SPI_DEBUG_READ__DATA__SHIFT 0x0
+#define SPI_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//SPI_DSM_CNTL
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+//SPI_DSM_CNTL2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x3
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000001F8L
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT__SHIFT 0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT_MASK 0x00000003L
+//SPI_DEBUG_BUSY
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x2
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x3
+#define SPI_DEBUG_BUSY__PS2_BUSY__SHIFT 0x4
+#define SPI_DEBUG_BUSY__PS3_BUSY__SHIFT 0x5
+#define SPI_DEBUG_BUSY__CSG0_BUSY__SHIFT 0x6
+#define SPI_DEBUG_BUSY__CSG1_BUSY__SHIFT 0x7
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x8
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x9
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0xa
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0xb
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0xc
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0xd
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0xe
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0xf
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0x10
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0x11
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x12
+#define SPI_DEBUG_BUSY__OFC_LDS_BUSY__SHIFT 0x13
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x14
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x15
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x16
+#define SPI_DEBUG_BUSY__RSRC_ALLOC_BUSY__SHIFT 0x17
+#define SPI_DEBUG_BUSY__PWS_BUSY__SHIFT 0x18
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__PS2_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__PS3_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__CSG0_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__CSG1_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00040000L
+#define SPI_DEBUG_BUSY__OFC_LDS_BUSY_MASK 0x00080000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00200000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00400000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC_BUSY_MASK 0x00800000L
+#define SPI_DEBUG_BUSY__PWS_BUSY_MASK 0x01000000L
+//SPI_CONFIG_PS_CU_EN
+#define SPI_CONFIG_PS_CU_EN__PKR_OFFSET__SHIFT 0x0
+#define SPI_CONFIG_PS_CU_EN__PKR2_OFFSET__SHIFT 0x4
+#define SPI_CONFIG_PS_CU_EN__PKR3_OFFSET__SHIFT 0x8
+#define SPI_CONFIG_PS_CU_EN__PKR_OFFSET_MASK 0x0000000FL
+#define SPI_CONFIG_PS_CU_EN__PKR2_OFFSET_MASK 0x000000F0L
+#define SPI_CONFIG_PS_CU_EN__PKR3_OFFSET_MASK 0x00000F00L
+//SPI_WF_LIFETIME_CNTL
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
+#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
+#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
+//SPI_WF_LIFETIME_LIMIT_0
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_1
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_2
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_3
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_4
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_5
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_0
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_2
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_4
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_6
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_7
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_9
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_11
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_13
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_14
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_15
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_16
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_17
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_18
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_19
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_20
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_DEBUG
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE__SHIFT 0x0
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_21
+#define SPI_WF_LIFETIME_STATUS_21__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_21__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_21__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_21__INT_SENT_MASK 0x80000000L
+//SPI_LB_CTR_CTRL
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
+#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
+#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
+#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
+//SPI_LB_WGP_MASK
+#define SPI_LB_WGP_MASK__WGP_MASK__SHIFT 0x0
+#define SPI_LB_WGP_MASK__WGP_MASK_MASK 0xFFFFL
+//SPI_LB_DATA_REG
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
+//SPI_PG_ENABLE_STATIC_WGP_MASK
+#define SPI_PG_ENABLE_STATIC_WGP_MASK__WGP_MASK__SHIFT 0x0
+#define SPI_PG_ENABLE_STATIC_WGP_MASK__WGP_MASK_MASK 0xFFFFL
+//SPI_GDS_CREDITS
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
+//SPI_SX_EXPORT_BUFFER_SIZES
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
+//SPI_SX_SCOREBOARD_BUFFER_SIZES
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
+//SPI_CSQ_WF_ACTIVE_STATUS
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
+//SPI_CSQ_WF_ACTIVE_COUNT_0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_1
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_2
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_3
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x07FF0000L
+//SPI_LB_DATA_WAVES
+#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
+#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
+#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
+#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERWGP_WAVE_HSGS
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_HS__SHIFT 0x0
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_GS__SHIFT 0x10
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_HS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_GS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERWGP_WAVE_CS
+#define SPI_LB_DATA_PERWGP_WAVE_CS__ACTIVE__SHIFT 0x0
+#define SPI_LB_DATA_PERWGP_WAVE_CS__ACTIVE_MASK 0xFFFFL
+//SPIS_DEBUG_READ
+#define SPIS_DEBUG_READ__DATA__SHIFT 0x0
+#define SPIS_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//BCI_DEBUG_READ
+#define BCI_DEBUG_READ__DATA__SHIFT 0x0
+#define BCI_DEBUG_READ__DATA_MASK 0xFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_LO
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_HI
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_PSMA_LO
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSMA_HI
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_GPR_MIN
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+//SPI_P1_TRAP_SCREEN_PSBA_LO
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSBA_HI
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_PSMA_LO
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSMA_HI
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_GPR_MIN
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+
+
+// addressBlock: gc_tpdec
+//TD_CNTL
+#define TD_CNTL__DISABLE_MEDIAN_CALC_FOR_CUBECORNER_PHANTOM_TEXELS__SHIFT 0x0
+#define TD_CNTL__FORCE_RESIDENCY_MAP_TO_BE_MAX_FILTER__SHIFT 0x2
+#define TD_CNTL__FORCE_RESIDENCY_MAP_CC_MAX_OF_ALL_SAMPLES__SHIFT 0x7
+#define TD_CNTL__PRESERVE_VGPR_ON_UTC_ERROR__SHIFT 0xd
+#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
+#define TD_CNTL__FORCE_RT_BVH4_ARBITER_TO_PING_PONG__SHIFT 0x11
+#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
+#define TD_CNTL__DISABLE_ROUND_TO_ZERO_FOR_LARGE_FLOAT_TO_SMALL_FLOAT__SHIFT 0x16
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
+#define TD_CNTL__ARBITER_ROUND_ROBIN__SHIFT 0x18
+#define TD_CNTL__ARBITER_OLDEST_PRIORITY__SHIFT 0x19
+#define TD_CNTL__DONE_SCOREBOARD_DEPTH__SHIFT 0x1a
+#define TD_CNTL__DISABLE_MEDIAN_CALC_FOR_CUBECORNER_PHANTOM_TEXELS_MASK 0x00000001L
+#define TD_CNTL__FORCE_RESIDENCY_MAP_TO_BE_MAX_FILTER_MASK 0x00000004L
+#define TD_CNTL__FORCE_RESIDENCY_MAP_CC_MAX_OF_ALL_SAMPLES_MASK 0x00000080L
+#define TD_CNTL__PRESERVE_VGPR_ON_UTC_ERROR_MASK 0x00002000L
+#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
+#define TD_CNTL__FORCE_RT_BVH4_ARBITER_TO_PING_PONG_MASK 0x00020000L
+#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
+#define TD_CNTL__DISABLE_ROUND_TO_ZERO_FOR_LARGE_FLOAT_TO_SMALL_FLOAT_MASK 0x00400000L
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
+#define TD_CNTL__ARBITER_ROUND_ROBIN_MASK 0x01000000L
+#define TD_CNTL__ARBITER_OLDEST_PRIORITY_MASK 0x02000000L
+#define TD_CNTL__DONE_SCOREBOARD_DEPTH_MASK 0xFC000000L
+//TD_STATUS
+#define TD_STATUS__BUSY__SHIFT 0x1f
+#define TD_STATUS__BUSY_MASK 0x80000000L
+//TD_POWER_CNTL
+#define TD_POWER_CNTL__DISABLE_NOFILTER_FORMATTER_POWER_OPT__SHIFT 0x6
+#define TD_POWER_CNTL__FORCE_NOFILTER_D16_FORMATTERS_ON__SHIFT 0x7
+#define TD_POWER_CNTL__ENABLE_DEBUG_REG__SHIFT 0x8
+#define TD_POWER_CNTL__DISABLE_NOFILTER_FORMATTER_POWER_OPT_MASK 0x00000040L
+#define TD_POWER_CNTL__FORCE_NOFILTER_D16_FORMATTERS_ON_MASK 0x00000080L
+#define TD_POWER_CNTL__ENABLE_DEBUG_REG_MASK 0x00000100L
+//TD_CNTL2
+#define TD_CNTL2__LDS_RETURN_FIFO_CREDIT__SHIFT 0x0
+#define TD_CNTL2__MULTI_CYCLE_16FP__SHIFT 0x3
+#define TD_CNTL2__LDS_RETURN_FIFO_CREDIT_MASK 0x00000007L
+#define TD_CNTL2__MULTI_CYCLE_16FP_MASK 0x00000008L
+//TD_DSM_CNTL
+//TD_DSM_CNTL2
+//TD_SCRATCH
+#define TD_SCRATCH__SCRATCH__SHIFT 0x0
+#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+//TA_CNTL
+#define TA_CNTL__TA_SQ_XNACK_FGCG_DISABLE__SHIFT 0x0
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
+#define TA_CNTL__TA_SQ_XNACK_FGCG_DISABLE_MASK 0x00000001L
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
+//TA_CNTL_AUX
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
+#define TA_CNTL_AUX__DEPTH_AS_PITCH_DIS__SHIFT 0x1
+#define TA_CNTL_AUX__CORNER_SAMPLES_MIN_DIM__SHIFT 0x2
+#define TA_CNTL_AUX__OVERRIDE_QUAD_MODE_DIS__SHIFT 0x3
+#define TA_CNTL_AUX__DERIV_ADJUST_DIS__SHIFT 0x4
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
+#define TA_CNTL_AUX__GATHERH_DST_SEL__SHIFT 0x6
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
+#define TA_CNTL_AUX__ANISO_MAG_STEP_CLAMP__SHIFT 0x8
+#define TA_CNTL_AUX__AUTO_ALIGN_FORMAT__SHIFT 0x9
+#define TA_CNTL_AUX__ANISO_HALF_THRESH__SHIFT 0xa
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS__SHIFT 0xc
+#define TA_CNTL_AUX__ANISO_STEP_ORDER__SHIFT 0xd
+#define TA_CNTL_AUX__ANISO_STEP__SHIFT 0xe
+#define TA_CNTL_AUX__MINMAG_UNNORM__SHIFT 0xf
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
+#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
+#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP__SHIFT 0x1c
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG__SHIFT 0x1d
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE__SHIFT 0x1e
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
+#define TA_CNTL_AUX__DEPTH_AS_PITCH_DIS_MASK 0x00000002L
+#define TA_CNTL_AUX__CORNER_SAMPLES_MIN_DIM_MASK 0x00000004L
+#define TA_CNTL_AUX__OVERRIDE_QUAD_MODE_DIS_MASK 0x00000008L
+#define TA_CNTL_AUX__DERIV_ADJUST_DIS_MASK 0x00000010L
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
+#define TA_CNTL_AUX__GATHERH_DST_SEL_MASK 0x00000040L
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
+#define TA_CNTL_AUX__ANISO_MAG_STEP_CLAMP_MASK 0x00000100L
+#define TA_CNTL_AUX__AUTO_ALIGN_FORMAT_MASK 0x00000200L
+#define TA_CNTL_AUX__ANISO_HALF_THRESH_MASK 0x00000C00L
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS_MASK 0x00001000L
+#define TA_CNTL_AUX__ANISO_STEP_ORDER_MASK 0x00002000L
+#define TA_CNTL_AUX__ANISO_STEP_MASK 0x00004000L
+#define TA_CNTL_AUX__MINMAG_UNNORM_MASK 0x00008000L
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
+#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x00020000L
+#define TA_CNTL_AUX__ANISO_TAP_MASK 0x00040000L
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP_MASK 0x10000000L
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG_MASK 0x20000000L
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE_MASK 0xC0000000L
+//TA_CNTL2
+#define TA_CNTL2__POINT_SAMPLE_ACCEL_DIS__SHIFT 0x10
+#define TA_CNTL2__ELEMSIZE_HASH_DIS__SHIFT 0x11
+#define TA_CNTL2__TRUNCATE_COORD_MODE__SHIFT 0x12
+#define TA_CNTL2__ELIMINATE_UNLIT_QUAD_DIS__SHIFT 0x13
+#define TA_CNTL2__POINT_SAMPLE_ACCEL_DIS_MASK 0x00010000L
+#define TA_CNTL2__ELEMSIZE_HASH_DIS_MASK 0x00020000L
+#define TA_CNTL2__TRUNCATE_COORD_MODE_MASK 0x00040000L
+#define TA_CNTL2__ELIMINATE_UNLIT_QUAD_DIS_MASK 0x00080000L
+//TA_STATUS
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
+#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
+#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
+#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
+#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
+#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
+#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
+#define TA_STATUS__IN_BUSY__SHIFT 0x18
+#define TA_STATUS__FG_BUSY__SHIFT 0x19
+#define TA_STATUS__LA_BUSY__SHIFT 0x1a
+#define TA_STATUS__FL_BUSY__SHIFT 0x1b
+#define TA_STATUS__TA_BUSY__SHIFT 0x1c
+#define TA_STATUS__FA_BUSY__SHIFT 0x1d
+#define TA_STATUS__AL_BUSY__SHIFT 0x1e
+#define TA_STATUS__BUSY__SHIFT 0x1f
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
+#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
+#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
+#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__LA_BUSY_MASK 0x04000000L
+#define TA_STATUS__FL_BUSY_MASK 0x08000000L
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__BUSY_MASK 0x80000000L
+//TA_SCRATCH
+#define TA_SCRATCH__SCRATCH__SHIFT 0x0
+#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gdsdec
+//GDS_CONFIG
+#define GDS_CONFIG__WRITE_DIS__SHIFT 0x0
+#define GDS_CONFIG__UNUSED__SHIFT 0x1
+#define GDS_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define GDS_CONFIG__UNUSED_MASK 0xFFFFFFFEL
+//GDS_CNTL_STATUS
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x3
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x4
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x5
+#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x6
+#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x7
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0x8
+#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0x9
+#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xa
+#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xb
+#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xc
+#define GDS_CNTL_STATUS__CREDIT_BUSY4__SHIFT 0xd
+#define GDS_CNTL_STATUS__CREDIT_BUSY5__SHIFT 0xe
+#define GDS_CNTL_STATUS__CREDIT_BUSY6__SHIFT 0xf
+#define GDS_CNTL_STATUS__CREDIT_BUSY7__SHIFT 0x10
+#define GDS_CNTL_STATUS__UNUSED__SHIFT 0x11
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000010L
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000040L
+#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000080L
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000100L
+#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000200L
+#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00000400L
+#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00000800L
+#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00001000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY4_MASK 0x00002000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY5_MASK 0x00004000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY6_MASK 0x00008000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY7_MASK 0x00010000L
+#define GDS_CNTL_STATUS__UNUSED_MASK 0xFFFE0000L
+//GDS_ENHANCE
+#define GDS_ENHANCE__MISC__SHIFT 0x0
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
+#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
+#define GDS_ENHANCE__UNUSED__SHIFT 0x12
+#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
+#define GDS_ENHANCE__UNUSED_MASK 0xFFFC0000L
+//GDS_PROTECTION_FAULT
+#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
+#define GDS_PROTECTION_FAULT__SE_ID__SHIFT 0x3
+#define GDS_PROTECTION_FAULT__SA_ID__SHIFT 0x6
+#define GDS_PROTECTION_FAULT__WGP_ID__SHIFT 0x7
+#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xb
+#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xd
+#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x12
+#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
+#define GDS_PROTECTION_FAULT__SE_ID_MASK 0x00000038L
+#define GDS_PROTECTION_FAULT__SA_ID_MASK 0x00000040L
+#define GDS_PROTECTION_FAULT__WGP_ID_MASK 0x00000780L
+#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00001800L
+#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0003E000L
+#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFC0000L
+//GDS_VM_PROTECTION_FAULT
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
+#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
+#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
+#define GDS_VM_PROTECTION_FAULT__TMZ__SHIFT 0x5
+#define GDS_VM_PROTECTION_FAULT__UNUSED1__SHIFT 0x6
+#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
+#define GDS_VM_PROTECTION_FAULT__UNUSED2__SHIFT 0xc
+#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
+#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
+#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
+#define GDS_VM_PROTECTION_FAULT__TMZ_MASK 0x00000020L
+#define GDS_VM_PROTECTION_FAULT__UNUSED1_MASK 0x000000C0L
+#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
+#define GDS_VM_PROTECTION_FAULT__UNUSED2_MASK 0x0000F000L
+#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED__SHIFT 0x2
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED_MASK 0x0000000CL
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
+#define GDS_EDC_OA_DED__ME0_PIPE1_CS_DED__SHIFT 0xc
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xd
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_EDC_OA_DED__ME0_PIPE1_CS_DED_MASK 0x00001000L
+#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFE000L
+//GDS_DSM_CNTL
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED__SHIFT 0x8
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xa
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED_MASK 0x00000300L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFFC00L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_DSM_CNTL2
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
+
+
+// addressBlock: gc_rbdec
+//DB_DEBUG
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+//DB_DEBUG2
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
+#define DB_DEBUG2__FORCE_PERF_COUNTERS_ON__SHIFT 0xe
+#define DB_DEBUG2__FULL_TILE_CACHE_EVICT_ON_HALF_FULL__SHIFT 0xf
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES__SHIFT 0x10
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
+#define DB_DEBUG2__DISABLE_FULL_TILE_WAVE_BREAK__SHIFT 0x14
+#define DB_DEBUG2__ENABLE_FULL_TILE_WAVE_BREAK_FOR_ALL_TILES__SHIFT 0x15
+#define DB_DEBUG2__FORCE_ITERATE_256__SHIFT 0x18
+#define DB_DEBUG2__RESERVED1__SHIFT 0x1a
+#define DB_DEBUG2__DEBUG_BUS_FLOP_EN__SHIFT 0x1b
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
+#define DB_DEBUG2__FORCE_PERF_COUNTERS_ON_MASK 0x00004000L
+#define DB_DEBUG2__FULL_TILE_CACHE_EVICT_ON_HALF_FULL_MASK 0x00008000L
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_FULL_TILE_WAVE_BREAK_MASK 0x00100000L
+#define DB_DEBUG2__ENABLE_FULL_TILE_WAVE_BREAK_FOR_ALL_TILES_MASK 0x00200000L
+#define DB_DEBUG2__FORCE_ITERATE_256_MASK 0x03000000L
+#define DB_DEBUG2__RESERVED1_MASK 0x04000000L
+#define DB_DEBUG2__DEBUG_BUS_FLOP_EN_MASK 0x08000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+//DB_DEBUG3
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
+#define DB_DEBUG3__DISABLE_RELOAD_CONTEXT_DRAW_DATA__SHIFT 0x1
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
+#define DB_DEBUG3__DISABLE_SLOCS_PER_CTXT_MATCH__SHIFT 0x10
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
+#define DB_DEBUG3__DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
+#define DB_DEBUG3__DISABLE_TS_WRITE_L0__SHIFT 0x1e
+#define DB_DEBUG3__DISABLE_MULTIDTAG_FL_PANIC_REQUIREMENT__SHIFT 0x1f
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
+#define DB_DEBUG3__DISABLE_RELOAD_CONTEXT_DRAW_DATA_MASK 0x00000002L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
+#define DB_DEBUG3__DISABLE_SLOCS_PER_CTXT_MATCH_MASK 0x00010000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DISABLE_TS_WRITE_L0_MASK 0x40000000L
+#define DB_DEBUG3__DISABLE_MULTIDTAG_FL_PANIC_REQUIREMENT_MASK 0x80000000L
+//DB_DEBUG4
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
+#define DB_DEBUG4__DISABLE_SEPARATE_OP_PIPE_CLK__SHIFT 0x4
+#define DB_DEBUG4__DISABLE_SEPARATE_SX_CLK__SHIFT 0x5
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x6
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x7
+#define DB_DEBUG4__DISABLE_SEPARATE_DBG_CLK__SHIFT 0x8
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0xc
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0xf
+#define DB_DEBUG4__DISABLE_HIZ_TS_COLLISION_DETECT__SHIFT 0x10
+#define DB_DEBUG4__DISABLE_LAST_OF_BURST_ON_FLUSH_CHUNK0_ALL_DONE__SHIFT 0x12
+#define DB_DEBUG4__ENABLE_CZ_OVERFLOW_TESTMODE__SHIFT 0x13
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO__SHIFT 0x15
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_CONFLICT__SHIFT 0x16
+#define DB_DEBUG4__WR_MEM_BURST_CTL__SHIFT 0x18
+#define DB_DEBUG4__DISABLE_WR_MEM_BURST_POOLING__SHIFT 0x1b
+#define DB_DEBUG4__DISABLE_RD_MEM_BURST__SHIFT 0x1c
+#define DB_DEBUG4__LATE_ACK_SCOREBOARD_MULTIPLE_SLOT__SHIFT 0x1e
+#define DB_DEBUG4__LATE_ACK_PSD_EOP_OLD_METHOD__SHIFT 0x1f
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
+#define DB_DEBUG4__DISABLE_SEPARATE_OP_PIPE_CLK_MASK 0x00000010L
+#define DB_DEBUG4__DISABLE_SEPARATE_SX_CLK_MASK 0x00000020L
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000040L
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000080L
+#define DB_DEBUG4__DISABLE_SEPARATE_DBG_CLK_MASK 0x00000100L
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00001000L
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00008000L
+#define DB_DEBUG4__DISABLE_HIZ_TS_COLLISION_DETECT_MASK 0x00010000L
+#define DB_DEBUG4__DISABLE_LAST_OF_BURST_ON_FLUSH_CHUNK0_ALL_DONE_MASK 0x00040000L
+#define DB_DEBUG4__ENABLE_CZ_OVERFLOW_TESTMODE_MASK 0x00080000L
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_MASK 0x00200000L
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_CONFLICT_MASK 0x00400000L
+#define DB_DEBUG4__WR_MEM_BURST_CTL_MASK 0x07000000L
+#define DB_DEBUG4__DISABLE_WR_MEM_BURST_POOLING_MASK 0x08000000L
+#define DB_DEBUG4__DISABLE_RD_MEM_BURST_MASK 0x10000000L
+#define DB_DEBUG4__LATE_ACK_SCOREBOARD_MULTIPLE_SLOT_MASK 0x40000000L
+#define DB_DEBUG4__LATE_ACK_PSD_EOP_OLD_METHOD_MASK 0x80000000L
+//DB_ETILE_STUTTER_CONTROL
+#define DB_ETILE_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_ETILE_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_ETILE_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_ETILE_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_LTILE_STUTTER_CONTROL
+#define DB_LTILE_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_LTILE_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_LTILE_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_LTILE_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_EQUAD_STUTTER_CONTROL
+#define DB_EQUAD_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_EQUAD_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_EQUAD_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_EQUAD_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_LQUAD_STUTTER_CONTROL
+#define DB_LQUAD_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_LQUAD_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_LQUAD_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_LQUAD_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_CREDIT_LIMIT
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
+#define DB_CREDIT_LIMIT__DB_SC_WAVE_CREDITS__SHIFT 0xd
+#define DB_CREDIT_LIMIT__DB_SC_FREE_WAVE_CREDITS__SHIFT 0x12
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
+#define DB_CREDIT_LIMIT__DB_SC_WAVE_CREDITS_MASK 0x0003E000L
+#define DB_CREDIT_LIMIT__DB_SC_FREE_WAVE_CREDITS_MASK 0x007C0000L
+//DB_WATERMARKS
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x8
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0x10
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x18
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x000000FFL
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x0000FF00L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x00FF0000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0xFF000000L
+//DB_SUBTILE_CONTROL
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
+//DB_FREE_CACHELINES
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x8
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0x10
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x18
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x000000FFL
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x0000FF00L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x00FF0000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0xFF000000L
+//DB_FIFO_DEPTH1
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x18
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0xFF000000L
+//DB_FIFO_DEPTH2
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF0000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
+//DB_LAST_OF_BURST_CONFIG
+#define DB_LAST_OF_BURST_CONFIG__MAXBURST__SHIFT 0x0
+#define DB_LAST_OF_BURST_CONFIG__TIMEOUT__SHIFT 0x8
+#define DB_LAST_OF_BURST_CONFIG__DBCB_LOB_SWITCH_TIMEOUT__SHIFT 0xb
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_FG_DEFAULT_TIMEOUT__SHIFT 0x11
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_COUNT_RESET_ON_LOB__SHIFT 0x12
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_FLQ_LOB_EVERY_256B__SHIFT 0x13
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_ZCACHE_FL_OP_EVEN_ARB__SHIFT 0x14
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_FORCE_FLUSH_BEFORE_FIFO__SHIFT 0x15
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_DKG_LOB_GEN__SHIFT 0x16
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_LPF_LOB_GEN__SHIFT 0x17
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FL_BURST__SHIFT 0x19
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FG_LOB_FWDR__SHIFT 0x1a
+#define DB_LAST_OF_BURST_CONFIG__BYPASS_SORT_RD_BA__SHIFT 0x1c
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_256B_COALESCE__SHIFT 0x1d
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_RD_BURST__SHIFT 0x1e
+#define DB_LAST_OF_BURST_CONFIG__LEGACY_LOB_INSERT_EN__SHIFT 0x1f
+#define DB_LAST_OF_BURST_CONFIG__MAXBURST_MASK 0x000000FFL
+#define DB_LAST_OF_BURST_CONFIG__TIMEOUT_MASK 0x00000700L
+#define DB_LAST_OF_BURST_CONFIG__DBCB_LOB_SWITCH_TIMEOUT_MASK 0x0000F800L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_FG_DEFAULT_TIMEOUT_MASK 0x00020000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_COUNT_RESET_ON_LOB_MASK 0x00040000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_FLQ_LOB_EVERY_256B_MASK 0x00080000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_ZCACHE_FL_OP_EVEN_ARB_MASK 0x00100000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_FORCE_FLUSH_BEFORE_FIFO_MASK 0x00200000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_DKG_LOB_GEN_MASK 0x00400000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_LPF_LOB_GEN_MASK 0x00800000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FL_BURST_MASK 0x02000000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FG_LOB_FWDR_MASK 0x04000000L
+#define DB_LAST_OF_BURST_CONFIG__BYPASS_SORT_RD_BA_MASK 0x10000000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_256B_COALESCE_MASK 0x20000000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_RD_BURST_MASK 0x40000000L
+#define DB_LAST_OF_BURST_CONFIG__LEGACY_LOB_INSERT_EN_MASK 0x80000000L
+//DB_RING_CONTROL
+#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
+#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
+//DB_MEM_ARB_WATERMARKS
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
+//DB_FIFO_DEPTH3
+#define DB_FIFO_DEPTH3__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH3__OSB_WAVE_TABLE_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH3__OREO_WAVE_HIDE_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH3__QUAD_READ_REQS__SHIFT 0x18
+#define DB_FIFO_DEPTH3__LTILE_PROBE_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH3__OSB_WAVE_TABLE_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH3__OREO_WAVE_HIDE_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH3__QUAD_READ_REQS_MASK 0xFF000000L
+//DB_DEBUG6
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_CONFLICT__SHIFT 0x0
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_HARD_CONFLICT__SHIFT 0x1
+#define DB_DEBUG6__FORCE_DB_SC_QUAD_CONFLICT__SHIFT 0x2
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ALL__SHIFT 0x3
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ID__SHIFT 0x4
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_EN__SHIFT 0xa
+#define DB_DEBUG6__DISABLE_PWS_PLUS_TCP_CM_LIVENESS_STALL__SHIFT 0xb
+#define DB_DEBUG6__DISABLE_PWS_PLUS_DTT_TAG_LIVENESS_STALL__SHIFT 0xc
+#define DB_DEBUG6__SET_DB_PERFMON_PWS_PIPE_ID__SHIFT 0xd
+#define DB_DEBUG6__FTWB_MAX_TIMEOUT_VAL__SHIFT 0x10
+#define DB_DEBUG6__DISABLE_LQO_SMT_RAM_OPT__SHIFT 0x18
+#define DB_DEBUG6__FORCE_MAX_TILES_IN_WAVE_CHECK__SHIFT 0x19
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_FIX__SHIFT 0x1a
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_WAIT_PANIC__SHIFT 0x1b
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_CONFLICT_MASK 0x00000001L
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_HARD_CONFLICT_MASK 0x00000002L
+#define DB_DEBUG6__FORCE_DB_SC_QUAD_CONFLICT_MASK 0x00000004L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ALL_MASK 0x00000008L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ID_MASK 0x000003F0L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_EN_MASK 0x00000400L
+#define DB_DEBUG6__DISABLE_PWS_PLUS_TCP_CM_LIVENESS_STALL_MASK 0x00000800L
+#define DB_DEBUG6__DISABLE_PWS_PLUS_DTT_TAG_LIVENESS_STALL_MASK 0x00001000L
+#define DB_DEBUG6__SET_DB_PERFMON_PWS_PIPE_ID_MASK 0x00006000L
+#define DB_DEBUG6__FTWB_MAX_TIMEOUT_VAL_MASK 0x00FF0000L
+#define DB_DEBUG6__DISABLE_LQO_SMT_RAM_OPT_MASK 0x01000000L
+#define DB_DEBUG6__FORCE_MAX_TILES_IN_WAVE_CHECK_MASK 0x02000000L
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_FIX_MASK 0x04000000L
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_WAIT_PANIC_MASK 0x08000000L
+//DB_EXCEPTION_CONTROL
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT 0x3
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT 0x4
+#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT 0x8
+#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT 0x18
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK 0x00000008L
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK 0x00000010L
+#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK 0x00000F00L
+#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK 0x7F000000L
+//DB_DEBUG7
+#define DB_DEBUG7__SPARE_BITS__SHIFT 0x0
+#define DB_DEBUG7__SPARE_BITS_MASK 0xFFFFFFFFL
+//DB_DEBUG5
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PRELOAD__SHIFT 0x0
+#define DB_DEBUG5__ENABLE_SECONDARY_MIPS_TAILS_COMPRESSION__SHIFT 0x1
+#define DB_DEBUG5__DISABLE_CLEAR_VALUE_UPDATE_ON_TILE_CACHE_HIT__SHIFT 0x2
+#define DB_DEBUG5__DISABLE_2SRC_VRS_HARD_CONFLICT__SHIFT 0x3
+#define DB_DEBUG5__DISABLE_FLQ_MCC_DTILEID_CHECK__SHIFT 0x4
+#define DB_DEBUG5__DISABLE_NOZ_POWER_SAVINGS__SHIFT 0x5
+#define DB_DEBUG5__DISABLE_TILE_INFLIGHT_DEC_POSTZ_FIX__SHIFT 0x6
+#define DB_DEBUG5__DISABLE_MGCG_GATING_ON_SHADER_WAIT__SHIFT 0x7
+#define DB_DEBUG5__DISABLE_VRS_1X2_2XAA__SHIFT 0x8
+#define DB_DEBUG5__ENABLE_FULL_TILE_WAVE_BREAK_ON_COARSE__SHIFT 0x9
+#define DB_DEBUG5__DISABLE_HTILE_HARVESTING__SHIFT 0xa
+#define DB_DEBUG5__DISABLE_SEPARATE_TILE_CLK__SHIFT 0xb
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PREFETCH__SHIFT 0xc
+#define DB_DEBUG5__DISABLE_PSL_AUTO_MODE_FIX__SHIFT 0xd
+#define DB_DEBUG5__DISABLE_FORCE_ZMASK_EXPANDED__SHIFT 0xe
+#define DB_DEBUG5__DISABLE_SEPARATE_LQO_CLK__SHIFT 0xf
+#define DB_DEBUG5__DISABLE_Z_WITHOUT_PLANES_FLQ__SHIFT 0x10
+#define DB_DEBUG5__PRESERVE_QMASK_FOR_POSTZ_OP_PIPE__SHIFT 0x11
+#define DB_DEBUG5__Z_NACK_BEHAVIOR_ONLY_WHEN_Z_IS_PRT__SHIFT 0x12
+#define DB_DEBUG5__S_NACK_BEHAVIOR_ONLY_WHEN_S_IS_PRT__SHIFT 0x13
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_Z__SHIFT 0x14
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_STENCIL__SHIFT 0x15
+#define DB_DEBUG5__DISABLE_LQO_FTCQ_DUAL_QUAD_REGION_CHECK__SHIFT 0x16
+#define DB_DEBUG5__DISABLE_EVENT_INSERTION_AFTER_ZPC_BEFORE_CONTEXT_DONE__SHIFT 0x17
+#define DB_DEBUG5__SPARE_BITS__SHIFT 0x18
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PRELOAD_MASK 0x00000001L
+#define DB_DEBUG5__ENABLE_SECONDARY_MIPS_TAILS_COMPRESSION_MASK 0x00000002L
+#define DB_DEBUG5__DISABLE_CLEAR_VALUE_UPDATE_ON_TILE_CACHE_HIT_MASK 0x00000004L
+#define DB_DEBUG5__DISABLE_2SRC_VRS_HARD_CONFLICT_MASK 0x00000008L
+#define DB_DEBUG5__DISABLE_FLQ_MCC_DTILEID_CHECK_MASK 0x00000010L
+#define DB_DEBUG5__DISABLE_NOZ_POWER_SAVINGS_MASK 0x00000020L
+#define DB_DEBUG5__DISABLE_TILE_INFLIGHT_DEC_POSTZ_FIX_MASK 0x00000040L
+#define DB_DEBUG5__DISABLE_MGCG_GATING_ON_SHADER_WAIT_MASK 0x00000080L
+#define DB_DEBUG5__DISABLE_VRS_1X2_2XAA_MASK 0x00000100L
+#define DB_DEBUG5__ENABLE_FULL_TILE_WAVE_BREAK_ON_COARSE_MASK 0x00000200L
+#define DB_DEBUG5__DISABLE_HTILE_HARVESTING_MASK 0x00000400L
+#define DB_DEBUG5__DISABLE_SEPARATE_TILE_CLK_MASK 0x00000800L
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PREFETCH_MASK 0x00001000L
+#define DB_DEBUG5__DISABLE_PSL_AUTO_MODE_FIX_MASK 0x00002000L
+#define DB_DEBUG5__DISABLE_FORCE_ZMASK_EXPANDED_MASK 0x00004000L
+#define DB_DEBUG5__DISABLE_SEPARATE_LQO_CLK_MASK 0x00008000L
+#define DB_DEBUG5__DISABLE_Z_WITHOUT_PLANES_FLQ_MASK 0x00010000L
+#define DB_DEBUG5__PRESERVE_QMASK_FOR_POSTZ_OP_PIPE_MASK 0x00020000L
+#define DB_DEBUG5__Z_NACK_BEHAVIOR_ONLY_WHEN_Z_IS_PRT_MASK 0x00040000L
+#define DB_DEBUG5__S_NACK_BEHAVIOR_ONLY_WHEN_S_IS_PRT_MASK 0x00080000L
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_Z_MASK 0x00100000L
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_STENCIL_MASK 0x00200000L
+#define DB_DEBUG5__DISABLE_LQO_FTCQ_DUAL_QUAD_REGION_CHECK_MASK 0x00400000L
+#define DB_DEBUG5__DISABLE_EVENT_INSERTION_AFTER_ZPC_BEFORE_CONTEXT_DONE_MASK 0x00800000L
+#define DB_DEBUG5__SPARE_BITS_MASK 0xFF000000L
+//DB_FGCG_SRAMS_CLK_CTRL
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE0__SHIFT 0x0
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE1__SHIFT 0x1
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE2__SHIFT 0x2
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE3__SHIFT 0x3
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE4__SHIFT 0x4
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE5__SHIFT 0x5
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE6__SHIFT 0x6
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE7__SHIFT 0x7
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE8__SHIFT 0x8
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE9__SHIFT 0x9
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE10__SHIFT 0xa
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE11__SHIFT 0xb
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE12__SHIFT 0xc
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE13__SHIFT 0xd
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE14__SHIFT 0xe
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE15__SHIFT 0xf
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE16__SHIFT 0x10
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE17__SHIFT 0x11
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE18__SHIFT 0x12
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE19__SHIFT 0x13
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE20__SHIFT 0x14
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE21__SHIFT 0x15
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE22__SHIFT 0x16
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE23__SHIFT 0x17
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE24__SHIFT 0x18
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE25__SHIFT 0x19
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE26__SHIFT 0x1a
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE27__SHIFT 0x1b
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE28__SHIFT 0x1c
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE29__SHIFT 0x1d
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE30__SHIFT 0x1e
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE31__SHIFT 0x1f
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE0_MASK 0x00000001L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE1_MASK 0x00000002L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE2_MASK 0x00000004L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE3_MASK 0x00000008L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE4_MASK 0x00000010L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE5_MASK 0x00000020L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE6_MASK 0x00000040L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE7_MASK 0x00000080L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE8_MASK 0x00000100L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE9_MASK 0x00000200L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE10_MASK 0x00000400L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE11_MASK 0x00000800L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE12_MASK 0x00001000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE13_MASK 0x00002000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE14_MASK 0x00004000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE15_MASK 0x00008000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE16_MASK 0x00010000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE17_MASK 0x00020000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE18_MASK 0x00040000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE19_MASK 0x00080000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE20_MASK 0x00100000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE21_MASK 0x00200000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE22_MASK 0x00400000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE23_MASK 0x00800000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE24_MASK 0x01000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE25_MASK 0x02000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE26_MASK 0x04000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE27_MASK 0x08000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE28_MASK 0x10000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE29_MASK 0x20000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE30_MASK 0x40000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE31_MASK 0x80000000L
+//DB_FGCG_INTERFACES_CLK_CTRL
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_QUAD_OVERRIDE__SHIFT 0x0
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_EXPORT_OVERRIDE__SHIFT 0x2
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_RDREQ_OVERRIDE__SHIFT 0x3
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_WRREQ_OVERRIDE__SHIFT 0x4
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_TILE_OVERRIDE__SHIFT 0x5
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_RMIRET_OVERRIDE__SHIFT 0x6
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_WAVE_OVERRIDE__SHIFT 0x7
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_FREE_WAVE_OVERRIDE__SHIFT 0x8
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_QUAD_OVERRIDE_MASK 0x00000001L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_EXPORT_OVERRIDE_MASK 0x00000004L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_RDREQ_OVERRIDE_MASK 0x00000008L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_WRREQ_OVERRIDE_MASK 0x00000010L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_TILE_OVERRIDE_MASK 0x00000020L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_RMIRET_OVERRIDE_MASK 0x00000040L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_WAVE_OVERRIDE_MASK 0x00000080L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_FREE_WAVE_OVERRIDE_MASK 0x00000100L
+//DB_FIFO_DEPTH4
+#define DB_FIFO_DEPTH4__OSB_SQUAD_TABLE_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH4__OSB_TILE_TABLE_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH4__OSB_SCORE_BOARD_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH4__OSB_EVENT_FIFO_DEPTH__SHIFT 0x18
+#define DB_FIFO_DEPTH4__OSB_SQUAD_TABLE_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH4__OSB_TILE_TABLE_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH4__OSB_SCORE_BOARD_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH4__OSB_EVENT_FIFO_DEPTH_MASK 0xFF000000L
+//CC_RB_REDUNDANCY
+#define CC_RB_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define CC_RB_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//CC_RB_BACKEND_DISABLE
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_RB_BACKEND_DISABLE__RESERVED__SHIFT 0x2
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x4
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_BACKEND_DISABLE__RESERVED_MASK 0x0000000CL
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xFFFFFFF0L
+//GB_ADDR_CONFIG
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//GB_BACKEND_MAP
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
+//GB_GPU_ID
+#define GB_GPU_ID__GPU_ID__SHIFT 0x0
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
+//CC_RB_DAISY_CHAIN
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
+//GB_ADDR_CONFIG_READ
+#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//CB_HW_CONTROL_4
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_NUM_QB_LOG2__SHIFT 0x0
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_ALGORITHM__SHIFT 0x3
+#define CB_HW_CONTROL_4__DISABLE_USE_OF_SMT_SCORE__SHIFT 0x5
+#define CB_HW_CONTROL_4__SPARE_10__SHIFT 0x6
+#define CB_HW_CONTROL_4__SPARE_11__SHIFT 0x7
+#define CB_HW_CONTROL_4__SPARE_12__SHIFT 0x8
+#define CB_HW_CONTROL_4__DISABLE_MA_WAIT_FOR_LAST__SHIFT 0x9
+#define CB_HW_CONTROL_4__SMT_TIMEOUT_THRESHOLD__SHIFT 0xa
+#define CB_HW_CONTROL_4__SMT_QPFIFO_THRESHOLD__SHIFT 0xd
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_RAW_HAZARD__SHIFT 0x10
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_COARSE_RAW_HAZARD__SHIFT 0x11
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_DS_RAW_HAZARD__SHIFT 0x12
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_NUM_QB_LOG2_MASK 0x00000007L
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_ALGORITHM_MASK 0x00000018L
+#define CB_HW_CONTROL_4__DISABLE_USE_OF_SMT_SCORE_MASK 0x00000020L
+#define CB_HW_CONTROL_4__SPARE_10_MASK 0x00000040L
+#define CB_HW_CONTROL_4__SPARE_11_MASK 0x00000080L
+#define CB_HW_CONTROL_4__SPARE_12_MASK 0x00000100L
+#define CB_HW_CONTROL_4__DISABLE_MA_WAIT_FOR_LAST_MASK 0x00000200L
+#define CB_HW_CONTROL_4__SMT_TIMEOUT_THRESHOLD_MASK 0x00001C00L
+#define CB_HW_CONTROL_4__SMT_QPFIFO_THRESHOLD_MASK 0x0000E000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_RAW_HAZARD_MASK 0x00010000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_COARSE_RAW_HAZARD_MASK 0x00020000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_DS_RAW_HAZARD_MASK 0x00040000L
+//CB_HW_CONTROL_3
+#define CB_HW_CONTROL_3__SPARE_5__SHIFT 0x0
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
+#define CB_HW_CONTROL_3__SPARE_6__SHIFT 0x2
+#define CB_HW_CONTROL_3__SPARE_7__SHIFT 0x3
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x4
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x5
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x6
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0x7
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xb
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0xc
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0xd
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0xe
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0xf
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x10
+#define CB_HW_CONTROL_3__SPARE_8__SHIFT 0x11
+#define CB_HW_CONTROL_3__SPARE_9__SHIFT 0x12
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT 0x14
+#define CB_HW_CONTROL_3__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x15
+#define CB_HW_CONTROL_3__SPARE_5_MASK 0x00000001L
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
+#define CB_HW_CONTROL_3__SPARE_6_MASK 0x00000004L
+#define CB_HW_CONTROL_3__SPARE_7_MASK 0x00000008L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000010L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000020L
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000040L
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000080L
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00000800L
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00001000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00002000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00004000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00008000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00010000L
+#define CB_HW_CONTROL_3__SPARE_8_MASK 0x00020000L
+#define CB_HW_CONTROL_3__SPARE_9_MASK 0x00040000L
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK 0x00100000L
+#define CB_HW_CONTROL_3__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00200000L
+//CB_HW_CONTROL
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x0
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT 0x1
+#define CB_HW_CONTROL__DISABLE_SMT_WHEN_NO_FDCC_FIX__SHIFT 0x2
+#define CB_HW_CONTROL__RMI_CREDITS__SHIFT 0x6
+#define CB_HW_CONTROL__NUM_CCC_SKID_FIFO_ENTRIES__SHIFT 0xc
+#define CB_HW_CONTROL__FORCE_FEA_HIGH__SHIFT 0xf
+#define CB_HW_CONTROL__FORCE_EVICT_ALL_VALID__SHIFT 0x10
+#define CB_HW_CONTROL__DISABLE_DCC_CACHE_BYTEMASKING__SHIFT 0x11
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
+#define CB_HW_CONTROL__DISABLE_USE_OF_SET_HASH__SHIFT 0x14
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
+#define CB_HW_CONTROL__SPARE_2__SHIFT 0x16
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
+#define CB_HW_CONTROL__SPARE_3__SHIFT 0x1d
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00000001L
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK 0x00000002L
+#define CB_HW_CONTROL__DISABLE_SMT_WHEN_NO_FDCC_FIX_MASK 0x00000004L
+#define CB_HW_CONTROL__RMI_CREDITS_MASK 0x00000FC0L
+#define CB_HW_CONTROL__NUM_CCC_SKID_FIFO_ENTRIES_MASK 0x00007000L
+#define CB_HW_CONTROL__FORCE_FEA_HIGH_MASK 0x00008000L
+#define CB_HW_CONTROL__FORCE_EVICT_ALL_VALID_MASK 0x00010000L
+#define CB_HW_CONTROL__DISABLE_DCC_CACHE_BYTEMASKING_MASK 0x00020000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__DISABLE_USE_OF_SET_HASH_MASK 0x00100000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__SPARE_2_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__SPARE_3_MASK 0x20000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+//CB_HW_CONTROL_1
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0x0
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0000003FL
+//CB_HW_CONTROL_2
+#define CB_HW_CONTROL_2__SPARE_4__SHIFT 0x0
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x8
+#define CB_HW_CONTROL_2__SPARE__SHIFT 0xe
+#define CB_HW_CONTROL_2__SPARE_4_MASK 0x000000FFL
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x00003F00L
+#define CB_HW_CONTROL_2__SPARE_MASK 0xFFFFC000L
+//CB_DCC_CONFIG
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DEPTH__SHIFT 0x0
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x5
+#define CB_DCC_CONFIG__SPARE_13__SHIFT 0x6
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE__SHIFT 0x7
+#define CB_DCC_CONFIG__SPARE_14__SHIFT 0x8
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x19
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DEPTH_MASK 0x0000001FL
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000020L
+#define CB_DCC_CONFIG__SPARE_13_MASK 0x00000040L
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE_MASK 0x00000080L
+#define CB_DCC_CONFIG__SPARE_14_MASK 0x0000FF00L
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x01FF0000L
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xFE000000L
+//CB_HW_MEM_ARBITER_RD
+#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x13
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x19
+#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00040000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x00380000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x01C00000L
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x02000000L
+//CB_HW_MEM_ARBITER_WR
+#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x13
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x19
+#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00040000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x00380000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x01C00000L
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x02000000L
+//CB_FGCG_SRAM_OVERRIDE
+#define CB_FGCG_SRAM_OVERRIDE__DISABLE_FGCG__SHIFT 0x0
+#define CB_FGCG_SRAM_OVERRIDE__DISABLE_FGCG_MASK 0x000FFFFFL
+//CB_DCC_CONFIG2
+#define CB_DCC_CONFIG2__INVALID_KEY_ERROR_CODE__SHIFT 0x0
+#define CB_DCC_CONFIG2__CLEAR_FRAG2DCC_KEY_ERROR_CODE__SHIFT 0x8
+#define CB_DCC_CONFIG2__ENABLE_COMP_KEY_ERROR_DETECTION__SHIFT 0x9
+#define CB_DCC_CONFIG2__INVALID_KEY_ERROR_CODE_MASK 0x000000FFL
+#define CB_DCC_CONFIG2__CLEAR_FRAG2DCC_KEY_ERROR_CODE_MASK 0x00000100L
+#define CB_DCC_CONFIG2__ENABLE_COMP_KEY_ERROR_DETECTION_MASK 0x00000200L
+//CHICKEN_BITS
+#define CHICKEN_BITS__SPARE__SHIFT 0x0
+#define CHICKEN_BITS__SPARE_MASK 0xFFFFFFFFL
+//CB_CACHE_EVICT_POINTS
+#define CB_CACHE_EVICT_POINTS__CC_COLOR_EVICT_POINT__SHIFT 0x0
+#define CB_CACHE_EVICT_POINTS__CC_FMASK_EVICT_POINT__SHIFT 0x8
+#define CB_CACHE_EVICT_POINTS__DCC_CACHE_EVICT_POINT__SHIFT 0x10
+#define CB_CACHE_EVICT_POINTS__CC_CACHE_EVICT_POINT__SHIFT 0x18
+#define CB_CACHE_EVICT_POINTS__CC_COLOR_EVICT_POINT_MASK 0x000000FFL
+#define CB_CACHE_EVICT_POINTS__CC_FMASK_EVICT_POINT_MASK 0x0000FF00L
+#define CB_CACHE_EVICT_POINTS__DCC_CACHE_EVICT_POINT_MASK 0x00FF0000L
+#define CB_CACHE_EVICT_POINTS__CC_CACHE_EVICT_POINT_MASK 0xFF000000L
+
+
+// addressBlock: gc_gceadec
+//GCEA_DRAM_RD_CLI2GRP_MAP0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_CLI2GRP_MAP1
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP1
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_GRP2VC_MAP
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_WR_GRP2VC_MAP
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_RD_LAZY
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_WR_LAZY
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_RD_CAM_CNTL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_WR_CAM_CNTL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_PAGE_BURST
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_AGE
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_WR_PRI_AGE
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_RD_PRI_QUEUING
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_QUEUING
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_FIXED
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_FIXED
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_URGENCY
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_WR_PRI_URGENCY
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI1
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI2
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI3
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI1
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI2
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI3
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_CLI2GRP_MAP0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_CLI2GRP_MAP1
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP1
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_COMBINE_FLUSH
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//GCEA_IO_WR_COMBINE_FLUSH
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING__SHIFT 0x12
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING_MASK 0x00040000L
+//GCEA_IO_GROUP_BURST
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_AGE
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_WR_PRI_AGE
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_RD_PRI_QUEUING
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_QUEUING
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_FIXED
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_FIXED
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_URGENCY
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_WR_PRI_URGENCY
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_RD_PRI_URGENCY_MASKING
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_WR_PRI_URGENCY_MASKING
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_RD_PRI_QUANT_PRI1
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI2
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI3
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI1
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI2
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI3
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_SDP_ARB_DRAM
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//GCEA_SDP_ARB_FINAL
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define GCEA_SDP_ARB_FINAL__DRAM_RD_THROTTLE__SHIFT 0x1c
+#define GCEA_SDP_ARB_FINAL__DRAM_WR_THROTTLE__SHIFT 0x1d
+#define GCEA_SDP_ARB_FINAL__GMI_RD_THROTTLE__SHIFT 0x1e
+#define GCEA_SDP_ARB_FINAL__GMI_WR_THROTTLE__SHIFT 0x1f
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_RD_THROTTLE_MASK 0x10000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_WR_THROTTLE_MASK 0x20000000L
+#define GCEA_SDP_ARB_FINAL__GMI_RD_THROTTLE_MASK 0x40000000L
+#define GCEA_SDP_ARB_FINAL__GMI_WR_THROTTLE_MASK 0x80000000L
+//GCEA_SDP_DRAM_PRIORITY
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_IO_PRIORITY
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_CREDITS
+#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
+#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_TAG_RESERVE0
+#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GCEA_SDP_TAG_RESERVE1
+#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GCEA_SDP_VCC_RESERVE0
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCC_RESERVE1
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_VCD_RESERVE0
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+
+
+// addressBlock: gc_gceadec2
+//GCEA_SDP_VCD_RESERVE1
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_REQ_CNTL
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//GCEA_MISC
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//GCEA_LATENCY_SAMPLING
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//GCEA_MAM_CTRL2
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_DISABLE__SHIFT 0x0
+#define GCEA_MAM_CTRL2__DBIT_PF_CLR_ONLY__SHIFT 0x1
+#define GCEA_MAM_CTRL2__DBIT_PF_RD_ONLY__SHIFT 0x2
+#define GCEA_MAM_CTRL2__DBIT_TRACK_SEGMENT__SHIFT 0x3
+#define GCEA_MAM_CTRL2__ARAM_TRACK_SEGMENT__SHIFT 0x6
+#define GCEA_MAM_CTRL2__ARAM_FB_TRACK_SIZE__SHIFT 0x9
+#define GCEA_MAM_CTRL2__ARAM_RB_ENTRY_SIZE__SHIFT 0xf
+#define GCEA_MAM_CTRL2__ARAM_OVERRIDE_EA_STRAP__SHIFT 0x12
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_ENABLE__SHIFT 0x13
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_VALUE__SHIFT 0x14
+#define GCEA_MAM_CTRL2__ARAM_REMOVE_TRACKER__SHIFT 0x15
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_ENABLE__SHIFT 0x16
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_VALUE__SHIFT 0x17
+#define GCEA_MAM_CTRL2__RESERVED_FIELD__SHIFT 0x18
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_DISABLE_MASK 0x00000001L
+#define GCEA_MAM_CTRL2__DBIT_PF_CLR_ONLY_MASK 0x00000002L
+#define GCEA_MAM_CTRL2__DBIT_PF_RD_ONLY_MASK 0x00000004L
+#define GCEA_MAM_CTRL2__DBIT_TRACK_SEGMENT_MASK 0x00000038L
+#define GCEA_MAM_CTRL2__ARAM_TRACK_SEGMENT_MASK 0x000001C0L
+#define GCEA_MAM_CTRL2__ARAM_FB_TRACK_SIZE_MASK 0x00007E00L
+#define GCEA_MAM_CTRL2__ARAM_RB_ENTRY_SIZE_MASK 0x00038000L
+#define GCEA_MAM_CTRL2__ARAM_OVERRIDE_EA_STRAP_MASK 0x00040000L
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_ENABLE_MASK 0x00080000L
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_VALUE_MASK 0x00100000L
+#define GCEA_MAM_CTRL2__ARAM_REMOVE_TRACKER_MASK 0x00200000L
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_ENABLE_MASK 0x00400000L
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_VALUE_MASK 0x00800000L
+#define GCEA_MAM_CTRL2__RESERVED_FIELD_MASK 0xFF000000L
+//GCEA_MAM_CTRL
+#define GCEA_MAM_CTRL__MAM_DISABLE__SHIFT 0x0
+#define GCEA_MAM_CTRL__DBIT_COALESCE_DISABLE__SHIFT 0x1
+#define GCEA_MAM_CTRL__ARAM_COALESCE_DISABLE__SHIFT 0x2
+#define GCEA_MAM_CTRL__ARAM_FLUSH_SNOOP_EN__SHIFT 0x3
+#define GCEA_MAM_CTRL__SDMA_UPDT_ARAM__SHIFT 0x4
+#define GCEA_MAM_CTRL__ARAM_FLUSH_NOALLOC__SHIFT 0x5
+#define GCEA_MAM_CTRL__FLUSH_TRACKER__SHIFT 0x6
+#define GCEA_MAM_CTRL__CLEAR_TRACKER__SHIFT 0x7
+#define GCEA_MAM_CTRL__SDP_PRIORITY__SHIFT 0x8
+#define GCEA_MAM_CTRL__FORCE_FLUSH_UPDT_TRACKER__SHIFT 0xc
+#define GCEA_MAM_CTRL__FORCE_FLUSH_GEN_INTERRUPT__SHIFT 0xd
+#define GCEA_MAM_CTRL__TIMER_FLUSH_UPDT_TRACKER__SHIFT 0xe
+#define GCEA_MAM_CTRL__TIMER_FLUSH_GEN_INTERRUPT__SHIFT 0xf
+#define GCEA_MAM_CTRL__RESERVED_FIELD__SHIFT 0x10
+#define GCEA_MAM_CTRL__ARAM_NUM_RB_ENTRIES__SHIFT 0x17
+#define GCEA_MAM_CTRL__ARAM_RB_ADDR_HI__SHIFT 0x1c
+#define GCEA_MAM_CTRL__MAM_DISABLE_MASK 0x00000001L
+#define GCEA_MAM_CTRL__DBIT_COALESCE_DISABLE_MASK 0x00000002L
+#define GCEA_MAM_CTRL__ARAM_COALESCE_DISABLE_MASK 0x00000004L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_SNOOP_EN_MASK 0x00000008L
+#define GCEA_MAM_CTRL__SDMA_UPDT_ARAM_MASK 0x00000010L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_NOALLOC_MASK 0x00000020L
+#define GCEA_MAM_CTRL__FLUSH_TRACKER_MASK 0x00000040L
+#define GCEA_MAM_CTRL__CLEAR_TRACKER_MASK 0x00000080L
+#define GCEA_MAM_CTRL__SDP_PRIORITY_MASK 0x00000F00L
+#define GCEA_MAM_CTRL__FORCE_FLUSH_UPDT_TRACKER_MASK 0x00001000L
+#define GCEA_MAM_CTRL__FORCE_FLUSH_GEN_INTERRUPT_MASK 0x00002000L
+#define GCEA_MAM_CTRL__TIMER_FLUSH_UPDT_TRACKER_MASK 0x00004000L
+#define GCEA_MAM_CTRL__TIMER_FLUSH_GEN_INTERRUPT_MASK 0x00008000L
+#define GCEA_MAM_CTRL__RESERVED_FIELD_MASK 0x007F0000L
+#define GCEA_MAM_CTRL__ARAM_NUM_RB_ENTRIES_MASK 0x0F800000L
+#define GCEA_MAM_CTRL__ARAM_RB_ADDR_HI_MASK 0xF0000000L
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT__IOWR_DATAMEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_DSM_CNTL
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//GCEA_DSM_CNTLA
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//GCEA_DSM_CNTLB
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//GCEA_DSM_CNTL2
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//GCEA_DSM_CNTL2A
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//GCEA_DSM_CNTL2B
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//GCEA_GL2C_XBR_CREDITS
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
+//GCEA_GL2C_XBR_MAXBURST
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
+#define GCEA_GL2C_XBR_MAXBURST__IO_RD__SHIFT 0x4
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
+#define GCEA_GL2C_XBR_MAXBURST__IO_WR__SHIFT 0xc
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_FLUSH_TIMER__SHIFT 0x10
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_SAME64B_ONLY__SHIFT 0x13
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_FLUSH_TIMER__SHIFT 0x14
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_SAME64B_ONLY__SHIFT 0x17
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
+#define GCEA_GL2C_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
+#define GCEA_GL2C_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_FLUSH_TIMER_MASK 0x00070000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_SAME64B_ONLY_MASK 0x00080000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_FLUSH_TIMER_MASK 0x00700000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_SAME64B_ONLY_MASK 0x00800000L
+//GCEA_PROBE_CNTL
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
+//GCEA_PROBE_MAP
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTGL2C__SHIFT 0x0
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTGL2C__SHIFT 0x1
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTGL2C__SHIFT 0x2
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTGL2C__SHIFT 0x3
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTGL2C__SHIFT 0x4
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTGL2C__SHIFT 0x5
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTGL2C__SHIFT 0x6
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTGL2C__SHIFT 0x7
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTGL2C__SHIFT 0x8
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTGL2C__SHIFT 0x9
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTGL2C__SHIFT 0xa
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTGL2C__SHIFT 0xb
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTGL2C__SHIFT 0xc
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTGL2C__SHIFT 0xd
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTGL2C__SHIFT 0xe
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTGL2C__SHIFT 0xf
+#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTGL2C_MASK 0x00000001L
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTGL2C_MASK 0x00000002L
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTGL2C_MASK 0x00000004L
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTGL2C_MASK 0x00000008L
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTGL2C_MASK 0x00000010L
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTGL2C_MASK 0x00000020L
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTGL2C_MASK 0x00000040L
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTGL2C_MASK 0x00000080L
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTGL2C_MASK 0x00000100L
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTGL2C_MASK 0x00000200L
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTGL2C_MASK 0x00000400L
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTGL2C_MASK 0x00000800L
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTGL2C_MASK 0x00001000L
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTGL2C_MASK 0x00002000L
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTGL2C_MASK 0x00004000L
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTGL2C_MASK 0x00008000L
+#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+//GCEA_MISC2
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define GCEA_MISC2__BLOCK_REQUESTS__SHIFT 0xd
+#define GCEA_MISC2__REQUESTS_BLOCKED__SHIFT 0xe
+#define GCEA_MISC2__FGCLKEN_OVERRIDE__SHIFT 0xf
+#define GCEA_MISC2__LINKMGR_CRBUSY_MASK__SHIFT 0x10
+#define GCEA_MISC2__RDRET_FED_MASK__SHIFT 0x11
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define GCEA_MISC2__BLOCK_REQUESTS_MASK 0x00002000L
+#define GCEA_MISC2__REQUESTS_BLOCKED_MASK 0x00004000L
+#define GCEA_MISC2__FGCLKEN_OVERRIDE_MASK 0x00008000L
+#define GCEA_MISC2__LINKMGR_CRBUSY_MASK_MASK 0x00010000L
+#define GCEA_MISC2__RDRET_FED_MASK_MASK 0x00020000L
+
+
+// addressBlock: gc_gceadec3
+//GCEA_SDP_BACKDOOR_CMDCREDITS0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS1
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS1
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_MISCCREDITS
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x0000007FL
+//GCEA_RRET_MEM_RESERVE
+#define GCEA_RRET_MEM_RESERVE__VC0__SHIFT 0x0
+#define GCEA_RRET_MEM_RESERVE__VC1__SHIFT 0x4
+#define GCEA_RRET_MEM_RESERVE__VC2__SHIFT 0x8
+#define GCEA_RRET_MEM_RESERVE__VC3__SHIFT 0xc
+#define GCEA_RRET_MEM_RESERVE__VC4__SHIFT 0x10
+#define GCEA_RRET_MEM_RESERVE__VC5__SHIFT 0x14
+#define GCEA_RRET_MEM_RESERVE__VC6__SHIFT 0x18
+#define GCEA_RRET_MEM_RESERVE__VC7__SHIFT 0x1c
+#define GCEA_RRET_MEM_RESERVE__VC0_MASK 0x0000000FL
+#define GCEA_RRET_MEM_RESERVE__VC1_MASK 0x000000F0L
+#define GCEA_RRET_MEM_RESERVE__VC2_MASK 0x00000F00L
+#define GCEA_RRET_MEM_RESERVE__VC3_MASK 0x0000F000L
+#define GCEA_RRET_MEM_RESERVE__VC4_MASK 0x000F0000L
+#define GCEA_RRET_MEM_RESERVE__VC5_MASK 0x00F00000L
+#define GCEA_RRET_MEM_RESERVE__VC6_MASK 0x0F000000L
+#define GCEA_RRET_MEM_RESERVE__VC7_MASK 0xF0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT3__MAM_AFMEM_SEC_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_SEC_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_SDP_ENABLE
+#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GCEA_SDP_ENABLE__EARLY_CREDIT_REQUEST__SHIFT 0x1
+#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
+#define GCEA_SDP_ENABLE__EARLY_CREDIT_REQUEST_MASK 0x00000002L
+
+
+// addressBlock: gc_spipdec2
+//SPI_PQEV_CTRL
+#define SPI_PQEV_CTRL__SCAN_PERIOD__SHIFT 0x0
+#define SPI_PQEV_CTRL__QUEUE_DURATION__SHIFT 0xa
+#define SPI_PQEV_CTRL__COMPUTE_PIPE_EN__SHIFT 0x10
+#define SPI_PQEV_CTRL__SCAN_PERIOD_MASK 0x000003FFL
+#define SPI_PQEV_CTRL__QUEUE_DURATION_MASK 0x0000FC00L
+#define SPI_PQEV_CTRL__COMPUTE_PIPE_EN_MASK 0x00FF0000L
+//SPI_EXP_THROTTLE_CTRL
+#define SPI_EXP_THROTTLE_CTRL__ENABLE__SHIFT 0x0
+#define SPI_EXP_THROTTLE_CTRL__PERIOD__SHIFT 0x1
+#define SPI_EXP_THROTTLE_CTRL__UPSTEP__SHIFT 0x5
+#define SPI_EXP_THROTTLE_CTRL__DOWNSTEP__SHIFT 0x9
+#define SPI_EXP_THROTTLE_CTRL__LOW_STALL_MON_HIST_COUNT__SHIFT 0xd
+#define SPI_EXP_THROTTLE_CTRL__HIGH_STALL_MON_HIST_COUNT__SHIFT 0x10
+#define SPI_EXP_THROTTLE_CTRL__EXP_STALL_THRESHOLD__SHIFT 0x13
+#define SPI_EXP_THROTTLE_CTRL__SKEW_COUNT__SHIFT 0x1a
+#define SPI_EXP_THROTTLE_CTRL__THROTTLE_RESET__SHIFT 0x1d
+#define SPI_EXP_THROTTLE_CTRL__ENABLE_MASK 0x00000001L
+#define SPI_EXP_THROTTLE_CTRL__PERIOD_MASK 0x0000001EL
+#define SPI_EXP_THROTTLE_CTRL__UPSTEP_MASK 0x000001E0L
+#define SPI_EXP_THROTTLE_CTRL__DOWNSTEP_MASK 0x00001E00L
+#define SPI_EXP_THROTTLE_CTRL__LOW_STALL_MON_HIST_COUNT_MASK 0x0000E000L
+#define SPI_EXP_THROTTLE_CTRL__HIGH_STALL_MON_HIST_COUNT_MASK 0x00070000L
+#define SPI_EXP_THROTTLE_CTRL__EXP_STALL_THRESHOLD_MASK 0x03F80000L
+#define SPI_EXP_THROTTLE_CTRL__SKEW_COUNT_MASK 0x1C000000L
+#define SPI_EXP_THROTTLE_CTRL__THROTTLE_RESET_MASK 0x20000000L
+
+
+// addressBlock: gc_rmi_rmidec
+//RMI_GENERAL_CNTL
+#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
+#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
+//RMI_GENERAL_CNTL1
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xb
+#define RMI_GENERAL_CNTL1__ARBITER_ADDRESS_CHANGE_ENABLE__SHIFT 0xe
+#define RMI_GENERAL_CNTL1__LAST_OF_BURST_INSERTION_DISABLE__SHIFT 0xf
+#define RMI_GENERAL_CNTL1__TCIW0_PRODUCER_CREDITS__SHIFT 0x10
+#define RMI_GENERAL_CNTL1__TCIW1_PRODUCER_CREDITS__SHIFT 0x16
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000600L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000800L
+#define RMI_GENERAL_CNTL1__ARBITER_ADDRESS_CHANGE_ENABLE_MASK 0x00004000L
+#define RMI_GENERAL_CNTL1__LAST_OF_BURST_INSERTION_DISABLE_MASK 0x00008000L
+#define RMI_GENERAL_CNTL1__TCIW0_PRODUCER_CREDITS_MASK 0x003F0000L
+#define RMI_GENERAL_CNTL1__TCIW1_PRODUCER_CREDITS_MASK 0x0FC00000L
+//RMI_GENERAL_STATUS
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
+#define RMI_GENERAL_STATUS__RESERVED_BIT_6__SHIFT 0x6
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
+#define RMI_GENERAL_STATUS__RESERVED_BIT_18__SHIFT 0x12
+#define RMI_GENERAL_STATUS__RESERVED_BIT_19__SHIFT 0x13
+#define RMI_GENERAL_STATUS__RESERVED_BIT_20__SHIFT 0x14
+#define RMI_GENERAL_STATUS__RESERVED_BITS_28_21__SHIFT 0x15
+#define RMI_GENERAL_STATUS__RESERVED_BIT_29__SHIFT 0x1d
+#define RMI_GENERAL_STATUS__RESERVED_BIT_30__SHIFT 0x1e
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_6_MASK 0x00000040L
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_18_MASK 0x00040000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_19_MASK 0x00080000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_20_MASK 0x00100000L
+#define RMI_GENERAL_STATUS__RESERVED_BITS_28_21_MASK 0x1FE00000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_29_MASK 0x20000000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_30_MASK 0x40000000L
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
+//RMI_SUBBLOCK_STATUS0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
+//RMI_SUBBLOCK_STATUS1
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
+//RMI_SUBBLOCK_STATUS2
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
+//RMI_SUBBLOCK_STATUS3
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
+//RMI_XBAR_CONFIG
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
+#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
+//RMI_PROBE_POP_LOGIC_CNTL
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
+//RMI_UTC_XNACK_N_MISC_CNTL
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
+//RMI_DEMUX_CNTL
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_OVERRIDE_EN__SHIFT 0x2
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_OVERRIDE_EN__SHIFT 0x12
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_OVERRIDE_EN_MASK 0x00000004L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_OVERRIDE_EN_MASK 0x00040000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
+//RMI_UTCL1_CNTL1
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//RMI_UTCL1_CNTL2
+#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define RMI_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define RMI_UTCL1_CNTL2__PERM_MODE_OVRD__SHIFT 0x1b
+#define RMI_UTCL1_CNTL2__LINE_INVALIDATE_OPT__SHIFT 0x1c
+#define RMI_UTCL1_CNTL2__GPUVM_16K_DEFAULT__SHIFT 0x1d
+#define RMI_UTCL1_CNTL2__FGCG_DISABLE__SHIFT 0x1e
+#define RMI_UTCL1_CNTL2__RESERVED__SHIFT 0x1f
+#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define RMI_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define RMI_UTCL1_CNTL2__PERM_MODE_OVRD_MASK 0x08000000L
+#define RMI_UTCL1_CNTL2__LINE_INVALIDATE_OPT_MASK 0x10000000L
+#define RMI_UTCL1_CNTL2__GPUVM_16K_DEFAULT_MASK 0x20000000L
+#define RMI_UTCL1_CNTL2__FGCG_DISABLE_MASK 0x40000000L
+#define RMI_UTCL1_CNTL2__RESERVED_MASK 0x80000000L
+//RMI_UTC_UNIT_CONFIG
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN__SHIFT 0x0
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN_MASK 0x0000FFFFL
+//RMI_TCIW_FORMATTER0_CNTL
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
+//RMI_TCIW_FORMATTER1_CNTL
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
+//RMI_SCOREBOARD_CNTL
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
+//RMI_SCOREBOARD_STATUS0
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
+#define RMI_SCOREBOARD_STATUS0__COUNTER_SELECT__SHIFT 0x16
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
+#define RMI_SCOREBOARD_STATUS0__COUNTER_SELECT_MASK 0x07C00000L
+//RMI_SCOREBOARD_STATUS1
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
+//RMI_SCOREBOARD_STATUS2
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
+//RMI_XBAR_ARBITER_CONFIG
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_OVERRIDE_EN__SHIFT 0x15
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_OVERRIDE_EN_MASK 0x00200000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
+//RMI_XBAR_ARBITER_CONFIG_1
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
+//RMI_CLOCK_CNTRL
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
+//RMI_UTCL1_STATUS
+#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//RMI_RB_GLX_CID_MAP
+#define RMI_RB_GLX_CID_MAP__CB_COLOR_MAP__SHIFT 0x0
+#define RMI_RB_GLX_CID_MAP__CB_FMASK_MAP__SHIFT 0x4
+#define RMI_RB_GLX_CID_MAP__CB_CMASK_MAP__SHIFT 0x8
+#define RMI_RB_GLX_CID_MAP__CB_DCC_MAP__SHIFT 0xc
+#define RMI_RB_GLX_CID_MAP__DB_Z_MAP__SHIFT 0x10
+#define RMI_RB_GLX_CID_MAP__DB_S_MAP__SHIFT 0x14
+#define RMI_RB_GLX_CID_MAP__DB_TILE_MAP__SHIFT 0x18
+#define RMI_RB_GLX_CID_MAP__DB_ZPCPSD_MAP__SHIFT 0x1c
+#define RMI_RB_GLX_CID_MAP__CB_COLOR_MAP_MASK 0x0000000FL
+#define RMI_RB_GLX_CID_MAP__CB_FMASK_MAP_MASK 0x000000F0L
+#define RMI_RB_GLX_CID_MAP__CB_CMASK_MAP_MASK 0x00000F00L
+#define RMI_RB_GLX_CID_MAP__CB_DCC_MAP_MASK 0x0000F000L
+#define RMI_RB_GLX_CID_MAP__DB_Z_MAP_MASK 0x000F0000L
+#define RMI_RB_GLX_CID_MAP__DB_S_MAP_MASK 0x00F00000L
+#define RMI_RB_GLX_CID_MAP__DB_TILE_MAP_MASK 0x0F000000L
+#define RMI_RB_GLX_CID_MAP__DB_ZPCPSD_MAP_MASK 0xF0000000L
+//RMI_XNACK_DEBUG
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID__SHIFT 0x0
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID_MASK 0x0000FFFFL
+//RMI_SPARE
+#define RMI_SPARE__RMI_2_GL1_128B_READ_DISABLE__SHIFT 0x1
+#define RMI_SPARE__RMI_2_GL1_REPEATER_FGCG_DISABLE__SHIFT 0x2
+#define RMI_SPARE__RMI_2_RB_REPEATER_FGCG_DISABLE__SHIFT 0x3
+#define RMI_SPARE__EARLY_WRITE_ACK_ENABLE_C_RW_NOA_RESOLVE_DIS__SHIFT 0x4
+#define RMI_SPARE__RMI_REORDER_BYPASS_CHANNEL_DIS__SHIFT 0x5
+#define RMI_SPARE__XNACK_RETURN_DATA_OVERRIDE__SHIFT 0x6
+#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define RMI_SPARE__NOFILL_RMI_CID_CC__SHIFT 0x8
+#define RMI_SPARE__NOFILL_RMI_CID_FC__SHIFT 0x9
+#define RMI_SPARE__NOFILL_RMI_CID_CM__SHIFT 0xa
+#define RMI_SPARE__NOFILL_RMI_CID_DC__SHIFT 0xb
+#define RMI_SPARE__NOFILL_RMI_CID_Z__SHIFT 0xc
+#define RMI_SPARE__NOFILL_RMI_CID_S__SHIFT 0xd
+#define RMI_SPARE__NOFILL_RMI_CID_TILE__SHIFT 0xe
+#define RMI_SPARE__SPARE_BIT_15_0__SHIFT 0xf
+#define RMI_SPARE__ARBITER_ADDRESS_MASK__SHIFT 0x10
+#define RMI_SPARE__RMI_2_GL1_128B_READ_DISABLE_MASK 0x00000002L
+#define RMI_SPARE__RMI_2_GL1_REPEATER_FGCG_DISABLE_MASK 0x00000004L
+#define RMI_SPARE__RMI_2_RB_REPEATER_FGCG_DISABLE_MASK 0x00000008L
+#define RMI_SPARE__EARLY_WRITE_ACK_ENABLE_C_RW_NOA_RESOLVE_DIS_MASK 0x00000010L
+#define RMI_SPARE__RMI_REORDER_BYPASS_CHANNEL_DIS_MASK 0x00000020L
+#define RMI_SPARE__XNACK_RETURN_DATA_OVERRIDE_MASK 0x00000040L
+#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define RMI_SPARE__NOFILL_RMI_CID_CC_MASK 0x00000100L
+#define RMI_SPARE__NOFILL_RMI_CID_FC_MASK 0x00000200L
+#define RMI_SPARE__NOFILL_RMI_CID_CM_MASK 0x00000400L
+#define RMI_SPARE__NOFILL_RMI_CID_DC_MASK 0x00000800L
+#define RMI_SPARE__NOFILL_RMI_CID_Z_MASK 0x00001000L
+#define RMI_SPARE__NOFILL_RMI_CID_S_MASK 0x00002000L
+#define RMI_SPARE__NOFILL_RMI_CID_TILE_MASK 0x00004000L
+#define RMI_SPARE__SPARE_BIT_15_0_MASK 0x00008000L
+#define RMI_SPARE__ARBITER_ADDRESS_MASK_MASK 0xFFFF0000L
+//RMI_SPARE_1
+#define RMI_SPARE_1__EARLY_WRACK_FIFO_DISABLE__SHIFT 0x0
+#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
+#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
+#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
+#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
+#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
+#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
+#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
+#define RMI_SPARE_1__RMI_REORDER_DIS_BY_CID__SHIFT 0x8
+#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
+#define RMI_SPARE_1__EARLY_WRACK_FIFO_DISABLE_MASK 0x00000001L
+#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
+#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
+#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
+#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
+#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
+#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
+#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
+#define RMI_SPARE_1__RMI_REORDER_DIS_BY_CID_MASK 0x0000FF00L
+#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
+//RMI_SPARE_2
+#define RMI_SPARE_2__ERROR_ZERO_BYTE_MASK_CID__SHIFT 0x0
+#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
+#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
+#define RMI_SPARE_2__ERROR_ZERO_BYTE_MASK_CID_MASK 0x0000FFFFL
+#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
+#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
+//CC_RMI_REDUNDANCY
+#define CC_RMI_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_0__SHIFT 0x1
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_1__SHIFT 0x2
+#define CC_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE__SHIFT 0x3
+#define CC_RMI_REDUNDANCY__REPAIR_ID_SWAP__SHIFT 0x4
+#define CC_RMI_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_0_MASK 0x00000002L
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_1_MASK 0x00000004L
+#define CC_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE_MASK 0x00000008L
+#define CC_RMI_REDUNDANCY__REPAIR_ID_SWAP_MASK 0x00000010L
+
+
+// addressBlock: gc_pmmdec
+//GCR_PIO_CNTL
+#define GCR_PIO_CNTL__GCR_DATA_INDEX__SHIFT 0x0
+#define GCR_PIO_CNTL__GCR_REG_DONE__SHIFT 0x2
+#define GCR_PIO_CNTL__GCR_REG_RESET__SHIFT 0x3
+#define GCR_PIO_CNTL__GCR_PIO_RSP_TAG__SHIFT 0x10
+#define GCR_PIO_CNTL__GCR_PIO_RSP_DONE__SHIFT 0x1e
+#define GCR_PIO_CNTL__GCR_READY__SHIFT 0x1f
+#define GCR_PIO_CNTL__GCR_DATA_INDEX_MASK 0x00000003L
+#define GCR_PIO_CNTL__GCR_REG_DONE_MASK 0x00000004L
+#define GCR_PIO_CNTL__GCR_REG_RESET_MASK 0x00000008L
+#define GCR_PIO_CNTL__GCR_PIO_RSP_TAG_MASK 0x00FF0000L
+#define GCR_PIO_CNTL__GCR_PIO_RSP_DONE_MASK 0x40000000L
+#define GCR_PIO_CNTL__GCR_READY_MASK 0x80000000L
+//GCR_PIO_DATA
+#define GCR_PIO_DATA__GCR_DATA__SHIFT 0x0
+#define GCR_PIO_DATA__GCR_DATA_MASK 0xFFFFFFFFL
+//PMM_CNTL
+#define PMM_CNTL__PMM_DISABLE__SHIFT 0x0
+#define PMM_CNTL__ABIT_FORCE_FLUSH__SHIFT 0x1
+#define PMM_CNTL__ABIT_TIMER_THRESHOLD__SHIFT 0x2
+#define PMM_CNTL__ABIT_TIMER_DISABLE__SHIFT 0x6
+#define PMM_CNTL__ABIT_TIMER_RESET__SHIFT 0x7
+#define PMM_CNTL__INTERRUPT_PRIORITY__SHIFT 0x8
+#define PMM_CNTL__PMM_INTERRUPTS_DISABLE__SHIFT 0xa
+#define PMM_CNTL__RESERVED__SHIFT 0xb
+#define PMM_CNTL__PMM_DISABLE_MASK 0x00000001L
+#define PMM_CNTL__ABIT_FORCE_FLUSH_MASK 0x00000002L
+#define PMM_CNTL__ABIT_TIMER_THRESHOLD_MASK 0x0000003CL
+#define PMM_CNTL__ABIT_TIMER_DISABLE_MASK 0x00000040L
+#define PMM_CNTL__ABIT_TIMER_RESET_MASK 0x00000080L
+#define PMM_CNTL__INTERRUPT_PRIORITY_MASK 0x00000300L
+#define PMM_CNTL__PMM_INTERRUPTS_DISABLE_MASK 0x00000400L
+#define PMM_CNTL__RESERVED_MASK 0xFFFFF800L
+//PMM_STATUS
+#define PMM_STATUS__PMM_IDLE__SHIFT 0x0
+#define PMM_STATUS__ABIT_FORCE_FLUSH_IN_PROGRESS__SHIFT 0x1
+#define PMM_STATUS__ABIT_FORCE_FLUSH_DONE__SHIFT 0x2
+#define PMM_STATUS__ABIT_TIMER_FLUSH_IN_PROGRESS__SHIFT 0x3
+#define PMM_STATUS__ABIT_TIMER_FLUSH_DONE__SHIFT 0x4
+#define PMM_STATUS__ABIT_TIMER_RUNNING__SHIFT 0x5
+#define PMM_STATUS__PMM_INTERRUPTS_PENDING__SHIFT 0x6
+#define PMM_STATUS__ABIT_FLUSH_ERROR__SHIFT 0x7
+#define PMM_STATUS__ABIT_TIMER_RESET_CDC_IN_PROGRESS__SHIFT 0x8
+#define PMM_STATUS__ABIT_TIMER_ENABLE_CDC_IN_PROGRESS__SHIFT 0x9
+#define PMM_STATUS__ABIT_TIMER_THRESHOLD_CDC_IN_PROGRESS__SHIFT 0xa
+#define PMM_STATUS__RESERVED__SHIFT 0xb
+#define PMM_STATUS__PMM_IDLE_MASK 0x00000001L
+#define PMM_STATUS__ABIT_FORCE_FLUSH_IN_PROGRESS_MASK 0x00000002L
+#define PMM_STATUS__ABIT_FORCE_FLUSH_DONE_MASK 0x00000004L
+#define PMM_STATUS__ABIT_TIMER_FLUSH_IN_PROGRESS_MASK 0x00000008L
+#define PMM_STATUS__ABIT_TIMER_FLUSH_DONE_MASK 0x00000010L
+#define PMM_STATUS__ABIT_TIMER_RUNNING_MASK 0x00000020L
+#define PMM_STATUS__PMM_INTERRUPTS_PENDING_MASK 0x00000040L
+#define PMM_STATUS__ABIT_FLUSH_ERROR_MASK 0x00000080L
+#define PMM_STATUS__ABIT_TIMER_RESET_CDC_IN_PROGRESS_MASK 0x00000100L
+#define PMM_STATUS__ABIT_TIMER_ENABLE_CDC_IN_PROGRESS_MASK 0x00000200L
+#define PMM_STATUS__ABIT_TIMER_THRESHOLD_CDC_IN_PROGRESS_MASK 0x00000400L
+#define PMM_STATUS__RESERVED_MASK 0xFFFFF800L
+
+
+// addressBlock: gc_utcl1dec
+//UTCL1_CTRL_1
+#define UTCL1_CTRL_1__UTCL1_CACHE_CORE_BYPASS__SHIFT 0x0
+#define UTCL1_CTRL_1__UTCL1_TCP_BYPASS__SHIFT 0x1
+#define UTCL1_CTRL_1__UTCL1_SQCI_BYPASS__SHIFT 0x2
+#define UTCL1_CTRL_1__UTCL1_SQCD_BYPASS__SHIFT 0x3
+#define UTCL1_CTRL_1__UTCL1_RMI_BYPASS__SHIFT 0x4
+#define UTCL1_CTRL_1__UTCL1_SQG_BYPASS__SHIFT 0x5
+#define UTCL1_CTRL_1__UTCL1_FORCE_RANGE_INV_TO_VMID__SHIFT 0x6
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL__SHIFT 0x7
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_DONE__SHIFT 0x8
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_1__SHIFT 0x9
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_2__SHIFT 0xb
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_3__SHIFT 0xd
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_4__SHIFT 0xf
+#define UTCL1_CTRL_1__RESERVED__SHIFT 0x11
+#define UTCL1_CTRL_1__UTCL1_CACHE_CORE_BYPASS_MASK 0x00000001L
+#define UTCL1_CTRL_1__UTCL1_TCP_BYPASS_MASK 0x00000002L
+#define UTCL1_CTRL_1__UTCL1_SQCI_BYPASS_MASK 0x00000004L
+#define UTCL1_CTRL_1__UTCL1_SQCD_BYPASS_MASK 0x00000008L
+#define UTCL1_CTRL_1__UTCL1_RMI_BYPASS_MASK 0x00000010L
+#define UTCL1_CTRL_1__UTCL1_SQG_BYPASS_MASK 0x00000020L
+#define UTCL1_CTRL_1__UTCL1_FORCE_RANGE_INV_TO_VMID_MASK 0x00000040L
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_MASK 0x00000080L
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_DONE_MASK 0x00000100L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_1_MASK 0x00000600L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_2_MASK 0x00001800L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_3_MASK 0x00006000L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_4_MASK 0x00018000L
+#define UTCL1_CTRL_1__RESERVED_MASK 0xFFFE0000L
+//UTCL1_ALOG
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_THRESHOLD__SHIFT 0x0
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER2_BYPASS__SHIFT 0x3
+#define UTCL1_ALOG__UTCL1_ALOG_ACTIVE__SHIFT 0x4
+#define UTCL1_ALOG__UTCL1_ALOG_MODE__SHIFT 0x5
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_LOCK_WINDOW__SHIFT 0x6
+#define UTCL1_ALOG__UTCL1_ALOG_ONLY_MISS__SHIFT 0x9
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_INTR_THRESHOLD__SHIFT 0xa
+#define UTCL1_ALOG__UTCL1_ALOG_SPACE_EN__SHIFT 0xc
+#define UTCL1_ALOG__UTCL1_ALOG_CLEAN__SHIFT 0xf
+#define UTCL1_ALOG__UTCL1_ALOG_IDLE__SHIFT 0x10
+#define UTCL1_ALOG__UTCL1_ALOG_TRACK_SEGMENT_SIZE__SHIFT 0x11
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_BYPASS__SHIFT 0x17
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_INTR_ON_ALLOC__SHIFT 0x18
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_THRESHOLD_MASK 0x00000007L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER2_BYPASS_MASK 0x00000008L
+#define UTCL1_ALOG__UTCL1_ALOG_ACTIVE_MASK 0x00000010L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE_MASK 0x00000020L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_LOCK_WINDOW_MASK 0x000001C0L
+#define UTCL1_ALOG__UTCL1_ALOG_ONLY_MISS_MASK 0x00000200L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_INTR_THRESHOLD_MASK 0x00000C00L
+#define UTCL1_ALOG__UTCL1_ALOG_SPACE_EN_MASK 0x00007000L
+#define UTCL1_ALOG__UTCL1_ALOG_CLEAN_MASK 0x00008000L
+#define UTCL1_ALOG__UTCL1_ALOG_IDLE_MASK 0x00010000L
+#define UTCL1_ALOG__UTCL1_ALOG_TRACK_SEGMENT_SIZE_MASK 0x007E0000L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_BYPASS_MASK 0x00800000L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_INTR_ON_ALLOC_MASK 0x01000000L
+//UTCL1_STATUS
+#define UTCL1_STATUS__UTCL1_HIT_PATH_BUSY__SHIFT 0x0
+#define UTCL1_STATUS__UTCL1_MH_BUSY__SHIFT 0x1
+#define UTCL1_STATUS__UTCL1_INV_BUSY__SHIFT 0x2
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_REQ__SHIFT 0x3
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_RET__SHIFT 0x4
+#define UTCL1_STATUS__UTCL1_LAST_UTCL2_RET_XNACK__SHIFT 0x5
+#define UTCL1_STATUS__UTCL1_RANGE_INV_IN_PROGRESS__SHIFT 0x7
+#define UTCL1_STATUS__RESERVED__SHIFT 0x8
+#define UTCL1_STATUS__UTCL1_HIT_PATH_BUSY_MASK 0x00000001L
+#define UTCL1_STATUS__UTCL1_MH_BUSY_MASK 0x00000002L
+#define UTCL1_STATUS__UTCL1_INV_BUSY_MASK 0x00000004L
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_REQ_MASK 0x00000008L
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_RET_MASK 0x00000010L
+#define UTCL1_STATUS__UTCL1_LAST_UTCL2_RET_XNACK_MASK 0x00000060L
+#define UTCL1_STATUS__UTCL1_RANGE_INV_IN_PROGRESS_MASK 0x00000080L
+#define UTCL1_STATUS__RESERVED_MASK 0x00000100L
+
+
+// addressBlock: gc_gcvmsharedpfdec
+//GCMC_VM_NB_MMIOBASE
+#define GCMC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define GCMC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//GCMC_VM_NB_MMIOLIMIT
+#define GCMC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define GCMC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//GCMC_VM_NB_PCI_CTRL
+#define GCMC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define GCMC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//GCMC_VM_NB_PCI_ARB
+#define GCMC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define GCMC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//GCMC_VM_NB_TOP_OF_DRAM_SLOT1
+#define GCMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define GCMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//GCMC_VM_NB_LOWER_TOP_OF_DRAM2
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//GCMC_VM_NB_UPPER_TOP_OF_DRAM2
+#define GCMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define GCMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//GCMC_VM_FB_OFFSET
+#define GCMC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define GCMC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//GCMC_VM_STEERING
+#define GCMC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define GCMC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//GCMC_SHARED_VIRT_RESET_REQ
+#define GCMC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define GCMC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define GCMC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define GCMC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//GCMC_MEM_POWER_LS
+#define GCMC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define GCMC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define GCMC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define GCMC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//GCMC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_SYSMEM_ADDRESS_START
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_SYSMEM_ADDRESS_END
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_APT_CNTL
+#define GCMC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define GCMC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define GCMC_VM_APT_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x2
+#define GCMC_VM_APT_CNTL__CHECK_IS_LOCAL__SHIFT 0x4
+#define GCMC_VM_APT_CNTL__CAP_FRAG_SIZE_2M__SHIFT 0x5
+#define GCMC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL__SHIFT 0x6
+#define GCMC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define GCMC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+#define GCMC_VM_APT_CNTL__FRAG_APT_INTXN_MODE_MASK 0x0000000CL
+#define GCMC_VM_APT_CNTL__CHECK_IS_LOCAL_MASK 0x00000010L
+#define GCMC_VM_APT_CNTL__CAP_FRAG_SIZE_2M_MASK 0x00000020L
+#define GCMC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL_MASK 0x000000C0L
+//GCMC_VM_LOCAL_FB_ADDRESS_START
+#define GCMC_VM_LOCAL_FB_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_FB_ADDRESS_END
+#define GCMC_VM_LOCAL_FB_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL
+#define GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//GCUTCL2_ICG_CTRL
+#define GCUTCL2_ICG_CTRL__OFF_HYSTERESIS__SHIFT 0x0
+#define GCUTCL2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE__SHIFT 0x4
+#define GCUTCL2_ICG_CTRL__STATIC_CLOCK_OVERRIDE__SHIFT 0x5
+#define GCUTCL2_ICG_CTRL__AON_CLOCK_OVERRIDE__SHIFT 0x6
+#define GCUTCL2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE__SHIFT 0x7
+#define GCUTCL2_ICG_CTRL__OFF_HYSTERESIS_MASK 0x0000000FL
+#define GCUTCL2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE_MASK 0x00000010L
+#define GCUTCL2_ICG_CTRL__STATIC_CLOCK_OVERRIDE_MASK 0x00000020L
+#define GCUTCL2_ICG_CTRL__AON_CLOCK_OVERRIDE_MASK 0x00000040L
+#define GCUTCL2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE_MASK 0x00000080L
+//GCMC_SHARED_ACTIVE_FCN_ID
+#define GCMC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define GCMC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1e
+#define GCMC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define GCMC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x40000000L
+//GCUTCL2_CGTT_BUSY_CTRL
+#define GCUTCL2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define GCUTCL2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x5
+#define GCUTCL2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000001FL
+#define GCUTCL2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000020L
+//GCMC_VM_FB_NOALLOC_CNTL
+#define GCMC_VM_FB_NOALLOC_CNTL__LOCAL_FB_NOALLOC_NOPTE__SHIFT 0x0
+#define GCMC_VM_FB_NOALLOC_CNTL__REMOTE_FB_NOALLOC_NOPTE__SHIFT 0x1
+#define GCMC_VM_FB_NOALLOC_CNTL__FB_NOALLOC_WALKER_FETCH__SHIFT 0x2
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_ATCL2_NOALLOC__SHIFT 0x3
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE2_NOALLOC__SHIFT 0x4
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE3_NOALLOC__SHIFT 0x5
+#define GCMC_VM_FB_NOALLOC_CNTL__LOCAL_FB_NOALLOC_NOPTE_MASK 0x00000001L
+#define GCMC_VM_FB_NOALLOC_CNTL__REMOTE_FB_NOALLOC_NOPTE_MASK 0x00000002L
+#define GCMC_VM_FB_NOALLOC_CNTL__FB_NOALLOC_WALKER_FETCH_MASK 0x00000004L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_ATCL2_NOALLOC_MASK 0x00000008L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE2_NOALLOC_MASK 0x00000010L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE3_NOALLOC_MASK 0x00000020L
+//GCUTCL2_HARVEST_BYPASS_GROUPS
+#define GCUTCL2_HARVEST_BYPASS_GROUPS__BYPASS_GROUPS__SHIFT 0x0
+#define GCUTCL2_HARVEST_BYPASS_GROUPS__BYPASS_GROUPS_MASK 0xFFFFFFFFL
+//GCUTCL2_GROUP_RET_FAULT_STATUS
+#define GCUTCL2_GROUP_RET_FAULT_STATUS__FAULT_GROUPS__SHIFT 0x0
+#define GCUTCL2_GROUP_RET_FAULT_STATUS__FAULT_GROUPS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcvml2pfdec
+//GCVM_L2_CNTL
+#define GCVM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define GCVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define GCVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define GCVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define GCVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define GCVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define GCVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define GCVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define GCVM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define GCVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define GCVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define GCVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define GCVM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define GCVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define GCVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define GCVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define GCVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define GCVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define GCVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define GCVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define GCVM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define GCVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define GCVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define GCVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//GCVM_L2_CNTL2
+#define GCVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define GCVM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define GCVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define GCVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define GCVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define GCVM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define GCVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define GCVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define GCVM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define GCVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define GCVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define GCVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define GCVM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define GCVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//GCVM_L2_CNTL3
+#define GCVM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define GCVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define GCVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define GCVM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define GCVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define GCVM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define GCVM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//GCVM_L2_STATUS
+#define GCVM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define GCVM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define GCVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define GCVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define GCVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define GCVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define GCVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define GCVM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define GCVM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define GCVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define GCVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define GCVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define GCVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define GCVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//GCVM_DUMMY_PAGE_FAULT_CNTL
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//GCVM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_INVALIDATE_CNTL
+#define GCVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING__SHIFT 0x0
+#define GCVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING__SHIFT 0x8
+#define GCVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING_MASK 0x000000FFL
+#define GCVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING_MASK 0x0000FF00L
+//GCVM_L2_PROTECTION_FAULT_CNTL
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define GCVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define GCVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define GCVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define GCVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define GCVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define GCVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define GCVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define GCVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//GCVM_L2_PROTECTION_FAULT_CNTL2
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//GCVM_L2_PROTECTION_FAULT_MM_CNTL3
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_MM_CNTL4
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_STATUS
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define GCVM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define GCVM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define GCVM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PRT__SHIFT 0x1d
+#define GCVM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PRT_MASK 0x20000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
+//GCVM_L2_PROTECTION_FAULT_ADDR_LO32
+#define GCVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_ADDR_HI32
+#define GCVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//GCVM_L2_CNTL4
+#define GCVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define GCVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define GCVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define GCVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define GCVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define GCVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define GCVM_L2_CNTL4__GC_CH_FGCG_OFF__SHIFT 0x1d
+#define GCVM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE__SHIFT 0x1e
+#define GCVM_L2_CNTL4__VFIFO_VISIBLE_BANK_SILOS__SHIFT 0x1f
+#define GCVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define GCVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define GCVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define GCVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define GCVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define GCVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+#define GCVM_L2_CNTL4__GC_CH_FGCG_OFF_MASK 0x20000000L
+#define GCVM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE_MASK 0x40000000L
+#define GCVM_L2_CNTL4__VFIFO_VISIBLE_BANK_SILOS_MASK 0x80000000L
+//GCVM_L2_MM_GROUP_RT_CLASSES
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//GCVM_L2_BANK_SELECT_RESERVED_CID
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
+//GCVM_L2_BANK_SELECT_RESERVED_CID2
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
+//GCVM_L2_CACHE_PARITY_CNTL
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//GCVM_L2_ICG_CTRL
+#define GCVM_L2_ICG_CTRL__OFF_HYSTERESIS__SHIFT 0x0
+#define GCVM_L2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE__SHIFT 0x4
+#define GCVM_L2_ICG_CTRL__STATIC_CLOCK_OVERRIDE__SHIFT 0x5
+#define GCVM_L2_ICG_CTRL__AON_CLOCK_OVERRIDE__SHIFT 0x6
+#define GCVM_L2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE__SHIFT 0x7
+#define GCVM_L2_ICG_CTRL__OFF_HYSTERESIS_MASK 0x0000000FL
+#define GCVM_L2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE_MASK 0x00000010L
+#define GCVM_L2_ICG_CTRL__STATIC_CLOCK_OVERRIDE_MASK 0x00000020L
+#define GCVM_L2_ICG_CTRL__AON_CLOCK_OVERRIDE_MASK 0x00000040L
+#define GCVM_L2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE_MASK 0x00000080L
+//GCVM_L2_CNTL5
+#define GCVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID__SHIFT 0x5
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE__SHIFT 0xe
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE__SHIFT 0xf
+#define GCVM_L2_CNTL5__UTCL2_ATC_REQ_FGCG_OFF__SHIFT 0x10
+#define GCVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID_MASK 0x00003FE0L
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE_MASK 0x00004000L
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE_MASK 0x00008000L
+#define GCVM_L2_CNTL5__UTCL2_ATC_REQ_FGCG_OFF_MASK 0x00010000L
+//GCVM_L2_GCR_CNTL
+#define GCVM_L2_GCR_CNTL__GCR_ENABLE__SHIFT 0x0
+#define GCVM_L2_GCR_CNTL__GCR_CLIENT_ID__SHIFT 0x1
+#define GCVM_L2_GCR_CNTL__GCR_ENABLE_MASK 0x00000001L
+#define GCVM_L2_GCR_CNTL__GCR_CLIENT_ID_MASK 0x000003FEL
+//GCVML2_WALKER_MACRO_THROTTLE_TIME
+#define GCVML2_WALKER_MACRO_THROTTLE_TIME__TIME__SHIFT 0x0
+#define GCVML2_WALKER_MACRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
+//GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT
+#define GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
+#define GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
+//GCVML2_WALKER_MICRO_THROTTLE_TIME
+#define GCVML2_WALKER_MICRO_THROTTLE_TIME__TIME__SHIFT 0x0
+#define GCVML2_WALKER_MICRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
+//GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT
+#define GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
+#define GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
+//GCVM_L2_CGTT_BUSY_CTRL
+#define GCVM_L2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define GCVM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x5
+#define GCVM_L2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000001FL
+#define GCVM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000020L
+//GCVM_L2_PTE_CACHE_DUMP_CNTL
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ENABLE__SHIFT 0x0
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__READY__SHIFT 0x1
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__BANK__SHIFT 0x4
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__CACHE__SHIFT 0x8
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ASSOC__SHIFT 0xc
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__INDEX__SHIFT 0x10
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ENABLE_MASK 0x00000001L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__READY_MASK 0x00000002L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__BANK_MASK 0x000000F0L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__CACHE_MASK 0x00000F00L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ASSOC_MASK 0x0000F000L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__INDEX_MASK 0xFFFF0000L
+//GCVM_L2_PTE_CACHE_DUMP_READ
+#define GCVM_L2_PTE_CACHE_DUMP_READ__DATA__SHIFT 0x0
+#define GCVM_L2_PTE_CACHE_DUMP_READ__DATA_MASK 0xFFFFFFFFL
+//GCVM_L2_BANK_SELECT_MASKS
+#define GCVM_L2_BANK_SELECT_MASKS__MASK0__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_MASKS__MASK1__SHIFT 0x4
+#define GCVM_L2_BANK_SELECT_MASKS__MASK2__SHIFT 0x8
+#define GCVM_L2_BANK_SELECT_MASKS__MASK3__SHIFT 0xc
+#define GCVM_L2_BANK_SELECT_MASKS__MASK0_MASK 0x0000000FL
+#define GCVM_L2_BANK_SELECT_MASKS__MASK1_MASK 0x000000F0L
+#define GCVM_L2_BANK_SELECT_MASKS__MASK2_MASK 0x00000F00L
+#define GCVM_L2_BANK_SELECT_MASKS__MASK3_MASK 0x0000F000L
+//GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__UPDATE_MASK 0x00000400L
+//GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__UPDATE_MASK 0x00000400L
+//GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__UPDATE_MASK 0x00000400L
+//GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__CREDITS__SHIFT 0x0
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__UPDATE__SHIFT 0xa
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__CREDITS_MASK 0x000003FFL
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__UPDATE_MASK 0x00000400L
+//GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__CREDITS__SHIFT 0x0
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__UPDATE__SHIFT 0xa
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__CREDITS_MASK 0x000003FFL
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__UPDATE_MASK 0x00000400L
+
+
+// addressBlock: gc_gcatcl2dec
+//GC_ATC_L2_CNTL
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define GC_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define GC_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define GC_ATC_L2_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x14
+#define GC_ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE__SHIFT 0x16
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define GC_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define GC_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+#define GC_ATC_L2_CNTL__FRAG_APT_INTXN_MODE_MASK 0x00300000L
+#define GC_ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE_MASK 0x0FC00000L
+//GC_ATC_L2_CNTL2
+#define GC_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define GC_ATC_L2_CNTL2__NUM_BANKS_LOG2__SHIFT 0x6
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x9
+#define GC_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xb
+#define GC_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0xc
+#define GC_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xf
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x12
+#define GC_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define GC_ATC_L2_CNTL2__NUM_BANKS_LOG2_MASK 0x000001C0L
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x00000600L
+#define GC_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000800L
+#define GC_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00007000L
+#define GC_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00038000L
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00FC0000L
+//GC_ATC_L2_CACHE_DATA0
+#define GC_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define GC_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define GC_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x18
+#define GC_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define GC_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define GC_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x00FFFFFCL
+#define GC_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x0F000000L
+//GC_ATC_L2_CACHE_DATA1
+#define GC_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//GC_ATC_L2_CACHE_DATA2
+#define GC_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//GC_ATC_L2_CNTL3
+#define GC_ATC_L2_CNTL3__L2_SMALLK_CACHE_FRAGMENT_SIZE__SHIFT 0x0
+#define GC_ATC_L2_CNTL3__L2_MIDK_CACHE_FRAGMENT_SIZE__SHIFT 0x6
+#define GC_ATC_L2_CNTL3__L2_BIGK_CACHE_FRAGMENT_SIZE__SHIFT 0xc
+#define GC_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x12
+#define GC_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x15
+#define GC_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x1b
+#define GC_ATC_L2_CNTL3__REPEATER_FGCG_OFF__SHIFT 0x1e
+#define GC_ATC_L2_CNTL3__L2_SMALLK_CACHE_FRAGMENT_SIZE_MASK 0x0000003FL
+#define GC_ATC_L2_CNTL3__L2_MIDK_CACHE_FRAGMENT_SIZE_MASK 0x00000FC0L
+#define GC_ATC_L2_CNTL3__L2_BIGK_CACHE_FRAGMENT_SIZE_MASK 0x0003F000L
+#define GC_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x001C0000L
+#define GC_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x07E00000L
+#define GC_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x38000000L
+#define GC_ATC_L2_CNTL3__REPEATER_FGCG_OFF_MASK 0x40000000L
+//GC_ATC_L2_STATUS
+#define GC_ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define GC_ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS__SHIFT 0x1
+#define GC_ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define GC_ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS_MASK 0x00000002L
+//GC_ATC_L2_STATUS2
+#define GC_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define GC_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define GC_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define GC_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//GC_ATC_L2_MISC_CG
+#define GC_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define GC_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define GC_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define GC_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define GC_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define GC_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//GC_ATC_L2_MEM_POWER_LS
+#define GC_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define GC_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define GC_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define GC_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//GC_ATC_L2_SDPPORT_CTRL
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN__SHIFT 0x0
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV__SHIFT 0x1
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN__SHIFT 0x2
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x3
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN__SHIFT 0x4
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV__SHIFT 0x5
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN__SHIFT 0x6
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV__SHIFT 0x7
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN__SHIFT 0x8
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV__SHIFT 0x9
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN_MASK 0x00000001L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV_MASK 0x00000002L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN_MASK 0x00000004L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV_MASK 0x00000008L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN_MASK 0x00000010L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV_MASK 0x00000020L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN_MASK 0x00000040L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV_MASK 0x00000080L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN_MASK 0x00000100L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV_MASK 0x00000200L
+
+
+// addressBlock: gc_gcl2tlbpfdec
+//GCL2TLB_TLB0_STATUS
+#define GCL2TLB_TLB0_STATUS__BUSY__SHIFT 0x0
+#define GCL2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define GCL2TLB_TLB0_STATUS__FOUND_APERTURE_FAULTS__SHIFT 0x2
+#define GCL2TLB_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define GCL2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+#define GCL2TLB_TLB0_STATUS__FOUND_APERTURE_FAULTS_MASK 0x00000004L
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR_MASK 0xFFFFFFFFL
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID__SHIFT 0x4
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID__SHIFT 0x8
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF__SHIFT 0xc
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA__SHIFT 0xd
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM__SHIFT 0xf
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM__SHIFT 0x10
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM__SHIFT 0x11
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID__SHIFT 0x12
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ__SHIFT 0x1e
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR_MASK 0x0000000FL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID_MASK 0x000000F0L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID_MASK 0x00000F00L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF_MASK 0x00001000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA_MASK 0x00006000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM_MASK 0x00008000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM_MASK 0x00010000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM_MASK 0x00020000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID_MASK 0x07FC0000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ_MASK 0x40000000L
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR_MASK 0xFFFFFFFFL
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS__SHIFT 0x4
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE__SHIFT 0x7
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP__SHIFT 0xd
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA__SHIFT 0xe
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO__SHIFT 0xf
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ__SHIFT 0x10
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE__SHIFT 0x11
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE__SHIFT 0x12
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG__SHIFT 0x15
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK__SHIFT 0x16
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC__SHIFT 0x18
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK__SHIFT 0x1f
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR_MASK 0x0000000FL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS_MASK 0x00000070L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE_MASK 0x00001F80L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP_MASK 0x00002000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA_MASK 0x00004000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO_MASK 0x00008000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ_MASK 0x00010000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE_MASK 0x00020000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE_MASK 0x001C0000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG_MASK 0x00200000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK_MASK 0x00C00000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC_MASK 0x01000000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK_MASK 0x80000000L
+
+
+// addressBlock: gc_gcvmsharedvcdec
+//GCMC_VM_FB_LOCATION_BASE
+#define GCMC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//GCMC_VM_FB_LOCATION_TOP
+#define GCMC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define GCMC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_TOP
+#define GCMC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define GCMC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_BOT
+#define GCMC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define GCMC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_BASE
+#define GCMC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define GCMC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define GCMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//GCMC_VM_MX_L1_TLB_CNTL
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define GCMC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define GCMC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define GCMC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define GCMC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00003800L
+
+
+// addressBlock: gc_gcvml2vcdec
+//GCVM_CONTEXT0_CNTL
+#define GCVM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT1_CNTL
+#define GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT2_CNTL
+#define GCVM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT3_CNTL
+#define GCVM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT4_CNTL
+#define GCVM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT5_CNTL
+#define GCVM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT6_CNTL
+#define GCVM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT7_CNTL
+#define GCVM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT8_CNTL
+#define GCVM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT9_CNTL
+#define GCVM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT10_CNTL
+#define GCVM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT11_CNTL
+#define GCVM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT12_CNTL
+#define GCVM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT13_CNTL
+#define GCVM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT14_CNTL
+#define GCVM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT15_CNTL
+#define GCVM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXTS_DISABLE
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//GCVM_INVALIDATE_ENG0_SEM
+#define GCVM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG1_SEM
+#define GCVM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG2_SEM
+#define GCVM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG3_SEM
+#define GCVM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG4_SEM
+#define GCVM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG5_SEM
+#define GCVM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG6_SEM
+#define GCVM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG7_SEM
+#define GCVM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG8_SEM
+#define GCVM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG9_SEM
+#define GCVM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG10_SEM
+#define GCVM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG11_SEM
+#define GCVM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG12_SEM
+#define GCVM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG13_SEM
+#define GCVM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG14_SEM
+#define GCVM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG15_SEM
+#define GCVM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG16_SEM
+#define GCVM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG17_SEM
+#define GCVM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG0_REQ
+#define GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG0_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG0_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG1_REQ
+#define GCVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG1_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG1_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG2_REQ
+#define GCVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG2_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG2_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG3_REQ
+#define GCVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG3_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG3_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG4_REQ
+#define GCVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG4_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG4_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG5_REQ
+#define GCVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG5_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG5_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG6_REQ
+#define GCVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG6_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG6_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG7_REQ
+#define GCVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG7_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG7_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG8_REQ
+#define GCVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG8_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG8_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG9_REQ
+#define GCVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG9_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG9_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG10_REQ
+#define GCVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG10_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG10_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG11_REQ
+#define GCVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG11_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG11_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG12_REQ
+#define GCVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG12_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG12_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG13_REQ
+#define GCVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG13_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG13_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG14_REQ
+#define GCVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG14_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG14_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG15_REQ
+#define GCVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG15_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG15_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG16_REQ
+#define GCVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG16_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG16_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG17_REQ
+#define GCVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG17_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG17_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG0_ACK
+#define GCVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG1_ACK
+#define GCVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG2_ACK
+#define GCVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG3_ACK
+#define GCVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG4_ACK
+#define GCVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG5_ACK
+#define GCVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG6_ACK
+#define GCVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG7_ACK
+#define GCVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG8_ACK
+#define GCVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG9_ACK
+#define GCVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG10_ACK
+#define GCVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG11_ACK
+#define GCVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG12_ACK
+#define GCVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG13_ACK
+#define GCVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG14_ACK
+#define GCVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG15_ACK
+#define GCVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG16_ACK
+#define GCVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG17_ACK
+#define GCVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+
+
+// addressBlock: gc_gcvml2perfddec
+//GCVML2_PERFCOUNTER2_0_LO
+#define GCVML2_PERFCOUNTER2_0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_1_LO
+#define GCVML2_PERFCOUNTER2_1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_0_HI
+#define GCVML2_PERFCOUNTER2_0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_1_HI
+#define GCVML2_PERFCOUNTER2_1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcvml2prdec
+//GCMC_VM_L2_PERFCOUNTER_LO
+#define GCMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCMC_VM_L2_PERFCOUNTER_HI
+#define GCMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GCUTCL2_PERFCOUNTER_LO
+#define GCUTCL2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCUTCL2_PERFCOUNTER_HI
+#define GCUTCL2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCUTCL2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCUTCL2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcatcl2perfddec
+//GC_ATC_L2_PERFCOUNTER2_LO
+#define GC_ATC_L2_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GC_ATC_L2_PERFCOUNTER2_HI
+#define GC_ATC_L2_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcatcl2pfcntrdec
+//GC_ATC_L2_PERFCOUNTER_LO
+#define GC_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GC_ATC_L2_PERFCOUNTER_HI
+#define GC_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GC_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcl2tlbprdec
+//GCL2TLB_PERFCOUNTER_LO
+#define GCL2TLB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCL2TLB_PERFCOUNTER_HI
+#define GCL2TLB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCL2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCL2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcvml2perfsdec
+//GCVML2_PERFCOUNTER2_0_SELECT
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_1_SELECT
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_0_SELECT1
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_1_SELECT1
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_0_MODE
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GCVML2_PERFCOUNTER2_1_MODE
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+
+
+// addressBlock: gc_gcvml2pldec
+//GCMC_VM_L2_PERFCOUNTER0_CFG
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER1_CFG
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER2_CFG
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER3_CFG
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER4_CFG
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER5_CFG
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER6_CFG
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER7_CFG
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//GCUTCL2_PERFCOUNTER0_CFG
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER1_CFG
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER2_CFG
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER3_CFG
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER_RSLT_CNTL
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcatcl2perfsdec
+//GC_ATC_L2_PERFCOUNTER2_SELECT
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GC_ATC_L2_PERFCOUNTER2_SELECT1
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GC_ATC_L2_PERFCOUNTER2_MODE
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+
+
+// addressBlock: gc_gcatcl2pfcntldec
+//GC_ATC_L2_PERFCOUNTER0_CFG
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GC_ATC_L2_PERFCOUNTER1_CFG
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GC_ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcl2tlbpldec
+//GCL2TLB_PERFCOUNTER0_CFG
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER1_CFG
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER2_CFG
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER3_CFG
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER_RSLT_CNTL
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcvml2pspdec
+//GCUTCL2_TRANSLATION_BYPASS_BY_VMID
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__TRANS_BYPASS_VMIDS__SHIFT 0x0
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__GPA_MODE_VMIDS__SHIFT 0x10
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__TRANS_BYPASS_VMIDS_MASK 0x0000FFFFL
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__GPA_MODE_VMIDS_MASK 0xFFFF0000L
+//GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE
+#define GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE__GPU_HOST_TRANSLATION_ENABLE__SHIFT 0x0
+#define GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE__GPU_HOST_TRANSLATION_ENABLE_MASK 0x00000001L
+//GCVM_IOMMU_CONTROL_REGISTER
+#define GCVM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define GCVM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//GCVM_IOMMU_MMIO_CNTRL_1
+#define GCVM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define GCVM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//GCMC_VM_MARC_BASE_LO_0
+#define GCMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_1
+#define GCMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_2
+#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_3
+#define GCMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_4
+#define GCMC_VM_MARC_BASE_LO_4__MARC_BASE_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_4__MARC_BASE_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_5
+#define GCMC_VM_MARC_BASE_LO_5__MARC_BASE_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_5__MARC_BASE_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_6
+#define GCMC_VM_MARC_BASE_LO_6__MARC_BASE_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_6__MARC_BASE_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_7
+#define GCMC_VM_MARC_BASE_LO_7__MARC_BASE_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_7__MARC_BASE_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_8
+#define GCMC_VM_MARC_BASE_LO_8__MARC_BASE_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_8__MARC_BASE_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_9
+#define GCMC_VM_MARC_BASE_LO_9__MARC_BASE_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_9__MARC_BASE_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_10
+#define GCMC_VM_MARC_BASE_LO_10__MARC_BASE_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_10__MARC_BASE_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_11
+#define GCMC_VM_MARC_BASE_LO_11__MARC_BASE_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_11__MARC_BASE_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_12
+#define GCMC_VM_MARC_BASE_LO_12__MARC_BASE_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_12__MARC_BASE_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_13
+#define GCMC_VM_MARC_BASE_LO_13__MARC_BASE_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_13__MARC_BASE_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_14
+#define GCMC_VM_MARC_BASE_LO_14__MARC_BASE_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_14__MARC_BASE_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_15
+#define GCMC_VM_MARC_BASE_LO_15__MARC_BASE_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_15__MARC_BASE_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_HI_0
+#define GCMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_1
+#define GCMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_2
+#define GCMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_3
+#define GCMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_4
+#define GCMC_VM_MARC_BASE_HI_4__MARC_BASE_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_4__MARC_BASE_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_5
+#define GCMC_VM_MARC_BASE_HI_5__MARC_BASE_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_5__MARC_BASE_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_6
+#define GCMC_VM_MARC_BASE_HI_6__MARC_BASE_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_6__MARC_BASE_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_7
+#define GCMC_VM_MARC_BASE_HI_7__MARC_BASE_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_7__MARC_BASE_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_8
+#define GCMC_VM_MARC_BASE_HI_8__MARC_BASE_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_8__MARC_BASE_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_9
+#define GCMC_VM_MARC_BASE_HI_9__MARC_BASE_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_9__MARC_BASE_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_10
+#define GCMC_VM_MARC_BASE_HI_10__MARC_BASE_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_10__MARC_BASE_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_11
+#define GCMC_VM_MARC_BASE_HI_11__MARC_BASE_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_11__MARC_BASE_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_12
+#define GCMC_VM_MARC_BASE_HI_12__MARC_BASE_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_12__MARC_BASE_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_13
+#define GCMC_VM_MARC_BASE_HI_13__MARC_BASE_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_13__MARC_BASE_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_14
+#define GCMC_VM_MARC_BASE_HI_14__MARC_BASE_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_14__MARC_BASE_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_15
+#define GCMC_VM_MARC_BASE_HI_15__MARC_BASE_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_15__MARC_BASE_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_LO_0
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_1
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_2
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_3
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_4
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_ENABLE_4__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_READONLY_4__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_RELOC_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_ENABLE_4_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_READONLY_4_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_RELOC_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_5
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_ENABLE_5__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_READONLY_5__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_RELOC_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_ENABLE_5_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_READONLY_5_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_RELOC_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_6
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_ENABLE_6__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_READONLY_6__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_RELOC_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_ENABLE_6_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_READONLY_6_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_RELOC_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_7
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_ENABLE_7__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_READONLY_7__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_RELOC_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_ENABLE_7_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_READONLY_7_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_RELOC_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_8
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_ENABLE_8__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_READONLY_8__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_RELOC_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_ENABLE_8_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_READONLY_8_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_RELOC_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_9
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_ENABLE_9__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_READONLY_9__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_RELOC_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_ENABLE_9_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_READONLY_9_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_RELOC_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_10
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_ENABLE_10__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_READONLY_10__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_RELOC_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_ENABLE_10_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_READONLY_10_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_RELOC_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_11
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_ENABLE_11__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_READONLY_11__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_RELOC_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_ENABLE_11_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_READONLY_11_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_RELOC_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_12
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_ENABLE_12__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_READONLY_12__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_RELOC_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_ENABLE_12_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_READONLY_12_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_RELOC_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_13
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_ENABLE_13__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_READONLY_13__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_RELOC_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_ENABLE_13_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_READONLY_13_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_RELOC_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_14
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_ENABLE_14__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_READONLY_14__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_RELOC_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_ENABLE_14_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_READONLY_14_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_RELOC_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_15
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_ENABLE_15__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_READONLY_15__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_RELOC_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_ENABLE_15_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_READONLY_15_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_RELOC_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_HI_0
+#define GCMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_1
+#define GCMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_2
+#define GCMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_3
+#define GCMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_4
+#define GCMC_VM_MARC_RELOC_HI_4__MARC_RELOC_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_4__MARC_RELOC_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_5
+#define GCMC_VM_MARC_RELOC_HI_5__MARC_RELOC_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_5__MARC_RELOC_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_6
+#define GCMC_VM_MARC_RELOC_HI_6__MARC_RELOC_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_6__MARC_RELOC_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_7
+#define GCMC_VM_MARC_RELOC_HI_7__MARC_RELOC_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_7__MARC_RELOC_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_8
+#define GCMC_VM_MARC_RELOC_HI_8__MARC_RELOC_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_8__MARC_RELOC_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_9
+#define GCMC_VM_MARC_RELOC_HI_9__MARC_RELOC_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_9__MARC_RELOC_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_10
+#define GCMC_VM_MARC_RELOC_HI_10__MARC_RELOC_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_10__MARC_RELOC_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_11
+#define GCMC_VM_MARC_RELOC_HI_11__MARC_RELOC_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_11__MARC_RELOC_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_12
+#define GCMC_VM_MARC_RELOC_HI_12__MARC_RELOC_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_12__MARC_RELOC_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_13
+#define GCMC_VM_MARC_RELOC_HI_13__MARC_RELOC_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_13__MARC_RELOC_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_14
+#define GCMC_VM_MARC_RELOC_HI_14__MARC_RELOC_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_14__MARC_RELOC_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_15
+#define GCMC_VM_MARC_RELOC_HI_15__MARC_RELOC_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_15__MARC_RELOC_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_LO_0
+#define GCMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_1
+#define GCMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_2
+#define GCMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_3
+#define GCMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_4
+#define GCMC_VM_MARC_LEN_LO_4__MARC_LEN_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_4__MARC_LEN_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_5
+#define GCMC_VM_MARC_LEN_LO_5__MARC_LEN_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_5__MARC_LEN_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_6
+#define GCMC_VM_MARC_LEN_LO_6__MARC_LEN_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_6__MARC_LEN_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_7
+#define GCMC_VM_MARC_LEN_LO_7__MARC_LEN_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_7__MARC_LEN_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_8
+#define GCMC_VM_MARC_LEN_LO_8__MARC_LEN_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_8__MARC_LEN_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_9
+#define GCMC_VM_MARC_LEN_LO_9__MARC_LEN_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_9__MARC_LEN_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_10
+#define GCMC_VM_MARC_LEN_LO_10__MARC_LEN_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_10__MARC_LEN_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_11
+#define GCMC_VM_MARC_LEN_LO_11__MARC_LEN_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_11__MARC_LEN_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_12
+#define GCMC_VM_MARC_LEN_LO_12__MARC_LEN_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_12__MARC_LEN_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_13
+#define GCMC_VM_MARC_LEN_LO_13__MARC_LEN_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_13__MARC_LEN_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_14
+#define GCMC_VM_MARC_LEN_LO_14__MARC_LEN_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_14__MARC_LEN_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_15
+#define GCMC_VM_MARC_LEN_LO_15__MARC_LEN_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_15__MARC_LEN_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_HI_0
+#define GCMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_1
+#define GCMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_2
+#define GCMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_3
+#define GCMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_4
+#define GCMC_VM_MARC_LEN_HI_4__MARC_LEN_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_4__MARC_LEN_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_5
+#define GCMC_VM_MARC_LEN_HI_5__MARC_LEN_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_5__MARC_LEN_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_6
+#define GCMC_VM_MARC_LEN_HI_6__MARC_LEN_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_6__MARC_LEN_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_7
+#define GCMC_VM_MARC_LEN_HI_7__MARC_LEN_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_7__MARC_LEN_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_8
+#define GCMC_VM_MARC_LEN_HI_8__MARC_LEN_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_8__MARC_LEN_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_9
+#define GCMC_VM_MARC_LEN_HI_9__MARC_LEN_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_9__MARC_LEN_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_10
+#define GCMC_VM_MARC_LEN_HI_10__MARC_LEN_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_10__MARC_LEN_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_11
+#define GCMC_VM_MARC_LEN_HI_11__MARC_LEN_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_11__MARC_LEN_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_12
+#define GCMC_VM_MARC_LEN_HI_12__MARC_LEN_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_12__MARC_LEN_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_13
+#define GCMC_VM_MARC_LEN_HI_13__MARC_LEN_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_13__MARC_LEN_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_14
+#define GCMC_VM_MARC_LEN_HI_14__MARC_LEN_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_14__MARC_LEN_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_15
+#define GCMC_VM_MARC_LEN_HI_15__MARC_LEN_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_15__MARC_LEN_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_PFVF_MAPPING_0
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_1
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_2
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_3
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_4
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_5
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_6
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_7
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_8
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_9
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_10
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_11
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_12
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_13
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_14
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_15
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_PF_MASK 0x00010000L
+//GCUTC_TRANSLATION_FAULT_CNTL0
+#define GCUTC_TRANSLATION_FAULT_CNTL0__DEFAULT_PHYSICAL_PAGE_ADDRESS_LSB__SHIFT 0x0
+#define GCUTC_TRANSLATION_FAULT_CNTL0__DEFAULT_PHYSICAL_PAGE_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//GCUTC_TRANSLATION_FAULT_CNTL1
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_PHYSICAL_PAGE_ADDRESS_MSB__SHIFT 0x0
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_IO__SHIFT 0x4
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SPA__SHIFT 0x5
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SNOOP__SHIFT 0x6
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_PHYSICAL_PAGE_ADDRESS_MSB_MASK 0x0000000FL
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_IO_MASK 0x00000010L
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SPA_MASK 0x00000020L
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SNOOP_MASK 0x00000040L
+
+
+// addressBlock: gc_gcl2tlbpspdec
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL__ENABLE__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL__ENABLE_MASK 0x00000001L
+
+
+// addressBlock: gc_shdec
+//SPI_SHADER_PGM_RSRC4_PS
+#define SPI_SHADER_PGM_RSRC4_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_PS__INST_PREF_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_PS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC4_PS__INST_PREF_SIZE_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_PS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_PGM_CHKSUM_PS
+#define SPI_SHADER_PGM_CHKSUM_PS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_PS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC3_PS
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_PS__LDS_GROUP_SIZE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_PS__LDS_GROUP_SIZE_MASK 0x00C00000L
+//SPI_SHADER_PGM_LO_PS
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_PS
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_PS
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_PS__MEM_ORDERED__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_PS__FWD_PROGRESS__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_PS__LOAD_PROVOKING_VTX__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__MEM_ORDERED_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FWD_PROGRESS_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_PS__LOAD_PROVOKING_VTX_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
+//SPI_SHADER_PGM_RSRC2_PS
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_PS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_PS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_PS_0
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_1
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_2
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_3
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_4
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_5
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_6
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_7
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_8
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_9
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_10
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_11
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_12
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_13
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_14
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_15
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_16
+#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_17
+#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_18
+#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_19
+#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_20
+#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_21
+#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_22
+#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_23
+#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_24
+#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_25
+#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_26
+#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_27
+#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_28
+#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_29
+#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_30
+#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_31
+#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_REQ_CTRL_PS
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_PS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_PS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_PS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_PS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_PS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_PS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_PS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_PS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_PS_0
+#define SPI_SHADER_USER_ACCUM_PS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_1
+#define SPI_SHADER_USER_ACCUM_PS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_2
+#define SPI_SHADER_USER_ACCUM_PS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_3
+#define SPI_SHADER_USER_ACCUM_PS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_CHKSUM_GS
+#define SPI_SHADER_PGM_CHKSUM_GS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_GS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_GS
+#define SPI_SHADER_PGM_RSRC4_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_GS__RESERVED__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC4_GS__PH_THROTTLE_EN__SHIFT 0xe
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_THROTTLE_EN__SHIFT 0xf
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_GS__INST_PREF_SIZE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_GS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_GS__CU_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC4_GS__RESERVED_MASK 0x00003FFEL
+#define SPI_SHADER_PGM_RSRC4_GS__PH_THROTTLE_EN_MASK 0x00004000L
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_THROTTLE_EN_MASK 0x00008000L
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x007F0000L
+#define SPI_SHADER_PGM_RSRC4_GS__INST_PREF_SIZE_MASK 0x1F800000L
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_GS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_USER_DATA_ADDR_LO_GS
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_GS
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_ES_GS
+#define SPI_SHADER_PGM_LO_ES_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES_GS
+#define SPI_SHADER_PGM_HI_ES_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES_GS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_GS
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH_MASK 0xFC000000L
+//SPI_SHADER_PGM_LO_GS
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_GS
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC1_GS
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_GS__MEM_ORDERED__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_GS__FWD_PROGRESS__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_GS__WGP_MODE__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__MEM_ORDERED_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FWD_PROGRESS_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_GS__WGP_MODE_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_GS
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_GS_0
+#define SPI_SHADER_USER_DATA_GS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_1
+#define SPI_SHADER_USER_DATA_GS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_2
+#define SPI_SHADER_USER_DATA_GS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_3
+#define SPI_SHADER_USER_DATA_GS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_4
+#define SPI_SHADER_USER_DATA_GS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_5
+#define SPI_SHADER_USER_DATA_GS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_6
+#define SPI_SHADER_USER_DATA_GS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_7
+#define SPI_SHADER_USER_DATA_GS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_8
+#define SPI_SHADER_USER_DATA_GS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_9
+#define SPI_SHADER_USER_DATA_GS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_10
+#define SPI_SHADER_USER_DATA_GS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_11
+#define SPI_SHADER_USER_DATA_GS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_12
+#define SPI_SHADER_USER_DATA_GS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_13
+#define SPI_SHADER_USER_DATA_GS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_14
+#define SPI_SHADER_USER_DATA_GS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_15
+#define SPI_SHADER_USER_DATA_GS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_16
+#define SPI_SHADER_USER_DATA_GS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_17
+#define SPI_SHADER_USER_DATA_GS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_18
+#define SPI_SHADER_USER_DATA_GS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_19
+#define SPI_SHADER_USER_DATA_GS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_20
+#define SPI_SHADER_USER_DATA_GS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_21
+#define SPI_SHADER_USER_DATA_GS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_22
+#define SPI_SHADER_USER_DATA_GS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_23
+#define SPI_SHADER_USER_DATA_GS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_24
+#define SPI_SHADER_USER_DATA_GS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_25
+#define SPI_SHADER_USER_DATA_GS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_26
+#define SPI_SHADER_USER_DATA_GS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_27
+#define SPI_SHADER_USER_DATA_GS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_28
+#define SPI_SHADER_USER_DATA_GS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_29
+#define SPI_SHADER_USER_DATA_GS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_30
+#define SPI_SHADER_USER_DATA_GS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_31
+#define SPI_SHADER_USER_DATA_GS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_GS_MESHLET_DIM
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_X__SHIFT 0x0
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Y__SHIFT 0x8
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Z__SHIFT 0x10
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_THREADGROUP_SIZE__SHIFT 0x18
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_X_MASK 0x000000FFL
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Y_MASK 0x0000FF00L
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Z_MASK 0x00FF0000L
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_THREADGROUP_SIZE_MASK 0xFF000000L
+//SPI_SHADER_GS_MESHLET_EXP_ALLOC
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_VERTS__SHIFT 0x0
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_PRIMS__SHIFT 0x9
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_VERTS_MASK 0x000001FFL
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_PRIMS_MASK 0x0003FE00L
+//SPI_SHADER_REQ_CTRL_ESGS
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_ESGS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_ESGS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_ESGS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_ESGS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_ESGS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_ESGS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_ESGS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_ESGS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_ESGS_0
+#define SPI_SHADER_USER_ACCUM_ESGS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_1
+#define SPI_SHADER_USER_ACCUM_ESGS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_2
+#define SPI_SHADER_USER_ACCUM_ESGS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_3
+#define SPI_SHADER_USER_ACCUM_ESGS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_LO_ES
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_CHKSUM_HS
+#define SPI_SHADER_PGM_CHKSUM_HS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_HS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_HS
+#define SPI_SHADER_PGM_RSRC4_HS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_HS__INST_PREF_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_HS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_HS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC4_HS__INST_PREF_SIZE_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_HS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_USER_DATA_ADDR_LO_HS
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_HS
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_LS_HS
+#define SPI_SHADER_PGM_LO_LS_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS_HS
+#define SPI_SHADER_PGM_HI_LS_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS_HS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_HS
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH_MASK 0x0000FC00L
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
+//SPI_SHADER_PGM_LO_HS
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_HS
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC1_HS
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_HS__MEM_ORDERED__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_HS__FWD_PROGRESS__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_HS__WGP_MODE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__MEM_ORDERED_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FWD_PROGRESS_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_HS__WGP_MODE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
+//SPI_SHADER_PGM_RSRC2_HS
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x9
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_HS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0003FE00L
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x07FC0000L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_HS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_HS_0
+#define SPI_SHADER_USER_DATA_HS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_1
+#define SPI_SHADER_USER_DATA_HS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_2
+#define SPI_SHADER_USER_DATA_HS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_3
+#define SPI_SHADER_USER_DATA_HS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_4
+#define SPI_SHADER_USER_DATA_HS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_5
+#define SPI_SHADER_USER_DATA_HS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_6
+#define SPI_SHADER_USER_DATA_HS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_7
+#define SPI_SHADER_USER_DATA_HS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_8
+#define SPI_SHADER_USER_DATA_HS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_9
+#define SPI_SHADER_USER_DATA_HS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_10
+#define SPI_SHADER_USER_DATA_HS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_11
+#define SPI_SHADER_USER_DATA_HS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_12
+#define SPI_SHADER_USER_DATA_HS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_13
+#define SPI_SHADER_USER_DATA_HS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_14
+#define SPI_SHADER_USER_DATA_HS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_15
+#define SPI_SHADER_USER_DATA_HS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_16
+#define SPI_SHADER_USER_DATA_HS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_17
+#define SPI_SHADER_USER_DATA_HS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_18
+#define SPI_SHADER_USER_DATA_HS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_19
+#define SPI_SHADER_USER_DATA_HS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_20
+#define SPI_SHADER_USER_DATA_HS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_21
+#define SPI_SHADER_USER_DATA_HS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_22
+#define SPI_SHADER_USER_DATA_HS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_23
+#define SPI_SHADER_USER_DATA_HS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_24
+#define SPI_SHADER_USER_DATA_HS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_25
+#define SPI_SHADER_USER_DATA_HS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_26
+#define SPI_SHADER_USER_DATA_HS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_27
+#define SPI_SHADER_USER_DATA_HS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_28
+#define SPI_SHADER_USER_DATA_HS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_29
+#define SPI_SHADER_USER_DATA_HS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_30
+#define SPI_SHADER_USER_DATA_HS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_31
+#define SPI_SHADER_USER_DATA_HS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_REQ_CTRL_LSHS
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_LSHS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_LSHS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_LSHS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_LSHS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_LSHS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_LSHS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_LSHS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_LSHS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_LSHS_0
+#define SPI_SHADER_USER_ACCUM_LSHS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_1
+#define SPI_SHADER_USER_ACCUM_LSHS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_2
+#define SPI_SHADER_USER_ACCUM_LSHS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_3
+#define SPI_SHADER_USER_ACCUM_LSHS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_LO_LS
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
+//COMPUTE_DISPATCH_INITIATOR
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
+#define COMPUTE_DISPATCH_INITIATOR__TUNNEL_ENABLE__SHIFT 0xd
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
+#define COMPUTE_DISPATCH_INITIATOR__CS_W32_EN__SHIFT 0xf
+#define COMPUTE_DISPATCH_INITIATOR__AMP_SHADER_EN__SHIFT 0x10
+#define COMPUTE_DISPATCH_INITIATOR__DISABLE_DISP_PREMPT_EN__SHIFT 0x11
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__TUNNEL_ENABLE_MASK 0x00002000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+#define COMPUTE_DISPATCH_INITIATOR__CS_W32_EN_MASK 0x00008000L
+#define COMPUTE_DISPATCH_INITIATOR__AMP_SHADER_EN_MASK 0x00010000L
+#define COMPUTE_DISPATCH_INITIATOR__DISABLE_DISP_PREMPT_EN_MASK 0x00020000L
+//COMPUTE_DIM_X
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Y
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Z
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_START_X
+#define COMPUTE_START_X__START__SHIFT 0x0
+#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Y
+#define COMPUTE_START_Y__START__SHIFT 0x0
+#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Z
+#define COMPUTE_START_Z__START__SHIFT 0x0
+#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
+//COMPUTE_NUM_THREAD_X
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Y
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Z
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_PIPELINESTAT_ENABLE
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
+//COMPUTE_PERFCOUNT_ENABLE
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
+//COMPUTE_PGM_LO
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
+#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_PGM_HI
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_PKT_ADDR_LO
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_PKT_ADDR_HI
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_LO
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_HI
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//COMPUTE_PGM_RSRC1
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x16
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x19
+#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
+#define COMPUTE_PGM_RSRC1__WGP_MODE__SHIFT 0x1d
+#define COMPUTE_PGM_RSRC1__MEM_ORDERED__SHIFT 0x1e
+#define COMPUTE_PGM_RSRC1__FWD_PROGRESS__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
+#define COMPUTE_PGM_RSRC1__WGP_MODE_MASK 0x20000000L
+#define COMPUTE_PGM_RSRC1__MEM_ORDERED_MASK 0x40000000L
+#define COMPUTE_PGM_RSRC1__FWD_PROGRESS_MASK 0x80000000L
+//COMPUTE_PGM_RSRC2
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
+//COMPUTE_VMID
+#define COMPUTE_VMID__DATA__SHIFT 0x0
+#define COMPUTE_VMID__DATA_MASK 0x0000000FL
+//COMPUTE_RESOURCE_LIMITS
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+//COMPUTE_DESTINATION_EN_SE0
+#define COMPUTE_DESTINATION_EN_SE0__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE0__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DESTINATION_EN_SE1
+#define COMPUTE_DESTINATION_EN_SE1__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE1__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE1
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_TMPRING_SIZE
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x07FFF000L
+//COMPUTE_DESTINATION_EN_SE2
+#define COMPUTE_DESTINATION_EN_SE2__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE2__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE2
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DESTINATION_EN_SE3
+#define COMPUTE_DESTINATION_EN_SE3__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE3__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE3
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_RESTART_X
+#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Y
+#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Z
+#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_THREAD_TRACE_ENABLE
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
+//COMPUTE_MISC_RESERVED
+#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
+#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
+#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
+#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000007L
+#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x00000008L
+#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x00000010L
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
+//COMPUTE_DISPATCH_ID
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
+//COMPUTE_THREADGROUP_ID
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
+//COMPUTE_REQ_CTRL
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_EN__SHIFT 0x0
+#define COMPUTE_REQ_CTRL__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define COMPUTE_REQ_CTRL__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define COMPUTE_REQ_CTRL__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define COMPUTE_REQ_CTRL__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define COMPUTE_REQ_CTRL__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define COMPUTE_REQ_CTRL__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define COMPUTE_REQ_CTRL__DEDICATED_PREALLOCATION_BUFFER_LIMIT__SHIFT 0x14
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_EN_MASK 0x00000001L
+#define COMPUTE_REQ_CTRL__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define COMPUTE_REQ_CTRL__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define COMPUTE_REQ_CTRL__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define COMPUTE_REQ_CTRL__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define COMPUTE_REQ_CTRL__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define COMPUTE_REQ_CTRL__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+#define COMPUTE_REQ_CTRL__DEDICATED_PREALLOCATION_BUFFER_LIMIT_MASK 0x07F00000L
+//COMPUTE_USER_ACCUM_0
+#define COMPUTE_USER_ACCUM_0__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_0__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_1
+#define COMPUTE_USER_ACCUM_1__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_1__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_2
+#define COMPUTE_USER_ACCUM_2__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_2__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_3
+#define COMPUTE_USER_ACCUM_3__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_3__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_PGM_RSRC3
+#define COMPUTE_PGM_RSRC3__SHARED_VGPR_CNT__SHIFT 0x0
+#define COMPUTE_PGM_RSRC3__INST_PREF_SIZE__SHIFT 0x4
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START__SHIFT 0xa
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END__SHIFT 0xb
+#define COMPUTE_PGM_RSRC3__IMAGE_OP__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC3__SHARED_VGPR_CNT_MASK 0x0000000FL
+#define COMPUTE_PGM_RSRC3__INST_PREF_SIZE_MASK 0x000003F0L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END_MASK 0x00000800L
+#define COMPUTE_PGM_RSRC3__IMAGE_OP_MASK 0x80000000L
+//COMPUTE_DDID_INDEX
+#define COMPUTE_DDID_INDEX__INDEX__SHIFT 0x0
+#define COMPUTE_DDID_INDEX__INDEX_MASK 0x000007FFL
+//COMPUTE_SHADER_CHKSUM
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM__SHIFT 0x0
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE4
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE5
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE6
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE7
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DISPATCH_INTERLEAVE
+#define COMPUTE_DISPATCH_INTERLEAVE__INTERLEAVE__SHIFT 0x0
+#define COMPUTE_DISPATCH_INTERLEAVE__INTERLEAVE_MASK 0x000003FFL
+//COMPUTE_RELAUNCH
+#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
+//COMPUTE_WAVE_RESTORE_ADDR_LO
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
+//COMPUTE_WAVE_RESTORE_ADDR_HI
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
+//COMPUTE_RELAUNCH2
+#define COMPUTE_RELAUNCH2__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH2__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH2__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH2__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH2__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH2__IS_STATE_MASK 0x80000000L
+//COMPUTE_USER_DATA_0
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_1
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_2
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_3
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_4
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_5
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_6
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_7
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_8
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_9
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_10
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_11
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_12
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_13
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_14
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_15
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_TUNNEL
+#define COMPUTE_DISPATCH_TUNNEL__OFF_DELAY__SHIFT 0x0
+#define COMPUTE_DISPATCH_TUNNEL__IMMEDIATE__SHIFT 0xa
+#define COMPUTE_DISPATCH_TUNNEL__OFF_DELAY_MASK 0x000003FFL
+#define COMPUTE_DISPATCH_TUNNEL__IMMEDIATE_MASK 0x00000400L
+//COMPUTE_DISPATCH_END
+#define COMPUTE_DISPATCH_END__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_END__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_NOWHERE
+#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
+#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
+//SH_RESERVED_REG0
+#define SH_RESERVED_REG0__DATA__SHIFT 0x0
+#define SH_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//SH_RESERVED_REG1
+#define SH_RESERVED_REG1__DATA__SHIFT 0x0
+#define SH_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cppdec
+//CP_CU_MASK_ADDR_LO
+#define CP_CU_MASK_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_CU_MASK_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_CU_MASK_ADDR_HI
+#define CP_CU_MASK_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_CU_MASK_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_CU_MASK_CNTL
+#define CP_CU_MASK_CNTL__POLICY__SHIFT 0x0
+#define CP_CU_MASK_CNTL__POLICY_MASK 0x00000001L
+//CP_EOPQ_WAIT_TIME
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
+//CP_CPC_MGCG_SYNC_CNTL
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
+//CPC_INT_INFO
+#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
+#define CPC_INT_INFO__TYPE__SHIFT 0x10
+#define CPC_INT_INFO__VMID__SHIFT 0x14
+#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
+#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
+#define CPC_INT_INFO__TYPE_MASK 0x00010000L
+#define CPC_INT_INFO__VMID_MASK 0x00F00000L
+#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
+//CP_VIRT_STATUS
+#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
+#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
+//CPC_INT_ADDR
+#define CPC_INT_ADDR__ADDR__SHIFT 0x0
+#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CPC_INT_PASID
+#define CPC_INT_PASID__PASID__SHIFT 0x0
+#define CPC_INT_PASID__BYPASS_PASID__SHIFT 0x10
+#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
+#define CPC_INT_PASID__BYPASS_PASID_MASK 0x00010000L
+//CP_GFX_ERROR
+#define CP_GFX_ERROR__ME_INSTR_CACHE_UTCL1_ERROR__SHIFT 0x0
+#define CP_GFX_ERROR__PFP_INSTR_CACHE_UTCL1_ERROR__SHIFT 0x1
+#define CP_GFX_ERROR__DDID_DRAW_UTCL1_ERROR__SHIFT 0x2
+#define CP_GFX_ERROR__DDID_DISPATCH_UTCL1_ERROR__SHIFT 0x3
+#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_GFX_ERROR__DATA_FETCHER_UTCL1_ERROR__SHIFT 0x6
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
+#define CP_GFX_ERROR__RESERVED__SHIFT 0x1f
+#define CP_GFX_ERROR__ME_INSTR_CACHE_UTCL1_ERROR_MASK 0x00000001L
+#define CP_GFX_ERROR__PFP_INSTR_CACHE_UTCL1_ERROR_MASK 0x00000002L
+#define CP_GFX_ERROR__DDID_DRAW_UTCL1_ERROR_MASK 0x00000004L
+#define CP_GFX_ERROR__DDID_DISPATCH_UTCL1_ERROR_MASK 0x00000008L
+#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_GFX_ERROR__DATA_FETCHER_UTCL1_ERROR_MASK 0x00000040L
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
+#define CP_GFX_ERROR__RESERVED_MASK 0x80000000L
+//CPG_UTCL1_CNTL
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPC_UTCL1_CNTL
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPF_UTCL1_CNTL
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
+//CP_AQL_SMM_STATUS
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
+//CP_RB0_BASE
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB_BASE
+#define CP_RB_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB0_CNTL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB0_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB0_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB0_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB0_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB0_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB0_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB0_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB0_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB0_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB0_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB0_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_CNTL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_RPTR_WR
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
+//CP_RB0_RPTR_ADDR
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB_RPTR_ADDR
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB0_RPTR_ADDR_HI
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_RPTR_ADDR_HI
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_BUFSZ_MASK
+#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_BUFSZ_MASK
+#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//GC_PRIV_MODE
+#define GC_PRIV_MODE__MC_PRIV_MODE__SHIFT 0x0
+#define GC_PRIV_MODE__MC_PRIV_MODE_MASK 0x00000001L
+//CP_INT_CNTL
+#define CP_INT_CNTL__RESUME_INT_ENABLE__SHIFT 0x8
+#define CP_INT_CNTL__SUSPEND_INT_ENABLE__SHIFT 0x9
+#define CP_INT_CNTL__DMA_WATCH_INT_ENABLE__SHIFT 0xa
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL__RESUME_INT_ENABLE_MASK 0x00000100L
+#define CP_INT_CNTL__SUSPEND_INT_ENABLE_MASK 0x00000200L
+#define CP_INT_CNTL__DMA_WATCH_INT_ENABLE_MASK 0x00000400L
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS
+#define CP_INT_STATUS__RESUME_INT_STAT__SHIFT 0x8
+#define CP_INT_STATUS__SUSPEND_INT_STAT__SHIFT 0x9
+#define CP_INT_STATUS__DMA_WATCH_INT_STAT__SHIFT 0xa
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS__RESUME_INT_STAT_MASK 0x00000100L
+#define CP_INT_STATUS__SUSPEND_INT_STAT_MASK 0x00000200L
+#define CP_INT_STATUS__DMA_WATCH_INT_STAT_MASK 0x00000400L
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_DEVICE_ID
+#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
+//CP_ME0_PIPE_PRIORITY_CNTS
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_RING_PRIORITY_CNTS
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME0_PIPE0_PRIORITY
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING0_PRIORITY
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE1_PRIORITY
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING1_PRIORITY
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_FATAL_ERROR
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
+#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
+#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
+//CP_RB_VMID
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
+#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
+//CP_ME0_PIPE0_VMID
+#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
+//CP_ME0_PIPE1_VMID
+#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
+//CP_RB0_WPTR
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB0_WPTR_HI
+#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR_HI
+#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR_HI
+#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_PROCESS_QUANTUM
+#define CP_PROCESS_QUANTUM__QUANTUM_DURATION__SHIFT 0x0
+#define CP_PROCESS_QUANTUM__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_PROCESS_QUANTUM__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_PROCESS_QUANTUM__QUANTUM_EN__SHIFT 0x1f
+#define CP_PROCESS_QUANTUM__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_PROCESS_QUANTUM__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_PROCESS_QUANTUM__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_PROCESS_QUANTUM__QUANTUM_EN_MASK 0x80000000L
+//CP_RB_DOORBELL_RANGE_LOWER
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x00000FFCL
+//CP_RB_DOORBELL_RANGE_UPPER
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x00000FFCL
+//CP_MEC_DOORBELL_RANGE_LOWER
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x00000FFCL
+//CP_MEC_DOORBELL_RANGE_UPPER
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x00000FFCL
+//CPG_UTCL1_ERROR
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CPC_UTCL1_ERROR
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CP_RB1_BASE
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB1_CNTL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB1_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB1_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB1_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB1_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB1_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB1_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB1_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB1_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB1_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB1_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB1_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB1_RPTR_ADDR
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB1_RPTR_ADDR_HI
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB1_BUFSZ_MASK
+#define CP_RB1_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB1_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_INT_CNTL_RING0
+#define CP_INT_CNTL_RING0__RESUME_INT_ENABLE__SHIFT 0x8
+#define CP_INT_CNTL_RING0__SUSPEND_INT_ENABLE__SHIFT 0x9
+#define CP_INT_CNTL_RING0__DMA_WATCH_INT_ENABLE__SHIFT 0xa
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING0__RESUME_INT_ENABLE_MASK 0x00000100L
+#define CP_INT_CNTL_RING0__SUSPEND_INT_ENABLE_MASK 0x00000200L
+#define CP_INT_CNTL_RING0__DMA_WATCH_INT_ENABLE_MASK 0x00000400L
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING1
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS_RING0
+#define CP_INT_STATUS_RING0__RESUME_INT_STAT__SHIFT 0x8
+#define CP_INT_STATUS_RING0__SUSPEND_INT_STAT__SHIFT 0x9
+#define CP_INT_STATUS_RING0__DMA_WATCH_INT_STAT__SHIFT 0xa
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING0__RESUME_INT_STAT_MASK 0x00000100L
+#define CP_INT_STATUS_RING0__SUSPEND_INT_STAT_MASK 0x00000200L
+#define CP_INT_STATUS_RING0__DMA_WATCH_INT_STAT_MASK 0x00000400L
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING1
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_ME_F32_INTERRUPT
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT__SHIFT 0x1
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2__SHIFT 0x2
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3__SHIFT 0x3
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT_MASK 0x00000002L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2_MASK 0x00000004L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3_MASK 0x00000008L
+//CP_PFP_F32_INTERRUPT
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3__SHIFT 0x3
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3_MASK 0x00000008L
+//CP_MEC1_F32_INTERRUPT
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INTERRUPT
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_PWR_CNTL
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE0__SHIFT 0x14
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE1__SHIFT 0x15
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE2__SHIFT 0x16
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE3__SHIFT 0x17
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE0_MASK 0x00100000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE1_MASK 0x00200000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE2_MASK 0x00400000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE3_MASK 0x00800000L
+//CP_ECC_FIRSTOCCURRENCE
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
+#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
+#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
+#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
+#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
+//CP_ECC_FIRSTOCCURRENCE_RING0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING1
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
+//GB_EDC_MODE
+#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
+#define GB_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define GB_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
+#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
+#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
+#define GB_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define GB_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
+//CP_DEBUG
+#define CP_DEBUG__PERFMON_RING_SEL__SHIFT 0x0
+#define CP_DEBUG__DEBUG_BUS_SELECT_BITS__SHIFT 0x2
+#define CP_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0x8
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CP_DEBUG__PACKET_FILTER_DISABLE__SHIFT 0xa
+#define CP_DEBUG__NOT_EOP_PREEMPT_DISABLE__SHIFT 0xb
+#define CP_DEBUG__CPG_CHIU_RO_DISABLE__SHIFT 0xc
+#define CP_DEBUG__CPG_GCR_CNTL_BYPASS__SHIFT 0xd
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE__SHIFT 0xe
+#define CP_DEBUG__CPG_UTCL1_ERROR_HALT_DISABLE__SHIFT 0xf
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR__SHIFT 0x10
+#define CP_DEBUG__CPG_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_DEBUG__PRIV_VIOLATION_WRITE_DISABLE__SHIFT 0x14
+#define CP_DEBUG__CPG_CHIU_GUS_DISABLE__SHIFT 0x15
+#define CP_DEBUG__INTERRUPT_DISABLE__SHIFT 0x16
+#define CP_DEBUG__PREDICATE_DISABLE__SHIFT 0x17
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_DEBUG__CPG_CHIU_MTYPE_OVERRIDE__SHIFT 0x1b
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE__SHIFT 0x1e
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE__SHIFT 0x1f
+#define CP_DEBUG__PERFMON_RING_SEL_MASK 0x00000003L
+#define CP_DEBUG__DEBUG_BUS_SELECT_BITS_MASK 0x000000FCL
+#define CP_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00000100L
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define CP_DEBUG__PACKET_FILTER_DISABLE_MASK 0x00000400L
+#define CP_DEBUG__NOT_EOP_PREEMPT_DISABLE_MASK 0x00000800L
+#define CP_DEBUG__CPG_CHIU_RO_DISABLE_MASK 0x00001000L
+#define CP_DEBUG__CPG_GCR_CNTL_BYPASS_MASK 0x00002000L
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE_MASK 0x00004000L
+#define CP_DEBUG__CPG_UTCL1_ERROR_HALT_DISABLE_MASK 0x00008000L
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR_MASK 0x00070000L
+#define CP_DEBUG__CPG_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_DEBUG__PRIV_VIOLATION_WRITE_DISABLE_MASK 0x00100000L
+#define CP_DEBUG__CPG_CHIU_GUS_DISABLE_MASK 0x00200000L
+#define CP_DEBUG__INTERRUPT_DISABLE_MASK 0x00400000L
+#define CP_DEBUG__PREDICATE_DISABLE_MASK 0x00800000L
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_DEBUG__CPG_CHIU_MTYPE_OVERRIDE_MASK 0x08000000L
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE_MASK 0x40000000L
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE_MASK 0x80000000L
+//CP_CPF_DEBUG
+#define CP_CPF_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0xe
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE__SHIFT 0x10
+#define CP_CPF_DEBUG__CPF_GCR_CNTL_BYPASS__SHIFT 0x11
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPF_DEBUG__CPF_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_DELAY_OVERRIDE__SHIFT 0x16
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_OVERRIDE__SHIFT 0x17
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPF_DEBUG__CPF_CHIU_NOALLOC_OVERRIDE__SHIFT 0x1a
+#define CP_CPF_DEBUG__CE_FETCHER_DISABLE__SHIFT 0x1b
+#define CP_CPF_DEBUG__CPF_CHIU_GUS_DISABLE__SHIFT 0x1c
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS__SHIFT 0x1d
+#define CP_CPF_DEBUG__CPF_CHIU_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPF_DEBUG__DBGU_TRIGGER__SHIFT 0x1f
+#define CP_CPF_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00004000L
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE_MASK 0x00010000L
+#define CP_CPF_DEBUG__CPF_GCR_CNTL_BYPASS_MASK 0x00020000L
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPF_DEBUG__CPF_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_DELAY_OVERRIDE_MASK 0x00400000L
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_OVERRIDE_MASK 0x00800000L
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPF_DEBUG__CPF_CHIU_NOALLOC_OVERRIDE_MASK 0x04000000L
+#define CP_CPF_DEBUG__CE_FETCHER_DISABLE_MASK 0x08000000L
+#define CP_CPF_DEBUG__CPF_CHIU_GUS_DISABLE_MASK 0x10000000L
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS_MASK 0x20000000L
+#define CP_CPF_DEBUG__CPF_CHIU_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPF_DEBUG__DBGU_TRIGGER_MASK 0x80000000L
+//CP_CPC_DEBUG
+#define CP_CPC_DEBUG__PIPE_SELECT__SHIFT 0x0
+#define CP_CPC_DEBUG__ME_SELECT__SHIFT 0x2
+#define CP_CPC_DEBUG__ADC_INTERLEAVE_DISABLE__SHIFT 0x4
+#define CP_CPC_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0xe
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE__SHIFT 0xf
+#define CP_CPC_DEBUG__CPC_CHIU_NOALLOC_OVERRIDE__SHIFT 0x10
+#define CP_CPC_DEBUG__CPC_GCR_CNTL_BYPASS__SHIFT 0x11
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPC_DEBUG__CPC_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_CPC_DEBUG__PRIV_VIOLATION_WRITE_DISABLE__SHIFT 0x14
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE__SHIFT 0x15
+#define CP_CPC_DEBUG__INTERRUPT_DISABLE__SHIFT 0x16
+#define CP_CPC_DEBUG__CPC_CHIU_RO_DISABLE__SHIFT 0x17
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_CPC_DEBUG__CPC_CHIU_GUS_DISABLE__SHIFT 0x1b
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_CPC_DEBUG__CPC_CHIU_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE__SHIFT 0x1f
+#define CP_CPC_DEBUG__PIPE_SELECT_MASK 0x00000003L
+#define CP_CPC_DEBUG__ME_SELECT_MASK 0x00000004L
+#define CP_CPC_DEBUG__ADC_INTERLEAVE_DISABLE_MASK 0x00000010L
+#define CP_CPC_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00004000L
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE_MASK 0x00008000L
+#define CP_CPC_DEBUG__CPC_CHIU_NOALLOC_OVERRIDE_MASK 0x00010000L
+#define CP_CPC_DEBUG__CPC_GCR_CNTL_BYPASS_MASK 0x00020000L
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPC_DEBUG__CPC_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_CPC_DEBUG__PRIV_VIOLATION_WRITE_DISABLE_MASK 0x00100000L
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE_MASK 0x00200000L
+#define CP_CPC_DEBUG__INTERRUPT_DISABLE_MASK 0x00400000L
+#define CP_CPC_DEBUG__CPC_CHIU_RO_DISABLE_MASK 0x00800000L
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_CPC_DEBUG__CPC_CHIU_GUS_DISABLE_MASK 0x08000000L
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_CPC_DEBUG__CPC_CHIU_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
+#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
+#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL1
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
+//CP_ME1_PIPE0_INT_CNTL
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_CNTL
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_CNTL
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_CNTL
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_CNTL
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_CNTL
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_CNTL
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_CNTL
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE0_INT_STATUS
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_STATUS
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_STATUS
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_STATUS
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_STATUS
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_STATUS
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_STATUS
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_STATUS
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_GFX_QUEUE_INDEX
+#define CP_GFX_QUEUE_INDEX__QUEUE_ACCESS__SHIFT 0x0
+#define CP_GFX_QUEUE_INDEX__PIPE_ID__SHIFT 0x4
+#define CP_GFX_QUEUE_INDEX__QUEUE_ID__SHIFT 0x8
+#define CP_GFX_QUEUE_INDEX__QUEUE_ACCESS_MASK 0x00000001L
+#define CP_GFX_QUEUE_INDEX__PIPE_ID_MASK 0x00000030L
+#define CP_GFX_QUEUE_INDEX__QUEUE_ID_MASK 0x00000700L
+//CC_GC_EDC_CONFIG
+#define CC_GC_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_GC_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_PIPE_PRIORITY_CNTS
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME1_PIPE0_PRIORITY
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE1_PRIORITY
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE2_PRIORITY
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE3_PRIORITY
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE_PRIORITY_CNTS
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME2_PIPE0_PRIORITY
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE1_PRIORITY
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE2_PRIORITY
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE3_PRIORITY
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_PFP_PRGRM_CNTR_START
+#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_ME_PRGRM_CNTR_START
+#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MEC1_PRGRM_CNTR_START
+#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x000FFFFFL
+//CP_MEC2_PRGRM_CNTR_START
+#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x000FFFFFL
+//CP_PFP_INTR_ROUTINE_START
+#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_ME_INTR_ROUTINE_START
+#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_MEC1_INTR_ROUTINE_START
+#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x000FFFFFL
+//CP_MEC2_INTR_ROUTINE_START
+#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x000FFFFFL
+//CP_CONTEXT_CNTL
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_GE_CNTX__SHIFT 0x0
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_GE_CNTX__SHIFT 0x10
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_GE_CNTX_MASK 0x00000007L
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_GE_CNTX_MASK 0x00070000L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
+//CP_MAX_CONTEXT
+#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
+#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
+//CP_IQ_WAIT_TIME1
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
+#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
+//CP_IQ_WAIT_TIME2
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
+#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
+#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
+//CP_RB0_BASE_HI
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_RB1_BASE_HI
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_VMID_RESET
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
+#define CP_VMID_RESET__PIPE0_QUEUES__SHIFT 0x10
+#define CP_VMID_RESET__PIPE1_QUEUES__SHIFT 0x18
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_RESET__PIPE0_QUEUES_MASK 0x00FF0000L
+#define CP_VMID_RESET__PIPE1_QUEUES_MASK 0xFF000000L
+//CPC_INT_CNTL
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CPC_INT_STATUS
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_VMID_PREEMPT
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
+#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
+//CPC_INT_CNTX_ID
+#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PQ_STATUS
+#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN__SHIFT 0x2
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MODE__SHIFT 0x3
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN_MASK 0x00000004L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MODE_MASK 0x00000008L
+//CP_PFP_PRGRM_CNTR_START_HI
+#define CP_PFP_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MAX_DRAW_COUNT
+#define CP_MAX_DRAW_COUNT__MAX_DRAW_COUNT__SHIFT 0x0
+#define CP_MAX_DRAW_COUNT__MAX_DRAW_COUNT_MASK 0xFFFFFFFFL
+//CP_MEC1_F32_INT_DIS
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INT_DIS
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_VMID_STATUS
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
+//CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CPC_SUSPEND_CTX_SAVE_CONTROL
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__POLICY_MASK 0x00000018L
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CPC_SUSPEND_CNTL_STACK_OFFSET
+#define CPC_SUSPEND_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CPC_SUSPEND_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CPC_SUSPEND_CNTL_STACK_SIZE
+#define CPC_SUSPEND_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CPC_SUSPEND_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CPC_SUSPEND_WG_STATE_OFFSET
+#define CPC_SUSPEND_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CPC_SUSPEND_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CPC_SUSPEND_CTX_SAVE_SIZE
+#define CPC_SUSPEND_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CPC_SUSPEND_CTX_SAVE_SIZE__SIZE_MASK 0x03FFF000L
+//CPC_OS_PIPES
+#define CPC_OS_PIPES__OS_PIPES__SHIFT 0x0
+#define CPC_OS_PIPES__OS_PIPES_MASK 0x000000FFL
+//CP_SUSPEND_RESUME_REQ
+#define CP_SUSPEND_RESUME_REQ__SUSPEND_REQ__SHIFT 0x0
+#define CP_SUSPEND_RESUME_REQ__RESUME_REQ__SHIFT 0x1
+#define CP_SUSPEND_RESUME_REQ__SUSPEND_REQ_MASK 0x00000001L
+#define CP_SUSPEND_RESUME_REQ__RESUME_REQ_MASK 0x00000002L
+//CP_SUSPEND_CNTL
+#define CP_SUSPEND_CNTL__SUSPEND_MODE__SHIFT 0x0
+#define CP_SUSPEND_CNTL__SUSPEND_ENABLE__SHIFT 0x1
+#define CP_SUSPEND_CNTL__RESUME_LOCK__SHIFT 0x2
+#define CP_SUSPEND_CNTL__ACE_SUSPEND_ACTIVE__SHIFT 0x3
+#define CP_SUSPEND_CNTL__SUSPEND_MODE_MASK 0x00000001L
+#define CP_SUSPEND_CNTL__SUSPEND_ENABLE_MASK 0x00000002L
+#define CP_SUSPEND_CNTL__RESUME_LOCK_MASK 0x00000004L
+#define CP_SUSPEND_CNTL__ACE_SUSPEND_ACTIVE_MASK 0x00000008L
+//CP_IQ_WAIT_TIME3
+#define CP_IQ_WAIT_TIME3__SUSPEND_QUE__SHIFT 0x0
+#define CP_IQ_WAIT_TIME3__SUSPEND_QUE_MASK 0x000000FFL
+//CPC_DDID_BASE_ADDR_LO
+#define CPC_DDID_BASE_ADDR_LO__BASE_ADDR_LO__SHIFT 0x6
+#define CPC_DDID_BASE_ADDR_LO__BASE_ADDR_LO_MASK 0xFFFFFFC0L
+//CP_DDID_BASE_ADDR_LO
+#define CP_DDID_BASE_ADDR_LO__BASE_ADDR_LO__SHIFT 0x6
+#define CP_DDID_BASE_ADDR_LO__BASE_ADDR_LO_MASK 0xFFFFFFC0L
+//CPC_DDID_BASE_ADDR_HI
+#define CPC_DDID_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CPC_DDID_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_DDID_BASE_ADDR_HI
+#define CP_DDID_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_DDID_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CPC_DDID_CNTL
+#define CPC_DDID_CNTL__THRESHOLD__SHIFT 0x0
+#define CPC_DDID_CNTL__SIZE__SHIFT 0x10
+#define CPC_DDID_CNTL__NO_RING_MEMORY__SHIFT 0x13
+#define CPC_DDID_CNTL__POLICY__SHIFT 0x1c
+#define CPC_DDID_CNTL__MODE__SHIFT 0x1e
+#define CPC_DDID_CNTL__ENABLE__SHIFT 0x1f
+#define CPC_DDID_CNTL__THRESHOLD_MASK 0x000000FFL
+#define CPC_DDID_CNTL__SIZE_MASK 0x00010000L
+#define CPC_DDID_CNTL__NO_RING_MEMORY_MASK 0x00080000L
+#define CPC_DDID_CNTL__POLICY_MASK 0x30000000L
+#define CPC_DDID_CNTL__MODE_MASK 0x40000000L
+#define CPC_DDID_CNTL__ENABLE_MASK 0x80000000L
+//CP_DDID_CNTL
+#define CP_DDID_CNTL__THRESHOLD__SHIFT 0x0
+#define CP_DDID_CNTL__SIZE__SHIFT 0x10
+#define CP_DDID_CNTL__NO_RING_MEMORY__SHIFT 0x13
+#define CP_DDID_CNTL__VMID__SHIFT 0x14
+#define CP_DDID_CNTL__VMID_SEL__SHIFT 0x18
+#define CP_DDID_CNTL__POLICY__SHIFT 0x1c
+#define CP_DDID_CNTL__MODE__SHIFT 0x1e
+#define CP_DDID_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DDID_CNTL__THRESHOLD_MASK 0x000000FFL
+#define CP_DDID_CNTL__SIZE_MASK 0x00010000L
+#define CP_DDID_CNTL__NO_RING_MEMORY_MASK 0x00080000L
+#define CP_DDID_CNTL__VMID_MASK 0x00F00000L
+#define CP_DDID_CNTL__VMID_SEL_MASK 0x01000000L
+#define CP_DDID_CNTL__POLICY_MASK 0x30000000L
+#define CP_DDID_CNTL__MODE_MASK 0x40000000L
+#define CP_DDID_CNTL__ENABLE_MASK 0x80000000L
+//CP_GFX_DDID_INFLIGHT_COUNT
+#define CP_GFX_DDID_INFLIGHT_COUNT__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_INFLIGHT_COUNT__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_WPTR
+#define CP_GFX_DDID_WPTR__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_WPTR__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_RPTR
+#define CP_GFX_DDID_RPTR__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_RPTR__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_DELTA_RPT_COUNT
+#define CP_GFX_DDID_DELTA_RPT_COUNT__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_DELTA_RPT_COUNT__COUNT_MASK 0x000000FFL
+//CP_GFX_HPD_STATUS0
+#define CP_GFX_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_GFX_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_GFX_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_GFX_HPD_STATUS0__FORCE_MAPPED_QUEUE__SHIFT 0x10
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_GFX_HPD_STATUS0__SUSPEND_REQ__SHIFT 0x1c
+#define CP_GFX_HPD_STATUS0__ENABLE_OVERIDE_QUEUEID__SHIFT 0x1d
+#define CP_GFX_HPD_STATUS0__OVERIDE_QUEUEID__SHIFT 0x1e
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_GFX_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_GFX_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_GFX_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_GFX_HPD_STATUS0__FORCE_MAPPED_QUEUE_MASK 0x00070000L
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_GFX_HPD_STATUS0__SUSPEND_REQ_MASK 0x10000000L
+#define CP_GFX_HPD_STATUS0__ENABLE_OVERIDE_QUEUEID_MASK 0x20000000L
+#define CP_GFX_HPD_STATUS0__OVERIDE_QUEUEID_MASK 0x40000000L
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+//CP_GFX_HPD_CONTROL0
+#define CP_GFX_HPD_CONTROL0__SUSPEND_ENABLE__SHIFT 0x0
+#define CP_GFX_HPD_CONTROL0__PIPE_HOLDING__SHIFT 0x4
+#define CP_GFX_HPD_CONTROL0__RB_CE_ROQ_CNTL__SHIFT 0x8
+#define CP_GFX_HPD_CONTROL0__SUSPEND_ENABLE_MASK 0x00000001L
+#define CP_GFX_HPD_CONTROL0__PIPE_HOLDING_MASK 0x00000010L
+#define CP_GFX_HPD_CONTROL0__RB_CE_ROQ_CNTL_MASK 0x00000100L
+//CP_GFX_HPD_OSPRE_FENCE_ADDR_LO
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_GFX_HPD_OSPRE_FENCE_ADDR_HI
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_GFX_HPD_OSPRE_FENCE_DATA_LO
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_GFX_HPD_OSPRE_FENCE_DATA_HI
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_GFX_INDEX_MUTEX
+#define CP_GFX_INDEX_MUTEX__REQUEST__SHIFT 0x0
+#define CP_GFX_INDEX_MUTEX__CLIENTID__SHIFT 0x1
+#define CP_GFX_INDEX_MUTEX__REQUEST_MASK 0x00000001L
+#define CP_GFX_INDEX_MUTEX__CLIENTID_MASK 0x0000000EL
+//CP_ME_PRGRM_CNTR_START_HI
+#define CP_ME_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_PFP_INTR_ROUTINE_START_HI
+#define CP_PFP_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START_HI__IR_START_MASK 0x3FFFFFFFL
+//CP_ME_INTR_ROUTINE_START_HI
+#define CP_ME_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START_HI__IR_START_MASK 0x3FFFFFFFL
+//CP_GFX_MQD_BASE_ADDR
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_MQD_BASE_ADDR_HI
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_GFX_MQD_BASE_ADDR_HI__APP_VMID__SHIFT 0x1c
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+#define CP_GFX_MQD_BASE_ADDR_HI__APP_VMID_MASK 0xF0000000L
+//CP_GFX_HQD_ACTIVE
+#define CP_GFX_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_GFX_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_GFX_HQD_VMID
+#define CP_GFX_HQD_VMID__VMID__SHIFT 0x0
+#define CP_GFX_HQD_VMID__VMID_MASK 0x0000000FL
+//CP_GFX_HQD_QUEUE_PRIORITY
+#define CP_GFX_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_GFX_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_GFX_HQD_QUANTUM
+#define CP_GFX_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_GFX_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x3
+#define CP_GFX_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_GFX_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_GFX_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000018L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x0000FF00L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_GFX_HQD_BASE
+#define CP_GFX_HQD_BASE__RB_BASE__SHIFT 0x0
+#define CP_GFX_HQD_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_BASE_HI
+#define CP_GFX_HQD_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_GFX_HQD_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_GFX_HQD_RPTR
+#define CP_GFX_HQD_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_GFX_HQD_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_GFX_HQD_RPTR_ADDR
+#define CP_GFX_HQD_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_GFX_HQD_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_HQD_RPTR_ADDR_HI
+#define CP_GFX_HQD_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_GFX_HQD_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_WPTR_POLL_ADDR_LO
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_RB_WPTR_POLL_ADDR_HI
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_DOORBELL_CONTROL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_GFX_HQD_OFFSET
+#define CP_GFX_HQD_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_GFX_HQD_OFFSET__DISABLE_RB_OFFSET__SHIFT 0x1f
+#define CP_GFX_HQD_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+#define CP_GFX_HQD_OFFSET__DISABLE_RB_OFFSET_MASK 0x80000000L
+//CP_GFX_HQD_CNTL
+#define CP_GFX_HQD_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_GFX_HQD_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_GFX_HQD_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_GFX_HQD_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_GFX_HQD_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_GFX_HQD_CNTL__BUF_SWAP__SHIFT 0x10
+#define CP_GFX_HQD_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_GFX_HQD_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_GFX_HQD_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_HQD_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_GFX_HQD_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_GFX_HQD_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_GFX_HQD_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_GFX_HQD_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_GFX_HQD_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_GFX_HQD_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_GFX_HQD_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_GFX_HQD_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_GFX_HQD_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_GFX_HQD_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_GFX_HQD_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_GFX_HQD_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_GFX_HQD_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_GFX_HQD_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_GFX_HQD_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_GFX_HQD_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_GFX_HQD_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_GFX_HQD_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_GFX_HQD_CSMD_RPTR
+#define CP_GFX_HQD_CSMD_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_GFX_HQD_CSMD_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_GFX_HQD_WPTR
+#define CP_GFX_HQD_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_GFX_HQD_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_WPTR_HI
+#define CP_GFX_HQD_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_GFX_HQD_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_DEQUEUE_REQUEST
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000001L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_GFX_HQD_MAPPED
+#define CP_GFX_HQD_MAPPED__MAPPED__SHIFT 0x0
+#define CP_GFX_HQD_MAPPED__MAPPED_MASK 0x00000001L
+//CP_GFX_HQD_QUE_MGR_CONTROL
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_IDLE_QUEUE_DISCONNECT__SHIFT 0x0
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_CONNECT_HANDSHAKE__SHIFT 0x4
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_FETCHER_DISCONNECT__SHIFT 0x5
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_ACTIVE_EN__SHIFT 0x6
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_ALLOW_DB_UPDATE_EN__SHIFT 0x7
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE__SHIFT 0x8
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_OFFSET_UPDATE__SHIFT 0xb
+#define CP_GFX_HQD_QUE_MGR_CONTROL__PRIORITY_PREEMPT_DISABLE__SHIFT 0xd
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_QUEUE_MGR__SHIFT 0xf
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_IDLE_MESSAGE__SHIFT 0x10
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_SWITCH_MESSAGE_IDLE__SHIFT 0x11
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_SWITCH_MSG_PREEMPT__SHIFT 0x12
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_MAPPED_QUEUE_IDLE_MSG__SHIFT 0x17
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_IDLE_QUEUE_DISCONNECT_MASK 0x00000001L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_CONNECT_HANDSHAKE_MASK 0x00000010L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_FETCHER_DISCONNECT_MASK 0x00000020L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_ACTIVE_EN_MASK 0x00000040L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_ALLOW_DB_UPDATE_EN_MASK 0x00000080L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_MASK 0x00000700L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_OFFSET_UPDATE_MASK 0x00000800L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__PRIORITY_PREEMPT_DISABLE_MASK 0x00002000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_QUEUE_MGR_MASK 0x00008000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_IDLE_MESSAGE_MASK 0x00010000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_SWITCH_MESSAGE_IDLE_MASK 0x00020000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_SWITCH_MSG_PREEMPT_MASK 0x00040000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_MAPPED_QUEUE_IDLE_MSG_MASK 0x00800000L
+//CP_GFX_HQD_IQ_TIMER
+#define CP_GFX_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_GFX_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_GFX_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_GFX_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_GFX_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_GFX_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_GFX_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x1b
+#define CP_GFX_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_GFX_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_GFX_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_GFX_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_GFX_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_GFX_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_GFX_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_GFX_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_GFX_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x08000000L
+#define CP_GFX_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_GFX_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_GFX_HQD_HQ_STATUS0
+#define CP_GFX_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
+#define CP_GFX_HQD_HQ_STATUS0__OS_PREEMPT_STATUS__SHIFT 0x4
+#define CP_GFX_HQD_HQ_STATUS0__PREEMPT_ACK__SHIFT 0x6
+#define CP_GFX_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_GFX_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000001L
+#define CP_GFX_HQD_HQ_STATUS0__OS_PREEMPT_STATUS_MASK 0x00000030L
+#define CP_GFX_HQD_HQ_STATUS0__PREEMPT_ACK_MASK 0x00000040L
+#define CP_GFX_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+//CP_GFX_HQD_HQ_CONTROL0
+#define CP_GFX_HQD_HQ_CONTROL0__COMMAND__SHIFT 0x0
+#define CP_GFX_HQD_HQ_CONTROL0__SPARES__SHIFT 0x4
+#define CP_GFX_HQD_HQ_CONTROL0__COMMAND_MASK 0x0000000FL
+#define CP_GFX_HQD_HQ_CONTROL0__SPARES_MASK 0x000000F0L
+//CP_GFX_MQD_CONTROL
+#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_GFX_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_GFX_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x03000000L
+//CP_HQD_GFX_CONTROL
+#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
+#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
+#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
+#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
+//CP_HQD_GFX_STATUS
+#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
+#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
+//CP_DMA_WATCH0_ADDR_LO
+#define CP_DMA_WATCH0_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH0_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH0_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH0_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH0_ADDR_HI
+#define CP_DMA_WATCH0_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH0_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH0_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH0_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH0_MASK
+#define CP_DMA_WATCH0_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH0_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH0_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH0_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH0_CNTL
+#define CP_DMA_WATCH0_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH0_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH0_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH0_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH0_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH0_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH0_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH0_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH0_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH0_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH0_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH0_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH1_ADDR_LO
+#define CP_DMA_WATCH1_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH1_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH1_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH1_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH1_ADDR_HI
+#define CP_DMA_WATCH1_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH1_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH1_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH1_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH1_MASK
+#define CP_DMA_WATCH1_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH1_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH1_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH1_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH1_CNTL
+#define CP_DMA_WATCH1_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH1_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH1_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH1_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH1_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH1_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH1_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH1_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH1_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH1_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH1_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH1_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH2_ADDR_LO
+#define CP_DMA_WATCH2_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH2_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH2_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH2_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH2_ADDR_HI
+#define CP_DMA_WATCH2_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH2_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH2_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH2_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH2_MASK
+#define CP_DMA_WATCH2_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH2_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH2_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH2_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH2_CNTL
+#define CP_DMA_WATCH2_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH2_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH2_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH2_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH2_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH2_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH2_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH2_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH2_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH2_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH2_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH2_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH3_ADDR_LO
+#define CP_DMA_WATCH3_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH3_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH3_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH3_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH3_ADDR_HI
+#define CP_DMA_WATCH3_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH3_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH3_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH3_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH3_MASK
+#define CP_DMA_WATCH3_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH3_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH3_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH3_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH3_CNTL
+#define CP_DMA_WATCH3_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH3_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH3_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH3_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH3_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH3_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH3_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH3_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH3_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH3_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH3_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH3_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH_STAT_ADDR_LO
+#define CP_DMA_WATCH_STAT_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_WATCH_STAT_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_WATCH_STAT_ADDR_HI
+#define CP_DMA_WATCH_STAT_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH_STAT_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_WATCH_STAT
+#define CP_DMA_WATCH_STAT__VMID__SHIFT 0x0
+#define CP_DMA_WATCH_STAT__QUEUE_ID__SHIFT 0x4
+#define CP_DMA_WATCH_STAT__CLIENT_ID__SHIFT 0x8
+#define CP_DMA_WATCH_STAT__PIPE__SHIFT 0xc
+#define CP_DMA_WATCH_STAT__WATCH_ID__SHIFT 0x10
+#define CP_DMA_WATCH_STAT__RD_WR__SHIFT 0x14
+#define CP_DMA_WATCH_STAT__TRAP_FLAG__SHIFT 0x1f
+#define CP_DMA_WATCH_STAT__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH_STAT__QUEUE_ID_MASK 0x00000070L
+#define CP_DMA_WATCH_STAT__CLIENT_ID_MASK 0x00000700L
+#define CP_DMA_WATCH_STAT__PIPE_MASK 0x00003000L
+#define CP_DMA_WATCH_STAT__WATCH_ID_MASK 0x00030000L
+#define CP_DMA_WATCH_STAT__RD_WR_MASK 0x00100000L
+#define CP_DMA_WATCH_STAT__TRAP_FLAG_MASK 0x80000000L
+//CP_PFP_JT_STAT
+#define CP_PFP_JT_STAT__JT_LOADED__SHIFT 0x0
+#define CP_PFP_JT_STAT__WR_MASK__SHIFT 0x10
+#define CP_PFP_JT_STAT__JT_LOADED_MASK 0x00000003L
+#define CP_PFP_JT_STAT__WR_MASK_MASK 0x00030000L
+//CP_MEC_JT_STAT
+#define CP_MEC_JT_STAT__JT_LOADED__SHIFT 0x0
+#define CP_MEC_JT_STAT__WR_MASK__SHIFT 0x10
+#define CP_MEC_JT_STAT__JT_LOADED_MASK 0x000000FFL
+#define CP_MEC_JT_STAT__WR_MASK_MASK 0x00FF0000L
+//CP_CPC_BUSY_HYSTERESIS
+#define CP_CPC_BUSY_HYSTERESIS__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPC_BUSY_HYSTERESIS__CPC_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_HYSTERESIS__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPC_BUSY_HYSTERESIS__CPC_BUSY_MASK 0x0000FF00L
+//CP_CPF_BUSY_HYSTERESIS1
+#define CP_CPF_BUSY_HYSTERESIS1__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPF_BUSY_HYSTERESIS1__CPF_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_HYSTERESIS1__CORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_HYSTERESIS1__GFX_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_HYSTERESIS1__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPF_BUSY_HYSTERESIS1__CPF_BUSY_MASK 0x0000FF00L
+#define CP_CPF_BUSY_HYSTERESIS1__CORE_BUSY_MASK 0x00FF0000L
+#define CP_CPF_BUSY_HYSTERESIS1__GFX_BUSY_MASK 0xFF000000L
+//CP_CPF_BUSY_HYSTERESIS2
+#define CP_CPF_BUSY_HYSTERESIS2__CMP_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_HYSTERESIS2__CMP_BUSY_MASK 0x000000FFL
+//CP_CPG_BUSY_HYSTERESIS1
+#define CP_CPG_BUSY_HYSTERESIS1__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPG_BUSY_HYSTERESIS1__CP_BUSY__SHIFT 0x8
+#define CP_CPG_BUSY_HYSTERESIS1__DMA_BUSY__SHIFT 0x10
+#define CP_CPG_BUSY_HYSTERESIS1__GFX_BUSY__SHIFT 0x18
+#define CP_CPG_BUSY_HYSTERESIS1__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPG_BUSY_HYSTERESIS1__CP_BUSY_MASK 0x0000FF00L
+#define CP_CPG_BUSY_HYSTERESIS1__DMA_BUSY_MASK 0x00FF0000L
+#define CP_CPG_BUSY_HYSTERESIS1__GFX_BUSY_MASK 0xFF000000L
+//CP_CPG_BUSY_HYSTERESIS2
+#define CP_CPG_BUSY_HYSTERESIS2__CMP_BUSY__SHIFT 0x0
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_0__SHIFT 0x8
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_1__SHIFT 0x10
+#define CP_CPG_BUSY_HYSTERESIS2__CMP_BUSY_MASK 0x000000FFL
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_0_MASK 0x0000FF00L
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_1_MASK 0x00FF0000L
+//CP_RB_DOORBELL_CLEAR
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
+//CP_RB0_ACTIVE
+#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_ACTIVE
+#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB1_ACTIVE
+#define CP_RB1_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB1_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_STATUS
+#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+//CPG_RCIU_CAM_INDEX
+#define CPG_RCIU_CAM_INDEX__INDEX__SHIFT 0x0
+#define CPG_RCIU_CAM_INDEX__INDEX_MASK 0x0000001FL
+//CPG_RCIU_CAM_DATA
+#define CPG_RCIU_CAM_DATA__DATA__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_RCIU_CAM_DATA_PHASE0
+#define CPG_RCIU_CAM_DATA_PHASE0__ADDR__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE0_EN__SHIFT 0x18
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE1_EN__SHIFT 0x19
+#define CPG_RCIU_CAM_DATA_PHASE0__SKIP_WR__SHIFT 0x1f
+#define CPG_RCIU_CAM_DATA_PHASE0__ADDR_MASK 0x0003FFFFL
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE0_EN_MASK 0x01000000L
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE1_EN_MASK 0x02000000L
+#define CPG_RCIU_CAM_DATA_PHASE0__SKIP_WR_MASK 0x80000000L
+//CPG_RCIU_CAM_DATA_PHASE1
+#define CPG_RCIU_CAM_DATA_PHASE1__MASK__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE1__MASK_MASK 0xFFFFFFFFL
+//CPG_RCIU_CAM_DATA_PHASE2
+#define CPG_RCIU_CAM_DATA_PHASE2__VALUE__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE2__VALUE_MASK 0xFFFFFFFFL
+//CP_GPU_TIMESTAMP_OFFSET_LO
+#define CP_GPU_TIMESTAMP_OFFSET_LO__OFFSET_LO__SHIFT 0x0
+#define CP_GPU_TIMESTAMP_OFFSET_LO__OFFSET_LO_MASK 0xFFFFFFFFL
+//CP_GPU_TIMESTAMP_OFFSET_HI
+#define CP_GPU_TIMESTAMP_OFFSET_HI__OFFSET_HI__SHIFT 0x0
+#define CP_GPU_TIMESTAMP_OFFSET_HI__OFFSET_HI_MASK 0xFFFFFFFFL
+//CP_SDMA_DMA_DONE
+#define CP_SDMA_DMA_DONE__SDMA_ID__SHIFT 0x0
+#define CP_SDMA_DMA_DONE__SDMA_ID_MASK 0x0000000FL
+//CP_PFP_SDMA_CS
+#define CP_PFP_SDMA_CS__REQUEST_GRANT__SHIFT 0x0
+#define CP_PFP_SDMA_CS__SDMA_ID__SHIFT 0x4
+#define CP_PFP_SDMA_CS__REQUEST_POSITION__SHIFT 0x8
+#define CP_PFP_SDMA_CS__SDMA_COUNT__SHIFT 0xc
+#define CP_PFP_SDMA_CS__REQUEST_GRANT_MASK 0x00000001L
+#define CP_PFP_SDMA_CS__SDMA_ID_MASK 0x000000F0L
+#define CP_PFP_SDMA_CS__REQUEST_POSITION_MASK 0x00000F00L
+#define CP_PFP_SDMA_CS__SDMA_COUNT_MASK 0x00003000L
+//CP_ME_SDMA_CS
+#define CP_ME_SDMA_CS__REQUEST_GRANT__SHIFT 0x0
+#define CP_ME_SDMA_CS__SDMA_ID__SHIFT 0x4
+#define CP_ME_SDMA_CS__REQUEST_POSITION__SHIFT 0x8
+#define CP_ME_SDMA_CS__SDMA_COUNT__SHIFT 0xc
+#define CP_ME_SDMA_CS__REQUEST_GRANT_MASK 0x00000001L
+#define CP_ME_SDMA_CS__SDMA_ID_MASK 0x000000F0L
+#define CP_ME_SDMA_CS__REQUEST_POSITION_MASK 0x00000F00L
+#define CP_ME_SDMA_CS__SDMA_COUNT_MASK 0x00003000L
+//CPF_GCR_CNTL
+#define CPF_GCR_CNTL__GCR_GL_CMD__SHIFT 0x0
+#define CPF_GCR_CNTL__GCR_GL_CMD_MASK 0x0007FFFFL
+//CPG_UTCL1_STATUS
+#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPC_UTCL1_STATUS
+#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPF_UTCL1_STATUS
+#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CP_SD_CNTL
+#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
+#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
+#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
+#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
+#define CP_SD_CNTL__GE_EN__SHIFT 0x5
+#define CP_SD_CNTL__UTCL1_EN__SHIFT 0x6
+#define CP_SD_CNTL__EA_EN__SHIFT 0x9
+#define CP_SD_CNTL__SDMA_EN__SHIFT 0xa
+#define CP_SD_CNTL__SD_VMIDVEC_OVERRIDE__SHIFT 0x1f
+#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
+#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
+#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
+#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
+#define CP_SD_CNTL__GE_EN_MASK 0x00000020L
+#define CP_SD_CNTL__UTCL1_EN_MASK 0x00000040L
+#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
+#define CP_SD_CNTL__SDMA_EN_MASK 0x00000400L
+#define CP_SD_CNTL__SD_VMIDVEC_OVERRIDE_MASK 0x80000000L
+//CP_SOFT_RESET_CNTL
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
+#define CP_SOFT_RESET_CNTL__GFX_HQD_REG_RESET__SHIFT 0x7
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
+#define CP_SOFT_RESET_CNTL__GFX_HQD_REG_RESET_MASK 0x00000080L
+//CP_CPC_GFX_CNTL
+#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
+#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
+#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
+#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
+#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
+#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
+#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
+#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
+
+
+// addressBlock: gc_spipdec
+//SPI_ARB_PRIORITY
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
+//SPI_ARB_CYCLES_0
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
+//SPI_ARB_CYCLES_1
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
+//SPI_WCL_PIPE_PERCENT_GFX
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_HP3D
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_CS0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS1
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS2
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS3
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS4
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS5
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS6
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS7
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
+//SPI_USER_ACCUM_VMID_CNTL
+#define SPI_USER_ACCUM_VMID_CNTL__EN_USER_ACCUM__SHIFT 0x0
+#define SPI_USER_ACCUM_VMID_CNTL__EN_USER_ACCUM_MASK 0x0000000FL
+//SPI_GDBG_PER_VMID_CNTL
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID__SHIFT 0x0
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE__SHIFT 0x1
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT 0x3
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT 0x4
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT 0xd
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK 0x00000001L
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK 0x00000006L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK 0x00000008L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK 0x00001FF0L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK 0x00002000L
+//SPI_COMPUTE_QUEUE_RESET
+#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
+#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
+//SPI_COMPUTE_WF_CTX_SAVE
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
+
+
+// addressBlock: gc_cpphqddec
+//CP_HPD_UTCL1_CNTL
+#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
+#define CP_HPD_UTCL1_CNTL__DISABLE_ERROR_REPORT__SHIFT 0xa
+#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
+#define CP_HPD_UTCL1_CNTL__DISABLE_ERROR_REPORT_MASK 0x00000400L
+//CP_HPD_UTCL1_ERROR
+#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
+#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
+#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
+#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
+#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
+#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
+//CP_HPD_UTCL1_ERROR_ADDR
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
+//CP_MQD_BASE_ADDR
+#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_MQD_BASE_ADDR_HI
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_ACTIVE
+#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
+#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
+//CP_HQD_VMID
+#define CP_HQD_VMID__VMID__SHIFT 0x0
+#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
+#define CP_HQD_VMID__VQID__SHIFT 0x10
+#define CP_HQD_VMID__VMID_MASK 0x0000000FL
+#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
+#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
+//CP_HQD_PERSISTENT_STATE
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
+#define CP_HQD_PERSISTENT_STATE__TMZ_CONNECT_OVERRIDE__SHIFT 0x1
+#define CP_HQD_PERSISTENT_STATE__SUSPEND_STATUS__SHIFT 0x7
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
+#define CP_HQD_PERSISTENT_STATE__TMZ_SWITCH_EXEMPT__SHIFT 0x12
+#define CP_HQD_PERSISTENT_STATE__TMZ_MATCH_DIS__SHIFT 0x13
+#define CP_HQD_PERSISTENT_STATE__WPP_CLAMP_EN__SHIFT 0x14
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
+#define CP_HQD_PERSISTENT_STATE__TMZ_CONNECT_OVERRIDE_MASK 0x00000002L
+#define CP_HQD_PERSISTENT_STATE__SUSPEND_STATUS_MASK 0x00000080L
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
+#define CP_HQD_PERSISTENT_STATE__TMZ_SWITCH_EXEMPT_MASK 0x00040000L
+#define CP_HQD_PERSISTENT_STATE__TMZ_MATCH_DIS_MASK 0x00080000L
+#define CP_HQD_PERSISTENT_STATE__WPP_CLAMP_EN_MASK 0x00100000L
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
+//CP_HQD_PIPE_PRIORITY
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
+//CP_HQD_QUEUE_PRIORITY
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_HQD_QUANTUM
+#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
+#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
+#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_HQD_PQ_BASE
+#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
+#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_BASE_HI
+#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
+//CP_HQD_PQ_RPTR
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_RPTR_REPORT_ADDR
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_WPTR_POLL_ADDR
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
+//CP_HQD_PQ_WPTR_POLL_ADDR_HI
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_DOORBELL_CONTROL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_HQD_PQ_CONTROL
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x12
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_PQ_CONTROL__TMZ__SHIFT 0x16
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_PQ_CONTROL__PQ_VOLATILE__SHIFT 0x1a
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
+#define CP_HQD_PQ_CONTROL__TUNNEL_DISPATCH__SHIFT 0x1d
+#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x000C0000L
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_PQ_CONTROL__TMZ_MASK 0x00400000L
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK 0x04000000L
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
+#define CP_HQD_PQ_CONTROL__TUNNEL_DISPATCH_MASK 0x20000000L
+#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
+//CP_HQD_IB_BASE_ADDR
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_IB_BASE_ADDR_HI
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_IB_RPTR
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
+//CP_HQD_IB_CONTROL
+#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IB_CONTROL__IB_VOLATILE__SHIFT 0x1a
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
+#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_IB_CONTROL__IB_VOLATILE_MASK 0x04000000L
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
+//CP_HQD_IQ_TIMER
+#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IQ_TIMER__IQ_VOLATILE__SHIFT 0x1a
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x1b
+#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
+#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_IQ_TIMER__IQ_VOLATILE_MASK 0x04000000L
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x08000000L
+#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
+#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_HQD_IQ_RPTR
+#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
+#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
+//CP_HQD_DEQUEUE_REQUEST
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x0000000FL
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_HQD_DMA_OFFLOAD
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_OFFLOAD
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_SEMA_CMD
+#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
+#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
+#define CP_HQD_SEMA_CMD__POLLING_DIS__SHIFT 0x8
+#define CP_HQD_SEMA_CMD__MESSAGE_EN__SHIFT 0x9
+#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
+#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
+#define CP_HQD_SEMA_CMD__POLLING_DIS_MASK 0x00000100L
+#define CP_HQD_SEMA_CMD__MESSAGE_EN_MASK 0x00000200L
+//CP_HQD_MSG_TYPE
+#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
+#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
+#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
+#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
+//CP_HQD_ATOMIC0_PREOP_LO
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC0_PREOP_HI
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_LO
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_HI
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER0
+#define CP_HQD_HQ_SCHEDULER0__CWSR__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER0__SAVE_STATUS__SHIFT 0x1
+#define CP_HQD_HQ_SCHEDULER0__RSRV__SHIFT 0x2
+#define CP_HQD_HQ_SCHEDULER0__STATIC_QUEUE__SHIFT 0x3
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_RUN_ONCE__SHIFT 0x6
+#define CP_HQD_HQ_SCHEDULER0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_SCHEDULER0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_SCHEDULER0__C_INHERIT_VMID__SHIFT 0x9
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SCHEDULER_TYPE__SHIFT 0xa
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_USE_GWS__SHIFT 0xd
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_DEBUG_EN__SHIFT 0xe
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SLOT_CONNECTED__SHIFT 0xf
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_ENABLED__SHIFT 0x14
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_PIPE__SHIFT 0x15
+#define CP_HQD_HQ_SCHEDULER0__CONCURRENT_PROCESS_COUNT__SHIFT 0x18
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_SCHEDULER0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_SCHEDULER0__CWSR_MASK 0x00000001L
+#define CP_HQD_HQ_SCHEDULER0__SAVE_STATUS_MASK 0x00000002L
+#define CP_HQD_HQ_SCHEDULER0__RSRV_MASK 0x00000004L
+#define CP_HQD_HQ_SCHEDULER0__STATIC_QUEUE_MASK 0x00000038L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_RUN_ONCE_MASK 0x00000040L
+#define CP_HQD_HQ_SCHEDULER0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_SCHEDULER0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_SCHEDULER0__C_INHERIT_VMID_MASK 0x00000200L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SCHEDULER_TYPE_MASK 0x00001C00L
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_USE_GWS_MASK 0x00002000L
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_DEBUG_EN_MASK 0x00004000L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SLOT_CONNECTED_MASK 0x00008000L
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_ENABLED_MASK 0x00100000L
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_PIPE_MASK 0x00600000L
+#define CP_HQD_HQ_SCHEDULER0__CONCURRENT_PROCESS_COUNT_MASK 0x0F000000L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_SCHEDULER0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_STATUS0
+#define CP_HQD_HQ_STATUS0__CWSR__SHIFT 0x0
+#define CP_HQD_HQ_STATUS0__SAVE_STATUS__SHIFT 0x1
+#define CP_HQD_HQ_STATUS0__RSRV__SHIFT 0x2
+#define CP_HQD_HQ_STATUS0__STATIC_QUEUE__SHIFT 0x3
+#define CP_HQD_HQ_STATUS0__QUEUE_RUN_ONCE__SHIFT 0x6
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_STATUS0__C_INHERIT_VMID__SHIFT 0x9
+#define CP_HQD_HQ_STATUS0__QUEUE_SCHEDULER_TYPE__SHIFT 0xa
+#define CP_HQD_HQ_STATUS0__C_QUEUE_USE_GWS__SHIFT 0xd
+#define CP_HQD_HQ_STATUS0__C_QUEUE_DEBUG_EN__SHIFT 0xe
+#define CP_HQD_HQ_STATUS0__QUEUE_SLOT_CONNECTED__SHIFT 0xf
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_ENABLED__SHIFT 0x14
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_PIPE__SHIFT 0x15
+#define CP_HQD_HQ_STATUS0__CONCURRENT_PROCESS_COUNT__SHIFT 0x18
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_STATUS0__CWSR_MASK 0x00000001L
+#define CP_HQD_HQ_STATUS0__SAVE_STATUS_MASK 0x00000002L
+#define CP_HQD_HQ_STATUS0__RSRV_MASK 0x00000004L
+#define CP_HQD_HQ_STATUS0__STATIC_QUEUE_MASK 0x00000038L
+#define CP_HQD_HQ_STATUS0__QUEUE_RUN_ONCE_MASK 0x00000040L
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_STATUS0__C_INHERIT_VMID_MASK 0x00000200L
+#define CP_HQD_HQ_STATUS0__QUEUE_SCHEDULER_TYPE_MASK 0x00001C00L
+#define CP_HQD_HQ_STATUS0__C_QUEUE_USE_GWS_MASK 0x00002000L
+#define CP_HQD_HQ_STATUS0__C_QUEUE_DEBUG_EN_MASK 0x00004000L
+#define CP_HQD_HQ_STATUS0__QUEUE_SLOT_CONNECTED_MASK 0x00008000L
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_ENABLED_MASK 0x00100000L
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_PIPE_MASK 0x00600000L
+#define CP_HQD_HQ_STATUS0__CONCURRENT_PROCESS_COUNT_MASK 0x0F000000L
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_CONTROL0
+#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER1
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_MQD_CONTROL
+#define CP_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_MQD_CONTROL__MQD_VOLATILE__SHIFT 0x1a
+#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_MQD_CONTROL__MQD_VOLATILE_MASK 0x04000000L
+//CP_HQD_HQ_STATUS1
+#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_CONTROL1
+#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR_HI
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
+//CP_HQD_EOP_CONTROL
+#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_EOP_CONTROL__EOP_VOLATILE__SHIFT 0x1a
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
+#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_EOP_CONTROL__EOP_VOLATILE_MASK 0x04000000L
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
+//CP_HQD_EOP_RPTR
+#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
+#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
+#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
+#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
+//CP_HQD_EOP_WPTR
+#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
+#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
+#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
+#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
+//CP_HQD_EOP_EVENTS
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_LO
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_HI
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_CTX_SAVE_CONTROL
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000018L
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CP_HQD_CNTL_STACK_OFFSET
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_CNTL_STACK_SIZE
+#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CP_HQD_WG_STATE_OFFSET
+#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CP_HQD_CTX_SAVE_SIZE
+#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x03FFF000L
+//CP_HQD_GDS_RESOURCE_STATE
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
+//CP_HQD_ERROR
+#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
+#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
+#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
+#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
+#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
+#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
+#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
+#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
+#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
+#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
+#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
+#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
+//CP_HQD_EOP_WPTR_MEM
+#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
+//CP_HQD_AQL_CONTROL
+#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
+#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
+#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
+//CP_HQD_PQ_WPTR_LO
+#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_WPTR_HI
+#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
+//CP_HQD_SUSPEND_CNTL_STACK_OFFSET
+#define CP_HQD_SUSPEND_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_SUSPEND_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_SUSPEND_CNTL_STACK_DW_CNT
+#define CP_HQD_SUSPEND_CNTL_STACK_DW_CNT__CNT__SHIFT 0x0
+#define CP_HQD_SUSPEND_CNTL_STACK_DW_CNT__CNT_MASK 0x00003FFFL
+//CP_HQD_SUSPEND_WG_STATE_OFFSET
+#define CP_HQD_SUSPEND_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_SUSPEND_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CP_HQD_DDID_RPTR
+#define CP_HQD_DDID_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_DDID_RPTR__RPTR_MASK 0x000007FFL
+//CP_HQD_DDID_WPTR
+#define CP_HQD_DDID_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_DDID_WPTR__WPTR_MASK 0x000007FFL
+//CP_HQD_DDID_INFLIGHT_COUNT
+#define CP_HQD_DDID_INFLIGHT_COUNT__COUNT__SHIFT 0x0
+#define CP_HQD_DDID_INFLIGHT_COUNT__COUNT_MASK 0x0000FFFFL
+//CP_HQD_DDID_DELTA_RPT_COUNT
+#define CP_HQD_DDID_DELTA_RPT_COUNT__COUNT__SHIFT 0x0
+#define CP_HQD_DDID_DELTA_RPT_COUNT__COUNT_MASK 0x000000FFL
+//CP_HQD_DEQUEUE_STATUS
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT__SHIFT 0x0
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_MASK 0x0000000FL
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_EN_MASK 0x00000400L
+
+
+// addressBlock: gc_tcpdec
+//TCP_WATCH0_ADDR_H
+#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH0_ADDR_L
+#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH0_CNTL
+#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH0_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH1_ADDR_H
+#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH1_ADDR_L
+#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH1_CNTL
+#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH1_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH2_ADDR_H
+#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH2_ADDR_L
+#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH2_CNTL
+#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH2_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH3_ADDR_H
+#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH3_ADDR_L
+#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH3_CNTL
+#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH3_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
+
+
+// addressBlock: gc_gdspdec
+//GDS_VMID0_BASE
+#define GDS_VMID0_BASE__BASE__SHIFT 0x0
+#define GDS_VMID0_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID0_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID0_SIZE
+#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID0_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID0_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID1_BASE
+#define GDS_VMID1_BASE__BASE__SHIFT 0x0
+#define GDS_VMID1_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID1_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID1_SIZE
+#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID1_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID1_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID2_BASE
+#define GDS_VMID2_BASE__BASE__SHIFT 0x0
+#define GDS_VMID2_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID2_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID2_SIZE
+#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID2_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID2_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID3_BASE
+#define GDS_VMID3_BASE__BASE__SHIFT 0x0
+#define GDS_VMID3_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID3_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID3_SIZE
+#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID3_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID3_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID4_BASE
+#define GDS_VMID4_BASE__BASE__SHIFT 0x0
+#define GDS_VMID4_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID4_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID4_SIZE
+#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID4_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID4_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID5_BASE
+#define GDS_VMID5_BASE__BASE__SHIFT 0x0
+#define GDS_VMID5_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID5_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID5_SIZE
+#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID5_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID5_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID6_BASE
+#define GDS_VMID6_BASE__BASE__SHIFT 0x0
+#define GDS_VMID6_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID6_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID6_SIZE
+#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID6_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID6_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID7_BASE
+#define GDS_VMID7_BASE__BASE__SHIFT 0x0
+#define GDS_VMID7_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID7_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID7_SIZE
+#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID7_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID7_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID8_BASE
+#define GDS_VMID8_BASE__BASE__SHIFT 0x0
+#define GDS_VMID8_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID8_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID8_SIZE
+#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID8_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID8_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID9_BASE
+#define GDS_VMID9_BASE__BASE__SHIFT 0x0
+#define GDS_VMID9_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID9_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID9_SIZE
+#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID9_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID9_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID10_BASE
+#define GDS_VMID10_BASE__BASE__SHIFT 0x0
+#define GDS_VMID10_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID10_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID10_SIZE
+#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID10_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID10_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID11_BASE
+#define GDS_VMID11_BASE__BASE__SHIFT 0x0
+#define GDS_VMID11_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID11_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID11_SIZE
+#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID11_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID11_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID12_BASE
+#define GDS_VMID12_BASE__BASE__SHIFT 0x0
+#define GDS_VMID12_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID12_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID12_SIZE
+#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID12_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID12_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID13_BASE
+#define GDS_VMID13_BASE__BASE__SHIFT 0x0
+#define GDS_VMID13_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID13_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID13_SIZE
+#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID13_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID13_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID14_BASE
+#define GDS_VMID14_BASE__BASE__SHIFT 0x0
+#define GDS_VMID14_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID14_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID14_SIZE
+#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID14_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID14_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID15_BASE
+#define GDS_VMID15_BASE__BASE__SHIFT 0x0
+#define GDS_VMID15_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID15_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID15_SIZE
+#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID15_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID15_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_GWS_VMID0
+#define GDS_GWS_VMID0__BASE__SHIFT 0x0
+#define GDS_GWS_VMID0__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID0__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID0__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID0__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID1
+#define GDS_GWS_VMID1__BASE__SHIFT 0x0
+#define GDS_GWS_VMID1__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID1__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID1__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID1__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID2
+#define GDS_GWS_VMID2__BASE__SHIFT 0x0
+#define GDS_GWS_VMID2__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID2__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID2__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID2__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID3
+#define GDS_GWS_VMID3__BASE__SHIFT 0x0
+#define GDS_GWS_VMID3__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID3__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID3__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID3__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID4
+#define GDS_GWS_VMID4__BASE__SHIFT 0x0
+#define GDS_GWS_VMID4__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID4__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID4__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID4__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID5
+#define GDS_GWS_VMID5__BASE__SHIFT 0x0
+#define GDS_GWS_VMID5__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID5__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID5__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID5__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID6
+#define GDS_GWS_VMID6__BASE__SHIFT 0x0
+#define GDS_GWS_VMID6__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID6__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID6__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID6__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID7
+#define GDS_GWS_VMID7__BASE__SHIFT 0x0
+#define GDS_GWS_VMID7__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID7__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID7__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID7__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID8
+#define GDS_GWS_VMID8__BASE__SHIFT 0x0
+#define GDS_GWS_VMID8__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID8__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID8__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID8__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID9
+#define GDS_GWS_VMID9__BASE__SHIFT 0x0
+#define GDS_GWS_VMID9__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID9__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID9__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID9__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID10
+#define GDS_GWS_VMID10__BASE__SHIFT 0x0
+#define GDS_GWS_VMID10__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID10__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID10__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID10__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID11
+#define GDS_GWS_VMID11__BASE__SHIFT 0x0
+#define GDS_GWS_VMID11__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID11__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID11__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID11__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID12
+#define GDS_GWS_VMID12__BASE__SHIFT 0x0
+#define GDS_GWS_VMID12__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID12__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID12__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID12__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID13
+#define GDS_GWS_VMID13__BASE__SHIFT 0x0
+#define GDS_GWS_VMID13__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID13__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID13__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID13__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID14
+#define GDS_GWS_VMID14__BASE__SHIFT 0x0
+#define GDS_GWS_VMID14__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID14__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID14__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID14__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID15
+#define GDS_GWS_VMID15__BASE__SHIFT 0x0
+#define GDS_GWS_VMID15__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID15__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID15__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID15__UNUSED2_MASK 0xFF800000L
+//GDS_OA_VMID0
+#define GDS_OA_VMID0__MASK__SHIFT 0x0
+#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID1
+#define GDS_OA_VMID1__MASK__SHIFT 0x0
+#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID2
+#define GDS_OA_VMID2__MASK__SHIFT 0x0
+#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID3
+#define GDS_OA_VMID3__MASK__SHIFT 0x0
+#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID4
+#define GDS_OA_VMID4__MASK__SHIFT 0x0
+#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID5
+#define GDS_OA_VMID5__MASK__SHIFT 0x0
+#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID6
+#define GDS_OA_VMID6__MASK__SHIFT 0x0
+#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID7
+#define GDS_OA_VMID7__MASK__SHIFT 0x0
+#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID8
+#define GDS_OA_VMID8__MASK__SHIFT 0x0
+#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID9
+#define GDS_OA_VMID9__MASK__SHIFT 0x0
+#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID10
+#define GDS_OA_VMID10__MASK__SHIFT 0x0
+#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID11
+#define GDS_OA_VMID11__MASK__SHIFT 0x0
+#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID12
+#define GDS_OA_VMID12__MASK__SHIFT 0x0
+#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID13
+#define GDS_OA_VMID13__MASK__SHIFT 0x0
+#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID14
+#define GDS_OA_VMID14__MASK__SHIFT 0x0
+#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID15
+#define GDS_OA_VMID15__MASK__SHIFT 0x0
+#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
+//GDS_GWS_RESET0
+#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
+#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
+#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
+#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
+#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
+#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
+#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
+#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
+#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
+#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
+#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
+#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
+#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
+#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
+#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
+#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
+#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
+#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
+#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
+#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
+#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
+#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
+#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
+#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
+#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
+#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
+#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
+//GDS_GWS_RESET1
+#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
+#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
+#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
+#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
+#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
+#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
+#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
+#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
+#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
+#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
+#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
+#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
+#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
+#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
+#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
+#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
+#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
+#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
+#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
+#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
+#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
+#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
+#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
+#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
+#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
+#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
+#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
+//GDS_GWS_RESOURCE_RESET
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
+#define GDS_GWS_RESOURCE_RESET__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
+#define GDS_GWS_RESOURCE_RESET__UNUSED_MASK 0xFFFF0000L
+//GDS_COMPUTE_MAX_WAVE_ID
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define GDS_COMPUTE_MAX_WAVE_ID__UNUSED__SHIFT 0xc
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define GDS_COMPUTE_MAX_WAVE_ID__UNUSED_MASK 0xFFFFF000L
+//GDS_OA_RESET_MASK
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
+#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
+#define GDS_OA_RESET_MASK__ME0_PIPE1_CS_RESET__SHIFT 0xc
+#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xd
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
+#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
+#define GDS_OA_RESET_MASK__ME0_PIPE1_CS_RESET_MASK 0x00001000L
+#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFE000L
+//GDS_OA_RESET
+#define GDS_OA_RESET__RESET__SHIFT 0x0
+#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
+#define GDS_OA_RESET__UNUSED__SHIFT 0x10
+#define GDS_OA_RESET__RESET_MASK 0x00000001L
+#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
+#define GDS_OA_RESET__UNUSED_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_STATUS
+#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_CS_CTXSW_CNT0
+#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT1
+#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT2
+#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT3
+#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GFX_CTXSW_STATUS
+#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_PS_CTXSW_CNT0
+#define GDS_PS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT1
+#define GDS_PS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT2
+#define GDS_PS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT3
+#define GDS_PS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_IDX
+#define GDS_PS_CTXSW_IDX__PACKER_ID__SHIFT 0x0
+#define GDS_PS_CTXSW_IDX__UNUSED__SHIFT 0x6
+#define GDS_PS_CTXSW_IDX__PACKER_ID_MASK 0x0000003FL
+#define GDS_PS_CTXSW_IDX__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GS_CTXSW_CNT0
+#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT1
+#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT2
+#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT3
+#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_MEMORY_CLEAN
+#define GDS_MEMORY_CLEAN__START__SHIFT 0x0
+#define GDS_MEMORY_CLEAN__FINISH__SHIFT 0x1
+#define GDS_MEMORY_CLEAN__UNUSED__SHIFT 0x2
+#define GDS_MEMORY_CLEAN__START_MASK 0x00000001L
+#define GDS_MEMORY_CLEAN__FINISH_MASK 0x00000002L
+#define GDS_MEMORY_CLEAN__UNUSED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_rasdec
+//RAS_SIGNATURE_CONTROL
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+//RAS_SIGNATURE_MASK
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE0
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE1
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE2
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE3
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_DB_SIGNATURE0
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_PA_SIGNATURE0
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE0
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE1
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE2
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE3
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE4
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE5
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE6
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE7
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE0
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE1
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_CB_SIGNATURE0
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE0
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE1
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gusdec
+//GUS_IO_RD_COMBINE_FLUSH
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x18
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+#define GUS_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x03000000L
+//GUS_IO_WR_COMBINE_FLUSH
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x18
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+#define GUS_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x03000000L
+//GUS_IO_RD_PRI_AGE_RATE
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_IO_WR_PRI_AGE_RATE
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_IO_RD_PRI_AGE_COEFF
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_AGE_COEFF
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_QUEUING
+#define GUS_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_QUEUING
+#define GUS_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_FIXED
+#define GUS_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_FIXED
+#define GUS_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_URGENCY_COEFF
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_URGENCY_COEFF
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_URGENCY_MODE
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_IO_WR_PRI_URGENCY_MODE
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_IO_RD_PRI_QUANT_PRI1
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI2
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI3
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI4
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI1
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI2
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI3
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI4
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT1_PRI1
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI2
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI3
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI4
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI1
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI2
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI3
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI4
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_COMBINE_FLUSH
+#define GUS_DRAM_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_DRAM_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_DRAM_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_DRAM_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_DRAM_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_DRAM_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_DRAM_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_DRAM_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+//GUS_DRAM_COMBINE_RD_WR_EN
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP0_TIMER__SHIFT 0x0
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP1_TIMER__SHIFT 0x2
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP2_TIMER__SHIFT 0x4
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP3_TIMER__SHIFT 0x6
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP4_TIMER__SHIFT 0x8
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP5_TIMER__SHIFT 0xa
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP0_TIMER_MASK 0x00000003L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP1_TIMER_MASK 0x0000000CL
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP2_TIMER_MASK 0x00000030L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP3_TIMER_MASK 0x000000C0L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP4_TIMER_MASK 0x00000300L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP5_TIMER_MASK 0x00000C00L
+//GUS_DRAM_PRI_AGE_RATE
+#define GUS_DRAM_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_DRAM_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_DRAM_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_DRAM_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_DRAM_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_DRAM_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_DRAM_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_DRAM_PRI_AGE_COEFF
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_QUEUING
+#define GUS_DRAM_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_FIXED
+#define GUS_DRAM_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_URGENCY_COEFF
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_URGENCY_MODE
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_DRAM_PRI_QUANT_PRI1
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI2
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI3
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI4
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI5
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT1_PRI1
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI2
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI3
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI4
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI5
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_GROUP_BURST
+#define GUS_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GUS_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GUS_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GUS_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GUS_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GUS_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GUS_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GUS_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GUS_DRAM_GROUP_BURST
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_LO__SHIFT 0x0
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_HI__SHIFT 0x8
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_LO_MASK 0x000000FFL
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_HI_MASK 0x0000FF00L
+//GUS_SDP_ARB_FINAL
+#define GUS_SDP_ARB_FINAL__HI_DRAM_BURST_LIMIT__SHIFT 0x0
+#define GUS_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x5
+#define GUS_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GUS_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GUS_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x11
+#define GUS_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x12
+#define GUS_SDP_ARB_FINAL__HI_DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GUS_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x000003E0L
+#define GUS_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GUS_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GUS_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x00020000L
+#define GUS_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x00040000L
+//GUS_SDP_QOS_VC_PRIORITY
+#define GUS_SDP_QOS_VC_PRIORITY__VC2_IORD__SHIFT 0x0
+#define GUS_SDP_QOS_VC_PRIORITY__VC3_IOWR__SHIFT 0x4
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_DRAM__SHIFT 0x8
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_HI_DRAM__SHIFT 0xc
+#define GUS_SDP_QOS_VC_PRIORITY__VC2_IORD_MASK 0x0000000FL
+#define GUS_SDP_QOS_VC_PRIORITY__VC3_IOWR_MASK 0x000000F0L
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_DRAM_MASK 0x00000F00L
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_HI_DRAM_MASK 0x0000F000L
+//GUS_SDP_CREDITS
+#define GUS_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GUS_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GUS_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GUS_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GUS_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GUS_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//GUS_SDP_TAG_RESERVE0
+#define GUS_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GUS_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GUS_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GUS_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GUS_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GUS_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GUS_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GUS_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GUS_SDP_TAG_RESERVE1
+#define GUS_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GUS_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GUS_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GUS_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GUS_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GUS_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GUS_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GUS_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GUS_SDP_VCC_RESERVE0
+#define GUS_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GUS_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GUS_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GUS_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GUS_SDP_VCC_RESERVE1
+#define GUS_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GUS_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GUS_SDP_VCD_RESERVE0
+#define GUS_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GUS_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GUS_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GUS_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GUS_SDP_VCD_RESERVE1
+#define GUS_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GUS_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GUS_SDP_REQ_CNTL
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GUS_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GUS_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GUS_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GUS_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
+//GUS_MISC
+#define GUS_MISC__RELATIVE_PRI_IN_DRAM_ARB__SHIFT 0x0
+#define GUS_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x1
+#define GUS_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x2
+#define GUS_MISC__EARLY_SDP_ORIGDATA__SHIFT 0x3
+#define GUS_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0x4
+#define GUS_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x6
+#define GUS_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x8
+#define GUS_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0xa
+#define GUS_MISC__SEND0_IOWR_ONLY__SHIFT 0xf
+#define GUS_MISC__RELATIVE_PRI_IN_DRAM_ARB_MASK 0x00000001L
+#define GUS_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000002L
+#define GUS_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000004L
+#define GUS_MISC__EARLY_SDP_ORIGDATA_MASK 0x00000008L
+#define GUS_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00000030L
+#define GUS_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x000000C0L
+#define GUS_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00000300L
+#define GUS_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x00007C00L
+#define GUS_MISC__SEND0_IOWR_ONLY_MASK 0x00008000L
+//GUS_LATENCY_SAMPLING
+#define GUS_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GUS_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GUS_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x2
+#define GUS_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x3
+#define GUS_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x4
+#define GUS_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x5
+#define GUS_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x6
+#define GUS_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x7
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0x8
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0x9
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xa
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xb
+#define GUS_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xc
+#define GUS_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x14
+#define GUS_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000004L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000008L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000010L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000020L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000040L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000080L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000100L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000200L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00000400L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00000800L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x000FF000L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x0FF00000L
+//GUS_ERR_STATUS
+#define GUS_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GUS_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GUS_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GUS_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GUS_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GUS_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GUS_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GUS_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GUS_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GUS_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GUS_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GUS_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GUS_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GUS_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//GUS_MISC2
+#define GUS_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0x0
+#define GUS_MISC2__CH_L1_RO_MASK__SHIFT 0x1
+#define GUS_MISC2__SA0_L1_RO_MASK__SHIFT 0x2
+#define GUS_MISC2__SA1_L1_RO_MASK__SHIFT 0x3
+#define GUS_MISC2__SA2_L1_RO_MASK__SHIFT 0x4
+#define GUS_MISC2__SA3_L1_RO_MASK__SHIFT 0x5
+#define GUS_MISC2__CH_L1_PERF_MASK__SHIFT 0x6
+#define GUS_MISC2__SA0_L1_PERF_MASK__SHIFT 0x7
+#define GUS_MISC2__SA1_L1_PERF_MASK__SHIFT 0x8
+#define GUS_MISC2__SA2_L1_PERF_MASK__SHIFT 0x9
+#define GUS_MISC2__SA3_L1_PERF_MASK__SHIFT 0xa
+#define GUS_MISC2__FP_ATOMICS_ENABLE__SHIFT 0xb
+#define GUS_MISC2__L1_RET_CLKEN__SHIFT 0xc
+#define GUS_MISC2__FGCLKEN_HIGH__SHIFT 0xd
+#define GUS_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define GUS_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define GUS_MISC2__RIO_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x10
+#define GUS_MISC2__WIO_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x11
+#define GUS_MISC2__DRAM_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x12
+#define GUS_MISC2__RDRET_FED_MASK__SHIFT 0x13
+#define GUS_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00000001L
+#define GUS_MISC2__CH_L1_RO_MASK_MASK 0x00000002L
+#define GUS_MISC2__SA0_L1_RO_MASK_MASK 0x00000004L
+#define GUS_MISC2__SA1_L1_RO_MASK_MASK 0x00000008L
+#define GUS_MISC2__SA2_L1_RO_MASK_MASK 0x00000010L
+#define GUS_MISC2__SA3_L1_RO_MASK_MASK 0x00000020L
+#define GUS_MISC2__CH_L1_PERF_MASK_MASK 0x00000040L
+#define GUS_MISC2__SA0_L1_PERF_MASK_MASK 0x00000080L
+#define GUS_MISC2__SA1_L1_PERF_MASK_MASK 0x00000100L
+#define GUS_MISC2__SA2_L1_PERF_MASK_MASK 0x00000200L
+#define GUS_MISC2__SA3_L1_PERF_MASK_MASK 0x00000400L
+#define GUS_MISC2__FP_ATOMICS_ENABLE_MASK 0x00000800L
+#define GUS_MISC2__L1_RET_CLKEN_MASK 0x00001000L
+#define GUS_MISC2__FGCLKEN_HIGH_MASK 0x00002000L
+#define GUS_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define GUS_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define GUS_MISC2__RIO_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00010000L
+#define GUS_MISC2__WIO_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00020000L
+#define GUS_MISC2__DRAM_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00040000L
+#define GUS_MISC2__RDRET_FED_MASK_MASK 0x00080000L
+//GUS_SDP_BACKDOOR_CMDCREDITS0
+#define GUS_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
+//GUS_SDP_BACKDOOR_CMDCREDITS1
+#define GUS_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
+//GUS_SDP_BACKDOOR_DATACREDITS0
+#define GUS_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
+//GUS_SDP_BACKDOOR_DATACREDITS1
+#define GUS_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
+//GUS_SDP_BACKDOOR_MISCCREDITS
+#define GUS_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
+#define GUS_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
+#define GUS_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
+//GUS_SDP_ENABLE
+#define GUS_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GUS_SDP_ENABLE__ENABLE_MASK 0x00000001L
+//GUS_L1_CH0_CMD_IN
+#define GUS_L1_CH0_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_CMD_OUT
+#define GUS_L1_CH0_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_IN
+#define GUS_L1_CH0_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_OUT
+#define GUS_L1_CH0_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_U_IN
+#define GUS_L1_CH0_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_U_OUT
+#define GUS_L1_CH0_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_CMD_IN
+#define GUS_L1_CH1_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_CMD_OUT
+#define GUS_L1_CH1_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_IN
+#define GUS_L1_CH1_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_OUT
+#define GUS_L1_CH1_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_U_IN
+#define GUS_L1_CH1_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_U_OUT
+#define GUS_L1_CH1_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_CMD_IN
+#define GUS_L1_SA0_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_CMD_OUT
+#define GUS_L1_SA0_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_IN
+#define GUS_L1_SA0_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_OUT
+#define GUS_L1_SA0_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_U_IN
+#define GUS_L1_SA0_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_U_OUT
+#define GUS_L1_SA0_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_CMD_IN
+#define GUS_L1_SA1_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_CMD_OUT
+#define GUS_L1_SA1_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_IN
+#define GUS_L1_SA1_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_OUT
+#define GUS_L1_SA1_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_U_IN
+#define GUS_L1_SA1_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_U_OUT
+#define GUS_L1_SA1_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_CMD_IN
+#define GUS_L1_SA2_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_CMD_OUT
+#define GUS_L1_SA2_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_IN
+#define GUS_L1_SA2_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_OUT
+#define GUS_L1_SA2_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_U_IN
+#define GUS_L1_SA2_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_U_OUT
+#define GUS_L1_SA2_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_CMD_IN
+#define GUS_L1_SA3_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_CMD_OUT
+#define GUS_L1_SA3_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_IN
+#define GUS_L1_SA3_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_OUT
+#define GUS_L1_SA3_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_U_IN
+#define GUS_L1_SA3_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_U_OUT
+#define GUS_L1_SA3_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_MISC3
+#define GUS_MISC3__FP_ATOMICS_LOG__SHIFT 0x0
+#define GUS_MISC3__CLEAR_LOG__SHIFT 0x1
+#define GUS_MISC3__FP_ATOMICS_LOG_MASK 0x00000001L
+#define GUS_MISC3__CLEAR_LOG_MASK 0x00000002L
+//GUS_WRRSP_FIFO_CNTL
+#define GUS_WRRSP_FIFO_CNTL__THRESHOLD__SHIFT 0x0
+#define GUS_WRRSP_FIFO_CNTL__THRESHOLD_MASK 0x0000003FL
+
+
+// addressBlock: gc_gfxdec0
+//DB_RENDER_CONTROL
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
+#define DB_RENDER_CONTROL__PS_INVOKE_DISABLE__SHIFT 0xe
+#define DB_RENDER_CONTROL__OREO_MODE__SHIFT 0x10
+#define DB_RENDER_CONTROL__FORCE_OREO_MODE__SHIFT 0x12
+#define DB_RENDER_CONTROL__FORCE_EXPORT_ORDER__SHIFT 0x13
+#define DB_RENDER_CONTROL__MAX_ALLOWED_TILES_IN_WAVE__SHIFT 0x14
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
+#define DB_RENDER_CONTROL__PS_INVOKE_DISABLE_MASK 0x00004000L
+#define DB_RENDER_CONTROL__OREO_MODE_MASK 0x00030000L
+#define DB_RENDER_CONTROL__FORCE_OREO_MODE_MASK 0x00040000L
+#define DB_RENDER_CONTROL__FORCE_EXPORT_ORDER_MASK 0x00080000L
+#define DB_RENDER_CONTROL__MAX_ALLOWED_TILES_IN_WAVE_MASK 0x00F00000L
+//DB_COUNT_CONTROL
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
+#define DB_COUNT_CONTROL__DISABLE_CONSERVATIVE_ZPASS_COUNTS__SHIFT 0x2
+#define DB_COUNT_CONTROL__ENHANCED_CONSERVATIVE_ZPASS_COUNTS__SHIFT 0x3
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__DISABLE_CONSERVATIVE_ZPASS_COUNTS_MASK 0x00000004L
+#define DB_COUNT_CONTROL__ENHANCED_CONSERVATIVE_ZPASS_COUNTS_MASK 0x00000008L
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
+//DB_DEPTH_VIEW
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
+#define DB_DEPTH_VIEW__SLICE_START_HI__SHIFT 0xb
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
+#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
+#define DB_DEPTH_VIEW__SLICE_MAX_HI__SHIFT 0x1e
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
+#define DB_DEPTH_VIEW__SLICE_START_HI_MASK 0x00001800L
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
+#define DB_DEPTH_VIEW__SLICE_MAX_HI_MASK 0xC0000000L
+//DB_RENDER_OVERRIDE
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+//DB_RENDER_OVERRIDE2
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
+#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE2__DISABLE_NOZ__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
+#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK 0x18000000L
+#define DB_RENDER_OVERRIDE2__DISABLE_NOZ_MASK 0x20000000L
+//DB_HTILE_DATA_BASE
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_DEPTH_SIZE_XY
+#define DB_DEPTH_SIZE_XY__X_MAX__SHIFT 0x0
+#define DB_DEPTH_SIZE_XY__Y_MAX__SHIFT 0x10
+#define DB_DEPTH_SIZE_XY__X_MAX_MASK 0x00003FFFL
+#define DB_DEPTH_SIZE_XY__Y_MAX_MASK 0x3FFF0000L
+//DB_DEPTH_BOUNDS_MIN
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
+//DB_DEPTH_BOUNDS_MAX
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
+//DB_STENCIL_CLEAR
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
+//DB_DEPTH_CLEAR
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
+//PA_SC_SCREEN_SCISSOR_TL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_SCISSOR_BR
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
+//DB_RESERVED_REG_2
+#define DB_RESERVED_REG_2__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_2__FIELD_2__SHIFT 0x4
+#define DB_RESERVED_REG_2__FIELD_3__SHIFT 0x8
+#define DB_RESERVED_REG_2__FIELD_4__SHIFT 0xd
+#define DB_RESERVED_REG_2__FIELD_5__SHIFT 0xf
+#define DB_RESERVED_REG_2__FIELD_6__SHIFT 0x11
+#define DB_RESERVED_REG_2__FIELD_7__SHIFT 0x13
+#define DB_RESERVED_REG_2__FIELD_8__SHIFT 0x1c
+#define DB_RESERVED_REG_2__FIELD_1_MASK 0x0000000FL
+#define DB_RESERVED_REG_2__FIELD_2_MASK 0x000000F0L
+#define DB_RESERVED_REG_2__FIELD_3_MASK 0x00001F00L
+#define DB_RESERVED_REG_2__FIELD_4_MASK 0x00006000L
+#define DB_RESERVED_REG_2__FIELD_5_MASK 0x00018000L
+#define DB_RESERVED_REG_2__FIELD_6_MASK 0x00060000L
+#define DB_RESERVED_REG_2__FIELD_7_MASK 0x00180000L
+#define DB_RESERVED_REG_2__FIELD_8_MASK 0xF0000000L
+//DB_Z_INFO
+#define DB_Z_INFO__FORMAT__SHIFT 0x0
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
+#define DB_Z_INFO__SW_MODE__SHIFT 0x4
+#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0x9
+#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xb
+#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_Z_INFO__RESERVED_FIELD_1__SHIFT 0xd
+#define DB_Z_INFO__MAXMIP__SHIFT 0x10
+#define DB_Z_INFO__ITERATE_256__SHIFT 0x14
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
+#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00000600L
+#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00000800L
+#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_Z_INFO__RESERVED_FIELD_1_MASK 0x0000E000L
+#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
+#define DB_Z_INFO__ITERATE_256_MASK 0x00100000L
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+//DB_STENCIL_INFO
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
+#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0x9
+#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xb
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_STENCIL_INFO__RESERVED_FIELD_1__SHIFT 0xd
+#define DB_STENCIL_INFO__ITERATE_256__SHIFT 0x14
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00000600L
+#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00000800L
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_STENCIL_INFO__RESERVED_FIELD_1_MASK 0x0000E000L
+#define DB_STENCIL_INFO__ITERATE_256_MASK 0x00100000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+//DB_Z_READ_BASE
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_READ_BASE
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_WRITE_BASE
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_WRITE_BASE
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_RESERVED_REG_1
+#define DB_RESERVED_REG_1__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_1__FIELD_2__SHIFT 0xb
+#define DB_RESERVED_REG_1__FIELD_1_MASK 0x000007FFL
+#define DB_RESERVED_REG_1__FIELD_2_MASK 0x003FF800L
+//DB_RESERVED_REG_3
+#define DB_RESERVED_REG_3__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_3__FIELD_1_MASK 0x003FFFFFL
+//DB_Z_READ_BASE_HI
+#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_READ_BASE_HI
+#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_Z_WRITE_BASE_HI
+#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_WRITE_BASE_HI
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_HTILE_DATA_BASE_HI
+#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_RMI_L2_CACHE_CONTROL
+#define DB_RMI_L2_CACHE_CONTROL__Z_WR_POLICY__SHIFT 0x0
+#define DB_RMI_L2_CACHE_CONTROL__S_WR_POLICY__SHIFT 0x2
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_WR_POLICY__SHIFT 0x4
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_WR_POLICY__SHIFT 0x6
+#define DB_RMI_L2_CACHE_CONTROL__Z_RD_POLICY__SHIFT 0x10
+#define DB_RMI_L2_CACHE_CONTROL__S_RD_POLICY__SHIFT 0x12
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_RD_POLICY__SHIFT 0x14
+#define DB_RMI_L2_CACHE_CONTROL__Z_BIG_PAGE__SHIFT 0x18
+#define DB_RMI_L2_CACHE_CONTROL__S_BIG_PAGE__SHIFT 0x19
+#define DB_RMI_L2_CACHE_CONTROL__Z_NOALLOC__SHIFT 0x1a
+#define DB_RMI_L2_CACHE_CONTROL__S_NOALLOC__SHIFT 0x1b
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_NOALLOC__SHIFT 0x1c
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_NOALLOC__SHIFT 0x1d
+#define DB_RMI_L2_CACHE_CONTROL__Z_WR_POLICY_MASK 0x00000003L
+#define DB_RMI_L2_CACHE_CONTROL__S_WR_POLICY_MASK 0x0000000CL
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_WR_POLICY_MASK 0x00000030L
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_WR_POLICY_MASK 0x000000C0L
+#define DB_RMI_L2_CACHE_CONTROL__Z_RD_POLICY_MASK 0x00030000L
+#define DB_RMI_L2_CACHE_CONTROL__S_RD_POLICY_MASK 0x000C0000L
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_RD_POLICY_MASK 0x00300000L
+#define DB_RMI_L2_CACHE_CONTROL__Z_BIG_PAGE_MASK 0x01000000L
+#define DB_RMI_L2_CACHE_CONTROL__S_BIG_PAGE_MASK 0x02000000L
+#define DB_RMI_L2_CACHE_CONTROL__Z_NOALLOC_MASK 0x04000000L
+#define DB_RMI_L2_CACHE_CONTROL__S_NOALLOC_MASK 0x08000000L
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_NOALLOC_MASK 0x10000000L
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_NOALLOC_MASK 0x20000000L
+//TA_BC_BASE_ADDR
+#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_BC_BASE_ADDR_HI
+#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_1
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_2
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_3
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_2
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_3
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_WINDOW_OFFSET
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
+//PA_SC_WINDOW_SCISSOR_TL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_WINDOW_SCISSOR_BR
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_RULE
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
+//PA_SC_CLIPRECT_0_TL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_0_BR
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_TL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_BR
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_TL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_BR
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_TL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_BR
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_EDGERULE
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
+//PA_SU_HARDWARE_SCREEN_OFFSET
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
+//CB_TARGET_MASK
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
+//CB_SHADER_MASK
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
+//PA_SC_GENERIC_SCISSOR_TL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_GENERIC_SCISSOR_BR
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//COHER_DEST_BASE_0
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_1
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_SCISSOR_0_TL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_0_BR
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_1_TL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_1_BR
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_2_TL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_2_BR
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_3_TL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_3_BR
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_4_TL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_4_BR
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_5_TL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_5_BR
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_6_TL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_6_BR
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_7_TL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_7_BR
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_8_TL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_8_BR
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_9_TL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_9_BR
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_10_TL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_10_BR
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_11_TL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_11_BR
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_12_TL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_12_BR
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_13_TL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_13_BR
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_14_TL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_14_BR
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_15_TL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_15_BR
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_ZMIN_0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_1
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_1
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_2
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_2
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_3
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_3
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_4
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_4
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_5
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_5
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_6
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_6
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_7
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_7
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_8
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_8
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_9
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_9
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_10
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_10
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_11
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_11
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_12
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_12
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_13
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_13
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_14
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_14
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_15
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_15
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_RASTER_CONFIG
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1c
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x0C000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0x30000000L
+//PA_SC_RASTER_CONFIG_1
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x00000030L
+//PA_SC_SCREEN_EXTENT_CONTROL
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
+//PA_SC_TILE_STEERING_OVERRIDE
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT 0xc
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT 0x10
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT 0x14
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK 0x00003000L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK 0x00030000L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK 0x00300000L
+//CP_PERFMON_CNTX_CNTL
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+//CP_PIPEID
+#define CP_PIPEID__PIPE_ID__SHIFT 0x0
+#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
+//CP_RINGID
+#define CP_RINGID__RINGID__SHIFT 0x0
+#define CP_RINGID__RINGID_MASK 0x00000003L
+//CP_VMID
+#define CP_VMID__VMID__SHIFT 0x0
+#define CP_VMID__VMID_MASK 0x0000000FL
+//CONTEXT_RESERVED_REG0
+#define CONTEXT_RESERVED_REG0__DATA__SHIFT 0x0
+#define CONTEXT_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//CONTEXT_RESERVED_REG1
+#define CONTEXT_RESERVED_REG1__DATA__SHIFT 0x0
+#define CONTEXT_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//PA_SC_VRS_OVERRIDE_CNTL
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_RATE__SHIFT 0x4
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_SURFACE_ENABLE__SHIFT 0xc
+#define PA_SC_VRS_OVERRIDE_CNTL__RATE_HINT_WRITE_BACK_ENABLE__SHIFT 0xd
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE__SHIFT 0xe
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_RATE_MASK 0x000000F0L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_SURFACE_ENABLE_MASK 0x00001000L
+#define PA_SC_VRS_OVERRIDE_CNTL__RATE_HINT_WRITE_BACK_ENABLE_MASK 0x00002000L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE_MASK 0x00004000L
+//PA_SC_VRS_RATE_FEEDBACK_BASE
+#define PA_SC_VRS_RATE_FEEDBACK_BASE__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VRS_RATE_FEEDBACK_BASE_EXT
+#define PA_SC_VRS_RATE_FEEDBACK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//PA_SC_VRS_RATE_FEEDBACK_SIZE_XY
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__X_MAX__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__Y_MAX__SHIFT 0x10
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__X_MAX_MASK 0x000007FFL
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__Y_MAX_MASK 0x07FF0000L
+//PA_SC_VRS_RATE_CACHE_CNTL
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_RD__SHIFT 0x0
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_WR__SHIFT 0x1
+#define PA_SC_VRS_RATE_CACHE_CNTL__L1_RD_POLICY__SHIFT 0x2
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_RD_POLICY__SHIFT 0x4
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_WR_POLICY__SHIFT 0x6
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_RD_NOALLOC__SHIFT 0x8
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_WR_NOALLOC__SHIFT 0x9
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_RD__SHIFT 0xa
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_WR__SHIFT 0xb
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_RD__SHIFT 0xc
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_WR__SHIFT 0xd
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_RD_MASK 0x00000001L
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_WR_MASK 0x00000002L
+#define PA_SC_VRS_RATE_CACHE_CNTL__L1_RD_POLICY_MASK 0x0000000CL
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_RD_POLICY_MASK 0x00000030L
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_WR_POLICY_MASK 0x000000C0L
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_RD_NOALLOC_MASK 0x00000100L
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_WR_NOALLOC_MASK 0x00000200L
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_RD_MASK 0x00000400L
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_WR_MASK 0x00000800L
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_RD_MASK 0x00001000L
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_WR_MASK 0x00002000L
+//PA_SC_VRS_RATE_BASE
+#define PA_SC_VRS_RATE_BASE__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VRS_RATE_BASE_EXT
+#define PA_SC_VRS_RATE_BASE_EXT__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_BASE_EXT__TB_SYNC_SIM_ID__SHIFT 0x1c
+#define PA_SC_VRS_RATE_BASE_EXT__BASE_256B_MASK 0x000000FFL
+#define PA_SC_VRS_RATE_BASE_EXT__TB_SYNC_SIM_ID_MASK 0xF0000000L
+//PA_SC_VRS_RATE_SIZE_XY
+#define PA_SC_VRS_RATE_SIZE_XY__X_MAX__SHIFT 0x0
+#define PA_SC_VRS_RATE_SIZE_XY__Y_MAX__SHIFT 0x10
+#define PA_SC_VRS_RATE_SIZE_XY__X_MAX_MASK 0x000007FFL
+#define PA_SC_VRS_RATE_SIZE_XY__Y_MAX_MASK 0x07FF0000L
+//VGT_MULTI_PRIM_IB_RESET_INDX
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
+//CB_RMI_GL2_CACHE_CONTROL
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_WR_POLICY__SHIFT 0x0
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_WR_POLICY__SHIFT 0x2
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_RD_POLICY__SHIFT 0x14
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_RD_POLICY__SHIFT 0x16
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_L3_BYPASS__SHIFT 0x1a
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_L3_BYPASS__SHIFT 0x1b
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_BIG_PAGE__SHIFT 0x1f
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_WR_POLICY_MASK 0x00000003L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_WR_POLICY_MASK 0x0000000CL
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_RD_POLICY_MASK 0x00300000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_RD_POLICY_MASK 0x00C00000L
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_L3_BYPASS_MASK 0x04000000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_L3_BYPASS_MASK 0x08000000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_BIG_PAGE_MASK 0x80000000L
+//CB_BLEND_RED
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
+#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
+//CB_BLEND_GREEN
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
+//CB_BLEND_BLUE
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
+//CB_BLEND_ALPHA
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
+//CB_FDCC_CONTROL
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_WATERMARK__SHIFT 0x2
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01__SHIFT 0x8
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE__SHIFT 0x9
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0xa
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01__SHIFT 0xc
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE__SHIFT 0xd
+#define CB_FDCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG__SHIFT 0xe
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_WATERMARK_MASK 0x0000007CL
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01_MASK 0x00000100L
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE_MASK 0x00000200L
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00000400L
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01_MASK 0x00001000L
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE_MASK 0x00002000L
+#define CB_FDCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG_MASK 0x00004000L
+//CB_COVERAGE_OUT_CONTROL
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_ENABLE__SHIFT 0x0
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_MRT__SHIFT 0x1
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_CHANNEL__SHIFT 0x4
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_SAMPLES__SHIFT 0x8
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_ENABLE_MASK 0x00000001L
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_MRT_MASK 0x0000000EL
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_CHANNEL_MASK 0x00000030L
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_SAMPLES_MASK 0x00000F00L
+//DB_STENCIL_CONTROL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
+//DB_STENCILREFMASK
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
+//DB_STENCILREFMASK_BF
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
+//PA_CL_VPORT_XSCALE
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_1
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_1
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_1
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_1
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_1
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_1
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_2
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_2
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_2
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_2
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_2
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_2
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_3
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_3
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_3
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_3
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_3
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_3
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_4
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_4
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_4
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_4
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_4
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_4
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_5
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_5
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_5
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_5
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_5
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_5
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_6
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_6
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_6
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_6
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_6
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_6
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_7
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_7
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_7
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_7
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_7
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_7
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_8
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_8
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_8
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_8
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_8
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_8
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_9
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_9
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_9
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_9
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_9
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_9
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_10
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_10
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_10
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_10
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_10
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_10
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_11
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_11
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_11
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_11
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_11
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_11
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_12
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_12
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_12
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_12
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_12
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_12
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_13
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_13
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_13
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_13
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_13
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_13
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_14
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_14
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_14
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_14
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_14
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_14
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_15
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_15
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_15
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_15
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_15
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_15
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_X
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Y
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Z
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_W
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_X
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Y
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Z
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_W
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_X
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Y
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Z
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_W
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_X
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Y
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Z
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_W
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_X
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Y
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Z
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_W
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_X
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Y
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Z
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_W
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_PROG_NEAR_CLIP_Z
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_RATE_CNTL
+#define PA_RATE_CNTL__VERTEX_RATE__SHIFT 0x0
+#define PA_RATE_CNTL__PRIM_RATE__SHIFT 0x4
+#define PA_RATE_CNTL__VERTEX_RATE_MASK 0x0000000FL
+#define PA_RATE_CNTL__PRIM_RATE_MASK 0x000000F0L
+//SPI_PS_INPUT_CNTL_0
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_0__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_0__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_0__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_1
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_1__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_1__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_1__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_2
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_2__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_2__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_2__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_3
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_3__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_3__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_3__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_4
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_4__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_4__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_4__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_5
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_5__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_5__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_5__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_6
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_6__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_6__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_6__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_7
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_7__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_7__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_7__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_8
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_8__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_8__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_8__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_9
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_9__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_9__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_9__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_10
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_10__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_10__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_10__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_11
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_11__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_11__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_11__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_12
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_12__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_12__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_12__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_13
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_13__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_13__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_13__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_14
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_14__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_14__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_14__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_15
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_15__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_15__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_15__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_16
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_16__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_16__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_16__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_17
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_17__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_17__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_17__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_18
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_18__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_18__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_18__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_19
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_19__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_19__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_19__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_20
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_20__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_20__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_20__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_21
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_21__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_21__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_21__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_22
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_22__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_22__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_22__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_23
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_23__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_23__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_23__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_24
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_24__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_24__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_24__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_25
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_25__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_25__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_25__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_26
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_26__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_26__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_26__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_27
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_27__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_27__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_27__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_28
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_28__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_28__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_28__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_29
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_29__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_29__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_29__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_30
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_30__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_30__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_30__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_31
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_31__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_31__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_31__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
+//SPI_VS_OUT_CONFIG
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
+#define SPI_VS_OUT_CONFIG__NO_PC_EXPORT__SHIFT 0x7
+#define SPI_VS_OUT_CONFIG__PRIM_EXPORT_COUNT__SHIFT 0x8
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
+#define SPI_VS_OUT_CONFIG__NO_PC_EXPORT_MASK 0x00000080L
+#define SPI_VS_OUT_CONFIG__PRIM_EXPORT_COUNT_MASK 0x00001F00L
+//SPI_PS_INPUT_ENA
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_PS_INPUT_ADDR
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_INTERP_CONTROL_0
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+//SPI_PS_IN_CONTROL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
+#define SPI_PS_IN_CONTROL__NUM_PRIM_INTERP__SHIFT 0x9
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
+#define SPI_PS_IN_CONTROL__PS_W32_EN__SHIFT 0xf
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
+#define SPI_PS_IN_CONTROL__NUM_PRIM_INTERP_MASK 0x00003E00L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+#define SPI_PS_IN_CONTROL__PS_W32_EN_MASK 0x00008000L
+//SPI_BARYC_CNTL
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+//SPI_TMPRING_SIZE
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x07FFF000L
+//SPI_GFX_SCRATCH_BASE_LO
+#define SPI_GFX_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define SPI_GFX_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//SPI_GFX_SCRATCH_BASE_HI
+#define SPI_GFX_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define SPI_GFX_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//SPI_SHADER_IDX_FORMAT
+#define SPI_SHADER_IDX_FORMAT__IDX0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_IDX_FORMAT__IDX0_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_POS_FORMAT
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_POS_FORMAT__POS4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_POS_FORMAT__POS4_EXPORT_FORMAT_MASK 0x000F0000L
+//SPI_SHADER_Z_FORMAT
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_COL_FORMAT
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
+//SX_PS_DOWNCONVERT_CONTROL
+#define SX_PS_DOWNCONVERT_CONTROL__MRT0_FMT_MAPPING_DISABLE__SHIFT 0x0
+#define SX_PS_DOWNCONVERT_CONTROL__MRT1_FMT_MAPPING_DISABLE__SHIFT 0x1
+#define SX_PS_DOWNCONVERT_CONTROL__MRT2_FMT_MAPPING_DISABLE__SHIFT 0x2
+#define SX_PS_DOWNCONVERT_CONTROL__MRT3_FMT_MAPPING_DISABLE__SHIFT 0x3
+#define SX_PS_DOWNCONVERT_CONTROL__MRT4_FMT_MAPPING_DISABLE__SHIFT 0x4
+#define SX_PS_DOWNCONVERT_CONTROL__MRT5_FMT_MAPPING_DISABLE__SHIFT 0x5
+#define SX_PS_DOWNCONVERT_CONTROL__MRT6_FMT_MAPPING_DISABLE__SHIFT 0x6
+#define SX_PS_DOWNCONVERT_CONTROL__MRT7_FMT_MAPPING_DISABLE__SHIFT 0x7
+#define SX_PS_DOWNCONVERT_CONTROL__MRT0_FMT_MAPPING_DISABLE_MASK 0x00000001L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT1_FMT_MAPPING_DISABLE_MASK 0x00000002L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT2_FMT_MAPPING_DISABLE_MASK 0x00000004L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT3_FMT_MAPPING_DISABLE_MASK 0x00000008L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT4_FMT_MAPPING_DISABLE_MASK 0x00000010L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT5_FMT_MAPPING_DISABLE_MASK 0x00000020L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT6_FMT_MAPPING_DISABLE_MASK 0x00000040L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT7_FMT_MAPPING_DISABLE_MASK 0x00000080L
+//SX_PS_DOWNCONVERT
+#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
+#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
+#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
+#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
+#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
+#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
+#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
+#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
+#define SX_PS_DOWNCONVERT__MRT0_MASK 0x0000000FL
+#define SX_PS_DOWNCONVERT__MRT1_MASK 0x000000F0L
+#define SX_PS_DOWNCONVERT__MRT2_MASK 0x00000F00L
+#define SX_PS_DOWNCONVERT__MRT3_MASK 0x0000F000L
+#define SX_PS_DOWNCONVERT__MRT4_MASK 0x000F0000L
+#define SX_PS_DOWNCONVERT__MRT5_MASK 0x00F00000L
+#define SX_PS_DOWNCONVERT__MRT6_MASK 0x0F000000L
+#define SX_PS_DOWNCONVERT__MRT7_MASK 0xF0000000L
+//SX_BLEND_OPT_EPSILON
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0x0000000FL
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0x000000F0L
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0x00000F00L
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0x0000F000L
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0x000F0000L
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0x00F00000L
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0x0F000000L
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xF0000000L
+//SX_BLEND_OPT_CONTROL
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x00000001L
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x00000002L
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x00000010L
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x00000020L
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x00000100L
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x00000200L
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x00001000L
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x00002000L
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x00010000L
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x00020000L
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x00100000L
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x00200000L
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x01000000L
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x02000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000L
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000L
+//SX_MRT0_BLEND_OPT
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT1_BLEND_OPT
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT2_BLEND_OPT
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT3_BLEND_OPT
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT4_BLEND_OPT
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT5_BLEND_OPT
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT6_BLEND_OPT
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT7_BLEND_OPT
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//CB_BLEND0_CONTROL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND1_CONTROL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND2_CONTROL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND3_CONTROL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND4_CONTROL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND5_CONTROL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND6_CONTROL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND7_CONTROL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//GFX_COPY_STATE
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//PA_CL_POINT_X_RAD
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_Y_RAD
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_SIZE
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_CULL_RAD
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//VGT_DMA_BASE_HI
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
+//VGT_DMA_BASE
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
+//VGT_DRAW_INITIATOR
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
+//VGT_EVENT_ADDRESS_REG
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
+//GE_MAX_OUTPUT_PER_SUBGROUP
+#define GE_MAX_OUTPUT_PER_SUBGROUP__MAX_VERTS_PER_SUBGROUP__SHIFT 0x0
+#define GE_MAX_OUTPUT_PER_SUBGROUP__MAX_VERTS_PER_SUBGROUP_MASK 0x000003FFL
+//DB_DEPTH_CONTROL
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
+//DB_EQAA
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+//CB_COLOR_CONTROL
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
+#define CB_COLOR_CONTROL__ENABLE_1FRAG_PS_INVOKE__SHIFT 0x1
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
+#define CB_COLOR_CONTROL__ENABLE_1FRAG_PS_INVOKE_MASK 0x00000002L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
+//DB_SHADER_CONTROL
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
+#define DB_SHADER_CONTROL__PRE_SHADER_DEPTH_COVERAGE_ENABLE__SHIFT 0x17
+#define DB_SHADER_CONTROL__OREO_BLEND_ENABLE__SHIFT 0x18
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_ENABLE__SHIFT 0x19
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE__SHIFT 0x1a
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
+#define DB_SHADER_CONTROL__PRE_SHADER_DEPTH_COVERAGE_ENABLE_MASK 0x00800000L
+#define DB_SHADER_CONTROL__OREO_BLEND_ENABLE_MASK 0x01000000L
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_ENABLE_MASK 0x02000000L
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_MASK 0x1C000000L
+//PA_CL_CLIP_CNTL
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA__SHIFT 0x1c
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA_MASK 0x10000000L
+//PA_SU_SC_MODE_CNTL
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
+#define PA_SU_SC_MODE_CNTL__KEEP_TOGETHER_ENABLE__SHIFT 0x18
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
+#define PA_SU_SC_MODE_CNTL__KEEP_TOGETHER_ENABLE_MASK 0x01000000L
+//PA_CL_VTE_CNTL
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+//PA_CL_VS_OUT_CNTL
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1b
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT 0x1c
+#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT 0x1d
+#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT 0x1e
+#define PA_CL_VS_OUT_CNTL__USE_VTX_FSR_SELECT__SHIFT 0x1f
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x08000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK 0x10000000L
+#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK 0x20000000L
+#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK 0x40000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_FSR_SELECT_MASK 0x80000000L
+//PA_CL_NANINF_CNTL
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+//PA_SU_LINE_STIPPLE_CNTL
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+//PA_SU_LINE_STIPPLE_SCALE
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
+//PA_SU_PRIM_FILTER_CNTL
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+//PA_SU_SMALL_PRIM_FILTER_CNTL
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE__SHIFT 0x6
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE_MASK 0x00000040L
+//PA_CL_NGG_CNTL
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_DEPTH__SHIFT 0x2
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_DEPTH_MASK 0x000003FCL
+//PA_SU_OVER_RASTERIZATION_CNTL
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
+//PA_STEREO_CNTL
+#define PA_STEREO_CNTL__STEREO_MODE__SHIFT 0x1
+#define PA_STEREO_CNTL__RT_SLICE_MODE__SHIFT 0x5
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET__SHIFT 0x8
+#define PA_STEREO_CNTL__VP_ID_MODE__SHIFT 0x10
+#define PA_STEREO_CNTL__VP_ID_OFFSET__SHIFT 0x13
+#define PA_STEREO_CNTL__FSR_MODE__SHIFT 0x18
+#define PA_STEREO_CNTL__FSR_OFFSET__SHIFT 0x1a
+#define PA_STEREO_CNTL__STEREO_MODE_MASK 0x0000001EL
+#define PA_STEREO_CNTL__RT_SLICE_MODE_MASK 0x000000E0L
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET_MASK 0x00000F00L
+#define PA_STEREO_CNTL__VP_ID_MODE_MASK 0x00070000L
+#define PA_STEREO_CNTL__VP_ID_OFFSET_MASK 0x00780000L
+#define PA_STEREO_CNTL__FSR_MODE_MASK 0x03000000L
+#define PA_STEREO_CNTL__FSR_OFFSET_MASK 0x0C000000L
+//PA_STATE_STEREO_X
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VRS_CNTL
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT 0x3
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT 0x6
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT 0x9
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT 0xd
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT 0xe
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK 0x00000038L
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK 0x000001C0L
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK 0x00000E00L
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK 0x00002000L
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK 0x00004000L
+//PA_SU_POINT_SIZE
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
+//PA_SU_POINT_MINMAX
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
+//PA_SU_LINE_CNTL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
+//PA_SC_LINE_STIPPLE
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+//VGT_HOS_MAX_TESS_LEVEL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_MIN_TESS_LEVEL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
+//PA_SC_MODE_CNTL_0
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
+//PA_SC_MODE_CNTL_1
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+//VGT_ENHANCE
+#define VGT_ENHANCE__MISC__SHIFT 0x0
+#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//IA_ENHANCE
+#define IA_ENHANCE__MISC__SHIFT 0x0
+#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_DMA_SIZE
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_DMA_MAX_SIZE
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
+//VGT_DMA_INDEX_TYPE
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
+#define VGT_DMA_INDEX_TYPE__ATC__SHIFT 0x8
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
+#define VGT_DMA_INDEX_TYPE__MTYPE__SHIFT 0xb
+#define VGT_DMA_INDEX_TYPE__DISABLE_INSTANCE_PACKING__SHIFT 0xe
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x000000C0L
+#define VGT_DMA_INDEX_TYPE__ATC_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+#define VGT_DMA_INDEX_TYPE__MTYPE_MASK 0x00003800L
+#define VGT_DMA_INDEX_TYPE__DISABLE_INSTANCE_PACKING_MASK 0x00004000L
+//WD_ENHANCE
+#define WD_ENHANCE__MISC__SHIFT 0x0
+#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_EN
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
+//VGT_DMA_NUM_INSTANCES
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_RESET
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
+//VGT_EVENT_INITIATOR
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//VGT_DRAW_PAYLOAD_CNTL
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PRIM_PAYLOAD__SHIFT 0x3
+#define VGT_DRAW_PAYLOAD_CNTL__EN_DRAW_VP__SHIFT 0x4
+#define VGT_DRAW_PAYLOAD_CNTL__EN_FSR__SHIFT 0x5
+#define VGT_DRAW_PAYLOAD_CNTL__EN_VRS_RATE__SHIFT 0x6
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PRIM_PAYLOAD_MASK 0x00000008L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_DRAW_VP_MASK 0x00000010L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_FSR_MASK 0x00000020L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_VRS_RATE_MASK 0x00000040L
+//VGT_ESGS_RING_ITEMSIZE
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_REUSE_OFF
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+//DB_HTILE_SURFACE
+#define DB_HTILE_SURFACE__RESERVED_FIELD_1__SHIFT 0x0
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
+#define DB_HTILE_SURFACE__RESERVED_FIELD_2__SHIFT 0x2
+#define DB_HTILE_SURFACE__RESERVED_FIELD_3__SHIFT 0x3
+#define DB_HTILE_SURFACE__RESERVED_FIELD_4__SHIFT 0x4
+#define DB_HTILE_SURFACE__RESERVED_FIELD_5__SHIFT 0xa
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
+#define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT 0x11
+#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
+#define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK 0x00000001L
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK 0x00000004L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_3_MASK 0x00000008L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_4_MASK 0x000003F0L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_5_MASK 0x0000FC00L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK 0x00020000L
+#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
+//DB_SRESULTS_COMPARE_STATE0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+//DB_SRESULTS_COMPARE_STATE1
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+//DB_PRELOAD_CONTROL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
+//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
+//VGT_GS_MAX_VERT_OUT
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
+//GE_NGG_SUBGRP_CNTL
+#define GE_NGG_SUBGRP_CNTL__PRIM_AMP_FACTOR__SHIFT 0x0
+#define GE_NGG_SUBGRP_CNTL__THDS_PER_SUBGRP__SHIFT 0x9
+#define GE_NGG_SUBGRP_CNTL__PRIM_AMP_FACTOR_MASK 0x000001FFL
+#define GE_NGG_SUBGRP_CNTL__THDS_PER_SUBGRP_MASK 0x0003FE00L
+//VGT_TESS_DISTRIBUTION
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
+//VGT_SHADER_STAGES_EN
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS__SHIFT 0x8
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
+#define VGT_SHADER_STAGES_EN__HS_W32_EN__SHIFT 0x15
+#define VGT_SHADER_STAGES_EN__GS_W32_EN__SHIFT 0x16
+#define VGT_SHADER_STAGES_EN__VS_W32_EN__SHIFT 0x17
+#define VGT_SHADER_STAGES_EN__NGG_WAVE_ID_EN__SHIFT 0x18
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_EN__SHIFT 0x19
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_NO_MSG__SHIFT 0x1a
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS_MASK 0x00000100L
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00180000L
+#define VGT_SHADER_STAGES_EN__HS_W32_EN_MASK 0x00200000L
+#define VGT_SHADER_STAGES_EN__GS_W32_EN_MASK 0x00400000L
+#define VGT_SHADER_STAGES_EN__VS_W32_EN_MASK 0x00800000L
+#define VGT_SHADER_STAGES_EN__NGG_WAVE_ID_EN_MASK 0x01000000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_EN_MASK 0x02000000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_NO_MSG_MASK 0x04000000L
+//VGT_LS_HS_CONFIG
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
+//VGT_TF_PARAM
+#define VGT_TF_PARAM__TYPE__SHIFT 0x0
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
+#define VGT_TF_PARAM__NOT_USED__SHIFT 0x9
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD__SHIFT 0xa
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
+#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
+#define VGT_TF_PARAM__DETECT_ONE__SHIFT 0x13
+#define VGT_TF_PARAM__DETECT_ZERO__SHIFT 0x14
+#define VGT_TF_PARAM__MTYPE__SHIFT 0x17
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__NOT_USED_MASK 0x00000200L
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD_MASK 0x00003C00L
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00018000L
+#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
+#define VGT_TF_PARAM__DETECT_ONE_MASK 0x00080000L
+#define VGT_TF_PARAM__DETECT_ZERO_MASK 0x00100000L
+#define VGT_TF_PARAM__MTYPE_MASK 0x03800000L
+//DB_ALPHA_TO_MASK
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+//PA_SU_POLY_OFFSET_DB_FMT_CNTL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+//PA_SU_POLY_OFFSET_CLAMP
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_SCALE
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_OFFSET
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_SCALE
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_OFFSET
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_GS_INSTANCE_CNT
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
+#define VGT_GS_INSTANCE_CNT__EN_MAX_VERT_OUT_PER_GS_INSTANCE__SHIFT 0x1f
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
+#define VGT_GS_INSTANCE_CNT__EN_MAX_VERT_OUT_PER_GS_INSTANCE_MASK 0x80000000L
+//PA_SC_CENTROID_PRIORITY_0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
+//PA_SC_CENTROID_PRIORITY_1
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
+//PA_SC_LINE_CNTL
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION__SHIFT 0xd
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION_MASK 0x00002000L
+//PA_SC_AA_CONFIG
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
+#define PA_SC_AA_CONFIG__SAMPLE_COVERAGE_ENCODING__SHIFT 0x1c
+#define PA_SC_AA_CONFIG__COVERED_CENTROID_IS_CENTER__SHIFT 0x1d
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
+#define PA_SC_AA_CONFIG__SAMPLE_COVERAGE_ENCODING_MASK 0x10000000L
+#define PA_SC_AA_CONFIG__COVERED_CENTROID_IS_CENTER_MASK 0x20000000L
+//PA_SU_VTX_CNTL
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+//PA_CL_GB_VERT_CLIP_ADJ
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_VERT_DISC_ADJ
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_CLIP_ADJ
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_DISC_ADJ
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_MASK_X0Y0_X1Y0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
+//PA_SC_AA_MASK_X0Y1_X1Y1
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
+//PA_SC_SHADER_CONTROL
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
+#define PA_SC_SHADER_CONTROL__WAVE_BREAK_REGION_SIZE__SHIFT 0x5
+#define PA_SC_SHADER_CONTROL__DISABLE_OREO_CONFLICT_QUAD__SHIFT 0x7
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
+#define PA_SC_SHADER_CONTROL__WAVE_BREAK_REGION_SIZE_MASK 0x00000060L
+#define PA_SC_SHADER_CONTROL__DISABLE_OREO_CONFLICT_QUAD_MASK 0x00000080L
+//PA_SC_BINNER_CNTL_0
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_0__BIN_MAPPING_MODE__SHIFT 0x1d
+#define PA_SC_BINNER_CNTL_0__FSR_EXPANSION_ENABLE__SHIFT 0x1f
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION_MASK 0x10000000L
+#define PA_SC_BINNER_CNTL_0__BIN_MAPPING_MODE_MASK 0x60000000L
+#define PA_SC_BINNER_CNTL_0__FSR_EXPANSION_ENABLE_MASK 0x80000000L
+//PA_SC_BINNER_CNTL_1
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
+//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MULT__SHIFT 0x19
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_PBB_MULT__SHIFT 0x1b
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MULT_MASK 0x06000000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_PBB_MULT_MASK 0x18000000L
+//PA_SC_NGG_MODE_CNTL
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
+#define PA_SC_NGG_MODE_CNTL__DISABLE_FPOG_AND_DEALLOC_CONFLICT__SHIFT 0xc
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_DEALLOC__SHIFT 0xd
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_ATTRIBUTES__SHIFT 0xe
+#define PA_SC_NGG_MODE_CNTL__MAX_FPOVS_IN_WAVE__SHIFT 0x10
+#define PA_SC_NGG_MODE_CNTL__MAX_ATTRIBUTES_IN_WAVE__SHIFT 0x18
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
+#define PA_SC_NGG_MODE_CNTL__DISABLE_FPOG_AND_DEALLOC_CONFLICT_MASK 0x00001000L
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_DEALLOC_MASK 0x00002000L
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_ATTRIBUTES_MASK 0x00004000L
+#define PA_SC_NGG_MODE_CNTL__MAX_FPOVS_IN_WAVE_MASK 0x00FF0000L
+#define PA_SC_NGG_MODE_CNTL__MAX_ATTRIBUTES_IN_WAVE_MASK 0xFF000000L
+//PA_SC_BINNER_CNTL_2
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_X_MULT_BY_1P5X__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_Y_MULT_BY_1P5X__SHIFT 0x1
+#define PA_SC_BINNER_CNTL_2__ENABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_2__DUAL_LIGHT_SHAFT_IN_DRAW__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_2__LIGHT_SHAFT_DRAW_CALL_LIMIT__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_2__CONTEXT_DONE_EVENTS_PER_BIN__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_2__ZPP_ENABLED__SHIFT 0xb
+#define PA_SC_BINNER_CNTL_2__ZPP_OPTIMIZATION_ENABLED__SHIFT 0xc
+#define PA_SC_BINNER_CNTL_2__ZPP_AREA_THRESHOLD__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_2__DISABLE_NOPCEXPORT_BREAKBATCH_CONDITION__SHIFT 0x15
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_X_MULT_BY_1P5X_MASK 0x00000001L
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_Y_MULT_BY_1P5X_MASK 0x00000002L
+#define PA_SC_BINNER_CNTL_2__ENABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_2__DUAL_LIGHT_SHAFT_IN_DRAW_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_2__LIGHT_SHAFT_DRAW_CALL_LIMIT_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_2__CONTEXT_DONE_EVENTS_PER_BIN_MASK 0x00000780L
+#define PA_SC_BINNER_CNTL_2__ZPP_ENABLED_MASK 0x00000800L
+#define PA_SC_BINNER_CNTL_2__ZPP_OPTIMIZATION_ENABLED_MASK 0x00001000L
+#define PA_SC_BINNER_CNTL_2__ZPP_AREA_THRESHOLD_MASK 0x001FE000L
+#define PA_SC_BINNER_CNTL_2__DISABLE_NOPCEXPORT_BREAKBATCH_CONDITION_MASK 0x00200000L
+//CB_COLOR0_BASE
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_VIEW
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR0_INFO
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR0_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR0_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR0_ATTRIB
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR0_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR0_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR0_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR0_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR0_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR0_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR0_FDCC_CONTROL
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR0_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR0_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR0_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR0_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR0_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR0_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR0_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR0_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR0_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR0_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR0_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR0_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR0_DCC_BASE
+#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_BASE
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_VIEW
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR1_INFO
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR1_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR1_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR1_ATTRIB
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR1_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR1_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR1_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR1_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR1_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR1_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR1_FDCC_CONTROL
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR1_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR1_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR1_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR1_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR1_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR1_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR1_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR1_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR1_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR1_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR1_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR1_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR1_DCC_BASE
+#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_BASE
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_VIEW
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR2_INFO
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR2_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR2_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR2_ATTRIB
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR2_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR2_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR2_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR2_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR2_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR2_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR2_FDCC_CONTROL
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR2_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR2_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR2_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR2_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR2_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR2_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR2_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR2_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR2_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR2_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR2_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR2_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR2_DCC_BASE
+#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_BASE
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_VIEW
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR3_INFO
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR3_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR3_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR3_ATTRIB
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR3_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR3_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR3_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR3_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR3_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR3_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR3_FDCC_CONTROL
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR3_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR3_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR3_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR3_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR3_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR3_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR3_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR3_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR3_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR3_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR3_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR3_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR3_DCC_BASE
+#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_BASE
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_VIEW
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR4_INFO
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR4_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR4_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR4_ATTRIB
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR4_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR4_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR4_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR4_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR4_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR4_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR4_FDCC_CONTROL
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR4_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR4_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR4_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR4_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR4_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR4_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR4_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR4_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR4_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR4_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR4_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR4_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR4_DCC_BASE
+#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_BASE
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_VIEW
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR5_INFO
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR5_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR5_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR5_ATTRIB
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR5_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR5_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR5_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR5_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR5_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR5_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR5_FDCC_CONTROL
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR5_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR5_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR5_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR5_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR5_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR5_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR5_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR5_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR5_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR5_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR5_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR5_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR5_DCC_BASE
+#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_BASE
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_VIEW
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR6_INFO
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR6_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR6_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR6_ATTRIB
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR6_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR6_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR6_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR6_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR6_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR6_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR6_FDCC_CONTROL
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR6_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR6_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR6_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR6_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR6_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR6_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR6_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR6_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR6_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR6_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR6_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR6_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR6_DCC_BASE
+#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_BASE
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_VIEW
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR7_INFO
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR7_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR7_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR7_ATTRIB
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR7_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR7_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR7_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR7_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR7_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR7_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR7_FDCC_CONTROL
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR7_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR7_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR7_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR7_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR7_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR7_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR7_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR7_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR7_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR7_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR7_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR7_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR7_DCC_BASE
+#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_BASE_EXT
+#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_BASE_EXT
+#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_BASE_EXT
+#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_BASE_EXT
+#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_BASE_EXT
+#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_BASE_EXT
+#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_BASE_EXT
+#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_BASE_EXT
+#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_DCC_BASE_EXT
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_DCC_BASE_EXT
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_DCC_BASE_EXT
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_DCC_BASE_EXT
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_DCC_BASE_EXT
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_DCC_BASE_EXT
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_DCC_BASE_EXT
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_DCC_BASE_EXT
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_ATTRIB2
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR1_ATTRIB2
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR2_ATTRIB2
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR3_ATTRIB2
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR4_ATTRIB2
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR5_ATTRIB2
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR6_ATTRIB2
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR7_ATTRIB2
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR0_ATTRIB3
+#define CB_COLOR0_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR0_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR0_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR0_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR0_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR1_ATTRIB3
+#define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR1_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR1_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR1_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR2_ATTRIB3
+#define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR2_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR2_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR2_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR3_ATTRIB3
+#define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR3_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR3_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR3_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR4_ATTRIB3
+#define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR4_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR4_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR4_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR5_ATTRIB3
+#define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR5_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR5_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR5_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR6_ATTRIB3
+#define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR6_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR6_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR6_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR7_ATTRIB3
+#define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR7_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR7_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR7_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+
+
+// addressBlock: gc_pfvf_cpdec
+//CONFIG_RESERVED_REG0
+#define CONFIG_RESERVED_REG0__DATA__SHIFT 0x0
+#define CONFIG_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//CONFIG_RESERVED_REG1
+#define CONFIG_RESERVED_REG1__DATA__SHIFT 0x0
+#define CONFIG_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_CNTL
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
+#define CP_MEC_CNTL__MEC_ME2_PIPE2_RESET__SHIFT 0x16
+#define CP_MEC_CNTL__MEC_ME2_PIPE3_RESET__SHIFT 0x17
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x1b
+#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
+#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
+#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
+#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE2_RESET_MASK 0x00400000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE3_RESET_MASK 0x00800000L
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x08000000L
+#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
+#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
+#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
+#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
+//CP_ME_CNTL
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
+#define CP_ME_CNTL__PFP_PIPE0_DISABLE__SHIFT 0xc
+#define CP_ME_CNTL__PFP_PIPE1_DISABLE__SHIFT 0xd
+#define CP_ME_CNTL__ME_PIPE0_DISABLE__SHIFT 0xe
+#define CP_ME_CNTL__ME_PIPE1_DISABLE__SHIFT 0xf
+#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
+#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
+#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
+#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
+#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
+#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__PFP_PIPE0_DISABLE_MASK 0x00001000L
+#define CP_ME_CNTL__PFP_PIPE1_DISABLE_MASK 0x00002000L
+#define CP_ME_CNTL__ME_PIPE0_DISABLE_MASK 0x00004000L
+#define CP_ME_CNTL__ME_PIPE1_DISABLE_MASK 0x00008000L
+#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
+#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
+#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
+#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
+#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
+#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+
+
+// addressBlock: gc_pfvf_grbmdec
+//GRBM_GFX_CNTL
+#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL__CTXID__SHIFT 0xb
+#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
+#define GRBM_GFX_CNTL__CTXID_MASK 0x00003800L
+//GRBM_NOWHERE
+#define GRBM_NOWHERE__DATA__SHIFT 0x0
+#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfvf_padec
+//PA_SC_VRS_SURFACE_CNTL
+#define PA_SC_VRS_SURFACE_CNTL__VRC_CONTEXT_DONE_SYNC_DISABLE__SHIFT 0x6
+#define PA_SC_VRS_SURFACE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE__SHIFT 0x7
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_EVENT_MASK_DISABLE__SHIFT 0x8
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PREFETCH_DISABLE__SHIFT 0xd
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_NO_INV_DISABLE__SHIFT 0xe
+#define PA_SC_VRS_SURFACE_CNTL__VRC_NONSTALLING_FLUSH_DISABLE__SHIFT 0xf
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PARTIAL_FLUSH_DISABLE__SHIFT 0x10
+#define PA_SC_VRS_SURFACE_CNTL__VRC_AUTO_FLUSH__SHIFT 0x11
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EOP_SYNC_DISABLE__SHIFT 0x12
+#define PA_SC_VRS_SURFACE_CNTL__VRC_MAX_TAGS__SHIFT 0x13
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EVICT_POINT__SHIFT 0x1a
+#define PA_SC_VRS_SURFACE_CNTL__VRC_CONTEXT_DONE_SYNC_DISABLE_MASK 0x00000040L
+#define PA_SC_VRS_SURFACE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE_MASK 0x00000080L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_EVENT_MASK_DISABLE_MASK 0x00001F00L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PREFETCH_DISABLE_MASK 0x00002000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_NO_INV_DISABLE_MASK 0x00004000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_NONSTALLING_FLUSH_DISABLE_MASK 0x00008000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PARTIAL_FLUSH_DISABLE_MASK 0x00010000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_AUTO_FLUSH_MASK 0x00020000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EOP_SYNC_DISABLE_MASK 0x00040000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_MAX_TAGS_MASK 0x03F80000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EVICT_POINT_MASK 0xFC000000L
+//PA_SC_ENHANCE
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
+//PA_SC_ENHANCE_1
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
+#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
+#define PA_SC_ENHANCE_1__DISABLE_NONBINNED_LIVE_PRIM_DG1_LS0_CL0_EOPKT_POKE__SHIFT 0x5
+#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
+#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
+#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_COUNT_PIXELS__SHIFT 0xd
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG__SHIFT 0x17
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER__SHIFT 0x19
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1a
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE__SHIFT 0x1b
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX__SHIFT 0x1c
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1__SHIFT 0x1d
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI__SHIFT 0x1e
+#define PA_SC_ENHANCE_1__DISABLE_FSR_NEAR_AXIS_LINE_VERT_ORDER_SORT_FIX__SHIFT 0x1f
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
+#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
+#define PA_SC_ENHANCE_1__DISABLE_NONBINNED_LIVE_PRIM_DG1_LS0_CL0_EOPKT_POKE_MASK 0x00000020L
+#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
+#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
+#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_COUNT_PIXELS_MASK 0x00002000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG_MASK 0x00800000L
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER_MASK 0x02000000L
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION_MASK 0x04000000L
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE_MASK 0x08000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_MASK 0x10000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1_MASK 0x20000000L
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI_MASK 0x40000000L
+#define PA_SC_ENHANCE_1__DISABLE_FSR_NEAR_AXIS_LINE_VERT_ORDER_SORT_FIX_MASK 0x80000000L
+//PA_SC_ENHANCE_2
+#define PA_SC_ENHANCE_2__DISABLE_SC_MEM_MACRO_FINE_CLOCK_GATE__SHIFT 0x0
+#define PA_SC_ENHANCE_2__DISABLE_SC_DB_QUAD_INTF_FINE_CLOCK_GATE__SHIFT 0x1
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_QUAD_INTF_FINE_CLOCK_GATE__SHIFT 0x2
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_PRIM_INTF_FINE_CLOCK_GATE__SHIFT 0x3
+#define PA_SC_ENHANCE_2__ENABLE_LPOV_WAVE_BREAK__SHIFT 0x4
+#define PA_SC_ENHANCE_2__ENABLE_FPOV_WAVE_BREAK__SHIFT 0x5
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PRIM_PAYLOAD__SHIFT 0x7
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPE_SWITCH__SHIFT 0x8
+#define PA_SC_ENHANCE_2__DISABLE_FULL_TILE_WAVE_BREAK__SHIFT 0x9
+#define PA_SC_ENHANCE_2__ENABLE_VPZ_INJECTION_BEFORE_NULL_PRIMS__SHIFT 0xa
+#define PA_SC_ENHANCE_2__PBB_TIMEOUT_THRESHOLD_MODE__SHIFT 0xb
+#define PA_SC_ENHANCE_2__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xc
+#define PA_SC_ENHANCE_2__DISABLE_SC_SPI_INTF_EARLY_WAKEUP__SHIFT 0xd
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_INTF_EARLY_WAKEUP__SHIFT 0xe
+#define PA_SC_ENHANCE_2__DISABLE_EXPOSED_GT_DETAIL_RATE_TILE_COV_ADJ__SHIFT 0xf
+#define PA_SC_ENHANCE_2__PBB_WARP_CLK_MAIN_CLK_WAKEUP__SHIFT 0x10
+#define PA_SC_ENHANCE_2__PBB_MAIN_CLK_REG_BUSY_WAKEUP__SHIFT 0x11
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPELINE_RESET__SHIFT 0x12
+#define PA_SC_ENHANCE_2__DISABLE_SC_DBR_DATAPATH_FGCG__SHIFT 0x15
+#define PA_SC_ENHANCE_2__FSR_BB_OPTIMIZATION_DISABLE_OVERRIDE__SHIFT 0x16
+#define PA_SC_ENHANCE_2__PROCESS_RESET_FORCE_STILE_MASK_TO_ZERO__SHIFT 0x17
+#define PA_SC_ENHANCE_2__BREAK_WHEN_ONE_NULL_PRIM_BATCH__SHIFT 0x1a
+#define PA_SC_ENHANCE_2__NULL_PRIM_BREAK_BATCH_LIMIT__SHIFT 0x1b
+#define PA_SC_ENHANCE_2__DISABLE_MAX_DEALLOC_FORCE_EOV_RESET_N_WAVES_COUNT__SHIFT 0x1e
+#define PA_SC_ENHANCE_2__RSVD__SHIFT 0x1f
+#define PA_SC_ENHANCE_2__DISABLE_SC_MEM_MACRO_FINE_CLOCK_GATE_MASK 0x00000001L
+#define PA_SC_ENHANCE_2__DISABLE_SC_DB_QUAD_INTF_FINE_CLOCK_GATE_MASK 0x00000002L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_QUAD_INTF_FINE_CLOCK_GATE_MASK 0x00000004L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_PRIM_INTF_FINE_CLOCK_GATE_MASK 0x00000008L
+#define PA_SC_ENHANCE_2__ENABLE_LPOV_WAVE_BREAK_MASK 0x00000010L
+#define PA_SC_ENHANCE_2__ENABLE_FPOV_WAVE_BREAK_MASK 0x00000020L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PRIM_PAYLOAD_MASK 0x00000080L
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPE_SWITCH_MASK 0x00000100L
+#define PA_SC_ENHANCE_2__DISABLE_FULL_TILE_WAVE_BREAK_MASK 0x00000200L
+#define PA_SC_ENHANCE_2__ENABLE_VPZ_INJECTION_BEFORE_NULL_PRIMS_MASK 0x00000400L
+#define PA_SC_ENHANCE_2__PBB_TIMEOUT_THRESHOLD_MODE_MASK 0x00000800L
+#define PA_SC_ENHANCE_2__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00001000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_SPI_INTF_EARLY_WAKEUP_MASK 0x00002000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_INTF_EARLY_WAKEUP_MASK 0x00004000L
+#define PA_SC_ENHANCE_2__DISABLE_EXPOSED_GT_DETAIL_RATE_TILE_COV_ADJ_MASK 0x00008000L
+#define PA_SC_ENHANCE_2__PBB_WARP_CLK_MAIN_CLK_WAKEUP_MASK 0x00010000L
+#define PA_SC_ENHANCE_2__PBB_MAIN_CLK_REG_BUSY_WAKEUP_MASK 0x00020000L
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPELINE_RESET_MASK 0x00040000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_DBR_DATAPATH_FGCG_MASK 0x00200000L
+#define PA_SC_ENHANCE_2__FSR_BB_OPTIMIZATION_DISABLE_OVERRIDE_MASK 0x00400000L
+#define PA_SC_ENHANCE_2__PROCESS_RESET_FORCE_STILE_MASK_TO_ZERO_MASK 0x00800000L
+#define PA_SC_ENHANCE_2__BREAK_WHEN_ONE_NULL_PRIM_BATCH_MASK 0x04000000L
+#define PA_SC_ENHANCE_2__NULL_PRIM_BREAK_BATCH_LIMIT_MASK 0x38000000L
+#define PA_SC_ENHANCE_2__DISABLE_MAX_DEALLOC_FORCE_EOV_RESET_N_WAVES_COUNT_MASK 0x40000000L
+#define PA_SC_ENHANCE_2__RSVD_MASK 0x80000000L
+//PA_SC_ENHANCE_3
+#define PA_SC_ENHANCE_3__FORCE_USE_OF_SC_CENTROID_DATA__SHIFT 0x0
+#define PA_SC_ENHANCE_3__DISABLE_RB_MASK_COPY_FOR_NONP2_SA_PAIR_HARVEST__SHIFT 0x2
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
+#define PA_SC_ENHANCE_3__DISABLE_PKR_BCI_QUAD_NEW_PRIM_DATA_LOAD_OPTIMIZATION__SHIFT 0x4
+#define PA_SC_ENHANCE_3__DISABLE_CP_CONTEXT_DONE_PERFCOUNT_SAMPLE_EN__SHIFT 0x5
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_FIRST_PHASE_FILTER__SHIFT 0x6
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER__SHIFT 0x7
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_FOR_PBB_BINNED_PRIMS__SHIFT 0x8
+#define PA_SC_ENHANCE_3__DISABLE_SET_VPZ_DIRTY_EOPKT_LAST_PHASE_ONLY__SHIFT 0x9
+#define PA_SC_ENHANCE_3__DISABLE_PBB_EOP_OPTIMIZATION_WITH_SAME_CONTEXT_BATCHES__SHIFT 0xa
+#define PA_SC_ENHANCE_3__DISABLE_FAST_NULL_PRIM_OPTIMIZATION__SHIFT 0xb
+#define PA_SC_ENHANCE_3__USE_PBB_PRIM_STORAGE_WHEN_STALLED__SHIFT 0xc
+#define PA_SC_ENHANCE_3__DISABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION__SHIFT 0xd
+#define PA_SC_ENHANCE_3__DISABLE_ZPRE_PASS_OPTIMIZATION__SHIFT 0xe
+#define PA_SC_ENHANCE_3__DISABLE_EVENT_INCLUSION_IN_CONTEXT_STATES_PER_BIN__SHIFT 0xf
+#define PA_SC_ENHANCE_3__DISABLE_PIXEL_WAIT_SYNC_COUNTERS__SHIFT 0x10
+#define PA_SC_ENHANCE_3__DISABLE_SC_CPG_PSINVOC_SEDC_ISOLATION_ACCUM__SHIFT 0x11
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_FB_FINE_CLOCK_GATE__SHIFT 0x12
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_CACHE_RD_FINE_CLOCK_GATE__SHIFT 0x13
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_REZ_CNT_FOR_SPI_BACKPRESSURE_ONLY__SHIFT 0x14
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_CLK_CNT_FOR_SPI_BACKPRESSURE_ONLY__SHIFT 0x15
+#define PA_SC_ENHANCE_3__DO_NOT_INCLUDE_OREO_WAVEID_IN_FORCE_EOV_MAX_CNT_DISABLE__SHIFT 0x16
+#define PA_SC_ENHANCE_3__DISABLE_PWS_PRE_DEPTH_WAIT_SYNC_VPZ_INSERTION__SHIFT 0x17
+#define PA_SC_ENHANCE_3__PKR_CNT_FORCE_EOV_AT_QS_EMPTY_ONLY__SHIFT 0x18
+#define PA_SC_ENHANCE_3__PKR_S0_FORCE_EOV_STALL__SHIFT 0x19
+#define PA_SC_ENHANCE_3__PKR_S1_FORCE_EOV_STALL__SHIFT 0x1a
+#define PA_SC_ENHANCE_3__PKR_S2_FORCE_EOV_STALL__SHIFT 0x1b
+#define PA_SC_ENHANCE_3__ECO_SPARE0__SHIFT 0x1c
+#define PA_SC_ENHANCE_3__ECO_SPARE1__SHIFT 0x1d
+#define PA_SC_ENHANCE_3__ECO_SPARE2__SHIFT 0x1e
+#define PA_SC_ENHANCE_3__ECO_SPARE3__SHIFT 0x1f
+#define PA_SC_ENHANCE_3__FORCE_USE_OF_SC_CENTROID_DATA_MASK 0x00000001L
+#define PA_SC_ENHANCE_3__DISABLE_RB_MASK_COPY_FOR_NONP2_SA_PAIR_HARVEST_MASK 0x00000004L
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_BCI_QUAD_NEW_PRIM_DATA_LOAD_OPTIMIZATION_MASK 0x00000010L
+#define PA_SC_ENHANCE_3__DISABLE_CP_CONTEXT_DONE_PERFCOUNT_SAMPLE_EN_MASK 0x00000020L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_FIRST_PHASE_FILTER_MASK 0x00000040L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_MASK 0x00000080L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_FOR_PBB_BINNED_PRIMS_MASK 0x00000100L
+#define PA_SC_ENHANCE_3__DISABLE_SET_VPZ_DIRTY_EOPKT_LAST_PHASE_ONLY_MASK 0x00000200L
+#define PA_SC_ENHANCE_3__DISABLE_PBB_EOP_OPTIMIZATION_WITH_SAME_CONTEXT_BATCHES_MASK 0x00000400L
+#define PA_SC_ENHANCE_3__DISABLE_FAST_NULL_PRIM_OPTIMIZATION_MASK 0x00000800L
+#define PA_SC_ENHANCE_3__USE_PBB_PRIM_STORAGE_WHEN_STALLED_MASK 0x00001000L
+#define PA_SC_ENHANCE_3__DISABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION_MASK 0x00002000L
+#define PA_SC_ENHANCE_3__DISABLE_ZPRE_PASS_OPTIMIZATION_MASK 0x00004000L
+#define PA_SC_ENHANCE_3__DISABLE_EVENT_INCLUSION_IN_CONTEXT_STATES_PER_BIN_MASK 0x00008000L
+#define PA_SC_ENHANCE_3__DISABLE_PIXEL_WAIT_SYNC_COUNTERS_MASK 0x00010000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_CPG_PSINVOC_SEDC_ISOLATION_ACCUM_MASK 0x00020000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_FB_FINE_CLOCK_GATE_MASK 0x00040000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_CACHE_RD_FINE_CLOCK_GATE_MASK 0x00080000L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_REZ_CNT_FOR_SPI_BACKPRESSURE_ONLY_MASK 0x00100000L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_CLK_CNT_FOR_SPI_BACKPRESSURE_ONLY_MASK 0x00200000L
+#define PA_SC_ENHANCE_3__DO_NOT_INCLUDE_OREO_WAVEID_IN_FORCE_EOV_MAX_CNT_DISABLE_MASK 0x00400000L
+#define PA_SC_ENHANCE_3__DISABLE_PWS_PRE_DEPTH_WAIT_SYNC_VPZ_INSERTION_MASK 0x00800000L
+#define PA_SC_ENHANCE_3__PKR_CNT_FORCE_EOV_AT_QS_EMPTY_ONLY_MASK 0x01000000L
+#define PA_SC_ENHANCE_3__PKR_S0_FORCE_EOV_STALL_MASK 0x02000000L
+#define PA_SC_ENHANCE_3__PKR_S1_FORCE_EOV_STALL_MASK 0x04000000L
+#define PA_SC_ENHANCE_3__PKR_S2_FORCE_EOV_STALL_MASK 0x08000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE0_MASK 0x10000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE1_MASK 0x20000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE2_MASK 0x40000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE3_MASK 0x80000000L
+//PA_SC_BINNER_CNTL_OVERRIDE
+#define PA_SC_BINNER_CNTL_OVERRIDE__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_OVERRIDE__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_OVERRIDE__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_OVERRIDE__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_OVERRIDE__DIRECT_OVERRIDE_MODE__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_OVERRIDE__OVERRIDE__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_OVERRIDE__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_OVERRIDE__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_OVERRIDE__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__DIRECT_OVERRIDE_MODE_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__OVERRIDE_MASK 0xF0000000L
+//PA_SC_PBB_OVERRIDE_FLAG
+#define PA_SC_PBB_OVERRIDE_FLAG__OVERRIDE__SHIFT 0x0
+#define PA_SC_PBB_OVERRIDE_FLAG__PIPE_ID__SHIFT 0x1
+#define PA_SC_PBB_OVERRIDE_FLAG__OVERRIDE_MASK 0x00000001L
+#define PA_SC_PBB_OVERRIDE_FLAG__PIPE_ID_MASK 0x00000002L
+//PA_SC_DSM_CNTL
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
+//PA_SC_TILE_STEERING_CREST_OVERRIDE
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SA_SELECT__SHIFT 0x8
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__FORCE_TILE_STEERING_OVERRIDE_USE__SHIFT 0x1f
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SA_SELECT_MASK 0x00000700L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__FORCE_TILE_STEERING_OVERRIDE_USE_MASK 0x80000000L
+//PA_SC_FIFO_SIZE
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
+//PA_SC_IF_FIFO_SIZE
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
+//PA_SC_PACKER_WAVE_ID_CNTL
+#define PA_SC_PACKER_WAVE_ID_CNTL__WAVE_TABLE_SIZE__SHIFT 0x0
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_DB_WAVE_IF_FIFO_SIZE__SHIFT 0xa
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_DB_WAVE_IF_FGCG_EN__SHIFT 0x10
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_SPI_WAVE_IF_FIFO_SIZE__SHIFT 0x11
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_SPI_WAVE_IF_FGCG_EN__SHIFT 0x17
+#define PA_SC_PACKER_WAVE_ID_CNTL__DEBUG_CONFLICT_QUAD__SHIFT 0x18
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_OREO_CONFLICT_QUAD__SHIFT 0x1f
+#define PA_SC_PACKER_WAVE_ID_CNTL__WAVE_TABLE_SIZE_MASK 0x000003FFL
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_DB_WAVE_IF_FIFO_SIZE_MASK 0x0000FC00L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_DB_WAVE_IF_FGCG_EN_MASK 0x00010000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_SPI_WAVE_IF_FIFO_SIZE_MASK 0x007E0000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_SPI_WAVE_IF_FGCG_EN_MASK 0x00800000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DEBUG_CONFLICT_QUAD_MASK 0x0F000000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_OREO_CONFLICT_QUAD_MASK 0x80000000L
+//PA_SC_ATM_CNTL
+#define PA_SC_ATM_CNTL__SC_PC_IF_SIZE__SHIFT 0x0
+#define PA_SC_ATM_CNTL__DISABLE_SC_PC_IF_FGCG_EN__SHIFT 0x7
+#define PA_SC_ATM_CNTL__MAX_ATTRIBUTES_IN_WAVE__SHIFT 0x8
+#define PA_SC_ATM_CNTL__DISABLE_MAX_ATTRIBUTES__SHIFT 0x10
+#define PA_SC_ATM_CNTL__SELECT_MAX_ATTRIBUTES__SHIFT 0x11
+#define PA_SC_ATM_CNTL__SC_PC_IF_SIZE_MASK 0x0000003FL
+#define PA_SC_ATM_CNTL__DISABLE_SC_PC_IF_FGCG_EN_MASK 0x00000080L
+#define PA_SC_ATM_CNTL__MAX_ATTRIBUTES_IN_WAVE_MASK 0x0000FF00L
+#define PA_SC_ATM_CNTL__DISABLE_MAX_ATTRIBUTES_MASK 0x00010000L
+#define PA_SC_ATM_CNTL__SELECT_MAX_ATTRIBUTES_MASK 0x00020000L
+//PA_SC_PKR_WAVE_TABLE_CNTL
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
+//PA_SC_FORCE_EOV_MAX_CNTS
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
+//PA_SC_BINNER_EVENT_CNTL_0
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_1
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_1__WAIT_SYNC__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_1__BIN_CONF_OVERRIDE_CHECK__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_1__WAIT_SYNC_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__BIN_CONF_OVERRIDE_CHECK_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_2
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_35__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_41__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_35_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_41_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_3
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_50__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_DRAW__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_PIPELINE_NOT_USED__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_3__DRAW_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_50_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_DRAW_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_PIPELINE_NOT_USED_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__DRAW_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_TIMEOUT_COUNTER
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_BINNER_PERF_CNTL_0
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
+//PA_SC_BINNER_PERF_CNTL_1
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
+//PA_SC_BINNER_PERF_CNTL_2
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
+//PA_SC_BINNER_PERF_CNTL_3
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_TRAP_SCREEN_HV_LOCK
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_PH_INTERFACE_FIFO_SIZE
+#define PA_PH_INTERFACE_FIFO_SIZE__PA_PH_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_PH_INTERFACE_FIFO_SIZE__PH_SC_IF_FIFO_SIZE__SHIFT 0x10
+#define PA_PH_INTERFACE_FIFO_SIZE__PA_PH_IF_FIFO_SIZE_MASK 0x000003FFL
+#define PA_PH_INTERFACE_FIFO_SIZE__PH_SC_IF_FIFO_SIZE_MASK 0x003F0000L
+//PA_PH_ENHANCE
+#define PA_PH_ENHANCE__ECO_SPARE0__SHIFT 0x0
+#define PA_PH_ENHANCE__ECO_SPARE1__SHIFT 0x1
+#define PA_PH_ENHANCE__ECO_SPARE2__SHIFT 0x2
+#define PA_PH_ENHANCE__ECO_SPARE3__SHIFT 0x3
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_FINE_CLOCK_GATE__SHIFT 0x4
+#define PA_PH_ENHANCE__DISABLE_FOPKT__SHIFT 0x5
+#define PA_PH_ENHANCE__DISABLE_FOPKT_SCAN_POST_RESET__SHIFT 0x6
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_CLKEN_CLOCK_GATE__SHIFT 0x7
+#define PA_PH_ENHANCE__DISABLE_PH_DEBUG_REG_FGCG__SHIFT 0x8
+#define PA_PH_ENHANCE__DISABLE_PH_PERF_REG_FGCG__SHIFT 0x9
+#define PA_PH_ENHANCE__ENABLE_PH_INTF_CLKEN_STRETCH__SHIFT 0xa
+#define PA_PH_ENHANCE__DISABLE_USE_LAST_PH_ARBITER_PERFCOUNTER_SAMPLE_EVENT__SHIFT 0xd
+#define PA_PH_ENHANCE__USE_PERFCOUNTER_START_STOP_EVENTS__SHIFT 0xe
+#define PA_PH_ENHANCE__FORCE_PH_PERFCOUNTER_SAMPLE_ENABLE_ON__SHIFT 0xf
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE__SHIFT 0x10
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_DISABLE__SHIFT 0x11
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_PERFCOUNTER_COUNT_MODE__SHIFT 0x12
+#define PA_PH_ENHANCE__ECO_SPARE0_MASK 0x00000001L
+#define PA_PH_ENHANCE__ECO_SPARE1_MASK 0x00000002L
+#define PA_PH_ENHANCE__ECO_SPARE2_MASK 0x00000004L
+#define PA_PH_ENHANCE__ECO_SPARE3_MASK 0x00000008L
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_FINE_CLOCK_GATE_MASK 0x00000010L
+#define PA_PH_ENHANCE__DISABLE_FOPKT_MASK 0x00000020L
+#define PA_PH_ENHANCE__DISABLE_FOPKT_SCAN_POST_RESET_MASK 0x00000040L
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_CLKEN_CLOCK_GATE_MASK 0x00000080L
+#define PA_PH_ENHANCE__DISABLE_PH_DEBUG_REG_FGCG_MASK 0x00000100L
+#define PA_PH_ENHANCE__DISABLE_PH_PERF_REG_FGCG_MASK 0x00000200L
+#define PA_PH_ENHANCE__ENABLE_PH_INTF_CLKEN_STRETCH_MASK 0x00001C00L
+#define PA_PH_ENHANCE__DISABLE_USE_LAST_PH_ARBITER_PERFCOUNTER_SAMPLE_EVENT_MASK 0x00002000L
+#define PA_PH_ENHANCE__USE_PERFCOUNTER_START_STOP_EVENTS_MASK 0x00004000L
+#define PA_PH_ENHANCE__FORCE_PH_PERFCOUNTER_SAMPLE_ENABLE_ON_MASK 0x00008000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_MASK 0x00010000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_DISABLE_MASK 0x00020000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_PERFCOUNTER_COUNT_MODE_MASK 0x00040000L
+//PA_SC_VRS_SURFACE_CNTL_1
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE__SHIFT 0x0
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_SHADER_KILL_ENABLE__SHIFT 0x1
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK_OPS_ENABLE__SHIFT 0x2
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_RATE_16XAA__SHIFT 0x3
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_Z_OR_STENCIL__SHIFT 0x4
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_PRE_SHADER_DEPTH_COVERAGE_ENABLED__SHIFT 0x5
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POST_DEPTH_IMPORT__SHIFT 0x6
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POPS__SHIFT 0x7
+#define PA_SC_VRS_SURFACE_CNTL_1__USE_ONLY_VRS_RATE_FINE_CFG__SHIFT 0x8
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_VRS_RATE_NORMALIZATION__SHIFT 0xc
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_PS_ITER_RATE_COMBINER_PASSTHRU_OVERRIDE__SHIFT 0xf
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_CMASK_RATE_HINT_FORCE_ZERO_OVERRIDE__SHIFT 0x13
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_DETAIL_TO_EXPOSED_RATE_CLAMPING__SHIFT 0x14
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_0__SHIFT 0x15
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_1__SHIFT 0x16
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_2__SHIFT 0x17
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_3__SHIFT 0x18
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_4__SHIFT 0x19
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_5__SHIFT 0x1a
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_6__SHIFT 0x1b
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_7__SHIFT 0x1c
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_8__SHIFT 0x1d
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_9__SHIFT 0x1e
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_10__SHIFT 0x1f
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK 0x00000001L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_SHADER_KILL_ENABLE_MASK 0x00000002L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK_OPS_ENABLE_MASK 0x00000004L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_RATE_16XAA_MASK 0x00000008L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_Z_OR_STENCIL_MASK 0x00000010L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_PRE_SHADER_DEPTH_COVERAGE_ENABLED_MASK 0x00000020L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POST_DEPTH_IMPORT_MASK 0x00000040L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POPS_MASK 0x00000080L
+#define PA_SC_VRS_SURFACE_CNTL_1__USE_ONLY_VRS_RATE_FINE_CFG_MASK 0x00000100L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_VRS_RATE_NORMALIZATION_MASK 0x00001000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_PS_ITER_RATE_COMBINER_PASSTHRU_OVERRIDE_MASK 0x00008000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_CMASK_RATE_HINT_FORCE_ZERO_OVERRIDE_MASK 0x00080000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_DETAIL_TO_EXPOSED_RATE_CLAMPING_MASK 0x00100000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_0_MASK 0x00200000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_1_MASK 0x00400000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_2_MASK 0x00800000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_3_MASK 0x01000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_4_MASK 0x02000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_5_MASK 0x04000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_6_MASK 0x08000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_7_MASK 0x10000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_8_MASK 0x20000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_9_MASK 0x40000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_10_MASK 0x80000000L
+
+
+// addressBlock: gc_pfvf_sqdec
+//SQ_RUNTIME_CONFIG
+#define SQ_RUNTIME_CONFIG__UNUSED_REGISTER__SHIFT 0x0
+#define SQ_RUNTIME_CONFIG__UNUSED_REGISTER_MASK 0x00000001L
+//SQ_DEBUG_STS_GLOBAL
+#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_BUSY__SHIFT 0x1
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA0__SHIFT 0x4
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA1__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_BUSY_MASK 0x00000002L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA0_MASK 0x0000FFF0L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA1_MASK 0x0FFF0000L
+//SQ_DEBUG_STS_GLOBAL2
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX0__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX1__SHIFT 0x8
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_COMPUTE__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX0_MASK 0x000000FFL
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX1_MASK 0x0000FF00L
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_COMPUTE_MASK 0x00FF0000L
+//SH_MEM_BASES
+#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
+#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
+#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
+#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
+//SH_MEM_CONFIG
+#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
+#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x2
+#define SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT 0xe
+#define SH_MEM_CONFIG__ICACHE_USE_GL1__SHIFT 0x12
+#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
+#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x0000000CL
+#define SH_MEM_CONFIG__INITIAL_INST_PREFETCH_MASK 0x0000C000L
+#define SH_MEM_CONFIG__ICACHE_USE_GL1_MASK 0x00040000L
+//SQ_DEBUG
+#define SQ_DEBUG__SINGLE_MEMOP__SHIFT 0x0
+#define SQ_DEBUG__SINGLE_ALU_OP__SHIFT 0x1
+#define SQ_DEBUG__WAIT_DEP_CTR_ZERO__SHIFT 0x2
+#define SQ_DEBUG__SINGLE_MEMOP_MASK 0x00000001L
+#define SQ_DEBUG__SINGLE_ALU_OP_MASK 0x00000002L
+#define SQ_DEBUG__WAIT_DEP_CTR_ZERO_MASK 0x00000004L
+//SQ_SHADER_TBA_LO
+#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TBA_HI
+#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TBA_HI__TRAP_EN__SHIFT 0x1f
+#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
+#define SQ_SHADER_TBA_HI__TRAP_EN_MASK 0x80000000L
+//SQ_SHADER_TMA_LO
+#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TMA_HI
+#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
+
+
+// addressBlock: gc_pfonly_cpdec
+//CP_DEBUG_2
+#define CP_DEBUG_2__CHIU_NOALLOC_OVERRIDE__SHIFT 0xc
+#define CP_DEBUG_2__RCIU_SECURE_CHECK_DISABLE__SHIFT 0xd
+#define CP_DEBUG_2__RB_PACKET_INJECTOR_DISABLE__SHIFT 0xe
+#define CP_DEBUG_2__CNTX_DONE_COPY_STATE_DISABLE__SHIFT 0xf
+#define CP_DEBUG_2__NOP_DISCARD_DISABLE__SHIFT 0x10
+#define CP_DEBUG_2__DC_INTERLEAVE_DISABLE__SHIFT 0x11
+#define CP_DEBUG_2__BC_LOOKUP_CB_DB_FLUSH_DISABLE__SHIFT 0x1b
+#define CP_DEBUG_2__DC_FORCE_CLK_EN__SHIFT 0x1c
+#define CP_DEBUG_2__DC_DISABLE_BROADCAST__SHIFT 0x1d
+#define CP_DEBUG_2__NOT_EOP_HW_DETECT_DISABLE__SHIFT 0x1e
+#define CP_DEBUG_2__PFP_DDID_HW_DETECT_DISABLE__SHIFT 0x1f
+#define CP_DEBUG_2__CHIU_NOALLOC_OVERRIDE_MASK 0x00001000L
+#define CP_DEBUG_2__RCIU_SECURE_CHECK_DISABLE_MASK 0x00002000L
+#define CP_DEBUG_2__RB_PACKET_INJECTOR_DISABLE_MASK 0x00004000L
+#define CP_DEBUG_2__CNTX_DONE_COPY_STATE_DISABLE_MASK 0x00008000L
+#define CP_DEBUG_2__NOP_DISCARD_DISABLE_MASK 0x00010000L
+#define CP_DEBUG_2__DC_INTERLEAVE_DISABLE_MASK 0x00020000L
+#define CP_DEBUG_2__BC_LOOKUP_CB_DB_FLUSH_DISABLE_MASK 0x08000000L
+#define CP_DEBUG_2__DC_FORCE_CLK_EN_MASK 0x10000000L
+#define CP_DEBUG_2__DC_DISABLE_BROADCAST_MASK 0x20000000L
+#define CP_DEBUG_2__NOT_EOP_HW_DETECT_DISABLE_MASK 0x40000000L
+#define CP_DEBUG_2__PFP_DDID_HW_DETECT_DISABLE_MASK 0x80000000L
+//CP_FETCHER_SOURCE
+#define CP_FETCHER_SOURCE__ME_SRC__SHIFT 0x0
+#define CP_FETCHER_SOURCE__ME_SRC_MASK 0x00000001L
+//CP_DFY_CNTL
+#define CP_DFY_CNTL__POLICY__SHIFT 0x8
+#define CP_DFY_CNTL__VOL__SHIFT 0xa
+#define CP_DFY_CNTL__MTYPE__SHIFT 0xc
+#define CP_DFY_CNTL__REPEATER_FGCG_DISABLE__SHIFT 0x19
+#define CP_DFY_CNTL__TPI_SDP_SEL__SHIFT 0x1a
+#define CP_DFY_CNTL__WRITE_DIS__SHIFT 0x1b
+#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
+#define CP_DFY_CNTL__MODE__SHIFT 0x1d
+#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DFY_CNTL__POLICY_MASK 0x00000300L
+#define CP_DFY_CNTL__VOL_MASK 0x00000400L
+#define CP_DFY_CNTL__MTYPE_MASK 0x00007000L
+#define CP_DFY_CNTL__REPEATER_FGCG_DISABLE_MASK 0x02000000L
+#define CP_DFY_CNTL__TPI_SDP_SEL_MASK 0x04000000L
+#define CP_DFY_CNTL__WRITE_DIS_MASK 0x08000000L
+#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
+#define CP_DFY_CNTL__MODE_MASK 0x60000000L
+#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
+//CP_DFY_STAT
+#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
+#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
+#define CP_DFY_STAT__BUSY__SHIFT 0x1f
+#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
+#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
+#define CP_DFY_STAT__BUSY_MASK 0x80000000L
+//CP_DFY_ADDR_HI
+#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DFY_ADDR_LO
+#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
+#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
+//CP_DFY_DATA_0
+#define CP_DFY_DATA_0__DATA__SHIFT 0x0
+#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_1
+#define CP_DFY_DATA_1__DATA__SHIFT 0x0
+#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_2
+#define CP_DFY_DATA_2__DATA__SHIFT 0x0
+#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_3
+#define CP_DFY_DATA_3__DATA__SHIFT 0x0
+#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_4
+#define CP_DFY_DATA_4__DATA__SHIFT 0x0
+#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_5
+#define CP_DFY_DATA_5__DATA__SHIFT 0x0
+#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_6
+#define CP_DFY_DATA_6__DATA__SHIFT 0x0
+#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_7
+#define CP_DFY_DATA_7__DATA__SHIFT 0x0
+#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_8
+#define CP_DFY_DATA_8__DATA__SHIFT 0x0
+#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_9
+#define CP_DFY_DATA_9__DATA__SHIFT 0x0
+#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_10
+#define CP_DFY_DATA_10__DATA__SHIFT 0x0
+#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_11
+#define CP_DFY_DATA_11__DATA__SHIFT 0x0
+#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_12
+#define CP_DFY_DATA_12__DATA__SHIFT 0x0
+#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_13
+#define CP_DFY_DATA_13__DATA__SHIFT 0x0
+#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_14
+#define CP_DFY_DATA_14__DATA__SHIFT 0x0
+#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_15
+#define CP_DFY_DATA_15__DATA__SHIFT 0x0
+#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_CMD
+#define CP_DFY_CMD__SIZE__SHIFT 0x10
+#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_pfonly_cpphqddec
+//CP_HPD_MES_ROQ_OFFSETS
+#define CP_HPD_MES_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_MES_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_MES_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_MES_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_MES_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_MES_ROQ_OFFSETS__IB_OFFSET_MASK 0x007F0000L
+//CP_HPD_ROQ_OFFSETS
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x007F0000L
+//CP_HPD_STATUS0
+#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_HPD_STATUS0__MASTER_QUEUE_IDLE_DIS__SHIFT 0x1b
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK__SHIFT 0x1c
+#define CP_HPD_STATUS0__FREEZE_QUEUE_STATE__SHIFT 0x1e
+#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_HPD_STATUS0__MASTER_QUEUE_IDLE_DIS_MASK 0x08000000L
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK_MASK 0x30000000L
+#define CP_HPD_STATUS0__FREEZE_QUEUE_STATE_MASK 0x40000000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_didtdec
+//DIDT_INDEX_AUTO_INCR_EN
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN__SHIFT 0x0
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN_MASK 0x00000001L
+//DIDT_EDC_CTRL
+#define DIDT_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0xa
+#define DIDT_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0xe
+#define DIDT_EDC_CTRL__EDC_ALGORITHM_MODE__SHIFT 0xf
+#define DIDT_EDC_CTRL__EDC_AVGDIV__SHIFT 0x10
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_SEL__SHIFT 0x14
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_BIT_NUMS__SHIFT 0x15
+#define DIDT_EDC_CTRL__RLC_FORCE_STALL_EN__SHIFT 0x18
+#define DIDT_EDC_CTRL__RLC_STALL_LEVEL_SEL__SHIFT 0x19
+#define DIDT_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000003F0L
+#define DIDT_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x00003C00L
+#define DIDT_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00004000L
+#define DIDT_EDC_CTRL__EDC_ALGORITHM_MODE_MASK 0x00008000L
+#define DIDT_EDC_CTRL__EDC_AVGDIV_MASK 0x000F0000L
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_SEL_MASK 0x00100000L
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_BIT_NUMS_MASK 0x00E00000L
+#define DIDT_EDC_CTRL__RLC_FORCE_STALL_EN_MASK 0x01000000L
+#define DIDT_EDC_CTRL__RLC_STALL_LEVEL_SEL_MASK 0x02000000L
+//DIDT_EDC_THROTTLE_CTRL
+#define DIDT_EDC_THROTTLE_CTRL__SQ_STALL_EN__SHIFT 0x0
+#define DIDT_EDC_THROTTLE_CTRL__DB_STALL_EN__SHIFT 0x1
+#define DIDT_EDC_THROTTLE_CTRL__TCP_STALL_EN__SHIFT 0x2
+#define DIDT_EDC_THROTTLE_CTRL__TD_STALL_EN__SHIFT 0x3
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_EN__SHIFT 0x4
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_MODE__SHIFT 0x5
+#define DIDT_EDC_THROTTLE_CTRL__SQ_STALL_EN_MASK 0x00000001L
+#define DIDT_EDC_THROTTLE_CTRL__DB_STALL_EN_MASK 0x00000002L
+#define DIDT_EDC_THROTTLE_CTRL__TCP_STALL_EN_MASK 0x00000004L
+#define DIDT_EDC_THROTTLE_CTRL__TD_STALL_EN_MASK 0x00000008L
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_EN_MASK 0x00000010L
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_MODE_MASK 0x000000E0L
+//DIDT_EDC_THRESHOLD
+#define DIDT_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//DIDT_EDC_STALL_PATTERN_1_2
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_3_4
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_5_6
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_7
+#define DIDT_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_EDC_STATUS
+#define DIDT_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
+#define DIDT_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
+#define DIDT_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
+#define DIDT_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
+//DIDT_EDC_DYNAMIC_THRESHOLD_RO
+#define DIDT_EDC_DYNAMIC_THRESHOLD_RO__EDC_DYNAMIC_THRESHOLD_RO__SHIFT 0x0
+#define DIDT_EDC_DYNAMIC_THRESHOLD_RO__EDC_DYNAMIC_THRESHOLD_RO_MASK 0x00000001L
+//DIDT_EDC_OVERFLOW
+#define DIDT_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
+#define DIDT_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
+#define DIDT_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
+#define DIDT_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
+//DIDT_EDC_ROLLING_POWER_DELTA
+#define DIDT_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
+#define DIDT_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
+//DIDT_IND_INDEX
+#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
+#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xFFFFFFFFL
+//DIDT_IND_DATA
+#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
+#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfonly_spidec
+//SPI_CDBG_SYS_GFX
+#define SPI_CDBG_SYS_GFX__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_GFX__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_GFX__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_GFX__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_GFX__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_GFX__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_GFX__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_GFX__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_HP3D
+#define SPI_CDBG_SYS_HP3D__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_HP3D__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_HP3D__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_HP3D__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_HP3D__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_HP3D__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_HP3D__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_HP3D__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_CS0
+#define SPI_CDBG_SYS_CS0__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS0__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS0__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS0__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS0__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS0__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS0__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS0__PIPE3_MASK 0xFF000000L
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_LAUNCH__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL__STALL_LAUNCH_MASK 0x00000002L
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN__SHIFT 0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN__SHIFT 0x8
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN__SHIFT 0x10
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN__SHIFT 0x18
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN_MASK 0x000000FFL
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN_MASK 0x0000FF00L
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN_MASK 0x00FF0000L
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN_MASK 0xFF000000L
+//SPI_GDBG_WAVE_CNTL3
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L
+//SPI_RESET_DEBUG
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET__SHIFT 0x0
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID__SHIFT 0x1
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID__SHIFT 0x2
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE__SHIFT 0x3
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY__SHIFT 0x4
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_MASK 0x01L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID_MASK 0x02L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID_MASK 0x04L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE_MASK 0x08L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY_MASK 0x10L
+//SPI_ARB_CNTL_0
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
+//SPI_FEATURE_CTRL
+#define SPI_FEATURE_CTRL__TUNNELING_WAVE_LIMIT__SHIFT 0x0
+#define SPI_FEATURE_CTRL__RA_PROBE_IGNORE__SHIFT 0x4
+#define SPI_FEATURE_CTRL__PS_THROTTLE_MAX_WAVE_LIMIT__SHIFT 0x5
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_WIF_CTRL__SHIFT 0xb
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_OOO_CTRL__SHIFT 0xd
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_DISABLE__SHIFT 0xe
+#define SPI_FEATURE_CTRL__TUNNELING_WAVE_LIMIT_MASK 0x0000000FL
+#define SPI_FEATURE_CTRL__RA_PROBE_IGNORE_MASK 0x00000010L
+#define SPI_FEATURE_CTRL__PS_THROTTLE_MAX_WAVE_LIMIT_MASK 0x000007E0L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_WIF_CTRL_MASK 0x00001800L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_OOO_CTRL_MASK 0x00002000L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_DISABLE_MASK 0x00004000L
+//SPI_SHADER_RSRC_LIMIT_CTRL
+#define SPI_SHADER_RSRC_LIMIT_CTRL__WAVES_PER_SIMD32__SHIFT 0x0
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_PER_SIMD32__SHIFT 0x5
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_WRAP_DISABLE__SHIFT 0xc
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT__SHIFT 0xd
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_HIERARCHY_LEVEL__SHIFT 0x13
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT__SHIFT 0x14
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_HIERARCHY_LEVEL__SHIFT 0x1c
+#define SPI_SHADER_RSRC_LIMIT_CTRL__PERFORMANCE_LIMIT_ENABLE__SHIFT 0x1f
+#define SPI_SHADER_RSRC_LIMIT_CTRL__WAVES_PER_SIMD32_MASK 0x0000001FL
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_PER_SIMD32_MASK 0x00000FE0L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_WRAP_DISABLE_MASK 0x00001000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_MASK 0x0007E000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_HIERARCHY_LEVEL_MASK 0x00080000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_MASK 0x0FF00000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_HIERARCHY_LEVEL_MASK 0x10000000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__PERFORMANCE_LIMIT_ENABLE_MASK 0x80000000L
+//SPI_COMPUTE_WF_CTX_SAVE_STATUS
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE0_SAVE_BUSY__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE1_SAVE_BUSY__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE2_SAVE_BUSY__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE3_SAVE_BUSY__SHIFT 0x3
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE4_SAVE_BUSY__SHIFT 0x4
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE5_SAVE_BUSY__SHIFT 0x5
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE6_SAVE_BUSY__SHIFT 0x6
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE7_SAVE_BUSY__SHIFT 0x7
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE0_SAVE_BUSY__SHIFT 0x8
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE1_SAVE_BUSY__SHIFT 0x9
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE2_SAVE_BUSY__SHIFT 0xa
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE3_SAVE_BUSY__SHIFT 0xb
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE4_SAVE_BUSY__SHIFT 0xc
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE5_SAVE_BUSY__SHIFT 0xd
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE6_SAVE_BUSY__SHIFT 0xe
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE7_SAVE_BUSY__SHIFT 0xf
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE0_SAVE_BUSY__SHIFT 0x10
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE1_SAVE_BUSY__SHIFT 0x11
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE2_SAVE_BUSY__SHIFT 0x12
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE3_SAVE_BUSY__SHIFT 0x13
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE4_SAVE_BUSY__SHIFT 0x14
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE5_SAVE_BUSY__SHIFT 0x15
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE6_SAVE_BUSY__SHIFT 0x16
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE7_SAVE_BUSY__SHIFT 0x17
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE0_SAVE_BUSY__SHIFT 0x18
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE1_SAVE_BUSY__SHIFT 0x19
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE2_SAVE_BUSY__SHIFT 0x1a
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE3_SAVE_BUSY__SHIFT 0x1b
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE4_SAVE_BUSY__SHIFT 0x1c
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE5_SAVE_BUSY__SHIFT 0x1d
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE6_SAVE_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE7_SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE0_SAVE_BUSY_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE1_SAVE_BUSY_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE2_SAVE_BUSY_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE3_SAVE_BUSY_MASK 0x00000008L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE4_SAVE_BUSY_MASK 0x00000010L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE5_SAVE_BUSY_MASK 0x00000020L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE6_SAVE_BUSY_MASK 0x00000040L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE7_SAVE_BUSY_MASK 0x00000080L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE0_SAVE_BUSY_MASK 0x00000100L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE1_SAVE_BUSY_MASK 0x00000200L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE2_SAVE_BUSY_MASK 0x00000400L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE3_SAVE_BUSY_MASK 0x00000800L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE4_SAVE_BUSY_MASK 0x00001000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE5_SAVE_BUSY_MASK 0x00002000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE6_SAVE_BUSY_MASK 0x00004000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE7_SAVE_BUSY_MASK 0x00008000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE0_SAVE_BUSY_MASK 0x00010000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE1_SAVE_BUSY_MASK 0x00020000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE2_SAVE_BUSY_MASK 0x00040000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE3_SAVE_BUSY_MASK 0x00080000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE4_SAVE_BUSY_MASK 0x00100000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE5_SAVE_BUSY_MASK 0x00200000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE6_SAVE_BUSY_MASK 0x00400000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE7_SAVE_BUSY_MASK 0x00800000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE0_SAVE_BUSY_MASK 0x01000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE1_SAVE_BUSY_MASK 0x02000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE2_SAVE_BUSY_MASK 0x04000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE3_SAVE_BUSY_MASK 0x08000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE4_SAVE_BUSY_MASK 0x10000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE5_SAVE_BUSY_MASK 0x20000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE6_SAVE_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE7_SAVE_BUSY_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_tcpdec
+//TCP_INVALIDATE
+#define TCP_INVALIDATE__START__SHIFT 0x0
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+//TCP_STATUS
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
+#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
+#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
+#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
+#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
+#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
+#define TCP_STATUS__READ_BUSY__SHIFT 0x6
+#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
+#define TCP_STATUS__VM_BUSY__SHIFT 0x8
+#define TCP_STATUS__MEMIF_BUSY__SHIFT 0x9
+#define TCP_STATUS__GCR_BUSY__SHIFT 0xa
+#define TCP_STATUS__OFIFO_BUSY__SHIFT 0xb
+#define TCP_STATUS__OFIFO_QUEUE_BUSY__SHIFT 0xc
+#define TCP_STATUS__XNACK_PRT__SHIFT 0xf
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
+#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
+#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
+#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
+#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
+#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
+#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
+#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
+#define TCP_STATUS__MEMIF_BUSY_MASK 0x00000200L
+#define TCP_STATUS__GCR_BUSY_MASK 0x00000400L
+#define TCP_STATUS__OFIFO_BUSY_MASK 0x00000800L
+#define TCP_STATUS__OFIFO_QUEUE_BUSY_MASK 0x00003000L
+#define TCP_STATUS__XNACK_PRT_MASK 0x00008000L
+//TCP_CNTL
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
+#define TCP_CNTL__TD_DATA_EN_OVERRIDE__SHIFT 0x6
+#define TCP_CNTL__ENABLE_128B_DCC_COMP_READ_FOR_INDEP64__SHIFT 0x7
+#define TCP_CNTL__DISABLE_WRITE_COMBINING__SHIFT 0x9
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
+#define TCP_CNTL__FORCE_EOW_SET_CNT__SHIFT 0x16
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
+#define TCP_CNTL__FORCE_ORDER_BETWEEN_READ_WRITE_TO_SAME_ADDRESS__SHIFT 0x1d
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT__SHIFT 0x1f
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__TD_DATA_EN_OVERRIDE_MASK 0x00000040L
+#define TCP_CNTL__ENABLE_128B_DCC_COMP_READ_FOR_INDEP64_MASK 0x00000080L
+#define TCP_CNTL__DISABLE_WRITE_COMBINING_MASK 0x00000200L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
+#define TCP_CNTL__FORCE_EOW_SET_CNT_MASK 0x07C00000L
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+#define TCP_CNTL__FORCE_ORDER_BETWEEN_READ_WRITE_TO_SAME_ADDRESS_MASK 0x20000000L
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT_MASK 0x80000000L
+//TCP_CNTL2
+#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
+#define TCP_CNTL2__TCP_FMT_MGCG_DISABLE__SHIFT 0x8
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE__SHIFT 0x9
+#define TCP_CNTL2__TCP_WRITE_DATA_MGCG_DISABLE__SHIFT 0xa
+#define TCP_CNTL2__TCP_INNER_BLOCK_MGCG_DISABLE__SHIFT 0xb
+#define TCP_CNTL2__TCP_ADRS_IMG_CALC_MGCG_DISABLE__SHIFT 0xc
+#define TCP_CNTL2__V64_COMBINE_ENABLE__SHIFT 0xd
+#define TCP_CNTL2__TAGRAM_ADDR_SWIZZLE_DISABLE__SHIFT 0xe
+#define TCP_CNTL2__RETURN_ORDER_OVERRIDE__SHIFT 0xf
+#define TCP_CNTL2__POWER_OPT_DISABLE__SHIFT 0x10
+#define TCP_CNTL2__GCR_RSP_FGCG_DISABLE__SHIFT 0x11
+#define TCP_CNTL2__PERF_EN_OVERRIDE__SHIFT 0x12
+#define TCP_CNTL2__TC_TD_RAM_CLKEN_DISABLE__SHIFT 0x14
+#define TCP_CNTL2__TC_TD_DATA_CLKEN_DISABLE__SHIFT 0x15
+#define TCP_CNTL2__TCP_GL1_REQ_CLKEN_DISABLE__SHIFT 0x16
+#define TCP_CNTL2__TCP_GL1R_SRC_CLKEN_DISABLE__SHIFT 0x17
+#define TCP_CNTL2__SPARE_BIT__SHIFT 0x1a
+#define TCP_CNTL2__TAGRAM_XY_BIAS_OVERRIDE__SHIFT 0x1b
+#define TCP_CNTL2__TCP_REQ_MGCG_DISABLE__SHIFT 0x1d
+#define TCP_CNTL2__TCP_MISS_MGCG_DISABLE__SHIFT 0x1e
+#define TCP_CNTL2__DISABLE_MIPMAP_PARAM_CALC_SELF_GATING__SHIFT 0x1f
+#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
+#define TCP_CNTL2__TCP_FMT_MGCG_DISABLE_MASK 0x00000100L
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE_MASK 0x00000200L
+#define TCP_CNTL2__TCP_WRITE_DATA_MGCG_DISABLE_MASK 0x00000400L
+#define TCP_CNTL2__TCP_INNER_BLOCK_MGCG_DISABLE_MASK 0x00000800L
+#define TCP_CNTL2__TCP_ADRS_IMG_CALC_MGCG_DISABLE_MASK 0x00001000L
+#define TCP_CNTL2__V64_COMBINE_ENABLE_MASK 0x00002000L
+#define TCP_CNTL2__TAGRAM_ADDR_SWIZZLE_DISABLE_MASK 0x00004000L
+#define TCP_CNTL2__RETURN_ORDER_OVERRIDE_MASK 0x00008000L
+#define TCP_CNTL2__POWER_OPT_DISABLE_MASK 0x00010000L
+#define TCP_CNTL2__GCR_RSP_FGCG_DISABLE_MASK 0x00020000L
+#define TCP_CNTL2__PERF_EN_OVERRIDE_MASK 0x000C0000L
+#define TCP_CNTL2__TC_TD_RAM_CLKEN_DISABLE_MASK 0x00100000L
+#define TCP_CNTL2__TC_TD_DATA_CLKEN_DISABLE_MASK 0x00200000L
+#define TCP_CNTL2__TCP_GL1_REQ_CLKEN_DISABLE_MASK 0x00400000L
+#define TCP_CNTL2__TCP_GL1R_SRC_CLKEN_DISABLE_MASK 0x00800000L
+#define TCP_CNTL2__SPARE_BIT_MASK 0x04000000L
+#define TCP_CNTL2__TAGRAM_XY_BIAS_OVERRIDE_MASK 0x18000000L
+#define TCP_CNTL2__TCP_REQ_MGCG_DISABLE_MASK 0x20000000L
+#define TCP_CNTL2__TCP_MISS_MGCG_DISABLE_MASK 0x40000000L
+#define TCP_CNTL2__DISABLE_MIPMAP_PARAM_CALC_SELF_GATING_MASK 0x80000000L
+//TCP_CREDIT
+#define TCP_CREDIT__LFIFO_RAM_DEPTH__SHIFT 0x0
+#define TCP_CREDIT__GL1_REQ_CREDIT__SHIFT 0xa
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
+#define TCP_CREDIT__TD_RAM_CREDIT__SHIFT 0x17
+#define TCP_CREDIT__TD_DATA_CREDIT__SHIFT 0x1d
+#define TCP_CREDIT__LFIFO_RAM_DEPTH_MASK 0x000003FFL
+#define TCP_CREDIT__GL1_REQ_CREDIT_MASK 0x0000FC00L
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
+#define TCP_CREDIT__TD_RAM_CREDIT_MASK 0x0F800000L
+#define TCP_CREDIT__TD_DATA_CREDIT_MASK 0xE0000000L
+
+
+// addressBlock: gc_pfonly_gdsdec
+//GDS_ENHANCE2
+#define GDS_ENHANCE2__DISABLE_MEMORY_VIOLATION_REPORT__SHIFT 0x0
+#define GDS_ENHANCE2__GDS_INTERFACES_FGCG_OVERRIDE__SHIFT 0x1
+#define GDS_ENHANCE2__DISABLE_PIPE_MEMORY_RD_OPT__SHIFT 0x2
+#define GDS_ENHANCE2__UNUSED__SHIFT 0x3
+#define GDS_ENHANCE2__DISABLE_MEMORY_VIOLATION_REPORT_MASK 0x00000001L
+#define GDS_ENHANCE2__GDS_INTERFACES_FGCG_OVERRIDE_MASK 0x00000002L
+#define GDS_ENHANCE2__DISABLE_PIPE_MEMORY_RD_OPT_MASK 0x00000004L
+#define GDS_ENHANCE2__UNUSED_MASK 0xFFFFFFF8L
+//GDS_OA_CGPG_RESTORE
+#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
+#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
+#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
+#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
+#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
+#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
+#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
+#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
+#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
+#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
+
+
+// addressBlock: gc_pfonly_utcl1dec
+//UTCL1_CTRL_0
+#define UTCL1_CTRL_0__UTCL1_L0_REQ_VFIFO_DISABLE__SHIFT 0x0
+#define UTCL1_CTRL_0__UTCL1_UTCL2_INVACK_CDC_FIFO_DISABLE__SHIFT 0x1
+#define UTCL1_CTRL_0__RESERVED_0__SHIFT 0x2
+#define UTCL1_CTRL_0__UTCL1_UTCL2_REQ_CREDITS__SHIFT 0x3
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_CREDITS__SHIFT 0x9
+#define UTCL1_CTRL_0__UTCL1_LIMIT_INV_TO_ONE__SHIFT 0xd
+#define UTCL1_CTRL_0__UTCL1_LIMIT_XLAT_TO_ONE__SHIFT 0xe
+#define UTCL1_CTRL_0__UTCL1_UTCL2_FGCG_REPEATERS_OVERRIDE__SHIFT 0xf
+#define UTCL1_CTRL_0__UTCL1_INV_FILTER_VMID__SHIFT 0x10
+#define UTCL1_CTRL_0__UTCL1_RANGE_INV_FORCE_CHK_ALL__SHIFT 0x11
+#define UTCL1_CTRL_0__UTCL1_UTCL0_RET_FGCG_REPEATERS_OVERRIDE__SHIFT 0x12
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_FGCG_REPEATERS_OVERRIDE__SHIFT 0x13
+#define UTCL1_CTRL_0__GCRD_FGCG_DISABLE__SHIFT 0x14
+#define UTCL1_CTRL_0__UTCL1_MH_RANGE_INV_TO_VMID_OVERRIDE__SHIFT 0x15
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_DUPLICATES__SHIFT 0x16
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_REQUEST_SQUASHING__SHIFT 0x17
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_RECENT_BUFFER__SHIFT 0x18
+#define UTCL1_CTRL_0__UTCL1_XLAT_FAULT_LOCK_CTRL__SHIFT 0x19
+#define UTCL1_CTRL_0__UTCL1_REDUCE_CC_SIZE__SHIFT 0x1b
+#define UTCL1_CTRL_0__RESERVED_1__SHIFT 0x1d
+#define UTCL1_CTRL_0__MH_SPARE0__SHIFT 0x1e
+#define UTCL1_CTRL_0__RESERVED_2__SHIFT 0x1f
+#define UTCL1_CTRL_0__UTCL1_L0_REQ_VFIFO_DISABLE_MASK 0x00000001L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_INVACK_CDC_FIFO_DISABLE_MASK 0x00000002L
+#define UTCL1_CTRL_0__RESERVED_0_MASK 0x00000004L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_REQ_CREDITS_MASK 0x000001F8L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_CREDITS_MASK 0x00001E00L
+#define UTCL1_CTRL_0__UTCL1_LIMIT_INV_TO_ONE_MASK 0x00002000L
+#define UTCL1_CTRL_0__UTCL1_LIMIT_XLAT_TO_ONE_MASK 0x00004000L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_FGCG_REPEATERS_OVERRIDE_MASK 0x00008000L
+#define UTCL1_CTRL_0__UTCL1_INV_FILTER_VMID_MASK 0x00010000L
+#define UTCL1_CTRL_0__UTCL1_RANGE_INV_FORCE_CHK_ALL_MASK 0x00020000L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_RET_FGCG_REPEATERS_OVERRIDE_MASK 0x00040000L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_FGCG_REPEATERS_OVERRIDE_MASK 0x00080000L
+#define UTCL1_CTRL_0__GCRD_FGCG_DISABLE_MASK 0x00100000L
+#define UTCL1_CTRL_0__UTCL1_MH_RANGE_INV_TO_VMID_OVERRIDE_MASK 0x00200000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_DUPLICATES_MASK 0x00400000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_REQUEST_SQUASHING_MASK 0x00800000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_RECENT_BUFFER_MASK 0x01000000L
+#define UTCL1_CTRL_0__UTCL1_XLAT_FAULT_LOCK_CTRL_MASK 0x06000000L
+#define UTCL1_CTRL_0__UTCL1_REDUCE_CC_SIZE_MASK 0x18000000L
+#define UTCL1_CTRL_0__RESERVED_1_MASK 0x20000000L
+#define UTCL1_CTRL_0__MH_SPARE0_MASK 0x40000000L
+#define UTCL1_CTRL_0__RESERVED_2_MASK 0x80000000L
+//UTCL1_UTCL0_INVREQ_DISABLE
+#define UTCL1_UTCL0_INVREQ_DISABLE__UTCL1_UTCL0_INVREQ_DISABLE__SHIFT 0x0
+#define UTCL1_UTCL0_INVREQ_DISABLE__UTCL1_UTCL0_INVREQ_DISABLE_MASK 0xFFFFFFFFL
+//UTCL1_CTRL_2
+#define UTCL1_CTRL_2__UTCL1_RNG_TO_VMID_INV_OVRD__SHIFT 0x0
+#define UTCL1_CTRL_2__UTCL1_PMM_INTERRUPT_CREDITS_OVERRIDE__SHIFT 0x4
+#define UTCL1_CTRL_2__UTCL1_CACHE_WRITE_PERM__SHIFT 0xa
+#define UTCL1_CTRL_2__UTCL1_PAGE_OVRD_DISABLE__SHIFT 0xb
+#define UTCL1_CTRL_2__UTCL1_SPARE0__SHIFT 0xc
+#define UTCL1_CTRL_2__UTCL1_SPARE1__SHIFT 0xd
+#define UTCL1_CTRL_2__RESERVED__SHIFT 0xe
+#define UTCL1_CTRL_2__UTCL1_RNG_TO_VMID_INV_OVRD_MASK 0x0000000FL
+#define UTCL1_CTRL_2__UTCL1_PMM_INTERRUPT_CREDITS_OVERRIDE_MASK 0x000003F0L
+#define UTCL1_CTRL_2__UTCL1_CACHE_WRITE_PERM_MASK 0x00000400L
+#define UTCL1_CTRL_2__UTCL1_PAGE_OVRD_DISABLE_MASK 0x00000800L
+#define UTCL1_CTRL_2__UTCL1_SPARE0_MASK 0x00001000L
+#define UTCL1_CTRL_2__UTCL1_SPARE1_MASK 0x00002000L
+#define UTCL1_CTRL_2__RESERVED_MASK 0xFFFFC000L
+//UTCL1_FIFO_SIZING
+#define UTCL1_FIFO_SIZING__UTCL1_UTCL2_INVACK_CDC_FIFO_THRESH__SHIFT 0x0
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_LOW__SHIFT 0x3
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_HIGH__SHIFT 0x10
+#define UTCL1_FIFO_SIZING__UTCL1_UTCL2_INVACK_CDC_FIFO_THRESH_MASK 0x00000007L
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_LOW_MASK 0x0000FFF8L
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_HIGH_MASK 0xFFFF0000L
+//GCRD_SA0_TARGETS_DISABLE
+#define GCRD_SA0_TARGETS_DISABLE__GCRD_SA0_TARGETS_DISABLE__SHIFT 0x0
+#define GCRD_SA0_TARGETS_DISABLE__GCRD_SA0_TARGETS_DISABLE_MASK 0x0007FFFFL
+//GCRD_SA1_TARGETS_DISABLE
+#define GCRD_SA1_TARGETS_DISABLE__GCRD_SA1_TARGETS_DISABLE__SHIFT 0x0
+#define GCRD_SA1_TARGETS_DISABLE__GCRD_SA1_TARGETS_DISABLE_MASK 0x0007FFFFL
+//GCRD_CREDIT_SAFE
+#define GCRD_CREDIT_SAFE__GCRD_CHAIN_CREDIT_SAFE_REG__SHIFT 0x0
+#define GCRD_CREDIT_SAFE__GCRD_TARGET_CREDIT_SAFE_REG__SHIFT 0x4
+#define GCRD_CREDIT_SAFE__GCRD_CHAIN_CREDIT_SAFE_REG_MASK 0x00000007L
+#define GCRD_CREDIT_SAFE__GCRD_TARGET_CREDIT_SAFE_REG_MASK 0x00000070L
+
+
+// addressBlock: gc_pfonly_pmmdec
+//GCR_GENERAL_CNTL
+#define GCR_GENERAL_CNTL__FORCE_4K_L2_RESP__SHIFT 0x0
+#define GCR_GENERAL_CNTL__REDUCE_HALF_MAIN_WQ__SHIFT 0x1
+#define GCR_GENERAL_CNTL__REDUCE_HALF_PHY_WQ__SHIFT 0x2
+#define GCR_GENERAL_CNTL__FORCE_INV_ALL__SHIFT 0x3
+#define GCR_GENERAL_CNTL__HI_PRIORITY_CNTL__SHIFT 0x4
+#define GCR_GENERAL_CNTL__HI_PRIORITY_DISABLE__SHIFT 0x6
+#define GCR_GENERAL_CNTL__BIG_PAGE_FILTER_DISABLE__SHIFT 0x7
+#define GCR_GENERAL_CNTL__PERF_CNTR_ENABLE__SHIFT 0x8
+#define GCR_GENERAL_CNTL__FORCE_SINGLE_WQ__SHIFT 0x9
+#define GCR_GENERAL_CNTL__UTCL2_REQ_PERM__SHIFT 0xa
+#define GCR_GENERAL_CNTL__TARGET_MGCG_CLKEN_DIS__SHIFT 0xd
+#define GCR_GENERAL_CNTL__MIXED_RANGE_MODE_DIS__SHIFT 0xe
+#define GCR_GENERAL_CNTL__ENABLE_16K_UTCL2_REQ__SHIFT 0xf
+#define GCR_GENERAL_CNTL__DISABLE_FGCG__SHIFT 0x10
+#define GCR_GENERAL_CNTL__CLIENT_ID__SHIFT 0x14
+#define GCR_GENERAL_CNTL__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define GCR_GENERAL_CNTL__REDUCE_HALF_MAIN_WQ_MASK 0x00000002L
+#define GCR_GENERAL_CNTL__REDUCE_HALF_PHY_WQ_MASK 0x00000004L
+#define GCR_GENERAL_CNTL__FORCE_INV_ALL_MASK 0x00000008L
+#define GCR_GENERAL_CNTL__HI_PRIORITY_CNTL_MASK 0x00000030L
+#define GCR_GENERAL_CNTL__HI_PRIORITY_DISABLE_MASK 0x00000040L
+#define GCR_GENERAL_CNTL__BIG_PAGE_FILTER_DISABLE_MASK 0x00000080L
+#define GCR_GENERAL_CNTL__PERF_CNTR_ENABLE_MASK 0x00000100L
+#define GCR_GENERAL_CNTL__FORCE_SINGLE_WQ_MASK 0x00000200L
+#define GCR_GENERAL_CNTL__UTCL2_REQ_PERM_MASK 0x00001C00L
+#define GCR_GENERAL_CNTL__TARGET_MGCG_CLKEN_DIS_MASK 0x00002000L
+#define GCR_GENERAL_CNTL__MIXED_RANGE_MODE_DIS_MASK 0x00004000L
+#define GCR_GENERAL_CNTL__ENABLE_16K_UTCL2_REQ_MASK 0x00008000L
+#define GCR_GENERAL_CNTL__DISABLE_FGCG_MASK 0x00010000L
+#define GCR_GENERAL_CNTL__CLIENT_ID_MASK 0x1FF00000L
+//GCR_TARGET_DISABLE
+#define GCR_TARGET_DISABLE__DISABLE_SE0_PHY__SHIFT 0x0
+#define GCR_TARGET_DISABLE__DISABLE_SE0_VIRT__SHIFT 0x1
+#define GCR_TARGET_DISABLE__DISABLE_SE1_PHY__SHIFT 0x2
+#define GCR_TARGET_DISABLE__DISABLE_SE1_VIRT__SHIFT 0x3
+#define GCR_TARGET_DISABLE__DISABLE_SE2_PHY__SHIFT 0x4
+#define GCR_TARGET_DISABLE__DISABLE_SE2_VIRT__SHIFT 0x5
+#define GCR_TARGET_DISABLE__DISABLE_GL2A0_PHY__SHIFT 0x6
+#define GCR_TARGET_DISABLE__DISABLE_GL2A1_PHY__SHIFT 0x7
+#define GCR_TARGET_DISABLE__DISABLE_GL2A2_PHY__SHIFT 0x8
+#define GCR_TARGET_DISABLE__DISABLE_GL2A3_PHY__SHIFT 0x9
+#define GCR_TARGET_DISABLE__DISABLE_SE3_PHY__SHIFT 0xa
+#define GCR_TARGET_DISABLE__DISABLE_SE3_VIRT__SHIFT 0xb
+#define GCR_TARGET_DISABLE__DISABLE_SE4_PHY__SHIFT 0xc
+#define GCR_TARGET_DISABLE__DISABLE_SE4_VIRT__SHIFT 0xd
+#define GCR_TARGET_DISABLE__DISABLE_SE5_PHY__SHIFT 0xe
+#define GCR_TARGET_DISABLE__DISABLE_SE5_VIRT__SHIFT 0xf
+#define GCR_TARGET_DISABLE__SE0_INACTIVE_STATUS__SHIFT 0x10
+#define GCR_TARGET_DISABLE__SE1_INACTIVE_STATUS__SHIFT 0x11
+#define GCR_TARGET_DISABLE__SE2_INACTIVE_STATUS__SHIFT 0x12
+#define GCR_TARGET_DISABLE__SE3_INACTIVE_STATUS__SHIFT 0x13
+#define GCR_TARGET_DISABLE__SE4_INACTIVE_STATUS__SHIFT 0x14
+#define GCR_TARGET_DISABLE__SE5_INACTIVE_STATUS__SHIFT 0x15
+#define GCR_TARGET_DISABLE__DISABLE_SE0_PHY_MASK 0x00000001L
+#define GCR_TARGET_DISABLE__DISABLE_SE0_VIRT_MASK 0x00000002L
+#define GCR_TARGET_DISABLE__DISABLE_SE1_PHY_MASK 0x00000004L
+#define GCR_TARGET_DISABLE__DISABLE_SE1_VIRT_MASK 0x00000008L
+#define GCR_TARGET_DISABLE__DISABLE_SE2_PHY_MASK 0x00000010L
+#define GCR_TARGET_DISABLE__DISABLE_SE2_VIRT_MASK 0x00000020L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A0_PHY_MASK 0x00000040L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A1_PHY_MASK 0x00000080L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A2_PHY_MASK 0x00000100L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A3_PHY_MASK 0x00000200L
+#define GCR_TARGET_DISABLE__DISABLE_SE3_PHY_MASK 0x00000400L
+#define GCR_TARGET_DISABLE__DISABLE_SE3_VIRT_MASK 0x00000800L
+#define GCR_TARGET_DISABLE__DISABLE_SE4_PHY_MASK 0x00001000L
+#define GCR_TARGET_DISABLE__DISABLE_SE4_VIRT_MASK 0x00002000L
+#define GCR_TARGET_DISABLE__DISABLE_SE5_PHY_MASK 0x00004000L
+#define GCR_TARGET_DISABLE__DISABLE_SE5_VIRT_MASK 0x00008000L
+#define GCR_TARGET_DISABLE__SE0_INACTIVE_STATUS_MASK 0x00010000L
+#define GCR_TARGET_DISABLE__SE1_INACTIVE_STATUS_MASK 0x00020000L
+#define GCR_TARGET_DISABLE__SE2_INACTIVE_STATUS_MASK 0x00040000L
+#define GCR_TARGET_DISABLE__SE3_INACTIVE_STATUS_MASK 0x00080000L
+#define GCR_TARGET_DISABLE__SE4_INACTIVE_STATUS_MASK 0x00100000L
+#define GCR_TARGET_DISABLE__SE5_INACTIVE_STATUS_MASK 0x00200000L
+//GCR_CMD_STATUS
+#define GCR_CMD_STATUS__GCR_CONTROL__SHIFT 0x0
+#define GCR_CMD_STATUS__GCR_SRC__SHIFT 0x13
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN__SHIFT 0x17
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_VMID__SHIFT 0x18
+#define GCR_CMD_STATUS__UTCL2_NACK_STATUS__SHIFT 0x1c
+#define GCR_CMD_STATUS__GCR_SEQ_OP_ERROR__SHIFT 0x1e
+#define GCR_CMD_STATUS__UTCL2_NACK_ERROR__SHIFT 0x1f
+#define GCR_CMD_STATUS__GCR_CONTROL_MASK 0x0007FFFFL
+#define GCR_CMD_STATUS__GCR_SRC_MASK 0x00380000L
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_MASK 0x00800000L
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_VMID_MASK 0x0F000000L
+#define GCR_CMD_STATUS__UTCL2_NACK_STATUS_MASK 0x30000000L
+#define GCR_CMD_STATUS__GCR_SEQ_OP_ERROR_MASK 0x40000000L
+#define GCR_CMD_STATUS__UTCL2_NACK_ERROR_MASK 0x80000000L
+//GCR_SPARE
+#define GCR_SPARE__SPARE_BIT_1__SHIFT 0x1
+#define GCR_SPARE__SPARE_BIT_2__SHIFT 0x2
+#define GCR_SPARE__SPARE_BIT_3__SHIFT 0x3
+#define GCR_SPARE__SPARE_BIT_4__SHIFT 0x4
+#define GCR_SPARE__SPARE_BIT_5__SHIFT 0x5
+#define GCR_SPARE__SPARE_BIT_6__SHIFT 0x6
+#define GCR_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define GCR_SPARE__UTCL2_REQ_CREDIT__SHIFT 0x8
+#define GCR_SPARE__GCRD_GL2A_REQ_CREDIT__SHIFT 0x10
+#define GCR_SPARE__GCRD_SE_REQ_CREDIT__SHIFT 0x14
+#define GCR_SPARE__SPARE_BIT_31_24__SHIFT 0x18
+#define GCR_SPARE__SPARE_BIT_1_MASK 0x00000002L
+#define GCR_SPARE__SPARE_BIT_2_MASK 0x00000004L
+#define GCR_SPARE__SPARE_BIT_3_MASK 0x00000008L
+#define GCR_SPARE__SPARE_BIT_4_MASK 0x00000010L
+#define GCR_SPARE__SPARE_BIT_5_MASK 0x00000020L
+#define GCR_SPARE__SPARE_BIT_6_MASK 0x00000040L
+#define GCR_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define GCR_SPARE__UTCL2_REQ_CREDIT_MASK 0x0000FF00L
+#define GCR_SPARE__GCRD_GL2A_REQ_CREDIT_MASK 0x000F0000L
+#define GCR_SPARE__GCRD_SE_REQ_CREDIT_MASK 0x00F00000L
+#define GCR_SPARE__SPARE_BIT_31_24_MASK 0xFF000000L
+//PMM_CNTL2
+#define PMM_CNTL2__GCEA_MAM_DISABLE__SHIFT 0x0
+#define PMM_CNTL2__ABIT_FORCE_FLUSH_OVERRIDE__SHIFT 0x18
+#define PMM_CNTL2__ABIT_TIMER_FLUSH_OVERRIDE__SHIFT 0x19
+#define PMM_CNTL2__PMM_IH_INTERRUPT_CREDITS_OVERRIDE__SHIFT 0x1a
+#define PMM_CNTL2__ABIT_INTR_ON_FLUSH_DONE__SHIFT 0x1e
+#define PMM_CNTL2__RESERVED__SHIFT 0x1f
+#define PMM_CNTL2__GCEA_MAM_DISABLE_MASK 0x00FFFFFFL
+#define PMM_CNTL2__ABIT_FORCE_FLUSH_OVERRIDE_MASK 0x01000000L
+#define PMM_CNTL2__ABIT_TIMER_FLUSH_OVERRIDE_MASK 0x02000000L
+#define PMM_CNTL2__PMM_IH_INTERRUPT_CREDITS_OVERRIDE_MASK 0x3C000000L
+#define PMM_CNTL2__ABIT_INTR_ON_FLUSH_DONE_MASK 0x40000000L
+#define PMM_CNTL2__RESERVED_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_gccacdec
+//GC_CAC_CTRL_1
+#define GC_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define GC_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
+#define GC_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
+#define GC_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L
+//GC_CAC_CTRL_2
+#define GC_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE__SHIFT 0x1
+#define GC_CAC_CTRL_2__GC_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x2
+#define GC_CAC_CTRL_2__TOGGLE_EN__SHIFT 0x3
+#define GC_CAC_CTRL_2__INTR_EN__SHIFT 0x4
+#define GC_CAC_CTRL_2__CAC_COUNTER_SNAP_SEL__SHIFT 0x5
+#define GC_CAC_CTRL_2__SE_AGGR_ACC_EN__SHIFT 0x6
+#define GC_CAC_CTRL_2__GC_AGGR_ACC_EN__SHIFT 0xe
+#define GC_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE_MASK 0x00000002L
+#define GC_CAC_CTRL_2__GC_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000004L
+#define GC_CAC_CTRL_2__TOGGLE_EN_MASK 0x00000008L
+#define GC_CAC_CTRL_2__INTR_EN_MASK 0x00000010L
+#define GC_CAC_CTRL_2__CAC_COUNTER_SNAP_SEL_MASK 0x00000020L
+#define GC_CAC_CTRL_2__SE_AGGR_ACC_EN_MASK 0x00003FC0L
+#define GC_CAC_CTRL_2__GC_AGGR_ACC_EN_MASK 0x00004000L
+//GC_CAC_AGGR_LOWER
+#define GC_CAC_AGGR_LOWER__GC_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_AGGR_LOWER__GC_AGGR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_UPPER
+#define GC_CAC_AGGR_UPPER__GC_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_AGGR_UPPER__GC_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_LOWER
+#define SE0_CAC_AGGR_LOWER__SE0_AGGR_31_0__SHIFT 0x0
+#define SE0_CAC_AGGR_LOWER__SE0_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_UPPER
+#define SE0_CAC_AGGR_UPPER__SE0_AGGR_63_32__SHIFT 0x0
+#define SE0_CAC_AGGR_UPPER__SE0_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_LOWER
+#define SE1_CAC_AGGR_LOWER__SE1_AGGR_31_0__SHIFT 0x0
+#define SE1_CAC_AGGR_LOWER__SE1_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_UPPER
+#define SE1_CAC_AGGR_UPPER__SE1_AGGR_63_32__SHIFT 0x0
+#define SE1_CAC_AGGR_UPPER__SE1_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_LOWER
+#define SE2_CAC_AGGR_LOWER__SE2_AGGR_31_0__SHIFT 0x0
+#define SE2_CAC_AGGR_LOWER__SE2_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_UPPER
+#define SE2_CAC_AGGR_UPPER__SE2_AGGR_63_32__SHIFT 0x0
+#define SE2_CAC_AGGR_UPPER__SE2_AGGR_63_32_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_GFXCLK_CYCLE
+#define GC_CAC_AGGR_GFXCLK_CYCLE__GC_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define GC_CAC_AGGR_GFXCLK_CYCLE__GC_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_GFXCLK_CYCLE
+#define SE0_CAC_AGGR_GFXCLK_CYCLE__SE0_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE0_CAC_AGGR_GFXCLK_CYCLE__SE0_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_GFXCLK_CYCLE
+#define SE1_CAC_AGGR_GFXCLK_CYCLE__SE1_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE1_CAC_AGGR_GFXCLK_CYCLE__SE1_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_GFXCLK_CYCLE
+#define SE2_CAC_AGGR_GFXCLK_CYCLE__SE2_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE2_CAC_AGGR_GFXCLK_CYCLE__SE2_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//GC_EDC_CTRL
+#define GC_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define GC_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0xa
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0xb
+#define GC_EDC_CTRL__EDC_LEVEL_SEL__SHIFT 0xf
+#define GC_EDC_CTRL__EDC_ALGORITHM_MODE__SHIFT 0x10
+#define GC_EDC_CTRL__EDC_AVGDIV__SHIFT 0x11
+#define GC_EDC_CTRL__PSM_THROTTLE_SRC_SEL__SHIFT 0x15
+#define GC_EDC_CTRL__THROTTLE_SRC0_MASK__SHIFT 0x18
+#define GC_EDC_CTRL__THROTTLE_SRC1_MASK__SHIFT 0x19
+#define GC_EDC_CTRL__THROTTLE_SRC2_MASK__SHIFT 0x1a
+#define GC_EDC_CTRL__THROTTLE_SRC3_MASK__SHIFT 0x1b
+#define GC_EDC_CTRL__EDC_CREDIT_SHIFT_BIT_NUMS__SHIFT 0x1c
+#define GC_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define GC_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define GC_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000003F0L
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00000400L
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00007800L
+#define GC_EDC_CTRL__EDC_LEVEL_SEL_MASK 0x00008000L
+#define GC_EDC_CTRL__EDC_ALGORITHM_MODE_MASK 0x00010000L
+#define GC_EDC_CTRL__EDC_AVGDIV_MASK 0x001E0000L
+#define GC_EDC_CTRL__PSM_THROTTLE_SRC_SEL_MASK 0x00E00000L
+#define GC_EDC_CTRL__THROTTLE_SRC0_MASK_MASK 0x01000000L
+#define GC_EDC_CTRL__THROTTLE_SRC1_MASK_MASK 0x02000000L
+#define GC_EDC_CTRL__THROTTLE_SRC2_MASK_MASK 0x04000000L
+#define GC_EDC_CTRL__THROTTLE_SRC3_MASK_MASK 0x08000000L
+#define GC_EDC_CTRL__EDC_CREDIT_SHIFT_BIT_NUMS_MASK 0xF0000000L
+//GC_EDC_THRESHOLD
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//GC_EDC_STRETCH_CTRL
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_EN__SHIFT 0x0
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_DELAY__SHIFT 0x1
+#define GC_EDC_STRETCH_CTRL__EDC_UNSTRETCH_DELAY__SHIFT 0xa
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_EN_MASK 0x00000001L
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_DELAY_MASK 0x000003FEL
+#define GC_EDC_STRETCH_CTRL__EDC_UNSTRETCH_DELAY_MASK 0x0007FC00L
+//GC_EDC_STRETCH_THRESHOLD
+#define GC_EDC_STRETCH_THRESHOLD__EDC_STRETCH_THRESHOLD__SHIFT 0x0
+#define GC_EDC_STRETCH_THRESHOLD__EDC_STRETCH_THRESHOLD_MASK 0xFFFFFFFFL
+//EDC_HYSTERESIS_CNTL
+#define EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_TIMER__SHIFT 0x8
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_EN__SHIFT 0x10
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_MODE__SHIFT 0x11
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_MODE__SHIFT 0x14
+#define EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_TIMER_MASK 0x0000FF00L
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_EN_MASK 0x00010000L
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_MODE_MASK 0x000E0000L
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_MODE_MASK 0x00100000L
+//GC_THROTTLE_CTRL
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST__SHIFT 0x0
+#define GC_THROTTLE_CTRL__GC_EDC_STALL_EN__SHIFT 0x1
+#define GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT 0x2
+#define GC_THROTTLE_CTRL__PWRBRK_POLARITY_CNTL__SHIFT 0x3
+#define GC_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x4
+#define GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT 0x5
+#define GC_THROTTLE_CTRL__GC_EDC_ONLY_MODE__SHIFT 0x6
+#define GC_THROTTLE_CTRL__GC_EDC_OVERRIDE__SHIFT 0x7
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE__SHIFT 0x8
+#define GC_THROTTLE_CTRL__PWRBRK_OVERRIDE__SHIFT 0x9
+#define GC_THROTTLE_CTRL__GC_EDC_PERF_COUNTER_EN__SHIFT 0xa
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN__SHIFT 0xb
+#define GC_THROTTLE_CTRL__PWRBRK_PERF_COUNTER_EN__SHIFT 0xc
+#define GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT 0xd
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_PERF_COUNTER_EN__SHIFT 0x17
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_LOG_INDEX__SHIFT 0x18
+#define GC_THROTTLE_CTRL__LUT_HW_UPDATE__SHIFT 0x1d
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_CLK_EN_OVERRIDE__SHIFT 0x1e
+#define GC_THROTTLE_CTRL__PCC_POLARITY_CNTL__SHIFT 0x1f
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST_MASK 0x00000001L
+#define GC_THROTTLE_CTRL__GC_EDC_STALL_EN_MASK 0x00000002L
+#define GC_THROTTLE_CTRL__PWRBRK_STALL_EN_MASK 0x00000004L
+#define GC_THROTTLE_CTRL__PWRBRK_POLARITY_CNTL_MASK 0x00000008L
+#define GC_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000010L
+#define GC_THROTTLE_CTRL__PATTERN_MODE_MASK 0x00000020L
+#define GC_THROTTLE_CTRL__GC_EDC_ONLY_MODE_MASK 0x00000040L
+#define GC_THROTTLE_CTRL__GC_EDC_OVERRIDE_MASK 0x00000080L
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE_MASK 0x00000100L
+#define GC_THROTTLE_CTRL__PWRBRK_OVERRIDE_MASK 0x00000200L
+#define GC_THROTTLE_CTRL__GC_EDC_PERF_COUNTER_EN_MASK 0x00000400L
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN_MASK 0x00000800L
+#define GC_THROTTLE_CTRL__PWRBRK_PERF_COUNTER_EN_MASK 0x00001000L
+#define GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL_MASK 0x007FE000L
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_PERF_COUNTER_EN_MASK 0x00800000L
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_LOG_INDEX_MASK 0x1F000000L
+#define GC_THROTTLE_CTRL__LUT_HW_UPDATE_MASK 0x20000000L
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_CLK_EN_OVERRIDE_MASK 0x40000000L
+#define GC_THROTTLE_CTRL__PCC_POLARITY_CNTL_MASK 0x80000000L
+//GC_THROTTLE_CTRL1
+#define GC_THROTTLE_CTRL1__PCC_FP_PROGRAM_STEP_EN__SHIFT 0x0
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MIN_STEP__SHIFT 0x1
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MAX_STEP__SHIFT 0x5
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_UPWARDS_STEP_SIZE__SHIFT 0xa
+#define GC_THROTTLE_CTRL1__PWRBRK_FP_PROGRAM_STEP_EN__SHIFT 0xd
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MIN_STEP__SHIFT 0xe
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MAX_STEP__SHIFT 0x12
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_UPWARDS_STEP_SIZE__SHIFT 0x17
+#define GC_THROTTLE_CTRL1__FIXED_PATTERN_SELECT__SHIFT 0x1a
+#define GC_THROTTLE_CTRL1__GC_EDC_STRETCH_PERF_COUNTER_EN__SHIFT 0x1e
+#define GC_THROTTLE_CTRL1__GC_EDC_UNSTRETCH_PERF_COUNTER_EN__SHIFT 0x1f
+#define GC_THROTTLE_CTRL1__PCC_FP_PROGRAM_STEP_EN_MASK 0x00000001L
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MIN_STEP_MASK 0x0000001EL
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MAX_STEP_MASK 0x000003E0L
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_UPWARDS_STEP_SIZE_MASK 0x00001C00L
+#define GC_THROTTLE_CTRL1__PWRBRK_FP_PROGRAM_STEP_EN_MASK 0x00002000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MIN_STEP_MASK 0x0003C000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MAX_STEP_MASK 0x007C0000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_UPWARDS_STEP_SIZE_MASK 0x03800000L
+#define GC_THROTTLE_CTRL1__FIXED_PATTERN_SELECT_MASK 0x0C000000L
+#define GC_THROTTLE_CTRL1__GC_EDC_STRETCH_PERF_COUNTER_EN_MASK 0x40000000L
+#define GC_THROTTLE_CTRL1__GC_EDC_UNSTRETCH_PERF_COUNTER_EN_MASK 0x80000000L
+//PCC_STALL_PATTERN_CTRL
+#define PCC_STALL_PATTERN_CTRL__PCC_STEP_INTERVAL__SHIFT 0x0
+#define PCC_STALL_PATTERN_CTRL__PCC_BEGIN_STEP__SHIFT 0xa
+#define PCC_STALL_PATTERN_CTRL__PCC_END_STEP__SHIFT 0xf
+#define PCC_STALL_PATTERN_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x14
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_INCR__SHIFT 0x18
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_DECR__SHIFT 0x19
+#define PCC_STALL_PATTERN_CTRL__PCC_DITHER_MODE__SHIFT 0x1a
+#define PCC_STALL_PATTERN_CTRL__PCC_STEP_INTERVAL_MASK 0x000003FFL
+#define PCC_STALL_PATTERN_CTRL__PCC_BEGIN_STEP_MASK 0x00007C00L
+#define PCC_STALL_PATTERN_CTRL__PCC_END_STEP_MASK 0x000F8000L
+#define PCC_STALL_PATTERN_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00F00000L
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_INCR_MASK 0x01000000L
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_DECR_MASK 0x02000000L
+#define PCC_STALL_PATTERN_CTRL__PCC_DITHER_MODE_MASK 0x04000000L
+//PWRBRK_STALL_PATTERN_CTRL
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT 0xa
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT 0xf
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x14
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL_MASK 0x000003FFL
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP_MASK 0x00007C00L
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP_MASK 0x000F8000L
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00F00000L
+//PCC_STALL_PATTERN_1_2
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1__SHIFT 0x0
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2__SHIFT 0x10
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_3_4
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3__SHIFT 0x0
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4__SHIFT 0x10
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_5_6
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5__SHIFT 0x0
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6__SHIFT 0x10
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_7
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7__SHIFT 0x0
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7_MASK 0x00007FFFL
+//PWRBRK_STALL_PATTERN_1_2
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_1__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_2__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_3_4
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_3__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_4__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_5_6
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_5__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_6__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_7
+#define PWRBRK_STALL_PATTERN_7__PWRBRK_STALL_PATTERN_7__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_7__PWRBRK_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_STALL_PATTERN_CTRL
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CTRL_EN__SHIFT 0x0
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_SW_RST__SHIFT 0x1
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_STALL_PATTERN_CTRL__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0x3
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_EN__SHIFT 0x7
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_MODE__SHIFT 0x8
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CTRL_EN_MASK 0x00000001L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_SW_RST_MASK 0x00000002L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x00000078L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_EN_MASK 0x00000080L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_MODE_MASK 0x00000700L
+//DIDT_STALL_PATTERN_1_2
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_3_4
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_5_6
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_7
+#define DIDT_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//PCC_PWRBRK_HYSTERESIS_CTRL
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PCC_MAX_HYSTERESIS__SHIFT 0x0
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PWRBRK_MAX_HYSTERESIS__SHIFT 0x8
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PCC_MAX_HYSTERESIS_MASK 0x000000FFL
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PWRBRK_MAX_HYSTERESIS_MASK 0x0000FF00L
+//EDC_STRETCH_PERF_COUNTER
+#define EDC_STRETCH_PERF_COUNTER__STRETCH_PERF_COUNTER__SHIFT 0x0
+#define EDC_STRETCH_PERF_COUNTER__STRETCH_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_UNSTRETCH_PERF_COUNTER
+#define EDC_UNSTRETCH_PERF_COUNTER__UNSTRETCH_PERF_COUNTER__SHIFT 0x0
+#define EDC_UNSTRETCH_PERF_COUNTER__UNSTRETCH_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_STRETCH_NUM_PERF_COUNTER
+#define EDC_STRETCH_NUM_PERF_COUNTER__STRETCH_NUM_PERF_COUNTER__SHIFT 0x0
+#define EDC_STRETCH_NUM_PERF_COUNTER__STRETCH_NUM_PERF_COUNTER_MASK 0xFFFFFFFFL
+//GC_EDC_STATUS
+#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x0
+#define GC_EDC_STATUS__GPIO_IN_0__SHIFT 0x3
+#define GC_EDC_STATUS__GPIO_IN_1__SHIFT 0x4
+#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x00000007L
+#define GC_EDC_STATUS__GPIO_IN_0_MASK 0x00000008L
+#define GC_EDC_STATUS__GPIO_IN_1_MASK 0x00000010L
+//GC_EDC_OVERFLOW
+#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
+#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
+#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
+#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
+//GC_EDC_ROLLING_POWER_DELTA
+#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
+#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
+//GC_THROTTLE_STATUS
+#define GC_THROTTLE_STATUS__FSM_STATE__SHIFT 0x0
+#define GC_THROTTLE_STATUS__PATTERN_INDEX__SHIFT 0x4
+#define GC_THROTTLE_STATUS__FSM_STATE_MASK 0x0000000FL
+#define GC_THROTTLE_STATUS__PATTERN_INDEX_MASK 0x000001F0L
+//EDC_PERF_COUNTER
+#define EDC_PERF_COUNTER__EDC_PERF_COUNTER__SHIFT 0x0
+#define EDC_PERF_COUNTER__EDC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//PCC_PERF_COUNTER
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER__SHIFT 0x0
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//PWRBRK_PERF_COUNTER
+#define PWRBRK_PERF_COUNTER__PWRBRK_PERF_COUNTER__SHIFT 0x0
+#define PWRBRK_PERF_COUNTER__PWRBRK_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_HYSTERESIS_STAT
+#define EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define EDC_HYSTERESIS_STAT__EDC_STATUS__SHIFT 0x8
+#define EDC_HYSTERESIS_STAT__EDC_CREDIT_INCR_OVERFLOW__SHIFT 0x9
+#define EDC_HYSTERESIS_STAT__EDC_THRESHOLD_SEL__SHIFT 0xa
+#define EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define EDC_HYSTERESIS_STAT__EDC_STATUS_MASK 0x00000100L
+#define EDC_HYSTERESIS_STAT__EDC_CREDIT_INCR_OVERFLOW_MASK 0x00000200L
+#define EDC_HYSTERESIS_STAT__EDC_THRESHOLD_SEL_MASK 0x00000400L
+//GC_CAC_WEIGHT_CP_0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CP_1
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_EA_0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_1
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_2
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_1
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_2
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_3
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_4
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_1
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_2
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_UTCL2_WALKER_0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_1
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_2
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GDS_0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_1
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_2
+#define GC_CAC_WEIGHT_GDS_2__WEIGHT_GDS_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_2__WEIGHT_GDS_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GE_0
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_1
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_2
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_3
+#define GC_CAC_WEIGHT_GE_3__WEIGHT_GE_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_3__WEIGHT_GE_SIG6_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_PMM_0
+#define GC_CAC_WEIGHT_PMM_0__WEIGHT_PMM_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PMM_0__WEIGHT_PMM_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GL2C_0
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GL2C_1
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GL2C_2
+#define GC_CAC_WEIGHT_GL2C_2__WEIGHT_GL2C_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_2__WEIGHT_GL2C_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_PH_0
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_1
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_2
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_3
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_0
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_1
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_2
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_3
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_4
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_5
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG10_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG11_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CHC_0
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CHC_1
+#define GC_CAC_WEIGHT_CHC_1__WEIGHT_CHC_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CHC_1__WEIGHT_CHC_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GUS_0
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GUS_1
+#define GC_CAC_WEIGHT_GUS_1__WEIGHT_GUS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GUS_1__WEIGHT_GUS_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_RLC_0
+#define GC_CAC_WEIGHT_RLC_0__WEIGHT_RLC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_RLC_0__WEIGHT_RLC_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GRBM_0
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG1_MASK 0xFFFF0000L
+//GC_EDC_CLK_MONITOR_CTRL
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN__SHIFT 0x0
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL__SHIFT 0x1
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD__SHIFT 0x5
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN_MASK 0x00000001L
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL_MASK 0x0000001EL
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD_MASK 0x0001FFE0L
+//GC_CAC_IND_INDEX
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR__SHIFT 0x0
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//GC_CAC_IND_DATA
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA__SHIFT 0x0
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA_MASK 0xFFFFFFFFL
+//SE_CAC_CTRL_1
+#define SE_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define SE_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
+#define SE_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
+#define SE_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L
+//SE_CAC_CTRL_2
+#define SE_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define SE_CAC_CTRL_2__SE_LCAC_ENABLE__SHIFT 0x1
+#define SE_CAC_CTRL_2__WGP_CAC_CLK_OVERRIDE__SHIFT 0x2
+#define SE_CAC_CTRL_2__SE_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x3
+#define SE_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define SE_CAC_CTRL_2__SE_LCAC_ENABLE_MASK 0x00000002L
+#define SE_CAC_CTRL_2__WGP_CAC_CLK_OVERRIDE_MASK 0x00000004L
+#define SE_CAC_CTRL_2__SE_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000008L
+//SE_CAC_WEIGHT_TA_0
+#define SE_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_TD_0
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_1
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_2
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_3
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_4
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_5
+#define SE_CAC_WEIGHT_TD_5__WEIGHT_TD_SIG10__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_5__WEIGHT_TD_SIG10_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_TCP_0
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_1
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_2
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_3
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_0
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_1
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_2
+#define SE_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SP_0
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SP_1
+#define SE_CAC_WEIGHT_SP_1__WEIGHT_SP_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SP_1__WEIGHT_SP_SIG2_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_LDS_0
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_1
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_2
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_3
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQC_0
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQC_1
+#define SE_CAC_WEIGHT_SQC_1__WEIGHT_SQC_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQC_1__WEIGHT_SQC_SIG2_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_CU_0
+#define SE_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_BCI_0
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_0
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_1
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_2
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_3
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_4
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_5
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG10__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG11__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG10_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG11_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_6
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG12__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG13__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG12_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG13_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_7
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG14__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG15__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG14_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG15_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_8
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG16__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG17__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG16_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG17_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_9
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG18__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG19__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG18_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG19_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_10
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG20__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG21__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG20_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG21_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_11
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG22__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG23__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG22_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG23_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_0
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_1
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_2
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_3
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_4
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_RMI_0
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_RMI_1
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SX_0
+#define SE_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SXRB_0
+#define SE_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_UTCL1_0
+#define SE_CAC_WEIGHT_UTCL1_0__WEIGHT_UTCL1_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_UTCL1_0__WEIGHT_UTCL1_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_GL1C_0
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_GL1C_1
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_GL1C_2
+#define SE_CAC_WEIGHT_GL1C_2__WEIGHT_GL1C_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_2__WEIGHT_GL1C_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SPI_0
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SPI_1
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SPI_2
+#define SE_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_PC_0
+#define SE_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_PA_0
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_1
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_2
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_3
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_0
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_1
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_2
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_3
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WINDOW_AGGR_VALUE
+#define SE_CAC_WINDOW_AGGR_VALUE__SE_CAC_WINDOW_AGGR_VALUE__SHIFT 0x0
+#define SE_CAC_WINDOW_AGGR_VALUE__SE_CAC_WINDOW_AGGR_VALUE_MASK 0xFFFFFFFFL
+//SE_CAC_WINDOW_GFXCLK_CYCLE
+#define SE_CAC_WINDOW_GFXCLK_CYCLE__SE_CAC_WINDOW_GFXCLK_CYCLE__SHIFT 0x0
+#define SE_CAC_WINDOW_GFXCLK_CYCLE__SE_CAC_WINDOW_GFXCLK_CYCLE_MASK 0x000003FFL
+//SE_CAC_IND_INDEX
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR__SHIFT 0x0
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//SE_CAC_IND_DATA
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA__SHIFT 0x0
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfonly2_spidec
+//SPI_RESOURCE_RESERVE_CU_0
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_1
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_2
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_3
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_4
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_5
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_6
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_7
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_8
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_9
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_10
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_11
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_12
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_13
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_14
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_15
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_2
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_3
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_4
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_5
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_6
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_7
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_8
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_9
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_11
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_12
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_13
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_14
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_15
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
+
+
+// addressBlock: gc_gfxudec
+//CP_EOP_DONE_ADDR_LO
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_EOP_DONE_ADDR_HI
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_EOP_DONE_DATA_LO
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_EOP_DONE_DATA_HI
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_LO
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_HI
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_ADDR_LO
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_PIPE_STATS_ADDR_HI
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
+//CP_VGT_IAVERT_COUNT_LO
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAVERT_COUNT_HI
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_LO
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_HI
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_LO
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_HI
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_LO
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_HI
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_LO
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_HI
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_LO
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_HI
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_LO
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_HI
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_LO
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_HI
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_LO
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_HI
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_LO
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_HI
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_LO
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_HI
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_LO
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_HI
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_ASINVOC_COUNT_LO
+#define CP_VGT_ASINVOC_COUNT_LO__ASINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_ASINVOC_COUNT_LO__ASINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_ASINVOC_COUNT_HI
+#define CP_VGT_ASINVOC_COUNT_HI__ASINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_ASINVOC_COUNT_HI__ASINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_CONTROL
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x06000000L
+//SCRATCH_REG0
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//SCRATCH_REG1
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//SCRATCH_REG2
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//SCRATCH_REG3
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//SCRATCH_REG4
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//SCRATCH_REG5
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//SCRATCH_REG6
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//SCRATCH_REG7
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//SCRATCH_REG_ATOMIC
+#define SCRATCH_REG_ATOMIC__IMMED__SHIFT 0x0
+#define SCRATCH_REG_ATOMIC__ID__SHIFT 0x18
+#define SCRATCH_REG_ATOMIC__reserved27__SHIFT 0x1b
+#define SCRATCH_REG_ATOMIC__OP__SHIFT 0x1c
+#define SCRATCH_REG_ATOMIC__reserved31__SHIFT 0x1f
+#define SCRATCH_REG_ATOMIC__IMMED_MASK 0x00FFFFFFL
+#define SCRATCH_REG_ATOMIC__ID_MASK 0x07000000L
+#define SCRATCH_REG_ATOMIC__reserved27_MASK 0x08000000L
+#define SCRATCH_REG_ATOMIC__OP_MASK 0x70000000L
+#define SCRATCH_REG_ATOMIC__reserved31_MASK 0x80000000L
+//SCRATCH_REG_CMPSWAP_ATOMIC
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_COMPARE__SHIFT 0x0
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_REPLACE__SHIFT 0xc
+#define SCRATCH_REG_CMPSWAP_ATOMIC__ID__SHIFT 0x18
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved27__SHIFT 0x1b
+#define SCRATCH_REG_CMPSWAP_ATOMIC__OP__SHIFT 0x1c
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved31__SHIFT 0x1f
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_COMPARE_MASK 0x00000FFFL
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_REPLACE_MASK 0x00FFF000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__ID_MASK 0x07000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved27_MASK 0x08000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__OP_MASK 0x70000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved31_MASK 0x80000000L
+//CP_APPEND_DDID_CNT
+#define CP_APPEND_DDID_CNT__DATA__SHIFT 0x0
+#define CP_APPEND_DDID_CNT__DATA_MASK 0x000000FFL
+//CP_APPEND_DATA_HI
+#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_HI
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_HI
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_LO
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_HI
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_LO
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_HI
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_LO
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_HI
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_APPEND_ADDR_LO
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_ADDR_HI
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
+#define CP_APPEND_ADDR_HI__FENCE_SIZE__SHIFT 0x12
+#define CP_APPEND_ADDR_HI__PWS_ENABLE__SHIFT 0x13
+#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00030000L
+#define CP_APPEND_ADDR_HI__FENCE_SIZE_MASK 0x00040000L
+#define CP_APPEND_ADDR_HI__PWS_ENABLE_MASK 0x00080000L
+#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x06000000L
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
+//CP_APPEND_DATA
+#define CP_APPEND_DATA__DATA__SHIFT 0x0
+#define CP_APPEND_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_DATA_LO
+#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_LO
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_LO
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_LO
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_LO
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_HI
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_HI
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_LO
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_LO
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_HI
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_HI
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_LO
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_LO
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_HI
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_HI
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_WADDR_LO
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_WADDR_HI
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
+#define CP_ME_MC_WADDR_HI__WRITE_CONFIRM__SHIFT 0x11
+#define CP_ME_MC_WADDR_HI__WRITE64__SHIFT 0x12
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_WADDR_HI__VMID__SHIFT 0x18
+#define CP_ME_MC_WADDR_HI__RINGID__SHIFT 0x1c
+#define CP_ME_MC_WADDR_HI__PRIVILEGE__SHIFT 0x1f
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_WADDR_HI__WRITE_CONFIRM_MASK 0x00020000L
+#define CP_ME_MC_WADDR_HI__WRITE64_MASK 0x00040000L
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00C00000L
+#define CP_ME_MC_WADDR_HI__VMID_MASK 0x0F000000L
+#define CP_ME_MC_WADDR_HI__RINGID_MASK 0x30000000L
+#define CP_ME_MC_WADDR_HI__PRIVILEGE_MASK 0x80000000L
+//CP_ME_MC_WDATA_LO
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
+//CP_ME_MC_WDATA_HI
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_RADDR_LO
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_RADDR_HI
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
+#define CP_ME_MC_RADDR_HI__SIZE__SHIFT 0x10
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_RADDR_HI__VMID__SHIFT 0x18
+#define CP_ME_MC_RADDR_HI__PRIVILEGE__SHIFT 0x1f
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_RADDR_HI__SIZE_MASK 0x000F0000L
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00C00000L
+#define CP_ME_MC_RADDR_HI__VMID_MASK 0x0F000000L
+#define CP_ME_MC_RADDR_HI__PRIVILEGE_MASK 0x80000000L
+//CP_SEM_WAIT_TIMER
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
+//CP_SIG_SEM_ADDR_LO
+#define CP_SIG_SEM_ADDR_LO__SEM_PRIV__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_SIG_SEM_ADDR_LO__SEM_PRIV_MASK 0x00000001L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_SIG_SEM_ADDR_HI
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_WAIT_REG_MEM_TIMEOUT
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
+//CP_WAIT_SEM_ADDR_LO
+#define CP_WAIT_SEM_ADDR_LO__SEM_PRIV__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_WAIT_SEM_ADDR_LO__SEM_PRIV_MASK 0x00000001L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_WAIT_SEM_ADDR_HI
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_DMA_PFP_CONTROL
+#define CP_DMA_PFP_CONTROL__VMID__SHIFT 0x0
+#define CP_DMA_PFP_CONTROL__TMZ__SHIFT 0x4
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_PFP_CONTROL__SRC_VOLATLE__SHIFT 0xf
+#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_PFP_CONTROL__DST_VOLATLE__SHIFT 0x1b
+#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_PFP_CONTROL__VMID_MASK 0x0000000FL
+#define CP_DMA_PFP_CONTROL__TMZ_MASK 0x00000010L
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00006000L
+#define CP_DMA_PFP_CONTROL__SRC_VOLATLE_MASK 0x00008000L
+#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x06000000L
+#define CP_DMA_PFP_CONTROL__DST_VOLATLE_MASK 0x08000000L
+#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_CONTROL
+#define CP_DMA_ME_CONTROL__VMID__SHIFT 0x0
+#define CP_DMA_ME_CONTROL__TMZ__SHIFT 0x4
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_ME_CONTROL__SRC_VOLATLE__SHIFT 0xf
+#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_ME_CONTROL__DST_VOLATLE__SHIFT 0x1b
+#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_ME_CONTROL__VMID_MASK 0x0000000FL
+#define CP_DMA_ME_CONTROL__TMZ_MASK 0x00000010L
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00006000L
+#define CP_DMA_ME_CONTROL__SRC_VOLATLE_MASK 0x00008000L
+#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x06000000L
+#define CP_DMA_ME_CONTROL__DST_VOLATLE_MASK 0x08000000L
+#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_SRC_ADDR
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_SRC_ADDR_HI
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_DST_ADDR
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_DST_ADDR_HI
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_COMMAND
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_PFP_SRC_ADDR
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_SRC_ADDR_HI
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_DST_ADDR
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_DST_ADDR_HI
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_COMMAND
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_CNTL
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
+#define CP_DMA_CNTL__WATCH_CONTROL__SHIFT 0x1
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
+#define CP_DMA_CNTL__WATCH_CONTROL_MASK 0x00000002L
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x01FF0000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
+//CP_DMA_READ_TAGS
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+//CP_PFP_IB_CONTROL
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
+//CP_PFP_LOAD_CONTROL
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN__SHIFT 0xf
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
+#define CP_PFP_LOAD_CONTROL__LOAD_ORDINAL__SHIFT 0x1f
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN_MASK 0x00008000L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+#define CP_PFP_LOAD_CONTROL__LOAD_ORDINAL_MASK 0x80000000L
+//CP_SCRATCH_INDEX
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_SCRATCH_DATA
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_RB_OFFSET
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_OFFSET
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_IB2_OFFSET
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_BEGIN
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_END
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_BEGIN
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_END
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_DMA_ME_CMD_ADDR_LO
+#define CP_DMA_ME_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_ME_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_ME_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_DMA_ME_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_ME_CMD_ADDR_HI
+#define CP_DMA_ME_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_ME_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_ME_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_PFP_CMD_ADDR_LO
+#define CP_DMA_PFP_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_PFP_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_PFP_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_DMA_PFP_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_PFP_CMD_ADDR_HI
+#define CP_DMA_PFP_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_PFP_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_PFP_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_APPEND_CMD_ADDR_LO
+#define CP_APPEND_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_APPEND_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_APPEND_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_APPEND_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_CMD_ADDR_HI
+#define CP_APPEND_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_APPEND_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_APPEND_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//UCONFIG_RESERVED_REG0
+#define UCONFIG_RESERVED_REG0__DATA__SHIFT 0x0
+#define UCONFIG_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//UCONFIG_RESERVED_REG1
+#define UCONFIG_RESERVED_REG1__DATA__SHIFT 0x0
+#define UCONFIG_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//CP_PA_MSPRIM_COUNT_LO
+#define CP_PA_MSPRIM_COUNT_LO__MSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_MSPRIM_COUNT_LO__MSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_MSPRIM_COUNT_HI
+#define CP_PA_MSPRIM_COUNT_HI__MSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_MSPRIM_COUNT_HI__MSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_GE_MSINVOC_COUNT_LO
+#define CP_GE_MSINVOC_COUNT_LO__MSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_GE_MSINVOC_COUNT_LO__MSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_GE_MSINVOC_COUNT_HI
+#define CP_GE_MSINVOC_COUNT_HI__MSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_GE_MSINVOC_COUNT_HI__MSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_IB1_CMD_BUFSZ
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB2_CMD_BUFSZ
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_ST_CMD_BUFSZ
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_IB2_BASE_LO
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB2_BASE_HI
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_IB2_BUFSZ
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_ST_BASE_LO
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
+//CP_ST_BASE_HI
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
+//CP_ST_BUFSZ
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
+//CP_EOP_DONE_EVENT_CNTL
+#define CP_EOP_DONE_EVENT_CNTL__GCR_CNTL__SHIFT 0xc
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
+#define CP_EOP_DONE_EVENT_CNTL__EOP_VOLATILE__SHIFT 0x1b
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
+#define CP_EOP_DONE_EVENT_CNTL__GLK_INV__SHIFT 0x1e
+#define CP_EOP_DONE_EVENT_CNTL__PWS_ENABLE__SHIFT 0x1f
+#define CP_EOP_DONE_EVENT_CNTL__GCR_CNTL_MASK 0x01FFF000L
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x06000000L
+#define CP_EOP_DONE_EVENT_CNTL__EOP_VOLATILE_MASK 0x08000000L
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
+#define CP_EOP_DONE_EVENT_CNTL__GLK_INV_MASK 0x40000000L
+#define CP_EOP_DONE_EVENT_CNTL__PWS_ENABLE_MASK 0x80000000L
+//CP_EOP_DONE_DATA_CNTL
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
+#define CP_EOP_DONE_DATA_CNTL__SEMAPHORE_SIGNAL_TYPE__SHIFT 0x13
+#define CP_EOP_DONE_DATA_CNTL__ACTION_PIPE_ID__SHIFT 0x14
+#define CP_EOP_DONE_DATA_CNTL__ACTION_ID__SHIFT 0x16
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__SEMAPHORE_SIGNAL_TYPE_MASK 0x00080000L
+#define CP_EOP_DONE_DATA_CNTL__ACTION_PIPE_ID_MASK 0x00300000L
+#define CP_EOP_DONE_DATA_CNTL__ACTION_ID_MASK 0x00C00000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
+//CP_EOP_DONE_CNTX_ID
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_DB_BASE_LO
+#define CP_DB_BASE_LO__DB_BASE_LO__SHIFT 0x2
+#define CP_DB_BASE_LO__DB_BASE_LO_MASK 0xFFFFFFFCL
+//CP_DB_BASE_HI
+#define CP_DB_BASE_HI__DB_BASE_HI__SHIFT 0x0
+#define CP_DB_BASE_HI__DB_BASE_HI_MASK 0x0000FFFFL
+//CP_DB_BUFSZ
+#define CP_DB_BUFSZ__DB_BUFSZ__SHIFT 0x0
+#define CP_DB_BUFSZ__DB_BUFSZ_MASK 0x000FFFFFL
+//CP_DB_CMD_BUFSZ
+#define CP_DB_CMD_BUFSZ__DB_CMD_REQSZ__SHIFT 0x0
+#define CP_DB_CMD_BUFSZ__DB_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_PFP_COMPLETION_STATUS
+#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_PRED_NOT_VISIBLE
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
+//CP_PFP_METADATA_BASE_ADDR
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_PFP_METADATA_BASE_ADDR_HI
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DRAW_INDX_INDR_ADDR
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DRAW_INDX_INDR_ADDR_HI
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DISPATCH_INDR_ADDR
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DISPATCH_INDR_ADDR_HI
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_BASE_ADDR
+#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_INDEX_BASE_ADDR_HI
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_TYPE
+#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+//CP_GDS_BKUP_ADDR
+#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_GDS_BKUP_ADDR_HI
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_SAMPLE_STATUS
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
+//CP_ME_COHER_CNTL
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+//CP_ME_COHER_SIZE
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_SIZE_HI
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_BASE
+#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_BASE_HI
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_STATUS
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
+#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
+#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
+//RLC_GPM_PERF_COUNT_0
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_0__SA_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_0__WGP_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_0__SA_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_0__WGP_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_PERF_COUNT_1
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_1__SA_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_1__WGP_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_1__SA_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_1__WGP_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SA_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SA_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SA_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+//VGT_PRIMITIVE_TYPE
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_INDEX_TYPE
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_INDEX_TYPE__DISABLE_INSTANCE_PACKING__SHIFT 0xe
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__DISABLE_INSTANCE_PACKING_MASK 0x00004000L
+//GE_MIN_VTX_INDX
+#define GE_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
+#define GE_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
+//GE_INDX_OFFSET
+#define GE_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
+#define GE_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
+//GE_MULTI_PRIM_IB_RESET_EN
+#define GE_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
+#define GE_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
+#define GE_MULTI_PRIM_IB_RESET_EN__DISABLE_FOR_AUTO_INDEX__SHIFT 0x2
+#define GE_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define GE_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
+#define GE_MULTI_PRIM_IB_RESET_EN__DISABLE_FOR_AUTO_INDEX_MASK 0x00000004L
+//VGT_NUM_INDICES
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_NUM_INSTANCES
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_TF_RING_SIZE
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0001FFFFL
+//VGT_HS_OFFCHIP_PARAM
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0xa
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000003FFL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000C00L
+//VGT_TF_MEMORY_BASE
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
+//GE_MAX_VTX_INDX
+#define GE_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
+#define GE_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
+//VGT_INSTANCE_BASE_ID
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
+//GE_CNTL
+#define GE_CNTL__PRIMS_PER_SUBGRP__SHIFT 0x0
+#define GE_CNTL__VERTS_PER_SUBGRP__SHIFT 0x9
+#define GE_CNTL__BREAK_SUBGRP_AT_EOI__SHIFT 0x12
+#define GE_CNTL__PACKET_TO_ONE_PA__SHIFT 0x13
+#define GE_CNTL__BREAK_PRIMGRP_AT_EOI__SHIFT 0x14
+#define GE_CNTL__PRIM_GRP_SIZE__SHIFT 0x15
+#define GE_CNTL__GCR_DISABLE__SHIFT 0x1e
+#define GE_CNTL__DIS_PG_SIZE_ADJUST_FOR_STRIP__SHIFT 0x1f
+#define GE_CNTL__PRIMS_PER_SUBGRP_MASK 0x000001FFL
+#define GE_CNTL__VERTS_PER_SUBGRP_MASK 0x0003FE00L
+#define GE_CNTL__BREAK_SUBGRP_AT_EOI_MASK 0x00040000L
+#define GE_CNTL__PACKET_TO_ONE_PA_MASK 0x00080000L
+#define GE_CNTL__BREAK_PRIMGRP_AT_EOI_MASK 0x00100000L
+#define GE_CNTL__PRIM_GRP_SIZE_MASK 0x3FE00000L
+#define GE_CNTL__GCR_DISABLE_MASK 0x40000000L
+#define GE_CNTL__DIS_PG_SIZE_ADJUST_FOR_STRIP_MASK 0x80000000L
+//GE_USER_VGPR1
+#define GE_USER_VGPR1__DATA__SHIFT 0x0
+#define GE_USER_VGPR1__DATA_MASK 0xFFFFFFFFL
+//GE_USER_VGPR2
+#define GE_USER_VGPR2__DATA__SHIFT 0x0
+#define GE_USER_VGPR2__DATA_MASK 0xFFFFFFFFL
+//GE_USER_VGPR3
+#define GE_USER_VGPR3__DATA__SHIFT 0x0
+#define GE_USER_VGPR3__DATA_MASK 0xFFFFFFFFL
+//GE_STEREO_CNTL
+#define GE_STEREO_CNTL__RT_SLICE__SHIFT 0x0
+#define GE_STEREO_CNTL__VIEWPORT__SHIFT 0x3
+#define GE_STEREO_CNTL__FSR_SELECT__SHIFT 0x7
+#define GE_STEREO_CNTL__EN_STEREO__SHIFT 0x8
+#define GE_STEREO_CNTL__RT_SLICE_MASK 0x00000007L
+#define GE_STEREO_CNTL__VIEWPORT_MASK 0x00000078L
+#define GE_STEREO_CNTL__FSR_SELECT_MASK 0x00000080L
+#define GE_STEREO_CNTL__EN_STEREO_MASK 0x00000100L
+//GE_PC_ALLOC
+#define GE_PC_ALLOC__OVERSUB_EN__SHIFT 0x0
+#define GE_PC_ALLOC__NUM_PC_LINES__SHIFT 0x1
+#define GE_PC_ALLOC__OVERSUB_EN_MASK 0x00000001L
+#define GE_PC_ALLOC__NUM_PC_LINES_MASK 0x000007FEL
+//VGT_TF_MEMORY_BASE_HI
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
+//GE_USER_VGPR_EN
+#define GE_USER_VGPR_EN__EN_USER_VGPR1__SHIFT 0x0
+#define GE_USER_VGPR_EN__EN_USER_VGPR2__SHIFT 0x1
+#define GE_USER_VGPR_EN__EN_USER_VGPR3__SHIFT 0x2
+#define GE_USER_VGPR_EN__EN_USER_VGPR1_MASK 0x00000001L
+#define GE_USER_VGPR_EN__EN_USER_VGPR2_MASK 0x00000002L
+#define GE_USER_VGPR_EN__EN_USER_VGPR3_MASK 0x00000004L
+//GE_GS_FAST_LAUNCH_WG_DIM
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_X__SHIFT 0x0
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_Y__SHIFT 0x10
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_X_MASK 0x0000FFFFL
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_Y_MASK 0xFFFF0000L
+//GE_GS_FAST_LAUNCH_WG_DIM_1
+#define GE_GS_FAST_LAUNCH_WG_DIM_1__GS_FL_DIM_Z__SHIFT 0x0
+#define GE_GS_FAST_LAUNCH_WG_DIM_1__GS_FL_DIM_Z_MASK 0x0000FFFFL
+//VGT_GS_OUT_PRIM_TYPE
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
+//PA_SU_LINE_STIPPLE_VALUE
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
+//PA_SC_LINE_STIPPLE_STATE
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
+//PA_SC_SCREEN_EXTENT_MIN_0
+#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_0
+#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MIN_1
+#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_1
+#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
+//PA_SC_P3D_TRAP_SCREEN_HV_EN
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_P3D_TRAP_SCREEN_H
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_V
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_P3D_TRAP_SCREEN_COUNT
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_HV_EN
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_HP3D_TRAP_SCREEN_H
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_V
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_COUNT
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_HV_EN
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_TRAP_SCREEN_H
+#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_V
+#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_COUNT
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//SQ_THREAD_TRACE_USERDATA_0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_1
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_2
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_3
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_4
+#define SQ_THREAD_TRACE_USERDATA_4__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_4__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_5
+#define SQ_THREAD_TRACE_USERDATA_5__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_5__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_6
+#define SQ_THREAD_TRACE_USERDATA_6__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_6__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_7
+#define SQ_THREAD_TRACE_USERDATA_7__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_7__DATA_MASK 0xFFFFFFFFL
+//SQC_CACHES
+#define SQC_CACHES__TARGET_INST__SHIFT 0x0
+#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
+#define SQC_CACHES__INVALIDATE__SHIFT 0x2
+#define SQC_CACHES__COMPLETE__SHIFT 0x10
+#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
+#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
+#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
+#define SQC_CACHES__COMPLETE_MASK 0x00010000L
+//TA_CS_BC_BASE_ADDR
+#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_CS_BC_BASE_ADDR_HI
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//DB_OCCLUSION_COUNT0_LOW
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT0_HI
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT1_LOW
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT1_HI
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT2_LOW
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT2_HI
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT3_LOW
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT3_HI
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//GDS_RD_ADDR
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_DATA
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
+#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_ADDR
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_COUNT
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_DATA
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_ADDR
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_DATA
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_ADDR
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_DATA
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WRITE_COMPLETE
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
+//GDS_ATOM_CNTL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
+//GDS_ATOM_COMPLETE
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
+//GDS_ATOM_BASE
+#define GDS_ATOM_BASE__BASE__SHIFT 0x0
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0xc
+#define GDS_ATOM_BASE__BASE_MASK 0x00000FFFL
+#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFFF000L
+//GDS_ATOM_SIZE
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0xd
+#define GDS_ATOM_SIZE__SIZE_MASK 0x00001FFFL
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFFE000L
+//GDS_ATOM_OFFSET0
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_OFFSET1
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_DST
+#define GDS_ATOM_DST__DST__SHIFT 0x0
+#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
+//GDS_ATOM_OP
+#define GDS_ATOM_OP__OP__SHIFT 0x0
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OP__OP_MASK 0x000000FFL
+#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_SRC0
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC0_U
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1_U
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0
+#define GDS_ATOM_READ0__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0_U
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1
+#define GDS_ATOM_READ1__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1_U
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_GWS_RESOURCE_CNTL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GWS_RESOURCE
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
+#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1d
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1e
+#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1f
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001FFEL
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
+#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x1FFF0000L
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x20000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x40000000L
+#define GDS_GWS_RESOURCE__HALTED_MASK 0x80000000L
+//GDS_GWS_RESOURCE_CNT
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_CNTL
+#define GDS_OA_CNTL__INDEX__SHIFT 0x0
+#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
+#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
+#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
+//GDS_OA_COUNTER
+#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
+#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
+//GDS_OA_ADDRESS
+#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
+#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x10
+#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x14
+#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x18
+#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
+#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
+#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
+#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x000F0000L
+#define GDS_OA_ADDRESS__CRAWLER_MASK 0x00F00000L
+#define GDS_OA_ADDRESS__UNUSED_MASK 0x3F000000L
+#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
+#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
+//GDS_OA_INCDEC
+#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
+#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
+#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
+#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
+//GDS_OA_RING_SIZE
+#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
+#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_0
+#define GDS_STRMOUT_DWORDS_WRITTEN_0__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_0__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_1
+#define GDS_STRMOUT_DWORDS_WRITTEN_1__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_1__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_2
+#define GDS_STRMOUT_DWORDS_WRITTEN_2__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_2__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_3
+#define GDS_STRMOUT_DWORDS_WRITTEN_3__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_3__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_0
+#define GDS_GS_0__DATA__SHIFT 0x0
+#define GDS_GS_0__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_1
+#define GDS_GS_1__DATA__SHIFT 0x0
+#define GDS_GS_1__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_2
+#define GDS_GS_2__DATA__SHIFT 0x0
+#define GDS_GS_2__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_3
+#define GDS_GS_3__DATA__SHIFT 0x0
+#define GDS_GS_3__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_0_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_0_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_0_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_0_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_0_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_0_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_0_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_0_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_1_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_1_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_1_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_1_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_1_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_1_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_1_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_1_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_2_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_2_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_2_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_2_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_2_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_2_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_2_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_2_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_3_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_3_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_3_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_3_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_3_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_3_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_3_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_3_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_HI__DATA_MASK 0xFFFFFFFFL
+//SPI_CONFIG_CNTL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
+//SPI_CONFIG_CNTL_1
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x5
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MODE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_1__OREO_EXPALLOC_STALL__SHIFT 0x9
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
+#define SPI_CONFIG_CNTL_1__MAX_VTX_SYNC_CNT__SHIFT 0x10
+#define SPI_CONFIG_CNTL_1__EN_USER_ACCUM__SHIFT 0x15
+#define SPI_CONFIG_CNTL_1__SA_SCREEN_MAP__SHIFT 0x16
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT__SHIFT 0x17
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000060L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MODE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__OREO_EXPALLOC_STALL_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
+#define SPI_CONFIG_CNTL_1__MAX_VTX_SYNC_CNT_MASK 0x001F0000L
+#define SPI_CONFIG_CNTL_1__EN_USER_ACCUM_MASK 0x00200000L
+#define SPI_CONFIG_CNTL_1__SA_SCREEN_MAP_MASK 0x00400000L
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MASK 0xFF800000L
+//SPI_CONFIG_CNTL_2
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
+#define SPI_CONFIG_CNTL_2__PWS_CSG_WAIT_DISABLE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_2__PWS_HS_WAIT_DISABLE__SHIFT 0x9
+#define SPI_CONFIG_CNTL_2__PWS_GS_WAIT_DISABLE__SHIFT 0xa
+#define SPI_CONFIG_CNTL_2__PWS_PS_WAIT_DISABLE__SHIFT 0xb
+#define SPI_CONFIG_CNTL_2__CSC_HALT_ACK_DELAY__SHIFT 0xc
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L
+#define SPI_CONFIG_CNTL_2__PWS_CSG_WAIT_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_2__PWS_HS_WAIT_DISABLE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_2__PWS_GS_WAIT_DISABLE_MASK 0x00000400L
+#define SPI_CONFIG_CNTL_2__PWS_PS_WAIT_DISABLE_MASK 0x00000800L
+#define SPI_CONFIG_CNTL_2__CSC_HALT_ACK_DELAY_MASK 0x0001F000L
+//SPI_WAVE_LIMIT_CNTL
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN__SHIFT 0x0
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN__SHIFT 0x4
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN__SHIFT 0x6
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN_MASK 0x00000003L
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L
+//SPI_GS_THROTTLE_CNTL1
+#define SPI_GS_THROTTLE_CNTL1__PH_POLL_INTERVAL__SHIFT 0x0
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_BASE__SHIFT 0x4
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_STEP_SIZE__SHIFT 0x8
+#define SPI_GS_THROTTLE_CNTL1__SPI_VGPR_THRESHOLD__SHIFT 0xc
+#define SPI_GS_THROTTLE_CNTL1__SPI_LDS_THRESHOLD__SHIFT 0x10
+#define SPI_GS_THROTTLE_CNTL1__SPI_POLL_INTERVAL__SHIFT 0x14
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_BASE__SHIFT 0x18
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_STEP_SIZE__SHIFT 0x1c
+#define SPI_GS_THROTTLE_CNTL1__PH_POLL_INTERVAL_MASK 0x0000000FL
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_BASE_MASK 0x000000F0L
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_STEP_SIZE_MASK 0x00000F00L
+#define SPI_GS_THROTTLE_CNTL1__SPI_VGPR_THRESHOLD_MASK 0x0000F000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_LDS_THRESHOLD_MASK 0x000F0000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_POLL_INTERVAL_MASK 0x00F00000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_BASE_MASK 0x0F000000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_STEP_SIZE_MASK 0xF0000000L
+//SPI_GS_THROTTLE_CNTL2
+#define SPI_GS_THROTTLE_CNTL2__SPI_THROTTLE_MODE__SHIFT 0x0
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD__SHIFT 0x2
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_FACTOR__SHIFT 0x6
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY1__SHIFT 0x8
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY2__SHIFT 0xb
+#define SPI_GS_THROTTLE_CNTL2__PS_STALL_THRESHOLD__SHIFT 0xe
+#define SPI_GS_THROTTLE_CNTL2__PH_MODE__SHIFT 0x10
+#define SPI_GS_THROTTLE_CNTL2__RESERVED__SHIFT 0x11
+#define SPI_GS_THROTTLE_CNTL2__SPI_THROTTLE_MODE_MASK 0x00000003L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_MASK 0x0000003CL
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_FACTOR_MASK 0x000000C0L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY1_MASK 0x00000700L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY2_MASK 0x00003800L
+#define SPI_GS_THROTTLE_CNTL2__PS_STALL_THRESHOLD_MASK 0x0000C000L
+#define SPI_GS_THROTTLE_CNTL2__PH_MODE_MASK 0x00010000L
+#define SPI_GS_THROTTLE_CNTL2__RESERVED_MASK 0xFFFE0000L
+//SPI_ATTRIBUTE_RING_BASE
+#define SPI_ATTRIBUTE_RING_BASE__BASE__SHIFT 0x0
+#define SPI_ATTRIBUTE_RING_BASE__BASE_MASK 0xFFFFFFFFL
+//SPI_ATTRIBUTE_RING_SIZE
+#define SPI_ATTRIBUTE_RING_SIZE__MEM_SIZE__SHIFT 0x0
+#define SPI_ATTRIBUTE_RING_SIZE__BIG_PAGE__SHIFT 0x10
+#define SPI_ATTRIBUTE_RING_SIZE__L1_POLICY__SHIFT 0x11
+#define SPI_ATTRIBUTE_RING_SIZE__L2_POLICY__SHIFT 0x13
+#define SPI_ATTRIBUTE_RING_SIZE__LLC_NOALLOC__SHIFT 0x15
+#define SPI_ATTRIBUTE_RING_SIZE__GL1_PERF_COUNTER_DISABLE__SHIFT 0x16
+#define SPI_ATTRIBUTE_RING_SIZE__MEM_SIZE_MASK 0x000000FFL
+#define SPI_ATTRIBUTE_RING_SIZE__BIG_PAGE_MASK 0x00010000L
+#define SPI_ATTRIBUTE_RING_SIZE__L1_POLICY_MASK 0x00060000L
+#define SPI_ATTRIBUTE_RING_SIZE__L2_POLICY_MASK 0x00180000L
+#define SPI_ATTRIBUTE_RING_SIZE__LLC_NOALLOC_MASK 0x00200000L
+#define SPI_ATTRIBUTE_RING_SIZE__GL1_PERF_COUNTER_DISABLE_MASK 0x00400000L
+
+
+// addressBlock: gc_cprs64dec
+//CP_MES_PRGRM_CNTR_START
+#define CP_MES_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MES_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MES_INTR_ROUTINE_START
+#define CP_MES_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MES_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_MES_MTVEC_LO
+#define CP_MES_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define CP_MES_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_INTR_ROUTINE_START_HI
+#define CP_MES_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_MES_INTR_ROUTINE_START_HI__IR_START_MASK 0xFFFFFFFFL
+//CP_MES_MTVEC_HI
+#define CP_MES_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define CP_MES_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_CNTL
+#define CP_MES_CNTL__MES_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MES_CNTL__MES_PIPE0_RESET__SHIFT 0x10
+#define CP_MES_CNTL__MES_PIPE1_RESET__SHIFT 0x11
+#define CP_MES_CNTL__MES_PIPE2_RESET__SHIFT 0x12
+#define CP_MES_CNTL__MES_PIPE3_RESET__SHIFT 0x13
+#define CP_MES_CNTL__MES_PIPE0_ACTIVE__SHIFT 0x1a
+#define CP_MES_CNTL__MES_PIPE1_ACTIVE__SHIFT 0x1b
+#define CP_MES_CNTL__MES_PIPE2_ACTIVE__SHIFT 0x1c
+#define CP_MES_CNTL__MES_PIPE3_ACTIVE__SHIFT 0x1d
+#define CP_MES_CNTL__MES_HALT__SHIFT 0x1e
+#define CP_MES_CNTL__MES_STEP__SHIFT 0x1f
+#define CP_MES_CNTL__MES_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MES_CNTL__MES_PIPE0_RESET_MASK 0x00010000L
+#define CP_MES_CNTL__MES_PIPE1_RESET_MASK 0x00020000L
+#define CP_MES_CNTL__MES_PIPE2_RESET_MASK 0x00040000L
+#define CP_MES_CNTL__MES_PIPE3_RESET_MASK 0x00080000L
+#define CP_MES_CNTL__MES_PIPE0_ACTIVE_MASK 0x04000000L
+#define CP_MES_CNTL__MES_PIPE1_ACTIVE_MASK 0x08000000L
+#define CP_MES_CNTL__MES_PIPE2_ACTIVE_MASK 0x10000000L
+#define CP_MES_CNTL__MES_PIPE3_ACTIVE_MASK 0x20000000L
+#define CP_MES_CNTL__MES_HALT_MASK 0x40000000L
+#define CP_MES_CNTL__MES_STEP_MASK 0x80000000L
+//CP_MES_PIPE_PRIORITY_CNTS
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_MES_PIPE0_PRIORITY
+#define CP_MES_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE1_PRIORITY
+#define CP_MES_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE2_PRIORITY
+#define CP_MES_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE3_PRIORITY
+#define CP_MES_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_HEADER_DUMP
+#define CP_MES_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MES_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MES_MIE_LO
+#define CP_MES_MIE_LO__MES_INT__SHIFT 0x0
+#define CP_MES_MIE_LO__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_MIE_HI
+#define CP_MES_MIE_HI__MES_INT__SHIFT 0x0
+#define CP_MES_MIE_HI__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT
+#define CP_MES_INTERRUPT__MES_INT__SHIFT 0x0
+#define CP_MES_INTERRUPT__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_SCRATCH_INDEX
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_MES_SCRATCH_DATA
+#define CP_MES_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_MES_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_MES_INSTR_PNTR
+#define CP_MES_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MES_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_MES_MSCRATCH_HI
+#define CP_MES_MSCRATCH_HI__DATA__SHIFT 0x0
+#define CP_MES_MSCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_MSCRATCH_LO
+#define CP_MES_MSCRATCH_LO__DATA__SHIFT 0x0
+#define CP_MES_MSCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_MSTATUS_LO
+#define CP_MES_MSTATUS_LO__STATUS_LO__SHIFT 0x0
+#define CP_MES_MSTATUS_LO__STATUS_LO_MASK 0xFFFFFFFFL
+//CP_MES_MSTATUS_HI
+#define CP_MES_MSTATUS_HI__STATUS_HI__SHIFT 0x0
+#define CP_MES_MSTATUS_HI__STATUS_HI_MASK 0xFFFFFFFFL
+//CP_MES_MEPC_LO
+#define CP_MES_MEPC_LO__MEPC_LO__SHIFT 0x0
+#define CP_MES_MEPC_LO__MEPC_LO_MASK 0xFFFFFFFFL
+//CP_MES_MEPC_HI
+#define CP_MES_MEPC_HI__MEPC_HI__SHIFT 0x0
+#define CP_MES_MEPC_HI__MEPC_HI_MASK 0xFFFFFFFFL
+//CP_MES_MCAUSE_LO
+#define CP_MES_MCAUSE_LO__CAUSE_LO__SHIFT 0x0
+#define CP_MES_MCAUSE_LO__CAUSE_LO_MASK 0xFFFFFFFFL
+//CP_MES_MCAUSE_HI
+#define CP_MES_MCAUSE_HI__CAUSE_HI__SHIFT 0x0
+#define CP_MES_MCAUSE_HI__CAUSE_HI_MASK 0xFFFFFFFFL
+//CP_MES_MBADADDR_LO
+#define CP_MES_MBADADDR_LO__ADDR_LO__SHIFT 0x0
+#define CP_MES_MBADADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_MBADADDR_HI
+#define CP_MES_MBADADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_MES_MBADADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_MES_MIP_LO
+#define CP_MES_MIP_LO__MIP_LO__SHIFT 0x0
+#define CP_MES_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIP_HI
+#define CP_MES_MIP_HI__MIP_HI__SHIFT 0x0
+#define CP_MES_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//CP_MES_IC_OP_CNTL
+#define CP_MES_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_MES_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_MES_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_MES_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_MES_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_MES_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_MES_MCYCLE_LO
+#define CP_MES_MCYCLE_LO__CYCLE_LO__SHIFT 0x0
+#define CP_MES_MCYCLE_LO__CYCLE_LO_MASK 0xFFFFFFFFL
+//CP_MES_MCYCLE_HI
+#define CP_MES_MCYCLE_HI__CYCLE_HI__SHIFT 0x0
+#define CP_MES_MCYCLE_HI__CYCLE_HI_MASK 0xFFFFFFFFL
+//CP_MES_MTIME_LO
+#define CP_MES_MTIME_LO__TIME_LO__SHIFT 0x0
+#define CP_MES_MTIME_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MES_MTIME_HI
+#define CP_MES_MTIME_HI__TIME_HI__SHIFT 0x0
+#define CP_MES_MTIME_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MES_MINSTRET_LO
+#define CP_MES_MINSTRET_LO__INSTRET_LO__SHIFT 0x0
+#define CP_MES_MINSTRET_LO__INSTRET_LO_MASK 0xFFFFFFFFL
+//CP_MES_MINSTRET_HI
+#define CP_MES_MINSTRET_HI__INSTRET_HI__SHIFT 0x0
+#define CP_MES_MINSTRET_HI__INSTRET_HI_MASK 0xFFFFFFFFL
+//CP_MES_MISA_LO
+#define CP_MES_MISA_LO__MISA_LO__SHIFT 0x0
+#define CP_MES_MISA_LO__MISA_LO_MASK 0xFFFFFFFFL
+//CP_MES_MISA_HI
+#define CP_MES_MISA_HI__MISA_HI__SHIFT 0x0
+#define CP_MES_MISA_HI__MISA_HI_MASK 0xFFFFFFFFL
+//CP_MES_MVENDORID_LO
+#define CP_MES_MVENDORID_LO__MVENDORID_LO__SHIFT 0x0
+#define CP_MES_MVENDORID_LO__MVENDORID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MVENDORID_HI
+#define CP_MES_MVENDORID_HI__MVENDORID_HI__SHIFT 0x0
+#define CP_MES_MVENDORID_HI__MVENDORID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MARCHID_LO
+#define CP_MES_MARCHID_LO__MARCHID_LO__SHIFT 0x0
+#define CP_MES_MARCHID_LO__MARCHID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MARCHID_HI
+#define CP_MES_MARCHID_HI__MARCHID_HI__SHIFT 0x0
+#define CP_MES_MARCHID_HI__MARCHID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MIMPID_LO
+#define CP_MES_MIMPID_LO__MIMPID_LO__SHIFT 0x0
+#define CP_MES_MIMPID_LO__MIMPID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIMPID_HI
+#define CP_MES_MIMPID_HI__MIMPID_HI__SHIFT 0x0
+#define CP_MES_MIMPID_HI__MIMPID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MHARTID_LO
+#define CP_MES_MHARTID_LO__MHARTID_LO__SHIFT 0x0
+#define CP_MES_MHARTID_LO__MHARTID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MHARTID_HI
+#define CP_MES_MHARTID_HI__MHARTID_HI__SHIFT 0x0
+#define CP_MES_MHARTID_HI__MHARTID_HI_MASK 0xFFFFFFFFL
+//CP_MES_DC_BASE_CNTL
+#define CP_MES_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MES_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_DC_OP_CNTL
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_MES_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_MES_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+//CP_MES_MTIMECMP_LO
+#define CP_MES_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define CP_MES_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MES_MTIMECMP_HI
+#define CP_MES_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define CP_MES_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MES_PROCESS_QUANTUM_PIPE0
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_DURATION__SHIFT 0x0
+#define CP_MES_PROCESS_QUANTUM_PIPE0__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_EN__SHIFT 0x1f
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_MES_PROCESS_QUANTUM_PIPE0__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_EN_MASK 0x80000000L
+//CP_MES_PROCESS_QUANTUM_PIPE1
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_DURATION__SHIFT 0x0
+#define CP_MES_PROCESS_QUANTUM_PIPE1__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_EN__SHIFT 0x1f
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_MES_PROCESS_QUANTUM_PIPE1__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_EN_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL1
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL2
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL3
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL4
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL5
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL6
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_GP0_LO
+#define CP_MES_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MES_GP0_LO__DATA__SHIFT 0x1
+#define CP_MES_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MES_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MES_GP0_HI
+#define CP_MES_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MES_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MES_GP1_LO
+#define CP_MES_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MES_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP1_HI
+#define CP_MES_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MES_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP2_LO
+#define CP_MES_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MES_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP2_HI
+#define CP_MES_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MES_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP3_LO
+#define CP_MES_GP3_LO__DATA__SHIFT 0x0
+#define CP_MES_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP3_HI
+#define CP_MES_GP3_HI__DATA__SHIFT 0x0
+#define CP_MES_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP4_LO
+#define CP_MES_GP4_LO__DATA__SHIFT 0x0
+#define CP_MES_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP4_HI
+#define CP_MES_GP4_HI__DATA__SHIFT 0x0
+#define CP_MES_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP5_LO
+#define CP_MES_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MES_GP5_LO__DATA__SHIFT 0x1
+#define CP_MES_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MES_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MES_GP5_HI
+#define CP_MES_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MES_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MES_GP6_LO
+#define CP_MES_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MES_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP6_HI
+#define CP_MES_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MES_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP7_LO
+#define CP_MES_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MES_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP7_HI
+#define CP_MES_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MES_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP8_LO
+#define CP_MES_GP8_LO__DATA__SHIFT 0x0
+#define CP_MES_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP8_HI
+#define CP_MES_GP8_HI__DATA__SHIFT 0x0
+#define CP_MES_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP9_LO
+#define CP_MES_GP9_LO__DATA__SHIFT 0x0
+#define CP_MES_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP9_HI
+#define CP_MES_GP9_HI__DATA__SHIFT 0x0
+#define CP_MES_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_LOCAL_BASE0_LO
+#define CP_MES_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_MES_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_BASE0_HI
+#define CP_MES_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_MES_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_MASK0_LO
+#define CP_MES_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_MES_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_MASK0_HI
+#define CP_MES_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_MES_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_APERTURE
+#define CP_MES_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_INSTR_BASE_LO
+#define CP_MES_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_INSTR_BASE_HI
+#define CP_MES_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_INSTR_MASK_LO
+#define CP_MES_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_MES_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_INSTR_MASK_HI
+#define CP_MES_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_INSTR_APERTURE
+#define CP_MES_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_SCRATCH_APERTURE
+#define CP_MES_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_SCRATCH_BASE_LO
+#define CP_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_SCRATCH_BASE_HI
+#define CP_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_PERFCOUNT_CNTL
+#define CP_MES_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define CP_MES_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//CP_MES_PENDING_INTERRUPT
+#define CP_MES_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_MES_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_MES_PRGRM_CNTR_START_HI
+#define CP_MES_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_MES_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MES_INTERRUPT_DATA_16
+#define CP_MES_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_17
+#define CP_MES_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_18
+#define CP_MES_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_19
+#define CP_MES_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_20
+#define CP_MES_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_21
+#define CP_MES_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_22
+#define CP_MES_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_23
+#define CP_MES_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_24
+#define CP_MES_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_25
+#define CP_MES_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_26
+#define CP_MES_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_27
+#define CP_MES_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_28
+#define CP_MES_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_29
+#define CP_MES_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_30
+#define CP_MES_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_31
+#define CP_MES_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_BASE
+#define CP_MES_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_MASK
+#define CP_MES_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_CNTL
+#define CP_MES_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE1_BASE
+#define CP_MES_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE1_MASK
+#define CP_MES_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE1_CNTL
+#define CP_MES_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE2_BASE
+#define CP_MES_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE2_MASK
+#define CP_MES_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE2_CNTL
+#define CP_MES_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE3_BASE
+#define CP_MES_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE3_MASK
+#define CP_MES_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE3_CNTL
+#define CP_MES_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE4_BASE
+#define CP_MES_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE4_MASK
+#define CP_MES_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE4_CNTL
+#define CP_MES_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE5_BASE
+#define CP_MES_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE5_MASK
+#define CP_MES_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE5_CNTL
+#define CP_MES_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE6_BASE
+#define CP_MES_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE6_MASK
+#define CP_MES_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE6_CNTL
+#define CP_MES_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE7_BASE
+#define CP_MES_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE7_MASK
+#define CP_MES_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE7_CNTL
+#define CP_MES_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE8_BASE
+#define CP_MES_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE8_MASK
+#define CP_MES_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE8_CNTL
+#define CP_MES_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE9_BASE
+#define CP_MES_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE9_MASK
+#define CP_MES_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE9_CNTL
+#define CP_MES_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE10_BASE
+#define CP_MES_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE10_MASK
+#define CP_MES_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE10_CNTL
+#define CP_MES_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE11_BASE
+#define CP_MES_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE11_MASK
+#define CP_MES_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE11_CNTL
+#define CP_MES_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE12_BASE
+#define CP_MES_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE12_MASK
+#define CP_MES_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE12_CNTL
+#define CP_MES_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE13_BASE
+#define CP_MES_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE13_MASK
+#define CP_MES_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE13_CNTL
+#define CP_MES_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE14_BASE
+#define CP_MES_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE14_MASK
+#define CP_MES_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE14_CNTL
+#define CP_MES_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE15_BASE
+#define CP_MES_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE15_MASK
+#define CP_MES_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE15_CNTL
+#define CP_MES_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_RS64_PRGRM_CNTR_START
+#define CP_MEC_RS64_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC_RS64_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MEC_MTVEC_LO
+#define CP_MEC_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define CP_MEC_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MTVEC_HI
+#define CP_MEC_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define CP_MEC_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_ISA_CNTL
+#define CP_MEC_ISA_CNTL__ISA_MODE__SHIFT 0x0
+#define CP_MEC_ISA_CNTL__ISA_MODE_MASK 0x00000001L
+//CP_MEC_RS64_CNTL
+#define CP_MEC_RS64_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_ACTIVE__SHIFT 0x1a
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_ACTIVE__SHIFT 0x1b
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_ACTIVE__SHIFT 0x1c
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_ACTIVE__SHIFT 0x1d
+#define CP_MEC_RS64_CNTL__MEC_HALT__SHIFT 0x1e
+#define CP_MEC_RS64_CNTL__MEC_STEP__SHIFT 0x1f
+#define CP_MEC_RS64_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_ACTIVE_MASK 0x04000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_ACTIVE_MASK 0x08000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_ACTIVE_MASK 0x10000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_ACTIVE_MASK 0x20000000L
+#define CP_MEC_RS64_CNTL__MEC_HALT_MASK 0x40000000L
+#define CP_MEC_RS64_CNTL__MEC_STEP_MASK 0x80000000L
+//CP_MEC_MIE_LO
+#define CP_MEC_MIE_LO__MEC_INT__SHIFT 0x0
+#define CP_MEC_MIE_LO__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_MIE_HI
+#define CP_MEC_MIE_HI__MEC_INT__SHIFT 0x0
+#define CP_MEC_MIE_HI__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT
+#define CP_MEC_RS64_INTERRUPT__MEC_INT__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INSTR_PNTR
+#define CP_MEC_RS64_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC_RS64_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_MEC_MIP_LO
+#define CP_MEC_MIP_LO__MIP_LO__SHIFT 0x0
+#define CP_MEC_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MIP_HI
+#define CP_MEC_MIP_HI__MIP_HI__SHIFT 0x0
+#define CP_MEC_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//CP_MEC_DC_BASE_CNTL
+#define CP_MEC_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MEC_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MEC_DC_OP_CNTL
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_MEC_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_MEC_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+//CP_MEC_MTIMECMP_LO
+#define CP_MEC_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define CP_MEC_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MTIMECMP_HI
+#define CP_MEC_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define CP_MEC_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP0_LO
+#define CP_MEC_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MEC_GP0_LO__DATA__SHIFT 0x1
+#define CP_MEC_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MEC_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MEC_GP0_HI
+#define CP_MEC_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MEC_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_GP1_LO
+#define CP_MEC_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MEC_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP1_HI
+#define CP_MEC_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MEC_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP2_LO
+#define CP_MEC_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MEC_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP2_HI
+#define CP_MEC_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MEC_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP3_LO
+#define CP_MEC_GP3_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP3_HI
+#define CP_MEC_GP3_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP4_LO
+#define CP_MEC_GP4_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP4_HI
+#define CP_MEC_GP4_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP5_LO
+#define CP_MEC_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MEC_GP5_LO__DATA__SHIFT 0x1
+#define CP_MEC_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MEC_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MEC_GP5_HI
+#define CP_MEC_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MEC_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_GP6_LO
+#define CP_MEC_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MEC_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP6_HI
+#define CP_MEC_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MEC_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP7_LO
+#define CP_MEC_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MEC_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP7_HI
+#define CP_MEC_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MEC_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP8_LO
+#define CP_MEC_GP8_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP8_HI
+#define CP_MEC_GP8_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP9_LO
+#define CP_MEC_GP9_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP9_HI
+#define CP_MEC_GP9_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_LOCAL_BASE0_LO
+#define CP_MEC_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_BASE0_HI
+#define CP_MEC_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_MASK0_LO
+#define CP_MEC_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_MASK0_HI
+#define CP_MEC_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_APERTURE
+#define CP_MEC_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_INSTR_BASE_LO
+#define CP_MEC_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_INSTR_BASE_HI
+#define CP_MEC_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_INSTR_MASK_LO
+#define CP_MEC_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_INSTR_MASK_HI
+#define CP_MEC_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_INSTR_APERTURE
+#define CP_MEC_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_SCRATCH_APERTURE
+#define CP_MEC_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_SCRATCH_BASE_LO
+#define CP_MEC_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_SCRATCH_BASE_HI
+#define CP_MEC_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_RS64_PERFCOUNT_CNTL
+#define CP_MEC_RS64_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define CP_MEC_RS64_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//CP_MEC_RS64_PENDING_INTERRUPT
+#define CP_MEC_RS64_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_MEC_RS64_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_PRGRM_CNTR_START_HI
+#define CP_MEC_RS64_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_MEC_RS64_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_16
+#define CP_MEC_RS64_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_17
+#define CP_MEC_RS64_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_18
+#define CP_MEC_RS64_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_19
+#define CP_MEC_RS64_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_20
+#define CP_MEC_RS64_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_21
+#define CP_MEC_RS64_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_22
+#define CP_MEC_RS64_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_23
+#define CP_MEC_RS64_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_24
+#define CP_MEC_RS64_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_25
+#define CP_MEC_RS64_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_26
+#define CP_MEC_RS64_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_27
+#define CP_MEC_RS64_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_28
+#define CP_MEC_RS64_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_29
+#define CP_MEC_RS64_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_30
+#define CP_MEC_RS64_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_31
+#define CP_MEC_RS64_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_BASE
+#define CP_MEC_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_MASK
+#define CP_MEC_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_CNTL
+#define CP_MEC_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE1_BASE
+#define CP_MEC_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE1_MASK
+#define CP_MEC_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE1_CNTL
+#define CP_MEC_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE2_BASE
+#define CP_MEC_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE2_MASK
+#define CP_MEC_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE2_CNTL
+#define CP_MEC_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE3_BASE
+#define CP_MEC_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE3_MASK
+#define CP_MEC_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE3_CNTL
+#define CP_MEC_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE4_BASE
+#define CP_MEC_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE4_MASK
+#define CP_MEC_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE4_CNTL
+#define CP_MEC_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE5_BASE
+#define CP_MEC_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE5_MASK
+#define CP_MEC_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE5_CNTL
+#define CP_MEC_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE6_BASE
+#define CP_MEC_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE6_MASK
+#define CP_MEC_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE6_CNTL
+#define CP_MEC_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE7_BASE
+#define CP_MEC_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE7_MASK
+#define CP_MEC_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE7_CNTL
+#define CP_MEC_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE8_BASE
+#define CP_MEC_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE8_MASK
+#define CP_MEC_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE8_CNTL
+#define CP_MEC_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE9_BASE
+#define CP_MEC_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE9_MASK
+#define CP_MEC_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE9_CNTL
+#define CP_MEC_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE10_BASE
+#define CP_MEC_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE10_MASK
+#define CP_MEC_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE10_CNTL
+#define CP_MEC_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE11_BASE
+#define CP_MEC_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE11_MASK
+#define CP_MEC_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE11_CNTL
+#define CP_MEC_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE12_BASE
+#define CP_MEC_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE12_MASK
+#define CP_MEC_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE12_CNTL
+#define CP_MEC_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE13_BASE
+#define CP_MEC_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE13_MASK
+#define CP_MEC_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE13_CNTL
+#define CP_MEC_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE14_BASE
+#define CP_MEC_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE14_MASK
+#define CP_MEC_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE14_CNTL
+#define CP_MEC_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE15_BASE
+#define CP_MEC_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE15_MASK
+#define CP_MEC_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE15_CNTL
+#define CP_MEC_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_CPC_IC_OP_CNTL
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_GFX_CNTL
+#define CP_GFX_CNTL__ENGINE_SEL__SHIFT 0x0
+#define CP_GFX_CNTL__CONFIG__SHIFT 0x1
+#define CP_GFX_CNTL__ENGINE_SEL_MASK 0x00000001L
+#define CP_GFX_CNTL__CONFIG_MASK 0x00000006L
+//CP_GFX_RS64_INTERRUPT0
+#define CP_GFX_RS64_INTERRUPT0__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTERRUPT0__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INTR_EN0
+#define CP_GFX_RS64_INTR_EN0__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTR_EN0__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INTR_EN1
+#define CP_GFX_RS64_INTR_EN1__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTR_EN1__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_BASE_CNTL
+#define CP_GFX_RS64_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_RS64_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_GFX_RS64_DC_OP_CNTL
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_GFX_RS64_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_GFX_RS64_DC_OP_CNTL__RESERVED__SHIFT 0x3
+#define CP_GFX_RS64_DC_OP_CNTL__PRIME_DCACHE__SHIFT 0x4
+#define CP_GFX_RS64_DC_OP_CNTL__DCACHE_PRIMED__SHIFT 0x5
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_GFX_RS64_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+#define CP_GFX_RS64_DC_OP_CNTL__RESERVED_MASK 0x00000008L
+#define CP_GFX_RS64_DC_OP_CNTL__PRIME_DCACHE_MASK 0x00000010L
+#define CP_GFX_RS64_DC_OP_CNTL__DCACHE_PRIMED_MASK 0x00000020L
+//CP_GFX_RS64_LOCAL_BASE0_LO
+#define CP_GFX_RS64_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_BASE0_HI
+#define CP_GFX_RS64_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_MASK0_LO
+#define CP_GFX_RS64_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_MASK0_HI
+#define CP_GFX_RS64_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_APERTURE
+#define CP_GFX_RS64_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_INSTR_BASE_LO
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_INSTR_BASE_HI
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_INSTR_MASK_LO
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_INSTR_MASK_HI
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_INSTR_APERTURE
+#define CP_GFX_RS64_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_SCRATCH_APERTURE
+#define CP_GFX_RS64_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_PERFCOUNT_CNTL0
+#define CP_GFX_RS64_PERFCOUNT_CNTL0__EVENT_SEL__SHIFT 0x0
+#define CP_GFX_RS64_PERFCOUNT_CNTL0__EVENT_SEL_MASK 0x0000001FL
+//CP_GFX_RS64_PERFCOUNT_CNTL1
+#define CP_GFX_RS64_PERFCOUNT_CNTL1__EVENT_SEL__SHIFT 0x0
+#define CP_GFX_RS64_PERFCOUNT_CNTL1__EVENT_SEL_MASK 0x0000001FL
+//CP_GFX_RS64_MIP_LO0
+#define CP_GFX_RS64_MIP_LO0__MIP_LO__SHIFT 0x0
+#define CP_GFX_RS64_MIP_LO0__MIP_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_LO1
+#define CP_GFX_RS64_MIP_LO1__MIP_LO__SHIFT 0x0
+#define CP_GFX_RS64_MIP_LO1__MIP_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_HI0
+#define CP_GFX_RS64_MIP_HI0__MIP_HI__SHIFT 0x0
+#define CP_GFX_RS64_MIP_HI0__MIP_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_HI1
+#define CP_GFX_RS64_MIP_HI1__MIP_HI__SHIFT 0x0
+#define CP_GFX_RS64_MIP_HI1__MIP_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_LO0
+#define CP_GFX_RS64_MTIMECMP_LO0__TIME_LO__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_LO0__TIME_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_LO1
+#define CP_GFX_RS64_MTIMECMP_LO1__TIME_LO__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_LO1__TIME_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_HI0
+#define CP_GFX_RS64_MTIMECMP_HI0__TIME_HI__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_HI0__TIME_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_HI1
+#define CP_GFX_RS64_MTIMECMP_HI1__TIME_HI__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_HI1__TIME_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP0_LO0
+#define CP_GFX_RS64_GP0_LO0__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP0_LO0__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP0_LO0__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP0_LO0__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP0_LO1
+#define CP_GFX_RS64_GP0_LO1__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP0_LO1__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP0_LO1__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP0_LO1__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP0_HI0
+#define CP_GFX_RS64_GP0_HI0__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP0_HI0__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP0_HI1
+#define CP_GFX_RS64_GP0_HI1__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP0_HI1__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_LO0
+#define CP_GFX_RS64_GP1_LO0__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP1_LO0__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_LO1
+#define CP_GFX_RS64_GP1_LO1__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP1_LO1__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_HI0
+#define CP_GFX_RS64_GP1_HI0__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP1_HI0__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_HI1
+#define CP_GFX_RS64_GP1_HI1__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP1_HI1__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_LO0
+#define CP_GFX_RS64_GP2_LO0__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP2_LO0__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_LO1
+#define CP_GFX_RS64_GP2_LO1__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP2_LO1__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_HI0
+#define CP_GFX_RS64_GP2_HI0__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP2_HI0__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_HI1
+#define CP_GFX_RS64_GP2_HI1__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP2_HI1__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_LO0
+#define CP_GFX_RS64_GP3_LO0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_LO0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_LO1
+#define CP_GFX_RS64_GP3_LO1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_LO1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_HI0
+#define CP_GFX_RS64_GP3_HI0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_HI0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_HI1
+#define CP_GFX_RS64_GP3_HI1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_HI1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_LO0
+#define CP_GFX_RS64_GP4_LO0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_LO0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_LO1
+#define CP_GFX_RS64_GP4_LO1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_LO1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_HI0
+#define CP_GFX_RS64_GP4_HI0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_HI0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_HI1
+#define CP_GFX_RS64_GP4_HI1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_HI1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP5_LO0
+#define CP_GFX_RS64_GP5_LO0__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP5_LO0__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP5_LO0__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP5_LO0__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP5_LO1
+#define CP_GFX_RS64_GP5_LO1__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP5_LO1__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP5_LO1__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP5_LO1__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP5_HI0
+#define CP_GFX_RS64_GP5_HI0__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP5_HI0__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP5_HI1
+#define CP_GFX_RS64_GP5_HI1__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP5_HI1__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP6_LO
+#define CP_GFX_RS64_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP6_HI
+#define CP_GFX_RS64_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP7_LO
+#define CP_GFX_RS64_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP7_HI
+#define CP_GFX_RS64_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP8_LO
+#define CP_GFX_RS64_GP8_LO__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP8_HI
+#define CP_GFX_RS64_GP8_HI__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP9_LO
+#define CP_GFX_RS64_GP9_LO__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP9_HI
+#define CP_GFX_RS64_GP9_HI__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INSTR_PNTR0
+#define CP_GFX_RS64_INSTR_PNTR0__INSTR_PNTR__SHIFT 0x0
+#define CP_GFX_RS64_INSTR_PNTR0__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_GFX_RS64_INSTR_PNTR1
+#define CP_GFX_RS64_INSTR_PNTR1__INSTR_PNTR__SHIFT 0x0
+#define CP_GFX_RS64_INSTR_PNTR1__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_GFX_RS64_PENDING_INTERRUPT0
+#define CP_GFX_RS64_PENDING_INTERRUPT0__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_GFX_RS64_PENDING_INTERRUPT0__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_PENDING_INTERRUPT1
+#define CP_GFX_RS64_PENDING_INTERRUPT1__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_GFX_RS64_PENDING_INTERRUPT1__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_BASE0
+#define CP_GFX_RS64_DC_APERTURE0_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_MASK0
+#define CP_GFX_RS64_DC_APERTURE0_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_CNTL0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE1_BASE0
+#define CP_GFX_RS64_DC_APERTURE1_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_MASK0
+#define CP_GFX_RS64_DC_APERTURE1_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_CNTL0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE2_BASE0
+#define CP_GFX_RS64_DC_APERTURE2_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_MASK0
+#define CP_GFX_RS64_DC_APERTURE2_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_CNTL0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE3_BASE0
+#define CP_GFX_RS64_DC_APERTURE3_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_MASK0
+#define CP_GFX_RS64_DC_APERTURE3_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_CNTL0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE4_BASE0
+#define CP_GFX_RS64_DC_APERTURE4_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_MASK0
+#define CP_GFX_RS64_DC_APERTURE4_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_CNTL0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE5_BASE0
+#define CP_GFX_RS64_DC_APERTURE5_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_MASK0
+#define CP_GFX_RS64_DC_APERTURE5_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_CNTL0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE6_BASE0
+#define CP_GFX_RS64_DC_APERTURE6_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_MASK0
+#define CP_GFX_RS64_DC_APERTURE6_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_CNTL0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE7_BASE0
+#define CP_GFX_RS64_DC_APERTURE7_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_MASK0
+#define CP_GFX_RS64_DC_APERTURE7_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_CNTL0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE8_BASE0
+#define CP_GFX_RS64_DC_APERTURE8_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_MASK0
+#define CP_GFX_RS64_DC_APERTURE8_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_CNTL0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE9_BASE0
+#define CP_GFX_RS64_DC_APERTURE9_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_MASK0
+#define CP_GFX_RS64_DC_APERTURE9_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_CNTL0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE10_BASE0
+#define CP_GFX_RS64_DC_APERTURE10_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_MASK0
+#define CP_GFX_RS64_DC_APERTURE10_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_CNTL0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE11_BASE0
+#define CP_GFX_RS64_DC_APERTURE11_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_MASK0
+#define CP_GFX_RS64_DC_APERTURE11_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_CNTL0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE12_BASE0
+#define CP_GFX_RS64_DC_APERTURE12_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_MASK0
+#define CP_GFX_RS64_DC_APERTURE12_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_CNTL0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE13_BASE0
+#define CP_GFX_RS64_DC_APERTURE13_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_MASK0
+#define CP_GFX_RS64_DC_APERTURE13_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_CNTL0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE14_BASE0
+#define CP_GFX_RS64_DC_APERTURE14_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_MASK0
+#define CP_GFX_RS64_DC_APERTURE14_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_CNTL0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE15_BASE0
+#define CP_GFX_RS64_DC_APERTURE15_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_MASK0
+#define CP_GFX_RS64_DC_APERTURE15_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_CNTL0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE0_BASE1
+#define CP_GFX_RS64_DC_APERTURE0_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_MASK1
+#define CP_GFX_RS64_DC_APERTURE0_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_CNTL1
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE1_BASE1
+#define CP_GFX_RS64_DC_APERTURE1_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_MASK1
+#define CP_GFX_RS64_DC_APERTURE1_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_CNTL1
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE2_BASE1
+#define CP_GFX_RS64_DC_APERTURE2_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_MASK1
+#define CP_GFX_RS64_DC_APERTURE2_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_CNTL1
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE3_BASE1
+#define CP_GFX_RS64_DC_APERTURE3_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_MASK1
+#define CP_GFX_RS64_DC_APERTURE3_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_CNTL1
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE4_BASE1
+#define CP_GFX_RS64_DC_APERTURE4_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_MASK1
+#define CP_GFX_RS64_DC_APERTURE4_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_CNTL1
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE5_BASE1
+#define CP_GFX_RS64_DC_APERTURE5_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_MASK1
+#define CP_GFX_RS64_DC_APERTURE5_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_CNTL1
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE6_BASE1
+#define CP_GFX_RS64_DC_APERTURE6_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_MASK1
+#define CP_GFX_RS64_DC_APERTURE6_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_CNTL1
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE7_BASE1
+#define CP_GFX_RS64_DC_APERTURE7_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_MASK1
+#define CP_GFX_RS64_DC_APERTURE7_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_CNTL1
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE8_BASE1
+#define CP_GFX_RS64_DC_APERTURE8_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_MASK1
+#define CP_GFX_RS64_DC_APERTURE8_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_CNTL1
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE9_BASE1
+#define CP_GFX_RS64_DC_APERTURE9_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_MASK1
+#define CP_GFX_RS64_DC_APERTURE9_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_CNTL1
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE10_BASE1
+#define CP_GFX_RS64_DC_APERTURE10_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_MASK1
+#define CP_GFX_RS64_DC_APERTURE10_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_CNTL1
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE11_BASE1
+#define CP_GFX_RS64_DC_APERTURE11_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_MASK1
+#define CP_GFX_RS64_DC_APERTURE11_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_CNTL1
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE12_BASE1
+#define CP_GFX_RS64_DC_APERTURE12_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_MASK1
+#define CP_GFX_RS64_DC_APERTURE12_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_CNTL1
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE13_BASE1
+#define CP_GFX_RS64_DC_APERTURE13_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_MASK1
+#define CP_GFX_RS64_DC_APERTURE13_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_CNTL1
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE14_BASE1
+#define CP_GFX_RS64_DC_APERTURE14_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_MASK1
+#define CP_GFX_RS64_DC_APERTURE14_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_CNTL1
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE15_BASE1
+#define CP_GFX_RS64_DC_APERTURE15_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_MASK1
+#define CP_GFX_RS64_DC_APERTURE15_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_CNTL1
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_INTERRUPT1
+#define CP_GFX_RS64_INTERRUPT1__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTERRUPT1__ME_INT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gl1dec
+//GL1_ARB_CTRL
+#define GL1_ARB_CTRL__NUM_MEM_PIPES__SHIFT 0x0
+#define GL1_ARB_CTRL__FGCG_DISABLE__SHIFT 0x2
+#define GL1_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x3
+#define GL1_ARB_CTRL__CHICKEN_BITS__SHIFT 0x4
+#define GL1_ARB_CTRL__NUM_MEM_PIPES_MASK 0x00000003L
+#define GL1_ARB_CTRL__FGCG_DISABLE_MASK 0x00000004L
+#define GL1_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000008L
+#define GL1_ARB_CTRL__CHICKEN_BITS_MASK 0x00000FF0L
+//GL1_DRAM_BURST_MASK
+#define GL1_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK__SHIFT 0x0
+#define GL1_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK_MASK 0x000000FFL
+//GL1_ARB_STATUS
+#define GL1_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define GL1_ARB_STATUS__RET_ARB_BUSY__SHIFT 0x1
+#define GL1_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define GL1_ARB_STATUS__RET_ARB_BUSY_MASK 0x00000002L
+//GL1_DRAM_BURST_CTRL
+#define GL1_DRAM_BURST_CTRL__MAX_DRAM_BURST__SHIFT 0x0
+#define GL1_DRAM_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define GL1_DRAM_BURST_CTRL__GATHER_64B_BURST_DISABLE__SHIFT 0x4
+#define GL1_DRAM_BURST_CTRL__GATHER_32B_BURST_DISABLE__SHIFT 0x5
+#define GL1_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE__SHIFT 0x8
+#define GL1_DRAM_BURST_CTRL__MAX_DRAM_BURST_MASK 0x00000007L
+#define GL1_DRAM_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define GL1_DRAM_BURST_CTRL__GATHER_64B_BURST_DISABLE_MASK 0x00000010L
+#define GL1_DRAM_BURST_CTRL__GATHER_32B_BURST_DISABLE_MASK 0x00000020L
+#define GL1_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE_MASK 0x00000100L
+//GL1I_GL1R_REP_FGCG_OVERRIDE
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IR_REP_FGCG_OVERRIDE__SHIFT 0x0
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IW_REP_FGCG_OVERRIDE__SHIFT 0x1
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_SRC_REP_FGCG_OVERRIDE__SHIFT 0x2
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_RET_REP_FGCG_OVERRIDE__SHIFT 0x3
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IR_REP_FGCG_OVERRIDE_MASK 0x00000001L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IW_REP_FGCG_OVERRIDE_MASK 0x00000002L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_SRC_REP_FGCG_OVERRIDE_MASK 0x00000004L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_RET_REP_FGCG_OVERRIDE_MASK 0x00000008L
+//GL1C_CTRL
+#define GL1C_CTRL__FORCE_MISS__SHIFT 0x0
+#define GL1C_CTRL__FORCE_HIT__SHIFT 0x1
+#define GL1C_CTRL__NOFILL_32B__SHIFT 0x2
+#define GL1C_CTRL__NOFILL_64B__SHIFT 0x3
+#define GL1C_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x4
+#define GL1C_CTRL__ACK_QUEUE_DISABLE__SHIFT 0x8
+#define GL1C_CTRL__RMI_META_READ_MISS_QUEUE_DISABLE__SHIFT 0x9
+#define GL1C_CTRL__HIT_QUEUE_DISABLE__SHIFT 0xa
+#define GL1C_CTRL__GL2_REQ_CREDITS__SHIFT 0xb
+#define GL1C_CTRL__GL2_DATA_CREDITS__SHIFT 0x12
+#define GL1C_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x19
+#define GL1C_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x1a
+#define GL1C_CTRL__GCR_RSP_FGCG_DISABLE__SHIFT 0x1b
+#define GL1C_CTRL__DISABLE_HASH_TO_UPPER_16_SETS__SHIFT 0x1c
+#define GL1C_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT__SHIFT 0x1d
+#define GL1C_CTRL__DISABLE_PERF_SPLIT_EVICT_WRITE__SHIFT 0x1e
+#define GL1C_CTRL__FORCE_MISS_MASK 0x00000001L
+#define GL1C_CTRL__FORCE_HIT_MASK 0x00000002L
+#define GL1C_CTRL__NOFILL_32B_MASK 0x00000004L
+#define GL1C_CTRL__NOFILL_64B_MASK 0x00000008L
+#define GL1C_CTRL__LATENCY_FIFO_SIZE_MASK 0x000000F0L
+#define GL1C_CTRL__ACK_QUEUE_DISABLE_MASK 0x00000100L
+#define GL1C_CTRL__RMI_META_READ_MISS_QUEUE_DISABLE_MASK 0x00000200L
+#define GL1C_CTRL__HIT_QUEUE_DISABLE_MASK 0x00000400L
+#define GL1C_CTRL__GL2_REQ_CREDITS_MASK 0x0003F800L
+#define GL1C_CTRL__GL2_DATA_CREDITS_MASK 0x01FC0000L
+#define GL1C_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x02000000L
+#define GL1C_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x04000000L
+#define GL1C_CTRL__GCR_RSP_FGCG_DISABLE_MASK 0x08000000L
+#define GL1C_CTRL__DISABLE_HASH_TO_UPPER_16_SETS_MASK 0x10000000L
+#define GL1C_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT_MASK 0x20000000L
+#define GL1C_CTRL__DISABLE_PERF_SPLIT_EVICT_WRITE_MASK 0x40000000L
+//GL1C_STATUS
+#define GL1C_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define GL1C_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define GL1C_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define GL1C_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define GL1C_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define GL1C_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define GL1C_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define GL1C_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define GL1C_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define GL1C_STATUS__LATENCY_FIFO_FULL_STALL__SHIFT 0x14
+#define GL1C_STATUS__TAG_STALL__SHIFT 0x15
+#define GL1C_STATUS__TAG_BUSY__SHIFT 0x16
+#define GL1C_STATUS__TAG_ACK_STALL__SHIFT 0x17
+#define GL1C_STATUS__TAG_GCR_INV_STALL__SHIFT 0x18
+#define GL1C_STATUS__TAG_NO_AVAILABLE_LINE_TO_EVICT_STALL__SHIFT 0x19
+#define GL1C_STATUS__TAG_EVICT__SHIFT 0x1a
+#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION__SHIFT 0x1b
+#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET__SHIFT 0x1f
+#define GL1C_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define GL1C_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define GL1C_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define GL1C_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define GL1C_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define GL1C_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define GL1C_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define GL1C_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define GL1C_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define GL1C_STATUS__LATENCY_FIFO_FULL_STALL_MASK 0x00100000L
+#define GL1C_STATUS__TAG_STALL_MASK 0x00200000L
+#define GL1C_STATUS__TAG_BUSY_MASK 0x00400000L
+#define GL1C_STATUS__TAG_ACK_STALL_MASK 0x00800000L
+#define GL1C_STATUS__TAG_GCR_INV_STALL_MASK 0x01000000L
+#define GL1C_STATUS__TAG_NO_AVAILABLE_LINE_TO_EVICT_STALL_MASK 0x02000000L
+#define GL1C_STATUS__TAG_EVICT_MASK 0x04000000L
+#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION_MASK 0x78000000L
+#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET_MASK 0x80000000L
+//GL1C_UTCL0_CNTL2
+#define GL1C_UTCL0_CNTL2__SPARE__SHIFT 0x0
+#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE__SHIFT 0x8
+#define GL1C_UTCL0_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define GL1C_UTCL0_CNTL2__ANY_LINE_VALID__SHIFT 0xa
+#define GL1C_UTCL0_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define GL1C_UTCL0_CNTL2__DISABLE_BURST__SHIFT 0x11
+#define GL1C_UTCL0_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define GL1C_UTCL0_CNTL2__FGCG_DISABLE__SHIFT 0x1e
+#define GL1C_UTCL0_CNTL2__BIG_PAGE_DISABLE__SHIFT 0x1f
+#define GL1C_UTCL0_CNTL2__SPARE_MASK 0x000000FFL
+#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE_MASK 0x00000100L
+#define GL1C_UTCL0_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define GL1C_UTCL0_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
+#define GL1C_UTCL0_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define GL1C_UTCL0_CNTL2__DISABLE_BURST_MASK 0x00020000L
+#define GL1C_UTCL0_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define GL1C_UTCL0_CNTL2__FGCG_DISABLE_MASK 0x40000000L
+#define GL1C_UTCL0_CNTL2__BIG_PAGE_DISABLE_MASK 0x80000000L
+//GL1C_UTCL0_STATUS
+#define GL1C_UTCL0_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define GL1C_UTCL0_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define GL1C_UTCL0_STATUS__PRT_DETECTED__SHIFT 0x2
+#define GL1C_UTCL0_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define GL1C_UTCL0_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define GL1C_UTCL0_STATUS__PRT_DETECTED_MASK 0x00000004L
+//GL1C_UTCL0_RETRY
+#define GL1C_UTCL0_RETRY__INCR__SHIFT 0x0
+#define GL1C_UTCL0_RETRY__COUNT__SHIFT 0x8
+#define GL1C_UTCL0_RETRY__INCR_MASK 0x000000FFL
+#define GL1C_UTCL0_RETRY__COUNT_MASK 0x00000F00L
+//GL1C_CTRL2
+#define GL1C_CTRL2__UTCL0_INFLIGHT_MAX__SHIFT 0x0
+#define GL1C_CTRL2__UTCL0_SD_SIDEBAND_IF_DISABLE__SHIFT 0x8
+#define GL1C_CTRL2__REDUCE_REQ_PROTECTION_LINE_LEVEL__SHIFT 0x9
+#define GL1C_CTRL2__UTCL0_INFLIGHT_MAX_MASK 0x000000FFL
+#define GL1C_CTRL2__UTCL0_SD_SIDEBAND_IF_DISABLE_MASK 0x00000100L
+#define GL1C_CTRL2__REDUCE_REQ_PROTECTION_LINE_LEVEL_MASK 0x00003E00L
+
+
+// addressBlock: gc_chdec
+//CH_ARB_CTRL
+#define CH_ARB_CTRL__NUM_MEM_PIPES__SHIFT 0x0
+#define CH_ARB_CTRL__UC_IO_WR_PATH__SHIFT 0x2
+#define CH_ARB_CTRL__FGCG_DISABLE__SHIFT 0x3
+#define CH_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x4
+#define CH_ARB_CTRL__CHICKEN_BITS__SHIFT 0x5
+#define CH_ARB_CTRL__NUM_MEM_PIPES_MASK 0x00000003L
+#define CH_ARB_CTRL__UC_IO_WR_PATH_MASK 0x00000004L
+#define CH_ARB_CTRL__FGCG_DISABLE_MASK 0x00000008L
+#define CH_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000010L
+#define CH_ARB_CTRL__CHICKEN_BITS_MASK 0x00001FE0L
+//CH_DRAM_BURST_MASK
+#define CH_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK__SHIFT 0x0
+#define CH_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK_MASK 0x000000FFL
+//CH_ARB_STATUS
+#define CH_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define CH_ARB_STATUS__RET_ARB_BUSY__SHIFT 0x1
+#define CH_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define CH_ARB_STATUS__RET_ARB_BUSY_MASK 0x00000002L
+//CH_DRAM_BURST_CTRL
+#define CH_DRAM_BURST_CTRL__MAX_DRAM_BURST__SHIFT 0x0
+#define CH_DRAM_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define CH_DRAM_BURST_CTRL__GATHER_64B_MEMORY_BURST_DISABLE__SHIFT 0x4
+#define CH_DRAM_BURST_CTRL__GATHER_64B_IO_BURST_DISABLE__SHIFT 0x5
+#define CH_DRAM_BURST_CTRL__GATHER_32B_MEMORY_BURST_DISABLE__SHIFT 0x6
+#define CH_DRAM_BURST_CTRL__GATHER_32B_IO_BURST_DISABLE__SHIFT 0x7
+#define CH_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE__SHIFT 0x8
+#define CH_DRAM_BURST_CTRL__MAX_DRAM_BURST_MASK 0x00000007L
+#define CH_DRAM_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define CH_DRAM_BURST_CTRL__GATHER_64B_MEMORY_BURST_DISABLE_MASK 0x00000010L
+#define CH_DRAM_BURST_CTRL__GATHER_64B_IO_BURST_DISABLE_MASK 0x00000020L
+#define CH_DRAM_BURST_CTRL__GATHER_32B_MEMORY_BURST_DISABLE_MASK 0x00000040L
+#define CH_DRAM_BURST_CTRL__GATHER_32B_IO_BURST_DISABLE_MASK 0x00000080L
+#define CH_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE_MASK 0x00000100L
+//CHA_CHC_CREDITS
+#define CHA_CHC_CREDITS__CHC_REQ_CREDITS__SHIFT 0x0
+#define CHA_CHC_CREDITS__CHCG_REQ_CREDITS__SHIFT 0x8
+#define CHA_CHC_CREDITS__CHC_REQ_CREDITS_MASK 0x000000FFL
+#define CHA_CHC_CREDITS__CHCG_REQ_CREDITS_MASK 0x0000FF00L
+//CHA_CLIENT_FREE_DELAY
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_0_FREE_DELAY__SHIFT 0x0
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_1_FREE_DELAY__SHIFT 0x3
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_2_FREE_DELAY__SHIFT 0x6
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_3_FREE_DELAY__SHIFT 0x9
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_4_FREE_DELAY__SHIFT 0xc
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_0_FREE_DELAY_MASK 0x00000007L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_1_FREE_DELAY_MASK 0x00000038L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_2_FREE_DELAY_MASK 0x000001C0L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_3_FREE_DELAY_MASK 0x00000E00L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_4_FREE_DELAY_MASK 0x00007000L
+//CHI_CHR_REP_FGCG_OVERRIDE
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIW_REP_FGCG_OVERRIDE__SHIFT 0x0
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIR_REP_FGCG_OVERRIDE__SHIFT 0x1
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_SRC_REP_FGCG_OVERRIDE__SHIFT 0x2
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_RET_REP_FGCG_OVERRIDE__SHIFT 0x3
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIW_REP_FGCG_OVERRIDE_MASK 0x00000001L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIR_REP_FGCG_OVERRIDE_MASK 0x00000002L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_SRC_REP_FGCG_OVERRIDE_MASK 0x00000004L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_RET_REP_FGCG_OVERRIDE_MASK 0x00000008L
+//CH_VC5_ENABLE
+#define CH_VC5_ENABLE__UTCL2_VC5_ENABLE__SHIFT 0x1
+#define CH_VC5_ENABLE__UTCL2_VC5_ENABLE_MASK 0x00000002L
+//CHC_CTRL
+#define CHC_CTRL__BUFFER_DEPTH_MAX__SHIFT 0x0
+#define CHC_CTRL__GL2_REQ_CREDITS__SHIFT 0x4
+#define CHC_CTRL__GL2_DATA_CREDITS__SHIFT 0xb
+#define CHC_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x12
+#define CHC_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x13
+#define CHC_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT__SHIFT 0x1d
+#define CHC_CTRL__BUFFER_DEPTH_MAX_MASK 0x0000000FL
+#define CHC_CTRL__GL2_REQ_CREDITS_MASK 0x000007F0L
+#define CHC_CTRL__GL2_DATA_CREDITS_MASK 0x0003F800L
+#define CHC_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x00040000L
+#define CHC_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x00080000L
+#define CHC_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT_MASK 0x20000000L
+//CHC_STATUS
+#define CHC_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define CHC_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define CHC_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define CHC_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define CHC_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define CHC_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define CHC_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define CHC_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define CHC_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define CHC_STATUS__VIRTUAL_FIFO_FULL_STALL__SHIFT 0x14
+#define CHC_STATUS__REQUEST_TRACKER_BUFFER_STALL__SHIFT 0x15
+#define CHC_STATUS__REQUEST_TRACKER_BUSY__SHIFT 0x16
+#define CHC_STATUS__BUFFER_FULL__SHIFT 0x17
+#define CHC_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define CHC_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define CHC_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define CHC_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define CHC_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define CHC_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define CHC_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define CHC_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define CHC_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define CHC_STATUS__VIRTUAL_FIFO_FULL_STALL_MASK 0x00100000L
+#define CHC_STATUS__REQUEST_TRACKER_BUFFER_STALL_MASK 0x00200000L
+#define CHC_STATUS__REQUEST_TRACKER_BUSY_MASK 0x00400000L
+#define CHC_STATUS__BUFFER_FULL_MASK 0x00800000L
+//CHCG_CTRL
+#define CHCG_CTRL__BUFFER_DEPTH_MAX__SHIFT 0x0
+#define CHCG_CTRL__VC0_BUFFER_DEPTH_MAX__SHIFT 0x4
+#define CHCG_CTRL__GL2_REQ_CREDITS__SHIFT 0x8
+#define CHCG_CTRL__GL2_DATA_CREDITS__SHIFT 0xf
+#define CHCG_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x16
+#define CHCG_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x17
+#define CHCG_CTRL__BUFFER_DEPTH_MAX_MASK 0x0000000FL
+#define CHCG_CTRL__VC0_BUFFER_DEPTH_MAX_MASK 0x000000F0L
+#define CHCG_CTRL__GL2_REQ_CREDITS_MASK 0x00007F00L
+#define CHCG_CTRL__GL2_DATA_CREDITS_MASK 0x003F8000L
+#define CHCG_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x00400000L
+#define CHCG_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x00800000L
+//CHCG_STATUS
+#define CHCG_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define CHCG_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define CHCG_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define CHCG_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define CHCG_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define CHCG_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define CHCG_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define CHCG_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define CHCG_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define CHCG_STATUS__VIRTUAL_FIFO_FULL_STALL__SHIFT 0x14
+#define CHCG_STATUS__REQUEST_TRACKER_BUFFER_STALL__SHIFT 0x15
+#define CHCG_STATUS__REQUEST_TRACKER_BUSY__SHIFT 0x16
+#define CHCG_STATUS__BUFFER_FULL__SHIFT 0x17
+#define CHCG_STATUS__INPUT_BUFFER_VC1_BUSY__SHIFT 0x18
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_BUSY__SHIFT 0x19
+#define CHCG_STATUS__INPUT_BUFFER_VC1_FIFO_FULL__SHIFT 0x1a
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_FULL__SHIFT 0x1b
+#define CHCG_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define CHCG_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define CHCG_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define CHCG_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define CHCG_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define CHCG_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define CHCG_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define CHCG_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define CHCG_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define CHCG_STATUS__VIRTUAL_FIFO_FULL_STALL_MASK 0x00100000L
+#define CHCG_STATUS__REQUEST_TRACKER_BUFFER_STALL_MASK 0x00200000L
+#define CHCG_STATUS__REQUEST_TRACKER_BUSY_MASK 0x00400000L
+#define CHCG_STATUS__BUFFER_FULL_MASK 0x00800000L
+#define CHCG_STATUS__INPUT_BUFFER_VC1_BUSY_MASK 0x01000000L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_BUSY_MASK 0x02000000L
+#define CHCG_STATUS__INPUT_BUFFER_VC1_FIFO_FULL_MASK 0x04000000L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_FULL_MASK 0x08000000L
+
+
+// addressBlock: gc_gl2dec
+//GL2C_CTRL
+#define GL2C_CTRL__CACHE_SIZE__SHIFT 0x0
+#define GL2C_CTRL__RATE__SHIFT 0x2
+#define GL2C_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
+#define GL2C_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
+#define GL2C_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
+#define GL2C_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
+#define GL2C_CTRL__METADATA_TO_HI_PRIORITY__SHIFT 0x14
+#define GL2C_CTRL__LINEAR_SET_HASH__SHIFT 0x15
+#define GL2C_CTRL__FORCE_HIT_QUEUE_POP__SHIFT 0x16
+#define GL2C_CTRL__MDC_SIZE__SHIFT 0x18
+#define GL2C_CTRL__METADATA_TO_HIT_QUEUE__SHIFT 0x1a
+#define GL2C_CTRL__IGNORE_FULLY_WRITTEN__SHIFT 0x1b
+#define GL2C_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
+#define GL2C_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define GL2C_CTRL__RATE_MASK 0x0000000CL
+#define GL2C_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
+#define GL2C_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0x00000F00L
+#define GL2C_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
+#define GL2C_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
+#define GL2C_CTRL__METADATA_TO_HI_PRIORITY_MASK 0x00100000L
+#define GL2C_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
+#define GL2C_CTRL__FORCE_HIT_QUEUE_POP_MASK 0x00C00000L
+#define GL2C_CTRL__MDC_SIZE_MASK 0x03000000L
+#define GL2C_CTRL__METADATA_TO_HIT_QUEUE_MASK 0x04000000L
+#define GL2C_CTRL__IGNORE_FULLY_WRITTEN_MASK 0x08000000L
+#define GL2C_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xF0000000L
+//GL2C_CTRL2
+#define GL2C_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
+#define GL2C_CTRL2__ADDR_MATCH_DISABLE__SHIFT 0x4
+#define GL2C_CTRL2__FILL_SIZE_32__SHIFT 0x5
+#define GL2C_CTRL2__RB_TO_HI_PRIORITY__SHIFT 0x6
+#define GL2C_CTRL2__HIT_UNDER_MISS_DISABLE__SHIFT 0x7
+#define GL2C_CTRL2__RO_DISABLE__SHIFT 0x8
+#define GL2C_CTRL2__FORCE_MDC_INV__SHIFT 0x9
+#define GL2C_CTRL2__GCR_ARB_CTRL__SHIFT 0xa
+#define GL2C_CTRL2__GCR_ALL_SET__SHIFT 0xd
+#define GL2C_CTRL2__FILL_SIZE_64__SHIFT 0x11
+#define GL2C_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK__SHIFT 0x12
+#define GL2C_CTRL2__WRITEBACK_ALL_WAIT_FOR_ALL_EA_WRITE_COMPLETE__SHIFT 0x13
+#define GL2C_CTRL2__METADATA_VOLATILE_EN__SHIFT 0x14
+#define GL2C_CTRL2__RB_VOLATILE_EN__SHIFT 0x15
+#define GL2C_CTRL2__PROBE_UNSHARED_EN__SHIFT 0x16
+#define GL2C_CTRL2__MAX_MIN_CTRL__SHIFT 0x17
+#define GL2C_CTRL2__MDC_UC_TO_C_RO_EN__SHIFT 0x1a
+#define GL2C_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
+#define GL2C_CTRL2__ADDR_MATCH_DISABLE_MASK 0x00000010L
+#define GL2C_CTRL2__FILL_SIZE_32_MASK 0x00000020L
+#define GL2C_CTRL2__RB_TO_HI_PRIORITY_MASK 0x00000040L
+#define GL2C_CTRL2__HIT_UNDER_MISS_DISABLE_MASK 0x00000080L
+#define GL2C_CTRL2__RO_DISABLE_MASK 0x00000100L
+#define GL2C_CTRL2__FORCE_MDC_INV_MASK 0x00000200L
+#define GL2C_CTRL2__GCR_ARB_CTRL_MASK 0x00001C00L
+#define GL2C_CTRL2__GCR_ALL_SET_MASK 0x00002000L
+#define GL2C_CTRL2__FILL_SIZE_64_MASK 0x00020000L
+#define GL2C_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK_MASK 0x00040000L
+#define GL2C_CTRL2__WRITEBACK_ALL_WAIT_FOR_ALL_EA_WRITE_COMPLETE_MASK 0x00080000L
+#define GL2C_CTRL2__METADATA_VOLATILE_EN_MASK 0x00100000L
+#define GL2C_CTRL2__RB_VOLATILE_EN_MASK 0x00200000L
+#define GL2C_CTRL2__PROBE_UNSHARED_EN_MASK 0x00400000L
+#define GL2C_CTRL2__MAX_MIN_CTRL_MASK 0x01800000L
+#define GL2C_CTRL2__MDC_UC_TO_C_RO_EN_MASK 0x04000000L
+//GL2C_STATUS
+#define GL2C_STATUS__NONCACHEABLE_FLOAT_ATOMIC__SHIFT 0x0
+#define GL2C_STATUS__NONCACHEABLE_U8_ATOMIC__SHIFT 0x4
+#define GL2C_STATUS__NONCACHEABLE_CLAMP_SUB_ATOMIC__SHIFT 0x5
+#define GL2C_STATUS__WRRET_NACK_FAULT__SHIFT 0x6
+#define GL2C_STATUS__RDRET_NACK_FAULT__SHIFT 0x7
+#define GL2C_STATUS__METADATA_FED__SHIFT 0x8
+#define GL2C_STATUS__FED_FSM_STATE__SHIFT 0x9
+#define GL2C_STATUS__SAFE_MODE_FED__SHIFT 0xb
+#define GL2C_STATUS__DCC_OUT_INVALID_KEY_ERROR_CODE__SHIFT 0x12
+#define GL2C_STATUS__NONCACHEABLE_FLOAT_ATOMIC_MASK 0x00000001L
+#define GL2C_STATUS__NONCACHEABLE_U8_ATOMIC_MASK 0x00000010L
+#define GL2C_STATUS__NONCACHEABLE_CLAMP_SUB_ATOMIC_MASK 0x00000020L
+#define GL2C_STATUS__WRRET_NACK_FAULT_MASK 0x00000040L
+#define GL2C_STATUS__RDRET_NACK_FAULT_MASK 0x00000080L
+#define GL2C_STATUS__METADATA_FED_MASK 0x00000100L
+#define GL2C_STATUS__FED_FSM_STATE_MASK 0x00000600L
+#define GL2C_STATUS__SAFE_MODE_FED_MASK 0x00000800L
+#define GL2C_STATUS__DCC_OUT_INVALID_KEY_ERROR_CODE_MASK 0x007C0000L
+//GL2C_ADDR_MATCH_MASK
+#define GL2C_ADDR_MATCH_MASK__ADDR_MASK__SHIFT 0x0
+#define GL2C_ADDR_MATCH_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//GL2C_ADDR_MATCH_SIZE
+#define GL2C_ADDR_MATCH_SIZE__MAX_COUNT__SHIFT 0x0
+#define GL2C_ADDR_MATCH_SIZE__MAX_COUNT_MASK 0x00000007L
+//GL2C_WBINVL2
+#define GL2C_WBINVL2__DONE__SHIFT 0x4
+#define GL2C_WBINVL2__DONE_MASK 0x00000010L
+//GL2C_SOFT_RESET
+#define GL2C_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
+#define GL2C_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
+//GL2C_CM_CTRL0
+#define GL2C_CM_CTRL0__HASH_MASK__SHIFT 0x0
+#define GL2C_CM_CTRL0__HASH_MASK_MASK 0xFFFFFFFFL
+//GL2C_CM_CTRL1
+#define GL2C_CM_CTRL1__HASH_MASK__SHIFT 0x0
+#define GL2C_CM_CTRL1__BURST_TIMER__SHIFT 0x8
+#define GL2C_CM_CTRL1__RVF_SIZE__SHIFT 0x10
+#define GL2C_CM_CTRL1__WRITE_COH_MODE__SHIFT 0x17
+#define GL2C_CM_CTRL1__MDC_ARB_MODE__SHIFT 0x19
+#define GL2C_CM_CTRL1__READ_REQ_ONLY__SHIFT 0x1a
+#define GL2C_CM_CTRL1__COMP_TO_CONSTANT_EN__SHIFT 0x1b
+#define GL2C_CM_CTRL1__COMP_TO_SINGLE_EN__SHIFT 0x1c
+#define GL2C_CM_CTRL1__BURST_MODE__SHIFT 0x1d
+#define GL2C_CM_CTRL1__UNCOMP_READBACK_FILTER__SHIFT 0x1e
+#define GL2C_CM_CTRL1__WAIT_ATOMIC_RECOMP_WRITE__SHIFT 0x1f
+#define GL2C_CM_CTRL1__HASH_MASK_MASK 0x0000000FL
+#define GL2C_CM_CTRL1__BURST_TIMER_MASK 0x0000FF00L
+#define GL2C_CM_CTRL1__RVF_SIZE_MASK 0x000F0000L
+#define GL2C_CM_CTRL1__WRITE_COH_MODE_MASK 0x01800000L
+#define GL2C_CM_CTRL1__MDC_ARB_MODE_MASK 0x02000000L
+#define GL2C_CM_CTRL1__READ_REQ_ONLY_MASK 0x04000000L
+#define GL2C_CM_CTRL1__COMP_TO_CONSTANT_EN_MASK 0x08000000L
+#define GL2C_CM_CTRL1__COMP_TO_SINGLE_EN_MASK 0x10000000L
+#define GL2C_CM_CTRL1__BURST_MODE_MASK 0x20000000L
+#define GL2C_CM_CTRL1__UNCOMP_READBACK_FILTER_MASK 0x40000000L
+#define GL2C_CM_CTRL1__WAIT_ATOMIC_RECOMP_WRITE_MASK 0x80000000L
+//GL2C_CM_STALL
+#define GL2C_CM_STALL__QUEUE__SHIFT 0x0
+#define GL2C_CM_STALL__QUEUE_MASK 0xFFFFFFFFL
+//GL2C_CM_CTRL2
+#define GL2C_CM_CTRL2__READ_BURST_TIMER__SHIFT 0x0
+#define GL2C_CM_CTRL2__VRS_DISABLE__SHIFT 0x8
+#define GL2C_CM_CTRL2__SKIP_LOW_COMP_RATIO__SHIFT 0x9
+#define GL2C_CM_CTRL2__CM_NBC_IND64_DISABLE__SHIFT 0xa
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MODE__SHIFT 0xb
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_METADATA_WR_MODE__SHIFT 0xc
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MAX_UNCOMP_BLK_SZ_MODE__SHIFT 0xd
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_SECTOR_READBACK_MODE__SHIFT 0xf
+#define GL2C_CM_CTRL2__RECOMP_DISABLE__SHIFT 0x10
+#define GL2C_CM_CTRL2__DCC_COMP_KEY_ERROR_DETECTION_EN__SHIFT 0x11
+#define GL2C_CM_CTRL2__DCC_CLEAR_FRAG2DCC_KEY_ERROR_CODE__SHIFT 0x12
+#define GL2C_CM_CTRL2__READ_BURST_TIMER_MASK 0x000000FFL
+#define GL2C_CM_CTRL2__VRS_DISABLE_MASK 0x00000100L
+#define GL2C_CM_CTRL2__SKIP_LOW_COMP_RATIO_MASK 0x00000200L
+#define GL2C_CM_CTRL2__CM_NBC_IND64_DISABLE_MASK 0x00000400L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MODE_MASK 0x00000800L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_METADATA_WR_MODE_MASK 0x00001000L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MAX_UNCOMP_BLK_SZ_MODE_MASK 0x00006000L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_SECTOR_READBACK_MODE_MASK 0x00008000L
+#define GL2C_CM_CTRL2__RECOMP_DISABLE_MASK 0x00010000L
+#define GL2C_CM_CTRL2__DCC_COMP_KEY_ERROR_DETECTION_EN_MASK 0x00020000L
+#define GL2C_CM_CTRL2__DCC_CLEAR_FRAG2DCC_KEY_ERROR_CODE_MASK 0x00040000L
+//GL2C_CTRL3
+#define GL2C_CTRL3__METADATA_MTYPE_COHERENCY__SHIFT 0x0
+#define GL2C_CTRL3__METADATA_NOFILL__SHIFT 0x3
+#define GL2C_CTRL3__METADATA_NEXT_CL_PREFETCH__SHIFT 0x4
+#define GL2C_CTRL3__BANK_LINEAR_HASH_MODE__SHIFT 0x5
+#define GL2C_CTRL3__HTILE_TO_HI_PRIORITY__SHIFT 0x6
+#define GL2C_CTRL3__UNCACHED_WRITE_ATOMIC_TO_UC_WRITE__SHIFT 0x7
+#define GL2C_CTRL3__IO_CHANNEL_ENABLE__SHIFT 0x8
+#define GL2C_CTRL3__FMASK_TO_HI_PRIORITY__SHIFT 0x9
+#define GL2C_CTRL3__DCC_CMASK_TO_HI_PRIORITY__SHIFT 0xa
+#define GL2C_CTRL3__BANK_LINEAR_HASH_ENABLE__SHIFT 0xb
+#define GL2C_CTRL3__HASH_256B_ENABLE__SHIFT 0xc
+#define GL2C_CTRL3__DECOMP_NBC_IND64_DISABLE__SHIFT 0xd
+#define GL2C_CTRL3__FORCE_READ_ON_WRITE_OP__SHIFT 0xe
+#define GL2C_CTRL3__FGCG_OVERRIDE__SHIFT 0xf
+#define GL2C_CTRL3__FORCE_MTYPE_UC__SHIFT 0x10
+#define GL2C_CTRL3__DGPU_SHARED_MODE__SHIFT 0x11
+#define GL2C_CTRL3__WRITE_SET_SECTOR_FULLY_WRITTEN__SHIFT 0x12
+#define GL2C_CTRL3__EA_READ_SIZE_LIMIT__SHIFT 0x13
+#define GL2C_CTRL3__READ_BYPASS_AS_UC__SHIFT 0x14
+#define GL2C_CTRL3__WB_OPT_ENABLE__SHIFT 0x15
+#define GL2C_CTRL3__WB_OPT_BURST_MAX_COUNT__SHIFT 0x16
+#define GL2C_CTRL3__SET_GROUP_LINEAR_HASH_ENABLE__SHIFT 0x18
+#define GL2C_CTRL3__EA_GMI_DISABLE__SHIFT 0x19
+#define GL2C_CTRL3__SQC_TO_HI_PRIORITY__SHIFT 0x1a
+#define GL2C_CTRL3__INF_NAN_CLAMP__SHIFT 0x1b
+#define GL2C_CTRL3__SCRATCH__SHIFT 0x1c
+#define GL2C_CTRL3__METADATA_MTYPE_COHERENCY_MASK 0x00000003L
+#define GL2C_CTRL3__METADATA_NOFILL_MASK 0x00000008L
+#define GL2C_CTRL3__METADATA_NEXT_CL_PREFETCH_MASK 0x00000010L
+#define GL2C_CTRL3__BANK_LINEAR_HASH_MODE_MASK 0x00000020L
+#define GL2C_CTRL3__HTILE_TO_HI_PRIORITY_MASK 0x00000040L
+#define GL2C_CTRL3__UNCACHED_WRITE_ATOMIC_TO_UC_WRITE_MASK 0x00000080L
+#define GL2C_CTRL3__IO_CHANNEL_ENABLE_MASK 0x00000100L
+#define GL2C_CTRL3__FMASK_TO_HI_PRIORITY_MASK 0x00000200L
+#define GL2C_CTRL3__DCC_CMASK_TO_HI_PRIORITY_MASK 0x00000400L
+#define GL2C_CTRL3__BANK_LINEAR_HASH_ENABLE_MASK 0x00000800L
+#define GL2C_CTRL3__HASH_256B_ENABLE_MASK 0x00001000L
+#define GL2C_CTRL3__DECOMP_NBC_IND64_DISABLE_MASK 0x00002000L
+#define GL2C_CTRL3__FORCE_READ_ON_WRITE_OP_MASK 0x00004000L
+#define GL2C_CTRL3__FGCG_OVERRIDE_MASK 0x00008000L
+#define GL2C_CTRL3__FORCE_MTYPE_UC_MASK 0x00010000L
+#define GL2C_CTRL3__DGPU_SHARED_MODE_MASK 0x00020000L
+#define GL2C_CTRL3__WRITE_SET_SECTOR_FULLY_WRITTEN_MASK 0x00040000L
+#define GL2C_CTRL3__EA_READ_SIZE_LIMIT_MASK 0x00080000L
+#define GL2C_CTRL3__READ_BYPASS_AS_UC_MASK 0x00100000L
+#define GL2C_CTRL3__WB_OPT_ENABLE_MASK 0x00200000L
+#define GL2C_CTRL3__WB_OPT_BURST_MAX_COUNT_MASK 0x00C00000L
+#define GL2C_CTRL3__SET_GROUP_LINEAR_HASH_ENABLE_MASK 0x01000000L
+#define GL2C_CTRL3__EA_GMI_DISABLE_MASK 0x02000000L
+#define GL2C_CTRL3__SQC_TO_HI_PRIORITY_MASK 0x04000000L
+#define GL2C_CTRL3__INF_NAN_CLAMP_MASK 0x08000000L
+#define GL2C_CTRL3__SCRATCH_MASK 0xF0000000L
+//GL2C_LB_CTR_CTRL
+#define GL2C_LB_CTR_CTRL__START__SHIFT 0x0
+#define GL2C_LB_CTR_CTRL__LOAD__SHIFT 0x1
+#define GL2C_LB_CTR_CTRL__CLEAR__SHIFT 0x2
+#define GL2C_LB_CTR_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x1f
+#define GL2C_LB_CTR_CTRL__START_MASK 0x00000001L
+#define GL2C_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define GL2C_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+#define GL2C_LB_CTR_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x80000000L
+//GL2C_LB_DATA0
+#define GL2C_LB_DATA0__DATA__SHIFT 0x0
+#define GL2C_LB_DATA0__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA1
+#define GL2C_LB_DATA1__DATA__SHIFT 0x0
+#define GL2C_LB_DATA1__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA2
+#define GL2C_LB_DATA2__DATA__SHIFT 0x0
+#define GL2C_LB_DATA2__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA3
+#define GL2C_LB_DATA3__DATA__SHIFT 0x0
+#define GL2C_LB_DATA3__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_CTR_SEL0
+#define GL2C_LB_CTR_SEL0__SEL0__SHIFT 0x0
+#define GL2C_LB_CTR_SEL0__DIV0__SHIFT 0xf
+#define GL2C_LB_CTR_SEL0__SEL1__SHIFT 0x10
+#define GL2C_LB_CTR_SEL0__DIV1__SHIFT 0x1f
+#define GL2C_LB_CTR_SEL0__SEL0_MASK 0x000000FFL
+#define GL2C_LB_CTR_SEL0__DIV0_MASK 0x00008000L
+#define GL2C_LB_CTR_SEL0__SEL1_MASK 0x00FF0000L
+#define GL2C_LB_CTR_SEL0__DIV1_MASK 0x80000000L
+//GL2C_LB_CTR_SEL1
+#define GL2C_LB_CTR_SEL1__SEL2__SHIFT 0x0
+#define GL2C_LB_CTR_SEL1__DIV2__SHIFT 0xf
+#define GL2C_LB_CTR_SEL1__SEL3__SHIFT 0x10
+#define GL2C_LB_CTR_SEL1__DIV3__SHIFT 0x1f
+#define GL2C_LB_CTR_SEL1__SEL2_MASK 0x000000FFL
+#define GL2C_LB_CTR_SEL1__DIV2_MASK 0x00008000L
+#define GL2C_LB_CTR_SEL1__SEL3_MASK 0x00FF0000L
+#define GL2C_LB_CTR_SEL1__DIV3_MASK 0x80000000L
+//GL2C_CTRL4
+#define GL2C_CTRL4__METADATA_WR_OP_CID__SHIFT 0x0
+#define GL2C_CTRL4__SPA_CHANNEL_ENABLE__SHIFT 0x1
+#define GL2C_CTRL4__SRC_FIFO_MDC_LOW_PRIORITY__SHIFT 0x2
+#define GL2C_CTRL4__WRITEBACK_FIFO_STALL_ENABLE__SHIFT 0x3
+#define GL2C_CTRL4__CM_MGCG_MODE__SHIFT 0x4
+#define GL2C_CTRL4__MDC_MGCG_MODE__SHIFT 0x5
+#define GL2C_CTRL4__TAG_MGCG_MODE__SHIFT 0x6
+#define GL2C_CTRL4__CORE_MGCG_MODE__SHIFT 0x7
+#define GL2C_CTRL4__EXECUTE_MGCG_MODE__SHIFT 0x8
+#define GL2C_CTRL4__EA_NACK_DISABLE__SHIFT 0x9
+#define GL2C_CTRL4__FED_SAFE_MODE__SHIFT 0xa
+#define GL2C_CTRL4__FLUSH_SET_COUNTER_MASK_DISABLE__SHIFT 0xb
+#define GL2C_CTRL4__NO_WRITE_ACK_TO_HIT_QUEUE__SHIFT 0x1a
+#define GL2C_CTRL4__METADATA_WR_OP_CID_MASK 0x00000001L
+#define GL2C_CTRL4__SPA_CHANNEL_ENABLE_MASK 0x00000002L
+#define GL2C_CTRL4__SRC_FIFO_MDC_LOW_PRIORITY_MASK 0x00000004L
+#define GL2C_CTRL4__WRITEBACK_FIFO_STALL_ENABLE_MASK 0x00000008L
+#define GL2C_CTRL4__CM_MGCG_MODE_MASK 0x00000010L
+#define GL2C_CTRL4__MDC_MGCG_MODE_MASK 0x00000020L
+#define GL2C_CTRL4__TAG_MGCG_MODE_MASK 0x00000040L
+#define GL2C_CTRL4__CORE_MGCG_MODE_MASK 0x00000080L
+#define GL2C_CTRL4__EXECUTE_MGCG_MODE_MASK 0x00000100L
+#define GL2C_CTRL4__EA_NACK_DISABLE_MASK 0x00000200L
+#define GL2C_CTRL4__FED_SAFE_MODE_MASK 0x00000400L
+#define GL2C_CTRL4__FLUSH_SET_COUNTER_MASK_DISABLE_MASK 0x00000800L
+#define GL2C_CTRL4__NO_WRITE_ACK_TO_HIT_QUEUE_MASK 0x04000000L
+//GL2C_DISCARD_STALL_CTRL
+#define GL2C_DISCARD_STALL_CTRL__LIMIT__SHIFT 0x0
+#define GL2C_DISCARD_STALL_CTRL__WINDOW__SHIFT 0xf
+#define GL2C_DISCARD_STALL_CTRL__DROP_NEXT__SHIFT 0x1e
+#define GL2C_DISCARD_STALL_CTRL__ENABLE__SHIFT 0x1f
+#define GL2C_DISCARD_STALL_CTRL__LIMIT_MASK 0x00007FFFL
+#define GL2C_DISCARD_STALL_CTRL__WINDOW_MASK 0x3FFF8000L
+#define GL2C_DISCARD_STALL_CTRL__DROP_NEXT_MASK 0x40000000L
+#define GL2C_DISCARD_STALL_CTRL__ENABLE_MASK 0x80000000L
+//GL2A_ADDR_MATCH_CTRL
+#define GL2A_ADDR_MATCH_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_ADDR_MATCH_CTRL__DISABLE_MASK 0xFFFFFFFFL
+//GL2A_ADDR_MATCH_MASK
+#define GL2A_ADDR_MATCH_MASK__ADDR_MASK__SHIFT 0x0
+#define GL2A_ADDR_MATCH_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//GL2A_ADDR_MATCH_SIZE
+#define GL2A_ADDR_MATCH_SIZE__MAX_COUNT__SHIFT 0x0
+#define GL2A_ADDR_MATCH_SIZE__MAX_COUNT_MASK 0x00000007L
+//GL2A_PRIORITY_CTRL
+#define GL2A_PRIORITY_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_PRIORITY_CTRL__DISABLE_MASK 0xFFFFFFFFL
+//GL2A_CTRL
+#define GL2A_CTRL__RTN_ARB_TIMER_RESET_VALUE__SHIFT 0x0
+#define GL2A_CTRL__STAY_ON_BURST__SHIFT 0x1
+#define GL2A_CTRL__FGCG_OVERRIDE__SHIFT 0x2
+#define GL2A_CTRL__CLIENT_ARB_PRIO_STAY__SHIFT 0x3
+#define GL2A_CTRL__GCRD_CREDIT_SAFE_REG__SHIFT 0x4
+#define GL2A_CTRL__REQ_CREDIT_SAFE_REG__SHIFT 0x8
+#define GL2A_CTRL__WRITE_COMBINE_TIMEOUT_COUNT__SHIFT 0xc
+#define GL2A_CTRL__INTERNAL_RETURN_BYPASS_ENABLE__SHIFT 0x11
+#define GL2A_CTRL__ADDR_REMOVE_COLBITS__SHIFT 0x12
+#define GL2A_CTRL__RTN_ARB_TIMER_RESET_VALUE_MASK 0x00000001L
+#define GL2A_CTRL__STAY_ON_BURST_MASK 0x00000002L
+#define GL2A_CTRL__FGCG_OVERRIDE_MASK 0x00000004L
+#define GL2A_CTRL__CLIENT_ARB_PRIO_STAY_MASK 0x00000008L
+#define GL2A_CTRL__GCRD_CREDIT_SAFE_REG_MASK 0x000000F0L
+#define GL2A_CTRL__REQ_CREDIT_SAFE_REG_MASK 0x00000F00L
+#define GL2A_CTRL__WRITE_COMBINE_TIMEOUT_COUNT_MASK 0x0001F000L
+#define GL2A_CTRL__INTERNAL_RETURN_BYPASS_ENABLE_MASK 0x00020000L
+#define GL2A_CTRL__ADDR_REMOVE_COLBITS_MASK 0x00040000L
+//GL2A_RESP_THROTTLE_CTRL
+#define GL2A_RESP_THROTTLE_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_GL1__SHIFT 0x10
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_CH__SHIFT 0x18
+#define GL2A_RESP_THROTTLE_CTRL__DISABLE_MASK 0x0000FFFFL
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_GL1_MASK 0x00FF0000L
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_CH_MASK 0xFF000000L
+
+
+// addressBlock: gc_gl1hdec
+//GL1H_ARB_CTRL
+#define GL1H_ARB_CTRL__REQ_FGCG_DISABLE__SHIFT 0x0
+#define GL1H_ARB_CTRL__SRC_FGCG_DISABLE__SHIFT 0x1
+#define GL1H_ARB_CTRL__RET_FGCG_DISABLE__SHIFT 0x2
+#define GL1H_ARB_CTRL__CHICKEN_BITS__SHIFT 0x3
+#define GL1H_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0xb
+#define GL1H_ARB_CTRL__REQ_FGCG_DISABLE_MASK 0x00000001L
+#define GL1H_ARB_CTRL__SRC_FGCG_DISABLE_MASK 0x00000002L
+#define GL1H_ARB_CTRL__RET_FGCG_DISABLE_MASK 0x00000004L
+#define GL1H_ARB_CTRL__CHICKEN_BITS_MASK 0x000007F8L
+#define GL1H_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000800L
+//GL1H_GL1_CREDITS
+#define GL1H_GL1_CREDITS__GL1_REQ_CREDITS__SHIFT 0x0
+#define GL1H_GL1_CREDITS__GL1_REQ_CREDITS_MASK 0x000000FFL
+//GL1H_BURST_MASK
+#define GL1H_BURST_MASK__BURST_ADDR_MASK__SHIFT 0x0
+#define GL1H_BURST_MASK__BURST_ADDR_MASK_MASK 0x000000FFL
+//GL1H_BURST_CTRL
+#define GL1H_BURST_CTRL__MAX_BURST_SIZE__SHIFT 0x0
+#define GL1H_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define GL1H_BURST_CTRL__SPARE_BURST_CTRL_BITS__SHIFT 0x4
+#define GL1H_BURST_CTRL__MAX_BURST_SIZE_MASK 0x00000007L
+#define GL1H_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define GL1H_BURST_CTRL__SPARE_BURST_CTRL_BITS_MASK 0x00000030L
+//GL1H_ARB_STATUS
+#define GL1H_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define GL1H_ARB_STATUS__CLIENT1_ILLEGAL_REQ__SHIFT 0x1
+#define GL1H_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define GL1H_ARB_STATUS__CLIENT1_ILLEGAL_REQ_MASK 0x00000002L
+
+
+// addressBlock: gc_perfddec
+//CPG_PERFCOUNTER1_LO
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER1_HI
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_LO
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_HI
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_LO
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_HI
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_LO
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_HI
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_LO
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_HI
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_LO
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_HI
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_LATENCY_STATS_DATA
+#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_LATENCY_STATS_DATA
+#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPC_LATENCY_STATS_DATA
+#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_LO
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_HI
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_LO
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_HI
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_LO
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_HI
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_LO
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_HI
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_LO
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_HI
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_LO
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_HI
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER0_LO
+#define GE1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER0_HI
+#define GE1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER1_LO
+#define GE1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER1_HI
+#define GE1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER2_LO
+#define GE1_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER2_HI
+#define GE1_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER3_LO
+#define GE1_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER3_HI
+#define GE1_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER0_LO
+#define GE2_DIST_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER0_HI
+#define GE2_DIST_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER1_LO
+#define GE2_DIST_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER1_HI
+#define GE2_DIST_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER2_LO
+#define GE2_DIST_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER2_HI
+#define GE2_DIST_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER3_LO
+#define GE2_DIST_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER3_HI
+#define GE2_DIST_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER0_LO
+#define GE2_SE_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER0_HI
+#define GE2_SE_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER1_LO
+#define GE2_SE_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER1_HI
+#define GE2_SE_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER2_LO
+#define GE2_SE_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER2_HI
+#define GE2_SE_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER3_LO
+#define GE2_SE_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER3_HI
+#define GE2_SE_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_LO
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_HI
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_LO
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_HI
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_LO
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_HI
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_LO
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_HI
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_LO
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_HI
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_LO
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_HI
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_LO
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_HI
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_LO
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_HI
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_LO
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_HI
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_LO
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_HI
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_LO
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_HI
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_LO
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_HI
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_HI
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_LO
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_HI
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_LO
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_HI
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_LO
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_HI
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_LO
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_HI
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_LO
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_HI
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_LO
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER0_HI
+#define PC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER0_LO
+#define PC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER1_HI
+#define PC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER1_LO
+#define PC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER2_HI
+#define PC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER2_LO
+#define PC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER3_HI
+#define PC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER3_LO
+#define PC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_LO
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_LO
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_LO
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_LO
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_LO
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_LO
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_LO
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_LO
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER0_LO
+#define SQG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER0_HI
+#define SQG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER1_LO
+#define SQG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER1_HI
+#define SQG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER2_LO
+#define SQG_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER2_HI
+#define SQG_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER3_LO
+#define SQG_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER3_HI
+#define SQG_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER4_LO
+#define SQG_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER4_HI
+#define SQG_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER5_LO
+#define SQG_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER5_HI
+#define SQG_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER6_LO
+#define SQG_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER6_HI
+#define SQG_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER7_LO
+#define SQG_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER7_HI
+#define SQG_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_LO
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_HI
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_LO
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_HI
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_LO
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_HI
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_LO
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_HI
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_LO
+#define GCEA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_HI
+#define GCEA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_LO
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_HI
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GDS_PERFCOUNTER0_LO
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_HI
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_LO
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_HI
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_LO
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_HI
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_LO
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_HI
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_LO
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_HI
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_LO
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_HI
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_LO
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_HI
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_LO
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_HI
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_LO
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_HI
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_LO
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_HI
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_LO
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_HI
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_LO
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_HI
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER_FILTER
+#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xd
+#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0x11
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x16
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x18
+#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1b
+#define TCP_PERFCOUNTER_FILTER__DLC__SHIFT 0x1c
+#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x1d
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1e
+#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x00000FE0L
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x0001E000L
+#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x003E0000L
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00C00000L
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x07000000L
+#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x08000000L
+#define TCP_PERFCOUNTER_FILTER__DLC_MASK 0x10000000L
+#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x20000000L
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x40000000L
+//TCP_PERFCOUNTER_FILTER2
+#define TCP_PERFCOUNTER_FILTER2__REQ_MODE__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER2__REQ_MODE_MASK 0x00000007L
+//TCP_PERFCOUNTER_FILTER_EN
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
+#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x8
+#define TCP_PERFCOUNTER_FILTER_EN__DLC__SHIFT 0x9
+#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0xa
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER_EN__REQ_MODE__SHIFT 0xc
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
+#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000100L
+#define TCP_PERFCOUNTER_FILTER_EN__DLC_MASK 0x00000200L
+#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000400L
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000800L
+#define TCP_PERFCOUNTER_FILTER_EN__REQ_MODE_MASK 0x00001000L
+//GL2C_PERFCOUNTER0_LO
+#define GL2C_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER0_HI
+#define GL2C_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER1_LO
+#define GL2C_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER1_HI
+#define GL2C_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER2_LO
+#define GL2C_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER2_HI
+#define GL2C_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER3_LO
+#define GL2C_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER3_HI
+#define GL2C_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER0_LO
+#define GL2A_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER0_HI
+#define GL2A_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER1_LO
+#define GL2A_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER1_HI
+#define GL2A_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER2_LO
+#define GL2A_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER2_HI
+#define GL2A_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER3_LO
+#define GL2A_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER3_HI
+#define GL2A_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER0_LO
+#define GL1C_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER0_HI
+#define GL1C_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER1_LO
+#define GL1C_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER1_HI
+#define GL1C_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER2_LO
+#define GL1C_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER2_HI
+#define GL1C_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER3_LO
+#define GL1C_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER3_HI
+#define GL1C_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER0_LO
+#define CHC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER0_HI
+#define CHC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER1_LO
+#define CHC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER1_HI
+#define CHC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER2_LO
+#define CHC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER2_HI
+#define CHC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER3_LO
+#define CHC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER3_HI
+#define CHC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER0_LO
+#define CHCG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER0_HI
+#define CHCG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER1_LO
+#define CHCG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER1_HI
+#define CHCG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER2_LO
+#define CHCG_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER2_HI
+#define CHCG_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER3_LO
+#define CHCG_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER3_HI
+#define CHCG_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_LO
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_HI
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_LO
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_HI
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_LO
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_HI
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_LO
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_HI
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_LO
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_HI
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_LO
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_HI
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_LO
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_HI
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_LO
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_HI
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_LO
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_HI
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_LO
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_HI
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_LO
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_HI
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_LO
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_HI
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_LO
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_HI
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_LO
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_HI
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER0_LO
+#define GCR_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCR_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER0_HI
+#define GCR_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCR_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER1_LO
+#define GCR_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCR_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER1_HI
+#define GCR_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCR_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER0_LO
+#define PA_PH_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER0_HI
+#define PA_PH_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER1_LO
+#define PA_PH_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER1_HI
+#define PA_PH_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER2_LO
+#define PA_PH_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER2_HI
+#define PA_PH_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER3_LO
+#define PA_PH_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER3_HI
+#define PA_PH_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER4_LO
+#define PA_PH_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER4_HI
+#define PA_PH_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER5_LO
+#define PA_PH_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER5_HI
+#define PA_PH_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER6_LO
+#define PA_PH_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER6_HI
+#define PA_PH_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER7_LO
+#define PA_PH_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER7_HI
+#define PA_PH_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER0_LO
+#define UTCL1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER0_HI
+#define UTCL1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER1_LO
+#define UTCL1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER1_HI
+#define UTCL1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER2_LO
+#define UTCL1_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER2_HI
+#define UTCL1_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER3_LO
+#define UTCL1_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER3_HI
+#define UTCL1_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER0_LO
+#define GL1A_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER0_HI
+#define GL1A_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER1_LO
+#define GL1A_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER1_HI
+#define GL1A_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER2_LO
+#define GL1A_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER2_HI
+#define GL1A_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER3_LO
+#define GL1A_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER3_HI
+#define GL1A_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER0_LO
+#define GL1H_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER0_HI
+#define GL1H_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER1_LO
+#define GL1H_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER1_HI
+#define GL1H_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER2_LO
+#define GL1H_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER2_HI
+#define GL1H_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER3_LO
+#define GL1H_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER3_HI
+#define GL1H_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER0_LO
+#define CHA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER0_HI
+#define CHA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER1_LO
+#define CHA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER1_HI
+#define CHA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER2_LO
+#define CHA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER2_HI
+#define CHA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER3_LO
+#define CHA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER3_HI
+#define CHA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER2_LO
+#define GUS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GUS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER2_HI
+#define GUS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GUS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER_LO
+#define GUS_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GUS_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER_HI
+#define GUS_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GUS_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GUS_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GUS_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_perfsdec
+//CPG_PERFCOUNTER1_SELECT
+#define CPG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT1
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPC_PERFCOUNTER1_SELECT
+#define CPC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPC_PERFCOUNTER0_SELECT1
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER1_SELECT
+#define CPF_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPF_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT1
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CP_PERFMON_CNTL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//CPC_PERFCOUNTER0_SELECT
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPG_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPF_LATENCY_STATS_SELECT
+#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
+#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPG_LATENCY_STATS_SELECT
+#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_LATENCY_STATS_SELECT
+#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CP_DRAW_OBJECT
+#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
+#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
+//CP_DRAW_OBJECT_COUNTER
+#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
+#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
+//CP_DRAW_WINDOW_MASK_HI
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_HI
+#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_LO
+#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
+#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
+#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
+#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
+//CP_DRAW_WINDOW_CNTL
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
+#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
+#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
+//GRBM_PERFCOUNTER0_SELECT
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER0_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER0_SELECT__GE_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__GE_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_PERFCOUNTER1_SELECT
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER1_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER1_SELECT__GE_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__GE_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_SE0_PERFCOUNTER_SELECT
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE0_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE0_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE0_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE0_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE1_PERFCOUNTER_SELECT
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE1_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE1_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE1_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE1_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE2_PERFCOUNTER_SELECT
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE2_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE2_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE2_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE2_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE3_PERFCOUNTER_SELECT
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE3_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE3_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE3_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE3_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_PERFCOUNTER0_SELECT_HI
+#define GRBM_PERFCOUNTER0_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x1
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK__SHIFT 0x2
+#define GRBM_PERFCOUNTER0_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK__SHIFT 0x3
+#define GRBM_PERFCOUNTER0_SELECT_HI__CH_BUSY_USER_DEFINED_MASK__SHIFT 0x4
+#define GRBM_PERFCOUNTER0_SELECT_HI__PH_BUSY_USER_DEFINED_MASK__SHIFT 0x5
+#define GRBM_PERFCOUNTER0_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK__SHIFT 0x6
+#define GRBM_PERFCOUNTER0_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK__SHIFT 0x7
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x8
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x9
+#define GRBM_PERFCOUNTER0_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00000002L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK_MASK 0x00000004L
+#define GRBM_PERFCOUNTER0_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK_MASK 0x00000008L
+#define GRBM_PERFCOUNTER0_SELECT_HI__CH_BUSY_USER_DEFINED_MASK_MASK 0x00000010L
+#define GRBM_PERFCOUNTER0_SELECT_HI__PH_BUSY_USER_DEFINED_MASK_MASK 0x00000020L
+#define GRBM_PERFCOUNTER0_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK_MASK 0x00000040L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK_MASK 0x00000080L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x00000100L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x00000200L
+//GRBM_PERFCOUNTER1_SELECT_HI
+#define GRBM_PERFCOUNTER1_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x1
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK__SHIFT 0x2
+#define GRBM_PERFCOUNTER1_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK__SHIFT 0x3
+#define GRBM_PERFCOUNTER1_SELECT_HI__CH_BUSY_USER_DEFINED_MASK__SHIFT 0x4
+#define GRBM_PERFCOUNTER1_SELECT_HI__PH_BUSY_USER_DEFINED_MASK__SHIFT 0x5
+#define GRBM_PERFCOUNTER1_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK__SHIFT 0x6
+#define GRBM_PERFCOUNTER1_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK__SHIFT 0x7
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x8
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x9
+#define GRBM_PERFCOUNTER1_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00000002L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK_MASK 0x00000004L
+#define GRBM_PERFCOUNTER1_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK_MASK 0x00000008L
+#define GRBM_PERFCOUNTER1_SELECT_HI__CH_BUSY_USER_DEFINED_MASK_MASK 0x00000010L
+#define GRBM_PERFCOUNTER1_SELECT_HI__PH_BUSY_USER_DEFINED_MASK_MASK 0x00000020L
+#define GRBM_PERFCOUNTER1_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK_MASK 0x00000040L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK_MASK 0x00000080L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x00000100L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x00000200L
+//GE1_PERFCOUNTER0_SELECT
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER0_SELECT1
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER1_SELECT
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER1_SELECT1
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER2_SELECT
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER2_SELECT1
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER3_SELECT
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER3_SELECT1
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER0_SELECT
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER0_SELECT1
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER1_SELECT
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER1_SELECT1
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER2_SELECT
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER2_SELECT1
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER3_SELECT
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER3_SELECT1
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER0_SELECT
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER0_SELECT1
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER1_SELECT
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER1_SELECT1
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER2_SELECT
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER2_SELECT1
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER3_SELECT
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER3_SELECT1
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT1
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT1
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT1
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT1
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT1
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER1_SELECT
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER2_SELECT
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER3_SELECT
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER4_SELECT
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER5_SELECT
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER6_SELECT
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER7_SELECT
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER0_SELECT
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER0_SELECT1
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT1
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT1
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT1
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER4_SELECT
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER5_SELECT
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER_BINS
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
+//PC_PERFCOUNTER0_SELECT
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER1_SELECT
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER2_SELECT
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER3_SELECT
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER0_SELECT1
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER1_SELECT1
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER2_SELECT1
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER3_SELECT1
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SQ_PERFCOUNTER0_SELECT
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER1_SELECT
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER2_SELECT
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER3_SELECT
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER4_SELECT
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER5_SELECT
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER6_SELECT
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER7_SELECT
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER8_SELECT
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER9_SELECT
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER10_SELECT
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER11_SELECT
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER12_SELECT
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER13_SELECT
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER14_SELECT
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER15_SELECT
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER0_SELECT
+#define SQG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER1_SELECT
+#define SQG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER2_SELECT
+#define SQG_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER3_SELECT
+#define SQG_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER4_SELECT
+#define SQG_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER5_SELECT
+#define SQG_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER6_SELECT
+#define SQG_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER7_SELECT
+#define SQG_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER_CTRL
+#define SQG_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQG_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQG_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQG_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF__SHIFT 0xe
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF__SHIFT 0xf
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF__SHIFT 0x10
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF__SHIFT 0x11
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF__SHIFT 0x12
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF__SHIFT 0x13
+#define SQG_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQG_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQG_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQG_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF_MASK 0x00004000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF_MASK 0x00008000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF_MASK 0x00010000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF_MASK 0x00020000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF_MASK 0x00040000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF_MASK 0x00080000L
+//SQG_PERFCOUNTER_CTRL2
+#define SQG_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQG_PERFCOUNTER_CTRL2__VMID_EN__SHIFT 0x1
+#define SQG_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQG_PERFCOUNTER_CTRL2__VMID_EN_MASK 0x0001FFFEL
+//SQG_PERF_SAMPLE_FINISH
+#define SQG_PERF_SAMPLE_FINISH__STATUS__SHIFT 0x0
+#define SQG_PERF_SAMPLE_FINISH__STATUS_MASK 0x0000007FL
+//SQ_PERFCOUNTER_CTRL
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF__SHIFT 0xe
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF__SHIFT 0xf
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF__SHIFT 0x10
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF__SHIFT 0x11
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF__SHIFT 0x12
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF__SHIFT 0x13
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF_MASK 0x00004000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF_MASK 0x00008000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF_MASK 0x00010000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF_MASK 0x00020000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF_MASK 0x00040000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF_MASK 0x00080000L
+//SQ_PERFCOUNTER_CTRL2
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL2__VMID_EN__SHIFT 0x1
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL2__VMID_EN_MASK 0x0001FFFEL
+//SQ_THREAD_TRACE_BUF0_BASE
+#define SQ_THREAD_TRACE_BUF0_BASE__BASE_LO__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF0_BASE__BASE_LO_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BUF0_SIZE
+#define SQ_THREAD_TRACE_BUF0_SIZE__BASE_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF0_SIZE__SIZE__SHIFT 0x8
+#define SQ_THREAD_TRACE_BUF0_SIZE__BASE_HI_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_BUF0_SIZE__SIZE_MASK 0x3FFFFF00L
+//SQ_THREAD_TRACE_BUF1_BASE
+#define SQ_THREAD_TRACE_BUF1_BASE__BASE_LO__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF1_BASE__BASE_LO_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BUF1_SIZE
+#define SQ_THREAD_TRACE_BUF1_SIZE__BASE_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF1_SIZE__SIZE__SHIFT 0x8
+#define SQ_THREAD_TRACE_BUF1_SIZE__BASE_HI_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_BUF1_SIZE__SIZE_MASK 0x3FFFFF00L
+//SQ_THREAD_TRACE_CTRL
+#define SQ_THREAD_TRACE_CTRL__MODE__SHIFT 0x0
+#define SQ_THREAD_TRACE_CTRL__ALL_VMID__SHIFT 0x2
+#define SQ_THREAD_TRACE_CTRL__GL1_PERF_EN__SHIFT 0x3
+#define SQ_THREAD_TRACE_CTRL__INTERRUPT_EN__SHIFT 0x4
+#define SQ_THREAD_TRACE_CTRL__DOUBLE_BUFFER__SHIFT 0x5
+#define SQ_THREAD_TRACE_CTRL__HIWATER__SHIFT 0x6
+#define SQ_THREAD_TRACE_CTRL__REG_AT_HWM__SHIFT 0x9
+#define SQ_THREAD_TRACE_CTRL__SPI_STALL_EN__SHIFT 0xb
+#define SQ_THREAD_TRACE_CTRL__SQ_STALL_EN__SHIFT 0xc
+#define SQ_THREAD_TRACE_CTRL__UTIL_TIMER__SHIFT 0xd
+#define SQ_THREAD_TRACE_CTRL__WAVESTART_MODE__SHIFT 0xe
+#define SQ_THREAD_TRACE_CTRL__RT_FREQ__SHIFT 0x10
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_MARKERS__SHIFT 0x12
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_DRAWS__SHIFT 0x13
+#define SQ_THREAD_TRACE_CTRL__LOWATER_OFFSET__SHIFT 0x14
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_PADDING_DIS__SHIFT 0x1c
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_MODE__SHIFT 0x1d
+#define SQ_THREAD_TRACE_CTRL__DRAW_EVENT_EN__SHIFT 0x1f
+#define SQ_THREAD_TRACE_CTRL__MODE_MASK 0x00000003L
+#define SQ_THREAD_TRACE_CTRL__ALL_VMID_MASK 0x00000004L
+#define SQ_THREAD_TRACE_CTRL__GL1_PERF_EN_MASK 0x00000008L
+#define SQ_THREAD_TRACE_CTRL__INTERRUPT_EN_MASK 0x00000010L
+#define SQ_THREAD_TRACE_CTRL__DOUBLE_BUFFER_MASK 0x00000020L
+#define SQ_THREAD_TRACE_CTRL__HIWATER_MASK 0x000001C0L
+#define SQ_THREAD_TRACE_CTRL__REG_AT_HWM_MASK 0x00000600L
+#define SQ_THREAD_TRACE_CTRL__SPI_STALL_EN_MASK 0x00000800L
+#define SQ_THREAD_TRACE_CTRL__SQ_STALL_EN_MASK 0x00001000L
+#define SQ_THREAD_TRACE_CTRL__UTIL_TIMER_MASK 0x00002000L
+#define SQ_THREAD_TRACE_CTRL__WAVESTART_MODE_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_CTRL__RT_FREQ_MASK 0x00030000L
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_MARKERS_MASK 0x00040000L
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_DRAWS_MASK 0x00080000L
+#define SQ_THREAD_TRACE_CTRL__LOWATER_OFFSET_MASK 0x00700000L
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_PADDING_DIS_MASK 0x10000000L
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_CTRL__DRAW_EVENT_EN_MASK 0x80000000L
+//SQ_THREAD_TRACE_MASK
+#define SQ_THREAD_TRACE_MASK__SIMD_SEL__SHIFT 0x0
+#define SQ_THREAD_TRACE_MASK__WGP_SEL__SHIFT 0x4
+#define SQ_THREAD_TRACE_MASK__SA_SEL__SHIFT 0x9
+#define SQ_THREAD_TRACE_MASK__WTYPE_INCLUDE__SHIFT 0xa
+#define SQ_THREAD_TRACE_MASK__EXCLUDE_NONDETAIL_SHADERDATA__SHIFT 0x11
+#define SQ_THREAD_TRACE_MASK__SIMD_SEL_MASK 0x00000003L
+#define SQ_THREAD_TRACE_MASK__WGP_SEL_MASK 0x000000F0L
+#define SQ_THREAD_TRACE_MASK__SA_SEL_MASK 0x00000200L
+#define SQ_THREAD_TRACE_MASK__WTYPE_INCLUDE_MASK 0x0001FC00L
+#define SQ_THREAD_TRACE_MASK__EXCLUDE_NONDETAIL_SHADERDATA_MASK 0x00020000L
+//SQ_THREAD_TRACE_TOKEN_MASK
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_EXCLUDE__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK__TTRACE_EXEC__SHIFT 0xb
+#define SQ_THREAD_TRACE_TOKEN_MASK__BOP_EVENTS_TOKEN_INCLUDE__SHIFT 0xc
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_INCLUDE__SHIFT 0x10
+#define SQ_THREAD_TRACE_TOKEN_MASK__INST_EXCLUDE__SHIFT 0x18
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_EXCLUDE__SHIFT 0x1a
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DETAIL_ALL__SHIFT 0x1f
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_EXCLUDE_MASK 0x000007FFL
+#define SQ_THREAD_TRACE_TOKEN_MASK__TTRACE_EXEC_MASK 0x00000800L
+#define SQ_THREAD_TRACE_TOKEN_MASK__BOP_EVENTS_TOKEN_INCLUDE_MASK 0x00001000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_INCLUDE_MASK 0x00FF0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__INST_EXCLUDE_MASK 0x03000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_EXCLUDE_MASK 0x1C000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DETAIL_ALL_MASK 0x80000000L
+//SQ_THREAD_TRACE_WPTR
+#define SQ_THREAD_TRACE_WPTR__OFFSET__SHIFT 0x0
+#define SQ_THREAD_TRACE_WPTR__BUFFER_ID__SHIFT 0x1f
+#define SQ_THREAD_TRACE_WPTR__OFFSET_MASK 0x1FFFFFFFL
+#define SQ_THREAD_TRACE_WPTR__BUFFER_ID_MASK 0x80000000L
+//SQ_THREAD_TRACE_STATUS
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0xc
+#define SQ_THREAD_TRACE_STATUS__WRITE_ERROR__SHIFT 0x18
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x19
+#define SQ_THREAD_TRACE_STATUS__OWNER_VMID__SHIFT 0x1c
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x00000FFFL
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x00FFF000L
+#define SQ_THREAD_TRACE_STATUS__WRITE_ERROR_MASK 0x01000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x02000000L
+#define SQ_THREAD_TRACE_STATUS__OWNER_VMID_MASK 0xF0000000L
+//SQ_THREAD_TRACE_STATUS2
+#define SQ_THREAD_TRACE_STATUS2__BUF0_FULL__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS2__BUF1_FULL__SHIFT 0x1
+#define SQ_THREAD_TRACE_STATUS2__PACKET_LOST_BUF_NO_LOCKDOWN__SHIFT 0x4
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_STATUS__SHIFT 0x8
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE__SHIFT 0xd
+#define SQ_THREAD_TRACE_STATUS2__WRITE_BUF_FULL__SHIFT 0xe
+#define SQ_THREAD_TRACE_STATUS2__BUF0_FULL_MASK 0x00000001L
+#define SQ_THREAD_TRACE_STATUS2__BUF1_FULL_MASK 0x00000002L
+#define SQ_THREAD_TRACE_STATUS2__PACKET_LOST_BUF_NO_LOCKDOWN_MASK 0x00000010L
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_STATUS_MASK 0x00001F00L
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_MASK 0x00002000L
+#define SQ_THREAD_TRACE_STATUS2__WRITE_BUF_FULL_MASK 0x00004000L
+//SQ_THREAD_TRACE_GFX_DRAW_CNTR
+#define SQ_THREAD_TRACE_GFX_DRAW_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_GFX_DRAW_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_GFX_MARKER_CNTR
+#define SQ_THREAD_TRACE_GFX_MARKER_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_GFX_MARKER_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_HP3D_DRAW_CNTR
+#define SQ_THREAD_TRACE_HP3D_DRAW_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_HP3D_DRAW_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_HP3D_MARKER_CNTR
+#define SQ_THREAD_TRACE_HP3D_MARKER_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_HP3D_MARKER_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_DROPPED_CNTR
+#define SQ_THREAD_TRACE_DROPPED_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_DROPPED_CNTR__CNTR_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_SELECT
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCEA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCEA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCEA_PERFCOUNTER2_SELECT1
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCEA_PERFCOUNTER2_MODE
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GCEA_PERFCOUNTER0_CFG
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER1_CFG
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER_RSLT_CNTL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SX_PERFCOUNTER0_SELECT
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER2_SELECT
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER3_SELECT
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER0_SELECT1
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT1
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT1
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT1
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT1
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT1
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT1
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER1_SELECT
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT1
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TD_PERFCOUNTER1_SELECT
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT1
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT1
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER2_SELECT
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER3_SELECT
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER0_SELECT
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2C_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER0_SELECT1
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2C_PERFCOUNTER1_SELECT
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2C_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER1_SELECT1
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2C_PERFCOUNTER2_SELECT
+#define GL2C_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER3_SELECT
+#define GL2C_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER0_SELECT
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2A_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER0_SELECT1
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2A_PERFCOUNTER1_SELECT
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2A_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER1_SELECT1
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2A_PERFCOUNTER2_SELECT
+#define GL2A_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER3_SELECT
+#define GL2A_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER0_SELECT
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1C_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1C_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER0_SELECT1
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1C_PERFCOUNTER1_SELECT
+#define GL1C_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER2_SELECT
+#define GL1C_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER3_SELECT
+#define GL1C_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER0_SELECT
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER0_SELECT1
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHC_PERFCOUNTER1_SELECT
+#define CHC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER2_SELECT
+#define CHC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER3_SELECT
+#define CHC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER0_SELECT
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHCG_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHCG_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER0_SELECT1
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHCG_PERFCOUNTER1_SELECT
+#define CHCG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER2_SELECT
+#define CHCG_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER3_SELECT
+#define CHCG_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER_FILTER
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
+//CB_PERFCOUNTER0_SELECT
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER0_SELECT1
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//CB_PERFCOUNTER1_SELECT
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER2_SELECT
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER3_SELECT
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT1
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT1
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER2_SELECT
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER3_SELECT
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RLC_SPM_PERFMON_CNTL
+#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x0
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
+#define RLC_SPM_PERFMON_CNTL__DISABLE_GFXCLOCK_COUNT__SHIFT 0xe
+#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xf
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFFL
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
+#define RLC_SPM_PERFMON_CNTL__DISABLE_GFXCLOCK_COUNT_MASK 0x00004000L
+#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x00008000L
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_BASE_LO
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_RING_BASE_HI
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_SIZE
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
+//RLC_SPM_RING_WRPTR
+#define RLC_SPM_RING_WRPTR__RESERVED__SHIFT 0x0
+#define RLC_SPM_RING_WRPTR__PERFMON_RING_WRPTR__SHIFT 0x5
+#define RLC_SPM_RING_WRPTR__RESERVED_MASK 0x0000001FL
+#define RLC_SPM_RING_WRPTR__PERFMON_RING_WRPTR_MASK 0xFFFFFFE0L
+//RLC_SPM_RING_RDPTR
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
+//RLC_SPM_SEGMENT_THRESHOLD
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
+#define RLC_SPM_SEGMENT_THRESHOLD__RESERVED__SHIFT 0x8
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0x000000FFL
+#define RLC_SPM_SEGMENT_THRESHOLD__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PERFMON_SEGMENT_SIZE
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__TOTAL_NUM_SEGMENT__SHIFT 0x0
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_SEGMENT__SHIFT 0x10
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE_NUM_SEGMENT__SHIFT 0x18
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__TOTAL_NUM_SEGMENT_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_SEGMENT_MASK 0x00FF0000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE_NUM_SEGMENT_MASK 0xFF000000L
+//RLC_SPM_GLOBAL_MUXSEL_ADDR
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_GLOBAL_MUXSEL_DATA
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL0__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL1__SHIFT 0x10
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL0_MASK 0x0000FFFFL
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL1_MASK 0xFFFF0000L
+//RLC_SPM_SE_MUXSEL_ADDR
+#define RLC_SPM_SE_MUXSEL_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_SE_MUXSEL_DATA
+#define RLC_SPM_SE_MUXSEL_DATA__SEL0__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_DATA__SEL1__SHIFT 0x10
+#define RLC_SPM_SE_MUXSEL_DATA__SEL0_MASK 0x0000FFFFL
+#define RLC_SPM_SE_MUXSEL_DATA__SEL1_MASK 0xFFFF0000L
+//RLC_SPM_ACCUM_DATARAM_ADDR
+#define RLC_SPM_ACCUM_DATARAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_ACCUM_DATARAM_ADDR__addr_MASK 0x0000007FL
+#define RLC_SPM_ACCUM_DATARAM_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_ACCUM_DATARAM_DATA
+#define RLC_SPM_ACCUM_DATARAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_DATA__data_MASK 0xFFFFFFFFL
+//RLC_SPM_ACCUM_SWA_DATARAM_ADDR
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__addr_MASK 0x0000007FL
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_ACCUM_SWA_DATARAM_DATA
+#define RLC_SPM_ACCUM_SWA_DATARAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_SWA_DATARAM_DATA__data_MASK 0xFFFFFFFFL
+//RLC_SPM_ACCUM_CTRLRAM_ADDR
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__RESERVED__SHIFT 0xb
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__addr_MASK 0x000007FFL
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__RESERVED_MASK 0xFFFFF800L
+//RLC_SPM_ACCUM_CTRLRAM_DATA
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__RESERVED__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__data_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__global_offset__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_se_offset__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_global_offset__SHIFT 0x10
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__RESERVED__SHIFT 0x18
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__global_offset_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_se_offset_MASK 0x0000FF00L
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_global_offset_MASK 0x00FF0000L
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__RESERVED_MASK 0xFF000000L
+//RLC_SPM_ACCUM_STATUS
+#define RLC_SPM_ACCUM_STATUS__NumbSamplesCompleted__SHIFT 0x0
+#define RLC_SPM_ACCUM_STATUS__AccumDone__SHIFT 0x8
+#define RLC_SPM_ACCUM_STATUS__SpmDone__SHIFT 0x9
+#define RLC_SPM_ACCUM_STATUS__AccumOverflow__SHIFT 0xa
+#define RLC_SPM_ACCUM_STATUS__AccumArmed__SHIFT 0xb
+#define RLC_SPM_ACCUM_STATUS__SequenceInProgress__SHIFT 0xc
+#define RLC_SPM_ACCUM_STATUS__FinalSequenceInProgress__SHIFT 0xd
+#define RLC_SPM_ACCUM_STATUS__AllFifosEmpty__SHIFT 0xe
+#define RLC_SPM_ACCUM_STATUS__FSMIsIdle__SHIFT 0xf
+#define RLC_SPM_ACCUM_STATUS__SwaAccumDone__SHIFT 0x10
+#define RLC_SPM_ACCUM_STATUS__SwaSpmDone__SHIFT 0x11
+#define RLC_SPM_ACCUM_STATUS__SwaAccumOverflow__SHIFT 0x12
+#define RLC_SPM_ACCUM_STATUS__SwaAccumArmed__SHIFT 0x13
+#define RLC_SPM_ACCUM_STATUS__AllSegsDone__SHIFT 0x14
+#define RLC_SPM_ACCUM_STATUS__RearmSwaPending__SHIFT 0x15
+#define RLC_SPM_ACCUM_STATUS__RearmSppPending__SHIFT 0x16
+#define RLC_SPM_ACCUM_STATUS__MultiSampleAborted__SHIFT 0x17
+#define RLC_SPM_ACCUM_STATUS__RESERVED__SHIFT 0x18
+#define RLC_SPM_ACCUM_STATUS__NumbSamplesCompleted_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_STATUS__AccumDone_MASK 0x00000100L
+#define RLC_SPM_ACCUM_STATUS__SpmDone_MASK 0x00000200L
+#define RLC_SPM_ACCUM_STATUS__AccumOverflow_MASK 0x00000400L
+#define RLC_SPM_ACCUM_STATUS__AccumArmed_MASK 0x00000800L
+#define RLC_SPM_ACCUM_STATUS__SequenceInProgress_MASK 0x00001000L
+#define RLC_SPM_ACCUM_STATUS__FinalSequenceInProgress_MASK 0x00002000L
+#define RLC_SPM_ACCUM_STATUS__AllFifosEmpty_MASK 0x00004000L
+#define RLC_SPM_ACCUM_STATUS__FSMIsIdle_MASK 0x00008000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumDone_MASK 0x00010000L
+#define RLC_SPM_ACCUM_STATUS__SwaSpmDone_MASK 0x00020000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumOverflow_MASK 0x00040000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumArmed_MASK 0x00080000L
+#define RLC_SPM_ACCUM_STATUS__AllSegsDone_MASK 0x00100000L
+#define RLC_SPM_ACCUM_STATUS__RearmSwaPending_MASK 0x00200000L
+#define RLC_SPM_ACCUM_STATUS__RearmSppPending_MASK 0x00400000L
+#define RLC_SPM_ACCUM_STATUS__MultiSampleAborted_MASK 0x00800000L
+#define RLC_SPM_ACCUM_STATUS__RESERVED_MASK 0xFF000000L
+//RLC_SPM_ACCUM_CTRL
+#define RLC_SPM_ACCUM_CTRL__StrobeResetPerfMonitors__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRL__StrobeStartAccumulation__SHIFT 0x1
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmAccum__SHIFT 0x2
+#define RLC_SPM_ACCUM_CTRL__StrobeResetSpmBlock__SHIFT 0x3
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSpm__SHIFT 0x4
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmSwaAccum__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSwa__SHIFT 0x9
+#define RLC_SPM_ACCUM_CTRL__StrobePerfmonSampleWires__SHIFT 0xa
+#define RLC_SPM_ACCUM_CTRL__RESERVED__SHIFT 0xb
+#define RLC_SPM_ACCUM_CTRL__StrobeResetPerfMonitors_MASK 0x00000001L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartAccumulation_MASK 0x00000002L
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmAccum_MASK 0x00000004L
+#define RLC_SPM_ACCUM_CTRL__StrobeResetSpmBlock_MASK 0x00000008L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSpm_MASK 0x000000F0L
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmSwaAccum_MASK 0x00000100L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSwa_MASK 0x00000200L
+#define RLC_SPM_ACCUM_CTRL__StrobePerfmonSampleWires_MASK 0x00000400L
+#define RLC_SPM_ACCUM_CTRL__RESERVED_MASK 0xFFFFF800L
+//RLC_SPM_ACCUM_MODE
+#define RLC_SPM_ACCUM_MODE__EnableAccum__SHIFT 0x0
+#define RLC_SPM_ACCUM_MODE__EnableSpmWithAccumMode__SHIFT 0x1
+#define RLC_SPM_ACCUM_MODE__EnableSPPMode__SHIFT 0x2
+#define RLC_SPM_ACCUM_MODE__AutoResetPerfmonDisable__SHIFT 0x3
+#define RLC_SPM_ACCUM_MODE__AutoAccumEn__SHIFT 0x5
+#define RLC_SPM_ACCUM_MODE__SwaAutoAccumEn__SHIFT 0x6
+#define RLC_SPM_ACCUM_MODE__AutoSpmEn__SHIFT 0x7
+#define RLC_SPM_ACCUM_MODE__SwaAutoSpmEn__SHIFT 0x8
+#define RLC_SPM_ACCUM_MODE__Globals_LoadOverride__SHIFT 0x9
+#define RLC_SPM_ACCUM_MODE__Globals_SwaLoadOverride__SHIFT 0xa
+#define RLC_SPM_ACCUM_MODE__SE0_LoadOverride__SHIFT 0xb
+#define RLC_SPM_ACCUM_MODE__SE0_SwaLoadOverride__SHIFT 0xc
+#define RLC_SPM_ACCUM_MODE__SE1_LoadOverride__SHIFT 0xd
+#define RLC_SPM_ACCUM_MODE__SE1_SwaLoadOverride__SHIFT 0xe
+#define RLC_SPM_ACCUM_MODE__SE2_LoadOverride__SHIFT 0xf
+#define RLC_SPM_ACCUM_MODE__SE2_SwaLoadOverride__SHIFT 0x10
+#define RLC_SPM_ACCUM_MODE__EnableAccum_MASK 0x00000001L
+#define RLC_SPM_ACCUM_MODE__EnableSpmWithAccumMode_MASK 0x00000002L
+#define RLC_SPM_ACCUM_MODE__EnableSPPMode_MASK 0x00000004L
+#define RLC_SPM_ACCUM_MODE__AutoResetPerfmonDisable_MASK 0x00000008L
+#define RLC_SPM_ACCUM_MODE__AutoAccumEn_MASK 0x00000020L
+#define RLC_SPM_ACCUM_MODE__SwaAutoAccumEn_MASK 0x00000040L
+#define RLC_SPM_ACCUM_MODE__AutoSpmEn_MASK 0x00000080L
+#define RLC_SPM_ACCUM_MODE__SwaAutoSpmEn_MASK 0x00000100L
+#define RLC_SPM_ACCUM_MODE__Globals_LoadOverride_MASK 0x00000200L
+#define RLC_SPM_ACCUM_MODE__Globals_SwaLoadOverride_MASK 0x00000400L
+#define RLC_SPM_ACCUM_MODE__SE0_LoadOverride_MASK 0x00000800L
+#define RLC_SPM_ACCUM_MODE__SE0_SwaLoadOverride_MASK 0x00001000L
+#define RLC_SPM_ACCUM_MODE__SE1_LoadOverride_MASK 0x00002000L
+#define RLC_SPM_ACCUM_MODE__SE1_SwaLoadOverride_MASK 0x00004000L
+#define RLC_SPM_ACCUM_MODE__SE2_LoadOverride_MASK 0x00008000L
+#define RLC_SPM_ACCUM_MODE__SE2_SwaLoadOverride_MASK 0x00010000L
+//RLC_SPM_ACCUM_THRESHOLD
+#define RLC_SPM_ACCUM_THRESHOLD__Threshold__SHIFT 0x0
+#define RLC_SPM_ACCUM_THRESHOLD__Threshold_MASK 0x0000FFFFL
+//RLC_SPM_ACCUM_SAMPLES_REQUESTED
+#define RLC_SPM_ACCUM_SAMPLES_REQUESTED__SamplesRequested__SHIFT 0x0
+#define RLC_SPM_ACCUM_SAMPLES_REQUESTED__SamplesRequested_MASK 0x000000FFL
+//RLC_SPM_ACCUM_DATARAM_WRCOUNT
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__DataRamWrCount__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__RESERVED__SHIFT 0x13
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__DataRamWrCount_MASK 0x0007FFFFL
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__RESERVED_MASK 0xFFF80000L
+//RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__spp_addr_region__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__swa_addr_region__SHIFT 0x8
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__RESERVED__SHIFT 0x10
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__spp_addr_region_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__swa_addr_region_MASK 0x0000FF00L
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PAUSE
+#define RLC_SPM_PAUSE__PAUSE__SHIFT 0x0
+#define RLC_SPM_PAUSE__PAUSED__SHIFT 0x1
+#define RLC_SPM_PAUSE__PAUSE_MASK 0x00000001L
+#define RLC_SPM_PAUSE__PAUSED_MASK 0x00000002L
+//RLC_SPM_STATUS
+#define RLC_SPM_STATUS__CTL_BUSY__SHIFT 0x0
+#define RLC_SPM_STATUS__RSPM_REG_BUSY__SHIFT 0x1
+#define RLC_SPM_STATUS__SPM_RSPM_BUSY__SHIFT 0x2
+#define RLC_SPM_STATUS__SPM_RSPM_IO_BUSY__SHIFT 0x3
+#define RLC_SPM_STATUS__SE_RSPM_IO_BUSY__SHIFT 0x4
+#define RLC_SPM_STATUS__ACCUM_BUSY__SHIFT 0xf
+#define RLC_SPM_STATUS__FSM_MASTER_STATE__SHIFT 0x10
+#define RLC_SPM_STATUS__FSM_MEMORY_STATE__SHIFT 0x14
+#define RLC_SPM_STATUS__CTL_REQ_STATE__SHIFT 0x18
+#define RLC_SPM_STATUS__CTL_RET_STATE__SHIFT 0x1a
+#define RLC_SPM_STATUS__CTL_BUSY_MASK 0x00000001L
+#define RLC_SPM_STATUS__RSPM_REG_BUSY_MASK 0x00000002L
+#define RLC_SPM_STATUS__SPM_RSPM_BUSY_MASK 0x00000004L
+#define RLC_SPM_STATUS__SPM_RSPM_IO_BUSY_MASK 0x00000008L
+#define RLC_SPM_STATUS__SE_RSPM_IO_BUSY_MASK 0x00000FF0L
+#define RLC_SPM_STATUS__ACCUM_BUSY_MASK 0x00008000L
+#define RLC_SPM_STATUS__FSM_MASTER_STATE_MASK 0x000F0000L
+#define RLC_SPM_STATUS__FSM_MEMORY_STATE_MASK 0x00F00000L
+#define RLC_SPM_STATUS__CTL_REQ_STATE_MASK 0x03000000L
+#define RLC_SPM_STATUS__CTL_RET_STATE_MASK 0x04000000L
+//RLC_SPM_GFXCLOCK_LOWCOUNT
+#define RLC_SPM_GFXCLOCK_LOWCOUNT__GFXCLOCK_LOWCOUNT__SHIFT 0x0
+#define RLC_SPM_GFXCLOCK_LOWCOUNT__GFXCLOCK_LOWCOUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_GFXCLOCK_HIGHCOUNT
+#define RLC_SPM_GFXCLOCK_HIGHCOUNT__GFXCLOCK_HIGHCOUNT__SHIFT 0x0
+#define RLC_SPM_GFXCLOCK_HIGHCOUNT__GFXCLOCK_HIGHCOUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_MODE
+#define RLC_SPM_MODE__MODE__SHIFT 0x0
+#define RLC_SPM_MODE__MODE_MASK 0x00000001L
+//RLC_SPM_RSPM_REQ_DATA_LO
+#define RLC_SPM_RSPM_REQ_DATA_LO__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RSPM_REQ_DATA_HI
+#define RLC_SPM_RSPM_REQ_DATA_HI__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_DATA_HI__DATA_MASK 0x00000FFFL
+//RLC_SPM_RSPM_REQ_OP
+#define RLC_SPM_RSPM_REQ_OP__OP__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_OP__OP_MASK 0x0000000FL
+//RLC_SPM_RSPM_RET_DATA
+#define RLC_SPM_RSPM_RET_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_RET_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RSPM_RET_OP
+#define RLC_SPM_RSPM_RET_OP__OP__SHIFT 0x0
+#define RLC_SPM_RSPM_RET_OP__VALID__SHIFT 0x8
+#define RLC_SPM_RSPM_RET_OP__OP_MASK 0x0000000FL
+#define RLC_SPM_RSPM_RET_OP__VALID_MASK 0x00000100L
+//RLC_SPM_SE_RSPM_REQ_DATA_LO
+#define RLC_SPM_SE_RSPM_REQ_DATA_LO__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_RSPM_REQ_DATA_HI
+#define RLC_SPM_SE_RSPM_REQ_DATA_HI__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_DATA_HI__DATA_MASK 0x00000FFFL
+//RLC_SPM_SE_RSPM_REQ_OP
+#define RLC_SPM_SE_RSPM_REQ_OP__OP__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_OP__OP_MASK 0x0000000FL
+//RLC_SPM_SE_RSPM_RET_DATA
+#define RLC_SPM_SE_RSPM_RET_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_RET_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_RSPM_RET_OP
+#define RLC_SPM_SE_RSPM_RET_OP__OP__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_RET_OP__VALID__SHIFT 0x8
+#define RLC_SPM_SE_RSPM_RET_OP__OP_MASK 0x0000000FL
+#define RLC_SPM_SE_RSPM_RET_OP__VALID_MASK 0x00000100L
+//RLC_SPM_RSPM_CMD
+#define RLC_SPM_RSPM_CMD__CMD__SHIFT 0x0
+#define RLC_SPM_RSPM_CMD__CMD_MASK 0x0000000FL
+//RLC_SPM_RSPM_CMD_ACK
+#define RLC_SPM_RSPM_CMD_ACK__SE0_ACK__SHIFT 0x0
+#define RLC_SPM_RSPM_CMD_ACK__SE1_ACK__SHIFT 0x1
+#define RLC_SPM_RSPM_CMD_ACK__SE2_ACK__SHIFT 0x2
+#define RLC_SPM_RSPM_CMD_ACK__SE3_ACK__SHIFT 0x3
+#define RLC_SPM_RSPM_CMD_ACK__SE4_ACK__SHIFT 0x4
+#define RLC_SPM_RSPM_CMD_ACK__SE5_ACK__SHIFT 0x5
+#define RLC_SPM_RSPM_CMD_ACK__SE6_ACK__SHIFT 0x6
+#define RLC_SPM_RSPM_CMD_ACK__SE7_ACK__SHIFT 0x7
+#define RLC_SPM_RSPM_CMD_ACK__SPM_ACK__SHIFT 0x8
+#define RLC_SPM_RSPM_CMD_ACK__SE0_ACK_MASK 0x00000001L
+#define RLC_SPM_RSPM_CMD_ACK__SE1_ACK_MASK 0x00000002L
+#define RLC_SPM_RSPM_CMD_ACK__SE2_ACK_MASK 0x00000004L
+#define RLC_SPM_RSPM_CMD_ACK__SE3_ACK_MASK 0x00000008L
+#define RLC_SPM_RSPM_CMD_ACK__SE4_ACK_MASK 0x00000010L
+#define RLC_SPM_RSPM_CMD_ACK__SE5_ACK_MASK 0x00000020L
+#define RLC_SPM_RSPM_CMD_ACK__SE6_ACK_MASK 0x00000040L
+#define RLC_SPM_RSPM_CMD_ACK__SE7_ACK_MASK 0x00000080L
+#define RLC_SPM_RSPM_CMD_ACK__SPM_ACK_MASK 0x00000100L
+//RLC_SPM_SPARE
+#define RLC_SPM_SPARE__SPARE__SHIFT 0x0
+#define RLC_SPM_SPARE__SPARE_MASK 0xFFFFFFFFL
+//RLC_PERFMON_CNTL
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//RLC_PERFCOUNTER0_SELECT
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000FFL
+//RLC_PERFCOUNTER1_SELECT
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000FFL
+//RLC_GPU_IOV_PERF_CNT_CNTL
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
+//RLC_GPU_IOV_PERF_CNT_WR_ADDR
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_WR_DATA
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_PERF_CNT_RD_ADDR
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_RD_DATA
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_SELECT
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER0_SELECT1
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER1_SELECT
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT1
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER3_SELECT
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERF_COUNTER_CNTL
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
+//GCR_PERFCOUNTER0_SELECT
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCR_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCR_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCR_PERFCOUNTER0_SELECT1
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCR_PERFCOUNTER1_SELECT
+#define GCR_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GCR_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCR_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCR_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCR_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCR_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER0_SELECT
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER0_SELECT1
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER1_SELECT
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER2_SELECT
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER3_SELECT
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER4_SELECT
+#define PA_PH_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER5_SELECT
+#define PA_PH_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER6_SELECT
+#define PA_PH_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER7_SELECT
+#define PA_PH_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER1_SELECT1
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER2_SELECT1
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER3_SELECT1
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER0_SELECT
+#define UTCL1_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER0_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER1_SELECT
+#define UTCL1_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER1_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER2_SELECT
+#define UTCL1_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER2_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER3_SELECT
+#define UTCL1_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER3_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER0_SELECT
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1A_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1A_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER0_SELECT1
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1A_PERFCOUNTER1_SELECT
+#define GL1A_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER2_SELECT
+#define GL1A_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER3_SELECT
+#define GL1A_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER0_SELECT
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1H_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1H_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER0_SELECT1
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1H_PERFCOUNTER1_SELECT
+#define GL1H_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER2_SELECT
+#define GL1H_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER3_SELECT
+#define GL1H_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER0_SELECT
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER0_SELECT1
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHA_PERFCOUNTER1_SELECT
+#define CHA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER2_SELECT
+#define CHA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER3_SELECT
+#define CHA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_SELECT
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GUS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GUS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_SELECT1
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_MODE
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GUS_PERFCOUNTER0_CFG
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GUS_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GUS_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GUS_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GUS_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GUS_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GUS_PERFCOUNTER1_CFG
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GUS_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GUS_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GUS_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GUS_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GUS_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GUS_PERFCOUNTER_RSLT_CNTL
+#define GUS_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GUS_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GUS_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GUS_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GUS_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GUS_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gdfll_gdfll_dec
+//GDFLL_EDC_HYSTERESIS_CNTL
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_EDC_HYSTERESIS_STAT
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: gc_gdfll_se_gdfll_dec
+//GDFLL_SE_EDC_HYSTERESIS_CNTL
+#define GDFLL_SE_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_SE_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_SE_EDC_HYSTERESIS_STAT
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: gc_grtavfs_grtavfs_dec
+//GRTAVFS_RTAVFS_REG_ADDR
+#define GRTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//GRTAVFS_RTAVFS_WR_DATA
+#define GRTAVFS_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_GENERAL_0
+#define GRTAVFS_GENERAL_0__DATA__SHIFT 0x0
+#define GRTAVFS_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//GRTAVFS_RTAVFS_RD_DATA
+#define GRTAVFS_RTAVFS_RD_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_RTAVFS_RD_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_RTAVFS_REG_CTRL
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_WR_EN__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_RD_EN__SHIFT 0x1
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_WR_EN_MASK 0x00000001L
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_RD_EN_MASK 0x00000002L
+//GRTAVFS_RTAVFS_REG_STATUS
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_WR_ACK__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID__SHIFT 0x1
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_WR_ACK_MASK 0x00000001L
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID_MASK 0x00000002L
+//GRTAVFS_TARG_FREQ
+#define GRTAVFS_TARG_FREQ__TARGET_FREQUENCY__SHIFT 0x0
+#define GRTAVFS_TARG_FREQ__REQUEST__SHIFT 0x10
+#define GRTAVFS_TARG_FREQ__RESERVED__SHIFT 0x11
+#define GRTAVFS_TARG_FREQ__TARGET_FREQUENCY_MASK 0x0000FFFFL
+#define GRTAVFS_TARG_FREQ__REQUEST_MASK 0x00010000L
+#define GRTAVFS_TARG_FREQ__RESERVED_MASK 0xFFFE0000L
+//GRTAVFS_TARG_VOLT
+#define GRTAVFS_TARG_VOLT__TARGET_VOLTAGE__SHIFT 0x0
+#define GRTAVFS_TARG_VOLT__VALID__SHIFT 0xa
+#define GRTAVFS_TARG_VOLT__RESERVED__SHIFT 0xb
+#define GRTAVFS_TARG_VOLT__TARGET_VOLTAGE_MASK 0x000003FFL
+#define GRTAVFS_TARG_VOLT__VALID_MASK 0x00000400L
+#define GRTAVFS_TARG_VOLT__RESERVED_MASK 0xFFFFF800L
+//GRTAVFS_SOFT_RESET
+#define GRTAVFS_SOFT_RESET__RESETN_OVERRIDE__SHIFT 0x0
+#define GRTAVFS_SOFT_RESET__RESERVED__SHIFT 0x1
+#define GRTAVFS_SOFT_RESET__RESETN_OVERRIDE_MASK 0x00000001L
+#define GRTAVFS_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//GRTAVFS_PSM_CNTL
+#define GRTAVFS_PSM_CNTL__PSM_COUNT__SHIFT 0x0
+#define GRTAVFS_PSM_CNTL__PSM_SAMPLE_EN__SHIFT 0xe
+#define GRTAVFS_PSM_CNTL__RESERVED__SHIFT 0xf
+#define GRTAVFS_PSM_CNTL__PSM_COUNT_MASK 0x00003FFFL
+#define GRTAVFS_PSM_CNTL__PSM_SAMPLE_EN_MASK 0x00004000L
+#define GRTAVFS_PSM_CNTL__RESERVED_MASK 0xFFFF8000L
+//GRTAVFS_CLK_CNTL
+#define GRTAVFS_CLK_CNTL__GRTAVFS_MUX_CLK_SEL__SHIFT 0x0
+#define GRTAVFS_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL__SHIFT 0x1
+#define GRTAVFS_CLK_CNTL__RESERVED__SHIFT 0x2
+#define GRTAVFS_CLK_CNTL__GRTAVFS_MUX_CLK_SEL_MASK 0x00000001L
+#define GRTAVFS_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL_MASK 0x00000002L
+#define GRTAVFS_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_grtavfs_se_grtavfs_dec
+//GRTAVFS_SE_RTAVFS_REG_ADDR
+#define GRTAVFS_SE_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//GRTAVFS_SE_RTAVFS_WR_DATA
+#define GRTAVFS_SE_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_GENERAL_0
+#define GRTAVFS_SE_GENERAL_0__DATA__SHIFT 0x0
+#define GRTAVFS_SE_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_RTAVFS_RD_DATA
+#define GRTAVFS_SE_RTAVFS_RD_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_RD_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_RTAVFS_REG_CTRL
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_WR_EN__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_RD_EN__SHIFT 0x1
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_WR_EN_MASK 0x00000001L
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_RD_EN_MASK 0x00000002L
+//GRTAVFS_SE_RTAVFS_REG_STATUS
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_WR_ACK__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID__SHIFT 0x1
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_WR_ACK_MASK 0x00000001L
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID_MASK 0x00000002L
+//GRTAVFS_SE_TARG_FREQ
+#define GRTAVFS_SE_TARG_FREQ__TARGET_FREQUENCY__SHIFT 0x0
+#define GRTAVFS_SE_TARG_FREQ__REQUEST__SHIFT 0x10
+#define GRTAVFS_SE_TARG_FREQ__RESERVED__SHIFT 0x11
+#define GRTAVFS_SE_TARG_FREQ__TARGET_FREQUENCY_MASK 0x0000FFFFL
+#define GRTAVFS_SE_TARG_FREQ__REQUEST_MASK 0x00010000L
+#define GRTAVFS_SE_TARG_FREQ__RESERVED_MASK 0xFFFE0000L
+//GRTAVFS_SE_TARG_VOLT
+#define GRTAVFS_SE_TARG_VOLT__TARGET_VOLTAGE__SHIFT 0x0
+#define GRTAVFS_SE_TARG_VOLT__VALID__SHIFT 0xa
+#define GRTAVFS_SE_TARG_VOLT__RESERVED__SHIFT 0xb
+#define GRTAVFS_SE_TARG_VOLT__TARGET_VOLTAGE_MASK 0x000003FFL
+#define GRTAVFS_SE_TARG_VOLT__VALID_MASK 0x00000400L
+#define GRTAVFS_SE_TARG_VOLT__RESERVED_MASK 0xFFFFF800L
+//GRTAVFS_SE_SOFT_RESET
+#define GRTAVFS_SE_SOFT_RESET__RESETN_OVERRIDE__SHIFT 0x0
+#define GRTAVFS_SE_SOFT_RESET__RESERVED__SHIFT 0x1
+#define GRTAVFS_SE_SOFT_RESET__RESETN_OVERRIDE_MASK 0x00000001L
+#define GRTAVFS_SE_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//GRTAVFS_SE_PSM_CNTL
+#define GRTAVFS_SE_PSM_CNTL__PSM_COUNT__SHIFT 0x0
+#define GRTAVFS_SE_PSM_CNTL__PSM_SAMPLE_EN__SHIFT 0xe
+#define GRTAVFS_SE_PSM_CNTL__RESERVED__SHIFT 0xf
+#define GRTAVFS_SE_PSM_CNTL__PSM_COUNT_MASK 0x00003FFFL
+#define GRTAVFS_SE_PSM_CNTL__PSM_SAMPLE_EN_MASK 0x00004000L
+#define GRTAVFS_SE_PSM_CNTL__RESERVED_MASK 0xFFFF8000L
+//GRTAVFS_SE_CLK_CNTL
+#define GRTAVFS_SE_CLK_CNTL__GRTAVFS_MUX_CLK_SEL__SHIFT 0x0
+#define GRTAVFS_SE_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL__SHIFT 0x1
+#define GRTAVFS_SE_CLK_CNTL__RESERVED__SHIFT 0x2
+#define GRTAVFS_SE_CLK_CNTL__GRTAVFS_MUX_CLK_SEL_MASK 0x00000001L
+#define GRTAVFS_SE_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL_MASK 0x00000002L
+#define GRTAVFS_SE_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_grtavfsdec
+//RTAVFS_RTAVFS_REG_ADDR
+#define RTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define RTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//RTAVFS_RTAVFS_WR_DATA
+#define RTAVFS_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define RTAVFS_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_hypdec
+//GFX_PIPE_PRIORITY
+#define GFX_PIPE_PRIORITY__HP_PIPE_SELECT__SHIFT 0x0
+#define GFX_PIPE_PRIORITY__HP_PIPE_SELECT_MASK 0x00000001L
+//RLC_GPU_IOV_VF_ENABLE
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG6
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
+#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
+#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//RLC_SDMA0_STATUS
+#define RLC_SDMA0_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA0_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA1_STATUS
+#define RLC_SDMA1_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA1_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA2_STATUS
+#define RLC_SDMA2_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA2_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA3_STATUS
+#define RLC_SDMA3_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA3_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA0_BUSY_STATUS
+#define RLC_SDMA0_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA0_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA1_BUSY_STATUS
+#define RLC_SDMA1_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA1_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA2_BUSY_STATUS
+#define RLC_SDMA2_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA2_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA3_BUSY_STATUS
+#define RLC_SDMA3_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA3_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_CFG_REG8
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_0
+#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_1
+#define RLC_RLCV_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_CTRL
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x2
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x3
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x4
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x5
+#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x6
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000004L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000008L
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00000010L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00000020L
+#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_RLCV_TIMER_STAT
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_RLCV_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0xa
+#define RLC_RLCV_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0xb
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_RLCV_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00000400L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00000800L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
+//RLC_GPU_IOV_VF_MASK
+#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
+#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x7FFFFFFFL
+//RLC_HYP_SEMAPHORE_0
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_1
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_BUSY_CLK_CNTL
+#define RLC_BUSY_CLK_CNTL__BUSY_OFF_LATENCY__SHIFT 0x0
+#define RLC_BUSY_CLK_CNTL__GRBM_BUSY_OFF_LATENCY__SHIFT 0x8
+#define RLC_BUSY_CLK_CNTL__BUSY_OFF_LATENCY_MASK 0x0000003FL
+#define RLC_BUSY_CLK_CNTL__GRBM_BUSY_OFF_LATENCY_MASK 0x00003F00L
+//RLC_CLK_CNTL
+#define RLC_CLK_CNTL__RLC_SRM_ICG_OVERRIDE__SHIFT 0x0
+#define RLC_CLK_CNTL__RLC_IMU_ICG_OVERRIDE__SHIFT 0x1
+#define RLC_CLK_CNTL__RLC_SPM_ICG_OVERRIDE__SHIFT 0x2
+#define RLC_CLK_CNTL__RLC_SPM_RSPM_ICG_OVERRIDE__SHIFT 0x3
+#define RLC_CLK_CNTL__RLC_GPM_ICG_OVERRIDE__SHIFT 0x4
+#define RLC_CLK_CNTL__RLC_CMN_ICG_OVERRIDE__SHIFT 0x5
+#define RLC_CLK_CNTL__RLC_TC_ICG_OVERRIDE__SHIFT 0x6
+#define RLC_CLK_CNTL__RLC_REG_ICG_OVERRIDE__SHIFT 0x7
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE__SHIFT 0x8
+#define RLC_CLK_CNTL__RESERVED_9__SHIFT 0x9
+#define RLC_CLK_CNTL__RLC_SPP_ICG_OVERRIDE__SHIFT 0xa
+#define RLC_CLK_CNTL__RESERVED_11__SHIFT 0xb
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE__SHIFT 0xc
+#define RLC_CLK_CNTL__RLC_DFLL_ICG_OVERRIDE__SHIFT 0xd
+#define RLC_CLK_CNTL__RESERVED_15__SHIFT 0xf
+#define RLC_CLK_CNTL__RLC_LX6_CORE_ICG_OVERRIDE__SHIFT 0x10
+#define RLC_CLK_CNTL__RLC_LX6_ICG_OVERRIDE__SHIFT 0x11
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE__SHIFT 0x12
+#define RLC_CLK_CNTL__RLC_IH_GASKET_ICG_OVERRIDE__SHIFT 0x13
+#define RLC_CLK_CNTL__RESERVED__SHIFT 0x14
+#define RLC_CLK_CNTL__RLC_SRM_ICG_OVERRIDE_MASK 0x00000001L
+#define RLC_CLK_CNTL__RLC_IMU_ICG_OVERRIDE_MASK 0x00000002L
+#define RLC_CLK_CNTL__RLC_SPM_ICG_OVERRIDE_MASK 0x00000004L
+#define RLC_CLK_CNTL__RLC_SPM_RSPM_ICG_OVERRIDE_MASK 0x00000008L
+#define RLC_CLK_CNTL__RLC_GPM_ICG_OVERRIDE_MASK 0x00000010L
+#define RLC_CLK_CNTL__RLC_CMN_ICG_OVERRIDE_MASK 0x00000020L
+#define RLC_CLK_CNTL__RLC_TC_ICG_OVERRIDE_MASK 0x00000040L
+#define RLC_CLK_CNTL__RLC_REG_ICG_OVERRIDE_MASK 0x00000080L
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK 0x00000100L
+#define RLC_CLK_CNTL__RESERVED_9_MASK 0x00000200L
+#define RLC_CLK_CNTL__RLC_SPP_ICG_OVERRIDE_MASK 0x00000400L
+#define RLC_CLK_CNTL__RESERVED_11_MASK 0x00000800L
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE_MASK 0x00001000L
+#define RLC_CLK_CNTL__RLC_DFLL_ICG_OVERRIDE_MASK 0x00002000L
+#define RLC_CLK_CNTL__RESERVED_15_MASK 0x00008000L
+#define RLC_CLK_CNTL__RLC_LX6_CORE_ICG_OVERRIDE_MASK 0x00010000L
+#define RLC_CLK_CNTL__RLC_LX6_ICG_OVERRIDE_MASK 0x00020000L
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE_MASK 0x00040000L
+#define RLC_CLK_CNTL__RLC_IH_GASKET_ICG_OVERRIDE_MASK 0x00080000L
+#define RLC_CLK_CNTL__RESERVED_MASK 0xFFF00000L
+//RLC_PACE_TIMER_STAT
+#define RLC_PACE_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_PACE_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_PACE_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_PACE_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_PACE_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_PACE_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0xa
+#define RLC_PACE_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0xb
+#define RLC_PACE_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_PACE_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_PACE_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_PACE_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_PACE_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_PACE_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00000400L
+#define RLC_PACE_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00000800L
+//RLC_GPU_IOV_SCH_BLOCK
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x0000FF00L
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG1
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
+//RLC_GPU_IOV_CFG_REG2
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPU_IOV_VM_BUSY_STATUS
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_ACTIVE_FCN_ID
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_7_4__SHIFT 0x4
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__ACTIVE_FCN_ID_STATUS__SHIFT 0x8
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_30_12__SHIFT 0xc
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_7_4_MASK 0x000000F0L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__ACTIVE_FCN_ID_STATUS_MASK 0x00000F00L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_30_12_MASK 0x7FFFF000L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//RLC_GPU_IOV_SCH_3
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_1
+#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_2
+#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_FORCE
+#define RLC_PACE_INT_FORCE__FORCE_INT__SHIFT 0x0
+#define RLC_PACE_INT_FORCE__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_CLEAR
+#define RLC_PACE_INT_CLEAR__SMU_STRETCH_PCC_CLEAR__SHIFT 0x0
+#define RLC_PACE_INT_CLEAR__SMU_PCC_CLEAR__SHIFT 0x1
+#define RLC_PACE_INT_CLEAR__SMU_STRETCH_PCC_CLEAR_MASK 0x00000001L
+#define RLC_PACE_INT_CLEAR__SMU_PCC_CLEAR_MASK 0x00000002L
+//RLC_GPU_IOV_INT_STAT
+#define RLC_GPU_IOV_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_IH_COOKIE
+#define RLC_IH_COOKIE__DATA__SHIFT 0x0
+#define RLC_IH_COOKIE__DATA_MASK 0xFFFFFFFFL
+//RLC_IH_COOKIE_CNTL
+#define RLC_IH_COOKIE_CNTL__CREDIT__SHIFT 0x0
+#define RLC_IH_COOKIE_CNTL__RESET_COUNTER__SHIFT 0x2
+#define RLC_IH_COOKIE_CNTL__CREDIT_MASK 0x00000003L
+#define RLC_IH_COOKIE_CNTL__RESET_COUNTER_MASK 0x00000004L
+//RLC_HYP_RLCG_UCODE_CHKSUM
+#define RLC_HYP_RLCG_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCG_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_HYP_RLCP_UCODE_CHKSUM
+#define RLC_HYP_RLCP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_HYP_RLCV_UCODE_CHKSUM
+#define RLC_HYP_RLCV_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCV_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_CNTL
+#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
+//RLC_GPU_IOV_F32_RESET
+#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
+#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
+//RLC_GPU_IOV_UCODE_ADDR
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_GPU_IOV_UCODE_DATA
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SMU_RESPONSE
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_INVALIDATE_CACHE
+#define RLC_GPU_IOV_F32_INVALIDATE_CACHE__INVALIDATE_CACHE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_INVALIDATE_CACHE__INVALIDATE_CACHE_MASK 0x00000001L
+//RLC_GPU_IOV_VIRT_RESET_REQ
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
+//RLC_GPU_IOV_RLC_RESPONSE
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_DISABLE
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_INT__SHIFT 0x0
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_FORCE
+#define RLC_GPU_IOV_INT_FORCE__FORCE_INT__SHIFT 0x0
+#define RLC_GPU_IOV_INT_FORCE__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCRATCH_ADDR
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_GPU_IOV_SCRATCH_DATA
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_HYP_SEMAPHORE_2
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_3
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_LX6_SCRATCH_ADDR
+#define RLC_LX6_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_SCRATCH_ADDR__ADDR_MASK 0x000000FFL
+//RLC_LX6_CORE1_SCRATCH_ADDR
+#define RLC_LX6_CORE1_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_CORE1_SCRATCH_ADDR__ADDR_MASK 0x000000FFL
+//RLC_GPM_UCODE_ADDR
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
+//RLC_GPM_UCODE_DATA
+#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_IRAM_ADDR
+#define RLC_GPM_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_GPM_IRAM_DATA
+#define RLC_GPM_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_IRAM_ADDR
+#define RLC_RLCP_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_RLCP_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_RLCP_IRAM_DATA
+#define RLC_RLCP_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_RLCP_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_IRAM_ADDR
+#define RLC_RLCV_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_RLCV_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_RLCV_IRAM_DATA
+#define RLC_RLCV_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_RLCV_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_LX6_DRAM_ADDR
+#define RLC_LX6_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_DRAM_ADDR__ADDR_MASK 0x000007FFL
+//RLC_LX6_DRAM_DATA
+#define RLC_LX6_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_LX6_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_LX6_IRAM_ADDR
+#define RLC_LX6_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_IRAM_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_LX6_IRAM_DATA
+#define RLC_LX6_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_LX6_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_UCODE_ADDR
+#define RLC_PACE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_PACE_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_PACE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_PACE_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_PACE_UCODE_DATA
+#define RLC_PACE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_PACE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_SCRATCH_ADDR
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_GPM_SCRATCH_DATA
+#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_DRAM_ADDR
+#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xd
+#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x00001FFFL
+#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFE000L
+//RLC_SRM_DRAM_DATA
+#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_ARAM_ADDR
+#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xd
+#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x00001FFFL
+#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFE000L
+//RLC_SRM_ARAM_DATA
+#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_SCRATCH_ADDR
+#define RLC_PACE_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_PACE_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_PACE_SCRATCH_DATA
+#define RLC_PACE_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_PACE_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GTS_OFFSET_LSB
+#define RLC_GTS_OFFSET_LSB__DATA__SHIFT 0x0
+#define RLC_GTS_OFFSET_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_GTS_OFFSET_MSB
+#define RLC_GTS_OFFSET_MSB__DATA__SHIFT 0x0
+#define RLC_GTS_OFFSET_MSB__DATA_MASK 0xFFFFFFFFL
+//GL2_PIPE_STEER_0
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q0__SHIFT 0x0
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q0__SHIFT 0x4
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q0__SHIFT 0x8
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q0__SHIFT 0xc
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q1__SHIFT 0x10
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q1__SHIFT 0x14
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q1__SHIFT 0x18
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q1__SHIFT 0x1c
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q0_MASK 0x00000007L
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q0_MASK 0x00000070L
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q0_MASK 0x00000700L
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q0_MASK 0x00007000L
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q1_MASK 0x00070000L
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q1_MASK 0x00700000L
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q1_MASK 0x07000000L
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q1_MASK 0x70000000L
+//GL2_PIPE_STEER_1
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q2__SHIFT 0x0
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q2__SHIFT 0x4
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q2__SHIFT 0x8
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q2__SHIFT 0xc
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q3__SHIFT 0x10
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q3__SHIFT 0x14
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q3__SHIFT 0x18
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q3__SHIFT 0x1c
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q2_MASK 0x00000007L
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q2_MASK 0x00000070L
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q2_MASK 0x00000700L
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q2_MASK 0x00007000L
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q3_MASK 0x00070000L
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q3_MASK 0x00700000L
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q3_MASK 0x07000000L
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q3_MASK 0x70000000L
+//GL2_PIPE_STEER_2
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q0__SHIFT 0x0
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q0__SHIFT 0x4
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q0__SHIFT 0x8
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q0__SHIFT 0xc
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q1__SHIFT 0x10
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q1__SHIFT 0x14
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q1__SHIFT 0x18
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q1__SHIFT 0x1c
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q0_MASK 0x00000007L
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q0_MASK 0x00000070L
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q0_MASK 0x00000700L
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q0_MASK 0x00007000L
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q1_MASK 0x00070000L
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q1_MASK 0x00700000L
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q1_MASK 0x07000000L
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q1_MASK 0x70000000L
+//GL2_PIPE_STEER_3
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q2__SHIFT 0x0
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q2__SHIFT 0x4
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q2__SHIFT 0x8
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q2__SHIFT 0xc
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q3__SHIFT 0x10
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q3__SHIFT 0x14
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q3__SHIFT 0x18
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q3__SHIFT 0x1c
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q2_MASK 0x00000007L
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q2_MASK 0x00000070L
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q2_MASK 0x00000700L
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q2_MASK 0x00007000L
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q3_MASK 0x00070000L
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q3_MASK 0x00700000L
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q3_MASK 0x07000000L
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q3_MASK 0x70000000L
+//GL1_PIPE_STEER
+#define GL1_PIPE_STEER__PIPE0__SHIFT 0x0
+#define GL1_PIPE_STEER__PIPE1__SHIFT 0x2
+#define GL1_PIPE_STEER__PIPE2__SHIFT 0x4
+#define GL1_PIPE_STEER__PIPE3__SHIFT 0x6
+#define GL1_PIPE_STEER__PIPE0_MASK 0x00000003L
+#define GL1_PIPE_STEER__PIPE1_MASK 0x0000000CL
+#define GL1_PIPE_STEER__PIPE2_MASK 0x00000030L
+#define GL1_PIPE_STEER__PIPE3_MASK 0x000000C0L
+//CH_PIPE_STEER
+#define CH_PIPE_STEER__PIPE0__SHIFT 0x0
+#define CH_PIPE_STEER__PIPE1__SHIFT 0x2
+#define CH_PIPE_STEER__PIPE2__SHIFT 0x4
+#define CH_PIPE_STEER__PIPE3__SHIFT 0x6
+#define CH_PIPE_STEER__PIPE0_MASK 0x00000003L
+#define CH_PIPE_STEER__PIPE1_MASK 0x0000000CL
+#define CH_PIPE_STEER__PIPE2_MASK 0x00000030L
+#define CH_PIPE_STEER__PIPE3_MASK 0x000000C0L
+//GC_USER_SHADER_ARRAY_CONFIG
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT 0x10
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK 0xFFFF0000L
+//GC_USER_PRIM_CONFIG
+#define GC_USER_PRIM_CONFIG__INACTIVE_PA__SHIFT 0x4
+#define GC_USER_PRIM_CONFIG__INACTIVE_PA_MASK 0x000FFFF0L
+//GC_USER_SA_UNIT_DISABLE
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x00FFFF00L
+//GC_USER_RB_REDUNDANCY
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//GC_USER_RB_BACKEND_DISABLE
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x4
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xFFFFFFF0L
+//GC_USER_RMI_REDUNDANCY
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_0__SHIFT 0x1
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_1__SHIFT 0x2
+#define GC_USER_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE__SHIFT 0x3
+#define GC_USER_RMI_REDUNDANCY__REPAIR_ID_SWAP__SHIFT 0x4
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_0_MASK 0x00000002L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_1_MASK 0x00000004L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE_MASK 0x00000008L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_ID_SWAP_MASK 0x00000010L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//GC_USER_SHADER_RATE_CONFIG
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+//RLC_GPU_IOV_SDMA0_STATUS
+#define RLC_GPU_IOV_SDMA0_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_STATUS
+#define RLC_GPU_IOV_SDMA1_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA2_STATUS
+#define RLC_GPU_IOV_SDMA2_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_STATUS
+#define RLC_GPU_IOV_SDMA3_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_STATUS
+#define RLC_GPU_IOV_SDMA4_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_STATUS
+#define RLC_GPU_IOV_SDMA5_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_STATUS
+#define RLC_GPU_IOV_SDMA6_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_STATUS
+#define RLC_GPU_IOV_SDMA7_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA0_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA2_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cphypdec
+//CP_HYP_PFP_UCODE_ADDR
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_PFP_UCODE_ADDR
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_PFP_UCODE_DATA
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_PFP_UCODE_DATA
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_ADDR
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_ME_RAM_RADDR
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x000FFFFFL
+//CP_ME_RAM_WADDR
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x001FFFFFL
+//CP_HYP_ME_UCODE_DATA
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_ME_RAM_DATA
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC1_UCODE_ADDR
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_MEC_ME1_UCODE_ADDR
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_MEC1_UCODE_DATA
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME1_UCODE_DATA
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC2_UCODE_ADDR
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_MEC_ME2_UCODE_ADDR
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_MEC2_UCODE_DATA
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_UCODE_DATA
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_PFP_UCODE_CHKSUM
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_CHKSUM
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME1_UCODE_CHKSUM
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME2_UCODE_CHKSUM
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_PFP_IC_BASE_LO
+#define CP_PFP_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_PFP_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_PFP_IC_BASE_HI
+#define CP_PFP_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_PFP_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_PFP_IC_BASE_CNTL
+#define CP_PFP_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_PFP_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_PFP_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_PFP_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_PFP_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_PFP_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_PFP_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_PFP_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_PFP_IC_OP_CNTL
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_PFP_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_PFP_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_PFP_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_PFP_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_ME_IC_BASE_LO
+#define CP_ME_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_ME_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_ME_IC_BASE_HI
+#define CP_ME_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_ME_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_ME_IC_BASE_CNTL
+#define CP_ME_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_ME_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_ME_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_ME_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_ME_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_ME_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_ME_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_ME_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_ME_IC_OP_CNTL
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_ME_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_ME_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_ME_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_ME_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_CPC_IC_BASE_LO
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_CPC_IC_BASE_HI
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_CPC_IC_BASE_CNTL
+#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_CPC_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_CPC_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_CPC_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_CPC_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_IC_BASE_LO
+#define CP_MES_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_MES_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_MES_MIBASE_LO
+#define CP_MES_MIBASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_MES_MIBASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_MES_IC_BASE_HI
+#define CP_MES_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_MES_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MIBASE_HI
+#define CP_MES_MIBASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_MES_MIBASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_IC_BASE_CNTL
+#define CP_MES_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MES_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_MES_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MES_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MES_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_DC_BASE_LO
+#define CP_MES_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_MES_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_MES_MDBASE_LO
+#define CP_MES_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_DC_BASE_HI
+#define CP_MES_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_MES_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MDBASE_HI
+#define CP_MES_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MIBOUND_LO
+#define CP_MES_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MES_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIBOUND_HI
+#define CP_MES_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MES_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_MES_MDBOUND_LO
+#define CP_MES_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MES_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MES_MDBOUND_HI
+#define CP_MES_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MES_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_BASE0_LO
+#define CP_GFX_RS64_DC_BASE0_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_DC_BASE0_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_DC_BASE1_LO
+#define CP_GFX_RS64_DC_BASE1_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_DC_BASE1_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_DC_BASE0_HI
+#define CP_GFX_RS64_DC_BASE0_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE0_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_DC_BASE1_HI
+#define CP_GFX_RS64_DC_BASE1_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE1_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_MIBOUND_LO
+#define CP_GFX_RS64_MIBOUND_LO__BOUND__SHIFT 0x0
+#define CP_GFX_RS64_MIBOUND_LO__BOUND_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIBOUND_HI
+#define CP_GFX_RS64_MIBOUND_HI__BOUND__SHIFT 0x0
+#define CP_GFX_RS64_MIBOUND_HI__BOUND_MASK 0xFFFFFFFFL
+//CP_MEC_DC_BASE_LO
+#define CP_MEC_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_MEC_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_MDBASE_LO
+#define CP_MEC_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_DC_BASE_HI
+#define CP_MEC_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_MEC_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_MDBASE_HI
+#define CP_MEC_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_MIBOUND_LO
+#define CP_MEC_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MEC_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MIBOUND_HI
+#define CP_MEC_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MEC_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_MEC_MDBOUND_LO
+#define CP_MEC_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MEC_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MDBOUND_HI
+#define CP_MEC_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MEC_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_grbm_hypdec
+//GRBM_GFX_INDEX_SR_SELECT
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_INDEX_SR_DATA
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_DATA__SA_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX_SR_DATA__SA_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX_SR_DATA__SA_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX_SR_DATA__SA_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_SELECT
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_DATA
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
+//GC_IH_COOKIE_0_PTR
+#define GC_IH_COOKIE_0_PTR__ADDR__SHIFT 0x0
+#define GC_IH_COOKIE_0_PTR__ADDR_MASK 0x000FFFFFL
+//GRBM_SE_REMAP_CNTL
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_EN__SHIFT 0x0
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP__SHIFT 0x1
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_EN__SHIFT 0x4
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP__SHIFT 0x5
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_EN__SHIFT 0x8
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP__SHIFT 0x9
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_EN__SHIFT 0xc
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP__SHIFT 0xd
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_EN__SHIFT 0x10
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP__SHIFT 0x11
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_EN__SHIFT 0x14
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP__SHIFT 0x15
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_EN__SHIFT 0x18
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP__SHIFT 0x19
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_EN__SHIFT 0x1c
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP__SHIFT 0x1d
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_EN_MASK 0x00000001L
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_MASK 0x0000000EL
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_EN_MASK 0x00000010L
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_MASK 0x000000E0L
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_EN_MASK 0x00000100L
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_MASK 0x00000E00L
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_EN_MASK 0x00001000L
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_MASK 0x0000E000L
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_EN_MASK 0x00010000L
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_MASK 0x000E0000L
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_EN_MASK 0x00100000L
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_MASK 0x00E00000L
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_EN_MASK 0x01000000L
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_MASK 0x0E000000L
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_EN_MASK 0x10000000L
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_MASK 0xE0000000L
+
+
+// addressBlock: gc_gcvmsharedhvdec
+//GCMC_VM_FB_SIZE_OFFSET_VF0
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF1
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF2
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF3
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF4
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF5
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF6
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF7
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF8
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF9
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF10
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF11
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF12
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF13
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF14
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF15
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_rlcdec
+//RLC_CNTL
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
+#define RLC_CNTL__RESERVED__SHIFT 0x4
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_F32_UCODE_VERSION
+#define RLC_F32_UCODE_VERSION__THREAD0_VERSION__SHIFT 0x0
+#define RLC_F32_UCODE_VERSION__THREAD1_VERSION__SHIFT 0xa
+#define RLC_F32_UCODE_VERSION__THREAD2_VERSION__SHIFT 0x14
+#define RLC_F32_UCODE_VERSION__THREAD0_VERSION_MASK 0x000003FFL
+#define RLC_F32_UCODE_VERSION__THREAD1_VERSION_MASK 0x000FFC00L
+#define RLC_F32_UCODE_VERSION__THREAD2_VERSION_MASK 0x3FF00000L
+//RLC_STAT
+#define RLC_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x1
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x2
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x3
+#define RLC_STAT__MC_BUSY__SHIFT 0x4
+#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
+#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
+#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
+#define RLC_STAT__RESERVED__SHIFT 0x8
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000008L
+#define RLC_STAT__MC_BUSY_MASK 0x00000010L
+#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
+#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
+#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
+#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
+//RLC_REFCLOCK_TIMESTAMP_LSB
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
+//RLC_REFCLOCK_TIMESTAMP_MSB
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_0
+#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_1
+#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_2
+#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_3
+#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_4
+#define RLC_GPM_TIMER_INT_4__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_4__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_CTRL
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
+#define RLC_GPM_TIMER_CTRL__TIMER_4_EN__SHIFT 0x4
+#define RLC_GPM_TIMER_CTRL__RESERVED_1__SHIFT 0x5
+#define RLC_GPM_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x8
+#define RLC_GPM_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x9
+#define RLC_GPM_TIMER_CTRL__TIMER_2_AUTO_REARM__SHIFT 0xa
+#define RLC_GPM_TIMER_CTRL__TIMER_3_AUTO_REARM__SHIFT 0xb
+#define RLC_GPM_TIMER_CTRL__TIMER_4_AUTO_REARM__SHIFT 0xc
+#define RLC_GPM_TIMER_CTRL__RESERVED_2__SHIFT 0xd
+#define RLC_GPM_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x10
+#define RLC_GPM_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x11
+#define RLC_GPM_TIMER_CTRL__TIMER_2_INT_CLEAR__SHIFT 0x12
+#define RLC_GPM_TIMER_CTRL__TIMER_3_INT_CLEAR__SHIFT 0x13
+#define RLC_GPM_TIMER_CTRL__TIMER_4_INT_CLEAR__SHIFT 0x14
+#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x15
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_EN_MASK 0x00000010L
+#define RLC_GPM_TIMER_CTRL__RESERVED_1_MASK 0x000000E0L
+#define RLC_GPM_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000100L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000200L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_AUTO_REARM_MASK 0x00000400L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_AUTO_REARM_MASK 0x00000800L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_AUTO_REARM_MASK 0x00001000L
+#define RLC_GPM_TIMER_CTRL__RESERVED_2_MASK 0x0000E000L
+#define RLC_GPM_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00010000L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00020000L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_INT_CLEAR_MASK 0x00040000L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_INT_CLEAR_MASK 0x00080000L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_INT_CLEAR_MASK 0x00100000L
+#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_TIMER_STAT
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
+#define RLC_GPM_TIMER_STAT__TIMER_4_STAT__SHIFT 0x4
+#define RLC_GPM_TIMER_STAT__RESERVED_1__SHIFT 0x5
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC__SHIFT 0xa
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC__SHIFT 0xb
+#define RLC_GPM_TIMER_STAT__TIMER_4_ENABLE_SYNC__SHIFT 0xc
+#define RLC_GPM_TIMER_STAT__RESERVED_2__SHIFT 0xd
+#define RLC_GPM_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0x10
+#define RLC_GPM_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0x11
+#define RLC_GPM_TIMER_STAT__TIMER_2_AUTO_REARM_SYNC__SHIFT 0x12
+#define RLC_GPM_TIMER_STAT__TIMER_3_AUTO_REARM_SYNC__SHIFT 0x13
+#define RLC_GPM_TIMER_STAT__TIMER_4_AUTO_REARM_SYNC__SHIFT 0x14
+#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0x15
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
+#define RLC_GPM_TIMER_STAT__TIMER_4_STAT_MASK 0x00000010L
+#define RLC_GPM_TIMER_STAT__RESERVED_1_MASK 0x000000E0L
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC_MASK 0x00000400L
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC_MASK 0x00000800L
+#define RLC_GPM_TIMER_STAT__TIMER_4_ENABLE_SYNC_MASK 0x00001000L
+#define RLC_GPM_TIMER_STAT__RESERVED_2_MASK 0x0000E000L
+#define RLC_GPM_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00010000L
+#define RLC_GPM_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00020000L
+#define RLC_GPM_TIMER_STAT__TIMER_2_AUTO_REARM_SYNC_MASK 0x00040000L
+#define RLC_GPM_TIMER_STAT__TIMER_3_AUTO_REARM_SYNC_MASK 0x00080000L
+#define RLC_GPM_TIMER_STAT__TIMER_4_AUTO_REARM_SYNC_MASK 0x00100000L
+#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_LEGACY_INT_STAT
+#define RLC_GPM_LEGACY_INT_STAT__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_STAT__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_STAT__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_STAT__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_STAT__STORE_LOAD_TIMER3_EXPIRED_T0__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_STAT__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_STAT__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_STAT__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_STAT__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_STAT__STORE_LOAD_TIMER3_EXPIRED_T0_MASK 0x00000010L
+//RLC_GPM_LEGACY_INT_CLEAR
+#define RLC_GPM_LEGACY_INT_CLEAR__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_CLEAR__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_CLEAR__RESERVED_4__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_CLEAR__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_CLEAR__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_CLEAR__RESERVED_4_MASK 0x00000010L
+//RLC_INT_STAT
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
+#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
+#define RLC_INT_STAT__RESERVED__SHIFT 0x9
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
+#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
+#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
+//RLC_MGCG_CTRL
+#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
+#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
+#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
+#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
+#define RLC_MGCG_CTRL__SPARE__SHIFT 0xf
+#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
+#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
+#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
+#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
+#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFF8000L
+//RLC_JUMP_TABLE_RESTORE
+#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
+#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_2
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY_2__PERWGP_TIMEOUT_VALUE__SHIFT 0x10
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY_2__PERWGP_TIMEOUT_VALUE_MASK 0xFFFF0000L
+//RLC_GPU_CLOCK_COUNT_LSB
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_UCODE_CNTL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
+//RLC_GPM_THREAD_RESET
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
+#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
+#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPM_CP_DMA_COMPLETE_T0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_CP_DMA_COMPLETE_T1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_THREAD_INVALIDATE_CACHE
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD0_INVALIDATE_CACHE__SHIFT 0x0
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD1_INVALIDATE_CACHE__SHIFT 0x1
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD2_INVALIDATE_CACHE__SHIFT 0x2
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD3_INVALIDATE_CACHE__SHIFT 0x3
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD0_INVALIDATE_CACHE_MASK 0x00000001L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD1_INVALIDATE_CACHE_MASK 0x00000002L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD2_INVALIDATE_CACHE_MASK 0x00000004L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD3_INVALIDATE_CACHE_MASK 0x00000008L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_CLK_COUNT_GFXCLK_LSB
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_MSB
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_LSB
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_MSB
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_CTRL
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN__SHIFT 0x0
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET__SHIFT 0x1
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE__SHIFT 0x2
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN__SHIFT 0x3
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET__SHIFT 0x4
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE__SHIFT 0x5
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN_MASK 0x00000001L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET_MASK 0x00000002L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE_MASK 0x00000004L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN_MASK 0x00000008L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET_MASK 0x00000010L
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE_MASK 0x00000020L
+//RLC_CLK_COUNT_STAT
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID__SHIFT 0x0
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID__SHIFT 0x1
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC__SHIFT 0x2
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC__SHIFT 0x3
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC__SHIFT 0x4
+#define RLC_CLK_COUNT_STAT__RESERVED__SHIFT 0x5
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID_MASK 0x00000001L
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID_MASK 0x00000002L
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC_MASK 0x00000004L
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC_MASK 0x00000008L
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC_MASK 0x00000010L
+#define RLC_CLK_COUNT_STAT__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCG_DOORBELL_CNTL
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCG_DOORBELL_CNTL__RESERVED__SHIFT 0x16
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+#define RLC_RLCG_DOORBELL_CNTL__RESERVED_MASK 0xFFC00000L
+//RLC_RLCG_DOORBELL_STAT
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCG_DOORBELL_0_DATA_LO
+#define RLC_RLCG_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_0_DATA_HI
+#define RLC_RLCG_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_1_DATA_LO
+#define RLC_RLCG_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_1_DATA_HI
+#define RLC_RLCG_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_2_DATA_LO
+#define RLC_RLCG_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_2_DATA_HI
+#define RLC_RLCG_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_3_DATA_LO
+#define RLC_RLCG_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_3_DATA_HI
+#define RLC_RLCG_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_32_RES_SEL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_CLOCK_32
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
+//RLC_PG_CNTL
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
+#define RLC_PG_CNTL__DYN_PER_WGP_PG_ENABLE__SHIFT 0x2
+#define RLC_PG_CNTL__STATIC_PER_WGP_PG_ENABLE__SHIFT 0x3
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
+#define RLC_PG_CNTL__MEM_DS_DISABLE__SHIFT 0xd
+#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
+#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x13
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable__SHIFT 0x15
+#define RLC_PG_CNTL__RESERVED2__SHIFT 0x16
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE__SHIFT 0x17
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__DYN_PER_WGP_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__STATIC_PER_WGP_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
+#define RLC_PG_CNTL__RESERVED_MASK 0x00001FE0L
+#define RLC_PG_CNTL__MEM_DS_DISABLE_MASK 0x00002000L
+#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
+#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00180000L
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable_MASK 0x00200000L
+#define RLC_PG_CNTL__RESERVED2_MASK 0x00400000L
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK 0x00800000L
+//RLC_GPM_THREAD_PRIORITY
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
+//RLC_GPM_THREAD_ENABLE
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
+#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
+#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCG_DOORBELL_RANGE
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_CGTT_MGCG_OVERRIDE
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE__SHIFT 0xa
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_16_11__SHIFT 0xb
+#define RLC_CGTT_MGCG_OVERRIDE__GC_CAC_MGCG_CLK_CNTL__SHIFT 0x11
+#define RLC_CGTT_MGCG_OVERRIDE__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x12
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_19__SHIFT 0x13
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK 0x00000001L
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK 0x00000400L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_16_11_MASK 0x0001F800L
+#define RLC_CGTT_MGCG_OVERRIDE__GC_CAC_MGCG_CLK_CNTL_MASK 0x00020000L
+#define RLC_CGTT_MGCG_OVERRIDE__SE_CAC_MGCG_CLK_CNTL_MASK 0x00040000L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_19_MASK 0xFFF80000L
+//RLC_CGCG_CGLS_CTRL
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_DYN_PG_STATUS
+#define RLC_DYN_PG_STATUS__PG_STATUS_WGP_MASK__SHIFT 0x0
+#define RLC_DYN_PG_STATUS__PG_STATUS_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_DYN_PG_REQUEST
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_WGP_MASK__SHIFT 0x0
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY
+#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
+#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
+#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
+#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
+//RLC_WGP_STATUS
+#define RLC_WGP_STATUS__WORK_PENDING__SHIFT 0x0
+#define RLC_WGP_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
+//RLC_PG_ALWAYS_ON_WGP_MASK
+#define RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK__SHIFT 0x0
+#define RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_MAX_PG_WGP
+#define RLC_MAX_PG_WGP__MAX_POWERED_UP_WGP__SHIFT 0x0
+#define RLC_MAX_PG_WGP__SPARE__SHIFT 0x8
+#define RLC_MAX_PG_WGP__MAX_POWERED_UP_WGP_MASK 0x000000FFL
+#define RLC_MAX_PG_WGP__SPARE_MASK 0xFFFFFF00L
+//RLC_AUTO_PG_CTRL
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
+//RLC_SERDES_RD_INDEX
+#define RLC_SERDES_RD_INDEX__DATA_REG_ID__SHIFT 0x0
+#define RLC_SERDES_RD_INDEX__SPARE__SHIFT 0x2
+#define RLC_SERDES_RD_INDEX__DATA_REG_ID_MASK 0x00000003L
+#define RLC_SERDES_RD_INDEX__SPARE_MASK 0xFFFFFFFCL
+//RLC_SERDES_RD_DATA_0
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_1
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_2
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_3
+#define RLC_SERDES_RD_DATA_3__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_MASK
+#define RLC_SERDES_MASK__GC_CENTER_HUB_0__SHIFT 0x0
+#define RLC_SERDES_MASK__GC_CENTER_HUB_1__SHIFT 0x1
+#define RLC_SERDES_MASK__RESERVED__SHIFT 0x2
+#define RLC_SERDES_MASK__GC_SE_0__SHIFT 0x10
+#define RLC_SERDES_MASK__GC_SE_1__SHIFT 0x11
+#define RLC_SERDES_MASK__GC_SE_2__SHIFT 0x12
+#define RLC_SERDES_MASK__GC_SE_3__SHIFT 0x13
+#define RLC_SERDES_MASK__GC_SE_4__SHIFT 0x14
+#define RLC_SERDES_MASK__GC_SE_5__SHIFT 0x15
+#define RLC_SERDES_MASK__GC_SE_6__SHIFT 0x16
+#define RLC_SERDES_MASK__GC_SE_7__SHIFT 0x17
+#define RLC_SERDES_MASK__RESERVED_31_24__SHIFT 0x18
+#define RLC_SERDES_MASK__GC_CENTER_HUB_0_MASK 0x00000001L
+#define RLC_SERDES_MASK__GC_CENTER_HUB_1_MASK 0x00000002L
+#define RLC_SERDES_MASK__RESERVED_MASK 0x0000FFFCL
+#define RLC_SERDES_MASK__GC_SE_0_MASK 0x00010000L
+#define RLC_SERDES_MASK__GC_SE_1_MASK 0x00020000L
+#define RLC_SERDES_MASK__GC_SE_2_MASK 0x00040000L
+#define RLC_SERDES_MASK__GC_SE_3_MASK 0x00080000L
+#define RLC_SERDES_MASK__GC_SE_4_MASK 0x00100000L
+#define RLC_SERDES_MASK__GC_SE_5_MASK 0x00200000L
+#define RLC_SERDES_MASK__GC_SE_6_MASK 0x00400000L
+#define RLC_SERDES_MASK__GC_SE_7_MASK 0x00800000L
+#define RLC_SERDES_MASK__RESERVED_31_24_MASK 0xFF000000L
+//RLC_SERDES_CTRL
+#define RLC_SERDES_CTRL__BPM_BROADCAST__SHIFT 0x0
+#define RLC_SERDES_CTRL__BPM_REG_WRITE__SHIFT 0x1
+#define RLC_SERDES_CTRL__BPM_LONG_CMD__SHIFT 0x2
+#define RLC_SERDES_CTRL__BPM_ADDR__SHIFT 0x3
+#define RLC_SERDES_CTRL__REG_ADDR__SHIFT 0x10
+#define RLC_SERDES_CTRL__BPM_BROADCAST_MASK 0x000001L
+#define RLC_SERDES_CTRL__BPM_REG_WRITE_MASK 0x000002L
+#define RLC_SERDES_CTRL__BPM_LONG_CMD_MASK 0x000004L
+#define RLC_SERDES_CTRL__BPM_ADDR_MASK 0x00FFF8L
+#define RLC_SERDES_CTRL__REG_ADDR_MASK 0xFF0000L
+//RLC_SERDES_DATA
+#define RLC_SERDES_DATA__DATA__SHIFT 0x0
+#define RLC_SERDES_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_BUSY
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_0__SHIFT 0x0
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_1__SHIFT 0x1
+#define RLC_SERDES_BUSY__RESERVED__SHIFT 0x2
+#define RLC_SERDES_BUSY__GC_SE_0__SHIFT 0x10
+#define RLC_SERDES_BUSY__GC_SE_1__SHIFT 0x11
+#define RLC_SERDES_BUSY__GC_SE_2__SHIFT 0x12
+#define RLC_SERDES_BUSY__GC_SE_3__SHIFT 0x13
+#define RLC_SERDES_BUSY__GC_SE_4__SHIFT 0x14
+#define RLC_SERDES_BUSY__GC_SE_5__SHIFT 0x15
+#define RLC_SERDES_BUSY__GC_SE_6__SHIFT 0x16
+#define RLC_SERDES_BUSY__GC_SE_7__SHIFT 0x17
+#define RLC_SERDES_BUSY__RESERVED_29_24__SHIFT 0x18
+#define RLC_SERDES_BUSY__RD_FIFO_NOT_EMPTY__SHIFT 0x1e
+#define RLC_SERDES_BUSY__RD_PENDING__SHIFT 0x1f
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_0_MASK 0x00000001L
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_1_MASK 0x00000002L
+#define RLC_SERDES_BUSY__RESERVED_MASK 0x0000FFFCL
+#define RLC_SERDES_BUSY__GC_SE_0_MASK 0x00010000L
+#define RLC_SERDES_BUSY__GC_SE_1_MASK 0x00020000L
+#define RLC_SERDES_BUSY__GC_SE_2_MASK 0x00040000L
+#define RLC_SERDES_BUSY__GC_SE_3_MASK 0x00080000L
+#define RLC_SERDES_BUSY__GC_SE_4_MASK 0x00100000L
+#define RLC_SERDES_BUSY__GC_SE_5_MASK 0x00200000L
+#define RLC_SERDES_BUSY__GC_SE_6_MASK 0x00400000L
+#define RLC_SERDES_BUSY__GC_SE_7_MASK 0x00800000L
+#define RLC_SERDES_BUSY__RESERVED_29_24_MASK 0x3F000000L
+#define RLC_SERDES_BUSY__RD_FIFO_NOT_EMPTY_MASK 0x40000000L
+#define RLC_SERDES_BUSY__RD_PENDING_MASK 0x80000000L
+//RLC_GPM_GENERAL_0
+#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_1
+#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_2
+#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_3
+#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_4
+#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_5
+#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_6
+#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_7
+#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_STATIC_PG_STATUS
+#define RLC_STATIC_PG_STATUS__PG_STATUS_WGP_MASK__SHIFT 0x0
+#define RLC_STATIC_PG_STATUS__PG_STATUS_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_16
+#define RLC_GPM_GENERAL_16__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_16__DATA_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_3
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
+#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
+#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
+//RLC_GPR_REG1
+#define RLC_GPR_REG1__DATA__SHIFT 0x0
+#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPR_REG2
+#define RLC_GPR_REG2__DATA__SHIFT 0x0
+#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_DISABLE_TH0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_INT__SHIFT 0x0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_GPM_LEGACY_INT_DISABLE
+#define RLC_GPM_LEGACY_INT_DISABLE__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_DISABLE__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_DISABLE__STORE_LOAD_TIMER3_EXPIRED_T0__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_DISABLE__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_DISABLE__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_DISABLE__STORE_LOAD_TIMER3_EXPIRED_T0_MASK 0x00000010L
+//RLC_GPM_INT_FORCE_TH0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_INT__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_SRM_CNTL
+#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
+#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
+#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_GPM_COMMAND_STATUS
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_INDEX_CNTL_ADDR_0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_1
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_2
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_3
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_4
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_5
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_6
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_7
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_DATA_0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_1
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_2
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_3
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_4
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_5
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_6
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_7
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_STAT
+#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
+#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
+#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
+#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
+#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
+#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
+//RLC_GPM_GENERAL_8
+#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_9
+#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_10
+#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_11
+#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_12
+#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_CNTL_0
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_1
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_2
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0xC0000000L
+//RLC_SPM_UTCL1_CNTL
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_SPM_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x1e
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_SPM_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
+//RLC_UTCL1_STATUS_2
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
+#define RLC_UTCL1_STATUS_2__RESERVED_1__SHIFT 0x4
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
+#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0x9
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
+#define RLC_UTCL1_STATUS_2__RESERVED_1_MASK 0x00000010L
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
+#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFE00L
+//RLC_SPM_UTCL1_ERROR_1
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_SPM_UTCL1_ERROR_2
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_1
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH0_ERROR_2
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH1_ERROR_1
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH1_ERROR_2
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH2_ERROR_1
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH2_ERROR_2
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_CGCG_CGLS_CTRL_3D
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL_3D
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_SEMAPHORE_0
+#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_1
+#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_2
+#define RLC_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_3
+#define RLC_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_PACE_INT_STAT
+#define RLC_PACE_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_PACE_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_UTCL1_STATUS
+#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
+#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
+//RLC_R2I_CNTL_0
+#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_1
+#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_2
+#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_3
+#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_STAT_TH0
+#define RLC_GPM_INT_STAT_TH0__STATUS__SHIFT 0x0
+#define RLC_GPM_INT_STAT_TH0__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_13
+#define RLC_GPM_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_14
+#define RLC_GPM_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_15
+#define RLC_GPM_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_CLOCK_COUNT_LSB_2
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_2
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_DISABLE
+#define RLC_PACE_INT_DISABLE__DISABLE_INT__SHIFT 0x0
+#define RLC_PACE_INT_DISABLE__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_2
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_DOORBELL_RANGE
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCV_DOORBELL_CNTL
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_RLCV_DOORBELL_STAT
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCV_DOORBELL_0_DATA_LO
+#define RLC_RLCV_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_0_DATA_HI
+#define RLC_RLCV_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_1_DATA_LO
+#define RLC_RLCV_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_1_DATA_HI
+#define RLC_RLCV_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_2_DATA_LO
+#define RLC_RLCV_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_2_DATA_HI
+#define RLC_RLCV_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_3_DATA_LO
+#define RLC_RLCV_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_3_DATA_HI
+#define RLC_RLCV_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_LSB_1
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_1
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_RLCV_SPARE_INT
+#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_FIREWALL_VIOLATION
+#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
+#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_INT_0
+#define RLC_PACE_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_PACE_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_INT_1
+#define RLC_PACE_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_PACE_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_CTRL
+#define RLC_PACE_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_PACE_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_PACE_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x2
+#define RLC_PACE_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x3
+#define RLC_PACE_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x4
+#define RLC_PACE_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x5
+#define RLC_PACE_TIMER_CTRL__RESERVED__SHIFT 0x6
+#define RLC_PACE_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_PACE_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000004L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000008L
+#define RLC_PACE_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00000010L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00000020L
+#define RLC_PACE_TIMER_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_SMU_CLK_REQ
+#define RLC_SMU_CLK_REQ__VALID__SHIFT 0x0
+#define RLC_SMU_CLK_REQ__VALID_MASK 0x00000001L
+//RLC_CP_STAT_INVAL_STAT
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND__SHIFT 0x0
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND__SHIFT 0x1
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND__SHIFT 0x2
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_CHANGED__SHIFT 0x3
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_CHANGED__SHIFT 0x4
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_CHANGED__SHIFT 0x5
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_MASK 0x00000001L
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_MASK 0x00000002L
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_MASK 0x00000004L
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_CHANGED_MASK 0x00000008L
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_CHANGED_MASK 0x00000010L
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_CHANGED_MASK 0x00000020L
+//RLC_CP_STAT_INVAL_CTRL
+#define RLC_CP_STAT_INVAL_CTRL__CPG_STAT_INVAL_PEND_EN__SHIFT 0x0
+#define RLC_CP_STAT_INVAL_CTRL__CPC_STAT_INVAL_PEND_EN__SHIFT 0x1
+#define RLC_CP_STAT_INVAL_CTRL__CPF_STAT_INVAL_PEND_EN__SHIFT 0x2
+#define RLC_CP_STAT_INVAL_CTRL__CPG_STAT_INVAL_PEND_EN_MASK 0x00000001L
+#define RLC_CP_STAT_INVAL_CTRL__CPC_STAT_INVAL_PEND_EN_MASK 0x00000002L
+#define RLC_CP_STAT_INVAL_CTRL__CPF_STAT_INVAL_PEND_EN_MASK 0x00000004L
+//RLC_SPARE
+#define RLC_SPARE__SPARE__SHIFT 0x0
+#define RLC_SPARE__SPARE_MASK 0xFFFFFFFFL
+//RLC_SPP_CTRL
+#define RLC_SPP_CTRL__ENABLE__SHIFT 0x0
+#define RLC_SPP_CTRL__ENABLE_PPROF__SHIFT 0x1
+#define RLC_SPP_CTRL__ENABLE_PWR_OPT__SHIFT 0x2
+#define RLC_SPP_CTRL__PAUSE__SHIFT 0x3
+#define RLC_SPP_CTRL__ENABLE_MASK 0x00000001L
+#define RLC_SPP_CTRL__ENABLE_PPROF_MASK 0x00000002L
+#define RLC_SPP_CTRL__ENABLE_PWR_OPT_MASK 0x00000004L
+#define RLC_SPP_CTRL__PAUSE_MASK 0x00000008L
+//RLC_SPP_SHADER_PROFILE_EN
+#define RLC_SPP_SHADER_PROFILE_EN__PS_ENABLE__SHIFT 0x0
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_1__SHIFT 0x1
+#define RLC_SPP_SHADER_PROFILE_EN__GS_ENABLE__SHIFT 0x2
+#define RLC_SPP_SHADER_PROFILE_EN__HS_ENABLE__SHIFT 0x3
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_ENABLE__SHIFT 0x4
+#define RLC_SPP_SHADER_PROFILE_EN__CS_ENABLE__SHIFT 0x5
+#define RLC_SPP_SHADER_PROFILE_EN__PS_STOP_CONDITION__SHIFT 0x6
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_7__SHIFT 0x7
+#define RLC_SPP_SHADER_PROFILE_EN__GS_STOP_CONDITION__SHIFT 0x8
+#define RLC_SPP_SHADER_PROFILE_EN__HS_STOP_CONDITION__SHIFT 0x9
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_STOP_CONDITION__SHIFT 0xa
+#define RLC_SPP_SHADER_PROFILE_EN__CS_STOP_CONDITION__SHIFT 0xb
+#define RLC_SPP_SHADER_PROFILE_EN__PS_START_CONDITION__SHIFT 0xc
+#define RLC_SPP_SHADER_PROFILE_EN__CS_START_CONDITION__SHIFT 0xd
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_MISS__SHIFT 0xe
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_UNLOCKED__SHIFT 0xf
+#define RLC_SPP_SHADER_PROFILE_EN__ENABLE_PROF_INFO_LOCK__SHIFT 0x10
+#define RLC_SPP_SHADER_PROFILE_EN__PS_ENABLE_MASK 0x00000001L
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_1_MASK 0x00000002L
+#define RLC_SPP_SHADER_PROFILE_EN__GS_ENABLE_MASK 0x00000004L
+#define RLC_SPP_SHADER_PROFILE_EN__HS_ENABLE_MASK 0x00000008L
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_ENABLE_MASK 0x00000010L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_ENABLE_MASK 0x00000020L
+#define RLC_SPP_SHADER_PROFILE_EN__PS_STOP_CONDITION_MASK 0x00000040L
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_7_MASK 0x00000080L
+#define RLC_SPP_SHADER_PROFILE_EN__GS_STOP_CONDITION_MASK 0x00000100L
+#define RLC_SPP_SHADER_PROFILE_EN__HS_STOP_CONDITION_MASK 0x00000200L
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_STOP_CONDITION_MASK 0x00000400L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_STOP_CONDITION_MASK 0x00000800L
+#define RLC_SPP_SHADER_PROFILE_EN__PS_START_CONDITION_MASK 0x00001000L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_START_CONDITION_MASK 0x00002000L
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_MISS_MASK 0x00004000L
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_UNLOCKED_MASK 0x00008000L
+#define RLC_SPP_SHADER_PROFILE_EN__ENABLE_PROF_INFO_LOCK_MASK 0x00010000L
+//RLC_SPP_SSF_CAPTURE_EN
+#define RLC_SPP_SSF_CAPTURE_EN__PS_ENABLE__SHIFT 0x0
+#define RLC_SPP_SSF_CAPTURE_EN__RESERVED_1__SHIFT 0x1
+#define RLC_SPP_SSF_CAPTURE_EN__GS_ENABLE__SHIFT 0x2
+#define RLC_SPP_SSF_CAPTURE_EN__HS_ENABLE__SHIFT 0x3
+#define RLC_SPP_SSF_CAPTURE_EN__CSG_ENABLE__SHIFT 0x4
+#define RLC_SPP_SSF_CAPTURE_EN__CS_ENABLE__SHIFT 0x5
+#define RLC_SPP_SSF_CAPTURE_EN__PS_ENABLE_MASK 0x00000001L
+#define RLC_SPP_SSF_CAPTURE_EN__RESERVED_1_MASK 0x00000002L
+#define RLC_SPP_SSF_CAPTURE_EN__GS_ENABLE_MASK 0x00000004L
+#define RLC_SPP_SSF_CAPTURE_EN__HS_ENABLE_MASK 0x00000008L
+#define RLC_SPP_SSF_CAPTURE_EN__CSG_ENABLE_MASK 0x00000010L
+#define RLC_SPP_SSF_CAPTURE_EN__CS_ENABLE_MASK 0x00000020L
+//RLC_SPP_SSF_THRESHOLD_0
+#define RLC_SPP_SSF_THRESHOLD_0__PS_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_0__RESERVED__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_0__PS_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_0__RESERVED_MASK 0xFFFF0000L
+//RLC_SPP_SSF_THRESHOLD_1
+#define RLC_SPP_SSF_THRESHOLD_1__GS_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_1__HS_THRESHOLD__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_1__GS_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_1__HS_THRESHOLD_MASK 0xFFFF0000L
+//RLC_SPP_SSF_THRESHOLD_2
+#define RLC_SPP_SSF_THRESHOLD_2__CSG_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_2__CS_THRESHOLD__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_2__CSG_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_2__CS_THRESHOLD_MASK 0xFFFF0000L
+//RLC_SPP_INFLIGHT_RD_ADDR
+#define RLC_SPP_INFLIGHT_RD_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_INFLIGHT_RD_ADDR__ADDR_MASK 0x0000001FL
+//RLC_SPP_INFLIGHT_RD_DATA
+#define RLC_SPP_INFLIGHT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_SPP_INFLIGHT_RD_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPP_PROF_INFO_1
+#define RLC_SPP_PROF_INFO_1__SH_ID__SHIFT 0x0
+#define RLC_SPP_PROF_INFO_1__SH_ID_MASK 0xFFFFFFFFL
+//RLC_SPP_PROF_INFO_2
+#define RLC_SPP_PROF_INFO_2__SH_TYPE__SHIFT 0x0
+#define RLC_SPP_PROF_INFO_2__CAM_HIT__SHIFT 0x4
+#define RLC_SPP_PROF_INFO_2__CAM_LOCK__SHIFT 0x5
+#define RLC_SPP_PROF_INFO_2__CAM_CONFLICT__SHIFT 0x6
+#define RLC_SPP_PROF_INFO_2__SH_TYPE_MASK 0x0000000FL
+#define RLC_SPP_PROF_INFO_2__CAM_HIT_MASK 0x00000010L
+#define RLC_SPP_PROF_INFO_2__CAM_LOCK_MASK 0x00000020L
+#define RLC_SPP_PROF_INFO_2__CAM_CONFLICT_MASK 0x00000040L
+//RLC_SPP_GLOBAL_SH_ID
+#define RLC_SPP_GLOBAL_SH_ID__SH_ID__SHIFT 0x0
+#define RLC_SPP_GLOBAL_SH_ID__SH_ID_MASK 0xFFFFFFFFL
+//RLC_SPP_GLOBAL_SH_ID_VALID
+#define RLC_SPP_GLOBAL_SH_ID_VALID__VALID__SHIFT 0x0
+#define RLC_SPP_GLOBAL_SH_ID_VALID__VALID_MASK 0x00000001L
+//RLC_SPP_STATUS
+#define RLC_SPP_STATUS__RESERVED_0__SHIFT 0x0
+#define RLC_SPP_STATUS__SSF_BUSY__SHIFT 0x1
+#define RLC_SPP_STATUS__EVENT_ARB_BUSY__SHIFT 0x2
+#define RLC_SPP_STATUS__SPP_BUSY__SHIFT 0x1f
+#define RLC_SPP_STATUS__RESERVED_0_MASK 0x00000001L
+#define RLC_SPP_STATUS__SSF_BUSY_MASK 0x00000002L
+#define RLC_SPP_STATUS__EVENT_ARB_BUSY_MASK 0x00000004L
+#define RLC_SPP_STATUS__SPP_BUSY_MASK 0x80000000L
+//RLC_SPP_PVT_STAT_0
+#define RLC_SPP_PVT_STAT_0__LEVEL_0_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_0__LEVEL_1_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_0__LEVEL_2_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_0__LEVEL_3_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_0__LEVEL_0_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_0__LEVEL_1_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_0__LEVEL_2_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_0__LEVEL_3_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_1
+#define RLC_SPP_PVT_STAT_1__LEVEL_4_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_1__LEVEL_5_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_1__LEVEL_6_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_1__LEVEL_7_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_1__LEVEL_4_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_1__LEVEL_5_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_1__LEVEL_6_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_1__LEVEL_7_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_2
+#define RLC_SPP_PVT_STAT_2__LEVEL_8_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_2__LEVEL_9_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_2__LEVEL_10_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_2__LEVEL_11_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_2__LEVEL_8_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_2__LEVEL_9_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_2__LEVEL_10_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_2__LEVEL_11_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_3
+#define RLC_SPP_PVT_STAT_3__LEVEL_12_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_3__LEVEL_13_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_3__LEVEL_14_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_3__LEVEL_15_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_3__LEVEL_12_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_3__LEVEL_13_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_3__LEVEL_14_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_3__LEVEL_15_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_LEVEL_MAX
+#define RLC_SPP_PVT_LEVEL_MAX__LEVEL__SHIFT 0x0
+#define RLC_SPP_PVT_LEVEL_MAX__LEVEL_MASK 0x0000000FL
+//RLC_SPP_STALL_STATE_UPDATE
+#define RLC_SPP_STALL_STATE_UPDATE__STALL__SHIFT 0x0
+#define RLC_SPP_STALL_STATE_UPDATE__ENABLE__SHIFT 0x1
+#define RLC_SPP_STALL_STATE_UPDATE__STALL_MASK 0x00000001L
+#define RLC_SPP_STALL_STATE_UPDATE__ENABLE_MASK 0x00000002L
+//RLC_SPP_PBB_INFO
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE__SHIFT 0x0
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_VALID__SHIFT 0x1
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE__SHIFT 0x2
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_VALID__SHIFT 0x3
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_MASK 0x00000001L
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_VALID_MASK 0x00000002L
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_MASK 0x00000004L
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_VALID_MASK 0x00000008L
+//RLC_SPP_RESET
+#define RLC_SPP_RESET__SSF_RESET__SHIFT 0x0
+#define RLC_SPP_RESET__EVENT_ARB_RESET__SHIFT 0x1
+#define RLC_SPP_RESET__CAM_RESET__SHIFT 0x2
+#define RLC_SPP_RESET__PVT_RESET__SHIFT 0x3
+#define RLC_SPP_RESET__SSF_RESET_MASK 0x00000001L
+#define RLC_SPP_RESET__EVENT_ARB_RESET_MASK 0x00000002L
+#define RLC_SPP_RESET__CAM_RESET_MASK 0x00000004L
+#define RLC_SPP_RESET__PVT_RESET_MASK 0x00000008L
+//RLC_RLCP_DOORBELL_RANGE
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCP_DOORBELL_CNTL
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_RLCP_DOORBELL_STAT
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCP_DOORBELL_0_DATA_LO
+#define RLC_RLCP_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_0_DATA_HI
+#define RLC_RLCP_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_1_DATA_LO
+#define RLC_RLCP_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_1_DATA_HI
+#define RLC_RLCP_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_2_DATA_LO
+#define RLC_RLCP_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_2_DATA_HI
+#define RLC_RLCP_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_3_DATA_LO
+#define RLC_RLCP_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_3_DATA_HI
+#define RLC_RLCP_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_CAC_MASK_CNTL
+#define RLC_CAC_MASK_CNTL__RLC_CAC_MASK__SHIFT 0x0
+#define RLC_CAC_MASK_CNTL__RLC_CAC_MASK_MASK 0xFFFFFFFFL
+//RLC_POWER_RESIDENCY_CNTR_CTRL
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CLK_RESIDENCY_CNTR_CTRL
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_DS_RESIDENCY_CNTR_CTRL
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_DS_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_ULV_RESIDENCY_CNTR_CTRL
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_PCC_RESIDENCY_CNTR_CTRL
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__EVENT_SEL__SHIFT 0x5
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x9
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__EVENT_SEL_MASK 0x000001E0L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFE00L
+//RLC_GENERAL_RESIDENCY_CNTR_CTRL
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_POWER_RESIDENCY_EVENT_CNTR
+#define RLC_POWER_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_CLK_RESIDENCY_EVENT_CNTR
+#define RLC_CLK_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_DS_RESIDENCY_EVENT_CNTR
+#define RLC_DS_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_DS_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_ULV_RESIDENCY_EVENT_CNTR
+#define RLC_ULV_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_PCC_RESIDENCY_EVENT_CNTR
+#define RLC_PCC_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GENERAL_RESIDENCY_EVENT_CNTR
+#define RLC_GENERAL_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_POWER_RESIDENCY_REF_CNTR
+#define RLC_POWER_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_CLK_RESIDENCY_REF_CNTR
+#define RLC_CLK_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_DS_RESIDENCY_REF_CNTR
+#define RLC_DS_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_DS_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_ULV_RESIDENCY_REF_CNTR
+#define RLC_ULV_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_PCC_RESIDENCY_REF_CNTR
+#define RLC_PCC_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GENERAL_RESIDENCY_REF_CNTR
+#define RLC_GENERAL_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GFX_IH_CLIENT_CTRL
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_MASK__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_MASK__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_MASK__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_MASK__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_MASK__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_15__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_ERROR_CLEAR__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_ERROR_CLEAR__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_ERROR_CLEAR__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_ERROR_CLEAR__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_ERROR_CLEAR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_31__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_MASK_MASK 0x000000FFL
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_MASK_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_MASK_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_MASK_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_MASK_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_15_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_ERROR_CLEAR_MASK 0x00FF0000L
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_ERROR_CLEAR_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_ERROR_CLEAR_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_ERROR_CLEAR_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_ERROR_CLEAR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_31_MASK 0x80000000L
+//RLC_GFX_IH_ARBITER_STAT
+#define RLC_GFX_IH_ARBITER_STAT__CLIENT_GRANTED__SHIFT 0x0
+#define RLC_GFX_IH_ARBITER_STAT__RESERVED__SHIFT 0x10
+#define RLC_GFX_IH_ARBITER_STAT__LAST_CLIENT_GRANTED__SHIFT 0x1c
+#define RLC_GFX_IH_ARBITER_STAT__CLIENT_GRANTED_MASK 0x0000FFFFL
+#define RLC_GFX_IH_ARBITER_STAT__RESERVED_MASK 0x0FFF0000L
+#define RLC_GFX_IH_ARBITER_STAT__LAST_CLIENT_GRANTED_MASK 0xF0000000L
+//RLC_GFX_IH_CLIENT_SE_STAT_L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_SE_STAT_H
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_SDMA_STAT
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_OTHER_STAT
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__RESERVED_31_24__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__RESERVED_31_24_MASK 0xFF000000L
+//RLC_SPM_GLOBAL_DELAY_IND_ADDR
+#define RLC_SPM_GLOBAL_DELAY_IND_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_DELAY_IND_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_GLOBAL_DELAY_IND_DATA
+#define RLC_SPM_GLOBAL_DELAY_IND_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_GLOBAL_DELAY_IND_DATA__DATA_MASK 0x0000003FL
+//RLC_SPM_SE_DELAY_IND_ADDR
+#define RLC_SPM_SE_DELAY_IND_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_SE_DELAY_IND_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_SE_DELAY_IND_DATA
+#define RLC_SPM_SE_DELAY_IND_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_SE_DELAY_IND_DATA__DATA_MASK 0x0000003FL
+//RLC_LX6_CNTL
+#define RLC_LX6_CNTL__BRESET__SHIFT 0x0
+#define RLC_LX6_CNTL__RUNSTALL__SHIFT 0x1
+#define RLC_LX6_CNTL__PDEBUG_ENABLE__SHIFT 0x2
+#define RLC_LX6_CNTL__STAT_VECTOR_SEL__SHIFT 0x3
+#define RLC_LX6_CNTL__BRESET_MASK 0x00000001L
+#define RLC_LX6_CNTL__RUNSTALL_MASK 0x00000002L
+#define RLC_LX6_CNTL__PDEBUG_ENABLE_MASK 0x00000004L
+#define RLC_LX6_CNTL__STAT_VECTOR_SEL_MASK 0x00000008L
+//RLC_XT_CORE_STATUS
+#define RLC_XT_CORE_STATUS__P_WAIT_MODE__SHIFT 0x0
+#define RLC_XT_CORE_STATUS__P_FATAL_ERROR__SHIFT 0x1
+#define RLC_XT_CORE_STATUS__DOUBLE_EXCEPTION_ERROR__SHIFT 0x2
+#define RLC_XT_CORE_STATUS__P_WAIT_MODE_MASK 0x00000001L
+#define RLC_XT_CORE_STATUS__P_FATAL_ERROR_MASK 0x00000002L
+#define RLC_XT_CORE_STATUS__DOUBLE_EXCEPTION_ERROR_MASK 0x00000004L
+//RLC_XT_CORE_INTERRUPT
+#define RLC_XT_CORE_INTERRUPT__EXTINT1__SHIFT 0x0
+#define RLC_XT_CORE_INTERRUPT__EXTINT2__SHIFT 0x1a
+#define RLC_XT_CORE_INTERRUPT__NMI__SHIFT 0x1b
+#define RLC_XT_CORE_INTERRUPT__EXTINT1_MASK 0x03FFFFFFL
+#define RLC_XT_CORE_INTERRUPT__EXTINT2_MASK 0x04000000L
+#define RLC_XT_CORE_INTERRUPT__NMI_MASK 0x08000000L
+//RLC_XT_CORE_FAULT_INFO
+#define RLC_XT_CORE_FAULT_INFO__FAULT_INFO__SHIFT 0x0
+#define RLC_XT_CORE_FAULT_INFO__FAULT_INFO_MASK 0xFFFFFFFFL
+//RLC_XT_CORE_ALT_RESET_VEC
+#define RLC_XT_CORE_ALT_RESET_VEC__ALT_RESET_VEC__SHIFT 0x0
+#define RLC_XT_CORE_ALT_RESET_VEC__ALT_RESET_VEC_MASK 0xFFFFFFFFL
+//RLC_XT_CORE_RESERVED
+#define RLC_XT_CORE_RESERVED__RESERVED__SHIFT 0x0
+#define RLC_XT_CORE_RESERVED__RESERVED_MASK 0xFFFFFFFFL
+//RLC_XT_INT_VEC_FORCE
+#define RLC_XT_INT_VEC_FORCE__NUM_0__SHIFT 0x0
+#define RLC_XT_INT_VEC_FORCE__NUM_1__SHIFT 0x1
+#define RLC_XT_INT_VEC_FORCE__NUM_2__SHIFT 0x2
+#define RLC_XT_INT_VEC_FORCE__NUM_3__SHIFT 0x3
+#define RLC_XT_INT_VEC_FORCE__NUM_4__SHIFT 0x4
+#define RLC_XT_INT_VEC_FORCE__NUM_5__SHIFT 0x5
+#define RLC_XT_INT_VEC_FORCE__NUM_6__SHIFT 0x6
+#define RLC_XT_INT_VEC_FORCE__NUM_7__SHIFT 0x7
+#define RLC_XT_INT_VEC_FORCE__NUM_8__SHIFT 0x8
+#define RLC_XT_INT_VEC_FORCE__NUM_9__SHIFT 0x9
+#define RLC_XT_INT_VEC_FORCE__NUM_10__SHIFT 0xa
+#define RLC_XT_INT_VEC_FORCE__NUM_11__SHIFT 0xb
+#define RLC_XT_INT_VEC_FORCE__NUM_12__SHIFT 0xc
+#define RLC_XT_INT_VEC_FORCE__NUM_13__SHIFT 0xd
+#define RLC_XT_INT_VEC_FORCE__NUM_14__SHIFT 0xe
+#define RLC_XT_INT_VEC_FORCE__NUM_15__SHIFT 0xf
+#define RLC_XT_INT_VEC_FORCE__NUM_16__SHIFT 0x10
+#define RLC_XT_INT_VEC_FORCE__NUM_17__SHIFT 0x11
+#define RLC_XT_INT_VEC_FORCE__NUM_18__SHIFT 0x12
+#define RLC_XT_INT_VEC_FORCE__NUM_19__SHIFT 0x13
+#define RLC_XT_INT_VEC_FORCE__NUM_20__SHIFT 0x14
+#define RLC_XT_INT_VEC_FORCE__NUM_21__SHIFT 0x15
+#define RLC_XT_INT_VEC_FORCE__NUM_22__SHIFT 0x16
+#define RLC_XT_INT_VEC_FORCE__NUM_23__SHIFT 0x17
+#define RLC_XT_INT_VEC_FORCE__NUM_24__SHIFT 0x18
+#define RLC_XT_INT_VEC_FORCE__NUM_25__SHIFT 0x19
+#define RLC_XT_INT_VEC_FORCE__NUM_0_MASK 0x00000001L
+#define RLC_XT_INT_VEC_FORCE__NUM_1_MASK 0x00000002L
+#define RLC_XT_INT_VEC_FORCE__NUM_2_MASK 0x00000004L
+#define RLC_XT_INT_VEC_FORCE__NUM_3_MASK 0x00000008L
+#define RLC_XT_INT_VEC_FORCE__NUM_4_MASK 0x00000010L
+#define RLC_XT_INT_VEC_FORCE__NUM_5_MASK 0x00000020L
+#define RLC_XT_INT_VEC_FORCE__NUM_6_MASK 0x00000040L
+#define RLC_XT_INT_VEC_FORCE__NUM_7_MASK 0x00000080L
+#define RLC_XT_INT_VEC_FORCE__NUM_8_MASK 0x00000100L
+#define RLC_XT_INT_VEC_FORCE__NUM_9_MASK 0x00000200L
+#define RLC_XT_INT_VEC_FORCE__NUM_10_MASK 0x00000400L
+#define RLC_XT_INT_VEC_FORCE__NUM_11_MASK 0x00000800L
+#define RLC_XT_INT_VEC_FORCE__NUM_12_MASK 0x00001000L
+#define RLC_XT_INT_VEC_FORCE__NUM_13_MASK 0x00002000L
+#define RLC_XT_INT_VEC_FORCE__NUM_14_MASK 0x00004000L
+#define RLC_XT_INT_VEC_FORCE__NUM_15_MASK 0x00008000L
+#define RLC_XT_INT_VEC_FORCE__NUM_16_MASK 0x00010000L
+#define RLC_XT_INT_VEC_FORCE__NUM_17_MASK 0x00020000L
+#define RLC_XT_INT_VEC_FORCE__NUM_18_MASK 0x00040000L
+#define RLC_XT_INT_VEC_FORCE__NUM_19_MASK 0x00080000L
+#define RLC_XT_INT_VEC_FORCE__NUM_20_MASK 0x00100000L
+#define RLC_XT_INT_VEC_FORCE__NUM_21_MASK 0x00200000L
+#define RLC_XT_INT_VEC_FORCE__NUM_22_MASK 0x00400000L
+#define RLC_XT_INT_VEC_FORCE__NUM_23_MASK 0x00800000L
+#define RLC_XT_INT_VEC_FORCE__NUM_24_MASK 0x01000000L
+#define RLC_XT_INT_VEC_FORCE__NUM_25_MASK 0x02000000L
+//RLC_XT_INT_VEC_CLEAR
+#define RLC_XT_INT_VEC_CLEAR__NUM_0__SHIFT 0x0
+#define RLC_XT_INT_VEC_CLEAR__NUM_1__SHIFT 0x1
+#define RLC_XT_INT_VEC_CLEAR__NUM_2__SHIFT 0x2
+#define RLC_XT_INT_VEC_CLEAR__NUM_3__SHIFT 0x3
+#define RLC_XT_INT_VEC_CLEAR__NUM_4__SHIFT 0x4
+#define RLC_XT_INT_VEC_CLEAR__NUM_5__SHIFT 0x5
+#define RLC_XT_INT_VEC_CLEAR__NUM_6__SHIFT 0x6
+#define RLC_XT_INT_VEC_CLEAR__NUM_7__SHIFT 0x7
+#define RLC_XT_INT_VEC_CLEAR__NUM_8__SHIFT 0x8
+#define RLC_XT_INT_VEC_CLEAR__NUM_9__SHIFT 0x9
+#define RLC_XT_INT_VEC_CLEAR__NUM_10__SHIFT 0xa
+#define RLC_XT_INT_VEC_CLEAR__NUM_11__SHIFT 0xb
+#define RLC_XT_INT_VEC_CLEAR__NUM_12__SHIFT 0xc
+#define RLC_XT_INT_VEC_CLEAR__NUM_13__SHIFT 0xd
+#define RLC_XT_INT_VEC_CLEAR__NUM_14__SHIFT 0xe
+#define RLC_XT_INT_VEC_CLEAR__NUM_15__SHIFT 0xf
+#define RLC_XT_INT_VEC_CLEAR__NUM_16__SHIFT 0x10
+#define RLC_XT_INT_VEC_CLEAR__NUM_17__SHIFT 0x11
+#define RLC_XT_INT_VEC_CLEAR__NUM_18__SHIFT 0x12
+#define RLC_XT_INT_VEC_CLEAR__NUM_19__SHIFT 0x13
+#define RLC_XT_INT_VEC_CLEAR__NUM_20__SHIFT 0x14
+#define RLC_XT_INT_VEC_CLEAR__NUM_21__SHIFT 0x15
+#define RLC_XT_INT_VEC_CLEAR__NUM_22__SHIFT 0x16
+#define RLC_XT_INT_VEC_CLEAR__NUM_23__SHIFT 0x17
+#define RLC_XT_INT_VEC_CLEAR__NUM_24__SHIFT 0x18
+#define RLC_XT_INT_VEC_CLEAR__NUM_25__SHIFT 0x19
+#define RLC_XT_INT_VEC_CLEAR__NUM_0_MASK 0x00000001L
+#define RLC_XT_INT_VEC_CLEAR__NUM_1_MASK 0x00000002L
+#define RLC_XT_INT_VEC_CLEAR__NUM_2_MASK 0x00000004L
+#define RLC_XT_INT_VEC_CLEAR__NUM_3_MASK 0x00000008L
+#define RLC_XT_INT_VEC_CLEAR__NUM_4_MASK 0x00000010L
+#define RLC_XT_INT_VEC_CLEAR__NUM_5_MASK 0x00000020L
+#define RLC_XT_INT_VEC_CLEAR__NUM_6_MASK 0x00000040L
+#define RLC_XT_INT_VEC_CLEAR__NUM_7_MASK 0x00000080L
+#define RLC_XT_INT_VEC_CLEAR__NUM_8_MASK 0x00000100L
+#define RLC_XT_INT_VEC_CLEAR__NUM_9_MASK 0x00000200L
+#define RLC_XT_INT_VEC_CLEAR__NUM_10_MASK 0x00000400L
+#define RLC_XT_INT_VEC_CLEAR__NUM_11_MASK 0x00000800L
+#define RLC_XT_INT_VEC_CLEAR__NUM_12_MASK 0x00001000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_13_MASK 0x00002000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_14_MASK 0x00004000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_15_MASK 0x00008000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_16_MASK 0x00010000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_17_MASK 0x00020000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_18_MASK 0x00040000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_19_MASK 0x00080000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_20_MASK 0x00100000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_21_MASK 0x00200000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_22_MASK 0x00400000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_23_MASK 0x00800000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_24_MASK 0x01000000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_25_MASK 0x02000000L
+//RLC_XT_INT_VEC_MUX_SEL
+#define RLC_XT_INT_VEC_MUX_SEL__MUX_SEL__SHIFT 0x0
+#define RLC_XT_INT_VEC_MUX_SEL__MUX_SEL_MASK 0x0000001FL
+//RLC_XT_INT_VEC_MUX_INT_SEL
+#define RLC_XT_INT_VEC_MUX_INT_SEL__INT_SEL__SHIFT 0x0
+#define RLC_XT_INT_VEC_MUX_INT_SEL__INT_SEL_MASK 0x0000003FL
+//RLC_GPU_CLOCK_COUNT_SPM_LSB
+#define RLC_GPU_CLOCK_COUNT_SPM_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_SPM_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_SPM_MSB
+#define RLC_GPU_CLOCK_COUNT_SPM_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_SPM_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_SPM_THREAD_TRACE_CTRL
+#define RLC_SPM_THREAD_TRACE_CTRL__THREAD_TRACE_INT_EN__SHIFT 0x0
+#define RLC_SPM_THREAD_TRACE_CTRL__THREAD_TRACE_INT_EN_MASK 0x00000001L
+//RLC_SPP_CAM_ADDR
+#define RLC_SPP_CAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_CAM_ADDR__ADDR_MASK 0x000000FFL
+//RLC_SPP_CAM_DATA
+#define RLC_SPP_CAM_DATA__DATA__SHIFT 0x0
+#define RLC_SPP_CAM_DATA__TAG__SHIFT 0x8
+#define RLC_SPP_CAM_DATA__DATA_MASK 0x000000FFL
+#define RLC_SPP_CAM_DATA__TAG_MASK 0xFFFFFF00L
+//RLC_SPP_CAM_EXT_ADDR
+#define RLC_SPP_CAM_EXT_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_CAM_EXT_ADDR__ADDR_MASK 0x000000FFL
+//RLC_SPP_CAM_EXT_DATA
+#define RLC_SPP_CAM_EXT_DATA__VALID__SHIFT 0x0
+#define RLC_SPP_CAM_EXT_DATA__LOCK__SHIFT 0x1
+#define RLC_SPP_CAM_EXT_DATA__VALID_MASK 0x00000001L
+#define RLC_SPP_CAM_EXT_DATA__LOCK_MASK 0x00000002L
+//RLC_CPAXI_DOORBELL_MON_CTRL
+#define RLC_CPAXI_DOORBELL_MON_CTRL__EN__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_CTRL__ID__SHIFT 0x1
+#define RLC_CPAXI_DOORBELL_MON_CTRL__EN_MASK 0x00000001L
+#define RLC_CPAXI_DOORBELL_MON_CTRL__ID_MASK 0x0000003EL
+//RLC_CPAXI_DOORBELL_MON_STAT
+#define RLC_CPAXI_DOORBELL_MON_STAT__ID_MATCH__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_STAT__MATCH_CLEAR__SHIFT 0x1
+#define RLC_CPAXI_DOORBELL_MON_STAT__ADDR__SHIFT 0x2
+#define RLC_CPAXI_DOORBELL_MON_STAT__ID_MATCH_MASK 0x00000001L
+#define RLC_CPAXI_DOORBELL_MON_STAT__MATCH_CLEAR_MASK 0x00000002L
+#define RLC_CPAXI_DOORBELL_MON_STAT__ADDR_MASK 0x0FFFFFFCL
+//RLC_CPAXI_DOORBELL_MON_DATA_LSB
+#define RLC_CPAXI_DOORBELL_MON_DATA_LSB__DATA__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_DATA_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_CPAXI_DOORBELL_MON_DATA_MSB
+#define RLC_CPAXI_DOORBELL_MON_DATA_MSB__DATA__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_DATA_MSB__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_RANGE
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_XT_DOORBELL_CNTL
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_XT_DOORBELL_STAT
+#define RLC_XT_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_XT_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_XT_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_XT_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_XT_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_XT_DOORBELL_0_DATA_LO
+#define RLC_XT_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_0_DATA_HI
+#define RLC_XT_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_1_DATA_LO
+#define RLC_XT_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_1_DATA_HI
+#define RLC_XT_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_2_DATA_LO
+#define RLC_XT_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_2_DATA_HI
+#define RLC_XT_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_3_DATA_LO
+#define RLC_XT_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_3_DATA_HI
+#define RLC_XT_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_MEM_SLP_CNTL
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_LS_OVERRIDE__SHIFT 0x2
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_DS_OVERRIDE__SHIFT 0x3
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_LS_OVERRIDE__SHIFT 0x4
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_DS_OVERRIDE__SHIFT 0x5
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x6
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_LS_OVERRIDE__SHIFT 0x18
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_DS_OVERRIDE__SHIFT 0x19
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x1a
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_LS_OVERRIDE_MASK 0x00000004L
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_DS_OVERRIDE_MASK 0x00000008L
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_LS_OVERRIDE_MASK 0x00000010L
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_DS_OVERRIDE_MASK 0x00000020L
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x00000040L
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_LS_OVERRIDE_MASK 0x01000000L
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_DS_OVERRIDE_MASK 0x02000000L
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFC000000L
+//SMU_RLC_RESPONSE
+#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
+#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_RLCV_SAFE_MODE
+#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SMU_SAFE_MODE
+#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCV_COMMAND
+#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
+#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
+#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
+#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
+//RLC_SMU_MESSAGE
+#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_MESSAGE_1
+#define RLC_SMU_MESSAGE_1__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE_1__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_MESSAGE_2
+#define RLC_SMU_MESSAGE_2__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE_2__CMD_MASK 0xFFFFFFFFL
+//RLC_SRM_GPM_COMMAND
+#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
+#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x12
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
+#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0003FFE0L
+#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x7FFC0000L
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_GPM_ABORT
+#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
+#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
+#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
+#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_COMMAND
+#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
+#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_1
+#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_2
+#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_3
+#define RLC_SMU_ARGUMENT_3__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_3__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_4
+#define RLC_SMU_ARGUMENT_4__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_4__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_5
+#define RLC_SMU_ARGUMENT_5__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_5__ARG_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_ADDR_HI
+#define RLC_IMU_BOOTLOAD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_ADDR_LO
+#define RLC_IMU_BOOTLOAD_ADDR_LO__ADDR_LO__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_SIZE
+#define RLC_IMU_BOOTLOAD_SIZE__SIZE__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_SIZE__RESERVED__SHIFT 0x1a
+#define RLC_IMU_BOOTLOAD_SIZE__SIZE_MASK 0x03FFFFFFL
+#define RLC_IMU_BOOTLOAD_SIZE__RESERVED_MASK 0xFC000000L
+//RLC_IMU_MISC
+#define RLC_IMU_MISC__THROTTLE_GFX__SHIFT 0x0
+#define RLC_IMU_MISC__EARLY_MGCG__SHIFT 0x1
+#define RLC_IMU_MISC__RESERVED__SHIFT 0x2
+#define RLC_IMU_MISC__THROTTLE_GFX_MASK 0x00000001L
+#define RLC_IMU_MISC__EARLY_MGCG_MASK 0x00000002L
+#define RLC_IMU_MISC__RESERVED_MASK 0xFFFFFFFCL
+//RLC_IMU_RESET_VECTOR
+#define RLC_IMU_RESET_VECTOR__COLD_BOOT_EXIT__SHIFT 0x0
+#define RLC_IMU_RESET_VECTOR__VDDGFX_EXIT__SHIFT 0x1
+#define RLC_IMU_RESET_VECTOR__VECTOR__SHIFT 0x2
+#define RLC_IMU_RESET_VECTOR__RESERVED__SHIFT 0x8
+#define RLC_IMU_RESET_VECTOR__COLD_BOOT_EXIT_MASK 0x00000001L
+#define RLC_IMU_RESET_VECTOR__VDDGFX_EXIT_MASK 0x00000002L
+#define RLC_IMU_RESET_VECTOR__VECTOR_MASK 0x000000FCL
+#define RLC_IMU_RESET_VECTOR__RESERVED_MASK 0xFFFFFF00L
+
+
+// addressBlock: gc_rlcsdec
+//RLC_RLCS_DEC_START
+//RLC_RLCS_DEC_DUMP_ADDR
+//RLC_RLCS_EXCEPTION_REG_1
+#define RLC_RLCS_EXCEPTION_REG_1__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_1__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_2
+#define RLC_RLCS_EXCEPTION_REG_2__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_2__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_2__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_2__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_3
+#define RLC_RLCS_EXCEPTION_REG_3__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_3__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_3__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_3__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_4
+#define RLC_RLCS_EXCEPTION_REG_4__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_4__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_4__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_4__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_CGCG_REQUEST
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST__SHIFT 0x0
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_3D__SHIFT 0x1
+#define RLC_RLCS_CGCG_REQUEST__RESERVED__SHIFT 0x2
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_MASK 0x00000001L
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_3D_MASK 0x00000002L
+#define RLC_RLCS_CGCG_REQUEST__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_CGCG_STATUS
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS__SHIFT 0x0
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS__SHIFT 0x2
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_3D__SHIFT 0x3
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_3D__SHIFT 0x5
+#define RLC_RLCS_CGCG_STATUS__RESERVED__SHIFT 0x6
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_MASK 0x00000003L
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_MASK 0x00000004L
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_3D_MASK 0x00000018L
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_3D_MASK 0x00000020L
+#define RLC_RLCS_CGCG_STATUS__RESERVED_MASK 0xFFFFFFC0L
+//RLC_RLCS_SOC_DS_CNTL
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_ALLOW__SHIFT 0x0
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x1
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x2
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_GFX_PWR_STALLED_MASK__SHIFT 0x6
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_NON3D_PWR_STALLED_MASK__SHIFT 0x7
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_0_BUSY_MASK__SHIFT 0x10
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_1_BUSY_MASK__SHIFT 0x11
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_2_BUSY_MASK__SHIFT 0x12
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_3_BUSY_MASK__SHIFT 0x13
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_4_BUSY_MASK__SHIFT 0x14
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_5_BUSY_MASK__SHIFT 0x15
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_6_BUSY_MASK__SHIFT 0x16
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_7_BUSY_MASK__SHIFT 0x17
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_ALLOW_MASK 0x00000001L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00000002L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00000004L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_GFX_PWR_STALLED_MASK_MASK 0x00000040L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_NON3D_PWR_STALLED_MASK_MASK 0x00000080L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_0_BUSY_MASK_MASK 0x00010000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_1_BUSY_MASK_MASK 0x00020000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_2_BUSY_MASK_MASK 0x00040000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_3_BUSY_MASK_MASK 0x00080000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_4_BUSY_MASK_MASK 0x00100000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_5_BUSY_MASK_MASK 0x00200000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_6_BUSY_MASK_MASK 0x00400000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_7_BUSY_MASK_MASK 0x00800000L
+//RLC_RLCS_GFX_DS_CNTL
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_ALLOW__SHIFT 0x0
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x1
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x2
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_GFX_PWR_STALLED_MASK__SHIFT 0x6
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_NON3D_PWR_STALLED_MASK__SHIFT 0x7
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_IMU_DISABLE_MASK__SHIFT 0x8
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_0_BUSY_MASK__SHIFT 0x10
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_1_BUSY_MASK__SHIFT 0x11
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_2_BUSY_MASK__SHIFT 0x12
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_3_BUSY_MASK__SHIFT 0x13
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_4_BUSY_MASK__SHIFT 0x14
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_5_BUSY_MASK__SHIFT 0x15
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_6_BUSY_MASK__SHIFT 0x16
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_7_BUSY_MASK__SHIFT 0x17
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_ALLOW_MASK 0x00000001L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000002L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000004L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_GFX_PWR_STALLED_MASK_MASK 0x00000040L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_NON3D_PWR_STALLED_MASK_MASK 0x00000080L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_IMU_DISABLE_MASK_MASK 0x00000100L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_0_BUSY_MASK_MASK 0x00010000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_1_BUSY_MASK_MASK 0x00020000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_2_BUSY_MASK_MASK 0x00040000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_3_BUSY_MASK_MASK 0x00080000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_4_BUSY_MASK_MASK 0x00100000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_5_BUSY_MASK_MASK 0x00200000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_6_BUSY_MASK_MASK 0x00400000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_7_BUSY_MASK_MASK 0x00800000L
+//RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL__SHIFT 0x0
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE0__SHIFT 0x1
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE1__SHIFT 0x2
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE2__SHIFT 0x3
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_MASK 0x00000001L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE0_MASK 0x00000002L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE1_MASK 0x00000004L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE2_MASK 0x00000008L
+//RLC_GPM_STAT
+#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_UP__SHIFT 0xd
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_DOWN__SHIFT 0xe
+#define RLC_GPM_STAT__DYN_WGP_POWERING_UP__SHIFT 0xf
+#define RLC_GPM_STAT__DYN_WGP_POWERING_DOWN__SHIFT 0x10
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_UP_MASK 0x00002000L
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_DOWN_MASK 0x00004000L
+#define RLC_GPM_STAT__DYN_WGP_POWERING_UP_MASK 0x00008000L
+#define RLC_GPM_STAT__DYN_WGP_POWERING_DOWN_MASK 0x00010000L
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_RLCS_GPM_STAT
+#define RLC_RLCS_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_RLCS_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_RLCS_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_RLCS_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_RLCS_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_RLCS_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_RLCS_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_RLCS_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_RLCS_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_RLCS_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_RLCS_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_UP__SHIFT 0xd
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_DOWN__SHIFT 0xe
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_UP__SHIFT 0xf
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_DOWN__SHIFT 0x10
+#define RLC_RLCS_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_RLCS_GPM_STAT__CMP_POWER_STATUS__SHIFT 0x12
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_RLCS_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_RLCS_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_RLCS_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_RLCS_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_RLCS_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_RLCS_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_RLCS_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_RLCS_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_RLCS_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_RLCS_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_RLCS_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_RLCS_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_RLCS_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_RLCS_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_RLCS_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_UP_MASK 0x00002000L
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_DOWN_MASK 0x00004000L
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_UP_MASK 0x00008000L
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_DOWN_MASK 0x00010000L
+#define RLC_RLCS_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_RLCS_GPM_STAT__CMP_POWER_STATUS_MASK 0x00040000L
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_RLCS_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_RLCS_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_RLCS_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_RLCS_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_RLCS_ABORTED_PD_SEQUENCE
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__APS__SHIFT 0x0
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__RESERVED__SHIFT 0x10
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__APS_MASK 0x0000FFFFL
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_DIDT_FORCE_STALL
+#define RLC_RLCS_DIDT_FORCE_STALL__DFS__SHIFT 0x0
+#define RLC_RLCS_DIDT_FORCE_STALL__VALID__SHIFT 0x3
+#define RLC_RLCS_DIDT_FORCE_STALL__RESERVED__SHIFT 0x4
+#define RLC_RLCS_DIDT_FORCE_STALL__DFS_MASK 0x00000007L
+#define RLC_RLCS_DIDT_FORCE_STALL__VALID_MASK 0x00000008L
+#define RLC_RLCS_DIDT_FORCE_STALL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_IOV_CMD_STATUS
+#define RLC_RLCS_IOV_CMD_STATUS__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_CMD_STATUS__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IOV_CNTX_LOC_SIZE
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__RESERVED__SHIFT 0x8
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__DATA_MASK 0x000000FFL
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__RESERVED_MASK 0xFFFFFF00L
+//RLC_RLCS_IOV_SCH_BLOCK
+#define RLC_RLCS_IOV_SCH_BLOCK__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_SCH_BLOCK__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IOV_VM_BUSY_STATUS
+#define RLC_RLCS_IOV_VM_BUSY_STATUS__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_VM_BUSY_STATUS__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GPM_STAT_2
+#define RLC_RLCS_GPM_STAT_2__TC_TRANS_ERROR__SHIFT 0x0
+#define RLC_RLCS_GPM_STAT_2__RLC_PWR_NON3D_STALLED__SHIFT 0x1
+#define RLC_RLCS_GPM_STAT_2__GFX_PWR_STALLED_STATUS__SHIFT 0x2
+#define RLC_RLCS_GPM_STAT_2__GFX_ULV_STATUS__SHIFT 0x3
+#define RLC_RLCS_GPM_STAT_2__GFX_GENERAL_STATUS__SHIFT 0x4
+#define RLC_RLCS_GPM_STAT_2__RESERVED__SHIFT 0x5
+#define RLC_RLCS_GPM_STAT_2__TC_TRANS_ERROR_MASK 0x00000001L
+#define RLC_RLCS_GPM_STAT_2__RLC_PWR_NON3D_STALLED_MASK 0x00000002L
+#define RLC_RLCS_GPM_STAT_2__GFX_PWR_STALLED_STATUS_MASK 0x00000004L
+#define RLC_RLCS_GPM_STAT_2__GFX_ULV_STATUS_MASK 0x00000008L
+#define RLC_RLCS_GPM_STAT_2__GFX_GENERAL_STATUS_MASK 0x00000010L
+#define RLC_RLCS_GPM_STAT_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_GRBM_SOFT_RESET
+#define RLC_RLCS_GRBM_SOFT_RESET__RESET__SHIFT 0x0
+#define RLC_RLCS_GRBM_SOFT_RESET__RESERVED__SHIFT 0x1
+#define RLC_RLCS_GRBM_SOFT_RESET__RESET_MASK 0x00000001L
+#define RLC_RLCS_GRBM_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_PG_CHANGE_STATUS
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_CNTL_CHANGED__SHIFT 0x0
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_REG_CHANGED__SHIFT 0x1
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_REQ_CHANGED__SHIFT 0x3
+#define RLC_RLCS_PG_CHANGE_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_CNTL_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_REG_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_REQ_CHANGED_MASK 0x00000008L
+#define RLC_RLCS_PG_CHANGE_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_PG_CHANGE_READ
+#define RLC_RLCS_PG_CHANGE_READ__RESERVED__SHIFT 0x0
+#define RLC_RLCS_PG_CHANGE_READ__PG_REG_CHANGED__SHIFT 0x1
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_REQ_CHANGED__SHIFT 0x3
+#define RLC_RLCS_PG_CHANGE_READ__RESERVED_MASK 0x00000001L
+#define RLC_RLCS_PG_CHANGE_READ__PG_REG_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_REQ_CHANGED_MASK 0x00000008L
+//RLC_RLCS_IH_SEMAPHORE
+#define RLC_RLCS_IH_SEMAPHORE__CLIENT_ID__SHIFT 0x0
+#define RLC_RLCS_IH_SEMAPHORE__RESERVED__SHIFT 0x5
+#define RLC_RLCS_IH_SEMAPHORE__CLIENT_ID_MASK 0x0000001FL
+#define RLC_RLCS_IH_SEMAPHORE__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_IH_COOKIE_SEMAPHORE
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__CLIENT_ID__SHIFT 0x0
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__RESERVED__SHIFT 0x5
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__CLIENT_ID_MASK 0x0000001FL
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_WGP_STATUS
+#define RLC_RLCS_WGP_STATUS__CS_WORK_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_WGP_STATUS__STATIC_WGP_STATUS_CHANGED__SHIFT 0x1
+#define RLC_RLCS_WGP_STATUS__DYMANIC_WGP_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_WGP_STATUS__STATIC_PERWGP_PD_INCOMPLETE__SHIFT 0x3
+#define RLC_RLCS_WGP_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_WGP_STATUS__CS_WORK_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_WGP_STATUS__STATIC_WGP_STATUS_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_WGP_STATUS__DYMANIC_WGP_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_WGP_STATUS__STATIC_PERWGP_PD_INCOMPLETE_MASK 0x00000008L
+#define RLC_RLCS_WGP_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_WGP_READ
+#define RLC_RLCS_WGP_READ__CS_WORK_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_WGP_READ__STATIC_WGP_STATUS_CHANGED__SHIFT 0x1
+#define RLC_RLCS_WGP_READ__DYMANIC_WGP_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_WGP_READ__RESERVED__SHIFT 0x3
+#define RLC_RLCS_WGP_READ__CS_WORK_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_WGP_READ__STATIC_WGP_STATUS_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_WGP_READ__DYMANIC_WGP_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_WGP_READ__RESERVED_MASK 0xFFFFFFF8L
+//RLC_RLCS_CP_INT_CTRL_1
+#define RLC_RLCS_CP_INT_CTRL_1__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_CP_INT_CTRL_1__RESERVED__SHIFT 0x1
+#define RLC_RLCS_CP_INT_CTRL_1__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_CP_INT_CTRL_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_CP_INT_CTRL_2
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_EN__SHIFT 0x0
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_EN__SHIFT 0x1
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_ACTIVE__SHIFT 0x2
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_ACTIVE__SHIFT 0x3
+#define RLC_RLCS_CP_INT_CTRL_2__INTERRUPT_PENDING__SHIFT 0x4
+#define RLC_RLCS_CP_INT_CTRL_2__RESERVED__SHIFT 0x5
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_EN_MASK 0x00000001L
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_EN_MASK 0x00000002L
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_ACTIVE_MASK 0x00000004L
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_ACTIVE_MASK 0x00000008L
+#define RLC_RLCS_CP_INT_CTRL_2__INTERRUPT_PENDING_MASK 0x00000010L
+#define RLC_RLCS_CP_INT_CTRL_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_CP_INT_INFO_1
+#define RLC_RLCS_CP_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_RLCS_CP_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_CP_INT_INFO_2
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_CP_INT_INFO_2__RESERVED__SHIFT 0x19
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_ID_MASK 0x01FF0000L
+#define RLC_RLCS_CP_INT_INFO_2__RESERVED_MASK 0xFE000000L
+//RLC_RLCS_SPM_INT_CTRL
+#define RLC_RLCS_SPM_INT_CTRL__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_CTRL__RESERVED__SHIFT 0x1
+#define RLC_RLCS_SPM_INT_CTRL__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_SPM_INT_CTRL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_SPM_INT_INFO_1
+#define RLC_RLCS_SPM_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_SPM_INT_INFO_2
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_SPM_INT_INFO_2__RESERVED__SHIFT 0x19
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_ID_MASK 0x01FF0000L
+#define RLC_RLCS_SPM_INT_INFO_2__RESERVED_MASK 0xFE000000L
+//RLC_RLCS_DSM_TRIG
+#define RLC_RLCS_DSM_TRIG__START__SHIFT 0x0
+#define RLC_RLCS_DSM_TRIG__RESERVED__SHIFT 0x1
+#define RLC_RLCS_DSM_TRIG__START_MASK 0x00000001L
+#define RLC_RLCS_DSM_TRIG__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_BOOTLOAD_STATUS
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_INIT_DONE__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_DONE__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_DONE__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_STATUS__RESERVED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_STATUS__BOOTLOAD_COMPLETE__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_INIT_DONE_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_DONE_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_DONE_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_STATUS__RESERVED_MASK 0x7FFFFFE0L
+#define RLC_RLCS_BOOTLOAD_STATUS__BOOTLOAD_COMPLETE_MASK 0x80000000L
+//RLC_RLCS_POWER_BRAKE_CNTL
+#define RLC_RLCS_POWER_BRAKE_CNTL__POWER_BRAKE__SHIFT 0x0
+#define RLC_RLCS_POWER_BRAKE_CNTL__INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_POWER_BRAKE_CNTL__MAX_HYSTERESIS__SHIFT 0x2
+#define RLC_RLCS_POWER_BRAKE_CNTL__HYSTERESIS_CNT__SHIFT 0xa
+#define RLC_RLCS_POWER_BRAKE_CNTL__RESERVED__SHIFT 0x12
+#define RLC_RLCS_POWER_BRAKE_CNTL__POWER_BRAKE_MASK 0x00000001L
+#define RLC_RLCS_POWER_BRAKE_CNTL__INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_POWER_BRAKE_CNTL__MAX_HYSTERESIS_MASK 0x000003FCL
+#define RLC_RLCS_POWER_BRAKE_CNTL__HYSTERESIS_CNT_MASK 0x0003FC00L
+#define RLC_RLCS_POWER_BRAKE_CNTL__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_POWER_BRAKE_CNTL_TH1
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__POWER_BRAKE__SHIFT 0x0
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__MAX_HYSTERESIS__SHIFT 0x2
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__HYSTERESIS_CNT__SHIFT 0xa
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__POWER_BRAKE_MASK 0x00000001L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__MAX_HYSTERESIS_MASK 0x000003FCL
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__HYSTERESIS_CNT_MASK 0x0003FC00L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_GRBM_IDLE_BUSY_STAT
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__GRBM_RLC_GC_STAT_IDLE__SHIFT 0x0
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY__SHIFT 0x10
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY__SHIFT 0x11
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY__SHIFT 0x12
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY__SHIFT 0x13
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY__SHIFT 0x14
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY__SHIFT 0x15
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY__SHIFT 0x16
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY__SHIFT 0x17
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_CHANGED__SHIFT 0x18
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_CHANGED__SHIFT 0x19
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_CHANGED__SHIFT 0x1a
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_CHANGED__SHIFT 0x1b
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_CHANGED__SHIFT 0x1c
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_CHANGED__SHIFT 0x1d
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_CHANGED__SHIFT 0x1e
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_CHANGED__SHIFT 0x1f
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__GRBM_RLC_GC_STAT_IDLE_MASK 0x00000003L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_MASK 0x00010000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_MASK 0x00020000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_MASK 0x00040000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_MASK 0x00080000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_MASK 0x00100000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_MASK 0x00200000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_MASK 0x00400000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_MASK 0x00800000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_CHANGED_MASK 0x01000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_CHANGED_MASK 0x02000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_CHANGED_MASK 0x04000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_CHANGED_MASK 0x08000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_CHANGED_MASK 0x10000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_CHANGED_MASK 0x20000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_CHANGED_MASK 0x40000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_CHANGED_MASK 0x80000000L
+//RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA0_BUSY_INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA1_BUSY_INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA2_BUSY_INT_CLEAR__SHIFT 0x2
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA3_BUSY_INT_CLEAR__SHIFT 0x3
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA4_BUSY_INT_CLEAR__SHIFT 0x4
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA5_BUSY_INT_CLEAR__SHIFT 0x5
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA6_BUSY_INT_CLEAR__SHIFT 0x6
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA7_BUSY_INT_CLEAR__SHIFT 0x7
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA0_BUSY_INT_CLEAR_MASK 0x00000001L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA1_BUSY_INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA2_BUSY_INT_CLEAR_MASK 0x00000004L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA3_BUSY_INT_CLEAR_MASK 0x00000008L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA4_BUSY_INT_CLEAR_MASK 0x00000010L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA5_BUSY_INT_CLEAR_MASK 0x00000020L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA6_BUSY_INT_CLEAR_MASK 0x00000040L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA7_BUSY_INT_CLEAR_MASK 0x00000080L
+//RLC_RLCS_CMP_IDLE_CNTL
+#define RLC_RLCS_CMP_IDLE_CNTL__INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_HYST__SHIFT 0x1
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE__SHIFT 0x2
+#define RLC_RLCS_CMP_IDLE_CNTL__MAX_HYSTERESIS__SHIFT 0x3
+#define RLC_RLCS_CMP_IDLE_CNTL__HYSTERESIS_CNT__SHIFT 0xb
+#define RLC_RLCS_CMP_IDLE_CNTL__RESERVED__SHIFT 0x13
+#define RLC_RLCS_CMP_IDLE_CNTL__INT_CLEAR_MASK 0x00000001L
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_HYST_MASK 0x00000002L
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_MASK 0x00000004L
+#define RLC_RLCS_CMP_IDLE_CNTL__MAX_HYSTERESIS_MASK 0x000007F8L
+#define RLC_RLCS_CMP_IDLE_CNTL__HYSTERESIS_CNT_MASK 0x0007F800L
+#define RLC_RLCS_CMP_IDLE_CNTL__RESERVED_MASK 0xFFF80000L
+//RLC_RLCS_GENERAL_0
+#define RLC_RLCS_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_1
+#define RLC_RLCS_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_2
+#define RLC_RLCS_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_3
+#define RLC_RLCS_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_4
+#define RLC_RLCS_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_5
+#define RLC_RLCS_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_6
+#define RLC_RLCS_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_7
+#define RLC_RLCS_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_8
+#define RLC_RLCS_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_9
+#define RLC_RLCS_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_10
+#define RLC_RLCS_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_11
+#define RLC_RLCS_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_12
+#define RLC_RLCS_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_13
+#define RLC_RLCS_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_14
+#define RLC_RLCS_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_15
+#define RLC_RLCS_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_16
+#define RLC_RLCS_GENERAL_16__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_16__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_AUXILIARY_REG_1
+#define RLC_RLCS_AUXILIARY_REG_1__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_1__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_2
+#define RLC_RLCS_AUXILIARY_REG_2__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_2__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_2__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_2__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_3
+#define RLC_RLCS_AUXILIARY_REG_3__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_3__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_3__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_3__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_4
+#define RLC_RLCS_AUXILIARY_REG_4__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_4__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_4__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_4__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_SPM_SQTT_MODE
+#define RLC_RLCS_SPM_SQTT_MODE__MODE__SHIFT 0x0
+#define RLC_RLCS_SPM_SQTT_MODE__MODE_MASK 0x00000001L
+//RLC_RLCS_CP_DMA_SRCID_OVER
+#define RLC_RLCS_CP_DMA_SRCID_OVER__SRCID_OVERRIDE__SHIFT 0x0
+#define RLC_RLCS_CP_DMA_SRCID_OVER__SRCID_OVERRIDE_MASK 0x00000001L
+//RLC_RLCS_BOOTLOAD_ID_STATUS1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_0_LOADED__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_1_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_2_LOADED__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_3_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_4_LOADED__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_5_LOADED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_6_LOADED__SHIFT 0x6
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_7_LOADED__SHIFT 0x7
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_8_LOADED__SHIFT 0x8
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_9_LOADED__SHIFT 0x9
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_10_LOADED__SHIFT 0xa
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_11_LOADED__SHIFT 0xb
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_12_LOADED__SHIFT 0xc
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_13_LOADED__SHIFT 0xd
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_14_LOADED__SHIFT 0xe
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_15_LOADED__SHIFT 0xf
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_16_LOADED__SHIFT 0x10
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_17_LOADED__SHIFT 0x11
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_18_LOADED__SHIFT 0x12
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_19_LOADED__SHIFT 0x13
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_20_LOADED__SHIFT 0x14
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_21_LOADED__SHIFT 0x15
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_22_LOADED__SHIFT 0x16
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_23_LOADED__SHIFT 0x17
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_24_LOADED__SHIFT 0x18
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_25_LOADED__SHIFT 0x19
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_26_LOADED__SHIFT 0x1a
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_27_LOADED__SHIFT 0x1b
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_28_LOADED__SHIFT 0x1c
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_29_LOADED__SHIFT 0x1d
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_30_LOADED__SHIFT 0x1e
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_31_LOADED__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_0_LOADED_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_1_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_2_LOADED_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_3_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_4_LOADED_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_5_LOADED_MASK 0x00000020L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_6_LOADED_MASK 0x00000040L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_7_LOADED_MASK 0x00000080L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_8_LOADED_MASK 0x00000100L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_9_LOADED_MASK 0x00000200L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_10_LOADED_MASK 0x00000400L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_11_LOADED_MASK 0x00000800L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_12_LOADED_MASK 0x00001000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_13_LOADED_MASK 0x00002000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_14_LOADED_MASK 0x00004000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_15_LOADED_MASK 0x00008000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_16_LOADED_MASK 0x00010000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_17_LOADED_MASK 0x00020000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_18_LOADED_MASK 0x00040000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_19_LOADED_MASK 0x00080000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_20_LOADED_MASK 0x00100000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_21_LOADED_MASK 0x00200000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_22_LOADED_MASK 0x00400000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_23_LOADED_MASK 0x00800000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_24_LOADED_MASK 0x01000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_25_LOADED_MASK 0x02000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_26_LOADED_MASK 0x04000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_27_LOADED_MASK 0x08000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_28_LOADED_MASK 0x10000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_29_LOADED_MASK 0x20000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_30_LOADED_MASK 0x40000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_31_LOADED_MASK 0x80000000L
+//RLC_RLCS_BOOTLOAD_ID_STATUS2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_32_LOADED__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_33_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_34_LOADED__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_35_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_36_LOADED__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_37_LOADED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_38_LOADED__SHIFT 0x6
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_39_LOADED__SHIFT 0x7
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_40_LOADED__SHIFT 0x8
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_41_LOADED__SHIFT 0x9
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_42_LOADED__SHIFT 0xa
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_43_LOADED__SHIFT 0xb
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_44_LOADED__SHIFT 0xc
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_45_LOADED__SHIFT 0xd
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_46_LOADED__SHIFT 0xe
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_47_LOADED__SHIFT 0xf
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_48_LOADED__SHIFT 0x10
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_49_LOADED__SHIFT 0x11
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_50_LOADED__SHIFT 0x12
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_51_LOADED__SHIFT 0x13
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_52_LOADED__SHIFT 0x14
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_53_LOADED__SHIFT 0x15
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_54_LOADED__SHIFT 0x16
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_55_LOADED__SHIFT 0x17
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_56_LOADED__SHIFT 0x18
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_57_LOADED__SHIFT 0x19
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_58_LOADED__SHIFT 0x1a
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_59_LOADED__SHIFT 0x1b
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_60_LOADED__SHIFT 0x1c
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_61_LOADED__SHIFT 0x1d
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_62_LOADED__SHIFT 0x1e
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_63_LOADED__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_32_LOADED_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_33_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_34_LOADED_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_35_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_36_LOADED_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_37_LOADED_MASK 0x00000020L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_38_LOADED_MASK 0x00000040L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_39_LOADED_MASK 0x00000080L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_40_LOADED_MASK 0x00000100L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_41_LOADED_MASK 0x00000200L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_42_LOADED_MASK 0x00000400L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_43_LOADED_MASK 0x00000800L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_44_LOADED_MASK 0x00001000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_45_LOADED_MASK 0x00002000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_46_LOADED_MASK 0x00004000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_47_LOADED_MASK 0x00008000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_48_LOADED_MASK 0x00010000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_49_LOADED_MASK 0x00020000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_50_LOADED_MASK 0x00040000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_51_LOADED_MASK 0x00080000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_52_LOADED_MASK 0x00100000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_53_LOADED_MASK 0x00200000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_54_LOADED_MASK 0x00400000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_55_LOADED_MASK 0x00800000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_56_LOADED_MASK 0x01000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_57_LOADED_MASK 0x02000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_58_LOADED_MASK 0x04000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_59_LOADED_MASK 0x08000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_60_LOADED_MASK 0x10000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_61_LOADED_MASK 0x20000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_62_LOADED_MASK 0x40000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_63_LOADED_MASK 0x80000000L
+//RLC_RLCS_IMU_VIDCHG_CNTL
+#define RLC_RLCS_IMU_VIDCHG_CNTL__REQ__SHIFT 0x0
+#define RLC_RLCS_IMU_VIDCHG_CNTL__DATA__SHIFT 0x1
+#define RLC_RLCS_IMU_VIDCHG_CNTL__PSIEN__SHIFT 0xa
+#define RLC_RLCS_IMU_VIDCHG_CNTL__ACK__SHIFT 0xb
+#define RLC_RLCS_IMU_VIDCHG_CNTL__RESERVED__SHIFT 0xc
+#define RLC_RLCS_IMU_VIDCHG_CNTL__REQ_MASK 0x00000001L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__DATA_MASK 0x000003FEL
+#define RLC_RLCS_IMU_VIDCHG_CNTL__PSIEN_MASK 0x00000400L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__ACK_MASK 0x00000800L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCS_EDC_INT_CNTL
+#define RLC_RLCS_EDC_INT_CNTL__EDC_EVENT_INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_EDC_INT_CNTL__EDC_EVENT_INT_CLEAR_MASK 0x00000001L
+//RLC_RLCS_KMD_LOG_CNTL1
+#define RLC_RLCS_KMD_LOG_CNTL1__DATA__SHIFT 0x0
+#define RLC_RLCS_KMD_LOG_CNTL1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_KMD_LOG_CNTL2
+#define RLC_RLCS_KMD_LOG_CNTL2__DATA__SHIFT 0x0
+#define RLC_RLCS_KMD_LOG_CNTL2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GPM_LEGACY_INT_STAT
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GC_CAC_EDC_EVENT_CHANGED__SHIFT 0x0
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GFX_POWER_BRAKE_CHANGED__SHIFT 0x1
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GC_CAC_EDC_EVENT_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GFX_POWER_BRAKE_CHANGED_MASK 0x00000002L
+//RLC_RLCS_GPM_LEGACY_INT_DISABLE
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GC_CAC_EDC_EVENT_CHANGED__SHIFT 0x0
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GFX_POWER_BRAKE_CHANGED__SHIFT 0x1
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GC_CAC_EDC_EVENT_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GFX_POWER_BRAKE_CHANGED_MASK 0x00000002L
+//RLC_RLCS_SRM_SRCID_CNTL
+#define RLC_RLCS_SRM_SRCID_CNTL__SRCID__SHIFT 0x0
+#define RLC_RLCS_SRM_SRCID_CNTL__SRCID_MASK 0x00000007L
+//RLC_RLCS_GCR_DATA_0
+#define RLC_RLCS_GCR_DATA_0__PHASE_0__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_0__PHASE_1__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_0__PHASE_0_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_0__PHASE_1_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_1
+#define RLC_RLCS_GCR_DATA_1__PHASE_2__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_1__PHASE_3__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_1__PHASE_2_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_1__PHASE_3_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_2
+#define RLC_RLCS_GCR_DATA_2__PHASE_4__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_2__PHASE_5__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_2__PHASE_4_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_2__PHASE_5_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_3
+#define RLC_RLCS_GCR_DATA_3__PHASE_6__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_3__PHASE_7__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_3__PHASE_6_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_3__PHASE_7_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_STATUS
+#define RLC_RLCS_GCR_STATUS__GCR_BUSY__SHIFT 0x0
+#define RLC_RLCS_GCR_STATUS__GCR_OUT_COUNT__SHIFT 0x1
+#define RLC_RLCS_GCR_STATUS__RESERVED_2__SHIFT 0x5
+#define RLC_RLCS_GCR_STATUS__GCRIU_CLI_RSP_TAG__SHIFT 0x8
+#define RLC_RLCS_GCR_STATUS__RESERVED__SHIFT 0x10
+#define RLC_RLCS_GCR_STATUS__GCR_BUSY_MASK 0x00000001L
+#define RLC_RLCS_GCR_STATUS__GCR_OUT_COUNT_MASK 0x0000001EL
+#define RLC_RLCS_GCR_STATUS__RESERVED_2_MASK 0x000000E0L
+#define RLC_RLCS_GCR_STATUS__GCRIU_CLI_RSP_TAG_MASK 0x0000FF00L
+#define RLC_RLCS_GCR_STATUS__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_PERFMON_CLK_CNTL_UCODE
+#define RLC_RLCS_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE__SHIFT 0x0
+#define RLC_RLCS_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE_MASK 0x00000001L
+//RLC_RLCS_UTCL2_CNTL
+#define RLC_RLCS_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE__SHIFT 0x1
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE__SHIFT 0x2
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_VALUE__SHIFT 0x3
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_VALUE__SHIFT 0x5
+#define RLC_RLCS_UTCL2_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x6
+#define RLC_RLCS_UTCL2_CNTL__RESERVED__SHIFT 0x7
+#define RLC_RLCS_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_MASK 0x00000002L
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_MASK 0x00000004L
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_VALUE_MASK 0x00000018L
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_VALUE_MASK 0x00000020L
+#define RLC_RLCS_UTCL2_CNTL__IGNORE_PTE_PERMISSION_MASK 0x00000040L
+#define RLC_RLCS_UTCL2_CNTL__RESERVED_MASK 0xFFFFFF80L
+//RLC_RLCS_IMU_RLC_MSG_DATA0
+#define RLC_RLCS_IMU_RLC_MSG_DATA0__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA1
+#define RLC_RLCS_IMU_RLC_MSG_DATA1__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA2
+#define RLC_RLCS_IMU_RLC_MSG_DATA2__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA3
+#define RLC_RLCS_IMU_RLC_MSG_DATA3__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA3__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA4
+#define RLC_RLCS_IMU_RLC_MSG_DATA4__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA4__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_CONTROL
+#define RLC_RLCS_IMU_RLC_MSG_CONTROL__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_CONTROL__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_CNTL
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__DONETOG__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__CHGTOG__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__DONETOG_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__CHGTOG_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_RLC_IMU_MSG_DATA0
+#define RLC_RLCS_RLC_IMU_MSG_DATA0__DATA__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_DATA0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_RLC_IMU_MSG_CONTROL
+#define RLC_RLCS_RLC_IMU_MSG_CONTROL__DATA__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_CONTROL__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_RLC_IMU_MSG_CNTL
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__CHGTOG__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__DONETOG__SHIFT 0x1
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__CHGTOG_MASK 0x00000001L
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__DONETOG_MASK 0x00000002L
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__CURRENT__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__VOLTAGE__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__CURRENT_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__VOLTAGE_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__TEMPERATURE1__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__TEMPERATURE1_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RLC_MUTEX_CNTL
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__REQ__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__ACQUIRE__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__REQ_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__ACQUIRE_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_RLC_STATUS
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_GFXOFF__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_FA_DCS__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_14_2__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_STATUS__DISABLE_GFXCLK_DS__SHIFT 0xf
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_GFXOFF_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_FA_DCS_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_14_2_MASK 0x00007FFCL
+#define RLC_RLCS_IMU_RLC_STATUS__DISABLE_GFXCLK_DS_MASK 0x00008000L
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_RLC_IMU_STATUS
+#define RLC_RLCS_RLC_IMU_STATUS__PWR_DOWN_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_STATUS__RLC_ALIVE__SHIFT 0x1
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_3_2__SHIFT 0x2
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_RLC_IMU_STATUS__PWR_DOWN_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_RLC_IMU_STATUS__RLC_ALIVE_MASK 0x00000002L
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_3_2_MASK 0x0000000CL
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_IMU_RAM_DATA_1
+#define RLC_RLCS_IMU_RAM_DATA_1__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_1_LSB
+#define RLC_RLCS_IMU_RAM_ADDR_1_LSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_1_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_1_MSB
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__DATA_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RAM_DATA_0
+#define RLC_RLCS_IMU_RAM_DATA_0__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_0_LSB
+#define RLC_RLCS_IMU_RAM_ADDR_0_LSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_0_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_0_MSB
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__DATA_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RAM_CNTL
+#define RLC_RLCS_IMU_RAM_CNTL__REQTOG__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_CNTL__ACKTOG__SHIFT 0x1
+#define RLC_RLCS_IMU_RAM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RAM_CNTL__REQTOG_MASK 0x00000001L
+#define RLC_RLCS_IMU_RAM_CNTL__ACKTOG_MASK 0x00000002L
+#define RLC_RLCS_IMU_RAM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_GFX_DOORBELL_FENCE
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ENABLE__SHIFT 0x0
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ACK__SHIFT 0x1
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ENABLE_MASK 0x00000001L
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ACK_MASK 0x00000002L
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_CNTL_1
+#define RLC_RLCS_SDMA_INT_CNTL_1__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESP_ID__SHIFT 0x1
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESERVED__SHIFT 0x2
+#define RLC_RLCS_SDMA_INT_CNTL_1__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESP_ID_MASK 0x00000002L
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_CNTL_2
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_EN__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_ACTIVE__SHIFT 0x1
+#define RLC_RLCS_SDMA_INT_CNTL_2__RESERVED__SHIFT 0x2
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_EN_MASK 0x00000001L
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_ACTIVE_MASK 0x00000002L
+#define RLC_RLCS_SDMA_INT_CNTL_2__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_STAT
+#define RLC_RLCS_SDMA_INT_STAT__REQ_IDLE_HIST__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_STAT__REQ_BUSY_HIST__SHIFT 0x8
+#define RLC_RLCS_SDMA_INT_STAT__LAST_SDMA_RLC_INT_ID__SHIFT 0x10
+#define RLC_RLCS_SDMA_INT_STAT__SDMA_RLC_INT_PENDING__SHIFT 0x11
+#define RLC_RLCS_SDMA_INT_STAT__RESERVED__SHIFT 0x12
+#define RLC_RLCS_SDMA_INT_STAT__REQ_IDLE_HIST_MASK 0x000000FFL
+#define RLC_RLCS_SDMA_INT_STAT__REQ_BUSY_HIST_MASK 0x0000FF00L
+#define RLC_RLCS_SDMA_INT_STAT__LAST_SDMA_RLC_INT_ID_MASK 0x00010000L
+#define RLC_RLCS_SDMA_INT_STAT__SDMA_RLC_INT_PENDING_MASK 0x00020000L
+#define RLC_RLCS_SDMA_INT_STAT__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_SDMA_INT_INFO
+#define RLC_RLCS_SDMA_INT_INFO__REQ_IDLE_TO_FW__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_INFO__REQ_BUSY_TO_FW__SHIFT 0x8
+#define RLC_RLCS_SDMA_INT_INFO__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_SDMA_INT_INFO__RESERVED__SHIFT 0x11
+#define RLC_RLCS_SDMA_INT_INFO__REQ_IDLE_TO_FW_MASK 0x000000FFL
+#define RLC_RLCS_SDMA_INT_INFO__REQ_BUSY_TO_FW_MASK 0x0000FF00L
+#define RLC_RLCS_SDMA_INT_INFO__INTERRUPT_ID_MASK 0x00010000L
+#define RLC_RLCS_SDMA_INT_INFO__RESERVED_MASK 0xFFFE0000L
+//RLC_RLCS_PMM_CGCG_CNTL
+#define RLC_RLCS_PMM_CGCG_CNTL__VALID__SHIFT 0x0
+#define RLC_RLCS_PMM_CGCG_CNTL__CLEAN__SHIFT 0x1
+#define RLC_RLCS_PMM_CGCG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_PMM_CGCG_CNTL__VALID_MASK 0x00000001L
+#define RLC_RLCS_PMM_CGCG_CNTL__CLEAN_MASK 0x00000002L
+#define RLC_RLCS_PMM_CGCG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_GFX_MEM_POWER_CTRL_LO
+#define RLC_RLCS_GFX_MEM_POWER_CTRL_LO__DATA__SHIFT 0x0
+#define RLC_RLCS_GFX_MEM_POWER_CTRL_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GFX_RM_CNTL
+#define RLC_RLCS_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
+#define RLC_RLCS_GFX_RM_CNTL__RESERVED__SHIFT 0x1
+#define RLC_RLCS_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
+#define RLC_RLCS_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_IH_CTRL_1
+#define RLC_RLCS_IH_CTRL_1__IH_CONTEXT_ID_1__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_1__IH_CONTEXT_ID_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_IH_CTRL_2
+#define RLC_RLCS_IH_CTRL_2__IH_CONTEXT_ID_2__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_2__IH_RING_ID__SHIFT 0x8
+#define RLC_RLCS_IH_CTRL_2__IH_VM_ID__SHIFT 0x10
+#define RLC_RLCS_IH_CTRL_2__RESERVED__SHIFT 0x14
+#define RLC_RLCS_IH_CTRL_2__IH_CONTEXT_ID_2_MASK 0x000000FFL
+#define RLC_RLCS_IH_CTRL_2__IH_RING_ID_MASK 0x0000FF00L
+#define RLC_RLCS_IH_CTRL_2__IH_VM_ID_MASK 0x000F0000L
+#define RLC_RLCS_IH_CTRL_2__RESERVED_MASK 0xFFF00000L
+//RLC_RLCS_IH_CTRL_3
+#define RLC_RLCS_IH_CTRL_3__IH_SOURCE_ID__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_3__IH_VF_ID__SHIFT 0x8
+#define RLC_RLCS_IH_CTRL_3__IH_VF__SHIFT 0xd
+#define RLC_RLCS_IH_CTRL_3__RESERVED__SHIFT 0xe
+#define RLC_RLCS_IH_CTRL_3__IH_SOURCE_ID_MASK 0x000000FFL
+#define RLC_RLCS_IH_CTRL_3__IH_VF_ID_MASK 0x00001F00L
+#define RLC_RLCS_IH_CTRL_3__IH_VF_MASK 0x00002000L
+#define RLC_RLCS_IH_CTRL_3__RESERVED_MASK 0xFFFFC000L
+//RLC_RLCS_IH_STATUS
+#define RLC_RLCS_IH_STATUS__IH_CREDIT_COUNT__SHIFT 0x0
+#define RLC_RLCS_IH_STATUS__IH_BUSY__SHIFT 0x6
+#define RLC_RLCS_IH_STATUS__IH_WRITE_DONE__SHIFT 0x7
+#define RLC_RLCS_IH_STATUS__RESERVED__SHIFT 0x8
+#define RLC_RLCS_IH_STATUS__IH_CREDIT_COUNT_MASK 0x0000003FL
+#define RLC_RLCS_IH_STATUS__IH_BUSY_MASK 0x00000040L
+#define RLC_RLCS_IH_STATUS__IH_WRITE_DONE_MASK 0x00000080L
+#define RLC_RLCS_IH_STATUS__RESERVED_MASK 0xFFFFFF00L
+//RLC_RLCS_DEC_END
+
+
+// addressBlock: gc_pfvfdec_rlc
+//RLC_SAFE_MODE
+#define RLC_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SPM_SAMPLE_CNT
+#define RLC_SPM_SAMPLE_CNT__COUNT__SHIFT 0x0
+#define RLC_SPM_SAMPLE_CNT__COUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_MC_CNTL
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x6
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x7
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x8
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x9
+#define RLC_SPM_MC_CNTL__RLC_SPM_BC__SHIFT 0xc
+#define RLC_SPM_MC_CNTL__RLC_SPM_RO__SHIFT 0xd
+#define RLC_SPM_MC_CNTL__RLC_SPM_VOL__SHIFT 0xe
+#define RLC_SPM_MC_CNTL__RLC_SPM_NOFILL__SHIFT 0xf
+#define RLC_SPM_MC_CNTL__RESERVED_3__SHIFT 0x10
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC__SHIFT 0x12
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_OVER__SHIFT 0x13
+#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0x14
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000030L
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000040L
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000080L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000100L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000E00L
+#define RLC_SPM_MC_CNTL__RLC_SPM_BC_MASK 0x00001000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_RO_MASK 0x00002000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_VOL_MASK 0x00004000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_NOFILL_MASK 0x00008000L
+#define RLC_SPM_MC_CNTL__RESERVED_3_MASK 0x00030000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_MASK 0x00040000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_OVER_MASK 0x00080000L
+#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFF00000L
+//RLC_SPM_INT_CNTL
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
+#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
+#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_STATUS
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
+#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
+#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_INFO_1
+#define RLC_SPM_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_SPM_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_SPM_INT_INFO_2
+#define RLC_SPM_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_SPM_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_SPM_INT_INFO_2__RESERVED__SHIFT 0x18
+#define RLC_SPM_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_SPM_INT_INFO_2__INTERRUPT_ID_MASK 0x00FF0000L
+#define RLC_SPM_INT_INFO_2__RESERVED_MASK 0xFF000000L
+//RLC_CSIB_ADDR_LO
+#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
+//RLC_CSIB_ADDR_HI
+#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
+//RLC_CSIB_LENGTH
+#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
+#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
+//RLC_CP_SCHEDULERS
+#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
+#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
+#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
+#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
+//RLC_CP_EOF_INT
+#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
+#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
+#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CP_EOF_INT_CNT
+#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
+#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT_0
+#define RLC_SPARE_INT_0__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_0__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_0__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_0__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_0__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_0__COMPLETE_MASK 0x80000000L
+//RLC_SPARE_INT_1
+#define RLC_SPARE_INT_1__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_1__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_1__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_1__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_1__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_1__COMPLETE_MASK 0x80000000L
+//RLC_SPARE_INT_2
+#define RLC_SPARE_INT_2__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_2__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_2__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_2__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_2__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_2__COMPLETE_MASK 0x80000000L
+//RLC_PACE_SPARE_INT
+#define RLC_PACE_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_PACE_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_PACE_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_PACE_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_PACE_SPARE_INT_1
+#define RLC_PACE_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_PACE_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_PACE_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_PACE_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_SPARE_INT_1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+
+
+// addressBlock: gc_pwrdec
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTX_SPI_DEBUG_CLK_CTRL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x0
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x6
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x7
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL__SHIFT 0x8
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x0000003FL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x00000040L
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x00000080L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL_MASK 0x00000100L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__PI1_OVERRIDE__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__PI0_OVERRIDE__SHIFT 0x18
+#define CGTT_VGT_CLK_CTRL__HS_OVERRIDE__SHIFT 0x19
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__PI1_OVERRIDE_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__PI0_OVERRIDE_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__HS_OVERRIDE_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__DIST_OVERRIDE__SHIFT 0x1a
+#define CGTT_IA_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_IA_CLK_CTRL__PCM_OVERRIDE__SHIFT 0x1c
+#define CGTT_IA_CLK_CTRL__TESS_DIST_OVERRIDE__SHIFT 0x1d
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__DIST_OVERRIDE_MASK 0x04000000L
+#define CGTT_IA_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__PCM_OVERRIDE_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__TESS_DIST_OVERRIDE_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__FE_OUT_OVERRIDE__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__ASSEMBLER_OVERRIDE__SHIFT 0x18
+#define CGTT_WD_CLK_CTRL__DMA_PROC0_OVERRIDE__SHIFT 0x19
+#define CGTT_WD_CLK_CTRL__DMA_PROC1_OVERRIDE__SHIFT 0x1a
+#define CGTT_WD_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_WD_CLK_CTRL__DMA_OVERRIDE__SHIFT 0x1c
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__FE_OUT_OVERRIDE_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__ASSEMBLER_OVERRIDE_MASK 0x01000000L
+#define CGTT_WD_CLK_CTRL__DMA_PROC0_OVERRIDE_MASK 0x02000000L
+#define CGTT_WD_CLK_CTRL__DMA_PROC1_OVERRIDE_MASK 0x04000000L
+#define CGTT_WD_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_WD_CLK_CTRL__DMA_OVERRIDE_MASK 0x10000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__CLIP_SU_PRIM_FIFO_CLK_OVERRIDE__SHIFT 0xc
+#define CGTT_PA_CLK_CTRL__SXIFCCG_CLK_OVERRIDE__SHIFT 0xd
+#define CGTT_PA_CLK_CTRL__AG_CLK_OVERRIDE__SHIFT 0xe
+#define CGTT_PA_CLK_CTRL__VE_VTE_REC_CLK_OVERRIDE__SHIFT 0xf
+#define CGTT_PA_CLK_CTRL__ENGG_CLK_OVERRIDE__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__CL_VTE_CLK_OVERRIDE__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__AG_REG_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__CL_VTE_REG_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PA_CLK_CTRL__VTE_REG_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__NGG_INDEX_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__NGG_CSB_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__SU_CL_REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__CLIP_SU_PRIM_FIFO_CLK_OVERRIDE_MASK 0x00001000L
+#define CGTT_PA_CLK_CTRL__SXIFCCG_CLK_OVERRIDE_MASK 0x00002000L
+#define CGTT_PA_CLK_CTRL__AG_CLK_OVERRIDE_MASK 0x00004000L
+#define CGTT_PA_CLK_CTRL__VE_VTE_REC_CLK_OVERRIDE_MASK 0x00008000L
+#define CGTT_PA_CLK_CTRL__ENGG_CLK_OVERRIDE_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__CL_VTE_CLK_OVERRIDE_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__AG_REG_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__CL_VTE_REG_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PA_CLK_CTRL__VTE_REG_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__NGG_INDEX_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__NGG_CSB_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__SU_CL_REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON__SHIFT 0xf
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_VRS_INTF_CLK_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL2__SC_DB_COURSE_MGCG_BUSY_ENABLE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL2__SC_DB_STAGE_IN_TP_PFFB_WR_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_Z_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_PROC_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_ACCUM_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL2__SC_DB_PFFB_RP_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL2__SC_DB_PKR_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_FREE_WAVE_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_WAVE_2_SC_SPI_WAVE_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON_MASK 0x00008000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_VRS_INTF_CLK_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_COURSE_MGCG_BUSY_ENABLE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_STAGE_IN_TP_PFFB_WR_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_Z_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_PROC_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_ACCUM_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_PFFB_RP_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_PKR_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_FREE_WAVE_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_WAVE_2_SC_SPI_WAVE_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__FORCE_GL1H_CLKEN__SHIFT 0x17
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPALLOC_FGCG__SHIFT 0x18
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPGRANT_FGCG__SHIFT 0x19
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPREQ_FGCG__SHIFT 0x1a
+#define CGTT_SQG_CLK_CTRL__FORCE_CMD_FGCG__SHIFT 0x1b
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__FORCE_GL1H_CLKEN_MASK 0x00800000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPALLOC_FGCG_MASK 0x01000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPGRANT_FGCG_MASK 0x02000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPREQ_FGCG_MASK 0x04000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_CMD_FGCG_MASK 0x08000000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//SQ_ALU_CLK_CTRL
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//SQ_TEX_CLK_CTRL
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//SQ_LDS_CLK_CTRL
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//ICG_SP_CLK_CTRL
+#define ICG_SP_CLK_CTRL__CLK_OVERRIDE__SHIFT 0x0
+#define ICG_SP_CLK_CTRL__CLK_OVERRIDE_MASK 0xFFFFFFFFL
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x2
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x3
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x5
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x6
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x7
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE8__SHIFT 0x8
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0x9
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x00000001L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x00000002L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x00000004L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x00000008L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x00000010L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x00000020L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x00000040L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x00000080L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE8_MASK 0x00000100L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0xFFFFFE00L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GFX_ICG_GL2A_CTRL
+#define GFX_ICG_GL2A_CTRL__REG_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2A_CTRL__PERFMON_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2A_CTRL__CROSSBAR_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2A_CTRL__RTN_ARB_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2A_CTRL__GCRD_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2A_CTRL__CLIENT0_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2A_CTRL__CLIENT1_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2A_CTRL__CLIENT2_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2A_CTRL__CLIENT3_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2A_CTRL__CLIENT4_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2A_CTRL__CLIENT5_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2A_CTRL__CLIENT6_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2A_CTRL__CLIENT7_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2A_CTRL__CLIENT8_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2A_CTRL__CLIENT9_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2A_CTRL__CLIENT10_OVERRIDE__SHIFT 0x12
+#define GFX_ICG_GL2A_CTRL__CLIENT11_OVERRIDE__SHIFT 0x13
+#define GFX_ICG_GL2A_CTRL__CLIENT12_OVERRIDE__SHIFT 0x14
+#define GFX_ICG_GL2A_CTRL__CLIENT13_OVERRIDE__SHIFT 0x15
+#define GFX_ICG_GL2A_CTRL__CLIENT14_OVERRIDE__SHIFT 0x16
+#define GFX_ICG_GL2A_CTRL__CLIENT15_OVERRIDE__SHIFT 0x17
+#define GFX_ICG_GL2A_CTRL__REG_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2A_CTRL__PERFMON_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2A_CTRL__CROSSBAR_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2A_CTRL__RTN_ARB_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2A_CTRL__GCRD_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2A_CTRL__CLIENT0_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2A_CTRL__CLIENT1_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2A_CTRL__CLIENT2_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2A_CTRL__CLIENT3_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2A_CTRL__CLIENT4_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2A_CTRL__CLIENT5_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2A_CTRL__CLIENT6_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2A_CTRL__CLIENT7_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2A_CTRL__CLIENT8_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2A_CTRL__CLIENT9_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2A_CTRL__CLIENT10_OVERRIDE_MASK 0x00040000L
+#define GFX_ICG_GL2A_CTRL__CLIENT11_OVERRIDE_MASK 0x00080000L
+#define GFX_ICG_GL2A_CTRL__CLIENT12_OVERRIDE_MASK 0x00100000L
+#define GFX_ICG_GL2A_CTRL__CLIENT13_OVERRIDE_MASK 0x00200000L
+#define GFX_ICG_GL2A_CTRL__CLIENT14_OVERRIDE_MASK 0x00400000L
+#define GFX_ICG_GL2A_CTRL__CLIENT15_OVERRIDE_MASK 0x00800000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1a
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT__SHIFT 0x1b
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP__SHIFT 0x1c
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x04000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT_MASK 0x08000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP_MASK 0x10000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__RESERVED__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__RESERVED_MASK 0xFFFFFFFFL
+//CGTT_SC_CLK_CTRL3
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_STALL_OVERRIDE__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_STALL_OVERRIDE__SHIFT 0x1
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_STALL_OVERRIDE__SHIFT 0x2
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_STALL_OVERRIDE__SHIFT 0x3
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_STALL_OVERRIDE__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_STALL_OVERRIDE__SHIFT 0x5
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_STALL_OVERRIDE__SHIFT 0x6
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_STALL_OVERRIDE__SHIFT 0x7
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_STALL_OVERRIDE__SHIFT 0x8
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_STALL_OVERRIDE__SHIFT 0x9
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_STALL_OVERRIDE__SHIFT 0xa
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_STALL_OVERRIDE__SHIFT 0xb
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_STALL_OVERRIDE__SHIFT 0xc
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_STALL_OVERRIDE__SHIFT 0xd
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_STALL_OVERRIDE_MASK 0x00000001L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_STALL_OVERRIDE_MASK 0x00000002L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_STALL_OVERRIDE_MASK 0x00000004L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_STALL_OVERRIDE_MASK 0x00000008L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_STALL_OVERRIDE_MASK 0x00000010L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_STALL_OVERRIDE_MASK 0x00000020L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_STALL_OVERRIDE_MASK 0x00000040L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_STALL_OVERRIDE_MASK 0x00000080L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_STALL_OVERRIDE_MASK 0x00000100L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_STALL_OVERRIDE_MASK 0x00000200L
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_STALL_OVERRIDE_MASK 0x00000400L
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_STALL_OVERRIDE_MASK 0x00000800L
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_STALL_OVERRIDE_MASK 0x00001000L
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_STALL_OVERRIDE_MASK 0x00002000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL4
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_STALL_OVERRIDE__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_STALL_OVERRIDE__SHIFT 0x1
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_STALL_OVERRIDE__SHIFT 0x2
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_STALL_OVERRIDE__SHIFT 0x3
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_STALL_OVERRIDE__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_STALL_OVERRIDE__SHIFT 0x5
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_STALL_OVERRIDE__SHIFT 0x6
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_STALL_OVERRIDE__SHIFT 0x7
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_STALL_OVERRIDE__SHIFT 0x8
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_STALL_OVERRIDE__SHIFT 0x9
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_STALL_OVERRIDE__SHIFT 0xa
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_STALL_OVERRIDE__SHIFT 0xb
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_STALL_OVERRIDE__SHIFT 0xc
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_STALL_OVERRIDE_MASK 0x00000001L
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_STALL_OVERRIDE_MASK 0x00000002L
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_STALL_OVERRIDE_MASK 0x00000004L
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_STALL_OVERRIDE_MASK 0x00000008L
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_STALL_OVERRIDE_MASK 0x00000010L
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_STALL_OVERRIDE_MASK 0x00000020L
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_STALL_OVERRIDE_MASK 0x00000040L
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_STALL_OVERRIDE_MASK 0x00000080L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_STALL_OVERRIDE_MASK 0x00000100L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_STALL_OVERRIDE_MASK 0x00000200L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_STALL_OVERRIDE_MASK 0x00000400L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_STALL_OVERRIDE_MASK 0x00000800L
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_STALL_OVERRIDE_MASK 0x00001000L
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_OVERRIDE_MASK 0x80000000L
+//GCEA_ICG_CTRL
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x0
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x2
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x3
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x4
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_MAM__SHIFT 0x5
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x00000001L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000002L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000004L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000008L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x00000010L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_MAM_MASK 0x00000020L
+//GL1I_GL1R_MGCG_OVERRIDE
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_SCLK_OVERRIDE__SHIFT 0x0
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x1
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SCLK_OVERRIDE__SHIFT 0x2
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x3
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SRC_DCLK_OVERRIDE__SHIFT 0x4
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_SRC_MGCG_SCLK_OVERRIDE__SHIFT 0x5
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_RET_MGCG_SCLK_OVERRIDE__SHIFT 0x6
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_SCLK_OVERRIDE_MASK 0x00000001L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000002L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SCLK_OVERRIDE_MASK 0x00000004L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000008L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SRC_DCLK_OVERRIDE_MASK 0x00000010L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_SRC_MGCG_SCLK_OVERRIDE_MASK 0x00000020L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_RET_MGCG_SCLK_OVERRIDE_MASK 0x00000040L
+//GL1H_ICG_CTRL
+#define GL1H_ICG_CTRL__REG_DCLK_OVERRIDE__SHIFT 0x0
+#define GL1H_ICG_CTRL__REQ_ARB_DCLK_OVERRIDE__SHIFT 0x1
+#define GL1H_ICG_CTRL__PERFMON_DCLK_OVERRIDE__SHIFT 0x2
+#define GL1H_ICG_CTRL__REQ_ARB_CLI0_DCLK_OVERRIDE__SHIFT 0x3
+#define GL1H_ICG_CTRL__REQ_ARB_CLI1_DCLK_OVERRIDE__SHIFT 0x4
+#define GL1H_ICG_CTRL__REQ_ARB_CLI2_DCLK_OVERRIDE__SHIFT 0x5
+#define GL1H_ICG_CTRL__REQ_ARB_CLI3_DCLK_OVERRIDE__SHIFT 0x6
+#define GL1H_ICG_CTRL__SRC_DCLK_OVERRIDE__SHIFT 0x7
+#define GL1H_ICG_CTRL__RET_DCLK_OVERRIDE__SHIFT 0x8
+#define GL1H_ICG_CTRL__REG_DCLK_OVERRIDE_MASK 0x00000001L
+#define GL1H_ICG_CTRL__REQ_ARB_DCLK_OVERRIDE_MASK 0x00000002L
+#define GL1H_ICG_CTRL__PERFMON_DCLK_OVERRIDE_MASK 0x00000004L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI0_DCLK_OVERRIDE_MASK 0x00000008L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI1_DCLK_OVERRIDE_MASK 0x00000010L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI2_DCLK_OVERRIDE_MASK 0x00000020L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI3_DCLK_OVERRIDE_MASK 0x00000040L
+#define GL1H_ICG_CTRL__SRC_DCLK_OVERRIDE_MASK 0x00000080L
+#define GL1H_ICG_CTRL__RET_DCLK_OVERRIDE_MASK 0x00000100L
+//CHI_CHR_MGCG_OVERRIDE
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_SCLK_OVERRIDE__SHIFT 0x0
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x1
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SCLK_OVERRIDE__SHIFT 0x2
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x3
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SRC_DCLK_OVERRIDE__SHIFT 0x4
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_RET_MGCG_SCLK_OVERRIDE__SHIFT 0x5
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_SRC_MGCG_SCLK_OVERRIDE__SHIFT 0x6
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_SCLK_OVERRIDE_MASK 0x00000001L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000002L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SCLK_OVERRIDE_MASK 0x00000004L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000008L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SRC_DCLK_OVERRIDE_MASK 0x00000010L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_RET_MGCG_SCLK_OVERRIDE_MASK 0x00000020L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_SRC_MGCG_SCLK_OVERRIDE_MASK 0x00000040L
+//ICG_GL1C_CLK_CTRL
+#define ICG_GL1C_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_GL1C_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_GL1C_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_GL1C_CLK_CTRL__VM_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_GL1C_CLK_CTRL__TAG_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_GL1C_CLK_CTRL__GCR_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_GL1C_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_GL1C_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x7
+#define ICG_GL1C_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x8
+#define ICG_GL1C_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x9
+#define ICG_GL1C_CLK_CTRL__LATENCY_FIFO_CLK_OVERRIDE__SHIFT 0xa
+#define ICG_GL1C_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_GL1C_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_GL1C_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_GL1C_CLK_CTRL__VM_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_GL1C_CLK_CTRL__TAG_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_GL1C_CLK_CTRL__GCR_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_GL1C_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000040L
+#define ICG_GL1C_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000080L
+#define ICG_GL1C_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000100L
+#define ICG_GL1C_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000200L
+#define ICG_GL1C_CLK_CTRL__LATENCY_FIFO_CLK_OVERRIDE_MASK 0x00000400L
+//ICG_GL1A_CTRL
+#define ICG_GL1A_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_GL1A_CTRL__REQ_CLI_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_GL1A_CTRL__REQ_ARB_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_GL1A_CTRL__RET_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_GL1A_CTRL__REQ_CREDIT_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_GL1A_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_GL1A_CTRL__REG_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_GL1A_CTRL__REQ_CLI_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_GL1A_CTRL__REQ_ARB_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_GL1A_CTRL__RET_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_GL1A_CTRL__REQ_CREDIT_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_GL1A_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x00000020L
+//ICG_CHA_CTRL
+#define ICG_CHA_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHA_CTRL__REQ_CLI_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHA_CTRL__REQ_ARB_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHA_CTRL__RET_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHA_CTRL__REQ_CREDIT_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHA_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHA_CTRL__REG_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHA_CTRL__REQ_CLI_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHA_CTRL__REQ_ARB_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHA_CTRL__RET_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHA_CTRL__REQ_CREDIT_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHA_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x00000020L
+//GUS_ICG_CTRL
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_DRAM__SHIFT 0x0
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x2
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_DEMUX__SHIFT 0x3
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_WRITE__SHIFT 0x4
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_READ__SHIFT 0x5
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x6
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x7
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_STATIC__SHIFT 0x8
+#define GUS_ICG_CTRL__SPARE1__SHIFT 0x9
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_DRAM_MASK 0x00000001L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000002L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000004L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_DEMUX_MASK 0x00000008L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_WRITE_MASK 0x00000010L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_READ_MASK 0x00000020L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000040L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x00000080L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_STATIC_MASK 0x00000100L
+#define GUS_ICG_CTRL__SPARE1_MASK 0x0003FE00L
+//CGTT_PH_CLK_CTRL0
+#define CGTT_PH_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL0__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PH_CLK_CTRL0__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PH_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL0__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PH_CLK_CTRL0__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL1
+#define CGTT_PH_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL2
+#define CGTT_PH_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL3
+#define CGTT_PH_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+//GFX_ICG_GL2C_CTRL
+#define GFX_ICG_GL2C_CTRL__REG_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2C_CTRL__PERFMON_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2C_CTRL__IB_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2C_CTRL__TAG_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2C_CTRL__CM_CORE_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2C_CTRL__CORE_OVERRIDE__SHIFT 0x5
+#define GFX_ICG_GL2C_CTRL__CACHE_RAM_OVERRIDE__SHIFT 0x6
+#define GFX_ICG_GL2C_CTRL__GCR_OVERRIDE__SHIFT 0x7
+#define GFX_ICG_GL2C_CTRL__EXECUTE_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2C_CTRL__RETURN_BUFFER_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2C_CTRL__LATENCY_FIFO_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2C_CTRL__OUTPUT_FIFOS_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2C_CTRL__MC_WRITE_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2C_CTRL__EXECUTE_DECOMP_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2C_CTRL__EXECUTE_WRITE_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP0_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP1_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP2_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP3_OVERRIDE__SHIFT 0x12
+#define GFX_ICG_GL2C_CTRL__CM_RVF_OVERRIDE__SHIFT 0x14
+#define GFX_ICG_GL2C_CTRL__CM_SDR_OVERRIDE__SHIFT 0x15
+#define GFX_ICG_GL2C_CTRL__CM_RPF_OVERRIDE__SHIFT 0x16
+#define GFX_ICG_GL2C_CTRL__CM_STS_OVERRIDE__SHIFT 0x17
+#define GFX_ICG_GL2C_CTRL__CM_READ_OVERRIDE__SHIFT 0x18
+#define GFX_ICG_GL2C_CTRL__CM_MERGE_OVERRIDE__SHIFT 0x19
+#define GFX_ICG_GL2C_CTRL__CM_COMP_OVERRIDE__SHIFT 0x1a
+#define GFX_ICG_GL2C_CTRL__CM_DCC_OVERRIDE__SHIFT 0x1b
+#define GFX_ICG_GL2C_CTRL__CM_WRITE_OVERRIDE__SHIFT 0x1c
+#define GFX_ICG_GL2C_CTRL__CM_NOOP_OVERRIDE__SHIFT 0x1d
+#define GFX_ICG_GL2C_CTRL__MDC_TAG_OVERRIDE__SHIFT 0x1e
+#define GFX_ICG_GL2C_CTRL__MDC_DATA_OVERRIDE__SHIFT 0x1f
+#define GFX_ICG_GL2C_CTRL__REG_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2C_CTRL__PERFMON_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2C_CTRL__IB_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2C_CTRL__TAG_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2C_CTRL__CM_CORE_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2C_CTRL__CORE_OVERRIDE_MASK 0x00000020L
+#define GFX_ICG_GL2C_CTRL__CACHE_RAM_OVERRIDE_MASK 0x00000040L
+#define GFX_ICG_GL2C_CTRL__GCR_OVERRIDE_MASK 0x00000080L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2C_CTRL__RETURN_BUFFER_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2C_CTRL__LATENCY_FIFO_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2C_CTRL__OUTPUT_FIFOS_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2C_CTRL__MC_WRITE_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_DECOMP_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_WRITE_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP0_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP1_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP2_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP3_OVERRIDE_MASK 0x00040000L
+#define GFX_ICG_GL2C_CTRL__CM_RVF_OVERRIDE_MASK 0x00100000L
+#define GFX_ICG_GL2C_CTRL__CM_SDR_OVERRIDE_MASK 0x00200000L
+#define GFX_ICG_GL2C_CTRL__CM_RPF_OVERRIDE_MASK 0x00400000L
+#define GFX_ICG_GL2C_CTRL__CM_STS_OVERRIDE_MASK 0x00800000L
+#define GFX_ICG_GL2C_CTRL__CM_READ_OVERRIDE_MASK 0x01000000L
+#define GFX_ICG_GL2C_CTRL__CM_MERGE_OVERRIDE_MASK 0x02000000L
+#define GFX_ICG_GL2C_CTRL__CM_COMP_OVERRIDE_MASK 0x04000000L
+#define GFX_ICG_GL2C_CTRL__CM_DCC_OVERRIDE_MASK 0x08000000L
+#define GFX_ICG_GL2C_CTRL__CM_WRITE_OVERRIDE_MASK 0x10000000L
+#define GFX_ICG_GL2C_CTRL__CM_NOOP_OVERRIDE_MASK 0x20000000L
+#define GFX_ICG_GL2C_CTRL__MDC_TAG_OVERRIDE_MASK 0x40000000L
+#define GFX_ICG_GL2C_CTRL__MDC_DATA_OVERRIDE_MASK 0x80000000L
+//GFX_ICG_GL2C_CTRL1
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT0_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT1_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT2_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT3_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT4_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT5_OVERRIDE__SHIFT 0x5
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT6_OVERRIDE__SHIFT 0x6
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT7_OVERRIDE__SHIFT 0x7
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT8_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT9_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT10_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT11_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT12_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT13_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT14_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT15_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT16_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT17_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2C_CTRL1__TAG_PROBE_OVERRIDE__SHIFT 0x18
+#define GFX_ICG_GL2C_CTRL1__DCC_UPPER_OVERRIDE__SHIFT 0x19
+#define GFX_ICG_GL2C_CTRL1__DCC_LOWER_OVERRIDE__SHIFT 0x1a
+#define GFX_ICG_GL2C_CTRL1__ZD_UPPER_OVERRIDE__SHIFT 0x1b
+#define GFX_ICG_GL2C_CTRL1__ZD_LOWER_OVERRIDE__SHIFT 0x1c
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT0_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT1_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT2_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT3_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT4_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT5_OVERRIDE_MASK 0x00000020L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT6_OVERRIDE_MASK 0x00000040L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT7_OVERRIDE_MASK 0x00000080L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT8_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT9_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT10_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT11_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT12_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT13_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT14_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT15_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT16_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT17_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2C_CTRL1__TAG_PROBE_OVERRIDE_MASK 0x01000000L
+#define GFX_ICG_GL2C_CTRL1__DCC_UPPER_OVERRIDE_MASK 0x02000000L
+#define GFX_ICG_GL2C_CTRL1__DCC_LOWER_OVERRIDE_MASK 0x04000000L
+#define GFX_ICG_GL2C_CTRL1__ZD_UPPER_OVERRIDE_MASK 0x08000000L
+#define GFX_ICG_GL2C_CTRL1__ZD_LOWER_OVERRIDE_MASK 0x10000000L
+//ICG_LDS_CLK_CTRL
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD0_OVERRIDE__SHIFT 0x0
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD1_OVERRIDE__SHIFT 0x1
+#define ICG_LDS_CLK_CTRL__LDS_WGP_ARB_OVERRIDE__SHIFT 0x2
+#define ICG_LDS_CLK_CTRL__LDS_TD_OVERRIDE__SHIFT 0x3
+#define ICG_LDS_CLK_CTRL__LDS_ATTR_WR_OVERRIDE__SHIFT 0x4
+#define ICG_LDS_CLK_CTRL__LDS_CONFIG_REG_OVERRIDE__SHIFT 0x5
+#define ICG_LDS_CLK_CTRL__LDS_IDX_PIPE_OVERRIDE__SHIFT 0x6
+#define ICG_LDS_CLK_CTRL__LDS_IDX_DIR_OVERRIDE__SHIFT 0x7
+#define ICG_LDS_CLK_CTRL__LDS_IDX_WR_OVERRIDE__SHIFT 0x8
+#define ICG_LDS_CLK_CTRL__LDS_IDX_INPUT_QUEUE_OVERRIDE__SHIFT 0x9
+#define ICG_LDS_CLK_CTRL__LDS_MEM_OVERRIDE__SHIFT 0xa
+#define ICG_LDS_CLK_CTRL__LDS_IDX_OUTPUT_ALIGNER_OVERRIDE__SHIFT 0xb
+#define ICG_LDS_CLK_CTRL__LDS_DIR_OUTPUT_ALIGNER_OVERRIDE__SHIFT 0xc
+#define ICG_LDS_CLK_CTRL__LDS_IDX_BANK_CONFLICT_OVERRIDE__SHIFT 0xd
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_INPUT_OVERRIDE__SHIFT 0xe
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_OUTPUT_OVERRIDE__SHIFT 0xf
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_PIPE_OVERRIDE__SHIFT 0x10
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHEDULER_OVERRIDE__SHIFT 0x11
+#define ICG_LDS_CLK_CTRL__LDS_IDX_RDRTN_OVERRIDE__SHIFT 0x12
+#define ICG_LDS_CLK_CTRL__LDS_SP_DONE_OVERRIDE__SHIFT 0x13
+#define ICG_LDS_CLK_CTRL__LDS_SQC_PERF_OVERRIDE__SHIFT 0x14
+#define ICG_LDS_CLK_CTRL__LDS_SP_READ_OVERRIDE__SHIFT 0x15
+#define ICG_LDS_CLK_CTRL__SQ_LDS_VMEMCMD_OVERRIDE__SHIFT 0x16
+#define ICG_LDS_CLK_CTRL__SP_LDS_VMEMREQ_OVERRIDE__SHIFT 0x17
+#define ICG_LDS_CLK_CTRL__SPI_LDS_STALL_OVERRIDE__SHIFT 0x18
+#define ICG_LDS_CLK_CTRL__MEM_WR_OVERRIDE__SHIFT 0x19
+#define ICG_LDS_CLK_CTRL__LDS_CLK_OVERRIDE_UNUSED__SHIFT 0x1a
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD0_OVERRIDE_MASK 0x00000001L
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD1_OVERRIDE_MASK 0x00000002L
+#define ICG_LDS_CLK_CTRL__LDS_WGP_ARB_OVERRIDE_MASK 0x00000004L
+#define ICG_LDS_CLK_CTRL__LDS_TD_OVERRIDE_MASK 0x00000008L
+#define ICG_LDS_CLK_CTRL__LDS_ATTR_WR_OVERRIDE_MASK 0x00000010L
+#define ICG_LDS_CLK_CTRL__LDS_CONFIG_REG_OVERRIDE_MASK 0x00000020L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_PIPE_OVERRIDE_MASK 0x00000040L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_DIR_OVERRIDE_MASK 0x00000080L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_WR_OVERRIDE_MASK 0x00000100L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_INPUT_QUEUE_OVERRIDE_MASK 0x00000200L
+#define ICG_LDS_CLK_CTRL__LDS_MEM_OVERRIDE_MASK 0x00000400L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_OUTPUT_ALIGNER_OVERRIDE_MASK 0x00000800L
+#define ICG_LDS_CLK_CTRL__LDS_DIR_OUTPUT_ALIGNER_OVERRIDE_MASK 0x00001000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_BANK_CONFLICT_OVERRIDE_MASK 0x00002000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_INPUT_OVERRIDE_MASK 0x00004000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_OUTPUT_OVERRIDE_MASK 0x00008000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_PIPE_OVERRIDE_MASK 0x00010000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHEDULER_OVERRIDE_MASK 0x00020000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_RDRTN_OVERRIDE_MASK 0x00040000L
+#define ICG_LDS_CLK_CTRL__LDS_SP_DONE_OVERRIDE_MASK 0x00080000L
+#define ICG_LDS_CLK_CTRL__LDS_SQC_PERF_OVERRIDE_MASK 0x00100000L
+#define ICG_LDS_CLK_CTRL__LDS_SP_READ_OVERRIDE_MASK 0x00200000L
+#define ICG_LDS_CLK_CTRL__SQ_LDS_VMEMCMD_OVERRIDE_MASK 0x00400000L
+#define ICG_LDS_CLK_CTRL__SP_LDS_VMEMREQ_OVERRIDE_MASK 0x00800000L
+#define ICG_LDS_CLK_CTRL__SPI_LDS_STALL_OVERRIDE_MASK 0x01000000L
+#define ICG_LDS_CLK_CTRL__MEM_WR_OVERRIDE_MASK 0x02000000L
+#define ICG_LDS_CLK_CTRL__LDS_CLK_OVERRIDE_UNUSED_MASK 0xFC000000L
+//GFX_ICG_UTCL1_CTRL
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE0__SHIFT 0x0
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE1__SHIFT 0x1
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE2__SHIFT 0x2
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE3__SHIFT 0x3
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE4__SHIFT 0x4
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE5__SHIFT 0x5
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE6__SHIFT 0x6
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE7__SHIFT 0x7
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE8__SHIFT 0x8
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE9__SHIFT 0x9
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE10__SHIFT 0xa
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE11__SHIFT 0xb
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE12__SHIFT 0xc
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE13__SHIFT 0xd
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE14__SHIFT 0xe
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE15_31__SHIFT 0xf
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE0_MASK 0x00000001L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE1_MASK 0x00000002L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE2_MASK 0x00000004L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE3_MASK 0x00000008L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE4_MASK 0x00000010L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE5_MASK 0x00000020L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE6_MASK 0x00000040L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE7_MASK 0x00000080L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE8_MASK 0x00000100L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE9_MASK 0x00000200L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE10_MASK 0x00000400L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE11_MASK 0x00000800L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE12_MASK 0x00001000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE13_MASK 0x00002000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE14_MASK 0x00004000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE15_31_MASK 0xFFFF8000L
+//ICG_CHC_CLK_CTRL
+#define ICG_CHC_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHC_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHC_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHC_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHC_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHC_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHC_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_CHC_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHC_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHC_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHC_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHC_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHC_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_CHC_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000040L
+//ICG_CHCG_CLK_CTRL
+#define ICG_CHCG_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHCG_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHCG_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHCG_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHCG_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHCG_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHCG_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_CHCG_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHCG_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHCG_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHCG_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHCG_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHCG_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_CHCG_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000040L
+
+
+// addressBlock: gc_pspdec
+//CP_MES_DM_INDEX_ADDR
+#define CP_MES_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_MES_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_MES_DM_INDEX_DATA
+#define CP_MES_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_MES_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_DM_INDEX_ADDR
+#define CP_MEC_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_MEC_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_DM_INDEX_DATA
+#define CP_MEC_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_MEC_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DM_INDEX_ADDR
+#define CP_GFX_RS64_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_GFX_RS64_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DM_INDEX_DATA
+#define CP_GFX_RS64_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_GFX_RS64_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_PSP_DEBUG
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL__SHIFT 0x2
+#define CPG_PSP_DEBUG__GPA_OVERRIDE__SHIFT 0x3
+#define CPG_PSP_DEBUG__UCODE_VF_OVERRIDE__SHIFT 0x4
+#define CPG_PSP_DEBUG__MTYPE_TMZ_OVERRIDE__SHIFT 0x5
+#define CPG_PSP_DEBUG__SECURE_REG_OVERRIDE__SHIFT 0x6
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL_MASK 0x00000004L
+#define CPG_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L
+#define CPG_PSP_DEBUG__UCODE_VF_OVERRIDE_MASK 0x00000010L
+#define CPG_PSP_DEBUG__MTYPE_TMZ_OVERRIDE_MASK 0x00000020L
+#define CPG_PSP_DEBUG__SECURE_REG_OVERRIDE_MASK 0x00000040L
+//CPC_PSP_DEBUG
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPC_PSP_DEBUG__GPA_OVERRIDE__SHIFT 0x3
+#define CPC_PSP_DEBUG__UCODE_VF_OVERRIDE__SHIFT 0x4
+#define CPC_PSP_DEBUG__MTYPE_TMZ_OVERRIDE__SHIFT 0x5
+#define CPC_PSP_DEBUG__SECURE_REG_OVERRIDE__SHIFT 0x6
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPC_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L
+#define CPC_PSP_DEBUG__UCODE_VF_OVERRIDE_MASK 0x00000010L
+#define CPC_PSP_DEBUG__MTYPE_TMZ_OVERRIDE_MASK 0x00000020L
+#define CPC_PSP_DEBUG__SECURE_REG_OVERRIDE_MASK 0x00000040L
+//GRBM_IOV_ERROR_FIFO
+#define GRBM_IOV_ERROR_FIFO__IOV_ADDR__SHIFT 0x0
+#define GRBM_IOV_ERROR_FIFO__IOV_VFID__SHIFT 0x12
+#define GRBM_IOV_ERROR_FIFO__IOV_SSRCID__SHIFT 0x18
+#define GRBM_IOV_ERROR_FIFO__IOV_OP__SHIFT 0x1c
+#define GRBM_IOV_ERROR_FIFO__IOV_VF__SHIFT 0x1d
+#define GRBM_IOV_ERROR_FIFO__FIFO_OVERFLOW__SHIFT 0x1e
+#define GRBM_IOV_ERROR_FIFO__READ_VALID__SHIFT 0x1f
+#define GRBM_IOV_ERROR_FIFO__IOV_ADDR_MASK 0x0003FFFFL
+#define GRBM_IOV_ERROR_FIFO__IOV_VFID_MASK 0x00FC0000L
+#define GRBM_IOV_ERROR_FIFO__IOV_SSRCID_MASK 0x0F000000L
+#define GRBM_IOV_ERROR_FIFO__IOV_OP_MASK 0x10000000L
+#define GRBM_IOV_ERROR_FIFO__IOV_VF_MASK 0x20000000L
+#define GRBM_IOV_ERROR_FIFO__FIFO_OVERFLOW_MASK 0x40000000L
+#define GRBM_IOV_ERROR_FIFO__READ_VALID_MASK 0x80000000L
+//GRBM_SEC_CNTL
+#define GRBM_SEC_CNTL__DEBUG_ENABLE__SHIFT 0x0
+#define GRBM_SEC_CNTL__DEBUG_ENABLE_MASK 0x00000001L
+//GRBM_CAM_INDEX
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x0000000FL
+//GRBM_HYP_CAM_INDEX
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x0000000FL
+//GRBM_CAM_DATA
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_HYP_CAM_DATA
+#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_CAM_DATA_UPPER
+#define GRBM_CAM_DATA_UPPER__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA_UPPER__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA_UPPER__CAM_ADDR_MASK 0x00000003L
+#define GRBM_CAM_DATA_UPPER__CAM_REMAPADDR_MASK 0x00030000L
+//GRBM_HYP_CAM_DATA_UPPER
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_ADDR_MASK 0x00000003L
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_REMAPADDR_MASK 0x00030000L
+//RLC_FWL_FIRST_VIOL_ADDR
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR__SHIFT 0x0
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID__SHIFT 0x12
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP__SHIFT 0x1e
+#define RLC_FWL_FIRST_VIOL_ADDR__RESERVED__SHIFT 0x1f
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR_MASK 0x0003FFFFL
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID_MASK 0x3FFC0000L
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP_MASK 0x40000000L
+#define RLC_FWL_FIRST_VIOL_ADDR__RESERVED_MASK 0x80000000L
+
+
+// addressBlock: gc_gfx_imu_gfx_imudec
+//GFX_IMU_C2PMSG_0
+#define GFX_IMU_C2PMSG_0__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_1
+#define GFX_IMU_C2PMSG_1__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_2
+#define GFX_IMU_C2PMSG_2__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_3
+#define GFX_IMU_C2PMSG_3__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_4
+#define GFX_IMU_C2PMSG_4__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_5
+#define GFX_IMU_C2PMSG_5__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_5__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_6
+#define GFX_IMU_C2PMSG_6__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_6__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_7
+#define GFX_IMU_C2PMSG_7__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_7__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_8
+#define GFX_IMU_C2PMSG_8__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_8__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_9
+#define GFX_IMU_C2PMSG_9__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_9__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_10
+#define GFX_IMU_C2PMSG_10__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_10__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_11
+#define GFX_IMU_C2PMSG_11__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_11__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_12
+#define GFX_IMU_C2PMSG_12__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_12__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_13
+#define GFX_IMU_C2PMSG_13__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_13__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_14
+#define GFX_IMU_C2PMSG_14__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_14__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_15
+#define GFX_IMU_C2PMSG_15__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_15__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_16
+#define GFX_IMU_C2PMSG_16__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_16__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_17
+#define GFX_IMU_C2PMSG_17__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_17__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_18
+#define GFX_IMU_C2PMSG_18__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_18__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_19
+#define GFX_IMU_C2PMSG_19__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_19__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_20
+#define GFX_IMU_C2PMSG_20__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_20__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_21
+#define GFX_IMU_C2PMSG_21__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_21__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_22
+#define GFX_IMU_C2PMSG_22__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_22__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_23
+#define GFX_IMU_C2PMSG_23__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_23__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_24
+#define GFX_IMU_C2PMSG_24__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_24__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_25
+#define GFX_IMU_C2PMSG_25__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_25__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_26
+#define GFX_IMU_C2PMSG_26__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_26__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_27
+#define GFX_IMU_C2PMSG_27__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_27__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_28
+#define GFX_IMU_C2PMSG_28__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_28__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_29
+#define GFX_IMU_C2PMSG_29__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_29__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_30
+#define GFX_IMU_C2PMSG_30__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_30__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_31
+#define GFX_IMU_C2PMSG_31__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_31__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_32
+#define GFX_IMU_C2PMSG_32__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_32__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_33
+#define GFX_IMU_C2PMSG_33__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_33__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_34
+#define GFX_IMU_C2PMSG_34__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_34__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_35
+#define GFX_IMU_C2PMSG_35__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_35__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_36
+#define GFX_IMU_C2PMSG_36__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_36__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_37
+#define GFX_IMU_C2PMSG_37__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_37__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_38
+#define GFX_IMU_C2PMSG_38__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_38__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_39
+#define GFX_IMU_C2PMSG_39__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_39__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_40
+#define GFX_IMU_C2PMSG_40__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_40__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_41
+#define GFX_IMU_C2PMSG_41__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_41__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_42
+#define GFX_IMU_C2PMSG_42__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_42__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_43
+#define GFX_IMU_C2PMSG_43__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_43__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_44
+#define GFX_IMU_C2PMSG_44__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_44__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_45
+#define GFX_IMU_C2PMSG_45__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_45__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_46
+#define GFX_IMU_C2PMSG_46__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_46__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_47
+#define GFX_IMU_C2PMSG_47__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_47__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_MSG_FLAGS
+#define GFX_IMU_MSG_FLAGS__STATUS__SHIFT 0x0
+#define GFX_IMU_MSG_FLAGS__STATUS_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_ACCESS_CTRL0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC0__SHIFT 0x0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC1__SHIFT 0x3
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC2__SHIFT 0x6
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC3__SHIFT 0x9
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC4__SHIFT 0xc
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC5__SHIFT 0xf
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC6__SHIFT 0x12
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC7__SHIFT 0x15
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC0_MASK 0x00000007L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC1_MASK 0x00000038L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC2_MASK 0x000001C0L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC3_MASK 0x00000E00L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC4_MASK 0x00007000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC5_MASK 0x00038000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC6_MASK 0x001C0000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC7_MASK 0x00E00000L
+//GFX_IMU_C2PMSG_ACCESS_CTRL1
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC8_15__SHIFT 0x0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC16_23__SHIFT 0x3
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC24_31__SHIFT 0x6
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC32_39__SHIFT 0x9
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC40_47__SHIFT 0xc
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC8_15_MASK 0x00000007L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC16_23_MASK 0x00000038L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC24_31_MASK 0x000001C0L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC32_39_MASK 0x00000E00L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC40_47_MASK 0x00007000L
+//GFX_IMU_PWRMGT_IRQ_CTRL
+#define GFX_IMU_PWRMGT_IRQ_CTRL__REQ__SHIFT 0x0
+#define GFX_IMU_PWRMGT_IRQ_CTRL__REQ_MASK 0x00000001L
+//GFX_IMU_MP1_MUTEX
+#define GFX_IMU_MP1_MUTEX__MUTEX__SHIFT 0x0
+#define GFX_IMU_MP1_MUTEX__MUTEX_MASK 0x00000003L
+//GFX_IMU_RLC_DATA_4
+#define GFX_IMU_RLC_DATA_4__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_3
+#define GFX_IMU_RLC_DATA_3__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_2
+#define GFX_IMU_RLC_DATA_2__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_1
+#define GFX_IMU_RLC_DATA_1__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_0
+#define GFX_IMU_RLC_DATA_0__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_CMD
+#define GFX_IMU_RLC_CMD__CMD__SHIFT 0x0
+#define GFX_IMU_RLC_CMD__CMD_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_MUTEX
+#define GFX_IMU_RLC_MUTEX__MUTEX__SHIFT 0x0
+#define GFX_IMU_RLC_MUTEX__MUTEX_MASK 0x00000003L
+//GFX_IMU_RLC_MSG_STATUS
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_BUSY__SHIFT 0x0
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_MSG_ERROR__SHIFT 0x1
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_MSGDONE__SHIFT 0x10
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_CHGTOG__SHIFT 0x1e
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_DONETOG__SHIFT 0x1f
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_BUSY_MASK 0x00000001L
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_MSG_ERROR_MASK 0x00000002L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_MSGDONE_MASK 0x00010000L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_CHGTOG_MASK 0x40000000L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_DONETOG_MASK 0x80000000L
+//RLC_GFX_IMU_DATA_0
+#define RLC_GFX_IMU_DATA_0__DATA__SHIFT 0x0
+#define RLC_GFX_IMU_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GFX_IMU_CMD
+#define RLC_GFX_IMU_CMD__CMD__SHIFT 0x0
+#define RLC_GFX_IMU_CMD__CMD_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_STATUS
+#define GFX_IMU_RLC_STATUS__PD_ACTIVE__SHIFT 0x0
+#define GFX_IMU_RLC_STATUS__RLC_ALIVE__SHIFT 0x1
+#define GFX_IMU_RLC_STATUS__TBD2__SHIFT 0x2
+#define GFX_IMU_RLC_STATUS__TBD3__SHIFT 0x3
+#define GFX_IMU_RLC_STATUS__PD_ACTIVE_MASK 0x00000001L
+#define GFX_IMU_RLC_STATUS__RLC_ALIVE_MASK 0x00000002L
+#define GFX_IMU_RLC_STATUS__TBD2_MASK 0x00000004L
+#define GFX_IMU_RLC_STATUS__TBD3_MASK 0x00000008L
+//GFX_IMU_STATUS
+#define GFX_IMU_STATUS__ALLOW_GFXOFF__SHIFT 0x0
+#define GFX_IMU_STATUS__ALLOW_FA_DCS__SHIFT 0x1
+#define GFX_IMU_STATUS__TBD2__SHIFT 0x2
+#define GFX_IMU_STATUS__TBD3__SHIFT 0x3
+#define GFX_IMU_STATUS__TBD4__SHIFT 0x4
+#define GFX_IMU_STATUS__TBD5__SHIFT 0x5
+#define GFX_IMU_STATUS__TBD6__SHIFT 0x6
+#define GFX_IMU_STATUS__TBD7__SHIFT 0x7
+#define GFX_IMU_STATUS__TBD8__SHIFT 0x8
+#define GFX_IMU_STATUS__TBD9__SHIFT 0x9
+#define GFX_IMU_STATUS__TBD10__SHIFT 0xa
+#define GFX_IMU_STATUS__TBD11__SHIFT 0xb
+#define GFX_IMU_STATUS__TBD12__SHIFT 0xc
+#define GFX_IMU_STATUS__TBD13__SHIFT 0xd
+#define GFX_IMU_STATUS__TBD14__SHIFT 0xe
+#define GFX_IMU_STATUS__DISABLE_GFXCLK_DS__SHIFT 0xf
+#define GFX_IMU_STATUS__ALLOW_GFXOFF_MASK 0x00000001L
+#define GFX_IMU_STATUS__ALLOW_FA_DCS_MASK 0x00000002L
+#define GFX_IMU_STATUS__TBD2_MASK 0x00000004L
+#define GFX_IMU_STATUS__TBD3_MASK 0x00000008L
+#define GFX_IMU_STATUS__TBD4_MASK 0x00000010L
+#define GFX_IMU_STATUS__TBD5_MASK 0x00000020L
+#define GFX_IMU_STATUS__TBD6_MASK 0x00000040L
+#define GFX_IMU_STATUS__TBD7_MASK 0x00000080L
+#define GFX_IMU_STATUS__TBD8_MASK 0x00000100L
+#define GFX_IMU_STATUS__TBD9_MASK 0x00000200L
+#define GFX_IMU_STATUS__TBD10_MASK 0x00000400L
+#define GFX_IMU_STATUS__TBD11_MASK 0x00000800L
+#define GFX_IMU_STATUS__TBD12_MASK 0x00001000L
+#define GFX_IMU_STATUS__TBD13_MASK 0x00002000L
+#define GFX_IMU_STATUS__TBD14_MASK 0x00004000L
+#define GFX_IMU_STATUS__DISABLE_GFXCLK_DS_MASK 0x00008000L
+//GFX_IMU_SOC_DATA
+#define GFX_IMU_SOC_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_SOC_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SOC_ADDR
+#define GFX_IMU_SOC_ADDR__ADDR__SHIFT 0x0
+#define GFX_IMU_SOC_ADDR__ADDR_MASK 0xFFFFFFFFL
+//GFX_IMU_SOC_REQ
+#define GFX_IMU_SOC_REQ__REQ_BUSY__SHIFT 0x0
+#define GFX_IMU_SOC_REQ__R_W__SHIFT 0x1
+#define GFX_IMU_SOC_REQ__ERR__SHIFT 0x1f
+#define GFX_IMU_SOC_REQ__REQ_BUSY_MASK 0x00000001L
+#define GFX_IMU_SOC_REQ__R_W_MASK 0x00000002L
+#define GFX_IMU_SOC_REQ__ERR_MASK 0x80000000L
+//GFX_IMU_VF_CTRL
+#define GFX_IMU_VF_CTRL__VF__SHIFT 0x0
+#define GFX_IMU_VF_CTRL__VFID__SHIFT 0x1
+#define GFX_IMU_VF_CTRL__QOS__SHIFT 0x7
+#define GFX_IMU_VF_CTRL__VF_MASK 0x00000001L
+#define GFX_IMU_VF_CTRL__VFID_MASK 0x0000007EL
+#define GFX_IMU_VF_CTRL__QOS_MASK 0x00000780L
+//GFX_IMU_TELEMETRY
+#define GFX_IMU_TELEMETRY__TELEMETRY_ENTRIES__SHIFT 0x0
+#define GFX_IMU_TELEMETRY__TELEMETRY_DATA_SAMPLE_SIZE__SHIFT 0x5
+#define GFX_IMU_TELEMETRY__FIFO_OVERFLOW__SHIFT 0x6
+#define GFX_IMU_TELEMETRY__FIFO_UNDERFLOW__SHIFT 0x7
+#define GFX_IMU_TELEMETRY__FSM_STATE__SHIFT 0x8
+#define GFX_IMU_TELEMETRY__SVI_TYPE__SHIFT 0xc
+#define GFX_IMU_TELEMETRY__ENABLE_FIFO__SHIFT 0x1e
+#define GFX_IMU_TELEMETRY__ENABLE_IMU_RLC_TELEMETRY__SHIFT 0x1f
+#define GFX_IMU_TELEMETRY__TELEMETRY_ENTRIES_MASK 0x0000001FL
+#define GFX_IMU_TELEMETRY__TELEMETRY_DATA_SAMPLE_SIZE_MASK 0x00000020L
+#define GFX_IMU_TELEMETRY__FIFO_OVERFLOW_MASK 0x00000040L
+#define GFX_IMU_TELEMETRY__FIFO_UNDERFLOW_MASK 0x00000080L
+#define GFX_IMU_TELEMETRY__FSM_STATE_MASK 0x00000700L
+#define GFX_IMU_TELEMETRY__SVI_TYPE_MASK 0x00003000L
+#define GFX_IMU_TELEMETRY__ENABLE_FIFO_MASK 0x40000000L
+#define GFX_IMU_TELEMETRY__ENABLE_IMU_RLC_TELEMETRY_MASK 0x80000000L
+//GFX_IMU_TELEMETRY_DATA
+#define GFX_IMU_TELEMETRY_DATA__CURRENT__SHIFT 0x0
+#define GFX_IMU_TELEMETRY_DATA__VOLTAGE__SHIFT 0x10
+#define GFX_IMU_TELEMETRY_DATA__CURRENT_MASK 0x0000FFFFL
+#define GFX_IMU_TELEMETRY_DATA__VOLTAGE_MASK 0xFFFF0000L
+//GFX_IMU_TELEMETRY_TEMPERATURE
+#define GFX_IMU_TELEMETRY_TEMPERATURE__TEMPERATURE__SHIFT 0x0
+#define GFX_IMU_TELEMETRY_TEMPERATURE__TEMPERATURE_MASK 0x0000FFFFL
+//GFX_IMU_SCRATCH_0
+#define GFX_IMU_SCRATCH_0__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_1
+#define GFX_IMU_SCRATCH_1__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_2
+#define GFX_IMU_SCRATCH_2__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_3
+#define GFX_IMU_SCRATCH_3__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_4
+#define GFX_IMU_SCRATCH_4__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_5
+#define GFX_IMU_SCRATCH_5__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_5__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_6
+#define GFX_IMU_SCRATCH_6__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_6__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_7
+#define GFX_IMU_SCRATCH_7__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_7__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_8
+#define GFX_IMU_SCRATCH_8__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_8__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_9
+#define GFX_IMU_SCRATCH_9__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_9__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_10
+#define GFX_IMU_SCRATCH_10__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_10__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_11
+#define GFX_IMU_SCRATCH_11__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_11__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_12
+#define GFX_IMU_SCRATCH_12__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_12__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_13
+#define GFX_IMU_SCRATCH_13__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_13__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_14
+#define GFX_IMU_SCRATCH_14__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_14__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_15
+#define GFX_IMU_SCRATCH_15__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_15__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_FW_GTS_LO
+#define GFX_IMU_FW_GTS_LO__TSTAMP_LO__SHIFT 0x0
+#define GFX_IMU_FW_GTS_LO__TSTAMP_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_FW_GTS_HI
+#define GFX_IMU_FW_GTS_HI__TSTAMP_HI__SHIFT 0x0
+#define GFX_IMU_FW_GTS_HI__TSTAMP_HI_MASK 0x00FFFFFFL
+//GFX_IMU_GTS_OFFSET_LO
+#define GFX_IMU_GTS_OFFSET_LO__GTS_OFFSET_LO__SHIFT 0x0
+#define GFX_IMU_GTS_OFFSET_LO__GTS_OFFSET_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_GTS_OFFSET_HI
+#define GFX_IMU_GTS_OFFSET_HI__GTS_OFFSET_HI__SHIFT 0x0
+#define GFX_IMU_GTS_OFFSET_HI__GTS_OFFSET_HI_MASK 0x00FFFFFFL
+//GFX_IMU_RLC_GTS_OFFSET_LO
+#define GFX_IMU_RLC_GTS_OFFSET_LO__GTS_OFFSET_LO__SHIFT 0x0
+#define GFX_IMU_RLC_GTS_OFFSET_LO__GTS_OFFSET_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_GTS_OFFSET_HI
+#define GFX_IMU_RLC_GTS_OFFSET_HI__GTS_OFFSET_HI__SHIFT 0x0
+#define GFX_IMU_RLC_GTS_OFFSET_HI__GTS_OFFSET_HI_MASK 0x00FFFFFFL
+//GFX_IMU_CORE_INT_STATUS
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_24__SHIFT 0x18
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_25__SHIFT 0x19
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_29__SHIFT 0x1d
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_24_MASK 0x01000000L
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_25_MASK 0x02000000L
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_29_MASK 0x20000000L
+//GFX_IMU_PIC_INT_MASK
+#define GFX_IMU_PIC_INT_MASK__MASK_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_MASK__MASK_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_MASK__MASK_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_MASK__MASK_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_MASK__MASK_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_MASK__MASK_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_MASK__MASK_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_MASK__MASK_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_MASK__MASK_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_MASK__MASK_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_MASK__MASK_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_MASK__MASK_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_MASK__MASK_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_MASK__MASK_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_MASK__MASK_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_MASK__MASK_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_MASK__MASK_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_MASK__MASK_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_MASK__MASK_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_MASK__MASK_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_MASK__MASK_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_MASK__MASK_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_MASK__MASK_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_MASK__MASK_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_MASK__MASK_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_MASK__MASK_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_MASK__MASK_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_MASK__MASK_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_MASK__MASK_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_MASK__MASK_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_MASK__MASK_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_MASK__MASK_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_MASK__MASK_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_MASK__MASK_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_MASK__MASK_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_MASK__MASK_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_MASK__MASK_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_MASK__MASK_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_MASK__MASK_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_MASK__MASK_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_MASK__MASK_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_MASK__MASK_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_MASK__MASK_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_MASK__MASK_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_MASK__MASK_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_MASK__MASK_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_MASK__MASK_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_MASK__MASK_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_MASK__MASK_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_MASK__MASK_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_MASK__MASK_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_MASK__MASK_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_MASK__MASK_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_MASK__MASK_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_MASK__MASK_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_MASK__MASK_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_MASK__MASK_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_LVL
+#define GFX_IMU_PIC_INT_LVL__LVL_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_LVL__LVL_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_LVL__LVL_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_LVL__LVL_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_LVL__LVL_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_LVL__LVL_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_LVL__LVL_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_LVL__LVL_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_LVL__LVL_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_LVL__LVL_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_LVL__LVL_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_LVL__LVL_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_LVL__LVL_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_LVL__LVL_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_LVL__LVL_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_LVL__LVL_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_LVL__LVL_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_LVL__LVL_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_LVL__LVL_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_LVL__LVL_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_LVL__LVL_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_LVL__LVL_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_LVL__LVL_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_LVL__LVL_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_LVL__LVL_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_LVL__LVL_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_LVL__LVL_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_LVL__LVL_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_LVL__LVL_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_LVL__LVL_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_LVL__LVL_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_LVL__LVL_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_LVL__LVL_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_LVL__LVL_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_LVL__LVL_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_LVL__LVL_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_LVL__LVL_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_LVL__LVL_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_LVL__LVL_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_LVL__LVL_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_LVL__LVL_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_LVL__LVL_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_LVL__LVL_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_LVL__LVL_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_LVL__LVL_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_LVL__LVL_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_LVL__LVL_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_LVL__LVL_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_LVL__LVL_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_LVL__LVL_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_LVL__LVL_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_LVL__LVL_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_LVL__LVL_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_LVL__LVL_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_LVL__LVL_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_LVL__LVL_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_LVL__LVL_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_EDGE
+#define GFX_IMU_PIC_INT_EDGE__EDGE_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_EDGE__EDGE_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_EDGE__EDGE_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_EDGE__EDGE_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_EDGE__EDGE_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_EDGE__EDGE_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_EDGE__EDGE_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_EDGE__EDGE_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_EDGE__EDGE_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_EDGE__EDGE_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_EDGE__EDGE_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_EDGE__EDGE_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_EDGE__EDGE_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_EDGE__EDGE_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_EDGE__EDGE_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_EDGE__EDGE_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_EDGE__EDGE_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_EDGE__EDGE_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_EDGE__EDGE_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_EDGE__EDGE_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_EDGE__EDGE_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_EDGE__EDGE_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_EDGE__EDGE_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_EDGE__EDGE_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_EDGE__EDGE_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_EDGE__EDGE_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_EDGE__EDGE_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_EDGE__EDGE_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_EDGE__EDGE_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_EDGE__EDGE_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_EDGE__EDGE_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_EDGE__EDGE_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_EDGE__EDGE_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_PRI_0
+#define GFX_IMU_PIC_INT_PRI_0__PRI_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_0__PRI_1__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_0__PRI_2__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_0__PRI_3__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_0__PRI_0_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_0__PRI_1_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_0__PRI_2_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_0__PRI_3_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_1
+#define GFX_IMU_PIC_INT_PRI_1__PRI_4__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_1__PRI_5__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_1__PRI_6__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_1__PRI_7__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_1__PRI_4_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_1__PRI_5_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_1__PRI_6_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_1__PRI_7_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_2
+#define GFX_IMU_PIC_INT_PRI_2__PRI_8__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_2__PRI_9__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_2__PRI_10__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_2__PRI_11__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_2__PRI_8_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_2__PRI_9_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_2__PRI_10_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_2__PRI_11_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_3
+#define GFX_IMU_PIC_INT_PRI_3__PRI_12__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_3__PRI_13__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_3__PRI_14__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_3__PRI_15__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_3__PRI_12_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_3__PRI_13_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_3__PRI_14_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_3__PRI_15_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_4
+#define GFX_IMU_PIC_INT_PRI_4__PRI_16__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_4__PRI_17__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_4__PRI_18__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_4__PRI_19__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_4__PRI_16_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_4__PRI_17_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_4__PRI_18_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_4__PRI_19_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_5
+#define GFX_IMU_PIC_INT_PRI_5__PRI_20__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_5__PRI_21__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_5__PRI_22__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_5__PRI_23__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_5__PRI_20_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_5__PRI_21_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_5__PRI_22_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_5__PRI_23_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_6
+#define GFX_IMU_PIC_INT_PRI_6__PRI_24__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_6__PRI_25__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_6__PRI_26__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_6__PRI_27__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_6__PRI_24_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_6__PRI_25_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_6__PRI_26_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_6__PRI_27_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_7
+#define GFX_IMU_PIC_INT_PRI_7__PRI_28__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_7__PRI_29__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_7__PRI_30__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_7__PRI_31__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_7__PRI_28_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_7__PRI_29_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_7__PRI_30_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_7__PRI_31_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_STATUS
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS31_MASK 0x80000000L
+//GFX_IMU_PIC_INTR
+#define GFX_IMU_PIC_INTR__INTR_n__SHIFT 0x0
+#define GFX_IMU_PIC_INTR__INTR_n_MASK 0x00000001L
+//GFX_IMU_PIC_INTR_ID
+#define GFX_IMU_PIC_INTR_ID__INTR_n__SHIFT 0x0
+#define GFX_IMU_PIC_INTR_ID__INTR_n_MASK 0x000000FFL
+//GFX_IMU_IH_CTRL_1
+#define GFX_IMU_IH_CTRL_1__CONTEXT_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//GFX_IMU_IH_CTRL_2
+#define GFX_IMU_IH_CTRL_2__CONTEXT_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_2__RING_ID__SHIFT 0x8
+#define GFX_IMU_IH_CTRL_2__VM_ID__SHIFT 0x10
+#define GFX_IMU_IH_CTRL_2__SRSTB__SHIFT 0x1f
+#define GFX_IMU_IH_CTRL_2__CONTEXT_ID_MASK 0x000000FFL
+#define GFX_IMU_IH_CTRL_2__RING_ID_MASK 0x0000FF00L
+#define GFX_IMU_IH_CTRL_2__VM_ID_MASK 0x000F0000L
+#define GFX_IMU_IH_CTRL_2__SRSTB_MASK 0x80000000L
+//GFX_IMU_IH_CTRL_3
+#define GFX_IMU_IH_CTRL_3__SOURCE_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_3__VF_ID__SHIFT 0x8
+#define GFX_IMU_IH_CTRL_3__VF__SHIFT 0xd
+#define GFX_IMU_IH_CTRL_3__SOURCE_ID_MASK 0x000000FFL
+#define GFX_IMU_IH_CTRL_3__VF_ID_MASK 0x00001F00L
+#define GFX_IMU_IH_CTRL_3__VF_MASK 0x00002000L
+//GFX_IMU_IH_STATUS
+#define GFX_IMU_IH_STATUS__IH_BUSY__SHIFT 0x0
+#define GFX_IMU_IH_STATUS__IH_BUSY_MASK 0x00000001L
+//GFX_IMU_FUSESTRAP
+#define GFX_IMU_FUSESTRAP__BOOT_VID__SHIFT 0x0
+#define GFX_IMU_FUSESTRAP__BOOT_VID_MASK 0x000001FFL
+//GFX_IMU_SMUIO_VIDCHG_CTRL
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__REQ__SHIFT 0x0
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__DATA__SHIFT 0x1
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__PSIEN__SHIFT 0xa
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__ACK__SHIFT 0xb
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__SRC_SEL__SHIFT 0x1f
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__REQ_MASK 0x00000001L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__DATA_MASK 0x000003FEL
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__PSIEN_MASK 0x00000400L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__ACK_MASK 0x00000800L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__SRC_SEL_MASK 0x80000000L
+//GFX_IMU_GFXCLK_BYPASS_CTRL
+#define GFX_IMU_GFXCLK_BYPASS_CTRL__BYPASS_SEL__SHIFT 0x0
+#define GFX_IMU_GFXCLK_BYPASS_CTRL__BYPASS_SEL_MASK 0x00000001L
+//GFX_IMU_CLK_CTRL
+#define GFX_IMU_CLK_CTRL__CG_OVR__SHIFT 0x0
+#define GFX_IMU_CLK_CTRL__CG_OVR_CORE__SHIFT 0x1
+#define GFX_IMU_CLK_CTRL__CLKDIV__SHIFT 0x4
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_CHGTOG__SHIFT 0x8
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DONETOG__SHIFT 0x9
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DIV__SHIFT 0x10
+#define GFX_IMU_CLK_CTRL__COOLDOWN_PERIOD__SHIFT 0x1c
+#define GFX_IMU_CLK_CTRL__CG_OVR_MASK 0x00000001L
+#define GFX_IMU_CLK_CTRL__CG_OVR_CORE_MASK 0x00000002L
+#define GFX_IMU_CLK_CTRL__CLKDIV_MASK 0x00000010L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_CHGTOG_MASK 0x00000100L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DONETOG_MASK 0x00000200L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DIV_MASK 0x007F0000L
+#define GFX_IMU_CLK_CTRL__COOLDOWN_PERIOD_MASK 0xF0000000L
+//GFX_IMU_DOORBELL_CONTROL
+#define GFX_IMU_DOORBELL_CONTROL__OVR_EN__SHIFT 0x0
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_OVR__SHIFT 0x1
+#define GFX_IMU_DOORBELL_CONTROL__CP_DB_RESP_PEND_COUNT__SHIFT 0x18
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_STATUS__SHIFT 0x1f
+#define GFX_IMU_DOORBELL_CONTROL__OVR_EN_MASK 0x00000001L
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_OVR_MASK 0x00000002L
+#define GFX_IMU_DOORBELL_CONTROL__CP_DB_RESP_PEND_COUNT_MASK 0x7F000000L
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_STATUS_MASK 0x80000000L
+//GFX_IMU_RLC_CG_CTRL
+#define GFX_IMU_RLC_CG_CTRL__FORCE_CGCG__SHIFT 0x0
+#define GFX_IMU_RLC_CG_CTRL__MGCG_EARLY_EN__SHIFT 0x1
+#define GFX_IMU_RLC_CG_CTRL__FORCE_CGCG_MASK 0x00000001L
+#define GFX_IMU_RLC_CG_CTRL__MGCG_EARLY_EN_MASK 0x00000002L
+//GFX_IMU_RLC_THROTTLE_GFX
+#define GFX_IMU_RLC_THROTTLE_GFX__THROTTLE_EN__SHIFT 0x0
+#define GFX_IMU_RLC_THROTTLE_GFX__THROTTLE_EN_MASK 0x00000001L
+//GFX_IMU_RLC_RESET_VECTOR
+#define GFX_IMU_RLC_RESET_VECTOR__COLD_VS_GFXOFF__SHIFT 0x0
+#define GFX_IMU_RLC_RESET_VECTOR__WARM_RESET_EXIT__SHIFT 0x2
+#define GFX_IMU_RLC_RESET_VECTOR__VF_FLR_EXIT__SHIFT 0x3
+#define GFX_IMU_RLC_RESET_VECTOR__VECTOR__SHIFT 0x4
+#define GFX_IMU_RLC_RESET_VECTOR__COLD_VS_GFXOFF_MASK 0x00000001L
+#define GFX_IMU_RLC_RESET_VECTOR__WARM_RESET_EXIT_MASK 0x00000004L
+#define GFX_IMU_RLC_RESET_VECTOR__VF_FLR_EXIT_MASK 0x00000008L
+#define GFX_IMU_RLC_RESET_VECTOR__VECTOR_MASK 0x000000F0L
+//GFX_IMU_RLC_OVERRIDE
+#define GFX_IMU_RLC_OVERRIDE__DS_ALLOW__SHIFT 0x0
+#define GFX_IMU_RLC_OVERRIDE__DS_ALLOW_MASK 0x00000001L
+//GFX_IMU_DPM_CONTROL
+#define GFX_IMU_DPM_CONTROL__ACC_RESET__SHIFT 0x0
+#define GFX_IMU_DPM_CONTROL__ACC_START__SHIFT 0x1
+#define GFX_IMU_DPM_CONTROL__BUSY_MASK__SHIFT 0x2
+#define GFX_IMU_DPM_CONTROL__ACC_RESET_MASK 0x00000001L
+#define GFX_IMU_DPM_CONTROL__ACC_START_MASK 0x00000002L
+#define GFX_IMU_DPM_CONTROL__BUSY_MASK_MASK 0x0003FFFCL
+//GFX_IMU_DPM_ACC
+#define GFX_IMU_DPM_ACC__COUNT__SHIFT 0x0
+#define GFX_IMU_DPM_ACC__COUNT_MASK 0x00FFFFFFL
+//GFX_IMU_DPM_REF_COUNTER
+#define GFX_IMU_DPM_REF_COUNTER__COUNT__SHIFT 0x0
+#define GFX_IMU_DPM_REF_COUNTER__COUNT_MASK 0x00FFFFFFL
+//GFX_IMU_RLC_RAM_INDEX
+#define GFX_IMU_RLC_RAM_INDEX__INDEX__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_INDEX__RLC_INDEX__SHIFT 0x10
+#define GFX_IMU_RLC_RAM_INDEX__RAM_VALID__SHIFT 0x1f
+#define GFX_IMU_RLC_RAM_INDEX__INDEX_MASK 0x000000FFL
+#define GFX_IMU_RLC_RAM_INDEX__RLC_INDEX_MASK 0x00FF0000L
+#define GFX_IMU_RLC_RAM_INDEX__RAM_VALID_MASK 0x80000000L
+//GFX_IMU_RLC_RAM_ADDR_HIGH
+#define GFX_IMU_RLC_RAM_ADDR_HIGH__ADDR_MSB__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_ADDR_HIGH__ADDR_MSB_MASK 0x0000FFFFL
+//GFX_IMU_RLC_RAM_ADDR_LOW
+#define GFX_IMU_RLC_RAM_ADDR_LOW__ADDR_LSB__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_ADDR_LOW__ADDR_LSB_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_RAM_DATA
+#define GFX_IMU_RLC_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_FENCE_CTRL
+#define GFX_IMU_FENCE_CTRL__ENABLED__SHIFT 0x0
+#define GFX_IMU_FENCE_CTRL__ARM_LOG__SHIFT 0x1
+#define GFX_IMU_FENCE_CTRL__GRBM_RSMU_FENCE_ENABLE__SHIFT 0x2
+#define GFX_IMU_FENCE_CTRL__FLUSH_ARBITER_CREDITS__SHIFT 0x3
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_EN__SHIFT 0x8
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR__SHIFT 0x9
+#define GFX_IMU_FENCE_CTRL__ENABLED_MASK 0x00000001L
+#define GFX_IMU_FENCE_CTRL__ARM_LOG_MASK 0x00000002L
+#define GFX_IMU_FENCE_CTRL__GRBM_RSMU_FENCE_ENABLE_MASK 0x00000004L
+#define GFX_IMU_FENCE_CTRL__FLUSH_ARBITER_CREDITS_MASK 0x00000008L
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_EN_MASK 0x00000100L
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_MASK 0x00000200L
+//GFX_IMU_FENCE_LOG_INIT
+#define GFX_IMU_FENCE_LOG_INIT__UNIT_ID__SHIFT 0x0
+#define GFX_IMU_FENCE_LOG_INIT__INITIATOR_ID__SHIFT 0x7
+#define GFX_IMU_FENCE_LOG_INIT__UNIT_ID_MASK 0x0000007FL
+#define GFX_IMU_FENCE_LOG_INIT__INITIATOR_ID_MASK 0x0001FF80L
+//GFX_IMU_FENCE_LOG_ADDR
+#define GFX_IMU_FENCE_LOG_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_FENCE_LOG_ADDR__ADDR_MASK 0x000FFFFCL
+//GFX_IMU_PROGRAM_CTR
+#define GFX_IMU_PROGRAM_CTR__PC__SHIFT 0x0
+#define GFX_IMU_PROGRAM_CTR__PC_MASK 0xFFFFFFFFL
+//GFX_IMU_CORE_CTRL
+#define GFX_IMU_CORE_CTRL__CRESET__SHIFT 0x0
+#define GFX_IMU_CORE_CTRL__CSTALL__SHIFT 0x1
+#define GFX_IMU_CORE_CTRL__CDBGENABLE__SHIFT 0x2
+#define GFX_IMU_CORE_CTRL__DRESET__SHIFT 0x3
+#define GFX_IMU_CORE_CTRL__HALT_ON_RESET__SHIFT 0x4
+#define GFX_IMU_CORE_CTRL__BREAK_IN__SHIFT 0x8
+#define GFX_IMU_CORE_CTRL__BREAK_OUT_ACK__SHIFT 0x9
+#define GFX_IMU_CORE_CTRL__CRESET_MASK 0x00000001L
+#define GFX_IMU_CORE_CTRL__CSTALL_MASK 0x00000002L
+#define GFX_IMU_CORE_CTRL__CDBGENABLE_MASK 0x00000004L
+#define GFX_IMU_CORE_CTRL__DRESET_MASK 0x00000008L
+#define GFX_IMU_CORE_CTRL__HALT_ON_RESET_MASK 0x00000010L
+#define GFX_IMU_CORE_CTRL__BREAK_IN_MASK 0x00000100L
+#define GFX_IMU_CORE_CTRL__BREAK_OUT_ACK_MASK 0x00000200L
+//GFX_IMU_CORE_STATUS
+#define GFX_IMU_CORE_STATUS__CBUSY__SHIFT 0x0
+#define GFX_IMU_CORE_STATUS__PWAIT_MODE__SHIFT 0x1
+#define GFX_IMU_CORE_STATUS__PSP_ACC_ERR__SHIFT 0x2
+#define GFX_IMU_CORE_STATUS__CINTLEVEL__SHIFT 0x4
+#define GFX_IMU_CORE_STATUS__BREAK_IN_ACK__SHIFT 0x8
+#define GFX_IMU_CORE_STATUS__BREAK_OUT__SHIFT 0x9
+#define GFX_IMU_CORE_STATUS__DEBUG_MODE__SHIFT 0xa
+#define GFX_IMU_CORE_STATUS__P_FATAL_ERROR__SHIFT 0xb
+#define GFX_IMU_CORE_STATUS__FAULT_SEVERITY_LEVEL__SHIFT 0x18
+#define GFX_IMU_CORE_STATUS__FAULT_TYPE__SHIFT 0x1c
+#define GFX_IMU_CORE_STATUS__CBUSY_MASK 0x00000001L
+#define GFX_IMU_CORE_STATUS__PWAIT_MODE_MASK 0x00000002L
+#define GFX_IMU_CORE_STATUS__PSP_ACC_ERR_MASK 0x00000004L
+#define GFX_IMU_CORE_STATUS__CINTLEVEL_MASK 0x000000F0L
+#define GFX_IMU_CORE_STATUS__BREAK_IN_ACK_MASK 0x00000100L
+#define GFX_IMU_CORE_STATUS__BREAK_OUT_MASK 0x00000200L
+#define GFX_IMU_CORE_STATUS__DEBUG_MODE_MASK 0x00000400L
+#define GFX_IMU_CORE_STATUS__P_FATAL_ERROR_MASK 0x00000800L
+#define GFX_IMU_CORE_STATUS__FAULT_SEVERITY_LEVEL_MASK 0x0F000000L
+#define GFX_IMU_CORE_STATUS__FAULT_TYPE_MASK 0xF0000000L
+//GFX_IMU_PWROKRAW
+#define GFX_IMU_PWROKRAW__PWROKRAW__SHIFT 0x0
+#define GFX_IMU_PWROKRAW__PWROKRAW_MASK 0x00000001L
+//GFX_IMU_PWROK
+#define GFX_IMU_PWROK__PWROK__SHIFT 0x0
+#define GFX_IMU_PWROK__PWROK_MASK 0x00000001L
+//GFX_IMU_GAP_PWROK
+#define GFX_IMU_GAP_PWROK__GAP_PWROK__SHIFT 0x0
+#define GFX_IMU_GAP_PWROK__GAP_PWROK_MASK 0x00000001L
+//GFX_IMU_RESETn
+#define GFX_IMU_RESETn__Cpl_RESETn__SHIFT 0x0
+#define GFX_IMU_RESETn__Cpl_RESETn_MASK 0x00000001L
+//GFX_IMU_GFX_RESET_CTRL
+#define GFX_IMU_GFX_RESET_CTRL__HARD_RESETB__SHIFT 0x0
+#define GFX_IMU_GFX_RESET_CTRL__EA_RESETB__SHIFT 0x1
+#define GFX_IMU_GFX_RESET_CTRL__UTCL2_RESETB__SHIFT 0x2
+#define GFX_IMU_GFX_RESET_CTRL__SDMA_RESETB__SHIFT 0x3
+#define GFX_IMU_GFX_RESET_CTRL__GRBM_RESETB__SHIFT 0x4
+#define GFX_IMU_GFX_RESET_CTRL__HARD_RESETB_MASK 0x00000001L
+#define GFX_IMU_GFX_RESET_CTRL__EA_RESETB_MASK 0x00000002L
+#define GFX_IMU_GFX_RESET_CTRL__UTCL2_RESETB_MASK 0x00000004L
+#define GFX_IMU_GFX_RESET_CTRL__SDMA_RESETB_MASK 0x00000008L
+#define GFX_IMU_GFX_RESET_CTRL__GRBM_RESETB_MASK 0x00000010L
+//GFX_IMU_AEB_OVERRIDE
+#define GFX_IMU_AEB_OVERRIDE__AEB_OVERRIDE_CTRL__SHIFT 0x0
+#define GFX_IMU_AEB_OVERRIDE__AEB_RESET_VALUE__SHIFT 0x1
+#define GFX_IMU_AEB_OVERRIDE__AEB_VALID_VALUE__SHIFT 0x2
+#define GFX_IMU_AEB_OVERRIDE__AEB_OVERRIDE_CTRL_MASK 0x00000001L
+#define GFX_IMU_AEB_OVERRIDE__AEB_RESET_VALUE_MASK 0x00000002L
+#define GFX_IMU_AEB_OVERRIDE__AEB_VALID_VALUE_MASK 0x00000004L
+//GFX_IMU_VDCI_RESET_CTRL
+#define GFX_IMU_VDCI_RESET_CTRL__SOC2GFX_VDCI_RESETn__SHIFT 0x0
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_EA_SDF_VDCI_RESET__SHIFT 0x1
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_UTCL2_ATHUB_VDCI_RESET__SHIFT 0x2
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_IMUAXI_SYSHUB_VDCI_RESET__SHIFT 0x3
+#define GFX_IMU_VDCI_RESET_CTRL__IMU2GFX_VDCI_RESETn__SHIFT 0x4
+#define GFX_IMU_VDCI_RESET_CTRL__SOC2GFX_VDCI_RESETn_MASK 0x00000001L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_EA_SDF_VDCI_RESET_MASK 0x00000002L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_UTCL2_ATHUB_VDCI_RESET_MASK 0x00000004L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_IMUAXI_SYSHUB_VDCI_RESET_MASK 0x00000008L
+#define GFX_IMU_VDCI_RESET_CTRL__IMU2GFX_VDCI_RESETn_MASK 0x00000010L
+//GFX_IMU_GFX_ISO_CTRL
+#define GFX_IMU_GFX_ISO_CTRL__GFX2IMU_ISOn__SHIFT 0x0
+#define GFX_IMU_GFX_ISO_CTRL__SOC_EA_SDF_VDCI_ISOn_EN__SHIFT 0x1
+#define GFX_IMU_GFX_ISO_CTRL__SOC_UTCL2_ATHUB_VDCI_ISOn_EN__SHIFT 0x2
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_ISOn__SHIFT 0x3
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_CLK_ISOn__SHIFT 0x4
+#define GFX_IMU_GFX_ISO_CTRL__GFX2IMU_ISOn_MASK 0x00000001L
+#define GFX_IMU_GFX_ISO_CTRL__SOC_EA_SDF_VDCI_ISOn_EN_MASK 0x00000002L
+#define GFX_IMU_GFX_ISO_CTRL__SOC_UTCL2_ATHUB_VDCI_ISOn_EN_MASK 0x00000004L
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_ISOn_MASK 0x00000008L
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_CLK_ISOn_MASK 0x00000010L
+//GFX_IMU_TIMER0_CTRL0
+#define GFX_IMU_TIMER0_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER0_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER0_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER0_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER0_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER0_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER0_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER0_CTRL1
+#define GFX_IMU_TIMER0_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER0_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER0_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER0_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER0_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER0_CMP_AUTOINC
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER0_CMP_INTEN
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER0_CMP0
+#define GFX_IMU_TIMER0_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_CMP1
+#define GFX_IMU_TIMER0_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_CMP3
+#define GFX_IMU_TIMER0_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_VALUE
+#define GFX_IMU_TIMER0_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CTRL0
+#define GFX_IMU_TIMER1_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER1_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER1_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER1_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER1_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER1_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER1_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER1_CTRL1
+#define GFX_IMU_TIMER1_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER1_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER1_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER1_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER1_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER1_CMP_AUTOINC
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER1_CMP_INTEN
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER1_CMP0
+#define GFX_IMU_TIMER1_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CMP1
+#define GFX_IMU_TIMER1_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CMP3
+#define GFX_IMU_TIMER1_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_VALUE
+#define GFX_IMU_TIMER1_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CTRL0
+#define GFX_IMU_TIMER2_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER2_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER2_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER2_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER2_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER2_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER2_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER2_CTRL1
+#define GFX_IMU_TIMER2_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER2_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER2_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER2_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER2_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER2_CMP_AUTOINC
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER2_CMP_INTEN
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER2_CMP0
+#define GFX_IMU_TIMER2_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CMP1
+#define GFX_IMU_TIMER2_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CMP3
+#define GFX_IMU_TIMER2_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_VALUE
+#define GFX_IMU_TIMER2_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_FUSE_CTRL
+#define GFX_IMU_FUSE_CTRL__DIV_OVR__SHIFT 0x0
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_EN__SHIFT 0x5
+#define GFX_IMU_FUSE_CTRL__FORCE_DONE__SHIFT 0x6
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_MASK 0x0000001FL
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_EN_MASK 0x00000020L
+#define GFX_IMU_FUSE_CTRL__FORCE_DONE_MASK 0x00000040L
+//GFX_IMU_D_RAM_ADDR
+#define GFX_IMU_D_RAM_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_D_RAM_ADDR__ADDR_MASK 0x0000FFFCL
+//GFX_IMU_D_RAM_DATA
+#define GFX_IMU_D_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_D_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_GFX_IH_GASKET_CTRL
+#define GFX_IMU_GFX_IH_GASKET_CTRL__SRSTB__SHIFT 0x0
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_LEVEL__SHIFT 0x10
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_OVERFLOW__SHIFT 0x14
+#define GFX_IMU_GFX_IH_GASKET_CTRL__SRSTB_MASK 0x00000001L
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_LEVEL_MASK 0x000F0000L
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_OVERFLOW_MASK 0x00100000L
+
+
+// addressBlock: gc_gfx_imu_gfx_imu_pspdec
+//GFX_IMU_RLC_BOOTLOADER_ADDR_HI
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_BOOTLOADER_ADDR_LO
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_LO__ADDR_LO__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_BOOTLOADER_SIZE
+#define GFX_IMU_RLC_BOOTLOADER_SIZE__SIZE__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_SIZE__SIZE_MASK 0x03FFFFFFL
+//GFX_IMU_I_RAM_ADDR
+#define GFX_IMU_I_RAM_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_I_RAM_ADDR__ADDR_MASK 0x0000FFFCL
+//GFX_IMU_I_RAM_DATA
+#define GFX_IMU_I_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_I_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gccacind
+//GC_CAC_ID
+#define GC_CAC_ID__CAC_BLOCK_ID__SHIFT 0x0
+#define GC_CAC_ID__CAC_SIGNAL_ID__SHIFT 0x6
+#define GC_CAC_ID__CAC_BLOCK_ID_MASK 0x0000003FL
+#define GC_CAC_ID__CAC_SIGNAL_ID_MASK 0x00003FC0L
+//GC_CAC_CNTL
+#define GC_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x0
+#define GC_CAC_CNTL__CAC_THRESHOLD_MASK 0x0000FFFFL
+//GC_CAC_ACC_CP0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP1
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP2
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA1
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA2
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA3
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA4
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA5
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER1
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER2
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER3
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER4
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER5
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER6
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER7
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER8
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER9
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML20
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML21
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML22
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML23
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML24
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER1
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER2
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER3
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER4
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS1
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS2
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS3
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS4
+#define GC_CAC_ACC_GDS4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE0
+#define GC_CAC_ACC_GE0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE1
+#define GC_CAC_ACC_GE1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE2
+#define GC_CAC_ACC_GE2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE3
+#define GC_CAC_ACC_GE3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE4
+#define GC_CAC_ACC_GE4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE5
+#define GC_CAC_ACC_GE5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE6
+#define GC_CAC_ACC_GE6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE7
+#define GC_CAC_ACC_GE7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE8
+#define GC_CAC_ACC_GE8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE9
+#define GC_CAC_ACC_GE9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE10
+#define GC_CAC_ACC_GE10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE11
+#define GC_CAC_ACC_GE11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE11__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE12
+#define GC_CAC_ACC_GE12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE12__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE13
+#define GC_CAC_ACC_GE13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE13__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE14
+#define GC_CAC_ACC_GE14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE14__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE15
+#define GC_CAC_ACC_GE15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE15__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE16
+#define GC_CAC_ACC_GE16__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE16__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE17
+#define GC_CAC_ACC_GE17__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE17__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE18
+#define GC_CAC_ACC_GE18__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE18__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE19
+#define GC_CAC_ACC_GE19__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE19__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE20
+#define GC_CAC_ACC_GE20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PMM0
+#define GC_CAC_ACC_PMM0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PMM0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C0
+#define GC_CAC_ACC_GL2C0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C1
+#define GC_CAC_ACC_GL2C1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C2
+#define GC_CAC_ACC_GL2C2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C3
+#define GC_CAC_ACC_GL2C3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C4
+#define GC_CAC_ACC_GL2C4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH0
+#define GC_CAC_ACC_PH0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH1
+#define GC_CAC_ACC_PH1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH2
+#define GC_CAC_ACC_PH2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH3
+#define GC_CAC_ACC_PH3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH4
+#define GC_CAC_ACC_PH4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH5
+#define GC_CAC_ACC_PH5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH6
+#define GC_CAC_ACC_PH6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH7
+#define GC_CAC_ACC_PH7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA0
+#define GC_CAC_ACC_SDMA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA1
+#define GC_CAC_ACC_SDMA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA2
+#define GC_CAC_ACC_SDMA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA3
+#define GC_CAC_ACC_SDMA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA4
+#define GC_CAC_ACC_SDMA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA5
+#define GC_CAC_ACC_SDMA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA6
+#define GC_CAC_ACC_SDMA6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA7
+#define GC_CAC_ACC_SDMA7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA8
+#define GC_CAC_ACC_SDMA8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA9
+#define GC_CAC_ACC_SDMA9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA10
+#define GC_CAC_ACC_SDMA10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA11
+#define GC_CAC_ACC_SDMA11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA11__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC0
+#define GC_CAC_ACC_CHC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC1
+#define GC_CAC_ACC_CHC1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC2
+#define GC_CAC_ACC_CHC2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS0
+#define GC_CAC_ACC_GUS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS1
+#define GC_CAC_ACC_GUS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS2
+#define GC_CAC_ACC_GUS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_RLC0
+#define GC_CAC_ACC_RLC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_RLC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL20
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL21
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL22
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL23
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL24
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//RELEASE_TO_STALL_LUT_1_8
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//RELEASE_TO_STALL_LUT_9_16
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//RELEASE_TO_STALL_LUT_17_20
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//STALL_TO_RELEASE_LUT_1_4
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//STALL_TO_RELEASE_LUT_5_7
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+//STALL_TO_PWRBRK_LUT_1_4
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_1_MASK 0x00000007L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_2_MASK 0x00000700L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_3_MASK 0x00070000L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_4_MASK 0x07000000L
+//STALL_TO_PWRBRK_LUT_5_7
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_5_MASK 0x00000007L
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_6_MASK 0x00000700L
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_7_MASK 0x00070000L
+//PWRBRK_STALL_TO_RELEASE_LUT_1_4
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//PWRBRK_STALL_TO_RELEASE_LUT_5_7
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+//PWRBRK_RELEASE_TO_STALL_LUT_1_8
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//PWRBRK_RELEASE_TO_STALL_LUT_9_16
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//PWRBRK_RELEASE_TO_STALL_LUT_17_20
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//FIXED_PATTERN_PERF_COUNTER_1
+#define FIXED_PATTERN_PERF_COUNTER_1__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_1__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_2
+#define FIXED_PATTERN_PERF_COUNTER_2__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_2__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_3
+#define FIXED_PATTERN_PERF_COUNTER_3__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_3__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_4
+#define FIXED_PATTERN_PERF_COUNTER_4__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_4__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_5
+#define FIXED_PATTERN_PERF_COUNTER_5__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_5__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_6
+#define FIXED_PATTERN_PERF_COUNTER_6__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_6__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_7
+#define FIXED_PATTERN_PERF_COUNTER_7__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_7__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_8
+#define FIXED_PATTERN_PERF_COUNTER_8__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_8__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_9
+#define FIXED_PATTERN_PERF_COUNTER_9__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_9__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_10
+#define FIXED_PATTERN_PERF_COUNTER_10__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_10__PERF_COUNTER_MASK 0x0001FFFFL
+//HW_LUT_UPDATE_STATUS
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_DONE__SHIFT 0x0
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR__SHIFT 0x1
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_STEP__SHIFT 0x2
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_DONE__SHIFT 0x5
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR__SHIFT 0x6
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_STEP__SHIFT 0x7
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_DONE__SHIFT 0xa
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR__SHIFT 0xb
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_STEP__SHIFT 0xc
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_DONE__SHIFT 0x11
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR__SHIFT 0x12
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_STEP__SHIFT 0x13
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_DONE__SHIFT 0x16
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR__SHIFT 0x17
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_STEP__SHIFT 0x18
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_DONE_MASK 0x00000001L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_MASK 0x00000002L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_STEP_MASK 0x0000001CL
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_DONE_MASK 0x00000020L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_MASK 0x00000040L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_STEP_MASK 0x00000380L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_DONE_MASK 0x00000400L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_MASK 0x00000800L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_STEP_MASK 0x0001F000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_DONE_MASK 0x00020000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_MASK 0x00040000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_STEP_MASK 0x00380000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_DONE_MASK 0x00400000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_MASK 0x00800000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_STEP_MASK 0x1F000000L
+
+
+// addressBlock: secacind
+//SE_CAC_ID
+#define SE_CAC_ID__CAC_BLOCK_ID__SHIFT 0x0
+#define SE_CAC_ID__CAC_SIGNAL_ID__SHIFT 0x6
+#define SE_CAC_ID__CAC_BLOCK_ID_MASK 0x0000003FL
+#define SE_CAC_ID__CAC_SIGNAL_ID_MASK 0x00003FC0L
+//SE_CAC_CNTL
+#define SE_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x0
+#define SE_CAC_CNTL__CAC_THRESHOLD_MASK 0x0000FFFFL
+
+
+// addressBlock: grtavfsind
+//RTAVFS_REG0
+#define RTAVFS_REG0__RTAVFSZONE0STARTCNT__SHIFT 0x0
+#define RTAVFS_REG0__RTAVFSZONE0STOPCNT__SHIFT 0x10
+#define RTAVFS_REG0__RTAVFSZONE0STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG0__RTAVFSZONE0STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG1
+#define RTAVFS_REG1__RTAVFSZONE1STARTCNT__SHIFT 0x0
+#define RTAVFS_REG1__RTAVFSZONE1STOPCNT__SHIFT 0x10
+#define RTAVFS_REG1__RTAVFSZONE1STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG1__RTAVFSZONE1STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG2
+#define RTAVFS_REG2__RTAVFSZONE2STARTCNT__SHIFT 0x0
+#define RTAVFS_REG2__RTAVFSZONE2STOPCNT__SHIFT 0x10
+#define RTAVFS_REG2__RTAVFSZONE2STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG2__RTAVFSZONE2STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG3
+#define RTAVFS_REG3__RTAVFSZONE3STARTCNT__SHIFT 0x0
+#define RTAVFS_REG3__RTAVFSZONE3STOPCNT__SHIFT 0x10
+#define RTAVFS_REG3__RTAVFSZONE3STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG3__RTAVFSZONE3STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG4
+#define RTAVFS_REG4__RTAVFSZONE4STARTCNT__SHIFT 0x0
+#define RTAVFS_REG4__RTAVFSZONE4STOPCNT__SHIFT 0x10
+#define RTAVFS_REG4__RTAVFSZONE4STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG4__RTAVFSZONE4STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG5
+#define RTAVFS_REG5__RTAVFSZONE0EN0__SHIFT 0x0
+#define RTAVFS_REG5__RTAVFSZONE0EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG6
+#define RTAVFS_REG6__RTAVFSZONE0EN1__SHIFT 0x0
+#define RTAVFS_REG6__RTAVFSZONE0EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG7
+#define RTAVFS_REG7__RTAVFSZONE1EN0__SHIFT 0x0
+#define RTAVFS_REG7__RTAVFSZONE1EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG8
+#define RTAVFS_REG8__RTAVFSZONE1EN1__SHIFT 0x0
+#define RTAVFS_REG8__RTAVFSZONE1EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG9
+#define RTAVFS_REG9__RTAVFSZONE2EN0__SHIFT 0x0
+#define RTAVFS_REG9__RTAVFSZONE2EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG10
+#define RTAVFS_REG10__RTAVFSZONE2EN1__SHIFT 0x0
+#define RTAVFS_REG10__RTAVFSZONE2EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG11
+#define RTAVFS_REG11__RTAVFSZONE3EN0__SHIFT 0x0
+#define RTAVFS_REG11__RTAVFSZONE3EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG12
+#define RTAVFS_REG12__RTAVFSZONE3EN1__SHIFT 0x0
+#define RTAVFS_REG12__RTAVFSZONE3EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG13
+#define RTAVFS_REG13__RTAVFSZONE4EN0__SHIFT 0x0
+#define RTAVFS_REG13__RTAVFSZONE4EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG14
+#define RTAVFS_REG14__RTAVFSZONE4EN1__SHIFT 0x0
+#define RTAVFS_REG14__RTAVFSZONE4EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG15
+#define RTAVFS_REG15__RTAVFSVF0FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG15__RTAVFSVF0VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG15__RTAVFSVF0FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG15__RTAVFSVF0VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG16
+#define RTAVFS_REG16__RTAVFSVF1FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG16__RTAVFSVF1VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG16__RTAVFSVF1FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG16__RTAVFSVF1VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG17
+#define RTAVFS_REG17__RTAVFSVF2FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG17__RTAVFSVF2VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG17__RTAVFSVF2FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG17__RTAVFSVF2VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG18
+#define RTAVFS_REG18__RTAVFSVF3FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG18__RTAVFSVF3VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG18__RTAVFSVF3FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG18__RTAVFSVF3VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG19
+#define RTAVFS_REG19__RTAVFSGB_ZONE0__SHIFT 0x0
+#define RTAVFS_REG19__RTAVFSGB_ZONE1__SHIFT 0x6
+#define RTAVFS_REG19__RTAVFSGB_ZONE2__SHIFT 0xc
+#define RTAVFS_REG19__RTAVFSGB_ZONE3__SHIFT 0x12
+#define RTAVFS_REG19__RTAVFSGB_ZONE4__SHIFT 0x19
+#define RTAVFS_REG19__RTAVFSGB_ZONE0_MASK 0x0000003FL
+#define RTAVFS_REG19__RTAVFSGB_ZONE1_MASK 0x00000FC0L
+#define RTAVFS_REG19__RTAVFSGB_ZONE2_MASK 0x0003F000L
+#define RTAVFS_REG19__RTAVFSGB_ZONE3_MASK 0x01FC0000L
+#define RTAVFS_REG19__RTAVFSGB_ZONE4_MASK 0xFE000000L
+//RTAVFS_REG20
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG20__RTAVFSZONE0RESERVED__SHIFT 0x12
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG20__RTAVFSZONE0RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG21
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG21__RTAVFSZONE1RESERVED__SHIFT 0x12
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG21__RTAVFSZONE1RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG22
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG22__RTAVFSZONE2RESERVED__SHIFT 0x12
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG22__RTAVFSZONE2RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG23
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG23__RTAVFSZONE3RESERVED__SHIFT 0x12
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG23__RTAVFSZONE3RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG24
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG24__RTAVFSZONE4RESERVED__SHIFT 0x12
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG24__RTAVFSZONE4RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG25
+#define RTAVFS_REG25__RTAVFSRESERVED0__SHIFT 0x0
+#define RTAVFS_REG25__RTAVFSRESERVED0_MASK 0xFFFFFFFFL
+//RTAVFS_REG26
+#define RTAVFS_REG26__RTAVFSRESERVED1__SHIFT 0x0
+#define RTAVFS_REG26__RTAVFSRESERVED1_MASK 0xFFFFFFFFL
+//RTAVFS_REG27
+#define RTAVFS_REG27__RTAVFSRESERVED2__SHIFT 0x0
+#define RTAVFS_REG27__RTAVFSRESERVED2_MASK 0xFFFFFFFFL
+//RTAVFS_REG28
+#define RTAVFS_REG28__RTAVFSZONE0INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG28__RTAVFSZONE1INTERCEPT__SHIFT 0x10
+#define RTAVFS_REG28__RTAVFSZONE0INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG28__RTAVFSZONE1INTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG29
+#define RTAVFS_REG29__RTAVFSZONE2INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG29__RTAVFSZONE3INTERCEPT__SHIFT 0x10
+#define RTAVFS_REG29__RTAVFSZONE2INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG29__RTAVFSZONE3INTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG30
+#define RTAVFS_REG30__RTAVFSZONE4INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG30__RTAVFSRESERVEDINTERCEPT__SHIFT 0x10
+#define RTAVFS_REG30__RTAVFSZONE4INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG30__RTAVFSRESERVEDINTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG31
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV0__SHIFT 0x0
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV1__SHIFT 0x2
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV2__SHIFT 0x4
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV3__SHIFT 0x6
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV4__SHIFT 0x8
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV5__SHIFT 0xa
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV6__SHIFT 0xc
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV7__SHIFT 0xe
+#define RTAVFS_REG31__RESERVED__SHIFT 0x10
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV0_MASK 0x00000003L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV1_MASK 0x0000000CL
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV2_MASK 0x00000030L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV3_MASK 0x000000C0L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV4_MASK 0x00000300L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV5_MASK 0x00000C00L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV6_MASK 0x00003000L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV7_MASK 0x0000C000L
+#define RTAVFS_REG31__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG32
+#define RTAVFS_REG32__RTAVFSFSMSTARTUPCNT__SHIFT 0x0
+#define RTAVFS_REG32__RESERVED__SHIFT 0x10
+#define RTAVFS_REG32__RTAVFSFSMSTARTUPCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG32__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG33
+#define RTAVFS_REG33__RTAVFSFSMIDLECNT__SHIFT 0x0
+#define RTAVFS_REG33__RESERVED__SHIFT 0x10
+#define RTAVFS_REG33__RTAVFSFSMIDLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG33__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG34
+#define RTAVFS_REG34__RTAVFSFSMRESETCPORIPPLECOUNTERSCNT__SHIFT 0x0
+#define RTAVFS_REG34__RESERVED__SHIFT 0x10
+#define RTAVFS_REG34__RTAVFSFSMRESETCPORIPPLECOUNTERSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG34__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG35
+#define RTAVFS_REG35__RTAVFSFSMSTARTCPOSCNT__SHIFT 0x0
+#define RTAVFS_REG35__RESERVED__SHIFT 0x10
+#define RTAVFS_REG35__RTAVFSFSMSTARTCPOSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG35__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG36
+#define RTAVFS_REG36__RTAVFSFSMSTARTRIPPLECOUNTERSCNT__SHIFT 0x0
+#define RTAVFS_REG36__RESERVED__SHIFT 0x10
+#define RTAVFS_REG36__RTAVFSFSMSTARTRIPPLECOUNTERSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG36__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG37
+#define RTAVFS_REG37__RTAVFSFSMRIPPLECOUNTERSDONECNT__SHIFT 0x0
+#define RTAVFS_REG37__RESERVED__SHIFT 0x10
+#define RTAVFS_REG37__RTAVFSFSMRIPPLECOUNTERSDONECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG37__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG38
+#define RTAVFS_REG38__RTAVFSFSMCPOFINALRESULTREADYCNT__SHIFT 0x0
+#define RTAVFS_REG38__RESERVED__SHIFT 0x10
+#define RTAVFS_REG38__RTAVFSFSMCPOFINALRESULTREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG38__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG39
+#define RTAVFS_REG39__RTAVFSFSMVOLTCODEREADYCNT__SHIFT 0x0
+#define RTAVFS_REG39__RESERVED__SHIFT 0x10
+#define RTAVFS_REG39__RTAVFSFSMVOLTCODEREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG39__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG40
+#define RTAVFS_REG40__RTAVFSFSMTARGETVOLTAGEREADYCNT__SHIFT 0x0
+#define RTAVFS_REG40__RESERVED__SHIFT 0x10
+#define RTAVFS_REG40__RTAVFSFSMTARGETVOLTAGEREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG40__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG41
+#define RTAVFS_REG41__RTAVFSFSMSTOPCPOSCNT__SHIFT 0x0
+#define RTAVFS_REG41__RESERVED__SHIFT 0x10
+#define RTAVFS_REG41__RTAVFSFSMSTOPCPOSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG41__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG42
+#define RTAVFS_REG42__RTAVFSFSMWAITFORACKCNT__SHIFT 0x0
+#define RTAVFS_REG42__RESERVED__SHIFT 0x10
+#define RTAVFS_REG42__RTAVFSFSMWAITFORACKCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG42__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG43
+#define RTAVFS_REG43__RTAVFSKP0__SHIFT 0x0
+#define RTAVFS_REG43__RTAVFSKP1__SHIFT 0x4
+#define RTAVFS_REG43__RTAVFSKP2__SHIFT 0x8
+#define RTAVFS_REG43__RTAVFSKP3__SHIFT 0xc
+#define RTAVFS_REG43__RTAVFSKI0__SHIFT 0x10
+#define RTAVFS_REG43__RTAVFSKI1__SHIFT 0x14
+#define RTAVFS_REG43__RTAVFSKI2__SHIFT 0x18
+#define RTAVFS_REG43__RTAVFSKI3__SHIFT 0x1c
+#define RTAVFS_REG43__RTAVFSKP0_MASK 0x0000000FL
+#define RTAVFS_REG43__RTAVFSKP1_MASK 0x000000F0L
+#define RTAVFS_REG43__RTAVFSKP2_MASK 0x00000F00L
+#define RTAVFS_REG43__RTAVFSKP3_MASK 0x0000F000L
+#define RTAVFS_REG43__RTAVFSKI0_MASK 0x000F0000L
+#define RTAVFS_REG43__RTAVFSKI1_MASK 0x00F00000L
+#define RTAVFS_REG43__RTAVFSKI2_MASK 0x0F000000L
+#define RTAVFS_REG43__RTAVFSKI3_MASK 0xF0000000L
+//RTAVFS_REG44
+#define RTAVFS_REG44__RTAVFSV1__SHIFT 0x0
+#define RTAVFS_REG44__RTAVFSV2__SHIFT 0xa
+#define RTAVFS_REG44__RTAVFSV3__SHIFT 0x14
+#define RTAVFS_REG44__RTAVFSUSEBINARYSEARCH__SHIFT 0x1e
+#define RTAVFS_REG44__RTAVFSVOLTCODEHWCAL__SHIFT 0x1f
+#define RTAVFS_REG44__RTAVFSV1_MASK 0x000003FFL
+#define RTAVFS_REG44__RTAVFSV2_MASK 0x000FFC00L
+#define RTAVFS_REG44__RTAVFSV3_MASK 0x3FF00000L
+#define RTAVFS_REG44__RTAVFSUSEBINARYSEARCH_MASK 0x40000000L
+#define RTAVFS_REG44__RTAVFSVOLTCODEHWCAL_MASK 0x80000000L
+//RTAVFS_REG45
+#define RTAVFS_REG45__RTAVFSVRBLEEDCNTRL__SHIFT 0x0
+#define RTAVFS_REG45__RTAVFSVRENABLE__SHIFT 0x1
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDE__SHIFT 0x2
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDESEL__SHIFT 0xc
+#define RTAVFS_REG45__RTAVFSLOWPWREN__SHIFT 0xd
+#define RTAVFS_REG45__RTAVFSUREGENABLE__SHIFT 0xe
+#define RTAVFS_REG45__RTAVFSBGENABLE__SHIFT 0xf
+#define RTAVFS_REG45__RTAVFSENABLEVDDRETSENSING__SHIFT 0x10
+#define RTAVFS_REG45__RESERVED__SHIFT 0x11
+#define RTAVFS_REG45__RTAVFSVRBLEEDCNTRL_MASK 0x00000001L
+#define RTAVFS_REG45__RTAVFSVRENABLE_MASK 0x00000002L
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDE_MASK 0x00000FFCL
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDESEL_MASK 0x00001000L
+#define RTAVFS_REG45__RTAVFSLOWPWREN_MASK 0x00002000L
+#define RTAVFS_REG45__RTAVFSUREGENABLE_MASK 0x00004000L
+#define RTAVFS_REG45__RTAVFSBGENABLE_MASK 0x00008000L
+#define RTAVFS_REG45__RTAVFSENABLEVDDRETSENSING_MASK 0x00010000L
+#define RTAVFS_REG45__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG46
+#define RTAVFS_REG46__RTAVFSKP__SHIFT 0x0
+#define RTAVFS_REG46__RTAVFSKI__SHIFT 0x4
+#define RTAVFS_REG46__RTAVFSPIENABLEANTIWINDUP__SHIFT 0x8
+#define RTAVFS_REG46__RTAVFSPISHIFT__SHIFT 0x9
+#define RTAVFS_REG46__RTAVFSPIERREN__SHIFT 0xd
+#define RTAVFS_REG46__RTAVFSPISHIFTOUT__SHIFT 0xe
+#define RTAVFS_REG46__RTAVFSUSELUTKPKI__SHIFT 0x12
+#define RTAVFS_REG46__RESERVED__SHIFT 0x13
+#define RTAVFS_REG46__RTAVFSKP_MASK 0x0000000FL
+#define RTAVFS_REG46__RTAVFSKI_MASK 0x000000F0L
+#define RTAVFS_REG46__RTAVFSPIENABLEANTIWINDUP_MASK 0x00000100L
+#define RTAVFS_REG46__RTAVFSPISHIFT_MASK 0x00001E00L
+#define RTAVFS_REG46__RTAVFSPIERREN_MASK 0x00002000L
+#define RTAVFS_REG46__RTAVFSPISHIFTOUT_MASK 0x0003C000L
+#define RTAVFS_REG46__RTAVFSUSELUTKPKI_MASK 0x00040000L
+#define RTAVFS_REG46__RESERVED_MASK 0xFFF80000L
+//RTAVFS_REG47
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMIN__SHIFT 0x0
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMAX__SHIFT 0xa
+#define RTAVFS_REG47__RTAVFSPIERRMASK__SHIFT 0x14
+#define RTAVFS_REG47__RTAVFSFORCEDISABLEPI__SHIFT 0x1b
+#define RTAVFS_REG47__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMIN_MASK 0x000003FFL
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMAX_MASK 0x000FFC00L
+#define RTAVFS_REG47__RTAVFSPIERRMASK_MASK 0x07F00000L
+#define RTAVFS_REG47__RTAVFSFORCEDISABLEPI_MASK 0x08000000L
+#define RTAVFS_REG47__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG48
+#define RTAVFS_REG48__RTAVFSPILOOPNITERATIONS__SHIFT 0x0
+#define RTAVFS_REG48__RTAVFSPIERRTHRESHOLD__SHIFT 0x10
+#define RTAVFS_REG48__RTAVFSPILOOPNITERATIONS_MASK 0x0000FFFFL
+#define RTAVFS_REG48__RTAVFSPIERRTHRESHOLD_MASK 0xFFFF0000L
+//RTAVFS_REG49
+#define RTAVFS_REG49__RTAVFSPSMRSTAVGVDD__SHIFT 0x0
+#define RTAVFS_REG49__RTAVFSPSMMEASMAXVDD__SHIFT 0x1
+#define RTAVFS_REG49__RTAVFSPSMCLKDIVVDD__SHIFT 0x2
+#define RTAVFS_REG49__RTAVFSPSMAVGDIVVDD__SHIFT 0x4
+#define RTAVFS_REG49__RTAVFSPSMOSCENVDD__SHIFT 0xa
+#define RTAVFS_REG49__RTAVFSPSMAVGENVDD__SHIFT 0xb
+#define RTAVFS_REG49__RTAVFSPSMRSTMINMAXVDD__SHIFT 0xc
+#define RTAVFS_REG49__RESERVED__SHIFT 0xd
+#define RTAVFS_REG49__RTAVFSPSMRSTAVGVDD_MASK 0x00000001L
+#define RTAVFS_REG49__RTAVFSPSMMEASMAXVDD_MASK 0x00000002L
+#define RTAVFS_REG49__RTAVFSPSMCLKDIVVDD_MASK 0x0000000CL
+#define RTAVFS_REG49__RTAVFSPSMAVGDIVVDD_MASK 0x000003F0L
+#define RTAVFS_REG49__RTAVFSPSMOSCENVDD_MASK 0x00000400L
+#define RTAVFS_REG49__RTAVFSPSMAVGENVDD_MASK 0x00000800L
+#define RTAVFS_REG49__RTAVFSPSMRSTMINMAXVDD_MASK 0x00001000L
+#define RTAVFS_REG49__RESERVED_MASK 0xFFFFE000L
+//RTAVFS_REG50
+#define RTAVFS_REG50__RTAVFSPSMRSTAVGVREG__SHIFT 0x0
+#define RTAVFS_REG50__RTAVFSPSMMEASMAXVREG__SHIFT 0x1
+#define RTAVFS_REG50__RTAVFSPSMCLKDIVVREG__SHIFT 0x2
+#define RTAVFS_REG50__RTAVFSPSMAVGDIVVREG__SHIFT 0x4
+#define RTAVFS_REG50__RTAVFSPSMOSCENVREG__SHIFT 0xa
+#define RTAVFS_REG50__RTAVFSPSMAVGENVREG__SHIFT 0xb
+#define RTAVFS_REG50__RTAVFSPSMRSTMINMAXVREG__SHIFT 0xc
+#define RTAVFS_REG50__RESERVED__SHIFT 0xd
+#define RTAVFS_REG50__RTAVFSPSMRSTAVGVREG_MASK 0x00000001L
+#define RTAVFS_REG50__RTAVFSPSMMEASMAXVREG_MASK 0x00000002L
+#define RTAVFS_REG50__RTAVFSPSMCLKDIVVREG_MASK 0x0000000CL
+#define RTAVFS_REG50__RTAVFSPSMAVGDIVVREG_MASK 0x000003F0L
+#define RTAVFS_REG50__RTAVFSPSMOSCENVREG_MASK 0x00000400L
+#define RTAVFS_REG50__RTAVFSPSMAVGENVREG_MASK 0x00000800L
+#define RTAVFS_REG50__RTAVFSPSMRSTMINMAXVREG_MASK 0x00001000L
+#define RTAVFS_REG50__RESERVED_MASK 0xFFFFE000L
+//RTAVFS_REG51
+#define RTAVFS_REG51__RTAVFSAVFSENABLE__SHIFT 0x0
+#define RTAVFS_REG51__RTAVFSCPOTURNONDELAY__SHIFT 0x1
+#define RTAVFS_REG51__RTAVFSSELECTMINMAX__SHIFT 0x5
+#define RTAVFS_REG51__RTAVFSSELECTPERPATHSCALING__SHIFT 0x6
+#define RTAVFS_REG51__RTAVFSADDVOLTCODEGUARDBAND__SHIFT 0x7
+#define RTAVFS_REG51__RTAVFSSENDAVGPSMTOPSMOUT__SHIFT 0x8
+#define RTAVFS_REG51__RTAVFSUPDATEANCHORVOLTAGES__SHIFT 0x9
+#define RTAVFS_REG51__RTAVFSSENDVDDTOPSMOUT__SHIFT 0xa
+#define RTAVFS_REG51__RESERVED__SHIFT 0xb
+#define RTAVFS_REG51__RTAVFSAVFSENABLE_MASK 0x00000001L
+#define RTAVFS_REG51__RTAVFSCPOTURNONDELAY_MASK 0x0000001EL
+#define RTAVFS_REG51__RTAVFSSELECTMINMAX_MASK 0x00000020L
+#define RTAVFS_REG51__RTAVFSSELECTPERPATHSCALING_MASK 0x00000040L
+#define RTAVFS_REG51__RTAVFSADDVOLTCODEGUARDBAND_MASK 0x00000080L
+#define RTAVFS_REG51__RTAVFSSENDAVGPSMTOPSMOUT_MASK 0x00000100L
+#define RTAVFS_REG51__RTAVFSUPDATEANCHORVOLTAGES_MASK 0x00000200L
+#define RTAVFS_REG51__RTAVFSSENDVDDTOPSMOUT_MASK 0x00000400L
+#define RTAVFS_REG51__RESERVED_MASK 0xFFFFF800L
+//RTAVFS_REG52
+#define RTAVFS_REG52__RTAVFSMINMAXPSMVDD__SHIFT 0x0
+#define RTAVFS_REG52__RTAVFSAVGPSMVDD__SHIFT 0xe
+#define RTAVFS_REG52__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG52__RTAVFSMINMAXPSMVDD_MASK 0x00003FFFL
+#define RTAVFS_REG52__RTAVFSAVGPSMVDD_MASK 0x0FFFC000L
+#define RTAVFS_REG52__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG53
+#define RTAVFS_REG53__RTAVFSMINMAXPSMVREG__SHIFT 0x0
+#define RTAVFS_REG53__RTAVFSAVGPSMVREG__SHIFT 0xe
+#define RTAVFS_REG53__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG53__RTAVFSMINMAXPSMVREG_MASK 0x00003FFFL
+#define RTAVFS_REG53__RTAVFSAVGPSMVREG_MASK 0x0FFFC000L
+#define RTAVFS_REG53__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG54
+#define RTAVFS_REG54__RTAVFSCPO0_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG54__RTAVFSCPO0_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG54__RTAVFSCPO0_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG54__RTAVFSCPO0_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG55
+#define RTAVFS_REG55__RTAVFSCPO1_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG55__RTAVFSCPO1_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG55__RTAVFSCPO1_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG55__RTAVFSCPO1_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG56
+#define RTAVFS_REG56__RTAVFSCPO2_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG56__RTAVFSCPO2_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG56__RTAVFSCPO2_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG56__RTAVFSCPO2_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG57
+#define RTAVFS_REG57__RTAVFSCPO3_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG57__RTAVFSCPO3_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG57__RTAVFSCPO3_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG57__RTAVFSCPO3_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG58
+#define RTAVFS_REG58__RTAVFSCPO4_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG58__RTAVFSCPO4_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG58__RTAVFSCPO4_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG58__RTAVFSCPO4_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG59
+#define RTAVFS_REG59__RTAVFSCPO5_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG59__RTAVFSCPO5_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG59__RTAVFSCPO5_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG59__RTAVFSCPO5_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG60
+#define RTAVFS_REG60__RTAVFSCPO6_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG60__RTAVFSCPO6_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG60__RTAVFSCPO6_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG60__RTAVFSCPO6_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG61
+#define RTAVFS_REG61__RTAVFSCPO7_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG61__RTAVFSCPO7_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG61__RTAVFSCPO7_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG61__RTAVFSCPO7_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG62
+#define RTAVFS_REG62__RTAVFSCPO8_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG62__RTAVFSCPO8_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG62__RTAVFSCPO8_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG62__RTAVFSCPO8_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG63
+#define RTAVFS_REG63__RTAVFSCPO9_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG63__RTAVFSCPO9_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG63__RTAVFSCPO9_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG63__RTAVFSCPO9_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG64
+#define RTAVFS_REG64__RTAVFSCPO10_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG64__RTAVFSCPO10_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG64__RTAVFSCPO10_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG64__RTAVFSCPO10_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG65
+#define RTAVFS_REG65__RTAVFSCPO11_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG65__RTAVFSCPO11_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG65__RTAVFSCPO11_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG65__RTAVFSCPO11_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG66
+#define RTAVFS_REG66__RTAVFSCPO12_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG66__RTAVFSCPO12_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG66__RTAVFSCPO12_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG66__RTAVFSCPO12_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG67
+#define RTAVFS_REG67__RTAVFSCPO13_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG67__RTAVFSCPO13_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG67__RTAVFSCPO13_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG67__RTAVFSCPO13_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG68
+#define RTAVFS_REG68__RTAVFSCPO14_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG68__RTAVFSCPO14_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG68__RTAVFSCPO14_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG68__RTAVFSCPO14_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG69
+#define RTAVFS_REG69__RTAVFSCPO15_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG69__RTAVFSCPO15_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG69__RTAVFSCPO15_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG69__RTAVFSCPO15_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG70
+#define RTAVFS_REG70__RTAVFSCPO16_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG70__RTAVFSCPO16_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG70__RTAVFSCPO16_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG70__RTAVFSCPO16_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG71
+#define RTAVFS_REG71__RTAVFSCPO17_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG71__RTAVFSCPO17_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG71__RTAVFSCPO17_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG71__RTAVFSCPO17_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG72
+#define RTAVFS_REG72__RTAVFSCPO18_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG72__RTAVFSCPO18_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG72__RTAVFSCPO18_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG72__RTAVFSCPO18_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG73
+#define RTAVFS_REG73__RTAVFSCPO19_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG73__RTAVFSCPO19_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG73__RTAVFSCPO19_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG73__RTAVFSCPO19_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG74
+#define RTAVFS_REG74__RTAVFSCPO20_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG74__RTAVFSCPO20_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG74__RTAVFSCPO20_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG74__RTAVFSCPO20_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG75
+#define RTAVFS_REG75__RTAVFSCPO21_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG75__RTAVFSCPO21_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG75__RTAVFSCPO21_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG75__RTAVFSCPO21_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG76
+#define RTAVFS_REG76__RTAVFSCPO22_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG76__RTAVFSCPO22_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG76__RTAVFSCPO22_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG76__RTAVFSCPO22_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG77
+#define RTAVFS_REG77__RTAVFSCPO23_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG77__RTAVFSCPO23_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG77__RTAVFSCPO23_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG77__RTAVFSCPO23_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG78
+#define RTAVFS_REG78__RTAVFSCPO24_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG78__RTAVFSCPO24_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG78__RTAVFSCPO24_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG78__RTAVFSCPO24_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG79
+#define RTAVFS_REG79__RTAVFSCPO25_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG79__RTAVFSCPO25_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG79__RTAVFSCPO25_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG79__RTAVFSCPO25_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG80
+#define RTAVFS_REG80__RTAVFSCPO26_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG80__RTAVFSCPO26_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG80__RTAVFSCPO26_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG80__RTAVFSCPO26_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG81
+#define RTAVFS_REG81__RTAVFSCPO27_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG81__RTAVFSCPO27_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG81__RTAVFSCPO27_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG81__RTAVFSCPO27_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG82
+#define RTAVFS_REG82__RTAVFSCPO28_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG82__RTAVFSCPO28_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG82__RTAVFSCPO28_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG82__RTAVFSCPO28_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG83
+#define RTAVFS_REG83__RTAVFSCPO29_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG83__RTAVFSCPO29_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG83__RTAVFSCPO29_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG83__RTAVFSCPO29_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG84
+#define RTAVFS_REG84__RTAVFSCPO30_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG84__RTAVFSCPO30_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG84__RTAVFSCPO30_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG84__RTAVFSCPO30_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG85
+#define RTAVFS_REG85__RTAVFSCPO31_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG85__RTAVFSCPO31_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG85__RTAVFSCPO31_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG85__RTAVFSCPO31_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG86
+#define RTAVFS_REG86__RTAVFSCPO32_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG86__RTAVFSCPO32_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG86__RTAVFSCPO32_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG86__RTAVFSCPO32_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG87
+#define RTAVFS_REG87__RTAVFSCPO33_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG87__RTAVFSCPO33_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG87__RTAVFSCPO33_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG87__RTAVFSCPO33_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG88
+#define RTAVFS_REG88__RTAVFSCPO34_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG88__RTAVFSCPO34_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG88__RTAVFSCPO34_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG88__RTAVFSCPO34_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG89
+#define RTAVFS_REG89__RTAVFSCPO35_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG89__RTAVFSCPO35_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG89__RTAVFSCPO35_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG89__RTAVFSCPO35_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG90
+#define RTAVFS_REG90__RTAVFSCPO36_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG90__RTAVFSCPO36_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG90__RTAVFSCPO36_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG90__RTAVFSCPO36_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG91
+#define RTAVFS_REG91__RTAVFSCPO37_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG91__RTAVFSCPO37_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG91__RTAVFSCPO37_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG91__RTAVFSCPO37_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG92
+#define RTAVFS_REG92__RTAVFSCPO38_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG92__RTAVFSCPO38_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG92__RTAVFSCPO38_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG92__RTAVFSCPO38_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG93
+#define RTAVFS_REG93__RTAVFSCPO39_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG93__RTAVFSCPO39_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG93__RTAVFSCPO39_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG93__RTAVFSCPO39_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG94
+#define RTAVFS_REG94__RTAVFSCPO40_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG94__RTAVFSCPO40_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG94__RTAVFSCPO40_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG94__RTAVFSCPO40_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG95
+#define RTAVFS_REG95__RTAVFSCPO41_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG95__RTAVFSCPO41_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG95__RTAVFSCPO41_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG95__RTAVFSCPO41_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG96
+#define RTAVFS_REG96__RTAVFSCPO42_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG96__RTAVFSCPO42_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG96__RTAVFSCPO42_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG96__RTAVFSCPO42_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG97
+#define RTAVFS_REG97__RTAVFSCPO43_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG97__RTAVFSCPO43_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG97__RTAVFSCPO43_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG97__RTAVFSCPO43_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG98
+#define RTAVFS_REG98__RTAVFSCPO44_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG98__RTAVFSCPO44_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG98__RTAVFSCPO44_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG98__RTAVFSCPO44_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG99
+#define RTAVFS_REG99__RTAVFSCPO45_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG99__RTAVFSCPO45_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG99__RTAVFSCPO45_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG99__RTAVFSCPO45_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG100
+#define RTAVFS_REG100__RTAVFSCPO46_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG100__RTAVFSCPO46_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG100__RTAVFSCPO46_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG100__RTAVFSCPO46_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG101
+#define RTAVFS_REG101__RTAVFSCPO47_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG101__RTAVFSCPO47_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG101__RTAVFSCPO47_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG101__RTAVFSCPO47_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG102
+#define RTAVFS_REG102__RTAVFSCPO48_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG102__RTAVFSCPO48_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG102__RTAVFSCPO48_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG102__RTAVFSCPO48_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG103
+#define RTAVFS_REG103__RTAVFSCPO49_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG103__RTAVFSCPO49_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG103__RTAVFSCPO49_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG103__RTAVFSCPO49_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG104
+#define RTAVFS_REG104__RTAVFSCPO50_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG104__RTAVFSCPO50_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG104__RTAVFSCPO50_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG104__RTAVFSCPO50_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG105
+#define RTAVFS_REG105__RTAVFSCPO51_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG105__RTAVFSCPO51_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG105__RTAVFSCPO51_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG105__RTAVFSCPO51_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG106
+#define RTAVFS_REG106__RTAVFSCPO52_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG106__RTAVFSCPO52_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG106__RTAVFSCPO52_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG106__RTAVFSCPO52_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG107
+#define RTAVFS_REG107__RTAVFSCPO53_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG107__RTAVFSCPO53_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG107__RTAVFSCPO53_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG107__RTAVFSCPO53_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG108
+#define RTAVFS_REG108__RTAVFSCPO54_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG108__RTAVFSCPO54_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG108__RTAVFSCPO54_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG108__RTAVFSCPO54_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG109
+#define RTAVFS_REG109__RTAVFSCPO55_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG109__RTAVFSCPO55_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG109__RTAVFSCPO55_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG109__RTAVFSCPO55_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG110
+#define RTAVFS_REG110__RTAVFSCPO56_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG110__RTAVFSCPO56_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG110__RTAVFSCPO56_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG110__RTAVFSCPO56_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG111
+#define RTAVFS_REG111__RTAVFSCPO57_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG111__RTAVFSCPO57_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG111__RTAVFSCPO57_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG111__RTAVFSCPO57_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG112
+#define RTAVFS_REG112__RTAVFSCPO58_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG112__RTAVFSCPO58_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG112__RTAVFSCPO58_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG112__RTAVFSCPO58_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG113
+#define RTAVFS_REG113__RTAVFSCPO59_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG113__RTAVFSCPO59_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG113__RTAVFSCPO59_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG113__RTAVFSCPO59_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG114
+#define RTAVFS_REG114__RTAVFSCPO60_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG114__RTAVFSCPO60_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG114__RTAVFSCPO60_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG114__RTAVFSCPO60_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG115
+#define RTAVFS_REG115__RTAVFSCPO61_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG115__RTAVFSCPO61_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG115__RTAVFSCPO61_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG115__RTAVFSCPO61_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG116
+#define RTAVFS_REG116__RTAVFSCPO62_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG116__RTAVFSCPO62_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG116__RTAVFSCPO62_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG116__RTAVFSCPO62_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG117
+#define RTAVFS_REG117__RTAVFSCPO63_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG117__RTAVFSCPO63_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG117__RTAVFSCPO63_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG117__RTAVFSCPO63_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG118
+#define RTAVFS_REG118__RTAVFSCPOEN0__SHIFT 0x0
+#define RTAVFS_REG118__RTAVFSCPOEN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG119
+#define RTAVFS_REG119__RTAVFSCPOEN1__SHIFT 0x0
+#define RTAVFS_REG119__RTAVFSCPOEN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG120
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG120__RTAVFSCPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG120__RESERVED__SHIFT 0x12
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG120__RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG121
+#define RTAVFS_REG121__RTAVFSZONE0INUSE__SHIFT 0x0
+#define RTAVFS_REG121__RTAVFSZONE1INUSE__SHIFT 0x1
+#define RTAVFS_REG121__RTAVFSZONE2INUSE__SHIFT 0x2
+#define RTAVFS_REG121__RTAVFSZONE3INUSE__SHIFT 0x3
+#define RTAVFS_REG121__RTAVFSZONE4INUSE__SHIFT 0x4
+#define RTAVFS_REG121__RTAVFSRESERVED__SHIFT 0x5
+#define RTAVFS_REG121__RTAVFSERRORCODE__SHIFT 0x1c
+#define RTAVFS_REG121__RTAVFSZONE0INUSE_MASK 0x00000001L
+#define RTAVFS_REG121__RTAVFSZONE1INUSE_MASK 0x00000002L
+#define RTAVFS_REG121__RTAVFSZONE2INUSE_MASK 0x00000004L
+#define RTAVFS_REG121__RTAVFSZONE3INUSE_MASK 0x00000008L
+#define RTAVFS_REG121__RTAVFSZONE4INUSE_MASK 0x00000010L
+#define RTAVFS_REG121__RTAVFSRESERVED_MASK 0x0FFFFFE0L
+#define RTAVFS_REG121__RTAVFSERRORCODE_MASK 0xF0000000L
+//RTAVFS_REG122
+#define RTAVFS_REG122__RTAVFSCPO0_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG122__RESERVED__SHIFT 0x10
+#define RTAVFS_REG122__RTAVFSCPO0_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG122__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG123
+#define RTAVFS_REG123__RTAVFSCPO1_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG123__RESERVED__SHIFT 0x10
+#define RTAVFS_REG123__RTAVFSCPO1_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG123__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG124
+#define RTAVFS_REG124__RTAVFSCPO2_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG124__RESERVED__SHIFT 0x10
+#define RTAVFS_REG124__RTAVFSCPO2_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG124__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG125
+#define RTAVFS_REG125__RTAVFSCPO3_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG125__RESERVED__SHIFT 0x10
+#define RTAVFS_REG125__RTAVFSCPO3_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG125__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG126
+#define RTAVFS_REG126__RTAVFSCPO4_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG126__RESERVED__SHIFT 0x10
+#define RTAVFS_REG126__RTAVFSCPO4_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG126__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG127
+#define RTAVFS_REG127__RTAVFSCPO5_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG127__RESERVED__SHIFT 0x10
+#define RTAVFS_REG127__RTAVFSCPO5_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG127__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG128
+#define RTAVFS_REG128__RTAVFSCPO6_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG128__RESERVED__SHIFT 0x10
+#define RTAVFS_REG128__RTAVFSCPO6_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG128__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG129
+#define RTAVFS_REG129__RTAVFSCPO7_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG129__RESERVED__SHIFT 0x10
+#define RTAVFS_REG129__RTAVFSCPO7_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG129__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG130
+#define RTAVFS_REG130__RTAVFSCPO8_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG130__RESERVED__SHIFT 0x10
+#define RTAVFS_REG130__RTAVFSCPO8_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG130__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG131
+#define RTAVFS_REG131__RTAVFSCPO9_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG131__RESERVED__SHIFT 0x10
+#define RTAVFS_REG131__RTAVFSCPO9_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG131__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG132
+#define RTAVFS_REG132__RTAVFSCPO10_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG132__RESERVED__SHIFT 0x10
+#define RTAVFS_REG132__RTAVFSCPO10_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG132__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG133
+#define RTAVFS_REG133__RTAVFSCPO11_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG133__RESERVED__SHIFT 0x10
+#define RTAVFS_REG133__RTAVFSCPO11_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG133__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG134
+#define RTAVFS_REG134__RTAVFSCPO12_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG134__RESERVED__SHIFT 0x10
+#define RTAVFS_REG134__RTAVFSCPO12_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG134__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG135
+#define RTAVFS_REG135__RTAVFSCPO13_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG135__RESERVED__SHIFT 0x10
+#define RTAVFS_REG135__RTAVFSCPO13_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG135__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG136
+#define RTAVFS_REG136__RTAVFSCPO14_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG136__RESERVED__SHIFT 0x10
+#define RTAVFS_REG136__RTAVFSCPO14_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG136__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG137
+#define RTAVFS_REG137__RTAVFSCPO15_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG137__RESERVED__SHIFT 0x10
+#define RTAVFS_REG137__RTAVFSCPO15_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG137__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG138
+#define RTAVFS_REG138__RTAVFSCPO16_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG138__RESERVED__SHIFT 0x10
+#define RTAVFS_REG138__RTAVFSCPO16_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG138__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG139
+#define RTAVFS_REG139__RTAVFSCPO17_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG139__RESERVED__SHIFT 0x10
+#define RTAVFS_REG139__RTAVFSCPO17_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG139__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG140
+#define RTAVFS_REG140__RTAVFSCPO18_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG140__RESERVED__SHIFT 0x10
+#define RTAVFS_REG140__RTAVFSCPO18_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG140__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG141
+#define RTAVFS_REG141__RTAVFSCPO19_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG141__RESERVED__SHIFT 0x10
+#define RTAVFS_REG141__RTAVFSCPO19_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG141__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG142
+#define RTAVFS_REG142__RTAVFSCPO20_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG142__RESERVED__SHIFT 0x10
+#define RTAVFS_REG142__RTAVFSCPO20_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG142__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG143
+#define RTAVFS_REG143__RTAVFSCPO21_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG143__RESERVED__SHIFT 0x10
+#define RTAVFS_REG143__RTAVFSCPO21_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG143__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG144
+#define RTAVFS_REG144__RTAVFSCPO22_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG144__RESERVED__SHIFT 0x10
+#define RTAVFS_REG144__RTAVFSCPO22_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG144__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG145
+#define RTAVFS_REG145__RTAVFSCPO23_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG145__RESERVED__SHIFT 0x10
+#define RTAVFS_REG145__RTAVFSCPO23_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG145__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG146
+#define RTAVFS_REG146__RTAVFSCPO24_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG146__RESERVED__SHIFT 0x10
+#define RTAVFS_REG146__RTAVFSCPO24_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG146__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG147
+#define RTAVFS_REG147__RTAVFSCPO25_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG147__RESERVED__SHIFT 0x10
+#define RTAVFS_REG147__RTAVFSCPO25_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG147__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG148
+#define RTAVFS_REG148__RTAVFSCPO26_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG148__RESERVED__SHIFT 0x10
+#define RTAVFS_REG148__RTAVFSCPO26_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG148__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG149
+#define RTAVFS_REG149__RTAVFSCPO27_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG149__RESERVED__SHIFT 0x10
+#define RTAVFS_REG149__RTAVFSCPO27_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG149__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG150
+#define RTAVFS_REG150__RTAVFSCPO28_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG150__RESERVED__SHIFT 0x10
+#define RTAVFS_REG150__RTAVFSCPO28_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG150__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG151
+#define RTAVFS_REG151__RTAVFSCPO29_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG151__RESERVED__SHIFT 0x10
+#define RTAVFS_REG151__RTAVFSCPO29_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG151__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG152
+#define RTAVFS_REG152__RTAVFSCPO30_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG152__RESERVED__SHIFT 0x10
+#define RTAVFS_REG152__RTAVFSCPO30_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG152__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG153
+#define RTAVFS_REG153__RTAVFSCPO31_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG153__RESERVED__SHIFT 0x10
+#define RTAVFS_REG153__RTAVFSCPO31_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG153__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG154
+#define RTAVFS_REG154__RTAVFSCPO32_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG154__RESERVED__SHIFT 0x10
+#define RTAVFS_REG154__RTAVFSCPO32_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG154__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG155
+#define RTAVFS_REG155__RTAVFSCPO33_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG155__RESERVED__SHIFT 0x10
+#define RTAVFS_REG155__RTAVFSCPO33_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG155__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG156
+#define RTAVFS_REG156__RTAVFSCPO34_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG156__RESERVED__SHIFT 0x10
+#define RTAVFS_REG156__RTAVFSCPO34_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG156__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG157
+#define RTAVFS_REG157__RTAVFSCPO35_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG157__RESERVED__SHIFT 0x10
+#define RTAVFS_REG157__RTAVFSCPO35_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG157__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG158
+#define RTAVFS_REG158__RTAVFSCPO36_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG158__RESERVED__SHIFT 0x10
+#define RTAVFS_REG158__RTAVFSCPO36_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG158__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG159
+#define RTAVFS_REG159__RTAVFSCPO37_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG159__RESERVED__SHIFT 0x10
+#define RTAVFS_REG159__RTAVFSCPO37_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG159__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG160
+#define RTAVFS_REG160__RTAVFSCPO38_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG160__RESERVED__SHIFT 0x10
+#define RTAVFS_REG160__RTAVFSCPO38_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG160__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG161
+#define RTAVFS_REG161__RTAVFSCPO39_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG161__RESERVED__SHIFT 0x10
+#define RTAVFS_REG161__RTAVFSCPO39_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG161__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG162
+#define RTAVFS_REG162__RTAVFSCPO40_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG162__RESERVED__SHIFT 0x10
+#define RTAVFS_REG162__RTAVFSCPO40_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG162__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG163
+#define RTAVFS_REG163__RTAVFSCPO41_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG163__RESERVED__SHIFT 0x10
+#define RTAVFS_REG163__RTAVFSCPO41_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG163__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG164
+#define RTAVFS_REG164__RTAVFSCPO42_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG164__RESERVED__SHIFT 0x10
+#define RTAVFS_REG164__RTAVFSCPO42_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG164__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG165
+#define RTAVFS_REG165__RTAVFSCPO43_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG165__RESERVED__SHIFT 0x10
+#define RTAVFS_REG165__RTAVFSCPO43_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG165__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG166
+#define RTAVFS_REG166__RTAVFSCPO44_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG166__RESERVED__SHIFT 0x10
+#define RTAVFS_REG166__RTAVFSCPO44_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG166__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG167
+#define RTAVFS_REG167__RTAVFSCPO45_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG167__RESERVED__SHIFT 0x10
+#define RTAVFS_REG167__RTAVFSCPO45_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG167__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG168
+#define RTAVFS_REG168__RTAVFSCPO46_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG168__RESERVED__SHIFT 0x10
+#define RTAVFS_REG168__RTAVFSCPO46_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG168__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG169
+#define RTAVFS_REG169__RTAVFSCPO47_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG169__RESERVED__SHIFT 0x10
+#define RTAVFS_REG169__RTAVFSCPO47_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG169__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG170
+#define RTAVFS_REG170__RTAVFSCPO48_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG170__RESERVED__SHIFT 0x10
+#define RTAVFS_REG170__RTAVFSCPO48_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG170__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG171
+#define RTAVFS_REG171__RTAVFSCPO49_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG171__RESERVED__SHIFT 0x10
+#define RTAVFS_REG171__RTAVFSCPO49_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG171__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG172
+#define RTAVFS_REG172__RTAVFSCPO50_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG172__RESERVED__SHIFT 0x10
+#define RTAVFS_REG172__RTAVFSCPO50_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG172__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG173
+#define RTAVFS_REG173__RTAVFSCPO51_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG173__RESERVED__SHIFT 0x10
+#define RTAVFS_REG173__RTAVFSCPO51_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG173__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG174
+#define RTAVFS_REG174__RTAVFSCPO52_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG174__RESERVED__SHIFT 0x10
+#define RTAVFS_REG174__RTAVFSCPO52_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG174__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG175
+#define RTAVFS_REG175__RTAVFSCPO53_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG175__RESERVED__SHIFT 0x10
+#define RTAVFS_REG175__RTAVFSCPO53_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG175__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG176
+#define RTAVFS_REG176__RTAVFSCPO54_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG176__RESERVED__SHIFT 0x10
+#define RTAVFS_REG176__RTAVFSCPO54_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG176__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG177
+#define RTAVFS_REG177__RTAVFSCPO55_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG177__RESERVED__SHIFT 0x10
+#define RTAVFS_REG177__RTAVFSCPO55_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG177__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG178
+#define RTAVFS_REG178__RTAVFSCPO56_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG178__RESERVED__SHIFT 0x10
+#define RTAVFS_REG178__RTAVFSCPO56_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG178__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG179
+#define RTAVFS_REG179__RTAVFSCPO57_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG179__RESERVED__SHIFT 0x10
+#define RTAVFS_REG179__RTAVFSCPO57_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG179__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG180
+#define RTAVFS_REG180__RTAVFSCPO58_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG180__RESERVED__SHIFT 0x10
+#define RTAVFS_REG180__RTAVFSCPO58_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG180__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG181
+#define RTAVFS_REG181__RTAVFSCPO59_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG181__RESERVED__SHIFT 0x10
+#define RTAVFS_REG181__RTAVFSCPO59_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG181__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG182
+#define RTAVFS_REG182__RTAVFSCPO60_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG182__RESERVED__SHIFT 0x10
+#define RTAVFS_REG182__RTAVFSCPO60_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG182__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG183
+#define RTAVFS_REG183__RTAVFSCPO61_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG183__RESERVED__SHIFT 0x10
+#define RTAVFS_REG183__RTAVFSCPO61_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG183__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG184
+#define RTAVFS_REG184__RTAVFSCPO62_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG184__RESERVED__SHIFT 0x10
+#define RTAVFS_REG184__RTAVFSCPO62_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG184__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG185
+#define RTAVFS_REG185__RTAVFSCPO63_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG185__RESERVED__SHIFT 0x10
+#define RTAVFS_REG185__RTAVFSCPO63_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG185__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG186
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDE__SHIFT 0x0
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDESEL__SHIFT 0x10
+#define RTAVFS_REG186__RESERVED__SHIFT 0x11
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDE_MASK 0x0000FFFFL
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDESEL_MASK 0x00010000L
+#define RTAVFS_REG186__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG187
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDE__SHIFT 0x0
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDESEL__SHIFT 0x10
+#define RTAVFS_REG187__RESERVED__SHIFT 0x11
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDE_MASK 0x0000FFFFL
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDESEL_MASK 0x00010000L
+#define RTAVFS_REG187__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG189
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMPI__SHIFT 0x0
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMBINARYSEARCH__SHIFT 0xa
+#define RTAVFS_REG189__RTAVFSVDDREGON__SHIFT 0x14
+#define RTAVFS_REG189__RTAVFSVDDABOVEVDDRET__SHIFT 0x15
+#define RTAVFS_REG189__RESERVED__SHIFT 0x16
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMPI_MASK 0x000003FFL
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMBINARYSEARCH_MASK 0x000FFC00L
+#define RTAVFS_REG189__RTAVFSVDDREGON_MASK 0x00100000L
+#define RTAVFS_REG189__RTAVFSVDDABOVEVDDRET_MASK 0x00200000L
+#define RTAVFS_REG189__RESERVED_MASK 0xFFC00000L
+//RTAVFS_REG190
+#define RTAVFS_REG190__RTAVFSIGNORERLCREQ__SHIFT 0x0
+#define RTAVFS_REG190__RTAVFSRIPPLECOUNTEROUTSEL__SHIFT 0x1
+#define RTAVFS_REG190__RTAVFSRUNLOOP__SHIFT 0x6
+#define RTAVFS_REG190__RTAVFSSAVECPOWEIGHTS__SHIFT 0x7
+#define RTAVFS_REG190__RTAVFSRESTORECPOWEIGHTS__SHIFT 0x8
+#define RTAVFS_REG190__RTAVFSRESETRETENTIONREGS__SHIFT 0x9
+#define RTAVFS_REG190__RESERVED__SHIFT 0xa
+#define RTAVFS_REG190__RTAVFSIGNORERLCREQ_MASK 0x00000001L
+#define RTAVFS_REG190__RTAVFSRIPPLECOUNTEROUTSEL_MASK 0x0000003EL
+#define RTAVFS_REG190__RTAVFSRUNLOOP_MASK 0x00000040L
+#define RTAVFS_REG190__RTAVFSSAVECPOWEIGHTS_MASK 0x00000080L
+#define RTAVFS_REG190__RTAVFSRESTORECPOWEIGHTS_MASK 0x00000100L
+#define RTAVFS_REG190__RTAVFSRESETRETENTIONREGS_MASK 0x00000200L
+#define RTAVFS_REG190__RESERVED_MASK 0xFFFFFC00L
+//RTAVFS_REG191
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTUP__SHIFT 0x0
+#define RTAVFS_REG191__RTAVFSSTOPATIDLE__SHIFT 0x1
+#define RTAVFS_REG191__RTAVFSSTOPATRESETCPORIPPLECOUNTERS__SHIFT 0x2
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTCPOS__SHIFT 0x3
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTRIPPLECOUNTERS__SHIFT 0x4
+#define RTAVFS_REG191__RTAVFSSTOPATRIPPLECOUNTERSDONE__SHIFT 0x5
+#define RTAVFS_REG191__RTAVFSSTOPATCPOFINALRESULTREADY__SHIFT 0x6
+#define RTAVFS_REG191__RTAVFSSTOPATVOLTCODEREADY__SHIFT 0x7
+#define RTAVFS_REG191__RTAVFSSTOPATTARGETVOLATGEREADY__SHIFT 0x8
+#define RTAVFS_REG191__RTAVFSSTOPATSTOPCPOS__SHIFT 0x9
+#define RTAVFS_REG191__RTAVFSSTOPATWAITFORACK__SHIFT 0xa
+#define RTAVFS_REG191__RESERVED__SHIFT 0xb
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTUP_MASK 0x00000001L
+#define RTAVFS_REG191__RTAVFSSTOPATIDLE_MASK 0x00000002L
+#define RTAVFS_REG191__RTAVFSSTOPATRESETCPORIPPLECOUNTERS_MASK 0x00000004L
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTCPOS_MASK 0x00000008L
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTRIPPLECOUNTERS_MASK 0x00000010L
+#define RTAVFS_REG191__RTAVFSSTOPATRIPPLECOUNTERSDONE_MASK 0x00000020L
+#define RTAVFS_REG191__RTAVFSSTOPATCPOFINALRESULTREADY_MASK 0x00000040L
+#define RTAVFS_REG191__RTAVFSSTOPATVOLTCODEREADY_MASK 0x00000080L
+#define RTAVFS_REG191__RTAVFSSTOPATTARGETVOLATGEREADY_MASK 0x00000100L
+#define RTAVFS_REG191__RTAVFSSTOPATSTOPCPOS_MASK 0x00000200L
+#define RTAVFS_REG191__RTAVFSSTOPATWAITFORACK_MASK 0x00000400L
+#define RTAVFS_REG191__RESERVED_MASK 0xFFFFF800L
+//RTAVFS_REG192
+#define RTAVFS_REG192__RTAVFSAVFSSCALEDCPOCOUNT__SHIFT 0x0
+#define RTAVFS_REG192__RTAVFSAVFSFINALMINCPOCOUNT__SHIFT 0x10
+#define RTAVFS_REG192__RTAVFSAVFSSCALEDCPOCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG192__RTAVFSAVFSFINALMINCPOCOUNT_MASK 0xFFFF0000L
+//RTAVFS_REG193
+#define RTAVFS_REG193__RTAVFSFSMSTATE__SHIFT 0x0
+#define RTAVFS_REG193__RESERVED__SHIFT 0x10
+#define RTAVFS_REG193__RTAVFSFSMSTATE_MASK 0x0000FFFFL
+#define RTAVFS_REG193__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG194
+#define RTAVFS_REG194__RTAVFSRIPPLECNTREAD__SHIFT 0x0
+#define RTAVFS_REG194__RTAVFSRIPPLECNTREAD_MASK 0xFFFFFFFFL
+
+
+// addressBlock: sqind
+//SQ_DEBUG_STS_LOCAL
+#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x4
+#define SQ_DEBUG_STS_LOCAL__SQ_BUSY__SHIFT 0xc
+#define SQ_DEBUG_STS_LOCAL__IS_BUSY__SHIFT 0xd
+#define SQ_DEBUG_STS_LOCAL__IB_BUSY__SHIFT 0xe
+#define SQ_DEBUG_STS_LOCAL__ARB_BUSY__SHIFT 0xf
+#define SQ_DEBUG_STS_LOCAL__EXP_BUSY__SHIFT 0x10
+#define SQ_DEBUG_STS_LOCAL__BRMSG_BUSY__SHIFT 0x11
+#define SQ_DEBUG_STS_LOCAL__VM_BUSY__SHIFT 0x12
+#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x000003F0L
+#define SQ_DEBUG_STS_LOCAL__SQ_BUSY_MASK 0x00001000L
+#define SQ_DEBUG_STS_LOCAL__IS_BUSY_MASK 0x00002000L
+#define SQ_DEBUG_STS_LOCAL__IB_BUSY_MASK 0x00004000L
+#define SQ_DEBUG_STS_LOCAL__ARB_BUSY_MASK 0x00008000L
+#define SQ_DEBUG_STS_LOCAL__EXP_BUSY_MASK 0x00010000L
+#define SQ_DEBUG_STS_LOCAL__BRMSG_BUSY_MASK 0x00020000L
+#define SQ_DEBUG_STS_LOCAL__VM_BUSY_MASK 0x00040000L
+//SQ_DEBUG_CTRL_LOCAL
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x0
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0x000000FFL
+//SQ_WAVE_ACTIVE
+#define SQ_WAVE_ACTIVE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_ACTIVE__WAVE_SLOT_MASK 0x000FFFFFL
+//SQ_WAVE_VALID_AND_IDLE
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT_MASK 0x000FFFFFL
+//SQ_WAVE_MODE
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
+#define SQ_WAVE_MODE__TRAP_AFTER_INST_EN__SHIFT 0xb
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
+#define SQ_WAVE_MODE__WAVE_END__SHIFT 0x15
+#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
+#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1b
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__TRAP_AFTER_INST_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
+#define SQ_WAVE_MODE__WAVE_END_MASK 0x00200000L
+#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
+#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x08000000L
+//SQ_WAVE_STATUS
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
+#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
+#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
+#define SQ_WAVE_STATUS__TTRACE_SIMD_EN__SHIFT 0xf
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x14
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x15
+#define SQ_WAVE_STATUS__OREO_CONFLICT__SHIFT 0x16
+#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
+#define SQ_WAVE_STATUS__NO_VGPRS__SHIFT 0x18
+#define SQ_WAVE_STATUS__LDS_PARAM_READY__SHIFT 0x19
+#define SQ_WAVE_STATUS__MUST_GS_ALLOC__SHIFT 0x1a
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
+#define SQ_WAVE_STATUS__IDLE__SHIFT 0x1c
+#define SQ_WAVE_STATUS__SCRATCH_EN__SHIFT 0x1d
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TTRACE_SIMD_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__OREO_CONFLICT_MASK 0x00400000L
+#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
+#define SQ_WAVE_STATUS__NO_VGPRS_MASK 0x01000000L
+#define SQ_WAVE_STATUS__LDS_PARAM_READY_MASK 0x02000000L
+#define SQ_WAVE_STATUS__MUST_GS_ALLOC_MASK 0x04000000L
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+#define SQ_WAVE_STATUS__IDLE_MASK 0x10000000L
+#define SQ_WAVE_STATUS__SCRATCH_EN_MASK 0x20000000L
+//SQ_WAVE_TRAPSTS
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
+#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
+#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
+#define SQ_WAVE_TRAPSTS__BUFFER_OOB__SHIFT 0xf
+#define SQ_WAVE_TRAPSTS__HOST_TRAP__SHIFT 0x10
+#define SQ_WAVE_TRAPSTS__WAVESTART__SHIFT 0x11
+#define SQ_WAVE_TRAPSTS__WAVE_END__SHIFT 0x12
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT__SHIFT 0x13
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST__SHIFT 0x14
+#define SQ_WAVE_TRAPSTS__UTC_ERROR__SHIFT 0x1c
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
+#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
+#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
+#define SQ_WAVE_TRAPSTS__BUFFER_OOB_MASK 0x00008000L
+#define SQ_WAVE_TRAPSTS__HOST_TRAP_MASK 0x00010000L
+#define SQ_WAVE_TRAPSTS__WAVESTART_MASK 0x00020000L
+#define SQ_WAVE_TRAPSTS__WAVE_END_MASK 0x00040000L
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT_MASK 0x00080000L
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST_MASK 0x00100000L
+#define SQ_WAVE_TRAPSTS__UTC_ERROR_MASK 0x10000000L
+//SQ_WAVE_GPR_ALLOC
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0xc
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x000001FFL
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x000FF000L
+//SQ_WAVE_LDS_ALLOC
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
+#define SQ_WAVE_LDS_ALLOC__VGPR_SHARED_SIZE__SHIFT 0x18
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000001FFL
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
+#define SQ_WAVE_LDS_ALLOC__VGPR_SHARED_SIZE_MASK 0x0F000000L
+//SQ_WAVE_IB_STS
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x0
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x4
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0xa
+#define SQ_WAVE_IB_STS__VS_CNT__SHIFT 0x1a
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000007L
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x000003F0L
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000FC00L
+#define SQ_WAVE_IB_STS__VS_CNT_MASK 0xFC000000L
+//SQ_WAVE_PC_LO
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_PC_HI
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
+//SQ_WAVE_IB_DBG1
+#define SQ_WAVE_IB_DBG1__WAVE_IDLE__SHIFT 0x18
+#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
+#define SQ_WAVE_IB_DBG1__WAVE_IDLE_MASK 0x01000000L
+#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
+//SQ_WAVE_FLUSH_IB
+#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
+#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
+//SQ_WAVE_FLAT_SCRATCH_LO
+#define SQ_WAVE_FLAT_SCRATCH_LO__DATA__SHIFT 0x0
+#define SQ_WAVE_FLAT_SCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_FLAT_SCRATCH_HI
+#define SQ_WAVE_FLAT_SCRATCH_HI__DATA__SHIFT 0x0
+#define SQ_WAVE_FLAT_SCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_HW_ID1
+#define SQ_WAVE_HW_ID1__WAVE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID1__SIMD_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID1__WGP_ID__SHIFT 0xa
+#define SQ_WAVE_HW_ID1__SA_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID1__SE_ID__SHIFT 0x12
+#define SQ_WAVE_HW_ID1__DP_RATE__SHIFT 0x1d
+#define SQ_WAVE_HW_ID1__WAVE_ID_MASK 0x0000001FL
+#define SQ_WAVE_HW_ID1__SIMD_ID_MASK 0x00000300L
+#define SQ_WAVE_HW_ID1__WGP_ID_MASK 0x00003C00L
+#define SQ_WAVE_HW_ID1__SA_ID_MASK 0x00010000L
+#define SQ_WAVE_HW_ID1__SE_ID_MASK 0x001C0000L
+#define SQ_WAVE_HW_ID1__DP_RATE_MASK 0xE0000000L
+//SQ_WAVE_HW_ID2
+#define SQ_WAVE_HW_ID2__QUEUE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID2__PIPE_ID__SHIFT 0x4
+#define SQ_WAVE_HW_ID2__ME_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID2__STATE_ID__SHIFT 0xc
+#define SQ_WAVE_HW_ID2__WG_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID2__VM_ID__SHIFT 0x18
+#define SQ_WAVE_HW_ID2__QUEUE_ID_MASK 0x0000000FL
+#define SQ_WAVE_HW_ID2__PIPE_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID2__ME_ID_MASK 0x00000300L
+#define SQ_WAVE_HW_ID2__STATE_ID_MASK 0x00007000L
+#define SQ_WAVE_HW_ID2__WG_ID_MASK 0x001F0000L
+#define SQ_WAVE_HW_ID2__VM_ID_MASK 0x0F000000L
+//SQ_WAVE_POPS_PACKER
+#define SQ_WAVE_POPS_PACKER__POPS_EN__SHIFT 0x0
+#define SQ_WAVE_POPS_PACKER__POPS_PACKER_ID__SHIFT 0x1
+#define SQ_WAVE_POPS_PACKER__POPS_EN_MASK 0x00000001L
+#define SQ_WAVE_POPS_PACKER__POPS_PACKER_ID_MASK 0x00000006L
+//SQ_WAVE_SCHED_MODE
+#define SQ_WAVE_SCHED_MODE__DEP_MODE__SHIFT 0x0
+#define SQ_WAVE_SCHED_MODE__DEP_MODE_MASK 0x00000003L
+//SQ_WAVE_IB_STS2
+#define SQ_WAVE_IB_STS2__INST_PREFETCH__SHIFT 0x0
+#define SQ_WAVE_IB_STS2__MEM_ORDER__SHIFT 0x8
+#define SQ_WAVE_IB_STS2__FWD_PROGRESS__SHIFT 0xa
+#define SQ_WAVE_IB_STS2__WAVE64__SHIFT 0xb
+#define SQ_WAVE_IB_STS2__INST_PREFETCH_MASK 0x00000003L
+#define SQ_WAVE_IB_STS2__MEM_ORDER_MASK 0x00000300L
+#define SQ_WAVE_IB_STS2__FWD_PROGRESS_MASK 0x00000400L
+#define SQ_WAVE_IB_STS2__WAVE64_MASK 0x00000800L
+//SQ_WAVE_SHADER_CYCLES
+#define SQ_WAVE_SHADER_CYCLES__CYCLES__SHIFT 0x0
+#define SQ_WAVE_SHADER_CYCLES__CYCLES_MASK 0x000FFFFFL
+//SQ_WAVE_TTMP0
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP1
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP2
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP3
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP4
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP5
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP6
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP7
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP8
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP9
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP10
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP11
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP12
+#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP13
+#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP14
+#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP15
+#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_M0
+#define SQ_WAVE_M0__M0__SHIFT 0x0
+#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_LO
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_HI
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_offset.h
new file mode 100644
index 000000000000..82312ecc0216
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_offset.h
@@ -0,0 +1,402 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _mp_13_0_4_OFFSET_HEADER
+#define _mp_13_0_4_OFFSET_HEADER
+
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+// base address: 0x0
+#define regMP0_SMN_C2PMSG_32 0x0060
+#define regMP0_SMN_C2PMSG_32_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_33 0x0061
+#define regMP0_SMN_C2PMSG_33_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_34 0x0062
+#define regMP0_SMN_C2PMSG_34_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_35 0x0063
+#define regMP0_SMN_C2PMSG_35_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_36 0x0064
+#define regMP0_SMN_C2PMSG_36_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_37 0x0065
+#define regMP0_SMN_C2PMSG_37_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_38 0x0066
+#define regMP0_SMN_C2PMSG_38_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_39 0x0067
+#define regMP0_SMN_C2PMSG_39_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_40 0x0068
+#define regMP0_SMN_C2PMSG_40_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_41 0x0069
+#define regMP0_SMN_C2PMSG_41_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_42 0x006a
+#define regMP0_SMN_C2PMSG_42_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_43 0x006b
+#define regMP0_SMN_C2PMSG_43_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_44 0x006c
+#define regMP0_SMN_C2PMSG_44_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_45 0x006d
+#define regMP0_SMN_C2PMSG_45_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_46 0x006e
+#define regMP0_SMN_C2PMSG_46_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_47 0x006f
+#define regMP0_SMN_C2PMSG_47_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_48 0x0070
+#define regMP0_SMN_C2PMSG_48_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_49 0x0071
+#define regMP0_SMN_C2PMSG_49_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_50 0x0072
+#define regMP0_SMN_C2PMSG_50_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_51 0x0073
+#define regMP0_SMN_C2PMSG_51_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_52 0x0074
+#define regMP0_SMN_C2PMSG_52_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_53 0x0075
+#define regMP0_SMN_C2PMSG_53_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_54 0x0076
+#define regMP0_SMN_C2PMSG_54_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_55 0x0077
+#define regMP0_SMN_C2PMSG_55_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_56 0x0078
+#define regMP0_SMN_C2PMSG_56_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_57 0x0079
+#define regMP0_SMN_C2PMSG_57_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_58 0x007a
+#define regMP0_SMN_C2PMSG_58_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_59 0x007b
+#define regMP0_SMN_C2PMSG_59_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_60 0x007c
+#define regMP0_SMN_C2PMSG_60_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_61 0x007d
+#define regMP0_SMN_C2PMSG_61_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_62 0x007e
+#define regMP0_SMN_C2PMSG_62_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_63 0x007f
+#define regMP0_SMN_C2PMSG_63_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_64 0x0080
+#define regMP0_SMN_C2PMSG_64_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_65 0x0081
+#define regMP0_SMN_C2PMSG_65_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_66 0x0082
+#define regMP0_SMN_C2PMSG_66_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_68 0x0084
+#define regMP0_SMN_C2PMSG_68_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_69 0x0085
+#define regMP0_SMN_C2PMSG_69_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_70 0x0086
+#define regMP0_SMN_C2PMSG_70_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_71 0x0087
+#define regMP0_SMN_C2PMSG_71_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_72 0x0088
+#define regMP0_SMN_C2PMSG_72_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_73 0x0089
+#define regMP0_SMN_C2PMSG_73_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_74 0x008a
+#define regMP0_SMN_C2PMSG_74_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_75 0x008b
+#define regMP0_SMN_C2PMSG_75_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_76 0x008c
+#define regMP0_SMN_C2PMSG_76_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_77 0x008d
+#define regMP0_SMN_C2PMSG_77_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_78 0x008e
+#define regMP0_SMN_C2PMSG_78_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_79 0x008f
+#define regMP0_SMN_C2PMSG_79_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_80 0x0090
+#define regMP0_SMN_C2PMSG_80_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_81 0x0091
+#define regMP0_SMN_C2PMSG_81_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_82 0x0092
+#define regMP0_SMN_C2PMSG_82_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_83 0x0093
+#define regMP0_SMN_C2PMSG_83_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_84 0x0094
+#define regMP0_SMN_C2PMSG_84_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_85 0x0095
+#define regMP0_SMN_C2PMSG_85_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_86 0x0096
+#define regMP0_SMN_C2PMSG_86_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_87 0x0097
+#define regMP0_SMN_C2PMSG_87_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_88 0x0098
+#define regMP0_SMN_C2PMSG_88_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_89 0x0099
+#define regMP0_SMN_C2PMSG_89_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_90 0x009a
+#define regMP0_SMN_C2PMSG_90_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_91 0x009b
+#define regMP0_SMN_C2PMSG_91_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_92 0x009c
+#define regMP0_SMN_C2PMSG_92_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_93 0x009d
+#define regMP0_SMN_C2PMSG_93_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_94 0x009e
+#define regMP0_SMN_C2PMSG_94_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_95 0x009f
+#define regMP0_SMN_C2PMSG_95_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_96 0x00a0
+#define regMP0_SMN_C2PMSG_96_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_97 0x00a1
+#define regMP0_SMN_C2PMSG_97_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_98 0x00a2
+#define regMP0_SMN_C2PMSG_98_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_99 0x00a3
+#define regMP0_SMN_C2PMSG_99_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_100 0x00a4
+#define regMP0_SMN_C2PMSG_100_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_101 0x00a5
+#define regMP0_SMN_C2PMSG_101_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_102 0x00a6
+#define regMP0_SMN_C2PMSG_102_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_103 0x00a7
+#define regMP0_SMN_C2PMSG_103_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_104 0x00a8
+#define regMP0_SMN_C2PMSG_104_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_105 0x00a9
+#define regMP0_SMN_C2PMSG_105_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_106 0x00aa
+#define regMP0_SMN_C2PMSG_106_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_107 0x00ab
+#define regMP0_SMN_C2PMSG_107_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_108 0x00ac
+#define regMP0_SMN_C2PMSG_108_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_109 0x00ad
+#define regMP0_SMN_C2PMSG_109_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_110 0x00ae
+#define regMP0_SMN_C2PMSG_110_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_111 0x00af
+#define regMP0_SMN_C2PMSG_111_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_112 0x00b0
+#define regMP0_SMN_C2PMSG_112_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_113 0x00b1
+#define regMP0_SMN_C2PMSG_113_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_114 0x00b2
+#define regMP0_SMN_C2PMSG_114_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_115 0x00b3
+#define regMP0_SMN_C2PMSG_115_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_116 0x00b4
+#define regMP0_SMN_C2PMSG_116_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_117 0x00b5
+#define regMP0_SMN_C2PMSG_117_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_118 0x00b6
+#define regMP0_SMN_C2PMSG_118_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_119 0x00b7
+#define regMP0_SMN_C2PMSG_119_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_120 0x00b8
+#define regMP0_SMN_C2PMSG_120_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_121 0x00b9
+#define regMP0_SMN_C2PMSG_121_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_122 0x00ba
+#define regMP0_SMN_C2PMSG_122_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_123 0x00bb
+#define regMP0_SMN_C2PMSG_123_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_124 0x00bc
+#define regMP0_SMN_C2PMSG_124_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_125 0x00bd
+#define regMP0_SMN_C2PMSG_125_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_126 0x00be
+#define regMP0_SMN_C2PMSG_126_BASE_IDX 1
+#define regMP0_SMN_C2PMSG_127 0x00bf
+#define regMP0_SMN_C2PMSG_127_BASE_IDX 1
+#define regMP0_SMN_IH_CREDIT 0x00c1
+#define regMP0_SMN_IH_CREDIT_BASE_IDX 1
+#define regMP0_SMN_IH_SW_INT 0x00c2
+#define regMP0_SMN_IH_SW_INT_BASE_IDX 1
+#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 1
+
+// addressBlock: mp_SmuMp1_SmnDec
+// base address: 0x0
+#define regMP1_SMN_C2PMSG_32 0x0260
+#define regMP1_SMN_C2PMSG_32_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_33 0x0261
+#define regMP1_SMN_C2PMSG_33_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_34 0x0262
+#define regMP1_SMN_C2PMSG_34_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_35 0x0263
+#define regMP1_SMN_C2PMSG_35_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_36 0x0264
+#define regMP1_SMN_C2PMSG_36_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_37 0x0265
+#define regMP1_SMN_C2PMSG_37_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_38 0x0266
+#define regMP1_SMN_C2PMSG_38_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_39 0x0267
+#define regMP1_SMN_C2PMSG_39_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_40 0x0268
+#define regMP1_SMN_C2PMSG_40_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_41 0x0269
+#define regMP1_SMN_C2PMSG_41_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_42 0x026a
+#define regMP1_SMN_C2PMSG_42_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_43 0x026b
+#define regMP1_SMN_C2PMSG_43_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_44 0x026c
+#define regMP1_SMN_C2PMSG_44_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_45 0x026d
+#define regMP1_SMN_C2PMSG_45_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_46 0x026e
+#define regMP1_SMN_C2PMSG_46_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_47 0x026f
+#define regMP1_SMN_C2PMSG_47_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_48 0x0270
+#define regMP1_SMN_C2PMSG_48_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_49 0x0271
+#define regMP1_SMN_C2PMSG_49_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_50 0x0272
+#define regMP1_SMN_C2PMSG_50_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_51 0x0273
+#define regMP1_SMN_C2PMSG_51_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_52 0x0274
+#define regMP1_SMN_C2PMSG_52_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_53 0x0275
+#define regMP1_SMN_C2PMSG_53_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_54 0x0276
+#define regMP1_SMN_C2PMSG_54_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_55 0x0277
+#define regMP1_SMN_C2PMSG_55_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_56 0x0278
+#define regMP1_SMN_C2PMSG_56_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_57 0x0279
+#define regMP1_SMN_C2PMSG_57_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_58 0x027a
+#define regMP1_SMN_C2PMSG_58_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_59 0x027b
+#define regMP1_SMN_C2PMSG_59_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_60 0x027c
+#define regMP1_SMN_C2PMSG_60_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_61 0x027d
+#define regMP1_SMN_C2PMSG_61_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_62 0x027e
+#define regMP1_SMN_C2PMSG_62_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_63 0x027f
+#define regMP1_SMN_C2PMSG_63_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_64 0x0280
+#define regMP1_SMN_C2PMSG_64_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_65 0x0281
+#define regMP1_SMN_C2PMSG_65_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_66 0x0282
+#define regMP1_SMN_C2PMSG_66_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_67 0x0283
+#define regMP1_SMN_C2PMSG_67_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_68 0x0284
+#define regMP1_SMN_C2PMSG_68_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_69 0x0285
+#define regMP1_SMN_C2PMSG_69_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_70 0x0286
+#define regMP1_SMN_C2PMSG_70_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_71 0x0287
+#define regMP1_SMN_C2PMSG_71_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_72 0x0288
+#define regMP1_SMN_C2PMSG_72_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_73 0x0289
+#define regMP1_SMN_C2PMSG_73_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_74 0x028a
+#define regMP1_SMN_C2PMSG_74_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_75 0x028b
+#define regMP1_SMN_C2PMSG_75_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_76 0x028c
+#define regMP1_SMN_C2PMSG_76_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_77 0x028d
+#define regMP1_SMN_C2PMSG_77_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_78 0x028e
+#define regMP1_SMN_C2PMSG_78_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_79 0x028f
+#define regMP1_SMN_C2PMSG_79_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_80 0x0290
+#define regMP1_SMN_C2PMSG_80_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_81 0x0291
+#define regMP1_SMN_C2PMSG_81_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_82 0x0292
+#define regMP1_SMN_C2PMSG_82_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_83 0x0293
+#define regMP1_SMN_C2PMSG_83_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_84 0x0294
+#define regMP1_SMN_C2PMSG_84_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_85 0x0295
+#define regMP1_SMN_C2PMSG_85_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_86 0x0296
+#define regMP1_SMN_C2PMSG_86_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_87 0x0297
+#define regMP1_SMN_C2PMSG_87_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_88 0x0298
+#define regMP1_SMN_C2PMSG_88_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_89 0x0299
+#define regMP1_SMN_C2PMSG_89_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_90 0x029a
+#define regMP1_SMN_C2PMSG_90_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_91 0x029b
+#define regMP1_SMN_C2PMSG_91_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_92 0x029c
+#define regMP1_SMN_C2PMSG_92_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_93 0x029d
+#define regMP1_SMN_C2PMSG_93_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_94 0x029e
+#define regMP1_SMN_C2PMSG_94_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_95 0x029f
+#define regMP1_SMN_C2PMSG_95_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_96 0x02a0
+#define regMP1_SMN_C2PMSG_96_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_97 0x02a1
+#define regMP1_SMN_C2PMSG_97_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_98 0x02a2
+#define regMP1_SMN_C2PMSG_98_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_99 0x02a3
+#define regMP1_SMN_C2PMSG_99_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_100 0x02a4
+#define regMP1_SMN_C2PMSG_100_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_101 0x02a5
+#define regMP1_SMN_C2PMSG_101_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_102 0x02a6
+#define regMP1_SMN_C2PMSG_102_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_103 0x02a7
+#define regMP1_SMN_C2PMSG_103_BASE_IDX 1
+#define regMP1_SMN_IH_CREDIT 0x02c1
+#define regMP1_SMN_IH_CREDIT_BASE_IDX 1
+#define regMP1_SMN_IH_SW_INT 0x02c2
+#define regMP1_SMN_IH_SW_INT_BASE_IDX 1
+#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 1
+#define regMP1_SMN_FPS_CNT 0x02c4
+#define regMP1_SMN_FPS_CNT_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH0 0x0340
+#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH1 0x0341
+#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH2 0x0342
+#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH3 0x0343
+#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH4 0x0344
+#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH5 0x0345
+#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH6 0x0346
+#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH7 0x0347
+#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 1
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_sh_mask.h
new file mode 100644
index 000000000000..b0153b335951
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_sh_mask.h
@@ -0,0 +1,595 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _mp_13_0_4_SH_MASK_HEADER
+#define _mp_13_0_4_SH_MASK_HEADER
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+//MP0_SMN_C2PMSG_32
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_33
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_34
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_35
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_36
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_37
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_38
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_39
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_40
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_41
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_42
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_43
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_44
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_45
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_46
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_47
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_48
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_49
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_50
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_51
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_52
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_53
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_54
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_55
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_56
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_57
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_58
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_59
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_60
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_61
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_62
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_63
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_64
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_65
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_66
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_67
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_68
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_69
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_70
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_71
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_72
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_73
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_74
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_75
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_76
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_77
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_78
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_79
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_80
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_81
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_82
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_83
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_84
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_85
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_86
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_87
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_88
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_89
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_90
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_91
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_92
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_93
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_94
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_95
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_96
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_97
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_98
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_99
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_100
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_101
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_102
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_103
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_IH_CREDIT
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_SMN_IH_SW_INT
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP0_SMN_IH_SW_INT_CTRL
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_104
+#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_105
+#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_106
+#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_107
+#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_108
+#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_109
+#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_110
+#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_111
+#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_112
+#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_113
+#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_114
+#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_115
+#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_116
+#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_117
+#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_118
+#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_119
+#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_120
+#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_121
+#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_122
+#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_123
+#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_124
+#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_125
+#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_126
+#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_127
+#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
index 2ed95790a600..cf8d60c4df1b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
+#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
index eb62a18fcc48..3d60c9e92548 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 80dab1146439..50bfa513cb35 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -268,7 +268,8 @@ union MESAPI__ADD_QUEUE {
uint32_t is_tmz_queue : 1;
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
- uint32_t reserved : 22;
+ uint32_t trap_en : 1;
+ uint32_t reserved : 21;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 956b6ce81c84..1b300c569faf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -668,6 +668,51 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_set_residency_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_residency_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_entrycount_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 65624d091ed2..cb5b9df78b4d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -435,6 +435,9 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev);
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
uint64_t event_arg);
+int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value);
+int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value);
+int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value);
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev);
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 6d9b3c6af164..13c5c7f1ecb9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -90,6 +90,30 @@ static int smu_sys_set_pp_feature_mask(void *handle,
return smu_set_pp_feature_mask(smu, new_mask);
}
+int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
+{
+ if (!smu->ppt_funcs->set_gfx_off_residency)
+ return -EINVAL;
+
+ return smu_set_gfx_off_residency(smu, value);
+}
+
+int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
+{
+ if (!smu->ppt_funcs->get_gfx_off_residency)
+ return -EINVAL;
+
+ return smu_get_gfx_off_residency(smu, value);
+}
+
+int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
+{
+ if (!smu->ppt_funcs->get_gfx_off_entrycount)
+ return -EINVAL;
+
+ return smu_get_gfx_off_entrycount(smu, value);
+}
+
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
if (!smu->ppt_funcs->get_gfx_off_status)
@@ -581,6 +605,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
smu->od_enabled = true;
break;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 10):
smu_v13_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 7):
@@ -1361,6 +1386,15 @@ static int smu_hw_init(void *handle)
}
if (smu->is_apu) {
+ if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
+ likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to Enable gfx imu!\n");
+ return ret;
+ }
+ }
+
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(smu, true);
@@ -1567,6 +1601,7 @@ static int smu_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
+ uint64_t count;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1584,6 +1619,14 @@ static int smu_suspend(void *handle)
smu_set_gfx_cgpg(smu, false);
+	/*
+	 * PMFW resets the entry count when the device is suspended, so we
+	 * save the last value here to keep the count consistent across resume
+	 */
+ ret = smu_get_entrycount_gfxoff(smu, &count);
+ if (!ret)
+ adev->gfx.gfx_off_entrycount = count;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b81c657c7386..e2fa3b066b96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1112,6 +1112,22 @@ struct pptable_funcs {
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
/**
+	 * @get_gfx_off_entrycount: query the total GFXOFF entry count since
+	 * system power-up
+ */
+ u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);
+
+ /**
+ * @set_gfx_off_residency: set 1 to start logging, 0 to stop logging
+ */
+ u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);
+
+ /**
+ * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval
+ */
+ u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);
+
+ /**
* @register_irq_handler: Register interupt request handlers.
*/
int (*register_irq_handler)(struct smu_context *smu);
@@ -1454,6 +1470,12 @@ int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
+int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);
+
+int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);
+
+int smu_set_residency_gfxoff(struct smu_context *smu, bool value);
+
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 2b672d102c96..063f4a737605 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -97,7 +93,7 @@
#define FEATURE_MEM_TEMP_READ_BIT 47
#define FEATURE_ATHUB_MMHUB_PG_BIT 48
#define FEATURE_SOC_PCC_BIT 49
-#define FEATURE_SPARE_50_BIT 50
+#define FEATURE_EDC_PWRBRK_BIT 50
#define FEATURE_SPARE_51_BIT 51
#define FEATURE_SPARE_52_BIT 52
#define FEATURE_SPARE_53_BIT 53
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
index d2e10a724560..82cf9e563065 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
@@ -137,7 +137,7 @@
#define PPSMC_MSG_DisallowGpo 0x56
#define PPSMC_MSG_Enable2ndUSB20Port 0x57
-
-#define PPSMC_Message_Count 0x58
+#define PPSMC_MSG_DriverMode2Reset 0x5D
+#define PPSMC_Message_Count 0x5E
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
index fe130a497d6c..7471e2df2828 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
@@ -108,7 +108,10 @@
#define PPSMC_MSG_SetSlowPPTLimit 0x4A
#define PPSMC_MSG_GetFastPPTLimit 0x4B
#define PPSMC_MSG_GetSlowPPTLimit 0x4C
-#define PPSMC_Message_Count 0x4D
+#define PPSMC_MSG_GetGfxOffStatus 0x50
+#define PPSMC_MSG_GetGfxOffEntryCount 0x51
+#define PPSMC_MSG_LogGfxOffResidency 0x52
+#define PPSMC_Message_Count 0x53
//Argument for PPSMC_MSG_GfxDeviceDriverReset
enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 19084a4fcb2b..58098b82df66 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -235,7 +235,11 @@
__SMU_DUMMY_MAP(UnforceGfxVid), \
__SMU_DUMMY_MAP(HeavySBR), \
__SMU_DUMMY_MAP(SetBadHBMPagesRetiredFlagsPerChannel), \
- __SMU_DUMMY_MAP(EnableGfxImu),
+ __SMU_DUMMY_MAP(EnableGfxImu), \
+ __SMU_DUMMY_MAP(DriverMode2Reset), \
+ __SMU_DUMMY_MAP(GetGfxOffStatus), \
+ __SMU_DUMMY_MAP(GetGfxOffEntryCount), \
+ __SMU_DUMMY_MAP(LogGfxOffResidency),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 72b553618116..9d62ea2af132 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,10 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2B
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -291,5 +292,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa520d79ef67..7ed4d4265797 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -154,6 +154,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0),
MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0),
MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0),
+ MSG_MAP(DriverMode2Reset, PPSMC_MSG_DriverMode2Reset, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -4254,6 +4255,57 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
+static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
+{
+ return true;
+}
+
+static int sienna_cichlid_mode2_reset(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 100;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_DriverMode2Reset);
+
+ mutex_lock(&smu->message_lock);
+
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
+ SMU_RESET_MODE_2);
+
+ ret = smu_cmn_wait_for_response(smu);
+ while (ret != 0 && timeout) {
+ ret = smu_cmn_wait_for_response(smu);
+ /* Wait a bit more time for getting ACK */
+ if (ret != 0) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ if (!timeout) {
+ dev_err(adev->dev,
+ "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+
+ dev_info(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -4283,6 +4335,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -4348,6 +4401,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
+ .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
+ .mode2_reset = sienna_cichlid_mode2_reset,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 89504ff8e9ed..847990145dcd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -138,6 +138,9 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
+ MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0),
+ MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0),
+ MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -2200,6 +2203,76 @@ static int vangogh_set_power_limit(struct smu_context *smu,
return ret;
}
+/**
+ * vangogh_set_gfxoff_residency - start/stop gfxoff residency logging
+ *
+ * @smu: smu_context pointer
+ * @start: true to start logging, false to stop logging
+ *
+ * This function is used to start or stop gfxoff residency logging.
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
+{
+ int ret = 0;
+ u32 residency;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
+ start, &residency);
+
+ if (!start)
+ adev->gfx.gfx_off_residency = residency;
+
+ return ret;
+}
+
+/**
+ * vangogh_get_gfxoff_residency - get gfxoff residency
+ *
+ * @smu: smu_context pointer
+ * @residency: placeholder for the average gfxoff residency (percent)
+ *
+ * This function is used to get the gfxoff residency.
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *residency = adev->gfx.gfx_off_residency;
+
+ return 0;
+}
+
+/**
+ * vangogh_get_gfxoff_entrycount - get gfxoff entry count
+ *
+ * @smu: smu_context pointer
+ * @entrycount: placeholder for the total GFXOFF entry count since power-up
+ *
+ * This function is used to get the gfxoff entry count.
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
+{
+ int ret = 0, value = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
+ *entrycount = value + adev->gfx.gfx_off_entrycount;
+
+ return ret;
+}
+
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -2237,6 +2310,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.mode2_reset = vangogh_mode2_reset,
.gfx_off_control = smu_v11_0_gfx_off_control,
.get_gfx_off_status = vangogh_get_gfxoff_status,
+ .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
+ .get_gfx_off_residency = vangogh_get_gfxoff_residency,
+ .set_gfx_off_residency = vangogh_set_gfxoff_residency,
.get_ppt_limit = vangogh_get_ppt_limit,
.get_power_limit = vangogh_get_power_limit,
.set_power_limit = vangogh_set_power_limit,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e8fe84f806d1..6e4a052dc53d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -59,6 +59,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
@@ -84,9 +85,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id);
-
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -212,6 +210,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -219,24 +220,17 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
/*
* Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use 36831 signed pptable when pp_table_id is 3683
- * - use 36641 signed pptable when pp_table_id is 3664 or 0
- * TODO: drop these when the pptable carried in vbios is ready.
+ * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+ * - use 36831 soft pptable when pptable_id is 3683
*/
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
switch (pptable_id) {
- case 0:
case 3664:
- pptable_id = 36641;
+ case 3715:
+ case 3795:
+ pptable_id = 0;
break;
case 3683:
pptable_id = 36831;
@@ -330,6 +324,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
+ case IP_VERSION(13, 0, 10):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -425,8 +422,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
return 0;
}
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
{
const struct smc_firmware_header_v1_0 *hdr;
struct amdgpu_device *adev = smu->adev;
@@ -478,7 +477,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
/*
* Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
+ * - use 3664, 3683 or 3715 on request
* - use 3664 when pptable_id is 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -489,11 +488,14 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
break;
case 3664:
case 3683:
+ case 3715:
break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
return -EINVAL;
}
+ } else if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) {
+ pptable_id = 6666;
}
}
@@ -1106,6 +1108,9 @@ int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->irq_source.num_types)
+ return 0;
+
ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
if (ret)
return ret;
@@ -1115,6 +1120,9 @@ int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
+ if (!smu->irq_source.num_types)
+ return 0;
+
return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
@@ -1486,6 +1494,9 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
irq_src->num_types = 1;
irq_src->funcs = &smu_v13_0_irq_funcs;
@@ -2344,8 +2355,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
-
- return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ /* Param 1 to tell PMFW to enable GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 931c775fe27e..7db2fd9ea74a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -313,6 +313,9 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
return 0;
}
@@ -385,11 +388,29 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
{
struct smu_table_context *smu_table = &smu->smu_table;
void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_0_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
+ uint32_t pptable_id;
int ret = 0;
/*
@@ -398,17 +419,51 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
* rely on the combo pptable(and its revelant SMU message).
*/
if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
} else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu_table->boot_values.pp_table_id;
+ }
+
+ /*
+ * Temporary solution for SMU V13.0.0 with SCPM disabled:
+ * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+ * - use soft pptable when pptable_id is 3683
+ */
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 3664:
+ case 3715:
+ case 3795:
+ pptable_id = 0;
+ break;
+ case 3683:
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
+
+ /* force using vbios pptable in sriov mode */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ else
+ ret = smu_v13_0_get_pptable_from_firmware(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size,
+ pptable_id);
}
+ if (ret)
+ return ret;
ret = smu_v13_0_0_store_powerplay_table(smu);
if (ret)
@@ -1789,7 +1844,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 82d3718d8324..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
- return ret;
-}
-
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 47360ef5c175..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9dd56e73218b..c422bf8a09b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -400,11 +401,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_7_powerplay_table);
+
+ return 0;
+}
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -413,18 +430,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
* be used directly by driver. To get the raw pptable, we need to
* rely on the combo pptable(and its revelant SMU message).
*/
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_7_store_powerplay_table(smu);
if (ret)
@@ -1567,6 +1577,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1594,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1646,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 70cbc46341a3..04e56b0b3033 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -42,6 +42,11 @@
#undef pr_info
#undef pr_debug
+#define regSMUIO_GFX_MISC_CNTL 0x00c5
+#define regSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1L
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -587,6 +592,31 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+/**
+ * yellow_carp_get_gfxoff_status - get gfxoff status
+ *
+ * @smu: smu_context pointer
+ *
+ * This function is used to get the current gfxoff status.
+ *
+ * Returns 0 = in GFXOFF (default).
+ * Returns 1 = transitioning out of GFXOFF.
+ * Returns 2 = not in GFXOFF.
+ * Returns 3 = transitioning into GFXOFF.
+ */
+static uint32_t yellow_carp_get_gfxoff_status(struct smu_context *smu)
+{
+ uint32_t reg;
+ uint32_t gfxoff_status = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ reg = RREG32_SOC15(SMUIO, 0, regSMUIO_GFX_MISC_CNTL);
+ gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
+ >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
+
+ return gfxoff_status;
+}
+
static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -1186,6 +1216,7 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
+ .get_gfx_off_status = yellow_carp_get_gfxoff_status,
.post_init = yellow_carp_post_smu_init,
.mode2_reset = yellow_carp_mode2_reset,
.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 7469bbfce1fb..ceb13c838067 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -47,6 +47,9 @@
#define smu_notify_memory_pool_location(smu) smu_ppt_funcs(notify_memory_pool_location, 0, smu)
#define smu_gfx_off_control(smu, enable) smu_ppt_funcs(gfx_off_control, 0, smu, enable)
#define smu_get_gfx_off_status(smu) smu_ppt_funcs(get_gfx_off_status, 0, smu)
+#define smu_get_gfx_off_entrycount(smu, value) smu_ppt_funcs(get_gfx_off_entrycount, 0, smu, value)
+#define smu_get_gfx_off_residency(smu, value) smu_ppt_funcs(get_gfx_off_residency, 0, smu, value)
+#define smu_set_gfx_off_residency(smu, value) smu_ppt_funcs(set_gfx_off_residency, 0, smu, value)
#define smu_set_last_dcef_min_deep_sleep_clk(smu) smu_ppt_funcs(set_last_dcef_min_deep_sleep_clk, 0, smu)
#define smu_system_features_control(smu, en) smu_ppt_funcs(system_features_control, 0, smu, en)
#define smu_init_max_sustainable_clocks(smu) smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 281f8a9ba4fd..7ab38d734ad6 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -550,8 +550,9 @@ static int sii902x_audio_hw_params(struct device *dev, void *data,
unsigned long mclk_rate;
int i, ret;
- if (daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_dbg(dev, "%s: I2S master mode not supported\n", __func__);
+ if (daifmt->bit_clk_provider || daifmt->frame_clk_provider) {
+ dev_dbg(dev, "%s: I2S clock provider mode not supported\n",
+ __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index f50b47ac11a8..a2f0860b20bb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -45,7 +45,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
u8 inputclkfs = 0;
/* it cares I2S only */
- if (fmt->bit_clk_master | fmt->frame_clk_master) {
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
dev_err(dev, "unsupported clock settings\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 4442cc5602d4..ecd22c038c8c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -5793,8 +5793,10 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* Enpoint decompression with DP-to-DP peer device */
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE) &&
- (upstream_dsc & 0x2) /* DSC passthrough */)
+ (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
+ port->passthrough_aux = &immediate_upstream_port->aux;
return &port->aux;
+ }
/* Virtual DPCD decompression with DP-to-DP peer device */
return &immediate_upstream_port->aux;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 63212f02f097..a8b4d918e9a3 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -547,8 +547,7 @@ EXPORT_SYMBOL(drm_release_noglobal);
* Since events are used by the KMS API for vblank and page flip completion this
* means all modern display drivers must use it.
*
- * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
- * must set the &file_operation.llseek to no_llseek(). Polling support is
+ * @offset is ignored, DRM events are read like a pipe. Polling support is
* provided by drm_poll().
*
* This function will only ever read a full event. Therefore userspace must
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index eb0c2d041f13..86d670c71286 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1226,7 +1226,7 @@ retry:
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
@@ -1251,7 +1251,7 @@ retry:
goto retry;
}
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 05ca028dd152..35138f8a375c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -302,6 +302,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (WARN_ON(map->is_iomem)) {
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
ret = -EIO;
goto err_put_pages;
}
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index d5962a34c01d..e5fc875990c4 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ dma_resv_lock(gem->resv, NULL);
+ ret = ttm_bo_vmap(bo, map);
+ dma_resv_unlock(gem->resv);
- return ttm_bo_vmap(bo, map);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ dma_resv_lock(gem->resv, NULL);
ttm_bo_vunmap(bo, map);
+ dma_resv_unlock(gem->resv);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 7a9eeed239f3..fc1728d46ac2 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -286,6 +286,21 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Tablet 2 830F / 830L */
+ .matches = {
+ /*
+ * Note this also matches the Lenovo Yoga Tablet 2 1050F/L
+ * since that uses the same mainboard. The resolution match
+ * will limit this to only matching on the 830F/L. Neither has
+ * any external video outputs so those are not a concern.
+ */
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Partial match on beginning of BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 7655142a4651..10b0036f8a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1594,12 +1594,12 @@ static int hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_context *hdata = dev_get_drvdata(dev);
if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 7c4455541dbb..f8eb6f69be05 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1096,11 +1096,11 @@ static int tda998x_audio_hw_params(struct device *dev, void *data,
if (!spdif &&
(daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master)) {
+ daifmt->bit_clk_provider || daifmt->frame_clk_provider)) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 2a45a25e42fb..f4f7c3414762 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -27,7 +27,6 @@
#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
@@ -671,7 +670,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
return DISPLAY_VER(dev_priv) < 4 ||
(plane->fbc &&
- plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
+ plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index fa5371036239..cef251025d7a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -45,7 +45,7 @@ struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_address_space;
-struct i915_ggtt_view;
+struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 0da9b208d56e..01977cd237eb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -105,7 +105,7 @@ struct intel_fb_view {
* In the normal view the FB object's backing store sg list is used
* directly and hence the remap information here is not used.
*/
- struct i915_ggtt_view gtt;
+ struct i915_gtt_view gtt;
/*
* The GTT view (gtt.type) specific information for each FB color
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b191915ab351..eefa33c555ac 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1395,7 +1395,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
plane_view_height_tiles(fb, color_plane, dims, y));
}
- if (view->gtt.type == I915_GGTT_VIEW_ROTATED) {
+ if (view->gtt.type == I915_GTT_VIEW_ROTATED) {
drm_WARN_ON(&i915->drm, remap_info->linear);
check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
@@ -1420,7 +1420,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
/* rotate the tile dimensions to match the GTT view */
swap(tile_width, tile_height);
} else {
- drm_WARN_ON(&i915->drm, view->gtt.type != I915_GGTT_VIEW_REMAPPED);
+ drm_WARN_ON(&i915->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED);
check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
@@ -1503,12 +1503,12 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
}
static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
- enum i915_ggtt_view_type view_type)
+ enum i915_gtt_view_type view_type)
{
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
- if (view_type == I915_GGTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
+ if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
@@ -1530,16 +1530,16 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
int i, num_planes = fb->base.format->num_planes;
unsigned int tile_size = intel_tile_size(i915);
- intel_fb_view_init(i915, &fb->normal_view, I915_GGTT_VIEW_NORMAL);
+ intel_fb_view_init(i915, &fb->normal_view, I915_GTT_VIEW_NORMAL);
drm_WARN_ON(&i915->drm,
intel_fb_supports_90_270_rotation(fb) &&
intel_fb_needs_pot_stride_remap(fb));
if (intel_fb_supports_90_270_rotation(fb))
- intel_fb_view_init(i915, &fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ intel_fb_view_init(i915, &fb->rotated_view, I915_GTT_VIEW_ROTATED);
if (intel_fb_needs_pot_stride_remap(fb))
- intel_fb_view_init(i915, &fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+ intel_fb_view_init(i915, &fb->remapped_view, I915_GTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -1620,8 +1620,8 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
u32 gtt_offset = 0;
intel_fb_view_init(i915, &plane_state->view,
- drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED :
- I915_GGTT_VIEW_REMAPPED);
+ drm_rotation_90_or_270(rotation) ? I915_GTT_VIEW_ROTATED :
+ I915_GTT_VIEW_REMAPPED);
src_x = plane_state->uapi.src.x1 >> 16;
src_y = plane_state->uapi.src.y1 >> 16;
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index bd6e7c98e751..c86e5d4ee016 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -18,7 +18,7 @@
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags,
struct i915_address_space *vm)
@@ -79,7 +79,7 @@ err:
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index e4fcd0218d9d..de0efaa25905 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -11,12 +11,12 @@
struct drm_framebuffer;
struct i915_vma;
struct intel_plane_state;
-struct i915_ggtt_view;
+struct i915_gtt_view;
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 221336178991..1922f62d04c0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -198,8 +198,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
- const struct i915_ggtt_view view = {
- .type = I915_GGTT_VIEW_NORMAL,
+ const struct i915_gtt_view view = {
+ .type = I915_GTT_VIEW_NORMAL,
};
intel_wakeref_t wakeref;
struct fb_info *info;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 1674b0c5802b..d44a152ce680 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -397,7 +397,7 @@ struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
u32 alignment,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
unsigned int flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -434,7 +434,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
*/
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL))
+ (!view || view->type == I915_GTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
flags | PIN_MAPPABLE |
PIN_NONBLOCK);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7b2c14fd9e1..cd75b0ca2555 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -6,7 +6,6 @@
#include <linux/dma-resv.h>
#include <linux/highmem.h>
-#include <linux/intel-iommu.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0c5c43852e24..3218981488cc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -194,17 +194,17 @@ int i915_gem_mmap_gtt_version(void)
return 4;
}
-static inline struct i915_ggtt_view
+static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
view.partial.size =
min_t(unsigned int, chunk,
@@ -212,7 +212,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
/* If the partial covers the entire object, just create a normal VMA. */
if (chunk >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
+ view.type = I915_GTT_VIEW_NORMAL;
return view;
}
@@ -341,12 +341,12 @@ retry:
PIN_NOEVICT);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
/* Use a partial view if it is bigger than available space */
- struct i915_ggtt_view view =
+ struct i915_gtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
flags = PIN_MAPPABLE | PIN_NOSEARCH;
- if (view.type == I915_GGTT_VIEW_NORMAL)
+ if (view.type == I915_GTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
/*
@@ -357,7 +357,7 @@ retry:
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
flags = PIN_MAPPABLE;
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
@@ -394,7 +394,7 @@ retry:
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
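compute_partial_view() above picks the GTT view used to service a fault: round the faulting page down to a chunk boundary, clamp the view to the remaining pages, and fall back to a normal (whole-object) view when one chunk already covers the object. A self-contained sketch of that arithmetic follows; the names and the standalone framing are illustrative and use no driver types.

/*
 * Model of the partial-view arithmetic: offsets and sizes are in pages,
 * MIN_CHUNK_PAGES and the tiling round-up are taken as given.
 */
#include <stdio.h>

struct partial {
        int normal;             /* 1: whole-object view, 0: partial view */
        unsigned long offset;   /* first page covered by the view */
        unsigned int size;      /* number of pages in the view */
};

static struct partial compute_partial(unsigned long obj_pages,
                                      unsigned long page_offset,
                                      unsigned int chunk)
{
        struct partial v = { .normal = 0 };
        unsigned long remaining;

        v.offset = page_offset - (page_offset % chunk); /* rounddown() */
        remaining = obj_pages - v.offset;
        v.size = remaining < chunk ? (unsigned int)remaining : chunk;

        /* If the partial covers the entire object, use a normal view. */
        if (chunk >= obj_pages)
                v.normal = 1;
        return v;
}

int main(void)
{
        struct partial v = compute_partial(4096, 1234, 32);

        printf("normal=%d offset=%lu size=%u\n", v.normal, v.offset, v.size);
        return 0;
}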
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..85482a04d158 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- assert_object_held(obj);
+ assert_object_held_shared(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}
- if (!i915_gem_object_trylock(obj, NULL)) {
- /* busy, toss it back to the pile */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
- continue;
- }
-
__i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work.work);
+ container_of(work, struct drm_i915_private, mm.free_work);
i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+ queue_work(i915->wq, &i915->mm.free_work);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -731,6 +723,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
bool lmem_placement = false;
int i;
+ if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+ return false;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
@@ -745,7 +740,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
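The hunks above drop the trylock/re-queue dance and switch the deferred-free path from a delayed work item to a plain work item; freed objects are still pushed onto an llist, and the worker is only kicked when llist_add() reports an empty-to-non-empty transition. A toy user-space model of that "kick once per batch" pattern is sketched below; the list and worker are stand-ins for llist and the workqueue, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next; };

static struct node *free_list;
static int worker_kicks;

/* Push a node; return true if the list was empty (mirrors llist_add()). */
static bool list_add_node(struct node *n)
{
        bool was_empty = (free_list == NULL);

        n->next = free_list;
        free_list = n;
        return was_empty;
}

static void queue_worker(void)
{
        worker_kicks++;         /* the real code calls queue_work() here */
}

int main(void)
{
        struct node a, b, c;

        if (list_add_node(&a)) queue_worker();
        if (list_add_node(&b)) queue_worker();
        if (list_add_node(&c)) queue_worker();

        printf("3 frees, %d worker kick(s)\n", worker_kicks);  /* prints 1 */
        return 0;
}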
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 6f0a3ce35567..7317d4102955 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -543,7 +543,7 @@ struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
u32 alignment,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
unsigned int flags);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..9f6b14ec189a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
-#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
+
+ u32 tlb;
} mm;
struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..8357dbdcab5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@
#include <drm/drm_cache.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-#include "gt/intel_gt.h"
-
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!obj->mm.tlb)
+ return;
+
+ intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+ obj->mm.tlb = 0;
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
- if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
- intel_gt_invalidate_tlbs(to_gt(i915));
- }
+ flush_tlb_invalidate(obj);
return pages;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4eed3dd90ba8..f42ca1179f37 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -75,7 +75,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
if (size > resource_size(&mr->region))
return -ENOMEM;
- if (sg_alloc_table(st, page_count, GFP_KERNEL))
+ if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
return -ENOMEM;
/*
@@ -137,7 +137,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
* trigger the out-of-memory killer and for
* this we want __GFP_RETRY_MAYFAIL.
*/
- gfp |= __GFP_RETRY_MAYFAIL;
+ gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
}
} while (1);
@@ -209,7 +209,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
rebuild_st:
- st = kmalloc(sizeof(*st), GFP_KERNEL);
+ st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
if (!st)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1030053571a2..8dc5c8874d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -426,7 +426,8 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
+ "drm-i915_gem"));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index bc9c432edffe..f64a3deb12fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -297,7 +297,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ if (i915_gem_object_needs_ccs_pages(obj))
ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
NUM_BYTES_PER_CCS_BYTE),
PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 9aad84059d56..07e49f22f2de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -79,7 +79,12 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
goto out_no_populate;
err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
- GEM_WARN_ON(err);
+ if (err) {
+ drm_err(&i915->drm,
+ "Unable to copy from device to system memory, err:%pe\n",
+ ERR_PTR(err));
+ goto out_no_populate;
+ }
ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = backup;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b4ce33..8423df021b71 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
* However...!
*
* The mmu-notifier can be invalidated for a
- * migrate_page, that is alreadying holding the lock
- * on the page. Such a try_to_unmap() will result
+ * migrate_folio, that is already holding the lock
+ * on the folio. Such a try_to_unmap() will result
* in us calling put_pages() and so recursively try
* to lock the page. We avoid that deadlock with
* a trylock_page() and in exchange we risk missing
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 13b088cc787e..a666d7e610f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -434,5 +434,5 @@ int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_coherency),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 62c61af77a42..51ed824b020c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -476,5 +476,5 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3ced9948a331..55bf23dc0e54 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -93,7 +93,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
{
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
struct i915_vma *vma;
unsigned long page;
u32 __iomem *io;
@@ -210,7 +210,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
}
for_each_prime_number_from(page, 1, npages) {
- struct i915_ggtt_view view =
+ struct i915_gtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
u32 __iomem *io;
struct page *p;
@@ -1844,5 +1844,5 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index fe0a890775e2..bdf5bb40ccf1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -95,5 +95,5 @@ int i915_gem_object_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_huge),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index d2d75d9c0c8d..04eacae1aca5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -275,10 +275,17 @@ struct intel_context {
u8 child_index;
/** @guc: GuC specific members for parallel submission */
struct {
- /** @wqi_head: head pointer in work queue */
+ /** @wqi_head: cached head pointer in work queue */
u16 wqi_head;
- /** @wqi_tail: tail pointer in work queue */
+ /** @wqi_tail: cached tail pointer in work queue */
u16 wqi_tail;
+ /** @wq_head: pointer to the actual head in work queue */
+ u32 *wq_head;
+ /** @wq_tail: pointer to the actual tail in work queue */
+ u32 *wq_tail;
+ /** @wq_status: pointer to the status in work queue */
+ u32 *wq_status;
+
/**
* @parent_page: page in context state (ce->state) used
* by parent for work queue, process descriptor
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 37fa813af766..17e7f20bbb48 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -672,7 +672,7 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
unsigned long ccs_mask;
unsigned int i;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (hweight32(CCS_MASK(gt)) <= 1)
return;
ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 68c2b0d8f187..f435e06125aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
- mutex_init(&gt->tlb_invalidate_lock);
-
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +946,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;
- GEM_TRACE("\n");
-
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
}
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
spin_unlock_irq(&uncore->lock);
- for_each_engine(engine, gt, id) {
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +988,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
- struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
- mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
}
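The new tlb_seqno_passed()/intel_gt_invalidate_tlb() pair above batches full TLB invalidations behind a seqcount. A minimal, self-contained sketch of the wrap-safe arithmetic follows; it assumes the counter advances by two per completed full invalidation (as write_seqcount_invalidate() does) and that a pending marker is the current count with bit 0 set (as intel_gt_next_invalidate_tlb_full() returns). All names below are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

static uint32_t tlb_counter;    /* stands in for gt->tlb.seqno */

static uint32_t next_invalidate_seqno(void)
{
        return tlb_counter | 1;         /* odd value: barrier still outstanding */
}

static int seqno_passed(uint32_t seqno)
{
        /* ALIGN(seqno, 2): round the odd marker up to the next barrier value */
        uint32_t barrier = (seqno + 1) & ~1u;

        /*
         * Wrap-safe signed comparison; the marker only counts as passed once
         * the counter has moved strictly beyond its aligned value.
         */
        return (int32_t)(tlb_counter - barrier) > 0;
}

int main(void)
{
        uint32_t seqno = next_invalidate_seqno();       /* e.g. taken at unbind */
        int i;

        for (i = 0; i < 3; i++) {
                printf("counter=%u seqno=%u passed=%d\n",
                       tlb_counter, seqno, seqno_passed(seqno));
                tlb_counter += 2;       /* one full invalidation completes */
        }
        return 0;
}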
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 82d6f248d876..40b06adf509a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index bc898df7a48c..6c9a46452364 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,17 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+/**
+ * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
+ * it from sleeping, run some code and then asynchronously put the reference
+ * away.
+ *
+ * @gt: pointer to the gt
+ * @wf: pointer to a temporary wakeref.
+ */
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
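The with_intel_gt_pm_if_awake() macro documented above follows the common "conditional scope" for-loop idiom: acquire a wakeref only if the GT is already awake, run the body once, then drop the reference asynchronously. A stand-alone model of that idiom is sketched below, with stand-in resource helpers rather than the real intel_gt_pm calls.

#include <stdio.h>

struct resource { int active; int refs; };

static int get_if_active(struct resource *r)
{
        if (!r->active)
                return 0;
        r->refs++;
        return 1;               /* non-zero token runs the loop body once */
}

static void put_async(struct resource *r)
{
        r->refs--;
}

#define with_resource_if_active(r, tok) \
        for ((tok) = get_if_active(r); (tok); put_async(r), (tok) = 0)

int main(void)
{
        struct resource res = { .active = 1 };
        int tok;

        with_resource_if_active(&res, tok)
                printf("body ran with refs=%d\n", res.refs);

        printf("after scope refs=%d\n", res.refs);
        return 0;
}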
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 60d6eb5f245b..d414785003cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -259,6 +259,9 @@
#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+#define DRAW_WATERMARK _MMIO(0x26c0)
+#define VERT_WM_VAL REG_GENMASK(9, 0)
+
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080)
@@ -374,6 +377,9 @@
#define CHICKEN_RASTER_1 _MMIO(0x6204)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+#define CHICKEN_RASTER_2 _MMIO(0x6208)
+#define TBIMR_FAST_CLIP REG_BIT(5)
+
#define VFLSKPD _MMIO(0x62a8)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
@@ -1007,6 +1013,8 @@
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
+#define GUCPMTIMESTAMP _MMIO(0xc3e8)
+
#define __GEN9_RCS0_MOCS0 0xc800
#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
#define __GEN9_VCS0_MOCS0 0xc900
@@ -1078,6 +1086,7 @@
#define GEN10_SAMPLER_MODE _MMIO(0xe18c)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
@@ -1101,6 +1110,8 @@
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define THREAD_EX_ARB_MODE REG_GENMASK(3, 2)
+#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -1123,6 +1134,8 @@
#define RT_CTRL _MMIO(0xe530)
#define DIS_NULL_QUERY REG_BIT(10)
+#define STACKID_CTRL REG_GENMASK(6, 5)
+#define STACKID_CTRL_512 REG_FIELD_PREP(STACKID_CTRL, 0x2)
#define EU_PERF_CNTL1 _MMIO(0xe558)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 9e4ebf53379b..d651ccd0ab20 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -22,11 +22,6 @@ bool is_object_gt(struct kobject *kobj)
return !strncmp(kobj->name, "gt", 2);
}
-static struct intel_gt *kobj_to_gt(struct kobject *kobj)
-{
- return container_of(kobj, struct intel_gt, sysfs_gt);
-}
-
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name)
{
@@ -101,6 +96,10 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
+ gt->sysfs_defaults = kobject_create_and_add(".defaults", &gt->sysfs_gt);
+ if (!gt->sysfs_defaults)
+ goto exit_fail;
+
intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
return;
@@ -113,5 +112,6 @@ exit_fail:
void intel_gt_sysfs_unregister(struct intel_gt *gt)
{
+ kobject_put(gt->sysfs_defaults);
kobject_put(&gt->sysfs_gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
index a99aa7e8b01a..6232923a420d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -10,6 +10,7 @@
#include <linux/kobject.h>
#include "i915_gem.h" /* GEM_BUG_ON() */
+#include "intel_gt_types.h"
struct intel_gt;
@@ -22,6 +23,11 @@ intel_gt_create_kobj(struct intel_gt *gt,
struct kobject *dir,
const char *name);
+static inline struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct intel_gt, sysfs_gt);
+}
+
void intel_gt_sysfs_register(struct intel_gt *gt);
void intel_gt_sysfs_unregister(struct intel_gt *gt);
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 73a8b46e0234..e066cc33d9f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -727,6 +727,34 @@ static const struct attribute *media_perf_power_attrs[] = {
NULL
};
+static ssize_t
+default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.min_freq);
+}
+
+static struct kobj_attribute default_min_freq_mhz =
+__ATTR(rps_min_freq_mhz, 0444, default_min_freq_mhz_show, NULL);
+
+static ssize_t
+default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.max_freq);
+}
+
+static struct kobj_attribute default_max_freq_mhz =
+__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
+
+static const struct attribute * const rps_defaults_attrs[] = {
+ &default_min_freq_mhz.attr,
+ &default_max_freq_mhz.attr,
+ NULL
+};
+
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
const struct attribute * const *attrs)
{
@@ -776,4 +804,10 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
"failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
gt->info.id, ERR_PTR(ret));
}
+
+ ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to add gt%u rps defaults (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
}
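The new ".defaults" kobject and the read-only rps_{min,max}_freq_mhz attributes above let userspace recover the hardware default limits after the writable softlimits have been changed. A small sketch of reading one of them follows; the sysfs path is an assumption based on the usual card0/gt0 layout and is not spelled out by this patch.

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/class/drm/card0/gt/gt0/.defaults/rps_max_freq_mhz";
        unsigned int mhz;
        FILE *f = fopen(path, "r");

        if (!f || fscanf(f, "%u", &mhz) != 1) {
                perror(path);
                return 1;
        }
        printf("default max freq: %u MHz\n", mhz);
        fclose(f);
        return 0;
}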
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index df708802889d..4d56f7d5a3be 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -75,6 +76,11 @@ enum intel_submission_method {
INTEL_SUBMISSION_GUC,
};
+struct gt_defaults {
+ u32 min_freq;
+ u32 max_freq;
+};
+
struct intel_gt {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
@@ -83,7 +89,22 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gsc gsc;
- struct mutex tlb_invalidate_lock;
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+ * After unbinding the PTE, we need to ensure the TLBs
+ * are invalidated prior to releasing the physical pages.
+ * But we only need one such invalidation for all unbinds,
+ * so we track how many TLB invalidations have been
+ * performed since unbinding the PTE and only emit an extra
+ * invalidate if no full barrier has been passed.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
struct i915_wa_list wa_list;
@@ -235,6 +256,10 @@ struct intel_gt {
/* gt/gtN sysfs */
struct kobject sysfs_gt;
+
+ /* sysfs defaults per gt */
+ struct gt_defaults defaults;
+ struct kobject *sysfs_defaults;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 14fe65812e42..1d19c073ba2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -12,6 +12,7 @@
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_rps.h"
struct ia_constants {
unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;
- if (rps->max_freq <= rps->min_freq)
- return false;
-
consts->max_ia_freq = cpu_max_MHz();
consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
- consts->min_gpu_freq = rps->min_freq;
- consts->max_gpu_freq = rps->max_freq;
- if (GRAPHICS_VER(i915) >= 9) {
- /* Convert GT frequency to 50 HZ units */
- consts->min_gpu_freq /= GEN9_FREQ_SCALER;
- consts->max_gpu_freq /= GEN9_FREQ_SCALER;
- }
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
return true;
}
@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
return;
/*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't accidentally get into an infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
+ /*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eec73c66406c..070cec4ff8a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1242,6 +1242,23 @@ dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
return cs;
}
+/*
+ * The bspec's tuning guide asks us to program a vertical watermark value of
+ * 0x3FF. However this register is not saved/restored properly by the
+ * hardware, so we're required to apply the desired value via INDIRECT_CTX
+ * batch buffer to ensure the value takes effect properly. All other bits
+ * in this register should remain at 0 (the hardware default).
+ */
+static u32 *
+dg2_emit_draw_watermark_setting(u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
+ *cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);
+
+ return cs;
+}
+
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
@@ -1263,6 +1280,10 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
if (!HAS_FLAT_CCS(ce->engine->i915))
cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ /* Wa_16014892111 */
+ if (IS_DG2(ce->engine->i915))
+ cs = dg2_emit_draw_watermark_setting(cs);
+
return cs;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2c35324b5f68..aaaf1906026c 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -511,44 +511,16 @@ static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
return cmd;
}
-static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
-{
- u32 num_cmds, num_blks, total_size;
-
- if (!GET_CCS_BYTES(i915, size))
- return 0;
-
- /*
- * XY_CTRL_SURF_COPY_BLT transfers CCS in 256 byte
- * blocks. one XY_CTRL_SURF_COPY_BLT command can
- * transfer upto 1024 blocks.
- */
- num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
- NUM_CCS_BYTES_PER_BLOCK);
- num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
- total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
-
- /*
- * Adding a flush before and after XY_CTRL_SURF_COPY_BLT
- */
- total_size += 2 * MI_FLUSH_DW_SIZE;
-
- return total_size;
-}
-
static int emit_copy_ccs(struct i915_request *rq,
u32 dst_offset, u8 dst_access,
u32 src_offset, u8 src_access, int size)
{
struct drm_i915_private *i915 = rq->engine->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
- u32 num_ccs_blks, ccs_ring_size;
+ u32 num_ccs_blks;
u32 *cs;
- ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
- WARN_ON(!ccs_ring_size);
-
- cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+ cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -583,8 +555,7 @@ static int emit_copy_ccs(struct i915_request *rq,
FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
- if (ccs_ring_size & 1)
- *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -638,40 +609,38 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
{
- int len = 0;
+ u64 len = 0;
while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg);
sg = sg_next(sg);
- };
+ }
return len;
}
-static void
+static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
- int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
- if (ccs_bytes_to_cpy) {
- if (!src_is_lmem)
- /*
- * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
- * will be taken for the blt. in Flat-ccs supported
- * platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
- * for main memory
- */
- *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
- } else { /* ccs handling is not required */
- *src_sz = CHUNK_SZ;
- }
+ if (ccs_bytes_to_cpy && !src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS capable
+ * platforms a smem object will have more pages than
+ * required for main memory, hence limit it to the
+ * required size for main memory.
+ */
+ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+ else
+ return CHUNK_SZ;
}
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
- u32 len;
+ u64 len;
do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,13 +671,13 @@ intel_context_migrate_copy(struct intel_context *ce,
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
- u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
enum i915_cache_level ccs_cache_level;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
- int src_sz, dst_sz;
- bool ccs_is_src;
+ u64 src_sz, dst_sz;
+ bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +718,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -788,8 +759,8 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- calculate_chunk_sz(i915, src_is_lmem, &src_sz,
- bytes_to_cpy, ccs_bytes_to_cpy);
+ src_sz = calculate_chunk_sz(i915, src_is_lmem,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
src_offset, src_sz);
@@ -852,6 +823,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
}
/* Arbitration is re-enabled between requests. */
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d8b94d638559..6ee8d1127016 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
- if (vma_res->allocated)
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6e90032e12e9..aa6aed837194 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#ifdef CONFIG_64BIT
static void _release_bars(struct pci_dev *pdev)
{
int resno;
@@ -111,6 +112,9 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
static int
region_lmem_release(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c68d36fb5bbd..b36674356986 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -776,7 +776,7 @@ static void revoke_mmaps(struct intel_gt *gt)
continue;
node = &vma->mmo->vma_node;
- vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1281,9 +1281,6 @@ static void intel_gt_reset_global(struct intel_gt *gt,
intel_wedge_on_timeout(&w, gt, 5 * HZ) {
intel_display_prepare_reset(gt->i915);
- /* Flush everyone using a resource about to be clobbered */
- synchronize_srcu_expedited(&gt->reset.backoff_srcu);
-
intel_gt_reset(gt, engine_mask, reason);
intel_display_finish_reset(gt->i915);
@@ -1392,6 +1389,9 @@ void intel_gt_handle_error(struct intel_gt *gt,
}
}
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
+
intel_gt_reset_global(gt, engine_mask, msg);
if (!intel_uc_uses_guc_submission(&gt->uc)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fb3f57ee450b..6fadde4ee7bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1107,7 +1107,12 @@ void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *c
caps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ if (GRAPHICS_VER(i915) >= 10)
+ caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
+ intel_uncore_read(to_gt(i915)->uncore,
+ GEN10_FREQ_INFO_REC));
+ else
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
caps->min_freq = (rp_state_cap >> 16) & 0xff;
}
@@ -1546,6 +1551,9 @@ void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ if (!intel_rps_is_enabled(rps))
+ return;
+
intel_rps_clear_enabled(rps);
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);
@@ -1979,7 +1987,9 @@ void intel_rps_init(struct intel_rps *rps)
/* Derive initial user preferences/limits from the hardware limits */
rps->max_freq_softlimit = rps->max_freq;
+ rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
rps->min_freq_softlimit = rps->min_freq;
+ rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
/* After setting max-softlimit, find the overclock max freq */
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
@@ -2126,6 +2136,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in a raw format. On newer platforms the raw
+ * value is in units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2214,6 +2249,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in a raw format. On newer platforms the raw
+ * value is in units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
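intel_rps_get_{min,max}_raw_freq() above return frequencies in the 50 MHz "raw" units that the ring-frequency table code now consumes directly: SLPC values in MHz are divided by GT_FREQUENCY_MULTIPLIER, while Gen9+ hardware fields are divided by GEN9_FREQ_SCALER. A standalone illustration of those conversions follows, using the driver's constants (50 and 3) but otherwise made-up example values.

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER        3

static unsigned int div_round_closest(unsigned int x, unsigned int d)
{
        return (x + d / 2) / d;
}

int main(void)
{
        unsigned int slpc_rp0_mhz = 2100;   /* example SLPC value, in MHz */
        unsigned int gen9_hw_field = 66;    /* example RP_STATE_CAP field */

        printf("slpc raw: %u (x50 MHz)\n",
               div_round_closest(slpc_rp0_mhz, GT_FREQUENCY_MULTIPLIER));
        printf("gen9 raw: %u (x50 MHz)\n", gen9_hw_field / GEN9_FREQ_SCALER);
        return 0;
}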
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 1e8d56491308..4509dfdc52e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e8111fce56d0..6d2003d598e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -568,6 +568,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_add(wal,
@@ -2102,13 +2103,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* Wa_1509235366:dg2 */
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
-
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
}
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
@@ -2119,6 +2113,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_1509727124:dg2 */
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ SC_DISABLE_POWER_OPTIMIZATION_EBB);
+ }
+
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
@@ -2195,15 +2196,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
- IS_DG2_G11(i915)) {
- /* Wa_22012654132:dg2 */
- wa_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
- 0 /* write-only, so skip validation */,
- true);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2397,7 +2389,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
FF_DOP_CLOCK_GATE_DISABLE);
}
- if (HAS_PERCTX_PREEMPT_CTRL(i915)) {
+ if (IS_GRAPHICS_VER(i915, 9, 12)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -2670,6 +2662,56 @@ ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them with the same
+ * workaround infrastructure to ensure that they're automatically added to
+ * the GuC save/restore lists, re-applied at the right times, and checked for
+ * any conflicting programming requested by real workarounds.
+ *
+ * Programming settings should be added here only if their registers are not
+ * part of an engine's register state context. If a register is part of a
+ * context, then any tuning settings should be programmed in an appropriate
+ * function invoked by __intel_engine_init_ctx_wa().
+ */
+static void
+add_render_compute_tuning_settings(struct drm_i915_private *i915,
+ struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(i915)) {
+ wa_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ }
+
+ if (IS_DG2(i915)) {
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
+
+ /*
+ * This is also listed as Wa_22012654132 for certain DG2
+ * steppings, but the tuning setting programming is a superset
+ * since it applies to all DG2 variants and steppings.
+ *
+ * Note that register 0xE420 is write-only and cannot be read
+ * back for verification on DG2 (due to Wa_14012342262), so
+ * we need to explicitly skip the readback.
+ */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
+ }
+
+ /*
+ * This tuning setting proves beneficial only on ATS-M designs; the
+ * default "age based" setting is optimal on regular DG2 and other
+ * platforms.
+ */
+ if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
+ wa_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
+ THREAD_EX_ARB_MODE_RR_AFTER_DEP);
+}
+
+/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
* specific engine. Since all render+compute engines get reset
@@ -2683,14 +2725,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_PONTEVECCHIO(i915)) {
- /*
- * The following is not actually a "workaround" but rather
- * a recommended tuning setting documented in the bspec's
- * performance guide section.
- */
- wa_write(wal, XEHPC_L3SCRUB, SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ add_render_compute_tuning_settings(i915, wal);
+ if (IS_PONTEVECCHIO(i915)) {
/* Wa_16016694945 */
wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 09f8cd2d0e2c..1e08b2473b99 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -2077,7 +2077,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
goto out;
}
- intel_context_set_banned(rq->context);
+ intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2136,7 +2136,7 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
if (err)
goto out;
- intel_context_set_banned(rq[1]->context);
+ intel_context_ban(rq[1]->context, rq[1]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2219,7 +2219,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- intel_context_set_banned(rq[2]->context);
+ intel_context_ban(rq[2]->context, rq[2]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2234,7 +2234,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
goto out;
}
- if (rq[1]->fence.error != 0) {
+ /*
+ * The behavior differs depending on whether semaphores are available. With
+ * semaphores the subsequent request is on the hardware and is not cancelled,
+ * while without semaphores the request is held in the driver and cancelled.
+ */
+ if (intel_engine_has_semaphores(rq[1]->engine) &&
+ rq[1]->fence.error != 0) {
pr_err("Normal inflight1 request did not complete\n");
err = -EINVAL;
goto out;
@@ -2282,7 +2288,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
goto out;
}
- intel_context_set_banned(rq->context);
+ intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 6493265d5f64..7f3bb1d34dfb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1302,13 +1302,15 @@ static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
+ engine = intel_selftest_find_any_engine(gt);
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
@@ -1432,7 +1434,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
int (*fn)(void *),
unsigned int flags)
{
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
@@ -1444,6 +1446,8 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
return 0;
+ engine = intel_selftest_find_any_engine(gt);
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
@@ -1819,12 +1823,14 @@ static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct hang h;
struct i915_request *rq;
struct i915_gpu_coredump *error;
int err;
+ engine = intel_selftest_find_any_engine(gt);
+
/* Check that we can issue a global GPU and engine reset */
if (!intel_has_reset_engine(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index ac29691e0b1a..f8a1d27df272 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -166,6 +166,15 @@ static int run_test(struct intel_gt *gt, int test_type)
return -EIO;
}
+ /*
+ * FIXME: With efficient frequency enabled, GuC can request
+ * frequencies higher than the SLPC max. While this is fixed
+ * in GuC, we level set these tests with RPn as min.
+ */
+ err = slpc_set_min_freq(slpc, slpc->min_freq);
+ if (err)
+ return err;
+
if (slpc->min_freq == slpc->rp0_freq) {
pr_err("Min/Max are fused to the same value\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 4ef9990ed7f8..29ef8afc8c2e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -122,6 +122,9 @@ enum intel_guc_action {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index df83c1cc7c7a..28b8387f97b7 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -37,6 +37,7 @@
* | | | - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large) |
* | | | - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message) |
* | | | - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified) |
+ * | | | - _`GUC_CTB_STATUS_UNUSED` = 8 (CTB is not in use) |
* +---+-------+--------------------------------------------------------------+
* |...| | RESERVED = MBZ |
* +---+-------+--------------------------------------------------------------+
@@ -49,9 +50,10 @@ struct guc_ct_buffer_desc {
u32 tail;
u32 status;
#define GUC_CTB_STATUS_NO_ERROR 0
-#define GUC_CTB_STATUS_OVERFLOW (1 << 0)
-#define GUC_CTB_STATUS_UNDERFLOW (1 << 1)
-#define GUC_CTB_STATUS_MISMATCH (1 << 2)
+#define GUC_CTB_STATUS_OVERFLOW BIT(0)
+#define GUC_CTB_STATUS_UNDERFLOW BIT(1)
+#define GUC_CTB_STATUS_MISMATCH BIT(2)
+#define GUC_CTB_STATUS_UNUSED BIT(3)
u32 reserved[13];
} __packed;
static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2706a8c65090..24451d000a6a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -224,53 +224,22 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
- u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
- u32 flags;
-
- #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
- #define LOG_UNIT SZ_1M
- #define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
- #else
- #define LOG_UNIT SZ_4K
- #define LOG_FLAG 0
- #endif
-
- #if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
- #define CAPTURE_UNIT SZ_1M
- #define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
- #else
- #define CAPTURE_UNIT SZ_4K
- #define CAPTURE_FLAG 0
- #endif
-
- BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
- BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
- BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
-
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
- BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
- (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
+ struct intel_guc_log *log = &guc->log;
+ u32 offset, flags;
+
+ GEM_BUG_ON(!log->sizes_initialised);
+
+ offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
- CAPTURE_FLAG |
- LOG_FLAG |
- ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
- ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
+ log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
+ (log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
(offset << GUC_LOG_BUF_ADDR_SHIFT);
- #undef LOG_UNIT
- #undef LOG_FLAG
- #undef CAPTURE_UNIT
- #undef CAPTURE_FLAG
-
return flags;
}
@@ -389,6 +358,23 @@ void intel_guc_write_params(struct intel_guc *guc)
intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ u32 stamp = 0;
+ u64 ktime;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
+ ktime = ktime_get_boottime_ns();
+
+ drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
+ drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
+ drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
+}
+
int intel_guc_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index d0d99f178f2d..804133df1ac9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -170,6 +170,11 @@ struct intel_guc {
/** @ads_engine_usage_size: size of engine usage in the ADS */
u32 ads_engine_usage_size;
+ /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+ struct i915_vma *lrc_desc_pool_v69;
+ /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+ void *lrc_desc_pool_vaddr_v69;
+
/**
* @context_lookup: used to resolve intel_context from guc_id, if a
* context is present in this structure it is registered with the GuC
@@ -459,4 +464,6 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
void intel_guc_write_barrier(struct intel_guc *guc);
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ba7541f3ca61..74cbe8eaf531 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -464,7 +464,11 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
}
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
-#define LRC_SKIP_SIZE (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
+#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+ XEHP_LR_HW_CONTEXT_SIZE : \
+ LR_HW_CONTEXT_SIZE)
+#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
static int guc_prep_golden_context(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -525,7 +529,7 @@ static int guc_prep_golden_context(struct intel_guc *guc)
* on all engines).
*/
ads_blob_write(guc, ads.eng_state_size[guc_class],
- real_size - LRC_SKIP_SIZE);
+ real_size - LRC_SKIP_SIZE(gt->i915));
ads_blob_write(guc, ads.golden_context_lrca[guc_class],
addr_ggtt);
@@ -599,7 +603,7 @@ static void guc_init_golden_context(struct intel_guc *guc)
}
GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
- real_size - LRC_SKIP_SIZE);
+ real_size - LRC_SKIP_SIZE(gt->i915));
GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);
addr_ggtt += alloc_size;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 75257bd20ff0..8f1165146013 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -600,10 +600,8 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
return 0;
}
-#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
-
-int
-intel_guc_capture_output_min_size_est(struct intel_guc *guc)
+static int
+guc_capture_output_min_size_est(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -623,13 +621,8 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
* For each engine instance, there would be 1 x guc_state_capture_group_t output
* followed by 3 x guc_state_capture_t lists. The latter is how the register
* dumps are split across different register types (where the '3' are global vs class
- * vs instance). Finally, let's multiply the whole thing by 3x (just so we are
- * not limited to just 1 round of data in a worst case full register dump log)
- *
- * NOTE: intel_guc_log that allocates the log buffer would round this size up to
- * a power of two.
+ * vs instance).
*/
-
for_each_engine(engine, gt, id) {
worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
(3 * sizeof(struct guc_state_capture_header_t));
@@ -649,7 +642,31 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
- return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER);
+ return worst_min_size;
+}
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before the i915 can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int min_size = guc_capture_output_min_size_est(guc);
+ int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+
+ if (min_size < 0)
+ drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ min_size);
+ else if (min_size > buffer_size)
+ drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
+ buffer_size, min_size);
+ else if (spare_size > buffer_size)
+ drm_notice(&i915->drm, "GuC error state capture buffer may be too small: %d < %d (min = %d)\n",
+ buffer_size, spare_size, min_size);
}
/*
@@ -1278,7 +1295,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
log_buf_state = guc->log.buf_addr +
(sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
- src_data = guc->log.buf_addr + intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER);
+ src_data = guc->log.buf_addr +
+ intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
/*
* Make a copy of the state structure, inside GuC log buffer
@@ -1286,7 +1304,7 @@ static void __guc_capture_process_output(struct intel_guc *guc)
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
- buffer_size = intel_guc_get_log_buffer_size(GUC_CAPTURE_LOG_BUFFER);
+ buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_count = log_buf_state_local.buffer_full_cnt;
@@ -1365,33 +1383,22 @@ guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
return NULL;
}
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define __out(a, ...) \
- do { \
- drm_warn((&(a)->i915->drm), __VA_ARGS__); \
- i915_error_printf((a), __VA_ARGS__); \
- } while (0)
-#else
-#define __out(a, ...) \
- i915_error_printf(a, __VA_ARGS__)
-#endif
-
#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
do { \
- __out(ebuf, " i915-Eng-Name: %s command stream\n", \
- (eng)->name); \
- __out(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
- __out(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
- __out(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
- (eng)->logical_mask); \
+ i915_error_printf(ebuf, " i915-Eng-Name: %s command stream\n", \
+ (eng)->name); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
+ i915_error_printf(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
+ (eng)->logical_mask); \
} while (0)
#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
do { \
- __out(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
- (node)->eng_inst); \
- __out(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
- __out(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
+ i915_error_printf(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
+ (node)->eng_inst); \
+ i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
+ i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
} while (0)
int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
@@ -1423,57 +1430,57 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
guc = &ee->engine->gt->uc.guc;
- __out(ebuf, "global --- GuC Error Capture on %s command stream:\n",
- ee->engine->name);
+ i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
+ ee->engine->name);
node = ee->guc_capture_node;
if (!node) {
- __out(ebuf, " No matching ee-node\n");
+ i915_error_printf(ebuf, " No matching ee-node\n");
return 0;
}
- __out(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
+ i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
- __out(ebuf, " RegListType: %s\n",
- datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
- __out(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
+ i915_error_printf(ebuf, " RegListType: %s\n",
+ datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
+ i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
switch (i) {
case GUC_CAPTURE_LIST_TYPE_GLOBAL:
default:
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
- __out(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
- __out(ebuf, " i915-Eng-Class: %d\n",
- guc_class_to_engine_class(node->eng_class));
+ i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
+ i915_error_printf(ebuf, " i915-Eng-Class: %d\n",
+ guc_class_to_engine_class(node->eng_class));
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
if (eng)
GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
else
- __out(ebuf, " i915-Eng-Lookup Fail!\n");
+ i915_error_printf(ebuf, " i915-Eng-Lookup Fail!\n");
GCAP_PRINT_GUC_INST_INFO(ebuf, node);
break;
}
numregs = node->reginfo[i].num_regs;
- __out(ebuf, " NumRegs: %d\n", numregs);
+ i915_error_printf(ebuf, " NumRegs: %d\n", numregs);
j = 0;
while (numregs--) {
regs = node->reginfo[i].regs;
str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
node->eng_class, 0, regs[j].offset, &is_ext);
if (!str)
- __out(ebuf, " REG-0x%08x", regs[j].offset);
+ i915_error_printf(ebuf, " REG-0x%08x", regs[j].offset);
else
- __out(ebuf, " %s", str);
+ i915_error_printf(ebuf, " %s", str);
if (is_ext)
- __out(ebuf, "[%ld][%ld]",
- FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
- FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
- __out(ebuf, ": 0x%08x\n", regs[j].value);
+ i915_error_printf(ebuf, "[%ld][%ld]",
+ FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
+ FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
+ i915_error_printf(ebuf, ": 0x%08x\n", regs[j].value);
++j;
}
}
@@ -1580,5 +1587,7 @@ int intel_guc_capture_init(struct intel_guc *guc)
INIT_LIST_HEAD(&guc->capture->outlist);
INIT_LIST_HEAD(&guc->capture->cachelist);
+ check_guc_capture_size(guc);
+
return 0;
}
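
The hunk above stops exporting the worst-case estimate and instead checks it internally at init time: guc_capture_output_min_size_est() computes the size of one worst-case capture, and check_guc_capture_size() compares it, and three times it (to allow back-to-back captures), against the capture section of the GuC log buffer. A minimal sketch of that severity ladder, not part of the patch; the helper name example_check_capture_fit and the enum are hypothetical and exist only for illustration:

enum example_capture_fit { FIT_OK, FIT_TIGHT, FIT_TOO_SMALL, FIT_UNKNOWN };

static enum example_capture_fit
example_check_capture_fit(int min_size, u32 buffer_size)
{
	/* min_size < 0 means the estimate itself failed */
	if (min_size < 0)
		return FIT_UNKNOWN;

	/* buffer cannot hold even one worst-case capture */
	if ((u32)min_size > buffer_size)
		return FIT_TOO_SMALL;

	/* holds one capture, but back-to-back captures (3x) may be dropped */
	if ((u32)min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER > buffer_size)
		return FIT_TIGHT;

	return FIT_OK;
}
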
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
index d3d7bd0b6db6..fbd3713c7832 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -21,7 +21,6 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
struct intel_context *ce);
void intel_guc_capture_process(struct intel_guc *guc);
-int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
void **outptr);
int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index f01325cd1b62..2b22065e87bf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -455,6 +455,7 @@ corrupted:
/**
* wait_for_ct_request_update - Wait for CT request state update.
+ * @ct: pointer to CT
* @req: pointer to pending request
* @status: placeholder for status
*
@@ -467,9 +468,10 @@ corrupted:
* * 0 response received (status is valid)
* * -ETIMEDOUT no response within hardcoded timeout
*/
-static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
+static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
{
int err;
+ bool ct_enabled;
/*
* Fast commands should complete in less than 10us, so sample quickly
@@ -481,12 +483,15 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
#define done \
- (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
+ (!(ct_enabled = intel_guc_ct_enabled(ct)) || \
+ FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
GUC_HXG_ORIGIN_GUC)
err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
if (err)
err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
+ if (!ct_enabled)
+ err = -ENODEV;
*status = req->status;
return err;
@@ -703,11 +708,18 @@ retry:
intel_guc_notify(ct_to_guc(ct));
- err = wait_for_ct_request_update(&request, status);
+ err = wait_for_ct_request_update(ct, &request, status);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
if (unlikely(err)) {
- CT_ERROR(ct, "No response for request %#x (fence %u)\n",
- action[0], request.fence);
+ if (err == -ENODEV)
+ /* wait_for_ct_request_update returns -ENODEV on reset/suspend in progress.
+ * In this case, output is debug rather than error info
+ */
+ CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
+ action[0], request.fence);
+ else
+ CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+ action[0], request.fence);
goto unlink;
}
@@ -771,8 +783,9 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
- action[0], ERR_PTR(ret), status);
+ if (ret != -ENODEV)
+ CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+ action[0], ERR_PTR(ret), status);
} else if (unlikely(ret)) {
CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
action[0], ret, ret);
@@ -816,8 +829,22 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
if (unlikely(ctb->broken))
return -EPIPE;
- if (unlikely(desc->status))
- goto corrupted;
+ if (unlikely(desc->status)) {
+ u32 status = desc->status;
+
+ if (status & GUC_CTB_STATUS_UNUSED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset. But should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
+ status &= ~GUC_CTB_STATUS_UNUSED;
+ }
+
+ if (status)
+ goto corrupted;
+ }
GEM_BUG_ON(head > size);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index b3c9a9327f76..323b055e5db9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -204,6 +204,20 @@ struct guc_wq_item {
u32 fence_id;
} __packed;
+struct guc_process_desc_v69 {
+ u32 stage_id;
+ u64 db_base_addr;
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u64 wq_base_addr;
+ u32 wq_size_bytes;
+ u32 wq_status;
+ u32 engine_presence;
+ u32 priority;
+ u32 reserved[36];
+} __packed;
+
struct guc_sched_wq_desc {
u32 head;
u32 tail;
@@ -228,6 +242,37 @@ struct guc_ctxt_registration_info {
};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69 BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+ u32 hw_context_desc;
+ u32 slpm_perf_mode_hint; /* SLPC v1 only */
+ u32 slpm_freq_hint;
+ u32 engine_submit_mask; /* In logical space */
+ u8 engine_class;
+ u8 reserved0[3];
+ u32 priority;
+ u32 process_desc;
+ u32 wq_addr;
+ u32 wq_size;
+ u32 context_flags; /* CONTEXT_REGISTRATION_* */
+ /* Time for one workload to execute. (in micro seconds) */
+ u32 execution_quantum;
+ /* Time to wait for a preemption request to complete before issuing a
+ * reset. (in micro seconds).
+ */
+ u32 preemption_timeout;
+ u32 policy_flags; /* CONTEXT_POLICY_* */
+ u32 reserved1[19];
+} __packed;
+
/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
u32 kl;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 25b2d7ce6640..b071973ac41c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -13,8 +13,186 @@
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
+#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#else
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_2M
+#endif
+
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
+struct guc_log_section {
+ u32 max;
+ u32 flag;
+ u32 default_val;
+ const char *name;
+};
+
+static s32 scale_log_param(struct intel_guc_log *log, const struct guc_log_section *section,
+ s32 param)
+{
+ /* -1 means default */
+ if (param < 0)
+ return section->default_val;
+
+ /* Check for 32-bit overflow */
+ if (param >= SZ_4K) {
+ drm_err(&guc_to_gt(log_to_guc(log))->i915->drm, "Size too large for GuC %s log: %dMB!",
+ section->name, param);
+ return section->default_val;
+ }
+
+ /* Param units are 1MB */
+ return param * SZ_1M;
+}
+
+static void _guc_log_init_sizes(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
+ {
+ GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
+ "crash dump"
+ },
+ {
+ GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
+ "debug",
+ },
+ {
+ GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
+ GUC_LOG_CAPTURE_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
+ "capture",
+ }
+ };
+ s32 params[GUC_LOG_SECTIONS_LIMIT] = {
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE / SZ_1M,
+ GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE / SZ_1M,
+ GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE / SZ_1M,
+ };
+ int i;
+
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
+ log->sizes[i].bytes = scale_log_param(log, sections + i, params[i]);
+
+ /* If debug size > 1MB then bump default crash size to keep the same units */
+ if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
+ log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;
+
+ /* Prepare the GuC API structure fields: */
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
+ /* Convert to correct units */
+ if ((log->sizes[i].bytes % SZ_1M) == 0) {
+ log->sizes[i].units = SZ_1M;
+ log->sizes[i].flag = sections[i].flag;
+ } else {
+ log->sizes[i].units = SZ_4K;
+ log->sizes[i].flag = 0;
+ }
+
+ if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
+ drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
+ sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
+ log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
+
+ if (!log->sizes[i].count) {
+ drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
+ } else {
+ /* Size is +1 unit */
+ log->sizes[i].count--;
+ }
+
+ /* Clip to field size */
+ if (log->sizes[i].count > sections[i].max) {
+ drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
+ sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
+ log->sizes[i].count = sections[i].max;
+ }
+ }
+
+ if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
+ drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units,
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
+ log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
+ }
+
+ log->sizes_initialised = true;
+}
+
+static void guc_log_init_sizes(struct intel_guc_log *log)
+{
+ if (log->sizes_initialised)
+ return;
+
+ _guc_log_init_sizes(log);
+}
+
+static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
+}
+
+static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
+}
+
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
+}
+
+static u32 intel_guc_log_size(struct intel_guc_log *log)
+{
+ /*
+ * GuC Log buffer Layout:
+ *
+ * NB: Ordering must follow "enum guc_log_buffer_type".
+ *
+ * +===============================+ 00B
+ * | Debug state header |
+ * +-------------------------------+ 32B
+ * | Crash dump state header |
+ * +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Debug logs |
+ * +===============================+ + DEBUG_SIZE
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
+ */
+ return PAGE_SIZE +
+ intel_guc_log_section_size_crash(log) +
+ intel_guc_log_section_size_debug(log) +
+ intel_guc_log_section_size_capture(log);
+}
+
/**
* DOC: GuC firmware log
*
@@ -139,7 +317,8 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);
+ relay_reserve(log->relay.channel, log->vma->obj->base.size -
+ intel_guc_log_section_size_capture(log));
/* Switch to the next sub buffer */
relay_flush(log->relay.channel);
@@ -184,15 +363,16 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
return overflow;
}
-unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
- return DEBUG_BUFFER_SIZE;
+ return intel_guc_log_section_size_debug(log);
case GUC_CRASH_DUMP_LOG_BUFFER:
- return CRASH_BUFFER_SIZE;
+ return intel_guc_log_section_size_crash(log);
case GUC_CAPTURE_LOG_BUFFER:
- return CAPTURE_BUFFER_SIZE;
+ return intel_guc_log_section_size_capture(log);
default:
MISSING_CASE(type);
}
@@ -200,7 +380,8 @@ unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
return 0;
}
-size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
{
enum guc_log_buffer_type i;
size_t offset = PAGE_SIZE;/* for the log_buffer_states */
@@ -208,7 +389,7 @@ size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
if (i == type)
break;
- offset += intel_guc_get_log_buffer_size(i);
+ offset += intel_guc_get_log_buffer_size(log, i);
}
return offset;
@@ -259,7 +440,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
- buffer_size = intel_guc_get_log_buffer_size(type);
+ buffer_size = intel_guc_get_log_buffer_size(log, type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
@@ -374,7 +555,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
* Keep the size of sub buffers same as shared log buffer
* but GuC log-events excludes the error-state-capture logs
*/
- subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;
+ subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -461,32 +642,7 @@ int intel_guc_log_create(struct intel_guc_log *log)
GEM_BUG_ON(log->vma);
- /*
- * GuC Log buffer Layout
- * (this ordering must follow "enum guc_log_buffer_type" definition)
- *
- * +===============================+ 00B
- * | Debug state header |
- * +-------------------------------+ 32B
- * | Crash dump state header |
- * +-------------------------------+ 64B
- * | Capture state header |
- * +-------------------------------+ 96B
- * | |
- * +===============================+ PAGE_SIZE (4KB)
- * | Debug logs |
- * +===============================+ + DEBUG_SIZE
- * | Crash Dump logs |
- * +===============================+ + CRASH_SIZE
- * | Capture logs |
- * +===============================+ + CAPTURE_SIZE
- */
- if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
- DRM_WARN("GuC log buffer for state_capture maybe too small. %d < %d\n",
- CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));
-
- guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
- CAPTURE_BUFFER_SIZE;
+ guc_log_size = intel_guc_log_size(log);
vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
@@ -749,8 +905,9 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
struct intel_guc *guc = log_to_guc(log);
struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
struct drm_i915_gem_object *obj = NULL;
- u32 *map;
- int i = 0;
+ void *map;
+ u32 *page;
+ int i, j;
if (!intel_guc_is_supported(guc))
return -ENODEV;
@@ -763,21 +920,34 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
if (!obj)
return 0;
+ page = (u32 *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ intel_guc_dump_time_info(guc, p);
+
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
DRM_DEBUG("Failed to pin object\n");
drm_puts(p, "(log data unaccessible)\n");
+ free_page((unsigned long)page);
return PTR_ERR(map);
}
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
- drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(map + i), *(map + i + 1),
- *(map + i + 2), *(map + i + 3));
+ for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
+ if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
+ memcpy(page, map + i, PAGE_SIZE);
+
+ for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(page + j + 0), *(page + j + 1),
+ *(page + j + 2), *(page + j + 3));
+ }
drm_puts(p, "\n");
i915_gem_object_unpin_map(obj);
+ free_page((unsigned long)page);
return 0;
}
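
The sizing code added above converts each section's byte size into the unit/count/flag encoding that guc_ctl_log_params_flags() writes to the GuC: whole-megabyte sizes use 1MB units plus the corresponding ALLOC_UNITS flag, anything else falls back to 4KB units, and the count field is stored as number of units minus one. A standalone sketch of that encoding, not part of the patch; example_encode_log_section is a hypothetical helper name used only here:

static void example_encode_log_section(u32 bytes, u32 alloc_units_flag,
				       u32 *units, u32 *count, u32 *flag)
{
	/* Whole megabytes can use the larger allocation unit */
	if ((bytes % SZ_1M) == 0) {
		*units = SZ_1M;
		*flag = alloc_units_flag;	/* e.g. GUC_LOG_LOG_ALLOC_UNITS */
	} else {
		*units = SZ_4K;
		*flag = 0;
	}

	/* The GuC API field encodes "number of units - 1" */
	*count = bytes / *units - 1;
}

/* e.g. 16MB debug -> units SZ_1M, count 15; 64KB debug -> units SZ_4K, count 15 */
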
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 18007e639be9..02127703be80 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -15,20 +15,6 @@
struct intel_guc;
-#if defined(CONFIG_DRM_I915_DEBUG_GUC)
-#define CRASH_BUFFER_SIZE SZ_2M
-#define DEBUG_BUFFER_SIZE SZ_16M
-#define CAPTURE_BUFFER_SIZE SZ_4M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
-#define CRASH_BUFFER_SIZE SZ_1M
-#define DEBUG_BUFFER_SIZE SZ_2M
-#define CAPTURE_BUFFER_SIZE SZ_1M
-#else
-#define CRASH_BUFFER_SIZE SZ_8K
-#define DEBUG_BUFFER_SIZE SZ_64K
-#define CAPTURE_BUFFER_SIZE SZ_16K
-#endif
-
/*
* While we're using plain log level in i915, GuC controls are much more...
* "elaborate"? We have a couple of bits for verbosity, separate bit for actual
@@ -46,10 +32,30 @@ struct intel_guc;
#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2)
#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
+enum {
+ GUC_LOG_SECTIONS_CRASH,
+ GUC_LOG_SECTIONS_DEBUG,
+ GUC_LOG_SECTIONS_CAPTURE,
+ GUC_LOG_SECTIONS_LIMIT
+};
+
struct intel_guc_log {
u32 level;
+
+ /* Allocation settings */
+ struct {
+ s32 bytes; /* Size in bytes */
+ s32 units; /* GuC API units - 1MB or 4KB */
+ s32 count; /* Number of API units */
+ u32 flag; /* GuC API units flag */
+ } sizes[GUC_LOG_SECTIONS_LIMIT];
+ bool sizes_initialised;
+
+ /* Combined buffer allocation */
struct i915_vma *vma;
void *buf_addr;
+
+ /* RelayFS support */
struct {
bool buf_in_use;
bool started;
@@ -58,6 +64,7 @@ struct intel_guc_log {
struct mutex lock;
u32 full_count;
} relay;
+
/* logging related stats */
struct {
u32 sampled_overflow;
@@ -69,8 +76,9 @@ struct intel_guc_log {
void intel_guc_log_init_early(struct intel_guc_log *log);
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type,
unsigned int full_cnt);
-unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type);
-size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type);
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type);
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log, enum guc_log_buffer_type type);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
@@ -92,4 +100,6 @@ void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
bool dump_load_err);
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 8dc063f087eb..a7092f711e9c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -102,6 +102,10 @@
#define GUC_SEND_TRIGGER (1<<0)
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
+#define GEN12_GUC_SEM_INTR_ENABLES _MMIO(0xc71c)
+#define GUC_SEM_INTR_ROUTE_TO_GUC BIT(31)
+#define GUC_SEM_INTR_ENABLE_ALL (0xff)
+
#define GUC_NUM_DOORBELLS 256
/* format of the HW-monitored doorbell cacheline */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ec9c4ca0f615..fdd895f73f9f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -137,17 +137,6 @@ static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
return ret > 0 ? -EPROTO : ret;
}
-static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
-{
- u32 request[] = {
- GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
- id,
- };
-
- return intel_guc_send(guc, request, ARRAY_SIZE(request));
-}
-
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
@@ -201,16 +190,6 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
return ret;
}
-static int slpc_unset_param(struct intel_guc_slpc *slpc,
- u8 id)
-{
- struct intel_guc *guc = slpc_to_guc(slpc);
-
- GEM_BUG_ON(id >= SLPC_MAX_PARAM);
-
- return guc_action_slpc_unset_param(guc, id);
-}
-
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -488,23 +467,33 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
/* Need a lock now since waitboost can be modifying min as well */
mutex_lock(&slpc->lock);
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-
- ret = slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- val);
-
- /* Return standardized err code for sysfs calls */
- if (ret)
- ret = -EIO;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ /* Ignore efficient freq if lower min freq is requested */
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val < slpc->rp1_freq);
+ if (ret) {
+ i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
+ ERR_PTR(ret));
+ goto out;
}
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ val);
+
if (!ret)
slpc->min_freq_softlimit = val;
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
+ /* Return standardized err code for sysfs calls */
+ if (ret)
+ ret = -EIO;
+
return ret;
}
@@ -575,45 +564,28 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
* unless they have deviated from defaults, in which case,
* we retain the values and set min/max accordingly.
*/
- if (!slpc->max_freq_softlimit)
+ if (!slpc->max_freq_softlimit) {
slpc->max_freq_softlimit = slpc->rp0_freq;
- else if (slpc->max_freq_softlimit != slpc->rp0_freq)
+ slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
+ } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
ret = intel_guc_slpc_set_max_freq(slpc,
slpc->max_freq_softlimit);
+ }
if (unlikely(ret))
return ret;
- if (!slpc->min_freq_softlimit)
- slpc->min_freq_softlimit = slpc->min_freq;
- else if (slpc->min_freq_softlimit != slpc->min_freq)
+ if (!slpc->min_freq_softlimit) {
+ ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
+ if (unlikely(ret))
+ return ret;
+ slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
+ } else if (slpc->min_freq_softlimit != slpc->min_freq) {
return intel_guc_slpc_set_min_freq(slpc,
slpc->min_freq_softlimit);
-
- return 0;
-}
-
-static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
-{
- int ret = 0;
-
- if (ignore) {
- ret = slpc_set_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
- ignore);
- if (!ret)
- return slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- slpc->min_freq);
- } else {
- ret = slpc_unset_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
- if (!ret)
- return slpc_unset_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
}
- return ret;
+ return 0;
}
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
@@ -675,14 +647,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
slpc_get_rp_values(slpc);
- /* Ignore efficient freq and set min to platform min */
- ret = slpc_ignore_eff_freq(slpc, true);
- if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
- ERR_PTR(ret));
- return ret;
- }
-
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 40f726c61e95..64c4e83153f4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -414,12 +414,15 @@ struct sync_semaphore {
};
struct parent_scratch {
- struct guc_sched_wq_desc wq_desc;
+ union guc_descs {
+ struct guc_sched_wq_desc wq_desc;
+ struct guc_process_desc_v69 pdesc;
+ } descs;
struct sync_semaphore go;
struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
- u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+ u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
LRC_STATE_OFFSET) / sizeof(u32)));
}
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->descs.pdesc;
+}
+
static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+__get_wq_desc_v70(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
- return &ps->wq_desc;
+ return &ps->descs.wq_desc;
}
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
- struct intel_context *ce,
- u32 wqi_size)
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
{
/*
* Check for space in work queue. Caching a value of head pointer in
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
#define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE) {
- ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+ ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
if (wqi_size > AVAILABLE_SPACE)
return NULL;
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
return ce;
}
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+ struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+ if (!base)
+ return NULL;
+
+ GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+ return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+ u32 size;
+ int ret;
+
+ size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+ GUC_MAX_CONTEXT_ID);
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+ (void **)&guc->lrc_desc_pool_vaddr_v69);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+ if (!guc->lrc_desc_pool_vaddr_v69)
+ return;
+
+ guc->lrc_desc_pool_vaddr_v69 = NULL;
+ i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
static inline bool guc_submission_initialized(struct intel_guc *guc)
{
return guc->submission_initialized;
}
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+ struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+ if (desc)
+ memset(desc, 0, sizeof(*desc));
+}
+
static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
if (unlikely(!guc_submission_initialized(guc)))
return;
+ _reset_lrc_desc_v69(guc, id);
+
/*
* xarray API doesn't have xa_erase_irqsave wrapper, so calling
* the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
true, timeout);
}
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
static int try_context_registration(struct intel_context *ce, bool loop);
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
GEM_BUG_ON(context_guc_id_invalid(ce));
if (context_policy_required(ce)) {
- err = guc_context_policy_init(ce, false);
+ err = guc_context_policy_init_v70(ce, false);
if (err)
return err;
}
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
}
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
- struct intel_context *ce,
- u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
{
BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
(WQ_SIZE - 1);
- WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+ WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
}
static int guc_wq_noop_append(struct intel_context *ce)
{
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
- u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+ u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_context *child;
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sizeof(u32);
u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
return ret;
}
- wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+ wqi = get_wq_pointer(ce, wqi_size);
if (!wqi)
return -EBUSY;
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
for_each_child(ce, child)
*wqi++ = child->ring->tail / sizeof(u64);
- write_wqi(wq_desc, ce, wqi_size);
+ write_wqi(ce, wqi_size);
return 0;
}
@@ -1812,20 +1863,34 @@ static void reset_fail_worker_func(struct work_struct *w);
int intel_guc_submission_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
if (guc->submission_initialized)
return 0;
+ if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
+ ret = guc_lrc_desc_pool_create_v69(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
+ if (!guc->submission_state.guc_ids_bitmap) {
+ ret = -ENOMEM;
+ goto destroy_pool;
+ }
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
guc->submission_initialized = true;
return 0;
+
+destroy_pool:
+ guc_lrc_desc_pool_destroy_v69(guc);
+
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1834,6 +1899,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
return;
guc_flush_destroyed_contexts(guc);
+ guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
guc->submission_initialized = false;
@@ -2091,10 +2157,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
- struct intel_context *ce,
- struct guc_ctxt_registration_info *info,
- bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+ struct intel_context *ce,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[4 + MAX_ENGINE_INSTANCE];
+ int len = 0;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = guc_id;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = offset;
+ for_each_child(ce, child) {
+ offset += sizeof(struct guc_lrc_desc_v69);
+ action[len++] = offset;
+ }
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+ struct intel_context *ce,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
{
struct intel_context *child;
u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2134,9 +2224,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
-static int __guc_action_register_context(struct intel_guc *guc,
- struct guc_ctxt_registration_info *info,
- bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_REGISTER_CONTEXT,
+ guc_id,
+ offset,
+ };
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ 0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2157,24 +2262,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
0, loop);
}
-static void prepare_context_registration_info(struct intel_context *ce,
- struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info);
-static int register_context(struct intel_context *ce, bool loop)
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+ ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+ prepare_context_registration_info_v69(ce);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+ offset, loop);
+ else
+ return __guc_action_register_context_v69(guc, ce->guc_id.id,
+ offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
{
struct guc_ctxt_registration_info info;
+
+ prepare_context_registration_info_v70(ce, &info);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+ else
+ return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
struct intel_guc *guc = ce_to_guc(ce);
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- prepare_context_registration_info(ce, &info);
-
- if (intel_context_is_parent(ce))
- ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ ret = register_context_v70(guc, ce, loop);
else
- ret = __guc_action_register_context(guc, &info, loop);
+ ret = register_context_v69(guc, ce, loop);
+
if (likely(!ret)) {
unsigned long flags;
@@ -2182,7 +2315,8 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- guc_context_policy_init(ce, loop);
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ guc_context_policy_init_v70(ce, loop);
}
return ret;
@@ -2279,14 +2413,13 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
0, loop);
}
-static int guc_context_policy_init(struct intel_context *ce, bool loop)
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
- bool missing = false;
unsigned long flags;
int ret;
@@ -2304,32 +2437,9 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
ret = __guc_context_set_context_policies(guc, &policy, loop);
- missing = ret != 0;
-
- if (!missing && intel_context_is_parent(ce)) {
- struct intel_context *child;
-
- for_each_child(ce, child) {
- __guc_context_policy_start_klv(&policy, child->guc_id.id);
-
- if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
- __guc_context_policy_add_preempt_to_idle(&policy, 1);
-
- child->guc_state.prio = ce->guc_state.prio;
- __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
- __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
- __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-
- ret = __guc_context_set_context_policies(guc, &policy, loop);
- if (ret) {
- missing = true;
- break;
- }
- }
- }
spin_lock_irqsave(&ce->guc_state.lock, flags);
- if (missing)
+ if (ret != 0)
set_context_policy_required(ce);
else
clr_context_policy_required(ce);
@@ -2338,6 +2448,19 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
return ret;
}
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+ struct guc_lrc_desc_v69 *desc)
+{
+ desc->policy_flags = 0;
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+ /* NB: For both of these, zero means disabled. */
+ desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
{
/*
@@ -2358,8 +2481,75 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
}
}
-static void prepare_context_registration_info(struct intel_context *ce,
- struct guc_ctxt_registration_info *info)
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 ctx_id = ce->guc_id.id;
+ struct guc_lrc_desc_v69 *desc;
+ struct intel_context *child;
+
+ GEM_BUG_ON(!engine->mask);
+
+ /*
+ * Ensure LRC + CT vmas are in the same region, as the write barrier is
+ * done based on the CT vma region.
+ */
+ GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+ i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+ desc = __get_lrc_desc_v69(guc, ctx_id);
+ desc->engine_class = engine_class_to_guc_class(engine->class);
+ desc->engine_submit_mask = engine->logical_mask;
+ desc->hw_context_desc = ce->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+
+ /*
+ * If context is a parent, we need to register a process descriptor
+ * describing a work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc_v69 *pdesc;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ desc->process_desc = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ desc->wq_addr = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ desc->wq_size = WQ_SIZE;
+
+ pdesc = __get_process_desc_v69(ce);
+ memset(pdesc, 0, sizeof(*(pdesc)));
+ pdesc->stage_id = ce->guc_id.id;
+ pdesc->wq_base_addr = desc->wq_addr;
+ pdesc->wq_size_bytes = desc->wq_size;
+ pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+ ce->parallel.guc.wq_head = &pdesc->head;
+ ce->parallel.guc.wq_tail = &pdesc->tail;
+ ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+ for_each_child(ce, child) {
+ desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+ desc->engine_class =
+ engine_class_to_guc_class(engine->class);
+ desc->hw_context_desc = child->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+ }
+
+ clear_children_join_go_memory(ce);
+ }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2409,10 +2599,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
info->wq_base_hi = upper_32_bits(wq_base_offset);
info->wq_size = WQ_SIZE;
- wq_desc = __get_wq_desc(ce);
+ wq_desc = __get_wq_desc_v70(ce);
memset(wq_desc, 0, sizeof(*wq_desc));
wq_desc->wq_status = WQ_STATUS_ACTIVE;
+ ce->parallel.guc.wq_head = &wq_desc->head;
+ ce->parallel.guc.wq_tail = &wq_desc->tail;
+ ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
clear_children_join_go_memory(ce);
}
}
@@ -2727,11 +2921,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- struct context_policy policy;
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ struct context_policy policy;
- __guc_context_policy_start_klv(&policy, guc_id);
- __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
- __guc_context_set_context_policies(guc, &policy, true);
+ __guc_context_policy_start_klv(&policy, guc_id);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+ guc_id,
+ preemption_timeout
+ };
+
+ intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
}
static void
@@ -2982,11 +3186,21 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- struct context_policy policy;
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ struct context_policy policy;
- __guc_context_policy_start_klv(&policy, ce->guc_id.id);
- __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
- __guc_context_set_context_policies(guc, &policy, true);
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+ ce->guc_id.id,
+ ce->guc_state.prio,
+ };
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
}
static void guc_context_set_prio(struct intel_guc *guc,
@@ -3789,6 +4003,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
xa_destroy(&guc->context_lookup);
/*
+ * A reset might have occurred while we had a pending stalled request,
+ * so make sure we clean that up.
+ */
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+
+ /*
* Some contexts might have been pinned before we enabled GuC
* submission, so we need to add them to the GuC bookkeeping.
* Also, after a reset of the GuC we want to make sure that the
@@ -3953,13 +4174,27 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
void intel_guc_submission_enable(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /* Enable and route to GuC */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
+ GUC_SEM_INTR_ROUTE_TO_GUC |
+ GUC_SEM_INTR_ENABLE_ALL);
+
guc_init_lrc_mapping(guc);
guc_init_engine_stats(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
/* Note: By the time we're here, GuC may have already been reset */
+
+ /* Disable and route to host */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -4496,17 +4731,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) {
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children);
- drm_printf(p, "\t\tWQI Head: %u\n",
- READ_ONCE(wq_desc->head));
- drm_printf(p, "\t\tWQI Tail: %u\n",
- READ_ONCE(wq_desc->tail));
- drm_printf(p, "\t\tWQI Status: %u\n\n",
- READ_ONCE(wq_desc->wq_status));
+
+ if (ce->parallel.guc.wq_status) {
+ drm_printf(p, "\t\tWQI Head: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_head));
+ drm_printf(p, "\t\tWQI Tail: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_tail));
+ drm_printf(p, "\t\tWQI Status: %u\n\n",
+ READ_ONCE(*ce->parallel.guc.wq_status));
+ }
if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) {
@@ -4923,4 +5160,5 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
+#include "selftest_guc_hangcheck.c"
#endif
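
The submission changes above follow one pattern throughout: every GuC interaction whose firmware interface changed between v69 and v70 is split into *_v69/*_v70 variants and dispatched on GET_UC_VER(guc). A minimal sketch of that dispatch shape for context registration; example_register_context is a hypothetical wrapper name, while register_context_v69/v70 are the functions added in the hunk above:

static int example_register_context(struct intel_guc *guc,
				    struct intel_context *ce, bool loop)
{
	/* v70+ firmware: registration info is passed inline over H2G */
	if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
		return register_context_v70(guc, ce, loop);

	/* older firmware: GuC reads a guc_lrc_desc_v69 from the descriptor pool */
	return register_context_v69(guc, ce, loop);
}
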
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index f2e7c82985ef..abf4e142596d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -435,9 +435,11 @@ static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
- intel_uc_fw_type_repr(fw->type), fw->path,
- fw->major_ver_found, fw->minor_ver_found);
+ drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
+ fw->file_selected.major_ver,
+ fw->file_selected.minor_ver,
+ fw->file_selected.patch_ver);
}
static int __uc_init_hw(struct intel_uc *uc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 27363091e1af..af425916cdf6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -41,7 +41,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
"%s firmware -> %s\n",
intel_uc_fw_type_repr(uc_fw->type),
status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->path : intel_uc_fw_status_repr(status));
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -51,79 +51,149 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
*
* Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
* firmware as TGL.
+ *
+ * Version numbers:
+ * Originally, the driver required an exact-match major/minor/patch firmware
+ * file and only supported that one version for any given platform. However,
+ * the new direction from upstream is to be backwards compatible with all
+ * prior releases and to be as flexible as possible as to what firmware is
+ * loaded.
+ *
+ * For GuC, the major version number signifies a backwards breaking API change.
+ * So, new format GuC firmware files are labelled by their major version only.
+ * For HuC, there is no KMD interaction, hence no version matching requirement.
+ * So, new format HuC firmware files have no version number at all.
+ *
+ * All of which means that the table below must keep all old format files with
+ * full three point version number. But newer files have reduced requirements.
+ * Having said that, the driver still needs to track the minor version number
+ * for GuC at least, as it is useful to report to the user that they are not
+ * running with a recent enough version for all KMD supported features,
+ * security fixes, etc. to be enabled.
*/
-#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(DG2, 0, guc_def(dg2, 70, 1, 2)) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 70, 1, 1)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 70, 1, 1)) \
- fw_def(ICELAKE, 0, guc_def(icl, 70, 1, 1)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 70, 1, 1)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 70, 1, 1)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
-
-#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
- fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \
- fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \
- fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \
- fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \
- fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \
- fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \
- fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0))
-
-#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
+#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
+ fw_def(DG2, 0, guc_mmp(dg2, 70, 4, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
+ fw_def(DG1, 0, guc_mmp(dg1, 70, 1, 1)) \
+ fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ELKHARTLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ICELAKE, 0, guc_mmp(icl, 70, 1, 1)) \
+ fw_def(COMETLAKE, 5, guc_mmp(cml, 70, 1, 1)) \
+ fw_def(COMETLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(COFFEELAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(GEMINILAKE, 0, guc_mmp(glk, 70, 1, 1)) \
+ fw_def(KABYLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
+ fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
+
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
+ fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_mmp(dg1, 7, 9, 3)) \
+ fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, huc_mmp(icl, 9, 0, 0)) \
+ fw_def(COMETLAKE, 5, huc_mmp(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, huc_mmp(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, huc_mmp(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, huc_mmp(skl, 2, 0, 0))
+
+/*
+ * Set of macros for producing a list of filenames from the above table.
+ */
+#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
+ "i915/" \
+ __stringify(prefix_) name_ ".bin"
+
+#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) ".bin"
+
+#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) name_ \
__stringify(major_) "." \
__stringify(minor_) "." \
__stringify(patch_) ".bin"
-#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
+/* Minor for internal driver use, not part of file name */
+#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
+ __MAKE_UC_FW_PATH_MAJOR(prefix_, "_guc_", major_)
-#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
- __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
+#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_guc_", major_, minor_, patch_)
-/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
+
+#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
+
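+/*
+ * For illustration only: with the path macros above, a new format GuC file
+ * would be named e.g. "i915/dg2_guc_70.bin", a new format HuC file e.g.
+ * "i915/dg2_huc.bin", and a legacy full-version file e.g.
+ * "i915/adlp_guc_70.1.1.bin" or "i915/tgl_huc_7.9.3.bin".
+ */
+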
+/*
+ * All blobs need to be declared via MODULE_FIRMWARE().
+ * This first expansion of the table macros is solely to provide
+ * that declaration.
+ */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
MODULE_FIRMWARE(uc_);
-INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
-INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP)
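+
+/*
+ * For illustration: the DG2 GuC entry in the table above expands here to
+ * MODULE_FIRMWARE("i915/dg2_guc_70.4.1.bin");
+ */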
-/* The below structs and macros are used to iterate across the list of blobs */
+/*
+ * The next expansion of the table macros (in __uc_fw_auto_select below) provides
+ * actual data structures with both the filename and the version information.
+ * These structure arrays are then iterated over to find the files suitable
+ * for the current platform, and the driver attempts to load those files, in
+ * the order listed, until one is successfully found.
+ */
struct __packed uc_fw_blob {
+ const char *path;
+ bool legacy;
u8 major;
u8 minor;
- const char *path;
+ u8 patch;
};
-#define UC_FW_BLOB(major_, minor_, path_) \
- { .major = major_, .minor = minor_, .path = path_ }
+#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .major = major_, \
+ .minor = minor_, \
+ .patch = patch_, \
+ .path = path_,
+
+#define UC_FW_BLOB_NEW(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = false }
+
+#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = true }
-#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
- UC_FW_BLOB(major_, minor_, \
- MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
+#define GUC_FW_BLOB(prefix_, major_, minor_) \
+ UC_FW_BLOB_NEW(major_, minor_, 0, \
+ MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_))
-#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
- UC_FW_BLOB(major_, minor_, \
- MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
+#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_) \
+ UC_FW_BLOB_NEW(0, 0, 0, MAKE_HUC_FW_PATH_BLANK(prefix_))
+
+#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
@@ -147,15 +217,16 @@ static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
static const struct uc_fw_platform_requirement blobs_guc[] = {
- INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};
static const struct uc_fw_platform_requirement blobs_huc[] = {
- INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP)
};
static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
+ static bool verified;
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
@@ -176,32 +247,94 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
fw_count = blobs_all[uc_fw->type].count;
for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
- if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
- const struct uc_fw_blob *blob = &fw_blobs[i].blob;
- uc_fw->path = blob->path;
- uc_fw->major_ver_wanted = blob->major;
- uc_fw->minor_ver_wanted = blob->minor;
- break;
+ const struct uc_fw_blob *blob = &fw_blobs[i].blob;
+
+ if (p != fw_blobs[i].p)
+ continue;
+
+ if (rev < fw_blobs[i].rev)
+ continue;
+
+ if (uc_fw->file_selected.path) {
+ if (uc_fw->file_selected.path == blob->path)
+ uc_fw->file_selected.path = NULL;
+
+ continue;
}
+
+ uc_fw->file_selected.path = blob->path;
+ uc_fw->file_wanted.path = blob->path;
+ uc_fw->file_wanted.major_ver = blob->major;
+ uc_fw->file_wanted.minor_ver = blob->minor;
+ break;
}
/* make sure the list is ordered as expected */
- if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
+ verified = true;
+
for (i = 1; i < fw_count; i++) {
+ /* Next platform is good: */
if (fw_blobs[i].p < fw_blobs[i - 1].p)
continue;
+ /* Next platform revision is good: */
if (fw_blobs[i].p == fw_blobs[i - 1].p &&
fw_blobs[i].rev < fw_blobs[i - 1].rev)
continue;
- pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
- intel_platform_name(fw_blobs[i - 1].p),
- fw_blobs[i - 1].rev,
- intel_platform_name(fw_blobs[i].p),
- fw_blobs[i].rev);
+ /* Platform/revision must be in order: */
+ if (fw_blobs[i].p != fw_blobs[i - 1].p ||
+ fw_blobs[i].rev != fw_blobs[i - 1].rev)
+ goto bad;
+
+ /* Next major version is good: */
+ if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
+ continue;
+
+ /* New must be before legacy: */
+ if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
+ goto bad;
+
+ /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
+ if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
+ if (!fw_blobs[i - 1].blob.major)
+ continue;
+
+ if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
+ continue;
+ }
+
+ /* Major versions must be in order: */
+ if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
+ goto bad;
+
+ /* Next minor version is good: */
+ if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
+ continue;
+
+ /* Minor versions must be in order: */
+ if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
+ goto bad;
- uc_fw->path = NULL;
+ /* Patch versions must be in order: */
+ if (fw_blobs[i].blob.patch <= fw_blobs[i - 1].blob.patch)
+ continue;
+
+bad:
+ drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
+ fw_blobs[i - 1].blob.legacy ? "L" : "v",
+ fw_blobs[i - 1].blob.major,
+ fw_blobs[i - 1].blob.minor,
+ fw_blobs[i - 1].blob.patch,
+ intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
+ fw_blobs[i].blob.legacy ? "L" : "v",
+ fw_blobs[i].blob.major,
+ fw_blobs[i].blob.minor,
+ fw_blobs[i].blob.patch);
+
+ uc_fw->file_selected.path = NULL;
}
}
}
@@ -234,7 +367,7 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
}
if (unlikely(path)) {
- uc_fw->path = path;
+ uc_fw->file_selected.path = path;
uc_fw->user_overridden = true;
}
}
@@ -258,7 +391,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
*/
BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
GEM_BUG_ON(uc_fw->status);
- GEM_BUG_ON(uc_fw->path);
+ GEM_BUG_ON(uc_fw->file_selected.path);
uc_fw->type = type;
@@ -267,7 +400,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
__uc_fw_user_override(i915, uc_fw);
}
- intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+ intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
INTEL_UC_FIRMWARE_SELECTED :
INTEL_UC_FIRMWARE_DISABLED :
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
@@ -280,32 +413,32 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
- uc_fw->path = "<invalid>";
+ uc_fw->file_selected.path = "<invalid>";
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
- uc_fw->major_ver_wanted += 1;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver += 1;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
- uc_fw->minor_ver_wanted += 1;
+ uc_fw->file_wanted.minor_ver += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->major_ver_wanted &&
+ } else if (uc_fw->file_wanted.major_ver &&
i915_inject_probe_error(i915, e)) {
/* require prev major version */
- uc_fw->major_ver_wanted -= 1;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver -= 1;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->minor_ver_wanted &&
+ } else if (uc_fw->file_wanted.minor_ver &&
i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
- uc_fw->minor_ver_wanted -= 1;
+ uc_fw->file_wanted.minor_ver -= 1;
uc_fw->user_overridden = user;
} else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
- uc_fw->major_ver_wanted = 0;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver = 0;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = true;
}
}
@@ -314,10 +447,12 @@ static int check_gsc_manifest(const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
u32 *dw = (u32 *)fw->data;
- u32 version = dw[HUC_GSC_VERSION_DW];
+ u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
+ u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
- uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
- uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);
+ uc_fw->file_selected.major_ver = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.minor_ver = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.patch_ver = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
return 0;
}
@@ -332,7 +467,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -345,7 +480,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
if (unlikely(size != sizeof(struct uc_css_header))) {
drm_warn(&i915->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -360,7 +495,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, size);
return -ENOEXEC;
}
@@ -369,16 +504,18 @@ static int check_ccs_header(struct drm_i915_private *i915,
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= i915->wopcm.size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
size, (size_t)i915->wopcm.size);
return -E2BIG;
}
/* Get version numbers from the CSS header */
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->sw_version);
+ uc_fw->file_selected.major_ver = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->file_selected.minor_ver = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
+ uc_fw->file_selected.patch_ver = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
+ css->sw_version);
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
uc_fw->private_data_size = css->private_data_size;
@@ -397,9 +534,11 @@ static int check_ccs_header(struct drm_i915_private *i915,
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct intel_uc_fw_file file_ideal;
struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
+ bool old_ver = false;
int err;
GEM_BUG_ON(!i915->wopcm.size);
@@ -412,10 +551,33 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = request_firmware(&fw, uc_fw->path, dev);
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
+ if (!err || intel_uc_fw_is_overridden(uc_fw))
+ goto done;
+
+ while (err == -ENOENT) {
+ __uc_fw_auto_select(i915, uc_fw);
+ if (!uc_fw->file_selected.path) {
+ /*
+ * No more options! But set the path back to something
+ * valid just in case it gets dereferenced.
+ */
+ uc_fw->file_selected.path = file_ideal.path;
+
+ /* Also, preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+ break;
+ }
+
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ }
+
if (err)
goto fail;
+ old_ver = true;
+done:
if (uc_fw->loaded_via_gsc)
err = check_gsc_manifest(fw, uc_fw);
else
@@ -423,18 +585,39 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (err)
goto fail;
- if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- if (!intel_uc_fw_is_overridden(uc_fw)) {
- err = -ENOEXEC;
- goto fail;
+ if (uc_fw->file_wanted.major_ver) {
+ /* Check the file's major version was as it claimed */
+ if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) {
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
+ } else {
+ if (uc_fw->file_selected.minor_ver < uc_fw->file_wanted.minor_ver)
+ old_ver = true;
}
}
+ if (old_ver) {
+ /* Preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+
+ drm_notice(&i915->drm,
+ "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+ }
+
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
if (!IS_ERR(obj))
@@ -460,8 +643,8 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
@@ -603,7 +786,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
fail:
i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
err);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
@@ -821,13 +1004,34 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
*/
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
+ u32 ver_sel, ver_want;
+
drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
+ if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
+ drm_printf(p, "%s firmware wanted: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
- drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ ver_sel = MAKE_UC_VER(uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ ver_want = MAKE_UC_VER(uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver);
+ if (ver_sel < ver_want)
+ drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
+ uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver,
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ else
+ drm_printf(p, "\tversion: found %u.%u.%u\n",
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 4f169035f504..cb586f7df270 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -65,6 +65,18 @@ enum intel_uc_fw_type {
#define INTEL_UC_FW_NUM_TYPES 2
/*
+ * The firmware build process will generate a version header file with major and
+ * minor versions defined. The versions are built into the CSS header of the
+ * firmware. The i915 kernel driver sets the minimal firmware version required
+ * per platform.
+ */
+struct intel_uc_fw_file {
+ const char *path;
+ u16 major_ver;
+ u16 minor_ver;
+ u16 patch_ver;
+};
+
+/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the uC.
*/
@@ -74,10 +86,12 @@ struct intel_uc_fw {
const enum intel_uc_fw_status status;
enum intel_uc_fw_status __status; /* no accidental overwrites */
};
- const char *path;
+ struct intel_uc_fw_file file_wanted;
+ struct intel_uc_fw_file file_selected;
bool user_overridden;
size_t size;
struct drm_i915_gem_object *obj;
+
/**
* @dummy: A vma used in binding the uc fw to ggtt. We can't define this
* vma on the stack as it can lead to a stack overflow, so we define it
@@ -88,24 +102,18 @@ struct intel_uc_fw {
struct i915_vma_resource dummy;
struct i915_vma *rsa_data;
- /*
- * The firmware build process will generate a version header file with major and
- * minor version defined. The versions are built into CSS header of firmware.
- * i915 kernel driver set the minimal firmware version required per platform.
- */
- u16 major_ver_wanted;
- u16 minor_ver_wanted;
- u16 major_ver_found;
- u16 minor_ver_found;
-
u32 rsa_size;
u32 ucode_size;
-
u32 private_data_size;
bool loaded_via_gsc;
};
+#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))
+#define GET_UC_VER(uc) (MAKE_UC_VER((uc)->fw.file_selected.major_ver, \
+ (uc)->fw.file_selected.minor_ver, \
+ (uc)->fw.file_selected.patch_ver))
+
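+/*
+ * For example, MAKE_UC_VER(70, 1, 1) packs to 0x00460101, so packed values
+ * can be compared numerically, e.g. GET_UC_VER(guc) >= MAKE_UC_VER(70, 1, 0).
+ */
+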
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index b05e0e35b734..7a411178bdbf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -83,8 +83,10 @@ struct uc_css_header {
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
-#define HUC_GSC_VERSION_DW 44
-#define HUC_GSC_MAJOR_VER_MASK (0xFF << 0)
-#define HUC_GSC_MINOR_VER_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_HI_DW 44
+#define HUC_GSC_MAJOR_VER_HI_MASK (0xFF << 0)
+#define HUC_GSC_MINOR_VER_HI_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_LO_DW 45
+#define HUC_GSC_PATCH_VER_LO_MASK (0xFF << 0)
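+
+/*
+ * For example, a GSC-loaded HuC binary with dw[44] = 0x00090007 and
+ * dw[45] = 0x00000003 (illustrative values) would decode as version 7.9.3.
+ */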
#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 1df71d0796ae..e28518fe8b90 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -54,6 +54,9 @@ static int intel_guc_scrub_ctbs(void *arg)
struct intel_engine_cs *engine;
struct intel_context *ce;
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
@@ -62,7 +65,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- pr_err("Failed to create context, %d: %d\n", i, ret);
+ drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
goto err;
}
@@ -83,7 +86,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed to create request, %d: %d\n", i, ret);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
goto err;
}
@@ -93,7 +96,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- pr_err("Last request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
goto err;
}
i915_request_put(last[i]);
@@ -110,7 +113,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
goto err;
}
@@ -150,7 +153,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- pr_err("Context array allocation failed\n");
+ drm_err(&gt->i915->drm, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -164,24 +167,24 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
ce[context_index] = NULL;
- pr_err("Failed to create context: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- pr_err("Failed to create spinner: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- pr_err("Failed to create spinner request: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- pr_err("Failed to add Spinner request: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
goto err_spin_rq;
}
@@ -191,7 +194,7 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index--]);
ce[context_index] = NULL;
- pr_err("Failed to create context: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_spin_rq;
}
@@ -200,8 +203,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- pr_err("Failed to create request, %d: %d\n",
- context_index, ret);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
+ context_index, ret);
goto err_spin_rq;
}
} else {
@@ -215,7 +218,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- pr_err("Spin request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
i915_request_put(last);
goto err_spin_rq;
}
@@ -227,7 +230,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- pr_err("Last request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
goto err_spin_rq;
}
@@ -235,7 +238,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+ drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
goto err_spin_rq;
}
@@ -243,21 +246,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- pr_err("Request with stolen guc_id failed to complete: %d\n",
- ret);
+ drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- pr_err("No guc_id was stolen");
+ drm_err(&gt->i915->drm, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
new file mode 100644
index 000000000000..01f8cd3c3134
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+#define BEAT_INTERVAL 100
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_hang_guc(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ unsigned int reset_count;
+ u32 guc_status;
+ u32 old_beat;
+
+ ctx = kernel_context(gt->i915, NULL);
+ if (IS_ERR(ctx)) {
+ drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ return PTR_ERR(ctx);
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(gt->engine[BCS0]);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err;
+ }
+
+ engine = ce->engine;
+ reset_count = i915_reset_count(global);
+
+ old_beat = engine->props.heartbeat_interval_ms;
+ ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ goto err;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ goto err_spin;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ if (!(guc_status & GS_MIA_IN_RESET)) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ ret = -EIO;
+ goto err_spin;
+ }
+
+ /* Wait for the heartbeat to cause a reset */
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ goto err_spin;
+ }
+
+ if (i915_reset_count(global) == reset_count) {
+ drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ ret = -EINVAL;
+ goto err_spin;
+ }
+
+err_spin:
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ intel_engine_set_heartbeat(engine, old_beat);
+
+ if (ret == 0) {
+ rq = nop_request(engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ goto err;
+ }
+ }
+
+err:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kernel_context_close(ctx);
+
+ return ret;
+}
+
+int intel_guc_hang_check(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_hang_guc),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index 812220a43df8..d17982c36d25 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -115,30 +115,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+ drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
return PTR_ERR(parent);
} else if (!parent) {
- pr_debug("Not enough engines in class: %d", class);
+ drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed creating requests: %d", ret);
+ drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- pr_err("Failed waiting on request: %d", ret);
+ drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
}
out:
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0ba2a3455d99..de13f102d4fd 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -3117,9 +3117,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
continue;
vaddr = shmem_pin_map(engine->default_state);
- if (IS_ERR(vaddr)) {
- gvt_err("failed to map %s->default state, err:%zd\n",
- engine->name, PTR_ERR(vaddr));
+ if (!vaddr) {
+ gvt_err("failed to map %s->default state\n",
+ engine->name);
return;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index aee1a45da74b..705689e64011 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -226,7 +226,6 @@ struct intel_vgpu {
unsigned long nr_cache_entries;
struct mutex cache_lock;
- struct notifier_block iommu_notifier;
atomic_t released;
struct kvm_page_track_notifier_node track_node;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e2f6c56ab342..e3cd58946477 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -231,57 +231,38 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int total_pages;
- int npage;
- int ret;
-
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
-
- for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
-
- ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
- drm_WARN_ON(&i915->drm, ret != 1);
- }
+ vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
+ DIV_ROUND_UP(size, PAGE_SIZE));
}
/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
- unsigned long base_pfn = 0;
- int total_pages;
+ int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct page *base_page = NULL;
int npage;
int ret;
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
/*
 * We pin the pages one-by-one to avoid allocating a big array
 * on the stack to hold pfns.
*/
for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
- unsigned long pfn;
+ dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
+ struct page *cur_page;
- ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
+ IOMMU_READ | IOMMU_WRITE, &cur_page);
if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
- cur_gfn, ret);
- goto err;
- }
-
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- npage++;
- ret = -EFAULT;
+ gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
+ &cur_iova, ret);
goto err;
}
if (npage == 0)
- base_pfn = pfn;
- else if (base_pfn + npage != pfn) {
+ base_page = cur_page;
+ else if (base_page + npage != cur_page) {
gvt_vgpu_err("The pages are not continuous\n");
ret = -EINVAL;
npage++;
@@ -289,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
}
}
- *page = pfn_to_page(base_pfn);
+ *page = base_page;
return 0;
err:
gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
@@ -729,34 +710,25 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
return ret;
}
-static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
+ u64 length)
{
- struct intel_vgpu *vgpu =
- container_of(nb, struct intel_vgpu, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- struct gvt_dma *entry;
- unsigned long iov_pfn, end_iov_pfn;
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct gvt_dma *entry;
+ u64 iov_pfn = iova >> PAGE_SHIFT;
+ u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
- iov_pfn = unmap->iova >> PAGE_SHIFT;
- end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
+ mutex_lock(&vgpu->cache_lock);
+ for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+ entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+ if (!entry)
+ continue;
- mutex_lock(&vgpu->cache_lock);
- for (; iov_pfn < end_iov_pfn; iov_pfn++) {
- entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
- if (!entry)
- continue;
-
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
- entry->size);
- __gvt_cache_remove_entry(vgpu, entry);
- }
- mutex_unlock(&vgpu->cache_lock);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
}
-
- return NOTIFY_OK;
+ mutex_unlock(&vgpu->cache_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
@@ -783,36 +755,20 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- unsigned long events;
- int ret;
-
- vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
- &vgpu->iommu_notifier);
- if (ret != 0) {
- gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
- ret);
- goto out;
- }
- ret = -EEXIST;
if (vgpu->attached)
- goto undo_iommu;
+ return -EEXIST;
- ret = -ESRCH;
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- goto undo_iommu;
+ return -ESRCH;
}
kvm_get_kvm(vgpu->vfio_device.kvm);
- ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
- goto undo_iommu;
+ return -EEXIST;
vgpu->attached = true;
@@ -831,12 +787,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
atomic_set(&vgpu->released, 0);
return 0;
-
-undo_iommu:
- vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
-out:
- return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
@@ -853,8 +803,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int ret;
if (!vgpu->attached)
return;
@@ -864,11 +812,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
- drm_WARN(&i915->drm, ret,
- "vfio_unregister_notifier for iommu failed: %d\n", ret);
-
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -1610,6 +1553,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
+ .dma_unmap = intel_vgpu_dma_unmap,
};
static int intel_vgpu_probe(struct mdev_device *mdev)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 94e5c29d2ee3..3a8450058548 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -188,47 +188,47 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
stringify_page_sizes(vma->resource->page_sizes_gtt,
NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
- switch (vma->ggtt_view.type) {
- case I915_GGTT_VIEW_NORMAL:
+ switch (vma->gtt_view.type) {
+ case I915_GTT_VIEW_NORMAL:
seq_puts(m, ", normal");
break;
- case I915_GGTT_VIEW_PARTIAL:
+ case I915_GTT_VIEW_PARTIAL:
seq_printf(m, ", partial [%08llx+%x]",
- vma->ggtt_view.partial.offset << PAGE_SHIFT,
- vma->ggtt_view.partial.size << PAGE_SHIFT);
+ vma->gtt_view.partial.offset << PAGE_SHIFT,
+ vma->gtt_view.partial.size << PAGE_SHIFT);
break;
- case I915_GGTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_ROTATED:
seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
- vma->ggtt_view.rotated.plane[0].width,
- vma->ggtt_view.rotated.plane[0].height,
- vma->ggtt_view.rotated.plane[0].src_stride,
- vma->ggtt_view.rotated.plane[0].dst_stride,
- vma->ggtt_view.rotated.plane[0].offset,
- vma->ggtt_view.rotated.plane[1].width,
- vma->ggtt_view.rotated.plane[1].height,
- vma->ggtt_view.rotated.plane[1].src_stride,
- vma->ggtt_view.rotated.plane[1].dst_stride,
- vma->ggtt_view.rotated.plane[1].offset);
+ vma->gtt_view.rotated.plane[0].width,
+ vma->gtt_view.rotated.plane[0].height,
+ vma->gtt_view.rotated.plane[0].src_stride,
+ vma->gtt_view.rotated.plane[0].dst_stride,
+ vma->gtt_view.rotated.plane[0].offset,
+ vma->gtt_view.rotated.plane[1].width,
+ vma->gtt_view.rotated.plane[1].height,
+ vma->gtt_view.rotated.plane[1].src_stride,
+ vma->gtt_view.rotated.plane[1].dst_stride,
+ vma->gtt_view.rotated.plane[1].offset);
break;
- case I915_GGTT_VIEW_REMAPPED:
+ case I915_GTT_VIEW_REMAPPED:
seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
- vma->ggtt_view.remapped.plane[0].width,
- vma->ggtt_view.remapped.plane[0].height,
- vma->ggtt_view.remapped.plane[0].src_stride,
- vma->ggtt_view.remapped.plane[0].dst_stride,
- vma->ggtt_view.remapped.plane[0].offset,
- vma->ggtt_view.remapped.plane[1].width,
- vma->ggtt_view.remapped.plane[1].height,
- vma->ggtt_view.remapped.plane[1].src_stride,
- vma->ggtt_view.remapped.plane[1].dst_stride,
- vma->ggtt_view.remapped.plane[1].offset);
+ vma->gtt_view.remapped.plane[0].width,
+ vma->gtt_view.remapped.plane[0].height,
+ vma->gtt_view.remapped.plane[0].src_stride,
+ vma->gtt_view.remapped.plane[0].dst_stride,
+ vma->gtt_view.remapped.plane[0].offset,
+ vma->gtt_view.remapped.plane[1].width,
+ vma->gtt_view.remapped.plane[1].height,
+ vma->gtt_view.remapped.plane[1].src_stride,
+ vma->gtt_view.remapped.plane[1].dst_stride,
+ vma->gtt_view.remapped.plane[1].offset);
break;
default:
- MISSING_CASE(vma->ggtt_view.type);
+ MISSING_CASE(vma->gtt_view.type);
break;
}
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d25647be25d1..7c0a34a33fec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -247,7 +247,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct delayed_work free_work;
+ struct work_struct free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1352,9 +1352,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
-#define HAS_PERCTX_PREEMPT_CTRL(i915) \
- ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
-
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
@@ -1378,7 +1375,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
+ flush_work(&i915->mm.free_work);
flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
@@ -1409,12 +1406,12 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
struct i915_vma * __must_check
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags);
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..a3373699835d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -885,7 +885,7 @@ static void discard_ggtt_vma(struct i915_vma *vma)
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -896,7 +896,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
GEM_WARN_ON(!ww);
if (flags & PIN_MAPPABLE &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
+ (!view || view->type == I915_GTT_VIEW_NORMAL)) {
/*
* If the required space is larger than the available
* aperture, we will not able to find a slot for the
@@ -987,7 +987,7 @@ new_vma:
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct i915_gem_ww_ctx ww;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 32e92651ef7c..16d8b7ba65dc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -671,6 +671,18 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
}
+static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
+ const char *name,
+ const struct intel_ctb_coredump *ctb)
+{
+ if (!ctb->size)
+ return;
+
+ err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
+ name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
+ ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
+}
+
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct intel_uc_coredump *error_uc)
{
@@ -678,7 +690,12 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
+ err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
+ err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
+ err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
+ err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -720,6 +737,8 @@ static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
int i;
err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
+ err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
err_printf(m, "EIR: 0x%08x\n", gt->eir);
err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
@@ -851,7 +870,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt) {
bool print_guc_capture = false;
- if (error->gt->uc && error->gt->uc->is_guc_capture)
+ if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
err_print_gt_display(m, error->gt);
@@ -1004,9 +1023,12 @@ static void cleanup_params(struct i915_gpu_coredump *error)
static void cleanup_uc(struct intel_uc_coredump *uc)
{
- kfree(uc->guc_fw.path);
- kfree(uc->huc_fw.path);
- i915_vma_coredump_free(uc->guc_log);
+ kfree(uc->guc_fw.file_selected.path);
+ kfree(uc->huc_fw.file_selected.path);
+ kfree(uc->guc_fw.file_wanted.path);
+ kfree(uc->huc_fw.file_wanted.path);
+ i915_vma_coredump_free(uc->guc.vma_log);
+ i915_vma_coredump_free(uc->guc.vma_ctb);
kfree(uc);
}
@@ -1655,6 +1677,23 @@ gt_record_engines(struct intel_gt_coredump *gt,
}
}
+static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
+ const struct intel_guc_ct_buffer *ctb,
+ const void *blob_ptr, struct intel_guc *guc)
+{
+ if (!ctb || !ctb->desc)
+ return;
+
+ saved->raw_status = ctb->desc->status;
+ saved->raw_head = ctb->desc->head;
+ saved->raw_tail = ctb->desc->tail;
+ saved->head = ctb->head;
+ saved->tail = ctb->tail;
+ saved->size = ctb->size;
+ saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
+ saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
+}
+
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
struct i915_vma_compress *compress)
@@ -1669,14 +1708,26 @@ gt_record_uc(struct intel_gt_coredump *gt,
memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
- /* Non-default firmware paths will be specified by the modparam.
- * As modparams are generally accesible from the userspace make
- * explicit copies of the firmware paths.
+ error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
+ error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
+ error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
+ error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
+
+ /*
+ * Save the GuC log and include a timestamp reference for converting the
+ * log times to system times (in conjunction with the error->boottime and
+ * gt->clock_frequency fields saved elsewhere).
*/
- error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
- error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
- "GuC log buffer", compress);
+ error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
+ error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
+ "GuC log buffer", compress);
+ error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
+ "GuC CT buffer", compress);
+ error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
+ gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
+ uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+ gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
+ uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
return error_uc;
}
@@ -1833,6 +1884,8 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
static void gt_record_info(struct intel_gt_coredump *gt)
{
memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+ gt->clock_frequency = gt->_gt->clock_frequency;
+ gt->clock_period_ns = gt->_gt->clock_period_ns;
}
/*
@@ -2027,9 +2080,9 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
error->gt->uc = gt_record_uc(error->gt, compress);
if (error->gt->uc) {
if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
- error->gt->uc->is_guc_capture = true;
+ error->gt->uc->guc.is_guc_capture = true;
else
- GEM_BUG_ON(error->gt->uc->is_guc_capture);
+ GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 55a143b92d10..efc75cc2ffdb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -125,6 +125,15 @@ struct intel_engine_coredump {
struct intel_engine_coredump *next;
};
+struct intel_ctb_coredump {
+ u32 raw_head, head;
+ u32 raw_tail, tail;
+ u32 raw_status;
+ u32 desc_offset;
+ u32 cmds_offset;
+ u32 size;
+};
+
struct intel_gt_coredump {
const struct intel_gt *_gt;
bool awake;
@@ -150,6 +159,8 @@ struct intel_gt_coredump {
u32 gtt_cache;
u32 aux_err; /* gen12 */
u32 gam_done; /* gen12 */
+ u32 clock_frequency;
+ u32 clock_period_ns;
/* Display related */
u32 derrmr;
@@ -163,8 +174,14 @@ struct intel_gt_coredump {
struct intel_uc_coredump {
struct intel_uc_fw guc_fw;
struct intel_uc_fw huc_fw;
- struct i915_vma_coredump *guc_log;
- bool is_guc_capture;
+ struct guc_info {
+ struct intel_ctb_coredump ctb[2];
+ struct i915_vma_coredump *vma_ctb;
+ struct i915_vma_coredump *vma_log;
+ u32 timestamp;
+ u16 last_fence;
+ bool is_guc_capture;
+ } guc;
} *uc;
struct intel_gt_coredump *next;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index aacc10f2e73f..59a579ed03bb 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -1079,6 +1079,7 @@ static const struct intel_device_info ats_m_info = {
DG2_FEATURES,
.display = { 0 },
.require_force_probe = 1,
+ .tuning_thread_rr_after_dep = 1,
};
#define XE_HPC_FEATURES \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef3b04c7e153..f17c09ead7d7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -109,7 +109,7 @@ static void __i915_vma_retire(struct i915_active *ref)
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
@@ -141,9 +141,9 @@ vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->obj_link);
RB_CLEAR_NODE(&vma->obj_node);
- if (view && view->type != I915_GGTT_VIEW_NORMAL) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ vma->gtt_view = *view;
+ if (view->type == I915_GTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
view->partial.offset,
view->partial.size,
@@ -151,10 +151,10 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
GEM_BUG_ON(vma->size > obj->base.size);
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ } else if (view->type == I915_GTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
+ } else if (view->type == I915_GTT_VIEW_REMAPPED) {
vma->size = intel_remapped_info_size(&view->remapped);
vma->size <<= PAGE_SHIFT;
}
@@ -248,7 +248,7 @@ err_vma:
static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct rb_node *rb;
@@ -286,7 +286,7 @@ i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *vma;
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags);
}
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -1205,7 +1203,7 @@ err_st_alloc:
}
static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
+intel_partial_pages(const struct i915_gtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
@@ -1249,33 +1247,33 @@ __i915_vma_get_pages(struct i915_vma *vma)
*/
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
- switch (vma->ggtt_view.type) {
+ switch (vma->gtt_view.type) {
default:
- GEM_BUG_ON(vma->ggtt_view.type);
+ GEM_BUG_ON(vma->gtt_view.type);
fallthrough;
- case I915_GGTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_NORMAL:
pages = vma->obj->mm.pages;
break;
- case I915_GGTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_ROTATED:
pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
break;
- case I915_GGTT_VIEW_REMAPPED:
+ case I915_GTT_VIEW_REMAPPED:
pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
break;
- case I915_GGTT_VIEW_PARTIAL:
- pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ case I915_GTT_VIEW_PARTIAL:
+ pages = intel_partial_pages(&vma->gtt_view, vma->obj);
break;
}
if (IS_ERR(pages)) {
drm_err(&vma->vm->i915->drm,
"Failed to get pages for VMA view type %u (%ld)!\n",
- vma->ggtt_view.type, PTR_ERR(pages));
+ vma->gtt_view.type, PTR_ERR(pages));
return PTR_ERR(pages);
}
@@ -1310,6 +1308,19 @@ err_unpin:
return err;
}
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+ /*
+ * Before we release the pages that were bound by this vma, we
+ * must invalidate all the TLBs that may still have a reference
+ * back to our physical address. It only needs to be done once,
+ * so after updating the PTE to point away from the pages, record
+ * the most recent TLB invalidation seqno, and if we have not yet
+ * flushed the TLBs upon release, perform a full invalidation.
+ */
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
@@ -1795,7 +1806,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
GEM_BUG_ON(!vma->obj->userfault_count);
node = &vma->mmo->vma_node;
- vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
@@ -1871,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
enum dma_resv_usage usage;
int idx;
- obj->read_domains = 0;
if (flags & EXEC_OBJECT_WRITE) {
usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
} else {
usage = DMA_RESV_USAGE_READ;
+ obj->write_domain = 0;
}
dma_fence_array_for_each(curr, idx, fence)
@@ -1941,7 +1953,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
- unbind_fence = i915_vma_resource_unbind(vma_res);
+ if (async)
+ unbind_fence = i915_vma_resource_unbind(vma_res,
+ &vma->obj->mm.tlb);
+ else
+ unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1966,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
i915_vma_detach(vma);
- if (!async && unbind_fence) {
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
- unbind_fence = NULL;
+ if (!async) {
+ if (unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 88ca0bd9c900..aecd9c64486b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -43,7 +43,7 @@
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+ const struct i915_gtt_view *view);
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)
@@ -160,7 +160,7 @@ static inline void i915_vma_put(struct i915_vma *vma)
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
ptrdiff_t cmp;
@@ -170,8 +170,8 @@ i915_vma_compare(struct i915_vma *vma,
if (cmp)
return cmp;
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
- cmp = vma->ggtt_view.type;
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
+ cmp = vma->gtt_view.type;
if (!view)
return cmp;
@@ -181,7 +181,7 @@ i915_vma_compare(struct i915_vma *vma,
assert_i915_gem_gtt_types();
- /* ggtt_view.type also encodes its size so that we both distinguish
+ /* gtt_view.type also encodes its size so that we both distinguish
* different views using it as a "type" and also use a compact (no
* accessing of uninitialised padding bytes) memcmp without storing
* an extra parameter or adding more code.
@@ -191,14 +191,14 @@ i915_vma_compare(struct i915_vma *vma,
* we assert above that all branches have the same address, and that
* each branch has a unique type/size.
*/
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
- BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
- BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
+ BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), remapped));
- return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
+ return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}
struct i915_vma_work *i915_vma_work(void);
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 27c55027387a..de1342dbfa12 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -216,6 +216,10 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
/**
* i915_vma_resource_unbind - Unbind a vma resource
* @vma_res: The vma resource to unbind.
+ * @tlb: pointer to vma->obj->mm.tlb associated with the resource
+ * to be stored at vma_res->tlb. When not-NULL, it will be used
+ * to do TLB cache invalidation before freeing a VMA resource.
+ * Used only for async unbind.
*
* At this point this function does little more than publish a fence that
* signals immediately unless signaling is held back.
@@ -223,10 +227,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
* Return: A refcounted pointer to a dma-fence that signals when unbinding is
* complete.
*/
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb)
{
struct i915_address_space *vm = vma_res->vm;
+ vma_res->tlb = tlb;
+
/* Reference for the sw fence */
i915_vma_resource_get(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 5d8427caa2ba..06923d1816e7 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -67,6 +67,7 @@ struct i915_page_sizes {
* taken when the unbind is scheduled.
* @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
* needs to be skipped for unbind.
+ * @tlb: pointer for obj->mm.tlb, if async unbind. Otherwise, NULL
*
* The lifetime of a struct i915_vma_resource is from a binding request to
* the actual possible asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
bool immediate_unbind:1;
bool needs_wakeref:1;
bool skip_pte_rewrite:1;
+
+ u32 *tlb;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
void i915_vma_resource_free(struct i915_vma_resource *vma_res);
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb);
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index be6e028c3b57..ec0f6c9f57d0 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -67,30 +67,30 @@ enum i915_cache_level;
* Implementation and usage
*
* GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
+ * i915_gtt_view_type and struct i915_gtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
* added with the _ggtt_ infix, and sometimes with _view postfix to avoid
- * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * renaming in large amounts of code. They take the struct i915_gtt_view
* parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * globally const i915_gtt_view_normal singleton instance exists. All old core
* GEM API functions, the ones not taking the view parameter, are operating on,
* or with the normal GGTT view.
*
* Code wanting to add or use a new GGTT view needs to:
*
* 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 2. Extend the metadata in the i915_gtt_view structure if required.
* 3. Add support to i915_get_vma_pages().
*
* New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * i915_get_vma_pages function. This table is stored in the vma.gtt_view and
* exists for the lifetime of an VMA.
*
* Core API is designed to have copy semantics which means that passed in
- * struct i915_ggtt_view does not need to be persistent (left around after
+ * struct i915_gtt_view does not need to be persistent (left around after
* calling the core API functions).
*
*/
@@ -130,11 +130,11 @@ struct intel_partial_info {
unsigned int size;
} __packed;
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+enum i915_gtt_view_type {
+ I915_GTT_VIEW_NORMAL = 0,
+ I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
@@ -152,18 +152,18 @@ static inline void assert_i915_gem_gtt_types(void)
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
*/
- switch ((enum i915_ggtt_view_type)0) {
- case I915_GGTT_VIEW_NORMAL:
- case I915_GGTT_VIEW_PARTIAL:
- case I915_GGTT_VIEW_ROTATED:
- case I915_GGTT_VIEW_REMAPPED:
+ switch ((enum i915_gtt_view_type)0) {
+ case I915_GTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_PARTIAL:
+ case I915_GTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_REMAPPED:
/* gcc complains if these are identical cases */
break;
}
}
-struct i915_ggtt_view {
- enum i915_ggtt_view_type type;
+struct i915_gtt_view {
+ enum i915_gtt_view_type type;
union {
/* Members need to contain no holes/padding */
struct intel_partial_info partial;
@@ -280,11 +280,11 @@ struct i915_vma {
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
+ * i915_gtt_view_type is used to distinguish between those entries.
+ * The default one of zero (I915_GTT_VIEW_NORMAL) is default and also
* assumed in GEM functions which take no ggtt view parameter.
*/
- struct i915_ggtt_view ggtt_view;
+ struct i915_gtt_view gtt_view;
/** This object's place on the active/inactive lists */
struct list_head vm_link;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 23bf230aa104..e681bc6ed8e9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -172,6 +172,7 @@ enum intel_ppgtt_type {
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
+ func(tuning_thread_rr_after_dep); \
func(unfenced_needs_alignment); \
func(hws_needs_physical);
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
index 2aad2f0cc8db..ffc702b79579 100644
--- a/drivers/gpu/drm/i915/intel_mchbar_regs.h
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -196,6 +196,9 @@
#define RP1_CAP_MASK REG_GENMASK(15, 8)
#define RPN_CAP_MASK REG_GENMASK(23, 16)
+#define GEN10_FREQ_INFO_REC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
+#define RPE_MASK REG_GENMASK(15, 8)
+
/* snb MCH registers for priority tuning */
#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define SSKPD_NEW_WM0_MASK_HSW REG_GENMASK64(63, 56)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a852c471d1b3..c7ef5f2ff2e1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -112,8 +112,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
* trying to reset here does exist at this point (engines could be fused
* off in ICL+), so no waiting for acks
*/
- /* WaRsClearFWBitsAtReset:bdw,skl */
- fw_clear(d, 0xffff);
+ /* WaRsClearFWBitsAtReset */
+ if (GRAPHICS_VER(d->uncore->i915) >= 12)
+ fw_clear(d, 0xefff);
+ else
+ fw_clear(d, 0xffff);
}
static inline void
@@ -2232,14 +2235,15 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
- * For dgfx chips register range is expanded to 4MB.
+ * For dgfx chips register range is expanded to 4MB, and this larger
+ * range is also used for integrated gpus beginning with Meteor Lake.
*/
- if (GRAPHICS_VER(i915) < 5)
- mmio_size = 512 * 1024;
- else if (IS_DGFX(i915))
+ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
mmio_size = 4 * 1024 * 1024;
- else
+ else if (GRAPHICS_VER(i915) >= 5)
mmio_size = 2 * 1024 * 1024;
+ else
+ mmio_size = 512 * 1024;
uncore->regs = ioremap(phys_addr, mmio_size);
if (uncore->regs == NULL) {
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 15311eaed848..17109c513259 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -176,6 +176,18 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
spin_unlock_irq(&gt->irq_lock);
}
+static bool pxp_component_bound(struct intel_pxp *pxp)
+{
+ bool bound = false;
+
+ mutex_lock(&pxp->tee_mutex);
+ if (pxp->pxp_component)
+ bound = true;
+ mutex_unlock(&pxp->tee_mutex);
+
+ return bound;
+}
+
/*
* the arb session is restarted from the irq work when we receive the
* termination completion interrupt
@@ -187,6 +199,9 @@ int intel_pxp_start(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return -ENODEV;
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return -ENXIO;
+
mutex_lock(&pxp->arb_mutex);
if (pxp->arb_is_valid)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ab9f17fc85bc..e050a2de5fd1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1080,7 +1080,7 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL;
- obj = i915_gem_object_create_region(mr, size, 0, 0);
+ obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
if (IS_ERR(obj)) {
/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
if (PTR_ERR(obj) == -ENODEV && is_stolen)
@@ -2324,5 +2324,5 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index bdd290f2bf3c..aaf8a380e5c7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -49,5 +49,6 @@ selftest(perf, i915_perf_live_selftests)
selftest(slpc, intel_slpc_live_selftests)
selftest(guc, intel_guc_live_selftests)
selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
+selftest(guc_hang, intel_guc_hang_check)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 88db2e3d81d0..429c6d73b159 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -431,7 +431,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = i915_subtests(tests, i915);
+ err = i915_live_subtests(tests, i915);
destroy_empty_config(&i915->perf);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index c56a0c2cd2f7..818a4909c1f3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -971,7 +971,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- /* Force the wait wait now to avoid including it in the benchmark */
+ /* Force the wait now to avoid including it in the benchmark */
err = i915_vma_sync(vma);
if (err)
goto err_pin;
@@ -1821,7 +1821,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
static int switch_to_kernel_sync(struct intel_context *ce, int err)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 6921ba128015..71b52d5efef4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -51,9 +51,9 @@ static bool assert_vma(struct i915_vma *vma,
ok = false;
}
- if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
pr_err("VMA created with wrong type [%d]\n",
- vma->ggtt_view.type);
+ vma->gtt_view.type);
ok = false;
}
@@ -63,7 +63,7 @@ static bool assert_vma(struct i915_vma *vma,
static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *vma;
bool ok = true;
@@ -91,7 +91,7 @@ checked_vma_instance(struct drm_i915_gem_object *obj,
}
if (i915_vma_compare(vma, vma->vm,
- i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
+ i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {
pr_err("i915_vma_compare failed with itself\n");
return ERR_PTR(-EINVAL);
}
@@ -530,12 +530,12 @@ assert_remapped(struct drm_i915_gem_object *obj,
return sg;
}
-static unsigned int remapped_size(enum i915_ggtt_view_type view_type,
+static unsigned int remapped_size(enum i915_gtt_view_type view_type,
const struct intel_remapped_plane_info *a,
const struct intel_remapped_plane_info *b)
{
- if (view_type == I915_GGTT_VIEW_ROTATED)
+ if (view_type == I915_GTT_VIEW_ROTATED)
return a->dst_stride * a->width + b->dst_stride * b->width;
else
return a->dst_stride * a->height + b->dst_stride * b->height;
@@ -569,9 +569,9 @@ static int igt_vma_rotate_remap(void *arg)
{ }
}, *a, *b;
- enum i915_ggtt_view_type types[] = {
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_REMAPPED,
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
0,
}, *t;
const unsigned int max_pages = 64;
@@ -588,7 +588,7 @@ static int igt_vma_rotate_remap(void *arg)
for (t = types; *t; t++) {
for (a = planes; a->width; a++) {
for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
- struct i915_ggtt_view view = {
+ struct i915_gtt_view view = {
.type = *t,
.remapped.plane[0] = *a,
.remapped.plane[1] = *b,
@@ -602,11 +602,11 @@ static int igt_vma_rotate_remap(void *arg)
max_offset = max_pages - max_offset;
if (!plane_info[0].dst_stride)
- plane_info[0].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
plane_info[0].height :
plane_info[0].width;
if (!plane_info[1].dst_stride)
- plane_info[1].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[1].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
plane_info[1].height :
plane_info[1].width;
@@ -630,7 +630,7 @@ static int igt_vma_rotate_remap(void *arg)
expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]);
- if (view.type == I915_GGTT_VIEW_ROTATED &&
+ if (view.type == I915_GTT_VIEW_ROTATED &&
vma->size != expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * expected_pages, vma->size);
@@ -638,7 +638,7 @@ static int igt_vma_rotate_remap(void *arg)
goto out_object;
}
- if (view.type == I915_GGTT_VIEW_REMAPPED &&
+ if (view.type == I915_GTT_VIEW_REMAPPED &&
vma->size > expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * expected_pages, vma->size);
@@ -668,13 +668,13 @@ static int igt_vma_rotate_remap(void *arg)
sg = vma->pages->sgl;
for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
- if (view.type == I915_GGTT_VIEW_ROTATED)
+ if (view.type == I915_GTT_VIEW_ROTATED)
sg = assert_rotated(obj, &view.rotated, n, sg);
else
sg = assert_remapped(obj, &view.remapped, n, sg);
if (IS_ERR(sg)) {
pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n",
- view.type == I915_GGTT_VIEW_ROTATED ?
+ view.type == I915_GTT_VIEW_ROTATED ?
"rotated" : "remapped", n,
plane_info[0].width,
plane_info[0].height,
@@ -741,7 +741,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
}
static bool assert_pin(struct i915_vma *vma,
- struct i915_ggtt_view *view,
+ struct i915_gtt_view *view,
u64 size,
const char *name)
{
@@ -759,8 +759,8 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
- if (view && view->type != I915_GGTT_VIEW_NORMAL) {
- if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ if (memcmp(&vma->gtt_view, view, sizeof(*view))) {
pr_err("(%s) VMA mismatch upon creation!\n",
name);
ok = false;
@@ -772,9 +772,9 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
} else {
- if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
pr_err("Not the normal ggtt view! Found %d\n",
- vma->ggtt_view.type);
+ vma->gtt_view.type);
ok = false;
}
@@ -818,14 +818,14 @@ static int igt_vma_partial(void *arg)
nvma = 0;
for_each_prime_number_from(sz, 1, npages) {
for_each_prime_number_from(offset, 0, npages - sz) {
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
view.partial.offset = offset;
view.partial.size = sz;
if (sz == npages)
- view.type = I915_GGTT_VIEW_NORMAL;
+ view.type = I915_GTT_VIEW_NORMAL;
vma = checked_vma_instance(obj, vm, &view);
if (IS_ERR(vma)) {
@@ -976,9 +976,9 @@ static int igt_vma_remapped_gtt(void *arg)
{ }
}, *p;
- enum i915_ggtt_view_type types[] = {
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_REMAPPED,
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
0,
}, *t;
struct drm_i915_gem_object *obj;
@@ -996,7 +996,7 @@ static int igt_vma_remapped_gtt(void *arg)
for (t = types; *t; t++) {
for (p = planes; p->width; p++) {
- struct i915_ggtt_view view = {
+ struct i915_gtt_view view = {
.type = *t,
.rotated.plane[0] = *p,
};
@@ -1012,7 +1012,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
if (!plane_info[0].dst_stride)
- plane_info[0].dst_stride = *t == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].dst_stride = *t == I915_GTT_VIEW_ROTATED ?
p->height : p->width;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
@@ -1021,7 +1021,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- GEM_BUG_ON(vma->ggtt_view.type != *t);
+ GEM_BUG_ON(vma->gtt_view.type != *t);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -1035,7 +1035,7 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int offset;
u32 val = y << 16 | x;
- if (*t == I915_GGTT_VIEW_ROTATED)
+ if (*t == I915_GTT_VIEW_ROTATED)
offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
else
offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
@@ -1052,7 +1052,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);
+ GEM_BUG_ON(vma->gtt_view.type != I915_GTT_VIEW_NORMAL);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -1067,7 +1067,7 @@ static int igt_vma_remapped_gtt(void *arg)
u32 exp = y << 16 | x;
u32 val;
- if (*t == I915_GGTT_VIEW_ROTATED)
+ if (*t == I915_GTT_VIEW_ROTATED)
src_idx = rotated_index(&view.rotated, 0, x, y);
else
src_idx = remapped_index(&view.remapped, 0, x, y);
@@ -1076,7 +1076,7 @@ static int igt_vma_remapped_gtt(void *arg)
val = ioread32(&map[offset / sizeof(*map)]);
if (val != exp) {
pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
- *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
+ *t == I915_GTT_VIEW_ROTATED ? "Rotated" : "Remapped",
exp, val);
i915_vma_unpin_iomap(vma);
err = -EINVAL;
@@ -1103,5 +1103,5 @@ int i915_vma_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_vma_remapped_gtt),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index c849533ca83e..3f5750cc2673 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_submodules_init(dcss);
if (ret) {
+ of_node_put(dcss->of_port);
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_clocks_disable(dcss);
}
+ of_node_put(dcss->of_port);
+
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index f50814e9d549..b4f82ebca532 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
drm_kms_helper_poll_init(drm);
- drm_bridge_connector_enable_hpd(kms->connector);
-
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 8989e215dfc9..011be7ff51e1 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -111,6 +111,12 @@ int lima_devfreq_init(struct lima_device *ldev)
struct dev_pm_opp *opp;
unsigned long cur_freq;
int ret;
+ const char *regulator_names[] = { "mali", NULL };
+ const char *clk_names[] = { "core", NULL };
+ struct dev_pm_opp_config config = {
+ .regulator_names = regulator_names,
+ .clk_names = clk_names,
+ };
if (!device_property_present(dev, "operating-points-v2"))
/* Optional, continue without devfreq */
@@ -118,11 +124,7 @@ int lima_devfreq_init(struct lima_device *ldev)
spin_lock_init(&ldevfreq->lock);
- ret = devm_pm_opp_set_clkname(dev, "core");
- if (ret)
- return ret;
-
- ret = devm_pm_opp_set_regulators(dev, (const char *[]){ "mali" }, 1);
+ ret = devm_pm_opp_set_config(dev, &config);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV)
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index ef386d7b9450..fcf0d493782c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 259f3e6bec90..bb7e109534de 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
- VIU_OSD_BLEND_REORDER(1, 0) |
- VIU_OSD_BLEND_REORDER(2, 0) |
- VIU_OSD_BLEND_REORDER(3, 0) |
- VIU_OSD_BLEND_DIN_EN(1) |
- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
- VIU_OSD_BLEND_HOLD_LINES(4),
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
+ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
+ (u32)VIU_OSD_BLEND_DIN_EN(1) |
+ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
+ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index f6ab3b5586ce..dd732215d55b 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -22,14 +22,6 @@
#define DP_DEFAULT_P0_OFFSET 0x1000
#define DP_DEFAULT_P0_SIZE 0x0400
-static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda-1p2", 21800, 4 }, /* 1.2 V */
- {"vdda-0p9", 36000, 32 }, /* 0.9 V */
- },
-};
-
static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
struct resource *res;
@@ -283,12 +275,6 @@ static int dp_parser_parse(struct dp_parser *parser)
if (rc)
return rc;
- /* Map the corresponding regulator information according to
- * version. Currently, since we only have one supported platform,
- * mapping the regulator directly.
- */
- parser->regulator_cfg = &sdm845_dp_reg_cfg;
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 9abddc6d50c0..866c1a82bf1a 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -91,8 +91,6 @@ struct dp_pinctrl {
struct pinctrl_state *state_suspend;
};
-#define DP_DEV_REGULATOR_MAX 4
-
/* Regulators for DP devices */
struct dp_reg_entry {
char name[32];
@@ -100,11 +98,6 @@ struct dp_reg_entry {
int disable_load;
};
-struct dp_regulator_cfg {
- int num;
- struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
-};
-
struct dss_module_power {
unsigned int num_clk;
struct clk_bulk_data *clocks;
@@ -125,7 +118,6 @@ struct dp_parser {
struct dp_pinctrl pinctrl;
struct dp_io io;
struct dp_display_data disp_data;
- const struct dp_regulator_cfg *regulator_cfg;
u32 max_dp_lanes;
struct drm_bridge *next_bridge;
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index b415b35c2b8c..c0aaabb03389 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -20,82 +20,10 @@ struct dp_power_private {
struct clk *link_clk_src;
struct clk *pixel_provider;
struct clk *link_provider;
- struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX];
struct dp_power dp_power;
};
-static void dp_power_regulator_disable(struct dp_power_private *power)
-{
- struct regulator_bulk_data *s = power->supplies;
- const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
- int num = power->parser->regulator_cfg->num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer,
- regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dp_power_regulator_enable(struct dp_power_private *power)
-{
- struct regulator_bulk_data *s = power->supplies;
- const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
- int num = power->parser->regulator_cfg->num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- pr_err("regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- pr_err("regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
-static int dp_power_regulator_init(struct dp_power_private *power)
-{
- struct regulator_bulk_data *s = power->supplies;
- const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
- struct platform_device *pdev = power->pdev;
- int num = power->parser->regulator_cfg->num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(&pdev->dev, num, s);
- if (ret < 0) {
- pr_err("%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
static int dp_power_clk_init(struct dp_power_private *power)
{
int rc = 0;
@@ -246,21 +174,10 @@ int dp_power_client_init(struct dp_power *dp_power)
pm_runtime_enable(&power->pdev->dev);
- rc = dp_power_regulator_init(power);
- if (rc) {
- DRM_ERROR("failed to init regulators %d\n", rc);
- goto error;
- }
-
rc = dp_power_clk_init(power);
- if (rc) {
+ if (rc)
DRM_ERROR("failed to init clocks %d\n", rc);
- goto error;
- }
- return 0;
-error:
- pm_runtime_disable(&power->pdev->dev);
return rc;
}
@@ -291,22 +208,15 @@ int dp_power_init(struct dp_power *dp_power, bool flip)
power = container_of(dp_power, struct dp_power_private, dp_power);
pm_runtime_get_sync(&power->pdev->dev);
- rc = dp_power_regulator_enable(power);
- if (rc) {
- DRM_ERROR("failed to enable regulators, %d\n", rc);
- goto exit;
- }
rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
if (rc) {
DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
- goto err_clk;
+ goto exit;
}
return 0;
-err_clk:
- dp_power_regulator_disable(power);
exit:
pm_runtime_put_sync(&power->pdev->dev);
return rc;
@@ -319,7 +229,6 @@ int dp_power_deinit(struct dp_power *dp_power)
power = container_of(dp_power, struct dp_power_private, dp_power);
dp_power_clk_enable(dp_power, DP_CORE_PM, false);
- dp_power_regulator_disable(power);
pm_runtime_put_sync(&power->pdev->dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 6e39d959b9f0..0317055e3253 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -221,7 +221,7 @@ void msm_gem_shrinker_init(struct drm_device *dev)
priv->shrinker.count_objects = msm_gem_shrinker_count;
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&priv->shrinker));
+ WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 7ba66ad68a8a..16356611b5b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -680,7 +680,11 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
goto out_free_dma;
for (i = 0; i < npages; i += max) {
- args.end = start + (max << PAGE_SHIFT);
+ if (args.start + (max << PAGE_SHIFT) > end)
+ args.end = end;
+ else
+ args.end = args.start + (max << PAGE_SHIFT);
+
ret = migrate_vma_setup(&args);
if (ret)
goto out_free_pfns;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 568182e68dd7..d8cf71fb0512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2605,6 +2605,27 @@ nv172_chipset = {
};
static const struct nvkm_device_chip
+nv173_chipset = {
+ .name = "GA103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = { 0x00000001, tu102_bar_new },
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 4dfcc0758e06..060f4f98bc04 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -738,7 +738,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
desc->delay.hpd_reliable = reliable_ms;
of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
- desc->delay.hpd_reliable = absent_ms;
+ desc->delay.hpd_absent = absent_ms;
/* Power the panel on so we can read the EDID */
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 194af7f607a6..5110cd9b2425 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -101,8 +101,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return 0;
}
- ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
- pfdev->comp->num_supplies);
+ ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index c58075bc096e..2fa5afe21288 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -433,8 +433,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
- list_add_tail(&bo->base.madv_list,
- &pfdev->shrinker_list);
+ list_move_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
@@ -626,24 +626,29 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
-static const char * const default_supplies[] = { "mali" };
+/*
+ * The OPP core wants the supply names to be NULL terminated, but we need the
+ * correct num_supplies value for regulator core. Hence, we NULL terminate here
+ * and then initialize num_supplies with ARRAY_SIZE - 1.
+ */
+static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.num_pm_domains = 1, /* optional */
.pm_domain_names = NULL,
};
static const struct panfrost_compatible amlogic_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
+ .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
.supply_names = mediatek_mt8183_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 77e7cb6d1ae3..bf0170782f25 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -103,7 +103,7 @@ void panfrost_gem_shrinker_init(struct drm_device *dev)
pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
pfdev->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&pfdev->shrinker));
+ WARN_ON(register_shrinker(&pfdev->shrinker, "drm-panfrost"));
}
/**
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 963d8e1997d5..e246d914e7f6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -528,7 +528,7 @@ err_map:
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_bo:
- drm_gem_object_put(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a215ff1363cd..a556b6be1137 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1604,6 +1604,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e765abcb3b01..04c693ca419a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1899,7 +1899,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
* to false since we want to wait for vbl to avoid flicker.
*/
if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
- jiffies > rdev->pm.dynpm_action_timeout) {
+ time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 49831c738d4d..813f9f8c8698 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -23,6 +23,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_detach_device(...) ({ })
+#define arm_iommu_release_mapping(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
@@ -49,6 +57,15 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
if (!private->domain)
return 0;
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ if (mapping) {
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+ }
+
ret = iommu_attach_device(private->domain, dev);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 79e8e2017c68..c95221fff474 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -352,7 +352,7 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
/* Set precharge period in number of ticks from the internal clock */
precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) |
- SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep2));
+ SSD130X_SET_PRECHARGE_PERIOD2_SET(ssd130x->prechargep2));
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 61a034a01764..cb82622877d2 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1176,12 +1176,12 @@ static int hdmi_audio_hw_params(struct device *dev,
DRM_DEBUG_DRIVER("\n");
if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index b4dfa166eccd..34234a144e87 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f066e8124c50..7c8e8be774f1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -911,7 +911,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1bba0a0ed3f9..21b61631f73a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -722,7 +722,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker.count_objects = ttm_pool_shrinker_count;
mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
mm_shrinker.seeks = 1;
- return register_shrinker(&mm_shrinker);
+ return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}
/**
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b2d0a3c89fbb..64f9feabf43e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2345,6 +2345,7 @@ out:
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
+ .legacy_dai_naming = 1,
};
static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai)
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 300abb4d1dfe..d4950688b3f1 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -1,27 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
-#include <drm/drm_rect.h>
+#include <linux/kernel.h>
#include <linux/minmax.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_fixed.h>
#include "vkms_formats.h"
-/* The following macros help doing fixed point arithmetic. */
-/*
- * With Fixed-Point scale 15 we have 17 and 15 bits of integer and fractional
- * parts respectively.
- * | 0000 0000 0000 0000 0.000 0000 0000 0000 |
- * 31 0
- */
-#define SHIFT 15
-
-#define INT_TO_FIXED(a) ((a) << SHIFT)
-#define FIXED_MUL(a, b) ((s32)(((s64)(a) * (b)) >> SHIFT))
-#define FIXED_DIV(a, b) ((s32)(((s64)(a) << SHIFT) / (b)))
-/* This macro converts a fixed point number to int, and round half up it */
-#define FIXED_TO_INT_ROUND(a) (((a) + (1 << (SHIFT - 1))) >> SHIFT)
-#define INT_TO_FIXED_DIV(a, b) (FIXED_DIV(INT_TO_FIXED(a), INT_TO_FIXED(b)))
-#define INT_TO_FIXED_DIV(a, b) (FIXED_DIV(INT_TO_FIXED(a), INT_TO_FIXED(b)))
-
static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
{
return frame_info->offset + (y * frame_info->pitch)
@@ -137,19 +122,19 @@ static void RGB565_to_argb_u16(struct line_buffer *stage_buffer,
int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
stage_buffer->n_pixels);
- s32 fp_rb_ratio = INT_TO_FIXED_DIV(65535, 31);
- s32 fp_g_ratio = INT_TO_FIXED_DIV(65535, 63);
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
for (size_t x = 0; x < x_limit; x++, src_pixels++) {
u16 rgb_565 = le16_to_cpu(*src_pixels);
- s32 fp_r = INT_TO_FIXED((rgb_565 >> 11) & 0x1f);
- s32 fp_g = INT_TO_FIXED((rgb_565 >> 5) & 0x3f);
- s32 fp_b = INT_TO_FIXED(rgb_565 & 0x1f);
+ s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
+ s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
+ s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
out_pixels[x].a = (u16)0xffff;
- out_pixels[x].r = FIXED_TO_INT_ROUND(FIXED_MUL(fp_r, fp_rb_ratio));
- out_pixels[x].g = FIXED_TO_INT_ROUND(FIXED_MUL(fp_g, fp_g_ratio));
- out_pixels[x].b = FIXED_TO_INT_ROUND(FIXED_MUL(fp_b, fp_rb_ratio));
+ out_pixels[x].r = drm_fixp2int(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixels[x].g = drm_fixp2int(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixels[x].b = drm_fixp2int(drm_fixp_mul(fp_b, fp_rb_ratio));
}
}
@@ -248,17 +233,17 @@ static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
src_buffer->n_pixels);
- s32 fp_rb_ratio = INT_TO_FIXED_DIV(65535, 31);
- s32 fp_g_ratio = INT_TO_FIXED_DIV(65535, 63);
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
for (size_t x = 0; x < x_limit; x++, dst_pixels++) {
- s32 fp_r = INT_TO_FIXED(in_pixels[x].r);
- s32 fp_g = INT_TO_FIXED(in_pixels[x].g);
- s32 fp_b = INT_TO_FIXED(in_pixels[x].b);
+ s64 fp_r = drm_int2fixp(in_pixels[x].r);
+ s64 fp_g = drm_int2fixp(in_pixels[x].g);
+ s64 fp_b = drm_int2fixp(in_pixels[x].b);
- u16 r = FIXED_TO_INT_ROUND(FIXED_DIV(fp_r, fp_rb_ratio));
- u16 g = FIXED_TO_INT_ROUND(FIXED_DIV(fp_g, fp_g_ratio));
- u16 b = FIXED_TO_INT_ROUND(FIXED_DIV(fp_b, fp_rb_ratio));
+ u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+ u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+ u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
*dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
}
diff --git a/drivers/hid/.kunitconfig b/drivers/hid/.kunitconfig
new file mode 100644
index 000000000000..04daeff5c970
--- /dev/null
+++ b/drivers/hid/.kunitconfig
@@ -0,0 +1,5 @@
+CONFIG_KUNIT=y
+CONFIG_USB=y
+CONFIG_USB_HID=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_KUNIT_TEST=y
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 70da5931082f..6ce92830b5d1 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1306,6 +1306,22 @@ config HID_MCP2221
To compile this driver as a module, choose M here: the module
will be called hid-mcp2221.ko.
+config HID_KUNIT_TEST
+ bool "KUnit tests for HID" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y
+ depends on HID_UCLOGIC
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for HID. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on HID and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
endmenu
endif # HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index cac2cbe26d11..b0bef8098139 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -144,6 +144,9 @@ obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o
obj-$(CONFIG_HID_SENSOR_CUSTOM_SENSOR) += hid-sensor-custom.o
+obj-$(CONFIG_HID_KUNIT_TEST) += hid-uclogic-rdesc.o \
+ hid-uclogic-rdesc-test.o
+
obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
diff --git a/drivers/hid/amd-sfh-hid/Makefile b/drivers/hid/amd-sfh-hid/Makefile
index 35e704da5612..0222170ab7ad 100644
--- a/drivers/hid/amd-sfh-hid/Makefile
+++ b/drivers/hid/amd-sfh-hid/Makefile
@@ -9,5 +9,8 @@ amd_sfh-objs := amd_sfh_hid.o
amd_sfh-objs += amd_sfh_client.o
amd_sfh-objs += amd_sfh_pcie.o
amd_sfh-objs += hid_descriptor/amd_sfh_hid_desc.o
+amd_sfh-objs += sfh1_1/amd_sfh_init.o
+amd_sfh-objs += sfh1_1/amd_sfh_interface.o
+amd_sfh-objs += sfh1_1/amd_sfh_desc.o
ccflags-y += -I $(srctree)/$(src)/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 0f770a2b47ff..8275bba63611 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -18,18 +18,6 @@
#include "amd_sfh_pcie.h"
#include "amd_sfh_hid.h"
-
-struct request_list {
- struct hid_device *hid;
- struct list_head list;
- u8 report_id;
- u8 sensor_idx;
- u8 report_type;
- u8 current_index;
-};
-
-static struct request_list req_list;
-
void amd_sfh_set_report(struct hid_device *hid, int report_id,
int report_type)
{
@@ -50,6 +38,7 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
{
struct amdtp_hid_data *hid_data = hid->driver_data;
struct amdtp_cl_data *cli_data = hid_data->cli_data;
+ struct request_list *req_list = &cli_data->req_list;
int i;
for (i = 0; i < cli_data->num_hid_devices; i++) {
@@ -66,7 +55,7 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
new->report_id = report_id;
cli_data->report_id[i] = report_id;
cli_data->request_done[i] = false;
- list_add(&new->list, &req_list.list);
+ list_add(&new->list, &req_list->list);
break;
}
}
@@ -74,16 +63,19 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
return 0;
}
-static void amd_sfh_work(struct work_struct *work)
+void amd_sfh_work(struct work_struct *work)
{
struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work.work);
+ struct request_list *req_list = &cli_data->req_list;
struct amd_input_data *in_data = cli_data->in_data;
struct request_list *req_node;
u8 current_index, sensor_index;
+ struct amd_mp2_ops *mp2_ops;
+ struct amd_mp2_dev *mp2;
u8 report_id, node_type;
u8 report_size = 0;
- req_node = list_last_entry(&req_list.list, struct request_list, list);
+ req_node = list_last_entry(&req_list->list, struct request_list, list);
list_del(&req_node->list);
current_index = req_node->current_index;
sensor_index = req_node->sensor_idx;
@@ -91,9 +83,11 @@ static void amd_sfh_work(struct work_struct *work)
node_type = req_node->report_type;
kfree(req_node);
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ mp2_ops = mp2->mp2_ops;
if (node_type == HID_FEATURE_REPORT) {
- report_size = get_feature_report(sensor_index, report_id,
- cli_data->feature_report[current_index]);
+ report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
+ cli_data->feature_report[current_index]);
if (report_size)
hid_input_report(cli_data->hid_sensor_hubs[current_index],
cli_data->report_type[current_index],
@@ -102,7 +96,7 @@ static void amd_sfh_work(struct work_struct *work)
pr_err("AMDSFH: Invalid report size\n");
} else if (node_type == HID_INPUT_REPORT) {
- report_size = get_input_report(current_index, sensor_index, report_id, in_data);
+ report_size = mp2_ops->get_in_rep(current_index, sensor_index, report_id, in_data);
if (report_size)
hid_input_report(cli_data->hid_sensor_hubs[current_index],
cli_data->report_type[current_index],
@@ -115,17 +109,19 @@ static void amd_sfh_work(struct work_struct *work)
amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
}
-static void amd_sfh_work_buffer(struct work_struct *work)
+void amd_sfh_work_buffer(struct work_struct *work)
{
struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work_buffer.work);
struct amd_input_data *in_data = cli_data->in_data;
+ struct amd_mp2_dev *mp2;
u8 report_size;
int i;
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
- report_size = get_input_report
- (i, cli_data->sensor_idx[i], cli_data->report_id[i], in_data);
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
+ cli_data->report_id[i], in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
in_data->input_report[i], report_size, 0);
}
@@ -133,7 +129,7 @@ static void amd_sfh_work_buffer(struct work_struct *work)
schedule_delayed_work(&cli_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
}
-u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
+static u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
{
if (mp2->mp2_ops->response)
sensor_sts = mp2->mp2_ops->response(mp2, sid, sensor_sts);
@@ -141,7 +137,7 @@ u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
return sensor_sts;
}
-const char *get_sensor_name(int idx)
+static const char *get_sensor_name(int idx)
{
switch (idx) {
case accel_idx:
@@ -159,24 +155,82 @@ const char *get_sensor_name(int idx)
}
}
+static void amd_sfh_resume(struct amd_mp2_dev *mp2)
+{
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ struct amd_mp2_sensor_info info;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
+ info.period = AMD_SFH_IDLE_LOOP;
+ info.sensor_idx = cl_data->sensor_idx[i];
+ info.dma_address = cl_data->sensor_dma_addr[i];
+ mp2->mp2_ops->start(mp2, info);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], SENSOR_ENABLED);
+ if (status == SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ dev_dbg(&mp2->pdev->dev, "resume sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ }
+ }
+
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+}
+
+static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
+{
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] != HPD_IDX &&
+ cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], SENSOR_DISABLED);
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(&mp2->pdev->dev, "suspend sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ }
+ }
+
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+}
+
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
{
struct amd_input_data *in_data = &privdata->in_data;
struct amdtp_cl_data *cl_data = privdata->cl_data;
+ struct amd_mp2_ops *mp2_ops = privdata->mp2_ops;
struct amd_mp2_sensor_info info;
+ struct request_list *req_list;
struct device *dev;
u32 feature_report_size;
u32 input_report_size;
int rc, i, status;
u8 cl_idx;
+ req_list = &cl_data->req_list;
dev = &privdata->pdev->dev;
+ amd_sfh_set_desc_ops(mp2_ops);
+
+ mp2_ops->suspend = amd_sfh_suspend;
+ mp2_ops->resume = amd_sfh_resume;
cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+ if (cl_data->num_hid_devices == 0)
+ return -ENODEV;
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
- INIT_LIST_HEAD(&req_list.list);
+ INIT_LIST_HEAD(&req_list->list);
cl_data->in_data = in_data;
for (i = 0; i < cl_data->num_hid_devices; i++) {
@@ -187,17 +241,17 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
cl_idx = cl_data->sensor_idx[i];
- cl_data->report_descr_sz[i] = get_descr_sz(cl_idx, descr_size);
+ cl_data->report_descr_sz[i] = mp2_ops->get_desc_sz(cl_idx, descr_size);
if (!cl_data->report_descr_sz[i]) {
rc = -EINVAL;
goto cleanup;
}
- feature_report_size = get_descr_sz(cl_idx, feature_size);
+ feature_report_size = mp2_ops->get_desc_sz(cl_idx, feature_size);
if (!feature_report_size) {
rc = -EINVAL;
goto cleanup;
}
- input_report_size = get_descr_sz(cl_idx, input_size);
+ input_report_size = mp2_ops->get_desc_sz(cl_idx, input_size);
if (!input_report_size) {
rc = -EINVAL;
goto cleanup;
@@ -222,17 +276,17 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = -ENOMEM;
goto cleanup;
}
- rc = get_report_descriptor(cl_idx, cl_data->report_descr[i]);
+ rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
if (rc)
return rc;
- privdata->mp2_ops->start(privdata, info);
+ mp2_ops->start(privdata, info);
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
if (status == SENSOR_ENABLED) {
cl_data->sensor_sts[i] = SENSOR_ENABLED;
rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
if (rc) {
- privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
if (status != SENSOR_ENABLED)
@@ -248,8 +302,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}
- if (privdata->mp2_ops->discovery_status &&
- privdata->mp2_ops->discovery_status(privdata) == 0) {
+ if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
devm_kfree(dev, cl_data->feature_report[i]);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
new file mode 100644
index 000000000000..2643bb14fee2
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD MP2 common macros and structures
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef AMD_SFH_COMMON_H
+#define AMD_SFH_COMMON_H
+
+#include <linux/pci.h>
+#include "amd_sfh_hid.h"
+
+#define PCI_DEVICE_ID_AMD_MP2 0x15E4
+#define PCI_DEVICE_ID_AMD_MP2_1_1 0x164A
+
+#define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4))
+#define AMD_P2C_MSG(regno) (0x10680 + ((regno) * 4))
+
+#define SENSOR_ENABLED 4
+#define SENSOR_DISABLED 5
+
+#define AMD_SFH_IDLE_LOOP 200
+
+enum cmd_id {
+ NO_OP,
+ ENABLE_SENSOR,
+ DISABLE_SENSOR,
+ STOP_ALL_SENSORS = 8,
+};
+
+struct amd_mp2_sensor_info {
+ u8 sensor_idx;
+ u32 period;
+ dma_addr_t dma_address;
+};
+
+struct amd_mp2_dev {
+ struct pci_dev *pdev;
+ struct amdtp_cl_data *cl_data;
+ void __iomem *mmio;
+ void __iomem *vsbase;
+ const struct amd_sfh1_1_ops *sfh1_1_ops;
+ struct amd_mp2_ops *mp2_ops;
+ struct amd_input_data in_data;
+ /* mp2 active control status */
+ u32 mp2_acs;
+};
+
+struct amd_mp2_ops {
+ void (*start)(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
+ void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
+ void (*stop_all)(struct amd_mp2_dev *privdata);
+ int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
+ int (*discovery_status)(struct amd_mp2_dev *privdata);
+ void (*suspend)(struct amd_mp2_dev *mp2);
+ void (*resume)(struct amd_mp2_dev *mp2);
+ void (*remove)(void *privdata);
+ int (*get_rep_desc)(int sensor_idx, u8 rep_desc[]);
+ u32 (*get_desc_sz)(int sensor_idx, int descriptor_name);
+ u8 (*get_feat_rep)(int sensor_idx, int report_id, u8 *feature_report);
+ u8 (*get_in_rep)(u8 current_index, int sensor_idx, int report_id,
+ struct amd_input_data *in_data);
+};
+
+void amd_sfh_work(struct work_struct *work);
+void amd_sfh_work_buffer(struct work_struct *work);
+void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata);
+int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata);
+void amd_sfh_clear_intr(struct amd_mp2_dev *privdata);
+int amd_sfh_irq_init(struct amd_mp2_dev *privdata);
+#endif
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
index 1089134030b0..1b18291fc5af 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -101,11 +101,15 @@ static int amdtp_wait_for_response(struct hid_device *hid)
void amdtp_hid_wakeup(struct hid_device *hid)
{
- struct amdtp_hid_data *hid_data = hid->driver_data;
- struct amdtp_cl_data *cli_data = hid_data->cli_data;
+ struct amdtp_hid_data *hid_data;
+ struct amdtp_cl_data *cli_data;
- cli_data->request_done[cli_data->cur_hid_dev] = true;
- wake_up_interruptible(&hid_data->hid_wait);
+ if (hid) {
+ hid_data = hid->driver_data;
+ cli_data = hid_data->cli_data;
+ cli_data->request_done[cli_data->cur_hid_dev] = true;
+ wake_up_interruptible(&hid_data->hid_wait);
+ }
}
static struct hid_ll_driver amdtp_hid_ll_driver = {
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index ad264db63180..3754fb423e3a 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -15,6 +15,15 @@
#define AMD_SFH_HID_VENDOR 0x1022
#define AMD_SFH_HID_PRODUCT 0x0001
+struct request_list {
+ struct hid_device *hid;
+ struct list_head list;
+ u8 report_id;
+ u8 sensor_idx;
+ u8 report_type;
+ u8 current_index;
+};
+
struct amd_input_data {
u32 *sensor_virt_addr[MAX_HID_DEVICES];
u8 *input_report[MAX_HID_DEVICES];
@@ -43,6 +52,7 @@ struct amdtp_cl_data {
struct amd_input_data *in_data;
struct delayed_work work;
struct delayed_work work_buffer;
+ struct request_list req_list;
};
/**
@@ -69,6 +79,4 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data);
int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type);
void amd_sfh_set_report(struct hid_device *hid, int report_id, int report_type);
void amdtp_hid_wakeup(struct hid_device *hid);
-u8 get_input_report(u8 current_index, int sensor_idx, int report_id,
- struct amd_input_data *in_data);
#endif
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index dadc491bbf6b..4b90c86ee5f8 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include "amd_sfh_pcie.h"
+#include "sfh1_1/amd_sfh_init.h"
#define DRIVER_NAME "pcie_mp2_amd"
#define DRIVER_DESC "AMD(R) PCIe MP2 Communication Driver"
@@ -92,7 +93,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
-static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
{
if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
writel(0, privdata->mmio + AMD_P2C_MSG(4));
@@ -100,7 +101,7 @@ static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
}
}
-static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
{
if (privdata->mp2_ops->clear_intr)
privdata->mp2_ops->clear_intr(privdata);
@@ -113,7 +114,7 @@ static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
{
int rc;
@@ -136,7 +137,7 @@ static int amd_sfh_dis_sts_v2(struct amd_mp2_dev *privdata)
SENSOR_DISCOVERY_STATUS_MASK) >> SENSOR_DISCOVERY_STATUS_SHIFT;
}
-void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
union sfh_cmd_base cmd_base;
@@ -157,7 +158,7 @@ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info i
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
-void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
+static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
{
union sfh_cmd_base cmd_base;
@@ -171,7 +172,7 @@ void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
-void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
+static void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
{
union sfh_cmd_base cmd_base;
@@ -244,7 +245,7 @@ static void amd_mp2_pci_remove(void *privdata)
amd_sfh_clear_intr(mp2);
}
-static const struct amd_mp2_ops amd_sfh_ops_v2 = {
+static struct amd_mp2_ops amd_sfh_ops_v2 = {
.start = amd_start_sensor_v2,
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
@@ -252,12 +253,14 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.clear_intr = amd_sfh_clear_intr_v2,
.init_intr = amd_sfh_irq_init_v2,
.discovery_status = amd_sfh_dis_sts_v2,
+ .remove = amd_mp2_pci_remove,
};
-static const struct amd_mp2_ops amd_sfh_ops = {
+static struct amd_mp2_ops amd_sfh_ops = {
.start = amd_start_sensor,
.stop = amd_stop_sensor,
.stop_all = amd_stop_all_sensors,
+ .remove = amd_mp2_pci_remove,
};
static void mp2_select_ops(struct amd_mp2_dev *privdata)
@@ -277,7 +280,7 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata)
}
}
-static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
{
if (privdata->mp2_ops->init_intr)
return privdata->mp2_ops->init_intr(privdata);
@@ -316,6 +319,14 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
+ privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
+ if (privdata->sfh1_1_ops) {
+ rc = privdata->sfh1_1_ops->init(privdata);
+ if (rc)
+ return rc;
+ goto init_done;
+ }
+
mp2_select_ops(privdata);
rc = amd_sfh_irq_init(privdata);
@@ -327,40 +338,22 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
rc = amd_sfh_hid_client_init(privdata);
if (rc) {
amd_sfh_clear_intr(privdata);
- dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
+ if (rc != -EOPNOTSUPP)
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
}
+init_done:
amd_sfh_clear_intr(privdata);
- return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+ return devm_add_action_or_reset(&pdev->dev, privdata->mp2_ops->remove, privdata);
}
static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
{
struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
- struct amdtp_cl_data *cl_data = mp2->cl_data;
- struct amd_mp2_sensor_info info;
- int i, status;
-
- for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
- info.period = AMD_SFH_IDLE_LOOP;
- info.sensor_idx = cl_data->sensor_idx[i];
- info.dma_address = cl_data->sensor_dma_addr[i];
- mp2->mp2_ops->start(mp2, info);
- status = amd_sfh_wait_for_response
- (mp2, cl_data->sensor_idx[i], SENSOR_ENABLED);
- if (status == SENSOR_ENABLED)
- cl_data->sensor_sts[i] = SENSOR_ENABLED;
- dev_dbg(dev, "suspend sid 0x%x (%s) status 0x%x\n",
- cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
- cl_data->sensor_sts[i]);
- }
- }
- schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
- amd_sfh_clear_intr(mp2);
+ mp2->mp2_ops->resume(mp2);
return 0;
}
@@ -368,25 +361,8 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
{
struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
- struct amdtp_cl_data *cl_data = mp2->cl_data;
- int i, status;
-
- for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (cl_data->sensor_idx[i] != HPD_IDX &&
- cl_data->sensor_sts[i] == SENSOR_ENABLED) {
- mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
- status = amd_sfh_wait_for_response
- (mp2, cl_data->sensor_idx[i], SENSOR_DISABLED);
- if (status != SENSOR_ENABLED)
- cl_data->sensor_sts[i] = SENSOR_DISABLED;
- dev_dbg(dev, "suspend sid 0x%x (%s) status 0x%x\n",
- cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
- cl_data->sensor_sts[i]);
- }
- }
- cancel_delayed_work_sync(&cl_data->work_buffer);
- amd_sfh_clear_intr(mp2);
+ mp2->mp2_ops->suspend(mp2);
return 0;
}
@@ -396,6 +372,8 @@ static SIMPLE_DEV_PM_OPS(amd_mp2_pm_ops, amd_mp2_pci_suspend,
static const struct pci_device_id amd_mp2_pci_tbl[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2_1_1),
+ .driver_data = (kernel_ulong_t)&sfh1_1_ops },
{ }
};
MODULE_DEVICE_TABLE(pci, amd_mp2_pci_tbl);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 8c760526132a..dfb7cabd82ef 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -10,35 +10,20 @@
#ifndef PCIE_MP2_AMD_H
#define PCIE_MP2_AMD_H
-#include <linux/pci.h>
-#include "amd_sfh_hid.h"
-
-#define PCI_DEVICE_ID_AMD_MP2 0x15E4
-
-#define ENABLE_SENSOR 1
-#define DISABLE_SENSOR 2
-#define STOP_ALL_SENSORS 8
+#include "amd_sfh_common.h"
/* MP2 C2P Message Registers */
#define AMD_C2P_MSG0 0x10500
#define AMD_C2P_MSG1 0x10504
#define AMD_C2P_MSG2 0x10508
-#define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4))
-#define AMD_P2C_MSG(regno) (0x10680 + ((regno) * 4))
-
/* MP2 P2C Message Registers */
#define AMD_P2C_MSG3 0x1068C /* Supported Sensors info */
#define V2_STATUS 0x2
-#define SENSOR_ENABLED 4
-#define SENSOR_DISABLED 5
-
#define HPD_IDX 16
-#define AMD_SFH_IDLE_LOOP 200
-
#define SENSOR_DISCOVERY_STATUS_MASK GENMASK(5, 3)
#define SENSOR_DISCOVERY_STATUS_SHIFT 3
@@ -96,22 +81,6 @@ enum sensor_idx {
als_idx = 19
};
-struct amd_mp2_dev {
- struct pci_dev *pdev;
- struct amdtp_cl_data *cl_data;
- void __iomem *mmio;
- const struct amd_mp2_ops *mp2_ops;
- struct amd_input_data in_data;
- /* mp2 active control status */
- u32 mp2_acs;
-};
-
-struct amd_mp2_sensor_info {
- u8 sensor_idx;
- u32 period;
- dma_addr_t dma_address;
-};
-
enum mem_use_type {
USE_DRAM,
USE_C2P_REG,
@@ -129,24 +98,9 @@ struct hpd_status {
};
};
-void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
-void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx);
-void amd_stop_all_sensors(struct amd_mp2_dev *privdata);
int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id);
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata);
int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata);
-u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
-void amd_mp2_suspend(struct amd_mp2_dev *mp2);
-void amd_mp2_resume(struct amd_mp2_dev *mp2);
-const char *get_sensor_name(int idx);
-
-struct amd_mp2_ops {
- void (*start)(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
- void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
- void (*stop_all)(struct amd_mp2_dev *privdata);
- int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
- void (*clear_intr)(struct amd_mp2_dev *privdata);
- int (*init_intr)(struct amd_mp2_dev *privdata);
- int (*discovery_status)(struct amd_mp2_dev *privdata);
-};
+void amd_sfh_set_desc_ops(struct amd_mp2_ops *mp2_ops);
+
#endif
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index 76095bd53c65..f9a8c02d5a7b 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -29,7 +29,7 @@
#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
#define ILLUMINANCE_MASK GENMASK(14, 0)
-int get_report_descriptor(int sensor_idx, u8 *rep_desc)
+static int get_report_descriptor(int sensor_idx, u8 *rep_desc)
{
switch (sensor_idx) {
case accel_idx: /* accel */
@@ -63,7 +63,7 @@ int get_report_descriptor(int sensor_idx, u8 *rep_desc)
return 0;
}
-u32 get_descr_sz(int sensor_idx, int descriptor_name)
+static u32 get_descr_sz(int sensor_idx, int descriptor_name)
{
switch (sensor_idx) {
case accel_idx:
@@ -133,7 +133,7 @@ static void get_common_features(struct common_feature_property *common, int repo
common->report_interval = HID_DEFAULT_REPORT_INTERVAL;
}
-u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report)
+static u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report)
{
struct accel3_feature_report acc_feature;
struct gyro_feature_report gyro_feature;
@@ -200,7 +200,8 @@ static void get_common_inputs(struct common_input_property *common, int report_i
common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
}
-u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_input_data *in_data)
+static u8 get_input_report(u8 current_index, int sensor_idx, int report_id,
+ struct amd_input_data *in_data)
{
struct amd_mp2_dev *privdata = container_of(in_data, struct amd_mp2_dev, in_data);
u32 *sensor_virt_addr = in_data->sensor_virt_addr[current_index];
@@ -267,3 +268,11 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
}
return report_size;
}
+
+void amd_sfh_set_desc_ops(struct amd_mp2_ops *mp2_ops)
+{
+ mp2_ops->get_rep_desc = get_report_descriptor;
+ mp2_ops->get_feat_rep = get_feature_report;
+ mp2_ops->get_in_rep = get_input_report;
+ mp2_ops->get_desc_sz = get_descr_sz;
+}
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
index 70b1b7abe2c6..ebd55675eb62 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
@@ -111,7 +111,4 @@ struct hpd_input_report {
u8 human_presence;
} __packed;
-int get_report_descriptor(int sensor_idx, u8 rep_desc[]);
-u32 get_descr_sz(int sensor_idx, int descriptor_name);
-u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report);
#endif
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
new file mode 100644
index 000000000000..0609fea581c9
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP2 1.1 descriptor interfaces
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include <linux/hid-sensor-ids.h>
+
+#include "amd_sfh_interface.h"
+#include "../hid_descriptor/amd_sfh_hid_desc.h"
+#include "../hid_descriptor/amd_sfh_hid_report_desc.h"
+
+#define SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x41
+#define SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x51
+#define HID_DEFAULT_REPORT_INTERVAL 0x50
+#define HID_DEFAULT_MIN_VALUE 0X7F
+#define HID_DEFAULT_MAX_VALUE 0x80
+#define HID_DEFAULT_SENSITIVITY 0x7F
+#define HID_USAGE_SENSOR_PROPERTY_CONNECTION_TYPE_PC_INTEGRATED_ENUM 0x01
+/* state enums */
+#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
+#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
+#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+
+static int get_report_desc(int sensor_idx, u8 *rep_desc)
+{
+ switch (sensor_idx) {
+ case ACCEL_IDX: /* accelerometer */
+ memset(rep_desc, 0, sizeof(accel3_report_descriptor));
+ memcpy(rep_desc, accel3_report_descriptor,
+ sizeof(accel3_report_descriptor));
+ break;
+ case GYRO_IDX: /* gyroscope */
+ memset(rep_desc, 0, sizeof(gyro3_report_descriptor));
+ memcpy(rep_desc, gyro3_report_descriptor,
+ sizeof(gyro3_report_descriptor));
+ break;
+ case MAG_IDX: /* magnetometer */
+ memset(rep_desc, 0, sizeof(comp3_report_descriptor));
+ memcpy(rep_desc, comp3_report_descriptor,
+ sizeof(comp3_report_descriptor));
+ break;
+ case ALS_IDX: /* ambient light sensor */
+ memset(rep_desc, 0, sizeof(als_report_descriptor));
+ memcpy(rep_desc, als_report_descriptor,
+ sizeof(als_report_descriptor));
+ break;
+ case HPD_IDX: /* HPD sensor */
+ memset(rep_desc, 0, sizeof(hpd_report_descriptor));
+ memcpy(rep_desc, hpd_report_descriptor,
+ sizeof(hpd_report_descriptor));
+ break;
+ }
+ return 0;
+}
+
+static void get_common_features(struct common_feature_property *common, int report_id)
+{
+ common->report_id = report_id;
+ common->connection_type = HID_USAGE_SENSOR_PROPERTY_CONNECTION_TYPE_PC_INTEGRATED_ENUM;
+ common->report_state = SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM;
+ common->power_state = SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
+ common->sensor_state = HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM;
+ common->report_interval = HID_DEFAULT_REPORT_INTERVAL;
+}
+
+static u8 get_feature_rep(int sensor_idx, int report_id, u8 *feature_report)
+{
+ struct magno_feature_report magno_feature;
+ struct accel3_feature_report acc_feature;
+ struct gyro_feature_report gyro_feature;
+ struct hpd_feature_report hpd_feature;
+ struct als_feature_report als_feature;
+ u8 report_size = 0;
+
+ if (!feature_report)
+ return report_size;
+
+ switch (sensor_idx) {
+ case ACCEL_IDX: /* accelerometer */
+ get_common_features(&acc_feature.common_property, report_id);
+ acc_feature.accel_change_sesnitivity = HID_DEFAULT_SENSITIVITY;
+ acc_feature.accel_sensitivity_min = HID_DEFAULT_MIN_VALUE;
+ acc_feature.accel_sensitivity_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &acc_feature, sizeof(acc_feature));
+ report_size = sizeof(acc_feature);
+ break;
+ case GYRO_IDX: /* gyroscope */
+ get_common_features(&gyro_feature.common_property, report_id);
+ gyro_feature.gyro_change_sesnitivity = HID_DEFAULT_SENSITIVITY;
+ gyro_feature.gyro_sensitivity_min = HID_DEFAULT_MIN_VALUE;
+ gyro_feature.gyro_sensitivity_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &gyro_feature, sizeof(gyro_feature));
+ report_size = sizeof(gyro_feature);
+ break;
+ case MAG_IDX: /* magnetometer */
+ get_common_features(&magno_feature.common_property, report_id);
+ magno_feature.magno_headingchange_sensitivity = HID_DEFAULT_SENSITIVITY;
+ magno_feature.heading_min = HID_DEFAULT_MIN_VALUE;
+ magno_feature.heading_max = HID_DEFAULT_MAX_VALUE;
+ magno_feature.flux_change_sensitivity = HID_DEFAULT_MIN_VALUE;
+ magno_feature.flux_min = HID_DEFAULT_MIN_VALUE;
+ magno_feature.flux_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &magno_feature, sizeof(magno_feature));
+ report_size = sizeof(magno_feature);
+ break;
+ case ALS_IDX: /* ambient light sensor */
+ get_common_features(&als_feature.common_property, report_id);
+ als_feature.als_change_sesnitivity = HID_DEFAULT_SENSITIVITY;
+ als_feature.als_sensitivity_min = HID_DEFAULT_MIN_VALUE;
+ als_feature.als_sensitivity_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &als_feature, sizeof(als_feature));
+ report_size = sizeof(als_feature);
+ break;
+ case HPD_IDX: /* human presence detection sensor */
+ get_common_features(&hpd_feature.common_property, report_id);
+ memcpy(feature_report, &hpd_feature, sizeof(hpd_feature));
+ report_size = sizeof(hpd_feature);
+ break;
+ }
+ return report_size;
+}
+
+static void get_common_inputs(struct common_input_property *common, int report_id)
+{
+ common->report_id = report_id;
+ common->sensor_state = HID_USAGE_SENSOR_STATE_READY_ENUM;
+ common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
+}
+
+static int float_to_int(u32 float32)
+{
+ int fraction, shift, mantissa, sign, exp, zeropre;
+
+ mantissa = float32 & GENMASK(22, 0);
+ sign = (float32 & BIT(31)) ? -1 : 1;
+ exp = (float32 & ~BIT(31)) >> 23;
+
+ if (!exp && !mantissa)
+ return 0;
+
+ exp -= 127;
+ if (exp < 0) {
+ exp = -exp;
+ zeropre = (((BIT(23) + mantissa) * 100) >> 23) >> exp;
+ return zeropre >= 50 ? sign : 0;
+ }
+
+ shift = 23 - exp;
+ float32 = BIT(exp) + (mantissa >> shift);
+ fraction = mantissa & GENMASK(shift - 1, 0);
+
+ return (((fraction * 100) >> shift) >= 50) ? sign * (float32 + 1) : sign * float32;
+}
+
+static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
+ struct amd_input_data *in_data)
+{
+ struct amd_mp2_dev *mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ u8 *input_report = in_data->input_report[current_index];
+ struct magno_input_report magno_input;
+ struct accel3_input_report acc_input;
+ struct gyro_input_report gyro_input;
+ struct als_input_report als_input;
+ struct hpd_input_report hpd_input;
+ struct sfh_accel_data accel_data;
+ struct sfh_gyro_data gyro_data;
+ struct sfh_mag_data mag_data;
+ struct sfh_als_data als_data;
+ struct hpd_status hpdstatus;
+ void __iomem *sensoraddr;
+ u8 report_size = 0;
+
+ if (!input_report)
+ return report_size;
+
+ switch (sensor_idx) {
+ case ACCEL_IDX: /* accelerometer */
+ sensoraddr = mp2->vsbase + (ACCEL_IDX * SENSOR_DATA_MEM_SIZE_DEFAULT) +
+ OFFSET_SENSOR_DATA_DEFAULT;
+ memcpy_fromio(&accel_data, sensoraddr, sizeof(struct sfh_accel_data));
+ get_common_inputs(&acc_input.common_property, report_id);
+ acc_input.in_accel_x_value = float_to_int(accel_data.acceldata.x) / 100;
+ acc_input.in_accel_y_value = float_to_int(accel_data.acceldata.y) / 100;
+ acc_input.in_accel_z_value = float_to_int(accel_data.acceldata.z) / 100;
+ memcpy(input_report, &acc_input, sizeof(acc_input));
+ report_size = sizeof(acc_input);
+ break;
+ case GYRO_IDX: /* gyroscope */
+ sensoraddr = mp2->vsbase + (GYRO_IDX * SENSOR_DATA_MEM_SIZE_DEFAULT) +
+ OFFSET_SENSOR_DATA_DEFAULT;
+ memcpy_fromio(&gyro_data, sensoraddr, sizeof(struct sfh_gyro_data));
+ get_common_inputs(&gyro_input.common_property, report_id);
+ gyro_input.in_angel_x_value = float_to_int(gyro_data.gyrodata.x) / 1000;
+ gyro_input.in_angel_y_value = float_to_int(gyro_data.gyrodata.y) / 1000;
+ gyro_input.in_angel_z_value = float_to_int(gyro_data.gyrodata.z) / 1000;
+ memcpy(input_report, &gyro_input, sizeof(gyro_input));
+ report_size = sizeof(gyro_input);
+ break;
+ case MAG_IDX: /* magnetometer */
+ sensoraddr = mp2->vsbase + (MAG_IDX * SENSOR_DATA_MEM_SIZE_DEFAULT) +
+ OFFSET_SENSOR_DATA_DEFAULT;
+ memcpy_fromio(&mag_data, sensoraddr, sizeof(struct sfh_mag_data));
+ get_common_inputs(&magno_input.common_property, report_id);
+ magno_input.in_magno_x = float_to_int(mag_data.magdata.x) / 100;
+ magno_input.in_magno_y = float_to_int(mag_data.magdata.y) / 100;
+ magno_input.in_magno_z = float_to_int(mag_data.magdata.z) / 100;
+ magno_input.in_magno_accuracy = mag_data.accuracy / 100;
+ memcpy(input_report, &magno_input, sizeof(magno_input));
+ report_size = sizeof(magno_input);
+ break;
+ case ALS_IDX:
+ sensoraddr = mp2->vsbase + (ALS_IDX * SENSOR_DATA_MEM_SIZE_DEFAULT) +
+ OFFSET_SENSOR_DATA_DEFAULT;
+ memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
+ get_common_inputs(&als_input.common_property, report_id);
+ als_input.illuminance_value = als_data.lux;
+ report_size = sizeof(als_input);
+ memcpy(input_report, &als_input, sizeof(als_input));
+ break;
+ case HPD_IDX:
+ get_common_inputs(&hpd_input.common_property, report_id);
+ hpdstatus.val = readl(mp2->mmio + AMD_C2P_MSG(4));
+ hpd_input.human_presence = hpdstatus.shpd.presence;
+ report_size = sizeof(hpd_input);
+ memcpy(input_report, &hpd_input, sizeof(hpd_input));
+ break;
+ }
+ return report_size;
+}
+
+static u32 get_desc_size(int sensor_idx, int descriptor_name)
+{
+ switch (sensor_idx) {
+ case ACCEL_IDX:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(accel3_report_descriptor);
+ case input_size:
+ return sizeof(struct accel3_input_report);
+ case feature_size:
+ return sizeof(struct accel3_feature_report);
+ }
+ break;
+ case GYRO_IDX:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(gyro3_report_descriptor);
+ case input_size:
+ return sizeof(struct gyro_input_report);
+ case feature_size:
+ return sizeof(struct gyro_feature_report);
+ }
+ break;
+ case MAG_IDX:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(comp3_report_descriptor);
+ case input_size:
+ return sizeof(struct magno_input_report);
+ case feature_size:
+ return sizeof(struct magno_feature_report);
+ }
+ break;
+ case ALS_IDX:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(als_report_descriptor);
+ case input_size:
+ return sizeof(struct als_input_report);
+ case feature_size:
+ return sizeof(struct als_feature_report);
+ }
+ break;
+ case HPD_IDX:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(hpd_report_descriptor);
+ case input_size:
+ return sizeof(struct hpd_input_report);
+ case feature_size:
+ return sizeof(struct hpd_feature_report);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops)
+{
+ mp2_ops->get_rep_desc = get_report_desc;
+ mp2_ops->get_feat_rep = get_feature_rep;
+ mp2_ops->get_desc_sz = get_desc_size;
+ mp2_ops->get_in_rep = get_input_rep;
+}
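For context, float_to_int() in the new amd_sfh_desc.c above rounds a raw IEEE-754 single-precision bit pattern reported by the SFH firmware to the nearest integer using only integer arithmetic. The following is a minimal user-space sketch of the same rounding logic (the function name and test values are illustrative, not from the patch); as a worked example, 0x3FC00000 encodes 1.5 and rounds to 2.

```c
#include <assert.h>
#include <stdint.h>

/* Integer-only rounding of an IEEE-754 single-precision bit pattern,
 * mirroring the logic of float_to_int() above; like that helper, it is
 * only meant for the small magnitudes the sensor data actually uses.
 */
static int f32_bits_to_int(uint32_t f)
{
	int mantissa = f & 0x7FFFFF;             /* low 23 bits */
	int sign = (f & 0x80000000u) ? -1 : 1;
	int exp = (f & 0x7FFFFFFF) >> 23;        /* biased exponent */
	int shift, fraction, integer, zeropre;

	if (!exp && !mantissa)
		return 0;

	exp -= 127;
	if (exp < 0) {
		/* magnitude below 1: round to +/-1 when it is at least 0.5 */
		exp = -exp;
		zeropre = ((((1 << 23) + mantissa) * 100) >> 23) >> exp;
		return zeropre >= 50 ? sign : 0;
	}

	shift = 23 - exp;
	integer = (1 << exp) + (mantissa >> shift);
	fraction = mantissa & ((1 << shift) - 1);

	return (((fraction * 100) >> shift) >= 50) ? sign * (integer + 1)
						   : sign * integer;
}

int main(void)
{
	assert(f32_bits_to_int(0x3FC00000) == 2);   /* 1.5  -> 2  */
	assert(f32_bits_to_int(0x40490FDB) == 3);   /* ~pi  -> 3  */
	assert(f32_bits_to_int(0xBF000000) == -1);  /* -0.5 -> -1 */
	return 0;
}
```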
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
new file mode 100644
index 000000000000..70436f9fad2f
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP2 1.1 communication driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/hid.h>
+
+#include "amd_sfh_init.h"
+#include "amd_sfh_interface.h"
+#include "../hid_descriptor/amd_sfh_hid_desc.h"
+
+static int amd_sfh_get_sensor_num(struct amd_mp2_dev *mp2, u8 *sensor_id)
+{
+ struct sfh_sensor_list *slist;
+ struct sfh_base_info binfo;
+ int num_of_sensors = 0;
+ int i;
+
+ memcpy_fromio(&binfo, mp2->vsbase, sizeof(struct sfh_base_info));
+ slist = &binfo.sbase.s_list;
+
+ for (i = 0; i < MAX_IDX; i++) {
+ switch (i) {
+ case ACCEL_IDX:
+ case GYRO_IDX:
+ case MAG_IDX:
+ case ALS_IDX:
+ case HPD_IDX:
+ if (BIT(i) & slist->sl.sensors)
+ sensor_id[num_of_sensors++] = i;
+ break;
+ }
+ }
+
+ return num_of_sensors;
+}
+
+static u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
+{
+ if (mp2->mp2_ops->response)
+ return mp2->mp2_ops->response(mp2, sid, cmd_id);
+
+ return 0;
+}
+
+static const char *get_sensor_name(int idx)
+{
+ switch (idx) {
+ case ACCEL_IDX:
+ return "accelerometer";
+ case GYRO_IDX:
+ return "gyroscope";
+ case MAG_IDX:
+ return "magnetometer";
+ case ALS_IDX:
+ return "ALS";
+ case HPD_IDX:
+ return "HPD";
+ default:
+ return "unknown sensor type";
+ }
+}
+
+static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
+{
+ struct amdtp_cl_data *cl_data = privdata->cl_data;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], DISABLE_SENSOR);
+ if (status == 0)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(&privdata->pdev->dev, "stopping sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ }
+ }
+
+ cancel_delayed_work_sync(&cl_data->work);
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amdtp_hid_remove(cl_data);
+
+ return 0;
+}
+
+static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+{
+ struct amd_input_data *in_data = &privdata->in_data;
+ struct amdtp_cl_data *cl_data = privdata->cl_data;
+ struct amd_mp2_ops *mp2_ops = privdata->mp2_ops;
+ struct amd_mp2_sensor_info info;
+ struct request_list *req_list;
+ u32 feature_report_size;
+ u32 input_report_size;
+ struct device *dev;
+ int rc, i, status;
+ u8 cl_idx;
+
+ req_list = &cl_data->req_list;
+ dev = &privdata->pdev->dev;
+ amd_sfh1_1_set_desc_ops(mp2_ops);
+
+ cl_data->num_hid_devices = amd_sfh_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+
+ INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
+ INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
+ INIT_LIST_HEAD(&req_list->list);
+ cl_data->in_data = in_data;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ cl_data->sensor_requested_cnt[i] = 0;
+ cl_data->cur_hid_dev = i;
+ cl_idx = cl_data->sensor_idx[i];
+
+ cl_data->report_descr_sz[i] = mp2_ops->get_desc_sz(cl_idx, descr_size);
+ if (!cl_data->report_descr_sz[i]) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ feature_report_size = mp2_ops->get_desc_sz(cl_idx, feature_size);
+ if (!feature_report_size) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ input_report_size = mp2_ops->get_desc_sz(cl_idx, input_size);
+ if (!input_report_size) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
+ if (!cl_data->feature_report[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ in_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
+ if (!in_data->input_report[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ info.sensor_idx = cl_idx;
+
+ cl_data->report_descr[i] =
+ devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
+ if (!cl_data->report_descr[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
+ if (rc)
+ return rc;
+
+ writel(0, privdata->mmio + AMD_P2C_MSG(0));
+ mp2_ops->start(privdata, info);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
+
+ status = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+
+ if (status == SENSOR_ENABLED) {
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ rc = amdtp_hid_probe(i, cl_data);
+ if (rc) {
+ mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], DISABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_DISABLED;
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i],
+ get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ goto cleanup;
+ }
+ }
+ dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ }
+
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ return 0;
+
+cleanup:
+ amd_sfh_hid_client_deinit(privdata);
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ devm_kfree(dev, cl_data->feature_report[i]);
+ devm_kfree(dev, in_data->input_report[i]);
+ devm_kfree(dev, cl_data->report_descr[i]);
+ }
+ return rc;
+}
+
+static void amd_sfh_resume(struct amd_mp2_dev *mp2)
+{
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ struct amd_mp2_sensor_info info;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
+ info.sensor_idx = cl_data->sensor_idx[i];
+ mp2->mp2_ops->start(mp2, info);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], ENABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_ENABLED;
+ if (status == SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ dev_dbg(&mp2->pdev->dev, "resume sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ }
+ }
+
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+}
+
+static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
+{
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] != HPD_IDX &&
+ cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], DISABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_DISABLED;
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(&mp2->pdev->dev, "suspend sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ }
+ }
+
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+}
+
+static void amd_mp2_pci_remove(void *privdata)
+{
+ struct amd_mp2_dev *mp2 = privdata;
+
+ amd_sfh_hid_client_deinit(privdata);
+ mp2->mp2_ops->stop_all(mp2);
+ pci_intx(mp2->pdev, false);
+ amd_sfh_clear_intr(mp2);
+}
+
+static void amd_sfh_set_ops(struct amd_mp2_dev *mp2)
+{
+ struct amd_mp2_ops *mp2_ops;
+
+ sfh_interface_init(mp2);
+ mp2_ops = mp2->mp2_ops;
+ mp2_ops->clear_intr = amd_sfh_clear_intr_v2,
+ mp2_ops->init_intr = amd_sfh_irq_init_v2,
+ mp2_ops->suspend = amd_sfh_suspend;
+ mp2_ops->resume = amd_sfh_resume;
+ mp2_ops->remove = amd_mp2_pci_remove;
+}
+
+int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
+{
+ u32 phy_base = readl(mp2->mmio + AMD_C2P_MSG(22));
+ struct device *dev = &mp2->pdev->dev;
+ struct sfh_base_info binfo;
+ int rc;
+
+ phy_base <<= 21;
+ if (!devm_request_mem_region(dev, phy_base, 128 * 1024, "amd_sfh")) {
+ dev_err(dev, "can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mp2->vsbase = devm_ioremap(dev, phy_base, 128 * 1024);
+ if (!mp2->vsbase) {
+ dev_err(dev, "failed to remap vsbase\n");
+ return -ENOMEM;
+ }
+
+ /* Give the SFH firmware time to process the configuration before accessing it */
+ msleep(5000);
+
+ memcpy_fromio(&binfo, mp2->vsbase, sizeof(struct sfh_base_info));
+ if (binfo.sbase.fw_info.fw_ver == 0 || binfo.sbase.s_list.sl.sensors == 0) {
+ dev_err(dev, "failed to get sensors\n");
+ return -EOPNOTSUPP;
+ }
+ dev_dbg(dev, "firmware version 0x%x\n", binfo.sbase.fw_info.fw_ver);
+
+ amd_sfh_set_ops(mp2);
+
+ rc = amd_sfh_irq_init(mp2);
+ if (rc) {
+ dev_err(dev, "amd_sfh_irq_init failed\n");
+ return rc;
+ }
+
+ rc = amd_sfh1_1_hid_client_init(mp2);
+ if (rc) {
+ dev_err(dev, "amd_sfh1_1_hid_client_init failed\n");
+ return rc;
+ }
+
+ return rc;
+}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
new file mode 100644
index 000000000000..21c44990bbeb
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD MP2 1.1 initialization structures
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_SFH_INIT_H
+#define AMD_SFH_INIT_H
+
+#include "../amd_sfh_common.h"
+
+struct amd_sfh1_1_ops {
+ int (*init)(struct amd_mp2_dev *mp2);
+};
+
+int amd_sfh1_1_init(struct amd_mp2_dev *mp2);
+
+static const struct amd_sfh1_1_ops __maybe_unused sfh1_1_ops = {
+ .init = amd_sfh1_1_init,
+};
+
+#endif
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
new file mode 100644
index 000000000000..c6df959ec725
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP2 1.1 communication interfaces
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+
+#include "amd_sfh_interface.h"
+
+static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
+{
+ struct sfh_cmd_response cmd_resp;
+
+ /* Get response with status within a max of 1600 ms timeout */
+ if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
+ (cmd_resp.response.response == 0 &&
+ cmd_resp.response.cmd_id == cmd_id && (sid == 0xff ||
+ cmd_resp.response.sensor_id == sid)), 500, 1600000))
+ return cmd_resp.response.response;
+
+ return -1;
+}
+
+static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+{
+ struct sfh_cmd_base cmd_base;
+
+ cmd_base.ul = 0;
+ cmd_base.cmd.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd.intr_disable = 0;
+ cmd_base.cmd.sensor_id = info.sensor_idx;
+
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+}
+
+static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
+{
+ struct sfh_cmd_base cmd_base;
+
+ cmd_base.ul = 0;
+ cmd_base.cmd.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd.intr_disable = 0;
+ cmd_base.cmd.sensor_id = sensor_idx;
+
+ writeq(0x0, privdata->mmio + AMD_C2P_MSG(1));
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+}
+
+static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
+{
+ struct sfh_cmd_base cmd_base;
+
+ cmd_base.ul = 0;
+ cmd_base.cmd.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd.intr_disable = 0;
+
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+}
+
+static struct amd_mp2_ops amd_sfh_ops = {
+ .start = amd_start_sensor,
+ .stop = amd_stop_sensor,
+ .stop_all = amd_stop_all_sensor,
+ .response = amd_sfh_wait_response,
+};
+
+void sfh_interface_init(struct amd_mp2_dev *mp2)
+{
+ mp2->mp2_ops = &amd_sfh_ops;
+}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
new file mode 100644
index 000000000000..ae47a369dc05
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD MP2 1.1 communication interfaces
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_SFH_INTERFACE_H
+#define AMD_SFH_INTERFACE_H
+
+#include "../amd_sfh_common.h"
+
+#define SENSOR_DATA_MEM_SIZE_DEFAULT 256
+#define TOTAL_STATIC_MEM_DEFAULT 1024
+#define OFFSET_SFH_INFO_BASE_DEFAULT 0
+#define OFFSET_SENSOR_DATA_DEFAULT (OFFSET_SFH_INFO_BASE_DEFAULT + \
+ TOTAL_STATIC_MEM_DEFAULT)
+enum sensor_index {
+ ACCEL_IDX,
+ GYRO_IDX,
+ MAG_IDX,
+ ALS_IDX = 4,
+ HPD_IDX = 5,
+ MAX_IDX = 15,
+};
+
+struct sfh_cmd_base {
+ union {
+ u32 ul;
+ struct {
+ u32 sensor_id : 4;
+ u32 cmd_id : 4;
+ u32 sub_cmd_id : 6;
+ u32 length : 12;
+ u32 rsvd : 5;
+ u32 intr_disable : 1;
+ } cmd;
+ };
+};
+
+struct sfh_cmd_response {
+ union {
+ u32 resp;
+ struct {
+ u32 response : 8;
+ u32 sensor_id : 4;
+ u32 cmd_id : 4;
+ u32 sub_cmd : 6;
+ u32 rsvd2 : 10;
+ } response;
+ };
+};
+
+struct sfh_platform_info {
+ union {
+ u32 pi;
+ struct {
+ u32 cust_id : 16;
+ u32 plat_id : 6;
+ u32 interface_id : 4;
+ u32 rsvd : 6;
+ } pinfo;
+ };
+};
+
+struct sfh_firmware_info {
+ union {
+ u32 fw_ver;
+ struct {
+ u32 minor_rev : 8;
+ u32 major_rev : 8;
+ u32 minor_ver : 8;
+ u32 major_ver : 8;
+ } fver;
+ };
+};
+
+struct sfh_sensor_list {
+ union {
+ u32 slist;
+ struct {
+ u32 sensors : 16;
+ u32 rsvd : 16;
+ } sl;
+ };
+};
+
+struct sfh_base_info {
+ union {
+ u32 sfh_base[24];
+ struct {
+ struct sfh_platform_info plat_info;
+ struct sfh_firmware_info fw_info;
+ struct sfh_sensor_list s_list;
+ } sbase;
+ };
+};
+
+struct sfh_common_data {
+ u64 timestamp;
+ u32 intr_cnt;
+ u32 featvalid : 16;
+ u32 rsvd : 13;
+ u32 sensor_state : 3;
+};
+
+struct sfh_float32 {
+ u32 x;
+ u32 y;
+ u32 z;
+};
+
+struct sfh_accel_data {
+ struct sfh_common_data commondata;
+ struct sfh_float32 acceldata;
+ u32 accelstatus;
+};
+
+struct sfh_gyro_data {
+ struct sfh_common_data commondata;
+ struct sfh_float32 gyrodata;
+ u32 result;
+};
+
+struct sfh_mag_data {
+ struct sfh_common_data commondata;
+ struct sfh_float32 magdata;
+ u32 accuracy;
+};
+
+struct sfh_als_data {
+ struct sfh_common_data commondata;
+ u16 lux;
+};
+
+struct hpd_status {
+ union {
+ struct {
+ u32 distance : 16;
+ u32 probablity : 8;
+ u32 presence : 2;
+ u32 rsvd : 5;
+ u32 state : 1;
+ } shpd;
+ u32 val;
+ };
+};
+
+void sfh_interface_init(struct amd_mp2_dev *mp2);
+void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops);
+#endif
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 2b986d0dbde4..db146d0f7937 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -831,6 +831,8 @@ static const struct hid_device_id alps_id[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
{ }
};
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 42a568902f49..6970797cdc56 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -36,7 +36,7 @@
#define APPLE_NUMLOCK_EMULATION BIT(8)
#define APPLE_RDESC_BATTERY BIT(9)
#define APPLE_BACKLIGHT_CTL BIT(10)
-#define APPLE_IS_KEYCHRON BIT(11)
+#define APPLE_IS_NON_APPLE BIT(11)
#define APPLE_FLAG_FKEY 0x01
@@ -65,6 +65,10 @@ MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
"(For people who want to keep PC keyboard muscle memory. "
"[0] = as-is, Mac layout, 1 = swapped, PC layout)");
+struct apple_non_apple_keyboard {
+ char *name;
+};
+
struct apple_sc_backlight {
struct led_classdev cdev;
struct hid_device *hdev;
@@ -313,6 +317,27 @@ static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
{ }
};
+static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
+ { "SONiX USB DEVICE" },
+ { "Keychron" },
+ { "AONE" },
+ { "GANSS" }
+};
+
+static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(non_apple_keyboards); i++) {
+ char *non_apple = non_apple_keyboards[i].name;
+
+ if (strncmp(hdev->name, non_apple, strlen(non_apple)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
static inline void apple_setup_key_translation(struct input_dev *input,
const struct apple_key_translation *table)
{
@@ -363,7 +388,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
if (fnmode == 3) {
- real_fnmode = (asc->quirks & APPLE_IS_KEYCHRON) ? 2 : 1;
+ real_fnmode = (asc->quirks & APPLE_IS_NON_APPLE) ? 2 : 1;
} else {
real_fnmode = fnmode;
}
@@ -669,9 +694,9 @@ static int apple_input_configured(struct hid_device *hdev,
asc->quirks &= ~APPLE_HAS_FN;
}
- if (strncmp(hdev->name, "Keychron", 8) == 0) {
- hid_info(hdev, "Keychron keyboard detected; function keys will default to fnmode=2 behavior\n");
- asc->quirks |= APPLE_IS_KEYCHRON;
+ if (apple_is_non_apple_keyboard(hdev)) {
+ hid_info(hdev, "Non-apple keyboard detected; function keys will default to fnmode=2 behavior\n");
+ asc->quirks |= APPLE_IS_NON_APPLE;
}
return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 00154a1cd2d8..b7f5566e338d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1662,7 +1662,7 @@ static void hid_process_report(struct hid_device *hid,
/* first retrieve all incoming values in data */
for (a = 0; a < report->maxfield; a++)
- hid_input_fetch_field(hid, field = report->field[a], data);
+ hid_input_fetch_field(hid, report->field[a], data);
if (!list_empty(&report->field_entry_list)) {
/* INPUT_REPORT, we have a priority list of fields */
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index ece147d1a278..1e16b0fa310d 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -790,6 +790,11 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
data->word = le16_to_cpup((__le16 *)buf);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_length > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EINVAL;
+ goto power_normal;
+ }
+
memcpy(data->block + 1, buf, read_length);
break;
case I2C_SMBUS_BLOCK_DATA:
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d9eb676abe96..0fb720a96399 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -413,6 +413,7 @@
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
+#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -1278,6 +1279,7 @@
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540 0x0075
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640 0x0094
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01 0x0042
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L 0x0935
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
#define USB_DEVICE_ID_UGEE_TABLET_G5 0x0074
#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c6b27aab9041..48c1c02c69f4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -381,6 +381,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index b2a08233f8d5..c8f82bcbf1ab 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -766,7 +766,7 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
/*
* Some models have multiple interfaces, we want the interface with
- * with the f000.0000 application input report.
+ * the f000.0000 application input report.
*/
rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list) {
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 81de88ab2ecc..68f9e9d207f4 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1694,7 +1694,7 @@ static int hidpp_battery_get_property(struct power_supply *psy,
val->strval = hidpp->hid_dev->uniq;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- /* hardware reports voltage in in mV. sysfs expects uV */
+ /* hardware reports voltage in mV. sysfs expects uV */
val->intval = hidpp->battery.voltage * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 4211b9839209..de52e9f7bb8c 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -385,6 +385,9 @@ static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr,
data_len = 7;
break;
default:
+ if (len > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
memcpy(&mcp->txbuf[5], buf, len);
data_len = len + 5;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6bb3890b0f2c..2e72922e36f5 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -194,6 +194,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
#define MT_CLS_WIN_8_DISABLE_WAKEUP 0x0016
#define MT_CLS_WIN_8_NO_STICKY_FINGERS 0x0017
+#define MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU 0x0018
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -286,6 +287,15 @@ static const struct mt_class mt_classes[] = {
MT_QUIRK_WIN8_PTP_BUTTONS |
MT_QUIRK_FORCE_MULTI_INPUT,
.export_all_inputs = true },
+ { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ .quirks = MT_QUIRK_IGNORE_DUPLICATES |
+ MT_QUIRK_HOVERING |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_STICKY_FINGERS |
+ MT_QUIRK_WIN8_PTP_BUTTONS |
+ MT_QUIRK_FORCE_MULTI_INPUT |
+ MT_QUIRK_NOT_SEEN_MEANS_UP,
+ .export_all_inputs = true },
{ .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_IGNORE_DUPLICATES |
@@ -783,6 +793,7 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_DG_CONFIDENCE:
if ((cls->name == MT_CLS_WIN_8 ||
cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU ||
cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
@@ -2035,7 +2046,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_LENOVO_X1_TAB3) },
/* Lenovo X12 TAB Gen 1 */
- { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X12_TAB) },
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 2204de889739..92ac4f605f13 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -292,6 +292,7 @@ static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = {
};
static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
+static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5;
#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
static const u16 JC_RUMBLE_PERIOD_MS = 50;
@@ -402,8 +403,6 @@ struct joycon_input_report {
#define JC_RUMBLE_DATA_SIZE 8
#define JC_RUMBLE_QUEUE_SIZE 8
-static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5;
-
static const char * const joycon_player_led_names[] = {
LED_FUNCTION_PLAYER1,
LED_FUNCTION_PLAYER2,
@@ -1586,6 +1585,7 @@ static const unsigned int joycon_button_inputs_r[] = {
/* We report joy-con d-pad inputs as buttons and pro controller as a hat. */
static const unsigned int joycon_dpad_inputs_jc[] = {
BTN_DPAD_UP, BTN_DPAD_DOWN, BTN_DPAD_LEFT, BTN_DPAD_RIGHT,
+ 0 /* 0 signals end of array */
};
static int joycon_input_create(struct joycon_ctlr *ctlr)
@@ -1634,6 +1634,7 @@ static int joycon_input_create(struct joycon_ctlr *ctlr)
ctlr->input->id.version = hdev->version;
ctlr->input->uniq = ctlr->mac_addr_str;
ctlr->input->name = name;
+ ctlr->input->phys = hdev->phys;
input_set_drvdata(ctlr->input, ctlr);
/* set up sticks and buttons */
@@ -1713,6 +1714,7 @@ static int joycon_input_create(struct joycon_ctlr *ctlr)
ctlr->imu_input->id.version = hdev->version;
ctlr->imu_input->uniq = ctlr->mac_addr_str;
ctlr->imu_input->name = imu_name;
+ ctlr->imu_input->phys = hdev->phys;
input_set_drvdata(ctlr->imu_input, ctlr);
/* configure imu axes */
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index c0fe66e50c58..47a17375c7fc 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -522,6 +522,8 @@ static const struct hid_device_id uclogic_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
{ }
};
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index db838f16282d..c11fa239e6a2 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -23,11 +23,11 @@
/**
* uclogic_params_pen_inrange_to_str() - Convert a pen in-range reporting type
* to a string.
- *
* @inrange: The in-range reporting type to convert.
*
- * Returns:
- * The string representing the type, or NULL if the type is unknown.
+ * Return:
+ * * The string representing the type, or
+ * * %NULL if the type is unknown.
*/
static const char *uclogic_params_pen_inrange_to_str(
enum uclogic_params_pen_inrange inrange)
@@ -45,10 +45,12 @@ static const char *uclogic_params_pen_inrange_to_str(
}
/**
- * Dump tablet interface pen parameters with hid_dbg(), indented with one tab.
- *
+ * uclogic_params_pen_hid_dbg() - Dump tablet interface pen parameters
* @hdev: The HID device the pen parameters describe.
* @pen: The pen parameters to dump.
+ *
+ * Dump tablet interface pen parameters with hid_dbg(). The dump is indented
+ * with a tab.
*/
static void uclogic_params_pen_hid_dbg(const struct hid_device *hdev,
const struct uclogic_params_pen *pen)
@@ -77,11 +79,12 @@ static void uclogic_params_pen_hid_dbg(const struct hid_device *hdev,
}
/**
- * Dump tablet interface frame parameters with hid_dbg(), indented with two
- * tabs.
- *
+ * uclogic_params_frame_hid_dbg() - Dump tablet interface frame parameters
* @hdev: The HID device the pen parameters describe.
* @frame: The frame parameters to dump.
+ *
+ * Dump tablet interface frame parameters with hid_dbg(). The dump is
+ * indented with two tabs.
*/
static void uclogic_params_frame_hid_dbg(
const struct hid_device *hdev,
@@ -102,10 +105,11 @@ static void uclogic_params_frame_hid_dbg(
}
/**
- * Dump tablet interface parameters with hid_dbg().
- *
+ * uclogic_params_hid_dbg() - Dump tablet interface parameters
* @hdev: The HID device the parameters describe.
* @params: The parameters to dump.
+ *
+ * Dump tablet interface parameters with hid_dbg().
*/
void uclogic_params_hid_dbg(const struct hid_device *hdev,
const struct uclogic_params *params)
@@ -234,7 +238,7 @@ static int uclogic_params_pen_init_v1(struct uclogic_params_pen *pen,
const int len = 12;
s32 resolution;
/* Pen report descriptor template parameters */
- s32 desc_params[UCLOGIC_RDESC_PEN_PH_ID_NUM];
+ s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
__u8 *desc_ptr = NULL;
/* Check arguments */
@@ -379,7 +383,7 @@ static int uclogic_params_pen_init_v2(struct uclogic_params_pen *pen,
size_t i;
s32 resolution;
/* Pen report descriptor template parameters */
- s32 desc_params[UCLOGIC_RDESC_PEN_PH_ID_NUM];
+ s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
__u8 *desc_ptr = NULL;
/* Check arguments */
@@ -1003,6 +1007,197 @@ cleanup:
}
/**
+ * uclogic_probe_interface() - initialize a tablet interface by sending it
+ * magic data. Some tablets, like the Parblo A610 PLUS V2 or the XP-PEN
+ * Deco Mini 7, need this initialization.
+ *
+ * @hdev: The HID device of the tablet interface to initialize and get
+ * parameters from. Cannot be NULL.
+ * @magic_arr: The magic data that should be sent to probe the interface.
+ * Cannot be NULL.
+ * @magic_size: Size of the magic data.
+ * @endpoint: Endpoint where the magic data should be sent.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_probe_interface(struct hid_device *hdev, u8 *magic_arr,
+ int magic_size, int endpoint)
+{
+ struct usb_device *udev;
+ unsigned int pipe = 0;
+ int sent;
+ u8 *buf = NULL;
+ int rc = 0;
+
+ if (!hdev || !magic_arr) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ buf = kmemdup(magic_arr, magic_size, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ udev = hid_to_usb_dev(hdev);
+ pipe = usb_sndintpipe(udev, endpoint);
+
+ rc = usb_interrupt_msg(udev, pipe, buf, magic_size, &sent, 1000);
+ if (rc || sent != magic_size) {
+ hid_err(hdev, "Interface probing failed: %d\n", rc);
+ rc = -1;
+ goto cleanup;
+ }
+
+ rc = 0;
+cleanup:
+ kfree(buf);
+ return rc;
+}
+
+/**
+ * uclogic_params_ugee_v2_init() - initialize UGEE graphics tablets by
+ * discovering their parameters.
+ *
+ * These tablets, internally designated as v2 to differentiate them from older
+ * models, expect a payload of magic data in order to be switched to the fully
+ * functional mode and expose their parameters in a similar way to the
+ * information present in uclogic_params_pen_init_v1() but with some
+ * differences.
+ *
+ * @params: Parameters to fill in (to be cleaned with
+ * uclogic_params_cleanup()). Not modified in case of error.
+ * Cannot be NULL.
+ * @hdev: The HID device of the tablet interface to initialize and get
+ * parameters from. Cannot be NULL.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ struct hid_device *hdev)
+{
+ int rc = 0;
+ struct usb_interface *iface;
+ __u8 bInterfaceNumber;
+ const int str_desc_len = 12;
+ __u8 *str_desc = NULL;
+ __u8 *rdesc_pen = NULL;
+ __u8 *rdesc_frame = NULL;
+ s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
+ s32 resolution;
+ __u8 magic_arr[] = {
+ 0x02, 0xb0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ /* The resulting parameters (noop) */
+ struct uclogic_params p = {0, };
+
+ if (!params || !hdev) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ iface = to_usb_interface(hdev->dev.parent);
+ bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+ if (bInterfaceNumber != 2) {
+ uclogic_params_init_invalid(&p);
+ goto output;
+ }
+
+ /*
+ * Initialize the interface by sending magic data.
+ * The specific data was discovered by sniffing the Windows driver
+ * traffic.
+ */
+ rc = uclogic_probe_interface(hdev, magic_arr, sizeof(magic_arr), 0x03);
+ if (rc) {
+ uclogic_params_init_invalid(&p);
+ goto output;
+ }
+
+ /*
+ * Read the string descriptor containing pen and frame parameters.
+ * The specific string descriptor and data were discovered by sniffing
+ * the Windows driver traffic.
+ */
+ rc = uclogic_params_get_str_desc(&str_desc, hdev, 100, str_desc_len);
+ if (rc != str_desc_len) {
+ hid_err(hdev, "failed retrieving pen and frame parameters: %d\n", rc);
+ uclogic_params_init_invalid(&p);
+ goto output;
+ }
+
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM] =
+ get_unaligned_le16(str_desc + 2);
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM] =
+ get_unaligned_le16(str_desc + 4);
+ desc_params[UCLOGIC_RDESC_FRAME_PH_ID_UM] = str_desc[6];
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] =
+ get_unaligned_le16(str_desc + 8);
+ resolution = get_unaligned_le16(str_desc + 10);
+ if (resolution == 0) {
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0;
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0;
+ } else {
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM] =
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM] * 1000 /
+ resolution;
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM] =
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM] * 1000 /
+ resolution;
+ }
+ kfree(str_desc);
+ str_desc = NULL;
+
+ /* Initialize the pen interface */
+ rdesc_pen = uclogic_rdesc_template_apply(
+ uclogic_rdesc_ugee_v2_pen_template_arr,
+ uclogic_rdesc_ugee_v2_pen_template_size,
+ desc_params, ARRAY_SIZE(desc_params));
+ if (!rdesc_pen) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ p.pen.desc_ptr = rdesc_pen;
+ p.pen.desc_size = uclogic_rdesc_ugee_v2_pen_template_size;
+ p.pen.id = 0x02;
+ p.pen.subreport_list[0].value = 0xf0;
+ p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
+
+ /* Initialize the frame interface */
+ rdesc_frame = uclogic_rdesc_template_apply(
+ uclogic_rdesc_ugee_v2_frame_btn_template_arr,
+ uclogic_rdesc_ugee_v2_frame_btn_template_size,
+ desc_params, ARRAY_SIZE(desc_params));
+ if (!rdesc_frame) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ rc = uclogic_params_frame_init_with_desc(&p.frame_list[0],
+ rdesc_frame,
+ uclogic_rdesc_ugee_v2_frame_btn_template_size,
+ UCLOGIC_RDESC_V1_FRAME_ID);
+ kfree(rdesc_frame);
+ if (rc) {
+ uclogic_params_init_invalid(&p);
+ goto output;
+ }
+
+output:
+ /* Output parameters */
+ memcpy(params, &p, sizeof(*params));
+ memset(&p, 0, sizeof(p));
+ rc = 0;
+cleanup:
+ kfree(str_desc);
+ uclogic_params_cleanup(&p);
+ return rc;
+}
+
+/**
* uclogic_params_init() - initialize a tablet interface and discover its
* parameters.
*
@@ -1237,6 +1432,12 @@ int uclogic_params_init(struct uclogic_params *params,
uclogic_params_init_invalid(&p);
}
break;
+ case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
+ rc = uclogic_params_ugee_v2_init(&p, hdev);
+ if (rc != 0)
+ goto cleanup;
+ break;
case VID_PID(USB_VENDOR_ID_TRUST,
USB_DEVICE_ID_TRUST_PANORA_TABLET):
case VID_PID(USB_VENDOR_ID_UGEE,
diff --git a/drivers/hid/hid-uclogic-rdesc-test.c b/drivers/hid/hid-uclogic-rdesc-test.c
new file mode 100644
index 000000000000..ebebffef5f8a
--- /dev/null
+++ b/drivers/hid/hid-uclogic-rdesc-test.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * HID driver for UC-Logic devices not fully compliant with HID standard
+ *
+ * Copyright (c) 2022 José Expósito <jose.exposito89@gmail.com>
+ */
+
+#include <kunit/test.h>
+#include "./hid-uclogic-rdesc.h"
+
+struct uclogic_template_case {
+ const char *name;
+ const __u8 *template;
+ size_t template_size;
+ const s32 *param_list;
+ size_t param_num;
+ const __u8 *expected;
+};
+
+static const s32 params_pen_all[UCLOGIC_RDESC_PH_ID_NUM] = {
+ [UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 0xAA,
+ [UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0xBB,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = 0xCC,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0xDD,
+ [UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = 0xEE,
+};
+
+static const s32 params_pen_some[] = {
+ [UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 0xAA,
+ [UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0xBB,
+};
+
+static const s32 params_frame_all[UCLOGIC_RDESC_PH_ID_NUM] = {
+ [UCLOGIC_RDESC_FRAME_PH_ID_UM] = 0xFF,
+};
+
+static const __u8 template_empty[] = { };
+static const __u8 template_small[] = { 0x00 };
+static const __u8 template_no_ph[] = { 0xAA, 0xFE, 0xAA, 0xED, 0x1D };
+
+static const __u8 template_pen_ph_end[] = {
+ 0xAA, 0xBB, UCLOGIC_RDESC_PEN_PH_HEAD
+};
+
+static const __u8 template_btn_ph_end[] = {
+ 0xAA, 0xBB, UCLOGIC_RDESC_FRAME_PH_BTN_HEAD
+};
+
+static const __u8 template_pen_all_params[] = {
+ UCLOGIC_RDESC_PEN_PH(X_LM),
+ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM),
+ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM),
+ UCLOGIC_RDESC_PEN_PH(Y_PM),
+ 0x00, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM),
+};
+
+static const __u8 expected_pen_all_params[] = {
+ 0xAA, 0x00, 0x00, 0x00,
+ 0x47, 0xBB, 0x00, 0x00, 0x00,
+ 0x27, 0xCC, 0x00, 0x00, 0x00,
+ 0xDD, 0x00, 0x00, 0x00,
+ 0x00, 0xEE, 0x00, 0x00, 0x00,
+};
+
+static const __u8 template_frame_all_params[] = {
+ 0x01, 0x02,
+ UCLOGIC_RDESC_FRAME_PH_BTN,
+ 0x99,
+};
+
+static const __u8 expected_frame_all_params[] = {
+ 0x01, 0x02,
+ 0x2A, 0xFF, 0x00,
+ 0x99,
+};
+
+static const __u8 template_pen_some_params[] = {
+ 0x01, 0x02,
+ UCLOGIC_RDESC_PEN_PH(X_LM),
+ 0x03, UCLOGIC_RDESC_PEN_PH(X_PM),
+ 0x04, 0x05,
+};
+
+static const __u8 expected_pen_some_params[] = {
+ 0x01, 0x02,
+ 0xAA, 0x00, 0x00, 0x00,
+ 0x03, 0xBB, 0x00, 0x00, 0x00,
+ 0x04, 0x05,
+};
+
+static const __u8 template_params_none[] = {
+ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM),
+ UCLOGIC_RDESC_PEN_PH(Y_PM),
+ 0x00, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM),
+};
+
+static struct uclogic_template_case uclogic_template_cases[] = {
+ {
+ .name = "Empty template",
+ .template = template_empty,
+ .template_size = sizeof(template_empty),
+ .param_list = params_pen_all,
+ .param_num = ARRAY_SIZE(params_pen_all),
+ .expected = template_empty,
+ },
+ {
+ .name = "Template smaller than the placeholder",
+ .template = template_small,
+ .template_size = sizeof(template_small),
+ .param_list = params_pen_all,
+ .param_num = ARRAY_SIZE(params_pen_all),
+ .expected = template_small,
+ },
+ {
+ .name = "No placeholder",
+ .template = template_no_ph,
+ .template_size = sizeof(template_no_ph),
+ .param_list = params_pen_all,
+ .param_num = ARRAY_SIZE(params_pen_all),
+ .expected = template_no_ph,
+ },
+ {
+ .name = "Pen placeholder at the end, without ID",
+ .template = template_pen_ph_end,
+ .template_size = sizeof(template_pen_ph_end),
+ .param_list = params_pen_all,
+ .param_num = ARRAY_SIZE(params_pen_all),
+ .expected = template_pen_ph_end,
+ },
+ {
+ .name = "Frame button placeholder at the end, without ID",
+ .template = template_btn_ph_end,
+ .template_size = sizeof(template_btn_ph_end),
+ .param_list = params_frame_all,
+ .param_num = ARRAY_SIZE(params_frame_all),
+ .expected = template_btn_ph_end,
+ },
+ {
+ .name = "All params present in the pen template",
+ .template = template_pen_all_params,
+ .template_size = sizeof(template_pen_all_params),
+ .param_list = params_pen_all,
+ .param_num = ARRAY_SIZE(params_pen_all),
+ .expected = expected_pen_all_params,
+ },
+ {
+ .name = "All params present in the frame template",
+ .template = template_frame_all_params,
+ .template_size = sizeof(template_frame_all_params),
+ .param_list = params_frame_all,
+ .param_num = ARRAY_SIZE(params_frame_all),
+ .expected = expected_frame_all_params,
+ },
+ {
+ .name = "Some params present in the pen template (complete param list)",
+ .template = template_pen_some_params,
+ .template_size = sizeof(template_pen_some_params),
+ .param_list = params_pen_all,
+ .param_num = ARRAY_SIZE(params_pen_all),
+ .expected = expected_pen_some_params,
+ },
+ {
+ .name = "Some params present in the pen template (incomplete param list)",
+ .template = template_pen_some_params,
+ .template_size = sizeof(template_pen_some_params),
+ .param_list = params_pen_some,
+ .param_num = ARRAY_SIZE(params_pen_some),
+ .expected = expected_pen_some_params,
+ },
+ {
+ .name = "No params present in the template",
+ .template = template_params_none,
+ .template_size = sizeof(template_params_none),
+ .param_list = params_pen_some,
+ .param_num = ARRAY_SIZE(params_pen_some),
+ .expected = template_params_none,
+ },
+};
+
+static void uclogic_template_case_desc(struct uclogic_template_case *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(uclogic_template, uclogic_template_cases,
+ uclogic_template_case_desc);
+
+static void uclogic_template_test(struct kunit *test)
+{
+ __u8 *res;
+ const struct uclogic_template_case *params = test->param_value;
+
+ res = uclogic_rdesc_template_apply(params->template,
+ params->template_size,
+ params->param_list,
+ params->param_num);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
+ KUNIT_EXPECT_EQ(test, 0,
+ memcmp(res, params->expected, params->template_size));
+ kfree(res);
+}
+
+static struct kunit_case hid_uclogic_rdesc_test_cases[] = {
+ KUNIT_CASE_PARAM(uclogic_template_test, uclogic_template_gen_params),
+ {}
+};
+
+static struct kunit_suite hid_uclogic_rdesc_test_suite = {
+ .name = "hid-uclogic-rdesc-test",
+ .test_cases = hid_uclogic_rdesc_test_cases,
+};
+
+kunit_test_suite(hid_uclogic_rdesc_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the UC-Logic driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index 13f9ce73f1b1..3d68e8b0784d 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -859,6 +859,108 @@ const __u8 uclogic_rdesc_v2_frame_dial_arr[] = {
const size_t uclogic_rdesc_v2_frame_dial_size =
sizeof(uclogic_rdesc_v2_frame_dial_arr);
+/* Fixed report descriptor template for UGEE v2 pen reports */
+const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[] = {
+ 0x05, 0x0d, /* Usage Page (Digitizers), */
+ 0x09, 0x01, /* Usage (Digitizer), */
+ 0xa1, 0x01, /* Collection (Application), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xa1, 0x00, /* Collection (Physical), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x35, 0x00, /* Physical Minimum (0), */
+ 0xa4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x55, 0x0d, /* Unit Exponent (-3), */
+ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM),
+ /* Logical Maximum (PLACEHOLDER), */
+ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM),
+ /* Physical Maximum (PLACEHOLDER), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM),
+ /* Logical Maximum (PLACEHOLDER), */
+ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM),
+ /* Physical Maximum (PLACEHOLDER), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xb4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x45, 0x00, /* Physical Maximum (0), */
+ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM),
+ /* Logical Maximum (PLACEHOLDER), */
+ 0x75, 0x0D, /* Report Size (13), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x09, 0x3d, /* Usage (X Tilt), */
+ 0x35, 0xC3, /* Physical Minimum (-61), */
+ 0x45, 0x3C, /* Physical Maximum (60), */
+ 0x15, 0xC3, /* Logical Minimum (-61), */
+ 0x25, 0x3C, /* Logical Maximum (60), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x3e, /* Usage (Y Tilt), */
+ 0x35, 0xC3, /* Physical Minimum (-61), */
+ 0x45, 0x3C, /* Physical Maximum (60), */
+ 0x15, 0xC3, /* Logical Minimum (-61), */
+ 0x25, 0x3C, /* Logical Maximum (60), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xc0, /* End Collection, */
+ 0xc0, /* End Collection */
+};
+const size_t uclogic_rdesc_ugee_v2_pen_template_size =
+ sizeof(uclogic_rdesc_ugee_v2_pen_template_arr);
+
+/* Fixed report descriptor template for UGEE v2 frame reports (buttons only) */
+const __u8 uclogic_rdesc_ugee_v2_frame_btn_template_arr[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x07, /* Usage (Keypad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, UCLOGIC_RDESC_V1_FRAME_ID,
+ /* Report ID, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x39, /* Usage (Tablet Function Keys), */
+ 0xA0, /* Collection (Physical), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ UCLOGIC_RDESC_FRAME_PH_BTN,
+ /* Usage Maximum (PLACEHOLDER), */
+ 0x95, 0x0A, /* Report Count (10), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x46, /* Report Count (70), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+const size_t uclogic_rdesc_ugee_v2_frame_btn_template_size =
+ sizeof(uclogic_rdesc_ugee_v2_frame_btn_template_arr);
+
/* Fixed report descriptor for Ugee EX07 frame */
const __u8 uclogic_rdesc_ugee_ex07_frame_arr[] = {
0x05, 0x01, /* Usage Page (Desktop), */
@@ -979,7 +1081,7 @@ const size_t uclogic_rdesc_xppen_deco01_frame_size =
* uclogic_rdesc_template_apply() - apply report descriptor parameters to a
* report descriptor template, creating a report descriptor. Copies the
* template over to the new report descriptor and replaces every occurrence of
- * UCLOGIC_RDESC_PH_HEAD, followed by an index byte, with the value from the
+ * the template placeholders, followed by an index byte, with the value from the
* parameter list at that index.
*
* @template_ptr: Pointer to the template buffer.
@@ -996,7 +1098,8 @@ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
const s32 *param_list,
size_t param_num)
{
- static const __u8 head[] = {UCLOGIC_RDESC_PH_HEAD};
+ static const __u8 btn_head[] = {UCLOGIC_RDESC_FRAME_PH_BTN_HEAD};
+ static const __u8 pen_head[] = {UCLOGIC_RDESC_PEN_PH_HEAD};
__u8 *rdesc_ptr;
__u8 *p;
s32 v;
@@ -1005,12 +1108,19 @@ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
if (rdesc_ptr == NULL)
return NULL;
- for (p = rdesc_ptr; p + sizeof(head) < rdesc_ptr + template_size;) {
- if (memcmp(p, head, sizeof(head)) == 0 &&
- p[sizeof(head)] < param_num) {
- v = param_list[p[sizeof(head)]];
+ for (p = rdesc_ptr; p + sizeof(btn_head) < rdesc_ptr + template_size;) {
+ if (p + sizeof(pen_head) < rdesc_ptr + template_size &&
+ memcmp(p, pen_head, sizeof(pen_head)) == 0 &&
+ p[sizeof(pen_head)] < param_num) {
+ v = param_list[p[sizeof(pen_head)]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
- p += sizeof(head) + 1;
+ p += sizeof(pen_head) + 1;
+ } else if (memcmp(p, btn_head, sizeof(btn_head)) == 0 &&
+ p[sizeof(btn_head)] < param_num) {
+ v = param_list[p[sizeof(btn_head)]];
+ put_unaligned((__u8)0x2A, p); /* Usage Maximum */
+ put_unaligned_le16((__force u16)cpu_to_le16(v), p + 1);
+ p += sizeof(btn_head) + 1;
} else {
p++;
}
diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
index 0c6e95e8bde7..86e64a9ee6bd 100644
--- a/drivers/hid/hid-uclogic-rdesc.h
+++ b/drivers/hid/hid-uclogic-rdesc.h
@@ -81,7 +81,8 @@ extern __u8 uclogic_rdesc_twha60_fixed1_arr[];
extern const size_t uclogic_rdesc_twha60_fixed1_size;
/* Report descriptor template placeholder head */
-#define UCLOGIC_RDESC_PH_HEAD 0xFE, 0xED, 0x1D
+#define UCLOGIC_RDESC_PEN_PH_HEAD 0xFE, 0xED, 0x1D
+#define UCLOGIC_RDESC_FRAME_PH_BTN_HEAD 0xFE, 0xED
/* Apply report descriptor parameters to a report descriptor template */
extern __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
@@ -89,19 +90,24 @@ extern __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
const s32 *param_list,
size_t param_num);
-/* Pen report descriptor template placeholder IDs */
-enum uclogic_rdesc_pen_ph_id {
+/* Report descriptor template placeholder IDs */
+enum uclogic_rdesc_ph_id {
UCLOGIC_RDESC_PEN_PH_ID_X_LM,
UCLOGIC_RDESC_PEN_PH_ID_X_PM,
UCLOGIC_RDESC_PEN_PH_ID_Y_LM,
UCLOGIC_RDESC_PEN_PH_ID_Y_PM,
UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM,
- UCLOGIC_RDESC_PEN_PH_ID_NUM
+ UCLOGIC_RDESC_FRAME_PH_ID_UM,
+ UCLOGIC_RDESC_PH_ID_NUM
};
/* Report descriptor pen template placeholder */
#define UCLOGIC_RDESC_PEN_PH(_ID) \
- UCLOGIC_RDESC_PH_HEAD, UCLOGIC_RDESC_PEN_PH_ID_##_ID
+ UCLOGIC_RDESC_PEN_PH_HEAD, UCLOGIC_RDESC_PEN_PH_ID_##_ID
+
+/* Report descriptor frame buttons template placeholder */
+#define UCLOGIC_RDESC_FRAME_PH_BTN \
+ UCLOGIC_RDESC_FRAME_PH_BTN_HEAD, UCLOGIC_RDESC_FRAME_PH_ID_UM
/* Report ID for v1 pen reports */
#define UCLOGIC_RDESC_V1_PEN_ID 0x07
@@ -155,6 +161,14 @@ extern const size_t uclogic_rdesc_v2_frame_dial_size;
/* Device ID byte offset in v2 frame dial reports */
#define UCLOGIC_RDESC_V2_FRAME_DIAL_DEV_ID_BYTE 0x4
+/* Fixed report descriptor template for UGEE v2 pen reports */
+extern const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[];
+extern const size_t uclogic_rdesc_ugee_v2_pen_template_size;
+
+/* Fixed report descriptor template for UGEE v2 frame reports (buttons only) */
+extern const __u8 uclogic_rdesc_ugee_v2_frame_btn_template_arr[];
+extern const size_t uclogic_rdesc_ugee_v2_frame_btn_template_size;
+
/* Fixed report descriptor for Ugee EX07 frame */
extern const __u8 uclogic_rdesc_ugee_ex07_frame_arr[];
extern const size_t uclogic_rdesc_ugee_ex07_frame_size;
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index a16c6a69680b..5273ee2bb134 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -32,6 +32,21 @@ config I2C_HID_OF
will be called i2c-hid-of. It will also build/depend on the
module i2c-hid.
+config I2C_HID_OF_ELAN
+ tristate "Driver for Elan hid-i2c based devices on OF systems"
+ default n
+ depends on I2C && INPUT && OF
+ help
+ Say Y here if you want support for Elan i2c devices that use
+ the i2c-hid protocol on Open Firmware (Device Tree)-based
+ systems.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid-of-elan. It will also build/depend on
+ the module i2c-hid.
+
config I2C_HID_OF_GOODIX
tristate "Driver for Goodix hid-i2c based devices on OF systems"
default n
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 302545a771f3..55bd5e0f35af 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -10,4 +10,5 @@ i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
obj-$(CONFIG_I2C_HID_ACPI) += i2c-hid-acpi.o
obj-$(CONFIG_I2C_HID_OF) += i2c-hid-of.o
+obj-$(CONFIG_I2C_HID_OF_ELAN) += i2c-hid-of-elan.o
obj-$(CONFIG_I2C_HID_OF_GOODIX) += i2c-hid-of-goodix.o
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
new file mode 100644
index 000000000000..2d991325e734
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Elan touchscreens that use the i2c-hid protocol.
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include "i2c-hid.h"
+
+struct elan_i2c_hid_chip_data {
+ unsigned int post_gpio_reset_delay_ms;
+ unsigned int post_power_delay_ms;
+ u16 hid_descriptor_address;
+};
+
+struct i2c_hid_of_elan {
+ struct i2chid_ops ops;
+
+ struct regulator *vcc33;
+ struct regulator *vccio;
+ struct gpio_desc *reset_gpio;
+ const struct elan_i2c_hid_chip_data *chip_data;
+};
+
+static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_elan *ihid_elan =
+ container_of(ops, struct i2c_hid_of_elan, ops);
+ int ret;
+
+ ret = regulator_enable(ihid_elan->vcc33);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(ihid_elan->vccio);
+ if (ret) {
+ regulator_disable(ihid_elan->vcc33);
+ return ret;
+ }
+
+ if (ihid_elan->chip_data->post_power_delay_ms)
+ msleep(ihid_elan->chip_data->post_power_delay_ms);
+
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+ if (ihid_elan->chip_data->post_gpio_reset_delay_ms)
+ msleep(ihid_elan->chip_data->post_gpio_reset_delay_ms);
+
+ return 0;
+}
+
+static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_elan *ihid_elan =
+ container_of(ops, struct i2c_hid_of_elan, ops);
+
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+ regulator_disable(ihid_elan->vccio);
+ regulator_disable(ihid_elan->vcc33);
+}
+
+static int i2c_hid_of_elan_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_hid_of_elan *ihid_elan;
+
+ ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
+ if (!ihid_elan)
+ return -ENOMEM;
+
+ ihid_elan->ops.power_up = elan_i2c_hid_power_up;
+ ihid_elan->ops.power_down = elan_i2c_hid_power_down;
+
+ /* Start out with reset asserted */
+ ihid_elan->reset_gpio =
+ devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ihid_elan->reset_gpio))
+ return PTR_ERR(ihid_elan->reset_gpio);
+
+ ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
+ if (IS_ERR(ihid_elan->vccio))
+ return PTR_ERR(ihid_elan->vccio);
+
+ ihid_elan->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+ if (IS_ERR(ihid_elan->vcc33))
+ return PTR_ERR(ihid_elan->vcc33);
+
+ ihid_elan->chip_data = device_get_match_data(&client->dev);
+
+ return i2c_hid_core_probe(client, &ihid_elan->ops,
+ ihid_elan->chip_data->hid_descriptor_address, 0);
+}
+
+static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
+ .post_power_delay_ms = 1,
+ .post_gpio_reset_delay_ms = 300,
+ .hid_descriptor_address = 0x0001,
+};
+
+static const struct of_device_id elan_i2c_hid_of_match[] = {
+ { .compatible = "elan,ekth6915", .data = &elan_ekth6915_chip_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, elan_i2c_hid_of_match);
+
+static struct i2c_driver elan_i2c_hid_ts_driver = {
+ .driver = {
+ .name = "i2c_hid_of_elan",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(elan_i2c_hid_of_match),
+ },
+ .probe = i2c_hid_of_elan_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+};
+module_i2c_driver(elan_i2c_hid_ts_driver);
+
+MODULE_AUTHOR("Douglas Anderson <dianders@chromium.org>");
+MODULE_DESCRIPTION("Elan i2c-hid touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 8ccb246b0114..15e14239af82 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -578,7 +578,7 @@ static void _ish_sync_fw_clock(struct ishtp_device *dev)
static unsigned long prev_sync;
uint64_t usec;
- if (prev_sync && jiffies - prev_sync < 20 * HZ)
+ if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ))
return;
prev_sync = jiffies;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 4338c9b68a43..e3d70c5460e9 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -328,7 +328,7 @@ do_get_report:
/**
* ish_cl_event_cb() - bus driver callback for incoming message/packet
- * @device: Pointer to the the ishtp client device for which this message
+ * @device: Pointer to the ishtp client device for which this message
* is targeted
*
* Remove the packet from the list and process the message by calling
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index e46330b2e561..87637f813de2 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -19,12 +19,30 @@
#include "surface_hid_core.h"
+/* -- Utility functions. ---------------------------------------------------- */
+
+static bool surface_hid_is_hot_removed(struct surface_hid_device *shid)
+{
+ /*
+ * Non-ssam client devices, i.e. platform client devices, cannot be
+ * hot-removed.
+ */
+ if (!is_ssam_device(shid->dev))
+ return false;
+
+ return ssam_device_is_hot_removed(to_ssam_device(shid->dev));
+}
+
+
/* -- Device descriptor access. --------------------------------------------- */
static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
{
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
(u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
if (status)
@@ -61,6 +79,9 @@ static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
{
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
(u8 *)&shid->attrs, sizeof(shid->attrs));
if (status)
@@ -88,9 +109,18 @@ static int surface_hid_start(struct hid_device *hid)
static void surface_hid_stop(struct hid_device *hid)
{
struct surface_hid_device *shid = hid->driver_data;
+ bool hot_removed;
+
+ /*
+ * Communication may fail for devices that have been hot-removed. This
+ * also includes unregistration of HID events, so we need to check this
+ * here. Only if the device has not been marked as hot-removed can we
+ * safely disable events.
+ */
+ hot_removed = surface_hid_is_hot_removed(shid);
/* Note: This call will log errors for us, so ignore them here. */
- ssam_notifier_unregister(shid->ctrl, &shid->notif);
+ __ssam_notifier_unregister(shid->ctrl, &shid->notif, !hot_removed);
}
static int surface_hid_open(struct hid_device *hid)
@@ -109,6 +139,9 @@ static int surface_hid_parse(struct hid_device *hid)
u8 *buf;
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -126,6 +159,9 @@ static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportn
{
struct surface_hid_device *shid = hid->driver_data;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
return shid->ops.output_report(shid, reportnum, buf, len);
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 203d27d198b8..3f8b24a57014 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -91,6 +91,7 @@
#include <linux/leds.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
+#include <linux/timer.h>
#include <asm/unaligned.h>
/*
@@ -167,6 +168,7 @@ struct wacom {
struct delayed_work init_work;
struct wacom_remote *remote;
struct work_struct mode_change_work;
+ struct timer_list idleprox_timer;
bool generic_has_leds;
struct wacom_leds {
struct wacom_group_leds *groups;
@@ -239,4 +241,5 @@ struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
int wacom_equivalent_usage(int usage);
int wacom_initialize_leds(struct wacom *wacom);
+void wacom_idleprox_timeout(struct timer_list *list);
#endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 620fe74f5676..194a2e327591 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2121,7 +2121,7 @@ static int wacom_register_inputs(struct wacom *wacom)
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
if (error) {
- /* no pad in use on this interface */
+ /* no pad events using this interface */
input_free_device(pad_input_dev);
wacom_wac->pad_input = NULL;
pad_input_dev = NULL;
@@ -2781,6 +2781,7 @@ static int wacom_probe(struct hid_device *hdev,
INIT_WORK(&wacom->battery_work, wacom_battery_work);
INIT_WORK(&wacom->remote_work, wacom_remote_work);
INIT_WORK(&wacom->mode_change_work, wacom_mode_change_work);
+ timer_setup(&wacom->idleprox_timer, &wacom_idleprox_timeout, TIMER_DEFERRABLE);
/* ask for the report descriptor to be loaded by HID */
error = hid_parse(hdev);
@@ -2821,6 +2822,7 @@ static void wacom_remove(struct hid_device *hdev)
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);
cancel_work_sync(&wacom->mode_change_work);
+ del_timer_sync(&wacom->idleprox_timer);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 9470c2b0b529..d049239256a2 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -11,6 +11,7 @@
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
+#include <linux/jiffies.h>
/* resolution for penabled devices */
#define WACOM_PL_RES 20
@@ -41,6 +42,43 @@ static int wacom_numbered_button_to_key(int n);
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
int group);
+
+static void wacom_force_proxout(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input = wacom_wac->pen_input;
+
+ wacom_wac->shared->stylus_in_proximity = 0;
+
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_STYLUS3, 0);
+ input_report_key(input, wacom_wac->tool[0], 0);
+ if (wacom_wac->serial[0]) {
+ input_report_abs(input, ABS_MISC, 0);
+ }
+ input_report_abs(input, ABS_PRESSURE, 0);
+
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ wacom_wac->serial[0] = 0;
+
+ input_sync(input);
+}
+
+void wacom_idleprox_timeout(struct timer_list *list)
+{
+ struct wacom *wacom = from_timer(wacom, list, idleprox_timer);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ if (!wacom_wac->hid_data.sense_state) {
+ return;
+ }
+
+ hid_warn(wacom->hdev, "%s: tool appears to be hung in-prox. forcing it out.\n", __func__);
+ wacom_force_proxout(wacom_wac);
+}
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -638,9 +676,26 @@ static int wacom_intuos_id_mangle(int tool_id)
return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
}
+static bool wacom_is_art_pen(int tool_id)
+{
+ bool is_art_pen = false;
+
+ switch (tool_id) {
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ is_art_pen = true;
+ break;
+ }
+ return is_art_pen;
+}
+
static int wacom_intuos_get_tool_type(int tool_id)
{
- int tool_type;
+ int tool_type = BTN_TOOL_PEN;
+
+ if (wacom_is_art_pen(tool_id))
+ return tool_type;
switch (tool_id) {
case 0x812: /* Inking pen */
@@ -655,12 +710,9 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x852:
case 0x823: /* Intuos3 Grip Pen */
case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
case 0x802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
case 0x10842: /* MobileStudio Pro Pro Pen slim */
case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
case 0x16802: /* Cintiq 13HD Pro Pen */
@@ -718,10 +770,6 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
-
- default: /* Unknown tool */
- tool_type = BTN_TOOL_PEN;
- break;
}
return tool_type;
}
@@ -2009,7 +2057,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
wacom_wac->has_mute_touch_switch = true;
usage->type = EV_SW;
usage->code = SW_MUTE_DEVICE;
- features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHSTRIP:
wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
@@ -2089,6 +2136,30 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
wacom_wac->hid_data.inrange_state |= value;
}
+ /* Process touch switch state first since it is reported through the touch
+ * interface, which is independent of the pad interface. In the case when
+ * there are no other pad events, the pad interface will not even be created.
+ */
+ if ((equivalent_usage == WACOM_HID_WD_MUTE_DEVICE) ||
+ (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)) {
+ if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
+ input_report_switch(wacom_wac->shared->touch_input,
+ SW_MUTE_DEVICE, !(*is_touch_on));
+ input_sync(wacom_wac->shared->touch_input);
+ }
+ return;
+ }
+
+ if (!input)
+ return;
+
switch (equivalent_usage) {
case WACOM_HID_WD_TOUCHRING:
/*
@@ -2124,22 +2195,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
input_event(input, usage->type, usage->code, 0);
break;
- case WACOM_HID_WD_MUTE_DEVICE:
- case WACOM_HID_WD_TOUCHONOFF:
- if (wacom_wac->shared->touch_input) {
- bool *is_touch_on = &wacom_wac->shared->is_touch_on;
-
- if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
- *is_touch_on = !(*is_touch_on);
- else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
- *is_touch_on = value;
-
- input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !(*is_touch_on));
- input_sync(wacom_wac->shared->touch_input);
- }
- break;
-
case WACOM_HID_WD_MODE_CHANGE:
if (wacom_wac->is_direct_mode != value) {
wacom_wac->is_direct_mode = value;
@@ -2312,6 +2367,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
value = field->logical_maximum - value;
break;
case HID_DG_INRANGE:
+ mod_timer(&wacom->idleprox_timer, jiffies + msecs_to_jiffies(100));
wacom_wac->hid_data.inrange_state = value;
if (!(features->quirks & WACOM_QUIRK_SENSE))
wacom_wac->hid_data.sense_state = value;
@@ -2336,6 +2392,9 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
}
return;
case HID_DG_TWIST:
+ /* don't modify the value if the pen doesn't support the feature */
+ if (!wacom_is_art_pen(wacom_wac->id[0])) return;
+
/*
* Userspace expects pen twist to have its zero point when
* the buttons/finger is on the tablet's left. HID values
@@ -2822,7 +2881,7 @@ void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
/* usage tests must precede field tests */
if (WACOM_BATTERY_USAGE(usage))
wacom_wac_battery_event(hdev, field, usage, value);
- else if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ else if (WACOM_PAD_FIELD(field))
wacom_wac_pad_event(hdev, field, usage, value);
else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_event(hdev, field, usage, value);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6218bbf6863a..eca7afd366d6 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -171,6 +171,14 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.rescind_work_queue =
+ create_workqueue("hv_vmbus_rescind");
+ if (!vmbus_connection.rescind_work_queue) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vmbus_connection.ignore_any_offer_msg = false;
+
vmbus_connection.handle_primary_chan_wq =
create_workqueue("hv_pri_chan");
if (!vmbus_connection.handle_primary_chan_wq) {
@@ -357,6 +365,9 @@ void vmbus_disconnect(void)
if (vmbus_connection.handle_primary_chan_wq)
destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+ if (vmbus_connection.rescind_work_queue)
+ destroy_workqueue(vmbus_connection.rescind_work_queue);
+
if (vmbus_connection.work_queue)
destroy_workqueue(vmbus_connection.work_queue);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 91e8a72eee14..fdf6decacf06 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -248,7 +249,7 @@ struct dm_capabilities_resp_msg {
* num_committed: Committed memory in pages.
* page_file_size: The accumulated size of all page files
* in the system in pages.
- * zero_free: The nunber of zero and free pages.
+ * zero_free: The number of zero and free pages.
* page_file_writes: The writes to the page file in pages.
* io_diff: An indicator of file cache efficiency or page file activity,
* calculated as File Cache Page Fault Count - Page Read Count.
@@ -567,6 +568,11 @@ struct hv_dynmem_device {
__u32 version;
struct page_reporting_dev_info pr_dev_info;
+
+ /*
+ * Maximum number of pages that can be hot_add-ed
+ */
+ __u64 max_dynamic_page_count;
};
static struct hv_dynmem_device dm_device;
@@ -1078,6 +1084,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
pr_info("Max. dynamic memory size: %llu MB\n",
(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
+ dm->max_dynamic_page_count = *max_page_count;
}
break;
@@ -1117,6 +1124,19 @@ static unsigned long compute_balloon_floor(void)
}
/*
+ * Compute total committed memory pages
+ */
+
+static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
+{
+ return vm_memory_committed() +
+ dm->num_pages_ballooned +
+ (dm->num_pages_added > dm->num_pages_onlined ?
+ dm->num_pages_added - dm->num_pages_onlined : 0) +
+ compute_balloon_floor();
+}
+
+/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
* periodically at 1 second intervals.
@@ -1157,11 +1177,7 @@ static void post_status(struct hv_dynmem_device *dm)
* asking us to balloon them out.
*/
num_pages_avail = si_mem_available();
- num_pages_committed = vm_memory_committed() +
- dm->num_pages_ballooned +
- (dm->num_pages_added > dm->num_pages_onlined ?
- dm->num_pages_added - dm->num_pages_onlined : 0) +
- compute_balloon_floor();
+ num_pages_committed = get_pages_committed(dm);
trace_balloon_status(num_pages_avail, num_pages_committed,
vm_memory_committed(), dm->num_pages_ballooned,
@@ -1807,6 +1823,109 @@ out:
return ret;
}
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * hv_balloon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the hv-balloon statistics that can be accessed via debugfs.
+ *
+ * Return: zero on success or an error code.
+ */
+static int hv_balloon_debug_show(struct seq_file *f, void *offset)
+{
+ struct hv_dynmem_device *dm = f->private;
+ char *sname;
+
+ seq_printf(f, "%-22s: %u.%u\n", "host_version",
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
+
+ seq_printf(f, "%-22s:", "capabilities");
+ if (ballooning_enabled())
+ seq_puts(f, " enabled");
+
+ if (hot_add_enabled())
+ seq_puts(f, " hot_add");
+
+ seq_puts(f, "\n");
+
+ seq_printf(f, "%-22s: %u", "state", dm->state);
+ switch (dm->state) {
+ case DM_INITIALIZING:
+ sname = "Initializing";
+ break;
+ case DM_INITIALIZED:
+ sname = "Initialized";
+ break;
+ case DM_BALLOON_UP:
+ sname = "Balloon Up";
+ break;
+ case DM_BALLOON_DOWN:
+ sname = "Balloon Down";
+ break;
+ case DM_HOT_ADD:
+ sname = "Hot Add";
+ break;
+ case DM_INIT_ERROR:
+ sname = "Error";
+ break;
+ default:
+ sname = "Unknown";
+ }
+ seq_printf(f, " (%s)\n", sname);
+
+ /* HV Page Size */
+ seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
+
+ /* Pages added with hot_add */
+ seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
+
+ /* pages that are "onlined"/used from pages_added */
+ seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
+
+ /* pages we have given back to host */
+ seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
+
+ seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
+ get_pages_committed(dm));
+
+ seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
+ dm->max_dynamic_page_count);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
+
+static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+ debugfs_create_file("hv-balloon", 0444, NULL, b,
+ &hv_balloon_debug_fops);
+}
+
+static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+ debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+}
+
+#else
+
+static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+}
+
+static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
static int balloon_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1854,6 +1973,8 @@ static int balloon_probe(struct hv_device *dev,
goto probe_error;
}
+ hv_balloon_debugfs_init(&dm_device);
+
return 0;
probe_error:
@@ -1879,6 +2000,8 @@ static int balloon_remove(struct hv_device *dev)
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+ hv_balloon_debugfs_exit(dm);
+
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4f5b824b16cf..dc673edf053c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -261,6 +261,13 @@ struct vmbus_connection {
struct workqueue_struct *work_queue;
struct workqueue_struct *handle_primary_chan_wq;
struct workqueue_struct *handle_sub_chan_wq;
+ struct workqueue_struct *rescind_work_queue;
+
+ /*
+ * On suspension of the vmbus, the accumulated offer messages
+ * must be dropped.
+ */
+ bool ignore_any_offer_msg;
/*
* The number of sub-channels and hv_sock channels that should be
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 547ae334e5cd..23c680d1a0f5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1160,7 +1160,9 @@ void vmbus_on_msg_dpc(unsigned long data)
* work queue: the RESCIND handler can not start to
* run before the OFFER handler finishes.
*/
- schedule_work(&ctx->work);
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
+ queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
@@ -1186,6 +1188,8 @@ void vmbus_on_msg_dpc(unsigned long data)
* to the CPUs which will execute the offer & rescind
* works by the time these works will start execution.
*/
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
atomic_inc(&vmbus_connection.offer_in_progress);
fallthrough;
@@ -2446,15 +2450,20 @@ acpi_walk_err:
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
+ struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
+ hv_context.cpu_context, VMBUS_CONNECT_CPU);
struct vmbus_channel *channel, *sc;
- while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
- /*
- * We wait here until the completion of any channel
- * offers that are currently in progress.
- */
- usleep_range(1000, 2000);
- }
+ tasklet_disable(&hv_cpu->msg_dpc);
+ vmbus_connection.ignore_any_offer_msg = true;
+ /* The tasklet_enable() takes care of providing a memory barrier */
+ tasklet_enable(&hv_cpu->msg_dpc);
+
+ /* Drain all the workqueues as we are in suspend */
+ drain_workqueue(vmbus_connection.rescind_work_queue);
+ drain_workqueue(vmbus_connection.work_queue);
+ drain_workqueue(vmbus_connection.handle_primary_chan_wq);
+ drain_workqueue(vmbus_connection.handle_sub_chan_wq);
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2531,6 +2540,8 @@ static int vmbus_bus_resume(struct device *dev)
size_t msgsize;
int ret;
+ vmbus_connection.ignore_any_offer_msg = false;
+
/*
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 590d3d550acb..e70d9614bec2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -100,6 +100,7 @@ config SENSORS_AD7418
config SENSORS_ADM1021
tristate "Analog Devices ADM1021 and compatibles"
depends on I2C
+ depends on SENSORS_LM90=n
help
If you say yes here you get support for Analog Devices ADM1021
and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A,
@@ -256,13 +257,13 @@ config SENSORS_AHT10
will be called aht10.
config SENSORS_AQUACOMPUTER_D5NEXT
- tristate "Aquacomputer D5 Next, Octo, Farbwerk, and Farbwerk 360"
+ tristate "Aquacomputer D5 Next, Octo, Quadro, Farbwerk, and Farbwerk 360"
depends on USB_HID
select CRC16
help
If you say yes here you get support for sensors and fans of
- the Aquacomputer D5 Next watercooling pump, Octo fan
- controller, Farbwerk and Farbwerk 360 RGB controllers, where
+ the Aquacomputer D5 Next watercooling pump, Octo and Quadro fan
+ controllers, Farbwerk and Farbwerk 360 RGB controllers, where
available.
This driver can also be built as a module. If so, the module
@@ -381,7 +382,7 @@ config SENSORS_ARM_SCPI
config SENSORS_ASB100
tristate "Asus ASB100 Bach"
- depends on X86 && I2C
+ depends on (X86 || COMPILE_TEST) && I2C
select HWMON_VID
help
If you say yes here you get support for the ASB100 Bach sensor
@@ -626,7 +627,7 @@ config SENSORS_MC13783_ADC
config SENSORS_FSCHMD
tristate "Fujitsu Siemens Computers sensor chips"
- depends on X86 && I2C
+ depends on (X86 || COMPILE_TEST) && I2C
help
If you say yes here you get support for the following Fujitsu
Siemens Computers (FSC) sensor chips: Poseidon, Scylla, Hermes,
@@ -1102,6 +1103,7 @@ config SENSORS_MAX6639
config SENSORS_MAX6642
tristate "Maxim MAX6642 sensor chip"
depends on I2C
+ depends on SENSORS_LM90=n
help
If you say yes here you get support for MAX6642 sensor chip.
MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor
@@ -1357,12 +1359,15 @@ config SENSORS_LM90
tristate "National Semiconductor LM90 and compatibles"
depends on I2C
help
- If you say yes here you get support for National Semiconductor LM90,
- LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
- Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658,
- MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696,
- ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG,
- Philips SA56004, GMT G781, Texas Instruments TMP451 and TMP461
+ If you say yes here you get support for National Semiconductor LM84,
+ LM90, LM86, LM89 and LM99, Analog Devices ADM1020, ADM1021, ADM1021A,
+ ADM1023, ADM1032, ADT7461, ADT7461A, ADT7481, ADT7482, and ADT7483A,
+ Maxim MAX1617, MAX6642, MAX6646, MAX6647, MAX6648, MAX6649, MAX6654,
+ MAX6657, MAX6658, MAX6659, MAX6680, MAX6681, MAX6692, MAX6695,
+ MAX6696,
+ ON Semiconductor NCT1008, NCT210, NCT72, NCT214, NCT218,
+ Winbond/Nuvoton W83L771W/G/AWG/ASG,
+ Philips NE1618, SA56004, GMT G781, Texas Instruments TMP451 and TMP461
sensor chips.
This driver can also be built as a module. If so, the module
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index a0e69f7ece36..66430553cc45 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo)
+ * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo,
+ * Quadro)
*
* Aquacomputer devices send HID reports (with ID 0x01) every second to report
* sensor values.
@@ -21,17 +22,19 @@
#define USB_VENDOR_ID_AQUACOMPUTER 0x0c70
#define USB_PRODUCT_ID_FARBWERK 0xf00a
+#define USB_PRODUCT_ID_QUADRO 0xf00d
#define USB_PRODUCT_ID_D5NEXT 0xf00e
#define USB_PRODUCT_ID_FARBWERK360 0xf010
#define USB_PRODUCT_ID_OCTO 0xf011
-enum kinds { d5next, farbwerk, farbwerk360, octo };
+enum kinds { d5next, farbwerk, farbwerk360, octo, quadro };
static const char *const aqc_device_names[] = {
[d5next] = "d5next",
[farbwerk] = "farbwerk",
[farbwerk360] = "farbwerk360",
- [octo] = "octo"
+ [octo] = "octo",
+ [quadro] = "quadro"
};
#define DRIVER_NAME "aquacomputer_d5next"
@@ -54,60 +57,61 @@ static u8 secondary_ctrl_report[] = {
0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x34, 0xC6
};
-/* Register offsets for the D5 Next pump */
-#define D5NEXT_POWER_CYCLES 24
-
-#define D5NEXT_COOLANT_TEMP 87
-
-#define D5NEXT_PUMP_SPEED 116
-#define D5NEXT_FAN_SPEED 103
-
-#define D5NEXT_PUMP_POWER 114
-#define D5NEXT_FAN_POWER 101
-
-#define D5NEXT_PUMP_VOLTAGE 110
-#define D5NEXT_FAN_VOLTAGE 97
-#define D5NEXT_5V_VOLTAGE 57
+/* Register offsets for all Aquacomputer devices */
+#define AQC_TEMP_SENSOR_SIZE 0x02
+#define AQC_TEMP_SENSOR_DISCONNECTED 0x7FFF
+#define AQC_FAN_PERCENT_OFFSET 0x00
+#define AQC_FAN_VOLTAGE_OFFSET 0x02
+#define AQC_FAN_CURRENT_OFFSET 0x04
+#define AQC_FAN_POWER_OFFSET 0x06
+#define AQC_FAN_SPEED_OFFSET 0x08
-#define D5NEXT_PUMP_CURRENT 112
-#define D5NEXT_FAN_CURRENT 99
+/* Register offsets for the D5 Next pump */
+#define D5NEXT_POWER_CYCLES 0x18
+#define D5NEXT_COOLANT_TEMP 0x57
+#define D5NEXT_NUM_FANS 2
+#define D5NEXT_NUM_SENSORS 1
+#define D5NEXT_PUMP_OFFSET 0x6c
+#define D5NEXT_FAN_OFFSET 0x5f
+#define D5NEXT_5V_VOLTAGE 0x39
+#define D5NEXT_12V_VOLTAGE 0x37
+#define D5NEXT_CTRL_REPORT_SIZE 0x329
+static u8 d5next_sensor_fan_offsets[] = { D5NEXT_PUMP_OFFSET, D5NEXT_FAN_OFFSET };
+
+/* Pump and fan speed registers in D5 Next control report (from 0-100%) */
+static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 };
/* Register offsets for the Farbwerk RGB controller */
#define FARBWERK_NUM_SENSORS 4
#define FARBWERK_SENSOR_START 0x2f
-#define FARBWERK_SENSOR_SIZE 0x02
-#define FARBWERK_SENSOR_DISCONNECTED 0x7FFF
/* Register offsets for the Farbwerk 360 RGB controller */
#define FARBWERK360_NUM_SENSORS 4
#define FARBWERK360_SENSOR_START 0x32
-#define FARBWERK360_SENSOR_SIZE 0x02
-#define FARBWERK360_SENSOR_DISCONNECTED 0x7FFF
/* Register offsets for the Octo fan controller */
#define OCTO_POWER_CYCLES 0x18
#define OCTO_NUM_FANS 8
-#define OCTO_FAN_PERCENT_OFFSET 0x00
-#define OCTO_FAN_VOLTAGE_OFFSET 0x02
-#define OCTO_FAN_CURRENT_OFFSET 0x04
-#define OCTO_FAN_POWER_OFFSET 0x06
-#define OCTO_FAN_SPEED_OFFSET 0x08
-
-static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
-
#define OCTO_NUM_SENSORS 4
#define OCTO_SENSOR_START 0x3D
-#define OCTO_SENSOR_SIZE 0x02
-#define OCTO_SENSOR_DISCONNECTED 0x7FFF
-
-#define OCTO_CTRL_REPORT_SIZE 0x65F
-#define OCTO_CTRL_REPORT_CHECKSUM_OFFSET 0x65D
-#define OCTO_CTRL_REPORT_CHECKSUM_START 0x01
-#define OCTO_CTRL_REPORT_CHECKSUM_LENGTH 0x65C
+#define OCTO_CTRL_REPORT_SIZE 0x65F
+static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
/* Fan speed registers in Octo control report (from 0-100%) */
static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0x259, 0x2AE };
+/* Register offsets for the Quadro fan controller */
+#define QUADRO_POWER_CYCLES 0x18
+#define QUADRO_NUM_FANS 4
+#define QUADRO_NUM_SENSORS 4
+#define QUADRO_SENSOR_START 0x34
+#define QUADRO_CTRL_REPORT_SIZE 0x3c1
+#define QUADRO_FLOW_SENSOR_OFFSET 0x6e
+static u8 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 };
+
+/* Fan speed registers in Quadro control report (from 0-100%) */
+static u16 quadro_ctrl_fan_offsets[] = { 0x36, 0x8b, 0xe0, 0x135 };
+
/* Labels for D5 Next */
static const char *const label_d5next_temp[] = {
"Coolant temp"
@@ -126,7 +130,8 @@ static const char *const label_d5next_power[] = {
static const char *const label_d5next_voltages[] = {
"Pump voltage",
"Fan voltage",
- "+5V voltage"
+ "+5V voltage",
+ "+12V voltage"
};
static const char *const label_d5next_current[] = {
@@ -134,7 +139,7 @@ static const char *const label_d5next_current[] = {
"Fan current"
};
-/* Labels for Farbwerk, Farbwerk 360 and Octo temperature sensors */
+/* Labels for Farbwerk, Farbwerk 360, Octo and Quadro temperature sensors */
static const char *const label_temp_sensors[] = {
"Sensor 1",
"Sensor 2",
@@ -142,7 +147,7 @@ static const char *const label_temp_sensors[] = {
"Sensor 4"
};
-/* Labels for Octo */
+/* Labels for Octo and Quadro (Quadro uses its own speed labels) */
static const char *const label_fan_speed[] = {
"Fan 1 speed",
"Fan 2 speed",
@@ -187,6 +192,15 @@ static const char *const label_fan_current[] = {
"Fan 8 current"
};
+/* Labels for Quadro fan speeds */
+static const char *const label_quadro_speeds[] = {
+ "Fan 1 speed",
+ "Fan 2 speed",
+ "Fan 3 speed",
+ "Fan 4 speed",
+ "Flow speed [dL/h]"
+};
+
struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
@@ -201,11 +215,19 @@ struct aqc_data {
int checksum_length;
int checksum_offset;
+ int num_fans;
+ u8 *fan_sensor_offsets;
+ u16 *fan_ctrl_offsets;
+ int num_temp_sensors;
+ int temp_sensor_start_offset;
+ u16 power_cycle_count_offset;
+ u8 flow_sensor_offset;
+
/* General info, same across all devices */
u32 serial_number[2];
u16 firmware_version;
- /* How many times the device was powered on */
+ /* How many times the device was powered on, if available */
u32 power_cycles;
/* Sensor values */
@@ -323,56 +345,47 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
switch (type) {
case hwmon_temp:
- switch (priv->kind) {
- case d5next:
- if (channel == 0)
- return 0444;
- break;
- case farbwerk:
- case farbwerk360:
- case octo:
+ if (channel < priv->num_temp_sensors)
return 0444;
- default:
- break;
- }
break;
case hwmon_pwm:
- switch (priv->kind) {
- case octo:
+ if (priv->fan_ctrl_offsets && channel < priv->num_fans) {
switch (attr) {
case hwmon_pwm_input:
return 0644;
default:
break;
}
- break;
- default:
- break;
}
break;
case hwmon_fan:
- case hwmon_power:
- case hwmon_curr:
switch (priv->kind) {
- case d5next:
- if (channel < 2)
+ case quadro:
+ /* Special case to support flow sensor */
+ if (channel < priv->num_fans + 1)
return 0444;
break;
- case octo:
- return 0444;
default:
+ if (channel < priv->num_fans)
+ return 0444;
break;
}
break;
+ case hwmon_power:
+ case hwmon_curr:
+ if (channel < priv->num_fans)
+ return 0444;
+ break;
case hwmon_in:
switch (priv->kind) {
case d5next:
- if (channel < 3)
+ /* Special case to support +5V and +12V voltage sensors */
+ if (channel < priv->num_fans + 2)
return 0444;
break;
- case octo:
- return 0444;
default:
+ if (channel < priv->num_fans)
+ return 0444;
break;
}
break;
@@ -406,16 +419,12 @@ static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
*val = priv->power_input[channel];
break;
case hwmon_pwm:
- switch (priv->kind) {
- case octo:
- ret = aqc_get_ctrl_val(priv, octo_ctrl_fan_offsets[channel]);
+ if (priv->fan_ctrl_offsets) {
+ ret = aqc_get_ctrl_val(priv, priv->fan_ctrl_offsets[channel]);
if (ret < 0)
return ret;
*val = aqc_percent_to_pwm(ret);
- break;
- default:
- break;
}
break;
case hwmon_in:
@@ -469,19 +478,15 @@ static int aqc_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
- switch (priv->kind) {
- case octo:
+ if (priv->fan_ctrl_offsets) {
pwm_value = aqc_pwm_to_percent(val);
if (pwm_value < 0)
return pwm_value;
- ret = aqc_set_ctrl_val(priv, octo_ctrl_fan_offsets[channel],
+ ret = aqc_set_ctrl_val(priv, priv->fan_ctrl_offsets[channel],
pwm_value);
if (ret < 0)
return ret;
- break;
- default:
- break;
}
break;
default:
@@ -576,76 +581,42 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART);
priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION);
- /* Sensor readings */
- switch (priv->kind) {
- case d5next:
- priv->power_cycles = get_unaligned_be32(data + D5NEXT_POWER_CYCLES);
-
- priv->temp_input[0] = get_unaligned_be16(data + D5NEXT_COOLANT_TEMP) * 10;
+ /* Temperature sensor readings */
+ for (i = 0; i < priv->num_temp_sensors; i++) {
+ sensor_value = get_unaligned_be16(data +
+ priv->temp_sensor_start_offset +
+ i * AQC_TEMP_SENSOR_SIZE);
+ if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED)
+ priv->temp_input[i] = -ENODATA;
+ else
+ priv->temp_input[i] = sensor_value * 10;
+ }
- priv->speed_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_SPEED);
- priv->speed_input[1] = get_unaligned_be16(data + D5NEXT_FAN_SPEED);
+ /* Fan speed and related readings */
+ for (i = 0; i < priv->num_fans; i++) {
+ priv->speed_input[i] =
+ get_unaligned_be16(data + priv->fan_sensor_offsets[i] + AQC_FAN_SPEED_OFFSET);
+ priv->power_input[i] =
+ get_unaligned_be16(data + priv->fan_sensor_offsets[i] +
+ AQC_FAN_POWER_OFFSET) * 10000;
+ priv->voltage_input[i] =
+ get_unaligned_be16(data + priv->fan_sensor_offsets[i] +
+ AQC_FAN_VOLTAGE_OFFSET) * 10;
+ priv->current_input[i] =
+ get_unaligned_be16(data + priv->fan_sensor_offsets[i] + AQC_FAN_CURRENT_OFFSET);
+ }
- priv->power_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_POWER) * 10000;
- priv->power_input[1] = get_unaligned_be16(data + D5NEXT_FAN_POWER) * 10000;
+ if (priv->power_cycle_count_offset != 0)
+ priv->power_cycles = get_unaligned_be32(data + priv->power_cycle_count_offset);
- priv->voltage_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_VOLTAGE) * 10;
- priv->voltage_input[1] = get_unaligned_be16(data + D5NEXT_FAN_VOLTAGE) * 10;
+ /* Special-case sensor readings */
+ switch (priv->kind) {
+ case d5next:
priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10;
-
- priv->current_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_CURRENT);
- priv->current_input[1] = get_unaligned_be16(data + D5NEXT_FAN_CURRENT);
- break;
- case farbwerk:
- /* Temperature sensor readings */
- for (i = 0; i < FARBWERK_NUM_SENSORS; i++) {
- sensor_value = get_unaligned_be16(data + FARBWERK_SENSOR_START +
- i * FARBWERK_SENSOR_SIZE);
- if (sensor_value == FARBWERK_SENSOR_DISCONNECTED)
- priv->temp_input[i] = -ENODATA;
- else
- priv->temp_input[i] = sensor_value * 10;
- }
- break;
- case farbwerk360:
- /* Temperature sensor readings */
- for (i = 0; i < FARBWERK360_NUM_SENSORS; i++) {
- sensor_value = get_unaligned_be16(data + FARBWERK360_SENSOR_START +
- i * FARBWERK360_SENSOR_SIZE);
- if (sensor_value == FARBWERK360_SENSOR_DISCONNECTED)
- priv->temp_input[i] = -ENODATA;
- else
- priv->temp_input[i] = sensor_value * 10;
- }
+ priv->voltage_input[3] = get_unaligned_be16(data + D5NEXT_12V_VOLTAGE) * 10;
break;
- case octo:
- priv->power_cycles = get_unaligned_be32(data + OCTO_POWER_CYCLES);
-
- /* Fan speed and related readings */
- for (i = 0; i < OCTO_NUM_FANS; i++) {
- priv->speed_input[i] =
- get_unaligned_be16(data + octo_sensor_fan_offsets[i] +
- OCTO_FAN_SPEED_OFFSET);
- priv->power_input[i] =
- get_unaligned_be16(data + octo_sensor_fan_offsets[i] +
- OCTO_FAN_POWER_OFFSET) * 10000;
- priv->voltage_input[i] =
- get_unaligned_be16(data + octo_sensor_fan_offsets[i] +
- OCTO_FAN_VOLTAGE_OFFSET) * 10;
- priv->current_input[i] =
- get_unaligned_be16(data + octo_sensor_fan_offsets[i] +
- OCTO_FAN_CURRENT_OFFSET);
- }
-
- /* Temperature sensor readings */
- for (i = 0; i < OCTO_NUM_SENSORS; i++) {
- sensor_value = get_unaligned_be16(data + OCTO_SENSOR_START +
- i * OCTO_SENSOR_SIZE);
- if (sensor_value == OCTO_SENSOR_DISCONNECTED)
- priv->temp_input[i] = -ENODATA;
- else
- priv->temp_input[i] = sensor_value * 10;
- }
+ case quadro:
+ priv->speed_input[4] = get_unaligned_be16(data + priv->flow_sensor_offset);
break;
default:
break;
@@ -699,14 +670,8 @@ static void aqc_debugfs_init(struct aqc_data *priv)
debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops);
debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops);
- switch (priv->kind) {
- case d5next:
- case octo:
+ if (priv->power_cycle_count_offset != 0)
debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
- break;
- default:
- break;
- }
}
#else
@@ -747,6 +712,14 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
case USB_PRODUCT_ID_D5NEXT:
priv->kind = d5next;
+ priv->num_fans = D5NEXT_NUM_FANS;
+ priv->fan_sensor_offsets = d5next_sensor_fan_offsets;
+ priv->fan_ctrl_offsets = d5next_ctrl_fan_offsets;
+ priv->num_temp_sensors = D5NEXT_NUM_SENSORS;
+ priv->temp_sensor_start_offset = D5NEXT_COOLANT_TEMP;
+ priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES;
+ priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE;
+
priv->temp_label = label_d5next_temp;
priv->speed_label = label_d5next_speeds;
priv->power_label = label_d5next_power;
@@ -756,19 +729,29 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
case USB_PRODUCT_ID_FARBWERK:
priv->kind = farbwerk;
+ priv->num_fans = 0;
+ priv->num_temp_sensors = FARBWERK_NUM_SENSORS;
+ priv->temp_sensor_start_offset = FARBWERK_SENSOR_START;
priv->temp_label = label_temp_sensors;
break;
case USB_PRODUCT_ID_FARBWERK360:
priv->kind = farbwerk360;
+ priv->num_fans = 0;
+ priv->num_temp_sensors = FARBWERK360_NUM_SENSORS;
+ priv->temp_sensor_start_offset = FARBWERK360_SENSOR_START;
priv->temp_label = label_temp_sensors;
break;
case USB_PRODUCT_ID_OCTO:
priv->kind = octo;
+
+ priv->num_fans = OCTO_NUM_FANS;
+ priv->fan_sensor_offsets = octo_sensor_fan_offsets;
+ priv->fan_ctrl_offsets = octo_ctrl_fan_offsets;
+ priv->num_temp_sensors = OCTO_NUM_SENSORS;
+ priv->temp_sensor_start_offset = OCTO_SENSOR_START;
+ priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
- priv->checksum_start = OCTO_CTRL_REPORT_CHECKSUM_START;
- priv->checksum_length = OCTO_CTRL_REPORT_CHECKSUM_LENGTH;
- priv->checksum_offset = OCTO_CTRL_REPORT_CHECKSUM_OFFSET;
priv->temp_label = label_temp_sensors;
priv->speed_label = label_fan_speed;
@@ -776,10 +759,34 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->voltage_label = label_fan_voltage;
priv->current_label = label_fan_current;
break;
+ case USB_PRODUCT_ID_QUADRO:
+ priv->kind = quadro;
+
+ priv->num_fans = QUADRO_NUM_FANS;
+ priv->fan_sensor_offsets = quadro_sensor_fan_offsets;
+ priv->fan_ctrl_offsets = quadro_ctrl_fan_offsets;
+ priv->num_temp_sensors = QUADRO_NUM_SENSORS;
+ priv->temp_sensor_start_offset = QUADRO_SENSOR_START;
+ priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
+ priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
+ priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET;
+
+ priv->temp_label = label_temp_sensors;
+ priv->speed_label = label_quadro_speeds;
+ priv->power_label = label_fan_power;
+ priv->voltage_label = label_fan_voltage;
+ priv->current_label = label_fan_current;
+ break;
default:
break;
}
+ if (priv->buffer_size != 0) {
+ priv->checksum_start = 0x01;
+ priv->checksum_length = priv->buffer_size - 3;
+ priv->checksum_offset = priv->buffer_size - 2;
+ }
+
priv->name = aqc_device_names[priv->kind];
priv->buffer = devm_kzalloc(&hdev->dev, priv->buffer_size, GFP_KERNEL);
@@ -825,6 +832,7 @@ static const struct hid_device_id aqc_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) },
{ }
};
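The checksum fields set up in aqc_probe() above (checksum_start, checksum_length and checksum_offset, all derived from buffer_size) come into play when a control report is written back to the device. Below is a minimal sketch of that step, assuming the CRC16 helper the Kconfig entry selects; the function name and the 0xffff seed are illustrative assumptions, not part of this patch.

#include <linux/crc16.h>
#include <asm/unaligned.h>

/* Sketch only: apply the checksum before a control report is sent. */
static void aqc_example_apply_checksum(struct aqc_data *priv)
{
	u16 checksum;

	/* CRC-16 over everything between checksum_start and checksum_offset */
	checksum = crc16(0xffff, priv->buffer + priv->checksum_start,
			 priv->checksum_length);

	/* Store the result big-endian just before the end of the report */
	put_unaligned_be16(checksum, priv->buffer + priv->checksum_offset);
}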
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 3cb88d6fbec0..d11f674e3dc3 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -159,7 +159,7 @@
* 11: reserved.
*/
#define M_TACH_MODE 0x02 /* 10b */
-#define M_TACH_UNIT 0x0210
+#define M_TACH_UNIT 0x0420
#define INIT_FAN_CTRL 0xFF
/* How long we sleep in us while waiting for an RPM result. */
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 3633ab691662..61a4684fc020 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -54,6 +54,10 @@ static char *mutex_path_override;
/* ACPI mutex for locking access to the EC for the firmware */
#define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX"
+#define ASUS_HW_ACCESS_MUTEX_RMTW_ASMX "\\RMTW.ASMX"
+
+#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0 "\\_SB_.PCI0.SBRG.SIO1.MUT0"
+
#define MAX_IDENTICAL_BOARD_VARIATIONS 3
/* Moniker for the ACPI global lock (':' is not allowed in ASL identifiers) */
@@ -119,6 +123,18 @@ enum ec_sensors {
ec_sensor_temp_water_in,
/* "Water_Out" temperature sensor reading [℃] */
ec_sensor_temp_water_out,
+ /* "Water_Block_In" temperature sensor reading [℃] */
+ ec_sensor_temp_water_block_in,
+ /* "Water_Block_Out" temperature sensor reading [℃] */
+ ec_sensor_temp_water_block_out,
+ /* "T_sensor_2" temperature sensor reading [℃] */
+ ec_sensor_temp_t_sensor_2,
+ /* "Extra_1" temperature sensor reading [℃] */
+ ec_sensor_temp_sensor_extra_1,
+ /* "Extra_2" temperature sensor reading [℃] */
+ ec_sensor_temp_sensor_extra_2,
+ /* "Extra_3" temperature sensor reading [℃] */
+ ec_sensor_temp_sensor_extra_3,
};
#define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset)
@@ -134,11 +150,19 @@ enum ec_sensors {
#define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu)
#define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in)
#define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out)
+#define SENSOR_TEMP_WATER_BLOCK_IN BIT(ec_sensor_temp_water_block_in)
+#define SENSOR_TEMP_WATER_BLOCK_OUT BIT(ec_sensor_temp_water_block_out)
+#define SENSOR_TEMP_T_SENSOR_2 BIT(ec_sensor_temp_t_sensor_2)
+#define SENSOR_TEMP_SENSOR_EXTRA_1 BIT(ec_sensor_temp_sensor_extra_1)
+#define SENSOR_TEMP_SENSOR_EXTRA_2 BIT(ec_sensor_temp_sensor_extra_2)
+#define SENSOR_TEMP_SENSOR_EXTRA_3 BIT(ec_sensor_temp_sensor_extra_3)
enum board_family {
family_unknown,
family_amd_400_series,
family_amd_500_series,
+ family_intel_300_series,
+ family_intel_600_series
};
/* All the known sensors for ASUS EC controllers */
@@ -195,12 +219,53 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_temp_water_block_in] =
+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
+ [ec_sensor_temp_water_block_out] =
+ EC_SENSOR("Water_Block_Out", hwmon_temp, 1, 0x01, 0x03),
+ [ec_sensor_temp_sensor_extra_1] =
+ EC_SENSOR("Extra_1", hwmon_temp, 1, 0x01, 0x09),
+ [ec_sensor_temp_t_sensor_2] =
+ EC_SENSOR("T_sensor_2", hwmon_temp, 1, 0x01, 0x0a),
+ [ec_sensor_temp_sensor_extra_2] =
+ EC_SENSOR("Extra_2", hwmon_temp, 1, 0x01, 0x0b),
+ [ec_sensor_temp_sensor_extra_3] =
+ EC_SENSOR("Extra_3", hwmon_temp, 1, 0x01, 0x0c),
+};
+
+static const struct ec_sensor_info sensors_family_intel_300[] = {
+ [ec_sensor_temp_chipset] =
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+};
+
+static const struct ec_sensor_info sensors_family_intel_600[] = {
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
};
/* Shortcuts for common combinations */
#define SENSOR_SET_TEMP_CHIPSET_CPU_MB \
(SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB)
#define SENSOR_SET_TEMP_WATER (SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT)
+#define SENSOR_SET_WATER_BLOCK \
+ (SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
+
struct ec_board_info {
const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
@@ -273,6 +338,18 @@ static const struct ec_board_info board_info[] = {
.family = family_amd_500_series,
},
{
+ .board_names = {
+ "ROG MAXIMUS XI HERO",
+ "ROG MAXIMUS XI HERO (WI-FI)",
+ },
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+ },
+ {
.board_names = {"ROG CROSSHAIR VIII IMPACT"},
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -324,12 +401,31 @@ static const struct ec_board_info board_info[] = {
},
{
.board_names = {"ROG STRIX X570-I GAMING"},
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_FAN_VRM_HS |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
.family = family_amd_500_series,
},
+ {
+ .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"},
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+ },
+ {
+ .board_names = {"ROG ZENITH II EXTREME"},
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
+ SENSOR_SET_WATER_BLOCK |
+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_500_series,
+ },
{}
};
@@ -799,6 +895,12 @@ static int __init asus_ec_probe(struct platform_device *pdev)
case family_amd_500_series:
ec_data->sensors_info = sensors_family_amd_500;
break;
+ case family_intel_300_series:
+ ec_data->sensors_info = sensors_family_intel_300;
+ break;
+ case family_intel_600_series:
+ ec_data->sensors_info = sensors_family_intel_600;
+ break;
default:
dev_err(dev, "Unknown board family: %d",
ec_data->board_info->family);
diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c
index 9e935e34c998..6e8a908171f0 100644
--- a/drivers/hwmon/asus_wmi_sensors.c
+++ b/drivers/hwmon/asus_wmi_sensors.c
@@ -514,22 +514,20 @@ static int asus_wmi_configure_sensor_setup(struct device *dev,
int i, idx;
int err;
- temp_sensor = devm_kcalloc(dev, 1, sizeof(*temp_sensor), GFP_KERNEL);
- if (!temp_sensor)
- return -ENOMEM;
-
for (i = 0; i < sensor_data->wmi.sensor_count; i++) {
- err = asus_wmi_sensor_info(i, temp_sensor);
+ struct asus_wmi_sensor_info sensor;
+
+ err = asus_wmi_sensor_info(i, &sensor);
if (err)
return err;
- switch (temp_sensor->data_type) {
+ switch (sensor.data_type) {
case TEMPERATURE_C:
case VOLTAGE:
case CURRENT:
case FAN_RPM:
case WATER_FLOW:
- type = asus_data_types[temp_sensor->data_type];
+ type = asus_data_types[sensor.data_type];
if (!nr_count[type])
nr_types++;
nr_count[type]++;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 071aa6f4e109..7f8d95dd2717 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -130,7 +130,7 @@ struct smm_regs {
unsigned int edx;
unsigned int esi;
unsigned int edi;
-} __packed;
+};
static const char * const temp_labels[] = {
"CPU",
@@ -175,77 +175,35 @@ static int i8k_smm_func(void *par)
struct smm_regs *regs = par;
int eax = regs->eax;
int ebx = regs->ebx;
+ unsigned char carry;
long long duration;
- int rc;
/* SMM requires CPU 0 */
if (smp_processor_id() != 0)
return -EBUSY;
-#if defined(CONFIG_X86_64)
- asm volatile("pushq %%rax\n\t"
- "movl 0(%%rax),%%edx\n\t"
- "pushq %%rdx\n\t"
- "movl 4(%%rax),%%ebx\n\t"
- "movl 8(%%rax),%%ecx\n\t"
- "movl 12(%%rax),%%edx\n\t"
- "movl 16(%%rax),%%esi\n\t"
- "movl 20(%%rax),%%edi\n\t"
- "popq %%rax\n\t"
- "out %%al,$0xb2\n\t"
- "out %%al,$0x84\n\t"
- "xchgq %%rax,(%%rsp)\n\t"
- "movl %%ebx,4(%%rax)\n\t"
- "movl %%ecx,8(%%rax)\n\t"
- "movl %%edx,12(%%rax)\n\t"
- "movl %%esi,16(%%rax)\n\t"
- "movl %%edi,20(%%rax)\n\t"
- "popq %%rdx\n\t"
- "movl %%edx,0(%%rax)\n\t"
- "pushfq\n\t"
- "popq %%rax\n\t"
- "andl $1,%%eax\n"
- : "=a"(rc)
- : "a"(regs)
- : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
-#else
- asm volatile("pushl %%eax\n\t"
- "movl 0(%%eax),%%edx\n\t"
- "push %%edx\n\t"
- "movl 4(%%eax),%%ebx\n\t"
- "movl 8(%%eax),%%ecx\n\t"
- "movl 12(%%eax),%%edx\n\t"
- "movl 16(%%eax),%%esi\n\t"
- "movl 20(%%eax),%%edi\n\t"
- "popl %%eax\n\t"
- "out %%al,$0xb2\n\t"
- "out %%al,$0x84\n\t"
- "xchgl %%eax,(%%esp)\n\t"
- "movl %%ebx,4(%%eax)\n\t"
- "movl %%ecx,8(%%eax)\n\t"
- "movl %%edx,12(%%eax)\n\t"
- "movl %%esi,16(%%eax)\n\t"
- "movl %%edi,20(%%eax)\n\t"
- "popl %%edx\n\t"
- "movl %%edx,0(%%eax)\n\t"
- "lahf\n\t"
- "shrl $8,%%eax\n\t"
- "andl $1,%%eax\n"
- : "=a"(rc)
- : "a"(regs)
- : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
-#endif
- if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
- rc = -EINVAL;
+ asm volatile("out %%al,$0xb2\n\t"
+ "out %%al,$0x84\n\t"
+ "setc %0\n"
+ : "=mr" (carry),
+ "+a" (regs->eax),
+ "+b" (regs->ebx),
+ "+c" (regs->ecx),
+ "+d" (regs->edx),
+ "+S" (regs->esi),
+ "+D" (regs->edi));
duration = ktime_us_delta(ktime_get(), calltime);
- pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x (took %7lld usecs)\n", eax, ebx,
- (rc ? 0xffff : regs->eax & 0xffff), duration);
+ pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x carry: %d (took %7lld usecs)\n",
+ eax, ebx, regs->eax & 0xffff, carry, duration);
if (duration > DELL_SMM_MAX_DURATION)
pr_warn_once("SMM call took %lld usecs!\n", duration);
- return rc;
+ if (carry || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
+ return -EINVAL;
+
+ return 0;
}
/*
@@ -1132,6 +1090,13 @@ static const struct i8k_config_data i8k_config_data[] __initconst = {
static const struct dmi_system_id i8k_dmi_table[] __initconst = {
{
+ .ident = "Dell G5 5590",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G5 5590"),
+ },
+ },
+ {
.ident = "Dell Inspiron",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"),
@@ -1365,6 +1330,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
+ {
+ .ident = "Dell XPS 13 7390",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 13 7390"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
{ }
};
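For context on the smp_processor_id() check in i8k_smm_func() above: the SMM call has to be issued from CPU 0 (hence the -EBUSY bailout), so callers pin the function there before invoking it. A minimal sketch of such a caller follows; the wrapper name is an illustrative assumption, while cpus_read_lock()/smp_call_on_cpu() are standard kernel APIs.

#include <linux/cpu.h>
#include <linux/smp.h>

/* Sketch only: run the SMM call on CPU 0 and collect its return value. */
static int i8k_smm_example(struct smm_regs *regs)
{
	int ret;

	cpus_read_lock();
	ret = smp_call_on_cpu(0, i8k_smm_func, regs, true);
	cpus_read_unlock();

	return ret;
}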
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 1eb37106a220..5bac2b0fc7bb 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -621,3 +621,4 @@ module_exit(drivetemp_exit);
MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
MODULE_DESCRIPTION("Hard drive temperature monitor");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:drivetemp");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6830e029995d..19b6c643059a 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -49,6 +49,7 @@
#define SIO_F81768D_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define SIO_F81866_ID 0x1010 /* Chipset ID */
+#define SIO_F71858AD_ID 0x0903 /* Chipset ID */
#define SIO_F81966_ID 0x1502 /* Chipset ID */
#define REGION_LENGTH 8
@@ -2638,6 +2639,7 @@ static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data)
sio_data->type = f71808a;
break;
case SIO_F71858_ID:
+ case SIO_F71858AD_ID:
sio_data->type = f71858fg;
break;
case SIO_F71862_ID:
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 1fe37418ff46..d64be48f1ef6 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -269,10 +269,13 @@ gsc_hwmon_get_devtree_pdata(struct device *dev)
/* fan controller base address */
fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan");
if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) {
+ of_node_put(fan);
dev_err(dev, "fan node without base\n");
return ERR_PTR(-EINVAL);
}
+ of_node_put(fan);
+
/* allocate structures for channels and count instances of each type */
device_for_each_child_node(dev, child) {
if (fwnode_property_read_string(child, "label", &ch->name)) {
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 4e239bd75b1d..5a9d47a229e4 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -428,6 +428,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 8);
break;
+ case 0xa0 ... 0xaf:
+ data->ccd_offset = 0x300;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
}
} else if (boot_cpu_data.x86 == 0x19) {
data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
@@ -445,6 +449,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 8);
break;
+ case 0x60 ... 0x6f:
+ case 0x70 ... 0x7f:
+ data->ccd_offset = 0x308;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
case 0x10 ... 0x1f:
case 0xa0 ... 0xaf:
data->ccd_offset = 0x300;
@@ -489,10 +498,13 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index a398171162a8..b803ada5e3c9 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -11,7 +11,8 @@
* which contains this code, we don't worry about the wasted space.
*/
-#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
/* straight from the datasheet */
#define LM75_TEMP_MIN (-55000)
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 3820f0e61510..221de01a327a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -42,7 +42,8 @@
* accordingly, and is done during initialization. Extended precision is only
* available at conversion rates of 1 Hz and slower. Note that extended
* precision is not enabled by default, as this driver initializes all chips
- * to 2 Hz by design.
+ * to 2 Hz by design. The driver also supports MAX6690, which is practically
+ * identical to MAX6654.
*
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
@@ -64,6 +65,14 @@
* and extended mode. They are mostly compatible with LM90 except for a data
* format difference for the temperature value registers.
*
+ * This driver also supports ADT7481, ADT7482, and ADT7483 from Analog Devices
+ * / ON Semiconductor. The chips are similar to ADT7461 but support two external
+ * temperature sensors.
+ *
+ * This driver also supports NCT72, NCT214, and NCT218 from ON Semiconductor.
+ * The chips are similar to ADT7461/ADT7461A but have full PEC support
+ * (undocumented).
+ *
* This driver also supports the SA56004 from Philips. This device is
* pin-compatible with the LM86, the ED/EDP parts are also address-compatible.
*
@@ -75,23 +84,34 @@
* They are mostly compatible with ADT7461 except for local temperature
* low byte register and max conversion rate.
*
+ * This driver also supports MAX1617 and various clones such as G767
+ * and NE1617. Such clones will be detected as MAX1617.
+ *
+ * This driver also supports NE1618 from Philips. It is similar to NE1617
+ * but supports 11 bit external temperature values.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
*/
-#include <linux/module.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
#include <linux/init.h>
-#include <linux/slab.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
-#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/err.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
-#include <linux/sysfs.h>
-#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+/* The maximum number of channels currently supported */
+#define MAX_CHANNELS 3
/*
* Addresses to scan
@@ -112,119 +132,131 @@ static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
-enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781, tmp451, tmp461, max6654 };
+enum chips { adm1023, adm1032, adt7461, adt7461a, adt7481,
+ g781, lm84, lm90, lm99,
+ max1617, max6642, max6646, max6648, max6654, max6657, max6659, max6680, max6696,
+ nct210, nct72, ne1618, sa56004, tmp451, tmp461, w83l771,
+};
/*
* The LM90 registers
*/
-#define LM90_REG_R_MAN_ID 0xFE
-#define LM90_REG_R_CHIP_ID 0xFF
-#define LM90_REG_R_CONFIG1 0x03
-#define LM90_REG_W_CONFIG1 0x09
-#define LM90_REG_R_CONFIG2 0xBF
-#define LM90_REG_W_CONFIG2 0xBF
-#define LM90_REG_R_CONVRATE 0x04
-#define LM90_REG_W_CONVRATE 0x0A
-#define LM90_REG_R_STATUS 0x02
-#define LM90_REG_R_LOCAL_TEMP 0x00
-#define LM90_REG_R_LOCAL_HIGH 0x05
-#define LM90_REG_W_LOCAL_HIGH 0x0B
-#define LM90_REG_R_LOCAL_LOW 0x06
-#define LM90_REG_W_LOCAL_LOW 0x0C
-#define LM90_REG_R_LOCAL_CRIT 0x20
-#define LM90_REG_W_LOCAL_CRIT 0x20
-#define LM90_REG_R_REMOTE_TEMPH 0x01
-#define LM90_REG_R_REMOTE_TEMPL 0x10
-#define LM90_REG_R_REMOTE_OFFSH 0x11
-#define LM90_REG_W_REMOTE_OFFSH 0x11
-#define LM90_REG_R_REMOTE_OFFSL 0x12
-#define LM90_REG_W_REMOTE_OFFSL 0x12
-#define LM90_REG_R_REMOTE_HIGHH 0x07
-#define LM90_REG_W_REMOTE_HIGHH 0x0D
-#define LM90_REG_R_REMOTE_HIGHL 0x13
-#define LM90_REG_W_REMOTE_HIGHL 0x13
-#define LM90_REG_R_REMOTE_LOWH 0x08
-#define LM90_REG_W_REMOTE_LOWH 0x0E
-#define LM90_REG_R_REMOTE_LOWL 0x14
-#define LM90_REG_W_REMOTE_LOWL 0x14
-#define LM90_REG_R_REMOTE_CRIT 0x19
-#define LM90_REG_W_REMOTE_CRIT 0x19
-#define LM90_REG_R_TCRIT_HYST 0x21
-#define LM90_REG_W_TCRIT_HYST 0x21
+#define LM90_REG_MAN_ID 0xFE
+#define LM90_REG_CHIP_ID 0xFF
+#define LM90_REG_CONFIG1 0x03
+#define LM90_REG_CONFIG2 0xBF
+#define LM90_REG_CONVRATE 0x04
+#define LM90_REG_STATUS 0x02
+#define LM90_REG_LOCAL_TEMP 0x00
+#define LM90_REG_LOCAL_HIGH 0x05
+#define LM90_REG_LOCAL_LOW 0x06
+#define LM90_REG_LOCAL_CRIT 0x20
+#define LM90_REG_REMOTE_TEMPH 0x01
+#define LM90_REG_REMOTE_TEMPL 0x10
+#define LM90_REG_REMOTE_OFFSH 0x11
+#define LM90_REG_REMOTE_OFFSL 0x12
+#define LM90_REG_REMOTE_HIGHH 0x07
+#define LM90_REG_REMOTE_HIGHL 0x13
+#define LM90_REG_REMOTE_LOWH 0x08
+#define LM90_REG_REMOTE_LOWL 0x14
+#define LM90_REG_REMOTE_CRIT 0x19
+#define LM90_REG_TCRIT_HYST 0x21
/* MAX6646/6647/6649/6654/6657/6658/6659/6695/6696 registers */
-#define MAX6657_REG_R_LOCAL_TEMPL 0x11
-#define MAX6696_REG_R_STATUS2 0x12
-#define MAX6659_REG_R_REMOTE_EMERG 0x16
-#define MAX6659_REG_W_REMOTE_EMERG 0x16
-#define MAX6659_REG_R_LOCAL_EMERG 0x17
-#define MAX6659_REG_W_LOCAL_EMERG 0x17
+#define MAX6657_REG_LOCAL_TEMPL 0x11
+#define MAX6696_REG_STATUS2 0x12
+#define MAX6659_REG_REMOTE_EMERG 0x16
+#define MAX6659_REG_LOCAL_EMERG 0x17
/* SA56004 registers */
-#define SA56004_REG_R_LOCAL_TEMPL 0x22
+#define SA56004_REG_LOCAL_TEMPL 0x22
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
/* TMP451/TMP461 registers */
-#define TMP451_REG_R_LOCAL_TEMPL 0x15
+#define TMP451_REG_LOCAL_TEMPL 0x15
#define TMP451_REG_CONALERT 0x22
#define TMP461_REG_CHEN 0x16
#define TMP461_REG_DFC 0x24
-/*
- * Device flags
- */
-#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */
+/* ADT7481 registers */
+#define ADT7481_REG_STATUS2 0x23
+#define ADT7481_REG_CONFIG2 0x24
+
+#define ADT7481_REG_MAN_ID 0x3e
+#define ADT7481_REG_CHIP_ID 0x3d
+
/* Device features */
-#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */
-#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */
-#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */
-#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
-#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
-#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
-#define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support*/
-#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */
-#define LM90_HAVE_CRIT (1 << 10)/* Chip supports CRIT/OVERT register */
-#define LM90_HAVE_CRIT_ALRM_SWP (1 << 11)/* critical alarm bits swapped */
+#define LM90_HAVE_EXTENDED_TEMP BIT(0) /* extended temperature support */
+#define LM90_HAVE_OFFSET BIT(1) /* temperature offset register */
+#define LM90_HAVE_UNSIGNED_TEMP BIT(2) /* temperatures are unsigned */
+#define LM90_HAVE_REM_LIMIT_EXT BIT(3) /* extended remote limit */
+#define LM90_HAVE_EMERGENCY BIT(4) /* 3rd upper (emergency) limit */
+#define LM90_HAVE_EMERGENCY_ALARM BIT(5)/* emergency alarm */
+#define LM90_HAVE_TEMP3 BIT(6) /* 3rd temperature sensor */
+#define LM90_HAVE_BROKEN_ALERT BIT(7) /* Broken alert */
+#define LM90_PAUSE_FOR_CONFIG BIT(8) /* Pause conversion for config */
+#define LM90_HAVE_CRIT BIT(9) /* Chip supports CRIT/OVERT register */
+#define LM90_HAVE_CRIT_ALRM_SWP BIT(10) /* critical alarm bits swapped */
+#define LM90_HAVE_PEC BIT(11) /* Chip supports PEC */
+#define LM90_HAVE_PARTIAL_PEC BIT(12) /* Partial PEC support (adm1032)*/
+#define LM90_HAVE_ALARMS BIT(13) /* Create 'alarms' attribute */
+#define LM90_HAVE_EXT_UNSIGNED BIT(14) /* extended unsigned temperature*/
+#define LM90_HAVE_LOW BIT(15) /* low limits */
+#define LM90_HAVE_CONVRATE BIT(16) /* conversion rate */
+#define LM90_HAVE_REMOTE_EXT BIT(17) /* extended remote temperature */
+#define LM90_HAVE_FAULTQUEUE BIT(18) /* configurable samples count */
/* LM90 status */
-#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
-#define LM90_STATUS_RTHRM (1 << 1) /* remote THERM limit tripped */
-#define LM90_STATUS_ROPEN (1 << 2) /* remote is an open circuit */
-#define LM90_STATUS_RLOW (1 << 3) /* remote low temp limit tripped */
-#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
-#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
-#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
-#define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */
-
-#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
-#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
-#define MAX6696_STATUS2_R2LOW (1 << 3) /* remote2 low temp limit tripped */
-#define MAX6696_STATUS2_R2HIGH (1 << 4) /* remote2 high temp limit tripped */
-#define MAX6696_STATUS2_ROT2 (1 << 5) /* remote emergency limit tripped */
-#define MAX6696_STATUS2_R2OT2 (1 << 6) /* remote2 emergency limit tripped */
-#define MAX6696_STATUS2_LOT2 (1 << 7) /* local emergency limit tripped */
+#define LM90_STATUS_LTHRM BIT(0) /* local THERM limit tripped */
+#define LM90_STATUS_RTHRM BIT(1) /* remote THERM limit tripped */
+#define LM90_STATUS_ROPEN BIT(2) /* remote is an open circuit */
+#define LM90_STATUS_RLOW BIT(3) /* remote low temp limit tripped */
+#define LM90_STATUS_RHIGH BIT(4) /* remote high temp limit tripped */
+#define LM90_STATUS_LLOW BIT(5) /* local low temp limit tripped */
+#define LM90_STATUS_LHIGH BIT(6) /* local high temp limit tripped */
+#define LM90_STATUS_BUSY BIT(7) /* conversion is ongoing */
+
+/* MAX6695/6696 and ADT7481 2nd status register */
+#define MAX6696_STATUS2_R2THRM BIT(1) /* remote2 THERM limit tripped */
+#define MAX6696_STATUS2_R2OPEN BIT(2) /* remote2 is an open circuit */
+#define MAX6696_STATUS2_R2LOW BIT(3) /* remote2 low temp limit tripped */
+#define MAX6696_STATUS2_R2HIGH BIT(4) /* remote2 high temp limit tripped */
+#define MAX6696_STATUS2_ROT2 BIT(5) /* remote emergency limit tripped */
+#define MAX6696_STATUS2_R2OT2 BIT(6) /* remote2 emergency limit tripped */
+#define MAX6696_STATUS2_LOT2 BIT(7) /* local emergency limit tripped */
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm90_id[] = {
+ { "adm1020", max1617 },
+ { "adm1021", max1617 },
+ { "adm1023", adm1023 },
{ "adm1032", adm1032 },
+ { "adt7421", adt7461a },
{ "adt7461", adt7461 },
- { "adt7461a", adt7461 },
+ { "adt7461a", adt7461a },
+ { "adt7481", adt7481 },
+ { "adt7482", adt7481 },
+ { "adt7483a", adt7481 },
{ "g781", g781 },
+ { "gl523sm", max1617 },
+ { "lm84", lm84 },
+ { "lm86", lm90 },
+ { "lm89", lm90 },
{ "lm90", lm90 },
- { "lm86", lm86 },
- { "lm89", lm86 },
{ "lm99", lm99 },
+ { "max1617", max1617 },
+ { "max6642", max6642 },
{ "max6646", max6646 },
{ "max6647", max6646 },
+ { "max6648", max6648 },
{ "max6649", max6646 },
{ "max6654", max6654 },
{ "max6657", max6657 },
@@ -232,11 +264,20 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6659", max6659 },
{ "max6680", max6680 },
{ "max6681", max6680 },
+ { "max6690", max6654 },
+ { "max6692", max6648 },
{ "max6695", max6696 },
{ "max6696", max6696 },
- { "nct1008", adt7461 },
+ { "mc1066", max1617 },
+ { "nct1008", adt7461a },
+ { "nct210", nct210 },
+ { "nct214", nct72 },
+ { "nct218", nct72 },
+ { "nct72", nct72 },
+ { "ne1618", ne1618 },
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
+ { "thmc10", max1617 },
{ "tmp451", tmp451 },
{ "tmp461", tmp461 },
{ }
@@ -254,7 +295,11 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
},
{
.compatible = "adi,adt7461a",
- .data = (void *)adt7461
+ .data = (void *)adt7461a
+ },
+ {
+ .compatible = "adi,adt7481",
+ .data = (void *)adt7481
},
{
.compatible = "gmt,g781",
@@ -266,11 +311,11 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
},
{
.compatible = "national,lm86",
- .data = (void *)lm86
+ .data = (void *)lm90
},
{
.compatible = "national,lm89",
- .data = (void *)lm86
+ .data = (void *)lm90
},
{
.compatible = "national,lm99",
@@ -322,7 +367,19 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
},
{
.compatible = "onnn,nct1008",
- .data = (void *)adt7461
+ .data = (void *)adt7461a
+ },
+ {
+ .compatible = "onnn,nct214",
+ .data = (void *)nct72
+ },
+ {
+ .compatible = "onnn,nct218",
+ .data = (void *)nct72
+ },
+ {
+ .compatible = "onnn,nct72",
+ .data = (void *)nct72
},
{
.compatible = "winbond,w83l771",
@@ -352,115 +409,252 @@ struct lm90_params {
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
/* Upper 8 bits for max6695/96 */
u8 max_convrate; /* Maximum conversion rate register value */
+ u8 resolution; /* 16-bit resolution (default 11 bit) */
+ u8 reg_status2; /* 2nd status register (optional) */
u8 reg_local_ext; /* Extended local temp register (optional) */
+ u8 faultqueue_mask; /* fault queue bit mask */
+ u8 faultqueue_depth; /* fault queue depth if mask is used */
};
static const struct lm90_params lm90_params[] = {
+ [adm1023] = {
+ .flags = LM90_HAVE_ALARMS | LM90_HAVE_OFFSET | LM90_HAVE_BROKEN_ALERT
+ | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
+ .alert_alarms = 0x7c,
+ .resolution = 8,
+ .max_convrate = 7,
+ },
[adm1032] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT
+ | LM90_HAVE_PARTIAL_PEC | LM90_HAVE_ALARMS
+ | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT
+ | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[adt7461] = {
+ /*
+ * Standard temperature range is supposed to be unsigned,
+ * but that does not match reality. Negative temperatures
+ * are always reported.
+ */
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+ | LM90_HAVE_CRIT | LM90_HAVE_PARTIAL_PEC
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
+ .alert_alarms = 0x7c,
+ .max_convrate = 10,
+ .resolution = 10,
+ },
+ [adt7461a] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
- | LM90_HAVE_CRIT,
+ | LM90_HAVE_CRIT | LM90_HAVE_PEC | LM90_HAVE_ALARMS
+ | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT
+ | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
+ [adt7481] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+ | LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_PEC
+ | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT
+ | LM90_HAVE_FAULTQUEUE,
+ .alert_alarms = 0x1c7c,
+ .max_convrate = 11,
+ .resolution = 10,
+ .reg_status2 = ADT7481_REG_STATUS2,
+ },
[g781] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
- [lm86] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_CRIT,
- .alert_alarms = 0x7b,
- .max_convrate = 9,
+ [lm84] = {
+ .flags = LM90_HAVE_ALARMS,
+ .resolution = 8,
},
[lm90] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_CRIT,
+ | LM90_HAVE_CRIT | LM90_HAVE_ALARMS | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT
+ | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7b,
.max_convrate = 9,
+ .faultqueue_mask = BIT(0),
+ .faultqueue_depth = 3,
},
[lm99] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_CRIT,
+ | LM90_HAVE_CRIT | LM90_HAVE_ALARMS | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT
+ | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7b,
.max_convrate = 9,
+ .faultqueue_mask = BIT(0),
+ .faultqueue_depth = 3,
+ },
+ [max1617] = {
+ .flags = LM90_HAVE_CONVRATE | LM90_HAVE_BROKEN_ALERT |
+ LM90_HAVE_LOW | LM90_HAVE_ALARMS,
+ .alert_alarms = 0x78,
+ .resolution = 8,
+ .max_convrate = 7,
+ },
+ [max6642] = {
+ .flags = LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXT_UNSIGNED
+ | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
+ .alert_alarms = 0x50,
+ .resolution = 10,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
+ .faultqueue_mask = BIT(4),
+ .faultqueue_depth = 2,
},
[max6646] = {
- .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
+ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT
+ | LM90_HAVE_EXT_UNSIGNED | LM90_HAVE_ALARMS | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT,
.alert_alarms = 0x7c,
.max_convrate = 6,
- .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
+ },
+ [max6648] = {
+ .flags = LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_CRIT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
},
[max6654] = {
- .flags = LM90_HAVE_BROKEN_ALERT,
+ .flags = LM90_HAVE_BROKEN_ALERT | LM90_HAVE_ALARMS | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT,
.alert_alarms = 0x7c,
.max_convrate = 7,
- .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
},
[max6657] = {
- .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT,
+ .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
- .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
},
[max6659] = {
- .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT,
+ .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
- .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
},
[max6680] = {
+ /*
+ * Apparent temperatures of 128 degrees C or higher are reported
+ * and treated as negative temperatures (meaning min_alarm will
+ * be set).
+ */
.flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
- | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
[max6696] = {
.flags = LM90_HAVE_EMERGENCY
- | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT,
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x1c7c,
.max_convrate = 6,
- .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ .reg_status2 = MAX6696_REG_STATUS2,
+ .reg_local_ext = MAX6657_REG_LOCAL_TEMPL,
+ .faultqueue_mask = BIT(5),
+ .faultqueue_depth = 4,
+ },
+ [nct72] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+ | LM90_HAVE_CRIT | LM90_HAVE_PEC | LM90_HAVE_UNSIGNED_TEMP
+ | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT
+ | LM90_HAVE_FAULTQUEUE,
+ .alert_alarms = 0x7c,
+ .max_convrate = 10,
+ .resolution = 10,
+ },
+ [nct210] = {
+ .flags = LM90_HAVE_ALARMS | LM90_HAVE_BROKEN_ALERT
+ | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
+ .alert_alarms = 0x7c,
+ .resolution = 11,
+ .max_convrate = 7,
+ },
+ [ne1618] = {
+ .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_BROKEN_ALERT
+ | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT,
+ .alert_alarms = 0x7c,
+ .resolution = 11,
+ .max_convrate = 7,
},
[w83l771] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[sa56004] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
+ /*
+ * Apparent temperatures of 128 degrees C or higher are reported
+ * and treated as negative temperatures (meaning min_alarm will
+ * be set).
+ */
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7b,
.max_convrate = 9,
- .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
+ .reg_local_ext = SA56004_REG_LOCAL_TEMPL,
+ .faultqueue_mask = BIT(0),
+ .faultqueue_depth = 3,
},
[tmp451] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT
+ | LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_ALARMS | LM90_HAVE_LOW
+ | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7c,
.max_convrate = 9,
- .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+ .resolution = 12,
+ .reg_local_ext = TMP451_REG_LOCAL_TEMPL,
},
[tmp461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE,
.alert_alarms = 0x7c,
.max_convrate = 9,
- .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+ .resolution = 12,
+ .reg_local_ext = TMP451_REG_LOCAL_TEMPL,
},
};
/*
- * TEMP8 register index
+ * temperature register index
*/
-enum lm90_temp8_reg_index {
+enum lm90_temp_reg_index {
LOCAL_LOW = 0,
LOCAL_HIGH,
LOCAL_CRIT,
@@ -469,14 +663,8 @@ enum lm90_temp8_reg_index {
REMOTE_EMERG, /* max6659 and max6695/96 */
REMOTE2_CRIT, /* max6695/96 only */
REMOTE2_EMERG, /* max6695/96 only */
- TEMP8_REG_NUM
-};
-/*
- * TEMP11 register index
- */
-enum lm90_temp11_reg_index {
- REMOTE_TEMP = 0,
+ REMOTE_TEMP,
REMOTE_LOW,
REMOTE_HIGH,
REMOTE_OFFSET, /* except max6646, max6657/58/59, and max6695/96 */
@@ -484,7 +672,9 @@ enum lm90_temp11_reg_index {
REMOTE2_TEMP, /* max6695/96 only */
REMOTE2_LOW, /* max6695/96 only */
REMOTE2_HIGH, /* max6695/96 only */
- TEMP11_REG_NUM
+ REMOTE2_OFFSET,
+
+ TEMP_REG_NUM
};
/*
@@ -494,13 +684,20 @@ enum lm90_temp11_reg_index {
struct lm90_data {
struct i2c_client *client;
struct device *hwmon_dev;
- u32 channel_config[4];
+ u32 chip_config[2];
+ u32 channel_config[MAX_CHANNELS + 1];
+ const char *channel_label[MAX_CHANNELS];
+ struct hwmon_channel_info chip_info;
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[3];
struct hwmon_chip_info chip;
struct mutex update_lock;
+ struct delayed_work alert_work;
+ struct work_struct report_work;
bool valid; /* true if register values are valid */
+ bool alarms_valid; /* true if status register values are valid */
unsigned long last_updated; /* in jiffies */
+ unsigned long alarms_updated; /* in jiffies */
int kind;
u32 flags;
@@ -509,16 +706,23 @@ struct lm90_data {
u8 config; /* Current configuration register value */
u8 config_orig; /* Original configuration register value */
u8 convrate_orig; /* Original conversion rate register value */
+ u8 resolution; /* temperature resolution in bit */
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
/* Upper 8 bits for max6695/96 */
u8 max_convrate; /* Maximum conversion rate */
+ u8 reg_status2; /* 2nd status register (optional) */
u8 reg_local_ext; /* local extension register offset */
+ u8 reg_remote_ext; /* remote temperature low byte */
+ u8 faultqueue_mask; /* fault queue mask */
+ u8 faultqueue_depth; /* fault queue depth */
/* registers values */
- s8 temp8[TEMP8_REG_NUM];
- s16 temp11[TEMP11_REG_NUM];
+ u16 temp[TEMP_REG_NUM];
u8 temp_hyst;
- u16 alarms; /* bitvector (upper 8 bits for max6695/96) */
+ u8 conalert;
+ u16 reported_alarms; /* alarms reported as sysfs/udev events */
+ u16 current_alarms; /* current alarms, reported by chip */
+ u16 alarms; /* alarms not yet reported to user */
};
/*
@@ -526,10 +730,10 @@ struct lm90_data {
*/
/*
- * The ADM1032 supports PEC but not on write byte transactions, so we need
+ * If the chip supports PEC but not on write byte transactions, we need
* to explicitly ask for a transaction without PEC.
*/
-static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
+static inline s32 lm90_write_no_pec(struct i2c_client *client, u8 value)
{
return i2c_smbus_xfer(client->adapter, client->addr,
client->flags & ~I2C_CLIENT_PEC,
@@ -538,47 +742,96 @@ static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
/*
* It is assumed that client->update_lock is held (unless we are in
- * detection or initialization steps). This matters when PEC is enabled,
- * because we don't want the address pointer to change between the write
- * byte and the read byte transactions.
+ * detection or initialization steps). This matters when PEC is enabled
+ * for chips with partial PEC support, because we don't want the address
+ * pointer to change between the write byte and the read byte transactions.
*/
static int lm90_read_reg(struct i2c_client *client, u8 reg)
{
+ struct lm90_data *data = i2c_get_clientdata(client);
+ bool partial_pec = (client->flags & I2C_CLIENT_PEC) &&
+ (data->flags & LM90_HAVE_PARTIAL_PEC);
int err;
- if (client->flags & I2C_CLIENT_PEC) {
- err = adm1032_write_byte(client, reg);
- if (err >= 0)
- err = i2c_smbus_read_byte(client);
- } else
- err = i2c_smbus_read_byte_data(client, reg);
+ if (partial_pec) {
+ err = lm90_write_no_pec(client, reg);
+ if (err)
+ return err;
+ return i2c_smbus_read_byte(client);
+ }
+ return i2c_smbus_read_byte_data(client, reg);
+}
- return err;
+/*
+ * Return register write address
+ *
+ * The write address for registers 0x03 .. 0x08 is the read address plus 6.
+ * For other registers the write address matches the read address.
+ */
+static u8 lm90_write_reg_addr(u8 reg)
+{
+ if (reg >= LM90_REG_CONFIG1 && reg <= LM90_REG_REMOTE_LOWH)
+ return reg + 6;
+ return reg;
}
-static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl)
+/*
+ * Write into LM90 register.
+ * Convert register address to write address if needed, then execute the
+ * operation.
+ */
+static int lm90_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, lm90_write_reg_addr(reg), val);
+}
+
+/*
+ * Write into 16-bit LM90 register.
+ * Convert register addresses to write address if needed, then execute the
+ * operation.
+ */
+static int lm90_write16(struct i2c_client *client, u8 regh, u8 regl, u16 val)
+{
+ int ret;
+
+ ret = lm90_write_reg(client, regh, val >> 8);
+ if (ret < 0 || !regl)
+ return ret;
+ return lm90_write_reg(client, regl, val & 0xff);
+}
+
+static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl,
+ bool is_volatile)
{
int oldh, newh, l;
- /*
- * There is a trick here. We have to read two registers to have the
- * sensor temperature, but we have to beware a conversion could occur
- * between the readings. The datasheet says we should either use
- * the one-shot conversion register, which we don't want to do
- * (disables hardware monitoring) or monitor the busy bit, which is
- * impossible (we can't read the values and monitor that bit at the
- * exact same time). So the solution used here is to read the high
- * byte once, then the low byte, then the high byte again. If the new
- * high byte matches the old one, then we have a valid reading. Else
- * we have to read the low byte again, and now we believe we have a
- * correct reading.
- */
oldh = lm90_read_reg(client, regh);
if (oldh < 0)
return oldh;
+
+ if (!regl)
+ return oldh << 8;
+
l = lm90_read_reg(client, regl);
if (l < 0)
return l;
+
+ if (!is_volatile)
+ return (oldh << 8) | l;
+
+ /*
+ * For volatile registers we have to use a trick.
+ * We have to read two registers to have the sensor temperature,
+ * but we have to beware a conversion could occur between the
+ * readings. The datasheet says we should either use
+ * the one-shot conversion register, which we don't want to do
+ * (disables hardware monitoring) or monitor the busy bit, which is
+ * impossible (we can't read the values and monitor that bit at the
+	 * exact same time). So the solution used here is to read the high
+	 * byte once, then the low byte, then the high byte again. If the
+	 * new high byte matches the old one, then we have a valid reading.
+	 * Otherwise we have to read the low byte again, and now we believe
+	 * we have a correct reading.
+ */
newh = lm90_read_reg(client, regh);
if (newh < 0)
return newh;
@@ -595,9 +848,7 @@ static int lm90_update_confreg(struct lm90_data *data, u8 config)
if (data->config != config) {
int err;
- err = i2c_smbus_write_byte_data(data->client,
- LM90_REG_W_CONFIG1,
- config);
+ err = lm90_write_reg(data->client, LM90_REG_CONFIG1, config);
if (err)
return err;
data->config = config;
@@ -613,18 +864,14 @@ static int lm90_update_confreg(struct lm90_data *data, u8 config)
* various registers have different meanings as a result of selecting a
* non-default remote channel.
*/
-static int lm90_select_remote_channel(struct lm90_data *data, int channel)
+static int lm90_select_remote_channel(struct lm90_data *data, bool second)
{
- int err = 0;
+ u8 config = data->config & ~0x08;
- if (data->kind == max6696) {
- u8 config = data->config & ~0x08;
+ if (second)
+ config |= 0x08;
- if (channel)
- config |= 0x08;
- err = lm90_update_confreg(data, config);
- }
- return err;
+ return lm90_update_confreg(data, config);
}
static int lm90_write_convrate(struct lm90_data *data, int val)
@@ -640,7 +887,7 @@ static int lm90_write_convrate(struct lm90_data *data, int val)
}
/* Set conv rate */
- err = i2c_smbus_write_byte_data(data->client, LM90_REG_W_CONVRATE, val);
+ err = lm90_write_reg(data->client, LM90_REG_CONVRATE, val);
/* Revert change to config */
lm90_update_confreg(data, config);
@@ -673,6 +920,26 @@ static int lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
return err;
}
+static int lm90_set_faultqueue(struct i2c_client *client,
+ struct lm90_data *data, int val)
+{
+ int err;
+
+ if (data->faultqueue_mask) {
+ err = lm90_update_confreg(data, val <= data->faultqueue_depth / 2 ?
+ data->config & ~data->faultqueue_mask :
+ data->config | data->faultqueue_mask);
+ } else {
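+		/* encode fault queue depth 1..4 into the consecutive-alert register bits */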
+ static const u8 values[4] = {0, 2, 6, 0x0e};
+
+ data->conalert = (data->conalert & 0xf1) | values[val - 1];
+ err = lm90_write_reg(data->client, TMP451_REG_CONALERT,
+ data->conalert);
+ }
+
+ return err;
+}
+
static int lm90_update_limits(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
@@ -680,97 +947,260 @@ static int lm90_update_limits(struct device *dev)
int val;
if (data->flags & LM90_HAVE_CRIT) {
- val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+ val = lm90_read_reg(client, LM90_REG_LOCAL_CRIT);
if (val < 0)
return val;
- data->temp8[LOCAL_CRIT] = val;
+ data->temp[LOCAL_CRIT] = val << 8;
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ val = lm90_read_reg(client, LM90_REG_REMOTE_CRIT);
if (val < 0)
return val;
- data->temp8[REMOTE_CRIT] = val;
+ data->temp[REMOTE_CRIT] = val << 8;
- val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+ val = lm90_read_reg(client, LM90_REG_TCRIT_HYST);
if (val < 0)
return val;
data->temp_hyst = val;
}
-
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
- if (val < 0)
- return val;
- data->temp11[REMOTE_LOW] = val << 8;
-
- if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL);
+ if ((data->flags & LM90_HAVE_FAULTQUEUE) && !data->faultqueue_mask) {
+ val = lm90_read_reg(client, TMP451_REG_CONALERT);
if (val < 0)
return val;
- data->temp11[REMOTE_LOW] |= val;
+ data->conalert = val;
}
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+ val = lm90_read16(client, LM90_REG_REMOTE_LOWH,
+ (data->flags & LM90_HAVE_REM_LIMIT_EXT) ? LM90_REG_REMOTE_LOWL : 0,
+ false);
if (val < 0)
return val;
- data->temp11[REMOTE_HIGH] = val << 8;
+ data->temp[REMOTE_LOW] = val;
- if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL);
- if (val < 0)
- return val;
- data->temp11[REMOTE_HIGH] |= val;
- }
+ val = lm90_read16(client, LM90_REG_REMOTE_HIGHH,
+ (data->flags & LM90_HAVE_REM_LIMIT_EXT) ? LM90_REG_REMOTE_HIGHL : 0,
+ false);
+ if (val < 0)
+ return val;
+ data->temp[REMOTE_HIGH] = val;
if (data->flags & LM90_HAVE_OFFSET) {
- val = lm90_read16(client, LM90_REG_R_REMOTE_OFFSH,
- LM90_REG_R_REMOTE_OFFSL);
+ val = lm90_read16(client, LM90_REG_REMOTE_OFFSH,
+ LM90_REG_REMOTE_OFFSL, false);
if (val < 0)
return val;
- data->temp11[REMOTE_OFFSET] = val;
+ data->temp[REMOTE_OFFSET] = val;
}
if (data->flags & LM90_HAVE_EMERGENCY) {
- val = lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG);
+ val = lm90_read_reg(client, MAX6659_REG_LOCAL_EMERG);
if (val < 0)
return val;
- data->temp8[LOCAL_EMERG] = val;
+ data->temp[LOCAL_EMERG] = val << 8;
- val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+ val = lm90_read_reg(client, MAX6659_REG_REMOTE_EMERG);
if (val < 0)
return val;
- data->temp8[REMOTE_EMERG] = val;
+ data->temp[REMOTE_EMERG] = val << 8;
}
- if (data->kind == max6696) {
- val = lm90_select_remote_channel(data, 1);
+ if (data->flags & LM90_HAVE_TEMP3) {
+ val = lm90_select_remote_channel(data, true);
if (val < 0)
return val;
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ val = lm90_read_reg(client, LM90_REG_REMOTE_CRIT);
if (val < 0)
return val;
- data->temp8[REMOTE2_CRIT] = val;
+ data->temp[REMOTE2_CRIT] = val << 8;
+
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ val = lm90_read_reg(client, MAX6659_REG_REMOTE_EMERG);
+ if (val < 0)
+ return val;
+ data->temp[REMOTE2_EMERG] = val << 8;
+ }
- val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+ val = lm90_read_reg(client, LM90_REG_REMOTE_LOWH);
if (val < 0)
return val;
- data->temp8[REMOTE2_EMERG] = val;
+ data->temp[REMOTE2_LOW] = val << 8;
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+ val = lm90_read_reg(client, LM90_REG_REMOTE_HIGHH);
if (val < 0)
return val;
- data->temp11[REMOTE2_LOW] = val << 8;
+ data->temp[REMOTE2_HIGH] = val << 8;
+
+ if (data->flags & LM90_HAVE_OFFSET) {
+ val = lm90_read16(client, LM90_REG_REMOTE_OFFSH,
+ LM90_REG_REMOTE_OFFSL, false);
+ if (val < 0)
+ return val;
+ data->temp[REMOTE2_OFFSET] = val;
+ }
+
+ lm90_select_remote_channel(data, false);
+ }
+
+ return 0;
+}
+
+static void lm90_report_alarms(struct work_struct *work)
+{
+ struct lm90_data *data = container_of(work, struct lm90_data, report_work);
+ u16 cleared_alarms, new_alarms, current_alarms;
+ struct device *hwmon_dev = data->hwmon_dev;
+ struct device *dev = &data->client->dev;
+ int st, st2;
+
+ current_alarms = data->current_alarms;
+ cleared_alarms = data->reported_alarms & ~current_alarms;
+ new_alarms = current_alarms & ~data->reported_alarms;
+
+ if (!cleared_alarms && !new_alarms)
+ return;
+
+ st = new_alarms & 0xff;
+ st2 = new_alarms >> 8;
+
+ if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
+ (st2 & MAX6696_STATUS2_LOT2))
+ dev_dbg(dev, "temp%d out of range, please check!\n", 1);
+ if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
+ (st2 & MAX6696_STATUS2_ROT2))
+ dev_dbg(dev, "temp%d out of range, please check!\n", 2);
+ if (st & LM90_STATUS_ROPEN)
+ dev_dbg(dev, "temp%d diode open, please check!\n", 2);
+ if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
+ MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
+ dev_dbg(dev, "temp%d out of range, please check!\n", 3);
+ if (st2 & MAX6696_STATUS2_R2OPEN)
+ dev_dbg(dev, "temp%d diode open, please check!\n", 3);
+
+ st |= cleared_alarms & 0xff;
+ st2 |= cleared_alarms >> 8;
+
+ if (st & LM90_STATUS_LLOW)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_min_alarm, 0);
+ if (st & LM90_STATUS_RLOW)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_min_alarm, 1);
+ if (st2 & MAX6696_STATUS2_R2LOW)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_min_alarm, 2);
+
+ if (st & LM90_STATUS_LHIGH)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 0);
+ if (st & LM90_STATUS_RHIGH)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 1);
+ if (st2 & MAX6696_STATUS2_R2HIGH)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 2);
+
+ if (st & LM90_STATUS_LTHRM)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_crit_alarm, 0);
+ if (st & LM90_STATUS_RTHRM)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_crit_alarm, 1);
+ if (st2 & MAX6696_STATUS2_R2THRM)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_crit_alarm, 2);
+
+ if (st2 & MAX6696_STATUS2_LOT2)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_emergency_alarm, 0);
+ if (st2 & MAX6696_STATUS2_ROT2)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_emergency_alarm, 1);
+ if (st2 & MAX6696_STATUS2_R2OT2)
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_emergency_alarm, 2);
+
+ data->reported_alarms = current_alarms;
+}
+
+static int lm90_update_alarms_locked(struct lm90_data *data, bool force)
+{
+ if (force || !data->alarms_valid ||
+ time_after(jiffies, data->alarms_updated + msecs_to_jiffies(data->update_interval))) {
+ struct i2c_client *client = data->client;
+ bool check_enable;
+ u16 alarms;
+ int val;
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+ data->alarms_valid = false;
+
+ val = lm90_read_reg(client, LM90_REG_STATUS);
if (val < 0)
return val;
- data->temp11[REMOTE2_HIGH] = val << 8;
+ alarms = val & ~LM90_STATUS_BUSY;
- lm90_select_remote_channel(data, 0);
- }
+ if (data->reg_status2) {
+ val = lm90_read_reg(client, data->reg_status2);
+ if (val < 0)
+ return val;
+ alarms |= val << 8;
+ }
+ /*
+ * If the update is forced (called from interrupt or alert
+ * handler) and alarm data is valid, the alarms may have been
+ * updated after the last update interval, and the status
+ * register may still be cleared. Only add additional alarms
+ * in this case. Alarms will be cleared later if appropriate.
+ */
+ if (force && data->alarms_valid)
+ data->current_alarms |= alarms;
+ else
+ data->current_alarms = alarms;
+ data->alarms |= alarms;
+
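+		/* true if ALERT# is currently masked but may be re-enabled below */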
+ check_enable = (client->irq || !(data->config_orig & 0x80)) &&
+ (data->config & 0x80);
+
+ if (force || check_enable)
+ schedule_work(&data->report_work);
+ /*
+ * Re-enable ALERT# output if it was originally enabled, relevant
+ * alarms are all clear, and alerts are currently disabled.
+ * Otherwise (re)schedule worker if needed.
+ */
+ if (check_enable) {
+ if (!(data->current_alarms & data->alert_alarms)) {
+ dev_dbg(&client->dev, "Re-enabling ALERT#\n");
+ lm90_update_confreg(data, data->config & ~0x80);
+ /*
+ * We may have been called from the update handler.
+ * If so, the worker, if scheduled, is no longer
+ * needed. Cancel it. Don't synchronize because
+ * it may already be running.
+ */
+ cancel_delayed_work(&data->alert_work);
+ } else {
+ schedule_delayed_work(&data->alert_work,
+ max_t(int, HZ, msecs_to_jiffies(data->update_interval)));
+ }
+ }
+ data->alarms_updated = jiffies;
+ data->alarms_valid = true;
+ }
return 0;
}
+static int lm90_update_alarms(struct lm90_data *data, bool force)
+{
+ int err;
+
+ mutex_lock(&data->update_lock);
+ err = lm90_update_alarms_locked(data, force);
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static void lm90_alert_work(struct work_struct *__work)
+{
+ struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work);
+ struct lm90_data *data = container_of(delayed_work, struct lm90_data, alert_work);
+
+ /* Nothing to do if alerts are enabled */
+ if (!(data->config & 0x80))
+ return;
+
+ lm90_update_alarms(data, true);
+}
+
static int lm90_update_device(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
@@ -791,71 +1221,46 @@ static int lm90_update_device(struct device *dev)
data->valid = false;
- val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW);
+ val = lm90_read_reg(client, LM90_REG_LOCAL_LOW);
if (val < 0)
return val;
- data->temp8[LOCAL_LOW] = val;
+ data->temp[LOCAL_LOW] = val << 8;
- val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH);
+ val = lm90_read_reg(client, LM90_REG_LOCAL_HIGH);
if (val < 0)
return val;
- data->temp8[LOCAL_HIGH] = val;
+ data->temp[LOCAL_HIGH] = val << 8;
- if (data->reg_local_ext) {
- val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
- data->reg_local_ext);
- if (val < 0)
- return val;
- data->temp11[LOCAL_TEMP] = val;
- } else {
- val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP);
- if (val < 0)
- return val;
- data->temp11[LOCAL_TEMP] = val << 8;
- }
- val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL);
+ val = lm90_read16(client, LM90_REG_LOCAL_TEMP,
+ data->reg_local_ext, true);
if (val < 0)
return val;
- data->temp11[REMOTE_TEMP] = val;
-
- val = lm90_read_reg(client, LM90_REG_R_STATUS);
+ data->temp[LOCAL_TEMP] = val;
+ val = lm90_read16(client, LM90_REG_REMOTE_TEMPH,
+ data->reg_remote_ext, true);
if (val < 0)
return val;
- data->alarms = val & ~LM90_STATUS_BUSY;
+ data->temp[REMOTE_TEMP] = val;
- if (data->kind == max6696) {
- val = lm90_select_remote_channel(data, 1);
+ if (data->flags & LM90_HAVE_TEMP3) {
+ val = lm90_select_remote_channel(data, true);
if (val < 0)
return val;
- val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL);
+ val = lm90_read16(client, LM90_REG_REMOTE_TEMPH,
+ data->reg_remote_ext, true);
if (val < 0) {
- lm90_select_remote_channel(data, 0);
+ lm90_select_remote_channel(data, false);
return val;
}
- data->temp11[REMOTE2_TEMP] = val;
+ data->temp[REMOTE2_TEMP] = val;
- lm90_select_remote_channel(data, 0);
-
- val = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
- if (val < 0)
- return val;
- data->alarms |= val << 8;
+ lm90_select_remote_channel(data, false);
}
- /*
- * Re-enable ALERT# output if it was originally enabled and
- * relevant alarms are all clear
- */
- if ((client->irq || !(data->config_orig & 0x80)) &&
- !(data->alarms & data->alert_alarms)) {
- if (data->config & 0x80) {
- dev_dbg(&client->dev, "Re-enabling ALERT#\n");
- lm90_update_confreg(data, data->config & ~0x80);
- }
- }
+ val = lm90_update_alarms_locked(data, false);
+ if (val < 0)
+ return val;
data->last_updated = jiffies;
data->valid = true;
@@ -864,130 +1269,7 @@ static int lm90_update_device(struct device *dev)
return 0;
}
-/*
- * Conversions
- * For local temperatures and limits, critical limits and the hysteresis
- * value, the LM90 uses signed 8-bit values with LSB = 1 degree Celsius.
- * For remote temperatures and limits, it uses signed 11-bit values with
- * LSB = 0.125 degree Celsius, left-justified in 16-bit registers. Some
- * Maxim chips use unsigned values.
- */
-
-static inline int temp_from_s8(s8 val)
-{
- return val * 1000;
-}
-
-static inline int temp_from_u8(u8 val)
-{
- return val * 1000;
-}
-
-static inline int temp_from_s16(s16 val)
-{
- return val / 32 * 125;
-}
-
-static inline int temp_from_u16(u16 val)
-{
- return val / 32 * 125;
-}
-
-static s8 temp_to_s8(long val)
-{
- if (val <= -128000)
- return -128;
- if (val >= 127000)
- return 127;
- if (val < 0)
- return (val - 500) / 1000;
- return (val + 500) / 1000;
-}
-
-static u8 temp_to_u8(long val)
-{
- if (val <= 0)
- return 0;
- if (val >= 255000)
- return 255;
- return (val + 500) / 1000;
-}
-
-static s16 temp_to_s16(long val)
-{
- if (val <= -128000)
- return 0x8000;
- if (val >= 127875)
- return 0x7FE0;
- if (val < 0)
- return (val - 62) / 125 * 32;
- return (val + 62) / 125 * 32;
-}
-
-static u8 hyst_to_reg(long val)
-{
- if (val <= 0)
- return 0;
- if (val >= 30500)
- return 31;
- return (val + 500) / 1000;
-}
-
-/*
- * ADT7461 in compatibility mode is almost identical to LM90 except that
- * attempts to write values that are outside the range 0 < temp < 127 are
- * treated as the boundary value.
- *
- * ADT7461 in "extended mode" operation uses unsigned integers offset by
- * 64 (e.g., 0 -> -64 degC). The range is restricted to -64..191 degC.
- */
-static inline int temp_from_u8_adt7461(struct lm90_data *data, u8 val)
-{
- if (data->flags & LM90_FLAG_ADT7461_EXT)
- return (val - 64) * 1000;
- return temp_from_s8(val);
-}
-
-static inline int temp_from_u16_adt7461(struct lm90_data *data, u16 val)
-{
- if (data->flags & LM90_FLAG_ADT7461_EXT)
- return (val - 0x4000) / 64 * 250;
- return temp_from_s16(val);
-}
-
-static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
-{
- if (data->flags & LM90_FLAG_ADT7461_EXT) {
- if (val <= -64000)
- return 0;
- if (val >= 191000)
- return 0xFF;
- return (val + 500 + 64000) / 1000;
- }
- if (val <= 0)
- return 0;
- if (val >= 127000)
- return 127;
- return (val + 500) / 1000;
-}
-
-static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
-{
- if (data->flags & LM90_FLAG_ADT7461_EXT) {
- if (val <= -64000)
- return 0;
- if (val >= 191750)
- return 0xFFC0;
- return (val + 64000 + 125) / 250 * 64;
- }
- if (val <= 0)
- return 0;
- if (val >= 127750)
- return 0x7FC0;
- return (val + 125) / 250 * 64;
-}
-
-/* pec used for ADM1032 only */
+/* pec used for devices with PEC support */
static ssize_t pec_show(struct device *dev, struct device_attribute *dummy,
char *buf)
{
@@ -1023,196 +1305,211 @@ static ssize_t pec_store(struct device *dev, struct device_attribute *dummy,
static DEVICE_ATTR_RW(pec);
-static int lm90_get_temp11(struct lm90_data *data, int index)
+static int lm90_temp_get_resolution(struct lm90_data *data, int index)
{
- s16 temp11 = data->temp11[index];
- int temp;
+ switch (index) {
+ case REMOTE_TEMP:
+ if (data->reg_remote_ext)
+ return data->resolution;
+ return 8;
+ case REMOTE_OFFSET:
+ case REMOTE2_OFFSET:
+ case REMOTE2_TEMP:
+ return data->resolution;
+ case LOCAL_TEMP:
+ if (data->reg_local_ext)
+ return data->resolution;
+ return 8;
+ case REMOTE_LOW:
+ case REMOTE_HIGH:
+ case REMOTE2_LOW:
+ case REMOTE2_HIGH:
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
+ return data->resolution;
+ return 8;
+ default:
+ return 8;
+ }
+}
- if (data->flags & LM90_HAVE_EXTENDED_TEMP)
- temp = temp_from_u16_adt7461(data, temp11);
- else if (data->kind == max6646)
- temp = temp_from_u16(temp11);
+static int lm90_temp_from_reg(u32 flags, u16 regval, u8 resolution)
+{
+ int val;
+
+ if (flags & LM90_HAVE_EXTENDED_TEMP)
+ val = regval - 0x4000;
+ else if (flags & (LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_EXT_UNSIGNED))
+ val = regval;
else
- temp = temp_from_s16(temp11);
+ val = (s16)regval;
+
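+	/* drop unused low-order bits, then scale to millidegrees Celsius */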
+ return ((val >> (16 - resolution)) * 1000) >> (resolution - 8);
+}
+
+static int lm90_get_temp(struct lm90_data *data, int index, int channel)
+{
+ int temp = lm90_temp_from_reg(data->flags, data->temp[index],
+ lm90_temp_get_resolution(data, index));
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index <= 2)
+ /* +16 degrees offset for remote temperature on LM99 */
+ if (data->kind == lm99 && channel)
temp += 16000;
return temp;
}
-static int lm90_set_temp11(struct lm90_data *data, int index, long val)
+static u16 lm90_temp_to_reg(u32 flags, long val, u8 resolution)
{
- static struct reg {
- u8 high;
- u8 low;
- } reg[] = {
- [REMOTE_LOW] = { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL },
- [REMOTE_HIGH] = { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL },
- [REMOTE_OFFSET] = { LM90_REG_W_REMOTE_OFFSH, LM90_REG_W_REMOTE_OFFSL },
- [REMOTE2_LOW] = { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL },
- [REMOTE2_HIGH] = { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL }
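+	/* largest fractional part, in millidegrees, representable at this resolution */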
+ int fraction = resolution > 8 ?
+ 1000 - DIV_ROUND_CLOSEST(1000, BIT(resolution - 8)) : 0;
+
+ if (flags & LM90_HAVE_EXTENDED_TEMP) {
+ val = clamp_val(val, -64000, 191000 + fraction);
+ val += 64000;
+ } else if (flags & LM90_HAVE_EXT_UNSIGNED) {
+ val = clamp_val(val, 0, 255000 + fraction);
+ } else if (flags & LM90_HAVE_UNSIGNED_TEMP) {
+ val = clamp_val(val, 0, 127000 + fraction);
+ } else {
+ val = clamp_val(val, -128000, 127000 + fraction);
+ }
+
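+	/* round to the nearest step at this resolution and left-justify the result */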
+ return DIV_ROUND_CLOSEST(val << (resolution - 8), 1000) << (16 - resolution);
+}
+
+static int lm90_set_temp(struct lm90_data *data, int index, int channel, long val)
+{
+ static const u8 regs[] = {
+ [LOCAL_LOW] = LM90_REG_LOCAL_LOW,
+ [LOCAL_HIGH] = LM90_REG_LOCAL_HIGH,
+ [LOCAL_CRIT] = LM90_REG_LOCAL_CRIT,
+ [REMOTE_CRIT] = LM90_REG_REMOTE_CRIT,
+ [LOCAL_EMERG] = MAX6659_REG_LOCAL_EMERG,
+ [REMOTE_EMERG] = MAX6659_REG_REMOTE_EMERG,
+ [REMOTE2_CRIT] = LM90_REG_REMOTE_CRIT,
+ [REMOTE2_EMERG] = MAX6659_REG_REMOTE_EMERG,
+ [REMOTE_LOW] = LM90_REG_REMOTE_LOWH,
+ [REMOTE_HIGH] = LM90_REG_REMOTE_HIGHH,
+ [REMOTE2_LOW] = LM90_REG_REMOTE_LOWH,
+ [REMOTE2_HIGH] = LM90_REG_REMOTE_HIGHH,
};
struct i2c_client *client = data->client;
- struct reg *regp = &reg[index];
+ u8 regh = regs[index];
+ u8 regl = 0;
int err;
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index <= 2) {
+ if (channel && (data->flags & LM90_HAVE_REM_LIMIT_EXT)) {
+ if (index == REMOTE_LOW || index == REMOTE2_LOW)
+ regl = LM90_REG_REMOTE_LOWL;
+ else if (index == REMOTE_HIGH || index == REMOTE2_HIGH)
+ regl = LM90_REG_REMOTE_HIGHL;
+ }
+
+ /* +16 degrees offset for remote temperature on LM99 */
+ if (data->kind == lm99 && channel) {
/* prevent integer underflow */
val = max(val, -128000l);
val -= 16000;
}
- if (data->flags & LM90_HAVE_EXTENDED_TEMP)
- data->temp11[index] = temp_to_u16_adt7461(data, val);
- else if (data->kind == max6646)
- data->temp11[index] = temp_to_u8(val) << 8;
- else if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
- data->temp11[index] = temp_to_s16(val);
- else
- data->temp11[index] = temp_to_s8(val) << 8;
+ data->temp[index] = lm90_temp_to_reg(data->flags, val,
+ lm90_temp_get_resolution(data, index));
- lm90_select_remote_channel(data, index >= 3);
- err = i2c_smbus_write_byte_data(client, regp->high,
- data->temp11[index] >> 8);
- if (err < 0)
- return err;
- if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
- err = i2c_smbus_write_byte_data(client, regp->low,
- data->temp11[index] & 0xff);
+ if (channel > 1)
+ lm90_select_remote_channel(data, true);
+
+ err = lm90_write16(client, regh, regl, data->temp[index]);
+
+ if (channel > 1)
+ lm90_select_remote_channel(data, false);
- lm90_select_remote_channel(data, 0);
return err;
}
-static int lm90_get_temp8(struct lm90_data *data, int index)
+static int lm90_get_temphyst(struct lm90_data *data, int index, int channel)
{
- s8 temp8 = data->temp8[index];
- int temp;
+ int temp = lm90_get_temp(data, index, channel);
- if (data->flags & LM90_HAVE_EXTENDED_TEMP)
- temp = temp_from_u8_adt7461(data, temp8);
- else if (data->kind == max6646)
- temp = temp_from_u8(temp8);
- else
- temp = temp_from_s8(temp8);
-
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index == 3)
- temp += 16000;
-
- return temp;
+ return temp - data->temp_hyst * 1000;
}
-static int lm90_set_temp8(struct lm90_data *data, int index, long val)
+static int lm90_set_temphyst(struct lm90_data *data, long val)
{
- static const u8 reg[TEMP8_REG_NUM] = {
- LM90_REG_W_LOCAL_LOW,
- LM90_REG_W_LOCAL_HIGH,
- LM90_REG_W_LOCAL_CRIT,
- LM90_REG_W_REMOTE_CRIT,
- MAX6659_REG_W_LOCAL_EMERG,
- MAX6659_REG_W_REMOTE_EMERG,
- LM90_REG_W_REMOTE_CRIT,
- MAX6659_REG_W_REMOTE_EMERG,
- };
- struct i2c_client *client = data->client;
- int err;
+ int temp = lm90_get_temp(data, LOCAL_CRIT, 0);
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index == 3) {
- /* prevent integer underflow */
- val = max(val, -128000l);
- val -= 16000;
- }
+ /* prevent integer overflow/underflow */
+ val = clamp_val(val, -128000l, 255000l);
+ data->temp_hyst = clamp_val(DIV_ROUND_CLOSEST(temp - val, 1000), 0, 31);
- if (data->flags & LM90_HAVE_EXTENDED_TEMP)
- data->temp8[index] = temp_to_u8_adt7461(data, val);
- else if (data->kind == max6646)
- data->temp8[index] = temp_to_u8(val);
- else
- data->temp8[index] = temp_to_s8(val);
+ return lm90_write_reg(data->client, LM90_REG_TCRIT_HYST, data->temp_hyst);
+}
- lm90_select_remote_channel(data, index >= 6);
- err = i2c_smbus_write_byte_data(client, reg[index], data->temp8[index]);
- lm90_select_remote_channel(data, 0);
+static int lm90_get_temp_offset(struct lm90_data *data, int index)
+{
+ int res = lm90_temp_get_resolution(data, index);
- return err;
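+	/* offset registers hold plain signed values, so pass no format flags */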
+ return lm90_temp_from_reg(0, data->temp[index], res);
}
-static int lm90_get_temphyst(struct lm90_data *data, int index)
+static int lm90_set_temp_offset(struct lm90_data *data, int index, int channel, long val)
{
- int temp;
+ int err;
- if (data->flags & LM90_HAVE_EXTENDED_TEMP)
- temp = temp_from_u8_adt7461(data, data->temp8[index]);
- else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[index]);
- else
- temp = temp_from_s8(data->temp8[index]);
+ val = lm90_temp_to_reg(0, val, lm90_temp_get_resolution(data, index));
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index == 3)
- temp += 16000;
+ /* For ADT7481 we can use the same registers for remote channel 1 and 2 */
+ if (channel > 1)
+ lm90_select_remote_channel(data, true);
- return temp - temp_from_s8(data->temp_hyst);
-}
+ err = lm90_write16(data->client, LM90_REG_REMOTE_OFFSH, LM90_REG_REMOTE_OFFSL, val);
-static int lm90_set_temphyst(struct lm90_data *data, long val)
-{
- struct i2c_client *client = data->client;
- int temp;
- int err;
+ if (channel > 1)
+ lm90_select_remote_channel(data, false);
- if (data->flags & LM90_HAVE_EXTENDED_TEMP)
- temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
- else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
- else
- temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
+ if (err)
+ return err;
- /* prevent integer overflow/underflow */
- val = clamp_val(val, -128000l, 255000l);
+ data->temp[index] = val;
- data->temp_hyst = hyst_to_reg(temp - val);
- err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
- data->temp_hyst);
- return err;
+ return 0;
}
-static const u8 lm90_temp_index[3] = {
+static const u8 lm90_temp_index[MAX_CHANNELS] = {
LOCAL_TEMP, REMOTE_TEMP, REMOTE2_TEMP
};
-static const u8 lm90_temp_min_index[3] = {
+static const u8 lm90_temp_min_index[MAX_CHANNELS] = {
LOCAL_LOW, REMOTE_LOW, REMOTE2_LOW
};
-static const u8 lm90_temp_max_index[3] = {
+static const u8 lm90_temp_max_index[MAX_CHANNELS] = {
LOCAL_HIGH, REMOTE_HIGH, REMOTE2_HIGH
};
-static const u8 lm90_temp_crit_index[3] = {
+static const u8 lm90_temp_crit_index[MAX_CHANNELS] = {
LOCAL_CRIT, REMOTE_CRIT, REMOTE2_CRIT
};
-static const u8 lm90_temp_emerg_index[3] = {
+static const u8 lm90_temp_emerg_index[MAX_CHANNELS] = {
LOCAL_EMERG, REMOTE_EMERG, REMOTE2_EMERG
};
-static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
-static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
-static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
-static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 };
-static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
-static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
+static const s8 lm90_temp_offset_index[MAX_CHANNELS] = {
+ -1, REMOTE_OFFSET, REMOTE2_OFFSET
+};
+
+static const u16 lm90_min_alarm_bits[MAX_CHANNELS] = { BIT(5), BIT(3), BIT(11) };
+static const u16 lm90_max_alarm_bits[MAX_CHANNELS] = { BIT(6), BIT(4), BIT(12) };
+static const u16 lm90_crit_alarm_bits[MAX_CHANNELS] = { BIT(0), BIT(1), BIT(9) };
+static const u16 lm90_crit_alarm_bits_swapped[MAX_CHANNELS] = { BIT(1), BIT(0), BIT(9) };
+static const u16 lm90_emergency_alarm_bits[MAX_CHANNELS] = { BIT(15), BIT(13), BIT(14) };
+static const u16 lm90_fault_bits[MAX_CHANNELS] = { BIT(0), BIT(2), BIT(10) };
static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
struct lm90_data *data = dev_get_drvdata(dev);
int err;
+ u16 bit;
mutex_lock(&data->update_lock);
err = lm90_update_device(dev);
@@ -1222,56 +1519,57 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
switch (attr) {
case hwmon_temp_input:
- *val = lm90_get_temp11(data, lm90_temp_index[channel]);
+ *val = lm90_get_temp(data, lm90_temp_index[channel], channel);
break;
case hwmon_temp_min_alarm:
- *val = (data->alarms >> lm90_min_alarm_bits[channel]) & 1;
- break;
case hwmon_temp_max_alarm:
- *val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
- break;
case hwmon_temp_crit_alarm:
- if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
- *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1;
- else
- *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
- break;
case hwmon_temp_emergency_alarm:
- *val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
- break;
case hwmon_temp_fault:
- *val = (data->alarms >> lm90_fault_bits[channel]) & 1;
+ switch (attr) {
+ case hwmon_temp_min_alarm:
+ bit = lm90_min_alarm_bits[channel];
+ break;
+ case hwmon_temp_max_alarm:
+ bit = lm90_max_alarm_bits[channel];
+ break;
+ case hwmon_temp_crit_alarm:
+ if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
+ bit = lm90_crit_alarm_bits_swapped[channel];
+ else
+ bit = lm90_crit_alarm_bits[channel];
+ break;
+ case hwmon_temp_emergency_alarm:
+ bit = lm90_emergency_alarm_bits[channel];
+ break;
+ case hwmon_temp_fault:
+ bit = lm90_fault_bits[channel];
+ break;
+ }
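+		/* clear the reported bit; still-active alarms are re-armed from current status */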
+ *val = !!(data->alarms & bit);
+ data->alarms &= ~bit;
+ data->alarms |= data->current_alarms;
break;
case hwmon_temp_min:
- if (channel == 0)
- *val = lm90_get_temp8(data,
- lm90_temp_min_index[channel]);
- else
- *val = lm90_get_temp11(data,
- lm90_temp_min_index[channel]);
+ *val = lm90_get_temp(data, lm90_temp_min_index[channel], channel);
break;
case hwmon_temp_max:
- if (channel == 0)
- *val = lm90_get_temp8(data,
- lm90_temp_max_index[channel]);
- else
- *val = lm90_get_temp11(data,
- lm90_temp_max_index[channel]);
+ *val = lm90_get_temp(data, lm90_temp_max_index[channel], channel);
break;
case hwmon_temp_crit:
- *val = lm90_get_temp8(data, lm90_temp_crit_index[channel]);
+ *val = lm90_get_temp(data, lm90_temp_crit_index[channel], channel);
break;
case hwmon_temp_crit_hyst:
- *val = lm90_get_temphyst(data, lm90_temp_crit_index[channel]);
+ *val = lm90_get_temphyst(data, lm90_temp_crit_index[channel], channel);
break;
case hwmon_temp_emergency:
- *val = lm90_get_temp8(data, lm90_temp_emerg_index[channel]);
+ *val = lm90_get_temp(data, lm90_temp_emerg_index[channel], channel);
break;
case hwmon_temp_emergency_hyst:
- *val = lm90_get_temphyst(data, lm90_temp_emerg_index[channel]);
+ *val = lm90_get_temphyst(data, lm90_temp_emerg_index[channel], channel);
break;
case hwmon_temp_offset:
- *val = lm90_get_temp11(data, REMOTE_OFFSET);
+ *val = lm90_get_temp_offset(data, lm90_temp_offset_index[channel]);
break;
default:
return -EOPNOTSUPP;
@@ -1292,36 +1590,27 @@ static int lm90_temp_write(struct device *dev, u32 attr, int channel, long val)
switch (attr) {
case hwmon_temp_min:
- if (channel == 0)
- err = lm90_set_temp8(data,
- lm90_temp_min_index[channel],
- val);
- else
- err = lm90_set_temp11(data,
- lm90_temp_min_index[channel],
- val);
+ err = lm90_set_temp(data, lm90_temp_min_index[channel],
+ channel, val);
break;
case hwmon_temp_max:
- if (channel == 0)
- err = lm90_set_temp8(data,
- lm90_temp_max_index[channel],
- val);
- else
- err = lm90_set_temp11(data,
- lm90_temp_max_index[channel],
- val);
+ err = lm90_set_temp(data, lm90_temp_max_index[channel],
+ channel, val);
break;
case hwmon_temp_crit:
- err = lm90_set_temp8(data, lm90_temp_crit_index[channel], val);
+ err = lm90_set_temp(data, lm90_temp_crit_index[channel],
+ channel, val);
break;
case hwmon_temp_crit_hyst:
err = lm90_set_temphyst(data, val);
break;
case hwmon_temp_emergency:
- err = lm90_set_temp8(data, lm90_temp_emerg_index[channel], val);
+ err = lm90_set_temp(data, lm90_temp_emerg_index[channel],
+ channel, val);
break;
case hwmon_temp_offset:
- err = lm90_set_temp11(data, REMOTE_OFFSET, val);
+ err = lm90_set_temp_offset(data, lm90_temp_offset_index[channel],
+ channel, val);
break;
default:
err = -EOPNOTSUPP;
@@ -1343,6 +1632,7 @@ static umode_t lm90_temp_is_visible(const void *data, u32 attr, int channel)
case hwmon_temp_emergency_alarm:
case hwmon_temp_emergency_hyst:
case hwmon_temp_fault:
+ case hwmon_temp_label:
return 0444;
case hwmon_temp_min:
case hwmon_temp_max:
@@ -1377,6 +1667,28 @@ static int lm90_chip_read(struct device *dev, u32 attr, int channel, long *val)
case hwmon_chip_alarms:
*val = data->alarms;
break;
+ case hwmon_chip_temp_samples:
+ if (data->faultqueue_mask) {
+ *val = (data->config & data->faultqueue_mask) ?
+ data->faultqueue_depth : 1;
+ } else {
+ switch (data->conalert & 0x0e) {
+ case 0x0:
+ default:
+ *val = 1;
+ break;
+ case 0x2:
+ *val = 2;
+ break;
+ case 0x6:
+ *val = 3;
+ break;
+ case 0xe:
+ *val = 4;
+ break;
+ }
+ }
+ break;
default:
return -EOPNOTSUPP;
}
@@ -1401,6 +1713,9 @@ static int lm90_chip_write(struct device *dev, u32 attr, int channel, long val)
err = lm90_set_convrate(client, data,
clamp_val(val, 0, 100000));
break;
+ case hwmon_chip_temp_samples:
+ err = lm90_set_faultqueue(client, data, clamp_val(val, 1, 4));
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -1415,6 +1730,7 @@ static umode_t lm90_chip_is_visible(const void *data, u32 attr, int channel)
{
switch (attr) {
case hwmon_chip_update_interval:
+ case hwmon_chip_temp_samples:
return 0644;
case hwmon_chip_alarms:
return 0444;
@@ -1436,6 +1752,16 @@ static int lm90_read(struct device *dev, enum hwmon_sensor_types type,
}
}
+static int lm90_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+
+ *str = data->channel_label[channel];
+
+ return 0;
+}
+
static int lm90_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
@@ -1462,125 +1788,359 @@ static umode_t lm90_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm90_detect(struct i2c_client *client,
- struct i2c_board_info *info)
+static const char *lm90_detect_lm84(struct i2c_client *client)
{
- struct i2c_adapter *adapter = client->adapter;
+ static const u8 regs[] = {
+ LM90_REG_STATUS, LM90_REG_LOCAL_TEMP, LM90_REG_LOCAL_HIGH,
+ LM90_REG_REMOTE_TEMPH, LM90_REG_REMOTE_HIGHH
+ };
+ int status = i2c_smbus_read_byte_data(client, LM90_REG_STATUS);
+ int reg1, reg2, reg3, reg4;
+ bool nonzero = false;
+ u8 ff = 0xff;
+ int i;
+
+ if (status < 0 || (status & 0xab))
+ return NULL;
+
+ /*
+ * For LM84, undefined registers return the most recent value.
+ * Repeat several times, each time checking against a different
+ * (presumably) existing register.
+ */
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ reg1 = i2c_smbus_read_byte_data(client, regs[i]);
+ reg2 = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_TEMPL);
+ reg3 = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW);
+ reg4 = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH);
+
+ if (reg1 < 0)
+ return NULL;
+
+ /* If any register has a different value, this is not an LM84 */
+ if (reg2 != reg1 || reg3 != reg1 || reg4 != reg1)
+ return NULL;
+
+ nonzero |= reg1 || reg2 || reg3 || reg4;
+ ff &= reg1;
+ }
+ /*
+ * If all registers always returned 0 or 0xff, all bets are off,
+ * and we can not make any predictions about the chip type.
+ */
+ return nonzero && ff != 0xff ? "lm84" : NULL;
+}
+
+static const char *lm90_detect_max1617(struct i2c_client *client, int config1)
+{
+ int status = i2c_smbus_read_byte_data(client, LM90_REG_STATUS);
+ int llo, rlo, lhi, rhi;
+
+ if (status < 0 || (status & 0x03))
+ return NULL;
+
+ if (config1 & 0x3f)
+ return NULL;
+
+ /*
+ * Fail if unsupported registers return anything but 0xff.
+ * The calling code already checked man_id and chip_id.
+ * A byte read operation repeats the most recent read operation
+ * and should also return 0xff.
+ */
+ if (i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_TEMPL) != 0xff ||
+ i2c_smbus_read_byte_data(client, MAX6657_REG_LOCAL_TEMPL) != 0xff ||
+ i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWL) != 0xff ||
+ i2c_smbus_read_byte(client) != 0xff)
+ return NULL;
+
+ llo = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW);
+ rlo = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH);
+
+ lhi = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_HIGH);
+ rhi = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_HIGHH);
+
+ if (llo < 0 || rlo < 0)
+ return NULL;
+
+ /*
+ * A byte read operation repeats the most recent read and should
+ * return the same value.
+ */
+ if (i2c_smbus_read_byte(client) != rhi)
+ return NULL;
+
+ /*
+ * The following two checks are marginal since the checked values
+ * are strictly speaking valid.
+ */
+
+ /* fail for negative high limits; this also catches read errors */
+ if ((s8)lhi < 0 || (s8)rhi < 0)
+ return NULL;
+
+ /* fail if low limits are larger than or equal to high limits */
+ if ((s8)llo >= lhi || (s8)rlo >= rhi)
+ return NULL;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
+ /*
+		 * Word read operations return 0xff in the second byte
+ */
+ if (i2c_smbus_read_word_data(client, LM90_REG_REMOTE_TEMPL) !=
+ 0xffff)
+ return NULL;
+ if (i2c_smbus_read_word_data(client, LM90_REG_CONFIG1) !=
+ (config1 | 0xff00))
+ return NULL;
+ if (i2c_smbus_read_word_data(client, LM90_REG_LOCAL_HIGH) !=
+ (lhi | 0xff00))
+ return NULL;
+ }
+
+ return "max1617";
+}
+
+static const char *lm90_detect_national(struct i2c_client *client, int chip_id,
+ int config1, int convrate)
+{
+ int config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
int address = client->addr;
const char *name = NULL;
- int man_id, chip_id, config1, config2, convrate;
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
+ if (config2 < 0)
+ return NULL;
- /* detection and identification */
- man_id = i2c_smbus_read_byte_data(client, LM90_REG_R_MAN_ID);
- chip_id = i2c_smbus_read_byte_data(client, LM90_REG_R_CHIP_ID);
- config1 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG1);
- convrate = i2c_smbus_read_byte_data(client, LM90_REG_R_CONVRATE);
- if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
- return -ENODEV;
+ if ((config1 & 0x2a) || (config2 & 0xf8) || convrate > 0x09)
+ return NULL;
- if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
- config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
- if (config2 < 0)
- return -ENODEV;
+ if (address != 0x4c && address != 0x4d)
+ return NULL;
+
+ switch (chip_id & 0xf0) {
+ case 0x10: /* LM86 */
+ if (address == 0x4c)
+ name = "lm86";
+ break;
+ case 0x20: /* LM90 */
+ if (address == 0x4c)
+ name = "lm90";
+ break;
+ case 0x30: /* LM89/LM99 */
+ name = "lm99"; /* detect LM89 as LM99 */
+ break;
+ default:
+ break;
}
- if ((address == 0x4C || address == 0x4D)
- && man_id == 0x01) { /* National Semiconductor */
- if ((config1 & 0x2A) == 0x00
- && (config2 & 0xF8) == 0x00
- && convrate <= 0x09) {
- if (address == 0x4C
- && (chip_id & 0xF0) == 0x20) { /* LM90 */
- name = "lm90";
- } else
- if ((chip_id & 0xF0) == 0x30) { /* LM89/LM99 */
- name = "lm99";
- dev_info(&adapter->dev,
- "Assuming LM99 chip at 0x%02x\n",
- address);
- dev_info(&adapter->dev,
- "If it is an LM89, instantiate it "
- "with the new_device sysfs "
- "interface\n");
- } else
- if (address == 0x4C
- && (chip_id & 0xF0) == 0x10) { /* LM86 */
- name = "lm86";
- }
- }
- } else
- if ((address == 0x4C || address == 0x4D)
- && man_id == 0x41) { /* Analog Devices */
- if ((chip_id & 0xF0) == 0x40 /* ADM1032 */
- && (config1 & 0x3F) == 0x00
- && convrate <= 0x0A) {
+ return name;
+}
+
+static const char *lm90_detect_on(struct i2c_client *client, int chip_id, int config1,
+ int convrate)
+{
+ int address = client->addr;
+ const char *name = NULL;
+
+ switch (chip_id) {
+ case 0xca: /* NCT218 */
+ if ((address == 0x4c || address == 0x4d) && !(config1 & 0x1b) &&
+ convrate <= 0x0a)
+ name = "nct218";
+ break;
+ default:
+ break;
+ }
+ return name;
+}
+
+static const char *lm90_detect_analog(struct i2c_client *client, bool common_address,
+ int chip_id, int config1, int convrate)
+{
+ int status = i2c_smbus_read_byte_data(client, LM90_REG_STATUS);
+ int config2 = i2c_smbus_read_byte_data(client, ADT7481_REG_CONFIG2);
+ int man_id2 = i2c_smbus_read_byte_data(client, ADT7481_REG_MAN_ID);
+ int chip_id2 = i2c_smbus_read_byte_data(client, ADT7481_REG_CHIP_ID);
+ int address = client->addr;
+ const char *name = NULL;
+
+ if (status < 0 || config2 < 0 || man_id2 < 0 || chip_id2 < 0)
+ return NULL;
+
+ /*
+ * The following chips should be detected by this function. Known
+ * register values are listed. Registers 0x3d .. 0x3e are undocumented
+ * for most of the chips, yet appear to return a well defined value.
+ * Register 0xff is undocumented for some of the chips. Register 0x3f
+ * is undocumented for all chips, but also returns a well defined value.
+ * Values are as reported from real chips unless mentioned otherwise.
+ * The code below checks values for registers 0x3d, 0x3e, and 0xff,
+ * but not for register 0x3f.
+ *
+ * Chip Register
+ * 3d 3e 3f fe ff Notes
+ * ----------------------------------------------------------
+ * adm1020 00 00 00 41 39
+ * adm1021 00 00 00 41 03
+ * adm1021a 00 00 00 41 3c
+ * adm1023 00 00 00 41 3c same as adm1021a
+ * adm1032 00 00 00 41 42
+ *
+ * adt7421 21 41 04 41 04
+ * adt7461 00 00 00 41 51
+ * adt7461a 61 41 05 41 57
+ * adt7481 81 41 02 41 62
+ * adt7482 - - - 41 65 datasheet
+ * 82 41 05 41 75 real chip
+ * adt7483 83 41 04 41 94
+ *
+ * nct72 61 41 07 41 55
+ * nct210 00 00 00 41 3f
+ * nct214 61 41 08 41 5a
+ * nct1008 - - - 41 57 datasheet rev. 3
+ * 61 41 06 41 54 real chip
+ *
+ * nvt210 - - - 41 - datasheet
+ * nvt211 - - - 41 - datasheet
+ */
+ switch (chip_id) {
+ case 0x00 ... 0x03: /* ADM1021 */
+ case 0x05 ... 0x0f:
+ if (man_id2 == 0x00 && chip_id2 == 0x00 && common_address &&
+ !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8))
+ name = "adm1021";
+ break;
+ case 0x04: /* ADT7421 (undocumented) */
+ if (man_id2 == 0x41 && chip_id2 == 0x21 &&
+ (address == 0x4c || address == 0x4d) &&
+ (config1 & 0x0b) == 0x08 && convrate <= 0x0a)
+ name = "adt7421";
+ break;
+ case 0x30 ... 0x38: /* ADM1021A, ADM1023 */
+ case 0x3a ... 0x3e:
+ /*
+ * ADM1021A and compatible chips will be mis-detected as
+ * ADM1023. Chips labeled 'ADM1021A' and 'ADM1023' were both
+ * found to have a Chip ID of 0x3c.
+ * ADM1021A does not officially support low byte registers
+ * (0x12 .. 0x14), but a chip labeled ADM1021A does support it.
+ * Official support for the temperature offset high byte
+ * register (0x11) was added to revision F of the ADM1021A
+ * datasheet.
+ * It is currently unknown if there is a means to distinguish
+ * ADM1021A from ADM1023, and/or if revisions of ADM1021A exist
+ * which differ in functionality from ADM1023.
+ */
+ if (man_id2 == 0x00 && chip_id2 == 0x00 && common_address &&
+ !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8))
+ name = "adm1023";
+ break;
+ case 0x39: /* ADM1020 (undocumented) */
+ if (man_id2 == 0x00 && chip_id2 == 0x00 &&
+ (address == 0x4c || address == 0x4d || address == 0x4e) &&
+ !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8))
+ name = "adm1020";
+ break;
+ case 0x3f: /* NCT210 */
+ if (man_id2 == 0x00 && chip_id2 == 0x00 && common_address &&
+ !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8))
+ name = "nct210";
+ break;
+ case 0x40 ... 0x4f: /* ADM1032 */
+ if (man_id2 == 0x00 && chip_id2 == 0x00 &&
+ (address == 0x4c || address == 0x4d) && !(config1 & 0x3f) &&
+ convrate <= 0x0a)
name = "adm1032";
- /*
- * The ADM1032 supports PEC, but only if combined
- * transactions are not used.
- */
- if (i2c_check_functionality(adapter,
- I2C_FUNC_SMBUS_BYTE))
- info->flags |= I2C_CLIENT_PEC;
- } else
- if (chip_id == 0x51 /* ADT7461 */
- && (config1 & 0x1B) == 0x00
- && convrate <= 0x0A) {
+ break;
+ case 0x51: /* ADT7461 */
+ if (man_id2 == 0x00 && chip_id2 == 0x00 &&
+ (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) &&
+ convrate <= 0x0a)
name = "adt7461";
- } else
- if (chip_id == 0x57 /* ADT7461A, NCT1008 */
- && (config1 & 0x1B) == 0x00
- && convrate <= 0x0A) {
+ break;
+ case 0x54: /* NCT1008 */
+ if (man_id2 == 0x41 && chip_id2 == 0x61 &&
+ (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) &&
+ convrate <= 0x0a)
+ name = "nct1008";
+ break;
+ case 0x55: /* NCT72 */
+ if (man_id2 == 0x41 && chip_id2 == 0x61 &&
+ (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) &&
+ convrate <= 0x0a)
+ name = "nct72";
+ break;
+ case 0x57: /* ADT7461A, NCT1008 (datasheet rev. 3) */
+ if (man_id2 == 0x41 && chip_id2 == 0x61 &&
+ (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) &&
+ convrate <= 0x0a)
name = "adt7461a";
+ break;
+ case 0x5a: /* NCT214 */
+ if (man_id2 == 0x41 && chip_id2 == 0x61 &&
+ common_address && !(config1 & 0x1b) && convrate <= 0x0a)
+ name = "nct214";
+ break;
+ case 0x62: /* ADT7481, undocumented */
+ if (man_id2 == 0x41 && chip_id2 == 0x81 &&
+ (address == 0x4b || address == 0x4c) && !(config1 & 0x10) &&
+ !(config2 & 0x7f) && (convrate & 0x0f) <= 0x0b) {
+ name = "adt7481";
}
- } else
- if (man_id == 0x4D) { /* Maxim */
- int emerg, emerg2, status2;
+ break;
+ case 0x65: /* ADT7482, datasheet */
+ case 0x75: /* ADT7482, real chip */
+ if (man_id2 == 0x41 && chip_id2 == 0x82 &&
+ address == 0x4c && !(config1 & 0x10) && !(config2 & 0x7f) &&
+ convrate <= 0x0a)
+ name = "adt7482";
+ break;
+ case 0x94: /* ADT7483 */
+ if (man_id2 == 0x41 && chip_id2 == 0x83 &&
+ common_address &&
+ ((address >= 0x18 && address <= 0x1a) ||
+ (address >= 0x29 && address <= 0x2b) ||
+ (address >= 0x4c && address <= 0x4e)) &&
+ !(config1 & 0x10) && !(config2 & 0x7f) && convrate <= 0x0a)
+ name = "adt7483a";
+ break;
+ default:
+ break;
+ }
+
+ return name;
+}
+
+static const char *lm90_detect_maxim(struct i2c_client *client, bool common_address,
+ int chip_id, int config1, int convrate)
+{
+ int man_id, emerg, emerg2, status2;
+ int address = client->addr;
+ const char *name = NULL;
+
+ switch (chip_id) {
+ case 0x01:
+ if (!common_address)
+ break;
/*
- * We read MAX6659_REG_R_REMOTE_EMERG twice, and re-read
- * LM90_REG_R_MAN_ID in between. If MAX6659_REG_R_REMOTE_EMERG
+ * We read MAX6659_REG_REMOTE_EMERG twice, and re-read
+ * LM90_REG_MAN_ID in between. If MAX6659_REG_REMOTE_EMERG
* exists, both readings will reflect the same value. Otherwise,
* the readings will be different.
*/
emerg = i2c_smbus_read_byte_data(client,
- MAX6659_REG_R_REMOTE_EMERG);
+ MAX6659_REG_REMOTE_EMERG);
man_id = i2c_smbus_read_byte_data(client,
- LM90_REG_R_MAN_ID);
+ LM90_REG_MAN_ID);
emerg2 = i2c_smbus_read_byte_data(client,
- MAX6659_REG_R_REMOTE_EMERG);
+ MAX6659_REG_REMOTE_EMERG);
status2 = i2c_smbus_read_byte_data(client,
- MAX6696_REG_R_STATUS2);
+ MAX6696_REG_STATUS2);
if (emerg < 0 || man_id < 0 || emerg2 < 0 || status2 < 0)
- return -ENODEV;
+ return NULL;
/*
- * The MAX6657, MAX6658 and MAX6659 do NOT have a chip_id
- * register. Reading from that address will return the last
- * read value, which in our case is those of the man_id
- * register. Likewise, the config1 register seems to lack a
- * low nibble, so the value will be those of the previous
- * read, so in our case those of the man_id register.
- * MAX6659 has a third set of upper temperature limit registers.
- * Those registers also return values on MAX6657 and MAX6658,
- * thus the only way to detect MAX6659 is by its address.
- * For this reason it will be mis-detected as MAX6657 if its
- * address is 0x4C.
- */
- if (chip_id == man_id
- && (address == 0x4C || address == 0x4D || address == 0x4E)
- && (config1 & 0x1F) == (man_id & 0x0F)
- && convrate <= 0x09) {
- if (address == 0x4C)
- name = "max6657";
- else
- name = "max6659";
- } else
- /*
* Even though MAX6695 and MAX6696 do not have a chip ID
* register, reading it returns 0x01. Bit 4 of the config1
* register is unused and should return zero when read. Bit 0 of
@@ -1591,90 +2151,288 @@ static int lm90_detect(struct i2c_client *client,
* limit registers. We can detect those chips by checking if
* one of those registers exists.
*/
- if (chip_id == 0x01
- && (config1 & 0x10) == 0x00
- && (status2 & 0x01) == 0x00
- && emerg == emerg2
- && convrate <= 0x07) {
+ if (!(config1 & 0x10) && !(status2 & 0x01) && emerg == emerg2 &&
+ convrate <= 0x07)
name = "max6696";
- } else
/*
* The chip_id register of the MAX6680 and MAX6681 holds the
* revision of the chip. The lowest bit of the config1 register
* is unused and should return zero when read, so should the
- * second to last bit of config1 (software reset).
+ * second to last bit of config1 (software reset). Register
+ * address 0x12 (LM90_REG_REMOTE_OFFSL) exists for this chip and
+ * should differ from emerg2, and emerg2 should match man_id
+ * since it does not exist.
*/
- if (chip_id == 0x01
- && (config1 & 0x03) == 0x00
- && convrate <= 0x07) {
+ else if (!(config1 & 0x03) && convrate <= 0x07 &&
+ emerg2 == man_id && emerg2 != status2)
name = "max6680";
- } else
/*
- * The chip_id register of the MAX6646/6647/6649 holds the
- * revision of the chip. The lowest 6 bits of the config1
- * register are unused and should return zero when read.
+ * MAX1617A does not have any extended registers (register
+ * address 0x10 or higher) except for manufacturer and
+ * device ID registers. Unlike other chips of this series,
+ * unsupported registers were observed to return a fixed value
+ * of 0x01.
+ * Note: Multiple chips with different markings labeled as
+ * "MAX1617" (no "A") were observed to report manufacturer ID
+ * 0x4d and device ID 0x01. It is unknown if other variants of
+	 * MAX1617/MAX1617A with different behavior exist. The detection
+ * code below works for those chips.
*/
- if (chip_id == 0x59
- && (config1 & 0x3f) == 0x00
- && convrate <= 0x07) {
- name = "max6646";
- } else
+ else if (!(config1 & 0x03f) && convrate <= 0x07 &&
+ emerg == 0x01 && emerg2 == 0x01 && status2 == 0x01)
+ name = "max1617";
+ break;
+ case 0x08:
/*
* The chip_id of the MAX6654 holds the revision of the chip.
* The lowest 3 bits of the config1 register are unused and
* should return zero when read.
*/
- if (chip_id == 0x08
- && (config1 & 0x07) == 0x00
- && convrate <= 0x07) {
+ if (common_address && !(config1 & 0x07) && convrate <= 0x07)
name = "max6654";
+ break;
+ case 0x09:
+ /*
+ * The chip_id of the MAX6690 holds the revision of the chip.
+ * The lowest 3 bits of the config1 register are unused and
+ * should return zero when read.
+ * Note that MAX6654 and MAX6690 are practically the same chips.
+	 * The only difference is the rated accuracy. Rev. 1 of the
+ * MAX6690 datasheet lists a chip ID of 0x08, and a chip labeled
+ * MAX6654 was observed to have a chip ID of 0x09.
+ */
+ if (common_address && !(config1 & 0x07) && convrate <= 0x07)
+ name = "max6690";
+ break;
+ case 0x4d:
+ /*
+ * MAX6642, MAX6657, MAX6658 and MAX6659 do NOT have a chip_id
+ * register. Reading from that address will return the last
+ * read value, which in our case is those of the man_id
+ * register, or 0x4d.
+ * MAX6642 does not have a conversion rate register, nor low
+ * limit registers. Reading from those registers returns the
+ * last read value.
+ *
+ * For MAX6657, MAX6658 and MAX6659, the config1 register lacks
+ * a low nibble, so the value will be those of the previous
+ * read, so in our case again those of the man_id register.
+ * MAX6659 has a third set of upper temperature limit registers.
+ * Those registers also return values on MAX6657 and MAX6658,
+ * thus the only way to detect MAX6659 is by its address.
+ * For this reason it will be mis-detected as MAX6657 if its
+ * address is 0x4c.
+ */
+ if (address >= 0x48 && address <= 0x4f && config1 == convrate &&
+ !(config1 & 0x0f)) {
+ int regval;
+
+ /*
+ * We know that this is not a MAX6657/58/59 because its
+ * configuration register has the wrong value and it does
+ * not appear to have a conversion rate register.
+ */
+
+ /* re-read manufacturer ID to have a good baseline */
+ if (i2c_smbus_read_byte_data(client, LM90_REG_MAN_ID) != 0x4d)
+ break;
+
+ /* check various non-existing registers */
+ if (i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE) != 0x4d ||
+ i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW) != 0x4d ||
+ i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH) != 0x4d)
+ break;
+
+ /* check for unused status register bits */
+ regval = i2c_smbus_read_byte_data(client, LM90_REG_STATUS);
+ if (regval < 0 || (regval & 0x2b))
+ break;
+
+ /* re-check unsupported registers */
+ if (i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE) != regval ||
+ i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW) != regval ||
+ i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH) != regval)
+ break;
+
+ name = "max6642";
+ } else if ((address == 0x4c || address == 0x4d || address == 0x4e) &&
+ (config1 & 0x1f) == 0x0d && convrate <= 0x09) {
+ if (address == 0x4c)
+ name = "max6657";
+ else
+ name = "max6659";
}
- } else
- if (address == 0x4C
- && man_id == 0x5C) { /* Winbond/Nuvoton */
- if ((config1 & 0x2A) == 0x00
- && (config2 & 0xF8) == 0x00) {
- if (chip_id == 0x01 /* W83L771W/G */
- && convrate <= 0x09) {
- name = "w83l771";
- } else
- if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */
- && convrate <= 0x08) {
- name = "w83l771";
+ break;
+ case 0x59:
+ /*
+ * The chip_id register of the MAX6646/6647/6649 holds the
+ * revision of the chip. The lowest 6 bits of the config1
+ * register are unused and should return zero when read.
+ * The I2C address of MAX6648/6692 is fixed at 0x4c.
+ * MAX6646 is at address 0x4d, MAX6647 is at address 0x4e,
+ * and MAX6649 is at address 0x4c. A slight difference between
+ * the two sets of chips is that the remote temperature register
+ * reports different values if the DXP pin is open or shorted.
+ * We can use that information to help distinguish between the
+ * chips. MAX6648 will be mis-detected as MAX6649 if the remote
+ * diode is connected, but there isn't really anything we can
+ * do about that.
+ */
+ if (!(config1 & 0x3f) && convrate <= 0x07) {
+ int temp;
+
+ switch (address) {
+ case 0x4c:
+ /*
+ * MAX6649 reports an external temperature
+ * value of 0xff if DXP is open or shorted.
+ * MAX6648 reports 0x80 in that case.
+ */
+ temp = i2c_smbus_read_byte_data(client,
+ LM90_REG_REMOTE_TEMPH);
+ if (temp == 0x80)
+ name = "max6648";
+ else
+ name = "max6649";
+ break;
+ case 0x4d:
+ name = "max6646";
+ break;
+ case 0x4e:
+ name = "max6647";
+ break;
+ default:
+ break;
}
}
- } else
- if (address >= 0x48 && address <= 0x4F
- && man_id == 0xA1) { /* NXP Semiconductor/Philips */
- if (chip_id == 0x00
- && (config1 & 0x2A) == 0x00
- && (config2 & 0xFE) == 0x00
- && convrate <= 0x09) {
- name = "sa56004";
+ break;
+ default:
+ break;
+ }
+
+ return name;
+}
+
+static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
+ int config1, int convrate)
+{
+ int config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
+ int address = client->addr;
+ const char *name = NULL;
+
+ if (config2 < 0)
+ return NULL;
+
+ if (address == 0x4c && !(config1 & 0x2a) && !(config2 & 0xf8)) {
+ if (chip_id == 0x01 && convrate <= 0x09) {
+ /* W83L771W/G */
+ name = "w83l771";
+ } else if ((chip_id & 0xfe) == 0x10 && convrate <= 0x08) {
+ /* W83L771AWG/ASG */
+ name = "w83l771";
}
- } else
- if ((address == 0x4C || address == 0x4D)
- && man_id == 0x47) { /* GMT */
- if (chip_id == 0x01 /* G781 */
- && (config1 & 0x3F) == 0x00
- && convrate <= 0x08)
- name = "g781";
- } else
- if (man_id == 0x55 && chip_id == 0x00 &&
- (config1 & 0x1B) == 0x00 && convrate <= 0x09) {
+ }
+ return name;
+}
+
+static const char *lm90_detect_nxp(struct i2c_client *client, bool common_address,
+ int chip_id, int config1, int convrate)
+{
+ int address = client->addr;
+ const char *name = NULL;
+ int config2;
+
+ switch (chip_id) {
+ case 0x00:
+ config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
+ if (config2 < 0)
+ return NULL;
+ if (address >= 0x48 && address <= 0x4f &&
+ !(config1 & 0x2a) && !(config2 & 0xfe) && convrate <= 0x09)
+ name = "sa56004";
+ break;
+ case 0x80:
+ if (common_address && !(config1 & 0x3f) && convrate <= 0x07)
+ name = "ne1618";
+ break;
+ default:
+ break;
+ }
+ return name;
+}
+
+static const char *lm90_detect_gmt(struct i2c_client *client, int chip_id,
+ int config1, int convrate)
+{
+ int address = client->addr;
+
+ /*
+ * According to the datasheet, G781 is supposed to be at I2C Address
+ * 0x4c and have a chip ID of 0x01. G781-1 is supposed to be at I2C
+ * address 0x4d and have a chip ID of 0x03. However, when support
+ * for G781 was added, chips at 0x4c and 0x4d were found to have a
+	 * chip ID of 0x01. A G781-1 at I2C address 0x4d has since been found
+	 * with chip ID 0x03.
+	 * To avoid detection failures, accept chip ID 0x01 and 0x03 at both
+	 * addresses.
+	 * G784 reports manufacturer ID 0x47 and chip ID 0x01. A public
+	 * datasheet is not available. Extensive testing suggests that the
+	 * chip is fully compatible with G781.
+ * Available register dumps show that G751 also reports manufacturer
+ * ID 0x47 and chip ID 0x01 even though that chip does not officially
+ * support those registers. This makes chip detection somewhat
+ * vulnerable. To improve detection quality, read the offset low byte
+ * and alert fault queue registers and verify that only expected bits
+ * are set.
+ */
+ if ((chip_id == 0x01 || chip_id == 0x03) &&
+ (address == 0x4c || address == 0x4d) &&
+ !(config1 & 0x3f) && convrate <= 0x08) {
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_OFFSL);
+ if (reg < 0 || reg & 0x1f)
+ return NULL;
+ reg = i2c_smbus_read_byte_data(client, TMP451_REG_CONALERT);
+ if (reg < 0 || reg & 0xf1)
+ return NULL;
+
+ return "g781";
+ }
+
+ return NULL;
+}
+
+static const char *lm90_detect_ti49(struct i2c_client *client, bool common_address,
+ int chip_id, int config1, int convrate)
+{
+ if (common_address && chip_id == 0x00 && !(config1 & 0x3f) && !(convrate & 0xf8)) {
+ /* THMC10: Unsupported registers return 0xff */
+ if (i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_TEMPL) == 0xff &&
+ i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_CRIT) == 0xff)
+ return "thmc10";
+ }
+ return NULL;
+}
+
+static const char *lm90_detect_ti(struct i2c_client *client, int chip_id,
+ int config1, int convrate)
+{
+ int address = client->addr;
+ const char *name = NULL;
+
+ if (chip_id == 0x00 && !(config1 & 0x1b) && convrate <= 0x09) {
int local_ext, conalert, chen, dfc;
local_ext = i2c_smbus_read_byte_data(client,
- TMP451_REG_R_LOCAL_TEMPL);
+ TMP451_REG_LOCAL_TEMPL);
conalert = i2c_smbus_read_byte_data(client,
TMP451_REG_CONALERT);
chen = i2c_smbus_read_byte_data(client, TMP461_REG_CHEN);
dfc = i2c_smbus_read_byte_data(client, TMP461_REG_DFC);
- if ((local_ext & 0x0F) == 0x00 &&
- (conalert & 0xf1) == 0x01 &&
- (chen & 0xfc) == 0x00 &&
- (dfc & 0xfc) == 0x00) {
+ if (!(local_ext & 0x0f) && (conalert & 0xf1) == 0x01 &&
+ (chen & 0xfc) == 0x00 && (dfc & 0xfc) == 0x00) {
if (address == 0x4c && !(chen & 0x03))
name = "tmp451";
else if (address >= 0x48 && address <= 0x4f)
@@ -1682,10 +2440,110 @@ static int lm90_detect(struct i2c_client *client,
}
}
- if (!name) { /* identification failed */
+ return name;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int man_id, chip_id, config1, convrate, lhigh;
+ const char *name = NULL;
+ int address = client->addr;
+ bool common_address =
+ (address >= 0x18 && address <= 0x1a) ||
+ (address >= 0x29 && address <= 0x2b) ||
+ (address >= 0x4c && address <= 0x4e);
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /*
+ * Get well defined register value for chips with neither man_id nor
+ * chip_id registers.
+ */
+ lhigh = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_HIGH);
+
+ /* detection and identification */
+ man_id = i2c_smbus_read_byte_data(client, LM90_REG_MAN_ID);
+ chip_id = i2c_smbus_read_byte_data(client, LM90_REG_CHIP_ID);
+ config1 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG1);
+ convrate = i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE);
+ if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0 || lhigh < 0)
+ return -ENODEV;
+
+	/* Bail out immediately if all registers report the same value */
+ if (lhigh == man_id && lhigh == chip_id && lhigh == config1 && lhigh == convrate)
+ return -ENODEV;
+
+ /*
+ * If reading man_id and chip_id both return the same value as lhigh,
+	 * the chip may not support those registers and may simply return the
+	 * most recently read value. Check again with a different register and
+	 * handle accordingly.
+ */
+ if (man_id == lhigh && chip_id == lhigh) {
+ convrate = i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE);
+ man_id = i2c_smbus_read_byte_data(client, LM90_REG_MAN_ID);
+ chip_id = i2c_smbus_read_byte_data(client, LM90_REG_CHIP_ID);
+ if (convrate < 0 || man_id < 0 || chip_id < 0)
+ return -ENODEV;
+ if (man_id == convrate && chip_id == convrate)
+ man_id = -1;
+ }
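
To illustrate the check above (an informal sketch of the expected behaviour, not taken from a datasheet): a chip that implements neither a manufacturer-ID nor a chip-ID register typically just echoes the most recently returned register value, so after the local-high read both man_id and chip_id come back equal to lhigh. Re-reading in a different order then shows them tracking the convrate read instead, and man_id is forced to -1 so detection falls through to the LM84-style handling in the switch below.
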
+ switch (man_id) {
+ case -1: /* Chip does not support man_id / chip_id */
+ if (common_address && !convrate && !(config1 & 0x7f))
+ name = lm90_detect_lm84(client);
+ break;
+ case 0x01: /* National Semiconductor */
+ name = lm90_detect_national(client, chip_id, config1, convrate);
+ break;
+ case 0x1a: /* ON */
+ name = lm90_detect_on(client, chip_id, config1, convrate);
+ break;
+ case 0x23: /* Genesys Logic */
+ if (common_address && !(config1 & 0x3f) && !(convrate & 0xf8))
+ name = "gl523sm";
+ break;
+ case 0x41: /* Analog Devices */
+ name = lm90_detect_analog(client, common_address, chip_id, config1,
+ convrate);
+ break;
+ case 0x47: /* GMT */
+ name = lm90_detect_gmt(client, chip_id, config1, convrate);
+ break;
+ case 0x49: /* TI */
+ name = lm90_detect_ti49(client, common_address, chip_id, config1, convrate);
+ break;
+ case 0x4d: /* Maxim Integrated */
+ name = lm90_detect_maxim(client, common_address, chip_id,
+ config1, convrate);
+ break;
+ case 0x54: /* ON MC1066, Microchip TC1068, TCM1617 (originally TelCom) */
+ if (common_address && !(config1 & 0x3f) && !(convrate & 0xf8))
+ name = "mc1066";
+ break;
+ case 0x55: /* TI */
+ name = lm90_detect_ti(client, chip_id, config1, convrate);
+ break;
+ case 0x5c: /* Winbond/Nuvoton */
+ name = lm90_detect_nuvoton(client, chip_id, config1, convrate);
+ break;
+ case 0xa1: /* NXP Semiconductor/Philips */
+ name = lm90_detect_nxp(client, common_address, chip_id, config1, convrate);
+ break;
+ case 0xff: /* MAX1617, G767, NE1617 */
+ if (common_address && chip_id == 0xff && convrate < 8)
+ name = lm90_detect_max1617(client, config1);
+ break;
+ default:
+ break;
+ }
+
+ if (!name) { /* identification failed */
dev_dbg(&adapter->dev,
- "Unsupported chip at 0x%02x (man_id=0x%02X, "
- "chip_id=0x%02X)\n", address, man_id, chip_id);
+ "Unsupported chip at 0x%02x (man_id=0x%02X, chip_id=0x%02X)\n",
+ client->addr, man_id, chip_id);
return -ENODEV;
}
@@ -1699,10 +2557,13 @@ static void lm90_restore_conf(void *_data)
struct lm90_data *data = _data;
struct i2c_client *client = data->client;
+ cancel_delayed_work_sync(&data->alert_work);
+ cancel_work_sync(&data->report_work);
+
/* Restore initial configuration */
- lm90_write_convrate(data, data->convrate_orig);
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
- data->config_orig);
+ if (data->flags & LM90_HAVE_CONVRATE)
+ lm90_write_convrate(data, data->convrate_orig);
+ lm90_write_reg(client, LM90_REG_CONFIG1, data->config_orig);
}
static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
@@ -1710,35 +2571,39 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
struct device_node *np = client->dev.of_node;
int config, convrate;
- convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE);
- if (convrate < 0)
- return convrate;
- data->convrate_orig = convrate;
+ if (data->flags & LM90_HAVE_CONVRATE) {
+ convrate = lm90_read_reg(client, LM90_REG_CONVRATE);
+ if (convrate < 0)
+ return convrate;
+ data->convrate_orig = convrate;
+ lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
+ } else {
+ data->update_interval = 500;
+ }
/*
* Start the conversions.
*/
- config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ config = lm90_read_reg(client, LM90_REG_CONFIG1);
if (config < 0)
return config;
data->config_orig = config;
data->config = config;
- lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
-
/* Check Temperature Range Select */
if (data->flags & LM90_HAVE_EXTENDED_TEMP) {
if (of_property_read_bool(np, "ti,extended-range-enable"))
config |= 0x04;
-
- if (config & 0x04)
- data->flags |= LM90_FLAG_ADT7461_EXT;
+ if (!(config & 0x04))
+ data->flags &= ~LM90_HAVE_EXTENDED_TEMP;
}
/*
* Put MAX6680/MAX8881 into extended resolution (bit 0x10,
* 0.125 degree resolution) and range (0x08, extend range
* to -64 degree) mode for the remote temperature sensor.
+	 * Note that experiments with an actual chip do not show a difference
+	 * whether bit 3 is set or not.
*/
if (data->kind == max6680)
config |= 0x18;
@@ -1753,9 +2618,9 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
config |= 0x20;
/*
- * Select external channel 0 for max6695/96
+ * Select external channel 0 for devices with three sensors
*/
- if (data->kind == max6696)
+ if (data->flags & LM90_HAVE_TEMP3)
config &= ~0x08;
/*
@@ -1771,73 +2636,23 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data);
}
-static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
+static bool lm90_is_tripped(struct i2c_client *client)
{
struct lm90_data *data = i2c_get_clientdata(client);
- int st, st2 = 0;
-
- st = lm90_read_reg(client, LM90_REG_R_STATUS);
- if (st < 0)
- return false;
-
- if (data->kind == max6696) {
- st2 = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
- if (st2 < 0)
- return false;
- }
-
- *status = st | (st2 << 8);
+ int ret;
- if ((st & 0x7f) == 0 && (st2 & 0xfe) == 0)
+ ret = lm90_update_alarms(data, true);
+ if (ret < 0)
return false;
- if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
- (st2 & MAX6696_STATUS2_LOT2))
- dev_dbg(&client->dev,
- "temp%d out of range, please check!\n", 1);
- if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
- (st2 & MAX6696_STATUS2_ROT2))
- dev_dbg(&client->dev,
- "temp%d out of range, please check!\n", 2);
- if (st & LM90_STATUS_ROPEN)
- dev_dbg(&client->dev,
- "temp%d diode open, please check!\n", 2);
- if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
- MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
- dev_dbg(&client->dev,
- "temp%d out of range, please check!\n", 3);
- if (st2 & MAX6696_STATUS2_R2OPEN)
- dev_dbg(&client->dev,
- "temp%d diode open, please check!\n", 3);
-
- if (st & LM90_STATUS_LLOW)
- hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min_alarm, 0);
- if (st & LM90_STATUS_RLOW)
- hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min_alarm, 1);
- if (st2 & MAX6696_STATUS2_R2LOW)
- hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min_alarm, 2);
- if (st & LM90_STATUS_LHIGH)
- hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max_alarm, 0);
- if (st & LM90_STATUS_RHIGH)
- hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max_alarm, 1);
- if (st2 & MAX6696_STATUS2_R2HIGH)
- hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max_alarm, 2);
-
- return true;
+ return !!data->current_alarms;
}
static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
- u16 status;
- if (lm90_is_tripped(client, &status))
+ if (lm90_is_tripped(client))
return IRQ_HANDLED;
else
return IRQ_NONE;
@@ -1853,10 +2668,79 @@ static void lm90_regulator_disable(void *regulator)
regulator_disable(regulator);
}
+static int lm90_probe_channel_from_dt(struct i2c_client *client,
+ struct device_node *child,
+ struct lm90_data *data)
+{
+ u32 id;
+ s32 val;
+ int err;
+ struct device *dev = &client->dev;
+
+ err = of_property_read_u32(child, "reg", &id);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+
+ if (id >= MAX_CHANNELS) {
+ dev_err(dev, "invalid reg property value %d in %pOFn\n", id, child);
+ return -EINVAL;
+ }
+
+ err = of_property_read_string(child, "label", &data->channel_label[id]);
+ if (err == -ENODATA || err == -EILSEQ) {
+ dev_err(dev, "invalid label property in %pOFn\n", child);
+ return err;
+ }
+
+ if (data->channel_label[id])
+ data->channel_config[id] |= HWMON_T_LABEL;
+
+ err = of_property_read_s32(child, "temperature-offset-millicelsius", &val);
+ if (!err) {
+ if (id == 0) {
+ dev_err(dev, "temperature-offset-millicelsius can't be set for internal channel\n");
+ return -EINVAL;
+ }
+
+ err = lm90_set_temp_offset(data, lm90_temp_offset_index[id], id, val);
+ if (err) {
+ dev_err(dev, "can't set temperature offset %d for channel %d (%d)\n",
+ val, id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int lm90_parse_dt_channel_info(struct i2c_client *client,
+ struct lm90_data *data)
+{
+ int err;
+ struct device_node *child;
+ struct device *dev = &client->dev;
+ const struct device_node *np = dev->of_node;
+
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "channel"))
+ continue;
+
+ err = lm90_probe_channel_from_dt(client, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
static const struct hwmon_ops lm90_ops = {
.is_visible = lm90_is_visible,
.read = lm90_read,
+ .read_string = lm90_read_string,
.write = lm90_write,
};
@@ -1891,41 +2775,63 @@ static int lm90_probe(struct i2c_client *client)
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
+ INIT_DELAYED_WORK(&data->alert_work, lm90_alert_work);
+ INIT_WORK(&data->report_work, lm90_report_alarms);
/* Set the device type */
if (client->dev.of_node)
data->kind = (enum chips)of_device_get_match_data(&client->dev);
else
data->kind = i2c_match_id(lm90_id, client)->driver_data;
- if (data->kind == adm1032) {
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
- client->flags &= ~I2C_CLIENT_PEC;
- }
/*
* Different devices have different alarm bits triggering the
* ALERT# output
*/
data->alert_alarms = lm90_params[data->kind].alert_alarms;
+ data->resolution = lm90_params[data->kind].resolution ? : 11;
/* Set chip capabilities */
data->flags = lm90_params[data->kind].flags;
+ if ((data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) &&
+ !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_PEC))
+ data->flags &= ~(LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC);
+
+ if ((data->flags & LM90_HAVE_PARTIAL_PEC) &&
+ !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ data->flags &= ~LM90_HAVE_PARTIAL_PEC;
+
data->chip.ops = &lm90_ops;
data->chip.info = data->info;
- data->info[0] = HWMON_CHANNEL_INFO(chip,
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS);
+ data->info[0] = &data->chip_info;
+ info = &data->chip_info;
+ info->type = hwmon_chip;
+ info->config = data->chip_config;
+
+ data->chip_config[0] = HWMON_C_REGISTER_TZ;
+ if (data->flags & LM90_HAVE_ALARMS)
+ data->chip_config[0] |= HWMON_C_ALARMS;
+ if (data->flags & LM90_HAVE_CONVRATE)
+ data->chip_config[0] |= HWMON_C_UPDATE_INTERVAL;
+ if (data->flags & LM90_HAVE_FAULTQUEUE)
+ data->chip_config[0] |= HWMON_C_TEMP_SAMPLES;
data->info[1] = &data->temp_info;
info = &data->temp_info;
info->type = hwmon_temp;
info->config = data->channel_config;
- data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
- HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM;
- data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
- HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT;
+ data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_MAX_ALARM;
+ data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_MAX_ALARM | HWMON_T_FAULT;
+
+ if (data->flags & LM90_HAVE_LOW) {
+ data->channel_config[0] |= HWMON_T_MIN | HWMON_T_MIN_ALARM;
+ data->channel_config[1] |= HWMON_T_MIN | HWMON_T_MIN_ALARM;
+ }
if (data->flags & LM90_HAVE_CRIT) {
data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
@@ -1951,17 +2857,35 @@ static int lm90_probe(struct i2c_client *client)
data->channel_config[2] = HWMON_T_INPUT |
HWMON_T_MIN | HWMON_T_MAX |
HWMON_T_CRIT | HWMON_T_CRIT_HYST |
- HWMON_T_EMERGENCY | HWMON_T_EMERGENCY_HYST |
HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
- HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM |
- HWMON_T_FAULT;
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ data->channel_config[2] |= HWMON_T_EMERGENCY |
+ HWMON_T_EMERGENCY_HYST;
+ }
+ if (data->flags & LM90_HAVE_EMERGENCY_ALARM)
+ data->channel_config[2] |= HWMON_T_EMERGENCY_ALARM;
+ if (data->flags & LM90_HAVE_OFFSET)
+ data->channel_config[2] |= HWMON_T_OFFSET;
}
+ data->faultqueue_mask = lm90_params[data->kind].faultqueue_mask;
+ data->faultqueue_depth = lm90_params[data->kind].faultqueue_depth;
data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
+ if (data->flags & LM90_HAVE_REMOTE_EXT)
+ data->reg_remote_ext = LM90_REG_REMOTE_TEMPL;
+ data->reg_status2 = lm90_params[data->kind].reg_status2;
/* Set maximum conversion rate */
data->max_convrate = lm90_params[data->kind].max_convrate;
+ /* Parse device-tree channel information */
+ if (client->dev.of_node) {
+ err = lm90_parse_dt_channel_info(client, data);
+ if (err)
+ return err;
+ }
+
/* Initialize the LM90 chip */
err = lm90_init_client(client, data);
if (err < 0) {
@@ -1973,7 +2897,7 @@ static int lm90_probe(struct i2c_client *client)
* The 'pec' attribute is attached to the i2c device and thus created
* separately.
*/
- if (client->flags & I2C_CLIENT_PEC) {
+ if (data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) {
err = device_create_file(dev, &dev_attr_pec);
if (err)
return err;
@@ -2007,12 +2931,10 @@ static int lm90_probe(struct i2c_client *client)
static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
unsigned int flag)
{
- u16 alarms;
-
if (type != I2C_PROTOCOL_SMBUS_ALERT)
return;
- if (lm90_is_tripped(client, &alarms)) {
+ if (lm90_is_tripped(client)) {
/*
* Disable ALERT# output, because these chips don't implement
* SMBus alert correctly; they should only hold the alert line
@@ -2021,9 +2943,13 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
struct lm90_data *data = i2c_get_clientdata(client);
if ((data->flags & LM90_HAVE_BROKEN_ALERT) &&
- (alarms & data->alert_alarms)) {
- dev_dbg(&client->dev, "Disabling ALERT#\n");
- lm90_update_confreg(data, data->config | 0x80);
+ (data->current_alarms & data->alert_alarms)) {
+ if (!(data->config & 0x80)) {
+ dev_dbg(&client->dev, "Disabling ALERT#\n");
+ lm90_update_confreg(data, data->config | 0x80);
+ }
+ schedule_delayed_work(&data->alert_work,
+ max_t(int, HZ, msecs_to_jiffies(data->update_interval)));
}
} else {
dev_dbg(&client->dev, "Everything OK\n");
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index ce2780768074..e093b1998296 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -7,7 +7,7 @@
* Reworked by Sven Schuchmann <schuchmann@schleissheimer.de>
* DT support added by Clemens Gruber <clemens.gruber@pqgruber.com>
*
- * This driver export the value of analog input voltage to sysfs, the
+ * This driver exports the value of analog input voltage to sysfs, the
* voltage unit is mV. Through the sysfs interface, lm-sensors tool
* can also display the input voltage.
*/
@@ -45,19 +45,29 @@ enum chips {
* Client data (each client gets its own)
*/
struct mcp3021_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
u32 vdd; /* supply and reference voltage in millivolt */
u16 sar_shift;
u16 sar_mask;
u8 output_res;
};
-static int mcp3021_read16(struct i2c_client *client)
+static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
{
- struct mcp3021_data *data = i2c_get_clientdata(client);
- int ret;
- u16 reg;
+ return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
+}
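
As a quick numeric check of volts_from_reg() (illustrative values only): with vdd = 3300 mV and output_res = 10, as on the 10-bit MCP3021, a raw reading of 512 maps to DIV_ROUND_CLOSEST(3300 * 512, 1 << 10) = 1650 mV, i.e. half of Vdd for a mid-scale code, as expected.
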
+
+static int mcp3021_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct mcp3021_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
__be16 buf;
+ u16 reg;
+ int ret;
+
+ if (type != hwmon_in)
+ return -EOPNOTSUPP;
ret = i2c_master_recv(client, (char *)&buf, 2);
if (ret < 0)
@@ -74,39 +84,46 @@ static int mcp3021_read16(struct i2c_client *client)
*/
reg = (reg >> data->sar_shift) & data->sar_mask;
- return reg;
-}
+ *val = volts_from_reg(data, reg);
-static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
-{
- return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
+ return 0;
}
-static ssize_t in0_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static umode_t mcp3021_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct mcp3021_data *data = i2c_get_clientdata(client);
- int reg, in_input;
+ if (type != hwmon_in)
+ return 0;
- reg = mcp3021_read16(client);
- if (reg < 0)
- return reg;
+ if (attr != hwmon_in_input)
+ return 0;
- in_input = volts_from_reg(data, reg);
-
- return sprintf(buf, "%d\n", in_input);
+ return 0444;
}
-static DEVICE_ATTR_RO(in0_input);
+static const struct hwmon_channel_info *mcp3021_info[] = {
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops mcp3021_hwmon_ops = {
+ .is_visible = mcp3021_is_visible,
+ .read = mcp3021_read,
+};
+
+static const struct hwmon_chip_info mcp3021_chip_info = {
+ .ops = &mcp3021_hwmon_ops,
+ .info = mcp3021_info,
+};
static const struct i2c_device_id mcp3021_id[];
static int mcp3021_probe(struct i2c_client *client)
{
- int err;
struct mcp3021_data *data = NULL;
struct device_node *np = client->dev.of_node;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -147,34 +164,17 @@ static int mcp3021_probe(struct i2c_client *client)
break;
}
+ data->client = client;
+
if (data->vdd > MCP3021_VDD_REF_MAX || data->vdd < MCP3021_VDD_REF_MIN)
return -EINVAL;
- err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- sysfs_remove_file(&client->dev.kobj, &dev_attr_in0_input.attr);
- return err;
-}
-
-static int mcp3021_remove(struct i2c_client *client)
-{
- struct mcp3021_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_file(&client->dev.kobj, &dev_attr_in0_input.attr);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
+ client->name,
+ data,
+ &mcp3021_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id mcp3021_id[] = {
@@ -199,7 +199,6 @@ static struct i2c_driver mcp3021_driver = {
.of_match_table = of_match_ptr(of_mcp3021_match),
},
.probe_new = mcp3021_probe,
- .remove = mcp3021_remove,
.id_table = mcp3021_id,
};
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 446964cbae4c..da9ec6983e13 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -1480,7 +1480,7 @@ static int nct6775_update_pwm_limits(struct device *dev)
return 0;
}
-static struct nct6775_data *nct6775_update_device(struct device *dev)
+struct nct6775_data *nct6775_update_device(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
int i, j, err = 0;
@@ -1615,6 +1615,7 @@ out:
mutex_unlock(&data->update_lock);
return err ? ERR_PTR(err) : data;
}
+EXPORT_SYMBOL_GPL(nct6775_update_device);
/*
* Sysfs callback functions
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 6d46c9401898..41c97cfacfb8 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -359,7 +359,7 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
{
int err;
u16 tmp;
- struct nct6775_data *data = dev_get_drvdata(dev);
+ struct nct6775_data *data = nct6775_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -1083,6 +1083,7 @@ static const char * const asus_wmi_boards[] = {
"TUF GAMING B550M-PLUS",
"TUF GAMING B550M-PLUS (WI-FI)",
"TUF GAMING B550-PLUS",
+ "TUF GAMING B550-PLUS WIFI II",
"TUF GAMING B550-PRO",
"TUF GAMING X570-PLUS",
"TUF GAMING X570-PLUS (WI-FI)",
@@ -1200,10 +1201,8 @@ static int __init sensors_nct6775_platform_init(void)
exit_device_put:
platform_device_put(pdev[i]);
exit_device_unregister:
- while (--i >= 0) {
- if (pdev[i])
- platform_device_unregister(pdev[i]);
- }
+ while (i--)
+ platform_device_unregister(pdev[i]);
exit_unregister:
platform_driver_unregister(&nct6775_driver);
return err;
@@ -1213,10 +1212,8 @@ static void __exit sensors_nct6775_platform_exit(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(pdev); i++) {
- if (pdev[i])
- platform_device_unregister(pdev[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(pdev); i++)
+ platform_device_unregister(pdev[i]);
platform_driver_unregister(&nct6775_driver);
}
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index 93f708148e65..be41848c3cd2 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -196,6 +196,8 @@ static inline int nct6775_write_value(struct nct6775_data *data, u16 reg, u16 va
return regmap_write(data->regmap, reg, value);
}
+struct nct6775_data *nct6775_update_device(struct device *dev);
+
bool nct6775_reg_is_word_sized(struct nct6775_data *data, u16 reg);
int nct6775_probe(struct device *dev, struct nct6775_data *data,
const struct regmap_config *regmapcfg);
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 157b73a3da29..45407b12db4b 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -729,18 +729,14 @@ static ssize_t occ_show_extended(struct device *dev,
rc = sysfs_emit(buf, "%u",
get_unaligned_be32(&extn->sensor_id));
} else {
- rc = sysfs_emit(buf, "%02x%02x%02x%02x\n",
- extn->name[0], extn->name[1],
- extn->name[2], extn->name[3]);
+ rc = sysfs_emit(buf, "%4phN\n", extn->name);
}
break;
case 1:
rc = sysfs_emit(buf, "%02x\n", extn->flags);
break;
case 2:
- rc = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
- extn->data[0], extn->data[1], extn->data[2],
- extn->data[3], extn->data[4], extn->data[5]);
+ rc = sysfs_emit(buf, "%6phN\n", extn->data);
break;
default:
return -EINVAL;
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index a91937e28e12..c1e0a1d96cd4 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -55,8 +55,7 @@ static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp,
mutex_lock(&ctx->sbe_error_lock);
if (!ctx->sbe_error) {
if (resp_len > ctx->ffdc_size) {
- if (ctx->ffdc)
- kvfree(ctx->ffdc);
+ kvfree(ctx->ffdc);
ctx->ffdc = kvmalloc(resp_len, GFP_KERNEL);
if (!ctx->ffdc) {
ctx->ffdc_len = 0;
@@ -170,8 +169,7 @@ static int p9_sbe_occ_remove(struct platform_device *pdev)
ctx->sbe = NULL;
occ_shutdown(occ);
- if (ctx->ffdc)
- kvfree(ctx->ffdc);
+ kvfree(ctx->ffdc);
return 0;
}
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index dfae76db65ae..951e4a9ff2d6 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -181,6 +181,15 @@ config SENSORS_LM25066_REGULATOR
If you say yes here you get regulator support for National
Semiconductor LM25066, LM5064, and LM5066.
+config SENSORS_LT7182S
+ tristate "Analog Devices LT7182S"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices LT7182S.
+
+ This driver can also be built as a module. If so, the module will
+ be called lt7182s.
+
config SENSORS_LTC2978
tristate "Linear Technologies LTC2978 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 4678fba5012c..e2fe86f98965 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX15301) += max15301.o
diff --git a/drivers/hwmon/pmbus/lt7182s.c b/drivers/hwmon/pmbus/lt7182s.c
new file mode 100644
index 000000000000..4cfe476fc92d
--- /dev/null
+++ b/drivers/hwmon/pmbus/lt7182s.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Analog Devices LT7182S
+ *
+ * Copyright (c) 2022 Guenter Roeck
+ *
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include "pmbus.h"
+
+#define LT7182S_NUM_PAGES 2
+
+#define MFR_READ_EXTVCC 0xcd
+#define MFR_READ_ITH 0xce
+#define MFR_CONFIG_ALL_LT7182S 0xd1
+#define MFR_IOUT_PEAK 0xd7
+#define MFR_ADC_CONTROL_LT7182S 0xd8
+
+#define MFR_DEBUG_TELEMETRY BIT(0)
+
+#define MFR_VOUT_PEAK 0xdd
+#define MFR_VIN_PEAK 0xde
+#define MFR_TEMPERATURE_1_PEAK 0xdf
+#define MFR_CLEAR_PEAKS 0xe3
+
+#define MFR_CONFIG_IEEE BIT(8)
+
+static int lt7182s_read_word_data(struct i2c_client *client, int page, int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ if (page == 0 || page == 1)
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_ITH);
+ else
+ ret = pmbus_read_word_data(client, 0, phase, MFR_READ_EXTVCC);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, page, phase, MFR_IOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, page, phase, MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase, MFR_VIN_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page, phase, MFR_TEMPERATURE_1_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ ret = (page == 0) ? 0 : -ENODATA;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int lt7182s_write_word_data(struct i2c_client *client, int page, int reg, u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ ret = pmbus_write_byte(client, 0, MFR_CLEAR_PEAKS);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info lt7182s_info = {
+ .pages = LT7182S_NUM_PAGES,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT,
+ .read_word_data = lt7182s_read_word_data,
+ .write_word_data = lt7182s_write_word_data,
+};
+
+static int lt7182s_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read PMBUS_MFR_ID\n");
+ return ret;
+ }
+ if (ret != 3 || strncmp(buf, "ADI", 3)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Manufacturer '%s' not supported\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read PMBUS_MFR_MODEL\n");
+ return ret;
+ }
+ if (ret != 7 || strncmp(buf, "LT7182S", 7)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Model '%s' not supported\n", buf);
+ return -ENODEV;
+ }
+
+ info = devm_kmemdup(dev, &lt7182s_info,
+ sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Set data format to IEEE754 if configured */
+ ret = i2c_smbus_read_word_data(client, MFR_CONFIG_ALL_LT7182S);
+ if (ret < 0)
+ return ret;
+ if (ret & MFR_CONFIG_IEEE) {
+ info->format[PSC_VOLTAGE_IN] = ieee754;
+ info->format[PSC_VOLTAGE_OUT] = ieee754;
+ info->format[PSC_CURRENT_IN] = ieee754;
+ info->format[PSC_CURRENT_OUT] = ieee754;
+ info->format[PSC_TEMPERATURE] = ieee754;
+ info->format[PSC_POWER] = ieee754;
+ }
+
+ /* Enable VMON output if configured */
+ ret = i2c_smbus_read_byte_data(client, MFR_ADC_CONTROL_LT7182S);
+ if (ret < 0)
+ return ret;
+ if (ret & MFR_DEBUG_TELEMETRY) {
+ info->pages = 3;
+ info->func[0] |= PMBUS_HAVE_VMON;
+ info->func[1] |= PMBUS_HAVE_VMON;
+ info->func[2] = PMBUS_HAVE_VMON;
+ }
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id lt7182s_id[] = {
+ { "lt7182s", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lt7182s_id);
+
+static const struct of_device_id __maybe_unused lt7182s_of_match[] = {
+ { .compatible = "adi,lt7182s" },
+ {}
+};
+
+static struct i2c_driver lt7182s_driver = {
+ .driver = {
+ .name = "lt7182s",
+ .of_match_table = of_match_ptr(lt7182s_of_match),
+ },
+ .probe_new = lt7182s_probe,
+ .id_table = lt7182s_id,
+};
+
+module_i2c_driver(lt7182s_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices LT7182S");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 531aa674a928..6d2592731ba3 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -562,7 +562,24 @@ static const struct i2c_device_id ltc2978_id[] = {
MODULE_DEVICE_TABLE(i2c, ltc2978_id);
#if IS_ENABLED(CONFIG_SENSORS_LTC2978_REGULATOR)
+#define LTC2978_ADC_RES 0xFFFF
+#define LTC2978_N_ADC 122
+#define LTC2978_MAX_UV (LTC2978_ADC_RES * LTC2978_N_ADC)
+#define LTC2978_UV_STEP 1000
+#define LTC2978_N_VOLTAGES ((LTC2978_MAX_UV / LTC2978_UV_STEP) + 1)
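
Working through the constants above (the 122 factor is presumably the ADC LSB size in µV): LTC2978_MAX_UV = 0xFFFF * 122 = 7995270 µV, so LTC2978_N_VOLTAGES = 7995270 / 1000 + 1 = 7996, i.e. selectors 0..7995 covering 0 V up to roughly 7.995 V in 1 mV steps.
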
+
static const struct regulator_desc ltc2978_reg_desc[] = {
+ PMBUS_REGULATOR_STEP("vout", 0, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 1, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 2, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 3, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 4, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 5, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 6, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 7, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+};
+
+static const struct regulator_desc ltc2978_reg_desc_default[] = {
PMBUS_REGULATOR("vout", 0),
PMBUS_REGULATOR("vout", 1),
PMBUS_REGULATOR("vout", 2),
@@ -839,10 +856,29 @@ static int ltc2978_probe(struct i2c_client *client)
#if IS_ENABLED(CONFIG_SENSORS_LTC2978_REGULATOR)
info->num_regulators = info->pages;
- info->reg_desc = ltc2978_reg_desc;
- if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc)) {
- dev_err(&client->dev, "num_regulators too large!");
- info->num_regulators = ARRAY_SIZE(ltc2978_reg_desc);
+ switch (data->id) {
+ case ltc2972:
+ case ltc2974:
+ case ltc2975:
+ case ltc2977:
+ case ltc2978:
+ case ltc2979:
+ case ltc2980:
+ case ltm2987:
+ info->reg_desc = ltc2978_reg_desc;
+ if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc)) {
+ dev_warn(&client->dev, "num_regulators too large!");
+ info->num_regulators = ARRAY_SIZE(ltc2978_reg_desc);
+ }
+ break;
+ default:
+ info->reg_desc = ltc2978_reg_desc_default;
+ if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc_default)) {
+ dev_warn(&client->dev, "num_regulators too large!");
+ info->num_regulators =
+ ARRAY_SIZE(ltc2978_reg_desc_default);
+ }
+ break;
}
#endif
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index c031a9700ace..7daaf0caf4d3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -406,7 +406,7 @@ enum pmbus_sensor_classes {
#define PMBUS_PHASE_VIRTUAL BIT(30) /* Phases on this page are virtual */
#define PMBUS_PAGE_VIRTUAL BIT(31) /* Page is virtual */
-enum pmbus_data_format { linear = 0, direct, vid };
+enum pmbus_data_format { linear = 0, ieee754, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
struct pmbus_driver_info {
@@ -463,8 +463,8 @@ struct pmbus_driver_info {
extern const struct regulator_ops pmbus_regulator_ops;
-/* Macro for filling in array of struct regulator_desc */
-#define PMBUS_REGULATOR(_name, _id) \
+/* Macros for filling in array of struct regulator_desc */
+#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step) \
[_id] = { \
.name = (_name # _id), \
.supply_name = "vin", \
@@ -474,8 +474,12 @@ extern const struct regulator_ops pmbus_regulator_ops;
.ops = &pmbus_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .n_voltages = _voltages, \
+ .uV_step = _step, \
}
+#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0)
+
/* Function declarations */
void pmbus_clear_cache(struct i2c_client *client);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 02912022853d..f10bac8860fc 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -104,6 +104,9 @@ struct pmbus_data {
s16 currpage; /* current page, -1 for unknown/unset */
s16 currphase; /* current phase, 0xff for all, -1 for unknown/unset */
+
+ int vout_low[PMBUS_PAGES]; /* voltage low margin */
+ int vout_high[PMBUS_PAGES]; /* voltage high margin */
};
struct pmbus_debugfs_entry {
@@ -441,6 +444,18 @@ int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_NS_GPL(pmbus_update_byte_data, PMBUS);
+static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
+ char *data_buf)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page, 0xff);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_block_data(client, reg, data_buf);
+}
+
static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
int reg)
{
@@ -578,6 +593,22 @@ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
}
EXPORT_SYMBOL_NS_GPL(pmbus_check_word_register, PMBUS);
+static bool __maybe_unused pmbus_check_block_register(struct i2c_client *client,
+ int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ char data_buf[I2C_SMBUS_BLOCK_MAX + 2];
+
+ rv = pmbus_read_block_data(client, page, reg, data_buf);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client);
+ if (rv < 0 && (data->flags & PMBUS_READ_STATUS_AFTER_FAILED_CHECK))
+ data->read_status(client, -1);
+ pmbus_clear_fault_page(client, -1);
+ return rv >= 0;
+}
+
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -612,6 +643,66 @@ static void pmbus_update_sensor_data(struct i2c_client *client, struct pmbus_sen
}
/*
+ * Convert ieee754 sensor values to milli- or micro-units
+ * depending on sensor type.
+ *
+ * ieee754 data format:
+ * bit 15: sign
+ * bit 10..14: exponent
+ * bit 0..9: mantissa
+ * exponent=0:
+ * v=(−1)^signbit * 2^(−14) * 0.significantbits
+ * exponent=1..30:
+ * v=(−1)^signbit * 2^(exponent - 15) * 1.significantbits
+ * exponent=31:
+ * v=NaN
+ *
+ * Add the number of mantissa bits into the calculations for simplicity.
+ * To do that, add '10' to the exponent. By doing that, we can just add
+ * 0x400 to normal values and get the expected result.
+ */
+static long pmbus_reg2data_ieee754(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ int exponent;
+ bool sign;
+ long val;
+
+ /* only support half precision for now */
+ sign = sensor->data & 0x8000;
+ exponent = (sensor->data >> 10) & 0x1f;
+ val = sensor->data & 0x3ff;
+
+ if (exponent == 0) { /* subnormal */
+ exponent = -(14 + 10);
+ } else if (exponent == 0x1f) { /* NaN, convert to min/max */
+ exponent = 0;
+ val = 65504;
+ } else {
+ exponent -= (15 + 10); /* normal */
+ val |= 0x400;
+ }
+
+ /* scale result to milli-units for all sensors except fans */
+ if (sensor->class != PSC_FAN)
+ val = val * 1000L;
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER)
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ if (sign)
+ val = -val;
+
+ return val;
+}
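
The comment above describes the half-precision layout; a minimal standalone sketch of the same decode (plain user-space C, not part of the patch, with a made-up helper name) can be used to check the scaling by hand:

#include <stdio.h>

/*
 * Decode an IEEE 754 half-precision word to milli-units, mirroring the
 * steps in pmbus_reg2data_ieee754() for a non-fan, non-power sensor.
 */
static long half_to_milli(unsigned int word)
{
        int exponent = (word >> 10) & 0x1f;
        long val = word & 0x3ff;
        int sign = word & 0x8000;

        if (exponent == 0) {            /* subnormal */
                exponent = -(14 + 10);
        } else if (exponent == 0x1f) {  /* NaN: clamp to max */
                exponent = 0;
                val = 65504;
        } else {                        /* normal: add implied mantissa bit */
                exponent -= (15 + 10);
                val |= 0x400;
        }

        val *= 1000;                    /* scale to milli-units */
        if (exponent >= 0)
                val <<= exponent;
        else
                val >>= -exponent;

        return sign ? -val : val;
}

int main(void)
{
        printf("%ld\n", half_to_milli(0x3c00)); /* +1.0 -> 1000  */
        printf("%ld\n", half_to_milli(0xc500)); /* -5.0 -> -5000 */
        return 0;
}

Compiled on its own, this prints 1000 and -5000, matching the milli-unit scaling the driver reports for voltage, current and temperature sensors.
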
+
+/*
* Convert linear sensor values to milli- or micro-units
* depending on sensor type.
*/
@@ -741,6 +832,9 @@ static s64 pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
case vid:
val = pmbus_reg2data_vid(data, sensor);
break;
+ case ieee754:
+ val = pmbus_reg2data_ieee754(data, sensor);
+ break;
case linear:
default:
val = pmbus_reg2data_linear(data, sensor);
@@ -749,8 +843,72 @@ static s64 pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
return val;
}
-#define MAX_MANTISSA (1023 * 1000)
-#define MIN_MANTISSA (511 * 1000)
+#define MAX_IEEE_MANTISSA (0x7ff * 1000)
+#define MIN_IEEE_MANTISSA (0x400 * 1000)
+
+static u16 pmbus_data2reg_ieee754(struct pmbus_data *data,
+ struct pmbus_sensor *sensor, long val)
+{
+ u16 exponent = (15 + 10);
+ long mantissa;
+ u16 sign = 0;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (val < 0) {
+ sign = 0x8000;
+ val = -val;
+ }
+
+	/* Power is in uW. Convert to mW before calculating the exponent. */
+ if (sensor->class == PSC_POWER)
+ val = DIV_ROUND_CLOSEST(val, 1000L);
+
+ /*
+ * For simplicity, convert fan data to milli-units
+ * before calculating the exponent.
+ */
+ if (sensor->class == PSC_FAN)
+ val = val * 1000;
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val > MAX_IEEE_MANTISSA && exponent < 30) {
+ exponent++;
+ val >>= 1;
+ }
+ /*
+ * Increase small mantissa to generate valid 'normal'
+ * number
+ */
+ while (val < MIN_IEEE_MANTISSA && exponent > 1) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /*
+	 * Ensure that the resulting mantissa is within the valid range of
+	 * 0x400..0x7ff, where bit 10 reflects the implied high bit of
+	 * normalized ieee754 numbers. That upper bit is then removed by
+	 * the mask against 0x3ff in the final assignment.
+ */
+ if (mantissa > 0x7ff)
+ mantissa = 0x7ff;
+ else if (mantissa < 0x400)
+ mantissa = 0x400;
+
+ /* Convert to sign, 5 bit exponent, 10 bit mantissa */
+ return sign | (mantissa & 0x3ff) | ((exponent << 10) & 0x7c00);
+}
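
As a round-trip check of the encode path (values chosen for illustration): writing 1000 milli-units starts with exponent = 25; the value is doubled ten times to reach the minimum mantissa of 0x400 * 1000, leaving exponent = 15 and a mantissa of 0x400, so the returned word is (15 << 10) | (0x400 & 0x3ff) = 0x3c00, which the decode path above turns back into 1000.
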
+
+#define MAX_LIN_MANTISSA (1023 * 1000)
+#define MIN_LIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
struct pmbus_sensor *sensor, s64 val)
@@ -796,12 +954,12 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
val = val * 1000LL;
/* Reduce large mantissa until it fits into 10 bit */
- while (val >= MAX_MANTISSA && exponent < 15) {
+ while (val >= MAX_LIN_MANTISSA && exponent < 15) {
exponent++;
val >>= 1;
}
/* Increase small mantissa to improve precision */
- while (val < MIN_MANTISSA && exponent > -15) {
+ while (val < MIN_LIN_MANTISSA && exponent > -15) {
exponent--;
val <<= 1;
}
@@ -875,6 +1033,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
case vid:
regval = pmbus_data2reg_vid(data, sensor, val);
break;
+ case ieee754:
+ regval = pmbus_data2reg_ieee754(data, sensor, val);
+ break;
case linear:
default:
regval = pmbus_data2reg_linear(data, sensor, val);
@@ -2369,6 +2530,10 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != direct)
return -ENODEV;
break;
+ case 3: /* ieee 754 half precision */
+ if (data->info->format[PSC_VOLTAGE_OUT] != ieee754)
+ return -ENODEV;
+ break;
default:
return -ENODEV;
}
@@ -2388,6 +2553,42 @@ static int pmbus_read_status_word(struct i2c_client *client, int page)
return _pmbus_read_word_data(client, page, 0xff, PMBUS_STATUS_WORD);
}
+/* PEC attribute support */
+
+static ssize_t pec_show(struct device *dev, struct device_attribute *dummy,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return sysfs_emit(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
+}
+
+static ssize_t pec_store(struct device *dev, struct device_attribute *dummy,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ bool enable;
+ int err;
+
+ err = kstrtobool(buf, &enable);
+ if (err < 0)
+ return err;
+
+ if (enable)
+ client->flags |= I2C_CLIENT_PEC;
+ else
+ client->flags &= ~I2C_CLIENT_PEC;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(pec);
+
+static void pmbus_remove_pec(void *dev)
+{
+ device_remove_file(dev, &dev_attr_pec);
+}
+
static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
@@ -2474,6 +2675,20 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return ret;
}
+ if (client->flags & I2C_CLIENT_PEC) {
+ /*
+ * If I2C_CLIENT_PEC is set here, both the I2C adapter and the
+ * chip support PEC. Add 'pec' attribute to client device to let
+ * the user control it.
+ */
+ ret = device_create_file(dev, &dev_attr_pec);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, pmbus_remove_pec, dev);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -2636,6 +2851,58 @@ static int pmbus_regulator_get_error_flags(struct regulator_dev *rdev, unsigned
return 0;
}
+static int pmbus_regulator_get_low_margin(struct i2c_client *client, int page)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor s = {
+ .page = page,
+ .class = PSC_VOLTAGE_OUT,
+ .convert = true,
+ .data = -1,
+ };
+
+ if (!data->vout_low[page]) {
+ if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MIN))
+ s.data = _pmbus_read_word_data(client, page, 0xff,
+ PMBUS_MFR_VOUT_MIN);
+ if (s.data < 0) {
+ s.data = _pmbus_read_word_data(client, page, 0xff,
+ PMBUS_VOUT_MARGIN_LOW);
+ if (s.data < 0)
+ return s.data;
+ }
+ data->vout_low[page] = pmbus_reg2data(data, &s);
+ }
+
+ return data->vout_low[page];
+}
+
+static int pmbus_regulator_get_high_margin(struct i2c_client *client, int page)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor s = {
+ .page = page,
+ .class = PSC_VOLTAGE_OUT,
+ .convert = true,
+ .data = -1,
+ };
+
+ if (!data->vout_high[page]) {
+ if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MAX))
+ s.data = _pmbus_read_word_data(client, page, 0xff,
+ PMBUS_MFR_VOUT_MAX);
+ if (s.data < 0) {
+ s.data = _pmbus_read_word_data(client, page, 0xff,
+ PMBUS_VOUT_MARGIN_HIGH);
+ if (s.data < 0)
+ return s.data;
+ }
+ data->vout_high[page] = pmbus_reg2data(data, &s);
+ }
+
+ return data->vout_high[page];
+}
+
static int pmbus_regulator_get_voltage(struct regulator_dev *rdev)
{
struct device *dev = rdev_get_dev(rdev);
@@ -2671,24 +2938,13 @@ static int pmbus_regulator_set_voltage(struct regulator_dev *rdev, int min_uv,
*selector = 0;
- if (pmbus_check_word_register(client, s.page, PMBUS_MFR_VOUT_MIN))
- s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_MFR_VOUT_MIN);
- if (s.data < 0) {
- s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_VOUT_MARGIN_LOW);
- if (s.data < 0)
- return s.data;
- }
- low = pmbus_reg2data(data, &s);
+ low = pmbus_regulator_get_low_margin(client, s.page);
+ if (low < 0)
+ return low;
- s.data = -1;
- if (pmbus_check_word_register(client, s.page, PMBUS_MFR_VOUT_MAX))
- s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_MFR_VOUT_MAX);
- if (s.data < 0) {
- s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_VOUT_MARGIN_HIGH);
- if (s.data < 0)
- return s.data;
- }
- high = pmbus_reg2data(data, &s);
+ high = pmbus_regulator_get_high_margin(client, s.page);
+ if (high < 0)
+ return high;
/* Make sure we are within margins */
if (low > val)
@@ -2701,6 +2957,35 @@ static int pmbus_regulator_set_voltage(struct regulator_dev *rdev, int min_uv,
return _pmbus_write_word_data(client, s.page, PMBUS_VOUT_COMMAND, (u16)val);
}
+static int pmbus_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct device *dev = rdev_get_dev(rdev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ int val, low, high;
+
+ if (selector >= rdev->desc->n_voltages ||
+ selector < rdev->desc->linear_min_sel)
+ return -EINVAL;
+
+ selector -= rdev->desc->linear_min_sel;
+ val = DIV_ROUND_CLOSEST(rdev->desc->min_uV +
+ (rdev->desc->uV_step * selector), 1000); /* convert to mV */
+
+ low = pmbus_regulator_get_low_margin(client, rdev_get_id(rdev));
+ if (low < 0)
+ return low;
+
+ high = pmbus_regulator_get_high_margin(client, rdev_get_id(rdev));
+ if (high < 0)
+ return high;
+
+ if (val >= low && val <= high)
+ return val * 1000; /* unit is uV */
+
+ return 0;
+}
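
For example (with made-up but representative numbers): on an LTC2978 page with uV_step = 1000 and the default min_uV of 0, selector 3300 converts to 3300 mV; if the cached low and high margins are, say, 500 mV and 5500 mV, the function returns 3300000 µV, while a selector outside that window yields 0 and is effectively not offered to the regulator core.
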
+
const struct regulator_ops pmbus_regulator_ops = {
.enable = pmbus_regulator_enable,
.disable = pmbus_regulator_disable,
@@ -2708,6 +2993,7 @@ const struct regulator_ops pmbus_regulator_ops = {
.get_error_flags = pmbus_regulator_get_error_flags,
.get_voltage = pmbus_regulator_get_voltage,
.set_voltage = pmbus_regulator_set_voltage,
+ .list_voltage = pmbus_regulator_list_voltage,
};
EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, PMBUS);
@@ -2782,41 +3068,33 @@ static int pmbus_debugfs_get_status(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_status, pmbus_debugfs_get_status,
NULL, "0x%04llx\n");
-static int pmbus_debugfs_get_pec(void *data, u64 *val)
-{
- struct i2c_client *client = data;
-
- *val = !!(client->flags & I2C_CLIENT_PEC);
-
- return 0;
-}
-
-static int pmbus_debugfs_set_pec(void *data, u64 val)
+static ssize_t pmbus_debugfs_mfr_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
- struct i2c_client *client = data;
-
- if (!val) {
- client->flags &= ~I2C_CLIENT_PEC;
- return 0;
- }
-
- if (val != 1)
- return -EINVAL;
+ struct pmbus_debugfs_entry *entry = file->private_data;
+ char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
- rc = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ rc = pmbus_read_block_data(entry->client, entry->page, entry->reg,
+ data);
if (rc < 0)
return rc;
- if (!(rc & PB_CAPABILITY_ERROR_CHECK))
- return -EOPNOTSUPP;
+	/* Add a newline at the end of the read data */
+ data[rc] = '\n';
- client->flags |= I2C_CLIENT_PEC;
+	/* Include the newline in the length */
+ rc += 1;
- return 0;
+ return simple_read_from_buffer(buf, count, ppos, data, rc);
}
-DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_pec, pmbus_debugfs_get_pec,
- pmbus_debugfs_set_pec, "%llu\n");
+
+static const struct file_operations pmbus_debugfs_ops_mfr = {
+ .llseek = noop_llseek,
+ .read = pmbus_debugfs_mfr_read,
+ .write = NULL,
+ .open = simple_open,
+};
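
Assuming the usual pmbus debugfs layout (a per-device directory below /sys/kernel/debug/pmbus/), reading one of the new entries such as mfr_model returns the raw block-read result followed by the newline appended above.
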
static void pmbus_remove_debugfs(void *data)
{
@@ -2846,16 +3124,80 @@ static int pmbus_init_debugfs(struct i2c_client *client,
return -ENODEV;
}
- /* Allocate the max possible entries we need. */
+ /*
+	 * Allocate the maximum number of entries we may need:
+	 * 6 device-specific entries plus 10 entries per page.
+	 */
entries = devm_kcalloc(data->dev,
- data->info->pages * 10, sizeof(*entries),
+ 6 + data->info->pages * 10, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- debugfs_create_file("pec", 0664, data->debugfs, client,
- &pmbus_debugfs_ops_pec);
-
+ /*
+ * Add device-specific entries.
+	 * Please note that the PMBus standard allows all registers to be
+	 * page-specific.
+	 * To reduce the number of debugfs entries for devices with many pages,
+ * assume that values of the following registers are the same for all
+ * pages and report values only for page 0.
+ */
+ if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_MFR_ID;
+ debugfs_create_file("mfr_id", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_mfr);
+ }
+
+ if (pmbus_check_block_register(client, 0, PMBUS_MFR_MODEL)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_MFR_MODEL;
+ debugfs_create_file("mfr_model", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_mfr);
+ }
+
+ if (pmbus_check_block_register(client, 0, PMBUS_MFR_REVISION)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_MFR_REVISION;
+ debugfs_create_file("mfr_revision", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_mfr);
+ }
+
+ if (pmbus_check_block_register(client, 0, PMBUS_MFR_LOCATION)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_MFR_LOCATION;
+ debugfs_create_file("mfr_location", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_mfr);
+ }
+
+ if (pmbus_check_block_register(client, 0, PMBUS_MFR_DATE)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_MFR_DATE;
+ debugfs_create_file("mfr_date", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_mfr);
+ }
+
+ if (pmbus_check_block_register(client, 0, PMBUS_MFR_SERIAL)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_MFR_SERIAL;
+ debugfs_create_file("mfr_serial", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_mfr);
+ }
+
+ /* Add page specific entries */
for (i = 0; i < data->info->pages; ++i) {
/* Check accessibility of status register if it's not page 0 */
if (!i || pmbus_check_status_register(client, i)) {
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 3ece53adabd6..de3a0886c2f7 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -523,6 +523,28 @@ static int __init sch56xx_device_add(int address, const char *name)
return PTR_ERR_OR_ZERO(sch56xx_pdev);
}
+static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
+ },
+ },
+ { }
+};
+
/* For autoloading only */
static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
{
@@ -543,16 +565,18 @@ static int __init sch56xx_init(void)
if (!dmi_check_system(sch56xx_dmi_table))
return -ENODEV;
- /*
- * Some machines like the Esprimo P720 and Esprimo C700 have
- * onboard devices named " Antiope"/" Theseus" instead of
- * "Antiope"/"Theseus", so we need to check for both.
- */
- if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
- !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
- return -ENODEV;
+ if (!dmi_check_system(sch56xx_dmi_override_table)) {
+ /*
+ * Some machines like the Esprimo P720 and Esprimo C700 have
+ * onboard devices named " Antiope"/" Theseus" instead of
+ * "Antiope"/"Theseus", so we need to check for both.
+ */
+ if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+ return -ENODEV;
+ }
}
/*
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 7f4a63959730..ae4d14257a11 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -1020,25 +1020,20 @@ err_release_reg:
static int sht15_remove(struct platform_device *pdev)
{
struct sht15_data *data = platform_get_drvdata(pdev);
+ int ret;
- /*
- * Make sure any reads from the device are done and
- * prevent new ones beginning
- */
- mutex_lock(&data->read_lock);
- if (sht15_soft_reset(data)) {
- mutex_unlock(&data->read_lock);
- return -EFAULT;
- }
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group);
+
+ ret = sht15_soft_reset(data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to reset device (%pe)\n", ERR_PTR(ret));
+
if (!IS_ERR(data->reg)) {
regulator_unregister_notifier(data->reg, &data->nb);
regulator_disable(data->reg);
}
- mutex_unlock(&data->read_lock);
-
return 0;
}
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 8bd6435c13e8..42762e87b014 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -140,7 +140,8 @@ static int tps23861_read_temp(struct tps23861_data *data, long *val)
static int tps23861_read_voltage(struct tps23861_data *data, int channel,
long *val)
{
- unsigned int regval;
+ __le16 regval;
+ long raw_val;
int err;
if (channel < TPS23861_NUM_PORTS) {
@@ -155,7 +156,8 @@ static int tps23861_read_voltage(struct tps23861_data *data, int channel,
if (err < 0)
return err;
- *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * VOLTAGE_LSB) / 1000;
+ raw_val = le16_to_cpu(regval);
+ *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, raw_val) * VOLTAGE_LSB) / 1000;
return 0;
}
@@ -163,8 +165,9 @@ static int tps23861_read_voltage(struct tps23861_data *data, int channel,
static int tps23861_read_current(struct tps23861_data *data, int channel,
long *val)
{
- unsigned int current_lsb;
- unsigned int regval;
+ long raw_val, current_lsb;
+ __le16 regval;
+
int err;
if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
@@ -178,7 +181,8 @@ static int tps23861_read_current(struct tps23861_data *data, int channel,
if (err < 0)
return err;
- *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * current_lsb) / 1000000;
+ raw_val = le16_to_cpu(regval);
+ *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, raw_val) * current_lsb) / 1000000;
return 0;
}
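/*
 * A short illustration (EXAMPLE_MASK and example_extract() are invented) of
 * the endianness fix above: the register bytes arrive little-endian, so the
 * value must pass through le16_to_cpu() before FIELD_GET() extracts the
 * bitfield, otherwise the result is wrong on big-endian hosts.
 */
#define EXAMPLE_MASK	GENMASK(13, 0)

static long example_extract(__le16 raw)
{
	return FIELD_GET(EXAMPLE_MASK, le16_to_cpu(raw));
}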
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 33eeff94fc2a..1fb3a2550e29 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -94,11 +94,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
* the module SYSSTATUS register
*/
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
goto runtime_err;
- }
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
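/*
 * A small sketch (example_runtime_use() is invented) of the helper swap
 * above: pm_runtime_resume_and_get() folds the old pm_runtime_get_sync() +
 * pm_runtime_put_noidle() error handling into one call, so a failed resume
 * leaves the usage count balanced and the caller simply bails out.
 */
static int example_runtime_use(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* no extra pm_runtime_put_noidle() needed */

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}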
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 364710966665..80ea45b3a815 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -19,6 +19,11 @@
#define QCOM_MUTEX_APPS_PROC_ID 1
#define QCOM_MUTEX_NUM_LOCKS 32
+struct qcom_hwspinlock_of_data {
+ u32 offset;
+ u32 stride;
+};
+
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
{
struct regmap_field *field = lock->priv;
@@ -63,9 +68,20 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};
+static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
+ .offset = 0x4,
+ .stride = 0x4,
+};
+
+/* All modern platforms have offset 0 and a stride of 4k */
+static const struct qcom_hwspinlock_of_data of_tcsr_mutex = {
+ .offset = 0,
+ .stride = 0x1000,
+};
+
static const struct of_device_id qcom_hwspinlock_of_match[] = {
- { .compatible = "qcom,sfpb-mutex" },
- { .compatible = "qcom,tcsr-mutex" },
+ { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
+ { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
@@ -112,12 +128,14 @@ static const struct regmap_config tcsr_mutex_config = {
static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
u32 *offset, u32 *stride)
{
+ const struct qcom_hwspinlock_of_data *data;
struct device *dev = &pdev->dev;
void __iomem *base;
- /* All modern platform has offset 0 and stride of 4k */
- *offset = 0;
- *stride = 0x1000;
+ data = of_device_get_match_data(dev);
+
+ *offset = data->offset;
+ *stride = data->stride;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
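/*
 * A minimal sketch (the example_* names and compatible string are invented;
 * of_device_get_match_data() is declared in <linux/of_device.h>) of the
 * match-data pattern introduced above: each compatible entry carries its
 * per-SoC constants and probe() fetches them instead of hard-coding one
 * register layout for every platform.
 */
struct example_of_data {
	u32 offset;
	u32 stride;
};

static const struct example_of_data example_soc_data = {
	.offset	= 0,
	.stride	= 0x1000,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-mutex", .data = &example_soc_data },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_of_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;
	/* use data->offset and data->stride to locate the lock registers */
	return 0;
}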
diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
index 2e1670523461..6ba013975741 100644
--- a/drivers/hwtracing/coresight/coresight-config.h
+++ b/drivers/hwtracing/coresight/coresight-config.h
@@ -134,6 +134,7 @@ struct cscfg_feature_desc {
* @active_cnt: ref count for activate on this configuration.
* @load_owner: handle to load owner for dynamic load and unload of configs.
* @fs_group: reference to configfs group for dynamic unload.
+ * @available: config can be activated - set true once a multi-stage load completes.
*/
struct cscfg_config_desc {
const char *name;
@@ -148,6 +149,7 @@ struct cscfg_config_desc {
atomic_t active_cnt;
void *load_owner;
struct config_group *fs_group;
+ bool available;
};
/**
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index ee6ce92ab4c3..1edfec1e9d18 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1424,6 +1424,7 @@ static int coresight_remove_match(struct device *dev, void *data)
* platform data.
*/
fwnode_handle_put(conn->child_fwnode);
+ conn->child_fwnode = NULL;
/* No need to continue */
break;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index c039b6ae206f..43bbd5dc3d3b 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -52,6 +52,7 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
* The PMU formats were orignally for ETMv3.5/PTM's ETMCR 'config';
* now take them as general formats and apply on all ETMs.
*/
+PMU_FORMAT_ATTR(branch_broadcast, "config:"__stringify(ETM_OPT_BRANCH_BROADCAST));
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
@@ -97,6 +98,7 @@ static struct attribute *etm_config_formats_attr[] = {
&format_attr_sinkid.attr,
&format_attr_preset.attr,
&format_attr_configid.attr,
+ &format_attr_branch_broadcast.attr,
NULL,
};
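/*
 * Usage note (the perf invocation is illustrative only): the format
 * attribute added above exposes the option as a perf event term, e.g.
 *
 *	perf record -e cs_etm/branch_broadcast/ -- <workload>
 *
 * and etm4_parse_event_config() further below maps that bit onto
 * ETM4_CFG_BIT_BB, rejecting the event when the hardware lacks
 * branch-broadcast support.
 */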
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 87299e99dabb..d39660a3e50c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -98,7 +98,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -106,7 +106,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -130,7 +130,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -138,7 +138,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -696,6 +696,20 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
}
+ /* branch broadcast - enable if selected and supported */
+ if (attr->config & BIT(ETM_OPT_BRANCH_BROADCAST)) {
+ if (!drvdata->trcbb) {
+ /*
+			 * Missing BB support could cause silent decode errors,
+			 * so fail to open if it is not supported.
+ */
+ ret = -EINVAL;
+ goto out;
+ } else {
+ config->cfg |= BIT(ETM4_CFG_BIT_BB);
+ }
+ }
+
out:
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 33869c1d20c3..4b21bb79f168 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -7,6 +7,7 @@
#define _CORESIGHT_CORESIGHT_ETM_H
#include <asm/local.h>
+#include <linux/const.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "coresight-priv.h"
@@ -515,7 +516,7 @@
({ \
u64 __val; \
\
- if (__builtin_constant_p((offset))) \
+ if (__is_constexpr((offset))) \
__val = read_etm4x_sysreg_const_offset((offset)); \
else \
__val = etm4x_sysreg_read((offset), true, (_64bit)); \
@@ -546,14 +547,14 @@
#define etm4x_read32(csa, offset) \
({ \
u32 __val = etm4x_relaxed_read32((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
#define etm4x_read64(csa, offset) \
({ \
u64 __val = etm4x_relaxed_read64((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
@@ -577,13 +578,13 @@
#define etm4x_write32(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write32((csa), (val), (offset)); \
} while (0)
#define etm4x_write64(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write64((csa), (val), (offset)); \
} while (0)
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 11850fd8c3b5..11138a9762b0 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -415,6 +415,27 @@ static void cscfg_remove_owned_csdev_features(struct coresight_device *csdev, vo
}
/*
+ * Unregister all configurations and features owned by load_owner from configfs.
+ * Although this is called without the list mutex being held, it is in the
+ * context of an unload operation, which is strictly serialised, so the
+ * lists cannot change during this call.
+ */
+static void cscfg_fs_unregister_cfgs_feats(void *load_owner)
+{
+ struct cscfg_config_desc *config_desc;
+ struct cscfg_feature_desc *feat_desc;
+
+ list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
+ if (config_desc->load_owner == load_owner)
+ cscfg_configfs_del_config(config_desc);
+ }
+ list_for_each_entry(feat_desc, &cscfg_mgr->feat_desc_list, item) {
+ if (feat_desc->load_owner == load_owner)
+ cscfg_configfs_del_feature(feat_desc);
+ }
+}
+
+/*
* removal is relatively easy - just remove from all lists, anything that
* matches the owner. Memory for the descriptors will be managed by the owner,
* memory for the csdev items is devm_ allocated with the individual csdev
@@ -426,6 +447,8 @@ static void cscfg_unload_owned_cfgs_feats(void *load_owner)
struct cscfg_feature_desc *feat_desc, *feat_tmp;
struct cscfg_registered_csdev *csdev_item;
+ lockdep_assert_held(&cscfg_mutex);
+
/* remove from each csdev instance feature and config lists */
list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
/*
@@ -439,7 +462,6 @@ static void cscfg_unload_owned_cfgs_feats(void *load_owner)
/* remove from the config descriptor lists */
list_for_each_entry_safe(config_desc, cfg_tmp, &cscfg_mgr->config_desc_list, item) {
if (config_desc->load_owner == load_owner) {
- cscfg_configfs_del_config(config_desc);
etm_perf_del_symlink_cscfg(config_desc);
list_del(&config_desc->item);
}
@@ -448,12 +470,90 @@ static void cscfg_unload_owned_cfgs_feats(void *load_owner)
/* remove from the feature descriptor lists */
list_for_each_entry_safe(feat_desc, feat_tmp, &cscfg_mgr->feat_desc_list, item) {
if (feat_desc->load_owner == load_owner) {
- cscfg_configfs_del_feature(feat_desc);
list_del(&feat_desc->item);
}
}
}
+/*
+ * load the features and configs to the lists - called with list mutex held
+ */
+static int cscfg_load_owned_cfgs_feats(struct cscfg_config_desc **config_descs,
+ struct cscfg_feature_desc **feat_descs,
+ struct cscfg_load_owner_info *owner_info)
+{
+ int i, err;
+
+ lockdep_assert_held(&cscfg_mutex);
+
+ /* load features first */
+ if (feat_descs) {
+ for (i = 0; feat_descs[i]; i++) {
+ err = cscfg_load_feat(feat_descs[i]);
+ if (err) {
+ pr_err("coresight-syscfg: Failed to load feature %s\n",
+ feat_descs[i]->name);
+ return err;
+ }
+ feat_descs[i]->load_owner = owner_info;
+ }
+ }
+
+ /* next any configurations to check feature dependencies */
+ if (config_descs) {
+ for (i = 0; config_descs[i]; i++) {
+ err = cscfg_load_config(config_descs[i]);
+ if (err) {
+ pr_err("coresight-syscfg: Failed to load configuration %s\n",
+ config_descs[i]->name);
+ return err;
+ }
+ config_descs[i]->load_owner = owner_info;
+ config_descs[i]->available = false;
+ }
+ }
+ return 0;
+}
+
+/* set configurations as available to activate at the end of the load process */
+static void cscfg_set_configs_available(struct cscfg_config_desc **config_descs)
+{
+ int i;
+
+ lockdep_assert_held(&cscfg_mutex);
+
+ if (config_descs) {
+ for (i = 0; config_descs[i]; i++)
+ config_descs[i]->available = true;
+ }
+}
+
+/*
+ * Create and register each of the configurations and features with configfs.
+ * Called without mutex being held.
+ */
+static int cscfg_fs_register_cfgs_feats(struct cscfg_config_desc **config_descs,
+ struct cscfg_feature_desc **feat_descs)
+{
+ int i, err;
+
+ if (feat_descs) {
+ for (i = 0; feat_descs[i]; i++) {
+ err = cscfg_configfs_add_feature(feat_descs[i]);
+ if (err)
+ return err;
+ }
+ }
+ if (config_descs) {
+ for (i = 0; config_descs[i]; i++) {
+ err = cscfg_configfs_add_config(config_descs[i]);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
/**
* cscfg_load_config_sets - API function to load feature and config sets.
*
@@ -476,57 +576,63 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
struct cscfg_feature_desc **feat_descs,
struct cscfg_load_owner_info *owner_info)
{
- int err = 0, i = 0;
+ int err = 0;
mutex_lock(&cscfg_mutex);
-
- /* load features first */
- if (feat_descs) {
- while (feat_descs[i]) {
- err = cscfg_load_feat(feat_descs[i]);
- if (!err)
- err = cscfg_configfs_add_feature(feat_descs[i]);
- if (err) {
- pr_err("coresight-syscfg: Failed to load feature %s\n",
- feat_descs[i]->name);
- cscfg_unload_owned_cfgs_feats(owner_info);
- goto exit_unlock;
- }
- feat_descs[i]->load_owner = owner_info;
- i++;
- }
+ if (cscfg_mgr->load_state != CSCFG_NONE) {
+ mutex_unlock(&cscfg_mutex);
+ return -EBUSY;
}
+ cscfg_mgr->load_state = CSCFG_LOAD;
- /* next any configurations to check feature dependencies */
- i = 0;
- if (config_descs) {
- while (config_descs[i]) {
- err = cscfg_load_config(config_descs[i]);
- if (!err)
- err = cscfg_configfs_add_config(config_descs[i]);
- if (err) {
- pr_err("coresight-syscfg: Failed to load configuration %s\n",
- config_descs[i]->name);
- cscfg_unload_owned_cfgs_feats(owner_info);
- goto exit_unlock;
- }
- config_descs[i]->load_owner = owner_info;
- i++;
- }
- }
+ /* first load and add to the lists */
+ err = cscfg_load_owned_cfgs_feats(config_descs, feat_descs, owner_info);
+ if (err)
+ goto err_clean_load;
/* add the load owner to the load order list */
list_add_tail(&owner_info->item, &cscfg_mgr->load_order_list);
if (!list_is_singular(&cscfg_mgr->load_order_list)) {
/* lock previous item in load order list */
err = cscfg_owner_get(list_prev_entry(owner_info, item));
- if (err) {
- cscfg_unload_owned_cfgs_feats(owner_info);
- list_del(&owner_info->item);
- }
+ if (err)
+ goto err_clean_owner_list;
}
+ /*
+	 * Make visible to configfs - configfs manipulation must occur outside
+	 * the list mutex lock to avoid circular lockdep issues with configfs
+	 * built-in mutexes and semaphores. This is safe as it is not possible
+	 * to start a new load/unload operation until the current one is done.
+ */
+ mutex_unlock(&cscfg_mutex);
+
+ /* create the configfs elements */
+ err = cscfg_fs_register_cfgs_feats(config_descs, feat_descs);
+ mutex_lock(&cscfg_mutex);
+
+ if (err)
+ goto err_clean_cfs;
+
+ /* mark any new configs as available for activation */
+ cscfg_set_configs_available(config_descs);
+ goto exit_unlock;
+
+err_clean_cfs:
+ /* cleanup after error registering with configfs */
+ cscfg_fs_unregister_cfgs_feats(owner_info);
+
+ if (!list_is_singular(&cscfg_mgr->load_order_list))
+ cscfg_owner_put(list_prev_entry(owner_info, item));
+
+err_clean_owner_list:
+ list_del(&owner_info->item);
+
+err_clean_load:
+ cscfg_unload_owned_cfgs_feats(owner_info);
+
exit_unlock:
+ cscfg_mgr->load_state = CSCFG_NONE;
mutex_unlock(&cscfg_mutex);
return err;
}
@@ -543,6 +649,9 @@ EXPORT_SYMBOL_GPL(cscfg_load_config_sets);
* 1) no configurations are active.
* 2) the set being unloaded was the last to be loaded to maintain dependencies.
*
+ * Once the unload operation commences, we disallow any configuration being
+ * made active until it is complete.
+ *
* @owner_info: Information on owner for set being unloaded.
*/
int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
@@ -551,6 +660,13 @@ int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
struct cscfg_load_owner_info *load_list_item = NULL;
mutex_lock(&cscfg_mutex);
+ if (cscfg_mgr->load_state != CSCFG_NONE) {
+ mutex_unlock(&cscfg_mutex);
+ return -EBUSY;
+ }
+
+ /* unload op in progress also prevents activation of any config */
+ cscfg_mgr->load_state = CSCFG_UNLOAD;
/* cannot unload if anything is active */
if (atomic_read(&cscfg_mgr->sys_active_cnt)) {
@@ -571,7 +687,12 @@ int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
goto exit_unlock;
}
- /* unload all belonging to load_owner */
+ /* remove from configfs - again outside the scope of the list mutex */
+ mutex_unlock(&cscfg_mutex);
+ cscfg_fs_unregister_cfgs_feats(owner_info);
+ mutex_lock(&cscfg_mutex);
+
+ /* unload everything from lists belonging to load_owner */
cscfg_unload_owned_cfgs_feats(owner_info);
/* remove from load order list */
@@ -582,6 +703,7 @@ int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
list_del(&owner_info->item);
exit_unlock:
+ cscfg_mgr->load_state = CSCFG_NONE;
mutex_unlock(&cscfg_mutex);
return err;
}
@@ -759,8 +881,15 @@ static int _cscfg_activate_config(unsigned long cfg_hash)
struct cscfg_config_desc *config_desc;
int err = -EINVAL;
+ if (cscfg_mgr->load_state == CSCFG_UNLOAD)
+ return -EBUSY;
+
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+ /* if we happen upon a partly loaded config, can't use it */
+ if (config_desc->available == false)
+ return -EBUSY;
+
/* must ensure that config cannot be unloaded in use */
err = cscfg_owner_get(config_desc->load_owner);
if (err)
@@ -1022,8 +1151,10 @@ struct device *cscfg_device(void)
/* Must have a release function or the kernel will complain on module unload */
static void cscfg_dev_release(struct device *dev)
{
+ mutex_lock(&cscfg_mutex);
kfree(cscfg_mgr);
cscfg_mgr = NULL;
+ mutex_unlock(&cscfg_mutex);
}
/* a device is needed to "own" some kernel elements such as sysfs entries. */
@@ -1042,6 +1173,14 @@ static int cscfg_create_device(void)
if (!cscfg_mgr)
goto create_dev_exit_unlock;
+ /* initialise the cscfg_mgr structure */
+ INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->config_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->load_order_list);
+ atomic_set(&cscfg_mgr->sys_active_cnt, 0);
+ cscfg_mgr->load_state = CSCFG_NONE;
+
/* setup the device */
dev = cscfg_device();
dev->release = cscfg_dev_release;
@@ -1056,17 +1195,73 @@ create_dev_exit_unlock:
return err;
}
-static void cscfg_clear_device(void)
+/*
+ * Loading and unloading is generally at user discretion.
+ * If exiting due to coresight module unload, we need to unload any configurations that remain
+ * before we unregister the configfs infrastructure.
+ *
+ * Do this by walking the load_owner list and taking appropriate action, depending on the load
+ * owner type.
+ */
+static void cscfg_unload_cfgs_on_exit(void)
{
- struct cscfg_config_desc *cfg_desc;
+ struct cscfg_load_owner_info *owner_info = NULL;
+ /*
+ * grab the mutex - even though we are exiting, some configfs files
+	 * may still be live until we remove them, so ensure the list data is
+ * protected from a race condition.
+ */
mutex_lock(&cscfg_mutex);
- list_for_each_entry(cfg_desc, &cscfg_mgr->config_desc_list, item) {
- etm_perf_del_symlink_cscfg(cfg_desc);
+ while (!list_empty(&cscfg_mgr->load_order_list)) {
+
+ /* remove in reverse order of loading */
+ owner_info = list_last_entry(&cscfg_mgr->load_order_list,
+ struct cscfg_load_owner_info, item);
+
+ /* action according to type */
+ switch (owner_info->type) {
+ case CSCFG_OWNER_PRELOAD:
+ /*
+ * preloaded descriptors are statically allocated in
+ * this module - just need to unload dynamic items from
+ * csdev lists, and remove from configfs directories.
+ */
+ pr_info("cscfg: unloading preloaded configurations\n");
+ break;
+
+ case CSCFG_OWNER_MODULE:
+ /*
+ * this is an error - the loadable module must have been unloaded prior
+ * to the coresight module unload. Therefore that module has not
+ * correctly unloaded configs in its own exit code.
+ * Nothing to do other than emit an error string as the static descriptor
+ * references we need to unload will have disappeared with the module.
+ */
+ pr_err("cscfg: ERROR: prior module failed to unload configuration\n");
+ goto list_remove;
+ }
+
+ /* remove from configfs - outside the scope of the list mutex */
+ mutex_unlock(&cscfg_mutex);
+ cscfg_fs_unregister_cfgs_feats(owner_info);
+ mutex_lock(&cscfg_mutex);
+
+ /* Next unload from csdev lists. */
+ cscfg_unload_owned_cfgs_feats(owner_info);
+
+list_remove:
+ /* remove from load order list */
+ list_del(&owner_info->item);
}
+ mutex_unlock(&cscfg_mutex);
+}
+
+static void cscfg_clear_device(void)
+{
+ cscfg_unload_cfgs_on_exit();
cscfg_configfs_release(cscfg_mgr);
device_unregister(cscfg_device());
- mutex_unlock(&cscfg_mutex);
}
/* Initialise system config management API device */
@@ -1074,20 +1269,16 @@ int __init cscfg_init(void)
{
int err = 0;
+ /* create the device and init cscfg_mgr */
err = cscfg_create_device();
if (err)
return err;
+ /* initialise configfs subsystem */
err = cscfg_configfs_init(cscfg_mgr);
if (err)
goto exit_err;
- INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list);
- INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list);
- INIT_LIST_HEAD(&cscfg_mgr->config_desc_list);
- INIT_LIST_HEAD(&cscfg_mgr->load_order_list);
- atomic_set(&cscfg_mgr->sys_active_cnt, 0);
-
/* preload built-in configurations */
err = cscfg_preload(THIS_MODULE);
if (err)
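/*
 * A condensed sketch (the example_* names are invented) of the
 * serialisation the new load_state field provides: a single enum, guarded
 * by the list mutex, rejects concurrent load/unload attempts with -EBUSY
 * and lets the activation path refuse configurations while an unload is in
 * flight.
 */
enum example_load_ops { EXAMPLE_NONE, EXAMPLE_LOAD, EXAMPLE_UNLOAD };

static int example_begin_op(struct mutex *lock, enum example_load_ops *state,
			    enum example_load_ops op)
{
	mutex_lock(lock);
	if (*state != EXAMPLE_NONE) {
		mutex_unlock(lock);
		return -EBUSY;	/* another multi-stage operation is running */
	}
	*state = op;		/* reset to EXAMPLE_NONE when the operation ends */
	mutex_unlock(lock);
	return 0;
}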
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.h b/drivers/hwtracing/coresight/coresight-syscfg.h
index 9106ffab4833..66e2db890d82 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.h
+++ b/drivers/hwtracing/coresight/coresight-syscfg.h
@@ -12,6 +12,17 @@
#include "coresight-config.h"
+/*
+ * Load operation types.
+ * When loading or unloading, another load operation cannot be run.
+ * When unloading, configurations cannot be activated.
+ */
+enum cscfg_load_ops {
+ CSCFG_NONE,
+ CSCFG_LOAD,
+ CSCFG_UNLOAD
+};
+
/**
* System configuration manager device.
*
@@ -30,6 +41,7 @@
* @cfgfs_subsys: configfs subsystem used to manage configurations.
* @sysfs_active_config:Active config hash used if CoreSight controlled from sysfs.
* @sysfs_active_preset:Active preset index used if CoreSight controlled from sysfs.
+ * @load_state: Tracks whether a multi-stage load/unload operation is in progress.
*/
struct cscfg_manager {
struct device dev;
@@ -41,6 +53,7 @@ struct cscfg_manager {
struct configfs_subsystem cfgfs_subsys;
u32 sysfs_active_config;
int sysfs_active_preset;
+ enum cscfg_load_ops load_state;
};
/* get reference to dev in cscfg_manager */
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
index 2c7f5116be12..891b28ea25fe 100644
--- a/drivers/hwtracing/intel_th/msu-sink.c
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -71,6 +71,9 @@ static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size)
block = dma_alloc_coherent(priv->dev->parent->parent,
PAGE_SIZE, &sg_dma_address(sg_ptr),
GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
sg_set_buf(sg_ptr, block, PAGE_SIZE);
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 70a07b4e9967..6c8215a47a60 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1067,6 +1067,16 @@ msc_buffer_set_uc(struct msc *msc) {}
static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */
+static struct page *msc_sg_page(struct scatterlist *sg)
+{
+ void *addr = sg_virt(sg);
+
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
+
+ return sg_page(sg);
+}
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -1137,7 +1147,7 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
int i;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
- struct page *page = sg_page(sg);
+ struct page *page = msc_sg_page(sg);
page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
@@ -1401,7 +1411,7 @@ found:
pgoff -= win->pgoff;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
- struct page *page = sg_page(sg);
+ struct page *page = msc_sg_page(sg);
size_t pgsz = PFN_DOWN(sg->length);
if (pgoff < pgsz)
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 7da4f298ed01..147d338c191e 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -100,8 +100,10 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
}
th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
- if (IS_ERR(th))
- return PTR_ERR(th);
+ if (IS_ERR(th)) {
+ err = PTR_ERR(th);
+ goto err_free_irq;
+ }
th->activate = intel_th_pci_activate;
th->deactivate = intel_th_pci_deactivate;
@@ -109,6 +111,10 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
return 0;
+
+err_free_irq:
+ pci_free_irq_vectors(pdev);
+ return err;
}
static void intel_th_pci_remove(struct pci_dev *pdev)
@@ -279,6 +285,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Meteor Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Raptor Lake-S */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Raptor Lake-S CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a1bae59208e3..7284206b278b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -108,6 +108,7 @@ config I2C_HIX5HD2
config I2C_I801
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
+ select P2SB if X86
select CHECK_SIGNATURE if X86 && DMI
select I2C_SMBUS
help
@@ -156,6 +157,7 @@ config I2C_I801
Emmitsburg (PCH)
Alder Lake (PCH)
Raptor Lake (PCH)
+ Meteor Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -486,7 +488,7 @@ config I2C_BCM_KONA
config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
- depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCM_63XX || \
+ depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCMBCA || \
ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default y
help
@@ -781,6 +783,17 @@ config I2C_MESON
If you say yes to this option, support will be included for the
I2C interface on the Amlogic Meson family of SoCs.
+config I2C_MICROCHIP_CORE
+ tristate "Microchip FPGA I2C controller"
+ depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on OF
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface on Microchip FPGAs.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-microchip-core.
+
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
depends on PPC
@@ -838,13 +851,13 @@ config I2C_NOMADIK
I2C interface from ST-Ericsson's Nomadik and Ux500 architectures,
as well as the STA2X11 PCIe I/O HUB.
-config I2C_NPCM7XX
+config I2C_NPCM
tristate "Nuvoton I2C Controller"
- depends on ARCH_NPCM7XX || COMPILE_TEST
+ depends on ARCH_NPCM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
- Nuvoton I2C controller, which is available on the NPCM7xx BMC
- controller.
+ Nuvoton I2C controller, which is available on the NPCM BMC
+ controllers.
Driver can also support slave mode (select I2C_SLAVE).
config I2C_OCORES
@@ -984,6 +997,16 @@ config I2C_RK3X
This driver can also be built as a module. If so, the module will
be called i2c-rk3x.
+config I2C_RZV2M
+ tristate "Renesas RZ/V2M adapter"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ Renesas RZ/V2M I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-rzv2m.
+
config I2C_S3C2410
tristate "S3C/Exynos I2C Driver"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || \
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 479f60e4ee3d..c5cac15f075c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -78,13 +78,14 @@ obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o
obj-$(CONFIG_I2C_MESON) += i2c-meson.o
+obj-$(CONFIG_I2C_MICROCHIP_CORE) += i2c-microchip-corei2c.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MT65XX) += i2c-mt65xx.o
obj-$(CONFIG_I2C_MT7621) += i2c-mt7621.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
-obj-$(CONFIG_I2C_NPCM7XX) += i2c-npcm7xx.o
+obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_OWL) += i2c-owl.o
@@ -101,6 +102,7 @@ obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o
obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
+obj-$(CONFIG_I2C_RZV2M) += i2c-rzv2m.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 354cf7e45c4a..50e7f3f670b6 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -447,7 +447,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
mutex_unlock(&idev->isr_mutex);
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &altr_i2c_algo;
idev->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 771e53d3d197..185dedfebbac 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -1022,7 +1022,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->adap.algo = &aspeed_i2c_algo;
bus->adap.dev.parent = &pdev->dev;
bus->adap.dev.of_node = pdev->dev.of_node;
- strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
+ strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
i2c_set_adapdata(&bus->adap, bus);
bus->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 22aed922552b..99bd24d0e6a5 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -321,7 +321,7 @@ i2c_au1550_probe(struct platform_device *pdev)
priv->adap.algo = &au1550_algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &pdev->dev;
- strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
/* Now, set up the PSC for SMBus PIO mode. */
i2c_au1550_setup(priv);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 5294b73beca8..bdf3b50de8ad 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -783,7 +783,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
}
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &axxia_i2c_algo;
idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 6304d1dd2dd6..85d8a6b04885 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 8e350f20cde0..f3e369f0fd40 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2013 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013 Broadcom Corporation
#include <linux/device.h>
#include <linux/kernel.h>
@@ -849,7 +839,7 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
adap->algo = &bcm_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index b00f35c0b066..69383be47905 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/clk.h>
#include <linux/delay.h>
@@ -684,9 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, "Broadcom STB : ", sizeof(adap->name));
- if (int_name)
- strlcat(adap->name, int_name, sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
adap->algo = &brcmstb_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 3d6f8ee355bf..33f5588a50c0 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
*/
static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
- unsigned int isr_status, avail_bytes, updatetx;
+ unsigned int isr_status, avail_bytes;
unsigned int bytes_to_send;
- bool hold_quirk;
+ bool updatetx;
struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */
int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* Check if transfer size register needs to be updated again for a
* large data receive operation.
*/
- updatetx = 0;
- if (id->recv_count > id->curr_recv_count)
- updatetx = 1;
-
- hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+ updatetx = id->recv_count > id->curr_recv_count;
/* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
break;
}
- if (cdns_is_holdquirk(id, hold_quirk))
+ if (cdns_is_holdquirk(id, updatetx))
break;
}
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* maintain transfer size non-zero while performing a large
* receive operation.
*/
- if (cdns_is_holdquirk(id, hold_quirk)) {
+ if (cdns_is_holdquirk(id, updatetx)) {
/* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
- } else if (id->recv_count && !hold_quirk &&
- !id->curr_recv_count) {
-
- /* Set the slave address in address register*/
- cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
- CDNS_I2C_ADDR_OFFSET);
-
- if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
- cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
- } else {
- cdns_i2c_writereg(id->recv_count,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = id->recv_count;
- }
}
/* Clear hold (if not repeated start) and signal completion */
@@ -593,8 +573,13 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO;
+ /*
+ * Receive up to I2C_SMBUS_BLOCK_MAX data bytes, plus one message length
+ * byte, plus one checksum byte if PEC is enabled. p_msg->len will be 2 if
+ * PEC is enabled, otherwise 1.
+ */
if (id->p_msg->flags & I2C_M_RECV_LEN)
- id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
+ id->recv_count = I2C_SMBUS_BLOCK_MAX + id->p_msg->len;
id->curr_recv_count = id->recv_count;
@@ -809,6 +794,9 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
if (id->err_status & CDNS_I2C_IXR_ARB_LOST)
return -EAGAIN;
+ if (msg->flags & I2C_M_RECV_LEN)
+ msg->len += min_t(unsigned int, msg->buf[0], I2C_SMBUS_BLOCK_MAX);
+
return 0;
}
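/*
 * A worked example (numbers only, not driver code) of the I2C_M_RECV_LEN
 * accounting changed above. For an SMBus block read msg->len starts at 1,
 * or 2 when PEC appends a checksum byte; the adapter must be ready for up
 * to I2C_SMBUS_BLOCK_MAX + msg->len bytes, and once the byte count N
 * arrives in msg->buf[0] the final length is
 * msg->len + min(N, I2C_SMBUS_BLOCK_MAX):
 *
 *	PEC off, N = 4: recv_count = 32 + 1 = 33, final msg->len = 1 + 4 = 5
 *	PEC on,  N = 4: recv_count = 32 + 2 = 34, final msg->len = 2 + 4 = 6
 */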
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index f8639a4457d2..d97c61eec95c 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -245,7 +245,7 @@ static int cbus_i2c_probe(struct platform_device *pdev)
adapter->nr = pdev->id;
adapter->timeout = HZ;
adapter->algo = &cbus_i2c_algo;
- strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+ strscpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
spin_lock_init(&chost->lock);
chost->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index de15f09c9b47..190abdc46dd3 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -404,7 +404,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
adap->adapter.class = I2C_CLASS_HWMON;
adap->adapter.algo = &cht_wc_i2c_adap_algo;
adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
- strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+ strscpy(adap->adapter.name, "PMIC I2C Adapter",
sizeof(adap->adapter.name));
adap->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 892213d51f43..4e787dc709f9 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -267,7 +267,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->dev = dev;
bus->adap.owner = THIS_MODULE;
- strlcpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
+ strscpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 9e09db31a937..471c47db546b 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -845,7 +845,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 60c838c7c454..50925d97fa42 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -322,7 +322,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
+ strscpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &dc_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 321b2770feab..4914bfbee2a9 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,7 +773,7 @@ static int pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
+ strscpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index bdff0e6345d9..f2e537b137b2 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -371,7 +371,7 @@ static int em_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- strlcpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
priv->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(priv->sclk))
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b812d1090c0f..4a6260d04db2 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -802,7 +802,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock))
i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ;
- strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &exynos5_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 7a048abbf92b..b1985c1667e1 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -436,7 +436,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
if (np)
- strlcpy(adap->name, dev_name(dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index a2add128d084..4374a8677271 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -402,7 +402,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON;
- strlcpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
adap->algo = &highlander_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index acf394812061..76c3d8f6fc3c 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -15,6 +15,7 @@
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/units.h>
#define HISI_I2C_FRAME_CTRL 0x0000
#define HISI_I2C_FRAME_CTRL_SPEED_MODE GENMASK(1, 0)
@@ -80,8 +81,6 @@
#define HISI_I2C_TX_F_AE_THRESH 1
#define HISI_I2C_RX_F_AF_THRESH 60
-#define HZ_PER_KHZ 1000
-
#define NSEC_TO_CYCLES(ns, clk_rate_khz) \
DIV_ROUND_UP_ULL((clk_rate_khz) * (ns), NSEC_PER_MSEC)
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 61ae58f57047..0e34cbaca22d 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -423,7 +423,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->clk);
- strlcpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
priv->dev = &pdev->dev;
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &hix5hd2_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ff706349bdfb..a176296f4fff 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -76,6 +76,7 @@
* Alder Lake-P (PCH) 0x51a3 32 hard yes yes yes
* Alder Lake-M (PCH) 0x54a3 32 hard yes yes yes
* Raptor Lake-S (PCH) 0x7a23 32 hard yes yes yes
+ * Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -111,6 +112,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/platform_data/x86/p2sb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
@@ -140,7 +142,6 @@
#define TCOBASE 0x050
#define TCOCTL 0x054
-#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
#define SBREG_SMBCTRL_DNV 0xcf000c
@@ -231,6 +232,7 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
+#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_P_SMBUS 0x7e22
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
@@ -1049,6 +1051,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, RAPTOR_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
@@ -1113,7 +1116,7 @@ static void dmi_check_onboard_device(u8 type, const char *name,
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dmi_devices[i].i2c_addr;
- strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
+ strscpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
i2c_new_client_device(adap, &info);
break;
}
@@ -1264,7 +1267,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
i2c_new_client_device(&priv->adapter, &info);
}
@@ -1482,45 +1485,24 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
.version = 4,
};
struct resource *res;
- unsigned int devfn;
- u64 base64_addr;
- u32 base_addr;
- u8 hidden;
+ int ret;
/*
* We must access the NO_REBOOT bit over the Primary to Sideband
- * bridge (P2SB). The BIOS prevents the P2SB device from being
- * enumerated by the PCI subsystem, so we need to unhide/hide it
- * to lookup the P2SB BAR.
+ * (P2SB) bridge.
*/
- pci_lock_rescan_remove();
-
- devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
-
- /* Unhide the P2SB device, if it is hidden */
- pci_bus_read_config_byte(pci_dev->bus, devfn, 0xe1, &hidden);
- if (hidden)
- pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
-
- pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
- base64_addr = base_addr & 0xfffffff0;
-
- pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
- base64_addr |= (u64)base_addr << 32;
-
- /* Hide the P2SB device, if it was hidden before */
- if (hidden)
- pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
- pci_unlock_rescan_remove();
res = &tco_res[1];
+ ret = p2sb_bar(pci_dev->bus, 0, res);
+ if (ret)
+ return ERR_PTR(ret);
+
if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ res->start += SBREG_SMBCTRL_DNV;
else
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ res->start += SBREG_SMBCTRL;
res->end = res->start + 3;
- res->flags = IORESOURCE_MEM;
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
tco_res, 2, &pldata, sizeof(pldata));
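/*
 * A minimal sketch (example_sbreg_resource() and the offset parameter are
 * invented; p2sb_bar() comes from <linux/platform_data/x86/p2sb.h>, pulled
 * in via the new P2SB Kconfig dependency) of the simplified lookup above:
 * the helper returns the hidden P2SB bridge's BAR as a resource, so callers
 * only add their private register offset instead of temporarily unhiding
 * the P2SB PCI device and reading SBREG_BAR by hand.
 */
static int example_sbreg_resource(struct pci_dev *pdev, struct resource *res,
				  resource_size_t offset)
{
	int ret;

	ret = p2sb_bar(pdev->bus, 0, res);	/* devfn 0 selects the default P2SB function */
	if (ret)
		return ret;

	res->start += offset;
	res->end = res->start + 3;		/* a 4-byte register window */
	return 0;
}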
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 9f71daf6db64..eeb80e34f9ad 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -738,7 +738,7 @@ static int iic_probe(struct platform_device *ofdev)
adap = &dev->adap;
adap->dev.parent = &ofdev->dev;
adap->dev.of_node = of_node_get(np);
- strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
+ strscpy(adap->name, "IBM IIC", sizeof(adap->name));
i2c_set_adapdata(adap, dev);
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &iic_algo;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 5dae7cab7260..febcb6f01d4d 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -141,7 +141,7 @@ static int icy_probe(struct zorro_dev *z,
i2c->adapter.owner = THIS_MODULE;
/* i2c->adapter.algo assigned by i2c_pcf_add_bus() */
i2c->adapter.algo_data = algo_data;
- strlcpy(i2c->adapter.name, "ICY I2C Zorro adapter",
+ strscpy(i2c->adapter.name, "ICY I2C Zorro adapter",
sizeof(i2c->adapter.name));
if (!devm_request_mem_region(&z->dev,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8b9ba055c418..b51ab3cad2b1 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -558,7 +558,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
lpi2c_imx->adapter.dev.parent = &pdev->dev;
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
- strlcpy(lpi2c_imx->adapter.name, pdev->name,
+ strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e9e2db68b9fb..e47fa3465671 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -66,7 +66,7 @@
/* IMX I2C registers:
* the I2C register offset is different between SoCs,
- * to provid support for all these chips, split the
+ * to provide support for all these chips, split the
* register offset into a fixed base address and a
* variable shift value, then the full register offset
* will be calculated by
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
hrtimer_cancel(&i2c_imx->slave_timer);
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret == 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 5bbb7f0d7852..cf857cf22507 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -303,6 +303,7 @@ static int kempld_i2c_probe(struct platform_device *pdev)
i2c->dev = &pdev->dev;
i2c->adap = kempld_i2c_adapter;
i2c->adap.dev.parent = i2c->dev;
+ ACPI_COMPANION_SET(&i2c->adap.dev, ACPI_COMPANION(&pdev->dev));
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 4e30c5267142..8fff6fbb7065 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -417,7 +417,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo = &i2c_lpc2k_algorithm;
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 61cc5b2462c6..889eff06b78f 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -502,7 +502,7 @@ static int meson_i2c_probe(struct platform_device *pdev)
return ret;
}
- strlcpy(i2c->adap.name, "Meson I2C adapter",
+ strscpy(i2c->adap.name, "Meson I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &meson_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
new file mode 100644
index 000000000000..4d7e9b25f018
--- /dev/null
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip CoreI2C I2C controller driver
+ *
+ * Copyright (c) 2018-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define CORE_I2C_CTRL (0x00)
+#define CTRL_CR0 BIT(0)
+#define CTRL_CR1 BIT(1)
+#define CTRL_AA BIT(2)
+#define CTRL_SI BIT(3)
+#define CTRL_STO BIT(4)
+#define CTRL_STA BIT(5)
+#define CTRL_ENS1 BIT(6)
+#define CTRL_CR2 BIT(7)
+
+#define STATUS_BUS_ERROR (0x00)
+#define STATUS_M_START_SENT (0x08)
+#define STATUS_M_REPEATED_START_SENT (0x10)
+#define STATUS_M_SLAW_ACK (0x18)
+#define STATUS_M_SLAW_NACK (0x20)
+#define STATUS_M_TX_DATA_ACK (0x28)
+#define STATUS_M_TX_DATA_NACK (0x30)
+#define STATUS_M_ARB_LOST (0x38)
+#define STATUS_M_SLAR_ACK (0x40)
+#define STATUS_M_SLAR_NACK (0x48)
+#define STATUS_M_RX_DATA_ACKED (0x50)
+#define STATUS_M_RX_DATA_NACKED (0x58)
+#define STATUS_S_SLAW_ACKED (0x60)
+#define STATUS_S_ARB_LOST_SLAW_ACKED (0x68)
+#define STATUS_S_GENERAL_CALL_ACKED (0x70)
+#define STATUS_S_ARB_LOST_GENERAL_CALL_ACKED (0x78)
+#define STATUS_S_RX_DATA_ACKED (0x80)
+#define STATUS_S_RX_DATA_NACKED (0x88)
+#define STATUS_S_GENERAL_CALL_RX_DATA_ACKED (0x90)
+#define STATUS_S_GENERAL_CALL_RX_DATA_NACKED (0x98)
+#define STATUS_S_RX_STOP (0xA0)
+#define STATUS_S_SLAR_ACKED (0xA8)
+#define STATUS_S_ARB_LOST_SLAR_ACKED (0xB0)
+#define STATUS_S_TX_DATA_ACK (0xB8)
+#define STATUS_S_TX_DATA_NACK (0xC0)
+#define STATUS_LAST_DATA_ACK (0xC8)
+#define STATUS_M_SMB_MASTER_RESET (0xD0)
+#define STATUS_S_SCL_LOW_TIMEOUT (0xD8) /* 25 ms */
+#define STATUS_NO_STATE_INFO (0xF8)
+
+#define CORE_I2C_STATUS (0x04)
+#define CORE_I2C_DATA (0x08)
+#define WRITE_BIT (0x0)
+#define READ_BIT (0x1)
+#define SLAVE_ADDR_SHIFT (1)
+#define CORE_I2C_SLAVE0_ADDR (0x0c)
+#define GENERAL_CALL_BIT (0x0)
+#define CORE_I2C_SMBUS (0x10)
+#define SMBALERT_INT_ENB (0x0)
+#define SMBSUS_INT_ENB (0x1)
+#define SMBUS_ENB (0x2)
+#define SMBALERT_NI_STATUS (0x3)
+#define SMBALERT_NO_CTRL (0x4)
+#define SMBSUS_NI_STATUS (0x5)
+#define SMBSUS_NO_CTRL (0x6)
+#define SMBUS_RESET (0x7)
+#define CORE_I2C_FREQ (0x14)
+#define CORE_I2C_GLITCHREG (0x18)
+#define CORE_I2C_SLAVE1_ADDR (0x1c)
+
+#define PCLK_DIV_960 (CTRL_CR2)
+#define PCLK_DIV_256 (0)
+#define PCLK_DIV_224 (CTRL_CR0)
+#define PCLK_DIV_192 (CTRL_CR1)
+#define PCLK_DIV_160 (CTRL_CR0 | CTRL_CR1)
+#define PCLK_DIV_120 (CTRL_CR0 | CTRL_CR2)
+#define PCLK_DIV_60 (CTRL_CR1 | CTRL_CR2)
+#define BCLK_DIV_8 (CTRL_CR0 | CTRL_CR1 | CTRL_CR2)
+#define CLK_MASK (CTRL_CR0 | CTRL_CR1 | CTRL_CR2)
+
+/**
+ * struct mchp_corei2c_dev - Microchip CoreI2C device private data
+ *
+ * @base: pointer to register struct
+ * @dev: device reference
+ * @i2c_clk: clock reference for i2c input clock
+ * @buf: pointer to msg buffer for easier use
+ * @msg_complete: xfer completion object
+ * @adapter: core i2c abstraction
+ * @msg_err: error code for completed message
+ * @bus_clk_rate: current i2c bus clock rate
+ * @isr_status: cached copy of local ISR status
+ * @msg_len: number of bytes transferred in msg
+ * @addr: address of the current slave
+ */
+struct mchp_corei2c_dev {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *i2c_clk;
+ u8 *buf;
+ struct completion msg_complete;
+ struct i2c_adapter adapter;
+ int msg_err;
+ u32 bus_clk_rate;
+ u32 isr_status;
+ u16 msg_len;
+ u8 addr;
+};
+
+static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
+{
+ u8 ctrl = readb(idev->base + CORE_I2C_CTRL);
+
+ ctrl &= ~CTRL_ENS1;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+}
+
+static void mchp_corei2c_core_enable(struct mchp_corei2c_dev *idev)
+{
+ u8 ctrl = readb(idev->base + CORE_I2C_CTRL);
+
+ ctrl |= CTRL_ENS1;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+}
+
+static void mchp_corei2c_reset(struct mchp_corei2c_dev *idev)
+{
+ mchp_corei2c_core_disable(idev);
+ mchp_corei2c_core_enable(idev);
+}
+
+static inline void mchp_corei2c_stop(struct mchp_corei2c_dev *idev)
+{
+ u8 ctrl = readb(idev->base + CORE_I2C_CTRL);
+
+ ctrl |= CTRL_STO;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+}
+
+static inline int mchp_corei2c_set_divisor(u32 rate,
+ struct mchp_corei2c_dev *idev)
+{
+ u8 clkval, ctrl;
+
+ if (rate >= 960)
+ clkval = PCLK_DIV_960;
+ else if (rate >= 256)
+ clkval = PCLK_DIV_256;
+ else if (rate >= 224)
+ clkval = PCLK_DIV_224;
+ else if (rate >= 192)
+ clkval = PCLK_DIV_192;
+ else if (rate >= 160)
+ clkval = PCLK_DIV_160;
+ else if (rate >= 120)
+ clkval = PCLK_DIV_120;
+ else if (rate >= 60)
+ clkval = PCLK_DIV_60;
+ else if (rate >= 8)
+ clkval = BCLK_DIV_8;
+ else
+ return -EINVAL;
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl &= ~CLK_MASK;
+ ctrl |= clkval;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ if ((ctrl & CLK_MASK) != clkval)
+ return -EIO;
+
+ return 0;
+}
+
+static int mchp_corei2c_init(struct mchp_corei2c_dev *idev)
+{
+ u32 clk_rate = clk_get_rate(idev->i2c_clk);
+ u32 divisor = clk_rate / idev->bus_clk_rate;
+ int ret;
+
+ ret = mchp_corei2c_set_divisor(divisor, idev);
+ if (ret)
+ return ret;
+
+ mchp_corei2c_reset(idev);
+
+ return 0;
+}
+
+static void mchp_corei2c_empty_rx(struct mchp_corei2c_dev *idev)
+{
+ u8 ctrl;
+
+ if (idev->msg_len > 0) {
+ *idev->buf++ = readb(idev->base + CORE_I2C_DATA);
+ idev->msg_len--;
+ }
+
+ if (idev->msg_len <= 1) {
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl &= ~CTRL_AA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+ }
+}
+
+static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev)
+{
+ if (idev->msg_len > 0)
+ writeb(*idev->buf++, idev->base + CORE_I2C_DATA);
+ idev->msg_len--;
+
+ return 0;
+}
+
+static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
+{
+ u32 status = idev->isr_status;
+ u8 ctrl;
+ bool last_byte = false, finished = false;
+
+ if (!idev->buf)
+ return IRQ_NONE;
+
+ switch (status) {
+ case STATUS_M_START_SENT:
+ case STATUS_M_REPEATED_START_SENT:
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl &= ~CTRL_STA;
+ writeb(idev->addr, idev->base + CORE_I2C_DATA);
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+ if (idev->msg_len == 0)
+ finished = true;
+ break;
+ case STATUS_M_ARB_LOST:
+ idev->msg_err = -EAGAIN;
+ finished = true;
+ break;
+ case STATUS_M_SLAW_ACK:
+ case STATUS_M_TX_DATA_ACK:
+ if (idev->msg_len > 0)
+ mchp_corei2c_fill_tx(idev);
+ else
+ last_byte = true;
+ break;
+ case STATUS_M_TX_DATA_NACK:
+ case STATUS_M_SLAR_NACK:
+ case STATUS_M_SLAW_NACK:
+ idev->msg_err = -ENXIO;
+ last_byte = true;
+ break;
+ case STATUS_M_SLAR_ACK:
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ if (idev->msg_len == 1u) {
+ ctrl &= ~CTRL_AA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+ } else {
+ ctrl |= CTRL_AA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+ }
+ if (idev->msg_len < 1u)
+ last_byte = true;
+ break;
+ case STATUS_M_RX_DATA_ACKED:
+ mchp_corei2c_empty_rx(idev);
+ break;
+ case STATUS_M_RX_DATA_NACKED:
+ mchp_corei2c_empty_rx(idev);
+ if (idev->msg_len == 0)
+ last_byte = true;
+ break;
+ default:
+ break;
+ }
+
+ /* On the last byte to be transmitted, send STOP */
+ if (last_byte)
+ mchp_corei2c_stop(idev);
+
+ if (last_byte || finished)
+ complete(&idev->msg_complete);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mchp_corei2c_isr(int irq, void *_dev)
+{
+ struct mchp_corei2c_dev *idev = _dev;
+ irqreturn_t ret = IRQ_NONE;
+ u8 ctrl;
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ if (ctrl & CTRL_SI) {
+ idev->isr_status = readb(idev->base + CORE_I2C_STATUS);
+ ret = mchp_corei2c_handle_isr(idev);
+ }
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl &= ~CTRL_SI;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+
+ return ret;
+}
+
+static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
+ struct i2c_msg *msg)
+{
+ u8 ctrl;
+ unsigned long time_left;
+
+ idev->addr = i2c_8bit_addr_from_msg(msg);
+ idev->msg_len = msg->len;
+ idev->buf = msg->buf;
+ idev->msg_err = 0;
+
+ reinit_completion(&idev->msg_complete);
+
+ mchp_corei2c_core_enable(idev);
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl |= CTRL_STA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+
+ time_left = wait_for_completion_timeout(&idev->msg_complete,
+ idev->adapter.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return idev->msg_err;
+}
+
+static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ ret = mchp_corei2c_xfer_msg(idev, msgs++);
+ if (ret)
+ return ret;
+ }
+
+ return num;
+}
+
+static u32 mchp_corei2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm mchp_corei2c_algo = {
+ .master_xfer = mchp_corei2c_xfer,
+ .functionality = mchp_corei2c_func,
+};
+
+static int mchp_corei2c_probe(struct platform_device *pdev)
+{
+ struct mchp_corei2c_dev *idev;
+ struct resource *res;
+ int irq, ret;
+
+ idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(idev->base))
+ return PTR_ERR(idev->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "invalid IRQ %d for I2C controller\n", irq);
+
+ idev->i2c_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(idev->i2c_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(idev->i2c_clk),
+ "missing clock\n");
+
+ idev->dev = &pdev->dev;
+ init_completion(&idev->msg_complete);
+
+ ret = device_property_read_u32(idev->dev, "clock-frequency",
+ &idev->bus_clk_rate);
+ if (ret || !idev->bus_clk_rate) {
+ dev_info(&pdev->dev, "default to 100kHz\n");
+ idev->bus_clk_rate = 100000;
+ }
+
+ if (idev->bus_clk_rate > 400000)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "clock-frequency too high: %d\n",
+ idev->bus_clk_rate);
+
+ /*
+ * This driver supports both the hard peripherals & soft FPGA cores.
+ * The hard peripherals do not have shared IRQs, but we don't have
+ * control over what way the interrupts are wired for the soft cores.
+ */
+ ret = devm_request_irq(&pdev->dev, irq, mchp_corei2c_isr, IRQF_SHARED,
+ pdev->name, idev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to claim irq %d\n", irq);
+
+ ret = clk_prepare_enable(idev->i2c_clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+ ret = mchp_corei2c_init(idev);
+ if (ret) {
+ clk_disable_unprepare(idev->i2c_clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to program clock divider\n");
+ }
+
+ i2c_set_adapdata(&idev->adapter, idev);
+ snprintf(idev->adapter.name, sizeof(idev->adapter.name),
+ "Microchip I2C hw bus at %08lx", (unsigned long)res->start);
+ idev->adapter.owner = THIS_MODULE;
+ idev->adapter.algo = &mchp_corei2c_algo;
+ idev->adapter.dev.parent = &pdev->dev;
+ idev->adapter.dev.of_node = pdev->dev.of_node;
+ idev->adapter.timeout = HZ;
+
+ platform_set_drvdata(pdev, idev);
+
+ ret = i2c_add_adapter(&idev->adapter);
+ if (ret) {
+ clk_disable_unprepare(idev->i2c_clk);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "registered CoreI2C bus driver\n");
+
+ return 0;
+}
+
+static int mchp_corei2c_remove(struct platform_device *pdev)
+{
+ struct mchp_corei2c_dev *idev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(idev->i2c_clk);
+ i2c_del_adapter(&idev->adapter);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_corei2c_of_match[] = {
+ { .compatible = "microchip,mpfs-i2c" },
+ { .compatible = "microchip,corei2c-rtl-v7" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mchp_corei2c_of_match);
+
+static struct platform_driver mchp_corei2c_driver = {
+ .probe = mchp_corei2c_probe,
+ .remove = mchp_corei2c_remove,
+ .driver = {
+ .name = "microchip-corei2c",
+ .of_match_table = mchp_corei2c_of_match,
+ },
+};
+
+module_platform_driver(mchp_corei2c_driver);
+
+MODULE_DESCRIPTION("Microchip CoreI2C bus driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
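
As a usage sketch for the new CoreI2C adapter (hypothetical foo_ client code and a made-up register layout, not part of the driver), a register read is issued as two messages and mchp_corei2c_xfer() above is handed one struct i2c_msg at a time:

/* Sketch only: hypothetical foo_ client code, not part of the driver. */
#include <linux/i2c.h>
#include <linux/kernel.h>

static int foo_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{
			/* First message: SLA+W, then the register index. */
			.addr = client->addr,
			.flags = 0,
			.len = 1,
			.buf = &reg,
		},
		{
			/* Second message: SLA+R, then one byte back. */
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = val,
		},
	};
	int ret;

	/* The core calls mchp_corei2c_xfer(), which loops over both msgs. */
	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}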
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 56aa424fd71d..72fcfb17dd67 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -49,7 +49,7 @@
#define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e
#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency {
@@ -560,6 +560,10 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
if (err)
goto mlxcpld_i2_probe_failed;
+ /* Notify caller when adapter is added. */
+ if (pdata && pdata->completion_notify)
+ pdata->completion_notify(pdata->handle, mlxcpld_i2c_adapter.nr);
+
return 0;
mlxcpld_i2_probe_failed:
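
The mlxcpld hunk assumes whoever instantiated the platform device passed a notification callback in its platform data. The sketch below shows the shape of that handshake with an illustrative pdata layout; it is not the real mlxcpld platform data:

/* Sketch only: illustrative pdata layout, not the real mlxcpld one. */
#include <linux/i2c.h>

struct foo_i2c_pdata {
	void *handle;					/* caller's cookie */
	void (*completion_notify)(void *handle, int adapter_nr);
};

static int foo_register_adapter(struct i2c_adapter *adap,
				struct foo_i2c_pdata *pdata)
{
	int err;

	err = i2c_add_numbered_adapter(adap);
	if (err)
		return err;

	/* Report the bus number we ended up on to whoever instantiated us. */
	if (pdata && pdata->completion_notify)
		pdata->completion_notify(pdata->handle, adap->nr);

	return 0;
}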
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 6c698c10d3cd..81ac92bb4f6f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -239,6 +239,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
u32 *real_clk)
{
+ struct fwnode_handle *fwnode = of_fwnode_handle(node);
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
@@ -246,12 +247,12 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
if (clock == MPC_I2C_CLOCK_LEGACY) {
/* see below - default fdr = 0x3f -> div = 2048 */
- *real_clk = mpc5xxx_get_bus_frequency(node) / 2048;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / 2048;
return -EINVAL;
}
/* Determine divider value */
- divider = mpc5xxx_get_bus_frequency(node) / clock;
+ divider = mpc5xxx_fwnode_get_bus_frequency(fwnode) / clock;
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
@@ -266,7 +267,7 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
break;
}
- *real_clk = mpc5xxx_get_bus_frequency(node) / div->divider;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / div->divider;
return (int)div->fdr;
}
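
The mpc hunks above wrap the OF node with of_fwnode_handle() so a firmware-agnostic helper can be called instead of the OF-only one. A minimal sketch of the same conversion, with a hypothetical foo_ helper and an illustrative property name:

/* Sketch only: hypothetical helper; the property name is illustrative. */
#include <linux/of.h>
#include <linux/property.h>

static u32 foo_bus_frequency(struct device_node *node)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(node);
	u32 freq = 0;

	/* fwnode helpers accept OF, ACPI and software nodes alike. */
	fwnode_property_read_u32(fwnode, "clock-frequency", &freq);

	return freq;
}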
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 8e6985354fd5..fc7bfd98156b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -229,6 +229,35 @@ static const u16 mt_i2c_regs_v2[] = {
[OFFSET_DCM_EN] = 0xf88,
};
+static const u16 mt_i2c_regs_v3[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_LTIMING] = 0x2c,
+ [OFFSET_HS] = 0x30,
+ [OFFSET_IO_CONFIG] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_SDA_TIMING] = 0x3c,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x44,
+ [OFFSET_CLOCK_DIV] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_MULTI_DMA] = 0x8c,
+ [OFFSET_SCL_MIS_COMP_POINT] = 0x90,
+ [OFFSET_SLAVE_ADDR] = 0x94,
+ [OFFSET_DEBUGSTAT] = 0xe4,
+ [OFFSET_DEBUGCTRL] = 0xe8,
+ [OFFSET_FIFO_STAT] = 0xf4,
+ [OFFSET_FIFO_THRESH] = 0xf8,
+ [OFFSET_DCM_EN] = 0xf88,
+};
+
struct mtk_i2c_compatible {
const struct i2c_adapter_quirks *quirks;
const u16 *regs;
@@ -442,6 +471,19 @@ static const struct mtk_i2c_compatible mt8186_compat = {
.max_dma_support = 36,
};
+static const struct mtk_i2c_compatible mt8188_compat = {
+ .regs = mt_i2c_regs_v3,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .timing_adjust = 1,
+ .dma_sync = 0,
+ .ltiming_adjust = 1,
+ .apdma_sync = 1,
+ .max_dma_support = 36,
+};
+
static const struct mtk_i2c_compatible mt8192_compat = {
.quirks = &mt8183_i2c_quirks,
.regs = mt_i2c_regs_v2,
@@ -465,6 +507,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
{ .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
{ .compatible = "mediatek,mt8186-i2c", .data = &mt8186_compat },
+ { .compatible = "mediatek,mt8188-i2c", .data = &mt8188_compat },
{ .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat },
{}
};
@@ -1389,7 +1432,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
speed_clk = I2C_MT65XX_CLK_MAIN;
}
- strlcpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (ret) {
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index cfe6de8175dd..20eda5738ac4 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -312,7 +312,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
i2c_set_adapdata(adap, i2c);
adap->dev.of_node = pdev->dev.of_node;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5c8e94b6cdb5..047dfef7a657 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -150,6 +150,7 @@ struct mv64xxx_i2c_data {
/* Clk div is 2 to the power n, not 2 to the power n + 1 */
bool clk_n_base_0;
struct i2c_bus_recovery_info rinfo;
+ bool atomic;
};
static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -179,7 +180,10 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
u32 dir = 0;
drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK |
- MV64XXX_I2C_REG_CONTROL_INTEN | MV64XXX_I2C_REG_CONTROL_TWSIEN;
+ MV64XXX_I2C_REG_CONTROL_TWSIEN;
+
+ if (!drv_data->atomic)
+ drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_INTEN;
if (msg->flags & I2C_M_RD)
dir = 1;
@@ -409,7 +413,8 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
case MV64XXX_I2C_ACTION_RCV_DATA_STOP:
drv_data->msg->buf[drv_data->byte_posn++] =
readl(drv_data->reg_base + drv_data->reg_offsets.data);
- drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
+ if (!drv_data->atomic)
+ drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + drv_data->reg_offsets.control);
drv_data->block = 0;
@@ -427,7 +432,8 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->rc = -EIO;
fallthrough;
case MV64XXX_I2C_ACTION_SEND_STOP:
- drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
+ if (!drv_data->atomic)
+ drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + drv_data->reg_offsets.control);
drv_data->block = 0;
@@ -575,6 +581,17 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
spin_unlock_irqrestore(&drv_data->lock, flags);
}
+static void mv64xxx_i2c_wait_polling(struct mv64xxx_i2c_data *drv_data)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), drv_data->adapter.timeout);
+
+ while (READ_ONCE(drv_data->block) &&
+ ktime_compare(ktime_get(), timeout) < 0) {
+ udelay(5);
+ mv64xxx_i2c_intr(0, drv_data);
+ }
+}
+
static int
mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
int is_last)
@@ -590,7 +607,11 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
mv64xxx_i2c_send_start(drv_data);
spin_unlock_irqrestore(&drv_data->lock, flags);
- mv64xxx_i2c_wait_for_completion(drv_data);
+ if (!drv_data->atomic)
+ mv64xxx_i2c_wait_for_completion(drv_data);
+ else
+ mv64xxx_i2c_wait_polling(drv_data);
+
return drv_data->rc;
}
@@ -717,7 +738,7 @@ mv64xxx_i2c_functionality(struct i2c_adapter *adap)
}
static int
-mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+mv64xxx_i2c_xfer_core(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
int rc, ret = num;
@@ -730,7 +751,7 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
drv_data->msgs = msgs;
drv_data->num_msgs = num;
- if (mv64xxx_i2c_can_offload(drv_data))
+ if (mv64xxx_i2c_can_offload(drv_data) && !drv_data->atomic)
rc = mv64xxx_i2c_offload_xfer(drv_data);
else
rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
@@ -747,8 +768,27 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
return ret;
}
+static int
+mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
+
+ drv_data->atomic = 0;
+ return mv64xxx_i2c_xfer_core(adap, msgs, num);
+}
+
+static int mv64xxx_i2c_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
+
+ drv_data->atomic = 1;
+ return mv64xxx_i2c_xfer_core(adap, msgs, num);
+}
+
static const struct i2c_algorithm mv64xxx_i2c_algo = {
.master_xfer = mv64xxx_i2c_xfer,
+ .master_xfer_atomic = mv64xxx_i2c_xfer_atomic,
.functionality = mv64xxx_i2c_functionality,
};
@@ -949,7 +989,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
if (IS_ERR(drv_data->reg_base))
return PTR_ERR(drv_data->reg_base);
- strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
+ strscpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
sizeof(drv_data->adapter.name));
init_waitqueue_head(&drv_data->waitq);
@@ -1047,14 +1087,6 @@ mv64xxx_i2c_remove(struct platform_device *pd)
return 0;
}
-static void
-mv64xxx_i2c_shutdown(struct platform_device *pd)
-{
- pm_runtime_disable(&pd->dev);
- if (!pm_runtime_status_suspended(&pd->dev))
- mv64xxx_i2c_runtime_suspend(&pd->dev);
-}
-
static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(mv64xxx_i2c_runtime_suspend,
mv64xxx_i2c_runtime_resume, NULL)
@@ -1065,7 +1097,6 @@ static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
.remove = mv64xxx_i2c_remove,
- .shutdown = mv64xxx_i2c_shutdown,
.driver = {
.name = MV64XXX_I2C_CTLR_NAME,
.pm = &mv64xxx_i2c_pm_ops,
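
The mv64xxx additions implement master_xfer_atomic(), which the I2C core uses when it cannot sleep or rely on interrupts, for instance a PMIC write late in shutdown. A small sketch of the bounded polling wait such a path needs, under hypothetical foo_ names:

/* Sketch only: hypothetical foo_ polling wait, not the mv64xxx code. */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct foo_xfer {
	bool busy;			/* cleared by the interrupt logic */
	unsigned int timeout_ms;
};

static int foo_poll_done(struct foo_xfer *xfer,
			 void (*poll_irq)(struct foo_xfer *xfer))
{
	ktime_t deadline = ktime_add_ms(ktime_get(), xfer->timeout_ms);

	while (READ_ONCE(xfer->busy)) {
		if (ktime_compare(ktime_get(), deadline) >= 0)
			return -ETIMEDOUT;
		udelay(5);
		/* The IRQ is not delivered here, so run its handler by hand. */
		poll_irq(xfer);
	}

	return 0;
}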
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 864a3f1bd4e1..5af5cffc444e 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -799,7 +799,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
- i2c->dev_type = (enum mxs_i2c_devtype)of_device_get_match_data(&pdev->dev);
+ i2c->dev_type = (uintptr_t)of_device_get_match_data(&pdev->dev);
i2c->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c->regs))
@@ -838,7 +838,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
adap = &i2c->adapter;
- strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &mxs_i2c_algo;
adap->quirks = &mxs_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index aede9d551130..0c365b57d957 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -17,6 +17,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -91,7 +92,6 @@ enum i2c_addr {
/* init register and default value required to enable module */
#define NPCM_I2CSEGCTL 0xE4
-#define NPCM_I2CSEGCTL_INIT_VAL 0x0333F000
/* Common regs */
#define NPCM_I2CSDA 0x00
@@ -123,11 +123,11 @@ enum i2c_addr {
* Since the addr regs are sprinkled all over the address space,
* use this array to get the address or each register.
*/
-#define I2C_NUM_OWN_ADDR 10
+#define I2C_NUM_OWN_ADDR 2
+#define I2C_NUM_OWN_ADDR_SUPPORTED 2
+
static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
- NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4,
- NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8,
- NPCM_I2CADDR9, NPCM_I2CADDR10,
+ NPCM_I2CADDR1, NPCM_I2CADDR2,
};
#endif
@@ -226,8 +226,7 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
#define NPCM_I2CFIF_CTS_CLR_FIFO BIT(6)
#define NPCM_I2CFIF_CTS_SLVRSTR BIT(7)
-/* NPCM_I2CTXF_CTL reg fields */
-#define NPCM_I2CTXF_CTL_TX_THR GENMASK(4, 0)
+/* NPCM_I2CTXF_CTL reg field */
#define NPCM_I2CTXF_CTL_THR_TXIE BIT(6)
/* NPCM_I2CT_OUT reg fields */
@@ -236,22 +235,18 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
#define NPCM_I2CT_OUT_T_OUTST BIT(7)
/* NPCM_I2CTXF_STS reg fields */
-#define NPCM_I2CTXF_STS_TX_BYTES GENMASK(4, 0)
#define NPCM_I2CTXF_STS_TX_THST BIT(6)
/* NPCM_I2CRXF_STS reg fields */
-#define NPCM_I2CRXF_STS_RX_BYTES GENMASK(4, 0)
#define NPCM_I2CRXF_STS_RX_THST BIT(6)
/* NPCM_I2CFIF_CTL reg fields */
#define NPCM_I2CFIF_CTL_FIFO_EN BIT(4)
/* NPCM_I2CRXF_CTL reg fields */
-#define NPCM_I2CRXF_CTL_RX_THR GENMASK(4, 0)
-#define NPCM_I2CRXF_CTL_LAST_PEC BIT(5)
#define NPCM_I2CRXF_CTL_THR_RXIE BIT(6)
-#define I2C_HW_FIFO_SIZE 16
+#define MAX_I2C_HW_FIFO_SIZE 32
/* I2C_VER reg fields */
#define I2C_VER_VERSION GENMASK(6, 0)
@@ -268,11 +263,36 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
#define I2C_FREQ_MIN_HZ 10000
#define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ
+struct npcm_i2c_data {
+ u8 fifo_size;
+ u32 segctl_init_val;
+ u8 txf_sts_tx_bytes;
+ u8 rxf_sts_rx_bytes;
+ u8 rxf_ctl_last_pec;
+};
+
+static const struct npcm_i2c_data npxm7xx_i2c_data = {
+ .fifo_size = 16,
+ .segctl_init_val = 0x0333F000,
+ .txf_sts_tx_bytes = GENMASK(4, 0),
+ .rxf_sts_rx_bytes = GENMASK(4, 0),
+ .rxf_ctl_last_pec = BIT(5),
+};
+
+static const struct npcm_i2c_data npxm8xx_i2c_data = {
+ .fifo_size = 32,
+ .segctl_init_val = 0x9333F000,
+ .txf_sts_tx_bytes = GENMASK(5, 0),
+ .rxf_sts_rx_bytes = GENMASK(5, 0),
+ .rxf_ctl_last_pec = BIT(7),
+};
+
/* Status of one I2C module */
struct npcm_i2c {
struct i2c_adapter adap;
struct device *dev;
unsigned char __iomem *reg;
+ const struct npcm_i2c_data *data;
spinlock_t lock; /* IRQ synchronization */
struct completion cmd_complete;
int cmd_err;
@@ -305,8 +325,8 @@ struct npcm_i2c {
int slv_rd_ind;
int slv_wr_size;
int slv_wr_ind;
- u8 slv_rd_buf[I2C_HW_FIFO_SIZE];
- u8 slv_wr_buf[I2C_HW_FIFO_SIZE];
+ u8 slv_rd_buf[MAX_I2C_HW_FIFO_SIZE];
+ u8 slv_wr_buf[MAX_I2C_HW_FIFO_SIZE];
#endif
struct dentry *debugfs; /* debugfs device directory */
u64 ber_cnt;
@@ -392,14 +412,10 @@ static void npcm_i2c_disable(struct npcm_i2c *bus)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i;
- /* select bank 0 for I2C addresses */
- npcm_i2c_select_bank(bus, I2C_BANK_0);
-
/* Slave addresses removal */
- for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++)
+ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++)
iowrite8(0, bus->reg + npcm_i2caddr[i]);
- npcm_i2c_select_bank(bus, I2C_BANK_1);
#endif
/* Disable module */
i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2);
@@ -443,7 +459,7 @@ static inline bool npcm_i2c_tx_fifo_empty(struct npcm_i2c *bus)
tx_fifo_sts = ioread8(bus->reg + NPCM_I2CTXF_STS);
/* check if TX FIFO is not empty */
- if ((tx_fifo_sts & NPCM_I2CTXF_STS_TX_BYTES) == 0)
+ if ((tx_fifo_sts & bus->data->txf_sts_tx_bytes) == 0)
return false;
/* check if TX FIFO status bit is set: */
@@ -456,7 +472,7 @@ static inline bool npcm_i2c_rx_fifo_full(struct npcm_i2c *bus)
rx_fifo_sts = ioread8(bus->reg + NPCM_I2CRXF_STS);
/* check if RX FIFO is not empty: */
- if ((rx_fifo_sts & NPCM_I2CRXF_STS_RX_BYTES) == 0)
+ if ((rx_fifo_sts & bus->data->rxf_sts_rx_bytes) == 0)
return false;
/* check if rx fifo full status is set: */
@@ -604,8 +620,7 @@ static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type,
i2cctl1 &= ~NPCM_I2CCTL1_GCMEN;
iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1);
return 0;
- }
- if (addr_type == I2C_ARP_ADDR) {
+ } else if (addr_type == I2C_ARP_ADDR) {
i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3);
if (enable)
i2cctl3 |= I2CCTL3_ARPMEN;
@@ -614,16 +629,16 @@ static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type,
iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3);
return 0;
}
+ if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
+ dev_err(bus->dev, "try to enable more than 2 SA not supported\n");
+
if (addr_type >= I2C_ARP_ADDR)
return -EFAULT;
- /* select bank 0 for address 3 to 10 */
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_0);
+
/* Set and enable the address */
iowrite8(sa_reg, bus->reg + npcm_i2caddr[addr_type]);
npcm_i2c_slave_int_enable(bus, enable);
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_1);
+
return 0;
}
#endif
@@ -665,7 +680,7 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
}
#endif
- /* clear status bits for spurious interrupts */
+ /* Clear status bits for spurious interrupts */
npcm_i2c_clear_master_status(bus);
bus->state = I2C_IDLE;
@@ -744,11 +759,11 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
{
if (bus->operation == I2C_WRITE_OPER)
- return FIELD_GET(NPCM_I2CTXF_STS_TX_BYTES,
- ioread8(bus->reg + NPCM_I2CTXF_STS));
+ return (bus->data->txf_sts_tx_bytes &
+ ioread8(bus->reg + NPCM_I2CTXF_STS));
if (bus->operation == I2C_READ_OPER)
- return FIELD_GET(NPCM_I2CRXF_STS_RX_BYTES,
- ioread8(bus->reg + NPCM_I2CRXF_STS));
+ return (bus->data->rxf_sts_rx_bytes &
+ ioread8(bus->reg + NPCM_I2CRXF_STS));
return 0;
}
@@ -760,13 +775,13 @@ static void npcm_i2c_write_to_fifo_master(struct npcm_i2c *bus, u16 max_bytes)
* Fill the FIFO, while the FIFO is not full and there are more bytes
* to write
*/
- size_free_fifo = I2C_HW_FIFO_SIZE - npcm_i2c_fifo_usage(bus);
+ size_free_fifo = bus->data->fifo_size - npcm_i2c_fifo_usage(bus);
while (max_bytes-- && size_free_fifo) {
if (bus->wr_ind < bus->wr_size)
npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]);
else
npcm_i2c_wr_byte(bus, 0xFF);
- size_free_fifo = I2C_HW_FIFO_SIZE - npcm_i2c_fifo_usage(bus);
+ size_free_fifo = bus->data->fifo_size - npcm_i2c_fifo_usage(bus);
}
}
@@ -787,11 +802,11 @@ static void npcm_i2c_set_fifo(struct npcm_i2c *bus, int nread, int nwrite)
/* configure RX FIFO */
if (nread > 0) {
- rxf_ctl = min_t(int, nread, I2C_HW_FIFO_SIZE);
+ rxf_ctl = min_t(int, nread, bus->data->fifo_size);
/* set LAST bit. if LAST is set next FIFO packet is nacked */
- if (nread <= I2C_HW_FIFO_SIZE)
- rxf_ctl |= NPCM_I2CRXF_CTL_LAST_PEC;
+ if (nread <= bus->data->fifo_size)
+ rxf_ctl |= bus->data->rxf_ctl_last_pec;
/*
* if we are about to read the first byte in blk rd mode,
@@ -809,9 +824,9 @@ static void npcm_i2c_set_fifo(struct npcm_i2c *bus, int nread, int nwrite)
/* configure TX FIFO */
if (nwrite > 0) {
- if (nwrite > I2C_HW_FIFO_SIZE)
+ if (nwrite > bus->data->fifo_size)
 			/* data to send is more than FIFO size. */
- iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CTXF_CTL);
+ iowrite8(bus->data->fifo_size, bus->reg + NPCM_I2CTXF_CTL);
else
iowrite8(nwrite, bus->reg + NPCM_I2CTXF_CTL);
@@ -846,15 +861,11 @@ static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type)
{
u8 slave_add;
- /* select bank 0 for address 3 to 10 */
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_0);
+ if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
+ dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n");
slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_1);
-
return slave_add;
}
@@ -864,12 +875,12 @@ static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add)
/* Set the enable bit */
slave_add |= 0x80;
- npcm_i2c_select_bank(bus, I2C_BANK_0);
- for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) {
+
+ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++) {
if (ioread8(bus->reg + npcm_i2caddr[i]) == slave_add)
iowrite8(0, bus->reg + npcm_i2caddr[i]);
}
- npcm_i2c_select_bank(bus, I2C_BANK_1);
+
return 0;
}
@@ -882,13 +893,13 @@ static void npcm_i2c_write_fifo_slave(struct npcm_i2c *bus, u16 max_bytes)
npcm_i2c_clear_fifo_int(bus);
npcm_i2c_clear_tx_fifo(bus);
iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
- while (max_bytes-- && I2C_HW_FIFO_SIZE != npcm_i2c_fifo_usage(bus)) {
+ while (max_bytes-- && bus->data->fifo_size != npcm_i2c_fifo_usage(bus)) {
if (bus->slv_wr_size <= 0)
break;
- bus->slv_wr_ind = bus->slv_wr_ind % I2C_HW_FIFO_SIZE;
+ bus->slv_wr_ind = bus->slv_wr_ind & (bus->data->fifo_size - 1);
npcm_i2c_wr_byte(bus, bus->slv_wr_buf[bus->slv_wr_ind]);
bus->slv_wr_ind++;
- bus->slv_wr_ind = bus->slv_wr_ind % I2C_HW_FIFO_SIZE;
+ bus->slv_wr_ind = bus->slv_wr_ind & (bus->data->fifo_size - 1);
bus->slv_wr_size--;
}
}
@@ -903,7 +914,7 @@ static void npcm_i2c_read_fifo_slave(struct npcm_i2c *bus, u8 bytes_in_fifo)
while (bytes_in_fifo--) {
data = npcm_i2c_rd_byte(bus);
- bus->slv_rd_ind = bus->slv_rd_ind % I2C_HW_FIFO_SIZE;
+ bus->slv_rd_ind = bus->slv_rd_ind & (bus->data->fifo_size - 1);
bus->slv_rd_buf[bus->slv_rd_ind] = data;
bus->slv_rd_ind++;
@@ -921,16 +932,20 @@ static int npcm_i2c_slave_get_wr_buf(struct npcm_i2c *bus)
int ret = bus->slv_wr_ind;
/* fill a cyclic buffer */
- for (i = 0; i < I2C_HW_FIFO_SIZE; i++) {
- if (bus->slv_wr_size >= I2C_HW_FIFO_SIZE)
+ for (i = 0; i < bus->data->fifo_size; i++) {
+ if (bus->slv_wr_size >= bus->data->fifo_size)
break;
- i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
- ind = (bus->slv_wr_ind + bus->slv_wr_size) % I2C_HW_FIFO_SIZE;
+ if (bus->state == I2C_SLAVE_MATCH) {
+ i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
+ bus->state = I2C_OPER_STARTED;
+ } else {
+ i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ }
+ ind = (bus->slv_wr_ind + bus->slv_wr_size) & (bus->data->fifo_size - 1);
bus->slv_wr_buf[ind] = value;
bus->slv_wr_size++;
- i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value);
}
- return I2C_HW_FIFO_SIZE - ret;
+ return bus->data->fifo_size - ret;
}
static void npcm_i2c_slave_send_rd_buf(struct npcm_i2c *bus)
@@ -965,7 +980,7 @@ static void npcm_i2c_slave_receive(struct npcm_i2c *bus, u16 nread,
bus->slv_rd_ind = 0;
iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
- iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CRXF_CTL);
+ iowrite8(bus->data->fifo_size, bus->reg + NPCM_I2CRXF_CTL);
npcm_i2c_clear_tx_fifo(bus);
npcm_i2c_clear_rx_fifo(bus);
}
@@ -976,7 +991,6 @@ static void npcm_i2c_slave_xmit(struct npcm_i2c *bus, u16 nwrite,
if (nwrite == 0)
return;
- bus->state = I2C_OPER_STARTED;
bus->operation = I2C_WRITE_OPER;
/* get the next buffer */
@@ -999,12 +1013,12 @@ static void npcm_i2c_slave_wr_buf_sync(struct npcm_i2c *bus)
{
int left_in_fifo;
- left_in_fifo = FIELD_GET(NPCM_I2CTXF_STS_TX_BYTES,
- ioread8(bus->reg + NPCM_I2CTXF_STS));
+ left_in_fifo = bus->data->txf_sts_tx_bytes &
+ ioread8(bus->reg + NPCM_I2CTXF_STS);
/* fifo already full: */
- if (left_in_fifo >= I2C_HW_FIFO_SIZE ||
- bus->slv_wr_size >= I2C_HW_FIFO_SIZE)
+ if (left_in_fifo >= bus->data->fifo_size ||
+ bus->slv_wr_size >= bus->data->fifo_size)
return;
/* update the wr fifo index back to the untransmitted bytes: */
@@ -1012,7 +1026,7 @@ static void npcm_i2c_slave_wr_buf_sync(struct npcm_i2c *bus)
bus->slv_wr_size = bus->slv_wr_size + left_in_fifo;
if (bus->slv_wr_ind < 0)
- bus->slv_wr_ind += I2C_HW_FIFO_SIZE;
+ bus->slv_wr_ind += bus->data->fifo_size;
}
static void npcm_i2c_slave_rd_wr(struct npcm_i2c *bus)
@@ -1158,7 +1172,7 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus)
npcm_i2c_clear_rx_fifo(bus);
npcm_i2c_clear_tx_fifo(bus);
iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
- iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CRXF_CTL);
+ iowrite8(bus->data->fifo_size, bus->reg + NPCM_I2CRXF_CTL);
if (NPCM_I2CST_XMIT & i2cst) {
bus->operation = I2C_WRITE_OPER;
} else {
@@ -1238,7 +1252,7 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus)
} /* SDAST */
/*
- * if irq is not one of the above, make sure EOB is disabled and all
+ * If irq is not one of the above, make sure EOB is disabled and all
* status bits are cleared.
*/
if (ret == IRQ_NONE) {
@@ -1319,8 +1333,8 @@ static void npcm_i2c_master_fifo_read(struct npcm_i2c *bus)
 	 * read == FIFO Size + C (where C < FIFO Size) then first read C bytes
* and in the next int we read rest of the data.
*/
- if (rcount < (2 * I2C_HW_FIFO_SIZE) && rcount > I2C_HW_FIFO_SIZE)
- fifo_bytes = rcount - I2C_HW_FIFO_SIZE;
+ if (rcount < (2 * bus->data->fifo_size) && rcount > bus->data->fifo_size)
+ fifo_bytes = rcount - bus->data->fifo_size;
if (rcount <= fifo_bytes) {
/* last bytes are about to be read - end of tx */
@@ -1492,7 +1506,7 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
npcm_i2c_clear_master_status(bus);
readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val,
!(val & NPCM_I2CCST_BUSY), 10, 200);
- /* verify no status bits are still set after bus is released */
+ /* Verify no status bits are still set after bus is released */
npcm_i2c_clear_master_status(bus);
}
bus->state = I2C_IDLE;
@@ -1960,7 +1974,7 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
npcm_i2c_reset(bus);
- /* check HW is OK: SDA and SCL should be high at this point. */
+ /* Check HW is OK: SDA and SCL should be high at this point. */
if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
@@ -2020,7 +2034,7 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
#endif
- /* clear status bits for spurious interrupts */
+ /* Clear status bits for spurious interrupts */
npcm_i2c_clear_master_status(bus);
return IRQ_HANDLED;
@@ -2199,10 +2213,10 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
* It cannot be cleared without resetting the module.
*/
else if (bus->cmd_err &&
- (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
+ (bus->data->rxf_ctl_last_pec & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
npcm_i2c_reset(bus);
- /* after any xfer, successful or not, stall and EOB must be disabled */
+ /* After any xfer, successful or not, stall and EOB must be disabled */
npcm_i2c_stall_after_start(bus, false);
npcm_i2c_eob_int(bus, false);
@@ -2268,6 +2282,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
static struct regmap *gcr_regmap;
+ struct device *dev = &pdev->dev;
struct i2c_adapter *adap;
struct npcm_i2c *bus;
struct clk *i2c_clk;
@@ -2280,6 +2295,12 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
bus->dev = &pdev->dev;
+ bus->data = of_device_get_match_data(dev);
+ if (!bus->data) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
bus->num = of_alias_get_id(pdev->dev.of_node, "i2c");
/* core clk must be acquired to calculate module timing settings */
i2c_clk = devm_clk_get(&pdev->dev, NULL);
@@ -2293,7 +2314,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (IS_ERR(gcr_regmap))
return PTR_ERR(gcr_regmap);
- regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL);
+ regmap_write(gcr_regmap, NPCM_I2CSEGCTL, bus->data->segctl_init_val);
bus->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bus->reg))
@@ -2355,7 +2376,8 @@ static int npcm_i2c_remove_bus(struct platform_device *pdev)
}
static const struct of_device_id npcm_i2c_bus_of_table[] = {
- { .compatible = "nuvoton,npcm750-i2c", },
+ { .compatible = "nuvoton,npcm750-i2c", .data = &npxm7xx_i2c_data },
+ { .compatible = "nuvoton,npcm845-i2c", .data = &npxm8xx_i2c_data },
{}
};
MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table);
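
The npcm7xx rework hangs every SoC difference (FIFO depth, status field masks, segment init value) off the OF match data rather than compile-time constants. The generic shape of that pattern as a stand-alone sketch, with hypothetical foo_ and vendor names:

/* Sketch only: hypothetical foo_ driver, not the npcm7xx code above. */
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_variant {
	u8 fifo_size;
	u8 rx_bytes_mask;
};

static const struct foo_variant foo_v1 = { .fifo_size = 16, .rx_bytes_mask = 0x1f };
static const struct foo_variant foo_v2 = { .fifo_size = 32, .rx_bytes_mask = 0x3f };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2 },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;		/* matched without data: driver bug */

	dev_info(&pdev->dev, "FIFO depth %d\n", variant->fifo_size);
	return 0;
}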
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 6920c1b9a126..12e330cd7635 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -299,7 +299,7 @@ static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
i2c_set_adapdata(&i2cd->adapter, i2cd);
i2cd->adapter.owner = THIS_MODULE;
- strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ strscpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
sizeof(i2cd->adapter.name));
i2cd->adapter.algo = &gpu_i2c_algorithm;
i2cd->adapter.quirks = &gpu_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d4f6c6d60683..f9ae520aed22 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1488,7 +1488,7 @@ omap_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, omap);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->quirks = &omap_i2c_quirks;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 6eb0f50c5d28..9f773b4f5ed8 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -220,9 +220,9 @@ static int i2c_opal_probe(struct platform_device *pdev)
adapter->dev.of_node = of_node_get(pdev->dev.of_node);
pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
if (pname)
- strlcpy(adapter->name, pname, sizeof(adapter->name));
+ strscpy(adapter->name, pname, sizeof(adapter->name));
else
- strlcpy(adapter->name, "opal", sizeof(adapter->name));
+ strscpy(adapter->name, "opal", sizeof(adapter->name));
platform_set_drvdata(pdev, adapter);
rc = i2c_add_adapter(adapter);
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 231145c48728..0af86a542568 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -308,7 +308,7 @@ static void i2c_parport_attach(struct parport *port)
/* Fill the rest of the structure */
adapter->adapter.owner = THIS_MODULE;
adapter->adapter.class = I2C_CLASS_HWMON;
- strlcpy(adapter->adapter.name, "Parallel port adapter",
+ strscpy(adapter->adapter.name, "Parallel port adapter",
sizeof(adapter->adapter.name));
adapter->algo_data = parport_algo_data;
/* Slow down if we can't sense SCL */
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 690188a9ffff..b605b6e43cb9 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1403,7 +1403,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(i2c->clk)) {
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 5c7cc862f08f..ea48e6a9cfca 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -541,6 +541,7 @@ static int cci_probe(struct platform_device *pdev)
return -ENOENT;
for_each_available_child_of_node(dev->of_node, child) {
+ struct cci_master *master;
u32 idx;
ret = of_property_read_u32(child, "reg", &idx);
@@ -555,27 +556,27 @@ static int cci_probe(struct platform_device *pdev)
continue;
}
- cci->master[idx].adap.quirks = &cci->data->quirks;
- cci->master[idx].adap.algo = &cci_algo;
- cci->master[idx].adap.dev.parent = dev;
- cci->master[idx].adap.dev.of_node = of_node_get(child);
- cci->master[idx].master = idx;
- cci->master[idx].cci = cci;
+ master = &cci->master[idx];
+ master->adap.quirks = &cci->data->quirks;
+ master->adap.algo = &cci_algo;
+ master->adap.dev.parent = dev;
+ master->adap.dev.of_node = of_node_get(child);
+ master->master = idx;
+ master->cci = cci;
- i2c_set_adapdata(&cci->master[idx].adap, &cci->master[idx]);
- snprintf(cci->master[idx].adap.name,
- sizeof(cci->master[idx].adap.name), "Qualcomm-CCI");
+ i2c_set_adapdata(&master->adap, master);
+ snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI");
- cci->master[idx].mode = I2C_MODE_STANDARD;
+ master->mode = I2C_MODE_STANDARD;
ret = of_property_read_u32(child, "clock-frequency", &val);
if (!ret) {
if (val == I2C_MAX_FAST_MODE_FREQ)
- cci->master[idx].mode = I2C_MODE_FAST;
+ master->mode = I2C_MODE_FAST;
else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
- cci->master[idx].mode = I2C_MODE_FAST_PLUS;
+ master->mode = I2C_MODE_FAST_PLUS;
}
- init_completion(&cci->master[idx].irq_complete);
+ init_completion(&master->irq_complete);
}
/* Memory */
@@ -725,6 +726,40 @@ static const struct cci_data cci_v1_data = {
},
};
+static const struct cci_data cci_v1_5_data = {
+ .num_masters = 2,
+ .queue_size = { 64, 16 },
+ .quirks = {
+ .max_write_len = 10,
+ .max_read_len = 12,
+ },
+ .cci_clk_rate = 19200000,
+ .params[I2C_MODE_STANDARD] = {
+ .thigh = 78,
+ .tlow = 114,
+ .tsu_sto = 28,
+ .tsu_sta = 28,
+ .thd_dat = 10,
+ .thd_sta = 77,
+ .tbuf = 118,
+ .scl_stretch_en = 0,
+ .trdhld = 6,
+ .tsp = 1
+ },
+ .params[I2C_MODE_FAST] = {
+ .thigh = 20,
+ .tlow = 28,
+ .tsu_sto = 21,
+ .tsu_sta = 21,
+ .thd_dat = 13,
+ .thd_sta = 18,
+ .tbuf = 32,
+ .scl_stretch_en = 0,
+ .trdhld = 6,
+ .tsp = 3
+ },
+};
+
static const struct cci_data cci_v2_data = {
.num_masters = 2,
.queue_size = { 64, 16 },
@@ -773,6 +808,7 @@ static const struct cci_data cci_v2_data = {
static const struct of_device_id cci_dt_match[] = {
{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
+ { .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 6ac402ea58fb..84a77512614d 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -97,6 +97,7 @@ struct geni_i2c_dev {
struct dma_chan *tx_c;
struct dma_chan *rx_c;
bool gpi_mode;
+ bool abort_done;
};
struct geni_i2c_err_log {
@@ -203,9 +204,18 @@ static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);
- if (err != NACK && err != GENI_ABORT_DONE) {
+ switch (err) {
+ case GENI_ABORT_DONE:
+ gi2c->abort_done = true;
+ break;
+ case NACK:
+ case GENI_TIMEOUT:
+ dev_dbg(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
+ break;
+ default:
dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
geni_i2c_err_misc(gi2c);
+ break;
}
}
@@ -311,21 +321,21 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c)
{
- u32 val;
unsigned long time_left = ABORT_TIMEOUT;
unsigned long flags;
spin_lock_irqsave(&gi2c->lock, flags);
geni_i2c_err(gi2c, GENI_TIMEOUT);
gi2c->cur = NULL;
+ gi2c->abort_done = false;
geni_se_abort_m_cmd(&gi2c->se);
spin_unlock_irqrestore(&gi2c->lock, flags);
+
do {
time_left = wait_for_completion_timeout(&gi2c->done, time_left);
- val = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
- } while (!(val & M_CMD_ABORT_EN) && time_left);
+ } while (!gi2c->abort_done && time_left);
- if (!(val & M_CMD_ABORT_EN))
+ if (!time_left)
dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
}
@@ -484,12 +494,12 @@ static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
{
if (tx_buf) {
dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
- i2c_put_dma_safe_msg_buf(tx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
}
if (rx_buf) {
dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
- i2c_put_dma_safe_msg_buf(rx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
}
}
@@ -553,6 +563,7 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
desc->callback_param = gi2c;
dmaengine_submit(desc);
+ *buf = dma_buf;
*dma_addr_p = addr;
return 0;
@@ -688,7 +699,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
pm_runtime_put_autosuspend(gi2c->se.dev);
gi2c->cur = NULL;
gi2c->err = 0;
- return num;
+ return ret;
}
static u32 geni_i2c_func(struct i2c_adapter *adap)
@@ -806,7 +817,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
- strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
ret = geni_icc_get(&gi2c->se, "qup-memory");
if (ret)
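
The geni hunks stop re-reading the M_IRQ status register after each wait and instead have the interrupt handler set a flag once the abort really completed. The wait loop below shows that completion-plus-flag pattern in isolation, with hypothetical foo_ names and an assumed one-second budget:

/* Sketch only: hypothetical foo_ wait loop, not the geni code above. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct foo_ctrl {
	struct completion done;
	bool abort_done;		/* set from the IRQ handler */
};

static int foo_wait_abort(struct foo_ctrl *ctrl)
{
	unsigned long time_left = msecs_to_jiffies(1000);

	/* Caller cleared ->abort_done before triggering the abort command. */
	do {
		time_left = wait_for_completion_timeout(&ctrl->done, time_left);
	} while (!ctrl->abort_done && time_left);

	return time_left ? 0 : -ETIMEDOUT;
}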
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 69e9f3ecf87d..2e153f2f71b6 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1878,7 +1878,7 @@ nodma:
qup->adap.dev.of_node = pdev->dev.of_node;
qup->is_last = true;
- strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
+ strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(qup->dev);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 6e7be9d9f504..cef82b205c26 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1076,7 +1076,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->bus_recovery_info = &rcar_i2c_bri;
adap->quirks = &rcar_i2c_quirks;
i2c_set_adapdata(adap, priv);
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
/* Init DMA */
sg_init_table(&priv->sg, 1);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index cded77e06670..ecba1dfc1278 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -448,7 +448,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
- strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
+ strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 989040a73626..2e98e7793bba 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1240,7 +1240,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
/* use common interface to get I2C timing properties */
i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
- strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &rk3x_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
new file mode 100644
index 000000000000..56d0faee5c46
--- /dev/null
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Renesas RZ/V2M I2C unit
+ *
+ * Copyright (C) 2016-2022 Renesas Electronics Corporation
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+/* Register offsets */
+#define IICB0DAT 0x00 /* Data Register */
+#define IICB0CTL0 0x08 /* Control Register 0 */
+#define IICB0TRG 0x0C /* Trigger Register */
+#define IICB0STR0 0x10 /* Status Register 0 */
+#define IICB0CTL1 0x20 /* Control Register 1 */
+#define IICB0WL 0x24 /* Low Level Width Setting Reg */
+#define IICB0WH			0x28	/* High Level Width Setting Reg */
+
+/* IICB0CTL0 */
+#define IICB0IICE BIT(7) /* I2C Enable */
+#define IICB0SLWT BIT(1) /* Interrupt Request Timing */
+#define IICB0SLAC BIT(0) /* Acknowledge */
+
+/* IICB0TRG */
+#define IICB0WRET BIT(2) /* Quit Wait Trigger */
+#define IICB0STT BIT(1) /* Create Start Condition Trigger */
+#define IICB0SPT BIT(0) /* Create Stop Condition Trigger */
+
+/* IICB0STR0 */
+#define IICB0SSAC BIT(8) /* Ack Flag */
+#define IICB0SSBS BIT(6) /* Bus Flag */
+#define IICB0SSSP BIT(4) /* Stop Condition Flag */
+
+/* IICB0CTL1 */
+#define IICB0MDSC BIT(7) /* Bus Mode */
+#define IICB0SLSE BIT(1) /* Start condition output */
+
+#define bit_setl(addr, val) writel(readl(addr) | (val), (addr))
+#define bit_clrl(addr, val) writel(readl(addr) & ~(val), (addr))
+
+struct rzv2m_i2c_priv {
+ void __iomem *base;
+ struct i2c_adapter adap;
+ struct clk *clk;
+ int bus_mode;
+ struct completion msg_tia_done;
+ u32 iicb0wl;
+ u32 iicb0wh;
+};
+
+enum bcr_index {
+ RZV2M_I2C_100K = 0,
+ RZV2M_I2C_400K,
+};
+
+struct bitrate_config {
+ unsigned int percent_low;
+ unsigned int min_hold_time_ns;
+};
+
+static const struct bitrate_config bitrate_configs[] = {
+ [RZV2M_I2C_100K] = { 47, 3450 },
+ [RZV2M_I2C_400K] = { 52, 900 },
+};
+
+static irqreturn_t rzv2m_i2c_tia_irq_handler(int this_irq, void *dev_id)
+{
+ struct rzv2m_i2c_priv *priv = dev_id;
+
+ complete(&priv->msg_tia_done);
+
+ return IRQ_HANDLED;
+}
+
+/* Calculate IICB0WL and IICB0WH */
+static int rzv2m_i2c_clock_calculate(struct device *dev,
+ struct rzv2m_i2c_priv *priv)
+{
+ const struct bitrate_config *config;
+ unsigned int hold_time_ns;
+ unsigned int total_pclks;
+ unsigned int trf_pclks;
+ unsigned long pclk_hz;
+ struct i2c_timings t;
+ u32 trf_ns;
+
+ i2c_parse_fw_timings(dev, &t, true);
+
+ pclk_hz = clk_get_rate(priv->clk);
+ total_pclks = pclk_hz / t.bus_freq_hz;
+
+ trf_ns = t.scl_rise_ns + t.scl_fall_ns;
+ trf_pclks = mul_u64_u32_div(pclk_hz, trf_ns, NSEC_PER_SEC);
+
+ /* Config setting */
+ switch (t.bus_freq_hz) {
+ case I2C_MAX_FAST_MODE_FREQ:
+ priv->bus_mode = RZV2M_I2C_400K;
+ break;
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ priv->bus_mode = RZV2M_I2C_100K;
+ break;
+ default:
+ dev_err(dev, "transfer speed is invalid\n");
+ return -EINVAL;
+ }
+ config = &bitrate_configs[priv->bus_mode];
+
+ /* IICB0WL = (percent_low / Transfer clock) x PCLK */
+ priv->iicb0wl = total_pclks * config->percent_low / 100;
+ if (priv->iicb0wl > (BIT(10) - 1))
+ return -EINVAL;
+
+ /* IICB0WH = ((percent_high / Transfer clock) x PCLK) - (tR + tF) */
+ priv->iicb0wh = total_pclks - priv->iicb0wl - trf_pclks;
+ if (priv->iicb0wh > (BIT(10) - 1))
+ return -EINVAL;
+
+ /*
+ * Data hold time must be less than 0.9us in fast mode and
+ * 3.45us in standard mode.
+ * Data hold time = IICB0WL[9:2] / PCLK
+ */
+ hold_time_ns = div64_ul((u64)(priv->iicb0wl >> 2) * NSEC_PER_SEC, pclk_hz);
+ if (hold_time_ns > config->min_hold_time_ns) {
+ dev_err(dev, "data hold time %dns is over %dns\n",
+ hold_time_ns, config->min_hold_time_ns);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rzv2m_i2c_init(struct rzv2m_i2c_priv *priv)
+{
+ u32 i2c_ctl0;
+ u32 i2c_ctl1;
+
+ /* i2c disable */
+ writel(0, priv->base + IICB0CTL0);
+
+ /* IICB0CTL1 setting */
+ i2c_ctl1 = IICB0SLSE;
+ if (priv->bus_mode == RZV2M_I2C_400K)
+ i2c_ctl1 |= IICB0MDSC;
+ writel(i2c_ctl1, priv->base + IICB0CTL1);
+
+ /* IICB0WL IICB0WH setting */
+ writel(priv->iicb0wl, priv->base + IICB0WL);
+ writel(priv->iicb0wh, priv->base + IICB0WH);
+
+ /* i2c enable after setting */
+ i2c_ctl0 = IICB0SLWT | IICB0SLAC | IICB0IICE;
+ writel(i2c_ctl0, priv->base + IICB0CTL0);
+}
+
+static int rzv2m_i2c_write_with_ack(struct rzv2m_i2c_priv *priv, u32 data)
+{
+ unsigned long time_left;
+
+ reinit_completion(&priv->msg_tia_done);
+
+ writel(data, priv->base + IICB0DAT);
+
+ time_left = wait_for_completion_timeout(&priv->msg_tia_done,
+ priv->adap.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* Confirm ACK */
+ if ((readl(priv->base + IICB0STR0) & IICB0SSAC) != IICB0SSAC)
+ return -ENXIO;
+
+ return 0;
+}
+
+static int rzv2m_i2c_read_with_ack(struct rzv2m_i2c_priv *priv, u8 *data,
+ bool last)
+{
+ unsigned long time_left;
+ u32 data_tmp;
+
+ reinit_completion(&priv->msg_tia_done);
+
+ /* Interrupt request timing: 8th clock */
+ bit_clrl(priv->base + IICB0CTL0, IICB0SLWT);
+
+ /* Exit the wait state */
+ writel(IICB0WRET, priv->base + IICB0TRG);
+
+ /* Wait for transaction */
+ time_left = wait_for_completion_timeout(&priv->msg_tia_done,
+ priv->adap.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ if (last) {
+ /* Disable ACK */
+ bit_clrl(priv->base + IICB0CTL0, IICB0SLAC);
+
+ /* Read data */
+ data_tmp = readl(priv->base + IICB0DAT);
+
+ /* Interrupt request timing: 9th clock */
+ bit_setl(priv->base + IICB0CTL0, IICB0SLWT);
+
+ /* Exit the wait state */
+ writel(IICB0WRET, priv->base + IICB0TRG);
+
+ /* Wait for transaction */
+ time_left = wait_for_completion_timeout(&priv->msg_tia_done,
+ priv->adap.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* Enable ACK */
+ bit_setl(priv->base + IICB0CTL0, IICB0SLAC);
+ } else {
+ /* Read data */
+ data_tmp = readl(priv->base + IICB0DAT);
+ }
+
+ *data = data_tmp;
+
+ return 0;
+}
+
+static int rzv2m_i2c_send(struct rzv2m_i2c_priv *priv, struct i2c_msg *msg,
+ unsigned int *count)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < msg->len; i++) {
+ ret = rzv2m_i2c_write_with_ack(priv, msg->buf[i]);
+ if (ret < 0)
+ return ret;
+ }
+ *count = i;
+
+ return 0;
+}
+
+static int rzv2m_i2c_receive(struct rzv2m_i2c_priv *priv, struct i2c_msg *msg,
+ unsigned int *count)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < msg->len; i++) {
+ ret = rzv2m_i2c_read_with_ack(priv, &msg->buf[i],
+ (msg->len - 1) == i);
+ if (ret < 0)
+ return ret;
+ }
+ *count = i;
+
+ return 0;
+}
+
+static int rzv2m_i2c_send_address(struct rzv2m_i2c_priv *priv,
+ struct i2c_msg *msg)
+{
+ u32 addr;
+ int ret;
+
+ if (msg->flags & I2C_M_TEN) {
+ /*
+ * 10-bit address
+ * addr_1: 5'b11110 | addr[9:8] | (R/nW)
+ * addr_2: addr[7:0]
+ */
+ addr = 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7);
+ addr |= !!(msg->flags & I2C_M_RD);
+ /* Send 1st address(extend code) */
+ ret = rzv2m_i2c_write_with_ack(priv, addr);
+ if (ret)
+ return ret;
+
+ /* Send 2nd address */
+ ret = rzv2m_i2c_write_with_ack(priv, msg->addr & 0xff);
+ } else {
+ /* 7-bit address */
+ addr = i2c_8bit_addr_from_msg(msg);
+ ret = rzv2m_i2c_write_with_ack(priv, addr);
+ }
+
+ return ret;
+}
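Illustration of the 10-bit framing implemented above, for an arbitrarily chosen target address (0x17a, read direction); the numbers only serve to show the bit layout:

#include <stdio.h>

int main(void)
{
	unsigned int target = 0x17a;					/* arbitrary 10-bit address */
	unsigned int addr_1 = 0xf0 | ((target & 0x300) >> 7) | 1;	/* 5'b11110 | addr[9:8] | R -> 0xf3 */
	unsigned int addr_2 = target & 0xff;				/* addr[7:0] -> 0x7a */

	printf("first byte 0x%02x, second byte 0x%02x\n", addr_1, addr_2);
	return 0;
}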
+
+static int rzv2m_i2c_stop_condition(struct rzv2m_i2c_priv *priv)
+{
+ u32 value;
+
+ /* Send stop condition */
+ writel(IICB0SPT, priv->base + IICB0TRG);
+ return readl_poll_timeout(priv->base + IICB0STR0,
+ value, value & IICB0SSSP,
+ 100, jiffies_to_usecs(priv->adap.timeout));
+}
+
+static int rzv2m_i2c_master_xfer_msg(struct rzv2m_i2c_priv *priv,
+ struct i2c_msg *msg, int stop)
+{
+ unsigned int count = 0;
+ int ret, read = !!(msg->flags & I2C_M_RD);
+
+ /* Send start condition */
+ writel(IICB0STT, priv->base + IICB0TRG);
+
+ ret = rzv2m_i2c_send_address(priv, msg);
+ if (!ret) {
+ if (read)
+ ret = rzv2m_i2c_receive(priv, msg, &count);
+ else
+ ret = rzv2m_i2c_send(priv, msg, &count);
+
+ if (!ret && stop)
+ ret = rzv2m_i2c_stop_condition(priv);
+ }
+
+ if (ret == -ENXIO)
+ rzv2m_i2c_stop_condition(priv);
+ else if (ret < 0)
+ rzv2m_i2c_init(priv);
+ else
+ ret = count;
+
+ return ret;
+}
+
+static int rzv2m_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct rzv2m_i2c_priv *priv = i2c_get_adapdata(adap);
+ struct device *dev = priv->adap.dev.parent;
+ unsigned int i;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ if (readl(priv->base + IICB0STR0) & IICB0SSBS) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* I2C main transfer */
+ for (i = 0; i < num; i++) {
+ ret = rzv2m_i2c_master_xfer_msg(priv, &msgs[i], i == (num - 1));
+ if (ret < 0)
+ goto out;
+ }
+ ret = num;
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static u32 rzv2m_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_adapter_quirks rzv2m_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
+static struct i2c_algorithm rzv2m_i2c_algo = {
+ .master_xfer = rzv2m_i2c_master_xfer,
+ .functionality = rzv2m_i2c_func,
+};
+
+static int rzv2m_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzv2m_i2c_priv *priv;
+ struct reset_control *rstc;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Can't get clock\n");
+
+ rstc = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc), "Missing reset ctrl\n");
+ /*
+ * The reset also affects other HW that is not under the control
+ * of Linux. Therefore, all we can do is deassert the reset.
+ */
+ reset_control_deassert(rstc);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, rzv2m_i2c_tia_irq_handler, 0,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to request irq %d\n", irq);
+
+ adap = &priv->adap;
+ adap->nr = pdev->id;
+ adap->algo = &rzv2m_i2c_algo;
+ adap->quirks = &rzv2m_i2c_quirks;
+ adap->dev.parent = dev;
+ adap->owner = THIS_MODULE;
+ device_set_node(&adap->dev, dev_fwnode(dev));
+ i2c_set_adapdata(adap, priv);
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
+ init_completion(&priv->msg_tia_done);
+
+ ret = rzv2m_i2c_clock_calculate(dev, priv);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ pm_runtime_get_sync(dev);
+ rzv2m_i2c_init(priv);
+ pm_runtime_put(dev);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret < 0)
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int rzv2m_i2c_remove(struct platform_device *pdev)
+{
+ struct rzv2m_i2c_priv *priv = platform_get_drvdata(pdev);
+ struct device *dev = priv->adap.dev.parent;
+
+ i2c_del_adapter(&priv->adap);
+ bit_clrl(priv->base + IICB0CTL0, IICB0IICE);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int rzv2m_i2c_suspend(struct device *dev)
+{
+ struct rzv2m_i2c_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ bit_clrl(priv->base + IICB0CTL0, IICB0IICE);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rzv2m_i2c_resume(struct device *dev)
+{
+ struct rzv2m_i2c_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = rzv2m_i2c_clock_calculate(dev, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ rzv2m_i2c_init(priv);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static const struct of_device_id rzv2m_i2c_ids[] = {
+ { .compatible = "renesas,rzv2m-i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rzv2m_i2c_ids);
+
+static const struct dev_pm_ops rzv2m_i2c_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(rzv2m_i2c_suspend, rzv2m_i2c_resume)
+};
+
+static struct platform_driver rzv2m_i2c_driver = {
+ .driver = {
+ .name = "rzv2m-i2c",
+ .of_match_table = rzv2m_i2c_ids,
+ .pm = pm_sleep_ptr(&rzv2m_i2c_pm_ops),
+ },
+ .probe = rzv2m_i2c_probe,
+ .remove = rzv2m_i2c_remove,
+};
+module_platform_driver(rzv2m_i2c_driver);
+
+MODULE_DESCRIPTION("RZ/V2M I2C bus driver");
+MODULE_AUTHOR("Renesas Electronics Corporation");
+MODULE_LICENSE("GPL");
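Once registered, the adapter behaves like any other numbered I2C bus; a minimal userspace sketch via i2c-dev (the device address 0x50, register 0x00 and the adapter number are all hypothetical) would look like this:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical adapter number */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("i2c transfer");
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}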
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b49a1b170bb2..36dab9cd208c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1076,7 +1076,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
else
s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
- strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 72f024a0c363..29330ee64c9c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -940,7 +940,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->nr = dev->id;
adap->dev.of_node = dev->dev.of_node;
- strlcpy(adap->name, dev->name, sizeof(adap->name));
+ strscpy(adap->name, dev->name, sizeof(adap->name));
spin_lock_init(&pd->lock);
init_waitqueue_head(&pd->wait);
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 458c7bcf1d24..87701744752f 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -99,7 +99,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
pd->adap.algo_data = &pd->bit;
pd->adap.dev.parent = &dev->dev;
- strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
+ strscpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
pd->bit.data = pd;
pd->bit.setsda = simtec_i2c_setsda;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 6d4aa64b195d..d1c59d83a65b 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -410,6 +410,12 @@ static const struct stm32f7_i2c_setup stm32mp15_setup = {
.fmp_clr_offset = 0x40,
};
+static const struct stm32f7_i2c_setup stm32mp13_setup = {
+ .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
+ .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
+ .fmp_clr_offset = 0x4,
+};
+
static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask)
{
writel_relaxed(readl_relaxed(reg) | mask, reg);
@@ -2468,6 +2474,7 @@ static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
static const struct of_device_id stm32f7_i2c_match[] = {
{ .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
{ .compatible = "st,stm32mp15-i2c", .data = &stm32mp15_setup},
+ { .compatible = "st,stm32mp13-i2c", .data = &stm32mp13_setup},
{},
};
MODULE_DEVICE_TABLE(of, stm32f7_i2c_match);
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index b4050f5b6746..b0f0120793e1 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -239,7 +239,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
dev_err(&serio->dev, "TAOS EVM identification failed\n");
goto exit_close;
}
- strlcpy(adapter->name, name, sizeof(adapter->name));
+ strscpy(adapter->name, name, sizeof(adapter->name));
/* Turn echo off for better performance */
taos->state = TAOS_STATE_EOFF;
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec0c7cad4240..95139985b2d5 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -305,7 +305,7 @@ static int tegra_bpmp_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.owner = THIS_MODULE;
- strlcpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
+ strscpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
sizeof(i2c->adapter.name));
i2c->adapter.algo = &tegra_bpmp_i2c_algo;
i2c->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2941e42aa6a0..031c78ac42e6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1825,7 +1825,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (i2c_dev->hw->supports_bus_clear)
i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info;
- strlcpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
+ strscpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
sizeof(i2c_dev->adapter.name));
err = i2c_add_numbered_adapter(&i2c_dev->adapter);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index cb4666c54a23..d7b622891e52 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -564,7 +564,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_fi2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_fi2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index ee00a44bf4c7..e3ebae381f08 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -358,7 +358,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_i2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_i2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 8d980b1374a8..1ab419f8fa52 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -79,7 +79,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
writel(SCL | SDA, i2c->base + I2C_CONTROLS);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo_data = &i2c->algo;
i2c->adap.dev.parent = &dev->dev;
i2c->adap.dev.of_node = dev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 88f5aafdce5b..7d4bc8736079 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -413,7 +413,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adapter;
i2c_set_adapdata(adap, i2c_dev);
- strlcpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &wmt_i2c_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 9a1c3f8b7048..b3fe6b2aa3ca 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -367,7 +367,7 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
}
}
-static void xiic_wakeup(struct xiic_i2c *i2c, int code)
+static void xiic_wakeup(struct xiic_i2c *i2c, enum xilinx_i2c_state code)
{
i2c->tx_msg = NULL;
i2c->rx_msg = NULL;
@@ -383,7 +383,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
u32 clr = 0;
int xfer_more = 0;
int wakeup_req = 0;
- int wakeup_code = 0;
+ enum xilinx_i2c_state wakeup_code = STATE_DONE;
int ret;
/* Get the interrupt Status from the IPIF. There is no clearing of
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index d43db2c3876e..91007558bcb2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -933,7 +933,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->init_irq = i2c_dev_irq_from_resources(info->resources,
info->num_resources);
- strlcpy(client->name, info->type, sizeof(client->name));
+ strscpy(client->name, info->type, sizeof(client->name));
status = i2c_check_addr_validity(client->addr, client->flags);
if (status) {
@@ -1023,15 +1023,9 @@ static int dummy_probe(struct i2c_client *client,
return 0;
}
-static int dummy_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static struct i2c_driver dummy_driver = {
.driver.name = "dummy",
.probe = dummy_probe,
- .remove = dummy_remove,
.id_table = dummy_id,
};
@@ -2467,8 +2461,9 @@ void i2c_put_adapter(struct i2c_adapter *adap)
if (!adap)
return;
- put_device(&adap->dev);
module_put(adap->owner);
+ /* Should be last, otherwise we risk use-after-free with 'adap' */
+ put_device(&adap->dev);
}
EXPORT_SYMBOL(i2c_put_adapter);
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 775332945ad0..8ba9b59a3c40 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -391,7 +391,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
unsigned short addr_list[2];
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, name, I2C_NAME_SIZE);
+ strscpy(info.type, name, I2C_NAME_SIZE);
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index d3acd8d66c32..33024acaac02 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -134,6 +134,7 @@ static int i2c_mux_probe(struct platform_device *pdev)
return 0;
err_children:
+ of_node_put(child);
i2c_mux_del_adapters(muxc);
err_parent:
i2c_put_adapter(parent);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 424ef470223d..3e101719689a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -47,13 +47,16 @@
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
+#include <asm/fpu/api.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -106,6 +109,17 @@ static unsigned int mwait_substates __initdata;
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
/*
+ * Disable IBRS across idle (when KERNEL_IBRS); this is exclusive with
+ * IRQ_ENABLE above.
+ */
+#define CPUIDLE_FLAG_IBRS BIT(16)
+
+/*
+ * Initialize large xstate for the C6-state entrance.
+ */
+#define CPUIDLE_FLAG_INIT_XSTATE BIT(17)
+
+/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -154,11 +168,42 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
- raw_local_irq_disable();
+
+ /*
+ * The lockdep hardirqs state may be changed to 'on' with timer
+ * tick interrupt followed by __do_softirq(). Use local_irq_disable()
+ * to keep the hardirqs state correct.
+ */
+ local_irq_disable();
+
+ return ret;
+}
+
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ bool smt_active = sched_smt_active();
+ u64 spec_ctrl = spec_ctrl_current();
+ int ret;
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+ ret = __intel_idle(dev, drv, index);
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
return ret;
}
+static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ fpu_idle_fpregs();
+ return __intel_idle(dev, drv, index);
+}
+
/**
* intel_idle_s2idle - Ask the processor to enter the given idle state.
* @dev: cpuidle device of the target CPU.
@@ -174,8 +219,12 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- unsigned long eax = flg2MWAIT(drv->states[index].flags);
unsigned long ecx = 1; /* break on interrupt flag */
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
+ fpu_idle_fpregs();
mwait_idle_with_hints(eax, ecx);
@@ -680,7 +729,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
@@ -688,7 +737,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C7s",
.desc = "MWAIT 0x33",
- .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
@@ -696,7 +745,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C8",
.desc = "MWAIT 0x40",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
@@ -704,7 +753,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C9",
.desc = "MWAIT 0x50",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
@@ -712,7 +761,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C10",
.desc = "MWAIT 0x60",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
@@ -741,7 +790,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
@@ -879,16 +928,6 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL }
};
-/*
- * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice
- * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in
- * MSR_IA32_POWER_CTL. But in this case there effectively no C1, because C1
- * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then
- * both C1 and C1E requests end up with C1, so there is effectively no C1E.
- *
- * By default we enable C1 and disable C1E by marking it with
- * 'CPUIDLE_FLAG_UNUSABLE'.
- */
static struct cpuidle_state spr_cstates[] __initdata = {
{
.name = "C1",
@@ -901,8 +940,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE |
- CPUIDLE_FLAG_UNUSABLE,
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
.enter = &intel_idle,
@@ -910,7 +948,8 @@ static struct cpuidle_state spr_cstates[] __initdata = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_INIT_XSTATE,
.exit_latency = 290,
.target_residency = 800,
.enter = &intel_idle,
@@ -1724,17 +1763,6 @@ static void __init spr_idle_state_table_update(void)
{
unsigned long long msr;
- /* Check if user prefers C1E over C1. */
- if ((preferred_states_mask & BIT(2)) &&
- !(preferred_states_mask & BIT(1))) {
- /* Disable C1 and enable C1E. */
- spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
- spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
-
- /* Enable C1E using the "C1E promotion" bit. */
- c1e_promotion = C1E_PROMOTION_ENABLE;
- }
-
/*
* By default, the C6 state assumes the worst-case scenario of package
* C6. However, if PC6 is disabled, we update the numbers to match
@@ -1819,6 +1847,15 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
drv->states[drv->state_count].enter = intel_idle_irq;
+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+ cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+ WARN_ON_ONCE(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE);
+ drv->states[drv->state_count].enter = intel_idle_ibrs;
+ }
+
+ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_INIT_XSTATE)
+ drv->states[drv->state_count].enter = intel_idle_xstate;
+
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
intel_idle_off_by_default(mwait_hint) &&
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index b53f010f3e40..35798712f811 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -204,6 +204,8 @@ config BMA220
config BMA400
tristate "Bosch BMA400 3-Axis Accelerometer Driver"
select REGMAP
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
select BMA400_I2C if I2C
select BMA400_SPI if SPI
help
diff --git a/drivers/iio/accel/adxl313_core.c b/drivers/iio/accel/adxl313_core.c
index 9e4193e64765..afeef779e1d0 100644
--- a/drivers/iio/accel/adxl313_core.c
+++ b/drivers/iio/accel/adxl313_core.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_NS_GPL(adxl313_writable_regs_table, IIO_ADXL313);
struct adxl313_data {
struct regmap *regmap;
struct mutex lock; /* lock to protect transf_buf */
- __le16 transf_buf ____cacheline_aligned;
+ __le16 transf_buf __aligned(IIO_DMA_MINALIGN);
};
static const int adxl313_odr_freqs[][2] = {
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index 7561399daef3..4bc648eac8b2 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -177,7 +177,7 @@ struct adxl355_data {
u8 buf[14];
s64 ts;
} buffer;
- } ____cacheline_aligned;
+ } __aligned(IIO_DMA_MINALIGN);
};
static int adxl355_set_op_mode(struct adxl355_data *data,
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 0289ed8cf2c6..47feb375b70b 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -179,7 +179,7 @@ struct adxl367_state {
unsigned int fifo_set_size;
unsigned int fifo_watermark;
- __be16 fifo_buf[ADXL367_FIFO_SIZE] ____cacheline_aligned;
+ __be16 fifo_buf[ADXL367_FIFO_SIZE] __aligned(IIO_DMA_MINALIGN);
__be16 sample_buf;
u8 act_threshold_buf[2];
u8 inact_time_buf[2];
@@ -447,21 +447,17 @@ static int adxl367_set_fifo_format(struct adxl367_state *st,
fifo_format));
}
-static int adxl367_set_fifo_samples(struct adxl367_state *st,
- unsigned int fifo_watermark,
- unsigned int fifo_set_size)
+static int adxl367_set_fifo_watermark(struct adxl367_state *st,
+ unsigned int fifo_watermark)
{
- unsigned int fifo_samples = fifo_watermark * fifo_set_size;
+ unsigned int fifo_samples = fifo_watermark * st->fifo_set_size;
unsigned int fifo_samples_h, fifo_samples_l;
int ret;
if (fifo_samples > ADXL367_FIFO_MAX_WATERMARK)
fifo_samples = ADXL367_FIFO_MAX_WATERMARK;
- if (fifo_set_size == 0)
- return 0;
-
- fifo_samples /= fifo_set_size;
+ fifo_samples /= st->fifo_set_size;
fifo_samples_h = FIELD_PREP(ADXL367_SAMPLES_H_MASK,
FIELD_GET(ADXL367_SAMPLES_VAL_H_MASK,
@@ -475,30 +471,8 @@ static int adxl367_set_fifo_samples(struct adxl367_state *st,
if (ret)
return ret;
- return regmap_update_bits(st->regmap, ADXL367_REG_FIFO_SAMPLES,
- ADXL367_SAMPLES_L_MASK, fifo_samples_l);
-}
-
-static int adxl367_set_fifo_set_size(struct adxl367_state *st,
- unsigned int fifo_set_size)
-{
- int ret;
-
- ret = adxl367_set_fifo_samples(st, st->fifo_watermark, fifo_set_size);
- if (ret)
- return ret;
-
- st->fifo_set_size = fifo_set_size;
-
- return 0;
-}
-
-static int adxl367_set_fifo_watermark(struct adxl367_state *st,
- unsigned int fifo_watermark)
-{
- int ret;
-
- ret = adxl367_set_fifo_samples(st, fifo_watermark, st->fifo_set_size);
+ ret = regmap_update_bits(st->regmap, ADXL367_REG_FIFO_SAMPLES,
+ ADXL367_SAMPLES_L_MASK, fifo_samples_l);
if (ret)
return ret;
@@ -1276,14 +1250,11 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev,
{
struct adxl367_state *st = iio_priv(indio_dev);
enum adxl367_fifo_format fifo_format;
- unsigned int fifo_set_size;
int ret;
if (!adxl367_find_mask_fifo_format(active_scan_mask, &fifo_format))
return -EINVAL;
- fifo_set_size = bitmap_weight(active_scan_mask, indio_dev->masklength);
-
mutex_lock(&st->lock);
ret = adxl367_set_measure_en(st, false);
@@ -1294,11 +1265,12 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev,
if (ret)
goto out;
- ret = adxl367_set_fifo_set_size(st, fifo_set_size);
+ ret = adxl367_set_measure_en(st, true);
if (ret)
goto out;
- ret = adxl367_set_measure_en(st, true);
+ st->fifo_set_size = bitmap_weight(active_scan_mask,
+ indio_dev->masklength);
out:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/accel/adxl367_spi.c b/drivers/iio/accel/adxl367_spi.c
index 26dfc821ebbe..118c894015a5 100644
--- a/drivers/iio/accel/adxl367_spi.c
+++ b/drivers/iio/accel/adxl367_spi.c
@@ -9,6 +9,8 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
#include "adxl367.h"
#define ADXL367_SPI_WRITE_COMMAND 0x0A
@@ -28,10 +30,10 @@ struct adxl367_spi_state {
struct spi_transfer fifo_xfer[2];
/*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
+ * DMA (thus cache coherency maintenance) may require the
+ * transfer buffers to live in their own cache lines.
*/
- u8 reg_write_tx_buf[1] ____cacheline_aligned;
+ u8 reg_write_tx_buf[1] __aligned(IIO_DMA_MINALIGN);
u8 reg_read_tx_buf[2];
u8 fifo_tx_buf[1];
};
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 74024d7ce5ac..fcbd695e4654 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -67,7 +67,7 @@ struct bma220_data {
/* Ensure timestamp is naturally aligned. */
s64 timestamp __aligned(8);
} scan;
- u8 tx_buf[2] ____cacheline_aligned;
+ u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec bma220_channels[] = {
@@ -289,20 +289,20 @@ static int bma220_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
-static __maybe_unused int bma220_suspend(struct device *dev)
+static int bma220_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
return bma220_power(spi, false);
}
-static __maybe_unused int bma220_resume(struct device *dev)
+static int bma220_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
return bma220_power(spi, true);
}
-static SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
static const struct spi_device_id bma220_spi_id[] = {
{"bma220", 0},
@@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(spi, bma220_spi_id);
static struct spi_driver bma220_driver = {
.driver = {
.name = "bma220_spi",
- .pm = &bma220_pm_ops,
+ .pm = pm_sleep_ptr(&bma220_pm_ops),
.acpi_match_table = bma220_acpi_id,
},
.probe = bma220_probe,
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index c4c8d74155c2..e8f802a82300 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -39,6 +39,7 @@
#define BMA400_INT_STAT0_REG 0x0e
#define BMA400_INT_STAT1_REG 0x0f
#define BMA400_INT_STAT2_REG 0x10
+#define BMA400_INT12_MAP_REG 0x23
/* Temperature register */
#define BMA400_TEMP_DATA_REG 0x11
@@ -53,6 +54,9 @@
#define BMA400_STEP_CNT1_REG 0x16
#define BMA400_STEP_CNT3_REG 0x17
#define BMA400_STEP_STAT_REG 0x18
+#define BMA400_STEP_INT_MSK BIT(0)
+#define BMA400_STEP_RAW_LEN 0x03
+#define BMA400_STEP_STAT_MASK GENMASK(9, 8)
/*
* Read-write configuration registers
@@ -62,6 +66,13 @@
#define BMA400_ACC_CONFIG2_REG 0x1b
#define BMA400_CMD_REG 0x7e
+/* Interrupt registers */
+#define BMA400_INT_CONFIG0_REG 0x1f
+#define BMA400_INT_CONFIG1_REG 0x20
+#define BMA400_INT1_MAP_REG 0x21
+#define BMA400_INT_IO_CTRL_REG 0x24
+#define BMA400_INT_DRDY_MSK BIT(7)
+
/* Chip ID of BMA 400 devices found in the chip ID register. */
#define BMA400_ID_REG_VAL 0x90
@@ -83,8 +94,38 @@
#define BMA400_ACC_ODR_MIN_WHOLE_HZ 25
#define BMA400_ACC_ODR_MIN_HZ 12
-#define BMA400_SCALE_MIN 38357
-#define BMA400_SCALE_MAX 306864
+/* Generic interrupts register */
+#define BMA400_GEN1INT_CONFIG0 0x3f
+#define BMA400_GEN2INT_CONFIG0 0x4A
+#define BMA400_GEN_CONFIG1_OFF 0x01
+#define BMA400_GEN_CONFIG2_OFF 0x02
+#define BMA400_GEN_CONFIG3_OFF 0x03
+#define BMA400_GEN_CONFIG31_OFF 0x04
+#define BMA400_INT_GEN1_MSK BIT(2)
+#define BMA400_INT_GEN2_MSK BIT(3)
+#define BMA400_GEN_HYST_MSK GENMASK(1, 0)
+
+/*
+ * The BMA400_SCALE_MIN macro value is the acceleration per 1 LSB for the
+ * +-2g range, expressed in micro m/s^2.
+ *
+ * For +-2g - 1 LSB = 0.976562 milli g = 0.009576 m/s^2
+ * For +-4g - 1 LSB = 1.953125 milli g = 0.019153 m/s^2
+ * For +-16g - 1 LSB = 7.8125 milli g = 0.076614 m/s^2
+ *
+ * The raw value used to select the different ranges is determined by the
+ * position of the first set bit in the scale value, so BMA400_SCALE_MIN
+ * should be odd.
+ *
+ * Scale values for +-2g, +-4g, +-8g and +-16g are populated into bma400_scales
+ * array by left shifting BMA400_SCALE_MIN.
+ * e.g.:
+ * To select +-2g = 9577 << 0 = raw value to write is 0.
+ * To select +-8g = 9577 << 2 = raw value to write is 2.
+ * To select +-16g = 9577 << 3 = raw value to write is 3.
+ */
+#define BMA400_SCALE_MIN 9577
+#define BMA400_SCALE_MAX 76617
#define BMA400_NUM_REGULATORS 2
#define BMA400_VDD_REGULATOR 0
@@ -92,8 +133,7 @@
extern const struct regmap_config bma400_regmap_config;
-int bma400_probe(struct device *dev, struct regmap *regmap, const char *name);
-
-void bma400_remove(struct device *dev);
+int bma400_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name);
#endif
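A minimal sketch of the range selection described in the comment above (this is not the driver's lookup code, just the shift scheme in isolation):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BMA400_SCALE_MIN 9577

/* Map a scale from the left-shifted series back to its 2-bit raw range value. */
static int scale_to_raw(unsigned int scale)
{
	return ffs(scale / BMA400_SCALE_MIN) - 1;
}

int main(void)
{
	/* +-2g -> 0, +-8g -> 2, +-16g -> 3, matching the examples above */
	printf("%d %d %d\n", scale_to_raw(BMA400_SCALE_MIN),
	       scale_to_raw(BMA400_SCALE_MIN << 2),
	       scale_to_raw(BMA400_SCALE_MIN << 3));
	return 0;
}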
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index 043002fe6f63..c31bdd9b168e 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -11,15 +11,24 @@
* - Create channel for sensor time
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/device.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include "bma400.h"
@@ -46,11 +55,24 @@ enum bma400_power_mode {
POWER_MODE_INVALID = 0x03,
};
+enum bma400_scan {
+ BMA400_ACCL_X,
+ BMA400_ACCL_Y,
+ BMA400_ACCL_Z,
+ BMA400_TEMP,
+};
+
struct bma400_sample_freq {
int hz;
int uhz;
};
+enum bma400_activity {
+ BMA400_STILL,
+ BMA400_WALKING,
+ BMA400_RUNNING,
+};
+
struct bma400_data {
struct device *dev;
struct regmap *regmap;
@@ -61,6 +83,19 @@ struct bma400_data {
struct bma400_sample_freq sample_freq;
int oversampling_ratio;
int scale;
+ struct iio_trigger *trig;
+ int steps_enabled;
+ bool step_event_en;
+ bool activity_event_en;
+ unsigned int generic_event_en;
+ /* Correct time stamp alignment */
+ struct {
+ __le16 buff[3];
+ u8 temperature;
+ s64 ts __aligned(8);
+ } buffer __aligned(IIO_DMA_MINALIGN);
+ __le16 status;
+ __be16 duration;
};
static bool bma400_is_writable_reg(struct device *dev, unsigned int reg)
@@ -152,7 +187,38 @@ static const struct iio_chan_spec_ext_info bma400_ext_info[] = {
{ }
};
-#define BMA400_ACC_CHANNEL(_axis) { \
+static const struct iio_event_spec bma400_step_detect_event = {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+};
+
+static const struct iio_event_spec bma400_activity_event = {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE),
+};
+
+static const struct iio_event_spec bma400_accel_event[] = {
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define BMA400_ACC_CHANNEL(_index, _axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##_axis, \
@@ -164,17 +230,55 @@ static const struct iio_chan_spec_ext_info bma400_ext_info[] = {
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.ext_info = bma400_ext_info, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .event_spec = bma400_accel_event, \
+ .num_event_specs = ARRAY_SIZE(bma400_accel_event) \
+}
+
+#define BMA400_ACTIVITY_CHANNEL(_chan2) { \
+ .type = IIO_ACTIVITY, \
+ .modified = 1, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .scan_index = -1, /* No buffer support */ \
+ .event_spec = &bma400_activity_event, \
+ .num_event_specs = 1, \
}
static const struct iio_chan_spec bma400_channels[] = {
- BMA400_ACC_CHANNEL(X),
- BMA400_ACC_CHANNEL(Y),
- BMA400_ACC_CHANNEL(Z),
+ BMA400_ACC_CHANNEL(0, X),
+ BMA400_ACC_CHANNEL(1, Y),
+ BMA400_ACC_CHANNEL(2, Z),
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 8,
+ .storagebits = 8,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_STEPS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_ENABLE),
+ .scan_index = -1, /* No buffer support */
+ .event_spec = &bma400_step_detect_event,
+ .num_event_specs = 1,
},
+ BMA400_ACTIVITY_CHANNEL(IIO_MOD_STILL),
+ BMA400_ACTIVITY_CHANNEL(IIO_MOD_WALKING),
+ BMA400_ACTIVITY_CHANNEL(IIO_MOD_RUNNING),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
};
static int bma400_get_temp_reg(struct bma400_data *data, int *val, int *val2)
@@ -542,6 +646,40 @@ static int bma400_set_power_mode(struct bma400_data *data,
return 0;
}
+static int bma400_enable_steps(struct bma400_data *data, int val)
+{
+ int ret;
+
+ if (data->steps_enabled == val)
+ return 0;
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG1_REG,
+ BMA400_STEP_INT_MSK,
+ FIELD_PREP(BMA400_STEP_INT_MSK, val ? 1 : 0));
+ if (ret)
+ return ret;
+ data->steps_enabled = val;
+ return ret;
+}
+
+static int bma400_get_steps_reg(struct bma400_data *data, int *val)
+{
+ u8 *steps_raw;
+ int ret;
+
+ steps_raw = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL);
+ if (!steps_raw)
+ return -ENOMEM;
+
+ ret = regmap_bulk_read(data->regmap, BMA400_STEP_CNT0_REG,
+ steps_raw, BMA400_STEP_RAW_LEN);
+ if (ret)
+ return ret;
+ *val = get_unaligned_le24(steps_raw);
+ kfree(steps_raw);
+ return IIO_VAL_INT;
+}
+
static void bma400_init_tables(void)
{
int raw;
@@ -560,6 +698,40 @@ static void bma400_init_tables(void)
}
}
+static void bma400_regulators_disable(void *data_ptr)
+{
+ struct bma400_data *data = data_ptr;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
+static void bma400_power_disable(void *data_ptr)
+{
+ struct bma400_data *data = data_ptr;
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_power_mode(data, POWER_MODE_SLEEP);
+ mutex_unlock(&data->mutex);
+ if (ret)
+ dev_warn(data->dev, "Failed to put device into sleep mode (%pe)\n",
+ ERR_PTR(ret));
+}
+
+static enum iio_modifier bma400_act_to_mod(enum bma400_activity activity)
+{
+ switch (activity) {
+ case BMA400_STILL:
+ return IIO_MOD_STILL;
+ case BMA400_WALKING:
+ return IIO_MOD_WALKING;
+ case BMA400_RUNNING:
+ return IIO_MOD_RUNNING;
+ default:
+ return IIO_NO_MOD;
+ }
+}
+
static int bma400_init(struct bma400_data *data)
{
unsigned int val;
@@ -569,13 +741,12 @@ static int bma400_init(struct bma400_data *data)
ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
if (ret) {
dev_err(data->dev, "Failed to read chip id register\n");
- goto out;
+ return ret;
}
if (val != BMA400_ID_REG_VAL) {
dev_err(data->dev, "Chip ID mismatch\n");
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
@@ -589,27 +760,31 @@ static int bma400_init(struct bma400_data *data)
"Failed to get regulators: %d\n",
ret);
- goto out;
+ return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
data->regulators);
if (ret) {
dev_err(data->dev, "Failed to enable regulators: %d\n",
ret);
- goto out;
+ return ret;
}
+ ret = devm_add_action_or_reset(data->dev, bma400_regulators_disable, data);
+ if (ret)
+ return ret;
+
ret = bma400_get_power_mode(data);
if (ret) {
dev_err(data->dev, "Failed to get the initial power-mode\n");
- goto err_reg_disable;
+ return ret;
}
if (data->power_mode != POWER_MODE_NORMAL) {
ret = bma400_set_power_mode(data, POWER_MODE_NORMAL);
if (ret) {
dev_err(data->dev, "Failed to wake up the device\n");
- goto err_reg_disable;
+ return ret;
}
/*
* TODO: The datasheet waits 1500us here in the example, but
@@ -618,20 +793,28 @@ static int bma400_init(struct bma400_data *data)
usleep_range(1500, 2000);
}
+ ret = devm_add_action_or_reset(data->dev, bma400_power_disable, data);
+ if (ret)
+ return ret;
+
bma400_init_tables();
ret = bma400_get_accel_output_data_rate(data);
if (ret)
- goto err_reg_disable;
+ return ret;
ret = bma400_get_accel_oversampling_ratio(data);
if (ret)
- goto err_reg_disable;
+ return ret;
ret = bma400_get_accel_scale(data);
if (ret)
- goto err_reg_disable;
+ return ret;
+ /* Configure INT1 pin to open drain */
+ ret = regmap_write(data->regmap, BMA400_INT_IO_CTRL_REG, 0x06);
+ if (ret)
+ return ret;
/*
* Once the interrupt engine is supported we might use the
* data_src_reg, but for now ensure this is set to the
@@ -639,12 +822,6 @@ static int bma400_init(struct bma400_data *data)
* channel.
*/
return regmap_write(data->regmap, BMA400_ACC_CONFIG2_REG, 0x00);
-
-err_reg_disable:
- regulator_bulk_disable(ARRAY_SIZE(data->regulators),
- data->regulators);
-out:
- return ret;
}
static int bma400_read_raw(struct iio_dev *indio_dev,
@@ -652,14 +829,37 @@ static int bma400_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct bma400_data *data = iio_priv(indio_dev);
+ unsigned int activity;
int ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
- mutex_lock(&data->mutex);
- ret = bma400_get_temp_reg(data, val, val2);
- mutex_unlock(&data->mutex);
- return ret;
+ switch (chan->type) {
+ case IIO_TEMP:
+ mutex_lock(&data->mutex);
+ ret = bma400_get_temp_reg(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_STEPS:
+ return bma400_get_steps_reg(data, val);
+ case IIO_ACTIVITY:
+ ret = regmap_read(data->regmap, BMA400_STEP_STAT_REG,
+ &activity);
+ if (ret)
+ return ret;
+ /*
+ * The device does not support confidence value levels,
+ * so we will always have 100% for current activity and
+ * 0% for the others.
+ */
+ if (chan->channel2 == bma400_act_to_mod(activity))
+ *val = 100;
+ else
+ *val = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_RAW:
mutex_lock(&data->mutex);
ret = bma400_get_accel_reg(data, chan, val);
@@ -700,6 +900,9 @@ static int bma400_read_raw(struct iio_dev *indio_dev,
*val = data->oversampling_ratio;
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = data->steps_enabled;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -765,6 +968,11 @@ static int bma400_write_raw(struct iio_dev *indio_dev,
ret = bma400_set_accel_oversampling_ratio(data, val);
mutex_unlock(&data->mutex);
return ret;
+ case IIO_CHAN_INFO_ENABLE:
+ mutex_lock(&data->mutex);
+ ret = bma400_enable_steps(data, val);
+ mutex_unlock(&data->mutex);
+ return ret;
default:
return -EINVAL;
}
@@ -781,19 +989,419 @@ static int bma400_write_raw_get_fmt(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return FIELD_GET(BMA400_INT_GEN1_MSK,
+ data->generic_event_en);
+ case IIO_EV_DIR_FALLING:
+ return FIELD_GET(BMA400_INT_GEN2_MSK,
+ data->generic_event_en);
+ default:
+ return -EINVAL;
+ }
+ case IIO_STEPS:
+ return data->step_event_en;
+ case IIO_ACTIVITY:
+ return data->activity_event_en;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_steps_event_enable(struct bma400_data *data, int state)
+{
+ int ret;
+
+ ret = bma400_enable_steps(data, 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT12_MAP_REG,
+ BMA400_STEP_INT_MSK,
+ FIELD_PREP(BMA400_STEP_INT_MSK,
+ state));
+ if (ret)
+ return ret;
+ data->step_event_en = state;
+ return 0;
+}
+
+static int bma400_activity_event_en(struct bma400_data *data,
+ enum iio_event_direction dir,
+ int state)
+{
+ int ret, reg, msk, value, field_value;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ reg = BMA400_GEN1INT_CONFIG0;
+ msk = BMA400_INT_GEN1_MSK;
+ value = 2;
+ set_mask_bits(&field_value, BMA400_INT_GEN1_MSK,
+ FIELD_PREP(BMA400_INT_GEN1_MSK, state));
+ break;
+ case IIO_EV_DIR_FALLING:
+ reg = BMA400_GEN2INT_CONFIG0;
+ msk = BMA400_INT_GEN2_MSK;
+ value = 0;
+ set_mask_bits(&field_value, BMA400_INT_GEN2_MSK,
+ FIELD_PREP(BMA400_INT_GEN2_MSK, state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Enable all axes for interrupt evaluation */
+ ret = regmap_write(data->regmap, reg, 0xF8);
+ if (ret)
+ return ret;
+
+ /* OR combination of all axes for interrupt evaluation */
+ ret = regmap_write(data->regmap, reg + BMA400_GEN_CONFIG1_OFF, value);
+ if (ret)
+ return ret;
+
+ /* Initial value to avoid interrupts while enabling */
+ ret = regmap_write(data->regmap, reg + BMA400_GEN_CONFIG2_OFF, 0x0A);
+ if (ret)
+ return ret;
+
+ /* Initial duration value to avoid interrupts while enabling */
+ ret = regmap_write(data->regmap, reg + BMA400_GEN_CONFIG31_OFF, 0x0F);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT1_MAP_REG, msk,
+ field_value);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG0_REG, msk,
+ field_value);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->generic_event_en, msk, field_value);
+ return 0;
+}
+
+static int bma400_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ mutex_lock(&data->mutex);
+ ret = bma400_activity_event_en(data, dir, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_STEPS:
+ mutex_lock(&data->mutex);
+ ret = bma400_steps_event_enable(data, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_ACTIVITY:
+ mutex_lock(&data->mutex);
+ if (!data->step_event_en) {
+ ret = bma400_steps_event_enable(data, true);
+ if (ret) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ }
+ data->activity_event_en = state;
+ mutex_unlock(&data->mutex);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_gen_config_reg(enum iio_event_direction dir)
+{
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return BMA400_GEN2INT_CONFIG0;
+ case IIO_EV_DIR_RISING:
+ return BMA400_GEN1INT_CONFIG0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret, reg;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ reg = get_gen_config_reg(dir);
+ if (reg < 0)
+ return -EINVAL;
+
+ *val2 = 0;
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = regmap_read(data->regmap,
+ reg + BMA400_GEN_CONFIG2_OFF,
+ val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ mutex_lock(&data->mutex);
+ ret = regmap_bulk_read(data->regmap,
+ reg + BMA400_GEN_CONFIG3_OFF,
+ &data->duration,
+ sizeof(data->duration));
+ if (ret) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ *val = be16_to_cpu(data->duration);
+ mutex_unlock(&data->mutex);
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = regmap_read(data->regmap, reg, val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(BMA400_GEN_HYST_MSK, *val);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int reg, ret;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ reg = get_gen_config_reg(dir);
+ if (reg < 0)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (val < 1 || val > 255)
+ return -EINVAL;
+
+ return regmap_write(data->regmap,
+ reg + BMA400_GEN_CONFIG2_OFF,
+ val);
+ case IIO_EV_INFO_PERIOD:
+ if (val < 1 || val > 65535)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ put_unaligned_be16(val, &data->duration);
+ ret = regmap_bulk_write(data->regmap,
+ reg + BMA400_GEN_CONFIG3_OFF,
+ &data->duration,
+ sizeof(data->duration));
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_EV_INFO_HYSTERESIS:
+ if (val < 0 || val > 3)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap, reg,
+ BMA400_GEN_HYST_MSK,
+ FIELD_PREP(BMA400_GEN_HYST_MSK,
+ val));
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
}
+static int bma400_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG0_REG,
+ BMA400_INT_DRDY_MSK,
+ FIELD_PREP(BMA400_INT_DRDY_MSK, state));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BMA400_INT1_MAP_REG,
+ BMA400_INT_DRDY_MSK,
+ FIELD_PREP(BMA400_INT_DRDY_MSK, state));
+}
+
+static const unsigned long bma400_avail_scan_masks[] = {
+ BIT(BMA400_ACCL_X) | BIT(BMA400_ACCL_Y) | BIT(BMA400_ACCL_Z),
+ BIT(BMA400_ACCL_X) | BIT(BMA400_ACCL_Y) | BIT(BMA400_ACCL_Z)
+ | BIT(BMA400_TEMP),
+ 0
+};
+
static const struct iio_info bma400_info = {
.read_raw = bma400_read_raw,
.read_avail = bma400_read_avail,
.write_raw = bma400_write_raw,
.write_raw_get_fmt = bma400_write_raw_get_fmt,
+ .read_event_config = bma400_read_event_config,
+ .write_event_config = bma400_write_event_config,
+ .write_event_value = bma400_write_event_value,
+ .read_event_value = bma400_read_event_value,
+};
+
+static const struct iio_trigger_ops bma400_trigger_ops = {
+ .set_trigger_state = &bma400_data_rdy_trigger_set_state,
+ .validate_device = &iio_trigger_validate_own_device,
};
-int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
+static irqreturn_t bma400_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret, temp;
+
+ /* Lock to protect the data->buffer */
+ mutex_lock(&data->mutex);
+
+ /* bulk read six registers, with the base being the LSB register */
+ ret = regmap_bulk_read(data->regmap, BMA400_X_AXIS_LSB_REG,
+ &data->buffer.buff, sizeof(data->buffer.buff));
+ if (ret)
+ goto unlock_err;
+
+ if (test_bit(BMA400_TEMP, indio_dev->active_scan_mask)) {
+ ret = regmap_read(data->regmap, BMA400_TEMP_DATA_REG, &temp);
+ if (ret)
+ goto unlock_err;
+
+ data->buffer.temperature = temp;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ iio_get_time_ns(indio_dev));
+
+ mutex_unlock(&data->mutex);
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+
+unlock_err:
+ mutex_unlock(&data->mutex);
+ return IRQ_NONE;
+}
+
+static irqreturn_t bma400_interrupt(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bma400_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ unsigned int act, ev_dir = IIO_EV_DIR_NONE;
+ int ret;
+
+ /* Lock to protect the data->status */
+ mutex_lock(&data->mutex);
+ ret = regmap_bulk_read(data->regmap, BMA400_INT_STAT0_REG,
+ &data->status,
+ sizeof(data->status));
+ /*
+ * If none of the bits are set in the status register, then this is a
+ * spurious interrupt.
+ */
+ if (ret || !data->status)
+ goto unlock_err;
+
+ if (FIELD_GET(BMA400_INT_GEN1_MSK, le16_to_cpu(data->status)))
+ ev_dir = IIO_EV_DIR_RISING;
+
+ if (FIELD_GET(BMA400_INT_GEN2_MSK, le16_to_cpu(data->status)))
+ ev_dir = IIO_EV_DIR_FALLING;
+
+ if (ev_dir != IIO_EV_DIR_NONE) {
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_MAG, ev_dir),
+ timestamp);
+ }
+
+ if (FIELD_GET(BMA400_STEP_STAT_MASK, le16_to_cpu(data->status))) {
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_NONE),
+ timestamp);
+
+ if (data->activity_event_en) {
+ ret = regmap_read(data->regmap, BMA400_STEP_STAT_REG,
+ &act);
+ if (ret)
+ goto unlock_err;
+
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACTIVITY, 0,
+ bma400_act_to_mod(act),
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_NONE),
+ timestamp);
+ }
+ }
+
+ if (FIELD_GET(BMA400_INT_DRDY_MSK, le16_to_cpu(data->status))) {
+ mutex_unlock(&data->mutex);
+ iio_trigger_poll_chained(data->trig);
+ return IRQ_HANDLED;
+ }
+
+ mutex_unlock(&data->mutex);
+ return IRQ_HANDLED;
+
+unlock_err:
+ mutex_unlock(&data->mutex);
+ return IRQ_NONE;
+}
+
+int bma400_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name)
{
struct iio_dev *indio_dev;
struct bma400_data *data;
@@ -820,33 +1428,43 @@ int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
indio_dev->info = &bma400_info;
indio_dev->channels = bma400_channels;
indio_dev->num_channels = ARRAY_SIZE(bma400_channels);
+ indio_dev->available_scan_masks = bma400_avail_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
- dev_set_drvdata(dev, indio_dev);
-
- return iio_device_register(indio_dev);
-}
-EXPORT_SYMBOL_NS(bma400_probe, IIO_BMA400);
+ if (irq > 0) {
+ data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!data->trig)
+ return -ENOMEM;
-void bma400_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct bma400_data *data = iio_priv(indio_dev);
- int ret;
+ data->trig->ops = &bma400_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
- mutex_lock(&data->mutex);
- ret = bma400_set_power_mode(data, POWER_MODE_SLEEP);
- mutex_unlock(&data->mutex);
+ ret = devm_iio_trigger_register(data->dev, data->trig);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "iio trigger register fail\n");
+
+ indio_dev->trig = iio_trigger_get(data->trig);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ &bma400_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "request irq %d failed\n", irq);
+ }
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ &bma400_trigger_handler, NULL);
if (ret)
- dev_warn(dev, "Failed to put device into sleep mode (%pe)\n", ERR_PTR(ret));
+ return dev_err_probe(data->dev, ret,
+ "iio triggered buffer setup failed\n");
- regulator_bulk_disable(ARRAY_SIZE(data->regulators),
- data->regulators);
-
- iio_device_unregister(indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
-EXPORT_SYMBOL_NS(bma400_remove, IIO_BMA400);
+EXPORT_SYMBOL_NS(bma400_probe, IIO_BMA400);
MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor core");
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
index da104ffd3fe0..1ba2a982ea73 100644
--- a/drivers/iio/accel/bma400_i2c.c
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -24,14 +24,7 @@ static int bma400_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}
- return bma400_probe(&client->dev, regmap, id->name);
-}
-
-static int bma400_i2c_remove(struct i2c_client *client)
-{
- bma400_remove(&client->dev);
-
- return 0;
+ return bma400_probe(&client->dev, regmap, client->irq, id->name);
}
static const struct i2c_device_id bma400_i2c_ids[] = {
@@ -52,7 +45,6 @@ static struct i2c_driver bma400_i2c_driver = {
.of_match_table = bma400_of_i2c_match,
},
.probe = bma400_i2c_probe,
- .remove = bma400_i2c_remove,
.id_table = bma400_i2c_ids,
};
diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c
index 51f23bdc0ea5..ec13c044b304 100644
--- a/drivers/iio/accel/bma400_spi.c
+++ b/drivers/iio/accel/bma400_spi.c
@@ -84,12 +84,7 @@ static int bma400_spi_probe(struct spi_device *spi)
if (ret)
dev_err(&spi->dev, "Failed to read chip id register\n");
- return bma400_probe(&spi->dev, regmap, id->name);
-}
-
-static void bma400_spi_remove(struct spi_device *spi)
-{
- bma400_remove(&spi->dev);
+ return bma400_probe(&spi->dev, regmap, spi->irq, id->name);
}
static const struct spi_device_id bma400_spi_ids[] = {
@@ -110,7 +105,6 @@ static struct spi_driver bma400_spi_driver = {
.of_match_table = bma400_of_spi_match,
},
.probe = bma400_spi_probe,
- .remove = bma400_spi_remove,
.id_table = bma400_spi_ids,
};
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index 8b2728bbcade..bca4cf98bf4d 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -6,6 +6,7 @@
* Copyright (c) 2018-2021, Topic Embedded Products
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -73,6 +74,8 @@
#define BMI088_ACCEL_FIFO_MODE_FIFO 0x40
#define BMI088_ACCEL_FIFO_MODE_STREAM 0x80
+#define BMIO088_ACCEL_ACC_RANGE_MSK GENMASK(1, 0)
+
enum bmi088_accel_axis {
AXIS_X,
AXIS_Y,
@@ -119,12 +122,13 @@ struct bmi088_accel_chip_info {
u8 chip_id;
const struct iio_chan_spec *channels;
int num_channels;
+ const int scale_table[4][2];
};
struct bmi088_accel_data {
struct regmap *regmap;
const struct bmi088_accel_chip_info *chip_info;
- u8 buffer[2] ____cacheline_aligned; /* shared DMA safe buffer */
+ u8 buffer[2] __aligned(IIO_DMA_MINALIGN); /* shared DMA safe buffer */
};
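All the ____cacheline_aligned to __aligned(IIO_DMA_MINALIGN) conversions in this series follow the same idiom: the buffers handed to the SPI/I2C core (and therefore possibly to DMA) sit at the end of the driver state, starting on an IIO_DMA_MINALIGN boundary, so nothing the CPU touches concurrently can share their cache lines. A minimal sketch of the pattern, with a hypothetical foo_state that is not taken from any of these drivers:

/* IIO_DMA_MINALIGN comes from <linux/iio/iio.h>. */
struct foo_state {
	struct spi_device *spi;
	struct mutex lock;
	/*
	 * Everything from here down may be DMA'd, so it starts on its own
	 * IIO_DMA_MINALIGN-aligned boundary and no CPU-only fields follow it.
	 */
	u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
	u8 rx_buf[4];
};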
static const struct regmap_range bmi088_volatile_ranges[] = {
@@ -236,6 +240,21 @@ static int bmi088_accel_set_sample_freq(struct bmi088_accel_data *data, int val)
BMI088_ACCEL_MODE_ODR_MASK, regval);
}
+static int bmi088_accel_set_scale(struct bmi088_accel_data *data, int val, int val2)
+{
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ if (val == data->chip_info->scale_table[i][0] &&
+ val2 == data->chip_info->scale_table[i][1])
+ break;
+
+ if (i == 4)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, BMI088_ACCEL_REG_ACC_RANGE, i);
+}
+
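Concretely, with the BMI088 scale_table added further down ({{0, 897}, {0, 1794}, {0, 3589}, {0, 7178}}), a requested scale of 0.003589 reaches this helper as val = 0, val2 = 3589, matches index 2, and 2 is written to BMI088_ACCEL_REG_ACC_RANGE; any value not in the table falls out of the loop with i == 4 and returns -EINVAL.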
static int bmi088_accel_get_temp(struct bmi088_accel_data *data, int *val)
{
int ret;
@@ -280,6 +299,7 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
struct bmi088_accel_data *data = iio_priv(indio_dev);
struct device *dev = regmap_get_device(data->regmap);
int ret;
+ int reg;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -330,13 +350,14 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
return ret;
ret = regmap_read(data->regmap,
- BMI088_ACCEL_REG_ACC_RANGE, val);
+ BMI088_ACCEL_REG_ACC_RANGE, &reg);
if (ret)
goto out_read_raw_pm_put;
- *val2 = 15 - (*val & 0x3);
- *val = 3 * 980;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ reg = FIELD_GET(BMIO088_ACCEL_ACC_RANGE_MSK, reg);
+ *val = data->chip_info->scale_table[reg][0];
+ *val2 = data->chip_info->scale_table[reg][1];
+ ret = IIO_VAL_INT_PLUS_MICRO;
goto out_read_raw_pm_put;
default:
@@ -367,7 +388,14 @@ static int bmi088_accel_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct bmi088_accel_data *data = iio_priv(indio_dev);
+
switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)data->chip_info->scale_table;
+ *length = 8;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SAMP_FREQ:
*type = IIO_VAL_INT_PLUS_MICRO;
*vals = bmi088_sample_freqs;
@@ -387,6 +415,15 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = bmi088_accel_set_scale(data, val, val2);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = pm_runtime_resume_and_get(dev);
if (ret)
@@ -408,7 +445,8 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = AXIS_##_axis, \
}
@@ -427,11 +465,26 @@ static const struct iio_chan_spec bmi088_accel_channels[] = {
};
static const struct bmi088_accel_chip_info bmi088_accel_chip_info_tbl[] = {
- [0] = {
- .name = "bmi088a",
+ [BOSCH_BMI085] = {
+ .name = "bmi085-accel",
+ .chip_id = 0x1F,
+ .channels = bmi088_accel_channels,
+ .num_channels = ARRAY_SIZE(bmi088_accel_channels),
+ .scale_table = {{0, 598}, {0, 1196}, {0, 2393}, {0, 4785}},
+ },
+ [BOSCH_BMI088] = {
+ .name = "bmi088-accel",
.chip_id = 0x1E,
.channels = bmi088_accel_channels,
.num_channels = ARRAY_SIZE(bmi088_accel_channels),
+ .scale_table = {{0, 897}, {0, 1794}, {0, 3589}, {0, 7178}},
+ },
+ [BOSCH_BMI090L] = {
+ .name = "bmi090l-accel",
+ .chip_id = 0x1A,
+ .channels = bmi088_accel_channels,
+ .num_channels = ARRAY_SIZE(bmi088_accel_channels),
+ .scale_table = {{0, 897}, {0, 1794}, {0, 3589}, {0, 7178}},
},
};
@@ -446,12 +499,15 @@ static const unsigned long bmi088_accel_scan_masks[] = {
0
};
-static int bmi088_accel_chip_init(struct bmi088_accel_data *data)
+static int bmi088_accel_chip_init(struct bmi088_accel_data *data, enum bmi_device_type type)
{
struct device *dev = regmap_get_device(data->regmap);
int ret, i;
unsigned int val;
+ if (type >= BOSCH_UNKNOWN)
+ return -ENODEV;
+
/* Do a dummy read to enable SPI interface, won't harm I2C */
regmap_read(data->regmap, BMI088_ACCEL_REG_INT_STATUS, &val);
@@ -477,22 +533,23 @@ static int bmi088_accel_chip_init(struct bmi088_accel_data *data)
}
/* Validate chip ID */
- for (i = 0; i < ARRAY_SIZE(bmi088_accel_chip_info_tbl); i++) {
- if (bmi088_accel_chip_info_tbl[i].chip_id == val) {
- data->chip_info = &bmi088_accel_chip_info_tbl[i];
+ for (i = 0; i < ARRAY_SIZE(bmi088_accel_chip_info_tbl); i++)
+ if (bmi088_accel_chip_info_tbl[i].chip_id == val)
break;
- }
- }
- if (i == ARRAY_SIZE(bmi088_accel_chip_info_tbl)) {
- dev_err(dev, "Invalid chip %x\n", val);
- return -ENODEV;
- }
+
+ if (i == ARRAY_SIZE(bmi088_accel_chip_info_tbl))
+ data->chip_info = &bmi088_accel_chip_info_tbl[type];
+ else
+ data->chip_info = &bmi088_accel_chip_info_tbl[i];
+
+ if (i != type)
+ dev_warn(dev, "unexpected chip id 0x%X\n", val);
return 0;
}
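Net effect of the reworked ID check: a chip id that matches some table entry always wins, so a device declared as a BMI085 whose sensor actually reports 0x1E is driven with the BMI088 entry; an id that matches nothing falls back to the entry for the type passed in from the device-id match; and a warning is logged whenever the reported id and the declared type disagree.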
int bmi088_accel_core_probe(struct device *dev, struct regmap *regmap,
- int irq, const char *name, bool block_supported)
+ int irq, enum bmi_device_type type)
{
struct bmi088_accel_data *data;
struct iio_dev *indio_dev;
@@ -507,13 +564,13 @@ int bmi088_accel_core_probe(struct device *dev, struct regmap *regmap,
data->regmap = regmap;
- ret = bmi088_accel_chip_init(data);
+ ret = bmi088_accel_chip_init(data, type);
if (ret)
return ret;
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->num_channels;
- indio_dev->name = name ? name : data->chip_info->name;
+ indio_dev->name = data->chip_info->name;
indio_dev->available_scan_masks = bmi088_accel_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmi088_accel_info;
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 167c36cf1eb8..9e2ed3bd5661 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -52,8 +52,8 @@ static int bmi088_accel_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return bmi088_accel_core_probe(&spi->dev, regmap, spi->irq, id->name,
- true);
+ return bmi088_accel_core_probe(&spi->dev, regmap, spi->irq,
+ id->driver_data);
}
static void bmi088_accel_remove(struct spi_device *spi)
@@ -61,8 +61,18 @@ static void bmi088_accel_remove(struct spi_device *spi)
bmi088_accel_core_remove(&spi->dev);
}
+static const struct of_device_id bmi088_of_match[] = {
+ { .compatible = "bosch,bmi085-accel" },
+ { .compatible = "bosch,bmi088-accel" },
+ { .compatible = "bosch,bmi090l-accel" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bmi088_of_match);
+
static const struct spi_device_id bmi088_accel_id[] = {
- {"bmi088-accel", },
+ {"bmi085-accel", BOSCH_BMI085},
+ {"bmi088-accel", BOSCH_BMI088},
+ {"bmi090l-accel", BOSCH_BMI090L},
{}
};
MODULE_DEVICE_TABLE(spi, bmi088_accel_id);
@@ -71,6 +81,7 @@ static struct spi_driver bmi088_accel_driver = {
.driver = {
.name = "bmi088_accel_spi",
.pm = &bmi088_accel_pm_ops,
+ .of_match_table = bmi088_of_match,
},
.probe = bmi088_accel_probe,
.remove = bmi088_accel_remove,
diff --git a/drivers/iio/accel/bmi088-accel.h b/drivers/iio/accel/bmi088-accel.h
index 5d40c7cf1cbc..80cd396a3141 100644
--- a/drivers/iio/accel/bmi088-accel.h
+++ b/drivers/iio/accel/bmi088-accel.h
@@ -8,11 +8,18 @@
struct device;
+enum bmi_device_type {
+ BOSCH_BMI085,
+ BOSCH_BMI088,
+ BOSCH_BMI090L,
+ BOSCH_UNKNOWN,
+};
+
extern const struct regmap_config bmi088_regmap_conf;
extern const struct dev_pm_ops bmi088_accel_pm_ops;
int bmi088_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
- const char *name, bool block_supported);
+ enum bmi_device_type type);
void bmi088_accel_core_remove(struct device *dev);
#endif /* BMI088_ACCEL_H */
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index b6f3471b62dc..0f403342b1fc 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -215,7 +215,7 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
return -ENOMEM;
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
- cros_ec_sensors_capture, NULL);
+ cros_ec_sensors_capture);
if (ret)
return ret;
@@ -230,12 +230,12 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
indio_dev->channels = cros_ec_accel_legacy_channels;
indio_dev->num_channels = ARRAY_SIZE(cros_ec_accel_legacy_channels);
/* The lid sensor needs to be presented inverted. */
- if (state->loc == MOTIONSENSE_LOC_LID) {
+ if (!strcmp(indio_dev->label, "accel-display")) {
state->sign[CROS_EC_SENSOR_X] = -1;
state->sign[CROS_EC_SENSOR_Z] = -1;
}
- return devm_iio_device_register(dev, indio_dev);
+ return cros_ec_sensors_core_register(dev, indio_dev, NULL);
}
static struct platform_driver cros_ec_accel_platform_driver = {
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index c8dc52f11037..d57f264bd6c8 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -55,7 +55,7 @@ static struct i2c_driver kxsd9_i2c_driver = {
.driver = {
.name = "kxsd9",
.of_match_table = kxsd9_of_match,
- .pm = &kxsd9_dev_pm_ops,
+ .pm = pm_ptr(&kxsd9_dev_pm_ops),
},
.probe = kxsd9_i2c_probe,
.remove = kxsd9_i2c_remove,
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index b7b5af45429e..07f14a9f22c7 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -52,7 +52,7 @@ MODULE_DEVICE_TABLE(of, kxsd9_of_match);
static struct spi_driver kxsd9_spi_driver = {
.driver = {
.name = "kxsd9",
- .pm = &kxsd9_dev_pm_ops,
+ .pm = pm_ptr(&kxsd9_dev_pm_ops),
.of_match_table = kxsd9_of_match,
},
.probe = kxsd9_spi_probe,
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 3975860331a6..ba99649fe195 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -492,7 +492,6 @@ void kxsd9_common_remove(struct device *dev)
}
EXPORT_SYMBOL_NS(kxsd9_common_remove, IIO_KXSD9);
-#ifdef CONFIG_PM
static int kxsd9_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -508,15 +507,9 @@ static int kxsd9_runtime_resume(struct device *dev)
return kxsd9_power_up(st);
}
-#endif /* CONFIG_PM */
-const struct dev_pm_ops kxsd9_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(kxsd9_runtime_suspend,
- kxsd9_runtime_resume, NULL)
-};
-EXPORT_SYMBOL_NS(kxsd9_dev_pm_ops, IIO_KXSD9);
+EXPORT_NS_RUNTIME_DEV_PM_OPS(kxsd9_dev_pm_ops, kxsd9_runtime_suspend,
+ kxsd9_runtime_resume, NULL, IIO_KXSD9);
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Kionix KXSD9 driver");
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index 679e69cd7657..c15d16e7f1da 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -157,7 +157,9 @@ static int mc3230_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
- return mc3230_set_opcon(iio_priv(indio_dev), MC3230_MODE_OPCON_STANDBY);
+ mc3230_set_opcon(iio_priv(indio_dev), MC3230_MODE_OPCON_STANDBY);
+
+ return 0;
}
static int mc3230_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 112a5a33c29f..794f2f383303 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -7,8 +7,8 @@
* IIO driver for Freescale MMA7660FC; 7-bit I2C address: 0x4c.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -255,7 +255,7 @@ static const struct of_device_id mma7660_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mma7660_of_match);
-static const struct acpi_device_id __maybe_unused mma7660_acpi_id[] = {
+static const struct acpi_device_id mma7660_acpi_id[] = {
{"MMA7660", 0},
{}
};
@@ -267,7 +267,7 @@ static struct i2c_driver mma7660_driver = {
.name = "mma7660",
.pm = pm_sleep_ptr(&mma7660_pm_ops),
.of_match_table = mma7660_of_match,
- .acpi_match_table = ACPI_PTR(mma7660_acpi_id),
+ .acpi_match_table = mma7660_acpi_id,
},
.probe = mma7660_probe,
.remove = mma7660_remove,
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 29a68a7d34cd..87c54e41f6cc 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -167,8 +167,8 @@ struct sca3000_state {
int mo_det_use_count;
struct mutex lock;
/* Can these share a cacheline ? */
- u8 rx[384] ____cacheline_aligned;
- u8 tx[6] ____cacheline_aligned;
+ u8 rx[384] __aligned(IIO_DMA_MINALIGN);
+ u8 tx[6] __aligned(IIO_DMA_MINALIGN);
};
/**
@@ -424,7 +424,7 @@ error_ret:
* sca3000_print_rev() - sysfs interface to read the chip revision number
* @indio_dev: Device instance specific generic IIO data.
* Driver specific device instance data can be obtained via
- * via iio_priv(indio_dev)
+ * iio_priv(indio_dev)
*/
static int sca3000_print_rev(struct iio_dev *indio_dev)
{
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index f7ef8ecfd34a..eaa0c9cfda44 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -38,19 +38,35 @@
/* Device ID */
#define SCA3300_REG_WHOAMI 0x10
#define SCA3300_WHOAMI_ID 0x51
+#define SCL3300_WHOAMI_ID 0xC1
/* Device return status and mask */
#define SCA3300_VALUE_RS_ERROR 0x3
#define SCA3300_MASK_RS_STATUS GENMASK(1, 0)
+#define SCL3300_REG_ANG_CTRL 0x0C
+#define SCL3300_ANG_ENABLE 0x1F
+
enum sca3300_scan_indexes {
SCA3300_ACC_X = 0,
SCA3300_ACC_Y,
SCA3300_ACC_Z,
SCA3300_TEMP,
- SCA3300_TIMESTAMP,
+ SCA3300_INCLI_X,
+ SCA3300_INCLI_Y,
+ SCA3300_INCLI_Z,
+ SCA3300_SCAN_MAX
};
+/*
+ * Buffer size max case:
+ * Three accel channels, two bytes per channel.
+ * Temperature channel, two bytes.
+ * Three incli channels, two bytes per channel.
+ * Timestamp channel, eight bytes.
+ */
+#define SCA3300_MAX_BUFFER_SIZE (ALIGN(sizeof(s16) * SCA3300_SCAN_MAX, sizeof(s64)) + sizeof(s64))
+
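Worked through for the largest case (SCL3300 with every channel enabled): 7 * sizeof(s16) = 14 bytes of samples, ALIGN(14, sizeof(s64)) = 16, plus the 8-byte timestamp gives 24 bytes. An illustrative compile-time check, not part of the patch:

/* SCA3300_SCAN_MAX is 7: x/y/z accel, temperature, x/y/z inclination. */
static_assert(SCA3300_MAX_BUFFER_SIZE == 24);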
#define SCA3300_ACCEL_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
.address = reg, \
@@ -72,27 +88,72 @@ enum sca3300_scan_indexes {
}, \
}
+#define SCA3300_INCLI_CHANNEL(index, reg, axis) { \
+ .type = IIO_INCLI, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define SCA3300_TEMP_CHANNEL(index, reg) { \
+ .type = IIO_TEMP, \
+ .address = reg, \
+ .scan_index = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
static const struct iio_chan_spec sca3300_channels[] = {
SCA3300_ACCEL_CHANNEL(SCA3300_ACC_X, 0x1, X),
SCA3300_ACCEL_CHANNEL(SCA3300_ACC_Y, 0x2, Y),
SCA3300_ACCEL_CHANNEL(SCA3300_ACC_Z, 0x3, Z),
- {
- .type = IIO_TEMP,
- .address = 0x5,
- .scan_index = SCA3300_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- .endianness = IIO_CPU,
- },
- },
+ SCA3300_TEMP_CHANNEL(SCA3300_TEMP, 0x05),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
-static const int sca3300_lp_freq[] = {70, 70, 70, 10};
-static const int sca3300_accel_scale[][2] = {{0, 370}, {0, 741}, {0, 185}, {0, 185}};
+static const int sca3300_lp_freq[] = {70, 10};
+static const int sca3300_lp_freq_map[] = {0, 0, 0, 1};
+
+static const int scl3300_lp_freq[] = {40, 70, 10};
+static const int scl3300_lp_freq_map[] = {0, 1, 2};
+
+static const int sca3300_accel_scale[][2] = {{0, 370}, {0, 741}, {0, 185}};
+static const int sca3300_accel_scale_map[] = {0, 1, 2, 2};
+
+static const int scl3300_accel_scale[][2] = {{0, 167}, {0, 333}, {0, 83}};
+static const int scl3300_accel_scale_map[] = {0, 1, 2};
+
+static const int scl3300_incli_scale[][2] = {{0, 5495}};
+static const int scl3300_incli_scale_map[] = {0, 0, 0};
+
+static const int sca3300_avail_modes_map[] = {0, 1, 2, 3};
+static const int scl3300_avail_modes_map[] = {0, 1, 3};
+
+static const struct iio_chan_spec scl3300_channels[] = {
+ SCA3300_ACCEL_CHANNEL(SCA3300_ACC_X, 0x1, X),
+ SCA3300_ACCEL_CHANNEL(SCA3300_ACC_Y, 0x2, Y),
+ SCA3300_ACCEL_CHANNEL(SCA3300_ACC_Z, 0x3, Z),
+ SCA3300_TEMP_CHANNEL(SCA3300_TEMP, 0x05),
+ SCA3300_INCLI_CHANNEL(SCA3300_INCLI_X, 0x09, X),
+ SCA3300_INCLI_CHANNEL(SCA3300_INCLI_Y, 0x0A, Y),
+ SCA3300_INCLI_CHANNEL(SCA3300_INCLI_Z, 0x0B, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(7),
+};
static const unsigned long sca3300_scan_masks[] = {
BIT(SCA3300_ACC_X) | BIT(SCA3300_ACC_Y) | BIT(SCA3300_ACC_Z) |
@@ -100,25 +161,91 @@ static const unsigned long sca3300_scan_masks[] = {
0
};
+static const unsigned long scl3300_scan_masks[] = {
+ BIT(SCA3300_ACC_X) | BIT(SCA3300_ACC_Y) | BIT(SCA3300_ACC_Z) |
+ BIT(SCA3300_TEMP) |
+ BIT(SCA3300_INCLI_X) | BIT(SCA3300_INCLI_Y) | BIT(SCA3300_INCLI_Z),
+ 0
+};
+
+struct sca3300_chip_info {
+ const char *name;
+ const unsigned long *scan_masks;
+ const struct iio_chan_spec *channels;
+ u8 num_channels;
+ u8 num_accel_scales;
+ const int (*accel_scale)[2];
+ const int *accel_scale_map;
+ const int (*incli_scale)[2];
+ const int *incli_scale_map;
+ u8 num_incli_scales;
+ u8 num_freqs;
+ const int *freq_table;
+ const int *freq_map;
+ const int *avail_modes_table;
+ u8 num_avail_modes;
+ u8 chip_id;
+ bool angle_supported;
+};
+
/**
* struct sca3300_data - device data
* @spi: SPI device structure
* @lock: Data buffer lock
- * @scan: Triggered buffer. Four channel 16-bit data + 64-bit timestamp
+ * @chip: Sensor chip specific information
+ * @buffer: Triggered buffer:
+ * -SCA3300: 4 channel 16-bit data + 64-bit timestamp
+ * -SCL3300: 7 channel 16-bit data + 64-bit timestamp
* @txbuf: Transmit buffer
* @rxbuf: Receive buffer
*/
struct sca3300_data {
struct spi_device *spi;
struct mutex lock;
- struct {
- s16 channels[4];
- s64 ts __aligned(sizeof(s64));
- } scan;
- u8 txbuf[4] ____cacheline_aligned;
+ const struct sca3300_chip_info *chip;
+ u8 buffer[SCA3300_MAX_BUFFER_SIZE] __aligned(sizeof(s64));
+ u8 txbuf[4] __aligned(IIO_DMA_MINALIGN);
u8 rxbuf[4];
};
+static const struct sca3300_chip_info sca3300_chip_tbl[] = {
+ {
+ .name = "sca3300",
+ .scan_masks = sca3300_scan_masks,
+ .channels = sca3300_channels,
+ .num_channels = ARRAY_SIZE(sca3300_channels),
+ .num_accel_scales = ARRAY_SIZE(sca3300_accel_scale)*2,
+ .accel_scale = sca3300_accel_scale,
+ .accel_scale_map = sca3300_accel_scale_map,
+ .num_freqs = ARRAY_SIZE(sca3300_lp_freq),
+ .freq_table = sca3300_lp_freq,
+ .freq_map = sca3300_lp_freq_map,
+ .avail_modes_table = sca3300_avail_modes_map,
+ .num_avail_modes = 4,
+ .chip_id = SCA3300_WHOAMI_ID,
+ .angle_supported = false,
+ },
+ {
+ .name = "scl3300",
+ .scan_masks = scl3300_scan_masks,
+ .channels = scl3300_channels,
+ .num_channels = ARRAY_SIZE(scl3300_channels),
+ .num_accel_scales = ARRAY_SIZE(scl3300_accel_scale)*2,
+ .accel_scale = scl3300_accel_scale,
+ .accel_scale_map = scl3300_accel_scale_map,
+ .incli_scale = scl3300_incli_scale,
+ .incli_scale_map = scl3300_incli_scale_map,
+ .num_incli_scales = ARRAY_SIZE(scl3300_incli_scale)*2,
+ .num_freqs = ARRAY_SIZE(scl3300_lp_freq),
+ .freq_table = scl3300_lp_freq,
+ .freq_map = scl3300_lp_freq_map,
+ .avail_modes_table = scl3300_avail_modes_map,
+ .num_avail_modes = 3,
+ .chip_id = SCL3300_WHOAMI_ID,
+ .angle_supported = true,
+ },
+};
+
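The ARRAY_SIZE(...)*2 sizing feeds read_avail(): with IIO_VAL_INT_PLUS_MICRO the IIO core treats *vals as a flat array of integers, two per scale entry, so *length is twice the number of {int, micro} pairs. For example, the three SCA3300 accelerometer scales are reported with *length = 6, i.e. 0 370 0 741 0 185.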
DECLARE_CRC8_TABLE(sca3300_crc_table);
static int sca3300_transfer(struct sca3300_data *sca_data, int *val)
@@ -225,36 +352,91 @@ static int sca3300_write_reg(struct sca3300_data *sca_data, u8 reg, int val)
return sca3300_error_handler(sca_data);
}
+static int sca3300_set_op_mode(struct sca3300_data *sca_data, int index)
+{
+ if ((index < 0) || (index >= sca_data->chip->num_avail_modes))
+ return -EINVAL;
+
+ return sca3300_write_reg(sca_data, SCA3300_REG_MODE,
+ sca_data->chip->avail_modes_table[index]);
+}
+
+static int sca3300_get_op_mode(struct sca3300_data *sca_data, int *index)
+{
+ int reg_val;
+ int ret;
+ int i;
+
+ ret = sca3300_read_reg(sca_data, SCA3300_REG_MODE, &reg_val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < sca_data->chip->num_avail_modes; i++) {
+ if (sca_data->chip->avail_modes_table[i] == reg_val)
+ break;
+ }
+ if (i == sca_data->chip->num_avail_modes)
+ return -EINVAL;
+
+ *index = i;
+ return 0;
+}
+
+static int sca3300_set_frequency(struct sca3300_data *data, int val)
+{
+ const struct sca3300_chip_info *chip = data->chip;
+ unsigned int index;
+ int *opmode_scale;
+ int *new_scale;
+ unsigned int i;
+
+ if (sca3300_get_op_mode(data, &index))
+ return -EINVAL;
+
+ /*
+ * Find a mode in which the requested sampling frequency is available
+ * and the scaling currently set is retained.
+ */
+ opmode_scale = (int *)chip->accel_scale[chip->accel_scale_map[index]];
+ for (i = 0; i < chip->num_avail_modes; i++) {
+ new_scale = (int *)chip->accel_scale[chip->accel_scale_map[i]];
+ if ((val == chip->freq_table[chip->freq_map[i]]) &&
+ (opmode_scale[1] == new_scale[1]) &&
+ (opmode_scale[0] == new_scale[0]))
+ break;
+ }
+ if (i == chip->num_avail_modes)
+ return -EINVAL;
+
+ return sca3300_set_op_mode(data, i);
+}
+
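Read against the SCA3300 tables above: modes 0..3 carry scales of {370, 741, 185, 185} micro-units (via accel_scale_map) and low-pass frequencies of {70, 70, 70, 10} Hz (via freq_map), so requesting 10 Hz while in mode 2 keeps the 185 scale and lands in mode 3, whereas the same request from mode 0 or mode 1 returns -EINVAL because no 10 Hz mode offers their scale.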
static int sca3300_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct sca3300_data *data = iio_priv(indio_dev);
- int reg_val;
- int ret;
+ int index;
int i;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- if (val)
+ if (chan->type != IIO_ACCEL)
return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(sca3300_accel_scale); i++) {
- if (val2 == sca3300_accel_scale[i][1])
- return sca3300_write_reg(data, SCA3300_REG_MODE, i);
+ /*
+ * Letting scale take priority over sampling frequency.
+ * That makes sense given we can only ever end up increasing
+ * the sampling frequency, which is unlikely to be a problem.
+ */
+ for (i = 0; i < data->chip->num_avail_modes; i++) {
+ index = data->chip->accel_scale_map[i];
+ if ((val == data->chip->accel_scale[index][0]) &&
+ (val2 == data->chip->accel_scale[index][1]))
+ return sca3300_set_op_mode(data, i);
}
return -EINVAL;
-
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- ret = sca3300_read_reg(data, SCA3300_REG_MODE, &reg_val);
- if (ret)
- return ret;
- /* freq. change is possible only for mode 3 and 4 */
- if (reg_val == 2 && val == sca3300_lp_freq[3])
- return sca3300_write_reg(data, SCA3300_REG_MODE, 3);
- if (reg_val == 3 && val == sca3300_lp_freq[2])
- return sca3300_write_reg(data, SCA3300_REG_MODE, 2);
- return -EINVAL;
+ return sca3300_set_frequency(data, val);
default:
return -EINVAL;
}
@@ -265,8 +447,8 @@ static int sca3300_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sca3300_data *data = iio_priv(indio_dev);
+ int index;
int ret;
- int reg_val;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -275,17 +457,29 @@ static int sca3300_read_raw(struct iio_dev *indio_dev,
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = sca3300_read_reg(data, SCA3300_REG_MODE, &reg_val);
+ ret = sca3300_get_op_mode(data, &index);
if (ret)
return ret;
- *val = 0;
- *val2 = sca3300_accel_scale[reg_val][1];
- return IIO_VAL_INT_PLUS_MICRO;
+ switch (chan->type) {
+ case IIO_INCLI:
+ index = data->chip->incli_scale_map[index];
+ *val = data->chip->incli_scale[index][0];
+ *val2 = data->chip->incli_scale[index][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ index = data->chip->accel_scale_map[index];
+ *val = data->chip->accel_scale[index][0];
+ *val2 = data->chip->accel_scale[index][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- ret = sca3300_read_reg(data, SCA3300_REG_MODE, &reg_val);
+ ret = sca3300_get_op_mode(data, &index);
if (ret)
return ret;
- *val = sca3300_lp_freq[reg_val];
+ index = data->chip->freq_map[index];
+ *val = data->chip->freq_table[index];
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -298,21 +492,21 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sca3300_data *data = iio_priv(indio_dev);
int bit, ret, val, i = 0;
+ s16 *channels = (s16 *)data->buffer;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
- ret = sca3300_read_reg(data, sca3300_channels[bit].address,
- &val);
+ ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val);
if (ret) {
dev_err_ratelimited(&data->spi->dev,
"failed to read register, error: %d\n", ret);
/* handled, but bailing out due to errors */
goto out;
}
- data->scan.channels[i++] = val;
+ channels[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -329,6 +523,7 @@ static int sca3300_init(struct sca3300_data *sca_data,
{
int value = 0;
int ret;
+ int i;
ret = sca3300_write_reg(sca_data, SCA3300_REG_MODE,
SCA3300_MODE_SW_RESET);
@@ -337,20 +532,33 @@ static int sca3300_init(struct sca3300_data *sca_data,
/*
* Wait 1ms after SW-reset command.
- * Wait 15ms for settling of signal paths.
+ * Wait for the settling of signal paths,
+ * 15ms for SCA3300 and 25ms for SCL3300.
*/
- usleep_range(16e3, 50e3);
+ usleep_range(26e3, 50e3);
ret = sca3300_read_reg(sca_data, SCA3300_REG_WHOAMI, &value);
if (ret)
return ret;
- if (value != SCA3300_WHOAMI_ID) {
- dev_err(&sca_data->spi->dev,
- "device id not expected value, %d != %u\n",
- value, SCA3300_WHOAMI_ID);
+ for (i = 0; i < ARRAY_SIZE(sca3300_chip_tbl); i++) {
+ if (sca3300_chip_tbl[i].chip_id == value)
+ break;
+ }
+ if (i == ARRAY_SIZE(sca3300_chip_tbl)) {
+ dev_err(&sca_data->spi->dev, "unknown chip id %x\n", value);
return -ENODEV;
}
+
+ sca_data->chip = &sca3300_chip_tbl[i];
+
+ if (sca_data->chip->angle_supported) {
+ ret = sca3300_write_reg(sca_data, SCL3300_REG_ANG_CTRL,
+ SCL3300_ANG_ENABLE);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -382,15 +590,26 @@ static int sca3300_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct sca3300_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- *vals = (const int *)sca3300_accel_scale;
- *length = ARRAY_SIZE(sca3300_accel_scale) * 2 - 2;
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ switch (chan->type) {
+ case IIO_INCLI:
+ *vals = (const int *)data->chip->incli_scale;
+ *length = data->chip->num_incli_scales;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ case IIO_ACCEL:
+ *vals = (const int *)data->chip->accel_scale;
+ *length = data->chip->num_accel_scales;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- *vals = &sca3300_lp_freq[2];
- *length = 2;
+ *vals = (const int *)data->chip->freq_table;
+ *length = data->chip->num_freqs;
*type = IIO_VAL_INT;
return IIO_AVAIL_LIST;
default:
@@ -422,11 +641,6 @@ static int sca3300_probe(struct spi_device *spi)
crc8_populate_msb(sca3300_crc_table, SCA3300_CRC8_POLYNOMIAL);
indio_dev->info = &sca3300_info;
- indio_dev->name = SCA3300_ALIAS;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = sca3300_channels;
- indio_dev->num_channels = ARRAY_SIZE(sca3300_channels);
- indio_dev->available_scan_masks = sca3300_scan_masks;
ret = sca3300_init(sca_data, indio_dev);
if (ret) {
@@ -434,6 +648,12 @@ static int sca3300_probe(struct spi_device *spi)
return ret;
}
+ indio_dev->name = sca_data->chip->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = sca_data->chip->channels;
+ indio_dev->num_channels = sca_data->chip->num_channels;
+ indio_dev->available_scan_masks = sca_data->chip->scan_masks;
+
ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
iio_pollfunc_store_time,
sca3300_trigger_handler, NULL);
@@ -454,6 +674,7 @@ static int sca3300_probe(struct spi_device *spi)
static const struct of_device_id sca3300_dt_ids[] = {
{ .compatible = "murata,sca3300"},
+ { .compatible = "murata,scl3300"},
{}
};
MODULE_DEVICE_TABLE(of, sca3300_dt_ids);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index a71dfff3ca4a..ceca28913355 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -608,7 +608,9 @@ static int stk8312_remove(struct i2c_client *client)
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
- return stk8312_set_mode(data, STK8312_MODE_STANDBY);
+ stk8312_set_mode(data, STK8312_MODE_STANDBY);
+
+ return 0;
}
static int stk8312_suspend(struct device *dev)
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 0067ec5cbae8..7d59efb41e22 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -501,7 +501,9 @@ static int stk8ba50_remove(struct i2c_client *client)
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
- return stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+ stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+
+ return 0;
}
static int stk8ba50_suspend(struct device *dev)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 48ace7412874..7fe5930891e0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -417,7 +417,6 @@ config DLN2_ADC
config ENVELOPE_DETECTOR
tristate "Envelope detector using a DAC and a comparator"
- depends on OF
help
Say yes here to build support for an envelope detector using a DAC
and a comparator.
@@ -563,7 +562,7 @@ config LP8788_ADC
config LPC18XX_ADC
tristate "NXP LPC18xx ADC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
help
Say yes here to build support for NXP LPC18XX ADC.
@@ -823,6 +822,18 @@ config QCOM_PM8XXX_XOADC
To compile this driver as a module, choose M here: the module
will be called qcom-pm8xxx-xoadc.
+config QCOM_SPMI_RRADC
+ tristate "Qualcomm SPMI RRADC"
+ depends on MFD_SPMI_PMIC
+ help
+ This is for the PMIC Round Robin ADC driver.
+
+ This driver exposes the battery ID resistor, battery thermal, PMIC die
+ temperature, charger USB in and DC in voltage and current.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-spmi-rradc.
+
config QCOM_SPMI_IADC
tristate "Qualcomm SPMI PMIC current ADC"
depends on SPMI
@@ -941,7 +952,6 @@ config SPEAR_ADC
config SD_ADC_MODULATOR
tristate "Generic sigma delta modulator"
- depends on OF
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -1259,7 +1269,6 @@ config TWL6030_GPADC
config VF610_ADC
tristate "Freescale vf610 ADC driver"
- depends on OF
depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 39d806f6d457..1772a549a3c8 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_ADC5) += qcom-spmi-adc5.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
+obj-$(CONFIG_QCOM_SPMI_RRADC) += qcom-spmi-rradc.o
obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index f20d39f0bc01..468c2656d2be 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -37,7 +37,7 @@ struct ad7266_state {
struct gpio_desc *gpios[3];
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* The buffer needs to be large enough to hold two samples (4 bytes) and
* the naturally aligned timestamp (8 bytes).
@@ -45,7 +45,7 @@ struct ad7266_state {
struct {
__be16 sample[2];
s64 timestamp;
- } data ____cacheline_aligned;
+ } data __aligned(IIO_DMA_MINALIGN);
};
static int ad7266_wakeup(struct ad7266_state *st)
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index 3bdf3d9422f2..d4a4e15c8244 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -183,7 +183,7 @@ struct ad7280_state {
unsigned char cb_mask[AD7280A_MAX_CHAIN];
struct mutex lock; /* protect sensor state */
- __be32 tx ____cacheline_aligned;
+ __be32 tx __aligned(IIO_DMA_MINALIGN);
__be32 rx;
};
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 3271a31afde1..92c68d467c50 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -80,7 +80,7 @@ struct ad7292_state {
struct regulator *reg;
unsigned short vref_mv;
- __be16 d16 ____cacheline_aligned;
+ __be16 d16 __aligned(IIO_DMA_MINALIGN);
u8 d8[2];
};
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 3f4e73f7d35a..c0430f71f592 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -49,7 +49,7 @@ struct ad7298_state {
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
- __be16 rx_buf[12] ____cacheline_aligned;
+ __be16 rx_buf[12] __aligned(IIO_DMA_MINALIGN);
__be16 tx_buf[2];
};
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index a1e8b32671cf..94776f696290 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -44,13 +44,12 @@ struct ad7476_state {
struct spi_transfer xfer;
struct spi_message msg;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* Make the buffer large enough for one 16 bit sample and one 64 bit
* aligned 64 bit timestamp.
*/
- unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)]
- ____cacheline_aligned;
+ unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
};
enum ad7476_supported_device_ids {
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 3b193dc26438..ba24f99523e0 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 4f82d7c9acfd..2dc4f599f9df 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -116,11 +116,11 @@ struct ad7606_state {
struct completion completion;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* 16 * 16-bit samples + 64-bit timestamp
*/
- unsigned short data[20] ____cacheline_aligned;
+ unsigned short data[20] __aligned(IIO_DMA_MINALIGN);
__be16 d16[2];
};
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 8888e56b5e90..b912b4df9b56 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -5,6 +5,7 @@
* Copyright 2011 Analog Devices Inc.
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index 51ee9482e0df..3079a0872947 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -45,13 +45,12 @@ struct ad7766 {
struct spi_message msg;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* Make the buffer large enough for one 24 bit sample and one 64 bit
* aligned 64 bit timestamp.
*/
- unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)]
- ____cacheline_aligned;
+ unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
};
/*
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index aa42ba759fa1..652db768ef37 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -163,7 +163,7 @@ struct ad7768_state {
struct gpio_desc *gpio_sync_in;
const char *labels[ARRAY_SIZE(ad7768_channels)];
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
@@ -173,7 +173,7 @@ struct ad7768_state {
} scan;
__be32 d32;
u8 d8[2];
- } data ____cacheline_aligned;
+ } data __aligned(IIO_DMA_MINALIGN);
};
static int ad7768_spi_reg_read(struct ad7768_state *st, unsigned int addr,
@@ -620,7 +620,7 @@ static int ad7768_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad7768_channels);
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad7768_info;
- indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
+ indio_dev->modes = INDIO_DIRECT_MODE;
ret = ad7768_setup(st);
if (ret < 0) {
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index f64999714a4d..965bdc8aa696 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -66,13 +66,12 @@ struct ad7887_state {
unsigned char tx_cmd_buf[4];
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* Buffer needs to be large enough to hold two 16 bit samples and a
* 64 bit aligned 64 bit timestamp.
*/
- unsigned char data[ALIGN(4, sizeof(s64)) + sizeof(s64)]
- ____cacheline_aligned;
+ unsigned char data[ALIGN(4, sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
};
enum ad7887_supported_device_ids {
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 069b561ee768..edad1f30121d 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -57,12 +57,12 @@ struct ad7923_state {
unsigned int settings;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* Ensure rx_buf can be directly used in iio_push_to_buffers_with_timestamp
* Length = 8 channels + 4 extra for 8 byte timestamp
*/
- __be16 rx_buf[12] ____cacheline_aligned;
+ __be16 rx_buf[12] __aligned(IIO_DMA_MINALIGN);
__be16 tx_buf[4];
};
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index 44bb5fde83de..edd0c3a35ab7 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -86,7 +86,7 @@ struct ad7949_adc_chip {
u8 resolution;
u16 cfg;
unsigned int current_channel;
- u16 buffer ____cacheline_aligned;
+ u16 buffer __aligned(IIO_DMA_MINALIGN);
__be16 buf8b;
};
@@ -400,7 +400,7 @@ static int ad7949_spi_probe(struct spi_device *spi)
ret = ad7949_spi_init(ad7949_adc);
if (ret) {
- dev_err(dev, "enable to init this device: %d\n", ret);
+ dev_err(dev, "failed to init this device: %d\n", ret);
return ret;
}
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 220228c375d3..262bd7665b33 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -896,7 +896,7 @@ static int ad799x_remove(struct i2c_client *client)
return 0;
}
-static int __maybe_unused ad799x_suspend(struct device *dev)
+static int ad799x_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct ad799x_state *st = iio_priv(indio_dev);
@@ -908,7 +908,7 @@ static int __maybe_unused ad799x_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ad799x_resume(struct device *dev)
+static int ad799x_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct ad799x_state *st = iio_priv(indio_dev);
@@ -941,7 +941,7 @@ static int __maybe_unused ad799x_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(ad799x_pm_ops, ad799x_suspend, ad799x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ad799x_pm_ops, ad799x_suspend, ad799x_resume);
static const struct i2c_device_id ad799x_id[] = {
{ "ad7991", ad7991 },
@@ -960,7 +960,7 @@ MODULE_DEVICE_TABLE(i2c, ad799x_id);
static struct i2c_driver ad799x_driver = {
.driver = {
.name = "ad799x",
- .pm = &ad799x_pm_ops,
+ .pm = pm_sleep_ptr(&ad799x_pm_ops),
},
.probe = ad799x_probe,
.remove = ad799x_remove,
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index dbfc8517cb8a..5a5f33f7bc8f 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -474,3 +474,4 @@ module_spi_driver(ad9467_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_ADI_AXI);
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index a9e655e69eaa..e8a8ea4140f1 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -84,9 +84,10 @@ void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
{
struct adi_axi_adc_client *cl = conv_to_client(conv);
- return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+ return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client),
+ IIO_DMA_MINALIGN);
}
-EXPORT_SYMBOL_GPL(adi_axi_adc_conv_priv);
+EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI);
static void adi_axi_adc_write(struct adi_axi_adc_state *st,
unsigned int reg,
@@ -169,9 +170,9 @@ static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
struct adi_axi_adc_client *cl;
size_t alloc_size;
- alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+ alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_DMA_MINALIGN);
if (sizeof_priv)
- alloc_size += ALIGN(sizeof_priv, IIO_ALIGN);
+ alloc_size += ALIGN(sizeof_priv, IIO_DMA_MINALIGN);
cl = kzalloc(alloc_size, GFP_KERNEL);
if (!cl)
@@ -224,7 +225,7 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
return conv;
}
-EXPORT_SYMBOL_GPL(devm_adi_axi_adc_conv_register);
+EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);
static ssize_t in_voltage_scale_available_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index b764823ce57e..279430c1d88c 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1752,7 +1752,7 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
int ret;
if (val > AT91_HWFIFO_MAX_SIZE)
- return -EINVAL;
+ val = AT91_HWFIFO_MAX_SIZE;
if (!st->selected_trig->hw_trig) {
dev_dbg(&indio_dev->dev, "we need hw trigger for DMA\n");
@@ -2103,7 +2103,7 @@ static int at91_adc_remove(struct platform_device *pdev)
return 0;
}
-static __maybe_unused int at91_adc_suspend(struct device *dev)
+static int at91_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
@@ -2123,7 +2123,7 @@ static __maybe_unused int at91_adc_suspend(struct device *dev)
return pinctrl_pm_select_sleep_state(dev);
}
-static __maybe_unused int at91_adc_resume(struct device *dev)
+static int at91_adc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
@@ -2169,7 +2169,8 @@ resume_failed:
return ret;
}
-static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend,
+ at91_adc_resume);
static const struct of_device_id at91_adc_dt_match[] = {
{
@@ -2190,7 +2191,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = "at91-sama5d2_adc",
.of_match_table = at91_adc_dt_match,
- .pm = &at91_adc_pm_ops,
+ .pm = pm_sleep_ptr(&at91_adc_pm_ops),
},
};
module_platform_driver(at91_adc_driver)
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 03987d7e6b3d..3d2e8b4db61a 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -15,7 +15,9 @@
#include <linux/iio/machine.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 8eb0140df133..771fa12bdc02 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -49,7 +49,7 @@ struct hi8435_priv {
unsigned threshold_lo[2]; /* GND-Open and Supply-Open thresholds */
unsigned threshold_hi[2]; /* GND-Open and Supply-Open thresholds */
- u8 reg_buffer[3] ____cacheline_aligned;
+ u8 reg_buffer[3] __aligned(IIO_DMA_MINALIGN);
};
static int hi8435_readb(struct hi8435_priv *priv, u8 reg, u8 *val)
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 12f5b8e34c84..86caff1d006b 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -539,14 +540,15 @@ static int imx7d_adc_probe(struct platform_device *pdev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx7d_adc_pm_ops, imx7d_adc_disable, imx7d_adc_enable);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx7d_adc_pm_ops, imx7d_adc_disable,
+ imx7d_adc_enable);
static struct platform_driver imx7d_adc_driver = {
.probe = imx7d_adc_probe,
.driver = {
.name = "imx7d_adc",
.of_match_table = imx7d_adc_match,
- .pm = &imx7d_adc_pm_ops,
+ .pm = pm_sleep_ptr(&imx7d_adc_pm_ops),
},
};
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index 901dd8e1b32f..e48446784a0a 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -416,7 +417,7 @@ static int imx8qxp_adc_remove(struct platform_device *pdev)
return 0;
}
-static __maybe_unused int imx8qxp_adc_runtime_suspend(struct device *dev)
+static int imx8qxp_adc_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct imx8qxp_adc *adc = iio_priv(indio_dev);
@@ -430,7 +431,7 @@ static __maybe_unused int imx8qxp_adc_runtime_suspend(struct device *dev)
return 0;
}
-static __maybe_unused int imx8qxp_adc_runtime_resume(struct device *dev)
+static int imx8qxp_adc_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct imx8qxp_adc *adc = iio_priv(indio_dev);
@@ -467,10 +468,9 @@ err_disable_reg:
return ret;
}
-static const struct dev_pm_ops imx8qxp_adc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(imx8qxp_adc_runtime_suspend, imx8qxp_adc_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(imx8qxp_adc_pm_ops,
+ imx8qxp_adc_runtime_suspend,
+ imx8qxp_adc_runtime_resume, NULL);
static const struct of_device_id imx8qxp_adc_match[] = {
{ .compatible = "nxp,imx8qxp-adc", },
@@ -484,7 +484,7 @@ static struct platform_driver imx8qxp_adc_driver = {
.driver = {
.name = ADC_DRIVER_NAME,
.of_match_table = imx8qxp_adc_match,
- .pm = &imx8qxp_adc_pm_ops,
+ .pm = pm_ptr(&imx8qxp_adc_pm_ops),
},
};
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index abad16803849..240e6c420701 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -1038,12 +1038,18 @@ static int ina2xx_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
+ int ret;
iio_device_unregister(indio_dev);
/* Powerdown */
- return regmap_update_bits(chip->regmap, INA2XX_CONFIG,
- INA2XX_MODE_MASK, 0);
+ ret = regmap_update_bits(chip->regmap, INA2XX_CONFIG,
+ INA2XX_MODE_MASK, 0);
+ if (ret)
+ dev_warn(&client->dev, "Failed to power down device (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
static const struct i2c_device_id ina2xx_id[] = {
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index 2b3912c6ca6b..bf5c03c34f84 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -17,7 +17,9 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define JZ_ADC_REG_ENABLE 0x00
#define JZ_ADC_REG_CFG 0x04
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index 616de0c3a049..7263ad76124d 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index ae9c9384f23e..42e6cd6fa6f7 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -17,10 +17,9 @@
#include <linux/iio/driver.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index 5a55f79f2574..dfb3bb5997e5 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -24,10 +24,10 @@ struct ltc2496_driverdata {
struct spi_device *spi;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- unsigned char rxbuf[3] ____cacheline_aligned;
+ unsigned char rxbuf[3] __aligned(IIO_DMA_MINALIGN);
unsigned char txbuf[3];
};
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 1adddf5a88a9..f7c786f37ceb 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -20,10 +20,10 @@ struct ltc2497_driverdata {
struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- __be32 buf ____cacheline_aligned;
+ __be32 buf __aligned(IIO_DMA_MINALIGN);
};
static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 4daf1d576c4e..136fcf753837 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -272,7 +272,7 @@ struct max1027_state {
struct mutex lock;
struct completion complete;
- u8 reg ____cacheline_aligned;
+ u8 reg __aligned(IIO_DMA_MINALIGN);
};
static int max1027_wait_eoc(struct iio_dev *indio_dev)
@@ -349,8 +349,7 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
if (ret < 0) {
dev_err(&indio_dev->dev,
"Failed to configure conversion register\n");
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ goto release;
}
/*
@@ -360,11 +359,12 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
*/
ret = max1027_wait_eoc(indio_dev);
if (ret)
- return ret;
+ goto release;
/* Read result */
ret = spi_read(st->spi, st->buffer, (chan->type == IIO_TEMP) ? 4 : 2);
+release:
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index eb1ce6a0315c..49e38dca8fe2 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -33,10 +33,10 @@ struct max11100_state {
struct spi_device *spi;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- u8 buffer[3] ____cacheline_aligned;
+ u8 buffer[3] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec max11100_channels[] = {
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index a41bc570be21..75ab57d9aef7 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -42,7 +42,7 @@ struct max1118 {
s64 ts __aligned(8);
} scan;
- u8 data ____cacheline_aligned;
+ u8 data __aligned(IIO_DMA_MINALIGN);
};
#define MAX1118_CHANNEL(ch) \
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
index a5afd84af58b..a815ad1f6913 100644
--- a/drivers/iio/adc/max1241.c
+++ b/drivers/iio/adc/max1241.c
@@ -26,7 +26,7 @@ struct max1241 {
struct regulator *vref;
struct gpio_desc *shutdown;
- __be16 data ____cacheline_aligned;
+ __be16 data __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec max1241_channels[] = {
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index b4c69acb33e3..f3b81798b3c9 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -92,7 +92,7 @@ struct mcp320x {
struct mutex lock;
const struct mcp320x_chip_info *chip_info;
- u8 tx_buf ____cacheline_aligned;
+ u8 tx_buf __aligned(IIO_DMA_MINALIGN);
u8 rx_buf[4];
};
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 62cc6fb0ef85..1a68b099d323 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -322,22 +322,17 @@ static int meson_sar_adc_calib_val(struct iio_dev *indio_dev, int val)
static int meson_sar_adc_wait_busy_clear(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- int regval, timeout = 10000;
+ int val;
/*
* NOTE: we need a small delay before reading the status, otherwise
* the sample engine may not have started internally (which would
* seem to us that sampling is already finished).
*/
- do {
- udelay(1);
- regmap_read(priv->regmap, MESON_SAR_ADC_REG0, &regval);
- } while (FIELD_GET(MESON_SAR_ADC_REG0_BUSY_MASK, regval) && timeout--);
-
- if (timeout < 0)
- return -ETIMEDOUT;
-
- return 0;
+ udelay(1);
+ return regmap_read_poll_timeout_atomic(priv->regmap, MESON_SAR_ADC_REG0, val,
+ !FIELD_GET(MESON_SAR_ADC_REG0_BUSY_MASK, val),
+ 1, 10000);
}
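regmap_read_poll_timeout_atomic() keeps the old behaviour: poll MESON_SAR_ADC_REG0 about every 1 us for up to 10 ms, returning 0 once the BUSY field clears, the regmap_read() error if a read fails, or -ETIMEDOUT. Roughly the open-coded loop it replaces, shown only for comparison (ret and timeout are illustrative locals, not part of the driver):

	int ret, timeout = 10000;

	do {
		ret = regmap_read(priv->regmap, MESON_SAR_ADC_REG0, &val);
		if (ret)
			return ret;
		if (!FIELD_GET(MESON_SAR_ADC_REG0_BUSY_MASK, val))
			return 0;
		udelay(1);
	} while (timeout--);
	return -ETIMEDOUT;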
static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev,
@@ -345,6 +340,7 @@ static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev,
int *val)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
int regval, fifo_chan, fifo_val, count;
if (!wait_for_completion_timeout(&priv->done,
@@ -353,16 +349,14 @@ static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev,
count = meson_sar_adc_get_fifo_count(indio_dev);
if (count != 1) {
- dev_err(&indio_dev->dev,
- "ADC FIFO has %d element(s) instead of one\n", count);
+ dev_err(dev, "ADC FIFO has %d element(s) instead of one\n", count);
return -EINVAL;
}
regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &regval);
fifo_chan = FIELD_GET(MESON_SAR_ADC_FIFO_RD_CHAN_ID_MASK, regval);
if (fifo_chan != chan->address) {
- dev_err(&indio_dev->dev,
- "ADC FIFO entry belongs to channel %d instead of %lu\n",
+ dev_err(dev, "ADC FIFO entry belongs to channel %d instead of %lu\n",
fifo_chan, chan->address);
return -EINVAL;
}
@@ -490,7 +484,7 @@ static void meson_sar_adc_stop_sample_engine(struct iio_dev *indio_dev)
static int meson_sar_adc_lock(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- int val, timeout = 10000;
+ int val, ret;
mutex_lock(&indio_dev->mlock);
@@ -500,18 +494,18 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
MESON_SAR_ADC_DELAY_KERNEL_BUSY,
MESON_SAR_ADC_DELAY_KERNEL_BUSY);
+ udelay(1);
+
/*
* wait until BL30 releases its lock (so we can use the SAR
* ADC)
*/
- do {
- udelay(1);
- regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
- } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);
-
- if (timeout < 0) {
+ ret = regmap_read_poll_timeout_atomic(priv->regmap, MESON_SAR_ADC_DELAY, val,
+ !(val & MESON_SAR_ADC_DELAY_BL30_BUSY),
+ 1, 10000);
+ if (ret) {
mutex_unlock(&indio_dev->mlock);
- return -ETIMEDOUT;
+ return ret;
}
}
@@ -550,6 +544,7 @@ static int meson_sar_adc_get_sample(struct iio_dev *indio_dev,
int *val)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
int ret;
if (chan->type == IIO_TEMP && !priv->temperature_sensor_calibrated)
@@ -573,8 +568,7 @@ static int meson_sar_adc_get_sample(struct iio_dev *indio_dev,
meson_sar_adc_unlock(indio_dev);
if (ret) {
- dev_warn(indio_dev->dev.parent,
- "failed to read sample for channel %lu: %d\n",
+ dev_warn(dev, "failed to read sample for channel %lu: %d\n",
chan->address, ret);
return ret;
}
@@ -587,6 +581,7 @@ static int meson_sar_adc_iio_info_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
int ret;
switch (mask) {
@@ -603,9 +598,7 @@ static int meson_sar_adc_iio_info_read_raw(struct iio_dev *indio_dev,
if (chan->type == IIO_VOLTAGE) {
ret = regulator_get_voltage(priv->vref);
if (ret < 0) {
- dev_err(indio_dev->dev.parent,
- "failed to get vref voltage: %d\n",
- ret);
+ dev_err(dev, "failed to get vref voltage: %d\n", ret);
return ret;
}
@@ -650,11 +643,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
void __iomem *base)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
struct clk_init_data init;
const char *clk_parents[1];
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div",
- dev_name(indio_dev->dev.parent));
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#adc_div", dev_name(dev));
if (!init.name)
return -ENOMEM;
@@ -670,13 +663,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
priv->clk_div.hw.init = &init;
priv->clk_div.flags = 0;
- priv->adc_div_clk = devm_clk_register(&indio_dev->dev,
- &priv->clk_div.hw);
+ priv->adc_div_clk = devm_clk_register(dev, &priv->clk_div.hw);
if (WARN_ON(IS_ERR(priv->adc_div_clk)))
return PTR_ERR(priv->adc_div_clk);
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en",
- dev_name(indio_dev->dev.parent));
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#adc_en", dev_name(dev));
if (!init.name)
return -ENOMEM;
@@ -690,7 +681,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
priv->clk_gate.hw.init = &init;
- priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
+ priv->adc_clk = devm_clk_register(dev, &priv->clk_gate.hw);
if (WARN_ON(IS_ERR(priv->adc_clk)))
return PTR_ERR(priv->adc_clk);
@@ -701,12 +692,12 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
u8 *buf, trimming_bits, trimming_mask, upper_adc_val;
+ struct device *dev = indio_dev->dev.parent;
struct nvmem_cell *temperature_calib;
size_t read_len;
int ret;
- temperature_calib = devm_nvmem_cell_get(indio_dev->dev.parent,
- "temperature_calib");
+ temperature_calib = devm_nvmem_cell_get(dev, "temperature_calib");
if (IS_ERR(temperature_calib)) {
ret = PTR_ERR(temperature_calib);
@@ -717,30 +708,21 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
if (ret == -ENODEV)
return 0;
- return dev_err_probe(indio_dev->dev.parent, ret,
- "failed to get temperature_calib cell\n");
+ return dev_err_probe(dev, ret, "failed to get temperature_calib cell\n");
}
- priv->tsc_regmap =
- syscon_regmap_lookup_by_phandle(indio_dev->dev.parent->of_node,
- "amlogic,hhi-sysctrl");
- if (IS_ERR(priv->tsc_regmap)) {
- dev_err(indio_dev->dev.parent,
- "failed to get amlogic,hhi-sysctrl regmap\n");
- return PTR_ERR(priv->tsc_regmap);
- }
+ priv->tsc_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "amlogic,hhi-sysctrl");
+ if (IS_ERR(priv->tsc_regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->tsc_regmap),
+ "failed to get amlogic,hhi-sysctrl regmap\n");
read_len = MESON_SAR_ADC_EFUSE_BYTES;
buf = nvmem_cell_read(temperature_calib, &read_len);
- if (IS_ERR(buf)) {
- dev_err(indio_dev->dev.parent,
- "failed to read temperature_calib cell\n");
- return PTR_ERR(buf);
- } else if (read_len != MESON_SAR_ADC_EFUSE_BYTES) {
+ if (IS_ERR(buf))
+ return dev_err_probe(dev, PTR_ERR(buf), "failed to read temperature_calib cell\n");
+ if (read_len != MESON_SAR_ADC_EFUSE_BYTES) {
kfree(buf);
- dev_err(indio_dev->dev.parent,
- "invalid read size of temperature_calib cell\n");
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "invalid read size of temperature_calib cell\n");
}
trimming_bits = priv->param->temperature_trimming_bits;
@@ -765,6 +747,7 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
static int meson_sar_adc_init(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
int regval, i, ret;
/*
@@ -888,18 +871,12 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
}
ret = clk_set_parent(priv->adc_sel_clk, priv->clkin);
- if (ret) {
- dev_err(indio_dev->dev.parent,
- "failed to set adc parent to clkin\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set adc parent to clkin\n");
ret = clk_set_rate(priv->adc_clk, priv->param->clock_rate);
- if (ret) {
- dev_err(indio_dev->dev.parent,
- "failed to set adc clock rate\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set adc clock rate\n");
return 0;
}
@@ -922,6 +899,7 @@ static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
int ret;
u32 regval;
@@ -931,14 +909,13 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
ret = regulator_enable(priv->vref);
if (ret < 0) {
- dev_err(indio_dev->dev.parent,
- "failed to enable vref regulator\n");
+ dev_err(dev, "failed to enable vref regulator\n");
goto err_vref;
}
ret = clk_prepare_enable(priv->core_clk);
if (ret) {
- dev_err(indio_dev->dev.parent, "failed to enable core clk\n");
+ dev_err(dev, "failed to enable core clk\n");
goto err_core_clk;
}
@@ -956,7 +933,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
ret = clk_prepare_enable(priv->adc_clk);
if (ret) {
- dev_err(indio_dev->dev.parent, "failed to enable adc clk\n");
+ dev_err(dev, "failed to enable adc clk\n");
goto err_adc_clk;
}
@@ -1186,24 +1163,21 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
{
const struct meson_sar_adc_data *match_data;
struct meson_sar_adc_priv *priv;
+ struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
void __iomem *base;
int irq, ret;
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
- return -ENOMEM;
- }
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM, "failed allocating iio device\n");
priv = iio_priv(indio_dev);
init_completion(&priv->done);
- match_data = of_device_get_match_data(&pdev->dev);
- if (!match_data) {
- dev_err(&pdev->dev, "failed to get match data\n");
- return -ENODEV;
- }
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -ENODEV, "failed to get match data\n");
priv->param = match_data->param;
@@ -1215,47 +1189,33 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- priv->param->regmap_config);
+ priv->regmap = devm_regmap_init_mmio(dev, base, priv->param->regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, meson_sar_adc_irq, IRQF_SHARED,
- dev_name(&pdev->dev), indio_dev);
+ ret = devm_request_irq(dev, irq, meson_sar_adc_irq, IRQF_SHARED, dev_name(dev), indio_dev);
if (ret)
return ret;
- priv->clkin = devm_clk_get(&pdev->dev, "clkin");
+ priv->clkin = devm_clk_get(dev, "clkin");
if (IS_ERR(priv->clkin))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->clkin),
- "failed to get clkin\n");
+ return dev_err_probe(dev, PTR_ERR(priv->clkin), "failed to get clkin\n");
- priv->core_clk = devm_clk_get(&pdev->dev, "core");
+ priv->core_clk = devm_clk_get(dev, "core");
if (IS_ERR(priv->core_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->core_clk),
- "failed to get core clk\n");
+ return dev_err_probe(dev, PTR_ERR(priv->core_clk), "failed to get core clk\n");
- priv->adc_clk = devm_clk_get(&pdev->dev, "adc_clk");
- if (IS_ERR(priv->adc_clk)) {
- if (PTR_ERR(priv->adc_clk) == -ENOENT)
- priv->adc_clk = NULL;
- else
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->adc_clk),
- "failed to get adc clk\n");
- }
+ priv->adc_clk = devm_clk_get_optional(dev, "adc_clk");
+ if (IS_ERR(priv->adc_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->adc_clk), "failed to get adc clk\n");
- priv->adc_sel_clk = devm_clk_get(&pdev->dev, "adc_sel");
- if (IS_ERR(priv->adc_sel_clk)) {
- if (PTR_ERR(priv->adc_sel_clk) == -ENOENT)
- priv->adc_sel_clk = NULL;
- else
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->adc_sel_clk),
- "failed to get adc_sel clk\n");
- }
+ priv->adc_sel_clk = devm_clk_get_optional(dev, "adc_sel");
+ if (IS_ERR(priv->adc_sel_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->adc_sel_clk), "failed to get adc_sel clk\n");
/* on pre-GXBB SoCs the SAR ADC itself provides the ADC clock: */
if (!priv->adc_clk) {
@@ -1264,10 +1224,9 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
return ret;
}
- priv->vref = devm_regulator_get(&pdev->dev, "vref");
+ priv->vref = devm_regulator_get(dev, "vref");
if (IS_ERR(priv->vref))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->vref),
- "failed to get vref regulator\n");
+ return dev_err_probe(dev, PTR_ERR(priv->vref), "failed to get vref regulator\n");
priv->calibscale = MILLION;
@@ -1297,7 +1256,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
ret = meson_sar_adc_calib(indio_dev);
if (ret)
- dev_warn(&pdev->dev, "calibration failed\n");
+ dev_warn(dev, "calibration failed\n");
platform_set_drvdata(pdev, indio_dev);
@@ -1322,22 +1281,22 @@ static int meson_sar_adc_remove(struct platform_device *pdev)
return meson_sar_adc_hw_disable(indio_dev);
}
-static int __maybe_unused meson_sar_adc_suspend(struct device *dev)
+static int meson_sar_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
return meson_sar_adc_hw_disable(indio_dev);
}
-static int __maybe_unused meson_sar_adc_resume(struct device *dev)
+static int meson_sar_adc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
return meson_sar_adc_hw_enable(indio_dev);
}
-static SIMPLE_DEV_PM_OPS(meson_sar_adc_pm_ops,
- meson_sar_adc_suspend, meson_sar_adc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(meson_sar_adc_pm_ops,
+ meson_sar_adc_suspend, meson_sar_adc_resume);
static struct platform_driver meson_sar_adc_driver = {
.probe = meson_sar_adc_probe,
@@ -1345,7 +1304,7 @@ static struct platform_driver meson_sar_adc_driver = {
.driver = {
.name = "meson-saradc",
.of_match_table = meson_sar_adc_of_match,
- .pm = &meson_sar_adc_pm_ops,
+ .pm = pm_sleep_ptr(&meson_sar_adc_pm_ops),
},
};
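Two recurring conversions in the meson_saradc hunks above are worth calling out: dev_err_probe(), which logs the error (staying silent on -EPROBE_DEFER) and returns it in one statement, and devm_clk_get_optional(), which returns NULL rather than -ENOENT when the clock is simply absent, removing the hand-rolled -ENOENT special case. A condensed sketch of the combined probe-path pattern, with a placeholder "foo" clock name:

        /* NULL (not an error) if the firmware describes no "foo" clock. */
        priv->foo_clk = devm_clk_get_optional(dev, "foo");
        if (IS_ERR(priv->foo_clk))
                return dev_err_probe(dev, PTR_ERR(priv->foo_clk),
                                     "failed to get foo clk\n");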
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index aca084f1e78a..30a31f185d08 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -11,6 +11,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/mfd/mp2629.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 07c0e6768391..35260d9e4e47 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -5,6 +5,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index e78c96a185db..0e134777bdd2 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -215,7 +215,7 @@ static const struct iio_info mt6577_auxadc_info = {
.read_raw = &mt6577_auxadc_read_raw,
};
-static int __maybe_unused mt6577_auxadc_resume(struct device *dev)
+static int mt6577_auxadc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
@@ -234,7 +234,7 @@ static int __maybe_unused mt6577_auxadc_resume(struct device *dev)
return 0;
}
-static int __maybe_unused mt6577_auxadc_suspend(struct device *dev)
+static int mt6577_auxadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
@@ -330,9 +330,9 @@ static int mt6577_auxadc_remove(struct platform_device *pdev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(mt6577_auxadc_pm_ops,
- mt6577_auxadc_suspend,
- mt6577_auxadc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt6577_auxadc_pm_ops,
+ mt6577_auxadc_suspend,
+ mt6577_auxadc_resume);
static const struct of_device_id mt6577_auxadc_of_match[] = {
{ .compatible = "mediatek,mt2701-auxadc", .data = &mt8173_compat },
@@ -349,7 +349,7 @@ static struct platform_driver mt6577_auxadc_driver = {
.driver = {
.name = "mt6577-auxadc",
.of_match_table = mt6577_auxadc_of_match,
- .pm = &mt6577_auxadc_pm_ops,
+ .pm = pm_sleep_ptr(&mt6577_auxadc_pm_ops),
},
.probe = mt6577_auxadc_probe,
.remove = mt6577_auxadc_remove,
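The PM-ops conversions here (and in meson_saradc above and stmpe-adc below) swap __maybe_unused plus SIMPLE_DEV_PM_OPS for DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(): the callbacks are always compiled (keeping build coverage), while pm_sleep_ptr() evaluates to NULL with CONFIG_PM_SLEEP=n so the unused ops can be discarded at link time. A sketch with placeholder names:

static int foo_suspend(struct device *dev)
{
        /* power the hardware down */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* power the hardware back up */
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                /* NULL when CONFIG_PM_SLEEP is disabled */
                .pm = pm_sleep_ptr(&foo_pm_ops),
        },
};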
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 976c235f3079..c1261ecd400c 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -8,10 +8,11 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/wait.h>
#include <linux/log2.h>
-#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -407,21 +408,14 @@ static const struct iio_info nau7802_info = {
.attrs = &nau7802_attribute_group,
};
-static int nau7802_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nau7802_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
struct nau7802_state *st;
- struct device_node *np = client->dev.of_node;
int i, ret;
u8 data;
u32 tmp = 0;
- if (!client->dev.of_node) {
- dev_err(&client->dev, "No device tree node available.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -457,7 +451,7 @@ static int nau7802_probe(struct i2c_client *client,
if (!(ret & NAU7802_PUCTRL_PUR_BIT))
return ret;
- of_property_read_u32(np, "nuvoton,vldo", &tmp);
+ device_property_read_u32(&client->dev, "nuvoton,vldo", &tmp);
st->vref_mv = tmp;
data = NAU7802_PUCTRL_PUD_BIT | NAU7802_PUCTRL_PUA_BIT |
@@ -550,7 +544,7 @@ static const struct of_device_id nau7802_dt_ids[] = {
MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
static struct i2c_driver nau7802_driver = {
- .probe = nau7802_probe,
+ .probe_new = nau7802_probe,
.id_table = nau7802_i2c_id,
.driver = {
.name = "nau7802",
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index f7bc0bb7f112..ba4cd8f49f66 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -8,14 +8,22 @@
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/reset.h>
+struct npcm_adc_info {
+ u32 data_mask;
+ u32 internal_vref;
+ u32 res_bits;
+};
+
struct npcm_adc {
bool int_status;
u32 adc_sample_hz;
@@ -34,6 +42,7 @@ struct npcm_adc {
* has finished.
*/
struct mutex lock;
+ const struct npcm_adc_info *data;
};
/* ADC registers */
@@ -52,13 +61,21 @@ struct npcm_adc {
#define NPCM_ADCCON_CH(x) ((x) << 24)
#define NPCM_ADCCON_DIV_SHIFT 1
#define NPCM_ADCCON_DIV_MASK GENMASK(8, 1)
-#define NPCM_ADC_DATA_MASK(x) ((x) & GENMASK(9, 0))
#define NPCM_ADC_ENABLE (NPCM_ADCCON_ADC_EN | NPCM_ADCCON_ADC_INT_EN)
/* ADC General Definition */
-#define NPCM_RESOLUTION_BITS 10
-#define NPCM_INT_VREF_MV 2000
+static const struct npcm_adc_info npxm7xx_adc_info = {
+ .data_mask = GENMASK(9, 0),
+ .internal_vref = 2048,
+ .res_bits = 10,
+};
+
+static const struct npcm_adc_info npxm8xx_adc_info = {
+ .data_mask = GENMASK(11, 0),
+ .internal_vref = 1229,
+ .res_bits = 12,
+};
#define NPCM_ADC_CHAN(ch) { \
.type = IIO_VOLTAGE, \
@@ -129,7 +146,8 @@ static int npcm_adc_read(struct npcm_adc *info, int *val, u8 channel)
if (ret < 0)
return ret;
- *val = NPCM_ADC_DATA_MASK(ioread32(info->regs + NPCM_ADCDATA));
+ *val = ioread32(info->regs + NPCM_ADCDATA);
+ *val &= info->data->data_mask;
return 0;
}
@@ -157,9 +175,9 @@ static int npcm_adc_read_raw(struct iio_dev *indio_dev,
vref_uv = regulator_get_voltage(info->vref);
*val = vref_uv / 1000;
} else {
- *val = NPCM_INT_VREF_MV;
+ *val = info->data->internal_vref;
}
- *val2 = NPCM_RESOLUTION_BITS;
+ *val2 = info->data->res_bits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = info->adc_sample_hz;
@@ -176,7 +194,8 @@ static const struct iio_info npcm_adc_iio_info = {
};
static const struct of_device_id npcm_adc_match[] = {
- { .compatible = "nuvoton,npcm750-adc", },
+ { .compatible = "nuvoton,npcm750-adc", .data = &npxm7xx_adc_info},
+ { .compatible = "nuvoton,npcm845-adc", .data = &npxm8xx_adc_info},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, npcm_adc_match);
@@ -196,6 +215,10 @@ static int npcm_adc_probe(struct platform_device *pdev)
return -ENOMEM;
info = iio_priv(indio_dev);
+ info->data = device_get_match_data(dev);
+ if (!info->data)
+ return -EINVAL;
+
mutex_init(&info->lock);
info->dev = &pdev->dev;
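The npcm_adc rework keys the per-SoC parameters (data mask, internal vref, resolution) off the OF match entry and pulls them back with device_get_match_data(). The general shape of that pattern, with placeholder names and headers elided:

struct foo_info {
        u32 data_mask;
        u32 res_bits;
};

static const struct foo_info foo_v1_info = {
        .data_mask = GENMASK(9, 0),
        .res_bits = 10,
};

static const struct of_device_id foo_match[] = {
        { .compatible = "vendor,foo-v1", .data = &foo_v1_info },
        { /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
        const struct foo_info *info = device_get_match_data(&pdev->dev);

        if (!info)
                return -EINVAL;
        /* info->data_mask and info->res_bits now select variant behaviour */
        return 0;
}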
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
new file mode 100644
index 000000000000..56a713766954
--- /dev/null
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Linaro Limited.
+ * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ *
+ * This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#include <soc/qcom/qcom-spmi-pmic.h>
+
+#define DRIVER_NAME "qcom-spmi-rradc"
+
+#define RR_ADC_EN_CTL 0x46
+#define RR_ADC_SKIN_TEMP_LSB 0x50
+#define RR_ADC_SKIN_TEMP_MSB 0x51
+#define RR_ADC_CTL 0x52
+#define RR_ADC_CTL_CONTINUOUS_SEL BIT(3)
+#define RR_ADC_LOG 0x53
+#define RR_ADC_LOG_CLR_CTRL BIT(0)
+
+#define RR_ADC_FAKE_BATT_LOW_LSB 0x58
+#define RR_ADC_FAKE_BATT_LOW_MSB 0x59
+#define RR_ADC_FAKE_BATT_HIGH_LSB 0x5A
+#define RR_ADC_FAKE_BATT_HIGH_MSB 0x5B
+
+#define RR_ADC_BATT_ID_CTRL 0x60
+#define RR_ADC_BATT_ID_CTRL_CHANNEL_CONV BIT(0)
+#define RR_ADC_BATT_ID_TRIGGER 0x61
+#define RR_ADC_BATT_ID_STS 0x62
+#define RR_ADC_BATT_ID_CFG 0x63
+#define BATT_ID_SETTLE_MASK GENMASK(7, 5)
+#define RR_ADC_BATT_ID_5_LSB 0x66
+#define RR_ADC_BATT_ID_5_MSB 0x67
+#define RR_ADC_BATT_ID_15_LSB 0x68
+#define RR_ADC_BATT_ID_15_MSB 0x69
+#define RR_ADC_BATT_ID_150_LSB 0x6A
+#define RR_ADC_BATT_ID_150_MSB 0x6B
+
+#define RR_ADC_BATT_THERM_CTRL 0x70
+#define RR_ADC_BATT_THERM_TRIGGER 0x71
+#define RR_ADC_BATT_THERM_STS 0x72
+#define RR_ADC_BATT_THERM_CFG 0x73
+#define RR_ADC_BATT_THERM_LSB 0x74
+#define RR_ADC_BATT_THERM_MSB 0x75
+#define RR_ADC_BATT_THERM_FREQ 0x76
+
+#define RR_ADC_AUX_THERM_CTRL 0x80
+#define RR_ADC_AUX_THERM_TRIGGER 0x81
+#define RR_ADC_AUX_THERM_STS 0x82
+#define RR_ADC_AUX_THERM_CFG 0x83
+#define RR_ADC_AUX_THERM_LSB 0x84
+#define RR_ADC_AUX_THERM_MSB 0x85
+
+#define RR_ADC_SKIN_HOT 0x86
+#define RR_ADC_SKIN_TOO_HOT 0x87
+
+#define RR_ADC_AUX_THERM_C1 0x88
+#define RR_ADC_AUX_THERM_C2 0x89
+#define RR_ADC_AUX_THERM_C3 0x8A
+#define RR_ADC_AUX_THERM_HALF_RANGE 0x8B
+
+#define RR_ADC_USB_IN_V_CTRL 0x90
+#define RR_ADC_USB_IN_V_TRIGGER 0x91
+#define RR_ADC_USB_IN_V_STS 0x92
+#define RR_ADC_USB_IN_V_LSB 0x94
+#define RR_ADC_USB_IN_V_MSB 0x95
+#define RR_ADC_USB_IN_I_CTRL 0x98
+#define RR_ADC_USB_IN_I_TRIGGER 0x99
+#define RR_ADC_USB_IN_I_STS 0x9A
+#define RR_ADC_USB_IN_I_LSB 0x9C
+#define RR_ADC_USB_IN_I_MSB 0x9D
+
+#define RR_ADC_DC_IN_V_CTRL 0xA0
+#define RR_ADC_DC_IN_V_TRIGGER 0xA1
+#define RR_ADC_DC_IN_V_STS 0xA2
+#define RR_ADC_DC_IN_V_LSB 0xA4
+#define RR_ADC_DC_IN_V_MSB 0xA5
+#define RR_ADC_DC_IN_I_CTRL 0xA8
+#define RR_ADC_DC_IN_I_TRIGGER 0xA9
+#define RR_ADC_DC_IN_I_STS 0xAA
+#define RR_ADC_DC_IN_I_LSB 0xAC
+#define RR_ADC_DC_IN_I_MSB 0xAD
+
+#define RR_ADC_PMI_DIE_TEMP_CTRL 0xB0
+#define RR_ADC_PMI_DIE_TEMP_TRIGGER 0xB1
+#define RR_ADC_PMI_DIE_TEMP_STS 0xB2
+#define RR_ADC_PMI_DIE_TEMP_CFG 0xB3
+#define RR_ADC_PMI_DIE_TEMP_LSB 0xB4
+#define RR_ADC_PMI_DIE_TEMP_MSB 0xB5
+
+#define RR_ADC_CHARGER_TEMP_CTRL 0xB8
+#define RR_ADC_CHARGER_TEMP_TRIGGER 0xB9
+#define RR_ADC_CHARGER_TEMP_STS 0xBA
+#define RR_ADC_CHARGER_TEMP_CFG 0xBB
+#define RR_ADC_CHARGER_TEMP_LSB 0xBC
+#define RR_ADC_CHARGER_TEMP_MSB 0xBD
+#define RR_ADC_CHARGER_HOT 0xBE
+#define RR_ADC_CHARGER_TOO_HOT 0xBF
+
+#define RR_ADC_GPIO_CTRL 0xC0
+#define RR_ADC_GPIO_TRIGGER 0xC1
+#define RR_ADC_GPIO_STS 0xC2
+#define RR_ADC_GPIO_LSB 0xC4
+#define RR_ADC_GPIO_MSB 0xC5
+
+#define RR_ADC_ATEST_CTRL 0xC8
+#define RR_ADC_ATEST_TRIGGER 0xC9
+#define RR_ADC_ATEST_STS 0xCA
+#define RR_ADC_ATEST_LSB 0xCC
+#define RR_ADC_ATEST_MSB 0xCD
+#define RR_ADC_SEC_ACCESS 0xD0
+
+#define RR_ADC_PERPH_RESET_CTL2 0xD9
+#define RR_ADC_PERPH_RESET_CTL3 0xDA
+#define RR_ADC_PERPH_RESET_CTL4 0xDB
+#define RR_ADC_INT_TEST1 0xE0
+#define RR_ADC_INT_TEST_VAL 0xE1
+
+#define RR_ADC_TM_TRIGGER_CTRLS 0xE2
+#define RR_ADC_TM_ADC_CTRLS 0xE3
+#define RR_ADC_TM_CNL_CTRL 0xE4
+#define RR_ADC_TM_BATT_ID_CTRL 0xE5
+#define RR_ADC_TM_THERM_CTRL 0xE6
+#define RR_ADC_TM_CONV_STS 0xE7
+#define RR_ADC_TM_ADC_READ_LSB 0xE8
+#define RR_ADC_TM_ADC_READ_MSB 0xE9
+#define RR_ADC_TM_ATEST_MUX_1 0xEA
+#define RR_ADC_TM_ATEST_MUX_2 0xEB
+#define RR_ADC_TM_REFERENCES 0xED
+#define RR_ADC_TM_MISC_CTL 0xEE
+#define RR_ADC_TM_RR_CTRL 0xEF
+
+#define RR_ADC_TRIGGER_EVERY_CYCLE BIT(7)
+#define RR_ADC_TRIGGER_CTL BIT(0)
+
+#define RR_ADC_BATT_ID_RANGE 820
+
+#define RR_ADC_BITS 10
+#define RR_ADC_CHAN_MSB (1 << RR_ADC_BITS)
+#define RR_ADC_FS_VOLTAGE_MV 2500
+
+/* BATT_THERM 0.25K/LSB */
+#define RR_ADC_BATT_THERM_LSB_K 4
+
+#define RR_ADC_TEMP_FS_VOLTAGE_NUM 5000000
+#define RR_ADC_TEMP_FS_VOLTAGE_DEN 3
+#define RR_ADC_DIE_TEMP_OFFSET 601400
+#define RR_ADC_DIE_TEMP_SLOPE 2
+#define RR_ADC_DIE_TEMP_OFFSET_MILLI_DEGC 25000
+
+#define RR_ADC_CHG_TEMP_GF_OFFSET_UV 1303168
+#define RR_ADC_CHG_TEMP_GF_SLOPE_UV_PER_C 3784
+#define RR_ADC_CHG_TEMP_SMIC_OFFSET_UV 1338433
+#define RR_ADC_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3655
+#define RR_ADC_CHG_TEMP_660_GF_OFFSET_UV 1309001
+#define RR_ADC_CHG_TEMP_660_GF_SLOPE_UV_PER_C 3403
+#define RR_ADC_CHG_TEMP_660_SMIC_OFFSET_UV 1295898
+#define RR_ADC_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C 3596
+#define RR_ADC_CHG_TEMP_660_MGNA_OFFSET_UV 1314779
+#define RR_ADC_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C 3496
+#define RR_ADC_CHG_TEMP_OFFSET_MILLI_DEGC 25000
+#define RR_ADC_CHG_THRESHOLD_SCALE 4
+
+#define RR_ADC_VOLT_INPUT_FACTOR 8
+#define RR_ADC_CURR_INPUT_FACTOR 2000
+#define RR_ADC_CURR_USBIN_INPUT_FACTOR_MIL 1886
+#define RR_ADC_CURR_USBIN_660_FACTOR_MIL 9
+#define RR_ADC_CURR_USBIN_660_UV_VAL 579500
+
+#define RR_ADC_GPIO_FS_RANGE 5000
+#define RR_ADC_COHERENT_CHECK_RETRY 5
+#define RR_ADC_CHAN_MAX_CONTINUOUS_BUFFER_LEN 16
+
+#define RR_ADC_STS_CHANNEL_READING_MASK GENMASK(1, 0)
+#define RR_ADC_STS_CHANNEL_STS BIT(1)
+
+#define RR_ADC_TP_REV_VERSION1 21
+#define RR_ADC_TP_REV_VERSION2 29
+#define RR_ADC_TP_REV_VERSION3 32
+
+#define RRADC_BATT_ID_DELAY_MAX 8
+
+enum rradc_channel_id {
+ RR_ADC_BATT_ID = 0,
+ RR_ADC_BATT_THERM,
+ RR_ADC_SKIN_TEMP,
+ RR_ADC_USBIN_I,
+ RR_ADC_USBIN_V,
+ RR_ADC_DCIN_I,
+ RR_ADC_DCIN_V,
+ RR_ADC_DIE_TEMP,
+ RR_ADC_CHG_TEMP,
+ RR_ADC_GPIO,
+ RR_ADC_CHAN_MAX
+};
+
+struct rradc_chip;
+
+/**
+ * struct rradc_channel - rradc channel data
+ * @label: channel label
+ * @lsb: Channel least significant byte
+ * @status: Channel status address
+ * @size: number of bytes to read
+ * @trigger_addr: Trigger address, trigger is only used on some channels
+ * @trigger_mask: Trigger mask
+ * @scale_fn: Post process callback for channels which can't be exposed
+ * as offset + scale.
+ */
+struct rradc_channel {
+ const char *label;
+ u8 lsb;
+ u8 status;
+ int size;
+ int trigger_addr;
+ int trigger_mask;
+ int (*scale_fn)(struct rradc_chip *chip, u16 adc_code, int *result);
+};
+
+struct rradc_chip {
+ struct device *dev;
+ const struct qcom_spmi_pmic *pmic;
+ /*
+ * Lock held while doing channel conversion
+ * involving multiple register read/writes
+ */
+ struct mutex conversion_lock;
+ struct regmap *regmap;
+ u32 base;
+ int batt_id_delay;
+ u16 batt_id_data;
+};
+
+static const int batt_id_delays[] = { 0, 1, 4, 12, 20, 40, 60, 80 };
+static const struct rradc_channel rradc_chans[RR_ADC_CHAN_MAX];
+static const struct iio_chan_spec rradc_iio_chans[RR_ADC_CHAN_MAX];
+
+static int rradc_read(struct rradc_chip *chip, u16 addr, __le16 *buf, int len)
+{
+ int ret, retry_cnt = 0;
+ __le16 data_check[RR_ADC_CHAN_MAX_CONTINUOUS_BUFFER_LEN / 2];
+
+ if (len > RR_ADC_CHAN_MAX_CONTINUOUS_BUFFER_LEN) {
+ dev_err(chip->dev,
+ "Can't read more than %d bytes, but asked to read %d bytes.\n",
+ RR_ADC_CHAN_MAX_CONTINUOUS_BUFFER_LEN, len);
+ return -EINVAL;
+ }
+
+ while (retry_cnt < RR_ADC_COHERENT_CHECK_RETRY) {
+ ret = regmap_bulk_read(chip->regmap, chip->base + addr, buf,
+ len);
+ if (ret < 0) {
+ dev_err(chip->dev, "rr_adc reg 0x%x failed :%d\n", addr,
+ ret);
+ return ret;
+ }
+
+ ret = regmap_bulk_read(chip->regmap, chip->base + addr,
+ data_check, len);
+ if (ret < 0) {
+ dev_err(chip->dev, "rr_adc reg 0x%x failed :%d\n", addr,
+ ret);
+ return ret;
+ }
+
+ if (memcmp(buf, data_check, len) != 0) {
+ retry_cnt++;
+ dev_dbg(chip->dev,
+ "coherent read error, retry_cnt:%d\n",
+ retry_cnt);
+ continue;
+ }
+
+ break;
+ }
+
+ if (retry_cnt == RR_ADC_COHERENT_CHECK_RETRY)
+ dev_err(chip->dev, "Retry exceeded for coherency check\n");
+
+ return ret;
+}
+
+static int rradc_get_fab_coeff(struct rradc_chip *chip, int64_t *offset,
+ int64_t *slope)
+{
+ if (chip->pmic->subtype == PM660_SUBTYPE) {
+ switch (chip->pmic->fab_id) {
+ case PM660_FAB_ID_GF:
+ *offset = RR_ADC_CHG_TEMP_660_GF_OFFSET_UV;
+ *slope = RR_ADC_CHG_TEMP_660_GF_SLOPE_UV_PER_C;
+ return 0;
+ case PM660_FAB_ID_TSMC:
+ *offset = RR_ADC_CHG_TEMP_660_SMIC_OFFSET_UV;
+ *slope = RR_ADC_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C;
+ return 0;
+ default:
+ *offset = RR_ADC_CHG_TEMP_660_MGNA_OFFSET_UV;
+ *slope = RR_ADC_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C;
+ }
+ } else if (chip->pmic->subtype == PMI8998_SUBTYPE) {
+ switch (chip->pmic->fab_id) {
+ case PMI8998_FAB_ID_GF:
+ *offset = RR_ADC_CHG_TEMP_GF_OFFSET_UV;
+ *slope = RR_ADC_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ return 0;
+ case PMI8998_FAB_ID_SMIC:
+ *offset = RR_ADC_CHG_TEMP_SMIC_OFFSET_UV;
+ *slope = RR_ADC_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * These functions explicitly cast int64_t to int.
+ * They will never overflow, as the values are small enough.
+ */
+static int rradc_post_process_batt_id(struct rradc_chip *chip, u16 adc_code,
+ int *result_ohms)
+{
+ uint32_t current_value;
+ int64_t r_id;
+
+ current_value = chip->batt_id_data;
+ r_id = ((int64_t)adc_code * RR_ADC_FS_VOLTAGE_MV);
+ r_id = div64_s64(r_id, (RR_ADC_CHAN_MSB * current_value));
+ *result_ohms = (int)(r_id * MILLI);
+
+ return 0;
+}
+
+static int rradc_enable_continuous_mode(struct rradc_chip *chip)
+{
+ int ret;
+
+ /* Clear channel log */
+ ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_LOG,
+ RR_ADC_LOG_CLR_CTRL, RR_ADC_LOG_CLR_CTRL);
+ if (ret < 0) {
+ dev_err(chip->dev, "log ctrl update to clear failed:%d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_LOG,
+ RR_ADC_LOG_CLR_CTRL, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "log ctrl update to not clear failed:%d\n",
+ ret);
+ return ret;
+ }
+
+ /* Switch to continuous mode */
+ ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_CTL,
+ RR_ADC_CTL_CONTINUOUS_SEL,
+ RR_ADC_CTL_CONTINUOUS_SEL);
+ if (ret < 0)
+ dev_err(chip->dev, "Update to continuous mode failed:%d\n",
+ ret);
+
+ return ret;
+}
+
+static int rradc_disable_continuous_mode(struct rradc_chip *chip)
+{
+ int ret;
+
+ /* Switch to non continuous mode */
+ ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_CTL,
+ RR_ADC_CTL_CONTINUOUS_SEL, 0);
+ if (ret < 0)
+ dev_err(chip->dev, "Update to non-continuous mode failed:%d\n",
+ ret);
+
+ return ret;
+}
+
+static bool rradc_is_ready(struct rradc_chip *chip,
+ enum rradc_channel_id chan_address)
+{
+ const struct rradc_channel *chan = &rradc_chans[chan_address];
+ int ret;
+ unsigned int status, mask;
+
+ /* BATT_ID STS bit does not get set initially */
+ switch (chan_address) {
+ case RR_ADC_BATT_ID:
+ mask = RR_ADC_STS_CHANNEL_STS;
+ break;
+ default:
+ mask = RR_ADC_STS_CHANNEL_READING_MASK;
+ break;
+ }
+
+ ret = regmap_read(chip->regmap, chip->base + chan->status, &status);
+ if (ret < 0 || !(status & mask))
+ return false;
+
+ return true;
+}
+
+static int rradc_read_status_in_cont_mode(struct rradc_chip *chip,
+ enum rradc_channel_id chan_address)
+{
+ const struct rradc_channel *chan = &rradc_chans[chan_address];
+ const struct iio_chan_spec *iio_chan = &rradc_iio_chans[chan_address];
+ int ret, i;
+
+ if (chan->trigger_mask == 0) {
+ dev_err(chip->dev, "Channel doesn't have a trigger mask\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(chip->regmap, chip->base + chan->trigger_addr,
+ chan->trigger_mask, chan->trigger_mask);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to apply trigger for channel '%s' ret=%d\n",
+ iio_chan->extend_name, ret);
+ return ret;
+ }
+
+ ret = rradc_enable_continuous_mode(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to switch to continuous mode\n");
+ goto disable_trigger;
+ }
+
+ /*
+ * The wait/sleep values were found through trial and error,
+ * this is mostly for the battery ID channel which takes some
+ * time to settle.
+ */
+ for (i = 0; i < 5; i++) {
+ if (rradc_is_ready(chip, chan_address))
+ break;
+ usleep_range(50000, 50000 + 500);
+ }
+
+ if (i == 5) {
+ dev_err(chip->dev, "Channel '%s' is not ready\n",
+ iio_chan->extend_name);
+ ret = -ETIMEDOUT;
+ }
+
+ rradc_disable_continuous_mode(chip);
+
+disable_trigger:
+ regmap_update_bits(chip->regmap, chip->base + chan->trigger_addr,
+ chan->trigger_mask, 0);
+
+ return ret;
+}
+
+static int rradc_prepare_batt_id_conversion(struct rradc_chip *chip,
+ enum rradc_channel_id chan_address,
+ u16 *data)
+{
+ int ret;
+
+ ret = regmap_update_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_CTRL,
+ RR_ADC_BATT_ID_CTRL_CHANNEL_CONV,
+ RR_ADC_BATT_ID_CTRL_CHANNEL_CONV);
+ if (ret < 0) {
+ dev_err(chip->dev, "Enabling BATT ID channel failed:%d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(chip->regmap,
+ chip->base + RR_ADC_BATT_ID_TRIGGER,
+ RR_ADC_TRIGGER_CTL, RR_ADC_TRIGGER_CTL);
+ if (ret < 0) {
+ dev_err(chip->dev, "BATT_ID trigger set failed:%d\n", ret);
+ goto out_disable_batt_id;
+ }
+
+ ret = rradc_read_status_in_cont_mode(chip, chan_address);
+
+ /* Reset registers back to default values */
+ regmap_update_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_TRIGGER,
+ RR_ADC_TRIGGER_CTL, 0);
+
+out_disable_batt_id:
+ regmap_update_bits(chip->regmap, chip->base + RR_ADC_BATT_ID_CTRL,
+ RR_ADC_BATT_ID_CTRL_CHANNEL_CONV, 0);
+
+ return ret;
+}
+
+static int rradc_do_conversion(struct rradc_chip *chip,
+ enum rradc_channel_id chan_address, u16 *data)
+{
+ const struct rradc_channel *chan = &rradc_chans[chan_address];
+ const struct iio_chan_spec *iio_chan = &rradc_iio_chans[chan_address];
+ int ret;
+ __le16 buf[3];
+
+ mutex_lock(&chip->conversion_lock);
+
+ switch (chan_address) {
+ case RR_ADC_BATT_ID:
+ ret = rradc_prepare_batt_id_conversion(chip, chan_address, data);
+ if (ret < 0) {
+ dev_err(chip->dev, "Battery ID conversion failed:%d\n",
+ ret);
+ goto unlock_out;
+ }
+ break;
+
+ case RR_ADC_USBIN_V:
+ case RR_ADC_DIE_TEMP:
+ ret = rradc_read_status_in_cont_mode(chip, chan_address);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Error reading in continuous mode:%d\n", ret);
+ goto unlock_out;
+ }
+ break;
+ default:
+ if (!rradc_is_ready(chip, chan_address)) {
+ /*
+ * Usually this means the channel isn't attached, for example
+ * the in_voltage_usbin_v_input channel will not be ready if
+ * no USB cable is attached
+ */
+ dev_dbg(chip->dev, "channel '%s' is not ready\n",
+ iio_chan->extend_name);
+ ret = -ENODATA;
+ goto unlock_out;
+ }
+ break;
+ }
+
+ ret = rradc_read(chip, chan->lsb, buf, chan->size);
+ if (ret) {
+ dev_err(chip->dev, "read data failed\n");
+ goto unlock_out;
+ }
+
+ /*
+ * For the battery ID we read the register for every ID ADC and then
+ * see which one is actually connected.
+ */
+ if (chan_address == RR_ADC_BATT_ID) {
+ u16 batt_id_150 = le16_to_cpu(buf[2]);
+ u16 batt_id_15 = le16_to_cpu(buf[1]);
+ u16 batt_id_5 = le16_to_cpu(buf[0]);
+
+ if (!batt_id_150 && !batt_id_15 && !batt_id_5) {
+ dev_err(chip->dev,
+ "Invalid batt_id values with all zeros\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (batt_id_150 <= RR_ADC_BATT_ID_RANGE) {
+ *data = batt_id_150;
+ chip->batt_id_data = 150;
+ } else if (batt_id_15 <= RR_ADC_BATT_ID_RANGE) {
+ *data = batt_id_15;
+ chip->batt_id_data = 15;
+ } else {
+ *data = batt_id_5;
+ chip->batt_id_data = 5;
+ }
+ } else {
+ /*
+ * All of the other channels are either 1 or 2 bytes.
+ * We can rely on the second byte being 0 for 1-byte channels.
+ */
+ *data = le16_to_cpu(buf[0]);
+ }
+
+unlock_out:
+ mutex_unlock(&chip->conversion_lock);
+
+ return ret;
+}
+
+static int rradc_read_scale(struct rradc_chip *chip, int chan_address, int *val,
+ int *val2)
+{
+ int64_t fab_offset, fab_slope;
+ int ret;
+
+ ret = rradc_get_fab_coeff(chip, &fab_offset, &fab_slope);
+ if (ret < 0) {
+ dev_err(chip->dev, "Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
+
+ switch (chan_address) {
+ case RR_ADC_SKIN_TEMP:
+ *val = MILLI;
+ *val2 = RR_ADC_BATT_THERM_LSB_K;
+ return IIO_VAL_FRACTIONAL;
+ case RR_ADC_USBIN_I:
+ *val = RR_ADC_CURR_USBIN_INPUT_FACTOR_MIL *
+ RR_ADC_FS_VOLTAGE_MV;
+ *val2 = RR_ADC_CHAN_MSB;
+ return IIO_VAL_FRACTIONAL;
+ case RR_ADC_DCIN_I:
+ *val = RR_ADC_CURR_INPUT_FACTOR * RR_ADC_FS_VOLTAGE_MV;
+ *val2 = RR_ADC_CHAN_MSB;
+ return IIO_VAL_FRACTIONAL;
+ case RR_ADC_USBIN_V:
+ case RR_ADC_DCIN_V:
+ *val = RR_ADC_VOLT_INPUT_FACTOR * RR_ADC_FS_VOLTAGE_MV * MILLI;
+ *val2 = RR_ADC_CHAN_MSB;
+ return IIO_VAL_FRACTIONAL;
+ case RR_ADC_GPIO:
+ *val = RR_ADC_GPIO_FS_RANGE;
+ *val2 = RR_ADC_CHAN_MSB;
+ return IIO_VAL_FRACTIONAL;
+ case RR_ADC_CHG_TEMP:
+ /*
+ * We divide val2 by MILLI instead of multiplying val
+ * to avoid an integer overflow.
+ */
+ *val = -RR_ADC_TEMP_FS_VOLTAGE_NUM;
+ *val2 = div64_s64(RR_ADC_TEMP_FS_VOLTAGE_DEN * RR_ADC_CHAN_MSB *
+ fab_slope,
+ MILLI);
+
+ return IIO_VAL_FRACTIONAL;
+ case RR_ADC_DIE_TEMP:
+ *val = RR_ADC_TEMP_FS_VOLTAGE_NUM;
+ *val2 = RR_ADC_TEMP_FS_VOLTAGE_DEN * RR_ADC_CHAN_MSB *
+ RR_ADC_DIE_TEMP_SLOPE;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rradc_read_offset(struct rradc_chip *chip, int chan_address, int *val)
+{
+ int64_t fab_offset, fab_slope;
+ int64_t offset1, offset2;
+ int ret;
+
+ switch (chan_address) {
+ case RR_ADC_SKIN_TEMP:
+ /*
+ * Offset from kelvin to degC, divided by the
+ * scale factor (250). We lose some precision here.
+ * 273150 / 250 = 1092.6
+ */
+ *val = div64_s64(ABSOLUTE_ZERO_MILLICELSIUS,
+ (MILLI / RR_ADC_BATT_THERM_LSB_K));
+ return IIO_VAL_INT;
+ case RR_ADC_CHG_TEMP:
+ ret = rradc_get_fab_coeff(chip, &fab_offset, &fab_slope);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
+ offset1 = -(fab_offset * RR_ADC_TEMP_FS_VOLTAGE_DEN *
+ RR_ADC_CHAN_MSB);
+ offset1 += (int64_t)RR_ADC_TEMP_FS_VOLTAGE_NUM / 2ULL;
+ offset1 = div64_s64(offset1,
+ (int64_t)(RR_ADC_TEMP_FS_VOLTAGE_NUM));
+
+ offset2 = (int64_t)RR_ADC_CHG_TEMP_OFFSET_MILLI_DEGC *
+ RR_ADC_TEMP_FS_VOLTAGE_DEN * RR_ADC_CHAN_MSB *
+ (int64_t)fab_slope;
+ offset2 += ((int64_t)MILLI * RR_ADC_TEMP_FS_VOLTAGE_NUM) / 2;
+ offset2 = div64_s64(
+ offset2, ((int64_t)MILLI * RR_ADC_TEMP_FS_VOLTAGE_NUM));
+
+ /*
+ * The -1 is to compensate for lost precision.
+ * It should actually be -0.7906976744186046.
+ * This works out to every value being off
+ * by about +0.091 degrees C after applying offset and scale.
+ */
+ *val = (int)(offset1 - offset2 - 1);
+ return IIO_VAL_INT;
+ case RR_ADC_DIE_TEMP:
+ offset1 = -RR_ADC_DIE_TEMP_OFFSET *
+ (int64_t)RR_ADC_TEMP_FS_VOLTAGE_DEN *
+ (int64_t)RR_ADC_CHAN_MSB;
+ offset1 = div64_s64(offset1, RR_ADC_TEMP_FS_VOLTAGE_NUM);
+
+ offset2 = -(int64_t)RR_ADC_CHG_TEMP_OFFSET_MILLI_DEGC *
+ RR_ADC_TEMP_FS_VOLTAGE_DEN * RR_ADC_CHAN_MSB *
+ RR_ADC_DIE_TEMP_SLOPE;
+ offset2 = div64_s64(offset2,
+ ((int64_t)RR_ADC_TEMP_FS_VOLTAGE_NUM));
+
+ /*
+ * The result is -339, it should be -338.69789, this results
+ * in the calculated die temp being off by
+ * -0.004 - -0.0175 degrees C
+ */
+ *val = (int)(offset1 - offset2);
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int rradc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan_spec, int *val,
+ int *val2, long mask)
+{
+ struct rradc_chip *chip = iio_priv(indio_dev);
+ const struct rradc_channel *chan;
+ int ret;
+ u16 adc_code;
+
+ if (chan_spec->address >= RR_ADC_CHAN_MAX) {
+ dev_err(chip->dev, "Invalid channel index:%lu\n",
+ chan_spec->address);
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return rradc_read_scale(chip, chan_spec->address, val, val2);
+ case IIO_CHAN_INFO_OFFSET:
+ return rradc_read_offset(chip, chan_spec->address, val);
+ case IIO_CHAN_INFO_RAW:
+ ret = rradc_do_conversion(chip, chan_spec->address, &adc_code);
+ if (ret < 0)
+ return ret;
+
+ *val = adc_code;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ chan = &rradc_chans[chan_spec->address];
+ if (!chan->scale_fn)
+ return -EINVAL;
+ ret = rradc_do_conversion(chip, chan_spec->address, &adc_code);
+ if (ret < 0)
+ return ret;
+
+ *val = chan->scale_fn(chip, adc_code, val);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rradc_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ return snprintf(label, PAGE_SIZE, "%s\n",
+ rradc_chans[chan->address].label);
+}
+
+static const struct iio_info rradc_info = {
+ .read_raw = rradc_read_raw,
+ .read_label = rradc_read_label,
+};
+
+static const struct rradc_channel rradc_chans[RR_ADC_CHAN_MAX] = {
+ {
+ .label = "batt_id",
+ .scale_fn = rradc_post_process_batt_id,
+ .lsb = RR_ADC_BATT_ID_5_LSB,
+ .status = RR_ADC_BATT_ID_STS,
+ .size = 6,
+ .trigger_addr = RR_ADC_BATT_ID_TRIGGER,
+ .trigger_mask = BIT(0),
+ }, {
+ .label = "batt",
+ .lsb = RR_ADC_BATT_THERM_LSB,
+ .status = RR_ADC_BATT_THERM_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_BATT_THERM_TRIGGER,
+ }, {
+ .label = "pmi8998_skin",
+ .lsb = RR_ADC_SKIN_TEMP_LSB,
+ .status = RR_ADC_AUX_THERM_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_AUX_THERM_TRIGGER,
+ }, {
+ .label = "usbin_i",
+ .lsb = RR_ADC_USB_IN_I_LSB,
+ .status = RR_ADC_USB_IN_I_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_USB_IN_I_TRIGGER,
+ }, {
+ .label = "usbin_v",
+ .lsb = RR_ADC_USB_IN_V_LSB,
+ .status = RR_ADC_USB_IN_V_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_USB_IN_V_TRIGGER,
+ .trigger_mask = BIT(7),
+ }, {
+ .label = "dcin_i",
+ .lsb = RR_ADC_DC_IN_I_LSB,
+ .status = RR_ADC_DC_IN_I_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_DC_IN_I_TRIGGER,
+ }, {
+ .label = "dcin_v",
+ .lsb = RR_ADC_DC_IN_V_LSB,
+ .status = RR_ADC_DC_IN_V_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_DC_IN_V_TRIGGER,
+ }, {
+ .label = "pmi8998_die",
+ .lsb = RR_ADC_PMI_DIE_TEMP_LSB,
+ .status = RR_ADC_PMI_DIE_TEMP_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_PMI_DIE_TEMP_TRIGGER,
+ .trigger_mask = RR_ADC_TRIGGER_EVERY_CYCLE,
+ }, {
+ .label = "chg",
+ .lsb = RR_ADC_CHARGER_TEMP_LSB,
+ .status = RR_ADC_CHARGER_TEMP_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_CHARGER_TEMP_TRIGGER,
+ }, {
+ .label = "gpio",
+ .lsb = RR_ADC_GPIO_LSB,
+ .status = RR_ADC_GPIO_STS,
+ .size = 2,
+ .trigger_addr = RR_ADC_GPIO_TRIGGER,
+ },
+};
+
+static const struct iio_chan_spec rradc_iio_chans[RR_ADC_CHAN_MAX] = {
+ {
+ .type = IIO_RESISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .address = RR_ADC_BATT_ID,
+ .channel = 0,
+ .indexed = 1,
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .address = RR_ADC_BATT_THERM,
+ .channel = 0,
+ .indexed = 1,
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = RR_ADC_SKIN_TEMP,
+ .channel = 1,
+ .indexed = 1,
+ }, {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = RR_ADC_USBIN_I,
+ .channel = 0,
+ .indexed = 1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = RR_ADC_USBIN_V,
+ .channel = 0,
+ .indexed = 1,
+ }, {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = RR_ADC_DCIN_I,
+ .channel = 1,
+ .indexed = 1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = RR_ADC_DCIN_V,
+ .channel = 1,
+ .indexed = 1,
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = RR_ADC_DIE_TEMP,
+ .channel = 2,
+ .indexed = 1,
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = RR_ADC_CHG_TEMP,
+ .channel = 3,
+ .indexed = 1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = RR_ADC_GPIO,
+ .channel = 2,
+ .indexed = 1,
+ },
+};
+
+static int rradc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct rradc_chip *chip;
+ int ret, i, batt_id_delay;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->dev = dev;
+ mutex_init(&chip->conversion_lock);
+
+ ret = device_property_read_u32(dev, "reg", &chip->base);
+ if (ret < 0) {
+ dev_err(chip->dev, "Couldn't find reg address, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ batt_id_delay = -1;
+ ret = device_property_read_u32(dev, "qcom,batt-id-delay-ms",
+ &batt_id_delay);
+ if (!ret) {
+ for (i = 0; i < RRADC_BATT_ID_DELAY_MAX; i++) {
+ if (batt_id_delay == batt_id_delays[i])
+ break;
+ }
+ if (i == RRADC_BATT_ID_DELAY_MAX)
+ batt_id_delay = -1;
+ }
+
+ if (batt_id_delay >= 0) {
+ batt_id_delay = FIELD_PREP(BATT_ID_SETTLE_MASK, batt_id_delay);
+ ret = regmap_update_bits(chip->regmap,
+ chip->base + RR_ADC_BATT_ID_CFG,
+ batt_id_delay, batt_id_delay);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "BATT_ID settling time config failed:%d\n",
+ ret);
+ }
+ }
+
+ /* Get the PMIC revision, we need it to handle some varying coefficients */
+ chip->pmic = qcom_pmic_get(chip->dev);
+ if (IS_ERR(chip->pmic)) {
+ dev_err(chip->dev, "Unable to get reference to PMIC device\n");
+ return PTR_ERR(chip->pmic);
+ }
+
+ switch (chip->pmic->subtype) {
+ case PMI8998_SUBTYPE:
+ indio_dev->name = "pmi8998-rradc";
+ break;
+ case PM660_SUBTYPE:
+ indio_dev->name = "pm660-rradc";
+ break;
+ default:
+ indio_dev->name = DRIVER_NAME;
+ break;
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &rradc_info;
+ indio_dev->channels = rradc_iio_chans;
+ indio_dev->num_channels = ARRAY_SIZE(rradc_iio_chans);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id rradc_match_table[] = {
+ { .compatible = "qcom,pm660-rradc" },
+ { .compatible = "qcom,pmi8998-rradc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rradc_match_table);
+
+static struct platform_driver rradc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = rradc_match_table,
+ },
+ .probe = rradc_probe,
+};
+module_platform_driver(rradc_driver);
+
+MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver");
+MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 5b09a93fdf34..0921ff2d9b3a 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/reset.h>
#define DRIVER_NAME "rzg2l-adc"
@@ -260,9 +261,6 @@ static int rzg2l_adc_read_label(struct iio_dev *iio_dev,
const struct iio_chan_spec *chan,
char *label)
{
- if (chan->channel >= RZG2L_ADC_MAX_CHANNELS)
- return -EINVAL;
-
return sysfs_emit(label, "%s\n", rzg2l_adc_channel_name[chan->channel]);
}
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index e9ff2d6a8a57..f8421cbba8fa 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -579,15 +579,14 @@ unlock_adc:
return ret;
}
-static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
- int channel, int scale,
- u32 *div_numerator, u32 *div_denominator)
+static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data, int channel, int scale,
+ struct u32_fract *fract)
{
u32 ratio;
ratio = data->var_data->get_ratio(channel, scale);
- *div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
- *div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
+ fract->numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
+ fract->denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
}
static int adc_to_volt(struct sc27xx_adc_linear_graph *graph,
@@ -615,7 +614,7 @@ static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
static int sc27xx_adc_convert_volt(struct sc27xx_adc_data *data, int channel,
int scale, int raw_adc)
{
- u32 numerator, denominator;
+ struct u32_fract fract;
u32 volt;
/*
@@ -637,9 +636,9 @@ static int sc27xx_adc_convert_volt(struct sc27xx_adc_data *data, int channel,
break;
}
- sc27xx_adc_volt_ratio(data, channel, scale, &numerator, &denominator);
+ sc27xx_adc_volt_ratio(data, channel, scale, &fract);
- return DIV_ROUND_CLOSEST(volt * denominator, numerator);
+ return DIV_ROUND_CLOSEST(volt * fract.denominator, fract.numerator);
}
static int sc27xx_adc_read_processed(struct sc27xx_adc_data *data,
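struct u32_fract (from linux/math.h) is just a numerator/denominator pair, so the divider ratio can be passed around as one value instead of two out-parameters. A sketch of how a caller consumes it, with a placeholder helper name:

#include <linux/math.h>

static u32 foo_apply_ratio(u32 volt, struct u32_fract fract)
{
        /* Same arithmetic as the sc27xx conversion path above. */
        return DIV_ROUND_CLOSEST(volt * fract.denominator, fract.numerator);
}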
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 3efb8c404ccc..1ce52af3fe8b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -358,7 +358,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
if ((status & priv->cfg->regs->eoc_msk[i] &&
stm32_adc_eoc_enabled(priv, i)) ||
(status & priv->cfg->regs->ovr_msk[i]))
- generic_handle_irq(irq_find_mapping(priv->domain, i));
+ generic_handle_domain_irq(priv->domain, i);
}
chained_irq_exit(chip, desc);
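generic_handle_domain_irq() folds the irq_find_mapping() plus generic_handle_irq() pair into one call that takes the domain and the hardware interrupt number directly. A sketch of a chained handler built around it; foo_read_pending() and FOO_NR_IRQS are placeholders for the hardware-specific pieces:

static void foo_irq_handler(struct irq_desc *desc)
{
        struct foo_priv *priv = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long status;
        unsigned int hwirq;

        chained_irq_enter(chip, desc);

        status = foo_read_pending(priv);        /* hardware specific */
        for_each_set_bit(hwirq, &status, FOO_NR_IRQS)
                generic_handle_domain_irq(priv->domain, hwirq);

        chained_irq_exit(chip, desc);
}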
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 11ef873d6453..130e8dd6f0c8 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -876,6 +876,9 @@ static void stm32h7_adc_disable(struct iio_dev *indio_dev)
int ret;
u32 val;
+ if (!(stm32_adc_readl(adc, STM32H7_ADC_CR) & STM32H7_ADEN))
+ return;
+
/* Disable ADC and wait until it's effectively disabled */
stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
@@ -1016,6 +1019,9 @@ static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev)
if (adc->cal.calibrated)
return true;
+ /* ADC must be disabled for calibration */
+ stm32h7_adc_disable(indio_dev);
+
/*
* Select calibration mode:
* - Offset calibration for single ended inputs
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index 000e5cfecb43..67518e460e05 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -333,7 +333,7 @@ static int stmpe_adc_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-static int __maybe_unused stmpe_adc_resume(struct device *dev)
+static int stmpe_adc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct stmpe_adc *info = iio_priv(indio_dev);
@@ -343,7 +343,7 @@ static int __maybe_unused stmpe_adc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
static const struct of_device_id stmpe_adc_ids[] = {
{ .compatible = "st,stmpe-adc", },
@@ -355,7 +355,7 @@ static struct platform_driver stmpe_adc_driver = {
.probe = stmpe_adc_probe,
.driver = {
.name = "stmpe-adc",
- .pm = &stmpe_adc_pm_ops,
+ .pm = pm_sleep_ptr(&stmpe_adc_pm_ops),
.of_match_table = stmpe_adc_ids,
},
};
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c
index 55bd2dc514e9..48a91a95e597 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/adc/stx104.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#define STX104_OUT_CHAN(chan) { \
.type = IIO_VOLTAGE, \
@@ -45,13 +46,35 @@ module_param_hw_array(base, uint, ioport, &num_stx104, 0);
MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
/**
+ * struct stx104_reg - device register structure
+ * @ssr_ad: Software Strobe Register and ADC Data
+ * @achan: ADC Channel
+ * @dio: Digital I/O
+ * @dac: DAC Channels
+ * @cir_asr: Clear Interrupts and ADC Status
+ * @acr: ADC Control
+ * @pccr_fsh: Pacer Clock Control and FIFO Status MSB
+ * @acfg: ADC Configuration
+ */
+struct stx104_reg {
+ u16 ssr_ad;
+ u8 achan;
+ u8 dio;
+ u16 dac[2];
+ u8 cir_asr;
+ u8 acr;
+ u8 pccr_fsh;
+ u8 acfg;
+};
+
+/**
* struct stx104_iio - IIO device private data structure
* @chan_out_states: channels' output states
- * @base: base port address of the IIO device
+ * @reg: I/O address offset for the device registers
*/
struct stx104_iio {
unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
- unsigned int base;
+ struct stx104_reg __iomem *reg;
};
/**
@@ -64,7 +87,7 @@ struct stx104_iio {
struct stx104_gpio {
struct gpio_chip chip;
spinlock_t lock;
- unsigned int base;
+ u8 __iomem *base;
unsigned int out_state;
};
@@ -72,6 +95,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
struct stx104_iio *const priv = iio_priv(indio_dev);
+ struct stx104_reg __iomem *const reg = priv->reg;
unsigned int adc_config;
int adbu;
int gain;
@@ -79,7 +103,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_HARDWAREGAIN:
/* get gain configuration */
- adc_config = inb(priv->base + 11);
+ adc_config = ioread8(&reg->acfg);
gain = adc_config & 0x3;
*val = 1 << gain;
@@ -91,24 +115,26 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
}
/* select ADC channel */
- outb(chan->channel | (chan->channel << 4), priv->base + 2);
+ iowrite8(chan->channel | (chan->channel << 4), &reg->achan);
- /* trigger ADC sample capture and wait for completion */
- outb(0, priv->base);
- while (inb(priv->base + 8) & BIT(7));
+ /* trigger ADC sample capture by writing to the 8-bit
+ * Software Strobe Register and wait for completion
+ */
+ iowrite8(0, &reg->ssr_ad);
+ while (ioread8(&reg->cir_asr) & BIT(7));
- *val = inw(priv->base);
+ *val = ioread16(&reg->ssr_ad);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
/* get ADC bipolar/unipolar configuration */
- adc_config = inb(priv->base + 11);
+ adc_config = ioread8(&reg->acfg);
adbu = !(adc_config & BIT(2));
*val = -32768 * adbu;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/* get ADC bipolar/unipolar and gain configuration */
- adc_config = inb(priv->base + 11);
+ adc_config = ioread8(&reg->acfg);
adbu = !(adc_config & BIT(2));
gain = adc_config & 0x3;
@@ -130,16 +156,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
/* Only four gain states (x1, x2, x4, x8) */
switch (val) {
case 1:
- outb(0, priv->base + 11);
+ iowrite8(0, &priv->reg->acfg);
break;
case 2:
- outb(1, priv->base + 11);
+ iowrite8(1, &priv->reg->acfg);
break;
case 4:
- outb(2, priv->base + 11);
+ iowrite8(2, &priv->reg->acfg);
break;
case 8:
- outb(3, priv->base + 11);
+ iowrite8(3, &priv->reg->acfg);
break;
default:
return -EINVAL;
@@ -153,7 +179,7 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
priv->chan_out_states[chan->channel] = val;
- outw(val, priv->base + 4 + 2 * chan->channel);
+ iowrite16(val, &priv->reg->dac[chan->channel]);
return 0;
}
@@ -222,7 +248,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (offset >= 4)
return -EINVAL;
- return !!(inb(stx104gpio->base) & BIT(offset));
+ return !!(ioread8(stx104gpio->base) & BIT(offset));
}
static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -230,7 +256,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
{
struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
- *bits = inb(stx104gpio->base);
+ *bits = ioread8(stx104gpio->base);
return 0;
}
@@ -252,7 +278,7 @@ static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
else
stx104gpio->out_state &= ~mask;
- outb(stx104gpio->out_state, stx104gpio->base);
+ iowrite8(stx104gpio->out_state, stx104gpio->base);
spin_unlock_irqrestore(&stx104gpio->lock, flags);
}
@@ -279,7 +305,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip,
stx104gpio->out_state &= ~*mask;
stx104gpio->out_state |= *mask & *bits;
- outb(stx104gpio->out_state, stx104gpio->base);
+ iowrite8(stx104gpio->out_state, stx104gpio->base);
spin_unlock_irqrestore(&stx104gpio->lock, flags);
}
@@ -306,11 +332,16 @@ static int stx104_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
+ priv = iio_priv(indio_dev);
+ priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT);
+ if (!priv->reg)
+ return -ENOMEM;
+
indio_dev->info = &stx104_info;
indio_dev->modes = INDIO_DIRECT_MODE;
/* determine if differential inputs */
- if (inb(base[id] + 8) & BIT(5)) {
+ if (ioread8(&priv->reg->cir_asr) & BIT(5)) {
indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff);
indio_dev->channels = stx104_channels_diff;
} else {
@@ -320,18 +351,15 @@ static int stx104_probe(struct device *dev, unsigned int id)
indio_dev->name = dev_name(dev);
- priv = iio_priv(indio_dev);
- priv->base = base[id];
-
/* configure device for software trigger operation */
- outb(0, base[id] + 9);
+ iowrite8(0, &priv->reg->acr);
/* initialize gain setting to x1 */
- outb(0, base[id] + 11);
+ iowrite8(0, &priv->reg->acfg);
/* initialize DAC output to 0V */
- outw(0, base[id] + 4);
- outw(0, base[id] + 6);
+ iowrite16(0, &priv->reg->dac[0]);
+ iowrite16(0, &priv->reg->dac[1]);
stx104gpio->chip.label = dev_name(dev);
stx104gpio->chip.parent = dev;
@@ -346,7 +374,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
stx104gpio->chip.get_multiple = stx104_gpio_get_multiple;
stx104gpio->chip.set = stx104_gpio_set;
stx104gpio->chip.set_multiple = stx104_gpio_set_multiple;
- stx104gpio->base = base[id] + 3;
+ stx104gpio->base = &priv->reg->dio;
stx104gpio->out_state = 0x0;
spin_lock_init(&stx104gpio->lock);
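The stx104 hunks above swap raw inb()/outb() accesses at magic port offsets for a register structure mapped with devm_ioport_map() and accessed through ioread8()/iowrite8()/ioread16()/iowrite16(). A minimal sketch of that pattern, using hypothetical names (struct foo_reg, foo_map_and_reset) rather than the driver's real stx104_reg layout:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/types.h>

/* hypothetical register map; the real layout lives in the driver */
struct foo_reg {
        u8 ctrl;                /* offset 0x0 */
        u8 chan;                /* offset 0x1 */
        u16 data;               /* offset 0x2 */
};

static int foo_map_and_reset(struct device *dev, unsigned long port)
{
        struct foo_reg __iomem *reg;

        /* map the I/O port region; unmapped automatically on driver detach */
        reg = devm_ioport_map(dev, port, sizeof(*reg));
        if (!reg)
                return -ENOMEM;

        iowrite8(0, &reg->ctrl);        /* was: outb(0, port + 0x0) */
        iowrite16(0, &reg->data);       /* was: outw(0, port + 0x2) */

        return 0;
}

The named register fields replace the scattered "base + N" arithmetic, which is what lets the probe and read/write paths above drop the raw base address entirely.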
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index fb5e72600b96..b11ce555ba3b 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -36,7 +36,7 @@ struct adc0832 {
*/
u8 data[24] __aligned(8);
- u8 tx_buf[2] ____cacheline_aligned;
+ u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
u8 rx_buf[2];
};
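The ____cacheline_aligned to __aligned(IIO_DMA_MINALIGN) substitutions repeated through the ADC and DAC hunks all carry the same meaning: SPI transfer buffers must not share a cache line (or DMA-safe region) with fields the CPU may touch during a transfer, and IIO_DMA_MINALIGN (from linux/iio/iio.h) is meant to capture the platform's DMA-safe minimum alignment rather than hard-coding a full cache line per buffer. A minimal sketch of the layout rule, with a hypothetical foo_state standing in for any particular driver's state struct:

#include <linux/iio/iio.h>      /* IIO_DMA_MINALIGN */
#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

struct foo_state {
        struct spi_device *spi;
        struct mutex lock;
        /*
         * Everything from tx_buf to the end of the struct sits in the
         * DMA-safe region, so only the first buffer needs the attribute.
         */
        u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
        u8 rx_buf[2];
};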
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index c9b5d9aec3dc..1f6e53832e06 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -32,10 +32,10 @@ struct adc084s021 {
s64 ts __aligned(8);
} scan;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache line.
*/
- u16 tx_buf[4] ____cacheline_aligned;
+ u16 tx_buf[4] __aligned(IIO_DMA_MINALIGN);
__be16 rx_buf[5]; /* First 16-bits are trash */
};
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index c8e48881c37f..c82a161630e1 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -77,8 +77,8 @@ struct adc108s102_state {
* tx_buf: 8 channel read commands, plus 1 dummy command
* rx_buf: 1 dummy response, 8 channel responses
*/
- __be16 rx_buf[9] ____cacheline_aligned;
- __be16 tx_buf[9] ____cacheline_aligned;
+ __be16 rx_buf[9] __aligned(IIO_DMA_MINALIGN);
+ __be16 tx_buf[9] __aligned(IIO_DMA_MINALIGN);
};
#define ADC108S102_V_CHAN(index) \
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 59d75d09604f..c0a72d72f3a9 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -55,7 +55,7 @@ struct adc12138 {
*/
__be16 data[20] __aligned(8);
- u8 tx_buf[2] ____cacheline_aligned;
+ u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
u8 rx_buf[2];
};
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 8e7adec87755..622fd384983c 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -29,7 +29,7 @@ struct adc128 {
struct regulator *reg;
struct mutex lock;
- u8 buffer[2] ____cacheline_aligned;
+ u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
};
static int adc128_adc_conversion(struct adc128 *adc, u8 channel)
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index 75ca7f1c8726..b789891dcf49 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -71,7 +71,7 @@ struct ti_adc_data {
u8 read_size;
u8 shift;
- u8 buffer[16] ____cacheline_aligned;
+ u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
};
static int ti_adc_read_measurement(struct ti_adc_data *data,
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 5544da80b636..e3dfc155fbe2 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -1098,6 +1098,7 @@ static int ads1015_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
+ int ret;
iio_device_unregister(indio_dev);
@@ -1105,7 +1106,12 @@ static int ads1015_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
/* power down single shot mode */
- return ads1015_set_conv_mode(data, ADS1015_SINGLESHOT);
+ ret = ads1015_set_conv_mode(data, ADS1015_SINGLESHOT);
+ if (ret)
+ dev_warn(&client->dev, "Failed to power down (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
#ifdef CONFIG_PM
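The ads1015_remove() change (and the similar atlas and ccs811 hunks further down) stops returning the power-down error and logs it instead, since the I2C core does not unwind anything on a non-zero return from remove(). A sketch of that shape, with foo_power_down() as a stand-in for the driver's real power-down command:

#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/i2c.h>

/* stand-in for the driver's real power-down call */
static int foo_power_down(struct i2c_client *client)
{
        return 0;
}

static int foo_remove(struct i2c_client *client)
{
        int ret;

        /* ... iio_device_unregister(), runtime-PM teardown ... */

        ret = foo_power_down(client);
        if (ret)
                dev_warn(&client->dev, "Failed to power down (%pe)\n",
                         ERR_PTR(ret));

        /* the I2C core ignores errors from remove(), so report success */
        return 0;
}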
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 767b3b634809..4ca62121f0d1 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -106,7 +106,7 @@ struct ads124s_private {
* timestamp is maintained.
*/
u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u32)] __aligned(8);
- u8 data[5] ____cacheline_aligned;
+ u8 data[5] __aligned(IIO_DMA_MINALIGN);
};
#define ADS124S08_CHAN(index) \
@@ -193,7 +193,7 @@ static int ads124s_reset(struct iio_dev *indio_dev)
return 0;
};
-static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
+static int ads124s_read(struct iio_dev *indio_dev)
{
struct ads124s_private *priv = iio_priv(indio_dev);
int ret;
@@ -242,7 +242,7 @@ static int ads124s_read_raw(struct iio_dev *indio_dev,
goto out;
}
- ret = ads124s_read(indio_dev, chan->channel);
+ ret = ads124s_read(indio_dev);
if (ret < 0) {
dev_err(&priv->spi->dev, "Read ADC failed\n");
goto out;
@@ -290,7 +290,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
if (ret)
dev_err(&priv->spi->dev, "Start ADC conversions failed\n");
- priv->buffer[j] = ads124s_read(indio_dev, scan_index);
+ priv->buffer[j] = ads124s_read(indio_dev);
ret = ads124s_write_cmd(indio_dev, ADS124S08_STOP_CONV);
if (ret)
dev_err(&priv->spi->dev, "Stop ADC conversions failed\n");
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 80a09817c119..32237cacc9a3 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -105,7 +105,7 @@ struct ads131e08_state {
s64 ts __aligned(8);
} tmp_buf;
- u8 tx_buf[3] ____cacheline_aligned;
+ u8 tx_buf[3] __aligned(IIO_DMA_MINALIGN);
/*
* Add extra one padding byte to be able to access the last channel
* value using u32 pointer
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index e3658b969c5b..2cc9a9bd9db6 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -102,11 +102,11 @@ struct ti_ads7950_state {
unsigned int gpio_cmd_settings_bitmask;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
u16 rx_buf[TI_ADS7950_MAX_CHAN + 2 + TI_ADS7950_TIMESTAMP_SIZE]
- ____cacheline_aligned;
+ __aligned(IIO_DMA_MINALIGN);
u16 tx_buf[TI_ADS7950_MAX_CHAN + 2];
u16 single_tx;
u16 single_rx;
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
index c96d2a9ba924..bbd85cb47f81 100644
--- a/drivers/iio/adc/ti-ads8344.c
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -28,7 +28,7 @@ struct ads8344 {
*/
struct mutex lock;
- u8 tx_buf ____cacheline_aligned;
+ u8 tx_buf __aligned(IIO_DMA_MINALIGN);
u8 rx_buf[3];
};
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 708cca0a63be..ef06a897421a 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -71,7 +71,7 @@ struct ads8688_state {
union {
__be32 d32;
u8 d8[4];
- } data[2] ____cacheline_aligned;
+ } data[2] __aligned(IIO_DMA_MINALIGN);
};
enum ads8688_id {
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 2406eda9dfc6..30f629a553a1 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -37,12 +37,12 @@ struct tlc4541_state {
struct spi_message scan_single_msg;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* 2 bytes data + 6 bytes padding + 8 bytes timestamp when
* call iio_push_to_buffers_with_timestamp.
*/
- __be16 rx_buf[8] ____cacheline_aligned;
+ __be16 rx_buf[8] __aligned(IIO_DMA_MINALIGN);
};
struct tlc4541_chip_info {
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 55b35570ad8b..0d9436a69cbf 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -776,7 +776,7 @@ static int tsc2046_adc_probe(struct spi_device *spi)
priv->spi = spi;
indio_dev->name = TI_TSC2046_NAME;
- indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
+ indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = dcfg->channels;
indio_dev->num_channels = dcfg->num_channels;
indio_dev->info = &tsc2046_adc_info;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 567d43a30955..642c5c4895e3 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -702,7 +702,7 @@ static int tiadc_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused tiadc_suspend(struct device *dev)
+static int tiadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -715,7 +715,7 @@ static int __maybe_unused tiadc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused tiadc_resume(struct device *dev)
+static int tiadc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -732,7 +732,7 @@ static int __maybe_unused tiadc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);
static const struct of_device_id ti_adc_dt_ids[] = {
{ .compatible = "ti,am3359-adc", },
@@ -744,7 +744,7 @@ MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
static struct platform_driver tiadc_driver = {
.driver = {
.name = "TI-am335x-adc",
- .pm = &tiadc_pm_ops,
+ .pm = pm_sleep_ptr(&tiadc_pm_ops),
.of_match_table = ti_adc_dt_ids,
},
.probe = tiadc_probe,
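The ti_am335x_adc PM hunks above (and the matching ams, scd4x, ds4424, ltc1660, max517, max5821 and mcp4725 changes later in the diff) follow one recipe: drop __maybe_unused from the callbacks, define the ops with DEFINE_SIMPLE_DEV_PM_OPS(), and reference them through pm_sleep_ptr() so everything stays compile-tested but is discarded when CONFIG_PM_SLEEP is off. A minimal sketch with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        /* quiesce the hardware */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* bring the hardware back up */
        return 0;
}

/* always referenced by the compiler, so __maybe_unused is not needed */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                /* evaluates to NULL when CONFIG_PM_SLEEP=n */
                .pm = pm_sleep_ptr(&foo_pm_ops),
        },
};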
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index c84293efc129..c6b16cf6e367 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -5,7 +5,9 @@
* Copyright 2013 Freescale Semiconductor, Inc.
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -14,10 +16,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
-#include <linux/of_platform.h>
#include <linux/err.h>
#include <linux/iio/iio.h>
@@ -799,6 +798,7 @@ MODULE_DEVICE_TABLE(of, vf610_adc_match);
static int vf610_adc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct vf610_adc *info;
struct iio_dev *indio_dev;
int irq;
@@ -846,13 +846,10 @@ static int vf610_adc_probe(struct platform_device *pdev)
info->vref_uv = regulator_get_voltage(info->vref);
- of_property_read_u32_array(pdev->dev.of_node, "fsl,adck-max-frequency",
- info->max_adck_rate, 3);
+ device_property_read_u32_array(dev, "fsl,adck-max-frequency", info->max_adck_rate, 3);
- ret = of_property_read_u32(pdev->dev.of_node, "min-sample-time",
- &info->adc_feature.default_sample_time);
- if (ret)
- info->adc_feature.default_sample_time = DEFAULT_SAMPLE_TIME;
+ info->adc_feature.default_sample_time = DEFAULT_SAMPLE_TIME;
+ device_property_read_u32(dev, "min-sample-time", &info->adc_feature.default_sample_time);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index a7687706012d..9cd2713146e5 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1421,7 +1421,7 @@ static int ams_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-static int __maybe_unused ams_suspend(struct device *dev)
+static int ams_suspend(struct device *dev)
{
struct ams *ams = iio_priv(dev_get_drvdata(dev));
@@ -1430,20 +1430,20 @@ static int __maybe_unused ams_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ams_resume(struct device *dev)
+static int ams_resume(struct device *dev)
{
struct ams *ams = iio_priv(dev_get_drvdata(dev));
return clk_prepare_enable(ams->clk);
}
-static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
static struct platform_driver ams_driver = {
.probe = ams_probe,
.driver = {
.name = "xilinx-ams",
- .pm = &ams_pm_ops,
+ .pm = pm_sleep_ptr(&ams_pm_ops),
.of_match_table = ams_of_match_table,
},
};
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 823c8e5f9809..1b247722ba25 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -17,10 +17,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -1182,14 +1183,13 @@ static const struct of_device_id xadc_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
-static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
- unsigned int *conf, int irq)
+static int xadc_parse_dt(struct iio_dev *indio_dev, unsigned int *conf, int irq)
{
struct device *dev = indio_dev->dev.parent;
struct xadc *xadc = iio_priv(indio_dev);
const struct iio_chan_spec *channel_templates;
struct iio_chan_spec *channels, *chan;
- struct device_node *chan_node, *child;
+ struct fwnode_handle *chan_node, *child;
unsigned int max_channels;
unsigned int num_channels;
const char *external_mux;
@@ -1200,7 +1200,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf = 0;
- ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
+ ret = device_property_read_string(dev, "xlnx,external-mux", &external_mux);
if (ret < 0 || strcasecmp(external_mux, "none") == 0)
xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
else if (strcasecmp(external_mux, "single") == 0)
@@ -1211,8 +1211,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
return -EINVAL;
if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
- ret = of_property_read_u32(np, "xlnx,external-mux-channel",
- &ext_mux_chan);
+ ret = device_property_read_u32(dev, "xlnx,external-mux-channel", &ext_mux_chan);
if (ret < 0)
return ret;
@@ -1247,33 +1246,31 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
num_channels = 9;
chan = &channels[9];
- chan_node = of_get_child_by_name(np, "xlnx,channels");
- if (chan_node) {
- for_each_child_of_node(chan_node, child) {
- if (num_channels >= max_channels) {
- of_node_put(child);
- break;
- }
+ chan_node = device_get_named_child_node(dev, "xlnx,channels");
+ fwnode_for_each_child_node(chan_node, child) {
+ if (num_channels >= max_channels) {
+ fwnode_handle_put(child);
+ break;
+ }
- ret = of_property_read_u32(child, "reg", &reg);
- if (ret || reg > 16)
- continue;
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg > 16)
+ continue;
- if (of_property_read_bool(child, "xlnx,bipolar"))
- chan->scan_type.sign = 's';
+ if (fwnode_property_read_bool(child, "xlnx,bipolar"))
+ chan->scan_type.sign = 's';
- if (reg == 0) {
- chan->scan_index = 11;
- chan->address = XADC_REG_VPVN;
- } else {
- chan->scan_index = 15 + reg;
- chan->address = XADC_REG_VAUX(reg - 1);
- }
- num_channels++;
- chan++;
+ if (reg == 0) {
+ chan->scan_index = 11;
+ chan->address = XADC_REG_VPVN;
+ } else {
+ chan->scan_index = 15 + reg;
+ chan->address = XADC_REG_VAUX(reg - 1);
}
+ num_channels++;
+ chan++;
}
- of_node_put(chan_node);
+ fwnode_handle_put(chan_node);
/* No IRQ => no events */
if (irq <= 0) {
@@ -1316,7 +1313,6 @@ static void xadc_cancel_delayed_work(void *data)
static int xadc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *id;
const struct xadc_ops *ops;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
@@ -1326,15 +1322,10 @@ static int xadc_probe(struct platform_device *pdev)
int irq;
int i;
- if (!dev->of_node)
- return -ENODEV;
-
- id = of_match_node(xadc_of_match_table, dev->of_node);
- if (!id)
+ ops = device_get_match_data(dev);
+ if (!ops)
return -EINVAL;
- ops = id->data;
-
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0 &&
(irq != -ENXIO || !(ops->flags & XADC_FLAGS_IRQ_OPTIONAL)))
@@ -1345,7 +1336,7 @@ static int xadc_probe(struct platform_device *pdev)
return -ENOMEM;
xadc = iio_priv(indio_dev);
- xadc->ops = id->data;
+ xadc->ops = ops;
init_completion(&xadc->completion);
mutex_init(&xadc->mutex);
spin_lock_init(&xadc->lock);
@@ -1359,7 +1350,7 @@ static int xadc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &xadc_info;
- ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0, irq);
+ ret = xadc_parse_dt(indio_dev, &conf0, irq);
if (ret)
return ret;
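The xadc_parse_dt() rework above is a straight of_* to fwnode/device_property conversion: the driver no longer needs dev->of_node or of_match_node(), and child channel nodes are walked through fwnode handles. The generic shape of such a walk, sketched with a hypothetical foo_parse_channels() rather than the xadc's named "xlnx,channels" child node:

#include <linux/device.h>
#include <linux/property.h>

static int foo_parse_channels(struct device *dev)
{
        struct fwnode_handle *child;
        u32 reg;
        int ret;

        /* iterates DT, ACPI or software nodes alike; no of_node needed */
        device_for_each_child_node(dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &reg);
                if (ret) {
                        fwnode_handle_put(child);
                        return ret;
                }
                /* ... configure the channel described by "reg" ... */
        }

        return 0;
}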
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index acd230a6af35..899bcd83f40b 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -77,13 +77,13 @@ struct ad74413r_state {
struct spi_transfer adc_samples_xfer[AD74413R_CHANNEL_MAX + 1];
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
struct {
u8 rx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
s64 timestamp;
- } adc_samples_buf ____cacheline_aligned;
+ } adc_samples_buf __aligned(IIO_DMA_MINALIGN);
u8 adc_samples_tx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
u8 reg_tx_buf[AD74413R_FRAME_SIZE];
@@ -284,10 +284,10 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
struct ad74413r_state *st = gpiochip_get_data(chip);
unsigned long real_mask = 0;
unsigned long real_bits = 0;
- unsigned int offset = 0;
+ unsigned int offset;
int ret;
- for_each_set_bit_from(offset, mask, chip->ngpio) {
+ for_each_set_bit(offset, mask, chip->ngpio) {
unsigned int real_offset = st->gpo_gpio_offsets[offset];
ret = ad74413r_set_gpo_config(st, real_offset,
@@ -325,7 +325,7 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *bits)
{
struct ad74413r_state *st = gpiochip_get_data(chip);
- unsigned int offset = 0;
+ unsigned int offset;
unsigned int val;
int ret;
@@ -333,7 +333,7 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
if (ret)
return ret;
- for_each_set_bit_from(offset, mask, chip->ngpio) {
+ for_each_set_bit(offset, mask, chip->ngpio) {
unsigned int real_offset = st->comp_gpio_offsets[offset];
__assign_bit(offset, bits, val & BIT(real_offset));
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index 6949d2151025..1f280c360701 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -107,6 +107,7 @@ int rescale_process_scale(struct rescale *rescale, int scale_type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL_NS_GPL(rescale_process_scale, IIO_RESCALE);
int rescale_process_offset(struct rescale *rescale, int scale_type,
int scale, int scale2, int schan_off,
@@ -140,6 +141,7 @@ int rescale_process_offset(struct rescale *rescale, int scale_type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL_NS_GPL(rescale_process_offset, IIO_RESCALE);
static int rescale_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 1134ae12e531..f2c2ea79a07f 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -45,10 +45,10 @@ struct ad8366_state {
enum ad8366_type type;
struct ad8366_info *info;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- unsigned char data[2] ____cacheline_aligned;
+ unsigned char data[2] __aligned(IIO_DMA_MINALIGN);
};
static struct ad8366_info ad8366_infos[] = {
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 56dea9734c8d..8378c00fa2ff 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -726,6 +726,7 @@ static int atlas_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
@@ -734,7 +735,12 @@ static int atlas_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- return atlas_set_powermode(data, 0);
+ ret = atlas_set_powermode(data, 0);
+ if (ret)
+ dev_err(&client->dev, "Failed to power down device (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
static int atlas_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 16ff7a98c9f0..ef5e0e46fd34 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -638,7 +638,7 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
comp_temp = bme680_compensate_temp(data, adc_temp);
/*
* val might be NULL if we're called by the read_press/read_humid
- * routine which is callled to get t_fine value used in
+ * routine which is called to get t_fine value used in
* compensate_press/compensate_humid to get compensated
* pressure/humidity readings.
*/
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 80ef1aa9aae3..560183efb36f 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -536,14 +536,20 @@ static int ccs811_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ccs811_data *data = iio_priv(indio_dev);
+ int ret;
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
if (data->drdy_trig)
iio_trigger_unregister(data->drdy_trig);
- return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE,
- CCS811_MODE_IDLE);
+ ret = i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE,
+ CCS811_MODE_IDLE);
+ if (ret)
+ dev_warn(&client->dev, "Failed to power down device (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
static const struct i2c_device_id ccs811_id[] = {
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index 37143b5526ee..54066532ea45 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -551,7 +551,7 @@ static const struct iio_chan_spec scd4x_channels[] = {
},
};
-static int __maybe_unused scd4x_suspend(struct device *dev)
+static int scd4x_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct scd4x_state *state = iio_priv(indio_dev);
@@ -564,7 +564,7 @@ static int __maybe_unused scd4x_suspend(struct device *dev)
return regulator_disable(state->vdd);
}
-static int __maybe_unused scd4x_resume(struct device *dev)
+static int scd4x_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct scd4x_state *state = iio_priv(indio_dev);
@@ -577,7 +577,7 @@ static int __maybe_unused scd4x_resume(struct device *dev)
return scd4x_send_command(state, CMD_START_MEAS);
}
-static __maybe_unused SIMPLE_DEV_PM_OPS(scd4x_pm_ops, scd4x_suspend, scd4x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(scd4x_pm_ops, scd4x_suspend, scd4x_resume);
static void scd4x_stop_meas(void *state)
{
@@ -688,7 +688,7 @@ static struct i2c_driver scd4x_i2c_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = scd4x_dt_ids,
- .pm = &scd4x_pm_ops
+ .pm = pm_sleep_ptr(&scd4x_pm_ops),
},
.probe = scd4x_probe,
};
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index abd67559e451..814ce0aad1cc 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -372,7 +372,7 @@ int sps30_probe(struct device *dev, const char *name, void *priv, const struct s
return devm_iio_device_register(dev, indio_dev);
}
-EXPORT_SYMBOL_GPL(sps30_probe);
+EXPORT_SYMBOL_NS_GPL(sps30_probe, IIO_SPS30);
MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor driver");
diff --git a/drivers/iio/chemical/sps30_i2c.c b/drivers/iio/chemical/sps30_i2c.c
index d33560ed7184..2aed483a2fde 100644
--- a/drivers/iio/chemical/sps30_i2c.c
+++ b/drivers/iio/chemical/sps30_i2c.c
@@ -256,3 +256,4 @@ module_i2c_driver(sps30_i2c_driver);
MODULE_AUTHOR("Tomasz Duszynski <tomasz.duszynski@octakon.com>");
MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor i2c driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_SPS30);
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
index 3f311d50087c..164f4b3e025c 100644
--- a/drivers/iio/chemical/sps30_serial.c
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -429,3 +429,4 @@ module_serdev_device_driver(sps30_serial_driver);
MODULE_AUTHOR("Tomasz Duszynski <tomasz.duszynski@octakon.com>");
MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor serial driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_SPS30);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
index af801e203623..119acb078af3 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
@@ -20,6 +20,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_device.h>
@@ -97,7 +98,7 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL, NULL);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL);
if (ret)
return ret;
@@ -113,7 +114,7 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev)
if (ret)
return ret;
- return devm_iio_device_register(dev, indio_dev);
+ return cros_ec_sensors_core_register(dev, indio_dev, NULL);
}
static const struct platform_device_id cros_ec_lid_angle_ids[] = {
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 376a5b30010a..66153b1850f1 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -16,6 +16,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -235,8 +236,7 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
return -ENOMEM;
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
- cros_ec_sensors_capture,
- cros_ec_sensors_push_data);
+ cros_ec_sensors_capture);
if (ret)
return ret;
@@ -297,7 +297,8 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
else
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- return devm_iio_device_register(dev, indio_dev);
+ return cros_ec_sensors_core_register(dev, indio_dev,
+ cros_ec_sensors_push_data);
}
static const struct platform_device_id cros_ec_sensors_ids[] = {
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 5976aca48e3b..05a28d353e34 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -29,12 +29,6 @@
*/
#define CROS_EC_FIFO_SIZE (2048 * 2 / 3)
-static char *cros_ec_loc[] = {
- [MOTIONSENSE_LOC_BASE] = "base",
- [MOTIONSENSE_LOC_LID] = "lid",
- [MOTIONSENSE_LOC_MAX] = "unknown",
-};
-
static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev,
u16 cmd_offset, u16 cmd, u32 *mask)
{
@@ -234,21 +228,18 @@ static void cros_ec_sensors_core_clean(void *arg)
/**
* cros_ec_sensors_core_init() - basic initialization of the core structure
- * @pdev: platform device created for the sensors
+ * @pdev: platform device created for the sensor
* @indio_dev: iio device structure of the device
* @physical_device: true if the device refers to a physical device
* @trigger_capture: function pointer to call buffer is triggered,
* for backward compatibility.
- * @push_data: function to call when cros_ec_sensorhub receives
- * a sample for that sensor.
*
* Return: 0 on success, -errno on failure.
*/
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev,
bool physical_device,
- cros_ec_sensors_capture_t trigger_capture,
- cros_ec_sensorhub_push_data_cb_t push_data)
+ cros_ec_sensors_capture_t trigger_capture)
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
@@ -287,6 +278,8 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
indio_dev->name = pdev->name;
if (physical_device) {
+ enum motionsensor_location loc;
+
state->param.cmd = MOTIONSENSE_CMD_INFO;
state->param.info.sensor_num = sensor_platform->sensor_num;
ret = cros_ec_motion_send_host_cmd(state, 0);
@@ -295,7 +288,13 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
return ret;
}
state->type = state->resp->info.type;
- state->loc = state->resp->info.location;
+ loc = state->resp->info.location;
+ if (loc == MOTIONSENSE_LOC_BASE)
+ indio_dev->label = "accel-base";
+ else if (loc == MOTIONSENSE_LOC_LID)
+ indio_dev->label = "accel-display";
+ else if (loc == MOTIONSENSE_LOC_CAMERA)
+ indio_dev->label = "accel-camera";
/* Set sign vector, only used for backward compatibility. */
memset(state->sign, 1, CROS_EC_SENSOR_MAX_AXIS);
@@ -338,17 +337,6 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
if (ret)
return ret;
- ret = cros_ec_sensorhub_register_push_data(
- sensor_hub, sensor_platform->sensor_num,
- indio_dev, push_data);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(
- dev, cros_ec_sensors_core_clean, pdev);
- if (ret)
- return ret;
-
/* Timestamp coming from FIFO are in ns since boot. */
ret = iio_device_set_clock(indio_dev, CLOCK_BOOTTIME);
if (ret)
@@ -371,6 +359,46 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
/**
+ * cros_ec_sensors_core_register() - Register callback to FIFO and IIO when
+ * sensor is ready.
+ * It must be called at the end of the sensor probe routine.
+ * @dev: device created for the sensor
+ * @indio_dev: iio device structure of the device
+ * @push_data: function to call when cros_ec_sensorhub receives
+ * a sample for that sensor.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_core_register(struct device *dev,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t push_data)
+{
+ struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+ struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_dev *ec = sensor_hub->ec;
+ int ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return ret;
+
+ if (!push_data ||
+ !cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return 0;
+
+ ret = cros_ec_sensorhub_register_push_data(
+ sensor_hub, sensor_platform->sensor_num,
+ indio_dev, push_data);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(
+ dev, cros_ec_sensors_core_clean, pdev);
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_register);
+
+/**
* cros_ec_motion_send_host_cmd() - send motion sense host command
* @state: pointer to state information for device
* @opt_length: optional length to reduce the response size, useful on the data
@@ -442,15 +470,6 @@ static ssize_t cros_ec_sensors_id(struct iio_dev *indio_dev,
return snprintf(buf, PAGE_SIZE, "%d\n", st->param.info.sensor_num);
}
-static ssize_t cros_ec_sensors_loc(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan,
- char *buf)
-{
- struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", cros_ec_loc[st->loc]);
-}
-
const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
{
.name = "calibrate",
@@ -462,11 +481,6 @@ const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
.shared = IIO_SHARED_BY_ALL,
.read = cros_ec_sensors_id
},
- {
- .name = "location",
- .shared = IIO_SHARED_BY_ALL,
- .read = cros_ec_sensors_loc
- },
{ },
};
EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
diff --git a/drivers/iio/common/ssp_sensors/ssp.h b/drivers/iio/common/ssp_sensors/ssp.h
index abb832795619..f649cdecc277 100644
--- a/drivers/iio/common/ssp_sensors/ssp.h
+++ b/drivers/iio/common/ssp_sensors/ssp.h
@@ -221,8 +221,7 @@ struct ssp_data {
struct iio_dev *sensor_devs[SSP_SENSOR_MAX];
atomic_t enable_refcount;
- __le16 header_buffer[SSP_HEADER_BUFFER_SIZE / sizeof(__le16)]
- ____cacheline_aligned;
+ __le16 header_buffer[SSP_HEADER_BUFFER_SIZE / sizeof(__le16)] __aligned(IIO_DMA_MINALIGN);
};
void ssp_clean_pending_list(struct ssp_data *data);
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index d1c7bde8aece..80521bd28d0f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -416,7 +416,7 @@ config TI_DAC5571
help
Driver for the Texas Instruments
DAC5571, DAC6571, DAC7571, DAC5574, DAC6574, DAC7574, DAC5573,
- DAC6573, DAC7573, DAC8571, DAC8574.
+ DAC6573, DAC7573, DAC8571, DAC8574, DAC121C081.
If compiled as a module, it will be called ti-dac5571.
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index d87cf14daabe..4447b8811827 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -115,13 +115,13 @@ struct ad5064_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
u8 i2c[3];
__be32 spi;
- } data ____cacheline_aligned;
+ } data __aligned(IIO_DMA_MINALIGN);
};
enum ad5064_type {
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 22b000a40828..e0b7f658d611 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -79,13 +79,13 @@ struct ad5360_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
- } data[2] ____cacheline_aligned;
+ } data[2] __aligned(IIO_DMA_MINALIGN);
};
enum ad5360_type {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index a44c83242fb1..81775152aac6 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -36,8 +36,7 @@
* @channel_template: channel specification template
* @num_channels: number of channels
* @int_vref: internal vref in uV
-*/
-
+ */
struct ad5380_chip_info {
struct iio_chan_spec channel_template;
unsigned int num_channels;
@@ -53,7 +52,6 @@ struct ad5380_chip_info {
* @pwr_down: whether the chip is currently in power down mode
* @lock: lock to protect the data buffer during regmap ops
*/
-
struct ad5380_state {
struct regmap *regmap;
const struct ad5380_chip_info *chip_info;
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index eedf661d32b2..7644acfd879e 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -72,13 +72,13 @@ struct ad5421_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
- } data[2] ____cacheline_aligned;
+ } data[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_event_spec ad5421_current_event[] = {
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index bad9bdaafa94..4572d6f49275 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -68,10 +68,10 @@ struct ad5449 {
uint16_t dac_cache[AD5449_MAX_CHANNELS];
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- __be16 data[2] ____cacheline_aligned;
+ __be16 data[2] __aligned(IIO_DMA_MINALIGN);
};
enum ad5449_type {
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index a0817e799cc0..e6c5be728bb2 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -54,7 +54,7 @@ struct ad5504_state {
unsigned pwr_down_mask;
unsigned pwr_down_mode;
- __be16 data[2] ____cacheline_aligned;
+ __be16 data[2] __aligned(IIO_DMA_MINALIGN);
};
/*
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 4434c1b2a322..7a9b5fc1e579 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -603,7 +603,7 @@ int ad5592r_probe(struct device *dev, const char *name,
st->reg = devm_regulator_get_optional(dev, "vref");
if (IS_ERR(st->reg)) {
- if ((PTR_ERR(st->reg) != -ENODEV) && dev->of_node)
+ if ((PTR_ERR(st->reg) != -ENODEV) && dev_fwnode(dev))
return PTR_ERR(st->reg);
st->reg = NULL;
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
index 2a22ef691996..cc7be426cbc8 100644
--- a/drivers/iio/dac/ad5592r-base.h
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -14,6 +14,8 @@
#include <linux/mutex.h>
#include <linux/gpio/driver.h>
+#include <linux/iio/iio.h>
+
struct device;
struct ad5592r_state;
@@ -65,7 +67,7 @@ struct ad5592r_state {
u8 gpio_in;
u8 gpio_val;
- __be16 spi_msg ____cacheline_aligned;
+ __be16 spi_msg __aligned(IIO_DMA_MINALIGN);
__be16 spi_msg_nop;
};
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index cd5fff9e9d53..b7ade3a6b9b6 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -13,6 +13,8 @@
#include <linux/mutex.h>
#include <linux/kernel.h>
+#include <linux/iio/iio.h>
+
#define AD5310_CMD(x) ((x) << 12)
#define AD5683_DATA(x) ((x) << 4)
@@ -137,7 +139,7 @@ struct ad5686_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
@@ -145,7 +147,7 @@ struct ad5686_state {
__be32 d32;
__be16 d16;
u8 d8[4];
- } data[3] ____cacheline_aligned;
+ } data[3] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 1a63b8456725..beadfa938d2d 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -189,14 +189,14 @@ struct ad5755_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
- } data[2] ____cacheline_aligned;
+ } data[2] __aligned(IIO_DMA_MINALIGN);
};
enum ad5755_type {
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index 4cb8471db81e..6aa1a068adb0 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -70,13 +70,13 @@ struct ad5761_state {
enum ad5761_voltage_range range;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
- } data[3] ____cacheline_aligned;
+ } data[3] __aligned(IIO_DMA_MINALIGN);
};
static const struct ad5761_range_params ad5761_range_params[] = {
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index d235a8047ba0..26c049d5b73a 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -56,13 +56,13 @@ struct ad5764_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
- } data[2] ____cacheline_aligned;
+ } data[2] __aligned(IIO_DMA_MINALIGN);
};
enum ad5764_type {
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
index 43189af2fb1f..899894523752 100644
--- a/drivers/iio/dac/ad5766.c
+++ b/drivers/iio/dac/ad5766.c
@@ -123,7 +123,7 @@ struct ad5766_state {
u32 d32;
u16 w16[2];
u8 b8[4];
- } data[3] ____cacheline_aligned;
+ } data[3] __aligned(IIO_DMA_MINALIGN);
};
struct ad5766_span_tbl {
diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
index 7e2fd32e993a..f66d67402e43 100644
--- a/drivers/iio/dac/ad5770r.c
+++ b/drivers/iio/dac/ad5770r.c
@@ -140,7 +140,7 @@ struct ad5770r_state {
bool ch_pwr_down[AD5770R_MAX_CHANNELS];
bool internal_ref;
bool external_res;
- u8 transf_buf[2] ____cacheline_aligned;
+ u8 transf_buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct regmap_config ad5770r_spi_regmap_config = {
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 339564fe47d1..a4167454da81 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -95,7 +95,7 @@ struct ad5791_state {
union {
__be32 d32;
u8 d8[4];
- } data[3] ____cacheline_aligned;
+ } data[3] __aligned(IIO_DMA_MINALIGN);
};
enum ad5791_supported_device_ids {
diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c
index 59a38ca4c3c7..06f05750d921 100644
--- a/drivers/iio/dac/ad7293.c
+++ b/drivers/iio/dac/ad7293.c
@@ -144,7 +144,7 @@ struct ad7293_state {
struct regulator *reg_avdd;
struct regulator *reg_vdrive;
u8 page_select;
- u8 data[3] ____cacheline_aligned;
+ u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
static int ad7293_page_select(struct ad7293_state *st, unsigned int reg)
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 03edf046dec6..bff6bf697d9c 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -44,10 +44,10 @@ struct ad7303_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- __be16 data ____cacheline_aligned;
+ __be16 data __aligned(IIO_DMA_MINALIGN);
};
static int ad7303_write(struct ad7303_state *st, unsigned int chan,
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 6be35c92d435..919e8c880697 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -26,7 +26,7 @@ struct ad8801_state {
struct regulator *vrefh_reg;
struct regulator *vrefl_reg;
- __be16 data ____cacheline_aligned;
+ __be16 data __aligned(IIO_DMA_MINALIGN);
};
static int ad8801_spi_write(struct ad8801_state *state,
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index 95813569f394..791dd999cf29 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -16,6 +16,7 @@
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/types.h>
#define CIO_DAC_NUM_CHAN 16
@@ -37,11 +38,11 @@ MODULE_PARM_DESC(base, "Measurement Computing CIO-DAC base addresses");
/**
* struct cio_dac_iio - IIO device private data structure
* @chan_out_states: channels' output states
- * @base: base port address of the IIO device
+ * @base: base memory address of the DAC device
*/
struct cio_dac_iio {
int chan_out_states[CIO_DAC_NUM_CHAN];
- unsigned int base;
+ u16 __iomem *base;
};
static int cio_dac_read_raw(struct iio_dev *indio_dev,
@@ -61,7 +62,6 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct cio_dac_iio *const priv = iio_priv(indio_dev);
- const unsigned int chan_addr_offset = 2 * chan->channel;
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
@@ -71,7 +71,7 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
priv->chan_out_states[chan->channel] = val;
- outw(val, priv->base + chan_addr_offset);
+ iowrite16(val, priv->base + chan->channel);
return 0;
}
@@ -105,18 +105,20 @@ static int cio_dac_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
+ priv = iio_priv(indio_dev);
+ priv->base = devm_ioport_map(dev, base[id], CIO_DAC_EXTENT);
+ if (!priv->base)
+ return -ENOMEM;
+
indio_dev->info = &cio_dac_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = cio_dac_channels;
indio_dev->num_channels = CIO_DAC_NUM_CHAN;
indio_dev->name = dev_name(dev);
- priv = iio_priv(indio_dev);
- priv->base = base[id];
-
/* initialize DAC outputs to 0V */
- for (i = 0; i < 32; i += 2)
- outw(0, base[id] + i);
+ for (i = 0; i < CIO_DAC_NUM_CHAN; i++)
+ iowrite16(0, priv->base + i);
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index 5a5e967b0be4..509394690bcc 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -171,7 +171,7 @@ static int ds4424_verify_chip(struct iio_dev *indio_dev)
return ret;
}
-static int __maybe_unused ds4424_suspend(struct device *dev)
+static int ds4424_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -189,7 +189,7 @@ static int __maybe_unused ds4424_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused ds4424_resume(struct device *dev)
+static int ds4424_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -206,7 +206,7 @@ static int __maybe_unused ds4424_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ds4424_pm_ops, ds4424_suspend, ds4424_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ds4424_pm_ops, ds4424_suspend, ds4424_resume);
static const struct iio_info ds4424_info = {
.read_raw = ds4424_read_raw,
@@ -312,7 +312,7 @@ static struct i2c_driver ds4424_driver = {
.driver = {
.name = "ds4424",
.of_match_table = ds4424_of_match,
- .pm = &ds4424_pm_ops,
+ .pm = pm_sleep_ptr(&ds4424_pm_ops),
},
.probe = ds4424_probe,
.remove = ds4424_remove,
diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c
index c76233c9bb72..2758fc8a5ad5 100644
--- a/drivers/iio/dac/ltc1660.c
+++ b/drivers/iio/dac/ltc1660.c
@@ -137,20 +137,21 @@ static const struct iio_info ltc1660_info = {
.write_raw = &ltc1660_write_raw,
};
-static int __maybe_unused ltc1660_suspend(struct device *dev)
+static int ltc1660_suspend(struct device *dev)
{
struct ltc1660_priv *priv = iio_priv(spi_get_drvdata(
to_spi_device(dev)));
return regmap_write(priv->regmap, LTC1660_REG_SLEEP, 0x00);
}
-static int __maybe_unused ltc1660_resume(struct device *dev)
+static int ltc1660_resume(struct device *dev)
{
struct ltc1660_priv *priv = iio_priv(spi_get_drvdata(
to_spi_device(dev)));
return regmap_write(priv->regmap, LTC1660_REG_WAKE, 0x00);
}
-static SIMPLE_DEV_PM_OPS(ltc1660_pm_ops, ltc1660_suspend, ltc1660_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ltc1660_pm_ops, ltc1660_suspend,
+ ltc1660_resume);
static int ltc1660_probe(struct spi_device *spi)
{
@@ -233,7 +234,7 @@ static struct spi_driver ltc1660_driver = {
.driver = {
.name = "ltc1660",
.of_match_table = ltc1660_dt_ids,
- .pm = &ltc1660_pm_ops,
+ .pm = pm_sleep_ptr(&ltc1660_pm_ops),
},
.probe = ltc1660_probe,
.remove = ltc1660_remove,
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index 937b0d25a11c..28bdde2d3088 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -91,10 +91,10 @@ struct ltc2688_state {
struct mutex lock;
int vref;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- u8 tx_data[6] ____cacheline_aligned;
+ u8 tx_data[6] __aligned(IIO_DMA_MINALIGN);
u8 rx_data[3];
};
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index a6ef555153f4..373ce6ff83b7 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -100,21 +100,21 @@ static int max517_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int __maybe_unused max517_suspend(struct device *dev)
+static int max517_suspend(struct device *dev)
{
u8 outbuf = COMMAND_PD;
return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
}
-static int __maybe_unused max517_resume(struct device *dev)
+static int max517_resume(struct device *dev)
{
u8 outbuf = 0;
return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
}
-static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
static const struct iio_info max517_info = {
.read_raw = max517_read_raw,
@@ -201,7 +201,7 @@ MODULE_DEVICE_TABLE(i2c, max517_id);
static struct i2c_driver max517_driver = {
.driver = {
.name = MAX517_DRV_NAME,
- .pm = &max517_pm_ops,
+ .pm = pm_sleep_ptr(&max517_pm_ops),
},
.probe = max517_probe,
.id_table = max517_id,
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 540f9ea7cada..e001b594d5b1 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -267,7 +267,7 @@ static int max5821_write_raw(struct iio_dev *indio_dev,
}
}
-static int __maybe_unused max5821_suspend(struct device *dev)
+static int max5821_suspend(struct device *dev)
{
u8 outbuf[2] = { MAX5821_EXTENDED_COMMAND_MODE,
MAX5821_EXTENDED_DAC_A |
@@ -277,7 +277,7 @@ static int __maybe_unused max5821_suspend(struct device *dev)
return i2c_master_send(to_i2c_client(dev), outbuf, 2);
}
-static int __maybe_unused max5821_resume(struct device *dev)
+static int max5821_resume(struct device *dev)
{
u8 outbuf[2] = { MAX5821_EXTENDED_COMMAND_MODE,
MAX5821_EXTENDED_DAC_A |
@@ -287,7 +287,8 @@ static int __maybe_unused max5821_resume(struct device *dev)
return i2c_master_send(to_i2c_client(dev), outbuf, 2);
}
-static SIMPLE_DEV_PM_OPS(max5821_pm_ops, max5821_suspend, max5821_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max5821_pm_ops, max5821_suspend,
+ max5821_resume);
static const struct iio_info max5821_info = {
.read_raw = max5821_read_raw,
@@ -374,7 +375,7 @@ static struct i2c_driver max5821_driver = {
.driver = {
.name = "max5821",
.of_match_table = max5821_of_match,
- .pm = &max5821_pm_ops,
+ .pm = pm_sleep_ptr(&max5821_pm_ops),
},
.probe = max5821_probe,
.id_table = max5821_id,
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 7fcb86288823..bb4b85a7b95b 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -42,7 +42,7 @@ struct mcp4725_data {
struct regulator *vref_reg;
};
-static int __maybe_unused mcp4725_suspend(struct device *dev)
+static int mcp4725_suspend(struct device *dev)
{
struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
@@ -55,7 +55,7 @@ static int __maybe_unused mcp4725_suspend(struct device *dev)
return i2c_master_send(data->client, outbuf, 2);
}
-static int __maybe_unused mcp4725_resume(struct device *dev)
+static int mcp4725_resume(struct device *dev)
{
struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
@@ -68,7 +68,8 @@ static int __maybe_unused mcp4725_resume(struct device *dev)
return i2c_master_send(data->client, outbuf, 2);
}
-static SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend,
+ mcp4725_resume);
static ssize_t mcp4725_store_eeprom(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
@@ -523,7 +524,7 @@ static struct i2c_driver mcp4725_driver = {
.driver = {
.name = MCP4725_DRV_NAME,
.of_match_table = mcp4725_of_match,
- .pm = &mcp4725_pm_ops,
+ .pm = pm_sleep_ptr(&mcp4725_pm_ops),
},
.probe = mcp4725_probe,
.remove = mcp4725_remove,
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index cb9e60e71b91..da4327624d45 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -17,10 +17,12 @@
#include <linux/bitops.h>
#define MCP4922_NUM_CHANNELS 2
+#define MCP4921_NUM_CHANNELS 1
enum mcp4922_supported_device_ids {
ID_MCP4902,
ID_MCP4912,
+ ID_MCP4921,
ID_MCP4922,
};
@@ -29,7 +31,7 @@ struct mcp4922_state {
unsigned int value[MCP4922_NUM_CHANNELS];
unsigned int vref_mv;
struct regulator *vref_reg;
- u8 mosi[2] ____cacheline_aligned;
+ u8 mosi[2] __aligned(IIO_DMA_MINALIGN);
};
#define MCP4922_CHAN(chan, bits) { \
@@ -105,9 +107,10 @@ static int mcp4922_write_raw(struct iio_dev *indio_dev,
}
}
-static const struct iio_chan_spec mcp4922_channels[3][MCP4922_NUM_CHANNELS] = {
+static const struct iio_chan_spec mcp4922_channels[4][MCP4922_NUM_CHANNELS] = {
[ID_MCP4902] = { MCP4922_CHAN(0, 8), MCP4922_CHAN(1, 8) },
[ID_MCP4912] = { MCP4922_CHAN(0, 10), MCP4922_CHAN(1, 10) },
+ [ID_MCP4921] = { MCP4922_CHAN(0, 12), {} },
[ID_MCP4922] = { MCP4922_CHAN(0, 12), MCP4922_CHAN(1, 12) },
};
@@ -154,7 +157,10 @@ static int mcp4922_probe(struct spi_device *spi)
indio_dev->info = &mcp4922_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mcp4922_channels[id->driver_data];
- indio_dev->num_channels = MCP4922_NUM_CHANNELS;
+ if (id->driver_data == ID_MCP4921)
+ indio_dev->num_channels = MCP4921_NUM_CHANNELS;
+ else
+ indio_dev->num_channels = MCP4922_NUM_CHANNELS;
indio_dev->name = id->name;
ret = iio_device_register(indio_dev);
@@ -185,6 +191,7 @@ static void mcp4922_remove(struct spi_device *spi)
static const struct spi_device_id mcp4922_id[] = {
{"mcp4902", ID_MCP4902},
{"mcp4912", ID_MCP4912},
+ {"mcp4921", ID_MCP4921},
{"mcp4922", ID_MCP4922},
{}
};
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index daa42bcbae83..15eb44075107 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -12,8 +12,11 @@
#include <linux/iio/iio.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/string_helpers.h>
#include "stm32-dac-core.h"
@@ -79,8 +82,7 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
mutex_unlock(&dac->lock);
if (ret < 0) {
- dev_err(&indio_dev->dev, "%s failed\n", en ?
- "Enable" : "Disable");
+ dev_err(&indio_dev->dev, "%s failed\n", str_enable_disable(en));
goto err_put_pm;
}
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 106ce3546419..8e1590e3cc8b 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -55,7 +55,7 @@ struct ti_dac_chip {
bool powerdown;
u8 powerdown_mode;
u8 resolution;
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
#define WRITE_NOT_UPDATE(chan) (0x00 | (chan) << 6)
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 4b6b04038e94..f91f8a504989 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -13,6 +13,7 @@
* https://www.ti.com/lit/ds/symlink/dac5573.pdf
* https://www.ti.com/lit/ds/symlink/dac6573.pdf
* https://www.ti.com/lit/ds/symlink/dac7573.pdf
+ * https://www.ti.com/lit/ds/symlink/dac121c081.pdf
*/
#include <linux/iio/iio.h>
@@ -52,7 +53,7 @@ struct dac5571_data {
struct dac5571_spec const *spec;
int (*dac5571_cmd)(struct dac5571_data *data, int channel, u16 val);
int (*dac5571_pwrdwn)(struct dac5571_data *data, int channel, u8 pwrdwn);
- u8 buf[3] ____cacheline_aligned;
+ u8 buf[3] __aligned(IIO_DMA_MINALIGN);
};
#define DAC5571_POWERDOWN(mode) ((mode) + 1)
@@ -402,6 +403,7 @@ static const struct of_device_id dac5571_of_id[] = {
{.compatible = "ti,dac5573", .data = (void *)quad_8bit},
{.compatible = "ti,dac6573", .data = (void *)quad_10bit},
{.compatible = "ti,dac7573", .data = (void *)quad_12bit},
+ {.compatible = "ti,dac121c081", .data = (void *)single_12bit},
{}
};
MODULE_DEVICE_TABLE(of, dac5571_of_id);
@@ -416,6 +418,7 @@ static const struct i2c_device_id dac5571_id[] = {
{"dac5573", quad_8bit},
{"dac6573", quad_10bit},
{"dac7573", quad_12bit},
+ {"dac121c081", single_12bit},
{}
};
MODULE_DEVICE_TABLE(i2c, dac5571_id);
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 4afc411725d9..7f89d2a52f49 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -52,7 +52,7 @@ struct ti_dac_chip {
bool powerdown;
u8 powerdown_mode;
u8 resolution;
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static u8 ti_dac_get_power(struct ti_dac_chip *ti_dac, bool powerdown)
diff --git a/drivers/iio/dac/ti-dac7612.c b/drivers/iio/dac/ti-dac7612.c
index 4c0f4b5e9ff4..8195815de26f 100644
--- a/drivers/iio/dac/ti-dac7612.c
+++ b/drivers/iio/dac/ti-dac7612.c
@@ -31,10 +31,10 @@ struct dac7612 {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- uint8_t data[2] ____cacheline_aligned;
+ uint8_t data[2] __aligned(IIO_DMA_MINALIGN);
};
static int dac7612_cmd_single(struct dac7612 *priv, int channel, u16 val)
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 92429c0d2685..fc182250c622 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 942870539268..97662ca1ca96 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -287,13 +287,13 @@ struct ad9523_state {
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
+ * DMA (thus cache coherency maintenance) may require that
+ * transfer buffers live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
- } data[2] ____cacheline_aligned;
+ } data[2] __aligned(IIO_DMA_MINALIGN);
};
static int ad9523_read(struct iio_dev *indio_dev, unsigned int addr)
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index be1218d86291..85e289700c3c 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -56,10 +56,10 @@ struct adf4350_state {
*/
struct mutex lock;
/*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
+ * DMA (thus cache coherency maintenance) may require that
+ * transfer buffers live in their own cache lines.
*/
- __be32 val ____cacheline_aligned;
+ __be32 val __aligned(IIO_DMA_MINALIGN);
};
static struct adf4350_platform_data default_pdata = {
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index ecd5e18995ad..135c8cedc33d 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -175,7 +175,7 @@ struct adf4371_state {
unsigned int mod2;
unsigned int rf_div_sel;
unsigned int ref_div_factor;
- u8 buf[10] ____cacheline_aligned;
+ u8 buf[10] __aligned(IIO_DMA_MINALIGN);
};
static unsigned long long adf4371_pll_fract_n_get_rate(struct adf4371_state *st,
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index b0e1f6571afb..ed8167271358 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -100,7 +100,7 @@ struct admv1013_state {
unsigned int input_mode;
unsigned int quad_se_mode;
bool det_en;
- u8 data[3] ____cacheline_aligned;
+ u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
static int __admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c
index 1aac5665b5de..865addd10db4 100644
--- a/drivers/iio/frequency/admv1014.c
+++ b/drivers/iio/frequency/admv1014.c
@@ -127,7 +127,7 @@ struct admv1014_state {
unsigned int quad_se_mode;
unsigned int p1db_comp;
bool det_en;
- u8 data[3] ____cacheline_aligned;
+ u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
static const int mixer_vgate_table[] = {106, 107, 108, 110, 111, 112, 113, 114,
diff --git a/drivers/iio/frequency/admv4420.c b/drivers/iio/frequency/admv4420.c
index 51134aee8510..863ba8e98c95 100644
--- a/drivers/iio/frequency/admv4420.c
+++ b/drivers/iio/frequency/admv4420.c
@@ -113,7 +113,7 @@ struct admv4420_state {
struct admv4420_n_counter n_counter;
enum admv4420_mux_sel mux_sel;
struct mutex lock;
- u8 transf_buf[4] ____cacheline_aligned;
+ u8 transf_buf[4] __aligned(IIO_DMA_MINALIGN);
};
static const struct regmap_config admv4420_regmap_config = {
diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
index 8255ffd174f6..21878bad0909 100644
--- a/drivers/iio/frequency/adrf6780.c
+++ b/drivers/iio/frequency/adrf6780.c
@@ -86,7 +86,7 @@ struct adrf6780_state {
bool uc_bias_en;
bool lo_sideband;
bool vdet_out_en;
- u8 data[3] ____cacheline_aligned;
+ u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
static int __adrf6780_spi_read(struct adrf6780_state *st, unsigned int reg,
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index acef59d822b1..14b3abf6dce9 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -45,7 +45,7 @@ struct adis16080_state {
const struct adis16080_chip_info *info;
struct mutex lock;
- __be16 buf ____cacheline_aligned;
+ __be16 buf __aligned(IIO_DMA_MINALIGN);
};
static int adis16080_read_sample(struct iio_dev *indio_dev,
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index b9c952e65b55..33cde9e6fca5 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -41,7 +41,7 @@
struct adis16130_state {
struct spi_device *us;
struct mutex buf_lock;
- u8 buf[4] ____cacheline_aligned;
+ u8 buf[4] __aligned(IIO_DMA_MINALIGN);
};
static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index 04f350025215..f84438e0c42c 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -73,7 +73,7 @@ enum {
struct adxrs450_state {
struct spi_device *us;
struct mutex buf_lock;
- __be32 tx ____cacheline_aligned;
+ __be32 tx __aligned(IIO_DMA_MINALIGN);
__be32 rx;
};
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 81a6d09788bd..cedd9f02ea21 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -766,7 +766,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
return 0;
}
/*
- * We will expect the enable and disable to do operation in
+ * We expect the enable and disable to operate
* in reverse order. This will happen here anyway as our
* resume operation uses sync mode runtime pm calls, the
* suspend operation will be delayed by autosuspend delay
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 0923fd793492..a36d71d9e3ea 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -150,10 +150,10 @@ struct fxas21002c_data {
struct regulator *vddio;
/*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
+ * DMA (thus cache coherency maintenance) may require that
+ * transfer buffers live in their own cache lines.
*/
- s16 buffer[8] ____cacheline_aligned;
+ s16 buffer[8] __aligned(IIO_DMA_MINALIGN);
};
enum fxas21002c_channel_index {
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index a7f1bbb5f289..0491c64e1b32 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -364,7 +364,7 @@ static int itg3200_remove(struct i2c_client *client)
return 0;
}
-static int __maybe_unused itg3200_suspend(struct device *dev)
+static int itg3200_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct itg3200 *st = iio_priv(indio_dev);
@@ -375,14 +375,15 @@ static int __maybe_unused itg3200_suspend(struct device *dev)
ITG3200_SLEEP);
}
-static int __maybe_unused itg3200_resume(struct device *dev)
+static int itg3200_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
return itg3200_initial_setup(indio_dev);
}
-static SIMPLE_DEV_PM_OPS(itg3200_pm_ops, itg3200_suspend, itg3200_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(itg3200_pm_ops, itg3200_suspend,
+ itg3200_resume);
static const struct i2c_device_id itg3200_id[] = {
{ "itg3200", 0 },
@@ -400,7 +401,7 @@ static struct i2c_driver itg3200_driver = {
.driver = {
.name = "itg3200",
.of_match_table = itg3200_of_match,
- .pm = &itg3200_pm_ops,
+ .pm = pm_sleep_ptr(&itg3200_pm_ops),
},
.id_table = itg3200_id,
.probe = itg3200_probe,
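
The itg3200 hunk above (and several later ones) converts SIMPLE_DEV_PM_OPS plus __maybe_unused callbacks into DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(). A rough sketch with hypothetical foo_suspend()/foo_resume() callbacks: the new macro always wires the callbacks into the table, so they never look unused, while pm_sleep_ptr() compiles the .pm pointer to NULL when CONFIG_PM_SLEEP is off, letting the optimizer drop the table and callbacks.

#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* put the device into its low-power state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore the device */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* at the bus driver definition: .pm = pm_sleep_ptr(&foo_pm_ops), */
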
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 5908a96ca8af..6a6d84a3deda 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -1262,7 +1262,6 @@ err_power_down:
return ret;
}
-EXPORT_SYMBOL(mpu3050_common_probe);
void mpu3050_common_remove(struct device *dev)
{
@@ -1278,9 +1277,7 @@ void mpu3050_common_remove(struct device *dev)
iio_device_unregister(indio_dev);
mpu3050_power_down(mpu3050);
}
-EXPORT_SYMBOL(mpu3050_common_remove);
-#ifdef CONFIG_PM
static int mpu3050_runtime_suspend(struct device *dev)
{
return mpu3050_power_down(iio_priv(dev_get_drvdata(dev)));
@@ -1290,16 +1287,9 @@ static int mpu3050_runtime_resume(struct device *dev)
{
return mpu3050_power_up(iio_priv(dev_get_drvdata(dev)));
}
-#endif /* CONFIG_PM */
-
-const struct dev_pm_ops mpu3050_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(mpu3050_runtime_suspend,
- mpu3050_runtime_resume, NULL)
-};
-EXPORT_SYMBOL(mpu3050_dev_pm_ops);
+DEFINE_RUNTIME_DEV_PM_OPS(mpu3050_dev_pm_ops, mpu3050_runtime_suspend,
+ mpu3050_runtime_resume, NULL);
MODULE_AUTHOR("Linus Walleij");
MODULE_DESCRIPTION("MPU3050 gyroscope driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 5b5f58baaf7f..78f4a0102986 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -116,7 +116,7 @@ static struct i2c_driver mpu3050_i2c_driver = {
.driver = {
.of_match_table = mpu3050_i2c_of_match,
.name = "mpu3050-i2c",
- .pm = &mpu3050_dev_pm_ops,
+ .pm = pm_ptr(&mpu3050_dev_pm_ops),
},
};
module_i2c_driver(mpu3050_i2c_driver);
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 856ec901b091..3bb4028c5d74 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -408,7 +408,7 @@ static const struct of_device_id afe4403_of_match[] = {
};
MODULE_DEVICE_TABLE(of, afe4403_of_match);
-static int __maybe_unused afe4403_suspend(struct device *dev)
+static int afe4403_suspend(struct device *dev)
{
struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
@@ -429,7 +429,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused afe4403_resume(struct device *dev)
+static int afe4403_resume(struct device *dev)
{
struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
@@ -449,7 +449,8 @@ static int __maybe_unused afe4403_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend, afe4403_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend,
+ afe4403_resume);
static int afe4403_probe(struct spi_device *spi)
{
@@ -598,7 +599,7 @@ static struct spi_driver afe4403_spi_driver = {
.driver = {
.name = AFE4403_DRIVER_NAME,
.of_match_table = afe4403_of_match,
- .pm = &afe4403_pm_ops,
+ .pm = pm_sleep_ptr(&afe4403_pm_ops),
},
.probe = afe4403_probe,
.remove = afe4403_remove,
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index aa9311e1e655..dd7800159051 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -415,7 +415,7 @@ static const struct of_device_id afe4404_of_match[] = {
};
MODULE_DEVICE_TABLE(of, afe4404_of_match);
-static int __maybe_unused afe4404_suspend(struct device *dev)
+static int afe4404_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
@@ -436,7 +436,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused afe4404_resume(struct device *dev)
+static int afe4404_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
@@ -456,7 +456,8 @@ static int __maybe_unused afe4404_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend, afe4404_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend,
+ afe4404_resume);
static int afe4404_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -591,10 +592,8 @@ static int afe4404_remove(struct i2c_client *client)
iio_trigger_unregister(afe->trig);
ret = regulator_disable(afe->regulator);
- if (ret) {
+ if (ret)
dev_err(afe->dev, "Unable to disable regulator\n");
- return ret;
- }
return 0;
}
@@ -609,7 +608,7 @@ static struct i2c_driver afe4404_i2c_driver = {
.driver = {
.name = AFE4404_DRIVER_NAME,
.of_match_table = afe4404_of_match,
- .pm = &afe4404_pm_ops,
+ .pm = pm_sleep_ptr(&afe4404_pm_ops),
},
.probe = afe4404_probe,
.remove = afe4404_remove,
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 66b32413cf5e..2a4107a79662 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 6a39615b6961..517158307d8c 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -668,9 +668,9 @@ int hts221_probe(struct device *dev, int irq, const char *name,
return devm_iio_device_register(hw->dev, iio_dev);
}
-EXPORT_SYMBOL(hts221_probe);
+EXPORT_SYMBOL_NS(hts221_probe, IIO_HTS221);
-static int __maybe_unused hts221_suspend(struct device *dev)
+static int hts221_suspend(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct hts221_hw *hw = iio_priv(iio_dev);
@@ -680,7 +680,7 @@ static int __maybe_unused hts221_suspend(struct device *dev)
FIELD_PREP(HTS221_ENABLE_MASK, false));
}
-static int __maybe_unused hts221_resume(struct device *dev)
+static int hts221_resume(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct hts221_hw *hw = iio_priv(iio_dev);
@@ -694,10 +694,8 @@ static int __maybe_unused hts221_resume(struct device *dev)
return err;
}
-const struct dev_pm_ops hts221_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(hts221_suspend, hts221_resume)
-};
-EXPORT_SYMBOL(hts221_pm_ops);
+EXPORT_NS_SIMPLE_DEV_PM_OPS(hts221_pm_ops, hts221_suspend, hts221_resume,
+ IIO_HTS221);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index cab39c4756f8..afbc611f7712 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
static struct i2c_driver hts221_driver = {
.driver = {
.name = "hts221_i2c",
- .pm = &hts221_pm_ops,
+ .pm = pm_sleep_ptr(&hts221_pm_ops),
.of_match_table = hts221_i2c_of_match,
.acpi_match_table = ACPI_PTR(hts221_acpi_match),
},
@@ -74,3 +74,4 @@ module_i2c_driver(hts221_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
MODULE_DESCRIPTION("STMicroelectronics hts221 i2c driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_HTS221);
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 729e86e433b1..fc4adb68faf6 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
static struct spi_driver hts221_driver = {
.driver = {
.name = "hts221_spi",
- .pm = &hts221_pm_ops,
+ .pm = pm_sleep_ptr(&hts221_pm_ops),
.of_match_table = hts221_spi_of_match,
},
.probe = hts221_spi_probe,
@@ -66,3 +66,4 @@ module_spi_driver(hts221_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
MODULE_DESCRIPTION("STMicroelectronics hts221 spi driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_HTS221);
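
The hts221 and bmi160 hunks in this area move the shared core symbols into export namespaces. A short sketch with a hypothetical IIO_FOO namespace and foo_core_probe() symbol (neither exists in the tree): the core module exports into the namespace, each bus glue module imports it, and modpost warns about users that do not.

/* foo_core.c */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>

int foo_core_probe(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(foo_core_probe, IIO_FOO);

/* foo_i2c.c (likewise foo_spi.c) */
MODULE_IMPORT_NS(IIO_FOO);
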
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index e7aec56ea136..a77f1a8348ff 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -143,7 +143,7 @@ const struct regmap_config bmi160_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
-EXPORT_SYMBOL(bmi160_regmap_config);
+EXPORT_SYMBOL_NS(bmi160_regmap_config, IIO_BMI160);
struct bmi160_regs {
u8 data; /* LSB byte register for X-axis */
@@ -633,7 +633,7 @@ int bmi160_enable_irq(struct regmap *regmap, bool enable)
BMI160_DRDY_INT_EN, enable_bit,
BMI160_NORMAL_WRITE_USLEEP);
}
-EXPORT_SYMBOL(bmi160_enable_irq);
+EXPORT_SYMBOL_NS(bmi160_enable_irq, IIO_BMI160);
static int bmi160_get_irq(struct fwnode_handle *fwnode, enum bmi160_int_pin *pin)
{
@@ -884,7 +884,7 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
return devm_iio_device_register(dev, indio_dev);
}
-EXPORT_SYMBOL_GPL(bmi160_core_probe);
+EXPORT_SYMBOL_NS_GPL(bmi160_core_probe, IIO_BMI160);
MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
MODULE_DESCRIPTION("Bosch BMI160 driver");
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 02f149d37b17..d93f4fa2ad55 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -68,3 +68,4 @@ module_i2c_driver(bmi160_i2c_driver);
MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
MODULE_DESCRIPTION("BMI160 I2C driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BMI160);
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index 24f7d75c7903..8b573ea99af2 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -65,3 +65,4 @@ module_spi_driver(bmi160_spi_driver);
MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
MODULE_DESCRIPTION("Bosch BMI160 SPI driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BMI160);
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
index ab288186f36e..423cfe526f2a 100644
--- a/drivers/iio/imu/fxos8700_core.c
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -167,7 +167,7 @@
struct fxos8700_data {
struct regmap *regmap;
struct iio_trigger *trig;
- __be16 buf[FXOS8700_DATA_BUF_SIZE] ____cacheline_aligned;
+ __be16 buf[FXOS8700_DATA_BUF_SIZE] __aligned(IIO_DMA_MINALIGN);
};
/* Regmap info */
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 995a9dc06521..3d91469beccb 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -141,7 +141,7 @@ struct inv_icm42600_state {
struct inv_icm42600_suspended suspended;
struct iio_dev *indio_gyro;
struct iio_dev *indio_accel;
- uint8_t buffer[2] ____cacheline_aligned;
+ uint8_t buffer[2] __aligned(IIO_DMA_MINALIGN);
struct inv_icm42600_fifo fifo;
struct {
int64_t gyro;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
index de2a3949dcc7..8b85ee333bf8 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
@@ -39,7 +39,7 @@ struct inv_icm42600_fifo {
size_t accel;
size_t total;
} nb;
- uint8_t data[2080] ____cacheline_aligned;
+ uint8_t data[2080] __aligned(IIO_DMA_MINALIGN);
};
/* FIFO data packet */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 8e14f20b1314..94b54c501ec0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -204,7 +204,7 @@ struct inv_mpu6050_state {
s32 magn_raw_to_gauss[3];
struct iio_mount_matrix magn_orient;
unsigned int suspended_sensors;
- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] ____cacheline_aligned;
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(IIO_DMA_MINALIGN);
};
/*register and associated bit definition*/
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 910397716833..b5e4a4113652 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -54,6 +54,7 @@
#include <linux/iio/sysfs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/minmax.h>
#include <linux/pm.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -1615,8 +1616,7 @@ int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
struct st_lsm6dsx_hw *hw = sensor->hw;
int err;
- if (val < 1 || val > hw->settings->fifo_ops.max_size)
- return -EINVAL;
+ val = clamp_val(val, 1, hw->settings->fifo_ops.max_size);
mutex_lock(&hw->conf_lock);
@@ -2289,9 +2289,9 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return 0;
}
-EXPORT_SYMBOL(st_lsm6dsx_probe);
+EXPORT_SYMBOL_NS(st_lsm6dsx_probe, IIO_LSM6DSX);
-static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
+static int st_lsm6dsx_suspend(struct device *dev)
{
struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
struct st_lsm6dsx_sensor *sensor;
@@ -2330,7 +2330,7 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
return err;
}
-static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
+static int st_lsm6dsx_resume(struct device *dev)
{
struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
struct st_lsm6dsx_sensor *sensor;
@@ -2366,10 +2366,8 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
return err;
}
-const struct dev_pm_ops st_lsm6dsx_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(st_lsm6dsx_suspend, st_lsm6dsx_resume)
-};
-EXPORT_SYMBOL(st_lsm6dsx_pm_ops);
+EXPORT_NS_SIMPLE_DEV_PM_OPS(st_lsm6dsx_pm_ops, st_lsm6dsx_suspend,
+ st_lsm6dsx_resume, IIO_LSM6DSX);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 715fbdc8190e..2ea34c0d3a8c 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
static struct i2c_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_i2c",
- .pm = &st_lsm6dsx_pm_ops,
+ .pm = pm_sleep_ptr(&st_lsm6dsx_pm_ops),
.of_match_table = st_lsm6dsx_i2c_of_match,
},
.probe = st_lsm6dsx_i2c_probe,
@@ -146,3 +146,4 @@ MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics st_lsm6dsx i2c driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_LSM6DSX);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
index 8d4201b86e87..3b0c8b19c448 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
@@ -6,11 +6,11 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/i3c/device.h>
#include <linux/i3c/master.h>
#include <linux/slab.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "st_lsm6dsx.h"
@@ -44,7 +44,7 @@ static int st_lsm6dsx_i3c_probe(struct i3c_device *i3cdev)
static struct i3c_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_i3c",
- .pm = &st_lsm6dsx_pm_ops,
+ .pm = pm_sleep_ptr(&st_lsm6dsx_pm_ops),
},
.probe = st_lsm6dsx_i3c_probe,
.id_table = st_lsm6dsx_i3c_ids,
@@ -54,3 +54,4 @@ module_i3c_driver(st_lsm6dsx_driver);
MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
MODULE_DESCRIPTION("STMicroelectronics st_lsm6dsx i3c driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_LSM6DSX);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index f5767cf76c1d..6a8883f022a8 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
static struct spi_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_spi",
- .pm = &st_lsm6dsx_pm_ops,
+ .pm = pm_sleep_ptr(&st_lsm6dsx_pm_ops),
.of_match_table = st_lsm6dsx_spi_of_match,
},
.probe = st_lsm6dsx_spi_probe,
@@ -146,3 +146,4 @@ MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics st_lsm6dsx spi driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_LSM6DSX);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 06141ca27e1f..acc2b6c05d57 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -630,18 +630,16 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
}
-static ssize_t iio_buffer_read_length(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t length_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%d\n", buffer->length);
}
-static ssize_t iio_buffer_write_length(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t length_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
@@ -672,9 +670,8 @@ out:
return ret ? ret : len;
}
-static ssize_t iio_buffer_show_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
@@ -705,7 +702,7 @@ static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
- unsigned bytes = 0;
+ unsigned int bytes = 0;
int length, i, largest = 0;
/* How much space will the demuxed element take? */
@@ -934,9 +931,9 @@ static int iio_verify_update(struct iio_dev *indio_dev,
* @l: list head used for management
*/
struct iio_demux_table {
- unsigned from;
- unsigned to;
- unsigned length;
+ unsigned int from;
+ unsigned int to;
+ unsigned int length;
struct list_head l;
};
@@ -974,7 +971,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
int ret, in_ind = -1, out_ind, length;
- unsigned in_loc = 0, out_loc = 0;
+ unsigned int in_loc = 0, out_loc = 0;
struct iio_demux_table *p = NULL;
/* Clear out any old demux */
@@ -1292,10 +1289,8 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
iio_buffer_deactivate_all(indio_dev);
}
-static ssize_t iio_buffer_store_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
int ret;
bool requested_state;
@@ -1325,19 +1320,17 @@ done:
return (ret < 0) ? ret : len;
}
-static ssize_t iio_buffer_show_watermark(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%u\n", buffer->watermark);
}
-static ssize_t iio_buffer_store_watermark(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static ssize_t watermark_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
@@ -1369,9 +1362,8 @@ out:
return ret ? ret : len;
}
-static ssize_t iio_dma_show_data_available(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t data_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
@@ -1394,18 +1386,12 @@ static ssize_t direction_show(struct device *dev,
}
}
-static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
- iio_buffer_write_length);
-static struct device_attribute dev_attr_length_ro = __ATTR(length,
- S_IRUGO, iio_buffer_read_length, NULL);
-static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
- iio_buffer_show_enable, iio_buffer_store_enable);
-static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
- iio_buffer_show_watermark, iio_buffer_store_watermark);
-static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
- S_IRUGO, iio_buffer_show_watermark, NULL);
-static DEVICE_ATTR(data_available, S_IRUGO,
- iio_dma_show_data_available, NULL);
+static DEVICE_ATTR_RW(length);
+static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
+static DEVICE_ATTR_RW(enable);
+static DEVICE_ATTR_RW(watermark);
+static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
+static DEVICE_ATTR_RO(data_available);
static DEVICE_ATTR_RO(direction);
/*
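
The industrialio-buffer hunks above rename the sysfs callbacks to the <name>_show()/<name>_store() convention so DEVICE_ATTR_RW()/DEVICE_ATTR_RO() can derive the attribute and its permissions (0644/0444). A minimal sketch for a hypothetical attribute called foo:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	/* parse and apply buf here */
	return len;
}

static DEVICE_ATTR_RW(foo);	/* expands to dev_attr_foo, mode 0644 */
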
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index adf054c7a75e..0f4dbda3b9d3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -334,29 +334,6 @@ s64 iio_get_time_ns(const struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(iio_get_time_ns);
-/**
- * iio_get_time_res() - utility function to get time stamp clock resolution in
- * nano seconds.
- * @indio_dev: device
- */
-unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
-{
- switch (iio_device_get_clock(indio_dev)) {
- case CLOCK_REALTIME:
- case CLOCK_MONOTONIC:
- case CLOCK_MONOTONIC_RAW:
- case CLOCK_BOOTTIME:
- case CLOCK_TAI:
- return hrtimer_resolution;
- case CLOCK_REALTIME_COARSE:
- case CLOCK_MONOTONIC_COARSE:
- return LOW_RES_NSEC;
- default:
- BUG();
- }
-}
-EXPORT_SYMBOL(iio_get_time_res);
-
static int __init iio_init(void)
{
int ret;
@@ -398,7 +375,7 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
{
struct iio_dev *indio_dev = file->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- unsigned val = 0;
+ unsigned int val = 0;
int ret;
if (*ppos > 0)
@@ -428,7 +405,7 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
{
struct iio_dev *indio_dev = file->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- unsigned reg, val;
+ unsigned int reg, val;
char buf[80];
int ret;
@@ -835,7 +812,23 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
- return iio_format_list(buf, vals, type, 3, "[", "]");
+ int length;
+
+ /*
+ * length refers to the array size, not the number of elements.
+ * The purpose is to print the range [min, step, max] so length should
+ * be 3 for IIO_VAL_INT, and 6 for other types.
+ */
+ switch (type) {
+ case IIO_VAL_INT:
+ length = 3;
+ break;
+ default:
+ length = 6;
+ break;
+ }
+
+ return iio_format_list(buf, vals, type, length, "[", "]");
}
static ssize_t iio_read_channel_info_avail(struct device *dev,
@@ -1127,12 +1120,12 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
dev_attr->attr.name = name;
if (readfunc) {
- dev_attr->attr.mode |= S_IRUGO;
+ dev_attr->attr.mode |= 0444;
dev_attr->show = readfunc;
}
if (writefunc) {
- dev_attr->attr.mode |= S_IWUSR;
+ dev_attr->attr.mode |= 0200;
dev_attr->store = writefunc;
}
@@ -1406,29 +1399,27 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
}
}
-static ssize_t iio_show_dev_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sysfs_emit(buf, "%s\n", indio_dev->name);
}
-static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+static DEVICE_ATTR_RO(name);
-static ssize_t iio_show_dev_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sysfs_emit(buf, "%s\n", indio_dev->label);
}
-static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);
+static DEVICE_ATTR_RO(label);
-static ssize_t iio_show_timestamp_clock(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t current_timestamp_clock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
const clockid_t clk = iio_device_get_clock(indio_dev);
@@ -1472,9 +1463,9 @@ static ssize_t iio_show_timestamp_clock(struct device *dev,
return sz;
}
-static ssize_t iio_store_timestamp_clock(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t current_timestamp_clock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
clockid_t clk;
int ret;
@@ -1522,8 +1513,7 @@ int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
return 0;
}
-static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
- iio_show_timestamp_clock, iio_store_timestamp_clock);
+static DEVICE_ATTR_RW(current_timestamp_clock);
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
@@ -1631,7 +1621,7 @@ static void iio_dev_release(struct device *device)
iio_device_detach_buffers(indio_dev);
- ida_simple_remove(&iio_ida, iio_dev_opaque->id);
+ ida_free(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
}
@@ -1653,7 +1643,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
alloc_size = sizeof(struct iio_dev_opaque);
if (sizeof_priv) {
- alloc_size = ALIGN(alloc_size, IIO_ALIGN);
+ alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
alloc_size += sizeof_priv;
}
@@ -1663,7 +1653,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
indio_dev = &iio_dev_opaque->indio_dev;
indio_dev->priv = (char *)iio_dev_opaque +
- ALIGN(sizeof(struct iio_dev_opaque), IIO_ALIGN);
+ ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
indio_dev->dev.parent = parent;
indio_dev->dev.type = &iio_device_type;
@@ -1673,7 +1663,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
mutex_init(&iio_dev_opaque->info_exist_lock);
INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
- iio_dev_opaque->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
if (iio_dev_opaque->id < 0) {
/* cannot use a dev_err as the name isn't available */
pr_err("failed to get device id\n");
@@ -1682,7 +1672,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
}
if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
- ida_simple_remove(&iio_ida, iio_dev_opaque->id);
+ ida_free(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
return NULL;
}
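
The industrialio-core hunks above also convert the deprecated ida_simple_get()/ida_simple_remove() calls to ida_alloc()/ida_free(); ida_alloc(ida, gfp) is equivalent to the old ida_simple_get(ida, 0, 0, gfp). A small sketch around a hypothetical example_ida:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_and_drop_id(void)
{
	int id = ida_alloc(&example_ida, GFP_KERNEL);	/* >= 0 or -errno */

	if (id < 0)
		return id;
	/* ... use the id, e.g. in a device name ... */
	ida_free(&example_ida, id);
	return 0;
}
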
diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c
index 49f775f16ad5..cdaf30a3f233 100644
--- a/drivers/iio/industrialio-sw-device.c
+++ b/drivers/iio/industrialio-sw-device.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(iio_device_types_lock);
static
struct iio_sw_device_type *__iio_find_sw_device_type(const char *name,
- unsigned len)
+ unsigned int len)
{
struct iio_sw_device_type *d = NULL, *iter;
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c
index 9ae793a70b8b..994f03a71520 100644
--- a/drivers/iio/industrialio-sw-trigger.c
+++ b/drivers/iio/industrialio-sw-trigger.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(iio_trigger_types_lock);
static
struct iio_sw_trigger_type *__iio_find_sw_trigger_type(const char *name,
- unsigned len)
+ unsigned int len)
{
struct iio_sw_trigger_type *t = NULL, *iter;
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 585b6cef8fcc..b78814d869b7 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -37,7 +37,7 @@ static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);
/**
- * iio_trigger_read_name() - retrieve useful identifying name
+ * name_show() - retrieve useful identifying name
* @dev: device associated with the iio_trigger
* @attr: pointer to the device_attribute structure that is
* being processed
@@ -46,15 +46,14 @@ static DEFINE_MUTEX(iio_trigger_list_lock);
* Return: a negative number on failure or the number of written
* characters on success.
*/
-static ssize_t iio_trigger_read_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
return sysfs_emit(buf, "%s\n", trig->name);
}
-static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *iio_trig_dev_attrs[] = {
&dev_attr_name.attr,
@@ -64,14 +63,11 @@ ATTRIBUTE_GROUPS(iio_trig_dev);
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
-int __iio_trigger_register(struct iio_trigger *trig_info,
- struct module *this_mod)
+int iio_trigger_register(struct iio_trigger *trig_info)
{
int ret;
- trig_info->owner = this_mod;
-
- trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
+ trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
if (trig_info->id < 0)
return trig_info->id;
@@ -98,10 +94,10 @@ error_device_del:
mutex_unlock(&iio_trigger_list_lock);
device_del(&trig_info->dev);
error_unregister_id:
- ida_simple_remove(&iio_trigger_ida, trig_info->id);
+ ida_free(&iio_trigger_ida, trig_info->id);
return ret;
}
-EXPORT_SYMBOL(__iio_trigger_register);
+EXPORT_SYMBOL(iio_trigger_register);
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
@@ -109,7 +105,7 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
list_del(&trig_info->list);
mutex_unlock(&iio_trigger_list_lock);
- ida_simple_remove(&iio_trigger_ida, trig_info->id);
+ ida_free(&iio_trigger_ida, trig_info->id);
/* Possible issue in here */
device_del(&trig_info->dev);
}
@@ -368,8 +364,8 @@ struct iio_poll_func
va_list vargs;
struct iio_poll_func *pf;
- pf = kmalloc(sizeof *pf, GFP_KERNEL);
- if (pf == NULL)
+ pf = kmalloc(sizeof(*pf), GFP_KERNEL);
+ if (!pf)
return NULL;
va_start(vargs, fmt);
pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
@@ -395,7 +391,7 @@ void iio_dealloc_pollfunc(struct iio_poll_func *pf)
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
- * iio_trigger_read_current() - trigger consumer sysfs query current trigger
+ * current_trigger_show() - trigger consumer sysfs query current trigger
* @dev: device associated with an industrial I/O device
* @attr: pointer to the device_attribute structure that
* is being processed
@@ -407,9 +403,8 @@ EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
* Return: a negative number on failure, the number of characters written
* on success or 0 if no trigger is available
*/
-static ssize_t iio_trigger_read_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t current_trigger_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -419,7 +414,7 @@ static ssize_t iio_trigger_read_current(struct device *dev,
}
/**
- * iio_trigger_write_current() - trigger consumer sysfs set current trigger
+ * current_trigger_store() - trigger consumer sysfs set current trigger
* @dev: device associated with an industrial I/O device
* @attr: device attribute that is being processed
* @buf: string buffer that holds the name of the trigger
@@ -432,10 +427,9 @@ static ssize_t iio_trigger_read_current(struct device *dev,
* Return: negative error code on failure or length of the buffer
* on success
*/
-static ssize_t iio_trigger_write_current(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static ssize_t current_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -494,9 +488,7 @@ out_trigger_put:
return ret;
}
-static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
- iio_trigger_read_current,
- iio_trigger_write_current);
+static DEVICE_ATTR_RW(current_trigger);
static struct attribute *iio_trigger_consumer_attrs[] = {
&dev_attr_current_trigger.attr,
@@ -552,15 +544,16 @@ static void iio_trig_subirqunmask(struct irq_data *d)
trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
-static __printf(2, 0)
+static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
const char *fmt,
va_list vargs)
{
struct iio_trigger *trig;
int i;
- trig = kzalloc(sizeof *trig, GFP_KERNEL);
+ trig = kzalloc(sizeof(*trig), GFP_KERNEL);
if (!trig)
return NULL;
@@ -581,6 +574,10 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
if (trig->name == NULL)
goto free_descs;
+ INIT_LIST_HEAD(&trig->list);
+
+ trig->owner = this_mod;
+
trig->subirq_chip.name = trig->name;
trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
@@ -601,8 +598,9 @@ free_trig:
}
/**
- * iio_trigger_alloc - Allocate a trigger
+ * __iio_trigger_alloc - Allocate a trigger
* @parent: Device to allocate iio_trigger for
+ * @this_mod: module allocating the trigger
* @fmt: trigger name format. If it includes format
* specifiers, the additional arguments following
* format are formatted and inserted in the resulting
@@ -610,18 +608,20 @@ free_trig:
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
-struct iio_trigger *iio_trigger_alloc(struct device *parent, const char *fmt, ...)
+struct iio_trigger *__iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...)
{
struct iio_trigger *trig;
va_list vargs;
va_start(vargs, fmt);
- trig = viio_trigger_alloc(parent, fmt, vargs);
+ trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
va_end(vargs);
return trig;
}
-EXPORT_SYMBOL(iio_trigger_alloc);
+EXPORT_SYMBOL(__iio_trigger_alloc);
void iio_trigger_free(struct iio_trigger *trig)
{
@@ -636,10 +636,11 @@ static void devm_iio_trigger_release(struct device *dev, void *res)
}
/**
- * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
+ * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
* Managed iio_trigger_alloc. iio_trigger allocated with this function is
* automatically freed on driver detach.
* @parent: Device to allocate iio_trigger for
+ * @this_mod: module allocating the trigger
* @fmt: trigger name format. If it includes format
* specifiers, the additional arguments following
* format are formatted and inserted in the resulting
@@ -649,7 +650,9 @@ static void devm_iio_trigger_release(struct device *dev, void *res)
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
-struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fmt, ...)
+struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...)
{
struct iio_trigger **ptr, *trig;
va_list vargs;
@@ -661,7 +664,7 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fm
/* use raw alloc_dr for kmalloc caller tracing */
va_start(vargs, fmt);
- trig = viio_trigger_alloc(parent, fmt, vargs);
+ trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
va_end(vargs);
if (trig) {
*ptr = trig;
@@ -672,7 +675,7 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fm
return trig;
}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
+EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);
static void devm_iio_trigger_unreg(void *trigger_info)
{
@@ -680,10 +683,9 @@ static void devm_iio_trigger_unreg(void *trigger_info)
}
/**
- * __devm_iio_trigger_register - Resource-managed iio_trigger_register()
+ * devm_iio_trigger_register - Resource-managed iio_trigger_register()
* @dev: device this trigger was allocated for
* @trig_info: trigger to register
- * @this_mod: module registering the trigger
*
* Managed iio_trigger_register(). The IIO trigger registered with this
* function is automatically unregistered on driver detach. This function
@@ -693,19 +695,18 @@ static void devm_iio_trigger_unreg(void *trigger_info)
* RETURNS:
* 0 on success, negative error number on failure.
*/
-int __devm_iio_trigger_register(struct device *dev,
- struct iio_trigger *trig_info,
- struct module *this_mod)
+int devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info)
{
int ret;
- ret = __iio_trigger_register(trig_info, this_mod);
+ ret = iio_trigger_register(trig_info);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
-EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
+EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
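
The industrialio-trigger rework above records the owning module when the trigger is allocated rather than when it is registered; the trigger header (not shown in this diff) presumably keeps iio_trigger_alloc()/devm_iio_trigger_alloc() as wrappers that pass THIS_MODULE to __iio_trigger_alloc(), so driver call sites look unchanged. A rough sketch of typical driver usage under that assumption:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

static int example_setup_trigger(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;

	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
				      iio_device_id(indio_dev));
	if (!trig)
		return -ENOMEM;

	/* ownership (THIS_MODULE) was already recorded at allocation */
	return devm_iio_trigger_register(dev, trig);
}
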
diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
index b4e9924094cd..ce5363845b22 100644
--- a/drivers/iio/light/al3010.c
+++ b/drivers/iio/light/al3010.c
@@ -200,17 +200,17 @@ static int al3010_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
-static int __maybe_unused al3010_suspend(struct device *dev)
+static int al3010_suspend(struct device *dev)
{
return al3010_set_pwr(to_i2c_client(dev), false);
}
-static int __maybe_unused al3010_resume(struct device *dev)
+static int al3010_resume(struct device *dev)
{
return al3010_set_pwr(to_i2c_client(dev), true);
}
-static SIMPLE_DEV_PM_OPS(al3010_pm_ops, al3010_suspend, al3010_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(al3010_pm_ops, al3010_suspend, al3010_resume);
static const struct i2c_device_id al3010_id[] = {
{"al3010", },
@@ -228,7 +228,7 @@ static struct i2c_driver al3010_driver = {
.driver = {
.name = AL3010_DRV_NAME,
.of_match_table = al3010_of_match,
- .pm = &al3010_pm_ops,
+ .pm = pm_sleep_ptr(&al3010_pm_ops),
},
.probe = al3010_probe,
.id_table = al3010_id,
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index cc1407ccc10a..bc99179728ed 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -223,17 +223,18 @@ static int al3320a_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
-static int __maybe_unused al3320a_suspend(struct device *dev)
+static int al3320a_suspend(struct device *dev)
{
return al3320a_set_pwr(to_i2c_client(dev), false);
}
-static int __maybe_unused al3320a_resume(struct device *dev)
+static int al3320a_resume(struct device *dev)
{
return al3320a_set_pwr(to_i2c_client(dev), true);
}
-static SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend, al3320a_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend,
+ al3320a_resume);
static const struct i2c_device_id al3320a_id[] = {
{"al3320a", 0},
@@ -251,7 +252,7 @@ static struct i2c_driver al3320a_driver = {
.driver = {
.name = AL3320A_DRV_NAME,
.of_match_table = al3320a_of_match,
- .pm = &al3320a_pm_ops,
+ .pm = pm_sleep_ptr(&al3320a_pm_ops),
},
.probe = al3320a_probe,
.id_table = al3320a_id,
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 3ba2378df3dd..2307fc531752 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -755,21 +755,22 @@ static int as73211_probe(struct i2c_client *client)
return devm_iio_device_register(dev, indio_dev);
}
-static int __maybe_unused as73211_suspend(struct device *dev)
+static int as73211_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
return as73211_power(indio_dev, false);
}
-static int __maybe_unused as73211_resume(struct device *dev)
+static int as73211_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
return as73211_power(indio_dev, true);
}
-static SIMPLE_DEV_PM_OPS(as73211_pm_ops, as73211_suspend, as73211_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(as73211_pm_ops, as73211_suspend,
+ as73211_resume);
static const struct of_device_id as73211_of_match[] = {
{ .compatible = "ams,as73211" },
@@ -787,7 +788,7 @@ static struct i2c_driver as73211_driver = {
.driver = {
.name = AS73211_DRV_NAME,
.of_match_table = as73211_of_match,
- .pm = &as73211_pm_ops,
+ .pm = pm_sleep_ptr(&as73211_pm_ops),
},
.probe_new = as73211_probe,
.id_table = as73211_id,
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 48484b9401b9..471985c220bb 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -277,7 +277,7 @@ static int bh1750_remove(struct i2c_client *client)
return 0;
}
-static int __maybe_unused bh1750_suspend(struct device *dev)
+static int bh1750_suspend(struct device *dev)
{
int ret;
struct bh1750_data *data =
@@ -294,7 +294,7 @@ static int __maybe_unused bh1750_suspend(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(bh1750_pm_ops, bh1750_suspend, NULL);
+static DEFINE_SIMPLE_DEV_PM_OPS(bh1750_pm_ops, bh1750_suspend, NULL);
static const struct i2c_device_id bh1750_id[] = {
{ "bh1710", BH1710 },
@@ -320,7 +320,7 @@ static struct i2c_driver bh1750_driver = {
.driver = {
.name = "bh1750",
.of_match_table = bh1750_of_match,
- .pm = &bh1750_pm_ops,
+ .pm = pm_sleep_ptr(&bh1750_pm_ops),
},
.probe = bh1750_probe,
.remove = bh1750_remove,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index 790d3d613979..fc7141390117 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -213,10 +213,9 @@ static int bh1780_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
- if (ret < 0) {
- dev_err(&client->dev, "failed to power off\n");
- return ret;
- }
+ if (ret < 0)
+ dev_err(&client->dev, "failed to power off (%pe)\n",
+ ERR_PTR(ret));
return 0;
}
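
The bh1780 (and later pa12203001) remove paths above log the failure and return 0, since an error returned from an I2C remove callback cannot be acted on; the %pe specifier with ERR_PTR() prints the symbolic errno (e.g. -EIO). A small sketch with a hypothetical example_power_off():

#include <linux/err.h>
#include <linux/i2c.h>

static int example_power_off(struct i2c_client *client)
{
	return 0;	/* stand-in for the real power-off write */
}

static int example_remove(struct i2c_client *client)
{
	int ret = example_power_off(client);

	if (ret < 0)
		dev_err(&client->dev, "failed to power off (%pe)\n",
			ERR_PTR(ret));

	return 0;	/* nothing useful to do with the error here */
}
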
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 97649944f1df..edbe6a3138d0 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -460,6 +460,8 @@ static int cm32181_probe(struct i2c_client *client)
return PTR_ERR(client);
}
+ i2c_set_clientdata(client, indio_dev);
+
cm32181 = iio_priv(indio_dev);
cm32181->client = client;
cm32181->dev = dev;
@@ -486,6 +488,25 @@ static int cm32181_probe(struct i2c_client *client)
return 0;
}
+static int cm32181_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ CM32181_CMD_ALS_DISABLE);
+}
+
+static int cm32181_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+
+ return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
+
static const struct of_device_id cm32181_of_match[] = {
{ .compatible = "capella,cm3218" },
{ .compatible = "capella,cm32181" },
@@ -506,6 +527,7 @@ static struct i2c_driver cm32181_driver = {
.name = "cm32181",
.acpi_match_table = ACPI_PTR(cm32181_acpi_match),
.of_match_table = cm32181_of_match,
+ .pm = pm_sleep_ptr(&cm32181_pm_ops),
},
.probe_new = cm32181_probe,
};
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 50d34a98839c..c721b69d5095 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -278,7 +278,7 @@ static int cm3605_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused cm3605_pm_suspend(struct device *dev)
+static int cm3605_pm_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct cm3605 *cm3605 = iio_priv(indio_dev);
@@ -289,7 +289,7 @@ static int __maybe_unused cm3605_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused cm3605_pm_resume(struct device *dev)
+static int cm3605_pm_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct cm3605 *cm3605 = iio_priv(indio_dev);
@@ -302,11 +302,8 @@ static int __maybe_unused cm3605_pm_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops cm3605_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(cm3605_pm_suspend,
- cm3605_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(cm3605_dev_pm_ops, cm3605_pm_suspend,
+ cm3605_pm_resume);
static const struct of_device_id cm3605_of_match[] = {
{.compatible = "capella,cm3605"},
@@ -318,7 +315,7 @@ static struct platform_driver cm3605_driver = {
.driver = {
.name = "cm3605",
.of_match_table = cm3605_of_match,
- .pm = &cm3605_dev_pm_ops,
+ .pm = pm_sleep_ptr(&cm3605_dev_pm_ops),
},
.probe = cm3605_probe,
.remove = cm3605_remove,
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index de472f23d1cb..19e529c84e95 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -14,6 +14,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -181,15 +182,12 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
return -ENOMEM;
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
- cros_ec_sensors_capture,
- cros_ec_sensors_push_data);
+ cros_ec_sensors_capture);
if (ret)
return ret;
indio_dev->info = &cros_ec_light_prox_info;
state = iio_priv(indio_dev);
- state->core.type = state->core.resp->info.type;
- state->core.loc = state->core.resp->info.location;
channel = state->channels;
/* Common part */
@@ -240,7 +238,8 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- return devm_iio_device_register(dev, indio_dev);
+ return cros_ec_sensors_core_register(dev, indio_dev,
+ cros_ec_sensors_push_data);
}
static const struct platform_device_id cros_ec_light_prox_ids[] = {
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index c6d1d88d3775..e2707416f9a8 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -635,7 +635,7 @@ static int gp2ap002_remove(struct i2c_client *client)
return 0;
}
-static int __maybe_unused gp2ap002_runtime_suspend(struct device *dev)
+static int gp2ap002_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
@@ -660,7 +660,7 @@ static int __maybe_unused gp2ap002_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused gp2ap002_runtime_resume(struct device *dev)
+static int gp2ap002_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
@@ -691,12 +691,8 @@ static int __maybe_unused gp2ap002_runtime_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops gp2ap002_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(gp2ap002_runtime_suspend,
- gp2ap002_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(gp2ap002_dev_pm_ops, gp2ap002_runtime_suspend,
+ gp2ap002_runtime_resume, NULL);
static const struct i2c_device_id gp2ap002_id_table[] = {
{ "gp2ap002", 0 },
@@ -715,7 +711,7 @@ static struct i2c_driver gp2ap002_driver = {
.driver = {
.name = "gp2ap002",
.of_match_table = gp2ap002_of_match,
- .pm = &gp2ap002_dev_pm_ops,
+ .pm = pm_ptr(&gp2ap002_dev_pm_ops),
},
.probe = gp2ap002_probe,
.remove = gp2ap002_remove,
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 9de3262aa688..ff5996d77818 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -625,7 +625,7 @@ static int isl29028_probe(struct i2c_client *client,
ISL29028_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&client->dev);
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev,
"%s(): iio registration failed with error %d\n",
@@ -646,10 +646,12 @@ static int isl29028_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- return isl29028_clear_configure_reg(chip);
+ isl29028_clear_configure_reg(chip);
+
+ return 0;
}
-static int __maybe_unused isl29028_suspend(struct device *dev)
+static int isl29028_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct isl29028_chip *chip = iio_priv(indio_dev);
@@ -664,7 +666,7 @@ static int __maybe_unused isl29028_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused isl29028_resume(struct device *dev)
+static int isl29028_resume(struct device *dev)
{
/**
* The specific component (ALS/IR or proximity) will enable itself as
@@ -674,11 +676,8 @@ static int __maybe_unused isl29028_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops isl29028_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(isl29028_suspend, isl29028_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(isl29028_pm_ops, isl29028_suspend,
+ isl29028_resume, NULL);
static const struct i2c_device_id isl29028_id[] = {
{"isl29028", 0},
@@ -698,7 +697,7 @@ MODULE_DEVICE_TABLE(of, isl29028_of_match);
static struct i2c_driver isl29028_driver = {
.driver = {
.name = "isl29028",
- .pm = &isl29028_pm_ops,
+ .pm = pm_ptr(&isl29028_pm_ops),
.of_match_table = isl29028_of_match,
},
.probe = isl29028_probe,
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index a55194263d23..5387c12231cf 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -380,7 +380,9 @@ static int jsa1212_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
- return jsa1212_power_off(data);
+ jsa1212_power_off(data);
+
+ return 0;
}
static int jsa1212_suspend(struct device *dev)
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 1880bd5bb258..a326d47afc9b 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -808,7 +808,7 @@ static int opt3001_remove(struct i2c_client *client)
if (ret < 0) {
dev_err(opt->dev, "failed to read register %02x\n",
OPT3001_CONFIGURATION);
- return ret;
+ return 0;
}
reg = ret;
@@ -819,7 +819,6 @@ static int opt3001_remove(struct i2c_client *client)
if (ret < 0) {
dev_err(opt->dev, "failed to write register %02x\n",
OPT3001_CONFIGURATION);
- return ret;
}
return 0;
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 528fa5dd2b13..772874e707ae 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -397,13 +397,19 @@ out_err:
static int pa12203001_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ int ret;
iio_device_unregister(indio_dev);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- return pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+ ret = pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+ if (ret)
+ dev_warn(&client->dev, "Failed to power down (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM)
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index b578b46276cc..f7cc7a6c0c8d 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -654,7 +654,9 @@ static int stk3310_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- return stk3310_set_state(iio_priv(indio_dev), STK3310_STATE_STANDBY);
+ stk3310_set_state(iio_priv(indio_dev), STK3310_STATE_STANDBY);
+
+ return 0;
}
static int stk3310_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 0a278eea36ca..bbb577459fb9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -310,7 +310,7 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
goto out;
if (!chip->int_enabled) {
- cancel_delayed_work(&chip->poweroff_work);
+ cancel_delayed_work_sync(&chip->poweroff_work);
if (!tsl2563_get_power(chip)) {
ret = tsl2563_set_power(chip, 1);
@@ -638,7 +638,7 @@ static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
chip->intr &= ~0x30;
chip->intr |= 0x10;
/* ensure the chip is actually on */
- cancel_delayed_work(&chip->poweroff_work);
+ cancel_delayed_work_sync(&chip->poweroff_work);
if (!tsl2563_get_power(chip)) {
ret = tsl2563_set_power(chip, 1);
if (ret)
@@ -803,12 +803,11 @@ static int tsl2563_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
if (!chip->int_enabled)
- cancel_delayed_work(&chip->poweroff_work);
+ cancel_delayed_work_sync(&chip->poweroff_work);
/* Ensure that interrupts are disabled - then flush any bottom halves */
chip->intr &= ~0x30;
i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
chip->intr);
- flush_scheduled_work();
tsl2563_set_power(chip, 0);
return 0;
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 7e101d5f72ee..82662dab87c0 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -883,10 +883,12 @@ static int tsl2583_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- return tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+ tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+
+ return 0;
}
-static int __maybe_unused tsl2583_suspend(struct device *dev)
+static int tsl2583_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -901,7 +903,7 @@ static int __maybe_unused tsl2583_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused tsl2583_resume(struct device *dev)
+static int tsl2583_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -916,11 +918,8 @@ static int __maybe_unused tsl2583_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops tsl2583_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(tsl2583_suspend, tsl2583_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend,
+ tsl2583_resume, NULL);
static const struct i2c_device_id tsl2583_idtable[] = {
{ "tsl2580", 0 },
@@ -942,7 +941,7 @@ MODULE_DEVICE_TABLE(of, tsl2583_of_match);
static struct i2c_driver tsl2583_driver = {
.driver = {
.name = "tsl2583",
- .pm = &tsl2583_pm_ops,
+ .pm = pm_ptr(&tsl2583_pm_ops),
.of_match_table = tsl2583_of_match,
},
.id_table = tsl2583_idtable,
diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c
index 39e68d0c9d6a..e485a556e6da 100644
--- a/drivers/iio/light/tsl2591.c
+++ b/drivers/iio/light/tsl2591.c
@@ -1019,7 +1019,7 @@ static const struct iio_info tsl2591_info_no_irq = {
.read_avail = tsl2591_read_available,
};
-static int __maybe_unused tsl2591_suspend(struct device *dev)
+static int tsl2591_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tsl2591_chip *chip = iio_priv(indio_dev);
@@ -1032,7 +1032,7 @@ static int __maybe_unused tsl2591_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused tsl2591_resume(struct device *dev)
+static int tsl2591_resume(struct device *dev)
{
int power_state = TSL2591_PWR_ON | TSL2591_ENABLE_ALS;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -1049,10 +1049,8 @@ static int __maybe_unused tsl2591_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops tsl2591_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(tsl2591_suspend, tsl2591_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(tsl2591_pm_ops, tsl2591_suspend,
+ tsl2591_resume, NULL);
static irqreturn_t tsl2591_event_handler(int irq, void *private)
{
@@ -1213,7 +1211,7 @@ MODULE_DEVICE_TABLE(of, tsl2591_of_match);
static struct i2c_driver tsl2591_driver = {
.driver = {
.name = "tsl2591",
- .pm = &tsl2591_pm_ops,
+ .pm = pm_ptr(&tsl2591_pm_ops),
.of_match_table = tsl2591_of_match,
},
.probe_new = tsl2591_probe
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 1492aaf8d84c..80d2299da561 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -907,16 +907,21 @@ out_err:
static int us5182d_remove(struct i2c_client *client)
{
struct us5182d_data *data = iio_priv(i2c_get_clientdata(client));
+ int ret;
iio_device_unregister(i2c_get_clientdata(client));
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- return us5182d_shutdown_en(data, US5182D_CFG0_SHUTDOWN_EN);
+ ret = us5182d_shutdown_en(data, US5182D_CFG0_SHUTDOWN_EN);
+ if (ret)
+ dev_warn(&client->dev, "Failed to shut down (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM)
static int us5182d_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
@@ -939,11 +944,10 @@ static int us5182d_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops us5182d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(us5182d_suspend, us5182d_resume)
- SET_RUNTIME_PM_OPS(us5182d_suspend, us5182d_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(us5182d_suspend, us5182d_resume)
+ RUNTIME_PM_OPS(us5182d_suspend, us5182d_resume, NULL)
};
static const struct acpi_device_id us5182d_acpi_match[] = {
@@ -969,7 +973,7 @@ MODULE_DEVICE_TABLE(of, us5182d_of_match);
static struct i2c_driver us5182d_driver = {
.driver = {
.name = US5182D_DRV_NAME,
- .pm = &us5182d_pm_ops,
+ .pm = pm_ptr(&us5182d_pm_ops),
.of_match_table = us5182d_of_match,
.acpi_match_table = ACPI_PTR(us5182d_acpi_match),
},
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index e02e92bc2928..3db4e26731bb 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1115,16 +1115,22 @@ static int vcnl4000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
pm_runtime_dont_use_autosuspend(&client->dev);
pm_runtime_disable(&client->dev);
iio_device_unregister(indio_dev);
pm_runtime_set_suspended(&client->dev);
- return data->chip_spec->set_power_state(data, false);
+ ret = data->chip_spec->set_power_state(data, false);
+ if (ret)
+ dev_warn(&client->dev, "Failed to power down (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
-static int __maybe_unused vcnl4000_runtime_suspend(struct device *dev)
+static int vcnl4000_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct vcnl4000_data *data = iio_priv(indio_dev);
@@ -1132,7 +1138,7 @@ static int __maybe_unused vcnl4000_runtime_suspend(struct device *dev)
return data->chip_spec->set_power_state(data, false);
}
-static int __maybe_unused vcnl4000_runtime_resume(struct device *dev)
+static int vcnl4000_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct vcnl4000_data *data = iio_priv(indio_dev);
@@ -1140,17 +1146,13 @@ static int __maybe_unused vcnl4000_runtime_resume(struct device *dev)
return data->chip_spec->set_power_state(data, true);
}
-static const struct dev_pm_ops vcnl4000_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(vcnl4000_runtime_suspend,
- vcnl4000_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(vcnl4000_pm_ops, vcnl4000_runtime_suspend,
+ vcnl4000_runtime_resume, NULL);
static struct i2c_driver vcnl4000_driver = {
.driver = {
.name = VCNL4000_DRV_NAME,
- .pm = &vcnl4000_pm_ops,
+ .pm = pm_ptr(&vcnl4000_pm_ops),
.of_match_table = vcnl_4000_of_match,
},
.probe = vcnl4000_probe,
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index da2bf622a67b..6a196cf2270b 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -604,17 +604,23 @@ fail_poweroff:
static int vcnl4035_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ int ret;
pm_runtime_dont_use_autosuspend(&client->dev);
pm_runtime_disable(&client->dev);
iio_device_unregister(indio_dev);
pm_runtime_set_suspended(&client->dev);
- return vcnl4035_set_als_power_state(iio_priv(indio_dev),
- VCNL4035_MODE_ALS_DISABLE);
+ ret = vcnl4035_set_als_power_state(iio_priv(indio_dev),
+ VCNL4035_MODE_ALS_DISABLE);
+ if (ret)
+ dev_warn(&client->dev, "Failed to put device into standby (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
-static int __maybe_unused vcnl4035_runtime_suspend(struct device *dev)
+static int vcnl4035_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct vcnl4035_data *data = iio_priv(indio_dev);
@@ -626,7 +632,7 @@ static int __maybe_unused vcnl4035_runtime_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused vcnl4035_runtime_resume(struct device *dev)
+static int vcnl4035_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct vcnl4035_data *data = iio_priv(indio_dev);
@@ -643,12 +649,8 @@ static int __maybe_unused vcnl4035_runtime_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops vcnl4035_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(vcnl4035_runtime_suspend,
- vcnl4035_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(vcnl4035_pm_ops, vcnl4035_runtime_suspend,
+ vcnl4035_runtime_resume, NULL);
static const struct i2c_device_id vcnl4035_id[] = {
{ "vcnl4035", 0 },
@@ -665,7 +667,7 @@ MODULE_DEVICE_TABLE(of, vcnl4035_of_match);
static struct i2c_driver vcnl4035_driver = {
.driver = {
.name = VCNL4035_DRV_NAME,
- .pm = &vcnl4035_pm_ops,
+ .pm = pm_ptr(&vcnl4035_pm_ops),
.of_match_table = vcnl4035_of_match,
},
.probe = vcnl4035_probe,
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 3c937c55a10d..9a7800cdfee2 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -846,7 +846,7 @@ static int veml6030_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
-static int __maybe_unused veml6030_runtime_suspend(struct device *dev)
+static int veml6030_runtime_suspend(struct device *dev)
{
int ret;
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
@@ -859,7 +859,7 @@ static int __maybe_unused veml6030_runtime_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused veml6030_runtime_resume(struct device *dev)
+static int veml6030_runtime_resume(struct device *dev)
{
int ret;
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
@@ -872,12 +872,8 @@ static int __maybe_unused veml6030_runtime_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops veml6030_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(veml6030_runtime_suspend,
- veml6030_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(veml6030_pm_ops, veml6030_runtime_suspend,
+ veml6030_runtime_resume, NULL);
static const struct of_device_id veml6030_of_match[] = {
{ .compatible = "vishay,veml6030" },
@@ -895,7 +891,7 @@ static struct i2c_driver veml6030_driver = {
.driver = {
.name = "veml6030",
.of_match_table = veml6030_of_match,
- .pm = &veml6030_pm_ops,
+ .pm = pm_ptr(&veml6030_pm_ops),
},
.probe = veml6030_probe,
.id_table = veml6030_id,
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index e54feacfb980..c89a91db0690 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -985,7 +985,7 @@ static int ak8974_remove(struct i2c_client *i2c)
return 0;
}
-static int __maybe_unused ak8974_runtime_suspend(struct device *dev)
+static int ak8974_runtime_suspend(struct device *dev)
{
struct ak8974 *ak8974 =
iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
@@ -996,7 +996,7 @@ static int __maybe_unused ak8974_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ak8974_runtime_resume(struct device *dev)
+static int ak8974_runtime_resume(struct device *dev)
{
struct ak8974 *ak8974 =
iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
@@ -1024,12 +1024,8 @@ out_regulator_disable:
return ret;
}
-static const struct dev_pm_ops ak8974_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(ak8974_runtime_suspend,
- ak8974_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(ak8974_dev_pm_ops, ak8974_runtime_suspend,
+ ak8974_runtime_resume, NULL);
static const struct i2c_device_id ak8974_id[] = {
{"ami305", 0 },
@@ -1050,7 +1046,7 @@ MODULE_DEVICE_TABLE(of, ak8974_of_match);
static struct i2c_driver ak8974_driver = {
.driver = {
.name = "ak8974",
- .pm = &ak8974_dev_pm_ops,
+ .pm = pm_ptr(&ak8974_dev_pm_ops),
.of_match_table = ak8974_of_match,
},
.probe = ak8974_probe,
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 64e8b04e654b..06d5a1ef1fbd 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -985,7 +985,7 @@ err_poweroff:
}
EXPORT_SYMBOL_NS(bmc150_magn_probe, IIO_BMC150_MAGN);
-int bmc150_magn_remove(struct device *dev)
+void bmc150_magn_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
@@ -1008,7 +1008,6 @@ int bmc150_magn_remove(struct device *dev)
mutex_unlock(&data->mutex);
regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
- return 0;
}
EXPORT_SYMBOL_NS(bmc150_magn_remove, IIO_BMC150_MAGN);
diff --git a/drivers/iio/magnetometer/bmc150_magn.h b/drivers/iio/magnetometer/bmc150_magn.h
index 3b69232afd2c..98c086d10c13 100644
--- a/drivers/iio/magnetometer/bmc150_magn.h
+++ b/drivers/iio/magnetometer/bmc150_magn.h
@@ -7,6 +7,6 @@ extern const struct dev_pm_ops bmc150_magn_pm_ops;
int bmc150_magn_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name);
-int bmc150_magn_remove(struct device *dev);
+void bmc150_magn_remove(struct device *dev);
#endif /* _BMC150_MAGN_H_ */
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index e39b89661ad1..65c004411d0f 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -36,7 +36,9 @@ static int bmc150_magn_i2c_probe(struct i2c_client *client,
static int bmc150_magn_i2c_remove(struct i2c_client *client)
{
- return bmc150_magn_remove(&client->dev);
+ bmc150_magn_remove(&client->dev);
+
+ return 0;
}
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 92eb2d156ddb..4a63b2da9df0 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Device driver for the the HMC5843 multi-chip module designed
+ * Device driver for the HMC5843 multi-chip module designed
* for low field magnetic sensing.
*
* Copyright (C) 2010 Texas Instruments
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 707ba25360b8..69938204456f 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -544,7 +544,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
indio_dev->info = &rm3100_info;
indio_dev->channels = rm3100_channels;
indio_dev->num_channels = ARRAY_SIZE(rm3100_channels);
- indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
+ indio_dev->modes = INDIO_DIRECT_MODE;
if (!irq)
data->use_interrupt = false;
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index b2bc637150bf..aeaa4da6923b 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -965,7 +965,7 @@ static int yas5xx_remove(struct i2c_client *i2c)
return 0;
}
-static int __maybe_unused yas5xx_runtime_suspend(struct device *dev)
+static int yas5xx_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct yas5xx *yas5xx = iio_priv(indio_dev);
@@ -976,7 +976,7 @@ static int __maybe_unused yas5xx_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused yas5xx_runtime_resume(struct device *dev)
+static int yas5xx_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct yas5xx *yas5xx = iio_priv(indio_dev);
@@ -1011,12 +1011,8 @@ out_reset:
return ret;
}
-static const struct dev_pm_ops yas5xx_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(yas5xx_runtime_suspend,
- yas5xx_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(yas5xx_dev_pm_ops, yas5xx_runtime_suspend,
+ yas5xx_runtime_resume, NULL);
static const struct i2c_device_id yas5xx_id[] = {
{"yas530", },
@@ -1038,7 +1034,7 @@ static struct i2c_driver yas5xx_driver = {
.driver = {
.name = "yas5xx",
.of_match_table = yas5xx_of_match,
- .pm = &yas5xx_dev_pm_ops,
+ .pm = pm_ptr(&yas5xx_dev_pm_ops),
},
.probe = yas5xx_probe,
.remove = yas5xx_remove,
diff --git a/drivers/iio/potentiometer/ad5110.c b/drivers/iio/potentiometer/ad5110.c
index d4eeedae56e5..8fbcce482989 100644
--- a/drivers/iio/potentiometer/ad5110.c
+++ b/drivers/iio/potentiometer/ad5110.c
@@ -63,10 +63,10 @@ struct ad5110_data {
struct mutex lock;
const struct ad5110_cfg *cfg;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec ad5110_channels[] = {
diff --git a/drivers/iio/potentiometer/ad5272.c b/drivers/iio/potentiometer/ad5272.c
index d8cbd170262f..ed5fc0b50fe9 100644
--- a/drivers/iio/potentiometer/ad5272.c
+++ b/drivers/iio/potentiometer/ad5272.c
@@ -50,7 +50,7 @@ struct ad5272_data {
struct i2c_client *client;
struct mutex lock;
const struct ad5272_cfg *cfg;
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec ad5272_channel = {
diff --git a/drivers/iio/potentiometer/max5481.c b/drivers/iio/potentiometer/max5481.c
index 098d144a8fdd..b40e5ac218d7 100644
--- a/drivers/iio/potentiometer/max5481.c
+++ b/drivers/iio/potentiometer/max5481.c
@@ -44,7 +44,7 @@ static const struct max5481_cfg max5481_cfg[] = {
struct max5481_data {
struct spi_device *spi;
const struct max5481_cfg *cfg;
- u8 msg[3] ____cacheline_aligned;
+ u8 msg[3] __aligned(IIO_DMA_MINALIGN);
};
#define MAX5481_CHANNEL { \
diff --git a/drivers/iio/potentiometer/mcp41010.c b/drivers/iio/potentiometer/mcp41010.c
index 30a4594d4e11..2b73c7540209 100644
--- a/drivers/iio/potentiometer/mcp41010.c
+++ b/drivers/iio/potentiometer/mcp41010.c
@@ -60,7 +60,7 @@ struct mcp41010_data {
const struct mcp41010_cfg *cfg;
struct mutex lock; /* Protect write sequences */
unsigned int value[MCP41010_MAX_WIPERS]; /* Cache wiper values */
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
#define MCP41010_CHANNEL(ch) { \
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index 7c8c18ab8764..7890c0993ec4 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -129,7 +129,7 @@ struct mcp4131_data {
struct spi_device *spi;
const struct mcp4131_cfg *cfg;
struct mutex lock;
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
#define MCP4131_CHANNEL(ch) { \
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index bf8167f43c56..fe7aa81e7cc9 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1136,7 +1136,7 @@ int bmp280_common_probe(struct device *dev,
return devm_iio_device_register(dev, indio_dev);
}
-EXPORT_SYMBOL(bmp280_common_probe);
+EXPORT_SYMBOL_NS(bmp280_common_probe, IIO_BMP280);
static int bmp280_runtime_suspend(struct device *dev)
{
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 35045bd92846..bf4a7a617537 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -68,3 +68,4 @@ module_i2c_driver(bmp280_i2c_driver);
MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BMP280);
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index da136dbadc8f..969698518984 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -39,7 +39,7 @@ const struct regmap_config bmp180_regmap_config = {
.writeable_reg = bmp180_is_writeable_reg,
.volatile_reg = bmp180_is_volatile_reg,
};
-EXPORT_SYMBOL(bmp180_regmap_config);
+EXPORT_SYMBOL_NS(bmp180_regmap_config, IIO_BMP280);
static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
{
@@ -82,4 +82,4 @@ const struct regmap_config bmp280_regmap_config = {
.writeable_reg = bmp280_is_writeable_reg,
.volatile_reg = bmp280_is_volatile_reg,
};
-EXPORT_SYMBOL(bmp280_regmap_config);
+EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 41f6cc56d229..4cfaf3e869b8 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -118,3 +118,4 @@ module_spi_driver(bmp280_spi_driver);
MODULE_DESCRIPTION("BMP280 SPI bus driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_BMP280);
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 2f882e109423..2649c2f89e89 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -14,6 +14,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -138,15 +139,12 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
return -ENOMEM;
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
- cros_ec_sensors_capture,
- cros_ec_sensors_push_data);
+ cros_ec_sensors_capture);
if (ret)
return ret;
indio_dev->info = &cros_ec_baro_info;
state = iio_priv(indio_dev);
- state->core.type = state->core.resp->info.type;
- state->core.loc = state->core.resp->info.location;
channel = state->channels;
/* Common part */
channel->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
@@ -186,7 +184,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- return devm_iio_device_register(dev, indio_dev);
+ return cros_ec_sensors_core_register(dev, indio_dev,
+ cros_ec_sensors_push_data);
}
static const struct platform_device_id cros_ec_baro_ids[] = {
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index ade73267d5eb..5f6bb3603a8b 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -47,7 +47,7 @@ struct dlh_state {
struct dlh_info info;
bool use_interrupt;
struct completion completion;
- u8 rx_buf[DLH_NUM_READ_BYTES] ____cacheline_aligned;
+ u8 rx_buf[DLH_NUM_READ_BYTES];
};
static struct dlh_info dlh_info_tbl[] = {
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 67891ce2bd09..ebc95cf8f5f4 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -65,7 +65,7 @@ struct as3935_state {
u8 chan;
s64 timestamp __aligned(8);
} scan;
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec as3935_channels[] = {
diff --git a/drivers/iio/proximity/cros_ec_mkbp_proximity.c b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
index 8213b0081713..571ea1812246 100644
--- a/drivers/iio/proximity/cros_ec_mkbp_proximity.c
+++ b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
@@ -184,7 +184,7 @@ static const struct iio_info cros_ec_mkbp_proximity_info = {
.write_event_config = cros_ec_mkbp_proximity_write_event_config,
};
-static __maybe_unused int cros_ec_mkbp_proximity_resume(struct device *dev)
+static int cros_ec_mkbp_proximity_resume(struct device *dev)
{
struct cros_ec_mkbp_proximity_data *data = dev_get_drvdata(dev);
struct cros_ec_device *ec = data->ec;
@@ -201,8 +201,8 @@ static __maybe_unused int cros_ec_mkbp_proximity_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cros_ec_mkbp_proximity_pm_ops, NULL,
- cros_ec_mkbp_proximity_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cros_ec_mkbp_proximity_pm_ops, NULL,
+ cros_ec_mkbp_proximity_resume);
static int cros_ec_mkbp_proximity_probe(struct platform_device *pdev)
{
@@ -260,7 +260,7 @@ static struct platform_driver cros_ec_mkbp_proximity_driver = {
.driver = {
.name = "cros-ec-mkbp-proximity",
.of_match_table = cros_ec_mkbp_proximity_of_match,
- .pm = &cros_ec_mkbp_proximity_pm_ops,
+ .pm = pm_sleep_ptr(&cros_ec_mkbp_proximity_pm_ops),
},
.probe = cros_ec_mkbp_proximity_probe,
.remove = cros_ec_mkbp_proximity_remove,
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index d56e037378de..2ad69b150902 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -173,7 +173,7 @@ static int ping_read(struct iio_dev *indio_dev)
/*
* read error code of laser ping sensor and give users chance to
- * figure out error by using dynamic debuggging
+ * figure out error by using dynamic debugging
*/
if (data->cfg->laserping_error) {
if ((time_ns > 12500000) && (time_ns <= 13500000)) {
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 4e6286765f01..05015351a34a 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -37,9 +37,8 @@
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/sched.h>
@@ -261,7 +260,7 @@ static int srf04_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = dev;
- data->cfg = of_match_device(of_srf04_match, dev)->data;
+ data->cfg = device_get_match_data(dev);
mutex_init(&data->lock);
init_completion(&data->rising);
@@ -289,10 +288,8 @@ static int srf04_probe(struct platform_device *pdev)
return PTR_ERR(data->gpiod_power);
}
if (data->gpiod_power) {
-
- if (of_property_read_u32(dev->of_node, "startup-time-ms",
- &data->startup_time_ms))
- data->startup_time_ms = 100;
+ data->startup_time_ms = 100;
+ device_property_read_u32(dev, "startup-time-ms", &data->startup_time_ms);
dev_dbg(dev, "using power gpio: startup-time-ms=%d\n",
data->startup_time_ms);
}
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index ac1ab7e89d4e..7ed11339c31e 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -354,7 +354,7 @@ static ssize_t srf08_write_sensitivity(struct srf08_data *data,
return -EINVAL;
for (i = 0; i < data->chip_info->num_sensitivity_avail; i++)
- if (val && (val == data->chip_info->sensitivity_avail[i])) {
+ if (val == data->chip_info->sensitivity_avail[i]) {
regval = i;
break;
}
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 63fbcaa4cac8..edb5a2ce4e27 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -52,6 +52,16 @@
#define SX9324_REG_CLK_SPRD 0x15
#define SX9324_REG_AFE_CTRL0 0x20
+#define SX9324_REG_AFE_CTRL0_RINT_SHIFT 6
+#define SX9324_REG_AFE_CTRL0_RINT_MASK \
+ GENMASK(SX9324_REG_AFE_CTRL0_RINT_SHIFT + 1, \
+ SX9324_REG_AFE_CTRL0_RINT_SHIFT)
+#define SX9324_REG_AFE_CTRL0_RINT_LOWEST 0x00
+#define SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT 4
+#define SX9324_REG_AFE_CTRL0_CSIDLE_MASK \
+ GENMASK(SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT + 1, \
+ SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT)
#define SX9324_REG_AFE_CTRL1 0x21
#define SX9324_REG_AFE_CTRL2 0x22
#define SX9324_REG_AFE_CTRL3 0x23
@@ -72,7 +82,9 @@
#define SX9324_REG_AFE_CTRL8 0x2c
#define SX9324_REG_AFE_CTRL8_RESERVED 0x10
#define SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM 0x02
+#define SX9324_REG_AFE_CTRL8_RESFILTIN_MASK GENMASK(3, 0)
#define SX9324_REG_AFE_CTRL9 0x2d
+#define SX9324_REG_AFE_CTRL9_AGAIN_MASK GENMASK(3, 0)
#define SX9324_REG_AFE_CTRL9_AGAIN_1 0x08
#define SX9324_REG_PROX_CTRL0 0x30
@@ -93,7 +105,7 @@
#define SX9324_REG_PROX_CTRL4_AVGNEGFILT_MASK GENMASK(5, 3)
#define SX9324_REG_PROX_CTRL4_AVGNEG_FILT_2 0x08
#define SX9324_REG_PROX_CTRL4_AVGPOSFILT_MASK GENMASK(2, 0)
-#define SX9324_REG_PROX_CTRL3_AVGPOS_FILT_256 0x04
+#define SX9324_REG_PROX_CTRL4_AVGPOS_FILT_256 0x04
#define SX9324_REG_PROX_CTRL5 0x35
#define SX9324_REG_PROX_CTRL5_HYST_MASK GENMASK(5, 4)
#define SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK GENMASK(3, 2)
@@ -782,7 +794,7 @@ static const struct sx_common_reg_default sx9324_default_regs[] = {
*/
{ SX9324_REG_GNRL_CTRL1, SX9324_REG_GNRL_CTRL1_PAUSECTRL },
- { SX9324_REG_AFE_CTRL0, 0x00 },
+ { SX9324_REG_AFE_CTRL0, SX9324_REG_AFE_CTRL0_RINT_LOWEST },
{ SX9324_REG_AFE_CTRL3, 0x00 },
{ SX9324_REG_AFE_CTRL4, SX9324_REG_AFE_CTRL4_FREQ_83_33HZ |
SX9324_REG_AFE_CTRL4_RES_100 },
@@ -810,7 +822,7 @@ static const struct sx_common_reg_default sx9324_default_regs[] = {
{ SX9324_REG_PROX_CTRL3, SX9324_REG_PROX_CTRL3_AVGDEB_2SAMPLES |
SX9324_REG_PROX_CTRL3_AVGPOS_THRESH_16K },
{ SX9324_REG_PROX_CTRL4, SX9324_REG_PROX_CTRL4_AVGNEG_FILT_2 |
- SX9324_REG_PROX_CTRL3_AVGPOS_FILT_256 },
+ SX9324_REG_PROX_CTRL4_AVGPOS_FILT_256 },
{ SX9324_REG_PROX_CTRL5, 0x00 },
{ SX9324_REG_PROX_CTRL6, SX9324_REG_PROX_CTRL6_PROXTHRESH_32 },
{ SX9324_REG_PROX_CTRL7, SX9324_REG_PROX_CTRL6_PROXTHRESH_32 },
@@ -863,6 +875,10 @@ static const struct sx_common_reg_default *
sx9324_get_default_reg(struct device *dev, int idx,
struct sx_common_reg_default *reg_def)
{
+ static const char * const sx9324_rints[] = { "lowest", "low", "high",
+ "highest" };
+ static const char * const sx9324_csidle[] = { "hi-z", "hi-z", "gnd",
+ "vdd" };
#define SX9324_PIN_DEF "semtech,ph0-pin"
#define SX9324_RESOLUTION_DEF "semtech,ph01-resolution"
#define SX9324_PROXRAW_DEF "semtech,ph01-proxraw-strength"
@@ -870,6 +886,7 @@ sx9324_get_default_reg(struct device *dev, int idx,
char prop[] = SX9324_PROXRAW_DEF;
u32 start = 0, raw = 0, pos = 0;
int ret, count, ph, pin;
+ const char *res;
memcpy(reg_def, &sx9324_default_regs[idx], sizeof(*reg_def));
switch (reg_def->reg) {
@@ -893,6 +910,26 @@ sx9324_get_default_reg(struct device *dev, int idx,
SX9324_REG_AFE_PH0_PIN_MASK(pin);
reg_def->def = raw;
break;
+ case SX9324_REG_AFE_CTRL0:
+ ret = device_property_read_string(dev,
+ "semtech,cs-idle-sleep", &res);
+ if (!ret)
+ ret = match_string(sx9324_csidle, ARRAY_SIZE(sx9324_csidle), res);
+ if (ret >= 0) {
+ reg_def->def &= ~SX9324_REG_AFE_CTRL0_CSIDLE_MASK;
+ reg_def->def |= ret << SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT;
+ }
+
+ ret = device_property_read_string(dev,
+ "semtech,int-comp-resistor", &res);
+ if (ret)
+ break;
+ ret = match_string(sx9324_rints, ARRAY_SIZE(sx9324_rints), res);
+ if (ret < 0)
+ break;
+ reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK;
+ reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT;
+ break;
case SX9324_REG_AFE_CTRL4:
case SX9324_REG_AFE_CTRL7:
if (reg_def->reg == SX9324_REG_AFE_CTRL4)
@@ -912,6 +949,39 @@ sx9324_get_default_reg(struct device *dev, int idx,
reg_def->def |= FIELD_PREP(SX9324_REG_AFE_CTRL4_RESOLUTION_MASK,
raw);
break;
+ case SX9324_REG_AFE_CTRL8:
+ ret = device_property_read_u32(dev,
+ "semtech,input-precharge-resistor-ohms",
+ &raw);
+ if (ret)
+ break;
+
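+ /* the RESFILTIN field is programmed in 2 kOhm steps, hence the division by 2000 */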
+ reg_def->def &= ~SX9324_REG_AFE_CTRL8_RESFILTIN_MASK;
+ reg_def->def |= FIELD_PREP(SX9324_REG_AFE_CTRL8_RESFILTIN_MASK,
+ raw / 2000);
+ break;
+
+ case SX9324_REG_AFE_CTRL9:
+ ret = device_property_read_u32(dev,
+ "semtech,input-analog-gain", &raw);
+ if (ret)
+ break;
+ /*
+ * The analog gain has the following setting:
+ * +---------+----------------+----------------+
+ * | dt(raw) | physical value | register value |
+ * +---------+----------------+----------------+
+ * | 0 | x1.247 | 6 |
+ * | 1 | x1 | 8 |
+ * | 2 | x0.768 | 11 |
+ * | 3 | x0.552 | 15 |
+ * +---------+----------------+----------------+
+ */
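+ /* the quadratic 6 + raw * (raw + 3) / 2 reproduces the register values listed above */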
+ reg_def->def &= ~SX9324_REG_AFE_CTRL9_AGAIN_MASK;
+ reg_def->def |= FIELD_PREP(SX9324_REG_AFE_CTRL9_AGAIN_MASK,
+ 6 + raw * (raw + 3) / 2);
+ break;
+
case SX9324_REG_ADV_CTRL5:
ret = device_property_read_u32(dev, "semtech,startup-sensor",
&start);
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index 3ebb30c8a4f6..d9a12e6be6ca 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -51,6 +51,8 @@
#define SX9360_REG_GNRL_REG_2_FREQ(_r) (SX9360_FOSC_HZ / ((_r) * 8192))
#define SX9360_REG_AFE_CTRL1 0x21
+#define SX9360_REG_AFE_CTRL1_RESFILTIN_MASK GENMASK(3, 0)
+#define SX9360_REG_AFE_CTRL1_RESFILTIN_0OHMS 0
#define SX9360_REG_AFE_PARAM0_PHR 0x22
#define SX9360_REG_AFE_PARAM1_PHR 0x23
#define SX9360_REG_AFE_PARAM0_PHM 0x24
@@ -671,7 +673,7 @@ static const struct sx_common_reg_default sx9360_default_regs[] = {
{ SX9360_REG_GNRL_CTRL1, 0x00 },
{ SX9360_REG_GNRL_CTRL2, SX9360_REG_GNRL_CTRL2_PERIOD_102MS },
- { SX9360_REG_AFE_CTRL1, 0x00 },
+ { SX9360_REG_AFE_CTRL1, SX9360_REG_AFE_CTRL1_RESFILTIN_0OHMS },
{ SX9360_REG_AFE_PARAM0_PHR, SX9360_REG_AFE_PARAM0_RSVD |
SX9360_REG_AFE_PARAM0_RESOLUTION_128 },
{ SX9360_REG_AFE_PARAM1_PHR, SX9360_REG_AFE_PARAM1_AGAIN_PHM_6PF |
@@ -722,6 +724,17 @@ sx9360_get_default_reg(struct device *dev, int idx,
memcpy(reg_def, &sx9360_default_regs[idx], sizeof(*reg_def));
switch (reg_def->reg) {
+ case SX9360_REG_AFE_CTRL1:
+ ret = device_property_read_u32(dev,
+ "semtech,input-precharge-resistor-ohms",
+ &raw);
+ if (ret)
+ break;
+
+ reg_def->def &= ~SX9360_REG_AFE_CTRL1_RESFILTIN_MASK;
+ reg_def->def |= FIELD_PREP(SX9360_REG_AFE_CTRL1_RESFILTIN_MASK,
+ raw / 2000);
+ break;
case SX9360_REG_AFE_PARAM0_PHR:
case SX9360_REG_AFE_PARAM0_PHM:
ret = device_property_read_u32(dev, "semtech,resolution", &raw);
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index 8ad814d96b7e..d70a6b4f0bf8 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -5,7 +5,6 @@
* Common part of most Semtech SAR sensor.
*/
-#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/delay.h>
@@ -434,7 +433,7 @@ static void sx_common_regulator_disable(void *_data)
#define SX_COMMON_SOFT_RESET 0xde
-static int sx_common_init_device(struct iio_dev *indio_dev)
+static int sx_common_init_device(struct device *dev, struct iio_dev *indio_dev)
{
struct sx_common_data *data = iio_priv(indio_dev);
struct sx_common_reg_default tmp;
@@ -456,8 +455,7 @@ static int sx_common_init_device(struct iio_dev *indio_dev)
/* Program defaults from constant or BIOS. */
for (i = 0; i < data->chip_info->num_default_regs; i++) {
- initval = data->chip_info->ops.get_default_reg(&indio_dev->dev,
- i, &tmp);
+ initval = data->chip_info->ops.get_default_reg(dev, i, &tmp);
ret = regmap_write(data->regmap, initval->reg, initval->def);
if (ret)
return ret;
@@ -520,8 +518,6 @@ int sx_common_probe(struct i2c_client *client,
if (ret)
return dev_err_probe(dev, ret, "error reading WHOAMI\n");
- ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(dev));
- indio_dev->dev.of_node = client->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->chip_info->iio_channels;
@@ -530,7 +526,7 @@ int sx_common_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
- ret = sx_common_init_device(indio_dev);
+ ret = sx_common_init_device(dev, indio_dev);
if (ret)
return dev_err_probe(dev, ret, "Unable to initialize sensor\n");
diff --git a/drivers/iio/proximity/vcnl3020.c b/drivers/iio/proximity/vcnl3020.c
index ff83638db16f..cbc8400c773c 100644
--- a/drivers/iio/proximity/vcnl3020.c
+++ b/drivers/iio/proximity/vcnl3020.c
@@ -71,14 +71,14 @@ static const int vcnl3020_prox_sampling_frequency[][2] = {
* @dev: vcnl3020 device.
* @rev: revision id.
* @lock: lock for protecting access to device hardware registers.
- * @buf: DMA safe __be16 buffer.
+ * @buf: __be16 buffer.
*/
struct vcnl3020_data {
struct regmap *regmap;
struct device *dev;
u8 rev;
struct mutex lock;
- __be16 buf ____cacheline_aligned;
+ __be16 buf;
};
/**
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index a284b20529fb..c7c4d33d340f 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -15,7 +15,9 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -42,6 +44,8 @@
struct vl53l0x_data {
struct i2c_client *client;
struct completion completion;
+ struct regulator *vdd_supply;
+ struct gpio_desc *reset_gpio;
};
static irqreturn_t vl53l0x_handle_irq(int irq, void *priv)
@@ -57,11 +61,15 @@ static irqreturn_t vl53l0x_handle_irq(int irq, void *priv)
static int vl53l0x_configure_irq(struct i2c_client *client,
struct iio_dev *indio_dev)
{
+ int irq_flags = irq_get_trigger_type(client->irq);
struct vl53l0x_data *data = iio_priv(indio_dev);
int ret;
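+ /* fall back to a falling-edge trigger when the firmware does not specify one */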
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_FALLING;
+
ret = devm_request_irq(&client->dev, client->irq, vl53l0x_handle_irq,
- IRQF_TRIGGER_FALLING, indio_dev->name, indio_dev);
+ irq_flags, indio_dev->name, indio_dev);
if (ret) {
dev_err(&client->dev, "devm_request_irq error: %d\n", ret);
return ret;
@@ -186,10 +194,35 @@ static const struct iio_info vl53l0x_info = {
.read_raw = vl53l0x_read_raw,
};
+static void vl53l0x_power_off(void *_data)
+{
+ struct vl53l0x_data *data = _data;
+
+ gpiod_set_value_cansleep(data->reset_gpio, 1);
+
+ regulator_disable(data->vdd_supply);
+}
+
+static int vl53l0x_power_on(struct vl53l0x_data *data)
+{
+ int ret;
+
+ ret = regulator_enable(data->vdd_supply);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+
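+ /* give the sensor time to start up after releasing reset */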
+ usleep_range(3200, 5000);
+
+ return 0;
+}
+
static int vl53l0x_probe(struct i2c_client *client)
{
struct vl53l0x_data *data;
struct iio_dev *indio_dev;
+ int error;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -204,6 +237,26 @@ static int vl53l0x_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_BYTE_DATA))
return -EOPNOTSUPP;
+ data->vdd_supply = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_supply))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vdd_supply),
+ "Unable to get VDD regulator\n");
+
+ data->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(data->reset_gpio),
+ "Cannot get reset GPIO\n");
+
+ error = vl53l0x_power_on(data);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "Failed to power on the chip\n");
+
+ error = devm_add_action_or_reset(&client->dev, vl53l0x_power_off, data);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "Failed to install poweroff action\n");
+
indio_dev->name = "vl53l0x";
indio_dev->info = &vl53l0x_info;
indio_dev->channels = vl53l0x_channels;
diff --git a/drivers/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index 9746bd935628..9d95241bdf8f 100644
--- a/drivers/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -41,7 +41,7 @@ struct ad2s1200_state {
struct spi_device *sdev;
struct gpio_desc *sample;
struct gpio_desc *rdvel;
- __be16 rx ____cacheline_aligned;
+ __be16 rx __aligned(IIO_DMA_MINALIGN);
};
static int ad2s1200_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/resolver/ad2s90.c b/drivers/iio/resolver/ad2s90.c
index d6a91f137e13..be6836e55376 100644
--- a/drivers/iio/resolver/ad2s90.c
+++ b/drivers/iio/resolver/ad2s90.c
@@ -24,7 +24,7 @@
struct ad2s90_state {
struct mutex lock; /* lock to protect rx buffer */
struct spi_device *sdev;
- u8 rx[2] ____cacheline_aligned;
+ u8 rx[2] __aligned(IIO_DMA_MINALIGN);
};
static int ad2s90_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 4fc654275155..b652d2b39bcf 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -204,11 +204,11 @@ struct ltc2983_data {
u8 num_channels;
u8 iio_channels;
/*
- * DMA (thus cache coherency maintenance) requires the
+ * DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
* Holds the converted temperature
*/
- __be32 temp ____cacheline_aligned;
+ __be32 temp __aligned(IIO_DMA_MINALIGN);
};
struct ltc2983_sensor {
@@ -1534,7 +1534,7 @@ static int ltc2983_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
-static int __maybe_unused ltc2983_resume(struct device *dev)
+static int ltc2983_resume(struct device *dev)
{
struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
int dummy;
@@ -1545,14 +1545,15 @@ static int __maybe_unused ltc2983_resume(struct device *dev)
return ltc2983_setup(st, false);
}
-static int __maybe_unused ltc2983_suspend(struct device *dev)
+static int ltc2983_suspend(struct device *dev)
{
struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
return regmap_write(st->regmap, LTC2983_STATUS_REG, LTC2983_SLEEP);
}
-static SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend, ltc2983_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend,
+ ltc2983_resume);
static const struct spi_device_id ltc2983_id_table[] = {
{ "ltc2983" },
@@ -1570,7 +1571,7 @@ static struct spi_driver ltc2983_driver = {
.driver = {
.name = "ltc2983",
.of_match_table = ltc2983_of_match,
- .pm = &ltc2983_pm_ops,
+ .pm = pm_sleep_ptr(&ltc2983_pm_ops),
},
.probe = ltc2983_probe,
.id_table = ltc2983_id_table,
diff --git a/drivers/iio/temperature/max31865.c b/drivers/iio/temperature/max31865.c
index e3bb78184c6e..29e23652ba5a 100644
--- a/drivers/iio/temperature/max31865.c
+++ b/drivers/iio/temperature/max31865.c
@@ -55,7 +55,7 @@ struct max31865_data {
struct mutex lock;
bool filter_50hz;
bool three_wire;
- u8 buf[2] ____cacheline_aligned;
+ u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static int max31865_read(struct max31865_data *data, u8 reg,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 98c41cddc6f0..c28a7a6dea5f 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -122,7 +122,7 @@ struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
- u8 buffer[16] ____cacheline_aligned;
+ u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
char tc_type;
};
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 4c66c3f18c34..0b6e4e278a2f 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -5,15 +5,25 @@
# Keep in alphabetical order
config IIO_RESCALE_KUNIT_TEST
- bool "Test IIO rescale conversion functions"
- depends on KUNIT=y && IIO_RESCALE=y
+ tristate "Test IIO rescale conversion functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT && IIO_RESCALE
default KUNIT_ALL_TESTS
help
- If you want to run tests on the iio-rescale code say Y here.
+ Build unit tests for the iio-rescale code.
- This takes advantage of ARCH=um to run tests and should be used by
- developers to tests their changes to the rescaling logic.
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
-config IIO_TEST_FORMAT
- bool "Test IIO formatting functions"
- depends on KUNIT=y
+ If unsure, say N.
+
+config IIO_FORMAT_KUNIT_TEST
+ tristate "Test IIO formatting functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Build unit tests for the IIO formatting functions.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index 880360f8d02c..d76eaf36da82 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -5,5 +5,5 @@
# Keep in alphabetical order
obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
-obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
+obj-$(CONFIG_IIO_FORMAT_KUNIT_TEST) += iio-test-format.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iio/test/iio-test-format.c b/drivers/iio/test/iio-test-format.c
index 237321436b83..fc67e6b73df7 100644
--- a/drivers/iio/test/iio-test-format.c
+++ b/drivers/iio/test/iio-test-format.c
@@ -265,3 +265,7 @@ static struct kunit_suite iio_format_test_suite = {
.test_cases = iio_format_test_cases,
};
kunit_test_suite(iio_format_test_suite);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Test IIO formatting functions");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index 0b6699bfd553..cc782ccff880 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -708,3 +708,8 @@ static struct kunit_suite iio_rescale_test_suite = {
.test_cases = iio_rescale_test_cases,
};
kunit_test_suite(iio_rescale_test_suite);
+
+MODULE_AUTHOR("Liam Beguin <liambeguin@gmail.com>");
+MODULE_DESCRIPTION("Test IIO rescale conversion functions");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_RESCALE);
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
index 98cdc7e47f3d..2e447a3f047d 100644
--- a/drivers/iio/trigger/stm32-lptimer-trigger.c
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -11,8 +11,10 @@
#include <linux/iio/timer/stm32-lptim-trigger.h>
#include <linux/mfd/stm32-lptimer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
/* List Low-Power Timer triggers */
static const char * const stm32_lptim_triggers[] = {
@@ -77,7 +79,7 @@ static int stm32_lptim_trigger_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (of_property_read_u32(pdev->dev.of_node, "reg", &index))
+ if (device_property_read_u32(&pdev->dev, "reg", &index))
return -EINVAL;
if (index >= ARRAY_SIZE(stm32_lptim_triggers))
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 33d3ce9c888e..aa36ac618e72 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -78,20 +78,21 @@ config INFINIBAND_VIRT_DMA
def_bool !HIGHMEM
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
-source "drivers/infiniband/hw/mthca/Kconfig"
-source "drivers/infiniband/hw/qib/Kconfig"
+source "drivers/infiniband/hw/bnxt_re/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
+source "drivers/infiniband/hw/erdma/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
+source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
-source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
-source "drivers/infiniband/hw/usnic/Kconfig"
-source "drivers/infiniband/hw/hns/Kconfig"
-source "drivers/infiniband/hw/bnxt_re/Kconfig"
-source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/qedr/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
+source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/sw/siw/Kconfig"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fabca5e51e3d..46d06678dfbe 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -11,6 +11,7 @@
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
+#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
@@ -20,6 +21,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
@@ -168,6 +170,9 @@ static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
+static struct rb_root id_table = RB_ROOT;
+/* Serialize operations on the id_table tree */
+static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;
@@ -202,6 +207,11 @@ struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
}
}
+struct id_table_entry {
+ struct list_head id_list;
+ struct rb_node rb_node;
+};
+
struct cma_device {
struct list_head list;
struct ib_device *device;
@@ -420,11 +430,21 @@ static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
return hdr->ip_version >> 4;
}
-static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
+static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
+static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
+}
+
+static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
+}
+
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
struct in_device *in_dev = NULL;
@@ -445,6 +465,117 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
return (in_dev) ? 0 : -ENODEV;
}
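+/*
+ * rb-tree comparator for id_table: order entries by bound netdev ifindex,
+ * then by address family, then by destination address.
+ */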
+static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
+ struct id_table_entry *entry_b)
+{
+ struct rdma_id_private *id_priv = list_first_entry(
+ &entry_b->id_list, struct rdma_id_private, id_list_entry);
+ int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
+ struct sockaddr *sb = cma_dst_addr(id_priv);
+
+ if (ifindex_a != ifindex_b)
+ return (ifindex_a > ifindex_b) ? 1 : -1;
+
+ if (sa->sa_family != sb->sa_family)
+ return sa->sa_family - sb->sa_family;
+
+ if (sa->sa_family == AF_INET)
+ return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
+ (char *)&((struct sockaddr_in *)sb)->sin_addr,
+ sizeof(((struct sockaddr_in *)sa)->sin_addr));
+
+ return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+ &((struct sockaddr_in6 *)sb)->sin6_addr);
+}
+
+static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
+{
+ struct rb_node **new, *parent = NULL;
+ struct id_table_entry *this, *node;
+ unsigned long flags;
+ int result;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ new = &id_table.rb_node;
+ while (*new) {
+ this = container_of(*new, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(
+ node_id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(node_id_priv), this);
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else {
+ list_add_tail(&node_id_priv->id_list_entry,
+ &this->id_list);
+ kfree(node);
+ goto unlock;
+ }
+ }
+
+ INIT_LIST_HEAD(&node->id_list);
+ list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
+
+ rb_link_node(&node->rb_node, parent, new);
+ rb_insert_color(&node->rb_node, &id_table);
+
+unlock:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return 0;
+}
+
+static struct id_table_entry *
+node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
+{
+ struct rb_node *node = root->rb_node;
+ struct id_table_entry *data;
+ int result;
+
+ while (node) {
+ data = container_of(node, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(ifindex, sa, data);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+
+ return NULL;
+}
+
+static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
+{
+ struct id_table_entry *data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (list_empty(&id_priv->id_list_entry))
+ goto out;
+
+ data = node_from_ndev_ip(&id_table,
+ id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(id_priv));
+ if (!data)
+ goto out;
+
+ list_del_init(&id_priv->id_list_entry);
+ if (list_empty(&data->id_list)) {
+ rb_erase(&data->rb_node, &id_table);
+ kfree(data);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+}
+
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
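For readers less familiar with the kernel rbtree API, the hunk above follows the standard <linux/rbtree.h> insert/lookup pattern. A minimal sketch with a hypothetical integer key (demo_node, demo_insert and demo_lookup are illustrative names, not part of this patch; the real code keys on ifindex plus destination address and chains equal keys on a per-node list):

#include <linux/rbtree.h>

struct demo_node {
	struct rb_node rb;
	int key;
};

static struct demo_node *demo_lookup(struct rb_root *root, int key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct demo_node *e = rb_entry(n, struct demo_node, rb);

		if (key < e->key)
			n = n->rb_left;
		else if (key > e->key)
			n = n->rb_right;
		else
			return e;
	}
	return NULL;
}

static void demo_insert(struct rb_root *root, struct demo_node *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct demo_node *e = rb_entry(*link, struct demo_node, rb);

		parent = *link;
		if (new->key < e->key)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right; /* equal keys go right in this sketch */
	}
	/* Link the new node into place, then rebalance/recolour the tree. */
	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
}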
@@ -481,16 +612,6 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
-{
- return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-}
-
-static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
-{
- return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
-}
-
static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
return id_priv->id.route.addr.src_addr.ss_family;
@@ -861,6 +982,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
INIT_LIST_HEAD(&id_priv->device_item);
+ INIT_LIST_HEAD(&id_priv->id_list_entry);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -1883,6 +2005,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
cma_cancel_operation(id_priv, state);
rdma_restrack_del(&id_priv->res);
+ cma_remove_id_from_tree(id_priv);
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
@@ -3172,8 +3295,11 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
cma_id_get(id_priv);
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
- else if (rdma_protocol_roce(id->device, id->port_num))
+ else if (rdma_protocol_roce(id->device, id->port_num)) {
ret = cma_resolve_iboe_route(id_priv);
+ if (!ret)
+ cma_add_id_to_tree(id_priv);
+ }
else if (rdma_protocol_iwarp(id->device, id->port_num))
ret = cma_resolve_iw_route(id_priv);
else
@@ -4922,10 +5048,87 @@ out:
return ret;
}
+static void cma_netevent_work_handler(struct work_struct *_work)
+{
+ struct rdma_id_private *id_priv =
+ container_of(_work, struct rdma_id_private, id.net_work);
+ struct rdma_cm_event event = {};
+
+ mutex_lock(&id_priv->handler_mutex);
+
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
+
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
+
+ if (cma_cm_event_handler(id_priv, &event)) {
+ __acquire(&id_priv->handler_mutex);
+ id_priv->cm_id.ib = NULL;
+ cma_id_put(id_priv);
+ destroy_id_handler_unlock(id_priv);
+ return;
+ }
+
+out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
+}
+
+static int cma_netevent_callback(struct notifier_block *self,
+ unsigned long event, void *ctx)
+{
+ struct id_table_entry *ips_node = NULL;
+ struct rdma_id_private *current_id;
+ struct neighbour *neigh = ctx;
+ unsigned long flags;
+
+ if (event != NETEVENT_NEIGH_UPDATE)
+ return NOTIFY_DONE;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (neigh->tbl->family == AF_INET6) {
+ struct sockaddr_in6 neigh_sock_6;
+
+ neigh_sock_6.sin6_family = AF_INET6;
+ neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_6);
+ } else if (neigh->tbl->family == AF_INET) {
+ struct sockaddr_in neigh_sock_4;
+
+ neigh_sock_4.sin_family = AF_INET;
+ neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_4);
+ } else
+ goto out;
+
+ if (!ips_node)
+ goto out;
+
+ list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
+ if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
+ neigh->ha, ETH_ALEN))
+ continue;
+ INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
+ cma_id_get(current_id);
+ queue_work(cma_wq, &current_id->id.net_work);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return NOTIFY_DONE;
+}
+
static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
+static struct notifier_block cma_netevent_cb = {
+ .notifier_call = cma_netevent_callback
+};
+
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
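The netevent hook added above keys off NETEVENT_NEIGH_UPDATE: when a neighbour entry's hardware address no longer matches the address cached in a RoCE cm_id bound to that (ifindex, destination IP) pair, work is queued to deliver RDMA_CM_EVENT_UNREACHABLE. As a hedged stand-alone sketch (demo_* names are illustrative, not part of the patch), the registration skeleton it builds on looks like this:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <net/netevent.h>
#include <net/neighbour.h>

static int demo_netevent_cb(struct notifier_block *nb, unsigned long event,
			    void *ctx)
{
	struct neighbour *neigh = ctx;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;

	/* neigh->dev, neigh->primary_key and neigh->ha identify the entry. */
	pr_debug("neighbour update on %s\n", neigh->dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netevent_cb,
};

/* register_netevent_notifier(&demo_nb) at module init,
 * unregister_netevent_notifier(&demo_nb) on teardown.
 */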
@@ -5148,6 +5351,7 @@ static int __init cma_init(void)
ib_sa_register_client(&sa_client);
register_netdevice_notifier(&cma_nb);
+ register_netevent_notifier(&cma_netevent_cb);
ret = ib_register_client(&cma_client);
if (ret)
@@ -5162,6 +5366,7 @@ static int __init cma_init(void)
err_ib:
ib_unregister_client(&cma_client);
err:
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
@@ -5174,6 +5379,7 @@ static void __exit cma_cleanup(void)
{
cma_configfs_exit();
ib_unregister_client(&cma_client);
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 757a0ef79872..b7354c94cf1b 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -64,6 +64,7 @@ struct rdma_id_private {
struct list_head listen_item;
struct list_head listen_list;
};
+ struct list_head id_list_entry;
struct cma_device *cma_dev;
struct list_head mc_list;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 94d83b665a2f..29b1ab1d5f93 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -68,7 +68,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj,
* In exclusive access mode, we check that the counter is zero (nobody
* claimed this object) and we set it to -1. Releasing a shared access
* lock is done simply by decreasing the counter. As for exclusive
- * access locks, since only a single one of them is is allowed
+ * access locks, since only a single one of them is allowed
* concurrently, setting the counter to zero is enough for releasing
* this lock.
*/
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 68197e576433..e958c43dd28f 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -250,7 +250,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
/**
* is_upper_ndev_bond_master_filter - Check if a given netdevice
- * is bond master device of netdevice of the the RDMA device of port.
+ * is bond master device of netdevice of the RDMA device of port.
* @ib_dev: IB device to check
* @port: Port to consider for adding default GID
* @rdma_ndev: Pointer to rdma netdevice
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4d98f931a13d..8367974b7998 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -274,33 +274,6 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return 1;
}
-static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
- u32 sg_cnt, enum dma_data_direction dir)
-{
- if (is_pci_p2pdma_page(sg_page(sg)))
- pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
- else
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
-}
-
-static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- int nents;
-
- if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
- if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
- return 0;
- nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
- sgt->orig_nents, dir);
- if (!nents)
- return -EIO;
- sgt->nents = nents;
- return 0;
- }
- return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
-}
-
/**
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
* @ctx: context to initialize
@@ -327,7 +300,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
};
int ret;
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
sg_cnt = sgt.nents;
@@ -366,7 +339,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
return ret;
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
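The rw.c hunks drop the open-coded P2PDMA special-casing in favour of plain ib_dma_map_sgtable_attrs()/ib_dma_unmap_sgtable_attrs() pairs, since the generic mapping path now covers P2PDMA pages. A minimal sketch of that pairing (demo_map_example is an illustrative name, assuming the usual scatterlist and ib_verbs includes), not part of the patch:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static int demo_map_example(struct ib_device *dev, struct sg_table *sgt,
			    enum dma_data_direction dir)
{
	int ret;

	ret = ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
	if (ret)
		return ret;	/* nothing is left mapped on failure */

	/* ... build and post work requests using sgt->sgl / sgt->nents ... */

	ib_dma_unmap_sgtable_attrs(dev, sgt, dir, 0);
	return 0;
}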
@@ -414,12 +387,12 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
if (prot_sg_cnt) {
- ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
if (ret)
goto out_unmap_sg;
}
@@ -486,9 +459,9 @@ out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
if (prot_sgt.nents)
- rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -621,7 +594,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
break;
}
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
@@ -649,8 +622,8 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
kfree(ctx->reg);
if (prot_sg_cnt)
- rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index fce80a4a5147..04c04e6d24c3 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
struct scatterlist *sg;
unsigned long start, end, cur = 0;
unsigned int nmap = 0;
+ long ret;
int i;
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
* may be not up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index fba0b3be903e..6b3a88046125 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
+obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 79401e6c6aa9..785c37cae3c0 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -173,7 +173,7 @@ struct bnxt_re_dev {
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
- /* QP for for handling QP1 packets */
+ /* QP for handling QP1 packets */
struct bnxt_re_gsi_context gsi_ctx;
struct bnxt_re_stats stats;
atomic_t nq_alloc_cnt;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c16017f6e8db..14392c942f49 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN_V(1);
}
- skb_get(skb);
- rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
- skb_trim(skb, sizeof(*rpl5));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
new file mode 100644
index 000000000000..169038e3ceb1
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_ERDMA
+ tristate "Alibaba Elastic RDMA Adapter (ERDMA) support"
+ depends on PCI_MSI && 64BIT
+ depends on INFINIBAND_ADDR_TRANS
+ depends on INFINIBAND_USER_ACCESS
+ help
+ This is an RDMA/iWarp driver for the Alibaba Elastic RDMA Adapter
+ (ERDMA), which supports RDMA features in the Alibaba cloud environment.
+
+ To compile this driver as a module, choose M here. The module will be
+ called erdma.
diff --git a/drivers/infiniband/hw/erdma/Makefile b/drivers/infiniband/hw/erdma/Makefile
new file mode 100644
index 000000000000..51d2ef91905a
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_ERDMA) := erdma.o
+
+erdma-y := erdma_cm.o erdma_main.o erdma_cmdq.o erdma_cq.o erdma_verbs.o erdma_qp.o erdma_eq.o
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
new file mode 100644
index 000000000000..2aae635c1c8d
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_H__
+#define __ERDMA_H__
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+
+#define DRV_MODULE_NAME "erdma"
+#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
+
+struct erdma_eq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+
+ u16 ci;
+ u16 rsvd;
+
+ atomic64_t event_num;
+ atomic64_t notify_num;
+
+ u64 __iomem *db_addr;
+ u64 *db_record;
+};
+
+struct erdma_cmdq_sq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u16 ci;
+ u16 pi;
+
+ u16 wqebb_cnt;
+
+ u64 *db_record;
+};
+
+struct erdma_cmdq_cq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u32 ci;
+ u32 cmdsn;
+
+ u64 *db_record;
+
+ atomic64_t armed_num;
+};
+
+enum {
+ ERDMA_CMD_STATUS_INIT,
+ ERDMA_CMD_STATUS_ISSUED,
+ ERDMA_CMD_STATUS_FINISHED,
+ ERDMA_CMD_STATUS_TIMEOUT
+};
+
+struct erdma_comp_wait {
+ struct completion wait_event;
+ u32 cmd_status;
+ u32 ctx_id;
+ u16 sq_pi;
+ u8 comp_status;
+ u8 rsvd;
+ u32 comp_data[4];
+};
+
+enum {
+ ERDMA_CMDQ_STATE_OK_BIT = 0,
+ ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
+ ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
+};
+
+#define ERDMA_CMDQ_TIMEOUT_MS 15000
+#define ERDMA_REG_ACCESS_WAIT_MS 20
+#define ERDMA_WAIT_DEV_DONE_CNT 500
+
+struct erdma_cmdq {
+ unsigned long *comp_wait_bitmap;
+ struct erdma_comp_wait *wait_pool;
+ spinlock_t lock;
+
+ bool use_event;
+
+ struct erdma_cmdq_sq sq;
+ struct erdma_cmdq_cq cq;
+ struct erdma_eq eq;
+
+ unsigned long state;
+
+ struct semaphore credits;
+ u16 max_outstandings;
+};
+
+#define COMPROMISE_CC ERDMA_CC_CUBIC
+enum erdma_cc_alg {
+ ERDMA_CC_NEWRENO = 0,
+ ERDMA_CC_CUBIC,
+ ERDMA_CC_HPCC_RTT,
+ ERDMA_CC_HPCC_ECN,
+ ERDMA_CC_HPCC_INT,
+ ERDMA_CC_METHODS_NUM
+};
+
+struct erdma_devattr {
+ u32 fw_version;
+
+ unsigned char peer_addr[ETH_ALEN];
+
+ int numa_node;
+ enum erdma_cc_alg cc;
+ u32 grp_num;
+ u32 irq_num;
+
+ bool disable_dwqe;
+ u16 dwqe_pages;
+ u16 dwqe_entries;
+
+ u32 max_qp;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_ord;
+ u32 max_ird;
+
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u64 max_mr_size;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_mw;
+ u32 local_dma_key;
+};
+
+#define ERDMA_IRQNAME_SIZE 50
+
+struct erdma_irq {
+ char name[ERDMA_IRQNAME_SIZE];
+ u32 msix_vector;
+ cpumask_t affinity_hint_mask;
+};
+
+struct erdma_eq_cb {
+ bool ready;
+ void *dev; /* All EQs use this field to get the erdma_dev struct */
+ struct erdma_irq irq;
+ struct erdma_eq eq;
+ struct tasklet_struct tasklet;
+};
+
+struct erdma_resource_cb {
+ unsigned long *bitmap;
+ spinlock_t lock;
+ u32 next_alloc_idx;
+ u32 max_cap;
+};
+
+enum {
+ ERDMA_RES_TYPE_PD = 0,
+ ERDMA_RES_TYPE_STAG_IDX = 1,
+ ERDMA_RES_CNT = 2,
+};
+
+#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
+#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
+
+struct erdma_dev {
+ struct ib_device ibdev;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct notifier_block netdev_nb;
+
+ resource_size_t func_bar_addr;
+ resource_size_t func_bar_len;
+ u8 __iomem *func_bar;
+
+ struct erdma_devattr attrs;
+ /* physical port state (only one port per device) */
+ enum ib_port_state state;
+
+ /* cmdq and aeq use the same msix vector */
+ struct erdma_irq comm_irq;
+ struct erdma_cmdq cmdq;
+ struct erdma_eq aeq;
+ struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];
+
+ spinlock_t lock;
+ struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
+ struct xarray qp_xa;
+ struct xarray cq_xa;
+
+ u32 next_alloc_qpn;
+ u32 next_alloc_cqn;
+
+ spinlock_t db_bitmap_lock;
+ /* We provide max 64 uContexts, each of which has one SQ doorbell page. */
+ DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
+ /*
+ * We provide max 496 uContexts, each of which has one normal SQ Db
+ * and one directWQE Db.
+ */
+ DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);
+
+ atomic_t num_ctx;
+ struct list_head cep_list;
+};
+
+static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
+{
+ idx &= (depth - 1);
+
+ return qbuf + (idx << shift);
+}
+
+static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct erdma_dev, ibdev);
+}
+
+static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
+{
+ return readl(dev->func_bar + reg);
+}
+
+static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
+{
+ return readq(dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
+{
+ writel(value, dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
+{
+ writeq(value, dev->func_bar + reg);
+}
+
+static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
+ u32 filed_mask)
+{
+ u32 val = erdma_reg_read32(dev, reg);
+
+ return FIELD_GET(filed_mask, val);
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev);
+void erdma_finish_cmdq_init(struct erdma_dev *dev);
+void erdma_cmdq_destroy(struct erdma_dev *dev);
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+ u64 *resp0, u64 *resp1);
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
+
+int erdma_ceqs_init(struct erdma_dev *dev);
+void erdma_ceqs_uninit(struct erdma_dev *dev);
+void notify_eq(struct erdma_eq *eq);
+void *get_next_valid_eqe(struct erdma_eq *eq);
+
+int erdma_aeq_init(struct erdma_dev *dev);
+void erdma_aeq_destroy(struct erdma_dev *dev);
+
+void erdma_aeq_event_handler(struct erdma_dev *dev);
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
+
+#endif
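A short illustration of how the register helpers at the end of erdma.h are meant to be used with <linux/bitfield.h> masks; DEMO_REG and DEMO_STATUS_MASK are hypothetical names (the real register layout lives in erdma_hw.h, not shown here), and GENMASK() is assumed from <linux/bits.h>:

#define DEMO_REG		0x20
#define DEMO_STATUS_MASK	GENMASK(3, 0)

static inline u32 demo_read_status(struct erdma_dev *dev)
{
	/* Reads the 32-bit register and extracts bits 3..0 via FIELD_GET(). */
	return erdma_reg_read32_filed(dev, DEMO_REG, DEMO_STATUS_MASK);
}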
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
new file mode 100644
index 000000000000..f13f16479eca
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -0,0 +1,1430 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Fredy Neeser */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+static struct workqueue_struct *erdma_cm_wq;
+
+static void erdma_cm_llp_state_change(struct sock *sk);
+static void erdma_cm_llp_data_ready(struct sock *sk);
+static void erdma_cm_llp_error_report(struct sock *sk);
+
+static void erdma_sk_assign_cm_upcalls(struct sock *sk)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = erdma_cm_llp_state_change;
+ sk->sk_data_ready = erdma_cm_llp_data_ready;
+ sk->sk_error_report = erdma_cm_llp_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_save_upcalls(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ cep->sk_state_change = sk->sk_state_change;
+ cep->sk_data_ready = sk->sk_data_ready;
+ cep->sk_error_report = sk->sk_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_restore_upcalls(struct sock *sk, struct erdma_cep *cep)
+{
+ sk->sk_state_change = cep->sk_state_change;
+ sk->sk_data_ready = cep->sk_data_ready;
+ sk->sk_error_report = cep->sk_error_report;
+ sk->sk_user_data = NULL;
+}
+
+static void erdma_socket_disassoc(struct socket *s)
+{
+ struct sock *sk = s->sk;
+ struct erdma_cep *cep;
+
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ cep = sk_to_cep(sk);
+ if (cep) {
+ erdma_sk_restore_upcalls(sk, cep);
+ erdma_cep_put(cep);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void erdma_cep_socket_assoc(struct erdma_cep *cep, struct socket *s)
+{
+ cep->sock = s;
+ erdma_cep_get(cep);
+ s->sk->sk_user_data = cep;
+
+ erdma_sk_save_upcalls(s->sk);
+ erdma_sk_assign_cm_upcalls(s->sk);
+}
+
+static void erdma_disassoc_listen_cep(struct erdma_cep *cep)
+{
+ if (cep->listen_cep) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
+}
+
+static struct erdma_cep *erdma_cep_alloc(struct erdma_dev *dev)
+{
+ struct erdma_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!cep)
+ return NULL;
+
+ INIT_LIST_HEAD(&cep->listenq);
+ INIT_LIST_HEAD(&cep->devq);
+ INIT_LIST_HEAD(&cep->work_freelist);
+
+ kref_init(&cep->ref);
+ cep->state = ERDMA_EPSTATE_IDLE;
+ init_waitqueue_head(&cep->waitq);
+ spin_lock_init(&cep->lock);
+ cep->dev = dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cep->devq, &dev->cep_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return cep;
+}
+
+static void erdma_cm_free_work(struct erdma_cep *cep)
+{
+ struct list_head *w, *tmp;
+ struct erdma_cm_work *work;
+
+ list_for_each_safe(w, tmp, &cep->work_freelist) {
+ work = list_entry(w, struct erdma_cm_work, list);
+ list_del(&work->list);
+ kfree(work);
+ }
+}
+
+static void erdma_cancel_mpatimer(struct erdma_cep *cep)
+{
+ spin_lock_bh(&cep->lock);
+ if (cep->mpa_timer) {
+ if (cancel_delayed_work(&cep->mpa_timer->work)) {
+ erdma_cep_put(cep);
+ kfree(cep->mpa_timer);
+ }
+ cep->mpa_timer = NULL;
+ }
+ spin_unlock_bh(&cep->lock);
+}
+
+static void erdma_put_work(struct erdma_cm_work *work)
+{
+ INIT_LIST_HEAD(&work->list);
+ spin_lock_bh(&work->cep->lock);
+ list_add(&work->list, &work->cep->work_freelist);
+ spin_unlock_bh(&work->cep->lock);
+}
+
+static void erdma_cep_set_inuse(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ while (cep->in_use) {
+ spin_unlock_irqrestore(&cep->lock, flags);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irqsave(&cep->lock, flags);
+ }
+
+ cep->in_use = 1;
+ spin_unlock_irqrestore(&cep->lock, flags);
+}
+
+static void erdma_cep_set_free(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ cep->in_use = 0;
+ spin_unlock_irqrestore(&cep->lock, flags);
+
+ wake_up(&cep->waitq);
+}
+
+static void __erdma_cep_dealloc(struct kref *ref)
+{
+ struct erdma_cep *cep = container_of(ref, struct erdma_cep, ref);
+ struct erdma_dev *dev = cep->dev;
+ unsigned long flags;
+
+ WARN_ON(cep->listen_cep);
+
+ kfree(cep->private_data);
+ kfree(cep->mpa.pdata);
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist))
+ erdma_cm_free_work(cep);
+ spin_unlock_bh(&cep->lock);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&cep->devq);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ kfree(cep);
+}
+
+static struct erdma_cm_work *erdma_get_work(struct erdma_cep *cep)
+{
+ struct erdma_cm_work *work = NULL;
+
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist)) {
+ work = list_entry(cep->work_freelist.next, struct erdma_cm_work,
+ list);
+ list_del_init(&work->list);
+ }
+
+ spin_unlock_bh(&cep->lock);
+ return work;
+}
+
+static int erdma_cm_alloc_work(struct erdma_cep *cep, int num)
+{
+ struct erdma_cm_work *work;
+
+ while (num--) {
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ if (!(list_empty(&cep->work_freelist)))
+ erdma_cm_free_work(cep);
+ return -ENOMEM;
+ }
+ work->cep = cep;
+ INIT_LIST_HEAD(&work->list);
+ list_add(&work->list, &cep->work_freelist);
+ }
+
+ return 0;
+}
+
+static int erdma_cm_upcall(struct erdma_cep *cep, enum iw_cm_event_type reason,
+ int status)
+{
+ struct iw_cm_event event;
+ struct iw_cm_id *cm_id;
+
+ memset(&event, 0, sizeof(event));
+ event.status = status;
+ event.event = reason;
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
+ event.provider_data = cep;
+ cm_id = cep->listen_cep->cm_id;
+
+ event.ird = cep->dev->attrs.max_ird;
+ event.ord = cep->dev->attrs.max_ord;
+ } else {
+ cm_id = cep->cm_id;
+ }
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
+ reason == IW_CM_EVENT_CONNECT_REPLY) {
+ u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);
+
+ if (pd_len && cep->mpa.pdata) {
+ event.private_data_len = pd_len;
+ event.private_data = cep->mpa.pdata;
+ }
+
+ getname_local(cep->sock, &event.local_addr);
+ getname_peer(cep->sock, &event.remote_addr);
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+void erdma_qp_cm_drop(struct erdma_qp *qp)
+{
+ struct erdma_cep *cep = qp->cep;
+
+ if (!qp->cep)
+ return;
+
+ erdma_cep_set_inuse(cep);
+
+ /* already closed. */
+ if (cep->state == ERDMA_EPSTATE_CLOSED)
+ goto out;
+
+ if (cep->cm_id) {
+ switch (cep->state) {
+ case ERDMA_EPSTATE_AWAIT_MPAREP:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EINVAL);
+ break;
+ case ERDMA_EPSTATE_RDMA_MODE:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ break;
+ case ERDMA_EPSTATE_IDLE:
+ case ERDMA_EPSTATE_LISTENING:
+ case ERDMA_EPSTATE_CONNECTING:
+ case ERDMA_EPSTATE_AWAIT_MPAREQ:
+ case ERDMA_EPSTATE_RECVD_MPAREQ:
+ case ERDMA_EPSTATE_CLOSED:
+ default:
+ break;
+ }
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ erdma_cep_put(cep);
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->qp) {
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+out:
+ erdma_cep_set_free(cep);
+}
+
+void erdma_cep_put(struct erdma_cep *cep)
+{
+ WARN_ON(kref_read(&cep->ref) < 1);
+ kref_put(&cep->ref, __erdma_cep_dealloc);
+}
+
+void erdma_cep_get(struct erdma_cep *cep)
+{
+ kref_get(&cep->ref);
+}
+
+static int erdma_send_mpareqrep(struct erdma_cep *cep, const void *pdata,
+ u8 pd_len)
+{
+ struct socket *s = cep->sock;
+ struct mpa_rr *rr = &cep->mpa.hdr;
+ struct kvec iov[3];
+ struct msghdr msg;
+ int iovec_num = 0;
+ int ret;
+ int mpa_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ rr->params.pd_len = cpu_to_be16(pd_len);
+
+ iov[iovec_num].iov_base = rr;
+ iov[iovec_num].iov_len = sizeof(*rr);
+ iovec_num++;
+ mpa_len = sizeof(*rr);
+
+ iov[iovec_num].iov_base = &cep->mpa.ext_data;
+ iov[iovec_num].iov_len = sizeof(cep->mpa.ext_data);
+ iovec_num++;
+ mpa_len += sizeof(cep->mpa.ext_data);
+
+ if (pd_len) {
+ iov[iovec_num].iov_base = (char *)pdata;
+ iov[iovec_num].iov_len = pd_len;
+ mpa_len += pd_len;
+ iovec_num++;
+ }
+
+ ret = kernel_sendmsg(s, &msg, iov, iovec_num, mpa_len);
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline int ksock_recv(struct socket *sock, char *buf, size_t size,
+ int flags)
+{
+ struct kvec iov = { buf, size };
+ struct msghdr msg = { .msg_name = NULL, .msg_flags = flags };
+
+ return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+}
+
+static int __recv_mpa_hdr(struct erdma_cep *cep, int hdr_rcvd, char *hdr,
+ int hdr_size, int *rcvd_out)
+{
+ struct socket *s = cep->sock;
+ int rcvd;
+
+ *rcvd_out = 0;
+ if (hdr_rcvd < hdr_size) {
+ rcvd = ksock_recv(s, hdr + hdr_rcvd, hdr_size - hdr_rcvd,
+ MSG_DONTWAIT);
+ if (rcvd == -EAGAIN)
+ return -EAGAIN;
+
+ if (rcvd <= 0)
+ return -ECONNABORTED;
+
+ hdr_rcvd += rcvd;
+ *rcvd_out = rcvd;
+
+ if (hdr_rcvd < hdr_size)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void __mpa_rr_set_revision(__be16 *bits, u8 rev)
+{
+ *bits = (*bits & ~MPA_RR_MASK_REVISION) |
+ (cpu_to_be16(rev) & MPA_RR_MASK_REVISION);
+}
+
+static u8 __mpa_rr_revision(__be16 mpa_rr_bits)
+{
+ __be16 rev = mpa_rr_bits & MPA_RR_MASK_REVISION;
+
+ return (u8)be16_to_cpu(rev);
+}
+
+static void __mpa_ext_set_cc(__be32 *bits, u32 cc)
+{
+ *bits = (*bits & ~MPA_EXT_FLAG_CC) |
+ (cpu_to_be32(cc) & MPA_EXT_FLAG_CC);
+}
+
+static u8 __mpa_ext_cc(__be32 mpa_ext_bits)
+{
+ __be32 cc = mpa_ext_bits & MPA_EXT_FLAG_CC;
+
+ return (u8)be32_to_cpu(cc);
+}
+
+/*
+ * Receive MPA Request/Reply header.
+ *
+ * Returns 0 if the complete MPA Request/Reply header, including any
+ * private data, was received. Returns -EAGAIN if the header was only
+ * partially received, or a negative error code otherwise.
+ *
+ * Context: May be called in process context only
+ */
+static int erdma_recv_mpa_rr(struct erdma_cep *cep)
+{
+ struct mpa_rr *hdr = &cep->mpa.hdr;
+ struct socket *s = cep->sock;
+ u16 pd_len;
+ int rcvd, to_rcv, ret, pd_rcvd;
+
+ if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
+ ret = __recv_mpa_hdr(cep, cep->mpa.bytes_rcvd,
+ (char *)&cep->mpa.hdr,
+ sizeof(struct mpa_rr), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA ||
+ __mpa_rr_revision(hdr->params.bits) != MPA_REVISION_EXT_1)
+ return -EPROTO;
+
+ if (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) <
+ sizeof(struct erdma_mpa_ext)) {
+ ret = __recv_mpa_hdr(
+ cep, cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
+ (char *)&cep->mpa.ext_data,
+ sizeof(struct erdma_mpa_ext), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ pd_len = be16_to_cpu(hdr->params.pd_len);
+ pd_rcvd = cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) -
+ sizeof(struct erdma_mpa_ext);
+ to_rcv = pd_len - pd_rcvd;
+
+ if (!to_rcv) {
+ /*
+ * We have received the whole MPA Request/Reply message.
+ * Check against peer protocol violation.
+ */
+ u32 word;
+
+ ret = __recv_mpa_hdr(cep, 0, (char *)&word, sizeof(word),
+ &rcvd);
+ if (ret == -EAGAIN && rcvd == 0)
+ return 0;
+
+ if (ret)
+ return ret;
+
+ return -EPROTO;
+ }
+
+ /*
+ * At this point, MPA header has been fully received, and pd_len != 0.
+ * So, begin to receive private data.
+ */
+ if (!cep->mpa.pdata) {
+ cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
+ if (!cep->mpa.pdata)
+ return -ENOMEM;
+ }
+
+ rcvd = ksock_recv(s, cep->mpa.pdata + pd_rcvd, to_rcv + 4,
+ MSG_DONTWAIT);
+ if (rcvd < 0)
+ return rcvd;
+
+ if (rcvd > to_rcv)
+ return -EPROTO;
+
+ cep->mpa.bytes_rcvd += rcvd;
+
+ if (to_rcv == rcvd)
+ return 0;
+
+ return -EAGAIN;
+}
+
+/*
+ * erdma_proc_mpareq()
+ *
+ * Read MPA Request from socket and signal new connection to IWCM
+ * on success. Caller must hold the lock on the corresponding listening CEP.
+ */
+static int erdma_proc_mpareq(struct erdma_cep *cep)
+{
+ struct mpa_rr *req;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ return ret;
+
+ req = &cep->mpa.hdr;
+
+ if (memcmp(req->key, MPA_KEY_REQ, MPA_KEY_SIZE))
+ return -EPROTO;
+
+ memcpy(req->key, MPA_KEY_REP, MPA_KEY_SIZE);
+
+ /* Markers and CRC are currently not supported. */
+ if (req->params.bits & MPA_RR_FLAG_MARKERS ||
+ req->params.bits & MPA_RR_FLAG_CRC)
+ goto reject_conn;
+
+ cep->state = ERDMA_EPSTATE_RECVD_MPAREQ;
+
+ /* Keep reference until IWCM accepts/rejects */
+ erdma_cep_get(cep);
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
+ if (ret)
+ erdma_cep_put(cep);
+
+ return ret;
+
+reject_conn:
+ req->params.bits &= ~MPA_RR_FLAG_MARKERS;
+ req->params.bits |= MPA_RR_FLAG_REJECT;
+ req->params.bits &= ~MPA_RR_FLAG_CRC;
+
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ erdma_send_mpareqrep(cep, NULL, 0);
+
+ return -EOPNOTSUPP;
+}
+
+static int erdma_proc_mpareply(struct erdma_cep *cep)
+{
+ struct erdma_qp_attrs qp_attrs;
+ struct erdma_qp *qp = cep->qp;
+ struct mpa_rr *rep;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ goto out_err;
+
+ erdma_cancel_mpatimer(cep);
+
+ rep = &cep->mpa.hdr;
+
+ if (memcmp(rep->key, MPA_KEY_REP, MPA_KEY_SIZE)) {
+ ret = -EPROTO;
+ goto out_err;
+ }
+
+ if (rep->params.bits & MPA_RR_FLAG_REJECT) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);
+ return -ECONNRESET;
+ }
+
+ /* Markers and CRC are currently not supported. */
+ if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
+ (rep->params.bits & MPA_RR_FLAG_CRC)) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ return -EINVAL;
+ }
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.irq_size = cep->ird;
+ qp_attrs.orq_size = cep->ord;
+ qp_attrs.state = ERDMA_QP_STATE_RTS;
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto out_err;
+ }
+
+ qp->attrs.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
+ qp->attrs.cc = COMPROMISE_CC;
+
+ ret = erdma_modify_qp_internal(qp, &qp_attrs,
+ ERDMA_QP_ATTR_STATE |
+ ERDMA_QP_ATTR_LLP_HANDLE |
+ ERDMA_QP_ATTR_MPA);
+
+ up_write(&qp->state_lock);
+
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (!ret)
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ return 0;
+ }
+
+out_err:
+ if (ret != -EAGAIN)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+
+ return ret;
+}
+
+static void erdma_accept_newconn(struct erdma_cep *cep)
+{
+ struct socket *s = cep->sock;
+ struct socket *new_s = NULL;
+ struct erdma_cep *new_cep = NULL;
+ int ret = 0;
+
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ goto error;
+
+ new_cep = erdma_cep_alloc(cep->dev);
+ if (!new_cep)
+ goto error;
+
+ /*
+ * 4: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout.
+ */
+ if (erdma_cm_alloc_work(new_cep, 4) != 0)
+ goto error;
+
+ /*
+ * Copy saved socket callbacks from listening CEP
+ * and assign new socket with new CEP
+ */
+ new_cep->sk_state_change = cep->sk_state_change;
+ new_cep->sk_data_ready = cep->sk_data_ready;
+ new_cep->sk_error_report = cep->sk_error_report;
+
+ ret = kernel_accept(s, &new_s, O_NONBLOCK);
+ if (ret != 0)
+ goto error;
+
+ new_cep->sock = new_s;
+ erdma_cep_get(new_cep);
+ new_s->sk->sk_user_data = new_cep;
+
+ tcp_sock_set_nodelay(new_s->sk);
+ new_cep->state = ERDMA_EPSTATE_AWAIT_MPAREQ;
+
+ ret = erdma_cm_queue_work(new_cep, ERDMA_CM_WORK_MPATIMEOUT);
+ if (ret)
+ goto error;
+
+ new_cep->listen_cep = cep;
+ erdma_cep_get(cep);
+
+ if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
+ /* MPA REQ already queued */
+ erdma_cep_set_inuse(new_cep);
+ ret = erdma_proc_mpareq(new_cep);
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep);
+ new_cep->listen_cep = NULL;
+ if (ret) {
+ erdma_cep_set_free(new_cep);
+ goto error;
+ }
+ }
+ erdma_cep_set_free(new_cep);
+ }
+ return;
+
+error:
+ if (new_cep) {
+ new_cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cancel_mpatimer(new_cep);
+
+ erdma_cep_put(new_cep);
+ new_cep->sock = NULL;
+ }
+
+ if (new_s) {
+ erdma_socket_disassoc(new_s);
+ sock_release(new_s);
+ }
+}
+
+static int erdma_newconn_connected(struct erdma_cep *cep)
+{
+ int ret = 0;
+
+ cep->mpa.hdr.params.bits = 0;
+ __mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
+
+ memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
+
+ ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
+ cep->state = ERDMA_EPSTATE_AWAIT_MPAREP;
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (ret >= 0)
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_MPATIMEOUT);
+
+ return ret;
+}
+
+static void erdma_cm_work_handler(struct work_struct *w)
+{
+ struct erdma_cm_work *work;
+ struct erdma_cep *cep;
+ int release_cep = 0, ret = 0;
+
+ work = container_of(w, struct erdma_cm_work, work.work);
+ cep = work->cep;
+
+ erdma_cep_set_inuse(cep);
+
+ switch (work->type) {
+ case ERDMA_CM_WORK_CONNECTED:
+ erdma_cancel_mpatimer(cep);
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ ret = erdma_newconn_connected(cep);
+ if (ret) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EIO);
+ release_cep = 1;
+ }
+ }
+ break;
+ case ERDMA_CM_WORK_CONNECTTIMEOUT:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ cep->mpa_timer = NULL;
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ }
+ break;
+ case ERDMA_CM_WORK_ACCEPT:
+ erdma_accept_newconn(cep);
+ break;
+ case ERDMA_CM_WORK_READ_MPAHDR:
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ if (cep->listen_cep) {
+ erdma_cep_set_inuse(cep->listen_cep);
+
+ if (cep->listen_cep->state ==
+ ERDMA_EPSTATE_LISTENING)
+ ret = erdma_proc_mpareq(cep);
+ else
+ ret = -EFAULT;
+
+ erdma_cep_set_free(cep->listen_cep);
+
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ if (ret)
+ erdma_cep_put(cep);
+ }
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ ret = erdma_proc_mpareply(cep);
+ }
+
+ if (ret && ret != -EAGAIN)
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_CLOSE_LLP:
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_PEER_CLOSE:
+ if (cep->cm_id) {
+ if (cep->state == ERDMA_EPSTATE_CONNECTING ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA reply not received, but the connection was dropped.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ } else if (cep->state == ERDMA_EPSTATE_RDMA_MODE) {
+ /*
+ * NOTE: IW_CM_EVENT_DISCONNECT is given just
+ * to transition IWCM into CLOSING.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* Socket close before MPA request received. */
+ erdma_disassoc_listen_cep(cep);
+ erdma_cep_put(cep);
+ }
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_MPATIMEOUT:
+ cep->mpa_timer = NULL;
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA request timed out:
+ * Hide any partially received private data and signal
+ * timeout
+ */
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* No MPA req received after peer TCP stream setup. */
+ erdma_disassoc_listen_cep(cep);
+
+ erdma_cep_put(cep);
+ release_cep = 1;
+ }
+ break;
+ default:
+ WARN(1, "Undefined CM work type: %d\n", work->type);
+ }
+
+ if (release_cep) {
+ erdma_cancel_mpatimer(cep);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ if (cep->qp) {
+ struct erdma_qp *qp = cep->qp;
+ /*
+ * Serialize a potential race with application
+ * closing the QP and calling erdma_qp_cm_drop()
+ */
+ erdma_qp_get(qp);
+ erdma_cep_set_free(cep);
+
+ erdma_qp_llp_close(qp);
+ erdma_qp_put(qp);
+
+ erdma_cep_set_inuse(cep);
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cep_put(cep);
+ }
+ }
+ erdma_cep_set_free(cep);
+ erdma_put_work(work);
+ erdma_cep_put(cep);
+}
+
+int erdma_cm_queue_work(struct erdma_cep *cep, enum erdma_work_type type)
+{
+ struct erdma_cm_work *work = erdma_get_work(cep);
+ unsigned long delay = 0;
+
+ if (!work)
+ return -ENOMEM;
+
+ work->type = type;
+ work->cep = cep;
+
+ erdma_cep_get(cep);
+
+ INIT_DELAYED_WORK(&work->work, erdma_cm_work_handler);
+
+ if (type == ERDMA_CM_WORK_MPATIMEOUT) {
+ cep->mpa_timer = work;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ delay = MPAREP_TIMEOUT;
+ else
+ delay = MPAREQ_TIMEOUT;
+ } else if (type == ERDMA_CM_WORK_CONNECTTIMEOUT) {
+ cep->mpa_timer = work;
+
+ delay = CONNECT_TIMEOUT;
+ }
+
+ queue_delayed_work(erdma_cm_wq, &work->work, delay);
+
+ return 0;
+}
+
+static void erdma_cm_llp_data_ready(struct sock *sk)
+{
+ struct erdma_cep *cep;
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep)
+ goto out;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_READ_MPAHDR);
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void erdma_cm_llp_error_report(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ if (cep)
+ cep->sk_error_report(sk);
+}
+
+static void erdma_cm_llp_state_change(struct sock *sk)
+{
+ struct erdma_cep *cep;
+ void (*orig_state_change)(struct sock *sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = cep->sk_state_change;
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ else
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_ACCEPT);
+ break;
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_PEER_CLOSE);
+ break;
+ default:
+ break;
+ }
+ read_unlock(&sk->sk_callback_lock);
+ orig_state_change(sk);
+}
+
+static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
+ int laddrlen, struct sockaddr *raddr,
+ int raddrlen, int flags)
+{
+ int ret;
+
+ sock_set_reuseaddr(s->sk);
+ ret = s->ops->bind(s, laddr, laddrlen);
+ if (ret)
+ return ret;
+ ret = s->ops->connect(s, raddr, raddrlen, flags);
+ return ret < 0 ? ret : 0;
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_qp *qp;
+ struct erdma_cep *cep = NULL;
+ struct socket *s = NULL;
+ struct sockaddr *laddr = (struct sockaddr *)&id->m_local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&id->m_remote_addr;
+ u16 pd_len = params->private_data_len;
+ int ret;
+
+ if (pd_len > MPA_MAX_PRIVDATA)
+ return -EINVAL;
+
+ if (params->ird > dev->attrs.max_ird ||
+ params->ord > dev->attrs.max_ord)
+ return -EINVAL;
+
+ if (laddr->sa_family != AF_INET || raddr->sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ ret = sock_create(AF_INET, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ goto error_put_qp;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error_release_sock;
+ }
+
+ erdma_cep_set_inuse(cep);
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ /* Associate cm_id with CEP */
+ id->add_ref(id);
+ cep->cm_id = id;
+
+ /*
+ * 6: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout, connected event
+ * and connect timeout.
+ */
+ ret = erdma_cm_alloc_work(cep, 6);
+ if (ret != 0) {
+ ret = -ENOMEM;
+ goto error_release_cep;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+ cep->state = ERDMA_EPSTATE_CONNECTING;
+
+ erdma_cep_socket_assoc(cep, s);
+
+ if (pd_len) {
+ cep->pd_len = pd_len;
+ cep->private_data = kmalloc(pd_len, GFP_KERNEL);
+ if (!cep->private_data) {
+ ret = -ENOMEM;
+ goto error_disassoc;
+ }
+
+ memcpy(cep->private_data, params->private_data,
+ params->private_data_len);
+ }
+
+ ret = kernel_bindconnect(s, laddr, sizeof(*laddr), raddr,
+ sizeof(*raddr), O_NONBLOCK);
+ if (ret != -EINPROGRESS && ret != 0) {
+ goto error_disassoc;
+ } else if (ret == 0) {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ if (ret)
+ goto error_disassoc;
+ } else {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTTIMEOUT);
+ if (ret)
+ goto error_disassoc;
+ }
+
+ erdma_cep_set_free(cep);
+ return 0;
+
+error_disassoc:
+ kfree(cep->private_data);
+ cep->private_data = NULL;
+ cep->pd_len = 0;
+
+ erdma_socket_disassoc(s);
+
+error_release_cep:
+ /* disassoc with cm_id */
+ cep->cm_id = NULL;
+ id->rem_ref(id);
+
+ /* disassoc with qp */
+ qp->cep = NULL;
+ erdma_cep_put(cep);
+ cep->qp = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+
+ /* release the cep. */
+ erdma_cep_put(cep);
+
+error_release_sock:
+ if (s)
+ sock_release(s);
+error_put_qp:
+ erdma_qp_put(qp);
+
+ return ret;
+}
+
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_qp *qp;
+ struct erdma_qp_attrs qp_attrs;
+ int ret;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ /* Free lingering inbound private data */
+ if (cep->mpa.hdr.params.pd_len) {
+ cep->mpa.hdr.params.pd_len = 0;
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ }
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->ord > dev->attrs.max_ord ||
+ params->ird > dev->attrs.max_ord) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->private_data_len > MPA_MAX_PRIVDATA) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.orq_size = params->ord;
+ qp_attrs.irq_size = params->ird;
+
+ qp_attrs.state = ERDMA_QP_STATE_RTS;
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ qp->attrs.qp_type = ERDMA_QP_PASSIVE;
+ qp->attrs.pd_len = params->private_data_len;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
+ qp->attrs.cc = COMPROMISE_CC;
+
+ /* move to rts */
+ ret = erdma_modify_qp_internal(qp, &qp_attrs,
+ ERDMA_QP_ATTR_STATE |
+ ERDMA_QP_ATTR_ORD |
+ ERDMA_QP_ATTR_LLP_HANDLE |
+ ERDMA_QP_ATTR_IRD |
+ ERDMA_QP_ATTR_MPA);
+ up_write(&qp->state_lock);
+
+ if (ret)
+ goto error;
+
+ cep->mpa.ext_data.bits = 0;
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+
+ ret = erdma_send_mpareqrep(cep, params->private_data,
+ params->private_data_len);
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
+ if (ret)
+ goto error;
+
+ erdma_cep_set_free(cep);
+
+ return 0;
+ }
+
+error:
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(id);
+ cep->cm_id = NULL;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(cep);
+ qp->cep = NULL;
+ }
+
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return ret;
+}
+
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen)
+{
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ if (__mpa_rr_revision(cep->mpa.hdr.params.bits) == MPA_REVISION_EXT_1) {
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
+ erdma_send_mpareqrep(cep, pdata, plen);
+ }
+
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return 0;
+}
+
+int erdma_create_listen(struct iw_cm_id *id, int backlog)
+{
+ struct socket *s;
+ struct erdma_cep *cep = NULL;
+ int ret = 0;
+ struct erdma_dev *dev = to_edev(id->device);
+ int addr_family = id->local_addr.ss_family;
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ if (addr_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ ret = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ return ret;
+
+ sock_set_reuseaddr(s->sk);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = dev->netdev->ifindex;
+
+ ret = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ if (ret)
+ goto error;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ erdma_cep_socket_assoc(cep, s);
+
+ ret = erdma_cm_alloc_work(cep, backlog);
+ if (ret)
+ goto error;
+
+ ret = s->ops->listen(s, backlog);
+ if (ret)
+ goto error;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ if (!id->provider_data) {
+ id->provider_data =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!id->provider_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_LIST_HEAD((struct list_head *)id->provider_data);
+ }
+
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = ERDMA_EPSTATE_LISTENING;
+
+ return 0;
+
+error:
+ if (cep) {
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ cep->sock = NULL;
+ erdma_socket_disassoc(s);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+ sock_release(s);
+
+ return ret;
+}
+
+static void erdma_drop_listeners(struct iw_cm_id *id)
+{
+ struct list_head *p, *tmp;
+ /*
+ * In case of a wildcard rdma_listen on a multi-homed device,
+ * a listener's IWCM id is associated with more than one listening CEP.
+ */
+ list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
+ struct erdma_cep *cep =
+ list_entry(p, struct erdma_cep, listenq);
+
+ list_del(p);
+
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+}
+
+int erdma_destroy_listen(struct iw_cm_id *id)
+{
+ if (!id->provider_data)
+ return 0;
+
+ erdma_drop_listeners(id);
+ kfree(id->provider_data);
+ id->provider_data = NULL;
+
+ return 0;
+}
+
+int erdma_cm_init(void)
+{
+ erdma_cm_wq = create_singlethread_workqueue("erdma_cm_wq");
+ if (!erdma_cm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void erdma_cm_exit(void)
+{
+ if (erdma_cm_wq)
+ destroy_workqueue(erdma_cm_wq);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.h b/drivers/infiniband/hw/erdma/erdma_cm.h
new file mode 100644
index 000000000000..8a3f998fec9b
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#ifndef __ERDMA_CM_H__
+#define __ERDMA_CM_H__
+
+#include <linux/tcp.h>
+#include <net/sock.h>
+#include <rdma/iw_cm.h>
+
+/* iWarp MPA protocol defs */
+#define MPA_REVISION_EXT_1 129
+#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+#define MPA_KEY_SIZE 16
+#define MPA_DEFAULT_HDR_LEN 28
+
+struct mpa_rr_params {
+ __be16 bits;
+ __be16 pd_len;
+};
+
+/*
+ * MPA request/response Hdr bits & fields
+ */
+enum {
+ MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
+ MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
+ MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
+ MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
+ MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
+};
+
+/*
+ * MPA request/reply header
+ */
+struct mpa_rr {
+ u8 key[16];
+ struct mpa_rr_params params;
+};
+
+struct erdma_mpa_ext {
+ __be32 cookie;
+ __be32 bits;
+};
+
+enum {
+ MPA_EXT_FLAG_CC = cpu_to_be32(0x0000000f),
+};
+
+struct erdma_mpa_info {
+ struct mpa_rr hdr; /* peer mpa hdr in host byte order */
+ struct erdma_mpa_ext ext_data;
+ char *pdata;
+ int bytes_rcvd;
+};
+
+struct erdma_sk_upcalls {
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+struct erdma_dev;
+
+enum erdma_cep_state {
+ ERDMA_EPSTATE_IDLE = 1,
+ ERDMA_EPSTATE_LISTENING,
+ ERDMA_EPSTATE_CONNECTING,
+ ERDMA_EPSTATE_AWAIT_MPAREQ,
+ ERDMA_EPSTATE_RECVD_MPAREQ,
+ ERDMA_EPSTATE_AWAIT_MPAREP,
+ ERDMA_EPSTATE_RDMA_MODE,
+ ERDMA_EPSTATE_CLOSED
+};
+
+struct erdma_cep {
+ struct iw_cm_id *cm_id;
+ struct erdma_dev *dev;
+ struct list_head devq;
+ spinlock_t lock;
+ struct kref ref;
+ int in_use;
+ wait_queue_head_t waitq;
+ enum erdma_cep_state state;
+
+ struct list_head listenq;
+ struct erdma_cep *listen_cep;
+
+ struct erdma_qp *qp;
+ struct socket *sock;
+
+ struct erdma_cm_work *mpa_timer;
+ struct list_head work_freelist;
+
+ struct erdma_mpa_info mpa;
+ int ord;
+ int ird;
+
+ int pd_len;
+ /* hold user's private data. */
+ void *private_data;
+
+ /* Saved upcalls of socket llp.sock */
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+#define MPAREQ_TIMEOUT (HZ * 20)
+#define MPAREP_TIMEOUT (HZ * 10)
+#define CONNECT_TIMEOUT (HZ * 10)
+
+enum erdma_work_type {
+ ERDMA_CM_WORK_ACCEPT = 1,
+ ERDMA_CM_WORK_READ_MPAHDR,
+ ERDMA_CM_WORK_CLOSE_LLP, /* close socket */
+ ERDMA_CM_WORK_PEER_CLOSE, /* socket indicated peer close */
+ ERDMA_CM_WORK_MPATIMEOUT,
+ ERDMA_CM_WORK_CONNECTED,
+ ERDMA_CM_WORK_CONNECTTIMEOUT
+};
+
+struct erdma_cm_work {
+ struct delayed_work work;
+ struct list_head list;
+ enum erdma_work_type type;
+ struct erdma_cep *cep;
+};
+
+#define to_sockaddr_in(a) (*(struct sockaddr_in *)(&(a)))
+
+static inline int getname_peer(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 1);
+}
+
+static inline int getname_local(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 0);
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen);
+int erdma_create_listen(struct iw_cm_id *id, int backlog);
+int erdma_destroy_listen(struct iw_cm_id *id);
+
+void erdma_cep_get(struct erdma_cep *cep);
+void erdma_cep_put(struct erdma_cep *cep);
+int erdma_cm_queue_work(struct erdma_cep *cep, enum erdma_work_type type);
+
+int erdma_cm_init(void);
+void erdma_cm_exit(void);
+
+#define sk_to_cep(sk) ((struct erdma_cep *)((sk)->sk_user_data))
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
new file mode 100644
index 000000000000..57da0c670472
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "erdma.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
+
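+ /* Publish the doorbell value to the host-memory record, then ring the hardware doorbell register. */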
+ *cmdq->cq.db_record = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
+
+ atomic64_inc(&cmdq->cq.armed_num);
+}
+
+static void kick_cmdq_db(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
+
+ *cmdq->sq.db_record = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
+}
+
+static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
+{
+ int comp_idx;
+
+ spin_lock(&cmdq->lock);
+ comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
+ cmdq->max_outstandings);
+ if (comp_idx == cmdq->max_outstandings) {
+ spin_unlock(&cmdq->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ __set_bit(comp_idx, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ return &cmdq->wait_pool[comp_idx];
+}
+
+static void put_comp_wait(struct erdma_cmdq *cmdq,
+ struct erdma_comp_wait *comp_wait)
+{
+ int used;
+
+ cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
+ spin_lock(&cmdq->lock);
+ used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ WARN_ON(!used);
+}
+
+static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
+ struct erdma_cmdq *cmdq)
+{
+ int i;
+
+ cmdq->wait_pool =
+ devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
+ sizeof(struct erdma_comp_wait), GFP_KERNEL);
+ if (!cmdq->wait_pool)
+ return -ENOMEM;
+
+ spin_lock_init(&cmdq->lock);
+ cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
+ &dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
+ if (!cmdq->comp_wait_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->max_outstandings; i++) {
+ init_completion(&cmdq->wait_pool[i].wait_event);
+ cmdq->wait_pool[i].ctx_id = i;
+ }
+
+ return 0;
+}
+
+static int erdma_cmdq_sq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_sq *sq = &cmdq->sq;
+ u32 buf_size;
+
+ sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
+ sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
+
+ buf_size = sq->depth << SQEBB_SHIFT;
+
+ sq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &sq->qbuf_dma_addr, GFP_KERNEL);
+ if (!sq->qbuf)
+ return -ENOMEM;
+
+ sq->db_record = (u64 *)(sq->qbuf + buf_size);
+
+ spin_lock_init(&sq->lock);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
+ upper_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
+ lower_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
+ sq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+static int erdma_cmdq_cq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_cq *cq = &cmdq->cq;
+ u32 buf_size;
+
+ cq->depth = cmdq->sq.depth;
+ buf_size = cq->depth << CQE_SHIFT;
+
+ cq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!cq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&cq->lock);
+
+ cq->db_record = (u64 *)(cq->qbuf + buf_size);
+
+ atomic64_set(&cq->armed_num, 0);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
+ upper_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
+ lower_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
+ cq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+static int erdma_cmdq_eq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_eq *eq = &cmdq->eq;
+ u32 buf_size;
+
+ eq->depth = cmdq->max_outstandings;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+
+ eq->db_addr =
+ (u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
+ eq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev)
+{
+ int err, i;
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ u32 sts, ctrl;
+
+ cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
+ cmdq->use_event = false;
+
+ sema_init(&cmdq->credits, cmdq->max_outstandings);
+
+ err = erdma_cmdq_wait_res_init(dev, cmdq);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_sq_init(dev);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_cq_init(dev);
+ if (err)
+ goto err_destroy_sq;
+
+ err = erdma_cmdq_eq_init(dev);
+ if (err)
+ goto err_destroy_cq;
+
+ ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1);
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+
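+ /* Poll the device status register until the hardware reports INIT_DONE. */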
+ for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
+ sts = erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
+ ERDMA_REG_DEV_ST_INIT_DONE_MASK);
+ if (sts)
+ break;
+
+ msleep(ERDMA_REG_ACCESS_WAIT_MS);
+ }
+
+ if (i == ERDMA_WAIT_DEV_DONE_CNT) {
+ dev_err(&dev->pdev->dev, "wait init done failed.\n");
+ err = -ETIMEDOUT;
+ goto err_destroy_eq;
+ }
+
+ set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ return 0;
+
+err_destroy_eq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->eq.depth << EQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
+
+err_destroy_cq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->cq.depth << CQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+err_destroy_sq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->sq.depth << SQEBB_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+
+ return err;
+}
+
+void erdma_finish_cmdq_init(struct erdma_dev *dev)
+{
+ /* After the device initializes successfully, switch the cmdq to event mode. */
+ dev->cmdq.use_event = true;
+ arm_cmdq_cq(&dev->cmdq);
+}
+
+void erdma_cmdq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->eq.depth << EQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->sq.depth << SQEBB_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->cq.depth << CQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+}
+
+static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
+{
+ __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
+ cmdq->cq.depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ __be32_to_cpu(READ_ONCE(*cqe)));
+
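+ /* Compare the owner bit with the consumer-index wrap phase to tell a fresh CQE from a stale one. */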
+ return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
+}
+
+static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
+ struct erdma_comp_wait *comp_wait)
+{
+ __le64 *wqe;
+ u64 hdr = *req;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
+ reinit_completion(&comp_wait->wait_event);
+ comp_wait->sq_pi = cmdq->sq.pi;
+
+ wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ memcpy(wqe, req, req_len);
+
+ cmdq->sq.pi += cmdq->sq.wqebb_cnt;
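+ /* Stamp the SQE header with the new producer index, the waiter's context cookie and the WQEBB count. */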
+ hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
+ FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
+ comp_wait->ctx_id) |
+ FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
+ *wqe = cpu_to_le64(hdr);
+
+ kick_cmdq_db(cmdq);
+}
+
+static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
+{
+ struct erdma_comp_wait *comp_wait;
+ u32 hdr0, sqe_idx;
+ __be32 *cqe;
+ u16 ctx_id;
+ u64 *sqe;
+ int i;
+
+ cqe = get_next_valid_cmdq_cqe(cmdq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cmdq->cq.ci++;
+
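+ /* Make sure the CQE payload is read only after the ownership check. */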
+ dma_rmb();
+ hdr0 = __be32_to_cpu(*cqe);
+ sqe_idx = __be32_to_cpu(*(cqe + 1));
+
+ sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
+ comp_wait = &cmdq->wait_pool[ctx_id];
+ if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ return -EIO;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
+ comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
+ cmdq->sq.ci += cmdq->sq.wqebb_cnt;
+
+ for (i = 0; i < 4; i++)
+ comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
+
+ if (cmdq->use_event)
+ complete(&comp_wait->wait_event);
+
+ return 0;
+}
+
+static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
+{
+ unsigned long flags;
+ u16 comp_num;
+
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+
+ /* No more than max_outstandings completions can be
+ * pending at any one time, so bound the polling loop.
+ */
+ for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
+ if (erdma_poll_single_cmd_completion(cmdq))
+ break;
+
+ if (comp_num && cmdq->use_event)
+ arm_cmdq_cq(cmdq);
+
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+}
+
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
+{
+ int got_event = 0;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
+ !cmdq->use_event)
+ return;
+
+ while (get_next_valid_eqe(&cmdq->eq)) {
+ cmdq->eq.ci++;
+ got_event++;
+ }
+
+ if (got_event) {
+ cmdq->cq.cmdsn++;
+ erdma_polling_cmd_completions(cmdq);
+ }
+
+ notify_eq(&cmdq->eq);
+}
+
+static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long comp_timeout = jiffies + msecs_to_jiffies(timeout);
+
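+ /* Poll the command CQ, sleeping between attempts, until our entry completes or the timeout expires. */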
+ while (1) {
+ erdma_polling_cmd_completions(cmdq);
+ if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ break;
+
+ if (time_is_before_jiffies(comp_timeout))
+ return -ETIME;
+
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long flags = 0;
+
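+ /* In event mode the completion handler signals us; treat a missing signal within the timeout as a command timeout. */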
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ msecs_to_jiffies(timeout));
+
+ if (unlikely(comp_ctx->cmd_status != ERDMA_CMD_STATUS_FINISHED)) {
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+ comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
+{
+ *hdr = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod) |
+ FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
+}
+
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+ u64 *resp0, u64 *resp1)
+{
+ struct erdma_comp_wait *comp_wait;
+ int ret;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
+ return -ENODEV;
+
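+ /* Each outstanding command consumes one credit; block here when the queue is full. */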
+ down(&cmdq->credits);
+
+ comp_wait = get_comp_wait(cmdq);
+ if (IS_ERR(comp_wait)) {
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
+ up(&cmdq->credits);
+ return PTR_ERR(comp_wait);
+ }
+
+ spin_lock(&cmdq->sq.lock);
+ push_cmdq_sqe(cmdq, req, req_size, comp_wait);
+ spin_unlock(&cmdq->sq.lock);
+
+ if (cmdq->use_event)
+ ret = erdma_wait_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+ else
+ ret = erdma_poll_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+
+ if (ret) {
+ set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ goto out;
+ }
+
+ if (comp_wait->comp_status)
+ ret = -EIO;
+
+ if (resp0 && resp1) {
+ *resp0 = *((u64 *)&comp_wait->comp_data[0]);
+ *resp1 = *((u64 *)&comp_wait->comp_data[2]);
+ }
+ put_comp_wait(cmdq, comp_wait);
+
+out:
+ up(&cmdq->credits);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
new file mode 100644
index 000000000000..751c7f9f0de7
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static void *get_next_valid_cqe(struct erdma_cq *cq)
+{
+ __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
+ cq->depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ __be32_to_cpu(READ_ONCE(*cqe)));
+
+ return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
+}
+
+static void notify_cq(struct erdma_cq *cq, u8 solicited)
+{
+ u64 db_data =
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
+ FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
+
+ *cq->kern_cq.db_record = db_data;
+ writeq(db_data, cq->kern_cq.db);
+}
+
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);
+
+ notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
+ ret = 1;
+
+ cq->kern_cq.notify_cnt++;
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);
+
+ return ret;
+}
+
+static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
+ [ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_READ] = IB_WC_RDMA_READ,
+ [ERDMA_OP_SEND] = IB_WC_SEND,
+ [ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
+ [ERDMA_OP_RECEIVE] = IB_WC_RECV,
+ [ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
+ [ERDMA_OP_RECV_INV] = IB_WC_RECV,
+ [ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
+ [ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
+ [ERDMA_OP_REG_MR] = IB_WC_REG_MR,
+ [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
+};
+
+static const struct {
+ enum erdma_wc_status erdma;
+ enum ib_wc_status base;
+ enum erdma_vendor_err vendor;
+} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
+ { ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_INVALID_RQE },
+ { ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG },
+ { ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
+ { ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
+ { ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD },
+ { ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR },
+ { ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
+ ERDMA_WC_VENDOR_INVALID_SQE },
+ { ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_ZERO_ORD },
+ { ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG },
+ { ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
+ { ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
+ { ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD },
+ { ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR },
+ { ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
+};
+
+#define ERDMA_POLLCQ_NO_QP 1
+
+static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+ u8 opcode, syndrome, qtype;
+ struct erdma_kqp *kern_qp;
+ struct erdma_cqe *cqe;
+ struct erdma_qp *qp;
+ u16 wqe_idx, depth;
+ u32 qpn, cqe_hdr;
+ u64 *id_table;
+ u64 *wqe_hdr;
+
+ cqe = get_next_valid_cqe(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cq->kern_cq.ci++;
+
+ /* The CQE contents must be fully visible before we parse them. */
+ dma_rmb();
+
+ qpn = be32_to_cpu(cqe->qpn);
+ wqe_idx = be32_to_cpu(cqe->qe_idx);
+ cqe_hdr = be32_to_cpu(cqe->hdr);
+
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ return ERDMA_POLLCQ_NO_QP;
+
+ kern_qp = &qp->kern_qp;
+
+ qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
+ syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
+ opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);
+
+ if (qtype == ERDMA_CQE_QTYPE_SQ) {
+ id_table = kern_qp->swr_tbl;
+ depth = qp->attrs.sq_size;
+ wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ kern_qp->sq_ci =
+ FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
+ wqe_idx + 1;
+ } else {
+ id_table = kern_qp->rwr_tbl;
+ depth = qp->attrs.rq_size;
+ }
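+ /* Look up the wr_id that was saved for this WQE index in the send/recv id table. */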
+ wc->wr_id = id_table[wqe_idx & (depth - 1)];
+ wc->byte_len = be32_to_cpu(cqe->size);
+
+ wc->wc_flags = 0;
+
+ wc->opcode = wc_mapping_table[opcode];
+ if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ } else if (opcode == ERDMA_OP_RECV_INV) {
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+
+ if (syndrome >= ERDMA_NUM_WC_STATUS)
+ syndrome = ERDMA_WC_GENERAL_ERR;
+
+ wc->status = map_cqe_status[syndrome].base;
+ wc->vendor_err = map_cqe_status[syndrome].vendor;
+ wc->qp = &qp->ibqp;
+
+ return 0;
+}
+
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long flags;
+ int npolled, ret;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ for (npolled = 0; npolled < num_entries;) {
+ ret = erdma_poll_one_cqe(cq, wc + npolled);
+
+ if (ret == -EAGAIN) /* no new CQEs received. */
+ break;
+ else if (ret) /* ignore invalid CQEs. */
+ continue;
+
+ npolled++;
+ }
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+
+ return npolled;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
new file mode 100644
index 000000000000..8f2d094e0227
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "erdma.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+#define MAX_POLL_CHUNK_SIZE 16
+
+void notify_eq(struct erdma_eq *eq)
+{
+ u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
+ FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
+
+ *eq->db_record = db_data;
+ writeq(db_data, eq->db_addr);
+
+ atomic64_inc(&eq->notify_num);
+}
+
+void *get_next_valid_eqe(struct erdma_eq *eq)
+{
+ u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));
+
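+ /* EQEs use the same owner-bit scheme as CQEs: only a freshly written entry is returned. */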
+ return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
+}
+
+void erdma_aeq_event_handler(struct erdma_dev *dev)
+{
+ struct erdma_aeqe *aeqe;
+ u32 cqn, qpn;
+ struct erdma_qp *qp;
+ struct erdma_cq *cq;
+ struct ib_event event;
+ u32 poll_cnt = 0;
+
+ memset(&event, 0, sizeof(event));
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ aeqe = get_next_valid_eqe(&dev->aeq);
+ if (!aeqe)
+ break;
+
+ dma_rmb();
+
+ dev->aeq.ci++;
+ atomic64_inc(&dev->aeq.event_num);
+ poll_cnt++;
+
+ if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
+ le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
+ cqn = le32_to_cpu(aeqe->event_data0);
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ event.device = cq->ibcq.device;
+ event.element.cq = &cq->ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&event,
+ cq->ibcq.cq_context);
+ } else {
+ qpn = le32_to_cpu(aeqe->event_data0);
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ continue;
+
+ event.device = qp->ibqp.device;
+ event.element.qp = &qp->ibqp;
+ event.event = IB_EVENT_QP_FATAL;
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&event,
+ qp->ibqp.qp_context);
+ }
+ }
+
+ notify_eq(&dev->aeq);
+}
+
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ u32 buf_size;
+
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+
+ eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
+ eq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+void erdma_aeq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
+{
+ struct erdma_dev *dev = ceq_cb->dev;
+ struct erdma_cq *cq;
+ u32 poll_cnt = 0;
+ u64 *ceqe;
+ int cqn;
+
+ if (!ceq_cb->ready)
+ return;
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ ceqe = get_next_valid_eqe(&ceq_cb->eq);
+ if (!ceqe)
+ break;
+
+ dma_rmb();
+ ceq_cb->eq.ci++;
+ poll_cnt++;
+ cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));
+
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res))
+ cq->kern_cq.cmdsn++;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ }
+
+ notify_eq(&ceq_cb->eq);
+}
+
+static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
+{
+ struct erdma_eq_cb *ceq_cb = data;
+
+ tasklet_schedule(&ceq_cb->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_intr_ceq_task(unsigned long data)
+{
+ erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
+}
+
+static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+ int err;
+
+ snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
+ pci_name(dev->pdev));
+ eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);
+
+ tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
+ (unsigned long)&dev->ceqs[ceqn]);
+
+ cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
+ &eqc->irq.affinity_hint_mask);
+
+ err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
+ eqc->irq.name, eqc);
+ if (err) {
+ dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
+ return err;
+ }
+
+ irq_set_affinity_hint(eqc->irq.msix_vector,
+ &eqc->irq.affinity_hint_mask);
+
+ return 0;
+}
+
+static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+
+ irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
+ free_irq(eqc->irq.msix_vector, eqc);
+}
+
+static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
+{
+ struct erdma_cmdq_create_eq_req req;
+ dma_addr_t db_info_dma_addr;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CREATE_EQ);
+ req.eqn = eqn;
+ req.depth = ilog2(eq->depth);
+ req.qbuf_addr = eq->qbuf_dma_addr;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ /* Vector index is the same as EQN. */
+ req.vector_idx = eqn;
+ db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
+ req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
+ req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
+ sizeof(struct erdma_cmdq_create_eq_req),
+ NULL, NULL);
+}
+
+static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+ int ret;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ eq->db_addr =
+ (u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
+ (ceqn + 1) * ERDMA_DB_SIZE);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->ci = 0;
+ dev->ceqs[ceqn].dev = dev;
+
+ /* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
+ ret = create_eq_cmd(dev, ceqn + 1, eq);
+ dev->ceqs[ceqn].ready = ret ? false : true;
+
+ return ret;
+}
+
+static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+ struct erdma_cmdq_destroy_eq_req req;
+ int err;
+
+ dev->ceqs[ceqn].ready = 0;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_DESTROY_EQ);
+ /* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
+ req.eqn = ceqn + 1;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ req.vector_idx = ceqn + 1;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return;
+
+ dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+int erdma_ceqs_init(struct erdma_dev *dev)
+{
+ u32 i, j;
+ int err;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ err = erdma_ceq_init_one(dev, i);
+ if (err)
+ goto out_err;
+
+ err = erdma_set_ceq_irq(dev, i);
+ if (err) {
+ erdma_ceq_uninit_one(dev, i);
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ for (j = 0; j < i; j++) {
+ erdma_free_ceq_irq(dev, j);
+ erdma_ceq_uninit_one(dev, j);
+ }
+
+ return err;
+}
+
+void erdma_ceqs_uninit(struct erdma_dev *dev)
+{
+ u32 i;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ erdma_free_ceq_irq(dev, i);
+ erdma_ceq_uninit_one(dev, i);
+ }
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
new file mode 100644
index 000000000000..b210c49c669f
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_HW_H__
+#define __ERDMA_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* PCIe device related definition. */
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
+#define ERDMA_PCI_WIDTH 64
+#define ERDMA_FUNC_BAR 0
+#define ERDMA_MISX_BAR 2
+
+#define ERDMA_BAR_MASK (BIT(ERDMA_FUNC_BAR) | BIT(ERDMA_MISX_BAR))
+
+/* MSI-X related. */
+#define ERDMA_NUM_MSIX_VEC 32U
+#define ERDMA_MSIX_VECTOR_CMDQ 0
+
+/* PCIe Bar0 Registers. */
+#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_CTRL_REG 0x10
+#define ERDMA_REGS_DEV_ST_REG 0x14
+#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
+#define ERDMA_REGS_NETDEV_MAC_H_REG 0x1C
+#define ERDMA_REGS_CMDQ_SQ_ADDR_L_REG 0x20
+#define ERDMA_REGS_CMDQ_SQ_ADDR_H_REG 0x24
+#define ERDMA_REGS_CMDQ_CQ_ADDR_L_REG 0x28
+#define ERDMA_REGS_CMDQ_CQ_ADDR_H_REG 0x2C
+#define ERDMA_REGS_CMDQ_DEPTH_REG 0x30
+#define ERDMA_REGS_CMDQ_EQ_DEPTH_REG 0x34
+#define ERDMA_REGS_CMDQ_EQ_ADDR_L_REG 0x38
+#define ERDMA_REGS_CMDQ_EQ_ADDR_H_REG 0x3C
+#define ERDMA_REGS_AEQ_ADDR_L_REG 0x40
+#define ERDMA_REGS_AEQ_ADDR_H_REG 0x44
+#define ERDMA_REGS_AEQ_DEPTH_REG 0x48
+#define ERDMA_REGS_GRP_NUM_REG 0x4c
+#define ERDMA_REGS_AEQ_DB_REG 0x50
+#define ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG 0x60
+#define ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG 0x68
+#define ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG 0x70
+#define ERDMA_AEQ_DB_HOST_ADDR_REG 0x78
+#define ERDMA_REGS_STATS_TSO_IN_PKTS_REG 0x80
+#define ERDMA_REGS_STATS_TSO_OUT_PKTS_REG 0x88
+#define ERDMA_REGS_STATS_TSO_OUT_BYTES_REG 0x90
+#define ERDMA_REGS_STATS_TX_DROP_PKTS_REG 0x98
+#define ERDMA_REGS_STATS_TX_BPS_METER_DROP_PKTS_REG 0xa0
+#define ERDMA_REGS_STATS_TX_PPS_METER_DROP_PKTS_REG 0xa8
+#define ERDMA_REGS_STATS_RX_PKTS_REG 0xc0
+#define ERDMA_REGS_STATS_RX_BYTES_REG 0xc8
+#define ERDMA_REGS_STATS_RX_DROP_PKTS_REG 0xd0
+#define ERDMA_REGS_STATS_RX_BPS_METER_DROP_PKTS_REG 0xd8
+#define ERDMA_REGS_STATS_RX_PPS_METER_DROP_PKTS_REG 0xe0
+#define ERDMA_REGS_CEQ_DB_BASE_REG 0x100
+#define ERDMA_CMDQ_SQDB_REG 0x200
+#define ERDMA_CMDQ_CQDB_REG 0x300
+
+/* DEV_CTRL_REG details. */
+#define ERDMA_REG_DEV_CTRL_RESET_MASK 0x00000001
+#define ERDMA_REG_DEV_CTRL_INIT_MASK 0x00000002
+
+/* DEV_ST_REG details. */
+#define ERDMA_REG_DEV_ST_RESET_DONE_MASK 0x00000001U
+#define ERDMA_REG_DEV_ST_INIT_DONE_MASK 0x00000002U
+
+/* eRDMA PCIe DBs definition. */
+#define ERDMA_BAR_DB_SPACE_BASE 4096
+
+#define ERDMA_BAR_SQDB_SPACE_OFFSET ERDMA_BAR_DB_SPACE_BASE
+#define ERDMA_BAR_SQDB_SPACE_SIZE (384 * 1024)
+
+#define ERDMA_BAR_RQDB_SPACE_OFFSET \
+ (ERDMA_BAR_SQDB_SPACE_OFFSET + ERDMA_BAR_SQDB_SPACE_SIZE)
+#define ERDMA_BAR_RQDB_SPACE_SIZE (96 * 1024)
+
+#define ERDMA_BAR_CQDB_SPACE_OFFSET \
+ (ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)
+
+/* Doorbell page resources related. */
+/*
+ * At most 3072 direct SQEs can be issued in parallel per device;
+ * hardware organizes them into 24 groups of 128 credits each.
+ */
+#define ERDMA_DWQE_MAX_GRP_CNT 24
+#define ERDMA_DWQE_NUM_PER_GRP 128
+
+#define ERDMA_DWQE_TYPE0_CNT 64
+#define ERDMA_DWQE_TYPE1_CNT 496
+/* A type1 DB entry contains 2 DBs and takes 256 bytes. */
+#define ERDMA_DWQE_TYPE1_CNT_PER_PAGE 16
+
+#define ERDMA_SDB_SHARED_PAGE_INDEX 95
+
+/* Doorbell related. */
+#define ERDMA_DB_SIZE 8
+
+#define ERDMA_CQDB_IDX_MASK GENMASK_ULL(63, 56)
+#define ERDMA_CQDB_CQN_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CQDB_ARM_MASK BIT_ULL(31)
+#define ERDMA_CQDB_SOL_MASK BIT_ULL(30)
+#define ERDMA_CQDB_CMDSN_MASK GENMASK_ULL(29, 28)
+#define ERDMA_CQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_EQDB_ARM_MASK BIT(31)
+#define ERDMA_EQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
+
+/* WQE related. */
+#define EQE_SIZE 16
+#define EQE_SHIFT 4
+#define RQE_SIZE 32
+#define RQE_SHIFT 5
+#define CQE_SIZE 32
+#define CQE_SHIFT 5
+#define SQEBB_SIZE 32
+#define SQEBB_SHIFT 5
+#define SQEBB_MASK (~(SQEBB_SIZE - 1))
+#define SQEBB_ALIGN(size) ((size + SQEBB_SIZE - 1) & SQEBB_MASK)
+#define SQEBB_COUNT(size) (SQEBB_ALIGN(size) >> SQEBB_SHIFT)
+
+#define ERDMA_MAX_SQE_SIZE 128
+#define ERDMA_MAX_WQEBB_PER_SQE 4
+
+/* CMDQ related. */
+#define ERDMA_CMDQ_MAX_OUTSTANDING 128
+#define ERDMA_CMDQ_SQE_SIZE 64
+
+/* cmdq sub module definition. */
+enum CMDQ_WQE_SUB_MOD {
+ CMDQ_SUBMOD_RDMA = 0,
+ CMDQ_SUBMOD_COMMON = 1
+};
+
+enum CMDQ_RDMA_OPCODE {
+ CMDQ_OPCODE_QUERY_DEVICE = 0,
+ CMDQ_OPCODE_CREATE_QP = 1,
+ CMDQ_OPCODE_DESTROY_QP = 2,
+ CMDQ_OPCODE_MODIFY_QP = 3,
+ CMDQ_OPCODE_CREATE_CQ = 4,
+ CMDQ_OPCODE_DESTROY_CQ = 5,
+ CMDQ_OPCODE_REG_MR = 8,
+ CMDQ_OPCODE_DEREG_MR = 9
+};
+
+enum CMDQ_COMMON_OPCODE {
+ CMDQ_OPCODE_CREATE_EQ = 0,
+ CMDQ_OPCODE_DESTROY_EQ = 1,
+ CMDQ_OPCODE_QUERY_FW_INFO = 2,
+};
+
+/* cmdq-SQE HDR */
+#define ERDMA_CMD_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK GENMASK_ULL(47, 32)
+#define ERDMA_CMD_HDR_SUB_MOD_MASK GENMASK_ULL(25, 24)
+#define ERDMA_CMD_HDR_OPCODE_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+struct erdma_cmdq_destroy_cq_req {
+ u64 hdr;
+ u32 cqn;
+};
+
+#define ERDMA_EQ_TYPE_AEQ 0
+#define ERDMA_EQ_TYPE_CEQ 1
+
+struct erdma_cmdq_create_eq_req {
+ u64 hdr;
+ u64 qbuf_addr;
+ u8 vector_idx;
+ u8 eqn;
+ u8 depth;
+ u8 qtype;
+ u32 db_dma_addr_l;
+ u32 db_dma_addr_h;
+};
+
+struct erdma_cmdq_destroy_eq_req {
+ u64 hdr;
+ u64 rsvd0;
+ u8 vector_idx;
+ u8 eqn;
+ u8 rsvd1;
+ u8 qtype;
+};
+
+/* create_cq cfg0 */
+#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
+#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
+#define ERDMA_CMD_CREATE_CQ_CQN_MASK GENMASK(19, 0)
+
+/* create_cq cfg1 */
+#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
+#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
+#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)
+
+struct erdma_cmdq_create_cq_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 qbuf_addr_l;
+ u32 qbuf_addr_h;
+ u32 cfg1;
+ u64 cq_db_info_addr;
+ u32 first_page_offset;
+};
+
+/* regmr/deregmr cfg0 */
+#define ERDMA_CMD_MR_VALID_MASK BIT(31)
+#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
+#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)
+
+/* regmr cfg1 */
+#define ERDMA_CMD_REGMR_PD_MASK GENMASK(31, 12)
+#define ERDMA_CMD_REGMR_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 2)
+#define ERDMA_CMD_REGMR_ACC_MODE_MASK GENMASK(1, 0)
+
+/* regmr cfg2 */
+#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
+#define ERDMA_CMD_REGMR_MTT_TYPE_MASK GENMASK(21, 20)
+#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_reg_mr_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u64 start_va;
+ u32 size;
+ u32 cfg2;
+ u64 phy_addr[4];
+};
+
+struct erdma_cmdq_dereg_mr_req {
+ u64 hdr;
+ u32 cfg;
+};
+
+/* modify qp cfg */
+#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
+#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
+#define ERDMA_CMD_MODIFY_QP_QPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_modify_qp_req {
+ u64 hdr;
+ u32 cfg;
+ u32 cookie;
+ __be32 dip;
+ __be32 sip;
+ __be16 sport;
+ __be16 dport;
+ u32 send_nxt;
+ u32 recv_nxt;
+};
+
+/* create qp cfg0 */
+#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
+
+/* create qp cfg1 */
+#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+
+/* create qp cqn_mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
+#define ERDMA_CMD_CREATE_QP_CQN_MASK GENMASK(23, 0)
+
+/* create qp mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
+#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
+#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)
+
+#define ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK GENMASK_ULL(31, 0)
+
+struct erdma_cmdq_create_qp_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 sq_cqn_mtt_cfg;
+ u32 rq_cqn_mtt_cfg;
+ u64 sq_buf_addr;
+ u64 rq_buf_addr;
+ u32 sq_mtt_cfg;
+ u32 rq_mtt_cfg;
+ u64 sq_db_info_dma_addr;
+ u64 rq_db_info_dma_addr;
+};
+
+struct erdma_cmdq_destroy_qp_req {
+ u64 hdr;
+ u32 qpn;
+};
+
+/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
+#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
+
+/* cap qword 1 definition */
+#define ERDMA_CMD_DEV_CAP_DMA_LOCAL_KEY_MASK GENMASK_ULL(63, 32)
+#define ERDMA_CMD_DEV_CAP_DEFAULT_CC_MASK GENMASK_ULL(31, 28)
+#define ERDMA_CMD_DEV_CAP_QBLOCK_MASK GENMASK_ULL(27, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MW_MASK GENMASK_ULL(7, 0)
+
+#define ERDMA_NQP_PER_QBLOCK 1024
+
+#define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0)
+
+/* CQE hdr */
+#define ERDMA_CQE_HDR_OWNER_MASK BIT(31)
+#define ERDMA_CQE_HDR_OPCODE_MASK GENMASK(23, 16)
+#define ERDMA_CQE_HDR_QTYPE_MASK GENMASK(15, 8)
+#define ERDMA_CQE_HDR_SYNDROME_MASK GENMASK(7, 0)
+
+#define ERDMA_CQE_QTYPE_SQ 0
+#define ERDMA_CQE_QTYPE_RQ 1
+#define ERDMA_CQE_QTYPE_CMDQ 2
+
+struct erdma_cqe {
+ __be32 hdr;
+ __be32 qe_idx;
+ __be32 qpn;
+ union {
+ __le32 imm_data;
+ __be32 inv_rkey;
+ };
+ __be32 size;
+ __be32 rsvd[3];
+};
+
+struct erdma_sge {
+ __aligned_le64 laddr;
+ __le32 length;
+ __le32 lkey;
+};
+
+/* Receive Queue Element */
+struct erdma_rqe {
+ __le16 qe_idx;
+ __le16 rsvd0;
+ __le32 qpn;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ __le64 to;
+ __le32 length;
+ __le32 stag;
+};
+
+/* SQE */
+#define ERDMA_SQE_HDR_SGL_LEN_MASK GENMASK_ULL(63, 56)
+#define ERDMA_SQE_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_SQE_HDR_QPN_MASK GENMASK_ULL(51, 32)
+#define ERDMA_SQE_HDR_OPCODE_MASK GENMASK_ULL(31, 27)
+#define ERDMA_SQE_HDR_DWQE_MASK BIT_ULL(26)
+#define ERDMA_SQE_HDR_INLINE_MASK BIT_ULL(25)
+#define ERDMA_SQE_HDR_FENCE_MASK BIT_ULL(24)
+#define ERDMA_SQE_HDR_SE_MASK BIT_ULL(23)
+#define ERDMA_SQE_HDR_CE_MASK BIT_ULL(22)
+#define ERDMA_SQE_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+/* REG MR attrs */
+#define ERDMA_SQE_MR_MODE_MASK GENMASK(1, 0)
+#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 2)
+#define ERDMA_SQE_MR_MTT_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_SQE_MR_MTT_CNT_MASK GENMASK(31, 12)
+
+struct erdma_write_sqe {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+
+ __le32 rsvd;
+
+ struct erdma_sge sgl[0];
+};
+
+struct erdma_send_sqe {
+ __le64 hdr;
+ union {
+ __be32 imm_data;
+ __le32 invalid_stag;
+ };
+
+ __le32 length;
+ struct erdma_sge sgl[0];
+};
+
+struct erdma_readreq_sqe {
+ __le64 hdr;
+ __le32 invalid_stag;
+ __le32 length;
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+ __le32 rsvd;
+};
+
+struct erdma_reg_mr_sqe {
+ __le64 hdr;
+ __le64 addr;
+ __le32 length;
+ __le32 stag;
+ __le32 attrs;
+ __le32 rsvd;
+};
+
+/* EQ related. */
+#define ERDMA_DEFAULT_EQ_DEPTH 256
+
+/* ceqe */
+#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
+#define ERDMA_CEQE_HDR_PI_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CEQE_HDR_O_MASK BIT_ULL(31)
+#define ERDMA_CEQE_HDR_CQN_MASK GENMASK_ULL(19, 0)
+
+/* aeqe */
+#define ERDMA_AEQE_HDR_O_MASK BIT(31)
+#define ERDMA_AEQE_HDR_TYPE_MASK GENMASK(23, 16)
+#define ERDMA_AEQE_HDR_SUBTYPE_MASK GENMASK(7, 0)
+
+#define ERDMA_AE_TYPE_QP_FATAL_EVENT 0
+#define ERDMA_AE_TYPE_QP_ERQ_ERR_EVENT 1
+#define ERDMA_AE_TYPE_ACC_ERR_EVENT 2
+#define ERDMA_AE_TYPE_CQ_ERR 3
+#define ERDMA_AE_TYPE_OTHER_ERROR 4
+
+struct erdma_aeqe {
+ __le32 hdr;
+ __le32 event_data0;
+ __le32 event_data1;
+ __le32 rsvd;
+};
+
+enum erdma_opcode {
+ ERDMA_OP_WRITE = 0,
+ ERDMA_OP_READ = 1,
+ ERDMA_OP_SEND = 2,
+ ERDMA_OP_SEND_WITH_IMM = 3,
+
+ ERDMA_OP_RECEIVE = 4,
+ ERDMA_OP_RECV_IMM = 5,
+ ERDMA_OP_RECV_INV = 6,
+
+ ERDMA_OP_REQ_ERR = 7,
+ ERDMA_OP_READ_RESPONSE = 8,
+ ERDMA_OP_WRITE_WITH_IMM = 9,
+
+ ERDMA_OP_RECV_ERR = 10,
+
+ ERDMA_OP_INVALIDATE = 11,
+ ERDMA_OP_RSP_SEND_IMM = 12,
+ ERDMA_OP_SEND_WITH_INV = 13,
+
+ ERDMA_OP_REG_MR = 14,
+ ERDMA_OP_LOCAL_INV = 15,
+ ERDMA_OP_READ_WITH_INV = 16,
+ ERDMA_NUM_OPCODES = 17,
+ ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
+};
+
+enum erdma_wc_status {
+ ERDMA_WC_SUCCESS = 0,
+ ERDMA_WC_GENERAL_ERR = 1,
+ ERDMA_WC_RECV_WQE_FORMAT_ERR = 2,
+ ERDMA_WC_RECV_STAG_INVALID_ERR = 3,
+ ERDMA_WC_RECV_ADDR_VIOLATION_ERR = 4,
+ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR = 5,
+ ERDMA_WC_RECV_PDID_ERR = 6,
+ ERDMA_WC_RECV_WARRPING_ERR = 7,
+ ERDMA_WC_SEND_WQE_FORMAT_ERR = 8,
+ ERDMA_WC_SEND_WQE_ORD_EXCEED = 9,
+ ERDMA_WC_SEND_STAG_INVALID_ERR = 10,
+ ERDMA_WC_SEND_ADDR_VIOLATION_ERR = 11,
+ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR = 12,
+ ERDMA_WC_SEND_PDID_ERR = 13,
+ ERDMA_WC_SEND_WARRPING_ERR = 14,
+ ERDMA_WC_FLUSH_ERR = 15,
+ ERDMA_WC_RETRY_EXC_ERR = 16,
+ ERDMA_NUM_WC_STATUS
+};
+
+enum erdma_vendor_err {
+ ERDMA_WC_VENDOR_NO_ERR = 0,
+ ERDMA_WC_VENDOR_INVALID_RQE = 1,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG = 2,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION = 3,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR = 4,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD = 5,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR = 6,
+ ERDMA_WC_VENDOR_INVALID_SQE = 0x20,
+ ERDMA_WC_VENDOR_ZERO_ORD = 0x21,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG = 0x30,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION = 0x31,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR = 0x32,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD = 0x33,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR = 0x34
+};
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
new file mode 100644
index 000000000000..07e743d24847
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba elasticRDMA adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *arg)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(arg);
+ struct erdma_dev *dev = container_of(nb, struct erdma_dev, netdev_nb);
+
+ if (dev->netdev == NULL || dev->netdev != netdev)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ dev->state = IB_PORT_ACTIVE;
+ erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
+ break;
+ case NETDEV_DOWN:
+ dev->state = IB_PORT_DOWN;
+ erdma_port_event(dev, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGE:
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_OK;
+}
+
+static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
+{
+ struct net_device *netdev;
+ int ret = -ENODEV;
+
+ /* Already bound to a net_device, so we skip. */
+ if (dev->netdev)
+ return 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev) {
+ /*
+ * In erdma, the paired netdev and ibdev should have the same
+ * MAC address. erdma can read that address from its PCIe BAR
+ * registers, but cannot obtain the paired netdev reference
+ * directly, so we traverse the netdev list here to find it.
+ */
+ if (ether_addr_equal_unaligned(netdev->perm_addr,
+ dev->attrs.peer_addr)) {
+ ret = ib_device_set_netdev(&dev->ibdev, netdev, 1);
+ if (ret) {
+ rtnl_unlock();
+ ibdev_warn(&dev->ibdev,
+ "failed (%d) to link netdev", ret);
+ return ret;
+ }
+
+ dev->netdev = netdev;
+ break;
+ }
+ }
+
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int erdma_device_register(struct erdma_dev *dev)
+{
+ struct ib_device *ibdev = &dev->ibdev;
+ int ret;
+
+ ret = erdma_enum_and_get_netdev(dev);
+ if (ret)
+ return ret;
+
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
+
+ ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "ib_register_device failed: ret = %d\n", ret);
+ return ret;
+ }
+
+ dev->netdev_nb.notifier_call = erdma_netdev_event;
+ ret = register_netdevice_notifier(&dev->netdev_nb);
+ if (ret) {
+ ibdev_err(&dev->ibdev, "failed to register notifier.\n");
+ ib_unregister_device(ibdev);
+ }
+
+ return ret;
+}
+
+static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
+{
+ struct erdma_dev *dev = data;
+
+ erdma_cmdq_completion_handler(&dev->cmdq);
+ erdma_aeq_event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_dwqe_resource_init(struct erdma_dev *dev)
+{
+ int total_pages, type0, type1;
+
+ dev->attrs.grp_num = erdma_reg_read32(dev, ERDMA_REGS_GRP_NUM_REG);
+
+ if (dev->attrs.grp_num < 4)
+ dev->attrs.disable_dwqe = true;
+ else
+ dev->attrs.disable_dwqe = false;
+
+ /* One page contains 4 groups. */
+ total_pages = dev->attrs.grp_num * 4;
+
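+ /* Split the available doorbell pages between type0 and type1 direct-WQE resources. */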
+ if (dev->attrs.grp_num >= ERDMA_DWQE_MAX_GRP_CNT) {
+ dev->attrs.grp_num = ERDMA_DWQE_MAX_GRP_CNT;
+ type0 = ERDMA_DWQE_TYPE0_CNT;
+ type1 = ERDMA_DWQE_TYPE1_CNT / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+ } else {
+ type1 = total_pages / 3;
+ type0 = total_pages - type1 - 1;
+ }
+
+ dev->attrs.dwqe_pages = type0;
+ dev->attrs.dwqe_entries = type1 * ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+}
+
+static int erdma_request_vectors(struct erdma_dev *dev)
+{
+ int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
+ int ret;
+
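+ /* Vector 0 serves the cmdq and AEQ; the remaining vectors serve the CEQs, so request one vector per CPU plus one. */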
+ ret = pci_alloc_irq_vectors(dev->pdev, 1, expect_irq_num, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "request irq vectors failed(%d)\n",
+ ret);
+ return ret;
+ }
+ dev->attrs.irq_num = ret;
+
+ return 0;
+}
+
+static int erdma_comm_irq_init(struct erdma_dev *dev)
+{
+ snprintf(dev->comm_irq.name, ERDMA_IRQNAME_SIZE, "erdma-common@pci:%s",
+ pci_name(dev->pdev));
+ dev->comm_irq.msix_vector =
+ pci_irq_vector(dev->pdev, ERDMA_MSIX_VECTOR_CMDQ);
+
+ cpumask_set_cpu(cpumask_first(cpumask_of_pcibus(dev->pdev->bus)),
+ &dev->comm_irq.affinity_hint_mask);
+ irq_set_affinity_hint(dev->comm_irq.msix_vector,
+ &dev->comm_irq.affinity_hint_mask);
+
+ return request_irq(dev->comm_irq.msix_vector, erdma_comm_irq_handler, 0,
+ dev->comm_irq.name, dev);
+}
+
+static void erdma_comm_irq_uninit(struct erdma_dev *dev)
+{
+ irq_set_affinity_hint(dev->comm_irq.msix_vector, NULL);
+ free_irq(dev->comm_irq.msix_vector, dev);
+}
+
+static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
+{
+ int ret;
+
+ erdma_dwqe_resource_init(dev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ERDMA_PCI_WIDTH));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ return 0;
+}
+
+static void erdma_device_uninit(struct erdma_dev *dev)
+{
+ u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
+
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+}
+
+static const struct pci_device_id erdma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
+ {}
+};
+
+static int erdma_probe_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev;
+ int bars, err;
+ u32 version;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed(%d)\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ dev = ib_alloc_device(erdma_dev, ibdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "ib_alloc_device failed\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ dev->pdev = pdev;
+ dev->attrs.numa_node = dev_to_node(&pdev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (bars != ERDMA_BAR_MASK || err) {
+ err = err ? err : -EINVAL;
+ goto err_ib_device_release;
+ }
+
+ dev->func_bar_addr = pci_resource_start(pdev, ERDMA_FUNC_BAR);
+ dev->func_bar_len = pci_resource_len(pdev, ERDMA_FUNC_BAR);
+
+ dev->func_bar =
+ devm_ioremap(&pdev->dev, dev->func_bar_addr, dev->func_bar_len);
+ if (!dev->func_bar) {
+ dev_err(&pdev->dev, "devm_ioremap failed.\n");
+ err = -EFAULT;
+ goto err_release_bars;
+ }
+
+ version = erdma_reg_read32(dev, ERDMA_REGS_VERSION_REG);
+ if (version == 0) {
+ /* A version of zero means this is a non-functional function. */
+ err = -ENODEV;
+ goto err_iounmap_func_bar;
+ }
+
+ err = erdma_device_init(dev, pdev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_request_vectors(dev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_comm_irq_init(dev);
+ if (err)
+ goto err_free_vectors;
+
+ err = erdma_aeq_init(dev);
+ if (err)
+ goto err_uninit_comm_irq;
+
+ err = erdma_cmdq_init(dev);
+ if (err)
+ goto err_uninit_aeq;
+
+ err = erdma_ceqs_init(dev);
+ if (err)
+ goto err_uninit_cmdq;
+
+ erdma_finish_cmdq_init(dev);
+
+ return 0;
+
+err_uninit_cmdq:
+ erdma_device_uninit(dev);
+ erdma_cmdq_destroy(dev);
+
+err_uninit_aeq:
+ erdma_aeq_destroy(dev);
+
+err_uninit_comm_irq:
+ erdma_comm_irq_uninit(dev);
+
+err_free_vectors:
+ pci_free_irq_vectors(dev->pdev);
+
+err_iounmap_func_bar:
+ devm_iounmap(&pdev->dev, dev->func_bar);
+
+err_release_bars:
+ pci_release_selected_regions(pdev, bars);
+
+err_ib_device_release:
+ ib_dealloc_device(&dev->ibdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void erdma_remove_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ erdma_ceqs_uninit(dev);
+
+ erdma_device_uninit(dev);
+
+ erdma_cmdq_destroy(dev);
+ erdma_aeq_destroy(dev);
+ erdma_comm_irq_uninit(dev);
+ pci_free_irq_vectors(dev->pdev);
+
+ devm_iounmap(&pdev->dev, dev->func_bar);
+ pci_release_selected_regions(pdev, ERDMA_BAR_MASK);
+
+ ib_dealloc_device(&dev->ibdev);
+
+ pci_disable_device(pdev);
+}
+
+#define ERDMA_GET_CAP(name, cap) FIELD_GET(ERDMA_CMD_DEV_CAP_##name##_MASK, cap)
+
+static int erdma_dev_attrs_init(struct erdma_dev *dev)
+{
+ int err;
+ u64 req_hdr, cap0, cap1;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_DEVICE);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1);
+ if (err)
+ return err;
+
+ dev->attrs.max_cqe = 1 << ERDMA_GET_CAP(MAX_CQE, cap0);
+ dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
+ dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
+ dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
+ dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
+ dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
+ dev->attrs.max_mr = dev->attrs.max_qp << 1;
+ dev->attrs.max_cq = dev->attrs.max_qp << 1;
+
+ dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR;
+ dev->attrs.max_ord = ERDMA_MAX_ORD;
+ dev->attrs.max_ird = ERDMA_MAX_IRD;
+ dev->attrs.max_send_sge = ERDMA_MAX_SEND_SGE;
+ dev->attrs.max_recv_sge = ERDMA_MAX_RECV_SGE;
+ dev->attrs.max_sge_rd = ERDMA_MAX_SGE_RD;
+ dev->attrs.max_pd = ERDMA_MAX_PD;
+
+ dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
+ dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_QUERY_FW_INFO);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1);
+ if (!err)
+ dev->attrs.fw_version =
+ FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
+
+ return err;
+}
+
+static int erdma_res_cb_init(struct erdma_dev *dev)
+{
+ int i, j;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++) {
+ dev->res_cb[i].next_alloc_idx = 1;
+ spin_lock_init(&dev->res_cb[i].lock);
+ dev->res_cb[i].bitmap =
+ bitmap_zalloc(dev->res_cb[i].max_cap, GFP_KERNEL);
+ if (!dev->res_cb[i].bitmap)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ bitmap_free(dev->res_cb[j].bitmap);
+
+ return -ENOMEM;
+}
+
+static void erdma_res_cb_free(struct erdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++)
+ bitmap_free(dev->res_cb[i].bitmap);
+}
+
+static const struct ib_device_ops erdma_device_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_ERDMA,
+ .uverbs_abi_ver = ERDMA_ABI_VERSION,
+
+ .alloc_mr = erdma_ib_alloc_mr,
+ .alloc_pd = erdma_alloc_pd,
+ .alloc_ucontext = erdma_alloc_ucontext,
+ .create_cq = erdma_create_cq,
+ .create_qp = erdma_create_qp,
+ .dealloc_pd = erdma_dealloc_pd,
+ .dealloc_ucontext = erdma_dealloc_ucontext,
+ .dereg_mr = erdma_dereg_mr,
+ .destroy_cq = erdma_destroy_cq,
+ .destroy_qp = erdma_destroy_qp,
+ .get_dma_mr = erdma_get_dma_mr,
+ .get_port_immutable = erdma_get_port_immutable,
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+ .map_mr_sg = erdma_map_mr_sg,
+ .mmap = erdma_mmap,
+ .mmap_free = erdma_mmap_free,
+ .modify_qp = erdma_modify_qp,
+ .post_recv = erdma_post_recv,
+ .post_send = erdma_post_send,
+ .poll_cq = erdma_poll_cq,
+ .query_device = erdma_query_device,
+ .query_gid = erdma_query_gid,
+ .query_port = erdma_query_port,
+ .query_qp = erdma_query_qp,
+ .req_notify_cq = erdma_req_notify_cq,
+ .reg_user_mr = erdma_reg_user_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, erdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_qp, erdma_qp, ibqp),
+};
+
+static int erdma_ib_device_add(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+ struct ib_device *ibdev = &dev->ibdev;
+ u64 mac;
+ int ret;
+
+ ret = erdma_dev_attrs_init(dev);
+ if (ret)
+ return ret;
+
+ ibdev->node_type = RDMA_NODE_RNIC;
+ memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
+
+ /*
+ * Current model (one-to-one device association):
+ * One ERDMA device per net_device or, equivalently,
+ * per physical port.
+ */
+ ibdev->phys_port_cnt = 1;
+ ibdev->num_comp_vectors = dev->attrs.irq_num - 1;
+
+ ib_set_device_ops(ibdev, &erdma_device_ops);
+
+ INIT_LIST_HEAD(&dev->cep_list);
+
+ spin_lock_init(&dev->lock);
+ xa_init_flags(&dev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&dev->cq_xa, XA_FLAGS_ALLOC1);
+ dev->next_alloc_cqn = 1;
+ dev->next_alloc_qpn = 1;
+
+ ret = erdma_res_cb_init(dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&dev->db_bitmap_lock);
+ bitmap_zero(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
+ bitmap_zero(dev->sdb_entry, ERDMA_DWQE_TYPE1_CNT);
+
+ atomic_set(&dev->num_ctx, 0);
+
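+ /* Read the device MAC from the BAR registers; it is used later to find the paired netdev. */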
+ mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
+ mac |= (u64)erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_H_REG) << 32;
+
+ u64_to_ether_addr(mac, dev->attrs.peer_addr);
+
+ ret = erdma_device_register(dev);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+
+ erdma_res_cb_free(dev);
+
+ return ret;
+}
+
+static void erdma_ib_device_remove(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ unregister_netdevice_notifier(&dev->netdev_nb);
+ ib_unregister_device(&dev->ibdev);
+
+ erdma_res_cb_free(dev);
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+}
+
+static int erdma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = erdma_probe_dev(pdev);
+ if (ret)
+ return ret;
+
+ ret = erdma_ib_device_add(pdev);
+ if (ret) {
+ erdma_remove_dev(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void erdma_remove(struct pci_dev *pdev)
+{
+ erdma_ib_device_remove(pdev);
+ erdma_remove_dev(pdev);
+}
+
+static struct pci_driver erdma_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = erdma_pci_tbl,
+ .probe = erdma_probe,
+ .remove = erdma_remove
+};
+
+MODULE_DEVICE_TABLE(pci, erdma_pci_tbl);
+
+static __init int erdma_init_module(void)
+{
+ int ret;
+
+ ret = erdma_cm_init();
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&erdma_pci_driver);
+ if (ret)
+ erdma_cm_exit();
+
+ return ret;
+}
+
+static void __exit erdma_exit_module(void)
+{
+ pci_unregister_driver(&erdma_pci_driver);
+
+ erdma_cm_exit();
+}
+
+module_init(erdma_init_module);
+module_exit(erdma_exit_module);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
new file mode 100644
index 000000000000..bc3ec22a62c5
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2021, Alibaba Group */
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+void erdma_qp_llp_close(struct erdma_qp *qp)
+{
+ struct erdma_qp_attrs qp_attrs;
+
+ down_write(&qp->state_lock);
+
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_RTS:
+ case ERDMA_QP_STATE_RTR:
+ case ERDMA_QP_STATE_IDLE:
+ case ERDMA_QP_STATE_TERMINATE:
+ qp_attrs.state = ERDMA_QP_STATE_CLOSING;
+ erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ break;
+ case ERDMA_QP_STATE_CLOSING:
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+
+ up_write(&qp->state_lock);
+}
+
+struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
+{
+ struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);
+
+ if (qp)
+ return &qp->ibqp;
+
+ return NULL;
+}
+
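+/*
+ * Transition a QP to RTS. The connection endpoint (LLP socket) and the MPA
+ * exchange must already be in place, since the TCP 4-tuple and the current
+ * send/receive sequence numbers are handed to the device in the MODIFY_QP
+ * command.
+ */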
+static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ int ret;
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+ struct tcp_sock *tp;
+ struct erdma_cep *cep = qp->cep;
+ struct sockaddr_storage local_addr, remote_addr;
+
+ if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
+ return -EINVAL;
+
+ if (!(mask & ERDMA_QP_ATTR_MPA))
+ return -EINVAL;
+
+ ret = getname_local(cep->sock, &local_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = getname_peer(cep->sock, &remote_addr);
+ if (ret < 0)
+ return ret;
+
+ qp->attrs.state = ERDMA_QP_STATE_RTS;
+
+ tp = tcp_sk(qp->cep->sock->sk);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
+ req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
+ req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
+ req.dport = to_sockaddr_in(remote_addr).sin_port;
+ req.sport = to_sockaddr_in(local_addr).sin_port;
+
+ req.send_nxt = tp->snd_nxt;
+ /* Reserve TCP sequence space for the MPA response on the server (passive) side. */
+ if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
+ req.recv_nxt = tp->rcv_nxt;
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+
+ qp->attrs.state = attrs->state;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
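+/*
+ * Core QP state machine; callers are expected to hold qp->state_lock for
+ * write while changing the state.
+ */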
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ int drop_conn, ret = 0;
+
+ if (!mask)
+ return 0;
+
+ if (!(mask & ERDMA_QP_ATTR_STATE))
+ return 0;
+
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_IDLE:
+ case ERDMA_QP_STATE_RTR:
+ if (attrs->state == ERDMA_QP_STATE_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ }
+ break;
+ case ERDMA_QP_STATE_RTS:
+ drop_conn = 0;
+
+ if (attrs->state == ERDMA_QP_STATE_CLOSING) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ drop_conn = 1;
+ } else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
+ qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ drop_conn = 1;
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ drop_conn = 1;
+ }
+
+ if (drop_conn)
+ erdma_qp_cm_drop(qp);
+
+ break;
+ case ERDMA_QP_STATE_TERMINATE:
+ if (attrs->state == ERDMA_QP_STATE_ERROR)
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ break;
+ case ERDMA_QP_STATE_CLOSING:
+ if (attrs->state == ERDMA_QP_STATE_IDLE) {
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
+ return -ECONNABORTED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void erdma_qp_safe_free(struct kref *ref)
+{
+ struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
+
+ complete(&qp->safe_free);
+}
+
+void erdma_qp_put(struct erdma_qp *qp)
+{
+ WARN_ON(kref_read(&qp->ref) < 1);
+ kref_put(&qp->ref, erdma_qp_safe_free);
+}
+
+void erdma_qp_get(struct erdma_qp *qp)
+{
+ kref_get(&qp->ref);
+}
+
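+/*
+ * Copy the payload of an inline send WR directly into the SQ buffer,
+ * wrapping across SQE basic blocks (SQEBBs) as needed. Returns the number
+ * of bytes copied, or -EINVAL if the total exceeds ERDMA_MAX_INLINE.
+ */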
+static int fill_inline_data(struct erdma_qp *qp,
+ const struct ib_send_wr *send_wr, u16 wqe_idx,
+ u32 sgl_offset, __le32 *length_field)
+{
+ u32 remain_size, copy_size, data_off, bytes = 0;
+ char *data;
+ int i = 0;
+
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ while (i < send_wr->num_sge) {
+ bytes += send_wr->sg_list[i].length;
+ if (bytes > (int)ERDMA_MAX_INLINE)
+ return -EINVAL;
+
+ remain_size = send_wr->sg_list[i].length;
+ data_off = 0;
+
+ while (1) {
+ copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);
+
+ memcpy(data + sgl_offset,
+ (void *)(uintptr_t)send_wr->sg_list[i].addr +
+ data_off,
+ copy_size);
+ remain_size -= copy_size;
+ data_off += copy_size;
+ sgl_offset += copy_size;
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ if (!remain_size)
+ break;
+ }
+
+ i++;
+ }
+ *length_field = cpu_to_le32(bytes);
+
+ return bytes;
+}
+
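+/*
+ * Copy the ib_sge list of a WR into the SQ buffer right after the fixed
+ * part of the SQE; the SGL area must start at a 16-byte aligned offset.
+ */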
+static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
+ u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
+{
+ int i = 0;
+ u32 bytes = 0;
+ char *sgl;
+
+ if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
+ return -EINVAL;
+
+ if (sgl_offset & 0xF)
+ return -EINVAL;
+
+ while (i < send_wr->num_sge) {
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+
+ bytes += send_wr->sg_list[i].length;
+ memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
+ sizeof(struct ib_sge));
+
+ sgl_offset += sizeof(struct ib_sge);
+ i++;
+ }
+
+ *length_field = cpu_to_le32(bytes);
+ return 0;
+}
+
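+/*
+ * Build one SQE from an ib_send_wr: fill the common 64-bit header, the
+ * opcode-specific fields and the SGL (inline data or scatter list), then
+ * advance the producer index by the number of SQEBBs consumed.
+ */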
+static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ const struct ib_send_wr *send_wr)
+{
+ u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
+ u32 idx = *pi & (qp->attrs.sq_size - 1);
+ enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_readreq_sqe *read_sqe;
+ struct erdma_reg_mr_sqe *regmr_sge;
+ struct erdma_write_sqe *write_sqe;
+ struct erdma_send_sqe *send_sqe;
+ struct ib_rdma_wr *rdma_wr;
+ struct erdma_mr *mr;
+ __le32 *length_field;
+ u64 wqe_hdr, *entry;
+ struct ib_sge *sge;
+ u32 attrs;
+ int ret;
+
+ entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ /* Clear the SQE header section. */
+ *entry = 0;
+
+ qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
+ flags = send_wr->send_flags;
+ wqe_hdr = FIELD_PREP(
+ ERDMA_SQE_HDR_CE_MASK,
+ ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
+ flags & IB_SEND_SOLICITED ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
+ flags & IB_SEND_FENCE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
+ flags & IB_SEND_INLINE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
+
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ hw_op = ERDMA_OP_WRITE;
+ if (op == IB_WR_RDMA_WRITE_WITH_IMM)
+ hw_op = ERDMA_OP_WRITE_WITH_IMM;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ write_sqe = (struct erdma_write_sqe *)entry;
+
+ write_sqe->imm_data = send_wr->ex.imm_data;
+ write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
+ write_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
+ write_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));
+
+ length_field = &write_sqe->length;
+ wqe_size = sizeof(struct erdma_write_sqe);
+ sgl_offset = wqe_size;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ read_sqe = (struct erdma_readreq_sqe *)entry;
+ if (unlikely(send_wr->num_sge != 1))
+ return -EINVAL;
+ hw_op = ERDMA_OP_READ;
+ if (op == IB_WR_RDMA_READ_WITH_INV) {
+ hw_op = ERDMA_OP_READ_WITH_INV;
+ read_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
+ read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
+ read_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
+ read_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));
+
+ sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ sge->addr = rdma_wr->remote_addr;
+ sge->lkey = rdma_wr->rkey;
+ sge->length = send_wr->sg_list[0].length;
+ wqe_size = sizeof(struct erdma_readreq_sqe) +
+ send_wr->num_sge * sizeof(struct ib_sge);
+
+ goto out;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ send_sqe = (struct erdma_send_sqe *)entry;
+ hw_op = ERDMA_OP_SEND;
+ if (op == IB_WR_SEND_WITH_IMM) {
+ hw_op = ERDMA_OP_SEND_WITH_IMM;
+ send_sqe->imm_data = send_wr->ex.imm_data;
+ } else if (op == IB_WR_SEND_WITH_INV) {
+ hw_op = ERDMA_OP_SEND_WITH_INV;
+ send_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ length_field = &send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe);
+ sgl_offset = wqe_size;
+
+ break;
+ case IB_WR_REG_MR:
+ wqe_hdr |=
+ FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ mr = to_emr(reg_wr(send_wr)->mr);
+
+ mr->access = ERDMA_MR_ACC_LR |
+ to_erdma_access_flags(reg_wr(send_wr)->access);
+ regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
+ regmr_sge->length = cpu_to_le32(mr->ibmr.length);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
+ attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
+ FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
+ FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+ mr->mem.mtt_nents);
+
+ if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
+ /* Copy the small MTT list directly into the SQE to speed up registration. */
+ memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT),
+ mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
+ wqe_size = sizeof(struct erdma_reg_mr_sqe) +
+ MTT_SIZE(mr->mem.mtt_nents);
+ } else {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ }
+
+ regmr_sge->attrs = cpu_to_le32(attrs);
+ goto out;
+ case IB_WR_LOCAL_INV:
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_LOCAL_INV);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ goto out;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (flags & IB_SEND_INLINE) {
+ ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
+ length_field);
+ if (ret < 0)
+ return -EINVAL;
+ wqe_size += ret;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
+ } else {
+ ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
+ if (ret)
+ return -EINVAL;
+ wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
+ send_wr->num_sge);
+ }
+
+out:
+ wqebb_cnt = SQEBB_COUNT(wqe_size);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
+ *pi += wqebb_cnt;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);
+
+ *entry = wqe_hdr;
+
+ return 0;
+}
+
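+/*
+ * Ring the SQ doorbell: mirror the doorbell value into the in-memory
+ * doorbell record, then write it to the hardware doorbell register.
+ */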
+static void kick_sq_db(struct erdma_qp *qp, u16 pi)
+{
+ u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
+ FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
+
+ *(u64 *)qp->kern_qp.sq_db_info = db_data;
+ writeq(db_data, qp->kern_qp.hw_sq_db);
+}
+
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+ const struct ib_send_wr *wr = send_wr;
+ unsigned long flags;
+ u16 sq_pi;
+
+ if (!send_wr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&qp->lock, flags);
+ sq_pi = qp->kern_qp.sq_pi;
+
+ while (wr) {
+ if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
+ ret = -ENOMEM;
+ *bad_send_wr = send_wr;
+ break;
+ }
+
+ ret = erdma_push_one_sqe(qp, &sq_pi, wr);
+ if (ret) {
+ *bad_send_wr = wr;
+ break;
+ }
+ qp->kern_qp.sq_pi = sq_pi;
+ kick_sq_db(qp, sq_pi);
+
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ return ret;
+}
+
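+/* Post a single receive WR; at most one SGE per RQE is supported. */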
+static int erdma_post_recv_one(struct erdma_qp *qp,
+ const struct ib_recv_wr *recv_wr)
+{
+ struct erdma_rqe *rqe =
+ get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
+ qp->attrs.rq_size, RQE_SHIFT);
+
+ rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
+ rqe->qpn = cpu_to_le32(QP_ID(qp));
+
+ if (recv_wr->num_sge == 0) {
+ rqe->length = 0;
+ } else if (recv_wr->num_sge == 1) {
+ rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
+ rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
+ rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
+ } else {
+ return -EINVAL;
+ }
+
+ *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
+ writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
+
+ qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
+ recv_wr->wr_id;
+ qp->kern_qp.rq_pi++;
+
+ return 0;
+}
+
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
+{
+ const struct ib_recv_wr *wr = recv_wr;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ while (wr) {
+ ret = erdma_post_recv_one(qp, wr);
+ if (ret) {
+ *bad_recv_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
new file mode 100644
index 000000000000..699bd3f59cd3
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+{
+ struct erdma_cmdq_create_qp_req req;
+ struct erdma_pd *pd = to_epd(qp->ibqp.pd);
+ struct erdma_uqp *user_qp;
+ u64 resp0, resp1;
+ int err;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
+ ilog2(qp->attrs.sq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
+ ilog2(qp->attrs.rq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+
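+ /*
+ * Kernel QPs use contiguous DMA buffers described by a single inline
+ * MTT entry; user QPs pass in the MTT layout built from their umem.
+ */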
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
+
+ req.sq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+ req.rq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ ERDMA_MR_INLINE_MTT);
+ req.rq_mtt_cfg = req.sq_mtt_cfg;
+
+ req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
+ req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
+ req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
+ (qp->attrs.sq_size << SQEBB_SHIFT);
+ req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
+ (qp->attrs.rq_size << RQE_SHIFT);
+ } else {
+ user_qp = &qp->user_qp;
+ req.sq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
+ req.sq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+
+ req.rq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
+ req.rq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
+ req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->sq_mtt.mtt_nents) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ user_qp->sq_mtt.mtt_type);
+
+ req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
+ req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->rq_mtt.mtt_nents) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ user_qp->rq_mtt.mtt_type);
+
+ req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
+ req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];
+
+ req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
+ req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
+ }
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+ &resp1);
+ if (!err)
+ qp->attrs.cookie =
+ FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
+
+ return err;
+}
+
+static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
+{
+ struct erdma_cmdq_reg_mr_req req;
+ struct erdma_pd *pd = to_epd(mr->ibmr.pd);
+ u64 *phy_addr;
+ int i;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
+ FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
+ FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access) |
+ FIELD_PREP(ERDMA_CMD_REGMR_ACC_MODE_MASK, 0);
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
+ ilog2(mr->mem.page_size)) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
+
+ if (mr->type == ERDMA_MR_TYPE_DMA)
+ goto post_cmd;
+
+ if (mr->type == ERDMA_MR_TYPE_NORMAL) {
+ req.start_va = mr->mem.va;
+ req.size = mr->mem.len;
+ }
+
+ if (mr->type == ERDMA_MR_TYPE_FRMR ||
+ mr->mem.mtt_type == ERDMA_MR_INDIRECT_MTT) {
+ phy_addr = req.phy_addr;
+ *phy_addr = mr->mem.mtt_entry[0];
+ } else {
+ phy_addr = req.phy_addr;
+ for (i = 0; i < mr->mem.mtt_nents; i++)
+ *phy_addr++ = mr->mem.mtt_entry[i];
+ }
+
+post_cmd:
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+{
+ struct erdma_cmdq_create_cq_req req;
+ u32 page_size;
+ struct erdma_mem *mtt;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_CQ);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ page_size = SZ_32M;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(page_size) - PAGE_SHIFT);
+ req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
+ req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
+
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
+ ERDMA_MR_INLINE_MTT);
+
+ req.first_page_offset = 0;
+ req.cq_db_info_addr =
+ cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ } else {
+ mtt = &cq->user_cq.qbuf_mtt;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(mtt->page_size) - PAGE_SHIFT);
+ if (mtt->mtt_nents == 1) {
+ req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
+ req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+ } else {
+ req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
+ req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
+ }
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
+ mtt->mtt_nents);
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
+ mtt->mtt_type);
+
+ req.first_page_offset = mtt->page_offset;
+ req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
+ }
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
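+/*
+ * Simple round-robin index allocator over a bitmap: search from the last
+ * allocated index, wrap around once, and fail with -ENOSPC when full.
+ */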
+static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
+ res_cb->next_alloc_idx);
+ if (idx == res_cb->max_cap) {
+ idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
+ if (idx == res_cb->max_cap) {
+ res_cb->next_alloc_idx = 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ return -ENOSPC;
+ }
+ }
+
+ set_bit(idx, res_cb->bitmap);
+ res_cb->next_alloc_idx = idx + 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+
+ return idx;
+}
+
+static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
+{
+ unsigned long flags;
+ u32 used;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ used = __test_and_clear_bit(idx, res_cb->bitmap);
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ WARN_ON(!used);
+}
+
+static struct rdma_user_mmap_entry *
+erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
+ u32 size, u8 mmap_flag, u64 *mmap_offset)
+{
+ struct erdma_user_mmap_entry *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = (u64)address;
+ entry->mmap_flag = mmap_flag;
+
+ size = PAGE_ALIGN(size);
+
+ ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
+ size);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
+ struct ib_udata *unused)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->max_mr_size = dev->attrs.max_mr_size;
+ attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
+ attr->vendor_part_id = dev->pdev->device;
+ attr->hw_ver = dev->pdev->revision;
+ attr->max_qp = dev->attrs.max_qp - 1;
+ attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
+ attr->max_qp_rd_atom = dev->attrs.max_ord;
+ attr->max_qp_init_rd_atom = dev->attrs.max_ird;
+ attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
+ attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ ibdev->local_dma_lkey = dev->attrs.local_dma_key;
+ attr->max_send_sge = dev->attrs.max_send_sge;
+ attr->max_recv_sge = dev->attrs.max_recv_sge;
+ attr->max_sge_rd = dev->attrs.max_sge_rd;
+ attr->max_cq = dev->attrs.max_cq - 1;
+ attr->max_cqe = dev->attrs.max_cqe;
+ attr->max_mr = dev->attrs.max_mr;
+ attr->max_pd = dev->attrs.max_pd;
+ attr->max_mw = dev->attrs.max_mw;
+ attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
+ attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+ attr->fw_ver = dev->attrs.fw_version;
+
+ if (dev->netdev)
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->netdev->dev_addr);
+
+ return 0;
+}
+
+int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(gid, 0, sizeof(*gid));
+ ether_addr_copy(gid->raw, dev->attrs.peer_addr);
+
+ return 0;
+}
+
+int erdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+ struct net_device *ndev = dev->netdev;
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->gid_tbl_len = 1;
+ attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+ attr->max_msg_sz = -1;
+
+ if (!ndev)
+ goto out;
+
+ ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ if (netif_running(ndev) && netif_carrier_ok(ndev))
+ dev->state = IB_PORT_ACTIVE;
+ else
+ dev->state = IB_PORT_DOWN;
+ attr->state = dev->state;
+
+out:
+ if (dev->state == IB_PORT_ACTIVE)
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ else
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ return 0;
+}
+
+int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *port_immutable)
+{
+ port_immutable->gid_tbl_len = 1;
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ return 0;
+}
+
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int pdn;
+
+ pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
+ if (pdn < 0)
+ return pdn;
+
+ pd->pdn = pdn;
+
+ return 0;
+}
+
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);
+
+ return 0;
+}
+
+static int erdma_qp_validate_cap(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
+ (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
+ (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
+ (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
+ (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
+ !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int erdma_qp_validate_attr(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (attrs->srq)
+ return -EOPNOTSUPP;
+
+ if (!attrs->send_cq || !attrs->recv_cq)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void free_kernel_qp(struct erdma_qp *qp)
+{
+ struct erdma_dev *dev = qp->dev;
+
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+
+ if (qp->kern_qp.sq_buf)
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
+ qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+
+ if (qp->kern_qp.rq_buf)
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
+ qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+}
+
+static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ struct erdma_kqp *kqp = &qp->kern_qp;
+ int size;
+
+ if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
+ kqp->sig_all = 1;
+
+ kqp->sq_pi = 0;
+ kqp->sq_ci = 0;
+ kqp->rq_pi = 0;
+ kqp->rq_ci = 0;
+ kqp->hw_sq_db =
+ dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
+ kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
+
+ kqp->swr_tbl = vmalloc(qp->attrs.sq_size * sizeof(u64));
+ kqp->rwr_tbl = vmalloc(qp->attrs.rq_size * sizeof(u64));
+ if (!kqp->swr_tbl || !kqp->rwr_tbl)
+ goto err_out;
+
+ size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->sq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->sq_buf)
+ goto err_out;
+
+ size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->rq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->rq_buf)
+ goto err_out;
+
+ kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
+ kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);
+
+ return 0;
+
+err_out:
+ free_kernel_qp(qp);
+ return -ENOMEM;
+}
+
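+/*
+ * Pin a user buffer and build its MTT. Small mappings are stored inline in
+ * mem->mtt_entry[]; larger ones (or when forced) use a separately allocated
+ * and DMA-mapped indirect MTT buffer.
+ */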
+static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
+ u64 start, u64 len, int access, u64 virt,
+ unsigned long req_page_size, u8 force_indirect_mtt)
+{
+ struct ib_block_iter biter;
+ uint64_t *phy_addr = NULL;
+ int ret = 0;
+
+ mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
+ if (IS_ERR(mem->umem)) {
+ ret = PTR_ERR(mem->umem);
+ mem->umem = NULL;
+ return ret;
+ }
+
+ mem->va = virt;
+ mem->len = len;
+ mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
+ mem->page_offset = start & (mem->page_size - 1);
+ mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
+ mem->page_cnt = mem->mtt_nents;
+
+ if (mem->page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES ||
+ force_indirect_mtt) {
+ mem->mtt_type = ERDMA_MR_INDIRECT_MTT;
+ mem->mtt_buf =
+ alloc_pages_exact(MTT_SIZE(mem->page_cnt), GFP_KERNEL);
+ if (!mem->mtt_buf) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ phy_addr = mem->mtt_buf;
+ } else {
+ mem->mtt_type = ERDMA_MR_INLINE_MTT;
+ phy_addr = mem->mtt_entry;
+ }
+
+ rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) {
+ *phy_addr = rdma_block_iter_dma_address(&biter);
+ phy_addr++;
+ }
+
+ if (mem->mtt_type == ERDMA_MR_INDIRECT_MTT) {
+ mem->mtt_entry[0] =
+ dma_map_single(&dev->pdev->dev, mem->mtt_buf,
+ MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mem->mtt_entry[0])) {
+ free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
+ mem->mtt_buf = NULL;
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ }
+
+ return 0;
+
+error_ret:
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+
+ return ret;
+}
+
+static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ if (mem->mtt_buf) {
+ dma_unmap_single(&dev->pdev->dev, mem->mtt_entry[0],
+ MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
+ free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
+ }
+
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+}
+
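+/*
+ * Doorbell records of a user context share pinned pages: reuse an already
+ * mapped page covering dbrecords_va if one exists, otherwise pin a new page
+ * and add it to the per-context list. Pages are refcounted per user.
+ */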
+static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
+ u64 dbrecords_va,
+ struct erdma_user_dbrecords_page **dbr_page,
+ dma_addr_t *dma_addr)
+{
+ struct erdma_user_dbrecords_page *page = NULL;
+ int rv = 0;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+
+ list_for_each_entry(page, &ctx->dbrecords_page_list, list)
+ if (page->va == (dbrecords_va & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ page->va = (dbrecords_va & PAGE_MASK);
+ page->refcnt = 0;
+
+ page->umem = ib_umem_get(ctx->ibucontext.device,
+ dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ rv = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &ctx->dbrecords_page_list);
+
+found:
+ *dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (dbrecords_va & ~PAGE_MASK);
+ *dbr_page = page;
+ page->refcnt++;
+
+out:
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+ return rv;
+}
+
+static void
+erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
+ struct erdma_user_dbrecords_page **dbr_page)
+{
+ if (!ctx || !(*dbr_page))
+ return;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+ if (--(*dbr_page)->refcnt == 0) {
+ list_del(&(*dbr_page)->list);
+ ib_umem_release((*dbr_page)->umem);
+ kfree(*dbr_page);
+ }
+
+ *dbr_page = NULL;
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+}
+
+static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ u64 va, u32 len, u64 db_info_va)
+{
+ dma_addr_t db_info_dma_addr;
+ u32 rq_offset;
+ int ret;
+
+ if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
+ qp->attrs.rq_size * RQE_SIZE))
+ return -EINVAL;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
+ qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
+ (SZ_1M - SZ_4K), 1);
+ if (ret)
+ return ret;
+
+ rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
+ qp->user_qp.rq_offset = rq_offset;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+ qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
+ (SZ_1M - SZ_4K), 1);
+ if (ret)
+ goto put_sq_mtt;
+
+ ret = erdma_map_user_dbrecords(uctx, db_info_va,
+ &qp->user_qp.user_dbr_page,
+ &db_info_dma_addr);
+ if (ret)
+ goto put_rq_mtt;
+
+ qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
+ qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;
+
+ return 0;
+
+put_rq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+
+put_sq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+
+ return ret;
+}
+
+static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
+{
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
+}
+
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_ureq_create_qp ureq;
+ struct erdma_uresp_create_qp uresp;
+ int ret;
+
+ ret = erdma_qp_validate_cap(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ ret = erdma_qp_validate_attr(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ qp->scq = to_ecq(attrs->send_cq);
+ qp->rcq = to_ecq(attrs->recv_cq);
+ qp->dev = dev;
+ qp->attrs.cc = dev->attrs.cc;
+
+ init_rwsem(&qp->state_lock);
+ kref_init(&qp->ref);
+ init_completion(&qp->safe_free);
+
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
+ ERDMA_MAX_WQEBB_PER_SQE);
+ qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);
+
+ if (uctx) {
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (ret)
+ goto err_out_xa;
+
+ ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
+ ureq.db_record_va);
+ if (ret)
+ goto err_out_xa;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.num_sqe = qp->attrs.sq_size;
+ uresp.num_rqe = qp->attrs.rq_size;
+ uresp.qp_id = QP_ID(qp);
+ uresp.rq_offset = qp->user_qp.rq_offset;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out_cmd;
+ } else {
+ init_kernel_qp(dev, qp, attrs);
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+ qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+
+ ret = create_qp_cmd(dev, qp);
+ if (ret)
+ goto err_out_cmd;
+
+ spin_lock_init(&qp->lock);
+
+ return 0;
+
+err_out_cmd:
+ if (uctx)
+ free_user_qp(qp, uctx);
+ else
+ free_kernel_qp(qp);
+err_out_xa:
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+err_out:
+ return ret;
+}
+
+static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
+{
+ int stag_idx;
+
+ stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
+ if (stag_idx < 0)
+ return stag_idx;
+
+ /* For now, the low 8-bit key field is always kept at zero. */
+ *stag = (stag_idx << 8);
+
+ return 0;
+}
+
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ struct erdma_mr *mr;
+ u32 stag;
+ int ret;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_DMA;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_remove_stag;
+
+ return &mr->ibmr;
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int ret;
+ u32 stag;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_FRMR;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ /* Overridden with the requested rights when the FRMR is posted via IB_WR_REG_MR. */
+ mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
+ ERDMA_MR_ACC_RW;
+
+ mr->mem.page_size = PAGE_SIZE; /* update it later. */
+ mr->mem.page_cnt = max_num_sg;
+ mr->mem.mtt_type = ERDMA_MR_INDIRECT_MTT;
+ mr->mem.mtt_buf =
+ alloc_pages_exact(MTT_SIZE(mr->mem.page_cnt), GFP_KERNEL);
+ if (!mr->mem.mtt_buf) {
+ ret = -ENOMEM;
+ goto out_remove_stag;
+ }
+
+ mr->mem.mtt_entry[0] =
+ dma_map_single(&dev->pdev->dev, mr->mem.mtt_buf,
+ MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mr->mem.mtt_entry[0])) {
+ ret = -ENOMEM;
+ goto out_free_mtt;
+ }
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_dma_unmap;
+
+ return &mr->ibmr;
+
+out_dma_unmap:
+ dma_unmap_single(&dev->pdev->dev, mr->mem.mtt_entry[0],
+ MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
+out_free_mtt:
+ free_pages_exact(mr->mem.mtt_buf, MTT_SIZE(mr->mem.page_cnt));
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+
+ if (mr->mem.mtt_nents >= mr->mem.page_cnt)
+ return -1;
+
+ *((u64 *)mr->mem.mtt_buf + mr->mem.mtt_nents) = addr;
+ mr->mem.mtt_nents++;
+
+ return 0;
+}
+
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+ int num;
+
+ mr->mem.mtt_nents = 0;
+
+ num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
+ erdma_set_page);
+
+ return num;
+}
+
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_udata *udata)
+{
+ struct erdma_mr *mr = NULL;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ u32 stag;
+ int ret;
+
+ if (!len || len > dev->attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
+ SZ_2G - SZ_4K, 0);
+ if (ret)
+ goto err_out_free;
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto err_out_put_mtt;
+
+ mr->ibmr.lkey = mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->mem.va = virt;
+ mr->mem.len = len;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
+ mr->valid = 1;
+ mr->type = ERDMA_MR_TYPE_NORMAL;
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto err_out_mr;
+
+ return &mr->ibmr;
+
+err_out_mr:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+err_out_put_mtt:
+ put_mtt_entries(dev, &mr->mem);
+
+err_out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibmr->device);
+ struct erdma_cmdq_dereg_mr_req req;
+ int ret;
+
+ mr = to_emr(ibmr);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DEREG_MR);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);
+
+ put_mtt_entries(dev, &mr->mem);
+
+ kfree(mr);
+ return 0;
+}
+
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ int err;
+ struct erdma_cmdq_destroy_cq_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_CQ);
+ req.cqn = cq->cqn;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return err;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ } else {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ }
+
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return 0;
+}
+
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_qp_attrs qp_attrs;
+ int err;
+ struct erdma_cmdq_destroy_qp_req req;
+
+ down_write(&qp->state_lock);
+ qp_attrs.state = ERDMA_QP_STATE_ERROR;
+ erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ up_write(&qp->state_lock);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_QP);
+ req.qpn = QP_ID(qp);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return err;
+
+ erdma_qp_put(qp);
+ wait_for_completion(&qp->safe_free);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
+ qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
+ qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ } else {
+ put_mtt_entries(dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(dev, &qp->user_qp.rq_mtt);
+ erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
+ }
+
+ if (qp->cep)
+ erdma_cep_put(qp->cep);
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+
+ return 0;
+}
+
+void erdma_qp_get_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_get(to_eqp(ibqp));
+}
+
+void erdma_qp_put_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_put(to_eqp(ibqp));
+}
+
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct erdma_user_mmap_entry *entry;
+ pgprot_t prot;
+ int err;
+
+ rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
+ if (!rdma_entry)
+ return -EINVAL;
+
+ entry = to_emmap(rdma_entry);
+
+ switch (entry->mmap_flag) {
+ case ERDMA_MMAP_IO_NC:
+ /* map doorbell. */
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
+}
+
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ kfree(entry);
+}
+
+#define ERDMA_SDB_PAGE 0
+#define ERDMA_SDB_ENTRY 1
+#define ERDMA_SDB_SHARED 2
+
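+/*
+ * Choose the send doorbell resource for a new user context: prefer a
+ * dedicated doorbell page, then a dedicated entry in a shared page, and
+ * fall back to the shared doorbell page when direct WQE is disabled or
+ * nothing else is left.
+ */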
+static void alloc_db_resources(struct erdma_dev *dev,
+ struct erdma_ucontext *ctx)
+{
+ u32 bitmap_idx;
+ struct erdma_devattr *attrs = &dev->attrs;
+
+ if (attrs->disable_dwqe)
+ goto alloc_normal_db;
+
+ /* Try to allocate a dedicated SDB page first. */
+ spin_lock(&dev->db_bitmap_lock);
+ bitmap_idx = find_first_zero_bit(dev->sdb_page, attrs->dwqe_pages);
+ if (bitmap_idx != attrs->dwqe_pages) {
+ set_bit(bitmap_idx, dev->sdb_page);
+ spin_unlock(&dev->db_bitmap_lock);
+
+ ctx->sdb_type = ERDMA_SDB_PAGE;
+ ctx->sdb_idx = bitmap_idx;
+ ctx->sdb_page_idx = bitmap_idx;
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
+ (bitmap_idx << PAGE_SHIFT);
+ ctx->sdb_page_off = 0;
+
+ return;
+ }
+
+ bitmap_idx = find_first_zero_bit(dev->sdb_entry, attrs->dwqe_entries);
+ if (bitmap_idx != attrs->dwqe_entries) {
+ set_bit(bitmap_idx, dev->sdb_entry);
+ spin_unlock(&dev->db_bitmap_lock);
+
+ ctx->sdb_type = ERDMA_SDB_ENTRY;
+ ctx->sdb_idx = bitmap_idx;
+ ctx->sdb_page_idx = attrs->dwqe_pages +
+ bitmap_idx / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+ ctx->sdb_page_off = bitmap_idx % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
+ (ctx->sdb_page_idx << PAGE_SHIFT);
+
+ return;
+ }
+
+ spin_unlock(&dev->db_bitmap_lock);
+
+alloc_normal_db:
+ ctx->sdb_type = ERDMA_SDB_SHARED;
+ ctx->sdb_idx = 0;
+ ctx->sdb_page_idx = ERDMA_SDB_SHARED_PAGE_INDEX;
+ ctx->sdb_page_off = 0;
+
+ ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
+}
+
+static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
+{
+ rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+ int ret;
+ struct erdma_uresp_alloc_ctx uresp = {};
+
+ if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ INIT_LIST_HEAD(&ctx->dbrecords_page_list);
+ mutex_init(&ctx->dbrecords_page_mutex);
+
+ alloc_db_resources(dev, ctx);
+
+ ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
+ ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ if (udata->outlen < sizeof(uresp)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
+ if (!ctx->sq_db_mmap_entry) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
+ if (!ctx->rq_db_mmap_entry) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
+ if (!ctx->cq_db_mmap_entry) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ uresp.dev_id = dev->pdev->device;
+ uresp.sdb_type = ctx->sdb_type;
+ uresp.sdb_offset = ctx->sdb_page_off;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ erdma_uctx_user_mmap_entries_remove(ctx);
+ atomic_dec(&dev->num_ctx);
+ return ret;
+}
+
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+
+ spin_lock(&dev->db_bitmap_lock);
+ if (ctx->sdb_type == ERDMA_SDB_PAGE)
+ clear_bit(ctx->sdb_idx, dev->sdb_page);
+ else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
+ clear_bit(ctx->sdb_idx, dev->sdb_entry);
+
+ erdma_uctx_user_mmap_entries_remove(ctx);
+
+ spin_unlock(&dev->db_bitmap_lock);
+
+ atomic_dec(&dev->num_ctx);
+}
+
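+/* Map the generic IB QP states onto the QP states used by erdma. */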
+static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
+ [IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
+ [IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
+ [IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
+ [IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
+};
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp_attrs new_attrs;
+ enum erdma_qp_attr_mask erdma_attr_mask = 0;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ memset(&new_attrs, 0, sizeof(new_attrs));
+
+ if (attr_mask & IB_QP_STATE) {
+ new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
+
+ erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
+ }
+
+ down_write(&qp->state_lock);
+
+ ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
+
+ up_write(&qp->state_lock);
+
+ return ret;
+}
+
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct erdma_qp *qp;
+ struct erdma_dev *dev;
+
+ if (ibqp && qp_attr && qp_init_attr) {
+ qp = to_eqp(ibqp);
+ dev = to_edev(ibqp->device);
+ } else {
+ return -EINVAL;
+ }
+
+ qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+ qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+
+ qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
+ qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;
+
+ qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
+ qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ return 0;
+}
+
+static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
+ struct erdma_ureq_create_cq *ureq)
+{
+ int ret;
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
+ ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
+ 1);
+ if (ret)
+ return ret;
+
+ ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
+ &cq->user_cq.user_dbr_page,
+ &cq->user_cq.db_info_dma_addr);
+ if (ret)
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+
+ return ret;
+}
+
+static int erdma_init_kernel_cq(struct erdma_cq *cq)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ cq->kern_cq.qbuf =
+ dma_alloc_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
+ if (!cq->kern_cq.qbuf)
+ return -ENOMEM;
+
+ cq->kern_cq.db_record =
+ (u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
+ spin_lock_init(&cq->kern_cq.lock);
+ /* Use the default CQ doorbell address. */
+ cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ return 0;
+}
+
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ unsigned int depth = attr->cqe;
+ int ret;
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+
+ if (depth > dev->attrs.max_cqe)
+ return -EINVAL;
+
+ depth = roundup_pow_of_two(depth);
+ cq->ibcq.cqe = depth;
+ cq->depth = depth;
+ cq->assoc_eqn = attr->comp_vector + 1;
+
+ ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
+ XA_LIMIT(1, dev->attrs.max_cq - 1),
+ &dev->next_alloc_cqn, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ struct erdma_ureq_create_cq ureq;
+ struct erdma_uresp_create_cq uresp;
+
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (ret)
+ goto err_out_xa;
+
+ ret = erdma_init_user_cq(ctx, cq, &ureq);
+ if (ret)
+ goto err_out_xa;
+
+ uresp.cq_id = cq->cqn;
+ uresp.num_cqe = depth;
+
+ ret = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (ret)
+ goto err_free_res;
+ } else {
+ ret = erdma_init_kernel_cq(cq);
+ if (ret)
+ goto err_out_xa;
+ }
+
+ ret = create_cq_cmd(dev, cq);
+ if (ret)
+ goto err_free_res;
+
+ return 0;
+
+err_free_res:
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ } else {
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(depth << CQE_SHIFT),
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ }
+
+err_out_xa:
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return ret;
+}
+
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
+{
+ struct ib_event event;
+
+ event.device = &dev->ibdev;
+ event.element.port_num = 1;
+ event.event = reason;
+
+ ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
new file mode 100644
index 000000000000..c7baddb1f292
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_VERBS_H__
+#define __ERDMA_VERBS_H__
+
+#include <linux/errno.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+
+/* RDMA Capability. */
+#define ERDMA_MAX_PD (128 * 1024)
+#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_ORD 128
+#define ERDMA_MAX_IRD 128
+#define ERDMA_MAX_SGE_RD 1
+#define ERDMA_MAX_CONTEXT (128 * 1024)
+#define ERDMA_MAX_SEND_SGE 6
+#define ERDMA_MAX_RECV_SGE 1
+#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
+#define ERDMA_MAX_FRMR_PA 512
+
+enum {
+ ERDMA_MMAP_IO_NC = 0, /* no cache */
+};
+
+struct erdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 address;
+ u8 mmap_flag;
+};
+
+struct erdma_ucontext {
+ struct ib_ucontext ibucontext;
+
+ u32 sdb_type;
+ u32 sdb_idx;
+ u32 sdb_page_idx;
+ u32 sdb_page_off;
+ u64 sdb;
+ u64 rdb;
+ u64 cdb;
+
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *cq_db_mmap_entry;
+
+ /* doorbell records */
+ struct list_head dbrecords_page_list;
+ struct mutex dbrecords_page_mutex;
+};
+
+struct erdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+};
+
+/*
+ * Memory region (MR) definitions.
+ */
+#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
+#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each MTT entry takes 8 bytes. */
+#define ERDMA_MR_MAX_MTT_CNT 524288
+#define ERDMA_MTT_ENTRY_SIZE 8
+
+#define ERDMA_MR_TYPE_NORMAL 0
+#define ERDMA_MR_TYPE_FRMR 1
+#define ERDMA_MR_TYPE_DMA 2
+
+#define ERDMA_MR_INLINE_MTT 0
+#define ERDMA_MR_INDIRECT_MTT 1
+
+#define ERDMA_MR_ACC_LR BIT(0)
+#define ERDMA_MR_ACC_LW BIT(1)
+#define ERDMA_MR_ACC_RR BIT(2)
+#define ERDMA_MR_ACC_RW BIT(3)
+
+static inline u8 to_erdma_access_flags(int access)
+{
+ return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
+ (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
+ (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0);
+}
+
+struct erdma_mem {
+ struct ib_umem *umem;
+ void *mtt_buf;
+ u32 mtt_type;
+ u32 page_size;
+ u32 page_offset;
+ u32 page_cnt;
+ u32 mtt_nents;
+
+ u64 va;
+ u64 len;
+
+ u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
+};
+
+struct erdma_mr {
+ struct ib_mr ibmr;
+ struct erdma_mem mem;
+ u8 type;
+ u8 access;
+ u8 valid;
+};
+
+struct erdma_user_dbrecords_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ u64 va;
+ int refcnt;
+};
+
+struct erdma_uqp {
+ struct erdma_mem sq_mtt;
+ struct erdma_mem rq_mtt;
+
+ dma_addr_t sq_db_info_dma_addr;
+ dma_addr_t rq_db_info_dma_addr;
+
+ struct erdma_user_dbrecords_page *user_dbr_page;
+
+ u32 rq_offset;
+};
+
+struct erdma_kqp {
+ u16 sq_pi;
+ u16 sq_ci;
+
+ u16 rq_pi;
+ u16 rq_ci;
+
+ u64 *swr_tbl;
+ u64 *rwr_tbl;
+
+ void __iomem *hw_sq_db;
+ void __iomem *hw_rq_db;
+
+ void *sq_buf;
+ dma_addr_t sq_buf_dma_addr;
+
+ void *rq_buf;
+ dma_addr_t rq_buf_dma_addr;
+
+ void *sq_db_info;
+ void *rq_db_info;
+
+ u8 sig_all;
+};
+
+enum erdma_qp_state {
+ ERDMA_QP_STATE_IDLE = 0,
+ ERDMA_QP_STATE_RTR = 1,
+ ERDMA_QP_STATE_RTS = 2,
+ ERDMA_QP_STATE_CLOSING = 3,
+ ERDMA_QP_STATE_TERMINATE = 4,
+ ERDMA_QP_STATE_ERROR = 5,
+ ERDMA_QP_STATE_UNDEF = 7,
+ ERDMA_QP_STATE_COUNT = 8
+};
+
+enum erdma_qp_attr_mask {
+ ERDMA_QP_ATTR_STATE = (1 << 0),
+ ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
+ ERDMA_QP_ATTR_ORD = (1 << 3),
+ ERDMA_QP_ATTR_IRD = (1 << 4),
+ ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
+ ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
+ ERDMA_QP_ATTR_MPA = (1 << 7)
+};
+
+struct erdma_qp_attrs {
+ enum erdma_qp_state state;
+ enum erdma_cc_alg cc; /* Congestion control algorithm */
+ u32 sq_size;
+ u32 rq_size;
+ u32 orq_size;
+ u32 irq_size;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 cookie;
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+ u8 qp_type;
+ u8 pd_len;
+};
+
+struct erdma_qp {
+ struct ib_qp ibqp;
+ struct kref ref;
+ struct completion safe_free;
+ struct erdma_dev *dev;
+ struct erdma_cep *cep;
+ struct rw_semaphore state_lock;
+
+ union {
+ struct erdma_kqp kern_qp;
+ struct erdma_uqp user_qp;
+ };
+
+ struct erdma_cq *scq;
+ struct erdma_cq *rcq;
+
+ struct erdma_qp_attrs attrs;
+ spinlock_t lock;
+};
+
+struct erdma_kcq_info {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+ u32 ci;
+ u32 cmdsn;
+ u32 notify_cnt;
+
+ spinlock_t lock;
+ u8 __iomem *db;
+ u64 *db_record;
+};
+
+struct erdma_ucq_info {
+ struct erdma_mem qbuf_mtt;
+ struct erdma_user_dbrecords_page *user_dbr_page;
+ dma_addr_t db_info_dma_addr;
+};
+
+struct erdma_cq {
+ struct ib_cq ibcq;
+ u32 cqn;
+
+ u32 depth;
+ u32 assoc_eqn;
+
+ union {
+ struct erdma_kcq_info kern_cq;
+ struct erdma_ucq_info user_cq;
+ };
+};
+
+#define QP_ID(qp) ((qp)->ibqp.qp_num)
+
+static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
+}
+
+static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
+}
+
+void erdma_qp_get(struct erdma_qp *qp);
+void erdma_qp_put(struct erdma_qp *qp);
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask);
+void erdma_qp_llp_close(struct erdma_qp *qp);
+void erdma_qp_cm_drop(struct erdma_qp *qp);
+
+static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct erdma_ucontext, ibucontext);
+}
+
+static inline struct erdma_pd *to_epd(struct ib_pd *pd)
+{
+ return container_of(pd, struct erdma_pd, ibpd);
+}
+
+static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct erdma_mr, ibmr);
+}
+
+static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
+{
+ return container_of(qp, struct erdma_qp, ibqp);
+}
+
+static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct erdma_cq, ibcq);
+}
+
+static inline struct erdma_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *ibmmap)
+{
+ return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
+int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
+ struct ib_udata *data);
+int erdma_get_port_immutable(struct ib_device *dev, u32 port,
+ struct ib_port_immutable *ib_port_immutable);
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_port(struct ib_device *dev, u32 port,
+ struct ib_port_attr *attr);
+int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
+ union ib_gid *gid);
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *data);
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_udata *udata);
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+void erdma_qp_get_ref(struct ib_qp *ibqp);
+void erdma_qp_put_ref(struct ib_qp *ibqp);
+struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+
+#endif
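The to_ectx()/to_epd()/to_emr()/to_eqp()/to_ecq() helpers above are plain container_of() wrappers, and to_erdma_access_flags() folds the core IB access bits into the device's MR access bits. A minimal sketch of the flag conversion, using only the definitions from this header; the function name is a placeholder, not part of the driver:

/* Illustration only: converting core IB access flags with the helper
 * defined above.  example_mr_access() is a placeholder name.
 */
static u8 example_mr_access(void)
{
	int ib_access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR == BIT(1) | BIT(2) == 0x6 */
	return to_erdma_access_flags(ib_access);
}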
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index 6eb739052121..14b92e12bf29 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HFI1
tristate "Cornelis OPX Gen1 support"
- depends on X86_64 && INFINIBAND_RDMAVT && I2C
+ depends on X86_64 && INFINIBAND_RDMAVT && I2C && !UML
select MMU_NOTIFIER
select CRC32
select I2C_ALGOBIT
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2e4cf2b11653..629beff053ad 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1179,8 +1179,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
goto done;
ret = init_user_ctxt(fd, uctxt);
- if (ret)
+ if (ret) {
+ hfi1_free_ctxt_rcv_groups(uctxt);
goto done;
+ }
user_init(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index d6bbdb8fcb50..5d9a7b09ca37 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -742,9 +742,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
kzalloc_node(sizeof(*tx->sdma_hdr),
GFP_KERNEL, priv->dd->node);
- netif_tx_napi_add(dev, &txq->napi,
- hfi1_ipoib_poll_tx_ring,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
}
return 0;
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 03b098a494b5..3dfa5aff2512 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -216,7 +216,7 @@ static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
* right now.
*/
set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
- netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
+ netif_napi_add_weight(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
rc = msix_netdev_request_rcd_irq(rxq->rcd);
if (rc)
goto bail_context_irq_failure;
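Both hfi1 hunks above track the tree-wide NAPI API rework: netif_tx_napi_add() is replaced by netif_napi_add_tx(), which no longer takes a weight, while callers that still want an explicit weight (here 64) move to netif_napi_add_weight(). A hedged sketch of the two call shapes; the poll callback and registration function are placeholders, not hfi1 symbols:

/* Sketch of the updated NAPI registration calls; "demo_*" names are
 * placeholders, not hfi1 symbols.
 */
#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	return 0; /* no work in this illustration */
}

static void demo_register_napi(struct net_device *dev,
			       struct napi_struct *tx_napi,
			       struct napi_struct *rx_napi)
{
	/* TX NAPI: the weight argument is gone */
	netif_napi_add_tx(dev, tx_napi, demo_poll);

	/* RX NAPI keeping a custom weight of 64, as in the hunk above */
	netif_napi_add_weight(dev, rx_napi, demo_poll, 64);
}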
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 136f9a99e1e0..7690f996d5e3 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -172,7 +172,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
}
/*
- * Read nbytes from "from" and and place them in the low bytes
+ * Read nbytes from "from" and place them in the low bytes
* of pbuf->carry. Other bytes are left as-is. Any previous
* value in pbuf->carry is lost.
*
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index 707f1053f0b7..582b6f68df3d 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -26,14 +26,10 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
TP_PROTO(const char *function, struct va_format *vaf),
TP_ARGS(function, vaf),
TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("(%s) %s",
__get_str(function),
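The trace_dbg.h hunk above drops the fixed MAX_MSG_LEN buffer and manual vsnprintf() in favour of the __vstring()/__assign_vstr() helpers, which size the ring-buffer string from the va_list at record time. A minimal sketch of that pairing in a tracepoint; the event name and print format are placeholders, not hfi1 tracepoints:

/* Sketch of a tracepoint using the vstring helpers; "demo_vstr" is a
 * placeholder event.
 */
TRACE_EVENT(demo_vstr,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(
		__vstring(msg, vaf->fmt, vaf->va)	/* sized via vsnprintf() */
	),
	TP_fast_assign(
		__assign_vstr(msg, vaf->fmt, vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);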
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2855e9ad4b32..f848eedc6a23 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -959,6 +959,7 @@ struct hns_roce_dev {
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
+ struct work_struct ecc_work;
const struct hns_roce_dfx_hw *dfx;
u32 func_num;
u32 is_vf;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ba3c742258ef..cbdafaac678a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -55,6 +55,42 @@ enum {
CMD_RST_PRC_EBUSY,
};
+enum ecc_resource_type {
+ ECC_RESOURCE_QPC,
+ ECC_RESOURCE_CQC,
+ ECC_RESOURCE_MPT,
+ ECC_RESOURCE_SRQC,
+ ECC_RESOURCE_GMV,
+ ECC_RESOURCE_QPC_TIMER,
+ ECC_RESOURCE_CQC_TIMER,
+ ECC_RESOURCE_SCCC,
+ ECC_RESOURCE_COUNT,
+};
+
+static const struct {
+ const char *name;
+ u8 read_bt0_op;
+ u8 write_bt0_op;
+} fmea_ram_res[] = {
+ { "ECC_RESOURCE_QPC",
+ HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
+ { "ECC_RESOURCE_CQC",
+ HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
+ { "ECC_RESOURCE_MPT",
+ HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
+ { "ECC_RESOURCE_SRQC",
+ HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
+ /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
+ { "ECC_RESOURCE_GMV",
+ 0, 0 },
+ { "ECC_RESOURCE_QPC_TIMER",
+ HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
+ { "ECC_RESOURCE_CQC_TIMER",
+ HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
+ { "ECC_RESOURCE_SCCC",
+ HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
+};
+
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
@@ -5855,12 +5891,12 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
-static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
{
struct device *dev = hr_dev->dev;
struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
- int aeqe_found = 0;
+ irqreturn_t aeqe_found = IRQ_NONE;
int event_type;
u32 queue_num;
int sub_type;
@@ -5914,7 +5950,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->event_type = event_type;
eq->sub_type = sub_type;
++eq->cons_index;
- aeqe_found = 1;
+ aeqe_found = IRQ_HANDLED;
hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
@@ -5922,7 +5958,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
}
update_eq_db(eq);
- return aeqe_found;
+
+ return IRQ_RETVAL(aeqe_found);
}
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
@@ -5937,11 +5974,11 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
-static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
- int ceqe_found = 0;
+ irqreturn_t ceqe_found = IRQ_NONE;
u32 cqn;
while (ceqe) {
@@ -5955,21 +5992,21 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
hns_roce_cq_completion(hr_dev, cqn);
++eq->cons_index;
- ceqe_found = 1;
+ ceqe_found = IRQ_HANDLED;
ceqe = next_ceqe_sw_v2(eq);
}
update_eq_db(eq);
- return ceqe_found;
+ return IRQ_RETVAL(ceqe_found);
}
static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
struct hns_roce_eq *eq = eq_ptr;
struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work;
+ irqreturn_t int_work;
if (eq->type_flag == HNS_ROCE_CEQ)
/* Completion event interrupt */
@@ -5981,27 +6018,22 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
return IRQ_RETVAL(int_work);
}
-static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
+ u32 int_st)
{
- struct hns_roce_dev *hr_dev = dev_id;
- struct device *dev = hr_dev->dev;
- int int_work = 0;
- u32 int_st;
+ struct pci_dev *pdev = hr_dev->pci_dev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops = ae_dev->ops;
+ irqreturn_t int_work = IRQ_NONE;
u32 int_en;
- /* Abnormal interrupt */
- int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
- struct pci_dev *pdev = hr_dev->pci_dev;
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- const struct hnae3_ae_ops *ops = ae_dev->ops;
+ dev_err(hr_dev->dev, "AEQ overflow!\n");
- dev_err(dev, "AEQ overflow!\n");
-
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
@@ -6013,19 +6045,165 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
- int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
- dev_err(dev, "RAS interrupt!\n");
+ int_work = IRQ_HANDLED;
+ } else {
+ dev_err(hr_dev->dev, "there is no basic abn irq found.\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ int ret;
- int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
+ ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
+ ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
+
+ return 0;
+}
+
+static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 addr_upper;
+ u32 addr_low;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read gmv, ret = %d.\n", ret);
+ return ret;
+ }
+
+ addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
+ addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
+ hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
+ hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
+{
+ if (res_type == ECC_RESOURCE_QPC_TIMER ||
+ res_type == ECC_RESOURCE_CQC_TIMER ||
+ res_type == ECC_RESOURCE_SCCC)
+ return le64_to_cpu(*data);
+
+ return le64_to_cpu(*data) << PAGE_SHIFT;
+}
- int_work = 1;
+static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
+ u32 index)
+{
+ u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
+ u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u64 addr;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read fmea ram, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to write fmea ram, ret = %d.\n",
+ ret);
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ u32 res_type = ecc_info->res_type;
+ u32 index = ecc_info->index;
+ int ret;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
+
+ if (res_type >= ECC_RESOURCE_COUNT) {
+ dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
+ res_type);
+ return;
+ }
+
+ if (res_type == ECC_RESOURCE_GMV)
+ ret = fmea_recover_gmv(hr_dev, index);
+ else
+ ret = fmea_recover_others(hr_dev, res_type, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to recover %s, index = %u, ret = %d.\n",
+ fmea_ram_res[res_type].name, index, ret);
+}
+
+static void fmea_ram_ecc_work(struct work_struct *ecc_work)
+{
+ struct hns_roce_dev *hr_dev =
+ container_of(ecc_work, struct hns_roce_dev, ecc_work);
+ struct fmea_ram_ecc ecc_info = {};
+
+ if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
+ dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
+ return;
+ }
+
+ if (!ecc_info.is_ecc_err) {
+ dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n");
+ return;
+ }
+
+ fmea_ram_ecc_recover(hr_dev, &ecc_info);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ irqreturn_t int_work = IRQ_NONE;
+ u32 int_st;
+
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+
+ if (int_st) {
+ int_work = abnormal_interrupt_basic(hr_dev, int_st);
+ } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
+ int_work = IRQ_HANDLED;
} else {
- dev_err(dev, "There is no abnormal irq found!\n");
+ dev_err(hr_dev->dev, "there is no abnormal irq found.\n");
}
return IRQ_RETVAL(int_work);
@@ -6342,6 +6520,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
}
}
+ INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);
+
hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
if (!hr_dev->irq_workq) {
dev_err(dev, "failed to create irq workqueue.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 7ffb7824d268..f96debac30fe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -250,6 +250,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_ROCE_OPC_EXT_CFG = 0x8512,
+ HNS_ROCE_QUERY_RAM_ECC = 0x8513,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -1107,6 +1108,11 @@ enum {
#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
+/* Fields of HNS_ROCE_QUERY_RAM_ECC */
+#define QUERY_RAM_ECC_1BIT_ERR CMQ_REQ_FIELD_LOC(31, 0)
+#define QUERY_RAM_ECC_RES_TYPE CMQ_REQ_FIELD_LOC(63, 32)
+#define QUERY_RAM_ECC_TAG CMQ_REQ_FIELD_LOC(95, 64)
+
struct hns_roce_cfg_sgid_tb {
__le32 table_idx_rsv;
__le32 vf_sgid_l;
@@ -1343,6 +1349,12 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
+struct fmea_ram_ecc {
+ u32 is_ecc_err;
+ u32 res_type;
+ u32 index;
+};
+
/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65
@@ -1382,7 +1394,6 @@ struct hns_roce_dip {
#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
-#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1
#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
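Taken together, the hns_roce hunks above wire ECC handling as follows: the abnormal MSI-X handler only acknowledges basic errors in hard-IRQ context and, on HIP09 and newer, defers FMEA RAM ECC recovery to ecc_work on the ordered irq_workq, where the firmware commands (HNS_ROCE_QUERY_RAM_ECC, then either the GMV cmdq path or a mailbox BT0 read/write) are free to sleep. A simplified model of that IRQ-to-workqueue deferral; the demo_* names are placeholders, not hns_roce symbols:

/* Simplified model of the IRQ -> workqueue deferral used for the ECC
 * recovery above; "demo_*" names are placeholders.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct workqueue_struct *irq_workq;
	struct work_struct ecc_work;
};

static void demo_ecc_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, ecc_work);

	/* query the ECC error and recover here; may sleep, unlike the ISR */
	(void)dev;
}

static irqreturn_t demo_abn_irq(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;

	/* recovery needs firmware commands, so defer it out of IRQ context */
	queue_work(dev->irq_workq, &dev->ecc_work);
	return IRQ_HANDLED;
}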
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 638bf4a1ed94..7b086fe63a24 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1477,12 +1477,13 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
list_for_each_entry (listen_node, &cm_core->listen_list, list) {
memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
listen_port = listen_node->loc_port;
+ if (listen_port != dst_port ||
+ !(listener_state & listen_node->listener_state))
+ continue;
/* compare node pair, return node handle if a match */
- if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
- !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
- listen_port == dst_port &&
- vlan_id == listen_node->vlan_id &&
- (listener_state & listen_node->listener_state)) {
+ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+ (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+ vlan_id == listen_node->vlan_id)) {
refcount_inc(&listen_node->refcnt);
spin_unlock_irqrestore(&cm_core->listen_list_lock,
flags);
@@ -4231,10 +4232,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_node *cm_node;
struct list_head teardown_list;
struct ib_qp_attr attr;
- struct irdma_sc_vsi *vsi = &iwdev->vsi;
- struct irdma_sc_qp *sc_qp;
- struct irdma_qp *qp;
- int i;
INIT_LIST_HEAD(&teardown_list);
@@ -4251,52 +4248,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
- if (!iwdev->roce_mode)
- return;
-
- INIT_LIST_HEAD(&teardown_list);
- for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
- mutex_lock(&vsi->qos[i].qos_mutex);
- list_for_each_safe (list_node, list_core_temp,
- &vsi->qos[i].qplist) {
- u32 qp_ip[4];
-
- sc_qp = container_of(list_node, struct irdma_sc_qp,
- list);
- if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
- continue;
-
- qp = sc_qp->qp_uk.back_qp;
- if (!disconnect_all) {
- if (nfo->ipv4)
- qp_ip[0] = qp->udp_info.local_ipaddr[3];
- else
- memcpy(qp_ip,
- &qp->udp_info.local_ipaddr[0],
- sizeof(qp_ip));
- }
-
- if (disconnect_all ||
- (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
- !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
- spin_lock(&iwdev->rf->qptable_lock);
- if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
- irdma_qp_add_ref(&qp->ibqp);
- list_add(&qp->teardown_entry,
- &teardown_list);
- }
- spin_unlock(&iwdev->rf->qptable_lock);
- }
- }
- mutex_unlock(&vsi->qos[i].qos_mutex);
- }
-
- list_for_each_safe (list_node, list_core_temp, &teardown_list) {
- qp = container_of(list_node, struct irdma_qp, teardown_entry);
- attr.qp_state = IB_QPS_ERR;
- irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
- irdma_qp_rem_ref(&qp->ibqp);
- }
}
/**
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 58c0e181ca2b..a41e0d21143a 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -4872,10 +4872,12 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
sd_diff = sd_needed - hmc_fpm_misc->max_sds;
if (sd_diff > 128) {
- if (qpwanted > 128 && sd_diff > 144)
+ if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
- mrwanted /= 2;
- pblewanted /= 2;
+ } else {
+ mrwanted /= 2;
+ pblewanted /= 2;
+ }
continue;
}
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index dd3943d22dc6..4f132c6fb653 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -257,10 +257,6 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
ctx_info = &iwqp->ctx_info;
- if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1))
- ctx_info->roce_info->err_rq_idx_valid = true;
- else
- ctx_info->iwarp_info->err_rq_idx_valid = true;
} else {
if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
continue;
@@ -370,16 +366,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- if (rdma_protocol_roce(&iwdev->ibdev, 1))
- ctx_info->roce_info->err_rq_idx_valid = false;
- else
- ctx_info->iwarp_info->err_rq_idx_valid = false;
- fallthrough;
default:
- ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n",
- info->ae_id, info->qp, info->qp_cq_id);
+ ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {
+ ctx_info->roce_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -388,7 +380,8 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
irdma_cm_disconn(iwqp);
break;
}
- if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {
+ ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = true;
@@ -1512,10 +1505,7 @@ static int irdma_hmc_setup(struct irdma_pci_f *rf)
int status;
u32 qpcnt;
- if (rf->rdma_ver == IRDMA_GEN_1)
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
- else
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
rf->sd_type = IRDMA_SD_TYPE_DIRECT;
status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
@@ -1543,7 +1533,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
rf->obj_mem.pa);
rf->obj_mem.va = NULL;
if (rf->rdma_ver != IRDMA_GEN_1) {
- kfree(rf->allocated_ws_nodes);
+ bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
}
kfree(rf->ceqlist);
@@ -1972,9 +1962,8 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
u32 ret;
if (rf->rdma_ver != IRDMA_GEN_1) {
- rf->allocated_ws_nodes =
- kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
- sizeof(unsigned long), GFP_KERNEL);
+ rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
+ GFP_KERNEL);
if (!rf->allocated_ws_nodes)
return -ENOMEM;
@@ -2023,7 +2012,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
mem_rsrc_kzalloc_fail:
- kfree(rf->allocated_ws_nodes);
+ bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
return ret;
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index e46fc110004d..50299f58b6b3 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index cf53b17510cd..5986fd906308 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 46c12334c735..4789e85d717b 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
+ u64 page_size_cap;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index ef862bced20f..65e966ad3453 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -85,7 +85,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
-#define IRDMA_MAX_PAGES_PER_FMR 512
+#define IRDMA_MAX_PAGES_PER_FMR 262144
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ab3c5208a123..fdf4cc88cb91 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -652,6 +652,7 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8002, "Invalid State"},
{0xffff, 0x8006, "Flush No Wqe Pending"},
{0xffff, 0x8007, "Modify QP Bad Close"},
{0xffff, 0x8009, "LLP Closed"},
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index c4412ece5a6d..9b07b8af2997 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev,
props->vendor_part_id = pcidev->device;
props->hw_ver = rf->pcidev->revision;
- props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ props->page_size_cap = hw_attrs->page_size_cap;
props->max_mr_size = hw_attrs->max_mr_size;
props->max_qp = rf->max_qp - rf->used_qps;
props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -1776,11 +1776,11 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
spin_unlock_irqrestore(&iwcq->lock, flags);
irdma_cq_wq_destroy(iwdev->rf, cq);
- irdma_cq_free_rsrc(iwdev->rf, iwcq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
return 0;
}
@@ -2605,7 +2605,7 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
- true);
+ false);
if (err_code)
goto err_get_pble;
@@ -2641,8 +2641,16 @@ static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(iwmr->npages == iwmr->page_cnt))
return -ENOMEM;
- pbl = palloc->level1.addr;
- pbl[iwmr->npages++] = addr;
+ if (palloc->level == PBLE_LEVEL_2) {
+ struct irdma_pble_info *palloc_info =
+ palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+ palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+ } else {
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages] = addr;
+ }
+ iwmr->npages++;
return 0;
}
@@ -2781,7 +2789,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
iwmr->page_size = ib_umem_find_best_pgsz(region,
- SZ_4K | SZ_2M | SZ_1G,
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap,
virt);
if (unlikely(!iwmr->page_size)) {
kfree(iwmr);
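The irdma changes above stop hard-coding SZ_4K | SZ_2M | SZ_1G: each generation now advertises hw_attrs.page_size_cap (4K and 2M for the GEN_1/i40iw path, plus 1G for icrdma), and both the reported device attributes and ib_umem_find_best_pgsz() consume that bitmap. A simplified illustration of what such a capability bitmap expresses, ignoring the virtual-address alignment that ib_umem_find_best_pgsz() also checks; demo_best_pgsz() is a placeholder, not irdma code:

/* Illustration only: pick the largest page size permitted by a
 * page_size_cap bitmap that still fits the MR length.
 */
#include <linux/sizes.h>
#include <linux/types.h>

static u64 demo_best_pgsz(u64 page_size_cap, u64 length)
{
	u64 best = 0;
	u64 bit;

	for (bit = SZ_4K; bit && bit <= length; bit <<= 1)
		if (page_size_cap & bit)
			best = bit;	/* keep the largest capability bit that fits */

	/* e.g. cap = SZ_4K | SZ_2M and length = 4 MiB -> returns SZ_2M */
	return best;
}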
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 08371a80fdc2..be189e0525de 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -523,6 +523,10 @@ repoll:
"Requestor" : "Responder", cq->mcq.cqn);
mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
err_cqe->syndrome, err_cqe->vendor_err_synd);
+ if (wc->status != IB_WC_WR_FLUSH_ERR &&
+ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
+ dev->umrc.state = MLX5_UMR_STATE_RECOVER;
+
if (opcode == MLX5_CQE_REQ_ERR) {
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index 001d766cf291..3669c90b2dad 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -336,9 +336,15 @@ err_copy:
static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
{
- return uapi_type == MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM ?
- MLX5_SW_ICM_TYPE_STEERING :
- MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ switch (uapi_type) {
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ default:
+ return MLX5_SW_ICM_TYPE_STEERING;
+ }
}
static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
@@ -347,11 +353,32 @@ static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
int type)
{
struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
- enum mlx5_sw_icm_type icm_type = get_icm_type(type);
+ enum mlx5_sw_icm_type icm_type;
struct mlx5_ib_dm_icm *dm;
u64 act_size;
int err;
+ if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
+ return ERR_PTR(-EPERM);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
+ return ERR_PTR(-EOPNOTSUPP);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
+ return ERR_PTR(-EOPNOTSUPP);
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
return ERR_PTR(-ENOMEM);
@@ -359,19 +386,6 @@ static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
dm->base.type = type;
dm->base.ibdm.device = ctx->device;
- if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) {
- err = -EPERM;
- goto free;
- }
-
- if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))) {
- err = -EOPNOTSUPP;
- goto free;
- }
-
/* Allocation size must be a multiple of the basic block size
* and a power of 2.
*/
@@ -379,6 +393,8 @@ static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
act_size = roundup_pow_of_two(act_size);
dm->base.size = act_size;
+ icm_type = get_icm_type(type);
+
err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
to_mucontext(ctx)->devx_uid,
&dm->base.dev_addr, &dm->obj_id);
@@ -420,8 +436,8 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
case MLX5_IB_UAPI_DM_TYPE_MEMIC:
return handle_alloc_dm_memic(context, attr, attrs);
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- return handle_alloc_dm_sw_icm(context, attr, attrs, type);
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
return handle_alloc_dm_sw_icm(context, attr, attrs, type);
default:
return ERR_PTR(-EOPNOTSUPP);
@@ -474,6 +490,7 @@ static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
return 0;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
default:
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 39ffb363ba0c..490ec308e309 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -679,7 +679,15 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
-static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
+}
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
int priority,
int num_entries, int num_groups,
@@ -688,6 +696,8 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
+ if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
+ ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.flags = flags;
@@ -784,8 +794,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, max_table_size, num_groups,
- flags);
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ num_groups, flags);
return prio;
}
@@ -927,7 +937,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
prio = &dev->flow_db->opfcs[type];
if (!prio->flow_table) {
- prio = _get_prio(ns, prio, priority,
+ prio = _get_prio(dev, ns, prio, priority,
dev->num_ports * MAX_OPFC_RULES, 1, 0);
if (IS_ERR(prio)) {
err = PTR_ERR(prio);
@@ -1407,8 +1417,8 @@ free_ucmd:
}
static struct mlx5_ib_flow_prio *
-_get_flow_table(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_matcher *fs_matcher,
+_get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
+ enum mlx5_flow_namespace_type ns_type,
bool mcast)
{
struct mlx5_flow_namespace *ns = NULL;
@@ -1421,11 +1431,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ priority = ib_prio_to_core_prio(user_priority, false);
esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- switch (fs_matcher->ns_type) {
+ switch (ns_type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
@@ -1452,17 +1462,17 @@ _get_flow_table(struct mlx5_ib_dev *dev,
reformat_l3_tunnel_to_l2) &&
esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
default:
break;
@@ -1470,11 +1480,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
- ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
if (!ns)
return ERR_PTR(-EOPNOTSUPP);
- switch (fs_matcher->ns_type) {
+ switch (ns_type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
prio = &dev->flow_db->prios[priority];
break;
@@ -1499,7 +1509,7 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (prio->flow_table)
return prio;
- return _get_prio(ns, prio, priority, max_table_size,
+ return _get_prio(dev, ns, prio, priority, max_table_size,
MLX5_FS_MAX_TYPES, flags);
}
@@ -1618,7 +1628,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, fs_matcher, mcast);
+ ft_prio = _get_flow_table(dev, fs_matcher->priority,
+ fs_matcher->ns_type, mcast);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -2015,6 +2026,23 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_steering_anchor *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ mutex_lock(&obj->dev->flow_db->lock);
+ put_flow_table(obj->dev, obj->ft_prio, true);
+ mutex_unlock(&obj->dev->flow_db->lock);
+
+ kfree(obj);
+ return 0;
+}
+
static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
struct mlx5_ib_flow_matcher *obj)
{
@@ -2050,12 +2078,10 @@ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
if (err)
return err;
- if (flags) {
- mlx5_ib_ft_type_to_namespace(
+ if (flags)
+ return mlx5_ib_ft_type_to_namespace(
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
&obj->ns_type);
- return 0;
- }
}
obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
@@ -2121,6 +2147,75 @@ end:
return err;
}
+static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_ib_steering_anchor *obj;
+ struct mlx5_ib_flow_prio *ft_prio;
+ u16 priority;
+ u32 ft_id;
+ int err;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ err = uverbs_get_const(&ib_uapi_ft_type, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
+ if (err)
+ return err;
+
+ err = uverbs_copy_from(&priority, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ mutex_lock(&dev->flow_db->lock);
+ ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+ if (IS_ERR(ft_prio)) {
+ mutex_unlock(&dev->flow_db->lock);
+ err = PTR_ERR(ft_prio);
+ goto free_obj;
+ }
+
+ ft_prio->refcount++;
+ ft_id = mlx5_flow_table_id(ft_prio->flow_table);
+ mutex_unlock(&dev->flow_db->lock);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ &ft_id, sizeof(ft_id));
+ if (err)
+ goto put_flow_table;
+
+ uobj->object = obj;
+ obj->dev = dev;
+ obj->ft_prio = ft_prio;
+ atomic_set(&obj->usecnt, 0);
+
+ return 0;
+
+put_flow_table:
+ mutex_lock(&dev->flow_db->lock);
+ put_flow_table(dev, ft_prio, true);
+ mutex_unlock(&dev->flow_db->lock);
+free_obj:
+ kfree(obj);
+
+ return err;
+}
+
static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
enum mlx5_ib_uapi_flow_table_type ft_type,
@@ -2477,6 +2572,35 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
+
const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_FLOW_MATCHER),
@@ -2485,6 +2609,9 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
&mlx5_ib_fs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_actions),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
{},
};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b68fddeac0f1..fc94a1b25485 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
- dev->port_caps[port - 1].has_smi = false;
- if (MLX5_CAP_GEN(dev->mdev, port_type) ==
- MLX5_CAP_PORT_TYPE_IB) {
- if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port, 0,
- &vport_ctx);
- if (err) {
- mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
- port, err);
- return err;
- }
- dev->port_caps[port - 1].has_smi =
- vport_ctx.has_smi;
- } else {
- dev->port_caps[port - 1].has_smi = true;
- }
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
}
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
}
+
return 0;
}
@@ -4002,7 +4000,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
int err;
- err = mlx5_mr_cache_cleanup(dev);
+ err = mlx5_mkey_cache_cleanup(dev);
if (err)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
@@ -4022,7 +4020,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
if (ret)
return ret;
- ret = mlx5_mr_cache_init(dev);
+ ret = mlx5_mkey_cache_init(dev);
if (ret) {
mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
mlx5r_umr_resource_cleanup(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 998b67509a53..2e2ad3918385 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -259,6 +259,12 @@ struct mlx5_ib_flow_matcher {
u8 match_criteria_enable;
};
+struct mlx5_ib_steering_anchor {
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_dev *dev;
+ atomic_t usecnt;
+};
+
struct mlx5_ib_pp {
u16 index;
struct mlx5_core_dev *mdev;
@@ -613,6 +619,7 @@ struct mlx5_ib_mkey {
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
+ struct mlx5_cache_ent *cache_ent;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -635,20 +642,9 @@ struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
- /* User MR data */
- struct mlx5_cache_ent *cache_ent;
- /* Everything after cache_ent is zero'd when MR allocated */
struct ib_umem *umem;
union {
- /* Used only while the MR is in the cache */
- struct {
- u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
- struct mlx5_async_work cb_work;
- /* Cache list element */
- struct list_head list;
- };
-
/* Used only by kernel MRs (umem == NULL) */
struct {
void *descs;
@@ -688,12 +684,6 @@ struct mlx5_ib_mr {
};
};
-/* Zero the fields in the mr that are variant depending on usage */
-static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
-{
- memset_after(mr, 0, cache_ent);
-}
-
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
@@ -717,21 +707,29 @@ struct mlx5_ib_umr_context {
struct completion done;
};
+enum {
+ MLX5_UMR_STATE_ACTIVE,
+ MLX5_UMR_STATE_RECOVER,
+ MLX5_UMR_STATE_ERR,
+};
+
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- /* control access to UMR QP
+ /* Protects from UMR QP overflow
*/
struct semaphore sem;
+ /* Protects from using UMR while the UMR is not active
+ */
+ struct mutex lock;
+ unsigned int state;
};
struct mlx5_cache_ent {
- struct list_head head;
- /* sync access to the cahce entry
- */
- spinlock_t lock;
-
+ struct xarray mkeys;
+ unsigned long stored;
+ unsigned long reserved;
char name[4];
u32 order;
@@ -743,18 +741,11 @@ struct mlx5_cache_ent {
u8 fill_to_high_water:1;
/*
- * - available_mrs is the length of list head, ie the number of MRs
- * available for immediate allocation.
- * - total_mrs is available_mrs plus all in use MRs that could be
- * returned to the cache.
- * - limit is the low water mark for available_mrs, 2* limit is the
+ * - limit is the low water mark for stored mkeys, 2* limit is the
* upper water mark.
- * - pending is the number of MRs currently being created
*/
- u32 total_mrs;
- u32 available_mrs;
+ u32 in_use;
u32 limit;
- u32 pending;
/* Statistics */
u32 miss;
@@ -763,9 +754,19 @@ struct mlx5_cache_ent {
struct delayed_work dwork;
};
-struct mlx5_mr_cache {
+struct mlx5r_async_create_mkey {
+ union {
+ u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ };
+ struct mlx5_async_work cb_work;
+ struct mlx5_cache_ent *ent;
+ u32 mkey;
+};
+
+struct mlx5_mkey_cache {
struct workqueue_struct *wq;
- struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+ struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
struct dentry *root;
unsigned long last_add;
};
@@ -1064,7 +1065,7 @@ struct mlx5_ib_dev {
struct mlx5_ib_resources devr;
atomic_t mkey_var;
- struct mlx5_mr_cache cache;
+ struct mlx5_mkey_cache cache;
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
@@ -1309,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
struct mlx5_cache_ent *ent,
@@ -1338,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags);
@@ -1357,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags) {}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1e7653c997b5..129d531bd01b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -82,15 +82,14 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void assign_mkey_variant(struct mlx5_ib_dev *dev,
- struct mlx5_ib_mkey *mkey, u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, mkey_7_0, key);
- mkey->key = key;
+ *mkey = key;
}
static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
@@ -98,7 +97,7 @@ static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
{
int ret;
- assign_mkey_variant(dev, mkey, in);
+ assign_mkey_variant(dev, &mkey->key, in);
ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
if (!ret)
init_waitqueue_head(&mkey->wait);
@@ -106,20 +105,21 @@ static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
return ret;
}
-static int
-mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_ib_mkey *mkey,
- struct mlx5_async_ctx *async_ctx,
- u32 *in, int inlen, u32 *out, int outlen,
- struct mlx5_async_work *context)
+static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{
- MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
- assign_mkey_variant(dev, mkey, in);
- return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
- create_mkey_callback, context);
+ struct mlx5_ib_dev *dev = async_create->ent->dev;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);
+
+ MLX5_SET(create_mkey_in, async_create->in, opcode,
+ MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, &async_create->mkey, async_create->in);
+ return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
+ async_create->out, outlen, create_mkey_callback,
+ &async_create->cb_work);
}
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -142,40 +142,132 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
+
+static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
+ void *to_store)
+{
+ XA_STATE(xas, &ent->mkeys, 0);
+ void *curr;
+
+ xa_lock_irq(&ent->mkeys);
+ if (limit_pendings &&
+ (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) {
+ xa_unlock_irq(&ent->mkeys);
+ return -EAGAIN;
+ }
+ while (1) {
+ /*
+ * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
+ * doesn't transparently unlock. Instead we set the xas index to
+ * the current value of reserved every iteration.
+ */
+ xas_set(&xas, ent->reserved);
+ curr = xas_load(&xas);
+ if (!curr) {
+ if (to_store && ent->stored == ent->reserved)
+ xas_store(&xas, to_store);
+ else
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_valid(&xas)) {
+ ent->reserved++;
+ if (to_store) {
+ if (ent->stored != ent->reserved)
+ __xa_store(&ent->mkeys,
+ ent->stored,
+ to_store,
+ GFP_KERNEL);
+ ent->stored++;
+ queue_adjust_cache_locked(ent);
+ WRITE_ONCE(ent->dev->cache.last_add,
+ jiffies);
+ }
+ }
+ }
+ xa_unlock_irq(&ent->mkeys);
+
+ /*
+ * Notice xas_nomem() must always be called as it cleans
+ * up any cached allocation.
+ */
+ if (!xas_nomem(&xas, GFP_KERNEL))
+ break;
+ xa_lock_irq(&ent->mkeys);
+ }
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ if (WARN_ON(curr))
+ return -EINVAL;
+ return 0;
+}
+
+static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old;
+
+ ent->reserved--;
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+}
+
+static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
+{
+ void *old;
+
+ old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
+ WARN_ON(old);
+ ent->stored++;
+}
+
+static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old, *xa_mkey;
+
+ ent->stored--;
+ ent->reserved--;
+
+ if (ent->stored == ent->reserved) {
+ xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
+ WARN_ON(!xa_mkey);
+ return (u32)xa_to_value(xa_mkey);
+ }
+
+ xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+ return (u32)xa_to_value(xa_mkey);
+}
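In the reworked cache above, each entry's xarray holds ready mkeys as xa values at indices [0, stored) and XA_ZERO_ENTRY placeholders for in-flight asynchronous creations at [stored, reserved). A toy model of that push/pop bookkeeping, using a plain array instead of the kernel xarray and omitting the locking; this is not the mlx5 code:

/* Toy model of the stored/reserved invariant; not kernel code. */
#include <assert.h>
#include <stdint.h>

#define DEMO_CAP 64
#define DEMO_PLACEHOLDER 0	/* stands in for XA_ZERO_ENTRY */

struct demo_ent {
	uint32_t slots[DEMO_CAP];
	unsigned long stored;	/* ready mkeys live in [0, stored) */
	unsigned long reserved;	/* placeholders live in [stored, reserved) */
};

/* reserve a slot before firing the async CREATE_MKEY command */
static void demo_reserve(struct demo_ent *e)
{
	assert(e->reserved < DEMO_CAP);
	e->slots[e->reserved++] = DEMO_PLACEHOLDER;
}

/* async completion: turn the oldest placeholder into a real mkey */
static void demo_push_to_reserved(struct demo_ent *e, uint32_t mkey)
{
	assert(e->stored < e->reserved);
	e->slots[e->stored++] = mkey;
}

/* consumer: take one ready mkey, shrinking both counters */
static uint32_t demo_pop_stored(struct demo_ent *e)
{
	uint32_t mkey;

	assert(e->stored > 0);
	mkey = e->slots[--e->stored];
	e->reserved--;
	/* keep the placeholder run [stored, reserved) contiguous, mirroring
	 * the XA_ZERO_ENTRY shuffle in pop_stored_mkey() above */
	if (e->stored < e->reserved)
		e->slots[e->stored] = DEMO_PLACEHOLDER;
	return mkey;
}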
+
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
- struct mlx5_ib_mr *mr =
- container_of(context, struct mlx5_ib_mr, cb_work);
- struct mlx5_cache_ent *ent = mr->cache_ent;
+ struct mlx5r_async_create_mkey *mkey_out =
+ container_of(context, struct mlx5r_async_create_mkey, cb_work);
+ struct mlx5_cache_ent *ent = mkey_out->ent;
struct mlx5_ib_dev *dev = ent->dev;
unsigned long flags;
if (status) {
- create_mkey_warn(dev, status, mr->out);
- kfree(mr);
- spin_lock_irqsave(&ent->lock, flags);
- ent->pending--;
+ create_mkey_warn(dev, status, mkey_out->out);
+ kfree(mkey_out);
+ xa_lock_irqsave(&ent->mkeys, flags);
+ undo_push_reserve_mkey(ent);
WRITE_ONCE(dev->fill_delay, 1);
- spin_unlock_irqrestore(&ent->lock, flags);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
- mr->mmkey.type = MLX5_MKEY_MR;
- mr->mmkey.key |= mlx5_idx_to_mkey(
- MLX5_GET(create_mkey_out, mr->out, mkey_index));
- init_waitqueue_head(&mr->mmkey.wait);
-
+ mkey_out->mkey |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irqsave(&ent->lock, flags);
- list_add_tail(&mr->list, &ent->head);
- ent->available_mrs++;
- ent->total_mrs++;
+ xa_lock_irqsave(&ent->mkeys, flags);
+ push_to_reserved(ent, mkey_out->mkey);
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
- ent->pending--;
- spin_unlock_irqrestore(&ent->lock, flags);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
+ kfree(mkey_out);
}
static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
@@ -197,15 +289,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
return ret;
}
-static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
- struct mlx5_ib_mr *mr;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return NULL;
- mr->cache_ent = ent;
-
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
@@ -215,133 +300,106 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->access_mode, ent->ndescs));
MLX5_SET(mkc, mkc, log_page_size, ent->page);
- return mr;
}
/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
- size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mr *mr;
+ struct mlx5r_async_create_mkey *async_create;
void *mkc;
- u32 *in;
int err = 0;
int i;
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
- mr = alloc_cache_mr(ent, mkc);
- if (!mr) {
- err = -ENOMEM;
- break;
- }
- spin_lock_irq(&ent->lock);
- if (ent->pending >= MAX_PENDING_REG_MR) {
- err = -EAGAIN;
- spin_unlock_irq(&ent->lock);
- kfree(mr);
- break;
- }
- ent->pending++;
- spin_unlock_irq(&ent->lock);
- err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
- &ent->dev->async_ctx, in, inlen,
- mr->out, sizeof(mr->out),
- &mr->cb_work);
+ async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
+ GFP_KERNEL);
+ if (!async_create)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
+ memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+ async_create->ent = ent;
+
+ err = push_mkey(ent, true, NULL);
+ if (err)
+ goto free_async_create;
+
+ err = mlx5_ib_create_mkey_cb(async_create);
if (err) {
- spin_lock_irq(&ent->lock);
- ent->pending--;
- spin_unlock_irq(&ent->lock);
mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
- kfree(mr);
- break;
+ goto err_undo_reserve;
}
}
- kfree(in);
+ return 0;
+
+err_undo_reserve:
+ xa_lock_irq(&ent->mkeys);
+ undo_push_reserve_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+free_async_create:
+ kfree(async_create);
return err;
}
/* Synchronously create a MR in the cache */
-static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
+static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
int err;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
- mr = alloc_cache_mr(ent, mkc);
- if (!mr) {
- err = -ENOMEM;
- goto free_in;
- }
-
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
if (err)
- goto free_mr;
+ goto free_in;
- init_waitqueue_head(&mr->mmkey.wait);
- mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
- spin_lock_irq(&ent->lock);
- ent->total_mrs++;
- spin_unlock_irq(&ent->lock);
- kfree(in);
- return mr;
-free_mr:
- kfree(mr);
free_in:
kfree(in);
- return ERR_PTR(err);
+ return err;
}
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
- struct mlx5_ib_mr *mr;
+ u32 mkey;
- lockdep_assert_held(&ent->lock);
- if (list_empty(&ent->head))
+ lockdep_assert_held(&ent->mkeys.xa_lock);
+ if (!ent->stored)
return;
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
- kfree(mr);
- spin_lock_irq(&ent->lock);
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
bool limit_fill)
+ __acquires(&ent->mkeys) __releases(&ent->mkeys)
{
int err;
- lockdep_assert_held(&ent->lock);
+ lockdep_assert_held(&ent->mkeys.xa_lock);
while (true) {
if (limit_fill)
target = ent->limit * 2;
- if (target == ent->available_mrs + ent->pending)
+ if (target == ent->reserved)
return 0;
- if (target > ent->available_mrs + ent->pending) {
- u32 todo = target - (ent->available_mrs + ent->pending);
+ if (target > ent->reserved) {
+ u32 todo = target - ent->reserved;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
err = add_keys(ent, todo);
if (err == -EAGAIN)
usleep_range(3000, 5000);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (err) {
if (err != -EAGAIN)
return err;
@@ -366,15 +424,15 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
/*
* Target is the new value of total_mrs the user requests, however we
- * cannot free MRs that are in use. Compute the target value for
- * available_mrs.
+ * cannot free MRs that are in use. Compute the target value for stored
+ * mkeys.
*/
- spin_lock_irq(&ent->lock);
- if (target < ent->total_mrs - ent->available_mrs) {
+ xa_lock_irq(&ent->mkeys);
+ if (target < ent->in_use) {
err = -EINVAL;
goto err_unlock;
}
- target = target - (ent->total_mrs - ent->available_mrs);
+ target = target - ent->in_use;
if (target < ent->limit || target > ent->limit*2) {
err = -EINVAL;
goto err_unlock;
@@ -382,12 +440,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
err = resize_available_mrs(ent, target, false);
if (err)
goto err_unlock;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
return count;
err_unlock:
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
return err;
}
@@ -398,7 +456,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
if (err < 0)
return err;
@@ -427,10 +485,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
* Upon set we immediately fill the cache to high water mark implied by
* the limit.
*/
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
ent->limit = var;
err = resize_available_mrs(ent, 0, true);
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
if (err)
return err;
return count;
@@ -457,17 +515,17 @@ static const struct file_operations limit_fops = {
.read = limit_read,
};
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
{
unsigned int i;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
struct mlx5_cache_ent *ent = &cache->ent[i];
bool ret;
- spin_lock_irq(&ent->lock);
- ret = ent->available_mrs < ent->limit;
- spin_unlock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
+ ret = ent->stored < ent->limit;
+ xa_unlock_irq(&ent->mkeys);
if (ret)
return true;
}
@@ -481,26 +539,26 @@ static bool someone_adding(struct mlx5_mr_cache *cache)
*/
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
- lockdep_assert_held(&ent->lock);
+ lockdep_assert_held(&ent->mkeys.xa_lock);
if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
return;
- if (ent->available_mrs < ent->limit) {
+ if (ent->stored < ent->limit) {
ent->fill_to_high_water = true;
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
- ent->available_mrs + ent->pending < 2 * ent->limit) {
+ ent->reserved < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
- } else if (ent->available_mrs == 2 * ent->limit) {
+ } else if (ent->stored == 2 * ent->limit) {
ent->fill_to_high_water = false;
- } else if (ent->available_mrs > 2 * ent->limit) {
+ } else if (ent->stored > 2 * ent->limit) {
/* Queue deletion of excess entries */
ent->fill_to_high_water = false;
- if (ent->pending)
+ if (ent->stored != ent->reserved)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
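
queue_adjust_cache_locked() above keeps the same low/high-water policy, only rephrased in terms of stored and reserved; pending work is now inferred from reserved differing from stored rather than a separate counter. A user-space sketch of just the decision, with hypothetical names:

/* the low/high-water decision, reduced to a pure function; not driver code */
#include <stdbool.h>
#include <stdio.h>

enum cache_action { CACHE_FILL, CACHE_SHRINK, CACHE_IDLE };

static enum cache_action adjust(unsigned long stored, unsigned long reserved,
				unsigned long limit, bool *fill_to_high_water)
{
	if (stored < limit) {
		*fill_to_high_water = true;
		return CACHE_FILL;
	}
	if (*fill_to_high_water && reserved < 2 * limit)
		return CACHE_FILL;		/* keep going to the high mark */
	if (stored == 2 * limit) {
		*fill_to_high_water = false;
		return CACHE_IDLE;
	}
	if (stored > 2 * limit) {
		*fill_to_high_water = false;
		return CACHE_SHRINK;		/* queue deletion of excess keys */
	}
	return CACHE_IDLE;
}

int main(void)
{
	bool high = false;

	printf("%d\n", adjust(3, 5, 8, &high));   /* below limit -> FILL */
	printf("%d\n", adjust(10, 12, 8, &high)); /* still filling -> FILL */
	printf("%d\n", adjust(20, 20, 8, &high)); /* above 2*limit -> SHRINK */
	return 0;
}
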
@@ -511,25 +569,24 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
int err;
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
- if (ent->fill_to_high_water &&
- ent->available_mrs + ent->pending < 2 * ent->limit &&
+ if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
!READ_ONCE(dev->fill_delay)) {
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
err = add_keys(ent, 1);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
if (err) {
/*
- * EAGAIN only happens if pending is positive, so we
- * will be rescheduled from reg_mr_callback(). The only
+ * EAGAIN only happens if there are pending MRs, so we
+ * will be rescheduled when storing them. The only
* failure path here is ENOMEM.
*/
if (err != -EAGAIN) {
@@ -541,7 +598,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
msecs_to_jiffies(1000));
}
}
- } else if (ent->available_mrs > 2 * ent->limit) {
+ } else if (ent->stored > 2 * ent->limit) {
bool need_delay;
/*
@@ -556,11 +613,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
* the garbage collection work to try to run in next cycle, in
* order to free CPU resources to other tasks.
*/
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
need_delay = need_resched() || someone_adding(cache) ||
!time_after(jiffies,
READ_ONCE(cache->last_add) + 300 * HZ);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
if (need_delay) {
@@ -571,7 +628,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
queue_adjust_cache_locked(ent);
}
out:
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -587,73 +644,59 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
int access_flags)
{
struct mlx5_ib_mr *mr;
+ int err;
- /* Matches access in alloc_cache_mr() */
if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
return ERR_PTR(-EOPNOTSUPP);
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use++;
+
+ if (!ent->stored) {
queue_adjust_cache_locked(ent);
ent->miss++;
- spin_unlock_irq(&ent->lock);
- mr = create_cache_mr(ent);
- if (IS_ERR(mr))
- return mr;
+ xa_unlock_irq(&ent->mkeys);
+ err = create_cache_mkey(ent, &mr->mmkey.key);
+ if (err) {
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use--;
+ xa_unlock_irq(&ent->mkeys);
+ kfree(mr);
+ return ERR_PTR(err);
+ }
} else {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
+ mr->mmkey.key = pop_stored_mkey(ent);
queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
-
- mlx5_clear_mr(mr);
+ xa_unlock_irq(&ent->mkeys);
}
+ mr->mmkey.cache_ent = ent;
+ mr->mmkey.type = MLX5_MKEY_MR;
+ init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
-static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
- struct mlx5_cache_ent *ent = mr->cache_ent;
-
- WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irq(&ent->lock);
- list_add_tail(&mr->list, &ent->head);
- ent->available_mrs++;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
-}
-
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *tmp_mr;
- struct mlx5_ib_mr *mr;
- LIST_HEAD(del_list);
+ u32 mkey;
cancel_delayed_work(&ent->dwork);
- while (1) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- break;
- }
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_move(&mr->list, &del_list);
- ent->available_mrs--;
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
- }
-
- list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
- list_del(&mr->list);
- kfree(mr);
+ xa_lock_irq(&ent->mkeys);
+ while (ent->stored) {
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
}
+ xa_unlock_irq(&ent->mkeys);
}
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
if (!mlx5_debugfs_root || dev->is_rep)
return;
@@ -662,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
dev->cache.root = NULL;
}
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
struct dentry *dir;
int i;
@@ -674,13 +717,13 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
sprintf(ent->name, "%d", ent->order);
dir = debugfs_create_dir(ent->name, cache->root);
debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
+ debugfs_create_ulong("cur", 0400, dir, &ent->stored);
debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
}
@@ -692,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
WRITE_ONCE(dev->fill_delay, 0);
}
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int i;
@@ -707,22 +750,21 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
timer_setup(&dev->delay_timer, delay_time_func, 0);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
- INIT_LIST_HEAD(&ent->head);
- spin_lock_init(&ent->lock);
+ xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
ent->order = i + 2;
ent->dev = dev;
ent->limit = 0;
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- if (i > MR_CACHE_LAST_STD_ENTRY) {
- mlx5_odp_init_mr_cache_entry(ent);
+ if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+ mlx5_odp_init_mkey_cache_entry(ent);
continue;
}
- if (ent->order > mr_cache_max_order(dev))
+ if (ent->order > mkey_cache_max_order(dev))
continue;
ent->page = PAGE_SHIFT;
@@ -734,36 +776,36 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
}
- mlx5_mr_cache_debugfs_init(dev);
+ mlx5_mkey_cache_debugfs_init(dev);
return 0;
}
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
unsigned int i;
if (!dev->cache.wq)
return 0;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
struct mlx5_cache_ent *ent = &dev->cache.ent[i];
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
ent->disabled = true;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
cancel_delayed_work_sync(&ent->dwork);
}
- mlx5_mr_cache_debugfs_cleanup(dev);
+ mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
@@ -830,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
return (npages + 1) / 2;
}
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
- return MR_CACHE_LAST_STD_ENTRY + 2;
+ return MKEY_CACHE_LAST_STD_ENTRY + 2;
return MLX5_MAX_UMR_SHIFT;
}
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
- unsigned int order)
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+ unsigned int order)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
if (order < cache->ent[0].order)
return &cache->ent[0];
order = order - cache->ent[0].order;
- if (order > MR_CACHE_LAST_STD_ENTRY)
+ if (order > MKEY_CACHE_LAST_STD_ENTRY)
return NULL;
return &cache->ent[order];
}
@@ -888,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
0, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
- ent = mr_cache_ent_from_order(
+ ent = mkey_cache_ent_from_order(
dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
/*
* Matches access in alloc_cache_mr(). If the MR can't come from the
@@ -1083,6 +1125,7 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
return ERR_PTR(-EINVAL);
@@ -1319,7 +1362,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
/* We only track the allocated sizes of MRs from the cache */
- if (!mr->cache_ent)
+ if (!mr->mmkey.cache_ent)
return false;
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
@@ -1328,7 +1371,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
if (WARN_ON(!*page_size))
return false;
- return (1ULL << mr->cache_ent->order) >=
+ return (1ULL << mr->mmkey.cache_ent->order) >=
ib_umem_num_dma_blocks(new_umem, *page_size);
}
@@ -1569,15 +1612,17 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->cache_ent) {
- if (mlx5r_umr_revoke_mr(mr)) {
- spin_lock_irq(&mr->cache_ent->lock);
- mr->cache_ent->total_mrs--;
- spin_unlock_irq(&mr->cache_ent->lock);
- mr->cache_ent = NULL;
- }
+ if (mr->mmkey.cache_ent) {
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ mr->mmkey.cache_ent->in_use--;
+ xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+
+ if (mlx5r_umr_revoke_mr(mr) ||
+ push_mkey(mr->mmkey.cache_ent, false,
+ xa_mk_value(mr->mmkey.key)))
+ mr->mmkey.cache_ent = NULL;
}
- if (!mr->cache_ent) {
+ if (!mr->mmkey.cache_ent) {
rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
if (rc)
return rc;
@@ -1594,12 +1639,10 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
mlx5_ib_free_odp_mr(mr);
}
- if (mr->cache_ent) {
- mlx5_mr_cache_free(dev, mr);
- } else {
+ if (!mr->mmkey.cache_ent)
mlx5_free_priv_descs(mr);
- kfree(mr);
- }
+
+ kfree(mr);
return 0;
}
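
The dereg hunk above fixes the ownership of a cached mkey: it returns to the cache only when both the UMR revoke and push_mkey() succeed, and the struct mlx5_ib_mr itself is now freed unconditionally. A stub-based sketch of that rule; the stand-in functions are not the driver API.

/* illustrative only: ownership of the mkey on deregistration */
#include <stdio.h>

static int revoke_mr(void)	{ return 0; }	/* 0: UMR revoke worked */
static int push_mkey_back(void)	{ return 0; }	/* 0: stored in the cache */
static void destroy_mkey(void)	{ }		/* firmware DESTROY_MKEY */

/* returns 1 if the mkey survived in the cache, 0 if it was destroyed */
static int dereg_cached_mr(void)
{
	int cached = 1;

	if (revoke_mr() || push_mkey_back())
		cached = 0;		/* mr->mmkey.cache_ent = NULL */

	if (!cached)
		destroy_mkey();

	/* kfree(mr) happens unconditionally now */
	return cached;
}

int main(void)
{
	printf("mkey kept in cache: %d\n", dereg_cached_mr());
	return 0;
}
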
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 84da5674e1ab..e305bf1dc6c2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1588,7 +1588,7 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
return err;
}
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
{
if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 3a48364c0918..e00b94d1b1ea 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -176,6 +176,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
dev->umrc.pd = pd;
sema_init(&dev->umrc.sem, MAX_UMR_WR);
+ mutex_init(&dev->umrc.lock);
return 0;
@@ -195,6 +196,31 @@ void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err)
+ goto err;
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
{
@@ -231,7 +257,7 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
id.ib_cqe = cqe;
mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
- MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);
+ MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
mlx5r_ring_db(qp, 1, ctrl);
@@ -270,17 +296,49 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5r_umr_init_context(&umr_context);
down(&umrc->sem);
- err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
- with_data);
- if (err)
- mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
- else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed (%u)\n",
- umr_context.status);
+ while (true) {
+ mutex_lock(&umrc->lock);
+ if (umrc->state == MLX5_UMR_STATE_ERR) {
+ mutex_unlock(&umrc->lock);
err = -EFAULT;
+ break;
+ }
+
+ if (umrc->state == MLX5_UMR_STATE_RECOVER) {
+ mutex_unlock(&umrc->lock);
+ usleep_range(3000, 5000);
+ continue;
+ }
+
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+ with_data);
+ mutex_unlock(&umrc->lock);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
+ err);
+ break;
}
+
+ wait_for_completion(&umr_context.done);
+
+ if (umr_context.status == IB_WC_SUCCESS)
+ break;
+
+ if (umr_context.status == IB_WC_WR_FLUSH_ERR)
+ continue;
+
+ WARN_ON_ONCE(1);
+ mlx5_ib_warn(dev,
+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
+ umr_context.status);
+ mutex_lock(&umrc->lock);
+ err = mlx5r_umr_recover(dev);
+ mutex_unlock(&umrc->lock);
+ if (err)
+ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ err);
+ err = -EFAULT;
+ break;
}
up(&umrc->sem);
return err;
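
The new loop in mlx5r_umr_post_send_wait() treats IB_WC_WR_FLUSH_ERR as "the QP was reset underneath us, resubmit" and anything else as grounds for a reset-to-RTS recovery plus -EFAULT. A reduced user-space model of that completion handling; the completion stream and the names here are invented for illustration.

/* simplified model of the UMR completion retry loop, not driver code */
#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR, WC_FATAL_ERR };

/* pretend completion stream: one flush (QP was being recovered), then ok */
static enum wc_status next_completion(void)
{
	static int calls;
	return calls++ == 0 ? WC_WR_FLUSH_ERR : WC_SUCCESS;
}

static int post_send_wait(void)
{
	for (;;) {
		switch (next_completion()) {
		case WC_SUCCESS:
			return 0;
		case WC_WR_FLUSH_ERR:
			continue;	/* resubmit the flushed WQE */
		case WC_FATAL_ERR:
		default:
			/* real driver: mlx5r_umr_recover(), then -EFAULT */
			return -14;
		}
	}
}

int main(void)
{
	printf("post_send_wait -> %d\n", post_send_wait());
	return 0;
}
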
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 03ed7c0fae50..d745ce9dc88a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3084,7 +3084,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
else
DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
- goto err0;
+ goto err1;
}
/* Index only, 18 bit long, lkey = itid << 8 | key */
@@ -3108,7 +3108,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
if (rc) {
DP_ERR(dev, "roce register tid returned an error %d\n", rc);
- goto err1;
+ goto err2;
}
mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
@@ -3117,8 +3117,10 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
return mr;
-err1:
+err2:
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
kfree(mr);
return ERR_PTR(rc);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index b37b1c6d35c6..26c615772be3 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -321,7 +321,7 @@ struct qib_verbs_txreq {
* These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
* negotiation) are used for the 3rd argument to path_f_set_ib_cfg
* with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the the possible values for qib_link_speed_enabled and active
+ * are also the possible values for qib_link_speed_enabled and active
* The values were chosen to match values used within the IB spec.
*/
#define QIB_IB_SDR 1
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index aa290928cf96..3937144b2ae5 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -153,7 +153,7 @@ static int qib_get_base_info(struct file *fp, void __user *ubase,
kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
/*
* for this use, may be cfgctxts summed over all chips that
- * are are configured and present
+ * are configured and present
*/
kinfo->spi_nctxts = dd->cfgctxts;
/* unit (chip/board) our context is on */
@@ -851,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
ret = -EPERM;
goto bail;
}
- /* don't allow them to later change to writeable with mprotect */
+ /* don't allow them to later change to writable with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
start = vma->vm_start;
@@ -941,7 +941,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
goto bail;
}
/*
- * Don't allow permission to later change to writeable
+ * Don't allow permission to later change to writable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 37b628a162e0..6af57067c32e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -58,7 +58,7 @@ static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
/*
* This file contains almost all the chip-specific register information and
* access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
- * exception of SerDes support, which in in qib_sd7220.c.
+ * exception of SerDes support, which is in qib_sd7220.c.
*/
/* Below uses machine-generated qib_chipnum_regs.h file */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ceed302cf6a0..6861c6384f18 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2850,9 +2850,9 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
qib_7322_free_irq(dd);
kfree(dd->cspec->cntrs);
- kfree(dd->cspec->sendchkenable);
- kfree(dd->cspec->sendgrhchk);
- kfree(dd->cspec->sendibchk);
+ bitmap_free(dd->cspec->sendchkenable);
+ bitmap_free(dd->cspec->sendgrhchk);
+ bitmap_free(dd->cspec->sendibchk);
kfree(dd->cspec->msix_entries);
for (i = 0; i < dd->num_pports; i++) {
unsigned long flags;
@@ -6383,18 +6383,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
features = qib_7322_boardname(dd);
/* now that piobcnt2k and 4k set, we can allocate these */
- sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
- NUM_VL15_BUFS + BITS_PER_LONG - 1;
- sbufcnt /= BITS_PER_LONG;
- dd->cspec->sendchkenable =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
- GFP_KERNEL);
- dd->cspec->sendgrhchk =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
- GFP_KERNEL);
- dd->cspec->sendibchk =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
- GFP_KERNEL);
+ sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+
+ dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL);
+ dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
+ dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
ret = -ENOMEM;
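
bitmap_zalloc() takes the bit count directly, so the hand-rolled round-up to whole longs above disappears. A user-space illustration of the arithmetic it hides; the bitmap helper is modelled with calloc, not the kernel API.

/* illustration of BITS_TO_LONGS rounding, not the kernel implementation */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc_model(unsigned long nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

int main(void)
{
	unsigned long sbufcnt = 144 + 32 + 7;	/* 2k + 4k + VL15 buffers, say */
	unsigned long *map = bitmap_zalloc_model(sbufcnt);

	if (!map)
		return 1;
	printf("%lu bits -> %lu longs\n",
	       sbufcnt, (unsigned long)BITS_TO_LONGS(sbufcnt));
	free(map);
	return 0;
}
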
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index d1a72e89e297..45211008449f 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1106,8 +1106,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (!qib_cpulist_count) {
u32 count = num_online_cpus();
- qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
- GFP_KERNEL);
+ qib_cpulist = bitmap_zalloc(count, GFP_KERNEL);
if (qib_cpulist)
qib_cpulist_count = count;
}
@@ -1279,7 +1278,7 @@ static void __exit qib_ib_cleanup(void)
#endif
qib_cpulist_count = 0;
- kfree(qib_cpulist);
+ bitmap_free(qib_cpulist);
WARN_ON(!xa_empty(&qib_dev_table));
qib_dev_cleanup();
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 81b810d006c0..1dc3ccf0cf1f 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -587,7 +587,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
/* Need to release */
u64 pollval;
/*
- * The only writeable bits are the request and CS.
+ * The only writable bits are the request and CS.
* Both should be clear
*/
u64 newval = 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index e212929369df..67a1b4562dc2 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -482,7 +482,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err)
goto out_free_dev;
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index da3a398053b8..fb0c008af78c 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -114,6 +114,8 @@ void retransmit_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+ pr_debug("%s: fired for qp#%d\n", __func__, qp->elem.index);
+
if (qp->valid) {
qp->comp.timeout = 1;
rxe_run_task(&qp->comp.task, 1);
@@ -560,17 +562,16 @@ int rxe_completer(void *arg)
struct sk_buff *skb = NULL;
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
- int ret = 0;
+ int ret;
if (!rxe_get(qp))
return -EAGAIN;
- if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
- qp->req.state == QP_STATE_RESET) {
+ if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
+ qp->comp.state == QP_STATE_RESET) {
rxe_drain_resp_pkts(qp, qp->valid &&
- qp->req.state == QP_STATE_ERROR);
- ret = -EAGAIN;
- goto done;
+ qp->comp.state == QP_STATE_ERROR);
+ goto exit;
}
if (qp->comp.timeout) {
@@ -580,10 +581,8 @@ int rxe_completer(void *arg)
qp->comp.timeout_retry = 0;
}
- if (qp->req.need_retry) {
- ret = -EAGAIN;
- goto done;
- }
+ if (qp->req.need_retry)
+ goto exit;
state = COMPST_GET_ACK;
@@ -676,8 +675,7 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- ret = -EAGAIN;
- goto done;
+ goto exit;
case COMPST_ERROR_RETRY:
/* we come here if the retry timer fired and we did
@@ -689,10 +687,8 @@ int rxe_completer(void *arg)
*/
/* there is nothing to retry in this case */
- if (!wqe || (wqe->state == wqe_state_posted)) {
- ret = -EAGAIN;
- goto done;
- }
+ if (!wqe || (wqe->state == wqe_state_posted))
+ goto exit;
/* if we've started a retry, don't start another
* retry sequence, unless this is a timeout.
@@ -730,18 +726,21 @@ int rxe_completer(void *arg)
break;
case COMPST_RNR_RETRY:
+ /* we come here if we received an RNR NAK */
if (qp->comp.rnr_retry > 0) {
if (qp->comp.rnr_retry != 7)
qp->comp.rnr_retry--;
- qp->req.need_retry = 1;
+ /* don't start a retry flow until the
+ * rnr timer has fired
+ */
+ qp->req.wait_for_rnr_timer = 1;
pr_debug("qp#%d set rnr nak timer\n",
qp_num(qp));
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
- ret = -EAGAIN;
- goto done;
+ goto exit;
} else {
rxe_counter_inc(rxe,
RXE_CNT_RNR_RETRY_EXCEEDED);
@@ -754,12 +753,20 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
- ret = -EAGAIN;
- goto done;
+ goto exit;
}
}
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_completer
+ */
done:
+ ret = 0;
+ goto out;
+exit:
+ ret = -EAGAIN;
+out:
if (pkt)
free_pkt(pkt);
rxe_put(qp);
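
The done/exit/out labels give rxe_completer() one place that decides the return code and one shared cleanup path. A compilable miniature of that convention, with invented logic:

/* illustrative only: the done/exit/out label pattern */
#include <stdio.h>

static int completer(int have_work)
{
	int ret;

	if (!have_work)
		goto exit;

	/* ... handle the completion ... */
	goto done;

done:
	ret = 0;		/* tasklet keeps looping */
	goto out;
exit:
	ret = -11;		/* -EAGAIN: tasklet stops until rescheduled */
out:
	/* shared cleanup: free_pkt() and rxe_put(qp) in the driver */
	return ret;
}

int main(void)
{
	printf("%d %d\n", completer(1), completer(0));
	return 0;
}
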
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 642b52539ac3..b1a0ab3cd4bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -19,16 +19,16 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cqe > rxe->attr.max_cqe) {
- pr_warn("cqe(%d) > max_cqe(%d)\n",
- cqe, rxe->attr.max_cqe);
+ pr_debug("cqe(%d) > max_cqe(%d)\n",
+ cqe, rxe->attr.max_cqe);
goto err1;
}
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- pr_warn("cqe(%d) < current # elements in queue (%d)",
- cqe, count);
+ pr_debug("cqe(%d) < current # elements in queue (%d)",
+ cqe, count);
goto err1;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0e022ae1b8a5..22f6cc31d1d6 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -77,9 +77,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
-int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_elem *elem);
@@ -145,7 +144,7 @@ static inline int rcv_wqe_size(int max_sge)
max_sge * sizeof(struct ib_sge);
}
-void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
+void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index fc3942e04a1f..850b80f5ad8b 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,7 +24,7 @@ u8 rxe_get_next_key(u32 last_key)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- struct rxe_map_set *set = mr->cur_map_set;
+
switch (mr->type) {
case IB_MR_TYPE_DMA:
@@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < set->iova || length > set->length ||
- iova > set->iova + set->length - length)
+ if (iova < mr->iova || length > mr->length ||
+ iova > mr->iova + mr->length - length)
return -EFAULT;
return 0;
@@ -65,89 +65,41 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
-{
- int i;
-
- for (i = 0; i < num_map; i++)
- kfree(set->map[i]);
-
- kfree(set->map);
- kfree(set);
-}
-
-static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
- struct rxe_map_set *set;
+ int num_map;
+ struct rxe_map **map = mr->map;
- set = kmalloc(sizeof(*set), GFP_KERNEL);
- if (!set)
- goto err_out;
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
- set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
- if (!set->map)
- goto err_free_set;
+ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mr->map)
+ goto err1;
for (i = 0; i < num_map; i++) {
- set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
- if (!set->map[i])
- goto err_free_map;
+ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mr->map[i])
+ goto err2;
}
- *setp = set;
-
- return 0;
-
-err_free_map:
- for (i--; i >= 0; i--)
- kfree(set->map[i]);
-
- kfree(set->map);
-err_free_set:
- kfree(set);
-err_out:
- return -ENOMEM;
-}
-
-/**
- * rxe_mr_alloc() - Allocate memory map array(s) for MR
- * @mr: Memory region
- * @num_buf: Number of buffer descriptors to support
- * @both: If non zero allocate both mr->map and mr->next_map
- * else just allocate mr->map. Used for fast MRs
- *
- * Return: 0 on success else an error
- */
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
-{
- int ret;
- int num_map;
-
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
mr->map_mask = RXE_BUF_PER_MAP - 1;
+
mr->num_buf = num_buf;
- mr->max_buf = num_map * RXE_BUF_PER_MAP;
mr->num_map = num_map;
-
- ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
- if (ret)
- return -ENOMEM;
-
- if (both) {
- ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
- if (ret)
- goto err_free;
- }
+ mr->max_buf = num_map * RXE_BUF_PER_MAP;
return 0;
-err_free:
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- mr->cur_map_set = NULL;
+err2:
+ for (i--; i >= 0; i--)
+ kfree(mr->map[i]);
+
+ kfree(mr->map);
+err1:
return -ENOMEM;
}
@@ -164,7 +116,6 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
- struct rxe_map_set *set;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
@@ -172,6 +123,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
+ int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
@@ -185,20 +137,18 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
rxe_mr_init(access, mr);
- err = rxe_mr_alloc(mr, num_buf, 0);
+ err = rxe_mr_alloc(mr, num_buf);
if (err) {
pr_warn("%s: Unable to allocate memory for map\n",
__func__);
goto err_release_umem;
}
- set = mr->cur_map_set;
- set->page_shift = PAGE_SHIFT;
- set->page_mask = PAGE_SIZE - 1;
-
- num_buf = 0;
- map = set->map;
+ mr->page_shift = PAGE_SHIFT;
+ mr->page_mask = PAGE_SIZE - 1;
+ num_buf = 0;
+ map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -214,29 +164,33 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
pr_warn("%s: Unable to get virtual address\n",
__func__);
err = -ENOMEM;
- goto err_release_umem;
+ goto err_cleanup_map;
}
buf->addr = (uintptr_t)vaddr;
buf->size = PAGE_SIZE;
num_buf++;
buf++;
+
}
}
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
+ mr->length = length;
+ mr->iova = iova;
+ mr->va = start;
+ mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER;
- set->length = length;
- set->iova = iova;
- set->va = start;
- set->offset = ib_umem_offset(umem);
-
return 0;
+err_cleanup_map:
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+ kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -250,7 +204,7 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
/* always allow remote access for FMRs */
rxe_mr_init(IB_ACCESS_REMOTE, mr);
- err = rxe_mr_alloc(mr, max_pages, 1);
+ err = rxe_mr_alloc(mr, max_pages);
if (err)
goto err1;
@@ -268,24 +222,21 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- struct rxe_map_set *set = mr->cur_map_set;
- size_t offset = iova - set->iova + set->offset;
+ size_t offset = iova - mr->iova + mr->offset;
int map_index;
int buf_index;
u64 length;
- struct rxe_map *map;
- if (likely(set->page_shift)) {
- *offset_out = offset & set->page_mask;
- offset >>= set->page_shift;
+ if (likely(mr->page_shift)) {
+ *offset_out = offset & mr->page_mask;
+ offset >>= mr->page_shift;
*n_out = offset & mr->map_mask;
*m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -295,8 +246,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
map_index++;
buf_index = 0;
}
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
}
*m_out = map_index;
@@ -317,7 +267,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- if (!mr->cur_map_set) {
+ if (!mr->map) {
addr = (void *)(uintptr_t)iova;
goto out;
}
@@ -330,13 +280,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
+ if (offset + length > mr->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
out:
return addr;
@@ -372,7 +322,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return 0;
}
- WARN_ON_ONCE(!mr->cur_map_set);
+ WARN_ON_ONCE(!mr->map);
err = mr_check_range(mr, iova, length);
if (err) {
@@ -382,7 +332,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
lookup_iova(mr, iova, &m, &i, &offset);
- map = mr->cur_map_set->map + m;
+ map = mr->map + m;
buf = map[0]->buf + i;
while (length > 0) {
@@ -576,22 +526,22 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
return mr;
}
-int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_mr *mr;
int ret;
- mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
if (!mr) {
- pr_err("%s: No MR for rkey %#x\n", __func__, rkey);
+ pr_err("%s: No MR for key %#x\n", __func__, key);
ret = -EINVAL;
goto err;
}
- if (rkey != mr->rkey) {
- pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n",
- __func__, rkey, mr->rkey);
+ if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
+ pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n",
+ __func__, key, (mr->rkey ? mr->rkey : mr->lkey));
ret = -EINVAL;
goto err_drop_ref;
}
@@ -628,9 +578,8 @@ err:
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
- u32 key = wqe->wr.wr.reg.key & 0xff;
+ u32 key = wqe->wr.wr.reg.key;
u32 access = wqe->wr.wr.reg.access;
- struct rxe_map_set *set;
/* user can only register MR in free state */
if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
@@ -646,36 +595,19 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
return -EINVAL;
}
+ /* user is only allowed to change key portion of l/rkey */
+ if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
+ pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
+ __func__, key, mr->lkey);
+ return -EINVAL;
+ }
+
mr->access = access;
- mr->lkey = (mr->lkey & ~0xff) | key;
- mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+ mr->lkey = key;
+ mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
+ mr->iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID;
- set = mr->cur_map_set;
- mr->cur_map_set = mr->next_map_set;
- mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
- mr->next_map_set = set;
-
- return 0;
-}
-
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(set->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = set->map[set->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- set->nbuf++;
-
return 0;
}
@@ -687,7 +619,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (atomic_read(&mr->num_mw) > 0)
return -EINVAL;
- rxe_put(mr);
+ rxe_cleanup(mr);
return 0;
}
@@ -695,14 +627,15 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+ int i;
rxe_put(mr_pd(mr));
-
ib_umem_release(mr->umem);
- if (mr->cur_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
- if (mr->next_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
+ kfree(mr->map);
+ }
}
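
With the map_set indirection gone, rxe_mr_alloc() owns a single two-level map array and unwinds it on failure, and rxe_mr_cleanup() mirrors that. A user-space sketch of the same allocate/unwind/free shape using malloc; all names are hypothetical.

/* simplified model of the two-level map array, not driver code */
#include <stdlib.h>

#define RXE_BUF_PER_MAP 256

struct phys_buf_model { unsigned long long addr; unsigned long size; };
struct map_model { struct phys_buf_model buf[RXE_BUF_PER_MAP]; };

struct mr_model {
	struct map_model **map;
	int num_map;
};

static int mr_alloc(struct mr_model *mr, int num_buf)
{
	int num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
	int i;

	mr->map = calloc(num_map, sizeof(*mr->map));
	if (!mr->map)
		return -1;

	for (i = 0; i < num_map; i++) {
		mr->map[i] = malloc(sizeof(*mr->map[i]));
		if (!mr->map[i])
			goto err;
	}
	mr->num_map = num_map;
	return 0;

err:
	while (--i >= 0)
		free(mr->map[i]);
	free(mr->map);
	mr->map = NULL;
	return -1;
}

static void mr_cleanup(struct mr_model *mr)
{
	int i;

	if (!mr->map)
		return;
	for (i = 0; i < mr->num_map; i++)
		free(mr->map[i]);
	free(mr->map);
}

int main(void)
{
	struct mr_model mr = { 0 };

	if (mr_alloc(&mr, 1000))
		return 1;
	mr_cleanup(&mr);
	return 0;
}
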
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 2e1fa844fabf..104993801a80 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -33,6 +33,8 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
+ rxe_finalize(mw);
+
return 0;
}
@@ -40,7 +42,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
- rxe_put(mw);
+ rxe_cleanup(mw);
return 0;
}
@@ -48,8 +50,6 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
- u32 key = wqe->wr.wr.mw.rkey & 0xff;
-
if (mw->ibmw.type == IB_MW_TYPE_1) {
if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
pr_err_once(
@@ -87,11 +87,6 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
}
- if (unlikely(key == (mw->rkey & 0xff))) {
- pr_err_once("attempt to bind MW with same key\n");
- return -EINVAL;
- }
-
/* remaining checks only apply to a nonzero MR */
if (!mr)
return 0;
@@ -113,21 +108,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
(IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
!(mr->access & IB_ACCESS_LOCAL_WRITE))) {
pr_err_once(
- "attempt to bind an writeable MW to an MR without local write access\n");
+ "attempt to bind a writable MW to an MR without local write access\n");
return -EINVAL;
}
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
+ (mr->iova + mr->length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
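
The bind checks above now compare the window against mr->iova and mr->length directly. The two range predicates, pulled out as pure functions for illustration (hypothetical names):

/* illustrative only: the MW-in-MR range checks */
#include <stdbool.h>
#include <stdio.h>

/* zero-based MW: only the length has to fit inside the MR */
static bool zb_mw_ok(unsigned long long mw_len, unsigned long long mr_len)
{
	return mw_len <= mr_len;
}

/* VA MW: [addr, addr + len) must sit inside [iova, iova + mr_len) */
static bool va_mw_ok(unsigned long long addr, unsigned long long len,
		     unsigned long long iova, unsigned long long mr_len)
{
	return addr >= iova && addr + len <= iova + mr_len;
}

int main(void)
{
	printf("zb ok: %d, va ok: %d, va out of range: %d\n",
	       zb_mw_ok(4096, 8192),
	       va_mw_ok(0x2000, 0x1000, 0x1000, 0x2000),
	       va_mw_ok(0x2000, 0x2000, 0x1000, 0x2000));
	return 0;
}
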
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 568a7cbd13d4..86c7a8bf3cbb 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -105,6 +105,12 @@ enum rxe_device_param {
RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+ /* Max number of interations of each tasklet
+ * before yielding the cpu to let other
+ * work make progress
+ */
+ RXE_MAX_ITERATIONS = 1024,
+
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 19b14826385b..f50620f5a0a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -6,6 +6,7 @@
#include "rxe.h"
+#define RXE_POOL_TIMEOUT (200)
#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
@@ -136,10 +137,14 @@ void *rxe_alloc(struct rxe_pool *pool)
elem->pool = pool;
elem->obj = obj;
kref_init(&elem->ref_cnt);
+ init_completion(&elem->complete);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ /* allocate index in array but leave pointer as NULL so it
+ * can't be looked up until rxe_finalize() is called
+ */
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
- if (err)
+ if (err < 0)
goto err_free;
return obj;
@@ -151,9 +156,11 @@ err_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable)
{
int err;
+ gfp_t gfp_flags;
if (WARN_ON(pool->type == RXE_TYPE_MR))
return -EINVAL;
@@ -164,10 +171,19 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->pool = pool;
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
-
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
- &pool->next, GFP_KERNEL);
- if (err)
+ init_completion(&elem->complete);
+
+ /* AH objects are unique in that the create_ah verb
+ * can be called in atomic context. If the create_ah
+ * call is not sleepable use GFP_ATOMIC.
+ */
+ gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;
+
+ if (sleepable)
+ might_sleep();
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+ &pool->next, gfp_flags);
+ if (err < 0)
goto err_cnt;
return 0;
@@ -181,16 +197,15 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
struct rxe_pool_elem *elem;
struct xarray *xa = &pool->xa;
- unsigned long flags;
void *obj;
- xa_lock_irqsave(xa, flags);
+ rcu_read_lock();
elem = xa_load(xa, index);
if (elem && kref_get_unless_zero(&elem->ref_cnt))
obj = elem->obj;
else
obj = NULL;
- xa_unlock_irqrestore(xa, flags);
+ rcu_read_unlock();
return obj;
}
@@ -198,17 +213,74 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
static void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
- struct rxe_pool *pool = elem->pool;
- xa_erase(&pool->xa, elem->index);
+ complete(&elem->complete);
+}
+
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+{
+ struct rxe_pool *pool = elem->pool;
+ struct xarray *xa = &pool->xa;
+ static int timeout = RXE_POOL_TIMEOUT;
+ int ret, err = 0;
+ void *xa_ret;
+
+ if (sleepable)
+ might_sleep();
+
+ /* erase xarray entry to prevent looking up
+ * the pool elem from its index
+ */
+ xa_ret = xa_erase(xa, elem->index);
+ WARN_ON(xa_err(xa_ret));
+
+ /* if this is the last call to rxe_put complete the
+ * object. It is safe to touch obj->elem after this since
+ * it is freed below
+ */
+ __rxe_put(elem);
+
+ /* wait until all references to the object have been
+ * dropped before final object specific cleanup and
+ * return to rdma-core
+ */
+ if (sleepable) {
+ if (!completion_done(&elem->complete) && timeout) {
+ ret = wait_for_completion_timeout(&elem->complete,
+ timeout);
+
+ /* Shouldn't happen. There are still references to
+ * the object but, rather than deadlock, free the
+ * object or pass back to rdma-core.
+ */
+ if (WARN_ON(!ret))
+ err = -EINVAL;
+ }
+ } else {
+ unsigned long until = jiffies + timeout;
+
+ /* AH objects are unique in that the destroy_ah verb
+ * can be called in atomic context. This delay
+ * replaces the wait_for_completion call above
+ * when the destroy_ah call is not sleepable
+ */
+ while (!completion_done(&elem->complete) &&
+ time_before(jiffies, until))
+ mdelay(1);
+
+ if (WARN_ON(!completion_done(&elem->complete)))
+ err = -EINVAL;
+ }
if (pool->cleanup)
pool->cleanup(elem);
if (pool->type == RXE_TYPE_MR)
- kfree(elem->obj);
+ kfree_rcu(elem->obj);
atomic_dec(&pool->num_elem);
+
+ return err;
}
int __rxe_get(struct rxe_pool_elem *elem)
@@ -220,3 +292,11 @@ int __rxe_put(struct rxe_pool_elem *elem)
{
return kref_put(&elem->ref_cnt, rxe_elem_release);
}
+
+void __rxe_finalize(struct rxe_pool_elem *elem)
+{
+ void *xa_ret;
+
+ xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
+ WARN_ON(xa_err(xa_ret));
+}
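
Taken together, the rxe_pool.c changes split an object's life into reserve-index, finalize (publish), and cleanup (hide, drop the allocation reference, wait for the rest). A user-space model with a pointer table and an int refcount in place of the XArray, kref and completion; everything here is illustrative only.

/* simplified model of the rxe pool object lifecycle, not driver code */
#include <stdio.h>

#define POOL_SIZE 8

struct elem { int refs; int index; };

static struct elem *table[POOL_SIZE];	/* NULL entry == not visible */

static void pool_alloc(struct elem *e, int index)
{
	e->index = index;
	e->refs = 1;			/* the allocation reference */
	table[index] = NULL;		/* index reserved, lookups still fail */
}

static void pool_finalize(struct elem *e)
{
	table[e->index] = e;		/* now get_index() can see it */
}

static struct elem *pool_get(int index)
{
	struct elem *e = table[index];

	if (e)
		e->refs++;
	return e;
}

static void pool_put(struct elem *e)
{
	e->refs--;
	/* on the last put the driver does complete(&elem->complete) */
}

static void pool_cleanup(struct elem *e)
{
	table[e->index] = NULL;		/* no new lookups from here on */
	pool_put(e);			/* drop the allocation reference */
	/* driver then waits on elem->complete until refs really hit zero */
}

int main(void)
{
	struct elem e;
	struct elem *got;

	pool_alloc(&e, 3);
	printf("lookup before finalize: %p\n", (void *)pool_get(3));
	pool_finalize(&e);
	got = pool_get(3);
	printf("lookup after finalize:  %p (refs=%d)\n", (void *)got, got->refs);
	pool_put(got);
	pool_cleanup(&e);
	printf("lookup after cleanup:   %p (refs=%d)\n",
	       (void *)pool_get(3), e.refs);
	return 0;
}
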
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 0860660d65ec..9d83cb32092f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -24,6 +24,7 @@ struct rxe_pool_elem {
void *obj;
struct kref ref_cnt;
struct list_head list;
+ struct completion complete;
u32 index;
};
@@ -57,21 +58,28 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable);
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem, true)
+#define rxe_add_to_pool_ah(pool, obj, sleepable) __rxe_add_to_pool(pool, \
+ &(obj)->elem, sleepable)
/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
int __rxe_get(struct rxe_pool_elem *elem);
-
#define rxe_get(obj) __rxe_get(&(obj)->elem)
int __rxe_put(struct rxe_pool_elem *elem);
-
#define rxe_put(obj) __rxe_put(&(obj)->elem)
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable);
+#define rxe_cleanup(obj) __rxe_cleanup(&(obj)->elem, true)
+#define rxe_cleanup_ah(obj, sleepable) __rxe_cleanup(&(obj)->elem, sleepable)
+
#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt)
+void __rxe_finalize(struct rxe_pool_elem *elem);
+#define rxe_finalize(obj) __rxe_finalize(&(obj)->elem)
+
#endif /* RXE_POOL_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22e9b85344c3..516bf9b95e48 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -120,17 +120,15 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
struct resp_res *res = &qp->resp.resources[i];
- free_rd_atomic_resource(qp, res);
+ free_rd_atomic_resource(res);
}
kfree(qp->resp.resources);
qp->resp.resources = NULL;
}
}
-void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
+void free_rd_atomic_resource(struct resp_res *res)
{
- if (res->type == RXE_ATOMIC_MASK)
- kfree_skb(res->atomic.skb);
res->type = 0;
}
@@ -142,7 +140,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
if (qp->resp.resources) {
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
res = &qp->resp.resources[i];
- free_rd_atomic_resource(qp, res);
+ free_rd_atomic_resource(res);
}
}
}
@@ -174,6 +172,14 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->state_lock);
+ spin_lock_init(&qp->req.task.state_lock);
+ spin_lock_init(&qp->resp.task.state_lock);
+ spin_lock_init(&qp->comp.task.state_lock);
+
+ spin_lock_init(&qp->sq.sq_lock);
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
atomic_set(&qp->ssn, 0);
atomic_set(&qp->skb_out, 0);
}
@@ -230,10 +236,10 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
QUEUE_TYPE_FROM_CLIENT);
qp->req.state = QP_STATE_RESET;
+ qp->comp.state = QP_STATE_RESET;
qp->req.opcode = -1;
qp->comp.opcode = -1;
- spin_lock_init(&qp->sq.sq_lock);
skb_queue_head_init(&qp->req_pkts);
rxe_init_task(rxe, &qp->req.task, qp,
@@ -284,9 +290,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- spin_lock_init(&qp->rq.producer_lock);
- spin_lock_init(&qp->rq.consumer_lock);
-
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
@@ -490,6 +493,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
/* move qp to the reset state */
qp->req.state = QP_STATE_RESET;
+ qp->comp.state = QP_STATE_RESET;
qp->resp.state = QP_STATE_RESET;
/* let state machines reset themselves drain work and packet queues
@@ -507,6 +511,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
atomic_set(&qp->ssn, 0);
qp->req.opcode = -1;
qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
qp->req.noack_pkts = 0;
qp->resp.msn = 0;
qp->resp.opcode = -1;
@@ -552,6 +557,7 @@ void rxe_qp_error(struct rxe_qp *qp)
{
qp->req.state = QP_STATE_ERROR;
qp->resp.state = QP_STATE_ERROR;
+ qp->comp.state = QP_STATE_ERROR;
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
@@ -689,6 +695,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
pr_debug("qp#%d state -> INIT\n", qp_num(qp));
qp->req.state = QP_STATE_INIT;
qp->resp.state = QP_STATE_INIT;
+ qp->comp.state = QP_STATE_INIT;
break;
case IB_QPS_RTR:
@@ -699,6 +706,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
case IB_QPS_RTS:
pr_debug("qp#%d state -> RTS\n", qp_num(qp));
qp->req.state = QP_STATE_READY;
+ qp->comp.state = QP_STATE_READY;
break;
case IB_QPS_SQD:
@@ -804,13 +812,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
if (qp->rq.queue)
rxe_queue_cleanup(qp->rq.queue);
- atomic_dec(&qp->scq->num_wq);
- if (qp->scq)
+ if (qp->scq) {
+ atomic_dec(&qp->scq->num_wq);
rxe_put(qp->scq);
+ }
- atomic_dec(&qp->rcq->num_wq);
- if (qp->rcq)
+ if (qp->rcq) {
+ atomic_dec(&qp->rcq->num_wq);
rxe_put(qp->rcq);
+ }
if (qp->pd)
rxe_put(qp->pd);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 6227112ef7a2..ed44042782fa 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -7,9 +7,6 @@
#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H
-/* for definition of shared struct rxe_queue_buf */
-#include <uapi/rdma/rdma_user_rxe.h>
-
/* Implements a simple circular buffer that is shared between user
* and the driver and can be resized. The requested element size is
* rounded up to a power of 2 and the number of elements in the buffer
@@ -53,6 +50,8 @@ enum queue_type {
QUEUE_TYPE_FROM_DRIVER,
};
+struct rxe_queue_buf;
+
struct rxe_queue {
struct rxe_dev *rxe;
struct rxe_queue_buf *buf;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d98237389cf..f63771207970 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -15,8 +15,7 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 opcode);
static inline void retry_first_write_send(struct rxe_qp *qp,
- struct rxe_send_wqe *wqe,
- unsigned int mask, int npsn)
+ struct rxe_send_wqe *wqe, int npsn)
{
int i;
@@ -83,7 +82,7 @@ static void req_retry(struct rxe_qp *qp)
if (mask & WR_WRITE_OR_SEND_MASK) {
npsn = (qp->comp.psn - wqe->first_psn) &
BTH_PSN_MASK;
- retry_first_write_send(qp, wqe, mask, npsn);
+ retry_first_write_send(qp, wqe, npsn);
}
if (mask & WR_READ_MASK) {
@@ -101,7 +100,11 @@ void rnr_nak_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
- pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
+ pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp));
+
+ /* request a send queue retry */
+ qp->req.need_retry = 1;
+ qp->req.wait_for_rnr_timer = 0;
rxe_run_task(&qp->req.task, 1);
}
@@ -161,16 +164,36 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
(wqe->state != wqe_state_processing)))
return NULL;
- if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
- (index != cons))) {
- qp->req.wait_fence = 1;
- return NULL;
- }
-
wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
}
+/**
+ * rxe_wqe_is_fenced - check if next wqe is fenced
+ * @qp: the queue pair
+ * @wqe: the next wqe
+ *
+ * Returns: 1 if wqe needs to wait
+ * 0 if wqe is ready to go
+ */
+static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ /* Local invalidate fence (LIF) see IBA 10.6.5.1
+ * Requires ALL previous operations on the send queue
+ * are complete. Make mandatory for the rxe driver.
+ */
+ if (wqe->wr.opcode == IB_WR_LOCAL_INV)
+ return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
+
+ /* Fence see IBA 10.8.3.3
+ * Requires that all previous read and atomic operations
+ * are complete.
+ */
+ return (wqe->wr.send_flags & IB_SEND_FENCE) &&
+ atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
+}
+
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
@@ -581,9 +604,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
- if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- qp->sq_sig_type == IB_SIGNAL_ALL_WR)
- rxe_run_task(&qp->comp.task, 1);
+ /* There is no ack coming for local work requests
+ * which can lead to a deadlock. So go ahead and complete
+ * it now.
+ */
+ rxe_run_task(&qp->comp.task, 1);
return 0;
}
@@ -599,6 +624,7 @@ int rxe_requester(void *arg)
u32 payload;
int mtu;
int opcode;
+ int err;
int ret;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
@@ -609,10 +635,20 @@ int rxe_requester(void *arg)
if (!rxe_get(qp))
return -EAGAIN;
-next_wqe:
- if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ if (unlikely(!qp->valid))
goto exit;
+ if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+ wqe = req_next_wqe(qp);
+ if (wqe)
+ /*
+ * Generate an error completion for error qp state
+ */
+ goto err;
+ else
+ goto exit;
+ }
+
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = queue_get_consumer(q,
QUEUE_TYPE_FROM_CLIENT);
@@ -620,10 +656,17 @@ next_wqe:
qp->req.need_rd_atomic = 0;
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
goto exit;
}
- if (unlikely(qp->req.need_retry)) {
+ /* we come here if the retransmit timer has fired
+ * or if the rnr timer has fired. If the retransmit
+ * timer fires while we are processing an RNR NAK wait
+ * until the rnr timer has fired before starting the
+ * retry flow
+ */
+ if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
req_retry(qp);
qp->req.need_retry = 0;
}
@@ -632,12 +675,17 @@ next_wqe:
if (unlikely(!wqe))
goto exit;
+ if (rxe_wqe_is_fenced(qp, wqe)) {
+ qp->req.wait_fence = 1;
+ goto exit;
+ }
+
if (wqe->mask & WR_LOCAL_OP_MASK) {
- ret = rxe_do_local_ops(qp, wqe);
- if (unlikely(ret))
+ err = rxe_do_local_ops(qp, wqe);
+ if (unlikely(err))
goto err;
else
- goto next_wqe;
+ goto done;
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
@@ -685,9 +733,8 @@ next_wqe:
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- __rxe_do_task(&qp->comp.task);
- rxe_put(qp);
- return 0;
+ rxe_run_task(&qp->comp.task, 0);
+ goto done;
}
payload = mtu;
}
@@ -703,25 +750,29 @@ next_wqe:
if (unlikely(!av)) {
pr_err("qp#%d Failed no address vector\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err_drop_ah;
+ goto err;
}
skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err_drop_ah;
+ if (ah)
+ rxe_put(ah);
+ goto err;
}
- ret = finish_packet(qp, av, wqe, &pkt, skb, payload);
- if (unlikely(ret)) {
+ err = finish_packet(qp, av, wqe, &pkt, skb, payload);
+ if (unlikely(err)) {
pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
- if (ret == -EFAULT)
+ if (err == -EFAULT)
wqe->status = IB_WC_LOC_PROT_ERR;
else
wqe->status = IB_WC_LOC_QP_OP_ERR;
kfree_skb(skb);
- goto err_drop_ah;
+ if (ah)
+ rxe_put(ah);
+ goto err;
}
if (ah)
@@ -736,13 +787,14 @@ next_wqe:
save_state(wqe, qp, &rollback_wqe, &rollback_psn);
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
- ret = rxe_xmit_packet(qp, &pkt, skb);
- if (ret) {
+
+ err = rxe_xmit_packet(qp, &pkt, skb);
+ if (err) {
qp->need_req_skb = 1;
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
- if (ret == -EAGAIN) {
+ if (err == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
goto exit;
}
@@ -753,16 +805,23 @@ next_wqe:
update_state(qp, &pkt);
- goto next_wqe;
-
-err_drop_ah:
- if (ah)
- rxe_put(ah);
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_requester
+ */
+done:
+ ret = 0;
+ goto out;
err:
+ /* update wqe_index for each wqe completion */
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
- __rxe_do_task(&qp->comp.task);
-
+ qp->req.state = QP_STATE_ERROR;
+ rxe_run_task(&qp->comp.task, 0);
exit:
+ ret = -EAGAIN;
+out:
rxe_put(qp);
- return -EAGAIN;
+
+ return ret;
}
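
rxe_wqe_is_fenced() above encodes two different waiting rules for the requester. Reduced to plain terms, they look roughly like the sketch below; this is an illustrative restatement, not driver code, and the real check expresses "outstanding read/atomic work" as the rd_atomic credit count differing from max_rd_atomic.

#include <stdbool.h>

static bool wqe_must_wait(bool is_local_inv, bool fence_flag,
			  unsigned int send_queue_pending,
			  unsigned int read_atomic_pending)
{
	/* local invalidate: wait until the whole send queue has drained */
	if (is_local_inv)
		return send_queue_pending != 0;

	/* IB_SEND_FENCE: wait only for outstanding read/atomic operations */
	return fence_flag && read_atomic_pending != 0;
}
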
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index f4f6ee5d81fe..b36ec5c4d5e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -21,6 +21,7 @@ enum resp_states {
RESPST_CHK_RKEY,
RESPST_EXECUTE,
RESPST_READ_REPLY,
+ RESPST_ATOMIC_REPLY,
RESPST_COMPLETE,
RESPST_ACKNOWLEDGE,
RESPST_CLEANUP,
@@ -55,6 +56,7 @@ static char *resp_state_name[] = {
[RESPST_CHK_RKEY] = "CHK_RKEY",
[RESPST_EXECUTE] = "EXECUTE",
[RESPST_READ_REPLY] = "READ_REPLY",
+ [RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY",
[RESPST_COMPLETE] = "COMPLETE",
[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
[RESPST_CLEANUP] = "CLEANUP",
@@ -448,7 +450,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
if (rkey_is_mw(rkey)) {
mw = rxe_lookup_mw(qp, access, rkey);
if (!mw) {
- pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
+ pr_debug("%s: no MW matches rkey %#x\n",
+ __func__, rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -468,7 +471,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
} else {
mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
if (!mr) {
- pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
+ pr_debug("%s: no MR matches rkey %#x\n",
+ __func__, rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -549,49 +553,106 @@ out:
return rc;
}
+static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ int type)
+{
+ struct resp_res *res;
+ u32 pkts;
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(res);
+
+ res->type = type;
+ res->replay = 0;
+
+ switch (type) {
+ case RXE_READ_MASK:
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+ break;
+ case RXE_ATOMIC_MASK:
+ res->first_psn = pkt->psn;
+ res->last_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ break;
+ }
+
+ return res;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
-static enum resp_states process_atomic(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+static enum resp_states atomic_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
{
u64 *vaddr;
enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+ u64 value;
- if (mr->state != RXE_MR_STATE_VALID) {
- ret = RESPST_ERR_RKEY_VIOLATION;
- goto out;
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
+ qp->resp.res = res;
}
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
+ if (!res->replay) {
+ if (mr->state != RXE_MR_STATE_VALID) {
+ ret = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
- /* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
- ret = RESPST_ERR_MISALIGNED_ATOMIC;
- goto out;
- }
+ vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
+ sizeof(u64));
- spin_lock_bh(&atomic_ops_lock);
+ /* check vaddr is 8 bytes aligned. */
+ if (!vaddr || (uintptr_t)vaddr & 7) {
+ ret = RESPST_ERR_MISALIGNED_ATOMIC;
+ goto out;
+ }
- qp->resp.atomic_orig = *vaddr;
+ spin_lock_bh(&atomic_ops_lock);
+ res->atomic.orig_val = value = *vaddr;
- if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
- if (*vaddr == atmeth_comp(pkt))
- *vaddr = atmeth_swap_add(pkt);
- } else {
- *vaddr += atmeth_swap_add(pkt);
- }
+ if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == atmeth_comp(pkt))
+ value = atmeth_swap_add(pkt);
+ } else {
+ value += atmeth_swap_add(pkt);
+ }
+
+ *vaddr = value;
+ spin_unlock_bh(&atomic_ops_lock);
+
+ qp->resp.msn++;
- spin_unlock_bh(&atomic_ops_lock);
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
- ret = RESPST_NONE;
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+ }
+
+ ret = RESPST_ACKNOWLEDGE;
out:
return ret;
}
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt,
struct rxe_pkt_info *ack,
int opcode,
int payload,
@@ -629,7 +690,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
}
if (ack->mask & RXE_ATMACK_MASK)
- atmack_set_orig(ack, qp->resp.atomic_orig);
+ atmack_set_orig(ack, qp->resp.res->atomic.orig_val);
err = rxe_prepare(&qp->pri_av, ack, skb);
if (err) {
@@ -640,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
return skb;
}
-static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- struct resp_res *res;
- u32 pkts;
-
- res = &qp->resp.resources[qp->resp.res_head];
- rxe_advance_resp_resource(qp);
- free_rd_atomic_resource(qp, res);
-
- res->type = RXE_READ_MASK;
- res->replay = 0;
- res->read.va = qp->resp.va + qp->resp.offset;
- res->read.va_org = qp->resp.va + qp->resp.offset;
- res->read.resid = qp->resp.resid;
- res->read.length = qp->resp.resid;
- res->read.rkey = qp->resp.rkey;
-
- pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
- res->first_psn = pkt->psn;
- res->cur_psn = pkt->psn;
- res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
-
- res->state = rdatm_res_state_new;
-
- return res;
-}
-
/**
* rxe_recheck_mr - revalidate MR from rkey and get a reference
* @qp: the qp
@@ -738,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
struct rxe_mr *mr;
if (!res) {
- res = rxe_prepare_read_res(qp, req_pkt);
+ res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
qp->resp.res = res;
}
@@ -771,7 +804,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
payload = min_t(int, res->read.resid, mtu);
- skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
res->cur_psn, AETH_ACK_UNLIMITED);
if (!skb)
return RESPST_ERR_RNR;
@@ -858,9 +891,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp->resp.msn++;
return RESPST_READ_REPLY;
} else if (pkt->mask & RXE_ATOMIC_MASK) {
- err = process_atomic(qp, pkt);
- if (err)
- return err;
+ return RESPST_ATOMIC_REPLY;
} else {
/* Unreachable */
WARN_ON_ONCE(1);
@@ -997,14 +1028,13 @@ finish:
return RESPST_CLEANUP;
}
-static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- u8 syndrome, u32 psn)
+static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
+ skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
0, psn, syndrome);
if (!skb) {
err = -ENOMEM;
@@ -1019,40 +1049,29 @@ err1:
return err;
}
-static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- u8 syndrome)
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
- int rc = 0;
+ int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- struct resp_res *res;
- skb = prepare_ack_packet(qp, pkt, &ack_pkt,
- IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
- syndrome);
+ skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
+ 0, psn, syndrome);
if (!skb) {
- rc = -ENOMEM;
+ err = -ENOMEM;
goto out;
}
- res = &qp->resp.resources[qp->resp.res_head];
- free_rd_atomic_resource(qp, res);
- rxe_advance_resp_resource(qp);
-
- skb_get(skb);
- res->type = RXE_ATOMIC_MASK;
- res->atomic.skb = skb;
- res->first_psn = ack_pkt.psn;
- res->last_psn = ack_pkt.psn;
- res->cur_psn = ack_pkt.psn;
+ err = rxe_xmit_packet(qp, &ack_pkt, skb);
+ if (err)
+ pr_err_ratelimited("Failed sending atomic ack\n");
- rc = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (rc) {
- pr_err_ratelimited("Failed sending ack\n");
- rxe_put(qp);
- }
+ /* have to clear this since it is used to trigger
+ * long read replies
+ */
+ qp->resp.res = NULL;
out:
- return rc;
+ return err;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
@@ -1062,11 +1081,11 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
return RESPST_CLEANUP;
if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
- send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
+ send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
else if (pkt->mask & RXE_ATOMIC_MASK)
- send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
+ send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
else if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
+ send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
return RESPST_CLEANUP;
}
@@ -1119,7 +1138,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+ send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
return RESPST_CLEANUP;
} else if (pkt->mask & RXE_READ_MASK) {
struct resp_res *res;
@@ -1173,14 +1192,11 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
/* Find the operation in our list of responder resources. */
res = find_resource(qp, pkt->psn);
if (res) {
- skb_get(res->atomic.skb);
- /* Resend the result. */
- rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
- if (rc) {
- pr_err("Failed resending result. This flow is not handled - skb ignored\n");
- rc = RESPST_CLEANUP;
- goto out;
- }
+ res->replay = 1;
+ res->cur_psn = pkt->psn;
+ qp->resp.res = res;
+ rc = RESPST_ATOMIC_REPLY;
+ goto out;
}
/* Resource not found. Class D error. Drop the request. */
@@ -1260,17 +1276,15 @@ int rxe_responder(void *arg)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
- int ret = 0;
+ int ret;
if (!rxe_get(qp))
return -EAGAIN;
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
- if (!qp->valid) {
- ret = -EINVAL;
- goto done;
- }
+ if (!qp->valid)
+ goto exit;
switch (qp->resp.state) {
case QP_STATE_RESET:
@@ -1316,6 +1330,9 @@ int rxe_responder(void *arg)
case RESPST_READ_REPLY:
state = read_reply(qp, pkt);
break;
+ case RESPST_ATOMIC_REPLY:
+ state = atomic_reply(qp, pkt);
+ break;
case RESPST_ACKNOWLEDGE:
state = acknowledge(qp, pkt);
break;
@@ -1327,7 +1344,7 @@ int rxe_responder(void *arg)
break;
case RESPST_ERR_PSN_OUT_OF_SEQ:
/* RC only - Class B. Drop packet. */
- send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
+ send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
state = RESPST_CLEANUP;
break;
@@ -1349,7 +1366,7 @@ int rxe_responder(void *arg)
if (qp_type(qp) == IB_QPT_RC) {
rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
/* RC - class B */
- send_ack(qp, pkt, AETH_RNR_NAK |
+ send_ack(qp, AETH_RNR_NAK |
(~AETH_TYPE_MASK &
qp->attr.min_rnr_timer),
pkt->psn);
@@ -1438,7 +1455,7 @@ int rxe_responder(void *arg)
case RESPST_ERROR:
qp->resp.goto_error = 0;
- pr_warn("qp#%d moved to error state\n", qp_num(qp));
+ pr_debug("qp#%d moved to error state\n", qp_num(qp));
rxe_qp_error(qp);
goto exit;
@@ -1447,9 +1464,16 @@ int rxe_responder(void *arg)
}
}
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_responder
+ */
+done:
+ ret = 0;
+ goto out;
exit:
ret = -EAGAIN;
-done:
+out:
rxe_put(qp);
return ret;
}
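
With the ack skb no longer cached in the responder resource, a duplicate atomic request is now answered by regenerating the ack from res->atomic.orig_val. Stripped of the driver plumbing, the operation itself behaves roughly as in this user-space restatement (illustrative only):

#include <stdint.h>
#include <stdbool.h>

/* returns the original value, which is what the ATOMIC ACK carries back */
static uint64_t rc_atomic_op(uint64_t *vaddr, bool compare_swap,
			     uint64_t compare, uint64_t swap_add)
{
	uint64_t orig = *vaddr;

	if (compare_swap) {
		if (orig == compare)
			*vaddr = swap_add;	/* swap only on match */
	} else {
		*vaddr += swap_add;		/* fetch-and-add */
	}

	return orig;
}
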
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 0c4db5bb17d7..2248cf33d776 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/hardirq.h>
-#include "rxe_task.h"
+#include "rxe.h"
int __rxe_do_task(struct rxe_task *task)
@@ -33,6 +33,7 @@ void rxe_do_task(struct tasklet_struct *t)
int cont;
int ret;
struct rxe_task *task = from_tasklet(task, t, tasklet);
+ unsigned int iterations = RXE_MAX_ITERATIONS;
spin_lock_bh(&task->state_lock);
switch (task->state) {
@@ -61,13 +62,20 @@ void rxe_do_task(struct tasklet_struct *t)
spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
- if (ret)
+ if (ret) {
task->state = TASK_STATE_START;
- else
+ } else if (iterations--) {
cont = 1;
+ } else {
+ /* reschedule the tasklet and exit
+ * the loop to give up the cpu
+ */
+ tasklet_schedule(&task->tasklet);
+ task->state = TASK_STATE_START;
+ }
break;
- /* soneone tried to run the task since the last time we called
+ /* someone tried to run the task since the last time we called
* func, so we will call one more time regardless of the
* return value
*/
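
The RXE_MAX_ITERATIONS change above bounds how long one tasklet invocation may run before giving the CPU back. The same pattern in a generic form; a sketch only, where the "my_*" names, the budget value, and the stubbed work function are illustrative rather than driver code:

#include <linux/interrupt.h>

#define MY_BUDGET 1024

struct my_work {
	struct tasklet_struct tasklet;
	/* ... pending items ... */
};

/* do one unit of work; return nonzero when the queue is empty */
static int my_process_one(struct my_work *w)
{
	return 1;	/* stub for the sketch */
}

static void my_tasklet_fn(struct tasklet_struct *t)
{
	struct my_work *w = from_tasklet(w, t, tasklet);
	unsigned int budget = MY_BUDGET;

	while (budget--) {
		if (my_process_one(w))
			return;		/* drained, nothing to reschedule */
	}

	/* budget exhausted: yield the cpu, run again for the rest */
	tasklet_schedule(&w->tasklet);
}
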
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9d995854a174..e264cf69bf55 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -115,7 +115,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
- rxe_put(uc);
+ rxe_cleanup(uc);
}
static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
@@ -149,7 +149,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
- rxe_put(pd);
+ rxe_cleanup(pd);
return 0;
}
@@ -176,7 +176,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
- err = rxe_add_to_pool(&rxe->ah_pool, ah);
+ err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err)
return err;
@@ -188,7 +189,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = copy_to_user(&uresp->ah_num, &ah->ah_num,
sizeof(uresp->ah_num));
if (err) {
- rxe_put(ah);
+ rxe_cleanup(ah);
return -EFAULT;
}
} else if (ah->is_user) {
@@ -197,6 +198,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
}
rxe_init_av(init_attr->ah_attr, &ah->av);
+ rxe_finalize(ah);
+
return 0;
}
@@ -228,7 +231,8 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
- rxe_put(ah);
+ rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+
return 0;
}
@@ -308,12 +312,13 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
- goto err_put;
+ goto err_cleanup;
return 0;
-err_put:
- rxe_put(srq);
+err_cleanup:
+ rxe_cleanup(srq);
+
return err;
}
@@ -362,7 +367,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
- rxe_put(srq);
+ rxe_cleanup(srq);
return 0;
}
@@ -429,10 +434,11 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (err)
goto qp_init;
+ rxe_finalize(qp);
return 0;
qp_init:
- rxe_put(qp);
+ rxe_cleanup(qp);
return err;
}
@@ -485,7 +491,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (ret)
return ret;
- rxe_put(qp);
+ rxe_cleanup(qp);
return 0;
}
@@ -803,7 +809,7 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
rxe_cq_disable(cq);
- rxe_put(cq);
+ rxe_cleanup(cq);
return 0;
}
@@ -898,6 +904,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_get(pd);
rxe_mr_init_dma(pd, access, mr);
+ rxe_finalize(mr);
return &mr->ibmr;
}
@@ -926,11 +933,13 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
if (err)
goto err3;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err3:
rxe_put(pd);
- rxe_put(mr);
+ rxe_cleanup(mr);
err2:
return ERR_PTR(err);
}
@@ -958,35 +967,52 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (err)
goto err2;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err2:
rxe_put(pd);
- rxe_put(mr);
+ rxe_cleanup(mr);
err1:
return ERR_PTR(err);
}
-/* build next_map_set from scatterlist
- * The IB_WR_REG_MR WR will swap map_sets
- */
+static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ mr->nbuf++;
+
+ return 0;
+}
+
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
int n;
- set->nbuf = 0;
+ mr->nbuf = 0;
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
- set->va = ibmr->iova;
- set->iova = ibmr->iova;
- set->length = ibmr->length;
- set->page_shift = ilog2(ibmr->page_size);
- set->page_mask = ibmr->page_size - 1;
- set->offset = set->iova & set->page_mask;
+ mr->va = ibmr->iova;
+ mr->iova = ibmr->iova;
+ mr->length = ibmr->length;
+ mr->page_shift = ilog2(ibmr->page_size);
+ mr->page_mask = ibmr->page_size - 1;
+ mr->offset = mr->iova & mr->page_mask;
return n;
}
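
rxe_map_mr_sg() above now stores the page geometry directly in struct rxe_mr. The arithmetic it records, shown with illustrative values for 4 KiB pages:

#include <stdint.h>

static void mr_page_math_example(void)
{
	uint32_t page_size  = 4096;
	uint32_t page_shift = 12;		/* ilog2(4096) */
	uint32_t page_mask  = page_size - 1;	/* 0xfff */
	uint64_t iova       = 0x7f001234;

	uint32_t offset = iova & page_mask;	/* 0x234: start offset in first page */
	uint64_t pgnum  = iova >> page_shift;	/* 0x7f001: page index of the iova */

	(void)offset;
	(void)pgnum;
}
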
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ac464e68c923..96af3e054f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"
@@ -124,11 +123,13 @@ struct rxe_req_info {
int need_rd_atomic;
int wait_psn;
int need_retry;
+ int wait_for_rnr_timer;
int noack_pkts;
struct rxe_task task;
};
struct rxe_comp_info {
+ enum rxe_qp_state state;
u32 psn;
int opcode;
int timeout;
@@ -155,7 +156,7 @@ struct resp_res {
union {
struct {
- struct sk_buff *skb;
+ u64 orig_val;
} atomic;
struct {
u64 va_org;
@@ -189,7 +190,6 @@ struct rxe_resp_info {
u32 resid;
u32 rkey;
u32 length;
- u64 atomic_orig;
/* SRQ only */
struct {
@@ -288,17 +288,6 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
-struct rxe_map_set {
- struct rxe_map **map;
- u64 va;
- u64 iova;
- size_t length;
- u32 offset;
- u32 nbuf;
- int page_shift;
- int page_mask;
-};
-
static inline int rkey_is_mw(u32 rkey)
{
u32 index = rkey >> 8;
@@ -316,20 +305,26 @@ struct rxe_mr {
u32 rkey;
enum rxe_mr_state state;
enum ib_mr_type type;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
int access;
+ int page_shift;
+ int page_mask;
int map_shift;
int map_mask;
u32 num_buf;
+ u32 nbuf;
u32 max_buf;
u32 num_map;
atomic_t num_mw;
- struct rxe_map_set *cur_map_set;
- struct rxe_map_set *next_map_set;
+ struct rxe_map **map;
};
enum rxe_mw_state {
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 17f34d584cd9..f88d2971c2c6 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -725,11 +725,11 @@ static int siw_proc_mpareply(struct siw_cep *cep)
enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
rv = siw_recv_mpa_rr(cep);
- if (rv != -EAGAIN)
- siw_cancel_mpatimer(cep);
if (rv)
goto out_err;
+ siw_cancel_mpatimer(cep);
+
rep = &cep->mpa.hdr;
if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
@@ -895,7 +895,8 @@ static int siw_proc_mpareply(struct siw_cep *cep)
}
out_err:
- siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+ if (rv != -EAGAIN)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
return rv;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 09316072b789..8dedae7ae79e 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1167,7 +1167,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
err_out:
siw_dbg(base_cq->device, "CQ creation failed: %d", rv);
- if (cq && cq->queue) {
+ if (cq->queue) {
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2c3dca41d3bd..ed25061fac62 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -573,7 +573,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
- hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
phead = skb->data;
if (unlikely(!skb_pull(skb, hlen))) {
ipoib_warn(priv, "linear data too small\n");
@@ -1109,7 +1109,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
* if he sets the device address back to be based on GID index 0,
* he no longer wishs to control it.
*
- * If the user doesn't control the the device address,
+ * If the user doesn't control the device address,
* IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means
* the port GUID has changed and GID at index 0 has changed
* so we need to change priv->local_gid and priv->dev->dev_addr
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2a8961b685c2..a4904371e2db 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1664,8 +1664,10 @@ static void ipoib_napi_add(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
- netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
+ netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
+ IPOIB_NUM_WC);
+ netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
+ MAX_SEND_CQE);
}
static void ipoib_napi_del(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 321949a570ed..620ae5b2d80d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -568,7 +568,7 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -685,7 +685,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
return cls_session;
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index bd5f3b5e1727..7b83f48f60c5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iscsi_hdr *hdr;
char *data;
int length;
+ bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- if (iser_conn->iscsi_conn->session->discovery_sess)
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c08f2d9133b6..a00ca117303a 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -246,6 +246,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
device = ib_conn->device;
ib_dev = device->ib_device;
+ /* +1 for drain */
if (ib_conn->pi_support)
max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
else
@@ -267,7 +268,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
init_attr.qp_context = (void *)ib_conn;
init_attr.send_cq = ib_conn->cq;
init_attr.recv_cq = ib_conn->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -485,7 +487,7 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_conn, err);
/* block until all flush errors are consumed */
- ib_drain_sq(ib_conn->qp);
+ ib_drain_qp(ib_conn->qp);
}
return 1;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 385a19846c24..1e6ffafa2db3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -32,11 +32,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
- struct rtrs_clt_stats_pcpu *s;
-
- s = get_cpu_ptr(stats->pcpu_stats);
- s->rdma.failover_cnt++;
- put_cpu_ptr(stats->pcpu_stats);
+ this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,12 +165,8 @@ int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
size_t size, int d)
{
- struct rtrs_clt_stats_pcpu *s;
-
- s = get_cpu_ptr(stats->pcpu_stats);
- s->rdma.dir[d].cnt++;
- s->rdma.dir[d].size_total += size;
- put_cpu_ptr(stats->pcpu_stats);
+ this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
+ this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
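
Both hunks above replace the get_cpu_ptr()/put_cpu_ptr() pair with single per-cpu operations. Side by side, as a sketch with a made-up counter struct:

#include <linux/percpu.h>
#include <linux/types.h>

struct my_counters {
	u64 cnt;
	u64 bytes;
};

static void update_old_style(struct my_counters __percpu *stats, size_t size)
{
	struct my_counters *s;

	s = get_cpu_ptr(stats);		/* disables preemption */
	s->cnt++;
	s->bytes += size;
	put_cpu_ptr(stats);
}

static void update_new_style(struct my_counters __percpu *stats, size_t size)
{
	/* each line is one preemption-safe per-cpu read-modify-write */
	this_cpu_inc(stats->cnt);
	this_cpu_add(stats->bytes, size);
}
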
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 9809c3883979..baecde41d126 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -740,25 +740,25 @@ struct path_it {
struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
-/**
- * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+/*
+ * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
* @head: the head for the list.
- * @ptr: the list head to take the next element from.
- * @type: the type of the struct this is embedded in.
- * @memb: the name of the list_head within the struct.
+ * @clt_path: The element to take the next clt_path from.
*
- * Next element returned in round-robin fashion, i.e. head will be skipped,
+ * Next clt path returned in round-robin fashion, i.e. head will be skipped,
* but if list is observed as empty, NULL will be returned.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
+ * This function may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
-#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
-({ \
- list_next_or_null_rcu(head, ptr, type, memb) ?: \
- list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
- type, memb); \
-})
+static inline struct rtrs_clt_path *
+rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
+{
+ return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
+ list_next_or_null_rcu(head,
+ READ_ONCE((&clt_path->s.entry)->next),
+ typeof(*clt_path), s.entry);
+}
/**
* get_next_path_rr() - Returns path in round-robin fashion.
@@ -789,10 +789,8 @@ static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
path = list_first_or_null_rcu(&clt->paths_list,
typeof(*path), s.entry);
else
- path = list_next_or_null_rr_rcu(&clt->paths_list,
- &path->s.entry,
- typeof(*path),
- s.entry);
+ path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
+
rcu_assign_pointer(*ppcpu_path, path);
return path;
@@ -1403,8 +1401,7 @@ static int alloc_permits(struct rtrs_clt_sess *clt)
unsigned int chunk_bits;
int err, i;
- clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
- sizeof(long), GFP_KERNEL);
+ clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
if (!clt->permits_map) {
err = -ENOMEM;
goto out_err;
@@ -1426,7 +1423,7 @@ static int alloc_permits(struct rtrs_clt_sess *clt)
return 0;
err_map:
- kfree(clt->permits_map);
+ bitmap_free(clt->permits_map);
clt->permits_map = NULL;
out_err:
return err;
@@ -1434,13 +1431,11 @@ out_err:
static void free_permits(struct rtrs_clt_sess *clt)
{
- if (clt->permits_map) {
- size_t sz = clt->queue_depth;
-
+ if (clt->permits_map)
wait_event(clt->permits_wait,
- find_first_bit(clt->permits_map, sz) >= sz);
- }
- kfree(clt->permits_map);
+ bitmap_empty(clt->permits_map, clt->queue_depth));
+
+ bitmap_free(clt->permits_map);
clt->permits_map = NULL;
kfree(clt->permits);
clt->permits = NULL;
@@ -2277,8 +2272,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
* removed. If @sess is the last element, then @next is NULL.
*/
rcu_read_lock();
- next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
- typeof(*next), s.entry);
+ next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
rcu_read_unlock();
/*
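
The permits map conversion earlier in this file swaps an open-coded array of longs for the bitmap_*() helpers. What each form looks like in isolation; a sketch, with "nbits" simply a parameter:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *permits_alloc_old(unsigned int nbits)
{
	return kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
}

static unsigned long *permits_alloc_new(unsigned int nbits)
{
	return bitmap_zalloc(nbits, GFP_KERNEL);	/* pairs with bitmap_free() */
}

static bool permits_all_released(const unsigned long *map, unsigned int nbits)
{
	/* replaces: find_first_bit(map, nbits) >= nbits */
	return bitmap_empty(map, nbits);
}
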
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 9a1e5c2ae55c..ac0df734eba8 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -23,6 +23,17 @@
#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
__stringify(RTRS_PROTO_VER_MINOR)
+/*
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65536 (2^16) in theory.
+ * But mempool_create, create_qp and ib_post_send fail with
+ * "cannot allocate memory" error if sess_queue_depth is too big.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65534 and it depends on the system.
+ */
+#define MAX_SESS_QUEUE_DEPTH 65535
+
enum rtrs_imm_const {
MAX_IMM_TYPE_BITS = 4,
MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
@@ -46,16 +57,6 @@ enum {
MAX_PATHS_NUM = 128,
- /*
- * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
- * and the minimum chunk size is 4096 (2^12).
- * So the maximum sess_queue_depth is 65536 (2^16) in theory.
- * But mempool_create, create_qp and ib_post_send fail with
- * "cannot allocate memory" error if sess_queue_depth is too big.
- * Therefore the pratical max value of sess_queue_depth is
- * somewhere between 1 and 65534 and it depends on the system.
- */
- MAX_SESS_QUEUE_DEPTH = 65535,
MIN_CHUNK_SIZE = 8192,
RTRS_HB_INTERVAL_MS = 5000,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
index 44b1c1652131..2aff1213a19d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -14,9 +14,14 @@
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
{
if (enable) {
- struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+ memset(r, 0, sizeof(*r));
+ }
- memset(r, 0, sizeof(*r));
return 0;
}
@@ -25,11 +30,22 @@ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
{
- struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats sum;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ }
- return sysfs_emit(page, "%lld %lld %lld %lldn %u\n",
- (s64)atomic64_read(&r->dir[READ].cnt),
- (s64)atomic64_read(&r->dir[READ].size_total),
- (s64)atomic64_read(&r->dir[WRITE].cnt),
- (s64)atomic64_read(&r->dir[WRITE].size_total), 0);
+ return sysfs_emit(page, "%llu %llu %llu %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index b94ae12c2795..2a3c9ac64a42 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -220,6 +220,8 @@ static void rtrs_srv_path_stats_release(struct kobject *kobj)
stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+ free_percpu(stats->rdma_stats);
+
kfree(stats);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 24024bce2566..34c03bde5064 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
-#include <linux/mempool.h>
#include "rtrs-srv.h"
#include "rtrs-log.h"
@@ -26,11 +25,7 @@ MODULE_LICENSE("GPL");
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
-/* We guarantee to serve 10 paths at least */
-#define CHUNK_POOL_SZ 10
-
static struct rtrs_rdma_dev_pd dev_pd;
-static mempool_t *chunk_pool;
struct class *rtrs_dev_class;
static struct rtrs_srv_ib_ctx ib_ctx;
@@ -1358,7 +1353,7 @@ static void free_srv(struct rtrs_srv_sess *srv)
WARN_ON(refcount_read(&srv->refcount));
for (i = 0; i < srv->queue_depth; i++)
- mempool_free(srv->chunks[i], chunk_pool);
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
mutex_destroy(&srv->paths_mutex);
mutex_destroy(&srv->paths_ev_mutex);
@@ -1411,7 +1406,8 @@ static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
goto err_free_srv;
for (i = 0; i < srv->queue_depth; i++) {
- srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ srv->chunks[i] = alloc_pages(GFP_KERNEL,
+ get_order(max_chunk_size));
if (!srv->chunks[i])
goto err_free_chunks;
}
@@ -1424,7 +1420,7 @@ static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
err_free_chunks:
while (i--)
- mempool_free(srv->chunks[i], chunk_pool);
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
err_free_srv:
@@ -1513,6 +1509,7 @@ static void free_path(struct rtrs_srv_path *srv_path)
kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
} else {
+ free_percpu(srv_path->stats->rdma_stats);
kfree(srv_path->stats);
kfree(srv_path);
}
@@ -1755,13 +1752,17 @@ static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
if (!srv_path->stats)
goto err_free_sess;
+ srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
+ if (!srv_path->stats->rdma_stats)
+ goto err_free_stats;
+
srv_path->stats->srv_path = srv_path;
srv_path->dma_addr = kcalloc(srv->queue_depth,
sizeof(*srv_path->dma_addr),
GFP_KERNEL);
if (!srv_path->dma_addr)
- goto err_free_stats;
+ goto err_free_percpu;
srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
GFP_KERNEL);
@@ -1813,6 +1814,8 @@ err_free_con:
kfree(srv_path->s.con);
err_free_dma_addr:
kfree(srv_path->dma_addr);
+err_free_percpu:
+ free_percpu(srv_path->stats->rdma_stats);
err_free_stats:
kfree(srv_path->stats);
err_free_sess:
@@ -2266,14 +2269,10 @@ static int __init rtrs_server_init(void)
err);
return err;
}
- chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
- get_order(max_chunk_size));
- if (!chunk_pool)
- return -ENOMEM;
rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
if (IS_ERR(rtrs_dev_class)) {
err = PTR_ERR(rtrs_dev_class);
- goto out_chunk_pool;
+ goto out_err;
}
rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
if (!rtrs_wq) {
@@ -2285,9 +2284,7 @@ static int __init rtrs_server_init(void)
out_dev_class:
class_destroy(rtrs_dev_class);
-out_chunk_pool:
- mempool_destroy(chunk_pool);
-
+out_err:
return err;
}
@@ -2295,7 +2292,6 @@ static void __exit rtrs_server_exit(void)
{
destroy_workqueue(rtrs_wq);
class_destroy(rtrs_dev_class);
- mempool_destroy(chunk_pool);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 6292e87f6afd..186a63c217df 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/refcount.h>
+#include <linux/percpu.h>
#include "rtrs-pri.h"
/*
@@ -29,15 +30,15 @@ enum rtrs_srv_state {
*/
struct rtrs_srv_stats_rdma_stats {
struct {
- atomic64_t cnt;
- atomic64_t size_total;
+ u64 cnt;
+ u64 size_total;
} dir[2];
};
struct rtrs_srv_stats {
- struct kobject kobj_stats;
- struct rtrs_srv_stats_rdma_stats rdma_stats;
- struct rtrs_srv_path *srv_path;
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats __percpu *rdma_stats;
+ struct rtrs_srv_path *srv_path;
};
struct rtrs_srv_con {
@@ -130,8 +131,8 @@ void close_path(struct rtrs_srv_path *srv_path);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
{
- atomic64_inc(&s->rdma_stats.dir[d].cnt);
- atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+ this_cpu_inc(s->rdma_stats->dir[d].cnt);
+ this_cpu_add(s->rdma_stats->dir[d].size_total, size);
}
/* functions which are implemented in rtrs-srv-stats.c */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 6058abf42ba7..7720ea270ed8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1282,8 +1282,7 @@ struct srp_terminate_context {
int scsi_result;
};
-static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
- bool reserved)
+static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{
struct srp_terminate_context *context = context_ptr;
struct srp_target_port *target = context->srp_target;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f86ee1c4b970..21cbe30d526f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -565,12 +565,9 @@ static int srpt_refresh_port(struct srpt_port *sport)
if (ret)
return ret;
- sport->port_guid_id.wwn.priv = sport;
- srpt_format_guid(sport->port_guid_id.name,
- sizeof(sport->port_guid_id.name),
+ srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
&sport->gid.global.interface_id);
- sport->port_gid_id.wwn.priv = sport;
- snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
+ snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
@@ -2221,13 +2218,13 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
ch->sport = sport;
- if (ib_cm_id) {
- ch->ib_cm.cm_id = ib_cm_id;
- ib_cm_id->context = ch;
- } else {
+ if (rdma_cm_id) {
ch->using_rdma_cm = true;
ch->rdma_cm.cm_id = rdma_cm_id;
rdma_cm_id->context = ch;
+ } else {
+ ch->ib_cm.cm_id = ib_cm_id;
+ ib_cm_id->context = ch;
}
/*
* ch->rq_size should be at least as large as the initiator queue
@@ -2314,31 +2311,35 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- mutex_lock(&sport->port_guid_id.mutex);
- list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->guid_id) {
+ mutex_lock(&sport->guid_id->mutex);
+ list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
+ }
+ mutex_unlock(&sport->guid_id->mutex);
}
- mutex_unlock(&sport->port_guid_id.mutex);
- mutex_lock(&sport->port_gid_id.mutex);
- list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->gid_id) {
+ mutex_lock(&sport->gid_id->mutex);
+ list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- /* Retry without leading "0x" */
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->gid_id->mutex);
}
- mutex_unlock(&sport->port_gid_id.mutex);
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
@@ -2983,7 +2984,12 @@ static int srpt_release_sport(struct srpt_port *sport)
return 0;
}
-static struct se_wwn *__srpt_lookup_wwn(const char *name)
+struct port_and_port_id {
+ struct srpt_port *sport;
+ struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
{
struct ib_device *dev;
struct srpt_device *sdev;
@@ -2998,25 +3004,38 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid_id.name, name) == 0)
- return &sport->port_guid_id.wwn;
- if (strcmp(sport->port_gid_id.name, name) == 0)
- return &sport->port_gid_id.wwn;
+ if (strcmp(sport->guid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->guid_id};
+ }
+ if (strcmp(sport->gid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->gid_id};
+ }
}
}
- return NULL;
+ return (struct port_and_port_id){};
}
-static struct se_wwn *srpt_lookup_wwn(const char *name)
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the RDMA port reference count if an RDMA port pointer is returned.
+ * The caller must drop that reference count by calling srpt_port_put_ref().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
{
- struct se_wwn *wwn;
+ struct port_and_port_id papi;
spin_lock(&srpt_dev_lock);
- wwn = __srpt_lookup_wwn(name);
+ papi = __srpt_lookup_port(name);
spin_unlock(&srpt_dev_lock);
- return wwn;
+ return papi;
}
static void srpt_free_srq(struct srpt_device *sdev)
@@ -3101,6 +3120,18 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
return ret;
}
+static void srpt_free_sdev(struct kref *refcnt)
+{
+ struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+ kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+ kref_put(&sdev->refcnt, srpt_free_sdev);
+}
+
/**
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
@@ -3119,6 +3150,7 @@ static int srpt_add_one(struct ib_device *device)
if (!sdev)
return -ENOMEM;
+ kref_init(&sdev->refcnt);
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
@@ -3182,10 +3214,6 @@ static int srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
- mutex_init(&sport->port_guid_id.mutex);
- INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
- mutex_init(&sport->port_gid_id.mutex);
- INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
ret = srpt_refresh_port(sport);
if (ret) {
@@ -3214,7 +3242,7 @@ err_ring:
srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
- kfree(sdev);
+ srpt_sdev_put(sdev);
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
return ret;
}
@@ -3258,7 +3286,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
ib_dealloc_pd(sdev->pd);
- kfree(sdev);
+ srpt_sdev_put(sdev);
}
static struct ib_client srpt_client = {
@@ -3286,10 +3314,10 @@ static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
{
struct srpt_port *sport = wwn->priv;
- if (wwn == &sport->port_guid_id.wwn)
- return &sport->port_guid_id;
- if (wwn == &sport->port_gid_id.wwn)
- return &sport->port_gid_id;
+ if (sport->guid_id && &sport->guid_id->wwn == wwn)
+ return sport->guid_id;
+ if (sport->gid_id && &sport->gid_id->wwn == wwn)
+ return sport->gid_id;
WARN_ON_ONCE(true);
return NULL;
}
@@ -3774,7 +3802,31 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
+ struct port_and_port_id papi = srpt_lookup_port(name);
+ struct srpt_port *sport = papi.sport;
+ struct srpt_port_id *port_id;
+
+ if (!papi.port_id)
+ return ERR_PTR(-EINVAL);
+ if (*papi.port_id) {
+ /* Attempt to create a directory that already exists. */
+ WARN_ON_ONCE(true);
+ return &(*papi.port_id)->wwn;
+ }
+ port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+ if (!port_id) {
+ srpt_sdev_put(sport->sdev);
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&port_id->mutex);
+ INIT_LIST_HEAD(&port_id->tpg_list);
+ port_id->wwn.priv = sport;
+ memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
+ sport->gid_name, ARRAY_SIZE(port_id->name));
+
+ *papi.port_id = port_id;
+
+ return &port_id->wwn;
}
/**
@@ -3783,6 +3835,18 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
+ struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+ struct srpt_port *sport = wwn->priv;
+
+ if (sport->guid_id == port_id)
+ sport->guid_id = NULL;
+ else if (sport->gid_id == port_id)
+ sport->gid_id = NULL;
+ else
+ WARN_ON_ONCE(true);
+
+ srpt_sdev_put(sport->sdev);
+ kfree(port_id);
}
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
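
The srpt changes above pin struct srpt_device with a kref so a port looked up through configfs cannot outlive its HCA. The underlying pattern, reduced to a self-contained sketch with illustrative "my_dev" names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_dev {
	struct kref refcnt;
	/* ... device state ... */
};

static void my_dev_release(struct kref *kref)
{
	struct my_dev *dev = container_of(kref, struct my_dev, refcnt);

	kfree(dev);
}

static struct my_dev *my_dev_alloc(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		kref_init(&dev->refcnt);	/* count starts at 1 */
	return dev;
}

static void my_dev_get(struct my_dev *dev)
{
	kref_get(&dev->refcnt);			/* e.g. on lookup */
}

static void my_dev_put(struct my_dev *dev)
{
	kref_put(&dev->refcnt, my_dev_release);	/* frees on the last put */
}
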
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 76e66f630c17..4c46b301eea1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -376,7 +376,7 @@ struct srpt_tpg {
};
/**
- * struct srpt_port_id - information about an RDMA port name
+ * struct srpt_port_id - LIO RDMA port information
* @mutex: Protects @tpg_list changes.
* @tpg_list: TPGs associated with the RDMA port name.
* @wwn: WWN associated with the RDMA port name.
@@ -393,7 +393,7 @@ struct srpt_port_id {
};
/**
- * struct srpt_port - information associated by SRPT with a single IB port
+ * struct srpt_port - SRPT RDMA port information
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -402,8 +402,10 @@ struct srpt_port_id {
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_id: target port GUID
- * @port_gid_id: target port GID
+ * @guid_name: port name in GUID format.
+ * @guid_id: LIO target port information for the port name in GUID format.
+ * @gid_name: port name in GID format.
+ * @gid_id: LIO target port information for the port name in GID format.
* @port_attrib: Port attributes that can be accessed through configfs.
* @refcount: Number of objects associated with this port.
* @freed_channels: Completion that will be signaled once @refcount becomes 0.
@@ -419,8 +421,10 @@ struct srpt_port {
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct srpt_port_id port_guid_id;
- struct srpt_port_id port_gid_id;
+ char guid_name[64];
+ struct srpt_port_id *guid_id;
+ char gid_name[64];
+ struct srpt_port_id *gid_id;
struct srpt_port_attrib port_attrib;
atomic_t refcount;
struct completion *freed_channels;
@@ -430,6 +434,7 @@ struct srpt_port {
/**
* struct srpt_device - information associated by SRPT with a single HCA
+ * @refcnt: Reference count for this device.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
@@ -445,6 +450,7 @@ struct srpt_port {
* @port: Information about the ports owned by this HCA.
*/
struct srpt_device {
+ struct kref refcnt;
struct ib_device *device;
struct ib_pd *pd;
u32 lkey;
diff --git a/drivers/input/input-core-private.h b/drivers/input/input-core-private.h
new file mode 100644
index 000000000000..116834cf8868
--- /dev/null
+++ b/drivers/input/input-core-private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _INPUT_CORE_PRIVATE_H
+#define _INPUT_CORE_PRIVATE_H
+
+/*
+ * Functions and definitions that are private to input core and
+ * should not be used by input drivers or handlers.
+ */
+
+struct input_dev;
+
+void input_mt_release_slots(struct input_dev *dev);
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value);
+
+#endif /* _INPUT_CORE_PRIVATE_H */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 44fe6f2f063c..14b53dac1253 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -8,6 +8,7 @@
#include <linux/input/mt.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include "input-core-private.h"
#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
@@ -259,10 +260,13 @@ static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
{
int i;
+ lockdep_assert_held(&dev->event_lock);
+
for (i = 0; i < mt->num_slots; i++) {
- if (!input_mt_is_used(mt, &mt->slots[i])) {
- input_mt_slot(dev, i);
- input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+ if (input_mt_is_active(&mt->slots[i]) &&
+ !input_mt_is_used(mt, &mt->slots[i])) {
+ input_handle_event(dev, EV_ABS, ABS_MT_SLOT, i);
+ input_handle_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
}
}
}
@@ -278,13 +282,44 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
__input_mt_drop_unused(dev, mt);
mt->frame++;
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
/**
+ * input_mt_release_slots() - Deactivate all slots
+ * @dev: input device with allocated MT slots
+ *
+ * Lift all active slots.
+ */
+void input_mt_release_slots(struct input_dev *dev)
+{
+ struct input_mt *mt = dev->mt;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ if (mt) {
+ /* This will effectively mark all slots unused. */
+ mt->frame++;
+
+ __input_mt_drop_unused(dev, mt);
+
+ if (test_bit(ABS_PRESSURE, dev->absbit))
+ input_handle_event(dev, EV_ABS, ABS_PRESSURE, 0);
+
+ mt->frame++;
+ }
+}
+
+/**
* input_mt_sync_frame() - synchronize mt frame
* @dev: input device with allocated MT slots
*
@@ -300,8 +335,13 @@ void input_mt_sync_frame(struct input_dev *dev)
if (!mt)
return;
- if (mt->flags & INPUT_MT_DROP_UNUSED)
+ if (mt->flags & INPUT_MT_DROP_UNUSED) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
__input_mt_drop_unused(dev, mt);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
use_count = true;
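
The input-mt.c hunks above settle on a common pattern: the exported entry point acquires dev->event_lock while the internal helper only asserts that the lock is already held, so the helper can also be called from paths that take the lock themselves. A minimal sketch of that pattern, not taken from the patch (the example_* names are hypothetical):

#include <linux/input.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Internal helper: callers must already hold dev->event_lock. */
static void __example_flush_state(struct input_dev *dev)
{
	lockdep_assert_held(&dev->event_lock);
	/* ... modify state that dev->event_lock protects ... */
}

/* Public-style wrapper: takes the lock, then delegates to the helper. */
static void example_flush_state(struct input_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	__example_flush_state(dev);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
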
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1365c9dfb5f2..ebb2b7f0f8ff 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
+#include "input-core-private.h"
#include "input-poller.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
@@ -142,6 +143,8 @@ static void input_pass_values(struct input_dev *dev,
struct input_handle *handle;
struct input_value *v;
+ lockdep_assert_held(&dev->event_lock);
+
if (!count)
return;
@@ -174,44 +177,6 @@ static void input_pass_values(struct input_dev *dev,
}
}
-static void input_pass_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
-{
- struct input_value vals[] = { { type, code, value } };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-}
-
-/*
- * Generate software autorepeat event. Note that we take
- * dev->event_lock here to avoid racing with input_event
- * which may cause keys get "stuck".
- */
-static void input_repeat_key(struct timer_list *t)
-{
- struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (test_bit(dev->repeat_key, dev->key) &&
- is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- struct input_value vals[] = {
- { EV_KEY, dev->repeat_key, 2 },
- input_value_sync
- };
-
- input_set_timestamp(dev, ktime_get());
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-
- if (dev->rep[REP_PERIOD])
- mod_timer(&dev->timer, jiffies +
- msecs_to_jiffies(dev->rep[REP_PERIOD]));
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
#define INPUT_IGNORE_EVENT 0
#define INPUT_PASS_TO_HANDLERS 1
#define INPUT_PASS_TO_DEVICE 2
@@ -275,6 +240,10 @@ static int input_get_disposition(struct input_dev *dev,
int disposition = INPUT_IGNORE_EVENT;
int value = *pval;
+ /* filter-out events from inhibited devices */
+ if (dev->inhibited)
+ return INPUT_IGNORE_EVENT;
+
switch (type) {
case EV_SYN:
@@ -375,19 +344,9 @@ static int input_get_disposition(struct input_dev *dev,
return disposition;
}
-static void input_handle_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+static void input_event_dispose(struct input_dev *dev, int disposition,
+ unsigned int type, unsigned int code, int value)
{
- int disposition;
-
- /* filter-out events from inhibited devices */
- if (dev->inhibited)
- return;
-
- disposition = input_get_disposition(dev, type, code, &value);
- if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
- add_input_randomness(type, code, value);
-
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
@@ -426,7 +385,22 @@ static void input_handle_event(struct input_dev *dev,
input_pass_values(dev, dev->vals, dev->num_vals);
dev->num_vals = 0;
}
+}
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+{
+ int disposition;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ disposition = input_get_disposition(dev, type, code, &value);
+ if (disposition != INPUT_IGNORE_EVENT) {
+ if (type != EV_SYN)
+ add_input_randomness(type, code, value);
+
+ input_event_dispose(dev, disposition, type, code, value);
+ }
}
/**
@@ -613,7 +587,7 @@ static void __input_release_device(struct input_handle *handle)
lockdep_is_held(&dev->mutex));
if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
- /* Make sure input_pass_event() notices that grab is gone */
+ /* Make sure input_pass_values() notices that grab is gone */
synchronize_rcu();
list_for_each_entry(handle, &dev->h_list, d_node)
@@ -736,7 +710,7 @@ void input_close_device(struct input_handle *handle)
if (!--handle->open) {
/*
- * synchronize_rcu() makes sure that input_pass_event()
+ * synchronize_rcu() makes sure that input_pass_values()
* completed and that no more input events are delivered
* through this handle
*/
@@ -751,22 +725,21 @@ EXPORT_SYMBOL(input_close_device);
* Simulate keyup events for all keys that are marked as pressed.
* The function must be called with dev->event_lock held.
*/
-static void input_dev_release_keys(struct input_dev *dev)
+static bool input_dev_release_keys(struct input_dev *dev)
{
bool need_sync = false;
int code;
+ lockdep_assert_held(&dev->event_lock);
+
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
for_each_set_bit(code, dev->key, KEY_CNT) {
- input_pass_event(dev, EV_KEY, code, 0);
+ input_handle_event(dev, EV_KEY, code, 0);
need_sync = true;
}
-
- if (need_sync)
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
-
- memset(dev->key, 0, sizeof(dev->key));
}
+
+ return need_sync;
}
/*
@@ -793,7 +766,8 @@ static void input_disconnect_device(struct input_dev *dev)
* generate events even after we done here but they will not
* reach any handlers.
*/
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
@@ -1004,12 +978,16 @@ int input_set_keycode(struct input_dev *dev,
} else if (test_bit(EV_KEY, dev->evbit) &&
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- struct input_value vals[] = {
- { EV_KEY, old_keycode, 0 },
- input_value_sync
- };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
+ /*
+ * We have to use input_event_dispose() here directly instead
+ * of input_handle_event() because the key we want to release
+ * here is considered no longer supported by the device and
+ * input_handle_event() will ignore it.
+ */
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
+ EV_KEY, old_keycode, 0);
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
+ EV_SYN, SYN_REPORT, 1);
}
out:
@@ -1784,7 +1762,8 @@ void input_reset_device(struct input_dev *dev)
spin_lock_irqsave(&dev->event_lock, flags);
input_dev_toggle(dev, true);
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
@@ -1806,7 +1785,9 @@ static int input_inhibit_device(struct input_dev *dev)
}
spin_lock_irq(&dev->event_lock);
+ input_mt_release_slots(dev);
input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
input_dev_toggle(dev, false);
spin_unlock_irq(&dev->event_lock);
@@ -1857,7 +1838,8 @@ static int input_dev_suspend(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
@@ -1891,7 +1873,8 @@ static int input_dev_freeze(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irq(&input_dev->event_lock);
@@ -2259,6 +2242,34 @@ static void devm_input_device_unregister(struct device *dev, void *res)
__input_unregister_device(input);
}
+/*
+ * Generate software autorepeat event. Note that we take
+ * dev->event_lock here to avoid racing with input_event
+ * which may cause keys to get "stuck".
+ */
+static void input_repeat_key(struct timer_list *t)
+{
+ struct input_dev *dev = from_timer(dev, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (!dev->inhibited &&
+ test_bit(dev->repeat_key, dev->key) &&
+ is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
+
+ input_set_timestamp(dev, ktime_get());
+ input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+
+ if (dev->rep[REP_PERIOD])
+ mod_timer(&dev->timer, jiffies +
+ msecs_to_jiffies(dev->rep[REP_PERIOD]));
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* input_enable_softrepeat - enable software autorepeat
* @dev: input device
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index 78ebca7d400a..e0cfdc84763f 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -222,13 +222,6 @@ static int adc_joystick_probe(struct platform_device *pdev)
if (error)
return error;
- input_set_drvdata(input, joy);
- error = input_register_device(input);
- if (error) {
- dev_err(dev, "Unable to register input device\n");
- return error;
- }
-
joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
if (IS_ERR(joy->buffer)) {
dev_err(dev, "Unable to allocate callback buffer\n");
@@ -241,6 +234,14 @@ static int adc_joystick_probe(struct platform_device *pdev)
return error;
}
+ input_set_drvdata(input, joy);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device\n");
+ return error;
+ }
+
return 0;
}
diff --git a/drivers/input/joystick/sensehat-joystick.c b/drivers/input/joystick/sensehat-joystick.c
index 5ad1fe4ff496..a84df39d3b2f 100644
--- a/drivers/input/joystick/sensehat-joystick.c
+++ b/drivers/input/joystick/sensehat-joystick.c
@@ -98,10 +98,8 @@ static int sensehat_joystick_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Could not retrieve interrupt request");
+ if (irq < 0)
return irq;
- }
error = devm_request_threaded_irq(&pdev->dev, irq,
NULL, sensehat_joystick_report,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4ea79db8f134..a20ee693b22b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -795,7 +795,7 @@ config KEYBOARD_MT6779
config KEYBOARD_MTK_PMIC
tristate "MediaTek PMIC keys support"
- depends on MFD_MT6397
+ depends on MFD_MT6397 || COMPILE_TEST
help
Say Y here if you want to use the pmic keys (powerkey/homekey).
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1592da4de336..1a1a05d7cd42 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -8,17 +8,19 @@
* Copyright (C) 2008-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/gpio/driver.h>
+#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/platform_data/adp5588.h>
@@ -36,18 +38,18 @@
* asserted.
*/
#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
+#define WA_DELAYED_READOUT_TIME 25
struct adp5588_kpad {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
+ ktime_t irq_time;
unsigned long delay;
unsigned short keycode[ADP5588_KEYMAPSIZE];
const struct adp5588_gpi_map *gpimap;
unsigned short gpimapsize;
#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5588_MAXGPIO];
- bool export_gpio;
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
u8 dat_out[3];
@@ -179,6 +181,21 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
+static void adp5588_gpio_do_teardown(void *_kpad)
+{
+ struct adp5588_kpad *kpad = _kpad;
+ struct device *dev = &kpad->client->dev;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
+ const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ int error;
+
+ error = gpio_data->teardown(kpad->client,
+ kpad->gc.base, kpad->gc.ngpio,
+ gpio_data->context);
+ if (error)
+ dev_warn(&kpad->client->dev, "teardown failed %d\n", error);
+}
+
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
@@ -195,8 +212,6 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
- kpad->export_gpio = true;
-
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
@@ -210,9 +225,9 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
mutex_init(&kpad->gpio_lock);
- error = gpiochip_add_data(&kpad->gc, kpad);
+ error = devm_gpiochip_add_data(dev, &kpad->gc, kpad);
if (error) {
- dev_err(dev, "gpiochip_add failed, err: %d\n", error);
+ dev_err(dev, "gpiochip_add failed: %d\n", error);
return error;
}
@@ -227,41 +242,24 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.base, kpad->gc.ngpio,
gpio_data->context);
if (error)
- dev_warn(dev, "setup failed, %d\n", error);
+ dev_warn(dev, "setup failed: %d\n", error);
}
- return 0;
-}
-
-static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
- struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
- int error;
-
- if (!kpad->export_gpio)
- return;
-
if (gpio_data->teardown) {
- error = gpio_data->teardown(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
+ error = devm_add_action(dev, adp5588_gpio_do_teardown, kpad);
if (error)
- dev_warn(dev, "teardown failed %d\n", error);
+ dev_warn(dev, "failed to schedule teardown: %d\n",
+ error);
}
- gpiochip_remove(&kpad->gc);
+ return 0;
}
+
#else
static inline int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
return 0;
}
-
-static inline void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
-}
#endif
static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
@@ -289,13 +287,36 @@ static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
}
}
-static void adp5588_work(struct work_struct *work)
+static irqreturn_t adp5588_hard_irq(int irq, void *handle)
{
- struct adp5588_kpad *kpad = container_of(work,
- struct adp5588_kpad, work.work);
+ struct adp5588_kpad *kpad = handle;
+
+ kpad->irq_time = ktime_get();
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t adp5588_thread_irq(int irq, void *handle)
+{
+ struct adp5588_kpad *kpad = handle;
struct i2c_client *client = kpad->client;
+ ktime_t target_time, now;
+ unsigned long delay;
int status, ev_cnt;
+ /*
+ * Readout needs to wait for at least 25ms after the notification
+ * for REVID < 4.
+ */
+ if (kpad->delay) {
+ target_time = ktime_add_ms(kpad->irq_time, kpad->delay);
+ now = ktime_get();
+ if (ktime_before(now, target_time)) {
+ delay = ktime_to_us(ktime_sub(target_time, now));
+ usleep_range(delay, delay + 1000);
+ }
+ }
+
status = adp5588_read(client, INT_STAT);
if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
@@ -308,20 +329,8 @@ static void adp5588_work(struct work_struct *work)
input_sync(kpad->input);
}
}
- adp5588_write(client, INT_STAT, status); /* Status is W1C */
-}
-
-static irqreturn_t adp5588_irq(int irq, void *handle)
-{
- struct adp5588_kpad *kpad = handle;
- /*
- * use keventd context to read the event fifo registers
- * Schedule readout at least 25ms after notification for
- * REVID < 4
- */
-
- schedule_delayed_work(&kpad->work, kpad->delay);
+ adp5588_write(client, INT_STAT, status); /* Status is W1C */
return IRQ_HANDLED;
}
@@ -496,30 +505,27 @@ static int adp5588_probe(struct i2c_client *client,
return -EINVAL;
}
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
- input = input_allocate_device();
- if (!kpad || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
kpad->client = client;
kpad->input = input;
- INIT_DELAYED_WORK(&kpad->work, adp5588_work);
ret = adp5588_read(client, DEV_ID);
- if (ret < 0) {
- error = ret;
- goto err_free_mem;
- }
+ if (ret < 0)
+ return ret;
revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
if (WA_DELAYED_READOUT_REVID(revid))
- kpad->delay = msecs_to_jiffies(30);
+ kpad->delay = msecs_to_jiffies(WA_DELAYED_READOUT_TIME);
input->name = client->name;
input->phys = "adp5588-keys/input0";
- input->dev.parent = &client->dev;
input_set_drvdata(input, kpad);
@@ -556,95 +562,63 @@ static int adp5588_probe(struct i2c_client *client,
error = input_register_device(input);
if (error) {
- dev_err(&client->dev, "unable to register input device\n");
- goto err_free_mem;
+ dev_err(&client->dev, "unable to register input device: %d\n",
+ error);
+ return error;
}
- error = request_irq(client->irq, adp5588_irq,
- IRQF_TRIGGER_FALLING,
- client->dev.driver->name, kpad);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ adp5588_hard_irq, adp5588_thread_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, kpad);
if (error) {
- dev_err(&client->dev, "irq %d busy?\n", client->irq);
- goto err_unreg_dev;
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
}
error = adp5588_setup(client);
if (error)
- goto err_free_irq;
+ return error;
if (kpad->gpimapsize)
adp5588_report_switch_state(kpad);
error = adp5588_gpio_add(kpad);
if (error)
- goto err_free_irq;
-
- device_init_wakeup(&client->dev, 1);
- i2c_set_clientdata(client, kpad);
+ return error;
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
-
- err_free_irq:
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- err_unreg_dev:
- input_unregister_device(input);
- input = NULL;
- err_free_mem:
- input_free_device(input);
- kfree(kpad);
-
- return error;
}
static int adp5588_remove(struct i2c_client *client)
{
- struct adp5588_kpad *kpad = i2c_get_clientdata(client);
-
adp5588_write(client, CFG, 0);
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- input_unregister_device(kpad->input);
- adp5588_gpio_remove(kpad);
- kfree(kpad);
+ /* all resources will be freed by devm */
return 0;
}
-#ifdef CONFIG_PM
-static int adp5588_suspend(struct device *dev)
+static int __maybe_unused adp5588_suspend(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
+ struct i2c_client *client = to_i2c_client(dev);
disable_irq(client->irq);
- cancel_delayed_work_sync(&kpad->work);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
return 0;
}
-static int adp5588_resume(struct device *dev)
+static int __maybe_unused adp5588_resume(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
+ struct i2c_client *client = to_i2c_client(dev);
enable_irq(client->irq);
return 0;
}
-static const struct dev_pm_ops adp5588_dev_pm_ops = {
- .suspend = adp5588_suspend,
- .resume = adp5588_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
static const struct i2c_device_id adp5588_id[] = {
{ "adp5588-keys", 0 },
@@ -656,9 +630,7 @@ MODULE_DEVICE_TABLE(i2c, adp5588_id);
static struct i2c_driver adp5588_driver = {
.driver = {
.name = KBUILD_MODNAME,
-#ifdef CONFIG_PM
.pm = &adp5588_dev_pm_ops,
-#endif
},
.probe = adp5588_probe,
.remove = adp5588_remove,
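
The adp5588 rework above drops the delayed workqueue in favour of a threaded interrupt: the hard handler only records when the interrupt fired, and the thread sleeps for whatever part of the required settle time has not yet elapsed before reading the FIFO. A rough self-contained sketch of that timing technique, not part of the patch (example_* names are hypothetical; the 25 ms settle time is assumed, matching the driver):

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

struct example_dev {
	ktime_t irq_time;			/* written in hard IRQ context */
};

static irqreturn_t example_hard_irq(int irq, void *data)
{
	struct example_dev *edev = data;

	edev->irq_time = ktime_get();		/* cheap: just timestamp the IRQ */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_irq(int irq, void *data)
{
	struct example_dev *edev = data;
	ktime_t target = ktime_add_ms(edev->irq_time, 25);
	ktime_t now = ktime_get();

	if (ktime_before(now, target)) {
		unsigned long delay_us = ktime_to_us(ktime_sub(target, now));

		/* sleep only for the remainder of the settle time */
		usleep_range(delay_us, delay_us + 1000);
	}

	/* ... the device FIFO can be read safely from here on ... */
	return IRQ_HANDLED;
}
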
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index d1f5354d5ea2..cbc6c0d4670a 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -1597,52 +1597,38 @@ static u32 applespi_notify(acpi_handle gpe_device, u32 gpe, void *context)
static int applespi_get_saved_bl_level(struct applespi_data *applespi)
{
- struct efivar_entry *efivar_entry;
+ efi_status_t sts = EFI_NOT_FOUND;
u16 efi_data = 0;
- unsigned long efi_data_len;
- int sts;
-
- efivar_entry = kmalloc(sizeof(*efivar_entry), GFP_KERNEL);
- if (!efivar_entry)
- return -ENOMEM;
-
- memcpy(efivar_entry->var.VariableName, EFI_BL_LEVEL_NAME,
- sizeof(EFI_BL_LEVEL_NAME));
- efivar_entry->var.VendorGuid = EFI_BL_LEVEL_GUID;
- efi_data_len = sizeof(efi_data);
+ unsigned long efi_data_len = sizeof(efi_data);
- sts = efivar_entry_get(efivar_entry, NULL, &efi_data_len, &efi_data);
- if (sts && sts != -ENOENT)
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ sts = efi.get_variable(EFI_BL_LEVEL_NAME, &EFI_BL_LEVEL_GUID,
+ NULL, &efi_data_len, &efi_data);
+ if (sts != EFI_SUCCESS && sts != EFI_NOT_FOUND)
dev_warn(&applespi->spi->dev,
- "Error getting backlight level from EFI vars: %d\n",
+ "Error getting backlight level from EFI vars: 0x%lx\n",
sts);
- kfree(efivar_entry);
-
- return sts ? sts : efi_data;
+ return sts != EFI_SUCCESS ? -ENODEV : efi_data;
}
static void applespi_save_bl_level(struct applespi_data *applespi,
unsigned int level)
{
- efi_guid_t efi_guid;
+ efi_status_t sts = EFI_UNSUPPORTED;
u32 efi_attr;
- unsigned long efi_data_len;
u16 efi_data;
- int sts;
- /* Save keyboard backlight level */
- efi_guid = EFI_BL_LEVEL_GUID;
efi_data = (u16)level;
- efi_data_len = sizeof(efi_data);
efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS;
- sts = efivar_entry_set_safe((efi_char16_t *)EFI_BL_LEVEL_NAME, efi_guid,
- efi_attr, true, efi_data_len, &efi_data);
- if (sts)
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
+ sts = efi.set_variable(EFI_BL_LEVEL_NAME, &EFI_BL_LEVEL_GUID,
+ efi_attr, sizeof(efi_data), &efi_data);
+ if (sts != EFI_SUCCESS)
dev_warn(&applespi->spi->dev,
- "Error saving backlight level to EFI vars: %d\n", sts);
+ "Error saving backlight level to EFI vars: 0x%lx\n", sts);
}
static int applespi_probe(struct spi_device *spi)
diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c
index 166d6023a538..56a919ec23b5 100644
--- a/drivers/input/keyboard/bcm-keypad.c
+++ b/drivers/input/keyboard/bcm-keypad.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/bitops.h>
#include <linux/clk.h>
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index cc73a149da28..c14136b733a9 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -12,6 +12,7 @@
// expensive.
#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/input.h>
@@ -518,6 +519,50 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev,
return 0;
}
+static void cros_ec_keyb_parse_vivaldi_physmap(struct cros_ec_keyb *ckdev)
+{
+ u32 *physmap = ckdev->vdata.function_row_physmap;
+ unsigned int row, col, scancode;
+ int n_physmap;
+ int error;
+ int i;
+
+ n_physmap = device_property_count_u32(ckdev->dev,
+ "function-row-physmap");
+ if (n_physmap <= 0)
+ return;
+
+ if (n_physmap >= VIVALDI_MAX_FUNCTION_ROW_KEYS) {
+ dev_warn(ckdev->dev,
+ "only up to %d top row keys is supported (%d specified)\n",
+			 "only up to %d top row keys are supported (%d specified)\n",
+ n_physmap = VIVALDI_MAX_FUNCTION_ROW_KEYS;
+ }
+
+ error = device_property_read_u32_array(ckdev->dev,
+ "function-row-physmap",
+ physmap, n_physmap);
+ if (error) {
+ dev_warn(ckdev->dev,
+ "failed to parse function-row-physmap property: %d\n",
+ error);
+ return;
+ }
+
+ /*
+ * Convert (in place) from row/column encoding to matrix "scancode"
+ * used by the driver.
+ */
+ for (i = 0; i < n_physmap; i++) {
+ row = KEY_ROW(physmap[i]);
+ col = KEY_COL(physmap[i]);
+ scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap[i] = scancode;
+ }
+
+ ckdev->vdata.num_function_row_keys = n_physmap;
+}
+
/**
* cros_ec_keyb_register_matrix - Register matrix keys
*
@@ -534,11 +579,6 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
struct input_dev *idev;
const char *phys;
int err;
- struct property *prop;
- const __be32 *p;
- u32 *physmap;
- u32 key_pos;
- unsigned int row, col, scancode, n_physmap;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -573,7 +613,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
idev->id.product = 0;
idev->dev.parent = dev;
- ckdev->ghost_filter = of_property_read_bool(dev->of_node,
+ ckdev->ghost_filter = device_property_read_bool(dev,
"google,needs-ghost-filter");
err = matrix_keypad_build_keymap(NULL, NULL, ckdev->rows, ckdev->cols,
@@ -589,22 +629,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
input_set_drvdata(idev, ckdev);
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
-
- physmap = ckdev->vdata.function_row_physmap;
- n_physmap = 0;
- of_property_for_each_u32(dev->of_node, "function-row-physmap",
- prop, p, key_pos) {
- if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
- dev_warn(dev, "Only support up to %d top row keys\n",
- VIVALDI_MAX_FUNCTION_ROW_KEYS);
- break;
- }
- row = KEY_ROW(key_pos);
- col = KEY_COL(key_pos);
- scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
- physmap[n_physmap++] = scancode;
- }
- ckdev->vdata.num_function_row_keys = n_physmap;
+ cros_ec_keyb_parse_vivaldi_physmap(ckdev);
err = input_register_device(ckdev->idev);
if (err) {
@@ -653,14 +678,19 @@ static const struct attribute_group cros_ec_keyb_attr_group = {
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
- struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *ec;
struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
bool buttons_switches_only = device_get_match_data(dev);
int err;
- if (!dev->of_node)
- return -ENODEV;
+ /*
+ * If the parent ec device has not been probed yet, defer the probe of
+ * this keyboard/button driver until later.
+ */
+ ec = dev_get_drvdata(pdev->dev.parent);
+ if (!ec)
+ return -EPROBE_DEFER;
ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
@@ -713,6 +743,14 @@ static int cros_ec_keyb_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ec_keyb_acpi_match[] = {
+ { "GOOG0007", true },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_keyb_acpi_match);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_keyb_of_match[] = {
{ .compatible = "google,cros-ec-keyb" },
@@ -730,6 +768,7 @@ static struct platform_driver cros_ec_keyb_driver = {
.driver = {
.name = "cros-ec-keyb",
.of_match_table = of_match_ptr(cros_ec_keyb_of_match),
+ .acpi_match_table = ACPI_PTR(cros_ec_keyb_acpi_match),
.pm = &cros_ec_keyb_pm_ops,
},
};
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 2e7c9187c10f..bf447bf598fb 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -17,6 +17,11 @@
#define MTK_KPD_DEBOUNCE 0x0018
#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
#define MTK_KPD_DEBOUNCE_MAX_MS 256
+#define MTK_KPD_SEL 0x0020
+#define MTK_KPD_SEL_COL GENMASK(15, 10)
+#define MTK_KPD_SEL_ROW GENMASK(9, 4)
+#define MTK_KPD_SEL_COLMASK(c) GENMASK((c) + 9, 10)
+#define MTK_KPD_SEL_ROWMASK(r) GENMASK((r) + 3, 4)
#define MTK_KPD_NUM_MEMS 5
#define MTK_KPD_NUM_BITS 136 /* 4*32+8 MEM5 only use 8 BITS */
@@ -42,7 +47,7 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
const unsigned short *keycode = keypad->input_dev->keycode;
DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
- unsigned int bit_nr;
+ unsigned int bit_nr, key;
unsigned int row, col;
unsigned int scancode;
unsigned int row_shift = get_count_order(keypad->n_cols);
@@ -61,8 +66,10 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
if (bit_nr % 32 >= 16)
continue;
- row = bit_nr / 32;
- col = bit_nr % 32;
+ key = bit_nr / 32 * 16 + bit_nr % 32;
+ row = key / 9;
+ col = key % 9;
+
scancode = MATRIX_SCAN_CODE(row, col, row_shift);
/* 1: not pressed, 0: pressed */
pressed = !test_bit(bit_nr, new_state);
@@ -159,6 +166,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
(debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_ROW,
+ MTK_KPD_SEL_ROWMASK(keypad->n_rows));
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
+ MTK_KPD_SEL_COLMASK(keypad->n_cols));
+
keypad->clk = devm_clk_get(&pdev->dev, "kpd");
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index c31ab4368388..6404081253ea 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -18,17 +18,9 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define MTK_PMIC_PWRKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_PWRKEY_RST_EN_SHIFT 6
-#define MTK_PMIC_HOMEKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_HOMEKEY_RST_EN_SHIFT 5
-#define MTK_PMIC_RST_DU_MASK 0x3
-#define MTK_PMIC_RST_DU_SHIFT 8
-
-#define MTK_PMIC_PWRKEY_RST \
- (MTK_PMIC_PWRKEY_RST_EN_MASK << MTK_PMIC_PWRKEY_RST_EN_SHIFT)
-#define MTK_PMIC_HOMEKEY_RST \
- (MTK_PMIC_HOMEKEY_RST_EN_MASK << MTK_PMIC_HOMEKEY_RST_EN_SHIFT)
+#define MTK_PMIC_RST_DU_MASK GENMASK(9, 8)
+#define MTK_PMIC_PWRKEY_RST BIT(6)
+#define MTK_PMIC_HOMEKEY_RST BIT(5)
#define MTK_PMIC_PWRKEY_INDEX 0
#define MTK_PMIC_HOMEKEY_INDEX 1
@@ -39,50 +31,58 @@ struct mtk_pmic_keys_regs {
u32 deb_mask;
u32 intsel_reg;
u32 intsel_mask;
+ u32 rst_en_mask;
};
#define MTK_PMIC_KEYS_REGS(_deb_reg, _deb_mask, \
- _intsel_reg, _intsel_mask) \
+ _intsel_reg, _intsel_mask, _rst_mask) \
{ \
.deb_reg = _deb_reg, \
.deb_mask = _deb_mask, \
.intsel_reg = _intsel_reg, \
.intsel_mask = _intsel_mask, \
+ .rst_en_mask = _rst_mask, \
}
struct mtk_pmic_regs {
const struct mtk_pmic_keys_regs keys_regs[MTK_PMIC_MAX_KEY_COUNT];
u32 pmic_rst_reg;
+ u32 rst_lprst_mask; /* Long-press reset timeout bitmask */
};
static const struct mtk_pmic_regs mt6397_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_CHRSTATUS,
- 0x8, MT6397_INT_RSV, 0x10),
+ 0x8, MT6397_INT_RSV, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_OCSTATUS2,
- 0x10, MT6397_INT_RSV, 0x8),
+ 0x10, MT6397_INT_RSV, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6397_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6323_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x2, MT6323_INT_MISC_CON, 0x10),
+ 0x2, MT6323_INT_MISC_CON, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x4, MT6323_INT_MISC_CON, 0x8),
+ 0x4, MT6323_INT_MISC_CON, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6323_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+ 0x2, MT6358_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+ 0x8, MT6358_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6358_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
struct mtk_pmic_keys_info {
@@ -108,53 +108,49 @@ enum mtk_pmic_keys_lp_mode {
};
static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
- u32 pmic_rst_reg)
+ const struct mtk_pmic_regs *regs)
{
- int ret;
+ const struct mtk_pmic_keys_regs *kregs_home, *kregs_pwr;
u32 long_press_mode, long_press_debounce;
+ u32 value, mask;
+ int error;
+
+ kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
+ kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
- ret = of_property_read_u32(keys->dev->of_node,
- "power-off-time-sec", &long_press_debounce);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
+ &long_press_debounce);
+ if (error)
long_press_debounce = 0;
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_RST_DU_MASK << MTK_PMIC_RST_DU_SHIFT,
- long_press_debounce << MTK_PMIC_RST_DU_SHIFT);
+ mask = regs->rst_lprst_mask;
+ value = long_press_debounce << (ffs(regs->rst_lprst_mask) - 1);
- ret = of_property_read_u32(keys->dev->of_node,
- "mediatek,long-press-mode", &long_press_mode);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node,
+ "mediatek,long-press-mode",
+ &long_press_mode);
+ if (error)
long_press_mode = LP_DISABLE;
switch (long_press_mode) {
- case LP_ONEKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
- break;
case LP_TWOKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- MTK_PMIC_HOMEKEY_RST);
- break;
+ value |= kregs_home->rst_en_mask;
+ fallthrough;
+
+ case LP_ONEKEY:
+ value |= kregs_pwr->rst_en_mask;
+ fallthrough;
+
case LP_DISABLE:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- 0);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
+ mask |= kregs_home->rst_en_mask;
+ mask |= kregs_pwr->rst_en_mask;
break;
+
default:
break;
}
+
+ regmap_update_bits(keys->regmap, regs->pmic_rst_reg, mask, value);
}
static irqreturn_t mtk_pmic_keys_irq_handler_thread(int irq, void *data)
@@ -358,7 +354,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
return error;
}
- mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs->pmic_rst_reg);
+ mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs);
platform_set_drvdata(pdev, keys);
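
In the mtk-pmic-keys change above, the long-press debounce value is shifted into place with ffs(mask) - 1 because the reset mask now comes from per-chip data at run time, which rules out FIELD_PREP() (it expects a compile-time constant mask). A tiny sketch of that idea, not part of the patch (hypothetical helper name):

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * Place @val into the register field described by the runtime @mask,
 * mirroring what FIELD_PREP() does for constant masks.
 */
static u32 example_field_prep(u32 mask, u32 val)
{
	return (val << (ffs(mask) - 1)) & mask;
}
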
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 8a7ce41b8c56..ee9d04a3f0d5 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -179,11 +179,9 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
int error;
u64 keys;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return IRQ_NONE;
- }
low = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
high = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
@@ -207,11 +205,9 @@ static int omap4_keypad_open(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return error;
- }
disable_irq(keypad_data->irq);
@@ -254,9 +250,10 @@ static void omap4_keypad_close(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0)
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
+ dev_err(dev, "%s: pm_runtime_resume_and_get() failed: %d\n",
+ __func__, error);
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
@@ -392,10 +389,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error) {
+ dev_err(dev, "pm_runtime_resume_and_get() failed\n");
return error;
}
diff --git a/drivers/input/misc/gpio_decoder.c b/drivers/input/misc/gpio_decoder.c
index 145826a1a9a1..ee668eba302f 100644
--- a/drivers/input/misc/gpio_decoder.c
+++ b/drivers/input/misc/gpio_decoder.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* A generic driver to read multiple gpio lines and translate the
* encoded numeric value into an input event.
*/
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 6b4138771a3f..b2e8097a2e6d 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -40,7 +40,6 @@
#define IQS7222_SLDR_SETUP_2_RES_MASK GENMASK(15, 8)
#define IQS7222_SLDR_SETUP_2_RES_SHIFT 8
#define IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK GENMASK(7, 0)
-#define IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK GENMASK(9, 0)
#define IQS7222_GPIO_SETUP_0_GPIO_EN BIT(0)
@@ -54,6 +53,9 @@
#define IQS7222_SYS_SETUP_ACK_RESET BIT(0)
#define IQS7222_EVENT_MASK_ATI BIT(12)
+#define IQS7222_EVENT_MASK_SLDR BIT(10)
+#define IQS7222_EVENT_MASK_TOUCH BIT(1)
+#define IQS7222_EVENT_MASK_PROX BIT(0)
#define IQS7222_COMMS_HOLD BIT(0)
#define IQS7222_COMMS_ERROR 0xEEEE
@@ -92,11 +94,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_GPIO,
IQS7222_REG_GRP_SYS,
@@ -135,12 +137,12 @@ struct iqs7222_event_desc {
static const struct iqs7222_event_desc iqs7222_kp_events[] = {
{
.name = "event-prox",
- .enable = BIT(0),
+ .enable = IQS7222_EVENT_MASK_PROX,
.reg_key = IQS7222_REG_KEY_PROX,
},
{
.name = "event-touch",
- .enable = BIT(1),
+ .enable = IQS7222_EVENT_MASK_TOUCH,
.reg_key = IQS7222_REG_KEY_TOUCH,
},
};
@@ -556,13 +558,6 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
.label = "current reference trim",
},
{
- .name = "azoteq,rf-filt-enable",
- .reg_grp = IQS7222_REG_GRP_GLBL,
- .reg_offset = 0,
- .reg_shift = 15,
- .reg_width = 1,
- },
- {
.name = "azoteq,max-counts",
.reg_grp = IQS7222_REG_GRP_GLBL,
.reg_offset = 0,
@@ -1272,9 +1267,22 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
struct i2c_client *client = iqs7222->client;
ktime_t ati_timeout;
u16 sys_status = 0;
- u16 sys_setup = iqs7222->sys_setup[0] & ~IQS7222_SYS_SETUP_ACK_RESET;
+ u16 sys_setup;
int error, i;
+ /*
+ * The reserved fields of the system setup register may have changed
+ * as a result of other registers having been written. As such, read
+ * the register's latest value to avoid unexpected behavior when the
+ * register is written in the loop that follows.
+ */
+ error = iqs7222_read_word(iqs7222, IQS7222_SYS_SETUP, &sys_setup);
+ if (error)
+ return error;
+
+ sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1299,12 +1307,15 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;
- if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
- continue;
+ if (sys_status & IQS7222_SYS_STATUS_RESET)
+ return 0;
if (sys_status & IQS7222_SYS_STATUS_ATI_ERROR)
break;
+ if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
+ continue;
+
/*
* Use stream-in-touch mode if either slider reports
* absolute position.
@@ -1321,7 +1332,7 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
dev_err(&client->dev,
"ATI attempt %d of %d failed with status 0x%02X, %s\n",
i + 1, IQS7222_NUM_RETRIES, (u8)sys_status,
- i < IQS7222_NUM_RETRIES ? "retrying..." : "stopping");
+ i + 1 < IQS7222_NUM_RETRIES ? "retrying" : "stopping");
}
return -ETIMEDOUT;
@@ -1334,6 +1345,34 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int error, i, j, k;
/*
+ * Acknowledge reset before writing any registers in case the device
+ * suffers a spurious reset during initialization. Because this step
+ * may change the reserved fields of the second filter beta register,
+ * its cache must be updated.
+ *
+ * Writing the second filter beta register, in turn, may clobber the
+ * system status register. As such, the filter beta register pair is
+ * written first to protect against this hazard.
+ */
+ if (dir == WRITE) {
+ u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
+ u16 filt_setup;
+
+ error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ iqs7222->sys_setup[0] |
+ IQS7222_SYS_SETUP_ACK_RESET);
+ if (error)
+ return error;
+
+ error = iqs7222_read_word(iqs7222, reg, &filt_setup);
+ if (error)
+ return error;
+
+ iqs7222->filt_setup[1] &= GENMASK(7, 0);
+ iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
+ }
+
+ /*
* Take advantage of the stop-bit disable function, if available, to
* save the trouble of having to reopen a communication window after
* each burst read or write.
@@ -1957,8 +1996,8 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
int ext_chan = rounddown(num_chan, 10);
int count, error, reg_offset, i;
+ u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
- u16 *sys_setup = iqs7222->sys_setup;
unsigned int chan_sel[4], val;
error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
@@ -2003,7 +2042,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
reg_offset = dev_desc->sldr_res < U16_MAX ? 0 : 1;
sldr_setup[0] |= count;
- sldr_setup[3 + reg_offset] &= ~IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK;
+ sldr_setup[3 + reg_offset] &= ~GENMASK(ext_chan - 1, 0);
for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
sldr_setup[5 + reg_offset + i] = 0;
@@ -2081,17 +2120,19 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
sldr_setup[0] |= dev_desc->wheel_enable;
}
+ /*
+ * The absence of a register offset makes it safe to assume the device
+ * supports gestures, each of which is first disabled until explicitly
+ * enabled.
+ */
+ if (!reg_offset)
+ for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++)
+ sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
+
for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
const char *event_name = iqs7222_sl_events[i].name;
struct fwnode_handle *event_node;
- /*
- * The absence of a register offset means the remaining fields
- * in the group represent gesture settings.
- */
- if (iqs7222_sl_events[i].enable && !reg_offset)
- sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
-
event_node = fwnode_get_named_child_node(sldr_node, event_name);
if (!event_node)
continue;
@@ -2104,6 +2145,22 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
if (error)
return error;
+ /*
+ * The press/release event does not expose a direct GPIO link,
+ * but one can be emulated by tying each of the participating
+ * channels to the same GPIO.
+ */
+ error = iqs7222_gpio_select(iqs7222, event_node,
+ i ? iqs7222_sl_events[i].enable
+ : sldr_setup[3 + reg_offset],
+ i ? 1568 + sldr_index * 30
+ : sldr_setup[4 + reg_offset]);
+ if (error)
+ return error;
+
+ if (!reg_offset)
+ sldr_setup[9] |= iqs7222_sl_events[i].enable;
+
error = fwnode_property_read_u32(event_node, "linux,code",
&val);
if (error) {
@@ -2115,26 +2172,20 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
iqs7222->sl_code[sldr_index][i] = val;
input_set_capability(iqs7222->keypad, EV_KEY, val);
- /*
- * The press/release event is determined based on whether the
- * coordinate field reports 0xFFFF and has no explicit enable
- * control.
- */
- if (!iqs7222_sl_events[i].enable || reg_offset)
- continue;
-
- sldr_setup[9] |= iqs7222_sl_events[i].enable;
-
- error = iqs7222_gpio_select(iqs7222, event_node,
- iqs7222_sl_events[i].enable,
- 1568 + sldr_index * 30);
- if (error)
- return error;
-
if (!dev_desc->event_offset)
continue;
- sys_setup[dev_desc->event_offset] |= BIT(10 + sldr_index);
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate field reports 0xFFFF and solely relies on touch
+ * or proximity interrupts to be unmasked.
+ */
+ if (i && !reg_offset)
+ *event_mask |= (IQS7222_EVENT_MASK_SLDR << sldr_index);
+ else if (sldr_setup[4 + reg_offset] == dev_desc->touch_link)
+ *event_mask |= IQS7222_EVENT_MASK_TOUCH;
+ else
+ *event_mask |= IQS7222_EVENT_MASK_PROX;
}
/*
@@ -2227,11 +2278,6 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
return error;
}
- sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
- sys_setup[0] |= IQS7222_SYS_SETUP_ACK_RESET;
-
return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
IQS7222_REG_KEY_NONE);
}
@@ -2299,29 +2345,37 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
input_report_abs(iqs7222->keypad, iqs7222->sl_axis[i],
sldr_pos);
- for (j = 0; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
- u16 mask = iqs7222_sl_events[j].mask;
- u16 val = iqs7222_sl_events[j].val;
+ input_report_key(iqs7222->keypad, iqs7222->sl_code[i][0],
+ sldr_pos < dev_desc->sldr_res);
- if (!iqs7222_sl_events[j].enable) {
- input_report_key(iqs7222->keypad,
- iqs7222->sl_code[i][j],
- sldr_pos < dev_desc->sldr_res);
- continue;
- }
+ /*
+ * A maximum resolution indicates the device does not support
+ * gestures, in which case the remaining fields are ignored.
+ */
+ if (dev_desc->sldr_res == U16_MAX)
+ continue;
- /*
- * The remaining offsets represent gesture state, and
- * are discarded in the case of IQS7222C because only
- * absolute position is reported.
- */
- if (num_stat < IQS7222_MAX_COLS_STAT)
- continue;
+ if (!(le16_to_cpu(status[1]) & IQS7222_EVENT_MASK_SLDR << i))
+ continue;
+
+ /*
+ * Skip the press/release event, as it does not have separate
+ * status fields and is handled separately.
+ */
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
+ u16 mask = iqs7222_sl_events[j].mask;
+ u16 val = iqs7222_sl_events[j].val;
input_report_key(iqs7222->keypad,
iqs7222->sl_code[i][j],
(state & mask) == val);
}
+
+ input_sync(iqs7222->keypad);
+
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++)
+ input_report_key(iqs7222->keypad,
+ iqs7222->sl_code[i][j], 0);
}
input_sync(iqs7222->keypad);
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index 2213e06b611d..465e6693077a 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instruments' Palmas Power Button Input Driver
*
* Copyright (C) 2012-2014 Texas Instruments Incorporated - http://www.ti.com/
* Girish S Ghongdemath
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/bitfield.h>
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index f011447c44fb..fc450fce0932 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instruments' TPS65217 and TPS65218 Power Button Input Driver
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Author: Felipe Balbi <balbi@ti.com>
* Author: Marcin Niestroj <m.niestroj@grinn-global.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 812edfced86e..0caaf3e64215 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -57,7 +57,7 @@ struct pip_app_resp_head {
* The value of data_status can be the first byte of data or
* the command status or the unsupported command code depending on the
* requested command code.
- */
+ */
u8 data_status;
} __packed;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 23507fce3a2b..18ccbd45004a 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -41,7 +41,7 @@ struct gpio_mouse {
/*
* Timer function which is run every scan_ms ms when the device is opened.
- * The dev input variable is set to the the input_dev pointer.
+ * The dev input variable is set to the input_dev pointer.
*/
static void gpio_mouse_scan(struct input_dev *input)
{
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index a9065c6ab550..da2c67cb8642 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -350,6 +350,10 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
+ if (!ps2port->addr) {
+ ret = -ENOMEM;
+ goto fail_nomem;
+ }
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 148a7c5fd0e2..4fbec7bbecca 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -67,25 +67,84 @@ static inline void i8042_write_command(int val)
#include <linux/dmi.h>
-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+#define SERIO_QUIRK_NOKBD BIT(0)
+#define SERIO_QUIRK_NOAUX BIT(1)
+#define SERIO_QUIRK_NOMUX BIT(2)
+#define SERIO_QUIRK_FORCEMUX BIT(3)
+#define SERIO_QUIRK_UNLOCK BIT(4)
+#define SERIO_QUIRK_PROBE_DEFER BIT(5)
+#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
+#define SERIO_QUIRK_RESET_NEVER BIT(7)
+#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DUMBKBD BIT(9)
+#define SERIO_QUIRK_NOLOOP BIT(10)
+#define SERIO_QUIRK_NOTIMEOUT BIT(11)
+#define SERIO_QUIRK_KBDRESET BIT(12)
+#define SERIO_QUIRK_DRITEK BIT(13)
+#define SERIO_QUIRK_NOPNP BIT(14)
+
+/* Quirk table for different mainboards. Options similar or identical to i8042
+ * module parameters.
+ * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored.
+ * This allows entries to overwrite vendor wide quirks on a per device basis.
+ * Where this is irrelevant, entries are sorted case sensitive by DMI_SYS_VENDOR
+ * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries.
+ */
+static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
{
- /*
- * Arima-Rioworks HDAMB -
- * AUX LOOP command does not raise AUX IRQ
- */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
- DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
- DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "G1S"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ /* Asus X450LCP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
},
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ /*
+	 * On some Asus laptops, just running self tests causes problems.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
},
{
/* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
@@ -94,585 +153,689 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
+ /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "G1S"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Acer Aspire 5710 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Embedded Box PC 3000 */
+ /* Acer Aspire 7738 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* OQO Model 01 */
+ /* Acer Aspire 5536 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ULI EV4873 - AUX LOOP does not work properly */
+ /*
+ * Acer Aspire 5738z
+ * Touchpad stops working in mux mode when dis- + re-enabled
+ * with the touchpad enable/disable toggle hotkey
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Microsoft Virtual Machine */
+ /* Acer Aspire One 150 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Medion MAM 2070 */
+ /* Acer Aspire One 532h */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AO532h"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "blue"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M1022M netbook */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Wistron based laptops need us to explicitly enable the 'Dritek
+ * keyboard extension' to make their extra keys start generating scancodes.
+ * Originally, this was just confined to older laptops, but a few Acer laptops
+ * have turned up in 2007 that also need this again.
+ */
{
+ /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
- { }
-};
-
-/*
- * Some Fujitsu notebooks are having trouble with touchpads if
- * active multiplexing mode is activated. Luckily they don't have
- * external PS/2 ports so we can safely disable it.
- * ... apparently some Toshibas don't like MUX mode either and
- * die horrible death on reboot.
- */
-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{
- /* Fujitsu Lifebook P7010/P7010D */
+ /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P7010 */
+ /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P5020D */
+ /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S2000 */
+ /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S6230 */
+ /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Acer TravelMate 2490 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook U745 */
+ /* Acer TravelMate 4280 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu T70H */
+ /* Amoi M636/A737 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Lifebook E4010 */
+ /* Compal HEL80I */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * No data is coming from the touchscreen unless KBC
- * is in legacy mode.
- */
- /* Panasonic CF-29 */
+ /* Advent 4211 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /*
- * HP Pavilion DV4017EA -
- * errors on MUX ports are reported without raising AUXDATA
- * causing "spurious NAK" messages.
- */
+ /* Dell Embedded Box PC 3000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * HP Pavilion ZT1000 -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell XPS M1530 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * HP Pavilion DV4270ca -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell Vostro 1510 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
+ /* Dell Vostro 1320 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1520 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Sharp Actius MM20 */
+ /* Entroware Proteus */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Fujitsu notebooks are having trouble with touchpads if
+ * active multiplexing mode is activated. Luckily they don't have
+ * external PS/2 ports so we can safely disable it.
+ * ... apparently some Toshibas don't like MUX mode either and
+ * die a horrible death on reboot.
+ */
{
- /* Sony Vaio FS-115b */
+ /* Fujitsu Lifebook P7010/P7010D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Sony Vaio FZ-240E -
- * reset and GET ID commands issued via KBD port are
- * sometimes being delivered to AUX3.
- */
+ /* Fujitsu Lifebook P5020D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Most (all?) VAIOs do not have external PS/2 ports nor
- * they implement active multiplexing properly, and
- * MUX discovery usually messes up keyboard/touchpad.
- */
+ /* Fujitsu Lifebook S2000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Amoi M636/A737 */
+ /* Fujitsu Lifebook S6230 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Lenovo 3000 n100 */
+ /* Fujitsu Lifebook T725 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Lenovo XiaoXin Air 12 */
+ /* Fujitsu Lifebook U745 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu T70H */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5710 */
+ /* Fujitsu A544 laptop */
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Acer Aspire 7738 */
+ /* Fujitsu AH544 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Gericom Bellagio */
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* IBM 2656 */
+ /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Dell XPS M1530 */
+ /* Fujitsu Lifebook P7010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Compal HEL80I */
+ /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1510 */
+ /* Fujitsu-Siemens Lifebook E4010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5536 */
+ /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro V13 */
+ /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Newer HP Pavilion dv4 models */
+ /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Asus X450LCP */
+ /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Avatar AVIU-145A6 */
+ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* TUXEDO BU1406 */
+ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to get
+ * it detected, initialised and finally working.
+ */
{
- /* Lenovo LaVie Z */
+ /* Gigabyte P35 v2 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /*
- * Acer Aspire 5738z
- * Touchpad stops working in mux mode when dis- + re-enabled
- * with the touchpad enable/disable toggle hotkey
- */
+ /* Gigabyte P34 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Entroware Proteus */
+ /* Gigabyte P57 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gericom Bellagio */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
{
/*
- * Sony Vaio VGN-CS series require MUX or the touch sensor
- * buttons will disturb touchpad operation
+ * HP Pavilion DV4017EA -
+ * errors on MUX ports are reported without raising AUXDATA
+ * causing "spurious NAK" messages.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-/*
- * On some Asus laptops, just running self tests cause problems.
- */
-static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
+ /*
+ * HP Pavilion ZT1000 -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
},
- }, {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * HP Pavilion DV4270ca -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
- /* MSI Wind U-100 */
+ /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* LG Electronics X110 */
+ /* IBM 2656 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "X110"),
- DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire One 150 */
+ /* Avatar AVIU-145A6 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Intel MBO Desktop D845PESV */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Intel NUC D54250WYK - does not have i8042 controller but
+ * declares PS/2 devices in DSDT.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /* Lenovo 3000 n100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo XiaoXin Air 12 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo LaVie Z */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo Ideapad U455 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Lenovo ThinkPad L460 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Advent 4211 */
+ /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ /* LG Electronics X110 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya Mini E1210 */
@@ -680,6 +843,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya E1222 */
@@ -687,331 +851,434 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Mivvy M310 */
+ /* MSI Wind U-100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP)
},
{
- /* Dell Vostro 1320 */
+ /*
+ * No data is coming from the touchscreen unless KBC
+ * is in legacy mode.
+ */
+ /* Panasonic CF-29 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1520 */
+ /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Dell Vostro 1720 */
+ /* Microsoft Virtual Machine */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo Ideapad U455 */
+ /* Medion MAM 2070 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad L460 */
+ /* TUXEDO BU1406 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ /* OQO Model 01 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Entroware Proteus */
+ /* Acer Aspire 5 A515 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-#ifdef CONFIG_PNP
-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
{
- /* Intel MBO Desktop D845PESV */
+ /* ULI EV4873 - AUX LOOP does not work properly */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
/*
- * Intel NUC D54250WYK - does not have i8042 controller but
- * declares PS/2 devices in DSDT.
+ * Arima-Rioworks HDAMB -
+ * AUX LOOP command does not raise AUX IRQ
*/
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
+ DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* MSI Wind U-100 */
+ /* Sharp Actius MM20 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5 A515 */
+ /*
+ * Sony Vaio FZ-240E -
+ * reset and GET ID commands issued via KBD port are
+ * sometimes being delivered to AUX3.
+ */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * Most (all?) VAIOs do not have external PS/2 ports nor
+ * they implement active multiplexing properly, and
+ * MUX discovery usually messes up keyboard/touchpad.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
{
+ /* Sony Vaio FS-115b */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_FORCEMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-#endif
-
-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
{
- /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port so this can safely be set for
+ * all of them. These two are based on a Clevo design, but have the
+ * board_name changed.
+ */
{
- /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu A544 laptop */
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu AH544 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Mivvy M310 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to get
+ * it detected, initialised and finally working.
+ */
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Fujitsu U574 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+ DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port so this can safely be set for
+ * all of them.
+ * Clevo barebones come with board_vendor and/or system_vendor set to
+ * either the very generic string "Notebook" and/or a different value
+ * for each individual reseller. The only somewhat universal way to
+ * identify them is by board_name.
+ */
{
- /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some Wistron based laptops need us to explicitly enable the 'Dritek
- * keyboard extension' to make their extra keys start generating scancodes.
- * Originally, this was just confined to older laptops, but a few Acer laptops
- * have turned up in 2007 that also need this again.
- */
-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{
- /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /*
+ * At least one modern Clevo barebone has the touchpad connected both
+ * via PS/2 and i2c interface. This causes a race condition between the
+ * psmouse and i2c-hid driver. Since the full capability of the touchpad
+ * is available via the i2c interface and the device has no external
+ * PS/2 port, it is safe to just ignore all PS/2 mice here to avoid
+ * this issue. The known affected device is the
+ * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of
+ * the two different dmi strings below. NS50MU is not a typo!
+ */
{
- /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 2490 */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 4280 */
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some laptops need keyboard reset before probing for the trackpad to get
- * it detected, initialised & finally work.
- */
-static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{
- /* Gigabyte P35 v2 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- {
- /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ {
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P34 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P57 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{ }
};
-static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+#ifdef CONFIG_PNP
+static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = {
{
- /* ASUS ZenBook UX425UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
- /* ASUS ZenBook UM325UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
{ }
};
+#endif
#endif /* CONFIG_X86 */
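
Each entry in the merged i8042_dmi_quirk_table above couples a set of DMI_MATCH() keys with an OR'd mask of SERIO_QUIRK_* bits in .driver_data; dmi_first_match() stops at the first entry whose keys all match, which is why device-specific entries have to sit above broad vendor- or chassis-wide ones. A minimal sketch of what one more entry would look like (the vendor and product strings are hypothetical, chosen only for illustration):

        {
                /* Hypothetical board - illustration only, not part of the patch */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook 15"),
                },
                /* OR together as many SERIO_QUIRK_* bits as the board needs */
                .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
        },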
@@ -1167,11 +1434,6 @@ static int __init i8042_pnp_init(void)
bool pnp_data_busted = false;
int err;
-#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_nopnp_table))
- i8042_nopnp = true;
-#endif
-
if (i8042_nopnp) {
pr_info("PNP detection disabled\n");
return 0;
@@ -1275,6 +1537,59 @@ static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
#endif /* CONFIG_PNP */
+
+#ifdef CONFIG_X86
+static void __init i8042_check_quirks(void)
+{
+ const struct dmi_system_id *device_quirk_info;
+ uintptr_t quirks;
+
+ device_quirk_info = dmi_first_match(i8042_dmi_quirk_table);
+ if (!device_quirk_info)
+ return;
+
+ quirks = (uintptr_t)device_quirk_info->driver_data;
+
+ if (quirks & SERIO_QUIRK_NOKBD)
+ i8042_nokbd = true;
+ if (quirks & SERIO_QUIRK_NOAUX)
+ i8042_noaux = true;
+ if (quirks & SERIO_QUIRK_NOMUX)
+ i8042_nomux = true;
+ if (quirks & SERIO_QUIRK_FORCEMUX)
+ i8042_nomux = false;
+ if (quirks & SERIO_QUIRK_UNLOCK)
+ i8042_unlock = true;
+ if (quirks & SERIO_QUIRK_PROBE_DEFER)
+ i8042_probe_defer = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (quirks & SERIO_QUIRK_RESET_ALWAYS)
+ i8042_reset = I8042_RESET_ALWAYS;
+ if (quirks & SERIO_QUIRK_RESET_NEVER)
+ i8042_reset = I8042_RESET_NEVER;
+ }
+ if (quirks & SERIO_QUIRK_DIRECT)
+ i8042_direct = true;
+ if (quirks & SERIO_QUIRK_DUMBKBD)
+ i8042_dumbkbd = true;
+ if (quirks & SERIO_QUIRK_NOLOOP)
+ i8042_noloop = true;
+ if (quirks & SERIO_QUIRK_NOTIMEOUT)
+ i8042_notimeout = true;
+ if (quirks & SERIO_QUIRK_KBDRESET)
+ i8042_kbdreset = true;
+ if (quirks & SERIO_QUIRK_DRITEK)
+ i8042_dritek = true;
+#ifdef CONFIG_PNP
+ if (quirks & SERIO_QUIRK_NOPNP)
+ i8042_nopnp = true;
+#endif
+}
+#else
+static inline void i8042_check_quirks(void) {}
+#endif
+
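
The "Honor module parameter when value is not default" check above means the RESET_ALWAYS/RESET_NEVER quirk bits are applied only while i8042_reset is still I8042_RESET_DEFAULT; booting with an explicit i8042.reset= option moves it off the default first, so the user's choice wins over the DMI table. A minimal sketch of that precedence, with the matched quirk mask assumed purely for illustration:

        uintptr_t quirks = SERIO_QUIRK_RESET_NEVER;     /* assumed DMI match */

        /* i8042_reset may already have been set from the i8042.reset= parameter */
        if (i8042_reset == I8042_RESET_DEFAULT) {
                if (quirks & SERIO_QUIRK_RESET_ALWAYS)
                        i8042_reset = I8042_RESET_ALWAYS;
                if (quirks & SERIO_QUIRK_RESET_NEVER)
                        i8042_reset = I8042_RESET_NEVER;
        }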
static int __init i8042_platform_init(void)
{
int retval;
@@ -1297,45 +1612,42 @@ static int __init i8042_platform_init(void)
i8042_kbd_irq = I8042_MAP_IRQ(1);
i8042_aux_irq = I8042_MAP_IRQ(12);
- retval = i8042_pnp_init();
- if (retval)
- return retval;
-
#if defined(__ia64__)
- i8042_reset = I8042_RESET_ALWAYS;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
+ i8042_check_quirks();
+
+ pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ i8042_nokbd ? " nokbd" : "",
+ i8042_noaux ? " noaux" : "",
+ i8042_nomux ? " nomux" : "",
+ i8042_unlock ? " unlock" : "",
+ i8042_probe_defer ? " probe_defer" : "",
+ i8042_reset == I8042_RESET_DEFAULT ?
+ "" : i8042_reset == I8042_RESET_ALWAYS ?
+ " reset_always" : " reset_never",
+ i8042_direct ? " direct" : "",
+ i8042_dumbkbd ? " dumbkbd" : "",
+ i8042_noloop ? " noloop" : "",
+ i8042_notimeout ? " notimeout" : "",
+ i8042_kbdreset ? " kbdreset" : "",
#ifdef CONFIG_X86
- /* Honor module parameter when value is not default */
- if (i8042_reset == I8042_RESET_DEFAULT) {
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = I8042_RESET_ALWAYS;
-
- if (dmi_check_system(i8042_dmi_noselftest_table))
- i8042_reset = I8042_RESET_NEVER;
- }
-
- if (dmi_check_system(i8042_dmi_noloop_table))
- i8042_noloop = true;
-
- if (dmi_check_system(i8042_dmi_nomux_table))
- i8042_nomux = true;
-
- if (dmi_check_system(i8042_dmi_forcemux_table))
- i8042_nomux = false;
-
- if (dmi_check_system(i8042_dmi_notimeout_table))
- i8042_notimeout = true;
-
- if (dmi_check_system(i8042_dmi_dritek_table))
- i8042_dritek = true;
-
- if (dmi_check_system(i8042_dmi_kbdreset_table))
- i8042_kbdreset = true;
+ i8042_dritek ? " dritek" : "",
+#else
+ "",
+#endif
+#ifdef CONFIG_PNP
+ i8042_nopnp ? " nopnp" : "");
+#else
+ "");
+#endif
- if (dmi_check_system(i8042_dmi_probe_defer_table))
- i8042_probe_defer = true;
+ retval = i8042_pnp_init();
+ if (retval)
+ return retval;
+#ifdef CONFIG_X86
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/touchscreen/bcm_iproc_tsc.c b/drivers/input/touchscreen/bcm_iproc_tsc.c
index 7de1fd24ce36..35e2fe9911a4 100644
--- a/drivers/input/touchscreen/bcm_iproc_tsc.c
+++ b/drivers/input/touchscreen/bcm_iproc_tsc.c
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom Corporation
*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation version 2.
-*
-* This program is distributed "as is" WITHOUT ANY WARRANTY of any
-* kind, whether express or implied; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index bb2e1cbffba7..82beddb28761 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/ratelimit.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -47,6 +48,8 @@
#define M09_REGISTER_NUM_X 0x94
#define M09_REGISTER_NUM_Y 0x95
+#define M12_REGISTER_REPORT_RATE 0x88
+
#define EV_REGISTER_THRESHOLD 0x40
#define EV_REGISTER_GAIN 0x41
#define EV_REGISTER_OFFSET_Y 0x45
@@ -127,9 +130,12 @@ struct edt_ft5x06_ts_data {
int max_support_points;
char name[EDT_NAME_LEN];
+ char fw_version[EDT_NAME_LEN];
struct edt_reg_addr reg_addr;
enum edt_ver version;
+ unsigned int crc_errors;
+ unsigned int header_errors;
};
struct edt_i2c_chip_data {
@@ -178,6 +184,7 @@ static bool edt_ft5x06_ts_check_crc(struct edt_ft5x06_ts_data *tsdata,
crc ^= buf[i];
if (crc != buf[buflen-1]) {
+ tsdata->crc_errors++;
dev_err_ratelimited(&tsdata->client->dev,
"crc error: 0x%02x expected, got 0x%02x\n",
crc, buf[buflen-1]);
@@ -235,6 +242,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
if (tsdata->version == EDT_M06) {
if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
rdbuf[2] != datalen) {
+ tsdata->header_errors++;
dev_err_ratelimited(dev,
"Unexpected header: %02x%02x%02x!\n",
rdbuf[0], rdbuf[1], rdbuf[2]);
@@ -523,9 +531,55 @@ static EDT_ATTR(offset_y, S_IWUSR | S_IRUGO, NO_REGISTER, NO_REGISTER,
/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
M09_REGISTER_THRESHOLD, EV_REGISTER_THRESHOLD, 0, 255);
-/* m06: range 3 to 14, m12: (0x64: 100Hz) */
+/* m06: range 3 to 14, m12: range 1 to 255 */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, NO_REGISTER, 0, 255);
+ M12_REGISTER_REPORT_RATE, NO_REGISTER, 0, 255);
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->name);
+}
+
+static DEVICE_ATTR_RO(model);
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->fw_version);
+}
+
+static DEVICE_ATTR_RO(fw_version);
+
+/* m06 only */
+static ssize_t header_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->header_errors);
+}
+
+static DEVICE_ATTR_RO(header_errors);
+
+/* m06 only */
+static ssize_t crc_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->crc_errors);
+}
+
+static DEVICE_ATTR_RO(crc_errors);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -534,6 +588,10 @@ static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_offset_y.dattr.attr,
&edt_ft5x06_attr_threshold.dattr.attr,
&edt_ft5x06_attr_report_rate.dattr.attr,
+ &dev_attr_model.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_header_errors.attr,
+ &dev_attr_crc_errors.attr,
NULL
};
@@ -820,13 +878,13 @@ static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#endif /* CONFIG_DEBUGFS */
static int edt_ft5x06_ts_identify(struct i2c_client *client,
- struct edt_ft5x06_ts_data *tsdata,
- char *fw_version)
+ struct edt_ft5x06_ts_data *tsdata)
{
u8 rdbuf[EDT_NAME_LEN];
char *p;
int error;
char *model_name = tsdata->name;
+ char *fw_version = tsdata->fw_version;
/* see what we find if we assume it is a M06 *
* if we get less than EDT_NAME_LEN, we don't want
@@ -1030,7 +1088,8 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
case EDT_M09:
case EDT_M12:
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
- reg_addr->reg_report_rate = NO_REGISTER;
+ reg_addr->reg_report_rate = tsdata->version == EDT_M12 ?
+ M12_REGISTER_REPORT_RATE : NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
reg_addr->reg_offset = M09_REGISTER_OFFSET;
reg_addr->reg_offset_x = NO_REGISTER;
@@ -1081,7 +1140,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
struct input_dev *input;
unsigned long irq_flags;
int error;
- char fw_version[EDT_NAME_LEN];
+ u32 report_rate;
dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n");
@@ -1194,7 +1253,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->input = input;
tsdata->factory_mode = false;
- error = edt_ft5x06_ts_identify(client, tsdata, fw_version);
+ error = edt_ft5x06_ts_identify(client, tsdata);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
return error;
@@ -1210,9 +1269,30 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);
+ if (tsdata->reg_addr.reg_report_rate != NO_REGISTER &&
+ !device_property_read_u32(&client->dev,
+ "report-rate-hz", &report_rate)) {
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate = clamp_val(report_rate, 30, 140);
+ else
+ tsdata->report_rate = clamp_val(report_rate, 1, 255);
+
+ if (report_rate != tsdata->report_rate)
+ dev_warn(&client->dev,
+ "report-rate %dHz is unsupported, use %dHz\n",
+ report_rate, tsdata->report_rate);
+
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate /= 10;
+
+ edt_ft5x06_register_write(tsdata,
+ tsdata->reg_addr.reg_report_rate,
+ tsdata->report_rate);
+ }
+
dev_dbg(&client->dev,
"Model \"%s\", Rev. \"%s\", %dx%d sensors\n",
- tsdata->name, fw_version, tsdata->num_x, tsdata->num_y);
+ tsdata->name, tsdata->fw_version, tsdata->num_x, tsdata->num_y);
input->name = tsdata->name;
input->id.bustype = BUS_I2C;
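
The report-rate-hz handling added above clamps the requested rate to what the controller supports and, for M06 parts, converts it from Hz to the register's units of 10 Hz before writing. A rough worked sketch with an assumed property value (80 Hz is illustrative, not taken from the patch):

        u32 report_rate = 80;                           /* assumed "report-rate-hz" value */

        report_rate = clamp_val(report_rate, 30, 140);  /* M06 supports 30-140 Hz */
        report_rate /= 10;                              /* M06 register counts in units of 10 Hz */
        /* 8 is then written to WORK_REGISTER_REPORT_RATE; an M12 part would
         * instead be clamped to 1-255 and written to M12_REGISTER_REPORT_RATE. */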
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index cbe0dd412912..4b7eee01c6aa 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -220,6 +220,7 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
{
u8 buf[EXC3000_LEN_VENDOR_REQUEST] = { 0x67, 0x00, 0x42, 0x00, 0x03 };
int ret;
+ unsigned long time_left;
mutex_lock(&data->query_lock);
@@ -233,9 +234,9 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
goto out_unlock;
if (response) {
- ret = wait_for_completion_timeout(&data->wait_event,
- timeout * HZ);
- if (ret <= 0) {
+ time_left = wait_for_completion_timeout(&data->wait_event,
+ timeout * HZ);
+ if (time_left == 0) {
ret = -ETIMEDOUT;
goto out_unlock;
}
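
The exc3000 change follows the return convention of wait_for_completion_timeout(): it returns an unsigned long holding the remaining jiffies, or 0 on timeout, so keeping the result in a signed int and testing for <= 0 can never see a negative value and only obscures the intent. A minimal sketch of the idiomatic pattern (names taken from the driver above, error handling trimmed):

        unsigned long time_left;

        time_left = wait_for_completion_timeout(&data->wait_event, timeout * HZ);
        if (!time_left)
                return -ETIMEDOUT;      /* no completion arrived within the timeout */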
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 3ad9870db108..d016505fc081 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -822,22 +822,16 @@ static int goodix_resource(struct acpi_resource *ares, void *data)
struct device *dev = &ts->client->dev;
struct acpi_resource_gpio *gpio;
- switch (ares->type) {
- case ACPI_RESOURCE_TYPE_GPIO:
- gpio = &ares->data.gpio;
- if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) {
- if (ts->gpio_int_idx == -1) {
- ts->gpio_int_idx = ts->gpio_count;
- } else {
- dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
- ts->gpio_int_idx = -2;
- }
+ if (acpi_gpio_get_irq_resource(ares, &gpio)) {
+ if (ts->gpio_int_idx == -1) {
+ ts->gpio_int_idx = ts->gpio_count;
+ } else {
+ dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
+ ts->gpio_int_idx = -2;
}
ts->gpio_count++;
- break;
- default:
- break;
- }
+ } else if (acpi_gpio_get_io_resource(ares, &gpio))
+ ts->gpio_count++;
return 0;
}
@@ -900,6 +894,11 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
} else {
dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
ts->gpio_count, ts->gpio_int_idx);
+ /*
+ * On some devices _PS0 does a reset for us and
+ * sometimes this is necessary for things to work.
+ */
+ acpi_device_fix_up_power(ACPI_COMPANION(dev));
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 43c521f50c85..3dda6eaabdab 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1654,6 +1654,9 @@ static int usbtouch_probe(struct usb_interface *intf,
if (id->driver_info == DEVTYPE_IGNORE)
return -ENODEV;
+ if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info))
+ return -ENODEV;
+
endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting);
if (!endpoint)
return -ENXIO;
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 2757c7768ffe..f51ab5614532 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -758,7 +758,9 @@ batt_err:
static int wm97xx_mfd_remove(struct platform_device *pdev)
{
- return wm97xx_remove(&pdev->dev);
+ wm97xx_remove(&pdev->dev);
+
+ return 0;
}
static int __maybe_unused wm97xx_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 8bd03278ad9a..52f9e9eaab14 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -15,75 +15,75 @@
/* Register Map */
-#define BT541_SWRESET_CMD 0x0000
-#define BT541_WAKEUP_CMD 0x0001
+#define ZINITIX_SWRESET_CMD 0x0000
+#define ZINITIX_WAKEUP_CMD 0x0001
-#define BT541_IDLE_CMD 0x0004
-#define BT541_SLEEP_CMD 0x0005
+#define ZINITIX_IDLE_CMD 0x0004
+#define ZINITIX_SLEEP_CMD 0x0005
-#define BT541_CLEAR_INT_STATUS_CMD 0x0003
-#define BT541_CALIBRATE_CMD 0x0006
-#define BT541_SAVE_STATUS_CMD 0x0007
-#define BT541_SAVE_CALIBRATION_CMD 0x0008
-#define BT541_RECALL_FACTORY_CMD 0x000f
+#define ZINITIX_CLEAR_INT_STATUS_CMD 0x0003
+#define ZINITIX_CALIBRATE_CMD 0x0006
+#define ZINITIX_SAVE_STATUS_CMD 0x0007
+#define ZINITIX_SAVE_CALIBRATION_CMD 0x0008
+#define ZINITIX_RECALL_FACTORY_CMD 0x000f
-#define BT541_THRESHOLD 0x0020
+#define ZINITIX_THRESHOLD 0x0020
-#define BT541_LARGE_PALM_REJECT_AREA_TH 0x003F
+#define ZINITIX_LARGE_PALM_REJECT_AREA_TH 0x003F
-#define BT541_DEBUG_REG 0x0115 /* 0~7 */
+#define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */
-#define BT541_TOUCH_MODE 0x0010
-#define BT541_CHIP_REVISION 0x0011
-#define BT541_FIRMWARE_VERSION 0x0012
+#define ZINITIX_TOUCH_MODE 0x0010
+#define ZINITIX_CHIP_REVISION 0x0011
+#define ZINITIX_FIRMWARE_VERSION 0x0012
#define ZINITIX_USB_DETECT 0x116
-#define BT541_MINOR_FW_VERSION 0x0121
+#define ZINITIX_MINOR_FW_VERSION 0x0121
-#define BT541_VENDOR_ID 0x001C
-#define BT541_HW_ID 0x0014
+#define ZINITIX_VENDOR_ID 0x001C
+#define ZINITIX_HW_ID 0x0014
-#define BT541_DATA_VERSION_REG 0x0013
-#define BT541_SUPPORTED_FINGER_NUM 0x0015
-#define BT541_EEPROM_INFO 0x0018
-#define BT541_INITIAL_TOUCH_MODE 0x0019
+#define ZINITIX_DATA_VERSION_REG 0x0013
+#define ZINITIX_SUPPORTED_FINGER_NUM 0x0015
+#define ZINITIX_EEPROM_INFO 0x0018
+#define ZINITIX_INITIAL_TOUCH_MODE 0x0019
-#define BT541_TOTAL_NUMBER_OF_X 0x0060
-#define BT541_TOTAL_NUMBER_OF_Y 0x0061
+#define ZINITIX_TOTAL_NUMBER_OF_X 0x0060
+#define ZINITIX_TOTAL_NUMBER_OF_Y 0x0061
-#define BT541_DELAY_RAW_FOR_HOST 0x007f
+#define ZINITIX_DELAY_RAW_FOR_HOST 0x007f
-#define BT541_BUTTON_SUPPORTED_NUM 0x00B0
-#define BT541_BUTTON_SENSITIVITY 0x00B2
-#define BT541_DUMMY_BUTTON_SENSITIVITY 0X00C8
+#define ZINITIX_BUTTON_SUPPORTED_NUM 0x00B0
+#define ZINITIX_BUTTON_SENSITIVITY 0x00B2
+#define ZINITIX_DUMMY_BUTTON_SENSITIVITY 0X00C8
-#define BT541_X_RESOLUTION 0x00C0
-#define BT541_Y_RESOLUTION 0x00C1
+#define ZINITIX_X_RESOLUTION 0x00C0
+#define ZINITIX_Y_RESOLUTION 0x00C1
-#define BT541_POINT_STATUS_REG 0x0080
-#define BT541_ICON_STATUS_REG 0x00AA
+#define ZINITIX_POINT_STATUS_REG 0x0080
+#define ZINITIX_ICON_STATUS_REG 0x00AA
-#define BT541_POINT_COORD_REG (BT541_POINT_STATUS_REG + 2)
+#define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2)
-#define BT541_AFE_FREQUENCY 0x0100
-#define BT541_DND_N_COUNT 0x0122
-#define BT541_DND_U_COUNT 0x0135
+#define ZINITIX_AFE_FREQUENCY 0x0100
+#define ZINITIX_DND_N_COUNT 0x0122
+#define ZINITIX_DND_U_COUNT 0x0135
-#define BT541_RAWDATA_REG 0x0200
+#define ZINITIX_RAWDATA_REG 0x0200
-#define BT541_EEPROM_INFO_REG 0x0018
+#define ZINITIX_EEPROM_INFO_REG 0x0018
-#define BT541_INT_ENABLE_FLAG 0x00f0
-#define BT541_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
+#define ZINITIX_INT_ENABLE_FLAG 0x00f0
+#define ZINITIX_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
-#define BT541_BTN_WIDTH 0x016d
+#define ZINITIX_BTN_WIDTH 0x016d
-#define BT541_CHECKSUM_RESULT 0x012c
+#define ZINITIX_CHECKSUM_RESULT 0x012c
-#define BT541_INIT_FLASH 0x01d0
-#define BT541_WRITE_FLASH 0x01d1
-#define BT541_READ_FLASH 0x01d2
+#define ZINITIX_INIT_FLASH 0x01d0
+#define ZINITIX_WRITE_FLASH 0x01d1
+#define ZINITIX_READ_FLASH 0x01d2
#define ZINITIX_INTERNAL_FLAG_02 0x011e
#define ZINITIX_INTERNAL_FLAG_03 0x011f
@@ -196,13 +196,13 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
int i;
int error;
- error = zinitix_write_cmd(client, BT541_SWRESET_CMD);
+ error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD);
if (error) {
dev_err(&client->dev, "Failed to write reset command\n");
return error;
}
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG, 0x0);
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0);
if (error) {
dev_err(&client->dev,
"Failed to reset interrupt enable flag\n");
@@ -210,32 +210,32 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
}
/* initialize */
- error = zinitix_write_u16(client, BT541_X_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_X_RESOLUTION,
bt541->prop.max_x);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_Y_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_Y_RESOLUTION,
bt541->prop.max_y);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_SUPPORTED_FINGER_NUM,
+ error = zinitix_write_u16(client, ZINITIX_SUPPORTED_FINGER_NUM,
MAX_SUPPORTED_FINGER_NUM);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INITIAL_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG,
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG,
BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
BIT_UP);
if (error)
@@ -243,7 +243,7 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
/* clear queue */
for (i = 0; i < 10; i++) {
- zinitix_write_cmd(client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(client, ZINITIX_CLEAR_INT_STATUS_CMD);
udelay(10);
}
@@ -361,7 +361,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
memset(&touch_event, 0, sizeof(struct touch_event));
- error = zinitix_read_data(bt541->client, BT541_POINT_STATUS_REG,
+ error = zinitix_read_data(bt541->client, ZINITIX_POINT_STATUS_REG,
&touch_event, sizeof(struct touch_event));
if (error) {
dev_err(&client->dev, "Failed to read in touchpoint struct\n");
@@ -381,7 +381,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
input_sync(bt541->input_dev);
out:
- zinitix_write_cmd(bt541->client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(bt541->client, ZINITIX_CLEAR_INT_STATUS_CMD);
return IRQ_HANDLED;
}
diff --git a/drivers/interconnect/bulk.c b/drivers/interconnect/bulk.c
index 448cc536aa79..8b1d8a412464 100644
--- a/drivers/interconnect/bulk.c
+++ b/drivers/interconnect/bulk.c
@@ -115,3 +115,45 @@ void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
icc_disable(paths[num_paths].path);
}
EXPORT_SYMBOL_GPL(icc_bulk_disable);
+
+struct icc_bulk_devres {
+ struct icc_bulk_data *paths;
+ int num_paths;
+};
+
+static void devm_icc_bulk_release(struct device *dev, void *res)
+{
+ struct icc_bulk_devres *devres = res;
+
+ icc_bulk_put(devres->num_paths, devres->paths);
+}
+
+/**
+ * devm_of_icc_bulk_get() - resource managed of_icc_bulk_get
+ * @dev: the device requesting the path
+ * @num_paths: the number of icc_bulk_data
+ * @paths: the table with the paths we want to get
+ *
+ * Returns 0 on success or negative errno otherwise.
+ */
+int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
+{
+ struct icc_bulk_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_icc_bulk_release, sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ ret = of_icc_bulk_get(dev, num_paths, paths);
+ if (!ret) {
+ devres->paths = paths;
+ devres->num_paths = num_paths;
+ devres_add(dev, devres);
+ } else {
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_icc_bulk_get);
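
devm_of_icc_bulk_get() pairs of_icc_bulk_get() with a devres action so the paths are released through icc_bulk_put() automatically when the consumer device unbinds. A hedged consumer sketch (the path names and probe function are illustrative, not from this patch):

	#include <linux/interconnect.h>
	#include <linux/platform_device.h>

	static struct icc_bulk_data example_paths[] = {
		{ .name = "cpu-mem" },	/* hypothetical interconnect path names */
		{ .name = "gpu-mem" },
	};

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		/* Looked up from the device's DT node; no explicit put needed. */
		ret = devm_of_icc_bulk_get(&pdev->dev, ARRAY_SIZE(example_paths),
					   example_paths);
		if (ret)
			return ret;

		/* Bandwidth is then voted with icc_bulk_set_bw()/icc_bulk_enable(). */
		return 0;
	}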
diff --git a/drivers/interconnect/imx/Kconfig b/drivers/interconnect/imx/Kconfig
index be2928362bb7..c772552431f5 100644
--- a/drivers/interconnect/imx/Kconfig
+++ b/drivers/interconnect/imx/Kconfig
@@ -15,3 +15,7 @@ config INTERCONNECT_IMX8MN
config INTERCONNECT_IMX8MQ
tristate "i.MX8MQ interconnect driver"
depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MP
+ tristate "i.MX8MP interconnect driver"
+ depends on INTERCONNECT_IMX
diff --git a/drivers/interconnect/imx/Makefile b/drivers/interconnect/imx/Makefile
index 21fd5233754f..16d256cdeab4 100644
--- a/drivers/interconnect/imx/Makefile
+++ b/drivers/interconnect/imx/Makefile
@@ -2,8 +2,10 @@ imx-interconnect-objs := imx.o
imx8mm-interconnect-objs := imx8mm.o
imx8mq-interconnect-objs := imx8mq.o
imx8mn-interconnect-objs := imx8mn.o
+imx8mp-interconnect-objs := imx8mp.o
obj-$(CONFIG_INTERCONNECT_IMX) += imx-interconnect.o
obj-$(CONFIG_INTERCONNECT_IMX8MM) += imx8mm-interconnect.o
obj-$(CONFIG_INTERCONNECT_IMX8MQ) += imx8mq-interconnect.o
obj-$(CONFIG_INTERCONNECT_IMX8MN) += imx8mn-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MP) += imx8mp-interconnect.o
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 249ca25d1d55..48ffd59953bf 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/interconnect-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -21,8 +22,10 @@
/* private icc_node data */
struct imx_icc_node {
const struct imx_icc_node_desc *desc;
+ const struct imx_icc_noc_setting *setting;
struct device *qos_dev;
struct dev_pm_qos_request qos_req;
+ struct imx_icc_provider *imx_provider;
};
static int imx_icc_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
@@ -37,8 +40,30 @@ static int imx_icc_node_set(struct icc_node *node)
{
struct device *dev = node->provider->dev;
struct imx_icc_node *node_data = node->data;
+ void __iomem *base;
+ u32 prio;
u64 freq;
+ if (node_data->setting && node->peak_bw) {
+ base = node_data->setting->reg + node_data->imx_provider->noc_base;
+ if (node_data->setting->mode == IMX_NOC_MODE_FIXED) {
+ prio = node_data->setting->prio_level;
+ prio = PRIORITY_COMP_MARK | (prio << 8) | prio;
+ writel(prio, base + IMX_NOC_PRIO_REG);
+ writel(node_data->setting->mode, base + IMX_NOC_MODE_REG);
+ writel(node_data->setting->ext_control, base + IMX_NOC_EXT_CTL_REG);
+ dev_dbg(dev, "%s: mode: 0x%x, prio: 0x%x, ext_control: 0x%x\n",
+ node_data->desc->name, node_data->setting->mode, prio,
+ node_data->setting->ext_control);
+ } else if (node_data->setting->mode == IMX_NOC_MODE_UNCONFIGURED) {
+ dev_dbg(dev, "%s: mode not unconfigured\n", node_data->desc->name);
+ } else {
+ dev_info(dev, "%s: mode: %d not supported\n",
+ node_data->desc->name, node_data->setting->mode);
+ return -EOPNOTSUPP;
+ }
+ }
+
if (!node_data->qos_dev)
return 0;
@@ -61,6 +86,12 @@ static int imx_icc_node_set(struct icc_node *node)
static int imx_icc_set(struct icc_node *src, struct icc_node *dst)
{
+ int ret;
+
+ ret = imx_icc_node_set(src);
+ if (ret)
+ return ret;
+
return imx_icc_node_set(dst);
}
@@ -128,9 +159,11 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
DEV_PM_QOS_MIN_FREQUENCY, 0);
}
-static struct icc_node *imx_icc_node_add(struct icc_provider *provider,
- const struct imx_icc_node_desc *node_desc)
+static struct icc_node *imx_icc_node_add(struct imx_icc_provider *imx_provider,
+ const struct imx_icc_node_desc *node_desc,
+ const struct imx_icc_noc_setting *setting)
{
+ struct icc_provider *provider = &imx_provider->provider;
struct device *dev = provider->dev;
struct imx_icc_node *node_data;
struct icc_node *node;
@@ -157,6 +190,8 @@ static struct icc_node *imx_icc_node_add(struct icc_provider *provider,
node->name = node_desc->name;
node->data = node_data;
node_data->desc = node_desc;
+ node_data->setting = setting;
+ node_data->imx_provider = imx_provider;
icc_node_add(node, provider);
if (node_desc->adj) {
@@ -178,10 +213,12 @@ static void imx_icc_unregister_nodes(struct icc_provider *provider)
imx_icc_node_destroy(node);
}
-static int imx_icc_register_nodes(struct icc_provider *provider,
+static int imx_icc_register_nodes(struct imx_icc_provider *imx_provider,
const struct imx_icc_node_desc *descs,
- int count)
+ int count,
+ const struct imx_icc_noc_setting *settings)
{
+ struct icc_provider *provider = &imx_provider->provider;
struct icc_onecell_data *provider_data = provider->data;
int ret;
int i;
@@ -191,7 +228,8 @@ static int imx_icc_register_nodes(struct icc_provider *provider,
const struct imx_icc_node_desc *node_desc = &descs[i];
size_t j;
- node = imx_icc_node_add(provider, node_desc);
+ node = imx_icc_node_add(imx_provider, node_desc,
+ settings ? &settings[node_desc->id] : NULL);
if (IS_ERR(node)) {
ret = dev_err_probe(provider->dev, PTR_ERR(node),
"failed to add %s\n", node_desc->name);
@@ -229,32 +267,44 @@ static int get_max_node_id(struct imx_icc_node_desc *nodes, int nodes_count)
}
int imx_icc_register(struct platform_device *pdev,
- struct imx_icc_node_desc *nodes, int nodes_count)
+ struct imx_icc_node_desc *nodes, int nodes_count,
+ struct imx_icc_noc_setting *settings)
{
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
+ struct imx_icc_provider *imx_provider;
struct icc_provider *provider;
- int max_node_id;
+ int num_nodes;
int ret;
/* icc_onecell_data is indexed by node_id, unlike nodes param */
- max_node_id = get_max_node_id(nodes, nodes_count);
- data = devm_kzalloc(dev, struct_size(data, nodes, max_node_id),
+ num_nodes = get_max_node_id(nodes, nodes_count) + 1;
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->num_nodes = max_node_id;
+ data->num_nodes = num_nodes;
- provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
- if (!provider)
+ imx_provider = devm_kzalloc(dev, sizeof(*imx_provider), GFP_KERNEL);
+ if (!imx_provider)
return -ENOMEM;
+ provider = &imx_provider->provider;
provider->set = imx_icc_set;
provider->get_bw = imx_icc_get_bw;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
provider->dev = dev->parent;
- platform_set_drvdata(pdev, provider);
+ platform_set_drvdata(pdev, imx_provider);
+
+ if (settings) {
+ imx_provider->noc_base = devm_of_iomap(dev, provider->dev->of_node, 0, NULL);
+ if (IS_ERR(imx_provider->noc_base)) {
+ ret = PTR_ERR(imx_provider->noc_base);
+ dev_err(dev, "Error mapping NoC: %d\n", ret);
+ return ret;
+ }
+ }
ret = icc_provider_add(provider);
if (ret) {
@@ -262,7 +312,7 @@ int imx_icc_register(struct platform_device *pdev,
return ret;
}
- ret = imx_icc_register_nodes(provider, nodes, nodes_count);
+ ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
if (ret)
goto provider_del;
@@ -276,11 +326,11 @@ EXPORT_SYMBOL_GPL(imx_icc_register);
int imx_icc_unregister(struct platform_device *pdev)
{
- struct icc_provider *provider = platform_get_drvdata(pdev);
+ struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
- imx_icc_unregister_nodes(provider);
+ imx_icc_unregister_nodes(&imx_provider->provider);
- return icc_provider_del(provider);
+ return icc_provider_del(&imx_provider->provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
index 75da51076c68..e0a2ee173ecd 100644
--- a/drivers/interconnect/imx/imx.h
+++ b/drivers/interconnect/imx/imx.h
@@ -10,11 +10,43 @@
#ifndef __DRIVERS_INTERCONNECT_IMX_H
#define __DRIVERS_INTERCONNECT_IMX_H
+#include <linux/interconnect-provider.h>
#include <linux/kernel.h>
#define IMX_ICC_MAX_LINKS 4
/*
+ * High throughput priority level in Regulator mode
+ * Read Priority in Fixed/Limiter mode
+ */
+#define PRIORITY0_SHIFT 0
+/*
+ * Low throughput priority level in Regulator mode
+ * Write Priority in Fixed/Limiter mode
+ */
+#define PRIORITY1_SHIFT 8
+#define PRIORITY_MASK 0x7
+
+#define PRIORITY_COMP_MARK BIT(31) /* Must be set */
+
+#define IMX_NOC_MODE_FIXED 0
+#define IMX_NOC_MODE_LIMITER 1
+#define IMX_NOC_MODE_BYPASS 2
+#define IMX_NOC_MODE_REGULATOR 3
+#define IMX_NOC_MODE_UNCONFIGURED 0xFF
+
+#define IMX_NOC_PRIO_REG 0x8
+#define IMX_NOC_MODE_REG 0xC
+#define IMX_NOC_BANDWIDTH_REG 0x10
+#define IMX_NOC_SATURATION 0x14
+#define IMX_NOC_EXT_CTL_REG 0x18
+
+struct imx_icc_provider {
+ void __iomem *noc_base;
+ struct icc_provider provider;
+};
+
+/*
* struct imx_icc_node_adj - Describe a dynamic adjustable node
*/
struct imx_icc_node_adj_desc {
@@ -38,6 +70,20 @@ struct imx_icc_node_desc {
const struct imx_icc_node_adj_desc *adj;
};
+/*
+ * struct imx_icc_noc_setting - Describe an interconnect node setting
+ * @reg: register offset inside the NoC
+ * @prio_level: priority level
+ * @mode: functional mode
+ * @ext_control: external input control
+ */
+struct imx_icc_noc_setting {
+ u32 reg;
+ u32 prio_level;
+ u32 mode;
+ u32 ext_control;
+};
+
#define DEFINE_BUS_INTERCONNECT(_name, _id, _adj, ...) \
{ \
.id = _id, \
@@ -55,7 +101,8 @@ struct imx_icc_node_desc {
int imx_icc_register(struct platform_device *pdev,
struct imx_icc_node_desc *nodes,
- int nodes_count);
+ int nodes_count,
+ struct imx_icc_noc_setting *noc_settings);
int imx_icc_unregister(struct platform_device *pdev);
#endif /* __DRIVERS_INTERCONNECT_IMX_H */
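
For a fixed-mode setting, imx_icc_node_set() above composes the priority register value from these new definitions as PRIORITY_COMP_MARK | (prio << 8) | prio, i.e. the same level in both the read (PRIORITY0) and write (PRIORITY1) fields plus the mandatory comparison-mark bit. A worked example with prio_level = 3 (the value is chosen only for illustration):

	/*
	 *   prio = PRIORITY_COMP_MARK | (3 << PRIORITY1_SHIFT) | 3
	 *        = BIT(31)            | 0x300                  | 0x3
	 *        = 0x80000303
	 * which is then written to IMX_NOC_PRIO_REG at the node's register
	 * offset inside the NoC.
	 */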
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
index 1083490bb391..ae797412db96 100644
--- a/drivers/interconnect/imx/imx8mm.c
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -83,7 +83,7 @@ static struct imx_icc_node_desc nodes[] = {
static int imx8mm_icc_probe(struct platform_device *pdev)
{
- return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
static int imx8mm_icc_remove(struct platform_device *pdev)
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
index ad97e55fd4e5..1ce94c5bdd8c 100644
--- a/drivers/interconnect/imx/imx8mn.c
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -72,7 +72,7 @@ static struct imx_icc_node_desc nodes[] = {
static int imx8mn_icc_probe(struct platform_device *pdev)
{
- return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
static int imx8mn_icc_remove(struct platform_device *pdev)
diff --git a/drivers/interconnect/imx/imx8mp.c b/drivers/interconnect/imx/imx8mp.c
new file mode 100644
index 000000000000..5f1c83ed157b
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mp.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MP SoC
+ *
+ * Copyright 2022 NXP
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/fsl,imx8mp.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mp_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .main_noc = true,
+};
+
+static struct imx_icc_noc_setting noc_setting_nodes[] = {
+ [IMX8MP_ICM_MLMIX] = {
+ .reg = 0x180,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_DSP] = {
+ .reg = 0x200,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_SDMA2PER] = {
+ .reg = 0x280,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 4,
+ },
+ [IMX8MP_ICM_SDMA2BURST] = {
+ .reg = 0x300,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 4,
+ },
+ [IMX8MP_ICM_SDMA3PER] = {
+ .reg = 0x380,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 4,
+ },
+ [IMX8MP_ICM_SDMA3BURST] = {
+ .reg = 0x400,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 4,
+ },
+ [IMX8MP_ICM_EDMA] = {
+ .reg = 0x480,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 4,
+ },
+ [IMX8MP_ICM_GPU3D] = {
+ .reg = 0x500,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_GPU2D] = {
+ .reg = 0x580,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_HRV] = {
+ .reg = 0x600,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_LCDIF_HDMI] = {
+ .reg = 0x680,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_HDCP] = {
+ .reg = 0x700,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 5,
+ },
+ [IMX8MP_ICM_NOC_PCIE] = {
+ .reg = 0x780,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_USB1] = {
+ .reg = 0x800,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_USB2] = {
+ .reg = 0x880,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_PCIE] = {
+ .reg = 0x900,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_LCDIF_RD] = {
+ .reg = 0x980,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_LCDIF_WR] = {
+ .reg = 0xa00,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_ISI0] = {
+ .reg = 0xa80,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_ISI1] = {
+ .reg = 0xb00,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_ISI2] = {
+ .reg = 0xb80,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 2,
+ .ext_control = 1,
+ },
+ [IMX8MP_ICM_ISP0] = {
+ .reg = 0xc00,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 7,
+ },
+ [IMX8MP_ICM_ISP1] = {
+ .reg = 0xc80,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 7,
+ },
+ [IMX8MP_ICM_DWE] = {
+ .reg = 0xd00,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 7,
+ },
+ [IMX8MP_ICM_VPU_G1] = {
+ .reg = 0xd80,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_VPU_G2] = {
+ .reg = 0xe00,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICM_VPU_H1] = {
+ .reg = 0xe80,
+ .mode = IMX_NOC_MODE_FIXED,
+ .prio_level = 3,
+ },
+ [IMX8MP_ICN_MEDIA] = {
+ .mode = IMX_NOC_MODE_UNCONFIGURED,
+ },
+ [IMX8MP_ICN_VIDEO] = {
+ .mode = IMX_NOC_MODE_UNCONFIGURED,
+ },
+ [IMX8MP_ICN_AUDIO] = {
+ .mode = IMX_NOC_MODE_UNCONFIGURED,
+ },
+ [IMX8MP_ICN_HDMI] = {
+ .mode = IMX_NOC_MODE_UNCONFIGURED,
+ },
+ [IMX8MP_ICN_GPU] = {
+ .mode = IMX_NOC_MODE_UNCONFIGURED,
+ },
+ [IMX8MP_ICN_HSIO] = {
+ .mode = IMX_NOC_MODE_UNCONFIGURED,
+ },
+};
+
+/* Describe bus masters, slaves and connections between them */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MP_ICN_NOC, &imx8mp_noc_adj,
+ IMX8MP_ICS_DRAM, IMX8MP_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MP_ICS_OCRAM, NULL),
+ DEFINE_BUS_SLAVE("DRAM", IMX8MP_ICS_DRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MP_ICM_A53, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("SUPERMIX", IMX8MP_ICM_SUPERMIX, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("GIC", IMX8MP_ICM_GIC, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("MLMIX", IMX8MP_ICM_MLMIX, IMX8MP_ICN_NOC),
+
+ DEFINE_BUS_INTERCONNECT("NOC_AUDIO", IMX8MP_ICN_AUDIO, NULL, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("DSP", IMX8MP_ICM_DSP, IMX8MP_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA2PER", IMX8MP_ICM_SDMA2PER, IMX8MP_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA2BURST", IMX8MP_ICM_SDMA2BURST, IMX8MP_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3PER", IMX8MP_ICM_SDMA3PER, IMX8MP_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3BURST", IMX8MP_ICM_SDMA3BURST, IMX8MP_ICN_AUDIO),
+ DEFINE_BUS_MASTER("EDMA", IMX8MP_ICM_EDMA, IMX8MP_ICN_AUDIO),
+
+ DEFINE_BUS_INTERCONNECT("NOC_GPU", IMX8MP_ICN_GPU, NULL, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("GPU 2D", IMX8MP_ICM_GPU2D, IMX8MP_ICN_GPU),
+ DEFINE_BUS_MASTER("GPU 3D", IMX8MP_ICM_GPU3D, IMX8MP_ICN_GPU),
+
+ DEFINE_BUS_INTERCONNECT("NOC_HDMI", IMX8MP_ICN_HDMI, NULL, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("HRV", IMX8MP_ICM_HRV, IMX8MP_ICN_HDMI),
+ DEFINE_BUS_MASTER("LCDIF_HDMI", IMX8MP_ICM_LCDIF_HDMI, IMX8MP_ICN_HDMI),
+ DEFINE_BUS_MASTER("HDCP", IMX8MP_ICM_HDCP, IMX8MP_ICN_HDMI),
+
+ DEFINE_BUS_INTERCONNECT("NOC_HSIO", IMX8MP_ICN_HSIO, NULL, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("NOC_PCIE", IMX8MP_ICM_NOC_PCIE, IMX8MP_ICN_HSIO),
+ DEFINE_BUS_MASTER("USB1", IMX8MP_ICM_USB1, IMX8MP_ICN_HSIO),
+ DEFINE_BUS_MASTER("USB2", IMX8MP_ICM_USB2, IMX8MP_ICN_HSIO),
+ DEFINE_BUS_MASTER("PCIE", IMX8MP_ICM_PCIE, IMX8MP_ICN_HSIO),
+
+ DEFINE_BUS_INTERCONNECT("NOC_MEDIA", IMX8MP_ICN_MEDIA, NULL, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("LCDIF_RD", IMX8MP_ICM_LCDIF_RD, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("LCDIF_WR", IMX8MP_ICM_LCDIF_WR, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("ISI0", IMX8MP_ICM_ISI0, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("ISI1", IMX8MP_ICM_ISI1, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("ISI2", IMX8MP_ICM_ISI2, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("ISP0", IMX8MP_ICM_ISP0, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("ISP1", IMX8MP_ICM_ISP1, IMX8MP_ICN_MEDIA),
+ DEFINE_BUS_MASTER("DWE", IMX8MP_ICM_DWE, IMX8MP_ICN_MEDIA),
+
+ DEFINE_BUS_INTERCONNECT("NOC_VIDEO", IMX8MP_ICN_VIDEO, NULL, IMX8MP_ICN_NOC),
+ DEFINE_BUS_MASTER("VPU G1", IMX8MP_ICM_VPU_G1, IMX8MP_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G2", IMX8MP_ICM_VPU_G2, IMX8MP_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU H1", IMX8MP_ICM_VPU_H1, IMX8MP_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MP_ICN_MAIN, NULL,
+ IMX8MP_ICN_NOC, IMX8MP_ICS_OCRAM),
+};
+
+static int imx8mp_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes);
+}
+
+static int imx8mp_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mp_icc_driver = {
+ .probe = imx8mp_icc_probe,
+ .remove = imx8mp_icc_remove,
+ .driver = {
+ .name = "imx8mp-interconnect",
+ },
+};
+
+module_platform_driver(imx8mp_icc_driver);
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx8mp-interconnect");
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index d7768d3c6d8a..7f00a0511c6e 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -82,7 +82,7 @@ static struct imx_icc_node_desc nodes[] = {
static int imx8mq_icc_probe(struct platform_device *pdev)
{
- return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
static int imx8mq_icc_remove(struct platform_device *pdev)
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 22adff5d7f53..25d5b4baf6f6 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -155,6 +155,15 @@ config INTERCONNECT_QCOM_SDX65
This is a driver for the Qualcomm Network-on-Chip on sdx65-based
platforms.
+config INTERCONNECT_QCOM_SM6350
+ tristate "Qualcomm SM6350 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sm6350-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 8d1fe9d38ac3..8e357528185d 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
+
+interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8939-objs := msm8939.o
@@ -17,6 +20,7 @@ qnoc-sdm660-objs := sdm660.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
qnoc-sdx65-objs := sdx65.o
+qnoc-sm6350-objs := sm6350.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
@@ -40,6 +44,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
diff --git a/drivers/interconnect/qcom/icc-common.c b/drivers/interconnect/qcom/icc-common.c
new file mode 100644
index 000000000000..0822ce207b5d
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-common.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ */
+
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "icc-common.h"
+
+struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ node = of_icc_xlate_onecell(spec, data);
+ if (IS_ERR(node))
+ return ERR_CAST(node);
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ if (spec->args_count == 2)
+ ndata->tag = spec->args[1];
+
+ if (spec->args_count > 2)
+ pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
+
+ return ndata;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
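
qcom_icc_xlate_extended() extends the standard onecell translation: the first phandle argument selects the node and an optional second argument is stored as a path tag, which later steers the vote into a specific bucket. A hedged consumer sketch (the path name and tag choice are illustrative):

	#include <linux/interconnect.h>
	#include <dt-bindings/interconnect/qcom,icc.h>

	static int tag_example(struct device *dev)
	{
		struct icc_path *path;

		/*
		 * If the DT interconnects specifier carries a second cell, it
		 * is parsed as the tag by qcom_icc_xlate_extended(); the tag
		 * can also be set (or overridden) at runtime.
		 */
		path = of_icc_get(dev, "memory");
		if (IS_ERR(path))
			return PTR_ERR(path);

		icc_set_tag(path, QCOM_ICC_TAG_ALWAYS);
		return icc_set_bw(path, 100000, 200000);	/* kBps avg/peak */
	}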
diff --git a/drivers/interconnect/qcom/icc-common.h b/drivers/interconnect/qcom/icc-common.h
new file mode 100644
index 000000000000..33bb2c38dff3
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
+#define __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
+
+#include <linux/interconnect-provider.h>
+
+struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
+
+#endif
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index fb013191c29b..7f6a70e0256a 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include "smd-rpm.h"
+#include "icc-common.h"
#include "icc-rpm.h"
/* QNOC QoS */
@@ -233,48 +234,162 @@ static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
return ret;
}
+static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
+ u64 sum_bw)
+{
+ int ret;
+
+ if (!qn->qos.ap_owned) {
+ /* send bandwidth request message to the RPM processor */
+ ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
+ if (ret)
+ return ret;
+ } else if (qn->qos.qos_mode != -1) {
+ /* set bandwidth directly from the AP */
+ ret = qcom_icc_qos_set(n, sum_bw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qcom_icc_pre_bw_aggregate - cleans up values before re-aggregating requests
+ * @node: icc node to operate on
+ */
+static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
+{
+ struct qcom_icc_node *qn;
+ size_t i;
+
+ qn = node->data;
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ qn->sum_avg[i] = 0;
+ qn->max_peak[i] = 0;
+ }
+}
+
+/**
+ * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
+ * @node: node to aggregate
+ * @tag: tag to indicate which buckets to aggregate
+ * @avg_bw: new bw to sum aggregate
+ * @peak_bw: new bw to max aggregate
+ * @agg_avg: existing aggregate avg bw val
+ * @agg_peak: existing aggregate peak bw val
+ */
+static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ if (!tag)
+ tag = QCOM_ICC_TAG_ALWAYS;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ if (tag & BIT(i)) {
+ qn->sum_avg[i] += avg_bw;
+ qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
+ }
+ }
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+ return 0;
+}
+
+/**
+ * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
+ * @provider: generic interconnect provider
+ * @agg_avg: an array for aggregated average bandwidth of buckets
+ * @agg_peak: an array for aggregated peak bandwidth of buckets
+ * @max_agg_avg: pointer to max value of aggregated average bandwidth
+ */
+static void qcom_icc_bus_aggregate(struct icc_provider *provider,
+ u64 *agg_avg, u64 *agg_peak,
+ u64 *max_agg_avg)
+{
+ struct icc_node *node;
+ struct qcom_icc_node *qn;
+ int i;
+
+ /* Initialise aggregate values */
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ agg_avg[i] = 0;
+ agg_peak[i] = 0;
+ }
+
+ *max_agg_avg = 0;
+
+ /*
+ * Iterate nodes on the interconnect and aggregate bandwidth
+ * requests for every bucket.
+ */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ qn = node->data;
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ agg_avg[i] += qn->sum_avg[i];
+ agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
+ }
+ }
+
+ /* Find maximum values across all buckets */
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
+ *max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
+}
+
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
+ struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
struct icc_provider *provider;
- struct icc_node *n;
u64 sum_bw;
- u64 max_peak_bw;
u64 rate;
- u32 agg_avg = 0;
- u32 agg_peak = 0;
+ u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
+ u64 max_agg_avg;
int ret, i;
+ int bucket;
- qn = src->data;
+ src_qn = src->data;
+ if (dst)
+ dst_qn = dst->data;
provider = src->provider;
qp = to_qcom_provider(provider);
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
+ qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
+ sum_bw = icc_units_to_bps(max_agg_avg);
- if (!qn->qos.ap_owned) {
- /* send bandwidth request message to the RPM processor */
- ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
- if (ret)
- return ret;
- } else if (qn->qos.qos_mode != -1) {
- /* set bandwidth directly from the AP */
- ret = qcom_icc_qos_set(src, sum_bw);
+ ret = __qcom_icc_set(src, src_qn, sum_bw);
+ if (ret)
+ return ret;
+ if (dst_qn) {
+ ret = __qcom_icc_set(dst, dst_qn, sum_bw);
if (ret)
return ret;
}
- rate = max(sum_bw, max_peak_bw);
-
- do_div(rate, qn->buswidth);
- rate = min_t(u64, rate, LONG_MAX);
-
for (i = 0; i < qp->num_clks; i++) {
+ /*
+ * Use the WAKE bucket for the active clock ("bus_a") and the SLEEP
+ * bucket for the other clocks. If a platform doesn't set
+ * interconnect path tags, the SLEEP bucket is used for all clocks
+ * by default.
+ *
+ * Note, AMC bucket is not supported yet.
+ */
+ if (!strcmp(qp->bus_clks[i].id, "bus_a"))
+ bucket = QCOM_ICC_BUCKET_WAKE;
+ else
+ bucket = QCOM_ICC_BUCKET_SLEEP;
+
+ rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
+ do_div(rate, src_qn->buswidth);
+ rate = min_t(u64, rate, LONG_MAX);
+
if (qp->bus_clk_rate[i] == rate)
continue;
@@ -394,8 +509,9 @@ regmap_done:
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = qcom_icc_set;
- provider->aggregate = icc_std_aggregate;
- provider->xlate = of_icc_xlate_onecell;
+ provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
+ provider->aggregate = qcom_icc_bw_aggregate;
+ provider->xlate_extended = qcom_icc_xlate_extended;
provider->data = data;
ret = icc_provider_add(provider);
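
With the bucket-based aggregation above, a clock's target rate is no longer derived from one global sum: the "bus_a" (active) clock is sized from the WAKE bucket and the other clocks from the SLEEP bucket. A hedged sketch of the per-bucket rate computation, mirroring the loop in qcom_icc_set() (the helper name is made up for illustration):

	static u64 bucket_clk_rate(u64 agg_avg, u64 agg_peak, u16 buswidth)
	{
		/*
		 * Convert the larger of the aggregated avg/peak bandwidth
		 * (icc kBps units) to bytes per second and divide by the bus
		 * width to obtain a bus clock rate.
		 */
		u64 rate = icc_units_to_bps(max(agg_avg, agg_peak));

		do_div(rate, buswidth);
		return min_t(u64, rate, LONG_MAX);	/* clamp for clk_set_rate() */
	}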
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index ebee9009301e..a49af844ab13 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -6,6 +6,8 @@
#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
+#include <dt-bindings/interconnect/qcom,icc.h>
+
#define RPM_BUS_MASTER_REQ 0x73616d62
#define RPM_BUS_SLAVE_REQ 0x766c7362
@@ -65,6 +67,8 @@ struct qcom_icc_qos {
* @links: an array of nodes where we can go next while traversing
* @num_links: the total number of @links
* @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
* @qos: NoC QoS setting parameters
@@ -75,6 +79,8 @@ struct qcom_icc_node {
const u16 *links;
u16 num_links;
u16 buswidth;
+ u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
+ u64 max_peak[QCOM_ICC_NUM_BUCKETS];
int mas_rpm_id;
int slv_rpm_id;
struct qcom_icc_qos qos;
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 3c40076eb5fb..114bb8f64573 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include "bcm-voter.h"
+#include "icc-common.h"
#include "icc-rpmh.h"
/**
@@ -100,31 +101,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
}
EXPORT_SYMBOL_GPL(qcom_icc_set);
-struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
-{
- struct icc_node_data *ndata;
- struct icc_node *node;
-
- node = of_icc_xlate_onecell(spec, data);
- if (IS_ERR(node))
- return ERR_CAST(node);
-
- ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
- if (!ndata)
- return ERR_PTR(-ENOMEM);
-
- ndata->node = node;
-
- if (spec->args_count == 2)
- ndata->tag = spec->args[1];
-
- if (spec->args_count > 2)
- pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
-
- return ndata;
-}
-EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
-
/**
* qcom_icc_bcm_init - populates bcm aux data and connect qnodes
* @bcm: bcm to be initialized
@@ -258,6 +234,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
data->num_nodes = num_nodes;
platform_set_drvdata(pdev, qp);
+ /* Populate child NoC devices if any */
+ if (of_get_child_count(dev->of_node) > 0)
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+
return 0;
err:
icc_nodes_remove(provider);
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index d29929461c17..04391c1ba465 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -131,7 +131,6 @@ struct qcom_icc_desc {
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
-struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev);
void qcom_icc_pre_aggregate(struct icc_node *node);
int qcom_icc_rpmh_probe(struct platform_device *pdev);
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index 63b31deea722..caf0aefad668 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -1423,6 +1423,7 @@ static struct platform_driver msm8939_noc_driver = {
.driver = {
.name = "qnoc-msm8939",
.of_match_table = msm8939_noc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(msm8939_noc_driver);
diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
new file mode 100644
index 000000000000..a3d46e59444e
--- /dev/null
+++ b/drivers/interconnect/qcom/sm6350.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm6350.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm6350.h"
+
+DEFINE_QNODE(qhm_a1noc_cfg, SM6350_MASTER_A1NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup_0, SM6350_MASTER_QUP_0, 1, 4, SM6350_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_emmc, SM6350_MASTER_EMMC, 1, 8, SM6350_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_ufs_mem, SM6350_MASTER_UFS_MEM, 1, 8, SM6350_A1NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_a2noc_cfg, SM6350_MASTER_A2NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SM6350_MASTER_QDSS_BAM, 1, 4, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qup_1, SM6350_MASTER_QUP_1, 1, 4, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_crypto, SM6350_MASTER_CRYPTO_CORE_0, 1, 8, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_ipa, SM6350_MASTER_IPA, 1, 8, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_qdss_etr, SM6350_MASTER_QDSS_ETR, 1, 8, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_sdc2, SM6350_MASTER_SDCC_2, 1, 8, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_usb3_0, SM6350_MASTER_USB3, 1, 8, SM6350_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SM6350_MASTER_CAMNOC_HF0_UNCOMP, 2, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_icp_uncomp, SM6350_MASTER_CAMNOC_ICP_UNCOMP, 1, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SM6350_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qup0_core_master, SM6350_MASTER_QUP_CORE_0, 1, 4, SM6350_SLAVE_QUP_CORE_0);
+DEFINE_QNODE(qup1_core_master, SM6350_MASTER_QUP_CORE_1, 1, 4, SM6350_SLAVE_QUP_CORE_1);
+DEFINE_QNODE(qnm_npu, SM6350_MASTER_NPU, 2, 32, SM6350_SLAVE_CDSP_GEM_NOC);
+DEFINE_QNODE(qxm_npu_dsp, SM6350_MASTER_NPU_PROC, 1, 8, SM6350_SLAVE_CDSP_GEM_NOC);
+DEFINE_QNODE(qnm_snoc, SM6350_SNOC_CNOC_MAS, 1, 8, SM6350_SLAVE_CAMERA_CFG, SM6350_SLAVE_SDCC_2, SM6350_SLAVE_CNOC_MNOC_CFG, SM6350_SLAVE_UFS_MEM_CFG, SM6350_SLAVE_QM_CFG, SM6350_SLAVE_SNOC_CFG, SM6350_SLAVE_QM_MPU_CFG, SM6350_SLAVE_GLM, SM6350_SLAVE_PDM, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, SM6350_SLAVE_A2NOC_CFG, SM6350_SLAVE_QDSS_CFG, SM6350_SLAVE_VSENSE_CTRL_CFG, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, SM6350_SLAVE_DISPLAY_CFG, SM6350_SLAVE_TCSR, SM6350_SLAVE_DCC_CFG, SM6350_SLAVE_CNOC_DDRSS, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, SM6350_SLAVE_NPU_CFG, SM6350_SLAVE_AHB2PHY, SM6350_SLAVE_GRAPHICS_3D_CFG, SM6350_SLAVE_BOOT_ROM, SM6350_SLAVE_VENUS_CFG, SM6350_SLAVE_IPA_CFG, SM6350_SLAVE_SECURITY, SM6350_SLAVE_IMEM_CFG, SM6350_SLAVE_CNOC_MSS, SM6350_SLAVE_SERVICE_CNOC, SM6350_SLAVE_USB3, SM6350_SLAVE_VENUS_THROTTLE_CFG, SM6350_SLAVE_RBCPR_CX_CFG, SM6350_SLAVE_A1NOC_CFG, SM6350_SLAVE_AOSS, SM6350_SLAVE_PRNG, SM6350_SLAVE_EMMC_CFG, SM6350_SLAVE_CRYPTO_0_CFG, SM6350_SLAVE_PIMEM_CFG, SM6350_SLAVE_RBCPR_MX_CFG, SM6350_SLAVE_QUP_0, SM6350_SLAVE_QUP_1, SM6350_SLAVE_CLK_CTL);
+DEFINE_QNODE(xm_qdss_dap, SM6350_MASTER_QDSS_DAP, 1, 8, SM6350_SLAVE_CAMERA_CFG, SM6350_SLAVE_SDCC_2, SM6350_SLAVE_CNOC_MNOC_CFG, SM6350_SLAVE_UFS_MEM_CFG, SM6350_SLAVE_QM_CFG, SM6350_SLAVE_SNOC_CFG, SM6350_SLAVE_QM_MPU_CFG, SM6350_SLAVE_GLM, SM6350_SLAVE_PDM, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, SM6350_SLAVE_A2NOC_CFG, SM6350_SLAVE_QDSS_CFG, SM6350_SLAVE_VSENSE_CTRL_CFG, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, SM6350_SLAVE_DISPLAY_CFG, SM6350_SLAVE_TCSR, SM6350_SLAVE_DCC_CFG, SM6350_SLAVE_CNOC_DDRSS, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, SM6350_SLAVE_NPU_CFG, SM6350_SLAVE_AHB2PHY, SM6350_SLAVE_GRAPHICS_3D_CFG, SM6350_SLAVE_BOOT_ROM, SM6350_SLAVE_VENUS_CFG, SM6350_SLAVE_IPA_CFG, SM6350_SLAVE_SECURITY, SM6350_SLAVE_IMEM_CFG, SM6350_SLAVE_CNOC_MSS, SM6350_SLAVE_SERVICE_CNOC, SM6350_SLAVE_USB3, SM6350_SLAVE_VENUS_THROTTLE_CFG, SM6350_SLAVE_RBCPR_CX_CFG, SM6350_SLAVE_A1NOC_CFG, SM6350_SLAVE_AOSS, SM6350_SLAVE_PRNG, SM6350_SLAVE_EMMC_CFG, SM6350_SLAVE_CRYPTO_0_CFG, SM6350_SLAVE_PIMEM_CFG, SM6350_SLAVE_RBCPR_MX_CFG, SM6350_SLAVE_QUP_0, SM6350_SLAVE_QUP_1, SM6350_SLAVE_CLK_CTL);
+DEFINE_QNODE(qhm_cnoc_dc_noc, SM6350_MASTER_CNOC_DC_NOC, 1, 4, SM6350_SLAVE_LLCC_CFG, SM6350_SLAVE_GEM_NOC_CFG);
+DEFINE_QNODE(acm_apps, SM6350_MASTER_AMPSS_M0, 1, 16, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(acm_sys_tcu, SM6350_MASTER_SYS_TCU, 1, 8, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qhm_gemnoc_cfg, SM6350_MASTER_GEM_NOC_CFG, 1, 4, SM6350_SLAVE_MCDMA_MS_MPU_CFG, SM6350_SLAVE_SERVICE_GEM_NOC, SM6350_SLAVE_MSS_PROC_MS_MPU_CFG);
+DEFINE_QNODE(qnm_cmpnoc, SM6350_MASTER_COMPUTE_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_mnoc_hf, SM6350_MASTER_MNOC_HF_MEM_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_mnoc_sf, SM6350_MASTER_MNOC_SF_MEM_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SM6350_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM6350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SM6350_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM6350_SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, SM6350_MASTER_GRAPHICS_3D, 2, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(llcc_mc, SM6350_MASTER_LLCC, 2, 4, SM6350_SLAVE_EBI_CH0);
+DEFINE_QNODE(qhm_mnoc_cfg, SM6350_MASTER_CNOC_MNOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qnm_video0, SM6350_MASTER_VIDEO_P0, 1, 32, SM6350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video_cvp, SM6350_MASTER_VIDEO_PROC, 1, 8, SM6350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf, SM6350_MASTER_CAMNOC_HF, 2, 32, SM6350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_icp, SM6350_MASTER_CAMNOC_ICP, 1, 8, SM6350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SM6350_MASTER_CAMNOC_SF, 1, 32, SM6350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SM6350_MASTER_MDP_PORT0, 1, 32, SM6350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(amm_npu_sys, SM6350_MASTER_NPU_SYS, 2, 32, SM6350_SLAVE_NPU_COMPUTE_NOC);
+DEFINE_QNODE(qhm_npu_cfg, SM6350_MASTER_NPU_NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_NPU_NOC, SM6350_SLAVE_ISENSE_CFG, SM6350_SLAVE_NPU_LLM_CFG, SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG, SM6350_SLAVE_NPU_CP, SM6350_SLAVE_NPU_TCM, SM6350_SLAVE_NPU_CAL_DP0, SM6350_SLAVE_NPU_DPM);
+DEFINE_QNODE(qhm_snoc_cfg, SM6350_MASTER_SNOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SM6350_A1NOC_SNOC_MAS, 1, 16, SM6350_SLAVE_SNOC_GEM_NOC_SF, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SM6350_A2NOC_SNOC_MAS, 1, 16, SM6350_SLAVE_SNOC_GEM_NOC_SF, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_TCU, SM6350_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_gemnoc, SM6350_MASTER_GEM_NOC_SNOC, 1, 8, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_TCU, SM6350_SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, SM6350_MASTER_PIMEM, 1, 8, SM6350_SLAVE_SNOC_GEM_NOC_GC, SM6350_SLAVE_OCIMEM);
+DEFINE_QNODE(xm_gic, SM6350_MASTER_GIC, 1, 8, SM6350_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(qns_a1noc_snoc, SM6350_A1NOC_SNOC_SLV, 1, 16, SM6350_A1NOC_SNOC_MAS);
+DEFINE_QNODE(srvc_aggre1_noc, SM6350_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SM6350_A2NOC_SNOC_SLV, 1, 16, SM6350_A2NOC_SNOC_MAS);
+DEFINE_QNODE(srvc_aggre2_noc, SM6350_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SM6350_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qup0_core_slave, SM6350_SLAVE_QUP_CORE_0, 1, 4);
+DEFINE_QNODE(qup1_core_slave, SM6350_SLAVE_QUP_CORE_1, 1, 4);
+DEFINE_QNODE(qns_cdsp_gemnoc, SM6350_SLAVE_CDSP_GEM_NOC, 1, 32, SM6350_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(qhs_a1_noc_cfg, SM6350_SLAVE_A1NOC_CFG, 1, 4, SM6350_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SM6350_SLAVE_A2NOC_CFG, 1, 4, SM6350_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_ahb2phy0, SM6350_SLAVE_AHB2PHY, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy2, SM6350_SLAVE_AHB2PHY_2, 1, 4);
+DEFINE_QNODE(qhs_aoss, SM6350_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_boot_rom, SM6350_SLAVE_BOOT_ROM, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SM6350_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_camera_nrt_thrott_cfg, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_camera_rt_throttle_cfg, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SM6350_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SM6350_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SM6350_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SM6350_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SM6350_SLAVE_DCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SM6350_SLAVE_CNOC_DDRSS, 1, 4, SM6350_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_display_cfg, SM6350_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_throttle_cfg, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_emmc_cfg, SM6350_SLAVE_EMMC_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SM6350_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SM6350_SLAVE_GRAPHICS_3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SM6350_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SM6350_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SM6350_SLAVE_CNOC_MNOC_CFG, 1, 4, SM6350_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_mss_cfg, SM6350_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_npu_cfg, SM6350_SLAVE_NPU_CFG, 1, 4, SM6350_MASTER_NPU_NOC_CFG);
+DEFINE_QNODE(qhs_pdm, SM6350_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SM6350_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SM6350_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SM6350_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qm_cfg, SM6350_SLAVE_QM_CFG, 1, 4);
+DEFINE_QNODE(qhs_qm_mpu_cfg, SM6350_SLAVE_QM_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_qup0, SM6350_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_qup1, SM6350_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SM6350_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_security, SM6350_SLAVE_SECURITY, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SM6350_SLAVE_SNOC_CFG, 1, 4, SM6350_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_tcsr, SM6350_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SM6350_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SM6350_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SM6350_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_venus_throttle_cfg, SM6350_SLAVE_VENUS_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM6350_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(srvc_cnoc, SM6350_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_gemnoc, SM6350_SLAVE_GEM_NOC_CFG, 1, 4, SM6350_MASTER_GEM_NOC_CFG);
+DEFINE_QNODE(qhs_llcc, SM6350_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_mcdma_ms_mpu_cfg, SM6350_SLAVE_MCDMA_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM6350_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_gem_noc_snoc, SM6350_SLAVE_GEM_NOC_SNOC, 1, 8, SM6350_MASTER_GEM_NOC_SNOC);
+DEFINE_QNODE(qns_llcc, SM6350_SLAVE_LLCC, 1, 16, SM6350_MASTER_LLCC);
+DEFINE_QNODE(srvc_gemnoc, SM6350_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(ebi, SM6350_SLAVE_EBI_CH0, 2, 4);
+DEFINE_QNODE(qns_mem_noc_hf, SM6350_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SM6350_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_sf, SM6350_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM6350_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SM6350_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_cal_dp0, SM6350_SLAVE_NPU_CAL_DP0, 1, 4);
+DEFINE_QNODE(qhs_cp, SM6350_SLAVE_NPU_CP, 1, 4);
+DEFINE_QNODE(qhs_dma_bwmon, SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_dpm, SM6350_SLAVE_NPU_DPM, 1, 4);
+DEFINE_QNODE(qhs_isense, SM6350_SLAVE_ISENSE_CFG, 1, 4);
+DEFINE_QNODE(qhs_llm, SM6350_SLAVE_NPU_LLM_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcm, SM6350_SLAVE_NPU_TCM, 1, 4);
+DEFINE_QNODE(qns_npu_sys, SM6350_SLAVE_NPU_COMPUTE_NOC, 2, 32);
+DEFINE_QNODE(srvc_noc, SM6350_SLAVE_SERVICE_NPU_NOC, 1, 4);
+DEFINE_QNODE(qhs_apss, SM6350_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SM6350_SNOC_CNOC_SLV, 1, 8, SM6350_SNOC_CNOC_MAS);
+DEFINE_QNODE(qns_gemnoc_gc, SM6350_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM6350_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SM6350_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM6350_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SM6350_SLAVE_OCIMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SM6350_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SM6350_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_qdss_stm, SM6350_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SM6350_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_thrott_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_emmc, &xm_sdc2, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_sdc2);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_icp_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf, &qxm_mdp0);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+DEFINE_QBCM(bcm_mm3, "MM3", false, &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_gemnoc);
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QUP_0] = &qhm_qup_0,
+ [MASTER_EMMC] = &xm_emmc,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sm6350_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_1] = &qhm_qup_1,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_USB3] = &xm_usb3_0,
+ [A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sm6350_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_mm1,
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_ICP_UNCOMP] = &qxm_camnoc_icp_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static const struct qcom_icc_desc sm6350_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co2,
+ &bcm_co3,
+};
+
+static struct qcom_icc_node * const compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [MASTER_NPU_PROC] = &qxm_npu_dsp,
+ [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm6350_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [SNOC_CNOC_MAS] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_BOOT_ROM] = &qhs_boot_rom,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_thrott_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+ [SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_NPU_CFG] = &qhs_npu_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QM_CFG] = &qhs_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct qcom_icc_desc sm6350_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+};
+
+static const struct qcom_icc_desc sm6350_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_AMPSS_M0] = &acm_apps,
+ [MASTER_SYS_TCU] = &acm_sys_tcu,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GRAPHICS_3D] = &qxm_gpu,
+ [SLAVE_MCDMA_MS_MPU_CFG] = &qhs_mcdma_ms_mpu_cfg,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm6350_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_CAMNOC_HF] = &qxm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm6350_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const npu_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const npu_noc_nodes[] = {
+ [MASTER_NPU_SYS] = &amm_npu_sys,
+ [MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
+ [SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
+ [SLAVE_NPU_CP] = &qhs_cp,
+ [SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
+ [SLAVE_NPU_DPM] = &qhs_dpm,
+ [SLAVE_ISENSE_CFG] = &qhs_isense,
+ [SLAVE_NPU_LLM_CFG] = &qhs_llm,
+ [SLAVE_NPU_TCM] = &qhs_tcm,
+ [SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
+ [SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
+};
+
+static const struct qcom_icc_desc sm6350_npu_noc = {
+ .nodes = npu_noc_nodes,
+ .num_nodes = ARRAY_SIZE(npu_noc_nodes),
+ .bcms = npu_noc_bcms,
+ .num_bcms = ARRAY_SIZE(npu_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn10,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
+ [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SNOC_CNOC_SLV] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm6350_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm6350-aggre1-noc",
+ .data = &sm6350_aggre1_noc},
+ { .compatible = "qcom,sm6350-aggre2-noc",
+ .data = &sm6350_aggre2_noc},
+ { .compatible = "qcom,sm6350-clk-virt",
+ .data = &sm6350_clk_virt},
+ { .compatible = "qcom,sm6350-compute-noc",
+ .data = &sm6350_compute_noc},
+ { .compatible = "qcom,sm6350-config-noc",
+ .data = &sm6350_config_noc},
+ { .compatible = "qcom,sm6350-dc-noc",
+ .data = &sm6350_dc_noc},
+ { .compatible = "qcom,sm6350-gem-noc",
+ .data = &sm6350_gem_noc},
+ { .compatible = "qcom,sm6350-mmss-noc",
+ .data = &sm6350_mmss_noc},
+ { .compatible = "qcom,sm6350-npu-noc",
+ .data = &sm6350_npu_noc},
+ { .compatible = "qcom,sm6350-system-noc",
+ .data = &sm6350_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sm6350",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SM6350 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sm6350.h b/drivers/interconnect/qcom/sm6350.h
new file mode 100644
index 000000000000..43cf2930c88a
--- /dev/null
+++ b/drivers/interconnect/qcom/sm6350.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM6350 interconnect IDs
+ *
+ * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM6350_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM6350_H
+
+#define SM6350_A1NOC_SNOC_MAS 0
+#define SM6350_A1NOC_SNOC_SLV 1
+#define SM6350_A2NOC_SNOC_MAS 2
+#define SM6350_A2NOC_SNOC_SLV 3
+#define SM6350_MASTER_A1NOC_CFG 4
+#define SM6350_MASTER_A2NOC_CFG 5
+#define SM6350_MASTER_AMPSS_M0 6
+#define SM6350_MASTER_CAMNOC_HF 7
+#define SM6350_MASTER_CAMNOC_HF0_UNCOMP 8
+#define SM6350_MASTER_CAMNOC_ICP 9
+#define SM6350_MASTER_CAMNOC_ICP_UNCOMP 10
+#define SM6350_MASTER_CAMNOC_SF 11
+#define SM6350_MASTER_CAMNOC_SF_UNCOMP 12
+#define SM6350_MASTER_CNOC_DC_NOC 13
+#define SM6350_MASTER_CNOC_MNOC_CFG 14
+#define SM6350_MASTER_COMPUTE_NOC 15
+#define SM6350_MASTER_CRYPTO_CORE_0 16
+#define SM6350_MASTER_EMMC 17
+#define SM6350_MASTER_GEM_NOC_CFG 18
+#define SM6350_MASTER_GEM_NOC_SNOC 19
+#define SM6350_MASTER_GIC 20
+#define SM6350_MASTER_GRAPHICS_3D 21
+#define SM6350_MASTER_IPA 22
+#define SM6350_MASTER_LLCC 23
+#define SM6350_MASTER_MDP_PORT0 24
+#define SM6350_MASTER_MNOC_HF_MEM_NOC 25
+#define SM6350_MASTER_MNOC_SF_MEM_NOC 26
+#define SM6350_MASTER_NPU 27
+#define SM6350_MASTER_NPU_NOC_CFG 28
+#define SM6350_MASTER_NPU_PROC 29
+#define SM6350_MASTER_NPU_SYS 30
+#define SM6350_MASTER_PIMEM 31
+#define SM6350_MASTER_QDSS_BAM 32
+#define SM6350_MASTER_QDSS_DAP 33
+#define SM6350_MASTER_QDSS_ETR 34
+#define SM6350_MASTER_QUP_0 35
+#define SM6350_MASTER_QUP_1 36
+#define SM6350_MASTER_QUP_CORE_0 37
+#define SM6350_MASTER_QUP_CORE_1 38
+#define SM6350_MASTER_SDCC_2 39
+#define SM6350_MASTER_SNOC_CFG 40
+#define SM6350_MASTER_SNOC_GC_MEM_NOC 41
+#define SM6350_MASTER_SNOC_SF_MEM_NOC 42
+#define SM6350_MASTER_SYS_TCU 43
+#define SM6350_MASTER_UFS_MEM 44
+#define SM6350_MASTER_USB3 45
+#define SM6350_MASTER_VIDEO_P0 46
+#define SM6350_MASTER_VIDEO_PROC 47
+#define SM6350_SLAVE_A1NOC_CFG 48
+#define SM6350_SLAVE_A2NOC_CFG 49
+#define SM6350_SLAVE_AHB2PHY 50
+#define SM6350_SLAVE_AHB2PHY_2 51
+#define SM6350_SLAVE_AOSS 52
+#define SM6350_SLAVE_APPSS 53
+#define SM6350_SLAVE_BOOT_ROM 54
+#define SM6350_SLAVE_CAMERA_CFG 55
+#define SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG 56
+#define SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG 57
+#define SM6350_SLAVE_CAMNOC_UNCOMP 58
+#define SM6350_SLAVE_CDSP_GEM_NOC 59
+#define SM6350_SLAVE_CLK_CTL 60
+#define SM6350_SLAVE_CNOC_DDRSS 61
+#define SM6350_SLAVE_CNOC_MNOC_CFG 62
+#define SM6350_SLAVE_CNOC_MSS 63
+#define SM6350_SLAVE_CRYPTO_0_CFG 64
+#define SM6350_SLAVE_DCC_CFG 65
+#define SM6350_SLAVE_DISPLAY_CFG 66
+#define SM6350_SLAVE_DISPLAY_THROTTLE_CFG 67
+#define SM6350_SLAVE_EBI_CH0 68
+#define SM6350_SLAVE_EMMC_CFG 69
+#define SM6350_SLAVE_GEM_NOC_CFG 70
+#define SM6350_SLAVE_GEM_NOC_SNOC 71
+#define SM6350_SLAVE_GLM 72
+#define SM6350_SLAVE_GRAPHICS_3D_CFG 73
+#define SM6350_SLAVE_IMEM_CFG 74
+#define SM6350_SLAVE_IPA_CFG 75
+#define SM6350_SLAVE_ISENSE_CFG 76
+#define SM6350_SLAVE_LLCC 77
+#define SM6350_SLAVE_LLCC_CFG 78
+#define SM6350_SLAVE_MCDMA_MS_MPU_CFG 79
+#define SM6350_SLAVE_MNOC_HF_MEM_NOC 80
+#define SM6350_SLAVE_MNOC_SF_MEM_NOC 81
+#define SM6350_SLAVE_MSS_PROC_MS_MPU_CFG 82
+#define SM6350_SLAVE_NPU_CAL_DP0 83
+#define SM6350_SLAVE_NPU_CFG 84
+#define SM6350_SLAVE_NPU_COMPUTE_NOC 85
+#define SM6350_SLAVE_NPU_CP 86
+#define SM6350_SLAVE_NPU_DPM 87
+#define SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG 88
+#define SM6350_SLAVE_NPU_LLM_CFG 89
+#define SM6350_SLAVE_NPU_TCM 90
+#define SM6350_SLAVE_OCIMEM 91
+#define SM6350_SLAVE_PDM 92
+#define SM6350_SLAVE_PIMEM 93
+#define SM6350_SLAVE_PIMEM_CFG 94
+#define SM6350_SLAVE_PRNG 95
+#define SM6350_SLAVE_QDSS_CFG 96
+#define SM6350_SLAVE_QDSS_STM 97
+#define SM6350_SLAVE_QM_CFG 98
+#define SM6350_SLAVE_QM_MPU_CFG 99
+#define SM6350_SLAVE_QUP_0 100
+#define SM6350_SLAVE_QUP_1 101
+#define SM6350_SLAVE_QUP_CORE_0 102
+#define SM6350_SLAVE_QUP_CORE_1 103
+#define SM6350_SLAVE_RBCPR_CX_CFG 104
+#define SM6350_SLAVE_RBCPR_MX_CFG 105
+#define SM6350_SLAVE_SDCC_2 106
+#define SM6350_SLAVE_SECURITY 107
+#define SM6350_SLAVE_SERVICE_A1NOC 108
+#define SM6350_SLAVE_SERVICE_A2NOC 109
+#define SM6350_SLAVE_SERVICE_CNOC 110
+#define SM6350_SLAVE_SERVICE_GEM_NOC 111
+#define SM6350_SLAVE_SERVICE_MNOC 112
+#define SM6350_SLAVE_SERVICE_NPU_NOC 113
+#define SM6350_SLAVE_SERVICE_SNOC 114
+#define SM6350_SLAVE_SNOC_CFG 115
+#define SM6350_SLAVE_SNOC_GEM_NOC_GC 116
+#define SM6350_SLAVE_SNOC_GEM_NOC_SF 117
+#define SM6350_SLAVE_TCSR 118
+#define SM6350_SLAVE_TCU 119
+#define SM6350_SLAVE_UFS_MEM_CFG 120
+#define SM6350_SLAVE_USB3 121
+#define SM6350_SLAVE_VENUS_CFG 122
+#define SM6350_SLAVE_VENUS_THROTTLE_CFG 123
+#define SM6350_SLAVE_VSENSE_CTRL_CFG 124
+#define SM6350_SNOC_CNOC_MAS 125
+#define SM6350_SNOC_CNOC_SLV 126
+
+#endif
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index 7e3d372b712f..e821fd0b2f66 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -12,6 +12,7 @@
#include <dt-bindings/interconnect/qcom,sm8450.h>
#include "bcm-voter.h"
+#include "icc-common.h"
#include "icc-rpmh.h"
#include "sm8450.h"
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c79a0df090c0..5c5cb5bee8b6 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -363,6 +363,16 @@ config ARM_SMMU_QCOM
When running on a Qualcomm platform that has the custom variant
of the ARM SMMU, this needs to be built into the SMMU driver.
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM
+ help
+ Support for implementation specific debug features in ARM SMMU
+ hardware found in QTI platforms.
+
+ Say Y here to enable debugging of issues such as TLB sync timeouts,
+ which require implementation-defined register dumps.
+
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1ab31074f5b3..84e5bb1bf01b 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -13,12 +13,13 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
+extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -114,10 +115,17 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
amd_iommu_domain_set_pt_root(domain, 0);
}
+static inline int get_pci_sbdf_id(struct pci_dev *pdev)
+{
+ int seg = pci_domain_nr(pdev->bus);
+ u16 devid = pci_dev_id(pdev);
+
+ return PCI_SEG_DEVID_TO_SBDF(seg, devid);
+}
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
-extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+extern int __init add_special_device(u8 type, u8 id, u32 *devid,
bool cmd_line);
#ifdef CONFIG_DMI
@@ -128,4 +136,10 @@ static inline void amd_iommu_apply_ivrs_quirks(void) { }
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
+extern struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
+
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
+extern bool amd_iommu_snp_en;
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 72d0f5e2f651..5b1019dab328 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -67,6 +67,7 @@
#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
+#define MMIO_EXT_FEATURES2 0x01A0
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -102,6 +103,12 @@
#define FEATURE_GLXVAL_SHIFT 14
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+/* Extended Feature 2 Bits */
+#define FEATURE_SNPAVICSUP_SHIFT 5
+#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
+#define FEATURE_SNPAVICSUP_GAM(x) \
+ ((((x) & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT) == 0x1)
+
/* Note:
* The current driver only support 16-bit PASID.
* Currently, hardware only implement upto 16-bit PASID
@@ -143,27 +150,28 @@
#define EVENT_FLAG_I 0x008
/* feature control bits */
-#define CONTROL_IOMMU_EN 0x00ULL
-#define CONTROL_HT_TUN_EN 0x01ULL
-#define CONTROL_EVT_LOG_EN 0x02ULL
-#define CONTROL_EVT_INT_EN 0x03ULL
-#define CONTROL_COMWAIT_EN 0x04ULL
-#define CONTROL_INV_TIMEOUT 0x05ULL
-#define CONTROL_PASSPW_EN 0x08ULL
-#define CONTROL_RESPASSPW_EN 0x09ULL
-#define CONTROL_COHERENT_EN 0x0aULL
-#define CONTROL_ISOC_EN 0x0bULL
-#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPRLOG_EN 0x0dULL
-#define CONTROL_PPRINT_EN 0x0eULL
-#define CONTROL_PPR_EN 0x0fULL
-#define CONTROL_GT_EN 0x10ULL
-#define CONTROL_GA_EN 0x11ULL
-#define CONTROL_GAM_EN 0x19ULL
-#define CONTROL_GALOG_EN 0x1CULL
-#define CONTROL_GAINT_EN 0x1DULL
-#define CONTROL_XT_EN 0x32ULL
-#define CONTROL_INTCAPXT_EN 0x33ULL
+#define CONTROL_IOMMU_EN 0
+#define CONTROL_HT_TUN_EN 1
+#define CONTROL_EVT_LOG_EN 2
+#define CONTROL_EVT_INT_EN 3
+#define CONTROL_COMWAIT_EN 4
+#define CONTROL_INV_TIMEOUT 5
+#define CONTROL_PASSPW_EN 8
+#define CONTROL_RESPASSPW_EN 9
+#define CONTROL_COHERENT_EN 10
+#define CONTROL_ISOC_EN 11
+#define CONTROL_CMDBUF_EN 12
+#define CONTROL_PPRLOG_EN 13
+#define CONTROL_PPRINT_EN 14
+#define CONTROL_PPR_EN 15
+#define CONTROL_GT_EN 16
+#define CONTROL_GA_EN 17
+#define CONTROL_GAM_EN 25
+#define CONTROL_GALOG_EN 28
+#define CONTROL_GAINT_EN 29
+#define CONTROL_XT_EN 50
+#define CONTROL_INTCAPXT_EN 51
+#define CONTROL_SNPAVIC_EN 61
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -445,8 +453,6 @@ struct irq_remap_table {
u32 *table;
};
-extern struct irq_remap_table **irq_lookup_table;
-
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
@@ -456,6 +462,16 @@ extern bool amdr_ivrs_remap_support;
/* kmem_cache to get tables with 128 byte alignement */
extern struct kmem_cache *amd_iommu_irq_cache;
+#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
+#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
+#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
+ ((devid) & 0xffff))
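+/*
+ * Illustrative sketch of the encoding (example values only, not taken from
+ * the change itself): a device at 0003:00:1f.6 has seg = 0x0003 and
+ * devid = 0x00fe, so PCI_SEG_DEVID_TO_SBDF(0x0003, 0x00fe) yields
+ * 0x000300fe, from which PCI_SBDF_TO_SEGID() and PCI_SBDF_TO_DEVID()
+ * recover 0x0003 and 0x00fe respectively.
+ */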
+
+/* Make iterating over all pci segment easier */
+#define for_each_pci_segment(pci_seg) \
+ list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+ list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
/*
* Make iterating over all IOMMUs easier
*/
@@ -478,13 +494,14 @@ extern struct kmem_cache *amd_iommu_irq_cache;
struct amd_iommu_fault {
u64 address; /* IO virtual address of the fault*/
u32 pasid; /* Address space identifier */
- u16 device_id; /* Originating PCI device id */
+ u32 sbdf; /* Originating PCI device id */
u16 tag; /* PPR tag */
u16 flags; /* Fault flags */
};
+struct amd_iommu;
struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;
@@ -531,6 +548,75 @@ struct protection_domain {
};
/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+ /* List with all PCI segments in the system */
+ struct list_head list;
+
+ /* List of all available dev_data structures */
+ struct llist_head dev_data_list;
+
+ /* PCI segment number */
+ u16 id;
+
+ /* Largest PCI device id we expect translation requests for */
+ u16 last_bdf;
+
+ /* Size of the device table */
+ u32 dev_table_size;
+
+ /* Size of the alias table */
+ u32 alias_table_size;
+
+ /* Size of the rlookup table */
+ u32 rlookup_table_size;
+
+ /*
+ * device table virtual address
+ *
+ * Pointer to the per PCI segment device table.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+ struct dev_table_entry *dev_table;
+
+ /*
+ * The rlookup iommu table is used to find the IOMMU which is
+ * responsible for a specific device. It is indexed by the PCI
+ * device id.
+ */
+ struct amd_iommu **rlookup_table;
+
+ /*
+ * This table is used to find the irq remapping table for a given
+ * device id quickly.
+ */
+ struct irq_remap_table **irq_lookup_table;
+
+ /*
+ * Pointer to a device table to which the contents of the old device
+ * table will be copied. It is only used in the kdump kernel.
+ */
+ struct dev_table_entry *old_dev_tbl_cpy;
+
+ /*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+ u16 *alias_table;
+
+ /*
+ * A list of required unity mappings we find in ACPI. It is not locked
+ * because at runtime it is only read. It is created at ACPI table
+ * parsing time.
+ */
+ struct list_head unity_map;
+};
+
+/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
*/
@@ -567,6 +653,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* Extended features 2 */
+ u64 features2;
+
/* IOMMUv2 */
bool is_iommu_v2;
@@ -581,7 +670,7 @@ struct amd_iommu {
u16 cap_ptr;
/* pci domain of this IOMMU */
- u16 pci_seg;
+ struct amd_iommu_pci_seg *pci_seg;
/* start of exclusion range of that IOMMU */
u64 exclusion_start;
@@ -666,8 +755,8 @@ struct acpihid_map_entry {
struct list_head list;
u8 uid[ACPIHID_UID_LEN];
u8 hid[ACPIHID_HID_LEN];
- u16 devid;
- u16 root_devid;
+ u32 devid;
+ u32 root_devid;
bool cmd_line;
struct iommu_group *group;
};
@@ -675,7 +764,7 @@ struct acpihid_map_entry {
struct devid_map {
struct list_head list;
u8 id;
- u16 devid;
+ u32 devid;
bool cmd_line;
};
@@ -689,7 +778,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
- struct pci_dev *pdev;
+ struct device *dev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
struct {
@@ -710,6 +799,12 @@ extern struct list_head hpet_map;
extern struct list_head acpihid_map;
/*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
+/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
*/
@@ -749,38 +844,12 @@ struct unity_map_entry {
};
/*
- * List of all unity mappings. It is not locked because as runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
* Data structures for device handling
*/
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to find requestor ids to device ids. Not locked because only
- * read on runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -913,6 +982,7 @@ struct irq_2_irte {
struct amd_ir_data {
u32 cached_ga_tag;
+ struct amd_iommu *iommu;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */
@@ -930,9 +1000,9 @@ struct amd_ir_data {
struct amd_irte_ops {
void (*prepare)(void *, u32, bool, u8, u32, int);
- void (*activate)(void *, u16, u16);
- void (*deactivate)(void *, u16, u16);
- void (*set_affinity)(void *, u16, u16, u8, u32);
+ void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
void *(*get)(struct irq_remap_table *, int);
void (*set_allocated)(struct irq_remap_table *, int);
bool (*is_allocated)(struct irq_remap_table *, int);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1d08f87e734b..fdc642362c14 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -84,6 +84,10 @@
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 2000000
+
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
+ | ((dev & 0x1f) << 3) | (fn & 0x7))
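+/*
+ * Illustrative layout (example values only): bits 31:16 carry the PCI
+ * segment, 15:8 the bus, 7:3 the device and 2:0 the function, so
+ * IVRS_GET_SBDF_ID(0, 0x14, 0x00, 0x5) evaluates to 0x00001405.
+ */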
+
/*
* ACPI table definitions
*
@@ -110,7 +114,7 @@ struct ivhd_header {
/* Following only valid on IVHD type 11h and 40h */
u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
- u64 res;
+ u64 efr_reg2;
} __attribute__((packed));
/*
@@ -141,7 +145,8 @@ struct ivmd_header {
u16 length;
u16 devid;
u16 aux;
- u64 resv;
+ u16 pci_seg;
+ u8 resv[6];
u64 range_start;
u64 range_length;
} __attribute__((packed));
@@ -159,11 +164,15 @@ static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;
-u16 amd_iommu_last_bdf; /* largest PCI device id we have
- to handle */
-LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
- we find in ACPI */
+/* Global EFR and EFR2 registers */
+u64 amd_iommu_efr;
+u64 amd_iommu_efr2;
+/* Set to true when SNP is enabled on the system */
+bool amd_iommu_snp_en;
+EXPORT_SYMBOL(amd_iommu_snp_en);
+
+LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -186,47 +195,11 @@ bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-/*
- * Pointer to a device table which the content of old device table
- * will be copied to. It's only be used in kdump kernel.
- */
-static struct dev_table_entry *old_dev_tbl_cpy;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
-/*
- * The rlookup table is used to find the IOMMU which is responsible
- * for a specific device. It is also indexed by the PCI device id.
- */
-struct amd_iommu **amd_iommu_rlookup_table;
-
-/*
- * This table is used to find the irq remapping table for a given device id
- * quickly.
- */
-struct irq_remap_table **irq_lookup_table;
-
-/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
-static u32 dev_table_size; /* size of the device table */
-static u32 alias_table_size; /* size of the alias table */
-static u32 rlookup_table_size; /* size if the rlookup table */
-
enum iommu_init_state {
IOMMU_START_STATE,
IOMMU_IVRS_DETECTED,
@@ -256,7 +229,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
-static void init_device_table_dma(void);
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
static bool amd_iommu_pre_enabled = true;
@@ -281,16 +254,10 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline void update_last_devid(u16 devid)
-{
- if (devid > amd_iommu_last_bdf)
- amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
{
unsigned shift = PAGE_SHIFT +
- get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+ get_order((last_bdf + 1) * entry_size);
return 1UL << shift;
}
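+/*
+ * Minimal illustration (assuming 4K pages and the driver's 32-byte device
+ * table entries): tbl_size(DEV_TABLE_ENTRY_SIZE, 0xffff) computes
+ * get_order(0x10000 * 32) = 9, so it returns 1UL << (12 + 9) = 2 MiB
+ * as the per PCI segment device table allocation size.
+ */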
@@ -300,21 +267,46 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
-#ifdef CONFIG_IRQ_REMAP
-static bool check_feature_on_all_iommus(u64 mask)
+/*
+ * Iterate through all the IOMMUs to compute the common EFR
+ * masks and warn if any inconsistency is found.
+ */
+static void get_global_efr(void)
{
- bool ret = false;
struct amd_iommu *iommu;
for_each_iommu(iommu) {
- ret = iommu_feature(iommu, mask);
- if (!ret)
- return false;
+ u64 tmp = iommu->features;
+ u64 tmp2 = iommu->features2;
+
+ if (list_is_first(&iommu->list, &amd_iommu_list)) {
+ amd_iommu_efr = tmp;
+ amd_iommu_efr2 = tmp2;
+ continue;
+ }
+
+ if (amd_iommu_efr == tmp &&
+ amd_iommu_efr2 == tmp2)
+ continue;
+
+ pr_err(FW_BUG
+ "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
+ tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
+ iommu->index, iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
+ PCI_FUNC(iommu->devid));
+
+ amd_iommu_efr &= tmp;
+ amd_iommu_efr2 &= tmp2;
}
- return true;
+ pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
+}
+
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ return !!(amd_iommu_efr & mask);
}
-#endif
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
@@ -324,8 +316,10 @@ static bool check_feature_on_all_iommus(u64 mask)
static void __init early_iommu_features_init(struct amd_iommu *iommu,
struct ivhd_header *h)
{
- if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
iommu->features = h->efr_reg;
+ iommu->features2 = h->efr_reg2;
+ }
if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
amdr_ivrs_remap_support = true;
}
@@ -399,7 +393,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
u64 entry = start & PM_ADDR_MASK;
- if (!iommu_feature(iommu, FEATURE_SNP))
+ if (!check_feature_on_all_iommus(FEATURE_SNP))
return;
/* Note:
@@ -421,10 +415,12 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
+ u32 dev_table_size = iommu->pci_seg->dev_table_size;
+ void *dev_table = (void *)get_dev_table(iommu);
BUG_ON(iommu->mmio_base == NULL);
- entry = iommu_virt_to_phys(amd_iommu_dev_table);
+ entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
&entry, sizeof(entry));
@@ -557,6 +553,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
u8 *p = (void *)h, *end = (void *)h;
struct ivhd_entry *dev;
+ int last_devid = -EINVAL;
u32 ivhd_size = get_ivhd_header_size(h);
@@ -573,14 +570,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
switch (dev->type) {
case IVHD_DEV_ALL:
/* Use maximum BDF value for DEV_ALL */
- update_last_devid(0xffff);
- break;
+ return 0xffff;
case IVHD_DEV_SELECT:
case IVHD_DEV_RANGE_END:
case IVHD_DEV_ALIAS:
case IVHD_DEV_EXT_SELECT:
/* all the above subfield types refer to device ids */
- update_last_devid(dev->devid);
+ if (dev->devid > last_devid)
+ last_devid = dev->devid;
break;
default:
break;
@@ -590,7 +587,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
WARN_ON(p != end);
- return 0;
+ return last_devid;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
@@ -614,38 +611,125 @@ static int __init check_ivrs_checksum(struct acpi_table_header *table)
* id which we need to handle. This is the first of three functions which parse
* the ACPI table. So we check the checksum here.
*/
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
+static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
{
u8 *p = (u8 *)table, *end = (u8 *)table;
struct ivhd_header *h;
+ int last_devid, last_bdf = 0;
p += IVRS_HEADER_LENGTH;
end += table->length;
while (p < end) {
h = (struct ivhd_header *)p;
- if (h->type == amd_iommu_target_ivhd_type) {
- int ret = find_last_devid_from_ivhd(h);
-
- if (ret)
- return ret;
+ if (h->pci_seg == pci_seg &&
+ h->type == amd_iommu_target_ivhd_type) {
+ last_devid = find_last_devid_from_ivhd(h);
+
+ if (last_devid < 0)
+ return -EINVAL;
+ if (last_devid > last_bdf)
+ last_bdf = last_devid;
}
p += h->length;
}
WARN_ON(p != end);
- return 0;
+ return last_bdf;
}
/****************************************************************************
*
* The following functions belong to the code path which parses the ACPI table
* the second time. In this ACPI parsing iteration we allocate IOMMU specific
- * data structures, initialize the device/alias/rlookup table and also
- * basically initialize the hardware.
+ * data structures, initialize the per PCI segment device/alias/rlookup table
+ * and also basically initialize the hardware.
*
****************************************************************************/
+/* Allocate per PCI segment device table */
+static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
+ if (!pci_seg->dev_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = NULL;
+}
+
+/* Allocate per PCI segment IOMMU rlookup table. */
+static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ if (pci_seg->rlookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->rlookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = NULL;
+}
+
+static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->irq_lookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ kmemleak_alloc(pci_seg->irq_lookup_table,
+ pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ if (pci_seg->irq_lookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ kmemleak_free(pci_seg->irq_lookup_table);
+ free_pages((unsigned long)pci_seg->irq_lookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->irq_lookup_table = NULL;
+}
+
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ int i;
+
+ pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(pci_seg->alias_table_size));
+ if (!pci_seg->alias_table)
+ return -ENOMEM;
+
+ /*
+ * Let each alias entry point to itself.
+ */
+ for (i = 0; i <= pci_seg->last_bdf; ++i)
+ pci_seg->alias_table[i] = i;
+
+ return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->alias_table,
+ get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = NULL;
+}
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -724,7 +808,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
void *buf = (void *)__get_free_pages(gfp, order);
if (buf &&
- iommu_feature(iommu, FEATURE_SNP) &&
+ check_feature_on_all_iommus(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
free_pages((unsigned long)buf, order);
buf = NULL;
@@ -815,20 +899,15 @@ static void free_ga_log(struct amd_iommu *iommu)
#endif
}
+#ifdef CONFIG_IRQ_REMAP
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
u32 status, i;
u64 entry;
if (!iommu->ga_log)
return -EINVAL;
- /* Check if already running */
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
- return 0;
-
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
@@ -852,13 +931,12 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
if (WARN_ON(i >= LOOP_TIMEOUT))
return -EINVAL;
-#endif /* CONFIG_IRQ_REMAP */
+
return 0;
}
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
@@ -876,10 +954,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-#else
- return 0;
-#endif /* CONFIG_IRQ_REMAP */
}
+#endif /* CONFIG_IRQ_REMAP */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
@@ -916,56 +992,59 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void set_dev_entry_bit(u16 devid, u8 bit)
+static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
+ dev_table[devid].data[i] |= (1UL << _bit);
+}
+
+static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ return __set_dev_entry_bit(dev_table, devid, bit);
}
-static int get_dev_entry_bit(u16 devid, u8 bit)
+static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
+ return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
+static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
-static bool copy_device_table(void)
+ return __get_dev_entry_bit(dev_table, devid, bit);
+}
+
+static bool __copy_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
+ u64 int_ctl, int_tab_len, entry = 0;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
struct dev_table_entry *old_devtb = NULL;
u32 lo, hi, devid, old_devtb_size;
phys_addr_t old_devtb_phys;
- struct amd_iommu *iommu;
u16 dom_id, dte_v, irq_v;
gfp_t gfp_flag;
u64 tmp;
- if (!amd_iommu_pre_enabled)
- return false;
-
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
- for_each_iommu(iommu) {
- /* All IOMMUs should use the same device table with the same size */
- lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
- hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
- entry = (((u64) hi) << 32) + lo;
- if (last_entry && last_entry != entry) {
- pr_err("IOMMU:%d should use the same dev table as others!\n",
- iommu->index);
- return false;
- }
- last_entry = entry;
+ /* Each IOMMU uses a separate device table of the same size */
+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+ hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+ entry = (((u64) hi) << 32) + lo;
- old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
- if (old_devtb_size != dev_table_size) {
- pr_err("The device table size of IOMMU:%d is not expected!\n",
- iommu->index);
- return false;
- }
+ old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+ if (old_devtb_size != pci_seg->dev_table_size) {
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
+ iommu->index);
+ return false;
}
/*
@@ -981,38 +1060,38 @@ static bool copy_device_table(void)
}
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
- dev_table_size)
- : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+ pci_seg->dev_table_size)
+ : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
if (!old_devtb)
return false;
gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
- old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
- get_order(dev_table_size));
- if (old_dev_tbl_cpy == NULL) {
+ pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+ get_order(pci_seg->dev_table_size));
+ if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
return false;
}
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- old_dev_tbl_cpy[devid] = old_devtb[devid];
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
if (dte_v && dom_id) {
- old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+ pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+ pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
tmp |= DTE_FLAG_GV;
- old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1027,7 +1106,7 @@ static bool copy_device_table(void)
return false;
}
- old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+ pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
}
}
memunmap(old_devtb);
@@ -1035,21 +1114,42 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(u16 devid)
+static bool copy_device_table(void)
{
- int sysmgt;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
- sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+ if (!amd_iommu_pre_enabled)
+ return false;
- if (sysmgt == 0x01)
- set_dev_entry_bit(devid, DEV_ENTRY_IW);
+ pr_warn("Translation is already enabled - trying to copy translation structures\n");
+
+ /*
+ * All IOMMUs within a PCI segment share a common device table.
+ * Hence copy the device table only once per PCI segment.
+ */
+ for_each_pci_segment(pci_seg) {
+ for_each_iommu(iommu) {
+ if (pci_seg->id != iommu->pci_seg->id)
+ continue;
+ if (!__copy_device_table(iommu))
+ return false;
+ break;
+ }
+ }
+
+ return true;
}
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
{
- amd_iommu_rlookup_table[devid] = iommu;
+ int sysmgt;
+
+ sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
+ (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+
+ if (sysmgt == 0x01)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
}
/*
@@ -1060,26 +1160,26 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
u16 devid, u32 flags, u32 ext_flags)
{
if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
- set_iommu_for_device(iommu, devid);
+ amd_iommu_set_rlookup_table(iommu, devid);
}
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
@@ -1116,7 +1216,7 @@ int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
return 0;
}
-static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
bool cmd_line)
{
struct acpihid_map_entry *entry;
@@ -1195,10 +1295,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 devid = 0, devid_start = 0, devid_to = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u32 ivhd_size;
int ret;
@@ -1230,19 +1331,21 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
while (p < end) {
e = (struct ivhd_entry *)p;
+ seg_id = pci_seg->id;
+
switch (e->type) {
case IVHD_DEV_ALL:
DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
- for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+ for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1253,8 +1356,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SELECT_RANGE_START:
DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1266,9 +1369,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
@@ -1280,18 +1383,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
devid_to = e->ext >> 8;
set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
- amd_iommu_alias_table[devid] = devid_to;
+ pci_seg->alias_table[devid] = devid_to;
break;
case IVHD_DEV_ALIAS_RANGE:
DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %02x:%02x.%x flags: %02x "
- "devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x "
+ "devid_to: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS_NUM(e->ext >> 8),
+ seg_id, PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -1303,9 +1406,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1317,8 +1420,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_EXT_SELECT_RANGE:
DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1330,15 +1433,15 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
if (alias) {
- amd_iommu_alias_table[dev_i] = devid_to;
+ pci_seg->alias_table[dev_i] = devid_to;
set_dev_entry_from_acpi(iommu,
devid_to, flags, ext_flags);
}
@@ -1349,11 +1452,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SPECIAL: {
u8 handle, type;
const char *var;
- u16 devid;
+ u32 devid;
int ret;
handle = e->ext & 0xff;
- devid = (e->ext >> 8) & 0xffff;
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
type = (e->ext >> 24) & 0xff;
if (type == IVHD_SPECIAL_IOAPIC)
@@ -1363,9 +1466,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
var, (int)handle,
- PCI_BUS_NUM(devid),
+ seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1383,7 +1486,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
case IVHD_DEV_ACPI_HID: {
- u16 devid;
+ u32 devid;
u8 hid[ACPIHID_HID_LEN];
u8 uid[ACPIHID_UID_LEN];
int ret;
@@ -1426,9 +1529,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- devid = e->devid;
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
- hid, uid,
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1458,6 +1561,74 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
return 0;
}
+/* Allocate PCI segment data structure */
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int last_bdf;
+
+ /*
+ * First parse ACPI tables to find the largest Bus/Dev/Func we need to
+ * handle in this PCI segment. Based on this information, the shared data
+ * structures for the PCI segment are then allocated.
+ */
+ last_bdf = find_last_devid_acpi(ivrs_base, id);
+ if (last_bdf < 0)
+ return NULL;
+
+ pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
+ if (pci_seg == NULL)
+ return NULL;
+
+ pci_seg->last_bdf = last_bdf;
+ DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+ pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+
+ pci_seg->id = id;
+ init_llist_head(&pci_seg->dev_data_list);
+ INIT_LIST_HEAD(&pci_seg->unity_map);
+ list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
+
+ if (alloc_dev_table(pci_seg))
+ return NULL;
+ if (alloc_alias_table(pci_seg))
+ return NULL;
+ if (alloc_rlookup_table(pci_seg))
+ return NULL;
+
+ return pci_seg;
+}
+
+static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == id)
+ return pci_seg;
+ }
+
+ return alloc_pci_segment(id, ivrs_base);
+}
+
+static void __init free_pci_segments(void)
+{
+ struct amd_iommu_pci_seg *pci_seg, *next;
+
+ for_each_pci_segment_safe(pci_seg, next) {
+ list_del(&pci_seg->list);
+ free_irq_lookup_table(pci_seg);
+ free_rlookup_table(pci_seg);
+ free_alias_table(pci_seg);
+ free_dev_table(pci_seg);
+ kfree(pci_seg);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_cwwb_sem(iommu);
@@ -1542,9 +1713,15 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
*/
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
+ struct acpi_table_header *ivrs_base)
{
- int ret;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+ iommu->pci_seg = pci_seg;
raw_spin_lock_init(&iommu->lock);
iommu->cmd_sem_val = 0;
@@ -1566,7 +1743,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
*/
iommu->devid = h->devid;
iommu->cap_ptr = h->cap_ptr;
- iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
switch (h->type) {
@@ -1621,6 +1797,13 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
+ return init_iommu_from_acpi(iommu, h);
+}
+
+static int __init init_iommu_one_late(struct amd_iommu *iommu)
+{
+ int ret;
+
if (alloc_cwwb_sem(iommu))
return -ENOMEM;
@@ -1642,10 +1825,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (amd_iommu_pre_enabled)
amd_iommu_pre_enabled = translation_pre_enabled(iommu);
- ret = init_iommu_from_acpi(iommu, h);
- if (ret)
- return ret;
-
if (amd_iommu_irq_remap) {
ret = amd_iommu_create_irq_domain(iommu);
if (ret)
@@ -1656,7 +1835,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
* Make sure IOMMU is not considered to translate itself. The IVRS
* table tells us so, but this is a lie!
*/
- amd_iommu_rlookup_table[iommu->devid] = NULL;
+ iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
return 0;
}
@@ -1701,15 +1880,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
end += table->length;
p += IVRS_HEADER_LENGTH;
+ /* Phase 1: Process all IVHD blocks */
while (p < end) {
h = (struct ivhd_header *)p;
if (*p == amd_iommu_target_ivhd_type) {
- DUMP_printk("device: %02x:%02x.%01x cap: %04x "
- "seg: %d flags: %01x info %04x\n",
- PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
- PCI_FUNC(h->devid), h->cap_ptr,
- h->pci_seg, h->flags, h->info);
+ DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
+ "flags: %01x info %04x\n",
+ h->pci_seg, PCI_BUS_NUM(h->devid),
+ PCI_SLOT(h->devid), PCI_FUNC(h->devid),
+ h->cap_ptr, h->flags, h->info);
DUMP_printk(" mmio-addr: %016llx\n",
h->mmio_phys);
@@ -1717,7 +1897,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
if (iommu == NULL)
return -ENOMEM;
- ret = init_iommu_one(iommu, h);
+ ret = init_iommu_one(iommu, h, table);
if (ret)
return ret;
}
@@ -1726,6 +1906,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
}
WARN_ON(p != end);
+ /* Phase 2: Early feature support check */
+ get_global_efr();
+
+ /* Phase 3: Enabling IOMMU features */
+ for_each_iommu(iommu) {
+ ret = init_iommu_one_late(iommu);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1762,7 +1952,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
char *buf)
{
struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->features);
+ return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
@@ -1789,16 +1979,18 @@ static const struct attribute_group *amd_iommu_groups[] = {
*/
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
- u64 features;
+ u64 features, features2;
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return;
/* read extended feature bits */
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
if (!iommu->features) {
iommu->features = features;
+ iommu->features2 = features2;
return;
}
@@ -1806,9 +1998,13 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* Sanity check and warn if EFR values from
* IVHD and MMIO conflict.
*/
- if (features != iommu->features)
- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
- features, iommu->features);
+ if (features != iommu->features ||
+ features2 != iommu->features2) {
+ pr_warn(FW_WARN
+ "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
+ features, iommu->features,
+ features2, iommu->features2);
+ }
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
@@ -1816,7 +2012,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int cap_ptr = iommu->cap_ptr;
int ret;
- iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
+ iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;
@@ -1863,10 +2060,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga_log(iommu);
- if (ret)
- return ret;
-
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
pr_info("Using strict mode due to virtualization\n");
iommu_set_dma_strict();
@@ -1879,7 +2072,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int i, j;
iommu->root_pdev =
- pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
+ pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ iommu->dev->bus->number,
PCI_DEVFN(0, 0));
/*
@@ -1906,8 +2100,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
- iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
+ if (ret)
+ return ret;
+
iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
@@ -1928,7 +2125,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("Extended features (%#llx):", iommu->features);
+ pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
@@ -1938,13 +2135,14 @@ static void print_iommu_info(void)
if (iommu->features & FEATURE_GAM_VAPIC)
pr_cont(" GA_vAPIC");
+ if (iommu->features & FEATURE_SNP)
+ pr_cont(" SNP");
+
pr_cont("\n");
}
}
if (irq_remapping_enabled) {
pr_info("Interrupt remapping enabled\n");
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- pr_info("Virtual APIC enabled\n");
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
@@ -1953,6 +2151,7 @@ static void print_iommu_info(void)
static int __init amd_iommu_init_pci(void)
{
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
int ret;
for_each_iommu(iommu) {
@@ -1983,7 +2182,8 @@ static int __init amd_iommu_init_pci(void)
goto out;
}
- init_device_table_dma();
+ for_each_pci_segment(pci_seg)
+ init_device_table_dma(pci_seg);
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
@@ -2232,9 +2432,6 @@ enable_faults:
if (iommu->ppr_log != NULL)
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-
- iommu_ga_log_enable(iommu);
-
return 0;
}
@@ -2249,19 +2446,28 @@ enable_faults:
static void __init free_unity_maps(void)
{
struct unity_map_entry *entry, *next;
+ struct amd_iommu_pci_seg *p, *pci_seg;
- list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
- list_del(&entry->list);
- kfree(entry);
+ for_each_pci_segment_safe(pci_seg, p) {
+ list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
}
}
/* called for unity map ACPI definition */
-static int __init init_unity_map_range(struct ivmd_header *m)
+static int __init init_unity_map_range(struct ivmd_header *m,
+ struct acpi_table_header *ivrs_base)
{
struct unity_map_entry *e = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
char *s;
+ pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -2277,7 +2483,7 @@ static int __init init_unity_map_range(struct ivmd_header *m)
case ACPI_IVMD_TYPE_ALL:
s = "IVMD_TYPE_ALL\t\t";
e->devid_start = 0;
- e->devid_end = amd_iommu_last_bdf;
+ e->devid_end = pci_seg->last_bdf;
break;
case ACPI_IVMD_TYPE_RANGE:
s = "IVMD_TYPE_RANGE\t\t";
@@ -2299,14 +2505,16 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (m->flags & IVMD_FLAG_EXCL_RANGE)
e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
- DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
- " range_start: %016llx range_end: %016llx flags: %x\n", s,
+ DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
+ "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
+ " flags: %x\n", s, m->pci_seg,
PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
+ PCI_FUNC(e->devid_start), m->pci_seg,
+ PCI_BUS_NUM(e->devid_end),
PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e->address_start, e->address_end, m->flags);
- list_add_tail(&e->list, &amd_iommu_unity_map);
+ list_add_tail(&e->list, &pci_seg->unity_map);
return 0;
}
@@ -2323,7 +2531,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
- init_unity_map_range(m);
+ init_unity_map_range(m, table);
p += m->length;
}
@@ -2334,35 +2542,48 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
/*
* Init the device table to not allow DMA access for devices
*/
-static void init_device_table_dma(void)
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- set_dev_entry_bit(devid, DEV_ENTRY_VALID);
- set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+ if (dev_table == NULL)
+ return;
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ if (!amd_iommu_snp_en)
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
}
}
-static void __init uninit_device_table_dma(void)
+static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+ if (dev_table == NULL)
+ return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- amd_iommu_dev_table[devid].data[0] = 0ULL;
- amd_iommu_dev_table[devid].data[1] = 0ULL;
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ dev_table[devid].data[0] = 0ULL;
+ dev_table[devid].data[1] = 0ULL;
}
}
static void init_device_table(void)
{
+ struct amd_iommu_pci_seg *pci_seg;
u32 devid;
if (!amd_iommu_irq_remap)
return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
- set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+ for_each_pci_segment(pci_seg) {
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
+ __set_dev_entry_bit(pci_seg->dev_table,
+ devid, DEV_ENTRY_IRQ_TBL_EN);
+ }
}
static void iommu_init_flags(struct amd_iommu *iommu)
@@ -2440,8 +2661,6 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
- iommu_feature_enable(iommu, CONTROL_GAM_EN);
- fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
@@ -2478,7 +2697,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
static void early_enable_iommus(void)
{
struct amd_iommu *iommu;
-
+ struct amd_iommu_pci_seg *pci_seg;
if (!copy_device_table()) {
/*
@@ -2488,9 +2707,14 @@ static void early_enable_iommus(void)
*/
if (amd_iommu_pre_enabled)
pr_err("Failed to copy DEV table from previous kernel.\n");
- if (old_dev_tbl_cpy != NULL)
- free_pages((unsigned long)old_dev_tbl_cpy,
- get_order(dev_table_size));
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->old_dev_tbl_cpy != NULL) {
+ free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = NULL;
+ }
+ }
for_each_iommu(iommu) {
clear_translation_pre_enabled(iommu);
@@ -2498,9 +2722,13 @@ static void early_enable_iommus(void)
}
} else {
pr_info("Copied DEV table from previous kernel.\n");
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = old_dev_tbl_cpy;
+
+ for_each_pci_segment(pci_seg) {
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
+ }
+
for_each_iommu(iommu) {
iommu_disable_command_buffer(iommu);
iommu_disable_event_buffer(iommu);
@@ -2512,19 +2740,6 @@ static void early_enable_iommus(void)
iommu_flush_all_caches(iommu);
}
}
-
-#ifdef CONFIG_IRQ_REMAP
- /*
- * Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
-#endif
}
static void enable_iommus_v2(void)
@@ -2537,10 +2752,72 @@ static void enable_iommus_v2(void)
}
}
+static void enable_iommus_vapic(void)
+{
+#ifdef CONFIG_IRQ_REMAP
+ u32 status, i;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ /*
+ * Disable GALog if already running. It could have been enabled
+ * in the previous boot before kdump.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ continue;
+
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+ /*
+	 * Need to poll and check that the GALOGRun bit is zero before
+	 * we can set/modify GA Log registers safely.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (WARN_ON(i >= LOOP_TIMEOUT))
+ return;
+ }
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ if (amd_iommu_snp_en &&
+ !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
+ pr_warn("Force to disable Virtual APIC due to SNP\n");
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ /* Enabling GAM and SNPAVIC support */
+ for_each_iommu(iommu) {
+ if (iommu_init_ga_log(iommu) ||
+ iommu_ga_log_enable(iommu))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GAM_EN);
+ if (amd_iommu_snp_en)
+ iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
+ }
+
+ amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+ pr_info("Virtual APIC enabled\n");
+#endif
+}
+
static void enable_iommus(void)
{
early_enable_iommus();
-
+ enable_iommus_vapic();
enable_iommus_v2();
}
@@ -2590,27 +2867,11 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_iommu_resources(void)
{
- kmemleak_free(irq_lookup_table);
- free_pages((unsigned long)irq_lookup_table,
- get_order(rlookup_table_size));
- irq_lookup_table = NULL;
-
kmem_cache_destroy(amd_iommu_irq_cache);
amd_iommu_irq_cache = NULL;
- free_pages((unsigned long)amd_iommu_rlookup_table,
- get_order(rlookup_table_size));
- amd_iommu_rlookup_table = NULL;
-
- free_pages((unsigned long)amd_iommu_alias_table,
- get_order(alias_table_size));
- amd_iommu_alias_table = NULL;
-
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = NULL;
-
free_iommu_all();
+ free_pci_segments();
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2709,7 +2970,7 @@ static void __init ivinfo_init(void *ivrs)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int i, remap_cache_sz, ret;
+ int remap_cache_sz, ret;
acpi_status status;
if (!amd_iommu_detected)
@@ -2737,42 +2998,8 @@ static int __init early_amd_iommu_init(void)
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
- /*
- * First parse ACPI tables to find the largest Bus/Dev/Func
- * we need to handle. Upon this information the shared data
- * structures for the IOMMUs in the system will be allocated
- */
- ret = find_last_devid_acpi(ivrs_base);
- if (ret)
- goto out;
-
- dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
- alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
- rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
-
/* Device table - directly used by all IOMMUs */
ret = -ENOMEM;
- amd_iommu_dev_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
- get_order(dev_table_size));
- if (amd_iommu_dev_table == NULL)
- goto out;
-
- /*
- * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
- * IOMMU see for that device
- */
- amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
- get_order(alias_table_size));
- if (amd_iommu_alias_table == NULL)
- goto out;
-
- /* IOMMU rlookup table - find the IOMMU for a specific device */
- amd_iommu_rlookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- if (amd_iommu_rlookup_table == NULL)
- goto out;
amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
@@ -2781,12 +3008,6 @@ static int __init early_amd_iommu_init(void)
goto out;
/*
- * let all alias entries point to itself
- */
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- amd_iommu_alias_table[i] = i;
-
- /*
* never allocate domain 0 because its used as the non-allocated and
* error value placeholder
*/
@@ -2808,6 +3029,7 @@ static int __init early_amd_iommu_init(void)
amd_iommu_irq_remap = check_ioapic_information();
if (amd_iommu_irq_remap) {
+ struct amd_iommu_pci_seg *pci_seg;
/*
* Interrupt remapping enabled, create kmem_cache for the
* remapping tables.
@@ -2824,13 +3046,10 @@ static int __init early_amd_iommu_init(void)
if (!amd_iommu_irq_cache)
goto out;
- irq_lookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- kmemleak_alloc(irq_lookup_table, rlookup_table_size,
- 1, GFP_KERNEL);
- if (!irq_lookup_table)
- goto out;
+ for_each_pci_segment(pci_seg) {
+ if (alloc_irq_lookup_table(pci_seg))
+ goto out;
+ }
}
ret = init_memory_definitions(ivrs_base);
@@ -2937,6 +3156,7 @@ static int __init state_next(void)
register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+ enable_iommus_vapic();
enable_iommus_v2();
break;
case IOMMU_PCI_INIT:
@@ -2967,8 +3187,11 @@ static int __init state_next(void)
free_iommu_resources();
} else {
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg)
+ uninit_device_table_dma(pci_seg);
- uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
@@ -3161,15 +3384,17 @@ static int __init parse_amd_iommu_options(char *str)
static int __init parse_ivrs_ioapic(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+ }
}
if (early_ioapic_map_size == EARLY_MAP_SIZE) {
@@ -3178,7 +3403,7 @@ static int __init parse_ivrs_ioapic(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_ioapic_map_size++;
@@ -3191,15 +3416,17 @@ static int __init parse_ivrs_ioapic(char *str)
static int __init parse_ivrs_hpet(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_hpet%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+ }
}
if (early_hpet_map_size == EARLY_MAP_SIZE) {
@@ -3208,7 +3435,7 @@ static int __init parse_ivrs_hpet(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_hpet_map_size++;
@@ -3221,15 +3448,18 @@ static int __init parse_ivrs_hpet(char *str)
static int __init parse_ivrs_acpihid(char *str)
{
- u32 bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p;
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
int ret, i;
ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
if (ret != 4) {
- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
- return 1;
+ ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
+ return 1;
+ }
}
p = acpiid;
@@ -3244,8 +3474,7 @@ static int __init parse_ivrs_acpihid(char *str)
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
- early_acpihid_map[i].devid =
- ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
early_acpihid_map[i].cmd_line = true;
return 1;
@@ -3260,7 +3489,12 @@ __setup("ivrs_acpihid", parse_ivrs_acpihid);
bool amd_iommu_v2_supported(void)
{
- return amd_iommu_v2_present;
+ /*
+	 * Since DTE[Mode]=0 is prohibited on an SNP-enabled system
+	 * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used without
+	 * setting up the IOMMUv1 page table.
+ */
+ return amd_iommu_v2_present && !amd_iommu_snp_en;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
@@ -3363,3 +3597,41 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void)
+{
+ /*
+	 * SNP support requires that the IOMMU is enabled and is not
+	 * configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
+ return -EINVAL;
+ }
+
+ /*
+ * Prevent enabling SNP after IOMMU_ENABLED state because this process
+	 * affects how the IOMMU driver sets up data structures and configures
+ * IOMMU hardware.
+ */
+ if (init_state > IOMMU_ENABLED) {
+ pr_err("SNP: Too late to enable SNP for IOMMU.\n");
+ return -EINVAL;
+ }
+
+ amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+ if (!amd_iommu_snp_en)
+ return -EINVAL;
+
+ pr_info("SNP enabled\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6608d1717574..7d4b61e5db47 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -258,7 +258,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
/* pte could have been changed somewhere. */
- if (cmpxchg64(pte, __pte, __npte) != __pte)
+ if (!try_cmpxchg64(pte, &__pte, __npte))
free_page((unsigned long)page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -341,10 +341,8 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
u64 *pt;
int mode;
- while (cmpxchg64(pte, pteval, 0) != pteval) {
+ while (!try_cmpxchg64(pte, &pteval, 0))
pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
- pteval = *pte;
- }
if (!IOMMU_PTE_PRESENT(pteval))
return;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 840831d5d2ad..65b8e4fd8217 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -62,9 +62,6 @@
static DEFINE_SPINLOCK(pd_bitmap_lock);
-/* List of all available dev_data structures */
-static LLIST_HEAD(dev_data_list);
-
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
@@ -95,13 +92,6 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
-static inline u16 get_pci_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return pci_dev_id(pdev);
-}
-
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -122,16 +112,74 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
}
-static inline int get_device_id(struct device *dev)
+static inline int get_device_sbdf_id(struct device *dev)
{
- int devid;
+ int sbdf;
if (dev_is_pci(dev))
- devid = get_pci_device_id(dev);
+ sbdf = get_pci_sbdf_id(to_pci_dev(dev));
else
- devid = get_acpihid_device_id(dev, NULL);
+ sbdf = get_acpihid_device_id(dev, NULL);
+
+ return sbdf;
+}
+
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ BUG_ON(pci_seg == NULL);
+ dev_table = pci_seg->dev_table;
+ BUG_ON(dev_table == NULL);
+
+ return dev_table;
+}
+
+static inline u16 get_device_segment(struct device *dev)
+{
+ u16 seg;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ seg = pci_domain_nr(pdev->bus);
+ } else {
+ u32 devid = get_acpihid_device_id(dev, NULL);
+
+ seg = PCI_SBDF_TO_SEGID(devid);
+ }
+
+ return seg;
+}
+
+/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->rlookup_table[devid] = iommu;
+}
+
+static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == seg)
+ return pci_seg->rlookup_table[devid];
+ }
+ return NULL;
+}
- return devid;
+static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
+{
+ u16 seg = get_device_segment(dev);
+ int devid = get_device_sbdf_id(dev);
+
+ if (devid < 0)
+ return NULL;
+ return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
@@ -139,9 +187,10 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct iommu_dev_data *alloc_dev_data(u16 devid)
+static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
@@ -151,19 +200,20 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
- llist_add(&dev_data->dev_data_list, &dev_data_list);
+ llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(u16 devid)
+static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (llist_empty(&dev_data_list))
+ if (llist_empty(&pci_seg->dev_data_list))
return NULL;
- node = dev_data_list.first;
+ node = pci_seg->dev_data_list.first;
llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
return dev_data;
@@ -174,67 +224,74 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct amd_iommu *iommu;
+ struct dev_table_entry *dev_table;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
return 0;
- amd_iommu_rlookup_table[alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[alias].data));
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+ dev_table = get_dev_table(iommu);
+ memcpy(dev_table[alias].data,
+ dev_table[devid].data,
+ sizeof(dev_table[alias].data));
return 0;
}
-static void clone_aliases(struct pci_dev *pdev)
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
- if (!pdev)
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
return;
+ pdev = to_pci_dev(dev);
/*
* The IVRS alias stored in the alias table may not be
* part of the PCI DMA aliases if it's bus differs
* from the original device.
*/
- clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+ clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
pci_for_each_dma_alias(pdev, clone_alias, NULL);
}
-static struct pci_dev *setup_aliases(struct device *dev)
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u16 ivrs_alias;
/* For ACPI HID devices, there are no aliases */
if (!dev_is_pci(dev))
- return NULL;
+ return;
/*
* Add the IVRS alias to the pci aliases if it is on the same
* bus. The IVRS table may know about a quirk that we don't.
*/
- ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
- clone_aliases(pdev);
-
- return pdev;
+ clone_aliases(iommu, dev);
}
-static struct iommu_dev_data *find_dev_data(u16 devid)
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- dev_data = search_dev_data(devid);
+ dev_data = search_dev_data(iommu, devid);
if (dev_data == NULL) {
- dev_data = alloc_dev_data(devid);
+ dev_data = alloc_dev_data(iommu, devid);
if (!dev_data)
return NULL;
@@ -296,42 +353,49 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
*/
static bool check_device(struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu;
+ int devid, sbdf;
if (!dev)
return false;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return false;
+ devid = PCI_SBDF_TO_DEVID(sbdf);
- /* Out of our scope? */
- if (devid > amd_iommu_last_bdf)
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return false;
- if (amd_iommu_rlookup_table[devid] == NULL)
+ /* Out of our scope? */
+ pci_seg = iommu->pci_seg;
+ if (devid > pci_seg->last_bdf)
return false;
return true;
}
-static int iommu_init_device(struct device *dev)
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
struct iommu_dev_data *dev_data;
- int devid;
+ int devid, sbdf;
if (dev_iommu_priv_get(dev))
return 0;
- devid = get_device_id(dev);
- if (devid < 0)
- return devid;
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return sbdf;
- dev_data = find_dev_data(devid);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ dev_data = find_dev_data(iommu, devid);
if (!dev_data)
return -ENOMEM;
- dev_data->pdev = setup_aliases(dev);
+ dev_data->dev = dev;
+ setup_aliases(iommu, dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -341,9 +405,6 @@ static int iommu_init_device(struct device *dev)
*/
if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
- struct amd_iommu *iommu;
-
- iommu = amd_iommu_rlookup_table[dev_data->devid];
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
@@ -352,18 +413,21 @@ static int iommu_init_device(struct device *dev)
return 0;
}
-static void iommu_ignore_device(struct device *dev)
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return;
- amd_iommu_rlookup_table[devid] = NULL;
- memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ pci_seg->rlookup_table[devid] = NULL;
+ memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
- setup_aliases(dev);
+ setup_aliases(iommu, dev);
}
static void amd_iommu_uninit_device(struct device *dev)
@@ -391,13 +455,13 @@ static void amd_iommu_uninit_device(struct device *dev)
*
****************************************************************************/
-static void dump_dte_entry(u16 devid)
+static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i,
- amd_iommu_dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -409,7 +473,7 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
-static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, vmg_tag, flags;
@@ -421,7 +485,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -432,8 +496,8 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
vmg_tag, spa, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, spa, flags);
}
@@ -441,7 +505,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
pci_dev_put(pdev);
}
-static void amd_iommu_report_rmp_fault(volatile u32 *event)
+static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, flags_rmp, vmg_tag, flags;
@@ -454,7 +518,7 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
gpa = ((u64)event[3] << 32) | event[2];
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -465,8 +529,8 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
vmg_tag, gpa, flags_rmp, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, gpa, flags_rmp, flags);
}
@@ -480,13 +544,14 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
#define IS_WRITE_REQUEST(flags) \
((flags) & EVENT_FLAG_RW)
-static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
+static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ u16 devid, u16 domain_id,
u64 address, int flags)
{
struct iommu_dev_data *dev_data = NULL;
struct pci_dev *pdev;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -511,8 +576,8 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
domain_id, address, flags);
}
} else {
- pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domain_id, address, flags);
}
@@ -549,26 +614,26 @@ retry:
}
if (type == EVENT_TYPE_IO_FAULT) {
- amd_iommu_report_page_fault(devid, pasid, address, flags);
+ amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
return;
}
switch (type) {
case EVENT_TYPE_ILL_DEV:
- dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
- dump_dte_entry(devid);
+ dump_dte_entry(iommu, devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
- dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
"address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
@@ -580,26 +645,26 @@ retry:
address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_RMP_FAULT:
- amd_iommu_report_rmp_fault(event);
+ amd_iommu_report_rmp_fault(iommu, event);
break;
case EVENT_TYPE_RMP_HW_ERR:
- amd_iommu_report_rmp_hw_error(event);
+ amd_iommu_report_rmp_hw_error(iommu, event);
break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
- dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags, tag);
break;
default:
@@ -636,7 +701,7 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
fault.address = raw[1];
fault.pasid = PPR_PASID(raw[0]);
- fault.device_id = PPR_DEVID(raw[0]);
+ fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
@@ -1125,8 +1190,9 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= 0xffff; ++devid)
+ for (devid = 0; devid <= last_bdf; ++devid)
iommu_flush_dte(iommu, devid);
iommu_completion_wait(iommu);
@@ -1139,8 +1205,9 @@ static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
u32 dom_id;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+ for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1);
@@ -1183,8 +1250,9 @@ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+ for (devid = 0; devid <= last_bdf; devid++)
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -1212,7 +1280,9 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
int qdep;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
@@ -1232,20 +1302,28 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
struct amd_iommu *iommu;
+ struct pci_dev *pdev = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
- if (dev_data->pdev)
- ret = pci_for_each_dma_alias(dev_data->pdev,
+ if (dev_is_pci(dev_data->dev))
+ pdev = to_pci_dev(dev_data->dev);
+
+ if (pdev)
+ ret = pci_for_each_dma_alias(pdev,
device_flush_dte_alias, iommu);
else
ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
- alias = amd_iommu_alias_table[dev_data->devid];
+ pci_seg = iommu->pci_seg;
+ alias = pci_seg->alias_table[dev_data->devid];
if (alias != dev_data->devid) {
ret = iommu_flush_dte(iommu, alias);
if (ret)
@@ -1461,28 +1539,35 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
- bool ats, bool ppr)
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
- flags = amd_iommu_dev_table[devid].data[1];
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+
+ /*
+	 * When SNP is enabled, only set the TV bit when IOMMU
+ * page translation is in use.
+ */
+ if (!amd_iommu_snp_en || (domain->id != 0))
+ pte_root |= DTE_FLAG_TV;
+
+ flags = dev_table[devid].data[1];
if (ats)
flags |= DTE_FLAG_IOTLB;
if (ppr) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (iommu_feature(iommu, FEATURE_EPHSUP))
pte_root |= 1ULL << DEV_ENTRY_PPR;
}
@@ -1516,9 +1601,9 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
- old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
- amd_iommu_dev_table[devid].data[1] = flags;
- amd_iommu_dev_table[devid].data[0] = pte_root;
+ old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
+ dev_table[devid].data[1] = flags;
+ dev_table[devid].data[0] = pte_root;
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1526,19 +1611,23 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
* entries for the old domain ID that is being overwritten
*/
if (old_domid) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}
-static void clear_dte_entry(u16 devid)
+static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
/* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
- amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+ dev_table[devid].data[0] = DTE_FLAG_V;
+
+ if (!amd_iommu_snp_en)
+ dev_table[devid].data[0] |= DTE_FLAG_TV;
+
+ dev_table[devid].data[1] &= DTE_FLAG_MASK;
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
}
static void do_attach(struct iommu_dev_data *dev_data,
@@ -1547,7 +1636,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct amd_iommu *iommu;
bool ats;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1559,9 +1650,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
}
@@ -1571,13 +1662,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
- clear_dte_entry(dev_data->devid);
- clone_aliases(dev_data->pdev);
+ clear_dte_entry(iommu, dev_data->devid);
+ clone_aliases(iommu, dev_data->dev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -1749,23 +1842,24 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
- int ret, devid;
+ int ret;
if (!check_device(dev))
return ERR_PTR(-ENODEV);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
- ret = iommu_init_device(dev);
+ ret = iommu_init_device(iommu, dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
- iommu_ignore_device(dev);
+ iommu_ignore_device(iommu, dev);
} else {
amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
@@ -1785,13 +1879,14 @@ static void amd_iommu_probe_finalize(struct device *dev)
static void amd_iommu_release_device(struct device *dev)
{
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
@@ -1816,9 +1911,13 @@ static void update_device_table(struct protection_domain *domain)
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain,
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ if (!iommu)
+ continue;
+ set_dte_entry(iommu, dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
}
}
@@ -1969,6 +2068,13 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
+ /*
+	 * Since DTE[Mode]=0 is prohibited on an SNP-enabled system,
+	 * default to using IOMMU_DOMAIN_DMA[_FQ].
+ */
+ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
+ return NULL;
+
domain = protection_domain_alloc(type);
if (!domain)
return NULL;
@@ -2004,7 +2110,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
@@ -2013,7 +2118,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (dev_data->domain != NULL)
detach_device(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
@@ -2040,7 +2145,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return -EINVAL;
@@ -2169,13 +2274,21 @@ static void amd_iommu_get_resv_regions(struct device *dev,
{
struct iommu_resv_region *region;
struct unity_map_entry *entry;
- int devid;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return;
+ pci_seg = iommu->pci_seg;
- list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ list_for_each_entry(entry, &pci_seg->unity_map, list) {
int type, prot = 0;
size_t length;
@@ -2280,7 +2393,6 @@ const struct iommu_ops amd_iommu_ops = {
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
@@ -2419,8 +2531,9 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
continue;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
-
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
qdep, address, size);
@@ -2582,7 +2695,9 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
struct iommu_cmd cmd;
dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
@@ -2644,30 +2759,35 @@ EXPORT_SYMBOL(amd_iommu_device_info);
static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
-static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
{
u64 dte;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
- dte = amd_iommu_dev_table[devid].data[2];
+ dte = dev_table[devid].data[2];
dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
dte |= iommu_virt_to_phys(table->table);
dte |= DTE_IRQ_REMAP_INTCTL;
dte |= DTE_INTTABLEN;
dte |= DTE_IRQ_REMAP_ENABLE;
- amd_iommu_dev_table[devid].data[2] = dte;
+ dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid)
+static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
{
struct irq_remap_table *table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
- "%s: no iommu for devid %x\n", __func__, devid))
+ if (WARN_ONCE(!pci_seg->rlookup_table[devid],
+ "%s: no iommu for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
- table = irq_lookup_table[devid];
- if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ table = pci_seg->irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
return table;
@@ -2700,8 +2820,10 @@ static struct irq_remap_table *__alloc_irq_table(void)
static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->irq_lookup_table[devid] = table;
+ set_dte_irq_entry(iommu, devid, table);
iommu_flush_dte(iommu, devid);
}
@@ -2709,35 +2831,38 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
void *data)
{
struct irq_remap_table *table = data;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
+ if (!iommu)
+ return -EINVAL;
- iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+ pci_seg = iommu->pci_seg;
+ pci_seg->irq_lookup_table[alias] = table;
+ set_dte_irq_entry(iommu, alias, table);
+ iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
return 0;
}
-static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
+static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
+ u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
- struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- goto out_unlock;
-
- table = irq_lookup_table[devid];
+ pci_seg = iommu->pci_seg;
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- alias = amd_iommu_alias_table[devid];
- table = irq_lookup_table[alias];
+ alias = pci_seg->alias_table[devid];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2751,11 +2876,11 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
spin_lock_irqsave(&iommu_table_lock, flags);
- table = irq_lookup_table[devid];
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- table = irq_lookup_table[alias];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2786,18 +2911,14 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align,
- struct pci_dev *pdev)
+static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
+ bool align, struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- return -ENODEV;
-
- table = alloc_irq_table(devid, pdev);
+ table = alloc_irq_table(iommu, devid, pdev);
if (!table)
return -ENODEV;
@@ -2836,20 +2957,15 @@ out:
return index;
}
-static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
- struct amd_ir_data *data)
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte, struct amd_ir_data *data)
{
bool ret;
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
struct irte_ga *entry;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2880,17 +2996,13 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
return 0;
}
-static int modify_irte(u16 devid, int index, union irte *irte)
+static int modify_irte(struct amd_iommu *iommu,
+ u16 devid, int index, union irte *irte)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2904,17 +3016,12 @@ static int modify_irte(u16 devid, int index, union irte *irte)
return 0;
}
-static void free_irte(u16 devid, int index)
+static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return;
@@ -2956,49 +3063,49 @@ static void irte_ga_prepare(void *entry,
irte->lo.fields_remap.valid = 1;
}
-static void irte_activate(void *entry, u16 devid, u16 index)
+static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 1;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_activate(void *entry, u16 devid, u16 index)
+static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 1;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_deactivate(void *entry, u16 devid, u16 index)
+static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 0;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 0;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
union irte *irte = (union irte *) entry;
irte->fields.vector = vector;
irte->fields.destination = dest_apicid;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
@@ -3009,7 +3116,7 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
APICID_TO_IRTE_DEST_LO(dest_apicid);
irte->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(dest_apicid);
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
}
@@ -3068,7 +3175,7 @@ static int get_devid(struct irq_alloc_info *info)
return get_hpet_devid(info->devid);
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
- return get_device_id(msi_desc_to_dev(info->desc));
+ return get_device_sbdf_id(msi_desc_to_dev(info->desc));
default:
WARN_ON_ONCE(1);
return -1;
@@ -3097,7 +3204,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
int devid, int index, int sub_handle)
{
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ struct amd_iommu *iommu = data->iommu;
if (!iommu)
return;
@@ -3148,8 +3255,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
struct amd_ir_data *data = NULL;
+ struct amd_iommu *iommu;
struct irq_cfg *cfg;
- int i, ret, devid;
+ int i, ret, devid, seg, sbdf;
int index;
if (!info)
@@ -3165,8 +3273,14 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
- devid = get_devid(info);
- if (devid < 0)
+ sbdf = get_devid(info);
+ if (sbdf < 0)
+ return -EINVAL;
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = __rlookup_amd_iommu(seg, devid);
+ if (!iommu)
return -EINVAL;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
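For reference, a minimal sketch of the new lookup flow introduced above, assuming PCI_SBDF_TO_SEGID()/PCI_SBDF_TO_DEVID() simply split the 32-bit value into its upper and lower 16 bits, as the irq_remapping_select() hunk below does by hand with (devid >> 16) and (devid & 0xffff):

	int sbdf = get_devid(info);	/* seg << 16 | devid, or negative on error */
	if (sbdf >= 0) {
		u16 seg   = PCI_SBDF_TO_SEGID(sbdf);	/* sbdf >> 16    */
		u16 devid = PCI_SBDF_TO_DEVID(sbdf);	/* sbdf & 0xffff */
		struct amd_iommu *iommu = __rlookup_amd_iommu(seg, devid);
		/* bail out if no IOMMU serves this segment/device pair */
	}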
@@ -3175,9 +3289,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
struct irq_remap_table *table;
- struct amd_iommu *iommu;
- table = alloc_irq_table(devid, NULL);
+ table = alloc_irq_table(iommu, devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -3185,7 +3298,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
* interrupts.
*/
table->min_index = 32;
- iommu = amd_iommu_rlookup_table[devid];
for (i = 0; i < 32; ++i)
iommu->irte_ops->set_allocated(table, i);
}
@@ -3198,10 +3310,10 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
- index = alloc_irq_index(devid, nr_irqs, align,
+ index = alloc_irq_index(iommu, devid, nr_irqs, align,
msi_desc_to_pci_dev(info->desc));
} else {
- index = alloc_irq_index(devid, nr_irqs, false, NULL);
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
}
if (index < 0) {
@@ -3233,6 +3345,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
goto out_free_data;
}
+ data->iommu = iommu;
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
@@ -3249,7 +3362,7 @@ out_free_data:
kfree(irq_data->chip_data);
}
for (i = 0; i < nr_irqs; i++)
- free_irte(devid, index + i);
+ free_irte(iommu, devid, index + i);
out_free_parent:
irq_domain_free_irqs_common(domain, virq, nr_irqs);
return ret;
@@ -3268,7 +3381,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
if (irq_data && irq_data->chip_data) {
data = irq_data->chip_data;
irte_info = &data->irq_2_irte;
- free_irte(irte_info->devid, irte_info->index);
+ free_irte(data->iommu, irte_info->devid, irte_info->index);
kfree(data->entry);
kfree(data);
}
@@ -3286,13 +3399,13 @@ static int irq_remapping_activate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
struct irq_cfg *cfg = irqd_cfg(irq_data);
if (!iommu)
return 0;
- iommu->irte_ops->activate(data->entry, irte_info->devid,
+ iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
irte_info->index);
amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
return 0;
@@ -3303,10 +3416,10 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
if (iommu)
- iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+ iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
irte_info->index);
}
@@ -3326,8 +3439,8 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
if (devid < 0)
return 0;
+ iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
- iommu = amd_iommu_rlookup_table[devid];
return iommu && iommu->ir_domain == d;
}
@@ -3361,7 +3474,7 @@ int amd_iommu_activate_guest_mode(void *data)
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
@@ -3391,7 +3504,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
@@ -3399,12 +3512,16 @@ EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
int ret;
- struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
- struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+ struct iommu_dev_data *dev_data;
+
+ if (ir_data->iommu == NULL)
+ return -EINVAL;
+
+ dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
/* Note:
* This device has never been set up for guest mode.
@@ -3426,10 +3543,6 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
pi_data->is_guest_mode = false;
}
- iommu = amd_iommu_rlookup_table[irte_info->devid];
- if (iommu == NULL)
- return -EINVAL;
-
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
ir_data->ga_root_ptr = (pi_data->base >> 12);
@@ -3463,7 +3576,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
- iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
irte_info->index, cfg->vector,
cfg->dest_apicid);
}
@@ -3475,7 +3588,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = ir_data->iommu;
int ret;
if (!iommu)
@@ -3545,11 +3658,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
!ref || !entry || !entry->lo.fields_vapic.guest_mode)
return 0;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = ir_data->iommu;
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENODEV;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index afb3efd565b7..696d5555be57 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -51,7 +51,7 @@ struct pasid_state {
struct device_state {
struct list_head list;
- u16 devid;
+ u32 sbdf;
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
@@ -83,35 +83,25 @@ static struct workqueue_struct *iommu_wq;
static void free_pasid_states(struct device_state *dev_state);
-static u16 device_id(struct pci_dev *pdev)
-{
- u16 devid;
-
- devid = pdev->bus->number;
- devid = (devid << 8) | pdev->devfn;
-
- return devid;
-}
-
-static struct device_state *__get_device_state(u16 devid)
+static struct device_state *__get_device_state(u32 sbdf)
{
struct device_state *dev_state;
list_for_each_entry(dev_state, &state_list, list) {
- if (dev_state->devid == devid)
+ if (dev_state->sbdf == sbdf)
return dev_state;
}
return NULL;
}
-static struct device_state *get_device_state(u16 devid)
+static struct device_state *get_device_state(u32 sbdf)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
@@ -528,15 +518,16 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
unsigned long flags;
struct fault *fault;
bool finish;
- u16 tag, devid;
+ u16 tag, devid, seg_id;
int ret;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
finish = (iommu_fault->tag >> 9) & 1;
- devid = iommu_fault->device_id;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
+ devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
+ pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
devid & 0xff);
if (!pdev)
return -ENODEV;
@@ -550,7 +541,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
goto out;
}
- dev_state = get_device_state(iommu_fault->device_id);
+ dev_state = get_device_state(iommu_fault->sbdf);
if (dev_state == NULL)
goto out;
@@ -609,7 +600,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct pasid_state *pasid_state;
struct device_state *dev_state;
struct mm_struct *mm;
- u16 devid;
+ u32 sbdf;
int ret;
might_sleep();
@@ -617,8 +608,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return -EINVAL;
@@ -692,15 +683,15 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
- u16 devid;
+ u32 sbdf;
might_sleep();
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return;
@@ -742,7 +733,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
struct iommu_group *group;
unsigned long flags;
int ret, tmp;
- u16 devid;
+ u32 sbdf;
might_sleep();
@@ -759,7 +750,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (pasids <= 0 || pasids > (PASID_MASK + 1))
return -EINVAL;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
if (dev_state == NULL)
@@ -768,7 +759,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
dev_state->pdev = pdev;
- dev_state->devid = devid;
+ dev_state->sbdf = sbdf;
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -806,7 +797,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_irqsave(&state_lock, flags);
- if (__get_device_state(devid) != NULL) {
+ if (__get_device_state(sbdf) != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
@@ -838,16 +829,16 @@ void amd_iommu_free_device(struct pci_dev *pdev)
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
@@ -867,18 +858,18 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
@@ -898,18 +889,18 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c
index 5120ce4fdce3..79dbb8f33b47 100644
--- a/drivers/iommu/amd/quirks.c
+++ b/drivers/iommu/amd/quirks.c
@@ -15,7 +15,7 @@
struct ivrs_quirk_entry {
u8 id;
- u16 devid;
+ u32 devid;
};
enum {
@@ -49,7 +49,7 @@ static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
const struct ivrs_quirk_entry *i;
for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
- add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0);
return 0;
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 8af0242a90d9..1b1725759262 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -564,9 +564,6 @@ static void apple_dart_release_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (!cfg)
- return;
-
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -771,7 +768,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.of_xlate = apple_dart_of_xlate,
.def_domain_type = apple_dart_def_domain_type,
.get_resv_regions = apple_dart_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 88817a3376ef..d32b02336411 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
{
unsigned int i;
+ u64 val = STRTAB_STE_0_V;
+
+ if (disable_bypass && !force)
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+ else
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- arm_smmu_write_strtab_ent(NULL, -1, strtab);
+ strtab[0] = cpu_to_le64(val);
+ strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+ strtab[2] = 0;
strtab += STRTAB_STE_DWORDS;
}
}
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2537,6 +2546,19 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
+{
+ /* Check the SIDs are in range of the SMMU and our stream table */
+ if (!arm_smmu_sid_in_range(smmu, sid))
+ return -ERANGE;
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ return arm_smmu_init_l2_strtab(smmu, sid);
+
+ return 0;
+}
+
static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
struct arm_smmu_master *master)
{
@@ -2560,20 +2582,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
new_stream->id = sid;
new_stream->master = master;
- /*
- * Check the SIDs are in range of the SMMU and our stream table
- */
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
+ ret = arm_smmu_init_sid_strtab(smmu, sid);
+ if (ret)
break;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- break;
- }
/* Insert into SID tree */
new_node = &(smmu->streams.rb_node);
@@ -2691,20 +2702,14 @@ err_free_master:
static void arm_smmu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master *master;
-
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- master = dev_iommu_priv_get(dev);
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
kfree(master);
- iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
@@ -2760,58 +2765,27 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static bool arm_smmu_dev_has_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return arm_smmu_master_iopf_supported(master);
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_supported(master);
- default:
- return false;
- }
-}
-
-static bool arm_smmu_dev_feature_enabled(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return master->iopf_enabled;
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_enabled(master);
- default:
- return false;
- }
-}
-
static int arm_smmu_dev_enable_feature(struct device *dev,
enum iommu_dev_features feat)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_has_feature(dev, feat))
+ if (!master)
return -ENODEV;
- if (arm_smmu_dev_feature_enabled(dev, feat))
- return -EBUSY;
-
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!arm_smmu_master_iopf_supported(master))
+ return -EINVAL;
+ if (master->iopf_enabled)
+ return -EBUSY;
master->iopf_enabled = true;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_supported(master))
+ return -EINVAL;
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
return arm_smmu_master_enable_sva(master);
default:
return -EINVAL;
@@ -2823,16 +2797,20 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_feature_enabled(dev, feat))
+ if (!master)
return -EINVAL;
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!master->iopf_enabled)
+ return -EINVAL;
if (master->sva_enabled)
return -EBUSY;
master->iopf_enabled = false;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_enabled(master))
+ return -EINVAL;
return arm_smmu_master_disable_sva(master);
default:
return -EINVAL;
@@ -2847,9 +2825,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
- .dev_has_feat = arm_smmu_dev_has_feature,
- .dev_feat_enabled = arm_smmu_dev_feature_enabled,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.sva_bind = arm_smmu_sva_bind,
@@ -3049,7 +3024,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
return 0;
}
@@ -3743,6 +3718,36 @@ static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
return devm_ioremap_resource(dev, &res);
}
+static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ __le64 *step;
+ struct iommu_iort_rmr_data *rmr;
+ int ret, i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
+ if (ret) {
+ dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
+ rmr->sids[i]);
+ continue;
+ }
+
+ step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
+ arm_smmu_init_bypass_stes(step, 1, true);
+ }
+ }
+
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -3826,6 +3831,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
+ /* Check for RMRs and install bypass STEs if any */
+ arm_smmu_rmr_install_bypass_ste(smmu);
+
/* Reset the device */
ret = arm_smmu_device_reset(smmu, bypass);
if (ret)
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index b0cc01aa20c9..2a5a95e8e3f9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM_DEBUG) += arm-smmu-qcom-debug.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
new file mode 100644
index 000000000000..6eed8e67a0ca
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/ratelimit.h>
+
+#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
+
+enum qcom_smmu_impl_reg_offset {
+ QCOM_SMMU_TBU_PWR_STATUS,
+ QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
+ QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
+};
+
+struct qcom_smmu_config {
+ const u32 *reg_offset;
+};
+
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 tbu_pwr_status, sync_inv_ack, sync_inv_progress;
+ struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu);
+ const struct qcom_smmu_config *cfg;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rs)) {
+ dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
+
+ cfg = qsmmu->cfg;
+ if (!cfg)
+ return;
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS],
+ &tbu_pwr_status);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU power status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK],
+ &sync_inv_ack);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU sync/inv ack status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR],
+ &sync_inv_progress);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TCU syn/inv progress: %d\n", ret);
+
+ dev_err(smmu->dev,
+ "TBU: power_status %#x sync_inv_ack %#x sync_inv_progress %#x\n",
+ tbu_pwr_status, sync_inv_ack, sync_inv_progress);
+ }
+}
+
+/* Implementation Defined Register Space 0 register offsets */
+static const u32 qcom_smmu_impl0_reg_offset[] = {
+ [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
+ [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
+ [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
+};
+
+static const struct qcom_smmu_config qcm2290_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7180_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7280_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8180x_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8280xp_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6125_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8150_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8250_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8450_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
+ { .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg },
+ { .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg },
+ { .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg},
+ { .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg },
+ { .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg },
+ { .compatible = "qcom,sdm630-smmu-v2" },
+ { .compatible = "qcom,sdm845-smmu-500" },
+ { .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg},
+ { .compatible = "qcom,sm6350-smmu-500", .data = &sm6350_smmu_cfg},
+ { .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg },
+ { .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg },
+ { .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg },
+ { .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg },
+ { }
+};
+
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ const struct of_device_id *match;
+ const struct device_node *np = smmu->dev->of_node;
+
+ match = of_match_node(qcom_smmu_impl_debug_match, np);
+ if (!match)
+ return NULL;
+
+ return match->data;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 7820711c4560..b2708de25ea3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -5,23 +5,40 @@
#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
+#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
-struct qcom_smmu {
- struct arm_smmu_device smmu;
- bool bypass_quirk;
- u8 bypass_cbndx;
- u32 stall_enabled;
-};
+#define QCOM_DUMMY_VAL -1
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
}
+static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int spin_cnt, delay;
+ u32 reg;
+
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
+ }
+ udelay(delay);
+ }
+
+ qcom_smmu_tlb_sync_debug(smmu);
+}
+
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
u32 reg)
{
@@ -233,6 +250,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ }
@@ -374,6 +392,7 @@ static const struct arm_smmu_impl qcom_smmu_impl = {
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
@@ -382,6 +401,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
.reset = qcom_smmu500_reset,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
@@ -398,6 +418,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
+ qsmmu->cfg = qcom_smmu_impl_data(smmu);
return &qsmmu->smmu;
}
@@ -413,6 +434,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
{ .compatible = "qcom,sm6350-smmu-500" },
+ { .compatible = "qcom,sm6375-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
new file mode 100644
index 000000000000..99ec8f8629a0
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ARM_SMMU_QCOM_H
+#define _ARM_SMMU_QCOM_H
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+ const struct qcom_smmu_config *cfg;
+ bool bypass_quirk;
+ u8 bypass_cbndx;
+ u32 stall_enabled;
+};
+
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu);
+#else
+static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
+static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ARM_SMMU_QCOM_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 2ed3594f384e..dfa82df00342 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1432,27 +1432,19 @@ out_free:
static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_cfg *cfg;
- struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
-
- cfg = dev_iommu_priv_get(dev);
- smmu = cfg->smmu;
-
- ret = arm_smmu_rpm_get(smmu);
+ ret = arm_smmu_rpm_get(cfg->smmu);
if (ret < 0)
return;
arm_smmu_master_free_smes(cfg, fwspec);
- arm_smmu_rpm_put(smmu);
+ arm_smmu_rpm_put(cfg->smmu);
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
- iommu_fwspec_free(dev);
}
static void arm_smmu_probe_finalize(struct device *dev)
@@ -1592,7 +1584,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
@@ -2071,10 +2062,57 @@ err_reset_platform_ops: __maybe_unused;
return err;
}
+static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+ int idx, cnt = 0;
+ u32 reg;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ /*
+ * Rather than trying to look at existing mappings that
+ * are set up by the firmware and then invalidate the ones
+ * that do not have matching RMR entries, just disable the
+ * SMMU until it gets enabled again in the reset routine.
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
+ reg |= ARM_SMMU_sCR0_CLIENTPD;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ struct iommu_iort_rmr_data *rmr;
+ int i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
+ if (idx < 0)
+ continue;
+
+ if (smmu->s2crs[idx].count == 0) {
+ smmu->smrs[idx].id = rmr->sids[i];
+ smmu->smrs[idx].mask = 0;
+ smmu->smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+
+ cnt++;
+ }
+ }
+
+ dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
+ cnt == 1 ? "" : "s");
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
- resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
@@ -2098,7 +2136,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- ioaddr = res->start;
+ smmu->ioaddr = res->start;
+
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
@@ -2178,7 +2217,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
- "smmu.%pa", &ioaddr);
+ "smmu.%pa", &smmu->ioaddr);
if (err) {
dev_err(dev, "Failed to register iommu in sysfs\n");
return err;
@@ -2191,6 +2230,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 2b9b42fb6f30..703fd5817ec1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -278,6 +278,7 @@ struct arm_smmu_device {
struct device *dev;
void __iomem *base;
+ phys_addr_t ioaddr;
unsigned int numpage;
unsigned int pgshift;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 4c077c38fbd6..17235116d3bb 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -532,16 +532,6 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static void qcom_iommu_release_device(struct device *dev)
-{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
-
- if (!qcom_iommu)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
@@ -591,7 +581,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
.probe_device = qcom_iommu_probe_device,
- .release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
@@ -750,9 +739,12 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
struct device_node *child;
- for_each_child_of_node(qcom_iommu->dev->of_node, child)
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f90251572a5d..17dd683b2fce 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -64,6 +65,7 @@ struct iommu_dma_cookie {
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
+ struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -310,6 +312,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (!domain->iova_cookie)
return -ENOMEM;
+ mutex_init(&domain->iova_cookie->mutex);
return 0;
}
@@ -385,7 +388,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
- iort_iommu_msi_get_resv_regions(dev, list);
+ iort_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -560,26 +563,33 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
/* start_pfn is always nonzero for an already-initialised domain */
+ mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
- return -EFAULT;
+ ret = -EFAULT;
+ goto done_unlock;
}
- return 0;
+ ret = 0;
+ goto done_unlock;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
- return ret;
+ goto done_unlock;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
domain->type = IOMMU_DOMAIN_DMA;
- return iova_reserve_iommu_regions(dev, domain);
+ ret = iova_reserve_iommu_regions(dev, domain);
+
+done_unlock:
+ mutex_unlock(&cookie->mutex);
+ return ret;
}
/**
@@ -1053,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
+ dma_addr_t s_dma_addr = sg_dma_address(s);
unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
- s->offset += s_iova_off;
- s->length = s_length;
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
+ if (sg_is_dma_bus_address(s)) {
+ if (i > 0)
+ cur = sg_next(cur);
+
+ sg_dma_unmark_bus_address(s);
+ sg_dma_address(cur) = s_dma_addr;
+ sg_dma_len(cur) = s_length;
+ sg_dma_mark_bus_address(cur);
+ count++;
+ cur_len = 0;
+ continue;
+ }
+
+ s->offset += s_iova_off;
+ s->length = s_length;
+
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
@@ -1102,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_MAPPING_ERROR)
- s->offset += sg_dma_address(s);
- if (sg_dma_len(s))
- s->length = sg_dma_len(s);
+ if (sg_is_dma_bus_address(s)) {
+ sg_dma_unmark_bus_address(s);
+ } else {
+ if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+ s->offset += sg_dma_address(s);
+ if (sg_dma_len(s))
+ s->length = sg_dma_len(s);
+ }
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
@@ -1158,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1187,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
+ if (is_pci_p2pdma_page(sg_page(s))) {
+ map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+ switch (map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as
+ * it is marked as a bus address,
+ * __finalise_sg() will copy the dma address
+ * into the output segment.
+ */
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mappings through the host bridge are done
+ * with regular IOVAs, thus we do nothing
+ * here and continue below.
+ */
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
+ }
+ }
+
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
s->offset -= s_iova_off;
@@ -1215,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
+ if (!iova_len)
+ return __finalise_sg(dev, sg, nents, 0);
+
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
@@ -1236,7 +1294,7 @@ out_free_iova:
out_restore_sg:
__invalidate_sg(sg, nents);
out:
- if (ret != -ENOMEM)
+ if (ret != -ENOMEM && ret != -EREMOTEIO)
return -EINVAL;
return ret;
}
@@ -1244,7 +1302,7 @@ out:
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start, end;
+ dma_addr_t end = 0, start;
struct scatterlist *tmp;
int i;
@@ -1258,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
/*
* The scatterlist segments are mapped into a single
- * contiguous IOVA allocation, so this is incredibly easy.
+ * contiguous IOVA allocation; the start and end points
+ * just have to be determined.
*/
- start = sg_dma_address(sg);
- for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ for_each_sg(sg, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
+ if (sg_dma_len(tmp) == 0)
+ break;
+
+ start = sg_dma_address(tmp);
+ break;
+ }
+
+ nents -= i;
+ for_each_sg(tmp, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
if (sg_dma_len(tmp) == 0)
break;
- sg = tmp;
+
+ end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
- end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(dev, start, end - start);
+
+ if (end)
+ __iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1459,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
+static size_t iommu_dma_opt_mapping_size(void)
+{
+ return iova_rcache_range();
+}
+
static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
@@ -1479,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
};
/*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 71f2018e23fe..8e18984a0c4f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -135,6 +135,11 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+#define CTRL_VM_ENABLE BIT(0)
+#define CTRL_VM_FAULT_MODE_STALL BIT(3)
+#define CAPA0_CAPA1_EXIST BIT(11)
+#define CAPA1_VCR_ENABLED BIT(14)
+
/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
@@ -148,29 +153,20 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
/* v1.x - v3.x registers */
-#define REG_MMU_FLUSH 0x00C
-#define REG_MMU_FLUSH_ENTRY 0x010
-#define REG_PT_BASE_ADDR 0x014
-#define REG_INT_STATUS 0x018
-#define REG_INT_CLEAR 0x01C
-
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
/* v5.x registers */
-#define REG_V5_PT_BASE_PFN 0x00C
-#define REG_V5_MMU_FLUSH_ALL 0x010
-#define REG_V5_MMU_FLUSH_ENTRY 0x014
-#define REG_V5_MMU_FLUSH_RANGE 0x018
-#define REG_V5_MMU_FLUSH_START 0x020
-#define REG_V5_MMU_FLUSH_END 0x024
-#define REG_V5_INT_STATUS 0x060
-#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
+/* v7.x registers */
+#define REG_V7_CAPA0 0x870
+#define REG_V7_CAPA1 0x874
+#define REG_V7_CTRL_VM 0x8000
+
#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
@@ -251,6 +247,21 @@ struct exynos_iommu_domain {
};
/*
+ * SysMMU version specific data. Contains offsets for the registers which can
+ * be found in different SysMMU variants, but have different offset values.
+ */
+struct sysmmu_variant {
+ u32 pt_base; /* page table base address (physical) */
+ u32 flush_all; /* invalidate all TLB entries */
+ u32 flush_entry; /* invalidate specific TLB entry */
+ u32 flush_range; /* invalidate TLB entries in specified range */
+ u32 flush_start; /* start address of range invalidation */
+ u32 flush_end; /* end address of range invalidation */
+ u32 int_status; /* interrupt status information */
+ u32 int_clear; /* clear the interrupt */
+};
+
+/*
* This structure holds all data of a single SYSMMU controller; this includes
* hw resources like registers and clocks, pointers and list nodes to connect
* it to all other structures, internal state and parameters read from device
@@ -274,6 +285,45 @@ struct sysmmu_drvdata {
unsigned int version; /* our version */
struct iommu_device iommu; /* IOMMU core handle */
+ const struct sysmmu_variant *variant; /* version specific data */
+
+ /* v7 fields */
+ bool has_vcr; /* virtual machine control register */
+};
+
+#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
+
+/* SysMMU v1..v3 */
+static const struct sysmmu_variant sysmmu_v1_variant = {
+ .flush_all = 0x0c,
+ .flush_entry = 0x10,
+ .pt_base = 0x14,
+ .int_status = 0x18,
+ .int_clear = 0x1c,
+};
+
+/* SysMMU v5 and v7 (non-VM capable) */
+static const struct sysmmu_variant sysmmu_v5_variant = {
+ .pt_base = 0x0c,
+ .flush_all = 0x10,
+ .flush_entry = 0x14,
+ .flush_range = 0x18,
+ .flush_start = 0x20,
+ .flush_end = 0x24,
+ .int_status = 0x60,
+ .int_clear = 0x64,
+};
+
+/* SysMMU v7: VM capable register set */
+static const struct sysmmu_variant sysmmu_v7_vm_variant = {
+ .pt_base = 0x800c,
+ .flush_all = 0x8010,
+ .flush_entry = 0x8014,
+ .flush_range = 0x8018,
+ .flush_start = 0x8020,
+ .flush_end = 0x8024,
+ .int_status = 0x60,
+ .int_clear = 0x64,
};
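A minimal illustration of how the SYSMMU_REG() helper defined above replaces the removed per-version register defines: with data->variant pointing at sysmmu_v1_variant, the flush-all write resolves to the same offset the old REG_MMU_FLUSH (0x00C) define used.

	writel(0x1, SYSMMU_REG(data, flush_all));
	/* expands to: writel(0x1, data->sfrbase + data->variant->flush_all)
	 * which for the v1 variant is data->sfrbase + 0x0c */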
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -304,10 +354,7 @@ static bool sysmmu_block(struct sysmmu_drvdata *data)
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
- if (MMU_MAJ_VER(data->version) < 5)
- writel(0x1, data->sfrbase + REG_MMU_FLUSH);
- else
- writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
+ writel(0x1, SYSMMU_REG(data, flush_all));
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -315,34 +362,30 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
unsigned int i;
- if (MMU_MAJ_VER(data->version) < 5) {
+ if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
for (i = 0; i < num_inv; i++) {
writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_MMU_FLUSH_ENTRY);
+ SYSMMU_REG(data, flush_entry));
iova += SPAGE_SIZE;
}
} else {
- if (num_inv == 1) {
- writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
- } else {
- writel((iova & SPAGE_MASK),
- data->sfrbase + REG_V5_MMU_FLUSH_START);
- writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
- data->sfrbase + REG_V5_MMU_FLUSH_END);
- writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
- }
+ writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
+ writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
+ SYSMMU_REG(data, flush_end));
+ writel(0x1, SYSMMU_REG(data, flush_range));
}
}
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
+ u32 pt_base;
+
if (MMU_MAJ_VER(data->version) < 5)
- writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
+ pt_base = pgd;
else
- writel(pgd >> PAGE_SHIFT,
- data->sfrbase + REG_V5_PT_BASE_PFN);
+ pt_base = pgd >> SPAGE_ORDER;
+ writel(pt_base, SYSMMU_REG(data, pt_base));
__sysmmu_tlb_invalidate(data);
}
@@ -362,6 +405,20 @@ static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
clk_disable_unprepare(data->clk_master);
}
+static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
+{
+ u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);
+
+ return capa0 & CAPA0_CAPA1_EXIST;
+}
+
+static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
+{
+ u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);
+
+ data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
+}
+
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
u32 ver;
@@ -379,6 +436,19 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
+ if (MMU_MAJ_VER(data->version) < 5) {
+ data->variant = &sysmmu_v1_variant;
+ } else if (MMU_MAJ_VER(data->version) < 7) {
+ data->variant = &sysmmu_v5_variant;
+ } else {
+ if (__sysmmu_has_capa1(data))
+ __sysmmu_get_vcr(data);
+ if (data->has_vcr)
+ data->variant = &sysmmu_v7_vm_variant;
+ else
+ data->variant = &sysmmu_v5_variant;
+ }
+
__sysmmu_disable_clocks(data);
}
@@ -406,19 +476,14 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
const struct sysmmu_fault_info *finfo;
unsigned int i, n, itype;
sysmmu_iova_t fault_addr;
- unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
- reg_status = REG_INT_STATUS;
- reg_clear = REG_INT_CLEAR;
finfo = sysmmu_faults;
n = ARRAY_SIZE(sysmmu_faults);
} else {
- reg_status = REG_V5_INT_STATUS;
- reg_clear = REG_V5_INT_CLEAR;
finfo = sysmmu_v5_faults;
n = ARRAY_SIZE(sysmmu_v5_faults);
}
@@ -427,7 +492,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
clk_enable(data->clk_master);
- itype = __ffs(readl(data->sfrbase + reg_status));
+ itype = __ffs(readl(SYSMMU_REG(data, int_status)));
for (i = 0; i < n; i++, finfo++)
if (finfo->bit == itype)
break;
@@ -444,7 +509,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
/* fault is not recovered by fault handler */
BUG_ON(ret != 0);
- writel(1 << itype, data->sfrbase + reg_clear);
+ writel(1 << itype, SYSMMU_REG(data, int_clear));
sysmmu_unblock(data);
@@ -486,6 +551,18 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
+static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
+{
+ u32 ctrl;
+
+ if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
+ return;
+
+ ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
+ ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
+ writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
+}
+
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
unsigned long flags;
@@ -496,6 +573,7 @@ static void __sysmmu_enable(struct sysmmu_drvdata *data)
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
__sysmmu_set_ptbase(data, data->pgtable);
+ __sysmmu_enable_vid(data);
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
data->active = true;
spin_unlock_irqrestore(&data->lock, flags);
@@ -551,7 +629,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
* 64KB page can be one of 16 consecutive sets.
*/
if (MMU_MAJ_VER(data->version) == 2)
- num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+ num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);
if (sysmmu_block(data)) {
__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
@@ -623,6 +701,8 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev;
spin_lock_init(&data->lock);
+ __sysmmu_get_version(data);
+
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(data->sysmmu));
if (ret)
@@ -630,11 +710,10 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
if (ret)
- return ret;
+ goto err_iommu_register;
platform_set_drvdata(pdev, data);
- __sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
@@ -647,6 +726,14 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
}
}
+ if (MMU_MAJ_VER(data->version) >= 5) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+ if (ret) {
+ dev_err(dev, "Unable to set DMA mask: %d\n", ret);
+ goto err_dma_set_mask;
+ }
+ }
+
/*
* use the first registered sysmmu device for performing
* dma mapping operations on iommu page tables (cpu cache flush)
@@ -657,6 +744,12 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
+
+err_dma_set_mask:
+ iommu_device_unregister(&data->iommu);
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
@@ -1251,9 +1344,6 @@ static void exynos_iommu_release_device(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
- if (!has_sysmmu(dev))
- return;
-
if (owner->domain) {
struct iommu_group *group = iommu_group_get(dev);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 94b4589dc67c..011f9ab7f743 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -447,15 +447,10 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
return &pamu_iommu;
}
-static void fsl_pamu_release_device(struct device *dev)
-{
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
- .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index e285a220c913..e190bb8c225c 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct irq_desc *desc;
int ret = 0;
if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* Hyper-V IO APIC irq affinity should be in the scope of
* ioapic_max_cpumask because no irq remapping support.
*/
- desc = irq_data_to_desc(irq_data);
- cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+ irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
return 0;
}
@@ -194,7 +192,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
u32 vector;
struct irq_cfg *cfg;
int ioapic_id;
- struct cpumask *affinity;
+ const struct cpumask *affinity;
int cpu;
struct hv_interrupt_entry entry;
struct hyperv_root_ir_data *data = irq_data->chip_data;
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 71596fc62822..3ee68393122f 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "DMAR: " fmt
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "cap_audit.h"
static u64 intel_iommu_cap_sanity;
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index ed796eea4581..1f925285104e 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -10,11 +10,11 @@
#include <linux/debugfs.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/pci.h>
#include <asm/irq_remapping.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
@@ -263,10 +263,9 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
- unsigned long flags;
u16 bus;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
@@ -278,8 +277,7 @@ static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
*/
for (bus = 0; bus < 256; bus++)
ctx_tbl_walk(m, iommu, bus);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
}
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
@@ -342,13 +340,13 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
}
}
-static int show_device_domain_translation(struct device *dev, void *data)
+static int __show_device_domain_translation(struct device *dev, void *data)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
+ struct dmar_domain *domain;
struct seq_file *m = data;
u64 path[6] = { 0 };
+ domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
if (!domain)
return 0;
@@ -359,20 +357,39 @@ static int show_device_domain_translation(struct device *dev, void *data)
pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
seq_putc(m, '\n');
- return 0;
+ /* Don't iterate */
+ return 1;
}
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int show_device_domain_translation(struct device *dev, void *data)
{
- unsigned long flags;
- int ret;
+ struct iommu_group *group;
- spin_lock_irqsave(&device_domain_lock, flags);
- ret = bus_for_each_dev(&pci_bus_type, NULL, m,
- show_device_domain_translation);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ group = iommu_group_get(dev);
+ if (group) {
+ /*
+ * The group->mutex is held across the callback, which will
+ * block calls to iommu_attach/detach_group/device. Hence,
+ * the domain of the device will not change during traversal.
+ *
+ * All devices in an iommu group share a single domain, hence
+ * we only dump the domain of the first device. Even so, this
+ * code may still race with the iommu_unmap() interface. That
+ * could be solved by RCU-freeing the page table pages in the
+ * iommu_unmap() path.
+ */
+ iommu_group_for_each_dev(group, data,
+ __show_device_domain_translation);
+ iommu_group_put(group);
+ }
- return ret;
+ return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+ return bus_for_each_dev(&pci_bus_type, NULL, m,
+ show_device_domain_translation);
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
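
The debugfs change above walks each iommu group and dumps only the first device's
domain, relying on a non-zero callback return value to stop the group walk. A small,
self-contained sketch of that pattern is below; the names are illustrative and not
part of the patch.

#include <linux/device.h>
#include <linux/iommu.h>

/* Callback: handle the first device of the group, then stop iterating. */
static int example_first_dev(struct device *dev, void *data)
{
        struct iommu_domain *dom = iommu_get_domain_for_dev(dev);

        if (!dom)
                return 0;       /* keep iterating */

        dev_info(dev, "domain type %u\n", dom->type);
        return 1;               /* non-zero return stops the walk */
}

static void example_dump_group(struct device *dev)
{
        struct iommu_group *group = iommu_group_get(dev);

        if (!group)
                return;

        /* group->mutex is held across the callback by the iommu core. */
        iommu_group_for_each_dev(group, NULL, example_first_dev);
        iommu_group_put(group);
}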
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9699ca101c62..5a8f780e7ffd 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
-#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -30,10 +29,11 @@
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "perf.h"
+#include "trace.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
@@ -60,7 +60,7 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
-static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
+static DEFINE_IDA(dmar_seq_ids);
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
if (drhd->reg_base_addr == rhsa->base_address) {
int node = pxm_to_node(rhsa->proximity_domain);
- if (!node_online(node))
+ if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
@@ -1023,28 +1023,6 @@ out:
return err;
}
-static int dmar_alloc_seq_id(struct intel_iommu *iommu)
-{
- iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
- DMAR_UNITS_SUPPORTED);
- if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
- iommu->seq_id = -1;
- } else {
- set_bit(iommu->seq_id, dmar_seq_ids);
- sprintf(iommu->name, "dmar%d", iommu->seq_id);
- }
-
- return iommu->seq_id;
-}
-
-static void dmar_free_seq_id(struct intel_iommu *iommu)
-{
- if (iommu->seq_id >= 0) {
- clear_bit(iommu->seq_id, dmar_seq_ids);
- iommu->seq_id = -1;
- }
-}
-
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
@@ -1062,11 +1040,14 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- if (dmar_alloc_seq_id(iommu) < 0) {
+ iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
+ DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
+ if (iommu->seq_id < 0) {
pr_err("Failed to allocate seq_id\n");
- err = -ENOSPC;
+ err = iommu->seq_id;
goto error;
}
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
@@ -1150,7 +1131,7 @@ err_sysfs:
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
error:
kfree(iommu);
return err;
@@ -1183,7 +1164,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
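
The dmar.c hunks above drop the hand-rolled dmar_seq_ids bitmap allocator in favour
of an IDA. A condensed sketch of the allocation pattern the new code uses is shown
below; the identifier names are illustrative, not part of the patch.

#include <linux/idr.h>

static DEFINE_IDA(example_seq_ids);

/* Allocate an id in [0, max_units - 1]; returns the id or a negative errno. */
static int example_alloc_seq_id(unsigned int max_units)
{
        return ida_alloc_range(&example_seq_ids, 0, max_units - 1, GFP_KERNEL);
}

static void example_free_seq_id(int id)
{
        ida_free(&example_seq_ids, id);
}

Because ida_alloc_range() already returns a negative errno on failure, the caller can
propagate it directly, which is why the new alloc_iommu() sets err = iommu->seq_id
instead of hard-coding -ENOSPC.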
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c0dce78586a..7cca030a508e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -17,7 +17,6 @@
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dmi.h>
-#include <linux/intel-iommu.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
#include <linux/pci.h>
@@ -26,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -126,13 +126,8 @@ static inline unsigned long virt_to_dma_pfn(void *p)
return page_to_dma_pfn(virt_to_page(p));
}
-/* global iommu list, set NULL for ignored DMAR units */
-static struct intel_iommu **g_iommus;
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn);
/*
* set to 1 to panic kernel if can't successfully enable VT-d
@@ -256,10 +251,6 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-#define for_each_domain_iommu(idx, domain) \
- for (idx = 0; idx < g_num_of_iommus; idx++) \
- if (domain->iommu_refcnt[idx])
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -293,12 +284,7 @@ static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-/* bitmap for indexing intel_iommus */
-static int g_num_of_iommus;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
-static void __dmar_remove_one_dev_info(struct device_domain_info *info);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -314,12 +300,6 @@ static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-int intel_iommu_gfx_mapped;
-EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-
-DEFINE_SPINLOCK(device_domain_lock);
-static LIST_HEAD(device_domain_list);
-
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -455,24 +435,6 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-/* This functionin only returns single iommu in a domain */
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
-{
- int iommu_id;
-
- /* si_domain and vm domain should not get here. */
- if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
- return NULL;
-
- for_each_domain_iommu(iommu_id, domain)
- break;
-
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
-
- return g_iommus[iommu_id];
-}
-
static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
@@ -481,16 +443,16 @@ static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
+ struct iommu_domain_info *info;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool found = false;
- int i;
+ unsigned long i;
domain->iommu_coherency = true;
-
- for_each_domain_iommu(i, domain) {
+ xa_for_each(&domain->iommu_array, i, info) {
found = true;
- if (!iommu_paging_structure_coherency(g_iommus[i])) {
+ if (!iommu_paging_structure_coherency(info->iommu)) {
domain->iommu_coherency = false;
break;
}
@@ -544,15 +506,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
- assert_spin_locked(&device_domain_lock);
-
- if (list_empty(&domain->devices))
- return NUMA_NO_NODE;
-
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
- if (!info->dev)
- continue;
-
/*
* There could possibly be multiple device numa nodes as devices
* within the same domain may sit behind different IOMMUs. There
@@ -563,6 +518,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
+ spin_unlock(&domain->lock);
return nid;
}
@@ -804,26 +760,23 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct context_entry *context;
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (context)
ret = context_present(context);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return ret;
}
static void free_context_table(struct intel_iommu *iommu)
{
- int i;
- unsigned long flags;
struct context_entry *context;
+ int i;
+
+ if (!iommu->root_entry)
+ return;
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry) {
- goto out;
- }
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
@@ -835,12 +788,10 @@ static void free_context_table(struct intel_iommu *iommu)
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
-
}
+
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
-out:
- spin_unlock_irqrestore(&iommu->lock, flags);
}
#ifdef CONFIG_DMAR_DEBUG
@@ -849,9 +800,14 @@ static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u
struct device_domain_info *info;
struct dma_pte *parent, *pte;
struct dmar_domain *domain;
+ struct pci_dev *pdev;
int offset, level;
- info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+ pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
+ if (!pdev)
+ return;
+
+ info = dev_iommu_priv_get(&pdev->dev);
if (!info || !info->domain) {
pr_info("device [%02x:%02x.%d] not probed\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1234,7 +1190,6 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) {
@@ -1244,10 +1199,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
-
- spin_lock_irqsave(&iommu->lock, flags);
iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
@@ -1389,23 +1341,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+ u8 bus, u8 devfn)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
if (!iommu->qi)
return NULL;
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock(&domain->lock);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- if (info->ats_supported && info->dev)
- return info;
- break;
+ spin_unlock(&domain->lock);
+ return info->ats_supported ? info : NULL;
}
+ }
+ spin_unlock(&domain->lock);
return NULL;
}
@@ -1415,23 +1367,21 @@ static void domain_update_iotlb(struct dmar_domain *domain)
struct device_domain_info *info;
bool has_iotlb_device = false;
- assert_spin_locked(&device_domain_lock);
-
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock(&domain->lock);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
-
+ }
domain->has_iotlb_device = has_iotlb_device;
+ spin_unlock(&domain->lock);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!info || !dev_is_pci(info->dev))
return;
@@ -1477,8 +1427,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!dev_is_pci(info->dev))
return;
@@ -1518,17 +1466,15 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- unsigned long flags;
struct device_domain_info *info;
if (!domain->has_iotlb_device)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&domain->lock);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1539,7 +1485,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
unsigned int aligned_pages = __roundup_pow_of_two(pages);
unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain->iommu_did[iommu->seq_id];
+ u16 did = domain_id_iommu(domain, iommu);
BUG_ON(pages == 0);
@@ -1609,11 +1555,12 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- int idx;
+ struct iommu_domain_info *info;
+ unsigned long idx;
- for_each_domain_iommu(idx, dmar_domain) {
- struct intel_iommu *iommu = g_iommus[idx];
- u16 did = dmar_domain->iommu_did[iommu->seq_id];
+ xa_for_each(&dmar_domain->iommu_array, idx, info) {
+ struct intel_iommu *iommu = info->iommu;
+ u16 did = domain_id_iommu(dmar_domain, iommu);
if (domain_use_first_level(dmar_domain))
qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
@@ -1719,23 +1666,16 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
if (!iommu->domain_ids)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- if (info->iommu != iommu)
- continue;
-
- if (!info->dev || !info->domain)
- continue;
-
- __dmar_remove_one_dev_info(info);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ /*
+ * All iommu domains must have been detached from the devices,
+ * hence there should be no domain IDs in use.
+ */
+ if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
+ > NUM_RESERVED_DID))
+ return;
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
@@ -1748,8 +1688,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
- g_iommus[iommu->seq_id] = NULL;
-
/* free context mapping */
free_context_table(iommu);
@@ -1795,55 +1733,77 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ spin_lock_init(&domain->lock);
+ xa_init(&domain->iommu_array);
return domain;
}
-/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
+ struct iommu_domain_info *info, *curr;
unsigned long ndomains;
- int num;
+ int num, ret = -ENOSPC;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- domain->iommu_refcnt[iommu->seq_id] += 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 1) {
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ spin_lock(&iommu->lock);
+ curr = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (curr) {
+ curr->refcnt++;
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return 0;
+ }
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- return -ENOSPC;
- }
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ goto err_unlock;
+ }
- set_bit(num, iommu->domain_ids);
- domain->iommu_did[iommu->seq_id] = num;
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
+ set_bit(num, iommu->domain_ids);
+ info->refcnt = 1;
+ info->did = num;
+ info->iommu = iommu;
+ curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
+ NULL, info, GFP_ATOMIC);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto err_clear;
}
+ domain_update_iommu_cap(domain);
+ spin_unlock(&iommu->lock);
return 0;
+
+err_clear:
+ clear_bit(info->did, iommu->domain_ids);
+err_unlock:
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return ret;
}
static void domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num;
+ struct iommu_domain_info *info;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
-
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 0) {
- num = domain->iommu_did[iommu->seq_id];
- clear_bit(num, iommu->domain_ids);
+ spin_lock(&iommu->lock);
+ info = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (--info->refcnt == 0) {
+ clear_bit(info->did, iommu->domain_ids);
+ xa_erase(&domain->iommu_array, iommu->seq_id);
+ domain->nid = NUMA_NO_NODE;
domain_update_iommu_cap(domain);
- domain->iommu_did[iommu->seq_id] = 0;
+ kfree(info);
}
+ spin_unlock(&iommu->lock);
}
static inline int guestwidth_to_adjustwidth(int gaw)
@@ -1862,10 +1822,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
static void domain_exit(struct dmar_domain *domain)
{
-
- /* Remove associated devices and clear attached or cached domains */
- domain_remove_dev_info(domain);
-
if (domain->pgd) {
LIST_HEAD(freelist);
@@ -1873,6 +1829,9 @@ static void domain_exit(struct dmar_domain *domain)
put_pages_list(&freelist);
}
+ if (WARN_ON(!list_empty(&domain->devices)))
+ return;
+
kfree(domain);
}
@@ -1930,11 +1889,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct pasid_table *table,
u8 bus, u8 devfn)
{
- u16 did = domain->iommu_did[iommu->seq_id];
+ struct device_domain_info *info =
+ iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
- struct device_domain_info *info = NULL;
struct context_entry *context;
- unsigned long flags;
int ret;
WARN_ON(did == 0);
@@ -1947,9 +1906,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
BUG_ON(!domain->pgd);
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context)
@@ -2000,7 +1957,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* Setup the Device-TLB enable bit and Page request
* Enable bit:
*/
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
context_set_sm_dte(context);
if (info && info->pri_supported)
@@ -2023,7 +1979,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
}
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
translation = CONTEXT_TT_DEV_IOTLB;
else
@@ -2069,7 +2024,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
out_unlock:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -2186,8 +2140,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long end_pfn, int level)
{
unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct iommu_domain_info *info;
struct dma_pte *pte = NULL;
- int i;
+ unsigned long i;
while (start_pfn <= end_pfn) {
if (!pte)
@@ -2198,8 +2153,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
start_pfn + lvl_pages - 1,
level + 1);
- for_each_domain_iommu(i, domain)
- iommu_flush_iotlb_psi(g_iommus[i], domain,
+ xa_for_each(&domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
}
@@ -2313,16 +2268,15 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
- unsigned long flags;
u16 did_old;
if (!iommu)
return;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return;
}
@@ -2330,14 +2284,14 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
if (hw_pass_through && domain_type_is_si(info->domain))
did_old = FLPT_DEFAULT_DID;
else
- did_old = info->domain->iommu_did[iommu->seq_id];
+ did_old = domain_id_iommu(info->domain, iommu);
} else {
did_old = context_domain_id(context);
}
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
iommu->flush.flush_context(iommu,
did_old,
(((u16)bus) << 8) | devfn,
@@ -2356,30 +2310,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
-static void domain_remove_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
-{
- struct device_domain_info *info;
-
- list_for_each_entry(info, &device_domain_list, global)
- if (info->segment == segment && info->bus == bus &&
- info->devfn == devfn)
- return info;
-
- return NULL;
-}
-
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
@@ -2412,7 +2342,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags |= PASID_FLAG_PAGE_SNOOP;
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
flags);
}
@@ -2499,7 +2429,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2507,17 +2436,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (!iommu)
return -ENODEV;
- spin_lock_irqsave(&device_domain_lock, flags);
- info->domain = domain;
- spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (ret)
return ret;
- }
+ info->domain = domain;
+ spin_lock(&domain->lock);
list_add(&info->link, &domain->devices);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&domain->lock);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2529,7 +2454,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
}
/* Setup the PASID entry for requests without PASID: */
- spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
@@ -2539,7 +2463,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
@@ -2807,7 +2730,6 @@ static int copy_translation_tables(struct intel_iommu *iommu)
struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
- unsigned long flags;
u64 rtaddr_reg;
int bus, ret;
bool new_ext, ext;
@@ -2850,7 +2772,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
}
}
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
/* Context tables are copied, now write them to the root_entry table */
for (bus = 0; bus < 256; bus++) {
@@ -2869,7 +2791,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
iommu->root_entry[bus].hi = val;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
kfree(ctxt_tbls);
@@ -2968,36 +2890,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- /*
- * lock not needed as this is only incremented in the single
- * threaded kernel __init code path all other access are read
- * only
- */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
- }
-
- /* Preallocate enough resources for IOMMU hot-addition */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
- g_num_of_iommus = DMAR_UNITS_SUPPORTED;
-
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- ret = -ENOMEM;
- goto error;
- }
-
ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
if (ret)
goto free_iommu;
@@ -3020,8 +2912,6 @@ static int __init init_dmars(void)
intel_pasid_max_id);
}
- g_iommus[iommu->seq_id] = iommu;
-
intel_iommu_init_qi(iommu);
ret = iommu_init_domains(iommu);
@@ -3147,9 +3037,6 @@ free_iommu:
free_dmar_iommu(iommu);
}
- kfree(g_iommus);
-
-error:
return ret;
}
@@ -3530,9 +3417,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
int sp, ret;
struct intel_iommu *iommu = dmaru->iommu;
- if (g_iommus[iommu->seq_id])
- return 0;
-
ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
if (ret)
goto out;
@@ -3556,7 +3440,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
if (ret == 0)
ret = iommu_alloc_root_entry(iommu);
@@ -4022,6 +3905,20 @@ static int __init probe_acpi_namespace_devices(void)
return 0;
}
+static __init int tboot_force_iommu(void)
+{
+ if (!tboot_enabled())
+ return 0;
+
+ if (no_iommu || dmar_disabled)
+ pr_warn("Forcing Intel-IOMMU to enabled\n");
+
+ dmar_disabled = 0;
+ no_iommu = 0;
+
+ return 1;
+}
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -4093,9 +3990,6 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_satc_units))
pr_info("No SATC found\n");
- if (dmar_map_gfx)
- intel_iommu_gfx_mapped = 1;
-
init_no_remapping_devices();
ret = init_dmars();
@@ -4181,21 +4075,13 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void __dmar_remove_one_dev_info(struct device_domain_info *info)
+static void dmar_remove_one_dev_info(struct device *dev)
{
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- unsigned long flags;
-
- assert_spin_locked(&device_domain_lock);
-
- if (WARN_ON(!info))
- return;
-
- iommu = info->iommu;
- domain = info->domain;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *domain = info->domain;
+ struct intel_iommu *iommu = info->iommu;
- if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
+ if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
@@ -4205,23 +4091,12 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
intel_pasid_free_table(info->dev);
}
+ spin_lock(&domain->lock);
list_del(&info->link);
+ spin_unlock(&domain->lock);
- spin_lock_irqsave(&iommu->lock, flags);
domain_detach_iommu(domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dev_iommu_priv_get(dev);
- if (info)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ info->domain = NULL;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4466,15 +4341,16 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long iova_pfn = IOVA_PFN(gather->start);
size_t size = gather->end - gather->start;
+ struct iommu_domain_info *info;
unsigned long start_pfn;
unsigned long nrpages;
- int iommu_id;
+ unsigned long i;
nrpages = aligned_nrpages(gather->start, size);
start_pfn = mm_to_dma_pfn(iova_pfn);
- for_each_domain_iommu(iommu_id, dmar_domain)
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, dmar_domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
@@ -4503,7 +4379,7 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
struct device_domain_info *info;
bool support = true;
- assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (!ecap_sc_support(info->iommu->ecap)) {
support = false;
@@ -4518,8 +4394,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
+ assert_spin_locked(&domain->lock);
/*
* Second level page table supports per-PTE snoop control. The
* iommu_map() interface will handle this by setting SNP bit.
@@ -4537,20 +4412,19 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&dmar_domain->lock);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&dmar_domain->lock);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&dmar_domain->lock);
return true;
}
@@ -4572,7 +4446,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
struct device_domain_info *info;
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4615,10 +4488,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
}
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->global, &device_domain_list);
dev_iommu_priv_set(dev, info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return &iommu->iommu;
}
@@ -4626,15 +4496,9 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long flags;
dmar_remove_one_dev_info(dev);
-
- spin_lock_irqsave(&device_domain_lock, flags);
dev_iommu_priv_set(dev, NULL);
- list_del(&info->global);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
kfree(info);
set_dma_ops(dev, NULL);
}
@@ -4707,7 +4571,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct context_entry *context;
struct dmar_domain *domain;
- unsigned long flags;
u64 ctx_lo;
int ret;
@@ -4715,9 +4578,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
if (!domain)
return -EINVAL;
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -EINVAL;
if (!info->pasid_supported)
goto out;
@@ -4733,7 +4594,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
context[0].lo = ctx_lo;
wmb();
iommu->flush.flush_context(iommu,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
@@ -4747,7 +4608,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
out:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -4871,13 +4731,11 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long pages = aligned_nrpages(iova, size);
unsigned long pfn = iova >> VTD_PAGE_SHIFT;
- struct intel_iommu *iommu;
- int iommu_id;
+ struct iommu_domain_info *info;
+ unsigned long i;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, dmar_domain, pfn, pages);
- }
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
const struct iommu_ops intel_iommu_ops = {
@@ -4887,7 +4745,6 @@ const struct iommu_ops intel_iommu_ops = {
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.device_group = intel_iommu_device_group,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
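
The iommu.c changes above retire the global g_iommus[] array and the per-domain
iommu_refcnt[]/iommu_did[] arrays in favour of a per-domain xarray of
iommu_domain_info entries. A condensed sketch of the refcounted-attach pattern is
shown below, assuming the caller serializes attaches for a given id (the patch does
this under iommu->lock); the struct and function names here are illustrative only.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct example_info {                   /* stands in for struct iommu_domain_info */
        unsigned int refcnt;
        void *unit;
};

static int example_attach(struct xarray *xa, unsigned long id, void *unit)
{
        struct example_info *info, *curr;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        curr = xa_load(xa, id);
        if (curr) {                     /* already attached: just bump the refcount */
                curr->refcnt++;
                kfree(info);
                return 0;
        }

        info->refcnt = 1;
        info->unit = unit;
        /* Install only if the slot is still empty; xa_cmpxchg() returns the old entry. */
        curr = xa_cmpxchg(xa, id, NULL, info, GFP_ATOMIC);
        if (curr) {
                kfree(info);
                return xa_err(curr) ? : -EBUSY;
        }
        return 0;
}

Detach is the mirror image: look the entry up with xa_load(), and once the refcount
drops to zero, xa_erase() the slot and free the entry, as domain_detach_iommu() does.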
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
new file mode 100644
index 000000000000..fae45bbb0c7f
--- /dev/null
+++ b/drivers/iommu/intel/iommu.h
@@ -0,0 +1,839 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006-2015, Intel Corporation.
+ *
+ * Authors: Ashok Raj <ashok.raj@intel.com>
+ * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * David Woodhouse <David.Woodhouse@intel.com>
+ */
+
+#ifndef _INTEL_IOMMU_H_
+#define _INTEL_IOMMU_H_
+
+#include <linux/types.h>
+#include <linux/iova.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/mmu_notifier.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmar.h>
+#include <linux/ioasid.h>
+#include <linux/bitfield.h>
+#include <linux/xarray.h>
+
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+/*
+ * VT-d hardware uses 4KiB page size regardless of host page size.
+ */
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT (9)
+#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ BIT_ULL(0)
+#define DMA_PTE_WRITE BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
+#define DMA_PTE_SNP BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_US BIT_ULL(2)
+#define DMA_FL_PTE_ACCESS BIT_ULL(5)
+#define DMA_FL_PTE_DIRTY BIT_ULL(6)
+#define DMA_FL_PTE_XD BIT_ULL(63)
+
+#define ADDR_WIDTH_5LEVEL (57)
+#define ADDR_WIDTH_4LEVEL (48)
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+#define CONTEXT_PASIDE BIT_ULL(3)
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
+#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
+#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
+#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
+#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
+#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
+#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
+#define DMAR_PRS_REG 0xdc /* Page request status register */
+#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
+#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
+#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
+#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
+#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
+#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
+#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
+#define DMAR_MTRR_FIX16K_80000_REG 0x128
+#define DMAR_MTRR_FIX16K_A0000_REG 0x130
+#define DMAR_MTRR_FIX4K_C0000_REG 0x138
+#define DMAR_MTRR_FIX4K_C8000_REG 0x140
+#define DMAR_MTRR_FIX4K_D0000_REG 0x148
+#define DMAR_MTRR_FIX4K_D8000_REG 0x150
+#define DMAR_MTRR_FIX4K_E0000_REG 0x158
+#define DMAR_MTRR_FIX4K_E8000_REG 0x160
+#define DMAR_MTRR_FIX4K_F0000_REG 0x168
+#define DMAR_MTRR_FIX4K_F8000_REG 0x170
+#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
+#define DMAR_MTRR_PHYSMASK0_REG 0x188
+#define DMAR_MTRR_PHYSBASE1_REG 0x190
+#define DMAR_MTRR_PHYSMASK1_REG 0x198
+#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
+#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
+#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
+#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
+#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
+#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
+#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
+#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
+#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
+#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
+#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
+#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
+#define DMAR_MTRR_PHYSBASE8_REG 0x200
+#define DMAR_MTRR_PHYSMASK8_REG 0x208
+#define DMAR_MTRR_PHYSBASE9_REG 0x210
+#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
+#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
+#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
+
+#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
+#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
+#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg)
+
+#define OFFSET_STRIDE (9)
+
+#define dmar_readq(a) readq(a)
+#define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
+
+#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
+#define DMAR_VER_MINOR(v) ((v) & 0x0f)
+
+/*
+ * Decoding Capability Register
+ */
+#define cap_5lp_support(c) (((c) >> 60) & 1)
+#define cap_pi_support(c) (((c) >> 59) & 1)
+#define cap_fl1gp_support(c) (((c) >> 56) & 1)
+#define cap_read_drain(c) (((c) >> 55) & 1)
+#define cap_write_drain(c) (((c) >> 54) & 1)
+#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
+#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
+#define cap_pgsel_inv(c) (((c) >> 39) & 1)
+
+#define cap_super_page_val(c) (((c) >> 34) & 0xf)
+#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
+ * OFFSET_STRIDE) + 21)
+
+#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
+#define cap_max_fault_reg_offset(c) \
+ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
+
+#define cap_zlr(c) (((c) >> 22) & 1)
+#define cap_isoch(c) (((c) >> 23) & 1)
+#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
+#define cap_sagaw(c) (((c) >> 8) & 0x1f)
+#define cap_caching_mode(c) (((c) >> 7) & 1)
+#define cap_phmr(c) (((c) >> 6) & 1)
+#define cap_plmr(c) (((c) >> 5) & 1)
+#define cap_rwbf(c) (((c) >> 4) & 1)
+#define cap_afl(c) (((c) >> 3) & 1)
+#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
+/*
+ * Extended Capability Register
+ */
+
+#define ecap_rps(e) (((e) >> 49) & 0x1)
+#define ecap_smpwc(e) (((e) >> 48) & 0x1)
+#define ecap_flts(e) (((e) >> 47) & 0x1)
+#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_slads(e) (((e) >> 45) & 0x1)
+#define ecap_vcs(e) (((e) >> 44) & 0x1)
+#define ecap_smts(e) (((e) >> 43) & 0x1)
+#define ecap_dit(e) (((e) >> 41) & 0x1)
+#define ecap_pds(e) (((e) >> 42) & 0x1)
+#define ecap_pasid(e) (((e) >> 40) & 0x1)
+#define ecap_pss(e) (((e) >> 35) & 0x1f)
+#define ecap_eafs(e) (((e) >> 34) & 0x1)
+#define ecap_nwfs(e) (((e) >> 33) & 0x1)
+#define ecap_srs(e) (((e) >> 31) & 0x1)
+#define ecap_ers(e) (((e) >> 30) & 0x1)
+#define ecap_prs(e) (((e) >> 29) & 0x1)
+#define ecap_broken_pasid(e) (((e) >> 28) & 0x1)
+#define ecap_dis(e) (((e) >> 27) & 0x1)
+#define ecap_nest(e) (((e) >> 26) & 0x1)
+#define ecap_mts(e) (((e) >> 25) & 0x1)
+#define ecap_ecs(e) (((e) >> 24) & 0x1)
+#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
+#define ecap_coherent(e) ((e) & 0x1)
+#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) (((e) >> 6) & 0x1)
+#define ecap_eim_support(e) (((e) >> 4) & 0x1)
+#define ecap_ir_support(e) (((e) >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
+#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
+#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
+
+/* Virtual command interface capability */
+#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
+
+/* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET 60
+#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
+#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
+#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
+#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
+#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
+#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
+#define DMA_TLB_IVT (((u64)1) << 63)
+#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_TLB_MAX_SIZE (0x3f)
+
+/* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET 61
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
+#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
+#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
+#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
+#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_ID_TLB_ADDR(addr) (addr)
+#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
+
+/* PMEN_REG */
+#define DMA_PMEN_EPM (((u32)1)<<31)
+#define DMA_PMEN_PRS (((u32)1)<<0)
+
+/* GCMD_REG */
+#define DMA_GCMD_TE (((u32)1) << 31)
+#define DMA_GCMD_SRTP (((u32)1) << 30)
+#define DMA_GCMD_SFL (((u32)1) << 29)
+#define DMA_GCMD_EAFL (((u32)1) << 28)
+#define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
+#define DMA_GCMD_CFI (((u32) 1) << 23)
+
+/* GSTS_REG */
+#define DMA_GSTS_TES (((u32)1) << 31)
+#define DMA_GSTS_RTPS (((u32)1) << 30)
+#define DMA_GSTS_FLS (((u32)1) << 29)
+#define DMA_GSTS_AFLS (((u32)1) << 28)
+#define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
+#define DMA_GSTS_CFIS (((u32)1) << 23)
+
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+#define DMA_RTADDR_SMT (((u64)1) << 10)
+
+/* CCMD_REG */
+#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
+#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
+#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
+#define DMA_CCMD_MASK_NOBIT 0
+#define DMA_CCMD_MASK_1BIT 1
+#define DMA_CCMD_MASK_2BIT 2
+#define DMA_CCMD_MASK_3BIT 3
+#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
+#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+
+/* FECTL_REG */
+#define DMA_FECTL_IM (((u32)1) << 31)
+
+/* FSTS_REG */
+#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
+#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
+#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
+#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
+#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
+#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
+
+/* FRCD_REG, 32 bits access */
+#define DMA_FRCD_F (((u32)1) << 31)
+#define dma_frcd_type(d) ((d >> 30) & 1)
+#define dma_frcd_fault_reason(c) (c & 0xff)
+#define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+#define DMA_PRS_PRO ((u32)2)
+
+#define DMA_VCS_PAS ((u64)1)
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+ cycles_t start_time = get_cycles(); \
+ while (1) { \
+ sts = op(iommu->reg + offset); \
+ if (cond) \
+ break; \
+ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
+ panic("DMAR hardware is malfunctioning\n"); \
+ cpu_relax(); \
+ } \
+} while (0)
+
+#define QI_LENGTH 256 /* queue length */
+
+enum {
+ QI_FREE,
+ QI_IN_USE,
+ QI_DONE,
+ QI_ABORT
+};
+
+#define QI_CC_TYPE 0x1
+#define QI_IOTLB_TYPE 0x2
+#define QI_DIOTLB_TYPE 0x3
+#define QI_IEC_TYPE 0x4
+#define QI_IWD_TYPE 0x5
+#define QI_EIOTLB_TYPE 0x6
+#define QI_PC_TYPE 0x7
+#define QI_DEIOTLB_TYPE 0x8
+#define QI_PGRP_RESP_TYPE 0x9
+#define QI_PSTRM_RESP_TYPE 0xa
+
+#define QI_IEC_SELECTIVE (((u64)1) << 4)
+#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
+#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
+
+#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
+#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+#define QI_IWD_FENCE (((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
+
+#define QI_IOTLB_DID(did) (((u64)did) << 16)
+#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
+
+#define QI_CC_FM(fm) (((u64)fm) << 48)
+#define QI_CC_SID(sid) (((u64)sid) << 32)
+#define QI_CC_DID(did) (((u64)did) << 16)
+#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
+#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
+#define QI_PC_DID(did) (((u64)did) << 16)
+#define QI_PC_GRAN(gran) (((u64)gran) << 4)
+
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS 0
+#define QI_PC_PASID_SEL 1
+#define QI_PC_GLOBAL 3
+
+#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
+#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
+#define QI_EIOTLB_DID(did) (((u64)did) << 16)
+#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL 1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
+
+#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
+#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
+#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
+#define QI_DEV_EIOTLB_MAX_INVS 32
+
+/* Page group response descriptor QW0 */
+#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
+#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
+#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+
+/* Page group response descriptor QW1 */
+#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
+
+
+#define QI_RESP_SUCCESS 0x0
+#define QI_RESP_INVALID 0x1
+#define QI_RESP_FAILURE 0xf
+
+#define QI_GRAN_NONG_PASID 2
+#define QI_GRAN_PSI_PASID 3
+
+#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
+struct qi_desc {
+ u64 qw0;
+ u64 qw1;
+ u64 qw2;
+ u64 qw3;
+};
+
+struct q_inval {
+ raw_spinlock_t q_lock;
+ void *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+ int free_tail; /* last free entry */
+ int free_cnt;
+};
+
+struct dmar_pci_notify_info;
+
+#ifdef CONFIG_IRQ_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
+
+#define INTR_REMAP_TABLE_ENTRIES 65536
+
+struct irq_domain;
+
+struct ir_table {
+ struct irte *base;
+ unsigned long *bitmap;
+};
+
+void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
+#else
+static inline void
+intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
+#endif
+
+struct iommu_flush {
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+};
+
+enum {
+ SR_DMAR_FECTL_REG,
+ SR_DMAR_FEDATA_REG,
+ SR_DMAR_FEADDR_REG,
+ SR_DMAR_FEUADDR_REG,
+ MAX_SR_DMAR_REGS
+};
+
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE (1 << 2)
+
+extern int intel_iommu_sm;
+
+#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu) (sm_supported(iommu) && \
+ ecap_pasid((iommu)->ecap))
+
+struct pasid_entry;
+struct pasid_state_entry;
+struct page_req_dsc;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
+
+struct iommu_domain_info {
+ struct intel_iommu *iommu;
+ unsigned int refcnt; /* Refcount of devices per iommu */
+ u16 did; /* Domain ids per IOMMU. Use u16 since
+ * domain ids are 16 bit wide according
+ * to VT-d spec, section 9.3 */
+};
+
+struct dmar_domain {
+ int nid; /* node id */
+ struct xarray iommu_array; /* Attached IOMMU array */
+
+ u8 has_iotlb_device: 1;
+ u8 iommu_coherency: 1; /* indicate coherency of iommu access */
+ u8 force_snooping : 1; /* Create IOPTEs with snoop control */
+ u8 set_pte_snp:1;
+
+ spinlock_t lock; /* Protect device tracking lists */
+ struct list_head devices; /* all devices' list */
+
+ struct dma_pte *pgd; /* virtual address */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+ int flags; /* flags to find out type of domain */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
+ u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
+};
+
+struct intel_iommu {
+ void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 reg_phys; /* physical address of hw register set */
+ u64 reg_size; /* size of hw register set */
+ u64 cap;
+ u64 ecap;
+ u64 vccap;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+ raw_spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+ int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
+ unsigned int irq, pr_irq;
+ u16 segment; /* PCI segment# */
+ unsigned char name[13]; /* Device Name */
+
+#ifdef CONFIG_INTEL_IOMMU
+ unsigned long *domain_ids; /* bitmap of domains */
+ spinlock_t lock; /* protect context, domain ids */
+ struct root_entry *root_entry; /* virtual address */
+
+ struct iommu_flush flush;
+#endif
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ struct page_req_dsc *prq;
+ unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct completion prq_complete;
+ struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
+#endif
+ struct iopf_queue *iopf_queue;
+ unsigned char iopfq_name[16];
+ struct q_inval *qi; /* Queued invalidation info */
+ u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+
+#ifdef CONFIG_IRQ_REMAP
+ struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
+#endif
+ struct iommu_device iommu; /* IOMMU core code handle */
+ int node;
+ u32 flags; /* Software defined flags */
+
+ struct dmar_drhd_unit *drhd;
+ void *perf_statistic;
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ u32 segment; /* PCI segment number */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+ u8 pri_enabled:1;
+ u8 ats_supported:1;
+ u8 ats_enabled:1;
+ u8 ats_qdep;
+ struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
+ struct dmar_domain *domain; /* pointer to domain */
+ struct pasid_table *pasid_table; /* pasid table */
+};
+
+static inline void __iommu_flush_cache(
+ struct intel_iommu *iommu, void *addr, int size)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(addr, size);
+}
+
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
+/* Retrieve the domain ID that has been allocated to the domain on this iommu */
+static inline u16
+domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
+{
+ struct iommu_domain_info *info =
+ xa_load(&domain->iommu_array, iommu->seq_id);
+
+ return info->did;
+}
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+ u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+ pte->val = 0;
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+ VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#endif
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+ return (pte->val & 3) != 0;
+}
+
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+ return (pte->val & DMA_PTE_LARGE_PAGE);
+}
+
+static inline bool first_pte_in_page(struct dma_pte *pte)
+{
+ return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
+}
+
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+ return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+ (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+}
+
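The helpers above encode the bit layout documented for struct dma_pte. Purely as a hedged illustration (not part of this patch; the function name and the pr_debug() calls are invented for the sketch), they compose like this when scanning the PTEs that share one table page:

/* Illustrative sketch only, not from the diff above. */
static void example_scan_pte_page(struct dma_pte *pte)
{
	int i, n = nr_pte_to_next_page(pte);	/* PTEs left in this table page */

	for (i = 0; i < n; i++, pte++) {
		if (!dma_pte_present(pte))	/* bits 0-1 (R/W) both clear */
			continue;
		if (dma_pte_superpage(pte))	/* bit 7: leaf superpage */
			pr_debug("superpage at 0x%llx\n", dma_pte_addr(pte));
		else
			pr_debug("next level at 0x%llx\n", dma_pte_addr(pte));
	}
}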
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void dmar_disable_qi(struct intel_iommu *iommu);
+extern int dmar_reenable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
+
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
+
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+ unsigned long npages, bool ih);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+ u32 pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN BIT(0)
+
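As the comment above notes, QI_OPT_WAIT_DRAIN rides on the wait descriptor that qi_submit_sync() appends to a batch. A hedged sketch of a caller follows (the function name is invented and the descriptor contents are elided; in the real driver, intel_svm_drain_prq() in svm.c builds the actual invalidation descriptors):

/* Sketch only, assuming 'descs' already holds the wanted invalidations. */
static int example_flush_and_drain(struct intel_iommu *iommu,
				   struct qi_desc *descs, unsigned int count)
{
	/*
	 * QI_OPT_WAIT_DRAIN makes the completion wait also cover the
	 * page request queue drain (VT-d spec 6.5.2.8) before returning.
	 */
	return qi_submit_sync(iommu, descs, count, QI_OPT_WAIT_DRAIN);
}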
+extern int dmar_ir_support(void);
+
+void *alloc_pgtable_page(int node);
+void free_pgtable_page(void *vaddr);
+void iommu_flush_write_buffer(struct intel_iommu *iommu);
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+extern void intel_svm_check(struct intel_iommu *iommu);
+extern int intel_svm_enable_prq(struct intel_iommu *iommu);
+extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+u32 intel_svm_get_pasid(struct iommu_sva *handle);
+int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
+ struct iommu_page_response *msg);
+
+struct intel_svm_dev {
+ struct list_head list;
+ struct rcu_head rcu;
+ struct device *dev;
+ struct intel_iommu *iommu;
+ struct iommu_sva sva;
+ unsigned long prq_seq_number;
+ u32 pasid;
+ int users;
+ u16 did;
+ u16 dev_iotlb:1;
+ u16 sid, qdep;
+};
+
+struct intel_svm {
+ struct mmu_notifier notifier;
+ struct mm_struct *mm;
+
+ unsigned int flags;
+ u32 pasid;
+ struct list_head devs;
+};
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+void intel_iommu_debugfs_init(void);
+#else
+static inline void intel_iommu_debugfs_init(void) {}
+#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+
+extern const struct attribute_group *intel_iommu_groups[];
+bool context_present(struct context_entry *context);
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc);
+
+extern const struct iommu_ops intel_iommu_ops;
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
+#endif
+
+static inline const char *decode_prq_descriptor(char *str, size_t size,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3)
+{
+ char *buf = str;
+ int bytes;
+
+ bytes = snprintf(buf, size,
+ "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
+ FIELD_GET(GENMASK_ULL(31, 16), dw0),
+ FIELD_GET(GENMASK_ULL(63, 12), dw1),
+ dw1 & BIT_ULL(0) ? 'r' : '-',
+ dw1 & BIT_ULL(1) ? 'w' : '-',
+ dw0 & BIT_ULL(52) ? 'x' : '-',
+ dw0 & BIT_ULL(53) ? 'p' : '-',
+ dw1 & BIT_ULL(2) ? 'l' : '-',
+ FIELD_GET(GENMASK_ULL(51, 32), dw0),
+ FIELD_GET(GENMASK_ULL(11, 3), dw1));
+
+ /* Private Data */
+ if (dw0 & BIT_ULL(9)) {
+ size -= bytes;
+ buf += bytes;
+ snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
+ }
+
+ return str;
+}
+
+#endif
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index a67319597884..2e9683e970f8 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -10,7 +10,6 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
-#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
@@ -21,6 +20,7 @@
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "cap_audit.h"
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 17cad7c1f62d..c5e7e8b020a5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -12,13 +12,13 @@
#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
+#include "iommu.h"
#include "pasid.h"
/*
@@ -450,17 +450,17 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
struct pasid_entry *pte;
u16 did, pgtt;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
- return;
-
- if (!pasid_pte_is_present(pte))
+ if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return;
+ }
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
-
intel_pasid_clear_entry(dev, pasid, fault_ignore);
+ spin_unlock(&iommu->lock);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -496,22 +496,6 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
-static inline int pasid_enable_wpe(struct pasid_entry *pte)
-{
-#ifdef CONFIG_X86
- unsigned long cr0 = read_cr0();
-
- /* CR0.WP is normally set but just to be sure */
- if (unlikely(!(cr0 & X86_CR0_WP))) {
- pr_err_ratelimited("No CPU write protect!\n");
- return -EINVAL;
- }
-#endif
- pasid_set_wpe(pte);
-
- return 0;
-};
-
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -528,39 +512,52 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
+ if (flags & PASID_FLAG_SUPERVISOR_MODE) {
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ if (!ecap_srs(iommu->ecap)) {
+ pr_err("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ }
+
+ if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ pr_err("No 5-level paging support for first-level on %s\n",
+ iommu->name);
return -EINVAL;
+ }
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
/* Setup the first level page table pointer: */
pasid_set_flptr(pte, (u64)__pa(pgd));
if (flags & PASID_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap)) {
- pr_err("No supervisor request support on %s\n",
- iommu->name);
- return -EINVAL;
- }
pasid_set_sre(pte);
- if (pasid_enable_wpe(pte))
- return -EINVAL;
-
+ pasid_set_wpe(pte);
}
- if (flags & PASID_FLAG_FL5LP) {
- if (cap_5lp_support(iommu->cap)) {
- pasid_set_flpm(pte, 1);
- } else {
- pr_err("No 5-level paging support for first-level\n");
- pasid_clear_entry(pte);
- return -EINVAL;
- }
- }
+ if (flags & PASID_FLAG_FL5LP)
+ pasid_set_flpm(pte, 1);
if (flags & PASID_FLAG_PAGE_SNOOP)
pasid_set_pgsnp(pte);
@@ -572,6 +569,8 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -627,17 +626,19 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
}
pgd_val = virt_to_phys(pgd);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -654,6 +655,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
if (pasid != PASID_RID2PASID)
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -669,15 +672,17 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
u16 did = FLPT_DEFAULT_DID;
struct pasid_entry *pte;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -692,6 +697,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index bf5b937848b4..20c54e50f533 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -39,6 +39,7 @@
* only and pass-through transfer modes.
*/
#define FLPT_DEFAULT_DID 1
+#define NUM_RESERVED_DID 2
/*
* The SUPERVISOR_MODE flag indicates a first level translation which
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 0e8e03252d92..94ee70ac38e3 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -9,8 +9,8 @@
*/
#include <linux/spinlock.h>
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "perf.h"
static DEFINE_SPINLOCK(latency_lock);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 7ee37d996e15..8bcfb93dda56 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -5,7 +5,6 @@
* Authors: David Woodhouse <dwmw2@infradead.org>
*/
-#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -21,11 +20,12 @@
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva-lib.h"
+#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
@@ -328,9 +328,9 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
unsigned int flags)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long iflags, sflags;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
+ unsigned long sflags;
int ret = 0;
svm = pasid_private_find(mm->pasid);
@@ -394,11 +394,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
PASID_FLAG_SUPERVISOR_MODE : 0;
sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
- spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
FLPT_DEFAULT_DID, sflags);
- spin_unlock_irqrestore(&iommu->lock, iflags);
-
if (ret)
goto free_sdev;
@@ -544,7 +541,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
domain = info->domain;
pdev = to_pci_dev(dev);
sid = PCI_DEVID(info->bus, info->devfn);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
qdep = pci_ats_queue_depth(pdev);
/*
diff --git a/drivers/iommu/intel/trace.c b/drivers/iommu/intel/trace.c
index bfb6a6e37a88..117e626e3ea9 100644
--- a/drivers/iommu/intel/trace.c
+++ b/drivers/iommu/intel/trace.c
@@ -11,4 +11,4 @@
#include <linux/types.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/intel_iommu.h>
+#include "trace.h"
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
new file mode 100644
index 000000000000..93d96f93a89b
--- /dev/null
+++ b/drivers/iommu/intel/trace.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel IOMMU trace support
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_iommu
+
+#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_IOMMU_H
+
+#include <linux/tracepoint.h>
+
+#include "iommu.h"
+
+#define MSG_MAX 256
+
+TRACE_EVENT(qi_submit,
+ TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
+
+ TP_ARGS(iommu, qw0, qw1, qw2, qw3),
+
+ TP_STRUCT__entry(
+ __field(u64, qw0)
+ __field(u64, qw1)
+ __field(u64, qw2)
+ __field(u64, qw3)
+ __string(iommu, iommu->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(iommu, iommu->name);
+ __entry->qw0 = qw0;
+ __entry->qw1 = qw1;
+ __entry->qw2 = qw2;
+ __entry->qw3 = qw3;
+ ),
+
+ TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
+ __print_symbolic(__entry->qw0 & 0xf,
+ { QI_CC_TYPE, "cc_inv" },
+ { QI_IOTLB_TYPE, "iotlb_inv" },
+ { QI_DIOTLB_TYPE, "dev_tlb_inv" },
+ { QI_IEC_TYPE, "iec_inv" },
+ { QI_IWD_TYPE, "inv_wait" },
+ { QI_EIOTLB_TYPE, "p_iotlb_inv" },
+ { QI_PC_TYPE, "pc_inv" },
+ { QI_DEIOTLB_TYPE, "p_dev_tlb_inv" },
+ { QI_PGRP_RESP_TYPE, "page_grp_resp" }),
+ __get_str(iommu),
+ __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
+ )
+);
+
+TRACE_EVENT(prq_report,
+ TP_PROTO(struct intel_iommu *iommu, struct device *dev,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3,
+ unsigned long seq),
+
+ TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
+
+ TP_STRUCT__entry(
+ __field(u64, dw0)
+ __field(u64, dw1)
+ __field(u64, dw2)
+ __field(u64, dw3)
+ __field(unsigned long, seq)
+ __string(iommu, iommu->name)
+ __string(dev, dev_name(dev))
+ __dynamic_array(char, buff, MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __entry->dw0 = dw0;
+ __entry->dw1 = dw1;
+ __entry->dw2 = dw2;
+ __entry->dw3 = dw3;
+ __entry->seq = seq;
+ __assign_str(iommu, iommu->name);
+ __assign_str(dev, dev_name(dev));
+ ),
+
+ TP_printk("%s/%s seq# %ld: %s",
+ __get_str(iommu), __get_str(dev), __entry->seq,
+ decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
+ __entry->dw1, __entry->dw2, __entry->dw3)
+ )
+);
+#endif /* _TRACE_INTEL_IOMMU_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index be066c1503d3..ba3115fd0f86 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -182,14 +182,8 @@ static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
(cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
}
-static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
- struct io_pgtable_cfg *cfg)
+static arm_v7s_iopte to_mtk_iopte(phys_addr_t paddr, arm_v7s_iopte pte)
{
- arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
-
- if (!arm_v7s_is_mtk_enabled(cfg))
- return pte;
-
if (paddr & BIT_ULL(32))
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
@@ -199,6 +193,17 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
return pte;
}
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+ if (arm_v7s_is_mtk_enabled(cfg))
+ return to_mtk_iopte(paddr, pte);
+
+ return pte;
+}
+
static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
struct io_pgtable_cfg *cfg)
{
@@ -240,10 +245,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dma_addr_t dma;
size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
+ gfp_t gfp_l1;
+
+ /*
+	 * ARM_MTK_TTBR_EXT extends the translation table base to support a
+	 * larger memory address.
+ */
+ gfp_l1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;
if (lvl == 1)
- table = (void *)__get_free_pages(
- __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
+ table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
@@ -251,7 +263,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
return NULL;
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys) {
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
@@ -457,9 +470,14 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
arm_v7s_iopte curr,
struct io_pgtable_cfg *cfg)
{
+ phys_addr_t phys = virt_to_phys(table);
arm_v7s_iopte old, new;
- new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
+ new = phys | ARM_V7S_PTE_TYPE_TABLE;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
+ new = to_mtk_iopte(phys, new);
+
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_V7S_ATTR_NS_TABLE;
@@ -779,6 +797,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
void *cookie)
{
struct arm_v7s_io_pgtable *data;
+ slab_flags_t slab_flag;
+ phys_addr_t paddr;
if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
@@ -788,7 +808,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_ARM_MTK_EXT))
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT |
+ IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
@@ -796,15 +817,27 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
return NULL;
+ if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
+ !arm_v7s_is_mtk_enabled(cfg))
+ return NULL;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
spin_lock_init(&data->split_lock);
+
+ /*
+	 * ARM_MTK_TTBR_EXT extends the translation table base to support a
+	 * larger memory address.
+ */
+ slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ 0 : ARM_V7S_TABLE_SLAB_FLAGS;
+
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SIZE(2, cfg),
- ARM_V7S_TABLE_SLAB_FLAGS, NULL);
+ slab_flag, NULL);
if (!data->l2_tables)
goto out_free_data;
@@ -850,12 +883,16 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
wmb();
/* TTBR */
- cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
- (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
+ paddr = virt_to_phys(data->pgd);
+ if (arm_v7s_is_mtk_enabled(cfg))
+ cfg->arm_v7s_cfg.ttbr = paddr | upper_32_bits(paddr);
+ else
+ cfg->arm_v7s_cfg.ttbr = paddr | ARM_V7S_TTBR_S |
+ (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
return &data->iop;
out_free_data:
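The new IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT quirk is opt-in per io_pgtable_cfg; the consumer in this series is the mtk_iommu.c hunk further down, which sets it only when the platform data carries PGTABLE_PA_35_EN. Condensed here as a hedged usage sketch:

	/* In the IOMMU driver's domain finalise path (see mtk_iommu.c below): */
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;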
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 847ad47a2dfd..780fb7071577 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -259,7 +259,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
return 0;
out_release:
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
out_module_put:
module_put(ops->owner);
@@ -272,7 +273,7 @@ err_free:
int iommu_probe_device(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
struct iommu_group *group;
int ret;
@@ -313,6 +314,7 @@ int iommu_probe_device(struct device *dev)
mutex_unlock(&group->mutex);
iommu_group_put(group);
+ ops = dev_iommu_ops(dev);
if (ops->probe_finalize)
ops->probe_finalize(dev);
@@ -336,7 +338,8 @@ void iommu_release_device(struct device *dev)
iommu_device_unlink(dev->iommu->iommu_dev, dev);
ops = dev_iommu_ops(dev);
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
iommu_group_remove_device(dev);
module_put(ops->owner);
@@ -600,7 +603,7 @@ static void iommu_group_release(struct kobject *kobj)
if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
if (group->default_domain)
iommu_domain_free(group->default_domain);
@@ -641,7 +644,7 @@ struct iommu_group *iommu_group_alloc(void)
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
- ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
kfree(group);
return ERR_PTR(ret);
@@ -651,7 +654,7 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -2457,6 +2460,9 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
len = 0;
}
+ if (sg_is_dma_bus_address(sg))
+ goto next;
+
if (len) {
len += sg->length;
} else {
@@ -2464,6 +2470,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
start = s_phys;
}
+next:
if (++i < nents)
sg = sg_next(sg);
}
@@ -2576,32 +2583,25 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
ops->get_resv_regions(dev, list);
}
-void iommu_put_resv_regions(struct device *dev, struct list_head *list)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->put_resv_regions)
- ops->put_resv_regions(dev, list);
-}
-
/**
- * generic_iommu_put_resv_regions - Reserved region driver helper
+ * iommu_put_resv_regions - release reserved regions
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
- * IOMMU drivers can use this to implement their .put_resv_regions() callback
- * for simple reservations. Memory allocated for each reserved region will be
- * freed. If an IOMMU driver allocates additional resources per region, it is
- * going to have to implement a custom callback.
+ * This releases a reserved region list acquired by iommu_get_resv_regions().
*/
-void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, list, list)
- kfree(entry);
+ list_for_each_entry_safe(entry, next, list, list) {
+ if (entry->free)
+ entry->free(dev, entry);
+ else
+ kfree(entry);
+ }
}
-EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+EXPORT_SYMBOL(iommu_put_resv_regions);
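With the per-entry callback, a driver that attaches extra state to a reserved region releases it through entry->free instead of overriding the whole put path. A hedged driver-side sketch follows; the wrapper struct, field and function names are invented, while the free(dev, entry) signature and the list member are taken from the hunk above:

/* Hypothetical driver code, not part of this patch. */
struct my_resv_region {
	struct iommu_resv_region region;	/* embedded, so container_of() works */
	void *fw_cookie;			/* extra per-region driver state */
};

static void my_resv_region_free(struct device *dev,
				struct iommu_resv_region *region)
{
	struct my_resv_region *mrr = container_of(region,
						  struct my_resv_region, region);

	kfree(mrr->fw_cookie);	/* drop the driver-private state ... */
	kfree(mrr);		/* ... then the wrapper itself */
}

/*
 * The driver's .get_resv_regions would then set
 *	mrr->region.free = my_resv_region_free;
 * and add &mrr->region.list to the caller's list, letting the reworked
 * iommu_put_resv_regions() above do the rest.
 */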
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length, int prot,
@@ -2751,19 +2751,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
-
- if (ops->dev_feat_enabled)
- return ops->dev_feat_enabled(dev, feat);
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index db77aa675145..47d1983dfa2a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -26,6 +26,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
struct iova_domain *iovad;
@@ -614,7 +619,12 @@ EXPORT_SYMBOL_GPL(reserve_iova);
* dynamic size tuning described in the paper.
*/
-#define IOVA_MAG_SIZE 128
+/*
+ * As kmalloc buckets come in powers of two, 127 is chosen so that the
+ * size of 'iova_magazine' is exactly 1024 bytes and no memory is
+ * wasted.
+ */
+#define IOVA_MAG_SIZE 127
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
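The sizing argument in the rewritten comment is easy to verify. As a hedged sketch (assuming struct iova_magazine is one unsigned long counter followed by IOVA_MAG_SIZE unsigned long pfn slots, on a 64-bit build), a compile-time check placed in any iova.c function would be:

	/* 8 bytes of counter + 127 * 8 bytes of pfns = 1024 bytes, exactly the
	 * kmalloc-1k bucket; 128 slots would give 1032 bytes and spill into
	 * the next (2 KiB) bucket.
	 */
	BUILD_BUG_ON(sizeof(unsigned long) * (IOVA_MAG_SIZE + 1) != 1024);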
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f09aedfdd462..6a24aa804ea3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -394,10 +394,6 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void msm_iommu_release_device(struct device *dev)
-{
-}
-
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -603,7 +599,7 @@ static int insert_iommu_master(struct device *dev,
for (sid = 0; sid < master->num_mids; sid++)
if (master->mids[sid] == spec->args[0]) {
- dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
sid);
return 0;
}
@@ -677,7 +673,6 @@ fail:
static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
.probe_device = msm_iommu_probe_device,
- .release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bb9dd92c9898..7e363b1f24df 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -34,7 +34,6 @@
#include <dt-bindings/memory/mtk-memory-port.h>
#define REG_MMU_PT_BASE_ADDR 0x000
-#define MMU_PT_ADDR_MASK GENMASK(31, 7)
#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
@@ -138,6 +137,7 @@
/* PM and clock always on. e.g. infra iommu */
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
+#define PGTABLE_PA_35_EN BIT(17)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -596,6 +596,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
.iommu_dev = data->dev,
};
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
+ dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;
+
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
dom->cfg.oas = data->enable_4GB ? 33 : 32;
else
@@ -684,8 +687,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
goto err_unlock;
}
bank->m4u_dom = dom;
- writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- bank->base + REG_MMU_PT_BASE_ADDR);
+ writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);
pm_runtime_put(m4udev);
}
@@ -819,17 +821,12 @@ static void mtk_iommu_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return;
-
data = dev_iommu_priv_get(dev);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
}
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
@@ -933,7 +930,6 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.get_resv_regions = mtk_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -1140,22 +1136,32 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
- switch (data->plat_data->m4u_plat) {
- case M4U_MT2712:
- p = "mediatek,mt2712-infracfg";
- break;
- case M4U_MT8173:
- p = "mediatek,mt8173-infracfg";
- break;
- default:
- p = NULL;
+ infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg");
+ if (IS_ERR(infracfg)) {
+ /*
+ * Legacy devicetrees will not specify a phandle to
+ * mediatek,infracfg: in that case, we use the older
+ * way to retrieve a syscon to infra.
+ *
+			 * This is for backward compatibility only; no new
+			 * compatibles shall be added here.
+ */
+ switch (data->plat_data->m4u_plat) {
+ case M4U_MT2712:
+ p = "mediatek,mt2712-infracfg";
+ break;
+ case M4U_MT8173:
+ p = "mediatek,mt8173-infracfg";
+ break;
+ default:
+ p = NULL;
+ }
+
+ infracfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(infracfg))
+ return PTR_ERR(infracfg);
}
- infracfg = syscon_regmap_lookup_by_compatible(p);
-
- if (IS_ERR(infracfg))
- return PTR_ERR(infracfg);
-
ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
if (ret)
return ret;
@@ -1204,18 +1210,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = mtk_iommu_mm_dts_parse(dev, &match, data);
if (ret) {
- dev_err(dev, "mm dts parse fail(%d).", ret);
+ dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- data->plat_data->pericfg_comp_str) {
- infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str);
- if (IS_ERR(infracfg)) {
- ret = PTR_ERR(infracfg);
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ p = data->plat_data->pericfg_comp_str;
+ data->pericfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(data->pericfg)) {
+ ret = PTR_ERR(data->pericfg);
goto out_runtime_disable;
}
-
- data->pericfg = infracfg;
}
platform_set_drvdata(pdev, data);
@@ -1366,8 +1370,7 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- base + REG_MMU_PT_BASE_ADDR);
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
} while (++i < data->plat_data->banks_num);
/*
@@ -1401,7 +1404,7 @@ static const struct mtk_iommu_plat_data mt2712_data = {
static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
.flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
- MTK_IOMMU_TYPE_MM,
+ MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e1cb51b9866c..128c7a3f1778 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -532,15 +532,10 @@ static void mtk_iommu_v1_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
- return;
-
data = dev_iommu_priv_get(dev);
larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5696314ae69e..41f4eb005219 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -40,7 +40,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return driver_deferred_probe_check_state(dev);
+ return -ENODEV;
if (!try_module_get(ops->owner))
return -ENODEV;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index bd409bab6286..511959c8a14d 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -383,16 +383,6 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static void sprd_iommu_release_device(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec || fwspec->ops != &sprd_iommu_ops)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static struct iommu_group *sprd_iommu_device_group(struct device *dev)
{
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
@@ -417,7 +407,6 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
.probe_device = sprd_iommu_probe_device,
- .release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index c54ab477b8fd..a84c63518773 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -738,8 +738,6 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void sun50i_iommu_release_device(struct device *dev) {}
-
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
@@ -764,7 +762,6 @@ static const struct iommu_ops sun50i_iommu_ops = {
.domain_alloc = sun50i_iommu_domain_alloc,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
- .release_device = sun50i_iommu_release_device,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index a6700a40a6f8..e5ca3cf1a949 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -246,10 +246,6 @@ static struct iommu_device *gart_iommu_probe_device(struct device *dev)
return &gart_handle->iommu;
}
-static void gart_iommu_release_device(struct device *dev)
-{
-}
-
static int gart_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -273,7 +269,6 @@ static void gart_iommu_sync(struct iommu_domain *domain,
static const struct iommu_ops gart_iommu_ops = {
.domain_alloc = gart_iommu_domain_alloc,
.probe_device = gart_iommu_probe_device,
- .release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1fea68e551f1..2a8de975fe63 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -864,8 +864,6 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
return &smmu->iommu;
}
-static void tegra_smmu_release_device(struct device *dev) {}
-
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
@@ -966,7 +964,6 @@ static int tegra_smmu_of_xlate(struct device *dev,
static const struct iommu_ops tegra_smmu_ops = {
.domain_alloc = tegra_smmu_domain_alloc,
.probe_device = tegra_smmu_probe_device,
- .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa0..08eeafc9529f 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
-static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
int ret;
u32 flags;
+ size_t size = pgsize * pgcount;
u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
viommu_del_mappings(vdomain, iova, end);
+ else if (mapped)
+ *mapped = size;
return ret;
}
-static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
int ret = 0;
size_t unmapped;
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
+ size_t size = pgsize * pgcount;
unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
@@ -964,7 +970,7 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
return &viommu->iommu;
err_free_dev:
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
return ERR_PTR(ret);
@@ -981,15 +987,9 @@ static void viommu_probe_finalize(struct device *dev)
static void viommu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev;
-
- if (!fwspec || fwspec->ops != &viommu_ops)
- return;
-
- vdev = dev_iommu_priv_get(dev);
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -1013,13 +1013,12 @@ static struct iommu_ops viommu_ops = {
.release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
- .map = viommu_map,
- .unmap = viommu_unmap,
+ .map_pages = viommu_map_pages,
+ .unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bbb11cb8b0f7..66b9fa408bf2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config IRQCHIP
config ARM_GIC
bool
select IRQ_DOMAIN_HIERARCHY
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config ARM_GIC_PM
bool
@@ -34,7 +34,7 @@ config ARM_GIC_V3
bool
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config ARM_GIC_V3_ITS
bool
@@ -76,7 +76,7 @@ config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
select PCI_MSI if PCI
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config ALPINE_MSI
bool
@@ -112,7 +112,7 @@ config BCM6345_L1_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config BCM7038_L1_IRQ
tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
@@ -120,7 +120,7 @@ config BCM7038_L1_IRQ
default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config BCM7120_L2_IRQ
tristate "Broadcom STB 7120-style L2 interrupt controller driver"
@@ -177,9 +177,9 @@ config MADERA_IRQ
config IRQ_MIPS_CPU
bool
select GENERIC_IRQ_CHIP
- select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
+ select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config CLPS711X_IRQCHIP
bool
@@ -242,6 +242,14 @@ config RENESAS_RZA1_IRQC
Enable support for the Renesas RZ/A1 Interrupt Controller, to use up
to 8 external interrupts with configurable sense select.
+config RENESAS_RZG2L_IRQC
+ bool "Renesas RZ/G2L (and alike SoC) IRQC support" if COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+	  Enable support for the Renesas RZ/G2L (and similar SoCs) Interrupt Controller
+ for external devices.
+
config SL28CPLD_INTC
bool "Kontron sl28cpld IRQ controller"
depends on MFD_SL28CPLD=y || COMPILE_TEST
@@ -294,7 +302,7 @@ config VERSATILE_FPGA_IRQ_NR
config XTENSA_MX
bool
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
config XILINX_INTC
bool "Xilinx Interrupt Controller IP"
@@ -322,7 +330,8 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
- select GENERIC_IRQ_IPI
+ select GENERIC_IRQ_IPI if SMP
+ select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
config INGENIC_IRQ
@@ -530,6 +539,7 @@ config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
help
This enables support for the PLIC chip found in SiFive (and
potentially other) RISC-V systems. The PLIC controls devices
@@ -546,6 +556,16 @@ config EXYNOS_IRQ_COMBINER
Say yes here to add support for the IRQ combiner devices embedded
in Samsung Exynos chips.
+config IRQ_LOONGARCH_CPU
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ help
+ Support for the LoongArch CPU Interrupt Controller. For details of
+ irq chip hierarchy on LoongArch platforms please read the document
+ Documentation/loongarch/irq-chip-model.rst.
+
config LOONGSON_LIOINTC
bool "Loongson Local I/O Interrupt Controller"
depends on MACH_LOONGSON64
@@ -555,6 +575,16 @@ config LOONGSON_LIOINTC
help
Support for the Loongson Local I/O Interrupt Controller.
+config LOONGSON_EIOINTC
+ bool "Loongson Extend I/O Interrupt Controller"
+ depends on LOONGARCH
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson3 Extend I/O Interrupt Vector Controller.
+
config LOONGSON_HTPIC
bool "Loongson3 HyperTransport PIC Controller"
depends on MACH_LOONGSON64 && MIPS
@@ -574,7 +604,7 @@ config LOONGSON_HTVEC
config LOONGSON_PCH_PIC
bool "Loongson PCH PIC Controller"
- depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on MACH_LOONGSON64
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS
@@ -583,7 +613,7 @@ config LOONGSON_PCH_PIC
config LOONGSON_PCH_MSI
bool "Loongson PCH MSI Controller"
- depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on MACH_LOONGSON64
depends on PCI
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
@@ -591,6 +621,14 @@ config LOONGSON_PCH_MSI
help
Support for the Loongson PCH MSI Controller.
+config LOONGSON_PCH_LPC
+ bool "Loongson PCH LPC Controller"
+ depends on MACH_LOONGSON64
+ default (MACH_LOONGSON64 && LOONGARCH)
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the Loongson PCH LPC Controller.
+
config MST_IRQ
bool "MStar Interrupt Controller"
depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
@@ -627,4 +665,13 @@ config MCHP_EIC
help
Support for Microchip External Interrupt Controller.
+config SUNPLUS_SP7021_INTC
+ bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST
+ default SOC_SP7021
+ help
+ Support for the Sunplus SP7021 Interrupt Controller IP core.
+ SP7021 SoC has 2 Chips: C-Chip & P-Chip. This is used as a
+	  chained controller, routing all interrupt sources in P-Chip to
+ the primary controller on C-Chip.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 5b67450a9538..b6acbca2248b 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o
+obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
@@ -103,11 +104,14 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
+obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
+obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
+obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o
obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
@@ -115,3 +119,4 @@ obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
+obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 142a7431745f..6899e37810a8 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -216,11 +216,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
if (enabled)
__bcm6345_l1_mask(d);
- cpumask_copy(irq_data_get_affinity_mask(d), dest);
+ irq_data_update_affinity(d, dest);
if (enabled)
__bcm6345_l1_unmask(d);
} else {
- cpumask_copy(irq_data_get_affinity_mask(d), dest);
+ irq_data_update_affinity(d, dest);
}
raw_spin_unlock_irqrestore(&intc->lock, flags);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2d25bca63d2a..262658fd5f9e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1783,7 +1783,7 @@ static void gic_enable_nmi_support(void)
* the security state of the GIC (controlled by the GICD_CTRL.DS bit)
* and if Group 0 interrupts can be delivered to Linux in the non-secure
* world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
- * the ICC_PMR_EL1 register and the priority that software assigns to
+ * ICC_PMR_EL1 register and the priority that software assigns to
* interrupts:
*
* GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
@@ -2381,11 +2381,17 @@ static void __init gic_acpi_setup_kvm_info(void)
vgic_set_kvm_info(&gic_v3_kvm_info);
}
+static struct fwnode_handle *gsi_domain_handle;
+
+static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
+{
+ return gsi_domain_handle;
+}
+
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
- struct fwnode_handle *domain_handle;
size_t size;
int i, err;
@@ -2417,18 +2423,18 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
if (err)
goto out_redist_unmap;
- domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
- if (!domain_handle) {
+ gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
+ if (!gsi_domain_handle) {
err = -ENOMEM;
goto out_redist_unmap;
}
err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
- acpi_data.nr_redist_regions, 0, domain_handle);
+ acpi_data.nr_redist_regions, 0, gsi_domain_handle);
if (err)
goto out_fwhandle_free;
- acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
if (static_branch_likely(&supports_deactivate_key))
gic_acpi_setup_kvm_info();
@@ -2436,7 +2442,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
return 0;
out_fwhandle_free:
- irq_domain_free_fwnode(domain_handle);
+ irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
for (i = 0; i < acpi_data.nr_redist_regions; i++)
if (acpi_data.redist_regs[i].redist_base)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 820404cb56bc..4c7bae0ec8f9 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1682,11 +1682,17 @@ static void __init gic_acpi_setup_kvm_info(void)
vgic_set_kvm_info(&gic_v2_kvm_info);
}
+static struct fwnode_handle *gsi_domain_handle;
+
+static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
+{
+ return gsi_domain_handle;
+}
+
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
- struct fwnode_handle *domain_handle;
struct gic_chip_data *gic = &gic_data[0];
int count, ret;
@@ -1724,22 +1730,22 @@ static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
/*
* Initialize GIC instance zero (no multi-GIC support).
*/
- domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
- if (!domain_handle) {
+ gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
+ if (!gsi_domain_handle) {
pr_err("Unable to allocate domain handle\n");
gic_teardown(gic);
return -ENOMEM;
}
- ret = __gic_init_bases(gic, domain_handle);
+ ret = __gic_init_bases(gic, gsi_domain_handle);
if (ret) {
pr_err("Failed to initialise GIC\n");
- irq_domain_free_fwnode(domain_handle);
+ irq_domain_free_fwnode(gsi_domain_handle);
gic_teardown(gic);
return ret;
}
- acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(NULL, gic_data[0].domain);
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index d47c8041e5bc..ba9792e60329 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instruments Keystone IRQ controller IP driver
*
* Copyright (C) 2014 Texas Instruments, Inc.
* Author: Sajesh Kumar Saran <sajesh@ti.com>
* Grygorii Strashko <grygorii.strashko@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/irq.h>
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
new file mode 100644
index 000000000000..741612ba6a52
--- /dev/null
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/loongarch.h>
+#include <asm/setup.h>
+
+static struct irq_domain *irq_domain;
+struct fwnode_handle *cpuintc_handle;
+
+static u32 lpic_gsi_to_irq(u32 gsi)
+{
+	/* Only the PCH irqdomain needs GSI-to-IRQ translation on LoongArch. */
+ if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
+ return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+
+ return 0;
+}
+
+static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
+{
+ int id;
+ struct fwnode_handle *domain_handle = NULL;
+
+ switch (gsi) {
+ case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
+ if (liointc_handle)
+ domain_handle = liointc_handle;
+ break;
+
+ case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
+ if (pch_lpc_handle)
+ domain_handle = pch_lpc_handle;
+ break;
+
+ case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
+ id = find_pch_pic(gsi);
+ if (id >= 0 && pch_pic_handle[id])
+ domain_handle = pch_pic_handle[id];
+ break;
+ }
+
+ return domain_handle;
+}
+
+static void mask_loongarch_irq(struct irq_data *d)
+{
+ clear_csr_ecfg(ECFGF(d->hwirq));
+}
+
+static void unmask_loongarch_irq(struct irq_data *d)
+{
+ set_csr_ecfg(ECFGF(d->hwirq));
+}
+
+static struct irq_chip cpu_irq_controller = {
+ .name = "CPUINTC",
+ .irq_mask = mask_loongarch_irq,
+ .irq_unmask = unmask_loongarch_irq,
+};
+
+static void handle_cpu_irq(struct pt_regs *regs)
+{
+ int hwirq;
+ unsigned int estat = read_csr_estat() & CSR_ESTAT_IS;
+
+ while ((hwirq = ffs(estat))) {
+ estat &= ~BIT(hwirq - 1);
+ generic_handle_domain_irq(irq_domain, hwirq - 1);
+ }
+}
+
+static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_noprobe(irq);
+ irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
+ .map = loongarch_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init
+liointc_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;
+
+ return liointc_acpi_init(irq_domain, liointc_entry);
+}
+
+static int __init
+eiointc_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;
+
+ return eiointc_acpi_init(irq_domain, eiointc_entry);
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC,
+ liointc_parse_madt, 0);
+ acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
+ eiointc_parse_madt, 0);
+ return 0;
+}
+
+static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ if (irq_domain)
+ return 0;
+
+ /* Mask interrupts. */
+ clear_csr_ecfg(ECFG0_IM);
+ clear_csr_estat(ESTATF_IP);
+
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
+ irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
+ &loongarch_cpu_intc_irq_domain_ops, NULL);
+
+ if (!irq_domain)
+ panic("Failed to add irqdomain for LoongArch CPU");
+
+ set_handle_irq(&handle_cpu_irq);
+ acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
+ acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
+ acpi_cascade_irqdomain_init();
+
+ return 0;
+}
+
+IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
+ NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
new file mode 100644
index 000000000000..16e9af8d8b1e
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson Extend I/O Interrupt Controller support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "eiointc: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#define EIOINTC_REG_NODEMAP 0x14a0
+#define EIOINTC_REG_IPMAP 0x14c0
+#define EIOINTC_REG_ENABLE 0x1600
+#define EIOINTC_REG_BOUNCE 0x1680
+#define EIOINTC_REG_ISR 0x1800
+#define EIOINTC_REG_ROUTE 0x1c00
+
+#define VEC_REG_COUNT 4
+#define VEC_COUNT_PER_REG 64
+#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG)
+#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
+#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
+#define EIOINTC_ALL_ENABLE 0xffffffff
+
+#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
+
+static int nr_pics;
+
+struct eiointc_priv {
+ u32 node;
+ nodemask_t node_map;
+ cpumask_t cpuspan_map;
+ struct fwnode_handle *domain_handle;
+ struct irq_domain *eiointc_domain;
+};
+
+static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
+
+static void eiointc_enable(void)
+{
+ uint64_t misc;
+
+ misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
+ iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
+static int cpu_to_eio_node(int cpu)
+{
+ return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
+}
+
+static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
+{
+ int i, node, cpu_node, route_node;
+ unsigned char coremap;
+ uint32_t pos_off, data, data_byte, data_mask;
+
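+ /*
+ * Each vector's route is one byte of a 32-bit route register: pos_off is
+ * the 4-byte-aligned offset, data_byte the lane within it and data_mask
+ * the mask covering the remaining lanes.
+ */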
+ pos_off = pos & ~3;
+ data_byte = pos & 3;
+ data_mask = ~BIT_MASK(data_byte) & 0xf;
+
+ /* Calculate node and coremap of target irq */
+ cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
+ coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);
+
+ for_each_online_cpu(i) {
+ node = cpu_to_eio_node(i);
+ if (!node_isset(node, *node_map))
+ continue;
+
+ /* EIO node 0 is in charge of inter-node interrupt dispatch */
+ route_node = (node == mnode) ? cpu_node : node;
+ data = ((coremap | (route_node << 4)) << (data_byte * 8));
+ csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
+ }
+}
+
+static DEFINE_RAW_SPINLOCK(affinity_lock);
+
+static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
+{
+ unsigned int cpu;
+ unsigned long flags;
+ uint32_t vector, regaddr;
+ struct cpumask intersect_affinity;
+ struct eiointc_priv *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&affinity_lock, flags);
+
+ cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
+ cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
+
+ if (cpumask_empty(&intersect_affinity)) {
+ raw_spin_unlock_irqrestore(&affinity_lock, flags);
+ return -EINVAL;
+ }
+ cpu = cpumask_first(&intersect_affinity);
+
+ vector = d->hwirq;
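+ /* Each 32-bit enable register covers 32 vectors */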
+ regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
+
+ /* Mask target vector */
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
+ /* Set route for target vector */
+ eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
+ /* Unmask target vector */
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ raw_spin_unlock_irqrestore(&affinity_lock, flags);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int eiointc_index(int node)
+{
+ int i;
+
+ for (i = 0; i < nr_pics; i++) {
+ if (node_isset(node, eiointc_priv[i]->node_map))
+ return i;
+ }
+
+ return -1;
+}
+
+static int eiointc_router_init(unsigned int cpu)
+{
+ int i, bit;
+ uint32_t data;
+ uint32_t node = cpu_to_eio_node(cpu);
+ int index = eiointc_index(node);
+
+ if (index < 0) {
+ pr_err("Error: invalid nodemap!\n");
+ return -1;
+ }
+
+ if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
+ eiointc_enable();
+
+ for (i = 0; i < VEC_COUNT / 32; i++) {
+ data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
+ iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
+ }
+
+ for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
+ bit = BIT(1 + index); /* Route to IP[1 + index] */
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
+ }
+
+ for (i = 0; i < VEC_COUNT / 4; i++) {
+ /* Route to Node-0 Core-0 */
+ if (index == 0)
+ bit = BIT(cpu_logical_map(0));
+ else
+ bit = (eiointc_priv[index]->node << 4) | 1;
+
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
+ }
+
+ for (i = 0; i < VEC_COUNT / 32; i++) {
+ data = 0xffffffff;
+ iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
+ iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
+ }
+ }
+
+ return 0;
+}
+
+static void eiointc_irq_dispatch(struct irq_desc *desc)
+{
+ int i;
+ u64 pending;
+ bool handled = false;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < VEC_REG_COUNT; i++) {
+ pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+ iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
+ while (pending) {
+ int bit = __ffs(pending);
+ int irq = bit + VEC_COUNT_PER_REG * i;
+
+ generic_handle_domain_irq(priv->eiointc_domain, irq);
+ pending &= ~BIT(bit);
+ handled = true;
+ }
+ }
+
+ if (!handled)
+ spurious_interrupt();
+
+ chained_irq_exit(chip, desc);
+}
+
+static void eiointc_ack_irq(struct irq_data *d)
+{
+}
+
+static void eiointc_mask_irq(struct irq_data *d)
+{
+}
+
+static void eiointc_unmask_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip eiointc_irq_chip = {
+ .name = "EIOINTC",
+ .irq_ack = eiointc_ack_irq,
+ .irq_mask = eiointc_mask_irq,
+ .irq_unmask = eiointc_unmask_irq,
+ .irq_set_affinity = eiointc_set_irq_affinity,
+};
+
+static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ unsigned int i, type;
+ unsigned long hwirq = 0;
+ struct eiointc_priv *priv = domain->host_data;
+
+ ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
+ priv, handle_edge_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops eiointc_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = eiointc_domain_alloc,
+ .free = eiointc_domain_free,
+};
+
+static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
+{
+ int i;
+
+ if (cpu_has_flatmode)
+ node = cpu_to_node(node * CORES_PER_EIO_NODE);
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (node == vec_group[i].node) {
+ vec_group[i].parent = parent;
+ return;
+ }
+ }
+}
+
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (node == vec_group[i].node)
+ return vec_group[i].parent;
+ }
+ return NULL;
+}
+
+static int __init
+pch_pic_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
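+ /* The node ID lives in bits 47:44 of the register base address */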
+ unsigned int node = (pchpic_entry->address >> 44) & 0xf;
+ struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);
+
+ if (parent)
+ return pch_pic_acpi_init(parent, pchpic_entry);
+
+ return -EINVAL;
+}
+
+static int __init
+pch_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+ struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
+
+ if (parent)
+ return pch_msi_acpi_init(parent, pchmsi_entry);
+
+ return -EINVAL;
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
+ pch_pic_parse_madt, 0);
+ acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
+ pch_msi_parse_madt, 1);
+ return 0;
+}
+
+int __init eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc)
+{
+ int i, parent_irq;
+ unsigned long node_map;
+ struct eiointc_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
+ if (!priv->domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ goto out_free_priv;
+ }
+
+ priv->node = acpi_eiointc->node;
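+ /* A zero node_map in the MADT means this controller spans all nodes */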
+ node_map = acpi_eiointc->node_map ? : -1ULL;
+
+ for_each_possible_cpu(i) {
+ if (node_map & (1ULL << cpu_to_eio_node(i))) {
+ node_set(cpu_to_eio_node(i), priv->node_map);
+ cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
+ }
+ }
+
+ /* Setup IRQ domain */
+ priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
+ &eiointc_domain_ops, priv);
+ if (!priv->eiointc_domain) {
+ pr_err("loongson-eiointc: cannot add IRQ domain\n");
+ goto out_free_handle;
+ }
+
+ eiointc_priv[nr_pics++] = priv;
+
+ eiointc_router_init(0);
+
+ parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ "irqchip/loongarch/intc:starting",
+ eiointc_router_init, NULL);
+
+ acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
+ acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
+ acpi_cascade_irqdomain_init();
+
+ return 0;
+
+out_free_handle:
+ irq_domain_free_fwnode(priv->domain_handle);
+ priv->domain_handle = NULL;
+out_free_priv:
+ kfree(priv);
+
+ return -ENOMEM;
+}
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 8d05d8bcf56f..0da8716f8f24 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -23,7 +23,7 @@
#endif
#define LIOINTC_CHIP_IRQ 32
-#define LIOINTC_NUM_PARENT 4
+#define LIOINTC_NUM_PARENT 4
#define LIOINTC_NUM_CORES 4
#define LIOINTC_INTC_CHIP_START 0x20
@@ -58,6 +58,8 @@ struct liointc_priv {
bool has_lpc_irq_errata;
};
+struct fwnode_handle *liointc_handle;
+
static void liointc_chained_handle_irq(struct irq_desc *desc)
{
struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
@@ -153,97 +155,79 @@ static void liointc_resume(struct irq_chip_generic *gc)
irq_gc_unlock_irqrestore(gc, flags);
}
-static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
-static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
+static int parent_irq[LIOINTC_NUM_PARENT];
+static u32 parent_int_map[LIOINTC_NUM_PARENT];
+static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
+static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
-static void __iomem *liointc_get_reg_byname(struct device_node *node,
- const char *name)
+static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
{
- int index = of_property_match_string(node, "reg-names", name);
-
- if (index < 0)
- return NULL;
-
- return of_iomap(node, index);
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+ *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
}
-static int __init liointc_of_init(struct device_node *node,
- struct device_node *parent)
+static const struct irq_domain_ops acpi_irq_gc_ops = {
+ .map = irq_map_generic_chip,
+ .unmap = irq_unmap_generic_chip,
+ .xlate = liointc_domain_xlate,
+};
+
+static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ struct fwnode_handle *domain_handle, struct device_node *node)
{
+ int i, err;
+ void __iomem *base;
+ struct irq_chip_type *ct;
struct irq_chip_generic *gc;
struct irq_domain *domain;
- struct irq_chip_type *ct;
struct liointc_priv *priv;
- void __iomem *base;
- u32 of_parent_int_map[LIOINTC_NUM_PARENT];
- int parent_irq[LIOINTC_NUM_PARENT];
- bool have_parent = FALSE;
- int sz, i, err = 0;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- if (of_device_is_compatible(node, "loongson,liointc-2.0")) {
- base = liointc_get_reg_byname(node, "main");
- if (!base) {
- err = -ENODEV;
- goto out_free_priv;
- }
+ base = ioremap(addr, size);
+ if (!base)
+ goto out_free_priv;
- for (i = 0; i < LIOINTC_NUM_CORES; i++)
- priv->core_isr[i] = liointc_get_reg_byname(node, core_reg_names[i]);
- if (!priv->core_isr[0]) {
- err = -ENODEV;
- goto out_iounmap_base;
- }
- } else {
- base = of_iomap(node, 0);
- if (!base) {
- err = -ENODEV;
- goto out_free_priv;
- }
+ for (i = 0; i < LIOINTC_NUM_CORES; i++)
+ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
- for (i = 0; i < LIOINTC_NUM_CORES; i++)
- priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
- }
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++)
+ priv->handler[i].parent_int_map = parent_int_map[i];
- for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
- parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
- if (parent_irq[i] > 0)
- have_parent = TRUE;
- }
- if (!have_parent) {
- err = -ENODEV;
- goto out_iounmap_isr;
- }
+ if (revision > 1) {
+ for (i = 0; i < LIOINTC_NUM_CORES; i++) {
+ int index = of_property_match_string(node,
+ "reg-names", core_reg_names[i]);
- sz = of_property_read_variable_u32_array(node,
- "loongson,parent_int_map",
- &of_parent_int_map[0],
- LIOINTC_NUM_PARENT,
- LIOINTC_NUM_PARENT);
- if (sz < 4) {
- pr_err("loongson-liointc: No parent_int_map\n");
- err = -ENODEV;
- goto out_iounmap_isr;
- }
+ if (index < 0)
+ goto out_iounmap;
- for (i = 0; i < LIOINTC_NUM_PARENT; i++)
- priv->handler[i].parent_int_map = of_parent_int_map[i];
+ priv->core_isr[i] = of_iomap(node, index);
+ }
+ }
/* Setup IRQ domain */
- domain = irq_domain_add_linear(node, 32,
+ if (!acpi_disabled)
+ domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
+ &acpi_irq_gc_ops, priv);
+ else
+ domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
&irq_generic_chip_ops, priv);
if (!domain) {
pr_err("loongson-liointc: cannot add IRQ domain\n");
- err = -EINVAL;
- goto out_iounmap_isr;
+ goto out_iounmap;
}
- err = irq_alloc_domain_generic_chips(domain, 32, 1,
- node->full_name, handle_level_irq,
- IRQ_NOPROBE, 0, 0);
+ err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
+ (node ? node->full_name : "LIOINTC"),
+ handle_level_irq, 0, IRQ_NOPROBE, 0);
if (err) {
pr_err("loongson-liointc: unable to register IRQ domain\n");
goto out_free_domain;
@@ -299,24 +283,93 @@ static int __init liointc_of_init(struct device_node *node,
liointc_chained_handle_irq, &priv->handler[i]);
}
+ liointc_handle = domain_handle;
return 0;
out_free_domain:
irq_domain_remove(domain);
-out_iounmap_isr:
- for (i = 0; i < LIOINTC_NUM_CORES; i++) {
- if (!priv->core_isr[i])
- continue;
- iounmap(priv->core_isr[i]);
- }
-out_iounmap_base:
+out_iounmap:
iounmap(base);
out_free_priv:
kfree(priv);
- return err;
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ bool have_parent = FALSE;
+ int sz, i, index, revision, err = 0;
+ struct resource res;
+
+ if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
+ index = 0;
+ revision = 1;
+ } else {
+ index = of_property_match_string(node, "reg-names", "main");
+ revision = 2;
+ }
+
+ if (of_address_to_resource(node, index, &res))
+ return -EINVAL;
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
+ if (parent_irq[i] > 0)
+ have_parent = TRUE;
+ }
+ if (!have_parent)
+ return -ENODEV;
+
+ sz = of_property_read_variable_u32_array(node,
+ "loongson,parent_int_map",
+ &parent_int_map[0],
+ LIOINTC_NUM_PARENT,
+ LIOINTC_NUM_PARENT);
+ if (sz < 4) {
+ pr_err("loongson-liointc: No parent_int_map\n");
+ return -ENODEV;
+ }
+
+ err = liointc_init(res.start, resource_size(&res),
+ revision, of_node_to_fwnode(node), node);
+ if (err < 0)
+ return err;
+
+ return 0;
}
IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
+
+#endif
+
+#ifdef CONFIG_ACPI
+int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
+{
+ int ret;
+ struct fwnode_handle *domain_handle;
+
+ parent_int_map[0] = acpi_liointc->cascade_map[0];
+ parent_int_map[1] = acpi_liointc->cascade_map[1];
+
+ parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
+ parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
+
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
+ if (!domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ return -ENOMEM;
+ }
+ ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
+ 1, domain_handle, NULL);
+ if (ret)
+ irq_domain_free_fwnode(domain_handle);
+
+ return ret;
+}
+#endif
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
new file mode 100644
index 000000000000..bf2324910a75
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson LPC Interrupt Controller support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "lpc: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+
+/* Registers */
+#define LPC_INT_CTL 0x00
+#define LPC_INT_ENA 0x04
+#define LPC_INT_STS 0x08
+#define LPC_INT_CLR 0x0c
+#define LPC_INT_POL 0x10
+#define LPC_COUNT 16
+
+/* LPC_INT_CTL */
+#define LPC_INT_CTL_EN BIT(31)
+
+struct pch_lpc {
+ void __iomem *base;
+ struct irq_domain *lpc_domain;
+ raw_spinlock_t lpc_lock;
+ u32 saved_reg_ctl;
+ u32 saved_reg_ena;
+ u32 saved_reg_pol;
+};
+
+struct fwnode_handle *pch_lpc_handle;
+
+static void lpc_irq_ack(struct irq_data *d)
+{
+ unsigned long flags;
+ struct pch_lpc *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&priv->lpc_lock, flags);
+ writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR);
+ raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
+}
+
+static void lpc_irq_mask(struct irq_data *d)
+{
+ unsigned long flags;
+ struct pch_lpc *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&priv->lpc_lock, flags);
+ writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))),
+ priv->base + LPC_INT_ENA);
+ raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
+}
+
+static void lpc_irq_unmask(struct irq_data *d)
+{
+ unsigned long flags;
+ struct pch_lpc *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&priv->lpc_lock, flags);
+ writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)),
+ priv->base + LPC_INT_ENA);
+ raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
+}
+
+static int lpc_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 val;
+ u32 mask = 0x1 << (d->hwirq);
+ struct pch_lpc *priv = d->domain->host_data;
+
+ if (!(type & IRQ_TYPE_LEVEL_MASK))
+ return 0;
+
+ val = readl(priv->base + LPC_INT_POL);
+
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel(val, priv->base + LPC_INT_POL);
+
+ return 0;
+}
+
+static const struct irq_chip pch_lpc_irq_chip = {
+ .name = "PCH LPC",
+ .irq_mask = lpc_irq_mask,
+ .irq_unmask = lpc_irq_unmask,
+ .irq_ack = lpc_irq_ack,
+ .irq_set_type = lpc_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static void lpc_irq_dispatch(struct irq_desc *desc)
+{
+ u32 pending, bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct pch_lpc *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
+ pending = readl(priv->base + LPC_INT_ENA);
+ pending &= readl(priv->base + LPC_INT_STS);
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ bit = __ffs(pending);
+
+ generic_handle_domain_irq(priv->lpc_domain, bit);
+ pending &= ~BIT(bit);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int pch_lpc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops pch_lpc_domain_ops = {
+ .map = pch_lpc_map,
+ .translate = irq_domain_translate_twocell,
+};
+
+static void pch_lpc_reset(struct pch_lpc *priv)
+{
+ /* Enable the LPC interrupt (bit 31: enable, bit 30: edge trigger) */
+ writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL);
+ writel(0, priv->base + LPC_INT_ENA);
+ /* Clear all 18 interrupt bits */
+ writel(GENMASK(17, 0), priv->base + LPC_INT_CLR);
+}
+
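+/*
+ * Reading all-ones from both the enable and status registers suggests
+ * the LPC block is absent or inaccessible.
+ */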
+static int pch_lpc_disabled(struct pch_lpc *priv)
+{
+ return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) &&
+ (readl(priv->base + LPC_INT_STS) == 0xffffffff);
+}
+
+int __init pch_lpc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lpc_pic *acpi_pchlpc)
+{
+ int parent_irq;
+ struct pch_lpc *priv;
+ struct irq_fwspec fwspec;
+ struct fwnode_handle *irq_handle;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->lpc_lock);
+
+ priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size);
+ if (!priv->base)
+ goto free_priv;
+
+ if (pch_lpc_disabled(priv)) {
+ pr_err("Failed to get LPC status\n");
+ goto iounmap_base;
+ }
+
+ irq_handle = irq_domain_alloc_named_fwnode("lpcintc");
+ if (!irq_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ goto iounmap_base;
+ }
+
+ priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
+ &pch_lpc_domain_ops, priv);
+ if (!priv->lpc_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ goto free_irq_handle;
+ }
+ pch_lpc_reset(priv);
+
+ fwspec.fwnode = parent->fwnode;
+ fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+ parent_irq = irq_create_fwspec_mapping(&fwspec);
+ irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv);
+
+ pch_lpc_handle = irq_handle;
+ return 0;
+
+free_irq_handle:
+ irq_domain_free_fwnode(irq_handle);
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return -ENOMEM;
+}
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index e3801c4a77ed..a72ede90ffc6 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -15,6 +15,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
+static int nr_pics;
+
struct pch_msi_data {
struct mutex msi_map_lock;
phys_addr_t doorbell;
@@ -23,6 +25,8 @@ struct pch_msi_data {
unsigned long *msi_map;
};
+static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
+
static void pch_msi_mask_msi_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
@@ -154,12 +158,12 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
};
static int pch_msi_init_domains(struct pch_msi_data *priv,
- struct device_node *node,
- struct irq_domain *parent)
+ struct irq_domain *parent,
+ struct fwnode_handle *domain_handle)
{
struct irq_domain *middle_domain, *msi_domain;
- middle_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ middle_domain = irq_domain_create_linear(domain_handle,
priv->num_irqs,
&pch_msi_middle_domain_ops,
priv);
@@ -171,7 +175,7 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
middle_domain->parent = parent;
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
- msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_domain = pci_msi_create_irq_domain(domain_handle,
&pch_msi_domain_info,
middle_domain);
if (!msi_domain) {
@@ -183,19 +187,11 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
return 0;
}
-static int pch_msi_init(struct device_node *node,
- struct device_node *parent)
+static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count,
+ struct irq_domain *parent_domain, struct fwnode_handle *domain_handle)
{
- struct pch_msi_data *priv;
- struct irq_domain *parent_domain;
- struct resource res;
int ret;
-
- parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- pr_err("Failed to find the parent domain\n");
- return -ENXIO;
- }
+ struct pch_msi_data *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -203,48 +199,95 @@ static int pch_msi_init(struct device_node *node,
mutex_init(&priv->msi_map_lock);
- ret = of_address_to_resource(node, 0, &res);
- if (ret) {
- pr_err("Failed to allocate resource\n");
- goto err_priv;
- }
-
- priv->doorbell = res.start;
-
- if (of_property_read_u32(node, "loongson,msi-base-vec",
- &priv->irq_first)) {
- pr_err("Unable to parse MSI vec base\n");
- ret = -EINVAL;
- goto err_priv;
- }
-
- if (of_property_read_u32(node, "loongson,msi-num-vecs",
- &priv->num_irqs)) {
- pr_err("Unable to parse MSI vec number\n");
- ret = -EINVAL;
- goto err_priv;
- }
+ priv->doorbell = msg_address;
+ priv->irq_first = irq_base;
+ priv->num_irqs = irq_count;
priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
- if (!priv->msi_map) {
- ret = -ENOMEM;
+ if (!priv->msi_map)
goto err_priv;
- }
pr_debug("Registering %d MSIs, starting at %d\n",
priv->num_irqs, priv->irq_first);
- ret = pch_msi_init_domains(priv, node, parent_domain);
+ ret = pch_msi_init_domains(priv, parent_domain, domain_handle);
if (ret)
goto err_map;
+ pch_msi_handle[nr_pics++] = domain_handle;
return 0;
err_map:
bitmap_free(priv->msi_map);
err_priv:
kfree(priv);
- return ret;
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
+{
+ int err;
+ int irq_base, irq_count;
+ struct resource res;
+ struct irq_domain *parent_domain;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("Failed to allocate resource\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) {
+ pr_err("Unable to parse MSI vec base\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) {
+ pr_err("Unable to parse MSI vec number\n");
+ return -EINVAL;
+ }
+
+ err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node));
+ if (err < 0)
+ return err;
+
+ return 0;
}
-IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_init);
+IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
+#endif
+
+#ifdef CONFIG_ACPI
+struct fwnode_handle *get_pch_msi_handle(int pci_segment)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (msi_group[i].pci_segment == pci_segment)
+ return pch_msi_handle[i];
+ }
+ return NULL;
+}
+
+int __init pch_msi_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_msi_pic *acpi_pchmsi)
+{
+ int ret;
+ struct fwnode_handle *domain_handle;
+
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
+ ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
+ acpi_pchmsi->count, parent, domain_handle);
+ if (ret < 0)
+ irq_domain_free_fwnode(domain_handle);
+
+ return ret;
+}
+#endif
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index a4eb8a2181c7..c01b9c257005 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -33,13 +33,21 @@
#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+static int nr_pics;
+
struct pch_pic {
void __iomem *base;
struct irq_domain *pic_domain;
u32 ht_vec_base;
raw_spinlock_t pic_lock;
+ u32 vec_count;
+ u32 gsi_base;
};
+static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
+
+struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
+
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -139,6 +147,28 @@ static struct irq_chip pch_pic_irq_chip = {
.irq_set_type = pch_pic_set_type,
};
+static int pch_pic_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct pch_pic *priv = d->host_data;
+ struct device_node *of_node = to_of_node(fwspec->fwnode);
+
+ if (fwspec->param_count < 1)
+ return -EINVAL;
+
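+ /* DT specs are offset by ht_vec_base; ACPI specs carry a GSI and are rebased on gsi_base */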
+ if (of_node) {
+ *hwirq = fwspec->param[0] + priv->ht_vec_base;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ } else {
+ *hwirq = fwspec->param[0] - priv->gsi_base;
+ *type = IRQ_TYPE_NONE;
+ }
+
+ return 0;
+}
+
static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -149,13 +179,13 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_fwspec parent_fwspec;
struct pch_pic *priv = domain->host_data;
- err = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+ err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type);
if (err)
return err;
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1;
- parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
+ parent_fwspec.param[0] = hwirq;
err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
if (err)
@@ -170,7 +200,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops pch_pic_domain_ops = {
- .translate = irq_domain_translate_twocell,
+ .translate = pch_pic_domain_translate,
.alloc = pch_pic_alloc,
.free = irq_domain_free_irqs_parent,
};
@@ -180,7 +210,7 @@ static void pch_pic_reset(struct pch_pic *priv)
int i;
for (i = 0; i < PIC_COUNT; i++) {
- /* Write vectored ID */
+ /* Write vector ID */
writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
/* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i));
@@ -198,50 +228,37 @@ static void pch_pic_reset(struct pch_pic *priv)
}
}
-static int pch_pic_of_init(struct device_node *node,
- struct device_node *parent)
+static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
+ struct irq_domain *parent_domain, struct fwnode_handle *domain_handle,
+ u32 gsi_base)
{
struct pch_pic *priv;
- struct irq_domain *parent_domain;
- int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
raw_spin_lock_init(&priv->pic_lock);
- priv->base = of_iomap(node, 0);
- if (!priv->base) {
- err = -ENOMEM;
+ priv->base = ioremap(addr, size);
+ if (!priv->base)
goto free_priv;
- }
-
- parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- pr_err("Failed to find the parent domain\n");
- err = -ENXIO;
- goto iounmap_base;
- }
- if (of_property_read_u32(node, "loongson,pic-base-vec",
- &priv->ht_vec_base)) {
- pr_err("Failed to determine pic-base-vec\n");
- err = -EINVAL;
- goto iounmap_base;
- }
+ priv->ht_vec_base = vec_base;
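+ /* Bits 55:48 of the register at the base encode the vector count minus one */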
+ priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
+ priv->gsi_base = gsi_base;
priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
- PIC_COUNT,
- of_node_to_fwnode(node),
- &pch_pic_domain_ops,
- priv);
+ priv->vec_count, domain_handle,
+ &pch_pic_domain_ops, priv);
+
if (!priv->pic_domain) {
pr_err("Failed to create IRQ domain\n");
- err = -ENOMEM;
goto iounmap_base;
}
pch_pic_reset(priv);
+ pch_pic_handle[nr_pics] = domain_handle;
+ pch_pic_priv[nr_pics++] = priv;
return 0;
@@ -250,7 +267,105 @@ iounmap_base:
free_priv:
kfree(priv);
- return err;
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+
+static int pch_pic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int err, vec_base;
+ struct resource res;
+ struct irq_domain *parent_domain;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -EINVAL;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) {
+ pr_err("Failed to determine pic-base-vec\n");
+ return -EINVAL;
+ }
+
+ err = pch_pic_init(res.start, resource_size(&res), vec_base,
+ parent_domain, of_node_to_fwnode(node), 0);
+ if (err < 0)
+ return err;
+
+ return 0;
}
IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
+
+#endif
+
+#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
+static int __init
+pch_lpc_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header;
+
+ return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry);
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC,
+ pch_lpc_parse_madt, 0);
+ return 0;
+}
+
+int __init pch_pic_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_bio_pic *acpi_pchpic)
+{
+ int ret, vec_base;
+ struct fwnode_handle *domain_handle;
+
+ vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
+
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
+ if (!domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ return -ENOMEM;
+ }
+
+ ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
+ vec_base, parent, domain_handle, acpi_pchpic->gsi_base);
+
+ if (ret < 0) {
+ irq_domain_free_fwnode(domain_handle);
+ return ret;
+ }
+
+ if (acpi_pchpic->id == 0)
+ acpi_cascade_irqdomain_init();
+
+ return ret;
+}
+#endif
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index ff89b36267dd..1ba0f1555c80 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -52,13 +52,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
-static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+#endif /* CONFIG_GENERIC_IRQ_IPI */
static struct gic_all_vpes_chip_data {
u32 map;
@@ -472,9 +474,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
u32 map;
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+#ifdef CONFIG_GENERIC_IRQ_IPI
/* verify that shared irqs don't conflict with an IPI irq */
if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
return -EBUSY;
+#endif /* CONFIG_GENERIC_IRQ_IPI */
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_level_irq_controller,
@@ -567,6 +571,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
};
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
@@ -670,6 +676,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
.match = gic_ipi_domain_match,
};
+static int gic_register_ipi_domain(struct device_node *node)
+{
+ struct irq_domain *gic_ipi_domain;
+ unsigned int v[2], num_ipis;
+
+ gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_ipi_domain_ops, NULL);
+ if (!gic_ipi_domain) {
+ pr_err("Failed to add IPI domain");
+ return -ENXIO;
+ }
+
+ irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+ if (node &&
+ !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+ bitmap_set(ipi_resrv, v[0], v[1]);
+ } else {
+ /*
+ * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+ * meeting the requirements of arch/mips SMP.
+ */
+ num_ipis = 2 * num_possible_cpus();
+ bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+ }
+
+ bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+ return 0;
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline int gic_register_ipi_domain(struct device_node *node)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
static int gic_cpu_startup(unsigned int cpu)
{
/* Enable or disable EIC */
@@ -688,11 +736,12 @@ static int gic_cpu_startup(unsigned int cpu)
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
+ unsigned int cpu_vec, i, gicconfig;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
size_t gic_len;
+ int ret;
/* Find the first available CPU vector. */
i = 0;
@@ -734,6 +783,10 @@ static int __init gic_of_init(struct device_node *node,
}
mips_gic_base = ioremap(gic_base, gic_len);
+ if (!mips_gic_base) {
+ pr_err("Failed to ioremap gic_base\n");
+ return -ENOMEM;
+ }
gicconfig = read_gic_config();
gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
@@ -780,30 +833,9 @@ static int __init gic_of_init(struct device_node *node,
return -ENXIO;
}
- gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
- IRQ_DOMAIN_FLAG_IPI_PER_CPU,
- GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
- node, &gic_ipi_domain_ops, NULL);
- if (!gic_ipi_domain) {
- pr_err("Failed to add IPI domain");
- return -ENXIO;
- }
-
- irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
-
- if (node &&
- !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
- bitmap_set(ipi_resrv, v[0], v[1]);
- } else {
- /*
- * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
- * meeting the requirements of arch/mips SMP.
- */
- num_ipis = 2 * num_possible_cpus();
- bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
- }
-
- bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+ ret = gic_register_ipi_domain(node);
+ if (ret)
+ return ret;
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
new file mode 100644
index 000000000000..25fd8ee66565
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L IRQC Driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation.
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define IRQC_IRQ_START 1
+#define IRQC_IRQ_COUNT 8
+#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT)
+#define IRQC_TINT_COUNT 32
+#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT)
+
+#define ISCR 0x10
+#define IITSR 0x14
+#define TSCR 0x20
+#define TITSR0 0x24
+#define TITSR1 0x28
+#define TITSR0_MAX_INT 16
+#define TITSEL_WIDTH 0x2
+#define TSSR(n) (0x30 + ((n) * 4))
+#define TIEN BIT(7)
+#define TSSEL_SHIFT(n) (8 * (n))
+#define TSSEL_MASK GENMASK(7, 0)
+#define IRQ_MASK 0x3
+
+#define TSSR_OFFSET(n) ((n) % 4)
+#define TSSR_INDEX(n) ((n) / 4)
+
+#define TITSR_TITSEL_EDGE_RISING 0
+#define TITSR_TITSEL_EDGE_FALLING 1
+#define TITSR_TITSEL_LEVEL_HIGH 2
+#define TITSR_TITSEL_LEVEL_LOW 3
+
+#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2))
+#define IITSR_IITSEL_LEVEL_LOW 0
+#define IITSR_IITSEL_EDGE_FALLING 1
+#define IITSR_IITSEL_EDGE_RISING 2
+#define IITSR_IITSEL_EDGE_BOTH 3
+#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)
+
+#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
+#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
+
+struct rzg2l_irqc_priv {
+ void __iomem *base;
+ struct irq_fwspec fwspec[IRQC_NUM_IRQ];
+ raw_spinlock_t lock;
+};
+
+static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void rzg2l_irq_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 bit = BIT(hw_irq);
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + ISCR);
+ if (reg & bit)
+ writel_relaxed(reg & ~bit, priv->base + ISCR);
+}
+
+static void rzg2l_tint_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 bit = BIT(hw_irq);
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + TSCR);
+ if (reg & bit)
+ writel_relaxed(reg & ~bit, priv->base + TSCR);
+}
+
+static void rzg2l_irqc_eoi(struct irq_data *d)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ raw_spin_lock(&priv->lock);
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+ rzg2l_irq_eoi(d);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+ rzg2l_tint_eoi(d);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_eoi_parent(d);
+}
+
+static void rzg2l_irqc_irq_disable(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 offset = hw_irq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(offset);
+ u8 tssr_index = TSSR_INDEX(offset);
+ u32 reg;
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+ reg &= ~(TSSEL_MASK << tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ }
+ irq_chip_disable_parent(d);
+}
+
+static void rzg2l_irqc_irq_enable(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned long tint = (uintptr_t)d->chip_data;
+ u32 offset = hw_irq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(offset);
+ u8 tssr_index = TSSR_INDEX(offset);
+ u32 reg;
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+ reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ }
+ irq_chip_enable_parent(d);
+}
+
+static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = IITSR_IITSEL_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = IITSR_IITSEL_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = IITSR_IITSEL_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = IITSR_IITSEL_EDGE_BOTH;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ raw_spin_lock(&priv->lock);
+ tmp = readl_relaxed(priv->base + IITSR);
+ tmp &= ~IITSR_IITSEL_MASK(hw_irq);
+ tmp |= IITSR_IITSEL(hw_irq, sense);
+ writel_relaxed(tmp, priv->base + IITSR);
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 titseln = hwirq - IRQC_TINT_START;
+ u32 offset;
+ u8 sense;
+ u32 reg;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ sense = TITSR_TITSEL_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = TITSR_TITSEL_EDGE_FALLING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ offset = TITSR0;
+ if (titseln >= TITSR0_MAX_INT) {
+ titseln -= TITSR0_MAX_INT;
+ offset = TITSR1;
+ }
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + offset);
+ reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
+ reg |= sense << (titseln * TITSEL_WIDTH);
+ writel_relaxed(reg, priv->base + offset);
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ int ret = -EINVAL;
+
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+ ret = rzg2l_irq_set_type(d, type);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+ ret = rzg2l_tint_set_edge(d, type);
+ if (ret)
+ return ret;
+
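+ /* Whatever the external sense, the parent GIC always sees a level-high interrupt */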
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static const struct irq_chip irqc_chip = {
+ .name = "rzg2l-irqc",
+ .irq_eoi = rzg2l_irqc_eoi,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_disable = rzg2l_irqc_irq_disable,
+ .irq_enable = rzg2l_irqc_irq_enable,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = rzg2l_irqc_set_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct rzg2l_irqc_priv *priv = domain->host_data;
+ unsigned long tint = 0;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ /*
+ * For TINT interrupts, i.e. where the pinctrl driver is a child of the
+ * irqc domain, the hwirq and TINT are both encoded in fwspec->param[0]:
+ * the hwirq (9-40 for TINT) sits in bits 0-15 and the TINT number in
+ * bits 16-31. The TINT from the pinctrl driver must be programmed into
+ * the IRQC registers to enable a given GPIO pin as an interrupt.
+ */
+ if (hwirq > IRQC_IRQ_COUNT) {
+ tint = TINT_EXTRACT_GPIOINT(hwirq);
+ hwirq = TINT_EXTRACT_HWIRQ(hwirq);
+
+ if (hwirq < IRQC_TINT_START)
+ return -EINVAL;
+ }
+
+ if (hwirq > (IRQC_NUM_IRQ - 1))
+ return -EINVAL;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
+ (void *)(uintptr_t)tint);
+ if (ret)
+ return ret;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
+}
+
+static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
+ .alloc = rzg2l_irqc_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = irq_domain_translate_twocell,
+};
+
+static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
+ struct device_node *np)
+{
+ struct of_phandle_args map;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < IRQC_NUM_IRQ; i++) {
+ ret = of_irq_parse_one(np, i, &map);
+ if (ret)
+ return ret;
+ of_phandle_args_to_fwspec(np, map.args, map.args_count,
+ &priv->fwspec[i]);
+ }
+
+ return 0;
+}
+
+static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *irq_domain, *parent_domain;
+ struct platform_device *pdev;
+ struct reset_control *resetn;
+ struct rzg2l_irqc_priv *priv;
+ int ret;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -ENODEV;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ dev_err(&pdev->dev, "cannot find parent domain\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = rzg2l_irqc_parse_interrupts(priv, node);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
+ return ret;
+ }
+
+ resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(resetn))
+ return PTR_ERR(resetn);
+
+ ret = reset_control_deassert(resetn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
+ goto pm_disable;
+ }
+
+ raw_spin_lock_init(&priv->lock);
+
+ irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
+ node, &rzg2l_irqc_domain_ops,
+ priv);
+ if (!irq_domain) {
+ dev_err(&pdev->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto pm_put;
+ }
+
+ return 0;
+
+pm_put:
+ pm_runtime_put(&pdev->dev);
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(resetn);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
+IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
+IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index b65bd8878d4f..499e5f81b3fe 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -95,10 +95,11 @@ static const struct irq_domain_ops riscv_intc_domain_ops = {
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc, hartid;
+ int rc;
+ unsigned long hartid;
- hartid = riscv_of_parent_hartid(node);
- if (hartid < 0) {
+ rc = riscv_of_parent_hartid(node, &hartid);
+ if (rc < 0) {
pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index bb87e4c3b88e..2f4784860df5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -60,10 +60,13 @@
#define PLIC_DISABLE_THRESHOLD 0x7
#define PLIC_ENABLE_THRESHOLD 0
+#define PLIC_QUIRK_EDGE_INTERRUPT 0
+
struct plic_priv {
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
+ unsigned long plic_quirks;
};
struct plic_handler {
@@ -81,6 +84,8 @@ static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+static int plic_irq_set_type(struct irq_data *d, unsigned int type);
+
static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
@@ -103,37 +108,43 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
struct irq_data *d, int enable)
{
int cpu;
- struct plic_priv *priv = irq_data_get_irq_chip_data(d);
- writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
- if (handler->present &&
- cpumask_test_cpu(cpu, &handler->priv->lmask))
- plic_toggle(handler, d->hwirq, enable);
+ plic_toggle(handler, d->hwirq, enable);
}
}
+static void plic_irq_enable(struct irq_data *d)
+{
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+}
+
static void plic_irq_unmask(struct irq_data *d)
{
- struct cpumask amask;
- unsigned int cpu;
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
- cpumask_and(&amask, &priv->lmask, cpu_online_mask);
- cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
- &amask);
- if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
- return;
- plic_irq_toggle(cpumask_of(cpu), d, 1);
+ writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}
static void plic_irq_mask(struct irq_data *d)
{
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
- plic_irq_toggle(&priv->lmask, d, 0);
+ writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}
#ifdef CONFIG_SMP
@@ -154,38 +165,68 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- plic_irq_toggle(&priv->lmask, d, 0);
- plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
+ plic_irq_disable(d);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ if (!irqd_irq_disabled(d))
+ plic_irq_enable(d);
+
return IRQ_SET_MASK_OK_DONE;
}
#endif
-static void plic_irq_eoi(struct irq_data *d)
-{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- if (irqd_irq_masked(d)) {
- plic_irq_unmask(d);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- plic_irq_mask(d);
- } else {
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- }
-}
+static struct irq_chip plic_edge_chip = {
+ .name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
+ .irq_ack = plic_irq_eoi,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
+ .irq_set_type = plic_irq_set_type,
+ .flags = IRQCHIP_AFFINITY_PRE_STARTUP,
+};
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
.irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = plic_set_affinity,
#endif
+ .irq_set_type = plic_irq_set_type,
+ .flags = IRQCHIP_AFFINITY_PRE_STARTUP,
};
+static int plic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
+ return IRQ_SET_MASK_OK_NOCOPY;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_chip_handler_name_locked(d, &plic_edge_chip,
+ handle_edge_irq, NULL);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_chip_handler_name_locked(d, &plic_chip,
+ handle_fasteoi_irq, NULL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IRQ_SET_MASK_OK;
+}
+
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -198,6 +239,19 @@ static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
+static int plic_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct plic_priv *priv = d->host_data;
+
+ if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
+ return irq_domain_translate_twocell(d, fwspec, hwirq, type);
+
+ return irq_domain_translate_onecell(d, fwspec, hwirq, type);
+}
+
static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -206,7 +260,7 @@ static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int type;
struct irq_fwspec *fwspec = arg;
- ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
@@ -220,7 +274,7 @@ static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops plic_irqdomain_ops = {
- .translate = irq_domain_translate_onecell,
+ .translate = plic_irq_domain_translate,
.alloc = plic_irq_domain_alloc,
.free = irq_domain_free_irqs_top,
};
@@ -281,8 +335,9 @@ static int plic_starting_cpu(unsigned int cpu)
return 0;
}
-static int __init plic_init(struct device_node *node,
- struct device_node *parent)
+static int __init __plic_init(struct device_node *node,
+ struct device_node *parent,
+ unsigned long plic_quirks)
{
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
@@ -293,6 +348,8 @@ static int __init plic_init(struct device_node *node,
if (!priv)
return -ENOMEM;
+ priv->plic_quirks = plic_quirks;
+
priv->regs = of_iomap(node, 0);
if (WARN_ON(!priv->regs)) {
error = -EIO;
@@ -317,7 +374,8 @@ static int __init plic_init(struct device_node *node,
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
irq_hw_number_t hwirq;
- int cpu, hartid;
+ int cpu;
+ unsigned long hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -341,8 +399,8 @@ static int __init plic_init(struct device_node *node,
continue;
}
- hartid = riscv_of_parent_hartid(parent.np);
- if (hartid < 0) {
+ error = riscv_of_parent_hartid(parent.np, &hartid);
+ if (error < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
@@ -382,8 +440,11 @@ static int __init plic_init(struct device_node *node,
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
done:
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
+ writel(1, priv->regs + PRIORITY_BASE +
+ hwirq * PRIORITY_PER_ID);
+ }
nr_handlers++;
}
@@ -410,6 +471,20 @@ out_free_priv:
return error;
}
+static int __init plic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return __plic_init(node, parent, 0);
+}
+
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
-IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
+
+static int __init plic_edge_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT));
+}
+
+IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init);
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init);
diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c
new file mode 100644
index 000000000000..bed78d1def3d
--- /dev/null
+++ b/drivers/irqchip/irq-sp7021-intc.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define SP_INTC_HWIRQ_MIN 0
+#define SP_INTC_HWIRQ_MAX 223
+
+#define SP_INTC_NR_IRQS (SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1)
+#define SP_INTC_NR_GROUPS DIV_ROUND_UP(SP_INTC_NR_IRQS, 32)
+#define SP_INTC_REG_SIZE (SP_INTC_NR_GROUPS * 4)
+
+/* REG_GROUP_0 regs */
+#define REG_INTR_TYPE (sp_intc.g0)
+#define REG_INTR_POLARITY (REG_INTR_TYPE + SP_INTC_REG_SIZE)
+#define REG_INTR_PRIORITY (REG_INTR_POLARITY + SP_INTC_REG_SIZE)
+#define REG_INTR_MASK (REG_INTR_PRIORITY + SP_INTC_REG_SIZE)
+
+/* REG_GROUP_1 regs */
+#define REG_INTR_CLEAR (sp_intc.g1)
+#define REG_MASKED_EXT1 (REG_INTR_CLEAR + SP_INTC_REG_SIZE)
+#define REG_MASKED_EXT0 (REG_MASKED_EXT1 + SP_INTC_REG_SIZE)
+#define REG_INTR_GROUP (REG_INTR_CLEAR + 31 * 4)
+
+#define GROUP_MASK (BIT(SP_INTC_NR_GROUPS) - 1)
+#define GROUP_SHIFT_EXT1 (0)
+#define GROUP_SHIFT_EXT0 (8)
+
+/*
+ * When GPIO_INT0~7 are set to edge trigger, they do not work properly.
+ * WORKAROUND: change them to level trigger, and toggle the polarity
+ * at ACK/handler time to make the HW work.
+ */
+#define GPIO_INT0_HWIRQ 120
+#define GPIO_INT7_HWIRQ 127
+#define IS_GPIO_INT(irq) \
+({ \
+ u32 i = irq; \
+ (i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ); \
+})
+
+/* index of states */
+enum {
+ _IS_EDGE = 0,
+ _IS_LOW,
+ _IS_ACTIVE
+};
+
+#define STATE_BIT(irq, idx) (((irq) - GPIO_INT0_HWIRQ) * 3 + (idx))
+#define ASSIGN_STATE(irq, idx, v) assign_bit(STATE_BIT(irq, idx), sp_intc.states, v)
+#define TEST_STATE(irq, idx) test_bit(STATE_BIT(irq, idx), sp_intc.states)
+
+static struct sp_intctl {
+ /*
+ * REG_GROUP_0: include type/polarity/priority/mask regs.
+ * REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs.
+ */
+ void __iomem *g0; // REG_GROUP_0 base
+ void __iomem *g1; // REG_GROUP_1 base
+
+ struct irq_domain *domain;
+ raw_spinlock_t lock;
+
+ /*
+ * store GPIO_INT states
+ * each interrupt has 3 states: is_edge, is_low, is_active
+ */
+ DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3);
+} sp_intc;
+
+static struct irq_chip sp_intc_chip;
+
+static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value)
+{
+ u32 offset, mask;
+ unsigned long flags;
+ void __iomem *reg;
+
+ offset = (hwirq / 32) * 4;
+ reg = base + offset;
+
+ raw_spin_lock_irqsave(&sp_intc.lock, flags);
+ mask = readl_relaxed(reg);
+ if (value)
+ mask |= BIT(hwirq % 32);
+ else
+ mask &= ~BIT(hwirq % 32);
+ writel_relaxed(mask, reg);
+ raw_spin_unlock_irqrestore(&sp_intc.lock, flags);
+}
+
+static void sp_intc_ack_irq(struct irq_data *d)
+{
+ u32 hwirq = d->hwirq;
+
+ if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND
+ sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW));
+ ASSIGN_STATE(hwirq, _IS_ACTIVE, true);
+ }
+
+ sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1);
+}
+
+static void sp_intc_mask_irq(struct irq_data *d)
+{
+ sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0);
+}
+
+static void sp_intc_unmask_irq(struct irq_data *d)
+{
+ sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1);
+}
+
+static int sp_intc_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 hwirq = d->hwirq;
+ bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK);
+ bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING);
+
+ irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq);
+
+ if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND
+ /* store states */
+ ASSIGN_STATE(hwirq, _IS_EDGE, is_edge);
+ ASSIGN_STATE(hwirq, _IS_LOW, is_low);
+ ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
+ /* change to level */
+ is_edge = false;
+ }
+
+ sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge);
+ sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low);
+
+ return 0;
+}
+
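+/*
+ * Return the highest pending hwirq routed to the given EXT_INT output:
+ * pick the highest pending group from the group register, then the
+ * highest pending bit in that group's masked-status register.
+ * Returns -1 when nothing is pending.
+ */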
+static int sp_intc_get_ext_irq(int ext_num)
+{
+ void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0;
+ u32 shift = ext_num ? GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0;
+ u32 groups;
+ u32 pending_group;
+ u32 group;
+ u32 pending_irq;
+
+ groups = readl_relaxed(REG_INTR_GROUP);
+ pending_group = (groups >> shift) & GROUP_MASK;
+ if (!pending_group)
+ return -1;
+
+ group = fls(pending_group) - 1;
+ pending_irq = readl_relaxed(base + group * 4);
+ if (!pending_irq)
+ return -1;
+
+ return (group * 32) + fls(pending_irq) - 1;
+}
+
+static void sp_intc_handle_ext_cascaded(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int ext_num = (uintptr_t)irq_desc_get_handler_data(desc);
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) {
+ if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND
+ ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
+ sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW));
+ } else {
+ generic_handle_domain_irq(sp_intc.domain, hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip sp_intc_chip = {
+ .name = "sp_intc",
+ .irq_ack = sp_intc_ack_irq,
+ .irq_mask = sp_intc_mask_irq,
+ .irq_unmask = sp_intc_unmask_irq,
+ .irq_set_type = sp_intc_set_type,
+};
+
+static int sp_intc_irq_domain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq);
+ irq_set_chip_data(irq, &sp_intc_chip);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sp_intc_dm_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = sp_intc_irq_domain_map,
+};
+
+static int sp_intc_irq_map(struct device_node *node, int i)
+{
+ unsigned int irq;
+
+ irq = irq_of_parse_and_map(node, i);
+ if (!irq)
+ return -ENOENT;
+
+ irq_set_chained_handler_and_data(irq, sp_intc_handle_ext_cascaded, (void *)(uintptr_t)i);
+
+ return 0;
+}
+
+static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
+{
+ int i, ret;
+
+ sp_intc.g0 = of_iomap(node, 0);
+ if (!sp_intc.g0)
+ return -ENXIO;
+
+ sp_intc.g1 = of_iomap(node, 1);
+ if (!sp_intc.g1) {
+ ret = -ENXIO;
+ goto out_unmap0;
+ }
+
+ ret = sp_intc_irq_map(node, 0); // EXT_INT0
+ if (ret)
+ goto out_unmap1;
+
+ ret = sp_intc_irq_map(node, 1); // EXT_INT1
+ if (ret)
+ goto out_unmap1;
+
+ /* initial regs */
+ for (i = 0; i < SP_INTC_NR_GROUPS; i++) {
+ /* all mask */
+ writel_relaxed(0, REG_INTR_MASK + i * 4);
+ /* all edge */
+ writel_relaxed(~0, REG_INTR_TYPE + i * 4);
+ /* all high-active */
+ writel_relaxed(0, REG_INTR_POLARITY + i * 4);
+ /* all EXT_INT0 */
+ writel_relaxed(~0, REG_INTR_PRIORITY + i * 4);
+ /* all clear */
+ writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
+ }
+
+ sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
+ &sp_intc_dm_ops, &sp_intc);
+ if (!sp_intc.domain) {
+ ret = -ENOMEM;
+ goto out_unmap1;
+ }
+
+ raw_spin_lock_init(&sp_intc.lock);
+
+ return 0;
+
+out_unmap1:
+ iounmap(sp_intc.g1);
+out_unmap0:
+ iounmap(sp_intc.g0);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 9d18f47040eb..a73763d475f0 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -34,21 +34,15 @@ struct stm32_exti_bank {
u32 swier_ofst;
u32 rpr_ofst;
u32 fpr_ofst;
+ u32 trg_ofst;
};
#define UNDEF_REG ~0
-struct stm32_desc_irq {
- u32 exti;
- u32 irq_parent;
- struct irq_chip *chip;
-};
-
struct stm32_exti_drv_data {
const struct stm32_exti_bank **exti_banks;
- const struct stm32_desc_irq *desc_irqs;
+ const u8 *desc_irqs;
u32 bank_nr;
- u32 irq_nr;
};
struct stm32_exti_chip_data {
@@ -78,6 +72,7 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.swier_ofst = 0x10,
.rpr_ofst = 0x14,
.fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
@@ -97,6 +92,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
.swier_ofst = 0x08,
.rpr_ofst = 0x88,
.fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
@@ -107,6 +103,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
.swier_ofst = 0x28,
.rpr_ofst = 0x98,
.fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
@@ -117,6 +114,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
.swier_ofst = 0x48,
.rpr_ofst = 0xA8,
.fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
@@ -132,32 +130,35 @@ static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
static const struct stm32_exti_bank stm32mp1_exti_b1 = {
.imr_ofst = 0x80,
- .emr_ofst = 0x84,
+ .emr_ofst = UNDEF_REG,
.rtsr_ofst = 0x00,
.ftsr_ofst = 0x04,
.swier_ofst = 0x08,
.rpr_ofst = 0x0C,
.fpr_ofst = 0x10,
+ .trg_ofst = 0x3EC,
};
static const struct stm32_exti_bank stm32mp1_exti_b2 = {
.imr_ofst = 0x90,
- .emr_ofst = 0x94,
+ .emr_ofst = UNDEF_REG,
.rtsr_ofst = 0x20,
.ftsr_ofst = 0x24,
.swier_ofst = 0x28,
.rpr_ofst = 0x2C,
.fpr_ofst = 0x30,
+ .trg_ofst = 0x3E8,
};
static const struct stm32_exti_bank stm32mp1_exti_b3 = {
.imr_ofst = 0xA0,
- .emr_ofst = 0xA4,
+ .emr_ofst = UNDEF_REG,
.rtsr_ofst = 0x40,
.ftsr_ofst = 0x44,
.swier_ofst = 0x48,
.rpr_ofst = 0x4C,
.fpr_ofst = 0x50,
+ .trg_ofst = 0x3E4,
};
static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
@@ -169,126 +170,114 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
static struct irq_chip stm32_exti_h_chip;
static struct irq_chip stm32_exti_h_chip_direct;
-static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
- { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip },
- { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip },
- { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip },
- { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip },
- { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip },
- { .exti = 5, .irq_parent = 23, .chip = &stm32_exti_h_chip },
- { .exti = 6, .irq_parent = 64, .chip = &stm32_exti_h_chip },
- { .exti = 7, .irq_parent = 65, .chip = &stm32_exti_h_chip },
- { .exti = 8, .irq_parent = 66, .chip = &stm32_exti_h_chip },
- { .exti = 9, .irq_parent = 67, .chip = &stm32_exti_h_chip },
- { .exti = 10, .irq_parent = 40, .chip = &stm32_exti_h_chip },
- { .exti = 11, .irq_parent = 42, .chip = &stm32_exti_h_chip },
- { .exti = 12, .irq_parent = 76, .chip = &stm32_exti_h_chip },
- { .exti = 13, .irq_parent = 77, .chip = &stm32_exti_h_chip },
- { .exti = 14, .irq_parent = 121, .chip = &stm32_exti_h_chip },
- { .exti = 15, .irq_parent = 127, .chip = &stm32_exti_h_chip },
- { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip },
- { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct },
- { .exti = 21, .irq_parent = 31, .chip = &stm32_exti_h_chip_direct },
- { .exti = 22, .irq_parent = 33, .chip = &stm32_exti_h_chip_direct },
- { .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
- { .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct },
- { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
- { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct },
- { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
- { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
- { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct },
- { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
- { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
- { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct },
- { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
- { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
- { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
- { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
- { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct },
- { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct },
- { .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct },
- { .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct },
- { .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip },
- { .exti = 68, .irq_parent = 143, .chip = &stm32_exti_h_chip },
- { .exti = 70, .irq_parent = 62, .chip = &stm32_exti_h_chip_direct },
- { .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip },
+#define EXTI_INVALID_IRQ U8_MAX
+#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
+
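+/*
+ * Map of EXTI event number to its parent GIC SPI; entries left at
+ * EXTI_INVALID_IRQ have no dedicated parent interrupt and are not
+ * chained to the GIC.
+ */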
+static const u8 stm32mp1_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 23,
+ [6] = 64,
+ [7] = 65,
+ [8] = 66,
+ [9] = 67,
+ [10] = 40,
+ [11] = 42,
+ [12] = 76,
+ [13] = 77,
+ [14] = 121,
+ [15] = 127,
+ [16] = 1,
+ [19] = 3,
+ [21] = 31,
+ [22] = 33,
+ [23] = 72,
+ [24] = 95,
+ [25] = 107,
+ [26] = 37,
+ [27] = 38,
+ [28] = 39,
+ [29] = 71,
+ [30] = 52,
+ [31] = 53,
+ [32] = 82,
+ [33] = 83,
+ [47] = 93,
+ [48] = 138,
+ [50] = 139,
+ [52] = 140,
+ [53] = 141,
+ [54] = 135,
+ [61] = 100,
+ [65] = 144,
+ [68] = 143,
+ [70] = 62,
+ [73] = 129,
};
-static const struct stm32_desc_irq stm32mp13_desc_irq[] = {
- { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip },
- { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip },
- { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip },
- { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip },
- { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip },
- { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip },
- { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip },
- { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip },
- { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip },
- { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip },
- { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip },
- { .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip },
- { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip },
- { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip },
- { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip },
- { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip },
- { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip },
- { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct },
- { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct },
- { .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct },
- { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct },
- { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
- { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct },
- { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
- { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
- { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct },
- { .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
- { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
- { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct },
- { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
- { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct },
- { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct },
- { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct },
- { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct },
- { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct },
- { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct },
- { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct },
- { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct },
- { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct },
+static const u8 stm32mp13_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 24,
+ [6] = 65,
+ [7] = 66,
+ [8] = 67,
+ [9] = 68,
+ [10] = 41,
+ [11] = 43,
+ [12] = 77,
+ [13] = 78,
+ [14] = 106,
+ [15] = 109,
+ [16] = 1,
+ [19] = 3,
+ [21] = 32,
+ [22] = 34,
+ [23] = 73,
+ [24] = 93,
+ [25] = 114,
+ [26] = 38,
+ [27] = 39,
+ [28] = 40,
+ [29] = 72,
+ [30] = 53,
+ [31] = 54,
+ [32] = 83,
+ [33] = 84,
+ [44] = 96,
+ [47] = 92,
+ [48] = 116,
+ [50] = 117,
+ [52] = 118,
+ [53] = 119,
+ [68] = 63,
+ [70] = 98,
};
static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.exti_banks = stm32mp1_exti_banks,
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
.desc_irqs = stm32mp1_desc_irq,
- .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};
static const struct stm32_exti_drv_data stm32mp13_drv_data = {
.exti_banks = stm32mp1_exti_banks,
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
.desc_irqs = stm32mp13_desc_irq,
- .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq),
};
-static const struct
-stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data,
- irq_hw_number_t hwirq)
-{
- const struct stm32_desc_irq *desc = NULL;
- int i;
-
- if (!drv_data->desc_irqs)
- return NULL;
-
- for (i = 0; i < drv_data->irq_nr; i++) {
- desc = &drv_data->desc_irqs[i];
- if (desc->exti == hwirq)
- break;
- }
-
- return desc;
-}
-
static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
@@ -614,7 +603,7 @@ static int stm32_exti_h_set_affinity(struct irq_data *d,
if (d->parent_data->chip)
return irq_chip_set_affinity_parent(d, dest, force);
- return -EINVAL;
+ return IRQ_SET_MASK_OK_DONE;
}
static int __maybe_unused stm32_exti_h_suspend(void)
@@ -691,8 +680,8 @@ static struct irq_chip stm32_exti_h_chip_direct = {
.name = "stm32-exti-h-direct",
.irq_eoi = irq_chip_eoi_parent,
.irq_ack = irq_chip_ack_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
+ .irq_mask = stm32_exti_h_mask,
+ .irq_unmask = stm32_exti_h_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = irq_chip_set_type_parent,
.irq_set_wake = stm32_exti_h_set_wake,
@@ -706,28 +695,36 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
{
struct stm32_exti_host_data *host_data = dm->host_data;
struct stm32_exti_chip_data *chip_data;
- const struct stm32_desc_irq *desc;
+ u8 desc_irq;
struct irq_fwspec *fwspec = data;
struct irq_fwspec p_fwspec;
irq_hw_number_t hwirq;
int bank;
+ u32 event_trg;
+ struct irq_chip *chip;
hwirq = fwspec->param[0];
+ if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK)
+ return -EINVAL;
+
bank = hwirq / IRQS_PER_BANK;
chip_data = &host_data->chips_data[bank];
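+	/*
+	 * The trigger configuration register tells which events support
+	 * configurable edge/level triggering: use the full EXTI chip for
+	 * those and the direct (pass-through) chip for the others.
+	 */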
+ event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
+ chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
+ &stm32_exti_h_chip : &stm32_exti_h_chip_direct;
+
+ irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
- desc = stm32_exti_get_desc(host_data->drv_data, hwirq);
- if (!desc)
+ if (!host_data->drv_data || !host_data->drv_data->desc_irqs)
return -EINVAL;
- irq_domain_set_hwirq_and_chip(dm, virq, hwirq, desc->chip,
- chip_data);
- if (desc->irq_parent) {
+ desc_irq = host_data->drv_data->desc_irqs[hwirq];
+ if (desc_irq != EXTI_INVALID_IRQ) {
p_fwspec.fwnode = dm->parent->fwnode;
p_fwspec.param_count = 3;
p_fwspec.param[0] = GIC_SPI;
- p_fwspec.param[1] = desc->irq_parent;
+ p_fwspec.param[1] = desc_irq;
p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
@@ -792,7 +789,8 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
* clear registers to avoid residue
*/
writel_relaxed(0, base + stm32_bank->imr_ofst);
- writel_relaxed(0, base + stm32_bank->emr_ofst);
+ if (stm32_bank->emr_ofst != UNDEF_REG)
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
pr_info("%pOF: bank%d\n", node, bank_idx);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e1f771c72fc4..ad3e2c1b3c87 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
lic->base[i] = base;
/* Disable all interrupts */
- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index cd5642cef01f..651f2f8f685b 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1557,7 +1557,7 @@ reset_hfcsusb(struct hfcsusb *hw)
write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) |
((hw->packet_size / 8) << 4));
- /* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */
+ /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size);
/* enable PCM/GCI master mode */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a49979f41eee..499d0f215a8b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -447,16 +447,16 @@ config LEDS_LP8860
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS && BROKEN
depends on X86 && SERIO_I8042 && DMI
help
This driver makes the mail LED accessible from userspace
- programs through the leds subsystem. This LED have three
- known mode: off, blink at 0.5Hz and blink at 1Hz.
+ programs through the LEDs subsystem. This LED has three
+ known modes: off, blink at 0.5Hz and blink at 1Hz.
The driver supports two kinds of interface: using ledtrig-timer
or through /sys/class/leds/clevo::mail/brightness. As this LED
- cannot change it's brightness it blinks instead. The brightness
+ cannot change its brightness, it blinks instead. The brightness
value 0 means off, 1..127 means blink at 0.5Hz and 128..255 means
blink at 1Hz.
@@ -697,7 +697,7 @@ config LEDS_MENF21BMC
config LEDS_IS31FL319X
tristate "LED Support for ISSI IS31FL319x I2C LED controller family"
- depends on LEDS_CLASS && I2C && OF
+ depends on LEDS_CLASS && I2C
select REGMAP_I2C
help
This option enables support for LEDs connected to ISSI IS31FL319x
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 59ba81e40e85..945c84286a4e 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,3 +1,17 @@
+config LEDS_BCM63138
+ tristate "LED Support for Broadcom BCM63138 SoC"
+ depends on LEDS_CLASS
+ depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ default ARCH_BCM4908
+ help
+ This option enables support for the LED controller that is part of
+ the BCM63138 SoC. The same hardware block is also known to be used
+ in BCM4908, BCM6848, BCM6858, BCM63148, BCM63381 and BCM68360.
+
+ If compiled as a module, it will be called leds-bcm63138.
+
config LEDS_LGM
tristate "LED support for LGM SoC series"
depends on X86 || COMPILE_TEST
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
index fa5d04dccf13..447029f4153a 100644
--- a/drivers/leds/blink/Makefile
+++ b/drivers/leds/blink/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_BCM63138) += leds-bcm63138.o
obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-bcm63138.c b/drivers/leds/blink/leds-bcm63138.c
new file mode 100644
index 000000000000..2cf2761e4914
--- /dev/null
+++ b/drivers/leds/blink/leds-bcm63138.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define BCM63138_MAX_LEDS 32
+#define BCM63138_MAX_BRIGHTNESS 9
+
+#define BCM63138_LED_BITS 4 /* how many bits control a single LED */
+#define BCM63138_LED_MASK ((1 << BCM63138_LED_BITS) - 1) /* 0xf */
+#define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */
+
+#define BCM63138_GLB_CTRL 0x00
+#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002
+#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008
+#define BCM63138_MASK 0x04
+#define BCM63138_HW_LED_EN 0x08
+#define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c
+#define BCM63138_FLASH_RATE_CTRL1 0x10
+#define BCM63138_FLASH_RATE_CTRL2 0x14
+#define BCM63138_FLASH_RATE_CTRL3 0x18
+#define BCM63138_FLASH_RATE_CTRL4 0x1c
+#define BCM63138_BRIGHT_CTRL1 0x20
+#define BCM63138_BRIGHT_CTRL2 0x24
+#define BCM63138_BRIGHT_CTRL3 0x28
+#define BCM63138_BRIGHT_CTRL4 0x2c
+#define BCM63138_POWER_LED_CFG 0x30
+#define BCM63138_HW_POLARITY 0xb4
+#define BCM63138_SW_DATA 0xb8
+#define BCM63138_SW_POLARITY 0xbc
+#define BCM63138_PARALLEL_LED_POLARITY 0xc0
+#define BCM63138_SERIAL_LED_POLARITY 0xc4
+#define BCM63138_HW_LED_STATUS 0xc8
+#define BCM63138_FLASH_CTRL_STATUS 0xcc
+#define BCM63138_FLASH_BRT_CTRL 0xd0
+#define BCM63138_FLASH_P_LED_OUT_STATUS 0xd4
+#define BCM63138_FLASH_S_LED_OUT_STATUS 0xd8
+
+struct bcm63138_leds {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+struct bcm63138_led {
+ struct bcm63138_leds *leds;
+ struct led_classdev cdev;
+ u32 pin;
+ bool active_low;
+};
+
+/*
+ * I/O access
+ */
+
+static void bcm63138_leds_write(struct bcm63138_leds *leds, unsigned int reg,
+ u32 data)
+{
+ writel(data, leds->base + reg);
+}
+
+static unsigned long bcm63138_leds_read(struct bcm63138_leds *leds,
+ unsigned int reg)
+{
+ return readl(leds->base + reg);
+}
+
+static void bcm63138_leds_update_bits(struct bcm63138_leds *leds,
+ unsigned int reg, u32 mask, u32 val)
+{
+ WARN_ON(val & ~mask);
+
+ bcm63138_leds_write(leds, reg, (bcm63138_leds_read(leds, reg) & ~mask) | (val & mask));
+}
+
+/*
+ * Helpers
+ */
+
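+/*
+ * Flash-rate and brightness settings are packed eight LEDs per 32-bit
+ * register, BCM63138_LED_BITS bits each; derive the register offset and
+ * bit shift from the LED's pin number.
+ */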
+static void bcm63138_leds_set_flash_rate(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_FLASH_RATE_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_set_bright(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_BRIGHT_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_enable_led(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ enum led_brightness value)
+{
+ u32 bit = BIT(led->pin);
+
+ bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, value ? bit : 0);
+}
+
+/*
+ * API callbacks
+ */
+
+static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, value);
+ if (!value)
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ else
+ bcm63138_leds_set_bright(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+}
+
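+/*
+ * The controller only supports a few fixed flash rates, so accept delays
+ * within roughly ±10% of a supported period (with equal on/off times)
+ * and treat 0/0 as the default 640 ms on / 640 ms off blink.
+ */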
+static int bcm63138_leds_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+ u8 value;
+
+ if (!*delay_on && !*delay_off) {
+ *delay_on = 640;
+ *delay_off = 640;
+ }
+
+ if (*delay_on != *delay_off) {
+ dev_dbg(led_cdev->dev, "Blinking at unequal delays is not supported\n");
+ return -EINVAL;
+ }
+
+ switch (*delay_on) {
+ case 1152 ... 1408: /* 1280 ms ± 10% */
+ value = 0x7;
+ break;
+ case 576 ... 704: /* 640 ms ± 10% */
+ value = 0x6;
+ break;
+ case 288 ... 352: /* 320 ms ± 10% */
+ value = 0x5;
+ break;
+ case 126 ... 154: /* 140 ms ± 10% */
+ value = 0x4;
+ break;
+ case 59 ... 72: /* 65 ms ± 10% */
+ value = 0x3;
+ break;
+ default:
+ dev_dbg(led_cdev->dev, "Blinking delay value %lu is unsupported\n",
+ *delay_on);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS);
+ bcm63138_leds_set_flash_rate(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+
+ return 0;
+}
+
+/*
+ * LED driver
+ */
+
+static void bcm63138_leds_create_led(struct bcm63138_leds *leds,
+ struct device_node *np)
+{
+ struct led_init_data init_data = {
+ .fwnode = of_fwnode_handle(np),
+ };
+ struct device *dev = leds->dev;
+ struct bcm63138_led *led;
+ struct pinctrl *pinctrl;
+ u32 bit;
+ int err;
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "Failed to alloc LED\n");
+ return;
+ }
+
+ led->leds = leds;
+
+ if (of_property_read_u32(np, "reg", &led->pin)) {
+ dev_err(dev, "Missing \"reg\" property in %pOF\n", np);
+ goto err_free;
+ }
+
+ if (led->pin >= BCM63138_MAX_LEDS) {
+ dev_err(dev, "Invalid \"reg\" value %d\n", led->pin);
+ goto err_free;
+ }
+
+ led->active_low = of_property_read_bool(np, "active-low");
+
+ led->cdev.max_brightness = BCM63138_MAX_BRIGHTNESS;
+ led->cdev.brightness_set = bcm63138_leds_brightness_set;
+ led->cdev.blink_set = bcm63138_leds_blink_set;
+
+ err = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (err) {
+ dev_err(dev, "Failed to register LED %pOF: %d\n", np, err);
+ goto err_free;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(led->cdev.dev);
+ if (IS_ERR(pinctrl) && PTR_ERR(pinctrl) != -ENODEV) {
+ dev_warn(led->cdev.dev, "Failed to select %pOF pinctrl: %ld\n",
+ np, PTR_ERR(pinctrl));
+ }
+
+ bit = BIT(led->pin);
+ bcm63138_leds_update_bits(leds, BCM63138_PARALLEL_LED_POLARITY, bit,
+ led->active_low ? 0 : bit);
+ bcm63138_leds_update_bits(leds, BCM63138_HW_LED_EN, bit, 0);
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ bcm63138_leds_enable_led(leds, led, led->cdev.brightness);
+
+ return;
+
+err_free:
+ devm_kfree(dev, led);
+}
+
+static int bcm63138_leds_probe(struct platform_device *pdev)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bcm63138_leds *leds;
+ struct device_node *child;
+
+ leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds->dev = dev;
+
+ leds->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(leds->base))
+ return PTR_ERR(leds->base);
+
+ spin_lock_init(&leds->lock);
+
+ bcm63138_leds_write(leds, BCM63138_GLB_CTRL,
+ BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL |
+ BCM63138_GLB_CTRL_SERIAL_LED_EN_POL);
+ bcm63138_leds_write(leds, BCM63138_HW_LED_EN, 0);
+ bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0);
+ bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0);
+
+ for_each_available_child_of_node(np, child) {
+ bcm63138_leds_create_led(leds, child);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm63138_leds_of_match_table[] = {
+ { .compatible = "brcm,bcm63138-leds", },
+ { },
+};
+
+static struct platform_driver bcm63138_leds_driver = {
+ .probe = bcm63138_leds_probe,
+ .driver = {
+ .name = "leds-bcm63xxx",
+ .of_match_table = bcm63138_leds_of_match_table,
+ },
+};
+
+module_platform_driver(bcm63138_leds_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table);
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 4161b9dd7e48..52b59b62f437 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -11,9 +11,9 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -21,39 +21,64 @@
/* register numbers */
#define IS31FL319X_SHUTDOWN 0x00
-#define IS31FL319X_CTRL1 0x01
-#define IS31FL319X_CTRL2 0x02
-#define IS31FL319X_CONFIG1 0x03
-#define IS31FL319X_CONFIG2 0x04
-#define IS31FL319X_RAMP_MODE 0x05
-#define IS31FL319X_BREATH_MASK 0x06
-#define IS31FL319X_PWM(channel) (0x07 + channel)
-#define IS31FL319X_DATA_UPDATE 0x10
-#define IS31FL319X_T0(channel) (0x11 + channel)
-#define IS31FL319X_T123_1 0x1a
-#define IS31FL319X_T123_2 0x1b
-#define IS31FL319X_T123_3 0x1c
-#define IS31FL319X_T4(channel) (0x1d + channel)
-#define IS31FL319X_TIME_UPDATE 0x26
-#define IS31FL319X_RESET 0xff
-
-#define IS31FL319X_REG_CNT (IS31FL319X_RESET + 1)
+
+/* registers for 3190, 3191 and 3193 */
+#define IS31FL3190_BREATHING 0x01
+#define IS31FL3190_LEDMODE 0x02
+#define IS31FL3190_CURRENT 0x03
+#define IS31FL3190_PWM(channel) (0x04 + channel)
+#define IS31FL3190_DATA_UPDATE 0x07
+#define IS31FL3190_T0(channel) (0x0a + channel)
+#define IS31FL3190_T1T2(channel) (0x10 + channel)
+#define IS31FL3190_T3T4(channel) (0x16 + channel)
+#define IS31FL3190_TIME_UPDATE 0x1c
+#define IS31FL3190_LEDCONTROL 0x1d
+#define IS31FL3190_RESET 0x2f
+
+#define IS31FL3190_CURRENT_uA_MIN 5000
+#define IS31FL3190_CURRENT_uA_DEFAULT 42000
+#define IS31FL3190_CURRENT_uA_MAX 42000
+#define IS31FL3190_CURRENT_MASK GENMASK(4, 2)
+#define IS31FL3190_CURRENT_5_mA 0x02
+#define IS31FL3190_CURRENT_10_mA 0x01
+#define IS31FL3190_CURRENT_17dot5_mA 0x04
+#define IS31FL3190_CURRENT_30_mA 0x03
+#define IS31FL3190_CURRENT_42_mA 0x00
+
+/* registers for 3196 and 3199 */
+#define IS31FL3196_CTRL1 0x01
+#define IS31FL3196_CTRL2 0x02
+#define IS31FL3196_CONFIG1 0x03
+#define IS31FL3196_CONFIG2 0x04
+#define IS31FL3196_RAMP_MODE 0x05
+#define IS31FL3196_BREATH_MARK 0x06
+#define IS31FL3196_PWM(channel) (0x07 + channel)
+#define IS31FL3196_DATA_UPDATE 0x10
+#define IS31FL3196_T0(channel) (0x11 + channel)
+#define IS31FL3196_T123_1 0x1a
+#define IS31FL3196_T123_2 0x1b
+#define IS31FL3196_T123_3 0x1c
+#define IS31FL3196_T4(channel) (0x1d + channel)
+#define IS31FL3196_TIME_UPDATE 0x26
+#define IS31FL3196_RESET 0xff
+
+#define IS31FL3196_REG_CNT (IS31FL3196_RESET + 1)
#define IS31FL319X_MAX_LEDS 9
/* CS (Current Setting) in CONFIG2 register */
-#define IS31FL319X_CONFIG2_CS_SHIFT 4
-#define IS31FL319X_CONFIG2_CS_MASK 0x7
-#define IS31FL319X_CONFIG2_CS_STEP_REF 12
+#define IS31FL3196_CONFIG2_CS_SHIFT 4
+#define IS31FL3196_CONFIG2_CS_MASK GENMASK(2, 0)
+#define IS31FL3196_CONFIG2_CS_STEP_REF 12
-#define IS31FL319X_CURRENT_MIN ((u32)5000)
-#define IS31FL319X_CURRENT_MAX ((u32)40000)
-#define IS31FL319X_CURRENT_STEP ((u32)5000)
-#define IS31FL319X_CURRENT_DEFAULT ((u32)20000)
+#define IS31FL3196_CURRENT_uA_MIN 5000
+#define IS31FL3196_CURRENT_uA_MAX 40000
+#define IS31FL3196_CURRENT_uA_STEP 5000
+#define IS31FL3196_CURRENT_uA_DEFAULT 20000
/* Audio gain in CONFIG2 register */
-#define IS31FL319X_AUDIO_GAIN_DB_MAX ((u32)21)
-#define IS31FL319X_AUDIO_GAIN_DB_STEP ((u32)3)
+#define IS31FL3196_AUDIO_GAIN_DB_MAX ((u32)21)
+#define IS31FL3196_AUDIO_GAIN_DB_STEP 3
/*
* regmap is used as a cache of chip's register space,
@@ -78,52 +103,161 @@ struct is31fl319x_chip {
struct is31fl319x_chipdef {
int num_leds;
+ u8 reset_reg;
+ const struct regmap_config *is31fl319x_regmap_config;
+ int (*brightness_set)(struct led_classdev *cdev, enum led_brightness brightness);
+ u32 current_default;
+ u32 current_min;
+ u32 current_max;
+ bool is_3196or3199;
};
-static const struct is31fl319x_chipdef is31fl3190_cdef = {
- .num_leds = 1,
-};
+static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* we have no readable registers */
+ return false;
+}
-static const struct is31fl319x_chipdef is31fl3193_cdef = {
- .num_leds = 3,
+static bool is31fl3190_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3190_DATA_UPDATE:
+ case IS31FL3190_TIME_UPDATE:
+ case IS31FL3190_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3190_reg_defaults[] = {
+ { IS31FL3190_LEDMODE, 0x00 },
+ { IS31FL3190_CURRENT, 0x00 },
+ { IS31FL3190_PWM(0), 0x00 },
+ { IS31FL3190_PWM(1), 0x00 },
+ { IS31FL3190_PWM(2), 0x00 },
};
-static const struct is31fl319x_chipdef is31fl3196_cdef = {
- .num_leds = 6,
+static struct regmap_config is31fl3190_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3190_RESET,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3190_volatile_reg,
+ .reg_defaults = is31fl3190_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3190_reg_defaults),
};
-static const struct is31fl319x_chipdef is31fl3199_cdef = {
- .num_leds = 9,
+static bool is31fl3196_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3196_DATA_UPDATE:
+ case IS31FL3196_TIME_UPDATE:
+ case IS31FL3196_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3196_reg_defaults[] = {
+ { IS31FL3196_CONFIG1, 0x00 },
+ { IS31FL3196_CONFIG2, 0x00 },
+ { IS31FL3196_PWM(0), 0x00 },
+ { IS31FL3196_PWM(1), 0x00 },
+ { IS31FL3196_PWM(2), 0x00 },
+ { IS31FL3196_PWM(3), 0x00 },
+ { IS31FL3196_PWM(4), 0x00 },
+ { IS31FL3196_PWM(5), 0x00 },
+ { IS31FL3196_PWM(6), 0x00 },
+ { IS31FL3196_PWM(7), 0x00 },
+ { IS31FL3196_PWM(8), 0x00 },
};
-static const struct of_device_id of_is31fl319x_match[] = {
- { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
- { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
- { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
- { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
- { }
+static struct regmap_config is31fl3196_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3196_REG_CNT,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3196_volatile_reg,
+ .reg_defaults = is31fl3196_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3196_reg_defaults),
};
-MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
-static int is31fl319x_brightness_set(struct led_classdev *cdev,
+static int is31fl3190_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
+ struct is31fl319x_chip *is31 = led->chip;
+ int chan = led - is31->leds;
+ int ret;
+ int i;
+ u8 ctrl = 0;
+
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
+
+ mutex_lock(&is31->lock);
+
+ /* update PWM register */
+ ret = regmap_write(is31->regmap, IS31FL3190_PWM(chan), brightness);
+ if (ret < 0)
+ goto out;
+
+ /* read current brightness of all PWM channels */
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ unsigned int pwm_value;
+ bool on;
+
+ /*
+ * since neither cdev nor the chip can provide
+ * the current setting, we read from the regmap cache
+ */
+
+ ret = regmap_read(is31->regmap, IS31FL3190_PWM(i), &pwm_value);
+ on = ret >= 0 && pwm_value > LED_OFF;
+
+ ctrl |= on << i;
+ }
+
+ if (ctrl > 0) {
+ dev_dbg(&is31->client->dev, "power up %02x\n", ctrl);
+ regmap_write(is31->regmap, IS31FL3190_LEDCONTROL, ctrl);
+ /* update PWMs */
+ regmap_write(is31->regmap, IS31FL3190_DATA_UPDATE, 0x00);
+ /* enable chip from shut down and enable all channels */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x20);
+ } else {
+ dev_dbg(&is31->client->dev, "power down\n");
+ /* shut down (no need to clear LEDCONTROL) */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
+ }
+
+out:
+ mutex_unlock(&is31->lock);
+
+ return ret;
+}
+
+static int is31fl3196_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
- struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led,
- cdev);
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
struct is31fl319x_chip *is31 = led->chip;
int chan = led - is31->leds;
int ret;
int i;
u8 ctrl1 = 0, ctrl2 = 0;
- dev_dbg(&is31->client->dev, "%s %d: %d\n", __func__, chan, brightness);
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
mutex_lock(&is31->lock);
/* update PWM register */
- ret = regmap_write(is31->regmap, IS31FL319X_PWM(chan), brightness);
+ ret = regmap_write(is31->regmap, IS31FL3196_PWM(chan), brightness);
if (ret < 0)
goto out;
@@ -137,9 +271,7 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
* the current setting, we read from the regmap cache
*/
- ret = regmap_read(is31->regmap, IS31FL319X_PWM(i), &pwm_value);
- dev_dbg(&is31->client->dev, "%s read %d: ret=%d: %d\n",
- __func__, i, ret, pwm_value);
+ ret = regmap_read(is31->regmap, IS31FL3196_PWM(i), &pwm_value);
on = ret >= 0 && pwm_value > LED_OFF;
if (i < 3)
@@ -153,10 +285,10 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
if (ctrl1 > 0 || ctrl2 > 0) {
dev_dbg(&is31->client->dev, "power up %02x %02x\n",
ctrl1, ctrl2);
- regmap_write(is31->regmap, IS31FL319X_CTRL1, ctrl1);
- regmap_write(is31->regmap, IS31FL319X_CTRL2, ctrl2);
+ regmap_write(is31->regmap, IS31FL3196_CTRL1, ctrl1);
+ regmap_write(is31->regmap, IS31FL3196_CTRL2, ctrl2);
/* update PWMs */
- regmap_write(is31->regmap, IS31FL319X_DATA_UPDATE, 0x00);
+ regmap_write(is31->regmap, IS31FL3196_DATA_UPDATE, 0x00);
/* enable chip from shut down */
ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
} else {
@@ -171,92 +303,141 @@ out:
return ret;
}
-static int is31fl319x_parse_child_dt(const struct device *dev,
- const struct device_node *child,
- struct is31fl319x_led *led)
+static const struct is31fl319x_chipdef is31fl3190_cdef = {
+ .num_leds = 1,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3193_cdef = {
+ .num_leds = 3,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3196_cdef = {
+ .num_leds = 6,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct is31fl319x_chipdef is31fl3199_cdef = {
+ .num_leds = 9,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct of_device_id of_is31fl319x_match[] = {
+ { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
+ { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
+ { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
+ { .compatible = "si-en,sn3190", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3191", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3193", .data = &is31fl3193_cdef, },
+ { .compatible = "si-en,sn3196", .data = &is31fl3196_cdef, },
+ { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
+
+static int is31fl319x_parse_child_fw(const struct device *dev,
+ const struct fwnode_handle *child,
+ struct is31fl319x_led *led,
+ struct is31fl319x_chip *is31)
{
struct led_classdev *cdev = &led->cdev;
int ret;
- if (of_property_read_string(child, "label", &cdev->name))
- cdev->name = child->name;
+ if (fwnode_property_read_string(child, "label", &cdev->name))
+ cdev->name = fwnode_get_name(child);
- ret = of_property_read_string(child, "linux,default-trigger",
- &cdev->default_trigger);
+ ret = fwnode_property_read_string(child, "linux,default-trigger", &cdev->default_trigger);
if (ret < 0 && ret != -EINVAL) /* is optional */
return ret;
- led->max_microamp = IS31FL319X_CURRENT_DEFAULT;
- ret = of_property_read_u32(child, "led-max-microamp",
- &led->max_microamp);
+ led->max_microamp = is31->cdef->current_default;
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &led->max_microamp);
if (!ret) {
- if (led->max_microamp < IS31FL319X_CURRENT_MIN)
+ if (led->max_microamp < is31->cdef->current_min)
return -EINVAL; /* not supported */
led->max_microamp = min(led->max_microamp,
- IS31FL319X_CURRENT_MAX);
+ is31->cdef->current_max);
}
return 0;
}
-static int is31fl319x_parse_dt(struct device *dev,
- struct is31fl319x_chip *is31)
+static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
{
- struct device_node *np = dev_of_node(dev), *child;
+ struct fwnode_handle *fwnode = dev_fwnode(dev), *child;
int count;
int ret;
- if (!np)
- return -ENODEV;
-
- is31->shutdown_gpio = devm_gpiod_get_optional(dev,
- "shutdown",
- GPIOD_OUT_HIGH);
- if (IS_ERR(is31->shutdown_gpio)) {
- ret = PTR_ERR(is31->shutdown_gpio);
- dev_err(dev, "Failed to get shutdown gpio: %d\n", ret);
- return ret;
- }
+ is31->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(is31->shutdown_gpio))
+ return dev_err_probe(dev, PTR_ERR(is31->shutdown_gpio),
+ "Failed to get shutdown gpio\n");
is31->cdef = device_get_match_data(dev);
- count = of_get_available_child_count(np);
+ count = 0;
+ fwnode_for_each_available_child_node(fwnode, child)
+ count++;
dev_dbg(dev, "probing with %d leds defined in DT\n", count);
- if (!count || count > is31->cdef->num_leds) {
- dev_err(dev, "Number of leds defined must be between 1 and %u\n",
- is31->cdef->num_leds);
- return -ENODEV;
- }
+ if (!count || count > is31->cdef->num_leds)
+ return dev_err_probe(dev, -ENODEV,
+ "Number of leds defined must be between 1 and %u\n",
+ is31->cdef->num_leds);
- for_each_available_child_of_node(np, child) {
+ fwnode_for_each_available_child_node(fwnode, child) {
struct is31fl319x_led *led;
u32 reg;
- ret = of_property_read_u32(child, "reg", &reg);
+ ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret) {
- dev_err(dev, "Failed to read led 'reg' property\n");
+ ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
goto put_child_node;
}
if (reg < 1 || reg > is31->cdef->num_leds) {
- dev_err(dev, "invalid led reg %u\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
goto put_child_node;
}
led = &is31->leds[reg - 1];
if (led->configured) {
- dev_err(dev, "led %u is already configured\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
goto put_child_node;
}
- ret = is31fl319x_parse_child_dt(dev, child, led);
+ ret = is31fl319x_parse_child_fw(dev, child, led, is31);
if (ret) {
- dev_err(dev, "led %u DT parsing failed\n", reg);
+ ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
goto put_child_node;
}
@@ -264,82 +445,62 @@ static int is31fl319x_parse_dt(struct device *dev,
}
is31->audio_gain_db = 0;
- ret = of_property_read_u32(np, "audio-gain-db", &is31->audio_gain_db);
- if (!ret)
- is31->audio_gain_db = min(is31->audio_gain_db,
- IS31FL319X_AUDIO_GAIN_DB_MAX);
+ if (is31->cdef->is_3196or3199) {
+ ret = fwnode_property_read_u32(fwnode, "audio-gain-db", &is31->audio_gain_db);
+ if (!ret)
+ is31->audio_gain_db = min(is31->audio_gain_db,
+ IS31FL3196_AUDIO_GAIN_DB_MAX);
+ }
return 0;
put_child_node:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
-static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
-{ /* we have no readable registers */
- return false;
-}
-
-static bool is31fl319x_volatile_reg(struct device *dev, unsigned int reg)
-{ /* volatile registers are not cached */
- switch (reg) {
- case IS31FL319X_DATA_UPDATE:
- case IS31FL319X_TIME_UPDATE:
- case IS31FL319X_RESET:
- return true; /* always write-through */
+static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ switch (microamp) {
+ case 5000:
+ return IS31FL3190_CURRENT_5_mA;
+ case 10000:
+ return IS31FL3190_CURRENT_10_mA;
+ case 17500:
+ return IS31FL3190_CURRENT_17dot5_mA;
+ case 30000:
+ return IS31FL3190_CURRENT_30_mA;
+ case 42000:
+ return IS31FL3190_CURRENT_42_mA;
default:
- return false;
+ dev_warn(dev, "Unsupported current value: %d, using 5000 µA!\n", microamp);
+ return IS31FL3190_CURRENT_5_mA;
}
}
-static const struct reg_default is31fl319x_reg_defaults[] = {
- { IS31FL319X_CONFIG1, 0x00},
- { IS31FL319X_CONFIG2, 0x00},
- { IS31FL319X_PWM(0), 0x00},
- { IS31FL319X_PWM(1), 0x00},
- { IS31FL319X_PWM(2), 0x00},
- { IS31FL319X_PWM(3), 0x00},
- { IS31FL319X_PWM(4), 0x00},
- { IS31FL319X_PWM(5), 0x00},
- { IS31FL319X_PWM(6), 0x00},
- { IS31FL319X_PWM(7), 0x00},
- { IS31FL319X_PWM(8), 0x00},
-};
-
-static struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = IS31FL319X_REG_CNT,
- .cache_type = REGCACHE_FLAT,
- .readable_reg = is31fl319x_readable_reg,
- .volatile_reg = is31fl319x_volatile_reg,
- .reg_defaults = is31fl319x_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(is31fl319x_reg_defaults),
-};
-
-static inline int is31fl319x_microamp_to_cs(struct device *dev, u32 microamp)
-{ /* round down to nearest supported value (range check done by caller) */
- u32 step = microamp / IS31FL319X_CURRENT_STEP;
+static inline int is31fl3196_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ u32 step = microamp / IS31FL3196_CURRENT_uA_STEP;
- return ((IS31FL319X_CONFIG2_CS_STEP_REF - step) &
- IS31FL319X_CONFIG2_CS_MASK) <<
- IS31FL319X_CONFIG2_CS_SHIFT; /* CS encoding */
+ return ((IS31FL3196_CONFIG2_CS_STEP_REF - step) &
+ IS31FL3196_CONFIG2_CS_MASK) <<
+ IS31FL3196_CONFIG2_CS_SHIFT; /* CS encoding */
}
-static inline int is31fl319x_db_to_gain(u32 dezibel)
-{ /* round down to nearest supported value (range check done by caller) */
- return dezibel / IS31FL319X_AUDIO_GAIN_DB_STEP;
+static inline int is31fl3196_db_to_gain(u32 dezibel)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
-static int is31fl319x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
struct device *dev = &client->dev;
int err;
int i = 0;
- u32 aggregated_led_microamp = IS31FL319X_CURRENT_MAX;
+ u32 aggregated_led_microamp;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -EIO;
@@ -349,10 +510,13 @@ static int is31fl319x_probe(struct i2c_client *client,
return -ENOMEM;
mutex_init(&is31->lock);
+ err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
+ if (err)
+ return err;
- err = is31fl319x_parse_dt(&client->dev, is31);
+ err = is31fl319x_parse_fw(&client->dev, is31);
if (err)
- goto free_mutex;
+ return err;
if (is31->shutdown_gpio) {
gpiod_direction_output(is31->shutdown_gpio, 0);
@@ -361,37 +525,35 @@ static int is31fl319x_probe(struct i2c_client *client,
}
is31->client = client;
- is31->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(is31->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- err = PTR_ERR(is31->regmap);
- goto free_mutex;
- }
+ is31->regmap = devm_regmap_init_i2c(client, is31->cdef->is31fl319x_regmap_config);
+ if (IS_ERR(is31->regmap))
+ return dev_err_probe(dev, PTR_ERR(is31->regmap), "failed to allocate register map\n");
i2c_set_clientdata(client, is31);
/* check for write-reply from chip (we can't read any registers) */
- err = regmap_write(is31->regmap, IS31FL319X_RESET, 0x00);
- if (err < 0) {
- dev_err(&client->dev, "no response from chip write: err = %d\n",
- err);
- err = -EIO; /* does not answer */
- goto free_mutex;
- }
+ err = regmap_write(is31->regmap, is31->cdef->reset_reg, 0x00);
+ if (err < 0)
+ return dev_err_probe(dev, err, "no response from chip write\n");
/*
* Kernel conventions require per-LED led-max-microamp property.
* But the chip does not allow to limit individual LEDs.
* So we take minimum from all subnodes for safety of hardware.
*/
+ aggregated_led_microamp = is31->cdef->current_max;
for (i = 0; i < is31->cdef->num_leds; i++)
if (is31->leds[i].configured &&
is31->leds[i].max_microamp < aggregated_led_microamp)
aggregated_led_microamp = is31->leds[i].max_microamp;
- regmap_write(is31->regmap, IS31FL319X_CONFIG2,
- is31fl319x_microamp_to_cs(dev, aggregated_led_microamp) |
- is31fl319x_db_to_gain(is31->audio_gain_db));
+ if (is31->cdef->is_3196or3199)
+ regmap_write(is31->regmap, IS31FL3196_CONFIG2,
+ is31fl3196_microamp_to_cs(dev, aggregated_led_microamp) |
+ is31fl3196_db_to_gain(is31->audio_gain_db));
+ else
+ regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK,
+ is31fl3190_microamp_to_cs(dev, aggregated_led_microamp));
for (i = 0; i < is31->cdef->num_leds; i++) {
struct is31fl319x_led *led = &is31->leds[i];
@@ -400,26 +562,14 @@ static int is31fl319x_probe(struct i2c_client *client,
continue;
led->chip = is31;
- led->cdev.brightness_set_blocking = is31fl319x_brightness_set;
+ led->cdev.brightness_set_blocking = is31->cdef->brightness_set;
err = devm_led_classdev_register(&client->dev, &led->cdev);
if (err < 0)
- goto free_mutex;
+ return err;
}
return 0;
-
-free_mutex:
- mutex_destroy(&is31->lock);
- return err;
-}
-
-static int is31fl319x_remove(struct i2c_client *client)
-{
- struct is31fl319x_chip *is31 = i2c_get_clientdata(client);
-
- mutex_destroy(&is31->lock);
- return 0;
}
/*
@@ -432,6 +582,10 @@ static const struct i2c_device_id is31fl319x_id[] = {
{ "is31fl3193" },
{ "is31fl3196" },
{ "is31fl3199" },
+ { "sn3190" },
+ { "sn3191" },
+ { "sn3193" },
+ { "sn3196" },
{ "sn3199" },
{},
};
@@ -440,10 +594,9 @@ MODULE_DEVICE_TABLE(i2c, is31fl319x_id);
static struct i2c_driver is31fl319x_driver = {
.driver = {
.name = "leds-is31fl319x",
- .of_match_table = of_match_ptr(of_is31fl319x_match),
+ .of_match_table = of_is31fl319x_match,
},
- .probe = is31fl319x_probe,
- .remove = is31fl319x_remove,
+ .probe_new = is31fl319x_probe,
.id_table = is31fl319x_id,
};
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 1adfed1c0619..eac6f4a573b2 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -239,9 +239,6 @@ static int omnia_leds_probe(struct i2c_client *client,
led += ret;
}
- if (devm_device_add_groups(dev, omnia_led_controller_groups))
- dev_warn(dev, "Could not add attribute group!\n");
-
return 0;
}
@@ -283,6 +280,7 @@ static struct i2c_driver omnia_leds_driver = {
.driver = {
.name = "leds-turris-omnia",
.of_match_table = of_omnia_leds_match,
+ .dev_groups = omnia_led_controller_groups,
},
};
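
This hunk moves the Omnia sysfs attributes from a manual devm_device_add_groups() call in probe to the driver core's .dev_groups field, so the groups are created on bind and removed on unbind without any driver code. A hedged kernel-style fragment of the same pattern is sketched below; the names are illustrative and the probe callback is omitted.

/*
 * Kernel-style sketch (not the Omnia driver): let the driver core manage
 * sysfs groups through .dev_groups instead of adding them from probe.
 */
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",
		.dev_groups = example_groups,	/* created/removed by the core */
	},
	/* probe callback omitted in this sketch */
};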
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 45e38708ecb1..da9d2218ae18 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -19,6 +19,7 @@
struct pwm_led {
struct pwm_device *pwm;
struct pwm_state state;
+ bool active_low;
};
struct pwm_mc_led {
@@ -45,6 +46,9 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
duty *= mc_cdev->subled_info[i].brightness;
do_div(duty, cdev->max_brightness);
+ if (priv->leds[i].active_low)
+ duty = priv->leds[i].state.period - duty;
+
priv->leds[i].state.duty_cycle = duty;
priv->leds[i].state.enabled = duty > 0;
ret = pwm_apply_state(priv->leds[i].pwm,
@@ -72,11 +76,11 @@ static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
pwmled = &priv->leds[priv->mc_cdev.num_colors];
pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
if (IS_ERR(pwmled->pwm)) {
- ret = PTR_ERR(pwmled->pwm);
- dev_err(dev, "unable to request PWM: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(pwmled->pwm), "unable to request PWM\n");
goto release_fwnode;
}
pwm_init_state(pwmled->pwm, &pwmled->state);
+ pwmled->active_low = fwnode_property_read_bool(fwnode, "active-low");
ret = fwnode_property_read_u32(fwnode, "color", &color);
if (ret) {
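
The led_pwm_mc_set() hunk above scales the requested brightness into a duty cycle and then inverts it against the PWM period when the channel is marked active-low. A standalone sketch of that calculation, with illustrative values:

/*
 * Standalone sketch of the active-low handling added to led_pwm_mc_set():
 * brightness is scaled into a duty cycle, then inverted against the PWM
 * period when the output is active-low.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t brightness_to_duty(uint64_t period, unsigned int brightness,
				   unsigned int max_brightness, int active_low)
{
	uint64_t duty = period * brightness / max_brightness;

	if (active_low)
		duty = period - duty;	/* invert for active-low outputs */

	return duty;
}

int main(void)
{
	/* half brightness on an active-high channel */
	printf("%llu\n", (unsigned long long)
	       brightness_to_duty(1000000, 128, 255, 0));
	/* full brightness on an active-low channel collapses to 0 */
	printf("%llu\n", (unsigned long long)
	       brightness_to_duty(1000000, 255, 255, 1));
	return 0;
}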
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
index 9f6a68336659..fd2b8225d926 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simple/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config LEDS_SIEMENS_SIMATIC_IPC
tristate "LED driver for Siemens Simatic IPCs"
- depends on LEDS_CLASS
+ depends on LEDS_GPIO
depends on SIEMENS_SIMATIC_IPC
help
This option enables support for the LEDs of several Industrial PCs
from Siemens.
- To compile this driver as a module, choose M here: the module
- will be called simatic-ipc-leds.
+ To compile this driver as a module, choose M here: the modules
+ will be called simatic-ipc-leds and simatic-ipc-leds-gpio.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
index 8481f1e9e360..1c7ef5e1324b 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simple/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds-gpio.o
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
new file mode 100644
index 000000000000..4c9e663a90ba
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2022
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
+ },
+};
+
+static const struct gpio_led simatic_ipc_gpio_leds[] = {
+ { .name = "green:" LED_FUNCTION_STATUS "-3" },
+ { .name = "red:" LED_FUNCTION_STATUS "-1" },
+ { .name = "green:" LED_FUNCTION_STATUS "-1" },
+ { .name = "red:" LED_FUNCTION_STATUS "-2" },
+ { .name = "green:" LED_FUNCTION_STATUS "-2" },
+ { .name = "red:" LED_FUNCTION_STATUS "-3" },
+};
+
+static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
+ .num_leds = ARRAY_SIZE(simatic_ipc_gpio_leds),
+ .leds = simatic_ipc_gpio_leds,
+};
+
+static struct platform_device *simatic_leds_pdev;
+
+static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
+{
+ gpiod_remove_lookup_table(&simatic_ipc_led_gpio_table);
+ platform_device_unregister(simatic_leds_pdev);
+
+ return 0;
+}
+
+static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
+{
+ struct gpio_desc *gpiod;
+ int err;
+
+ gpiod_add_lookup_table(&simatic_ipc_led_gpio_table);
+ simatic_leds_pdev = platform_device_register_resndata(NULL,
+ "leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
+ &simatic_ipc_gpio_leds_pdata,
+ sizeof(simatic_ipc_gpio_leds_pdata));
+ if (IS_ERR(simatic_leds_pdev)) {
+ err = PTR_ERR(simatic_leds_pdev);
+ goto out;
+ }
+
+ /* PM_BIOS_BOOT_N */
+ gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 6, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ /* PM_WDT_OUT */
+ gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 7, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ return 0;
+out:
+ simatic_ipc_leds_gpio_remove(pdev);
+
+ return err;
+}
+
+static struct platform_driver simatic_ipc_led_gpio_driver = {
+ .probe = simatic_ipc_leds_gpio_probe,
+ .remove = simatic_ipc_leds_gpio_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ }
+};
+module_platform_driver(simatic_ipc_led_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: platform:leds-gpio");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simple/simatic-ipc-leds.c
index 078d43f5ba38..4894c228c165 100644
--- a/drivers/leds/simple/simatic-ipc-leds.c
+++ b/drivers/leds/simple/simatic-ipc-leds.c
@@ -23,7 +23,7 @@
#define SIMATIC_IPC_LED_PORT_BASE 0x404E
struct simatic_ipc_led {
- unsigned int value; /* mask for io and offset for mem */
+ unsigned int value; /* mask for io */
char *name;
struct led_classdev cdev;
};
@@ -38,21 +38,6 @@ static struct simatic_ipc_led simatic_ipc_leds_io[] = {
{ }
};
-/* the actual start will be discovered with PCI, 0 is a placeholder */
-static struct resource simatic_ipc_led_mem_res = DEFINE_RES_MEM_NAMED(0, SZ_4K, KBUILD_MODNAME);
-
-static void __iomem *simatic_ipc_led_memory;
-
-static struct simatic_ipc_led simatic_ipc_leds_mem[] = {
- {0x500 + 0x1A0, "red:" LED_FUNCTION_STATUS "-1"},
- {0x500 + 0x1A8, "green:" LED_FUNCTION_STATUS "-1"},
- {0x500 + 0x1C8, "red:" LED_FUNCTION_STATUS "-2"},
- {0x500 + 0x1D0, "green:" LED_FUNCTION_STATUS "-2"},
- {0x500 + 0x1E0, "red:" LED_FUNCTION_STATUS "-3"},
- {0x500 + 0x198, "green:" LED_FUNCTION_STATUS "-3"},
- { }
-};
-
static struct resource simatic_ipc_led_io_res =
DEFINE_RES_IO_NAMED(SIMATIC_IPC_LED_PORT_BASE, SZ_2, KBUILD_MODNAME);
@@ -88,28 +73,6 @@ static enum led_brightness simatic_ipc_led_get_io(struct led_classdev *led_cd)
return inw(SIMATIC_IPC_LED_PORT_BASE) & led->value ? LED_OFF : led_cd->max_brightness;
}
-static void simatic_ipc_led_set_mem(struct led_classdev *led_cd,
- enum led_brightness brightness)
-{
- struct simatic_ipc_led *led = cdev_to_led(led_cd);
- void __iomem *reg = simatic_ipc_led_memory + led->value;
- u32 val;
-
- val = readl(reg);
- val = (val & ~1) | (brightness == LED_OFF);
- writel(val, reg);
-}
-
-static enum led_brightness simatic_ipc_led_get_mem(struct led_classdev *led_cd)
-{
- struct simatic_ipc_led *led = cdev_to_led(led_cd);
- void __iomem *reg = simatic_ipc_led_memory + led->value;
- u32 val;
-
- val = readl(reg);
- return (val & 1) ? LED_OFF : led_cd->max_brightness;
-}
-
static int simatic_ipc_leds_probe(struct platform_device *pdev)
{
const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
@@ -117,9 +80,7 @@ static int simatic_ipc_leds_probe(struct platform_device *pdev)
struct simatic_ipc_led *ipcled;
struct led_classdev *cdev;
struct resource *res;
- void __iomem *reg;
- int err, type;
- u32 val;
+ int err;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227D:
@@ -134,52 +95,19 @@ static int simatic_ipc_leds_probe(struct platform_device *pdev)
}
ipcled = simatic_ipc_leds_io;
}
- type = IORESOURCE_IO;
if (!devm_request_region(dev, res->start, resource_size(res), KBUILD_MODNAME)) {
dev_err(dev, "Unable to register IO resource at %pR\n", res);
return -EBUSY;
}
break;
- case SIMATIC_IPC_DEVICE_127E:
- res = &simatic_ipc_led_mem_res;
- ipcled = simatic_ipc_leds_mem;
- type = IORESOURCE_MEM;
-
- /* get GPIO base from PCI */
- res->start = simatic_ipc_get_membase0(PCI_DEVFN(13, 0));
- if (res->start == 0)
- return -ENODEV;
-
- /* do the final address calculation */
- res->start = res->start + (0xC5 << 16);
- res->end += res->start;
-
- simatic_ipc_led_memory = devm_ioremap_resource(dev, res);
- if (IS_ERR(simatic_ipc_led_memory))
- return PTR_ERR(simatic_ipc_led_memory);
-
- /* initialize power/watchdog LED */
- reg = simatic_ipc_led_memory + 0x500 + 0x1D8; /* PM_WDT_OUT */
- val = readl(reg);
- writel(val & ~1, reg);
-
- reg = simatic_ipc_led_memory + 0x500 + 0x1C0; /* PM_BIOS_BOOT_N */
- val = readl(reg);
- writel(val | 1, reg);
- break;
default:
return -ENODEV;
}
while (ipcled->value) {
cdev = &ipcled->cdev;
- if (type == IORESOURCE_MEM) {
- cdev->brightness_set = simatic_ipc_led_set_mem;
- cdev->brightness_get = simatic_ipc_led_get_mem;
- } else {
- cdev->brightness_set = simatic_ipc_led_set_io;
- cdev->brightness_get = simatic_ipc_led_get_io;
- }
+ cdev->brightness_set = simatic_ipc_led_set_io;
+ cdev->brightness_get = simatic_ipc_led_get_io;
cdev->max_brightness = LED_ON;
cdev->name = ipcled->name;
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 439fab4eaa85..1bbb9ca08d40 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
break;
mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
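
The adb.c hunk rejects GETDEVINFO queries whose device index falls outside the 16-entry handler table before that index is used to address the array. A standalone sketch of the same bounds check, with illustrative table contents:

/*
 * Standalone sketch of the check added to do_adb_query(): reject a
 * caller-supplied index before using it to address a fixed-size table.
 * The table size of 16 matches the ADB handler array; data is made up.
 */
#include <stdio.h>

#define NR_HANDLERS 16

static const unsigned char handler_addr[NR_HANDLERS] = { 2, 3, 0 /* ... */ };

static int get_dev_info(unsigned int nbytes, unsigned int idx,
			unsigned char *reply)
{
	if (nbytes < 3 || idx >= NR_HANDLERS)
		return -1;	/* malformed or out-of-range request */

	*reply = handler_addr[idx];
	return 0;
}

int main(void)
{
	unsigned char addr;

	printf("%d\n", get_dev_info(3, 1, &addr));	/* 0: valid */
	printf("%d\n", get_dev_info(3, 99, &addr));	/* -1: rejected */
	return 0;
}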
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 22acb51531cb..fda16f76401e 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
/*
* Broadcom FlexRM Mailbox Driver
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index b10239d6ef93..02922073c9ef 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -19,13 +19,15 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#define IMX_MU_CHANS 16
+#define IMX_MU_CHANS 17
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_NUM_RR 4
+
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
@@ -35,9 +37,11 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RX = 1, /* Rx */
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
+ IMX_MU_TYPE_RST = 4, /* Reset */
};
enum imx_mu_xcr {
+ IMX_MU_CR,
IMX_MU_GIER,
IMX_MU_GCR,
IMX_MU_TCR,
@@ -50,6 +54,7 @@ enum imx_mu_xsr {
IMX_MU_GSR,
IMX_MU_TSR,
IMX_MU_RSR,
+ IMX_MU_xSR_MAX,
};
struct imx_sc_rpc_msg_max {
@@ -85,7 +90,7 @@ struct imx_mu_priv {
int irq[IMX_MU_CHANS];
bool suspend;
- u32 xcr[4];
+ u32 xcr[IMX_MU_xCR_MAX];
bool side_b;
};
@@ -105,8 +110,8 @@ struct imx_mu_dcfg {
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
- u32 xSR[4]; /* Status Registers */
- u32 xCR[4]; /* Control Registers */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
@@ -121,6 +126,9 @@ struct imx_mu_dcfg {
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+/* MU reset */
+#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
+#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -497,6 +505,8 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ return IRQ_NONE;
default:
dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
cp->type);
@@ -581,6 +591,8 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+ u32 sr;
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
@@ -598,6 +610,13 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
+ !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
+ if (ret)
+ dev_warn(priv->dev, "RST channel timeout\n");
+ break;
default:
break;
}
@@ -694,6 +713,7 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
+ unsigned int val;
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
@@ -715,6 +735,14 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ /* Clear any pending GIP */
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
+
+ /* Clear any pending RSR */
+ for (i = 0; i < IMX_MU_NUM_RR; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
static void imx_mu_init_specific(struct imx_mu_priv *priv)
@@ -865,7 +893,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
@@ -888,7 +916,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
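
The imx-mailbox hunks add a reset channel type: shutdown asserts the reset bit in the control register and then polls the status register until the bit clears, warning on timeout. A standalone sketch of that poll-until-clear handshake follows; the register model and the retry budget stand in for the driver's readl_poll_timeout() call.

/*
 * Standalone sketch of the reset handshake added to imx_mu_shutdown():
 * assert a reset bit, then poll a status register until the bit clears,
 * giving up after a bounded number of attempts.
 */
#include <stdint.h>
#include <stdio.h>

#define RST_BIT (1u << 0)

static uint32_t fake_sr = RST_BIT;	/* pretend hardware status register */

static uint32_t read_sr(void)
{
	/* this model clears the bit after a few reads */
	static int reads;

	if (++reads >= 3)
		fake_sr &= ~RST_BIT;
	return fake_sr;
}

static int wait_reset_done(unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++)
		if (!(read_sr() & RST_BIT))
			return 0;	/* reset acknowledged */

	return -1;	/* timed out, like the dev_warn() in the driver */
}

int main(void)
{
	printf("%d\n", wait_reset_done(5));
	return 0;
}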
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 2578e5aaa935..9465f9081515 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -192,15 +192,10 @@ static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
- struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
data.sta = sta;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
@@ -448,7 +443,6 @@ done:
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
- struct cmdq_task_cb *cb;
struct cmdq_cb_data data;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
@@ -465,13 +459,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
- cb = &task->pkt->async_cb;
data.sta = -ECONNABORTED;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
kfree(task);
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index cf128b3471d7..338fc889b357 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -254,7 +254,7 @@ static void mcb_free_bus(struct device *dev)
struct mcb_bus *bus = to_mcb_bus(dev);
put_device(bus->carrier);
- ida_simple_remove(&mcb_ida, bus->bus_nr);
+ ida_free(&mcb_ida, bus->bus_nr);
kfree(bus);
}
@@ -273,7 +273,7 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
if (!bus)
return ERR_PTR(-ENOMEM);
- bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
+ bus_nr = ida_alloc(&mcb_ida, GFP_KERNEL);
if (bus_nr < 0) {
kfree(bus);
return ERR_PTR(bus_nr);
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 0454b0885b01..84291e38dca8 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -5,7 +5,7 @@
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \
- dm-rq.o
+ dm-rq.o dm-io-rewind.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-historical-service-time-y += dm-ps-historical-service-time.o
dm-io-affinity-y += dm-ps-io-affinity.o
@@ -83,6 +83,7 @@ obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o
obj-$(CONFIG_DM_ZONED) += dm-zoned.o
obj-$(CONFIG_DM_WRITECACHE) += dm-writecache.o
+obj-$(CONFIG_SECURITY_LOADPIN_VERITY) += dm-verity-loadpin.o
ifeq ($(CONFIG_DM_INIT),y)
dm-mod-objs += dm-init.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index cf3e8096942a..529c9d04e9a4 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -29,7 +29,7 @@ config BCACHE_CLOSURES_DEBUG
operations that get stuck.
config BCACHE_ASYNC_REGISTRATION
- bool "Asynchronous device registration (EXPERIMENTAL)"
+ bool "Asynchronous device registration"
depends on BCACHE
help
Add a sysfs file /sys/fs/bcache/register_async. Writing registering
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e136d6edc1ed..147c493a989a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -812,7 +812,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
- if (register_shrinker(&c->shrink))
+ if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
pr_warn("bcache: %s: could not register shrinker\n",
__func__);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3563d15dbaf2..ba3909bb6bea 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -414,8 +414,8 @@ static void uuid_io_unlock(struct closure *cl)
up(&c->uuid_write_mutex);
}
-static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
- struct bkey *k, struct closure *parent)
+static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
+ struct closure *parent)
{
struct closure *cl = &c->uuid_write;
struct uuid_entry *u;
@@ -429,22 +429,22 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
- bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
+ bio->bi_opf = opf | REQ_SYNC | REQ_META;
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
- bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bch_bio_map(bio, c->uuids);
bch_submit_bbio(bio, c, k, i);
- if (op != REQ_OP_WRITE)
+ if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
break;
}
bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+ pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
+ "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
@@ -463,7 +463,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, REQ_OP_READ, 0, k, cl);
+ uuid_io(c, REQ_OP_READ, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -511,7 +511,7 @@ static int __uuid_write(struct cache_set *c)
size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
SET_KEY_SIZE(&k.key, size);
- uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+ uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
closure_sync(&cl);
/* Only one bucket used for uuid write */
@@ -587,8 +587,7 @@ static void prio_endio(struct bio *bio)
closure_put(&ca->prio);
}
-static void prio_io(struct cache *ca, uint64_t bucket, int op,
- unsigned long op_flags)
+static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
{
struct closure *cl = &ca->prio;
struct bio *bio = bch_bbio_alloc(ca->set);
@@ -601,7 +600,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
- bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
+ bio->bi_opf = opf | REQ_SYNC | REQ_META;
bch_bio_map(bio, ca->disk_buckets);
closure_bio_submit(ca->set, bio, &ca->prio);
@@ -661,7 +660,7 @@ int bch_prio_write(struct cache *ca, bool wait)
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
- prio_io(ca, bucket, REQ_OP_WRITE, 0);
+ prio_io(ca, bucket, REQ_OP_WRITE);
mutex_lock(&ca->set->bucket_lock);
ca->prio_buckets[i] = bucket;
@@ -705,7 +704,7 @@ static int prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, REQ_OP_READ, 0);
+ prio_io(ca, bucket, REQ_OP_READ);
if (p->csum !=
bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
@@ -884,7 +883,7 @@ static void bcache_device_free(struct bcache_device *d)
if (disk) {
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
- blk_cleanup_disk(disk);
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
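
The bcache hunks above, and the dm hunks below, collapse the separate op/op_flags parameters into a single blk_opf_t: the operation lives in the low bits and is recovered with REQ_OP_MASK, while flags such as REQ_SYNC and REQ_FUA are OR'd on top. A standalone sketch of that encoding follows; the bit values are illustrative, not the kernel's actual layout.

/*
 * Standalone sketch of the single-word op+flags convention: one value
 * carries the operation in its low bits plus flag bits, the op is
 * recovered with a mask, and flags are OR'd on top.
 */
#include <stdio.h>

typedef unsigned int opf_t;

#define OP_MASK   0xff
#define OP_READ   0x00
#define OP_WRITE  0x01
#define F_SYNC    (1u << 8)
#define F_META    (1u << 9)
#define F_FUA     (1u << 10)

static const char *op_name(opf_t opf)
{
	return (opf & OP_MASK) == OP_WRITE ? "write" : "read";
}

int main(void)
{
	opf_t opf = OP_WRITE | F_FUA | F_SYNC;	/* one argument instead of two */

	printf("%s, fua=%d\n", op_name(opf), !!(opf & F_FUA));
	printf("%s\n", op_name(OP_READ | F_META));
	return 0;
}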
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5ffa1dcf84cf..09c7ed2650ca 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "bufio"
@@ -81,6 +82,8 @@
*/
struct dm_bufio_client {
struct mutex lock;
+ spinlock_t spinlock;
+ bool no_sleep;
struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE];
@@ -90,7 +93,6 @@ struct dm_bufio_client {
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
-
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
@@ -161,23 +163,34 @@ struct dm_buffer {
#endif
};
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
static void dm_bufio_lock(struct dm_bufio_client *c)
{
- mutex_lock_nested(&c->lock, dm_bufio_in_request());
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_lock_bh(&c->spinlock);
+ else
+ mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
static int dm_bufio_trylock(struct dm_bufio_client *c)
{
- return mutex_trylock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return spin_trylock_bh(&c->spinlock);
+ else
+ return mutex_trylock(&c->lock);
}
static void dm_bufio_unlock(struct dm_bufio_client *c)
{
- mutex_unlock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_unlock_bh(&c->spinlock);
+ else
+ mutex_unlock(&c->lock);
}
/*----------------------------------------------------------------*/
@@ -577,13 +590,12 @@ static void dmio_complete(unsigned long error, void *context)
b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}
-static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
int r;
struct dm_io_request io_req = {
- .bi_op = rw,
- .bi_op_flags = 0,
+ .bi_opf = op,
.notify.fn = dmio_complete,
.notify.context = b,
.client = b->c->dm_io,
@@ -616,7 +628,7 @@ static void bio_complete(struct bio *bio)
b->end_io(b, status);
}
-static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
struct bio *bio;
@@ -630,10 +642,10 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
+ bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
@@ -669,7 +681,8 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
return sector;
}
-static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
+static void submit_io(struct dm_buffer *b, enum req_op op,
+ void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
sector_t sector;
@@ -679,7 +692,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
sector = block_to_sector(b->c, b->block);
- if (rw != REQ_OP_WRITE) {
+ if (op != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
offset = 0;
} else {
@@ -698,9 +711,9 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, rw, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset);
else
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
}
/*----------------------------------------------------------------
@@ -802,6 +815,10 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_WRITING, &b->state));
BUG_ON(test_bit(B_DIRTY, &b->state));
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+ unlikely(test_bit(B_READING, &b->state)))
+ continue;
+
if (!b->hold_count) {
__make_buffer_clean(b);
__unlink_buffer(b);
@@ -810,6 +827,9 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
cond_resched();
}
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return NULL;
+
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
BUG_ON(test_bit(B_READING, &b->state));
@@ -1341,8 +1361,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1365,8 +1384,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_DISCARD,
- .bi_op_flags = REQ_SYNC,
+ .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1619,7 +1637,8 @@ static void drop_buffers(struct dm_bufio_client *c)
*/
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
- if (!(gfp & __GFP_FS)) {
+ if (!(gfp & __GFP_FS) ||
+ (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
@@ -1717,7 +1736,8 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *))
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags)
{
int r;
struct dm_bufio_client *c;
@@ -1747,12 +1767,18 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->alloc_callback = alloc_callback;
c->write_callback = write_callback;
+ if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
+ c->no_sleep = true;
+ static_branch_inc(&no_sleep_enabled);
+ }
+
for (i = 0; i < LIST_SIZE; i++) {
INIT_LIST_HEAD(&c->lru[i]);
c->n_buffers[i] = 0;
}
mutex_init(&c->lock);
+ spin_lock_init(&c->spinlock);
INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers;
@@ -1806,7 +1832,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker);
+ r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
@@ -1878,6 +1905,8 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
kmem_cache_destroy(c->slab_buffer);
dm_io_client_destroy(c->dm_io);
mutex_destroy(&c->lock);
+ if (c->no_sleep)
+ static_branch_dec(&no_sleep_enabled);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
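
The dm-bufio hunks introduce a DM_BUFIO_CLIENT_NO_SLEEP mode: such clients take a spinlock instead of the mutex, and the check is gated behind a static key so the common mutex path stays unaffected. A standalone sketch of choosing the lock per client follows; a plain global counter stands in for the static key.

/*
 * Standalone sketch of the locking split added to dm-bufio: clients
 * created with a "no sleep" flag take a spinlock, everyone else keeps
 * the mutex that may sleep.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static int no_sleep_clients;	/* stand-in for the static key */

struct client {
	bool no_sleep;
	pthread_mutex_t lock;
	pthread_spinlock_t spinlock;
};

static void client_lock(struct client *c)
{
	if (no_sleep_clients && c->no_sleep)
		pthread_spin_lock(&c->spinlock);	/* never sleeps */
	else
		pthread_mutex_lock(&c->lock);		/* may sleep */
}

static void client_unlock(struct client *c)
{
	if (no_sleep_clients && c->no_sleep)
		pthread_spin_unlock(&c->spinlock);
	else
		pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct client c = { .no_sleep = true };

	pthread_mutex_init(&c.lock, NULL);
	pthread_spin_init(&c.spinlock, PTHREAD_PROCESS_PRIVATE);
	no_sleep_clients++;

	client_lock(&c);
	puts("locked via spinlock path");
	client_unlock(&c);
	return 0;
}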
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 179ed5bf81a3..0905f2c1615e 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -131,7 +131,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd);
* hints will be lost.
*
* The hints are indexed by the cblock, but many policies will not
- * neccessarily have a fast way of accessing efficiently via cblock. So
+ * necessarily have a fast way of accessing efficiently via cblock. So
* rather than querying the policy for each cblock, we let it walk its data
* structures and fill in the hints in whatever order it wishes.
*/
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 28c5de8eca4a..54a8d5c9a44e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2775,7 +2775,7 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
/*
* The discard block size in the on disk metadata is not
- * neccessarily the same as we're currently using. So we have to
+ * necessarily the same as we're currently using. So we have to
* be careful to only set the discarded attribute if we know it
* covers a complete block of the new size.
*/
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c954ff91870e..6c6bd24774f2 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -22,6 +22,8 @@
#define DM_RESERVED_MAX_IOS 1024
+struct dm_io;
+
struct dm_kobject_holder {
struct kobject kobj;
struct completion completion;
@@ -91,6 +93,14 @@ struct mapped_device {
spinlock_t deferred_lock;
struct bio_list deferred;
+ /*
+ * A requeue work context is needed to clone a new bio that
+ * represents the dm_io being requeued, since each dm_io may
+ * point to the original bio from the FS.
+ */
+ struct work_struct requeue_work;
+ struct dm_io *requeue_list;
+
void *interface_ptr;
/*
@@ -216,6 +226,13 @@ struct dm_table {
#endif
};
+static inline struct dm_target *dm_table_get_target(struct dm_table *t,
+ unsigned int index)
+{
+ BUG_ON(index >= t->num_targets);
+ return t->targets + index;
+}
+
/*
* One of these is allocated per clone bio.
*/
@@ -230,6 +247,9 @@ struct dm_target_io {
sector_t old_sector;
struct bio clone;
};
+#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
+#define DM_IO_BIO_OFFSET \
+ (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
/*
* dm_target_io flags
@@ -272,7 +292,6 @@ struct dm_io {
atomic_t io_count;
struct mapped_device *md;
- struct bio *split_bio;
/* The three fields represent mapped part of original bio */
struct bio *orig_bio;
unsigned int sector_offset; /* offset to end of orig_bio */
@@ -300,6 +319,8 @@ static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
io->flags |= (1U << bit);
}
+void dm_io_rewind(struct dm_io *io, struct bio_set *bs);
+
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
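
The dm-core.h hunk defines DM_TARGET_IO_BIO_OFFSET and DM_IO_BIO_OFFSET with offsetof() so code can hop between an outer per-io structure and the clone bio embedded inside it. A standalone sketch of that offset arithmetic follows; the struct layout is illustrative, not dm's.

/*
 * Standalone sketch of the offsetof() arithmetic behind the new offset
 * macros: the distance to an embedded member lets you walk back from a
 * pointer to that member to its container.
 */
#include <stddef.h>
#include <stdio.h>

struct bio_like {
	int opf;
};

struct tio_like {		/* stands in for struct dm_target_io */
	int flags;
	struct bio_like clone;	/* embedded clone bio */
};

#define TIO_BIO_OFFSET offsetof(struct tio_like, clone)

static struct tio_like *bio_to_tio(struct bio_like *bio)
{
	/* walk back from the embedded member to its container */
	return (struct tio_like *)((char *)bio - TIO_BIO_OFFSET);
}

int main(void)
{
	struct tio_like tio = { .flags = 7 };
	struct bio_like *bio = &tio.clone;

	printf("%d\n", bio_to_tio(bio)->flags);	/* prints 7 */
	return 0;
}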
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 0221fa63f888..512cc6cea095 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -61,7 +61,8 @@ static inline bool __ebs_check_bs(unsigned int bs)
*
* copy blocks between bufio blocks and bio vector's (partial/overlapping) pages.
*/
-static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)
+static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
+ struct bvec_iter *iter)
{
int r = 0;
unsigned char *ba, *pa;
@@ -81,7 +82,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);
/* Avoid reading for writes in case bio vector's page overwrites block completely. */
- if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
+ if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
ba = dm_bufio_read(ec->bufio, block, &b);
else
ba = dm_bufio_new(ec->bufio, block, &b);
@@ -95,7 +96,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
} else {
/* Copy data to/from bio to buffer if read/new was successful above. */
ba += buf_off;
- if (rw == READ) {
+ if (op == REQ_OP_READ) {
memcpy(pa, ba, cur_len);
flush_dcache_page(bv->bv_page);
} else {
@@ -117,14 +118,14 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
}
/* READ/WRITE: iterate the bio's vectors, copying between (partial) pages and bufio blocks. */
-static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio)
+static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
{
int r = 0, rr;
struct bio_vec bv;
struct bvec_iter iter;
bio_for_each_bvec(bv, bio, iter) {
- rr = __ebs_rw_bvec(ec, rw, &bv, &iter);
+ rr = __ebs_rw_bvec(ec, op, &bv, &iter);
if (rr)
r = rr;
}
@@ -205,10 +206,10 @@ static void __ebs_process_bios(struct work_struct *ws)
bio_list_for_each(bio, &bios) {
r = -EIO;
if (bio_op(bio) == REQ_OP_READ)
- r = __ebs_rw_bio(ec, READ, bio);
+ r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
else if (bio_op(bio) == REQ_OP_WRITE) {
write = true;
- r = __ebs_rw_bio(ec, WRITE, bio);
+ r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
} else if (bio_op(bio) == REQ_OP_DISCARD) {
__ebs_forget_bio(ec, bio);
r = __ebs_discard_bio(ec, bio);
@@ -312,7 +313,8 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1,
+ 0, NULL, NULL, 0);
if (IS_ERR(ec->bufio)) {
ti->error = "Cannot create dm bufio client";
r = PTR_ERR(ec->bufio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f2305eb758a2..89fa7a68c6c4 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -32,7 +32,7 @@ struct flakey_c {
unsigned corrupt_bio_byte;
unsigned corrupt_bio_rw;
unsigned corrupt_bio_value;
- unsigned corrupt_bio_flags;
+ blk_opf_t corrupt_bio_flags;
};
enum feature_flag_bits {
@@ -145,7 +145,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
/*
* Only corrupt bios with these flags set.
*/
- r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
+ BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
+ sizeof(unsigned int));
+ r = dm_read_arg(_args + 3, as,
+ (__force unsigned *)&fc->corrupt_bio_flags,
+ &ti->error);
if (r)
return r;
argc--;
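
The dm-flakey hunk stores the parsed corrupt_bio_flags through a cast into a blk_opf_t, and the BUILD_BUG_ON records the assumption that the type has the same size as unsigned int. A standalone sketch of that guarded cast follows; the typedef is an illustrative stand-in for blk_opf_t.

/*
 * Standalone sketch of the size assumption documented by the hunk's
 * BUILD_BUG_ON(): parsing into an unsigned int and storing the result
 * through a cast is only safe while the destination type has the same
 * size, which a compile-time assertion enforces.
 */
#include <stdio.h>

typedef unsigned int opf_t;	/* stand-in for blk_opf_t */

_Static_assert(sizeof(opf_t) == sizeof(unsigned int),
	       "opf_t must stay the same size as unsigned int");

static int parse_flags(const char *arg, opf_t *out)
{
	unsigned int val;

	if (sscanf(arg, "%u", &val) != 1)
		return -1;

	*out = (opf_t)val;	/* the cast the assertion protects */
	return 0;
}

int main(void)
{
	opf_t flags;

	if (!parse_flags("2048", &flags))
		printf("flags=0x%x\n", flags);
	return 0;
}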
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 1842d3a958ef..a1bd7cd52b1b 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -208,7 +208,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
if (!target_data_buf)
goto error;
- num_targets = dm_table_get_num_targets(table);
+ num_targets = table->num_targets;
if (dm_ima_alloc_and_copy_device_data(table->md, &device_data_buf, num_targets, noio))
goto error;
@@ -237,9 +237,6 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
- if (!ti)
- goto error;
-
last_target_measured = 0;
/*
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3d5a0ce123c9..aaf2472df6e5 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -298,7 +298,7 @@ struct dm_integrity_io {
struct work_struct work;
struct dm_integrity_c *ic;
- enum req_opf op;
+ enum req_op op;
bool fua;
struct dm_integrity_range range;
@@ -551,14 +551,14 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
return 0;
}
-static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
+static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
+ const enum req_op op = opf & REQ_OP_MASK;
int r;
- io_req.bi_op = op;
- io_req.bi_op_flags = op_flags;
+ io_req.bi_opf = opf;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = ic->sb;
io_req.notify.fn = NULL;
@@ -1050,8 +1050,9 @@ static void complete_journal_io(unsigned long error, void *context)
complete_journal_op(comp);
}
-static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
- unsigned sector, unsigned n_sectors, struct journal_completion *comp)
+static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+ unsigned sector, unsigned n_sectors,
+ struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1067,8 +1068,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
- io_req.bi_op = op;
- io_req.bi_op_flags = op_flags;
+ io_req.bi_opf = opf;
io_req.mem.type = DM_IO_PAGE_LIST;
if (ic->journal_io)
io_req.mem.ptr.pl = &ic->journal_io[pl_index];
@@ -1088,7 +1088,8 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) {
- dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
+ dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
+ "reading journal" : "writing journal", r);
if (comp) {
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
complete_journal_io(-1UL, comp);
@@ -1096,15 +1097,16 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
}
}
-static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+ unsigned section, unsigned n_sections,
+ struct journal_completion *comp)
{
unsigned sector, n_sectors;
sector = section * ic->journal_section_sectors;
n_sectors = n_sections * ic->journal_section_sectors;
- rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
+ rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
@@ -1129,7 +1131,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
for (i = 0; i < commit_sections; i++)
rw_section_mac(ic, commit_start + i, true);
}
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
commit_sections, &io_comp);
} else {
unsigned to_end;
@@ -1141,7 +1143,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
if (try_wait_for_completion(&crypt_comp_1.comp)) {
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
+ commit_start, to_end, &io_comp);
reinit_completion(&crypt_comp_1.comp);
crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
@@ -1152,17 +1155,17 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
wait_for_completion_io(&crypt_comp_1.comp);
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
wait_for_completion_io(&crypt_comp_2.comp);
}
} else {
for (i = 0; i < to_end; i++)
rw_section_mac(ic, commit_start + i, true);
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
for (i = 0; i < commit_sections - to_end; i++)
rw_section_mac(ic, i, true);
}
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
}
wait_for_completion_io(&io_comp.comp);
@@ -1188,8 +1191,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
- io_req.bi_op = REQ_OP_WRITE;
- io_req.bi_op_flags = 0;
+ io_req.bi_opf = REQ_OP_WRITE;
io_req.mem.type = DM_IO_PAGE_LIST;
io_req.mem.ptr.pl = &ic->journal[pl_index];
io_req.mem.offset = pl_offset;
@@ -1516,8 +1518,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
if (!ic->meta_dev)
flush_data = false;
if (flush_data) {
- fr.io_req.bi_op = REQ_OP_WRITE,
- fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
fr.io_req.mem.type = DM_IO_KMEM,
fr.io_req.mem.ptr.addr = NULL,
fr.io_req.notify.fn = flush_notify,
@@ -2626,7 +2627,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
if (dm_integrity_failed(ic))
return;
- r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
+ r = sync_rw_sb(ic, REQ_OP_WRITE);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
}
@@ -2706,8 +2707,7 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
+ io_req.bi_opf = REQ_OP_READ;
io_req.mem.type = DM_IO_VMA;
io_req.mem.ptr.addr = ic->recalc_buffer;
io_req.notify.fn = NULL;
@@ -2800,7 +2800,7 @@ static void bitmap_block_work(struct work_struct *w)
if (bio_list_empty(&waiting))
return;
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
@@ -2846,7 +2846,7 @@ static void bitmap_flush_work(struct work_struct *work)
block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
spin_lock_irq(&ic->endio_wait.lock);
@@ -2918,7 +2918,7 @@ static void replay_journal(struct dm_integrity_c *ic)
if (!ic->just_formatted) {
DEBUG_print("reading journal\n");
- rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
+ rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
if (ic->journal_io)
DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
if (ic->journal_io) {
@@ -3113,7 +3113,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
#endif
@@ -3136,23 +3136,23 @@ static void dm_integrity_resume(struct dm_target *ti)
if (ic->provided_data_sectors > old_provided_data_sectors &&
ic->mode == 'B' &&
ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
- rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ rw_journal_sectors(ic, REQ_OP_READ, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
}
ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
}
if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
DEBUG_print("resume dirty_bitmap\n");
- rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ rw_journal_sectors(ic, REQ_OP_READ, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
if (ic->mode == 'B') {
if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
@@ -3171,7 +3171,7 @@ static void dm_integrity_resume(struct dm_target *ti)
block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
@@ -3187,7 +3187,7 @@ static void dm_integrity_resume(struct dm_target *ti)
replay_journal(ic);
ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
}
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
} else {
@@ -3199,7 +3199,7 @@ static void dm_integrity_resume(struct dm_target *ti)
if (ic->mode == 'B') {
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
@@ -3215,7 +3215,7 @@ static void dm_integrity_resume(struct dm_target *ti)
block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
}
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
}
}
@@ -4256,7 +4256,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- r = sync_rw_sb(ic, REQ_OP_READ, 0);
+ r = sync_rw_sb(ic, REQ_OP_READ);
if (r) {
ti->error = "Error reading superblock";
goto bad;
@@ -4439,7 +4439,7 @@ try_smaller_buffer:
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
- 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
if (IS_ERR(ic->bufio)) {
r = PTR_ERR(ic->bufio);
ti->error = "Cannot initialize dm-bufio";
@@ -4500,7 +4500,7 @@ try_smaller_buffer:
ti->error = "Error initializing journal";
goto bad;
}
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (r) {
ti->error = "Error initializing superblock";
goto bad;
diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c
new file mode 100644
index 000000000000..0db53ccb94ba
--- /dev/null
+++ b/drivers/md/dm-io-rewind.c
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2022 Red Hat, Inc.
+ */
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+#include <linux/blk-integrity.h>
+
+#include "dm-core.h"
+
+static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv,
+ struct bvec_iter *iter,
+ unsigned int bytes)
+{
+ int idx;
+
+ iter->bi_size += bytes;
+ if (bytes <= iter->bi_bvec_done) {
+ iter->bi_bvec_done -= bytes;
+ return true;
+ }
+
+ bytes -= iter->bi_bvec_done;
+ idx = iter->bi_idx - 1;
+
+ while (idx >= 0 && bytes && bytes > bv[idx].bv_len) {
+ bytes -= bv[idx].bv_len;
+ idx--;
+ }
+
+ if (WARN_ONCE(idx < 0 && bytes,
+ "Attempted to rewind iter beyond bvec's boundaries\n")) {
+ iter->bi_size -= bytes;
+ iter->bi_bvec_done = 0;
+ iter->bi_idx = 0;
+ return false;
+ }
+
+ iter->bi_idx = idx;
+ iter->bi_bvec_done = bv[idx].bv_len - bytes;
+ return true;
+}
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+/**
+ * dm_bio_integrity_rewind - Rewind integrity vector
+ * @bio: bio whose integrity vector to update
+ * @bytes_done: number of data bytes to rewind
+ *
+ * Description: This function calculates how many integrity bytes the
+ * given number of completed data bytes corresponds to and rewinds the
+ * integrity vector accordingly.
+ */
+static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
+
+ bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
+ dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline void dm_bio_integrity_rewind(struct bio *bio,
+ unsigned int bytes_done)
+{
+ return;
+}
+
+#endif
+
+#if defined(CONFIG_BLK_INLINE_ENCRYPTION)
+
+/* Decrements @dun by @dec, treating @dun as a multi-limb integer. */
+static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
+ unsigned int dec)
+{
+ int i;
+
+ for (i = 0; dec && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
+ u64 prev = dun[i];
+
+ dun[i] -= dec;
+ if (dun[i] > prev)
+ dec = 1;
+ else
+ dec = 0;
+ }
+}
+
+static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
+{
+ struct bio_crypt_ctx *bc = bio->bi_crypt_context;
+
+ dm_bio_crypt_dun_decrement(bc->bc_dun,
+ bytes >> bc->bc_key->data_unit_size_bits);
+}
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
+{
+ return;
+}
+
+#endif
+
+static inline void dm_bio_rewind_iter(const struct bio *bio,
+ struct bvec_iter *iter, unsigned int bytes)
+{
+ iter->bi_sector -= bytes >> 9;
+
+ /* No advance means no rewind */
+ if (bio_no_advance_iter(bio))
+ iter->bi_size += bytes;
+ else
+ dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
+}
+
+/**
+ * dm_bio_rewind - update ->bi_iter of @bio by rewinding @bytes.
+ * @bio: bio to rewind
+ * @bytes: how many bytes to rewind
+ *
+ * WARNING:
+ * Caller must ensure that @bio has a fixed end sector, to allow
+ * rewinding from end of bio and restoring its original position.
+ * Caller is also responsible for restoring the bio's size.
+ */
+static void dm_bio_rewind(struct bio *bio, unsigned bytes)
+{
+ if (bio_integrity(bio))
+ dm_bio_integrity_rewind(bio, bytes);
+
+ if (bio_has_crypt_ctx(bio))
+ dm_bio_crypt_rewind(bio, bytes);
+
+ dm_bio_rewind_iter(bio, &bio->bi_iter, bytes);
+}
+
+void dm_io_rewind(struct dm_io *io, struct bio_set *bs)
+{
+ struct bio *orig = io->orig_bio;
+ struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig,
+ GFP_NOIO, bs);
+ /*
+ * dm_bio_rewind can restore the iterator to its previous position
+ * since the end sector of the original bio is fixed, but we still
+ * need to restore the bio's size manually (using io->sectors).
+ */
+ dm_bio_rewind(new_orig, ((io->sector_offset << 9) -
+ orig->bi_iter.bi_size));
+ bio_trim(new_orig, 0, io->sectors);
+
+ bio_chain(new_orig, orig);
+ /*
+ * __bi_remaining was increased (by dm_split_and_process_bio),
+ * so we must drop the reference added by bio_chain.
+ */
+ atomic_dec(&orig->__bi_remaining);
+ io->orig_bio = new_orig;
+}
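
dm_bio_crypt_dun_decrement() above treats the inline-encryption DUN as a multi-limb integer: it subtracts from the lowest limb and carries a borrow upward whenever the subtraction wraps. A standalone sketch of that borrow propagation follows; the four-limb width matches BLK_CRYPTO_DUN_ARRAY_SIZE and the values are illustrative.

/*
 * Standalone sketch of the multi-limb decrement: subtract from the
 * lowest limb and propagate a borrow to the next limb whenever the
 * subtraction wraps past zero.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LIMBS 4

static void dun_decrement(uint64_t dun[NR_LIMBS], unsigned int dec)
{
	int i;

	for (i = 0; dec && i < NR_LIMBS; i++) {
		uint64_t prev = dun[i];

		dun[i] -= dec;
		/* wrapped past zero: borrow one from the next limb */
		dec = (dun[i] > prev) ? 1 : 0;
	}
}

int main(void)
{
	uint64_t dun[NR_LIMBS] = { 0, 1, 0, 0 };	/* the value 2^64 */

	dun_decrement(dun, 1);
	/* prints ffffffffffffffff 0 */
	printf("%" PRIx64 " %" PRIx64 "\n", dun[0], dun[1]);
	return 0;
}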
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e4b95eaeec8c..783564533459 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data)
/*-----------------------------------------------------------------
* IO routines that accept a list of pages.
*---------------------------------------------------------------*/
-static void do_region(int op, int op_flags, unsigned region,
+static void do_region(const blk_opf_t opf, unsigned region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
{
@@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
struct request_queue *q = bdev_get_queue(where->bdev);
sector_t num_sectors;
unsigned int special_cmd_max_sectors;
+ const enum req_op op = opf & REQ_OP_MASK;
/*
* Reject unsupported discard and write same requests.
@@ -339,8 +340,8 @@ static void do_region(int op, int op_flags, unsigned region,
(PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
- GFP_NOIO, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
+ &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_end_io = endio;
store_io_and_region_in_bio(bio, io, region);
@@ -368,7 +369,7 @@ static void do_region(int op, int op_flags, unsigned region,
} while (remaining);
}
-static void dispatch_io(int op, int op_flags, unsigned int num_regions,
+static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
struct io *io, int sync)
{
@@ -378,7 +379,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- op_flags |= REQ_SYNC;
+ opf |= REQ_SYNC;
/*
* For multiple regions we need to be careful to rewind
@@ -386,8 +387,8 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (op_flags & REQ_PREFLUSH))
- do_region(op, op_flags, i, where + i, dp, io);
+ if (where[i].count || (opf & REQ_PREFLUSH))
+ do_region(opf, i, where + i, dp, io);
}
/*
@@ -411,13 +412,13 @@ static void sync_io_complete(unsigned long error, void *context)
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int op, int op_flags,
- struct dpages *dp, unsigned long *error_bits)
+ struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
+ unsigned long *error_bits)
{
struct io *io;
struct sync_io sio;
- if (num_regions > 1 && !op_is_write(op)) {
+ if (num_regions > 1 && !op_is_write(opf)) {
WARN_ON(1);
return -EIO;
}
@@ -434,7 +435,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
+ dispatch_io(opf, num_regions, where, dp, io, 1);
wait_for_completion_io(&sio.wait);
@@ -445,12 +446,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int op, int op_flags,
+ struct dm_io_region *where, blk_opf_t opf,
struct dpages *dp, io_notify_fn fn, void *context)
{
struct io *io;
- if (num_regions > 1 && !op_is_write(op)) {
+ if (num_regions > 1 && !op_is_write(opf)) {
WARN_ON(1);
fn(1, context);
return -EIO;
@@ -466,7 +467,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
+ dispatch_io(opf, num_regions, where, dp, io, 0);
return 0;
}
@@ -489,7 +490,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
case DM_IO_VMA:
flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
- if (io_req->bi_op == REQ_OP_READ) {
+ if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
dp->vma_invalidate_address = io_req->mem.ptr.vma;
dp->vma_invalidate_size = size;
}
@@ -519,11 +520,10 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_op, io_req->bi_op_flags, &dp,
- sync_error_bits);
+ io_req->bi_opf, &dp, sync_error_bits);
- return async_io(io_req->client, num_regions, where, io_req->bi_op,
- io_req->bi_op_flags, &dp, io_req->notify.fn,
+ return async_io(io_req->client, num_regions, where,
+ io_req->bi_opf, &dp, io_req->notify.fn,
io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
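The dm-io hunks above fold the separate bi_op/bi_op_flags pair into a single blk_opf_t bi_opf, matching the block layer's combined operation-plus-flags encoding; dm-io recovers the bare operation internally with "opf & REQ_OP_MASK". A minimal sketch of the resulting calling convention, modeled on the flush paths converted later in this diff (the helper name and error handling are illustrative, not part of the patch):

/*
 * Illustrative only: issue an empty synchronous preflush through dm-io
 * using the consolidated bi_opf field.  "client" is assumed to have been
 * set up with dm_io_client_create(), as in the existing callers.
 */
static int example_flush(struct dm_io_client *client, struct block_device *bdev)
{
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = 0,
		.count = 0,		/* empty region: flush only */
	};
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = client,
		/* .notify.fn left NULL: dm_io() takes the synchronous path */
	};

	return dm_io(&io_req, 1, &where, NULL);
}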
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 87310fceb0d8..98976aaa9db9 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -832,7 +832,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
if (get_disk_ro(disk))
param->flags |= DM_READONLY_FLAG;
- param->target_count = dm_table_get_num_targets(table);
+ param->target_count = table->num_targets;
}
param->flags |= DM_ACTIVE_PRESENT_FLAG;
@@ -845,7 +845,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (table) {
if (!(dm_table_get_mode(table) & FMODE_WRITE))
param->flags |= DM_READONLY_FLAG;
- param->target_count = dm_table_get_num_targets(table);
+ param->target_count = table->num_targets;
}
dm_put_live_table(md, srcu_idx);
}
@@ -1248,7 +1248,7 @@ static void retrieve_status(struct dm_table *table,
type = STATUSTYPE_INFO;
/* Get all the target info */
- num_targets = dm_table_get_num_targets(table);
+ num_targets = table->num_targets;
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
size_t l;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 37b03ab7e5c9..4d3bbbea2e9a 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -219,7 +219,7 @@ static struct page_list *alloc_pl(gfp_t gfp)
if (!pl)
return NULL;
- pl->page = alloc_page(gfp);
+ pl->page = alloc_page(gfp | __GFP_HIGHMEM);
if (!pl->page) {
kfree(pl);
return NULL;
@@ -350,9 +350,9 @@ struct kcopyd_job {
unsigned long write_err;
/*
- * Either READ or WRITE
+ * REQ_OP_READ, REQ_OP_WRITE or REQ_OP_WRITE_ZEROES.
*/
- int rw;
+ enum req_op op;
struct dm_io_region source;
/*
@@ -418,7 +418,8 @@ static struct kcopyd_job *pop_io_job(struct list_head *jobs,
* constraint and sequential writes that are at the right position.
*/
list_for_each_entry(job, jobs, list) {
- if (job->rw == READ || !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
+ if (job->op == REQ_OP_READ ||
+ !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
list_del(&job->list);
return job;
}
@@ -518,7 +519,7 @@ static void complete_io(unsigned long error, void *context)
io_job_finish(kc->throttle);
if (error) {
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
job->write_err |= error;
else
job->read_err = 1;
@@ -530,11 +531,11 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
push(&kc->complete_jobs, job);
else {
- job->rw = WRITE;
+ job->op = REQ_OP_WRITE;
push(&kc->io_jobs, job);
}
@@ -549,8 +550,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_op = job->rw,
- .bi_op_flags = 0,
+ .bi_opf = job->op,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = 0,
@@ -571,7 +571,7 @@ static int run_io_job(struct kcopyd_job *job)
io_job_start(job->kc->throttle);
- if (job->rw == READ)
+ if (job->op == REQ_OP_READ)
r = dm_io(&io_req, 1, &job->source, NULL);
else
r = dm_io(&io_req, job->num_dests, job->dests, NULL);
@@ -614,7 +614,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -817,7 +817,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
if (from) {
job->source = *from;
job->pages = NULL;
- job->rw = READ;
+ job->op = REQ_OP_READ;
} else {
memset(&job->source, 0, sizeof job->source);
job->source.count = job->dests[0].count;
@@ -826,10 +826,10 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
/*
* Use WRITE ZEROES to optimize zeroing if all dests support it.
*/
- job->rw = REQ_OP_WRITE_ZEROES;
+ job->op = REQ_OP_WRITE_ZEROES;
for (i = 0; i < job->num_dests; i++)
if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
- job->rw = WRITE;
+ job->op = REQ_OP_WRITE;
break;
}
}
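With job->rw replaced by an enum req_op, the kcopyd zero-fill path above has to choose between REQ_OP_WRITE_ZEROES and a plain REQ_OP_WRITE of pre-zeroed pages. The same decision, restated as a standalone helper for clarity (a sketch, not code from the patch):

/*
 * Sketch: use WRITE ZEROES only if every destination device advertises
 * support for it, otherwise fall back to ordinary writes of zero pages.
 */
static enum req_op example_pick_zeroing_op(struct dm_io_region *dests,
					   unsigned int num_dests)
{
	for (unsigned int i = 0; i < num_dests; i++)
		if (!bdev_write_zeroes_sectors(dests[i].bdev))
			return REQ_OP_WRITE;

	return REQ_OP_WRITE_ZEROES;
}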
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 0c6620e7b7bf..cf10fa667797 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -291,10 +291,9 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
-static int rw_header(struct log_c *lc, int op)
+static int rw_header(struct log_c *lc, enum req_op op)
{
- lc->io_req.bi_op = op;
- lc->io_req.bi_op_flags = 0;
+ lc->io_req.bi_opf = op;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
@@ -307,8 +306,7 @@ static int flush_header(struct log_c *lc)
.count = 0,
};
- lc->io_req.bi_op = REQ_OP_WRITE;
- lc->io_req.bi_op_flags = REQ_PREFLUSH;
+ lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 80c9f7134e9b..c640be453313 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1369,7 +1369,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
}
rs->md.bitmap_info.daemon_sleep = value;
} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
- /* Userspace passes new data_offset after having extended the the data image LV */
+ /* Userspace passes new data_offset after having extended the data image LV */
if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
rs->ti->error = "Only one data_offset argument pair allowed";
return -EINVAL;
@@ -2038,7 +2038,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
rdev->sb_loaded = 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk);
md_error(rdev->mddev, rdev);
@@ -3097,6 +3097,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
ti->num_flush_bios = 1;
+ ti->needs_bio_set_dev = true;
/* Restore any requested new layout for conversion decision */
rs_config_restore(rs, &rs_layout);
@@ -3509,7 +3510,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -3727,6 +3728,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
@@ -3819,7 +3821,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
- for (i = 0; i < mddev->raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
r = &rs->dev[i].rdev;
/* HM FIXME: enhance journal device recovery processing */
if (test_bit(Journal, &r->flags))
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 8811d484fdd1..06a38dc32025 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,8 +260,7 @@ static int mirror_flush(struct dm_target *ti)
struct dm_io_region io[MAX_NR_MIRRORS];
struct mirror *m;
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -535,8 +534,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
{
struct dm_io_region io;
struct dm_io_request io_req = {
- .bi_op = REQ_OP_READ,
- .bi_op_flags = 0,
+ .bi_opf = REQ_OP_READ,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = read_callback,
@@ -648,9 +646,9 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
unsigned int i;
struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
struct mirror *m;
+ blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
+ .bi_opf = REQ_OP_WRITE | op_flags,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
@@ -659,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
};
if (bio_op(bio) == REQ_OP_DISCARD) {
- io_req.bi_op = REQ_OP_DISCARD;
+ io_req.bi_opf = REQ_OP_DISCARD | op_flags;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = NULL;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a83b98a8d2a9..4f49bbcce4f1 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -43,7 +43,6 @@ unsigned dm_get_reserved_rq_based_ios(void)
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
-EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3bb5cff5d6fc..680cc05ec654 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
- int op_flags, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
+ int metadata)
{
struct dm_io_region where = {
.bdev = dm_snap_cow(ps->store->snap)->bdev,
@@ -235,8 +235,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
.count = ps->store->chunk_size,
};
struct dm_io_request io_req = {
- .bi_op = op,
- .bi_op_flags = op_flags,
+ .bi_opf = opf,
.mem.type = DM_IO_VMA,
.mem.ptr.vma = area,
.client = ps->io_client,
@@ -282,11 +281,11 @@ static void skip_metadata(struct pstore *ps)
* Read or write a metadata area. Remembering to skip the first
* chunk which holds the header.
*/
-static int area_io(struct pstore *ps, int op, int op_flags)
+static int area_io(struct pstore *ps, blk_opf_t opf)
{
chunk_t chunk = area_location(ps, ps->current_area);
- return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
+ return chunk_io(ps, ps->area, chunk, opf, 0);
}
static void zero_memory_area(struct pstore *ps)
@@ -297,7 +296,7 @@ static void zero_memory_area(struct pstore *ps)
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
return chunk_io(ps, ps->zero_area, area_location(ps, area),
- REQ_OP_WRITE, 0, 0);
+ REQ_OP_WRITE, 0);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -329,7 +328,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (r)
return r;
- r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
+ r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
if (r)
goto bad;
@@ -390,7 +389,7 @@ static int write_header(struct pstore *ps)
dh->version = cpu_to_le32(ps->version);
dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
- return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
+ return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}
/*
@@ -494,7 +493,7 @@ static int read_exceptions(struct pstore *ps,
client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
ps->store->chunk_size << SECTOR_SHIFT,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(client))
return PTR_ERR(client);
@@ -734,8 +733,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, REQ_OP_WRITE,
- REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
+ REQ_SYNC))
ps->valid = 0;
/*
@@ -775,7 +774,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
return 0;
ps->current_area--;
- r = area_io(ps, REQ_OP_READ, 0);
+ r = area_io(ps, REQ_OP_READ);
if (r < 0)
return r;
ps->current_committed = ps->exceptions_per_area;
@@ -812,7 +811,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
+ r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 0d336b5ec571..d1c2f84d27e3 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2026,7 +2026,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/*
* Write to snapshot - higher level takes care of RW/RO
* flags so we should only get this if we are
- * writeable.
+ * writable.
*/
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bd539afbfe88..332f96b58252 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -6,6 +6,7 @@
*/
#include "dm-core.h"
+#include "dm-rq.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -174,8 +175,6 @@ static void dm_table_destroy_crypto_profile(struct dm_table *t);
void dm_table_destroy(struct dm_table *t)
{
- unsigned int i;
-
if (!t)
return;
@@ -184,13 +183,13 @@ void dm_table_destroy(struct dm_table *t)
kvfree(t->index[t->depth - 2]);
/* free the targets */
- for (i = 0; i < t->num_targets; i++) {
- struct dm_target *tgt = t->targets + i;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
- if (tgt->type->dtr)
- tgt->type->dtr(tgt);
+ if (ti->type->dtr)
+ ti->type->dtr(ti);
- dm_put_target_type(tgt->type);
+ dm_put_target_type(ti->type);
}
kvfree(t->highs);
@@ -450,14 +449,14 @@ EXPORT_SYMBOL(dm_put_device);
/*
* Checks to see if the target joins onto the end of the table.
*/
-static int adjoin(struct dm_table *table, struct dm_target *ti)
+static int adjoin(struct dm_table *t, struct dm_target *ti)
{
struct dm_target *prev;
- if (!table->num_targets)
+ if (!t->num_targets)
return !ti->begin;
- prev = &table->targets[table->num_targets - 1];
+ prev = &t->targets[t->num_targets - 1];
return (ti->begin == (prev->begin + prev->len));
}
@@ -564,8 +563,8 @@ int dm_split_args(int *argc, char ***argvp, char *input)
* two or more targets, the size of each piece it gets split into must
* be compatible with the logical_block_size of the target processing it.
*/
-static int validate_hardware_logical_block_alignment(struct dm_table *table,
- struct queue_limits *limits)
+static int validate_hardware_logical_block_alignment(struct dm_table *t,
+ struct queue_limits *limits)
{
/*
* This function uses arithmetic modulo the logical_block_size
@@ -587,13 +586,13 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
struct dm_target *ti;
struct queue_limits ti_limits;
- unsigned i;
+ unsigned int i;
/*
* Check each entry in the table in turn.
*/
- for (i = 0; i < dm_table_get_num_targets(table); i++) {
- ti = dm_table_get_target(table, i);
+ for (i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
blk_set_stacking_limits(&ti_limits);
@@ -621,7 +620,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
if (remaining) {
DMWARN("%s: table line %u (start sect %llu len %llu) "
"not aligned to h/w logical block size %u",
- dm_device_name(table->md), i,
+ dm_device_name(t->md), i,
(unsigned long long) ti->begin,
(unsigned long long) ti->len,
limits->logical_block_size);
@@ -636,7 +635,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
{
int r = -EINVAL, argc;
char **argv;
- struct dm_target *tgt;
+ struct dm_target *ti;
if (t->singleton) {
DMERR("%s: target type %s must appear alone in table",
@@ -646,87 +645,87 @@ int dm_table_add_target(struct dm_table *t, const char *type,
BUG_ON(t->num_targets >= t->num_allocated);
- tgt = t->targets + t->num_targets;
- memset(tgt, 0, sizeof(*tgt));
+ ti = t->targets + t->num_targets;
+ memset(ti, 0, sizeof(*ti));
if (!len) {
DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
- tgt->type = dm_get_target_type(type);
- if (!tgt->type) {
+ ti->type = dm_get_target_type(type);
+ if (!ti->type) {
DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
- if (dm_target_needs_singleton(tgt->type)) {
+ if (dm_target_needs_singleton(ti->type)) {
if (t->num_targets) {
- tgt->error = "singleton target type must appear alone in table";
+ ti->error = "singleton target type must appear alone in table";
goto bad;
}
t->singleton = true;
}
- if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- tgt->error = "target type may not be included in a read-only table";
+ if (dm_target_always_writeable(ti->type) && !(t->mode & FMODE_WRITE)) {
+ ti->error = "target type may not be included in a read-only table";
goto bad;
}
if (t->immutable_target_type) {
- if (t->immutable_target_type != tgt->type) {
- tgt->error = "immutable target type cannot be mixed with other target types";
+ if (t->immutable_target_type != ti->type) {
+ ti->error = "immutable target type cannot be mixed with other target types";
goto bad;
}
- } else if (dm_target_is_immutable(tgt->type)) {
+ } else if (dm_target_is_immutable(ti->type)) {
if (t->num_targets) {
- tgt->error = "immutable target type cannot be mixed with other target types";
+ ti->error = "immutable target type cannot be mixed with other target types";
goto bad;
}
- t->immutable_target_type = tgt->type;
+ t->immutable_target_type = ti->type;
}
- if (dm_target_has_integrity(tgt->type))
+ if (dm_target_has_integrity(ti->type))
t->integrity_added = 1;
- tgt->table = t;
- tgt->begin = start;
- tgt->len = len;
- tgt->error = "Unknown error";
+ ti->table = t;
+ ti->begin = start;
+ ti->len = len;
+ ti->error = "Unknown error";
/*
* Does this target adjoin the previous one ?
*/
- if (!adjoin(t, tgt)) {
- tgt->error = "Gap in table";
+ if (!adjoin(t, ti)) {
+ ti->error = "Gap in table";
goto bad;
}
r = dm_split_args(&argc, &argv, params);
if (r) {
- tgt->error = "couldn't split parameters";
+ ti->error = "couldn't split parameters";
goto bad;
}
- r = tgt->type->ctr(tgt, argc, argv);
+ r = ti->type->ctr(ti, argc, argv);
kfree(argv);
if (r)
goto bad;
- t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
+ t->highs[t->num_targets++] = ti->begin + ti->len - 1;
- if (!tgt->num_discard_bios && tgt->discards_supported)
+ if (!ti->num_discard_bios && ti->discards_supported)
DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
dm_device_name(t->md), type);
- if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
+ if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
static_branch_enable(&swap_bios_enabled);
return 0;
bad:
- DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r));
- dm_put_target_type(tgt->type);
+ DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
+ dm_put_target_type(ti->type);
return r;
}
@@ -825,14 +824,11 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de
}
static bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn)
+ iterate_devices_callout_fn iterate_fn)
{
- struct dm_target *ti;
- unsigned i;
-
/* Ensure that all targets support DAX. */
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->direct_access)
return false;
@@ -860,9 +856,8 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
static int dm_table_determine_type(struct dm_table *t)
{
- unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- struct dm_target *tgt;
+ struct dm_target *ti;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -876,11 +871,11 @@ static int dm_table_determine_type(struct dm_table *t)
goto verify_rq_based;
}
- for (i = 0; i < t->num_targets; i++) {
- tgt = t->targets + i;
- if (dm_target_hybrid(tgt))
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
+ if (dm_target_hybrid(ti))
hybrid = 1;
- else if (dm_target_request_based(tgt))
+ else if (dm_target_request_based(ti))
request_based = 1;
else
bio_based = 1;
@@ -942,18 +937,18 @@ verify_rq_based:
return 0;
}
- tgt = dm_table_get_immutable_target(t);
- if (!tgt) {
+ ti = dm_table_get_immutable_target(t);
+ if (!ti) {
DMERR("table load rejected: immutable target is required");
return -EINVAL;
- } else if (tgt->max_io_len) {
+ } else if (ti->max_io_len) {
DMERR("table load rejected: immutable target that splits IO is not supported");
return -EINVAL;
}
/* Non-request-stackable devices can't be used for request-based dm */
- if (!tgt->type->iterate_devices ||
- !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
@@ -983,11 +978,9 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
if (dm_target_is_wildcard(ti->type))
return ti;
}
@@ -1010,32 +1003,56 @@ static bool dm_table_supports_poll(struct dm_table *t);
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
- unsigned per_io_data_size = 0;
- unsigned min_pool_size = 0;
- struct dm_target *ti;
- unsigned i;
- bool poll_supported = false;
+ unsigned int per_io_data_size = 0, front_pad, io_front_pad;
+ unsigned int min_pool_size = 0, pool_size;
+ struct dm_md_mempools *pools;
if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools");
return -EINVAL;
}
- if (__table_type_bio_based(type)) {
- for (i = 0; i < t->num_targets; i++) {
- ti = t->targets + i;
- per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
- min_pool_size = max(min_pool_size, ti->num_flush_bios);
- }
- poll_supported = dm_table_supports_poll(t);
+ pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
+ if (!pools)
+ return -ENOMEM;
+
+ if (type == DM_TYPE_REQUEST_BASED) {
+ pool_size = dm_get_reserved_rq_based_ios();
+ front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+ goto init_bs;
}
- t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
- t->integrity_supported, poll_supported);
- if (!t->mempools)
- return -ENOMEM;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+ per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+ min_pool_size = max(min_pool_size, ti->num_flush_bios);
+ }
+ pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
+ front_pad = roundup(per_io_data_size,
+ __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
+
+ io_front_pad = roundup(per_io_data_size,
+ __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
+ if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
+ dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
+ goto out_free_pools;
+ if (t->integrity_supported &&
+ bioset_integrity_create(&pools->io_bs, pool_size))
+ goto out_free_pools;
+init_bs:
+ if (bioset_init(&pools->bs, pool_size, front_pad, 0))
+ goto out_free_pools;
+ if (t->integrity_supported &&
+ bioset_integrity_create(&pools->bs, pool_size))
+ goto out_free_pools;
+
+ t->mempools = pools;
return 0;
+
+out_free_pools:
+ dm_free_md_mempools(pools);
+ return -ENOMEM;
}
static int setup_indexes(struct dm_table *t)
@@ -1100,10 +1117,10 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL;
struct gendisk *prev_disk = NULL, *template_disk = NULL;
- unsigned i;
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
+
if (!dm_target_passes_integrity(ti->type))
goto no_integrity;
}
@@ -1217,18 +1234,19 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
- int i;
- struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return 0;
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
}
+
dm_put_live_table(md, srcu_idx);
return args.err;
}
@@ -1277,7 +1295,6 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
{
struct dm_crypto_profile *dmcp;
struct blk_crypto_profile *profile;
- struct dm_target *ti;
unsigned int i;
bool empty_profile = true;
@@ -1293,8 +1310,8 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
memset(profile->modes_supported, 0xFF,
sizeof(profile->modes_supported));
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!dm_target_passes_crypto(ti->type)) {
blk_crypto_intersect_capabilities(profile, NULL);
@@ -1444,14 +1461,6 @@ inline sector_t dm_table_get_size(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_get_size);
-struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
-{
- if (index >= t->num_targets)
- return NULL;
-
- return t->targets + index;
-}
-
/*
* Search the btree for the correct target.
*
@@ -1512,11 +1521,8 @@ static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
static bool dm_table_any_dev_attr(struct dm_table *t,
iterate_devices_callout_fn func, void *data)
{
- struct dm_target *ti;
- unsigned int i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, func, data))
@@ -1538,11 +1544,8 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
static bool dm_table_supports_poll(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < dm_table_get_num_targets(t)) {
- ti = dm_table_get_target(t, i++);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices ||
ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
@@ -1558,18 +1561,15 @@ static bool dm_table_supports_poll(struct dm_table *t)
* Returns false if the result is unknown because a target doesn't
* support iterate_devices.
*/
-bool dm_table_has_no_data_devices(struct dm_table *table)
+bool dm_table_has_no_data_devices(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i, num_devices;
-
- for (i = 0; i < dm_table_get_num_targets(table); i++) {
- ti = dm_table_get_target(table, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+ unsigned num_devices = 0;
if (!ti->type->iterate_devices)
return false;
- num_devices = 0;
ti->type->iterate_devices(ti, count_device, &num_devices);
if (num_devices)
return false;
@@ -1597,11 +1597,8 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
static bool dm_table_supports_zoned_model(struct dm_table *t,
enum blk_zoned_model zoned_model)
{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (dm_target_supports_zoned_hm(ti->type)) {
if (!ti->type->iterate_devices ||
@@ -1620,13 +1617,11 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
unsigned int *zone_sectors = data;
- if (!blk_queue_is_zoned(q))
+ if (!bdev_is_zoned(dev->bdev))
return 0;
-
- return blk_queue_zone_sectors(q) != *zone_sectors;
+ return bdev_zone_sectors(dev->bdev) != *zone_sectors;
}
/*
@@ -1634,16 +1629,16 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
* zone sectors, if the destination device is a zoned block device, it shall
* have the specified zone_sectors.
*/
-static int validate_hardware_zoned_model(struct dm_table *table,
+static int validate_hardware_zoned_model(struct dm_table *t,
enum blk_zoned_model zoned_model,
unsigned int zone_sectors)
{
if (zoned_model == BLK_ZONED_NONE)
return 0;
- if (!dm_table_supports_zoned_model(table, zoned_model)) {
+ if (!dm_table_supports_zoned_model(t, zoned_model)) {
DMERR("%s: zoned model is not consistent across all devices",
- dm_device_name(table->md));
+ dm_device_name(t->md));
return -EINVAL;
}
@@ -1651,9 +1646,9 @@ static int validate_hardware_zoned_model(struct dm_table *table,
if (!zone_sectors || !is_power_of_2(zone_sectors))
return -EINVAL;
- if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+ if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
DMERR("%s: zone sectors is not consistent across all zoned devices",
- dm_device_name(table->md));
+ dm_device_name(t->md));
return -EINVAL;
}
@@ -1663,21 +1658,19 @@ static int validate_hardware_zoned_model(struct dm_table *table,
/*
* Establish the new table's queue_limits and validate them.
*/
-int dm_calculate_queue_limits(struct dm_table *table,
+int dm_calculate_queue_limits(struct dm_table *t,
struct queue_limits *limits)
{
- struct dm_target *ti;
struct queue_limits ti_limits;
- unsigned i;
enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
unsigned int zone_sectors = 0;
blk_set_stacking_limits(limits);
- for (i = 0; i < dm_table_get_num_targets(table); i++) {
- blk_set_stacking_limits(&ti_limits);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
- ti = dm_table_get_target(table, i);
+ blk_set_stacking_limits(&ti_limits);
if (!ti->type->iterate_devices)
goto combine_limits;
@@ -1718,7 +1711,7 @@ combine_limits:
DMWARN("%s: adding target device "
"(start sect %llu len %llu) "
"caused an alignment inconsistency",
- dm_device_name(table->md),
+ dm_device_name(t->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
}
@@ -1738,10 +1731,10 @@ combine_limits:
zoned_model = limits->zoned;
zone_sectors = limits->chunk_sectors;
}
- if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
+ if (validate_hardware_zoned_model(t, zoned_model, zone_sectors))
return -EINVAL;
- return validate_hardware_logical_block_alignment(table, limits);
+ return validate_hardware_logical_block_alignment(t, limits);
}
/*
@@ -1785,17 +1778,14 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
- struct dm_target *ti;
- unsigned i;
-
/*
* Require at least one underlying device to support flushes.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting flushes must provide.
*/
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_flush_bios)
continue;
@@ -1849,11 +1839,8 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < dm_table_get_num_targets(t)) {
- ti = dm_table_get_target(t, i++);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_write_zeroes_bios)
return false;
@@ -1876,11 +1863,8 @@ static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
static bool dm_table_supports_nowait(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < dm_table_get_num_targets(t)) {
- ti = dm_table_get_target(t, i++);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!dm_target_supports_nowait(ti->type))
return false;
@@ -1901,11 +1885,8 @@ static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
static bool dm_table_supports_discards(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_discard_bios)
return false;
@@ -1933,11 +1914,8 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
static bool dm_table_supports_secure_erase(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned int i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_secure_erase_bios)
return false;
@@ -2067,11 +2045,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
return 0;
}
-unsigned int dm_table_get_num_targets(struct dm_table *t)
-{
- return t->num_targets;
-}
-
struct list_head *dm_table_get_devices(struct dm_table *t)
{
return &t->devices;
@@ -2091,12 +2064,11 @@ enum suspend_mode {
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
- int i = t->num_targets;
- struct dm_target *ti = t->targets;
-
lockdep_assert_held(&t->md->suspend_lock);
- while (i--) {
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
switch (mode) {
case PRESUSPEND:
if (ti->type->presuspend)
@@ -2111,7 +2083,6 @@ static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
ti->type->postsuspend(ti);
break;
}
- ti++;
}
}
@@ -2141,12 +2112,13 @@ void dm_table_postsuspend_targets(struct dm_table *t)
int dm_table_resume_targets(struct dm_table *t)
{
- int i, r = 0;
+ unsigned int i;
+ int r = 0;
lockdep_assert_held(&t->md->suspend_lock);
for (i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = t->targets + i;
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->preresume)
continue;
@@ -2160,7 +2132,7 @@ int dm_table_resume_targets(struct dm_table *t)
}
for (i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = t->targets + i;
+ struct dm_target *ti = dm_table_get_target(t, i);
if (ti->type->resume)
ti->type->resume(ti);
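The dm-table.c changes above remove dm_table_get_num_targets() and the out-of-line, bounds-checked dm_table_get_target(), converting every loop to one idiom: read t->num_targets directly and fetch each target inside the loop body. A compact sketch of that idiom (the predicate is invented for illustration; the accessor is assumed to survive as a trivial inline elsewhere, since only its dm-table.c definition is removed here):

/* Illustrative predicate using the standardized iteration pattern. */
static bool example_all_targets_have_dtr(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->dtr)
			return false;
	}

	return true;
}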
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 2db7030aba00..a27395c8621f 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -2045,10 +2045,13 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_sm_threshold_fn fn,
void *context)
{
- int r;
+ int r = -EINVAL;
pmd_write_lock_in_core(pmd);
- r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+ if (!pmd->fail_io) {
+ r = dm_sm_register_threshold_callback(pmd->metadata_sm,
+ threshold, fn, context);
+ }
pmd_write_unlock(pmd);
return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 84c083f76673..e76c96c760a9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3375,8 +3375,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
calc_metadata_threshold(pt),
metadata_low_callback,
pool);
- if (r)
+ if (r) {
+ ti->error = "Error registering metadata threshold";
goto out_flags_changed;
+ }
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pool);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index cea2b3789736..23cffce56403 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -749,7 +749,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
@@ -765,7 +765,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
1 << v->data_dev_block_bits,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->data_bufio)) {
ti->error = "Cannot initialize FEC data bufio client";
return PTR_ERR(f->data_bufio);
diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c
new file mode 100644
index 000000000000..387ec43aef72
--- /dev/null
+++ b/drivers/md/dm-verity-loadpin.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/dm-verity-loadpin.h>
+
+#include "dm.h"
+#include "dm-core.h"
+#include "dm-verity.h"
+
+#define DM_MSG_PREFIX "verity-loadpin"
+
+LIST_HEAD(dm_verity_loadpin_trusted_root_digests);
+
+static bool is_trusted_verity_target(struct dm_target *ti)
+{
+ u8 *root_digest;
+ unsigned int digest_size;
+ struct dm_verity_loadpin_trusted_root_digest *trd;
+ bool trusted = false;
+
+ if (!dm_is_verity_target(ti))
+ return false;
+
+ if (dm_verity_get_root_digest(ti, &root_digest, &digest_size))
+ return false;
+
+ list_for_each_entry(trd, &dm_verity_loadpin_trusted_root_digests, node) {
+ if ((trd->len == digest_size) &&
+ !memcmp(trd->data, root_digest, digest_size)) {
+ trusted = true;
+ break;
+ }
+ }
+
+ kfree(root_digest);
+
+ return trusted;
+}
+
+/*
+ * Determines whether the file system of a superblock is located on
+ * a verity device that is trusted by LoadPin.
+ */
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ struct mapped_device *md;
+ struct dm_table *table;
+ struct dm_target *ti;
+ int srcu_idx;
+ bool trusted = false;
+
+ if (list_empty(&dm_verity_loadpin_trusted_root_digests))
+ return false;
+
+ md = dm_get_md(bdev->bd_dev);
+ if (!md)
+ return false;
+
+ table = dm_get_live_table(md, &srcu_idx);
+
+ if (table->num_targets != 1)
+ goto out;
+
+ ti = dm_table_get_target(table, 0);
+
+ if (is_trusted_verity_target(ti))
+ trusted = true;
+
+out:
+ dm_put_live_table(md, srcu_idx);
+ dm_put(md);
+
+ return trusted;
+}
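dm-verity-loadpin.c is a new file: it exports a list of trusted verity root digests plus dm_verity_loadpin_is_bdev_trusted(), which LoadPin can consult to decide whether a block device is backed by a single trusted verity target. A hypothetical consumer-side check (LoadPin itself is outside this diff; everything below other than dm_verity_loadpin_is_bdev_trusted() is an assumption for illustration):

/*
 * Hypothetical sketch: refuse to read from a file unless its filesystem
 * sits on a dm-verity device whose root digest was registered as trusted.
 */
static int example_enforce_trusted_source(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;

	if (!sb->s_bdev || !dm_verity_loadpin_is_bdev_trusted(sb->s_bdev))
		return -EPERM;

	return 0;
}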
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index d6dbd47492a8..94b6cb599db4 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,6 +19,8 @@
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "verity"
@@ -34,14 +36,17 @@
#define DM_VERITY_OPT_PANIC "panic_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
+#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
-#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
+
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -220,7 +225,7 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
struct mapped_device *md = dm_table_get_md(v->ti->table);
/* Corruption should be visible in device status in all modes */
- v->hash_failed = 1;
+ v->hash_failed = true;
if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
goto out;
@@ -286,7 +291,19 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
verity_hash_at_level(v, block, level, &hash_block, &offset);
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ data = dm_bufio_get(v->bufio, hash_block, &buf);
+ if (data == NULL) {
+ /*
+ * In tasklet and the hash was not in the bufio cache.
+ * Return early and resume execution from a work-queue
+ * to read the hash from disk.
+ */
+ return -EAGAIN;
+ }
+ } else
+ data = dm_bufio_read(v->bufio, hash_block, &buf);
+
if (IS_ERR(data))
return PTR_ERR(data);
@@ -307,6 +324,15 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
+ else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fallback to work-queue.
+ */
+ r = -EAGAIN;
+ goto release_ret_r;
+ }
else if (verity_fec_decode(v, io,
DM_VERITY_BLOCK_TYPE_METADATA,
hash_block, data, NULL) == 0)
@@ -473,10 +499,24 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
+#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
- unsigned b;
+#endif
+ struct bvec_iter iter_copy;
+ struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ unsigned int b;
+
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ /*
+ * Copy the iterator in case we need to restart
+ * verification in a work-queue.
+ */
+ iter_copy = io->iter;
+ iter = &iter_copy;
+ } else
+ iter = &io->iter;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -485,7 +525,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks &&
likely(test_bit(cur_block, v->validated_blocks))) {
- verity_bv_skip_block(v, io, &io->iter);
+ verity_bv_skip_block(v, io, iter);
continue;
}
@@ -500,7 +540,7 @@ static int verity_verify_io(struct dm_verity_io *io)
* If we expect a zero block, don't validate, just
* return zeros.
*/
- r = verity_for_bv_block(v, io, &io->iter,
+ r = verity_for_bv_block(v, io, iter,
verity_bv_zero);
if (unlikely(r < 0))
return r;
@@ -512,8 +552,11 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
- start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &wait);
+#if defined(CONFIG_DM_VERITY_FEC)
+ if (verity_fec_is_enabled(v))
+ start = *iter;
+#endif
+ r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -527,11 +570,19 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
- }
- else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block, NULL, &start) == 0)
+ } else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fallback to work-queue.
+ */
+ return -EAGAIN;
+#if defined(CONFIG_DM_VERITY_FEC)
+ } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block, NULL, &start) == 0) {
continue;
- else {
+#endif
+ } else {
if (bio->bi_status) {
/*
* Error correction failed; Just return error
@@ -567,7 +618,8 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
- verity_fec_finish_io(io);
+ if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ verity_fec_finish_io(io);
bio_endio(bio);
}
@@ -576,9 +628,29 @@ static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+ io->in_tasklet = false;
+
+ verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
+static void verity_tasklet(unsigned long data)
+{
+ struct dm_verity_io *io = (struct dm_verity_io *)data;
+ int err;
+
+ io->in_tasklet = true;
+ err = verity_verify_io(io);
+ if (err == -EAGAIN) {
+ /* fallback to retrying with work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ return;
+ }
+
+ verity_finish_io(io, errno_to_blk_status(err));
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -589,8 +661,13 @@ static void verity_end_io(struct bio *bio)
return;
}
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
+ tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
+ tasklet_schedule(&io->tasklet);
+ } else {
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ }
}
/*
@@ -701,8 +778,6 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
- verity_fec_init_io(io);
-
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
@@ -752,6 +827,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
args++;
if (v->validated_blocks)
args++;
+ if (v->use_tasklet)
+ args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
if (!args)
@@ -777,6 +854,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
+ if (v->use_tasklet)
+ DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
@@ -890,6 +969,9 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->signature_key_desc);
+ if (v->use_tasklet)
+ static_branch_dec(&use_tasklet_enabled);
+
kfree(v);
}
@@ -968,9 +1050,10 @@ static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
}
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
- struct dm_verity_sig_opts *verify_args)
+ struct dm_verity_sig_opts *verify_args,
+ bool only_modifier_opts)
{
- int r;
+ int r = 0;
unsigned argc;
struct dm_target *ti = v->ti;
const char *arg_name;
@@ -991,6 +1074,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
argc--;
if (verity_is_verity_mode(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_parse_verity_mode(v, arg_name);
if (r) {
ti->error = "Conflicting error handling parameters";
@@ -999,6 +1084,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_zero_digest(v);
if (r) {
ti->error = "Cannot allocate zero digest";
@@ -1007,17 +1094,29 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_most_once(v);
if (r)
return r;
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
+ v->use_tasklet = true;
+ static_branch_inc(&use_tasklet_enabled);
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
return r;
continue;
+
} else if (verity_verify_is_sig_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_verify_sig_parse_opt_args(as, v,
verify_args,
&argc, arg_name);
@@ -1025,8 +1124,17 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
return r;
continue;
+ } else if (only_modifier_opts) {
+ /*
+ * Ignore unrecognized opt, could easily be an extra
+ * argument to an option whose parsing was skipped.
+ * Normal parsing (@only_modifier_opts=false) will
+ * properly parse all options (and their extra args).
+ */
+ continue;
}
+ DMERR("Unrecognized verity feature request: %s", arg_name);
ti->error = "Unrecognized verity feature request";
return -EINVAL;
} while (argc && !r);
@@ -1054,6 +1162,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
+ unsigned int wq_flags;
unsigned long long num_ll;
int r;
int i;
@@ -1085,6 +1194,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ /* Parse optional parameters that modify primary args */
+ if (argc > 10) {
+ as.argc = argc - 10;
+ as.argv = argv + 10;
+ r = verity_parse_opt_args(&as, v, &verify_args, true);
+ if (r < 0)
+ goto bad;
+ }
+
if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
num > 1) {
ti->error = "Invalid version";
@@ -1156,7 +1274,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->tfm = crypto_alloc_ahash(v->alg_name, 0, 0);
+ v->tfm = crypto_alloc_ahash(v->alg_name, 0,
+ v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
@@ -1218,8 +1337,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (argc) {
as.argc = argc;
as.argv = argv;
-
- r = verity_parse_opt_args(&as, v, &verify_args);
+ r = verity_parse_opt_args(&as, v, &verify_args, false);
if (r < 0)
goto bad;
}
@@ -1266,7 +1384,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
- dm_bufio_alloc_callback, NULL);
+ dm_bufio_alloc_callback, NULL,
+ v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
@@ -1281,7 +1400,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
- v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
+ if (v->use_tasklet) {
+ /*
+ * Allow verify_wq to preempt softirq since verification in
+ * tasklet will fall-back to using it for error handling
+ * (or if the bufio cache doesn't have required hashes).
+ */
+ wq_flags |= WQ_HIGHPRI;
+ }
+ v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
@@ -1310,10 +1438,40 @@ bad:
return r;
}
+/*
+ * Check whether a DM target is a verity target.
+ */
+bool dm_is_verity_target(struct dm_target *ti)
+{
+ return ti->type->module == THIS_MODULE;
+}
+
+/*
+ * Get the root digest of a verity target.
+ *
+ * Returns a copy of the root digest, the caller is responsible for
+ * freeing the memory of the digest.
+ */
+int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
+{
+ struct dm_verity *v = ti->private;
+
+ if (!dm_is_verity_target(ti))
+ return -EINVAL;
+
+ *root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
+ if (*root_digest == NULL)
+ return -ENOMEM;
+
+ *digest_size = v->digest_size;
+
+ return 0;
+}
+
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 4e769d13473a..45455de1b4bc 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -13,6 +13,7 @@
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
+#include <linux/interrupt.h>
#include <crypto/hash.h>
#define DM_VERITY_MAX_LEVELS 63
@@ -51,9 +52,10 @@ struct dm_verity {
unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
unsigned char levels; /* the number of tree levels */
unsigned char version;
+ bool hash_failed:1; /* set if hash of any block failed */
+ bool use_tasklet:1; /* try to verify in tasklet before work-queue */
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
- int hash_failed; /* set to 1 if hash of any block failed */
enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */
@@ -76,10 +78,12 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
+ bool in_tasklet;
struct bvec_iter iter;
struct work_struct work;
+ struct tasklet_struct tasklet;
/*
* Three variably-size fields follow this struct:
@@ -129,4 +133,8 @@ extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
+extern bool dm_is_verity_target(struct dm_target *ti);
+extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest,
+ unsigned int *digest_size);
+
#endif /* DM_VERITY_H */
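dm-verity.h now declares dm_is_verity_target() and dm_verity_get_root_digest() for external users such as the LoadPin glue above. The digest comes back as a kmemdup()'d copy, so the caller owns it and must free it; a minimal usage sketch (helper name illustrative, not from the patch):

/* Sketch: dump a target's verity root digest and release the copy. */
static void example_log_root_digest(struct dm_target *ti)
{
	u8 *digest;
	unsigned int size;

	if (dm_verity_get_root_digest(ti, &digest, &size))
		return;

	print_hex_dump(KERN_INFO, "verity root digest: ", DUMP_PREFIX_NONE,
		       16, 1, digest, size, false);
	kfree(digest);
}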
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d74c5a7a0ab4..96a003eb7323 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -22,7 +22,7 @@
#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
-#define MAX_WRITEBACK_JOBS 0
+#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
@@ -523,8 +523,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
region.sector += wc->start_sector;
atomic_inc(&endio.count);
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_SYNC;
+ req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
req.client = wc->dm_io;
@@ -562,8 +561,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
region.sector += wc->start_sector;
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_SYNC | REQ_FUA;
+ req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
@@ -592,8 +590,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
region.bdev = dev->bdev;
region.sector = 0;
region.count = 0;
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_PREFLUSH;
+ req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
req.mem.type = DM_IO_KMEM;
req.mem.ptr.addr = NULL;
req.client = wc->dm_io;
@@ -981,8 +978,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
region.bdev = wc->ssd_dev->bdev;
region.sector = wc->start_sector;
region.count = n_sectors;
- req.bi_op = REQ_OP_READ;
- req.bi_op_flags = REQ_SYNC;
+ req.bi_opf = REQ_OP_READ | REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
@@ -1329,8 +1325,8 @@ enum wc_map_op {
WC_MAP_ERROR,
};
-static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
- struct wc_entry *e)
+static void writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
+ struct wc_entry *e)
{
if (e) {
sector_t next_boundary =
@@ -1338,8 +1334,6 @@ static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, stru
if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
dm_accept_partial_bio(bio, next_boundary);
}
-
- return WC_MAP_REMAP_ORIGIN;
}
static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
@@ -1366,14 +1360,16 @@ read_next_block:
map_op = WC_MAP_REMAP;
}
} else {
- map_op = writecache_map_remap_origin(wc, bio, e);
+ writecache_map_remap_origin(wc, bio, e);
+ wc->stats.reads += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
+ map_op = WC_MAP_REMAP_ORIGIN;
}
return map_op;
}
-static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
- struct wc_entry *e, bool search_used)
+static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
+ struct wc_entry *e, bool search_used)
{
unsigned bio_size = wc->block_size;
sector_t start_cache_sec = cache_sector(wc, e);
@@ -1413,14 +1409,15 @@ static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct b
bio->bi_iter.bi_sector = start_cache_sec;
dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+ wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
+ wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
+
if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
wc->uncommitted_blocks = 0;
queue_work(wc->writeback_wq, &wc->flush_work);
} else {
writecache_schedule_autocommit(wc);
}
-
- return WC_MAP_REMAP;
}
static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
@@ -1430,9 +1427,10 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
do {
bool found_entry = false;
bool search_used = false;
- wc->stats.writes++;
- if (writecache_has_error(wc))
+ if (writecache_has_error(wc)) {
+ wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
return WC_MAP_ERROR;
+ }
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
if (e) {
if (!writecache_entry_is_committed(wc, e)) {
@@ -1456,9 +1454,11 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
if (unlikely(!e)) {
if (!WC_MODE_PMEM(wc) && !found_entry) {
direct_write:
- wc->stats.writes_around++;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
- return writecache_map_remap_origin(wc, bio, e);
+ writecache_map_remap_origin(wc, bio, e);
+ wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits;
+ wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
+ return WC_MAP_REMAP_ORIGIN;
}
wc->stats.writes_blocked_on_freelist++;
writecache_wait_on_freelist(wc);
@@ -1469,10 +1469,13 @@ direct_write:
wc->uncommitted_blocks++;
wc->stats.writes_allocate++;
bio_copy:
- if (WC_MODE_PMEM(wc))
+ if (WC_MODE_PMEM(wc)) {
bio_copy_block(wc, bio, memory_data(wc, e));
- else
- return writecache_bio_copy_ssd(wc, bio, e, search_used);
+ wc->stats.writes++;
+ } else {
+ writecache_bio_copy_ssd(wc, bio, e, search_used);
+ return WC_MAP_REMAP;
+ }
} while (bio->bi_iter.bi_size);
if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
@@ -1507,7 +1510,7 @@ static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio
static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio)
{
- wc->stats.discards++;
+ wc->stats.discards += bio->bi_iter.bi_size >> wc->block_size_bits;
if (writecache_has_error(wc))
return WC_MAP_ERROR;
@@ -1591,7 +1594,8 @@ done:
default:
BUG();
- return -1;
+ wc_unlock(wc);
+ return DM_MAPIO_KILL;
}
}
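
The statistics hunks above switch the wc->stats counters from counting bios to counting cache blocks via bio->bi_iter.bi_size >> wc->block_size_bits. As a worked example, assuming a 4 KiB cache block size (block_size_bits = 12): a 64 KiB write bio now contributes 65536 >> 12 = 16 to stats.writes, where it previously contributed 1 per bio regardless of size.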
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3e7b1fe1580b..3dafc0e8b7a9 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -139,13 +139,11 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
void dm_cleanup_zoned_dev(struct mapped_device *md)
{
- struct request_queue *q = md->queue;
-
- if (q) {
- kfree(q->conv_zones_bitmap);
- q->conv_zones_bitmap = NULL;
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
+ if (md->disk) {
+ kfree(md->disk->conv_zones_bitmap);
+ md->disk->conv_zones_bitmap = NULL;
+ kfree(md->disk->seq_zones_wlock);
+ md->disk->seq_zones_wlock = NULL;
}
kvfree(md->zwp_offset);
@@ -179,31 +177,31 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct mapped_device *md = data;
- struct request_queue *q = md->queue;
+ struct gendisk *disk = md->disk;
switch (zone->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
- if (!q->conv_zones_bitmap) {
- q->conv_zones_bitmap =
- kcalloc(BITS_TO_LONGS(q->nr_zones),
+ if (!disk->conv_zones_bitmap) {
+ disk->conv_zones_bitmap =
+ kcalloc(BITS_TO_LONGS(disk->nr_zones),
sizeof(unsigned long), GFP_NOIO);
- if (!q->conv_zones_bitmap)
+ if (!disk->conv_zones_bitmap)
return -ENOMEM;
}
- set_bit(idx, q->conv_zones_bitmap);
+ set_bit(idx, disk->conv_zones_bitmap);
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
- if (!q->seq_zones_wlock) {
- q->seq_zones_wlock =
- kcalloc(BITS_TO_LONGS(q->nr_zones),
+ if (!disk->seq_zones_wlock) {
+ disk->seq_zones_wlock =
+ kcalloc(BITS_TO_LONGS(disk->nr_zones),
sizeof(unsigned long), GFP_NOIO);
- if (!q->seq_zones_wlock)
+ if (!disk->seq_zones_wlock)
return -ENOMEM;
}
if (!md->zwp_offset) {
md->zwp_offset =
- kvcalloc(q->nr_zones, sizeof(unsigned int),
+ kvcalloc(disk->nr_zones, sizeof(unsigned int),
GFP_KERNEL);
if (!md->zwp_offset)
return -ENOMEM;
@@ -228,7 +226,7 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
*/
static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
{
- struct request_queue *q = md->queue;
+ struct gendisk *disk = md->disk;
unsigned int noio_flag;
int ret;
@@ -236,7 +234,7 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
* Check if something changed. If yes, cleanup the current resources
* and reallocate everything.
*/
- if (!q->nr_zones || q->nr_zones != md->nr_zones)
+ if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
dm_cleanup_zoned_dev(md);
if (md->nr_zones)
return 0;
@@ -246,17 +244,17 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
* operations in this context are done as if GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = dm_blk_do_report_zones(md, t, 0, q->nr_zones,
+ ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones,
dm_zone_revalidate_cb, md);
memalloc_noio_restore(noio_flag);
if (ret < 0)
goto err;
- if (ret != q->nr_zones) {
+ if (ret != disk->nr_zones) {
ret = -EIO;
goto err;
}
- md->nr_zones = q->nr_zones;
+ md->nr_zones = disk->nr_zones;
return 0;
@@ -270,16 +268,13 @@ static int device_not_zone_append_capable(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- return !blk_queue_is_zoned(bdev_get_queue(dev->bdev));
+ return !bdev_is_zoned(dev->bdev);
}
static bool dm_table_supports_zone_append(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned int i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (ti->emulate_zone_append)
return false;
@@ -301,7 +296,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
* correct value to be exposed in sysfs queue/nr_zones.
*/
WARN_ON_ONCE(queue_is_mq(q));
- q->nr_zones = blkdev_nr_zones(md->disk);
+ md->disk->nr_zones = bdev_nr_zones(md->disk->part0);
/* Check if zone append is natively supported */
if (dm_table_supports_zone_append(t)) {
@@ -334,7 +329,7 @@ static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
unsigned int *wp_ofst)
{
- sector_t sector = zno * blk_queue_zone_sectors(md->queue);
+ sector_t sector = zno * bdev_zone_sectors(md->disk->part0);
unsigned int noio_flag;
struct dm_table *t;
int srcu_idx, ret;
@@ -361,7 +356,7 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
}
struct orig_bio_details {
- unsigned int op;
+ enum req_op op;
unsigned int nr_sectors;
};
@@ -373,7 +368,7 @@ struct orig_bio_details {
static bool dm_zone_map_bio_begin(struct mapped_device *md,
unsigned int zno, struct bio *clone)
{
- sector_t zsectors = blk_queue_zone_sectors(md->queue);
+ sector_t zsectors = bdev_zone_sectors(md->disk->part0);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/*
@@ -443,7 +438,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z
return BLK_STS_OK;
case REQ_OP_ZONE_FINISH:
WRITE_ONCE(md->zwp_offset[zno],
- blk_queue_zone_sectors(md->queue));
+ bdev_zone_sectors(md->disk->part0));
return BLK_STS_OK;
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
@@ -466,26 +461,26 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z
}
}
-static inline void dm_zone_lock(struct request_queue *q,
- unsigned int zno, struct bio *clone)
+static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno,
+ struct bio *clone)
{
if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
return;
- wait_on_bit_lock_io(q->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
+ wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
-static inline void dm_zone_unlock(struct request_queue *q,
- unsigned int zno, struct bio *clone)
+static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno,
+ struct bio *clone)
{
if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
return;
- WARN_ON_ONCE(!test_bit(zno, q->seq_zones_wlock));
- clear_bit_unlock(zno, q->seq_zones_wlock);
+ WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock));
+ clear_bit_unlock(zno, disk->seq_zones_wlock);
smp_mb__after_atomic();
- wake_up_bit(q->seq_zones_wlock, zno);
+ wake_up_bit(disk->seq_zones_wlock, zno);
bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
@@ -520,7 +515,6 @@ int dm_zone_map_bio(struct dm_target_io *tio)
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
struct mapped_device *md = io->md;
- struct request_queue *q = md->queue;
struct bio *clone = &tio->clone;
struct orig_bio_details orig_bio_details;
unsigned int zno;
@@ -536,7 +530,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
/* Lock the target zone */
zno = bio_zone_no(clone);
- dm_zone_lock(q, zno, clone);
+ dm_zone_lock(md->disk, zno, clone);
orig_bio_details.nr_sectors = bio_sectors(clone);
orig_bio_details.op = bio_op(clone);
@@ -546,7 +540,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* both valid, and if the bio is a zone append, remap it to a write.
*/
if (!dm_zone_map_bio_begin(md, zno, clone)) {
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(md->disk, zno, clone);
return DM_MAPIO_KILL;
}
@@ -570,12 +564,12 @@ int dm_zone_map_bio(struct dm_target_io *tio)
sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
*tio->len_ptr);
if (sts != BLK_STS_OK)
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(md->disk, zno, clone);
break;
case DM_MAPIO_REQUEUE:
case DM_MAPIO_KILL:
default:
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(md->disk, zno, clone);
sts = BLK_STS_IOERR;
break;
}
@@ -592,7 +586,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
void dm_zone_endio(struct dm_io *io, struct bio *clone)
{
struct mapped_device *md = io->md;
- struct request_queue *q = md->queue;
+ struct gendisk *disk = md->disk;
struct bio *orig_bio = io->orig_bio;
unsigned int zwp_offset;
unsigned int zno;
@@ -608,7 +602,8 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
*/
if (clone->bi_status == BLK_STS_OK &&
bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask = (sector_t)blk_queue_zone_sectors(q) - 1;
+ sector_t mask =
+ (sector_t)bdev_zone_sectors(disk->part0) - 1;
orig_bio->bi_iter.bi_sector +=
clone->bi_iter.bi_sector & mask;
@@ -649,5 +644,5 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
zwp_offset - bio_sectors(orig_bio);
}
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(disk, zno, clone);
}
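
dm-zone.c (and dm-zoned below) now derive zone geometry from the block_device/gendisk instead of the request_queue. A small illustrative sketch of the helpers involved; my_print_zone_info is a hypothetical function, not part of this series:

#include <linux/blkdev.h>

/* Hypothetical helper: query the zone geometry the dm code now uses. */
static void my_print_zone_info(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))	/* replaces blk_queue_is_zoned(bdev_get_queue(bdev)) */
		return;

	pr_info("%pg: %u zones of %llu sectors each\n", bdev,
		bdev_nr_zones(bdev),	/* replaces blkdev_nr_zones(bdev->bd_disk) */
		(unsigned long long)bdev_zone_sectors(bdev));
}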
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index d1ea66114d14..0278482fac94 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -737,7 +737,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
/*
* Read/write a metadata block.
*/
-static int dmz_rdwr_block(struct dmz_dev *dev, int op,
+static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
sector_t block, struct page *page)
{
struct bio *bio;
@@ -2045,7 +2045,8 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
* allocated and used to map the chunk.
* The zone returned will be set to the active state.
*/
-struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
+struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
+ unsigned int chunk, enum req_op op)
{
struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
@@ -2944,7 +2945,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker);
+ ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
+ MAJOR(dev->bdev->bd_dev),
+ MINOR(dev->bdev->bd_dev));
if (ret) {
dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
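
register_shrinker() now takes a printf-style name for the shrinker, used to identify it in the shrinker debug interfaces. A minimal sketch with a hypothetical shrinker (my_count/my_scan and the name are assumptions); only the extra name arguments are the point:

	static struct shrinker my_shrinker = {
		.count_objects	= my_count,
		.scan_objects	= my_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* The format string and args name the shrinker, e.g. "my-meta:(253:0)". */
	ret = register_shrinker(&my_shrinker, "my-meta:(%u:%u)",
				MAJOR(dev->bdev->bd_dev),
				MINOR(dev->bdev->bd_dev));
	if (ret)
		return ret;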
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 0ec5d8b9b1a4..95b132b52f33 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -764,8 +764,7 @@ static void dmz_put_zoned_device(struct dm_target *ti)
static int dmz_fixup_devices(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *reg_dev, *zoned_dev;
- struct request_queue *q;
+ struct dmz_dev *reg_dev = NULL;
sector_t zone_nr_sectors = 0;
int i;
@@ -780,32 +779,32 @@ static int dmz_fixup_devices(struct dm_target *ti)
return -EINVAL;
}
for (i = 1; i < dmz->nr_ddevs; i++) {
- zoned_dev = &dmz->dev[i];
+ struct dmz_dev *zoned_dev = &dmz->dev[i];
+ struct block_device *bdev = zoned_dev->bdev;
+
if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
ti->error = "Secondary disk is not a zoned device";
return -EINVAL;
}
- q = bdev_get_queue(zoned_dev->bdev);
if (zone_nr_sectors &&
- zone_nr_sectors != blk_queue_zone_sectors(q)) {
+ zone_nr_sectors != bdev_zone_sectors(bdev)) {
ti->error = "Zone nr sectors mismatch";
return -EINVAL;
}
- zone_nr_sectors = blk_queue_zone_sectors(q);
+ zone_nr_sectors = bdev_zone_sectors(bdev);
zoned_dev->zone_nr_sectors = zone_nr_sectors;
- zoned_dev->nr_zones =
- blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ zoned_dev->nr_zones = bdev_nr_zones(bdev);
}
} else {
- reg_dev = NULL;
- zoned_dev = &dmz->dev[0];
+ struct dmz_dev *zoned_dev = &dmz->dev[0];
+ struct block_device *bdev = zoned_dev->bdev;
+
if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
ti->error = "Disk is not a zoned device";
return -EINVAL;
}
- q = bdev_get_queue(zoned_dev->bdev);
- zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ zoned_dev->zone_nr_sectors = bdev_zone_sectors(bdev);
+ zoned_dev->nr_zones = bdev_nr_zones(bdev);
}
if (reg_dev) {
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index a02744a0846c..265494d3f711 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -248,7 +248,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int dev_idx, bool idle);
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
- unsigned int chunk, int op);
+ unsigned int chunk, enum req_op op);
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
struct dm_zone *dzone);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2b75f1ef7386..60549b65c799 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -88,10 +88,6 @@ struct clone_info {
bool submit_as_polled:1;
};
-#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
-#define DM_IO_BIO_OFFSET \
- (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
-
static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
return container_of(clone, struct dm_target_io, clone);
@@ -415,7 +411,7 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
{
- struct dm_target *tgt;
+ struct dm_target *ti;
struct dm_table *map;
int r;
@@ -426,17 +422,17 @@ retry:
return r;
/* We only support devices that have a single target */
- if (dm_table_get_num_targets(map) != 1)
+ if (map->num_targets != 1)
return r;
- tgt = dm_table_get_target(map, 0);
- if (!tgt->type->prepare_ioctl)
+ ti = dm_table_get_target(map, 0);
+ if (!ti->type->prepare_ioctl)
return r;
if (dm_suspended_md(md))
return -EAGAIN;
- r = tgt->type->prepare_ioctl(tgt, bdev);
+ r = ti->type->prepare_ioctl(ti, bdev);
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
msleep(10);
@@ -578,9 +574,6 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct bio *clone;
clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
- /* Set default bdev, but target must bio_set_dev() before issuing IO */
- clone->bi_bdev = md->disk->part0;
-
tio = clone_to_tio(clone);
tio->flags = 0;
dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
@@ -594,7 +587,6 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
atomic_set(&io->io_count, 2);
this_cpu_inc(*md->pending_io);
io->orig_bio = bio;
- io->split_bio = NULL;
io->md = md;
spin_lock_init(&io->lock);
io->start_time = jiffies;
@@ -614,6 +606,7 @@ static void free_io(struct dm_io *io)
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
+ struct mapped_device *md = ci->io->md;
struct dm_target_io *tio;
struct bio *clone;
@@ -623,14 +616,10 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
/* alloc_io() already initialized embedded clone */
clone = &tio->clone;
} else {
- struct mapped_device *md = ci->io->md;
-
clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
&md->mempools->bs);
if (!clone)
return NULL;
- /* Set default bdev, but target must bio_set_dev() before issuing IO */
- clone->bi_bdev = md->disk->part0;
/* REQ_DM_POLL_LIST shouldn't be inherited */
clone->bi_opf &= ~REQ_DM_POLL_LIST;
@@ -646,6 +635,11 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
tio->len_ptr = len;
tio->old_sector = 0;
+ /* Set default bdev, but target must bio_set_dev() before issuing IO */
+ clone->bi_bdev = md->disk->part0;
+ if (unlikely(ti->needs_bio_set_dev))
+ bio_set_dev(clone, md->disk->part0);
+
if (len) {
clone->bi_iter.bi_size = to_bytes(*len);
if (bio_integrity(clone))
@@ -716,7 +710,7 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
}
static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, unsigned bio_opf)
+ int *srcu_idx, blk_opf_t bio_opf)
{
if (bio_opf & REQ_NOWAIT)
return dm_get_live_table_fast(md);
@@ -725,7 +719,7 @@ static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
}
static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- unsigned bio_opf)
+ blk_opf_t bio_opf)
{
if (bio_opf & REQ_NOWAIT)
dm_put_live_table_fast(md);
@@ -758,7 +752,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
return 0;
}
@@ -884,22 +878,63 @@ static int __noflush_suspending(struct mapped_device *md)
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
-static void dm_io_complete(struct dm_io *io)
+static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{
- blk_status_t io_error;
struct mapped_device *md = io->md;
- struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;
- if (io->status == BLK_STS_DM_REQUEUE) {
+ if (first_stage) {
+ struct dm_io *next = md->requeue_list;
+
+ md->requeue_list = io;
+ io->next = next;
+ } else {
+ bio_list_add_head(&md->deferred, io->orig_bio);
+ }
+}
+
+static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
+{
+ if (first_stage)
+ queue_work(md->wq, &md->requeue_work);
+ else
+ queue_work(md->wq, &md->work);
+}
+
+/*
+ * Return true if the dm_io's original bio is requeued.
+ * io->status is updated with error if requeue disallowed.
+ */
+static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
+{
+ struct bio *bio = io->orig_bio;
+ bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
+ bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
+ (bio->bi_opf & REQ_POLLED));
+ struct mapped_device *md = io->md;
+ bool requeued = false;
+
+ if (handle_requeue || handle_polled_eagain) {
unsigned long flags;
+
+ if (bio->bi_opf & REQ_POLLED) {
+ /*
+ * Upper layer won't help us poll split bio
+ * (io->orig_bio may only reflect a subset of the
+ * pre-split original) so clear REQ_POLLED.
+ */
+ bio_clear_polled(bio);
+ }
+
/*
- * Target requested pushing back the I/O.
+ * Target requested pushing back the I/O or
+ * polled IO hit BLK_STS_AGAIN.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
- if (__noflush_suspending(md) &&
- !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
- /* NOTE early return due to BLK_STS_DM_REQUEUE below */
- bio_list_add_head(&md->deferred, bio);
+ if ((__noflush_suspending(md) &&
+ !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
+ handle_polled_eagain || first_stage) {
+ dm_requeue_add_io(io, first_stage);
+ requeued = true;
} else {
/*
* noflush suspend was interrupted or this is
@@ -910,6 +945,23 @@ static void dm_io_complete(struct dm_io *io)
spin_unlock_irqrestore(&md->deferred_lock, flags);
}
+ if (requeued)
+ dm_kick_requeue(md, first_stage);
+
+ return requeued;
+}
+
+static void __dm_io_complete(struct dm_io *io, bool first_stage)
+{
+ struct bio *bio = io->orig_bio;
+ struct mapped_device *md = io->md;
+ blk_status_t io_error;
+ bool requeued;
+
+ requeued = dm_handle_requeue(io, first_stage);
+ if (requeued && first_stage)
+ return;
+
io_error = io->status;
if (dm_io_flagged(io, DM_IO_ACCOUNTED))
dm_end_io_acct(io);
@@ -929,23 +981,9 @@ static void dm_io_complete(struct dm_io *io)
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
- if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
- if (bio->bi_opf & REQ_POLLED) {
- /*
- * Upper layer won't help us poll split bio (io->orig_bio
- * may only reflect a subset of the pre-split original)
- * so clear REQ_POLLED in case of requeue.
- */
- bio_clear_polled(bio);
- if (io_error == BLK_STS_AGAIN) {
- /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
- queue_io(md, bio);
- return;
- }
- }
- if (io_error == BLK_STS_DM_REQUEUE)
- return;
- }
+ /* Return early if the original bio was requeued */
+ if (requeued)
+ return;
if (bio_is_flush_with_data(bio)) {
/*
@@ -962,6 +1000,58 @@ static void dm_io_complete(struct dm_io *io)
}
}
+static void dm_wq_requeue_work(struct work_struct *work)
+{
+ struct mapped_device *md = container_of(work, struct mapped_device,
+ requeue_work);
+ unsigned long flags;
+ struct dm_io *io;
+
+ /* reuse deferred lock to simplify dm_handle_requeue */
+ spin_lock_irqsave(&md->deferred_lock, flags);
+ io = md->requeue_list;
+ md->requeue_list = NULL;
+ spin_unlock_irqrestore(&md->deferred_lock, flags);
+
+ while (io) {
+ struct dm_io *next = io->next;
+
+ dm_io_rewind(io, &md->disk->bio_split);
+
+ io->next = NULL;
+ __dm_io_complete(io, false);
+ io = next;
+ }
+}
+
+/*
+ * Two staged requeue:
+ *
+ * 1) io->orig_bio points to the real original bio, and the part mapped to
+ * this io must be requeued, instead of other parts of the original bio.
+ *
+ * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
+ */
+static void dm_io_complete(struct dm_io *io)
+{
+ bool first_requeue;
+
+ /*
+ * Only dm_io that has been split needs two stage requeue, otherwise
+ * we may run into long bio clone chain during suspend and OOM could
+ * be triggered.
+ *
+ * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
+ * also aren't handled via the first stage requeue.
+ */
+ if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
+ first_requeue = true;
+ else
+ first_requeue = false;
+
+ __dm_io_complete(io, first_requeue);
+}
+
/*
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necc.
@@ -1033,7 +1123,7 @@ static void clone_endio(struct bio *bio)
}
if (static_branch_unlikely(&zoned_enabled) &&
- unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+ unlikely(bdev_is_zoned(bio->bi_bdev)))
dm_zone_endio(io, bio);
if (endio) {
@@ -1086,23 +1176,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
sector_t target_offset = dm_target_offset(ti, sector);
sector_t len = max_io_len_target_boundary(ti, target_offset);
- sector_t max_len;
/*
* Does the target need to split IO even further?
* - varied (per target) IO splitting is a tenet of DM; this
* explains why stacked chunk_sectors based splitting via
- * blk_max_size_offset() isn't possible here. So pass in
- * ti->max_io_len to override stacked chunk_sectors.
+ * bio_split_to_limits() isn't possible here.
*/
- if (ti->max_io_len) {
- max_len = blk_max_size_offset(ti->table->md->queue,
- target_offset, ti->max_io_len);
- if (len > max_len)
- len = max_len;
- }
-
- return len;
+ if (!ti->max_io_len)
+ return len;
+ return min_t(sector_t, len,
+ min(queue_max_sectors(ti->table->md->queue),
+ blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
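
With the rewrite above, the per-target length cap becomes min(len, queue_max_sectors(), blk_chunk_sectors_left(target_offset, ti->max_io_len)). A rough worked example, assuming a power-of-two ti->max_io_len of 8 sectors and target_offset = 13: blk_chunk_sectors_left() yields 8 - (13 & 7) = 3 sectors remaining until the next max_io_len boundary, so the IO is clamped to at most 3 sectors regardless of len or the queue limit.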
@@ -1245,6 +1330,7 @@ out:
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = clone_to_tio(bio);
+ struct dm_io *io = tio->io;
unsigned bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
@@ -1260,8 +1346,9 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
* __split_and_process_bio() may have already saved mapped part
* for accounting but it is being reduced so update accordingly.
*/
- dm_io_set_flag(tio->io, DM_IO_WAS_SPLIT);
- tio->io->sectors = n_sectors;
+ dm_io_set_flag(io, DM_IO_WAS_SPLIT);
+ io->sectors = n_sectors;
+ io->sector_offset = bio_sectors(io->orig_bio);
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
@@ -1384,17 +1471,7 @@ static void setup_split_accounting(struct clone_info *ci, unsigned len)
*/
dm_io_set_flag(io, DM_IO_WAS_SPLIT);
io->sectors = len;
- }
-
- if (static_branch_unlikely(&stats_enabled) &&
- unlikely(dm_stats_used(&io->md->stats))) {
- /*
- * Save bi_sector in terms of its offset from end of
- * original bio, only needed for DM-stats' benefit.
- * - saved regardless of whether split needed so that
- * dm_accept_partial_bio() doesn't need to.
- */
- io->sector_offset = bio_end_sector(ci->bio) - ci->sector;
+ io->sector_offset = bio_sectors(ci->bio);
}
}
@@ -1428,11 +1505,11 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios, unsigned *len)
+ unsigned int num_bios, unsigned *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
- int ret = 0;
+ unsigned int ret = 0;
switch (num_bios) {
case 0:
@@ -1460,8 +1537,7 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
static void __send_empty_flush(struct clone_info *ci)
{
- unsigned target_nr = 0;
- struct dm_target *ti;
+ struct dm_table *t = ci->map;
struct bio flush_bio;
/*
@@ -1476,8 +1552,9 @@ static void __send_empty_flush(struct clone_info *ci)
ci->sector_count = 0;
ci->io->tio.clone.bi_iter.bi_size = 0;
- while ((ti = dm_table_get_target(ci->map, target_nr++))) {
- int bios;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ unsigned int bios;
+ struct dm_target *ti = dm_table_get_target(t, i);
atomic_add(ti->num_flush_bios, &ci->io->io_count);
bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1497,7 +1574,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
unsigned num_bios)
{
unsigned len;
- int bios;
+ unsigned int bios;
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
@@ -1516,7 +1593,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
static bool is_abnormal_io(struct bio *bio)
{
- unsigned int op = bio_op(bio);
+ enum req_op op = bio_op(bio);
if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
switch (op) {
@@ -1547,6 +1624,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
break;
+ default:
+ break;
}
/*
@@ -1628,7 +1707,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
* Only support bio polling for normal IO, and the target io is
* exactly inside the dm_io instance (verified in dm_poll_dm_io)
*/
- ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
+ ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
setup_split_accounting(ci, len);
@@ -1672,10 +1751,10 @@ static void dm_split_and_process_bio(struct mapped_device *md,
is_abnormal = is_abnormal_io(bio);
if (unlikely(is_abnormal)) {
/*
- * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
+ * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
}
init_clone_info(&ci, md, map, bio, is_abnormal);
@@ -1694,11 +1773,9 @@ static void dm_split_and_process_bio(struct mapped_device *md,
* Remainder must be passed to submit_bio_noacct() so it gets handled
* *after* bios already submitted have been completely processed.
*/
- WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
- io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
- &md->queue->bio_split);
- bio_chain(io->split_bio, bio);
- trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
+ bio_trim(bio, io->sectors, ci.sector_count);
+ trace_block_split(bio, bio->bi_iter.bi_sector);
+ bio_inc_remaining(bio);
submit_bio_noacct(bio);
out:
/*
@@ -1725,7 +1802,7 @@ static void dm_submit_bio(struct bio *bio)
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
- unsigned bio_opf = bio->bi_opf;
+ blk_opf_t bio_opf = bio->bi_opf;
map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
@@ -1899,7 +1976,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
del_gendisk(md->disk);
}
dm_queue_destroy_crypto_profile(md->queue);
- blk_cleanup_disk(md->disk);
+ put_disk(md->disk);
}
if (md->pending_io) {
@@ -1974,9 +2051,11 @@ static struct mapped_device *alloc_dev(int minor)
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
+ INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
+ md->requeue_list = NULL;
md->swap_bios = get_swap_bios();
sema_init(&md->swap_bios_semaphore, md->swap_bios);
mutex_init(&md->swap_bios_lock);
@@ -2983,54 +3062,6 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned per_io_data_size, unsigned min_pool_size,
- bool integrity, bool poll)
-{
- struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
- unsigned int pool_size = 0;
- unsigned int front_pad, io_front_pad;
- int ret;
-
- if (!pools)
- return NULL;
-
- switch (type) {
- case DM_TYPE_BIO_BASED:
- case DM_TYPE_DAX_BIO_BASED:
- pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
- front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
- io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
- ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0);
- if (ret)
- goto out;
- if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
- goto out;
- break;
- case DM_TYPE_REQUEST_BASED:
- pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
- front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
- /* per_io_data_size is used for blk-mq pdu at queue allocation */
- break;
- default:
- BUG();
- }
-
- ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
- if (ret)
- goto out;
-
- if (integrity && bioset_integrity_create(&pools->bs, pool_size))
- goto out;
-
- return pools;
-
-out:
- dm_free_md_mempools(pools);
-
- return NULL;
-}
-
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
if (!pools)
@@ -3046,11 +3077,14 @@ struct dm_pr {
u64 old_key;
u64 new_key;
u32 flags;
+ bool abort;
bool fail_early;
+ int ret;
+ enum pr_type type;
};
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
- void *data)
+ struct dm_pr *pr)
{
struct mapped_device *md = bdev->bd_disk->private_data;
struct dm_table *table;
@@ -3062,15 +3096,21 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
goto out;
/* We only support devices that have a single target */
- if (dm_table_get_num_targets(table) != 1)
+ if (table->num_targets != 1)
goto out;
ti = dm_table_get_target(table, 0);
+ if (dm_suspended_md(md)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
ret = -EINVAL;
if (!ti->type->iterate_devices)
goto out;
- ret = ti->type->iterate_devices(ti, fn, data);
+ ti->type->iterate_devices(ti, fn, pr);
+ ret = 0;
out:
dm_put_live_table(md, srcu_idx);
return ret;
@@ -3084,10 +3124,24 @@ static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+ int ret;
- if (!ops || !ops->pr_register)
- return -EOPNOTSUPP;
- return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
+ if (!ops || !ops->pr_register) {
+ pr->ret = -EOPNOTSUPP;
+ return -1;
+ }
+
+ ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
+ if (!ret)
+ return 0;
+
+ if (!pr->ret)
+ pr->ret = ret;
+
+ if (pr->fail_early)
+ return -1;
+
+ return 0;
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
@@ -3098,82 +3152,145 @@ static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
.new_key = new_key,
.flags = flags,
.fail_early = true,
+ .ret = 0,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_register, &pr);
- if (ret && new_key) {
- /* unregister all paths if we failed to register any path */
- pr.old_key = new_key;
- pr.new_key = 0;
- pr.flags = 0;
- pr.fail_early = false;
- dm_call_pr(bdev, __dm_pr_register, &pr);
+ if (ret) {
+ /* Didn't even get to register a path */
+ return ret;
}
+ if (!pr.ret)
+ return 0;
+ ret = pr.ret;
+
+ if (!new_key)
+ return ret;
+
+ /* unregister all paths if we failed to register any path */
+ pr.old_key = new_key;
+ pr.new_key = 0;
+ pr.flags = 0;
+ pr.fail_early = false;
+ (void) dm_call_pr(bdev, __dm_pr_register, &pr);
return ret;
}
+
+static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_reserve) {
+ pr->ret = -EOPNOTSUPP;
+ return -1;
+ }
+
+ pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
+ if (!pr->ret)
+ return -1;
+
+ return 0;
+}
+
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
u32 flags)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
- const struct pr_ops *ops;
- int r, srcu_idx;
+ struct dm_pr pr = {
+ .old_key = key,
+ .flags = flags,
+ .type = type,
+ .fail_early = false,
+ .ret = 0,
+ };
+ int ret;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
- goto out;
+ ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
+ if (ret)
+ return ret;
- ops = bdev->bd_disk->fops->pr_ops;
- if (ops && ops->pr_reserve)
- r = ops->pr_reserve(bdev, key, type, flags);
- else
- r = -EOPNOTSUPP;
-out:
- dm_unprepare_ioctl(md, srcu_idx);
- return r;
+ return pr.ret;
+}
+
+/*
+ * If there is a non-All Registrants type of reservation, the release must be
+ * sent down the holding path. For the cases where there is no reservation or
+ * the path is not the holder the device will also return success, so we must
+ * try each path to make sure we got the correct path.
+ */
+static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_release) {
+ pr->ret = -EOPNOTSUPP;
+ return -1;
+ }
+
+ pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
+ if (pr->ret)
+ return -1;
+
+ return 0;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
- const struct pr_ops *ops;
- int r, srcu_idx;
+ struct dm_pr pr = {
+ .old_key = key,
+ .type = type,
+ .fail_early = false,
+ };
+ int ret;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
- goto out;
+ ret = dm_call_pr(bdev, __dm_pr_release, &pr);
+ if (ret)
+ return ret;
- ops = bdev->bd_disk->fops->pr_ops;
- if (ops && ops->pr_release)
- r = ops->pr_release(bdev, key, type);
- else
- r = -EOPNOTSUPP;
-out:
- dm_unprepare_ioctl(md, srcu_idx);
- return r;
+ return pr.ret;
+}
+
+static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_preempt) {
+ pr->ret = -EOPNOTSUPP;
+ return -1;
+ }
+
+ pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
+ pr->abort);
+ if (!pr->ret)
+ return -1;
+
+ return 0;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
- const struct pr_ops *ops;
- int r, srcu_idx;
+ struct dm_pr pr = {
+ .new_key = new_key,
+ .old_key = old_key,
+ .type = type,
+ .fail_early = false,
+ };
+ int ret;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
- goto out;
+ ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
+ if (ret)
+ return ret;
- ops = bdev->bd_disk->fops->pr_ops;
- if (ops && ops->pr_preempt)
- r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
- else
- r = -EOPNOTSUPP;
-out:
- dm_unprepare_ioctl(md, srcu_idx);
- return r;
+ return pr.ret;
}
static int dm_pr_clear(struct block_device *bdev, u64 key)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a8405ce305a9..5201df03ce40 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -53,7 +53,6 @@ struct dm_io;
*---------------------------------------------------------------*/
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
-struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
@@ -218,9 +217,6 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned per_io_data_size, unsigned min_pool_size,
- bool integrity, bool poll);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 2cf973722f59..91836e6de326 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -125,7 +125,6 @@ static void __init md_setup_drive(struct md_setup_args *args)
char *devname = args->device_names;
dev_t devices[MD_SB_DISKS + 1], mdev;
struct mdu_array_info_s ainfo = { };
- struct block_device *bdev;
struct mddev *mddev;
int err = 0, i;
char name[16];
@@ -169,24 +168,16 @@ static void __init md_setup_drive(struct md_setup_args *args)
pr_info("md: Loading %s: %s\n", name, args->device_names);
- bdev = blkdev_get_by_dev(mdev, FMODE_READ, NULL);
- if (IS_ERR(bdev)) {
- pr_err("md: open failed - cannot start array %s\n", name);
+ mddev = md_alloc(mdev, name);
+ if (IS_ERR(mddev)) {
+ pr_err("md: md_alloc failed - cannot start array %s\n", name);
return;
}
- err = -EIO;
- if (WARN(bdev->bd_disk->fops != &md_fops,
- "Opening block device %x resulted in non-md device\n",
- mdev))
- goto out_blkdev_put;
-
- mddev = bdev->bd_disk->private_data;
-
err = mddev_lock(mddev);
if (err) {
pr_err("md: failed to lock array %s\n", name);
- goto out_blkdev_put;
+ goto out_mddev_put;
}
if (!list_empty(&mddev->disks) || mddev->raid_disks) {
@@ -230,8 +221,8 @@ static void __init md_setup_drive(struct md_setup_args *args)
pr_warn("md: starting %s failed\n", name);
out_unlock:
mddev_unlock(mddev);
-out_blkdev_put:
- blkdev_put(bdev, FMODE_READ);
+out_mddev_put:
+ mddev_put(mddev);
}
static int __init raid_setup(char *str)
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index d87f674ab762..bf6dffadbe6f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -165,7 +165,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
- page, REQ_OP_READ, 0, true)) {
+ page, REQ_OP_READ, true)) {
page->index = index;
return 0;
}
@@ -302,7 +302,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+ submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -394,7 +394,7 @@ static int read_page(struct file *file, unsigned long index,
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(REQ_OP_READ, 0, bh);
+ submit_bh(REQ_OP_READ, bh);
}
blk_cur++;
bh = bh->b_this_page;
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 37cbcce3cc66..742b2349fea3 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -40,7 +40,7 @@ struct resync_info {
/* Lock the send communication. This is done through
* bit manipulation as opposed to a mutex in order to
- * accomodate lock and hold. See next comment.
+ * accommodate lock and hold. See next comment.
*/
#define MD_CLUSTER_SEND_LOCK 4
/* If cluster operations (such as adding a disk) must lock the
@@ -689,7 +689,7 @@ static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
/*
* If resync thread run after raid1d thread, then process_metadata_update
* could not continue if raid1d held reconfig_mutex (and raid1d is blocked
- * since another node already got EX on Token and waitting the EX of Ack),
+ * since another node already got EX on Token and waiting the EX of Ack),
* so let resync wake up thread in case flag is set.
*/
if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7ecb0bffda0..afaf36b2f6ab 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -368,28 +368,6 @@ EXPORT_SYMBOL_GPL(md_new_event);
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
-/*
- * iterates through all used mddevs in the system.
- * We take care to grab the all_mddevs_lock whenever navigating
- * the list, and to always hold a refcount when unlocked.
- * Any code which breaks out of this loop while own
- * a reference to the current mddev and must mddev_put it.
- */
-#define for_each_mddev(_mddev,_tmp) \
- \
- for (({ spin_lock(&all_mddevs_lock); \
- _tmp = all_mddevs.next; \
- _mddev = NULL;}); \
- ({ if (_tmp != &all_mddevs) \
- mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
- spin_unlock(&all_mddevs_lock); \
- if (_mddev) mddev_put(_mddev); \
- _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
- _tmp != &all_mddevs;}); \
- ({ spin_lock(&all_mddevs_lock); \
- _tmp = _tmp->next;}) \
- )
-
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
@@ -464,7 +442,7 @@ static void md_submit_bio(struct bio *bio)
return;
}
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
@@ -647,13 +625,17 @@ EXPORT_SYMBOL(md_flush_request);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
+ lockdep_assert_held(&all_mddevs_lock);
+
+ if (test_bit(MD_DELETED, &mddev->flags))
+ return NULL;
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
-static void mddev_put(struct mddev *mddev)
+void mddev_put(struct mddev *mddev)
{
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
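
With MD_DELETED replacing removal from all_mddevs, lookups take their reference under all_mddevs_lock and must handle mddev_get() returning NULL for an array that is going away. The sysfs hunks below follow this shape; a sketch of the pattern:

	spin_lock(&all_mddevs_lock);
	if (!mddev_get(mddev)) {	/* NULL once MD_DELETED is set */
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	spin_unlock(&all_mddevs_lock);

	/* ... use the array ... */

	mddev_put(mddev);		/* may queue the delayed delete */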
@@ -661,7 +643,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del_init(&mddev->all_mddevs);
+ set_bit(MD_DELETED, &mddev->flags);
/*
* Call queue_work inside the spinlock so that
@@ -678,7 +660,6 @@ static void md_safemode_timeout(struct timer_list *t);
void mddev_init(struct mddev *mddev)
{
- kobject_init(&mddev->kobj, &md_ktype);
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
@@ -733,22 +714,6 @@ static dev_t mddev_alloc_unit(void)
return dev;
}
-static struct mddev *mddev_find(dev_t unit)
-{
- struct mddev *mddev;
-
- if (MAJOR(unit) != MD_MAJOR)
- unit &= ~((1 << MdpMinorShift) - 1);
-
- spin_lock(&all_mddevs_lock);
- mddev = mddev_find_locked(unit);
- if (mddev)
- mddev_get(mddev);
- spin_unlock(&all_mddevs_lock);
-
- return mddev;
-}
-
static struct mddev *mddev_alloc(dev_t unit)
{
struct mddev *new;
@@ -791,6 +756,15 @@ out_free_new:
return ERR_PTR(error);
}
+static void mddev_free(struct mddev *mddev)
+{
+ spin_lock(&all_mddevs_lock);
+ list_del(&mddev->all_mddevs);
+ spin_unlock(&all_mddevs_lock);
+
+ kfree(mddev);
+}
+
static const struct attribute_group md_redundancy_group;
void mddev_unlock(struct mddev *mddev)
@@ -993,15 +967,15 @@ int md_super_wait(struct mddev *mddev)
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int op, int op_flags, bool metadata_op)
+ struct page *page, blk_opf_t opf, bool metadata_op)
{
struct bio bio;
struct bio_vec bvec;
if (metadata_op && rdev->meta_bdev)
- bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
+ bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
else
- bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+ bio_init(&bio, rdev->bdev, &bvec, 1, opf);
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
@@ -1024,7 +998,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded)
return 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -1722,7 +1696,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
- rdev->bb_page, REQ_OP_READ, 0, true))
+ rdev->bb_page, REQ_OP_READ, true))
return -EIO;
bbp = (__le64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
@@ -2438,7 +2412,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
- bdevname(rdev->bdev,b);
+ snprintf(b, sizeof(b), "%pg", rdev->bdev);
strreplace(b, '/', '!');
rdev->mddev = mddev;
@@ -3335,14 +3309,35 @@ rdev_size_show(struct md_rdev *rdev, char *page)
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
-static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
+static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
{
/* check if two start/length pairs overlap */
- if (s1+l1 <= s2)
- return 0;
- if (s2+l2 <= s1)
- return 0;
- return 1;
+ if (a->data_offset + a->sectors <= b->data_offset)
+ return false;
+ if (b->data_offset + b->sectors <= a->data_offset)
+ return false;
+ return true;
+}
+
+static bool md_rdev_overlaps(struct md_rdev *rdev)
+{
+ struct mddev *mddev;
+ struct md_rdev *rdev2;
+
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ if (test_bit(MD_DELETED, &mddev->flags))
+ continue;
+ rdev_for_each(rdev2, mddev) {
+ if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
+ md_rdevs_overlap(rdev, rdev2)) {
+ spin_unlock(&all_mddevs_lock);
+ return true;
+ }
+ }
+ }
+ spin_unlock(&all_mddevs_lock);
+ return false;
}
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
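
md_rdevs_overlap() keeps the same half-open-interval test as the removed overlaps() helper, just taking the rdevs directly. A standalone illustration of the interval math, purely for clarity (userspace C, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* [s1, s1 + l1) and [s2, s2 + l2) overlap unless one ends before the other starts. */
static bool ranges_overlap(uint64_t s1, uint64_t l1, uint64_t s2, uint64_t l2)
{
	if (s1 + l1 <= s2)
		return false;
	if (s2 + l2 <= s1)
		return false;
	return true;
}

/* e.g. ranges_overlap(0, 100, 99, 10) is true; ranges_overlap(0, 100, 100, 10) is false */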
@@ -3394,46 +3389,21 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
- if (sectors > oldsectors && my_mddev->external) {
- /* Need to check that all other rdevs with the same
- * ->bdev do not overlap. 'rcu' is sufficient to walk
- * the rdev lists safely.
- * This check does not provide a hard guarantee, it
- * just helps avoid dangerous mistakes.
- */
- struct mddev *mddev;
- int overlap = 0;
- struct list_head *tmp;
-
- rcu_read_lock();
- for_each_mddev(mddev, tmp) {
- struct md_rdev *rdev2;
- rdev_for_each(rdev2, mddev)
- if (rdev->bdev == rdev2->bdev &&
- rdev != rdev2 &&
- overlaps(rdev->data_offset, rdev->sectors,
- rdev2->data_offset,
- rdev2->sectors)) {
- overlap = 1;
- break;
- }
- if (overlap) {
- mddev_put(mddev);
- break;
- }
- }
- rcu_read_unlock();
- if (overlap) {
- /* Someone else could have slipped in a size
- * change here, but doing so is just silly.
- * We put oldsectors back because we *know* it is
- * safe, and trust userspace not to race with
- * itself
- */
- rdev->sectors = oldsectors;
- return -EBUSY;
- }
+ /*
+ * Check that all other rdevs with the same bdev do not overlap. This
+ * check does not provide a hard guarantee, it just helps avoid
+ * dangerous mistakes.
+ */
+ if (sectors > oldsectors && my_mddev->external &&
+ md_rdev_overlaps(rdev)) {
+ /*
+ * Someone else could have slipped in a size change here, but
+ * doing so is just silly. We put oldsectors back because we
+ * know it is safe, and trust userspace not to race with itself.
+ */
+ rdev->sectors = oldsectors;
+ return -EBUSY;
}
return len;
}
@@ -4830,6 +4800,19 @@ action_store(struct mddev *mddev, const char *page, size_t len)
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
+ sector_t save_rp = mddev->reshape_position;
+
+ mddev_unlock(mddev);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
+ mddev_lock_nointr(mddev);
+ /*
+ * set RECOVERY_INTR again and restore reshape
+ * position in case others changed them after
+ * got lock, eg, reshape_position_store and
+ * md_check_recovery.
+ */
+ mddev->reshape_position = save_rp;
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
@@ -5001,7 +4984,7 @@ static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
- if (mddev->curr_resync == 0)
+ if (mddev->curr_resync == MD_RESYNC_NONE)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
@@ -5020,8 +5003,8 @@ sync_completed_show(struct mddev *mddev, char *page)
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
- if (mddev->curr_resync == 1 ||
- mddev->curr_resync == 2)
+ if (mddev->curr_resync == MD_RESYNC_YIELDED ||
+ mddev->curr_resync == MD_RESYNC_DELAYED)
return sprintf(page, "delayed\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -5532,11 +5515,10 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
- if (list_empty(&mddev->all_mddevs)) {
+ if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
- mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->show(mddev, page);
@@ -5557,18 +5539,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
- if (list_empty(&mddev->all_mddevs)) {
+ if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
- mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
return rv;
}
-static void md_free(struct kobject *ko)
+static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
@@ -5577,15 +5558,8 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_level)
sysfs_put(mddev->sysfs_level);
- if (mddev->gendisk) {
- del_gendisk(mddev->gendisk);
- blk_cleanup_disk(mddev->gendisk);
- }
- percpu_ref_exit(&mddev->writes_pending);
-
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
- kfree(mddev);
+ del_gendisk(mddev->gendisk);
+ put_disk(mddev->gendisk);
}
static const struct sysfs_ops md_sysfs_ops = {
@@ -5593,7 +5567,7 @@ static const struct sysfs_ops md_sysfs_ops = {
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
- .release = md_free,
+ .release = md_kobj_release,
.sysfs_ops = &md_sysfs_ops,
.default_groups = md_attr_groups,
};
@@ -5604,7 +5578,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5623,7 +5596,7 @@ int mddev_init_writes_pending(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
-static int md_alloc(dev_t dev, char *name)
+struct mddev *md_alloc(dev_t dev, char *name)
{
/*
* If dev is zero, name is the name of a device to allocate with
@@ -5651,8 +5624,8 @@ static int md_alloc(dev_t dev, char *name)
mutex_lock(&disks_mutex);
mddev = mddev_alloc(dev);
if (IS_ERR(mddev)) {
- mutex_unlock(&disks_mutex);
- return PTR_ERR(mddev);
+ error = PTR_ERR(mddev);
+ goto out_unlock;
}
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
@@ -5670,7 +5643,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
- goto out_unlock_disks_mutex;
+ goto out_free_mddev;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5683,7 +5656,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto out_unlock_disks_mutex;
+ goto out_free_mddev;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5704,25 +5677,45 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = add_disk(disk);
if (error)
- goto out_cleanup_disk;
+ goto out_put_disk;
+ kobject_init(&mddev->kobj, &md_ktype);
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
- if (error)
- goto out_del_gendisk;
+ if (error) {
+ /*
+ * The disk is already live at this point. Clear the hold flag
+ * and let mddev_put take care of the deletion, as it isn't any
+ * different from a normal close on last release now.
+ */
+ mddev->hold_active = 0;
+ mutex_unlock(&disks_mutex);
+ mddev_put(mddev);
+ return ERR_PTR(error);
+ }
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
- goto out_unlock_disks_mutex;
+ mutex_unlock(&disks_mutex);
+ return mddev;
-out_del_gendisk:
- del_gendisk(disk);
-out_cleanup_disk:
- blk_cleanup_disk(disk);
-out_unlock_disks_mutex:
+out_put_disk:
+ put_disk(disk);
+out_free_mddev:
+ mddev_free(mddev);
+out_unlock:
mutex_unlock(&disks_mutex);
+ return ERR_PTR(error);
+}
+
+static int md_alloc_and_put(dev_t dev, char *name)
+{
+ struct mddev *mddev = md_alloc(dev, name);
+
+ if (IS_ERR(mddev))
+ return PTR_ERR(mddev);
mddev_put(mddev);
- return error;
+ return 0;
}
static void md_probe(dev_t dev)
@@ -5730,7 +5723,7 @@ static void md_probe(dev_t dev)
if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
return;
if (create_on_open)
- md_alloc(dev, NULL);
+ md_alloc_and_put(dev, NULL);
}
static int add_named_array(const char *val, const struct kernel_param *kp)
@@ -5752,12 +5745,12 @@ static int add_named_array(const char *val, const struct kernel_param *kp)
return -E2BIG;
strscpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) == 0)
- return md_alloc(0, buf);
+ return md_alloc_and_put(0, buf);
if (strncmp(buf, "md", 2) == 0 &&
isdigit(buf[2]) &&
kstrtoul(buf+2, 10, &devnum) == 0 &&
devnum <= MINORMASK)
- return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
+ return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
return -EINVAL;
}
@@ -6197,6 +6190,7 @@ static void __md_stop_writes(struct mddev *mddev)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
}
@@ -6244,11 +6238,11 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
- md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
if (mddev->event_work.func)
flush_workqueue(md_misc_wq);
+ md_bitmap_destroy(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
@@ -6497,9 +6491,8 @@ static void autorun_devices(int part)
break;
}
- md_probe(dev);
- mddev = mddev_find(dev);
- if (!mddev)
+ mddev = md_alloc(dev, NULL);
+ if (IS_ERR(mddev))
break;
if (mddev_lock(mddev))
@@ -7782,45 +7775,33 @@ out_unlock:
static int md_open(struct block_device *bdev, fmode_t mode)
{
- /*
- * Succeed if we can lock the mddev, which confirms that
- * it isn't being stopped right now.
- */
- struct mddev *mddev = mddev_find(bdev->bd_dev);
+ struct mddev *mddev;
int err;
+ spin_lock(&all_mddevs_lock);
+ mddev = mddev_get(bdev->bd_disk->private_data);
+ spin_unlock(&all_mddevs_lock);
if (!mddev)
return -ENODEV;
- if (mddev->gendisk != bdev->bd_disk) {
- /* we are racing with mddev_put which is discarding this
- * bd_disk.
- */
- mddev_put(mddev);
- /* Wait until bdev->bd_disk is definitely gone */
- if (work_pending(&mddev->del_work))
- flush_workqueue(md_misc_wq);
- return -EBUSY;
- }
- BUG_ON(mddev != bdev->bd_disk->private_data);
-
- if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
+ err = mutex_lock_interruptible(&mddev->open_mutex);
+ if (err)
goto out;
- if (test_bit(MD_CLOSING, &mddev->flags)) {
- mutex_unlock(&mddev->open_mutex);
- err = -ENODEV;
- goto out;
- }
+ err = -ENODEV;
+ if (test_bit(MD_CLOSING, &mddev->flags))
+ goto out_unlock;
- err = 0;
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
bdev_check_media_change(bdev);
- out:
- if (err)
- mddev_put(mddev);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&mddev->open_mutex);
+out:
+ mddev_put(mddev);
return err;
}
@@ -7844,6 +7825,17 @@ static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
return ret;
}
+static void md_free_disk(struct gendisk *disk)
+{
+ struct mddev *mddev = disk->private_data;
+
+ percpu_ref_exit(&mddev->writes_pending);
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
+
+ mddev_free(mddev);
+}
+
const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -7857,6 +7849,7 @@ const struct block_device_operations md_fops =
.getgeo = md_getgeo,
.check_events = md_check_events,
.set_read_only = md_set_read_only,
+ .free_disk = md_free_disk,
};
static int md_thread(void *arg)
@@ -8018,16 +8011,26 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync;
- if (resync <= 3) {
+ if (resync < MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
/* Still cleaning up */
resync = max_sectors;
- } else if (resync > max_sectors)
+ } else if (resync > max_sectors) {
resync = max_sectors;
- else
+ } else {
resync -= atomic_read(&mddev->recovery_active);
+ if (resync < MD_RESYNC_ACTIVE) {
+ /*
+ * Resync has started, but the subtraction has
+ * yielded one of the special values. Force it
+ * to active to ensure the status reports an
+ * active resync.
+ */
+ resync = MD_RESYNC_ACTIVE;
+ }
+ }
- if (resync == 0) {
+ if (resync == MD_RESYNC_NONE) {
if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
struct md_rdev *rdev;
@@ -8051,7 +8054,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
}
return 0;
}
- if (resync < 3) {
+ if (resync < MD_RESYNC_ACTIVE) {
seq_printf(seq, "\tresync=DELAYED");
return 1;
}
@@ -8152,6 +8155,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
- mddev_get(mddev);
+ if (!mddev_get(mddev))
+ continue;
spin_unlock(&all_mddevs_lock);
return mddev;
}
@@ -8165,25 +8170,35 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
+ struct mddev *to_put = NULL;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
- if (v == (void*)1)
+ if (v == (void*)1) {
tmp = all_mddevs.next;
- else
+ } else {
+ to_put = mddev;
+ tmp = mddev->all_mddevs.next;
+ }
+
+ for (;;) {
+ if (tmp == &all_mddevs) {
+ next_mddev = (void*)2;
+ *pos = 0x10000;
+ break;
+ }
+ next_mddev = list_entry(tmp, struct mddev, all_mddevs);
+ if (mddev_get(next_mddev))
+ break;
+ mddev = next_mddev;
tmp = mddev->all_mddevs.next;
- if (tmp != &all_mddevs)
- next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
- else {
- next_mddev = (void*)2;
- *pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
- if (v != (void*)1)
+ if (to_put)
mddev_put(mddev);
return next_mddev;
@@ -8682,7 +8697,6 @@ void md_do_sync(struct md_thread *thread)
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
- struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
@@ -8729,13 +8743,7 @@ void md_do_sync(struct md_thread *thread)
mddev->last_sync_action = action ?: desc;
- /* we overload curr_resync somewhat here.
- * 0 == not engaged in resync at all
- * 2 == checking that there is no conflict with another sync
- * 1 == like 2, but have yielded to allow conflicting resync to
- * commence
- * other == active in resync - this many blocks
- *
+ /*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
@@ -8747,24 +8755,29 @@ void md_do_sync(struct md_thread *thread)
do {
int mddev2_minor = -1;
- mddev->curr_resync = 2;
+ mddev->curr_resync = MD_RESYNC_DELAYED;
try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
- for_each_mddev(mddev2, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
+ if (test_bit(MD_DELETED, &mddev2->flags))
+ continue;
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
- if (mddev < mddev2 && mddev->curr_resync == 2) {
+ if (mddev < mddev2 &&
+ mddev->curr_resync == MD_RESYNC_DELAYED) {
/* arbitrarily yield */
- mddev->curr_resync = 1;
+ mddev->curr_resync = MD_RESYNC_YIELDED;
wake_up(&resync_wait);
}
- if (mddev > mddev2 && mddev->curr_resync == 1)
+ if (mddev > mddev2 &&
+ mddev->curr_resync == MD_RESYNC_YIELDED)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
@@ -8782,7 +8795,8 @@ void md_do_sync(struct md_thread *thread)
desc, mdname(mddev),
mdname(mddev2));
}
- mddev_put(mddev2);
+ spin_unlock(&all_mddevs_lock);
+
if (signal_pending(current))
flush_signals(current);
schedule();
@@ -8792,7 +8806,8 @@ void md_do_sync(struct md_thread *thread)
finish_wait(&resync_wait, &wq);
}
}
- } while (mddev->curr_resync < 2);
+ spin_unlock(&all_mddevs_lock);
+ } while (mddev->curr_resync < MD_RESYNC_DELAYED);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -8876,7 +8891,7 @@ void md_do_sync(struct md_thread *thread)
desc, mdname(mddev));
mddev->curr_resync = j;
} else
- mddev->curr_resync = 3; /* no longer delayed */
+ mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event();
@@ -9011,14 +9026,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 3) {
+ mddev->curr_resync >= MD_RESYNC_ACTIVE) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 3) {
+ mddev->curr_resync >= MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
@@ -9082,7 +9097,7 @@ void md_do_sync(struct md_thread *thread)
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
- mddev->curr_resync = 0;
+ mddev->curr_resync = MD_RESYNC_NONE;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
@@ -9303,6 +9318,7 @@ void md_check_recovery(struct mddev *mddev)
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9338,6 +9354,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
}
if (mddev->sync_thread) {
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
goto unlock;
}
@@ -9417,8 +9434,7 @@ void md_reap_sync_thread(struct mddev *mddev)
sector_t old_dev_sectors = mddev->dev_sectors;
bool is_reshaped = false;
- /* resync has finished, collect result */
- md_unregister_thread(&mddev->sync_thread);
+ /* sync_thread should be unregistered, collect result */
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
@@ -9466,6 +9482,7 @@ void md_reap_sync_thread(struct mddev *mddev)
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event();
if (mddev->event_work.func)
@@ -9544,11 +9561,14 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
- struct list_head *tmp;
- struct mddev *mddev;
+ struct mddev *mddev, *n;
int need_delay = 0;
- for_each_mddev(mddev, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
@@ -9557,7 +9577,11 @@ static int md_notify_reboot(struct notifier_block *this,
mddev_unlock(mddev);
}
need_delay = 1;
+ mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
}
+ spin_unlock(&all_mddevs_lock);
+
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
@@ -9876,8 +9900,7 @@ void md_autostart_arrays(int part)
static __exit void md_exit(void)
{
- struct mddev *mddev;
- struct list_head *tmp;
+ struct mddev *mddev, *n;
int delay = 1;
unregister_blkdev(MD_MAJOR,"md");
@@ -9897,17 +9920,24 @@ static __exit void md_exit(void)
}
remove_proc_entry("mdstat", NULL);
- for_each_mddev(mddev, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
export_array(mddev);
mddev->ctime = 0;
mddev->hold_active = 0;
/*
- * for_each_mddev() will call mddev_put() at the end of each
- * iteration. As the mddev is now fully clear, this will
- * schedule the mddev for destruction by a workqueue, and the
+ * As the mddev is now fully clear, mddev_put will schedule
+ * the mddev for destruction by a workqueue, and the
* destroy_workqueue() below will wait for that to complete.
*/
+ mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
}
+ spin_unlock(&all_mddevs_lock);
+
destroy_workqueue(md_rdev_misc_wq);
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf2cbb17acbd..b4e2d8b87b61 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -254,6 +254,7 @@ struct md_cluster_info;
* @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
* array is ready yet.
* @MD_BROKEN: This is used to stop writes and mark array as failed.
+ * @MD_DELETED: This device is being deleted
*
* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
*/
@@ -270,6 +271,7 @@ enum mddev_flags {
MD_UPDATING_SB,
MD_NOT_READY,
MD_BROKEN,
+ MD_DELETED,
};
enum mddev_sb_flags {
@@ -288,6 +290,21 @@ struct serial_info {
sector_t _subtree_last; /* highest sector in subtree of rb node */
};
+/*
+ * mddev->curr_resync stores the current sector of the resync but
+ * also has some overloaded values.
+ */
+enum {
+ /* No resync in progress */
+ MD_RESYNC_NONE = 0,
+ /* Yielded to allow another conflicting resync to commence */
+ MD_RESYNC_YIELDED = 1,
+ /* Delayed to check that there is no conflict with another sync */
+ MD_RESYNC_DELAYED = 2,
+ /* Any value greater than or equal to this is in an active resync */
+ MD_RESYNC_ACTIVE = 3,
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -738,8 +755,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int op, int op_flags,
- bool metadata_op);
+ struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
@@ -751,6 +767,8 @@ extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
+struct mddev *md_alloc(dev_t dev, char *name);
+void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 54c089a50b15..11935864f50f 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -391,7 +391,8 @@ struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
sizeof(struct buffer_aux),
dm_block_manager_alloc_callback,
- dm_block_manager_write_callback);
+ dm_block_manager_write_callback,
+ 0);
if (IS_ERR(bm->bufio)) {
r = PTR_ERR(bm->bufio);
kfree(bm);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 258d4eb2d63c..05d8438cfec8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1220,8 +1220,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct raid1_info *mirror;
struct bio *read_bio;
struct bitmap *bitmap = mddev->bitmap;
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const enum req_op op = bio_op(bio);
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
int rdisk;
bool r1bio_existed = !!r1_bio;
@@ -1240,7 +1240,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
rcu_read_lock();
rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
if (rdev)
- bdevname(rdev->bdev, b);
+ snprintf(b, sizeof(b), "%pg", rdev->bdev);
else
strcpy(b, "???");
rcu_read_unlock();
@@ -1988,9 +1988,9 @@ static void end_sync_write(struct bio *bio)
}
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, int rw)
{
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -2057,7 +2057,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev, sect, s<<9,
pages[idx],
- REQ_OP_READ, 0, false)) {
+ REQ_OP_READ, false)) {
success = 1;
break;
}
@@ -2305,7 +2305,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, REQ_OP_READ, 0, false))
+ conf->tmppage, REQ_OP_READ, false))
success = 1;
rdev_dec_pending(rdev, mddev);
if (success)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d589f823feb1..9117fcdee1be 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1136,8 +1136,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
{
struct r10conf *conf = mddev->private;
struct bio *read_bio;
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const enum req_op op = bio_op(bio);
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
struct md_rdev *rdev;
char b[BDEVNAME_SIZE];
@@ -1164,7 +1164,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
disk = r10_bio->devs[slot].devnum;
err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (err_rdev)
- bdevname(err_rdev->bdev, b);
+ snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
else {
strcpy(b, "???");
/* This never gets dereferenced */
@@ -1230,9 +1230,9 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
struct bio *bio, bool replacement,
int n_copy)
{
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
- const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
+ const enum req_op op = bio_op(bio);
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+ const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
unsigned long flags;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -2167,9 +2167,12 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = 0;
int number = rdev->raid_disk;
struct md_rdev **rdevp;
- struct raid10_info *p = conf->mirrors + number;
+ struct raid10_info *p;
print_conf(conf);
+ if (unlikely(number >= mddev->raid_disks))
+ return 0;
+ p = conf->mirrors + number;
if (rdev == p->rdev)
rdevp = &p->rdev;
else if (rdev == p->replacement)
@@ -2512,7 +2515,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
pages[idx],
- REQ_OP_READ, 0, false);
+ REQ_OP_READ, false);
if (ok) {
rdev = conf->mirrors[dw].rdev;
addr = r10_bio->devs[1].addr + sect;
@@ -2520,7 +2523,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
pages[idx],
- REQ_OP_WRITE, 0, false);
+ REQ_OP_WRITE, false);
if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
@@ -2644,7 +2647,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
&& (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -2726,7 +2729,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect,
s<<9,
conf->tmppage,
- REQ_OP_READ, 0, false);
+ REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
@@ -5107,7 +5110,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
addr,
s << 9,
pages[idx],
- REQ_OP_READ, 0, false);
+ REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 83c184eddbda..f4e1cc1ece43 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1590,18 +1590,13 @@ void r5l_quiesce(struct r5l_log *log, int quiesce)
bool r5l_log_disk_error(struct r5conf *conf)
{
- struct r5l_log *log;
- bool ret;
- /* don't allow write if journal disk is missing */
- rcu_read_lock();
- log = rcu_dereference(conf->log);
+ struct r5l_log *log = conf->log;
+ /* don't allow write if journal disk is missing */
if (!log)
- ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
else
- ret = test_bit(Faulty, &log->rdev->flags);
- rcu_read_unlock();
- return ret;
+ return test_bit(Faulty, &log->rdev->flags);
}
#define R5L_RECOVERY_PAGE_POOL_SIZE 256
@@ -1788,7 +1783,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
mb = page_address(page);
mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
mb, PAGE_SIZE));
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
REQ_SYNC | REQ_FUA, false)) {
__free_page(page);
return -EIO;
@@ -1898,7 +1893,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
sync_page_io(rdev, sh->sector, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ sh->dev[disk_index].page, REQ_OP_WRITE,
false);
rdev_dec_pending(rdev, rdev->mddev);
rcu_read_lock();
@@ -1908,7 +1903,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
atomic_inc(&rrdev->nr_pending);
rcu_read_unlock();
sync_page_io(rrdev, sh->sector, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ sh->dev[disk_index].page, REQ_OP_WRITE,
false);
rdev_dec_pending(rrdev, rrdev->mddev);
rcu_read_lock();
@@ -2394,7 +2389,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
PAGE_SIZE));
kunmap_atomic(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
- dev->page, REQ_OP_WRITE, 0, false);
+ dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos,
BLOCK_SECTORS);
offset += sizeof(__le32) +
@@ -2406,7 +2401,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
mb, PAGE_SIZE));
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
- REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
+ REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
sh->log_start = ctx->pos;
list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
atomic_inc(&log->stripe_in_journal_count);
@@ -2534,12 +2529,13 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
struct r5conf *conf;
int ret;
- spin_lock(&mddev->lock);
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
conf = mddev->private;
- if (!conf || !conf->log) {
- spin_unlock(&mddev->lock);
- return 0;
- }
+ if (!conf || !conf->log)
+ goto out_unlock;
switch (conf->log->r5c_journal_mode) {
case R5C_JOURNAL_MODE_WRITE_THROUGH:
@@ -2557,7 +2553,9 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
default:
ret = 0;
}
- spin_unlock(&mddev->lock);
+
+out_unlock:
+ mddev_unlock(mddev);
return ret;
}
@@ -2639,7 +2637,7 @@ int r5c_try_caching_write(struct r5conf *conf,
int i;
struct r5dev *dev;
int to_cache = 0;
- void **pslot;
+ void __rcu **pslot;
sector_t tree_index;
int ret;
uintptr_t refcount;
@@ -2806,7 +2804,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
int i;
int do_wakeup = 0;
sector_t tree_index;
- void **pslot;
+ void __rcu **pslot;
uintptr_t refcount;
if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
@@ -2971,7 +2969,7 @@ static int r5l_load_log(struct r5l_log *log)
if (!page)
return -ENOMEM;
- if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
ret = -EIO;
goto ioerr;
}
@@ -3145,7 +3143,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->stripe_in_journal_lock);
atomic_set(&log->stripe_in_journal_count, 0);
- rcu_assign_pointer(conf->log, log);
+ conf->log = log;
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
@@ -3167,13 +3165,13 @@ void r5l_exit_log(struct r5conf *conf)
{
struct r5l_log *log = conf->log;
- conf->log = NULL;
- synchronize_rcu();
-
/* Ensure disable_writeback_work wakes up and exits */
wake_up(&conf->mddev->sb_wait);
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
+
+ conf->log = NULL;
+
mempool_exit(&log->meta_pool);
bioset_exit(&log->bs);
mempool_exit(&log->io_pool);
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 43c714a8798c..c8332502669e 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -2,49 +2,46 @@
#ifndef _RAID5_LOG_H
#define _RAID5_LOG_H
-extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
-extern void r5l_exit_log(struct r5conf *conf);
-extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
-extern void r5l_write_stripe_run(struct r5l_log *log);
-extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
-extern void r5l_stripe_write_finished(struct stripe_head *sh);
-extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
-extern void r5l_quiesce(struct r5l_log *log, int quiesce);
-extern bool r5l_log_disk_error(struct r5conf *conf);
-extern bool r5c_is_writeback(struct r5l_log *log);
-extern int
-r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
- struct stripe_head_state *s, int disks);
-extern void
-r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
- struct stripe_head_state *s);
-extern void r5c_release_extra_page(struct stripe_head *sh);
-extern void r5c_use_extra_page(struct stripe_head *sh);
-extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
-extern void r5c_handle_cached_data_endio(struct r5conf *conf,
- struct stripe_head *sh, int disks);
-extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh);
-extern void r5c_make_stripe_write_out(struct stripe_head *sh);
-extern void r5c_flush_cache(struct r5conf *conf, int num);
-extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
-extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
+void r5l_exit_log(struct r5conf *conf);
+int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
+void r5l_write_stripe_run(struct r5l_log *log);
+void r5l_flush_stripe_to_raid(struct r5l_log *log);
+void r5l_stripe_write_finished(struct stripe_head *sh);
+int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
+void r5l_quiesce(struct r5l_log *log, int quiesce);
+bool r5l_log_disk_error(struct r5conf *conf);
+bool r5c_is_writeback(struct r5l_log *log);
+int r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+void r5c_release_extra_page(struct stripe_head *sh);
+void r5c_use_extra_page(struct stripe_head *sh);
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks);
+int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh);
+void r5c_make_stripe_write_out(struct stripe_head *sh);
+void r5c_flush_cache(struct r5conf *conf, int num);
+void r5c_check_stripe_cache_usage(struct r5conf *conf);
+void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
-extern void r5c_update_on_rdev_error(struct mddev *mddev,
- struct md_rdev *rdev);
-extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
-extern int r5l_start(struct r5l_log *log);
+void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev);
+bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+int r5l_start(struct r5l_log *log);
-extern struct dma_async_tx_descriptor *
+struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx);
-extern int ppl_init_log(struct r5conf *conf);
-extern void ppl_exit_log(struct r5conf *conf);
-extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
-extern void ppl_write_stripe_run(struct r5conf *conf);
-extern void ppl_stripe_write_finished(struct stripe_head *sh);
-extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
-extern void ppl_quiesce(struct r5conf *conf, int quiesce);
-extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
+int ppl_init_log(struct r5conf *conf);
+void ppl_exit_log(struct r5conf *conf);
+int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
+void ppl_write_stripe_run(struct r5conf *conf);
+void ppl_stripe_write_finished(struct stripe_head *sh);
+int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+void ppl_quiesce(struct r5conf *conf, int quiesce);
+int ppl_handle_flush_request(struct bio *bio);
extern struct md_sysfs_entry ppl_write_hint;
static inline bool raid5_has_log(struct r5conf *conf)
@@ -111,7 +108,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
if (conf->log)
ret = r5l_handle_flush_request(conf->log, bio);
else if (raid5_has_ppl(conf))
- ret = ppl_handle_flush_request(conf->log, bio);
+ ret = ppl_handle_flush_request(bio);
return ret;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 0a2e4806b1ec..31b9157bc9ae 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -679,7 +679,7 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
}
}
-int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
+int ppl_handle_flush_request(struct bio *bio)
{
if (bio->bi_iter.bi_size == 0) {
bio_endio(bio);
@@ -897,7 +897,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
__func__, indent, "", rdev->bdev,
(unsigned long long)sector);
if (!sync_page_io(rdev, sector, block_size, page2,
- REQ_OP_READ, 0, false)) {
+ REQ_OP_READ, false)) {
md_error(mddev, rdev);
pr_debug("%s:%*s read failed!\n", __func__,
indent, "");
@@ -919,7 +919,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)(ppl_sector + i));
if (!sync_page_io(log->rdev,
ppl_sector - log->rdev->data_offset + i,
- block_size, page2, REQ_OP_READ, 0,
+ block_size, page2, REQ_OP_READ,
false)) {
pr_debug("%s:%*s read failed!\n", __func__,
indent, "");
@@ -946,7 +946,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)parity_sector,
parity_rdev->bdev);
if (!sync_page_io(parity_rdev, parity_sector, block_size,
- page1, REQ_OP_WRITE, 0, false)) {
+ page1, REQ_OP_WRITE, false)) {
pr_debug("%s:%*s parity write error!\n", __func__,
indent, "");
md_error(mddev, parity_rdev);
@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
if (!sync_page_io(rdev, sector - rdev->data_offset,
- s, page, REQ_OP_READ, 0, false)) {
+ s, page, REQ_OP_READ, false)) {
md_error(mddev, rdev);
ret = -EIO;
goto out;
@@ -1062,7 +1062,7 @@ static int ppl_write_empty_header(struct ppl_log *log)
if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
- REQ_FUA, 0, false)) {
+ REQ_FUA, false)) {
md_error(rdev->mddev, rdev);
ret = -EIO;
}
@@ -1100,7 +1100,7 @@ static int ppl_load_distributed(struct ppl_log *log)
if (!sync_page_io(rdev,
rdev->ppl.sector - rdev->data_offset +
pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
- 0, false)) {
+ false)) {
md_error(mddev, rdev);
ret = -EIO;
/* if not able to read - don't recover any PPL */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 20e53b167f81..31a0cbf63384 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -61,6 +61,8 @@
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
+#define RAID5_MAX_REQ_STRIPES 256
+
static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
@@ -624,6 +626,49 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
return NULL;
}
+static struct stripe_head *find_get_stripe(struct r5conf *conf,
+ sector_t sector, short generation, int hash)
+{
+ int inc_empty_inactive_list_flag;
+ struct stripe_head *sh;
+
+ sh = __find_stripe(conf, sector, generation);
+ if (!sh)
+ return NULL;
+
+ if (atomic_inc_not_zero(&sh->count))
+ return sh;
+
+ /*
+ * Slow path. The reference count is zero which means the stripe must
+ * be on a list (sh->lru). Must remove the stripe from the list that
+ * references it with the device_lock held.
+ */
+
+ spin_lock(&conf->device_lock);
+ if (!atomic_read(&sh->count)) {
+ if (!test_bit(STRIPE_HANDLE, &sh->state))
+ atomic_inc(&conf->active_stripes);
+ BUG_ON(list_empty(&sh->lru) &&
+ !test_bit(STRIPE_EXPANDING, &sh->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
+ list_del_init(&sh->lru);
+ if (list_empty(conf->inactive_list + hash) &&
+ inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
+ if (sh->group) {
+ sh->group->stripes_cnt--;
+ sh->group = NULL;
+ }
+ }
+ atomic_inc(&sh->count);
+ spin_unlock(&conf->device_lock);
+
+ return sh;
+}
+
/*
* Need to check if array has failed when deciding whether to:
* - start an array
@@ -710,80 +755,121 @@ static bool has_failed(struct r5conf *conf)
return degraded > conf->max_degraded;
}
-struct stripe_head *
-raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce)
+enum stripe_result {
+ STRIPE_SUCCESS = 0,
+ STRIPE_RETRY,
+ STRIPE_SCHEDULE_AND_RETRY,
+ STRIPE_FAIL,
+};
+
+struct stripe_request_ctx {
+ /* a reference to the last stripe_head for batching */
+ struct stripe_head *batch_last;
+
+ /* first sector in the request */
+ sector_t first_sector;
+
+ /* last sector in the request */
+ sector_t last_sector;
+
+ /*
+ * bitmap to track stripe sectors that have been added to stripes;
+ * add one to account for unaligned requests
+ */
+ DECLARE_BITMAP(sectors_to_do, RAID5_MAX_REQ_STRIPES + 1);
+
+ /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
+ bool do_flush;
+};
+
+/*
+ * Block until another thread clears R5_INACTIVE_BLOCKED or
+ * there are fewer than 3/4 the maximum number of active stripes
+ * and there is an inactive stripe available.
+ */
+static bool is_inactive_blocked(struct r5conf *conf, int hash)
+{
+ int active = atomic_read(&conf->active_stripes);
+
+ if (list_empty(conf->inactive_list + hash))
+ return false;
+
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ return true;
+
+ return active < (conf->max_nr_stripes * 3 / 4);
+}
+
+static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, sector_t sector,
+ bool previous, bool noblock, bool noquiesce)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
- int inc_empty_inactive_list_flag;
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(conf->hash_locks + hash);
- do {
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0 || noquiesce,
+retry:
+ if (!noquiesce && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before waiting
+ * on quiesce, otherwise the batch_last will hold a reference
+ * to a stripe and raid5_quiesce() will deadlock waiting for
+ * active_stripes to go to zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
+
+ wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
*(conf->hash_locks + hash));
- sh = __find_stripe(conf, sector, conf->generation - previous);
- if (!sh) {
- if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
- sh = get_free_stripe(conf, hash);
- if (!sh && !test_bit(R5_DID_ALLOC,
- &conf->cache_state))
- set_bit(R5_ALLOC_MORE,
- &conf->cache_state);
- }
- if (noblock && sh == NULL)
- break;
+ }
- r5c_check_stripe_cache_usage(conf);
- if (!sh) {
- set_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(
- conf->wait_for_stripe,
- !list_empty(conf->inactive_list + hash) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes * 3 / 4)
- || !test_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state)),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state);
- } else {
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- }
- } else if (!atomic_inc_not_zero(&sh->count)) {
- spin_lock(&conf->device_lock);
- if (!atomic_read(&sh->count)) {
- if (!test_bit(STRIPE_HANDLE, &sh->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru) &&
- !test_bit(STRIPE_EXPANDING, &sh->state));
- inc_empty_inactive_list_flag = 0;
- if (!list_empty(conf->inactive_list + hash))
- inc_empty_inactive_list_flag = 1;
- list_del_init(&sh->lru);
- if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
- atomic_inc(&conf->empty_inactive_list_nr);
- if (sh->group) {
- sh->group->stripes_cnt--;
- sh->group = NULL;
- }
- }
- atomic_inc(&sh->count);
- spin_unlock(&conf->device_lock);
- }
- } while (sh == NULL);
+ sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
+ if (sh)
+ goto out;
+
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ goto wait_for_stripe;
+
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ goto out;
+ }
+
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+
+wait_for_stripe:
+ if (noblock)
+ goto out;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ goto retry;
+
+out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ sector_t sector, bool previous, bool noblock, bool noquiesce)
+{
+ return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
+ noquiesce);
+}
+
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
@@ -824,13 +910,13 @@ static bool stripe_can_batch(struct stripe_head *sh)
}
/* we only do back search */
-static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
+static void stripe_add_to_batch_list(struct r5conf *conf,
+ struct stripe_head *sh, struct stripe_head *last_sh)
{
struct stripe_head *head;
sector_t head_sector, tmp_sec;
int hash;
int dd_idx;
- int inc_empty_inactive_list_flag;
/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
tmp_sec = sh->sector;
@@ -838,36 +924,20 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
return;
head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);
- hash = stripe_hash_locks_hash(conf, head_sector);
- spin_lock_irq(conf->hash_locks + hash);
- head = __find_stripe(conf, head_sector, conf->generation);
- if (head && !atomic_inc_not_zero(&head->count)) {
- spin_lock(&conf->device_lock);
- if (!atomic_read(&head->count)) {
- if (!test_bit(STRIPE_HANDLE, &head->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&head->lru) &&
- !test_bit(STRIPE_EXPANDING, &head->state));
- inc_empty_inactive_list_flag = 0;
- if (!list_empty(conf->inactive_list + hash))
- inc_empty_inactive_list_flag = 1;
- list_del_init(&head->lru);
- if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
- atomic_inc(&conf->empty_inactive_list_nr);
- if (head->group) {
- head->group->stripes_cnt--;
- head->group = NULL;
- }
- }
+ if (last_sh && head_sector == last_sh->sector) {
+ head = last_sh;
atomic_inc(&head->count);
- spin_unlock(&conf->device_lock);
+ } else {
+ hash = stripe_hash_locks_hash(conf, head_sector);
+ spin_lock_irq(conf->hash_locks + hash);
+ head = find_get_stripe(conf, head_sector, conf->generation,
+ hash);
+ spin_unlock_irq(conf->hash_locks + hash);
+ if (!head)
+ return;
+ if (!stripe_can_batch(head))
+ goto out;
}
- spin_unlock_irq(conf->hash_locks + hash);
-
- if (!head)
- return;
- if (!stripe_can_batch(head))
- goto out;
lock_two_stripes(head, sh);
/* clear_batch_ready clear the flag */
@@ -1082,7 +1152,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
should_defer = conf->batch_bio_dispatch && conf->group_cnt;
for (i = disks; i--; ) {
- int op, op_flags = 0;
+ enum req_op op;
+ blk_opf_t op_flags = 0;
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
@@ -2881,10 +2952,10 @@ static void raid5_end_write_request(struct bio *bi)
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
raid5_release_stripe(sh->batch_head);
+ raid5_release_stripe(sh);
}
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
@@ -3412,39 +3483,32 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked, s->ops_request);
}
-/*
- * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
- * The bi_next chain must be in order.
- */
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
- int forwrite, int previous)
+static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite)
{
- struct bio **bip;
struct r5conf *conf = sh->raid_conf;
- int firstwrite=0;
+ struct bio **bip;
- pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_iter.bi_sector,
- (unsigned long long)sh->sector);
+ pr_debug("checking bi b#%llu to stripe s#%llu\n",
+ bi->bi_iter.bi_sector, sh->sector);
- spin_lock_irq(&sh->stripe_lock);
/* Don't allow new IO added to stripes in batch list */
if (sh->batch_head)
- goto overlap;
- if (forwrite) {
+ return true;
+
+ if (forwrite)
bip = &sh->dev[dd_idx].towrite;
- if (*bip == NULL)
- firstwrite = 1;
- } else
+ else
bip = &sh->dev[dd_idx].toread;
+
while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
- goto overlap;
- bip = & (*bip)->bi_next;
+ return true;
+ bip = &(*bip)->bi_next;
}
+
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
- goto overlap;
+ return true;
if (forwrite && raid5_has_ppl(conf)) {
/*
@@ -3473,9 +3537,30 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
}
if (first + conf->chunk_sectors * (count - 1) != last)
- goto overlap;
+ return true;
+ }
+
+ return false;
+}
+
+static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite, int previous)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct bio **bip;
+ int firstwrite = 0;
+
+ if (forwrite) {
+ bip = &sh->dev[dd_idx].towrite;
+ if (!*bip)
+ firstwrite = 1;
+ } else {
+ bip = &sh->dev[dd_idx].toread;
}
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
+ bip = &(*bip)->bi_next;
+
if (!forwrite || previous)
clear_bit(STRIPE_BATCH_READY, &sh->state);
@@ -3501,9 +3586,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
sh->overwrite_disks++;
}
- pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_iter.bi_sector,
- (unsigned long long)sh->sector, dd_idx);
+ pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
+ (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
+ sh->dev[dd_idx].sector);
if (conf->mddev->bitmap && firstwrite) {
/* Cannot hold spinlock over bitmap_startwrite,
@@ -3511,7 +3596,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
* we have added to the bitmap and set bm_seq.
* So set STRIPE_BITMAP_PENDING to prevent
* batching.
- * If multiple add_stripe_bio() calls race here they
+ * If multiple __add_stripe_bio() calls race here they
* must all set STRIPE_BITMAP_PENDING. So only the first one
* to complete "bitmap_startwrite" gets to set
* STRIPE_BIT_DELAY. This is important as once a stripe
@@ -3529,16 +3614,27 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
- spin_unlock_irq(&sh->stripe_lock);
+}
- if (stripe_can_batch(sh))
- stripe_add_to_batch_list(conf, sh);
- return 1;
+/*
+ * Each stripe/dev can have one or more bios attached.
+ * toread/towrite point to the first in a chain.
+ * The bi_next chain must be in order.
+ */
+static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite, int previous)
+{
+ spin_lock_irq(&sh->stripe_lock);
+
+ if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+ set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+ spin_unlock_irq(&sh->stripe_lock);
+ return false;
+ }
- overlap:
- set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+ __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
spin_unlock_irq(&sh->stripe_lock);
- return 0;
+ return true;
}
static void end_reshape(struct r5conf *conf);
@@ -5784,17 +5880,215 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
bio_endio(bi);
}
-static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool ahead_of_reshape(struct mddev *mddev, sector_t sector,
+ sector_t reshape_sector)
{
- struct r5conf *conf = mddev->private;
+ return mddev->reshape_backwards ? sector < reshape_sector :
+ sector >= reshape_sector;
+}
+
+static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min,
+ sector_t max, sector_t reshape_sector)
+{
+ return mddev->reshape_backwards ? max < reshape_sector :
+ min >= reshape_sector;
+}
+
+static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
+ struct stripe_head *sh)
+{
+ sector_t max_sector = 0, min_sector = MaxSector;
+ bool ret = false;
int dd_idx;
- sector_t new_sector;
- sector_t logical_sector, last_sector;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ if (dd_idx == sh->pd_idx)
+ continue;
+
+ min_sector = min(min_sector, sh->dev[dd_idx].sector);
+ max_sector = max(max_sector, sh->dev[dd_idx].sector);
+ }
+
+ spin_lock_irq(&conf->device_lock);
+
+ if (!range_ahead_of_reshape(mddev, min_sector, max_sector,
+ conf->reshape_progress))
+ /* mismatch, need to try again */
+ ret = true;
+
+ spin_unlock_irq(&conf->device_lock);
+
+ return ret;
+}
+
+static int add_all_stripe_bios(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, struct stripe_head *sh,
+ struct bio *bi, int forwrite, int previous)
+{
+ int dd_idx;
+ int ret = 1;
+
+ spin_lock_irq(&sh->stripe_lock);
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ struct r5dev *dev = &sh->dev[dd_idx];
+
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ if (dev->sector < ctx->first_sector ||
+ dev->sector >= ctx->last_sector)
+ continue;
+
+ if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+ set_bit(R5_Overlap, &dev->flags);
+ ret = 0;
+ continue;
+ }
+ }
+
+ if (!ret)
+ goto out;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ struct r5dev *dev = &sh->dev[dd_idx];
+
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ if (dev->sector < ctx->first_sector ||
+ dev->sector >= ctx->last_sector)
+ continue;
+
+ __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
+ clear_bit((dev->sector - ctx->first_sector) >>
+ RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
+ }
+
+out:
+ spin_unlock_irq(&sh->stripe_lock);
+ return ret;
+}
+
+static enum stripe_result make_stripe_request(struct mddev *mddev,
+ struct r5conf *conf, struct stripe_request_ctx *ctx,
+ sector_t logical_sector, struct bio *bi)
+{
+ const int rw = bio_data_dir(bi);
+ enum stripe_result ret;
struct stripe_head *sh;
+ sector_t new_sector;
+ int previous = 0;
+ int seq, dd_idx;
+
+ seq = read_seqcount_begin(&conf->gen_lock);
+
+ if (unlikely(conf->reshape_progress != MaxSector)) {
+ /*
+ * Spinlock is needed as reshape_progress may be
+ * 64bit on a 32bit platform, and so it might be
+ * possible to see a half-updated value
+ * Of course reshape_progress could change after
+ * the lock is dropped, so once we get a reference
+ * to the stripe that we think it is, we will have
+ * to check again.
+ */
+ spin_lock_irq(&conf->device_lock);
+ if (ahead_of_reshape(mddev, logical_sector,
+ conf->reshape_progress)) {
+ previous = 1;
+ } else {
+ if (ahead_of_reshape(mddev, logical_sector,
+ conf->reshape_safe)) {
+ spin_unlock_irq(&conf->device_lock);
+ return STRIPE_SCHEDULE_AND_RETRY;
+ }
+ }
+ spin_unlock_irq(&conf->device_lock);
+ }
+
+ new_sector = raid5_compute_sector(conf, logical_sector, previous,
+ &dd_idx, NULL);
+ pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
+ new_sector, logical_sector);
+
+ sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
+ (bi->bi_opf & REQ_RAHEAD), 0);
+ if (unlikely(!sh)) {
+ /* cannot get stripe, just give-up */
+ bi->bi_status = BLK_STS_IOERR;
+ return STRIPE_FAIL;
+ }
+
+ if (unlikely(previous) &&
+ stripe_ahead_of_reshape(mddev, conf, sh)) {
+ /*
+ * Expansion moved on while waiting for a stripe.
+ * Expansion could still move past after this
+ * test, but as we are holding a reference to
+ * 'sh', we know that if that happens,
+ * STRIPE_EXPANDING will get set and the expansion
+ * won't proceed until we finish with the stripe.
+ */
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out_release;
+ }
+
+ if (read_seqcount_retry(&conf->gen_lock, seq)) {
+ /* Might have got the wrong stripe_head by accident */
+ ret = STRIPE_RETRY;
+ goto out_release;
+ }
+
+ if (test_bit(STRIPE_EXPANDING, &sh->state) ||
+ !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
+ /*
+ * Stripe is busy expanding or add failed due to
+ * overlap. Flush everything and wait a while.
+ */
+ md_wakeup_thread(mddev->thread);
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out_release;
+ }
+
+ if (stripe_can_batch(sh)) {
+ stripe_add_to_batch_list(conf, sh, ctx->batch_last);
+ if (ctx->batch_last)
+ raid5_release_stripe(ctx->batch_last);
+ atomic_inc(&sh->count);
+ ctx->batch_last = sh;
+ }
+
+ if (ctx->do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ ctx->do_flush = false;
+ }
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_opf & REQ_SYNC) &&
+ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ release_stripe_plug(mddev, sh);
+ return STRIPE_SUCCESS;
+
+out_release:
+ raid5_release_stripe(sh);
+ return ret;
+}
+
+static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct r5conf *conf = mddev->private;
+ sector_t logical_sector;
+ struct stripe_request_ctx ctx = {};
const int rw = bio_data_dir(bi);
- DEFINE_WAIT(w);
- bool do_prepare;
- bool do_flush = false;
+ enum stripe_result res;
+ int s, stripe_cnt;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = log_handle_flush_request(conf, bi);
@@ -5810,7 +6104,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
* if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
* we need to flush journal device
*/
- do_flush = bi->bi_opf & REQ_PREFLUSH;
+ ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
}
if (!md_write_start(mddev, bi))
@@ -5834,134 +6128,68 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
}
logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
- last_sector = bio_end_sector(bi);
+ ctx.first_sector = logical_sector;
+ ctx.last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
+ stripe_cnt = DIV_ROUND_UP_SECTOR_T(ctx.last_sector - logical_sector,
+ RAID5_STRIPE_SECTORS(conf));
+ bitmap_set(ctx.sectors_to_do, 0, stripe_cnt);
+
+ pr_debug("raid456: %s, logical %llu to %llu\n", __func__,
+ bi->bi_iter.bi_sector, ctx.last_sector);
+
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
(conf->reshape_progress != MaxSector) &&
- (mddev->reshape_backwards
- ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
- : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
+ !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
+ ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
bio_wouldblock_error(bi);
if (rw == WRITE)
md_write_end(mddev);
return true;
}
md_account_bio(mddev, &bi);
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
- int previous;
- int seq;
-
- do_prepare = false;
- retry:
- seq = read_seqcount_begin(&conf->gen_lock);
- previous = 0;
- if (do_prepare)
- prepare_to_wait(&conf->wait_for_overlap, &w,
- TASK_UNINTERRUPTIBLE);
- if (unlikely(conf->reshape_progress != MaxSector)) {
- /* spinlock is needed as reshape_progress may be
- * 64bit on a 32bit platform, and so it might be
- * possible to see a half-updated value
- * Of course reshape_progress could change after
- * the lock is dropped, so once we get a reference
- * to the stripe that we think it is, we will have
- * to check again.
- */
- spin_lock_irq(&conf->device_lock);
- if (mddev->reshape_backwards
- ? logical_sector < conf->reshape_progress
- : logical_sector >= conf->reshape_progress) {
- previous = 1;
- } else {
- if (mddev->reshape_backwards
- ? logical_sector < conf->reshape_safe
- : logical_sector >= conf->reshape_safe) {
- spin_unlock_irq(&conf->device_lock);
- schedule();
- do_prepare = true;
- goto retry;
- }
- }
- spin_unlock_irq(&conf->device_lock);
- }
- new_sector = raid5_compute_sector(conf, logical_sector,
- previous,
- &dd_idx, NULL);
- pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
- (unsigned long long)logical_sector);
+ add_wait_queue(&conf->wait_for_overlap, &wait);
+ while (1) {
+ res = make_stripe_request(mddev, conf, &ctx, logical_sector,
+ bi);
+ if (res == STRIPE_FAIL)
+ break;
- sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
- if (sh) {
- if (unlikely(previous)) {
- /* expansion might have moved on while waiting for a
- * stripe, so we must do the range check again.
- * Expansion could still move past after this
- * test, but as we are holding a reference to
- * 'sh', we know that if that happens,
- * STRIPE_EXPANDING will get set and the expansion
- * won't proceed until we finish with the stripe.
- */
- int must_retry = 0;
- spin_lock_irq(&conf->device_lock);
- if (mddev->reshape_backwards
- ? logical_sector >= conf->reshape_progress
- : logical_sector < conf->reshape_progress)
- /* mismatch, need to try again */
- must_retry = 1;
- spin_unlock_irq(&conf->device_lock);
- if (must_retry) {
- raid5_release_stripe(sh);
- schedule();
- do_prepare = true;
- goto retry;
- }
- }
- if (read_seqcount_retry(&conf->gen_lock, seq)) {
- /* Might have got the wrong stripe_head
- * by accident
- */
- raid5_release_stripe(sh);
- goto retry;
- }
+ if (res == STRIPE_RETRY)
+ continue;
- if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
- /* Stripe is busy expanding or
- * add failed due to overlap. Flush everything
- * and wait a while
- */
- md_wakeup_thread(mddev->thread);
- raid5_release_stripe(sh);
- schedule();
- do_prepare = true;
- goto retry;
- }
- if (do_flush) {
- set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
- /* we only need flush for one stripe */
- do_flush = false;
+ if (res == STRIPE_SCHEDULE_AND_RETRY) {
+ /*
+ * Must release the reference to batch_last before
+ * scheduling and waiting for work to be done,
+ * otherwise the batch_last stripe head could prevent
+ * raid5_activate_delayed() from making progress
+ * and thus deadlocking.
+ */
+ if (ctx.batch_last) {
+ raid5_release_stripe(ctx.batch_last);
+ ctx.batch_last = NULL;
}
- set_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
- if ((!sh->batch_head || sh == sh->batch_head) &&
- (bi->bi_opf & REQ_SYNC) &&
- !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe_plug(mddev, sh);
- } else {
- /* cannot get stripe for read-ahead, just give-up */
- bi->bi_status = BLK_STS_IOERR;
- break;
+ wait_woken(&wait, TASK_UNINTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
}
+
+ s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+ if (s == stripe_cnt)
+ break;
+
+ logical_sector = ctx.first_sector +
+ (s << RAID5_STRIPE_SHIFT(conf));
}
- finish_wait(&conf->wait_for_overlap, &w);
+ remove_wait_queue(&conf->wait_for_overlap, &wait);
+
+ if (ctx.batch_last)
+ raid5_release_stripe(ctx.batch_last);
if (rw == WRITE)
md_write_end(mddev);
@@ -7304,7 +7532,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
conf->mddev = mddev;
- if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+ ret = -ENOMEM;
+ conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!conf->stripe_hashtbl)
goto abort;
 	/* We init hash_locks[0] separately so that it can be used
@@ -7414,7 +7644,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- ret = register_shrinker(&conf->shrinker);
+ ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev));
if (ret) {
pr_warn("md/raid:%s: couldn't register shrinker.\n",
mdname(mddev));
@@ -7812,7 +8042,15 @@ static int raid5_run(struct mddev *mddev)
mddev->queue->limits.discard_granularity < stripe)
blk_queue_max_discard_sectors(mddev->queue, 0);
- blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
+ /*
+ * Requests require having a bitmap for each stripe.
+ * Limit the max sectors based on this.
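+	 * As an illustration: with a 4 KiB stripe size (eight 512-byte
+	 * sectors per stripe) this caps a request at
+	 * RAID5_MAX_REQ_STRIPES * 8 sectors.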
+ */
+ blk_queue_max_hw_sectors(mddev->queue,
+ RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf));
+
+ /* No restrictions on the number of segments in the request */
+ blk_queue_max_segments(mddev->queue, USHRT_MAX);
}
if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
@@ -8063,8 +8301,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* find the disk ... but prefer rdev->saved_raid_disk
* if possible.
*/
- if (rdev->saved_raid_disk >= 0 &&
- rdev->saved_raid_disk >= first &&
+ if (rdev->saved_raid_disk >= first &&
rdev->saved_raid_disk <= last &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
first = rdev->saved_raid_disk;
@@ -8701,8 +8938,11 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
err = log_init(conf, NULL, true);
if (!err) {
err = resize_stripes(conf, conf->pool_size);
- if (err)
+ if (err) {
+ mddev_suspend(mddev);
log_exit(conf);
+ mddev_resume(mddev);
+ }
}
} else
err = -EINVAL;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 638d29863503..a5082bed83c8 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -812,7 +812,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce);
+ bool previous, bool noblock, bool noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 8bf91b5a7d0e..41a79293ee02 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1309,8 +1309,11 @@ static int cec_config_log_addr(struct cec_adapter *adap,
* we assume that something is really weird and that it is not a
* good idea to try and claim this logical address.
*/
- if (i == max_retries)
+ if (i == max_retries) {
+ dprintk(0, "polling for LA %u failed with tx_status=0x%04x\n",
+ log_addr, msg.tx_status);
return 0;
+ }
/*
* Message not acknowledged, so this logical
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 8c8d8fc5e63e..3b583ed4da9d 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -217,6 +217,10 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
{ "Google", "Fizz", "0000:00:02.0", "Port B" },
/* Google Brask */
{ "Google", "Brask", "0000:00:02.0", "Port B" },
+ /* Google Moli */
+ { "Google", "Moli", "0000:00:02.0", "Port B" },
+ /* Google Kinox */
+ { "Google", "Kinox", "0000:00:02.0", "Port B" },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 7607b516a7c4..9b7bcdce6e44 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -266,6 +266,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XYUV32:
case V4L2_PIX_FMT_VUYA32:
case V4L2_PIX_FMT_VUYX32:
+ case V4L2_PIX_FMT_YUVA32:
+ case V4L2_PIX_FMT_YUVX32:
tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV420M:
@@ -412,6 +414,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XYUV32:
case V4L2_PIX_FMT_VUYA32:
case V4L2_PIX_FMT_VUYX32:
+ case V4L2_PIX_FMT_YUVA32:
+ case V4L2_PIX_FMT_YUVX32:
case V4L2_PIX_FMT_HSV32:
tpg->twopixelsize[0] = 2 * 4;
break;
@@ -1376,9 +1380,11 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset + 3] = b_v;
break;
case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_YUVX32:
alpha = 0;
fallthrough;
case V4L2_PIX_FMT_RGBA32:
+ case V4L2_PIX_FMT_YUVA32:
buf[0][offset] = r_y_h;
buf[0][offset + 1] = g_u_s;
buf[0][offset + 2] = b_v;
@@ -2402,6 +2408,44 @@ static void tpg_fill_plane_extras(const struct tpg_data *tpg,
((params->sav_eav_f ^ vact) << 1) |
(hact ^ vact ^ params->sav_eav_f);
}
+ if (tpg->insert_hdmi_video_guard_band) {
+ unsigned int i;
+
+ switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB24:
+ for (i = 0; i < 3 * 4; i += 3) {
+ vbuf[i] = 0xab;
+ vbuf[i + 1] = 0x55;
+ vbuf[i + 2] = 0xab;
+ }
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_BGRX32:
+ case V4L2_PIX_FMT_BGRA32:
+ for (i = 0; i < 4 * 4; i += 4) {
+ vbuf[i] = 0x00;
+ vbuf[i + 1] = 0xab;
+ vbuf[i + 2] = 0x55;
+ vbuf[i + 3] = 0xab;
+ }
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_RGBA32:
+ for (i = 0; i < 4 * 4; i += 4) {
+ vbuf[i] = 0xab;
+ vbuf[i + 1] = 0x55;
+ vbuf[i + 2] = 0xab;
+ vbuf[i + 3] = 0x00;
+ }
+ break;
+ }
+ }
}
static void tpg_fill_plane_pattern(const struct tpg_data *tpg,
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 075d24ebf44c..f26cb8586bd4 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -638,6 +638,18 @@ int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);
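+/*
+ * Find the buffer in @q whose copied timestamp matches @timestamp;
+ * returns NULL when there is no such buffer.
+ */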
+struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
+{
+ unsigned int i;
+
+ for (i = 0; i < q->num_buffers; i++)
+ if (q->bufs[i]->copied_timestamp &&
+ q->bufs[i]->timestamp == timestamp)
+ return vb2_get_buffer(q, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vb2_find_buffer);
+
/*
* vb2_querybuf() - query video buffer information
* @q: videobuf queue
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 2b20aa6c37b1..7806d4b81716 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -34,6 +34,19 @@ config VIDEO_APTINA_PLL
config VIDEO_CCS_PLL
tristate
+config VIDEO_AR0521
+ tristate "ON Semiconductor AR0521 sensor support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the ON Semiconductor
+ AR0521 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ar0521.
+
config VIDEO_HI556
tristate "Hynix Hi-556 sensor support"
depends on I2C && VIDEO_DEV
@@ -75,8 +88,10 @@ config VIDEO_HI847
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
- depends on I2C && VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_DEV
depends on MEDIA_CAMERA_SUPPORT
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX208 camera.
@@ -1178,6 +1193,7 @@ config VIDEO_ISL7998X
depends on OF_GPIO
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
help
Support for Intersil ISL7998x analog to MIPI-CSI2 or
BT.656 decoder.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 3e1696963e7f..0a2933103dd9 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_APTINA_PLL) += aptina-pll.o
+obj-$(CONFIG_VIDEO_AR0521) += ar0521.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
obj-$(CONFIG_VIDEO_BT856) += bt856.o
obj-$(CONFIG_VIDEO_BT866) += bt866.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index e3a57c178c6b..5fde5243722d 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -43,6 +43,7 @@
#define ADV7180_INPUT_CONTROL_INSEL_MASK 0x0f
#define ADV7182_REG_INPUT_VIDSEL 0x0002
+#define ADV7182_REG_INPUT_RESERVED BIT(2)
#define ADV7180_REG_OUTPUT_CONTROL 0x0003
#define ADV7180_REG_EXTENDED_OUTPUT_CONTROL 0x0004
@@ -1060,7 +1061,9 @@ static int adv7182_init(struct adv7180_state *state)
static int adv7182_set_std(struct adv7180_state *state, unsigned int std)
{
- return adv7180_write(state, ADV7182_REG_INPUT_VIDSEL, std << 4);
+ /* Failing to set the reserved bit can result in increased video noise */
+ return adv7180_write(state, ADV7182_REG_INPUT_VIDSEL,
+ (std << 4) | ADV7182_REG_INPUT_RESERVED);
}
enum adv7182_input_type {
diff --git a/drivers/media/i2c/adv7343_regs.h b/drivers/media/i2c/adv7343_regs.h
index 2f04ce4b9118..e0357e6272e3 100644
--- a/drivers/media/i2c/adv7343_regs.h
+++ b/drivers/media/i2c/adv7343_regs.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADV7343 encoder related structure and register definitions
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ADV7343_REGS_H
diff --git a/drivers/media/i2c/adv7393_regs.h b/drivers/media/i2c/adv7393_regs.h
index 78968330f0be..6eb8732b5324 100644
--- a/drivers/media/i2c/adv7393_regs.h
+++ b/drivers/media/i2c/adv7393_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADV7393 encoder related structure and register definitions
*
@@ -7,15 +8,6 @@
* Based on ADV7343 driver,
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ADV7393_REGS_H
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 31bac06d46b5..d75eb3d8be5a 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -417,7 +417,7 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
static inline struct v4l2_subdev *adv748x_get_remote_sd(struct media_pad *pad)
{
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad)
return NULL;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index bb0c8fc6d383..497419a5cfdd 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2505,9 +2505,8 @@ static void adv76xx_log_infoframes(struct v4l2_subdev *sd)
union hdmi_infoframe frame;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (adv76xx_read_infoframe(sd, i, &frame))
- return;
- hdmi_infoframe_log(KERN_INFO, &client->dev, &frame);
+ if (!adv76xx_read_infoframe(sd, i, &frame))
+ hdmi_infoframe_log(KERN_INFO, &client->dev, &frame);
}
}
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
new file mode 100644
index 000000000000..c7bdfc69b9be
--- /dev/null
+++ b/drivers/media/i2c/ar0521.c
@@ -0,0 +1,1061 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Sieć Badawcza Łukasiewicz
+ * - Przemysłowy Instytut Automatyki i Pomiarów PIAP
+ * Written by Krzysztof Hałasa
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* External clock (extclk) frequencies */
+#define AR0521_EXTCLK_MIN (10 * 1000 * 1000)
+#define AR0521_EXTCLK_MAX (48 * 1000 * 1000)
+
+/* PLL and PLL2 */
+#define AR0521_PLL_MIN (320 * 1000 * 1000)
+#define AR0521_PLL_MAX (1280 * 1000 * 1000)
+
+/* Effective pixel clocks, the registers may be DDR */
+#define AR0521_PIXEL_CLOCK_RATE (184 * 1000 * 1000)
+#define AR0521_PIXEL_CLOCK_MIN (168 * 1000 * 1000)
+#define AR0521_PIXEL_CLOCK_MAX (414 * 1000 * 1000)
+
+#define AR0521_WIDTH_MIN 8u
+#define AR0521_WIDTH_MAX 2608u
+#define AR0521_HEIGHT_MIN 8u
+#define AR0521_HEIGHT_MAX 1958u
+
+#define AR0521_WIDTH_BLANKING_MIN 572u
+#define AR0521_HEIGHT_BLANKING_MIN 38u /* must be even */
+#define AR0521_TOTAL_WIDTH_MIN 2968u
+
+/* AR0521 registers */
+#define AR0521_REG_VT_PIX_CLK_DIV 0x0300
+#define AR0521_REG_FRAME_LENGTH_LINES 0x0340
+
+#define AR0521_REG_CHIP_ID 0x3000
+#define AR0521_REG_COARSE_INTEGRATION_TIME 0x3012
+#define AR0521_REG_ROW_SPEED 0x3016
+#define AR0521_REG_EXTRA_DELAY 0x3018
+#define AR0521_REG_RESET 0x301A
+#define AR0521_REG_RESET_DEFAULTS 0x0238
+#define AR0521_REG_RESET_GROUP_PARAM_HOLD 0x8000
+#define AR0521_REG_RESET_STREAM BIT(2)
+#define AR0521_REG_RESET_RESTART BIT(1)
+#define AR0521_REG_RESET_INIT BIT(0)
+
+#define AR0521_REG_GREEN1_GAIN 0x3056
+#define AR0521_REG_BLUE_GAIN 0x3058
+#define AR0521_REG_RED_GAIN 0x305A
+#define AR0521_REG_GREEN2_GAIN 0x305C
+#define AR0521_REG_GLOBAL_GAIN 0x305E
+
+#define AR0521_REG_HISPI_TEST_MODE 0x3066
+#define AR0521_REG_HISPI_TEST_MODE_LP11 0x0004
+
+#define AR0521_REG_TEST_PATTERN_MODE 0x3070
+
+#define AR0521_REG_SERIAL_FORMAT 0x31AE
+#define AR0521_REG_SERIAL_FORMAT_MIPI 0x0200
+
+#define AR0521_REG_HISPI_CONTROL_STATUS 0x31C6
+#define AR0521_REG_HISPI_CONTROL_STATUS_FRAMER_TEST_MODE_ENABLE 0x80
+
+#define be cpu_to_be16
+
+static const char * const ar0521_supply_names[] = {
+ "vdd_io", /* I/O (1.8V) supply */
+ "vdd", /* Core, PLL and MIPI (1.2V) supply */
+ "vaa", /* Analog (2.7V) supply */
+};
+
+struct ar0521_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *blue_balance;
+ };
+ struct {
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ };
+ struct v4l2_ctrl *pixrate;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *test_pattern;
+};
+
+struct ar0521_dev {
+ struct i2c_client *i2c_client;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct clk *extclk;
+ u32 extclk_freq;
+
+ struct regulator *supplies[ARRAY_SIZE(ar0521_supply_names)];
+ struct gpio_desc *reset_gpio;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ struct v4l2_mbus_framefmt fmt;
+ struct ar0521_ctrls ctrls;
+ unsigned int lane_count;
+ u16 total_width;
+ u16 total_height;
+ u16 pll_pre;
+ u16 pll_mult;
+ u16 pll_pre2;
+ u16 pll_mult2;
+ bool streaming;
+};
+
+static inline struct ar0521_dev *to_ar0521_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ar0521_dev, sd);
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ar0521_dev,
+ ctrls.handler)->sd;
+}
+
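+/* Divide a 64-bit dividend, rounding to the nearest integer. */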
+static u32 div64_round(u64 v, u32 d)
+{
+ return div_u64(v + (d >> 1), d);
+}
+
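+/* Divide a 64-bit dividend, rounding up. */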
+static u32 div64_round_up(u64 v, u32 d)
+{
+ return div_u64(v + d - 1, d);
+}
+
+/* Data must be BE16, the first value is the register address */
+static int ar0521_write_regs(struct ar0521_dev *sensor, const __be16 *data,
+ unsigned int count)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ struct i2c_msg msg;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.buf = (u8 *)data;
+ msg.len = count * sizeof(*data);
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ if (ret < 0) {
+ v4l2_err(&sensor->sd, "%s: I2C write error\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ar0521_write_reg(struct ar0521_dev *sensor, u16 reg, u16 val)
+{
+ __be16 buf[2] = {be(reg), be(val)};
+
+ return ar0521_write_regs(sensor, buf, 2);
+}
+
+static int ar0521_set_geometry(struct ar0521_dev *sensor)
+{
+ /* All dimensions are unsigned 12-bit integers */
+ u16 x = (AR0521_WIDTH_MAX - sensor->fmt.width) / 2;
+ u16 y = ((AR0521_HEIGHT_MAX - sensor->fmt.height) / 2) & ~1;
+ __be16 regs[] = {
+ be(AR0521_REG_FRAME_LENGTH_LINES),
+ be(sensor->total_height),
+ be(sensor->total_width),
+ be(x),
+ be(y),
+ be(x + sensor->fmt.width - 1),
+ be(y + sensor->fmt.height - 1),
+ be(sensor->fmt.width),
+ be(sensor->fmt.height)
+ };
+
+ return ar0521_write_regs(sensor, regs, ARRAY_SIZE(regs));
+}
+
+static int ar0521_set_gains(struct ar0521_dev *sensor)
+{
+ int green = sensor->ctrls.gain->val;
+ int red = max(green + sensor->ctrls.red_balance->val, 0);
+ int blue = max(green + sensor->ctrls.blue_balance->val, 0);
+ unsigned int gain = min(red, min(green, blue));
+ unsigned int analog = min(gain, 64u); /* range is 0 - 127 */
+ __be16 regs[5];
+
+ red = min(red - analog + 64, 511u);
+ green = min(green - analog + 64, 511u);
+ blue = min(blue - analog + 64, 511u);
+ regs[0] = be(AR0521_REG_GREEN1_GAIN);
+ regs[1] = be(green << 7 | analog);
+ regs[2] = be(blue << 7 | analog);
+ regs[3] = be(red << 7 | analog);
+ regs[4] = be(green << 7 | analog);
+
+ return ar0521_write_regs(sensor, regs, ARRAY_SIZE(regs));
+}
+
+static u32 calc_pll(struct ar0521_dev *sensor, int num, u32 freq, u16 *pre_ptr,
+ u16 *mult_ptr)
+{
+ u16 pre = 1, mult = 1, new_pre;
+ u32 pll = AR0521_PLL_MAX + 1;
+
+ for (new_pre = 1; new_pre < 64; new_pre++) {
+ u32 new_pll;
+ u32 new_mult = div64_round_up((u64)freq * new_pre,
+ sensor->extclk_freq);
+
+ if (new_mult < 32)
+ continue; /* Minimum value */
+ if (new_mult > 254)
+ break; /* Maximum, larger pre won't work either */
+ if (sensor->extclk_freq * (u64)new_mult < AR0521_PLL_MIN *
+ new_pre)
+ continue;
+ if (sensor->extclk_freq * (u64)new_mult > AR0521_PLL_MAX *
+ new_pre)
+ break; /* Larger pre won't work either */
+ new_pll = div64_round_up(sensor->extclk_freq * (u64)new_mult,
+ new_pre);
+ if (new_pll < pll) {
+ pll = new_pll;
+ pre = new_pre;
+ mult = new_mult;
+ }
+ }
+
+ pll = div64_round(sensor->extclk_freq * (u64)mult, pre);
+ *pre_ptr = pre;
+ *mult_ptr = mult;
+ return pll;
+}
+
+#define DIV 4
+static void ar0521_calc_mode(struct ar0521_dev *sensor)
+{
+ unsigned int speed_mod = 4 / sensor->lane_count; /* 1 with 4 DDR lanes */
+ u16 total_width = max(sensor->fmt.width + AR0521_WIDTH_BLANKING_MIN,
+ AR0521_TOTAL_WIDTH_MIN);
+ u16 total_height = sensor->fmt.height + AR0521_HEIGHT_BLANKING_MIN;
+
+ /* Calculate approximate pixel clock first */
+ u64 pix_clk = AR0521_PIXEL_CLOCK_RATE;
+
+ /* PLL1 drives pixel clock - dual rate */
+ pix_clk = calc_pll(sensor, 1, pix_clk * (DIV / 2), &sensor->pll_pre,
+ &sensor->pll_mult);
+ pix_clk = div64_round(pix_clk, (DIV / 2));
+ calc_pll(sensor, 2, pix_clk * (DIV / 2) * speed_mod, &sensor->pll_pre2,
+ &sensor->pll_mult2);
+
+ sensor->total_width = total_width;
+ sensor->total_height = total_height;
+}
+
+static int ar0521_write_mode(struct ar0521_dev *sensor)
+{
+ __be16 pll_regs[] = {
+ be(AR0521_REG_VT_PIX_CLK_DIV),
+ /* 0x300 */ be(4), /* vt_pix_clk_div = number of bits / 2 */
+ /* 0x302 */ be(1), /* vt_sys_clk_div */
+ /* 0x304 */ be((sensor->pll_pre2 << 8) | sensor->pll_pre),
+ /* 0x306 */ be((sensor->pll_mult2 << 8) | sensor->pll_mult),
+ /* 0x308 */ be(8), /* op_pix_clk_div = 2 * vt_pix_clk_div */
+ /* 0x30A */ be(1) /* op_sys_clk_div */
+ };
+ int ret;
+
+ /* Stop streaming for just a moment */
+ ret = ar0521_write_reg(sensor, AR0521_REG_RESET,
+ AR0521_REG_RESET_DEFAULTS);
+ if (ret)
+ return ret;
+
+ ret = ar0521_set_geometry(sensor);
+ if (ret)
+ return ret;
+
+ ret = ar0521_write_regs(sensor, pll_regs, ARRAY_SIZE(pll_regs));
+ if (ret)
+ return ret;
+
+ ret = ar0521_write_reg(sensor, AR0521_REG_COARSE_INTEGRATION_TIME,
+ sensor->ctrls.exposure->val);
+ if (ret)
+ return ret;
+
+ ret = ar0521_write_reg(sensor, AR0521_REG_RESET,
+ AR0521_REG_RESET_DEFAULTS |
+ AR0521_REG_RESET_STREAM);
+ if (ret)
+ return ret;
+
+ ret = ar0521_write_reg(sensor, AR0521_REG_TEST_PATTERN_MODE,
+ sensor->ctrls.test_pattern->val);
+ return ret;
+}
+
+static int ar0521_set_stream(struct ar0521_dev *sensor, bool on)
+{
+ int ret;
+
+ if (on) {
+ ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev);
+ if (ret < 0)
+ return ret;
+
+ ar0521_calc_mode(sensor);
+ ret = ar0521_write_mode(sensor);
+ if (ret)
+ goto err;
+
+ ret = ar0521_set_gains(sensor);
+ if (ret)
+ goto err;
+
+ /* Exit LP-11 mode on clock and data lanes */
+ ret = ar0521_write_reg(sensor, AR0521_REG_HISPI_CONTROL_STATUS,
+ 0);
+ if (ret)
+ goto err;
+
+ /* Start streaming */
+ ret = ar0521_write_reg(sensor, AR0521_REG_RESET,
+ AR0521_REG_RESET_DEFAULTS |
+ AR0521_REG_RESET_STREAM);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ pm_runtime_put(&sensor->i2c_client->dev);
+ return ret;
+
+ } else {
+ /*
+ * Reset gain, the sensor may produce all white pixels without
+ * this
+ */
+ ret = ar0521_write_reg(sensor, AR0521_REG_GLOBAL_GAIN, 0x2000);
+ if (ret)
+ return ret;
+
+ /* Stop streaming */
+ ret = ar0521_write_reg(sensor, AR0521_REG_RESET,
+ AR0521_REG_RESET_DEFAULTS);
+ if (ret)
+ return ret;
+
+ pm_runtime_put(&sensor->i2c_client->dev);
+ return 0;
+ }
+}
+
+static void ar0521_adj_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = clamp(ALIGN(fmt->width, 4), AR0521_WIDTH_MIN,
+ AR0521_WIDTH_MAX);
+ fmt->height = clamp(ALIGN(fmt->height, 4), AR0521_HEIGHT_MIN,
+ AR0521_HEIGHT_MAX);
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int ar0521_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ mutex_lock(&sensor->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state, 0
+ /* pad */);
+ else
+ fmt = &sensor->fmt;
+
+ format->format = *fmt;
+
+ mutex_unlock(&sensor->lock);
+ return 0;
+}
+
+static int ar0521_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ int ret = 0;
+
+ ar0521_adj_fmt(&format->format);
+
+ mutex_lock(&sensor->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, 0 /* pad */);
+ *fmt = format->format;
+ } else {
+ sensor->fmt = format->format;
+ ar0521_calc_mode(sensor);
+ }
+
+ mutex_unlock(&sensor->lock);
+ return ret;
+}
+
+static int ar0521_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ int ret;
+
+ /* v4l2_ctrl_lock() locks our own mutex */
+
+ switch (ctrl->id) {
+ case V4L2_CID_HBLANK:
+ case V4L2_CID_VBLANK:
+ sensor->total_width = sensor->fmt.width +
+ sensor->ctrls.hblank->val;
+		sensor->total_height = sensor->fmt.height +
+ sensor->ctrls.vblank->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /* access the sensor only if it's powered up */
+ if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HBLANK:
+ case V4L2_CID_VBLANK:
+ ret = ar0521_set_geometry(sensor);
+ break;
+ case V4L2_CID_GAIN:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ ret = ar0521_set_gains(sensor);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ar0521_write_reg(sensor,
+ AR0521_REG_COARSE_INTEGRATION_TIME,
+ ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ar0521_write_reg(sensor, AR0521_REG_TEST_PATTERN_MODE,
+ ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&sensor->i2c_client->dev);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ar0521_ctrl_ops = {
+ .s_ctrl = ar0521_s_ctrl,
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Solid color",
+ "Color bars",
+ "Faded color bars"
+};
+
+static int ar0521_init_controls(struct ar0521_dev *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &ar0521_ctrl_ops;
+ struct ar0521_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 32);
+
+ /* We can use our own mutex for the ctrl lock */
+ hdl->lock = &sensor->lock;
+
+ /* Manual gain */
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 511, 1, 0);
+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+ -512, 511, 1, 0);
+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+ -512, 511, 1, 0);
+ v4l2_ctrl_cluster(3, &ctrls->gain);
+
+ ctrls->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK,
+ AR0521_WIDTH_BLANKING_MIN, 4094, 1,
+ AR0521_WIDTH_BLANKING_MIN);
+ ctrls->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ AR0521_HEIGHT_BLANKING_MIN, 4094, 2,
+ AR0521_HEIGHT_BLANKING_MIN);
+ v4l2_ctrl_cluster(2, &ctrls->hblank);
+
+ /* Read-only */
+ ctrls->pixrate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ AR0521_PIXEL_CLOCK_MIN,
+ AR0521_PIXEL_CLOCK_MAX, 1,
+ AR0521_PIXEL_CLOCK_RATE);
+
+ /* Manual exposure time */
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE, 0,
+ 65535, 1, 360);
+
+ ctrls->test_pattern = v4l2_ctrl_new_std_menu_items(hdl, ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0, 0, test_pattern_menu);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_ctrls;
+ }
+
+ sensor->sd.ctrl_handler = hdl;
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+}
+
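+/*
+ * REGS() packs a register address and the 16-bit values that follow it
+ * into one initial_reg entry; data[0] is the register address.
+ */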
+#define REGS_ENTRY(a) {(a), ARRAY_SIZE(a)}
+#define REGS(...) REGS_ENTRY(((const __be16[]){__VA_ARGS__}))
+
+static const struct initial_reg {
+ const __be16 *data; /* data[0] is register address */
+ unsigned int count;
+} initial_regs[] = {
+ REGS(be(0x0112), be(0x0808)), /* 8-bit/8-bit mode */
+
+ /* PEDESTAL+2 :+2 is a workaround for 10bit mode +0.5 rounding */
+ REGS(be(0x301E), be(0x00AA)),
+
+ /* corrections_recommended_bayer */
+ REGS(be(0x3042),
+ be(0x0004), /* 3042: RNC: enable b/w rnc mode */
+ be(0x4580)), /* 3044: RNC: enable row noise correction */
+
+ REGS(be(0x30D2),
+ be(0x0000), /* 30D2: CRM/CC: enable crm on Visible and CC rows */
+ be(0x0000), /* 30D4: CC: CC enabled with 16 samples per column */
+ /* 30D6: CC: bw mode enabled/12 bit data resolution/bw mode */
+ be(0x2FFF)),
+
+ REGS(be(0x30DA),
+ be(0x0FFF), /* 30DA: CC: column correction clip level 2 is 0 */
+ be(0x0FFF), /* 30DC: CC: column correction clip level 3 is 0 */
+ be(0x0000)), /* 30DE: CC: Group FPN correction */
+
+ /* RNC: rnc scaling factor = * 54 / 64 (32 / 38 * 64 = 53.9) */
+ REGS(be(0x30EE), be(0x1136)),
+ REGS(be(0x30FA), be(0xFD00)), /* GPIO0 = flash, GPIO1 = shutter */
+ REGS(be(0x3120), be(0x0005)), /* p1 dither enabled for 10bit mode */
+ REGS(be(0x3172), be(0x0206)), /* txlo clk divider options */
+	/* FDOC: fdoc settings with fdoc every frame turned off */
+ REGS(be(0x3180), be(0x9434)),
+
+ REGS(be(0x31B0),
+ be(0x008B), /* 31B0: frame_preamble - FIXME check WRT lanes# */
+ be(0x0050)), /* 31B2: line_preamble - FIXME check WRT lanes# */
+
+ /* don't use continuous clock mode while shut down */
+ REGS(be(0x31BC), be(0x068C)),
+ REGS(be(0x31E0), be(0x0781)), /* Fuse/2DDC: enable 2ddc */
+
+ /* analog_setup_recommended_10bit */
+ REGS(be(0x341A), be(0x4735)), /* Samp&Hold pulse in ADC */
+ REGS(be(0x3420), be(0x4735)), /* Samp&Hold pulse in ADC */
+ REGS(be(0x3426), be(0x8A1A)), /* ADC offset distribution pulse */
+ REGS(be(0x342A), be(0x0018)), /* pulse_config */
+
+ /* pixel_timing_recommended */
+ REGS(be(0x3D00),
+ /* 3D00 */ be(0x043E), be(0x4760), be(0xFFFF), be(0xFFFF),
+ /* 3D08 */ be(0x8000), be(0x0510), be(0xAF08), be(0x0252),
+ /* 3D10 */ be(0x486F), be(0x5D5D), be(0x8056), be(0x8313),
+ /* 3D18 */ be(0x0087), be(0x6A48), be(0x6982), be(0x0280),
+ /* 3D20 */ be(0x8359), be(0x8D02), be(0x8020), be(0x4882),
+ /* 3D28 */ be(0x4269), be(0x6A95), be(0x5988), be(0x5A83),
+ /* 3D30 */ be(0x5885), be(0x6280), be(0x6289), be(0x6097),
+ /* 3D38 */ be(0x5782), be(0x605C), be(0xBF18), be(0x0961),
+ /* 3D40 */ be(0x5080), be(0x2090), be(0x4390), be(0x4382),
+ /* 3D48 */ be(0x5F8A), be(0x5D5D), be(0x9C63), be(0x8063),
+ /* 3D50 */ be(0xA960), be(0x9757), be(0x8260), be(0x5CFF),
+ /* 3D58 */ be(0xBF10), be(0x1681), be(0x0802), be(0x8000),
+ /* 3D60 */ be(0x141C), be(0x6000), be(0x6022), be(0x4D80),
+ /* 3D68 */ be(0x5C97), be(0x6A69), be(0xAC6F), be(0x4645),
+ /* 3D70 */ be(0x4400), be(0x0513), be(0x8069), be(0x6AC6),
+ /* 3D78 */ be(0x5F95), be(0x5F70), be(0x8040), be(0x4A81),
+ /* 3D80 */ be(0x0300), be(0xE703), be(0x0088), be(0x4A83),
+ /* 3D88 */ be(0x40FF), be(0xFFFF), be(0xFD70), be(0x8040),
+ /* 3D90 */ be(0x4A85), be(0x4FA8), be(0x4F8C), be(0x0070),
+ /* 3D98 */ be(0xBE47), be(0x8847), be(0xBC78), be(0x6B89),
+ /* 3DA0 */ be(0x6A80), be(0x6986), be(0x6B8E), be(0x6B80),
+ /* 3DA8 */ be(0x6980), be(0x6A88), be(0x7C9F), be(0x866B),
+ /* 3DB0 */ be(0x8765), be(0x46FF), be(0xE365), be(0xA679),
+ /* 3DB8 */ be(0x4A40), be(0x4580), be(0x44BC), be(0x7000),
+ /* 3DC0 */ be(0x8040), be(0x0802), be(0x10EF), be(0x0104),
+ /* 3DC8 */ be(0x3860), be(0x5D5D), be(0x5682), be(0x1300),
+ /* 3DD0 */ be(0x8648), be(0x8202), be(0x8082), be(0x598A),
+ /* 3DD8 */ be(0x0280), be(0x2048), be(0x3060), be(0x8042),
+ /* 3DE0 */ be(0x9259), be(0x865A), be(0x8258), be(0x8562),
+ /* 3DE8 */ be(0x8062), be(0x8560), be(0x9257), be(0x8221),
+ /* 3DF0 */ be(0x10FF), be(0xB757), be(0x9361), be(0x1019),
+ /* 3DF8 */ be(0x8020), be(0x9043), be(0x8E43), be(0x845F),
+ /* 3E00 */ be(0x835D), be(0x805D), be(0x8163), be(0x8063),
+ /* 3E08 */ be(0xA060), be(0x9157), be(0x8260), be(0x5CFF),
+ /* 3E10 */ be(0xFFFF), be(0xFFE5), be(0x1016), be(0x2048),
+ /* 3E18 */ be(0x0802), be(0x1C60), be(0x0014), be(0x0060),
+ /* 3E20 */ be(0x2205), be(0x8120), be(0x908F), be(0x6A80),
+ /* 3E28 */ be(0x6982), be(0x5F9F), be(0x6F46), be(0x4544),
+ /* 3E30 */ be(0x0005), be(0x8013), be(0x8069), be(0x6A80),
+ /* 3E38 */ be(0x7000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E40 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E48 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E50 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E58 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E60 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E68 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E70 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E78 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E80 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E88 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E90 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3E98 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3EA0 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3EA8 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000),
+ /* 3EB0 */ be(0x0000), be(0x0000), be(0x0000)),
+
+ REGS(be(0x3EB6), be(0x004C)), /* ECL */
+
+ REGS(be(0x3EBA),
+ be(0xAAAD), /* 3EBA */
+ be(0x0086)), /* 3EBC: Bias currents for FSC/ECL */
+
+ REGS(be(0x3EC0),
+ be(0x1E00), /* 3EC0: SFbin/SH mode settings */
+ be(0x100A), /* 3EC2: CLK divider for ramp for 10 bit 400MH */
+ /* 3EC4: FSC clamps for HDR mode and adc comp power down co */
+ be(0x3300),
+ be(0xEA44), /* 3EC6: VLN and clk gating controls */
+ be(0x6F6F), /* 3EC8: Txl0 and Txlo1 settings for normal mode */
+ be(0x2F4A), /* 3ECA: CDAC/Txlo2/RSTGHI/RSTGLO settings */
+ be(0x0506), /* 3ECC: RSTDHI/RSTDLO/CDAC/TXHI settings */
+ /* 3ECE: Ramp buffer settings and Booster enable (bits 0-5) */
+ be(0x203B),
+ be(0x13F0), /* 3ED0: TXLO from atest/sf bin settings */
+ be(0xA53D), /* 3ED2: Ramp offset */
+ be(0x862F), /* 3ED4: TXLO open loop/row driver settings */
+ be(0x4081), /* 3ED6: Txlatch fr cfpn rows/vln bias */
+ be(0x8003), /* 3ED8: Ramp step setting for 10 bit 400 Mhz */
+ be(0xA580), /* 3EDA: Ramp Offset */
+ be(0xC000), /* 3EDC: over range for rst and under range for sig */
+ be(0xC103)), /* 3EDE: over range for sig and col dec clk settings */
+
+ /* corrections_recommended_bayer */
+ REGS(be(0x3F00),
+ be(0x0017), /* 3F00: BM_T0 */
+ be(0x02DD), /* 3F02: BM_T1 */
+ /* 3F04: if Ana_gain less than 2, use noise_floor0, multipl */
+ be(0x0020),
+ /* 3F06: if Ana_gain between 4 and 7, use noise_floor2 and */
+ be(0x0040),
+ /* 3F08: if Ana_gain between 4 and 7, use noise_floor2 and */
+ be(0x0070),
+ /* 3F0A: Define noise_floor0(low address) and noise_floor1 */
+ be(0x0101),
+ be(0x0302)), /* 3F0C: Define noise_floor2 and noise_floor3 */
+
+ REGS(be(0x3F10),
+ be(0x0505), /* 3F10: single k factor 0 */
+ be(0x0505), /* 3F12: single k factor 1 */
+ be(0x0505), /* 3F14: single k factor 2 */
+ be(0x01FF), /* 3F16: cross factor 0 */
+ be(0x01FF), /* 3F18: cross factor 1 */
+ be(0x01FF), /* 3F1A: cross factor 2 */
+ be(0x0022)), /* 3F1E */
+
+ /* GTH_THRES_RTN: 4max,4min filtered out of every 46 samples and */
+ REGS(be(0x3F2C), be(0x442E)),
+
+ REGS(be(0x3F3E),
+ be(0x0000), /* 3F3E: Switch ADC from 12 bit to 10 bit mode */
+ be(0x1511), /* 3F40: couple k factor 0 */
+ be(0x1511), /* 3F42: couple k factor 1 */
+ be(0x0707)), /* 3F44: couple k factor 2 */
+};
+
+static int ar0521_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ int i;
+
+ clk_disable_unprepare(sensor->extclk);
+
+ if (sensor->reset_gpio)
+ gpiod_set_value(sensor->reset_gpio, 1); /* assert RESET signal */
+
+ for (i = ARRAY_SIZE(ar0521_supply_names) - 1; i >= 0; i--) {
+ if (sensor->supplies[i])
+ regulator_disable(sensor->supplies[i]);
+ }
+ return 0;
+}
+
+static int ar0521_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ unsigned int cnt;
+ int ret;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(ar0521_supply_names); cnt++)
+ if (sensor->supplies[cnt]) {
+ ret = regulator_enable(sensor->supplies[cnt]);
+ if (ret < 0)
+ goto off;
+
+ usleep_range(1000, 1500); /* min 1 ms */
+ }
+
+ ret = clk_prepare_enable(sensor->extclk);
+ if (ret < 0) {
+ v4l2_err(&sensor->sd, "error enabling sensor clock\n");
+ goto off;
+ }
+ usleep_range(1000, 1500); /* min 1 ms */
+
+ if (sensor->reset_gpio)
+ /* deassert RESET signal */
+ gpiod_set_value(sensor->reset_gpio, 0);
+ usleep_range(4500, 5000); /* min 45000 clocks */
+
+	for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) {
+		ret = ar0521_write_regs(sensor, initial_regs[cnt].data,
+					initial_regs[cnt].count);
+		if (ret)
+			goto off;
+	}
+
+ ret = ar0521_write_reg(sensor, AR0521_REG_SERIAL_FORMAT,
+ AR0521_REG_SERIAL_FORMAT_MIPI |
+ sensor->lane_count);
+ if (ret)
+ goto off;
+
+ /* set MIPI test mode - disabled for now */
+ ret = ar0521_write_reg(sensor, AR0521_REG_HISPI_TEST_MODE,
+ ((0x40 << sensor->lane_count) - 0x40) |
+ AR0521_REG_HISPI_TEST_MODE_LP11);
+ if (ret)
+ goto off;
+
+ ret = ar0521_write_reg(sensor, AR0521_REG_ROW_SPEED, 0x110 |
+ 4 / sensor->lane_count);
+ if (ret)
+ goto off;
+
+ return 0;
+off:
+ ar0521_power_off(dev);
+ return ret;
+}
+
+static int ar0521_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = sensor->fmt.code;
+ return 0;
+}
+
+static int ar0521_pre_streamon(struct v4l2_subdev *sd, u32 flags)
+{
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ int ret;
+
+ if (!(flags & V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP))
+ return -EACCES;
+
+ ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Set LP-11 on clock and data lanes */
+ ret = ar0521_write_reg(sensor, AR0521_REG_HISPI_CONTROL_STATUS,
+ AR0521_REG_HISPI_CONTROL_STATUS_FRAMER_TEST_MODE_ENABLE);
+ if (ret)
+ goto err;
+
+ /* Start streaming LP-11 */
+ ret = ar0521_write_reg(sensor, AR0521_REG_RESET,
+ AR0521_REG_RESET_DEFAULTS |
+ AR0521_REG_RESET_STREAM);
+ if (ret)
+ goto err;
+ return 0;
+
+err:
+ pm_runtime_put(&sensor->i2c_client->dev);
+ return ret;
+}
+
+static int ar0521_post_streamoff(struct v4l2_subdev *sd)
+{
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ pm_runtime_put(&sensor->i2c_client->dev);
+ return 0;
+}
+
+static int ar0521_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+ int ret;
+
+ mutex_lock(&sensor->lock);
+
+ ret = ar0521_set_stream(sensor, enable);
+ if (!ret)
+ sensor->streaming = enable;
+
+ mutex_unlock(&sensor->lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops ar0521_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+};
+
+static const struct v4l2_subdev_video_ops ar0521_video_ops = {
+ .s_stream = ar0521_s_stream,
+ .pre_streamon = ar0521_pre_streamon,
+ .post_streamoff = ar0521_post_streamoff,
+};
+
+static const struct v4l2_subdev_pad_ops ar0521_pad_ops = {
+ .enum_mbus_code = ar0521_enum_mbus_code,
+ .get_fmt = ar0521_get_fmt,
+ .set_fmt = ar0521_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ar0521_subdev_ops = {
+ .core = &ar0521_core_ops,
+ .video = &ar0521_video_ops,
+ .pad = &ar0521_pad_ops,
+};
+
+static int __maybe_unused ar0521_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ if (sensor->streaming)
+ ar0521_set_stream(sensor, 0);
+
+ return 0;
+}
+
+static int __maybe_unused ar0521_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ if (sensor->streaming)
+ return ar0521_set_stream(sensor, 1);
+
+ return 0;
+}
+
+static int ar0521_probe(struct i2c_client *client)
+{
+ struct v4l2_fwnode_endpoint ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct device *dev = &client->dev;
+ struct fwnode_handle *endpoint;
+ struct ar0521_dev *sensor;
+ unsigned int cnt;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->i2c_client = client;
+ sensor->fmt.width = AR0521_WIDTH_MAX;
+ sensor->fmt.height = AR0521_HEIGHT_MAX;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(endpoint, &ep);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "could not parse endpoint\n");
+ return ret;
+ }
+
+ if (ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "invalid bus type, must be MIPI CSI2\n");
+ return -EINVAL;
+ }
+
+ sensor->lane_count = ep.bus.mipi_csi2.num_data_lanes;
+ switch (sensor->lane_count) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ dev_err(dev, "invalid number of MIPI data lanes\n");
+ return -EINVAL;
+ }
+
+ /* Get master clock (extclk) */
+ sensor->extclk = devm_clk_get(dev, "extclk");
+ if (IS_ERR(sensor->extclk)) {
+ dev_err(dev, "failed to get extclk\n");
+ return PTR_ERR(sensor->extclk);
+ }
+
+ sensor->extclk_freq = clk_get_rate(sensor->extclk);
+
+ if (sensor->extclk_freq < AR0521_EXTCLK_MIN ||
+ sensor->extclk_freq > AR0521_EXTCLK_MAX) {
+ dev_err(dev, "extclk frequency out of range: %u Hz\n",
+ sensor->extclk_freq);
+ return -EINVAL;
+ }
+
+ /* Request optional reset pin (usually active low) and assert it */
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &ar0521_subdev_ops);
+
+ sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret)
+ return ret;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(ar0521_supply_names); cnt++) {
+ struct regulator *supply = devm_regulator_get(dev,
+ ar0521_supply_names[cnt]);
+
+ if (IS_ERR(supply)) {
+ dev_info(dev, "no %s regulator found: %li\n",
+ ar0521_supply_names[cnt], PTR_ERR(supply));
+ return PTR_ERR(supply);
+ }
+ sensor->supplies[cnt] = supply;
+ }
+
+ mutex_init(&sensor->lock);
+
+ ret = ar0521_init_controls(sensor);
+ if (ret)
+ goto entity_cleanup;
+
+ ar0521_adj_fmt(&sensor->fmt);
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret)
+ goto free_ctrls;
+
+ /* Turn on the device and enable runtime PM */
+ ret = ar0521_power_on(&client->dev);
+ if (ret)
+ goto disable;
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+ return 0;
+
+disable:
+ v4l2_async_unregister_subdev(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+free_ctrls:
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+entity_cleanup:
+ media_entity_cleanup(&sensor->sd.entity);
+ mutex_destroy(&sensor->lock);
+ return ret;
+}
+
+static int ar0521_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ v4l2_async_unregister_subdev(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ar0521_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ mutex_destroy(&sensor->lock);
+ return 0;
+}
+
+static const struct dev_pm_ops ar0521_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ar0521_suspend, ar0521_resume)
+ SET_RUNTIME_PM_OPS(ar0521_power_off, ar0521_power_on, NULL)
+};
+
+static const struct of_device_id ar0521_dt_ids[] = {
+ {.compatible = "onnn,ar0521"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ar0521_dt_ids);
+
+static struct i2c_driver ar0521_i2c_driver = {
+ .driver = {
+ .name = "ar0521",
+ .pm = &ar0521_pm_ops,
+ .of_match_table = ar0521_dt_ids,
+ },
+ .probe_new = ar0521_probe,
+ .remove = ar0521_remove,
+};
+
+module_i2c_driver(ar0521_i2c_driver);
+
+MODULE_DESCRIPTION("AR0521 MIPI Camera subdev driver");
+MODULE_AUTHOR("Krzysztof Hałasa <khalasa@piap.pl>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index cbce8b88dbcf..1fd4dc6e4726 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -623,12 +623,22 @@ static int mt9p031_get_selection(struct v4l2_subdev *subdev,
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = MT9P031_COLUMN_START_MIN;
+ sel->r.top = MT9P031_ROW_START_MIN;
+ sel->r.width = MT9P031_WINDOW_WIDTH_MAX;
+ sel->r.height = MT9P031_WINDOW_HEIGHT_MAX;
+ return 0;
- sel->r = *__mt9p031_get_pad_crop(mt9p031, sd_state, sel->pad,
- sel->which);
- return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__mt9p031_get_pad_crop(mt9p031, sd_state,
+ sel->pad, sel->which);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
}
static int mt9p031_set_selection(struct v4l2_subdev *subdev,
@@ -682,6 +692,37 @@ static int mt9p031_set_selection(struct v4l2_subdev *subdev,
return 0;
}
+static int mt9p031_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ const int which = sd_state == NULL ? V4L2_SUBDEV_FORMAT_ACTIVE :
+ V4L2_SUBDEV_FORMAT_TRY;
+
+	crop = __mt9p031_get_pad_crop(mt9p031, sd_state, 0, which);
+ crop->left = MT9P031_COLUMN_START_DEF;
+ crop->top = MT9P031_ROW_START_DEF;
+ crop->width = MT9P031_WINDOW_WIDTH_DEF;
+ crop->height = MT9P031_WINDOW_HEIGHT_DEF;
+
+ format = __mt9p031_get_pad_format(mt9p031, sd_state, 0, which);
+
+ if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
+ format->code = MEDIA_BUS_FMT_Y12_1X12;
+ else
+ format->code = MEDIA_BUS_FMT_SGRBG12_1X12;
+
+ format->width = MT9P031_WINDOW_WIDTH_DEF;
+ format->height = MT9P031_WINDOW_HEIGHT_DEF;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 subdev control operations
*/
@@ -980,28 +1021,6 @@ static int mt9p031_registered(struct v4l2_subdev *subdev)
static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
- struct mt9p031 *mt9p031 = to_mt9p031(subdev);
- struct v4l2_mbus_framefmt *format;
- struct v4l2_rect *crop;
-
- crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
- crop->left = MT9P031_COLUMN_START_DEF;
- crop->top = MT9P031_ROW_START_DEF;
- crop->width = MT9P031_WINDOW_WIDTH_DEF;
- crop->height = MT9P031_WINDOW_HEIGHT_DEF;
-
- format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
-
- if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
- format->code = MEDIA_BUS_FMT_Y12_1X12;
- else
- format->code = MEDIA_BUS_FMT_SGRBG12_1X12;
-
- format->width = MT9P031_WINDOW_WIDTH_DEF;
- format->height = MT9P031_WINDOW_HEIGHT_DEF;
- format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
-
return mt9p031_set_power(subdev, 1);
}
@@ -1019,6 +1038,7 @@ static const struct v4l2_subdev_video_ops mt9p031_subdev_video_ops = {
};
static const struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
+ .init_cfg = mt9p031_init_cfg,
.enum_mbus_code = mt9p031_enum_mbus_code,
.enum_frame_size = mt9p031_enum_frame_size,
.get_fmt = mt9p031_get_format,
@@ -1166,20 +1186,9 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- mt9p031->crop.width = MT9P031_WINDOW_WIDTH_DEF;
- mt9p031->crop.height = MT9P031_WINDOW_HEIGHT_DEF;
- mt9p031->crop.left = MT9P031_COLUMN_START_DEF;
- mt9p031->crop.top = MT9P031_ROW_START_DEF;
-
- if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
- mt9p031->format.code = MEDIA_BUS_FMT_Y12_1X12;
- else
- mt9p031->format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
-
- mt9p031->format.width = MT9P031_WINDOW_WIDTH_DEF;
- mt9p031->format.height = MT9P031_WINDOW_HEIGHT_DEF;
- mt9p031->format.field = V4L2_FIELD_NONE;
- mt9p031->format.colorspace = V4L2_COLORSPACE_SRGB;
+ ret = mt9p031_init_cfg(&mt9p031->subdev, NULL);
+ if (ret)
+ goto done;
mt9p031->reset = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_HIGH);
@@ -1214,6 +1223,7 @@ static int mt9p031_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9p031_id[] = {
+ { "mt9p006", MT9P031_MODEL_COLOR },
{ "mt9p031", MT9P031_MODEL_COLOR },
{ "mt9p031m", MT9P031_MODEL_MONOCHROME },
{ }
@@ -1222,6 +1232,7 @@ MODULE_DEVICE_TABLE(i2c, mt9p031_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id mt9p031_of_match[] = {
+ { .compatible = "aptina,mt9p006", },
{ .compatible = "aptina,mt9p031", },
{ .compatible = "aptina,mt9p031m", },
{ /* sentinel */ },
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index db5a19babe67..502f0b62e950 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -29,8 +29,21 @@
#define OV5640_XCLK_MIN 6000000
#define OV5640_XCLK_MAX 54000000
+#define OV5640_NATIVE_WIDTH 2624
+#define OV5640_NATIVE_HEIGHT 1964
+#define OV5640_PIXEL_ARRAY_TOP 14
+#define OV5640_PIXEL_ARRAY_LEFT 16
+#define OV5640_PIXEL_ARRAY_WIDTH 2592
+#define OV5640_PIXEL_ARRAY_HEIGHT 1944
+
+/* FIXME: not documented. */
+#define OV5640_MIN_VBLANK 24
+#define OV5640_MAX_VTS 3375
+
#define OV5640_DEFAULT_SLAVE_ID 0x3c
+#define OV5640_LINK_RATE_MAX 490000000U
+
#define OV5640_REG_SYS_RESET02 0x3002
#define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
#define OV5640_REG_SYS_CTRL0 0x3008
@@ -59,10 +72,16 @@
#define OV5640_REG_AEC_PK_MANUAL 0x3503
#define OV5640_REG_AEC_PK_REAL_GAIN 0x350a
#define OV5640_REG_AEC_PK_VTS 0x350c
+#define OV5640_REG_TIMING_HS 0x3800
+#define OV5640_REG_TIMING_VS 0x3802
+#define OV5640_REG_TIMING_HW 0x3804
+#define OV5640_REG_TIMING_VH 0x3806
#define OV5640_REG_TIMING_DVPHO 0x3808
#define OV5640_REG_TIMING_DVPVO 0x380a
#define OV5640_REG_TIMING_HTS 0x380c
#define OV5640_REG_TIMING_VTS 0x380e
+#define OV5640_REG_TIMING_HOFFS 0x3810
+#define OV5640_REG_TIMING_VOFFS 0x3812
#define OV5640_REG_TIMING_TC_REG20 0x3820
#define OV5640_REG_TIMING_TC_REG21 0x3821
#define OV5640_REG_AEC_CTRL00 0x3a00
@@ -88,6 +107,7 @@
#define OV5640_REG_POLARITY_CTRL00 0x4740
#define OV5640_REG_MIPI_CTRL00 0x4800
#define OV5640_REG_DEBUG_MODE 0x4814
+#define OV5640_REG_PCLK_PERIOD 0x4837
#define OV5640_REG_ISP_FORMAT_MUX_CTRL 0x501f
#define OV5640_REG_PRE_ISP_TEST_SET1 0x503d
#define OV5640_REG_SDE_CTRL0 0x5580
@@ -118,6 +138,47 @@ enum ov5640_frame_rate {
OV5640_NUM_FRAMERATES,
};
+enum ov5640_pixel_rate_id {
+ OV5640_PIXEL_RATE_168M,
+ OV5640_PIXEL_RATE_148M,
+ OV5640_PIXEL_RATE_124M,
+ OV5640_PIXEL_RATE_96M,
+ OV5640_PIXEL_RATE_48M,
+ OV5640_NUM_PIXEL_RATES,
+};
+
+/*
+ * The chip manual suggests 24/48/96/192 MHz pixel clocks.
+ *
+ * 192MHz exceeds the sysclk limits; use 168MHz as maximum pixel rate for
+ * full resolution mode @15 FPS.
+ */
+static const u32 ov5640_pixel_rates[] = {
+ [OV5640_PIXEL_RATE_168M] = 168000000,
+ [OV5640_PIXEL_RATE_148M] = 148000000,
+ [OV5640_PIXEL_RATE_124M] = 124000000,
+ [OV5640_PIXEL_RATE_96M] = 96000000,
+ [OV5640_PIXEL_RATE_48M] = 48000000,
+};
+
+/*
+ * MIPI CSI-2 link frequencies.
+ *
+ * Derived from the above defined pixel rate for bpp = (8, 16, 24) and
+ * data_lanes = (1, 2)
+ *
+ * link_freq = (pixel_rate * bpp) / (2 * data_lanes)
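+ *
+ * For example (illustrative arithmetic only): a 96 MHz pixel rate at
+ * 16 bpp over 2 data lanes gives 96000000 * 16 / (2 * 2) = 384000000,
+ * one of the entries below.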
+ */
+static const s64 ov5640_csi2_link_freqs[] = {
+ 992000000, 888000000, 768000000, 744000000, 672000000, 672000000,
+ 592000000, 592000000, 576000000, 576000000, 496000000, 496000000,
+ 384000000, 384000000, 384000000, 336000000, 296000000, 288000000,
+ 248000000, 192000000, 192000000, 192000000, 96000000,
+};
+
+/* Link freq for default mode: UYVY 16 bpp, 2 data lanes. */
+#define OV5640_DEFAULT_LINK_FREQ 13
+
enum ov5640_format_mux {
OV5640_FMT_MUX_YUV422 = 0,
OV5640_FMT_MUX_RGB,
@@ -130,20 +191,145 @@ enum ov5640_format_mux {
struct ov5640_pixfmt {
u32 code;
u32 colorspace;
+ u8 bpp;
+ u8 ctrl00;
+ enum ov5640_format_mux mux;
+};
+
+static const struct ov5640_pixfmt ov5640_dvp_formats[] = {
+ {
+ /* YUV422, YUYV */
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .bpp = 16,
+ .ctrl00 = 0x30,
+ .mux = OV5640_FMT_MUX_YUV422,
+ }, {
+ /* YUV422, UYVY */
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x3f,
+ .mux = OV5640_FMT_MUX_YUV422,
+ }, {
+ /* YUV422, YUYV */
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x30,
+ .mux = OV5640_FMT_MUX_YUV422,
+ }, {
+ /* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x6f,
+ .mux = OV5640_FMT_MUX_RGB,
+ }, {
+ /* RGB565 {r[4:0],g[5:3]},{g[2:0],b[4:0]} */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x61,
+ .mux = OV5640_FMT_MUX_RGB,
+ }, {
+ /* Raw, BGBG... / GRGR... */
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x00,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ }, {
+ /* Raw bayer, GBGB... / RGRG... */
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x01,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ }, {
+ /* Raw bayer, GRGR... / BGBG... */
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x02,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ }, {
+ /* Raw bayer, RGRG... / GBGB... */
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x03,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ },
+ { /* sentinel */ }
};
-static const struct ov5640_pixfmt ov5640_formats[] = {
- { MEDIA_BUS_FMT_JPEG_1X8, V4L2_COLORSPACE_JPEG, },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_COLORSPACE_SRGB, },
+static const struct ov5640_pixfmt ov5640_csi2_formats[] = {
+ {
+ /* YUV422, YUYV */
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .bpp = 16,
+ .ctrl00 = 0x30,
+ .mux = OV5640_FMT_MUX_YUV422,
+ }, {
+ /* YUV422, UYVY */
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x3f,
+ .mux = OV5640_FMT_MUX_YUV422,
+ }, {
+ /* YUV422, YUYV */
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x30,
+ .mux = OV5640_FMT_MUX_YUV422,
+ }, {
+ /* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ .ctrl00 = 0x6f,
+ .mux = OV5640_FMT_MUX_RGB,
+ }, {
+ /* BGR888: RGB */
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 24,
+ .ctrl00 = 0x23,
+ .mux = OV5640_FMT_MUX_RGB,
+ }, {
+ /* Raw, BGBG... / GRGR... */
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x00,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ }, {
+ /* Raw bayer, GBGB... / RGRG... */
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x01,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ }, {
+ /* Raw bayer, GRGR... / BGBG... */
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x02,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ }, {
+ /* Raw bayer, RGRG... / GBGB... */
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ .ctrl00 = 0x03,
+ .mux = OV5640_FMT_MUX_RAW_DPC,
+ },
+ { /* sentinel */ }
};
/*
@@ -186,21 +372,42 @@ struct reg_value {
u32 delay_ms;
};
+struct ov5640_timings {
+ /* Analog crop rectangle. */
+ struct v4l2_rect analog_crop;
+	/* Visible crop: from analog crop top-left corner. */
+ struct v4l2_rect crop;
+ /* Total pixels per line: width + fixed hblank. */
+ u32 htot;
+ /* Default vertical blanking: frame height = height + vblank. */
+ u32 vblank_def;
+};
+
struct ov5640_mode_info {
enum ov5640_mode_id id;
enum ov5640_downsize_mode dn_mode;
- u32 hact;
- u32 htot;
- u32 vact;
- u32 vtot;
+ enum ov5640_pixel_rate_id pixel_rate;
+
+ unsigned int width;
+ unsigned int height;
+
+ struct ov5640_timings dvp_timings;
+ struct ov5640_timings csi2_timings;
+
const struct reg_value *reg_data;
u32 reg_data_size;
+
+ /* Used by s_frame_interval only. */
u32 max_fps;
+ u32 def_fps;
};
struct ov5640_ctrls {
struct v4l2_ctrl_handler handler;
struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
struct {
struct v4l2_ctrl *auto_exp;
struct v4l2_ctrl *exposure;
@@ -249,6 +456,7 @@ struct ov5640_dev {
const struct ov5640_mode_info *last_mode;
enum ov5640_frame_rate current_fr;
struct v4l2_fract frame_interval;
+ s64 current_link_freq;
struct ov5640_ctrls ctrls;
@@ -270,6 +478,40 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
ctrls.handler)->sd;
}
+static inline bool ov5640_is_csi2(const struct ov5640_dev *sensor)
+{
+ return sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY;
+}
+
+static inline const struct ov5640_pixfmt *
+ov5640_formats(struct ov5640_dev *sensor)
+{
+ return ov5640_is_csi2(sensor) ? ov5640_csi2_formats
+ : ov5640_dvp_formats;
+}
+
+static const struct ov5640_pixfmt *
+ov5640_code_to_pixfmt(struct ov5640_dev *sensor, u32 code)
+{
+ const struct ov5640_pixfmt *formats = ov5640_formats(sensor);
+ unsigned int i;
+
+ for (i = 0; formats[i].code; ++i) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return &formats[0];
+}
+
+static u32 ov5640_code_to_bpp(struct ov5640_dev *sensor, u32 code)
+{
+ const struct ov5640_pixfmt *format = ov5640_code_to_pixfmt(sensor,
+ code);
+
+ return format->bpp;
+}
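A usage sketch (hypothetical caller, for illustration only): ov5640_code_to_pixfmt()
falls back to the first table entry for unknown codes, so callers always get a valid
descriptor back and never need to handle a NULL return:

	const struct ov5640_pixfmt *pixfmt =
		ov5640_code_to_pixfmt(sensor, MEDIA_BUS_FMT_UYVY8_1X16);
	u32 bpp = pixfmt->bpp;	/* 16 for UYVY8_1X16 in the CSI-2 table */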
+
/*
* FIXME: all of these register tables are likely filled with
* entries that set the register to their power-on default values,
@@ -278,7 +520,19 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
* over i2c.
*/
/* YUV422 UYVY VGA@30fps */
-static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
+
+static const struct v4l2_mbus_framefmt ov5640_default_fmt = {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .width = 640,
+ .height = 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .quantization = V4L2_QUANTIZATION_FULL_RANGE,
+ .xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .field = V4L2_FIELD_NONE,
+};
+
+static const struct reg_value ov5640_init_setting[] = {
{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
{0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
@@ -294,11 +548,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3815, 0x31, 0, 0},
{0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -362,72 +612,11 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
};
-static const struct reg_value ov5640_setting_VGA_640_480[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_XGA_1024_768[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_QVGA_320_240[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_QQVGA_160_120[] = {
+static const struct reg_value ov5640_setting_low_res[] = {
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3815, 0x31, 0, 0},
{0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -437,72 +626,11 @@ static const struct reg_value ov5640_setting_QQVGA_160_120[] = {
{0x4407, 0x04, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_QCIF_176_144[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_NTSC_720_480[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_PAL_720_576[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
static const struct reg_value ov5640_setting_720P_1280_720[] = {
{0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3815, 0x31, 0, 0},
{0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
{0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
@@ -517,11 +645,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
- {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3815, 0x11, 0, 0},
{0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
{0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -532,9 +656,6 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0},
{0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
- {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
- {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0},
{0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
{0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
@@ -548,11 +669,7 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
- {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3815, 0x11, 0, 0},
{0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
{0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -563,67 +680,462 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70},
};
-/* power-on sensor init reg table */
-static const struct ov5640_mode_info ov5640_mode_init_data = {
- 0, SUBSAMPLING, 640, 1896, 480, 984,
- ov5640_init_setting_30fps_VGA,
- ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
- OV5640_30_FPS,
+static const struct ov5640_mode_info ov5640_mode_data[OV5640_NUM_MODES] = {
+ {
+ /* 160x120 */
+ .id = OV5640_MODE_QQVGA_160_120,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .width = 160,
+ .height = 120,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 160,
+ .height = 120,
+ },
+ .htot = 1896,
+ .vblank_def = 864,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 160,
+ .height = 120,
+ },
+ .htot = 1600,
+ .vblank_def = 878,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 176x144 */
+ .id = OV5640_MODE_QCIF_176_144,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .width = 176,
+ .height = 144,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 176,
+ .height = 144,
+ },
+ .htot = 1896,
+ .vblank_def = 840,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 176,
+ .height = 144,
+ },
+ .htot = 1600,
+ .vblank_def = 854,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 320x240 */
+ .id = OV5640_MODE_QVGA_320_240,
+ .dn_mode = SUBSAMPLING,
+ .width = 320,
+ .height = 240,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 320,
+ .height = 240,
+ },
+ .htot = 1896,
+ .vblank_def = 744,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 320,
+ .height = 240,
+ },
+ .htot = 1600,
+ .vblank_def = 760,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 640x480 */
+ .id = OV5640_MODE_VGA_640_480,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .width = 640,
+ .height = 480,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 640,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 600,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 640,
+ .height = 480,
+ },
+ .htot = 1600,
+ .vblank_def = 520,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_60_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 720x480 */
+ .id = OV5640_MODE_NTSC_720_480,
+ .dn_mode = SUBSAMPLING,
+ .width = 720,
+ .height = 480,
+ .pixel_rate = OV5640_PIXEL_RATE_96M,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 56,
+ .top = 60,
+ .width = 720,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 504,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ .crop = {
+ .left = 56,
+ .top = 60,
+ .width = 720,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 1206,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 720x576 */
+ .id = OV5640_MODE_PAL_720_576,
+ .dn_mode = SUBSAMPLING,
+ .width = 720,
+ .height = 576,
+ .pixel_rate = OV5640_PIXEL_RATE_96M,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 56,
+ .top = 6,
+ .width = 720,
+ .height = 576,
+ },
+ .htot = 1896,
+ .vblank_def = 408,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ .crop = {
+ .left = 56,
+ .top = 6,
+ .width = 720,
+ .height = 576,
+ },
+ .htot = 1896,
+ .vblank_def = 1110,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 1024x768 */
+ .id = OV5640_MODE_XGA_1024_768,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_96M,
+ .width = 1024,
+ .height = 768,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 1024,
+ .height = 768,
+ },
+ .htot = 1896,
+ .vblank_def = 312,
+ },
+ .csi2_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = OV5640_NATIVE_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 1024,
+ .height = 768,
+ },
+ .htot = 1896,
+ .vblank_def = 918,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 1280x720 */
+ .id = OV5640_MODE_720P_1280_720,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_124M,
+ .width = 1280,
+ .height = 720,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 250,
+ .width = 2624,
+ .height = 1456,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1280,
+ .height = 720,
+ },
+ .htot = 1892,
+ .vblank_def = 20,
+ },
+ .csi2_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 250,
+ .width = 2624,
+ .height = 1456,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1280,
+ .height = 720,
+ },
+ .htot = 1600,
+ .vblank_def = 560,
+ },
+ .reg_data = ov5640_setting_720P_1280_720,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_720P_1280_720),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 1920x1080 */
+ .id = OV5640_MODE_1080P_1920_1080,
+ .dn_mode = SCALING,
+ .pixel_rate = OV5640_PIXEL_RATE_148M,
+ .width = 1920,
+ .height = 1080,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 336,
+ .top = 434,
+ .width = 1952,
+ .height = 1088,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1920,
+ .height = 1080,
+ },
+ .htot = 2500,
+ .vblank_def = 40,
+ },
+ .csi2_timings = {
+ /* Crop the full valid pixel array in the center. */
+ .analog_crop = {
+ .left = 336,
+ .top = 434,
+ .width = 1952,
+ .height = 1088,
+ },
+ /* Maintain larger processing margins. */
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1920,
+ .height = 1080,
+ },
+ .htot = 2234,
+ .vblank_def = 24,
+ },
+ .reg_data = ov5640_setting_1080P_1920_1080,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 2592x1944 */
+ .id = OV5640_MODE_QSXGA_2592_1944,
+ .dn_mode = SCALING,
+ .pixel_rate = OV5640_PIXEL_RATE_168M,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 0,
+ .width = 2624,
+ .height = 1952,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 2592,
+ .height = 1944,
+ },
+ .htot = 2844,
+ .vblank_def = 24,
+ },
+ .csi2_timings = {
+ /* Give more processing margin to the full-resolution mode. */
+ .analog_crop = {
+ .left = 0,
+ .top = 0,
+ .width = OV5640_NATIVE_WIDTH,
+ .height = 1952,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 2592,
+ .height = 1944,
+ },
+ .htot = 2844,
+ .vblank_def = 24,
+ },
+ .reg_data = ov5640_setting_QSXGA_2592_1944,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
+ .max_fps = OV5640_15_FPS,
+ .def_fps = OV5640_15_FPS
+ },
};
-static const struct ov5640_mode_info
-ov5640_mode_data[OV5640_NUM_MODES] = {
- {OV5640_MODE_QQVGA_160_120, SUBSAMPLING,
- 160, 1896, 120, 984,
- ov5640_setting_QQVGA_160_120,
- ARRAY_SIZE(ov5640_setting_QQVGA_160_120),
- OV5640_30_FPS},
- {OV5640_MODE_QCIF_176_144, SUBSAMPLING,
- 176, 1896, 144, 984,
- ov5640_setting_QCIF_176_144,
- ARRAY_SIZE(ov5640_setting_QCIF_176_144),
- OV5640_30_FPS},
- {OV5640_MODE_QVGA_320_240, SUBSAMPLING,
- 320, 1896, 240, 984,
- ov5640_setting_QVGA_320_240,
- ARRAY_SIZE(ov5640_setting_QVGA_320_240),
- OV5640_30_FPS},
- {OV5640_MODE_VGA_640_480, SUBSAMPLING,
- 640, 1896, 480, 1080,
- ov5640_setting_VGA_640_480,
- ARRAY_SIZE(ov5640_setting_VGA_640_480),
- OV5640_60_FPS},
- {OV5640_MODE_NTSC_720_480, SUBSAMPLING,
- 720, 1896, 480, 984,
- ov5640_setting_NTSC_720_480,
- ARRAY_SIZE(ov5640_setting_NTSC_720_480),
- OV5640_30_FPS},
- {OV5640_MODE_PAL_720_576, SUBSAMPLING,
- 720, 1896, 576, 984,
- ov5640_setting_PAL_720_576,
- ARRAY_SIZE(ov5640_setting_PAL_720_576),
- OV5640_30_FPS},
- {OV5640_MODE_XGA_1024_768, SUBSAMPLING,
- 1024, 1896, 768, 1080,
- ov5640_setting_XGA_1024_768,
- ARRAY_SIZE(ov5640_setting_XGA_1024_768),
- OV5640_30_FPS},
- {OV5640_MODE_720P_1280_720, SUBSAMPLING,
- 1280, 1892, 720, 740,
- ov5640_setting_720P_1280_720,
- ARRAY_SIZE(ov5640_setting_720P_1280_720),
- OV5640_30_FPS},
- {OV5640_MODE_1080P_1920_1080, SCALING,
- 1920, 2500, 1080, 1120,
- ov5640_setting_1080P_1920_1080,
- ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
- OV5640_30_FPS},
- {OV5640_MODE_QSXGA_2592_1944, SCALING,
- 2592, 2844, 1944, 1968,
- ov5640_setting_QSXGA_2592_1944,
- ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
- OV5640_15_FPS},
-};
+static const struct ov5640_timings *
+ov5640_timings(const struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ if (ov5640_is_csi2(sensor))
+ return &mode->csi2_timings;
+
+ return &mode->dvp_timings;
+}
static int ov5640_init_slave_id(struct ov5640_dev *sensor)
{
@@ -797,20 +1309,10 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
* +-----+-----+
* +------------> PCLK
*
- * This is deviating from the datasheet at least for the register
- * 0x3108, since it's said here that the PCLK would be clocked from
- * the PLL.
- *
- * There seems to be also (unverified) constraints:
+ * There also seem to be constraints:
* - the PLL pre-divider output rate should be in the 4-27MHz range
* - the PLL multiplier output rate should be in the 500-1000MHz range
* - PCLK >= SCLK * 2 in YUV, >= SCLK in Raw or JPEG
- *
- * In the two latter cases, these constraints are met since our
- * factors are hardcoded. If we were to change that, we would need to
- * take this into account. The only varying parts are the PLL
- * multiplier and the system clock divider, which are shared between
- * all these clocks so won't cause any issue.
*/
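The constraints listed above amount to simple range checks; a minimal sketch with a
hypothetical helper, assuming a non-zero prediv and the prediv/mult values produced
by ov5640_calc_sys_clk():

static bool ov5640_pll_params_valid(unsigned long xvclk_rate,
				    u8 prediv, u8 mult)
{
	unsigned long prediv_out = xvclk_rate / prediv;
	u64 pll_out = (u64)prediv_out * mult;

	/* The PLL pre-divider output rate should be in the 4-27MHz range. */
	if (prediv_out < 4 * 1000 * 1000 || prediv_out > 27 * 1000 * 1000)
		return false;

	/* The PLL multiplier output rate should be in the 500-1000MHz range. */
	return pll_out >= 500 * 1000 * 1000 && pll_out <= 1000 * 1000 * 1000;
}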
/*
@@ -830,13 +1332,6 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
#define OV5640_SYSDIV_MAX 16
/*
- * Hardcode these values for scaler and non-scaler modes.
- * FIXME: to be re-calcualted for 1 data lanes setups
- */
-#define OV5640_MIPI_DIV_PCLK 2
-#define OV5640_MIPI_DIV_SCLK 1
-
-/*
* This is supposed to be ranging from 1 to 2, but the value is always
* set to 2 in the vendor kernels.
*/
@@ -945,70 +1440,83 @@ out:
/*
* ov5640_set_mipi_pclk() - Calculate the clock tree configuration values
* for the MIPI CSI-2 output.
- *
- * @rate: The requested bandwidth per lane in bytes per second.
- * 'Bandwidth Per Lane' is calculated as:
- * bpl = HTOT * VTOT * FPS * bpp / num_lanes;
- *
- * This function use the requested bandwidth to calculate:
- * - sample_rate = bpl / (bpp / num_lanes);
- * = bpl / (PLL_RDIV * BIT_DIV * PCLK_DIV * MIPI_DIV / num_lanes);
- *
- * - mipi_sclk = bpl / MIPI_DIV / 2; ( / 2 is for CSI-2 DDR)
- *
- * with these fixed parameters:
- * PLL_RDIV = 2;
- * BIT_DIVIDER = 2; (MIPI_BIT_MODE == 8 ? 2 : 2,5);
- * PCLK_DIV = 1;
- *
- * The MIPI clock generation differs for modes that use the scaler and modes
- * that do not. In case the scaler is in use, the MIPI_SCLK generates the MIPI
- * BIT CLk, and thus:
- *
- * - mipi_sclk = bpl / MIPI_DIV / 2;
- * MIPI_DIV = 1;
- *
- * For modes that do not go through the scaler, the MIPI BIT CLOCK is generated
- * from the pixel clock, and thus:
- *
- * - sample_rate = bpl / (bpp / num_lanes);
- * = bpl / (2 * 2 * 1 * MIPI_DIV / num_lanes);
- * = bpl / (4 * MIPI_DIV / num_lanes);
- * - MIPI_DIV = bpp / (4 * num_lanes);
- *
- * FIXME: this have been tested with 16bpp and 2 lanes setup only.
- * MIPI_DIV is fixed to value 2, but it -might- be changed according to the
- * above formula for setups with 1 lane or image formats with different bpp.
- *
- * FIXME: this deviates from the sensor manual documentation which is quite
- * thin on the MIPI clock tree generation part.
*/
-static int ov5640_set_mipi_pclk(struct ov5640_dev *sensor,
- unsigned long rate)
+static int ov5640_set_mipi_pclk(struct ov5640_dev *sensor)
{
- const struct ov5640_mode_info *mode = sensor->current_mode;
+ u8 bit_div, mipi_div, pclk_div, sclk_div, sclk2x_div, root_div;
u8 prediv, mult, sysdiv;
- u8 mipi_div;
+ unsigned long link_freq;
+ unsigned long sysclk;
+ u8 pclk_period;
+ u32 sample_rate;
+ u32 num_lanes;
int ret;
+ /* Use the link freq computed at ov5640_update_pixel_rate() time. */
+ link_freq = sensor->current_link_freq;
+
/*
- * 1280x720 is reported to use 'SUBSAMPLING' only,
- * but according to the sensor manual it goes through the
- * scaler before subsampling.
+ * - mipi_div - Additional divider for the MIPI lane clock.
+ *
+ * Higher link frequencies would make sysclk > 1GHz.
+ * Keep the sysclk low and do not divide in the MIPI domain.
*/
- if (mode->dn_mode == SCALING ||
- (mode->id == OV5640_MODE_720P_1280_720))
- mipi_div = OV5640_MIPI_DIV_SCLK;
+ if (link_freq > OV5640_LINK_RATE_MAX)
+ mipi_div = 1;
else
- mipi_div = OV5640_MIPI_DIV_PCLK;
+ mipi_div = 2;
- ov5640_calc_sys_clk(sensor, rate, &prediv, &mult, &sysdiv);
+ sysclk = link_freq * mipi_div;
+ ov5640_calc_sys_clk(sensor, sysclk, &prediv, &mult, &sysdiv);
- ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL0,
- 0x0f, OV5640_PLL_CTRL0_MIPI_MODE_8BIT);
+ /*
+ * Adjust PLL parameters to maintain the MIPI_SCLK-to-PCLK ratio.
+ *
+ * - root_div = 2 (fixed)
+ * - bit_div : MIPI 8-bit = 2; MIPI 10-bit = 2.5
+ * - pclk_div = 1 (fixed)
+ * - p_div = (2 lanes ? mipi_div : 2 * mipi_div)
+ *
+ * This results in the following MIPI_SCLK depending on the number
+ * of lanes:
+ *
+ * - 2 lanes: MIPI_SCLK = (4 or 5) * PCLK
+ * - 1 lane: MIPI_SCLK = (8 or 10) * PCLK
+ */
+ root_div = OV5640_PLL_CTRL3_PLL_ROOT_DIV_2;
+ bit_div = OV5640_PLL_CTRL0_MIPI_MODE_8BIT;
+ pclk_div = ilog2(OV5640_PCLK_ROOT_DIV);
- ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
- 0xff, sysdiv << 4 | mipi_div);
+ /*
+ * Scaler clock:
+ * - YUV: PCLK >= 2 * SCLK
+ * - RAW or JPEG: PCLK >= SCLK
+ * - sclk2x_div = sclk_div / 2
+ */
+ sclk_div = ilog2(OV5640_SCLK_ROOT_DIV);
+ sclk2x_div = ilog2(OV5640_SCLK2X_ROOT_DIV);
+
+ /*
+ * Set the pixel clock period expressed in ns with 1-bit decimal
+ * (0x01=0.5ns).
+ *
+ * The register is very briefly documented. In the OV5645 datasheet it
+ * is described as (2 * pclk period), and from testing it seems the
+ * actual definition is 2 * 8-bit sample period.
+ *
+ * 2 * sample_period = (mipi_clk * 2 * num_lanes / bpp) * (bpp / 8) / 2
+ */
+ num_lanes = sensor->ep.bus.mipi_csi2.num_data_lanes;
+ sample_rate = (link_freq * mipi_div * num_lanes * 2) / 16;
+ pclk_period = 2000000000UL / sample_rate;
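/*
 * Worked example (illustrative numbers, not tied to a specific mode): with
 * a 480MHz link frequency, mipi_div = 2 and 2 data lanes:
 *   sample_rate = (480000000 * 2 * 2 * 2) / 16 = 240000000
 *   pclk_period = 2000000000 / 240000000 = 8, i.e. 4.0ns given the 0.5ns
 * register unit described above.
 */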
+
+ /* Program the clock tree registers. */
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL0, 0x0f, bit_div);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1, 0xff,
+ (sysdiv << 4) | mipi_div);
if (ret)
return ret;
@@ -1016,13 +1524,29 @@ static int ov5640_set_mipi_pclk(struct ov5640_dev *sensor,
if (ret)
return ret;
- ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
- 0x1f, OV5640_PLL_CTRL3_PLL_ROOT_DIV_2 | prediv);
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3, 0x1f,
+ root_div | prediv);
if (ret)
return ret;
- return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER,
- 0x30, OV5640_PLL_SYS_ROOT_DIVIDER_BYPASS);
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f,
+ (pclk_div << 4) | (sclk2x_div << 2) | sclk_div);
+ if (ret)
+ return ret;
+
+ return ov5640_write_reg(sensor, OV5640_REG_PCLK_PERIOD, pclk_period);
+}
+
+static u32 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ const struct ov5640_timings *timings = &mode->dvp_timings;
+ u32 rate;
+
+ rate = timings->htot * (timings->crop.height + timings->vblank_def);
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ return rate;
}
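/*
 * Worked example using the VGA DVP timings from the mode table above:
 * htot = 1896, crop height = 480 and vblank_def = 600 at 30 fps give
 * 1896 * (480 + 600) * 30 = 61430400, i.e. a pixel rate of roughly 61.4MHz.
 */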
static unsigned long ov5640_calc_pclk(struct ov5640_dev *sensor,
@@ -1042,11 +1566,16 @@ static unsigned long ov5640_calc_pclk(struct ov5640_dev *sensor,
return _rate / *pll_rdiv / *bit_div / *pclk_div;
}
-static int ov5640_set_dvp_pclk(struct ov5640_dev *sensor, unsigned long rate)
+static int ov5640_set_dvp_pclk(struct ov5640_dev *sensor)
{
u8 prediv, mult, sysdiv, pll_rdiv, bit_div, pclk_div;
+ u32 rate;
int ret;
+ rate = ov5640_calc_pixel_rate(sensor);
+ rate *= ov5640_code_to_bpp(sensor, sensor->fmt.code);
+ rate /= sensor->ep.bus.parallel.bus_width;
+
ov5640_calc_pclk(sensor, rate, &prediv, &mult, &sysdiv, &pll_rdiv,
&bit_div, &pclk_div);
@@ -1098,17 +1627,20 @@ static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor,
if (ret < 0)
return ret;
- ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->hact);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->width);
if (ret < 0)
return ret;
- return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact);
+ return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->height);
}
/* download ov5640 settings to sensor through i2c */
static int ov5640_set_timings(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
{
+ const struct ov5640_timings *timings;
+ const struct v4l2_rect *analog_crop;
+ const struct v4l2_rect *crop;
int ret;
if (sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8) {
@@ -1117,32 +1649,68 @@ static int ov5640_set_timings(struct ov5640_dev *sensor,
return ret;
}
- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact);
+ timings = ov5640_timings(sensor, mode);
+ analog_crop = &timings->analog_crop;
+ crop = &timings->crop;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HS,
+ analog_crop->left);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VS,
+ analog_crop->top);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HW,
+ analog_crop->left + analog_crop->width - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VH,
+ analog_crop->top + analog_crop->height - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HOFFS, crop->left);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VOFFS, crop->top);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->width);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->height);
if (ret < 0)
return ret;
- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, timings->htot);
if (ret < 0)
return ret;
- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS,
+ mode->height + timings->vblank_def);
if (ret < 0)
return ret;
- return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot);
+ return 0;
}
-static int ov5640_load_regs(struct ov5640_dev *sensor,
- const struct ov5640_mode_info *mode)
+static void ov5640_load_regs(struct ov5640_dev *sensor,
+ const struct reg_value *regs, unsigned int regnum)
{
- const struct reg_value *regs = mode->reg_data;
unsigned int i;
u32 delay_ms;
u16 reg_addr;
u8 mask, val;
int ret = 0;
- for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
+ for (i = 0; i < regnum; ++i, ++regs) {
delay_ms = regs->delay_ms;
reg_addr = regs->reg_addr;
val = regs->val;
@@ -1151,7 +1719,7 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
/* remain in power down mode for DVP */
if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
- sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
+ !ov5640_is_csi2(sensor))
continue;
if (mask)
@@ -1164,8 +1732,6 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
if (delay_ms)
usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
}
-
- return ov5640_set_timings(sensor, mode);
}
static int ov5640_set_autoexposure(struct ov5640_dev *sensor, bool on)
@@ -1550,37 +2116,22 @@ static int ov5640_set_virtual_channel(struct ov5640_dev *sensor)
}
static const struct ov5640_mode_info *
-ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
- int width, int height, bool nearest)
+ov5640_find_mode(struct ov5640_dev *sensor, int width, int height, bool nearest)
{
const struct ov5640_mode_info *mode;
mode = v4l2_find_nearest_size(ov5640_mode_data,
ARRAY_SIZE(ov5640_mode_data),
- hact, vact,
- width, height);
+ width, height, width, height);
if (!mode ||
- (!nearest && (mode->hact != width || mode->vact != height)))
- return NULL;
-
- /* Check to see if the current mode exceeds the max frame rate */
- if (ov5640_framerates[fr] > ov5640_framerates[mode->max_fps])
+ (!nearest &&
+ (mode->width != width || mode->height != height)))
return NULL;
return mode;
}
-static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
-{
- u64 rate;
-
- rate = sensor->current_mode->vtot * sensor->current_mode->htot;
- rate *= ov5640_framerates[sensor->current_fr];
-
- return rate;
-}
-
/*
* sensor changes between scaling and subsampling, go through
* exposure calculation
@@ -1628,7 +2179,8 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
return ret;
/* Write capture setting */
- ret = ov5640_load_regs(sensor, mode);
+ ov5640_load_regs(sensor, mode->reg_data, mode->reg_data_size);
+ ret = ov5640_set_timings(sensor, mode);
if (ret < 0)
return ret;
@@ -1752,7 +2304,8 @@ static int ov5640_set_mode_direct(struct ov5640_dev *sensor,
return -EINVAL;
/* Write capture setting */
- return ov5640_load_regs(sensor, mode);
+ ov5640_load_regs(sensor, mode->reg_data, mode->reg_data_size);
+ return ov5640_set_timings(sensor, mode);
}
static int ov5640_set_mode(struct ov5640_dev *sensor)
@@ -1762,7 +2315,6 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
enum ov5640_downsize_mode dn_mode, orig_dn_mode;
bool auto_gain = sensor->ctrls.auto_gain->val == 1;
bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO;
- unsigned long rate;
int ret;
dn_mode = mode->dn_mode;
@@ -1781,19 +2333,10 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
goto restore_auto_gain;
}
- /*
- * All the formats we support have 16 bits per pixel, seems to require
- * the same rate than YUV, so we can just use 16 bpp all the time.
- */
- rate = ov5640_calc_pixel_rate(sensor) * 16;
- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
- rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
- ret = ov5640_set_mipi_pclk(sensor, rate);
- } else {
- rate = rate / sensor->ep.bus.parallel.bus_width;
- ret = ov5640_set_dvp_pclk(sensor, rate);
- }
-
+ if (ov5640_is_csi2(sensor))
+ ret = ov5640_set_mipi_pclk(sensor);
+ else
+ ret = ov5640_set_dvp_pclk(sensor);
if (ret < 0)
return 0;
@@ -1860,10 +2403,8 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor)
int ret;
/* first load the initial register values */
- ret = ov5640_load_regs(sensor, &ov5640_mode_init_data);
- if (ret < 0)
- return ret;
- sensor->last_mode = &ov5640_mode_init_data;
+ ov5640_load_regs(sensor, ov5640_init_setting,
+ ARRAY_SIZE(ov5640_init_setting));
ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f,
(ilog2(OV5640_SCLK2X_ROOT_DIV) << 2) |
@@ -2224,7 +2765,7 @@ static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
fi->denominator = best_fps;
find_mode:
- mode = ov5640_find_mode(sensor, rate, width, height, false);
+ mode = ov5640_find_mode(sensor, width, height, false);
return mode ? rate : -EINVAL;
}
@@ -2260,25 +2801,34 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *mode;
- int i;
+ const struct ov5640_pixfmt *pixfmt;
+ unsigned int bpp;
- mode = ov5640_find_mode(sensor, fr, fmt->width, fmt->height, true);
+ mode = ov5640_find_mode(sensor, fmt->width, fmt->height, true);
if (!mode)
return -EINVAL;
- fmt->width = mode->hact;
- fmt->height = mode->vact;
+
+ pixfmt = ov5640_code_to_pixfmt(sensor, fmt->code);
+ bpp = pixfmt->bpp;
+
+ /*
+ * Adjust mode according to bpp:
+ * - 8bpp modes work for resolutions >= 1280x720
+ * - 24bpp modes work for resolutions < 1280x720
+ */
+ if (bpp == 8 && mode->width < 1280)
+ mode = &ov5640_mode_data[OV5640_MODE_720P_1280_720];
+ else if (bpp == 24 && mode->width > 1024)
+ mode = &ov5640_mode_data[OV5640_MODE_XGA_1024_768];
+
+ fmt->width = mode->width;
+ fmt->height = mode->height;
if (new_mode)
*new_mode = mode;
- for (i = 0; i < ARRAY_SIZE(ov5640_formats); i++)
- if (ov5640_formats[i].code == fmt->code)
- break;
- if (i >= ARRAY_SIZE(ov5640_formats))
- i = 0;
-
- fmt->code = ov5640_formats[i].code;
- fmt->colorspace = ov5640_formats[i].colorspace;
+ fmt->code = pixfmt->code;
+ fmt->colorspace = pixfmt->colorspace;
fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
@@ -2286,6 +2836,107 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
return 0;
}
+static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+ const struct ov5640_timings *timings;
+ s32 exposure_val, exposure_max;
+ unsigned int hblank;
+ unsigned int i = 0;
+ u32 pixel_rate;
+ s64 link_freq;
+ u32 num_lanes;
+ u32 vblank;
+ u32 bpp;
+
+ /*
+ * Update the pixel rate control value.
+ *
+ * For DVP mode, maintain the pixel rate calculation using fixed FPS.
+ */
+ if (!ov5640_is_csi2(sensor)) {
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
+
+ return 0;
+ }
+
+ /*
+ * The MIPI CSI-2 link frequency should comply with the CSI-2
+ * specification and be lower than 1GHz.
+ *
+ * Start from the suggested pixel_rate for the current mode and
+ * progressively lower it if the resulting link frequency exceeds 1GHz.
+ */
+ num_lanes = sensor->ep.bus.mipi_csi2.num_data_lanes;
+ bpp = ov5640_code_to_bpp(sensor, fmt->code);
+ do {
+ pixel_rate = ov5640_pixel_rates[pixel_rate_id];
+ link_freq = pixel_rate * bpp / (2 * num_lanes);
+ } while (link_freq >= 1000000000U &&
+ ++pixel_rate_id < OV5640_NUM_PIXEL_RATES);
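/*
 * Worked example (illustrative): a 16 bpp format on 2 data lanes starting
 * from the 168MHz pixel rate candidate gives
 * link_freq = 168000000 * 16 / (2 * 2) = 672MHz; that is below 1GHz, so
 * the first candidate is kept.
 */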
+
+ sensor->current_link_freq = link_freq;
+
+ /*
+ * Higher link rates require the clock tree to be programmed with
+ * 'mipi_div' = 1; this has the effect of halving the actual output
+ * pixel rate in the MIPI domain.
+ *
+ * Adjust the pixel rate and link frequency control value to report it
+ * correctly to userspace.
+ */
+ if (link_freq > OV5640_LINK_RATE_MAX) {
+ pixel_rate /= 2;
+ link_freq /= 2;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov5640_csi2_link_freqs); ++i) {
+ if (ov5640_csi2_link_freqs[i] == link_freq)
+ break;
+ }
+ WARN_ON(i == ARRAY_SIZE(ov5640_csi2_link_freqs));
+
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+
+ timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ hblank, hblank, 1, hblank);
+
+ vblank = timings->vblank_def;
+
+ if (sensor->current_fr != mode->def_fps) {
+ /*
+ * Compute the vertical blanking according to the framerate
+ * configured with s_frame_interval.
+ */
+ int fie_num = sensor->frame_interval.numerator;
+ int fie_denom = sensor->frame_interval.denominator;
+
+ vblank = ((fie_num * pixel_rate / fie_denom) / timings->htot) -
+ mode->height;
+ }
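/*
 * Illustration (numbers picked for the example only): a 1/15s frame
 * interval with a 168MHz pixel rate and htot = 2844 gives
 * 168000000 / (15 * 2844) ~= 3938 total lines per frame, hence
 * vblank ~= 3938 - 1944 = 1994 for a 1944-line mode.
 */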
+
+ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+ OV5640_MAX_VTS - mode->height, 1, vblank);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
+
+ exposure_max = timings->crop.height + vblank - 4;
+ exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+ sensor->ctrls.exposure->minimum,
+ exposure_max);
+
+ __v4l2_ctrl_modify_range(sensor->ctrls.exposure,
+ sensor->ctrls.exposure->minimum,
+ exposure_max, 1, exposure_val);
+
+ return 0;
+}
+
static int ov5640_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -2316,6 +2967,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
}
if (new_mode != sensor->current_mode) {
+ sensor->current_fr = new_mode->def_fps;
sensor->current_mode = new_mode;
sensor->pending_mode_change = true;
}
@@ -2325,80 +2977,70 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
/* update format even if code is unchanged, resolution might change */
sensor->fmt = *mbus_fmt;
- __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
- ov5640_calc_pixel_rate(sensor));
+ ov5640_update_pixel_rate(sensor);
+
out:
mutex_unlock(&sensor->lock);
return ret;
}
+static int ov5640_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ const struct ov5640_timings *timings;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP: {
+ mutex_lock(&sensor->lock);
+ timings = ov5640_timings(sensor, mode);
+ sel->r = timings->analog_crop;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+ }
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV5640_NATIVE_WIDTH;
+ sel->r.height = OV5640_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = OV5640_PIXEL_ARRAY_TOP;
+ sel->r.left = OV5640_PIXEL_ARRAY_LEFT;
+ sel->r.width = OV5640_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV5640_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int ov5640_set_framefmt(struct ov5640_dev *sensor,
struct v4l2_mbus_framefmt *format)
{
+ bool is_jpeg = format->code == MEDIA_BUS_FMT_JPEG_1X8;
+ const struct ov5640_pixfmt *pixfmt;
int ret = 0;
- bool is_jpeg = false;
- u8 fmt, mux;
- switch (format->code) {
- case MEDIA_BUS_FMT_UYVY8_1X16:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- /* YUV422, UYVY */
- fmt = 0x3f;
- mux = OV5640_FMT_MUX_YUV422;
- break;
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- /* YUV422, YUYV */
- fmt = 0x30;
- mux = OV5640_FMT_MUX_YUV422;
- break;
- case MEDIA_BUS_FMT_RGB565_2X8_LE:
- /* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */
- fmt = 0x6F;
- mux = OV5640_FMT_MUX_RGB;
- break;
- case MEDIA_BUS_FMT_RGB565_2X8_BE:
- /* RGB565 {r[4:0],g[5:3]},{g[2:0],b[4:0]} */
- fmt = 0x61;
- mux = OV5640_FMT_MUX_RGB;
- break;
- case MEDIA_BUS_FMT_JPEG_1X8:
- /* YUV422, YUYV */
- fmt = 0x30;
- mux = OV5640_FMT_MUX_YUV422;
- is_jpeg = true;
- break;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- /* Raw, BGBG... / GRGR... */
- fmt = 0x00;
- mux = OV5640_FMT_MUX_RAW_DPC;
- break;
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- /* Raw bayer, GBGB... / RGRG... */
- fmt = 0x01;
- mux = OV5640_FMT_MUX_RAW_DPC;
- break;
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- /* Raw bayer, GRGR... / BGBG... */
- fmt = 0x02;
- mux = OV5640_FMT_MUX_RAW_DPC;
- break;
- case MEDIA_BUS_FMT_SRGGB8_1X8:
- /* Raw bayer, RGRG... / GBGB... */
- fmt = 0x03;
- mux = OV5640_FMT_MUX_RAW_DPC;
- break;
- default:
- return -EINVAL;
- }
+ pixfmt = ov5640_code_to_pixfmt(sensor, format->code);
/* FORMAT CONTROL00: YUV and RGB formatting */
- ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00, fmt);
+ ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00,
+ pixfmt->ctrl00);
if (ret)
return ret;
/* FORMAT MUX CONTROL: ISP YUV or RGB */
- ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL, mux);
+ ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL,
+ pixfmt->mux);
if (ret)
return ret;
@@ -2655,6 +3297,15 @@ static int ov5640_set_ctrl_vflip(struct ov5640_dev *sensor, int value)
(BIT(2) | BIT(1)) : 0);
}
+static int ov5640_set_ctrl_vblank(struct ov5640_dev *sensor, int value)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+
+ /* Update the VTOT timing register value. */
+ return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS,
+ mode->height + value);
+}
+
static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
@@ -2685,10 +3336,25 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ const struct ov5640_timings *timings;
+ unsigned int exp_max;
int ret;
/* v4l2_ctrl_lock() locks our own mutex */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update the exposure range to the newly programmed vblank. */
+ timings = ov5640_timings(sensor, mode);
+ exp_max = mode->height + ctrl->val - 4;
+ __v4l2_ctrl_modify_range(sensor->ctrls.exposure,
+ sensor->ctrls.exposure->minimum,
+ exp_max, sensor->ctrls.exposure->step,
+ timings->vblank_def);
+ break;
+ }
+
/*
* If the device is not powered up by the host driver do
* not apply any controls to H/W at this time. Instead
@@ -2728,6 +3394,9 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_VFLIP:
ret = ov5640_set_ctrl_vflip(sensor, ctrl->val);
break;
+ case V4L2_CID_VBLANK:
+ ret = ov5640_set_ctrl_vblank(sensor, ctrl->val);
+ break;
default:
ret = -EINVAL;
break;
@@ -2743,9 +3412,14 @@ static const struct v4l2_ctrl_ops ov5640_ctrl_ops = {
static int ov5640_init_controls(struct ov5640_dev *sensor)
{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
const struct v4l2_ctrl_ops *ops = &ov5640_ctrl_ops;
struct ov5640_ctrls *ctrls = &sensor->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ struct v4l2_fwnode_device_properties props;
+ const struct ov5640_timings *timings;
+ unsigned int max_vblank;
+ unsigned int hblank;
int ret;
v4l2_ctrl_handler_init(hdl, 32);
@@ -2755,8 +3429,25 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* Clock related controls */
ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
- 0, INT_MAX, 1,
- ov5640_calc_pixel_rate(sensor));
+ ov5640_pixel_rates[OV5640_NUM_PIXEL_RATES - 1],
+ ov5640_pixel_rates[0], 1,
+ ov5640_pixel_rates[mode->pixel_rate]);
+
+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov5640_csi2_link_freqs) - 1,
+ OV5640_DEFAULT_LINK_FREQ,
+ ov5640_csi2_link_freqs);
+
+ timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ ctrls->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, hblank,
+ hblank, 1, hblank);
+
+ max_vblank = OV5640_MAX_VTS - mode->height;
+ ctrls->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ OV5640_MIN_VBLANK, max_vblank,
+ 1, timings->vblank_def);
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
@@ -2805,7 +3496,20 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
goto free_ctrls;
}
+ ret = v4l2_fwnode_device_parse(&sensor->i2c_client->dev, &props);
+ if (ret)
+ goto free_ctrls;
+
+ if (props.rotation == 180)
+ sensor->upside_down = true;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
+ if (ret)
+ goto free_ctrls;
+
ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2825,16 +3529,29 @@ static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ u32 bpp = ov5640_code_to_bpp(sensor, fse->code);
+ unsigned int index = fse->index;
+
if (fse->pad != 0)
return -EINVAL;
- if (fse->index >= OV5640_NUM_MODES)
+ if (!bpp)
+ return -EINVAL;
+
+ /* Only low-resolution modes are supported for 24bpp formats. */
+ if (bpp == 24 && index >= OV5640_MODE_720P_1280_720)
+ return -EINVAL;
+
+ /* FIXME: Low resolution modes don't work in 8bpp formats. */
+ if (bpp == 8)
+ index += OV5640_MODE_720P_1280_720;
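/*
 * Illustration: with an 8 bpp code and fse->index == 0 the index is offset
 * to OV5640_MODE_720P_1280_720, so the first frame size enumerated for the
 * raw bayer formats is 1280x720.
 */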
+
+ if (index >= OV5640_NUM_MODES)
return -EINVAL;
- fse->min_width =
- ov5640_mode_data[fse->index].hact;
+ fse->min_width = ov5640_mode_data[index].width;
fse->max_width = fse->min_width;
- fse->min_height =
- ov5640_mode_data[fse->index].vact;
+ fse->min_height = ov5640_mode_data[index].height;
fse->max_height = fse->min_height;
return 0;
@@ -2898,20 +3615,25 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
mode = sensor->current_mode;
frame_rate = ov5640_try_frame_interval(sensor, &fi->interval,
- mode->hact, mode->vact);
+ mode->width,
+ mode->height);
if (frame_rate < 0) {
/* Always return a valid frame interval value */
fi->interval = sensor->frame_interval;
goto out;
}
- mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
- mode->vact, true);
+ mode = ov5640_find_mode(sensor, mode->width, mode->height, true);
if (!mode) {
ret = -EINVAL;
goto out;
}
+ if (ov5640_framerates[frame_rate] > ov5640_framerates[mode->max_fps]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (mode != sensor->current_mode ||
frame_rate != sensor->current_fr) {
sensor->current_fr = frame_rate;
@@ -2919,8 +3641,7 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->current_mode = mode;
sensor->pending_mode_change = true;
- __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
- ov5640_calc_pixel_rate(sensor));
+ ov5640_update_pixel_rate(sensor);
}
out:
mutex_unlock(&sensor->lock);
@@ -2931,12 +3652,23 @@ static int ov5640_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->pad != 0)
- return -EINVAL;
- if (code->index >= ARRAY_SIZE(ov5640_formats))
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_pixfmt *formats;
+ unsigned int num_formats;
+
+ if (ov5640_is_csi2(sensor)) {
+ formats = ov5640_csi2_formats;
+ num_formats = ARRAY_SIZE(ov5640_csi2_formats) - 1;
+ } else {
+ formats = ov5640_dvp_formats;
+ num_formats = ARRAY_SIZE(ov5640_dvp_formats) - 1;
+ }
+
+ if (code->index >= num_formats)
return -EINVAL;
- code->code = ov5640_formats[code->index].code;
+ code->code = formats[code->index].code;
+
return 0;
}
@@ -2961,7 +3693,7 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
sensor->pending_fmt_change = false;
}
- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+ if (ov5640_is_csi2(sensor))
ret = ov5640_set_stream_mipi(sensor, enable);
else
ret = ov5640_set_stream_dvp(sensor, enable);
@@ -2974,6 +3706,23 @@ out:
return ret;
}
+static int ov5640_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_get_try_format(sd, state, 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, state, 0);
+
+ *fmt = ov5640_default_fmt;
+
+ crop->left = OV5640_PIXEL_ARRAY_LEFT;
+ crop->top = OV5640_PIXEL_ARRAY_TOP;
+ crop->width = OV5640_PIXEL_ARRAY_WIDTH;
+ crop->height = OV5640_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+}
+
static const struct v4l2_subdev_core_ops ov5640_core_ops = {
.s_power = ov5640_s_power,
.log_status = v4l2_ctrl_subdev_log_status,
@@ -2988,9 +3737,11 @@ static const struct v4l2_subdev_video_ops ov5640_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov5640_pad_ops = {
+ .init_cfg = ov5640_init_cfg,
.enum_mbus_code = ov5640_enum_mbus_code,
.get_fmt = ov5640_get_fmt,
.set_fmt = ov5640_set_fmt,
+ .get_selection = ov5640_get_selection,
.enum_frame_size = ov5640_enum_frame_size,
.enum_frame_interval = ov5640_enum_frame_interval,
};
@@ -3046,8 +3797,6 @@ static int ov5640_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct fwnode_handle *endpoint;
struct ov5640_dev *sensor;
- struct v4l2_mbus_framefmt *fmt;
- u32 rotation;
int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
@@ -3060,40 +3809,17 @@ static int ov5640_probe(struct i2c_client *client)
* default init sequence initialize sensor to
* YUV422 UYVY VGA@30fps
*/
- fmt = &sensor->fmt;
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
- fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
- fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
- fmt->width = 640;
- fmt->height = 480;
- fmt->field = V4L2_FIELD_NONE;
+ sensor->fmt = ov5640_default_fmt;
sensor->frame_interval.numerator = 1;
sensor->frame_interval.denominator = ov5640_framerates[OV5640_30_FPS];
sensor->current_fr = OV5640_30_FPS;
sensor->current_mode =
&ov5640_mode_data[OV5640_MODE_VGA_640_480];
sensor->last_mode = sensor->current_mode;
+ sensor->current_link_freq = OV5640_DEFAULT_LINK_FREQ;
sensor->ae_target = 52;
- /* optional indication of physical rotation of sensor */
- ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
- &rotation);
- if (!ret) {
- switch (rotation) {
- case 180:
- sensor->upside_down = true;
- fallthrough;
- case 0:
- break;
- default:
- dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
- rotation);
- }
- }
-
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
NULL);
if (!endpoint) {
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 117ff5403312..82a9b2de7735 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -127,11 +127,16 @@
#define OV5693_LINK_FREQ_419_2MHZ 419200000
#define OV5693_PIXEL_RATE 167680000
-/* Miscellaneous */
-#define OV5693_NUM_SUPPLIES 2
-
#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
+static const char * const ov5693_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital circuit power */
+};
+
+#define OV5693_NUM_SUPPLIES ARRAY_SIZE(ov5693_supply_names)
+
struct ov5693_reg {
u32 reg;
u8 val;
@@ -152,7 +157,7 @@ struct ov5693_device {
struct gpio_desc *reset;
struct gpio_desc *powerdown;
struct regulator_bulk_data supplies[OV5693_NUM_SUPPLIES];
- struct clk *clk;
+ struct clk *xvclk;
struct ov5693_mode {
struct v4l2_rect crop;
@@ -352,11 +357,6 @@ static const s64 link_freq_menu_items[] = {
OV5693_LINK_FREQ_419_2MHZ
};
-static const char * const ov5693_supply_names[] = {
- "avdd",
- "dovdd",
-};
-
static const char * const ov5693_test_pattern_menu[] = {
"Disabled",
"Random Data",
@@ -794,7 +794,7 @@ static void ov5693_sensor_powerdown(struct ov5693_device *ov5693)
regulator_bulk_disable(OV5693_NUM_SUPPLIES, ov5693->supplies);
- clk_disable_unprepare(ov5693->clk);
+ clk_disable_unprepare(ov5693->xvclk);
}
static int ov5693_sensor_powerup(struct ov5693_device *ov5693)
@@ -804,7 +804,7 @@ static int ov5693_sensor_powerup(struct ov5693_device *ov5693)
gpiod_set_value_cansleep(ov5693->reset, 1);
gpiod_set_value_cansleep(ov5693->powerdown, 1);
- ret = clk_prepare_enable(ov5693->clk);
+ ret = clk_prepare_enable(ov5693->xvclk);
if (ret) {
dev_err(ov5693->dev, "Failed to enable clk\n");
goto fail_power;
@@ -1390,7 +1390,7 @@ out_free_bus_cfg:
static int ov5693_probe(struct i2c_client *client)
{
struct ov5693_device *ov5693;
- u32 clk_rate;
+ u32 xvclk_rate;
int ret = 0;
ov5693 = devm_kzalloc(&client->dev, sizeof(*ov5693), GFP_KERNEL);
@@ -1408,16 +1408,28 @@ static int ov5693_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&ov5693->sd, client, &ov5693_ops);
- ov5693->clk = devm_clk_get(&client->dev, "xvclk");
- if (IS_ERR(ov5693->clk)) {
- dev_err(&client->dev, "Error getting clock\n");
- return PTR_ERR(ov5693->clk);
+ ov5693->xvclk = devm_clk_get_optional(&client->dev, "xvclk");
+ if (IS_ERR(ov5693->xvclk))
+ return dev_err_probe(&client->dev, PTR_ERR(ov5693->xvclk),
+ "failed to get xvclk: %ld\n",
+ PTR_ERR(ov5693->xvclk));
+
+ if (ov5693->xvclk) {
+ xvclk_rate = clk_get_rate(ov5693->xvclk);
+ } else {
+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev),
+ "clock-frequency",
+ &xvclk_rate);
+
+ if (ret) {
+ dev_err(&client->dev, "can't get clock frequency");
+ return ret;
+ }
}
- clk_rate = clk_get_rate(ov5693->clk);
- if (clk_rate != OV5693_XVCLK_FREQ)
+ if (xvclk_rate != OV5693_XVCLK_FREQ)
dev_warn(&client->dev, "Found clk freq %u, expected %u\n",
- clk_rate, OV5693_XVCLK_FREQ);
+ xvclk_rate, OV5693_XVCLK_FREQ);
ret = ov5693_configure_gpios(ov5693);
if (ret)
@@ -1521,10 +1533,17 @@ static const struct acpi_device_id ov5693_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
+static const struct of_device_id ov5693_of_match[] = {
+ { .compatible = "ovti,ov5693", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov5693_of_match);
+
static struct i2c_driver ov5693_driver = {
.driver = {
.name = "ov5693",
.acpi_match_table = ov5693_acpi_match,
+ .of_match_table = ov5693_of_match,
.pm = &ov5693_pm_ops,
},
.probe_new = ov5693_probe,
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 0e7be15bc20a..1bd797c7926b 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -934,6 +934,8 @@ static int ov7251_set_power_on(struct device *dev)
ARRAY_SIZE(ov7251_global_init_setting));
if (ret < 0) {
dev_err(ov7251->dev, "error during global init\n");
+ gpiod_set_value_cansleep(ov7251->enable_gpio, 0);
+ clk_disable_unprepare(ov7251->xclk);
ov7251_regulators_disable(ov7251);
return ret;
}
@@ -1340,7 +1342,7 @@ static int ov7251_s_stream(struct v4l2_subdev *subdev, int enable)
if (enable) {
ret = pm_runtime_get_sync(ov7251->dev);
if (ret < 0)
- goto unlock_out;
+ goto err_power_down;
ret = ov7251_pll_configure(ov7251);
if (ret) {
@@ -1372,12 +1374,11 @@ static int ov7251_s_stream(struct v4l2_subdev *subdev, int enable)
pm_runtime_put(ov7251->dev);
}
-unlock_out:
mutex_unlock(&ov7251->lock);
return ret;
err_power_down:
- pm_runtime_put_noidle(ov7251->dev);
+ pm_runtime_put(ov7251->dev);
mutex_unlock(&ov7251->lock);
return ret;
}
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index ef976d085d72..16cc547976dd 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -50,6 +50,7 @@
/* Bits definition for MIPID02_MODE_REG2 */
#define MODE_HSYNC_ACTIVE_HIGH BIT(1)
#define MODE_VSYNC_ACTIVE_HIGH BIT(2)
+#define MODE_PCLK_SAMPLE_RISING BIT(3)
/* Bits definition for MIPID02_DATA_SELECTION_CTRL */
#define SELECTION_MANUAL_DATA BIT(2)
#define SELECTION_MANUAL_WIDTH BIT(3)
@@ -61,9 +62,12 @@ static const u32 mipid02_supported_fmt_codes[] = {
MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YVYU8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_VYUY8_1X16,
+ MEDIA_BUS_FMT_RGB565_1X16, MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB565_2X8_LE, MEDIA_BUS_FMT_RGB565_2X8_BE,
- MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_JPEG_1X8
};
@@ -130,9 +134,15 @@ static int bpp_from_code(__u32 code)
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
return 12;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
return 16;
@@ -161,12 +171,18 @@ static u8 data_type_from_code(__u32 code)
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
return 0x2c;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
return 0x1e;
case MEDIA_BUS_FMT_BGR888_1X24:
return 0x24;
+ case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
return 0x22;
@@ -201,8 +217,16 @@ static __u32 get_fmt_code(__u32 code)
static __u32 serial_to_parallel_code(__u32 serial)
{
+ if (serial == MEDIA_BUS_FMT_RGB565_1X16)
+ return MEDIA_BUS_FMT_RGB565_2X8_LE;
+ if (serial == MEDIA_BUS_FMT_YUYV8_1X16)
+ return MEDIA_BUS_FMT_YUYV8_2X8;
+ if (serial == MEDIA_BUS_FMT_YVYU8_1X16)
+ return MEDIA_BUS_FMT_YVYU8_2X8;
if (serial == MEDIA_BUS_FMT_UYVY8_1X16)
return MEDIA_BUS_FMT_UYVY8_2X8;
+ if (serial == MEDIA_BUS_FMT_VYUY8_1X16)
+ return MEDIA_BUS_FMT_VYUY8_2X8;
if (serial == MEDIA_BUS_FMT_BGR888_1X24)
return MEDIA_BUS_FMT_BGR888_3X8;
@@ -494,6 +518,8 @@ static int mipid02_configure_from_tx(struct mipid02_dev *bridge)
bridge->r.mode_reg2 |= MODE_HSYNC_ACTIVE_HIGH;
if (ep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
bridge->r.mode_reg2 |= MODE_VSYNC_ACTIVE_HIGH;
+ if (ep->bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ bridge->r.mode_reg2 |= MODE_PCLK_SAMPLE_RISING;
return 0;
}
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 8fafce26d62f..f66ac14cffad 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2517,7 +2517,6 @@ static struct snd_soc_component_driver tda1997x_codec_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int tda1997x_probe(struct i2c_client *client,
@@ -2798,6 +2797,7 @@ err_free_mutex:
cancel_delayed_work(&state->delayed_work_enable_hpd);
mutex_destroy(&state->page_lock);
mutex_destroy(&state->lock);
+ tda1997x_set_power(state, 0);
err_free_state:
kfree(state);
dev_err(&client->dev, "%s failed: %d\n", __func__, ret);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 65472438444b..93a980c4e899 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1285,7 +1285,7 @@ static int tvp5150_disable_all_input_links(struct tvp5150 *decoder)
int err;
for (i = 0; i < TVP5150_NUM_PADS - 1; i++) {
- connector_pad = media_entity_remote_pad(&decoder->pads[i]);
+ connector_pad = media_pad_remote_pad_first(&decoder->pads[i]);
if (!connector_pad)
continue;
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 11f5207f73aa..afd1bd7ff7b6 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -9,6 +9,7 @@
*/
#include <linux/bitmap.h>
+#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <media/media-entity.h>
@@ -449,7 +450,7 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
bitmap_zero(active, entity->num_pads);
bitmap_fill(has_no_links, entity->num_pads);
- list_for_each_entry(link, &entity->links, list) {
+ for_each_media_entity_data_link(entity, link) {
struct media_pad *pad = link->sink->entity == entity
? link->sink : link->source;
@@ -888,7 +889,7 @@ media_entity_find_link(struct media_pad *source, struct media_pad *sink)
{
struct media_link *link;
- list_for_each_entry(link, &source->entity->links, list) {
+ for_each_media_entity_data_link(source->entity, link) {
if (link->source->entity == source->entity &&
link->source->index == source->index &&
link->sink->entity == sink->entity &&
@@ -900,11 +901,11 @@ media_entity_find_link(struct media_pad *source, struct media_pad *sink)
}
EXPORT_SYMBOL_GPL(media_entity_find_link);
-struct media_pad *media_entity_remote_pad(const struct media_pad *pad)
+struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad)
{
struct media_link *link;
- list_for_each_entry(link, &pad->entity->links, list) {
+ for_each_media_entity_data_link(pad->entity, link) {
if (!(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
@@ -918,7 +919,77 @@ struct media_pad *media_entity_remote_pad(const struct media_pad *pad)
return NULL;
}
-EXPORT_SYMBOL_GPL(media_entity_remote_pad);
+EXPORT_SYMBOL_GPL(media_pad_remote_pad_first);
+
+struct media_pad *
+media_entity_remote_pad_unique(const struct media_entity *entity,
+ unsigned int type)
+{
+ struct media_pad *pad = NULL;
+ struct media_link *link;
+
+ list_for_each_entry(link, &entity->links, list) {
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+
+ if (((link->flags & MEDIA_LNK_FL_LINK_TYPE) !=
+ MEDIA_LNK_FL_DATA_LINK) ||
+ !(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ if (type == MEDIA_PAD_FL_SOURCE) {
+ local_pad = link->sink;
+ remote_pad = link->source;
+ } else {
+ local_pad = link->source;
+ remote_pad = link->sink;
+ }
+
+ if (local_pad->entity == entity) {
+ if (pad)
+ return ERR_PTR(-ENOTUNIQ);
+
+ pad = remote_pad;
+ }
+ }
+
+ if (!pad)
+ return ERR_PTR(-ENOLINK);
+
+ return pad;
+}
+EXPORT_SYMBOL_GPL(media_entity_remote_pad_unique);
+
+struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad)
+{
+ struct media_pad *found_pad = NULL;
+ struct media_link *link;
+
+ list_for_each_entry(link, &pad->entity->links, list) {
+ struct media_pad *remote_pad;
+
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ if (link->sink == pad)
+ remote_pad = link->source;
+ else if (link->source == pad)
+ remote_pad = link->sink;
+ else
+ continue;
+
+ if (found_pad)
+ return ERR_PTR(-ENOTUNIQ);
+
+ found_pad = remote_pad;
+ }
+
+ if (!found_pad)
+ return ERR_PTR(-ENOLINK);
+
+ return found_pad;
+}
+EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique);
static void media_interface_init(struct media_device *mdev,
struct media_interface *intf,
@@ -1051,3 +1122,18 @@ struct media_link *media_create_ancillary_link(struct media_entity *primary,
return link;
}
EXPORT_SYMBOL_GPL(media_create_ancillary_link);
+
+struct media_link *__media_entity_next_link(struct media_entity *entity,
+ struct media_link *link,
+ unsigned long link_type)
+{
+ link = link ? list_next_entry(link, list)
+ : list_first_entry(&entity->links, typeof(*link), list);
+
+ list_for_each_entry_from(link, &entity->links, list)
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == link_type)
+ return link;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__media_entity_next_link);
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 76e5a504df8c..d3358643fb7d 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -180,7 +180,7 @@ static void cx18_av_initialize(struct v4l2_subdev *sd)
*/
cx18_av_and_or4(cx, CXADEC_CHIP_CTRL, 0xFFFBFFFF, 0x00120000);
- /* Setup the Video and and Aux/Audio PLLs */
+ /* Setup the Video and Aux/Audio PLLs */
cx18_av_init(cx);
/* set video to auto-detect */
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 89d4d5a3ba34..52be42f9a7fa 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -618,12 +618,24 @@ EXPORT_SYMBOL(cx88_reset);
static inline unsigned int norm_swidth(v4l2_std_id norm)
{
- return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 754 : 922;
+ if (norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M))
+ return 754;
+
+ if (norm & V4L2_STD_PAL_Nc)
+ return 745;
+
+ return 922;
}
static inline unsigned int norm_hdelay(v4l2_std_id norm)
{
- return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 135 : 186;
+ if (norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M))
+ return 135;
+
+ if (norm & V4L2_STD_PAL_Nc)
+ return 149;
+
+ return 186;
}
static inline unsigned int norm_vdelay(v4l2_std_id norm)
@@ -636,7 +648,7 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
if (norm & V4L2_STD_PAL_M)
return 28604892; // 3.575611 MHz
- if (norm & (V4L2_STD_PAL_Nc))
+ if (norm & V4L2_STD_PAL_Nc)
return 28656448; // 3.582056 MHz
if (norm & V4L2_STD_NTSC) // All NTSC/M and variants
@@ -841,8 +853,8 @@ static int set_tvaudio(struct cx88_core *core)
} else if (V4L2_STD_SECAM_DK & norm) {
core->tvaudio = WW_DK;
- } else if ((V4L2_STD_NTSC_M & norm) ||
- (V4L2_STD_PAL_M & norm)) {
+ } else if ((V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL_Nc) &
+ norm) {
core->tvaudio = WW_BTSC;
} else if (V4L2_STD_NTSC_M_JP & norm) {
diff --git a/drivers/media/pci/ddbridge/ddbridge-ci.c b/drivers/media/pci/ddbridge/ddbridge-ci.c
index 377991095aba..ee20813c33ff 100644
--- a/drivers/media/pci/ddbridge/ddbridge-ci.c
+++ b/drivers/media/pci/ddbridge/ddbridge-ci.c
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Marcus Metzler <mocm@metzlerbros.de>
* Ralph Metzler <rjkm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "ddbridge.h"
diff --git a/drivers/media/pci/ddbridge/ddbridge-ci.h b/drivers/media/pci/ddbridge/ddbridge-ci.h
index cc98656af349..41cd97e52aa1 100644
--- a/drivers/media/pci/ddbridge/ddbridge-ci.h
+++ b/drivers/media/pci/ddbridge/ddbridge-ci.h
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Marcus Metzler <mocm@metzlerbros.de>
* Ralph Metzler <rjkm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DDBRIDGE_CI_H__
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 92fe051c672f..fe833f39698a 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Marcus Metzler <mocm@metzlerbros.de>
* Ralph Metzler <rjkm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.c b/drivers/media/pci/ddbridge/ddbridge-hw.c
index f9c91bdbd041..d7d9cd0dad23 100644
--- a/drivers/media/pci/ddbridge/ddbridge-hw.c
+++ b/drivers/media/pci/ddbridge/ddbridge-hw.c
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "ddbridge.h"
diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.h b/drivers/media/pci/ddbridge/ddbridge-hw.h
index e34bd94c266b..934f296f48c0 100644
--- a/drivers/media/pci/ddbridge/ddbridge-hw.h
+++ b/drivers/media/pci/ddbridge/ddbridge-hw.h
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DDBRIDGE_HW_H_
@@ -40,4 +31,4 @@ struct ddb_device_id {
const struct ddb_info *get_ddb_info(u16 vendor, u16 device,
u16 subvendor, u16 subdevice);
-#endif /* _DDBRIDGE_HW_H */
+#endif /* _DDBRIDGE_HW_H_ */
diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.c b/drivers/media/pci/ddbridge/ddbridge-i2c.c
index aafa6030c8cc..c894be180446 100644
--- a/drivers/media/pci/ddbridge/ddbridge-i2c.c
+++ b/drivers/media/pci/ddbridge/ddbridge-i2c.c
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.h b/drivers/media/pci/ddbridge/ddbridge-i2c.h
index 90830f7b1638..48555d41aa48 100644
--- a/drivers/media/pci/ddbridge/ddbridge-i2c.h
+++ b/drivers/media/pci/ddbridge/ddbridge-i2c.h
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DDBRIDGE_I2C_H__
diff --git a/drivers/media/pci/ddbridge/ddbridge-io.h b/drivers/media/pci/ddbridge/ddbridge-io.h
index 1a5b31b52494..991246cecee2 100644
--- a/drivers/media/pci/ddbridge/ddbridge-io.h
+++ b/drivers/media/pci/ddbridge/ddbridge-io.h
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DDBRIDGE_IO_H__
diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
index 25d0d6745b52..91733ab9f58c 100644
--- a/drivers/media/pci/ddbridge/ddbridge-main.c
+++ b/drivers/media/pci/ddbridge/ddbridge-main.c
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/ddbridge/ddbridge-max.c b/drivers/media/pci/ddbridge/ddbridge-max.c
index 576dd2318e4d..0582b86bb869 100644
--- a/drivers/media/pci/ddbridge/ddbridge-max.c
+++ b/drivers/media/pci/ddbridge/ddbridge-max.c
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/ddbridge/ddbridge-max.h b/drivers/media/pci/ddbridge/ddbridge-max.h
index 6543dfc77138..da1553fe8695 100644
--- a/drivers/media/pci/ddbridge/ddbridge-max.h
+++ b/drivers/media/pci/ddbridge/ddbridge-max.h
@@ -5,15 +5,6 @@
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DDBRIDGE_MAX_H_
@@ -27,4 +18,4 @@ int ddb_lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm);
int ddb_fe_attach_mxl5xx(struct ddb_input *input);
int ddb_fe_attach_mci(struct ddb_input *input, u32 type);
-#endif /* _DDBRIDGE_MAX_H */
+#endif /* _DDBRIDGE_MAX_H_ */
diff --git a/drivers/media/pci/ddbridge/ddbridge-mci.c b/drivers/media/pci/ddbridge/ddbridge-mci.c
index 97384ae9ad27..a006cb0fa199 100644
--- a/drivers/media/pci/ddbridge/ddbridge-mci.c
+++ b/drivers/media/pci/ddbridge/ddbridge-mci.c
@@ -5,15 +5,6 @@
* Copyright (C) 2017-2018 Digital Devices GmbH
* Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "ddbridge.h"
diff --git a/drivers/media/pci/ddbridge/ddbridge-mci.h b/drivers/media/pci/ddbridge/ddbridge-mci.h
index 24241111c634..d9799fbf59d4 100644
--- a/drivers/media/pci/ddbridge/ddbridge-mci.h
+++ b/drivers/media/pci/ddbridge/ddbridge-mci.h
@@ -5,15 +5,6 @@
* Copyright (C) 2017-2018 Digital Devices GmbH
* Marcus Metzler <mocm@metzlerbros.de>
* Ralph Metzler <rjkm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DDBRIDGE_MCI_H_
diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h
index 2942a7f35099..42256fc9695d 100644
--- a/drivers/media/pci/ddbridge/ddbridge-regs.h
+++ b/drivers/media/pci/ddbridge/ddbridge-regs.h
@@ -3,15 +3,6 @@
* ddbridge-regs.h: Digital Devices PCIe bridge driver
*
* Copyright (C) 2010-2017 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DDBRIDGE_REGS_H__
diff --git a/drivers/media/pci/ddbridge/ddbridge-sx8.c b/drivers/media/pci/ddbridge/ddbridge-sx8.c
index 374fcee94960..c8de8d283f85 100644
--- a/drivers/media/pci/ddbridge/ddbridge-sx8.c
+++ b/drivers/media/pci/ddbridge/ddbridge-sx8.c
@@ -5,15 +5,6 @@
* Copyright (C) 2018 Digital Devices GmbH
* Marcus Metzler <mocm@metzlerbros.de>
* Ralph Metzler <rjkm@metzlerbros.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "ddbridge.h"
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index b834449e78f8..f3699dbd193f 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -4,15 +4,6 @@
*
* Copyright (C) 2010-2017 Digital Devices GmbH
* Ralph Metzler <rmetzler@digitaldevices.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DDBRIDGE_H_
@@ -379,4 +370,4 @@ void ddb_unmap(struct ddb *dev);
int ddb_exit_ddbridge(int stage, int error);
int ddb_init_ddbridge(void);
-#endif /* DDBRIDGE_H */
+#endif /* _DDBRIDGE_H_ */
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index dbdbdb648a0d..a3fe547b7fce 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -1323,7 +1323,7 @@ static int cio2_video_link_validate(struct media_link *link)
struct v4l2_subdev_format source_fmt;
int ret;
- if (!media_entity_remote_pad(entity->pads)) {
+ if (!media_pad_remote_pad_first(entity->pads)) {
dev_info(dev, "video node %s pad not connected\n", vd->name);
return -ENOTCONN;
}
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index 5526bcc7a9bd..965d285a9240 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -801,7 +801,7 @@ int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
if (buflen < 128)
return -ENOMEM;
- /* Assumption: Hauppauge eeprom is at 0xa0 on on bus 0 */
+ /* Assumption: Hauppauge eeprom is at 0xa0 on bus 0 */
/* TODO: Pull the details from the boards struct */
return saa7164_api_i2c_read(&dev->i2c_bus[0], 0xa0 >> 1, sizeof(reg),
&reg[0], 128, buf);
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index a96e170ab04e..118b922c08c3 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
- depends on PCI && VIDEO_DEV && VIRT_TO_BUS && I2C
+ depends on PCI && VIDEO_DEV && I2C
depends on STA2X11 || COMPILE_TEST
select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 5cae73e6fb9c..560ff1ddcc83 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -254,9 +254,9 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
/* pci init */
dev->pci = pci_dev;
- err = pci_enable_device(pci_dev);
+ err = pcim_enable_device(pci_dev);
if (err) {
- dev_err(&dev->pci->dev, "pci_enable_device() failed\n");
+ dev_err(&dev->pci->dev, "pcim_enable_device() failed\n");
goto unreg_v4l2;
}
@@ -265,21 +265,16 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&dev->pci->dev, "32 bit PCI DMA is not supported\n");
- goto disable_pci;
+ goto unreg_v4l2;
}
/* get mmio */
- err = pci_request_regions(pci_dev, dev->name);
+ err = pcim_iomap_regions(pci_dev, BIT(0), dev->name);
if (err) {
dev_err(&dev->pci->dev, "Cannot request regions for MMIO\n");
- goto disable_pci;
- }
- dev->mmio = pci_ioremap_bar(pci_dev, 0);
- if (!dev->mmio) {
- err = -EIO;
- dev_err(&dev->pci->dev, "can't ioremap() MMIO memory\n");
- goto release_mmio;
+ goto unreg_v4l2;
}
+ dev->mmio = pcim_iomap_table(pci_dev)[0];
spin_lock_init(&dev->slock);
@@ -291,7 +286,7 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
err = tw5864_video_init(dev, video_nr);
if (err)
- goto unmap_mmio;
+ goto unreg_v4l2;
/* get irq */
err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw5864_isr,
@@ -308,12 +303,6 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
fini_video:
tw5864_video_fini(dev);
-unmap_mmio:
- iounmap(dev->mmio);
-release_mmio:
- pci_release_regions(pci_dev);
-disable_pci:
- pci_disable_device(pci_dev);
unreg_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
return err;
@@ -331,11 +320,6 @@ static void tw5864_finidev(struct pci_dev *pci_dev)
/* unregister */
tw5864_video_fini(dev);
- /* release resources */
- iounmap(dev->mmio);
- pci_release_regions(pci_dev);
- pci_disable_device(pci_dev);
-
v4l2_device_unregister(&dev->v4l2_dev);
}
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 6676e069b515..c53099c958ca 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -315,13 +315,6 @@ static int tw686x_probe(struct pci_dev *pci_dev,
spin_lock_init(&dev->lock);
- err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
- dev->name, dev);
- if (err < 0) {
- dev_err(&pci_dev->dev, "unable to request interrupt\n");
- goto iounmap;
- }
-
timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);
/*
@@ -333,18 +326,26 @@ static int tw686x_probe(struct pci_dev *pci_dev,
err = tw686x_video_init(dev);
if (err) {
dev_err(&pci_dev->dev, "can't register video\n");
- goto free_irq;
+ goto iounmap;
}
err = tw686x_audio_init(dev);
if (err)
dev_warn(&pci_dev->dev, "can't register audio\n");
+ err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
+ dev->name, dev);
+ if (err < 0) {
+ dev_err(&pci_dev->dev, "unable to request interrupt\n");
+ goto tw686x_free;
+ }
+
pci_set_drvdata(pci_dev, dev);
return 0;
-free_irq:
- free_irq(pci_dev->irq, dev);
+tw686x_free:
+ tw686x_video_free(dev);
+ tw686x_audio_free(dev);
iounmap:
pci_iounmap(pci_dev, dev->mmio);
free_region:
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 6344a479119f..3ebf7a2c95f0 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1280,8 +1280,10 @@ int tw686x_video_init(struct tw686x_dev *dev)
video_set_drvdata(vdev, vc);
err = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
- if (err < 0)
+ if (err < 0) {
+ video_device_release(vdev);
goto error;
+ }
vc->num = vdev->num;
}
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 3c02aa2a54aa..9e64041cc1c1 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -63,6 +63,7 @@ struct vdec_t {
bool is_source_changed;
u32 source_change;
u32 drain;
+ bool aborting;
};
static const struct vpu_format vdec_formats[] = {
@@ -104,7 +105,6 @@ static const struct vpu_format vdec_formats[] = {
.pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION
},
{
.pixfmt = V4L2_PIX_FMT_MPEG2,
@@ -178,16 +178,6 @@ static int vdec_ctrl_init(struct vpu_inst *inst)
return 0;
}
-static void vdec_set_last_buffer_dequeued(struct vpu_inst *inst)
-{
- struct vdec_t *vdec = inst->priv;
-
- if (vdec->eos_received) {
- if (!vpu_set_last_buffer_dequeued(inst))
- vdec->eos_received--;
- }
-}
-
static void vdec_handle_resolution_change(struct vpu_inst *inst)
{
struct vdec_t *vdec = inst->priv;
@@ -234,6 +224,21 @@ static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state,
return 0;
}
+static void vdec_set_last_buffer_dequeued(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ return;
+
+ if (vdec->eos_received) {
+ if (!vpu_set_last_buffer_dequeued(inst)) {
+ vdec->eos_received--;
+ vdec_update_state(inst, VPU_CODEC_STATE_DRAIN, 0);
+ }
+ }
+}
+
static int vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
strscpy(cap->driver, "amphion-vpu", sizeof(cap->driver));
@@ -493,6 +498,8 @@ static int vdec_drain(struct vpu_inst *inst)
static int vdec_cmd_start(struct vpu_inst *inst)
{
+ struct vdec_t *vdec = inst->priv;
+
switch (inst->state) {
case VPU_CODEC_STATE_STARTED:
case VPU_CODEC_STATE_DRAIN:
@@ -503,6 +510,8 @@ static int vdec_cmd_start(struct vpu_inst *inst)
break;
}
vpu_process_capture_buffer(inst);
+ if (vdec->eos_received)
+ vdec_set_last_buffer_dequeued(inst);
return 0;
}
@@ -731,6 +740,7 @@ static void vdec_stop_done(struct vpu_inst *inst)
vdec->eos_received = 0;
vdec->is_source_changed = false;
vdec->source_change = 0;
+ inst->total_input_count = 0;
vpu_inst_unlock(inst);
}
@@ -939,6 +949,9 @@ static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vb
if (inst->state != VPU_CODEC_STATE_ACTIVE)
return -EINVAL;
+ if (vdec->aborting)
+ return -EINVAL;
+
if (!vdec->req_frame_count)
return -EINVAL;
@@ -1048,6 +1061,8 @@ static void vdec_clear_slots(struct vpu_inst *inst)
vpu_buf = vdec->slots[i];
vbuf = &vpu_buf->m2m_buf.vb;
+ vpu_trace(inst->dev, "clear slot %d\n", i);
+ vdec_response_fs_release(inst, i, vpu_buf->tag);
vdec_recycle_buffer(inst, vbuf);
vdec->slots[i]->state = VPU_BUF_STATE_IDLE;
vdec->slots[i] = NULL;
@@ -1203,7 +1218,6 @@ static void vdec_event_eos(struct vpu_inst *inst)
vdec->eos_received++;
vdec->fixed_fmt = false;
inst->min_buffer_cap = VDEC_MIN_BUFFER_CAP;
- vdec_update_state(inst, VPU_CODEC_STATE_DRAIN, 0);
vdec_set_last_buffer_dequeued(inst);
vpu_inst_unlock(inst);
}
@@ -1310,6 +1324,8 @@ static void vdec_abort(struct vpu_inst *inst)
int ret;
vpu_trace(inst->dev, "[%d] state = %d\n", inst->id, inst->state);
+
+ vdec->aborting = true;
vpu_iface_add_scode(inst, SCODE_PADDING_ABORT);
vdec->params.end_flag = 1;
vpu_iface_set_decode_params(inst, &vdec->params, 1);
@@ -1333,6 +1349,7 @@ static void vdec_abort(struct vpu_inst *inst)
vdec->decoded_frame_count = 0;
vdec->display_frame_count = 0;
vdec->sequence = 0;
+ vdec->aborting = false;
}
static void vdec_stop(struct vpu_inst *inst, bool free)
@@ -1369,8 +1386,7 @@ static void vdec_cleanup(struct vpu_inst *inst)
return;
vdec = inst->priv;
- if (vdec)
- vfree(vdec);
+ vfree(vdec);
inst->priv = NULL;
vfree(inst);
}
@@ -1480,10 +1496,10 @@ static int vdec_stop_session(struct vpu_inst *inst, u32 type)
vdec_update_state(inst, VPU_CODEC_STATE_SEEK, 0);
vdec->drain = 0;
} else {
- if (inst->state != VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ if (inst->state != VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE) {
vdec_abort(inst);
-
- vdec->eos_received = 0;
+ vdec->eos_received = 0;
+ }
vdec_clear_slots(inst);
}
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
index 43d61d82f58c..461524dd1e44 100644
--- a/drivers/media/platform/amphion/venc.c
+++ b/drivers/media/platform/amphion/venc.c
@@ -919,8 +919,7 @@ static void venc_cleanup(struct vpu_inst *inst)
return;
venc = inst->priv;
- if (venc)
- vfree(venc);
+ vfree(venc);
inst->priv = NULL;
vfree(inst);
}
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index e56b96a7e5d3..f914de6ed81e 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -258,6 +258,7 @@ struct vpu_inst {
struct vpu_format cap_format;
u32 min_buffer_cap;
u32 min_buffer_out;
+ u32 total_input_count;
struct v4l2_rect crop;
u32 colorspace;
diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
index 9b39d77a178d..f4d7ca78a621 100644
--- a/drivers/media/platform/amphion/vpu_cmds.c
+++ b/drivers/media/platform/amphion/vpu_cmds.c
@@ -117,8 +117,7 @@ static void vpu_free_cmd(struct vpu_cmd_t *cmd)
{
if (!cmd)
return;
- if (cmd->pkt)
- vfree(cmd->pkt);
+ vfree(cmd->pkt);
vfree(cmd);
}
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 68ad183925fd..73faa50d2865 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -257,14 +257,8 @@ static int vpu_core_register(struct device *dev, struct vpu_core *core)
}
list_add_tail(&core->list, &vpu->cores);
-
vpu_core_get_vpu(core);
- if (vpu_iface_get_power_state(core))
- ret = vpu_core_restore(core);
- if (ret)
- goto error;
-
return 0;
error:
if (core->msg_buffer) {
@@ -362,7 +356,10 @@ struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type)
pm_runtime_resume_and_get(core->dev);
if (core->state == VPU_CORE_DEINIT) {
- ret = vpu_core_boot(core, true);
+ if (vpu_iface_get_power_state(core))
+ ret = vpu_core_restore(core);
+ else
+ ret = vpu_core_boot(core, true);
if (ret) {
pm_runtime_put_sync(core->dev);
mutex_unlock(&core->lock);
@@ -455,8 +452,13 @@ int vpu_inst_unregister(struct vpu_inst *inst)
}
vpu_core_check_hang(core);
if (core->state == VPU_CORE_HANG && !core->instance_mask) {
+ int err;
+
dev_info(core->dev, "reset hang core\n");
- if (!vpu_core_sw_reset(core)) {
+ mutex_unlock(&core->lock);
+ err = vpu_core_sw_reset(core);
+ mutex_lock(&core->lock);
+ if (!err) {
core->state = VPU_CORE_ACTIVE;
core->hang_mask = 0;
}
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
index da62bd718fb8..f72c8a506b22 100644
--- a/drivers/media/platform/amphion/vpu_dbg.c
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -27,7 +27,7 @@ struct print_buf_desc {
u32 bytes;
u32 read;
u32 write;
- char buffer[0];
+ char buffer[];
};
static char *vb2_stat_name[] = {
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index f29c223eefce..f4a488bf9880 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -309,6 +309,7 @@ struct malone_padding_scode {
struct malone_fmt_mapping {
u32 pixelformat;
enum vpu_malone_format malone_format;
+ u32 is_disabled;
};
struct malone_scode_t {
@@ -568,6 +569,8 @@ static enum vpu_malone_format vpu_malone_format_remap(u32 pixelformat)
u32 i;
for (i = 0; i < ARRAY_SIZE(fmt_mappings); i++) {
+ if (fmt_mappings[i].is_disabled)
+ continue;
if (pixelformat == fmt_mappings[i].pixelformat)
return fmt_mappings[i].malone_format;
}
@@ -575,6 +578,19 @@ static enum vpu_malone_format vpu_malone_format_remap(u32 pixelformat)
return MALONE_FMT_NULL;
}
+bool vpu_malone_check_fmt(enum vpu_core_type type, u32 pixelfmt)
+{
+ if (!vpu_imx8q_check_fmt(type, pixelfmt))
+ return false;
+
+ if (pixelfmt == V4L2_PIX_FMT_NV12M_8L128 || pixelfmt == V4L2_PIX_FMT_NV12M_10BE_8L128)
+ return true;
+ if (vpu_malone_format_remap(pixelfmt) == MALONE_FMT_NULL)
+ return false;
+
+ return true;
+}
+
static void vpu_malone_set_stream_cfg(struct vpu_shared_addr *shared,
u32 instance,
enum vpu_malone_format malone_format)
@@ -610,6 +626,8 @@ static int vpu_malone_set_params(struct vpu_shared_addr *shared,
enum vpu_malone_format malone_format;
malone_format = vpu_malone_format_remap(params->codec_format);
+ if (WARN_ON(malone_format == MALONE_FMT_NULL))
+ return -EINVAL;
iface->udata_buffer[instance].base = params->udata.base;
iface->udata_buffer[instance].slot_size = params->udata.size;
@@ -1296,6 +1314,8 @@ static int vpu_malone_insert_scode_vc1_l_seq(struct malone_scode_t *scode)
int size = 0;
u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
+ if (scode->inst->total_input_count)
+ return 0;
scode->need_data = 0;
ret = vpu_malone_insert_scode_seq(scode, MALONE_CODEC_ID_VC1_SIMPLE, sizeof(rcv_seqhdr));
diff --git a/drivers/media/platform/amphion/vpu_malone.h b/drivers/media/platform/amphion/vpu_malone.h
index e5a5cbe9843e..02a9d9530970 100644
--- a/drivers/media/platform/amphion/vpu_malone.h
+++ b/drivers/media/platform/amphion/vpu_malone.h
@@ -40,5 +40,6 @@ int vpu_malone_pre_cmd(struct vpu_shared_addr *shared, u32 instance);
int vpu_malone_post_cmd(struct vpu_shared_addr *shared, u32 instance);
int vpu_malone_init_instance(struct vpu_shared_addr *shared, u32 instance);
u32 vpu_malone_get_max_instance_count(struct vpu_shared_addr *shared);
+bool vpu_malone_check_fmt(enum vpu_core_type type, u32 pixelfmt);
#endif
diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
index d5850df8f1d5..d8247f36d84b 100644
--- a/drivers/media/platform/amphion/vpu_msgs.c
+++ b/drivers/media/platform/amphion/vpu_msgs.c
@@ -150,7 +150,12 @@ static void vpu_session_handle_eos(struct vpu_inst *inst, struct vpu_rpc_event *
static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
- dev_err(inst->dev, "unsupported stream\n");
+ char *str = (char *)pkt->data;
+
+ if (strlen(str))
+ dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
+ else
+ dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
call_void_vop(inst, event_notify, VPU_MSG_ID_UNSUPPORTED, NULL);
vpu_v4l2_set_error(inst);
}
diff --git a/drivers/media/platform/amphion/vpu_rpc.c b/drivers/media/platform/amphion/vpu_rpc.c
index 18a164766409..676f7da041bd 100644
--- a/drivers/media/platform/amphion/vpu_rpc.c
+++ b/drivers/media/platform/amphion/vpu_rpc.c
@@ -195,7 +195,7 @@ static struct vpu_iface_ops imx8q_rpc_ops[] = {
},
[VPU_CORE_TYPE_DEC] = {
.check_codec = vpu_imx8q_check_codec,
- .check_fmt = vpu_imx8q_check_fmt,
+ .check_fmt = vpu_malone_check_fmt,
.boot_core = vpu_imx8q_boot_core,
.get_power_state = vpu_imx8q_get_power_state,
.on_firmware_loaded = vpu_imx8q_on_firmware_loaded,
diff --git a/drivers/media/platform/amphion/vpu_rpc.h b/drivers/media/platform/amphion/vpu_rpc.h
index 25119e5e807e..7eb6f01e6ab5 100644
--- a/drivers/media/platform/amphion/vpu_rpc.h
+++ b/drivers/media/platform/amphion/vpu_rpc.h
@@ -312,11 +312,16 @@ static inline int vpu_iface_input_frame(struct vpu_inst *inst,
struct vb2_buffer *vb)
{
struct vpu_iface_ops *ops = vpu_core_get_iface(inst->core);
+ int ret;
if (!ops || !ops->input_frame)
return -EINVAL;
- return ops->input_frame(inst->core->iface, inst, vb);
+ ret = ops->input_frame(inst->core->iface, inst, vb);
+ if (ret < 0)
+ return ret;
+ inst->total_input_count++;
+ return ret;
}
static inline int vpu_iface_config_memory_resource(struct vpu_inst *inst,
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 446f07d09d0b..8a3eed957ae6 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -500,10 +500,12 @@ static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
fmt->sizeimage[1], fmt->bytesperline[1],
fmt->sizeimage[2], fmt->bytesperline[2],
q->num_buffers);
- call_void_vop(inst, start, q->type);
vb2_clear_last_buffer_dequeued(q);
+ ret = call_vop(inst, start, q->type);
+ if (ret)
+ vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
- return 0;
+ return ret;
}
static void vpu_vb2_stop_streaming(struct vb2_queue *q)
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index 83aebee0c8eb..f399dba62e17 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -20,12 +20,14 @@ config VIDEO_ATMEL_ISC
config VIDEO_ATMEL_XISC
tristate "ATMEL eXtended Image Sensor Controller (XISC) support"
depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV && COMMON_CLK && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_DEV && COMMON_CLK
depends on ARCH_AT91 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
select V4L2_FWNODE
select VIDEO_ATMEL_ISC_BASE
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This module makes the ATMEL eXtended Image Sensor Controller
available as a v4l2 device.
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 2f07a50035c8..9e5317a7d516 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -132,12 +132,9 @@ static int isc_buffer_prepare(struct vb2_buffer *vb)
return 0;
}
-static void isc_start_dma(struct isc_device *isc)
+static void isc_crop_pfe(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
- u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
- u32 dctrl_dview;
- dma_addr_t addr0;
u32 h, w;
h = isc->fmt.fmt.pix.height;
@@ -172,6 +169,14 @@ static void isc_start_dma(struct isc_device *isc)
regmap_update_bits(regmap, ISC_PFE_CFG0,
ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN,
ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
+}
+
+static void isc_start_dma(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
+ u32 dctrl_dview;
+ dma_addr_t addr0;
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
regmap_write(regmap, ISC_DAD0 + isc->offsets.dma, addr0);
@@ -369,6 +374,7 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
struct isc_buffer, list);
list_del(&isc->cur_frm->list);
+ isc_crop_pfe(isc);
isc_start_dma(isc);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
@@ -1466,7 +1472,7 @@ static void isc_awb_work(struct work_struct *w)
if (isc->stop) {
mutex_unlock(&isc->awb_mutex);
return;
- };
+ }
isc_update_profile(isc);
@@ -1525,10 +1531,6 @@ static int isc_s_awb_ctrl(struct v4l2_ctrl *ctrl)
else
ctrls->awb = ISC_WB_NONE;
- /* we did not configure ISC yet */
- if (!isc->config.sd_format)
- break;
-
/* configure the controls with new values from v4l2 */
if (ctrl->cluster[ISC_CTRL_R_GAIN]->is_new)
ctrls->gain[ISC_HIS_CFG_MODE_R] = isc->r_gain_ctrl->val;
diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
index 83b175070c06..8b11aa8340d7 100644
--- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
@@ -591,11 +591,13 @@ static const struct dev_pm_ops microchip_xisc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xisc_runtime_suspend, xisc_runtime_resume, NULL)
};
+#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id microchip_xisc_of_match[] = {
{ .compatible = "microchip,sama7g5-isc" },
{ }
};
MODULE_DEVICE_TABLE(of, microchip_xisc_of_match);
+#endif
static struct platform_driver microchip_xisc_driver = {
.probe = microchip_xisc_probe,
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index bc5b0a0168ec..87685a62a5c2 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1369,6 +1369,9 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
jpeg->vdev->device_caps = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_M2M_MPLANE;
+ if (of_get_property(pdev->dev.of_node, "dma-ranges", NULL))
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
+
ret = video_register_device(jpeg->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h b/drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h
index 2cb8cecb3077..b810c96695c8 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_ipi.h
@@ -40,12 +40,14 @@ struct mdp_ipi_init {
* @ipi_id : IPI_MDP
* @ap_inst : AP mtk_mdp_vpu address
* @vpu_inst_addr : VPU MDP instance address
+ * @padding : Alignment padding
*/
struct mdp_ipi_comm {
uint32_t msg_id;
uint32_t ipi_id;
uint64_t ap_inst;
uint32_t vpu_inst_addr;
+ uint32_t padding;
};
/**
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
index 52e5d36aa912..7d194a476713 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
@@ -35,6 +35,44 @@ mtk_vdec_find_format(struct v4l2_format *f,
return NULL;
}
+static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
+{
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
+ const struct mtk_video_fmt *fmt;
+ struct mtk_q_data *q_data;
+ int num_frame_count = 0, i;
+ bool ret = true;
+
+ for (i = 0; i < *dec_pdata->num_formats; i++) {
+ if (dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
+ continue;
+
+ num_frame_count++;
+ }
+
+ if (num_frame_count == 1)
+ return true;
+
+ fmt = &dec_pdata->vdec_formats[format_index];
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ switch (q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_VP8_FRAME:
+ if (fmt->fourcc == V4L2_PIX_FMT_MM21)
+ ret = true;
+ break;
+ case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_VP9_FRAME:
+ if (fmt->fourcc == V4L2_PIX_FMT_MM21)
+ ret = false;
+ break;
+ default:
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -112,8 +150,6 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
{
struct mtk_q_data *q_data;
- ctx->dev->vdec_pdata->init_vdec_params(ctx);
-
ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
@@ -141,15 +177,6 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
q_data->coded_height = DFT_CFG_HEIGHT;
q_data->fmt = ctx->dev->vdec_pdata->default_cap_fmt;
q_data->field = V4L2_FIELD_NONE;
- ctx->max_width = MTK_VDEC_MAX_W;
- ctx->max_height = MTK_VDEC_MAX_H;
-
- v4l_bound_align_image(&q_data->coded_width,
- MTK_VDEC_MIN_W,
- ctx->max_width, 4,
- &q_data->coded_height,
- MTK_VDEC_MIN_H,
- ctx->max_height, 5, 6);
q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height;
q_data->bytesperline[0] = q_data->coded_width;
@@ -185,12 +212,34 @@ static int vidioc_vdec_dqbuf(struct file *file, void *priv,
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
+static int mtk_vcodec_dec_get_chip_name(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct device *dev = &ctx->dev->plat_dev->dev;
+
+ if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-dec"))
+ return 8173;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8183-vcodec-dec"))
+ return 8183;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-dec"))
+ return 8192;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-dec"))
+ return 8195;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8186-vcodec-dec"))
+ return 8186;
+ else
+ return 8173;
+}
+
static int vidioc_vdec_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strscpy(cap->driver, MTK_VCODEC_DEC_NAME, sizeof(cap->driver));
- strscpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
- strscpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct device *dev = &ctx->dev->plat_dev->dev;
+ int platform_name = mtk_vcodec_dec_get_chip_name(priv);
+
+ strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
+ snprintf(cap->card, sizeof(cap->card), "MT%d video decoder", platform_name);
return 0;
}
@@ -198,6 +247,11 @@ static int vidioc_vdec_querycap(struct file *file, void *priv,
static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(fh);
+
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_ctrl_subscribe_event(fh, sub);
+
switch (sub->type) {
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 2, NULL);
@@ -212,13 +266,18 @@ static int vidioc_try_fmt(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ const struct v4l2_frmsize_stepwise *frmsize;
pix_fmt_mp->field = V4L2_FIELD_NONE;
- pix_fmt_mp->width =
- clamp(pix_fmt_mp->width, MTK_VDEC_MIN_W, ctx->max_width);
- pix_fmt_mp->height =
- clamp(pix_fmt_mp->height, MTK_VDEC_MIN_H, ctx->max_height);
+ /* Always apply frame size constraints from the coded side */
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ frmsize = &fmt->frmsize;
+ else
+ frmsize = &ctx->q_data[MTK_Q_DATA_SRC].fmt->frmsize;
+
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width, MTK_VDEC_MIN_W, frmsize->max_width);
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height, MTK_VDEC_MIN_H, frmsize->max_height);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pix_fmt_mp->num_planes = 1;
@@ -234,18 +293,15 @@ static int vidioc_try_fmt(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
*/
tmp_w = pix_fmt_mp->width;
tmp_h = pix_fmt_mp->height;
- v4l_bound_align_image(&pix_fmt_mp->width,
- MTK_VDEC_MIN_W,
- ctx->max_width, 6,
- &pix_fmt_mp->height,
- MTK_VDEC_MIN_H,
- ctx->max_height, 6, 9);
+ v4l_bound_align_image(&pix_fmt_mp->width, MTK_VDEC_MIN_W, frmsize->max_width, 6,
+ &pix_fmt_mp->height, MTK_VDEC_MIN_H, frmsize->max_height, 6,
+ 9);
if (pix_fmt_mp->width < tmp_w &&
- (pix_fmt_mp->width + 64) <= ctx->max_width)
+ (pix_fmt_mp->width + 64) <= frmsize->max_width)
pix_fmt_mp->width += 64;
if (pix_fmt_mp->height < tmp_h &&
- (pix_fmt_mp->height + 64) <= ctx->max_height)
+ (pix_fmt_mp->height + 64) <= frmsize->max_height)
pix_fmt_mp->height += 64;
mtk_v4l2_debug(0,
@@ -435,13 +491,6 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
if (fmt == NULL)
return -EINVAL;
- if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED) &&
- fmt->fourcc != V4L2_PIX_FMT_VP8_FRAME) {
- mtk_v4l2_debug(3, "4K is enabled");
- ctx->max_width = VCODEC_DEC_4K_CODED_WIDTH;
- ctx->max_height = VCODEC_DEC_4K_CODED_HEIGHT;
- }
-
q_data->fmt = fmt;
vidioc_try_fmt(ctx, f, q_data->fmt);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
@@ -526,15 +575,17 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
if (fsize->index != 0)
return -EINVAL;
- for (i = 0; i < *dec_pdata->num_framesizes; ++i) {
- if (fsize->pixel_format != dec_pdata->vdec_framesizes[i].fourcc)
+ for (i = 0; i < *dec_pdata->num_formats; i++) {
+ if (fsize->pixel_format != dec_pdata->vdec_formats[i].fourcc)
continue;
+ /* Only coded formats have frame sizes set */
+ if (!dec_pdata->vdec_formats[i].frmsize.max_width)
+ return -ENOTTY;
+
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = dec_pdata->vdec_framesizes[i].stepwise;
+ fsize->stepwise = dec_pdata->vdec_formats[i].frmsize;
- fsize->stepwise.max_width = ctx->max_width;
- fsize->stepwise.max_height = ctx->max_height;
mtk_v4l2_debug(1, "%x, %d %d %d %d %d %d",
ctx->dev->dec_capability,
fsize->stepwise.min_width,
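
Since the stepwise range is now attached to each coded format, ENUM_FRAMESIZES simply reflects that table. A minimal userspace sketch that prints the advertised range follows; the device path and the H.264 fourcc are assumptions.

/* Sketch: ask the decoder for the supported frame-size range of a coded
 * format. /dev/video0 and V4L2_PIX_FMT_H264 are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmsizeenum fsize;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&fsize, 0, sizeof(fsize));
	fsize.index = 0;
	fsize.pixel_format = V4L2_PIX_FMT_H264;
	if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0 &&
	    fsize.type == V4L2_FRMSIZE_TYPE_STEPWISE)
		printf("%ux%u .. %ux%u step %ux%u\n",
		       fsize.stepwise.min_width, fsize.stepwise.min_height,
		       fsize.stepwise.max_width, fsize.stepwise.max_height,
		       fsize.stepwise.step_width, fsize.stepwise.step_height);
	close(fd);
	return 0;
}
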
@@ -566,6 +617,9 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
continue;
+ if (!output_queue && !mtk_vdec_get_cap_fmt(ctx, i))
+ continue;
+
if (j == f->index)
break;
++j;
@@ -735,6 +789,7 @@ int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
i, vb2_plane_size(vb, i),
q_data->sizeimage[i]);
+ return -EINVAL;
}
if (!V4L2_TYPE_IS_OUTPUT(vb->type))
vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
@@ -938,6 +993,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
src_vq->dev = &ctx->dev->plat_dev->dev;
+ src_vq->allow_cache_hints = 1;
ret = vb2_queue_init(src_vq);
if (ret) {
@@ -953,6 +1009,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
dst_vq->dev = &ctx->dev->plat_dev->dev;
+ dst_vq->allow_cache_hints = 1;
ret = vb2_queue_init(dst_vq);
if (ret)
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
index 995e6e2fb1ab..e0b6ae9d6caa 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
@@ -208,9 +208,12 @@ static int fops_vcodec_open(struct file *file)
dev->dec_capability =
mtk_vcodec_fw_get_vdec_capa(dev->fw_handler);
+
mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
}
+ ctx->dev->vdec_pdata->init_vdec_params(ctx);
+
list_add(&ctx->list, &dev->ctx_list);
mutex_unlock(&dev->dev_mutex);
@@ -386,8 +389,14 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mtk_v4l2_err("Main device of_platform_populate failed.");
goto err_reg_cont;
}
+ } else {
+ set_bit(MTK_VDEC_CORE, dev->subdev_bitmap);
}
+ atomic_set(&dev->dec_active_cnt, 0);
+ memset(dev->vdec_racing_info, 0, sizeof(dev->vdec_racing_info));
+ mutex_init(&dev->dec_racing_info_mutex);
+
ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, -1);
if (ret) {
mtk_v4l2_err("Failed to register video device");
@@ -465,6 +474,10 @@ static const struct of_device_id mtk_vcodec_match[] = {
.compatible = "mediatek,mt8186-vcodec-dec",
.data = &mtk_vdec_single_core_pdata,
},
+ {
+ .compatible = "mediatek,mt8195-vcodec-dec",
+ .data = &mtk_lat_sig_core_pdata,
+ },
{},
};
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
index 14bed2bd4283..376db0e433d7 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
@@ -28,6 +28,10 @@ static const struct of_device_id mtk_vdec_hw_match[] = {
.compatible = "mediatek,mtk-vcodec-core",
.data = (void *)MTK_VDEC_CORE,
},
+ {
+ .compatible = "mediatek,mtk-vcodec-lat-soc",
+ .data = (void *)MTK_VDEC_LAT_SOC,
+ },
{},
};
MODULE_DEVICE_TABLE(of, mtk_vdec_hw_match);
@@ -166,9 +170,11 @@ static int mtk_vdec_hw_probe(struct platform_device *pdev)
subdev_dev->reg_base[VDEC_HW_SYS] = main_dev->reg_base[VDEC_HW_SYS];
set_bit(subdev_dev->hw_idx, main_dev->subdev_bitmap);
- ret = mtk_vdec_hw_init_irq(subdev_dev);
- if (ret)
- goto err;
+ if (IS_SUPPORT_VDEC_HW_IRQ(hw_idx)) {
+ ret = mtk_vdec_hw_init_irq(subdev_dev);
+ if (ret)
+ goto err;
+ }
subdev_dev->reg_base[VDEC_HW_MISC] =
devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h
index a63e4b1b81c3..36faa8d9d681 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h
@@ -17,6 +17,8 @@
#define VDEC_IRQ_CLR 0x10
#define VDEC_IRQ_CFG_REG 0xa4
+#define IS_SUPPORT_VDEC_HW_IRQ(hw_idx) ((hw_idx) != MTK_VDEC_LAT_SOC)
+
/**
* enum mtk_vdec_hw_reg_idx - subdev hardware register base index
* @VDEC_HW_SYS : vdec soc register index
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c
index 0fb7e5ba635b..4305e4eb9900 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c
@@ -144,6 +144,34 @@ static void mtk_vcodec_dec_disable_irq(struct mtk_vcodec_dev *vdec_dev, int hw_i
}
}
+static void mtk_vcodec_load_racing_info(struct mtk_vcodec_ctx *ctx)
+{
+ void __iomem *vdec_racing_addr;
+ int j;
+
+ mutex_lock(&ctx->dev->dec_racing_info_mutex);
+ if (atomic_inc_return(&ctx->dev->dec_active_cnt) == 1) {
+ vdec_racing_addr = ctx->dev->reg_base[VDEC_MISC] + 0x100;
+ for (j = 0; j < 132; j++)
+ writel(ctx->dev->vdec_racing_info[j], vdec_racing_addr + j * 4);
+ }
+ mutex_unlock(&ctx->dev->dec_racing_info_mutex);
+}
+
+static void mtk_vcodec_record_racing_info(struct mtk_vcodec_ctx *ctx)
+{
+ void __iomem *vdec_racing_addr;
+ int j;
+
+ mutex_lock(&ctx->dev->dec_racing_info_mutex);
+ if (atomic_dec_and_test(&ctx->dev->dec_active_cnt)) {
+ vdec_racing_addr = ctx->dev->reg_base[VDEC_MISC] + 0x100;
+ for (j = 0; j < 132; j++)
+ ctx->dev->vdec_racing_info[j] = readl(vdec_racing_addr + j * 4);
+ }
+ mutex_unlock(&ctx->dev->dec_racing_info_mutex);
+}
+
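
The two helpers above implement a "restore on first user, save on last user" pattern around a shared register window. Below is a small standalone C sketch of the same idea using a counter under a mutex; it is only a model of the pattern (compile with -pthread), not the kernel code.

/* Standalone sketch of the load-on-first / record-on-last pattern used
 * for the shared racing registers: the first active user restores the
 * saved state to "hardware", the last one leaving captures it back. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NWORDS 132

static unsigned int saved[NWORDS];	/* software copy */
static unsigned int hw[NWORDS];		/* stand-in for the register window */
static int active;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void session_enter(void)
{
	pthread_mutex_lock(&lock);
	if (++active == 1)			/* first user: restore state */
		memcpy(hw, saved, sizeof(hw));
	pthread_mutex_unlock(&lock);
}

static void session_leave(void)
{
	pthread_mutex_lock(&lock);
	if (--active == 0)			/* last user: capture state */
		memcpy(saved, hw, sizeof(saved));
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	session_enter();
	hw[0] = 0xdeadbeef;			/* pretend the decoder ran */
	session_leave();
	printf("saved[0] = 0x%x\n", saved[0]);
	return 0;
}
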
static struct mtk_vcodec_pm *mtk_vcodec_dec_get_pm(struct mtk_vcodec_dev *vdec_dev,
int hw_idx)
{
@@ -174,6 +202,14 @@ static void mtk_vcodec_dec_child_dev_on(struct mtk_vcodec_dev *vdec_dev,
mtk_vcodec_dec_pw_on(pm);
mtk_vcodec_dec_clock_on(pm);
}
+
+ if (hw_idx == MTK_VDEC_LAT0) {
+ pm = mtk_vcodec_dec_get_pm(vdec_dev, MTK_VDEC_LAT_SOC);
+ if (pm) {
+ mtk_vcodec_dec_pw_on(pm);
+ mtk_vcodec_dec_clock_on(pm);
+ }
+ }
}
static void mtk_vcodec_dec_child_dev_off(struct mtk_vcodec_dev *vdec_dev,
@@ -186,6 +222,14 @@ static void mtk_vcodec_dec_child_dev_off(struct mtk_vcodec_dev *vdec_dev,
mtk_vcodec_dec_clock_off(pm);
mtk_vcodec_dec_pw_off(pm);
}
+
+ if (hw_idx == MTK_VDEC_LAT0) {
+ pm = mtk_vcodec_dec_get_pm(vdec_dev, MTK_VDEC_LAT_SOC);
+ if (pm) {
+ mtk_vcodec_dec_clock_off(pm);
+ mtk_vcodec_dec_pw_off(pm);
+ }
+ }
}
void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx)
@@ -198,11 +242,17 @@ void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx)
mtk_vcodec_dec_child_dev_on(ctx->dev, hw_idx);
mtk_vcodec_dec_enable_irq(ctx->dev, hw_idx);
+
+ if (IS_VDEC_INNER_RACING(ctx->dev->dec_capability))
+ mtk_vcodec_load_racing_info(ctx);
}
EXPORT_SYMBOL_GPL(mtk_vcodec_dec_enable_hardware);
void mtk_vcodec_dec_disable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx)
{
+ if (IS_VDEC_INNER_RACING(ctx->dev->dec_capability))
+ mtk_vcodec_record_racing_info(ctx);
+
mtk_vcodec_dec_disable_irq(ctx->dev, hw_idx);
mtk_vcodec_dec_child_dev_off(ctx->dev, hw_idx);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
index 9c7e6145cebb..035c86e7809f 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
@@ -17,18 +17,24 @@ static const struct mtk_video_fmt mtk_video_formats[] = {
.type = MTK_FMT_DEC,
.num_planes = 1,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ .frmsize = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
},
{
.fourcc = V4L2_PIX_FMT_VP8,
.type = MTK_FMT_DEC,
.num_planes = 1,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ .frmsize = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
},
{
.fourcc = V4L2_PIX_FMT_VP9,
.type = MTK_FMT_DEC,
.num_planes = 1,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ .frmsize = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
},
{
.fourcc = V4L2_PIX_FMT_MT21C,
@@ -43,27 +49,6 @@ static const unsigned int num_supported_formats =
#define DEFAULT_OUT_FMT_IDX 0
#define DEFAULT_CAP_FMT_IDX 3
-static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
-};
-
-static const unsigned int num_supported_framesize =
- ARRAY_SIZE(mtk_vdec_framesizes);
-
/*
* This function tries to clean all display buffers, the buffers will return
* in display order.
@@ -618,8 +603,6 @@ const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata = {
.num_formats = &num_supported_formats,
.default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
.default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
- .vdec_framesizes = mtk_vdec_framesizes,
- .num_framesizes = &num_supported_framesize,
.worker = mtk_vdec_worker,
.flush_decoder = mtk_vdec_flush_decoder,
.is_subdev_supported = false,
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
index 16d55785d84b..c45bd2599bb2 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
@@ -112,14 +112,12 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
#define NUM_CTRLS ARRAY_SIZE(mtk_stateless_controls)
static struct mtk_video_fmt mtk_video_formats[5];
-static struct mtk_codec_framesizes mtk_vdec_framesizes[3];
static struct mtk_video_fmt default_out_format;
static struct mtk_video_fmt default_cap_format;
static unsigned int num_formats;
-static unsigned int num_framesizes;
-static struct v4l2_frmsize_stepwise stepwise_fhd = {
+static const struct v4l2_frmsize_stepwise stepwise_fhd = {
.min_width = MTK_VDEC_MIN_W,
.max_width = MTK_VDEC_MAX_W,
.step_width = 16,
@@ -348,7 +346,6 @@ static void mtk_vcodec_add_formats(unsigned int fourcc,
struct mtk_vcodec_dev *dev = ctx->dev;
const struct mtk_vcodec_dec_pdata *pdata = dev->vdec_pdata;
int count_formats = *pdata->num_formats;
- int count_framesizes = *pdata->num_framesizes;
switch (fourcc) {
case V4L2_PIX_FMT_H264_SLICE:
@@ -357,10 +354,15 @@ static void mtk_vcodec_add_formats(unsigned int fourcc,
mtk_video_formats[count_formats].fourcc = fourcc;
mtk_video_formats[count_formats].type = MTK_FMT_DEC;
mtk_video_formats[count_formats].num_planes = 1;
-
- mtk_vdec_framesizes[count_framesizes].fourcc = fourcc;
- mtk_vdec_framesizes[count_framesizes].stepwise = stepwise_fhd;
- num_framesizes++;
+ mtk_video_formats[count_formats].frmsize = stepwise_fhd;
+
+ if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED) &&
+ fourcc != V4L2_PIX_FMT_VP8_FRAME) {
+ mtk_video_formats[count_formats].frmsize.max_width =
+ VCODEC_DEC_4K_CODED_WIDTH;
+ mtk_video_formats[count_formats].frmsize.max_height =
+ VCODEC_DEC_4K_CODED_HEIGHT;
+ }
break;
case V4L2_PIX_FMT_MM21:
case V4L2_PIX_FMT_MT21C:
@@ -374,15 +376,15 @@ static void mtk_vcodec_add_formats(unsigned int fourcc,
}
num_formats++;
- mtk_v4l2_debug(3, "num_formats: %d num_frames:%d dec_capability: 0x%x",
- count_formats, count_framesizes, ctx->dev->dec_capability);
+ mtk_v4l2_debug(3, "num_formats: %d dec_capability: 0x%x",
+ count_formats, ctx->dev->dec_capability);
}
static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
{
int cap_format_count = 0, out_format_count = 0;
- if (num_formats && num_framesizes)
+ if (num_formats)
return;
if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
@@ -461,8 +463,6 @@ const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata = {
.num_formats = &num_formats,
.default_out_fmt = &default_out_format,
.default_cap_fmt = &default_cap_format,
- .vdec_framesizes = mtk_vdec_framesizes,
- .num_framesizes = &num_framesizes,
.uses_stateless_api = true,
.worker = mtk_vdec_worker,
.flush_decoder = mtk_vdec_flush_decoder,
@@ -481,8 +481,6 @@ const struct mtk_vcodec_dec_pdata mtk_lat_sig_core_pdata = {
.num_formats = &num_formats,
.default_out_fmt = &default_out_format,
.default_cap_fmt = &default_cap_format,
- .vdec_framesizes = mtk_vdec_framesizes,
- .num_framesizes = &num_framesizes,
.uses_stateless_api = true,
.worker = mtk_vdec_worker,
.flush_decoder = mtk_vdec_flush_decoder,
@@ -500,8 +498,6 @@ const struct mtk_vcodec_dec_pdata mtk_vdec_single_core_pdata = {
.num_formats = &num_formats,
.default_out_fmt = &default_out_format,
.default_cap_fmt = &default_cap_format,
- .vdec_framesizes = mtk_vdec_framesizes,
- .num_framesizes = &num_framesizes,
.uses_stateless_api = true,
.worker = mtk_vdec_worker,
.flush_decoder = mtk_vdec_flush_decoder,
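
Instead of toggling a per-context maximum in S_FMT, the stateless path now raises the per-format frame-size ceiling while the format table is built, when the firmware does not report 4K as disabled and the codec is not VP8. The sketch below models that table-building decision in plain C; the capability bit value and the resolutions are illustrative.

/* Sketch of the per-format 4K decision made while building the format
 * table. The capability flag value and the limits are illustrative. */
#include <stdio.h>

#define CAP_4K_DISABLED	0x10	/* illustrative bit, not the real value */

struct range { unsigned int max_w, max_h; };

static struct range pick_range(unsigned int capability, int is_vp8)
{
	struct range r = { 1920, 1088 };	/* FHD default */

	if (!(capability & CAP_4K_DISABLED) && !is_vp8) {
		r.max_w = 4096;
		r.max_h = 2304;
	}
	return r;
}

int main(void)
{
	struct range h264 = pick_range(0x0, 0);
	struct range vp8  = pick_range(0x0, 1);

	printf("h264 max %ux%u, vp8 max %ux%u\n",
	       h264.max_w, h264.max_h, vp8.max_w, vp8.max_h);
	return 0;
}
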
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
index a29041a0b7e0..ef4584a46417 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
@@ -19,15 +19,14 @@
#include "mtk_vcodec_util.h"
#include "vdec_msg_queue.h"
-#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
-#define MTK_PLATFORM_STR "platform:mt8173"
#define MTK_VCODEC_MAX_PLANES 3
#define MTK_V4L2_BENCHMARK 0
#define WAIT_INTR_TIMEOUT_MS 1000
#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
+#define IS_VDEC_INNER_RACING(capability) ((capability) & MTK_VCODEC_INNER_RACING)
/*
* enum mtk_hw_reg_idx - MTK hw register base index
@@ -104,6 +103,7 @@ enum mtk_vdec_hw_id {
MTK_VDEC_CORE,
MTK_VDEC_LAT0,
MTK_VDEC_LAT1,
+ MTK_VDEC_LAT_SOC,
MTK_VDEC_HW_MAX,
};
@@ -125,15 +125,7 @@ struct mtk_video_fmt {
enum mtk_fmt_type type;
u32 num_planes;
u32 flags;
-};
-
-/*
- * struct mtk_codec_framesizes - Structure used to store information about
- * framesizes
- */
-struct mtk_codec_framesizes {
- u32 fourcc;
- struct v4l2_frmsize_stepwise stepwise;
+ struct v4l2_frmsize_stepwise frmsize;
};
/*
@@ -255,7 +247,7 @@ struct vdec_pic_info {
* @param_change: indicate encode parameter type
* @enc_params: encoding parameters
* @dec_if: hooked decoder driver interface
- * @enc_if: hoooked encoder driver interface
+ * @enc_if: hooked encoder driver interface
* @drv_handle: driver handle for specific decode/encode instance
*
* @picinfo: store picture info after header parsing
@@ -285,8 +277,6 @@ struct vdec_pic_info {
* mtk_video_dec_buf.
* @hw_id: hardware index used to identify different hardware.
*
- * @max_width: hardware supported max width
- * @max_height: hardware supported max height
* @msg_queue: msg queue used to store lat buffer information.
*/
struct mtk_vcodec_ctx {
@@ -333,8 +323,6 @@ struct mtk_vcodec_ctx {
struct mutex lock;
int hw_id;
- unsigned int max_width;
- unsigned int max_height;
struct vdec_msg_queue msg_queue;
};
@@ -356,6 +344,7 @@ enum mtk_vdec_format_types {
MTK_VDEC_FORMAT_H264_SLICE = 0x100,
MTK_VDEC_FORMAT_VP8_FRAME = 0x200,
MTK_VDEC_FORMAT_VP9_FRAME = 0x400,
+ MTK_VCODEC_INNER_RACING = 0x20000,
};
/**
@@ -373,9 +362,6 @@ enum mtk_vdec_format_types {
* @default_out_fmt: default output buffer format
* @default_cap_fmt: default capture buffer format
*
- * @vdec_framesizes: supported video decoder frame sizes
- * @num_framesizes: count of video decoder frame sizes
- *
* @hw_arch: hardware arch is used to separate pure_sin_core and lat_sin_core
*
* @is_subdev_supported: whether support parent-node architecture(subdev)
@@ -398,9 +384,6 @@ struct mtk_vcodec_dec_pdata {
const struct mtk_video_fmt *default_out_fmt;
const struct mtk_video_fmt *default_cap_fmt;
- const struct mtk_codec_framesizes *vdec_framesizes;
- const int *num_framesizes;
-
enum mtk_vdec_hw_arch hw_arch;
bool is_subdev_supported;
@@ -477,6 +460,10 @@ struct mtk_vcodec_enc_pdata {
* @subdev_dev: subdev hardware device
* @subdev_prob_done: check whether all used hw device is prob done
* @subdev_bitmap: used to record hardware is ready or not
+ *
+ * @dec_active_cnt: number of active decoder instances, used to decide when the racing register values need to be saved or restored
+ * @vdec_racing_info: saved copy of the racing register values
+ * @dec_racing_info_mutex: mutex protecting vdec_racing_info and dec_active_cnt

*/
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
@@ -522,6 +509,11 @@ struct mtk_vcodec_dev {
void *subdev_dev[MTK_VDEC_HW_MAX];
int (*subdev_prob_done)(struct mtk_vcodec_dev *vdec_dev);
DECLARE_BITMAP(subdev_bitmap, MTK_VDEC_HW_MAX);
+
+ atomic_t dec_active_cnt;
+ u32 vdec_racing_info[132];
+ /* Protects access to vdec_racing_info data */
+ struct mutex dec_racing_info_mutex;
};
static inline struct mtk_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index c21367038c34..25e816863597 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -50,6 +50,14 @@ static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
int ret = 0;
switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_BITRATE_MODE val= %d",
+ ctrl->val);
+ if (ctrl->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) {
+ mtk_v4l2_err("Unsupported bitrate mode =%d", ctrl->val);
+ ret = -EINVAL;
+ }
+ break;
case V4L2_CID_MPEG_VIDEO_BITRATE:
mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_BITRATE val = %d",
ctrl->val);
@@ -204,12 +212,32 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
pdata->num_output_formats);
}
+static int mtk_vcodec_enc_get_chip_name(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct device *dev = &ctx->dev->plat_dev->dev;
+
+ if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-enc"))
+ return 8173;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8183-vcodec-enc"))
+ return 8183;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-enc"))
+ return 8192;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-enc"))
+ return 8195;
+ else
+ return 8173;
+}
+
static int vidioc_venc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strscpy(cap->driver, MTK_VCODEC_ENC_NAME, sizeof(cap->driver));
- strscpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
- strscpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct device *dev = &ctx->dev->plat_dev->dev;
+ int platform_name = mtk_vcodec_enc_get_chip_name(priv);
+
+ strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
+ snprintf(cap->card, sizeof(cap->card), "MT%d video encoder", platform_name);
return 0;
}
@@ -1373,6 +1401,9 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
V4L2_MPEG_VIDEO_VP8_PROFILE_0, 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
if (handler->error) {
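
The encoder now exposes V4L2_CID_MPEG_VIDEO_BITRATE_MODE but accepts only CBR; anything else is rejected in s_ctrl. A minimal userspace sketch selecting CBR through the extended-controls interface is shown below; the /dev/video1 path is an assumption.

/* Sketch: request constant-bitrate mode on the encoder node.
 * /dev/video1 is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;
	int fd = open("/dev/video1", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));
	ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
	ctrl.value = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
		perror("VIDIOC_S_EXT_CTRLS");	/* VBR would be refused */
	close(fd);
	return 0;
}
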
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c
index ca628321d272..580ce979e2a3 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c
@@ -51,7 +51,7 @@ void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
struct vb2_queue *vq;
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb2_v4l2;
- int index, vb2_index;
+ int index;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -62,8 +62,8 @@ void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
continue;
}
- vb2_index = vb2_find_timestamp(vq, dpb->reference_ts, 0);
- if (vb2_index < 0) {
+ vb = vb2_find_buffer(vq, dpb->reference_ts);
+ if (!vb) {
dev_err(&ctx->dev->plat_dev->dev,
"Reference invalid: dpb_index(%d) reference_ts(%lld)",
index, dpb->reference_ts);
@@ -76,7 +76,6 @@ void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
else
h264_dpb_info[index].reference_flag = 2;
- vb = vq->bufs[vb2_index];
vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
h264_dpb_info[index].field = vb2_v4l2->field;
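
The DPB lookup now receives a buffer pointer directly rather than an index that then has to be validated and dereferenced, which removes one failure case. Below is a tiny standalone C sketch of the pointer-returning lookup shape; the buffer struct is illustrative and not the vb2 API.

/* Sketch of a find-by-timestamp helper that hands back a pointer (or
 * NULL) instead of an index, removing the index-to-buffer step.
 * The buffer struct here is illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct buf {
	uint64_t timestamp;
	int index;
};

static struct buf *find_buffer(struct buf *bufs, size_t n, uint64_t ts)
{
	for (size_t i = 0; i < n; i++)
		if (bufs[i].timestamp == ts)
			return &bufs[i];
	return NULL;
}

int main(void)
{
	struct buf pool[3] = { { 100, 0 }, { 200, 1 }, { 300, 2 } };
	struct buf *vb = find_buffer(pool, 3, 200);

	printf("found index %d\n", vb ? vb->index : -1);
	return 0;
}
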
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
index 784d01f8bd50..4cc92700692b 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
@@ -633,6 +633,17 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
goto err_scp_decode;
}
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+ inst->vsi->wdma_end_addr_offset;
+ share_info->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
+ share_info->nal_info = inst->vsi->dec.nal_info;
+
+ if (IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
+ }
+
/* wait decoder done interrupt */
timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
@@ -646,18 +657,22 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
inst->vsi->wdma_end_addr_offset;
- share_info->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
- share_info->nal_info = inst->vsi->dec.nal_info;
vdec_msg_queue_update_ube_wptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
- memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
- sizeof(share_info->h264_slice_params));
- vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
+ }
+ mtk_vcodec_debug(inst, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
+ inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
inst->slice_dec_num++;
return 0;
err_scp_decode:
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
err_free_fb_out:
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c
index eef102f3f4f3..e1fe2603e92e 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c
@@ -237,7 +237,7 @@ static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst
struct vb2_queue *vq;
struct vb2_buffer *vb;
u64 referenct_ts;
- int index, vb2_index;
+ int index;
frame_header = vdec_vp8_slice_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_VP8_FRAME);
if (IS_ERR(frame_header))
@@ -246,8 +246,8 @@ static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
for (index = 0; index < 3; index++) {
referenct_ts = vdec_vp8_slice_get_ref_by_ts(frame_header, index);
- vb2_index = vb2_find_timestamp(vq, referenct_ts, 0);
- if (vb2_index < 0) {
+ vb = vb2_find_buffer(vq, referenct_ts);
+ if (!vb) {
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(frame_header))
mtk_vcodec_err(inst, "reference invalid: index(%d) ts(%lld)",
index, referenct_ts);
@@ -256,7 +256,6 @@ static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst
}
inst->vsi->vp8_dpb_info[index].reference_flag = 1;
- vb = vq->bufs[vb2_index];
inst->vsi->vp8_dpb_info[index].y_dma_addr =
vb2_dma_contig_plane_dma_addr(vb, 0);
if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
index 023aba4ec2c4..fb1c36a3592d 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
@@ -439,6 +439,8 @@ struct vdec_vp9_slice_ref {
* @init_vsi: vsi used for initialized VP9 instance
* @vsi: vsi used for decoding/flush ...
* @core_vsi: vsi used for Core stage
+ *
+ * @sc_pfc: per-frame context used by the single-core decode path
* @counts_map: used map to counts_helper
* @counts_helper: counts table according to newest kernel spec
*/
@@ -487,6 +489,7 @@ struct vdec_vp9_slice_instance {
};
struct vdec_vp9_slice_vsi *core_vsi;
+ struct vdec_vp9_slice_pfc sc_pfc;
struct vdec_vp9_slice_counts_map counts_map;
struct v4l2_vp9_frame_symbol_counts counts_helper;
};
@@ -523,13 +526,12 @@ static int vdec_vp9_slice_init_default_frame_ctx(struct vdec_vp9_slice_instance
if (vdec_vp9_slice_default_frame_ctx)
goto out;
- frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_KERNEL);
+ frame_ctx = kmemdup(remote_frame_ctx, sizeof(*frame_ctx), GFP_KERNEL);
if (!frame_ctx) {
ret = -ENOMEM;
goto out;
}
- memcpy(frame_ctx, remote_frame_ctx, sizeof(*frame_ctx));
vdec_vp9_slice_default_frame_ctx = frame_ctx;
out:
@@ -689,7 +691,26 @@ static int vdec_vp9_slice_tile_offset(int idx, int mi_num, int tile_log2)
int sbs = (mi_num + 7) >> 3;
int offset = ((idx * sbs) >> tile_log2) << 3;
- return offset < mi_num ? offset : mi_num;
+ return min(offset, mi_num);
+}
+
+static
+int vdec_vp9_slice_setup_single_from_src_to_dst(struct vdec_vp9_slice_instance *instance)
+{
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+
+ src = v4l2_m2m_next_src_buf(instance->ctx->m2m_ctx);
+ if (!src)
+ return -EINVAL;
+
+ dst = v4l2_m2m_next_dst_buf(instance->ctx->m2m_ctx);
+ if (!dst)
+ return -EINVAL;
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ return 0;
}
static int vdec_vp9_slice_setup_lat_from_src_buf(struct vdec_vp9_slice_instance *instance,
@@ -1567,6 +1588,33 @@ static int vdec_vp9_slice_update_prob(struct vdec_vp9_slice_instance *instance,
return 0;
}
+static int vdec_vp9_slice_update_single(struct vdec_vp9_slice_instance *instance,
+ struct vdec_vp9_slice_pfc *pfc)
+{
+ struct vdec_vp9_slice_vsi *vsi;
+
+ vsi = &pfc->vsi;
+ memcpy(&pfc->state[0], &vsi->state, sizeof(vsi->state));
+
+ mtk_vcodec_debug(instance, "Frame %u Y_CRC %08x %08x %08x %08x\n",
+ pfc->seq,
+ vsi->state.crc[0], vsi->state.crc[1],
+ vsi->state.crc[2], vsi->state.crc[3]);
+ mtk_vcodec_debug(instance, "Frame %u C_CRC %08x %08x %08x %08x\n",
+ pfc->seq,
+ vsi->state.crc[4], vsi->state.crc[5],
+ vsi->state.crc[6], vsi->state.crc[7]);
+
+ vdec_vp9_slice_update_prob(instance, vsi);
+
+ instance->width = vsi->frame.uh.frame_width;
+ instance->height = vsi->frame.uh.frame_height;
+ instance->frame_type = vsi->frame.uh.frame_type;
+ instance->show_frame = vsi->frame.uh.show_frame;
+
+ return 0;
+}
+
static int vdec_vp9_slice_update_lat(struct vdec_vp9_slice_instance *instance,
struct vdec_lat_buf *lat_buf,
struct vdec_vp9_slice_pfc *pfc)
@@ -1624,7 +1672,6 @@ static int vdec_vp9_slice_setup_core_buffer(struct vdec_vp9_slice_instance *inst
struct vdec_vp9_slice_reference *ref;
int plane;
int size;
- int idx;
int w;
int h;
int i;
@@ -1667,15 +1714,16 @@ static int vdec_vp9_slice_setup_core_buffer(struct vdec_vp9_slice_instance *inst
*/
for (i = 0; i < 3; i++) {
ref = &vsi->frame.ref[i];
- idx = vb2_find_timestamp(vq, pfc->ref_idx[i], 0);
- if (idx < 0) {
+ vb = vb2_find_buffer(vq, pfc->ref_idx[i]);
+ if (!vb) {
ref->frame_width = w;
ref->frame_height = h;
memset(&vsi->ref[i], 0, sizeof(vsi->ref[i]));
} else {
+ int idx = vb->index;
+
ref->frame_width = instance->dpb[idx].width;
ref->frame_height = instance->dpb[idx].height;
- vb = vq->bufs[idx];
vsi->ref[i].y.dma_addr =
vb2_dma_contig_plane_dma_addr(vb, 0);
if (plane == 1)
@@ -1690,6 +1738,40 @@ static int vdec_vp9_slice_setup_core_buffer(struct vdec_vp9_slice_instance *inst
return 0;
}
+static void vdec_vp9_slice_setup_single_buffer(struct vdec_vp9_slice_instance *instance,
+ struct vdec_vp9_slice_pfc *pfc,
+ struct vdec_vp9_slice_vsi *vsi,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb)
+{
+ int i;
+
+ vsi->bs.buf.dma_addr = bs->dma_addr;
+ vsi->bs.buf.size = bs->size;
+ vsi->bs.frame.dma_addr = bs->dma_addr;
+ vsi->bs.frame.size = bs->size;
+
+ for (i = 0; i < 2; i++) {
+ vsi->mv[i].dma_addr = instance->mv[i].dma_addr;
+ vsi->mv[i].size = instance->mv[i].size;
+ }
+ for (i = 0; i < 2; i++) {
+ vsi->seg[i].dma_addr = instance->seg[i].dma_addr;
+ vsi->seg[i].size = instance->seg[i].size;
+ }
+ vsi->tile.dma_addr = instance->tile.dma_addr;
+ vsi->tile.size = instance->tile.size;
+ vsi->prob.dma_addr = instance->prob.dma_addr;
+ vsi->prob.size = instance->prob.size;
+ vsi->counts.dma_addr = instance->counts.dma_addr;
+ vsi->counts.size = instance->counts.size;
+
+ vsi->row_info.buf = 0;
+ vsi->row_info.size = 0;
+
+ vdec_vp9_slice_setup_core_buffer(instance, pfc, vsi, fb, NULL);
+}
+
static int vdec_vp9_slice_setup_core(struct vdec_vp9_slice_instance *instance,
struct vdec_fb *fb,
struct vdec_lat_buf *lat_buf,
@@ -1716,6 +1798,43 @@ err:
return ret;
}
+static int vdec_vp9_slice_setup_single(struct vdec_vp9_slice_instance *instance,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb,
+ struct vdec_vp9_slice_pfc *pfc)
+{
+ struct vdec_vp9_slice_vsi *vsi = &pfc->vsi;
+ int ret;
+
+ ret = vdec_vp9_slice_setup_single_from_src_to_dst(instance);
+ if (ret)
+ goto err;
+
+ ret = vdec_vp9_slice_setup_pfc(instance, pfc);
+ if (ret)
+ goto err;
+
+ ret = vdec_vp9_slice_alloc_working_buffer(instance, vsi);
+ if (ret)
+ goto err;
+
+ vdec_vp9_slice_setup_single_buffer(instance, pfc, vsi, bs, fb);
+ vdec_vp9_slice_setup_seg_buffer(instance, vsi, &instance->seg[0]);
+
+ ret = vdec_vp9_slice_setup_prob_buffer(instance, vsi);
+ if (ret)
+ goto err;
+
+ ret = vdec_vp9_slice_setup_tile_buffer(instance, vsi, bs);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
static int vdec_vp9_slice_update_core(struct vdec_vp9_slice_instance *instance,
struct vdec_lat_buf *lat_buf,
struct vdec_vp9_slice_pfc *pfc)
@@ -1813,8 +1932,8 @@ static int vdec_vp9_slice_flush(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_vp9_slice_instance *instance = h_vdec;
mtk_vcodec_debug(instance, "flush ...\n");
-
- vdec_msg_queue_wait_lat_buf_full(&instance->ctx->msg_queue);
+ if (instance->ctx->dev->vdec_pdata->hw_arch != MTK_VDEC_PURE_SINGLE_CORE)
+ vdec_msg_queue_wait_lat_buf_full(&instance->ctx->msg_queue);
return vpu_dec_reset(&instance->vpu);
}
@@ -1867,6 +1986,63 @@ static int vdec_vp9_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
return 0;
}
+static int vdec_vp9_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_vp9_slice_instance *instance = h_vdec;
+ struct vdec_vp9_slice_pfc *pfc = &instance->sc_pfc;
+ struct vdec_vp9_slice_vsi *vsi;
+ struct mtk_vcodec_ctx *ctx;
+ int ret;
+
+ if (!instance || !instance->ctx)
+ return -EINVAL;
+ ctx = instance->ctx;
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vdec_vp9_slice_flush(h_vdec, bs, fb, res_chg);
+
+ fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+ if (!fb)
+ return -EBUSY;
+
+ vsi = &pfc->vsi;
+
+ ret = vdec_vp9_slice_setup_single(instance, bs, fb, pfc);
+ if (ret) {
+ mtk_vcodec_err(instance, "Failed to setup VP9 single ret %d\n", ret);
+ return ret;
+ }
+ vdec_vp9_slice_vsi_to_remote(vsi, instance->vsi);
+
+ ret = vpu_dec_start(&instance->vpu, NULL, 0);
+ if (ret) {
+ mtk_vcodec_err(instance, "Failed to dec VP9 ret %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
+ /* update remote vsi if decode timeout */
+ if (ret) {
+ mtk_vcodec_err(instance, "VP9 decode timeout %d\n", ret);
+ WRITE_ONCE(instance->vsi->state.timeout, 1);
+ }
+
+ vpu_dec_end(&instance->vpu);
+
+ vdec_vp9_slice_vsi_from_remote(vsi, instance->vsi, 0);
+ ret = vdec_vp9_slice_update_single(instance, pfc);
+ if (ret) {
+ mtk_vcodec_err(instance, "VP9 decode error: %d\n", ret);
+ return ret;
+ }
+
+ instance->ctx->decoded_frame_cnt++;
+ return 0;
+}
+
static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg)
{
@@ -1946,6 +2122,20 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
return 0;
}
+static int vdec_vp9_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_vp9_slice_instance *instance = h_vdec;
+ int ret;
+
+ if (instance->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
+ ret = vdec_vp9_slice_single_decode(h_vdec, bs, fb, res_chg);
+ else
+ ret = vdec_vp9_slice_lat_decode(h_vdec, bs, fb, res_chg);
+
+ return ret;
+}
+
static int vdec_vp9_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
struct vdec_vp9_slice_instance *instance;
@@ -2024,7 +2214,7 @@ err:
const struct vdec_common_if vdec_vp9_slice_lat_if = {
.init = vdec_vp9_slice_init,
- .decode = vdec_vp9_slice_lat_decode,
+ .decode = vdec_vp9_slice_decode,
.get_param = vdec_vp9_slice_get_param,
.deinit = vdec_vp9_slice_deinit,
};
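
One decode entry point now routes to the single-core or LAT implementation based on the platform's hardware architecture, so the same interface table can serve both. The short C sketch below shows that dispatch shape; all names are illustrative.

/* Sketch of the decode dispatch: a single entry point routes to the
 * pure single-core or the LAT+core implementation based on the
 * platform's hardware architecture. Names are illustrative. */
#include <stdio.h>

enum hw_arch { PURE_SINGLE_CORE, LAT_SINGLE_CORE };

static int decode_single(void) { puts("single-core path"); return 0; }
static int decode_lat(void)    { puts("LAT path");         return 0; }

static int decode(enum hw_arch arch)
{
	return arch == PURE_SINGLE_CORE ? decode_single() : decode_lat();
}

int main(void)
{
	decode(PURE_SINGLE_CORE);
	decode(LAT_SINGLE_CORE);
	return 0;
}
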
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
index 27b4b35039cf..f3807f03d880 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
@@ -47,7 +47,7 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
break;
case V4L2_PIX_FMT_VP9_FRAME:
ctx->dec_if = &vdec_vp9_slice_lat_if;
- ctx->hw_id = MTK_VDEC_LAT0;
+ ctx->hw_id = IS_VDEC_LAT_ARCH(hw_arch) ? MTK_VDEC_LAT0 : MTK_VDEC_CORE;
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c
index 35f4d5583084..df309e8e9379 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c
@@ -91,6 +91,11 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
(unsigned long)msg->ap_inst_addr;
+ if (!vpu) {
+ mtk_v4l2_err("ap_inst_addr is NULL, did the SCP hang or crash?");
+ return;
+ }
+
mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
vpu->failure = msg->status;
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index 88f81a134ba0..204e474d57f7 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -659,20 +659,19 @@ static struct vb2_buffer *get_ref_buf(struct tegra_ctx *ctx,
{
const struct v4l2_h264_dpb_entry *dpb = ctx->h264.decode_params->dpb;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
- int buf_idx = -1;
+ struct vb2_buffer *vb = NULL;
if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ vb = vb2_find_buffer(cap_q, dpb[dpb_idx].reference_ts);
/*
* If a DPB entry is unused or invalid, address of current destination
* buffer is returned.
*/
- if (buf_idx < 0)
+ if (!vb)
return &dst->vb2_buf;
- return vb2_get_buffer(cap_q, buf_idx);
+ return vb;
}
static int tegra_vde_validate_vb_size(struct tegra_ctx *ctx,
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
index 29c604b1b179..9418fcf740a8 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
@@ -79,6 +79,11 @@ void mxc_jpeg_enable_irq(void __iomem *reg, int slot)
writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
}
+void mxc_jpeg_disable_irq(void __iomem *reg, int slot)
+{
+ writel(0x0, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
+}
+
void mxc_jpeg_sw_reset(void __iomem *reg)
{
/*
@@ -100,9 +105,6 @@ void mxc_jpeg_enc_mode_conf(struct device *dev, void __iomem *reg)
/* all markers and segments */
writel(0x3ff, reg + CAST_CFG_MODE);
-
- /* quality factor */
- writel(0x4b, reg + CAST_QUALITY);
}
void mxc_jpeg_enc_mode_go(struct device *dev, void __iomem *reg)
@@ -114,6 +116,14 @@ void mxc_jpeg_enc_mode_go(struct device *dev, void __iomem *reg)
writel(0x140, reg + CAST_MODE);
}
+void mxc_jpeg_enc_set_quality(struct device *dev, void __iomem *reg, u8 quality)
+{
+ dev_dbg(dev, "CAST Encoder Quality %d...\n", quality);
+
+ /* quality factor */
+ writel(quality, reg + CAST_QUALITY);
+}
+
void mxc_jpeg_dec_mode_go(struct device *dev, void __iomem *reg)
{
dev_dbg(dev, "CAST Decoder GO...\n");
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
index d838e875616c..ecf3b6562ba2 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
@@ -53,10 +53,10 @@
#define CAST_REC_REGS_SEL CAST_STATUS4
#define CAST_LUMTH CAST_STATUS5
#define CAST_CHRTH CAST_STATUS6
-#define CAST_NOMFRSIZE_LO CAST_STATUS7
-#define CAST_NOMFRSIZE_HI CAST_STATUS8
-#define CAST_OFBSIZE_LO CAST_STATUS9
-#define CAST_OFBSIZE_HI CAST_STATUS10
+#define CAST_NOMFRSIZE_LO CAST_STATUS16
+#define CAST_NOMFRSIZE_HI CAST_STATUS17
+#define CAST_OFBSIZE_LO CAST_STATUS18
+#define CAST_OFBSIZE_HI CAST_STATUS19
#define MXC_MAX_SLOTS 1 /* TODO use all 4 slots*/
/* JPEG-Decoder Wrapper Slot Registers 0..3 */
@@ -119,12 +119,14 @@ int mxc_jpeg_enable(void __iomem *reg);
void wait_frmdone(struct device *dev, void __iomem *reg);
void mxc_jpeg_enc_mode_conf(struct device *dev, void __iomem *reg);
void mxc_jpeg_enc_mode_go(struct device *dev, void __iomem *reg);
+void mxc_jpeg_enc_set_quality(struct device *dev, void __iomem *reg, u8 quality);
void mxc_jpeg_dec_mode_go(struct device *dev, void __iomem *reg);
int mxc_jpeg_get_slot(void __iomem *reg);
u32 mxc_jpeg_get_offset(void __iomem *reg, int slot);
void mxc_jpeg_enable_slot(void __iomem *reg, int slot);
void mxc_jpeg_set_l_endian(void __iomem *reg, int le);
void mxc_jpeg_enable_irq(void __iomem *reg, int slot);
+void mxc_jpeg_disable_irq(void __iomem *reg, int slot);
int mxc_jpeg_set_input(void __iomem *reg, u32 in_buf, u32 bufsize);
int mxc_jpeg_set_output(void __iomem *reg, u16 out_pitch, u32 out_buf,
u16 w, u16 h);
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index f36b512bae51..32fd04a3d8bb 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -389,7 +389,6 @@ static int enum_fmt(const struct mxc_jpeg_fmt *mxc_formats, int n,
if (i >= n)
return -EINVAL;
- strscpy(f->description, mxc_formats[i].name, sizeof(f->description));
f->pixelformat = mxc_formats[i].fourcc;
return 0;
@@ -520,6 +519,7 @@ static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg,
GFP_ATOMIC);
if (!cfg_stm)
goto err;
+ memset(cfg_stm, 0, MXC_JPEG_MAX_CFG_STREAM);
jpeg->slot_data[slot].cfg_stream_vaddr = cfg_stm;
skip_alloc:
@@ -558,6 +558,18 @@ static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg,
jpeg->slot_data[slot].used = false;
}
+static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf,
+ struct vb2_v4l2_buffer *dst_buf)
+{
+ if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src_buf)) {
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
+ notify_eos(ctx);
+ ctx->header_parsed = false;
+ }
+}
+
static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
{
struct mxc_jpeg_dev *jpeg = priv;
@@ -580,15 +592,8 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
dev_dbg(dev, "Irq %d on slot %d.\n", irq, slot);
ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
- if (!ctx) {
- dev_err(dev,
- "Instance released before the end of transaction.\n");
- /* soft reset only resets internal state, not registers */
- mxc_jpeg_sw_reset(reg);
- /* clear all interrupts */
- writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS));
+ if (WARN_ON(!ctx))
goto job_unlock;
- }
if (slot != ctx->slot) {
/* TODO investigate when adding multi-instance support */
@@ -624,6 +629,7 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
ctx->enc_state == MXC_JPEG_ENC_CONF) {
ctx->enc_state = MXC_JPEG_ENCODING;
dev_dbg(dev, "Encoder config finished. Start encoding...\n");
+ mxc_jpeg_enc_set_quality(dev, reg, ctx->jpeg_quality);
mxc_jpeg_enc_mode_go(dev, reg);
goto job_unlock;
}
@@ -632,6 +638,7 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
dev_dbg(dev, "Decoder DHT cfg finished. Start decoding...\n");
goto job_unlock;
}
+
if (jpeg->mode == MXC_JPEG_ENCODE) {
payload = readl(reg + MXC_SLOT_OFFSET(slot, SLOT_BUF_PTR));
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
@@ -659,7 +666,9 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
buf_state = VB2_BUF_STATE_DONE;
buffers_done:
+ mxc_jpeg_disable_irq(reg, ctx->slot);
jpeg->slot_data[slot].used = false; /* unused, but don't free */
+ mxc_jpeg_check_and_set_last_buffer(ctx, src_buf, dst_buf);
v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, buf_state);
@@ -755,7 +764,13 @@ static unsigned int mxc_jpeg_setup_cfg_stream(void *cfg_stream_vaddr,
u32 fourcc,
u16 w, u16 h)
{
- unsigned int offset = 0;
+ /*
+ * There is a hardware issue where the first 128 bytes of the
+ * configuration data cannot be loaded correctly.
+ * To avoid it, write the configuration stream starting at an offset
+ * of no less than 0x80 (128 bytes).
+ */
+ unsigned int offset = 0x80;
u8 *cfg = (u8 *)cfg_stream_vaddr;
struct mxc_jpeg_sof *sof;
struct mxc_jpeg_sos *sos;
@@ -887,8 +902,8 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
jpeg->slot_data[slot].cfg_stream_size =
mxc_jpeg_setup_cfg_stream(cfg_stream_vaddr,
q_data->fmt->fourcc,
- q_data->w_adjusted,
- q_data->h_adjusted);
+ q_data->w,
+ q_data->h);
/* chain the config descriptor with the encoding descriptor */
cfg_desc->next_descpt_ptr = desc_handle | MXC_NXT_DESCPT_EN;
@@ -970,7 +985,7 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
&q_data_cap->h_adjusted,
q_data_cap->h_adjusted, /* adjust up */
MXC_JPEG_MAX_HEIGHT,
- q_data_cap->fmt->v_align,
+ 0,
0);
/* setup bytesperline/sizeimage for capture queue */
@@ -1027,6 +1042,7 @@ static void mxc_jpeg_device_run(void *priv)
jpeg_src_buf->jpeg_parse_error = true;
}
if (jpeg_src_buf->jpeg_parse_error) {
+ mxc_jpeg_check_and_set_last_buffer(ctx, src_buf, dst_buf);
v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
@@ -1077,45 +1093,33 @@ end:
spin_unlock_irqrestore(&ctx->mxc_jpeg->hw_lock, flags);
}
-static void mxc_jpeg_set_last_buffer_dequeued(struct mxc_jpeg_ctx *ctx)
-{
- struct vb2_queue *q;
-
- ctx->stopped = 1;
- q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
- if (!list_empty(&q->done_list))
- return;
-
- q->last_buffer_dequeued = true;
- wake_up(&q->done_wq);
- ctx->stopped = 0;
- ctx->header_parsed = false;
-}
-
static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
struct v4l2_fh *fh = file->private_data;
struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
- struct device *dev = ctx->mxc_jpeg->dev;
int ret;
ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
if (ret < 0)
return ret;
- if (cmd->cmd == V4L2_DEC_CMD_STOP) {
- dev_dbg(dev, "Received V4L2_DEC_CMD_STOP");
- if (v4l2_m2m_num_src_bufs_ready(fh->m2m_ctx) == 0) {
- /* No more src bufs, notify app EOS */
- notify_eos(ctx);
- mxc_jpeg_set_last_buffer_dequeued(ctx);
- } else {
- /* will send EOS later*/
- ctx->stopping = 1;
- }
+ if (!vb2_is_streaming(v4l2_m2m_get_src_vq(fh->m2m_ctx)))
+ return 0;
+
+ ret = v4l2_m2m_ioctl_decoder_cmd(file, priv, cmd);
+ if (ret < 0)
+ return ret;
+
+ if (cmd->cmd == V4L2_DEC_CMD_STOP &&
+ v4l2_m2m_has_stopped(fh->m2m_ctx)) {
+ notify_eos(ctx);
+ ctx->header_parsed = false;
}
+ if (cmd->cmd == V4L2_DEC_CMD_START &&
+ v4l2_m2m_has_stopped(fh->m2m_ctx))
+ vb2_clear_last_buffer_dequeued(&fh->m2m_ctx->cap_q_ctx.q);
return 0;
}
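
With the command handling delegated to the v4l2-mem2mem helpers, draining follows the standard V4L2 sequence: issue V4L2_DEC_CMD_STOP and dequeue capture buffers until one carries V4L2_BUF_FLAG_LAST. A minimal userspace sketch is shown below; the device path, MMAP memory type and single-plane multiplanar setup are assumptions.

/* Sketch of the standard drain sequence against the decoder node:
 * send STOP, then dequeue capture buffers until the LAST flag shows up.
 * /dev/video0 and the single-plane mplane setup are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_decoder_cmd cmd;
	struct v4l2_buffer buf;
	struct v4l2_plane plane;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = V4L2_DEC_CMD_STOP;
	if (ioctl(fd, VIDIOC_DECODER_CMD, &cmd) < 0)
		perror("VIDIOC_DECODER_CMD");

	for (;;) {
		memset(&buf, 0, sizeof(buf));
		memset(&plane, 0, sizeof(plane));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.length = 1;
		buf.m.planes = &plane;
		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
			break;			/* not streaming, no buffers, ... */
		if (buf.flags & V4L2_BUF_FLAG_LAST) {
			puts("drained");
			break;
		}
	}
	close(fd);
	return 0;
}
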
@@ -1124,24 +1128,27 @@ static int mxc_jpeg_encoder_cmd(struct file *file, void *priv,
{
struct v4l2_fh *fh = file->private_data;
struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
- struct device *dev = ctx->mxc_jpeg->dev;
int ret;
ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
if (ret < 0)
return ret;
- if (cmd->cmd == V4L2_ENC_CMD_STOP) {
- dev_dbg(dev, "Received V4L2_ENC_CMD_STOP");
- if (v4l2_m2m_num_src_bufs_ready(fh->m2m_ctx) == 0) {
- /* No more src bufs, notify app EOS */
- notify_eos(ctx);
- mxc_jpeg_set_last_buffer_dequeued(ctx);
- } else {
- /* will send EOS later*/
- ctx->stopping = 1;
- }
- }
+ if (!vb2_is_streaming(v4l2_m2m_get_src_vq(fh->m2m_ctx)) ||
+ !vb2_is_streaming(v4l2_m2m_get_dst_vq(fh->m2m_ctx)))
+ return 0;
+
+ ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, cmd);
+ if (ret < 0)
+ return 0;
+
+ if (cmd->cmd == V4L2_ENC_CMD_STOP &&
+ v4l2_m2m_has_stopped(fh->m2m_ctx))
+ notify_eos(ctx);
+
+ if (cmd->cmd == V4L2_ENC_CMD_START &&
+ v4l2_m2m_has_stopped(fh->m2m_ctx))
+ vb2_clear_last_buffer_dequeued(&fh->m2m_ctx->cap_q_ctx.q);
return 0;
}
@@ -1154,18 +1161,30 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
{
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct mxc_jpeg_q_data *q_data = NULL;
+ struct mxc_jpeg_q_data tmp_q;
int i;
q_data = mxc_jpeg_get_q_data(ctx, q->type);
if (!q_data)
return -EINVAL;
+ tmp_q.fmt = q_data->fmt;
+ tmp_q.w = q_data->w_adjusted;
+ tmp_q.h = q_data->h_adjusted;
+ for (i = 0; i < MXC_JPEG_MAX_PLANES; i++) {
+ tmp_q.bytesperline[i] = q_data->bytesperline[i];
+ tmp_q.sizeimage[i] = q_data->sizeimage[i];
+ }
+ mxc_jpeg_sizeimage(&tmp_q);
+ for (i = 0; i < MXC_JPEG_MAX_PLANES; i++)
+ tmp_q.sizeimage[i] = max(tmp_q.sizeimage[i], q_data->sizeimage[i]);
+
/* Handle CREATE_BUFS situation - *nplanes != 0 */
if (*nplanes) {
if (*nplanes != q_data->fmt->colplanes)
return -EINVAL;
for (i = 0; i < *nplanes; i++) {
- if (sizes[i] < q_data->sizeimage[i])
+ if (sizes[i] < tmp_q.sizeimage[i])
return -EINVAL;
}
return 0;
@@ -1174,7 +1193,7 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
/* Handle REQBUFS situation */
*nplanes = q_data->fmt->colplanes;
for (i = 0; i < *nplanes; i++)
- sizes[i] = q_data->sizeimage[i];
+ sizes[i] = tmp_q.sizeimage[i];
return 0;
}
@@ -1185,6 +1204,8 @@ static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, q->type);
int ret;
+ v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
+
if (ctx->mxc_jpeg->mode == MXC_JPEG_DECODE && V4L2_TYPE_IS_CAPTURE(q->type))
ctx->source_change = 0;
dev_dbg(ctx->mxc_jpeg->dev, "Start streaming ctx=%p", ctx);
@@ -1216,11 +1237,15 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
break;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
- pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- ctx->stopping = 0;
- ctx->stopped = 0;
+
+ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
+ notify_eos(ctx);
+ ctx->header_parsed = false;
}
+
+ pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);
}
static int mxc_jpeg_valid_comp_id(struct device *dev,
@@ -1374,11 +1399,6 @@ static int mxc_jpeg_parse(struct mxc_jpeg_ctx *ctx, struct vb2_buffer *vb)
}
q_data_out->w = header.frame.width;
q_data_out->h = header.frame.height;
- if (header.frame.width % 8 != 0 || header.frame.height % 8 != 0) {
- dev_err(dev, "JPEG width or height not multiple of 8: %dx%d\n",
- header.frame.width, header.frame.height);
- return -EINVAL;
- }
if (header.frame.width > MXC_JPEG_MAX_WIDTH ||
header.frame.height > MXC_JPEG_MAX_HEIGHT) {
dev_err(dev, "JPEG width or height should be <= 8192: %dx%d\n",
@@ -1424,6 +1444,20 @@ static void mxc_jpeg_buf_queue(struct vb2_buffer *vb)
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mxc_jpeg_src_buf *jpeg_src_buf;
+ if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
+ vb2_is_streaming(vb->vb2_queue) &&
+ v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
+ struct mxc_jpeg_q_data *q_data;
+
+ q_data = mxc_jpeg_get_q_data(ctx, vb->vb2_queue->type);
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = q_data->sequence++;
+ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
+ notify_eos(ctx);
+ ctx->header_parsed = false;
+ return;
+ }
+
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
goto end;
@@ -1472,24 +1506,11 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
}
- return 0;
-}
-
-static void mxc_jpeg_buf_finish(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_queue *q = vb->vb2_queue;
-
- if (V4L2_TYPE_IS_OUTPUT(vb->type))
- return;
- if (!ctx->stopped)
- return;
- if (list_empty(&q->done_list)) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- ctx->stopped = 0;
- ctx->header_parsed = false;
+ if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) {
+ vb2_set_plane_payload(vb, 0, 0);
+ vb2_set_plane_payload(vb, 1, 0);
}
+ return 0;
}
static const struct vb2_ops mxc_jpeg_qops = {
@@ -1498,7 +1519,6 @@ static const struct vb2_ops mxc_jpeg_qops = {
.wait_finish = vb2_ops_wait_finish,
.buf_out_validate = mxc_jpeg_buf_out_validate,
.buf_prepare = mxc_jpeg_buf_prepare,
- .buf_finish = mxc_jpeg_buf_finish,
.start_streaming = mxc_jpeg_start_streaming,
.stop_streaming = mxc_jpeg_stop_streaming,
.buf_queue = mxc_jpeg_buf_queue,
@@ -1563,6 +1583,56 @@ static void mxc_jpeg_set_default_params(struct mxc_jpeg_ctx *ctx)
}
}
+static int mxc_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mxc_jpeg_ctx *ctx =
+ container_of(ctrl->handler, struct mxc_jpeg_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->jpeg_quality = ctrl->val;
+ break;
+ default:
+ dev_err(ctx->mxc_jpeg->dev, "Invalid control, id = %d, val = %d\n",
+ ctrl->id, ctrl->val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mxc_jpeg_ctrl_ops = {
+ .s_ctrl = mxc_jpeg_s_ctrl,
+};
+
+static void mxc_jpeg_encode_ctrls(struct mxc_jpeg_ctx *ctx)
+{
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &mxc_jpeg_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1, 100, 1, 75);
+}
+
+static int mxc_jpeg_ctrls_setup(struct mxc_jpeg_ctx *ctx)
+{
+ int err;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 2);
+
+ if (ctx->mxc_jpeg->mode == MXC_JPEG_ENCODE)
+ mxc_jpeg_encode_ctrls(ctx);
+
+ if (ctx->ctrl_handler.error) {
+ err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ err = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (err)
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+}
+
static int mxc_jpeg_open(struct file *file)
{
struct mxc_jpeg_dev *mxc_jpeg = video_drvdata(file);
@@ -1594,6 +1664,12 @@ static int mxc_jpeg_open(struct file *file)
goto error;
}
+ ret = mxc_jpeg_ctrls_setup(ctx);
+ if (ret) {
+ dev_err(ctx->mxc_jpeg->dev, "failed to setup mxc jpeg controls\n");
+ goto err_ctrls_setup;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
mxc_jpeg_set_default_params(ctx);
ctx->slot = MXC_MAX_SLOTS; /* slot not allocated yet */
@@ -1605,6 +1681,8 @@ static int mxc_jpeg_open(struct file *file)
return 0;
+err_ctrls_setup:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -1646,7 +1724,6 @@ static int mxc_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index)
return -EINVAL;
f->pixelformat = q_data->fmt->fourcc;
- strscpy(f->description, q_data->fmt->name, sizeof(f->description));
return 0;
}
}
@@ -1684,22 +1761,17 @@ static int mxc_jpeg_try_fmt(struct v4l2_format *f, const struct mxc_jpeg_fmt *fm
pix_mp->num_planes = fmt->colplanes;
pix_mp->pixelformat = fmt->fourcc;
- /*
- * use MXC_JPEG_H_ALIGN instead of fmt->v_align, for vertical
- * alignment, to loosen up the alignment to multiple of 8,
- * otherwise NV12-1080p fails as 1080 is not a multiple of 16
- */
+ pix_mp->width = w;
+ pix_mp->height = h;
v4l_bound_align_image(&w,
- MXC_JPEG_MIN_WIDTH,
- w, /* adjust downwards*/
+ w, /* adjust upwards*/
+ MXC_JPEG_MAX_WIDTH,
fmt->h_align,
&h,
- MXC_JPEG_MIN_HEIGHT,
- h, /* adjust downwards*/
- MXC_JPEG_H_ALIGN,
+ h, /* adjust upwards*/
+ MXC_JPEG_MAX_HEIGHT,
+ 0,
0);
- pix_mp->width = w; /* negotiate the width */
- pix_mp->height = h; /* negotiate the height */
/* get user input into the tmp_q */
tmp_q.w = w;
@@ -1825,35 +1897,19 @@ static int mxc_jpeg_s_fmt(struct mxc_jpeg_ctx *ctx,
q_data->w_adjusted = q_data->w;
q_data->h_adjusted = q_data->h;
- if (jpeg->mode == MXC_JPEG_DECODE) {
- /*
- * align up the resolution for CAST IP,
- * but leave the buffer resolution unchanged
- */
- v4l_bound_align_image(&q_data->w_adjusted,
- q_data->w_adjusted, /* adjust upwards */
- MXC_JPEG_MAX_WIDTH,
- q_data->fmt->h_align,
- &q_data->h_adjusted,
- q_data->h_adjusted, /* adjust upwards */
- MXC_JPEG_MAX_HEIGHT,
- q_data->fmt->v_align,
- 0);
- } else {
- /*
- * align down the resolution for CAST IP,
- * but leave the buffer resolution unchanged
- */
- v4l_bound_align_image(&q_data->w_adjusted,
- MXC_JPEG_MIN_WIDTH,
- q_data->w_adjusted, /* adjust downwards*/
- q_data->fmt->h_align,
- &q_data->h_adjusted,
- MXC_JPEG_MIN_HEIGHT,
- q_data->h_adjusted, /* adjust downwards*/
- q_data->fmt->v_align,
- 0);
- }
+ /*
+ * align up the resolution for CAST IP,
+ * but leave the buffer resolution unchanged
+ */
+ v4l_bound_align_image(&q_data->w_adjusted,
+ q_data->w_adjusted, /* adjust upwards */
+ MXC_JPEG_MAX_WIDTH,
+ q_data->fmt->h_align,
+ &q_data->h_adjusted,
+ q_data->h_adjusted, /* adjust upwards */
+ MXC_JPEG_MAX_HEIGHT,
+ q_data->fmt->v_align,
+ 0);
for (i = 0; i < pix_mp->num_planes; i++) {
q_data->bytesperline[i] = pix_mp->plane_fmt[i].bytesperline;
@@ -1958,32 +2014,13 @@ static int mxc_jpeg_subscribe_event(struct v4l2_fh *fh,
return v4l2_event_subscribe(fh, sub, 0, NULL);
case V4L2_EVENT_SOURCE_CHANGE:
return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
default:
return -EINVAL;
}
}
-static int mxc_jpeg_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct v4l2_fh *fh = file->private_data;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
- struct device *dev = ctx->mxc_jpeg->dev;
- int num_src_ready = v4l2_m2m_num_src_bufs_ready(fh->m2m_ctx);
- int ret;
-
- dev_dbg(dev, "DQBUF type=%d, index=%d", buf->type, buf->index);
- if (ctx->stopping == 1 && num_src_ready == 0) {
- /* No more src bufs, notify app EOS */
- notify_eos(ctx);
- ctx->stopping = 0;
- mxc_jpeg_set_last_buffer_dequeued(ctx);
- }
-
- ret = v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
- return ret;
-}
-
static const struct v4l2_ioctl_ops mxc_jpeg_ioctl_ops = {
.vidioc_querycap = mxc_jpeg_querycap,
.vidioc_enum_fmt_vid_cap = mxc_jpeg_enum_fmt_vid_cap,
@@ -2007,7 +2044,7 @@ static const struct v4l2_ioctl_ops mxc_jpeg_ioctl_ops = {
.vidioc_encoder_cmd = mxc_jpeg_encoder_cmd,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
- .vidioc_dqbuf = mxc_jpeg_dqbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
@@ -2031,6 +2068,7 @@ static int mxc_jpeg_release(struct file *file)
else
dev_dbg(dev, "Release JPEG encoder instance on slot %d.",
ctx->slot);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -2167,12 +2205,14 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->clk_ipg = devm_clk_get(dev, "ipg");
if (IS_ERR(jpeg->clk_ipg)) {
dev_err(dev, "failed to get clock: ipg\n");
+ ret = PTR_ERR(jpeg->clk_ipg);
goto err_clk;
}
jpeg->clk_per = devm_clk_get(dev, "per");
if (IS_ERR(jpeg->clk_per)) {
dev_err(dev, "failed to get clock: per\n");
+ ret = PTR_ERR(jpeg->clk_per);
goto err_clk;
}
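
The encoder now takes the JPEG quality from the standard compression-quality control instead of a hard-coded register value, and programs it just before kicking off the encode. A minimal userspace sketch setting the control is shown below; the /dev/video2 path and the value 90 are assumptions, with the driver registering the control over 1..100 and a default of 75.

/* Sketch: set the JPEG compression quality on the encoder node before
 * encoding. /dev/video2 and the value 90 are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl;
	int fd = open("/dev/video2", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_JPEG_COMPRESSION_QUALITY;
	ctrl.value = 90;	/* control range is 1..100, default 75 */
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");
	close(fd);
	return 0;
}
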
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index 760eaf5387a1..c508d41a906f 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -92,11 +92,11 @@ struct mxc_jpeg_ctx {
struct mxc_jpeg_q_data cap_q;
struct v4l2_fh fh;
enum mxc_jpeg_enc_state enc_state;
- unsigned int stopping;
- unsigned int stopped;
unsigned int slot;
unsigned int source_change;
bool header_parsed;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u8 jpeg_quality;
};
struct mxc_jpeg_slot_data {
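The mxc-jpeg.h change above adds a per-context v4l2_ctrl_handler and a cached JPEG quality value, and mxc_jpeg_release() now frees that handler. A minimal sketch of how such a handler is typically initialised when a context is opened follows, assuming the standard V4L2_CID_JPEG_COMPRESSION_QUALITY control; the function name, the NULL ops pointer and the 1..100/default-75 range are illustrative assumptions, not taken from this patch:

static int mxc_jpeg_example_ctrls_setup(struct mxc_jpeg_ctx *ctx)
{
	struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;

	v4l2_ctrl_handler_init(hdl, 1);
	/* Illustrative range/default; a real driver would pass its own ctrl ops. */
	v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_JPEG_COMPRESSION_QUALITY,
			  1, 100, 1, 75);
	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}

	ctx->fh.ctrl_handler = hdl;
	return 0;
}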
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 80b1c021d14a..905072871ed2 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -46,6 +46,11 @@
/* Register map definition */
+/* CSIS version */
+#define MIPI_CSIS_VERSION 0x00
+#define MIPI_CSIS_VERSION_IMX7D 0x03030505
+#define MIPI_CSIS_VERSION_IMX8MP 0x03060301
+
/* CSIS common control */
#define MIPI_CSIS_CMN_CTRL 0x04
#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW BIT(16)
@@ -1155,6 +1160,32 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd);
+ struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[0];
+
+ if (pad != CSIS_PAD_SOURCE)
+ return -EINVAL;
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL;
+ fd->num_entries = 1;
+
+ memset(entry, 0, sizeof(*entry));
+
+ mutex_lock(&csis->lock);
+
+ entry->flags = 0;
+ entry->pixelcode = csis->csis_fmt->code;
+ entry->bus.csi2.vc = 0;
+ entry->bus.csi2.dt = csis->csis_fmt->data_type;
+
+ mutex_unlock(&csis->lock);
+
+ return 0;
+}
+
static int mipi_csis_log_status(struct v4l2_subdev *sd)
{
struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd);
@@ -1179,6 +1210,7 @@ static const struct v4l2_subdev_pad_ops mipi_csis_pad_ops = {
.enum_mbus_code = mipi_csis_enum_mbus_code,
.get_fmt = mipi_csis_get_fmt,
.set_fmt = mipi_csis_set_fmt,
+ .get_frame_desc = mipi_csis_get_frame_desc,
};
static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
@@ -1378,6 +1410,13 @@ static int mipi_csis_subdev_init(struct mipi_csis_device *csis)
sd->dev = csis->dev;
+ sd->fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(csis->dev),
+ 1, 0, 0);
+ if (!sd->fwnode) {
+ dev_err(csis->dev, "Unable to retrieve endpoint for port@1\n");
+ return -ENOENT;
+ }
+
csis->csis_fmt = &mipi_csis_formats[0];
mipi_csis_init_cfg(sd, NULL);
@@ -1498,6 +1537,7 @@ cleanup:
v4l2_async_unregister_subdev(&csis->sd);
disable_clock:
mipi_csis_clk_disable(csis);
+ fwnode_handle_put(csis->sd.fwnode);
mutex_destroy(&csis->lock);
return ret;
@@ -1517,6 +1557,7 @@ static int mipi_csis_remove(struct platform_device *pdev)
mipi_csis_runtime_suspend(&pdev->dev);
mipi_csis_clk_disable(csis);
media_entity_cleanup(&csis->sd.entity);
+ fwnode_handle_put(csis->sd.fwnode);
mutex_destroy(&csis->lock);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index f993f349b66b..88f188e0f750 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -245,7 +245,7 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
}
if (!csid->testgen.enabled &&
- !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
+ !media_pad_remote_pad_first(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;
}
@@ -518,7 +518,7 @@ static int csid_set_test_pattern(struct csid_device *csid, s32 value)
struct csid_testgen_config *tg = &csid->testgen;
/* If CSID is linked to CSIPHY, do not allow to enable test generator */
- if (value && media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
+ if (value && media_pad_remote_pad_first(&csid->pads[MSM_CSID_PAD_SINK]))
return -EBUSY;
tg->enabled = !!value;
@@ -666,7 +666,7 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
if (csid->num_supplies) {
csid->supplies = devm_kmalloc_array(camss->dev,
csid->num_supplies,
- sizeof(csid->supplies),
+ sizeof(*csid->supplies),
GFP_KERNEL);
if (!csid->supplies)
return -ENOMEM;
@@ -729,7 +729,7 @@ static int csid_link_setup(struct media_entity *entity,
const struct media_pad *remote, u32 flags)
{
if (flags & MEDIA_LNK_FL_ENABLED)
- if (media_entity_remote_pad(local))
+ if (media_pad_remote_pad_first(local))
return -EBUSY;
if ((local->flags & MEDIA_PAD_FL_SINK) &&
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 75fcfc627400..3f726a7237f5 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -693,7 +693,7 @@ static int csiphy_link_setup(struct media_entity *entity,
struct csiphy_device *csiphy;
struct csid_device *csid;
- if (media_entity_remote_pad(local))
+ if (media_pad_remote_pad_first(local))
return -EBUSY;
sd = media_entity_to_v4l2_subdev(entity);
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 4ee11bb979cd..b713f5b86aba 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -812,7 +812,7 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
- if (!media_entity_remote_pad(&line->pads[MSM_ISPIF_PAD_SINK]))
+ if (!media_pad_remote_pad_first(&line->pads[MSM_ISPIF_PAD_SINK]))
return -ENOLINK;
/* Config */
@@ -1253,6 +1253,41 @@ static enum ispif_intf ispif_get_intf(enum vfe_line_id line_id)
}
/*
+ * ispif_get_vfe_id - Get VFE HW module id
+ * @entity: Pointer to VFE media entity structure
+ * @id: Return VFE HW module id here
+ */
+static void ispif_get_vfe_id(struct media_entity *entity, u8 *id)
+{
+ struct v4l2_subdev *sd;
+ struct vfe_line *line;
+ struct vfe_device *vfe;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ line = v4l2_get_subdevdata(sd);
+ vfe = to_vfe(line);
+
+ *id = vfe->id;
+}
+
+/*
+ * ispif_get_vfe_line_id - Get VFE line id by media entity
+ * @entity: Pointer to VFE media entity structure
+ * @id: Return VFE line id here
+ */
+static void ispif_get_vfe_line_id(struct media_entity *entity,
+ enum vfe_line_id *id)
+{
+ struct v4l2_subdev *sd;
+ struct vfe_line *line;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ line = v4l2_get_subdevdata(sd);
+
+ *id = line->id;
+}
+
+/*
* ispif_link_setup - Setup ISPIF connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
@@ -1266,7 +1301,7 @@ static int ispif_link_setup(struct media_entity *entity,
const struct media_pad *remote, u32 flags)
{
if (flags & MEDIA_LNK_FL_ENABLED) {
- if (media_entity_remote_pad(local))
+ if (media_pad_remote_pad_first(local))
return -EBUSY;
if (local->flags & MEDIA_PAD_FL_SINK) {
@@ -1285,8 +1320,8 @@ static int ispif_link_setup(struct media_entity *entity,
sd = media_entity_to_v4l2_subdev(entity);
line = v4l2_get_subdevdata(sd);
- msm_vfe_get_vfe_id(remote->entity, &line->vfe_id);
- msm_vfe_get_vfe_line_id(remote->entity, &id);
+ ispif_get_vfe_id(remote->entity, &line->vfe_id);
+ ispif_get_vfe_line_id(remote->entity, &id);
line->interface = ispif_get_intf(id);
}
}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 5b148e9f8134..a26e4a5d87b6 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -1423,40 +1423,6 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
}
/*
- * msm_vfe_get_vfe_id - Get VFE HW module id
- * @entity: Pointer to VFE media entity structure
- * @id: Return CSID HW module id here
- */
-void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
-{
- struct v4l2_subdev *sd;
- struct vfe_line *line;
- struct vfe_device *vfe;
-
- sd = media_entity_to_v4l2_subdev(entity);
- line = v4l2_get_subdevdata(sd);
- vfe = to_vfe(line);
-
- *id = vfe->id;
-}
-
-/*
- * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
- * @entity: Pointer to VFE media entity structure
- * @id: Return VFE line id here
- */
-void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
-{
- struct v4l2_subdev *sd;
- struct vfe_line *line;
-
- sd = media_entity_to_v4l2_subdev(entity);
- line = v4l2_get_subdevdata(sd);
-
- *id = line->id;
-}
-
-/*
* vfe_link_setup - Setup VFE connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
@@ -1470,7 +1436,7 @@ static int vfe_link_setup(struct media_entity *entity,
const struct media_pad *remote, u32 flags)
{
if (flags & MEDIA_LNK_FL_ENABLED)
- if (media_entity_remote_pad(local))
+ if (media_pad_remote_pad_first(local))
return -EBUSY;
return 0;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 0eba04eb9b77..cbc314c4e244 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -163,9 +163,6 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
void msm_vfe_unregister_entities(struct vfe_device *vfe);
-void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
-void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
-
/*
* vfe_buf_add_pending - Add output buffer to list of pending
* @output: VFE output
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 307bb1dc4589..290df04c4d02 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -328,7 +328,7 @@ static struct v4l2_subdev *video_remote_subdev(struct camss_video *video,
{
struct media_pad *remote;
- remote = media_entity_remote_pad(&video->pad);
+ remote = media_pad_remote_pad_first(&video->pad);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
@@ -507,7 +507,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -543,7 +543,7 @@ static void video_stop_streaming(struct vb2_queue *q)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 79ad82e233cb..1118c40886d5 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -937,7 +937,7 @@ struct media_entity *camss_find_sensor(struct media_entity *entity)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
return NULL;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
return NULL;
@@ -1452,19 +1452,31 @@ static const struct media_device_ops camss_media_ops = {
static int camss_configure_pd(struct camss *camss)
{
- int nbr_pm_domains = 0;
+ struct device *dev = camss->dev;
int last_pm_domain = 0;
int i;
int ret;
- if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660)
- nbr_pm_domains = PM_DOMAIN_GEN1_COUNT;
- else if (camss->version == CAMSS_845 ||
- camss->version == CAMSS_8250)
- nbr_pm_domains = PM_DOMAIN_GEN2_COUNT;
+ camss->genpd_num = of_count_phandle_with_args(dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+ if (camss->genpd_num < 0) {
+ dev_err(dev, "Power domains are not defined for camss\n");
+ return camss->genpd_num;
+ }
+
+ camss->genpd = devm_kmalloc_array(dev, camss->genpd_num,
+ sizeof(*camss->genpd), GFP_KERNEL);
+ if (!camss->genpd)
+ return -ENOMEM;
+
+ camss->genpd_link = devm_kmalloc_array(dev, camss->genpd_num,
+ sizeof(*camss->genpd_link),
+ GFP_KERNEL);
+ if (!camss->genpd_link)
+ return -ENOMEM;
- for (i = 0; i < nbr_pm_domains; i++) {
+ for (i = 0; i < camss->genpd_num; i++) {
camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
if (IS_ERR(camss->genpd[i])) {
ret = PTR_ERR(camss->genpd[i]);
@@ -1529,7 +1541,7 @@ static int camss_probe(struct platform_device *pdev)
struct camss *camss;
int num_subdevs, ret;
- camss = kzalloc(sizeof(*camss), GFP_KERNEL);
+ camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL);
if (!camss)
return -ENOMEM;
@@ -1567,39 +1579,30 @@ static int camss_probe(struct platform_device *pdev)
camss->csid_num = 4;
camss->vfe_num = 4;
} else {
- ret = -EINVAL;
- goto err_free;
+ return -EINVAL;
}
camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
sizeof(*camss->csiphy), GFP_KERNEL);
- if (!camss->csiphy) {
- ret = -ENOMEM;
- goto err_free;
- }
+ if (!camss->csiphy)
+ return -ENOMEM;
camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
GFP_KERNEL);
- if (!camss->csid) {
- ret = -ENOMEM;
- goto err_free;
- }
+ if (!camss->csid)
+ return -ENOMEM;
if (camss->version == CAMSS_8x16 ||
camss->version == CAMSS_8x96) {
camss->ispif = devm_kcalloc(dev, 1, sizeof(*camss->ispif), GFP_KERNEL);
- if (!camss->ispif) {
- ret = -ENOMEM;
- goto err_free;
- }
+ if (!camss->ispif)
+ return -ENOMEM;
}
camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
GFP_KERNEL);
- if (!camss->vfe) {
- ret = -ENOMEM;
- goto err_free;
- }
+ if (!camss->vfe)
+ return -ENOMEM;
v4l2_async_nf_init(&camss->notifier);
@@ -1681,15 +1684,12 @@ err_register_entities:
v4l2_device_unregister(&camss->v4l2_dev);
err_cleanup:
v4l2_async_nf_cleanup(&camss->notifier);
-err_free:
- kfree(camss);
return ret;
}
void camss_delete(struct camss *camss)
{
- int nbr_pm_domains = 0;
int i;
v4l2_device_unregister(&camss->v4l2_dev);
@@ -1698,19 +1698,10 @@ void camss_delete(struct camss *camss)
pm_runtime_disable(camss->dev);
- if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660)
- nbr_pm_domains = PM_DOMAIN_GEN1_COUNT;
- else if (camss->version == CAMSS_845 ||
- camss->version == CAMSS_8250)
- nbr_pm_domains = PM_DOMAIN_GEN2_COUNT;
-
- for (i = 0; i < nbr_pm_domains; i++) {
+ for (i = 0; i < camss->genpd_num; i++) {
device_link_del(camss->genpd_link[i]);
dev_pm_domain_detach(camss->genpd[i], true);
}
-
- kfree(camss);
}
/*
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index c9b3e0df5be8..0db80cadbbaa 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -69,9 +69,7 @@ struct resources_icc {
enum pm_domain {
PM_DOMAIN_VFE0 = 0,
PM_DOMAIN_VFE1 = 1,
- PM_DOMAIN_GEN1_COUNT = 2, /* CAMSS series of ISPs */
PM_DOMAIN_VFELITE = 2, /* VFELITE / TOP GDSC */
- PM_DOMAIN_GEN2_COUNT = 3, /* Titan series of ISPs */
};
enum camss_version {
@@ -101,8 +99,9 @@ struct camss {
int vfe_num;
struct vfe_device *vfe;
atomic_t ref_count;
- struct device *genpd[PM_DOMAIN_GEN2_COUNT];
- struct device_link *genpd_link[PM_DOMAIN_GEN2_COUNT];
+ int genpd_num;
+ struct device **genpd;
+ struct device_link **genpd_link;
struct icc_path *icc_path[ICC_SM8250_COUNT];
struct icc_bw_tbl icc_bw_tbl[ICC_SM8250_COUNT];
};
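The camss.h hunk above replaces the fixed-size PM-domain arrays with pointers sized from DT at probe time. On the attach side, camss_configure_pd() (partially shown earlier) pairs dev_pm_domain_attach_by_id() with a device link that camss_delete() removes per index; a rough sketch of that pairing, assuming the usual stateless runtime-PM link flags (the exact flags are not visible in this hunk):

	for (i = 0; i < camss->genpd_num; i++) {
		camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
		if (IS_ERR(camss->genpd[i]))
			return PTR_ERR(camss->genpd[i]);

		/* Assumed flags; the patch only shows the matching device_link_del(). */
		camss->genpd_link[i] = device_link_add(camss->dev, camss->genpd[i],
						       DL_FLAG_STATELESS |
						       DL_FLAG_PM_RUNTIME |
						       DL_FLAG_RPM_ACTIVE);
		if (!camss->genpd_link[i])
			return -EINVAL;
	}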
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 877eca125803..990a1519f968 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -265,6 +265,19 @@ static void venus_assign_register_offsets(struct venus_core *core)
}
}
+static irqreturn_t venus_isr_thread(int irq, void *dev_id)
+{
+ struct venus_core *core = dev_id;
+ irqreturn_t ret;
+
+ ret = hfi_isr_thread(irq, dev_id);
+
+ if (ret == IRQ_HANDLED && venus_fault_inject_ssr())
+ hfi_core_trigger_ssr(core, HFI_TEST_SSR_SW_ERR_FATAL);
+
+ return ret;
+}
+
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -320,7 +333,7 @@ static int venus_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
init_waitqueue_head(&core->sys_err_done);
- ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread,
+ ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"venus", core);
if (ret)
@@ -832,6 +845,10 @@ static const struct reg_val sm7280_reg_preset[] = {
{ 0xb0088, 0 },
};
+static const struct hfi_ubwc_config sc7280_ubwc_config = {
+ 0, 0, {1, 1, 1, 0, 0, 0}, 8, 32, 14, 0, 0, {0, 0}
+};
+
static const struct venus_resources sc7280_res = {
.freq_tbl = sc7280_freq_table,
.freq_tbl_size = ARRAY_SIZE(sc7280_freq_table),
@@ -841,6 +858,7 @@ static const struct venus_resources sc7280_res = {
.bw_tbl_enc_size = ARRAY_SIZE(sc7280_bw_table_enc),
.bw_tbl_dec = sc7280_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sc7280_bw_table_dec),
+ .ubwc_conf = &sc7280_ubwc_config,
.clks = {"core", "bus", "iface"},
.clks_num = 3,
.vcodec0_clks = {"vcodec_core", "vcodec_bus"},
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index d33825553edc..32551c2602a9 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -16,6 +16,7 @@
#include "dbgfs.h"
#include "hfi.h"
#include "hfi_platform.h"
+#include "hfi_helper.h"
#define VDBGL "VenusLow : "
#define VDBGM "VenusMed : "
@@ -57,6 +58,7 @@ struct venus_resources {
unsigned int bw_tbl_dec_size;
const struct reg_val *reg_tbl;
unsigned int reg_tbl_size;
+ const struct hfi_ubwc_config *ubwc_conf;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX];
diff --git a/drivers/media/platform/qcom/venus/dbgfs.c b/drivers/media/platform/qcom/venus/dbgfs.c
index 52de47f2ca88..726f4b730e69 100644
--- a/drivers/media/platform/qcom/venus/dbgfs.c
+++ b/drivers/media/platform/qcom/venus/dbgfs.c
@@ -4,13 +4,22 @@
*/
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include "core.h"
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(venus_ssr_attr);
+#endif
+
void venus_dbgfs_init(struct venus_core *core)
{
core->root = debugfs_create_dir("venus", NULL);
debugfs_create_x32("fw_level", 0644, core->root, &venus_fw_debug);
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_ssr", core->root, &venus_ssr_attr);
+#endif
}
void venus_dbgfs_deinit(struct venus_core *core)
diff --git a/drivers/media/platform/qcom/venus/dbgfs.h b/drivers/media/platform/qcom/venus/dbgfs.h
index b7b621a8472f..c87c1355d039 100644
--- a/drivers/media/platform/qcom/venus/dbgfs.h
+++ b/drivers/media/platform/qcom/venus/dbgfs.h
@@ -4,8 +4,21 @@
#ifndef __VENUS_DBGFS_H__
#define __VENUS_DBGFS_H__
+#include <linux/fault-inject.h>
+
struct venus_core;
+#ifdef CONFIG_FAULT_INJECTION
+extern struct fault_attr venus_ssr_attr;
+static inline bool venus_fault_inject_ssr(void)
+{
+ return should_fail(&venus_ssr_attr, 1);
+}
+#else
+static inline bool venus_fault_inject_ssr(void) { return false; }
+#endif
+
void venus_dbgfs_init(struct venus_core *core);
void venus_dbgfs_deinit(struct venus_core *core);
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 5c1104379c49..60de4200375d 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -671,8 +671,7 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
unsigned int i;
int ret;
- if (req)
- memset(req, 0, sizeof(*req));
+ memset(req, 0, sizeof(*req));
if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
req->count_min = inst->fw_min_cnt;
@@ -694,8 +693,7 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
if (hprop.bufreq[i].type != type)
continue;
- if (req)
- memcpy(req, &hprop.bufreq[i], sizeof(*req));
+ memcpy(req, &hprop.bufreq[i], sizeof(*req));
ret = 0;
break;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 4ecd444050bb..930b743f225e 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -58,6 +58,15 @@ void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode)
pkt->data[1] = mode;
}
+void pkt_sys_ubwc_config(struct hfi_sys_set_property_pkt *pkt, const struct hfi_ubwc_config *hfi)
+{
+ pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG;
+ memcpy(&pkt->data[1], hfi, sizeof(*hfi));
+}
+
int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
u32 addr, void *cookie)
{
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index 327ed90a2788..99bc0b6db67c 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -256,6 +256,7 @@ void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type);
void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt);
void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable);
void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable);
+void pkt_sys_ubwc_config(struct hfi_sys_set_property_pkt *pkt, const struct hfi_ubwc_config *hfi);
int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
u32 addr, void *cookie);
int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 2daa88e3df9f..d2d6719a2ba4 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -427,6 +427,7 @@
#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
#define HFI_PROPERTY_SYS_CONFIG_COVERAGE 0x7
+#define HFI_PROPERTY_SYS_UBWC_CONFIG 0x8
/*
* HFI_PROPERTY_PARAM_COMMON_START
@@ -626,6 +627,25 @@ struct hfi_debug_config {
u32 mode;
};
+struct hfi_ubwc_config {
+ u32 size;
+ u32 packet_type;
+ struct {
+ u32 max_channel_override : 1;
+ u32 mal_length_override : 1;
+ u32 hb_override : 1;
+ u32 bank_swzl_level_override : 1;
+ u32 bank_spreading_override : 1;
+ u32 reserved : 27;
+ } override_bit_info;
+ u32 max_channels;
+ u32 mal_length;
+ u32 highest_bank_bit;
+ u32 bank_swzl_level;
+ u32 bank_spreading;
+ u32 reserved[2];
+};
+
struct hfi_enable {
u32 enable;
};
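For readability, the positional sc7280_ubwc_config initializer added in venus/core.c earlier in this diff maps onto the named fields of struct hfi_ubwc_config above as follows; the values are copied verbatim, only the designated-initializer form is an editorial aid:

	static const struct hfi_ubwc_config sc7280_ubwc_config = {
		.size = 0,
		.packet_type = 0,
		.override_bit_info = {
			.max_channel_override = 1,
			.mal_length_override = 1,
			.hb_override = 1,
		},
		.max_channels = 8,
		.mal_length = 32,
		.highest_bank_bit = 14,
		.bank_swzl_level = 0,
		.bank_spreading = 0,
	};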
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 5b8389b98299..6cf74b2bc5ae 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -234,6 +234,7 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
const struct hfi_plat_caps *caps = NULL;
u32 enc_codecs, dec_codecs, count = 0;
unsigned int entries;
+ int ret;
plat = hfi_platform_get(core->res->hfi_version);
if (!plat)
@@ -242,8 +243,9 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
if (inst)
return 0;
- if (plat->codecs)
- plat->codecs(&enc_codecs, &dec_codecs, &count);
+ ret = hfi_platform_get_codecs(core, &enc_codecs, &dec_codecs, &count);
+ if (ret)
+ return ret;
if (plat->capabilities)
caps = plat->capabilities(&entries);
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index f16f8962273c..f07f554bc5fe 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -2,7 +2,9 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
+#include <linux/of_device.h>
#include "hfi_platform.h"
+#include "core.h"
const struct hfi_platform *hfi_platform_get(enum hfi_version version)
{
@@ -66,3 +68,23 @@ hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_
return freq;
}
+int
+hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+{
+ const struct hfi_platform *plat;
+
+ plat = hfi_platform_get(core->res->hfi_version);
+ if (!plat)
+ return -EINVAL;
+
+ if (plat->codecs)
+ plat->codecs(enc_codecs, dec_codecs, count);
+
+ if (of_device_is_compatible(core->dev->of_node, "qcom,sc7280-venus")) {
+ *enc_codecs &= ~HFI_VIDEO_CODEC_VP8;
+ *dec_codecs &= ~HFI_VIDEO_CODEC_VP8;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index 1dcf4085928c..ec89a90a8129 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -66,4 +66,6 @@ unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 code
u32 session_type);
unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
u32 session_type);
+int hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs,
+ u32 *count);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 9a34662fea38..2ad40b3945b0 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -904,6 +904,24 @@ static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
return 0;
}
+static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ const struct venus_resources *res = hdev->core->res;
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_ubwc_config(pkt, res->ubwc_conf);
+
+ ret = venus_iface_cmdq_write(hdev, pkt, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int venus_get_queue_size(struct venus_hfi_device *hdev,
unsigned int index)
{
@@ -922,6 +940,7 @@ static int venus_get_queue_size(struct venus_hfi_device *hdev,
static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
+ const struct venus_resources *res = hdev->core->res;
int ret;
ret = venus_sys_set_debug(hdev, venus_fw_debug);
@@ -945,6 +964,13 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
ret);
+ /* On some Venus cores it is mandatory to set the UBWC configuration */
+ if (res->ubwc_conf) {
+ ret = venus_sys_set_ubwc_config(hdev);
+ if (ret)
+ dev_warn(dev, "setting ubwc config failed (%d)\n", ret);
+ }
+
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index cb48c5ff3dee..c93d2906e4c7 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -875,7 +875,7 @@ static int vcodec_domains_get(struct venus_core *core)
}
skip_pmdomains:
- if (!core->has_opp_table)
+ if (!core->res->opp_pmdomain)
return 0;
/* Attach the power domain for setting performance state */
@@ -1007,6 +1007,10 @@ static int core_get_v4(struct venus_core *core)
if (ret)
return ret;
+ ret = vcodec_domains_get(core);
+ if (ret)
+ return ret;
+
if (core->res->opp_pmdomain) {
ret = devm_pm_opp_of_add_table(dev);
if (!ret) {
@@ -1017,10 +1021,6 @@ static int core_get_v4(struct venus_core *core)
}
}
- ret = vcodec_domains_get(core);
- if (ret)
- return ret;
-
return 0;
}
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index 49bdcfba010b..968a74234e92 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -845,7 +845,7 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
continue;
/* Get remote CSI-2, if any. */
- csi_pad = media_entity_remote_pad(
+ csi_pad = media_pad_remote_pad_first(
&group->vin[i]->vdev.entity.pads[0]);
if (!csi_pad)
continue;
@@ -1261,7 +1261,7 @@ static const struct rvin_info rcar_info_r8a77980 = {
};
static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
- { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 },
{ /* Sentinel */ }
};
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
index fea8f00a9152..174aa6176f54 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -1313,7 +1313,7 @@ static int rcsi2_link_setup(struct media_entity *entity,
channel = id % 4;
if (flags & MEDIA_LNK_FL_ENABLED) {
- if (media_entity_remote_pad(local)) {
+ if (media_pad_remote_pad_first(local)) {
dev_dbg(priv->dev,
"Each VC can only be routed to one output channel\n");
return -EINVAL;
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 6644b498929d..8d37fbdc266a 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -1258,7 +1258,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
return ret == -ENOIOCTLCMD ? 0 : ret;
}
- pad = media_entity_remote_pad(&vin->pad);
+ pad = media_pad_remote_pad_first(&vin->pad);
if (!pad)
return -EPIPE;
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 2e2aa9d746ee..576059f9bbe3 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -1032,7 +1032,7 @@ static void rvin_notify(struct v4l2_subdev *sd,
if (!vin)
continue;
- pad = media_entity_remote_pad(&vin->pad);
+ pad = media_pad_remote_pad_first(&vin->pad);
if (!pad)
continue;
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index 9a0982fa5c6b..3fec41f6e964 100644
--- a/drivers/media/platform/renesas/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -3,11 +3,6 @@
* R-Car Gen3 Digital Radio Interface (DRIF) driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -1477,7 +1472,7 @@ MODULE_DEVICE_TABLE(of, rcar_drif_of_table);
static struct platform_driver rcar_drif_driver = {
.driver = {
.name = RCAR_DRIF_DRV_NAME,
- .of_match_table = of_match_ptr(rcar_drif_of_table),
+ .of_match_table = rcar_drif_of_table,
.pm = &rcar_drif_pm_ops,
},
.probe = rcar_drif_probe,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
index a116a3362f9e..4c3bd2b1ca28 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
@@ -516,8 +516,8 @@ int vsp1_entity_link_setup(struct media_entity *entity,
* higher than one for the data pipelines, except for the links to the HGO and
* HGT that can be enabled in addition to a regular data link. When traversing
* outgoing links this function ignores HGO and HGT entities and should thus be
- * used in place of the generic media_entity_remote_pad() function to traverse
- * data pipelines.
+ * used in place of the generic media_pad_remote_pad_first() function to
+ * traverse data pipelines.
*
* Return a pointer to the pad at the remote end of the first found enabled
* link, or NULL if no enabled link has been found.
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index 51219b1b6ea9..e8e0ee5f2277 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -50,7 +50,7 @@ vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
struct media_pad *remote;
- remote = media_entity_remote_pad(local);
+ remote = media_pad_remote_pad_first(local);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
diff --git a/drivers/media/platform/rockchip/rkisp1/Kconfig b/drivers/media/platform/rockchip/rkisp1/Kconfig
index dabd7e42c193..731c9acbf6ef 100644
--- a/drivers/media/platform/rockchip/rkisp1/Kconfig
+++ b/drivers/media/platform/rockchip/rkisp1/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_ROCKCHIP_ISP1
tristate "Rockchip Image Signal Processing v1 Unit driver"
depends on V4L_PLATFORM_DRIVERS
depends on VIDEO_DEV && OF
- depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on ARCH_ROCKCHIP || ARCH_MXC || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/rockchip/rkisp1/Makefile b/drivers/media/platform/rockchip/rkisp1/Makefile
index ab32a77db8f7..b3844c4f7623 100644
--- a/drivers/media/platform/rockchip/rkisp1/Makefile
+++ b/drivers/media/platform/rockchip/rkisp1/Makefile
@@ -1,10 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
+rockchip-isp1-y := rkisp1-capture.o \
+ rkisp1-common.o \
+ rkisp1-csi.o \
+ rkisp1-dev.o \
+ rkisp1-isp.o \
+ rkisp1-resizer.o \
+ rkisp1-stats.o \
+ rkisp1-params.o
+
+rockchip-isp1-$(CONFIG_DEBUG_FS) += rkisp1-debug.o
+
obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rockchip-isp1.o
-rockchip-isp1-objs += rkisp1-capture.o \
- rkisp1-common.o \
- rkisp1-dev.o \
- rkisp1-isp.o \
- rkisp1-resizer.o \
- rkisp1-stats.o \
- rkisp1-params.o
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index fee2aaacb26b..d5904c96ff3f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -383,7 +383,7 @@ static void rkisp1_mi_config_ctrl(struct rkisp1_capture *cap)
mi_ctrl |= RKISP1_CIF_MI_CTRL_INIT_BASE_EN |
RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN;
- rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static u32 rkisp1_pixfmt_comp_size(const struct v4l2_pix_format_mplane *pixm,
@@ -404,7 +404,7 @@ static void rkisp1_irq_frame_end_enable(struct rkisp1_capture *cap)
u32 mi_imsc = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_IMSC);
mi_imsc |= RKISP1_CIF_MI_FRAME(cap);
- rkisp1_write(cap->rkisp1, mi_imsc, RKISP1_CIF_MI_IMSC);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_IMSC, mi_imsc);
}
static void rkisp1_mp_config(struct rkisp1_capture *cap)
@@ -413,12 +413,12 @@ static void rkisp1_mp_config(struct rkisp1_capture *cap)
struct rkisp1_device *rkisp1 = cap->rkisp1;
u32 reg;
- rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
- cap->config->mi.y_size_init);
- rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
- cap->config->mi.cb_size_init);
- rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR),
- cap->config->mi.cr_size_init);
+ rkisp1_write(rkisp1, cap->config->mi.y_size_init,
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y));
+ rkisp1_write(rkisp1, cap->config->mi.cb_size_init,
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB));
+ rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
rkisp1_irq_frame_end_enable(cap);
@@ -429,7 +429,7 @@ static void rkisp1_mp_config(struct rkisp1_capture *cap)
reg |= RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
else
reg &= ~RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
- rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL, reg);
}
rkisp1_mi_config_ctrl(cap);
@@ -437,11 +437,11 @@ static void rkisp1_mp_config(struct rkisp1_capture *cap)
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
reg &= ~RKISP1_MI_CTRL_MP_FMT_MASK;
reg |= cap->pix.cfg->write_format;
- rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_CTRL, reg);
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
reg |= RKISP1_CIF_MI_MP_AUTOUPDATE_ENABLE;
- rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_CTRL, reg);
}
static void rkisp1_sp_config(struct rkisp1_capture *cap)
@@ -450,16 +450,16 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
struct rkisp1_device *rkisp1 = cap->rkisp1;
u32 mi_ctrl, reg;
- rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
- cap->config->mi.y_size_init);
- rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
- cap->config->mi.cb_size_init);
- rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR),
- cap->config->mi.cr_size_init);
+ rkisp1_write(rkisp1, cap->config->mi.y_size_init,
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y));
+ rkisp1_write(rkisp1, cap->config->mi.cb_size_init,
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB));
+ rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
- rkisp1_write(rkisp1, pixm->width, RKISP1_CIF_MI_SP_Y_PIC_WIDTH);
- rkisp1_write(rkisp1, pixm->height, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT);
- rkisp1_write(rkisp1, cap->sp_y_stride, RKISP1_CIF_MI_SP_Y_LLENGTH);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_WIDTH, pixm->width);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT, pixm->height);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
rkisp1_irq_frame_end_enable(cap);
@@ -470,7 +470,7 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
reg |= RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP;
else
reg &= ~RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP;
- rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL, reg);
}
rkisp1_mi_config_ctrl(cap);
@@ -481,7 +481,7 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
RKISP1_MI_CTRL_SP_INPUT_YUV422 |
cap->pix.cfg->output_format |
RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE;
- rkisp1_write(rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_mp_disable(struct rkisp1_capture *cap)
@@ -490,7 +490,7 @@ static void rkisp1_mp_disable(struct rkisp1_capture *cap)
mi_ctrl &= ~(RKISP1_CIF_MI_CTRL_MP_ENABLE |
RKISP1_CIF_MI_CTRL_RAW_ENABLE);
- rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_sp_disable(struct rkisp1_capture *cap)
@@ -498,7 +498,7 @@ static void rkisp1_sp_disable(struct rkisp1_capture *cap)
u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl &= ~RKISP1_CIF_MI_CTRL_SP_ENABLE;
- rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_mp_enable(struct rkisp1_capture *cap)
@@ -514,7 +514,7 @@ static void rkisp1_mp_enable(struct rkisp1_capture *cap)
else
mi_ctrl |= RKISP1_CIF_MI_CTRL_MP_ENABLE;
- rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_sp_enable(struct rkisp1_capture *cap)
@@ -522,15 +522,14 @@ static void rkisp1_sp_enable(struct rkisp1_capture *cap)
u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl |= RKISP1_CIF_MI_CTRL_SP_ENABLE;
- rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_mp_sp_stop(struct rkisp1_capture *cap)
{
if (!cap->is_streaming)
return;
- rkisp1_write(cap->rkisp1,
- RKISP1_CIF_MI_FRAME(cap), RKISP1_CIF_MI_ICR);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_ICR, RKISP1_CIF_MI_FRAME(cap));
cap->ops->disable(cap);
}
@@ -554,7 +553,7 @@ static void rkisp1_mp_set_data_path(struct rkisp1_capture *cap)
dpcl = dpcl | RKISP1_CIF_VI_DPCL_CHAN_MODE_MP |
RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI;
- rkisp1_write(cap->rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_VI_DPCL, dpcl);
}
static void rkisp1_sp_set_data_path(struct rkisp1_capture *cap)
@@ -562,7 +561,7 @@ static void rkisp1_sp_set_data_path(struct rkisp1_capture *cap)
u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
dpcl |= RKISP1_CIF_VI_DPCL_CHAN_MODE_SP;
- rkisp1_write(cap->rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+ rkisp1_write(cap->rkisp1, RKISP1_CIF_VI_DPCL, dpcl);
}
static const struct rkisp1_capture_ops rkisp1_capture_ops_mp = {
@@ -628,9 +627,8 @@ static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
buff_addr = cap->buf.next->buff_addr;
- rkisp1_write(cap->rkisp1,
- buff_addr[RKISP1_PLANE_Y],
- cap->config->mi.y_base_ad_init);
+ rkisp1_write(cap->rkisp1, cap->config->mi.y_base_ad_init,
+ buff_addr[RKISP1_PLANE_Y]);
/*
* In order to support grey format we capture
* YUV422 planar format from the camera and
@@ -638,39 +636,36 @@ static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
*/
if (cap->pix.cfg->fourcc == V4L2_PIX_FMT_GREY) {
rkisp1_write(cap->rkisp1,
- cap->buf.dummy.dma_addr,
- cap->config->mi.cb_base_ad_init);
+ cap->config->mi.cb_base_ad_init,
+ cap->buf.dummy.dma_addr);
rkisp1_write(cap->rkisp1,
- cap->buf.dummy.dma_addr,
- cap->config->mi.cr_base_ad_init);
+ cap->config->mi.cr_base_ad_init,
+ cap->buf.dummy.dma_addr);
} else {
rkisp1_write(cap->rkisp1,
- buff_addr[RKISP1_PLANE_CB],
- cap->config->mi.cb_base_ad_init);
+ cap->config->mi.cb_base_ad_init,
+ buff_addr[RKISP1_PLANE_CB]);
rkisp1_write(cap->rkisp1,
- buff_addr[RKISP1_PLANE_CR],
- cap->config->mi.cr_base_ad_init);
+ cap->config->mi.cr_base_ad_init,
+ buff_addr[RKISP1_PLANE_CR]);
}
} else {
/*
* Use the dummy space allocated by dma_alloc_coherent to
* throw data if there is no available buffer.
*/
- rkisp1_write(cap->rkisp1,
- cap->buf.dummy.dma_addr,
- cap->config->mi.y_base_ad_init);
- rkisp1_write(cap->rkisp1,
- cap->buf.dummy.dma_addr,
- cap->config->mi.cb_base_ad_init);
- rkisp1_write(cap->rkisp1,
- cap->buf.dummy.dma_addr,
- cap->config->mi.cr_base_ad_init);
+ rkisp1_write(cap->rkisp1, cap->config->mi.y_base_ad_init,
+ cap->buf.dummy.dma_addr);
+ rkisp1_write(cap->rkisp1, cap->config->mi.cb_base_ad_init,
+ cap->buf.dummy.dma_addr);
+ rkisp1_write(cap->rkisp1, cap->config->mi.cr_base_ad_init,
+ cap->buf.dummy.dma_addr);
}
/* Set plane offsets */
- rkisp1_write(cap->rkisp1, 0, cap->config->mi.y_offs_cnt_init);
- rkisp1_write(cap->rkisp1, 0, cap->config->mi.cb_offs_cnt_init);
- rkisp1_write(cap->rkisp1, 0, cap->config->mi.cr_offs_cnt_init);
+ rkisp1_write(cap->rkisp1, cap->config->mi.y_offs_cnt_init, 0);
+ rkisp1_write(cap->rkisp1, cap->config->mi.cb_offs_cnt_init, 0);
+ rkisp1_write(cap->rkisp1, cap->config->mi.cr_offs_cnt_init, 0);
}
/*
@@ -710,7 +705,7 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
if (!status)
return IRQ_NONE;
- rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, status);
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
@@ -888,8 +883,8 @@ static void rkisp1_cap_stream_enable(struct rkisp1_capture *cap)
*/
if (!other->is_streaming) {
/* force cfg update */
- rkisp1_write(rkisp1,
- RKISP1_CIF_MI_INIT_SOFT_UPD, RKISP1_CIF_MI_INIT);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_INIT,
+ RKISP1_CIF_MI_INIT_SOFT_UPD);
rkisp1_set_next_buf(cap);
}
spin_unlock_irq(&cap->buf.lock);
@@ -931,11 +926,8 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap)
* If the other capture is streaming, isp and sensor nodes shouldn't
* be disabled, skip them.
*/
- if (rkisp1->pipe.streaming_count < 2) {
- v4l2_subdev_call(rkisp1->active_sensor->sd, video, s_stream,
- false);
+ if (rkisp1->pipe.streaming_count < 2)
v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, false);
- }
v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream,
false);
@@ -971,15 +963,8 @@ static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap)
if (ret)
goto err_disable_rsz;
- ret = v4l2_subdev_call(rkisp1->active_sensor->sd, video, s_stream,
- true);
- if (ret)
- goto err_disable_isp;
-
return 0;
-err_disable_isp:
- v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, false);
err_disable_rsz:
v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream,
false);
@@ -1253,11 +1238,8 @@ static int rkisp1_g_fmt_vid_cap_mplane(struct file *file, void *fh,
static int
rkisp1_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
- struct rkisp1_capture *cap_dev = video_drvdata(file);
- struct rkisp1_device *rkisp1 = cap_dev->rkisp1;
-
- strscpy(cap->driver, rkisp1->dev->driver->name, sizeof(cap->driver));
- strscpy(cap->card, rkisp1->dev->driver->name, sizeof(cap->card));
+ strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, RKISP1_DRIVER_NAME, sizeof(cap->card));
strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
return 0;
@@ -1302,8 +1284,16 @@ static int rkisp1_capture_link_validate(struct media_link *link)
if (sd_fmt.format.height != cap->pix.fmt.height ||
sd_fmt.format.width != cap->pix.fmt.width ||
- sd_fmt.format.code != fmt->mbus)
+ sd_fmt.format.code != fmt->mbus) {
+ dev_dbg(cap->rkisp1->dev,
+ "link '%s':%u -> '%s':%u not valid: 0x%04x/%ux%u != 0x%04x/%ux%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index,
+ sd_fmt.format.code, sd_fmt.format.width,
+ sd_fmt.format.height, fmt->mbus, cap->pix.fmt.width,
+ cap->pix.fmt.height);
return -EPIPE;
+ }
return 0;
}
@@ -1326,8 +1316,12 @@ static const struct v4l2_file_operations rkisp1_fops = {
static void rkisp1_unregister_capture(struct rkisp1_capture *cap)
{
+ if (!video_is_registered(&cap->vnode.vdev))
+ return;
+
media_entity_cleanup(&cap->vnode.vdev.entity);
vb2_video_unregister_device(&cap->vnode.vdev);
+ mutex_destroy(&cap->vnode.vlock);
}
void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1)
@@ -1381,27 +1375,31 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
if (ret) {
dev_err(cap->rkisp1->dev,
"vb2 queue init failed (err=%d)\n", ret);
- return ret;
+ goto error;
}
vdev->queue = q;
+ ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+ if (ret)
+ goto error;
+
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(cap->rkisp1->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
- return ret;
+ goto error;
}
+
v4l2_info(v4l2_dev, "registered %s as /dev/video%d\n", vdev->name,
vdev->num);
- ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
- if (ret) {
- video_unregister_device(vdev);
- return ret;
- }
-
return 0;
+
+error:
+ media_entity_cleanup(&vdev->entity);
+ mutex_destroy(&node->vlock);
+ return ret;
}
static void
@@ -1436,26 +1434,21 @@ rkisp1_capture_init(struct rkisp1_device *rkisp1, enum rkisp1_stream_id id)
int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1)
{
- struct rkisp1_capture *cap;
- unsigned int i, j;
+ unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); i++) {
+ struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
+
rkisp1_capture_init(rkisp1, i);
- cap = &rkisp1->capture_devs[i];
- cap->rkisp1 = rkisp1;
+
ret = rkisp1_register_capture(cap);
- if (ret)
- goto err_unreg_capture_devs;
+ if (ret) {
+ rkisp1_capture_devs_unregister(rkisp1);
+ return ret;
+ }
}
return 0;
-err_unreg_capture_devs:
- for (j = 0; j < i; j++) {
- cap = &rkisp1->capture_devs[j];
- rkisp1_unregister_capture(cap);
- }
-
- return ret;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
index cf889666e166..f956b90a407a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
@@ -5,10 +5,153 @@
* Copyright (C) 2019 Collabora, Ltd.
*/
+#include <media/mipi-csi2.h>
#include <media/v4l2-rect.h>
#include "rkisp1-common.h"
+static const struct rkisp1_mbus_info rkisp1_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
+ .direction = RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_RGGB,
+ .bus_width = 10,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_BGGR,
+ .bus_width = 10,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_GBRG,
+ .bus_width = 10,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_GRBG,
+ .bus_width = 10,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_RGGB,
+ .bus_width = 12,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_BGGR,
+ .bus_width = 12,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_GBRG,
+ .bus_width = 12,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_GRBG,
+ .bus_width = 12,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_RGGB,
+ .bus_width = 8,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_BGGR,
+ .bus_width = 8,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_GBRG,
+ .bus_width = 8,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
+ .mipi_dt = MIPI_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_GRBG,
+ .bus_width = 8,
+ .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
+ .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCBYCR,
+ .bus_width = 16,
+ .direction = RKISP1_ISP_SD_SINK,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
+ .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCRYCB,
+ .bus_width = 16,
+ .direction = RKISP1_ISP_SD_SINK,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
+ .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CBYCRY,
+ .bus_width = 16,
+ .direction = RKISP1_ISP_SD_SINK,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
+ .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CRYCBY,
+ .bus_width = 16,
+ .direction = RKISP1_ISP_SD_SINK,
+ },
+};
+
+const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_index(unsigned int index)
+{
+ if (index >= ARRAY_SIZE(rkisp1_formats))
+ return NULL;
+
+ return &rkisp1_formats[index];
+}
+
+const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_code(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rkisp1_formats); i++) {
+ const struct rkisp1_mbus_info *fmt = &rkisp1_formats[i];
+
+ if (fmt->mbus_code == mbus_code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
static const struct v4l2_rect rkisp1_sd_min_crop = {
.width = RKISP1_ISP_MIN_WIDTH,
.height = RKISP1_ISP_MIN_HEIGHT,
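The table and the two lookup helpers added above centralise the media bus format information; a minimal sketch of how an enum_mbus_code-style consumer can be built on rkisp1_mbus_info_get_by_index(), with the wrapper name being an illustrative assumption:

	static int rkisp1_example_enum_mbus_code(unsigned int index, u32 *code)
	{
		const struct rkisp1_mbus_info *info;

		info = rkisp1_mbus_info_get_by_index(index);
		if (!info)
			return -EINVAL;	/* index is past the end of rkisp1_formats[] */

		*code = info->mbus_code;
		return 0;
	}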
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index d8fa3f1a5a85..8056997d5c29 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -23,18 +23,20 @@
#include "rkisp1-regs.h"
+struct dentry;
+
/*
- * flags on the 'direction' field in struct 'rkisp1_isp_mbus_info' that indicate
+ * flags on the 'direction' field in struct 'rkisp1_mbus_info' that indicate
* on which pad the media bus format is supported
*/
-#define RKISP1_ISP_SD_SRC BIT(0)
-#define RKISP1_ISP_SD_SINK BIT(1)
+#define RKISP1_ISP_SD_SRC BIT(0)
+#define RKISP1_ISP_SD_SINK BIT(1)
/* min and max values for the widths and heights of the entities */
-#define RKISP1_ISP_MAX_WIDTH 4032
-#define RKISP1_ISP_MAX_HEIGHT 3024
-#define RKISP1_ISP_MIN_WIDTH 32
-#define RKISP1_ISP_MIN_HEIGHT 32
+#define RKISP1_ISP_MAX_WIDTH 4032
+#define RKISP1_ISP_MAX_HEIGHT 3024
+#define RKISP1_ISP_MIN_WIDTH 32
+#define RKISP1_ISP_MIN_HEIGHT 32
#define RKISP1_RSZ_MP_SRC_MAX_WIDTH 4416
#define RKISP1_RSZ_MP_SRC_MAX_HEIGHT 3312
@@ -44,20 +46,20 @@
#define RKISP1_RSZ_SRC_MIN_HEIGHT 16
/* the default width and height of all the entities */
-#define RKISP1_DEFAULT_WIDTH 800
-#define RKISP1_DEFAULT_HEIGHT 600
+#define RKISP1_DEFAULT_WIDTH 800
+#define RKISP1_DEFAULT_HEIGHT 600
-#define RKISP1_DRIVER_NAME "rkisp1"
-#define RKISP1_BUS_INFO "platform:" RKISP1_DRIVER_NAME
+#define RKISP1_DRIVER_NAME "rkisp1"
+#define RKISP1_BUS_INFO "platform:" RKISP1_DRIVER_NAME
/* maximum number of clocks */
-#define RKISP1_MAX_BUS_CLK 8
+#define RKISP1_MAX_BUS_CLK 8
/* a bitmask of the ready stats */
-#define RKISP1_STATS_MEAS_MASK (RKISP1_CIF_ISP_AWB_DONE | \
- RKISP1_CIF_ISP_AFM_FIN | \
- RKISP1_CIF_ISP_EXP_END | \
- RKISP1_CIF_ISP_HIST_MEASURE_RDY)
+#define RKISP1_STATS_MEAS_MASK (RKISP1_CIF_ISP_AWB_DONE | \
+ RKISP1_CIF_ISP_AFM_FIN | \
+ RKISP1_CIF_ISP_EXP_END | \
+ RKISP1_CIF_ISP_HIST_MEASURE_RDY)
/* enum for the resizer pads */
enum rkisp1_rsz_pad {
@@ -66,6 +68,13 @@ enum rkisp1_rsz_pad {
RKISP1_RSZ_PAD_MAX
};
+/* enum for the csi receiver pads */
+enum rkisp1_csi_pad {
+ RKISP1_CSI_PAD_SINK,
+ RKISP1_CSI_PAD_SRC,
+ RKISP1_CSI_PAD_NUM
+};
+
/* enum for the capture id */
enum rkisp1_stream_id {
RKISP1_MAINPATH,
@@ -90,25 +99,89 @@ enum rkisp1_isp_pad {
};
/*
+ * enum rkisp1_feature - ISP features
+ *
+ * @RKISP1_FEATURE_MIPI_CSI2: The ISP has an internal MIPI CSI-2 receiver
+ *
+ * The ISP features are stored in a bitmask in &rkisp1_info.features and allow
+ * the driver to implement support for features present in some ISP versions
+ * only.
+ */
+enum rkisp1_feature {
+ RKISP1_FEATURE_MIPI_CSI2 = BIT(0),
+};
+
+/*
+ * struct rkisp1_info - Model-specific ISP Information
+ *
+ * @clks: array of ISP clock names
+ * @clk_size: number of entries in the @clks array
+ * @isrs: array of ISP interrupt descriptors
+ * @isr_size: number of entries in the @isrs array
+ * @isp_ver: ISP version
+ * @features: bitmask of rkisp1_feature features implemented by the ISP
+ *
+ * This structure contains information about the ISP specific to a particular
+ * ISP model, version, or integration in a particular SoC.
+ */
+struct rkisp1_info {
+ const char * const *clks;
+ unsigned int clk_size;
+ const struct rkisp1_isr_data *isrs;
+ unsigned int isr_size;
+ enum rkisp1_cif_isp_version isp_ver;
+ unsigned int features;
+};
+
+/*
* struct rkisp1_sensor_async - A container for the v4l2_async_subdev to add to the notifier
* of the v4l2-async API
*
* @asd: async_subdev variable for the sensor
+ * @index: index of the sensor (counting sensors found in DT)
+ * @source_ep: fwnode for the sensor source endpoint
* @lanes: number of lanes
* @mbus_type: type of bus (currently only CSI2 is supported)
* @mbus_flags: media bus (V4L2_MBUS_*) flags
* @sd: a pointer to v4l2_subdev struct of the sensor
* @pixel_rate_ctrl: pixel rate of the sensor, used to initialize the phy
- * @dphy: a pointer to the phy
+ * @port: port number (0: MIPI, 1: Parallel)
*/
struct rkisp1_sensor_async {
struct v4l2_async_subdev asd;
+ unsigned int index;
+ struct fwnode_handle *source_ep;
unsigned int lanes;
enum v4l2_mbus_type mbus_type;
unsigned int mbus_flags;
struct v4l2_subdev *sd;
struct v4l2_ctrl *pixel_rate_ctrl;
+ unsigned int port;
+};
+
+/*
+ * struct rkisp1_csi - CSI receiver subdev
+ *
+ * @rkisp1: pointer to the rkisp1 device
+ * @dphy: a pointer to the phy
+ * @is_dphy_errctrl_disabled: if dphy errctrl is disabled (avoid endless interrupt)
+ * @sd: v4l2_subdev variable
+ * @pads: media pads
+ * @pad_cfg: configurations for the pads
+ * @sink_fmt: input format
+ * @lock: protects pad_cfg and sink_fmt
+ * @source: source in-use, set when starting streaming
+ */
+struct rkisp1_csi {
+ struct rkisp1_device *rkisp1;
struct phy *dphy;
+ bool is_dphy_errctrl_disabled;
+ struct v4l2_subdev sd;
+ struct media_pad pads[RKISP1_CSI_PAD_NUM];
+ struct v4l2_subdev_pad_config pad_cfg[RKISP1_CSI_PAD_NUM];
+ const struct rkisp1_mbus_info *sink_fmt;
+ struct mutex lock;
+ struct v4l2_subdev *source;
};
/*
@@ -121,17 +194,16 @@ struct rkisp1_sensor_async {
* @sink_fmt: input format
* @src_fmt: output format
* @ops_lock: ops serialization
- * @is_dphy_errctrl_disabled: if dphy errctrl is disabled (avoid endless interrupt)
* @frame_sequence: used to synchronize frame_id between video devices.
*/
struct rkisp1_isp {
struct v4l2_subdev sd;
+ struct rkisp1_device *rkisp1;
struct media_pad pads[RKISP1_ISP_PAD_MAX];
struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
- const struct rkisp1_isp_mbus_info *sink_fmt;
- const struct rkisp1_isp_mbus_info *src_fmt;
+ const struct rkisp1_mbus_info *sink_fmt;
+ const struct rkisp1_mbus_info *src_fmt;
struct mutex ops_lock; /* serialize the subdevice ops */
- bool is_dphy_errctrl_disabled;
__u32 frame_sequence;
};
@@ -313,6 +385,7 @@ struct rkisp1_params {
* struct rkisp1_resizer - Resizer subdev
*
* @sd: v4l2_subdev variable
+ * @regs_base: base register address offset
* @id: id of the resizer, one of RKISP1_SELFPATH, RKISP1_MAINPATH
* @rkisp1: pointer to the rkisp1 device
* @pads: media pads
@@ -323,6 +396,7 @@ struct rkisp1_params {
*/
struct rkisp1_resizer {
struct v4l2_subdev sd;
+ u32 regs_base;
enum rkisp1_stream_id id;
struct rkisp1_device *rkisp1;
struct media_pad pads[RKISP1_RSZ_PAD_MAX];
@@ -373,7 +447,8 @@ struct rkisp1_debug {
* @v4l2_dev: v4l2_device variable
* @media_dev: media_device variable
* @notifier: a notifier to register on the v4l2-async API to be notified on the sensor
- * @active_sensor: sensor in-use, set when streaming on
+ * @source: source subdev in-use, set when starting streaming
+ * @csi: internal CSI-2 receiver
* @isp: ISP sub-device
* @resizer_devs: resizer sub-devices
* @capture_devs: capture devices
@@ -382,6 +457,7 @@ struct rkisp1_debug {
* @pipe: media pipeline
* @stream_lock: serializes {start/stop}_streaming callbacks between the capture devices.
* @debug: debug params to be exposed on debugfs
+ * @info: version-specific ISP information
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -391,7 +467,8 @@ struct rkisp1_device {
struct v4l2_device v4l2_dev;
struct media_device media_dev;
struct v4l2_async_notifier notifier;
- struct rkisp1_sensor_async *active_sensor;
+ struct v4l2_subdev *source;
+ struct rkisp1_csi csi;
struct rkisp1_isp isp;
struct rkisp1_resizer resizer_devs[2];
struct rkisp1_capture capture_devs[2];
@@ -400,11 +477,12 @@ struct rkisp1_device {
struct media_pipeline pipe;
struct mutex stream_lock; /* serialize {start/stop}_streaming cb between capture devices */
struct rkisp1_debug debug;
+ const struct rkisp1_info *info;
};
/*
- * struct rkisp1_isp_mbus_info - ISP media bus info, Translates media bus code to hardware
- * format values
+ * struct rkisp1_mbus_info - ISP media bus info, Translates media bus code to hardware
+ * format values
*
* @mbus_code: media bus code
* @pixel_enc: pixel encoding
@@ -414,7 +492,7 @@ struct rkisp1_device {
* @bayer_pat: bayer pattern
* @direction: a bitmask of the flags indicating on which pad the format is supported on
*/
-struct rkisp1_isp_mbus_info {
+struct rkisp1_mbus_info {
u32 mbus_code;
enum v4l2_pixel_encoding pixel_enc;
u32 mipi_dt;
@@ -425,7 +503,7 @@ struct rkisp1_isp_mbus_info {
};
static inline void
-rkisp1_write(struct rkisp1_device *rkisp1, u32 val, unsigned int addr)
+rkisp1_write(struct rkisp1_device *rkisp1, unsigned int addr, u32 val)
{
writel(val, rkisp1->base_addr + addr);
}
@@ -447,6 +525,13 @@ int rkisp1_cap_enum_mbus_codes(struct rkisp1_capture *cap,
struct v4l2_subdev_mbus_code_enum *code);
/*
+ * rkisp1_mbus_info_get_by_index - Retrieve the ith supported mbus info
+ *
+ * @index: index of the mbus info to fetch
+ */
+const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_index(unsigned int index);
+
+/*
* rkisp1_sd_adjust_crop_rect - adjust a rectangle to fit into another rectangle.
*
* @crop: rectangle to adjust.
@@ -465,11 +550,11 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
const struct v4l2_mbus_framefmt *bounds);
/*
- * rkisp1_isp_mbus_info - get the isp info of the media bus code
+ * rkisp1_mbus_info_get_by_code - get the isp info of the media bus code
*
* @mbus_code: the media bus code
*/
-const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code);
+const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_code(u32 mbus_code);
/* rkisp1_params_configure - configure the params when stream starts.
* This function is called by the isp entity upon stream starts.
@@ -493,7 +578,7 @@ void rkisp1_params_disable(struct rkisp1_params *params);
/* irq handlers */
irqreturn_t rkisp1_isp_isr(int irq, void *ctx);
-irqreturn_t rkisp1_mipi_isr(int irq, void *ctx);
+irqreturn_t rkisp1_csi_isr(int irq, void *ctx);
irqreturn_t rkisp1_capture_isr(int irq, void *ctx);
void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
void rkisp1_params_isr(struct rkisp1_device *rkisp1);
@@ -514,4 +599,16 @@ void rkisp1_stats_unregister(struct rkisp1_device *rkisp1);
int rkisp1_params_register(struct rkisp1_device *rkisp1);
void rkisp1_params_unregister(struct rkisp1_device *rkisp1);
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void rkisp1_debug_init(struct rkisp1_device *rkisp1);
+void rkisp1_debug_cleanup(struct rkisp1_device *rkisp1);
+#else
+static inline void rkisp1_debug_init(struct rkisp1_device *rkisp1)
+{
+}
+static inline void rkisp1_debug_cleanup(struct rkisp1_device *rkisp1)
+{
+}
+#endif
+
#endif /* _RKISP1_COMMON_H */
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
new file mode 100644
index 000000000000..d7acc94e10f8
--- /dev/null
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - CSI-2 Receiver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ * Copyright (C) 2022 Ideas on Board
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/lockdep.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+
+#include "rkisp1-common.h"
+#include "rkisp1-csi.h"
+
+#define RKISP1_CSI_DEV_NAME RKISP1_DRIVER_NAME "_csi"
+
+#define RKISP1_CSI_DEF_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+
+static inline struct rkisp1_csi *to_rkisp1_csi(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rkisp1_csi, sd);
+}
+
+static struct v4l2_mbus_framefmt *
+rkisp1_csi_get_pad_fmt(struct rkisp1_csi *csi,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, u32 which)
+{
+ struct v4l2_subdev_state state = {
+ .pads = csi->pad_cfg
+ };
+
+ lockdep_assert_held(&csi->lock);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
+ else
+ return v4l2_subdev_get_try_format(&csi->sd, &state, pad);
+}
+
+int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
+ struct rkisp1_sensor_async *s_asd,
+ unsigned int source_pad)
+{
+ struct rkisp1_csi *csi = &rkisp1->csi;
+ int ret;
+
+ s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler,
+ V4L2_CID_PIXEL_RATE);
+ if (!s_asd->pixel_rate_ctrl) {
+ dev_err(rkisp1->dev, "No pixel rate control in subdev %s\n",
+ sd->name);
+ return -EINVAL;
+ }
+
+ /* Create the link from the sensor to the CSI receiver. */
+ ret = media_create_pad_link(&sd->entity, source_pad,
+ &csi->sd.entity, RKISP1_CSI_PAD_SINK,
+ !s_asd->index ? MEDIA_LNK_FL_ENABLED : 0);
+ if (ret) {
+ dev_err(csi->rkisp1->dev, "failed to link src pad of %s\n",
+ sd->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rkisp1_csi_config(struct rkisp1_csi *csi,
+ const struct rkisp1_sensor_async *sensor)
+{
+ struct rkisp1_device *rkisp1 = csi->rkisp1;
+ unsigned int lanes = sensor->lanes;
+ u32 mipi_ctrl;
+
+ if (lanes < 1 || lanes > 4)
+ return -EINVAL;
+
+ mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
+ RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
+ RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP |
+ RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA;
+
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL, mipi_ctrl);
+
+ /* V12 could also use a newer csi2-host, but we don't want that yet */
+ if (rkisp1->info->isp_ver == RKISP1_V12)
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_CSI0_CTRL0, 0);
+
+ /* Configure Data Type and Virtual Channel */
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL,
+ RKISP1_CIF_MIPI_DATA_SEL_DT(csi->sink_fmt->mipi_dt) |
+ RKISP1_CIF_MIPI_DATA_SEL_VC(0));
+
+ /* Clear MIPI interrupts */
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
+
+ /*
+ * Temporarily disable the RKISP1_CIF_MIPI_ERR_DPHY interrupt here, as
+ * the ISP bus may be dead when switching the ISP.
+ */
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC,
+ RKISP1_CIF_MIPI_FRAME_END | RKISP1_CIF_MIPI_ERR_CSI |
+ RKISP1_CIF_MIPI_ERR_DPHY |
+ RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(0x03) |
+ RKISP1_CIF_MIPI_ADD_DATA_OVFLW);
+
+ dev_dbg(rkisp1->dev, "\n MIPI_CTRL 0x%08x\n"
+ " MIPI_IMG_DATA_SEL 0x%08x\n"
+ " MIPI_STATUS 0x%08x\n"
+ " MIPI_IMSC 0x%08x\n",
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL),
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL),
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_STATUS),
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC));
+
+ return 0;
+}
+
+static void rkisp1_csi_enable(struct rkisp1_csi *csi)
+{
+ struct rkisp1_device *rkisp1 = csi->rkisp1;
+ u32 val;
+
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL,
+ val | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA);
+}
+
+static void rkisp1_csi_disable(struct rkisp1_csi *csi)
+{
+ struct rkisp1_device *rkisp1 = csi->rkisp1;
+ u32 val;
+
+ /* Mask and clear interrupts. */
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
+
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL,
+ val & (~RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA));
+}
+
+static int rkisp1_csi_start(struct rkisp1_csi *csi,
+ const struct rkisp1_sensor_async *sensor)
+{
+ struct rkisp1_device *rkisp1 = csi->rkisp1;
+ union phy_configure_opts opts;
+ struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+ s64 pixel_clock;
+ int ret;
+
+ ret = rkisp1_csi_config(csi, sensor);
+ if (ret)
+ return ret;
+
+ pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl);
+ if (!pixel_clock) {
+ dev_err(rkisp1->dev, "Invalid pixel rate value\n");
+ return -EINVAL;
+ }
+
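+ /*
+ * Derive the D-PHY timing parameters from the sensor pixel rate, the
+ * bus width of the sink format and the number of data lanes, then
+ * configure and power on the PHY before enabling the receiver.
+ */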
+ phy_mipi_dphy_get_default_config(pixel_clock, csi->sink_fmt->bus_width,
+ sensor->lanes, cfg);
+ phy_set_mode(csi->dphy, PHY_MODE_MIPI_DPHY);
+ phy_configure(csi->dphy, &opts);
+ phy_power_on(csi->dphy);
+
+ rkisp1_csi_enable(csi);
+
+ /*
+ * CIF spec says to wait for sufficient time after enabling
+ * the MIPI interface and before starting the sensor output.
+ */
+ usleep_range(1000, 1200);
+
+ return 0;
+}
+
+static void rkisp1_csi_stop(struct rkisp1_csi *csi)
+{
+ rkisp1_csi_disable(csi);
+
+ phy_power_off(csi->dphy);
+}
+
+irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
+{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ u32 val, status;
+
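+ /*
+ * Read the masked MIPI interrupt status; return early if no interrupt
+ * is pending, otherwise acknowledge the pending interrupts.
+ */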
+ status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
+ if (!status)
+ return IRQ_NONE;
+
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, status);
+
+ /*
+ * Disable the DPHY errctrl interrupt, because the errctrl signal is
+ * asserted until the next change of line state. This time may be too
+ * long and would keep the CPU stuck in this interrupt handler.
+ */
+ if (status & RKISP1_CIF_MIPI_ERR_CTRL(0x0f)) {
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC,
+ val & ~RKISP1_CIF_MIPI_ERR_CTRL(0x0f));
+ rkisp1->csi.is_dphy_errctrl_disabled = true;
+ }
+
+ /*
+ * Re-enable the DPHY errctrl interrupt if MIPI has received a whole
+ * frame without any error.
+ */
+ if (status == RKISP1_CIF_MIPI_FRAME_END) {
+ if (rkisp1->csi.is_dphy_errctrl_disabled) {
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
+ val |= RKISP1_CIF_MIPI_ERR_CTRL(0x0f);
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, val);
+ rkisp1->csi.is_dphy_errctrl_disabled = false;
+ }
+ } else {
+ rkisp1->debug.mipi_error++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* ----------------------------------------------------------------------------
+ * Subdev pad operations
+ */
+
+static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct rkisp1_csi *csi = to_rkisp1_csi(sd);
+ unsigned int i;
+ int pos = 0;
+
+ if (code->pad == RKISP1_CSI_PAD_SRC) {
+ const struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (code->index)
+ return -EINVAL;
+
+ mutex_lock(&csi->lock);
+
+ sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state,
+ RKISP1_CSI_PAD_SINK,
+ code->which);
+ code->code = sink_fmt->code;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+ }
+
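+ /*
+ * On the sink pad, enumerate every media bus format that the ISP can
+ * accept on its video sink.
+ */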
+ for (i = 0; ; i++) {
+ const struct rkisp1_mbus_info *fmt =
+ rkisp1_mbus_info_get_by_index(i);
+
+ if (!fmt)
+ return -EINVAL;
+
+ if (!(fmt->direction & RKISP1_ISP_SD_SINK))
+ continue;
+
+ if (code->index == pos) {
+ code->code = fmt->mbus_code;
+ return 0;
+ }
+
+ pos++;
+ }
+
+ return -EINVAL;
+}
+
+static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ RKISP1_CSI_PAD_SINK);
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ RKISP1_CSI_PAD_SRC);
+
+ sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+ sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = RKISP1_CSI_DEF_FMT;
+
+ *src_fmt = *sink_fmt;
+
+ return 0;
+}
+
+static int rkisp1_csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct rkisp1_csi *csi = to_rkisp1_csi(sd);
+
+ mutex_lock(&csi->lock);
+ fmt->format = *rkisp1_csi_get_pad_fmt(csi, sd_state, fmt->pad,
+ fmt->which);
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct rkisp1_csi *csi = to_rkisp1_csi(sd);
+ const struct rkisp1_mbus_info *mbus_info;
+ struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+
+ /* The format on the source pad always matches the sink pad. */
+ if (fmt->pad == RKISP1_CSI_PAD_SRC)
+ return rkisp1_csi_get_fmt(sd, sd_state, fmt);
+
+ mutex_lock(&csi->lock);
+
+ sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SINK,
+ fmt->which);
+
+ sink_fmt->code = fmt->format.code;
+
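+ /*
+ * Fall back to the default format if the requested media bus code is
+ * not supported on the sink pad.
+ */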
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
+ if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
+ sink_fmt->code = RKISP1_CSI_DEF_FMT;
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
+ }
+
+ sink_fmt->width = clamp_t(u32, fmt->format.width,
+ RKISP1_ISP_MIN_WIDTH,
+ RKISP1_ISP_MAX_WIDTH);
+ sink_fmt->height = clamp_t(u32, fmt->format.height,
+ RKISP1_ISP_MIN_HEIGHT,
+ RKISP1_ISP_MAX_HEIGHT);
+
+ fmt->format = *sink_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ csi->sink_fmt = mbus_info;
+
+ /* Propagate the format to the source pad. */
+ src_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SRC,
+ fmt->which);
+ *src_fmt = *sink_fmt;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * Subdev video operations
+ */
+
+static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rkisp1_csi *csi = to_rkisp1_csi(sd);
+ struct rkisp1_device *rkisp1 = csi->rkisp1;
+ struct rkisp1_sensor_async *source_asd;
+ struct media_pad *source_pad;
+ struct v4l2_subdev *source;
+ int ret;
+
+ if (!enable) {
+ v4l2_subdev_call(csi->source, video, s_stream, false);
+
+ rkisp1_csi_stop(csi);
+
+ return 0;
+ }
+
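+ /*
+ * Find the remote sensor connected to the CSI sink pad and validate
+ * its bus type before starting the receiver.
+ */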
+ source_pad = media_entity_remote_source_pad_unique(&sd->entity);
+ if (IS_ERR(source_pad)) {
+ dev_dbg(rkisp1->dev, "Failed to get source for CSI: %ld\n",
+ PTR_ERR(source_pad));
+ return -EPIPE;
+ }
+
+ source = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!source) {
+ /* This should really not happen, so it is not worth a message. */
+ return -EPIPE;
+ }
+
+ source_asd = container_of(source->asd, struct rkisp1_sensor_async, asd);
+ if (source_asd->mbus_type != V4L2_MBUS_CSI2_DPHY)
+ return -EINVAL;
+
+ mutex_lock(&csi->lock);
+ ret = rkisp1_csi_start(csi, source_asd);
+ mutex_unlock(&csi->lock);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_call(source, video, s_stream, true);
+ if (ret) {
+ rkisp1_csi_stop(csi);
+ return ret;
+ }
+
+ csi->source = source;
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * Registration
+ */
+
+static const struct media_entity_operations rkisp1_csi_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops rkisp1_csi_video_ops = {
+ .s_stream = rkisp1_csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rkisp1_csi_pad_ops = {
+ .enum_mbus_code = rkisp1_csi_enum_mbus_code,
+ .init_cfg = rkisp1_csi_init_config,
+ .get_fmt = rkisp1_csi_get_fmt,
+ .set_fmt = rkisp1_csi_set_fmt,
+};
+
+static const struct v4l2_subdev_ops rkisp1_csi_ops = {
+ .video = &rkisp1_csi_video_ops,
+ .pad = &rkisp1_csi_pad_ops,
+};
+
+int rkisp1_csi_register(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_csi *csi = &rkisp1->csi;
+ struct v4l2_subdev_state state = {};
+ struct media_pad *pads;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ csi->rkisp1 = rkisp1;
+ mutex_init(&csi->lock);
+
+ sd = &csi->sd;
+ v4l2_subdev_init(sd, &rkisp1_csi_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->entity.ops = &rkisp1_csi_media_ops;
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->owner = THIS_MODULE;
+ strscpy(sd->name, RKISP1_CSI_DEV_NAME, sizeof(sd->name));
+
+ pads = csi->pads;
+ pads[RKISP1_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ pads[RKISP1_CSI_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
+ MEDIA_PAD_FL_MUST_CONNECT;
+
+ csi->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_CSI_DEF_FMT);
+
+ ret = media_entity_pads_init(&sd->entity, RKISP1_CSI_PAD_NUM, pads);
+ if (ret)
+ goto error;
+
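+ /* Initialize the active pad formats through a temporary state backed by pad_cfg. */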
+ state.pads = csi->pad_cfg;
+ rkisp1_csi_init_config(sd, &state);
+
+ ret = v4l2_device_register_subdev(&csi->rkisp1->v4l2_dev, sd);
+ if (ret) {
+ dev_err(sd->dev, "Failed to register csi receiver subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&sd->entity);
+ mutex_destroy(&csi->lock);
+ csi->rkisp1 = NULL;
+ return ret;
+}
+
+void rkisp1_csi_unregister(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_csi *csi = &rkisp1->csi;
+
+ if (!csi->rkisp1)
+ return;
+
+ v4l2_device_unregister_subdev(&csi->sd);
+ media_entity_cleanup(&csi->sd.entity);
+ mutex_destroy(&csi->lock);
+}
+
+int rkisp1_csi_init(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_csi *csi = &rkisp1->csi;
+
+ csi->rkisp1 = rkisp1;
+
+ csi->dphy = devm_phy_get(rkisp1->dev, "dphy");
+ if (IS_ERR(csi->dphy))
+ return dev_err_probe(rkisp1->dev, PTR_ERR(csi->dphy),
+ "Couldn't get the MIPI D-PHY\n");
+
+ phy_init(csi->dphy);
+
+ return 0;
+}
+
+void rkisp1_csi_cleanup(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_csi *csi = &rkisp1->csi;
+
+ phy_exit(csi->dphy);
+}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.h
new file mode 100644
index 000000000000..1f5f2af31a7d
--- /dev/null
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 Driver - CSI-2 Receiver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ * Copyright (C) 2022 Ideas on Board
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+#ifndef _RKISP1_CSI_H
+#define _RKISP1_CSI_H
+
+struct rkisp1_csi;
+struct rkisp1_device;
+struct rkisp1_sensor_async;
+
+int rkisp1_csi_init(struct rkisp1_device *rkisp1);
+void rkisp1_csi_cleanup(struct rkisp1_device *rkisp1);
+
+int rkisp1_csi_register(struct rkisp1_device *rkisp1);
+void rkisp1_csi_unregister(struct rkisp1_device *rkisp1);
+
+int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
+ struct rkisp1_sensor_async *s_asd,
+ unsigned int source_pad);
+
+#endif /* _RKISP1_CSI_H */
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c
new file mode 100644
index 000000000000..71df3dc95e6f
--- /dev/null
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Base driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/minmax.h>
+#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "rkisp1-common.h"
+#include "rkisp1-regs.h"
+
+struct rkisp1_debug_register {
+ u32 reg;
+ u32 shd;
+ const char * const name;
+};
+
+#define RKISP1_DEBUG_REG(name) { RKISP1_CIF_##name, 0, #name }
+#define RKISP1_DEBUG_SHD_REG(name) { \
+ RKISP1_CIF_##name, RKISP1_CIF_##name##_SHD, #name \
+}
+
+/* Keep this up-to-date when adding new registers. */
+#define RKISP1_MAX_REG_LENGTH 21
+
+static int rkisp1_debug_dump_regs(struct rkisp1_device *rkisp1,
+ struct seq_file *m, unsigned int offset,
+ const struct rkisp1_debug_register *regs)
+{
+ const int width = RKISP1_MAX_REG_LENGTH;
+ u32 val, shd;
+ int ret;
+
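+ /* Dump the registers only if the ISP is powered; report -ENODATA if it is suspended. */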
+ ret = pm_runtime_get_if_in_use(rkisp1->dev);
+ if (ret <= 0)
+ return ret ? : -ENODATA;
+
+ for (; regs->name; ++regs) {
+ val = rkisp1_read(rkisp1, offset + regs->reg);
+
+ if (regs->shd) {
+ shd = rkisp1_read(rkisp1, offset + regs->shd);
+ seq_printf(m, "%*s: 0x%08x/0x%08x\n", width, regs->name,
+ val, shd);
+ } else {
+ seq_printf(m, "%*s: 0x%08x\n", width, regs->name, val);
+ }
+ }
+
+ pm_runtime_put(rkisp1->dev);
+
+ return 0;
+}
+
+static int rkisp1_debug_dump_core_regs_show(struct seq_file *m, void *p)
+{
+ static const struct rkisp1_debug_register registers[] = {
+ RKISP1_DEBUG_REG(VI_CCL),
+ RKISP1_DEBUG_REG(VI_ICCL),
+ RKISP1_DEBUG_REG(VI_IRCL),
+ RKISP1_DEBUG_REG(VI_DPCL),
+ RKISP1_DEBUG_REG(MI_CTRL),
+ RKISP1_DEBUG_REG(MI_BYTE_CNT),
+ RKISP1_DEBUG_REG(MI_CTRL_SHD),
+ RKISP1_DEBUG_REG(MI_RIS),
+ RKISP1_DEBUG_REG(MI_STATUS),
+ RKISP1_DEBUG_REG(MI_DMA_CTRL),
+ RKISP1_DEBUG_REG(MI_DMA_STATUS),
+ { /* Sentinel */ },
+ };
+ struct rkisp1_device *rkisp1 = m->private;
+
+ return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
+}
+DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_core_regs);
+
+static int rkisp1_debug_dump_isp_regs_show(struct seq_file *m, void *p)
+{
+ static const struct rkisp1_debug_register registers[] = {
+ RKISP1_DEBUG_REG(ISP_CTRL),
+ RKISP1_DEBUG_REG(ISP_ACQ_PROP),
+ RKISP1_DEBUG_REG(ISP_FLAGS_SHD),
+ RKISP1_DEBUG_REG(ISP_RIS),
+ RKISP1_DEBUG_REG(ISP_ERR),
+ { /* Sentinel */ },
+ };
+ struct rkisp1_device *rkisp1 = m->private;
+
+ return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
+}
+DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_isp_regs);
+
+static int rkisp1_debug_dump_rsz_regs_show(struct seq_file *m, void *p)
+{
+ static const struct rkisp1_debug_register registers[] = {
+ RKISP1_DEBUG_SHD_REG(RSZ_CTRL),
+ RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HY),
+ RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HCB),
+ RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HCR),
+ RKISP1_DEBUG_SHD_REG(RSZ_SCALE_VY),
+ RKISP1_DEBUG_SHD_REG(RSZ_SCALE_VC),
+ RKISP1_DEBUG_SHD_REG(RSZ_PHASE_HY),
+ RKISP1_DEBUG_SHD_REG(RSZ_PHASE_HC),
+ RKISP1_DEBUG_SHD_REG(RSZ_PHASE_VY),
+ RKISP1_DEBUG_SHD_REG(RSZ_PHASE_VC),
+ { /* Sentinel */ },
+ };
+ struct rkisp1_resizer *rsz = m->private;
+
+ return rkisp1_debug_dump_regs(rsz->rkisp1, m, rsz->regs_base, registers);
+}
+DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_rsz_regs);
+
+static int rkisp1_debug_dump_mi_mp_show(struct seq_file *m, void *p)
+{
+ static const struct rkisp1_debug_register registers[] = {
+ RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_INIT),
+ RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_INIT2),
+ RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_SHD),
+ RKISP1_DEBUG_REG(MI_MP_Y_SIZE_INIT),
+ RKISP1_DEBUG_REG(MI_MP_Y_SIZE_INIT),
+ RKISP1_DEBUG_REG(MI_MP_Y_SIZE_SHD),
+ RKISP1_DEBUG_REG(MI_MP_Y_OFFS_CNT_SHD),
+ { /* Sentinel */ },
+ };
+ struct rkisp1_device *rkisp1 = m->private;
+
+ return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
+}
+DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_mi_mp);
+
+#define RKISP1_DEBUG_DATA_COUNT_BINS 32
+#define RKISP1_DEBUG_DATA_COUNT_STEP (4096 / RKISP1_DEBUG_DATA_COUNT_BINS)
+
+static int rkisp1_debug_input_status_show(struct seq_file *m, void *p)
+{
+ struct rkisp1_device *rkisp1 = m->private;
+ u16 data_count[RKISP1_DEBUG_DATA_COUNT_BINS] = { };
+ unsigned int hsync_count = 0;
+ unsigned int vsync_count = 0;
+ unsigned int i;
+ u32 data;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_get_if_in_use(rkisp1->dev);
+ if (ret <= 0)
+ return ret ? : -ENODATA;
+
+ /* Sample the ISP input port status 10000 times with a 1µs interval. */
+ for (i = 0; i < 10000; ++i) {
+ val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_FLAGS_SHD);
+
+ data = (val & RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_MASK)
+ >> RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_SHIFT;
+ data_count[data / RKISP1_DEBUG_DATA_COUNT_STEP]++;
+
+ if (val & RKISP1_CIF_ISP_FLAGS_SHD_S_HSYNC)
+ hsync_count++;
+ if (val & RKISP1_CIF_ISP_FLAGS_SHD_S_VSYNC)
+ vsync_count++;
+
+ udelay(1);
+ }
+
+ pm_runtime_put(rkisp1->dev);
+
+ seq_printf(m, "vsync: %u, hsync: %u\n", vsync_count, hsync_count);
+ seq_puts(m, "data:\n");
+ for (i = 0; i < ARRAY_SIZE(data_count); ++i)
+ seq_printf(m, "- [%04u:%04u]: %u\n",
+ i * RKISP1_DEBUG_DATA_COUNT_STEP,
+ (i + 1) * RKISP1_DEBUG_DATA_COUNT_STEP - 1,
+ data_count[i]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_input_status);
+
+void rkisp1_debug_init(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_debug *debug = &rkisp1->debug;
+ struct dentry *regs_dir;
+
+ debug->debugfs_dir = debugfs_create_dir(dev_name(rkisp1->dev), NULL);
+
+ debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
+ &debug->data_loss);
+ debugfs_create_ulong("outform_size_err", 0444, debug->debugfs_dir,
+ &debug->outform_size_error);
+ debugfs_create_ulong("img_stabilization_size_error", 0444,
+ debug->debugfs_dir,
+ &debug->img_stabilization_size_error);
+ debugfs_create_ulong("inform_size_error", 0444, debug->debugfs_dir,
+ &debug->inform_size_error);
+ debugfs_create_ulong("irq_delay", 0444, debug->debugfs_dir,
+ &debug->irq_delay);
+ debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
+ &debug->mipi_error);
+ debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
+ &debug->stats_error);
+ debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir,
+ &debug->stop_timeout[RKISP1_MAINPATH]);
+ debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir,
+ &debug->stop_timeout[RKISP1_SELFPATH]);
+ debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir,
+ &debug->frame_drop[RKISP1_MAINPATH]);
+ debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
+ &debug->frame_drop[RKISP1_SELFPATH]);
+ debugfs_create_file("input_status", 0444, debug->debugfs_dir, rkisp1,
+ &rkisp1_debug_input_status_fops);
+
+ regs_dir = debugfs_create_dir("regs", debug->debugfs_dir);
+
+ debugfs_create_file("core", 0444, regs_dir, rkisp1,
+ &rkisp1_debug_dump_core_regs_fops);
+ debugfs_create_file("isp", 0444, regs_dir, rkisp1,
+ &rkisp1_debug_dump_isp_regs_fops);
+ debugfs_create_file("mrsz", 0444, regs_dir,
+ &rkisp1->resizer_devs[RKISP1_MAINPATH],
+ &rkisp1_debug_dump_rsz_regs_fops);
+ debugfs_create_file("srsz", 0444, regs_dir,
+ &rkisp1->resizer_devs[RKISP1_SELFPATH],
+ &rkisp1_debug_dump_rsz_regs_fops);
+
+ debugfs_create_file("mi_mp", 0444, regs_dir, rkisp1,
+ &rkisp1_debug_dump_mi_mp_fops);
+}
+
+void rkisp1_debug_cleanup(struct rkisp1_device *rkisp1)
+{
+ debugfs_remove_recursive(rkisp1->debug.debugfs_dir);
+}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 3f5cfa7eb937..f2475c6235ea 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -9,18 +9,18 @@
*/
#include <linux/clk.h>
-#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
#include "rkisp1-common.h"
+#include "rkisp1-csi.h"
/*
* ISP Details
@@ -68,18 +68,28 @@
*
* Media Topology
* --------------
- * +----------+ +----------+
- * | Sensor 2 | | Sensor X |
- * ------------ ... ------------
- * | 0 | | 0 |
- * +----------+ +----------+ +-----------+
- * \ | | params |
- * \ | | (output) |
- * +----------+ \ | +-----------+
- * | Sensor 1 | v v |
- * ------------ +------+------+ |
- * | 0 |----->| 0 | 1 |<---------+
- * +----------+ |------+------|
+ *
+ * +----------+ +----------+
+ * | Sensor 1 | | Sensor X |
+ * ------------ ... ------------
+ * | 0 | | 0 |
+ * +----------+ +----------+
+ * | |
+ * \----\ /----/
+ * | |
+ * v v
+ * +-------------+
+ * | 0 |
+ * ---------------
+ * | CSI-2 RX |
+ * --------------- +-----------+
+ * | 1 | | params |
+ * +-------------+ | (output) |
+ * | +-----------+
+ * v |
+ * +------+------+ |
+ * | 0 | 1 |<---------+
+ * |------+------|
* | ISP |
* |------+------|
* +-------------| 2 | 3 |----------+
@@ -106,88 +116,10 @@ struct rkisp1_isr_data {
irqreturn_t (*isr)(int irq, void *ctx);
};
-struct rkisp1_match_data {
- const char * const *clks;
- unsigned int clk_size;
- const struct rkisp1_isr_data *isrs;
- unsigned int isr_size;
- enum rkisp1_cif_isp_version isp_ver;
-};
-
/* ----------------------------------------------------------------------------
* Sensor DT bindings
*/
-static int rkisp1_create_links(struct rkisp1_device *rkisp1)
-{
- struct media_entity *source, *sink;
- unsigned int flags, source_pad;
- struct v4l2_subdev *sd;
- unsigned int i;
- int ret;
-
- /* sensor links */
- flags = MEDIA_LNK_FL_ENABLED;
- list_for_each_entry(sd, &rkisp1->v4l2_dev.subdevs, list) {
- if (sd == &rkisp1->isp.sd ||
- sd == &rkisp1->resizer_devs[RKISP1_MAINPATH].sd ||
- sd == &rkisp1->resizer_devs[RKISP1_SELFPATH].sd)
- continue;
-
- ret = media_entity_get_fwnode_pad(&sd->entity, sd->fwnode,
- MEDIA_PAD_FL_SOURCE);
- if (ret < 0) {
- dev_err(rkisp1->dev, "failed to find src pad for %s\n",
- sd->name);
- return ret;
- }
- source_pad = ret;
-
- ret = media_create_pad_link(&sd->entity, source_pad,
- &rkisp1->isp.sd.entity,
- RKISP1_ISP_PAD_SINK_VIDEO,
- flags);
- if (ret)
- return ret;
-
- flags = 0;
- }
-
- flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
-
- /* create ISP->RSZ->CAP links */
- for (i = 0; i < 2; i++) {
- source = &rkisp1->isp.sd.entity;
- sink = &rkisp1->resizer_devs[i].sd.entity;
- ret = media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_VIDEO,
- sink, RKISP1_RSZ_PAD_SINK,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
-
- source = sink;
- sink = &rkisp1->capture_devs[i].vnode.vdev.entity;
- ret = media_create_pad_link(source, RKISP1_RSZ_PAD_SRC,
- sink, 0, flags);
- if (ret)
- return ret;
- }
-
- /* params links */
- source = &rkisp1->params.vnode.vdev.entity;
- sink = &rkisp1->isp.sd.entity;
- ret = media_create_pad_link(source, 0, sink,
- RKISP1_ISP_PAD_SINK_PARAMS, flags);
- if (ret)
- return ret;
-
- /* 3A stats links */
- source = &rkisp1->isp.sd.entity;
- sink = &rkisp1->stats.vnode.vdev.entity;
- return media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_STATS,
- sink, 0, flags);
-}
-
static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
@@ -196,116 +128,171 @@ static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier,
container_of(notifier, struct rkisp1_device, notifier);
struct rkisp1_sensor_async *s_asd =
container_of(asd, struct rkisp1_sensor_async, asd);
+ int source_pad;
+ int ret;
- s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler,
- V4L2_CID_PIXEL_RATE);
s_asd->sd = sd;
- s_asd->dphy = devm_phy_get(rkisp1->dev, "dphy");
- if (IS_ERR(s_asd->dphy)) {
- if (PTR_ERR(s_asd->dphy) != -EPROBE_DEFER)
- dev_err(rkisp1->dev, "Couldn't get the MIPI D-PHY\n");
- return PTR_ERR(s_asd->dphy);
+
+ source_pad = media_entity_get_fwnode_pad(&sd->entity, s_asd->source_ep,
+ MEDIA_PAD_FL_SOURCE);
+ if (source_pad < 0) {
+ dev_err(rkisp1->dev, "failed to find source pad for %s\n",
+ sd->name);
+ return source_pad;
}
- phy_init(s_asd->dphy);
+ if (s_asd->port == 0)
+ return rkisp1_csi_link_sensor(rkisp1, sd, s_asd, source_pad);
- return 0;
-}
-
-static void rkisp1_subdev_notifier_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
-{
- struct rkisp1_sensor_async *s_asd =
- container_of(asd, struct rkisp1_sensor_async, asd);
+ ret = media_create_pad_link(&sd->entity, source_pad,
+ &rkisp1->isp.sd.entity,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ !s_asd->index ? MEDIA_LNK_FL_ENABLED : 0);
+ if (ret) {
+ dev_err(rkisp1->dev, "failed to link source pad of %s\n",
+ sd->name);
+ return ret;
+ }
- phy_exit(s_asd->dphy);
+ return 0;
}
static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct rkisp1_device *rkisp1 =
container_of(notifier, struct rkisp1_device, notifier);
- int ret;
- ret = rkisp1_create_links(rkisp1);
- if (ret)
- return ret;
-
- ret = v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
- if (ret)
- return ret;
+ return v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
+}
- dev_dbg(rkisp1->dev, "Async subdev notifier completed\n");
+static void rkisp1_subdev_notifier_destroy(struct v4l2_async_subdev *asd)
+{
+ struct rkisp1_sensor_async *rk_asd =
+ container_of(asd, struct rkisp1_sensor_async, asd);
- return 0;
+ fwnode_handle_put(rk_asd->source_ep);
}
static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops = {
.bound = rkisp1_subdev_notifier_bound,
- .unbind = rkisp1_subdev_notifier_unbind,
.complete = rkisp1_subdev_notifier_complete,
+ .destroy = rkisp1_subdev_notifier_destroy,
};
-static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
+static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
{
struct v4l2_async_notifier *ntf = &rkisp1->notifier;
- unsigned int next_id = 0;
- int ret;
+ struct fwnode_handle *fwnode = dev_fwnode(rkisp1->dev);
+ struct fwnode_handle *ep;
+ unsigned int index = 0;
+ int ret = 0;
v4l2_async_nf_init(ntf);
- while (1) {
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
- };
+ ntf->ops = &rkisp1_subdev_notifier_ops;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ struct fwnode_handle *port;
+ struct v4l2_fwnode_endpoint vep = { };
struct rkisp1_sensor_async *rk_asd;
- struct fwnode_handle *ep;
+ struct fwnode_handle *source;
+ u32 reg = 0;
+
+ /* Select the bus type based on the port. */
+ port = fwnode_get_parent(ep);
+ fwnode_property_read_u32(port, "reg", &reg);
+ fwnode_handle_put(port);
+
+ switch (reg) {
+ case 0:
+ /* MIPI CSI-2 port */
+ if (!(rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)) {
+ dev_err(rkisp1->dev,
+ "internal CSI must be available for port 0\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ vep.bus_type = V4L2_MBUS_CSI2_DPHY;
+ break;
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(rkisp1->dev),
- 0, next_id,
- FWNODE_GRAPH_ENDPOINT_NEXT);
- if (!ep)
+ case 1:
+ /*
+ * Parallel port. The bus-type property in DT is mandatory for
+ * port 1; it will be used to determine whether the bus is
+ * PARALLEL or BT656.
+ */
+ vep.bus_type = V4L2_MBUS_UNKNOWN;
break;
+ }
+ /* Parse the endpoint and validate the bus type. */
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- if (ret)
- goto err_parse;
+ if (ret) {
+ dev_err(rkisp1->dev, "failed to parse endpoint %pfw\n",
+ ep);
+ break;
+ }
- rk_asd = v4l2_async_nf_add_fwnode_remote(ntf, ep,
- struct
- rkisp1_sensor_async);
+ if (vep.base.port == 1) {
+ if (vep.bus_type != V4L2_MBUS_PARALLEL &&
+ vep.bus_type != V4L2_MBUS_BT656) {
+ dev_err(rkisp1->dev,
+ "port 1 must be parallel or BT656\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ /* Add the async subdev to the notifier. */
+ source = fwnode_graph_get_remote_endpoint(ep);
+ if (!source) {
+ dev_err(rkisp1->dev,
+ "endpoint %pfw has no remote endpoint\n",
+ ep);
+ ret = -ENODEV;
+ break;
+ }
+
+ rk_asd = v4l2_async_nf_add_fwnode(ntf, source,
+ struct rkisp1_sensor_async);
if (IS_ERR(rk_asd)) {
+ fwnode_handle_put(source);
ret = PTR_ERR(rk_asd);
- goto err_parse;
+ break;
}
+ rk_asd->index = index++;
+ rk_asd->source_ep = source;
rk_asd->mbus_type = vep.bus_type;
- rk_asd->mbus_flags = vep.bus.mipi_csi2.flags;
- rk_asd->lanes = vep.bus.mipi_csi2.num_data_lanes;
-
- dev_dbg(rkisp1->dev, "registered ep id %d with %d lanes\n",
- vep.base.id, rk_asd->lanes);
+ rk_asd->port = vep.base.port;
- next_id = vep.base.id + 1;
+ if (vep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ rk_asd->mbus_flags = vep.bus.mipi_csi2.flags;
+ rk_asd->lanes = vep.bus.mipi_csi2.num_data_lanes;
+ } else {
+ rk_asd->mbus_flags = vep.bus.parallel.flags;
+ }
- fwnode_handle_put(ep);
+ dev_dbg(rkisp1->dev, "registered ep id %d, bus type %u, %u lanes\n",
+ vep.base.id, rk_asd->mbus_type, rk_asd->lanes);
+ }
- continue;
-err_parse:
+ if (ret) {
fwnode_handle_put(ep);
v4l2_async_nf_cleanup(ntf);
return ret;
}
- if (next_id == 0)
+ if (!index)
dev_dbg(rkisp1->dev, "no remote subdevice found\n");
- ntf->ops = &rkisp1_subdev_notifier_ops;
+
ret = v4l2_async_nf_register(&rkisp1->v4l2_dev, ntf);
if (ret) {
v4l2_async_nf_cleanup(ntf);
return ret;
}
+
return 0;
}
@@ -346,48 +333,110 @@ static const struct dev_pm_ops rkisp1_pm_ops = {
* Core
*/
+static int rkisp1_create_links(struct rkisp1_device *rkisp1)
+{
+ unsigned int i;
+ int ret;
+
+ if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
+ /* Link the CSI receiver to the ISP. */
+ ret = media_create_pad_link(&rkisp1->csi.sd.entity,
+ RKISP1_CSI_PAD_SRC,
+ &rkisp1->isp.sd.entity,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ /* create ISP->RSZ->CAP links */
+ for (i = 0; i < 2; i++) {
+ struct media_entity *resizer =
+ &rkisp1->resizer_devs[i].sd.entity;
+ struct media_entity *capture =
+ &rkisp1->capture_devs[i].vnode.vdev.entity;
+
+ ret = media_create_pad_link(&rkisp1->isp.sd.entity,
+ RKISP1_ISP_PAD_SOURCE_VIDEO,
+ resizer, RKISP1_RSZ_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ ret = media_create_pad_link(resizer, RKISP1_RSZ_PAD_SRC,
+ capture, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret)
+ return ret;
+ }
+
+ /* params links */
+ ret = media_create_pad_link(&rkisp1->params.vnode.vdev.entity, 0,
+ &rkisp1->isp.sd.entity,
+ RKISP1_ISP_PAD_SINK_PARAMS,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret)
+ return ret;
+
+ /* 3A stats links */
+ return media_create_pad_link(&rkisp1->isp.sd.entity,
+ RKISP1_ISP_PAD_SOURCE_STATS,
+ &rkisp1->stats.vnode.vdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void rkisp1_entities_unregister(struct rkisp1_device *rkisp1)
+{
+ if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
+ rkisp1_csi_unregister(rkisp1);
+ rkisp1_params_unregister(rkisp1);
+ rkisp1_stats_unregister(rkisp1);
+ rkisp1_capture_devs_unregister(rkisp1);
+ rkisp1_resizer_devs_unregister(rkisp1);
+ rkisp1_isp_unregister(rkisp1);
+}
+
static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
{
int ret;
ret = rkisp1_isp_register(rkisp1);
if (ret)
- return ret;
+ goto error;
ret = rkisp1_resizer_devs_register(rkisp1);
if (ret)
- goto err_unreg_isp_subdev;
+ goto error;
ret = rkisp1_capture_devs_register(rkisp1);
if (ret)
- goto err_unreg_resizer_devs;
+ goto error;
ret = rkisp1_stats_register(rkisp1);
if (ret)
- goto err_unreg_capture_devs;
+ goto error;
ret = rkisp1_params_register(rkisp1);
if (ret)
- goto err_unreg_stats;
+ goto error;
- ret = rkisp1_subdev_notifier(rkisp1);
- if (ret) {
- dev_err(rkisp1->dev,
- "Failed to register subdev notifier(%d)\n", ret);
- goto err_unreg_params;
+ if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
+ ret = rkisp1_csi_register(rkisp1);
+ if (ret)
+ goto error;
}
+ ret = rkisp1_create_links(rkisp1);
+ if (ret)
+ goto error;
+
return 0;
-err_unreg_params:
- rkisp1_params_unregister(rkisp1);
-err_unreg_stats:
- rkisp1_stats_unregister(rkisp1);
-err_unreg_capture_devs:
- rkisp1_capture_devs_unregister(rkisp1);
-err_unreg_resizer_devs:
- rkisp1_resizer_devs_unregister(rkisp1);
-err_unreg_isp_subdev:
- rkisp1_isp_unregister(rkisp1);
+
+error:
+ rkisp1_entities_unregister(rkisp1);
return ret;
}
@@ -401,7 +450,7 @@ static irqreturn_t rkisp1_isr(int irq, void *ctx)
*/
rkisp1_capture_isr(irq, ctx);
rkisp1_isp_isr(irq, ctx);
- rkisp1_mipi_isr(irq, ctx);
+ rkisp1_csi_isr(irq, ctx);
return IRQ_HANDLED;
}
@@ -416,15 +465,16 @@ static const char * const px30_isp_clks[] = {
static const struct rkisp1_isr_data px30_isp_isrs[] = {
{ "isp", rkisp1_isp_isr },
{ "mi", rkisp1_capture_isr },
- { "mipi", rkisp1_mipi_isr },
+ { "mipi", rkisp1_csi_isr },
};
-static const struct rkisp1_match_data px30_isp_match_data = {
+static const struct rkisp1_info px30_isp_info = {
.clks = px30_isp_clks,
.clk_size = ARRAY_SIZE(px30_isp_clks),
.isrs = px30_isp_isrs,
.isr_size = ARRAY_SIZE(px30_isp_isrs),
.isp_ver = RKISP1_V12,
+ .features = RKISP1_FEATURE_MIPI_CSI2,
};
static const char * const rk3399_isp_clks[] = {
@@ -437,74 +487,45 @@ static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
{ NULL, rkisp1_isr },
};
-static const struct rkisp1_match_data rk3399_isp_match_data = {
+static const struct rkisp1_info rk3399_isp_info = {
.clks = rk3399_isp_clks,
.clk_size = ARRAY_SIZE(rk3399_isp_clks),
.isrs = rk3399_isp_isrs,
.isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
+ .features = RKISP1_FEATURE_MIPI_CSI2,
};
static const struct of_device_id rkisp1_of_match[] = {
{
.compatible = "rockchip,px30-cif-isp",
- .data = &px30_isp_match_data,
+ .data = &px30_isp_info,
},
{
.compatible = "rockchip,rk3399-cif-isp",
- .data = &rk3399_isp_match_data,
+ .data = &rk3399_isp_info,
},
{},
};
MODULE_DEVICE_TABLE(of, rkisp1_of_match);
-static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
-{
- struct rkisp1_debug *debug = &rkisp1->debug;
-
- debug->debugfs_dir = debugfs_create_dir(dev_name(rkisp1->dev), NULL);
- debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
- &debug->data_loss);
- debugfs_create_ulong("outform_size_err", 0444, debug->debugfs_dir,
- &debug->outform_size_error);
- debugfs_create_ulong("img_stabilization_size_error", 0444,
- debug->debugfs_dir,
- &debug->img_stabilization_size_error);
- debugfs_create_ulong("inform_size_error", 0444, debug->debugfs_dir,
- &debug->inform_size_error);
- debugfs_create_ulong("irq_delay", 0444, debug->debugfs_dir,
- &debug->irq_delay);
- debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
- &debug->mipi_error);
- debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
- &debug->stats_error);
- debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir,
- &debug->stop_timeout[RKISP1_MAINPATH]);
- debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir,
- &debug->stop_timeout[RKISP1_SELFPATH]);
- debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir,
- &debug->frame_drop[RKISP1_MAINPATH]);
- debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
- &debug->frame_drop[RKISP1_SELFPATH]);
-}
-
static int rkisp1_probe(struct platform_device *pdev)
{
- const struct rkisp1_match_data *match_data;
+ const struct rkisp1_info *info;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
-
- match_data = of_device_get_match_data(&pdev->dev);
- if (!match_data)
- return -ENODEV;
+ u32 cif_id;
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
if (!rkisp1)
return -ENOMEM;
+ info = of_device_get_match_data(dev);
+ rkisp1->info = info;
+
dev_set_drvdata(dev, rkisp1);
rkisp1->dev = dev;
@@ -514,14 +535,14 @@ static int rkisp1_probe(struct platform_device *pdev)
if (IS_ERR(rkisp1->base_addr))
return PTR_ERR(rkisp1->base_addr);
- for (i = 0; i < match_data->isr_size; i++) {
- irq = (match_data->isrs[i].name) ?
- platform_get_irq_byname(pdev, match_data->isrs[i].name) :
- platform_get_irq(pdev, i);
+ for (i = 0; i < info->isr_size; i++) {
+ irq = info->isrs[i].name
+ ? platform_get_irq_byname(pdev, info->isrs[i].name)
+ : platform_get_irq(pdev, i);
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, match_data->isrs[i].isr, IRQF_SHARED,
+ ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
dev_driver_string(dev), dev);
if (ret) {
dev_err(dev, "request irq failed: %d\n", ret);
@@ -529,16 +550,25 @@ static int rkisp1_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < match_data->clk_size; i++)
- rkisp1->clks[i].id = match_data->clks[i];
- ret = devm_clk_bulk_get(dev, match_data->clk_size, rkisp1->clks);
+ for (i = 0; i < info->clk_size; i++)
+ rkisp1->clks[i].id = info->clks[i];
+ ret = devm_clk_bulk_get(dev, info->clk_size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = match_data->clk_size;
+ rkisp1->clk_size = info->clk_size;
pm_runtime_enable(&pdev->dev);
- rkisp1->media_dev.hw_revision = match_data->isp_ver;
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ goto err_pm_runtime_disable;
+
+ cif_id = rkisp1_read(rkisp1, RKISP1_CIF_VI_ID);
+ dev_dbg(rkisp1->dev, "CIF_ID 0x%08x\n", cif_id);
+
+ pm_runtime_put(&pdev->dev);
+
+ rkisp1->media_dev.hw_revision = info->isp_ver;
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
@@ -552,7 +582,7 @@ static int rkisp1_probe(struct platform_device *pdev)
ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
if (ret)
- return ret;
+ goto err_pm_runtime_disable;
ret = media_device_register(&rkisp1->media_dev);
if (ret) {
@@ -560,18 +590,34 @@ static int rkisp1_probe(struct platform_device *pdev)
goto err_unreg_v4l2_dev;
}
+ if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
+ ret = rkisp1_csi_init(rkisp1);
+ if (ret)
+ goto err_unreg_media_dev;
+ }
+
ret = rkisp1_entities_register(rkisp1);
if (ret)
- goto err_unreg_media_dev;
+ goto err_cleanup_csi;
+
+ ret = rkisp1_subdev_notifier_register(rkisp1);
+ if (ret)
+ goto err_unreg_entities;
rkisp1_debug_init(rkisp1);
return 0;
+err_unreg_entities:
+ rkisp1_entities_unregister(rkisp1);
+err_cleanup_csi:
+ if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
+ rkisp1_csi_cleanup(rkisp1);
err_unreg_media_dev:
media_device_unregister(&rkisp1->media_dev);
err_unreg_v4l2_dev:
v4l2_device_unregister(&rkisp1->v4l2_dev);
+err_pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -583,18 +629,16 @@ static int rkisp1_remove(struct platform_device *pdev)
v4l2_async_nf_unregister(&rkisp1->notifier);
v4l2_async_nf_cleanup(&rkisp1->notifier);
- rkisp1_params_unregister(rkisp1);
- rkisp1_stats_unregister(rkisp1);
- rkisp1_capture_devs_unregister(rkisp1);
- rkisp1_resizer_devs_unregister(rkisp1);
- rkisp1_isp_unregister(rkisp1);
+ rkisp1_entities_unregister(rkisp1);
+ if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
+ rkisp1_csi_cleanup(rkisp1);
+ rkisp1_debug_cleanup(rkisp1);
media_device_unregister(&rkisp1->media_dev);
v4l2_device_unregister(&rkisp1->v4l2_dev);
pm_runtime_disable(&pdev->dev);
- debugfs_remove_recursive(rkisp1->debug.debugfs_dir);
return 0;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 4415c7248c2f..383a3ec83ca9 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -9,13 +9,10 @@
*/
#include <linux/iopoll.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-mipi-dphy.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
-#include <media/mipi-csi2.h>
#include <media/v4l2-event.h>
#include "rkisp1-common.h"
@@ -56,158 +53,10 @@
* +---------------------------------------------------------+
*/
-static const struct rkisp1_isp_mbus_info rkisp1_isp_formats[] = {
- {
- .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
- .pixel_enc = V4L2_PIXEL_ENC_YUV,
- .direction = RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW10,
- .bayer_pat = RKISP1_RAW_RGGB,
- .bus_width = 10,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW10,
- .bayer_pat = RKISP1_RAW_BGGR,
- .bus_width = 10,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW10,
- .bayer_pat = RKISP1_RAW_GBRG,
- .bus_width = 10,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW10,
- .bayer_pat = RKISP1_RAW_GRBG,
- .bus_width = 10,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW12,
- .bayer_pat = RKISP1_RAW_RGGB,
- .bus_width = 12,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW12,
- .bayer_pat = RKISP1_RAW_BGGR,
- .bus_width = 12,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW12,
- .bayer_pat = RKISP1_RAW_GBRG,
- .bus_width = 12,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW12,
- .bayer_pat = RKISP1_RAW_GRBG,
- .bus_width = 12,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW8,
- .bayer_pat = RKISP1_RAW_RGGB,
- .bus_width = 8,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW8,
- .bayer_pat = RKISP1_RAW_BGGR,
- .bus_width = 8,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW8,
- .bayer_pat = RKISP1_RAW_GBRG,
- .bus_width = 8,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .pixel_enc = V4L2_PIXEL_ENC_BAYER,
- .mipi_dt = MIPI_CSI2_DT_RAW8,
- .bayer_pat = RKISP1_RAW_GRBG,
- .bus_width = 8,
- .direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
- }, {
- .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
- .pixel_enc = V4L2_PIXEL_ENC_YUV,
- .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
- .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCBYCR,
- .bus_width = 16,
- .direction = RKISP1_ISP_SD_SINK,
- }, {
- .mbus_code = MEDIA_BUS_FMT_YVYU8_1X16,
- .pixel_enc = V4L2_PIXEL_ENC_YUV,
- .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
- .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCRYCB,
- .bus_width = 16,
- .direction = RKISP1_ISP_SD_SINK,
- }, {
- .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
- .pixel_enc = V4L2_PIXEL_ENC_YUV,
- .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
- .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CBYCRY,
- .bus_width = 16,
- .direction = RKISP1_ISP_SD_SINK,
- }, {
- .mbus_code = MEDIA_BUS_FMT_VYUY8_1X16,
- .pixel_enc = V4L2_PIXEL_ENC_YUV,
- .mipi_dt = MIPI_CSI2_DT_YUV422_8B,
- .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CRYCBY,
- .bus_width = 16,
- .direction = RKISP1_ISP_SD_SINK,
- },
-};
-
/* ----------------------------------------------------------------------------
* Helpers
*/
-const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(rkisp1_isp_formats); i++) {
- const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i];
-
- if (fmt->mbus_code == mbus_code)
- return fmt;
- }
-
- return NULL;
-}
-
-static struct v4l2_subdev *rkisp1_get_remote_sensor(struct v4l2_subdev *sd)
-{
- struct media_pad *local, *remote;
- struct media_entity *sensor_me;
-
- local = &sd->entity.pads[RKISP1_ISP_PAD_SINK_VIDEO];
- remote = media_entity_remote_pad(local);
- if (!remote)
- return NULL;
-
- sensor_me = remote->entity;
- return media_entity_to_v4l2_subdev(sensor_me);
-}
-
static struct v4l2_mbus_framefmt *
rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
@@ -215,7 +64,8 @@ rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
{
struct v4l2_subdev_state state = {
.pads = isp->pad_cfg
- };
+ };
+
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);
else
@@ -229,7 +79,8 @@ rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
{
struct v4l2_subdev_state state = {
.pads = isp->pad_cfg
- };
+ };
+
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_crop(&isp->sd, sd_state, pad);
else
@@ -245,73 +96,73 @@ rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
* This should only be called when configuring CIF
* or at the frame end interrupt
*/
-static void rkisp1_config_ism(struct rkisp1_device *rkisp1)
+static void rkisp1_config_ism(struct rkisp1_isp *isp)
{
- struct v4l2_rect *src_crop =
- rkisp1_isp_get_pad_crop(&rkisp1->isp, NULL,
+ const struct v4l2_rect *src_crop =
+ rkisp1_isp_get_pad_crop(isp, NULL,
RKISP1_ISP_PAD_SOURCE_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_RECENTER);
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_MAX_DX);
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_MAX_DY);
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_DISPLACE);
- rkisp1_write(rkisp1, src_crop->left, RKISP1_CIF_ISP_IS_H_OFFS);
- rkisp1_write(rkisp1, src_crop->top, RKISP1_CIF_ISP_IS_V_OFFS);
- rkisp1_write(rkisp1, src_crop->width, RKISP1_CIF_ISP_IS_H_SIZE);
- rkisp1_write(rkisp1, src_crop->height, RKISP1_CIF_ISP_IS_V_SIZE);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_RECENTER, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_MAX_DX, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_MAX_DY, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_DISPLACE, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_H_OFFS, src_crop->left);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_V_OFFS, src_crop->top);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_H_SIZE, src_crop->width);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_V_SIZE, src_crop->height);
/* IS(Image Stabilization) is always on, working as output crop */
- rkisp1_write(rkisp1, 1, RKISP1_CIF_ISP_IS_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_CTRL, 1);
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
- rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
}
/*
* configure ISP blocks with input format, size......
*/
-static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
-{
- u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, signal = 0;
- const struct rkisp1_isp_mbus_info *src_fmt, *sink_fmt;
- struct rkisp1_sensor_async *sensor;
- struct v4l2_mbus_framefmt *sink_frm;
- struct v4l2_rect *sink_crop;
+static int rkisp1_config_isp(struct rkisp1_isp *isp,
+ enum v4l2_mbus_type mbus_type, u32 mbus_flags)
+{
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
+ u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, acq_prop = 0;
+ const struct rkisp1_mbus_info *sink_fmt = isp->sink_fmt;
+ const struct rkisp1_mbus_info *src_fmt = isp->src_fmt;
+ const struct v4l2_mbus_framefmt *sink_frm;
+ const struct v4l2_rect *sink_crop;
- sensor = rkisp1->active_sensor;
- sink_fmt = rkisp1->isp.sink_fmt;
- src_fmt = rkisp1->isp.src_fmt;
- sink_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, NULL,
+ sink_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
RKISP1_ISP_PAD_SINK_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
- sink_crop = rkisp1_isp_get_pad_crop(&rkisp1->isp, NULL,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, NULL,
RKISP1_ISP_PAD_SINK_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
acq_mult = 1;
if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
- if (sensor->mbus_type == V4L2_MBUS_BT656)
+ if (mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT;
} else {
- rkisp1_write(rkisp1, RKISP1_CIF_ISP_DEMOSAIC_TH(0xc),
- RKISP1_CIF_ISP_DEMOSAIC);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_TH(0xc));
- if (sensor->mbus_type == V4L2_MBUS_BT656)
+ if (mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601;
}
} else if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_YUV) {
acq_mult = 2;
- if (sensor->mbus_type == V4L2_MBUS_CSI2_DPHY) {
+ if (mbus_type == V4L2_MBUS_CSI2_DPHY) {
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
} else {
- if (sensor->mbus_type == V4L2_MBUS_BT656)
+ if (mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
@@ -321,50 +172,65 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
}
/* Set up input acquisition properties */
- if (sensor->mbus_type == V4L2_MBUS_BT656 ||
- sensor->mbus_type == V4L2_MBUS_PARALLEL) {
- if (sensor->mbus_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
- signal = RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE;
+ if (mbus_type == V4L2_MBUS_BT656 || mbus_type == V4L2_MBUS_PARALLEL) {
+ if (mbus_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE;
+
+ switch (sink_fmt->bus_width) {
+ case 8:
+ acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO;
+ break;
+ case 10:
+ acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO;
+ break;
+ case 12:
+ acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B;
+ break;
+ default:
+ dev_err(rkisp1->dev, "Invalid bus width %u\n",
+ sink_fmt->bus_width);
+ return -EINVAL;
+ }
}
- if (sensor->mbus_type == V4L2_MBUS_PARALLEL) {
- if (sensor->mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- signal |= RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW;
+ if (mbus_type == V4L2_MBUS_PARALLEL) {
+ if (mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW;
- if (sensor->mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- signal |= RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW;
+ if (mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW;
}
- rkisp1_write(rkisp1, isp_ctrl, RKISP1_CIF_ISP_CTRL);
- rkisp1_write(rkisp1, signal | sink_fmt->yuv_seq |
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, isp_ctrl);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_PROP,
+ acq_prop | sink_fmt->yuv_seq |
RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(sink_fmt->bayer_pat) |
- RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL,
- RKISP1_CIF_ISP_ACQ_PROP);
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_NR_FRAMES);
+ RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_NR_FRAMES, 0);
/* Acquisition Size */
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_H_OFFS);
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_V_OFFS);
- rkisp1_write(rkisp1,
- acq_mult * sink_frm->width, RKISP1_CIF_ISP_ACQ_H_SIZE);
- rkisp1_write(rkisp1, sink_frm->height, RKISP1_CIF_ISP_ACQ_V_SIZE);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_H_OFFS, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_V_OFFS, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_H_SIZE,
+ acq_mult * sink_frm->width);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_V_SIZE, sink_frm->height);
/* ISP Out Area */
- rkisp1_write(rkisp1, sink_crop->left, RKISP1_CIF_ISP_OUT_H_OFFS);
- rkisp1_write(rkisp1, sink_crop->top, RKISP1_CIF_ISP_OUT_V_OFFS);
- rkisp1_write(rkisp1, sink_crop->width, RKISP1_CIF_ISP_OUT_H_SIZE);
- rkisp1_write(rkisp1, sink_crop->height, RKISP1_CIF_ISP_OUT_V_SIZE);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_H_OFFS, sink_crop->left);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_V_OFFS, sink_crop->top);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_H_SIZE, sink_crop->width);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_V_SIZE, sink_crop->height);
irq_mask |= RKISP1_CIF_ISP_FRAME | RKISP1_CIF_ISP_V_START |
RKISP1_CIF_ISP_PIC_SIZE_ERROR;
- rkisp1_write(rkisp1, irq_mask, RKISP1_CIF_ISP_IMSC);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, irq_mask);
if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
rkisp1_params_disable(&rkisp1->params);
} else {
struct v4l2_mbus_framefmt *src_frm;
- src_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, NULL,
+ src_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
RKISP1_ISP_PAD_SINK_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat,
@@ -374,213 +240,117 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
return 0;
}
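
With rkisp1_config_dvp() removed, translating the sink format's bus width into the ACQ_PROP input-select bits now happens inline while acq_prop is assembled above. The same mapping could be factored into a small helper; the sketch below is hypothetical (no such function exists in the driver), but the register bits and error handling mirror the switch in this hunk:

/* Hypothetical helper mirroring the bus-width switch in rkisp1_config_isp(). */
static int rkisp1_bus_width_to_in_sel(struct rkisp1_device *rkisp1,
				      unsigned int bus_width, u32 *in_sel)
{
	switch (bus_width) {
	case 8:
		*in_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO;
		return 0;
	case 10:
		*in_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO;
		return 0;
	case 12:
		*in_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B;
		return 0;
	default:
		dev_err(rkisp1->dev, "Invalid bus width %u\n", bus_width);
		return -EINVAL;
	}
}
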
-static int rkisp1_config_dvp(struct rkisp1_device *rkisp1)
-{
- const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
- u32 val, input_sel;
-
- switch (sink_fmt->bus_width) {
- case 8:
- input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO;
- break;
- case 10:
- input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO;
- break;
- case 12:
- input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B;
- break;
- default:
- dev_err(rkisp1->dev, "Invalid bus width\n");
- return -EINVAL;
- }
-
- val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ACQ_PROP);
- rkisp1_write(rkisp1, val | input_sel, RKISP1_CIF_ISP_ACQ_PROP);
-
- return 0;
-}
-
-static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
-{
- const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
- unsigned int lanes = rkisp1->active_sensor->lanes;
- u32 mipi_ctrl;
-
- if (lanes < 1 || lanes > 4)
- return -EINVAL;
-
- mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
- RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
- RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP |
- RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA;
-
- rkisp1_write(rkisp1, mipi_ctrl, RKISP1_CIF_MIPI_CTRL);
-
- /* V12 could also use a newer csi2-host, but we don't want that yet */
- if (rkisp1->media_dev.hw_revision == RKISP1_V12)
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_CSI0_CTRL0);
-
- /* Configure Data Type and Virtual Channel */
- rkisp1_write(rkisp1,
- RKISP1_CIF_MIPI_DATA_SEL_DT(sink_fmt->mipi_dt) |
- RKISP1_CIF_MIPI_DATA_SEL_VC(0),
- RKISP1_CIF_MIPI_IMG_DATA_SEL);
-
- /* Clear MIPI interrupts */
- rkisp1_write(rkisp1, ~0, RKISP1_CIF_MIPI_ICR);
- /*
- * Disable RKISP1_CIF_MIPI_ERR_DPHY interrupt here temporary for
- * isp bus may be dead when switch isp.
- */
- rkisp1_write(rkisp1,
- RKISP1_CIF_MIPI_FRAME_END | RKISP1_CIF_MIPI_ERR_CSI |
- RKISP1_CIF_MIPI_ERR_DPHY |
- RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(0x03) |
- RKISP1_CIF_MIPI_ADD_DATA_OVFLW,
- RKISP1_CIF_MIPI_IMSC);
-
- dev_dbg(rkisp1->dev, "\n MIPI_CTRL 0x%08x\n"
- " MIPI_IMG_DATA_SEL 0x%08x\n"
- " MIPI_STATUS 0x%08x\n"
- " MIPI_IMSC 0x%08x\n",
- rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL),
- rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL),
- rkisp1_read(rkisp1, RKISP1_CIF_MIPI_STATUS),
- rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC));
-
- return 0;
-}
-
/* Configure MUX */
-static int rkisp1_config_path(struct rkisp1_device *rkisp1)
+static void rkisp1_config_path(struct rkisp1_isp *isp,
+ enum v4l2_mbus_type mbus_type)
{
- struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 dpcl = rkisp1_read(rkisp1, RKISP1_CIF_VI_DPCL);
- int ret = 0;
- if (sensor->mbus_type == V4L2_MBUS_BT656 ||
- sensor->mbus_type == V4L2_MBUS_PARALLEL) {
- ret = rkisp1_config_dvp(rkisp1);
+ if (mbus_type == V4L2_MBUS_BT656 || mbus_type == V4L2_MBUS_PARALLEL)
dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL;
- } else if (sensor->mbus_type == V4L2_MBUS_CSI2_DPHY) {
- ret = rkisp1_config_mipi(rkisp1);
+ else if (mbus_type == V4L2_MBUS_CSI2_DPHY)
dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_MIPI;
- }
-
- rkisp1_write(rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
- return ret;
+ rkisp1_write(rkisp1, RKISP1_CIF_VI_DPCL, dpcl);
}
/* Hardware configure Entry */
-static int rkisp1_config_cif(struct rkisp1_device *rkisp1)
+static int rkisp1_config_cif(struct rkisp1_isp *isp,
+ enum v4l2_mbus_type mbus_type, u32 mbus_flags)
{
- u32 cif_id;
int ret;
- cif_id = rkisp1_read(rkisp1, RKISP1_CIF_VI_ID);
- dev_dbg(rkisp1->dev, "CIF_ID 0x%08x\n", cif_id);
-
- ret = rkisp1_config_isp(rkisp1);
+ ret = rkisp1_config_isp(isp, mbus_type, mbus_flags);
if (ret)
return ret;
- ret = rkisp1_config_path(rkisp1);
- if (ret)
- return ret;
- rkisp1_config_ism(rkisp1);
+
+ rkisp1_config_path(isp, mbus_type);
+ rkisp1_config_ism(isp);
return 0;
}
-static void rkisp1_isp_stop(struct rkisp1_device *rkisp1)
+static void rkisp1_isp_stop(struct rkisp1_isp *isp)
{
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
/*
* ISP(mi) stop in mi frame end -> Stop ISP(mipi) ->
* Stop ISP(isp) ->wait for ISP isp off
*/
- /* stop and clear MI, MIPI, and ISP interrupts */
- rkisp1_write(rkisp1, 0, RKISP1_CIF_MIPI_IMSC);
- rkisp1_write(rkisp1, ~0, RKISP1_CIF_MIPI_ICR);
-
- rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IMSC);
- rkisp1_write(rkisp1, ~0, RKISP1_CIF_ISP_ICR);
-
- rkisp1_write(rkisp1, 0, RKISP1_CIF_MI_IMSC);
- rkisp1_write(rkisp1, ~0, RKISP1_CIF_MI_ICR);
- val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
- rkisp1_write(rkisp1, val & (~RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA),
- RKISP1_CIF_MIPI_CTRL);
+ /* stop and clear MI and ISP interrupts */
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, ~0);
+
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_IMSC, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, ~0);
+
/* stop ISP */
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val &= ~(RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE |
RKISP1_CIF_ISP_CTRL_ISP_ENABLE);
- rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
- rkisp1_write(rkisp1, val | RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD,
- RKISP1_CIF_ISP_CTRL);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL,
+ val | RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
readx_poll_timeout(readl, rkisp1->base_addr + RKISP1_CIF_ISP_RIS,
val, val & RKISP1_CIF_ISP_OFF, 20, 100);
- rkisp1_write(rkisp1,
- RKISP1_CIF_IRCL_MIPI_SW_RST | RKISP1_CIF_IRCL_ISP_SW_RST,
- RKISP1_CIF_IRCL);
- rkisp1_write(rkisp1, 0x0, RKISP1_CIF_IRCL);
+ rkisp1_write(rkisp1, RKISP1_CIF_VI_IRCL,
+ RKISP1_CIF_VI_IRCL_MIPI_SW_RST |
+ RKISP1_CIF_VI_IRCL_ISP_SW_RST);
+ rkisp1_write(rkisp1, RKISP1_CIF_VI_IRCL, 0x0);
}
-static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
+static void rkisp1_config_clk(struct rkisp1_isp *isp)
{
- u32 val = RKISP1_CIF_ICCL_ISP_CLK | RKISP1_CIF_ICCL_CP_CLK |
- RKISP1_CIF_ICCL_MRSZ_CLK | RKISP1_CIF_ICCL_SRSZ_CLK |
- RKISP1_CIF_ICCL_JPEG_CLK | RKISP1_CIF_ICCL_MI_CLK |
- RKISP1_CIF_ICCL_IE_CLK | RKISP1_CIF_ICCL_MIPI_CLK |
- RKISP1_CIF_ICCL_DCROP_CLK;
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
+
+ u32 val = RKISP1_CIF_VI_ICCL_ISP_CLK | RKISP1_CIF_VI_ICCL_CP_CLK |
+ RKISP1_CIF_VI_ICCL_MRSZ_CLK | RKISP1_CIF_VI_ICCL_SRSZ_CLK |
+ RKISP1_CIF_VI_ICCL_JPEG_CLK | RKISP1_CIF_VI_ICCL_MI_CLK |
+ RKISP1_CIF_VI_ICCL_IE_CLK | RKISP1_CIF_VI_ICCL_MIPI_CLK |
+ RKISP1_CIF_VI_ICCL_DCROP_CLK;
- rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
+ rkisp1_write(rkisp1, RKISP1_CIF_VI_ICCL, val);
/* ensure sp and mp can run at the same time in V12 */
- if (rkisp1->media_dev.hw_revision == RKISP1_V12) {
+ if (rkisp1->info->isp_ver == RKISP1_V12) {
val = RKISP1_CIF_CLK_CTRL_MI_Y12 | RKISP1_CIF_CLK_CTRL_MI_SP |
RKISP1_CIF_CLK_CTRL_MI_RAW0 | RKISP1_CIF_CLK_CTRL_MI_RAW1 |
RKISP1_CIF_CLK_CTRL_MI_READ | RKISP1_CIF_CLK_CTRL_MI_RAWRD |
RKISP1_CIF_CLK_CTRL_CP | RKISP1_CIF_CLK_CTRL_IE;
- rkisp1_write(rkisp1, val, RKISP1_CIF_VI_ISP_CLK_CTRL_V12);
+ rkisp1_write(rkisp1, RKISP1_CIF_VI_ISP_CLK_CTRL_V12, val);
}
}
-static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
+static void rkisp1_isp_start(struct rkisp1_isp *isp)
{
- struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
- rkisp1_config_clk(rkisp1);
+ rkisp1_config_clk(isp);
- /* Activate MIPI */
- if (sensor->mbus_type == V4L2_MBUS_CSI2_DPHY) {
- val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
- rkisp1_write(rkisp1, val | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA,
- RKISP1_CIF_MIPI_CTRL);
- }
/* Activate ISP */
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD |
RKISP1_CIF_ISP_CTRL_ISP_ENABLE |
RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
- rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
-
- /*
- * CIF spec says to wait for sufficient time after enabling
- * the MIPI interface and before starting the sensor output.
- */
- usleep_range(1000, 1200);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
}
/* ----------------------------------------------------------------------------
* Subdev pad operations
*/
+static inline struct rkisp1_isp *to_rkisp1_isp(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rkisp1_isp, sd);
+}
+
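
The new to_rkisp1_isp() helper replaces the open-coded container_of() calls in the pad and stream operations below: each callback only receives the embedded v4l2_subdev and recovers the enclosing rkisp1_isp from it. A stand-alone, user-space illustration of the idiom (the types here are made up, not from the driver):

#include <stddef.h>
#include <stdio.h>

/* Same trick container_of() uses in the kernel: subtract the member's
 * offset from the member pointer to get back to the outer structure.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct outer {
	int id;
	struct { int dummy; } member;
};

int main(void)
{
	struct outer o = { .id = 42 };
	struct outer *back = container_of(&o.member, struct outer, member);

	printf("recovered id = %d\n", back->id); /* prints 42 */
	return 0;
}
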
static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -599,11 +369,12 @@ static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
- if (code->index >= ARRAY_SIZE(rkisp1_isp_formats))
- return -EINVAL;
+ for (i = 0; ; i++) {
+ const struct rkisp1_mbus_info *fmt =
+ rkisp1_mbus_info_get_by_index(i);
- for (i = 0; i < ARRAY_SIZE(rkisp1_isp_formats); i++) {
- const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i];
+ if (!fmt)
+ return -EINVAL;
if (fmt->direction & dir)
pos++;
@@ -625,7 +396,7 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- const struct rkisp1_isp_mbus_info *mbus_info;
+ const struct rkisp1_mbus_info *mbus_info;
if (fse->pad == RKISP1_ISP_PAD_SINK_PARAMS ||
fse->pad == RKISP1_ISP_PAD_SOURCE_STATS)
@@ -634,7 +405,7 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index > 0)
return -EINVAL;
- mbus_info = rkisp1_isp_mbus_info_get(fse->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(fse->code);
if (!mbus_info)
return -EINVAL;
@@ -701,7 +472,7 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
- const struct rkisp1_isp_mbus_info *mbus_info;
+ const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *src_crop;
@@ -711,10 +482,10 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
src_fmt->code = format->code;
- mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) {
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
- mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
}
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
isp->src_fmt = mbus_info;
@@ -771,7 +542,7 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_rect *sink_crop, *src_crop;
- struct v4l2_mbus_framefmt *sink_fmt;
+ const struct v4l2_mbus_framefmt *sink_fmt;
sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
@@ -799,7 +570,7 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
- const struct rkisp1_isp_mbus_info *mbus_info;
+ const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
@@ -807,10 +578,10 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_fmt->code = format->code;
- mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
- mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
isp->sink_fmt = mbus_info;
@@ -835,7 +606,7 @@ static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
mutex_lock(&isp->ops_lock);
fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
@@ -848,7 +619,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
mutex_lock(&isp->ops_lock);
if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
@@ -869,7 +640,7 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
int ret = 0;
if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
@@ -909,15 +680,13 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct rkisp1_device *rkisp1 =
- container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
- struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
int ret = 0;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- dev_dbg(rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&isp->ops_lock);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
@@ -954,77 +723,62 @@ static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
* Stream operations
*/
-static int rkisp1_mipi_csi2_start(struct rkisp1_isp *isp,
- struct rkisp1_sensor_async *sensor)
-{
- struct rkisp1_device *rkisp1 =
- container_of(isp->sd.v4l2_dev, struct rkisp1_device, v4l2_dev);
- union phy_configure_opts opts;
- struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
- s64 pixel_clock;
-
- if (!sensor->pixel_rate_ctrl) {
- dev_warn(rkisp1->dev, "No pixel rate control in sensor subdev\n");
- return -EPIPE;
- }
-
- pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl);
- if (!pixel_clock) {
- dev_err(rkisp1->dev, "Invalid pixel rate value\n");
- return -EINVAL;
- }
-
- phy_mipi_dphy_get_default_config(pixel_clock, isp->sink_fmt->bus_width,
- sensor->lanes, cfg);
- phy_set_mode(sensor->dphy, PHY_MODE_MIPI_DPHY);
- phy_configure(sensor->dphy, &opts);
- phy_power_on(sensor->dphy);
-
- return 0;
-}
-
-static void rkisp1_mipi_csi2_stop(struct rkisp1_sensor_async *sensor)
-{
- phy_power_off(sensor->dphy);
-}
-
static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct rkisp1_device *rkisp1 =
- container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
- struct rkisp1_isp *isp = &rkisp1->isp;
- struct v4l2_subdev *sensor_sd;
- int ret = 0;
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
+ struct rkisp1_device *rkisp1 = isp->rkisp1;
+ struct media_pad *source_pad;
+ struct media_pad *sink_pad;
+ enum v4l2_mbus_type mbus_type;
+ u32 mbus_flags;
+ int ret;
if (!enable) {
- rkisp1_isp_stop(rkisp1);
- rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
+ v4l2_subdev_call(rkisp1->source, video, s_stream, false);
+ rkisp1_isp_stop(isp);
return 0;
}
- sensor_sd = rkisp1_get_remote_sensor(sd);
- if (!sensor_sd) {
- dev_warn(rkisp1->dev, "No link between isp and sensor\n");
- return -ENODEV;
+ sink_pad = &isp->pads[RKISP1_ISP_PAD_SINK_VIDEO];
+ source_pad = media_pad_remote_pad_unique(sink_pad);
+ if (IS_ERR(source_pad)) {
+ dev_dbg(rkisp1->dev, "Failed to get source for ISP: %ld\n",
+ PTR_ERR(source_pad));
+ return -EPIPE;
}
- rkisp1->active_sensor = container_of(sensor_sd->asd,
- struct rkisp1_sensor_async, asd);
+ rkisp1->source = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!rkisp1->source) {
+ /* This should really not happen, so it is not worth a message. */
+ return -EPIPE;
+ }
- if (rkisp1->active_sensor->mbus_type != V4L2_MBUS_CSI2_DPHY)
- return -EINVAL;
+ if (rkisp1->source == &rkisp1->csi.sd) {
+ mbus_type = V4L2_MBUS_CSI2_DPHY;
+ mbus_flags = 0;
+ } else {
+ const struct rkisp1_sensor_async *asd;
- rkisp1->isp.frame_sequence = -1;
+ asd = container_of(rkisp1->source->asd,
+ struct rkisp1_sensor_async, asd);
+
+ mbus_type = asd->mbus_type;
+ mbus_flags = asd->mbus_flags;
+ }
+
+ isp->frame_sequence = -1;
mutex_lock(&isp->ops_lock);
- ret = rkisp1_config_cif(rkisp1);
+ ret = rkisp1_config_cif(isp, mbus_type, mbus_flags);
if (ret)
goto mutex_unlock;
- ret = rkisp1_mipi_csi2_start(&rkisp1->isp, rkisp1->active_sensor);
- if (ret)
- goto mutex_unlock;
+ rkisp1_isp_start(isp);
- rkisp1_isp_start(rkisp1);
+ ret = v4l2_subdev_call(rkisp1->source, video, s_stream, true);
+ if (ret) {
+ rkisp1_isp_stop(isp);
+ goto mutex_unlock;
+ }
mutex_unlock:
mutex_unlock(&isp->ops_lock);
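
The enable path now starts the ISP first and only then asks the upstream subdev (internal CSI receiver or parallel sensor) to stream via v4l2_subdev_call(), undoing the ISP start if the source refuses. For reference, v4l2_subdev_call() boils down to the guarded indirect call sketched below (simplified; the real macro in media/v4l2-subdev.h adds call wrappers and is not reproduced here):

/* Simplified sketch of what v4l2_subdev_call(sd, video, s_stream, enable)
 * amounts to: NULL checks plus an indirect call through the ops table.
 */
static int rkisp1_source_s_stream_sketch(struct v4l2_subdev *sd, int enable)
{
	if (!sd)
		return -ENODEV;
	if (!sd->ops || !sd->ops->video || !sd->ops->video->s_stream)
		return -ENOIOCTLCMD;

	return sd->ops->video->s_stream(sd, enable);
}
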
@@ -1067,12 +821,14 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
struct v4l2_subdev_state state = {
.pads = rkisp1->isp.pad_cfg
- };
+ };
struct rkisp1_isp *isp = &rkisp1->isp;
struct media_pad *pads = isp->pads;
struct v4l2_subdev *sd = &isp->sd;
int ret;
+ isp->rkisp1 = rkisp1;
+
v4l2_subdev_init(sd, &rkisp1_isp_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
sd->entity.ops = &rkisp1_isp_media_ops;
@@ -1086,95 +842,54 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
pads[RKISP1_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
pads[RKISP1_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
- isp->sink_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SINK_PAD_FMT);
- isp->src_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SRC_PAD_FMT);
+ isp->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SINK_PAD_FMT);
+ isp->src_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SRC_PAD_FMT);
mutex_init(&isp->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
if (ret)
- return ret;
+ goto error;
ret = v4l2_device_register_subdev(&rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(rkisp1->dev, "Failed to register isp subdev\n");
- goto err_cleanup_media_entity;
+ goto error;
}
rkisp1_isp_init_config(sd, &state);
+
return 0;
-err_cleanup_media_entity:
+error:
media_entity_cleanup(&sd->entity);
-
+ mutex_destroy(&isp->ops_lock);
+ isp->sd.v4l2_dev = NULL;
return ret;
}
void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
{
- struct v4l2_subdev *sd = &rkisp1->isp.sd;
+ struct rkisp1_isp *isp = &rkisp1->isp;
- v4l2_device_unregister_subdev(sd);
- media_entity_cleanup(&sd->entity);
+ if (!isp->sd.v4l2_dev)
+ return;
+
+ v4l2_device_unregister_subdev(&isp->sd);
+ media_entity_cleanup(&isp->sd.entity);
+ mutex_destroy(&isp->ops_lock);
}
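
rkisp1_isp_unregister() now returns early when the subdev was never registered (isp->sd.v4l2_dev stays NULL, and the error path in rkisp1_isp_register() clears it), so the caller can tear everything down unconditionally. A hypothetical caller-side sketch of the pattern this enables:

/* Hypothetical sketch: with the NULL v4l2_dev guard, a top-level error
 * path no longer needs to track which entities were registered.
 */
static void rkisp1_entities_unregister_sketch(struct rkisp1_device *rkisp1)
{
	rkisp1_isp_unregister(rkisp1);	/* safe even after a failed register */
	/* ... the other subdevs would follow the same convention ... */
}
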
/* ----------------------------------------------------------------------------
* Interrupt handlers
*/
-irqreturn_t rkisp1_mipi_isr(int irq, void *ctx)
-{
- struct device *dev = ctx;
- struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
- u32 val, status;
-
- status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
- if (!status)
- return IRQ_NONE;
-
- rkisp1_write(rkisp1, status, RKISP1_CIF_MIPI_ICR);
-
- /*
- * Disable DPHY errctrl interrupt, because this dphy
- * erctrl signal is asserted until the next changes
- * of line state. This time is may be too long and cpu
- * is hold in this interrupt.
- */
- if (status & RKISP1_CIF_MIPI_ERR_CTRL(0x0f)) {
- val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
- rkisp1_write(rkisp1, val & ~RKISP1_CIF_MIPI_ERR_CTRL(0x0f),
- RKISP1_CIF_MIPI_IMSC);
- rkisp1->isp.is_dphy_errctrl_disabled = true;
- }
-
- /*
- * Enable DPHY errctrl interrupt again, if mipi have receive
- * the whole frame without any error.
- */
- if (status == RKISP1_CIF_MIPI_FRAME_END) {
- /*
- * Enable DPHY errctrl interrupt again, if mipi have receive
- * the whole frame without any error.
- */
- if (rkisp1->isp.is_dphy_errctrl_disabled) {
- val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
- val |= RKISP1_CIF_MIPI_ERR_CTRL(0x0f);
- rkisp1_write(rkisp1, val, RKISP1_CIF_MIPI_IMSC);
- rkisp1->isp.is_dphy_errctrl_disabled = false;
- }
- } else {
- rkisp1->debug.mipi_error++;
- }
-
- return IRQ_HANDLED;
-}
-
static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
{
struct v4l2_event event = {
.type = V4L2_EVENT_FRAME_SYNC,
};
- event.u.frame_sync.frame_sequence = isp->frame_sequence;
+ event.u.frame_sync.frame_sequence = isp->frame_sequence;
v4l2_event_queue(isp->sd.devnode, &event);
}
@@ -1188,7 +903,7 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
if (!status)
return IRQ_NONE;
- rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, status);
/* Vertical sync signal, starting generating new frame */
if (status & RKISP1_CIF_ISP_V_START) {
@@ -1208,7 +923,7 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
rkisp1->debug.img_stabilization_size_error++;
if (isp_err & RKISP1_CIF_ISP_ERR_OUTFORM_SIZE)
rkisp1->debug.outform_size_error++;
- rkisp1_write(rkisp1, isp_err, RKISP1_CIF_ISP_ERR_CLR);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ERR_CLR, isp_err);
} else if (status & RKISP1_CIF_ISP_DATA_LOSS) {
/* keep track of data_loss in debugfs */
rkisp1->debug.data_loss++;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 8f62f09e635f..9da7dc1bc690 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -37,7 +37,7 @@ rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
u32 val;
val = rkisp1_read(params->rkisp1, reg);
- rkisp1_write(params->rkisp1, val | bit_mask, reg);
+ rkisp1_write(params->rkisp1, reg, val | bit_mask);
}
static inline void
@@ -46,7 +46,7 @@ rkisp1_param_clear_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
u32 val;
val = rkisp1_read(params->rkisp1, reg);
- rkisp1_write(params->rkisp1, val & ~bit_mask, reg);
+ rkisp1_write(params->rkisp1, reg, val & ~bit_mask);
}
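
rkisp1_param_set_bits() and rkisp1_param_clear_bits() are plain read-modify-write wrappers; with the register-first argument order they now line up with rkisp1_write() itself. A short usage sketch (the function name is hypothetical, but the register and bit are the ones this file toggles in rkisp1_goc_config_v10/v12 below):

/* Usage sketch for the read-modify-write helpers above. */
static void rkisp1_goc_enable_sketch(struct rkisp1_params *params, bool en)
{
	if (en)
		rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
				      RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
	else
		rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
					RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
}
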
/* ISP BP interface function */
@@ -60,35 +60,35 @@ static void rkisp1_dpcc_config(struct rkisp1_params *params,
mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE);
mode &= RKISP1_CIF_ISP_DPCC_ENA;
mode |= arg->mode & ~RKISP1_CIF_ISP_DPCC_ENA;
- rkisp1_write(params->rkisp1, mode, RKISP1_CIF_ISP_DPCC_MODE);
- rkisp1_write(params->rkisp1, arg->output_mode,
- RKISP1_CIF_ISP_DPCC_OUTPUT_MODE);
- rkisp1_write(params->rkisp1, arg->set_use,
- RKISP1_CIF_ISP_DPCC_SET_USE);
-
- rkisp1_write(params->rkisp1, arg->methods[0].method,
- RKISP1_CIF_ISP_DPCC_METHODS_SET_1);
- rkisp1_write(params->rkisp1, arg->methods[1].method,
- RKISP1_CIF_ISP_DPCC_METHODS_SET_2);
- rkisp1_write(params->rkisp1, arg->methods[2].method,
- RKISP1_CIF_ISP_DPCC_METHODS_SET_3);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE, mode);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_OUTPUT_MODE,
+ arg->output_mode);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_SET_USE,
+ arg->set_use);
+
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_1,
+ arg->methods[0].method);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_2,
+ arg->methods[1].method);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_3,
+ arg->methods[2].method);
for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) {
- rkisp1_write(params->rkisp1, arg->methods[i].line_thresh,
- RKISP1_ISP_DPCC_LINE_THRESH(i));
- rkisp1_write(params->rkisp1, arg->methods[i].line_mad_fac,
- RKISP1_ISP_DPCC_LINE_MAD_FAC(i));
- rkisp1_write(params->rkisp1, arg->methods[i].pg_fac,
- RKISP1_ISP_DPCC_PG_FAC(i));
- rkisp1_write(params->rkisp1, arg->methods[i].rnd_thresh,
- RKISP1_ISP_DPCC_RND_THRESH(i));
- rkisp1_write(params->rkisp1, arg->methods[i].rg_fac,
- RKISP1_ISP_DPCC_RG_FAC(i));
+ rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_THRESH(i),
+ arg->methods[i].line_thresh);
+ rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_MAD_FAC(i),
+ arg->methods[i].line_mad_fac);
+ rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_PG_FAC(i),
+ arg->methods[i].pg_fac);
+ rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RND_THRESH(i),
+ arg->methods[i].rnd_thresh);
+ rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RG_FAC(i),
+ arg->methods[i].rg_fac);
}
- rkisp1_write(params->rkisp1, arg->rnd_offs,
- RKISP1_CIF_ISP_DPCC_RND_OFFS);
- rkisp1_write(params->rkisp1, arg->ro_limits,
- RKISP1_CIF_ISP_DPCC_RO_LIMITS);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RND_OFFS,
+ arg->rnd_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RO_LIMITS,
+ arg->ro_limits);
}
/* ISP black level subtraction interface function */
@@ -107,44 +107,44 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
switch (params->raw_type) {
case RKISP1_RAW_BGGR:
- rkisp1_write(params->rkisp1,
- pval->r, RKISP1_CIF_ISP_BLS_D_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gr, RKISP1_CIF_ISP_BLS_C_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gb, RKISP1_CIF_ISP_BLS_B_FIXED);
- rkisp1_write(params->rkisp1,
- pval->b, RKISP1_CIF_ISP_BLS_A_FIXED);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
+ pval->r);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
+ pval->gr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
+ pval->gb);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
+ pval->b);
break;
case RKISP1_RAW_GBRG:
- rkisp1_write(params->rkisp1,
- pval->r, RKISP1_CIF_ISP_BLS_C_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gr, RKISP1_CIF_ISP_BLS_D_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gb, RKISP1_CIF_ISP_BLS_A_FIXED);
- rkisp1_write(params->rkisp1,
- pval->b, RKISP1_CIF_ISP_BLS_B_FIXED);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
+ pval->r);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
+ pval->gr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
+ pval->gb);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
+ pval->b);
break;
case RKISP1_RAW_GRBG:
- rkisp1_write(params->rkisp1,
- pval->r, RKISP1_CIF_ISP_BLS_B_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gr, RKISP1_CIF_ISP_BLS_A_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gb, RKISP1_CIF_ISP_BLS_D_FIXED);
- rkisp1_write(params->rkisp1,
- pval->b, RKISP1_CIF_ISP_BLS_C_FIXED);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
+ pval->r);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
+ pval->gr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
+ pval->gb);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
+ pval->b);
break;
case RKISP1_RAW_RGGB:
- rkisp1_write(params->rkisp1,
- pval->r, RKISP1_CIF_ISP_BLS_A_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gr, RKISP1_CIF_ISP_BLS_B_FIXED);
- rkisp1_write(params->rkisp1,
- pval->gb, RKISP1_CIF_ISP_BLS_C_FIXED);
- rkisp1_write(params->rkisp1,
- pval->b, RKISP1_CIF_ISP_BLS_D_FIXED);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
+ pval->r);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
+ pval->gr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
+ pval->gb);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
+ pval->b);
break;
default:
break;
@@ -152,35 +152,35 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
} else {
if (arg->en_windows & BIT(1)) {
- rkisp1_write(params->rkisp1, arg->bls_window2.h_offs,
- RKISP1_CIF_ISP_BLS_H2_START);
- rkisp1_write(params->rkisp1, arg->bls_window2.h_size,
- RKISP1_CIF_ISP_BLS_H2_STOP);
- rkisp1_write(params->rkisp1, arg->bls_window2.v_offs,
- RKISP1_CIF_ISP_BLS_V2_START);
- rkisp1_write(params->rkisp1, arg->bls_window2.v_size,
- RKISP1_CIF_ISP_BLS_V2_STOP);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_START,
+ arg->bls_window2.h_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_STOP,
+ arg->bls_window2.h_size);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V2_START,
+ arg->bls_window2.v_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V2_STOP,
+ arg->bls_window2.v_size);
new_control |= RKISP1_CIF_ISP_BLS_WINDOW_2;
}
if (arg->en_windows & BIT(0)) {
- rkisp1_write(params->rkisp1, arg->bls_window1.h_offs,
- RKISP1_CIF_ISP_BLS_H1_START);
- rkisp1_write(params->rkisp1, arg->bls_window1.h_size,
- RKISP1_CIF_ISP_BLS_H1_STOP);
- rkisp1_write(params->rkisp1, arg->bls_window1.v_offs,
- RKISP1_CIF_ISP_BLS_V1_START);
- rkisp1_write(params->rkisp1, arg->bls_window1.v_size,
- RKISP1_CIF_ISP_BLS_V1_STOP);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H1_START,
+ arg->bls_window1.h_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H1_STOP,
+ arg->bls_window1.h_size);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V1_START,
+ arg->bls_window1.v_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V1_STOP,
+ arg->bls_window1.v_size);
new_control |= RKISP1_CIF_ISP_BLS_WINDOW_1;
}
- rkisp1_write(params->rkisp1, arg->bls_samples,
- RKISP1_CIF_ISP_BLS_SAMPLES);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_SAMPLES,
+ arg->bls_samples);
new_control |= RKISP1_CIF_ISP_BLS_MODE_MEASURED;
}
- rkisp1_write(params->rkisp1, new_control, RKISP1_CIF_ISP_BLS_CTRL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_CTRL, new_control);
}
/* ISP LS correction interface function */
@@ -196,14 +196,10 @@ rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
- rkisp1_write(params->rkisp1, sram_addr,
- RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
- rkisp1_write(params->rkisp1, sram_addr,
- RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
- rkisp1_write(params->rkisp1, sram_addr,
- RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
- rkisp1_write(params->rkisp1, sram_addr,
- RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
/* program data tables (table size is 9 * 17 = 153) */
for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
@@ -214,45 +210,45 @@ rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j],
pconfig->r_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j],
pconfig->gr_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j],
pconfig->gb_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j],
pconfig->b_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data);
}
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
+ data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
+ data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
+ data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
+ data);
}
isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
RKISP1_CIF_ISP_LSC_TABLE_0 :
RKISP1_CIF_ISP_LSC_TABLE_1;
- rkisp1_write(params->rkisp1, isp_lsc_table_sel,
- RKISP1_CIF_ISP_LSC_TABLE_SEL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL,
+ isp_lsc_table_sel);
}
static void
@@ -267,10 +263,10 @@ rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
- rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
- rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
- rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
- rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
/* program data tables (table size is 9 * 17 = 153) */
for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
@@ -282,49 +278,49 @@ rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
pconfig->r_data_tbl[i][j],
pconfig->r_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
pconfig->gr_data_tbl[i][j],
pconfig->gr_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
pconfig->gb_data_tbl[i][j],
pconfig->gb_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
pconfig->b_data_tbl[i][j],
pconfig->b_data_tbl[i][j + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data);
}
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
+ data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
+ data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
+ data);
data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
+ data);
}
isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
RKISP1_CIF_ISP_LSC_TABLE_0 :
RKISP1_CIF_ISP_LSC_TABLE_1;
- rkisp1_write(params->rkisp1, isp_lsc_table_sel,
- RKISP1_CIF_ISP_LSC_TABLE_SEL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL,
+ isp_lsc_table_sel);
}
static void rkisp1_lsc_config(struct rkisp1_params *params,
@@ -343,26 +339,26 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
/* program x size tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2],
arg->x_size_tbl[i * 2 + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4, data);
/* program x grad tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2],
arg->x_grad_tbl[i * 2 + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4, data);
/* program y size tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2],
arg->y_size_tbl[i * 2 + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4, data);
/* program y grad tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2],
arg->y_grad_tbl[i * 2 + 1]);
- rkisp1_write(params->rkisp1, data,
- RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4, data);
}
/* restore the lsc ctrl status */
@@ -383,28 +379,32 @@ static void rkisp1_flt_config(struct rkisp1_params *params,
{
u32 filt_mode;
- rkisp1_write(params->rkisp1,
- arg->thresh_bl0, RKISP1_CIF_ISP_FILT_THRESH_BL0);
- rkisp1_write(params->rkisp1,
- arg->thresh_bl1, RKISP1_CIF_ISP_FILT_THRESH_BL1);
- rkisp1_write(params->rkisp1,
- arg->thresh_sh0, RKISP1_CIF_ISP_FILT_THRESH_SH0);
- rkisp1_write(params->rkisp1,
- arg->thresh_sh1, RKISP1_CIF_ISP_FILT_THRESH_SH1);
- rkisp1_write(params->rkisp1, arg->fac_bl0, RKISP1_CIF_ISP_FILT_FAC_BL0);
- rkisp1_write(params->rkisp1, arg->fac_bl1, RKISP1_CIF_ISP_FILT_FAC_BL1);
- rkisp1_write(params->rkisp1, arg->fac_mid, RKISP1_CIF_ISP_FILT_FAC_MID);
- rkisp1_write(params->rkisp1, arg->fac_sh0, RKISP1_CIF_ISP_FILT_FAC_SH0);
- rkisp1_write(params->rkisp1, arg->fac_sh1, RKISP1_CIF_ISP_FILT_FAC_SH1);
- rkisp1_write(params->rkisp1,
- arg->lum_weight, RKISP1_CIF_ISP_FILT_LUM_WEIGHT);
-
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_BL0,
+ arg->thresh_bl0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_BL1,
+ arg->thresh_bl1);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_SH0,
+ arg->thresh_sh0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_SH1,
+ arg->thresh_sh1);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_BL0,
+ arg->fac_bl0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_BL1,
+ arg->fac_bl1);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_MID,
+ arg->fac_mid);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_SH0,
+ arg->fac_sh0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_SH1,
+ arg->fac_sh1);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_LUM_WEIGHT,
+ arg->lum_weight);
+
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE,
(arg->mode ? RKISP1_CIF_ISP_FLT_MODE_DNR : 0) |
RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
- RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1),
- RKISP1_CIF_ISP_FILT_MODE);
+ RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1));
/* avoid to override the old enable value */
filt_mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE);
@@ -414,7 +414,7 @@ static void rkisp1_flt_config(struct rkisp1_params *params,
filt_mode |= RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1);
- rkisp1_write(params->rkisp1, filt_mode, RKISP1_CIF_ISP_FILT_MODE);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE, filt_mode);
}
/* ISP demosaic interface function */
@@ -428,7 +428,7 @@ static int rkisp1_bdm_config(struct rkisp1_params *params,
bdm_th &= RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
bdm_th |= arg->demosaic_th & ~RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
/* set demosaic threshold */
- rkisp1_write(params->rkisp1, bdm_th, RKISP1_CIF_ISP_DEMOSAIC);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DEMOSAIC, bdm_th);
return 0;
}
@@ -438,18 +438,21 @@ static void rkisp1_sdg_config(struct rkisp1_params *params,
{
unsigned int i;
- rkisp1_write(params->rkisp1,
- arg->xa_pnts.gamma_dx0, RKISP1_CIF_ISP_GAMMA_DX_LO);
- rkisp1_write(params->rkisp1,
- arg->xa_pnts.gamma_dx1, RKISP1_CIF_ISP_GAMMA_DX_HI);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_DX_LO,
+ arg->xa_pnts.gamma_dx0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_DX_HI,
+ arg->xa_pnts.gamma_dx1);
for (i = 0; i < RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE; i++) {
- rkisp1_write(params->rkisp1, arg->curve_r.gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_R_Y0 + i * 4);
- rkisp1_write(params->rkisp1, arg->curve_g.gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_G_Y0 + i * 4);
- rkisp1_write(params->rkisp1, arg->curve_b.gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_B_Y0 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_GAMMA_R_Y0 + i * 4,
+ arg->curve_r.gamma_y[i]);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_GAMMA_G_Y0 + i * 4,
+ arg->curve_g.gamma_y[i]);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_GAMMA_B_Y0 + i * 4,
+ arg->curve_b.gamma_y[i]);
}
}
@@ -461,11 +464,13 @@ static void rkisp1_goc_config_v10(struct rkisp1_params *params,
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
- rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10,
+ arg->mode);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
- rkisp1_write(params->rkisp1, arg->gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 + i * 4,
+ arg->gamma_y[i]);
}
static void rkisp1_goc_config_v12(struct rkisp1_params *params,
@@ -476,14 +481,15 @@ static void rkisp1_goc_config_v12(struct rkisp1_params *params,
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
- rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12,
+ arg->mode);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 / 2; i++) {
value = RKISP1_CIF_ISP_GAMMA_VALUE_V12(
arg->gamma_y[2 * i + 1],
arg->gamma_y[2 * i]);
- rkisp1_write(params->rkisp1, value,
- RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 + i * 4, value);
}
}
@@ -495,11 +501,13 @@ static void rkisp1_ctk_config(struct rkisp1_params *params,
for (i = 0; i < 3; i++)
for (j = 0; j < 3; j++)
- rkisp1_write(params->rkisp1, arg->coeff[i][j],
- RKISP1_CIF_ISP_CT_COEFF_0 + 4 * k++);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_CT_COEFF_0 + 4 * k++,
+ arg->coeff[i][j]);
for (i = 0; i < 3; i++)
- rkisp1_write(params->rkisp1, arg->ct_offset[i],
- RKISP1_CIF_ISP_CT_OFFSET_R + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_CT_OFFSET_R + i * 4,
+ arg->ct_offset[i]);
}
static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
@@ -508,19 +516,19 @@ static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
return;
/* Write back the default values. */
- rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_0);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_1);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_2);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_3);
- rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_4);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_5);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_6);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_7);
- rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_8);
-
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_R);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_G);
- rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_B);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_0, 0x80);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_1, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_2, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_3, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_4, 0x80);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_5, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_6, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_7, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_8, 0x80);
+
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_OFFSET_R, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_OFFSET_G, 0);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_OFFSET_B, 0);
}
/* ISP White Balance Mode */
@@ -531,15 +539,15 @@ static void rkisp1_awb_meas_config_v10(struct rkisp1_params *params,
/* based on the mode,configure the awb module */
if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
/* Reference Cb and Cr */
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_REF_V10,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
- arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V10);
+ arg->awb_ref_cb);
/* Yc Threshold */
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_THRESH_V10,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
- arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V10);
+ arg->min_c);
}
reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
@@ -547,21 +555,21 @@ static void rkisp1_awb_meas_config_v10(struct rkisp1_params *params,
reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
else
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10, reg_val);
/* window offset */
- rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10);
- rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10,
+ arg->awb_wnd.v_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10,
+ arg->awb_wnd.h_offs);
/* AWB window size */
- rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10);
- rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10,
+ arg->awb_wnd.v_size);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10,
+ arg->awb_wnd.h_size);
/* Number of frames */
- rkisp1_write(params->rkisp1,
- arg->frames, RKISP1_CIF_ISP_AWB_FRAMES_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_FRAMES_V10,
+ arg->frames);
}
static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
@@ -571,15 +579,15 @@ static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
/* based on the mode,configure the awb module */
if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
/* Reference Cb and Cr */
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_REF_V12,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
- arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V12);
+ arg->awb_ref_cb);
/* Yc Threshold */
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_THRESH_V12,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
- arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V12);
+ arg->min_c);
}
reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
@@ -589,18 +597,14 @@ static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
reg_val &= ~RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12;
reg_val |= RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(arg->frames);
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12, reg_val);
/* window offset */
- rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_offs << 16 |
- arg->awb_wnd.h_offs,
- RKISP1_CIF_ISP_AWB_OFFS_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_OFFS_V12,
+ arg->awb_wnd.v_offs << 16 | arg->awb_wnd.h_offs);
/* AWB window size */
- rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_size << 16 |
- arg->awb_wnd.h_size,
- RKISP1_CIF_ISP_AWB_SIZE_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_SIZE_V12,
+ arg->awb_wnd.v_size << 16 | arg->awb_wnd.h_size);
}
static void
@@ -619,14 +623,15 @@ rkisp1_awb_meas_enable_v10(struct rkisp1_params *params,
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10,
+ reg_val);
/* Measurements require AWB block be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
- rkisp1_write(params->rkisp1,
- reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10,
+ reg_val);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
@@ -648,14 +653,15 @@ rkisp1_awb_meas_enable_v12(struct rkisp1_params *params,
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12,
+ reg_val);
/* Measurements require AWB block be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
- rkisp1_write(params->rkisp1,
- reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12,
+ reg_val);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
@@ -665,26 +671,26 @@ static void
rkisp1_awb_gain_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_gain_config *arg)
{
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_G_V10,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
- arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V10);
+ arg->gain_green_b);
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_RB_V10,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
- arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V10);
+ arg->gain_blue);
}
static void
rkisp1_awb_gain_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_gain_config *arg)
{
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_G_V12,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
- arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V12);
+ arg->gain_green_b);
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_RB_V12,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
- arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V12);
+ arg->gain_blue);
}
static void rkisp1_aec_config_v10(struct rkisp1_params *params,
@@ -700,24 +706,22 @@ static void rkisp1_aec_config_v10(struct rkisp1_params *params,
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
- rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL, exp_ctrl);
- rkisp1_write(params->rkisp1,
- arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET_V10);
- rkisp1_write(params->rkisp1,
- arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_H_OFFSET_V10,
+ arg->meas_window.h_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_V_OFFSET_V10,
+ arg->meas_window.v_offs);
block_hsize = arg->meas_window.h_size /
RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size /
RKISP1_CIF_ISP_EXP_ROW_NUM_V10 - 1;
- rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(block_hsize),
- RKISP1_CIF_ISP_EXP_H_SIZE_V10);
- rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(block_vsize),
- RKISP1_CIF_ISP_EXP_V_SIZE_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_H_SIZE_V10,
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(block_hsize));
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_V_SIZE_V10,
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(block_vsize));
}
static void rkisp1_aec_config_v12(struct rkisp1_params *params,
@@ -736,20 +740,18 @@ static void rkisp1_aec_config_v12(struct rkisp1_params *params,
if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(wnd_num_idx);
- rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL, exp_ctrl);
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_OFFS_V12,
RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(arg->meas_window.v_offs) |
- RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(arg->meas_window.h_offs),
- RKISP1_CIF_ISP_EXP_OFFS_V12);
+ RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(arg->meas_window.h_offs));
block_hsize = arg->meas_window.h_size / ae_wnd_num[wnd_num_idx] - 1;
block_vsize = arg->meas_window.v_size / ae_wnd_num[wnd_num_idx] - 1;
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_SIZE_V12,
RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(block_vsize) |
- RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(block_hsize),
- RKISP1_CIF_ISP_EXP_SIZE_V12);
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(block_hsize));
}
static void rkisp1_cproc_config(struct rkisp1_params *params,
@@ -762,11 +764,12 @@ static void rkisp1_cproc_config(struct rkisp1_params *params,
u32 effect = cur_ie_config->effect;
u32 quantization = params->quantization;
- rkisp1_write(params->rkisp1, arg->contrast, RKISP1_CIF_C_PROC_CONTRAST);
- rkisp1_write(params->rkisp1, arg->hue, RKISP1_CIF_C_PROC_HUE);
- rkisp1_write(params->rkisp1, arg->sat, RKISP1_CIF_C_PROC_SATURATION);
- rkisp1_write(params->rkisp1, arg->brightness,
- RKISP1_CIF_C_PROC_BRIGHTNESS);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_CONTRAST,
+ arg->contrast);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_HUE, arg->hue);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_SATURATION, arg->sat);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_BRIGHTNESS,
+ arg->brightness);
if (quantization != V4L2_QUANTIZATION_FULL_RANGE ||
effect != V4L2_COLORFX_NONE) {
@@ -802,31 +805,29 @@ static void rkisp1_hst_config_v10(struct rkisp1_params *params,
hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10);
hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(arg->histogram_predivider);
- rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP_V10);
- rkisp1_write(params->rkisp1,
- arg->meas_window.h_offs,
- RKISP1_CIF_ISP_HIST_H_OFFS_V10);
- rkisp1_write(params->rkisp1,
- arg->meas_window.v_offs,
- RKISP1_CIF_ISP_HIST_V_OFFS_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10, hist_prop);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_H_OFFS_V10,
+ arg->meas_window.h_offs);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_V_OFFS_V10,
+ arg->meas_window.v_offs);
block_hsize = arg->meas_window.h_size /
RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM_V10 - 1;
- rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE_V10);
- rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_H_SIZE_V10,
+ block_hsize);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_V_SIZE_V10,
+ block_vsize);
weight = arg->hist_weight;
for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
- rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(weight[0],
- weight[1],
- weight[2],
- weight[3]),
- hist_weight_regs[i]);
+ rkisp1_write(params->rkisp1, hist_weight_regs[i],
+ RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(weight[0], weight[1],
+ weight[2], weight[3]));
- rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10,
+ weight[0] & 0x1F);
}
static void rkisp1_hst_config_v12(struct rkisp1_params *params,
@@ -852,18 +853,16 @@ static void rkisp1_hst_config_v12(struct rkisp1_params *params,
RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(0) |
RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(1) |
RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(arg->histogram_predivider);
- rkisp1_write(params->rkisp1, hist_ctrl, RKISP1_CIF_ISP_HIST_CTRL_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_CTRL_V12, hist_ctrl);
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_OFFS_V12,
RKISP1_CIF_ISP_HIST_OFFS_SET_V12(arg->meas_window.h_offs,
- arg->meas_window.v_offs),
- RKISP1_CIF_ISP_HIST_OFFS_V12);
+ arg->meas_window.v_offs));
block_hsize = arg->meas_window.h_size / hist_wnd_num[wnd_num_idx] - 1;
block_vsize = arg->meas_window.v_size / hist_wnd_num[wnd_num_idx] - 1;
- rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_HIST_SIZE_SET_V12(block_hsize, block_vsize),
- RKISP1_CIF_ISP_HIST_SIZE_V12);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_SIZE_V12,
+ RKISP1_CIF_ISP_HIST_SIZE_SET_V12(block_hsize, block_vsize));
for (i = 0; i < hist_wnd_num[wnd_num_idx]; i++) {
for (j = 0; j < hist_wnd_num[wnd_num_idx]; j++) {
@@ -879,12 +878,12 @@ static void rkisp1_hst_config_v12(struct rkisp1_params *params,
weight15x15[4 * i + 1],
weight15x15[4 * i + 2],
weight15x15[4 * i + 3]);
- rkisp1_write(params->rkisp1, value,
- RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i, value);
}
value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(weight15x15[4 * i + 0], 0, 0, 0);
- rkisp1_write(params->rkisp1, value,
- RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i,
+ value);
}
static void
@@ -938,22 +937,20 @@ static void rkisp1_afm_config_v10(struct rkisp1_params *params,
RKISP1_CIF_ISP_AFM_ENA);
for (i = 0; i < num_of_win; i++) {
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_LT_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
- RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
- RKISP1_CIF_ISP_AFM_LT_A + i * 8);
- rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs));
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_RB_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
arg->afm_win[i].h_offs) |
RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
- arg->afm_win[i].v_offs),
- RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+ arg->afm_win[i].v_offs));
}
- rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
- rkisp1_write(params->rkisp1, arg->var_shift,
- RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_THRES, arg->thres);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_VAR_SHIFT,
+ arg->var_shift);
/* restore afm status */
- rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL, afm_ctrl);
}
static void rkisp1_afm_config_v12(struct rkisp1_params *params,
@@ -970,29 +967,26 @@ static void rkisp1_afm_config_v12(struct rkisp1_params *params,
RKISP1_CIF_ISP_AFM_ENA);
for (i = 0; i < num_of_win; i++) {
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_LT_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
- RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
- RKISP1_CIF_ISP_AFM_LT_A + i * 8);
- rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs));
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_RB_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
arg->afm_win[i].h_offs) |
RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
- arg->afm_win[i].v_offs),
- RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+ arg->afm_win[i].v_offs));
}
- rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_THRES, arg->thres);
lum_var_shift = RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(arg->var_shift);
afm_var_shift = RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(arg->var_shift);
- rkisp1_write(params->rkisp1,
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_VAR_SHIFT,
RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(lum_var_shift, afm_var_shift) |
RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(lum_var_shift, afm_var_shift) |
- RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(lum_var_shift, afm_var_shift),
- RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(lum_var_shift, afm_var_shift));
/* restore afm status */
- rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL, afm_ctrl);
}
static void rkisp1_ie_config(struct rkisp1_params *params,
@@ -1011,8 +1005,8 @@ static void rkisp1_ie_config(struct rkisp1_params *params,
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
break;
case V4L2_COLORFX_SET_CBCR:
- rkisp1_write(params->rkisp1, arg->eff_tint,
- RKISP1_CIF_IMG_EFF_TINT);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_TINT,
+ arg->eff_tint);
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
break;
/*
@@ -1021,26 +1015,26 @@ static void rkisp1_ie_config(struct rkisp1_params *params,
*/
case V4L2_COLORFX_AQUA:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL;
- rkisp1_write(params->rkisp1, arg->color_sel,
- RKISP1_CIF_IMG_EFF_COLOR_SEL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_COLOR_SEL,
+ arg->color_sel);
break;
case V4L2_COLORFX_EMBOSS:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS;
- rkisp1_write(params->rkisp1, arg->eff_mat_1,
- RKISP1_CIF_IMG_EFF_MAT_1);
- rkisp1_write(params->rkisp1, arg->eff_mat_2,
- RKISP1_CIF_IMG_EFF_MAT_2);
- rkisp1_write(params->rkisp1, arg->eff_mat_3,
- RKISP1_CIF_IMG_EFF_MAT_3);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_1,
+ arg->eff_mat_1);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_2,
+ arg->eff_mat_2);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_3,
+ arg->eff_mat_3);
break;
case V4L2_COLORFX_SKETCH:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH;
- rkisp1_write(params->rkisp1, arg->eff_mat_3,
- RKISP1_CIF_IMG_EFF_MAT_3);
- rkisp1_write(params->rkisp1, arg->eff_mat_4,
- RKISP1_CIF_IMG_EFF_MAT_4);
- rkisp1_write(params->rkisp1, arg->eff_mat_5,
- RKISP1_CIF_IMG_EFF_MAT_5);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_3,
+ arg->eff_mat_3);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_4,
+ arg->eff_mat_4);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_5,
+ arg->eff_mat_5);
break;
case V4L2_COLORFX_BW:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE;
@@ -1052,23 +1046,23 @@ static void rkisp1_ie_config(struct rkisp1_params *params,
break;
}
- rkisp1_write(params->rkisp1, eff_ctrl, RKISP1_CIF_IMG_EFF_CTRL);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL, eff_ctrl);
}
static void rkisp1_ie_enable(struct rkisp1_params *params, bool en)
{
if (en) {
- rkisp1_param_set_bits(params, RKISP1_CIF_ICCL,
- RKISP1_CIF_ICCL_IE_CLK);
- rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL_ENABLE,
- RKISP1_CIF_IMG_EFF_CTRL);
+ rkisp1_param_set_bits(params, RKISP1_CIF_VI_ICCL,
+ RKISP1_CIF_VI_ICCL_IE_CLK);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL,
+ RKISP1_CIF_IMG_EFF_CTRL_ENABLE);
rkisp1_param_set_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD);
} else {
rkisp1_param_clear_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
RKISP1_CIF_IMG_EFF_CTRL_ENABLE);
- rkisp1_param_clear_bits(params, RKISP1_CIF_ICCL,
- RKISP1_CIF_ICCL_IE_CLK);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_VI_ICCL,
+ RKISP1_CIF_VI_ICCL_IE_CLK);
}
}
@@ -1088,16 +1082,18 @@ static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range)
if (full_range) {
for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++)
- rkisp1_write(params->rkisp1, full_range_coeff[i],
- RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_CC_COEFF_0 + i * 4,
+ full_range_coeff[i]);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
} else {
for (i = 0; i < ARRAY_SIZE(limited_range_coeff); i++)
- rkisp1_write(params->rkisp1, limited_range_coeff[i],
- RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_CC_COEFF_0 + i * 4,
+ limited_range_coeff[i]);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
@@ -1152,52 +1148,53 @@ static void rkisp1_dpf_config(struct rkisp1_params *params,
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE,
isp_dpf_mode);
- rkisp1_write(params->rkisp1, arg->gain.nf_b_gain,
- RKISP1_CIF_ISP_DPF_NF_GAIN_B);
- rkisp1_write(params->rkisp1, arg->gain.nf_r_gain,
- RKISP1_CIF_ISP_DPF_NF_GAIN_R);
- rkisp1_write(params->rkisp1, arg->gain.nf_gb_gain,
- RKISP1_CIF_ISP_DPF_NF_GAIN_GB);
- rkisp1_write(params->rkisp1, arg->gain.nf_gr_gain,
- RKISP1_CIF_ISP_DPF_NF_GAIN_GR);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_B,
+ arg->gain.nf_b_gain);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_R,
+ arg->gain.nf_r_gain);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_GB,
+ arg->gain.nf_gb_gain);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_GR,
+ arg->gain.nf_gr_gain);
for (i = 0; i < RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS; i++) {
- rkisp1_write(params->rkisp1, arg->nll.coeff[i],
- RKISP1_CIF_ISP_DPF_NULL_COEFF_0 + i * 4);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_DPF_NULL_COEFF_0 + i * 4,
+ arg->nll.coeff[i]);
}
spatial_coeff = arg->g_flt.spatial_coeff[0] |
(arg->g_flt.spatial_coeff[1] << 8) |
(arg->g_flt.spatial_coeff[2] << 16) |
(arg->g_flt.spatial_coeff[3] << 24);
- rkisp1_write(params->rkisp1, spatial_coeff,
- RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4,
+ spatial_coeff);
spatial_coeff = arg->g_flt.spatial_coeff[4] |
(arg->g_flt.spatial_coeff[5] << 8);
- rkisp1_write(params->rkisp1, spatial_coeff,
- RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6,
+ spatial_coeff);
spatial_coeff = arg->rb_flt.spatial_coeff[0] |
(arg->rb_flt.spatial_coeff[1] << 8) |
(arg->rb_flt.spatial_coeff[2] << 16) |
(arg->rb_flt.spatial_coeff[3] << 24);
- rkisp1_write(params->rkisp1, spatial_coeff,
- RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4,
+ spatial_coeff);
spatial_coeff = arg->rb_flt.spatial_coeff[4] |
(arg->rb_flt.spatial_coeff[5] << 8);
- rkisp1_write(params->rkisp1, spatial_coeff,
- RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6,
+ spatial_coeff);
}
static void
rkisp1_dpf_strength_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_dpf_strength_config *arg)
{
- rkisp1_write(params->rkisp1, arg->b, RKISP1_CIF_ISP_DPF_STRENGTH_B);
- rkisp1_write(params->rkisp1, arg->g, RKISP1_CIF_ISP_DPF_STRENGTH_G);
- rkisp1_write(params->rkisp1, arg->r, RKISP1_CIF_ISP_DPF_STRENGTH_R);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_B, arg->b);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_G, arg->g);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_R, arg->r);
}
static void
@@ -1804,7 +1801,7 @@ static void rkisp1_init_params(struct rkisp1_params *params)
params->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_params_cfg);
- if (params->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ if (params->rkisp1->info->isp_ver == RKISP1_V12)
params->ops = &rkisp1_v12_params_ops;
else
params->ops = &rkisp1_v10_params_ops;
@@ -1844,16 +1841,20 @@ int rkisp1_params_register(struct rkisp1_device *rkisp1)
node->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
- return ret;
+ goto error;
+
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(rkisp1->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
- goto err_cleanup_media_entity;
+ goto error;
}
+
return 0;
-err_cleanup_media_entity:
+
+error:
media_entity_cleanup(&vdev->entity);
+ mutex_destroy(&node->vlock);
return ret;
}
@@ -1863,6 +1864,10 @@ void rkisp1_params_unregister(struct rkisp1_device *rkisp1)
struct rkisp1_vdev_node *node = &params->vnode;
struct video_device *vdev = &node->vdev;
+ if (!video_is_registered(vdev))
+ return;
+
vb2_video_unregister_device(vdev);
media_entity_cleanup(&vdev->entity);
+ mutex_destroy(&node->vlock);
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index 82f8d33d98b3..dd3e6c38be67 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -11,7 +11,7 @@
/* ISP_CTRL */
#define RKISP1_CIF_ISP_CTRL_ISP_ENABLE BIT(0)
#define RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT (0 << 1)
-#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656 BIT(1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656 (1 << 1)
#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601 (2 << 1)
#define RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601 (3 << 1)
#define RKISP1_CIF_ISP_CTRL_ISP_MODE_DATA_MODE (4 << 1)
@@ -33,37 +33,37 @@
#define RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW BIT(1)
#define RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW BIT(2)
#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_RGGB (0 << 3)
-#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GRBG BIT(3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GRBG (1 << 3)
#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GBRG (2 << 3)
#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_BGGR (3 << 3)
#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(pat) ((pat) << 3)
#define RKISP1_CIF_ISP_ACQ_PROP_YCBYCR (0 << 7)
-#define RKISP1_CIF_ISP_ACQ_PROP_YCRYCB BIT(7)
+#define RKISP1_CIF_ISP_ACQ_PROP_YCRYCB (1 << 7)
#define RKISP1_CIF_ISP_ACQ_PROP_CBYCRY (2 << 7)
#define RKISP1_CIF_ISP_ACQ_PROP_CRYCBY (3 << 7)
#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL (0 << 9)
-#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_EVEN BIT(9)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_EVEN (1 << 9)
#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ODD (2 << 9)
#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B (0 << 12)
-#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO BIT(12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO (1 << 12)
#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_MSB (2 << 12)
#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO (3 << 12)
#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_MSB (4 << 12)
/* VI_DPCL */
#define RKISP1_CIF_VI_DPCL_DMA_JPEG (0 << 0)
-#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI BIT(0)
+#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI (1 << 0)
#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_JPEG (2 << 0)
-#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MP BIT(2)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MP (1 << 2)
#define RKISP1_CIF_VI_DPCL_CHAN_MODE_SP (2 << 2)
#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MPSP (3 << 2)
#define RKISP1_CIF_VI_DPCL_DMA_SW_SPMUX (0 << 4)
-#define RKISP1_CIF_VI_DPCL_DMA_SW_SI BIT(4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_SI (1 << 4)
#define RKISP1_CIF_VI_DPCL_DMA_SW_IE (2 << 4)
#define RKISP1_CIF_VI_DPCL_DMA_SW_JPEG (3 << 4)
#define RKISP1_CIF_VI_DPCL_DMA_SW_ISP (4 << 4)
#define RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL (0 << 8)
-#define RKISP1_CIF_VI_DPCL_IF_SEL_SMIA BIT(8)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_SMIA (1 << 8)
#define RKISP1_CIF_VI_DPCL_IF_SEL_MIPI (2 << 8)
#define RKISP1_CIF_VI_DPCL_DMA_IE_MUX_DMA BIT(10)
#define RKISP1_CIF_VI_DPCL_DMA_SP_MUX_DMA BIT(11)
@@ -112,26 +112,26 @@
#define RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE BIT(14)
#define RKISP1_CIF_MI_LAST_PIXEL_SIG_ENABLE BIT(15)
#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_16 (0 << 16)
-#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_32 BIT(16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_32 (1 << 16)
#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_64 (2 << 16)
#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_16 (0 << 18)
-#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_32 BIT(18)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_32 (1 << 18)
#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_64 (2 << 18)
#define RKISP1_CIF_MI_CTRL_INIT_BASE_EN BIT(20)
#define RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN BIT(21)
#define RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8 (0 << 22)
-#define RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA BIT(22)
+#define RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA (1 << 22)
#define RKISP1_MI_CTRL_MP_WRITE_YUVINT (2 << 22)
#define RKISP1_MI_CTRL_MP_WRITE_RAW12 (2 << 22)
#define RKISP1_MI_CTRL_SP_WRITE_PLA (0 << 24)
-#define RKISP1_MI_CTRL_SP_WRITE_SPLA BIT(24)
+#define RKISP1_MI_CTRL_SP_WRITE_SPLA (1 << 24)
#define RKISP1_MI_CTRL_SP_WRITE_INT (2 << 24)
#define RKISP1_MI_CTRL_SP_INPUT_YUV400 (0 << 26)
-#define RKISP1_MI_CTRL_SP_INPUT_YUV420 BIT(26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV420 (1 << 26)
#define RKISP1_MI_CTRL_SP_INPUT_YUV422 (2 << 26)
#define RKISP1_MI_CTRL_SP_INPUT_YUV444 (3 << 26)
#define RKISP1_MI_CTRL_SP_OUTPUT_YUV400 (0 << 28)
-#define RKISP1_MI_CTRL_SP_OUTPUT_YUV420 BIT(28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV420 (1 << 28)
#define RKISP1_MI_CTRL_SP_OUTPUT_YUV422 (2 << 28)
#define RKISP1_MI_CTRL_SP_OUTPUT_YUV444 (3 << 28)
#define RKISP1_MI_CTRL_SP_OUTPUT_RGB565 (4 << 28)
@@ -186,22 +186,22 @@
/* MI_DMA_CTRL */
#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_16 (0 << 0)
-#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_32 BIT(0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_32 (1 << 0)
#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_64 (2 << 0)
#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_16 (0 << 2)
-#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_32 BIT(2)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_32 (1 << 2)
#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_64 (2 << 2)
#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_PLANAR (0 << 4)
-#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_SPLANAR BIT(4)
-#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV400 (0 << 6)
-#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV420 BIT(6)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_SPLANAR (1 << 4)
#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_PACKED (2 << 4)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV400 (0 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV420 (1 << 6)
#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV422 (2 << 6)
#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV444 (3 << 6)
#define RKISP1_CIF_MI_DMA_CTRL_BYTE_SWAP BIT(8)
#define RKISP1_CIF_MI_DMA_CTRL_CONTINUOUS_ENA BIT(9)
#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_NO (0 << 12)
-#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_8BIT BIT(12)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_8BIT (1 << 12)
#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_16BIT (2 << 12)
/* MI_DMA_START */
#define RKISP1_CIF_MI_DMA_START_ENABLE BIT(0)
@@ -210,7 +210,7 @@
#define RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP BIT(1)
#define RKISP1_CIF_MI_XTD_FMT_CTRL_DMA_CB_CR_SWAP BIT(2)
-/* CCL */
+/* VI_CCL */
#define RKISP1_CIF_CCL_CIF_CLK_DIS BIT(2)
/* VI_ISP_CLK_CTRL */
#define RKISP1_CIF_CLK_CTRL_ISP_RAW BIT(0)
@@ -241,32 +241,32 @@
#define RKISP1_CIF_CLK_CTRL_RSZS BIT(25)
#define RKISP1_CIF_CLK_CTRL_MIPI BIT(26)
#define RKISP1_CIF_CLK_CTRL_MARVINMI BIT(27)
-/* ICCL */
-#define RKISP1_CIF_ICCL_ISP_CLK BIT(0)
-#define RKISP1_CIF_ICCL_CP_CLK BIT(1)
-#define RKISP1_CIF_ICCL_RES_2 BIT(2)
-#define RKISP1_CIF_ICCL_MRSZ_CLK BIT(3)
-#define RKISP1_CIF_ICCL_SRSZ_CLK BIT(4)
-#define RKISP1_CIF_ICCL_JPEG_CLK BIT(5)
-#define RKISP1_CIF_ICCL_MI_CLK BIT(6)
-#define RKISP1_CIF_ICCL_RES_7 BIT(7)
-#define RKISP1_CIF_ICCL_IE_CLK BIT(8)
-#define RKISP1_CIF_ICCL_SIMP_CLK BIT(9)
-#define RKISP1_CIF_ICCL_SMIA_CLK BIT(10)
-#define RKISP1_CIF_ICCL_MIPI_CLK BIT(11)
-#define RKISP1_CIF_ICCL_DCROP_CLK BIT(12)
-/* IRCL */
-#define RKISP1_CIF_IRCL_ISP_SW_RST BIT(0)
-#define RKISP1_CIF_IRCL_CP_SW_RST BIT(1)
-#define RKISP1_CIF_IRCL_YCS_SW_RST BIT(2)
-#define RKISP1_CIF_IRCL_MRSZ_SW_RST BIT(3)
-#define RKISP1_CIF_IRCL_SRSZ_SW_RST BIT(4)
-#define RKISP1_CIF_IRCL_JPEG_SW_RST BIT(5)
-#define RKISP1_CIF_IRCL_MI_SW_RST BIT(6)
-#define RKISP1_CIF_IRCL_CIF_SW_RST BIT(7)
-#define RKISP1_CIF_IRCL_IE_SW_RST BIT(8)
-#define RKISP1_CIF_IRCL_SI_SW_RST BIT(9)
-#define RKISP1_CIF_IRCL_MIPI_SW_RST BIT(11)
+/* VI_ICCL */
+#define RKISP1_CIF_VI_ICCL_ISP_CLK BIT(0)
+#define RKISP1_CIF_VI_ICCL_CP_CLK BIT(1)
+#define RKISP1_CIF_VI_ICCL_RES_2 BIT(2)
+#define RKISP1_CIF_VI_ICCL_MRSZ_CLK BIT(3)
+#define RKISP1_CIF_VI_ICCL_SRSZ_CLK BIT(4)
+#define RKISP1_CIF_VI_ICCL_JPEG_CLK BIT(5)
+#define RKISP1_CIF_VI_ICCL_MI_CLK BIT(6)
+#define RKISP1_CIF_VI_ICCL_RES_7 BIT(7)
+#define RKISP1_CIF_VI_ICCL_IE_CLK BIT(8)
+#define RKISP1_CIF_VI_ICCL_SIMP_CLK BIT(9)
+#define RKISP1_CIF_VI_ICCL_SMIA_CLK BIT(10)
+#define RKISP1_CIF_VI_ICCL_MIPI_CLK BIT(11)
+#define RKISP1_CIF_VI_ICCL_DCROP_CLK BIT(12)
+/* VI_IRCL */
+#define RKISP1_CIF_VI_IRCL_ISP_SW_RST BIT(0)
+#define RKISP1_CIF_VI_IRCL_CP_SW_RST BIT(1)
+#define RKISP1_CIF_VI_IRCL_YCS_SW_RST BIT(2)
+#define RKISP1_CIF_VI_IRCL_MRSZ_SW_RST BIT(3)
+#define RKISP1_CIF_VI_IRCL_SRSZ_SW_RST BIT(4)
+#define RKISP1_CIF_VI_IRCL_JPEG_SW_RST BIT(5)
+#define RKISP1_CIF_VI_IRCL_MI_SW_RST BIT(6)
+#define RKISP1_CIF_VI_IRCL_CIF_SW_RST BIT(7)
+#define RKISP1_CIF_VI_IRCL_IE_SW_RST BIT(8)
+#define RKISP1_CIF_VI_IRCL_SI_SW_RST BIT(9)
+#define RKISP1_CIF_VI_IRCL_MIPI_SW_RST BIT(11)
/* C_PROC_CTR */
#define RKISP1_CIF_C_PROC_CTR_ENABLE BIT(0)
@@ -282,10 +282,10 @@
#define RKISP1_CIF_C_PROC_TONE_RESERVED 0xF000
/* DUAL_CROP_CTRL */
#define RKISP1_CIF_DUAL_CROP_MP_MODE_BYPASS (0 << 0)
-#define RKISP1_CIF_DUAL_CROP_MP_MODE_YUV BIT(0)
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_YUV (1 << 0)
#define RKISP1_CIF_DUAL_CROP_MP_MODE_RAW (2 << 0)
#define RKISP1_CIF_DUAL_CROP_SP_MODE_BYPASS (0 << 2)
-#define RKISP1_CIF_DUAL_CROP_SP_MODE_YUV BIT(2)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_YUV (1 << 2)
#define RKISP1_CIF_DUAL_CROP_SP_MODE_RAW (2 << 2)
#define RKISP1_CIF_DUAL_CROP_CFG_UPD_PERMANENT BIT(4)
#define RKISP1_CIF_DUAL_CROP_CFG_UPD BIT(5)
@@ -294,7 +294,7 @@
/* IMG_EFF_CTRL */
#define RKISP1_CIF_IMG_EFF_CTRL_ENABLE BIT(0)
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE (0 << 1)
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE BIT(1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE (1 << 1)
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA (2 << 1)
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL (3 << 1)
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS (4 << 1)
@@ -314,7 +314,7 @@
/* IMG_EFF_COLOR_SEL */
#define RKISP1_CIF_IMG_EFF_COLOR_RGB 0
-#define RKISP1_CIF_IMG_EFF_COLOR_B BIT(0)
+#define RKISP1_CIF_IMG_EFF_COLOR_B (1 << 0)
#define RKISP1_CIF_IMG_EFF_COLOR_G (2 << 0)
#define RKISP1_CIF_IMG_EFF_COLOR_GB (3 << 0)
#define RKISP1_CIF_IMG_EFF_COLOR_R (4 << 0)
@@ -365,7 +365,7 @@
/* ISP HISTOGRAM CALCULATION : ISP_HIST_PROP */
#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS_V10 (0 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB_V10 BIT(0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB_V10 (1 << 0)
#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED_V10 (2 << 0)
#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN_V10 (3 << 0)
#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE_V10 (4 << 0)
@@ -443,6 +443,15 @@
#define RKISP1_CIF_ISP_DEMOSAIC_BYPASS BIT(10)
#define RKISP1_CIF_ISP_DEMOSAIC_TH(x) ((x) & 0xFF)
+/* ISP_FLAGS_SHD */
+#define RKISP1_CIF_ISP_FLAGS_SHD_ISP_ENABLE_SHD BIT(0)
+#define RKISP1_CIF_ISP_FLAGS_SHD_ISP_ENABLE_INFORM_SHD BIT(1)
+#define RKISP1_CIF_ISP_FLAGS_SHD_INFORM_FIELD BIT(2)
+#define RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_MASK GENMASK(27, 16)
+#define RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_SHIFT 16
+#define RKISP1_CIF_ISP_FLAGS_SHD_S_VSYNC BIT(30)
+#define RKISP1_CIF_ISP_FLAGS_SHD_S_HSYNC BIT(31)
+
/* AWB */
/* ISP_AWB_PROP */
#define RKISP1_CIF_ISP_AWB_YMAX_CMP_EN BIT(2)
@@ -628,7 +637,7 @@
#define RKISP1_CIF_ISP_BLS_ENA BIT(0)
#define RKISP1_CIF_ISP_BLS_MODE_MEASURED BIT(1)
#define RKISP1_CIF_ISP_BLS_MODE_FIXED 0
-#define RKISP1_CIF_ISP_BLS_WINDOW_1 BIT(2)
+#define RKISP1_CIF_ISP_BLS_WINDOW_1 (1 << 2)
#define RKISP1_CIF_ISP_BLS_WINDOW_2 (2 << 2)
/* GAMMA-IN */
@@ -676,11 +685,11 @@
/* CIF Registers */
/* =================================================================== */
#define RKISP1_CIF_CTRL_BASE 0x00000000
-#define RKISP1_CIF_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
+#define RKISP1_CIF_VI_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
#define RKISP1_CIF_VI_ID (RKISP1_CIF_CTRL_BASE + 0x00000008)
#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12 (RKISP1_CIF_CTRL_BASE + 0x0000000C)
-#define RKISP1_CIF_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
-#define RKISP1_CIF_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
+#define RKISP1_CIF_VI_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
+#define RKISP1_CIF_VI_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
#define RKISP1_CIF_VI_DPCL (RKISP1_CIF_CTRL_BASE + 0x00000018)
#define RKISP1_CIF_IMG_EFF_BASE 0x00000200
@@ -894,52 +903,29 @@
#define RKISP1_CIF_DUAL_CROP_S_V_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000040)
#define RKISP1_CIF_MRSZ_BASE 0x00000C00
-#define RKISP1_CIF_MRSZ_CTRL (RKISP1_CIF_MRSZ_BASE + 0x00000000)
-#define RKISP1_CIF_MRSZ_SCALE_HY (RKISP1_CIF_MRSZ_BASE + 0x00000004)
-#define RKISP1_CIF_MRSZ_SCALE_HCB (RKISP1_CIF_MRSZ_BASE + 0x00000008)
-#define RKISP1_CIF_MRSZ_SCALE_HCR (RKISP1_CIF_MRSZ_BASE + 0x0000000C)
-#define RKISP1_CIF_MRSZ_SCALE_VY (RKISP1_CIF_MRSZ_BASE + 0x00000010)
-#define RKISP1_CIF_MRSZ_SCALE_VC (RKISP1_CIF_MRSZ_BASE + 0x00000014)
-#define RKISP1_CIF_MRSZ_PHASE_HY (RKISP1_CIF_MRSZ_BASE + 0x00000018)
-#define RKISP1_CIF_MRSZ_PHASE_HC (RKISP1_CIF_MRSZ_BASE + 0x0000001C)
-#define RKISP1_CIF_MRSZ_PHASE_VY (RKISP1_CIF_MRSZ_BASE + 0x00000020)
-#define RKISP1_CIF_MRSZ_PHASE_VC (RKISP1_CIF_MRSZ_BASE + 0x00000024)
-#define RKISP1_CIF_MRSZ_SCALE_LUT_ADDR (RKISP1_CIF_MRSZ_BASE + 0x00000028)
-#define RKISP1_CIF_MRSZ_SCALE_LUT (RKISP1_CIF_MRSZ_BASE + 0x0000002C)
-#define RKISP1_CIF_MRSZ_CTRL_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000030)
-#define RKISP1_CIF_MRSZ_SCALE_HY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000034)
-#define RKISP1_CIF_MRSZ_SCALE_HCB_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000038)
-#define RKISP1_CIF_MRSZ_SCALE_HCR_SHD (RKISP1_CIF_MRSZ_BASE + 0x0000003C)
-#define RKISP1_CIF_MRSZ_SCALE_VY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000040)
-#define RKISP1_CIF_MRSZ_SCALE_VC_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000044)
-#define RKISP1_CIF_MRSZ_PHASE_HY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000048)
-#define RKISP1_CIF_MRSZ_PHASE_HC_SHD (RKISP1_CIF_MRSZ_BASE + 0x0000004C)
-#define RKISP1_CIF_MRSZ_PHASE_VY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000050)
-#define RKISP1_CIF_MRSZ_PHASE_VC_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000054)
-
#define RKISP1_CIF_SRSZ_BASE 0x00001000
-#define RKISP1_CIF_SRSZ_CTRL (RKISP1_CIF_SRSZ_BASE + 0x00000000)
-#define RKISP1_CIF_SRSZ_SCALE_HY (RKISP1_CIF_SRSZ_BASE + 0x00000004)
-#define RKISP1_CIF_SRSZ_SCALE_HCB (RKISP1_CIF_SRSZ_BASE + 0x00000008)
-#define RKISP1_CIF_SRSZ_SCALE_HCR (RKISP1_CIF_SRSZ_BASE + 0x0000000C)
-#define RKISP1_CIF_SRSZ_SCALE_VY (RKISP1_CIF_SRSZ_BASE + 0x00000010)
-#define RKISP1_CIF_SRSZ_SCALE_VC (RKISP1_CIF_SRSZ_BASE + 0x00000014)
-#define RKISP1_CIF_SRSZ_PHASE_HY (RKISP1_CIF_SRSZ_BASE + 0x00000018)
-#define RKISP1_CIF_SRSZ_PHASE_HC (RKISP1_CIF_SRSZ_BASE + 0x0000001C)
-#define RKISP1_CIF_SRSZ_PHASE_VY (RKISP1_CIF_SRSZ_BASE + 0x00000020)
-#define RKISP1_CIF_SRSZ_PHASE_VC (RKISP1_CIF_SRSZ_BASE + 0x00000024)
-#define RKISP1_CIF_SRSZ_SCALE_LUT_ADDR (RKISP1_CIF_SRSZ_BASE + 0x00000028)
-#define RKISP1_CIF_SRSZ_SCALE_LUT (RKISP1_CIF_SRSZ_BASE + 0x0000002C)
-#define RKISP1_CIF_SRSZ_CTRL_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000030)
-#define RKISP1_CIF_SRSZ_SCALE_HY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000034)
-#define RKISP1_CIF_SRSZ_SCALE_HCB_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000038)
-#define RKISP1_CIF_SRSZ_SCALE_HCR_SHD (RKISP1_CIF_SRSZ_BASE + 0x0000003C)
-#define RKISP1_CIF_SRSZ_SCALE_VY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000040)
-#define RKISP1_CIF_SRSZ_SCALE_VC_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000044)
-#define RKISP1_CIF_SRSZ_PHASE_HY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000048)
-#define RKISP1_CIF_SRSZ_PHASE_HC_SHD (RKISP1_CIF_SRSZ_BASE + 0x0000004C)
-#define RKISP1_CIF_SRSZ_PHASE_VY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000050)
-#define RKISP1_CIF_SRSZ_PHASE_VC_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000054)
+#define RKISP1_CIF_RSZ_CTRL 0x0000
+#define RKISP1_CIF_RSZ_SCALE_HY 0x0004
+#define RKISP1_CIF_RSZ_SCALE_HCB 0x0008
+#define RKISP1_CIF_RSZ_SCALE_HCR 0x000C
+#define RKISP1_CIF_RSZ_SCALE_VY 0x0010
+#define RKISP1_CIF_RSZ_SCALE_VC 0x0014
+#define RKISP1_CIF_RSZ_PHASE_HY 0x0018
+#define RKISP1_CIF_RSZ_PHASE_HC 0x001C
+#define RKISP1_CIF_RSZ_PHASE_VY 0x0020
+#define RKISP1_CIF_RSZ_PHASE_VC 0x0024
+#define RKISP1_CIF_RSZ_SCALE_LUT_ADDR 0x0028
+#define RKISP1_CIF_RSZ_SCALE_LUT 0x002C
+#define RKISP1_CIF_RSZ_CTRL_SHD 0x0030
+#define RKISP1_CIF_RSZ_SCALE_HY_SHD 0x0034
+#define RKISP1_CIF_RSZ_SCALE_HCB_SHD 0x0038
+#define RKISP1_CIF_RSZ_SCALE_HCR_SHD 0x003C
+#define RKISP1_CIF_RSZ_SCALE_VY_SHD 0x0040
+#define RKISP1_CIF_RSZ_SCALE_VC_SHD 0x0044
+#define RKISP1_CIF_RSZ_PHASE_HY_SHD 0x0048
+#define RKISP1_CIF_RSZ_PHASE_HC_SHD 0x004C
+#define RKISP1_CIF_RSZ_PHASE_VY_SHD 0x0050
+#define RKISP1_CIF_RSZ_PHASE_VC_SHD 0x0054
#define RKISP1_CIF_MI_BASE 0x00001400
#define RKISP1_CIF_MI_CTRL (RKISP1_CIF_MI_BASE + 0x00000000)
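The (1 << n) spellings used throughout this header in place of BIT(n) apply to constants that are values of multi-bit register fields rather than single-bit flags; writing every value of a field with the same explicit shift keeps the encoding readable. A minimal sketch of the convention, using hypothetical FIELD_MODE_* names that are not part of the driver:

	/* A 2-bit "mode" field at bits [3:2]: every value uses the same
	 * (value << shift) form, so none of them reads as a lone flag bit. */
	#define FIELD_MODE_SHIFT	2
	#define FIELD_MODE_BYPASS	(0 << FIELD_MODE_SHIFT)
	#define FIELD_MODE_YUV		(1 << FIELD_MODE_SHIFT)
	#define FIELD_MODE_RAW		(2 << FIELD_MODE_SHIFT)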
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index 2070f4b06705..f4caa8f684aa 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -61,30 +61,6 @@ struct rkisp1_rsz_config {
/* registers */
struct {
u32 ctrl;
- u32 ctrl_shd;
- u32 scale_hy;
- u32 scale_hcr;
- u32 scale_hcb;
- u32 scale_vy;
- u32 scale_vc;
- u32 scale_lut;
- u32 scale_lut_addr;
- u32 scale_hy_shd;
- u32 scale_hcr_shd;
- u32 scale_hcb_shd;
- u32 scale_vy_shd;
- u32 scale_vc_shd;
- u32 phase_hy;
- u32 phase_hc;
- u32 phase_vy;
- u32 phase_vc;
- u32 phase_hy_shd;
- u32 phase_hc_shd;
- u32 phase_vy_shd;
- u32 phase_vc_shd;
- } rsz;
- struct {
- u32 ctrl;
u32 yuvmode_mask;
u32 rawmode_mask;
u32 h_offset;
@@ -101,30 +77,6 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = {
.min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
/* registers */
- .rsz = {
- .ctrl = RKISP1_CIF_MRSZ_CTRL,
- .scale_hy = RKISP1_CIF_MRSZ_SCALE_HY,
- .scale_hcr = RKISP1_CIF_MRSZ_SCALE_HCR,
- .scale_hcb = RKISP1_CIF_MRSZ_SCALE_HCB,
- .scale_vy = RKISP1_CIF_MRSZ_SCALE_VY,
- .scale_vc = RKISP1_CIF_MRSZ_SCALE_VC,
- .scale_lut = RKISP1_CIF_MRSZ_SCALE_LUT,
- .scale_lut_addr = RKISP1_CIF_MRSZ_SCALE_LUT_ADDR,
- .scale_hy_shd = RKISP1_CIF_MRSZ_SCALE_HY_SHD,
- .scale_hcr_shd = RKISP1_CIF_MRSZ_SCALE_HCR_SHD,
- .scale_hcb_shd = RKISP1_CIF_MRSZ_SCALE_HCB_SHD,
- .scale_vy_shd = RKISP1_CIF_MRSZ_SCALE_VY_SHD,
- .scale_vc_shd = RKISP1_CIF_MRSZ_SCALE_VC_SHD,
- .phase_hy = RKISP1_CIF_MRSZ_PHASE_HY,
- .phase_hc = RKISP1_CIF_MRSZ_PHASE_HC,
- .phase_vy = RKISP1_CIF_MRSZ_PHASE_VY,
- .phase_vc = RKISP1_CIF_MRSZ_PHASE_VC,
- .ctrl_shd = RKISP1_CIF_MRSZ_CTRL_SHD,
- .phase_hy_shd = RKISP1_CIF_MRSZ_PHASE_HY_SHD,
- .phase_hc_shd = RKISP1_CIF_MRSZ_PHASE_HC_SHD,
- .phase_vy_shd = RKISP1_CIF_MRSZ_PHASE_VY_SHD,
- .phase_vc_shd = RKISP1_CIF_MRSZ_PHASE_VC_SHD,
- },
.dual_crop = {
.ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
.yuvmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_YUV,
@@ -143,30 +95,6 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
.min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
/* registers */
- .rsz = {
- .ctrl = RKISP1_CIF_SRSZ_CTRL,
- .scale_hy = RKISP1_CIF_SRSZ_SCALE_HY,
- .scale_hcr = RKISP1_CIF_SRSZ_SCALE_HCR,
- .scale_hcb = RKISP1_CIF_SRSZ_SCALE_HCB,
- .scale_vy = RKISP1_CIF_SRSZ_SCALE_VY,
- .scale_vc = RKISP1_CIF_SRSZ_SCALE_VC,
- .scale_lut = RKISP1_CIF_SRSZ_SCALE_LUT,
- .scale_lut_addr = RKISP1_CIF_SRSZ_SCALE_LUT_ADDR,
- .scale_hy_shd = RKISP1_CIF_SRSZ_SCALE_HY_SHD,
- .scale_hcr_shd = RKISP1_CIF_SRSZ_SCALE_HCR_SHD,
- .scale_hcb_shd = RKISP1_CIF_SRSZ_SCALE_HCB_SHD,
- .scale_vy_shd = RKISP1_CIF_SRSZ_SCALE_VY_SHD,
- .scale_vc_shd = RKISP1_CIF_SRSZ_SCALE_VC_SHD,
- .phase_hy = RKISP1_CIF_SRSZ_PHASE_HY,
- .phase_hc = RKISP1_CIF_SRSZ_PHASE_HC,
- .phase_vy = RKISP1_CIF_SRSZ_PHASE_VY,
- .phase_vc = RKISP1_CIF_SRSZ_PHASE_VC,
- .ctrl_shd = RKISP1_CIF_SRSZ_CTRL_SHD,
- .phase_hy_shd = RKISP1_CIF_SRSZ_PHASE_HY_SHD,
- .phase_hc_shd = RKISP1_CIF_SRSZ_PHASE_HC_SHD,
- .phase_vy_shd = RKISP1_CIF_SRSZ_PHASE_VY_SHD,
- .phase_vc_shd = RKISP1_CIF_SRSZ_PHASE_VC_SHD,
- },
.dual_crop = {
.ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
.yuvmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_YUV,
@@ -178,6 +106,17 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
},
};
+static inline u32 rkisp1_rsz_read(struct rkisp1_resizer *rsz, u32 offset)
+{
+ return rkisp1_read(rsz->rkisp1, rsz->regs_base + offset);
+}
+
+static inline void rkisp1_rsz_write(struct rkisp1_resizer *rsz, u32 offset,
+ u32 value)
+{
+ rkisp1_write(rsz->rkisp1, rsz->regs_base + offset, value);
+}
+
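The two inline helpers added above implement the resizer's new base-plus-offset register access: each instance stores only its block base (main-path or self-path resizer), and the shared RKISP1_CIF_RSZ_* offsets are added per access. A self-contained sketch of the same pattern with assumed names (struct blk, blk_read and blk_write are illustrative, not driver symbols):

	#include <linux/io.h>
	#include <linux/types.h>

	struct blk {
		void __iomem *regs;	/* MMIO mapping of the whole device */
		u32 base;		/* per-instance block base, e.g. 0x0c00 or 0x1000 */
	};

	static inline u32 blk_read(struct blk *b, u32 offset)
	{
		return readl(b->regs + b->base + offset);
	}

	static inline void blk_write(struct blk *b, u32 offset, u32 value)
	{
		writel(value, b->regs + b->base + offset);
	}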
static struct v4l2_mbus_framefmt *
rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
@@ -222,7 +161,7 @@ static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
dc_ctrl |= RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD;
else
dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
- rkisp1_write(rsz->rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl);
+ rkisp1_write(rsz->rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
}
/* configure dual-crop unit */
@@ -247,13 +186,13 @@ static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
}
dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl);
- rkisp1_write(rkisp1, sink_crop->left, rsz->config->dual_crop.h_offset);
- rkisp1_write(rkisp1, sink_crop->top, rsz->config->dual_crop.v_offset);
- rkisp1_write(rkisp1, sink_crop->width, rsz->config->dual_crop.h_size);
- rkisp1_write(rkisp1, sink_crop->height, rsz->config->dual_crop.v_size);
+ rkisp1_write(rkisp1, rsz->config->dual_crop.h_offset, sink_crop->left);
+ rkisp1_write(rkisp1, rsz->config->dual_crop.v_offset, sink_crop->top);
+ rkisp1_write(rkisp1, rsz->config->dual_crop.h_size, sink_crop->width);
+ rkisp1_write(rkisp1, rsz->config->dual_crop.v_size, sink_crop->height);
dc_ctrl |= rsz->config->dual_crop.yuvmode_mask;
dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
- rkisp1_write(rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl);
+ rkisp1_write(rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d\n", rsz->id,
sink_fmt->width, sink_fmt->height,
@@ -264,52 +203,17 @@ static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
* Resizer hw configs
*/
-static void rkisp1_rsz_dump_regs(struct rkisp1_resizer *rsz)
-{
- dev_dbg(rsz->rkisp1->dev,
- "RSZ_CTRL 0x%08x/0x%08x\n"
- "RSZ_SCALE_HY %d/%d\n"
- "RSZ_SCALE_HCB %d/%d\n"
- "RSZ_SCALE_HCR %d/%d\n"
- "RSZ_SCALE_VY %d/%d\n"
- "RSZ_SCALE_VC %d/%d\n"
- "RSZ_PHASE_HY %d/%d\n"
- "RSZ_PHASE_HC %d/%d\n"
- "RSZ_PHASE_VY %d/%d\n"
- "RSZ_PHASE_VC %d/%d\n",
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy_shd),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc),
- rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc_shd));
-}
-
static void rkisp1_rsz_update_shadow(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
- u32 ctrl_cfg = rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl);
+ u32 ctrl_cfg = rkisp1_rsz_read(rsz, RKISP1_CIF_RSZ_CTRL);
if (when == RKISP1_SHADOW_REGS_ASYNC)
ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD_AUTO;
else
ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD;
- rkisp1_write(rsz->rkisp1, ctrl_cfg, rsz->config->rsz.ctrl);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_CTRL, ctrl_cfg);
}
static u32 rkisp1_rsz_calc_ratio(u32 len_sink, u32 len_src)
@@ -325,7 +229,7 @@ static u32 rkisp1_rsz_calc_ratio(u32 len_sink, u32 len_src)
static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
- rkisp1_write(rsz->rkisp1, 0, rsz->config->rsz.ctrl);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_CTRL, 0);
if (when == RKISP1_SHADOW_REGS_SYNC)
rkisp1_rsz_update_shadow(rsz, when);
@@ -338,20 +242,19 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
struct v4l2_rect *src_c,
enum rkisp1_shadow_regs_when when)
{
- struct rkisp1_device *rkisp1 = rsz->rkisp1;
u32 ratio, rsz_ctrl = 0;
unsigned int i;
/* No phase offset */
- rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hy);
- rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hc);
- rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vy);
- rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vc);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_HY, 0);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_HC, 0);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_VY, 0);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_VC, 0);
/* Linear interpolation */
for (i = 0; i < 64; i++) {
- rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut_addr);
- rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_LUT_ADDR, i);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_LUT, i);
}
if (sink_y->width != src_y->width) {
@@ -359,7 +262,7 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
if (sink_y->width < src_y->width)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_UP;
ratio = rkisp1_rsz_calc_ratio(sink_y->width, src_y->width);
- rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hy);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_HY, ratio);
}
if (sink_c->width != src_c->width) {
@@ -367,8 +270,8 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
if (sink_c->width < src_c->width)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_UP;
ratio = rkisp1_rsz_calc_ratio(sink_c->width, src_c->width);
- rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcb);
- rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcr);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_HCB, ratio);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_HCR, ratio);
}
if (sink_y->height != src_y->height) {
@@ -376,7 +279,7 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
if (sink_y->height < src_y->height)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_UP;
ratio = rkisp1_rsz_calc_ratio(sink_y->height, src_y->height);
- rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vy);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_VY, ratio);
}
if (sink_c->height != src_c->height) {
@@ -384,10 +287,10 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
if (sink_c->height < src_c->height)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_UP;
ratio = rkisp1_rsz_calc_ratio(sink_c->height, src_c->height);
- rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vc);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_VC, ratio);
}
- rkisp1_write(rkisp1, rsz_ctrl, rsz->config->rsz.ctrl);
+ rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_CTRL, rsz_ctrl);
rkisp1_rsz_update_shadow(rsz, when);
}
@@ -448,8 +351,6 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
/* set values in the hw */
rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when);
-
- rkisp1_rsz_dump_regs(rsz);
}
/* ----------------------------------------------------------------------------
@@ -532,14 +433,14 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
- const struct rkisp1_isp_mbus_info *sink_mbus_info;
+ const struct rkisp1_mbus_info *sink_mbus_info;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
which);
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
which);
- sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ sink_mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
@@ -561,7 +462,7 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
struct v4l2_rect *r,
unsigned int which)
{
- const struct rkisp1_isp_mbus_info *mbus_info;
+ const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
@@ -572,7 +473,7 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
which);
	/* Do not crop for MP bayer raw data */
- mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (rsz->id == RKISP1_MAINPATH &&
mbus_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
@@ -599,7 +500,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
- const struct rkisp1_isp_mbus_info *mbus_info;
+ const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
@@ -615,10 +516,10 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
else
sink_fmt->code = format->code;
- mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) {
sink_fmt->code = RKISP1_DEF_FMT;
- mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
rsz->pixel_enc = mbus_info->pixel_enc;
@@ -782,8 +683,12 @@ static const struct v4l2_subdev_ops rkisp1_rsz_ops = {
static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
{
+ if (!rsz->rkisp1)
+ return;
+
v4l2_device_unregister_subdev(&rsz->sd);
media_entity_cleanup(&rsz->sd.entity);
+ mutex_destroy(&rsz->ops_lock);
}
static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
@@ -799,10 +704,13 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
struct v4l2_subdev *sd = &rsz->sd;
int ret;
- if (rsz->id == RKISP1_SELFPATH)
+ if (rsz->id == RKISP1_SELFPATH) {
+ rsz->regs_base = RKISP1_CIF_SRSZ_BASE;
rsz->config = &rkisp1_rsz_config_sp;
- else
+ } else {
+ rsz->regs_base = RKISP1_CIF_MRSZ_BASE;
rsz->config = &rkisp1_rsz_config_mp;
+ }
v4l2_subdev_init(sd, &rkisp1_rsz_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -821,47 +729,43 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
mutex_init(&rsz->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_RSZ_PAD_MAX, pads);
if (ret)
- return ret;
+ goto error;
ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(sd->dev, "Failed to register resizer subdev\n");
- goto err_cleanup_media_entity;
+ goto error;
}
rkisp1_rsz_init_config(sd, &state);
return 0;
-err_cleanup_media_entity:
+error:
media_entity_cleanup(&sd->entity);
-
+ mutex_destroy(&rsz->ops_lock);
return ret;
}
int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1)
{
- struct rkisp1_resizer *rsz;
- unsigned int i, j;
+ unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(rkisp1->resizer_devs); i++) {
- rsz = &rkisp1->resizer_devs[i];
+ struct rkisp1_resizer *rsz = &rkisp1->resizer_devs[i];
+
rsz->rkisp1 = rkisp1;
rsz->id = i;
+
ret = rkisp1_rsz_register(rsz);
- if (ret)
- goto err_unreg_resizer_devs;
+ if (ret) {
+ rsz->rkisp1 = NULL;
+ rkisp1_resizer_devs_unregister(rkisp1);
+ return ret;
+ }
}
return 0;
-
-err_unreg_resizer_devs:
- for (j = 0; j < i; j++) {
- rsz = &rkisp1->resizer_devs[j];
- rkisp1_rsz_unregister(rsz);
- }
-
- return ret;
}
void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1)
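The reworked loop above marks a resizer whose registration failed (rsz->rkisp1 = NULL) and then reuses rkisp1_resizer_devs_unregister() for the unwind, relying on the guard added to rkisp1_rsz_unregister() to skip instances that never registered. A sketch of that idiom under hypothetical names (struct parent/child and the stub registration are not driver code):

	struct parent;

	struct child {
		struct parent *parent;	/* NULL means "not registered" */
	};

	struct parent {
		struct child children[2];
	};

	static int child_register(struct child *c)
	{
		return 0;		/* stand-in for the real registration work */
	}

	static void child_unregister(struct child *c)
	{
		if (!c->parent)		/* never registered: nothing to undo */
			return;
		c->parent = NULL;	/* real code would release entity, lock, ... */
	}

	static void children_unregister(struct parent *p)
	{
		unsigned int i;

		for (i = 0; i < 2; i++)
			child_unregister(&p->children[i]);
	}

	static int children_register(struct parent *p)
	{
		unsigned int i;

		for (i = 0; i < 2; i++) {
			struct child *c = &p->children[i];

			c->parent = p;
			if (child_register(c)) {
				c->parent = NULL;	/* mark the failed instance */
				children_unregister(p);	/* safe: skips unmarked ones */
				return -1;
			}
		}

		return 0;
	}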
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index be5777c65bfb..2795eef91bdd 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -305,7 +305,7 @@ static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
- const struct rkisp1_isp_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
+ const struct rkisp1_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
struct rkisp1_cif_isp_bls_meas_val *bls_val;
bls_val = &pbuf->params.ae.bls_val;
@@ -408,7 +408,7 @@ void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris)
spin_lock(&stats->lock);
- rkisp1_write(rkisp1, RKISP1_STATS_MEAS_MASK, RKISP1_CIF_ISP_ICR);
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, RKISP1_STATS_MEAS_MASK);
isp_mis_tmp = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (isp_mis_tmp & RKISP1_STATS_MEAS_MASK)
@@ -427,7 +427,7 @@ static void rkisp1_init_stats(struct rkisp1_stats *stats)
stats->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_stat_buffer);
- if (stats->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ if (stats->rkisp1->info->isp_ver == RKISP1_V12)
stats->ops = &rkisp1_v12_stats_ops;
else
stats->ops = &rkisp1_v10_stats_ops;
@@ -463,21 +463,21 @@ int rkisp1_stats_register(struct rkisp1_device *rkisp1)
node->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
- goto err_mutex_destroy;
+ goto error;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&vdev->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
- goto err_cleanup_media_entity;
+ goto error;
}
return 0;
-err_cleanup_media_entity:
+error:
media_entity_cleanup(&vdev->entity);
-err_mutex_destroy:
mutex_destroy(&node->vlock);
+ stats->rkisp1 = NULL;
return ret;
}
@@ -487,6 +487,9 @@ void rkisp1_stats_unregister(struct rkisp1_device *rkisp1)
struct rkisp1_vdev_node *node = &stats->vnode;
struct video_device *vdev = &node->vdev;
+ if (!stats->rkisp1)
+ return;
+
vb2_video_unregister_device(vdev);
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
index e3559b047092..b147c645ae0b 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
@@ -339,8 +339,7 @@ static int get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index, u32 *ret_
void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
{
- u32 f_chk_addr, f_chk_len, s_chk_addr, s_chk_len;
- f_chk_addr = f_chk_len = s_chk_addr = s_chk_len = 0;
+ u32 f_chk_addr, f_chk_len, s_chk_addr = 0, s_chk_len = 0;
f_chk_addr = frm->addr.y;
f_chk_len = frm->payload[0];
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
index e894e85e84a4..1ea5fa1bf3c8 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
@@ -222,7 +222,7 @@ struct gsc_m2m_device {
* @org_scaler_input_w: max pixel width when the scaler is enabled
* @org_scaler_input_h: max pixel height when the scaler is enabled
* @real_rot_dis_w: max pixel src cropped height with the rotator is off
- * @real_rot_dis_h: max pixel src croppped width with the rotator is off
+ * @real_rot_dis_h: max pixel src cropped width with the rotator is off
* @real_rot_en_w: max pixel src cropped width with the rotator is on
* @real_rot_en_h: max pixel src cropped height with the rotator is on
* @target_rot_dis_w: max pixel dst scaled width with the rotator is off
diff --git a/drivers/media/platform/samsung/exynos4-is/common.c b/drivers/media/platform/samsung/exynos4-is/common.c
index 26ee2388edfd..e41333535eac 100644
--- a/drivers/media/platform/samsung/exynos4-is/common.c
+++ b/drivers/media/platform/samsung/exynos4-is/common.c
@@ -21,7 +21,7 @@ struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity)
while (pad->flags & MEDIA_PAD_FL_SINK) {
/* source pad */
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
index 7ff4024003f4..03638c8f772d 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
@@ -737,7 +737,7 @@ static struct media_entity *fimc_pipeline_get_head(struct media_entity *me)
struct media_pad *pad = &me->pads[0];
while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad)
break;
me = pad->entity;
@@ -810,7 +810,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
return ret;
}
- pad = media_entity_remote_pad(&me->pads[sfmt.pad]);
+ pad = media_pad_remote_pad_first(&me->pads[sfmt.pad]);
if (!pad)
return -EINVAL;
me = pad->entity;
@@ -1115,7 +1115,7 @@ static int fimc_pipeline_validate(struct fimc_dev *fimc)
if (p->flags & MEDIA_PAD_FL_SINK) {
sink_pad = p;
- src_pad = media_entity_remote_pad(sink_pad);
+ src_pad = media_pad_remote_pad_first(sink_pad);
if (src_pad)
break;
}
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
index da36b48b8f9f..9dcbb9853ac0 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
@@ -116,7 +116,7 @@ enum fimc_is_error {
ERROR_COMMON_PARAMETER = 2, /* Invalid parameter */
/* setfile is not loaded before adjusting */
ERROR_COMMON_SETFILE_LOAD = 3,
- /* setfile is not Adjusted before runnng. */
+ /* setfile is not Adjusted before running. */
ERROR_COMMON_SETFILE_ADJUST = 4,
/* Index of setfile is not valid (0~MAX_SETFILE_NUM-1) */
ERROR_COMMON_SETFILE_INDEX = 5,
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
index 83688a7982f7..8f12240b0eb7 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
@@ -465,7 +465,7 @@ static int isp_video_pipeline_validate(struct fimc_isp *isp)
return -EPIPE;
/* Retrieve format at the source pad */
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index 1a396b7cd9a9..41b0a4a5929a 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -789,7 +789,7 @@ static int fimc_pipeline_validate(struct fimc_lite *fimc)
return -EPIPE;
}
/* Retrieve format at the source pad */
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
index 544b54e428c9..52b43ea04030 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.c
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
@@ -81,7 +81,7 @@ static void fimc_pipeline_prepare(struct fimc_pipeline *p,
struct media_pad *spad = &me->pads[i];
if (!(spad->flags & MEDIA_PAD_FL_SINK))
continue;
- pad = media_entity_remote_pad(spad);
+ pad = media_pad_remote_pad_first(spad);
if (pad)
break;
}
diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
index 27a214936cb0..6a0d35f33e8c 100644
--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
@@ -124,7 +124,7 @@ static char *csi_clock_name[] = {
#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
static const char * const csis_supply_name[] = {
- "vddcore", /* CSIS Core (1.0V, 1.1V or 1.2V) suppply */
+ "vddcore", /* CSIS Core (1.0V, 1.1V or 1.2V) supply */
"vddio", /* CSIS I/O and PLL (1.8V) supply */
};
#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index 140854ab4dd8..c2d8f1e425d8 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -811,7 +811,7 @@ static int camif_pipeline_validate(struct camif_dev *camif)
int ret;
/* Retrieve format at the sensor subdev source pad */
- pad = media_entity_remote_pad(&camif->pads[0]);
+ pad = media_pad_remote_pad_first(&camif->pads[0]);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
return -EPIPE;
diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
index 456287186ad8..55814041b8d8 100644
--- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
@@ -1709,7 +1709,7 @@ static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
w_ratio = ctx->out_q.w / r->width;
h_ratio = ctx->out_q.h / r->height;
- scale_factor = w_ratio > h_ratio ? w_ratio : h_ratio;
+ scale_factor = max(w_ratio, h_ratio);
scale_factor = clamp_val(scale_factor, 1, 8);
/* Align scale ratio to the nearest power of 2 */
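
The hunk above replaces an open-coded ternary with the kernel's max() helper; together with the existing clamp_val() it yields the larger of the two downscale ratios, limited to the 1-8 range. A small standalone illustration with userspace stand-ins for max() and clamp_val() (all numbers made up):

#include <stdio.h>

/* Userspace stand-ins for the kernel's max() and clamp_val() helpers. */
#define max_u(a, b)             ((a) > (b) ? (a) : (b))
#define clamp_u(v, lo, hi)      ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
        unsigned int out_w = 1280, out_h = 720, crop_w = 200, crop_h = 300;
        unsigned int w_ratio = out_w / crop_w;          /* 6 */
        unsigned int h_ratio = out_h / crop_h;          /* 2 */
        unsigned int scale = clamp_u(max_u(w_ratio, h_ratio), 1, 8);

        printf("scale factor: %u\n", scale);            /* prints 6 */
        return 0;
}
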
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
index 72a901e99450..187849841a28 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
@@ -88,7 +88,6 @@ int s5p_mfc_power_on(void)
if (ret < 0) {
mfc_err("clock prepare failed for clock: %s\n",
pm->clk_names[i]);
- i++;
goto err;
}
}
@@ -98,7 +97,7 @@ int s5p_mfc_power_on(void)
return 0;
err:
- while (--i > 0)
+ while (--i >= 0)
clk_disable_unprepare(pm->clocks[i]);
pm_runtime_put(pm->device);
return ret;
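
The two changes in this hunk work together: previously a failed clk_prepare_enable() bumped i before jumping to the error path, so the unwind loop started at the clock that was never prepared, and its --i > 0 condition also skipped clock 0. Without the increment, --i >= 0 releases exactly the clocks that were prepared. A minimal, self-contained sketch of that acquire-or-unwind pattern, with hypothetical prepare()/unprepare() stand-ins instead of the clk API:

#include <stdio.h>

#define NUM_RES 3

/* Hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare(). */
static int prepare(int i)    { printf("prepare %d\n", i); return i == 2 ? -1 : 0; }
static void unprepare(int i) { printf("unprepare %d\n", i); }

static int power_on(void)
{
        int i, ret;

        for (i = 0; i < NUM_RES; i++) {
                ret = prepare(i);
                if (ret < 0)
                        goto err;       /* do not bump i: index i was NOT prepared */
        }
        return 0;

err:
        while (--i >= 0)                /* >= 0 so that index 0 is also released */
                unprepare(i);
        return ret;
}

int main(void)
{
        return power_on() ? 1 : 0;
}
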
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 7bb1384e4bad..cefe6b7bfdc4 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -107,7 +107,7 @@ static void channel_swdemux_tsklet(struct tasklet_struct *t)
size,
DMA_FROM_DEVICE);
- buf = (u8 *) channel->back_buffer_aligned;
+ buf = channel->back_buffer_aligned;
dev_dbg(fei->dev,
"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
@@ -176,7 +176,7 @@ static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
channel = fei->channel_data[stdemux->tsin_index];
- bitmap = (unsigned long *) channel->pid_buffer_aligned;
+ bitmap = channel->pid_buffer_aligned;
/* 8192 is a special PID */
if (dvbdmxfeed->pid == 8192) {
@@ -272,7 +272,7 @@ static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
channel = fei->channel_data[stdemux->tsin_index];
- bitmap = (unsigned long *) channel->pid_buffer_aligned;
+ bitmap = channel->pid_buffer_aligned;
if (dvbdmxfeed->pid == 8192) {
tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
@@ -333,8 +333,7 @@ static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
__func__, __LINE__, stdemux, channel->tsin_id);
/* turn off all PIDS in the bitmap */
- memset((void *)channel->pid_buffer_aligned
- , 0x00, PID_TABLE_SIZE);
+ memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);
/* manage cache so data is visible to HW */
dma_sync_single_for_device(fei->dev,
@@ -458,23 +457,19 @@ static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
init_completion(&tsin->idle_completion);
- tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
- FEI_ALIGNMENT, GFP_KERNEL);
-
+ tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
if (!tsin->back_buffer_start) {
ret = -ENOMEM;
goto err_unmap;
}
/* Ensure backbuffer is 32byte aligned */
- tsin->back_buffer_aligned = tsin->back_buffer_start
- + FEI_ALIGNMENT;
+ tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;
- tsin->back_buffer_aligned = (void *)
- (((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);
+ tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);
tsin->back_buffer_busaddr = dma_map_single(fei->dev,
- (void *)tsin->back_buffer_aligned,
+ tsin->back_buffer_aligned,
FEI_BUFFER_SIZE,
DMA_BIDIRECTIONAL);
@@ -489,8 +484,7 @@ static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
* per pid. By powers of deduction we conclude stih407 family
* is configured (at SoC design stage) for bit per pid.
*/
- tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
-
+ tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
if (!tsin->pid_buffer_start) {
ret = -ENOMEM;
goto err_unmap;
@@ -503,11 +497,9 @@ static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
* the register.
*/
- tsin->pid_buffer_aligned = tsin->pid_buffer_start +
- PID_TABLE_SIZE;
+ tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;
- tsin->pid_buffer_aligned = (void *)
- (((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);
+ tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);
tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
tsin->pid_buffer_aligned,
@@ -915,8 +907,7 @@ static int c8sectpfe_remove(struct platform_device *pdev)
if (readl(fei->io + SYS_OTHER_CLKEN))
writel(0, fei->io + SYS_OTHER_CLKEN);
- if (fei->c8sectpfeclk)
- clk_disable_unprepare(fei->c8sectpfeclk);
+ clk_disable_unprepare(fei->c8sectpfeclk);
return 0;
}
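
The buffer-alignment hunks above replace the open-coded "add the alignment, then mask the low bits off" sequence with PTR_ALIGN(), which rounds a pointer up to the next multiple of a power-of-two alignment; either way the result is an aligned address inside the deliberately over-allocated buffer. A userspace sketch of the same round-up, assuming the alignment is a power of two:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace equivalent of the kernel's PTR_ALIGN(): round a pointer up to
 * the next multiple of 'align' (align must be a power of two). */
static void *ptr_align(void *p, uintptr_t align)
{
        return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
        const size_t size = 4096, align = 32;
        /* Over-allocate by 'align' so an aligned window of 'size' bytes fits. */
        void *start = malloc(size + align);
        void *aligned = ptr_align(start, align);

        printf("start=%p aligned=%p\n", start, aligned);
        free(start);            /* always free the original pointer */
        return 0;
}
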
diff --git a/drivers/media/platform/st/sti/delta/delta-v4l2.c b/drivers/media/platform/st/sti/delta/delta-v4l2.c
index 420ad4d8df5d..03eaee6d15da 100644
--- a/drivers/media/platform/st/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/st/sti/delta/delta-v4l2.c
@@ -1669,14 +1669,12 @@ static int delta_open(struct file *file)
set_default_params(ctx);
/* enable ST231 clocks */
- if (delta->clk_st231)
- if (clk_prepare_enable(delta->clk_st231))
- dev_warn(delta->dev, "failed to enable st231 clk\n");
+ if (clk_prepare_enable(delta->clk_st231))
+ dev_warn(delta->dev, "failed to enable st231 clk\n");
/* enable FLASH_PROMIP clock */
- if (delta->clk_flash_promip)
- if (clk_prepare_enable(delta->clk_flash_promip))
- dev_warn(delta->dev, "failed to enable delta promip clk\n");
+ if (clk_prepare_enable(delta->clk_flash_promip))
+ dev_warn(delta->dev, "failed to enable delta promip clk\n");
mutex_unlock(&delta->lock);
@@ -1717,12 +1715,10 @@ static int delta_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
/* disable ST231 clocks */
- if (delta->clk_st231)
- clk_disable_unprepare(delta->clk_st231);
+ clk_disable_unprepare(delta->clk_st231);
/* disable FLASH_PROMIP clock */
- if (delta->clk_flash_promip)
- clk_disable_unprepare(delta->clk_flash_promip);
+ clk_disable_unprepare(delta->clk_flash_promip);
dev_dbg(delta->dev, "%s decoder instance released\n", ctx->name);
@@ -1926,8 +1922,7 @@ static int delta_runtime_suspend(struct device *dev)
{
struct delta_dev *delta = dev_get_drvdata(dev);
- if (delta->clk_delta)
- clk_disable_unprepare(delta->clk_delta);
+ clk_disable_unprepare(delta->clk_delta);
return 0;
}
@@ -1936,9 +1931,8 @@ static int delta_runtime_resume(struct device *dev)
{
struct delta_dev *delta = dev_get_drvdata(dev);
- if (delta->clk_delta)
- if (clk_prepare_enable(delta->clk_delta))
- dev_warn(dev, "failed to prepare/enable delta clk\n");
+ if (clk_prepare_enable(delta->clk_delta))
+ dev_warn(dev, "failed to prepare/enable delta clk\n");
return 0;
}
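
The guards dropped in these hunks are redundant because the common clock framework treats a NULL struct clk * as "no clock": clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) returns immediately, so optional clocks can be passed straight through. A small userspace sketch of that contract, using hypothetical *_sketch stand-ins rather than the real clk API:

#include <stdio.h>

struct clk { const char *name; };

static int clk_prepare_enable_sketch(struct clk *clk)
{
        if (!clk)
                return 0;       /* optional clock absent: silently succeed */
        printf("enable %s\n", clk->name);
        return 0;
}

static void clk_disable_unprepare_sketch(struct clk *clk)
{
        if (!clk)
                return;         /* nothing to undo */
        printf("disable %s\n", clk->name);
}

int main(void)
{
        struct clk st231 = { "st231" };

        clk_prepare_enable_sketch(&st231);
        clk_prepare_enable_sketch(NULL);        /* no guard needed at the call site */
        clk_disable_unprepare_sketch(&st231);
        clk_disable_unprepare_sketch(NULL);
        return 0;
}
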
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index 09a743cd7004..2ca95ab2b0fe 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -611,7 +611,7 @@ static struct media_entity *dcmi_find_source(struct stm32_dcmi *dcmi)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -622,7 +622,6 @@ static struct media_entity *dcmi_find_source(struct stm32_dcmi *dcmi)
}
static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
- struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct media_entity *entity = &dcmi->source->entity;
@@ -664,7 +663,7 @@ static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
format->format.width, format->format.height);
fmt.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to set format 0x%x %ux%u on \"%s\":%d pad (%d)\n",
__func__, format->format.code,
@@ -682,7 +681,7 @@ static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
}
/* Walk to next entity */
- sink_pad = media_entity_remote_pad(src_pad);
+ sink_pad = media_pad_remote_pad_first(src_pad);
if (!sink_pad || !is_media_entity_v4l2_subdev(sink_pad->entity))
break;
@@ -706,7 +705,7 @@ static int dcmi_pipeline_s_stream(struct stm32_dcmi *dcmi, int state)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -999,10 +998,6 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
const struct dcmi_format *sd_fmt;
struct dcmi_framesize sd_fsize;
struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev_pad_config pad_cfg;
- struct v4l2_subdev_state pad_state = {
- .pads = &pad_cfg
- };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1037,8 +1032,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
}
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
- ret = v4l2_subdev_call(dcmi->source, pad, set_fmt,
- &pad_state, &format);
+ ret = v4l2_subdev_call_state_try(dcmi->source, pad, set_fmt, &format);
if (ret < 0)
return ret;
@@ -1115,7 +1109,7 @@ static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
mf->width = sd_framesize.width;
mf->height = sd_framesize.height;
- ret = dcmi_pipeline_s_fmt(dcmi, NULL, &format);
+ ret = dcmi_pipeline_s_fmt(dcmi, &format);
if (ret < 0)
return ret;
@@ -1187,10 +1181,6 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
- struct v4l2_subdev_pad_config pad_cfg;
- struct v4l2_subdev_state pad_state = {
- .pads = &pad_cfg
- };
int ret;
sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
@@ -1203,8 +1193,7 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
}
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
- ret = v4l2_subdev_call(dcmi->source, pad, set_fmt,
- &pad_state, &format);
+ ret = v4l2_subdev_call_state_try(dcmi->source, pad, set_fmt, &format);
if (ret < 0)
return ret;
@@ -1592,26 +1581,32 @@ static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
return 0;
}
-/*
- * FIXME: For the time being we only support subdevices
- * which expose RGB & YUV "parallel form" mbus code (_2X8).
- * Nevertheless, this allows to support serial source subdevices
- * and serial to parallel bridges which conform to this.
- */
static const struct dcmi_format dcmi_formats[] = {
{
.fourcc = V4L2_PIX_FMT_RGB565,
.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.bpp = 2,
}, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_1X16,
+ .bpp = 2,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
}, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .bpp = 2,
+ }, {
.fourcc = V4L2_PIX_FMT_UYVY,
.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.bpp = 2,
}, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .bpp = 2,
+ }, {
.fourcc = V4L2_PIX_FMT_JPEG,
.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
.bpp = 1,
@@ -1631,6 +1626,54 @@ static const struct dcmi_format dcmi_formats[] = {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.bpp = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR14,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG14,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG14,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB14,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .bpp = 2,
},
};
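
With the FIXME gone, dcmi_formats[] now pairs the same pixel formats with both parallel (_2X8) and 16-bit (_1X16) bus codes and adds 10/12/14-bit Bayer entries; find_format_by_fourcc(), used further up in this file, simply walks such a table. A simplified, self-contained sketch of that lookup; the numeric bus codes below are placeholders, not the real MEDIA_BUS_FMT_* constants:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define FOURCC(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

/* Simplified model of a dcmi_formats[]-style table entry. */
struct dcmi_format_sketch {
        uint32_t fourcc;        /* V4L2 pixel format */
        uint32_t mbus_code;     /* placeholder media-bus code */
        unsigned int bpp;       /* bytes per pixel */
};

static const struct dcmi_format_sketch formats[] = {
        { FOURCC('Y', 'U', 'Y', 'V'), 0x2001, 2 },      /* packed YUV 4:2:2 */
        { FOURCC('B', 'X', '1', '0'), 0x3001, 2 },      /* made-up Bayer entry */
};

static const struct dcmi_format_sketch *find_format_by_fourcc(uint32_t fourcc)
{
        for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
                if (formats[i].fourcc == fourcc)
                        return &formats[i];
        return NULL;            /* caller falls back to a default entry */
}

int main(void)
{
        const struct dcmi_format_sketch *fmt =
                find_format_by_fourcc(FOURCC('Y', 'U', 'Y', 'V'));

        printf("bpp = %u\n", fmt ? fmt->bpp : 0);
        return 0;
}
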
diff --git a/drivers/media/platform/sunxi/Kconfig b/drivers/media/platform/sunxi/Kconfig
index 46b7b9bf989c..2dd15083a1d9 100644
--- a/drivers/media/platform/sunxi/Kconfig
+++ b/drivers/media/platform/sunxi/Kconfig
@@ -4,5 +4,7 @@ comment "Sunxi media platform drivers"
source "drivers/media/platform/sunxi/sun4i-csi/Kconfig"
source "drivers/media/platform/sunxi/sun6i-csi/Kconfig"
+source "drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig"
+source "drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig"
source "drivers/media/platform/sunxi/sun8i-di/Kconfig"
source "drivers/media/platform/sunxi/sun8i-rotate/Kconfig"
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index fc537c9f5ca9..9aa01cb01883 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -2,5 +2,7 @@
obj-y += sun4i-csi/
obj-y += sun6i-csi/
+obj-y += sun6i-mipi-csi2/
+obj-y += sun8i-a83t-mipi-csi2/
obj-y += sun8i-di/
obj-y += sun8i-rotate/
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 682c26536034..1d46e113d01d 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -77,7 +77,7 @@ sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad)
{
struct media_pad *remote;
- remote = media_entity_remote_pad(&video->pad);
+ remote = media_pad_remote_pad_first(&video->pad);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
@@ -560,7 +560,7 @@ static int sun6i_video_link_validate(struct media_link *link)
video->mbus_code = 0;
- if (!media_entity_remote_pad(link->sink->entity->pads)) {
+ if (!media_pad_remote_pad_first(link->sink->entity->pads)) {
dev_info(video->csi->dev,
"video node %s pad not connected\n", vdev->name);
return -ENOLINK;
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig
new file mode 100644
index 000000000000..eb982466abd3
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_SUN6I_MIPI_CSI2
+ tristate "Allwinner A31 MIPI CSI-2 Controller Driver"
+ depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on PM && COMMON_CLK
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ select PHY_SUN6I_MIPI_DPHY
+ select GENERIC_PHY_MIPI_DPHY
+ select REGMAP_MMIO
+ help
+ Support for the Allwinner A31 MIPI CSI-2 controller, also found on
+ other platforms such as the V3/V3s.
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/Makefile b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Makefile
new file mode 100644
index 000000000000..14e4e03818b5
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sun6i-mipi-csi2-y += sun6i_mipi_csi2.o
+
+obj-$(CONFIG_VIDEO_SUN6I_MIPI_CSI2) += sun6i-mipi-csi2.o
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
new file mode 100644
index 000000000000..a4e3f9a6b2ff
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "sun6i_mipi_csi2.h"
+#include "sun6i_mipi_csi2_reg.h"
+
+/* Format */
+
+static const struct sun6i_mipi_csi2_format sun6i_mipi_csi2_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+};
+
+static const struct sun6i_mipi_csi2_format *
+sun6i_mipi_csi2_format_find(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun6i_mipi_csi2_formats); i++)
+ if (sun6i_mipi_csi2_formats[i].mbus_code == mbus_code)
+ return &sun6i_mipi_csi2_formats[i];
+
+ return NULL;
+}
+
+/* Controller */
+
+static void sun6i_mipi_csi2_enable(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+
+ regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
+ SUN6I_MIPI_CSI2_CTL_EN, SUN6I_MIPI_CSI2_CTL_EN);
+}
+
+static void sun6i_mipi_csi2_disable(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+
+ regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
+ SUN6I_MIPI_CSI2_CTL_EN, 0);
+}
+
+static void sun6i_mipi_csi2_configure(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+ unsigned int lanes_count =
+ csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
+ struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
+ const struct sun6i_mipi_csi2_format *format;
+ struct device *dev = csi2_dev->dev;
+ u32 version = 0;
+
+ format = sun6i_mipi_csi2_format_find(mbus_format->code);
+ if (WARN_ON(!format))
+ return;
+
+ /*
+ * The enable flow in the Allwinner BSP is a bit different: the enable
+ * and reset bits are set together before starting the CSI controller.
+ *
+ * In mainline we enable the CSI controller first (due to subdev logic).
+ * One reliable way to make this work is to deassert reset, configure
+ * registers and enable the controller when everything's ready.
+ *
+ * However, setting the version enable bit and removing it afterwards
+ * appears necessary for capture to work reliably, while replacing it
+ * with a delay doesn't do the trick.
+ */
+ regmap_write(regmap, SUN6I_MIPI_CSI2_CTL_REG,
+ SUN6I_MIPI_CSI2_CTL_RESET_N |
+ SUN6I_MIPI_CSI2_CTL_VERSION_EN |
+ SUN6I_MIPI_CSI2_CTL_UNPK_EN);
+
+ regmap_read(regmap, SUN6I_MIPI_CSI2_VERSION_REG, &version);
+
+ regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
+ SUN6I_MIPI_CSI2_CTL_VERSION_EN, 0);
+
+ dev_dbg(dev, "A31 MIPI CSI-2 version: %04x\n", version);
+
+ regmap_write(regmap, SUN6I_MIPI_CSI2_CFG_REG,
+ SUN6I_MIPI_CSI2_CFG_CHANNEL_MODE(1) |
+ SUN6I_MIPI_CSI2_CFG_LANE_COUNT(lanes_count));
+
+ /*
+ * Only a single virtual channel (index 0) is currently supported.
+ * While the registers do mention multiple physical channels being
+ * available (which can be configured to match a specific virtual
+ * channel or data type), it's unclear whether channels > 0 are actually
+ * connected and available, and the reference source code only uses
+ * channel 0.
+ *
+ * Using extra channels would also require matching channels to be
+ * available on the CSI (and ISP) side, which is also uncertain, although
+ * some CSI implementations are said to support multiple channels for
+ * BT656 time-sharing.
+ *
+ * We still configure virtual channel numbers to ensure that virtual
+ * channel 0 only goes to channel 0.
+ */
+
+ regmap_write(regmap, SUN6I_MIPI_CSI2_VCDT_RX_REG,
+ SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(3, 3) |
+ SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(2, 2) |
+ SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(1, 1) |
+ SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(0, 0) |
+ SUN6I_MIPI_CSI2_VCDT_RX_CH_DT(0, format->data_type));
+
+ regmap_write(regmap, SUN6I_MIPI_CSI2_CH_INT_PD_REG,
+ SUN6I_MIPI_CSI2_CH_INT_PD_CLEAR);
+}
+
+/* V4L2 Subdev */
+
+static int sun6i_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
+ struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
+ union phy_configure_opts dphy_opts = { 0 };
+ struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
+ struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
+ const struct sun6i_mipi_csi2_format *format;
+ struct phy *dphy = csi2_dev->dphy;
+ struct device *dev = csi2_dev->dev;
+ struct v4l2_ctrl *ctrl;
+ unsigned int lanes_count =
+ csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
+ unsigned long pixel_rate;
+ int ret;
+
+ if (!source_subdev)
+ return -ENODEV;
+
+ if (!on) {
+ ret = v4l2_subdev_call(source_subdev, video, s_stream, 0);
+ goto disable;
+ }
+
+ /* Runtime PM */
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Sensor Pixel Rate */
+
+ ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ dev_err(dev, "missing sensor pixel rate\n");
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
+ if (!pixel_rate) {
+ dev_err(dev, "missing (zero) sensor pixel rate\n");
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ /* D-PHY */
+
+ if (!lanes_count) {
+ dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ format = sun6i_mipi_csi2_format_find(mbus_format->code);
+ if (WARN_ON(!format)) {
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
+ dphy_cfg);
+
+ /*
+ * Note that our hardware uses DDR, which is not taken into account by
+ * phy_mipi_dphy_get_default_config when calculating hs_clk_rate from
+ * the pixel rate, lane count and bpp.
+ *
+ * The resulting clock rate is basically the symbol rate over the whole
+ * link. The actual clock rate is obtained by dividing it by two, since
+ * DDR samples on both the rising and falling edges (see the worked
+ * example after this file's diff).
+ */
+
+ dev_dbg(dev, "A31 MIPI CSI-2 config:\n");
+ dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
+ pixel_rate, format->bpp, lanes_count,
+ dphy_cfg->hs_clk_rate / 2);
+
+ ret = phy_reset(dphy);
+ if (ret) {
+ dev_err(dev, "failed to reset MIPI D-PHY\n");
+ goto error_pm;
+ }
+
+ ret = phy_configure(dphy, &dphy_opts);
+ if (ret) {
+ dev_err(dev, "failed to configure MIPI D-PHY\n");
+ goto error_pm;
+ }
+
+ /* Controller */
+
+ sun6i_mipi_csi2_configure(csi2_dev);
+ sun6i_mipi_csi2_enable(csi2_dev);
+
+ /* D-PHY */
+
+ ret = phy_power_on(dphy);
+ if (ret) {
+ dev_err(dev, "failed to power on MIPI D-PHY\n");
+ goto error_pm;
+ }
+
+ /* Source */
+
+ ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto disable;
+
+ return 0;
+
+disable:
+ if (!on)
+ ret = 0;
+ phy_power_off(dphy);
+ sun6i_mipi_csi2_disable(csi2_dev);
+
+error_pm:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops sun6i_mipi_csi2_video_ops = {
+ .s_stream = sun6i_mipi_csi2_s_stream,
+};
+
+static void
+sun6i_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
+{
+ if (!sun6i_mipi_csi2_format_find(mbus_format->code))
+ mbus_format->code = sun6i_mipi_csi2_formats[0].mbus_code;
+
+ mbus_format->field = V4L2_FIELD_NONE;
+ mbus_format->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int sun6i_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
+ unsigned int pad = SUN6I_MIPI_CSI2_PAD_SINK;
+ struct v4l2_mbus_framefmt *mbus_format =
+ v4l2_subdev_get_try_format(subdev, state, pad);
+ struct mutex *lock = &csi2_dev->bridge.lock;
+
+ mutex_lock(lock);
+
+ mbus_format->code = sun6i_mipi_csi2_formats[0].mbus_code;
+ mbus_format->width = 640;
+ mbus_format->height = 480;
+
+ sun6i_mipi_csi2_mbus_format_prepare(mbus_format);
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static int
+sun6i_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code_enum)
+{
+ if (code_enum->index >= ARRAY_SIZE(sun6i_mipi_csi2_formats))
+ return -EINVAL;
+
+ code_enum->code = sun6i_mipi_csi2_formats[code_enum->index].mbus_code;
+
+ return 0;
+}
+
+static int sun6i_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ struct mutex *lock = &csi2_dev->bridge.lock;
+
+ mutex_lock(lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *mbus_format = *v4l2_subdev_get_try_format(subdev, state,
+ format->pad);
+ else
+ *mbus_format = csi2_dev->bridge.mbus_format;
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static int sun6i_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ struct mutex *lock = &csi2_dev->bridge.lock;
+
+ mutex_lock(lock);
+
+ sun6i_mipi_csi2_mbus_format_prepare(mbus_format);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *v4l2_subdev_get_try_format(subdev, state, format->pad) =
+ *mbus_format;
+ else
+ csi2_dev->bridge.mbus_format = *mbus_format;
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops sun6i_mipi_csi2_pad_ops = {
+ .init_cfg = sun6i_mipi_csi2_init_cfg,
+ .enum_mbus_code = sun6i_mipi_csi2_enum_mbus_code,
+ .get_fmt = sun6i_mipi_csi2_get_fmt,
+ .set_fmt = sun6i_mipi_csi2_set_fmt,
+};
+
+static const struct v4l2_subdev_ops sun6i_mipi_csi2_subdev_ops = {
+ .video = &sun6i_mipi_csi2_video_ops,
+ .pad = &sun6i_mipi_csi2_pad_ops,
+};
+
+/* Media Entity */
+
+static const struct media_entity_operations sun6i_mipi_csi2_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* V4L2 Async */
+
+static int
+sun6i_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *remote_subdev,
+ struct v4l2_async_subdev *async_subdev)
+{
+ struct v4l2_subdev *subdev = notifier->sd;
+ struct sun6i_mipi_csi2_device *csi2_dev =
+ container_of(notifier, struct sun6i_mipi_csi2_device,
+ bridge.notifier);
+ struct media_entity *sink_entity = &subdev->entity;
+ struct media_entity *source_entity = &remote_subdev->entity;
+ struct device *dev = csi2_dev->dev;
+ int sink_pad_index = 0;
+ int source_pad_index;
+ int ret;
+
+ ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "missing source pad in external entity %s\n",
+ source_entity->name);
+ return -EINVAL;
+ }
+
+ source_pad_index = ret;
+
+ dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
+ source_pad_index, sink_entity->name, sink_pad_index);
+
+ ret = media_create_pad_link(source_entity, source_pad_index,
+ sink_entity, sink_pad_index,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
+ source_entity->name, source_pad_index,
+ sink_entity->name, sink_pad_index);
+ return ret;
+ }
+
+ csi2_dev->bridge.source_subdev = remote_subdev;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations
+sun6i_mipi_csi2_notifier_ops = {
+ .bound = sun6i_mipi_csi2_notifier_bound,
+};
+
+/* Bridge */
+
+static int
+sun6i_mipi_csi2_bridge_source_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
+ struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
+ struct v4l2_async_subdev *subdev_async;
+ struct fwnode_handle *handle;
+ struct device *dev = csi2_dev->dev;
+ int ret;
+
+ handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!handle)
+ return -ENODEV;
+
+ endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;
+
+ ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
+ if (ret)
+ goto complete;
+
+ subdev_async =
+ v4l2_async_nf_add_fwnode_remote(notifier, handle,
+ struct v4l2_async_subdev);
+ if (IS_ERR(subdev_async))
+ ret = PTR_ERR(subdev_async);
+
+complete:
+ fwnode_handle_put(handle);
+
+ return ret;
+}
+
+static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ struct sun6i_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
+ struct v4l2_subdev *subdev = &bridge->subdev;
+ struct v4l2_async_notifier *notifier = &bridge->notifier;
+ struct media_pad *pads = bridge->pads;
+ struct device *dev = csi2_dev->dev;
+ int ret;
+
+ mutex_init(&bridge->lock);
+
+ /* V4L2 Subdev */
+
+ v4l2_subdev_init(subdev, &sun6i_mipi_csi2_subdev_ops);
+ strscpy(subdev->name, SUN6I_MIPI_CSI2_NAME, sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->owner = THIS_MODULE;
+ subdev->dev = dev;
+
+ v4l2_set_subdevdata(subdev, csi2_dev);
+
+ /* Media Entity */
+
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ subdev->entity.ops = &sun6i_mipi_csi2_entity_ops;
+
+ /* Media Pads */
+
+ pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, SUN6I_MIPI_CSI2_PAD_COUNT,
+ pads);
+ if (ret)
+ return ret;
+
+ /* V4L2 Async */
+
+ v4l2_async_nf_init(notifier);
+ notifier->ops = &sun6i_mipi_csi2_notifier_ops;
+
+ ret = sun6i_mipi_csi2_bridge_source_setup(csi2_dev);
+ if (ret)
+ goto error_v4l2_notifier_cleanup;
+
+ ret = v4l2_async_subdev_nf_register(subdev, notifier);
+ if (ret < 0)
+ goto error_v4l2_notifier_cleanup;
+
+ /* V4L2 Subdev */
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0)
+ goto error_v4l2_notifier_unregister;
+
+ return 0;
+
+error_v4l2_notifier_unregister:
+ v4l2_async_nf_unregister(notifier);
+
+error_v4l2_notifier_cleanup:
+ v4l2_async_nf_cleanup(notifier);
+
+ media_entity_cleanup(&subdev->entity);
+
+ return ret;
+}
+
+static void
+sun6i_mipi_csi2_bridge_cleanup(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
+ struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_async_nf_unregister(notifier);
+ v4l2_async_nf_cleanup(notifier);
+ media_entity_cleanup(&subdev->entity);
+}
+
+/* Platform */
+
+static int sun6i_mipi_csi2_suspend(struct device *dev)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(csi2_dev->clock_mod);
+ reset_control_assert(csi2_dev->reset);
+
+ return 0;
+}
+
+static int sun6i_mipi_csi2_resume(struct device *dev)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(csi2_dev->reset);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(csi2_dev->clock_mod);
+ if (ret) {
+ dev_err(dev, "failed to enable module clock\n");
+ goto error_reset;
+ }
+
+ return 0;
+
+error_reset:
+ reset_control_assert(csi2_dev->reset);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sun6i_mipi_csi2_pm_ops = {
+ .runtime_suspend = sun6i_mipi_csi2_suspend,
+ .runtime_resume = sun6i_mipi_csi2_resume,
+};
+
+static const struct regmap_config sun6i_mipi_csi2_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x400,
+};
+
+static int
+sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev,
+ struct platform_device *platform_dev)
+{
+ struct device *dev = csi2_dev->dev;
+ void __iomem *io_base;
+ int ret;
+
+ /* Registers */
+
+ io_base = devm_platform_ioremap_resource(platform_dev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ csi2_dev->regmap =
+ devm_regmap_init_mmio_clk(dev, "bus", io_base,
+ &sun6i_mipi_csi2_regmap_config);
+ if (IS_ERR(csi2_dev->regmap)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(csi2_dev->regmap);
+ }
+
+ /* Clock */
+
+ csi2_dev->clock_mod = devm_clk_get(dev, "mod");
+ if (IS_ERR(csi2_dev->clock_mod)) {
+ dev_err(dev, "failed to acquire mod clock\n");
+ return PTR_ERR(csi2_dev->clock_mod);
+ }
+
+ ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
+ if (ret) {
+ dev_err(dev, "failed to set mod clock rate\n");
+ return ret;
+ }
+
+ /* Reset */
+
+ csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(csi2_dev->reset)) {
+ dev_err(dev, "failed to get reset controller\n");
+ return PTR_ERR(csi2_dev->reset);
+ }
+
+ /* D-PHY */
+
+ csi2_dev->dphy = devm_phy_get(dev, "dphy");
+ if (IS_ERR(csi2_dev->dphy)) {
+ dev_err(dev, "failed to get MIPI D-PHY\n");
+ return PTR_ERR(csi2_dev->dphy);
+ }
+
+ ret = phy_init(csi2_dev->dphy);
+ if (ret) {
+ dev_err(dev, "failed to initialize MIPI D-PHY\n");
+ return ret;
+ }
+
+ /* Runtime PM */
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void
+sun6i_mipi_csi2_resources_cleanup(struct sun6i_mipi_csi2_device *csi2_dev)
+{
+ pm_runtime_disable(csi2_dev->dev);
+ phy_exit(csi2_dev->dphy);
+ clk_rate_exclusive_put(csi2_dev->clock_mod);
+}
+
+static int sun6i_mipi_csi2_probe(struct platform_device *platform_dev)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev;
+ struct device *dev = &platform_dev->dev;
+ int ret;
+
+ csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
+ if (!csi2_dev)
+ return -ENOMEM;
+
+ csi2_dev->dev = dev;
+ platform_set_drvdata(platform_dev, csi2_dev);
+
+ ret = sun6i_mipi_csi2_resources_setup(csi2_dev, platform_dev);
+ if (ret)
+ return ret;
+
+ ret = sun6i_mipi_csi2_bridge_setup(csi2_dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sun6i_mipi_csi2_remove(struct platform_device *platform_dev)
+{
+ struct sun6i_mipi_csi2_device *csi2_dev =
+ platform_get_drvdata(platform_dev);
+
+ sun6i_mipi_csi2_bridge_cleanup(csi2_dev);
+ sun6i_mipi_csi2_resources_cleanup(csi2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_mipi_csi2_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-mipi-csi2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_mipi_csi2_of_match);
+
+static struct platform_driver sun6i_mipi_csi2_platform_driver = {
+ .probe = sun6i_mipi_csi2_probe,
+ .remove = sun6i_mipi_csi2_remove,
+ .driver = {
+ .name = SUN6I_MIPI_CSI2_NAME,
+ .of_match_table = of_match_ptr(sun6i_mipi_csi2_of_match),
+ .pm = &sun6i_mipi_csi2_pm_ops,
+ },
+};
+module_platform_driver(sun6i_mipi_csi2_platform_driver);
+
+MODULE_DESCRIPTION("Allwinner A31 MIPI CSI-2 Controller Driver");
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_LICENSE("GPL");
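
The comment in sun6i_mipi_csi2_s_stream() above notes that phy_mipi_dphy_get_default_config() derives hs_clk_rate from the pixel rate, bits per pixel and lane count, and that the actual DDR lane clock is half of that rate. A worked example with made-up numbers, assuming the per-lane rate is simply pixel_rate * bpp / lanes:

#include <stdio.h>

int main(void)
{
        /* Illustrative numbers only: 100 Mpix/s sensor, RAW10, 4 data lanes. */
        unsigned long long pixel_rate = 100000000ULL;   /* pixels per second */
        unsigned int bpp = 10;                          /* bits per pixel   */
        unsigned int lanes = 4;

        /* Per-lane HS bit rate, as derived from pixel rate, bpp and lanes. */
        unsigned long long hs_clk_rate = pixel_rate * bpp / lanes;

        /* DDR: data is sampled on both clock edges, so the lane clock is half. */
        unsigned long long ddr_clock = hs_clk_rate / 2;

        printf("hs bit rate: %llu bit/s, DDR clock: %llu Hz\n",
               hs_clk_rate, ddr_clock);
        return 0;       /* prints 250000000 bit/s and 125000000 Hz */
}
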
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.h b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.h
new file mode 100644
index 000000000000..24b15e34b5e8
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_MIPI_CSI2_H_
+#define _SUN6I_MIPI_CSI2_H_
+
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define SUN6I_MIPI_CSI2_NAME "sun6i-mipi-csi2"
+
+enum sun6i_mipi_csi2_pad {
+ SUN6I_MIPI_CSI2_PAD_SINK = 0,
+ SUN6I_MIPI_CSI2_PAD_SOURCE = 1,
+ SUN6I_MIPI_CSI2_PAD_COUNT = 2,
+};
+
+struct sun6i_mipi_csi2_format {
+ u32 mbus_code;
+ u8 data_type;
+ u32 bpp;
+};
+
+struct sun6i_mipi_csi2_bridge {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[SUN6I_MIPI_CSI2_PAD_COUNT];
+ struct v4l2_fwnode_endpoint endpoint;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_mbus_framefmt mbus_format;
+ struct mutex lock; /* Mbus format lock. */
+
+ struct v4l2_subdev *source_subdev;
+};
+
+struct sun6i_mipi_csi2_device {
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct clk *clock_mod;
+ struct reset_control *reset;
+ struct phy *dphy;
+
+ struct sun6i_mipi_csi2_bridge bridge;
+};
+
+#endif
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2_reg.h b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2_reg.h
new file mode 100644
index 000000000000..d9c92cf2b038
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2_reg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_MIPI_CSI2_REG_H_
+#define _SUN6I_MIPI_CSI2_REG_H_
+
+#define SUN6I_MIPI_CSI2_CTL_REG 0x0
+#define SUN6I_MIPI_CSI2_CTL_RESET_N BIT(31)
+#define SUN6I_MIPI_CSI2_CTL_VERSION_EN BIT(30)
+#define SUN6I_MIPI_CSI2_CTL_UNPK_EN BIT(1)
+#define SUN6I_MIPI_CSI2_CTL_EN BIT(0)
+
+#define SUN6I_MIPI_CSI2_CFG_REG 0x4
+#define SUN6I_MIPI_CSI2_CFG_CHANNEL_MODE(v) ((((v) - 1) << 8) & \
+ GENMASK(9, 8))
+#define SUN6I_MIPI_CSI2_CFG_LANE_COUNT(v) (((v) - 1) & GENMASK(1, 0))
+
+#define SUN6I_MIPI_CSI2_VCDT_RX_REG 0x8
+#define SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(ch, vc) (((vc) & GENMASK(1, 0)) << \
+ ((ch) * 8 + 6))
+#define SUN6I_MIPI_CSI2_VCDT_RX_CH_DT(ch, t) (((t) & GENMASK(5, 0)) << \
+ ((ch) * 8))
+#define SUN6I_MIPI_CSI2_RX_PKT_NUM_REG 0xc
+
+#define SUN6I_MIPI_CSI2_VERSION_REG 0x3c
+
+#define SUN6I_MIPI_CSI2_CH_CFG_REG 0x40
+#define SUN6I_MIPI_CSI2_CH_INT_EN_REG 0x50
+#define SUN6I_MIPI_CSI2_CH_INT_EN_EOT_ERR BIT(29)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_CHKSUM_ERR BIT(28)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_ECC_WRN BIT(27)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_ECC_ERR BIT(26)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_LINE_SYNC_ERR BIT(25)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_FRAME_SYNC_ERR BIT(24)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_EMB_DATA BIT(18)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_PF BIT(17)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_PH_UPDATE BIT(16)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_LINE_START_SYNC BIT(11)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_LINE_END_SYNC BIT(10)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_FRAME_START_SYNC BIT(9)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_FRAME_END_SYNC BIT(8)
+#define SUN6I_MIPI_CSI2_CH_INT_EN_FIFO_OVER BIT(0)
+
+#define SUN6I_MIPI_CSI2_CH_INT_PD_REG 0x58
+#define SUN6I_MIPI_CSI2_CH_INT_PD_CLEAR 0xff
+#define SUN6I_MIPI_CSI2_CH_INT_PD_EOT_ERR BIT(29)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_CHKSUM_ERR BIT(28)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_ECC_WRN BIT(27)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_ECC_ERR BIT(26)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_LINE_SYNC_ERR BIT(25)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_FRAME_SYNC_ERR BIT(24)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_EMB_DATA BIT(18)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_PF BIT(17)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_PH_UPDATE BIT(16)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_LINE_START_SYNC BIT(11)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_LINE_END_SYNC BIT(10)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_FRAME_START_SYNC BIT(9)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_FRAME_END_SYNC BIT(8)
+#define SUN6I_MIPI_CSI2_CH_INT_PD_FIFO_OVER BIT(0)
+
+#define SUN6I_MIPI_CSI2_CH_DT_TRIGGER_REG 0x60
+#define SUN6I_MIPI_CSI2_CH_CUR_PH_REG 0x70
+#define SUN6I_MIPI_CSI2_CH_ECC_REG 0x74
+#define SUN6I_MIPI_CSI2_CH_CKS_REG 0x78
+#define SUN6I_MIPI_CSI2_CH_FRAME_NUM_REG 0x7c
+#define SUN6I_MIPI_CSI2_CH_LINE_NUM_REG 0x80
+
+#define SUN6I_MIPI_CSI2_CH_OFFSET 0x100
+
+#define SUN6I_MIPI_CSI2_CH_REG(reg, ch) \
+ (SUN6I_MIPI_CSI2_CH_OFFSET * (ch) + (reg))
+
+#endif
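
The header above packs per-channel virtual-channel and data-type fields in 8-bit strides (the VCDT_RX_CH_* macros) and spaces per-channel register banks 0x100 apart (SUN6I_MIPI_CSI2_CH_REG()). A standalone sketch with shortened names that reproduces the packing so the resulting values can be checked in isolation; RAW10 corresponds to CSI-2 data type 0x2b:

#include <stdio.h>
#include <stdint.h>

/* Shortened copies of the field helpers from the header above. */
#define CH_OFFSET               0x100
#define CH_REG(reg, ch)         (CH_OFFSET * (ch) + (reg))

#define VCDT_RX_CH_VC(ch, vc)   (((uint32_t)(vc) & 0x3) << ((ch) * 8 + 6))
#define VCDT_RX_CH_DT(ch, dt)   (((uint32_t)(dt) & 0x3f) << ((ch) * 8))

int main(void)
{
        /* Per-channel register: CH_INT_PD (0x58) for channel 1 sits at 0x158. */
        printf("CH_INT_PD(ch=1) = 0x%x\n", CH_REG(0x58, 1));

        /*
         * The value the driver writes to VCDT_RX: virtual channel n routed to
         * channel n, with channel 0 carrying RAW10 (data type 0x2b).
         */
        uint32_t vcdt = VCDT_RX_CH_VC(3, 3) | VCDT_RX_CH_VC(2, 2) |
                        VCDT_RX_CH_VC(1, 1) | VCDT_RX_CH_VC(0, 0) |
                        VCDT_RX_CH_DT(0, 0x2b);

        printf("VCDT_RX = 0x%08x\n", (unsigned int)vcdt);
        return 0;
}
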
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
new file mode 100644
index 000000000000..789d58ee12ea
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_SUN8I_A83T_MIPI_CSI2
+ tristate "Allwinner A83T MIPI CSI-2 Controller and D-PHY Driver"
+ depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on PM && COMMON_CLK
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ select REGMAP_MMIO
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Makefile b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Makefile
new file mode 100644
index 000000000000..1427d15a879a
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sun8i-a83t-mipi-csi2-y += sun8i_a83t_mipi_csi2.o sun8i_a83t_dphy.o
+
+obj-$(CONFIG_VIDEO_SUN8I_A83T_MIPI_CSI2) += sun8i-a83t-mipi-csi2.o
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.c
new file mode 100644
index 000000000000..24bbcc85013d
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+
+#include "sun8i_a83t_dphy.h"
+#include "sun8i_a83t_mipi_csi2.h"
+
+static int sun8i_a83t_dphy_configure(struct phy *dphy,
+ union phy_configure_opts *opts)
+{
+ return phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+}
+
+static int sun8i_a83t_dphy_power_on(struct phy *dphy)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev = phy_get_drvdata(dphy);
+ struct regmap *regmap = csi2_dev->regmap;
+
+ regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG,
+ SUN8I_A83T_DPHY_CTRL_RESET_N |
+ SUN8I_A83T_DPHY_CTRL_SHUTDOWN_N);
+
+ regmap_write(regmap, SUN8I_A83T_DPHY_ANA0_REG,
+ SUN8I_A83T_DPHY_ANA0_REXT_EN |
+ SUN8I_A83T_DPHY_ANA0_RINT(2) |
+ SUN8I_A83T_DPHY_ANA0_SNK(2));
+
+ return 0;
+};
+
+static int sun8i_a83t_dphy_power_off(struct phy *dphy)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev = phy_get_drvdata(dphy);
+ struct regmap *regmap = csi2_dev->regmap;
+
+ regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0);
+
+ return 0;
+};
+
+static const struct phy_ops sun8i_a83t_dphy_ops = {
+ .configure = sun8i_a83t_dphy_configure,
+ .power_on = sun8i_a83t_dphy_power_on,
+ .power_off = sun8i_a83t_dphy_power_off,
+};
+
+int sun8i_a83t_dphy_register(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct device *dev = csi2_dev->dev;
+ struct phy_provider *phy_provider;
+
+ csi2_dev->dphy = devm_phy_create(dev, NULL, &sun8i_a83t_dphy_ops);
+ if (IS_ERR(csi2_dev->dphy)) {
+ dev_err(dev, "failed to create D-PHY\n");
+ return PTR_ERR(csi2_dev->dphy);
+ }
+
+ phy_set_drvdata(csi2_dev->dphy, csi2_dev);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register D-PHY provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.h b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.h
new file mode 100644
index 000000000000..9ab709060770
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN8I_A83T_DPHY_H_
+#define _SUN8I_A83T_DPHY_H_
+
+#include "sun8i_a83t_mipi_csi2.h"
+
+#define SUN8I_A83T_DPHY_CTRL_REG 0x10
+#define SUN8I_A83T_DPHY_CTRL_INIT_VALUE 0xb8df698e
+#define SUN8I_A83T_DPHY_CTRL_RESET_N BIT(31)
+#define SUN8I_A83T_DPHY_CTRL_SHUTDOWN_N BIT(15)
+#define SUN8I_A83T_DPHY_CTRL_DEBUG BIT(8)
+#define SUN8I_A83T_DPHY_STATUS_REG 0x14
+#define SUN8I_A83T_DPHY_STATUS_CLK_STOP BIT(10)
+#define SUN8I_A83T_DPHY_STATUS_CLK_ULPS BIT(9)
+#define SUN8I_A83T_DPHY_STATUS_HSCLK BIT(8)
+#define SUN8I_A83T_DPHY_STATUS_D3_STOP BIT(7)
+#define SUN8I_A83T_DPHY_STATUS_D2_STOP BIT(6)
+#define SUN8I_A83T_DPHY_STATUS_D1_STOP BIT(5)
+#define SUN8I_A83T_DPHY_STATUS_D0_STOP BIT(4)
+#define SUN8I_A83T_DPHY_STATUS_D3_ULPS BIT(3)
+#define SUN8I_A83T_DPHY_STATUS_D2_ULPS BIT(2)
+#define SUN8I_A83T_DPHY_STATUS_D1_ULPS BIT(1)
+#define SUN8I_A83T_DPHY_STATUS_D0_ULPS BIT(0)
+
+#define SUN8I_A83T_DPHY_ANA0_REG 0x30
+#define SUN8I_A83T_DPHY_ANA0_REXT_EN BIT(31)
+#define SUN8I_A83T_DPHY_ANA0_REXT BIT(30)
+#define SUN8I_A83T_DPHY_ANA0_RINT(v) (((v) << 28) & GENMASK(29, 28))
+#define SUN8I_A83T_DPHY_ANA0_SNK(v) (((v) << 20) & GENMASK(22, 20))
+
+int sun8i_a83t_dphy_register(struct sun8i_a83t_mipi_csi2_device *csi2_dev);
+
+#endif
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
new file mode 100644
index 000000000000..d052ee77ef0a
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "sun8i_a83t_dphy.h"
+#include "sun8i_a83t_mipi_csi2.h"
+#include "sun8i_a83t_mipi_csi2_reg.h"
+
+/* Format */
+
+static const struct sun8i_a83t_mipi_csi2_format
+sun8i_a83t_mipi_csi2_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ },
+};
+
+static const struct sun8i_a83t_mipi_csi2_format *
+sun8i_a83t_mipi_csi2_format_find(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats); i++)
+ if (sun8i_a83t_mipi_csi2_formats[i].mbus_code == mbus_code)
+ return &sun8i_a83t_mipi_csi2_formats[i];
+
+ return NULL;
+}
+
+/* Controller */
+
+static void
+sun8i_a83t_mipi_csi2_init(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+
+ /*
+ * The Allwinner BSP sets various magic values on a bunch of registers.
+ * This initialization appears to be necessary: if it is skipped, capture
+ * fails and unsolicited interrupts are raised.
+ *
+ * Most of the registers are set to proper values later, except for the
+ * two reserved registers. They are said to hold a "hardware lock"
+ * value, without more information available.
+ */
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
+ SUN8I_A83T_MIPI_CSI2_CTRL_INIT_VALUE);
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG, 0);
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG,
+ SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_INIT_VALUE);
+
+ regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0);
+ regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG,
+ SUN8I_A83T_DPHY_CTRL_INIT_VALUE);
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG, 0);
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG,
+ SUN8I_A83T_MIPI_CSI2_RSVD1_HW_LOCK_VALUE);
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG, 0);
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG,
+ SUN8I_A83T_MIPI_CSI2_RSVD2_HW_LOCK_VALUE);
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG, 0);
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
+ SUN8I_A83T_MIPI_CSI2_CFG_INIT_VALUE);
+}
+
+static void
+sun8i_a83t_mipi_csi2_enable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+
+ regmap_update_bits(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
+ SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN,
+ SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN);
+}
+
+static void
+sun8i_a83t_mipi_csi2_disable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+
+ regmap_update_bits(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
+ SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN, 0);
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
+}
+
+static void
+sun8i_a83t_mipi_csi2_configure(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct regmap *regmap = csi2_dev->regmap;
+ unsigned int lanes_count =
+ csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
+ struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
+ const struct sun8i_a83t_mipi_csi2_format *format;
+ struct device *dev = csi2_dev->dev;
+ u32 version = 0;
+
+ format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
+ if (WARN_ON(!format))
+ return;
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
+ SUN8I_A83T_MIPI_CSI2_CTRL_RESET_N);
+
+ regmap_read(regmap, SUN8I_A83T_MIPI_CSI2_VERSION_REG, &version);
+
+ dev_dbg(dev, "A83T MIPI CSI-2 version: %04x\n", version);
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
+ SUN8I_A83T_MIPI_CSI2_CFG_UNPKT_EN |
+ SUN8I_A83T_MIPI_CSI2_CFG_SYNC_DLY_CYCLE(8) |
+ SUN8I_A83T_MIPI_CSI2_CFG_N_CHANNEL(1) |
+ SUN8I_A83T_MIPI_CSI2_CFG_N_LANE(lanes_count));
+
+ /*
+ * Only a single virtual channel (index 0) is currently supported.
+ * While the registers do mention multiple physical channels being
+ * available (which can be configured to match a specific virtual
+ * channel or data type), it's unclear whether channels > 0 are actually
+ * connected and available, and the reference source code only uses
+ * channel 0.
+ *
+ * Using extra channels would also require matching channels to be
+ * available on the CSI (and ISP) side, which is also uncertain, although
+ * some CSI implementations are said to support multiple channels for
+ * BT656 time-sharing.
+ *
+ * We still configure virtual channel numbers to ensure that virtual
+ * channel 0 only goes to channel 0.
+ */
+
+ regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_VCDT0_REG,
+ SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(3, 3) |
+ SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(2, 2) |
+ SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(1, 1) |
+ SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(0, 0) |
+ SUN8I_A83T_MIPI_CSI2_VCDT0_CH_DT(0, format->data_type));
+}
+
+/* V4L2 Subdev */
+
+static int sun8i_a83t_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev =
+ v4l2_get_subdevdata(subdev);
+ struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
+ union phy_configure_opts dphy_opts = { 0 };
+ struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
+ struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
+ const struct sun8i_a83t_mipi_csi2_format *format;
+ struct phy *dphy = csi2_dev->dphy;
+ struct device *dev = csi2_dev->dev;
+ struct v4l2_ctrl *ctrl;
+ unsigned int lanes_count =
+ csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
+ unsigned long pixel_rate;
+ int ret;
+
+ if (!source_subdev)
+ return -ENODEV;
+
+ if (!on) {
+ ret = v4l2_subdev_call(source_subdev, video, s_stream, 0);
+ goto disable;
+ }
+
+ /* Runtime PM */
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Sensor pixel rate */
+
+ ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ dev_err(dev, "missing sensor pixel rate\n");
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
+ if (!pixel_rate) {
+ dev_err(dev, "missing (zero) sensor pixel rate\n");
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ /* D-PHY */
+
+ if (!lanes_count) {
+ dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
+ if (WARN_ON(!format)) {
+ ret = -ENODEV;
+ goto error_pm;
+ }
+
+ phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
+ dphy_cfg);
+
+ /*
+ * Note that our hardware uses DDR, which is not taken into account by
+ * phy_mipi_dphy_get_default_config when calculating hs_clk_rate from
+ * the pixel rate, lane count and bpp.
+ *
+ * The resulting clock rate is basically the symbol rate over the whole
+ * link. The actual clock rate is obtained by dividing it by two, since
+ * DDR samples on both the rising and falling edges.
+ */
+
+ dev_dbg(dev, "A83T MIPI CSI-2 config:\n");
+ dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
+ pixel_rate, format->bpp, lanes_count,
+ dphy_cfg->hs_clk_rate / 2);
+
+ ret = phy_reset(dphy);
+ if (ret) {
+ dev_err(dev, "failed to reset MIPI D-PHY\n");
+ goto error_pm;
+ }
+
+ ret = phy_configure(dphy, &dphy_opts);
+ if (ret) {
+ dev_err(dev, "failed to configure MIPI D-PHY\n");
+ goto error_pm;
+ }
+
+ /* Controller */
+
+ sun8i_a83t_mipi_csi2_configure(csi2_dev);
+ sun8i_a83t_mipi_csi2_enable(csi2_dev);
+
+ /* D-PHY */
+
+ ret = phy_power_on(dphy);
+ if (ret) {
+ dev_err(dev, "failed to power on MIPI D-PHY\n");
+ goto error_pm;
+ }
+
+ /* Source */
+
+ ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto disable;
+
+ return 0;
+
+disable:
+ if (!on)
+ ret = 0;
+ phy_power_off(dphy);
+ sun8i_a83t_mipi_csi2_disable(csi2_dev);
+
+error_pm:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops
+sun8i_a83t_mipi_csi2_video_ops = {
+ .s_stream = sun8i_a83t_mipi_csi2_s_stream,
+};
+
+static void
+sun8i_a83t_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
+{
+ if (!sun8i_a83t_mipi_csi2_format_find(mbus_format->code))
+ mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
+
+ mbus_format->field = V4L2_FIELD_NONE;
+ mbus_format->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int sun8i_a83t_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev =
+ v4l2_get_subdevdata(subdev);
+ unsigned int pad = SUN8I_A83T_MIPI_CSI2_PAD_SINK;
+ struct v4l2_mbus_framefmt *mbus_format =
+ v4l2_subdev_get_try_format(subdev, state, pad);
+ struct mutex *lock = &csi2_dev->bridge.lock;
+
+ mutex_lock(lock);
+
+ mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
+ mbus_format->width = 640;
+ mbus_format->height = 480;
+
+ sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static int
+sun8i_a83t_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code_enum)
+{
+ if (code_enum->index >= ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats))
+ return -EINVAL;
+
+ code_enum->code =
+ sun8i_a83t_mipi_csi2_formats[code_enum->index].mbus_code;
+
+ return 0;
+}
+
+static int sun8i_a83t_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev =
+ v4l2_get_subdevdata(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ struct mutex *lock = &csi2_dev->bridge.lock;
+
+ mutex_lock(lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *mbus_format = *v4l2_subdev_get_try_format(subdev, state,
+ format->pad);
+ else
+ *mbus_format = csi2_dev->bridge.mbus_format;
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static int sun8i_a83t_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev =
+ v4l2_get_subdevdata(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ struct mutex *lock = &csi2_dev->bridge.lock;
+
+ mutex_lock(lock);
+
+ sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *v4l2_subdev_get_try_format(subdev, state, format->pad) =
+ *mbus_format;
+ else
+ csi2_dev->bridge.mbus_format = *mbus_format;
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops sun8i_a83t_mipi_csi2_pad_ops = {
+ .init_cfg = sun8i_a83t_mipi_csi2_init_cfg,
+ .enum_mbus_code = sun8i_a83t_mipi_csi2_enum_mbus_code,
+ .get_fmt = sun8i_a83t_mipi_csi2_get_fmt,
+ .set_fmt = sun8i_a83t_mipi_csi2_set_fmt,
+};
+
+static const struct v4l2_subdev_ops sun8i_a83t_mipi_csi2_subdev_ops = {
+ .video = &sun8i_a83t_mipi_csi2_video_ops,
+ .pad = &sun8i_a83t_mipi_csi2_pad_ops,
+};
+
+/* Media Entity */
+
+static const struct media_entity_operations sun8i_a83t_mipi_csi2_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* V4L2 Async */
+
+static int
+sun8i_a83t_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *remote_subdev,
+ struct v4l2_async_subdev *async_subdev)
+{
+ struct v4l2_subdev *subdev = notifier->sd;
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev =
+ container_of(notifier, struct sun8i_a83t_mipi_csi2_device,
+ bridge.notifier);
+ struct media_entity *sink_entity = &subdev->entity;
+ struct media_entity *source_entity = &remote_subdev->entity;
+ struct device *dev = csi2_dev->dev;
+ int sink_pad_index = 0;
+ int source_pad_index;
+ int ret;
+
+ ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "missing source pad in external entity %s\n",
+ source_entity->name);
+ return -EINVAL;
+ }
+
+ source_pad_index = ret;
+
+ dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
+ source_pad_index, sink_entity->name, sink_pad_index);
+
+ ret = media_create_pad_link(source_entity, source_pad_index,
+ sink_entity, sink_pad_index,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
+ source_entity->name, source_pad_index,
+ sink_entity->name, sink_pad_index);
+ return ret;
+ }
+
+ csi2_dev->bridge.source_subdev = remote_subdev;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations
+sun8i_a83t_mipi_csi2_notifier_ops = {
+ .bound = sun8i_a83t_mipi_csi2_notifier_bound,
+};
+
+/* Bridge */
+
+static int
+sun8i_a83t_mipi_csi2_bridge_source_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
+ struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
+ struct v4l2_async_subdev *subdev_async;
+ struct fwnode_handle *handle;
+ struct device *dev = csi2_dev->dev;
+ int ret;
+
+ handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!handle)
+ return -ENODEV;
+
+ endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;
+
+ ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
+ if (ret)
+ goto complete;
+
+ subdev_async =
+ v4l2_async_nf_add_fwnode_remote(notifier, handle,
+ struct v4l2_async_subdev);
+ if (IS_ERR(subdev_async))
+ ret = PTR_ERR(subdev_async);
+
+complete:
+ fwnode_handle_put(handle);
+
+ return ret;
+}
+
+static int
+sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct sun8i_a83t_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
+ struct v4l2_subdev *subdev = &bridge->subdev;
+ struct v4l2_async_notifier *notifier = &bridge->notifier;
+ struct media_pad *pads = bridge->pads;
+ struct device *dev = csi2_dev->dev;
+ int ret;
+
+ mutex_init(&bridge->lock);
+
+ /* V4L2 Subdev */
+
+ v4l2_subdev_init(subdev, &sun8i_a83t_mipi_csi2_subdev_ops);
+ strscpy(subdev->name, SUN8I_A83T_MIPI_CSI2_NAME, sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->owner = THIS_MODULE;
+ subdev->dev = dev;
+
+ v4l2_set_subdevdata(subdev, csi2_dev);
+
+ /* Media Entity */
+
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ subdev->entity.ops = &sun8i_a83t_mipi_csi2_entity_ops;
+
+ /* Media Pads */
+
+ pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity,
+ SUN8I_A83T_MIPI_CSI2_PAD_COUNT, pads);
+ if (ret)
+ return ret;
+
+ /* V4L2 Async */
+
+ v4l2_async_nf_init(notifier);
+ notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;
+
+ ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
+ if (ret)
+ goto error_v4l2_notifier_cleanup;
+
+ ret = v4l2_async_subdev_nf_register(subdev, notifier);
+ if (ret < 0)
+ goto error_v4l2_notifier_cleanup;
+
+ /* V4L2 Subdev */
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0)
+ goto error_v4l2_notifier_unregister;
+
+ return 0;
+
+error_v4l2_notifier_unregister:
+ v4l2_async_nf_unregister(notifier);
+
+error_v4l2_notifier_cleanup:
+ v4l2_async_nf_cleanup(notifier);
+
+ media_entity_cleanup(&subdev->entity);
+
+ return ret;
+}
+
+static void
+sun8i_a83t_mipi_csi2_bridge_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
+ struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_async_nf_unregister(notifier);
+ v4l2_async_nf_cleanup(notifier);
+ media_entity_cleanup(&subdev->entity);
+}
+
+/* Platform */
+
+static int sun8i_a83t_mipi_csi2_suspend(struct device *dev)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(csi2_dev->clock_misc);
+ clk_disable_unprepare(csi2_dev->clock_mipi);
+ clk_disable_unprepare(csi2_dev->clock_mod);
+ reset_control_assert(csi2_dev->reset);
+
+ return 0;
+}
+
+static int sun8i_a83t_mipi_csi2_resume(struct device *dev)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(csi2_dev->reset);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(csi2_dev->clock_mod);
+ if (ret) {
+ dev_err(dev, "failed to enable module clock\n");
+ goto error_reset;
+ }
+
+ ret = clk_prepare_enable(csi2_dev->clock_mipi);
+ if (ret) {
+ dev_err(dev, "failed to enable MIPI clock\n");
+ goto error_clock_mod;
+ }
+
+ ret = clk_prepare_enable(csi2_dev->clock_misc);
+ if (ret) {
+ dev_err(dev, "failed to enable CSI misc clock\n");
+ goto error_clock_mipi;
+ }
+
+ sun8i_a83t_mipi_csi2_init(csi2_dev);
+
+ return 0;
+
+error_clock_mipi:
+ clk_disable_unprepare(csi2_dev->clock_mipi);
+
+error_clock_mod:
+ clk_disable_unprepare(csi2_dev->clock_mod);
+
+error_reset:
+ reset_control_assert(csi2_dev->reset);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sun8i_a83t_mipi_csi2_pm_ops = {
+ .runtime_suspend = sun8i_a83t_mipi_csi2_suspend,
+ .runtime_resume = sun8i_a83t_mipi_csi2_resume,
+};
+
+static const struct regmap_config sun8i_a83t_mipi_csi2_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x120,
+};
+
+static int
+sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev,
+ struct platform_device *platform_dev)
+{
+ struct device *dev = csi2_dev->dev;
+ void __iomem *io_base;
+ int ret;
+
+ /* Registers */
+
+ io_base = devm_platform_ioremap_resource(platform_dev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ csi2_dev->regmap =
+ devm_regmap_init_mmio_clk(dev, "bus", io_base,
+ &sun8i_a83t_mipi_csi2_regmap_config);
+ if (IS_ERR(csi2_dev->regmap)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(csi2_dev->regmap);
+ }
+
+ /* Clocks */
+
+ csi2_dev->clock_mod = devm_clk_get(dev, "mod");
+ if (IS_ERR(csi2_dev->clock_mod)) {
+ dev_err(dev, "failed to acquire mod clock\n");
+ return PTR_ERR(csi2_dev->clock_mod);
+ }
+
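+	/*
+	 * Pin the module clock at 297 MHz; the exclusive rate hold is
+	 * released in sun8i_a83t_mipi_csi2_resources_cleanup().
+	 */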
+ ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
+ if (ret) {
+ dev_err(dev, "failed to set mod clock rate\n");
+ return ret;
+ }
+
+ csi2_dev->clock_mipi = devm_clk_get(dev, "mipi");
+ if (IS_ERR(csi2_dev->clock_mipi)) {
+ dev_err(dev, "failed to acquire mipi clock\n");
+ return PTR_ERR(csi2_dev->clock_mipi);
+ }
+
+ csi2_dev->clock_misc = devm_clk_get(dev, "misc");
+ if (IS_ERR(csi2_dev->clock_misc)) {
+ dev_err(dev, "failed to acquire misc clock\n");
+ return PTR_ERR(csi2_dev->clock_misc);
+ }
+
+ /* Reset */
+
+ csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(csi2_dev->reset)) {
+ dev_err(dev, "failed to get reset controller\n");
+ return PTR_ERR(csi2_dev->reset);
+ }
+
+ /* D-PHY */
+
+ ret = sun8i_a83t_dphy_register(csi2_dev);
+ if (ret) {
+ dev_err(dev, "failed to initialize MIPI D-PHY\n");
+ return ret;
+ }
+
+ /* Runtime PM */
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void
+sun8i_a83t_mipi_csi2_resources_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+{
+ pm_runtime_disable(csi2_dev->dev);
+ phy_exit(csi2_dev->dphy);
+ clk_rate_exclusive_put(csi2_dev->clock_mod);
+}
+
+static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev;
+ struct device *dev = &platform_dev->dev;
+ int ret;
+
+ csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
+ if (!csi2_dev)
+ return -ENOMEM;
+
+ csi2_dev->dev = dev;
+ platform_set_drvdata(platform_dev, csi2_dev);
+
+ ret = sun8i_a83t_mipi_csi2_resources_setup(csi2_dev, platform_dev);
+ if (ret)
+ return ret;
+
+	ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev);
+	if (ret)
+		goto error_resources;
+
+	return 0;
+
+error_resources:
+	sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
+
+	return ret;
+}
+
+static int sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev)
+{
+ struct sun8i_a83t_mipi_csi2_device *csi2_dev =
+ platform_get_drvdata(platform_dev);
+
+ sun8i_a83t_mipi_csi2_bridge_cleanup(csi2_dev);
+ sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun8i_a83t_mipi_csi2_of_match[] = {
+ { .compatible = "allwinner,sun8i-a83t-mipi-csi2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun8i_a83t_mipi_csi2_of_match);
+
+static struct platform_driver sun8i_a83t_mipi_csi2_platform_driver = {
+ .probe = sun8i_a83t_mipi_csi2_probe,
+ .remove = sun8i_a83t_mipi_csi2_remove,
+ .driver = {
+ .name = SUN8I_A83T_MIPI_CSI2_NAME,
+ .of_match_table = of_match_ptr(sun8i_a83t_mipi_csi2_of_match),
+ .pm = &sun8i_a83t_mipi_csi2_pm_ops,
+ },
+};
+module_platform_driver(sun8i_a83t_mipi_csi2_platform_driver);
+
+MODULE_DESCRIPTION("Allwinner A83T MIPI CSI-2 and D-PHY Controller Driver");
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.h b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.h
new file mode 100644
index 000000000000..f1e64c53434c
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN8I_A83T_MIPI_CSI2_H_
+#define _SUN8I_A83T_MIPI_CSI2_H_
+
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define SUN8I_A83T_MIPI_CSI2_NAME "sun8i-a83t-mipi-csi2"
+
+enum sun8i_a83t_mipi_csi2_pad {
+ SUN8I_A83T_MIPI_CSI2_PAD_SINK = 0,
+ SUN8I_A83T_MIPI_CSI2_PAD_SOURCE = 1,
+ SUN8I_A83T_MIPI_CSI2_PAD_COUNT = 2,
+};
+
+struct sun8i_a83t_mipi_csi2_format {
+ u32 mbus_code;
+ u8 data_type;
+ u32 bpp;
+};
+
+struct sun8i_a83t_mipi_csi2_bridge {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[SUN8I_A83T_MIPI_CSI2_PAD_COUNT];
+ struct v4l2_fwnode_endpoint endpoint;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_mbus_framefmt mbus_format;
+ struct mutex lock; /* Mbus format lock. */
+
+ struct v4l2_subdev *source_subdev;
+};
+
+struct sun8i_a83t_mipi_csi2_device {
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct clk *clock_mod;
+ struct clk *clock_mipi;
+ struct clk *clock_misc;
+ struct reset_control *reset;
+ struct phy *dphy;
+
+ struct sun8i_a83t_mipi_csi2_bridge bridge;
+};
+
+#endif
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2_reg.h b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2_reg.h
new file mode 100644
index 000000000000..2cfc9eb490e6
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2_reg.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
+ * Copyright 2020-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN8I_A83T_MIPI_CSI2_REG_H_
+#define _SUN8I_A83T_MIPI_CSI2_REG_H_
+
+#define SUN8I_A83T_MIPI_CSI2_VERSION_REG 0x0
+#define SUN8I_A83T_MIPI_CSI2_CTRL_REG 0x4
+#define SUN8I_A83T_MIPI_CSI2_CTRL_INIT_VALUE 0xb8c39bec
+#define SUN8I_A83T_MIPI_CSI2_CTRL_RESET_N BIT(31)
+#define SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG 0x8
+#define SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_INIT_VALUE 0xb8d257f8
+#define SUN8I_A83T_MIPI_CSI2_RSVD0_REG 0xc
+
+#define SUN8I_A83T_MIPI_CSI2_RSVD1_REG 0x18
+#define SUN8I_A83T_MIPI_CSI2_RSVD1_HW_LOCK_VALUE 0xb8c8a30c
+#define SUN8I_A83T_MIPI_CSI2_RSVD2_REG 0x1c
+#define SUN8I_A83T_MIPI_CSI2_RSVD2_HW_LOCK_VALUE 0xb8df8ad7
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_REG 0x20
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_ECC_ERR_DBL BIT(28)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC3 BIT(27)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC2 BIT(26)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC1 BIT(25)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC0 BIT(24)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT3 BIT(23)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT2 BIT(22)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT1 BIT(21)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT0 BIT(20)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT3 BIT(19)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT2 BIT(18)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT1 BIT(17)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT0 BIT(16)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC3 BIT(15)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC2 BIT(14)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC1 BIT(13)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC0 BIT(12)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC3 BIT(11)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC2 BIT(10)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC1 BIT(9)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC0 BIT(8)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC3 BIT(7)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC2 BIT(6)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC1 BIT(5)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC0 BIT(4)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_3 BIT(3)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_2 BIT(2)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_1 BIT(1)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_0 BIT(0)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_REG 0x24
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT7 BIT(23)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT6 BIT(22)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT5 BIT(21)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT4 BIT(20)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT7 BIT(19)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT6 BIT(18)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT5 BIT(17)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT4 BIT(16)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC3 BIT(15)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC2 BIT(14)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC1 BIT(13)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC0 BIT(12)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC3 BIT(11)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC2 BIT(10)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC1 BIT(9)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC0 BIT(8)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_3 BIT(7)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_2 BIT(6)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_1 BIT(5)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_0 BIT(4)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_3 BIT(3)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_2 BIT(2)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_1 BIT(1)
+#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_0 BIT(0)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_REG 0x28
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_ECC_ERR_DBL BIT(28)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC3 BIT(27)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC2 BIT(26)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC1 BIT(25)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC0 BIT(24)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT3 BIT(23)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT2 BIT(22)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT1 BIT(21)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT0 BIT(20)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT3 BIT(19)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT2 BIT(18)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT1 BIT(17)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT0 BIT(16)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC3 BIT(15)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC2 BIT(14)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC1 BIT(13)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC0 BIT(12)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC3 BIT(11)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC2 BIT(10)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC1 BIT(9)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC0 BIT(8)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC3 BIT(7)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC2 BIT(6)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC1 BIT(5)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC0 BIT(4)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_3 BIT(3)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_2 BIT(2)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_1 BIT(1)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_0 BIT(0)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_REG 0x2c
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC3 BIT(15)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC2 BIT(14)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC1 BIT(13)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC0 BIT(12)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC3 BIT(11)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC2 BIT(10)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC1 BIT(9)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC0 BIT(8)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_3 BIT(7)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_2 BIT(6)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_1 BIT(5)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_0 BIT(4)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_3 BIT(3)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_2 BIT(2)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_1 BIT(1)
+#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_0 BIT(0)
+
+#define SUN8I_A83T_MIPI_CSI2_CFG_REG 0x100
+#define SUN8I_A83T_MIPI_CSI2_CFG_INIT_VALUE 0xb8c64f24
+#define SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN BIT(31)
+#define SUN8I_A83T_MIPI_CSI2_CFG_BYPASS_ECC_EN BIT(29)
+#define SUN8I_A83T_MIPI_CSI2_CFG_UNPKT_EN BIT(28)
+#define SUN8I_A83T_MIPI_CSI2_CFG_NONE_UNPKT_RX_MODE BIT(27)
+#define SUN8I_A83T_MIPI_CSI2_CFG_YC_SWAB BIT(26)
+#define SUN8I_A83T_MIPI_CSI2_CFG_N_BYTE BIT(24)
+#define SUN8I_A83T_MIPI_CSI2_CFG_SYNC_DLY_CYCLE(v) (((v) << 18) & \
+ GENMASK(22, 18))
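+/* N_CHANNEL and N_LANE are programmed as the count minus one. */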
+#define SUN8I_A83T_MIPI_CSI2_CFG_N_CHANNEL(v) ((((v) - 1) << 16) & \
+ GENMASK(17, 16))
+#define SUN8I_A83T_MIPI_CSI2_CFG_N_LANE(v) ((((v) - 1) << 4) & \
+ GENMASK(5, 4))
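+/*
+ * VCDT0 covers channels 0-3 and VCDT1 channels 4-7, one byte per channel:
+ * virtual channel in bits 7:6, data type in bits 5:0.
+ */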
+#define SUN8I_A83T_MIPI_CSI2_VCDT0_REG 0x104
+#define SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(ch, vc) (((vc) & GENMASK(1, 0)) << \
+ ((ch) * 8 + 6))
+#define SUN8I_A83T_MIPI_CSI2_VCDT0_CH_DT(ch, t) (((t) & GENMASK(5, 0)) << \
+ ((ch) * 8))
+#define SUN8I_A83T_MIPI_CSI2_VCDT1_REG 0x108
+#define SUN8I_A83T_MIPI_CSI2_VCDT1_CH_VC(ch, vc) (((vc) & GENMASK(1, 0)) << \
+ (((ch) - 4) * 8 + 6))
+#define SUN8I_A83T_MIPI_CSI2_VCDT1_CH_DT(ch, t) (((t) & GENMASK(5, 0)) << \
+ (((ch) - 4) * 8))
+
+#endif
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index e69fed117fea..e136d70b4048 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -305,7 +305,7 @@ static int cal_camerarx_start(struct cal_camerarx *phy)
/*
* CSI-2 PHY Link Initialization Sequence, according to the DRA74xP /
* DRA75xP / DRA76xP / DRA77xP TRM. The DRA71x / DRA72x and the AM65x /
- * DRA80xM TRMs have a a slightly simplified sequence.
+ * DRA80xM TRMs have a slightly simplified sequence.
*/
/*
@@ -592,7 +592,7 @@ int cal_camerarx_get_remote_frame_desc(struct cal_camerarx *phy,
if (!phy->source)
return -EPIPE;
- pad = media_entity_remote_pad(&phy->pads[CAL_CAMERARX_PAD_SINK]);
+ pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
if (!pad)
return -EPIPE;
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index 07ae1a34e6b0..776da0cfcdbe 100644
--- a/drivers/media/platform/ti/cal/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -685,7 +685,7 @@ static int cal_video_check_format(struct cal_ctx *ctx)
const struct v4l2_mbus_framefmt *format;
struct media_pad *remote_pad;
- remote_pad = media_entity_remote_pad(&ctx->pad);
+ remote_pad = media_pad_remote_pad_first(&ctx->pad);
if (!remote_pad)
return -ENODEV;
diff --git a/drivers/media/platform/ti/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c
index 97ef770266af..da27da4c165a 100644
--- a/drivers/media/platform/ti/davinci/vpif.c
+++ b/drivers/media/platform/ti/davinci/vpif.c
@@ -469,6 +469,7 @@ static int vpif_probe(struct platform_device *pdev)
endpoint);
if (!endpoint)
return 0;
+ of_node_put(endpoint);
/*
* For DT platforms, manually create platform_devices for
diff --git a/drivers/media/platform/ti/davinci/vpif.h b/drivers/media/platform/ti/davinci/vpif.h
index c6d1d890478a..651943e3e375 100644
--- a/drivers/media/platform/ti/davinci/vpif.h
+++ b/drivers/media/platform/ti/davinci/vpif.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* VPIF header file
*
* Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef VPIF_H
@@ -685,4 +677,3 @@ struct vpif_params {
};
#endif /* End of #ifndef VPIF_H */
-
diff --git a/drivers/media/platform/ti/davinci/vpif_display.h b/drivers/media/platform/ti/davinci/vpif_display.h
index f98062e79167..f27474e0fc36 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.h
+++ b/drivers/media/platform/ti/davinci/vpif_display.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* VPIF display header file
*
* Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef VPIF_DISPLAY_H
diff --git a/drivers/media/platform/ti/omap/omap_voutlib.c b/drivers/media/platform/ti/omap/omap_voutlib.c
index 480a7e95533d..fdea2309ee37 100644
--- a/drivers/media/platform/ti/omap/omap_voutlib.c
+++ b/drivers/media/platform/ti/omap/omap_voutlib.c
@@ -314,7 +314,7 @@ unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
if (virt_addr) {
while (size > 0) {
- SetPageReserved(virt_to_page(addr));
+ SetPageReserved(virt_to_page((void *)addr));
addr += PAGE_SIZE;
size -= PAGE_SIZE;
}
@@ -335,7 +335,7 @@ void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
order = get_order(size);
while (size > 0) {
- ClearPageReserved(virt_to_page(addr));
+ ClearPageReserved(virt_to_page((void *)addr));
addr += PAGE_SIZE;
size -= PAGE_SIZE;
}
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index 4c937f3f323e..d251736eb420 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -700,7 +700,7 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -797,7 +797,7 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -942,7 +942,7 @@ static int isp_pipeline_is_last(struct media_entity *me)
pipe = to_isp_pipeline(me);
if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
return 0;
- pad = media_entity_remote_pad(&pipe->output->pad);
+ pad = media_pad_remote_pad_first(&pipe->output->pad);
return pad->entity == me;
}
diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index 108b5e9f82cb..11afb8aec292 100644
--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
@@ -1133,7 +1133,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
ccdc->bt656 = false;
ccdc->fields = 0;
- pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]);
+ pad = media_pad_remote_pad_first(&ccdc->pads[CCDC_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
if (ccdc->input == CCDC_INPUT_PARALLEL) {
struct v4l2_subdev *sd =
diff --git a/drivers/media/platform/ti/omap3isp/ispccp2.c b/drivers/media/platform/ti/omap3isp/ispccp2.c
index acb58b6ddba1..fc90ff88464f 100644
--- a/drivers/media/platform/ti/omap3isp/ispccp2.c
+++ b/drivers/media/platform/ti/omap3isp/ispccp2.c
@@ -357,7 +357,7 @@ static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
ccp2_pwr_cfg(ccp2);
- pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]);
+ pad = media_pad_remote_pad_first(&ccp2->pads[CCP2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
diff --git a/drivers/media/platform/ti/omap3isp/ispcsi2.c b/drivers/media/platform/ti/omap3isp/ispcsi2.c
index 6302e0c94034..6870980a2fa9 100644
--- a/drivers/media/platform/ti/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/ti/omap3isp/ispcsi2.c
@@ -561,7 +561,7 @@ static int csi2_configure(struct isp_csi2_device *csi2)
if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
return -EBUSY;
- pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+ pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index 8811d6dd4ee7..d7059180e80e 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
@@ -206,7 +206,7 @@ isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
struct media_pad *remote;
- remote = media_entity_remote_pad(&video->pad);
+ remote = media_pad_remote_pad_first(&video->pad);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
@@ -981,7 +981,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
continue;
/* ISP entities have always sink pad == 0. Find source. */
- source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
+ source_pad = media_pad_remote_pad_first(&ents[i]->pads[0]);
if (source_pad == NULL)
continue;
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index b31e5913a4cd..71d97042a470 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -118,7 +118,7 @@ static int video_mux_s_stream(struct v4l2_subdev *sd, int enable)
return -EINVAL;
}
- pad = media_entity_remote_pad(&sd->entity.pads[vmux->active]);
+ pad = media_pad_remote_pad_first(&sd->entity.pads[vmux->active]);
if (!pad) {
dev_err(sd->dev, "Failed to find remote source pad\n");
return -ENOLINK;
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index 051c60cba1e0..cf8e892c47f0 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -474,7 +474,7 @@ static struct v4l2_subdev *xcsi2rxss_get_remote_subdev(struct media_pad *local)
{
struct media_pad *remote;
- remote = media_entity_remote_pad(local);
+ remote = media_pad_remote_pad_first(local);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 338c3661d809..2d1ef7a25c33 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -44,7 +44,7 @@ xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
struct media_pad *remote;
- remote = media_entity_remote_pad(local);
+ remote = media_pad_remote_pad_first(local);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
@@ -107,7 +107,7 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index d0b0e0600952..48fe229c5b33 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -28,8 +28,8 @@ struct clk;
#define XVIP_MAX_HEIGHT 7680
/*
- * Pad IDs. IP cores with with multiple inputs or outputs should define
- * their own values.
+ * Pad IDs. IP cores with multiple inputs or outputs should define their own
+ * values.
*/
#define XVIP_PAD_SINK 0
#define XVIP_PAD_SOURCE 1
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 3155e876616d..fff4dd48eaca 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -135,8 +135,6 @@ MODULE_PARM_DESC(mouse, "Enable mouse device, default = yes");
#define dbginfo(dev, format, arg...) \
do { if (debug) dev_info(dev , format , ## arg); } while (0)
-#undef err
-#define err(format, arg...) printk(KERN_ERR format , ## arg)
struct ati_receiver_type {
/* either default_keymap or get_default_keymap should be set */
@@ -816,11 +814,12 @@ static int ati_remote_probe(struct usb_interface *interface,
struct ati_receiver_type *type = (struct ati_receiver_type *)id->driver_info;
struct ati_remote *ati_remote;
struct input_dev *input_dev;
+ struct device *device = &interface->dev;
struct rc_dev *rc_dev;
int err = -ENOMEM;
if (iface_host->desc.bNumEndpoints != 2) {
- err("%s: Unexpected desc.bNumEndpoints\n", __func__);
+ dev_err(device, "%s: Unexpected desc.bNumEndpoints\n", __func__);
return -ENODEV;
}
@@ -828,15 +827,15 @@ static int ati_remote_probe(struct usb_interface *interface,
endpoint_out = &iface_host->endpoint[1].desc;
if (!usb_endpoint_is_int_in(endpoint_in)) {
- err("%s: Unexpected endpoint_in\n", __func__);
+ dev_err(device, "%s: Unexpected endpoint_in\n", __func__);
return -ENODEV;
}
if (le16_to_cpu(endpoint_in->wMaxPacketSize) == 0) {
- err("%s: endpoint_in message size==0? \n", __func__);
+ dev_err(device, "%s: endpoint_in message size==0?\n", __func__);
return -ENODEV;
}
if (!usb_endpoint_is_int_out(endpoint_out)) {
- err("%s: Unexpected endpoint_out\n", __func__);
+ dev_err(device, "%s: Unexpected endpoint_out\n", __func__);
return -ENODEV;
}
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index b40dbf500186..1464ef9c55bc 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -38,7 +38,7 @@ struct igorplugusb {
struct timer_list timer;
- uint8_t buf_in[MAX_PACKET];
+ u8 *buf_in;
char phys[64];
};
@@ -110,7 +110,6 @@ static void igorplugusb_callback(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- usb_unlink_urb(urb);
return;
default:
dev_warn(ir->dev, "Error: urb status = %d\n", urb->status);
@@ -126,7 +125,7 @@ static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
ir->request.bRequest = cmd;
ir->urb->transfer_flags = 0;
ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
- if (ret)
+ if (ret && ret != -EPERM)
dev_err(ir->dev, "submit urb failed: %d", ret);
}
@@ -171,15 +170,18 @@ static int igorplugusb_probe(struct usb_interface *intf,
ir->request.bRequest = GET_INFRACODE;
ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
- ir->request.wLength = cpu_to_le16(sizeof(ir->buf_in));
+ ir->request.wLength = cpu_to_le16(MAX_PACKET);
ir->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->urb)
goto fail;
+ ir->buf_in = kmalloc(MAX_PACKET, GFP_KERNEL);
+ if (!ir->buf_in)
+ goto fail;
usb_fill_control_urb(ir->urb, udev,
usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request,
- ir->buf_in, sizeof(ir->buf_in), igorplugusb_callback, ir);
+ ir->buf_in, MAX_PACKET, igorplugusb_callback, ir);
usb_make_path(udev, ir->phys, sizeof(ir->phys));
@@ -220,9 +222,12 @@ static int igorplugusb_probe(struct usb_interface *intf,
return 0;
fail:
- rc_free_device(ir->rc);
- usb_free_urb(ir->urb);
+ usb_poison_urb(ir->urb);
del_timer(&ir->timer);
+ usb_unpoison_urb(ir->urb);
+ usb_free_urb(ir->urb);
+ rc_free_device(ir->rc);
+ kfree(ir->buf_in);
return ret;
}
@@ -232,10 +237,12 @@ static void igorplugusb_disconnect(struct usb_interface *intf)
struct igorplugusb *ir = usb_get_intfdata(intf);
rc_unregister_device(ir->rc);
+ usb_poison_urb(ir->urb);
del_timer_sync(&ir->timer);
usb_set_intfdata(intf, NULL);
- usb_kill_urb(ir->urb);
+ usb_unpoison_urb(ir->urb);
usb_free_urb(ir->urb);
+ kfree(ir->buf_in);
}
static const struct usb_device_id igorplugusb_table[] = {
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index c9cb8277723f..276bf3c8a8cb 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -149,10 +149,8 @@ static void iguanair_rx(struct urb *urb)
return;
ir = urb->context;
- if (!ir) {
- usb_unlink_urb(urb);
+ if (!ir)
return;
- }
switch (urb->status) {
case 0:
@@ -161,7 +159,6 @@ static void iguanair_rx(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- usb_unlink_urb(urb);
return;
case -EPIPE:
default:
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index d41580f6e4c7..b02ded52f19e 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -14,7 +14,7 @@ struct imon {
struct device *dev;
struct urb *ir_urb;
struct rc_dev *rcdev;
- __be64 ir_buf;
+ __be64 *ir_buf;
char phys[64];
};
@@ -29,7 +29,7 @@ struct imon {
static void imon_ir_data(struct imon *imon)
{
struct ir_raw_event rawir = {};
- u64 data = be64_to_cpu(imon->ir_buf);
+ u64 data = be64_to_cpup(imon->ir_buf);
u8 packet_no = data & 0xff;
int offset = 40;
int bit;
@@ -37,7 +37,7 @@ static void imon_ir_data(struct imon *imon)
if (packet_no == 0xff)
return;
- dev_dbg(imon->dev, "data: %*ph", 8, &imon->ir_buf);
+ dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);
/*
* Only the first 5 bytes contain IR data. Right shift so we move
@@ -137,10 +137,16 @@ static int imon_probe(struct usb_interface *intf,
if (!imon->ir_urb)
return -ENOMEM;
+ imon->ir_buf = kmalloc(sizeof(__be64), GFP_KERNEL);
+ if (!imon->ir_buf) {
+ ret = -ENOMEM;
+ goto free_urb;
+ }
+
imon->dev = &intf->dev;
usb_fill_int_urb(imon->ir_urb, udev,
usb_rcvintpipe(udev, ir_ep->bEndpointAddress),
- &imon->ir_buf, sizeof(imon->ir_buf),
+ imon->ir_buf, sizeof(__be64),
imon_ir_rx, imon, ir_ep->bInterval);
rcdev = devm_rc_allocate_device(&intf->dev, RC_DRIVER_IR_RAW);
@@ -177,6 +183,7 @@ static int imon_probe(struct usb_interface *intf,
free_urb:
usb_free_urb(imon->ir_urb);
+ kfree(imon->ir_buf);
return ret;
}
@@ -186,6 +193,7 @@ static void imon_disconnect(struct usb_interface *intf)
usb_kill_urb(imon->ir_urb);
usb_free_urb(imon->ir_urb);
+ kfree(imon->ir_buf);
}
static const struct usb_device_id imon_table[] = {
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 765375bda0c6..25ab61dae126 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -715,7 +715,7 @@ int lirc_register(struct rc_dev *dev)
const char *rx_type, *tx_type;
int err, minor;
- minor = ida_simple_get(&lirc_ida, 0, RC_DEV_MAX, GFP_KERNEL);
+ minor = ida_alloc_max(&lirc_ida, RC_DEV_MAX - 1, GFP_KERNEL);
if (minor < 0)
return minor;
@@ -760,7 +760,7 @@ int lirc_register(struct rc_dev *dev)
return 0;
out_ida:
- ida_simple_remove(&lirc_ida, minor);
+ ida_free(&lirc_ida, minor);
return err;
}
@@ -778,7 +778,7 @@ void lirc_unregister(struct rc_dev *dev)
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
cdev_device_del(&dev->lirc_cdev, &dev->lirc_dev);
- ida_simple_remove(&lirc_ida, MINOR(dev->lirc_dev.devt));
+ ida_free(&lirc_ida, MINOR(dev->lirc_dev.devt));
}
int __init lirc_dev_init(void)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index b90438a71c80..eba0cd30e314 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -786,7 +786,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
dev->last_toggle != toggle);
struct lirc_scancode sc = {
.scancode = scancode, .rc_proto = protocol,
- .flags = toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0,
+ .flags = (toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0) |
+ (!new_event ? LIRC_SCANCODE_FLAG_REPEAT : 0),
.keycode = keycode
};
@@ -1897,7 +1898,7 @@ int rc_register_device(struct rc_dev *dev)
if (!dev)
return -EINVAL;
- minor = ida_simple_get(&rc_ida, 0, RC_DEV_MAX, GFP_KERNEL);
+ minor = ida_alloc_max(&rc_ida, RC_DEV_MAX - 1, GFP_KERNEL);
if (minor < 0)
return minor;
@@ -1980,7 +1981,7 @@ out_rx_free:
out_raw:
ir_raw_event_free(dev);
out_minor:
- ida_simple_remove(&rc_ida, minor);
+ ida_free(&rc_ida, minor);
return rc;
}
EXPORT_SYMBOL_GPL(rc_register_device);
@@ -2040,7 +2041,7 @@ void rc_unregister_device(struct rc_dev *dev)
device_del(&dev->dev);
- ida_simple_remove(&rc_ida, dev->minor);
+ ida_free(&rc_ida, dev->minor);
if (!dev->managed_alloc)
rc_free_device(dev);
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index cb22316b3f00..9f2947af33aa 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1155,9 +1155,9 @@ static int redrat3_dev_resume(struct usb_interface *intf)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
- if (usb_submit_urb(rr3->narrow_urb, GFP_ATOMIC))
+ if (usb_submit_urb(rr3->narrow_urb, GFP_NOIO))
return -EIO;
- if (usb_submit_urb(rr3->wide_urb, GFP_ATOMIC))
+ if (usb_submit_urb(rr3->wide_urb, GFP_NOIO))
return -EIO;
led_classdev_resume(&rr3->led);
return 0;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index deb85330c940..9b209e687f25 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -406,7 +406,7 @@ static int streamzap_resume(struct usb_interface *intf)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
- if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
+ if (usb_submit_urb(sz->urb_in, GFP_NOIO)) {
dev_err(sz->dev, "Error submitting urb\n");
return -EIO;
}
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index 629787d53ee1..560a26f3965c 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -90,7 +90,6 @@ static void ttusbir_bulk_complete(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- usb_unlink_urb(urb);
return;
case -EPIPE:
default:
@@ -166,7 +165,6 @@ static void ttusbir_urb_complete(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- usb_unlink_urb(urb);
return;
case -EPIPE:
default:
@@ -402,7 +400,7 @@ static int ttusbir_resume(struct usb_interface *intf)
led_classdev_resume(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
- rc = usb_submit_urb(tt->urb[i], GFP_KERNEL);
+ rc = usb_submit_urb(tt->urb[i], GFP_NOIO);
if (rc) {
dev_warn(tt->dev, "failed to submit urb: %d\n", rc);
break;
diff --git a/drivers/media/rc/xbox_remote.c b/drivers/media/rc/xbox_remote.c
index 7424b2031152..a1572381d097 100644
--- a/drivers/media/rc/xbox_remote.c
+++ b/drivers/media/rc/xbox_remote.c
@@ -163,8 +163,8 @@ static void xbox_remote_rc_init(struct xbox_remote *xbox_remote)
rdev->dev.parent = &xbox_remote->interface->dev;
}
-static int xbox_remote_initialize(struct xbox_remote *xbox_remote,
- struct usb_endpoint_descriptor *endpoint_in)
+static void xbox_remote_initialize(struct xbox_remote *xbox_remote,
+ struct usb_endpoint_descriptor *endpoint_in)
{
struct usb_device *udev = xbox_remote->udev;
int pipe, maxp;
@@ -177,8 +177,6 @@ static int xbox_remote_initialize(struct xbox_remote *xbox_remote,
usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf,
maxp, xbox_remote_irq_in, xbox_remote,
endpoint_in->bInterval);
-
- return 0;
}
/*
@@ -249,9 +247,7 @@ static int xbox_remote_probe(struct usb_interface *interface,
xbox_remote_rc_init(xbox_remote);
/* Device Hardware Initialization */
- err = xbox_remote_initialize(xbox_remote, endpoint_in);
- if (err)
- goto exit_kill_urbs;
+ xbox_remote_initialize(xbox_remote, endpoint_in);
/* Set up and register rc device */
err = rc_register_device(xbox_remote->rdev);
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index be43f7d32df9..1d1bee111732 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -280,17 +280,13 @@ static int device_process(struct vicodec_ctx *ctx,
*/
if (!(ntohl(ctx->state.header.flags) & V4L2_FWHT_FL_I_FRAME)) {
struct vb2_buffer *ref_vb2_buf;
- int ref_buf_idx;
struct vb2_queue *vq_cap =
v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
- ref_buf_idx = vb2_find_timestamp(vq_cap,
- ctx->state.ref_frame_ts, 0);
- if (ref_buf_idx < 0)
+ ref_vb2_buf = vb2_find_buffer(vq_cap, ctx->state.ref_frame_ts);
+ if (!ref_vb2_buf)
return -EINVAL;
-
- ref_vb2_buf = vq_cap->bufs[ref_buf_idx];
if (ref_vb2_buf->state == VB2_BUF_STATE_ERROR)
ret = -EINVAL;
ctx->state.ref_frame.buf =
diff --git a/drivers/media/test-drivers/vimc/Makefile b/drivers/media/test-drivers/vimc/Makefile
index a53b2b532e9f..9b9631562473 100644
--- a/drivers/media/test-drivers/vimc/Makefile
+++ b/drivers/media/test-drivers/vimc/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vimc-y := vimc-core.o vimc-common.o vimc-streamer.o vimc-capture.o \
- vimc-debayer.o vimc-scaler.o vimc-sensor.o
+ vimc-debayer.o vimc-scaler.o vimc-sensor.o vimc-lens.o
obj-$(CONFIG_VIDEO_VIMC) += vimc.o
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index d1e2d0739c00..6c437802f91f 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -13,7 +13,7 @@
#include "vimc-common.h"
#include "vimc-streamer.h"
-struct vimc_cap_device {
+struct vimc_capture_device {
struct vimc_ent_device ved;
struct video_device vdev;
struct v4l2_pix_format format;
@@ -41,7 +41,7 @@ static const struct v4l2_pix_format fmt_default = {
.colorspace = V4L2_COLORSPACE_SRGB,
};
-struct vimc_cap_buffer {
+struct vimc_capture_buffer {
/*
* struct vb2_v4l2_buffer must be the first element
* the videobuf2 framework will allocate this struct based on
@@ -52,7 +52,7 @@ struct vimc_cap_buffer {
struct list_head list;
};
-static int vimc_cap_querycap(struct file *file, void *priv,
+static int vimc_capture_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver));
@@ -63,26 +63,26 @@ static int vimc_cap_querycap(struct file *file, void *priv,
return 0;
}
-static void vimc_cap_get_format(struct vimc_ent_device *ved,
+static void vimc_capture_get_format(struct vimc_ent_device *ved,
struct v4l2_pix_format *fmt)
{
- struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ struct vimc_capture_device *vcapture = container_of(ved, struct vimc_capture_device,
ved);
- *fmt = vcap->format;
+ *fmt = vcapture->format;
}
-static int vimc_cap_g_fmt_vid_cap(struct file *file, void *priv,
+static int vimc_capture_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vimc_cap_device *vcap = video_drvdata(file);
+ struct vimc_capture_device *vcapture = video_drvdata(file);
- f->fmt.pix = vcap->format;
+ f->fmt.pix = vcapture->format;
return 0;
}
-static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
+static int vimc_capture_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *format = &f->fmt.pix;
@@ -114,40 +114,40 @@ static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
+static int vimc_capture_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vimc_cap_device *vcap = video_drvdata(file);
+ struct vimc_capture_device *vcapture = video_drvdata(file);
int ret;
/* Do not change the format while stream is on */
- if (vb2_is_busy(&vcap->queue))
+ if (vb2_is_busy(&vcapture->queue))
return -EBUSY;
- ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
+ ret = vimc_capture_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- dev_dbg(vcap->ved.dev, "%s: format update: "
+ dev_dbg(vcapture->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
- "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcapture->vdev.name,
/* old */
- vcap->format.width, vcap->format.height,
- vcap->format.pixelformat, vcap->format.colorspace,
- vcap->format.quantization, vcap->format.xfer_func,
- vcap->format.ycbcr_enc,
+ vcapture->format.width, vcapture->format.height,
+ vcapture->format.pixelformat, vcapture->format.colorspace,
+ vcapture->format.quantization, vcapture->format.xfer_func,
+ vcapture->format.ycbcr_enc,
/* new */
f->fmt.pix.width, f->fmt.pix.height,
f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
f->fmt.pix.quantization, f->fmt.pix.xfer_func,
f->fmt.pix.ycbcr_enc);
- vcap->format = f->fmt.pix;
+ vcapture->format = f->fmt.pix;
return 0;
}
-static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
+static int vimc_capture_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct vimc_pix_map *vpix;
@@ -169,7 +169,7 @@ static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vimc_cap_enum_framesizes(struct file *file, void *fh,
+static int vimc_capture_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
const struct vimc_pix_map *vpix;
@@ -193,7 +193,7 @@ static int vimc_cap_enum_framesizes(struct file *file, void *fh,
return 0;
}
-static const struct v4l2_file_operations vimc_cap_fops = {
+static const struct v4l2_file_operations vimc_capture_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -203,14 +203,14 @@ static const struct v4l2_file_operations vimc_cap_fops = {
.mmap = vb2_fop_mmap,
};
-static const struct v4l2_ioctl_ops vimc_cap_ioctl_ops = {
- .vidioc_querycap = vimc_cap_querycap,
+static const struct v4l2_ioctl_ops vimc_capture_ioctl_ops = {
+ .vidioc_querycap = vimc_capture_querycap,
- .vidioc_g_fmt_vid_cap = vimc_cap_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vimc_cap_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vimc_cap_try_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap = vimc_cap_enum_fmt_vid_cap,
- .vidioc_enum_framesizes = vimc_cap_enum_framesizes,
+ .vidioc_g_fmt_vid_cap = vimc_capture_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vimc_capture_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vimc_capture_try_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vimc_capture_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = vimc_capture_enum_framesizes,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -223,40 +223,40 @@ static const struct v4l2_ioctl_ops vimc_cap_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
};
-static void vimc_cap_return_all_buffers(struct vimc_cap_device *vcap,
+static void vimc_capture_return_all_buffers(struct vimc_capture_device *vcapture,
enum vb2_buffer_state state)
{
- struct vimc_cap_buffer *vbuf, *node;
+ struct vimc_capture_buffer *vbuf, *node;
- spin_lock(&vcap->qlock);
+ spin_lock(&vcapture->qlock);
- list_for_each_entry_safe(vbuf, node, &vcap->buf_list, list) {
+ list_for_each_entry_safe(vbuf, node, &vcapture->buf_list, list) {
list_del(&vbuf->list);
vb2_buffer_done(&vbuf->vb2.vb2_buf, state);
}
- spin_unlock(&vcap->qlock);
+ spin_unlock(&vcapture->qlock);
}
-static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
- struct media_entity *entity = &vcap->vdev.entity;
+ struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);
+ struct media_entity *entity = &vcapture->vdev.entity;
int ret;
- vcap->sequence = 0;
+ vcapture->sequence = 0;
/* Start the media pipeline */
- ret = media_pipeline_start(entity, &vcap->stream.pipe);
+ ret = media_pipeline_start(entity, &vcapture->stream.pipe);
if (ret) {
- vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED);
return ret;
}
- ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
+ ret = vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 1);
if (ret) {
media_pipeline_stop(entity);
- vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED);
return ret;
}
@@ -267,65 +267,65 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
* Stop the stream engine. Any remaining buffers in the stream queue are
* dequeued and passed on to the vb2 framework marked as STATE_ERROR.
*/
-static void vimc_cap_stop_streaming(struct vb2_queue *vq)
+static void vimc_capture_stop_streaming(struct vb2_queue *vq)
{
- struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+ struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);
- vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
+ vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 0);
/* Stop the media pipeline */
- media_pipeline_stop(&vcap->vdev.entity);
+ media_pipeline_stop(&vcapture->vdev.entity);
/* Release all active buffers */
- vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_ERROR);
+ vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_ERROR);
}
-static void vimc_cap_buf_queue(struct vb2_buffer *vb2_buf)
+static void vimc_capture_buf_queue(struct vb2_buffer *vb2_buf)
{
- struct vimc_cap_device *vcap = vb2_get_drv_priv(vb2_buf->vb2_queue);
- struct vimc_cap_buffer *buf = container_of(vb2_buf,
- struct vimc_cap_buffer,
+ struct vimc_capture_device *vcapture = vb2_get_drv_priv(vb2_buf->vb2_queue);
+ struct vimc_capture_buffer *buf = container_of(vb2_buf,
+ struct vimc_capture_buffer,
vb2.vb2_buf);
- spin_lock(&vcap->qlock);
- list_add_tail(&buf->list, &vcap->buf_list);
- spin_unlock(&vcap->qlock);
+ spin_lock(&vcapture->qlock);
+ list_add_tail(&buf->list, &vcapture->buf_list);
+ spin_unlock(&vcapture->qlock);
}
-static int vimc_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+static int vimc_capture_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[],
struct device *alloc_devs[])
{
- struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+ struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);
if (*nplanes)
- return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
+ return sizes[0] < vcapture->format.sizeimage ? -EINVAL : 0;
/* We don't support multiplanes for now */
*nplanes = 1;
- sizes[0] = vcap->format.sizeimage;
+ sizes[0] = vcapture->format.sizeimage;
return 0;
}
-static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
+static int vimc_capture_buffer_prepare(struct vb2_buffer *vb)
{
- struct vimc_cap_device *vcap = vb2_get_drv_priv(vb->vb2_queue);
- unsigned long size = vcap->format.sizeimage;
+ struct vimc_capture_device *vcapture = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = vcapture->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(vcap->ved.dev, "%s: buffer too small (%lu < %lu)\n",
- vcap->vdev.name, vb2_plane_size(vb, 0), size);
+ dev_err(vcapture->ved.dev, "%s: buffer too small (%lu < %lu)\n",
+ vcapture->vdev.name, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
return 0;
}
-static const struct vb2_ops vimc_cap_qops = {
- .start_streaming = vimc_cap_start_streaming,
- .stop_streaming = vimc_cap_stop_streaming,
- .buf_queue = vimc_cap_buf_queue,
- .queue_setup = vimc_cap_queue_setup,
- .buf_prepare = vimc_cap_buffer_prepare,
+static const struct vb2_ops vimc_capture_qops = {
+ .start_streaming = vimc_capture_start_streaming,
+ .stop_streaming = vimc_capture_stop_streaming,
+ .buf_queue = vimc_capture_buf_queue,
+ .queue_setup = vimc_capture_queue_setup,
+ .buf_prepare = vimc_capture_buffer_prepare,
/*
* Since q->lock is set we can use the standard
* vb2_ops_wait_prepare/finish helper functions.
@@ -334,107 +334,107 @@ static const struct vb2_ops vimc_cap_qops = {
.wait_finish = vb2_ops_wait_finish,
};
-static const struct media_entity_operations vimc_cap_mops = {
+static const struct media_entity_operations vimc_capture_mops = {
.link_validate = vimc_vdev_link_validate,
};
-static void vimc_cap_release(struct vimc_ent_device *ved)
+static void vimc_capture_release(struct vimc_ent_device *ved)
{
- struct vimc_cap_device *vcap =
- container_of(ved, struct vimc_cap_device, ved);
+ struct vimc_capture_device *vcapture =
+ container_of(ved, struct vimc_capture_device, ved);
- media_entity_cleanup(vcap->ved.ent);
- kfree(vcap);
+ media_entity_cleanup(vcapture->ved.ent);
+ kfree(vcapture);
}
-static void vimc_cap_unregister(struct vimc_ent_device *ved)
+static void vimc_capture_unregister(struct vimc_ent_device *ved)
{
- struct vimc_cap_device *vcap =
- container_of(ved, struct vimc_cap_device, ved);
+ struct vimc_capture_device *vcapture =
+ container_of(ved, struct vimc_capture_device, ved);
- vb2_video_unregister_device(&vcap->vdev);
+ vb2_video_unregister_device(&vcapture->vdev);
}
-static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
+static void *vimc_capture_process_frame(struct vimc_ent_device *ved,
const void *frame)
{
- struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ struct vimc_capture_device *vcapture = container_of(ved, struct vimc_capture_device,
ved);
- struct vimc_cap_buffer *vimc_buf;
+ struct vimc_capture_buffer *vimc_buf;
void *vbuf;
- spin_lock(&vcap->qlock);
+ spin_lock(&vcapture->qlock);
/* Get the first entry of the list */
- vimc_buf = list_first_entry_or_null(&vcap->buf_list,
+ vimc_buf = list_first_entry_or_null(&vcapture->buf_list,
typeof(*vimc_buf), list);
if (!vimc_buf) {
- spin_unlock(&vcap->qlock);
+ spin_unlock(&vcapture->qlock);
return ERR_PTR(-EAGAIN);
}
/* Remove this entry from the list */
list_del(&vimc_buf->list);
- spin_unlock(&vcap->qlock);
+ spin_unlock(&vcapture->qlock);
/* Fill the buffer */
vimc_buf->vb2.vb2_buf.timestamp = ktime_get_ns();
- vimc_buf->vb2.sequence = vcap->sequence++;
- vimc_buf->vb2.field = vcap->format.field;
+ vimc_buf->vb2.sequence = vcapture->sequence++;
+ vimc_buf->vb2.field = vcapture->format.field;
vbuf = vb2_plane_vaddr(&vimc_buf->vb2.vb2_buf, 0);
- memcpy(vbuf, frame, vcap->format.sizeimage);
+ memcpy(vbuf, frame, vcapture->format.sizeimage);
/* Set it as ready */
vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
- vcap->format.sizeimage);
+ vcapture->format.sizeimage);
vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
return NULL;
}
-static struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+static struct vimc_ent_device *vimc_capture_add(struct vimc_device *vimc,
const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
const struct vimc_pix_map *vpix;
- struct vimc_cap_device *vcap;
+ struct vimc_capture_device *vcapture;
struct video_device *vdev;
struct vb2_queue *q;
int ret;
- /* Allocate the vimc_cap_device struct */
- vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
- if (!vcap)
+ /* Allocate the vimc_capture_device struct */
+ vcapture = kzalloc(sizeof(*vcapture), GFP_KERNEL);
+ if (!vcapture)
return ERR_PTR(-ENOMEM);
/* Initialize the media entity */
- vcap->vdev.entity.name = vcfg_name;
- vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
- vcap->pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vcap->vdev.entity,
- 1, &vcap->pad);
+ vcapture->vdev.entity.name = vcfg_name;
+ vcapture->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ vcapture->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vcapture->vdev.entity,
+ 1, &vcapture->pad);
if (ret)
- goto err_free_vcap;
+ goto err_free_vcapture;
/* Initialize the lock */
- mutex_init(&vcap->lock);
+ mutex_init(&vcapture->lock);
/* Initialize the vb2 queue */
- q = &vcap->queue;
+ q = &vcapture->queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_DMABUF;
if (vimc_allocator == VIMC_ALLOCATOR_VMALLOC)
q->io_modes |= VB2_USERPTR;
- q->drv_priv = vcap;
- q->buf_struct_size = sizeof(struct vimc_cap_buffer);
- q->ops = &vimc_cap_qops;
+ q->drv_priv = vcapture;
+ q->buf_struct_size = sizeof(struct vimc_capture_buffer);
+ q->ops = &vimc_capture_qops;
q->mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
? &vb2_dma_contig_memops : &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
- q->lock = &vcap->lock;
+ q->lock = &vcapture->lock;
q->dev = v4l2_dev->dev;
ret = vb2_queue_init(q);
@@ -445,57 +445,57 @@ static struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
}
/* Initialize buffer list and its lock */
- INIT_LIST_HEAD(&vcap->buf_list);
- spin_lock_init(&vcap->qlock);
+ INIT_LIST_HEAD(&vcapture->buf_list);
+ spin_lock_init(&vcapture->qlock);
/* Set default frame format */
- vcap->format = fmt_default;
- vpix = vimc_pix_map_by_pixelformat(vcap->format.pixelformat);
- vcap->format.bytesperline = vcap->format.width * vpix->bpp;
- vcap->format.sizeimage = vcap->format.bytesperline *
- vcap->format.height;
+ vcapture->format = fmt_default;
+ vpix = vimc_pix_map_by_pixelformat(vcapture->format.pixelformat);
+ vcapture->format.bytesperline = vcapture->format.width * vpix->bpp;
+ vcapture->format.sizeimage = vcapture->format.bytesperline *
+ vcapture->format.height;
/* Fill the vimc_ent_device struct */
- vcap->ved.ent = &vcap->vdev.entity;
- vcap->ved.process_frame = vimc_cap_process_frame;
- vcap->ved.vdev_get_format = vimc_cap_get_format;
- vcap->ved.dev = vimc->mdev.dev;
+ vcapture->ved.ent = &vcapture->vdev.entity;
+ vcapture->ved.process_frame = vimc_capture_process_frame;
+ vcapture->ved.vdev_get_format = vimc_capture_get_format;
+ vcapture->ved.dev = vimc->mdev.dev;
/* Initialize the video_device struct */
- vdev = &vcap->vdev;
+ vdev = &vcapture->vdev;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
| V4L2_CAP_IO_MC;
- vdev->entity.ops = &vimc_cap_mops;
+ vdev->entity.ops = &vimc_capture_mops;
vdev->release = video_device_release_empty;
- vdev->fops = &vimc_cap_fops;
- vdev->ioctl_ops = &vimc_cap_ioctl_ops;
- vdev->lock = &vcap->lock;
+ vdev->fops = &vimc_capture_fops;
+ vdev->ioctl_ops = &vimc_capture_ioctl_ops;
+ vdev->lock = &vcapture->lock;
vdev->queue = q;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
strscpy(vdev->name, vcfg_name, sizeof(vdev->name));
- video_set_drvdata(vdev, &vcap->ved);
+ video_set_drvdata(vdev, &vcapture->ved);
/* Register the video_device with the v4l2 and the media framework */
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(vimc->mdev.dev, "%s: video register failed (err=%d)\n",
- vcap->vdev.name, ret);
+ vcapture->vdev.name, ret);
goto err_clean_m_ent;
}
- return &vcap->ved;
+ return &vcapture->ved;
err_clean_m_ent:
- media_entity_cleanup(&vcap->vdev.entity);
-err_free_vcap:
- kfree(vcap);
+ media_entity_cleanup(&vcapture->vdev.entity);
+err_free_vcapture:
+ kfree(vcapture);
return ERR_PTR(ret);
}
-struct vimc_ent_type vimc_cap_type = {
- .add = vimc_cap_add,
- .unregister = vimc_cap_unregister,
- .release = vimc_cap_release
+struct vimc_ent_type vimc_capture_type = {
+ .add = vimc_capture_add,
+ .unregister = vimc_capture_unregister,
+ .release = vimc_capture_release
};
diff --git a/drivers/media/test-drivers/vimc/vimc-common.h b/drivers/media/test-drivers/vimc/vimc-common.h
index ba1930772589..7641a101a728 100644
--- a/drivers/media/test-drivers/vimc/vimc-common.h
+++ b/drivers/media/test-drivers/vimc/vimc-common.h
@@ -167,10 +167,11 @@ struct vimc_ent_config {
*/
bool vimc_is_source(struct media_entity *ent);
-extern struct vimc_ent_type vimc_sen_type;
-extern struct vimc_ent_type vimc_deb_type;
-extern struct vimc_ent_type vimc_sca_type;
-extern struct vimc_ent_type vimc_cap_type;
+extern struct vimc_ent_type vimc_sensor_type;
+extern struct vimc_ent_type vimc_debayer_type;
+extern struct vimc_ent_type vimc_scaler_type;
+extern struct vimc_ent_type vimc_capture_type;
+extern struct vimc_ent_type vimc_lens_type;
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
index 06edf9d4d92c..2ae7a0f11ebf 100644
--- a/drivers/media/test-drivers/vimc/vimc-core.c
+++ b/drivers/media/test-drivers/vimc/vimc-core.c
@@ -24,7 +24,7 @@ MODULE_PARM_DESC(allocator, " memory allocator selection, default is 0.\n"
#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
-#define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
+#define VIMC_DATA_LINK(src, srcpad, sink, sinkpad, link_flags) { \
.src_ent = src, \
.src_pad = srcpad, \
.sink_ent = sink, \
@@ -32,8 +32,13 @@ MODULE_PARM_DESC(allocator, " memory allocator selection, default is 0.\n"
.flags = link_flags, \
}
-/* Structure which describes links between entities */
-struct vimc_ent_link {
+#define VIMC_ANCILLARY_LINK(primary, ancillary) { \
+ .primary_ent = primary, \
+ .ancillary_ent = ancillary \
+}
+
+/* Structure which describes data links between entities */
+struct vimc_data_link {
unsigned int src_ent;
u16 src_pad;
unsigned int sink_ent;
@@ -41,12 +46,35 @@ struct vimc_ent_link {
u32 flags;
};
+/* Enum to improve clarity when defining vimc_data_links */
+enum vimc_data_link_ents {
+ SENSOR_A,
+ SENSOR_B,
+ DEBAYER_A,
+ DEBAYER_B,
+ RAW_CAPTURE_0,
+ RAW_CAPTURE_1,
+ RGB_YUV_INPUT,
+ SCALER,
+ RGB_YUV_CAPTURE,
+ LENS_A,
+ LENS_B,
+};
+
+/* Structure which describes ancillary links between entities */
+struct vimc_ancillary_link {
+ unsigned int primary_ent;
+ unsigned int ancillary_ent;
+};
+
/* Structure which describes the whole topology */
struct vimc_pipeline_config {
const struct vimc_ent_config *ents;
size_t num_ents;
- const struct vimc_ent_link *links;
- size_t num_links;
+ const struct vimc_data_link *data_links;
+ size_t num_data_links;
+ const struct vimc_ancillary_link *ancillary_links;
+ size_t num_ancillary_links;
};
/* --------------------------------------------------------------------------
@@ -54,69 +82,91 @@ struct vimc_pipeline_config {
*/
static struct vimc_ent_config ent_config[] = {
- {
+ [SENSOR_A] = {
.name = "Sensor A",
- .type = &vimc_sen_type
+ .type = &vimc_sensor_type
},
- {
+ [SENSOR_B] = {
.name = "Sensor B",
- .type = &vimc_sen_type
+ .type = &vimc_sensor_type
},
- {
+ [DEBAYER_A] = {
.name = "Debayer A",
- .type = &vimc_deb_type
+ .type = &vimc_debayer_type
},
- {
+ [DEBAYER_B] = {
.name = "Debayer B",
- .type = &vimc_deb_type
+ .type = &vimc_debayer_type
},
- {
+ [RAW_CAPTURE_0] = {
.name = "Raw Capture 0",
- .type = &vimc_cap_type
+ .type = &vimc_capture_type
},
- {
+ [RAW_CAPTURE_1] = {
.name = "Raw Capture 1",
- .type = &vimc_cap_type
+ .type = &vimc_capture_type
},
- {
+ [RGB_YUV_INPUT] = {
/* TODO: change this to vimc-input when it is implemented */
.name = "RGB/YUV Input",
- .type = &vimc_sen_type
+ .type = &vimc_sensor_type
},
- {
+ [SCALER] = {
.name = "Scaler",
- .type = &vimc_sca_type
+ .type = &vimc_scaler_type
},
- {
+ [RGB_YUV_CAPTURE] = {
.name = "RGB/YUV Capture",
- .type = &vimc_cap_type
+ .type = &vimc_capture_type
+ },
+ [LENS_A] = {
+ .name = "Lens A",
+ .type = &vimc_lens_type
+ },
+ [LENS_B] = {
+ .name = "Lens B",
+ .type = &vimc_lens_type
},
};
-static const struct vimc_ent_link ent_links[] = {
+static const struct vimc_data_link data_links[] = {
/* Link: Sensor A (Pad 0)->(Pad 0) Debayer A */
- VIMC_ENT_LINK(0, 0, 2, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ VIMC_DATA_LINK(SENSOR_A, 0, DEBAYER_A, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
/* Link: Sensor A (Pad 0)->(Pad 0) Raw Capture 0 */
- VIMC_ENT_LINK(0, 0, 4, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ VIMC_DATA_LINK(SENSOR_A, 0, RAW_CAPTURE_0, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
/* Link: Sensor B (Pad 0)->(Pad 0) Debayer B */
- VIMC_ENT_LINK(1, 0, 3, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ VIMC_DATA_LINK(SENSOR_B, 0, DEBAYER_B, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
/* Link: Sensor B (Pad 0)->(Pad 0) Raw Capture 1 */
- VIMC_ENT_LINK(1, 0, 5, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ VIMC_DATA_LINK(SENSOR_B, 0, RAW_CAPTURE_1, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
/* Link: Debayer A (Pad 1)->(Pad 0) Scaler */
- VIMC_ENT_LINK(2, 1, 7, 0, MEDIA_LNK_FL_ENABLED),
+ VIMC_DATA_LINK(DEBAYER_A, 1, SCALER, 0, MEDIA_LNK_FL_ENABLED),
/* Link: Debayer B (Pad 1)->(Pad 0) Scaler */
- VIMC_ENT_LINK(3, 1, 7, 0, 0),
+ VIMC_DATA_LINK(DEBAYER_B, 1, SCALER, 0, 0),
/* Link: RGB/YUV Input (Pad 0)->(Pad 0) Scaler */
- VIMC_ENT_LINK(6, 0, 7, 0, 0),
+ VIMC_DATA_LINK(RGB_YUV_INPUT, 0, SCALER, 0, 0),
/* Link: Scaler (Pad 1)->(Pad 0) RGB/YUV Capture */
- VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ VIMC_DATA_LINK(SCALER, 1, RGB_YUV_CAPTURE, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+};
+
+static const struct vimc_ancillary_link ancillary_links[] = {
+ /* Link: Sensor A -> Lens A */
+ VIMC_ANCILLARY_LINK(SENSOR_A, LENS_A),
+ /* Link: Sensor B -> Lens B */
+ VIMC_ANCILLARY_LINK(SENSOR_B, LENS_B),
};
static struct vimc_pipeline_config pipe_cfg = {
- .ents = ent_config,
- .num_ents = ARRAY_SIZE(ent_config),
- .links = ent_links,
- .num_links = ARRAY_SIZE(ent_links)
+ .ents = ent_config,
+ .num_ents = ARRAY_SIZE(ent_config),
+ .data_links = data_links,
+ .num_data_links = ARRAY_SIZE(data_links),
+ .ancillary_links = ancillary_links,
+ .num_ancillary_links = ARRAY_SIZE(ancillary_links),
};
/* -------------------------------------------------------------------------- */
@@ -135,8 +185,8 @@ static int vimc_create_links(struct vimc_device *vimc)
int ret;
/* Initialize the links between entities */
- for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
- const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
+ for (i = 0; i < vimc->pipe_cfg->num_data_links; i++) {
+ const struct vimc_data_link *link = &vimc->pipe_cfg->data_links[i];
struct vimc_ent_device *ved_src =
vimc->ent_devs[link->src_ent];
@@ -150,6 +200,22 @@ static int vimc_create_links(struct vimc_device *vimc)
goto err_rm_links;
}
+ for (i = 0; i < vimc->pipe_cfg->num_ancillary_links; i++) {
+ const struct vimc_ancillary_link *link = &vimc->pipe_cfg->ancillary_links[i];
+
+ struct vimc_ent_device *ved_primary =
+ vimc->ent_devs[link->primary_ent];
+ struct vimc_ent_device *ved_ancillary =
+ vimc->ent_devs[link->ancillary_ent];
+ struct media_link *ret_link =
+ media_create_ancillary_link(ved_primary->ent, ved_ancillary->ent);
+
+ if (IS_ERR(ret_link)) {
+ ret = PTR_ERR(ret_link);
+ goto err_rm_links;
+ }
+ }
+
return 0;
err_rm_links:
diff --git a/drivers/media/test-drivers/vimc/vimc-debayer.c b/drivers/media/test-drivers/vimc/vimc-debayer.c
index 2d06cdbacc76..f671251fdf0e 100644
--- a/drivers/media/test-drivers/vimc/vimc-debayer.c
+++ b/drivers/media/test-drivers/vimc/vimc-debayer.c
@@ -15,28 +15,29 @@
#include "vimc-common.h"
-enum vimc_deb_rgb_colors {
- VIMC_DEB_RED = 0,
- VIMC_DEB_GREEN = 1,
- VIMC_DEB_BLUE = 2,
+enum vimc_debayer_rgb_colors {
+ VIMC_DEBAYER_RED = 0,
+ VIMC_DEBAYER_GREEN = 1,
+ VIMC_DEBAYER_BLUE = 2,
};
-struct vimc_deb_pix_map {
+struct vimc_debayer_pix_map {
u32 code;
- enum vimc_deb_rgb_colors order[2][2];
+ enum vimc_debayer_rgb_colors order[2][2];
};
-struct vimc_deb_device {
+struct vimc_debayer_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
/* The active format */
struct v4l2_mbus_framefmt sink_fmt;
u32 src_code;
- void (*set_rgb_src)(struct vimc_deb_device *vdeb, unsigned int lin,
- unsigned int col, unsigned int rgb[3]);
+ void (*set_rgb_src)(struct vimc_debayer_device *vdebayer,
+ unsigned int lin, unsigned int col,
+ unsigned int rgb[3]);
/* Values calculated when the stream starts */
u8 *src_frame;
- const struct vimc_deb_pix_map *sink_pix_map;
+ const struct vimc_debayer_pix_map *sink_pix_map;
unsigned int sink_bpp;
unsigned int mean_win_size;
struct v4l2_ctrl_handler hdl;
@@ -51,7 +52,7 @@ static const struct v4l2_mbus_framefmt sink_fmt_default = {
.colorspace = V4L2_COLORSPACE_SRGB,
};
-static const u32 vimc_deb_src_mbus_codes[] = {
+static const u32 vimc_debayer_src_mbus_codes[] = {
MEDIA_BUS_FMT_GBR888_1X24,
MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_BGR888_3X8,
@@ -64,95 +65,95 @@ static const u32 vimc_deb_src_mbus_codes[] = {
MEDIA_BUS_FMT_RGB888_1X32_PADHI,
};
-static const struct vimc_deb_pix_map vimc_deb_pix_map_list[] = {
+static const struct vimc_debayer_pix_map vimc_debayer_pix_map_list[] = {
{
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
- { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN },
+ { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED } }
},
{
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
- { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE },
+ { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN } }
},
{
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
- { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED },
+ { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN } }
},
{
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
- { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN },
+ { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE } }
},
{
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
- { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN },
+ { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED } }
},
{
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
- { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE },
+ { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN } }
},
{
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
- { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED },
+ { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN } }
},
{
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
- { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN },
+ { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE } }
},
{
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
- { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN },
+ { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED } }
},
{
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
- { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE },
+ { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN } }
},
{
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
- { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED },
+ { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN } }
},
{
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
- { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN },
+ { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE } }
},
};
-static const struct vimc_deb_pix_map *vimc_deb_pix_map_by_code(u32 code)
+static const struct vimc_debayer_pix_map *vimc_debayer_pix_map_by_code(u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(vimc_deb_pix_map_list); i++)
- if (vimc_deb_pix_map_list[i].code == code)
- return &vimc_deb_pix_map_list[i];
+ for (i = 0; i < ARRAY_SIZE(vimc_debayer_pix_map_list); i++)
+ if (vimc_debayer_pix_map_list[i].code == code)
+ return &vimc_debayer_pix_map_list[i];
return NULL;
}
-static bool vimc_deb_src_code_is_valid(u32 code)
+static bool vimc_debayer_src_code_is_valid(u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(vimc_deb_src_mbus_codes); i++)
- if (vimc_deb_src_mbus_codes[i] == code)
+ for (i = 0; i < ARRAY_SIZE(vimc_debayer_src_mbus_codes); i++)
+ if (vimc_debayer_src_mbus_codes[i] == code)
return true;
return false;
}
-static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int vimc_debayer_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
unsigned int i;
@@ -162,45 +163,45 @@ static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
for (i = 1; i < sd->entity.num_pads; i++) {
mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = sink_fmt_default;
- mf->code = vdeb->src_code;
+ mf->code = vdebayer->src_code;
}
return 0;
}
-static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
+static int vimc_debayer_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
{
if (VIMC_IS_SRC(code->pad)) {
- if (code->index >= ARRAY_SIZE(vimc_deb_src_mbus_codes))
+ if (code->index >= ARRAY_SIZE(vimc_debayer_src_mbus_codes))
return -EINVAL;
- code->code = vimc_deb_src_mbus_codes[code->index];
+ code->code = vimc_debayer_src_mbus_codes[code->index];
} else {
- if (code->index >= ARRAY_SIZE(vimc_deb_pix_map_list))
+ if (code->index >= ARRAY_SIZE(vimc_debayer_pix_map_list))
return -EINVAL;
- code->code = vimc_deb_pix_map_list[code->index].code;
+ code->code = vimc_debayer_pix_map_list[code->index].code;
}
return 0;
}
-static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
+static int vimc_debayer_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index)
return -EINVAL;
if (VIMC_IS_SINK(fse->pad)) {
- const struct vimc_deb_pix_map *vpix =
- vimc_deb_pix_map_by_code(fse->code);
+ const struct vimc_debayer_pix_map *vpix =
+ vimc_debayer_pix_map_by_code(fse->code);
if (!vpix)
return -EINVAL;
- } else if (!vimc_deb_src_code_is_valid(fse->code)) {
+ } else if (!vimc_debayer_src_code_is_valid(fse->code)) {
return -EINVAL;
}
@@ -212,30 +213,30 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+static int vimc_debayer_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd);
/* Get the current sink format */
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
*v4l2_subdev_get_try_format(sd, sd_state, 0) :
- vdeb->sink_fmt;
+ vdebayer->sink_fmt;
/* Set the right code for the source pad */
if (VIMC_IS_SRC(fmt->pad))
- fmt->format.code = vdeb->src_code;
+ fmt->format.code = vdebayer->src_code;
return 0;
}
-static void vimc_deb_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
+static void vimc_debayer_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
{
- const struct vimc_deb_pix_map *vpix;
+ const struct vimc_debayer_pix_map *vpix;
/* Don't accept a code that is not on the debayer table */
- vpix = vimc_deb_pix_map_by_code(fmt->code);
+ vpix = vimc_debayer_pix_map_by_code(fmt->code);
if (!vpix)
fmt->code = sink_fmt_default.code;
@@ -250,21 +251,21 @@ static void vimc_deb_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
vimc_colorimetry_clamp(fmt);
}
-static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+static int vimc_debayer_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
u32 *src_code;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vdeb->src_frame)
+ if (vdebayer->src_frame)
return -EBUSY;
- sink_fmt = &vdeb->sink_fmt;
- src_code = &vdeb->src_code;
+ sink_fmt = &vdebayer->sink_fmt;
+ src_code = &vdebayer->src_code;
} else {
sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
src_code = &v4l2_subdev_get_try_format(sd, sd_state, 1)->code;
@@ -279,17 +280,17 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
fmt->format = *sink_fmt;
- if (vimc_deb_src_code_is_valid(code))
+ if (vimc_debayer_src_code_is_valid(code))
*src_code = code;
fmt->format.code = *src_code;
} else {
/* Set the new format in the sink pad */
- vimc_deb_adjust_sink_fmt(&fmt->format);
+ vimc_debayer_adjust_sink_fmt(&fmt->format);
- dev_dbg(vdeb->ved.dev, "%s: sink format update: "
+ dev_dbg(vdebayer->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
- "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdebayer->sd.name,
/* old */
sink_fmt->width, sink_fmt->height, sink_fmt->code,
sink_fmt->colorspace, sink_fmt->quantization,
@@ -305,97 +306,97 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_pad_ops vimc_deb_pad_ops = {
- .init_cfg = vimc_deb_init_cfg,
- .enum_mbus_code = vimc_deb_enum_mbus_code,
- .enum_frame_size = vimc_deb_enum_frame_size,
- .get_fmt = vimc_deb_get_fmt,
- .set_fmt = vimc_deb_set_fmt,
+static const struct v4l2_subdev_pad_ops vimc_debayer_pad_ops = {
+ .init_cfg = vimc_debayer_init_cfg,
+ .enum_mbus_code = vimc_debayer_enum_mbus_code,
+ .enum_frame_size = vimc_debayer_enum_frame_size,
+ .get_fmt = vimc_debayer_get_fmt,
+ .set_fmt = vimc_debayer_set_fmt,
};
-static void vimc_deb_process_rgb_frame(struct vimc_deb_device *vdeb,
- unsigned int lin,
- unsigned int col,
- unsigned int rgb[3])
+static void vimc_debayer_process_rgb_frame(struct vimc_debayer_device *vdebayer,
+ unsigned int lin,
+ unsigned int col,
+ unsigned int rgb[3])
{
const struct vimc_pix_map *vpix;
unsigned int i, index;
- vpix = vimc_pix_map_by_code(vdeb->src_code);
- index = VIMC_FRAME_INDEX(lin, col, vdeb->sink_fmt.width, 3);
+ vpix = vimc_pix_map_by_code(vdebayer->src_code);
+ index = VIMC_FRAME_INDEX(lin, col, vdebayer->sink_fmt.width, 3);
for (i = 0; i < 3; i++) {
switch (vpix->pixelformat) {
case V4L2_PIX_FMT_RGB24:
- vdeb->src_frame[index + i] = rgb[i];
+ vdebayer->src_frame[index + i] = rgb[i];
break;
case V4L2_PIX_FMT_BGR24:
- vdeb->src_frame[index + i] = rgb[2 - i];
+ vdebayer->src_frame[index + i] = rgb[2 - i];
break;
}
}
}
-static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
+static int vimc_debayer_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd);
if (enable) {
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (vdeb->src_frame)
+ if (vdebayer->src_frame)
return 0;
/* Calculate the frame size of the source pad */
- vpix = vimc_pix_map_by_code(vdeb->src_code);
- frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
+ vpix = vimc_pix_map_by_code(vdebayer->src_code);
+ frame_size = vdebayer->sink_fmt.width * vdebayer->sink_fmt.height *
vpix->bpp;
/* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vdeb->sink_fmt.code);
- vdeb->sink_bpp = vpix->bpp;
+ vpix = vimc_pix_map_by_code(vdebayer->sink_fmt.code);
+ vdebayer->sink_bpp = vpix->bpp;
/* Get the corresponding pixel map from the table */
- vdeb->sink_pix_map =
- vimc_deb_pix_map_by_code(vdeb->sink_fmt.code);
+ vdebayer->sink_pix_map =
+ vimc_debayer_pix_map_by_code(vdebayer->sink_fmt.code);
/*
* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
*/
- vdeb->src_frame = vmalloc(frame_size);
- if (!vdeb->src_frame)
+ vdebayer->src_frame = vmalloc(frame_size);
+ if (!vdebayer->src_frame)
return -ENOMEM;
} else {
- if (!vdeb->src_frame)
+ if (!vdebayer->src_frame)
return 0;
- vfree(vdeb->src_frame);
- vdeb->src_frame = NULL;
+ vfree(vdebayer->src_frame);
+ vdebayer->src_frame = NULL;
}
return 0;
}
-static const struct v4l2_subdev_core_ops vimc_deb_core_ops = {
+static const struct v4l2_subdev_core_ops vimc_debayer_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
-static const struct v4l2_subdev_video_ops vimc_deb_video_ops = {
- .s_stream = vimc_deb_s_stream,
+static const struct v4l2_subdev_video_ops vimc_debayer_video_ops = {
+ .s_stream = vimc_debayer_s_stream,
};
-static const struct v4l2_subdev_ops vimc_deb_ops = {
- .core = &vimc_deb_core_ops,
- .pad = &vimc_deb_pad_ops,
- .video = &vimc_deb_video_ops,
+static const struct v4l2_subdev_ops vimc_debayer_ops = {
+ .core = &vimc_debayer_core_ops,
+ .pad = &vimc_debayer_pad_ops,
+ .video = &vimc_debayer_video_ops,
};
-static unsigned int vimc_deb_get_val(const u8 *bytes,
- const unsigned int n_bytes)
+static unsigned int vimc_debayer_get_val(const u8 *bytes,
+ const unsigned int n_bytes)
{
unsigned int i;
unsigned int acc = 0;
@@ -406,11 +407,11 @@ static unsigned int vimc_deb_get_val(const u8 *bytes,
return acc;
}
-static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
- const u8 *frame,
- const unsigned int lin,
- const unsigned int col,
- unsigned int rgb[3])
+static void vimc_debayer_calc_rgb_sink(struct vimc_debayer_device *vdebayer,
+ const u8 *frame,
+ const unsigned int lin,
+ const unsigned int col,
+ unsigned int rgb[3])
{
unsigned int i, seek, wlin, wcol;
unsigned int n_rgb[3] = {0, 0, 0};
@@ -423,13 +424,13 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* the top left corner of the mean window (considering the current
* pixel as the center)
*/
- seek = vdeb->mean_win_size / 2;
+ seek = vdebayer->mean_win_size / 2;
/* Sum the values of the colors in the mean window */
- dev_dbg(vdeb->ved.dev,
+ dev_dbg(vdebayer->ved.dev,
"deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
- vdeb->sd.name, lin, col, vdeb->sink_fmt.height, seek);
+ vdebayer->sd.name, lin, col, vdebayer->sink_fmt.height, seek);
/*
* Iterate through all the lines in the mean window, start
@@ -438,7 +439,7 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* frame
*/
for (wlin = seek > lin ? 0 : lin - seek;
- wlin < lin + seek + 1 && wlin < vdeb->sink_fmt.height;
+ wlin < lin + seek + 1 && wlin < vdebayer->sink_fmt.height;
wlin++) {
/*
@@ -448,78 +449,80 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* frame
*/
for (wcol = seek > col ? 0 : col - seek;
- wcol < col + seek + 1 && wcol < vdeb->sink_fmt.width;
+ wcol < col + seek + 1 && wcol < vdebayer->sink_fmt.width;
wcol++) {
- enum vimc_deb_rgb_colors color;
+ enum vimc_debayer_rgb_colors color;
unsigned int index;
/* Check which color this pixel is */
- color = vdeb->sink_pix_map->order[wlin % 2][wcol % 2];
+ color = vdebayer->sink_pix_map->order[wlin % 2][wcol % 2];
index = VIMC_FRAME_INDEX(wlin, wcol,
- vdeb->sink_fmt.width,
- vdeb->sink_bpp);
+ vdebayer->sink_fmt.width,
+ vdebayer->sink_bpp);
- dev_dbg(vdeb->ved.dev,
+ dev_dbg(vdebayer->ved.dev,
"deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
- vdeb->sd.name, index, wlin, wcol, color);
+ vdebayer->sd.name, index, wlin, wcol, color);
/* Get its value */
rgb[color] = rgb[color] +
- vimc_deb_get_val(&frame[index], vdeb->sink_bpp);
+ vimc_debayer_get_val(&frame[index],
+ vdebayer->sink_bpp);
/* Save how many values we already added */
n_rgb[color]++;
- dev_dbg(vdeb->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n",
- vdeb->sd.name, rgb[color], n_rgb[color]);
+ dev_dbg(vdebayer->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ vdebayer->sd.name, rgb[color], n_rgb[color]);
}
}
/* Calculate the mean */
for (i = 0; i < 3; i++) {
- dev_dbg(vdeb->ved.dev,
+ dev_dbg(vdebayer->ved.dev,
"deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
- vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
+ vdebayer->sd.name, lin, col, i, rgb[i], n_rgb[i]);
if (n_rgb[i])
rgb[i] = rgb[i] / n_rgb[i];
- dev_dbg(vdeb->ved.dev,
+ dev_dbg(vdebayer->ved.dev,
"deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
- vdeb->sd.name, lin, col, i, rgb[i]);
+ vdebayer->sd.name, lin, col, i, rgb[i]);
}
}
-static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
- const void *sink_frame)
+static void *vimc_debayer_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
- struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
- ved);
+ struct vimc_debayer_device *vdebayer =
+ container_of(ved, struct vimc_debayer_device, ved);
+
unsigned int rgb[3];
unsigned int i, j;
/* If the stream in this node is not active, just return */
- if (!vdeb->src_frame)
+ if (!vdebayer->src_frame)
return ERR_PTR(-EINVAL);
- for (i = 0; i < vdeb->sink_fmt.height; i++)
- for (j = 0; j < vdeb->sink_fmt.width; j++) {
- vimc_deb_calc_rgb_sink(vdeb, sink_frame, i, j, rgb);
- vdeb->set_rgb_src(vdeb, i, j, rgb);
+ for (i = 0; i < vdebayer->sink_fmt.height; i++)
+ for (j = 0; j < vdebayer->sink_fmt.width; j++) {
+ vimc_debayer_calc_rgb_sink(vdebayer, sink_frame, i, j, rgb);
+ vdebayer->set_rgb_src(vdebayer, i, j, rgb);
}
- return vdeb->src_frame;
+ return vdebayer->src_frame;
}
-static int vimc_deb_s_ctrl(struct v4l2_ctrl *ctrl)
+static int vimc_debayer_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct vimc_deb_device *vdeb =
- container_of(ctrl->handler, struct vimc_deb_device, hdl);
+ struct vimc_debayer_device *vdebayer =
+ container_of(ctrl->handler, struct vimc_debayer_device, hdl);
switch (ctrl->id) {
case VIMC_CID_MEAN_WIN_SIZE:
- vdeb->mean_win_size = ctrl->val;
+ vdebayer->mean_win_size = ctrl->val;
break;
default:
return -EINVAL;
@@ -527,29 +530,29 @@ static int vimc_deb_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
-static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
- .s_ctrl = vimc_deb_s_ctrl,
+static const struct v4l2_ctrl_ops vimc_debayer_ctrl_ops = {
+ .s_ctrl = vimc_debayer_s_ctrl,
};
-static void vimc_deb_release(struct vimc_ent_device *ved)
+static void vimc_debayer_release(struct vimc_ent_device *ved)
{
- struct vimc_deb_device *vdeb =
- container_of(ved, struct vimc_deb_device, ved);
+ struct vimc_debayer_device *vdebayer =
+ container_of(ved, struct vimc_debayer_device, ved);
- v4l2_ctrl_handler_free(&vdeb->hdl);
- media_entity_cleanup(vdeb->ved.ent);
- kfree(vdeb);
+ v4l2_ctrl_handler_free(&vdebayer->hdl);
+ media_entity_cleanup(vdebayer->ved.ent);
+ kfree(vdebayer);
}
-static const struct v4l2_ctrl_config vimc_deb_ctrl_class = {
+static const struct v4l2_ctrl_config vimc_debayer_ctrl_class = {
.flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
.id = VIMC_CID_VIMC_CLASS,
.name = "VIMC Controls",
.type = V4L2_CTRL_TYPE_CTRL_CLASS,
};
-static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
- .ops = &vimc_deb_ctrl_ops,
+static const struct v4l2_ctrl_config vimc_debayer_ctrl_mean_win_size = {
+ .ops = &vimc_debayer_ctrl_ops,
.id = VIMC_CID_MEAN_WIN_SIZE,
.name = "Debayer Mean Window Size",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -559,65 +562,65 @@ static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
.def = 3,
};
-static struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
- const char *vcfg_name)
+static struct vimc_ent_device *vimc_debayer_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
- struct vimc_deb_device *vdeb;
+ struct vimc_debayer_device *vdebayer;
int ret;
- /* Allocate the vdeb struct */
- vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
- if (!vdeb)
+ /* Allocate the vdebayer struct */
+ vdebayer = kzalloc(sizeof(*vdebayer), GFP_KERNEL);
+ if (!vdebayer)
return ERR_PTR(-ENOMEM);
/* Create controls: */
- v4l2_ctrl_handler_init(&vdeb->hdl, 2);
- v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_class, NULL);
- v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_mean_win_size, NULL);
- vdeb->sd.ctrl_handler = &vdeb->hdl;
- if (vdeb->hdl.error) {
- ret = vdeb->hdl.error;
- goto err_free_vdeb;
+ v4l2_ctrl_handler_init(&vdebayer->hdl, 2);
+ v4l2_ctrl_new_custom(&vdebayer->hdl, &vimc_debayer_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vdebayer->hdl, &vimc_debayer_ctrl_mean_win_size, NULL);
+ vdebayer->sd.ctrl_handler = &vdebayer->hdl;
+ if (vdebayer->hdl.error) {
+ ret = vdebayer->hdl.error;
+ goto err_free_vdebayer;
}
/* Initialize ved and sd */
- vdeb->pads[0].flags = MEDIA_PAD_FL_SINK;
- vdeb->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ vdebayer->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vdebayer->pads[1].flags = MEDIA_PAD_FL_SOURCE;
- ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
+ ret = vimc_ent_sd_register(&vdebayer->ved, &vdebayer->sd, v4l2_dev,
vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
- vdeb->pads, &vimc_deb_ops);
+ vdebayer->pads, &vimc_debayer_ops);
if (ret)
goto err_free_hdl;
- vdeb->ved.process_frame = vimc_deb_process_frame;
- vdeb->ved.dev = vimc->mdev.dev;
- vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def;
+ vdebayer->ved.process_frame = vimc_debayer_process_frame;
+ vdebayer->ved.dev = vimc->mdev.dev;
+ vdebayer->mean_win_size = vimc_debayer_ctrl_mean_win_size.def;
/* Initialize the frame format */
- vdeb->sink_fmt = sink_fmt_default;
+ vdebayer->sink_fmt = sink_fmt_default;
/*
* TODO: Add support for more output formats, we only support
* RGB888 for now
* NOTE: the src format is always the same as the sink, except
* for the code
*/
- vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
- vdeb->set_rgb_src = vimc_deb_process_rgb_frame;
+ vdebayer->src_code = MEDIA_BUS_FMT_RGB888_1X24;
+ vdebayer->set_rgb_src = vimc_debayer_process_rgb_frame;
- return &vdeb->ved;
+ return &vdebayer->ved;
err_free_hdl:
- v4l2_ctrl_handler_free(&vdeb->hdl);
-err_free_vdeb:
- kfree(vdeb);
+ v4l2_ctrl_handler_free(&vdebayer->hdl);
+err_free_vdebayer:
+ kfree(vdebayer);
return ERR_PTR(ret);
}
-struct vimc_ent_type vimc_deb_type = {
- .add = vimc_deb_add,
- .release = vimc_deb_release
+struct vimc_ent_type vimc_debayer_type = {
+ .add = vimc_debayer_add,
+ .release = vimc_debayer_release
};
diff --git a/drivers/media/test-drivers/vimc/vimc-lens.c b/drivers/media/test-drivers/vimc/vimc-lens.c
new file mode 100644
index 000000000000..3ce7f4b4d2cc
--- /dev/null
+++ b/drivers/media/test-drivers/vimc/vimc-lens.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * vimc-lens.c Virtual Media Controller Driver
+ * Copyright (C) 2022 Google, Inc
+ * Author: yunkec@google.com (Yunke Cao)
+ */
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "vimc-common.h"
+
+#define VIMC_LENS_MAX_FOCUS_POS 1023
+#define VIMC_LENS_MAX_FOCUS_STEP 1
+
+struct vimc_lens_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
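+ /* Last focus position requested via V4L2_CID_FOCUS_ABSOLUTE */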
+ u32 focus_absolute;
+};
+
+static const struct v4l2_subdev_core_ops vimc_lens_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops vimc_lens_ops = {
+ .core = &vimc_lens_core_ops
+};
+
+static int vimc_lens_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vimc_lens_device *vlens =
+ container_of(ctrl->handler, struct vimc_lens_device, hdl);
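+ /* The virtual lens has no optics to move; just record the requested focus */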
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) {
+ vlens->focus_absolute = ctrl->val;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops vimc_lens_ctrl_ops = {
+ .s_ctrl = vimc_lens_s_ctrl,
+};
+
+static struct vimc_ent_device *vimc_lens_add(struct vimc_device *vimc,
+ const char *vcfg_name)
+{
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
+ struct vimc_lens_device *vlens;
+ int ret;
+
+ /* Allocate the vlens struct */
+ vlens = kzalloc(sizeof(*vlens), GFP_KERNEL);
+ if (!vlens)
+ return ERR_PTR(-ENOMEM);
+
+ v4l2_ctrl_handler_init(&vlens->hdl, 1);
+
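+ /* Expose a single absolute-focus control: range 0..VIMC_LENS_MAX_FOCUS_POS, step 1, default 0 */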
+ v4l2_ctrl_new_std(&vlens->hdl, &vimc_lens_ctrl_ops,
+ V4L2_CID_FOCUS_ABSOLUTE, 0,
+ VIMC_LENS_MAX_FOCUS_POS, VIMC_LENS_MAX_FOCUS_STEP, 0);
+ vlens->sd.ctrl_handler = &vlens->hdl;
+ if (vlens->hdl.error) {
+ ret = vlens->hdl.error;
+ goto err_free_vlens;
+ }
+ vlens->ved.dev = vimc->mdev.dev;
+
+ ret = vimc_ent_sd_register(&vlens->ved, &vlens->sd, v4l2_dev,
+ vcfg_name, MEDIA_ENT_F_LENS, 0,
+ NULL, &vimc_lens_ops);
+ if (ret)
+ goto err_free_hdl;
+
+ return &vlens->ved;
+
+err_free_hdl:
+ v4l2_ctrl_handler_free(&vlens->hdl);
+err_free_vlens:
+ kfree(vlens);
+
+ return ERR_PTR(ret);
+}
+
+static void vimc_lens_release(struct vimc_ent_device *ved)
+{
+ struct vimc_lens_device *vlens =
+ container_of(ved, struct vimc_lens_device, ved);
+
+ v4l2_ctrl_handler_free(&vlens->hdl);
+ media_entity_cleanup(vlens->ved.ent);
+ kfree(vlens);
+}
+
+struct vimc_ent_type vimc_lens_type = {
+ .add = vimc_lens_add,
+ .release = vimc_lens_release
+};
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 820b8f5b502f..b671774e2784 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -16,14 +16,14 @@
/* Pad identifier */
enum vic_sca_pad {
- VIMC_SCA_SINK = 0,
- VIMC_SCA_SRC = 1,
+ VIMC_SCALER_SINK = 0,
+ VIMC_SCALER_SRC = 1,
};
-#define VIMC_SCA_FMT_WIDTH_DEFAULT 640
-#define VIMC_SCA_FMT_HEIGHT_DEFAULT 480
+#define VIMC_SCALER_FMT_WIDTH_DEFAULT 640
+#define VIMC_SCALER_FMT_HEIGHT_DEFAULT 480
-struct vimc_sca_device {
+struct vimc_scaler_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
struct v4l2_rect crop_rect;
@@ -36,16 +36,16 @@ struct vimc_sca_device {
};
static const struct v4l2_mbus_framefmt fmt_default = {
- .width = VIMC_SCA_FMT_WIDTH_DEFAULT,
- .height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
+ .width = VIMC_SCALER_FMT_WIDTH_DEFAULT,
+ .height = VIMC_SCALER_FMT_HEIGHT_DEFAULT,
.code = MEDIA_BUS_FMT_RGB888_1X24,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_SRGB,
};
static const struct v4l2_rect crop_rect_default = {
- .width = VIMC_SCA_FMT_WIDTH_DEFAULT,
- .height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
+ .width = VIMC_SCALER_FMT_WIDTH_DEFAULT,
+ .height = VIMC_SCALER_FMT_HEIGHT_DEFAULT,
.top = 0,
.left = 0,
};
@@ -58,7 +58,7 @@ static const struct v4l2_rect crop_rect_min = {
};
static struct v4l2_rect
-vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
+vimc_scaler_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
{
/* Get the crop bounds to clamp the crop rectangle correctly */
struct v4l2_rect r = {
@@ -70,7 +70,7 @@ vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
return r;
}
-static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
+static int vimc_scaler_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf;
@@ -82,13 +82,13 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
*mf = fmt_default;
}
- r = v4l2_subdev_get_try_crop(sd, sd_state, VIMC_SCA_SINK);
+ r = v4l2_subdev_get_try_crop(sd, sd_state, VIMC_SCALER_SINK);
*r = crop_rect_default;
return 0;
}
-static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
+static int vimc_scaler_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
@@ -109,7 +109,7 @@ static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
+static int vimc_scaler_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
@@ -133,57 +133,57 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
}
static struct v4l2_mbus_framefmt *
-vimc_sca_pad_format(struct vimc_sca_device *vsca,
+vimc_scaler_pad_format(struct vimc_scaler_device *vscaler,
struct v4l2_subdev_state *sd_state, u32 pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&vsca->sd, sd_state, pad);
+ return v4l2_subdev_get_try_format(&vscaler->sd, sd_state, pad);
else
- return &vsca->fmt[pad];
+ return &vscaler->fmt[pad];
}
static struct v4l2_rect *
-vimc_sca_pad_crop(struct vimc_sca_device *vsca,
+vimc_scaler_pad_crop(struct vimc_scaler_device *vscaler,
struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&vsca->sd, sd_state,
- VIMC_SCA_SINK);
+ return v4l2_subdev_get_try_crop(&vscaler->sd, sd_state,
+ VIMC_SCALER_SINK);
else
- return &vsca->crop_rect;
+ return &vscaler->crop_rect;
}
-static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
+static int vimc_scaler_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
- struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct vimc_scaler_device *vscaler = v4l2_get_subdevdata(sd);
- format->format = *vimc_sca_pad_format(vsca, sd_state, format->pad,
+ format->format = *vimc_scaler_pad_format(vscaler, sd_state, format->pad,
format->which);
return 0;
}
-static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
+static int vimc_scaler_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
- struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct vimc_scaler_device *vscaler = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *fmt;
/* Do not change the active format while stream is on */
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && vscaler->src_frame)
return -EBUSY;
- fmt = vimc_sca_pad_format(vsca, sd_state, format->pad, format->which);
+ fmt = vimc_scaler_pad_format(vscaler, sd_state, format->pad, format->which);
/*
* The media bus code and colorspace can only be changed on the sink
* pad, the source pad only follows.
*/
- if (format->pad == VIMC_SCA_SINK) {
+ if (format->pad == VIMC_SCALER_SINK) {
const struct vimc_pix_map *vpix;
/* Only accept code in the pix map table in non bayer format. */
@@ -211,17 +211,17 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
* Propagate the sink pad format to the crop rectangle and the source
* pad.
*/
- if (format->pad == VIMC_SCA_SINK) {
+ if (format->pad == VIMC_SCALER_SINK) {
struct v4l2_mbus_framefmt *src_fmt;
struct v4l2_rect *crop;
- crop = vimc_sca_pad_crop(vsca, sd_state, format->which);
+ crop = vimc_scaler_pad_crop(vscaler, sd_state, format->which);
crop->width = fmt->width;
crop->height = fmt->height;
crop->top = 0;
crop->left = 0;
- src_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SRC,
+ src_fmt = vimc_scaler_pad_format(vscaler, sd_state, VIMC_SCALER_SRC,
format->which);
*src_fmt = *fmt;
}
@@ -231,11 +231,11 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sca_get_selection(struct v4l2_subdev *sd,
+static int vimc_scaler_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct vimc_scaler_device *vscaler = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
if (VIMC_IS_SRC(sel->pad))
@@ -243,12 +243,12 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *vimc_sca_pad_crop(vsca, sd_state, sel->which);
+ sel->r = *vimc_scaler_pad_crop(vscaler, sd_state, sel->which);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sink_fmt = vimc_scaler_pad_format(vscaler, sd_state, VIMC_SCALER_SINK,
sel->which);
- sel->r = vimc_sca_get_crop_bound_sink(sink_fmt);
+ sel->r = vimc_scaler_get_crop_bound_sink(sink_fmt);
break;
default:
return -EINVAL;
@@ -257,22 +257,22 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
return 0;
}
-static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
+static void vimc_scaler_adjust_sink_crop(struct v4l2_rect *r,
const struct v4l2_mbus_framefmt *sink_fmt)
{
const struct v4l2_rect sink_rect =
- vimc_sca_get_crop_bound_sink(sink_fmt);
+ vimc_scaler_get_crop_bound_sink(sink_fmt);
/* Disallow rectangles smaller than the minimal one. */
v4l2_rect_set_min_size(r, &crop_rect_min);
v4l2_rect_map_inside(r, &sink_rect);
}
-static int vimc_sca_set_selection(struct v4l2_subdev *sd,
+static int vimc_scaler_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct vimc_scaler_device *vscaler = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *crop_rect;
@@ -280,165 +280,165 @@ static int vimc_sca_set_selection(struct v4l2_subdev *sd,
if (VIMC_IS_SRC(sel->pad) || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE && vscaler->src_frame)
return -EBUSY;
- crop_rect = vimc_sca_pad_crop(vsca, sd_state, sel->which);
- sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ crop_rect = vimc_scaler_pad_crop(vscaler, sd_state, sel->which);
+ sink_fmt = vimc_scaler_pad_format(vscaler, sd_state, VIMC_SCALER_SINK,
sel->which);
- vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
+ vimc_scaler_adjust_sink_crop(&sel->r, sink_fmt);
*crop_rect = sel->r;
return 0;
}
-static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
- .init_cfg = vimc_sca_init_cfg,
- .enum_mbus_code = vimc_sca_enum_mbus_code,
- .enum_frame_size = vimc_sca_enum_frame_size,
- .get_fmt = vimc_sca_get_fmt,
- .set_fmt = vimc_sca_set_fmt,
- .get_selection = vimc_sca_get_selection,
- .set_selection = vimc_sca_set_selection,
+static const struct v4l2_subdev_pad_ops vimc_scaler_pad_ops = {
+ .init_cfg = vimc_scaler_init_cfg,
+ .enum_mbus_code = vimc_scaler_enum_mbus_code,
+ .enum_frame_size = vimc_scaler_enum_frame_size,
+ .get_fmt = vimc_scaler_get_fmt,
+ .set_fmt = vimc_scaler_set_fmt,
+ .get_selection = vimc_scaler_get_selection,
+ .set_selection = vimc_scaler_set_selection,
};
-static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
+static int vimc_scaler_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct vimc_scaler_device *vscaler = v4l2_get_subdevdata(sd);
if (enable) {
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (vsca->src_frame)
+ if (vscaler->src_frame)
return 0;
/* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vsca->fmt[VIMC_SCA_SINK].code);
- vsca->bpp = vpix->bpp;
+ vpix = vimc_pix_map_by_code(vscaler->fmt[VIMC_SCALER_SINK].code);
+ vscaler->bpp = vpix->bpp;
/* Calculate the frame size of the source pad */
- frame_size = vsca->fmt[VIMC_SCA_SRC].width
- * vsca->fmt[VIMC_SCA_SRC].height * vsca->bpp;
+ frame_size = vscaler->fmt[VIMC_SCALER_SRC].width
+ * vscaler->fmt[VIMC_SCALER_SRC].height * vscaler->bpp;
/* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
*/
- vsca->src_frame = vmalloc(frame_size);
- if (!vsca->src_frame)
+ vscaler->src_frame = vmalloc(frame_size);
+ if (!vscaler->src_frame)
return -ENOMEM;
} else {
- if (!vsca->src_frame)
+ if (!vscaler->src_frame)
return 0;
- vfree(vsca->src_frame);
- vsca->src_frame = NULL;
+ vfree(vscaler->src_frame);
+ vscaler->src_frame = NULL;
}
return 0;
}
-static const struct v4l2_subdev_video_ops vimc_sca_video_ops = {
- .s_stream = vimc_sca_s_stream,
+static const struct v4l2_subdev_video_ops vimc_scaler_video_ops = {
+ .s_stream = vimc_scaler_s_stream,
};
-static const struct v4l2_subdev_ops vimc_sca_ops = {
- .pad = &vimc_sca_pad_ops,
- .video = &vimc_sca_video_ops,
+static const struct v4l2_subdev_ops vimc_scaler_ops = {
+ .pad = &vimc_scaler_pad_ops,
+ .video = &vimc_scaler_video_ops,
};
-static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
+static void vimc_scaler_fill_src_frame(const struct vimc_scaler_device *const vscaler,
const u8 *const sink_frame)
{
- const struct v4l2_mbus_framefmt *src_fmt = &vsca->fmt[VIMC_SCA_SRC];
- const struct v4l2_rect *r = &vsca->crop_rect;
- unsigned int snk_width = vsca->fmt[VIMC_SCA_SINK].width;
+ const struct v4l2_mbus_framefmt *src_fmt = &vscaler->fmt[VIMC_SCALER_SRC];
+ const struct v4l2_rect *r = &vscaler->crop_rect;
+ unsigned int snk_width = vscaler->fmt[VIMC_SCALER_SINK].width;
unsigned int src_x, src_y;
- u8 *walker = vsca->src_frame;
+ u8 *walker = vscaler->src_frame;
/* Set each pixel at the src_frame to its sink_frame equivalent */
for (src_y = 0; src_y < src_fmt->height; src_y++) {
unsigned int snk_y, y_offset;
snk_y = (src_y * r->height) / src_fmt->height + r->top;
- y_offset = snk_y * snk_width * vsca->bpp;
+ y_offset = snk_y * snk_width * vscaler->bpp;
for (src_x = 0; src_x < src_fmt->width; src_x++) {
unsigned int snk_x, x_offset, index;
snk_x = (src_x * r->width) / src_fmt->width + r->left;
- x_offset = snk_x * vsca->bpp;
+ x_offset = snk_x * vscaler->bpp;
index = y_offset + x_offset;
- memcpy(walker, &sink_frame[index], vsca->bpp);
- walker += vsca->bpp;
+ memcpy(walker, &sink_frame[index], vscaler->bpp);
+ walker += vscaler->bpp;
}
}
}
-static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
+static void *vimc_scaler_process_frame(struct vimc_ent_device *ved,
const void *sink_frame)
{
- struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ struct vimc_scaler_device *vscaler = container_of(ved, struct vimc_scaler_device,
ved);
/* If the stream in this node is not active, just return */
- if (!vsca->src_frame)
+ if (!vscaler->src_frame)
return ERR_PTR(-EINVAL);
- vimc_sca_fill_src_frame(vsca, sink_frame);
+ vimc_scaler_fill_src_frame(vscaler, sink_frame);
- return vsca->src_frame;
+ return vscaler->src_frame;
};
-static void vimc_sca_release(struct vimc_ent_device *ved)
+static void vimc_scaler_release(struct vimc_ent_device *ved)
{
- struct vimc_sca_device *vsca =
- container_of(ved, struct vimc_sca_device, ved);
+ struct vimc_scaler_device *vscaler =
+ container_of(ved, struct vimc_scaler_device, ved);
- media_entity_cleanup(vsca->ved.ent);
- kfree(vsca);
+ media_entity_cleanup(vscaler->ved.ent);
+ kfree(vscaler);
}
-static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+static struct vimc_ent_device *vimc_scaler_add(struct vimc_device *vimc,
const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
- struct vimc_sca_device *vsca;
+ struct vimc_scaler_device *vscaler;
int ret;
- /* Allocate the vsca struct */
- vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
- if (!vsca)
+ /* Allocate the vscaler struct */
+ vscaler = kzalloc(sizeof(*vscaler), GFP_KERNEL);
+ if (!vscaler)
return ERR_PTR(-ENOMEM);
/* Initialize ved and sd */
- vsca->pads[VIMC_SCA_SINK].flags = MEDIA_PAD_FL_SINK;
- vsca->pads[VIMC_SCA_SRC].flags = MEDIA_PAD_FL_SOURCE;
+ vscaler->pads[VIMC_SCALER_SINK].flags = MEDIA_PAD_FL_SINK;
+ vscaler->pads[VIMC_SCALER_SRC].flags = MEDIA_PAD_FL_SOURCE;
- ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
+ ret = vimc_ent_sd_register(&vscaler->ved, &vscaler->sd, v4l2_dev,
vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
- vsca->pads, &vimc_sca_ops);
+ vscaler->pads, &vimc_scaler_ops);
if (ret) {
- kfree(vsca);
+ kfree(vscaler);
return ERR_PTR(ret);
}
- vsca->ved.process_frame = vimc_sca_process_frame;
- vsca->ved.dev = vimc->mdev.dev;
+ vscaler->ved.process_frame = vimc_scaler_process_frame;
+ vscaler->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
- vsca->fmt[VIMC_SCA_SINK] = fmt_default;
- vsca->fmt[VIMC_SCA_SRC] = fmt_default;
+ vscaler->fmt[VIMC_SCALER_SINK] = fmt_default;
+ vscaler->fmt[VIMC_SCALER_SRC] = fmt_default;
/* Initialize the crop selection */
- vsca->crop_rect = crop_rect_default;
+ vscaler->crop_rect = crop_rect_default;
- return &vsca->ved;
+ return &vscaler->ved;
}
-struct vimc_ent_type vimc_sca_type = {
- .add = vimc_sca_add,
- .release = vimc_sca_release
+struct vimc_ent_type vimc_scaler_type = {
+ .add = vimc_scaler_add,
+ .release = vimc_scaler_release
};
diff --git a/drivers/media/test-drivers/vimc/vimc-sensor.c b/drivers/media/test-drivers/vimc/vimc-sensor.c
index 74ab79cadb5d..41a3dce2d714 100644
--- a/drivers/media/test-drivers/vimc/vimc-sensor.c
+++ b/drivers/media/test-drivers/vimc/vimc-sensor.c
@@ -14,18 +14,18 @@
#include "vimc-common.h"
-enum vimc_sen_osd_mode {
- VIMC_SEN_OSD_SHOW_ALL = 0,
- VIMC_SEN_OSD_SHOW_COUNTERS = 1,
- VIMC_SEN_OSD_SHOW_NONE = 2
+enum vimc_sensor_osd_mode {
+ VIMC_SENSOR_OSD_SHOW_ALL = 0,
+ VIMC_SENSOR_OSD_SHOW_COUNTERS = 1,
+ VIMC_SENSOR_OSD_SHOW_NONE = 2
};
-struct vimc_sen_device {
+struct vimc_sensor_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
struct tpg_data tpg;
u8 *frame;
- enum vimc_sen_osd_mode osd_value;
+ enum vimc_sensor_osd_mode osd_value;
u64 start_stream_ts;
/* The active format */
struct v4l2_mbus_framefmt mbus_format;
@@ -41,8 +41,8 @@ static const struct v4l2_mbus_framefmt fmt_default = {
.colorspace = V4L2_COLORSPACE_SRGB,
};
-static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int vimc_sensor_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
unsigned int i;
@@ -56,9 +56,9 @@ static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
+static int vimc_sensor_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
{
u32 mbus_code = vimc_mbus_code_by_index(code->index);
@@ -70,9 +70,9 @@ static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
+static int vimc_sensor_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
{
const struct vimc_pix_map *vpix;
@@ -92,39 +92,39 @@ static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+static int vimc_sensor_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
- struct vimc_sen_device *vsen =
- container_of(sd, struct vimc_sen_device, sd);
+ struct vimc_sensor_device *vsensor =
+ container_of(sd, struct vimc_sensor_device, sd);
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
*v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) :
- vsen->mbus_format;
+ vsensor->mbus_format;
return 0;
}
-static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
+static void vimc_sensor_tpg_s_format(struct vimc_sensor_device *vsensor)
{
const struct vimc_pix_map *vpix =
- vimc_pix_map_by_code(vsen->mbus_format.code);
+ vimc_pix_map_by_code(vsensor->mbus_format.code);
- tpg_reset_source(&vsen->tpg, vsen->mbus_format.width,
- vsen->mbus_format.height, vsen->mbus_format.field);
- tpg_s_bytesperline(&vsen->tpg, 0, vsen->mbus_format.width * vpix->bpp);
- tpg_s_buf_height(&vsen->tpg, vsen->mbus_format.height);
- tpg_s_fourcc(&vsen->tpg, vpix->pixelformat);
+ tpg_reset_source(&vsensor->tpg, vsensor->mbus_format.width,
+ vsensor->mbus_format.height, vsensor->mbus_format.field);
+ tpg_s_bytesperline(&vsensor->tpg, 0, vsensor->mbus_format.width * vpix->bpp);
+ tpg_s_buf_height(&vsensor->tpg, vsensor->mbus_format.height);
+ tpg_s_fourcc(&vsensor->tpg, vpix->pixelformat);
/* TODO: add support for V4L2_FIELD_ALTERNATE */
- tpg_s_field(&vsen->tpg, vsen->mbus_format.field, false);
- tpg_s_colorspace(&vsen->tpg, vsen->mbus_format.colorspace);
- tpg_s_ycbcr_enc(&vsen->tpg, vsen->mbus_format.ycbcr_enc);
- tpg_s_quantization(&vsen->tpg, vsen->mbus_format.quantization);
- tpg_s_xfer_func(&vsen->tpg, vsen->mbus_format.xfer_func);
+ tpg_s_field(&vsensor->tpg, vsensor->mbus_format.field, false);
+ tpg_s_colorspace(&vsensor->tpg, vsensor->mbus_format.colorspace);
+ tpg_s_ycbcr_enc(&vsensor->tpg, vsensor->mbus_format.ycbcr_enc);
+ tpg_s_quantization(&vsensor->tpg, vsensor->mbus_format.quantization);
+ tpg_s_xfer_func(&vsensor->tpg, vsensor->mbus_format.xfer_func);
}
-static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
+static void vimc_sensor_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
{
const struct vimc_pix_map *vpix;
@@ -145,29 +145,29 @@ static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
vimc_colorimetry_clamp(fmt);
}
-static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+static int vimc_sensor_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
- struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
+ struct vimc_sensor_device *vsensor = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vsen->frame)
+ if (vsensor->frame)
return -EBUSY;
- mf = &vsen->mbus_format;
+ mf = &vsensor->mbus_format;
} else {
mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
}
/* Set the new format */
- vimc_sen_adjust_fmt(&fmt->format);
+ vimc_sensor_adjust_fmt(&fmt->format);
- dev_dbg(vsen->ved.dev, "%s: format update: "
+ dev_dbg(vsensor->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
- "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsensor->sd.name,
/* old */
mf->width, mf->height, mf->code,
mf->colorspace, mf->quantization,
@@ -182,146 +182,147 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
- .init_cfg = vimc_sen_init_cfg,
- .enum_mbus_code = vimc_sen_enum_mbus_code,
- .enum_frame_size = vimc_sen_enum_frame_size,
- .get_fmt = vimc_sen_get_fmt,
- .set_fmt = vimc_sen_set_fmt,
+static const struct v4l2_subdev_pad_ops vimc_sensor_pad_ops = {
+ .init_cfg = vimc_sensor_init_cfg,
+ .enum_mbus_code = vimc_sensor_enum_mbus_code,
+ .enum_frame_size = vimc_sensor_enum_frame_size,
+ .get_fmt = vimc_sensor_get_fmt,
+ .set_fmt = vimc_sensor_set_fmt,
};
-static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
- const void *sink_frame)
+static void *vimc_sensor_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
- struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
- ved);
+ struct vimc_sensor_device *vsensor =
+ container_of(ved, struct vimc_sensor_device, ved);
+
const unsigned int line_height = 16;
u8 *basep[TPG_MAX_PLANES][2];
unsigned int line = 1;
char str[100];
- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
- tpg_calc_text_basep(&vsen->tpg, basep, 0, vsen->frame);
- switch (vsen->osd_value) {
- case VIMC_SEN_OSD_SHOW_ALL: {
- const char *order = tpg_g_color_order(&vsen->tpg);
+ tpg_fill_plane_buffer(&vsensor->tpg, 0, 0, vsensor->frame);
+ tpg_calc_text_basep(&vsensor->tpg, basep, 0, vsensor->frame);
+ switch (vsensor->osd_value) {
+ case VIMC_SENSOR_OSD_SHOW_ALL: {
+ const char *order = tpg_g_color_order(&vsensor->tpg);
- tpg_gen_text(&vsen->tpg, basep, line++ * line_height,
+ tpg_gen_text(&vsensor->tpg, basep, line++ * line_height,
16, order);
snprintf(str, sizeof(str),
"brightness %3d, contrast %3d, saturation %3d, hue %d ",
- vsen->tpg.brightness,
- vsen->tpg.contrast,
- vsen->tpg.saturation,
- vsen->tpg.hue);
- tpg_gen_text(&vsen->tpg, basep, line++ * line_height, 16, str);
+ vsensor->tpg.brightness,
+ vsensor->tpg.contrast,
+ vsensor->tpg.saturation,
+ vsensor->tpg.hue);
+ tpg_gen_text(&vsensor->tpg, basep, line++ * line_height, 16, str);
snprintf(str, sizeof(str), "sensor size: %dx%d",
- vsen->mbus_format.width,
- vsen->mbus_format.height);
- tpg_gen_text(&vsen->tpg, basep, line++ * line_height, 16, str);
+ vsensor->mbus_format.width,
+ vsensor->mbus_format.height);
+ tpg_gen_text(&vsensor->tpg, basep, line++ * line_height, 16, str);
fallthrough;
}
- case VIMC_SEN_OSD_SHOW_COUNTERS: {
+ case VIMC_SENSOR_OSD_SHOW_COUNTERS: {
unsigned int ms;
- ms = div_u64(ktime_get_ns() - vsen->start_stream_ts, 1000000);
+ ms = div_u64(ktime_get_ns() - vsensor->start_stream_ts, 1000000);
snprintf(str, sizeof(str), "%02d:%02d:%02d:%03d",
(ms / (60 * 60 * 1000)) % 24,
(ms / (60 * 1000)) % 60,
(ms / 1000) % 60,
ms % 1000);
- tpg_gen_text(&vsen->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(&vsensor->tpg, basep, line++ * line_height, 16, str);
break;
}
- case VIMC_SEN_OSD_SHOW_NONE:
+ case VIMC_SENSOR_OSD_SHOW_NONE:
default:
break;
}
- return vsen->frame;
+ return vsensor->frame;
}
-static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
+static int vimc_sensor_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct vimc_sen_device *vsen =
- container_of(sd, struct vimc_sen_device, sd);
+ struct vimc_sensor_device *vsensor =
+ container_of(sd, struct vimc_sensor_device, sd);
if (enable) {
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- vsen->start_stream_ts = ktime_get_ns();
+ vsensor->start_stream_ts = ktime_get_ns();
/* Calculate the frame size */
- vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
- frame_size = vsen->mbus_format.width * vpix->bpp *
- vsen->mbus_format.height;
+ vpix = vimc_pix_map_by_code(vsensor->mbus_format.code);
+ frame_size = vsensor->mbus_format.width * vpix->bpp *
+ vsensor->mbus_format.height;
/*
* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
*/
- vsen->frame = vmalloc(frame_size);
- if (!vsen->frame)
+ vsensor->frame = vmalloc(frame_size);
+ if (!vsensor->frame)
return -ENOMEM;
/* configure the test pattern generator */
- vimc_sen_tpg_s_format(vsen);
+ vimc_sensor_tpg_s_format(vsensor);
} else {
- vfree(vsen->frame);
- vsen->frame = NULL;
+ vfree(vsensor->frame);
+ vsensor->frame = NULL;
}
return 0;
}
-static const struct v4l2_subdev_core_ops vimc_sen_core_ops = {
+static const struct v4l2_subdev_core_ops vimc_sensor_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
-static const struct v4l2_subdev_video_ops vimc_sen_video_ops = {
- .s_stream = vimc_sen_s_stream,
+static const struct v4l2_subdev_video_ops vimc_sensor_video_ops = {
+ .s_stream = vimc_sensor_s_stream,
};
-static const struct v4l2_subdev_ops vimc_sen_ops = {
- .core = &vimc_sen_core_ops,
- .pad = &vimc_sen_pad_ops,
- .video = &vimc_sen_video_ops,
+static const struct v4l2_subdev_ops vimc_sensor_ops = {
+ .core = &vimc_sensor_core_ops,
+ .pad = &vimc_sensor_pad_ops,
+ .video = &vimc_sensor_video_ops,
};
-static int vimc_sen_s_ctrl(struct v4l2_ctrl *ctrl)
+static int vimc_sensor_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct vimc_sen_device *vsen =
- container_of(ctrl->handler, struct vimc_sen_device, hdl);
+ struct vimc_sensor_device *vsensor =
+ container_of(ctrl->handler, struct vimc_sensor_device, hdl);
switch (ctrl->id) {
case VIMC_CID_TEST_PATTERN:
- tpg_s_pattern(&vsen->tpg, ctrl->val);
+ tpg_s_pattern(&vsensor->tpg, ctrl->val);
break;
case V4L2_CID_HFLIP:
- tpg_s_hflip(&vsen->tpg, ctrl->val);
+ tpg_s_hflip(&vsensor->tpg, ctrl->val);
break;
case V4L2_CID_VFLIP:
- tpg_s_vflip(&vsen->tpg, ctrl->val);
+ tpg_s_vflip(&vsensor->tpg, ctrl->val);
break;
case V4L2_CID_BRIGHTNESS:
- tpg_s_brightness(&vsen->tpg, ctrl->val);
+ tpg_s_brightness(&vsensor->tpg, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- tpg_s_contrast(&vsen->tpg, ctrl->val);
+ tpg_s_contrast(&vsensor->tpg, ctrl->val);
break;
case V4L2_CID_HUE:
- tpg_s_hue(&vsen->tpg, ctrl->val);
+ tpg_s_hue(&vsensor->tpg, ctrl->val);
break;
case V4L2_CID_SATURATION:
- tpg_s_saturation(&vsen->tpg, ctrl->val);
+ tpg_s_saturation(&vsensor->tpg, ctrl->val);
break;
case VIMC_CID_OSD_TEXT_MODE:
- vsen->osd_value = ctrl->val;
+ vsensor->osd_value = ctrl->val;
break;
default:
return -EINVAL;
@@ -329,31 +330,31 @@ static int vimc_sen_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
-static const struct v4l2_ctrl_ops vimc_sen_ctrl_ops = {
- .s_ctrl = vimc_sen_s_ctrl,
+static const struct v4l2_ctrl_ops vimc_sensor_ctrl_ops = {
+ .s_ctrl = vimc_sensor_s_ctrl,
};
-static void vimc_sen_release(struct vimc_ent_device *ved)
+static void vimc_sensor_release(struct vimc_ent_device *ved)
{
- struct vimc_sen_device *vsen =
- container_of(ved, struct vimc_sen_device, ved);
+ struct vimc_sensor_device *vsensor =
+ container_of(ved, struct vimc_sensor_device, ved);
- v4l2_ctrl_handler_free(&vsen->hdl);
- tpg_free(&vsen->tpg);
- media_entity_cleanup(vsen->ved.ent);
- kfree(vsen);
+ v4l2_ctrl_handler_free(&vsensor->hdl);
+ tpg_free(&vsensor->tpg);
+ media_entity_cleanup(vsensor->ved.ent);
+ kfree(vsensor);
}
/* Image Processing Controls */
-static const struct v4l2_ctrl_config vimc_sen_ctrl_class = {
+static const struct v4l2_ctrl_config vimc_sensor_ctrl_class = {
.flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
.id = VIMC_CID_VIMC_CLASS,
.name = "VIMC Controls",
.type = V4L2_CTRL_TYPE_CTRL_CLASS,
};
-static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
- .ops = &vimc_sen_ctrl_ops,
+static const struct v4l2_ctrl_config vimc_sensor_ctrl_test_pattern = {
+ .ops = &vimc_sensor_ctrl_ops,
.id = VIMC_CID_TEST_PATTERN,
.name = "Test Pattern",
.type = V4L2_CTRL_TYPE_MENU,
@@ -368,8 +369,8 @@ static const char * const vimc_ctrl_osd_mode_strings[] = {
NULL,
};
-static const struct v4l2_ctrl_config vimc_sen_ctrl_osd_mode = {
- .ops = &vimc_sen_ctrl_ops,
+static const struct v4l2_ctrl_config vimc_sensor_ctrl_osd_mode = {
+ .ops = &vimc_sensor_ctrl_ops,
.id = VIMC_CID_OSD_TEXT_MODE,
.name = "Show Information",
.type = V4L2_CTRL_TYPE_MENU,
@@ -377,76 +378,76 @@ static const struct v4l2_ctrl_config vimc_sen_ctrl_osd_mode = {
.qmenu = vimc_ctrl_osd_mode_strings,
};
-static struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
- const char *vcfg_name)
+static struct vimc_ent_device *vimc_sensor_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
- struct vimc_sen_device *vsen;
+ struct vimc_sensor_device *vsensor;
int ret;
- /* Allocate the vsen struct */
- vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
- if (!vsen)
+ /* Allocate the vsensor struct */
+ vsensor = kzalloc(sizeof(*vsensor), GFP_KERNEL);
+ if (!vsensor)
return ERR_PTR(-ENOMEM);
- v4l2_ctrl_handler_init(&vsen->hdl, 4);
+ v4l2_ctrl_handler_init(&vsensor->hdl, 4);
- v4l2_ctrl_new_custom(&vsen->hdl, &vimc_sen_ctrl_class, NULL);
- v4l2_ctrl_new_custom(&vsen->hdl, &vimc_sen_ctrl_test_pattern, NULL);
- v4l2_ctrl_new_custom(&vsen->hdl, &vimc_sen_ctrl_osd_mode, NULL);
- v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ v4l2_ctrl_new_custom(&vsensor->hdl, &vimc_sensor_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vsensor->hdl, &vimc_sensor_ctrl_test_pattern, NULL);
+ v4l2_ctrl_new_custom(&vsensor->hdl, &vimc_sensor_ctrl_osd_mode, NULL);
+ v4l2_ctrl_new_std(&vsensor->hdl, &vimc_sensor_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ v4l2_ctrl_new_std(&vsensor->hdl, &vimc_sensor_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ v4l2_ctrl_new_std(&vsensor->hdl, &vimc_sensor_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
- v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ v4l2_ctrl_new_std(&vsensor->hdl, &vimc_sensor_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, 128);
- v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ v4l2_ctrl_new_std(&vsensor->hdl, &vimc_sensor_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
- v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ v4l2_ctrl_new_std(&vsensor->hdl, &vimc_sensor_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, 128);
- vsen->sd.ctrl_handler = &vsen->hdl;
- if (vsen->hdl.error) {
- ret = vsen->hdl.error;
- goto err_free_vsen;
+ vsensor->sd.ctrl_handler = &vsensor->hdl;
+ if (vsensor->hdl.error) {
+ ret = vsensor->hdl.error;
+ goto err_free_vsensor;
}
/* Initialize the test pattern generator */
- tpg_init(&vsen->tpg, vsen->mbus_format.width,
- vsen->mbus_format.height);
- ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
+ tpg_init(&vsensor->tpg, vsensor->mbus_format.width,
+ vsensor->mbus_format.height);
+ ret = tpg_alloc(&vsensor->tpg, VIMC_FRAME_MAX_WIDTH);
if (ret)
goto err_free_hdl;
/* Initialize ved and sd */
- vsen->pad.flags = MEDIA_PAD_FL_SOURCE;
- ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
+ vsensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = vimc_ent_sd_register(&vsensor->ved, &vsensor->sd, v4l2_dev,
vcfg_name,
- MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad,
- &vimc_sen_ops);
+ MEDIA_ENT_F_CAM_SENSOR, 1, &vsensor->pad,
+ &vimc_sensor_ops);
if (ret)
goto err_free_tpg;
- vsen->ved.process_frame = vimc_sen_process_frame;
- vsen->ved.dev = vimc->mdev.dev;
+ vsensor->ved.process_frame = vimc_sensor_process_frame;
+ vsensor->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
- vsen->mbus_format = fmt_default;
+ vsensor->mbus_format = fmt_default;
- return &vsen->ved;
+ return &vsensor->ved;
err_free_tpg:
- tpg_free(&vsen->tpg);
+ tpg_free(&vsensor->tpg);
err_free_hdl:
- v4l2_ctrl_handler_free(&vsen->hdl);
-err_free_vsen:
- kfree(vsen);
+ v4l2_ctrl_handler_free(&vsensor->hdl);
+err_free_vsensor:
+ kfree(vsensor);
return ERR_PTR(ret);
}
-struct vimc_ent_type vimc_sen_type = {
- .add = vimc_sen_add,
- .release = vimc_sen_release
+struct vimc_ent_type vimc_sensor_type = {
+ .add = vimc_sensor_add,
+ .release = vimc_sensor_release
};
diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c
index 65feb3c596db..807551a5143b 100644
--- a/drivers/media/test-drivers/vimc/vimc-streamer.c
+++ b/drivers/media/test-drivers/vimc/vimc-streamer.c
@@ -30,7 +30,7 @@ static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
for (i = 0; i < ent->num_pads; i++) {
if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
continue;
- pad = media_entity_remote_pad(&ent->pads[i]);
+ pad = media_pad_remote_pad_first(&ent->pads[i]);
return pad ? pad->entity : NULL;
}
return NULL;
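The hunk above switches vimc-streamer from media_entity_remote_pad() to media_pad_remote_pad_first(), the renamed media-controller helper that returns the first remote pad linked to a given pad. As a rough illustration (not part of the patch; the entity walk and function name are made up, and <media/media-entity.h> is assumed), the typical lookup pattern looks like this:

/* Sketch: follow a sink pad to the entity feeding it. */
static struct media_entity *example_upstream_entity(struct media_entity *ent)
{
	unsigned int i;

	for (i = 0; i < ent->num_pads; i++) {
		struct media_pad *remote;

		if (!(ent->pads[i].flags & MEDIA_PAD_FL_SINK))
			continue;

		remote = media_pad_remote_pad_first(&ent->pads[i]);
		return remote ? remote->entity : NULL;
	}

	return NULL;
}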
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index e7516dc1227b..a78d676575bc 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -34,6 +34,7 @@
#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
#define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11)
#define VIVID_CID_RO_INTEGER (VIVID_CID_CUSTOM_BASE + 12)
+#define VIVID_CID_U32_DYN_ARRAY (VIVID_CID_CUSTOM_BASE + 13)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -46,6 +47,7 @@
#define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6)
#define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7)
#define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8)
+#define VIVID_CID_INSERT_HDMI_VIDEO_GUARD_BAND (VIVID_CID_VIVID_BASE + 9)
#define VIVID_CID_HFLIP (VIVID_CID_VIVID_BASE + 20)
#define VIVID_CID_VFLIP (VIVID_CID_VIVID_BASE + 21)
@@ -189,6 +191,19 @@ static const struct v4l2_ctrl_config vivid_ctrl_u32_array = {
.dims = { 1 },
};
+static const struct v4l2_ctrl_config vivid_ctrl_u32_dyn_array = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U32_DYN_ARRAY,
+ .name = "U32 Dynamic Array",
+ .type = V4L2_CTRL_TYPE_U32,
+ .flags = V4L2_CTRL_FLAG_DYNAMIC_ARRAY,
+ .def = 50,
+ .min = 10,
+ .max = 90,
+ .step = 1,
+ .dims = { 100 },
+};
+
static const struct v4l2_ctrl_config vivid_ctrl_u16_matrix = {
.ops = &vivid_user_gen_ctrl_ops,
.id = VIVID_CID_U16_MATRIX,
@@ -474,6 +489,9 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
case VIVID_CID_INSERT_EAV:
tpg_s_insert_eav(&dev->tpg, ctrl->val);
break;
+ case VIVID_CID_INSERT_HDMI_VIDEO_GUARD_BAND:
+ tpg_s_insert_hdmi_video_guard_band(&dev->tpg, ctrl->val);
+ break;
case VIVID_CID_HFLIP:
dev->sensor_hflip = ctrl->val;
tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip);
@@ -660,6 +678,15 @@ static const struct v4l2_ctrl_config vivid_ctrl_insert_eav = {
.step = 1,
};
+static const struct v4l2_ctrl_config vivid_ctrl_insert_hdmi_video_guard_band = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_INSERT_HDMI_VIDEO_GUARD_BAND,
+ .name = "Insert Video Guard Band",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
static const struct v4l2_ctrl_config vivid_ctrl_hflip = {
.ops = &vivid_vid_cap_ctrl_ops,
.id = VIVID_CID_HFLIP,
@@ -1612,6 +1639,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->ro_int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_ro_int32, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_dyn_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
@@ -1638,6 +1666,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vflip, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_sav, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_eav, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_hdmi_video_guard_band, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_reduced_fps, NULL);
if (show_ccs_cap) {
dev->ctrl_has_crop_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
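The new VIVID_CID_U32_DYN_ARRAY control above carries V4L2_CTRL_FLAG_DYNAMIC_ARRAY, so userspace chooses how many elements the array holds (up to dims[0] = 100) through the byte size it passes with the extended-control ioctls. A minimal userspace sketch follows; it is an illustration only and assumes the control ID was already obtained with VIDIOC_QUERY_EXT_CTRL, since the VIVID_CID_* constants are driver-internal.

#include <linux/videodev2.h>
#include <sys/ioctl.h>

/* Set seven elements of the dynamic array (values must stay within 10..90). */
int set_dyn_array(int fd, __u32 ctrl_id)
{
	__u32 values[7] = { 10, 20, 30, 40, 50, 60, 70 };
	struct v4l2_ext_control ctrl = {
		.id = ctrl_id,
		.size = sizeof(values),	/* byte size selects the element count */
		.p_u32 = values,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}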
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-common.c b/drivers/media/test-drivers/vivid/vivid-vid-common.c
index 19701fe72030..38d788b5cf19 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-common.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.c
@@ -199,6 +199,21 @@ struct vivid_fmt vivid_formats[] = {
.buffers = 1,
},
{
+ .fourcc = V4L2_PIX_FMT_YUVA32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0xff000000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUVX32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_GREY,
.vdownsampling = { 1 },
.bit_depth = { 8 },
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 8de08704f8e4..af88e0766388 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -17,7 +17,6 @@ source "drivers/media/usb/cpia2/Kconfig"
source "drivers/media/usb/gspca/Kconfig"
source "drivers/media/usb/pwc/Kconfig"
source "drivers/media/usb/s2255/Kconfig"
-source "drivers/media/usb/stkwebcam/Kconfig"
source "drivers/media/usb/usbtv/Kconfig"
source "drivers/media/usb/uvc/Kconfig"
source "drivers/media/usb/zr364xx/Kconfig"
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 044bd46c799c..25fa2015b179 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -10,7 +10,6 @@ obj-y += dvb-usb/
obj-y += dvb-usb-v2/
obj-y += s2255/
obj-y += siano/
-obj-y += stkwebcam/
obj-y += ttusb-budget/
obj-y += ttusb-dec/
obj-y += zr364xx/
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index d568452618d1..240a7cc56777 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -123,7 +123,7 @@ struct airspy {
/* USB control message buffer */
#define BUF_SIZE 128
- u8 buf[BUF_SIZE];
+ u8 *buf;
/* Current configuration */
unsigned int f_adc;
@@ -856,6 +856,7 @@ static void airspy_video_release(struct v4l2_device *v)
v4l2_ctrl_handler_free(&s->hdl);
v4l2_device_unregister(&s->v4l2_dev);
+ kfree(s->buf);
kfree(s);
}
@@ -963,7 +964,10 @@ static int airspy_probe(struct usb_interface *intf,
{
struct airspy *s;
int ret;
- u8 u8tmp, buf[BUF_SIZE];
+ u8 u8tmp, *buf;
+
+ buf = NULL;
+ ret = -ENOMEM;
s = kzalloc(sizeof(struct airspy), GFP_KERNEL);
if (s == NULL) {
@@ -971,6 +975,13 @@ static int airspy_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ s->buf = kzalloc(BUF_SIZE, GFP_KERNEL);
+ if (!s->buf)
+ goto err_free_mem;
+ buf = kzalloc(BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto err_free_mem;
+
mutex_init(&s->v4l2_lock);
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
@@ -1068,6 +1079,8 @@ err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
+ kfree(buf);
+ kfree(s->buf);
kfree(s);
return ret;
}
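The airspy hunks above move the USB control-message buffers off the stack (and out of a directly embedded array) into kzalloc()'d memory, freed on the error and release paths; buffers handed to the USB core must be DMA-capable, which stack memory is not guaranteed to be. A minimal sketch of the pattern follows — illustration only, the function and parameter names are made up, and <linux/usb.h> plus <linux/slab.h> are assumed.

/* Heap-allocate the transfer buffer; never pass a stack array to the USB core. */
static int example_vendor_read(struct usb_device *udev, u8 request,
			       u16 value, u16 index, unsigned int len)
{
	u8 *buf;
	int ret;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_IN, value, index,
			      buf, len, 1000);

	kfree(buf);
	return ret;
}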
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 4d5ab1433b44..ce1b0d9e0741 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -10,16 +10,6 @@
//
// This driver is based on my previous au600 usb pstn audio driver
// and inherits all the copyrights
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index d1e66b503f4d..b5f58dc6dd0f 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -4,16 +4,6 @@
//
// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
// Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index ae25d2cbfdfe..4d037c92af7c 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -8,16 +8,6 @@
// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index c837cc528a33..61d7bf701d57 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -7,16 +7,6 @@
// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 471bd74667e3..185e89c18d68 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -16,10 +16,6 @@
// Based on cx88-dvb, saa7134-dvb and videobuf-dvb originally written by:
// (c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
// (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation version 2 of the License.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index b9a8d3fbad1a..a7eb11f7fb34 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -7,16 +7,6 @@
// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 0b6d77c3bec8..5f3b00869bdb 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -6,16 +6,6 @@
// Markus Rechberger <mrechberger@gmail.com>
// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
index 6216cdd182f3..8455dcfdaf81 100644
--- a/drivers/media/usb/em28xx/em28xx-v4l.h
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -4,15 +4,6 @@
* video capture devices
*
* Copyright (C) 2013-2014 Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 63c48361d3f2..b253c44c9724 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -5,16 +5,6 @@
// Copyright (C) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
//
// This work was sponsored by EyeMagnet Limited.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 6b84c3413e83..8181c0e6a25b 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -11,16 +11,6 @@
//
// Some parts based on SN9C10x PC Camera Controllers GPL driver made
// by Luca Risolia <luca.risolia@studio.unibo.it>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 7fc0b68a4a22..db18dd814a67 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -8,16 +8,6 @@
* Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
*
* Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _EM28XX_H
diff --git a/drivers/media/usb/gspca/spca501.c b/drivers/media/usb/gspca/spca501.c
index ecc97f807cfa..f7c75d7535c4 100644
--- a/drivers/media/usb/gspca/spca501.c
+++ b/drivers/media/usb/gspca/spca501.c
@@ -488,7 +488,7 @@ static const __u16 spca501_init_data[][3] = {
/* Data for video camera init before capture.
* Capture and decoding by Colin Peart.
- * This is is for the 3com HomeConnect Lite which is spca501a based.
+ * This is for the 3com HomeConnect Lite which is spca501a based.
*/
static const __u16 spca501_3com_open_data[][3] = {
/* bmRequest,value,index */
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index cc87c24dd24c..acfb9a195106 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -817,7 +817,7 @@ static void cit_model2_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
* 00_d141_0124
* 00_0096_0127
* 00_fea8_0124
-*/
+ */
static void cit_model3_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
{
cit_write_reg(gspca_dev, 0x0078, 0x012d);
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 60e57e0f1927..fd7d2a9d0449 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -409,7 +409,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
struct hdpvr_device *dev = video_drvdata(file);
struct hdpvr_buffer *buf = NULL;
struct urb *urb;
- unsigned int ret = 0;
+ int ret = 0;
int rem, cnt;
if (*pos)
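The hdpvr change above turns ret from unsigned int into int: later in hdpvr_read() the variable receives negative error codes, and with an unsigned type an `if (ret < 0)` test can never be true. A tiny illustration of that bug class (not driver code; identifiers are made up):

/* With an unsigned type the error check below is dead code. */
static ssize_t example_read(void)
{
	unsigned int ret;

	ret = -EAGAIN;		/* the negative errno wraps to a huge positive value */
	if (ret < 0)		/* always false for an unsigned type */
		return ret;	/* never reached */

	return 0;
}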
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index a9666373af6b..62ff1fa1c753 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2610,6 +2610,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
del_timer_sync(&hdw->encoder_run_timer);
del_timer_sync(&hdw->encoder_wait_timer);
flush_work(&hdw->workpoll);
+ v4l2_device_unregister(&hdw->v4l2_dev);
usb_free_urb(hdw->ctl_read_urb);
usb_free_urb(hdw->ctl_write_urb);
kfree(hdw->ctl_read_buffer);
@@ -5040,7 +5041,7 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
/* Note: There apparently is no replacement for VIDIOC_CROPCAP
using v4l2-subdev - therefore we can't support that AT ALL right
now. (Of course, no sub-drivers seem to implement it either.
- But now it's a a chicken and egg problem...) */
+ But now it's a chicken and egg problem...) */
v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, g_tuner, vtp);
pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll type=%u strength=%u audio=0x%x cap=0x%x low=%u hi=%u",
vtp->type,
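The pvrusb2 hunk adds the v4l2_device_unregister() that was missing from the teardown path of pvr2_hdw_create(), pairing it with the earlier v4l2_device_register() so a failed probe no longer leaks the registration. A hedged sketch of that pairing (identifiers and the intermediate step are illustrative, not the driver's actual code):

static struct pvr2_hdw *example_create(struct usb_interface *intf,
				       struct pvr2_hdw *hdw)
{
	if (v4l2_device_register(&intf->dev, &hdw->v4l2_dev))
		goto err_free;

	if (example_finish_setup(hdw))	/* hypothetical later step */
		goto err_unregister;

	return hdw;

err_unregister:
	v4l2_device_unregister(&hdw->v4l2_dev);
err_free:
	kfree(hdw);
	return NULL;
}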
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index a714ad77ca8e..1e30e05953dc 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -136,6 +136,8 @@ static uint16_t usbtv_norm_to_16f_reg(v4l2_std_id norm)
return 0x00a8;
if (norm & (V4L2_STD_PAL_M | V4L2_STD_PAL_60))
return 0x00bc;
+ if (norm & V4L2_STD_PAL_Nc)
+ return 0x00fe;
/* Fallback to automatic detection for other standards */
return 0x0000;
}
@@ -241,7 +243,8 @@ static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm)
static const v4l2_std_id ntsc_mask =
V4L2_STD_NTSC | V4L2_STD_NTSC_443;
static const v4l2_std_id pal_mask =
- V4L2_STD_PAL | V4L2_STD_PAL_60 | V4L2_STD_PAL_M;
+ V4L2_STD_PAL | V4L2_STD_PAL_60 | V4L2_STD_PAL_M |
+ V4L2_STD_PAL_Nc;
if (norm & ntsc_mask)
ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc));
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 77a368e90fd0..b9fa7c0088c9 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -68,7 +68,8 @@
#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
-#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL | V4L2_STD_SECAM)
+#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL | \
+ V4L2_STD_PAL_Nc | V4L2_STD_SECAM)
/* parameters for supported TV norms */
struct usbtv_norm_params {
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 0e78233fc8a0..8c208db9600b 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -366,6 +366,7 @@ static const struct uvc_menu_info power_line_frequency_controls[] = {
{ 0, "Disabled" },
{ 1, "50 Hz" },
{ 2, "60 Hz" },
+ { 3, "Auto" },
};
static const struct uvc_menu_info exposure_auto_controls[] = {
@@ -505,17 +506,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls),
- },
- {
.id = V4L2_CID_HUE_AUTO,
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_AUTO_CONTROL,
@@ -730,6 +720,34 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
};
+static const struct uvc_control_mapping uvc_ctrl_mappings_uvc11[] = {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_info = power_line_frequency_controls,
+ .menu_count = ARRAY_SIZE(power_line_frequency_controls) - 1,
+ },
+};
+
+static const struct uvc_control_mapping uvc_ctrl_mappings_uvc15[] = {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_info = power_line_frequency_controls,
+ .menu_count = ARRAY_SIZE(power_line_frequency_controls),
+ },
+};
+
/* ------------------------------------------------------------------------
* Utility functions
*/
@@ -749,7 +767,8 @@ static inline void uvc_clear_bit(u8 *data, int bit)
data[bit >> 3] &= ~(1 << (bit & 7));
}
-/* Extract the bit string specified by mapping->offset and mapping->size
+/*
+ * Extract the bit string specified by mapping->offset and mapping->size
* from the little-endian data stored at 'data' and return the result as
* a signed 32bit integer. Sign extension will be performed if the mapping
* references a signed data type.
@@ -785,7 +804,8 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
return value;
}
-/* Set the bit string specified by mapping->offset and mapping->size
+/*
+ * Set the bit string specified by mapping->offset and mapping->size
* in the little-endian data stored at 'data' to the value 'value'.
*/
static void uvc_set_le_value(struct uvc_control_mapping *mapping,
@@ -795,7 +815,8 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping,
int offset = mapping->offset;
u8 mask;
- /* According to the v4l2 spec, writing any value to a button control
+ /*
+ * According to the v4l2 spec, writing any value to a button control
* should result in the action belonging to the button control being
* triggered. UVC devices however want to see a 1 written -> override
* value.
@@ -927,7 +948,8 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
UVC_VC_EXTENSION_UNIT)
return ret;
- /* GET_RES is mandatory for XU controls, but some
+ /*
+ * GET_RES is mandatory for XU controls, but some
* cameras still choke on it. Ignore errors and set the
* resolution value to zero.
*/
@@ -1522,8 +1544,10 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
changes);
- /* Mark the queue as active, allowing this initial
- event to be accepted. */
+ /*
+ * Mark the queue as active, allowing this initial event to be
+ * accepted.
+ */
sev->elems = elems;
v4l2_event_queue_fh(sev->fh, &ev);
}
@@ -1596,7 +1620,8 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
if (!ctrl->initialized)
continue;
- /* Reset the loaded flag for auto-update controls that were
+ /*
+ * Reset the loaded flag for auto-update controls that were
* marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent
* uvc_ctrl_get from using the cached value, and for write-only
* controls to prevent uvc_ctrl_set from setting bits not
@@ -1755,7 +1780,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
return -ERANGE;
value = mapping->menu_info[xctrl->value].value;
- /* Valid menu indices are reported by the GET_RES request for
+ /*
+ * Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
@@ -1779,7 +1805,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
break;
}
- /* If the mapping doesn't span the whole UVC control, the current value
+ /*
+ * If the mapping doesn't span the whole UVC control, the current value
* needs to be loaded from the device to perform the read-modify-write
* operation.
*/
@@ -2180,7 +2207,8 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
unsigned int size;
unsigned int i;
- /* Most mappings come from static kernel data and need to be duplicated.
+ /*
+ * Most mappings come from static kernel data and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated,
* this could be optimized.
*/
@@ -2385,11 +2413,11 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
{
const struct uvc_control_info *info = uvc_ctrls;
const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
- const struct uvc_control_mapping *mapping = uvc_ctrl_mappings;
- const struct uvc_control_mapping *mend =
- mapping + ARRAY_SIZE(uvc_ctrl_mappings);
+ const struct uvc_control_mapping *mapping;
+ const struct uvc_control_mapping *mend;
- /* XU controls initialization requires querying the device for control
+ /*
+ * XU controls initialization requires querying the device for control
* information. As some buggy UVC devices will crash when queried
* repeatedly in a tight loop, delay XU controls initialization until
* first use.
@@ -2415,6 +2443,48 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
if (!ctrl->initialized)
return;
+ /*
+ * First check if the device provides a custom mapping for this control,
+ * used to override standard mappings for non-conformant devices. Don't
+ * process standard mappings if a custom mapping is found. This
+ * mechanism doesn't support combining standard and custom mappings for
+ * a single control.
+ */
+ if (chain->dev->info->mappings) {
+ bool custom = false;
+ unsigned int i;
+
+ for (i = 0; (mapping = chain->dev->info->mappings[i]); ++i) {
+ if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
+ ctrl->info.selector == mapping->selector) {
+ __uvc_ctrl_add_mapping(chain, ctrl, mapping);
+ custom = true;
+ }
+ }
+
+ if (custom)
+ return;
+ }
+
+ /* Process common mappings next. */
+ mapping = uvc_ctrl_mappings;
+ mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings);
+
+ for (; mapping < mend; ++mapping) {
+ if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
+ ctrl->info.selector == mapping->selector)
+ __uvc_ctrl_add_mapping(chain, ctrl, mapping);
+ }
+
+ /* Finally process version-specific mappings. */
+ if (chain->dev->uvc_version < 0x0150) {
+ mapping = uvc_ctrl_mappings_uvc11;
+ mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
+ } else {
+ mapping = uvc_ctrl_mappings_uvc15;
+ mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
+ }
+
for (; mapping < mend; ++mapping) {
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
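The uvc_ctrl.c rework above makes mapping selection a three-step process: device-specific overrides from uvc_device_info->mappings are applied first and, when present, suppress the standard tables; the common mappings come next; and finally a UVC-version-specific table adds the power-line-frequency mapping, where UVC 1.1 devices get the menu without the new "Auto" entry by passing ARRAY_SIZE(power_line_frequency_controls) - 1 as menu_count. A one-line illustration of the version test (uvc_version is BCD-encoded, so 0x0150 means UVC 1.5):

/* Illustration only: pick the UVC 1.5 table when the device reports >= 1.5. */
bool wants_auto_entry = chain->dev->uvc_version >= 0x0150;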
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 6c86faecbea2..9c05776f11d1 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -329,7 +329,8 @@ static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients)
return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */
}
-/* Simplify a fraction using a simple continued fraction decomposition. The
+/*
+ * Simplify a fraction using a simple continued fraction decomposition. The
* idea here is to convert fractions such as 333333/10000000 to 1/30 using
* 32 bit arithmetic only. The algorithm is not perfect and relies upon two
* arbitrary parameters to remove non-significative terms from the simple
@@ -347,8 +348,9 @@ void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
if (an == NULL)
return;
- /* Convert the fraction to a simple continued fraction. See
- * https://mathforum.org/dr.math/faq/faq.fractions.html
+ /*
+ * Convert the fraction to a simple continued fraction. See
+ * https://en.wikipedia.org/wiki/Continued_fraction
* Stop if the current term is bigger than or equal to the given
* threshold.
*/
@@ -383,7 +385,8 @@ void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
kfree(an);
}
-/* Convert a fraction to a frame interval in 100ns multiples. The idea here is
+/*
+ * Convert a fraction to a frame interval in 100ns multiples. The idea here is
* to compute numerator / denominator * 10000000 using 32 bit fixed point
* arithmetic only.
*/
@@ -396,7 +399,8 @@ u32 uvc_fraction_to_interval(u32 numerator, u32 denominator)
numerator/denominator >= ((u32)-1)/10000000)
return (u32)-1;
- /* Divide both the denominator and the multiplier by two until
+ /*
+ * Divide both the denominator and the multiplier by two until
* numerator * multiplier doesn't overflow. If anyone knows a better
* algorithm please let me know.
*/
@@ -548,7 +552,8 @@ static int uvc_parse_format(struct uvc_device *dev,
format->bpp = buffer[21];
- /* Some devices report a format that doesn't match what they
+ /*
+ * Some devices report a format that doesn't match what they
* really send.
*/
if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
@@ -663,7 +668,8 @@ static int uvc_parse_format(struct uvc_device *dev,
buflen -= buffer[0];
buffer += buffer[0];
- /* Parse the frame descriptors. Only uncompressed, MJPEG and frame
+ /*
+ * Parse the frame descriptors. Only uncompressed, MJPEG and frame
* based formats have frame descriptors.
*/
while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
@@ -705,7 +711,8 @@ static int uvc_parse_format(struct uvc_device *dev,
}
frame->dwFrameInterval = *intervals;
- /* Several UVC chipsets screw up dwMaxVideoFrameBufferSize
+ /*
+ * Several UVC chipsets screw up dwMaxVideoFrameBufferSize
* completely. Observed behaviours range from setting the
* value to 1.1x the actual frame size to hardwiring the
* 16 low bits to 0. This results in a higher than necessary
@@ -717,7 +724,8 @@ static int uvc_parse_format(struct uvc_device *dev,
frame->dwMaxVideoFrameBufferSize = format->bpp
* frame->wWidth * frame->wHeight / 8;
- /* Some bogus devices report dwMinFrameInterval equal to
+ /*
+ * Some bogus devices report dwMinFrameInterval equal to
* dwMaxFrameInterval and have dwFrameIntervalStep set to
* zero. Setting all null intervals to 1 fixes the problem and
* some other divisions by zero that could happen.
@@ -727,7 +735,8 @@ static int uvc_parse_format(struct uvc_device *dev,
*(*intervals)++ = interval ? interval : 1;
}
- /* Make sure that the default frame interval stays between
+ /*
+ * Make sure that the default frame interval stays between
* the boundaries.
*/
n -= frame->bFrameIntervalType ? 1 : 2;
@@ -819,7 +828,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
return -ENOMEM;
}
- /* The Pico iMage webcam has its class-specific interface descriptors
+ /*
+ * The Pico iMage webcam has its class-specific interface descriptors
* after the endpoint descriptors.
*/
if (buflen == 0) {
@@ -918,7 +928,8 @@ static int uvc_parse_streaming(struct uvc_device *dev,
break;
case UVC_VS_FORMAT_DV:
- /* DV format has no frame descriptor. We will create a
+ /*
+ * DV format has no frame descriptor. We will create a
* dummy frame descriptor with a dummy frame interval.
*/
nformats++;
@@ -1105,7 +1116,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
if (buffer[1] != 0x41 || buffer[2] != 0x01)
break;
- /* Logitech implements several vendor specific functions
+ /*
+ * Logitech implements several vendor specific functions
* through vendor specific extension units (LXU).
*
* The LXU descriptors are similar to XU descriptors
@@ -1303,7 +1315,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- /* Make sure the terminal type MSB is not null, otherwise it
+ /*
+ * Make sure the terminal type MSB is not null, otherwise it
* could be confused with a unit.
*/
type = get_unaligned_le16(&buffer[4]);
@@ -1437,7 +1450,8 @@ static int uvc_parse_control(struct uvc_device *dev)
int buflen = alts->extralen;
int ret;
- /* Parse the default alternate setting only, as the UVC specification
+ /*
+ * Parse the default alternate setting only, as the UVC specification
* defines a single alternate setting, the default alternate setting
* zero.
*/
@@ -1455,7 +1469,8 @@ next_descriptor:
buffer += buffer[0];
}
- /* Check if the optional status endpoint is present. Built-in iSight
+ /*
+ * Check if the optional status endpoint is present. Built-in iSight
* webcams have an interrupt endpoint but spit proprietary data that
* don't conform to the UVC status endpoint messages. Don't try to
* handle the interrupt endpoint for those cameras.
@@ -2057,7 +2072,8 @@ static int uvc_scan_device(struct uvc_device *dev)
if (!UVC_ENTITY_IS_OTERM(term))
continue;
- /* If the terminal is already included in a chain, skip it.
+ /*
+ * If the terminal is already included in a chain, skip it.
* This can happen for chains that have multiple output
* terminals, where all output terminals beside the first one
* will be inserted in the chain in forward scans.
@@ -2309,7 +2325,8 @@ static int uvc_register_terms(struct uvc_device *dev,
if (ret < 0)
return ret;
- /* Register a metadata node, but ignore a possible failure,
+ /*
+ * Register a metadata node, but ignore a possible failure,
* complete registration of video nodes anyway.
*/
uvc_meta_register(stream);
@@ -2507,7 +2524,8 @@ static void uvc_disconnect(struct usb_interface *intf)
{
struct uvc_device *dev = usb_get_intfdata(intf);
- /* Set the USB interface data to NULL. This can be done outside the
+ /*
+ * Set the USB interface data to NULL. This can be done outside the
* lock, as there's no other reader.
*/
usb_set_intfdata(intf, NULL);
@@ -2643,6 +2661,30 @@ MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
* Driver initialization and cleanup
*/
+static const struct uvc_menu_info power_line_frequency_controls_limited[] = {
+ { 1, "50 Hz" },
+ { 2, "60 Hz" },
+};
+
+static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_info = power_line_frequency_controls_limited,
+ .menu_count = ARRAY_SIZE(power_line_frequency_controls_limited),
+};
+
+static const struct uvc_device_info uvc_ctrl_power_line_limited = {
+ .mappings = (const struct uvc_control_mapping *[]) {
+ &uvc_ctrl_power_line_mapping_limited,
+ NULL, /* Sentinel */
+ },
+};
+
static const struct uvc_device_info uvc_quirk_probe_minmax = {
.quirks = UVC_QUIRK_PROBE_MINMAX,
};
@@ -2673,6 +2715,33 @@ static const struct uvc_device_info uvc_quirk_force_y8 = {
* though they are compliant.
*/
static const struct usb_device_id uvc_ids[] = {
+ /* Quanta USB2.0 HD UVC Webcam */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0408,
+ .idProduct = 0x3090,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Quanta USB2.0 HD UVC Webcam */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0408,
+ .idProduct = 0x4030,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Quanta USB2.0 HD UVC Webcam */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0408,
+ .idProduct = 0x4034,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* LogiLink Wireless Webcam */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2784,6 +2853,33 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTRICT_FRAME_RATE) },
+ /* Chicony EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x04f2,
+ .idProduct = 0xb5eb,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Chicony EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x04f2,
+ .idProduct = 0xb6ba,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Chicony EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x04f2,
+ .idProduct = 0xb746,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* Alcor Micro AU3820 (Future Boy PC USB Webcam) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3168,6 +3264,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
+ /* Acer EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x5986,
+ .idProduct = 0x1172,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* Intel RealSense D4M */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_isight.c b/drivers/media/usb/uvc/uvc_isight.c
index 2578d6ee4829..43cda5e760a3 100644
--- a/drivers/media/usb/uvc/uvc_isight.c
+++ b/drivers/media/usb/uvc/uvc_isight.c
@@ -14,7 +14,8 @@
#include "uvcvideo.h"
-/* Built-in iSight webcams implements most of UVC 1.0 except a
+/*
+ * Built-in iSight webcams implements most of UVC 1.0 except a
* different packet format. Instead of sending a header at the
* beginning of each isochronous transfer payload, the webcam sends a
* single header per image (on its own in a packet), followed by
@@ -65,7 +66,8 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
buf->state = UVC_BUF_STATE_ACTIVE;
}
- /* Mark the buffer as done if we're at the beginning of a new frame.
+ /*
+ * Mark the buffer as done if we're at the beginning of a new frame.
*
* Empty buffers (bytesused == 0) don't trigger end of frame detection
* as it doesn't make sense to return an empty buffer.
@@ -75,7 +77,8 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
return -EAGAIN;
}
- /* Copy the video data to the buffer. Skip header packets, as they
+ /*
+ * Copy the video data to the buffer. Skip header packets, as they
* contain no data.
*/
if (!is_header) {
@@ -109,7 +112,9 @@ void uvc_video_decode_isight(struct uvc_urb *uvc_urb, struct uvc_buffer *buf,
urb->iso_frame_desc[i].status);
}
- /* Decode the payload packet.
+ /*
+ * Decode the payload packet.
+ *
* uvc_video_decode is entered twice when a frame transition
* has been detected because the end of frame can only be
* reliably detected when the first packet of the new frame
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 21a907d32bb7..16fa17bbd15e 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -135,7 +135,8 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
kref_init(&buf->ref);
list_add_tail(&buf->queue, &queue->irqqueue);
} else {
- /* If the device is disconnected return the buffer to userspace
+ /*
+ * If the device is disconnected return the buffer to userspace
* directly. The next QBUF call will fail with -ENODEV.
*/
buf->state = UVC_BUF_STATE_ERROR;
@@ -412,7 +413,8 @@ void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
spin_lock_irqsave(&queue->irqlock, flags);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
- /* This must be protected by the irqlock spinlock to avoid race
+ /*
+ * This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_buffer_queue and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
* blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 753c8226db70..7518ffce22ed 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -202,8 +202,7 @@ static void uvc_status_complete(struct urb *urb)
case -ENOENT: /* usb_kill_urb() called. */
case -ECONNRESET: /* usb_unlink_urb() called. */
case -ESHUTDOWN: /* The endpoint is being disabled. */
- case -EPROTO: /* Device is disconnected (reported by some
- * host controller). */
+ case -EPROTO: /* Device is disconnected (reported by some host controllers). */
return;
default:
@@ -272,7 +271,8 @@ int uvc_status_init(struct uvc_device *dev)
pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress);
- /* For high-speed interrupt endpoints, the bInterval value is used as
+ /*
+ * For high-speed interrupt endpoints, the bInterval value is used as
* an exponent of two. Some developers forgot about it.
*/
interval = ep->desc.bInterval;
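The comment above relies on the USB 2.0 rule that, for high-speed interrupt endpoints, bInterval is an exponent rather than a frame count, so bInterval = 4 means 2^3 = 8 microframes (1 ms). A minimal standalone sketch of that conversion follows; it is not part of the patch, and the helper name and clamping bounds are illustrative assumptions.

#include <stdio.h>

/*
 * Hypothetical helper: convert a high-speed interrupt endpoint bInterval
 * (an exponent, per USB 2.0) into 125 us microframes. Low- and full-speed
 * endpoints use bInterval directly as a frame count instead.
 */
static unsigned int hs_binterval_to_uframes(unsigned int binterval)
{
        if (binterval < 1)
                binterval = 1;
        if (binterval > 16)
                binterval = 16;
        return 1u << (binterval - 1);   /* 2^(bInterval - 1) microframes */
}

int main(void)
{
        /* bInterval = 4 -> 8 microframes, i.e. a 1 ms polling interval. */
        printf("%u microframes\n", hs_binterval_to_uframes(4));
        return 0;
}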
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 648dcd579e81..4cc3fa6b8c98 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -63,7 +63,8 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
break;
case V4L2_CTRL_TYPE_MENU:
- /* Prevent excessive memory consumption, as well as integer
+ /*
+ * Prevent excessive memory consumption, as well as integer
* overflows.
*/
if (xmap->menu_count == 0 ||
@@ -177,7 +178,8 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fcc[0], fcc[1], fcc[2], fcc[3],
fmt->fmt.pix.width, fmt->fmt.pix.height);
- /* Check if the hardware supports the requested format, use the default
+ /*
+ * Check if the hardware supports the requested format, use the default
* format otherwise.
*/
for (i = 0; i < stream->nformats; ++i) {
@@ -191,7 +193,8 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fmt->fmt.pix.pixelformat = format->fcc;
}
- /* Find the closest image size. The distance between image sizes is
+ /*
+ * Find the closest image size. The distance between image sizes is
* the size in pixels of the non-overlapping regions between the
* requested size and the frame-specified size.
*/
@@ -233,7 +236,8 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
probe->bFormatIndex = format->index;
probe->bFrameIndex = frame->bFrameIndex;
probe->dwFrameInterval = uvc_try_frame_interval(frame, interval);
- /* Some webcams stall the probe control set request when the
+ /*
+ * Some webcams stall the probe control set request when the
* dwMaxVideoFrameSize field is set to zero. The UVC specification
* clearly states that the field is read-only from the host, so this
* is a webcam bug. Set dwMaxVideoFrameSize to the value reported by
@@ -254,9 +258,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
ret = uvc_probe_video(stream, probe);
mutex_unlock(&stream->mutex);
if (ret < 0)
- goto done;
+ return ret;
- /* After the probe, update fmt with the values returned from
+ /*
+ * After the probe, update fmt with the values returned from
* negotiation with the device. Some devices return invalid bFormatIndex
* and bFrameIndex values, in which case we can only assume they have
* accepted the requested format as-is.
@@ -300,7 +305,6 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
if (uvc_frame != NULL)
*uvc_frame = frame;
-done:
return ret;
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 6d3dfa4e0bb2..170a008f4006 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -189,7 +189,8 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxVideoFrameSize =
frame->dwMaxVideoFrameBufferSize;
- /* The "TOSHIBA Web Camera - 5M" Chicony device (04f2:b50b) seems to
+ /*
+ * The "TOSHIBA Web Camera - 5M" Chicony device (04f2:b50b) seems to
* compute the bandwidth on 16 bits and erroneously sign-extend it to
* 32 bits, resulting in a huge bandwidth value. Detect and fix that
* condition by setting the 16 MSBs to 0 when they're all equal to 1.
@@ -207,7 +208,8 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
? ctrl->dwFrameInterval
: frame->dwFrameInterval[0];
- /* Compute a bandwidth estimation by multiplying the frame
+ /*
+ * Compute a bandwidth estimation by multiplying the frame
* size by the number of video frames per second, divide the
* result by the number of USB frames (or micro-frames for
* high-speed devices) per second and add the UVC header size
@@ -220,7 +222,8 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
bandwidth /= 8;
bandwidth += 12;
- /* The bandwidth estimate is too low for many cameras. Don't use
+ /*
+ * The bandwidth estimate is too low for many cameras. Don't use
* maximum packet sizes lower than 1024 bytes to try and work
* around the problem. According to measurements done on two
* different camera models, the value is high enough to get most
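Not part of the patch: a standalone sketch of the estimate described in the two comments above, i.e. frame bytes per second divided by USB (micro)frames per second plus the 12-byte UVC header, clamped to at least 1024 bytes. The function name and exact rounding are assumptions; the driver's own arithmetic differs in detail.

#include <stdio.h>

/*
 * Sketch: estimate a per-(micro)frame packet size from the frame size in
 * bytes and the frame interval in 100 ns units (as found in UVC
 * descriptors). High-speed buses carry 8000 microframes per second.
 */
static unsigned int estimate_max_packet_size(unsigned long long frame_bytes,
                                             unsigned int interval_100ns,
                                             int high_speed)
{
        unsigned int uframes_per_sec = high_speed ? 8000 : 1000;
        unsigned long long bw;

        bw = frame_bytes * 10000000ull / interval_100ns; /* bytes per second */
        bw /= uframes_per_sec;                  /* bytes per (micro)frame */
        bw += 12;                               /* UVC payload header */

        if (bw < 1024)  /* many cameras under-report their bandwidth needs */
                bw = 1024;
        return (unsigned int)bw;
}

int main(void)
{
        /* 640x480 YUYV (614400 bytes) at 30 fps (interval = 333333 x 100 ns). */
        printf("%u bytes\n", estimate_max_packet_size(614400, 333333, 1));
        return 0;
}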
@@ -267,7 +270,8 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
size, uvc_timeout_param);
if ((query == UVC_GET_MIN || query == UVC_GET_MAX) && ret == 2) {
- /* Some cameras, mostly based on Bison Electronics chipsets,
+ /*
+ * Some cameras, mostly based on Bison Electronics chipsets,
* answer a GET_MIN or GET_MAX request with the wCompQuality
* field only.
*/
@@ -279,7 +283,8 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ret = 0;
goto out;
} else if (query == UVC_GET_DEF && probe == 1 && ret != size) {
- /* Many cameras don't support the GET_DEF request on their
+ /*
+ * Many cameras don't support the GET_DEF request on their
* video probe control. Warn once and return, the caller will
* fall back to GET_CUR.
*/
@@ -322,7 +327,8 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ctrl->bMaxVersion = 0;
}
- /* Some broken devices return null or wrong dwMaxVideoFrameSize and
+ /*
+ * Some broken devices return null or wrong dwMaxVideoFrameSize and
* dwMaxPayloadTransferSize fields. Try to get the value from the
* format and frame descriptors.
*/
@@ -386,7 +392,8 @@ int uvc_probe_video(struct uvc_streaming *stream,
unsigned int i;
int ret;
- /* Perform probing. The device should adjust the requested values
+ /*
+ * Perform probing. The device should adjust the requested values
* according to its capabilities. However, some devices, namely the
* first generation UVC Logitech webcams, don't implement the Video
* Probe control properly, and just return the needed bandwidth. For
@@ -493,7 +500,8 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
if (len < header_size)
return;
- /* Extract the timestamps:
+ /*
+ * Extract the timestamps:
*
* - store the frame PTS in the buffer structure
* - if the SCR field is present, retrieve the host SOF counter and
@@ -506,7 +514,8 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
if (!has_scr)
return;
- /* To limit the amount of data, drop SCRs with an SOF identical to the
+ /*
+ * To limit the amount of data, drop SCRs with an SOF identical to the
* previous one.
*/
dev_sof = get_unaligned_le16(&data[header_size - 2]);
@@ -518,7 +527,8 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
host_sof = usb_get_current_frame_number(stream->dev->udev);
time = uvc_video_get_time();
- /* The UVC specification allows device implementations that can't obtain
+ /*
+ * The UVC specification allows device implementations that can't obtain
* the USB frame number to keep their own frame counters as long as they
* match the size and frequency of the frame number associated with USB
* SOF tokens. The SOF values sent by such devices differ from the USB
@@ -756,7 +766,8 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
y1 = NSEC_PER_SEC;
y2 = (u32)ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
- /* Interpolated and host SOF timestamps can wrap around at slightly
+ /*
+ * Interpolated and host SOF timestamps can wrap around at slightly
* different times. Handle this by adding or removing 2048 to or from
* the computed SOF value to keep it close to the SOF samples mean
* value.
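A standalone sketch (not the driver code) of the adjustment described above: SOF counters are 11 bits wide and wrap at 2048, so an interpolated SOF is shifted by one full period until it lies within half a period of the sampled mean. The driver applies the same idea to its internal fixed-point representation.

#include <stdio.h>

/* Bring sof within 1024 of mean_sof by adding or removing one 2048 period. */
static unsigned int sof_unwrap(unsigned int sof, unsigned int mean_sof)
{
        if (sof + 1024 < mean_sof)
                sof += 2048;
        else if (sof > mean_sof + 1024)
                sof -= 2048;
        return sof;
}

int main(void)
{
        /* An SOF of 10 just after a wrap, compared against a mean of 2040. */
        printf("%u\n", sof_unwrap(10, 2040));   /* prints 2058 */
        return 0;
}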
@@ -854,7 +865,8 @@ static void uvc_video_stats_decode(struct uvc_streaming *stream,
stream->stats.frame.pts = pts;
}
- /* Do all frames have a PTS in their first non-empty packet, or before
+ /*
+ * Do all frames have a PTS in their first non-empty packet, or before
 * their first empty packet?

*/
if (stream->stats.frame.size == 0) {
@@ -945,7 +957,8 @@ size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf,
unsigned int duration;
size_t count = 0;
- /* Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF
+ /*
+ * Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF
* frequency this will not overflow before more than 1h.
*/
duration = ktime_ms_delta(stream->stats.stream.stop_ts,
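Not part of the patch: a sketch of the frequency estimate mentioned above. With a nominal 1 kHz SOF rate, sof_count * 1000 stays below 2^32 for captures shorter than roughly an hour, which is what the overflow remark alludes to.

#include <stdio.h>

/*
 * Sketch: device SOF frequency in Hz from the number of SCR.SOF ticks
 * observed over a stream duration expressed in milliseconds.
 */
static unsigned int scr_sof_freq_hz(unsigned int sof_count,
                                    unsigned int duration_ms)
{
        if (duration_ms == 0)
                return 0;
        return sof_count * 1000u / duration_ms;
}

int main(void)
{
        /* 60000 SOF ticks over a 60 s capture -> 1000 Hz. */
        printf("%u Hz\n", scr_sof_freq_hz(60000, 60000));
        return 0;
}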
@@ -997,7 +1010,8 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
* Video codecs
*/
-/* Video payload decoding is handled by uvc_video_decode_start(),
+/*
+ * Video payload decoding is handled by uvc_video_decode_start(),
* uvc_video_decode_data() and uvc_video_decode_end().
*
* uvc_video_decode_start is called with URB data at the start of a bulk or
@@ -1037,7 +1051,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
{
u8 fid;
- /* Sanity checks:
+ /*
+ * Sanity checks:
* - packet must be at least 2 bytes long
* - bHeaderLength value must be at least 2 bytes (see above)
* - bHeaderLength value can't be larger than the packet size.
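The three checks listed above translate almost directly into code; here is a standalone sketch (not the driver function), where data[0] is bHeaderLength as in the UVC payload header layout.

#include <stdio.h>

/*
 * Sketch: a UVC payload header is sane when the packet holds at least the
 * two mandatory header bytes, bHeaderLength covers those two bytes, and
 * bHeaderLength does not exceed the packet length.
 */
static int uvc_header_is_sane(const unsigned char *data, unsigned int len)
{
        return len >= 2 && data[0] >= 2 && data[0] <= len;
}

int main(void)
{
        const unsigned char pkt[] = { 0x0c, 0x8d };     /* claims a 12-byte header */
        /* Prints 0: the claimed header is longer than the 2-byte packet. */
        printf("%d\n", uvc_header_is_sane(pkt, sizeof(pkt)));
        return 0;
}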
@@ -1049,7 +1064,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
fid = data[1] & UVC_STREAM_FID;
- /* Increase the sequence number regardless of any buffer states, so
+ /*
+ * Increase the sequence number regardless of any buffer states, so
* that discontinuous sequence numbers always indicate lost frames.
*/
if (stream->last_fid != fid) {
@@ -1061,7 +1077,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
uvc_video_clock_decode(stream, buf, data, len);
uvc_video_stats_decode(stream, data, len);
- /* Store the payload FID bit and return immediately when the buffer is
+ /*
+ * Store the payload FID bit and return immediately when the buffer is
* NULL.
*/
if (buf == NULL) {
@@ -1076,7 +1093,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
buf->error = 1;
}
- /* Synchronize to the input stream by waiting for the FID bit to be
+ /*
+ * Synchronize to the input stream by waiting for the FID bit to be
 * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE.
* stream->last_fid is initialized to -1, so the first isochronous
* frame will always be in sync.
@@ -1102,7 +1120,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
buf->state = UVC_BUF_STATE_ACTIVE;
}
- /* Mark the buffer as done if we're at the beginning of a new frame.
+ /*
+ * Mark the buffer as done if we're at the beginning of a new frame.
* End of frame detection is better implemented by checking the EOF
* bit (FID bit toggling is delayed by one frame compared to the EOF
* bit), but some devices don't set the bit at end of frame (and the
@@ -1226,7 +1245,8 @@ static void uvc_video_decode_end(struct uvc_streaming *stream,
}
}
-/* Video payload encoding is handled by uvc_video_encode_header() and
+/*
+ * Video payload encoding is handled by uvc_video_encode_header() and
* uvc_video_encode_data(). Only bulk transfers are currently supported.
*
* uvc_video_encode_header is called at the start of a payload. It adds header
@@ -1450,7 +1470,8 @@ static void uvc_video_decode_bulk(struct uvc_urb *uvc_urb,
len = urb->actual_length;
stream->bulk.payload_size += len;
- /* If the URB is the first of its payload, decode and save the
+ /*
+ * If the URB is the first of its payload, decode and save the
* header.
*/
if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) {
@@ -1474,7 +1495,8 @@ static void uvc_video_decode_bulk(struct uvc_urb *uvc_urb,
}
}
- /* The buffer queue might have been cancelled while a bulk transfer
+ /*
+ * The buffer queue might have been cancelled while a bulk transfer
* was in progress, so we can reach here with buf equal to NULL. Make
* sure buf is never dereferenced if NULL.
*/
@@ -1483,7 +1505,8 @@ static void uvc_video_decode_bulk(struct uvc_urb *uvc_urb,
if (!stream->bulk.skip_payload && buf != NULL)
uvc_video_decode_data(uvc_urb, buf, mem, len);
- /* Detect the payload end by a URB smaller than the maximum size (or
+ /*
+ * Detect the payload end by a URB smaller than the maximum size (or
* a payload size equal to the maximum) and process the header again.
*/
if (urb->actual_length < urb->transfer_buffer_length ||
@@ -1686,7 +1709,8 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
if (stream->urb_size)
return stream->urb_size / psize;
- /* Compute the number of packets. Bulk endpoints might transfer UVC
+ /*
+ * Compute the number of packets. Bulk endpoints might transfer UVC
* payloads across multiple URBs.
*/
npackets = DIV_ROUND_UP(size, psize);
@@ -1975,7 +1999,8 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
}
}
- /* The Logitech C920 temporarily forgets that it should not be adjusting
+ /*
+ * The Logitech C920 temporarily forgets that it should not be adjusting
* Exposure Absolute during init so restore controls to stored values.
*/
if (stream->dev->quirks & UVC_QUIRK_RESTORE_CTRLS_ON_INIT)
@@ -2018,7 +2043,8 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
{
int ret;
- /* If the bus has been reset on resume, set the alternate setting to 0.
+ /*
+ * If the bus has been reset on resume, set the alternate setting to 0.
* This should be the default value, but some devices crash or otherwise
* misbehave if they don't receive a SET_INTERFACE request before any
* other video control request.
@@ -2071,14 +2097,16 @@ int uvc_video_init(struct uvc_streaming *stream)
atomic_set(&stream->active, 0);
- /* Alternate setting 0 should be the default, yet the XBox Live Vision
+ /*
+ * Alternate setting 0 should be the default, yet the XBox Live Vision
* Cam (and possibly other devices) crash or otherwise misbehave if
* they don't receive a SET_INTERFACE request before any other video
* control request.
*/
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
- /* Set the streaming probe control with default streaming parameters
+ /*
+ * Set the streaming probe control with default streaming parameters
* retrieved from the device. Webcams that don't support GET_DEF
* requests on the probe control will just keep their current streaming
* parameters.
@@ -2086,7 +2114,8 @@ int uvc_video_init(struct uvc_streaming *stream)
if (uvc_get_video_ctrl(stream, probe, 1, UVC_GET_DEF) == 0)
uvc_set_video_ctrl(stream, probe, 1);
- /* Initialize the streaming parameters with the probe control current
+ /*
+ * Initialize the streaming parameters with the probe control current
* value. This makes sure SET_CUR requests on the streaming commit
* control will always use values retrieved from a successful GET_CUR
* request on the probe control, as required by the UVC specification.
@@ -2095,7 +2124,8 @@ int uvc_video_init(struct uvc_streaming *stream)
if (ret < 0)
return ret;
- /* Check if the default format descriptor exists. Use the first
+ /*
+ * Check if the default format descriptor exists. Use the first
* available format otherwise.
*/
for (i = stream->nformats; i > 0; --i) {
@@ -2110,7 +2140,8 @@ int uvc_video_init(struct uvc_streaming *stream)
return -EINVAL;
}
- /* Zero bFrameIndex might be correct. Stream-based formats (including
+ /*
+ * Zero bFrameIndex might be correct. Stream-based formats (including
* MPEG-2 TS and DV) do not support frames but have a dummy frame
* descriptor with bFrameIndex set to zero. If the default frame
* descriptor is not found, use the first available frame.
@@ -2187,7 +2218,8 @@ void uvc_video_stop_streaming(struct uvc_streaming *stream)
if (stream->intf->num_altsetting > 1) {
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
} else {
- /* UVC doesn't specify how to inform a bulk-based device
+ /*
+ * UVC doesn't specify how to inform a bulk-based device
* when the video stream is stopped. Windows sends a
* CLEAR_FEATURE(HALT) request to the video streaming
* bulk endpoint, mimic the same behaviour.
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c5b4febd2d94..24c911aeebce 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -225,7 +225,8 @@ struct gpio_desc;
struct sg_table;
struct uvc_device;
-/* TODO: Put the most frequently accessed fields at the beginning of
+/*
+ * TODO: Put the most frequently accessed fields at the beginning of
* structures to maximize cache efficiency.
*/
struct uvc_control_info {
@@ -270,8 +271,7 @@ struct uvc_control {
struct uvc_entity *entity;
struct uvc_control_info info;
- u8 index; /* Used to match the uvc_control entry with a
- uvc_control_info. */
+ u8 index; /* Used to match the uvc_control entry with a uvc_control_info. */
u8 dirty:1,
loaded:1,
modified:1,
@@ -289,7 +289,8 @@ struct uvc_format_desc {
u32 fcc;
};
-/* The term 'entity' refers to both UVC units and UVC terminals.
+/*
+ * The term 'entity' refers to both UVC units and UVC terminals.
*
* The type field is either the terminal type (wTerminalType in the terminal
* descriptor), or the unit type (bDescriptorSubtype in the unit descriptor).
@@ -308,8 +309,7 @@ struct uvc_format_desc {
struct uvc_entity {
struct list_head list; /* Entity as part of a UVC device. */
- struct list_head chain; /* Entity as part of a video device
- * chain. */
+ struct list_head chain; /* Entity as part of a video device chain. */
unsigned int flags;
/*
@@ -591,7 +591,8 @@ struct uvc_streaming {
struct uvc_format *cur_format;
struct uvc_frame *cur_frame;
- /* Protect access to ctrl, cur_format, cur_frame and hardware video
+ /*
+ * Protect access to ctrl, cur_format, cur_frame and hardware video
* probe control.
*/
struct mutex mutex;
@@ -667,6 +668,7 @@ struct uvc_device_info {
u32 quirks;
u32 meta_format;
u16 uvc_version;
+ const struct uvc_control_mapping **mappings;
};
struct uvc_device {
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 1be9a2cc947a..348559bc2468 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -9,7 +9,7 @@ config VIDEO_V4L2_I2C
default y
config VIDEO_V4L2_SUBDEV_API
- bool "V4L2 sub-device userspace API"
+ bool
depends on VIDEO_DEV && MEDIA_CONTROLLER
help
Enables the V4L2 sub-device pad-level userspace API used to configure
@@ -56,9 +56,11 @@ config V4L2_MEM2MEM_DEV
# Used by LED subsystem flash drivers
config V4L2_FLASH_LED_CLASS
tristate "V4L2 flash API for LED flash class devices"
- depends on VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_DEV
depends on LEDS_CLASS_FLASH
+ select MEDIA_CONTROLLER
select V4L2_ASYNC
+ select VIDEO_V4L2_SUBDEV_API
help
Say Y here to enable V4L2 flash API support for LED flash
class drivers.
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index c6995718237a..2f1b718a9189 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -52,6 +52,15 @@ static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
return n->ops->complete(n);
}
+static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->destroy)
+ return;
+
+ n->ops->destroy(asd);
+}
+
static bool match_i2c(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
@@ -66,8 +75,10 @@ static bool match_i2c(struct v4l2_async_notifier *notifier,
#endif
}
-static bool match_fwnode(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
+static bool
+match_fwnode_one(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
+ struct v4l2_async_subdev *asd)
{
struct fwnode_handle *other_fwnode;
struct fwnode_handle *dev_fwnode;
@@ -80,15 +91,7 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
* fwnode or a device fwnode. Start with the simple case of direct
* fwnode matching.
*/
- if (sd->fwnode == asd->match.fwnode)
- return true;
-
- /*
- * Check the same situation for any possible secondary assigned to the
- * subdev's fwnode
- */
- if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
- sd->fwnode->secondary == asd->match.fwnode)
+ if (sd_fwnode == asd->match.fwnode)
return true;
/*
@@ -99,7 +102,7 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
* ACPI. This won't make a difference, as drivers should not try to
* match unconnected endpoints.
*/
- sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
+ sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);
if (sd_fwnode_is_ep == asd_fwnode_is_ep)
@@ -110,11 +113,11 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
* parent of the endpoint fwnode, and compare it with the other fwnode.
*/
if (sd_fwnode_is_ep) {
- dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
+ dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
other_fwnode = asd->match.fwnode;
} else {
dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
- other_fwnode = sd->fwnode;
+ other_fwnode = sd_fwnode;
}
fwnode_handle_put(dev_fwnode);
@@ -143,6 +146,19 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
return true;
}
+static bool match_fwnode(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
+{
+ if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
+ return true;
+
+ /* Also check the secondary fwnode. */
+ if (IS_ERR_OR_NULL(sd->fwnode->secondary))
+ return false;
+
+ return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
+}
+
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
@@ -626,6 +642,7 @@ static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
}
list_del(&asd->asd_list);
+ v4l2_async_nf_call_destroy(notifier, asd);
kfree(asd);
}
}
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index df34b2a283bc..e0fbe6ba4b6c 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -266,6 +266,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
{ .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
@@ -277,6 +278,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
/* Tiled YUV formats */
{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },
/* YUV planar formats, non contiguous variant */
{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index db9baa0bd05f..50d012ba3c02 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -97,29 +97,47 @@ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
return ptr_to_user(c, ctrl, ctrl->p_new);
}
-/* Helper function: copy the caller-provider value to the given control value */
-static int user_to_ptr(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl,
- union v4l2_ctrl_ptr ptr)
+/* Helper function: copy the caller-provided value as the new control value */
+static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
int ret;
u32 size;
- ctrl->is_new = 1;
+ ctrl->is_new = 0;
+ if (ctrl->is_dyn_array &&
+ c->size > ctrl->p_dyn_alloc_elems * ctrl->elem_size) {
+ void *old = ctrl->p_dyn;
+ void *tmp = kvzalloc(2 * c->size, GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
+ memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
+ ctrl->p_new.p = tmp;
+ ctrl->p_cur.p = tmp + c->size;
+ ctrl->p_dyn = tmp;
+ ctrl->p_dyn_alloc_elems = c->size / ctrl->elem_size;
+ kvfree(old);
+ }
+
if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int elems = c->size / ctrl->elem_size;
unsigned int idx;
- ret = copy_from_user(ptr.p, c->ptr, c->size) ? -EFAULT : 0;
- if (ret || !ctrl->is_array)
- return ret;
- for (idx = c->size / ctrl->elem_size; idx < ctrl->elems; idx++)
- ctrl->type_ops->init(ctrl, idx, ptr);
+ if (copy_from_user(ctrl->p_new.p, c->ptr, c->size))
+ return -EFAULT;
+ ctrl->is_new = 1;
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = elems;
+ else if (ctrl->is_array)
+ for (idx = elems; idx < ctrl->elems; idx++)
+ ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
return 0;
}
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER64:
- *ptr.p_s64 = c->value64;
+ *ctrl->p_new.p_s64 = c->value64;
break;
case V4L2_CTRL_TYPE_STRING:
size = c->size;
@@ -127,32 +145,27 @@ static int user_to_ptr(struct v4l2_ext_control *c,
return -ERANGE;
if (size > ctrl->maximum + 1)
size = ctrl->maximum + 1;
- ret = copy_from_user(ptr.p_char, c->string, size) ? -EFAULT : 0;
+ ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0;
if (!ret) {
- char last = ptr.p_char[size - 1];
+ char last = ctrl->p_new.p_char[size - 1];
- ptr.p_char[size - 1] = 0;
+ ctrl->p_new.p_char[size - 1] = 0;
/*
* If the string was longer than ctrl->maximum,
* then return an error.
*/
- if (strlen(ptr.p_char) == ctrl->maximum && last)
+ if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
return -ERANGE;
}
return ret;
default:
- *ptr.p_s32 = c->value;
+ *ctrl->p_new.p_s32 = c->value;
break;
}
+ ctrl->is_new = 1;
return 0;
}
-/* Helper function: copy the caller-provider value as the new control value */
-static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
-{
- return user_to_ptr(c, ctrl, ctrl->p_new);
-}
-
/*
* VIDIOC_G/TRY/S_EXT_CTRLS implementation
*/
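Not part of the patch: a userspace sketch of the reallocation pattern user_to_new() adopts above for dynamic-array controls. One allocation backs both the new and the current value, so growing the control means allocating twice the new size, copying both halves across, and only then freeing the old block. The names and struct layout are illustrative assumptions.

#include <stdlib.h>
#include <string.h>

struct dyn_buf {
        void *mem;              /* 2 * size bytes: new value, then current */
        void *new_val;
        void *cur_val;
        size_t size;            /* size of one value in bytes */
};

/* Grow the backing store; 'used' is the number of bytes currently valid. */
static int dyn_buf_grow(struct dyn_buf *b, size_t new_size, size_t used)
{
        void *tmp;

        if (new_size <= b->size)
                return 0;
        tmp = calloc(2, new_size);
        if (!tmp)
                return -1;
        memcpy(tmp, b->new_val, used);
        memcpy((char *)tmp + new_size, b->cur_val, used);
        free(b->mem);
        b->mem = tmp;
        b->new_val = tmp;
        b->cur_val = (char *)tmp + new_size;
        b->size = new_size;
        return 0;
}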
@@ -254,7 +267,31 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
have_clusters = true;
if (ctrl->cluster[0] != ctrl)
ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
- if (ctrl->is_ptr && !ctrl->is_string) {
+ if (ctrl->is_dyn_array) {
+ unsigned int max_size = ctrl->dims[0] * ctrl->elem_size;
+ unsigned int tot_size = ctrl->elem_size;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ tot_size *= ref->p_req_elems;
+ else
+ tot_size *= ctrl->elems;
+
+ c->size = ctrl->elem_size * (c->size / ctrl->elem_size);
+ if (get) {
+ if (c->size < tot_size) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ c->size = tot_size;
+ } else {
+ if (c->size > max_size) {
+ c->size = max_size;
+ return -ENOSPC;
+ }
+ if (!c->size)
+ return -EFAULT;
+ }
+ } else if (ctrl->is_ptr && !ctrl->is_string) {
unsigned int tot_size = ctrl->elems * ctrl->elem_size;
if (c->size < tot_size) {
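Not part of the patch: a standalone sketch of the dynamic-array size handling added in the hunk above. A user-supplied payload size is rounded down to a whole number of elements, then checked against the current total (for a get) or the per-control maximum (for a set); on failure the size to report back is written to *size. Names and the return convention are assumptions.

/* Returns 0 on success, -1 when the caller's buffer size is unusable. */
static int check_dyn_array_size(size_t *size, size_t elem_size,
                                size_t cur_elems, size_t max_elems, int get)
{
        size_t tot = elem_size * cur_elems;
        size_t max = elem_size * max_elems;

        *size = elem_size * (*size / elem_size);
        if (get) {
                if (*size < tot) {
                        *size = tot;    /* tell the caller how much to allocate */
                        return -1;
                }
                *size = tot;
                return 0;
        }
        if (*size > max) {
                *size = max;
                return -1;
        }
        return *size ? 0 : -1;          /* an empty set payload is rejected */
}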
@@ -346,7 +383,7 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
*
* Note that v4l2_g_ext_ctrls_common() with 'which' set to
* V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
- * completed, and in that case valid_p_req is true for all controls.
+ * completed, and in that case p_req_valid is true for all controls.
*/
int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
@@ -430,7 +467,9 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
if (is_default)
ret = def_to_user(cs->controls + idx, ref->ctrl);
- else if (is_request && ref->valid_p_req)
+ else if (is_request && ref->p_req_dyn_enomem)
+ ret = -ENOMEM;
+ else if (is_request && ref->p_req_valid)
ret = req_to_user(cs->controls + idx, ref);
else if (is_volatile)
ret = new_to_user(cs->controls + idx, ref->ctrl);
@@ -457,6 +496,17 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
+/* Validate a new control */
+static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
+{
+ unsigned int idx;
+ int err = 0;
+
+ for (idx = 0; !err && idx < ctrl->new_elems; idx++)
+ err = ctrl->type_ops->validate(ctrl, idx, p_new);
+ return err;
+}
+
/* Validate controls. */
static int validate_ctrls(struct v4l2_ext_controls *cs,
struct v4l2_ctrl_helper *helpers,
@@ -872,6 +922,9 @@ int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
/* It's a driver bug if this happens. */
if (WARN_ON(ctrl->type != type))
return -EINVAL;
+ /* Setting dynamic arrays is not (yet?) supported. */
+ if (WARN_ON(ctrl->is_dyn_array))
+ return -EINVAL;
memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
return set_ctrl(NULL, ctrl, 0);
}
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 949c1884d9c1..1f85828d6694 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -307,6 +307,21 @@ static void std_log(const struct v4l2_ctrl *ctrl)
case V4L2_CTRL_TYPE_VP9_FRAME:
pr_cont("VP9_FRAME");
break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ pr_cont("HEVC_SPS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ pr_cont("HEVC_PPS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ pr_cont("HEVC_SLICE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ pr_cont("HEVC_SCALING_MATRIX");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ pr_cont("HEVC_DECODE_PARAMS");
+ break;
default:
pr_cont("unknown type %d", ctrl->type);
break;
@@ -521,7 +536,6 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
struct v4l2_ctrl_hevc_sps *p_hevc_sps;
struct v4l2_ctrl_hevc_pps *p_hevc_pps;
- struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
struct v4l2_area *area;
@@ -799,8 +813,6 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
p_hevc_pps->pps_beta_offset_div2 = 0;
p_hevc_pps->pps_tc_offset_div2 = 0;
}
-
- zero_padding(*p_hevc_pps);
break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
@@ -809,21 +821,9 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
if (p_hevc_decode_params->num_active_dpb_entries >
V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
return -EINVAL;
-
- for (i = 0; i < p_hevc_decode_params->num_active_dpb_entries;
- i++) {
- struct v4l2_hevc_dpb_entry *dpb_entry =
- &p_hevc_decode_params->dpb[i];
-
- zero_padding(*dpb_entry);
- }
break;
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
- p_hevc_slice_params = p;
-
- zero_padding(p_hevc_slice_params->pred_weight_table);
- zero_padding(*p_hevc_slice_params);
break;
case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
@@ -991,11 +991,12 @@ EXPORT_SYMBOL(v4l2_ctrl_notify);
/* Copy the one value to another. */
static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
- union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to)
+ union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to,
+ unsigned int elems)
{
if (ctrl == NULL)
return;
- memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
+ memcpy(to.p, from.p_const, elems * ctrl->elem_size);
}
/* Copy the new value to the current value. */
@@ -1008,8 +1009,11 @@ void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
/* has_changed is set by cluster_changed */
changed = ctrl->has_changed;
- if (changed)
- ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur);
+ if (changed) {
+ if (ctrl->is_dyn_array)
+ ctrl->elems = ctrl->new_elems;
+ ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems);
+ }
if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
/* Note: CH_FLAGS is only set for auto clusters. */
@@ -1039,36 +1043,122 @@ void cur_to_new(struct v4l2_ctrl *ctrl)
{
if (ctrl == NULL)
return;
- ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
+}
+
+static bool req_alloc_dyn_array(struct v4l2_ctrl_ref *ref, u32 elems)
+{
+ void *tmp;
+
+ if (elems < ref->p_req_dyn_alloc_elems)
+ return true;
+
+ tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL);
+
+ if (!tmp) {
+ ref->p_req_dyn_enomem = true;
+ return false;
+ }
+ ref->p_req_dyn_enomem = false;
+ kvfree(ref->p_req.p);
+ ref->p_req.p = tmp;
+ ref->p_req_dyn_alloc_elems = elems;
+ return true;
}
/* Copy the new value to the request value */
void new_to_req(struct v4l2_ctrl_ref *ref)
{
+ struct v4l2_ctrl *ctrl;
+
if (!ref)
return;
- ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
- ref->valid_p_req = true;
+
+ ctrl = ref->ctrl;
+ if (ctrl->is_dyn_array && !req_alloc_dyn_array(ref, ctrl->new_elems))
+ return;
+
+ ref->p_req_elems = ctrl->new_elems;
+ ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems);
+ ref->p_req_valid = true;
}
/* Copy the current value to the request value */
void cur_to_req(struct v4l2_ctrl_ref *ref)
{
+ struct v4l2_ctrl *ctrl;
+
if (!ref)
return;
- ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
- ref->valid_p_req = true;
+
+ ctrl = ref->ctrl;
+ if (ctrl->is_dyn_array && !req_alloc_dyn_array(ref, ctrl->elems))
+ return;
+
+ ref->p_req_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems);
+ ref->p_req_valid = true;
}
/* Copy the request value to the new value */
-void req_to_new(struct v4l2_ctrl_ref *ref)
+int req_to_new(struct v4l2_ctrl_ref *ref)
{
+ struct v4l2_ctrl *ctrl;
+
if (!ref)
- return;
- if (ref->valid_p_req)
- ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
- else
- ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+ return 0;
+
+ ctrl = ref->ctrl;
+
+ /*
+ * This control was never set in the request, so just use the current
+ * value.
+ */
+ if (!ref->p_req_valid) {
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
+ return 0;
+ }
+
+ /* Not a dynamic array, so just copy the request value */
+ if (!ctrl->is_dyn_array) {
+ ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
+ return 0;
+ }
+
+ /* Sanity check, should never happen */
+ if (WARN_ON(!ref->p_req_dyn_alloc_elems))
+ return -ENOMEM;
+
+ /*
+ * Check if the number of elements in the request is more than the
+ * elements in ctrl->p_dyn. If so, attempt to realloc ctrl->p_dyn.
+ * Note that p_dyn is allocated with twice the number of elements
+ * in the dynamic array since it has to store both the current and
+ * new value of such a control.
+ */
+ if (ref->p_req_elems > ctrl->p_dyn_alloc_elems) {
+ unsigned int sz = ref->p_req_elems * ctrl->elem_size;
+ void *old = ctrl->p_dyn;
+ void *tmp = kvzalloc(2 * sz, GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
+ memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
+ ctrl->p_new.p = tmp;
+ ctrl->p_cur.p = tmp + sz;
+ ctrl->p_dyn = tmp;
+ ctrl->p_dyn_alloc_elems = ref->p_req_elems;
+ kvfree(old);
+ }
+
+ ctrl->new_elems = ref->p_req_elems;
+ ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
+ return 0;
}
/* Control range checking */
@@ -1110,17 +1200,6 @@ int check_range(enum v4l2_ctrl_type type,
}
}
-/* Validate a new control */
-int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
-{
- unsigned idx;
- int err = 0;
-
- for (idx = 0; !err && idx < ctrl->elems; idx++)
- err = ctrl->type_ops->validate(ctrl, idx, p_new);
- return err;
-}
-
/* Set the handler's error code if it wasn't set earlier already */
static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
{
@@ -1164,6 +1243,8 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
list_del(&ref->node);
+ if (ref->p_req_dyn_alloc_elems)
+ kvfree(ref->p_req.p);
kfree(ref);
}
/* Free all controls owned by the handler */
@@ -1171,6 +1252,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
list_del(&ctrl->node);
list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
list_del(&sev->node);
+ kvfree(ctrl->p_dyn);
kvfree(ctrl);
}
kvfree(hdl->buckets);
@@ -1286,7 +1368,7 @@ int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (hdl->error)
return hdl->error;
- if (allocate_req)
+ if (allocate_req && !ctrl->is_dyn_array)
size_extra_req = ctrl->elems * ctrl->elem_size;
new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
if (!new_ref)
@@ -1460,7 +1542,6 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
elem_size = sizeof(s32);
break;
}
- tot_ctrl_size = elem_size * elems;
/* Sanity checks */
if (id == 0 || name == NULL || !elem_size ||
@@ -1481,17 +1562,33 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, -EINVAL);
return NULL;
}
+ if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) {
+ /*
+ * For now, support this for one-dimensional arrays only.
+ *
+ * This can be relaxed in the future, but this will
+ * require more effort.
+ */
+ if (nr_of_dims != 1) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ /* Start with just 1 element */
+ elems = 1;
+ }
+ tot_ctrl_size = elem_size * elems;
sz_extra = 0;
if (type == V4L2_CTRL_TYPE_BUTTON)
flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
flags |= V4L2_CTRL_FLAG_READ_ONLY;
- else if (type == V4L2_CTRL_TYPE_INTEGER64 ||
- type == V4L2_CTRL_TYPE_STRING ||
- type >= V4L2_CTRL_COMPOUND_TYPES ||
- is_array)
+ else if (!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) &&
+ (type == V4L2_CTRL_TYPE_INTEGER64 ||
+ type == V4L2_CTRL_TYPE_STRING ||
+ type >= V4L2_CTRL_COMPOUND_TYPES ||
+ is_array))
sz_extra += 2 * tot_ctrl_size;
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
@@ -1520,7 +1617,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
ctrl->is_array = is_array;
+ ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY);
ctrl->elems = elems;
+ ctrl->new_elems = elems;
ctrl->nr_of_dims = nr_of_dims;
if (nr_of_dims)
memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
@@ -1533,6 +1632,16 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->cur.val = ctrl->val = def;
data = &ctrl[1];
+ if (ctrl->is_dyn_array) {
+ ctrl->p_dyn_alloc_elems = elems;
+ ctrl->p_dyn = kvzalloc(2 * elems * elem_size, GFP_KERNEL);
+ if (!ctrl->p_dyn) {
+ kvfree(ctrl);
+ return NULL;
+ }
+ data = ctrl->p_dyn;
+ }
+
if (!ctrl->is_int) {
ctrl->p_new.p = data;
ctrl->p_cur.p = data + tot_ctrl_size;
@@ -1542,7 +1651,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
- ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ if (ctrl->is_dyn_array)
+ ctrl->p_def.p = &ctrl[1];
+ else
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
}
@@ -1552,6 +1664,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
+ kvfree(ctrl->p_dyn);
kvfree(ctrl);
return NULL;
}
@@ -1889,6 +2002,9 @@ static int cluster_changed(struct v4l2_ctrl *master)
continue;
}
+ if (ctrl->elems != ctrl->new_elems)
+ ctrl_changed = true;
+
for (idx = 0; !ctrl_changed && idx < ctrl->elems; idx++)
ctrl_changed = !ctrl->type_ops->equal(ctrl, idx,
ctrl->p_cur, ctrl->p_new);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 16f42d2fd359..e22921e7ea61 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -704,9 +704,9 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
return hevc_decode_mode;
- case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ case V4L2_CID_STATELESS_HEVC_START_CODE:
return hevc_start_code;
case V4L2_CID_CAMERA_ORIENTATION:
return camera_orientation;
@@ -1003,13 +1003,6 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
- case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
- case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
- case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
- case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
- case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1188,6 +1181,14 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices";
case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates";
case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters";
+ case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
+ case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code";
+ case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets";
/* Colorimetry controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1363,8 +1364,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
- case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
+ case V4L2_CID_STATELESS_HEVC_START_CODE:
case V4L2_CID_STATELESS_H264_DECODE_MODE:
case V4L2_CID_STATELESS_H264_START_CODE:
case V4L2_CID_CAMERA_ORIENTATION:
@@ -1502,21 +1503,26 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_STATELESS_VP8_FRAME:
*type = V4L2_CTRL_TYPE_VP8_FRAME;
break;
- case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
+ case V4L2_CID_STATELESS_HEVC_SPS:
*type = V4L2_CTRL_TYPE_HEVC_SPS;
break;
- case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
+ case V4L2_CID_STATELESS_HEVC_PPS:
*type = V4L2_CTRL_TYPE_HEVC_PPS;
break;
- case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
+ case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
break;
- case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX:
+ case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX:
*type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
break;
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS:
+ case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
break;
+ case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS:
+ *type = V4L2_CTRL_TYPE_U32;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR:
*type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR;
break;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
index d4bf2c716f97..aba6176fab6c 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-priv.h
+++ b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
@@ -57,10 +57,9 @@ void cur_to_new(struct v4l2_ctrl *ctrl);
void cur_to_req(struct v4l2_ctrl_ref *ref);
void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags);
void new_to_req(struct v4l2_ctrl_ref *ref);
-void req_to_new(struct v4l2_ctrl_ref *ref);
+int req_to_new(struct v4l2_ctrl_ref *ref);
void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl);
void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes);
-int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new);
int handler_new_ref(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl,
struct v4l2_ctrl_ref **ctrl_ref,
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-request.c b/drivers/media/v4l2-core/v4l2-ctrls-request.c
index 7d098f287fd9..c637049d7a2b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-request.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-request.c
@@ -143,7 +143,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
- return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
+ return (ref && ref->p_req_valid) ? ref->ctrl : NULL;
}
EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
@@ -373,7 +373,7 @@ void v4l2_ctrl_request_complete(struct media_request *req,
v4l2_ctrl_unlock(master);
continue;
}
- if (ref->valid_p_req)
+ if (ref->p_req_valid)
continue;
/* Copy the current control value into the request */
@@ -442,7 +442,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
struct v4l2_ctrl_ref *r =
find_ref(hdl, master->cluster[i]->id);
- if (r->valid_p_req) {
+ if (r->p_req_valid) {
have_new_data = true;
break;
}
@@ -458,7 +458,11 @@ int v4l2_ctrl_request_setup(struct media_request *req,
struct v4l2_ctrl_ref *r =
find_ref(hdl, master->cluster[i]->id);
- req_to_new(r);
+ ret = req_to_new(r);
+ if (ret) {
+ v4l2_ctrl_unlock(master);
+ goto error;
+ }
master->cluster[i]->is_new = 1;
r->req_done = true;
}
@@ -490,6 +494,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
break;
}
+error:
media_request_object_put(obj);
return ret;
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 21470de62d72..c314025d977e 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1007,6 +1007,31 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return -EINVAL;
}
+static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace,
+ u32 *encoding, u32 *quantization,
+ u32 *xfer_func)
+{
+ bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 ||
+ pixelformat == V4L2_PIX_FMT_HSV32;
+
+ if (!v4l2_is_colorspace_valid(*colorspace)) {
+ *colorspace = V4L2_COLORSPACE_DEFAULT;
+ *encoding = V4L2_YCBCR_ENC_DEFAULT;
+ *quantization = V4L2_QUANTIZATION_DEFAULT;
+ *xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ }
+
+ if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) ||
+ (is_hsv && !v4l2_is_hsv_enc_valid(*encoding)))
+ *encoding = V4L2_YCBCR_ENC_DEFAULT;
+
+ if (!v4l2_is_quant_valid(*quantization))
+ *quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (!v4l2_is_xfer_func_valid(*xfer_func))
+ *xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
static void v4l_sanitize_format(struct v4l2_format *fmt)
{
unsigned int offset;
@@ -1026,20 +1051,40 @@ static void v4l_sanitize_format(struct v4l2_format *fmt)
* field to the magic value when the extended pixel format structure
* isn't used by applications.
*/
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) {
+ fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
+ offset = offsetof(struct v4l2_pix_format, priv)
+ + sizeof(fmt->fmt.pix.priv);
+ memset(((void *)&fmt->fmt.pix) + offset, 0,
+ sizeof(fmt->fmt.pix) - offset);
+ }
+ }
- if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return;
-
- if (fmt->fmt.pix.priv == V4L2_PIX_FMT_PRIV_MAGIC)
- return;
+ /* Replace invalid colorspace values with defaults. */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat,
+ &fmt->fmt.pix.colorspace,
+ &fmt->fmt.pix.ycbcr_enc,
+ &fmt->fmt.pix.quantization,
+ &fmt->fmt.pix.xfer_func);
+ } else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
+ u32 quantization = fmt->fmt.pix_mp.quantization;
+ u32 xfer_func = fmt->fmt.pix_mp.xfer_func;
- fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat,
+ &fmt->fmt.pix_mp.colorspace, &ycbcr_enc,
+ &quantization, &xfer_func);
- offset = offsetof(struct v4l2_pix_format, priv)
- + sizeof(fmt->fmt.pix.priv);
- memset(((void *)&fmt->fmt.pix) + offset, 0,
- sizeof(fmt->fmt.pix) - offset);
+ fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = quantization;
+ fmt->fmt.pix_mp.xfer_func = xfer_func;
+ }
}
static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
@@ -1296,6 +1341,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break;
case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break;
case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break;
case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
@@ -1306,9 +1353,11 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_P010: descr = "10-bit Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV12_4L4: descr = "Y/CbCr 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_NV12_16L16: descr = "Y/CbCr 4:2:0 (16x16 Linear)"; break;
case V4L2_PIX_FMT_NV12_32L32: descr = "Y/CbCr 4:2:0 (32x32 Linear)"; break;
+ case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/CbCr 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 6469f9a25a4e..837e1855f94b 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -925,7 +925,7 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
if ((!src_q->streaming || src_q->error ||
list_empty(&src_q->queued_list)) &&
(!dst_q->streaming || dst_q->error ||
- list_empty(&dst_q->queued_list)))
+ (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
return EPOLLERR;
spin_lock_irqsave(&src_q->done_lock, flags);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 4c5154e0bf00..d7cb7ead2ac7 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -21,11 +21,13 @@
/* SMI COMMON */
#define SMI_L1LEN 0x100
+#define SMI_L1_ARB 0x200
#define SMI_BUS_SEL 0x220
#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
/* All are MMU0 by default. Only specialize mmu1 here. */
#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+#define SMI_READ_FIFO_TH 0x230
#define SMI_M4U_TH 0x234
#define SMI_FIFO_TH1 0x238
#define SMI_FIFO_TH2 0x23c
@@ -360,6 +362,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701},
{.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
{.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
+ {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
@@ -544,6 +547,13 @@ static struct platform_driver mtk_smi_larb_driver = {
}
};
+static const struct mtk_smi_reg_pair mtk_smi_common_mt6795_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1_ARB, 0x1b},
+ {SMI_M4U_TH, 0xce810c85},
+ {SMI_FIFO_TH1, 0x43214c8},
+ {SMI_READ_FIFO_TH, 0x191f},
+};
+
static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = {
{SMI_L1LEN, 0xb},
{SMI_M4U_TH, 0xe100e10},
@@ -568,6 +578,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(0),
+ .init = mtk_smi_common_mt6795_init,
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -612,6 +628,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1},
{.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
+ {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795},
{.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 908f8d5392b2..85bc936c02f9 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1395,15 +1395,14 @@ err_msg:
static int tegra_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
- struct opp_table *hw_opp_table;
- int err;
+ int opp_token, err;
- hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- err = PTR_ERR_OR_ZERO(hw_opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ if (err < 0) {
dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
return err;
}
+ opp_token = err;
err = dev_pm_opp_of_add_table(emc->dev);
if (err) {
@@ -1430,7 +1429,7 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
remove_table:
dev_pm_opp_of_remove_table(emc->dev);
put_hw_table:
- dev_pm_opp_put_supported_hw(hw_opp_table);
+ dev_pm_opp_put_supported_hw(opp_token);
return err;
}
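Not part of the patch: a kernel-style sketch of the token-based OPP helper usage the hunk above switches to. In this tree dev_pm_opp_set_supported_hw() returns either a negative errno or a token, and that token (not an opp_table pointer) is what dev_pm_opp_put_supported_hw() later takes. The wrapper name is hypothetical.

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical wrapper: set the supported-HW version and return the token. */
static int example_opp_set_supported_hw(struct device *dev, u32 hw_version,
                                        int *token)
{
        int ret;

        ret = dev_pm_opp_set_supported_hw(dev, &hw_version, 1);
        if (ret < 0) {
                dev_err(dev, "failed to set OPP supported HW: %d\n", ret);
                return ret;
        }

        *token = ret;   /* pass to dev_pm_opp_put_supported_hw() on teardown */
        return 0;
}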
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
index e23ebd421f17..a9e8fd99730f 100644
--- a/drivers/memory/tegra/tegra234.c
+++ b/drivers/memory/tegra/tegra234.c
@@ -11,6 +11,76 @@
static const struct tegra_mc_client tegra234_mc_clients[] = {
{
+ .id = TEGRA234_MEMORY_CLIENT_MGBEARD,
+ .name = "mgbeard",
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBRD,
+ .name = "mgbebrd",
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECRD,
+ .name = "mgbecrd",
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x2d0,
+ .security = 0x2d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDRD,
+ .name = "mgbedrd",
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x2d8,
+ .security = 0x2dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEAWR,
+ .name = "mgbeawr",
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2e0,
+ .security = 0x2e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBWR,
+ .name = "mgbebwr",
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2f8,
+ .security = 0x2fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECWR,
+ .name = "mgbecwr",
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
+ },
+ }, {
.id = TEGRA234_MEMORY_CLIENT_SDMMCRAB,
.name = "sdmmcrab",
.sid = TEGRA234_SID_SDMMC4,
@@ -21,6 +91,16 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
},
}, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDWR,
+ .name = "mgbedwr",
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
+ },
+ }, {
.id = TEGRA234_MEMORY_CLIENT_SDMMCWAB,
.name = "sdmmcwab",
.sid = TEGRA234_SID_SDMMC4,
diff --git a/drivers/memory/ti-emif-sram-pm.S b/drivers/memory/ti-emif-sram-pm.S
index d1c83bd5b98e..9bcac35c3304 100644
--- a/drivers/memory/ti-emif-sram-pm.S
+++ b/drivers/memory/ti-emif-sram-pm.S
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Low level PM code for TI EMIF
*
* Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/linkage.h>
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 3993bdd4b519..ba8414519515 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1341,17 +1341,17 @@ static int msb_ftl_initialize(struct msb_data *msb)
msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
msb->logical_block_count = msb->zone_count * 496 - 2;
- msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
- msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
msb->lba_to_pba_table =
kmalloc_array(msb->logical_block_count, sizeof(u16),
GFP_KERNEL);
if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
!msb->erased_blocks_bitmap) {
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
- kfree(msb->erased_blocks_bitmap);
return -ENOMEM;
}
@@ -1946,7 +1946,8 @@ static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
kfree(msb->cache);
msb->card = NULL;
@@ -2129,7 +2130,7 @@ static int msb_init_disk(struct memstick_dev *card)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(msb->disk);
+ put_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
@@ -2187,7 +2188,6 @@ static void msb_remove(struct memstick_dev *card)
/* Remove the disk */
del_gendisk(msb->disk);
- blk_cleanup_queue(msb->queue);
blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
@@ -2244,8 +2244,8 @@ static int msb_resume(struct memstick_dev *card)
goto out;
if (msb->block_count != new_msb->block_count ||
- memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
- msb->block_count / 8))
+ !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count))
goto out;
card_dead = false;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 725ba74ded30..61cf75d4a01e 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1209,7 +1209,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(msb->disk);
+ put_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card)
del_gendisk(msb->disk);
dev_dbg(&card->dev, "mspro block remove\n");
- blk_cleanup_queue(msb->queue);
blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 388675cc1765..62089a8caa2f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -101,7 +101,7 @@ static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for interna
* @target: per target private data
* @sdev: SCSI device
*
- * Update the target negotiation parameters based on the the Inquiry
+ * Update the target negotiation parameters based on the Inquiry
* data, adapter capabilities, and NVRAM settings.
**/
static void
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3b59456f5545..abb58ab1a1a4 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -572,6 +572,7 @@ config LPC_ICH
tristate "Intel ICH LPC"
depends on PCI
select MFD_CORE
+ select P2SB if X86
help
The LPC bridge function of the Intel ICH provides support for
many functional units. This driver provides needed support for
@@ -1357,12 +1358,13 @@ config MFD_STA2X11
select REGMAP_MMIO
config MFD_SUN6I_PRCM
- bool "Allwinner A31 PRCM controller"
+ bool "Allwinner A31/A23/A33 PRCM controller"
depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
- in A31 SoC.
+ in the A31, A23, and A33 SoCs. Other Allwinner SoCs contain similar
+ hardware, but they do not use this driver.
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 56338f9dbd0b..4fb7e35eb5ed 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -596,12 +596,11 @@ static __init int asic3_gpio_probe(struct platform_device *pdev,
return gpiochip_add_data(&asic->gpio, asic);
}
-static int asic3_gpio_remove(struct platform_device *pdev)
+static void asic3_gpio_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
gpiochip_remove(&asic->gpio);
- return 0;
}
static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
@@ -1030,7 +1029,6 @@ static int __init asic3_probe(struct platform_device *pdev)
static int asic3_remove(struct platform_device *pdev)
{
- int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
@@ -1038,9 +1036,8 @@ static int asic3_remove(struct platform_device *pdev)
asic3_mfd_remove(pdev);
- ret = asic3_gpio_remove(pdev);
- if (ret < 0)
- return ret;
+ asic3_gpio_remove(pdev);
+
asic3_irq_remove(pdev);
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
index d96f1d689e7f..f3bad2b52f17 100644
--- a/drivers/mfd/atmel-smc.c
+++ b/drivers/mfd/atmel-smc.c
@@ -240,7 +240,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_cycle);
* @conf: the SMC CS conf to apply
*
* Applies an SMC CS configuration.
- * Only valid on at91sam9/avr32 SoCs.
+ * Only valid on at91sam9 SoCs.
*/
void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
const struct atmel_smc_cs_conf *conf)
@@ -281,7 +281,7 @@ EXPORT_SYMBOL_GPL(atmel_hsmc_cs_conf_apply);
* @conf: the SMC CS conf object to store the current conf
*
* Retrieve the SMC CS configuration.
- * Only valid on at91sam9/avr32 SoCs.
+ * Only valid on at91sam9 SoCs.
*/
void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
struct atmel_smc_cs_conf *conf)
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 8161a5dc68e8..88a212a8168c 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -619,6 +619,9 @@ static const struct mfd_cell axp20x_cells[] = {
static const struct mfd_cell axp221_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -645,6 +648,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -785,6 +791,9 @@ static const struct mfd_cell axp806_cells[] = {
static const struct mfd_cell axp809_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c
index 42fe67f1538e..49cd1f03884a 100644
--- a/drivers/mfd/bcm2835-pm.c
+++ b/drivers/mfd/bcm2835-pm.c
@@ -25,9 +25,52 @@ static const struct mfd_cell bcm2835_power_devs[] = {
{ .name = "bcm2835-power" },
};
+static int bcm2835_pm_get_pdata(struct platform_device *pdev,
+ struct bcm2835_pm *pm)
+{
+ if (of_find_property(pm->dev->of_node, "reg-names", NULL)) {
+ struct resource *res;
+
+ pm->base = devm_platform_ioremap_resource_byname(pdev, "pm");
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "asb");
+ if (res) {
+ pm->asb = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pm->asb))
+ pm->asb = NULL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "rpivid_asb");
+ if (res) {
+ pm->rpivid_asb = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pm->rpivid_asb))
+ pm->rpivid_asb = NULL;
+ }
+
+ return 0;
+ }
+
+ /* If no 'reg-names' property is found, we can assume we're using an old DTB. */
+ pm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ pm->asb = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pm->asb))
+ pm->asb = NULL;
+
+ pm->rpivid_asb = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(pm->rpivid_asb))
+ pm->rpivid_asb = NULL;
+
+ return 0;
+}
+
static int bcm2835_pm_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct bcm2835_pm *pm;
int ret;
@@ -39,10 +82,9 @@ static int bcm2835_pm_probe(struct platform_device *pdev)
pm->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pm->base))
- return PTR_ERR(pm->base);
+ ret = bcm2835_pm_get_pdata(pdev, pm);
+ if (ret)
+ return ret;
ret = devm_mfd_add_devices(dev, -1,
bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs),
@@ -50,30 +92,22 @@ static int bcm2835_pm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* We'll use the presence of the AXI ASB regs in the
+ /*
+ * We'll use the presence of the AXI ASB regs in the
* bcm2835-pm binding as the key for whether we can reference
* the full PM register range and support power domains.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- pm->asb = devm_ioremap_resource(dev, res);
- if (IS_ERR(pm->asb))
- return PTR_ERR(pm->asb);
-
- ret = devm_mfd_add_devices(dev, -1,
- bcm2835_power_devs,
- ARRAY_SIZE(bcm2835_power_devs),
- NULL, 0, NULL);
- if (ret)
- return ret;
- }
-
+ if (pm->asb)
+ return devm_mfd_add_devices(dev, -1, bcm2835_power_devs,
+ ARRAY_SIZE(bcm2835_power_devs),
+ NULL, 0, NULL);
return 0;
}
static const struct of_device_id bcm2835_pm_of_match[] = {
{ .compatible = "brcm,bcm2835-pm-wdt", },
{ .compatible = "brcm,bcm2835-pm", },
+ { .compatible = "brcm,bcm2711-pm", },
{},
};
MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match);
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 596731caf407..344ad03bdc42 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -65,6 +65,11 @@ static const struct cros_feature_to_name cros_mcu_devices[] = {
.desc = "System Control Processor",
},
{
+ .id = EC_FEATURE_SCP_C1,
+ .name = CROS_EC_DEV_SCP_C1_NAME,
+ .desc = "System Control Processor 2nd Core",
+ },
+ {
.id = EC_FEATURE_TOUCHPAD,
.name = CROS_EC_DEV_TP_NAME,
.desc = "Touchpad",
@@ -250,8 +255,8 @@ static int ec_device_probe(struct platform_device *pdev)
* The PCHG device cannot be detected by sending EC_FEATURE_GET_CMD, but
* it can be detected by querying the number of peripheral chargers.
*/
- retval = cros_ec_command(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
- &pchg_count, sizeof(pchg_count));
+ retval = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
+ &pchg_count, sizeof(pchg_count));
if (retval >= 0 && pchg_count.port_count) {
retval = mfd_add_hotplug_devices(ec->dev,
cros_ec_pchg_cells,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 56c61c99eb23..27a881da4d6e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -798,7 +798,7 @@ void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
* @opp: The new ARM operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
- * This function sets the the operating point of the ARM.
+ * This function sets the operating point of the ARM.
*/
int db8500_prcmu_set_arm_opp(u8 opp)
{
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 852129ea0766..6cd0b0c752d6 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -91,11 +91,6 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
-enum dln2_endpoint {
- DLN2_EP_OUT = 0,
- DLN2_EP_IN = 1,
-};
-
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -777,16 +772,12 @@ static int dln2_probe(struct usb_interface *interface,
int ret;
int i, j;
- if (hostif->desc.bInterfaceNumber != 0 ||
- hostif->desc.bNumEndpoints < 2)
+ if (hostif->desc.bInterfaceNumber != 0)
return -ENODEV;
- epout = &hostif->endpoint[DLN2_EP_OUT].desc;
- if (!usb_endpoint_is_bulk_out(epout))
- return -ENODEV;
- epin = &hostif->endpoint[DLN2_EP_IN].desc;
- if (!usb_endpoint_is_bulk_in(epin))
- return -ENODEV;
+ ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
+ if (ret)
+ return ret;
dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
if (!dln2)
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index f7950d2197df..bb08b7a73fe1 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -385,6 +385,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
+ /* MTL-P */
+ { PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e78), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e79), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7a), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7b), (kernel_ulong_t)&bxt_i2c_info },
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
index 8db3bcf5fccc..f4d0d72573c8 100644
--- a/drivers/mfd/intel-m10-bmc.c
+++ b/drivers/mfd/intel-m10-bmc.c
@@ -26,7 +26,7 @@ static struct mfd_cell m10bmc_d5005_subdevs[] = {
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-hwmon" },
{ .name = "n3000bmc-retimer" },
- { .name = "n3000bmc-secure" },
+ { .name = "n3000bmc-sec-update" },
};
static struct mfd_cell m10bmc_n5010_subdevs[] = {
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index bc069c4daa60..8dac0d41f64f 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -2,10 +2,11 @@
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
- * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015-2017, 2022 Intel Corporation. All rights reserved.
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -18,9 +19,9 @@
#include <asm/intel_scu_ipc.h>
/* PMIC device registers */
-#define REG_ADDR_MASK 0xFF00
+#define REG_ADDR_MASK GENMASK(15, 8)
#define REG_ADDR_SHIFT 8
-#define REG_OFFSET_MASK 0xFF
+#define REG_OFFSET_MASK GENMASK(7, 0)
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
@@ -112,29 +113,29 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, BIT(0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
- REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
- REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, 0xff),
+ REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, GENMASK(7, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
- REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, GENMASK(4, 0)),
+ REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
- REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
+ REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, GENMASK(2, 1)),
};
static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
- REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, 0x03),
+ REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)),
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
@@ -333,17 +334,19 @@ static unsigned long bxtwc_reg_addr;
static ssize_t addr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%lx\n", bxtwc_reg_addr);
+ return sysfs_emit(buf, "0x%lx\n", bxtwc_reg_addr);
}
static ssize_t addr_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- if (kstrtoul(buf, 0, &bxtwc_reg_addr)) {
- dev_err(dev, "Invalid register address\n");
- return -EINVAL;
- }
- return (ssize_t)count;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &bxtwc_reg_addr);
+ if (ret)
+ return ret;
+
+ return count;
}
static ssize_t val_show(struct device *dev,
@@ -354,12 +357,12 @@ static ssize_t val_show(struct device *dev,
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
ret = regmap_read(pmic->regmap, bxtwc_reg_addr, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "Failed to read 0x%lx\n", bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
- return sprintf(buf, "0x%02x\n", val);
+ return sysfs_emit(buf, "0x%02x\n", val);
}
static ssize_t val_store(struct device *dev,
@@ -377,7 +380,7 @@ static ssize_t val_store(struct device *dev,
if (ret) {
dev_err(dev, "Failed to write value 0x%02x to address 0x%lx",
val, bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
return count;
}
@@ -394,6 +397,11 @@ static const struct attribute_group bxtwc_group = {
.attrs = bxtwc_attrs,
};
+static const struct attribute_group *bxtwc_groups[] = {
+ &bxtwc_group,
+ NULL
+};
+
static const struct regmap_config bxtwc_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -410,12 +418,9 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
int irq;
irq = regmap_irq_get_virq(pdata, pirq);
- if (irq < 0) {
- dev_err(pmic->dev,
- "Failed to get parent vIRQ(%d) for chip %s, ret:%d\n",
- pirq, chip->name, irq);
- return irq;
- }
+ if (irq < 0)
+ return dev_err_probe(pmic->dev, irq, "Failed to get parent vIRQ(%d) for chip %s\n",
+ pirq, chip->name);
return devm_regmap_add_irq_chip(pmic->dev, pmic->regmap, irq, irq_flags,
0, chip, data);
@@ -423,25 +428,19 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
static int bxtwc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- acpi_handle handle;
acpi_status status;
unsigned long long hrv;
struct intel_soc_pmic *pmic;
- handle = ACPI_HANDLE(&pdev->dev);
- status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != BROXTON_PMIC_WC_HRV) {
- dev_err(&pdev->dev, "Invalid PMIC hardware revision: %llu\n",
- hrv);
- return -ENODEV;
- }
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != BROXTON_PMIC_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -450,49 +449,39 @@ static int bxtwc_probe(struct platform_device *pdev)
return ret;
pmic->irq = ret;
- dev_set_drvdata(&pdev->dev, pmic);
- pmic->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pmic);
+ pmic->dev = dev;
- pmic->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ pmic->scu = devm_intel_scu_ipc_dev_get(dev);
if (!pmic->scu)
return -EPROBE_DEFER;
- pmic->regmap = devm_regmap_init(&pdev->dev, NULL, pmic,
- &bxtwc_regmap_config);
- if (IS_ERR(pmic->regmap)) {
- ret = PTR_ERR(pmic->regmap);
- dev_err(&pdev->dev, "Failed to initialise regmap: %d\n", ret);
- return ret;
- }
+ pmic->regmap = devm_regmap_init(dev, NULL, pmic, &bxtwc_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return dev_err_probe(dev, PTR_ERR(pmic->regmap), "Failed to initialise regmap\n");
- ret = devm_regmap_add_irq_chip(&pdev->dev, pmic->regmap, pmic->irq,
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
IRQF_ONESHOT | IRQF_SHARED,
0, &bxtwc_regmap_irq_chip,
&pmic->irq_chip_data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_PWRBTN_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_pwrbtn,
&pmic->irq_chip_data_pwrbtn);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
&pmic->irq_chip_data_tmu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n");
/* Add chained IRQ handler for BCU IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -500,12 +489,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_bcu,
&pmic->irq_chip_data_bcu);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add BUC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add BUC IRQ chip\n");
/* Add chained IRQ handler for ADC IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -513,12 +498,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_adc,
&pmic->irq_chip_data_adc);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add ADC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n");
/* Add chained IRQ handler for CHGR IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -526,12 +507,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_chgr,
&pmic->irq_chip_data_chgr);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CHGR IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n");
/* Add chained IRQ handler for CRIT IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -539,54 +516,33 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_crit,
&pmic->irq_chip_data_crit);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CRIT IRQ chip\n");
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CRIT IRQ chip\n");
- return ret;
- }
-
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
- ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add devices\n");
- return ret;
- }
-
- ret = sysfs_create_group(&pdev->dev.kobj, &bxtwc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n", ret);
- return ret;
- }
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, bxt_wc_dev, ARRAY_SIZE(bxt_wc_dev),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devices\n");
/*
- * There is known hw bug. Upon reset BIT 5 of register
+ * There is a known H/W bug. Upon reset, BIT 5 of register
* BXTWC_CHGR_LVL1_IRQ is 0 which is the expected value. However,
* later it's set to 1(masked) automatically by hardware. So we
- * have the software workaround here to unmaksed it in order to let
- * charger interrutp work.
+ * place the software workaround here to unmask it again in order
+ * to re-enable the charger interrupt.
*/
- regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1,
- BXTWC_MIRQLVL1_MCHGR, 0);
-
- return 0;
-}
-
-static int bxtwc_remove(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &bxtwc_group);
+ regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1, BXTWC_MIRQLVL1_MCHGR, 0);
return 0;
}
static void bxtwc_shutdown(struct platform_device *pdev)
{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev);
+ struct intel_soc_pmic *pmic = platform_get_drvdata(pdev);
disable_irq(pmic->irq);
}
-#ifdef CONFIG_PM_SLEEP
static int bxtwc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -603,8 +559,8 @@ static int bxtwc_resume(struct device *dev)
enable_irq(pmic->irq);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
static const struct acpi_device_id bxtwc_acpi_ids[] = {
{ "INT34D3", },
@@ -614,16 +570,16 @@ MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
static struct platform_driver bxtwc_driver = {
.probe = bxtwc_probe,
- .remove = bxtwc_remove,
.shutdown = bxtwc_shutdown,
.driver = {
.name = "BXTWC PMIC",
- .pm = &bxtwc_pm_ops,
- .acpi_match_table = ACPI_PTR(bxtwc_acpi_ids),
+ .pm = pm_sleep_ptr(&bxtwc_pm_ops),
+ .acpi_match_table = bxtwc_acpi_ids,
+ .dev_groups = bxtwc_groups,
},
};
module_platform_driver(bxtwc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Qipeng Zha<qipeng.zha@intel.com>");
+MODULE_AUTHOR("Qipeng Zha <qipeng.zha@intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 4eab191e053a..9216f0d34206 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -179,18 +179,13 @@ static int cht_wc_probe(struct i2c_client *client)
int ret;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != CHT_WC_HRV) {
- dev_err(dev, "Invalid PMIC hardware revision: %llu\n", hrv);
- return -ENODEV;
- }
- if (client->irq < 0) {
- dev_err(dev, "Invalid IRQ\n");
- return -EINVAL;
- }
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != CHT_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
+
+ if (client->irq < 0)
+ return dev_err_probe(dev, -EINVAL, "Invalid IRQ\n");
pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
@@ -227,7 +222,7 @@ static void cht_wc_shutdown(struct i2c_client *client)
disable_irq(pmic->irq);
}
-static int __maybe_unused cht_wc_suspend(struct device *dev)
+static int cht_wc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -236,7 +231,7 @@ static int __maybe_unused cht_wc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused cht_wc_resume(struct device *dev)
+static int cht_wc_resume(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -244,7 +239,7 @@ static int __maybe_unused cht_wc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
static const struct i2c_device_id cht_wc_i2c_id[] = {
{ }
@@ -258,7 +253,7 @@ static const struct acpi_device_id cht_wc_acpi_ids[] = {
static struct i2c_driver cht_wc_driver = {
.driver = {
.name = "CHT Whiskey Cove PMIC",
- .pm = &cht_wc_pm_ops,
+ .pm = pm_sleep_ptr(&cht_wc_pm_ops),
.acpi_match_table = cht_wc_acpi_ids,
},
.probe_new = cht_wc_probe,
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
index 858c9e0a49a4..b6166dec492d 100644
--- a/drivers/mfd/lp873x.c
+++ b/drivers/mfd/lp873x.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*
* Author: Keerthy <j-keerthy@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/interrupt.h>
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 9ffab9aafd81..650951f89f1c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -8,7 +8,8 @@
* Configuration Registers.
*
* This driver is derived from lpc_sch.
-
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation
* Copyright (c) 2011 Extreme Engineering Solution, Inc.
* Author: Aaron Sierra <asierra@xes-inc.com>
*
@@ -42,9 +43,11 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/lpc_ich.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/platform_data/x86/p2sb.h>
#define ACPIBASE 0x40
#define ACPIBASE_GPE_OFF 0x28
@@ -71,8 +74,6 @@
#define BCR 0xdc
#define BCR_WPD BIT(0)
-#define SPIBASE_APL_SZ 4096
-
#define GPIOBASE_ICH0 0x58
#define GPIOCTRL_ICH0 0x5C
#define GPIOBASE_ICH6 0x48
@@ -143,6 +144,73 @@ static struct mfd_cell lpc_ich_gpio_cell = {
.ignore_resource_conflicts = true,
};
+#define APL_GPIO_NORTH 0
+#define APL_GPIO_NORTHWEST 1
+#define APL_GPIO_WEST 2
+#define APL_GPIO_SOUTHWEST 3
+#define APL_GPIO_NR_DEVICES 4
+
+/* Offset data for Apollo Lake GPIO controllers */
+static resource_size_t apl_gpio_offsets[APL_GPIO_NR_DEVICES] = {
+ [APL_GPIO_NORTH] = 0xc50000,
+ [APL_GPIO_NORTHWEST] = 0xc40000,
+ [APL_GPIO_WEST] = 0xc70000,
+ [APL_GPIO_SOUTHWEST] = 0xc00000,
+};
+
+#define APL_GPIO_RESOURCE_SIZE 0x1000
+
+#define APL_GPIO_IRQ 14
+
+static struct resource apl_gpio_resources[APL_GPIO_NR_DEVICES][2] = {
+ [APL_GPIO_NORTH] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_NORTHWEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_WEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_SOUTHWEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+};
+
+static const struct mfd_cell apl_gpio_devices[APL_GPIO_NR_DEVICES] = {
+ [APL_GPIO_NORTH] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_NORTH,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_NORTH]),
+ .resources = apl_gpio_resources[APL_GPIO_NORTH],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_NORTHWEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_NORTHWEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_NORTHWEST]),
+ .resources = apl_gpio_resources[APL_GPIO_NORTHWEST],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_WEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_WEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_WEST]),
+ .resources = apl_gpio_resources[APL_GPIO_WEST],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_SOUTHWEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_SOUTHWEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_SOUTHWEST]),
+ .resources = apl_gpio_resources[APL_GPIO_SOUTHWEST],
+ .ignore_resource_conflicts = true,
+ },
+};
static struct mfd_cell lpc_ich_spi_cell = {
.name = "intel-spi",
@@ -1086,6 +1154,34 @@ wdt_done:
return ret;
}
+static int lpc_ich_init_pinctrl(struct pci_dev *dev)
+{
+ struct resource base;
+ unsigned int i;
+ int ret;
+
+ /* Check if GPIO has been exported as an ACPI device */
+ if (acpi_dev_present("INT3452", NULL, -1))
+ return -EEXIST;
+
+ ret = p2sb_bar(dev->bus, 0, &base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(apl_gpio_devices); i++) {
+ struct resource *mem = &apl_gpio_resources[i][0];
+ resource_size_t offset = apl_gpio_offsets[i];
+
+ /* Fill MEM resource */
+ mem->start = base.start + offset;
+ mem->end = base.start + offset + APL_GPIO_RESOURCE_SIZE - 1;
+ mem->flags = base.flags;
+ }
+
+ return mfd_add_devices(&dev->dev, 0, apl_gpio_devices,
+ ARRAY_SIZE(apl_gpio_devices), NULL, 0, NULL);
+}
+
static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
{
u32 val;
@@ -1100,35 +1196,32 @@ static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
return val & BYT_BCR_WPD;
}
-static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+static bool lpc_ich_set_writeable(struct pci_bus *bus, unsigned int devfn)
{
- struct pci_dev *pdev = data;
u32 bcr;
- pci_read_config_dword(pdev, BCR, &bcr);
+ pci_bus_read_config_dword(bus, devfn, BCR, &bcr);
if (!(bcr & BCR_WPD)) {
bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
+ pci_bus_write_config_dword(bus, devfn, BCR, bcr);
+ pci_bus_read_config_dword(bus, devfn, BCR, &bcr);
}
return bcr & BCR_WPD;
}
-static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
{
- unsigned int spi = PCI_DEVFN(13, 2);
- struct pci_bus *bus = data;
- u32 bcr;
+ struct pci_dev *pdev = data;
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
+ return lpc_ich_set_writeable(pdev->bus, pdev->devfn);
+}
- return bcr & BCR_WPD;
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+
+ return lpc_ich_set_writeable(pdev->bus, PCI_DEVFN(13, 2));
}
static int lpc_ich_init_spi(struct pci_dev *dev)
@@ -1137,6 +1230,7 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
struct resource *res = &intel_spi_res[0];
struct intel_spi_boardinfo *info;
u32 spi_base, rcba;
+ int ret;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1167,30 +1261,19 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
}
break;
- case INTEL_SPI_BXT: {
- unsigned int p2sb = PCI_DEVFN(13, 0);
- unsigned int spi = PCI_DEVFN(13, 2);
- struct pci_bus *bus = dev->bus;
-
+ case INTEL_SPI_BXT:
/*
* The P2SB is hidden by BIOS and we need to unhide it in
* order to read BAR of the SPI flash device. Once that is
* done we hide it again.
*/
- pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x0);
- pci_bus_read_config_dword(bus, spi, PCI_BASE_ADDRESS_0,
- &spi_base);
- if (spi_base != ~0) {
- res->start = spi_base & 0xfffffff0;
- res->end = res->start + SPIBASE_APL_SZ - 1;
-
- info->set_writeable = lpc_ich_bxt_set_writeable;
- info->data = bus;
- }
+ ret = p2sb_bar(dev->bus, PCI_DEVFN(13, 2), res);
+ if (ret)
+ return ret;
- pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
+ info->set_writeable = lpc_ich_bxt_set_writeable;
+ info->data = dev;
break;
- }
default:
return -EINVAL;
@@ -1249,6 +1332,12 @@ static int lpc_ich_probe(struct pci_dev *dev,
cell_added = true;
}
+ if (priv->chipset == LPC_APL) {
+ ret = lpc_ich_init_pinctrl(dev);
+ if (!ret)
+ cell_added = true;
+ }
+
if (lpc_chipset_info[priv->chipset].spi_type) {
ret = lpc_ich_init_spi(dev);
if (!ret)
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index fec2096474ad..a6661e07035b 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -419,9 +419,11 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
of_node_put(fps_child);
+ of_node_put(fps_np);
return ret;
}
}
+ of_node_put(fps_np);
config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
diff --git a/drivers/mfd/max77714.c b/drivers/mfd/max77714.c
index d1e4247800d2..143a432ea343 100644
--- a/drivers/mfd/max77714.c
+++ b/drivers/mfd/max77714.c
@@ -3,7 +3,7 @@
* Maxim MAX77714 Core Driver
*
* Copyright (C) 2022 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -148,5 +148,5 @@ static struct i2c_driver max77714_driver = {
module_i2c_driver(max77714_driver);
MODULE_DESCRIPTION("Maxim MAX77714 MFD core driver");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 684a011a6396..8b058200d5ad 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -60,12 +60,29 @@ int mfd_cell_disable(struct platform_device *pdev)
EXPORT_SYMBOL(mfd_cell_disable);
#if IS_ENABLED(CONFIG_ACPI)
+struct match_ids_walk_data {
+ struct acpi_device_id *ids;
+ struct acpi_device *adev;
+};
+
+static int match_device_ids(struct acpi_device *adev, void *data)
+{
+ struct match_ids_walk_data *wd = data;
+
+ if (!acpi_match_device_ids(adev, wd->ids)) {
+ wd->adev = adev;
+ return 1;
+ }
+
+ return 0;
+}
+
static void mfd_acpi_add_device(const struct mfd_cell *cell,
struct platform_device *pdev)
{
const struct mfd_cell_acpi_match *match = cell->acpi_match;
- struct acpi_device *parent, *child;
struct acpi_device *adev = NULL;
+ struct acpi_device *parent;
parent = ACPI_COMPANION(pdev->dev.parent);
if (!parent)
@@ -83,14 +100,14 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell,
if (match) {
if (match->pnpid) {
struct acpi_device_id ids[2] = {};
+ struct match_ids_walk_data wd = {
+ .adev = NULL,
+ .ids = ids,
+ };
strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
- list_for_each_entry(child, &parent->children, node) {
- if (!acpi_match_device_ids(child, ids)) {
- adev = child;
- break;
- }
- }
+ acpi_dev_for_each_child(parent, match_device_ids, &wd);
+ adev = wd.adev;
} else {
adev = acpi_find_child_device(parent, match->adr, false);
}
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index ea5e452510eb..389756436af6 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -3,6 +3,8 @@
// Copyright (c) 2020 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/mfd/mt6357/core.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/core.h>
@@ -17,6 +19,17 @@
#define MTK_PMIC_REG_WIDTH 16
+static const struct irq_top_t mt6357_ints[] = {
+ MT6357_TOP_GEN(BUCK),
+ MT6357_TOP_GEN(LDO),
+ MT6357_TOP_GEN(PSC),
+ MT6357_TOP_GEN(SCK),
+ MT6357_TOP_GEN(BM),
+ MT6357_TOP_GEN(HK),
+ MT6357_TOP_GEN(AUD),
+ MT6357_TOP_GEN(MISC),
+};
+
static const struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(BUCK),
MT6358_TOP_GEN(LDO),
@@ -39,6 +52,13 @@ static const struct irq_top_t mt6359_ints[] = {
MT6359_TOP_GEN(MISC),
};
+static struct pmic_irq_data mt6357_irqd = {
+ .num_top = ARRAY_SIZE(mt6357_ints),
+ .num_pmic_irqs = MT6357_IRQ_NR,
+ .top_int_status_reg = MT6357_TOP_INT_STATUS0,
+ .pmic_ints = mt6357_ints,
+};
+
static struct pmic_irq_data mt6358_irqd = {
.num_top = ARRAY_SIZE(mt6358_ints),
.num_pmic_irqs = MT6358_IRQ_NR,
@@ -211,6 +231,10 @@ int mt6358_irq_init(struct mt6397_chip *chip)
struct pmic_irq_data *irqd;
switch (chip->chip_id) {
+ case MT6357_CHIP_ID:
+ chip->irq_data = &mt6357_irqd;
+ break;
+
case MT6358_CHIP_ID:
case MT6366_CHIP_ID:
chip->irq_data = &mt6358_irqd;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 1a368ad08f58..f6c1f80f94a4 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,10 +12,14 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6359/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/registers.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/registers.h>
@@ -23,6 +27,12 @@
#define MT6323_RTC_BASE 0x8000
#define MT6323_RTC_SIZE 0x40
+#define MT6357_RTC_BASE 0x0588
+#define MT6357_RTC_SIZE 0x3c
+
+#define MT6331_RTC_BASE 0x4000
+#define MT6331_RTC_SIZE 0x40
+
#define MT6358_RTC_BASE 0x0588
#define MT6358_RTC_SIZE 0x3c
@@ -37,6 +47,16 @@ static const struct resource mt6323_rtc_resources[] = {
DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
};
+static const struct resource mt6357_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6357_RTC_BASE, MT6357_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6357_IRQ_RTC),
+};
+
+static const struct resource mt6331_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6331_RTC_BASE, MT6331_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6331_IRQ_STATUS_RTC),
+};
+
static const struct resource mt6358_rtc_resources[] = {
DEFINE_RES_MEM(MT6358_RTC_BASE, MT6358_RTC_SIZE),
DEFINE_RES_IRQ(MT6358_IRQ_RTC),
@@ -66,6 +86,18 @@ static const struct resource mt6323_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
};
+static const struct resource mt6357_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY, "homekey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY_R, "powerkey_r"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY_R, "homekey_r"),
+};
+
+static const struct resource mt6331_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_HOMEKEY, "homekey"),
+};
+
static const struct resource mt6397_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_PWRKEY, "powerkey"),
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_HOMEKEY, "homekey"),
@@ -100,6 +132,43 @@ static const struct mfd_cell mt6323_devs[] = {
},
};
+static const struct mfd_cell mt6357_devs[] = {
+ {
+ .name = "mt6357-regulator",
+ }, {
+ .name = "mt6357-rtc",
+ .num_resources = ARRAY_SIZE(mt6357_rtc_resources),
+ .resources = mt6357_rtc_resources,
+ .of_compatible = "mediatek,mt6357-rtc",
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6357_keys_resources),
+ .resources = mt6357_keys_resources,
+ .of_compatible = "mediatek,mt6357-keys"
+ },
+};
+
+/* MT6331 is always used in combination with MT6332 */
+static const struct mfd_cell mt6331_mt6332_devs[] = {
+ {
+ .name = "mt6331-rtc",
+ .num_resources = ARRAY_SIZE(mt6331_rtc_resources),
+ .resources = mt6331_rtc_resources,
+ .of_compatible = "mediatek,mt6331-rtc",
+ }, {
+ .name = "mt6331-regulator",
+ .of_compatible = "mediatek,mt6331-regulator"
+ }, {
+ .name = "mt6332-regulator",
+ .of_compatible = "mediatek,mt6332-regulator"
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6331_keys_resources),
+ .resources = mt6331_keys_resources,
+ .of_compatible = "mediatek,mt6331-keys"
+ },
+};
+
static const struct mfd_cell mt6358_devs[] = {
{
.name = "mt6358-regulator",
@@ -179,6 +248,22 @@ static const struct chip_data mt6323_core = {
.irq_init = mt6397_irq_init,
};
+static const struct chip_data mt6357_core = {
+ .cid_addr = MT6357_SWCID,
+ .cid_shift = 8,
+ .cells = mt6357_devs,
+ .cell_size = ARRAY_SIZE(mt6357_devs),
+ .irq_init = mt6358_irq_init,
+};
+
+static const struct chip_data mt6331_mt6332_core = {
+ .cid_addr = MT6331_HWCID,
+ .cid_shift = 0,
+ .cells = mt6331_mt6332_devs,
+ .cell_size = ARRAY_SIZE(mt6331_mt6332_devs),
+ .irq_init = mt6397_irq_init,
+};
+
static const struct chip_data mt6358_core = {
.cid_addr = MT6358_SWCID,
.cid_shift = 8,
@@ -262,6 +347,12 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6323",
.data = &mt6323_core,
}, {
+ .compatible = "mediatek,mt6331",
+ .data = &mt6331_mt6332_core,
+ }, {
+ .compatible = "mediatek,mt6357",
+ .data = &mt6357_core,
+ }, {
.compatible = "mediatek,mt6358",
.data = &mt6358_core,
}, {
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 2924919da991..eff53fed8fe7 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -12,6 +12,8 @@
#include <linux/suspend.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -172,7 +174,12 @@ int mt6397_irq_init(struct mt6397_chip *chip)
chip->int_status[0] = MT6323_INT_STATUS0;
chip->int_status[1] = MT6323_INT_STATUS1;
break;
-
+ case MT6331_CHIP_ID:
+ chip->int_con[0] = MT6331_INT_CON0;
+ chip->int_con[1] = MT6331_INT_CON1;
+ chip->int_status[0] = MT6331_INT_STATUS_CON0;
+ chip->int_status[1] = MT6331_INT_STATUS_CON1;
+ break;
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
chip->int_con[0] = MT6397_INT_CON0;
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index c472d7f8103c..4b8ff947762f 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -54,13 +54,6 @@ enum {
#define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE)
-struct pm8008_data {
- struct device *dev;
- struct regmap *regmap;
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)};
static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)};
static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)};
@@ -150,7 +143,7 @@ static struct regmap_config qcom_mfd_regmap_cfg = {
.max_register = 0xFFFF,
};
-static int pm8008_init(struct pm8008_data *chip)
+static int pm8008_init(struct regmap *regmap)
{
int rc;
@@ -160,34 +153,31 @@ static int pm8008_init(struct pm8008_data *chip)
* This is required to enable the writing of TYPE registers in
* regmap_irq_sync_unlock().
*/
- rc = regmap_write(chip->regmap,
- (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET),
- BIT(0));
+ rc = regmap_write(regmap, (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
/* Do the same for GPIO1 and GPIO2 peripherals */
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
return rc;
}
-static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
+static int pm8008_probe_irq_peripherals(struct device *dev,
+ struct regmap *regmap,
int client_irq)
{
int rc, i;
struct regmap_irq_type *type;
struct regmap_irq_chip_data *irq_data;
- rc = pm8008_init(chip);
+ rc = pm8008_init(regmap);
if (rc) {
- dev_err(chip->dev, "Init failed: %d\n", rc);
+ dev_err(dev, "Init failed: %d\n", rc);
return rc;
}
@@ -207,10 +197,10 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
}
- rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq,
+ rc = devm_regmap_add_irq_chip(dev, regmap, client_irq,
IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
if (rc) {
- dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc);
+ dev_err(dev, "Failed to add IRQ chip: %d\n", rc);
return rc;
}
@@ -220,26 +210,23 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
static int pm8008_probe(struct i2c_client *client)
{
int rc;
- struct pm8008_data *chip;
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
+ struct device *dev;
+ struct regmap *regmap;
- chip->dev = &client->dev;
- chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
- if (!chip->regmap)
+ dev = &client->dev;
+ regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+ if (!regmap)
return -ENODEV;
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, regmap);
- if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) {
- rc = pm8008_probe_irq_peripherals(chip, client->irq);
+ if (of_property_read_bool(dev->of_node, "interrupt-controller")) {
+ rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq);
if (rc)
- dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc);
+ dev_err(dev, "Failed to probe irq periphs: %d\n", rc);
}
- return devm_of_platform_populate(chip->dev);
+ return devm_of_platform_populate(dev);
}
static const struct of_device_id pm8008_match[] = {
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 1cacc00aa6c9..00003a868d28 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -3,120 +3,166 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spmi.h>
+#include <linux/types.h>
#include <linux/regmap.h>
#include <linux/of_platform.h>
+#include <soc/qcom/qcom-spmi-pmic.h>
#define PMIC_REV2 0x101
#define PMIC_REV3 0x102
#define PMIC_REV4 0x103
#define PMIC_TYPE 0x104
#define PMIC_SUBTYPE 0x105
+#define PMIC_FAB_ID 0x1f2
#define PMIC_TYPE_VALUE 0x51
-#define COMMON_SUBTYPE 0x00
-#define PM8941_SUBTYPE 0x01
-#define PM8841_SUBTYPE 0x02
-#define PM8019_SUBTYPE 0x03
-#define PM8226_SUBTYPE 0x04
-#define PM8110_SUBTYPE 0x05
-#define PMA8084_SUBTYPE 0x06
-#define PMI8962_SUBTYPE 0x07
-#define PMD9635_SUBTYPE 0x08
-#define PM8994_SUBTYPE 0x09
-#define PMI8994_SUBTYPE 0x0a
-#define PM8916_SUBTYPE 0x0b
-#define PM8004_SUBTYPE 0x0c
-#define PM8909_SUBTYPE 0x0d
-#define PM8028_SUBTYPE 0x0e
-#define PM8901_SUBTYPE 0x0f
-#define PM8950_SUBTYPE 0x10
-#define PMI8950_SUBTYPE 0x11
-#define PM8998_SUBTYPE 0x14
-#define PMI8998_SUBTYPE 0x15
-#define PM8005_SUBTYPE 0x18
-#define PM660L_SUBTYPE 0x1A
-#define PM660_SUBTYPE 0x1B
-#define PM8150_SUBTYPE 0x1E
-#define PM8150L_SUBTYPE 0x1f
-#define PM8150B_SUBTYPE 0x20
-#define PMK8002_SUBTYPE 0x21
-#define PM8009_SUBTYPE 0x24
-#define PM8150C_SUBTYPE 0x26
-#define SMB2351_SUBTYPE 0x29
+#define PMIC_REV4_V2 0x02
+
+struct qcom_spmi_dev {
+ int num_usids;
+ struct qcom_spmi_pmic pmic;
+};
+
+#define N_USIDS(n) ((void *)n)
static const struct of_device_id pmic_spmi_id_table[] = {
- { .compatible = "qcom,pm660", .data = (void *)PM660_SUBTYPE },
- { .compatible = "qcom,pm660l", .data = (void *)PM660L_SUBTYPE },
- { .compatible = "qcom,pm8004", .data = (void *)PM8004_SUBTYPE },
- { .compatible = "qcom,pm8005", .data = (void *)PM8005_SUBTYPE },
- { .compatible = "qcom,pm8019", .data = (void *)PM8019_SUBTYPE },
- { .compatible = "qcom,pm8028", .data = (void *)PM8028_SUBTYPE },
- { .compatible = "qcom,pm8110", .data = (void *)PM8110_SUBTYPE },
- { .compatible = "qcom,pm8150", .data = (void *)PM8150_SUBTYPE },
- { .compatible = "qcom,pm8150b", .data = (void *)PM8150B_SUBTYPE },
- { .compatible = "qcom,pm8150c", .data = (void *)PM8150C_SUBTYPE },
- { .compatible = "qcom,pm8150l", .data = (void *)PM8150L_SUBTYPE },
- { .compatible = "qcom,pm8226", .data = (void *)PM8226_SUBTYPE },
- { .compatible = "qcom,pm8841", .data = (void *)PM8841_SUBTYPE },
- { .compatible = "qcom,pm8901", .data = (void *)PM8901_SUBTYPE },
- { .compatible = "qcom,pm8909", .data = (void *)PM8909_SUBTYPE },
- { .compatible = "qcom,pm8916", .data = (void *)PM8916_SUBTYPE },
- { .compatible = "qcom,pm8941", .data = (void *)PM8941_SUBTYPE },
- { .compatible = "qcom,pm8950", .data = (void *)PM8950_SUBTYPE },
- { .compatible = "qcom,pm8994", .data = (void *)PM8994_SUBTYPE },
- { .compatible = "qcom,pm8998", .data = (void *)PM8998_SUBTYPE },
- { .compatible = "qcom,pma8084", .data = (void *)PMA8084_SUBTYPE },
- { .compatible = "qcom,pmd9635", .data = (void *)PMD9635_SUBTYPE },
- { .compatible = "qcom,pmi8950", .data = (void *)PMI8950_SUBTYPE },
- { .compatible = "qcom,pmi8962", .data = (void *)PMI8962_SUBTYPE },
- { .compatible = "qcom,pmi8994", .data = (void *)PMI8994_SUBTYPE },
- { .compatible = "qcom,pmi8998", .data = (void *)PMI8998_SUBTYPE },
- { .compatible = "qcom,pmk8002", .data = (void *)PMK8002_SUBTYPE },
- { .compatible = "qcom,smb2351", .data = (void *)SMB2351_SUBTYPE },
- { .compatible = "qcom,spmi-pmic", .data = (void *)COMMON_SUBTYPE },
+ { .compatible = "qcom,pm660", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm660l", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8004", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8005", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8019", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8028", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8110", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8150", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8150b", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8150c", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8150l", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8226", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8841", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8901", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8909", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8916", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8941", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8950", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8994", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8998", .data = N_USIDS(2) },
+ { .compatible = "qcom,pma8084", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmd9635", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmi8950", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmi8962", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmi8994", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmi8998", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmk8002", .data = N_USIDS(2) },
+ { .compatible = "qcom,smb2351", .data = N_USIDS(2) },
+ { .compatible = "qcom,spmi-pmic", .data = N_USIDS(1) },
{ }
};
-static void pmic_spmi_show_revid(struct regmap *map, struct device *dev)
+/*
+ * A PMIC can be represented by multiple SPMI devices, but
+ * only the base PMIC device will contain a reference to
+ * the revision information.
+ *
+ * This function takes a pointer to a pmic device and
+ * returns a pointer to the base PMIC device.
+ *
+ * This only supports PMICs with 1 or 2 USIDs.
+ */
+static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
{
- unsigned int rev2, minor, major, type, subtype;
- const char *name = "unknown";
- int ret, i;
+ struct spmi_device *sdev;
+ struct qcom_spmi_dev *ctx;
+ struct device_node *spmi_bus;
+ struct device_node *other_usid = NULL;
+ int function_parent_usid, ret;
+ u32 pmic_addr;
- ret = regmap_read(map, PMIC_TYPE, &type);
- if (ret < 0)
- return;
+ sdev = to_spmi_device(dev);
+ ctx = dev_get_drvdata(&sdev->dev);
- if (type != PMIC_TYPE_VALUE)
- return;
+ /*
+ * Quick return if the function device is already in the base
+ * USID. This will always be hit for PMICs with only 1 USID.
+ */
+ if (sdev->usid % ctx->num_usids == 0)
+ return sdev;
- ret = regmap_read(map, PMIC_SUBTYPE, &subtype);
+ function_parent_usid = sdev->usid;
+
+ /*
+ * Walk through the list of PMICs until we find the sibling USID.
+ * The goal is to find the first USID which is less than the
+ * number of USIDs in the PMIC array, e.g. for a PMIC with 2 USIDs
+ * where the function device is under USID 3, we want to find the
+ * device for USID 2.
+ */
+ spmi_bus = of_get_parent(sdev->dev.of_node);
+ do {
+ other_usid = of_get_next_child(spmi_bus, other_usid);
+
+ ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ sdev = spmi_device_from_of(other_usid);
+ if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
+ if (!sdev)
+ /*
+ * If the base USID for this PMIC hasn't probed yet
+ * but the secondary USID has, then we need to defer
+ * the function driver so that it will attempt to
+ * probe again when the base USID is ready.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+ return sdev;
+ }
+ } while (other_usid->sibling);
+
+ return ERR_PTR(-ENODATA);
+}
+
+static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
+ struct qcom_spmi_pmic *pmic)
+{
+ int ret;
+
+ ret = regmap_read(map, PMIC_TYPE, &pmic->type);
if (ret < 0)
- return;
+ return ret;
- for (i = 0; i < ARRAY_SIZE(pmic_spmi_id_table); i++) {
- if (subtype == (unsigned long)pmic_spmi_id_table[i].data)
- break;
- }
+ if (pmic->type != PMIC_TYPE_VALUE)
+ return ret;
- if (i != ARRAY_SIZE(pmic_spmi_id_table))
- name = pmic_spmi_id_table[i].compatible;
+ ret = regmap_read(map, PMIC_SUBTYPE, &pmic->subtype);
+ if (ret < 0)
+ return ret;
+
+ pmic->name = of_match_device(pmic_spmi_id_table, dev)->compatible;
- ret = regmap_read(map, PMIC_REV2, &rev2);
+ ret = regmap_read(map, PMIC_REV2, &pmic->rev2);
if (ret < 0)
- return;
+ return ret;
- ret = regmap_read(map, PMIC_REV3, &minor);
+ ret = regmap_read(map, PMIC_REV3, &pmic->minor);
if (ret < 0)
- return;
+ return ret;
- ret = regmap_read(map, PMIC_REV4, &major);
+ ret = regmap_read(map, PMIC_REV4, &pmic->major);
if (ret < 0)
- return;
+ return ret;
+
+ if (pmic->subtype == PMI8998_SUBTYPE || pmic->subtype == PM660_SUBTYPE) {
+ ret = regmap_read(map, PMIC_FAB_ID, &pmic->fab_id);
+ if (ret < 0)
+ return ret;
+ }
/*
* In early versions of PM8941 and PM8226, the major revision number
@@ -124,16 +170,50 @@ static void pmic_spmi_show_revid(struct regmap *map, struct device *dev)
* Increment the major revision number here if the chip is an early
* version of PM8941 or PM8226.
*/
- if ((subtype == PM8941_SUBTYPE || subtype == PM8226_SUBTYPE) &&
- major < 0x02)
- major++;
+ if ((pmic->subtype == PM8941_SUBTYPE || pmic->subtype == PM8226_SUBTYPE) &&
+ pmic->major < PMIC_REV4_V2)
+ pmic->major++;
+
+ if (pmic->subtype == PM8110_SUBTYPE)
+ pmic->minor = pmic->rev2;
- if (subtype == PM8110_SUBTYPE)
- minor = rev2;
+ dev_dbg(dev, "%x: %s v%d.%d\n",
+ pmic->subtype, pmic->name, pmic->major, pmic->minor);
- dev_dbg(dev, "%x: %s v%d.%d\n", subtype, name, major, minor);
+ return 0;
}
+/**
+ * qcom_pmic_get() - Get a pointer to the base PMIC device
+ *
+ * This function takes a struct device for a driver which is a child of a PMIC
+ * and locates the PMIC revision information for it.
+ *
+ * @dev: the pmic function device
+ * @return: the struct qcom_spmi_pmic* pointer associated with the function device
+ */
+const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
+{
+ struct spmi_device *sdev;
+ struct qcom_spmi_dev *spmi;
+
+ /*
+ * Make sure the device is actually a child of a PMIC
+ */
+ if (!of_match_device(pmic_spmi_id_table, dev->parent))
+ return ERR_PTR(-EINVAL);
+
+ sdev = qcom_pmic_get_base_usid(dev->parent);
+
+ if (IS_ERR(sdev))
+ return ERR_CAST(sdev);
+
+ spmi = dev_get_drvdata(&sdev->dev);
+
+ return &spmi->pmic;
+}
+EXPORT_SYMBOL(qcom_pmic_get);
+
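As a rough illustration of how a PMIC function driver might consume the helper exported above (this sketch is not part of the patch; the probe function and the header providing the declarations are assumptions):

#include <linux/platform_device.h>
/* header exposing qcom_pmic_get() and struct qcom_spmi_pmic; exact path assumed */

static int example_pmic_fn_probe(struct platform_device *pdev)
{
	const struct qcom_spmi_pmic *pmic;

	/* this platform device was populated under the PMIC's SPMI device,
	 * so its parent is the PMIC and qcom_pmic_get() can resolve it */
	pmic = qcom_pmic_get(&pdev->dev);
	if (IS_ERR(pmic))
		return PTR_ERR(pmic);	/* may be -EPROBE_DEFER until the base USID probes */

	dev_info(&pdev->dev, "parent PMIC %s v%d.%d (subtype 0x%x)\n",
		 pmic->name, pmic->major, pmic->minor, pmic->subtype);

	return 0;
}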
static const struct regmap_config spmi_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -144,14 +224,26 @@ static const struct regmap_config spmi_regmap_config = {
static int pmic_spmi_probe(struct spmi_device *sdev)
{
struct regmap *regmap;
+ struct qcom_spmi_dev *ctx;
+ int ret;
regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ ctx = devm_kzalloc(&sdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->num_usids = (uintptr_t)of_device_get_match_data(&sdev->dev);
+
/* Only the first slave id for a PMIC contains this information */
- if (sdev->usid % 2 == 0)
- pmic_spmi_show_revid(regmap, &sdev->dev);
+ if (sdev->usid % ctx->num_usids == 0) {
+ ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
+ if (ret < 0)
+ return ret;
+ }
+ spmi_device_set_drvdata(sdev, ctx);
return devm_of_platform_populate(&sdev->dev);
}
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 191fdb87c424..bdb2ce7ff03b 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -101,8 +101,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
}
}
- syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
- (u64)res.start);
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 5369c67e3280..663ffd4b8570 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -397,11 +397,8 @@ err_noirq:
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- int ret;
- ret = pdata->disable(dev);
clk_disable_unprepare(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
clk_disable_unprepare(t7l66xb->clk32k);
@@ -412,8 +409,7 @@ static int t7l66xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
kfree(t7l66xb);
- return ret;
-
+ return 0;
}
static struct platform_driver t7l66xb_platform_driver = {
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 0be5731685b4..aa903a31dd43 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -798,20 +798,19 @@ static int tc6393xb_remove(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- int ret;
mfd_remove_devices(&dev->dev);
tc6393xb_detach_irq(dev);
- ret = tcpd->disable(dev);
+ tcpd->disable(dev);
clk_disable_unprepare(tc6393xb->clk);
iounmap(tc6393xb->scr);
release_resource(&tc6393xb->rscr);
clk_put(tc6393xb->clk);
kfree(tc6393xb);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 3bd5728844a0..cbae9777a24e 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 8027b0a9e14f..8e8da204a02e 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps65217.c
*
* TPS65217 chip family multi-function driver
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 167e9fc308ef..49bb8fd168f8 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for TPS65218 Integrated power management chipsets
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index c282a05e7146..7d994b8a5965 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Core functions for TI TPS65912x PMICs
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 06eb2784d322..afb7f7d97dc0 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* I2C access driver for TI TPS65912x PMICs
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index bba38fbc781d..9e976f9c6bbe 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI access driver for TI TPS65912x PMICs
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bd6659cf3bc0..2cb9326f3e61 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -656,309 +656,6 @@ static inline struct device *add_child(unsigned mod_no, const char *name,
can_wakeup, irq0, irq1);
}
-static struct device *
-add_regulator_linked(int num, struct regulator_init_data *pdata,
- struct regulator_consumer_supply *consumers,
- unsigned num_consumers, unsigned long features)
-{
- struct twl_regulator_driver_data drv_data;
-
- /* regulator framework demands init_data ... */
- if (!pdata)
- return NULL;
-
- if (consumers) {
- pdata->consumer_supplies = consumers;
- pdata->num_consumer_supplies = num_consumers;
- }
-
- if (pdata->driver_data) {
- /* If we have existing drv_data, just add the flags */
- struct twl_regulator_driver_data *tmp;
- tmp = pdata->driver_data;
- tmp->features |= features;
- } else {
- /* add new driver data struct, used only during init */
- drv_data.features = features;
- drv_data.set_voltage = NULL;
- drv_data.get_voltage = NULL;
- drv_data.data = NULL;
- pdata->driver_data = &drv_data;
- }
-
- /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num,
- pdata, sizeof(*pdata), false, 0, 0);
-}
-
-static struct device *
-add_regulator(int num, struct regulator_init_data *pdata,
- unsigned long features)
-{
- return add_regulator_linked(num, pdata, NULL, 0, features);
-}
-
-/*
- * NOTE: We know the first 8 IRQs after pdata->base_irq are
- * for the PIH, and the next are for the PWR_INT SIH, since
- * that's how twl_init_irq() sets things up.
- */
-
-static int
-add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
- unsigned long features)
-{
- struct device *child;
-
- if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) {
- child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio",
- pdata->gpio, sizeof(*pdata->gpio),
- false, irq_base + GPIO_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) {
- child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad",
- pdata->keypad, sizeof(*pdata->keypad),
- true, irq_base + KEYPAD_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_MADC, "twl4030_madc",
- pdata->madc, sizeof(*pdata->madc),
- true, irq_base + MADC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_RTC_DRV_TWL4030)) {
- /*
- * REVISIT platform_data here currently might expose the
- * "msecure" line ... but for now we just expect board
- * setup to tell the chip "it's always ok to SET_TIME".
- * Eventually, Linux might become more aware of such
- * HW security concerns, and "least privilege".
- */
- child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0,
- true, irq_base + RTC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL)) {
- child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
- child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_USB) && pdata->usb &&
- twl_class_is_4030()) {
-
- static struct regulator_consumer_supply usb1v5 = {
- .supply = "usb1v5",
- };
- static struct regulator_consumer_supply usb1v8 = {
- .supply = "usb1v8",
- };
- static struct regulator_consumer_supply usb3v1 = {
- .supply = "usb3v1",
- };
-
- /* First add the regulators so that they can be used by transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030)) {
- /* this is a template that gets copied */
- struct regulator_init_data usb_fixed = {
- .constraints.valid_modes_mask =
- REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .constraints.valid_ops_mask =
- REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- };
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V5,
- &usb_fixed, &usb1v5, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V8,
- &usb_fixed, &usb1v8, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB3V1,
- &usb_fixed, &usb3v1, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- }
-
- child = add_child(TWL_MODULE_USB, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb), true,
- /* irq0 = USB_PRES, irq1 = USB */
- irq_base + USB_PRES_INTR_OFFSET,
- irq_base + USB_INTR_OFFSET);
-
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- /* we need to connect regulators to this transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) {
- usb1v5.dev_name = dev_name(child);
- usb1v8.dev_name = dev_name(child);
- usb3v1.dev_name = dev_name(child);
- }
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
- 0, false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio",
- pdata->audio, sizeof(*pdata->audio),
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* twl4030 regulators */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VIO, pdata->vio,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDAC, pdata->vdac,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator((features & TWL4030_VAUX2)
- ? TWL4030_REG_VAUX2_4030
- : TWL4030_REG_VAUX2,
- pdata->vaux2, features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* maybe add LDOs that are omitted on cost-reduced parts */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && !(features & TPS_SUBSET)
- && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VSIM, pdata->vsim,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
- !(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
- pdata->bci, sizeof(*pdata->bci), false,
- /* irq0 = CHG_PRES, irq1 = BCI */
- irq_base + BCI_PRES_INTR_OFFSET,
- irq_base + BCI_INTR_OFFSET);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_power",
- pdata->power, sizeof(*pdata->power), false,
- 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- return 0;
-}
-
/*----------------------------------------------------------------------*/
/*
@@ -987,8 +684,7 @@ static inline int unprotect_pm_master(void)
return e;
}
-static void clocks_init(struct device *dev,
- struct twl4030_clock_init_data *clock)
+static void clocks_init(struct device *dev)
{
int e = 0;
struct clk *osc;
@@ -1018,8 +714,6 @@ static void clocks_init(struct device *dev,
}
ctrl |= HIGH_PERF_SQ;
- if (clock && clock->ck32k_lowpwr_enable)
- ctrl |= CK32K_LOWPWR_EN;
e |= unprotect_pm_master();
/* effect->MADC+USB ck en */
@@ -1063,7 +757,6 @@ static struct of_dev_auxdata twl_auxdata_lookup[] = {
static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
const struct regmap_config *twl_regmap_config;
@@ -1071,7 +764,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i, num_slaves;
- if (!node && !pdata) {
+ if (!node) {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
@@ -1161,7 +854,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_priv->ready = true;
/* setup clock framework */
- clocks_init(&client->dev, pdata ? pdata->clock : NULL);
+ clocks_init(&client->dev);
/* read TWL IDCODE Register */
if (twl_class_is_4030()) {
@@ -1209,14 +902,8 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
TWL4030_DCDC_GLOBAL_CFG);
}
- if (node) {
- if (pdata)
- twl_auxdata_lookup[0].platform_data = pdata->gpio;
- status = of_platform_populate(node, NULL, twl_auxdata_lookup,
- &client->dev);
- } else {
- status = add_children(pdata, irq_base, id->driver_data);
- }
+ status = of_platform_populate(node, NULL, twl_auxdata_lookup,
+ &client->dev);
fail:
if (status < 0)
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 8c3832a58ef6..ac1d18039568 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -72,11 +72,9 @@ static int ucb1400_core_probe(struct device *dev)
/* GPIO */
ucb_gpio.ac97 = ac97;
- if (pdata) {
- ucb_gpio.gpio_setup = pdata->gpio_setup;
- ucb_gpio.gpio_teardown = pdata->gpio_teardown;
+ if (pdata)
ucb_gpio.gpio_offset = pdata->gpio_offset;
- }
+
ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
if (!ucb->ucb1400_gpio) {
err = -ENOMEM;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 41d2bb0ae23a..94e9fb4cdd76 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -483,6 +483,19 @@ config OPEN_DICE
If unsure, say N.
+config VCPU_STALL_DETECTOR
+ tristate "Guest vCPU stall detector"
+ depends on OF && HAS_IOMEM
+ help
+ When this driver is bound inside a KVM guest, it will
+ periodically "pet" an MMIO stall detector device from each vCPU
+ and allow the host to detect vCPU stalls.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vcpu_stall_detector.
+
+ If you do not intend to run this kernel as a guest, say N.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 70e800e9127f..2be8542616dd 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o
obj-$(CONFIG_OPEN_DICE) += open-dice.o
+obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
\ No newline at end of file
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 69f9b0336410..7f9f562d6433 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -276,7 +276,7 @@ static struct platform_driver ssc_driver = {
};
module_platform_driver(ssc_driver);
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
-MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91");
+MODULE_AUTHOR("Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>");
+MODULE_DESCRIPTION("SSC driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ssc");
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 2a2619e3c72c..32b7783e9d4f 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -131,7 +131,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
rtsx_disable_aspm(pcr);
- /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */
+ /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
msleep(1);
if (option->ltr_enabled)
@@ -1507,7 +1507,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->remap_addr = ioremap(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
- goto free_handle;
+ goto free_idr;
}
pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
@@ -1570,6 +1570,10 @@ disable_msi:
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
iounmap(pcr->remap_addr);
+free_idr:
+ spin_lock(&rtsx_pci_lock);
+ idr_remove(&rtsx_pci_idr, pcr->id);
+ spin_unlock(&rtsx_pci_lock);
free_handle:
kfree(handle);
free_pcr:
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index e627b4056623..acaa44809c58 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -331,7 +331,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
- kfree(ctx->irq_bitmap);
+ bitmap_free(ctx->irq_bitmap);
/* Drop ref to the afu device taken during cxl_context_init */
cxl_afu_put(ctx->afu);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 7a6dd91987fd..0562071cdd4a 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -1104,7 +1104,7 @@ extern const struct cxl_backend_ops cxl_native_ops;
extern const struct cxl_backend_ops cxl_guest_ops;
extern const struct cxl_backend_ops *cxl_ops;
-/* check if the given pci_dev is on the the cxl vphb bus */
+/* check if the given pci_dev is on the cxl vphb bus */
bool cxl_pci_is_vphb_device(struct pci_dev *dev);
/* decode AFU error bits in the PSL register PSL_SERR_An */
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3321c014913c..375f692ae9d6 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1053,7 +1053,7 @@ static void free_adapter(struct cxl *adapter)
if (adapter->guest->irq_avail) {
for (i = 0; i < adapter->guest->irq_nranges; i++) {
cur = &adapter->guest->irq_avail[i];
- kfree(cur->bitmap);
+ bitmap_free(cur->bitmap);
}
kfree(adapter->guest->irq_avail);
}
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 5f0e2dcebb34..b730e022a48e 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -319,8 +319,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
}
ctx->irq_count = count;
- ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
- sizeof(*ctx->irq_bitmap), GFP_KERNEL);
+ ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!ctx->irq_bitmap)
goto out;
@@ -350,6 +349,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
out:
cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ bitmap_free(ctx->irq_bitmap);
afu_irq_name_free(ctx);
return -ENOMEM;
}
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index 1cfecba42d01..25ce725035e7 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -308,8 +308,7 @@ static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
cur = &adapter->guest->irq_avail[i];
cur->offset = be32_to_cpu(ranges[i * 2]);
cur->range = be32_to_cpu(ranges[i * 2 + 1]);
- cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range),
- sizeof(*cur->bitmap), GFP_KERNEL);
+ cur->bitmap = bitmap_zalloc(cur->range, GFP_KERNEL);
if (cur->bitmap == NULL)
goto err;
if (cur->offset < adapter->guest->irq_base_offset)
@@ -326,7 +325,7 @@ static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
err:
for (i--; i >= 0; i--) {
cur = &adapter->guest->irq_avail[i];
- kfree(cur->bitmap);
+ bitmap_free(cur->bitmap);
}
kfree(adapter->guest->irq_avail);
adapter->guest->irq_avail = NULL;
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index b0cff4b152da..9aec3338e37d 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -566,7 +566,7 @@ static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr,
eeseq.memaddr = cpu_to_le16(memaddr);
ret = pdev->smb_write(pdev, &smbseq);
if (ret != 0) {
- dev_err(dev, "Failed to init eeprom addr 0x%02hhx",
+ dev_err(dev, "Failed to init eeprom addr 0x%02x",
memaddr);
break;
}
@@ -575,7 +575,7 @@ static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr,
smbseq.bytecnt = EEPROM_RD_CNT;
ret = pdev->smb_read(pdev, &smbseq);
if (ret != 0) {
- dev_err(dev, "Failed to read eeprom data 0x%02hhx",
+ dev_err(dev, "Failed to read eeprom data 0x%02x",
memaddr);
break;
}
@@ -810,7 +810,7 @@ static int idt_csr_read(struct idt_89hpesx_dev *pdev, u16 csraddr, u32 *data)
smbseq.bytecnt = CSR_RD_CNT;
ret = pdev->smb_read(pdev, &smbseq);
if (ret != 0) {
- dev_err(dev, "Failed to read csr 0x%04hx",
+ dev_err(dev, "Failed to read csr 0x%04x",
CSR_REAL_ADDR(csraddr));
goto err_mutex_unlock;
}
@@ -909,14 +909,18 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
u32 csraddr, csrval;
char *buf;
+ if (*offp)
+ return 0;
+
/* Copy data from User-space */
buf = kmalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, count, offp, ubuf, count);
- if (ret < 0)
+ if (copy_from_user(buf, ubuf, count)) {
+ ret = -EFAULT;
goto free_buf;
+ }
buf[count] = 0;
/* Find position of colon in the buffer */
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index a786c0a7de9a..b35d7000c86b 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -14,4 +14,7 @@ habanalabs-y += $(HL_GOYA_FILES)
include $(src)/gaudi/Makefile
habanalabs-y += $(HL_GAUDI_FILES)
+include $(src)/gaudi2/Makefile
+habanalabs-y += $(HL_GAUDI2_FILES)
+
habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 934a3a4aedc9..e6abffea9f87 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -11,4 +11,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/firmware_if.o \
- common/state_dump.o common/memory_mgr.o
+ common/security.o common/state_dump.o \
+ common/memory_mgr.o common/decoder.o
diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/misc/habanalabs/common/asid.c
index ede04c032b6e..c9c2619cc43d 100644
--- a/drivers/misc/habanalabs/common/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
@@ -11,8 +11,7 @@
int hl_asid_init(struct hl_device *hdev)
{
- hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid),
- sizeof(*hdev->asid_bitmap), GFP_KERNEL);
+ hdev->asid_bitmap = bitmap_zalloc(hdev->asic_prop.max_asid, GFP_KERNEL);
if (!hdev->asid_bitmap)
return -ENOMEM;
@@ -27,7 +26,7 @@ int hl_asid_init(struct hl_device *hdev)
void hl_asid_fini(struct hl_device *hdev)
{
mutex_destroy(&hdev->asid_mutex);
- kfree(hdev->asid_bitmap);
+ bitmap_free(hdev->asid_bitmap);
}
unsigned long hl_asid_alloc(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index e13b2b39c058..b027f66f8bd4 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -143,8 +143,7 @@ static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
gen_pool_free(hdev->internal_cb_pool,
(uintptr_t)cb->kernel_address, cb->size);
else
- hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
- cb->kernel_address, cb->bus_address);
+ hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
kfree(cb);
}
@@ -195,14 +194,11 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
cb->is_internal = true;
cb->bus_address = hdev->internal_cb_va_base + cb_offset;
} else if (ctx_id == HL_KERNEL_ASID_ID) {
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
- &cb->bus_address, GFP_ATOMIC);
+ p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
if (!p)
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- cb_size, &cb->bus_address, GFP_KERNEL);
+ p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
} else {
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
- &cb->bus_address,
+ p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
GFP_USER | __GFP_ZERO);
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index fb30b7de4aab..90a4574cbe2d 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
- HL_CS_FLAGS_COLLECTIVE_WAIT)
+ HL_CS_FLAGS_COLLECTIVE_WAIT)
#define MAX_TS_ITER_NUM 10
@@ -29,11 +29,88 @@ enum hl_cs_wait_status {
};
static void job_wq_completion(struct work_struct *work);
-static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- u64 timeout_us, u64 seq,
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);
+static void hl_push_cs_outcome(struct hl_device *hdev,
+ struct hl_cs_outcome_store *outcome_store,
+ u64 seq, ktime_t ts, int error)
+{
+ struct hl_cs_outcome *node;
+ unsigned long flags;
+
+ /*
+ * CS outcome store supports the following operations:
+ * push outcome - store a recent CS outcome in the store
+ * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
+ * It uses 2 lists: used list and free list.
+ * It has a pre-allocated amount of nodes, each node stores
+ * a single CS outcome.
+ * Initially, all the nodes are in the free list.
+ * On push outcome, a node (any) is taken from the free list, its
+ * information is filled in, and the node is moved to the used list.
+ * It is possible that there are no nodes left in the free list.
+ * In this case, we will lose some information about old outcomes: we
+ * will pop the OLDEST node from the used list and make it free.
+ * On pop, the node is searched for in the used list (using a search
+ * index).
+ * If found, the node is then removed from the used list, and moved
+ * back to the free list. The outcome data that the node contained is
+ * returned back to the user.
+ */
+
+ spin_lock_irqsave(&outcome_store->db_lock, flags);
+
+ if (list_empty(&outcome_store->free_list)) {
+ node = list_last_entry(&outcome_store->used_list,
+ struct hl_cs_outcome, list_link);
+ hash_del(&node->map_link);
+ dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
+ } else {
+ node = list_last_entry(&outcome_store->free_list,
+ struct hl_cs_outcome, list_link);
+ }
+
+ list_del_init(&node->list_link);
+
+ node->seq = seq;
+ node->ts = ts;
+ node->error = error;
+
+ list_add(&node->list_link, &outcome_store->used_list);
+ hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
+
+ spin_unlock_irqrestore(&outcome_store->db_lock, flags);
+}
+
+static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
+ u64 seq, ktime_t *ts, int *error)
+{
+ struct hl_cs_outcome *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&outcome_store->db_lock, flags);
+
+ hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
+ if (node->seq == seq) {
+ *ts = node->ts;
+ *error = node->error;
+
+ hash_del(&node->map_link);
+ list_del_init(&node->list_link);
+ list_add(&node->list_link, &outcome_store->free_list);
+
+ spin_unlock_irqrestore(&outcome_store->db_lock, flags);
+
+ return true;
+ }
+
+ spin_unlock_irqrestore(&outcome_store->db_lock, flags);
+
+ return false;
+}
+
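The store described in the comment above is, in effect, a fixed pool of nodes whose oldest entry is recycled when no free node is left. A tiny user-space model of that behaviour (illustrative only; plain arrays stand in for the kernel's list and hash machinery):

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4	/* the driver pre-allocates a fixed number of nodes */

struct outcome {
	unsigned long long seq;
	int error;
	bool used;
	unsigned long age;	/* monotonic push counter, oldest = smallest */
};

static struct outcome pool[POOL_SIZE];
static unsigned long tick;

/* "push": take a free node if any, otherwise recycle the oldest used one */
static void push_outcome(unsigned long long seq, int error)
{
	struct outcome *victim = &pool[0];
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (!pool[i].used) {
			victim = &pool[i];
			break;
		}
		if (pool[i].age < victim->age)
			victim = &pool[i];
	}
	victim->seq = seq;
	victim->error = error;
	victim->used = true;
	victim->age = ++tick;
}

/* "pop": look up a specific sequence number and return its node to the free set */
static bool pop_outcome(unsigned long long seq, int *error)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (pool[i].used && pool[i].seq == seq) {
			*error = pool[i].error;
			pool[i].used = false;
			return true;
		}
	}
	return false;
}

int main(void)
{
	unsigned long long s;
	int err;

	for (s = 1; s <= 6; s++)
		push_outcome(s, 0);

	/* seq 1 and 2 were recycled to make room for 5 and 6 */
	printf("seq 2: %s\n", pop_outcome(2, &err) ? "found" : "lost");
	printf("seq 5: %s\n", pop_outcome(5, &err) ? "found" : "lost");
	return 0;
}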
static void hl_sob_reset(struct kref *ref)
{
struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
@@ -171,7 +248,7 @@ static void cs_job_do_release(struct kref *ref)
kfree(job);
}
-static void cs_job_put(struct hl_cs_job *job)
+static void hl_cs_job_put(struct hl_cs_job *job)
{
kref_put(&job->refcount, cs_job_do_release);
}
@@ -266,7 +343,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
return rc;
}
-static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
+static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
@@ -285,12 +362,12 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
* enabled, the user CB isn't released in cs_parser() and thus should be
- * released here.
- * This is also true for INT queues jobs which were allocated by driver
+ * released here. This is also true for INT queues jobs which were
+ * allocated by driver.
*/
- if (job->is_kernel_allocated_cb &&
+ if ((job->is_kernel_allocated_cb &&
((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
- job->queue_type == QUEUE_TYPE_INT)) {
+ job->queue_type == QUEUE_TYPE_INT))) {
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
}
@@ -318,11 +395,10 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
* flow by calling 'hl_hw_queue_update_ci'.
*/
if (cs_needs_completion(cs) &&
- (job->queue_type == QUEUE_TYPE_EXT ||
- job->queue_type == QUEUE_TYPE_HW))
+ (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW))
cs_put(cs);
- cs_job_put(job);
+ hl_cs_job_put(job);
}
/*
@@ -612,7 +688,7 @@ static void cs_do_release(struct kref *ref)
* still holds a pointer to them (but no reference).
*/
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- complete_job(hdev, job);
+ hl_complete_job(hdev, job);
if (!cs->submitted) {
/*
@@ -642,9 +718,9 @@ static void cs_do_release(struct kref *ref)
* staged submission
*/
if (cs->staged_last) {
- struct hl_cs *staged_cs, *tmp;
+ struct hl_cs *staged_cs, *tmp_cs;
- list_for_each_entry_safe(staged_cs, tmp,
+ list_for_each_entry_safe(staged_cs, tmp_cs,
&cs->staged_cs_node, staged_cs_node)
staged_cs_put(hdev, staged_cs);
}
@@ -678,7 +754,7 @@ out:
*/
hl_debugfs_remove_cs(cs);
- hl_ctx_put(cs->ctx);
+ hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
/* We need to mark an error for not submitted because in that case
* the hl fence release flow is different. Mainly, we don't need
@@ -698,8 +774,14 @@ out:
div_u64(jiffies - cs->submission_time_jiffies, HZ));
}
- if (cs->timestamp)
+ if (cs->timestamp) {
cs->fence->timestamp = ktime_get();
+ hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
+ cs->fence->timestamp, cs->fence->error);
+ }
+
+ hl_ctx_put(cs->ctx);
+
complete_all(&cs->fence->completion);
complete_multi_cs(hdev, cs);
@@ -714,10 +796,11 @@ out:
static void cs_timedout(struct work_struct *work)
{
struct hl_device *hdev;
+ u64 event_mask;
int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
- bool skip_reset_on_timeout = cs->skip_reset_on_timeout;
+ bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;
rc = cs_get_unless_zero(cs);
if (!rc)
@@ -728,17 +811,28 @@ static void cs_timedout(struct work_struct *work)
return;
}
- /* Mark the CS is timed out so we won't try to cancel its TDR */
- if (likely(!skip_reset_on_timeout))
- cs->timedout = true;
-
hdev = cs->ctx->hdev;
+ if (likely(!skip_reset_on_timeout)) {
+ if (hdev->reset_on_lockup)
+ device_reset = true;
+ else
+ hdev->reset_info.needs_reset = true;
+
+ /* Mark the CS is timed out so we won't try to cancel its TDR */
+ cs->timedout = true;
+ }
+
/* Save only the first CS timeout parameters */
- rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1);
- if (!rc) {
+ rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_enable, 1, 0);
+ if (rc) {
hdev->last_error.cs_timeout.timestamp = ktime_get();
hdev->last_error.cs_timeout.seq = cs->sequence;
+
+ event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
+ HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
+
+ hl_notifier_event_send_all(hdev, event_mask);
}
switch (cs->type) {
@@ -773,12 +867,8 @@ static void cs_timedout(struct work_struct *work)
cs_put(cs);
- if (likely(!skip_reset_on_timeout)) {
- if (hdev->reset_on_lockup)
- hl_device_reset(hdev, HL_DRV_RESET_TDR);
- else
- hdev->reset_info.needs_reset = true;
- }
+ if (device_reset)
+ hl_device_reset(hdev, HL_DRV_RESET_TDR);
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
@@ -916,7 +1006,7 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
staged_cs_put(hdev, cs);
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- complete_job(hdev, job);
+ hl_complete_job(hdev, job);
}
void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
@@ -933,6 +1023,7 @@ void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
+ flush_workqueue(hdev->cs_cmplt_wq);
}
/* Make sure we don't have leftovers in the CS mirror list */
@@ -940,7 +1031,7 @@ void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
cs_get(cs);
cs->aborted = true;
dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
- cs->ctx->asid, cs->sequence);
+ cs->ctx->asid, cs->sequence);
cs_rollback(hdev, cs);
cs_put(cs);
}
@@ -989,7 +1080,10 @@ void hl_release_pending_user_interrupts(struct hl_device *hdev)
wake_pending_user_interrupt_threads(interrupt);
}
- interrupt = &hdev->common_user_interrupt;
+ interrupt = &hdev->common_user_cq_interrupt;
+ wake_pending_user_interrupt_threads(interrupt);
+
+ interrupt = &hdev->common_decoder_interrupt;
wake_pending_user_interrupt_threads(interrupt);
}
@@ -1001,7 +1095,17 @@ static void job_wq_completion(struct work_struct *work)
struct hl_device *hdev = cs->ctx->hdev;
/* job is no longer needed */
- complete_job(hdev, job);
+ hl_complete_job(hdev, job);
+}
+
+static void cs_completion(struct work_struct *work)
+{
+ struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ hl_complete_job(hdev, job);
}
static int validate_queue_index(struct hl_device *hdev,
@@ -1024,7 +1128,13 @@ static int validate_queue_index(struct hl_device *hdev,
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
- dev_err(hdev->dev, "Queue index %d is invalid\n",
+ dev_err(hdev->dev, "Queue index %d is not applicable\n",
+ chunk->queue_index);
+ return -EINVAL;
+ }
+
+ if (hw_queue_prop->binned) {
+ dev_err(hdev->dev, "Queue index %d is binned out\n",
chunk->queue_index);
return -EINVAL;
}
@@ -1166,17 +1276,16 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
cs_type = hl_cs_get_cs_type(cs_type_flags);
num_chunks = args->in.num_chunks_execute;
- if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
- !hdev->supports_sync_stream)) {
+ if (unlikely((cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT) &&
+ !hdev->supports_sync_stream)) {
dev_err(hdev->dev, "Sync stream CS is not supported\n");
return -EINVAL;
}
if (cs_type == CS_TYPE_DEFAULT) {
if (!num_chunks) {
- dev_err(hdev->dev,
- "Got execute CS with 0 chunks, context %d\n",
- ctx->asid);
+ dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
return -EINVAL;
}
} else if (num_chunks != 1) {
@@ -1276,7 +1385,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 encaps_signals_handle, u32 timeout,
u16 *signal_initial_sob_count)
{
- bool staged_mid, int_queues_only = true;
+ bool staged_mid, int_queues_only = true, using_hw_queues = false;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
@@ -1365,6 +1474,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
chunk->queue_index);
}
+ if (queue_type == QUEUE_TYPE_HW)
+ using_hw_queues = true;
+
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
@@ -1385,6 +1497,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job->hw_queue_id = chunk->queue_index;
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+ cs->jobs_cnt++;
list_add_tail(&job->cs_node, &cs->job_list);
@@ -1425,6 +1538,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ if (using_hw_queues)
+ INIT_WORK(&cs->finish_work, cs_completion);
+
/*
* store the (external/HW queues) streams used by the CS in the
* fence object for multi-CS completion
@@ -1773,6 +1889,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
cs_get(cs);
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+ cs->jobs_cnt++;
list_add_tail(&job->cs_node, &cs->job_list);
@@ -2191,6 +2308,9 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (rc)
goto free_cs_object;
+ if (q_type == QUEUE_TYPE_HW)
+ INIT_WORK(&cs->finish_work, cs_completion);
+
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
/* In case wait cs failed here, it means the signal cs
@@ -2321,12 +2441,12 @@ out:
}
static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
- enum hl_cs_wait_status *status, u64 timeout_us,
- s64 *timestamp)
+ enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
{
struct hl_device *hdev = ctx->hdev;
+ ktime_t timestamp_kt;
long completion_rc;
- int rc = 0;
+ int rc = 0, error;
if (IS_ERR(fence)) {
rc = PTR_ERR(fence);
@@ -2338,12 +2458,16 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
}
if (!fence) {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
seq, ctx->cs_sequence);
+ *status = CS_WAIT_STATUS_GONE;
+ return 0;
+ }
- *status = CS_WAIT_STATUS_GONE;
- return 0;
+ completion_rc = 1;
+ goto report_results;
}
if (!timeout_us) {
@@ -2358,18 +2482,20 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
&fence->completion, timeout);
}
+ error = fence->error;
+ timestamp_kt = fence->timestamp;
+
+report_results:
if (completion_rc > 0) {
*status = CS_WAIT_STATUS_COMPLETED;
if (timestamp)
- *timestamp = ktime_to_ns(fence->timestamp);
+ *timestamp = ktime_to_ns(timestamp_kt);
} else {
*status = CS_WAIT_STATUS_BUSY;
}
- if (fence->error == -ETIMEDOUT)
- rc = -ETIMEDOUT;
- else if (fence->error == -EIO)
- rc = -EIO;
+ if (error == -ETIMEDOUT || error == -EIO)
+ rc = error;
return rc;
}
@@ -2443,8 +2569,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
* function won't sleep as it is called with timeout 0 (i.e.
* poll the fence)
*/
- rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
- &status, 0, NULL);
+ rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
if (rc) {
dev_err(hdev->dev,
"wait_for_fence error :%d for CS seq %llu\n",
@@ -2482,7 +2607,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
* For this we have to validate that the timestamp is
* earliest of all timestamps so far.
*/
- if (mcs_data->update_ts &&
+ if (fence && mcs_data->update_ts &&
(ktime_compare(fence->timestamp, first_cs_time) < 0))
first_cs_time = fence->timestamp;
break;
@@ -2513,8 +2638,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
return rc;
}
-static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- u64 timeout_us, u64 seq,
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp)
{
struct hl_fence *fence;
@@ -2815,8 +2939,7 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
s64 timestamp;
int rc;
- rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
- &status, &timestamp);
+ rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
@@ -2880,7 +3003,7 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
u64 current_cq_counter;
/* Validate ts_offset not exceeding last max */
- if (requested_offset_record > cb_last) {
+ if (requested_offset_record >= cb_last) {
dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
(u64)(uintptr_t)cb_last);
return -EINVAL;
@@ -2936,8 +3059,8 @@ start_over:
*pend = requested_offset_record;
- dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB(0x%llx)\n",
- (u64)(uintptr_t)requested_offset_record);
+ dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
+ requested_offset_record);
return 0;
}
@@ -2965,6 +3088,13 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto put_ctx;
}
+ /* Validate the cq offset */
+ if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
+ ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
+ rc = -EINVAL;
+ goto put_cq_cb;
+ }
+
if (register_ts_record) {
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
interrupt->interrupt_id, ts_offset, cq_counters_offset);
@@ -3094,7 +3224,6 @@ put_ctx:
static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
u64 timeout_us, u64 user_address,
u64 target_value, struct hl_user_interrupt *interrupt,
-
u32 *status,
u64 *timestamp)
{
@@ -3216,33 +3345,46 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
struct hl_user_interrupt *interrupt;
union hl_wait_cs_args *args = data;
u32 status = HL_WAIT_CS_STATUS_BUSY;
- u64 timestamp;
- int rc;
+ u64 timestamp = 0;
+ int rc, int_idx;
prop = &hdev->asic_prop;
- if (!prop->user_interrupt_count) {
+ if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
dev_err(hdev->dev, "no user interrupts allowed");
return -EPERM;
}
interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
- first_interrupt = prop->first_available_user_msix_interrupt;
- last_interrupt = prop->first_available_user_msix_interrupt +
- prop->user_interrupt_count - 1;
+ first_interrupt = prop->first_available_user_interrupt;
+ last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
+
+ if (interrupt_id < prop->user_dec_intr_count) {
+
+ /* Check if the requested core is enabled */
+ if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
+ dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
+ interrupt_id);
+ return -EINVAL;
+ }
+
+ interrupt = &hdev->user_interrupt[interrupt_id];
- if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
- interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
+ } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
+
+ int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
+ interrupt = &hdev->user_interrupt[int_idx];
+
+ } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
+ interrupt = &hdev->common_user_cq_interrupt;
+ } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
+ interrupt = &hdev->common_decoder_interrupt;
+ } else {
dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
return -EINVAL;
}
- if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
- interrupt = &hdev->common_user_interrupt;
- else
- interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
-
if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
args->in.interrupt_timeout_us, args->in.cq_counters_handle,
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index ed2cfd0c6e99..2f4620b7990c 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -102,13 +102,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_device_set_debug_mode(hdev, ctx, false);
hdev->asic_funcs->ctx_fini(ctx);
+
+ hl_dec_ctx_fini(ctx);
+
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
-
- /* Scrub both SRAM and DRAM */
- hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
hdev->asic_funcs->ctx_fini(ctx);
@@ -125,15 +125,22 @@ void hl_ctx_do_release(struct kref *ref)
hl_ctx_fini(ctx);
- if (ctx->hpriv)
- hl_hpriv_put(ctx->hpriv);
+ if (ctx->hpriv) {
+ struct hl_fpriv *hpriv = ctx->hpriv;
+
+ mutex_lock(&hpriv->ctx_lock);
+ hpriv->ctx = NULL;
+ mutex_unlock(&hpriv->ctx_lock);
+
+ hl_hpriv_put(hpriv);
+ }
kfree(ctx);
}
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
- struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
+ struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
struct hl_ctx *ctx;
int rc;
@@ -143,9 +150,9 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
goto out_err;
}
- mutex_lock(&mgr->ctx_lock);
- rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
- mutex_unlock(&mgr->ctx_lock);
+ mutex_lock(&ctx_mgr->lock);
+ rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&ctx_mgr->lock);
if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
@@ -170,9 +177,9 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
return 0;
remove_from_idr:
- mutex_lock(&mgr->ctx_lock);
- idr_remove(&mgr->ctx_handles, ctx->handle);
- mutex_unlock(&mgr->ctx_lock);
+ mutex_lock(&ctx_mgr->lock);
+ idr_remove(&ctx_mgr->handles, ctx->handle);
+ mutex_unlock(&ctx_mgr->lock);
free_ctx:
kfree(ctx);
out_err:
@@ -181,7 +188,7 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- int rc = 0;
+ int rc = 0, i;
ctx->hdev = hdev;
@@ -197,6 +204,13 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (!ctx->cs_pending)
return -ENOMEM;
+ INIT_LIST_HEAD(&ctx->outcome_store.used_list);
+ INIT_LIST_HEAD(&ctx->outcome_store.free_list);
+ hash_init(ctx->outcome_store.outcome_map);
+ for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
+ list_add(&ctx->outcome_store.nodes_pool[i].list_link,
+ &ctx->outcome_store.free_list);
+
hl_hw_block_mem_init(ctx);
if (is_kernel_ctx) {
@@ -262,6 +276,11 @@ err_hw_block_mem_fini:
return rc;
}
+static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
+{
+ return kref_get_unless_zero(&ctx->refcount);
+}
+
void hl_ctx_get(struct hl_ctx *ctx)
{
kref_get(&ctx->refcount);
@@ -280,11 +299,15 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ mutex_lock(&hpriv->ctx_lock);
+ ctx = hpriv->ctx;
+ if (ctx && !hl_ctx_get_unless_zero(ctx))
+ ctx = NULL;
+ mutex_unlock(&hpriv->ctx_lock);
+
/* There can only be a single user which has opened the compute device, so exit
- * immediately once we find him
+ * immediately once we find its context or if we see that it has been released
*/
- ctx = hpriv->ctx;
- hl_ctx_get(ctx);
break;
}
@@ -376,37 +399,37 @@ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
/*
* hl_ctx_mgr_init - initialize the context manager
*
- * @mgr: pointer to context manager structure
+ * @ctx_mgr: pointer to context manager structure
*
* This manager is an object inside the hpriv object of the user process.
* The function is called when a user process opens the FD.
*/
-void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
+void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
- mutex_init(&mgr->ctx_lock);
- idr_init(&mgr->ctx_handles);
+ mutex_init(&ctx_mgr->lock);
+ idr_init(&ctx_mgr->handles);
}
/*
* hl_ctx_mgr_fini - finalize the context manager
*
* @hdev: pointer to device structure
- * @mgr: pointer to context manager structure
+ * @ctx_mgr: pointer to context manager structure
*
* This function goes over all the contexts in the manager and frees them.
* It is called when a process closes the FD.
*/
-void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
struct hl_ctx *ctx;
struct idr *idp;
u32 id;
- idp = &mgr->ctx_handles;
+ idp = &ctx_mgr->handles;
idr_for_each_entry(idp, ctx, id)
kref_put(&ctx->refcount, hl_ctx_do_release);
- idr_destroy(&mgr->ctx_handles);
- mutex_destroy(&mgr->ctx_lock);
+ idr_destroy(&ctx_mgr->handles);
+ mutex_destroy(&ctx_mgr->lock);
}
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index c6744bfc6da4..64439f33a19b 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -152,12 +152,12 @@ static int command_submission_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
- seq_puts(s, "------------------------------------------------------\n");
+ seq_puts(s, " CS ID CS TYPE CTX ASID CS RefCnt Submitted Completed\n");
+ seq_puts(s, "----------------------------------------------------------------\n");
}
seq_printf(s,
- " %llu %d %d %d %d\n",
- cs->sequence, cs->ctx->asid,
+ " %llu %d %d %d %d %d\n",
+ cs->sequence, cs->type, cs->ctx->asid,
kref_read(&cs->refcount),
cs->submitted, cs->completed);
}
@@ -183,17 +183,18 @@ static int command_submission_jobs_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " JOB ID CS ID CTX ASID JOB RefCnt H/W Queue\n");
- seq_puts(s, "----------------------------------------------------\n");
+ seq_puts(s, " JOB ID CS ID CS TYPE CTX ASID JOB RefCnt H/W Queue\n");
+ seq_puts(s, "---------------------------------------------------------------\n");
}
if (job->cs)
seq_printf(s,
- " %02d %llu %d %d %d\n",
- job->id, job->cs->sequence, job->cs->ctx->asid,
- kref_read(&job->refcount), job->hw_queue_id);
+ " %02d %llu %d %d %d %d\n",
+ job->id, job->cs->sequence, job->cs->type,
+ job->cs->ctx->asid, kref_read(&job->refcount),
+ job->hw_queue_id);
else
seq_printf(s,
- " %02d 0 %d %d %d\n",
+ " %02d 0 0 %d %d %d\n",
job->id, HL_KERNEL_ASID_ID,
kref_read(&job->refcount), job->hw_queue_id);
}
@@ -449,7 +450,7 @@ static int mmu_show(struct seq_file *s, void *data)
if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
virt_addr);
- return 0;
+ goto put_ctx;
}
hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
@@ -475,6 +476,10 @@ static int mmu_show(struct seq_file *s, void *data)
i, hops_info.hop_info[i].hop_pte_val);
}
+put_ctx:
+ if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
+ hl_ctx_put(ctx);
+
return 0;
}
@@ -521,6 +526,66 @@ err:
return -EINVAL;
}
+static int mmu_ack_error(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ int rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (!dev_entry->mmu_cap_mask) {
+ dev_err(hdev->dev, "mmu_cap_mask is not set\n");
+ goto err;
+ }
+
+ rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
+ if (rc)
+ goto err;
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static ssize_t mmu_ack_error_value_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[MMU_KBUF_SIZE];
+ ssize_t rc;
+
+ if (!hdev->mmu_enable)
+ return count;
+
+ if (count > sizeof(kbuf) - 1)
+ goto err;
+
+ if (copy_from_user(kbuf, buf, count))
+ goto err;
+
+ kbuf[count] = 0;
+
+ if (strncmp(kbuf, "0x", 2))
+ goto err;
+
+ rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
+ if (rc)
+ goto err;
+
+ return count;
+err:
+ dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask > > mmu_error\n");
+
+ return -EINVAL;
+}
+
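/*
 * Illustrative sketch (not part of this patch): user-space side of the new
 * mmu_error debugfs node. The write handler above only accepts a "0x"-prefixed
 * hex capability mask, so acknowledging MMU errors for capability bits 0 and 2
 * would look like this from a shell (the debugfs path is an assumption based
 * on the driver's usual layout):
 *
 *   echo 0x5 > /sys/kernel/debug/habanalabs/hl0/mmu_error
 *   cat /sys/kernel/debug/habanalabs/hl0/mmu_error
 */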
static int engines_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
@@ -543,7 +608,7 @@ static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u64 val = entry->memory_scrub_val;
+ u64 val = hdev->memory_scrub_val;
int rc;
if (!hl_device_operational(hdev, NULL)) {
@@ -666,7 +731,8 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
dev_err(hdev->dev,
"virt addr 0x%llx is not mapped\n",
virt_addr);
- return -EINVAL;
+ rc = -EINVAL;
+ goto put_ctx;
}
rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
@@ -677,6 +743,9 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
rc = -EINVAL;
}
+put_ctx:
+ hl_ctx_put(ctx);
+
return rc;
}
@@ -695,8 +764,7 @@ static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
if (addr >= mem_reg->region_base &&
addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
*found = true;
- return hdev->asic_funcs->access_dev_mem(hdev, mem_reg, i,
- addr, val, acc_type);
+ return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
}
}
return 0;
@@ -728,7 +796,7 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
- enum debugfs_access_type acc_type)
+ enum debugfs_access_type acc_type)
{
size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
sizeof(u64) : sizeof(u32);
@@ -1349,6 +1417,17 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+
+ hdev->asic_funcs->check_if_razwi_happened(hdev);
+
+ return 0;
+}
+
static const struct file_operations hl_mem_scrub_fops = {
.owner = THIS_MODULE,
.write = hl_memory_scrub,
@@ -1438,6 +1517,11 @@ static const struct file_operations hl_timeout_locked_fops = {
.write = hl_timeout_locked_write
};
+static const struct file_operations hl_razwi_check_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_check_razwi_happened
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1446,7 +1530,8 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"vm", vm_show, NULL},
{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
{"mmu", mmu_show, mmu_asid_va_write},
- {"engines", engines_show, NULL}
+ {"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
+ {"engines", engines_show, NULL},
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1477,6 +1562,53 @@ static const struct file_operations hl_debugfs_fops = {
.release = single_release,
};
+static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry)
+{
+ debugfs_create_u8("i2c_bus",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_bus);
+
+ debugfs_create_u8("i2c_addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_addr);
+
+ debugfs_create_u8("i2c_reg",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_reg);
+
+ debugfs_create_u8("i2c_len",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_len);
+
+ debugfs_create_file("i2c_data",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_i2c_data_fops);
+
+ debugfs_create_file("led0",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led0_fops);
+
+ debugfs_create_file("led1",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led1_fops);
+
+ debugfs_create_file("led2",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led2_fops);
+}
+
void hl_debugfs_add_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
@@ -1516,7 +1648,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
debugfs_create_x64("memory_scrub_val",
0644,
dev_entry->root,
- &dev_entry->memory_scrub_val);
+ &hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
@@ -1547,50 +1679,6 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_power_fops);
- debugfs_create_u8("i2c_bus",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_bus);
-
- debugfs_create_u8("i2c_addr",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_addr);
-
- debugfs_create_u8("i2c_reg",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_reg);
-
- debugfs_create_u8("i2c_len",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_len);
-
- debugfs_create_file("i2c_data",
- 0644,
- dev_entry->root,
- dev_entry,
- &hl_i2c_data_fops);
-
- debugfs_create_file("led0",
- 0200,
- dev_entry->root,
- dev_entry,
- &hl_led0_fops);
-
- debugfs_create_file("led1",
- 0200,
- dev_entry->root,
- dev_entry,
- &hl_led1_fops);
-
- debugfs_create_file("led2",
- 0200,
- dev_entry->root,
- dev_entry,
- &hl_led2_fops);
-
debugfs_create_file("device",
0200,
dev_entry->root,
@@ -1615,6 +1703,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_security_violations_fops);
+ debugfs_create_file("dump_razwi_events",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_razwi_check_fops);
+
debugfs_create_file("dma_size",
0200,
dev_entry->root,
@@ -1663,6 +1757,9 @@ void hl_debugfs_add_device(struct hl_device *hdev)
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
+
+ if (!hdev->asic_prop.fw_security_enabled)
+ add_secured_nodes(dev_entry);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/common/decoder.c b/drivers/misc/habanalabs/common/decoder.c
new file mode 100644
index 000000000000..2aab14d74b53
--- /dev/null
+++ b/drivers/misc/habanalabs/common/decoder.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#define VCMD_CONTROL_OFFSET 0x40 /* SWREG16 */
+#define VCMD_IRQ_STATUS_OFFSET 0x44 /* SWREG17 */
+
+#define VCMD_IRQ_STATUS_ENDCMD_MASK 0x1
+#define VCMD_IRQ_STATUS_BUSERR_MASK 0x2
+#define VCMD_IRQ_STATUS_TIMEOUT_MASK 0x4
+#define VCMD_IRQ_STATUS_CMDERR_MASK 0x8
+#define VCMD_IRQ_STATUS_ABORT_MASK 0x10
+#define VCMD_IRQ_STATUS_RESET_MASK 0x20
+
+static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
+{
+ const char *format = "abnormal interrupt source:%s%s%s%s%s%s\n";
+ char *intr_source[6] = {"Unknown", "", "", "", "", ""};
+ int i = 0;
+
+ if (!irq_status)
+ return;
+
+ if (irq_status & VCMD_IRQ_STATUS_ENDCMD_MASK)
+ intr_source[i++] = " ENDCMD";
+ if (irq_status & VCMD_IRQ_STATUS_BUSERR_MASK)
+ intr_source[i++] = " BUSERR";
+ if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
+ intr_source[i++] = " TIMEOUT";
+ if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK)
+ intr_source[i++] = " CMDERR";
+ if (irq_status & VCMD_IRQ_STATUS_ABORT_MASK)
+ intr_source[i++] = " ABORT";
+ if (irq_status & VCMD_IRQ_STATUS_RESET_MASK)
+ intr_source[i++] = " RESET";
+
+ dev_err(hdev->dev, format, intr_source[0], intr_source[1],
+ intr_source[2], intr_source[3], intr_source[4], intr_source[5]);
+}
+
+static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_id)
+{
+ bool reset_required = false;
+ u32 irq_status;
+
+ irq_status = RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
+
+ dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, core_id);
+
+ dec_print_abnrm_intr_source(hdev, irq_status);
+
+ if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
+ reset_required = true;
+
+ /* Clear the interrupt */
+ WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
+
+ /* Flush the interrupt clear */
+ RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
+
+ if (reset_required)
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
+}
+
+static void dec_completion_abnrm(struct work_struct *work)
+{
+ struct hl_dec *dec = container_of(work, struct hl_dec, completion_abnrm_work);
+ struct hl_device *hdev = dec->hdev;
+
+ dec_error_intr_work(hdev, dec->base_addr, dec->core_id);
+}
+
+void hl_dec_fini(struct hl_device *hdev)
+{
+ kfree(hdev->dec);
+}
+
+int hl_dec_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_dec *dec;
+ int rc, j;
+
+ /* if max core is 0, nothing to do */
+ if (!prop->max_dec)
+ return 0;
+
+ hdev->dec = kcalloc(prop->max_dec, sizeof(struct hl_dec), GFP_KERNEL);
+ if (!hdev->dec)
+ return -ENOMEM;
+
+ for (j = 0 ; j < prop->max_dec ; j++) {
+ dec = hdev->dec + j;
+
+ dec->hdev = hdev;
+ INIT_WORK(&dec->completion_abnrm_work, dec_completion_abnrm);
+ dec->core_id = j;
+ dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, j);
+ if (!dec->base_addr) {
+ dev_err(hdev->dev, "Invalid base address of decoder %d\n", j);
+ rc = -EINVAL;
+ goto err_dec_fini;
+ }
+ }
+
+ return 0;
+
+err_dec_fini:
+ hl_dec_fini(hdev);
+
+ return rc;
+}
+
+void hl_dec_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_dec *dec;
+ int j;
+
+ for (j = 0 ; j < prop->max_dec ; j++) {
+ if (!!(prop->decoder_enabled_mask & BIT(j))) {
+ dec = hdev->dec + j;
+ /* Stop the decoder */
+ WREG32(dec->base_addr + VCMD_CONTROL_OFFSET, 0);
+ }
+ }
+}
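/*
 * Illustrative sketch (not part of this patch): how an ASIC-specific event
 * handler could hand a decoder abnormal interrupt to the work item that
 * hl_dec_init() above initializes. The handler name and the way the core id is
 * obtained are assumptions for the example; the work function then reads and
 * clears VCMD_IRQ_STATUS and escalates to a hard reset on a timeout.
 */
static void example_handle_dec_abnrm_irq(struct hl_device *hdev, u32 core_id)
{
	struct hl_dec *dec = hdev->dec + core_id;

	/* defer register access and a possible hl_device_reset() to process context */
	schedule_work(&dec->completion_abnrm_work);
}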
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index b4f14c6d3970..b30aeb1c657f 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -15,6 +15,14 @@
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
+enum dma_alloc_type {
+ DMA_ALLOC_COHERENT,
+ DMA_ALLOC_CPU_ACCESSIBLE,
+ DMA_ALLOC_POOL,
+};
+
+#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
+
/*
* hl_set_dram_bar- sets the bar to allow later access to address
*
@@ -44,7 +52,7 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
enum debugfs_access_type acc_type, enum pci_region region_type)
{
struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
- u64 old_base, rc;
+ u64 old_base = 0, rc;
if (region_type == PCI_REGION_DRAM) {
old_base = hl_set_dram_bar(hdev, addr);
@@ -88,6 +96,75 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
return 0;
}
+static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, enum dma_alloc_type alloc_type)
+{
+ void *ptr;
+
+ switch (alloc_type) {
+ case DMA_ALLOC_COHERENT:
+ ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
+ break;
+ case DMA_ALLOC_CPU_ACCESSIBLE:
+ ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+ break;
+ case DMA_ALLOC_POOL:
+ ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
+ break;
+ }
+
+ return ptr;
+}
+
+static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, enum dma_alloc_type alloc_type)
+{
+ switch (alloc_type) {
+ case DMA_ALLOC_COHERENT:
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
+ break;
+ case DMA_ALLOC_CPU_ACCESSIBLE:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
+ break;
+ case DMA_ALLOC_POOL:
+ hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
+ break;
+ }
+}
+
+void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT);
+}
+
+void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT);
+}
+
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+{
+ return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+}
+
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+{
+ hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+}
+
+void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle)
+{
+ return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL);
+}
+
+void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
+{
+ hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL);
+}
+
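/*
 * Illustrative sketch (not part of this patch): with the wrappers above,
 * callers no longer dereference asic_funcs directly for DMA allocations.
 * The helper below is made up; SZ_4K and GFP_KERNEL are standard kernel
 * definitions.
 */
static int example_alloc_scratch_buf(struct hl_device *hdev)
{
	dma_addr_t dma_addr;
	void *va;

	va = hl_asic_dma_alloc_coherent(hdev, SZ_4K, &dma_addr, GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	/* ... use the buffer ... */

	hl_asic_dma_free_coherent(hdev, SZ_4K, va, dma_addr);

	return 0;
}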
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -168,14 +245,13 @@ int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
* hl_access_dev_mem - access device memory
*
* @hdev: pointer to habanalabs device structure
- * @region: the memory region the address belongs to
* @region_type: the type of the region the address belongs to
* @addr: the address to access
* @val: the value to write from or read to
* @acc_type: the type of access (r/w, 32/64)
*/
-int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
- enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type)
+int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
+ u64 addr, u64 *val, enum debugfs_access_type acc_type)
{
switch (region_type) {
case PCI_REGION_CFG:
@@ -195,16 +271,20 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->reset_info.in_reset)
- status = HL_DEVICE_STATUS_IN_RESET;
- else if (hdev->reset_info.needs_reset)
+ if (hdev->reset_info.in_reset) {
+ if (hdev->reset_info.in_compute_reset)
+ status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
+ else
+ status = HL_DEVICE_STATUS_IN_RESET;
+ } else if (hdev->reset_info.needs_reset) {
status = HL_DEVICE_STATUS_NEEDS_RESET;
- else if (hdev->disabled)
+ } else if (hdev->disabled) {
status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (!hdev->init_done)
+ } else if (!hdev->init_done) {
status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
- else
+ } else {
status = HL_DEVICE_STATUS_OPERATIONAL;
+ }
return status;
}
@@ -220,6 +300,7 @@ bool hl_device_operational(struct hl_device *hdev,
switch (current_status) {
case HL_DEVICE_STATUS_IN_RESET:
+ case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
case HL_DEVICE_STATUS_MALFUNCTION:
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
@@ -245,6 +326,7 @@ static void hpriv_release(struct kref *ref)
hl_debugfs_remove_file(hpriv);
+ mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
if ((!hdev->pldm) && (hdev->pdev) &&
@@ -271,9 +353,14 @@ static void hpriv_release(struct kref *ref)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_list_lock);
- if ((hdev->reset_if_device_not_idle && !device_is_idle)
- || hdev->reset_upon_device_release)
+ if (!device_is_idle || hdev->reset_upon_device_release) {
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
+ } else {
+ int rc = hdev->asic_funcs->scrub_device_mem(hdev);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
+ }
/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
* thread, we don't care because the in_reset is marked so if a user will try to open
@@ -330,8 +417,8 @@ static int hl_device_release(struct inode *inode, struct file *filp)
*/
hl_release_pending_user_interrupts(hpriv->hdev);
- hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr);
hdev->compute_ctx_in_release = 1;
@@ -379,7 +466,7 @@ out:
* @*filp: pointer to file structure
* @*vma: pointer to vm_area_struct of the process
*
- * Called when process does an mmap on habanalabs device. Call the device's mmap
+ * Called when process does an mmap on habanalabs device. Call the relevant mmap
* function at the end of the common code.
*/
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -404,7 +491,6 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
case HL_MMAP_TYPE_TS_BUFF:
return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
}
-
return -EINVAL;
}
@@ -563,6 +649,14 @@ static int device_early_init(struct hl_device *hdev)
gaudi_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
break;
+ case ASIC_GAUDI2:
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
+ break;
+ case ASIC_GAUDI2_SEC:
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2 SEC", sizeof(hdev->asic_name));
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -604,12 +698,20 @@ static int device_early_init(struct hl_device *hdev)
goto free_cq_wq;
}
+ hdev->cs_cmplt_wq = alloc_workqueue("hl-cs-completions", WQ_UNBOUND, 0);
+ if (!hdev->cs_cmplt_wq) {
+ dev_err(hdev->dev,
+ "Failed to allocate CS completions workqueue\n");
+ rc = -ENOMEM;
+ goto free_eq_wq;
+ }
+
hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
if (!hdev->ts_free_obj_wq) {
dev_err(hdev->dev,
"Failed to allocate Timestamp registration free workqueue\n");
rc = -ENOMEM;
- goto free_eq_wq;
+ goto free_cs_cmplt_wq;
}
hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
@@ -666,6 +768,8 @@ free_pf_wq:
destroy_workqueue(hdev->pf_wq);
free_ts_free_wq:
destroy_workqueue(hdev->ts_free_obj_wq);
+free_cs_cmplt_wq:
+ destroy_workqueue(hdev->cs_cmplt_wq);
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
@@ -706,6 +810,7 @@ static void device_early_fini(struct hl_device *hdev)
destroy_workqueue(hdev->pf_wq);
destroy_workqueue(hdev->ts_free_obj_wq);
+ destroy_workqueue(hdev->cs_cmplt_wq);
destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->device_reset_work.wq);
@@ -1159,8 +1264,7 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* of heartbeat, the device CPU is marked as disable
* so this message won't be sent
*/
- if (hl_fw_send_pci_access_msg(hdev,
- CPUCP_PACKET_DISABLE_PCI_ACCESS))
+ if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
dev_warn(hdev->dev,
"Failed to disable PCI access by F/W\n");
}
@@ -1202,7 +1306,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
- if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
+ if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
hard_instead_soft = true;
hard_reset = true;
}
@@ -1225,7 +1329,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
}
if (hard_instead_soft)
- dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
+ dev_dbg(hdev->dev, "Doing hard-reset instead of compute reset\n");
do_reset:
/* Re-entry of reset thread */
@@ -1241,13 +1345,20 @@ do_reset:
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
- /* We only allow scheduling of a hard reset during soft reset */
- if (hard_reset && hdev->reset_info.is_in_soft_reset)
+ /* We only allow scheduling of a hard reset during compute reset */
+ if (hard_reset && hdev->reset_info.in_compute_reset)
hdev->reset_info.hard_reset_schedule_flags = flags;
spin_unlock(&hdev->reset_info.lock);
return 0;
}
+
+ /* This still allows the completion of some KDMA ops
+ * Update this before in_reset because in_compute_reset implies we are in reset
+ */
+ hdev->reset_info.in_compute_reset = !hard_reset;
+
hdev->reset_info.in_reset = 1;
+
spin_unlock(&hdev->reset_info.lock);
if (delay_reset)
@@ -1255,9 +1366,6 @@ do_reset:
handle_reset_trigger(hdev, flags);
- /* This still allows the completion of some KDMA ops */
- hdev->reset_info.is_in_soft_reset = !hard_reset;
-
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -1445,7 +1553,8 @@ kill_processes:
goto out_err;
}
- hl_fw_set_max_power(hdev);
+ if (!hdev->asic_prop.fw_security_enabled)
+ hl_fw_set_max_power(hdev);
} else {
rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
if (rc) {
@@ -1453,13 +1562,19 @@ kill_processes:
dev_err(hdev->dev,
"Failed late init in reset after device release\n");
else
- dev_err(hdev->dev, "Failed late init after soft reset\n");
+ dev_err(hdev->dev, "Failed late init after compute reset\n");
goto out_err;
}
}
+ rc = hdev->asic_funcs->scrub_device_mem(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
+ return rc;
+ }
+
spin_lock(&hdev->reset_info.lock);
- hdev->reset_info.is_in_soft_reset = false;
+ hdev->reset_info.in_compute_reset = 0;
/* Schedule hard reset only if requested and if not already in hard reset.
* We keep 'in_reset' enabled, so no other reset can go in during the hard
@@ -1489,11 +1604,11 @@ kill_processes:
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
} else if (!reset_upon_device_release) {
- hdev->reset_info.soft_reset_cnt++;
+ hdev->reset_info.compute_reset_cnt++;
}
if (schedule_hard_reset) {
- dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n");
+ dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
flags = hdev->reset_info.hard_reset_schedule_flags;
hdev->reset_info.hard_reset_schedule_flags = 0;
hdev->disabled = true;
@@ -1506,20 +1621,24 @@ kill_processes:
out_err:
hdev->disabled = true;
- hdev->reset_info.is_in_soft_reset = false;
+
+ spin_lock(&hdev->reset_info.lock);
+ hdev->reset_info.in_compute_reset = 0;
if (hard_reset) {
dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
hdev->reset_info.hard_reset_cnt++;
} else if (reset_upon_device_release) {
+ spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Failed to reset device after user release\n");
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
goto again;
} else {
- dev_err(hdev->dev, "Failed to do soft-reset\n");
- hdev->reset_info.soft_reset_cnt++;
+ spin_unlock(&hdev->reset_info.lock);
+ dev_err(hdev->dev, "Failed to do compute reset\n");
+ hdev->reset_info.compute_reset_cnt++;
flags |= HL_DRV_RESET_HARD;
hard_reset = true;
goto again;
@@ -1527,13 +1646,16 @@ out_err:
hdev->reset_info.in_reset = 0;
+ spin_unlock(&hdev->reset_info.lock);
+
return rc;
}
-static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event)
+static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
{
mutex_lock(&notifier_event->lock);
- notifier_event->events_mask |= event;
+ notifier_event->events_mask |= event_mask;
+
if (notifier_event->eventfd)
eventfd_signal(notifier_event->eventfd, 1);
@@ -1544,17 +1666,17 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64
* hl_notifier_event_send_all - notify all user processes via eventfd
*
* @hdev: pointer to habanalabs device structure
- * @event: the occurred event
+ * @event_mask: the occurred event/s
* Returns 0 for success or an error on failure.
*/
-void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
{
struct hl_fpriv *hpriv;
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
- hl_notifier_event_send(&hpriv->notifier_event, event);
+ hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_list_lock);
@@ -1562,7 +1684,7 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
mutex_lock(&hdev->fpriv_ctrl_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
- hl_notifier_event_send(&hpriv->notifier_event, event);
+ hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
@@ -1617,13 +1739,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto free_dev_ctrl;
- user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;
+ user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
+ hdev->asic_prop.user_interrupt_count;
if (user_interrupt_cnt) {
- hdev->user_interrupt = kcalloc(user_interrupt_cnt,
- sizeof(*hdev->user_interrupt),
- GFP_KERNEL);
-
+ hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
+ GFP_KERNEL);
if (!hdev->user_interrupt) {
rc = -ENOMEM;
goto early_fini;
@@ -1636,7 +1757,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
*/
rc = hdev->asic_funcs->sw_init(hdev);
if (rc)
- goto user_interrupts_fini;
+ goto free_usr_intr_mem;
/* initialize completion structure for multi CS wait */
@@ -1684,6 +1805,13 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->completion_queue[i].cq_idx = i;
}
+ hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
+ sizeof(*hdev->shadow_cs_queue), GFP_KERNEL);
+ if (!hdev->shadow_cs_queue) {
+ rc = -ENOMEM;
+ goto cq_fini;
+ }
+
/*
* Initialize the event queue. Must be done before hw_init,
* because there the address of the event queue is being
@@ -1692,7 +1820,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
rc = hl_eq_init(hdev, &hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "failed to initialize event queue\n");
- goto cq_fini;
+ goto free_shadow_cs_queue;
}
/* MMU S/W must be initialized before kernel context is created */
@@ -1713,6 +1841,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->asic_funcs->state_dump_init(hdev);
+ hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
hl_debugfs_add_device(hdev);
/* debugfs nodes are created in hl_ctx_init so it must be called after
@@ -1731,6 +1860,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto release_ctx;
}
+ rc = hl_dec_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize the decoder module\n");
+ goto cb_pool_fini;
+ }
+
/*
* From this point, override rc (=0) in case of an error to allow
* debugging (by adding char devices and create sysfs nodes as part of
@@ -1794,7 +1929,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
/* Need to call this again because the max power might change,
* depending on card type for certain ASICs
*/
- if (hdev->asic_prop.set_max_power_on_device_init)
+ if (hdev->asic_prop.set_max_power_on_device_init &&
+ !hdev->asic_prop.fw_security_enabled)
hl_fw_set_max_power(hdev);
/*
@@ -1824,6 +1960,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
return 0;
+cb_pool_fini:
+ hl_cb_pool_fini(hdev);
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
@@ -1834,6 +1972,8 @@ mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
hl_eq_fini(hdev, &hdev->event_queue);
+free_shadow_cs_queue:
+ kfree(hdev->shadow_cs_queue);
cq_fini:
for (i = 0 ; i < cq_ready_cnt ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
@@ -1842,7 +1982,7 @@ hw_queues_destroy:
hl_hw_queues_destroy(hdev);
sw_fini:
hdev->asic_funcs->sw_fini(hdev);
-user_interrupts_fini:
+free_usr_intr_mem:
kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
@@ -1928,7 +2068,7 @@ void hl_device_fini(struct hl_device *hdev)
* message won't be send. Also, in case of heartbeat, the device CPU is
* marked as disable so this message won't be sent
*/
- hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
/* Mark device as disabled */
hdev->disabled = true;
@@ -1974,12 +2114,16 @@ void hl_device_fini(struct hl_device *hdev)
hl_debugfs_remove_device(hdev);
+ hl_dec_fini(hdev);
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_fini(hdev, &hdev->event_queue);
+ kfree(hdev->shadow_cs_queue);
+
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
kfree(hdev->completion_queue);
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 828a36af5b14..608ca67527a5 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -15,6 +15,14 @@
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
+struct fw_binning_conf {
+ u64 tpc_binning;
+ u32 dec_binning;
+ u32 hbm_binning;
+ u32 edma_binning;
+ u32 mme_redundancy;
+};
+
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -33,7 +41,7 @@ static char *extract_fw_ver_from_str(const char *fw_str)
ver_offset = str - fw_str;
/* Copy until the next whitespace */
- whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
+ whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
if (!whitespace)
goto free_fw_ver;
@@ -46,6 +54,43 @@ free_fw_ver:
return NULL;
}
+static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
+{
+ char major[8], minor[8], *first_dot, *second_dot;
+ int rc;
+
+ first_dot = strnstr(preboot_ver, ".", 10);
+ if (first_dot) {
+ strscpy(major, preboot_ver, first_dot - preboot_ver + 1);
+ rc = kstrtou32(major, 10, &hdev->fw_major_version);
+ } else {
+ rc = -EINVAL;
+ }
+
+ if (rc) {
+ dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+ goto out;
+ }
+
+ /* skip the first dot */
+ first_dot++;
+
+ second_dot = strnstr(first_dot, ".", 10);
+ if (second_dot) {
+ strscpy(minor, first_dot, second_dot - first_dot + 1);
+ rc = kstrtou32(minor, 10, &hdev->fw_minor_version);
+ } else {
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
+
+out:
+ kfree(preboot_ver);
+ return rc;
+}
+
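/*
 * Illustrative note (not part of this patch): for a preboot version string
 * such as "38.3.1-sec-4" (a made-up example), the helper above stores 38 in
 * hdev->fw_major_version and 3 in hdev->fw_minor_version; everything after the
 * second dot is ignored. It also frees preboot_ver on all paths, which is why
 * the caller further down no longer does so itself.
 */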
static int hl_request_fw(struct hl_device *hdev,
const struct firmware **firmware_p,
const char *fw_name)
@@ -197,14 +242,14 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
return rc;
}
-int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
{
struct cpucp_packet pkt = {};
pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(value);
- return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
- sizeof(pkt), 0, NULL);
+ return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@@ -218,8 +263,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u32 tmp, expected_ack_val, pi;
int rc;
- pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
- &pkt_dma_addr);
+ pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
if (!pkt) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for packet to CPU\n");
@@ -231,7 +275,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
mutex_lock(&hdev->send_cpu_message_lock);
/* CPU-CP messages can be sent during soft-reset */
- if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
+ if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
rc = 0;
goto out;
}
@@ -267,7 +311,14 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
+ /* If FW performed reset just before sending it a packet, we will get a timeout.
+ * This is expected behavior, hence no need for error message.
+ */
+ if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
+ dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
+ tmp);
+ else
+ dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
hdev->device_cpu_disabled = true;
goto out;
}
@@ -276,11 +327,15 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
- dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
- rc,
- (tmp & CPUCP_PKT_CTL_OPCODE_MASK)
- >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+ dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
+ rc, (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ /* propagate the return code from the f/w to the callers who want to check it */
+ if (result)
+ *result = rc;
+
rc = -EIO;
+
} else if (result) {
*result = le64_to_cpu(pkt->result);
}
@@ -296,7 +351,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
out:
mutex_unlock(&hdev->send_cpu_message_lock);
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+ hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
return rc;
}
@@ -517,6 +572,11 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
}
+ if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
+ dev_err(hdev->dev, "Device boot error - binning failure\n");
+ err_exists = true;
+ }
+
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
@@ -637,10 +697,8 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
u64 result;
int rc;
- cpucp_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- sizeof(struct cpucp_info),
- &cpucp_info_dma_addr);
+ cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
+ &cpucp_info_dma_addr);
if (!cpucp_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP info packet\n");
@@ -701,8 +759,7 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- sizeof(struct cpucp_info), cpucp_info_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
return rc;
}
@@ -785,9 +842,8 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
u64 result;
int rc;
- eeprom_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- max_size, &eeprom_info_dma_addr);
+ eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
+ &eeprom_info_dma_addr);
if (!eeprom_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
@@ -815,8 +871,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
- eeprom_info_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
return rc;
}
@@ -833,8 +888,7 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
int i, rc;
data_size = sizeof(struct cpucp_monitor_dump);
- mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size,
- &mon_dump_dma_addr);
+ mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
if (!mon_dump_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
@@ -864,7 +918,7 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
}
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
return rc;
}
@@ -1057,10 +1111,9 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
u64 result;
int rc;
- cpucp_repl_rows_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- sizeof(struct cpucp_hbm_row_info),
- &cpucp_repl_rows_info_dma_addr);
+ cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct cpucp_hbm_row_info),
+ &cpucp_repl_rows_info_dma_addr);
if (!cpucp_repl_rows_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
@@ -1085,9 +1138,8 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- sizeof(struct cpucp_hbm_row_info),
- cpucp_repl_rows_info_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
+ cpucp_repl_rows_info_cpu_addr);
return rc;
}
@@ -1234,15 +1286,10 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
}
}
-static int hl_fw_read_preboot_caps(struct hl_device *hdev,
- u32 cpu_boot_status_reg,
- u32 sts_boot_dev_sts0_reg,
- u32 sts_boot_dev_sts1_reg,
- u32 boot_err0_reg, u32 boot_err1_reg,
- u32 timeout)
+static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 status, reg_val;
+ struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
+ u32 status;
int rc;
/* Need to check two possible scenarios:
@@ -1255,13 +1302,13 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
*/
rc = hl_poll_timeout(
hdev,
- cpu_boot_status_reg,
+ pre_fw_load->cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
hdev->fw_poll_interval_usec,
- timeout);
+ pre_fw_load->wait_for_preboot_timeout);
if (rc) {
dev_err(hdev->dev, "CPU boot ready status timeout\n");
@@ -1271,12 +1318,32 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
* of reading specific errors
*/
if (status != -1)
- fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
- sts_boot_dev_sts0_reg,
- sts_boot_dev_sts1_reg);
+ fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
+ pre_fw_load->boot_err1_reg,
+ pre_fw_load->sts_boot_dev_sts0_reg,
+ pre_fw_load->sts_boot_dev_sts1_reg);
return -EIO;
}
+ hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
+
+ return 0;
+}
+
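/*
 * Illustrative sketch (not part of this patch): an ASIC's
 * init_firmware_preload_params() callback is expected to fill the register
 * addresses and timeout that hl_fw_wait_preboot_ready() above polls on.
 * The EXAMPLE_* values are placeholders, not real register definitions.
 */
static void example_init_firmware_preload_params(struct hl_device *hdev)
{
	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;

	pre_fw_load->cpu_boot_status_reg = EXAMPLE_CPU_BOOT_STATUS_REG;
	pre_fw_load->sts_boot_dev_sts0_reg = EXAMPLE_BOOT_DEV_STS0_REG;
	pre_fw_load->sts_boot_dev_sts1_reg = EXAMPLE_BOOT_DEV_STS1_REG;
	pre_fw_load->boot_err0_reg = EXAMPLE_BOOT_ERR0_REG;
	pre_fw_load->boot_err1_reg = EXAMPLE_BOOT_ERR1_REG;
	pre_fw_load->wait_for_preboot_timeout = EXAMPLE_PREBOOT_TIMEOUT_USEC;
}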
+static int hl_fw_read_preboot_caps(struct hl_device *hdev)
+{
+ struct pre_fw_load_props *pre_fw_load;
+ struct asic_fixed_properties *prop;
+ u32 reg_val;
+ int rc;
+
+ prop = &hdev->asic_prop;
+ pre_fw_load = &hdev->fw_loader.pre_fw_load;
+
+ rc = hl_fw_wait_preboot_ready(hdev);
+ if (rc)
+ return rc;
+
/*
* the registers DEV_STS* contain FW capabilities/features.
* We can rely on this registers only if bit CPU_BOOT_DEV_STS*_ENABLED
@@ -1287,13 +1354,13 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
* In case it is not enabled the stored value will be left 0- all
* caps/features are off
*/
- reg_val = RREG32(sts_boot_dev_sts0_reg);
+ reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_cpu_boot_dev_sts0_valid = true;
prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
}
- reg_val = RREG32(sts_boot_dev_sts1_reg);
+ reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
prop->fw_cpu_boot_dev_sts1_valid = true;
prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
@@ -1436,24 +1503,21 @@ static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
return 0;
}
-int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 sts_boot_dev_sts0_reg,
- u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
- u32 boot_err1_reg, u32 timeout)
+int hl_fw_read_preboot_status(struct hl_device *hdev)
{
int rc;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
+ /* get FW pre-load parameters */
+ hdev->asic_funcs->init_firmware_preload_params(hdev);
+
/*
	 * In order to determine boot method (static VS dynamic) we need to
* read the boot caps register
*/
- rc = hl_fw_read_preboot_caps(hdev, cpu_boot_status_reg,
- sts_boot_dev_sts0_reg,
- sts_boot_dev_sts1_reg, boot_err0_reg,
- boot_err1_reg, timeout);
+ rc = hl_fw_read_preboot_caps(hdev);
if (rc)
return rc;
@@ -1989,18 +2053,14 @@ static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
- char major[8];
int rc;
dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
- sprintf(major, "%.2s", preboot_ver);
- kfree(preboot_ver);
- rc = kstrtou32(major, 10, &hdev->fw_major_version);
- if (rc) {
- dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+ /* This function takes care of freeing preboot_ver */
+ rc = extract_fw_sub_versions(hdev, preboot_ver);
+ if (rc)
return rc;
- }
}
break;
@@ -2361,6 +2421,19 @@ static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
case HL_COMMS_RESET_CAUSE_TYPE:
msg.reset_cause = *(__u8 *) data;
break;
+
+ case HL_COMMS_BINNING_CONF_TYPE:
+ {
+ struct fw_binning_conf *binning_conf = (struct fw_binning_conf *) data;
+
+ msg.tpc_binning_conf = cpu_to_le64(binning_conf->tpc_binning);
+ msg.dec_binning_conf = cpu_to_le32(binning_conf->dec_binning);
+ msg.hbm_binning_conf = cpu_to_le32(binning_conf->hbm_binning);
+ msg.edma_binning_conf = cpu_to_le32(binning_conf->edma_binning);
+ msg.mme_redundancy_conf = cpu_to_le32(binning_conf->mme_redundancy);
+ break;
+ }
+
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
@@ -2418,7 +2491,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
int rc;
dev_info(hdev->dev,
- "Loading firmware to device, may take some time...\n");
+ "Loading %sfirmware to device, may take some time...\n",
+ hdev->asic_prop.fw_security_enabled ? "secured " : "");
/* initialize FW descriptor as invalid */
fw_loader->dynamic_loader.fw_desc_valid = false;
@@ -2429,6 +2503,13 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
*/
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+ /* if no preboot loaded indication - wait for preboot */
+ if (!(hdev->fw_loader.fw_comp_loaded & FW_TYPE_PREBOOT_CPU)) {
+ rc = hl_fw_wait_preboot_ready(hdev);
+ if (rc)
+ return -EIO;
+ }
+
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
0, true,
fw_loader->cpu_timeout);
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index b0b0f3f89865..d59bba9e55c9 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -31,6 +31,9 @@
#define HL_NAME "habanalabs"
+struct hl_device;
+struct hl_fpriv;
+
/* Use upper bits of mmap offset to store habana driver specific information.
* bits[63:59] - Encode mmap type
* bits[45:0] - mmap offset value
@@ -69,9 +72,12 @@
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
-#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
+#define HL_SIM_MAX_TIMEOUT_US 100000000 /* 100s */
-#define HL_COMMON_USER_INTERRUPT_ID 0xFFF
+#define HL_INVALID_QUEUE UINT_MAX
+
+#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
+#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
#define HL_STATE_DUMP_HIST_LEN 5
@@ -99,6 +105,18 @@ enum hl_mmu_page_table_location {
MMU_NUM_PGT_LOCATIONS /* num of PGT locations */
};
+/**
+ * enum hl_mmu_enablement - what mmu modules to enable
+ * @MMU_EN_NONE: mmu disabled.
+ * @MMU_EN_ALL: enable all.
+ * @MMU_EN_PMMU_ONLY: Enable only the PMMU leaving the DMMU disabled.
+ */
+enum hl_mmu_enablement {
+ MMU_EN_NONE = 0,
+ MMU_EN_ALL = 1,
+ MMU_EN_PMMU_ONLY = 3, /* N/A for Goya/Gaudi */
+};
+
/*
* HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
* HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
@@ -118,7 +136,12 @@ enum hl_mmu_page_table_location {
#define HL_PCI_NUM_BARS 6
-#define HL_MAX_DCORES 4
+/* Completion queue entry relates to completed job */
+#define HL_COMPLETION_MODE_JOB 0
+/* Completion queue entry relates to completed command submission */
+#define HL_COMPLETION_MODE_CS 1
+
+#define HL_MAX_DCORES 8
/*
* Reset Flags
@@ -159,6 +182,51 @@ enum hl_mmu_page_table_location {
#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
#define HL_DRV_RESET_DELAY (1 << 7)
+/*
+ * Security
+ */
+
+#define HL_PB_SHARED 1
+#define HL_PB_NA 0
+#define HL_PB_SINGLE_INSTANCE 1
+#define HL_BLOCK_SIZE 0x1000
+#define HL_BLOCK_GLBL_ERR_MASK 0xF40
+#define HL_BLOCK_GLBL_ERR_ADDR 0xF44
+#define HL_BLOCK_GLBL_ERR_CAUSE 0xF48
+#define HL_BLOCK_GLBL_SEC_OFFS 0xF80
+#define HL_BLOCK_GLBL_SEC_SIZE (HL_BLOCK_SIZE - HL_BLOCK_GLBL_SEC_OFFS)
+#define HL_BLOCK_GLBL_SEC_LEN (HL_BLOCK_GLBL_SEC_SIZE / sizeof(u32))
+#define UNSET_GLBL_SEC_BIT(array, b) ((array)[((b) / 32)] |= (1 << ((b) % 32)))
+
+enum hl_protection_levels {
+ SECURED_LVL,
+ PRIVILEGED_LVL,
+ NON_SECURED_LVL
+};
+
+/**
+ * struct iterate_module_ctx - HW module iterator
+ * @fn: function to apply to each HW module instance
+ * @data: optional internal data to the function iterator
+ */
+struct iterate_module_ctx {
+ /*
+ * callback for the HW module iterator
+ * @hdev: pointer to the habanalabs device structure
+ * @block: block (ASIC specific definition can be dcore/hdcore)
+ * @inst: HW module instance within the block
+ * @offset: current HW module instance offset from the 1-st HW module instance
+ * in the 1-st block
+ * @data: function specific data
+ */
+ void (*fn)(struct hl_device *hdev, int block, int inst, u32 offset, void *data);
+ void *data;
+};
+
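/*
 * Illustrative sketch (not part of this patch): a callback matching the
 * iterate_module_ctx contract documented above, here simply counting HW module
 * instances. The data structure and how the iterator is driven by the ASIC
 * code are assumptions for the example.
 */
struct example_iter_data {
	u32 instance_cnt;
};

static void example_count_instances(struct hl_device *hdev, int block, int inst,
					u32 offset, void *data)
{
	struct example_iter_data *iter_data = data;

	iter_data->instance_cnt++;
}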
+struct hl_block_glbl_sec {
+ u32 sec_array[HL_BLOCK_GLBL_SEC_LEN];
+};
+
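/*
 * Illustrative sketch (not part of this patch): despite its name,
 * UNSET_GLBL_SEC_BIT() sets the corresponding bit in the glbl_sec array, i.e.
 * in these arrays a set bit leaves the register non-secured. Opening up
 * register index 5 of a block, for example:
 */
static void example_unsecure_reg(struct hl_block_glbl_sec *glbl_sec)
{
	UNSET_GLBL_SEC_BIT(glbl_sec->sec_array, 5);
}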
#define HL_MAX_SOBS_PER_MONITOR 8
/**
@@ -183,28 +251,32 @@ struct hl_gen_wait_properties {
/**
* struct pgt_info - MMU hop page info.
- * @node: hash linked-list node for the pgts shadow hash of pgts.
+ * @node: hash linked-list node for the pgts on host (shadow pgts for device resident MMU and
+ * actual pgts for host resident MMU).
* @phys_addr: physical address of the pgt.
- * @shadow_addr: shadow hop in the host.
+ * @virt_addr: host virtual address of the pgt (see above device/host resident).
+ * @shadow_addr: shadow hop in the host for device resident MMU.
* @ctx: pointer to the owner ctx.
- * @num_of_ptes: indicates how many ptes are used in the pgt.
+ * @num_of_ptes: indicates how many ptes are used in the pgt. used only for dynamically
+ * allocated HOPs (all HOPs but HOP0)
*
- * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
- * is needed during mapping, a new page is allocated and this structure holds
- * its essential information. During unmapping, if no valid PTEs remained in the
- * page, it is freed with its pgt_info structure.
+ * The MMU page tables hierarchy can be placed either on the device's DRAM (in which case shadow
+ * pgts will be stored on host memory) or on host memory (in which case no shadow is required).
+ *
+ * When a new level (hop) is needed during mapping this structure will be used to describe
+ * the newly allocated hop as well as to track number of PTEs in it.
+ * During unmapping, if no valid PTEs remained in the page of a newly allocated hop, it is
+ * freed with its pgt_info structure.
*/
struct pgt_info {
struct hlist_node node;
u64 phys_addr;
+ u64 virt_addr;
u64 shadow_addr;
struct hl_ctx *ctx;
int num_of_ptes;
};
-struct hl_device;
-struct hl_fpriv;
-
/**
* enum hl_pci_match_mode - pci match mode per region
* @PCI_ADDRESS_MATCH_MODE: address match mode
@@ -337,21 +409,23 @@ enum hl_collective_mode {
/**
* struct hw_queue_properties - queue information.
* @type: queue type.
- * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CB
- * that allocated by the Kernel driver and therefore,
- * a CB handle can be provided for jobs on this queue.
- * Otherwise, a CB address must be provided.
+ * @cb_alloc_flags: bitmap which indicates if the hw queue supports CB
+ * that allocated by the Kernel driver and therefore,
+ * a CB handle can be provided for jobs on this queue.
+ * Otherwise, a CB address must be provided.
* @collective_mode: collective mode of current queue
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
+ * @binned: True if the queue is binned out and should not be used
* @supports_sync_stream: True if queue supports sync stream
*/
struct hw_queue_properties {
- enum hl_queue_type type;
- enum queue_cb_alloc_flags cb_alloc_flags;
- enum hl_collective_mode collective_mode;
- u8 driver_only;
- u8 supports_sync_stream;
+ enum hl_queue_type type;
+ enum queue_cb_alloc_flags cb_alloc_flags;
+ enum hl_collective_mode collective_mode;
+ u8 driver_only;
+ u8 binned;
+ u8 supports_sync_stream;
};
/**
@@ -401,6 +475,8 @@ enum hl_device_hw_state {
* @hop_masks: array holds HOPs masks.
* @last_mask: mask to get the bit indicating this is the last hop.
* @pgt_size: size for page tables.
+ * @supported_pages_mask: bitmask for supported page size (relevant only for MMUs
+ * supporting multiple page size).
* @page_size: default page size used to allocate memory.
* @num_hops: The amount of hops supported by the translation table.
* @hop_table_size: HOP table size.
@@ -415,6 +491,7 @@ struct hl_mmu_properties {
u64 hop_masks[MMU_HOP_MAX];
u64 last_mask;
u64 pgt_size;
+ u64 supported_pages_mask;
u32 page_size;
u32 num_hops;
u32 hop_table_size;
@@ -455,7 +532,7 @@ struct hl_hints_range {
* @dram_user_base_address: DRAM physical start address for user access.
* @dram_size: DRAM total size.
* @dram_pci_bar_size: size of PCI bar towards DRAM.
- * @max_power_default: max power of the device after reset
+ * @max_power_default: max power of the device after reset.
* @dc_power_default: power consumed by the device in mode idle.
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
* fault.
@@ -463,12 +540,19 @@ struct hl_hints_range {
* @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
* @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
* @mmu_dram_default_page_addr: DRAM default page physical address.
+ * @tpc_enabled_mask: which TPCs are enabled.
+ * @tpc_binning_mask: which TPCs are binned. 0 means usable and 1 means binned.
+ * @dram_enabled_mask: which DRAMs are enabled.
+ * @dram_binning_mask: which DRAMs are binned. 0 means usable, 1 means binned.
* @cb_va_start_addr: virtual start address of command buffers which are mapped
* to the device's MMU.
* @cb_va_end_addr: virtual end address of command buffers which are mapped to
* the device's MMU.
* @dram_hints_align_mask: dram va hint addresses alignment mask which is used
* for hints validity check.
+ * @cfg_base_address: config space base address.
+ * @mmu_cache_mng_addr: address of the MMU cache.
+ * @mmu_cache_mng_size: size of the MMU cache.
* @device_dma_offset_for_host_access: the offset to add to host DMA addresses
* to enable the device to access them.
* @host_base_address: host physical start address for host DMA from device
@@ -493,6 +577,12 @@ struct hl_hints_range {
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
+ * @decoder_enabled_mask: which decoders are enabled.
+ * @decoder_binning_mask: which decoders are binned, 0 means usable and 1
+ * means binned (at most one binned decoder per dcore).
+ * @edma_enabled_mask: which EDMAs are enabled.
+ * @edma_binning_mask: which EDMAs are binned, 0 means usable and 1 means
+ * binned (at most one binned DMA).
* @max_pending_cs: maximum of concurrent pending command submissions
* @max_queues: maximum amount of queues in the system
* @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
@@ -513,6 +603,13 @@ struct hl_hints_range {
* @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
* status reported by FW, bit description can be
* found in CPU_BOOT_DEV_STS1
+ * @max_dec: maximum number of decoders
+ * @hmmu_hif_enabled_mask: mask of HMMUs/HIFs that are not isolated (enabled)
+ * 1- enabled, 0- isolated.
+ * @faulty_dram_cluster_map: mask of faulty DRAM cluster.
+ * 1- faulty cluster, 0- good cluster.
+ * @xbar_edge_enabled_mask: mask of XBAR_EDGEs that are not isolated (enabled)
+ * 1- enabled, 0- isolated.
* @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
@@ -523,14 +620,17 @@ struct hl_hints_range {
* @sync_stream_first_mon: first monitor available for sync stream use
* @first_available_user_sob: first sob available for the user
* @first_available_user_mon: first monitor available for the user
- * @first_available_user_msix_interrupt: first available msix interrupt
- * reserved for the user
+ * @first_available_user_interrupt: first available interrupt reserved for the user
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
+ * @user_dec_intr_count: number of decoder interrupts exposed to user.
+ * @cache_line_size: device cache line size.
* @server_type: Server type that the ASIC is currently installed in.
* The value is according to enum hl_server_type in uapi file.
- * @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
+ * @completion_mode: 0 - job based completion, 1 - cs based completion
+ * @mme_master_slave_mode: 0 - Each MME works independently, 1 - MME works
+ * in Master/Slave mode
* @fw_security_enabled: true if security measures are enabled in firmware,
* false otherwise
* @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
@@ -547,7 +647,7 @@ struct hl_hints_range {
* false otherwise.
* @use_get_power_for_reset_history: To support backward compatibility for Goya
* and Gaudi
- * @supports_soft_reset: is soft reset supported.
+ * @supports_compute_reset: is a reset which is not a hard-reset supported by this asic.
* @allow_inference_soft_reset: true if the ASIC supports soft reset that is
* initiated by user or TDR. This is only true
* in inference ASICs, as there is no real-world
@@ -585,9 +685,16 @@ struct asic_fixed_properties {
u64 pcie_aux_dbi_reg_addr;
u64 mmu_pgt_addr;
u64 mmu_dram_default_page_addr;
+ u64 tpc_enabled_mask;
+ u64 tpc_binning_mask;
+ u64 dram_enabled_mask;
+ u64 dram_binning_mask;
u64 cb_va_start_addr;
u64 cb_va_end_addr;
u64 dram_hints_align_mask;
+ u64 cfg_base_address;
+ u64 mmu_cache_mng_addr;
+ u64 mmu_cache_mng_size;
u64 device_dma_offset_for_host_access;
u64 host_base_address;
u64 host_end_address;
@@ -610,6 +717,10 @@ struct asic_fixed_properties {
u32 high_pll;
u32 cb_pool_cb_cnt;
u32 cb_pool_cb_size;
+ u32 decoder_enabled_mask;
+ u32 decoder_binning_mask;
+ u32 edma_enabled_mask;
+ u32 edma_binning_mask;
u32 max_pending_cs;
u32 max_queues;
u32 fw_preboot_cpu_boot_dev_sts0;
@@ -618,6 +729,10 @@ struct asic_fixed_properties {
u32 fw_bootfit_cpu_boot_dev_sts1;
u32 fw_app_cpu_boot_dev_sts0;
u32 fw_app_cpu_boot_dev_sts1;
+ u32 max_dec;
+ u32 hmmu_hif_enabled_mask;
+ u32 faulty_dram_cluster_map;
+ u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
u16 collective_first_sob;
u16 collective_first_mon;
@@ -625,12 +740,15 @@ struct asic_fixed_properties {
u16 sync_stream_first_mon;
u16 first_available_user_sob[HL_MAX_DCORES];
u16 first_available_user_mon[HL_MAX_DCORES];
- u16 first_available_user_msix_interrupt;
+ u16 first_available_user_interrupt;
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
+ u16 user_dec_intr_count;
+ u16 cache_line_size;
u16 server_type;
- u8 tpc_enabled_mask;
u8 completion_queues_count;
+ u8 completion_mode;
+ u8 mme_master_slave_mode;
u8 fw_security_enabled;
u8 fw_cpu_boot_dev_sts0_valid;
u8 fw_cpu_boot_dev_sts1_valid;
@@ -642,7 +760,7 @@ struct asic_fixed_properties {
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
u8 use_get_power_for_reset_history;
- u8 supports_soft_reset;
+ u8 supports_compute_reset;
u8 allow_inference_soft_reset;
u8 configurable_stop_on_err;
u8 set_max_power_on_device_init;
@@ -811,7 +929,6 @@ struct hl_cb {
* QUEUES
*/
-struct hl_cs;
struct hl_cs_job;
/* Queue length of external and HW queues */
@@ -934,12 +1051,14 @@ struct hl_cq {
* @wait_list_head: head to the list of user threads pending on this interrupt
* @wait_list_lock: protects wait_list_head
* @interrupt_id: msix interrupt id
+ * @is_decoder: whether this entry represents a decoder interrupt
*/
struct hl_user_interrupt {
struct hl_device *hdev;
struct list_head wait_list_head;
spinlock_t wait_list_lock;
u32 interrupt_id;
+ bool is_decoder;
};
/**
@@ -1025,23 +1144,36 @@ struct hl_eq {
bool check_eqe_index;
};
-
-/*
- * ASICs
+/**
+ * struct hl_dec - describes a decoder sw instance.
+ * @hdev: pointer to the device structure.
+ * @completion_abnrm_work: work object to run when the decoder generates an error interrupt
+ * @core_id: ID of the decoder.
+ * @base_addr: base address of the decoder.
*/
+struct hl_dec {
+ struct hl_device *hdev;
+ struct work_struct completion_abnrm_work;
+ u32 core_id;
+ u32 base_addr;
+};
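/*
 * Hedged illustration (not part of this patch): a minimal sketch of how the
 * per-device decoder instances could be initialized, assuming the hl_device
 * members (dec array, asic_prop.max_dec) and the get_dec_base_addr() ASIC
 * callback introduced later in this header. The function name is hypothetical.
 */
static void example_init_dec_instances(struct hl_device *hdev)
{
	struct hl_dec *dec;
	u32 i;

	for (i = 0 ; i < hdev->asic_prop.max_dec ; i++) {
		dec = &hdev->dec[i];

		dec->hdev = hdev;
		dec->core_id = i;
		/* each decoder exposes its register block at an ASIC-specific base */
		dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, i);
	}
}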
/**
* enum hl_asic_type - supported ASIC types.
* @ASIC_INVALID: Invalid ASIC type.
- * @ASIC_GOYA: Goya device.
- * @ASIC_GAUDI: Gaudi device.
+ * @ASIC_GOYA: Goya device (HL-1000).
+ * @ASIC_GAUDI: Gaudi device (HL-2000).
* @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
+ * @ASIC_GAUDI2: Gaudi2 device.
+ * @ASIC_GAUDI2_SEC: Gaudi2 secured device.
*/
enum hl_asic_type {
ASIC_INVALID,
ASIC_GOYA,
ASIC_GAUDI,
- ASIC_GAUDI_SEC
+ ASIC_GAUDI_SEC,
+ ASIC_GAUDI2,
+ ASIC_GAUDI2_SEC,
};
struct hl_cs_parser;
@@ -1177,6 +1309,24 @@ struct dynamic_fw_load_mgr {
};
/**
+ * struct pre_fw_load_props - needed properties for pre-FW load
+ * @cpu_boot_status_reg: cpu_boot_status register address
+ * @sts_boot_dev_sts0_reg: sts_boot_dev_sts0 register address
+ * @sts_boot_dev_sts1_reg: sts_boot_dev_sts1 register address
+ * @boot_err0_reg: boot_err0 register address
+ * @boot_err1_reg: boot_err1 register address
+ * @wait_for_preboot_timeout: timeout to poll for preboot ready
+ */
+struct pre_fw_load_props {
+ u32 cpu_boot_status_reg;
+ u32 sts_boot_dev_sts0_reg;
+ u32 sts_boot_dev_sts1_reg;
+ u32 boot_err0_reg;
+ u32 boot_err1_reg;
+ u32 wait_for_preboot_timeout;
+};
+
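/*
 * Hedged illustration (not part of this patch): a sketch of how an ASIC's
 * init_firmware_preload_params() callback might fill this structure, assuming
 * the fw_loader.pre_fw_load member added below. All register offsets and the
 * timeout value are hypothetical.
 */
static void example_init_firmware_preload_params(struct hl_device *hdev)
{
	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;

	pre_fw_load->cpu_boot_status_reg = 0xA000;		/* hypothetical */
	pre_fw_load->sts_boot_dev_sts0_reg = 0xA004;		/* hypothetical */
	pre_fw_load->sts_boot_dev_sts1_reg = 0xA008;		/* hypothetical */
	pre_fw_load->boot_err0_reg = 0xA00C;			/* hypothetical */
	pre_fw_load->boot_err1_reg = 0xA010;			/* hypothetical */
	pre_fw_load->wait_for_preboot_timeout = 100000;		/* usec, hypothetical */
}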
+/**
* struct fw_image_props - properties of FW image
* @image_name: name of the image
* @src_off: offset in src FW to copy from
@@ -1192,6 +1342,7 @@ struct fw_image_props {
* struct fw_load_mgr - manager FW loading process
* @dynamic_loader: specific structure for dynamic load
* @static_loader: specific structure for static load
+ * @pre_fw_load: needed parameters for pre FW load
* @boot_fit_img: boot fit image properties
* @linux_img: linux image properties
* @cpu_timeout: CPU response timeout in usec
@@ -1207,6 +1358,7 @@ struct fw_load_mgr {
struct dynamic_fw_load_mgr dynamic_loader;
struct static_fw_load_mgr static_loader;
};
+ struct pre_fw_load_props pre_fw_load;
struct fw_image_props boot_fit_img;
struct fw_image_props linux_img;
u32 cpu_timeout;
@@ -1217,6 +1369,8 @@ struct fw_load_mgr {
u8 fw_comp_loaded;
};
+struct hl_cs;
+
/**
 * struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
@@ -1248,7 +1402,7 @@ struct fw_load_mgr {
* dma_free_coherent(). This is ASIC function because
* its implementation is not trivial when the driver
* is loaded in simulation mode (not upstreamed).
- * @scrub_device_mem: Scrub device memory given an address and size
+ * @scrub_device_mem: Scrub the entire SRAM and DRAM.
* @scrub_device_dram: Scrub the dram memory of the device.
* @get_int_queue_base: get the internal queue base address.
* @test_queues: run simple test on all queues for sanity check.
@@ -1257,10 +1411,11 @@ struct fw_load_mgr {
* @asic_dma_pool_free: free small DMA allocation from pool.
* @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
* @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
+ * @asic_dma_unmap_single: unmap a single DMA buffer.
+ * @asic_dma_map_single: map a single buffer for DMA.
* @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
* @cs_parser: parse Command Submission.
* @asic_dma_map_sgtable: DMA map scatter-gather table.
- * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
* @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
* @update_eq_ci: update event queue CI.
* @context_switch: called upon ASID context switch.
@@ -1282,6 +1437,8 @@ struct fw_load_mgr {
* @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
* @hw_queues_lock: acquire H/W queues lock.
* @hw_queues_unlock: release H/W queues lock.
+ * @kdma_lock: acquire the KDMA lock. Relevant from GRECO ASIC
+ * @kdma_unlock: release the KDMA lock. Relevant from GRECO ASIC
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @get_monitor_dump: retrieve monitor registers dump from F/W.
@@ -1298,6 +1455,7 @@ struct fw_load_mgr {
* @halt_coresight: stop the ETF and ETR traces.
* @ctx_init: context dependent initialization.
* @ctx_fini: context dependent cleanup.
+ * @pre_schedule_cs: Perform pre-CS-scheduling operations.
* @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
* @load_firmware_to_device: load the firmware to the device's memory
* @load_boot_fit_to_device: load boot fit to device's memory
@@ -1308,9 +1466,11 @@ struct fw_load_mgr {
* @reset_sob: Reset a SOB.
* @reset_sob_group: Reset SOB group
* @get_device_time: Get the device time.
+ * @pb_print_security_errors: print security errors according to the block and cause
* @collective_wait_init_cs: Generate collective master/slave packets
* and place them in the relevant cs jobs
* @collective_wait_create_jobs: allocate collective wait cs jobs
+ * @get_dec_base_addr: get the base address of a given decoder.
* @scramble_addr: Routine to scramble the address prior of mapping it
* in the MMU.
* @descramble_addr: Routine to de-scramble the address prior of
@@ -1324,18 +1484,18 @@ struct fw_load_mgr {
* driver is ready to receive asynchronous events. This
* function should be called during the first init and
* after every hard-reset of the device
+ * @ack_mmu_errors: check and ack mmu errors, page fault, access violation.
* @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
* @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
* generic f/w compatible PLL Indexes
+ * @init_firmware_preload_params: initialize pre FW-load parameters.
* @init_firmware_loader: initialize data for FW loader.
* @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
* @state_dump_init: initialize constants required for state dump
* @get_sob_addr: get SOB base address offset.
* @set_pci_memory_regions: setting properties of PCI memory regions
* @get_stream_master_qid_arr: get pointer to stream masters QID array
- * @is_valid_dram_page_size: return true if page size is supported in device
- * memory allocation, otherwise false.
- * @get_valid_dram_page_orders: get valid device memory allocation page orders
+ * @check_if_razwi_happened: check if there was a razwi due to RR violation.
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
*/
@@ -1360,7 +1520,7 @@ struct hl_asic_funcs {
dma_addr_t *dma_handle, gfp_t flag);
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
- int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
+ int (*scrub_device_mem)(struct hl_device *hdev);
int (*scrub_device_dram)(struct hl_device *hdev, u64 val);
void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
@@ -1373,16 +1533,21 @@ struct hl_asic_funcs {
size_t size, dma_addr_t *dma_handle);
void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
size_t size, void *vaddr);
+ void (*asic_dma_unmap_single)(struct hl_device *hdev,
+ dma_addr_t dma_addr, int len,
+ enum dma_data_direction dir);
+ dma_addr_t (*asic_dma_map_single)(struct hl_device *hdev,
+ void *addr, int len,
+ enum dma_data_direction dir);
void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
struct sg_table *sgt,
enum dma_data_direction dir);
int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
- u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
- struct sg_table *sgt);
void (*add_end_of_cb_packets)(struct hl_device *hdev,
void *kernel_address, u32 len,
+ u32 original_len,
u64 cq_addr, u32 cq_val, u32 msix_num,
bool eb);
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
@@ -1410,6 +1575,8 @@ struct hl_asic_funcs {
int (*non_hard_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
+ void (*kdma_lock)(struct hl_device *hdev, int dcore_id);
+ void (*kdma_unlock)(struct hl_device *hdev, int dcore_id);
u32 (*get_pci_id)(struct hl_device *hdev);
int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
int (*get_monitor_dump)(struct hl_device *hdev, void *data);
@@ -1422,6 +1589,7 @@ struct hl_asic_funcs {
void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
int (*ctx_init)(struct hl_ctx *ctx);
void (*ctx_fini)(struct hl_ctx *ctx);
+ int (*pre_schedule_cs)(struct hl_cs *cs);
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
int (*load_firmware_to_device)(struct hl_device *hdev);
int (*load_boot_fit_to_device)(struct hl_device *hdev);
@@ -1434,11 +1602,14 @@ struct hl_asic_funcs {
void (*reset_sob)(struct hl_device *hdev, void *data);
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
u64 (*get_device_time)(struct hl_device *hdev);
+ void (*pb_print_security_errors)(struct hl_device *hdev,
+ u32 block_addr, u32 cause, u32 offended_addr);
int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
u32 wait_queue_id, u32 collective_engine_id,
u32 encaps_signal_offset);
+ u32 (*get_dec_base_addr)(struct hl_device *hdev, u32 core_id);
u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
void (*ack_protection_bits_errors)(struct hl_device *hdev);
@@ -1447,20 +1618,21 @@ struct hl_asic_funcs {
int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
+ int (*ack_mmu_errors)(struct hl_device *hdev, u64 mmu_cap_mask);
void (*get_msi_info)(__le32 *table);
int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
+ void (*init_firmware_preload_params)(struct hl_device *hdev);
void (*init_firmware_loader)(struct hl_device *hdev);
void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
void (*state_dump_init)(struct hl_device *hdev);
u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
void (*set_pci_memory_regions)(struct hl_device *hdev);
u32* (*get_stream_master_qid_arr)(void);
- bool (*is_valid_dram_page_size)(u32 page_size);
+ void (*check_if_razwi_happened)(struct hl_device *hdev);
int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
u32 page_size, u32 *real_page_size, bool is_dram_addr);
- void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
- int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region,
- enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+ int (*access_dev_mem)(struct hl_device *hdev, enum pci_region region_type,
+ u64 addr, u64 *val, enum debugfs_access_type acc_type);
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
};
@@ -1535,16 +1707,55 @@ struct hl_dmabuf_priv {
uint64_t device_address;
};
+#define HL_CS_OUTCOME_HISTORY_LEN 256
+
+/**
+ * struct hl_cs_outcome - represents a single completed CS outcome
+ * @list_link: link to either the container's used list or free list
+ * @map_link: link into the container's hash map
+ * @ts: completion timestamp
+ * @seq: the original cs sequence
+ * @error: error code the cs completed with, if any
+ */
+struct hl_cs_outcome {
+ struct list_head list_link;
+ struct hlist_node map_link;
+ ktime_t ts;
+ u64 seq;
+ int error;
+};
+
+/**
+ * struct hl_cs_outcome_store - represents a limited store of completed CS outcomes
+ * @outcome_map: index of completed CS outcomes, searchable by sequence number
+ * @used_list: list of outcome objects currently in use
+ * @free_list: list of outcome objects currently not in use
+ * @nodes_pool: a static pool of preallocated outcome objects
+ * @db_lock: any operation on the store must take this lock
+ */
+struct hl_cs_outcome_store {
+ DECLARE_HASHTABLE(outcome_map, 8);
+ struct list_head used_list;
+ struct list_head free_list;
+ struct hl_cs_outcome nodes_pool[HL_CS_OUTCOME_HISTORY_LEN];
+ spinlock_t db_lock;
+};
+
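/*
 * Hedged illustration (not part of this patch): a sketch of how a completed CS
 * outcome could be recorded in the store - take a node from free_list (or
 * recycle the oldest used node), fill it and hash it by sequence number, all
 * under db_lock. The function name is hypothetical.
 */
static void example_store_outcome(struct hl_cs_outcome_store *store,
					ktime_t ts, u64 seq, int error)
{
	struct hl_cs_outcome *node;

	spin_lock(&store->db_lock);

	if (!list_empty(&store->free_list)) {
		node = list_first_entry(&store->free_list,
					struct hl_cs_outcome, list_link);
	} else {
		/* no free nodes left - recycle the oldest used entry */
		node = list_last_entry(&store->used_list,
					struct hl_cs_outcome, list_link);
		hash_del(&node->map_link);
	}
	list_del(&node->list_link);

	node->ts = ts;
	node->seq = seq;
	node->error = error;

	list_add(&node->list_link, &store->used_list);
	hash_add(store->outcome_map, &node->map_link, node->seq);

	spin_unlock(&store->db_lock);
}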
/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
+ * @hr_mmu_phys_hash: if host-resident MMU is used, holds a mapping from
+ * MMU-hop-page physical address to its host-resident
+ * pgt_info structure.
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
 * this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
+ * @outcome_store: storage data structure used to remember outcomes of completed
+ * command submissions for a long time after CS id wraparound.
* @va_range: holds available virtual addresses for host and dram mappings.
* @mem_hash_lock: protects the mem_hash.
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
@@ -1576,10 +1787,12 @@ struct hl_dmabuf_priv {
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(hr_mmu_phys_hash, MMU_HASH_TABLE_BITS);
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
struct hl_fence **cs_pending;
+ struct hl_cs_outcome_store outcome_store;
struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
struct mutex mem_hash_lock;
struct mutex mmu_lock;
@@ -1601,12 +1814,12 @@ struct hl_ctx {
/**
* struct hl_ctx_mgr - for handling multiple contexts.
- * @ctx_lock: protects ctx_handles.
- * @ctx_handles: idr to hold all ctx handles.
+ * @lock: protects ctx_handles.
+ * @handles: idr to hold all ctx handles.
*/
struct hl_ctx_mgr {
- struct mutex ctx_lock;
- struct idr ctx_handles;
+ struct mutex lock;
+ struct idr handles;
};
@@ -1665,6 +1878,7 @@ struct hl_userptr {
* @timeout_jiffies: cs timeout in jiffies.
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
+ * @jobs_cnt: counter of submitted jobs on all queues.
* @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
* @sob_addr_offset: sob offset from the configuration base address.
* @initial_sob_count: count of completed signals in SOB before current submission of signal or
@@ -1703,6 +1917,7 @@ struct hl_cs {
u64 timeout_jiffies;
u64 submission_time_jiffies;
enum hl_cs_type type;
+ u32 jobs_cnt;
u32 encaps_sig_hdl_id;
u32 sob_addr_offset;
u16 initial_sob_count;
@@ -1961,6 +2176,8 @@ struct hl_notifier_event {
* @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
+ * @ctx_lock: protects the pointer to the current executing context. TODO: remove for multiple
+ * ctx per process.
*/
struct hl_fpriv {
struct hl_device *hdev;
@@ -1974,6 +2191,7 @@ struct hl_fpriv {
struct list_head dev_node;
struct kref refcount;
struct mutex restore_phase_mutex;
+ struct mutex ctx_lock;
};
@@ -2027,8 +2245,8 @@ struct hl_debugfs_entry {
* @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @mmu_cap_mask: mmu hw capability mask, to be used in mmu_ack_error.
* @userptr_lookup: the target user ptr to look up for on demand.
- * @memory_scrub_val: the value to which the dram will be scrubbed to using cb scrub_device_dram
* @mmu_asid: ASID to use while translating in mmu_show.
* @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
@@ -2058,8 +2276,8 @@ struct hl_dbg_device_entry {
struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
+ u64 mmu_cap_mask;
u64 userptr_lookup;
- u64 memory_scrub_val;
u32 mmu_asid;
u32 state_dump_head;
u8 i2c_bus;
@@ -2255,9 +2473,11 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
/* Timeout should be longer when working with simulator but cap the
* increased timeout to some maximum
*/
-#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+#define hl_poll_timeout_common(hdev, addr, val, cond, sleep_us, timeout_us, elbi) \
({ \
ktime_t __timeout; \
+ u32 __elbi_read; \
+ int __rc = 0; \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
@@ -2266,19 +2486,103 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
- (val) = RREG32(addr); \
+ if (elbi) { \
+ __rc = hl_pci_elbi_read(hdev, addr, &__elbi_read); \
+ if (__rc) \
+ break; \
+ (val) = __elbi_read; \
+ } else {\
+ (val) = RREG32((u32)addr); \
+ } \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = RREG32(addr); \
+ if (elbi) { \
+ __rc = hl_pci_elbi_read(hdev, addr, &__elbi_read); \
+ if (__rc) \
+ break; \
+ (val) = __elbi_read; \
+ } else {\
+ (val) = RREG32((u32)addr); \
+ } \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ __rc ? __rc : ((cond) ? 0 : -ETIMEDOUT); \
})
+#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+ hl_poll_timeout_common(hdev, addr, val, cond, sleep_us, timeout_us, false)
+
+#define hl_poll_timeout_elbi(hdev, addr, val, cond, sleep_us, timeout_us) \
+ hl_poll_timeout_common(hdev, addr, val, cond, sleep_us, timeout_us, true)
+
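/*
 * Hedged illustration (not part of this patch): minimal usage sketch of
 * hl_poll_timeout(). The register offset and status bit are hypothetical;
 * real callers pass ASIC-specific addresses and conditions. Returns 0 when
 * the condition is met, -ETIMEDOUT on timeout (or the ELBI read error code
 * for the _elbi variant).
 */
static int example_wait_for_status_bit(struct hl_device *hdev)
{
	u32 status;

	/* poll every 100us, up to 1s, until bit 0 of the register is set */
	return hl_poll_timeout(hdev, 0x1000 /* hypothetical offset */, status,
				status & BIT(0), 100, 1000000);
}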
+/*
+ * poll an array of register addresses.
+ * the condition is satisfied if all register values match the expected value.
+ * once a register in the array satisfies the condition it will not be polled again,
+ * this is done both for efficiency and because some registers are "clear on read".
+ * TODO: use read from PCI bar in other places in the code (SW-91406)
+ */
+#define hl_poll_reg_array_timeout_common(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us, elbi) \
+({ \
+ ktime_t __timeout; \
+ u64 __elem_bitmask; \
+ u32 __read_val; \
+ u8 __arr_idx; \
+ int __rc = 0; \
+ \
+ if (hdev->pdev) \
+ __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ else \
+ __timeout = ktime_add_us(ktime_get(),\
+ min(((u64)timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
+ \
+ might_sleep_if(sleep_us); \
+ if (arr_size >= 64) \
+ __rc = -EINVAL; \
+ else \
+ __elem_bitmask = BIT_ULL(arr_size) - 1; \
+ for (;;) { \
+ if (__rc) \
+ break; \
+ for (__arr_idx = 0; __arr_idx < (arr_size); __arr_idx++) { \
+ if (!(__elem_bitmask & BIT_ULL(__arr_idx))) \
+ continue; \
+ if (elbi) { \
+ __rc = hl_pci_elbi_read(hdev, (addr_arr)[__arr_idx], &__read_val); \
+ if (__rc) \
+ break; \
+ } else { \
+ __read_val = RREG32((u32)(addr_arr)[__arr_idx]); \
+ } \
+ if (__read_val == (expected_val)) \
+ __elem_bitmask &= ~BIT_ULL(__arr_idx); \
+ } \
+ if (__rc || (__elem_bitmask == 0)) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ __rc ? __rc : ((__elem_bitmask == 0) ? 0 : -ETIMEDOUT); \
+})
+
+#define hl_poll_reg_array_timeout(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us) \
+ hl_poll_reg_array_timeout_common(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us, false)
+
+#define hl_poll_reg_array_timeout_elbi(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us) \
+ hl_poll_reg_array_timeout_common(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us, true)
+
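/*
 * Hedged illustration (not part of this patch): minimal usage sketch of
 * hl_poll_reg_array_timeout(). The register offsets are hypothetical; each
 * register is polled until it reads back the expected value and is then
 * dropped from the poll set, so "clear on read" registers are read only once
 * after they match.
 */
static int example_wait_for_all_ready(struct hl_device *hdev)
{
	static const u32 status_regs[] = { 0x2000, 0x2004, 0x2008 };	/* hypothetical */

	return hl_poll_reg_array_timeout(hdev, status_regs, ARRAY_SIZE(status_regs),
					0x1 /* expected value */, 1000, 1000000);
}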
/*
* address in this macro points always to a memory location in the
* host's (server's) memory. That location is updated asynchronously
@@ -2299,7 +2603,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
__timeout = ktime_add_us(ktime_get(),\
- min((u64)(timeout_us * 10), \
+ min((u64)(timeout_us * 100), \
(u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
@@ -2322,29 +2626,21 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(cond) ? 0 : -ETIMEDOUT; \
})
-#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
- timeout_us) \
+#define HL_USR_MAPPED_BLK_INIT(blk, base, sz) \
({ \
- ktime_t __timeout; \
- if (hdev->pdev) \
- __timeout = ktime_add_us(ktime_get(), timeout_us); \
- else \
- __timeout = ktime_add_us(ktime_get(),\
- min((u64)(timeout_us * 10), \
- (u64) HL_SIM_MAX_TIMEOUT_US)); \
- might_sleep_if(sleep_us); \
- for (;;) { \
- (val) = readl(addr); \
- if (cond) \
- break; \
- if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = readl(addr); \
- break; \
- } \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
- } \
- (cond) ? 0 : -ETIMEDOUT; \
+ struct user_mapped_block *p = blk; \
+\
+ p->address = base; \
+ p->size = sz; \
+})
+
+#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, decoder) \
+({ \
+ usr_intr.hdev = hdev; \
+ usr_intr.interrupt_id = intr_id; \
+ usr_intr.is_decoder = decoder; \
+ INIT_LIST_HEAD(&usr_intr.wait_list_head); \
+ spin_lock_init(&usr_intr.wait_list_lock); \
})
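/*
 * Hedged illustration (not part of this patch): usage sketch for the two init
 * helpers above. Note that HL_USR_MAPPED_BLK_INIT() takes a pointer to the
 * block, while HL_USR_INTR_STRUCT_INIT() operates on the struct itself. The
 * block address/size and interrupt id below are hypothetical, and struct
 * user_mapped_block is defined further down in this header.
 */
static void example_init_user_objects(struct hl_device *hdev,
					struct user_mapped_block *blk,
					struct hl_user_interrupt *intr)
{
	/* fill the user-mappable HW block descriptor through the pointer */
	HL_USR_MAPPED_BLK_INIT(blk, 0x7c000000, 0x1000);

	/* initialize a regular (non-decoder) user interrupt entry */
	HL_USR_INTR_STRUCT_INIT((*intr), hdev, 25, false);
}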
struct hwmon_chip_info;
@@ -2364,27 +2660,15 @@ struct hl_device_reset_work {
};
/**
- * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
- * information.
- * @virt_addr: the virtual address of the hop.
- * @phys-addr: the physical address of the hop (used by the device-mmu).
- * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
- */
-struct hr_mmu_hop_addrs {
- u64 virt_addr;
- u64 phys_addr;
- u64 shadow_addr;
-};
-
-/**
* struct hl_mmu_hr_pgt_priv - used for holding per-device mmu host-resident
* page-table internal information.
- * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
- * @mmu_shadow_hop0: shadow array of hop0 tables.
+ * @mmu_pgt_pool: pool of page tables used by a host-resident MMU for
+ * allocating hops.
+ * @mmu_asid_hop0: per-ASID array of host-resident hop0 tables.
*/
struct hl_mmu_hr_priv {
- struct gen_pool *mmu_pgt_pool;
- struct hr_mmu_hop_addrs *mmu_shadow_hop0;
+ struct gen_pool *mmu_pgt_pool;
+ struct pgt_info *mmu_asid_hop0;
};
/**
@@ -2437,12 +2721,28 @@ struct hl_mmu_per_hop_info {
struct hl_mmu_hop_info {
u64 scrambled_vaddr;
u64 unscrambled_paddr;
- struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
+ struct hl_mmu_per_hop_info hop_info[MMU_ARCH_6_HOPS];
u32 used_hops;
enum hl_va_range_type range_type;
};
/**
+ * struct hl_hr_mmu_funcs - Device related host resident MMU functions.
+ * @get_hop0_pgt_info: get page table info structure for HOP0.
+ * @get_pgt_info: get page table info structure for HOP other than HOP0.
+ * @add_pgt_info: add page table info structure to hash.
+ * @get_tlb_mapping_params: get mapping parameters needed for getting TLB info for specific mapping.
+ */
+struct hl_hr_mmu_funcs {
+ struct pgt_info *(*get_hop0_pgt_info)(struct hl_ctx *ctx);
+ struct pgt_info *(*get_pgt_info)(struct hl_ctx *ctx, u64 phys_hop_addr);
+ void (*add_pgt_info)(struct hl_ctx *ctx, struct pgt_info *pgt_info, dma_addr_t phys_addr);
+ int (*get_tlb_mapping_params)(struct hl_device *hdev, struct hl_mmu_properties **mmu_prop,
+ struct hl_mmu_hop_info *hops,
+ u64 virt_addr, bool *is_huge);
+};
+
+/**
* struct hl_mmu_funcs - Device related MMU functions.
* @init: initialize the MMU module.
* @fini: release the MMU module.
@@ -2456,22 +2756,21 @@ struct hl_mmu_hop_info {
* @get_tlb_info: returns the list of hops and hop-entries used that were
 * created in order to translate the given virtual address to a
* physical one.
+ * @hr_funcs: functions specific to host resident MMU.
*/
struct hl_mmu_funcs {
int (*init)(struct hl_device *hdev);
void (*fini)(struct hl_device *hdev);
int (*ctx_init)(struct hl_ctx *ctx);
void (*ctx_fini)(struct hl_ctx *ctx);
- int (*map)(struct hl_ctx *ctx,
- u64 virt_addr, u64 phys_addr, u32 page_size,
- bool is_dram_addr);
- int (*unmap)(struct hl_ctx *ctx,
- u64 virt_addr, bool is_dram_addr);
+ int (*map)(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool is_dram_addr);
+ int (*unmap)(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr);
void (*flush)(struct hl_ctx *ctx);
void (*swap_out)(struct hl_ctx *ctx);
void (*swap_in)(struct hl_ctx *ctx);
- int (*get_tlb_info)(struct hl_ctx *ctx,
- u64 virt_addr, struct hl_mmu_hop_info *hops);
+ int (*get_tlb_info)(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops);
+ struct hl_hr_mmu_funcs hr_funcs;
};
/**
@@ -2568,23 +2867,33 @@ struct hl_clk_throttle {
};
/**
+ * struct user_mapped_block - describes a hw block allowed to be mmapped by user
+ * @address: physical HW block address
+ * @size: allowed size for mmap
+ */
+struct user_mapped_block {
+ u32 address;
+ u32 size;
+};
+
+/**
* struct cs_timeout_info - info of last CS timeout occurred.
* @timestamp: CS timeout timestamp.
- * @write_disable: if set writing to CS parameters in the structure is disabled so,
- * the first (root cause) CS timeout will not be overwritten.
+ * @write_enable: if set, writing to CS parameters in the structure is enabled; otherwise it is
+ * disabled, so the first (root cause) CS timeout will not be overwritten.
* @seq: CS timeout sequence number.
*/
struct cs_timeout_info {
ktime_t timestamp;
- atomic_t write_disable;
+ atomic_t write_enable;
u64 seq;
};
/**
* struct razwi_info - info about last razwi error occurred.
* @timestamp: razwi timestamp.
- * @write_disable: if set writing to razwi parameters in the structure is disabled so the
- * first (root cause) razwi will not be overwritten.
+ * @write_enable: if set, writing to razwi parameters in the structure is enabled;
+ * otherwise it is disabled, so the first (root cause) razwi will not be overwritten.
* @addr: address that caused razwi.
* @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
* not have engine id it will be set to U16_MAX.
@@ -2596,7 +2905,7 @@ struct cs_timeout_info {
*/
struct razwi_info {
ktime_t timestamp;
- atomic_t write_disable;
+ atomic_t write_enable;
u64 addr;
u16 engine_id_1;
u16 engine_id_2;
@@ -2604,25 +2913,59 @@ struct razwi_info {
u8 type;
};
+#define MAX_QMAN_STREAMS_INFO 4
+#define OPCODE_INFO_MAX_ADDR_SIZE 8
+/**
+ * struct undefined_opcode_info - info about last undefined opcode error
+ * @timestamp: timestamp of the undefined opcode error
+ * @cb_addr_streams: CB addresses (per stream) that currently exist in the PQ
+ *                   entries. In case all stream array entries are
+ *                   filled with values, it means the execution was in Lower-CP.
+ * @cq_addr: the address of the current handled command buffer
+ * @cq_size: the size of the current handled command buffer
+ * @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array.
+ *                       should be equal to 1 in case of an undefined opcode
+ *                       in Upper-CP (specific stream) and equal to 4 in case
+ *                       of an undefined opcode in Lower-CP.
+ * @engine_id: engine-id that the error occurred on
+ * @stream_id: the stream id the error occurred on. In case the stream equals
+ *             MAX_QMAN_STREAMS_INFO it means the error occurred on a Lower-CP.
+ * @write_enable: if set, writing to undefined opcode parameters in the structure
+ *                is enabled, so the first (root cause) undefined opcode will not be
+ *                overwritten.
+ */
+struct undefined_opcode_info {
+ ktime_t timestamp;
+ u64 cb_addr_streams[MAX_QMAN_STREAMS_INFO][OPCODE_INFO_MAX_ADDR_SIZE];
+ u64 cq_addr;
+ u32 cq_size;
+ u32 cb_addr_streams_len;
+ u32 engine_id;
+ u32 stream_id;
+ bool write_enable;
+};
+
/**
* struct last_error_session_info - info about last session errors occurred.
* @cs_timeout: CS timeout error last information.
* @razwi: razwi last information.
+ * @undef_opcode: undefined opcode information
*/
struct last_error_session_info {
- struct cs_timeout_info cs_timeout;
- struct razwi_info razwi;
+ struct cs_timeout_info cs_timeout;
+ struct razwi_info razwi;
+ struct undefined_opcode_info undef_opcode;
};
/**
* struct hl_reset_info - holds current device reset information.
* @lock: lock to protect critical reset flows.
- * @soft_reset_cnt: number of soft reset since the driver was loaded.
- * @hard_reset_cnt: number of hard reset since the driver was loaded.
- * @hard_reset_schedule_flags: hard reset is scheduled to after current soft reset,
+ * @compute_reset_cnt: number of compute resets since the driver was loaded.
+ * @hard_reset_cnt: number of hard resets since the driver was loaded.
+ * @hard_reset_schedule_flags: hard reset is scheduled after the current compute reset,
* here we hold the hard reset flags.
* @in_reset: is device in reset flow.
- * @is_in_soft_reset: Device is currently in soft reset process.
+ * @in_compute_reset: Device is currently in reset but not in hard-reset.
* @needs_reset: true if reset_on_lockup is false and device should be reset
* due to lockup.
* @hard_reset_pending: is there a hard reset work pending.
@@ -2637,11 +2980,11 @@ struct last_error_session_info {
*/
struct hl_reset_info {
spinlock_t lock;
- u32 soft_reset_cnt;
+ u32 compute_reset_cnt;
u32 hard_reset_cnt;
u32 hard_reset_schedule_flags;
u8 in_reset;
- u8 is_in_soft_reset;
+ u8 in_compute_reset;
u8 needs_reset;
u8 hard_reset_pending;
@@ -2671,12 +3014,17 @@ struct hl_reset_info {
* @user_interrupt: array of hl_user_interrupt. upon the corresponding user
* interrupt, driver will monitor the list of fences
* registered to this interrupt.
- * @common_user_interrupt: common user interrupt for all user interrupts.
- * upon any user interrupt, driver will monitor the
+ * @common_user_cq_interrupt: common user CQ interrupt for all user CQ interrupts.
+ * upon any user CQ interrupt, driver will monitor the
* list of fences registered to this common structure.
+ * @common_decoder_interrupt: common decoder interrupt for all user decoder interrupts.
+ * @shadow_cs_queue: pointer to a shadow queue that holds pointers to
+ * outstanding command submissions.
* @cq_wq: work queues of completion queues for executing work in process
* context.
* @eq_wq: work queue of event queue for executing work in process context.
+ * @cs_cmplt_wq: work queue of CS completions for executing work in process
+ * context.
* @ts_free_obj_wq: work queue for timestamp registration objects release.
* @pf_wq: work queue for MMU pre-fetch operations.
* @kernel_ctx: Kernel driver context structure.
@@ -2716,6 +3064,7 @@ struct hl_reset_info {
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
+ * @dec: list of decoder sw instances
* @fw_loader: FW loader manager.
* @pci_mem_region: array of memory regions in the PCI
* @state_dump_specs: constants and dictionaries needed to dump system state.
@@ -2724,8 +3073,10 @@ struct hl_reset_info {
* @last_error: holds information about last session in which CS timeout or razwi error occurred.
* @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
- * @fw_major_version: major version of current loaded preboot
+ * @fw_major_version: major version of current loaded preboot.
+ * @fw_minor_version: minor version of current loaded preboot.
* @dram_used_mem: current DRAM memory consumption.
+ * @memory_scrub_val: the value to which the dram will be scrubbed using the scrub_device_dram cb
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
@@ -2747,10 +3098,18 @@ struct hl_reset_info {
* used for CPU boot status
* @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec.
* used for COMMs protocols cmds(COMMS_STS_*)
+ * @dram_binning: contains mask of drams that is received from the f/w which indicates which
+ * drams are binned-out
+ * @tpc_binning: contains mask of tpc engines that is received from the f/w which indicates which
+ * tpc engines are binned-out
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
+ * @decoder_binning: contains mask of decoder engines that is received from the f/w which
+ * indicates which decoder engines are binned-out
+ * @edma_binning: contains mask of edma engines that is received from the f/w which
+ * indicates which edma engines are binned-out
* @id: device minor.
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
@@ -2759,7 +3118,6 @@ struct hl_reset_info {
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
- * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
* otherwise.
* @dram_default_page_mapping: is DRAM default page mapping enabled.
@@ -2792,6 +3150,21 @@ struct hl_reset_info {
* @is_compute_ctx_active: Whether there is an active compute context executing.
* @compute_ctx_in_release: true if the current compute context is being released.
* @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
+ * @reset_upon_device_release: reset the device when the user closes the file descriptor of the
+ * device.
+ * @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
+ * @fw_components: Controls which f/w components to load to the device. There are multiple f/w
+ * stages and sometimes we want to stop at a certain stage. Used only for testing.
+ * @mmu_enable: Whether to enable or disable the device MMU(s). Used only for testing.
+ * @cpu_queues_enable: Whether to enable queues communication vs. the f/w. Used only for testing.
+ * @pldm: Whether we are running in Palladium environment. Used only for testing.
+ * @hard_reset_on_fw_events: Whether to do device hard-reset when a fatal event is received from
+ * the f/w. Used only for testing.
+ * @bmc_enable: Whether we are running in a box with BMC. Used only for testing.
+ * @reset_on_preboot_fail: Whether to reset the device if preboot f/w fails to load.
+ * Used only for testing.
+ * @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
+ * that the f/w is always alive. Used only for testing.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2809,9 +3182,12 @@ struct hl_device {
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
struct hl_user_interrupt *user_interrupt;
- struct hl_user_interrupt common_user_interrupt;
+ struct hl_user_interrupt common_user_cq_interrupt;
+ struct hl_user_interrupt common_decoder_interrupt;
+ struct hl_cs **shadow_cs_queue;
struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
+ struct workqueue_struct *cs_cmplt_wq;
struct workqueue_struct *ts_free_obj_wq;
struct workqueue_struct *pf_wq;
struct hl_ctx *kernel_ctx;
@@ -2855,6 +3231,8 @@ struct hl_device {
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
+ struct hl_dec *dec;
+
struct fw_load_mgr fw_loader;
struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
@@ -2870,7 +3248,9 @@ struct hl_device {
u32 *stream_master_qid_arr;
u32 fw_major_version;
+ u32 fw_minor_version;
atomic64_t dram_used_mem;
+ u64 memory_scrub_val;
u64 timeout_jiffies;
u64 max_power;
u64 boot_error_status_mask;
@@ -2881,10 +3261,14 @@ struct hl_device {
u64 fw_poll_interval_usec;
ktime_t last_successful_open_ktime;
u64 fw_comms_poll_interval_usec;
+ u64 dram_binning;
+ u64 tpc_binning;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
+ u32 decoder_binning;
+ u32 edma_binning;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
@@ -2892,7 +3276,6 @@ struct hl_device {
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
- u8 heartbeat;
u8 reset_on_lockup;
u8 dram_default_page_mapping;
u8 memory_scrub;
@@ -2916,24 +3299,18 @@ struct hl_device {
u8 is_compute_ctx_active;
u8 compute_ctx_in_release;
u8 supports_mmu_prefetch;
+ u8 reset_upon_device_release;
/* Parameters for bring-up */
u64 nic_ports_mask;
u64 fw_components;
u8 mmu_enable;
- u8 mmu_huge_page_opt;
- u8 reset_pcilink;
u8 cpu_queues_enable;
u8 pldm;
- u8 axi_drain;
- u8 sram_scrambler_enable;
- u8 dram_scrambler_enable;
u8 hard_reset_on_fw_events;
u8 bmc_enable;
- u8 rl_enable;
u8 reset_on_preboot_fail;
- u8 reset_upon_device_release;
- u8 reset_if_device_not_idle;
+ u8 heartbeat;
};
@@ -3049,13 +3426,22 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag);
+void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle);
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
+void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle);
+void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr);
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type);
-int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
- enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
+ u64 addr, u64 *val, enum debugfs_access_type acc_type);
int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
@@ -3085,7 +3471,8 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
-irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
+irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
+irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg);
irqreturn_t hl_irq_handler_default(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
@@ -3119,7 +3506,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct cpucp_sensor *sensors_arr);
-void hl_notifier_event_send_all(struct hl_device *hdev, u64 event);
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask);
int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);
@@ -3158,6 +3545,7 @@ void hl_multi_cs_completion_init(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
+void gaudi2_set_asic_funcs(struct hl_device *hdev);
int hl_vm_ctx_init(struct hl_ctx *ctx);
void hl_vm_ctx_fini(struct hl_ctx *ctx);
@@ -3201,10 +3589,39 @@ int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va,
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u8 hop_idx, u64 hop_addr, u64 virt_addr);
+void hl_mmu_hr_flush(struct hl_ctx *ctx);
+int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
+ u64 pgt_size);
+void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size);
+void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size);
+u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, u64 phys_pte_addr,
+ u32 hop_table_size);
+void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u64 val, u32 hop_table_size);
+void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u32 hop_table_size);
+int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size);
+void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr);
+struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
+ struct hl_hr_mmu_funcs *hr_func,
+ u64 curr_pte);
+struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop);
+struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
+ struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop,
+ u64 curr_pte, bool *is_new_hop);
+int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
+ struct hl_hr_mmu_funcs *hr_func);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
+void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
@@ -3214,7 +3631,7 @@ bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
-int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value);
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result);
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
@@ -3248,10 +3665,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
int hl_fw_init_cpu(struct hl_device *hdev);
-int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 sts_boot_dev_sts0_reg,
- u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
- u32 boot_err1_reg, u32 timeout);
+int hl_fw_read_preboot_status(struct hl_device *hdev);
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_cmd cmd, unsigned int size,
@@ -3298,6 +3712,11 @@ void hl_encaps_handle_do_release(struct kref *ref);
void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs *cs, struct hl_cs_job *job,
struct hl_cs_compl *cs_cmpl);
+
+int hl_dec_init(struct hl_device *hdev);
+void hl_dec_fini(struct hl_device *hdev);
+void hl_dec_ctx_fini(struct hl_ctx *ctx);
+
void hl_release_pending_user_interrupts(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
@@ -3426,6 +3845,55 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
#endif
+/* Security */
+int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
+ const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
+ int array_size);
+int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
+ int mm_array_size, int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], int blocks_array_size);
+void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], u32 block_offset,
+ int array_size);
+void hl_secure_block(struct hl_device *hdev,
+ struct hl_block_glbl_sec sgs_array[], int array_size);
+int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size, u64 mask);
+int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size);
+int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size,
+ u64 mask);
+int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array,
+ u32 regs_range_array_size);
+int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size);
+int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array,
+ u32 regs_range_array_size);
+void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size);
+void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size, u64 mask);
+void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size);
+
/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 37edb69a7255..f733ead605e7 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -54,10 +54,15 @@ MODULE_PARM_DESC(boot_error_status_mask,
#define PCI_IDS_GAUDI 0x1000
#define PCI_IDS_GAUDI_SEC 0x1010
+#define PCI_IDS_GAUDI2 0x1020
+#define PCI_IDS_GAUDI2_SEC 0x1030
+
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2_SEC), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
@@ -84,6 +89,12 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GAUDI_SEC:
asic_type = ASIC_GAUDI_SEC;
break;
+ case PCI_IDS_GAUDI2:
+ asic_type = ASIC_GAUDI2;
+ break;
+ case PCI_IDS_GAUDI2_SEC:
+ asic_type = ASIC_GAUDI2_SEC;
+ break;
default:
asic_type = ASIC_INVALID;
break;
@@ -96,6 +107,7 @@ static bool is_asic_secured(enum hl_asic_type asic_type)
{
switch (asic_type) {
case ASIC_GAUDI_SEC:
+ case ASIC_GAUDI2_SEC:
return true;
default:
return false;
@@ -137,6 +149,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
mutex_init(&hpriv->notifier_event.lock);
mutex_init(&hpriv->restore_phase_mutex);
+ mutex_init(&hpriv->ctx_lock);
kref_init(&hpriv->refcount);
nonseekable_open(inode, filp);
@@ -152,7 +165,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
- if (status == HL_DEVICE_STATUS_IN_RESET)
+ if (status == HL_DEVICE_STATUS_IN_RESET ||
+ status == HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE)
rc = -EAGAIN;
else
rc = -EPERM;
@@ -195,8 +209,9 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
- atomic_set(&hdev->last_error.cs_timeout.write_disable, 0);
- atomic_set(&hdev->last_error.razwi.write_disable, 0);
+ atomic_set(&hdev->last_error.cs_timeout.write_enable, 1);
+ atomic_set(&hdev->last_error.razwi.write_enable, 1);
+ hdev->last_error.undef_opcode.write_enable = true;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -209,6 +224,7 @@ out_err:
hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
+ mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
@@ -277,43 +293,56 @@ out_err:
static void set_driver_behavior_per_device(struct hl_device *hdev)
{
- hdev->pldm = 0;
+ hdev->nic_ports_mask = 0;
hdev->fw_components = FW_TYPE_ALL_TYPES;
+ hdev->mmu_enable = MMU_EN_ALL;
hdev->cpu_queues_enable = 1;
- hdev->heartbeat = 1;
- hdev->mmu_enable = 1;
- hdev->sram_scrambler_enable = 1;
- hdev->dram_scrambler_enable = 1;
- hdev->bmc_enable = 1;
+ hdev->pldm = 0;
hdev->hard_reset_on_fw_events = 1;
+ hdev->bmc_enable = 1;
hdev->reset_on_preboot_fail = 1;
- hdev->reset_if_device_not_idle = 1;
-
- hdev->reset_pcilink = 0;
- hdev->axi_drain = 0;
+ hdev->heartbeat = 1;
}
static void copy_kernel_module_params_to_device(struct hl_device *hdev)
{
+ hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
+
hdev->major = hl_major;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
+}
- if (timeout_locked)
- hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
- else
- hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+static void fixup_device_params_per_asic(struct hl_device *hdev)
+{
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ case ASIC_GAUDI:
+ case ASIC_GAUDI_SEC:
+ hdev->reset_upon_device_release = 0;
+ break;
+ default:
+ hdev->reset_upon_device_release = 1;
+ break;
+ }
}
static int fixup_device_params(struct hl_device *hdev)
{
- hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
+ int tmp_timeout;
+
+ tmp_timeout = timeout_locked;
hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+ if (tmp_timeout)
+ hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
hdev->stop_on_err = true;
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
@@ -321,6 +350,18 @@ static int fixup_device_params(struct hl_device *hdev)
/* Enable only after the initialization of the device */
hdev->disabled = true;
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) &&
+ (hdev->fw_components & ~FW_TYPE_PREBOOT_CPU)) {
+ pr_err("Preboot must be set along with other components");
+ return -EINVAL;
+ }
+
+ /* If CPU queues not enabled, no way to do heartbeat */
+ if (!hdev->cpu_queues_enable)
+ hdev->heartbeat = 0;
+
+ fixup_device_params_per_asic(hdev);
+
return 0;
}
@@ -345,7 +386,7 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
if (!hdev)
return -ENOMEM;
- /* can be NULL in case of simulator device */
+ /* Will be NULL in case of simulator device */
hdev->pdev = pdev;
/* Assign status description string */
@@ -355,6 +396,9 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
"in device creation", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE],
+ "in reset after device release", HL_STR_MAX);
+
/* First, we must find out which ASIC are we handling. This is needed
* to configure the behavior of the driver (kernel parameters)
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index c7864d6bb0a1..6a30bd98ab5e 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -47,7 +47,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
u32 size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 sram_kmd_size, dram_kmd_size;
+ u64 sram_kmd_size, dram_kmd_size, dram_available_size;
if ((!size) || (!out))
return -EINVAL;
@@ -62,19 +62,22 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.dram_base_address =
hdev->mmu_enable && prop->dram_supports_virtual_memory ?
prop->dmmu.start_addr : prop->dram_user_base_address;
- hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
+ hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
+ hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;
+
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
- if (hdev->mmu_enable)
- hw_ip.dram_size =
- DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
- prop->dram_page_size) *
- prop->dram_page_size;
+ dram_available_size = prop->dram_size - dram_kmd_size;
+
+ if (hdev->mmu_enable == MMU_EN_ALL)
+ hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
+ prop->dram_page_size) * prop->dram_page_size;
else
- hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+ hw_ip.dram_size = dram_available_size;
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
+
hw_ip.dram_page_size = prop->dram_page_size;
hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
hw_ip.num_of_events = prop->num_of_events;
@@ -93,8 +96,12 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
- hw_ip.first_available_interrupt_id = prop->first_available_user_msix_interrupt;
+ hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
+ hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
+ hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
+
+ hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
return copy_to_user(out, &hw_ip,
@@ -287,7 +294,7 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
return -EINVAL;
reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
- reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;
return copy_to_user(out, &reset_count,
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
@@ -610,6 +617,28 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
+static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_info_undefined_opcode_event info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.timestamp = ktime_to_ns(hdev->last_error.undef_opcode.timestamp);
+ info.engine_id = hdev->last_error.undef_opcode.engine_id;
+ info.cq_addr = hdev->last_error.undef_opcode.cq_addr;
+ info.cq_size = hdev->last_error.undef_opcode.cq_size;
+ info.stream_id = hdev->last_error.undef_opcode.stream_id;
+ info.cb_addr_streams_len = hdev->last_error.undef_opcode.cb_addr_streams_len;
+ memcpy(info.cb_addr_streams, hdev->last_error.undef_opcode.cb_addr_streams,
+ sizeof(info.cb_addr_streams));
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -626,7 +655,7 @@ static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_
	 * For this reason, for all ASICs that do not support multiple page sizes, the function will
	 * return an empty bitmask indicating that multiple page sizes are not supported.
*/
- hdev->asic_funcs->get_valid_dram_page_orders(&info);
+ info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -718,6 +747,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_RAZWI_EVENT:
return razwi_info(hpriv, args);
+ case HL_INFO_UNDEFINED_OPCODE_EVENT:
+ return undefined_opcode_info(hpriv, args);
+
case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
return dev_mem_alloc_page_sizes_info(hpriv, args);
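The reworked hw_ip_info() above reports the user-visible DRAM size as the total DRAM minus the KMD-reserved area and, when the full MMU is enabled, rounded down to a whole number of DRAM pages. A minimal userspace sketch of that rounding, mirroring DIV_ROUND_DOWN_ULL(available, page_size) * page_size (all sizes below are made-up example values):

/*
 * Illustrative sketch: round the available DRAM down to a multiple of the
 * DRAM page size, as the MMU-enabled path of hw_ip_info() does.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_to_page(uint64_t available, uint64_t page_size)
{
	/* integer division truncates, dropping any partial trailing page */
	return (available / page_size) * page_size;
}

int main(void)
{
	uint64_t dram_size = 32ULL << 30;     /* hypothetical 32 GiB of DRAM */
	uint64_t kmd_reserved = 512ULL << 20; /* hypothetical 512 MiB reserved for the driver */
	uint64_t page_size = 2ULL << 20;      /* hypothetical 2 MiB DRAM page */
	uint64_t available = dram_size - kmd_reserved;

	printf("available: %llu, user-visible: %llu\n",
	       (unsigned long long)available,
	       (unsigned long long)round_down_to_page(available, page_size));
	return 0;
}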
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 6103e479e855..3f15ab9d827f 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -308,6 +308,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
+ job->user_cb_size,
cq_addr,
le32_to_cpu(cq_pkt.data),
q->msi_vec,
@@ -695,6 +696,16 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
goto unroll_cq_resv;
}
+ rc = hdev->asic_funcs->pre_schedule_cs(cs);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed in pre-submission operations of CS %d.%llu\n",
+ ctx->asid, cs->sequence);
+ goto unroll_cq_resv;
+ }
+
+ hdev->shadow_cs_queue[cs->sequence &
+ (hdev->asic_prop.max_pending_cs - 1)] = cs;
if (cs->encaps_signals && cs->staged_first) {
rc = encaps_sig_first_staged_cs_handler(hdev, cs);
@@ -806,13 +817,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
int rc;
if (is_cpu_queue)
- p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address);
+ p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
else
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address,
+ p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -838,14 +845,10 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
free_queue:
if (is_cpu_queue)
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
else
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address,
- q->bus_address);
+ hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
+ q->bus_address);
return rc;
}
@@ -884,10 +887,8 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address,
- GFP_KERNEL | __GFP_ZERO);
+ p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -1060,14 +1061,10 @@ static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
kfree(q->shadow_queue);
if (q->queue_type == QUEUE_TYPE_CPU)
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
else
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address,
- q->bus_address);
+ hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
+ q->bus_address);
}
int hl_hw_queues_create(struct hl_device *hdev)
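hl_hw_queue_schedule_cs() above stores the CS pointer in a shadow array indexed by the CS sequence masked with (max_pending_cs - 1), i.e. a power-of-two ring buffer. A minimal userspace sketch of that indexing with a hypothetical ring size:

/*
 * Illustrative sketch: map a monotonically increasing CS sequence number to
 * a slot in a power-of-two shadow ring, as the new shadow_cs_queue code does.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_PENDING_CS 64u /* hypothetical, must be a power of two */

static unsigned int shadow_index(uint64_t cs_sequence)
{
	/* equivalent to cs_sequence % EXAMPLE_MAX_PENDING_CS for powers of two */
	return (unsigned int)(cs_sequence & (EXAMPLE_MAX_PENDING_CS - 1));
}

int main(void)
{
	uint64_t seqs[] = { 0, 63, 64, 130 };
	size_t i;

	for (i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++)
		printf("cs %llu -> slot %u\n",
		       (unsigned long long)seqs[i], shadow_index(seqs[i]));
	return 0;
}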
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 8500e15ef743..94d537fd4fde 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -67,6 +67,56 @@ static void irq_handle_eqe(struct work_struct *work)
}
/**
+ * job_finish - queue job finish work
+ *
+ * @hdev: pointer to device structure
+ * @cs_seq: command submission sequence
+ * @cq: completion queue
+ *
+ */
+static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
+{
+ struct hl_hw_queue *queue;
+ struct hl_cs_job *job;
+
+ queue = &hdev->kernel_queues[cq->hw_queue_id];
+ job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
+ queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
+
+ atomic_inc(&queue->ci);
+}
+
+/**
+ * cs_finish - queue all cs jobs finish work
+ *
+ * @hdev: pointer to device structure
+ * @cs_seq: command submission sequence
+ *
+ */
+static void cs_finish(struct hl_device *hdev, u16 cs_seq)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_hw_queue *queue;
+ struct hl_cs *cs;
+ struct hl_cs_job *job;
+
+ cs = hdev->shadow_cs_queue[cs_seq & (prop->max_pending_cs - 1)];
+ if (!cs) {
+ dev_warn(hdev->dev,
+ "No pointer to CS in shadow array at index %d\n",
+ cs_seq);
+ return;
+ }
+
+ list_for_each_entry(job, &cs->job_list, cs_node) {
+ queue = &hdev->kernel_queues[job->hw_queue_id];
+ atomic_inc(&queue->ci);
+ }
+
+ queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
+}
+
+/**
* hl_irq_handler_cq - irq handler for completion queue
*
* @irq: irq number
@@ -77,9 +127,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
struct hl_cq *cq = arg;
struct hl_device *hdev = cq->hdev;
- struct hl_hw_queue *queue;
- struct hl_cs_job *job;
- bool shadow_index_valid;
+ bool shadow_index_valid, entry_ready;
u16 shadow_index;
struct hl_cq_entry *cq_entry, *cq_base;
@@ -93,37 +141,41 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
cq_base = cq->kernel_address;
while (1) {
- bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
- CQ_ENTRY_READY_MASK)
- >> CQ_ENTRY_READY_SHIFT);
+ cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
+ entry_ready = !!FIELD_GET(CQ_ENTRY_READY_MASK,
+ le32_to_cpu(cq_entry->data));
if (!entry_ready)
break;
- cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
-
/* Make sure we read CQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
- shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
- CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
- >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
-
- shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
- CQ_ENTRY_SHADOW_INDEX_MASK)
- >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
+ shadow_index_valid =
+ !!FIELD_GET(CQ_ENTRY_SHADOW_INDEX_VALID_MASK,
+ le32_to_cpu(cq_entry->data));
- queue = &hdev->kernel_queues[cq->hw_queue_id];
+ shadow_index = FIELD_GET(CQ_ENTRY_SHADOW_INDEX_MASK,
+ le32_to_cpu(cq_entry->data));
- if ((shadow_index_valid) && (!hdev->disabled)) {
- job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
- queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
+ /*
+ * CQ interrupt handler has 2 modes of operation:
+ * 1. Interrupt per CS completion: (Single CQ for all queues)
+ * CQ entry represents a completed CS
+ *
+ * 2. Interrupt per CS job completion in queue: (CQ per queue)
+ * CQ entry represents a completed job in a certain queue
+ */
+ if (shadow_index_valid && !hdev->disabled) {
+ if (hdev->asic_prop.completion_mode ==
+ HL_COMPLETION_MODE_CS)
+ cs_finish(hdev, shadow_index);
+ else
+ job_finish(hdev, shadow_index, cq);
}
- atomic_inc(&queue->ci);
-
/* Clear CQ entry ready bit */
cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
~CQ_ENTRY_READY_MASK);
@@ -217,8 +269,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
return 0;
}
-static void handle_user_cq(struct hl_device *hdev,
- struct hl_user_interrupt *user_cq)
+static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
struct hl_user_pending_interrupt *pend, *temp_pend;
struct list_head *ts_reg_free_list_head = NULL;
@@ -240,8 +291,8 @@ static void handle_user_cq(struct hl_device *hdev,
if (!job)
return;
- spin_lock(&user_cq->wait_list_lock);
- list_for_each_entry_safe(pend, temp_pend, &user_cq->wait_list_head, wait_list_node) {
+ spin_lock(&intr->wait_list_lock);
+ list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, wait_list_node) {
if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
!pend->cq_kernel_addr) {
if (pend->ts_reg_info.buf) {
@@ -258,7 +309,7 @@ static void handle_user_cq(struct hl_device *hdev,
}
}
}
- spin_unlock(&user_cq->wait_list_lock);
+ spin_unlock(&intr->wait_list_lock);
if (ts_reg_free_list_head) {
INIT_WORK(&job->free_obj, hl_ts_free_objects);
@@ -271,22 +322,24 @@ static void handle_user_cq(struct hl_device *hdev,
}
/**
- * hl_irq_handler_user_cq - irq handler for user completion queues
+ * hl_irq_handler_user_interrupt - irq handler for user interrupts
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
-irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
+irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
- struct hl_user_interrupt *user_cq = arg;
- struct hl_device *hdev = user_cq->hdev;
+ struct hl_user_interrupt *user_int = arg;
+ struct hl_device *hdev = user_int->hdev;
- /* Handle user cq interrupts registered on all interrupts */
- handle_user_cq(hdev, &hdev->common_user_interrupt);
+ if (user_int->is_decoder)
+ handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
+ else
+ handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
- /* Handle user cq interrupts registered on this specific interrupt */
- handle_user_cq(hdev, user_cq);
+ /* Handle user cq or decoder interrupts registered on this specific irq */
+ handle_user_interrupt(hdev, user_int);
return IRQ_HANDLED;
}
@@ -304,9 +357,7 @@ irqreturn_t hl_irq_handler_default(int irq, void *arg)
struct hl_device *hdev = user_interrupt->hdev;
u32 interrupt_id = user_interrupt->interrupt_id;
- dev_err(hdev->dev,
- "got invalid user interrupt %u",
- interrupt_id);
+ dev_err(hdev->dev, "got invalid user interrupt %u", interrupt_id);
return IRQ_HANDLED;
}
@@ -360,7 +411,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
*/
dma_rmb();
- if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
+ if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
goto skip_irq;
}
@@ -390,11 +441,26 @@ skip_irq:
}
/**
+ * hl_irq_handler_dec_abnrm - Decoder error interrupt handler
+ * @irq: IRQ number
+ * @arg: pointer to decoder structure.
+ */
+irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
+{
+ struct hl_dec *dec = arg;
+
+ schedule_work(&dec->completion_abnrm_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
 * hl_cq_init - main initialization function for a cq object
*
* @hdev: pointer to device structure
* @q: pointer to cq structure
* @hw_queue_id: The H/W queue ID this completion queue belongs to
+ * HL_INVALID_QUEUE if cq is not attached to any specific queue
*
* Allocate dma-able memory for the completion queue and initialize fields
* Returns 0 on success
@@ -403,8 +469,8 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
void *p;
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
- &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -429,9 +495,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
*/
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
- hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
- q->kernel_address,
- q->bus_address);
+ hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
}
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
@@ -464,9 +528,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
void *p;
- p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- HL_EQ_SIZE_IN_BYTES,
- &q->bus_address);
+ p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
if (!p)
return -ENOMEM;
@@ -490,9 +552,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
flush_workqueue(hdev->eq_wq);
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- HL_EQ_SIZE_IN_BYTES,
- q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
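The reworked CQ handler above replaces open-coded mask/shift pairs with FIELD_GET() from <linux/bitfield.h>. A minimal userspace sketch of the same extraction, emulating FIELD_GET() with an explicit mask and a shift derived from it; the mask values are hypothetical stand-ins for the CQ_ENTRY_* masks:

/*
 * Illustrative sketch: extract the "ready", "shadow index valid" and
 * "shadow index" fields from a raw CQ entry word.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_READY_MASK        0x80000000u /* hypothetical "entry ready" bit */
#define EXAMPLE_SHADOW_VALID_MASK 0x40000000u /* hypothetical "shadow index valid" bit */
#define EXAMPLE_SHADOW_INDEX_MASK 0x0000FFFFu /* hypothetical shadow index field */

/* field_get(): mask the value and shift the field down to bit 0 (GCC/Clang builtin used for the shift) */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t cq_entry_data = 0xC0000123u; /* example raw CQ entry word */

	int entry_ready = !!field_get(EXAMPLE_READY_MASK, cq_entry_data);
	int shadow_valid = !!field_get(EXAMPLE_SHADOW_VALID_MASK, cq_entry_data);
	uint16_t shadow_index = (uint16_t)field_get(EXAMPLE_SHADOW_INDEX_MASK, cq_entry_data);

	printf("ready=%d valid=%d index=%u\n", entry_ready, shadow_valid, shadow_index);
	return 0;
}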
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 663dd7e589d4..61bc1bfe984a 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2021 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -27,7 +27,7 @@ static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 psize;
+ u64 psize;
/*
* for ASIC that supports setting the allocation page size by user we will address
@@ -36,8 +36,8 @@ static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u
if (prop->supports_user_set_page_size && args->alloc.page_size) {
psize = args->alloc.page_size;
- if (!hdev->asic_funcs->is_valid_dram_page_size(psize)) {
- dev_err(hdev->dev, "user page size (%#x) is not valid\n", psize);
+ if (!is_power_of_2(psize)) {
+ dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
return -EINVAL;
}
} else {
@@ -305,33 +305,20 @@ static void dram_pg_pool_do_release(struct kref *ref)
*
* This function does the following:
* - For DRAM memory only
- * - iterate over the pack, scrub and free each physical block structure by
+ * - iterate over the pack, free each physical block structure by
* returning it to the general pool.
- * In case of error during scrubbing, initiate hard reset.
- * Once hard reset is triggered, scrubbing is bypassed while freeing the
- * memory continues.
* - Free the hl_vm_phys_pg_pack structure.
*/
-static int free_phys_pg_pack(struct hl_device *hdev,
+static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
- int rc = 0;
if (phys_pg_pack->created_from_userptr)
goto end;
if (phys_pg_pack->contiguous) {
- if (hdev->memory_scrub && !hdev->disabled) {
- rc = hdev->asic_funcs->scrub_device_mem(hdev,
- phys_pg_pack->pages[0],
- phys_pg_pack->total_size);
- if (rc)
- dev_err(hdev->dev,
- "Failed to scrub contiguous device memory\n");
- }
-
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
phys_pg_pack->total_size);
@@ -340,15 +327,6 @@ static int free_phys_pg_pack(struct hl_device *hdev,
dram_pg_pool_do_release);
} else {
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
- if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
- rc = hdev->asic_funcs->scrub_device_mem(
- hdev,
- phys_pg_pack->pages[i],
- phys_pg_pack->page_size);
- if (rc)
- dev_err(hdev->dev,
- "Failed to scrub device memory\n");
- }
gen_pool_free(vm->dram_pg_pool,
phys_pg_pack->pages[i],
phys_pg_pack->page_size);
@@ -357,14 +335,11 @@ static int free_phys_pg_pack(struct hl_device *hdev,
}
}
- if (rc && !hdev->disabled)
- hl_device_reset(hdev, HL_DRV_RESET_HARD);
-
end:
kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
- return rc;
+ return;
}
/**
@@ -384,40 +359,35 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
- if (phys_pg_pack) {
- if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
- dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
- handle);
- spin_unlock(&vm->idr_lock);
- return -EINVAL;
- }
-
- if (phys_pg_pack->exporting_cnt) {
- dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
- spin_unlock(&vm->idr_lock);
- return -EINVAL;
- }
-
- /*
- * must remove from idr before the freeing of the physical
- * pages as the refcount of the pool is also the trigger of the
- * idr destroy
- */
- idr_remove(&vm->phys_pg_pack_handles, handle);
+ if (!phys_pg_pack) {
spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
+ return -EINVAL;
+ }
- atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
- atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
+ if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
+ return -EINVAL;
+ }
- return free_phys_pg_pack(hdev, phys_pg_pack);
- } else {
+ if (phys_pg_pack->exporting_cnt) {
spin_unlock(&vm->idr_lock);
- dev_err(hdev->dev,
- "free device memory failed, no match for handle %u\n",
- handle);
+ dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
return -EINVAL;
}
+ /* must remove from idr before the freeing of the physical pages as the refcount of the pool
+ * is also the trigger of the idr destroy
+ */
+ idr_remove(&vm->phys_pg_pack_handles, handle);
+ spin_unlock(&vm->idr_lock);
+
+ atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
+ atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
+
+ free_phys_pg_pack(hdev, phys_pg_pack);
+
return 0;
}
@@ -657,7 +627,7 @@ static u64 get_va_block(struct hl_device *hdev,
/* Check if we need to ignore hint address */
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
- (!is_align_pow_2 && is_hint_dram_addr &&
+ (!is_align_pow_2 && is_hint_dram_addr &&
do_div(tmp_hint_addr, va_range->page_size))) {
if (force_hint) {
@@ -1245,16 +1215,16 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
+ mutex_unlock(&ctx->mmu_lock);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ mutex_unlock(&ctx->mmu_lock);
if (rc)
goto map_err;
- mutex_unlock(&ctx->mmu_lock);
-
/*
	 * prefetch is done upon user's request. It is performed in a WQ and so can
	 * be outside the MMU lock. The operation itself is already protected by the mmu lock
@@ -1278,13 +1248,11 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
*device_addr = ret_vaddr;
if (is_userptr)
- rc = free_phys_pg_pack(hdev, phys_pg_pack);
+ free_phys_pg_pack(hdev, phys_pg_pack);
return rc;
map_err:
- mutex_unlock(&ctx->mmu_lock);
-
if (add_va_block(hdev, va_range, ret_vaddr,
ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
@@ -2509,17 +2477,20 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
* va_range_init() - initialize virtual addresses range.
* @hdev: pointer to the habanalabs device structure.
* @va_ranges: pointer to va_ranges array.
- * @start: range start address.
- * @end: range end address.
+ * @range_type: virtual address range type.
+ * @start: range start address, inclusive.
+ * @end: range end address, inclusive.
* @page_size: page size for this va_range.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
* addresses.
*/
-static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
- u64 start, u64 end, u32 page_size)
+static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
+ enum hl_va_range_type range_type, u64 start,
+ u64 end, u32 page_size)
{
+ struct hl_va_range *va_range = va_ranges[range_type];
int rc;
INIT_LIST_HEAD(&va_range->list);
@@ -2637,7 +2608,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
- rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
+ rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
host_range_start, host_range_end, host_page_size);
if (rc) {
dev_err(hdev->dev, "failed to init host vm range\n");
@@ -2648,7 +2619,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
rc = va_range_init(hdev,
- ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
+ ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
host_huge_range_start, host_huge_range_end,
host_huge_page_size);
if (rc) {
@@ -2664,7 +2635,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
- rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
+ rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
dram_range_start, dram_range_end, dram_page_size);
if (rc) {
dev_err(hdev->dev, "failed to init dram vm range\n");
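set_alloc_page_size() above now validates the user-supplied page size with is_power_of_2() instead of a per-ASIC callback. A minimal userspace sketch of the same check (the sample sizes are arbitrary):

/*
 * Illustrative sketch: the kernel uses is_power_of_2() from <linux/log2.h>;
 * the bit trick below is the same idea in plain C.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a non-zero value is a power of two iff it has exactly one bit set */
static bool is_power_of_2_u64(uint64_t v)
{
	return v && ((v & (v - 1)) == 0);
}

int main(void)
{
	uint64_t sizes[] = { 4096, 2ULL << 20, 3ULL << 20, 0 }; /* example page sizes */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%llu -> %s\n", (unsigned long long)sizes[i],
		       is_power_of_2_u64(sizes[i]) ? "valid" : "rejected");
	return 0;
}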
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
index ea5f2bd31b0a..56df962d2f3c 100644
--- a/drivers/misc/habanalabs/common/memory_mgr.c
+++ b/drivers/misc/habanalabs/common/memory_mgr.c
@@ -135,7 +135,7 @@ int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
}
/**
- * @hl_mmap_mem_buf_alloc - allocate a new mappable buffer
+ * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
*
 * @mmg: parent unified memory manager
* @behavior: behavior object describing this buffer polymorphic behavior
diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/misc/habanalabs/common/mmu/Makefile
index d852c3874658..1806c524e04a 100644
--- a/drivers/misc/habanalabs/common/mmu/Makefile
+++ b/drivers/misc/habanalabs/common/mmu/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o
+HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
+ common/mmu/mmu_v2_hr.o
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index f3734718d94f..60740de47b34 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2020 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -51,8 +51,17 @@ int hl_mmu_init(struct hl_device *hdev)
return rc;
}
- if (hdev->mmu_func[MMU_HR_PGT].init != NULL)
+ if (hdev->mmu_func[MMU_HR_PGT].init != NULL) {
rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
+ if (rc)
+ goto fini_dr_mmu;
+ }
+
+ return 0;
+
+fini_dr_mmu:
+ if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
+ hdev->mmu_func[MMU_DR_PGT].fini(hdev);
return rc;
}
@@ -103,8 +112,17 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
return rc;
}
- if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL)
+ if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) {
rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
+ if (rc)
+ goto fini_dr_ctx;
+ }
+
+ return 0;
+
+fini_dr_ctx:
+ if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
+ hdev->mmu_func[MMU_DR_PGT].fini(hdev);
return rc;
}
@@ -607,6 +625,11 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI_SEC:
hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
break;
+ case ASIC_GAUDI2:
+ case ASIC_GAUDI2_SEC:
+ /* MMUs in Gaudi2 are always host resident */
+ hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -745,3 +768,470 @@ u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *m
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
+static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk,
+ void *data)
+{
+ struct hl_device *hdev = (struct hl_device *)data;
+
+ hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
+ (void *)chunk->start_addr, chunk->phys_addr);
+}
+
+void hl_mmu_hr_flush(struct hl_ctx *ctx)
+{
+ /* a flush operation requires memory barrier */
+ mb();
+}
+
+/**
+ * hl_mmu_hr_pool_destroy() - destroy genpool
+ * @hdev: habanalabs device structure.
+ * @hr_priv: MMU HR private data.
+ * @hop_table_size: HOP table size.
+ *
+ * This function does the following:
+ * - free entries allocated for shadow HOP0
+ * - free pool chunks
+ * - free pool
+ */
+static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gen_pool **pool = &hr_priv->mmu_pgt_pool;
+ struct pgt_info *hop0_pgt;
+ int asid;
+
+ if (ZERO_OR_NULL_PTR(*pool))
+ return;
+
+ /* Free the Fixed allocation of HOPs0 */
+ if (hr_priv->mmu_asid_hop0) {
+ for (asid = 0 ; asid < prop->max_asid ; asid++) {
+ hop0_pgt = &hr_priv->mmu_asid_hop0[asid];
+ if (ZERO_OR_NULL_PTR(hop0_pgt->virt_addr))
+ continue;
+
+ gen_pool_free(*pool, (uintptr_t) hop0_pgt->virt_addr, hop_table_size);
+ }
+ }
+
+ gen_pool_for_each_chunk(*pool, mmu_dma_mem_free_from_chunk, hdev);
+ gen_pool_destroy(*pool);
+
+	/* Make sure that if we arrive here again without init being called we
+ * won't cause kernel panic. This can happen for example if we fail
+ * during hard reset code at certain points
+ */
+ *pool = NULL;
+}
+
+/**
+ * hl_mmu_hr_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ * @hr_priv: MMU HR private data.
+ * @hop_table_size: HOP table size.
+ * @pgt_size: memory size allocated for the page table
+ *
+ * @return 0 on success, otherwise a non-zero error code
+ *
+ * This function does the following:
+ * - Create a pool of pages for pgt_infos.
+ * - Create a shadow table for pgt
+ */
+int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
+ u64 pgt_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ size_t pool_chunk_size = SZ_4M;
+ struct pgt_info *hop0_pgt;
+ dma_addr_t dma_addr;
+ u64 virt_addr;
+ int i, rc;
+
+ /*
+	 * we set alloc size as PAGE_SIZE (since dma_alloc_coherent allocation order/size is
+ * PAGE_SHIFT/PAGE_SIZE) in order to be able to control the allocations alignment.
+ * This way we can call "DMA alloc align" according to dma_alloc granularity and supply
+ * allocations with higher-order alignment restrictions
+ */
+ hr_priv->mmu_pgt_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (ZERO_OR_NULL_PTR(hr_priv->mmu_pgt_pool)) {
+ dev_err(hdev->dev, "Failed to create hr page pool\n");
+ return -ENOMEM;
+ }
+
+ hr_priv->mmu_asid_hop0 = kvcalloc(prop->max_asid, sizeof(struct pgt_info), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
+ dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
+ rc = -ENOMEM;
+ goto destroy_mmu_pgt_pool;
+ }
+
+ for (i = 0 ; i < pgt_size ; i += pool_chunk_size) {
+ virt_addr = (uintptr_t) hl_asic_dma_alloc_coherent(hdev, pool_chunk_size,
+ &dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (ZERO_OR_NULL_PTR(virt_addr)) {
+ dev_err(hdev->dev,
+ "Failed to allocate memory for host-resident page pool\n");
+ rc = -ENOMEM;
+ goto destroy_mmu_pgt_pool;
+ }
+
+ rc = gen_pool_add_virt(hr_priv->mmu_pgt_pool, virt_addr, (phys_addr_t) dma_addr,
+ pool_chunk_size, -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to fill host-resident page pool\n");
+ goto destroy_mmu_pgt_pool;
+ }
+ }
+
+ for (i = 0 ; i < prop->max_asid ; i++) {
+ hop0_pgt = &hr_priv->mmu_asid_hop0[i];
+ hop0_pgt->virt_addr = (uintptr_t)
+ gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
+ hop_table_size,
+ (dma_addr_t *) &hop0_pgt->phys_addr,
+ hop_table_size);
+ if (!hop0_pgt->virt_addr) {
+ dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
+ rc = -ENOMEM;
+ goto destroy_mmu_pgt_pool;
+ }
+ }
+
+ /* MMU H/W init will be done in device hw_init() */
+
+ return 0;
+
+destroy_mmu_pgt_pool:
+ hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
+ if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0))
+ kvfree(hr_priv->mmu_asid_hop0);
+
+ return rc;
+}
+
+/**
+ * hl_mmu_hr_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ * @hr_priv: MMU host resident private info.
+ * @hop_table_size: HOP table size
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
+{
+ /* MMU H/W fini was already done in device hw_fini() */
+
+ hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
+
+ if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
+ kvfree(hr_priv->mmu_asid_hop0);
+
+	/* Make sure that if we arrive here again without init being
+	 * called we won't cause kernel panic. This can happen for
+ * example if we fail during hard reset code at certain points
+ */
+ hr_priv->mmu_asid_hop0 = NULL;
+ }
+}
+
+/**
+ * hl_mmu_hr_free_hop_remove_pgt() - free HOP and remove PGT from hash
+ * @pgt_info: page table info structure.
+ * @hr_priv: MMU HR private data.
+ * @hop_table_size: HOP table size.
+ */
+void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size)
+{
+ gen_pool_free(hr_priv->mmu_pgt_pool, pgt_info->virt_addr, hop_table_size);
+ hash_del(&pgt_info->node);
+ kfree(pgt_info);
+}
+
+/**
+ * hl_mmu_hr_pte_phys_to_virt() - translate PTE phys addr to virt addr
+ * @ctx: pointer to the context structure
+ * @pgt: pgt_info for the HOP hosting the PTE
+ * @phys_pte_addr: phys address of the PTE
+ * @hop_table_size: HOP table size
+ *
+ * @return PTE virtual address
+ *
+ * The function uses the pgt_info to get the HOP base virt addr and obtains the PTE's virt addr
+ * by adding the PTE offset.
+ */
+u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,
+ u64 phys_pte_addr, u32 hop_table_size)
+{
+ u64 page_mask = (hop_table_size - 1);
+ u64 pte_offset = phys_pte_addr & page_mask;
+
+ return pgt->virt_addr + pte_offset;
+}
+
+/**
+ * hl_mmu_hr_write_pte() - write HR PTE
+ * @ctx: pointer to the context structure
+ * @pgt_info: HOP's page table info structure
+ * @phys_pte_addr: phys PTE address
+ * @val: raw PTE data
+ * @hop_table_size: HOP table size
+ */
+void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u64 val, u32 hop_table_size)
+{
+ /*
+ * The value to write is the phys address of the next hop +
+ * flags at the 12 LSBs.
+ */
+ u64 virt_addr = hl_mmu_hr_pte_phys_to_virt(ctx, pgt_info, phys_pte_addr, hop_table_size);
+
+ *((u64 *) (uintptr_t) virt_addr) = val;
+}
+
+/**
+ * hl_mmu_hr_clear_pte() - clear HR PTE
+ * @ctx: pointer to the context structure
+ * @pgt_info: HOP's page table info structure
+ * @phys_pte_addr: phys PTE address
+ * @hop_table_size: HOP table size
+ */
+void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u32 hop_table_size)
+{
+ /* no need to transform the value to physical address */
+ hl_mmu_hr_write_pte(ctx, pgt_info, phys_pte_addr, 0, hop_table_size);
+}
+
+/**
+ * hl_mmu_hr_put_pte() - put HR PTE and remove it if necessary (no more PTEs)
+ * @ctx: pointer to the context structure
+ * @pgt_info: HOP's page table info structure
+ * @hr_priv: HR MMU private info
+ * @hop_table_size: HOP table size
+ *
+ * @return number of PTEs still in the HOP
+ */
+int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info,
+ struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size)
+{
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because free_hop might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ hl_mmu_hr_free_hop_remove_pgt(pgt_info, hr_priv, hop_table_size);
+
+ return num_of_ptes_left;
+}
+
+/**
+ * hl_mmu_hr_get_pte() - increase PGT PTE count
+ * @ctx: pointer to the context structure
+ * @hr_func: host resident functions
+ * @phys_hop_addr: HOP phys address
+ */
+void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr)
+{
+ hr_func->get_pgt_info(ctx, phys_hop_addr)->num_of_ptes++;
+}
+
+/**
+ * hl_mmu_hr_get_next_hop_pgt_info() - get pgt_info structure for the next HOP
+ * @ctx: pointer to the context structure.
+ * @hr_func: host resident functions.
+ * @curr_pte: current PTE value.
+ *
+ * @return pgt_info structure on success, otherwise NULL.
+ */
+struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
+ struct hl_hr_mmu_funcs *hr_func,
+ u64 curr_pte)
+{
+ u64 next_hop_phys_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+ if (next_hop_phys_addr == ULLONG_MAX)
+ return NULL;
+
+ return hr_func->get_pgt_info(ctx, next_hop_phys_addr);
+}
+
+/**
+ * hl_mmu_hr_alloc_hop() - allocate HOP
+ * @ctx: pointer to the context structure.
+ * @hr_priv: host resident private info structure.
+ * @hr_func: host resident functions.
+ * @mmu_prop: MMU properties.
+ *
+ * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
+ */
+struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ int i, retry = 1;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return NULL;
+
+ for (i = 0; i <= retry; i++) {
+ virt_addr = gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
+ mmu_prop->hop_table_size,
+ &phys_addr,
+ mmu_prop->hop_table_size);
+ if (virt_addr)
+ break;
+
+ /* No memory in pool - get some and try again */
+ virt_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &phys_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (ZERO_OR_NULL_PTR(virt_addr))
+ break;
+
+ if (gen_pool_add_virt(hr_priv->mmu_pgt_pool, (unsigned long)virt_addr,
+ phys_addr, SZ_2M, -1)) {
+ hl_asic_dma_free_coherent(hdev, SZ_2M, virt_addr, phys_addr);
+ virt_addr = NULL;
+ break;
+ }
+ }
+
+ if (ZERO_OR_NULL_PTR(virt_addr)) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ goto pool_alloc_err;
+ }
+
+ pgt_info->phys_addr = phys_addr;
+ pgt_info->shadow_addr = (unsigned long) NULL;
+ pgt_info->virt_addr = (unsigned long)virt_addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hr_func->add_pgt_info(ctx, pgt_info, phys_addr);
+
+ return pgt_info;
+
+pool_alloc_err:
+ kfree(pgt_info);
+
+ return NULL;
+}
+
+/**
+ * hl_mmu_hr_get_alloc_next_hop() - get the next HOP, allocate it if it does not exist
+ * @ctx: pointer to the context structure.
+ * @hr_priv: host resident private info structure.
+ * @hr_func: host resident functions.
+ * @mmu_prop: MMU properties.
+ * @curr_pte: current PTE value.
+ * @is_new_hop: set to true if HOP is new (caller responsibility to set it to false).
+ *
+ * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
+ */
+struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
+ struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop,
+ u64 curr_pte, bool *is_new_hop)
+{
+ u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+ if (hop_addr != ULLONG_MAX)
+ return hr_func->get_pgt_info(ctx, hop_addr);
+
+ *is_new_hop = true;
+ return hl_mmu_hr_alloc_hop(ctx, hr_priv, hr_func, mmu_prop);
+}
+
+/**
+ * hl_mmu_hr_get_tlb_info() - get the TLB info (info for a specific mapping)
+ * @ctx: pointer to the context structure.
+ * @virt_addr: the virt address for which to get info.
+ * @hops: HOPs info structure.
+ * @hr_func: host resident functions.
+ *
+ * @return 0 on success, otherwise a non-zero error code.
+ */
+int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
+ struct hl_hr_mmu_funcs *hr_func)
+{
+ /* using 6 HOPs as this is the maximum number of HOPs */
+ struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ int rc, i, used_hops;
+ bool is_huge;
+
+ rc = hr_func->get_tlb_mapping_params(hdev, &mmu_prop, hops, virt_addr, &is_huge);
+ if (rc)
+ return rc;
+
+ used_hops = mmu_prop->num_hops;
+
+ /* huge pages use one less hop */
+ if (is_huge)
+ used_hops--;
+
+ hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+ for (i = 0 ; i < used_hops ; i++) {
+ if (i == 0)
+ hops_pgt_info[i] = hr_func->get_hop0_pgt_info(ctx);
+ else
+ hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx, hr_func,
+ hops->hop_info[i - 1].hop_pte_val);
+
+ if (!hops_pgt_info[i])
+ return -EFAULT;
+
+ hops->hop_info[i].hop_addr = hops_pgt_info[i]->phys_addr;
+ hops->hop_info[i].hop_pte_addr =
+ hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops->hop_info[i].hop_addr,
+ hops->scrambled_vaddr);
+ hops->hop_info[i].hop_pte_val = *(u64 *) (uintptr_t)
+ hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hops->hop_info[i].hop_pte_addr,
+ mmu_prop->hop_table_size);
+
+ if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+ return -EFAULT;
+
+ if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
+ break;
+ }
+
+	/* if we passed over all hops then no last hop was found */
+ if (i == mmu_prop->num_hops)
+ return -EFAULT;
+
+ if (hops->scrambled_vaddr != virt_addr)
+ hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
+ (hdev, hops->hop_info[i].hop_pte_val);
+ else
+ hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
+
+ hops->used_hops = i + 1;
+
+ return 0;
+}
+
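hl_mmu_hr_pte_phys_to_virt() above derives the PTE's offset inside its HOP table from the low bits of the physical address and adds it to the HOP's virtual base, which assumes hop_table_size is a power of two. A minimal userspace sketch with made-up addresses:

/*
 * Illustrative sketch: translate a host-resident PTE physical address to the
 * virtual address the driver can dereference.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pte_phys_to_virt(uint64_t hop_virt_base, uint64_t phys_pte_addr,
				 uint32_t hop_table_size)
{
	uint64_t page_mask = hop_table_size - 1; /* low bits select the PTE within the table */
	uint64_t pte_offset = phys_pte_addr & page_mask;

	return hop_virt_base + pte_offset;
}

int main(void)
{
	uint64_t hop_virt_base = 0x7f0000001000ULL; /* hypothetical virt base of the HOP table */
	uint64_t phys_pte_addr = 0x10000238ULL;     /* hypothetical phys address of one PTE */
	uint32_t hop_table_size = 4096;             /* hypothetical HOP table size */

	printf("pte virt addr: 0x%llx\n",
	       (unsigned long long)pte_phys_to_virt(hop_virt_base, phys_pte_addr,
						     hop_table_size));
	return 0;
}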
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index e2d91a69acc2..8a40de4a4761 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -393,9 +393,8 @@ static int hl_mmu_v1_init(struct hl_device *hdev)
goto err_pool_add;
}
- hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
- prop->mmu_hop_table_size,
- GFP_KERNEL | __GFP_ZERO);
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
+ GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
rc = -ENOMEM;
goto err_pool_add;
@@ -412,7 +411,7 @@ err_pool_add:
}
/**
- * hl_mmu_fini() - release the MMU module.
+ * hl_mmu_v1_fini() - release the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
@@ -438,7 +437,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
}
/**
- * hl_mmu_ctx_init() - initialize a context for using the MMU module.
+ * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
* @ctx: pointer to the context structure to initialize.
*
* Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
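The mmu_v1 change above swaps kvmalloc_array(..., __GFP_ZERO) for kvcalloc(), which likewise returns zeroed, overflow-checked n * size memory. The userspace analogue is calloc(); a minimal sketch with hypothetical sizes:

/*
 * Illustrative sketch: a zeroed, overflow-checked n * size allocation,
 * analogous to kvcalloc() in the kernel.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t max_asid = 1024;       /* hypothetical number of ASIDs */
	size_t hop_table_size = 4096; /* hypothetical per-ASID shadow HOP0 size */

	uint8_t *shadow_hop0 = calloc(max_asid, hop_table_size);
	if (!shadow_hop0)
		return 1;

	printf("first byte is already zero: %u\n", shadow_hop0[0]);
	free(shadow_hop0);
	return 0;
}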
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c
new file mode 100644
index 000000000000..afe7ef964f82
--- /dev/null
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/slab.h>
+
+static struct pgt_info *hl_mmu_v2_hr_get_pgt_info(struct hl_ctx *ctx, u64 phys_hop_addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
+ hash_for_each_possible(ctx->hr_mmu_phys_hash, pgt_info, node,
+ (unsigned long) phys_hop_addr)
+ if (phys_hop_addr == pgt_info->phys_addr)
+ break;
+
+ return pgt_info;
+}
+
+static void hl_mmu_v2_hr_add_pgt_info(struct hl_ctx *ctx, struct pgt_info *pgt_info,
+ dma_addr_t phys_addr)
+{
+ hash_add(ctx->hr_mmu_phys_hash, &pgt_info->node, phys_addr);
+}
+
+static struct pgt_info *hl_mmu_v2_hr_get_hop0_pgt_info(struct hl_ctx *ctx)
+{
+ return &ctx->hdev->mmu_priv.hr.mmu_asid_hop0[ctx->asid];
+}
+
+/**
+ * hl_mmu_v2_hr_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Create a pool of pages for pgt_infos.
+ * - Create a shadow table for pgt
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
+ prop->mmu_pgt_size);
+}
+
+/**
+ * hl_mmu_v2_hr_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
+}
+
+/**
+ * hl_mmu_v2_hr_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page tables hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+static int hl_mmu_v2_hr_ctx_init(struct hl_ctx *ctx)
+{
+ hash_init(ctx->hr_mmu_phys_hash);
+ return 0;
+}
+
+/*
+ * hl_mmu_v2_hr_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ * - Free the mutex
+ * - Free DRAM default page mapping hops
+ */
+static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ int i;
+
+ if (!hash_empty(ctx->hr_mmu_phys_hash))
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
+
+ hash_for_each_safe(ctx->hr_mmu_phys_hash, i, tmp, pgt_info, node) {
+ dev_err_ratelimited(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+ hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+ }
+}
+
+static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
+ u64 virt_addr, bool is_dram_addr)
+{
+ u64 curr_pte, scrambled_virt_addr, hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 };
+ struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop;
+ struct hl_mmu_properties *mmu_prop;
+ bool is_huge = false;
+ int i, hop_last;
+
+ prop = &hdev->asic_prop;
+
+ /* shifts and masks are the same in PMMU and HMMU, use one of them */
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ hop_last = mmu_prop->num_hops - 1;
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+ curr_pte = 0;
+
+ for (i = 0 ; i < mmu_prop->num_hops ; i++) {
+ /* we get HOP0 differently, it doesn't need curr_pte */
+ if (i == 0)
+ hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
+ else
+ hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx,
+ &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs, curr_pte);
+ if (!hops_pgt_info[i])
+ goto not_mapped;
+
+ hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops_pgt_info[i]->phys_addr,
+ scrambled_virt_addr);
+ if (hop_pte_phys_addr[i] == U64_MAX)
+ return -EFAULT;
+
+ curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
+ hop_last = i;
+ is_huge = true;
+ break;
+ }
+ }
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ for (i = hop_last ; i > 0 ; i--) {
+ hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
+ ctx->hdev->asic_prop.mmu_hop_table_size))
+ goto mapped;
+ }
+ hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+mapped:
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", virt_addr);
+
+ return -EINVAL;
+}
+
+static int hl_mmu_v2_get_last_hop(struct hl_mmu_properties *mmu_prop, u32 page_size)
+{
+ int hop;
+
+ for (hop = (mmu_prop->num_hops - 1); hop; hop--) {
+ if (mmu_prop->hop_shifts[hop] == 0)
+ continue;
+
+ if (page_size <= (1 << mmu_prop->hop_shifts[hop]))
+ break;
+ }
+
+ return hop;
+}
+
+static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
+ u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool is_dram_addr)
+{
+ u64 hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 },
+ curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
+ struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
+ bool hop_new[MMU_ARCH_6_HOPS] = { false };
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
+ int i, hop_last, rc = -ENOMEM;
+
+ /*
+	 * This mapping function can map a page or a huge page. For a huge page
+	 * there are only 4 hops rather than 5. Currently the DRAM allocation
+	 * uses huge pages only but user memory could have been allocated with
+	 * one of the two page sizes. Since this is common code for all three
+	 * cases, we need this huge page check.
+ */
+ if (is_dram_addr)
+ mmu_prop = &prop->dmmu;
+ else if (page_size == prop->pmmu_huge.page_size)
+ mmu_prop = &prop->pmmu_huge;
+ else
+ mmu_prop = &prop->pmmu;
+
+ hop_last = hl_mmu_v2_get_last_hop(mmu_prop, page_size);
+ if (hop_last <= 0) {
+ dev_err(ctx->hdev->dev, "Invalid last HOP %d\n", hop_last);
+ return -EFAULT;
+ }
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+ scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
+
+ for (i = 0 ; i <= hop_last ; i++) {
+
+ if (i == 0)
+ hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
+ else
+ hops_pgt_info[i] = hl_mmu_hr_get_alloc_next_hop(ctx,
+ &ctx->hdev->mmu_priv.hr,
+ &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
+ mmu_prop, curr_pte, &hop_new[i]);
+ if (!hops_pgt_info[i])
+ goto err;
+
+ hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops_pgt_info[i]->phys_addr,
+ scrambled_virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+ }
+
+ if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev, "mapping already exists for virt_addr 0x%llx\n",
+ scrambled_virt_addr);
+
+ for (i = 0 ; i <= hop_last ; i++)
+ dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
+ i,
+ *(u64 *) (uintptr_t)
+ hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size),
+ hop_pte_phys_addr[i]);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
+ | PAGE_PRESENT_MASK;
+
+ /* Write the PTEs */
+ hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ /* for each new hop, add its address to the table of previous-hop */
+ for (i = 1 ; i <= hop_last ; i++) {
+ if (hop_new[i]) {
+ curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
+ curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
+ if (i - 1)
+ hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
+ hops_pgt_info[i - 1]->phys_addr);
+ }
+ }
+
+ hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
+ hops_pgt_info[hop_last]->phys_addr);
+
+ return 0;
+
+err:
+ for (i = 1 ; i <= hop_last ; i++)
+ if (hop_new[i] && hops_pgt_info[i])
+ hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_v2_hr_swap_out - marks all mappings of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_hr_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v2_hr_swap_in - marks all mappings of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_hr_swap_in(struct hl_ctx *ctx)
+{
+
+}
+
+static int hl_mmu_v2_hr_get_tlb_mapping_params(struct hl_device *hdev,
+ struct hl_mmu_properties **mmu_prop,
+ struct hl_mmu_hop_info *hops,
+ u64 virt_addr, bool *is_huge)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+ is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
+ prop->pmmu.start_addr,
+ prop->pmmu.end_addr);
+ is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
+ prop->pmmu_huge.page_size,
+ prop->pmmu_huge.start_addr,
+ prop->pmmu_huge.end_addr);
+ if (is_dram_addr) {
+ *mmu_prop = &prop->dmmu;
+ *is_huge = true;
+ hops->range_type = HL_VA_RANGE_TYPE_DRAM;
+ } else if (is_pmmu_addr) {
+ *mmu_prop = &prop->pmmu;
+ *is_huge = false;
+ hops->range_type = HL_VA_RANGE_TYPE_HOST;
+ } else if (is_pmmu_h_addr) {
+ *mmu_prop = &prop->pmmu_huge;
+ *is_huge = true;
+ hops->range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hl_mmu_v2_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
+ struct hl_mmu_hop_info *hops)
+{
+ return hl_mmu_hr_get_tlb_info(ctx, virt_addr, hops,
+ &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs);
+}
+
+/*
+ * hl_mmu_v2_hr_set_funcs - set the MMU functions for MMU v2 host-resident page tables
+ *
+ * @hdev: pointer to the device structure
+ * @mmu: pointer to the mmu functions structure
+ */
+void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
+{
+ mmu->init = hl_mmu_v2_hr_init;
+ mmu->fini = hl_mmu_v2_hr_fini;
+ mmu->ctx_init = hl_mmu_v2_hr_ctx_init;
+ mmu->ctx_fini = hl_mmu_v2_hr_ctx_fini;
+ mmu->map = _hl_mmu_v2_hr_map;
+ mmu->unmap = _hl_mmu_v2_hr_unmap;
+ mmu->flush = hl_mmu_hr_flush;
+ mmu->swap_out = hl_mmu_v2_hr_swap_out;
+ mmu->swap_in = hl_mmu_v2_hr_swap_in;
+ mmu->get_tlb_info = hl_mmu_v2_hr_get_tlb_info;
+ mmu->hr_funcs.get_hop0_pgt_info = hl_mmu_v2_hr_get_hop0_pgt_info;
+ mmu->hr_funcs.get_pgt_info = hl_mmu_v2_hr_get_pgt_info;
+ mmu->hr_funcs.add_pgt_info = hl_mmu_v2_hr_add_pgt_info;
+ mmu->hr_funcs.get_tlb_mapping_params = hl_mmu_v2_hr_get_tlb_mapping_params;
+}
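hl_mmu_v2_get_last_hop() above picks the deepest hop whose coverage (1 << hop_shift) is at least the mapping's page size, so huge pages terminate the walk one level earlier. A minimal userspace sketch with hypothetical per-hop shifts:

/*
 * Illustrative sketch: select the last HOP for a given page size. The shift
 * values are made-up, not taken from any real ASIC.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_NUM_HOPS 5

static int get_last_hop(const uint32_t hop_shifts[], int num_hops, uint64_t page_size)
{
	int hop;

	for (hop = num_hops - 1; hop > 0; hop--) {
		if (hop_shifts[hop] == 0)
			continue;                            /* unused hop level */
		if (page_size <= (1ULL << hop_shifts[hop]))
			break;                               /* this hop resolves the page */
	}
	return hop;
}

int main(void)
{
	/* hypothetical per-hop shifts: hop0 covers the widest range, hop4 the narrowest */
	uint32_t hop_shifts[EXAMPLE_NUM_HOPS] = { 48, 39, 30, 21, 12 };

	printf("4 KiB page -> last hop %d\n",
	       get_last_hop(hop_shifts, EXAMPLE_NUM_HOPS, 4096));
	printf("2 MiB page -> last hop %d\n",
	       get_last_hop(hop_shifts, EXAMPLE_NUM_HOPS, 2ULL << 20));
	return 0;
}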
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index 610acd4a8057..5fe3da5fba30 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -225,27 +225,6 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
}
/**
- * hl_pci_reset_link_through_bridge() - Reset PCI link.
- * @hdev: Pointer to hl_device structure.
- */
-static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- struct pci_dev *parent_port;
- u16 val;
-
- parent_port = pdev->bus->self;
- pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
- val |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
- ssleep(1);
-
- val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
- pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
- ssleep(3);
-}
-
-/**
* hl_pci_set_inbound_region() - Configure inbound region
* @hdev: Pointer to hl_device structure.
* @region: Inbound region number.
@@ -280,21 +259,19 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
}
/* Point to the specified address */
- rc |= hl_pci_iatu_write(hdev, offset + 0x14,
- lower_32_bits(pci_region->addr));
- rc |= hl_pci_iatu_write(hdev, offset + 0x18,
- upper_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr));
+
+ /* Set bar type as memory */
rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
/* Enable + bar/address match + match enable + bar number */
ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
- ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
- pci_region->mode);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, pci_region->mode);
ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
if (pci_region->mode == PCI_BAR_MATCH_MODE)
- ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
- pci_region->bar);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, pci_region->bar);
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
@@ -396,9 +373,6 @@ int hl_pci_init(struct hl_device *hdev)
struct pci_dev *pdev = hdev->pdev;
int rc;
- if (hdev->reset_pcilink)
- hl_pci_reset_link_through_bridge(hdev);
-
rc = pci_enable_device_mem(pdev);
if (rc) {
dev_err(hdev->dev, "can't enable PCI device\n");
@@ -445,7 +419,7 @@ disable_device:
}
/**
- * hl_fw_fini() - PCI finalization code.
+ * hl_pci_fini() - PCI finalization code.
* @hdev: Pointer to hl_device structure
*
* Unmap PCI bars and disable PCI device.
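The iATU setup above composes the region control word with FIELD_PREP() from <linux/bitfield.h>. A minimal userspace sketch that emulates FIELD_PREP() with a shift derived from the mask; the masks below are hypothetical stand-ins, not the real IATU_REGION_CTRL_* layout:

/*
 * Illustrative sketch: build a control word from several bit fields.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_REGION_EN_MASK  0x80000000u /* hypothetical "region enable" field */
#define EXAMPLE_MATCH_MODE_MASK 0x40000000u /* hypothetical "BAR/address match mode" field */
#define EXAMPLE_NUM_MATCH_MASK  0x20000000u /* hypothetical "match enable" field */
#define EXAMPLE_BAR_NUM_MASK    0x00000700u /* hypothetical "BAR number" field */

/* field_prep(): shift a value up into the position described by the mask (GCC/Clang builtin used for the shift) */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t ctrl = 0;

	ctrl |= field_prep(EXAMPLE_REGION_EN_MASK, 1);
	ctrl |= field_prep(EXAMPLE_MATCH_MODE_MASK, 1); /* e.g. BAR match mode */
	ctrl |= field_prep(EXAMPLE_NUM_MATCH_MASK, 1);
	ctrl |= field_prep(EXAMPLE_BAR_NUM_MASK, 2);    /* e.g. BAR 2 */

	printf("iATU ctrl word: 0x%08x\n", ctrl);
	return 0;
}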
diff --git a/drivers/misc/habanalabs/common/security.c b/drivers/misc/habanalabs/common/security.c
new file mode 100644
index 000000000000..6196c0487c8b
--- /dev/null
+++ b/drivers/misc/habanalabs/common/security.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+/**
+ * hl_get_pb_block - return the relevant block within the block array
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_addr: register address in the desired block
+ * @pb_blocks: blocks array
+ * @array_size: blocks array size
+ *
+ */
+static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
+ const u32 pb_blocks[], int array_size)
+{
+ int i;
+ u32 start_addr, end_addr;
+
+ for (i = 0 ; i < array_size ; i++) {
+ start_addr = pb_blocks[i];
+ end_addr = start_addr + HL_BLOCK_SIZE;
+
+ if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
+ return i;
+ }
+
+ dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
+ mm_reg_addr);
+ return -EDOM;
+}
+
+/**
+ * hl_unset_pb_in_block - clear a specific protection bit in a block
+ *
+ * @hdev: pointer to hl_device structure
+ * @reg_offset: register offset will be converted to bit offset in pb block
+ * @sgs_entry: pb array
+ *
+ */
+static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
+ struct hl_block_glbl_sec *sgs_entry)
+{
+ if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
+ dev_err(hdev->dev,
+ "Register offset(%d) is out of range(%d) or invalid\n",
+ reg_offset, HL_BLOCK_SIZE);
+ return -EINVAL;
+ }
+
+ UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
+ (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);
+
+ return 0;
+}
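+
+/*
+ * Worked example (illustrative, hypothetical numbers): assuming HL_BLOCK_SIZE
+ * is 0x1000 and pb_blocks[] = { 0x7FFC0000, 0x7FFC1000 }, a register at
+ * address 0x7FFC1044 is resolved by hl_get_pb_block() to block index 1
+ * (0x7FFC1000 <= addr < 0x7FFC2000). Its offset inside that block is 0x44,
+ * so hl_unset_pb_in_block() clears bit (0x44 >> 2) = 17 in the block's
+ * glbl_sec bitmap, i.e. one protection bit per 4-byte register.
+ */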
+
+/**
+ * hl_unsecure_register - locate the relevant block for this register and
+ * remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_addr: register address to unsecure
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @array_size: blocks array size
+ *
+ */
+int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
+ const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
+ int array_size)
+{
+ u32 reg_offset;
+ int block_num;
+
+ block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
+ array_size);
+ if (block_num < 0)
+ return block_num;
+
+ reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];
+
+ return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
+}
+
+/**
+ * hl_unsecure_register_range - locate the relevant block for this register
+ * range and remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_range: register address range to unsecure
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @array_size: blocks array size
+ *
+ */
+static int hl_unsecure_register_range(struct hl_device *hdev,
+ struct range mm_reg_range, int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[],
+ int array_size)
+{
+ u32 reg_offset;
+ int i, block_num, rc = 0;
+
+ block_num = hl_get_pb_block(hdev,
+ mm_reg_range.start + offset, pb_blocks,
+ array_size);
+ if (block_num < 0)
+ return block_num;
+
+ for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
+ reg_offset = (i + offset) - pb_blocks[block_num];
+ rc |= hl_unset_pb_in_block(hdev, reg_offset,
+ &sgs_array[block_num]);
+ }
+
+ return rc;
+}
+
+/**
+ * hl_unsecure_registers - locate the relevant block for all registers and
+ * remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_array: register address array to unsecure
+ * @mm_array_size: register array size
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @blocks_array_size: blocks array size
+ *
+ */
+int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
+ int mm_array_size, int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
+{
+ int i, rc = 0;
+
+ for (i = 0 ; i < mm_array_size ; i++) {
+ rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
+ pb_blocks, sgs_array, blocks_array_size);
+
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * hl_unsecure_registers_range - locate the relevant block for all register
+ * ranges and remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_range_array: register address range array to unsecure
+ * @mm_array_size: register array size
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @blocks_array_size: blocks array size
+ *
+ */
+static int hl_unsecure_registers_range(struct hl_device *hdev,
+ const struct range mm_reg_range_array[], int mm_array_size,
+ int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
+{
+ int i, rc = 0;
+
+ for (i = 0 ; i < mm_array_size ; i++) {
+ rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
+ offset, pb_blocks, sgs_array, blocks_array_size);
+
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * hl_ack_pb_security_violations - Ack security violation
+ *
+ * @hdev: pointer to hl_device structure
+ * @pb_blocks: blocks array
+ * @block_offset: additional offset to the block
+ * @array_size: blocks array size
+ *
+ */
+static void hl_ack_pb_security_violations(struct hl_device *hdev,
+ const u32 pb_blocks[], u32 block_offset, int array_size)
+{
+ int i;
+ u32 cause, addr, block_base;
+
+ for (i = 0 ; i < array_size ; i++) {
+ block_base = pb_blocks[i] + block_offset;
+ cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
+ if (cause) {
+ addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
+ hdev->asic_funcs->pb_print_security_errors(hdev,
+ block_base, cause, addr);
+ WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
+ }
+ }
+}
+
+/**
+ * hl_config_glbl_sec - set pb in HW according to given pb array
+ *
+ * @hdev: pointer to hl_device structure
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @block_offset: additional offset to the block
+ * @array_size: blocks array size
+ *
+ */
+void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], u32 block_offset,
+ int array_size)
+{
+ int i, j;
+ u32 sgs_base;
+
+ if (hdev->pldm)
+ usleep_range(100, 1000);
+
+ for (i = 0 ; i < array_size ; i++) {
+ sgs_base = block_offset + pb_blocks[i] +
+ HL_BLOCK_GLBL_SEC_OFFS;
+
+ for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
+ WREG32(sgs_base + j * sizeof(u32),
+ sgs_array[i].sec_array[j]);
+ }
+}
+
+/**
+ * hl_secure_block - locally memsets a block to 0
+ *
+ * @hdev: pointer to hl_device structure
+ * @sgs_array: pb array to clear
+ * @array_size: blocks array size
+ *
+ */
+void hl_secure_block(struct hl_device *hdev,
+ struct hl_block_glbl_sec sgs_array[], int array_size)
+{
+ int i;
+
+ for (i = 0 ; i < array_size ; i++)
+ memset((char *)(sgs_array[i].sec_array), 0,
+ HL_BLOCK_GLBL_SEC_SIZE);
+}
+
+/**
+ * hl_init_pb_with_mask - set selected pb instances with mask in HW according
+ * to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply configuration to
+ *              set to HL_PB_SHARED if the configuration should only be applied once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_array: register array
+ * @regs_array_size: register array size
+ * @mask: enabled instances mask: 1- enabled, 0- disabled
+ */
+int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size, u64 mask)
+{
+ int i, j;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, pb_blocks,
+ glbl_sec, blocks_array_size);
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_dcores ; i++) {
+ for (j = 0 ; j < num_instances ; j++) {
+ int seq = i * num_instances + j;
+
+ if (!(mask & BIT_ULL(seq)))
+ continue;
+
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ i * dcore_offset + j * instance_offset,
+ blocks_array_size);
+ }
+ }
+
+ kfree(glbl_sec);
+
+ return 0;
+}
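+
+/*
+ * Usage sketch (illustrative only, all names below are hypothetical): an
+ * ASIC-specific security init would typically call this once per block
+ * family, e.g.
+ *
+ *	rc = hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ *			NUM_OF_TPC_PER_DCORE, TPC_OFFSET,
+ *			tpc_pb_blocks, ARRAY_SIZE(tpc_pb_blocks),
+ *			tpc_unsecured_regs, ARRAY_SIZE(tpc_unsecured_regs),
+ *			prop->tpc_enabled_mask);
+ *
+ * which first secures every TPC block instance and then re-opens only the
+ * registers listed in tpc_unsecured_regs, skipping instances that are
+ * cleared in the mask (e.g. binned-out engines).
+ */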
+
+/**
+ * hl_init_pb - set pb in HW according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply configuration to
+ *              set to HL_PB_SHARED if the configuration should only be applied once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_array: register array
+ * @regs_array_size: register array size
+ *
+ */
+int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size)
+{
+ return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
+ num_instances, instance_offset, pb_blocks,
+ blocks_array_size, regs_array, regs_array_size,
+ ULLONG_MAX);
+}
+
+/**
+ * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
+ *                               given configuration, unsecuring register
+ *                               ranges instead of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply configuration to
+ *              set to HL_PB_SHARED if the configuration should only be applied once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_range_array: register range array
+ * @regs_range_array_size: register range array size
+ * @mask: enabled instances mask: 1- enabled, 0- disabled
+ */
+int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size,
+ u64 mask)
+{
+ int i, j, rc = 0;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ rc = hl_unsecure_registers_range(hdev, regs_range_array,
+ regs_range_array_size, 0, pb_blocks, glbl_sec,
+ blocks_array_size);
+ if (rc)
+ goto free_glbl_sec;
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_dcores ; i++) {
+ for (j = 0 ; j < num_instances ; j++) {
+ int seq = i * num_instances + j;
+
+ if (!(mask & BIT_ULL(seq)))
+ continue;
+
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ i * dcore_offset + j * instance_offset,
+ blocks_array_size);
+ }
+ }
+
+free_glbl_sec:
+ kfree(glbl_sec);
+
+ return rc;
+}
+
+/**
+ * hl_init_pb_ranges - set pb in HW according to given configuration, unsecuring
+ *                     register ranges instead of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply configuration to
+ *              set to HL_PB_SHARED if the configuration should only be applied once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_range_array: register range array
+ * @regs_range_array_size: register range array size
+ *
+ */
+int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size)
+{
+ return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
+ num_instances, instance_offset, pb_blocks,
+ blocks_array_size, regs_range_array,
+ regs_range_array_size, ULLONG_MAX);
+}
+
+/**
+ * hl_init_pb_single_dcore - set pb for a single dcore in HW
+ * according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @dcore_offset: offset from the dcore0
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_array: register array
+ * @regs_array_size: register array size
+ *
+ */
+int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size)
+{
+ int i, rc = 0;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
+ pb_blocks, glbl_sec, blocks_array_size);
+ if (rc)
+ goto free_glbl_sec;
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_instances ; i++)
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ dcore_offset + i * instance_offset,
+ blocks_array_size);
+
+free_glbl_sec:
+ kfree(glbl_sec);
+
+ return rc;
+}
+
+/**
+ * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
+ *                                  to given configuration, unsecuring
+ *                                  register ranges instead of specific
+ *                                  registers
+ *
+ * @hdev: pointer to hl_device structure
+ * @dcore_offset: offset from the dcore0
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_range_array: register range array
+ * @regs_range_array_size: register range array size
+ *
+ */
+int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size)
+{
+ int i;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ hl_unsecure_registers_range(hdev, regs_range_array,
+ regs_range_array_size, 0, pb_blocks, glbl_sec,
+ blocks_array_size);
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_instances ; i++)
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ dcore_offset + i * instance_offset,
+ blocks_array_size);
+
+ kfree(glbl_sec);
+
+ return 0;
+}
+
+/**
+ * hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply configuration to
+ *              set to HL_PB_SHARED if the configuration should only be applied once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @mask: enabled instances mask: 1- enabled, 0- disabled
+ *
+ */
+void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
+{
+ int i, j;
+
+ /* ack all blocks */
+ for (i = 0 ; i < num_dcores ; i++) {
+ for (j = 0 ; j < num_instances ; j++) {
+ int seq = i * num_instances + j;
+
+ if (!(mask & BIT_ULL(seq)))
+ continue;
+
+ hl_ack_pb_security_violations(hdev, pb_blocks,
+ i * dcore_offset + j * instance_offset,
+ blocks_array_size);
+ }
+ }
+}
+
+/**
+ * hl_ack_pb - ack pb in HW according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply configuration to
+ *              set to HL_PB_SHARED if the configuration should only be applied once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ *
+ */
+void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size)
+{
+ hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
+ instance_offset, pb_blocks, blocks_array_size,
+ ULLONG_MAX);
+}
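+
+/*
+ * Note (illustrative): hl_ack_pb() and hl_ack_pb_single_dcore() are the
+ * interrupt-side counterpart of the init helpers above. After a security
+ * violation interrupt, an ASIC handler can call them with the same block
+ * arrays used at init time; each block's GLBL_ERR_CAUSE is read, the
+ * offending address is printed through the ASIC's pb_print_security_errors()
+ * callback and the cause register is cleared by writing it back (see
+ * hl_ack_pb_security_violations()).
+ */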
+
+/**
+ * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
+ * according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @dcore_offset: offset from dcore0
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ *
+ */
+void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size)
+{
+ int i;
+
+ /* ack all blocks */
+ for (i = 0 ; i < num_instances ; i++)
+ hl_ack_pb_security_violations(hdev, pb_blocks,
+ dcore_offset + i * instance_offset,
+ blocks_array_size);
+}
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 9ebeb18ab85e..6c5271f01160 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -73,6 +73,7 @@ static DEVICE_ATTR_RO(clk_cur_freq_mhz);
static struct attribute *hl_dev_clk_attrs[] = {
&dev_attr_clk_max_freq_mhz.attr,
&dev_attr_clk_cur_freq_mhz.attr,
+ NULL,
};
static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -93,6 +94,7 @@ static DEVICE_ATTR_RO(vrm_ver);
static struct attribute *hl_dev_vrm_attrs[] = {
&dev_attr_vrm_ver.attr,
+ NULL,
};
static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
@@ -243,6 +245,12 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI_SEC:
str = "GAUDI SEC";
break;
+ case ASIC_GAUDI2:
+ str = "GAUDI2";
+ break;
+ case ASIC_GAUDI2_SEC:
+ str = "GAUDI2 SEC";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -283,7 +291,7 @@ static ssize_t soft_reset_cnt_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->reset_info.soft_reset_cnt);
+ return sprintf(buf, "%d\n", hdev->reset_info.compute_reset_cnt);
}
static ssize_t hard_reset_cnt_show(struct device *dev,
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index fba322241096..cb2988e2c7a8 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -97,15 +97,8 @@
#define GAUDI_ARB_WDT_TIMEOUT 0xEE6b27FF /* 8 seconds */
-#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
- BIT(GAUDI_ENGINE_ID_MME_0) |\
- BIT(GAUDI_ENGINE_ID_MME_2) |\
- GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
-
#define HBM_SCRUBBING_TIMEOUT_US 1000000 /* 1s */
-#define GAUDI_PLL_MAX 10
-
#define BIN_REG_STRING_SIZE sizeof("0b10101010101010101010101010101010")
#define MONITOR_SOB_STRING_SIZE 256
@@ -241,12 +234,6 @@ gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
"MSG AXI LBW returned with error"
};
-enum gaudi_sm_sei_cause {
- GAUDI_SM_SEI_SO_OVERFLOW,
- GAUDI_SM_SEI_LBW_4B_UNALIGNED,
- GAUDI_SM_SEI_AXI_RESPONSE_ERR
-};
-
static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
@@ -443,6 +430,38 @@ static s64 gaudi_state_dump_specs_props[] = {
[SP_NUM_CORES] = 1,
};
+static const int gaudi_queue_id_to_engine_id[] = {
+ [GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3] = GAUDI_ENGINE_ID_DMA_0,
+ [GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3] = GAUDI_ENGINE_ID_DMA_1,
+ [GAUDI_QUEUE_ID_CPU_PQ] = GAUDI_ENGINE_ID_SIZE,
+ [GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3] = GAUDI_ENGINE_ID_DMA_2,
+ [GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3] = GAUDI_ENGINE_ID_DMA_3,
+ [GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3] = GAUDI_ENGINE_ID_DMA_4,
+ [GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3] = GAUDI_ENGINE_ID_DMA_5,
+ [GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3] = GAUDI_ENGINE_ID_DMA_6,
+ [GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3] = GAUDI_ENGINE_ID_DMA_7,
+ [GAUDI_QUEUE_ID_MME_0_0...GAUDI_QUEUE_ID_MME_0_3] = GAUDI_ENGINE_ID_MME_0,
+ [GAUDI_QUEUE_ID_MME_1_0...GAUDI_QUEUE_ID_MME_1_3] = GAUDI_ENGINE_ID_MME_2,
+ [GAUDI_QUEUE_ID_TPC_0_0...GAUDI_QUEUE_ID_TPC_0_3] = GAUDI_ENGINE_ID_TPC_0,
+ [GAUDI_QUEUE_ID_TPC_1_0...GAUDI_QUEUE_ID_TPC_1_3] = GAUDI_ENGINE_ID_TPC_1,
+ [GAUDI_QUEUE_ID_TPC_2_0...GAUDI_QUEUE_ID_TPC_2_3] = GAUDI_ENGINE_ID_TPC_2,
+ [GAUDI_QUEUE_ID_TPC_3_0...GAUDI_QUEUE_ID_TPC_3_3] = GAUDI_ENGINE_ID_TPC_3,
+ [GAUDI_QUEUE_ID_TPC_4_0...GAUDI_QUEUE_ID_TPC_4_3] = GAUDI_ENGINE_ID_TPC_4,
+ [GAUDI_QUEUE_ID_TPC_5_0...GAUDI_QUEUE_ID_TPC_5_3] = GAUDI_ENGINE_ID_TPC_5,
+ [GAUDI_QUEUE_ID_TPC_6_0...GAUDI_QUEUE_ID_TPC_6_3] = GAUDI_ENGINE_ID_TPC_6,
+ [GAUDI_QUEUE_ID_TPC_7_0...GAUDI_QUEUE_ID_TPC_7_3] = GAUDI_ENGINE_ID_TPC_7,
+ [GAUDI_QUEUE_ID_NIC_0_0...GAUDI_QUEUE_ID_NIC_0_3] = GAUDI_ENGINE_ID_NIC_0,
+ [GAUDI_QUEUE_ID_NIC_1_0...GAUDI_QUEUE_ID_NIC_1_3] = GAUDI_ENGINE_ID_NIC_1,
+ [GAUDI_QUEUE_ID_NIC_2_0...GAUDI_QUEUE_ID_NIC_2_3] = GAUDI_ENGINE_ID_NIC_2,
+ [GAUDI_QUEUE_ID_NIC_3_0...GAUDI_QUEUE_ID_NIC_3_3] = GAUDI_ENGINE_ID_NIC_3,
+ [GAUDI_QUEUE_ID_NIC_4_0...GAUDI_QUEUE_ID_NIC_4_3] = GAUDI_ENGINE_ID_NIC_4,
+ [GAUDI_QUEUE_ID_NIC_5_0...GAUDI_QUEUE_ID_NIC_5_3] = GAUDI_ENGINE_ID_NIC_5,
+ [GAUDI_QUEUE_ID_NIC_6_0...GAUDI_QUEUE_ID_NIC_6_3] = GAUDI_ENGINE_ID_NIC_6,
+ [GAUDI_QUEUE_ID_NIC_7_0...GAUDI_QUEUE_ID_NIC_7_3] = GAUDI_ENGINE_ID_NIC_7,
+ [GAUDI_QUEUE_ID_NIC_8_0...GAUDI_QUEUE_ID_NIC_8_3] = GAUDI_ENGINE_ID_NIC_8,
+ [GAUDI_QUEUE_ID_NIC_9_0...GAUDI_QUEUE_ID_NIC_9_3] = GAUDI_ENGINE_ID_NIC_9,
+};
+
/* The order here is opposite to the order of the indexing in the h/w.
* i.e. SYNC_MGR_W_S is actually 0, SYNC_MGR_E_S is 1, etc.
*/
@@ -556,10 +575,13 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
get_collective_mode(hdev, i);
}
+ prop->cache_line_size = DEVICE_CACHE_LINE_SIZE;
+ prop->cfg_base_address = CFG_BASE;
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->host_base_address = HOST_PHYS_BASE;
prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
+ prop->completion_mode = HL_COMPLETION_MODE_JOB;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
@@ -577,16 +599,17 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = GAUDI_HBM_SIZE_32GB;
- prop->dram_end_address = prop->dram_base_address +
- prop->dram_size;
+ prop->dram_end_address = prop->dram_base_address + prop->dram_size;
prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
prop->sram_base_address = SRAM_BASE_ADDR;
prop->sram_size = SRAM_SIZE;
- prop->sram_end_address = prop->sram_base_address +
- prop->sram_size;
- prop->sram_user_base_address = prop->sram_base_address +
- SRAM_USER_BASE_OFFSET;
+ prop->sram_end_address = prop->sram_base_address + prop->sram_size;
+ prop->sram_user_base_address =
+ prop->sram_base_address + SRAM_USER_BASE_OFFSET;
+
+ prop->mmu_cache_mng_addr = MMU_CACHE_MNG_ADDR;
+ prop->mmu_cache_mng_size = MMU_CACHE_MNG_SIZE;
prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
if (hdev->pldm)
@@ -655,7 +678,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->sync_stream_first_mon +
(num_sync_stream_queues * HL_RSVD_MONS);
- prop->first_available_user_msix_interrupt = USHRT_MAX;
+ prop->first_available_user_interrupt = USHRT_MAX;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@@ -777,6 +800,7 @@ static int gaudi_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
+ resource_size_t pci_bar_size;
u32 fw_boot_status;
int rc;
@@ -787,24 +811,20 @@ static int gaudi_early_init(struct hl_device *hdev)
}
/* Check BAR sizes */
- if (pci_resource_len(pdev, SRAM_BAR_ID) != SRAM_BAR_SIZE) {
- dev_err(hdev->dev,
- "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
- SRAM_BAR_ID,
- (unsigned long long) pci_resource_len(pdev,
- SRAM_BAR_ID),
- SRAM_BAR_SIZE);
+ pci_bar_size = pci_resource_len(pdev, SRAM_BAR_ID);
+
+ if (pci_bar_size != SRAM_BAR_SIZE) {
+ dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
+ SRAM_BAR_ID, &pci_bar_size, SRAM_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
- if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
- dev_err(hdev->dev,
- "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
- CFG_BAR_ID,
- (unsigned long long) pci_resource_len(pdev,
- CFG_BAR_ID),
- CFG_BAR_SIZE);
+ pci_bar_size = pci_resource_len(pdev, CFG_BAR_ID);
+
+ if (pci_bar_size != CFG_BAR_SIZE) {
+ dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
+ CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
@@ -842,11 +862,7 @@ pci_init:
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
- rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
- mmCPU_BOOT_DEV_STS0,
- mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
- mmCPU_BOOT_ERR1,
- GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+ rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
hdev->asic_funcs->hw_fini(hdev, true, false);
@@ -854,8 +870,7 @@ pci_init:
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
+ dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
hdev->asic_funcs->hw_fini(hdev, true, false);
}
@@ -1046,8 +1061,7 @@ again:
}
fw_size = fw->size;
- cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size,
- &dma_handle, GFP_KERNEL | __GFP_ZERO);
+ cpu_addr = hl_asic_dma_alloc_coherent(hdev, fw_size, &dma_handle, GFP_KERNEL | __GFP_ZERO);
if (!cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate %zu of dma memory for TPC kernel\n",
@@ -1060,8 +1074,7 @@ again:
rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);
- hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr,
- dma_handle);
+ hl_asic_dma_free_coherent(hdev, fw->size, cpu_addr, dma_handle);
out:
release_firmware(fw);
@@ -1391,6 +1404,19 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
return 0;
}
+static u32 gaudi_get_patched_cb_extra_size(u32 user_cb_size)
+{
+ u32 cacheline_end, additional_commands;
+
+ cacheline_end = round_up(user_cb_size, DEVICE_CACHE_LINE_SIZE);
+ additional_commands = sizeof(struct packet_msg_prot) * 2;
+
+ if (user_cb_size + additional_commands > cacheline_end)
+ return cacheline_end - user_cb_size + additional_commands;
+ else
+ return additional_commands;
+}
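+
+/*
+ * Example (assuming a 128-byte DEVICE_CACHE_LINE_SIZE and 16-byte
+ * packet_msg_prot): a 240-byte user CB rounds up to 256 and 240 + 32 > 256,
+ * so 48 extra bytes are reserved (16 bytes of NOP padding plus the two
+ * MSG_PROT packets) and the completion/MSI packets then start on a cacheline
+ * boundary. A 200-byte user CB fits (200 + 32 <= 256), so only the 32
+ * MSG_PROT bytes are appended. The padding itself is written as NOP packets
+ * in gaudi_add_end_of_cb_packets().
+ */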
+
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
@@ -1411,7 +1437,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
* 1 fence packet
* 4 msg short packets for monitor 2 configuration
* 1 fence packet
- * 2 msg prot packets for completion and MSI-X
+ * 2 msg prot packets for completion and MSI
*/
cb_size = sizeof(struct packet_msg_short) * 8 +
sizeof(struct packet_fence) * 2 +
@@ -1605,14 +1631,14 @@ static int gaudi_late_init(struct hl_device *hdev)
gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1);
}
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc;
}
/* Scrub both SRAM and DRAM */
- rc = hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
+ rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc)
goto disable_pci_access;
@@ -1650,7 +1676,7 @@ static int gaudi_late_init(struct hl_device *hdev)
return 0;
disable_pci_access:
- hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
return rc;
}
@@ -1692,11 +1718,9 @@ static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
*/
for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
- virt_addr_arr[i] =
- hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- &dma_addr_arr[i],
- GFP_KERNEL | __GFP_ZERO);
+ virt_addr_arr[i] = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
+ &dma_addr_arr[i],
+ GFP_KERNEL | __GFP_ZERO);
if (!virt_addr_arr[i]) {
rc = -ENOMEM;
goto free_dma_mem_arr;
@@ -1725,9 +1749,7 @@ static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
free_dma_mem_arr:
for (j = 0 ; j < i ; j++)
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- virt_addr_arr[j],
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, virt_addr_arr[j],
dma_addr_arr[j]);
return rc;
@@ -1743,9 +1765,7 @@ static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
q = &gaudi->internal_qmans[i];
if (!q->pq_kernel_addr)
continue;
- hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
- q->pq_kernel_addr,
- q->pq_dma_addr);
+ hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr);
}
}
@@ -1780,10 +1800,8 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
goto free_internal_qmans_pq_mem;
}
- q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
- hdev, q->pq_size,
- &q->pq_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!q->pq_kernel_addr) {
rc = -ENOMEM;
goto free_internal_qmans_pq_mem;
@@ -1924,10 +1942,8 @@ free_cpu_dma_mem:
if (!hdev->asic_prop.fw_security_enabled)
GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
hdev->cpu_pci_msb_addr);
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- hdev->cpu_accessible_dma_mem,
- hdev->cpu_accessible_dma_address);
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
free_dma_pool:
dma_pool_destroy(hdev->dma_pool);
free_gaudi_device:
@@ -1947,10 +1963,8 @@ static int gaudi_sw_fini(struct hl_device *hdev)
GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
hdev->cpu_pci_msb_addr);
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- hdev->cpu_accessible_dma_mem,
- hdev->cpu_accessible_dma_address);
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
dma_pool_destroy(hdev->dma_pool);
@@ -2139,9 +2153,6 @@ static void gaudi_init_scrambler_sram(struct hl_device *hdev)
if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
return;
- if (!hdev->sram_scrambler_enable)
- return;
-
WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN,
@@ -2210,9 +2221,6 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
return;
- if (!hdev->dram_scrambler_enable)
- return;
-
WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN,
@@ -2396,128 +2404,6 @@ static void gaudi_init_e2e(struct hl_device *hdev)
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
- if (!hdev->dram_scrambler_enable) {
- WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
- WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
- WREG32(mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
- WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
- WREG32(mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
- }
-
WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN,
@@ -3339,19 +3225,19 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
u32 nic_qm_err_cfg, irq_handler_offset;
u32 q_off;
- mtr_base_en_lo = lower_32_bits(CFG_BASE +
+ mtr_base_en_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
- so_base_en_lo = lower_32_bits(CFG_BASE +
+ so_base_en_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
- mtr_base_ws_lo = lower_32_bits(CFG_BASE +
+ mtr_base_ws_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
- so_base_ws_lo = lower_32_bits(CFG_BASE +
+ so_base_ws_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
@@ -3851,8 +3737,8 @@ static int gaudi_mmu_init(struct hl_device *hdev)
}
/* init MMU cache manage page */
- WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
- WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+ WREG32(mmSTLB_CACHE_INV_BASE_39_8, prop->mmu_cache_mng_addr >> 8);
+ WREG32(mmSTLB_CACHE_INV_BASE_49_40, prop->mmu_cache_mng_addr >> 40);
/* mem cache invalidation */
WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
@@ -3862,8 +3748,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
- WREG32(mmSTLB_HOP_CONFIGURATION,
- hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
+ WREG32(mmSTLB_HOP_CONFIGURATION, 0x30440);
/*
* The H/W expects the first PI after init to be 1. After wraparound
@@ -3943,6 +3828,18 @@ static void gaudi_init_static_firmware_loader(struct hl_device *hdev)
GAUDI_CPU_RESET_WAIT_MSEC;
}
+static void gaudi_init_firmware_preload_params(struct hl_device *hdev)
+{
+ struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
+
+ pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
+ pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
+ pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
+ pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
+ pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
+ pre_fw_load->wait_for_preboot_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;
+}
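+
+/*
+ * Note: these pre-load parameters replace the register/timeout arguments that
+ * were previously passed to hl_fw_read_preboot_status() directly; the common
+ * firmware loader now takes them from hdev->fw_loader.pre_fw_load instead.
+ */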
+
static void gaudi_init_firmware_loader(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -4338,7 +4235,7 @@ static int gaudi_suspend(struct hl_device *hdev)
{
int rc;
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -4743,7 +4640,7 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 cur_addr = DRAM_BASE_ADDR_USER;
+ u64 cur_addr = prop->dram_user_base_address;
u32 chunk_size, busy;
int rc, dma_id;
@@ -4801,51 +4698,47 @@ static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val)
return 0;
}
-static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
+static int gaudi_scrub_device_mem(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 wait_to_idle_time = hdev->pdev ? HBM_SCRUBBING_TIMEOUT_US :
+ min_t(u64, HBM_SCRUBBING_TIMEOUT_US * 10, HL_SIM_MAX_TIMEOUT_US);
+ u64 addr, size, val = hdev->memory_scrub_val;
+ ktime_t timeout;
int rc = 0;
- u64 val = 0;
if (!hdev->memory_scrub)
return 0;
- if (!addr && !size) {
- /* Wait till device is idle */
- rc = hl_poll_timeout(
- hdev,
- mmDMA0_CORE_STS0/* dummy */,
- val/* dummy */,
- (hdev->asic_funcs->is_device_idle(hdev, NULL,
- 0, NULL)),
- 1000,
- HBM_SCRUBBING_TIMEOUT_US);
- if (rc) {
+ timeout = ktime_add_us(ktime_get(), wait_to_idle_time);
+ while (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
+ if (ktime_compare(ktime_get(), timeout) > 0) {
dev_err(hdev->dev, "waiting for idle timeout\n");
- return -EIO;
+ return -ETIMEDOUT;
}
+ usleep_range((1000 >> 2) + 1, 1000);
+ }
- /* Scrub SRAM */
- addr = prop->sram_user_base_address;
- size = hdev->pldm ? 0x10000 :
- (prop->sram_size - SRAM_USER_BASE_OFFSET);
- val = 0x7777777777777777ull;
+ /* Scrub SRAM */
+ addr = prop->sram_user_base_address;
+ size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET;
- rc = gaudi_memset_device_memory(hdev, addr, size, val);
- if (rc) {
- dev_err(hdev->dev,
- "Failed to clear SRAM in mem scrub all\n");
- return rc;
- }
+	dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n",
+ addr, addr + size, val);
+ rc = gaudi_memset_device_memory(hdev, addr, size, val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear SRAM (%d)\n", rc);
+ return rc;
+ }
- /* Scrub HBM using all DMA channels in parallel */
- rc = gaudi_scrub_device_dram(hdev, 0xdeadbeaf);
- if (rc)
- dev_err(hdev->dev,
- "Failed to clear HBM in mem scrub all\n");
+ /* Scrub HBM using all DMA channels in parallel */
+ rc = gaudi_scrub_device_dram(hdev, val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear HBM (%d)\n", rc);
+ return rc;
}
- return rc;
+ return 0;
}
static void *gaudi_get_int_queue_base(struct hl_device *hdev,
@@ -4902,8 +4795,7 @@ static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
fence_val = GAUDI_QMAN0_FENCE_VAL;
- fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
- &fence_dma_addr);
+ fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate memory for H/W queue %d testing\n",
@@ -4913,9 +4805,8 @@ static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
*fence_ptr = 0;
- fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
- sizeof(struct packet_msg_prot),
- GFP_KERNEL, &pkt_dma_addr);
+ fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
+ &pkt_dma_addr);
if (!fence_pkt) {
dev_err(hdev->dev,
"Failed to allocate packet for H/W queue %d testing\n",
@@ -4955,11 +4846,9 @@ static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
}
free_pkt:
- hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
- pkt_dma_addr);
+ hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
free_fence_ptr:
- hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
- fence_dma_addr);
+ hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
return rc;
}
@@ -5305,11 +5194,13 @@ static int gaudi_validate_cb(struct hl_device *hdev,
/*
* The new CB should have space at the end for two MSG_PROT packets:
- * 1. A packet that will act as a completion packet
- * 2. A packet that will generate MSI-X interrupt
+ * 1. Optional NOP padding for cacheline alignment
+ * 2. A packet that will act as a completion packet
+ * 3. A packet that will generate MSI interrupt
*/
if (parser->completion)
- parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+ parser->patched_cb_size += gaudi_get_patched_cb_extra_size(
+ parser->patched_cb_size);
return rc;
}
@@ -5532,13 +5423,14 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
int rc;
/*
- * The new CB should have space at the end for two MSG_PROT pkt:
- * 1. A packet that will act as a completion packet
- * 2. A packet that will generate MSI interrupt
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. Optional NOP padding for cacheline alignment
+ * 2. A packet that will act as a completion packet
+ * 3. A packet that will generate MSI interrupt
*/
if (parser->completion)
parser->patched_cb_size = parser->user_cb_size +
- sizeof(struct packet_msg_prot) * 2;
+ gaudi_get_patched_cb_extra_size(parser->user_cb_size);
else
parser->patched_cb_size = parser->user_cb_size;
@@ -5562,8 +5454,14 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
}
/*
- * The check that parser->user_cb_size <= parser->user_cb->size was done
- * in validate_queue_index().
+ * We are protected from overflow because the check
+ * "parser->user_cb_size <= parser->user_cb->size" was done in get_cb_from_cs_chunk()
+ * in the common code. That check is done only if is_kernel_allocated_cb is true.
+ *
+ * There is no option to reach here without going through that check because:
+ * 1. validate_queue_index() assigns true to is_kernel_allocated_cb for any submission to
+ * an external queue.
+ * 2. For Gaudi, we only parse CBs that were submitted to the external queues.
*/
memcpy(parser->patched_cb->kernel_address,
parser->user_cb->kernel_address,
@@ -5654,15 +5552,17 @@ static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
{
struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
- u32 nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT +
- ((parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2));
+ u32 nic_queue_offset, nic_mask_q_id;
if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) &&
- (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3) &&
- (!(gaudi->hw_cap_initialized & nic_mask_q_id))) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n",
- parser->hw_queue_id);
- return -EINVAL;
+ (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3)) {
+ nic_queue_offset = parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0;
+ nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT + (nic_queue_offset >> 2));
+
+ if (!(gaudi->hw_cap_initialized & nic_mask_q_id)) {
+ dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ return -EINVAL;
+ }
}
/* For internal queue jobs just check if CB address is valid */
@@ -5705,18 +5605,24 @@ static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
return gaudi_parse_cb_no_mmu(hdev, parser);
}
-static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
- void *kernel_address, u32 len,
- u64 cq_addr, u32 cq_val, u32 msi_vec,
- bool eb)
+static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
+ u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
+ u32 msi_vec, bool eb)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt;
+ struct packet_nop *cq_padding;
u64 msi_addr;
u32 tmp;
+ cq_padding = kernel_address + original_len;
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
+ while ((void *)cq_padding < (void *)cq_pkt) {
+ cq_padding->ctl = cpu_to_le32(FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_NOP));
+ cq_padding++;
+ }
+
tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
@@ -6039,10 +5945,10 @@ static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 size = hdev->asic_prop.mmu_pgt_size +
+ hdev->asic_prop.mmu_cache_mng_size;
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 addr = prop->mmu_pgt_addr;
- u32 size = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE;
+ u64 addr = hdev->asic_prop.mmu_pgt_addr;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return 0;
@@ -6113,10 +6019,7 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
bool is_eng_idle;
int rc = 0, dma_id;
- kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
- hdev, SZ_2M,
- &dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ kernel_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &dma_addr, GFP_KERNEL | __GFP_ZERO);
if (!kernel_addr)
return -ENOMEM;
@@ -6205,8 +6108,7 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
out:
hdev->asic_funcs->hw_queues_unlock(hdev);
- hdev->asic_funcs->asic_dma_free_coherent(hdev, SZ_2M, kernel_addr,
- dma_addr);
+ hl_asic_dma_free_coherent(hdev, SZ_2M, kernel_addr, dma_addr);
return rc;
}
@@ -6552,8 +6454,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
return -EBUSY;
}
- fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
- &fence_dma_addr);
+ fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate fence memory for QMAN0\n");
@@ -6599,8 +6500,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
free_fence_ptr:
WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
- hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
- fence_dma_addr);
+ hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
return rc;
}
@@ -6983,14 +6883,15 @@ static inline u32 gaudi_queue_idx_dec(u32 idx, u32 q_len)
}
/**
- * gaudi_print_sw_config_stream_data - print SW config stream data
+ * gaudi_handle_sw_config_stream_data - print SW config stream data
*
* @hdev: pointer to the habanalabs device structure
* @stream: the QMAN's stream
* @qman_base: base address of QMAN registers block
+ * @event_mask: mask of the last events occurred
*/
-static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream,
- u64 qman_base)
+static void gaudi_handle_sw_config_stream_data(struct hl_device *hdev, u32 stream,
+ u64 qman_base, u64 event_mask)
{
u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr;
u32 cq_ptr_lo_off, size;
@@ -7008,24 +6909,32 @@ static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream
size = RREG32(cq_tsize);
dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n",
stream, cq_ptr, size);
+
+ if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
+ hdev->last_error.undef_opcode.cq_addr = cq_ptr;
+ hdev->last_error.undef_opcode.cq_size = size;
+ hdev->last_error.undef_opcode.stream_id = stream;
+ }
}
/**
- * gaudi_print_last_pqes_on_err - print last PQEs on error
+ * gaudi_handle_last_pqes_on_err - print last PQEs on error
*
* @hdev: pointer to the habanalabs device structure
* @qid_base: first QID of the QMAN (out of 4 streams)
* @stream: the QMAN's stream
* @qman_base: base address of QMAN registers block
+ * @event_mask: mask of the last events occurred
* @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE)
*/
-static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
+static void gaudi_handle_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
u32 stream, u64 qman_base,
+ u64 event_mask,
bool pr_sw_conf)
{
u32 ci, qm_ci_stream_off, queue_len;
struct hl_hw_queue *q;
- u64 pq_ci;
+ u64 pq_ci, addr[PQ_FETCHER_CACHE_SIZE];
int i;
q = &hdev->kernel_queues[qid_base + stream];
@@ -7040,16 +6949,16 @@ static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
hdev->asic_funcs->hw_queues_lock(hdev);
if (pr_sw_conf)
- gaudi_print_sw_config_stream_data(hdev, stream, qman_base);
+ gaudi_handle_sw_config_stream_data(hdev, stream, qman_base, event_mask);
ci = RREG32(pq_ci);
 	/* we should start printing from ci - 1 */
ci = gaudi_queue_idx_dec(ci, queue_len);
+ memset(addr, 0, sizeof(addr));
for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) {
struct hl_bd *bd;
- u64 addr;
u32 len;
bd = q->kernel_address;
@@ -7060,52 +6969,68 @@ static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
if (!len)
break;
- addr = le64_to_cpu(bd->ptr);
+ addr[i] = le64_to_cpu(bd->ptr);
dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n",
- stream, ci, addr, len);
+ stream, ci, addr[i], len);
/* get previous ci, wrap if needed */
ci = gaudi_queue_idx_dec(ci, queue_len);
}
+ if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
+ struct undefined_opcode_info *undef_opcode = &hdev->last_error.undef_opcode;
+ u32 arr_idx = undef_opcode->cb_addr_streams_len;
+
+ if (arr_idx == 0) {
+ undef_opcode->timestamp = ktime_get();
+ undef_opcode->engine_id = gaudi_queue_id_to_engine_id[qid_base];
+ }
+
+ memcpy(undef_opcode->cb_addr_streams[arr_idx], addr, sizeof(addr));
+ undef_opcode->cb_addr_streams_len++;
+ }
+
hdev->asic_funcs->hw_queues_unlock(hdev);
}
/**
- * print_qman_data_on_err - extract QMAN data on error
+ * handle_qman_data_on_err - extract QMAN data on error
*
* @hdev: pointer to the habanalabs device structure
* @qid_base: first QID of the QMAN (out of 4 streams)
* @stream: the QMAN's stream
* @qman_base: base address of QMAN registers block
+ * @event_mask: mask of the last events occurred
*
 * This function attempts to extract as much data as possible on a QMAN error.
* On upper CP print the SW config stream data and last 8 PQEs.
* On lower CP print SW config data and last PQEs of ALL 4 upper CPs
*/
-static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base,
- u32 stream, u64 qman_base)
+static void handle_qman_data_on_err(struct hl_device *hdev, u32 qid_base,
+ u32 stream, u64 qman_base, u64 event_mask)
{
u32 i;
if (stream != QMAN_STREAMS) {
- gaudi_print_last_pqes_on_err(hdev, qid_base, stream, qman_base,
- true);
+ gaudi_handle_last_pqes_on_err(hdev, qid_base, stream,
+ qman_base, event_mask, true);
return;
}
- gaudi_print_sw_config_stream_data(hdev, stream, qman_base);
+ /* handle Lower-CP */
+ gaudi_handle_sw_config_stream_data(hdev, stream, qman_base, event_mask);
for (i = 0; i < QMAN_STREAMS; i++)
- gaudi_print_last_pqes_on_err(hdev, qid_base, i, qman_base,
- false);
+ gaudi_handle_last_pqes_on_err(hdev, qid_base, i,
+ qman_base, event_mask, false);
}
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
const char *qm_name,
u64 qman_base,
- u32 qid_base)
+ u32 qid_base,
+ u64 *event_mask)
{
u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
u64 glbl_sts_addr, arb_err_addr;
@@ -7136,12 +7061,21 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
glbl_sts_clr_val |= BIT(j);
}
}
+ /* check for undefined opcode */
+ if (glbl_sts_val & TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK &&
+ hdev->last_error.undef_opcode.write_enable) {
+ memset(&hdev->last_error.undef_opcode, 0,
+ sizeof(hdev->last_error.undef_opcode));
+
+ hdev->last_error.undef_opcode.write_enable = false;
+ *event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
+ }
/* Write 1 clear errors */
if (!hdev->stop_on_err)
WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
else
- print_qman_data_on_err(hdev, qid_base, i, qman_base);
+ handle_qman_data_on_err(hdev, qid_base, i, qman_base, *event_mask);
}
arb_err_val = RREG32(arb_err_addr);
@@ -7290,7 +7224,7 @@ extract_ecc_info:
ecc_address, ecc_syndrom, memory_wrapper_idx);
}
-static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
+static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
u64 qman_base;
char desc[32];
@@ -7299,14 +7233,25 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
switch (event_type) {
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ /* In TPC QM event, notify on TPC assertion. While there isn't
+ * a specific event for assertion yet, the FW generates QM event.
+ * The SW upper layer will inspect an internal mapped area to indicate
+ * if the event is a tpc assertion or tpc QM.
+ */
+ *event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
index = event_type - GAUDI_EVENT_TPC0_QM;
qid_base = GAUDI_QUEUE_ID_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmTPC0_QM_BASE + index * TPC_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
break;
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
- index = event_type - GAUDI_EVENT_MME0_QM;
- qid_base = GAUDI_QUEUE_ID_MME_0_0 + index * QMAN_STREAMS;
+ if (event_type == GAUDI_EVENT_MME0_QM) {
+ index = 0;
+ qid_base = GAUDI_QUEUE_ID_MME_0_0;
+ } else { /* event_type == GAUDI_EVENT_MME2_QM */
+ index = 2;
+ qid_base = GAUDI_QUEUE_ID_MME_1_0;
+ }
qman_base = mmMME0_QM_BASE + index * MME_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
break;
@@ -7373,7 +7318,7 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
return;
}
- gaudi_handle_qman_err_generic(hdev, desc, qman_base, qid_base);
+ gaudi_handle_qman_err_generic(hdev, desc, qman_base, qid_base, event_mask);
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
@@ -7404,8 +7349,8 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
/* In case it's the first razwi, save its parameters*/
- rc = atomic_cmpxchg(&hdev->last_error.razwi.write_disable, 0, 1);
- if (!rc) {
+ rc = atomic_cmpxchg(&hdev->last_error.razwi.write_enable, 1, 0);
+ if (rc) {
hdev->last_error.razwi.timestamp = ktime_get();
hdev->last_error.razwi.addr = razwi_addr;
hdev->last_error.razwi.engine_id_1 = engine_id_1;
@@ -7662,8 +7607,7 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
}
-static void gaudi_print_clk_change_info(struct hl_device *hdev,
- u16 event_type)
+static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
{
ktime_t zero_time = ktime_set(0, 0);
@@ -7711,16 +7655,15 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev,
mutex_unlock(&hdev->clk_throttling.lock);
}
-static void gaudi_handle_eqe(struct hl_device *hdev,
- struct hl_eq_entry *eq_entry)
+static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 data = le64_to_cpu(eq_entry->data[0]);
+ u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
- u32 fw_fatal_err_flag = 0;
+ u32 fw_fatal_err_flag = 0, flags = 0;
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
- bool reset_required;
+ bool reset_required, reset_direct = false;
u8 cause;
int rc;
@@ -7808,7 +7751,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- hl_device_reset(hdev, 0);
+ reset_direct = true;
+ goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7830,7 +7774,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- hl_device_reset(hdev, 0);
+ reset_direct = true;
+ goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7892,22 +7837,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_NIC4_QM0:
case GAUDI_EVENT_NIC4_QM1:
case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
- gaudi_print_irq_info(hdev, event_type, true);
- gaudi_handle_qman_err(hdev, event_type);
- hl_fw_unmask_irq(hdev, event_type);
- break;
-
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
gaudi_print_irq_info(hdev, event_type, true);
- gaudi_handle_qman_err(hdev, event_type);
+ gaudi_handle_qman_err(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
-
- /* In TPC QM event, notify on TPC assertion. While there isn't
- * a specific event for assertion yet, the FW generates QM event.
- * The SW upper layer will inspect an internal mapped area to indicate
- * if the event is a tpc assertion or tpc QM.
- */
- hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT);
break;
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
@@ -7978,21 +7911,38 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
break;
}
+ if (event_mask)
+ hl_notifier_event_send_all(hdev, event_mask);
+
return;
reset_device:
- if (hdev->asic_prop.fw_security_enabled)
- hl_device_reset(hdev, HL_DRV_RESET_HARD
- | HL_DRV_RESET_BYPASS_REQ_TO_FW
- | fw_fatal_err_flag);
- else if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_DELAY | fw_fatal_err_flag);
+ reset_required = true;
+
+ if (hdev->asic_prop.fw_security_enabled && !reset_direct) {
+ flags = HL_DRV_RESET_HARD | HL_DRV_RESET_BYPASS_REQ_TO_FW | fw_fatal_err_flag;
+
+ /* notify on device unavailable while the reset is triggered by FW */
+ event_mask |= (HL_NOTIFIER_EVENT_DEVICE_RESET |
+ HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE);
+ } else if (hdev->hard_reset_on_fw_events) {
+ flags = HL_DRV_RESET_HARD | HL_DRV_RESET_DELAY | fw_fatal_err_flag;
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
+ } else {
+ reset_required = false;
+ }
+
+ /* Even if the reset is not executed, a notification on the
+ * occurred event needs to be sent here
+ */
+ hl_notifier_event_send_all(hdev, event_mask);
+ if (reset_required)
+ hl_device_reset(hdev, flags);
else
hl_fw_unmask_irq(hdev, event_type);
}
-static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
- u32 *size)
+static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -8005,8 +7955,7 @@ static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
return gaudi->events_stat;
}
-static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
- u32 flags)
+static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 status, timeout_usec;
@@ -8049,8 +7998,7 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
}
-static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
- u32 asid, u64 phys_addr)
+static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, u64 phys_addr)
{
u32 status, timeout_usec;
int rc;
@@ -8405,11 +8353,10 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return 0;
- hdev->internal_cb_pool_virt_addr =
- hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HOST_SPACE_INTERNAL_CB_SZ,
- &hdev->internal_cb_pool_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev,
+ HOST_SPACE_INTERNAL_CB_SZ,
+ &hdev->internal_cb_pool_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!hdev->internal_cb_pool_virt_addr)
return -ENOMEM;
@@ -8464,10 +8411,8 @@ unreserve_internal_cb_pool:
destroy_internal_cb_pool:
gen_pool_destroy(hdev->internal_cb_pool);
free_internal_cb_pool:
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HOST_SPACE_INTERNAL_CB_SZ,
- hdev->internal_cb_pool_virt_addr,
- hdev->internal_cb_pool_dma_addr);
+ hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
+ hdev->internal_cb_pool_dma_addr);
return rc;
}
@@ -8490,10 +8435,8 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
gen_pool_destroy(hdev->internal_cb_pool);
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HOST_SPACE_INTERNAL_CB_SZ,
- hdev->internal_cb_pool_virt_addr,
- hdev->internal_cb_pool_dma_addr);
+ hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
+ hdev->internal_cb_pool_dma_addr);
}
static int gaudi_ctx_init(struct hl_ctx *ctx)
@@ -8522,6 +8465,11 @@ static void gaudi_ctx_fini(struct hl_ctx *ctx)
gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
}
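+/* Gaudi has nothing to do before a CS is scheduled, hence the empty implementation */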
+static int gaudi_pre_schedule_cs(struct hl_cs *cs)
+{
+ return 0;
+}
+
static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
return gaudi_cq_assignment[cq_idx];
@@ -8893,6 +8841,11 @@ static void gaudi_enable_events_from_fw(struct hl_device *hdev)
gaudi_irq_map_table[GAUDI_EVENT_INTS_REGISTER].cpu_id);
}
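+/* Acking an MMU page fault or access error is not supported on Gaudi */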
+static int gaudi_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
+{
+ return -EINVAL;
+}
+
static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
{
switch (pll_idx) {
@@ -9167,10 +9120,8 @@ static u32 *gaudi_get_stream_master_qid_arr(void)
return gaudi_stream_master;
}
-static void gaudi_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+static void gaudi_check_if_razwi_happened(struct hl_device *hdev)
{
- /* set 0 since multiple pages are not supported */
- info->page_order_bitmask = 0;
}
static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -9187,6 +9138,7 @@ static DEVICE_ATTR_RO(infineon_ver);
static struct attribute *gaudi_vrm_dev_attrs[] = {
&dev_attr_infineon_ver.attr,
+ NULL,
};
static void gaudi_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
@@ -9224,7 +9176,6 @@ static const struct hl_asic_funcs gaudi_funcs = {
.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = gaudi_cs_parser,
.asic_dma_map_sgtable = hl_dma_map_sgtable,
- .get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
.add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
.update_eq_ci = gaudi_update_eq_ci,
.context_switch = gaudi_context_switch,
@@ -9244,6 +9195,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
.non_hard_reset_late_init = gaudi_non_hard_reset_late_init,
.hw_queues_lock = gaudi_hw_queues_lock,
.hw_queues_unlock = gaudi_hw_queues_unlock,
+ .kdma_lock = NULL,
+ .kdma_unlock = NULL,
.get_pci_id = gaudi_get_pci_id,
.get_eeprom_data = gaudi_get_eeprom_data,
.get_monitor_dump = gaudi_get_monitor_dump,
@@ -9255,6 +9208,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.halt_coresight = gaudi_halt_coresight,
.ctx_init = gaudi_ctx_init,
.ctx_fini = gaudi_ctx_fini,
+ .pre_schedule_cs = gaudi_pre_schedule_cs,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
@@ -9265,24 +9219,27 @@ static const struct hl_asic_funcs gaudi_funcs = {
.reset_sob = gaudi_reset_sob,
.reset_sob_group = gaudi_reset_sob_group,
.get_device_time = gaudi_get_device_time,
+ .pb_print_security_errors = NULL,
.collective_wait_init_cs = gaudi_collective_wait_init_cs,
.collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
+ .get_dec_base_addr = NULL,
.scramble_addr = hl_mmu_scramble_addr,
.descramble_addr = hl_mmu_descramble_addr,
.ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
.get_hw_block_id = gaudi_get_hw_block_id,
.hw_block_mmap = gaudi_block_mmap,
.enable_events_from_fw = gaudi_enable_events_from_fw,
+ .ack_mmu_errors = gaudi_ack_mmu_page_fault_or_access_error,
.map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
+ .init_firmware_preload_params = gaudi_init_firmware_preload_params,
.init_firmware_loader = gaudi_init_firmware_loader,
.init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
.state_dump_init = gaudi_state_dump_init,
.get_sob_addr = gaudi_get_sob_addr,
.set_pci_memory_regions = gaudi_set_pci_memory_regions,
.get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr,
- .is_valid_dram_page_size = NULL,
+ .check_if_razwi_happened = gaudi_check_if_razwi_happened,
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
- .get_valid_dram_page_orders = gaudi_get_valid_dram_page_orders,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi_set_hbm_bar_base,
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 25ac87cebd45..81a3c79a8bc6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -469,7 +469,7 @@ static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
};
/**
- * gaudi_set_block_as_protected - set the given block as protected
+ * gaudi_pb_set_block - set the given block as protected
*
* @hdev: pointer to hl_device structure
* @base: block base address
diff --git a/drivers/misc/habanalabs/gaudi2/Makefile b/drivers/misc/habanalabs/gaudi2/Makefile
new file mode 100644
index 000000000000..1e047883ba74
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+HL_GAUDI2_FILES := gaudi2/gaudi2.o gaudi2/gaudi2_security.o \
+ gaudi2/gaudi2_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/misc/habanalabs/gaudi2/gaudi2.c
new file mode 100644
index 000000000000..98336a1a84b0
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2.c
@@ -0,0 +1,9986 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudi2P.h"
+#include "gaudi2_masks.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v2_0.h"
+#include "../include/gaudi2/gaudi2_packets.h"
+#include "../include/gaudi2/gaudi2_reg_map.h"
+#include "../include/gaudi2/gaudi2_async_ids_map_extended.h"
+#include "../include/gaudi2/arc/gaudi2_arc_common_packets.h"
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/iommu.h>
+
+#define GAUDI2_DMA_POOL_BLK_SIZE SZ_256 /* 256 bytes */
+
+#define GAUDI2_RESET_TIMEOUT_MSEC 500 /* 500ms */
+#define GAUDI2_RESET_POLL_TIMEOUT_USEC 50000 /* 50ms */
+#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC 25000 /* 25s */
+#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC 25000 /* 25s */
+#define GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC 3000000 /* 3s */
+#define GAUDI2_RESET_POLL_CNT 3
+#define GAUDI2_RESET_WAIT_MSEC 1 /* 1ms */
+#define GAUDI2_CPU_RESET_WAIT_MSEC 100 /* 100ms */
+#define GAUDI2_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
+#define GAUDI2_CB_POOL_CB_CNT 512
+#define GAUDI2_CB_POOL_CB_SIZE SZ_128K /* 128KB */
+#define GAUDI2_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
+#define GAUDI2_WAIT_FOR_BL_TIMEOUT_USEC 25000000 /* 25s */
+#define GAUDI2_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
+#define GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
+
+#define GAUDI2_ALLOC_CPU_MEM_RETRY_CNT 3
+
+/*
+ * Since the code already has built-in support for binning of up to MAX_FAULTY_TPCS TPCs
+ * and relies on that value (for array sizes etc.), we define another value for the
+ * maximum number of faulty TPCs which reflects the cluster binning requirements.
+ */
+#define MAX_CLUSTER_BINNING_FAULTY_TPCS 1
+#define MAX_FAULTY_XBARS 1
+#define MAX_FAULTY_EDMAS 1
+#define MAX_FAULTY_DECODERS 1
+
+#define GAUDI2_TPC_FULL_MASK 0x1FFFFFF
+#define GAUDI2_HIF_HMMU_FULL_MASK 0xFFFF
+#define GAUDI2_DECODER_FULL_MASK 0x3FF
+
+#define GAUDI2_NUM_OF_QM_ERR_CAUSE 18
+#define GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE 25
+#define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3
+#define GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE 14
+#define GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE 3
+#define GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE 2
+#define GAUDI2_NUM_OF_ROT_ERR_CAUSE 22
+#define GAUDI2_NUM_OF_TPC_INTR_CAUSE 30
+#define GAUDI2_NUM_OF_DEC_ERR_CAUSE 25
+#define GAUDI2_NUM_OF_MME_ERR_CAUSE 16
+#define GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE 5
+#define GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE 7
+#define GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE 8
+#define GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE 19
+#define GAUDI2_NUM_OF_HBM_SEI_CAUSE 9
+#define GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE 3
+#define GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE 3
+#define GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE 2
+#define GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE 2
+#define GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE 2
+#define GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE 5
+
+#define GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 10)
+#define GAUDI2_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 200)
+#define GAUDI2_ARB_WDT_TIMEOUT (0x1000000)
+
+#define GAUDI2_VDEC_TIMEOUT_USEC 10000 /* 10ms */
+#define GAUDI2_PLDM_VDEC_TIMEOUT_USEC (GAUDI2_VDEC_TIMEOUT_USEC * 100)
+
+#define KDMA_TIMEOUT_USEC USEC_PER_SEC
+
+#define IS_DMA_IDLE(dma_core_idle_ind_mask) \
+ (!((dma_core_idle_ind_mask) & \
+ ((DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_MASK) | \
+ (DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_MASK))))
+
+#define IS_MME_IDLE(mme_arch_sts) (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
+
+#define IS_TPC_IDLE(tpc_cfg_sts) (((tpc_cfg_sts) & (TPC_IDLE_MASK)) == (TPC_IDLE_MASK))
+
+#define IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) \
+ ((((qm_glbl_sts0) & (QM_IDLE_MASK)) == (QM_IDLE_MASK)) && \
+ (((qm_glbl_sts1) & (QM_ARC_IDLE_MASK)) == (QM_ARC_IDLE_MASK)) && \
+ (((qm_cgm_sts) & (CGM_IDLE_MASK)) == (CGM_IDLE_MASK)))
+
+#define PCIE_DEC_EN_MASK 0x300
+#define DEC_WORK_STATE_IDLE 0
+#define DEC_WORK_STATE_PEND 3
+#define IS_DEC_IDLE(dec_swreg15) \
+ (((dec_swreg15) & DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK) == DEC_WORK_STATE_IDLE || \
+ ((dec_swreg15) & DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK) == DEC_WORK_STATE_PEND)
+
+/* HBM MMU address scrambling parameters */
+#define GAUDI2_HBM_MMU_SCRM_MEM_SIZE SZ_8M
+#define GAUDI2_HBM_MMU_SCRM_DIV_SHIFT 26
+#define GAUDI2_HBM_MMU_SCRM_MOD_SHIFT 0
+#define GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK DRAM_VA_HINT_MASK
+#define GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR 16
+#define MMU_RANGE_INV_VA_LSB_SHIFT 12
+#define MMU_RANGE_INV_VA_MSB_SHIFT 44
+#define MMU_RANGE_INV_EN_SHIFT 0
+#define MMU_RANGE_INV_ASID_EN_SHIFT 1
+#define MMU_RANGE_INV_ASID_SHIFT 2
+
+#define GAUDI2_MAX_STRING_LEN 64
+
+#define GAUDI2_VDEC_MSIX_ENTRIES (GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM - \
+ GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 1)
+
+enum hl_pmmu_fatal_cause {
+ LATENCY_RD_OUT_FIFO_OVERRUN,
+ LATENCY_WR_OUT_FIFO_OVERRUN,
+};
+
+enum hl_pcie_drain_ind_cause {
+ LBW_AXI_DRAIN_IND,
+ HBW_AXI_DRAIN_IND
+};
+
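+/* Bitmask of enabled HMMU HIFs per HBM cluster */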
+static const u32 cluster_hmmu_hif_enabled_mask[GAUDI2_HBM_NUM] = {
+ [HBM_ID0] = 0xFFFC,
+ [HBM_ID1] = 0xFFCF,
+ [HBM_ID2] = 0xF7F7,
+ [HBM_ID3] = 0x7F7F,
+ [HBM_ID4] = 0xFCFF,
+ [HBM_ID5] = 0xCFFF,
+};
+
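+/* HBM cluster associated with each XBAR edge */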
+static const u8 xbar_edge_to_hbm_cluster[EDMA_ID_SIZE] = {
+ [0] = HBM_ID0,
+ [1] = HBM_ID1,
+ [2] = HBM_ID4,
+ [3] = HBM_ID5,
+};
+
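+/* HBM cluster associated with each EDMA instance */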
+static const u8 edma_to_hbm_cluster[EDMA_ID_SIZE] = {
+ [EDMA_ID_DCORE0_INSTANCE0] = HBM_ID0,
+ [EDMA_ID_DCORE0_INSTANCE1] = HBM_ID2,
+ [EDMA_ID_DCORE1_INSTANCE0] = HBM_ID1,
+ [EDMA_ID_DCORE1_INSTANCE1] = HBM_ID3,
+ [EDMA_ID_DCORE2_INSTANCE0] = HBM_ID2,
+ [EDMA_ID_DCORE2_INSTANCE1] = HBM_ID4,
+ [EDMA_ID_DCORE3_INSTANCE0] = HBM_ID3,
+ [EDMA_ID_DCORE3_INSTANCE1] = HBM_ID5,
+};
+
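+/* QMAN async event ID reported for each H/W queue */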
+static const int gaudi2_qman_async_event_id[] = {
+ [GAUDI2_QUEUE_ID_PDMA_0_0] = GAUDI2_EVENT_PDMA0_QM,
+ [GAUDI2_QUEUE_ID_PDMA_0_1] = GAUDI2_EVENT_PDMA0_QM,
+ [GAUDI2_QUEUE_ID_PDMA_0_2] = GAUDI2_EVENT_PDMA0_QM,
+ [GAUDI2_QUEUE_ID_PDMA_0_3] = GAUDI2_EVENT_PDMA0_QM,
+ [GAUDI2_QUEUE_ID_PDMA_1_0] = GAUDI2_EVENT_PDMA1_QM,
+ [GAUDI2_QUEUE_ID_PDMA_1_1] = GAUDI2_EVENT_PDMA1_QM,
+ [GAUDI2_QUEUE_ID_PDMA_1_2] = GAUDI2_EVENT_PDMA1_QM,
+ [GAUDI2_QUEUE_ID_PDMA_1_3] = GAUDI2_EVENT_PDMA1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = GAUDI2_EVENT_HDMA0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = GAUDI2_EVENT_HDMA0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = GAUDI2_EVENT_HDMA0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = GAUDI2_EVENT_HDMA0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = GAUDI2_EVENT_HDMA1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = GAUDI2_EVENT_HDMA1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = GAUDI2_EVENT_HDMA1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = GAUDI2_EVENT_HDMA1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = GAUDI2_EVENT_MME0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = GAUDI2_EVENT_MME0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = GAUDI2_EVENT_MME0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = GAUDI2_EVENT_MME0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = GAUDI2_EVENT_TPC0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = GAUDI2_EVENT_TPC0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = GAUDI2_EVENT_TPC0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = GAUDI2_EVENT_TPC0_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = GAUDI2_EVENT_TPC1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = GAUDI2_EVENT_TPC1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = GAUDI2_EVENT_TPC1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = GAUDI2_EVENT_TPC1_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = GAUDI2_EVENT_TPC2_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = GAUDI2_EVENT_TPC2_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = GAUDI2_EVENT_TPC2_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = GAUDI2_EVENT_TPC2_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = GAUDI2_EVENT_TPC3_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = GAUDI2_EVENT_TPC3_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = GAUDI2_EVENT_TPC3_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = GAUDI2_EVENT_TPC3_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = GAUDI2_EVENT_TPC4_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = GAUDI2_EVENT_TPC4_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = GAUDI2_EVENT_TPC4_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = GAUDI2_EVENT_TPC4_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = GAUDI2_EVENT_TPC5_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = GAUDI2_EVENT_TPC5_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = GAUDI2_EVENT_TPC5_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = GAUDI2_EVENT_TPC5_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = GAUDI2_EVENT_TPC24_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = GAUDI2_EVENT_TPC24_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = GAUDI2_EVENT_TPC24_QM,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = GAUDI2_EVENT_TPC24_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = GAUDI2_EVENT_HDMA2_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = GAUDI2_EVENT_HDMA2_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = GAUDI2_EVENT_HDMA2_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = GAUDI2_EVENT_HDMA2_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = GAUDI2_EVENT_HDMA3_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = GAUDI2_EVENT_HDMA3_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = GAUDI2_EVENT_HDMA3_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = GAUDI2_EVENT_HDMA3_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = GAUDI2_EVENT_MME1_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = GAUDI2_EVENT_MME1_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = GAUDI2_EVENT_MME1_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = GAUDI2_EVENT_MME1_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = GAUDI2_EVENT_TPC6_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = GAUDI2_EVENT_TPC6_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = GAUDI2_EVENT_TPC6_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = GAUDI2_EVENT_TPC6_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = GAUDI2_EVENT_TPC7_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = GAUDI2_EVENT_TPC7_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = GAUDI2_EVENT_TPC7_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = GAUDI2_EVENT_TPC7_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = GAUDI2_EVENT_TPC8_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = GAUDI2_EVENT_TPC8_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = GAUDI2_EVENT_TPC8_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = GAUDI2_EVENT_TPC8_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = GAUDI2_EVENT_TPC9_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = GAUDI2_EVENT_TPC9_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = GAUDI2_EVENT_TPC9_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = GAUDI2_EVENT_TPC9_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = GAUDI2_EVENT_TPC10_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = GAUDI2_EVENT_TPC10_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = GAUDI2_EVENT_TPC10_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = GAUDI2_EVENT_TPC10_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = GAUDI2_EVENT_TPC11_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = GAUDI2_EVENT_TPC11_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = GAUDI2_EVENT_TPC11_QM,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = GAUDI2_EVENT_TPC11_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = GAUDI2_EVENT_HDMA4_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = GAUDI2_EVENT_HDMA4_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = GAUDI2_EVENT_HDMA4_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = GAUDI2_EVENT_HDMA4_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = GAUDI2_EVENT_HDMA5_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = GAUDI2_EVENT_HDMA5_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = GAUDI2_EVENT_HDMA5_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = GAUDI2_EVENT_HDMA5_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = GAUDI2_EVENT_MME2_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = GAUDI2_EVENT_MME2_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = GAUDI2_EVENT_MME2_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = GAUDI2_EVENT_MME2_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = GAUDI2_EVENT_TPC12_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = GAUDI2_EVENT_TPC12_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = GAUDI2_EVENT_TPC12_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = GAUDI2_EVENT_TPC12_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = GAUDI2_EVENT_TPC13_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = GAUDI2_EVENT_TPC13_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = GAUDI2_EVENT_TPC13_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = GAUDI2_EVENT_TPC13_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = GAUDI2_EVENT_TPC14_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = GAUDI2_EVENT_TPC14_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = GAUDI2_EVENT_TPC14_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = GAUDI2_EVENT_TPC14_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = GAUDI2_EVENT_TPC15_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = GAUDI2_EVENT_TPC15_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = GAUDI2_EVENT_TPC15_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = GAUDI2_EVENT_TPC15_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = GAUDI2_EVENT_TPC16_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = GAUDI2_EVENT_TPC16_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = GAUDI2_EVENT_TPC16_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = GAUDI2_EVENT_TPC16_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = GAUDI2_EVENT_TPC17_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = GAUDI2_EVENT_TPC17_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = GAUDI2_EVENT_TPC17_QM,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = GAUDI2_EVENT_TPC17_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = GAUDI2_EVENT_HDMA6_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = GAUDI2_EVENT_HDMA6_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = GAUDI2_EVENT_HDMA6_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = GAUDI2_EVENT_HDMA6_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = GAUDI2_EVENT_HDMA7_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = GAUDI2_EVENT_HDMA7_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = GAUDI2_EVENT_HDMA7_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = GAUDI2_EVENT_HDMA7_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = GAUDI2_EVENT_MME3_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = GAUDI2_EVENT_MME3_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = GAUDI2_EVENT_MME3_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = GAUDI2_EVENT_MME3_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = GAUDI2_EVENT_TPC18_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = GAUDI2_EVENT_TPC18_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = GAUDI2_EVENT_TPC18_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = GAUDI2_EVENT_TPC18_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = GAUDI2_EVENT_TPC19_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = GAUDI2_EVENT_TPC19_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = GAUDI2_EVENT_TPC19_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = GAUDI2_EVENT_TPC19_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = GAUDI2_EVENT_TPC20_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = GAUDI2_EVENT_TPC20_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = GAUDI2_EVENT_TPC20_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = GAUDI2_EVENT_TPC20_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = GAUDI2_EVENT_TPC21_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = GAUDI2_EVENT_TPC21_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = GAUDI2_EVENT_TPC21_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = GAUDI2_EVENT_TPC21_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = GAUDI2_EVENT_TPC22_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = GAUDI2_EVENT_TPC22_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = GAUDI2_EVENT_TPC22_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = GAUDI2_EVENT_TPC22_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = GAUDI2_EVENT_TPC23_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = GAUDI2_EVENT_TPC23_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = GAUDI2_EVENT_TPC23_QM,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = GAUDI2_EVENT_TPC23_QM,
+ [GAUDI2_QUEUE_ID_NIC_0_0] = GAUDI2_EVENT_NIC0_QM0,
+ [GAUDI2_QUEUE_ID_NIC_0_1] = GAUDI2_EVENT_NIC0_QM0,
+ [GAUDI2_QUEUE_ID_NIC_0_2] = GAUDI2_EVENT_NIC0_QM0,
+ [GAUDI2_QUEUE_ID_NIC_0_3] = GAUDI2_EVENT_NIC0_QM0,
+ [GAUDI2_QUEUE_ID_NIC_1_0] = GAUDI2_EVENT_NIC0_QM1,
+ [GAUDI2_QUEUE_ID_NIC_1_1] = GAUDI2_EVENT_NIC0_QM1,
+ [GAUDI2_QUEUE_ID_NIC_1_2] = GAUDI2_EVENT_NIC0_QM1,
+ [GAUDI2_QUEUE_ID_NIC_1_3] = GAUDI2_EVENT_NIC0_QM1,
+ [GAUDI2_QUEUE_ID_NIC_2_0] = GAUDI2_EVENT_NIC1_QM0,
+ [GAUDI2_QUEUE_ID_NIC_2_1] = GAUDI2_EVENT_NIC1_QM0,
+ [GAUDI2_QUEUE_ID_NIC_2_2] = GAUDI2_EVENT_NIC1_QM0,
+ [GAUDI2_QUEUE_ID_NIC_2_3] = GAUDI2_EVENT_NIC1_QM0,
+ [GAUDI2_QUEUE_ID_NIC_3_0] = GAUDI2_EVENT_NIC1_QM1,
+ [GAUDI2_QUEUE_ID_NIC_3_1] = GAUDI2_EVENT_NIC1_QM1,
+ [GAUDI2_QUEUE_ID_NIC_3_2] = GAUDI2_EVENT_NIC1_QM1,
+ [GAUDI2_QUEUE_ID_NIC_3_3] = GAUDI2_EVENT_NIC1_QM1,
+ [GAUDI2_QUEUE_ID_NIC_4_0] = GAUDI2_EVENT_NIC2_QM0,
+ [GAUDI2_QUEUE_ID_NIC_4_1] = GAUDI2_EVENT_NIC2_QM0,
+ [GAUDI2_QUEUE_ID_NIC_4_2] = GAUDI2_EVENT_NIC2_QM0,
+ [GAUDI2_QUEUE_ID_NIC_4_3] = GAUDI2_EVENT_NIC2_QM0,
+ [GAUDI2_QUEUE_ID_NIC_5_0] = GAUDI2_EVENT_NIC2_QM1,
+ [GAUDI2_QUEUE_ID_NIC_5_1] = GAUDI2_EVENT_NIC2_QM1,
+ [GAUDI2_QUEUE_ID_NIC_5_2] = GAUDI2_EVENT_NIC2_QM1,
+ [GAUDI2_QUEUE_ID_NIC_5_3] = GAUDI2_EVENT_NIC2_QM1,
+ [GAUDI2_QUEUE_ID_NIC_6_0] = GAUDI2_EVENT_NIC3_QM0,
+ [GAUDI2_QUEUE_ID_NIC_6_1] = GAUDI2_EVENT_NIC3_QM0,
+ [GAUDI2_QUEUE_ID_NIC_6_2] = GAUDI2_EVENT_NIC3_QM0,
+ [GAUDI2_QUEUE_ID_NIC_6_3] = GAUDI2_EVENT_NIC3_QM0,
+ [GAUDI2_QUEUE_ID_NIC_7_0] = GAUDI2_EVENT_NIC3_QM1,
+ [GAUDI2_QUEUE_ID_NIC_7_1] = GAUDI2_EVENT_NIC3_QM1,
+ [GAUDI2_QUEUE_ID_NIC_7_2] = GAUDI2_EVENT_NIC3_QM1,
+ [GAUDI2_QUEUE_ID_NIC_7_3] = GAUDI2_EVENT_NIC3_QM1,
+ [GAUDI2_QUEUE_ID_NIC_8_0] = GAUDI2_EVENT_NIC4_QM0,
+ [GAUDI2_QUEUE_ID_NIC_8_1] = GAUDI2_EVENT_NIC4_QM0,
+ [GAUDI2_QUEUE_ID_NIC_8_2] = GAUDI2_EVENT_NIC4_QM0,
+ [GAUDI2_QUEUE_ID_NIC_8_3] = GAUDI2_EVENT_NIC4_QM0,
+ [GAUDI2_QUEUE_ID_NIC_9_0] = GAUDI2_EVENT_NIC4_QM1,
+ [GAUDI2_QUEUE_ID_NIC_9_1] = GAUDI2_EVENT_NIC4_QM1,
+ [GAUDI2_QUEUE_ID_NIC_9_2] = GAUDI2_EVENT_NIC4_QM1,
+ [GAUDI2_QUEUE_ID_NIC_9_3] = GAUDI2_EVENT_NIC4_QM1,
+ [GAUDI2_QUEUE_ID_NIC_10_0] = GAUDI2_EVENT_NIC5_QM0,
+ [GAUDI2_QUEUE_ID_NIC_10_1] = GAUDI2_EVENT_NIC5_QM0,
+ [GAUDI2_QUEUE_ID_NIC_10_2] = GAUDI2_EVENT_NIC5_QM0,
+ [GAUDI2_QUEUE_ID_NIC_10_3] = GAUDI2_EVENT_NIC5_QM0,
+ [GAUDI2_QUEUE_ID_NIC_11_0] = GAUDI2_EVENT_NIC5_QM1,
+ [GAUDI2_QUEUE_ID_NIC_11_1] = GAUDI2_EVENT_NIC5_QM1,
+ [GAUDI2_QUEUE_ID_NIC_11_2] = GAUDI2_EVENT_NIC5_QM1,
+ [GAUDI2_QUEUE_ID_NIC_11_3] = GAUDI2_EVENT_NIC5_QM1,
+ [GAUDI2_QUEUE_ID_NIC_12_0] = GAUDI2_EVENT_NIC6_QM0,
+ [GAUDI2_QUEUE_ID_NIC_12_1] = GAUDI2_EVENT_NIC6_QM0,
+ [GAUDI2_QUEUE_ID_NIC_12_2] = GAUDI2_EVENT_NIC6_QM0,
+ [GAUDI2_QUEUE_ID_NIC_12_3] = GAUDI2_EVENT_NIC6_QM0,
+ [GAUDI2_QUEUE_ID_NIC_13_0] = GAUDI2_EVENT_NIC6_QM1,
+ [GAUDI2_QUEUE_ID_NIC_13_1] = GAUDI2_EVENT_NIC6_QM1,
+ [GAUDI2_QUEUE_ID_NIC_13_2] = GAUDI2_EVENT_NIC6_QM1,
+ [GAUDI2_QUEUE_ID_NIC_13_3] = GAUDI2_EVENT_NIC6_QM1,
+ [GAUDI2_QUEUE_ID_NIC_14_0] = GAUDI2_EVENT_NIC7_QM0,
+ [GAUDI2_QUEUE_ID_NIC_14_1] = GAUDI2_EVENT_NIC7_QM0,
+ [GAUDI2_QUEUE_ID_NIC_14_2] = GAUDI2_EVENT_NIC7_QM0,
+ [GAUDI2_QUEUE_ID_NIC_14_3] = GAUDI2_EVENT_NIC7_QM0,
+ [GAUDI2_QUEUE_ID_NIC_15_0] = GAUDI2_EVENT_NIC7_QM1,
+ [GAUDI2_QUEUE_ID_NIC_15_1] = GAUDI2_EVENT_NIC7_QM1,
+ [GAUDI2_QUEUE_ID_NIC_15_2] = GAUDI2_EVENT_NIC7_QM1,
+ [GAUDI2_QUEUE_ID_NIC_15_3] = GAUDI2_EVENT_NIC7_QM1,
+ [GAUDI2_QUEUE_ID_NIC_16_0] = GAUDI2_EVENT_NIC8_QM0,
+ [GAUDI2_QUEUE_ID_NIC_16_1] = GAUDI2_EVENT_NIC8_QM0,
+ [GAUDI2_QUEUE_ID_NIC_16_2] = GAUDI2_EVENT_NIC8_QM0,
+ [GAUDI2_QUEUE_ID_NIC_16_3] = GAUDI2_EVENT_NIC8_QM0,
+ [GAUDI2_QUEUE_ID_NIC_17_0] = GAUDI2_EVENT_NIC8_QM1,
+ [GAUDI2_QUEUE_ID_NIC_17_1] = GAUDI2_EVENT_NIC8_QM1,
+ [GAUDI2_QUEUE_ID_NIC_17_2] = GAUDI2_EVENT_NIC8_QM1,
+ [GAUDI2_QUEUE_ID_NIC_17_3] = GAUDI2_EVENT_NIC8_QM1,
+ [GAUDI2_QUEUE_ID_NIC_18_0] = GAUDI2_EVENT_NIC9_QM0,
+ [GAUDI2_QUEUE_ID_NIC_18_1] = GAUDI2_EVENT_NIC9_QM0,
+ [GAUDI2_QUEUE_ID_NIC_18_2] = GAUDI2_EVENT_NIC9_QM0,
+ [GAUDI2_QUEUE_ID_NIC_18_3] = GAUDI2_EVENT_NIC9_QM0,
+ [GAUDI2_QUEUE_ID_NIC_19_0] = GAUDI2_EVENT_NIC9_QM1,
+ [GAUDI2_QUEUE_ID_NIC_19_1] = GAUDI2_EVENT_NIC9_QM1,
+ [GAUDI2_QUEUE_ID_NIC_19_2] = GAUDI2_EVENT_NIC9_QM1,
+ [GAUDI2_QUEUE_ID_NIC_19_3] = GAUDI2_EVENT_NIC9_QM1,
+ [GAUDI2_QUEUE_ID_NIC_20_0] = GAUDI2_EVENT_NIC10_QM0,
+ [GAUDI2_QUEUE_ID_NIC_20_1] = GAUDI2_EVENT_NIC10_QM0,
+ [GAUDI2_QUEUE_ID_NIC_20_2] = GAUDI2_EVENT_NIC10_QM0,
+ [GAUDI2_QUEUE_ID_NIC_20_3] = GAUDI2_EVENT_NIC10_QM0,
+ [GAUDI2_QUEUE_ID_NIC_21_0] = GAUDI2_EVENT_NIC10_QM1,
+ [GAUDI2_QUEUE_ID_NIC_21_1] = GAUDI2_EVENT_NIC10_QM1,
+ [GAUDI2_QUEUE_ID_NIC_21_2] = GAUDI2_EVENT_NIC10_QM1,
+ [GAUDI2_QUEUE_ID_NIC_21_3] = GAUDI2_EVENT_NIC10_QM1,
+ [GAUDI2_QUEUE_ID_NIC_22_0] = GAUDI2_EVENT_NIC11_QM0,
+ [GAUDI2_QUEUE_ID_NIC_22_1] = GAUDI2_EVENT_NIC11_QM0,
+ [GAUDI2_QUEUE_ID_NIC_22_2] = GAUDI2_EVENT_NIC11_QM0,
+ [GAUDI2_QUEUE_ID_NIC_22_3] = GAUDI2_EVENT_NIC11_QM0,
+ [GAUDI2_QUEUE_ID_NIC_23_0] = GAUDI2_EVENT_NIC11_QM1,
+ [GAUDI2_QUEUE_ID_NIC_23_1] = GAUDI2_EVENT_NIC11_QM1,
+ [GAUDI2_QUEUE_ID_NIC_23_2] = GAUDI2_EVENT_NIC11_QM1,
+ [GAUDI2_QUEUE_ID_NIC_23_3] = GAUDI2_EVENT_NIC11_QM1,
+ [GAUDI2_QUEUE_ID_ROT_0_0] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
+ [GAUDI2_QUEUE_ID_ROT_0_1] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
+ [GAUDI2_QUEUE_ID_ROT_0_2] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
+ [GAUDI2_QUEUE_ID_ROT_0_3] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
+ [GAUDI2_QUEUE_ID_ROT_1_0] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
+ [GAUDI2_QUEUE_ID_ROT_1_1] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
+ [GAUDI2_QUEUE_ID_ROT_1_2] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
+ [GAUDI2_QUEUE_ID_ROT_1_3] = GAUDI2_EVENT_ROTATOR1_ROT1_QM
+};
+
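+/* Async event ID reported for each DMA core */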
+static const int gaudi2_dma_core_async_event_id[] = {
+ [DMA_CORE_ID_EDMA0] = GAUDI2_EVENT_HDMA0_CORE,
+ [DMA_CORE_ID_EDMA1] = GAUDI2_EVENT_HDMA1_CORE,
+ [DMA_CORE_ID_EDMA2] = GAUDI2_EVENT_HDMA2_CORE,
+ [DMA_CORE_ID_EDMA3] = GAUDI2_EVENT_HDMA3_CORE,
+ [DMA_CORE_ID_EDMA4] = GAUDI2_EVENT_HDMA4_CORE,
+ [DMA_CORE_ID_EDMA5] = GAUDI2_EVENT_HDMA5_CORE,
+ [DMA_CORE_ID_EDMA6] = GAUDI2_EVENT_HDMA6_CORE,
+ [DMA_CORE_ID_EDMA7] = GAUDI2_EVENT_HDMA7_CORE,
+ [DMA_CORE_ID_PDMA0] = GAUDI2_EVENT_PDMA0_CORE,
+ [DMA_CORE_ID_PDMA1] = GAUDI2_EVENT_PDMA1_CORE,
+ [DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
+};
+
+static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
+ "qman sei intr",
+ "arc sei intr"
+};
+
+static const char * const gaudi2_cpu_sei_error_cause[GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE] = {
+ "AXI_TERMINATOR WR",
+ "AXI_TERMINATOR RD",
+ "AXI SPLIT SEI Status"
+};
+
+static const char * const gaudi2_arc_sei_error_cause[GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE] = {
+ "cbu_bresp_sei_intr_cause",
+ "cbu_rresp_sei_intr_cause",
+ "lbu_bresp_sei_intr_cause",
+ "lbu_rresp_sei_intr_cause",
+ "cbu_axi_split_intr_cause",
+ "lbu_axi_split_intr_cause",
+ "arc_ip_excptn_sei_intr_cause",
+ "dmi_bresp_sei_intr_cause",
+ "aux2apb_err_sei_intr_cause",
+ "cfg_lbw_wr_terminated_intr_cause",
+ "cfg_lbw_rd_terminated_intr_cause",
+ "cfg_dccm_wr_terminated_intr_cause",
+ "cfg_dccm_rd_terminated_intr_cause",
+ "cfg_hbw_rd_terminated_intr_cause"
+};
+
+static const char * const gaudi2_dec_error_cause[GAUDI2_NUM_OF_DEC_ERR_CAUSE] = {
+ "msix_vcd_hbw_sei",
+ "msix_l2c_hbw_sei",
+ "msix_nrm_hbw_sei",
+ "msix_abnrm_hbw_sei",
+ "msix_vcd_lbw_sei",
+ "msix_l2c_lbw_sei",
+ "msix_nrm_lbw_sei",
+ "msix_abnrm_lbw_sei",
+ "apb_vcd_lbw_sei",
+ "apb_l2c_lbw_sei",
+ "apb_nrm_lbw_sei",
+ "apb_abnrm_lbw_sei",
+ "dec_sei",
+ "dec_apb_sei",
+ "trc_apb_sei",
+ "lbw_mstr_if_sei",
+ "axi_split_bresp_err_sei",
+ "hbw_axi_wr_viol_sei",
+ "hbw_axi_rd_viol_sei",
+ "lbw_axi_wr_viol_sei",
+ "lbw_axi_rd_viol_sei",
+ "vcd_spi",
+ "l2c_spi",
+ "nrm_spi",
+ "abnrm_spi",
+};
+
+static const char * const gaudi2_qman_error_cause[GAUDI2_NUM_OF_QM_ERR_CAUSE] = {
+ "PQ AXI HBW error",
+ "CQ AXI HBW error",
+ "CP AXI HBW error",
+ "CP error due to undefined OPCODE",
+ "CP encountered STOP OPCODE",
+ "CP AXI LBW error",
+ "CP WRREG32 or WRBULK returned error",
+ "N/A",
+ "FENCE 0 inc over max value and clipped",
+ "FENCE 1 inc over max value and clipped",
+ "FENCE 2 inc over max value and clipped",
+ "FENCE 3 inc over max value and clipped",
+ "FENCE 0 dec under min value and clipped",
+ "FENCE 1 dec under min value and clipped",
+ "FENCE 2 dec under min value and clipped",
+ "FENCE 3 dec under min value and clipped",
+ "CPDMA Up overflow",
+ "PQC L2H error"
+};
+
+static const char * const gaudi2_qman_lower_cp_error_cause[GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE] = {
+ "RSVD0",
+ "CQ AXI HBW error",
+ "CP AXI HBW error",
+ "CP error due to undefined OPCODE",
+ "CP encountered STOP OPCODE",
+ "CP AXI LBW error",
+ "CP WRREG32 or WRBULK returned error",
+ "N/A",
+ "FENCE 0 inc over max value and clipped",
+ "FENCE 1 inc over max value and clipped",
+ "FENCE 2 inc over max value and clipped",
+ "FENCE 3 inc over max value and clipped",
+ "FENCE 0 dec under min value and clipped",
+ "FENCE 1 dec under min value and clipped",
+ "FENCE 2 dec under min value and clipped",
+ "FENCE 3 dec under min value and clipped",
+ "CPDMA Up overflow",
+ "RSVD17",
+ "CQ_WR_IFIFO_CI_ERR",
+ "CQ_WR_CTL_CI_ERR",
+ "ARC_CQF_RD_ERR",
+ "ARC_CQ_WR_IFIFO_CI_ERR",
+ "ARC_CQ_WR_CTL_CI_ERR",
+ "ARC_AXI_ERR",
+ "CP_SWITCH_WDT_ERR"
+};
+
+static const char * const gaudi2_qman_arb_error_cause[GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE] = {
+ "Choice push while full error",
+ "Choice Q watchdog error",
+ "MSG AXI LBW returned with error"
+};
+
+static const char * const guadi2_rot_error_cause[GAUDI2_NUM_OF_ROT_ERR_CAUSE] = {
+ "qm_axi_err",
+ "qm_trace_fence_events",
+ "qm_sw_err",
+ "qm_cp_sw_stop",
+ "lbw_mstr_rresp_err",
+ "lbw_mstr_bresp_err",
+ "lbw_msg_slverr",
+ "hbw_msg_slverr",
+ "wbc_slverr",
+ "hbw_mstr_rresp_err",
+ "hbw_mstr_bresp_err",
+ "sb_resp_intr",
+ "mrsb_resp_intr",
+ "core_dw_status_0",
+ "core_dw_status_1",
+ "core_dw_status_2",
+ "core_dw_status_3",
+ "core_dw_status_4",
+ "core_dw_status_5",
+ "core_dw_status_6",
+ "core_dw_status_7",
+ "async_arc2cpu_sei_intr",
+};
+
+static const char * const gaudi2_tpc_interrupts_cause[GAUDI2_NUM_OF_TPC_INTR_CAUSE] = {
+ "tpc_address_exceed_slm",
+ "tpc_div_by_0",
+ "tpc_spu_mac_overflow",
+ "tpc_spu_addsub_overflow",
+ "tpc_spu_abs_overflow",
+ "tpc_spu_fma_fp_dst_nan",
+ "tpc_spu_fma_fp_dst_inf",
+ "tpc_spu_convert_fp_dst_nan",
+ "tpc_spu_convert_fp_dst_inf",
+ "tpc_spu_fp_dst_denorm",
+ "tpc_vpu_mac_overflow",
+ "tpc_vpu_addsub_overflow",
+ "tpc_vpu_abs_overflow",
+ "tpc_vpu_convert_fp_dst_nan",
+ "tpc_vpu_convert_fp_dst_inf",
+ "tpc_vpu_fma_fp_dst_nan",
+ "tpc_vpu_fma_fp_dst_inf",
+ "tpc_vpu_fp_dst_denorm",
+ "tpc_assertions",
+ "tpc_illegal_instruction",
+ "tpc_pc_wrap_around",
+ "tpc_qm_sw_err",
+ "tpc_hbw_rresp_err",
+ "tpc_hbw_bresp_err",
+ "tpc_lbw_rresp_err",
+ "tpc_lbw_bresp_err",
+ "st_unlock_already_locked",
+ "invalid_lock_access",
+ "LD_L protection violation",
+ "ST_L protection violation",
+};
+
+static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] = {
+ "agu_resp_intr",
+ "qman_axi_err",
+ "wap sei (wbc axi err)",
+ "arc sei",
+ "mme_cfg_unalign_addr",
+ "qm_sw_err",
+ "sbte_dbg_intr_0",
+ "sbte_dbg_intr_1",
+ "sbte_dbg_intr_2",
+ "sbte_dbg_intr_3",
+ "sbte_dbg_intr_4",
+ "sbte_prtn_intr_0",
+ "sbte_prtn_intr_1",
+ "sbte_prtn_intr_2",
+ "sbte_prtn_intr_3",
+ "sbte_prtn_intr_4",
+};
+
+static const char * const guadi2_mme_sbte_error_cause[GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE] = {
+ "i0",
+ "i1",
+ "i2",
+ "i3",
+ "i4",
+};
+
+static const char * const guadi2_mme_wap_error_cause[GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE] = {
+ "WBC ERR RESP_0",
+ "WBC ERR RESP_1",
+ "AP SOURCE POS INF",
+ "AP SOURCE NEG INF",
+ "AP SOURCE NAN",
+ "AP RESULT POS INF",
+ "AP RESULT NEG INF",
+};
+
+static const char * const gaudi2_dma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE] = {
+ "HBW Read returned with error RRESP",
+ "HBW write returned with error BRESP",
+ "LBW write returned with error BRESP",
+ "descriptor_fifo_overflow",
+ "KDMA SB LBW Read returned with error",
+ "KDMA WBC LBW Write returned with error",
+ "TRANSPOSE ENGINE DESC FIFO OVERFLOW",
+ "WRONG CFG FOR COMMIT IN LIN DMA"
+};
+
+static const char * const gaudi2_kdma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE] = {
+ "HBW/LBW Read returned with error RRESP",
+ "HBW/LBW write returned with error BRESP",
+ "LBW write returned with error BRESP",
+ "descriptor_fifo_overflow",
+ "KDMA SB LBW Read returned with error",
+ "KDMA WBC LBW Write returned with error",
+ "TRANSPOSE ENGINE DESC FIFO OVERFLOW",
+ "WRONG CFG FOR COMMIT IN LIN DMA"
+};
+
+struct gaudi2_sm_sei_cause_data {
+ const char *cause_name;
+ const char *log_name;
+ u32 log_mask;
+};
+
+static const struct gaudi2_sm_sei_cause_data
+gaudi2_sm_sei_cause[GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE] = {
+ {"calculated SO value overflow/underflow", "SOB group ID", 0x7FF},
+ {"payload address of monitor is not aligned to 4B", "monitor addr", 0xFFFF},
+ {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id", 0xFFFF},
+};
+
+static const char * const
+gaudi2_pmmu_fatal_interrupts_cause[GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE] = {
+ "LATENCY_RD_OUT_FIFO_OVERRUN",
+ "LATENCY_WR_OUT_FIFO_OVERRUN",
+};
+
+static const char * const
+gaudi2_hif_fatal_interrupts_cause[GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE] = {
+ "LATENCY_RD_OUT_FIFO_OVERRUN",
+ "LATENCY_WR_OUT_FIFO_OVERRUN",
+};
+
+static const char * const
+gaudi2_psoc_axi_drain_interrupts_cause[GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE] = {
+ "AXI drain HBW",
+ "AXI drain LBW",
+};
+
+static const char * const
+gaudi2_pcie_addr_dec_error_cause[GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE] = {
+ "HBW error response",
+ "LBW error response",
+ "TLP is blocked by RR"
+};
+
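+/* QMAN block base address for each H/W queue */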
+const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE] = {
+ [GAUDI2_QUEUE_ID_PDMA_0_0] = mmPDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_0_1] = mmPDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_0_2] = mmPDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_0_3] = mmPDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_1_0] = mmPDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_1_1] = mmPDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_1_2] = mmPDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_PDMA_1_3] = mmPDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = mmDCORE0_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = mmDCORE0_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = mmDCORE0_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = mmDCORE0_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = mmDCORE0_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = mmDCORE0_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = mmDCORE0_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = mmDCORE0_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = mmDCORE0_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = mmDCORE0_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = mmDCORE0_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = mmDCORE0_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = mmDCORE0_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = mmDCORE0_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = mmDCORE0_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = mmDCORE0_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = mmDCORE0_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = mmDCORE0_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = mmDCORE0_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = mmDCORE0_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = mmDCORE0_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = mmDCORE0_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = mmDCORE0_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = mmDCORE0_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = mmDCORE0_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = mmDCORE0_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = mmDCORE0_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = mmDCORE0_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = mmDCORE0_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = mmDCORE0_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = mmDCORE0_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = mmDCORE0_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = mmDCORE0_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = mmDCORE0_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = mmDCORE0_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = mmDCORE0_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = mmDCORE0_TPC6_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = mmDCORE0_TPC6_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = mmDCORE0_TPC6_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = mmDCORE0_TPC6_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = mmDCORE1_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = mmDCORE1_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = mmDCORE1_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = mmDCORE1_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = mmDCORE1_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = mmDCORE1_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = mmDCORE1_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = mmDCORE1_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = mmDCORE1_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = mmDCORE1_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = mmDCORE1_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = mmDCORE1_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = mmDCORE1_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = mmDCORE1_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = mmDCORE1_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = mmDCORE1_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = mmDCORE1_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = mmDCORE1_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = mmDCORE1_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = mmDCORE1_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = mmDCORE1_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = mmDCORE1_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = mmDCORE1_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = mmDCORE1_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = mmDCORE1_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = mmDCORE1_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = mmDCORE1_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = mmDCORE1_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = mmDCORE1_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = mmDCORE1_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = mmDCORE1_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = mmDCORE1_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = mmDCORE1_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = mmDCORE1_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = mmDCORE1_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = mmDCORE1_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = mmDCORE2_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = mmDCORE2_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = mmDCORE2_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = mmDCORE2_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = mmDCORE2_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = mmDCORE2_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = mmDCORE2_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = mmDCORE2_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = mmDCORE2_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = mmDCORE2_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = mmDCORE2_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = mmDCORE2_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = mmDCORE2_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = mmDCORE2_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = mmDCORE2_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = mmDCORE2_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = mmDCORE2_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = mmDCORE2_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = mmDCORE2_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = mmDCORE2_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = mmDCORE2_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = mmDCORE2_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = mmDCORE2_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = mmDCORE2_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = mmDCORE2_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = mmDCORE2_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = mmDCORE2_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = mmDCORE2_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = mmDCORE2_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = mmDCORE2_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = mmDCORE2_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = mmDCORE2_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = mmDCORE2_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = mmDCORE2_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = mmDCORE2_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = mmDCORE2_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = mmDCORE3_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = mmDCORE3_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = mmDCORE3_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = mmDCORE3_EDMA0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = mmDCORE3_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = mmDCORE3_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = mmDCORE3_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = mmDCORE3_EDMA1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = mmDCORE3_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = mmDCORE3_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = mmDCORE3_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = mmDCORE3_MME_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = mmDCORE3_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = mmDCORE3_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = mmDCORE3_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = mmDCORE3_TPC0_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = mmDCORE3_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = mmDCORE3_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = mmDCORE3_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = mmDCORE3_TPC1_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = mmDCORE3_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = mmDCORE3_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = mmDCORE3_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = mmDCORE3_TPC2_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = mmDCORE3_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = mmDCORE3_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = mmDCORE3_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = mmDCORE3_TPC3_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = mmDCORE3_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = mmDCORE3_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = mmDCORE3_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = mmDCORE3_TPC4_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = mmDCORE3_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = mmDCORE3_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = mmDCORE3_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = mmDCORE3_TPC5_QM_BASE,
+ [GAUDI2_QUEUE_ID_NIC_0_0] = mmNIC0_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_0_1] = mmNIC0_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_0_2] = mmNIC0_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_0_3] = mmNIC0_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_1_0] = mmNIC0_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_1_1] = mmNIC0_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_1_2] = mmNIC0_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_1_3] = mmNIC0_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_2_0] = mmNIC1_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_2_1] = mmNIC1_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_2_2] = mmNIC1_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_2_3] = mmNIC1_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_3_0] = mmNIC1_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_3_1] = mmNIC1_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_3_2] = mmNIC1_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_3_3] = mmNIC1_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_4_0] = mmNIC2_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_4_1] = mmNIC2_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_4_2] = mmNIC2_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_4_3] = mmNIC2_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_5_0] = mmNIC2_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_5_1] = mmNIC2_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_5_2] = mmNIC2_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_5_3] = mmNIC2_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_6_0] = mmNIC3_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_6_1] = mmNIC3_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_6_2] = mmNIC3_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_6_3] = mmNIC3_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_7_0] = mmNIC3_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_7_1] = mmNIC3_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_7_2] = mmNIC3_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_7_3] = mmNIC3_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_8_0] = mmNIC4_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_8_1] = mmNIC4_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_8_2] = mmNIC4_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_8_3] = mmNIC4_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_9_0] = mmNIC4_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_9_1] = mmNIC4_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_9_2] = mmNIC4_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_9_3] = mmNIC4_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_10_0] = mmNIC5_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_10_1] = mmNIC5_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_10_2] = mmNIC5_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_10_3] = mmNIC5_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_11_0] = mmNIC5_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_11_1] = mmNIC5_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_11_2] = mmNIC5_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_11_3] = mmNIC5_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_12_0] = mmNIC6_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_12_1] = mmNIC6_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_12_2] = mmNIC6_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_12_3] = mmNIC6_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_13_0] = mmNIC6_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_13_1] = mmNIC6_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_13_2] = mmNIC6_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_13_3] = mmNIC6_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_14_0] = mmNIC7_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_14_1] = mmNIC7_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_14_2] = mmNIC7_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_14_3] = mmNIC7_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_15_0] = mmNIC7_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_15_1] = mmNIC7_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_15_2] = mmNIC7_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_15_3] = mmNIC7_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_16_0] = mmNIC8_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_16_1] = mmNIC8_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_16_2] = mmNIC8_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_16_3] = mmNIC8_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_17_0] = mmNIC8_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_17_1] = mmNIC8_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_17_2] = mmNIC8_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_17_3] = mmNIC8_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_18_0] = mmNIC9_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_18_1] = mmNIC9_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_18_2] = mmNIC9_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_18_3] = mmNIC9_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_19_0] = mmNIC9_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_19_1] = mmNIC9_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_19_2] = mmNIC9_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_19_3] = mmNIC9_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_20_0] = mmNIC10_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_20_1] = mmNIC10_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_20_2] = mmNIC10_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_20_3] = mmNIC10_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_21_0] = mmNIC10_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_21_1] = mmNIC10_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_21_2] = mmNIC10_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_21_3] = mmNIC10_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_22_0] = mmNIC11_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_22_1] = mmNIC11_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_22_2] = mmNIC11_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_22_3] = mmNIC11_QM0_BASE,
+ [GAUDI2_QUEUE_ID_NIC_23_0] = mmNIC11_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_23_1] = mmNIC11_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_23_2] = mmNIC11_QM1_BASE,
+ [GAUDI2_QUEUE_ID_NIC_23_3] = mmNIC11_QM1_BASE,
+ [GAUDI2_QUEUE_ID_ROT_0_0] = mmROT0_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_0_1] = mmROT0_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_0_2] = mmROT0_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_0_3] = mmROT0_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_1_0] = mmROT1_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_1_1] = mmROT1_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_1_2] = mmROT1_QM_BASE,
+ [GAUDI2_QUEUE_ID_ROT_1_3] = mmROT1_QM_BASE
+};
+
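+/* AUX block base address for each ARC CPU */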
+static const u32 gaudi2_arc_blocks_bases[NUM_ARC_CPUS] = {
+ [CPU_ID_SCHED_ARC0] = mmARC_FARM_ARC0_AUX_BASE,
+ [CPU_ID_SCHED_ARC1] = mmARC_FARM_ARC1_AUX_BASE,
+ [CPU_ID_SCHED_ARC2] = mmARC_FARM_ARC2_AUX_BASE,
+ [CPU_ID_SCHED_ARC3] = mmARC_FARM_ARC3_AUX_BASE,
+ [CPU_ID_SCHED_ARC4] = mmDCORE1_MME_QM_ARC_AUX_BASE,
+ [CPU_ID_SCHED_ARC5] = mmDCORE3_MME_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC0] = mmDCORE0_TPC0_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC1] = mmDCORE0_TPC1_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC2] = mmDCORE0_TPC2_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC3] = mmDCORE0_TPC3_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC4] = mmDCORE0_TPC4_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC5] = mmDCORE0_TPC5_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC6] = mmDCORE1_TPC0_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC7] = mmDCORE1_TPC1_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC8] = mmDCORE1_TPC2_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC9] = mmDCORE1_TPC3_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC10] = mmDCORE1_TPC4_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC11] = mmDCORE1_TPC5_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC12] = mmDCORE2_TPC0_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC13] = mmDCORE2_TPC1_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC14] = mmDCORE2_TPC2_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC15] = mmDCORE2_TPC3_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC16] = mmDCORE2_TPC4_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC17] = mmDCORE2_TPC5_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC18] = mmDCORE3_TPC0_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC19] = mmDCORE3_TPC1_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC20] = mmDCORE3_TPC2_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC21] = mmDCORE3_TPC3_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC22] = mmDCORE3_TPC4_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC23] = mmDCORE3_TPC5_QM_ARC_AUX_BASE,
+ [CPU_ID_TPC_QMAN_ARC24] = mmDCORE0_TPC6_QM_ARC_AUX_BASE,
+ [CPU_ID_MME_QMAN_ARC0] = mmDCORE0_MME_QM_ARC_AUX_BASE,
+ [CPU_ID_MME_QMAN_ARC1] = mmDCORE2_MME_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC0] = mmDCORE0_EDMA0_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC1] = mmDCORE0_EDMA1_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC2] = mmDCORE1_EDMA0_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC3] = mmDCORE1_EDMA1_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC4] = mmDCORE2_EDMA0_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC5] = mmDCORE2_EDMA1_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC6] = mmDCORE3_EDMA0_QM_ARC_AUX_BASE,
+ [CPU_ID_EDMA_QMAN_ARC7] = mmDCORE3_EDMA1_QM_ARC_AUX_BASE,
+ [CPU_ID_PDMA_QMAN_ARC0] = mmPDMA0_QM_ARC_AUX_BASE,
+ [CPU_ID_PDMA_QMAN_ARC1] = mmPDMA1_QM_ARC_AUX_BASE,
+ [CPU_ID_ROT_QMAN_ARC0] = mmROT0_QM_ARC_AUX_BASE,
+ [CPU_ID_ROT_QMAN_ARC1] = mmROT1_QM_ARC_AUX_BASE,
+ [CPU_ID_NIC_QMAN_ARC0] = mmNIC0_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC1] = mmNIC0_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC2] = mmNIC1_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC3] = mmNIC1_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC4] = mmNIC2_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC5] = mmNIC2_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC6] = mmNIC3_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC7] = mmNIC3_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC8] = mmNIC4_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC9] = mmNIC4_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC10] = mmNIC5_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC11] = mmNIC5_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC12] = mmNIC6_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC13] = mmNIC6_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC14] = mmNIC7_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC15] = mmNIC7_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC16] = mmNIC8_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC17] = mmNIC8_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC18] = mmNIC9_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC19] = mmNIC9_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC20] = mmNIC10_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC21] = mmNIC10_QM_ARC_AUX1_BASE,
+ [CPU_ID_NIC_QMAN_ARC22] = mmNIC11_QM_ARC_AUX0_BASE,
+ [CPU_ID_NIC_QMAN_ARC23] = mmNIC11_QM_ARC_AUX1_BASE,
+};
+
+static const u32 gaudi2_arc_dccm_bases[NUM_ARC_CPUS] = {
+ [CPU_ID_SCHED_ARC0] = mmARC_FARM_ARC0_DCCM0_BASE,
+ [CPU_ID_SCHED_ARC1] = mmARC_FARM_ARC1_DCCM0_BASE,
+ [CPU_ID_SCHED_ARC2] = mmARC_FARM_ARC2_DCCM0_BASE,
+ [CPU_ID_SCHED_ARC3] = mmARC_FARM_ARC3_DCCM0_BASE,
+ [CPU_ID_SCHED_ARC4] = mmDCORE1_MME_QM_ARC_DCCM_BASE,
+ [CPU_ID_SCHED_ARC5] = mmDCORE3_MME_QM_ARC_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC0] = mmDCORE0_TPC0_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC1] = mmDCORE0_TPC1_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC2] = mmDCORE0_TPC2_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC3] = mmDCORE0_TPC3_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC4] = mmDCORE0_TPC4_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC5] = mmDCORE0_TPC5_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC6] = mmDCORE1_TPC0_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC7] = mmDCORE1_TPC1_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC8] = mmDCORE1_TPC2_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC9] = mmDCORE1_TPC3_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC10] = mmDCORE1_TPC4_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC11] = mmDCORE1_TPC5_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC12] = mmDCORE2_TPC0_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC13] = mmDCORE2_TPC1_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC14] = mmDCORE2_TPC2_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC15] = mmDCORE2_TPC3_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC16] = mmDCORE2_TPC4_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC17] = mmDCORE2_TPC5_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC18] = mmDCORE3_TPC0_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC19] = mmDCORE3_TPC1_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC20] = mmDCORE3_TPC2_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC21] = mmDCORE3_TPC3_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC22] = mmDCORE3_TPC4_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC23] = mmDCORE3_TPC5_QM_DCCM_BASE,
+ [CPU_ID_TPC_QMAN_ARC24] = mmDCORE0_TPC6_QM_DCCM_BASE,
+ [CPU_ID_MME_QMAN_ARC0] = mmDCORE0_MME_QM_ARC_DCCM_BASE,
+ [CPU_ID_MME_QMAN_ARC1] = mmDCORE2_MME_QM_ARC_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC0] = mmDCORE0_EDMA0_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC1] = mmDCORE0_EDMA1_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC2] = mmDCORE1_EDMA0_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC3] = mmDCORE1_EDMA1_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC4] = mmDCORE2_EDMA0_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC5] = mmDCORE2_EDMA1_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC6] = mmDCORE3_EDMA0_QM_DCCM_BASE,
+ [CPU_ID_EDMA_QMAN_ARC7] = mmDCORE3_EDMA1_QM_DCCM_BASE,
+ [CPU_ID_PDMA_QMAN_ARC0] = mmPDMA0_QM_ARC_DCCM_BASE,
+ [CPU_ID_PDMA_QMAN_ARC1] = mmPDMA1_QM_ARC_DCCM_BASE,
+ [CPU_ID_ROT_QMAN_ARC0] = mmROT0_QM_ARC_DCCM_BASE,
+ [CPU_ID_ROT_QMAN_ARC1] = mmROT1_QM_ARC_DCCM_BASE,
+ [CPU_ID_NIC_QMAN_ARC0] = mmNIC0_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC1] = mmNIC0_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC2] = mmNIC1_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC3] = mmNIC1_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC4] = mmNIC2_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC5] = mmNIC2_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC6] = mmNIC3_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC7] = mmNIC3_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC8] = mmNIC4_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC9] = mmNIC4_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC10] = mmNIC5_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC11] = mmNIC5_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC12] = mmNIC6_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC13] = mmNIC6_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC14] = mmNIC7_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC15] = mmNIC7_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC16] = mmNIC8_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC17] = mmNIC8_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC18] = mmNIC9_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC19] = mmNIC9_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC20] = mmNIC10_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC21] = mmNIC10_QM_DCCM1_BASE,
+ [CPU_ID_NIC_QMAN_ARC22] = mmNIC11_QM_DCCM0_BASE,
+ [CPU_ID_NIC_QMAN_ARC23] = mmNIC11_QM_DCCM1_BASE,
+};
+
+const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE] = {
+ [MME_ID_DCORE0] = mmDCORE0_MME_CTRL_LO_BASE,
+ [MME_ID_DCORE1] = mmDCORE1_MME_CTRL_LO_BASE,
+ [MME_ID_DCORE2] = mmDCORE2_MME_CTRL_LO_BASE,
+ [MME_ID_DCORE3] = mmDCORE3_MME_CTRL_LO_BASE,
+};
+
+static const u32 gaudi2_queue_id_to_arc_id[GAUDI2_QUEUE_ID_SIZE] = {
+ [GAUDI2_QUEUE_ID_PDMA_0_0] = CPU_ID_PDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_PDMA_0_1] = CPU_ID_PDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_PDMA_0_2] = CPU_ID_PDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_PDMA_0_3] = CPU_ID_PDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_PDMA_1_0] = CPU_ID_PDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_PDMA_1_1] = CPU_ID_PDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_PDMA_1_2] = CPU_ID_PDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_PDMA_1_3] = CPU_ID_PDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = CPU_ID_MME_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = CPU_ID_MME_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = CPU_ID_MME_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = CPU_ID_MME_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = CPU_ID_TPC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = CPU_ID_TPC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = CPU_ID_TPC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = CPU_ID_TPC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = CPU_ID_TPC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = CPU_ID_TPC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = CPU_ID_TPC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = CPU_ID_TPC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = CPU_ID_TPC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = CPU_ID_TPC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = CPU_ID_TPC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = CPU_ID_TPC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = CPU_ID_TPC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = CPU_ID_TPC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = CPU_ID_TPC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = CPU_ID_TPC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = CPU_ID_TPC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = CPU_ID_TPC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = CPU_ID_TPC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = CPU_ID_TPC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = CPU_ID_TPC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = CPU_ID_TPC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = CPU_ID_TPC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = CPU_ID_TPC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = CPU_ID_TPC_QMAN_ARC24,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = CPU_ID_TPC_QMAN_ARC24,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = CPU_ID_TPC_QMAN_ARC24,
+ [GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = CPU_ID_TPC_QMAN_ARC24,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = CPU_ID_SCHED_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = CPU_ID_SCHED_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = CPU_ID_SCHED_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = CPU_ID_SCHED_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = CPU_ID_TPC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = CPU_ID_TPC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = CPU_ID_TPC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = CPU_ID_TPC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = CPU_ID_TPC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = CPU_ID_TPC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = CPU_ID_TPC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = CPU_ID_TPC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = CPU_ID_TPC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = CPU_ID_TPC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = CPU_ID_TPC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = CPU_ID_TPC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = CPU_ID_TPC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = CPU_ID_TPC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = CPU_ID_TPC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = CPU_ID_TPC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = CPU_ID_TPC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = CPU_ID_TPC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = CPU_ID_TPC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = CPU_ID_TPC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = CPU_ID_TPC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = CPU_ID_TPC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = CPU_ID_TPC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = CPU_ID_TPC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = CPU_ID_MME_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = CPU_ID_MME_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = CPU_ID_MME_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = CPU_ID_MME_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = CPU_ID_TPC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = CPU_ID_TPC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = CPU_ID_TPC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = CPU_ID_TPC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = CPU_ID_TPC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = CPU_ID_TPC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = CPU_ID_TPC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = CPU_ID_TPC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = CPU_ID_TPC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = CPU_ID_TPC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = CPU_ID_TPC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = CPU_ID_TPC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = CPU_ID_TPC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = CPU_ID_TPC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = CPU_ID_TPC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = CPU_ID_TPC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = CPU_ID_TPC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = CPU_ID_TPC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = CPU_ID_TPC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = CPU_ID_TPC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = CPU_ID_TPC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = CPU_ID_TPC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = CPU_ID_TPC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = CPU_ID_TPC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = CPU_ID_SCHED_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = CPU_ID_SCHED_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = CPU_ID_SCHED_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = CPU_ID_SCHED_ARC5,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = CPU_ID_TPC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = CPU_ID_TPC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = CPU_ID_TPC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = CPU_ID_TPC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = CPU_ID_TPC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = CPU_ID_TPC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = CPU_ID_TPC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = CPU_ID_TPC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = CPU_ID_TPC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = CPU_ID_TPC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = CPU_ID_TPC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = CPU_ID_TPC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = CPU_ID_TPC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = CPU_ID_TPC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = CPU_ID_TPC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = CPU_ID_TPC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = CPU_ID_TPC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = CPU_ID_TPC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = CPU_ID_TPC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = CPU_ID_TPC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = CPU_ID_TPC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = CPU_ID_TPC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = CPU_ID_TPC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = CPU_ID_TPC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_NIC_0_0] = CPU_ID_NIC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_NIC_0_1] = CPU_ID_NIC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_NIC_0_2] = CPU_ID_NIC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_NIC_0_3] = CPU_ID_NIC_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_NIC_1_0] = CPU_ID_NIC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_NIC_1_1] = CPU_ID_NIC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_NIC_1_2] = CPU_ID_NIC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_NIC_1_3] = CPU_ID_NIC_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_NIC_2_0] = CPU_ID_NIC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_NIC_2_1] = CPU_ID_NIC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_NIC_2_2] = CPU_ID_NIC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_NIC_2_3] = CPU_ID_NIC_QMAN_ARC2,
+ [GAUDI2_QUEUE_ID_NIC_3_0] = CPU_ID_NIC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_NIC_3_1] = CPU_ID_NIC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_NIC_3_2] = CPU_ID_NIC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_NIC_3_3] = CPU_ID_NIC_QMAN_ARC3,
+ [GAUDI2_QUEUE_ID_NIC_4_0] = CPU_ID_NIC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_NIC_4_1] = CPU_ID_NIC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_NIC_4_2] = CPU_ID_NIC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_NIC_4_3] = CPU_ID_NIC_QMAN_ARC4,
+ [GAUDI2_QUEUE_ID_NIC_5_0] = CPU_ID_NIC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_NIC_5_1] = CPU_ID_NIC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_NIC_5_2] = CPU_ID_NIC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_NIC_5_3] = CPU_ID_NIC_QMAN_ARC5,
+ [GAUDI2_QUEUE_ID_NIC_6_0] = CPU_ID_NIC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_NIC_6_1] = CPU_ID_NIC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_NIC_6_2] = CPU_ID_NIC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_NIC_6_3] = CPU_ID_NIC_QMAN_ARC6,
+ [GAUDI2_QUEUE_ID_NIC_7_0] = CPU_ID_NIC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_NIC_7_1] = CPU_ID_NIC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_NIC_7_2] = CPU_ID_NIC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_NIC_7_3] = CPU_ID_NIC_QMAN_ARC7,
+ [GAUDI2_QUEUE_ID_NIC_8_0] = CPU_ID_NIC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_NIC_8_1] = CPU_ID_NIC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_NIC_8_2] = CPU_ID_NIC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_NIC_8_3] = CPU_ID_NIC_QMAN_ARC8,
+ [GAUDI2_QUEUE_ID_NIC_9_0] = CPU_ID_NIC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_NIC_9_1] = CPU_ID_NIC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_NIC_9_2] = CPU_ID_NIC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_NIC_9_3] = CPU_ID_NIC_QMAN_ARC9,
+ [GAUDI2_QUEUE_ID_NIC_10_0] = CPU_ID_NIC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_NIC_10_1] = CPU_ID_NIC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_NIC_10_2] = CPU_ID_NIC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_NIC_10_3] = CPU_ID_NIC_QMAN_ARC10,
+ [GAUDI2_QUEUE_ID_NIC_11_0] = CPU_ID_NIC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_NIC_11_1] = CPU_ID_NIC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_NIC_11_2] = CPU_ID_NIC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_NIC_11_3] = CPU_ID_NIC_QMAN_ARC11,
+ [GAUDI2_QUEUE_ID_NIC_12_0] = CPU_ID_NIC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_NIC_12_1] = CPU_ID_NIC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_NIC_12_2] = CPU_ID_NIC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_NIC_12_3] = CPU_ID_NIC_QMAN_ARC12,
+ [GAUDI2_QUEUE_ID_NIC_13_0] = CPU_ID_NIC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_NIC_13_1] = CPU_ID_NIC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_NIC_13_2] = CPU_ID_NIC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_NIC_13_3] = CPU_ID_NIC_QMAN_ARC13,
+ [GAUDI2_QUEUE_ID_NIC_14_0] = CPU_ID_NIC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_NIC_14_1] = CPU_ID_NIC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_NIC_14_2] = CPU_ID_NIC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_NIC_14_3] = CPU_ID_NIC_QMAN_ARC14,
+ [GAUDI2_QUEUE_ID_NIC_15_0] = CPU_ID_NIC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_NIC_15_1] = CPU_ID_NIC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_NIC_15_2] = CPU_ID_NIC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_NIC_15_3] = CPU_ID_NIC_QMAN_ARC15,
+ [GAUDI2_QUEUE_ID_NIC_16_0] = CPU_ID_NIC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_NIC_16_1] = CPU_ID_NIC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_NIC_16_2] = CPU_ID_NIC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_NIC_16_3] = CPU_ID_NIC_QMAN_ARC16,
+ [GAUDI2_QUEUE_ID_NIC_17_0] = CPU_ID_NIC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_NIC_17_1] = CPU_ID_NIC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_NIC_17_2] = CPU_ID_NIC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_NIC_17_3] = CPU_ID_NIC_QMAN_ARC17,
+ [GAUDI2_QUEUE_ID_NIC_18_0] = CPU_ID_NIC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_NIC_18_1] = CPU_ID_NIC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_NIC_18_2] = CPU_ID_NIC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_NIC_18_3] = CPU_ID_NIC_QMAN_ARC18,
+ [GAUDI2_QUEUE_ID_NIC_19_0] = CPU_ID_NIC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_NIC_19_1] = CPU_ID_NIC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_NIC_19_2] = CPU_ID_NIC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_NIC_19_3] = CPU_ID_NIC_QMAN_ARC19,
+ [GAUDI2_QUEUE_ID_NIC_20_0] = CPU_ID_NIC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_NIC_20_1] = CPU_ID_NIC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_NIC_20_2] = CPU_ID_NIC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_NIC_20_3] = CPU_ID_NIC_QMAN_ARC20,
+ [GAUDI2_QUEUE_ID_NIC_21_0] = CPU_ID_NIC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_NIC_21_1] = CPU_ID_NIC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_NIC_21_2] = CPU_ID_NIC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_NIC_21_3] = CPU_ID_NIC_QMAN_ARC21,
+ [GAUDI2_QUEUE_ID_NIC_22_0] = CPU_ID_NIC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_NIC_22_1] = CPU_ID_NIC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_NIC_22_2] = CPU_ID_NIC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_NIC_22_3] = CPU_ID_NIC_QMAN_ARC22,
+ [GAUDI2_QUEUE_ID_NIC_23_0] = CPU_ID_NIC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_NIC_23_1] = CPU_ID_NIC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_NIC_23_2] = CPU_ID_NIC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_NIC_23_3] = CPU_ID_NIC_QMAN_ARC23,
+ [GAUDI2_QUEUE_ID_ROT_0_0] = CPU_ID_ROT_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_ROT_0_1] = CPU_ID_ROT_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_ROT_0_2] = CPU_ID_ROT_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_ROT_0_3] = CPU_ID_ROT_QMAN_ARC0,
+ [GAUDI2_QUEUE_ID_ROT_1_0] = CPU_ID_ROT_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_ROT_1_1] = CPU_ID_ROT_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_ROT_1_2] = CPU_ID_ROT_QMAN_ARC1,
+ [GAUDI2_QUEUE_ID_ROT_1_3] = CPU_ID_ROT_QMAN_ARC1
+};
+
+const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE] = {
+ [DMA_CORE_ID_PDMA0] = mmPDMA0_CORE_BASE,
+ [DMA_CORE_ID_PDMA1] = mmPDMA1_CORE_BASE,
+ [DMA_CORE_ID_EDMA0] = mmDCORE0_EDMA0_CORE_BASE,
+ [DMA_CORE_ID_EDMA1] = mmDCORE0_EDMA1_CORE_BASE,
+ [DMA_CORE_ID_EDMA2] = mmDCORE1_EDMA0_CORE_BASE,
+ [DMA_CORE_ID_EDMA3] = mmDCORE1_EDMA1_CORE_BASE,
+ [DMA_CORE_ID_EDMA4] = mmDCORE2_EDMA0_CORE_BASE,
+ [DMA_CORE_ID_EDMA5] = mmDCORE2_EDMA1_CORE_BASE,
+ [DMA_CORE_ID_EDMA6] = mmDCORE3_EDMA0_CORE_BASE,
+ [DMA_CORE_ID_EDMA7] = mmDCORE3_EDMA1_CORE_BASE,
+ [DMA_CORE_ID_KDMA] = mmARC_FARM_KDMA_BASE
+};
+
+const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE] = {
+ [MME_ID_DCORE0] = mmDCORE0_MME_ACC_BASE,
+ [MME_ID_DCORE1] = mmDCORE1_MME_ACC_BASE,
+ [MME_ID_DCORE2] = mmDCORE2_MME_ACC_BASE,
+ [MME_ID_DCORE3] = mmDCORE3_MME_ACC_BASE
+};
+
+static const u32 gaudi2_tpc_cfg_blocks_bases[TPC_ID_SIZE] = {
+ [TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_CFG_BASE,
+ [TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_CFG_BASE,
+ [TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_CFG_BASE,
+ [TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_CFG_BASE,
+ [TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_CFG_BASE,
+ [TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_CFG_BASE,
+ [TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_CFG_BASE,
+ [TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_CFG_BASE,
+ [TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_CFG_BASE,
+ [TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_CFG_BASE,
+ [TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_CFG_BASE,
+ [TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_CFG_BASE,
+ [TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_CFG_BASE,
+ [TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_CFG_BASE,
+ [TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_CFG_BASE,
+ [TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_CFG_BASE,
+ [TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_CFG_BASE,
+ [TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_CFG_BASE,
+ [TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_CFG_BASE,
+ [TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_CFG_BASE,
+ [TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_CFG_BASE,
+ [TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_CFG_BASE,
+ [TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_CFG_BASE,
+ [TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_CFG_BASE,
+ [TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_CFG_BASE,
+};
+
+const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE] = {
+ [ROTATOR_ID_0] = mmROT0_BASE,
+ [ROTATOR_ID_1] = mmROT1_BASE
+};
+
+static const u32 gaudi2_tpc_id_to_queue_id[TPC_ID_SIZE] = {
+ [TPC_ID_DCORE0_TPC0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0,
+ [TPC_ID_DCORE0_TPC1] = GAUDI2_QUEUE_ID_DCORE0_TPC_1_0,
+ [TPC_ID_DCORE0_TPC2] = GAUDI2_QUEUE_ID_DCORE0_TPC_2_0,
+ [TPC_ID_DCORE0_TPC3] = GAUDI2_QUEUE_ID_DCORE0_TPC_3_0,
+ [TPC_ID_DCORE0_TPC4] = GAUDI2_QUEUE_ID_DCORE0_TPC_4_0,
+ [TPC_ID_DCORE0_TPC5] = GAUDI2_QUEUE_ID_DCORE0_TPC_5_0,
+ [TPC_ID_DCORE1_TPC0] = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0,
+ [TPC_ID_DCORE1_TPC1] = GAUDI2_QUEUE_ID_DCORE1_TPC_1_0,
+ [TPC_ID_DCORE1_TPC2] = GAUDI2_QUEUE_ID_DCORE1_TPC_2_0,
+ [TPC_ID_DCORE1_TPC3] = GAUDI2_QUEUE_ID_DCORE1_TPC_3_0,
+ [TPC_ID_DCORE1_TPC4] = GAUDI2_QUEUE_ID_DCORE1_TPC_4_0,
+ [TPC_ID_DCORE1_TPC5] = GAUDI2_QUEUE_ID_DCORE1_TPC_5_0,
+ [TPC_ID_DCORE2_TPC0] = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0,
+ [TPC_ID_DCORE2_TPC1] = GAUDI2_QUEUE_ID_DCORE2_TPC_1_0,
+ [TPC_ID_DCORE2_TPC2] = GAUDI2_QUEUE_ID_DCORE2_TPC_2_0,
+ [TPC_ID_DCORE2_TPC3] = GAUDI2_QUEUE_ID_DCORE2_TPC_3_0,
+ [TPC_ID_DCORE2_TPC4] = GAUDI2_QUEUE_ID_DCORE2_TPC_4_0,
+ [TPC_ID_DCORE2_TPC5] = GAUDI2_QUEUE_ID_DCORE2_TPC_5_0,
+ [TPC_ID_DCORE3_TPC0] = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0,
+ [TPC_ID_DCORE3_TPC1] = GAUDI2_QUEUE_ID_DCORE3_TPC_1_0,
+ [TPC_ID_DCORE3_TPC2] = GAUDI2_QUEUE_ID_DCORE3_TPC_2_0,
+ [TPC_ID_DCORE3_TPC3] = GAUDI2_QUEUE_ID_DCORE3_TPC_3_0,
+ [TPC_ID_DCORE3_TPC4] = GAUDI2_QUEUE_ID_DCORE3_TPC_4_0,
+ [TPC_ID_DCORE3_TPC5] = GAUDI2_QUEUE_ID_DCORE3_TPC_5_0,
+ [TPC_ID_DCORE0_TPC6] = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0,
+};
+
+static const u32 gaudi2_rot_id_to_queue_id[ROTATOR_ID_SIZE] = {
+ [ROTATOR_ID_0] = GAUDI2_QUEUE_ID_ROT_0_0,
+ [ROTATOR_ID_1] = GAUDI2_QUEUE_ID_ROT_1_0,
+};
+
+const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0,
+};
+
+static const char gaudi2_vdec_irq_name[GAUDI2_VDEC_MSIX_ENTRIES][GAUDI2_MAX_STRING_LEN] = {
+ "gaudi2 vdec 0_0", "gaudi2 vdec 0_0 abnormal",
+ "gaudi2 vdec 0_1", "gaudi2 vdec 0_1 abnormal",
+ "gaudi2 vdec 1_0", "gaudi2 vdec 1_0 abnormal",
+ "gaudi2 vdec 1_1", "gaudi2 vdec 1_1 abnormal",
+ "gaudi2 vdec 2_0", "gaudi2 vdec 2_0 abnormal",
+ "gaudi2 vdec 2_1", "gaudi2 vdec 2_1 abnormal",
+ "gaudi2 vdec 3_0", "gaudi2 vdec 3_0 abnormal",
+ "gaudi2 vdec 3_1", "gaudi2 vdec 3_1 abnormal",
+ "gaudi2 vdec s_0", "gaudi2 vdec s_0 abnormal",
+ "gaudi2 vdec s_1", "gaudi2 vdec s_1 abnormal"
+};
+
+static const u32 rtr_coordinates_to_rtr_id[NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES] = {
+ RTR_ID_X_Y(2, 4),
+ RTR_ID_X_Y(3, 4),
+ RTR_ID_X_Y(4, 4),
+ RTR_ID_X_Y(5, 4),
+ RTR_ID_X_Y(6, 4),
+ RTR_ID_X_Y(7, 4),
+ RTR_ID_X_Y(8, 4),
+ RTR_ID_X_Y(9, 4),
+ RTR_ID_X_Y(10, 4),
+ RTR_ID_X_Y(11, 4),
+ RTR_ID_X_Y(12, 4),
+ RTR_ID_X_Y(13, 4),
+ RTR_ID_X_Y(14, 4),
+ RTR_ID_X_Y(15, 4),
+ RTR_ID_X_Y(16, 4),
+ RTR_ID_X_Y(17, 4),
+ RTR_ID_X_Y(2, 11),
+ RTR_ID_X_Y(3, 11),
+ RTR_ID_X_Y(4, 11),
+ RTR_ID_X_Y(5, 11),
+ RTR_ID_X_Y(6, 11),
+ RTR_ID_X_Y(7, 11),
+ RTR_ID_X_Y(8, 11),
+ RTR_ID_X_Y(9, 11),
+	RTR_ID_X_Y(0, 0), /* 24 - no id */
+	RTR_ID_X_Y(0, 0), /* 25 - no id */
+	RTR_ID_X_Y(0, 0), /* 26 - no id */
+	RTR_ID_X_Y(0, 0), /* 27 - no id */
+ RTR_ID_X_Y(14, 11),
+ RTR_ID_X_Y(15, 11),
+ RTR_ID_X_Y(16, 11),
+ RTR_ID_X_Y(17, 11)
+};
+
+static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
+ 1, 1, 2, 2, 3, 3, 14, 14, 13, 13, 12, 12, 19, 19, 18, 18, 17,
+ 17, 28, 28, 29, 29, 30, 30, 0
+};
+
+static const u32 gaudi2_dec_initiator_rtr_id[NUMBER_OF_DEC] = {
+ 0, 0, 15, 15, 16, 16, 31, 31, 0, 0
+};
+
+static const u32 gaudi2_nic_initiator_rtr_id[NIC_NUMBER_OF_MACROS] = {
+ 15, 15, 15, 15, 15, 16, 16, 16, 16, 31, 31, 31
+};
+
+struct sft_info {
+ u8 interface_id;
+ u8 dcore_id;
+};
+
+static const struct sft_info gaudi2_edma_initiator_sft_id[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
+ {0, 0}, {1, 0}, {0, 1}, {1, 1}, {1, 2}, {1, 3}, {0, 2}, {0, 3},
+};
+
+static const u32 gaudi2_pdma_initiator_rtr_id[NUM_OF_PDMA] = {
+ 0, 0
+};
+
+static const u32 gaudi2_rot_initiator_rtr_id[NUM_OF_ROT] = {
+ 16, 31
+};
+
+struct mme_initiators_rtr_id {
+ u32 wap0;
+ u32 wap1;
+ u32 write;
+ u32 read;
+ u32 sbte0;
+ u32 sbte1;
+ u32 sbte2;
+ u32 sbte3;
+ u32 sbte4;
+};
+
+enum mme_initiators {
+ MME_WAP0 = 0,
+ MME_WAP1,
+ MME_WRITE,
+ MME_READ,
+ MME_SBTE0,
+ MME_SBTE1,
+ MME_SBTE2,
+ MME_SBTE3,
+ MME_SBTE4,
+ MME_INITIATORS_MAX
+};
+
+static const struct mme_initiators_rtr_id
+gaudi2_mme_initiator_rtr_id[NUM_OF_MME_PER_DCORE * NUM_OF_DCORES] = {
+ { .wap0 = 5, .wap1 = 7, .write = 6, .read = 7,
+ .sbte0 = 7, .sbte1 = 4, .sbte2 = 4, .sbte3 = 5, .sbte4 = 6},
+ { .wap0 = 10, .wap1 = 8, .write = 9, .read = 8,
+ .sbte0 = 11, .sbte1 = 11, .sbte2 = 10, .sbte3 = 9, .sbte4 = 8},
+ { .wap0 = 21, .wap1 = 23, .write = 22, .read = 23,
+ .sbte0 = 20, .sbte1 = 20, .sbte2 = 21, .sbte3 = 22, .sbte4 = 23},
+ { .wap0 = 30, .wap1 = 28, .write = 29, .read = 30,
+ .sbte0 = 31, .sbte1 = 31, .sbte2 = 30, .sbte3 = 29, .sbte4 = 28},
+};
+
+enum razwi_event_sources {
+ RAZWI_TPC,
+ RAZWI_MME,
+ RAZWI_EDMA,
+ RAZWI_PDMA,
+ RAZWI_NIC,
+ RAZWI_DEC,
+ RAZWI_ROT
+};
+
+struct hbm_mc_error_causes {
+ u32 mask;
+ char cause[50];
+};
+
+static struct hbm_mc_error_causes hbm_mc_spi[GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE] = {
+ {HBM_MC_SPI_TEMP_PIN_CHG_MASK, "temperature pins changed"},
+ {HBM_MC_SPI_THR_ENG_MASK, "temperature-based throttling engaged"},
+ {HBM_MC_SPI_THR_DIS_ENG_MASK, "temperature-based throttling disengaged"},
+ {HBM_MC_SPI_IEEE1500_COMP_MASK, "IEEE1500 op comp"},
+ {HBM_MC_SPI_IEEE1500_PAUSED_MASK, "IEEE1500 op paused"},
+};
+
+static const char * const hbm_mc_sei_cause[GAUDI2_NUM_OF_HBM_SEI_CAUSE] = {
+ [HBM_SEI_CMD_PARITY_EVEN] = "SEI C/A parity even",
+ [HBM_SEI_CMD_PARITY_ODD] = "SEI C/A parity odd",
+ [HBM_SEI_READ_ERR] = "SEI read data error",
+ [HBM_SEI_WRITE_DATA_PARITY_ERR] = "SEI write data parity error",
+ [HBM_SEI_CATTRIP] = "SEI CATTRIP asserted",
+ [HBM_SEI_MEM_BIST_FAIL] = "SEI memory BIST fail",
+ [HBM_SEI_DFI] = "SEI DFI error",
+ [HBM_SEI_INV_TEMP_READ_OUT] = "SEI invalid temp read",
+ [HBM_SEI_BIST_FAIL] = "SEI BIST fail"
+};
+
+struct mmu_spi_sei_cause {
+ char cause[50];
+ int clear_bit;
+};
+
+static const struct mmu_spi_sei_cause gaudi2_mmu_spi_sei[GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE] = {
+ {"page fault", 1}, /* INTERRUPT_CLR[1] */
+ {"page access", 1}, /* INTERRUPT_CLR[1] */
+ {"bypass ddr", 2}, /* INTERRUPT_CLR[2] */
+ {"multi hit", 2}, /* INTERRUPT_CLR[2] */
+ {"mmu rei0", -1}, /* no clear register bit */
+ {"mmu rei1", -1}, /* no clear register bit */
+ {"stlb rei0", -1}, /* no clear register bit */
+ {"stlb rei1", -1}, /* no clear register bit */
+ {"rr privileged write hit", 2}, /* INTERRUPT_CLR[2] */
+ {"rr privileged read hit", 2}, /* INTERRUPT_CLR[2] */
+ {"rr secure write hit", 2}, /* INTERRUPT_CLR[2] */
+ {"rr secure read hit", 2}, /* INTERRUPT_CLR[2] */
+ {"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
+ {"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
+ {"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
+ {"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
+ {"slave error", 16}, /* INTERRUPT_CLR[16] */
+ {"dec error", 17}, /* INTERRUPT_CLR[17] */
+ {"burst fifo full", 2} /* INTERRUPT_CLR[2] */
+};
+
+struct gaudi2_cache_invld_params {
+ u64 start_va;
+ u64 end_va;
+ u32 inv_start_val;
+ u32 flags;
+ bool range_invalidation;
+};
+
+struct gaudi2_tpc_idle_data {
+ struct seq_file *s;
+ unsigned long *mask;
+ bool *is_idle;
+ const char *tpc_fmt;
+};
+
+struct gaudi2_tpc_mmu_data {
+ u32 rw_asid;
+};
+
+static s64 gaudi2_state_dump_specs_props[SP_MAX] = {0};
+
+static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val);
+static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id);
+static bool gaudi2_is_arc_enabled(struct hl_device *hdev, u64 arc_id);
+static void gaudi2_clr_arc_id_cap(struct hl_device *hdev, u64 arc_id);
+static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id);
+static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val);
+static int gaudi2_send_job_to_kdma(struct hl_device *hdev, u64 src_addr, u64 dst_addr, u32 size,
+ bool is_memset);
+static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr);
+
+static void gaudi2_init_scrambler_hbm(struct hl_device *hdev)
+{
+
+}
+
+static u32 gaudi2_get_signal_cb_size(struct hl_device *hdev)
+{
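+	/* a signal CB consists of a single MSG_SHORT packet */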
+ return sizeof(struct packet_msg_short);
+}
+
+static u32 gaudi2_get_wait_cb_size(struct hl_device *hdev)
+{
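+	/* a wait CB consists of 4 MSG_SHORT packets followed by a FENCE packet */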
+ return sizeof(struct packet_msg_short) * 4 + sizeof(struct packet_fence);
+}
+
+void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int dcore, inst, tpc_seq;
+ u32 offset;
+
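+	/* run the callback on every enabled TPC in every dcore, passing the TPC's
+	 * register-block offset. DCORE0_TPC6 (the PCI TPC) is handled separately below.
+	 */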
+ for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) {
+ for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) {
+ tpc_seq = dcore * NUM_OF_TPC_PER_DCORE + inst;
+
+ if (!(prop->tpc_enabled_mask & BIT(tpc_seq)))
+ continue;
+
+ offset = (DCORE_OFFSET * dcore) + (DCORE_TPC_OFFSET * inst);
+
+ ctx->fn(hdev, dcore, inst, offset, ctx->data);
+ }
+ }
+
+ if (!(prop->tpc_enabled_mask & BIT(TPC_ID_DCORE0_TPC6)))
+ return;
+
+ /* special check for PCI TPC (DCORE0_TPC6) */
+ offset = DCORE_TPC_OFFSET * (NUM_DCORE0_TPC - 1);
+ ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx->data);
+}
+
+static bool gaudi2_host_phys_addr_valid(u64 addr)
+{
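+	/* a host physical address is valid if it is below
+	 * HOST_PHYS_BASE_0 + HOST_PHYS_SIZE_0 or at/above HOST_PHYS_BASE_1
+	 */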
+ if ((addr < HOST_PHYS_BASE_0 + HOST_PHYS_SIZE_0) || (addr >= HOST_PHYS_BASE_1))
+ return true;
+
+ return false;
+}
+
+static int set_number_of_functional_hbms(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u8 faulty_hbms = hweight64(hdev->dram_binning);
+
+ /* check if all HBMs should be used */
+ if (!faulty_hbms) {
+ dev_dbg(hdev->dev, "All HBM are in use (no binning)\n");
+ prop->num_functional_hbms = GAUDI2_HBM_NUM;
+ return 0;
+ }
+
+ /*
+ * check for error condition in which number of binning
+ * candidates is higher than the maximum supported by the
+ * driver (in which case binning mask shall be ignored and driver will
+ * set the default)
+ */
+ if (faulty_hbms > MAX_FAULTY_HBMS) {
+ dev_err(hdev->dev,
+ "HBM binning supports max of %d faulty HBMs, supplied mask 0x%llx.\n",
+ MAX_FAULTY_HBMS, hdev->dram_binning);
+ return -EINVAL;
+ }
+
+ /*
+	 * when binning is in effect, the number of functional HBMs in Gaudi2 is
+	 * GAUDI2_HBM_NUM minus the number of faulty HBMs.
+ */
+ prop->num_functional_hbms = GAUDI2_HBM_NUM - faulty_hbms;
+ return 0;
+}
+
+static int gaudi2_set_dram_properties(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 basic_hbm_page_size;
+ int rc;
+
+ rc = set_number_of_functional_hbms(hdev);
+ if (rc)
+ return -EINVAL;
+
+ /*
+	 * Due to a HW bug in which the TLB size is x16 smaller than expected, we use a
+	 * workaround of a x16 bigger page size, so that the entire HBM mapping can be
+	 * populated in the TLB
+ */
+ basic_hbm_page_size = prop->num_functional_hbms * SZ_8M;
+ prop->dram_page_size = GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR * basic_hbm_page_size;
+ prop->device_mem_alloc_default_page_size = prop->dram_page_size;
+ prop->dram_size = prop->num_functional_hbms * SZ_16G;
+ prop->dram_base_address = DRAM_PHYS_BASE;
+ prop->dram_end_address = prop->dram_base_address + prop->dram_size;
+ prop->dram_supports_virtual_memory = true;
+
+ prop->dram_user_base_address = DRAM_PHYS_BASE + prop->dram_page_size;
+ prop->dram_hints_align_mask = ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK;
+ prop->hints_dram_reserved_va_range.start_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_START;
+ prop->hints_dram_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_END;
+
+	/* Since the DRAM page size differs from the DMMU page size, we need to allocate
+	 * DRAM memory in units of dram_page size and map this memory in
+	 * units of DMMU page size. We overcome this size mismatch using a
+	 * scrambling routine which takes a DRAM page and converts it to a DMMU
+	 * page.
+	 * We therefore:
+	 * 1. partition the virtual address space to DRAM-page (whole) pages.
+	 *    (suppose we get n such pages)
+	 * 2. limit the amount of virtual address space we got from 1 above to
+	 *    a multiple of 64M as we don't want the scrambled address to cross
+	 *    the DRAM virtual address space.
+	 *    ( m = (n * DRAM_page_size) / DMMU_page_size).
+	 * 3. determine the end address accordingly:
+	 *    end_addr = start_addr + m * 48M
+	 *
+	 * the DRAM address MSBs (63:48) are not part of the roundup calculation
+ */
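+	/* In other words: start_addr is placed just past the physical DRAM range,
+	 * rounded up to a whole DRAM page, and end_addr adds m whole DRAM pages,
+	 * where m is the number of DMMU pages that fit up to VA_HBM_SPACE_END.
+	 */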
+ prop->dmmu.start_addr = prop->dram_base_address +
+ (prop->dram_page_size *
+ DIV_ROUND_UP_SECTOR_T(prop->dram_size, prop->dram_page_size));
+
+ prop->dmmu.end_addr = prop->dmmu.start_addr + prop->dram_page_size *
+ div_u64((VA_HBM_SPACE_END - prop->dmmu.start_addr), prop->dmmu.page_size);
+
+ return 0;
+}
+
+static int gaudi2_set_fixed_properties(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hw_queue_properties *q_props;
+ u32 num_sync_stream_queues = 0;
+ int i;
+
+ prop->max_queues = GAUDI2_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues, sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
+
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
+ q_props = prop->hw_queues_props;
+
+ for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) {
+ q_props[i].type = QUEUE_TYPE_HW;
+ q_props[i].driver_only = 0;
+
+ if (i >= GAUDI2_QUEUE_ID_NIC_0_0 && i <= GAUDI2_QUEUE_ID_NIC_23_3) {
+ q_props[i].supports_sync_stream = 0;
+ } else {
+ q_props[i].supports_sync_stream = 1;
+ num_sync_stream_queues++;
+ }
+
+ q_props[i].cb_alloc_flags = CB_ALLOC_USER;
+ }
+
+ q_props[GAUDI2_QUEUE_ID_CPU_PQ].type = QUEUE_TYPE_CPU;
+ q_props[GAUDI2_QUEUE_ID_CPU_PQ].driver_only = 1;
+ q_props[GAUDI2_QUEUE_ID_CPU_PQ].cb_alloc_flags = CB_ALLOC_KERNEL;
+
+ prop->cache_line_size = DEVICE_CACHE_LINE_SIZE;
+ prop->cfg_base_address = CFG_BASE;
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE_0;
+ prop->host_base_address = HOST_PHYS_BASE_0;
+ prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE_0;
+ prop->max_pending_cs = GAUDI2_MAX_PENDING_CS;
+ prop->completion_queues_count = GAUDI2_RESERVED_CQ_NUMBER;
+ prop->user_dec_intr_count = NUMBER_OF_DEC;
+ prop->user_interrupt_count = GAUDI2_IRQ_NUM_USER_LAST - GAUDI2_IRQ_NUM_USER_FIRST + 1;
+ prop->completion_mode = HL_COMPLETION_MODE_CS;
+ prop->sync_stream_first_sob = GAUDI2_RESERVED_SOB_NUMBER;
+ prop->sync_stream_first_mon = GAUDI2_RESERVED_MON_NUMBER;
+
+ prop->sram_base_address = SRAM_BASE_ADDR;
+ prop->sram_size = SRAM_SIZE;
+ prop->sram_end_address = prop->sram_base_address + prop->sram_size;
+ prop->sram_user_base_address = prop->sram_base_address + SRAM_USER_BASE_OFFSET;
+
+ prop->hints_range_reservation = true;
+
+ if (hdev->pldm)
+ prop->mmu_pgt_size = 0x800000; /* 8MB */
+ else
+ prop->mmu_pgt_size = MMU_PAGE_TABLES_INITIAL_SIZE;
+
+ prop->mmu_pte_size = HL_PTE_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
+
+ prop->dmmu.hop_shifts[MMU_HOP0] = DHOP0_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP1] = DHOP1_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP2] = DHOP2_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP3] = DHOP3_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP4] = DHOP4_SHIFT;
+ prop->dmmu.hop_masks[MMU_HOP0] = DHOP0_MASK;
+ prop->dmmu.hop_masks[MMU_HOP1] = DHOP1_MASK;
+ prop->dmmu.hop_masks[MMU_HOP2] = DHOP2_MASK;
+ prop->dmmu.hop_masks[MMU_HOP3] = DHOP3_MASK;
+ prop->dmmu.hop_masks[MMU_HOP4] = DHOP4_MASK;
+ prop->dmmu.page_size = PAGE_SIZE_1GB;
+ prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
+ prop->dmmu.last_mask = LAST_MASK;
+ prop->dmmu.host_resident = 1;
+ /* TODO: will be duplicated until implementing per-MMU props */
+ prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
+ prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+
+ /*
+	 * this is done in order to be able to validate the FW descriptor (i.e. to verify
+	 * that the addresses and allocated space for the FW image do not cross memory
+	 * bounds). For this reason we set the DRAM size to the minimum possible, and
+	 * later it will be modified according to what is reported in the cpucp info packet
+ */
+ prop->dram_size = (GAUDI2_HBM_NUM - 1) * SZ_16G;
+
+ hdev->pmmu_huge_range = true;
+ prop->pmmu.host_resident = 1;
+ prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
+ prop->pmmu.last_mask = LAST_MASK;
+ /* TODO: will be duplicated until implementing per-MMU props */
+ prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
+ prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+
+ prop->hints_host_reserved_va_range.start_addr = RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START;
+ prop->hints_host_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END;
+ prop->hints_host_hpage_reserved_va_range.start_addr =
+ RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START;
+ prop->hints_host_hpage_reserved_va_range.end_addr =
+ RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END;
+
+ if (PAGE_SIZE == SZ_64K) {
+ prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_64K;
+ prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_64K;
+ prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_64K;
+ prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_64K;
+ prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_64K;
+ prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_64K;
+ prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_64K;
+ prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_64K;
+ prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_64K;
+ prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_64K;
+ prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_64K;
+ prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_64K;
+ prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START;
+ prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END;
+ prop->pmmu.page_size = PAGE_SIZE_64KB;
+
+ /* shifts and masks are the same in PMMU and HPMMU */
+ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+ prop->pmmu_huge.page_size = PAGE_SIZE_16MB;
+ prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START;
+ prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
+ } else {
+ prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_4K;
+ prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_4K;
+ prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_4K;
+ prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_4K;
+ prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_4K;
+ prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_4K;
+ prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_4K;
+ prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_4K;
+ prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_4K;
+ prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_4K;
+ prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_4K;
+ prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_4K;
+ prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START;
+ prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
+ /* shifts and masks are the same in PMMU and HPMMU */
+ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+ prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
+ prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START;
+ prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
+ }
+
+ prop->cfg_size = CFG_SIZE;
+ prop->max_asid = MAX_ASID;
+ prop->num_of_events = GAUDI2_EVENT_SIZE;
+
+ prop->dc_power_default = DC_POWER_DEFAULT;
+
+ prop->cb_pool_cb_cnt = GAUDI2_CB_POOL_CB_CNT;
+ prop->cb_pool_cb_size = GAUDI2_CB_POOL_CB_SIZE;
+ prop->pcie_dbi_base_address = CFG_BASE + mmPCIE_DBI_BASE;
+ prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN);
+
+ prop->mme_master_slave_mode = 1;
+
+ prop->first_available_user_sob[0] = GAUDI2_RESERVED_SOB_NUMBER +
+ (num_sync_stream_queues * HL_RSVD_SOBS);
+
+ prop->first_available_user_mon[0] = GAUDI2_RESERVED_MON_NUMBER +
+ (num_sync_stream_queues * HL_RSVD_MONS);
+
+ prop->first_available_user_interrupt = GAUDI2_IRQ_NUM_USER_FIRST;
+
+ prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER;
+
+ prop->fw_cpu_boot_dev_sts0_valid = false;
+ prop->fw_cpu_boot_dev_sts1_valid = false;
+ prop->hard_reset_done_by_fw = false;
+ prop->gic_interrupts_enable = true;
+
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
+ prop->cb_va_start_addr = VA_HOST_SPACE_USER_MAPPED_CB_START;
+ prop->cb_va_end_addr = VA_HOST_SPACE_USER_MAPPED_CB_END;
+
+ prop->max_dec = NUMBER_OF_DEC;
+
+ prop->clk_pll_index = HL_GAUDI2_MME_PLL;
+
+ prop->dma_mask = 64;
+
+ return 0;
+}
+
+static int gaudi2_pci_bars_map(struct hl_device *hdev)
+{
+ static const char * const name[] = {"CFG_SRAM", "MSIX", "DRAM"};
+ bool is_wc[3] = {false, false, true};
+ int rc;
+
+ rc = hl_pci_bars_map(hdev, name, is_wc);
+ if (rc)
+ return rc;
+
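+	/* the CFG region starts at offset (CFG_BASE - STM_FLASH_BASE_ADDR) within the
+	 * CFG/SRAM BAR
+	 */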
+ hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + (CFG_BASE - STM_FLASH_BASE_ADDR);
+
+ return 0;
+}
+
+static u64 gaudi2_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
+ u64 old_addr = addr;
+ int rc;
+
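+	/* nothing to do if the DRAM BAR already points at the requested address */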
+ if ((gaudi2) && (gaudi2->dram_bar_cur_addr == addr))
+ return old_addr;
+
+ if (hdev->asic_prop.iatu_done_by_fw)
+ return U64_MAX;
+
+ /* Inbound Region 2 - Bar 4 - Point to DRAM */
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = DRAM_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
+ if (rc)
+ return U64_MAX;
+
+ if (gaudi2) {
+ old_addr = gaudi2->dram_bar_cur_addr;
+ gaudi2->dram_bar_cur_addr = addr;
+ }
+
+ return old_addr;
+}
+
+static int gaudi2_init_iatu(struct hl_device *hdev)
+{
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ u32 bar_addr_low, bar_addr_high;
+ int rc;
+
+ if (hdev->asic_prop.iatu_done_by_fw)
+ return 0;
+
+ /* Temporary inbound Region 0 - Bar 0 - Point to CFG
+ * We must map this region in BAR match mode in order to
+ * fetch BAR physical base address
+ */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_CFG_BAR_ID;
+ /* Base address must be aligned to Bar size which is 256 MB */
+ inbound_region.addr = STM_FLASH_BASE_ADDR - STM_FLASH_ALIGNED_OFF;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ return rc;
+
+ /* Fetch physical BAR address */
+ bar_addr_high = RREG32(mmPCIE_DBI_BAR1_REG + STM_FLASH_ALIGNED_OFF);
+ bar_addr_low = RREG32(mmPCIE_DBI_BAR0_REG + STM_FLASH_ALIGNED_OFF) & ~0xF;
+
+ hdev->pcie_bar_phys[SRAM_CFG_BAR_ID] = (u64)bar_addr_high << 32 | bar_addr_low;
+
+ /* Inbound Region 0 - Bar 0 - Point to CFG */
+ inbound_region.mode = PCI_ADDRESS_MATCH_MODE;
+ inbound_region.bar = SRAM_CFG_BAR_ID;
+ inbound_region.offset_in_bar = 0;
+ inbound_region.addr = STM_FLASH_BASE_ADDR;
+ inbound_region.size = CFG_REGION_SIZE;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ return rc;
+
+ /* Inbound Region 1 - Bar 0 - Point to BAR0_RESERVED + SRAM */
+ inbound_region.mode = PCI_ADDRESS_MATCH_MODE;
+ inbound_region.bar = SRAM_CFG_BAR_ID;
+ inbound_region.offset_in_bar = CFG_REGION_SIZE;
+ inbound_region.addr = BAR0_RSRVD_BASE_ADDR;
+ inbound_region.size = BAR0_RSRVD_SIZE + SRAM_SIZE;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ return rc;
+
+ /* Inbound Region 2 - Bar 4 - Point to DRAM */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = DRAM_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
+ if (rc)
+ return rc;
+
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE_0;
+ outbound_region.size = HOST_PHYS_SIZE_0;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+ return rc;
+}
+
+static enum hl_device_hw_state gaudi2_get_hw_state(struct hl_device *hdev)
+{
+ return RREG32(mmHW_STATE);
+}
+
+static int gaudi2_tpc_binning_init_prop(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /*
+ * check for error condition in which number of binning candidates
+ * is higher than the maximum supported by the driver
+ */
+ if (hweight64(hdev->tpc_binning) > MAX_CLUSTER_BINNING_FAULTY_TPCS) {
+ dev_err(hdev->dev, "TPC binning is supported for max of %d faulty TPCs, provided mask 0x%llx\n",
+ MAX_CLUSTER_BINNING_FAULTY_TPCS,
+ hdev->tpc_binning);
+ return -EINVAL;
+ }
+
+ prop->tpc_binning_mask = hdev->tpc_binning;
+ prop->tpc_enabled_mask = GAUDI2_TPC_FULL_MASK;
+
+ return 0;
+}
+
+static int gaudi2_set_tpc_binning_masks(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hw_queue_properties *q_props = prop->hw_queues_props;
+ u64 tpc_binning_mask;
+ u8 subst_idx = 0;
+ int i, rc;
+
+ rc = gaudi2_tpc_binning_init_prop(hdev);
+ if (rc)
+ return rc;
+
+ tpc_binning_mask = prop->tpc_binning_mask;
+
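+	/* each binned TPC is replaced by a substitute engine: the first binned TPC
+	 * by DCORE0_TPC6 and the second by DCORE3_TPC5
+	 */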
+ for (i = 0 ; i < MAX_FAULTY_TPCS ; i++) {
+ u8 subst_seq, binned, qid_base;
+
+ if (tpc_binning_mask == 0)
+ break;
+
+ if (subst_idx == 0) {
+ subst_seq = TPC_ID_DCORE0_TPC6;
+ qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0;
+ } else {
+ subst_seq = TPC_ID_DCORE3_TPC5;
+ qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_5_0;
+ }
+
+		/* clear the binned TPC's bit from the mask */
+ binned = __ffs(tpc_binning_mask);
+ /*
+		 * Coverity complains about a possible out-of-bounds access in
+		 * clear_bit()
+ */
+ if (binned >= TPC_ID_SIZE) {
+ dev_err(hdev->dev,
+ "Invalid binned TPC (binning mask: %llx)\n",
+ tpc_binning_mask);
+ return -EINVAL;
+ }
+ clear_bit(binned, (unsigned long *)&tpc_binning_mask);
+
+ /* also clear replacing TPC bit from enabled mask */
+ clear_bit(subst_seq, (unsigned long *)&prop->tpc_enabled_mask);
+
+		/* bin the substitute TPC's queues */
+ q_props[qid_base].binned = 1;
+ q_props[qid_base + 1].binned = 1;
+ q_props[qid_base + 2].binned = 1;
+ q_props[qid_base + 3].binned = 1;
+
+ subst_idx++;
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_dec_binning_masks(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u8 num_faulty;
+
+ num_faulty = hweight32(hdev->decoder_binning);
+
+ /*
+ * check for error condition in which number of binning candidates
+ * is higher than the maximum supported by the driver
+ */
+ if (num_faulty > MAX_FAULTY_DECODERS) {
+ dev_err(hdev->dev, "decoder binning is supported for max of single faulty decoder, provided mask 0x%x\n",
+ hdev->decoder_binning);
+ return -EINVAL;
+ }
+
+ prop->decoder_binning_mask = (hdev->decoder_binning & GAUDI2_DECODER_FULL_MASK);
+
+ if (prop->decoder_binning_mask)
+ prop->decoder_enabled_mask = (GAUDI2_DECODER_FULL_MASK & ~BIT(DEC_ID_PCIE_VDEC1));
+ else
+ prop->decoder_enabled_mask = GAUDI2_DECODER_FULL_MASK;
+
+ return 0;
+}
+
+static void gaudi2_set_dram_binning_masks(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /* check if we should override default binning */
+ if (!hdev->dram_binning) {
+ prop->dram_binning_mask = 0;
+ prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK;
+ return;
+ }
+
+ /* set DRAM binning constraints */
+ prop->faulty_dram_cluster_map |= hdev->dram_binning;
+ prop->dram_binning_mask = hdev->dram_binning;
+ prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK & ~BIT(HBM_ID5);
+}
+
+static int gaudi2_set_edma_binning_masks(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hw_queue_properties *q_props;
+ u8 seq, num_faulty;
+
+ num_faulty = hweight32(hdev->edma_binning);
+
+ /*
+ * check for error condition in which number of binning candidates
+ * is higher than the maximum supported by the driver
+ */
+ if (num_faulty > MAX_FAULTY_EDMAS) {
+ dev_err(hdev->dev,
+ "EDMA binning is supported for max of single faulty EDMA, provided mask 0x%x\n",
+ hdev->edma_binning);
+ return -EINVAL;
+ }
+
+ if (!hdev->edma_binning) {
+ prop->edma_binning_mask = 0;
+ prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK;
+ return 0;
+ }
+
+ seq = __ffs((unsigned long)hdev->edma_binning);
+
+ /* set binning constraints */
+ prop->faulty_dram_cluster_map |= BIT(edma_to_hbm_cluster[seq]);
+ prop->edma_binning_mask = hdev->edma_binning;
+ prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK & ~BIT(EDMA_ID_DCORE3_INSTANCE1);
+
+ /* bin substitute EDMA's queue */
+ q_props = prop->hw_queues_props;
+ q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0].binned = 1;
+ q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1].binned = 1;
+ q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2].binned = 1;
+ q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3].binned = 1;
+
+ return 0;
+}
+
+static int gaudi2_set_xbar_edge_enable_mask(struct hl_device *hdev, u32 xbar_edge_iso_mask)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u8 num_faulty, seq;
+
+ /* check if we should override default binning */
+ if (!xbar_edge_iso_mask) {
+ prop->xbar_edge_enabled_mask = GAUDI2_XBAR_EDGE_FULL_MASK;
+ return 0;
+ }
+
+ /*
+	 * note that it can be set to a value other than 0 only after the cpucp packet
+	 * (i.e. only the FW can set a redundancy value). For the user it will always be 0.
+ */
+ num_faulty = hweight32(xbar_edge_iso_mask);
+
+ /*
+ * check for error condition in which number of binning candidates
+ * is higher than the maximum supported by the driver
+ */
+ if (num_faulty > MAX_FAULTY_XBARS) {
+ dev_err(hdev->dev, "we cannot have more than %d faulty XBAR EDGE\n",
+ MAX_FAULTY_XBARS);
+ return -EINVAL;
+ }
+
+ seq = __ffs((unsigned long)xbar_edge_iso_mask);
+
+ /* set binning constraints */
+ prop->faulty_dram_cluster_map |= BIT(xbar_edge_to_hbm_cluster[seq]);
+ prop->xbar_edge_enabled_mask = (~xbar_edge_iso_mask) & GAUDI2_XBAR_EDGE_FULL_MASK;
+
+ return 0;
+}
+
+static int gaudi2_set_cluster_binning_masks_common(struct hl_device *hdev, u8 xbar_edge_iso_mask)
+{
+ int rc;
+
+ /*
+	 * mark all clusters as good; each component will "fail" its cluster
+	 * based on eFuse/user values.
+	 * If more than a single cluster is faulty, the chip is unusable
+ */
+ hdev->asic_prop.faulty_dram_cluster_map = 0;
+
+ gaudi2_set_dram_binning_masks(hdev);
+
+ rc = gaudi2_set_edma_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_xbar_edge_enable_mask(hdev, xbar_edge_iso_mask);
+ if (rc)
+ return rc;
+
+ /* always initially set to full mask */
+ hdev->asic_prop.hmmu_hif_enabled_mask = GAUDI2_HIF_HMMU_FULL_MASK;
+
+ return 0;
+}
+
+static int gaudi2_set_cluster_binning_masks(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ rc = gaudi2_set_cluster_binning_masks_common(hdev, prop->cpucp_info.xbar_binning_mask);
+ if (rc)
+ return rc;
+
+ /* if we have DRAM binning reported by FW we should perform cluster config */
+ if (prop->faulty_dram_cluster_map) {
+ u8 cluster_seq = __ffs((unsigned long)prop->faulty_dram_cluster_map);
+
+ prop->hmmu_hif_enabled_mask = cluster_hmmu_hif_enabled_mask[cluster_seq];
+ }
+
+ return 0;
+}
+
+static int gaudi2_cpucp_info_get(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ long max_power;
+ u64 dram_size;
+ int rc;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+	/* No point in asking for this information again when not doing a hard reset, as
+	 * the device CPU hasn't been reset
+	 */
+ if (hdev->reset_info.in_compute_reset)
+ return 0;
+
+ rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
+ mmCPU_BOOT_ERR1);
+ if (rc)
+ return rc;
+
+ dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
+ if (dram_size) {
+		/* we can have either 5 or 6 HBMs. Other values are invalid */
+
+ if ((dram_size != ((GAUDI2_HBM_NUM - 1) * SZ_16G)) &&
+ (dram_size != (GAUDI2_HBM_NUM * SZ_16G))) {
+ dev_err(hdev->dev,
+ "F/W reported invalid DRAM size %llu. Trying to use default size %llu\n",
+ dram_size, prop->dram_size);
+ dram_size = prop->dram_size;
+ }
+
+ prop->dram_size = dram_size;
+ prop->dram_end_address = prop->dram_base_address + dram_size;
+ }
+
+ if (!strlen(prop->cpucp_info.card_name))
+ strncpy(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN);
+
+ /* Overwrite binning masks with the actual binning values from F/W */
+ hdev->dram_binning = prop->cpucp_info.dram_binning_mask;
+ hdev->edma_binning = prop->cpucp_info.edma_binning_mask;
+ hdev->tpc_binning = le64_to_cpu(prop->cpucp_info.tpc_binning_mask);
+ hdev->decoder_binning = lower_32_bits(le64_to_cpu(prop->cpucp_info.decoder_binning_mask));
+
+ /*
+ * at this point the DRAM parameters need to be updated according to data obtained
+ * from the FW
+ */
+ rc = gaudi2_set_dram_properties(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_cluster_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_tpc_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_dec_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ max_power = hl_fw_get_max_power(hdev);
+ if (max_power < 0)
+ return max_power;
+
+ prop->max_power_default = (u64) max_power;
+
+ return 0;
+}
+
+static int gaudi2_fetch_psoc_frequency(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS];
+ int rc;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI2_CPU_PLL, pll_freq_arr);
+ if (rc)
+ return rc;
+
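+	/* the PSOC timestamp frequency is taken from output 3 of the CPU PLL */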
+ hdev->asic_prop.psoc_timestamp_frequency = pll_freq_arr[3];
+
+ return 0;
+}
+
+static int gaudi2_early_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_dev *pdev = hdev->pdev;
+ resource_size_t pci_bar_size;
+ u32 fw_boot_status;
+ int rc;
+
+ rc = gaudi2_set_fixed_properties(hdev);
+ if (rc)
+ return rc;
+
+ /* Check BAR sizes */
+ pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID);
+
+ if (pci_bar_size != CFG_BAR_SIZE) {
+ dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
+ SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
+ rc = -ENODEV;
+ goto free_queue_props;
+ }
+
+ pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID);
+ if (pci_bar_size != MSIX_BAR_SIZE) {
+ dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
+ MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE);
+ rc = -ENODEV;
+ goto free_queue_props;
+ }
+
+ prop->dram_pci_bar_size = pci_resource_len(pdev, DRAM_BAR_ID);
+ hdev->dram_pci_bar_start = pci_resource_start(pdev, DRAM_BAR_ID);
+
+ /* If FW security is enabled at this point it means no access to ELBI */
+ if (hdev->asic_prop.fw_security_enabled) {
+ hdev->asic_prop.iatu_done_by_fw = true;
+ goto pci_init;
+ }
+
+ rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0, &fw_boot_status);
+ if (rc)
+ goto free_queue_props;
+
+ /* Check whether FW is configuring iATU */
+ if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
+ (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+ hdev->asic_prop.iatu_done_by_fw = true;
+
+pci_init:
+ rc = hl_pci_init(hdev);
+ if (rc)
+ goto free_queue_props;
+
+ /* Before continuing with the initialization, we need to read the preboot
+ * version to determine whether we are running with security-enabled firmware
+ */
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
+
+ if (gaudi2_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ }
+
+ return 0;
+
+pci_fini:
+ hl_pci_fini(hdev);
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
+}
+
+static int gaudi2_early_fini(struct hl_device *hdev)
+{
+ kfree(hdev->asic_prop.hw_queues_props);
+ hl_pci_fini(hdev);
+
+ return 0;
+}
+
+static bool gaudi2_is_arc_nic_owned(u64 arc_id)
+{
+ switch (arc_id) {
+ case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool gaudi2_is_arc_tpc_owned(u64 arc_id)
+{
+ switch (arc_id) {
+ case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void gaudi2_init_arcs(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 arc_id;
+ u32 i;
+
+ for (i = CPU_ID_SCHED_ARC0 ; i <= CPU_ID_SCHED_ARC3 ; i++) {
+ if (gaudi2_is_arc_enabled(hdev, i))
+ continue;
+
+ gaudi2_set_arc_id_cap(hdev, i);
+ }
+
+ for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i += 4) {
+ if (!gaudi2_is_queue_enabled(hdev, i))
+ continue;
+
+ arc_id = gaudi2_queue_id_to_arc_id[i];
+ if (gaudi2_is_arc_enabled(hdev, arc_id))
+ continue;
+
+ if (gaudi2_is_arc_nic_owned(arc_id) &&
+ !(hdev->nic_ports_mask & BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0)))
+ continue;
+
+ if (gaudi2_is_arc_tpc_owned(arc_id) && !(gaudi2->tpc_hw_cap_initialized &
+ BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0)))
+ continue;
+
+ gaudi2_set_arc_id_cap(hdev, arc_id);
+ }
+}
+
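+/*
+ * Scrub the DCCM of a single ARC using KDMA jobs. Scheduler ARCs 0-3 own two
+ * consecutive DCCM blocks; scheduler ARCs 4/5 and the MME QMAN ARCs have an
+ * upper block that must be selected via a register before it can be scrubbed.
+ */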
+static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id)
+{
+ u32 reg_base, reg_val;
+ int rc;
+
+ switch (cpu_id) {
+ case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC3:
+ /* Each ARC scheduler has 2 consecutive DCCM blocks */
+ rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
+ ARC_DCCM_BLOCK_SIZE * 2, true);
+ if (rc)
+ return rc;
+ break;
+ case CPU_ID_SCHED_ARC4:
+ case CPU_ID_SCHED_ARC5:
+ case CPU_ID_MME_QMAN_ARC0:
+ case CPU_ID_MME_QMAN_ARC1:
+ reg_base = gaudi2_arc_blocks_bases[cpu_id];
+
+ /* Scrub lower DCCM block */
+ rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
+ ARC_DCCM_BLOCK_SIZE, true);
+ if (rc)
+ return rc;
+
+ /* Switch to upper DCCM block */
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 1);
+ WREG32(reg_base + ARC_DCCM_UPPER_EN_OFFSET, reg_val);
+
+ /* Scrub upper DCCM block */
+ rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
+ ARC_DCCM_BLOCK_SIZE, true);
+ if (rc)
+ return rc;
+
+ /* Switch to lower DCCM block */
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 0);
+ WREG32(reg_base + ARC_DCCM_UPPER_EN_OFFSET, reg_val);
+ break;
+ default:
+ rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
+ ARC_DCCM_BLOCK_SIZE, true);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void gaudi2_scrub_arcs_dccm(struct hl_device *hdev)
+{
+ u16 arc_id;
+
+ for (arc_id = CPU_ID_SCHED_ARC0 ; arc_id < CPU_ID_MAX ; arc_id++) {
+ if (!gaudi2_is_arc_enabled(hdev, arc_id))
+ continue;
+
+ gaudi2_scrub_arc_dccm(hdev, arc_id);
+ }
+}
+
+static int gaudi2_late_init(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int rc;
+
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
+ gaudi2->virt_msix_db_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+ }
+
+ rc = gaudi2_fetch_psoc_frequency(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
+ goto disable_pci_access;
+ }
+
+ gaudi2_init_arcs(hdev);
+ gaudi2_scrub_arcs_dccm(hdev);
+ gaudi2_init_security(hdev);
+
+ return 0;
+
+disable_pci_access:
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
+
+ return rc;
+}
+
+static void gaudi2_late_fini(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
+
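+/*
+ * Register the decoder command blocks (two per DCORE plus the two PCIE
+ * decoders) as user-mappable blocks, starting at the given index.
+ */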
+static void gaudi2_user_mapped_dec_init(struct gaudi2_device *gaudi2, u32 start_idx)
+{
+ struct user_mapped_block *blocks = gaudi2->mapped_blocks;
+
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE0_DEC0_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE0_DEC1_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE1_DEC0_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE1_DEC1_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE2_DEC0_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE2_DEC1_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE3_DEC0_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE3_DEC1_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmPCIE_DEC0_CMD_BASE, HL_BLOCK_SIZE);
+ HL_USR_MAPPED_BLK_INIT(&blocks[start_idx], mmPCIE_DEC1_CMD_BASE, HL_BLOCK_SIZE);
+}
+
+static void gaudi2_user_mapped_blocks_init(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct user_mapped_block *blocks = gaudi2->mapped_blocks;
+ u32 block_size, umr_start_idx, num_umr_blocks;
+ int i;
+
+ for (i = 0 ; i < NUM_ARC_CPUS ; i++) {
+ if (i >= CPU_ID_SCHED_ARC0 && i <= CPU_ID_SCHED_ARC3)
+ block_size = ARC_DCCM_BLOCK_SIZE * 2;
+ else
+ block_size = ARC_DCCM_BLOCK_SIZE;
+
+ blocks[i].address = gaudi2_arc_dccm_bases[i];
+ blocks[i].size = block_size;
+ }
+
+ blocks[NUM_ARC_CPUS].address = mmARC_FARM_ARC0_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 1].address = mmARC_FARM_ARC1_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 1].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 2].address = mmARC_FARM_ARC2_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 2].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 3].address = mmARC_FARM_ARC3_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 3].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 4].address = mmDCORE0_MME_QM_ARC_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 4].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 5].address = mmDCORE1_MME_QM_ARC_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 5].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 6].address = mmDCORE2_MME_QM_ARC_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 6].size = HL_BLOCK_SIZE;
+
+ blocks[NUM_ARC_CPUS + 7].address = mmDCORE3_MME_QM_ARC_ACP_ENG_BASE;
+ blocks[NUM_ARC_CPUS + 7].size = HL_BLOCK_SIZE;
+
+ umr_start_idx = NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS;
+ num_umr_blocks = NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS;
+ for (i = 0 ; i < num_umr_blocks ; i++) {
+ u8 nic_id, umr_block_id;
+
+ nic_id = i / NUM_OF_USER_NIC_UMR_BLOCKS;
+ umr_block_id = i % NUM_OF_USER_NIC_UMR_BLOCKS;
+
+ blocks[umr_start_idx + i].address =
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE +
+ (nic_id / NIC_NUMBER_OF_QM_PER_MACRO) * NIC_OFFSET +
+ (nic_id % NIC_NUMBER_OF_QM_PER_MACRO) * NIC_QM_OFFSET +
+ umr_block_id * NIC_UMR_OFFSET;
+ blocks[umr_start_idx + i].size = HL_BLOCK_SIZE;
+ }
+
+ /* Expose decoder HW configuration block to user */
+ gaudi2_user_mapped_dec_init(gaudi2, USR_MAPPED_BLK_DEC_START_IDX);
+
+ for (i = 1; i < NUM_OF_DCORES; ++i) {
+ blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1)].size = SM_OBJS_BLOCK_SIZE;
+ blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1) + 1].size = HL_BLOCK_SIZE;
+
+ blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1)].address =
+ mmDCORE0_SYNC_MNGR_OBJS_BASE + i * DCORE_OFFSET;
+
+ blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1) + 1].address =
+ mmDCORE0_SYNC_MNGR_GLBL_BASE + i * DCORE_OFFSET;
+ }
+}
+
+static int gaudi2_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
+{
+ dma_addr_t dma_addr_arr[GAUDI2_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
+ void *virt_addr_arr[GAUDI2_ALLOC_CPU_MEM_RETRY_CNT] = {};
+ int i, j, rc = 0;
+
+ /* The device ARC works with 32-bit addresses, and because there is a single HW register
+ * that holds the extension bits (49..28), these bits must be identical across the entire
+ * allocated range.
+ */
+
+ for (i = 0 ; i < GAUDI2_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
+ virt_addr_arr[i] = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
+ &dma_addr_arr[i], GFP_KERNEL | __GFP_ZERO);
+ if (!virt_addr_arr[i]) {
+ rc = -ENOMEM;
+ goto free_dma_mem_arr;
+ }
+
+ end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
+ if (GAUDI2_ARC_PCI_MSB_ADDR(dma_addr_arr[i]) == GAUDI2_ARC_PCI_MSB_ADDR(end_addr))
+ break;
+ }
+
+ if (i == GAUDI2_ALLOC_CPU_MEM_RETRY_CNT) {
+ dev_err(hdev->dev,
+ "MSB of ARC accessible DMA memory are not identical in all range\n");
+ rc = -EFAULT;
+ goto free_dma_mem_arr;
+ }
+
+ hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
+ hdev->cpu_accessible_dma_address = dma_addr_arr[i];
+
+free_dma_mem_arr:
+ for (j = 0 ; j < i ; j++)
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, virt_addr_arr[j],
+ dma_addr_arr[j]);
+
+ return rc;
+}
+
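+/* Describe the CFG, SRAM and DRAM regions that are accessible through the PCI BARs */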
+static void gaudi2_set_pci_memory_regions(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_mem_region *region;
+
+ /* CFG */
+ region = &hdev->pci_mem_region[PCI_REGION_CFG];
+ region->region_base = CFG_BASE;
+ region->region_size = CFG_SIZE;
+ region->offset_in_bar = CFG_BASE - STM_FLASH_BASE_ADDR;
+ region->bar_size = CFG_BAR_SIZE;
+ region->bar_id = SRAM_CFG_BAR_ID;
+ region->used = 1;
+
+ /* SRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_SRAM];
+ region->region_base = SRAM_BASE_ADDR;
+ region->region_size = SRAM_SIZE;
+ region->offset_in_bar = CFG_REGION_SIZE + BAR0_RSRVD_SIZE;
+ region->bar_size = CFG_BAR_SIZE;
+ region->bar_id = SRAM_CFG_BAR_ID;
+ region->used = 1;
+
+ /* DRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_DRAM];
+ region->region_base = DRAM_PHYS_BASE;
+ region->region_size = hdev->asic_prop.dram_size;
+ region->offset_in_bar = 0;
+ region->bar_size = prop->dram_pci_bar_size;
+ region->bar_id = DRAM_BAR_ID;
+ region->used = 1;
+}
+
+static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i, j, k;
+
+ /* Initialize common user CQ interrupt */
+ HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev,
+ HL_COMMON_USER_CQ_INTERRUPT_ID, false);
+
+ /* Initialize common decoder interrupt */
+ HL_USR_INTR_STRUCT_INIT(hdev->common_decoder_interrupt, hdev,
+ HL_COMMON_DEC_INTERRUPT_ID, true);
+
+ /* The user interrupts structure holds both decoder and user interrupts from various engines.
+ * We first initialize the decoder interrupts and then we add the user interrupts.
+ * The only limitation is that the last decoder interrupt ID must be smaller
+ * than GAUDI2_IRQ_NUM_USER_FIRST. This is checked at compilation time.
+ */
+
+ /* Initialize decoder interrupts; expose only the normal interrupts to the user,
+ * as the error interrupts are handled by the driver
+ */
+ for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM;
+ i += 2, j++)
+ HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, true);
+
+ for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++)
+ HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, false);
+}
+
+static inline int gaudi2_get_non_zero_random_int(void)
+{
+ int rand = get_random_int();
+
+ return rand ? rand : 1;
+}
+
+static int gaudi2_sw_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2;
+ int i, rc;
+
+ /* Allocate device structure */
+ gaudi2 = kzalloc(sizeof(*gaudi2), GFP_KERNEL);
+ if (!gaudi2)
+ return -ENOMEM;
+
+ for (i = 0 ; i < ARRAY_SIZE(gaudi2_irq_map_table) ; i++) {
+ if (gaudi2_irq_map_table[i].msg || !gaudi2_irq_map_table[i].valid)
+ continue;
+
+ if (gaudi2->num_of_valid_hw_events == GAUDI2_EVENT_SIZE) {
+ dev_err(hdev->dev, "H/W events array exceeds the limit of %u events\n",
+ GAUDI2_EVENT_SIZE);
+ rc = -EINVAL;
+ goto free_gaudi2_device;
+ }
+
+ gaudi2->hw_events[gaudi2->num_of_valid_hw_events++] = gaudi2_irq_map_table[i].fc_id;
+ }
+
+ for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++)
+ gaudi2->lfsr_rand_seeds[i] = gaudi2_get_non_zero_random_int();
+
+ gaudi2->cpucp_info_get = gaudi2_cpucp_info_get;
+
+ hdev->asic_specific = gaudi2;
+
+ /* Create DMA pool for small allocations.
+ * Use DEVICE_CACHE_LINE_SIZE for alignment since the NIC memory-mapped
+ * PI/CI registers allocated from this pool have this restriction
+ */
+ hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), &hdev->pdev->dev,
+ GAUDI2_DMA_POOL_BLK_SIZE, DEVICE_CACHE_LINE_SIZE, 0);
+ if (!hdev->dma_pool) {
+ dev_err(hdev->dev, "failed to create DMA pool\n");
+ rc = -ENOMEM;
+ goto free_gaudi2_device;
+ }
+
+ rc = gaudi2_alloc_cpu_accessible_dma_mem(hdev);
+ if (rc)
+ goto free_dma_pool;
+
+ hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
+ if (!hdev->cpu_accessible_dma_pool) {
+ dev_err(hdev->dev, "Failed to create CPU accessible DMA pool\n");
+ rc = -ENOMEM;
+ goto free_cpu_dma_mem;
+ }
+
+ rc = gen_pool_add(hdev->cpu_accessible_dma_pool, (uintptr_t) hdev->cpu_accessible_dma_mem,
+ HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to CPU accessible DMA pool\n");
+ rc = -EFAULT;
+ goto free_cpu_accessible_dma_pool;
+ }
+
+ gaudi2->virt_msix_db_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, prop->pmmu.page_size,
+ &gaudi2->virt_msix_db_dma_addr);
+ if (!gaudi2->virt_msix_db_cpu_addr) {
+ dev_err(hdev->dev, "Failed to allocate DMA memory for virtual MSI-X doorbell\n");
+ rc = -ENOMEM;
+ goto free_cpu_accessible_dma_pool;
+ }
+
+ spin_lock_init(&gaudi2->hw_queues_lock);
+ spin_lock_init(&gaudi2->kdma_lock);
+
+ gaudi2->scratchpad_kernel_address = hl_asic_dma_alloc_coherent(hdev, PAGE_SIZE,
+ &gaudi2->scratchpad_bus_address,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!gaudi2->scratchpad_kernel_address) {
+ rc = -ENOMEM;
+ goto free_virt_msix_db_mem;
+ }
+
+ gaudi2_user_mapped_blocks_init(hdev);
+
+ /* Initialize user interrupts */
+ gaudi2_user_interrupt_setup(hdev);
+
+ hdev->supports_coresight = true;
+ hdev->supports_sync_stream = true;
+ hdev->supports_cb_mapping = true;
+ hdev->supports_wait_for_multi_cs = false;
+
+ prop->supports_compute_reset = true;
+
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
+
+ return 0;
+
+free_virt_msix_db_mem:
+ hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
+free_cpu_accessible_dma_pool:
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+free_cpu_dma_mem:
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+free_dma_pool:
+ dma_pool_destroy(hdev->dma_pool);
+free_gaudi2_device:
+ kfree(gaudi2);
+ return rc;
+}
+
+static int gaudi2_sw_fini(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
+
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+
+ hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
+ gaudi2->scratchpad_bus_address);
+
+ dma_pool_destroy(hdev->dma_pool);
+
+ kfree(gaudi2);
+
+ return 0;
+}
+
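+/* Stop the PQF, CQF and CP engines of a QMAN, including its ARC CQF */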
+static void gaudi2_stop_qman_common(struct hl_device *hdev, u32 reg_base)
+{
+ WREG32(reg_base + QM_GLBL_CFG1_OFFSET, QM_GLBL_CFG1_PQF_STOP |
+ QM_GLBL_CFG1_CQF_STOP |
+ QM_GLBL_CFG1_CP_STOP);
+
+ /* stop also the ARC */
+ WREG32(reg_base + QM_GLBL_CFG2_OFFSET, QM_GLBL_CFG2_ARC_CQF_STOP);
+}
+
+static void gaudi2_flush_qman_common(struct hl_device *hdev, u32 reg_base)
+{
+ WREG32(reg_base + QM_GLBL_CFG1_OFFSET, QM_GLBL_CFG1_PQF_FLUSH |
+ QM_GLBL_CFG1_CQF_FLUSH |
+ QM_GLBL_CFG1_CP_FLUSH);
+}
+
+static void gaudi2_flush_qman_arc_common(struct hl_device *hdev, u32 reg_base)
+{
+ WREG32(reg_base + QM_GLBL_CFG2_OFFSET, QM_GLBL_CFG2_ARC_CQF_FLUSH);
+}
+
+/**
+ * gaudi2_clear_qm_fence_counters_common - clear QM's fence counters
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @queue_id: queue whose fence counters should be cleared
+ * @skip_fence: if true, set the maximum fence value in all fence counters to
+ *              avoid getting stuck on any fence value. Otherwise set all fence
+ *              counters to 0 (standard clear of fence counters)
+ */
+static void gaudi2_clear_qm_fence_counters_common(struct hl_device *hdev, u32 queue_id,
+ bool skip_fence)
+{
+ u32 size, reg_base;
+ u32 addr, val;
+
+ reg_base = gaudi2_qm_blocks_bases[queue_id];
+
+ addr = reg_base + QM_CP_FENCE0_CNT_0_OFFSET;
+ size = mmPDMA0_QM_CP_BARRIER_CFG - mmPDMA0_QM_CP_FENCE0_CNT_0;
+
+ /*
+ * In case we want to make sure that a QM that is stuck on a fence will
+ * be released, we should set the fence counter to a higher value than
+ * the value the QM is waiting for. To satisfy any possible fence value,
+ * we set the maximum fence value in all counters.
+ */
+ val = skip_fence ? U32_MAX : 0;
+ gaudi2_memset_device_lbw(hdev, addr, size, val);
+}
+
+static void gaudi2_qman_manual_flush_common(struct hl_device *hdev, u32 queue_id)
+{
+ u32 reg_base = gaudi2_qm_blocks_bases[queue_id];
+
+ gaudi2_clear_qm_fence_counters_common(hdev, queue_id, true);
+ gaudi2_flush_qman_common(hdev, reg_base);
+ gaudi2_flush_qman_arc_common(hdev, reg_base);
+}
+
+static void gaudi2_stop_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int dcore, inst;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK))
+ goto stop_edma_qmans;
+
+ /* Stop CPs of PDMA QMANs */
+ gaudi2_stop_qman_common(hdev, mmPDMA0_QM_BASE);
+ gaudi2_stop_qman_common(hdev, mmPDMA1_QM_BASE);
+
+stop_edma_qmans:
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
+ return;
+
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
+ u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
+ u32 qm_base;
+
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq)))
+ continue;
+
+ qm_base = mmDCORE0_EDMA0_QM_BASE + dcore * DCORE_OFFSET +
+ inst * DCORE_EDMA_OFFSET;
+
+ /* Stop CPs of EDMA QMANs */
+ gaudi2_stop_qman_common(hdev, qm_base);
+ }
+ }
+}
+
+static void gaudi2_stop_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 offset, i;
+
+ offset = mmDCORE1_MME_QM_BASE - mmDCORE0_MME_QM_BASE;
+
+ for (i = 0 ; i < NUM_OF_DCORES ; i++) {
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i)))
+ continue;
+
+ gaudi2_stop_qman_common(hdev, mmDCORE0_MME_QM_BASE + (i * offset));
+ }
+}
+
+static void gaudi2_stop_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+ int i;
+
+ if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ for (i = 0 ; i < TPC_ID_SIZE ; i++) {
+ if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i)))
+ continue;
+
+ reg_base = gaudi2_qm_blocks_bases[gaudi2_tpc_id_to_queue_id[i]];
+ gaudi2_stop_qman_common(hdev, reg_base);
+ }
+}
+
+static void gaudi2_stop_rot_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+ int i;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK))
+ return;
+
+ for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) {
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i)))
+ continue;
+
+ reg_base = gaudi2_qm_blocks_bases[gaudi2_rot_id_to_queue_id[i]];
+ gaudi2_stop_qman_common(hdev, reg_base);
+ }
+}
+
+static void gaudi2_stop_nic_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, queue_id;
+ int i;
+
+ if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK))
+ return;
+
+ queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
+
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
+ if (!(hdev->nic_ports_mask & BIT(i)))
+ continue;
+
+ reg_base = gaudi2_qm_blocks_bases[queue_id];
+ gaudi2_stop_qman_common(hdev, reg_base);
+ }
+}
+
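+/* Stall a DMA core by setting the HALT bit in its CFG_1 register */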
+static void gaudi2_stall_dma_common(struct hl_device *hdev, u32 reg_base)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(PDMA0_CORE_CFG_1_HALT_MASK, 0x1);
+ WREG32(reg_base + DMA_CORE_CFG_1_OFFSET, reg_val);
+}
+
+static void gaudi2_dma_stall(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int dcore, inst;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK))
+ goto stall_edma;
+
+ gaudi2_stall_dma_common(hdev, mmPDMA0_CORE_BASE);
+ gaudi2_stall_dma_common(hdev, mmPDMA1_CORE_BASE);
+
+stall_edma:
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
+ return;
+
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
+ u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
+ u32 core_base;
+
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq)))
+ continue;
+
+ core_base = mmDCORE0_EDMA0_CORE_BASE + dcore * DCORE_OFFSET +
+ inst * DCORE_EDMA_OFFSET;
+
+ /* Stall CPs of EDMA QMANs */
+ gaudi2_stall_dma_common(hdev, core_base);
+ }
+ }
+}
+
+static void gaudi2_mme_stall(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 offset, i;
+
+ offset = mmDCORE1_MME_CTRL_LO_QM_STALL - mmDCORE0_MME_CTRL_LO_QM_STALL;
+
+ for (i = 0 ; i < NUM_OF_DCORES ; i++)
+ if (gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i))
+ WREG32(mmDCORE0_MME_CTRL_LO_QM_STALL + (i * offset), 1);
+}
+
+static void gaudi2_tpc_stall(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+ int i;
+
+ if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ for (i = 0 ; i < TPC_ID_SIZE ; i++) {
+ if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i)))
+ continue;
+
+ reg_base = gaudi2_tpc_cfg_blocks_bases[i];
+ WREG32(reg_base + TPC_CFG_STALL_OFFSET, 1);
+ }
+}
+
+static void gaudi2_rotator_stall(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_val;
+ int i;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK))
+ return;
+
+ reg_val = FIELD_PREP(ROT_MSS_HALT_WBC_MASK, 0x1) |
+ FIELD_PREP(ROT_MSS_HALT_RSB_MASK, 0x1) |
+ FIELD_PREP(ROT_MSS_HALT_MRSB_MASK, 0x1);
+
+ for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) {
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i)))
+ continue;
+
+ WREG32(mmROT0_MSS_HALT + i * ROT_OFFSET, reg_val);
+ }
+}
+
+static void gaudi2_disable_qman_common(struct hl_device *hdev, u32 reg_base)
+{
+ WREG32(reg_base + QM_GLBL_CFG0_OFFSET, 0);
+}
+
+static void gaudi2_disable_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int dcore, inst;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK))
+ goto stop_edma_qmans;
+
+ gaudi2_disable_qman_common(hdev, mmPDMA0_QM_BASE);
+ gaudi2_disable_qman_common(hdev, mmPDMA1_QM_BASE);
+
+stop_edma_qmans:
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
+ return;
+
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
+ u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
+ u32 qm_base;
+
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq)))
+ continue;
+
+ qm_base = mmDCORE0_EDMA0_QM_BASE + dcore * DCORE_OFFSET +
+ inst * DCORE_EDMA_OFFSET;
+
+ /* Disable CPs of EDMA QMANs */
+ gaudi2_disable_qman_common(hdev, qm_base);
+ }
+ }
+}
+
+static void gaudi2_disable_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 offset, i;
+
+ offset = mmDCORE1_MME_QM_BASE - mmDCORE0_MME_QM_BASE;
+
+ for (i = 0 ; i < NUM_OF_DCORES ; i++)
+ if (gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i))
+ gaudi2_disable_qman_common(hdev, mmDCORE0_MME_QM_BASE + (i * offset));
+}
+
+static void gaudi2_disable_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+ int i;
+
+ if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ for (i = 0 ; i < TPC_ID_SIZE ; i++) {
+ if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i)))
+ continue;
+
+ reg_base = gaudi2_qm_blocks_bases[gaudi2_tpc_id_to_queue_id[i]];
+ gaudi2_disable_qman_common(hdev, reg_base);
+ }
+}
+
+static void gaudi2_disable_rot_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+ int i;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK))
+ return;
+
+ for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) {
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i)))
+ continue;
+
+ reg_base = gaudi2_qm_blocks_bases[gaudi2_rot_id_to_queue_id[i]];
+ gaudi2_disable_qman_common(hdev, reg_base);
+ }
+}
+
+static void gaudi2_disable_nic_qmans(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, queue_id;
+ int i;
+
+ if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK))
+ return;
+
+ queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
+
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
+ if (!(hdev->nic_ports_mask & BIT(i)))
+ continue;
+
+ reg_base = gaudi2_qm_blocks_bases[queue_id];
+ gaudi2_disable_qman_common(hdev, reg_base);
+ }
+}
+
+static void gaudi2_enable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE, 0);
+
+ /* Zero the lower/upper parts of the 64-bit counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE + 0xC, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE + 0x8, 0);
+
+ /* Enable the counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE, 1);
+}
+
+static void gaudi2_disable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE, 0);
+}
+
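+/* Return a human-readable name for the given MSI-X interrupt index */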
+static const char *gaudi2_irq_name(u16 irq_number)
+{
+ switch (irq_number) {
+ case GAUDI2_IRQ_NUM_EVENT_QUEUE:
+ return "gaudi2 cpu eq";
+ case GAUDI2_IRQ_NUM_COMPLETION:
+ return "gaudi2 completion";
+ case GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ... GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM:
+ return gaudi2_vdec_irq_name[irq_number - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM];
+ case GAUDI2_IRQ_NUM_USER_FIRST ... GAUDI2_IRQ_NUM_USER_LAST:
+ return "gaudi2 user completion";
+ default:
+ return "invalid";
+ }
+}
+
+static void gaudi2_dec_disable_msix(struct hl_device *hdev, u32 max_irq_num)
+{
+ int i, irq, relative_idx;
+ struct hl_dec *dec;
+
+ for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ; i < max_irq_num ; i++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM;
+
+ dec = hdev->dec + relative_idx / 2;
+
+ /* We pass different structures depending on the irq handler. For the abnormal
+ * interrupt we pass hl_dec and for the regular interrupt we pass the relevant
+ * user_interrupt entry
+ */
+ free_irq(irq, ((relative_idx % 2) ?
+ (void *) dec :
+ (void *) &hdev->user_interrupt[dec->core_id]));
+ }
+}
+
+static int gaudi2_dec_enable_msix(struct hl_device *hdev)
+{
+ int rc, i, irq_init_cnt, irq, relative_idx;
+ irq_handler_t irq_handler;
+ struct hl_dec *dec;
+
+ for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0;
+ i <= GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM;
+ i++, irq_init_cnt++) {
+
+ irq = pci_irq_vector(hdev->pdev, i);
+ relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM;
+
+ irq_handler = (relative_idx % 2) ?
+ hl_irq_handler_dec_abnrm :
+ hl_irq_handler_user_interrupt;
+
+ dec = hdev->dec + relative_idx / 2;
+
+ /* We pass different structures depending on the irq handler. For the abnormal
+ * interrupt we pass hl_dec and for the regular interrupt we pass the relevant
+ * user_interrupt entry
+ */
+ rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i),
+ ((relative_idx % 2) ?
+ (void *) dec :
+ (void *) &hdev->user_interrupt[dec->core_id]));
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_dec_irqs;
+ }
+ }
+
+ return 0;
+
+free_dec_irqs:
+ gaudi2_dec_disable_msix(hdev, (GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + irq_init_cnt));
+ return rc;
+}
+
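+/*
+ * Allocate all MSI-X vectors and register the handlers for the completion
+ * queue, event queue, decoder interrupts and user interrupts.
+ */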
+static int gaudi2_enable_msix(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int rc, irq, i, j, user_irq_init_cnt;
+ irq_handler_t irq_handler;
+ struct hl_cq *cq;
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_MSIX)
+ return 0;
+
+ rc = pci_alloc_irq_vectors(hdev->pdev, GAUDI2_MSIX_ENTRIES, GAUDI2_MSIX_ENTRIES,
+ PCI_IRQ_MSIX);
+ if (rc < 0) {
+ dev_err(hdev->dev, "MSI-X: Failed to enable support -- %d/%d\n",
+ GAUDI2_MSIX_ENTRIES, rc);
+ return rc;
+ }
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
+ cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_CS_COMPLETION];
+ rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_COMPLETION), cq);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irq_vectors;
+ }
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
+ rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_EVENT_QUEUE),
+ &hdev->event_queue);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_completion_irq;
+ }
+
+ rc = gaudi2_dec_enable_msix(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to enable decoder IRQ");
+ goto free_completion_irq;
+ }
+
+ for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
+ user_irq_init_cnt < prop->user_interrupt_count;
+ i++, j++, user_irq_init_cnt++) {
+
+ irq = pci_irq_vector(hdev->pdev, i);
+ irq_handler = hl_irq_handler_user_interrupt;
+
+ rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i), &hdev->user_interrupt[j]);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_user_irq;
+ }
+ }
+
+ gaudi2->hw_cap_initialized |= HW_CAP_MSIX;
+
+ return 0;
+
+free_user_irq:
+ for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count;
+ i < GAUDI2_IRQ_NUM_USER_FIRST + user_irq_init_cnt ; i++, j++) {
+
+ irq = pci_irq_vector(hdev->pdev, i);
+ free_irq(irq, &hdev->user_interrupt[j]);
+ }
+
+ gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
+
+free_completion_irq:
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
+ free_irq(irq, cq);
+
+free_irq_vectors:
+ pci_free_irq_vectors(hdev->pdev);
+
+ return rc;
+}
+
+static void gaudi2_sync_irqs(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int i, j;
+ int irq;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_MSIX))
+ return;
+
+ /* Wait for all pending IRQs to be finished */
+ synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION));
+
+ for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM ; i++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ synchronize_irq(irq);
+ }
+
+ for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count;
+ i++, j++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ synchronize_irq(irq);
+ }
+
+ synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE));
+}
+
+static void gaudi2_disable_msix(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct hl_cq *cq;
+ int irq, i, j, k;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_MSIX))
+ return;
+
+ gaudi2_sync_irqs(hdev);
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
+ free_irq(irq, &hdev->event_queue);
+
+ gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
+
+ for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0;
+ k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) {
+
+ irq = pci_irq_vector(hdev->pdev, i);
+ free_irq(irq, &hdev->user_interrupt[j]);
+ }
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
+ cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_CS_COMPLETION];
+ free_irq(irq, cq);
+
+ pci_free_irq_vectors(hdev->pdev);
+
+ gaudi2->hw_cap_initialized &= ~HW_CAP_MSIX;
+}
+
+static void gaudi2_stop_dcore_dec(struct hl_device *hdev, int dcore_id)
+{
+ u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1);
+ u32 graceful_pend_mask = DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK;
+ u32 timeout_usec, dec_id, dec_bit, offset, graceful;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI2_PLDM_VDEC_TIMEOUT_USEC;
+ else
+ timeout_usec = GAUDI2_VDEC_TIMEOUT_USEC;
+
+ for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) {
+ dec_bit = dcore_id * NUM_OF_DEC_PER_DCORE + dec_id;
+ if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
+ continue;
+
+ offset = dcore_id * DCORE_OFFSET + dec_id * DCORE_VDEC_OFFSET;
+
+ WREG32(mmDCORE0_DEC0_CMD_SWREG16 + offset, 0);
+
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL + offset, reg_val);
+
+ /* Wait until all traffic from the decoder stops
+ * before applying the core reset.
+ */
+ rc = hl_poll_timeout(
+ hdev,
+ mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL + offset,
+ graceful,
+ (graceful & graceful_pend_mask),
+ 100,
+ timeout_usec);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to stop traffic from DCORE%d Decoder %d\n",
+ dcore_id, dec_id);
+ }
+}
+
+static void gaudi2_stop_pcie_dec(struct hl_device *hdev)
+{
+ u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1);
+ u32 graceful_pend_mask = PCIE_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK;
+ u32 timeout_usec, dec_id, dec_bit, offset, graceful;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI2_PLDM_VDEC_TIMEOUT_USEC;
+ else
+ timeout_usec = GAUDI2_VDEC_TIMEOUT_USEC;
+
+ for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) {
+ dec_bit = PCIE_DEC_SHIFT + dec_id;
+ if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
+ continue;
+
+ offset = dec_id * PCIE_VDEC_OFFSET;
+
+ WREG32(mmPCIE_DEC0_CMD_SWREG16 + offset, 0);
+
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL + offset, reg_val);
+
+ /* Wait until all traffic from the decoder stops
+ * before applying the core reset.
+ */
+ rc = hl_poll_timeout(
+ hdev,
+ mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL + offset,
+ graceful,
+ (graceful & graceful_pend_mask),
+ 100,
+ timeout_usec);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to stop traffic from PCIe Decoder %d\n",
+ dec_id);
+ }
+}
+
+static void gaudi2_stop_dec(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int dcore_id;
+
+ if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == 0)
+ return;
+
+ for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++)
+ gaudi2_stop_dcore_dec(hdev, dcore_id);
+
+ gaudi2_stop_pcie_dec(hdev);
+}
+
+static void gaudi2_halt_arc(struct hl_device *hdev, u32 cpu_id)
+{
+ u32 reg_base, reg_val;
+
+ reg_base = gaudi2_arc_blocks_bases[cpu_id];
+
+ /* Halt ARC */
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1);
+ WREG32(reg_base + ARC_HALT_REQ_OFFSET, reg_val);
+}
+
+static void gaudi2_halt_arcs(struct hl_device *hdev)
+{
+ u16 arc_id;
+
+ for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++) {
+ if (gaudi2_is_arc_enabled(hdev, arc_id))
+ gaudi2_halt_arc(hdev, arc_id);
+ }
+}
+
+static void gaudi2_reset_arcs(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u16 arc_id;
+
+ if (!gaudi2)
+ return;
+
+ for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++)
+ if (gaudi2_is_arc_enabled(hdev, arc_id))
+ gaudi2_clr_arc_id_cap(hdev, arc_id);
+}
+
+static void gaudi2_nic_qmans_manual_flush(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 queue_id;
+ int i;
+
+ if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK))
+ return;
+
+ queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
+
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN)
+ gaudi2_qman_manual_flush_common(hdev, queue_id);
+}
+
+static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+{
+ u32 wait_timeout_ms;
+
+ if (hdev->pldm)
+ wait_timeout_ms = GAUDI2_PLDM_RESET_WAIT_MSEC;
+ else
+ wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
+
+ if (fw_reset)
+ goto skip_engines;
+
+ gaudi2_stop_dma_qmans(hdev);
+ gaudi2_stop_mme_qmans(hdev);
+ gaudi2_stop_tpc_qmans(hdev);
+ gaudi2_stop_rot_qmans(hdev);
+ gaudi2_stop_nic_qmans(hdev);
+ msleep(wait_timeout_ms);
+
+ gaudi2_halt_arcs(hdev);
+ gaudi2_dma_stall(hdev);
+ gaudi2_mme_stall(hdev);
+ gaudi2_tpc_stall(hdev);
+ gaudi2_rotator_stall(hdev);
+
+ msleep(wait_timeout_ms);
+
+ gaudi2_stop_dec(hdev);
+
+ /*
+ * In case of soft reset, do a manual flush for the QMANs (currently done
+ * only for the NIC QMANs)
+ */
+ if (!hard_reset)
+ gaudi2_nic_qmans_manual_flush(hdev);
+
+ gaudi2_disable_dma_qmans(hdev);
+ gaudi2_disable_mme_qmans(hdev);
+ gaudi2_disable_tpc_qmans(hdev);
+ gaudi2_disable_rot_qmans(hdev);
+ gaudi2_disable_nic_qmans(hdev);
+ gaudi2_disable_timestamp(hdev);
+
+skip_engines:
+ if (hard_reset) {
+ gaudi2_disable_msix(hdev);
+ return;
+ }
+
+ gaudi2_sync_irqs(hdev);
+}
+
+static void gaudi2_init_firmware_preload_params(struct hl_device *hdev)
+{
+ struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
+
+ pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
+ pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
+ pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
+ pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
+ pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
+ pre_fw_load->wait_for_preboot_timeout = GAUDI2_PREBOOT_REQ_TIMEOUT_USEC;
+}
+
+static void gaudi2_init_firmware_loader(struct hl_device *hdev)
+{
+ struct fw_load_mgr *fw_loader = &hdev->fw_loader;
+ struct dynamic_fw_load_mgr *dynamic_loader;
+ struct cpu_dyn_regs *dyn_regs;
+
+ /* fill common fields */
+ fw_loader->fw_comp_loaded = FW_TYPE_NONE;
+ fw_loader->boot_fit_img.image_name = GAUDI2_BOOT_FIT_FILE;
+ fw_loader->linux_img.image_name = GAUDI2_LINUX_FW_FILE;
+ fw_loader->boot_fit_timeout = GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC;
+ fw_loader->skip_bmc = false;
+ fw_loader->sram_bar_id = SRAM_CFG_BAR_ID;
+ fw_loader->dram_bar_id = DRAM_BAR_ID;
+
+ if (hdev->asic_type == ASIC_GAUDI2 || hdev->asic_type == ASIC_GAUDI2_SEC)
+ fw_loader->cpu_timeout = GAUDI2_CPU_TIMEOUT_USEC;
+ else /* ASIC_GAUDI2_FPGA */
+ fw_loader->cpu_timeout = GAUDI2_FPGA_CPU_TIMEOUT;
+
+ /* Here we update the initial values of a few specific dynamic regs (as
+ * before reading the first descriptor from FW these values have to be
+ * hard-coded). In later stages of the protocol those values will be
+ * updated automatically by reading the FW descriptor, so the data there
+ * will always be up-to-date
+ */
+ dynamic_loader = &hdev->fw_loader.dynamic_loader;
+ dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
+ dyn_regs->kmd_msg_to_cpu = cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
+ dyn_regs->cpu_cmd_status_to_host = cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
+ dynamic_loader->wait_for_bl_timeout = GAUDI2_WAIT_FOR_BL_TIMEOUT_USEC;
+}
+
+static int gaudi2_init_cpu(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int rc;
+
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
+ return 0;
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_CPU)
+ return 0;
+
+ rc = hl_fw_init_cpu(hdev);
+ if (rc)
+ return rc;
+
+ gaudi2->hw_cap_initialized |= HW_CAP_CPU;
+
+ return 0;
+}
+
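+/*
+ * Configure the CPU interface registers with the PQ/EQ/CQ base addresses and
+ * sizes, then poll until the embedded CPU reports it is ready for the host.
+ */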
+static int gaudi2_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
+{
+ struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct cpu_dyn_regs *dyn_regs;
+ struct hl_eq *eq;
+ u32 status;
+ int err;
+
+ if (!hdev->cpu_queues_enable)
+ return 0;
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)
+ return 0;
+
+ eq = &hdev->event_queue;
+
+ dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+
+ WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
+
+ WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
+ WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
+
+ WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW, lower_32_bits(hdev->cpu_accessible_dma_address));
+ WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH, upper_32_bits(hdev->cpu_accessible_dma_address));
+
+ WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
+
+ /* Used for EQ CI */
+ WREG32(mmCPU_IF_EQ_RD_OFFS, 0);
+
+ WREG32(mmCPU_IF_PF_PQ_PI, 0);
+
+ WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
+
+ /* Let the ARC know we are ready as it is now handling those queues */
+
+ WREG32(le32_to_cpu(dyn_regs->gic_host_pi_upd_irq),
+ gaudi2_irq_map_table[GAUDI2_EVENT_CPU_PI_UPDATE].cpu_id);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmCPU_IF_QUEUE_INIT,
+ status,
+ (status == PQ_INIT_STATUS_READY_FOR_HOST),
+ 1000,
+ cpu_timeout);
+
+ if (err) {
+ dev_err(hdev->dev, "Failed to communicate with device CPU (timeout)\n");
+ return -EIO;
+ }
+
+ /* update FW application security bits */
+ if (prop->fw_cpu_boot_dev_sts0_valid)
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
+
+ if (prop->fw_cpu_boot_dev_sts1_valid)
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
+
+ gaudi2->hw_cap_initialized |= HW_CAP_CPU_Q;
+ return 0;
+}
+
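+/* Program the base address and size of each PQ in the QMAN and reset its PI/CI */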
+static void gaudi2_init_qman_pq(struct hl_device *hdev, u32 reg_base,
+ u32 queue_id_base)
+{
+ struct hl_hw_queue *q;
+ u32 pq_id, pq_offset;
+
+ for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) {
+ q = &hdev->kernel_queues[queue_id_base + pq_id];
+ pq_offset = pq_id * 4;
+
+ WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
+ lower_32_bits(q->bus_address));
+ WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
+ upper_32_bits(q->bus_address));
+ WREG32(reg_base + QM_PQ_SIZE_0_OFFSET + pq_offset, ilog2(HL_QUEUE_LENGTH));
+ WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0);
+ WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0);
+ }
+}
+
+static void gaudi2_init_qman_cp(struct hl_device *hdev, u32 reg_base)
+{
+ u32 cp_id, cp_offset, mtr_base_lo, mtr_base_hi, so_base_lo, so_base_hi;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ for (cp_id = 0 ; cp_id < NUM_OF_CP_PER_QMAN; cp_id++) {
+ cp_offset = cp_id * 4;
+
+ WREG32(reg_base + QM_CP_MSG_BASE0_ADDR_LO_0_OFFSET + cp_offset, mtr_base_lo);
+ WREG32(reg_base + QM_CP_MSG_BASE0_ADDR_HI_0_OFFSET + cp_offset, mtr_base_hi);
+ WREG32(reg_base + QM_CP_MSG_BASE1_ADDR_LO_0_OFFSET + cp_offset, so_base_lo);
+ WREG32(reg_base + QM_CP_MSG_BASE1_ADDR_HI_0_OFFSET + cp_offset, so_base_hi);
+ }
+
+ /* allow QMANs to accept work from ARC CQF */
+ WREG32(reg_base + QM_CP_CFG_OFFSET, FIELD_PREP(PDMA0_QM_CP_CFG_SWITCH_EN_MASK, 0x1));
+}
+
+static void gaudi2_init_qman_pqc(struct hl_device *hdev, u32 reg_base,
+ u32 queue_id_base)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 pq_id, pq_offset, so_base_lo, so_base_hi;
+
+ so_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) {
+ pq_offset = pq_id * 4;
+
+ /* Configure the QMAN HBW address to point to the scratchpad, as it is not needed */
+ WREG32(reg_base + QM_PQC_HBW_BASE_LO_0_OFFSET + pq_offset,
+ lower_32_bits(gaudi2->scratchpad_bus_address));
+ WREG32(reg_base + QM_PQC_HBW_BASE_HI_0_OFFSET + pq_offset,
+ upper_32_bits(gaudi2->scratchpad_bus_address));
+ WREG32(reg_base + QM_PQC_SIZE_0_OFFSET + pq_offset,
+ ilog2(PAGE_SIZE / sizeof(struct hl_cq_entry)));
+
+ WREG32(reg_base + QM_PQC_PI_0_OFFSET + pq_offset, 0);
+ WREG32(reg_base + QM_PQC_LBW_WDATA_0_OFFSET + pq_offset, QM_PQC_LBW_WDATA);
+ WREG32(reg_base + QM_PQC_LBW_BASE_LO_0_OFFSET + pq_offset, so_base_lo);
+ WREG32(reg_base + QM_PQC_LBW_BASE_HI_0_OFFSET + pq_offset, so_base_hi);
+ }
+
+ /* Enable QMAN H/W completion */
+ WREG32(reg_base + QM_PQC_CFG_OFFSET, 1 << PDMA0_QM_PQC_CFG_EN_SHIFT);
+}
+
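+/*
+ * Return the F/W-provided GIC IRQ control register that matches the given
+ * queue ID range (DMA, MME, TPC, rotator or NIC).
+ */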
+static u32 gaudi2_get_dyn_sp_reg(struct hl_device *hdev, u32 queue_id_base)
+{
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ u32 sp_reg_addr;
+
+ switch (queue_id_base) {
+ case GAUDI2_QUEUE_ID_PDMA_0_0...GAUDI2_QUEUE_ID_PDMA_1_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3:
+ sp_reg_addr = le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);
+ break;
+ case GAUDI2_QUEUE_ID_DCORE0_MME_0_0...GAUDI2_QUEUE_ID_DCORE0_MME_0_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE1_MME_0_0...GAUDI2_QUEUE_ID_DCORE1_MME_0_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE2_MME_0_0...GAUDI2_QUEUE_ID_DCORE2_MME_0_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE3_MME_0_0...GAUDI2_QUEUE_ID_DCORE3_MME_0_3:
+ sp_reg_addr = le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl);
+ break;
+ case GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_6_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE1_TPC_5_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE2_TPC_5_3:
+ fallthrough;
+ case GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE3_TPC_5_3:
+ sp_reg_addr = le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl);
+ break;
+ case GAUDI2_QUEUE_ID_ROT_0_0...GAUDI2_QUEUE_ID_ROT_1_3:
+ sp_reg_addr = le32_to_cpu(dyn_regs->gic_rot_qm_irq_ctrl);
+ break;
+ case GAUDI2_QUEUE_ID_NIC_0_0...GAUDI2_QUEUE_ID_NIC_23_3:
+ sp_reg_addr = le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl);
+ break;
+ default:
+ dev_err(hdev->dev, "Unexpected h/w queue %d\n", queue_id_base);
+ return 0;
+ }
+
+ return sp_reg_addr;
+}
+
+static void gaudi2_init_qman_common(struct hl_device *hdev, u32 reg_base,
+ u32 queue_id_base)
+{
+ u32 glbl_prot = QMAN_MAKE_TRUSTED, irq_handler_offset;
+ int map_table_entry;
+
+ WREG32(reg_base + QM_GLBL_PROT_OFFSET, glbl_prot);
+
+ irq_handler_offset = gaudi2_get_dyn_sp_reg(hdev, queue_id_base);
+ WREG32(reg_base + QM_GLBL_ERR_ADDR_LO_OFFSET, lower_32_bits(CFG_BASE + irq_handler_offset));
+ WREG32(reg_base + QM_GLBL_ERR_ADDR_HI_OFFSET, upper_32_bits(CFG_BASE + irq_handler_offset));
+
+ map_table_entry = gaudi2_qman_async_event_id[queue_id_base];
+ WREG32(reg_base + QM_GLBL_ERR_WDATA_OFFSET,
+ gaudi2_irq_map_table[map_table_entry].cpu_id);
+
+ WREG32(reg_base + QM_ARB_ERR_MSG_EN_OFFSET, QM_ARB_ERR_MSG_EN_MASK);
+
+ WREG32(reg_base + QM_ARB_SLV_CHOISE_WDT_OFFSET, GAUDI2_ARB_WDT_TIMEOUT);
+ WREG32(reg_base + QM_GLBL_CFG1_OFFSET, 0);
+ WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0);
+
+ /* Enable the QMAN channel.
+ * The PDMA1 QMAN configuration is different, as we do not allow the user to
+ * access CP2/3; they are reserved for ARC usage.
+ */
+ if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0])
+ WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA1_QMAN_ENABLE);
+ else
+ WREG32(reg_base + QM_GLBL_CFG0_OFFSET, QMAN_ENABLE);
+}
+
+static void gaudi2_init_qman(struct hl_device *hdev, u32 reg_base,
+ u32 queue_id_base)
+{
+ u32 pq_id;
+
+ for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++)
+ hdev->kernel_queues[queue_id_base + pq_id].cq_id = GAUDI2_RESERVED_CQ_CS_COMPLETION;
+
+ gaudi2_init_qman_pq(hdev, reg_base, queue_id_base);
+ gaudi2_init_qman_cp(hdev, reg_base);
+ gaudi2_init_qman_pqc(hdev, reg_base, queue_id_base);
+ gaudi2_init_qman_common(hdev, reg_base, queue_id_base);
+}
+
+static void gaudi2_init_dma_core(struct hl_device *hdev, u32 reg_base,
+ u32 dma_core_id, bool is_secure)
+{
+ u32 prot, irq_handler_offset;
+ struct cpu_dyn_regs *dyn_regs;
+ int map_table_entry;
+
+ prot = 1 << ARC_FARM_KDMA_PROT_ERR_VAL_SHIFT;
+ if (is_secure)
+ prot |= 1 << ARC_FARM_KDMA_PROT_VAL_SHIFT;
+
+ WREG32(reg_base + DMA_CORE_PROT_OFFSET, prot);
+
+ dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ irq_handler_offset = le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl);
+
+ WREG32(reg_base + DMA_CORE_ERRMSG_ADDR_LO_OFFSET,
+ lower_32_bits(CFG_BASE + irq_handler_offset));
+
+ WREG32(reg_base + DMA_CORE_ERRMSG_ADDR_HI_OFFSET,
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
+ map_table_entry = gaudi2_dma_core_async_event_id[dma_core_id];
+ WREG32(reg_base + DMA_CORE_ERRMSG_WDATA_OFFSET,
+ gaudi2_irq_map_table[map_table_entry].cpu_id);
+
+ /* Enable the DMA channel */
+ WREG32(reg_base + DMA_CORE_CFG_0_OFFSET, 1 << ARC_FARM_KDMA_CFG_0_EN_SHIFT);
+}
+
+static void gaudi2_init_kdma(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+
+ if ((gaudi2->hw_cap_initialized & HW_CAP_KDMA) == HW_CAP_KDMA)
+ return;
+
+ reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_KDMA];
+
+ gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_KDMA, true);
+
+ gaudi2->hw_cap_initialized |= HW_CAP_KDMA;
+}
+
+static void gaudi2_init_pdma(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base;
+
+ if ((gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK) == HW_CAP_PDMA_MASK)
+ return;
+
+ reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_PDMA0];
+ gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_PDMA0, false);
+
+ reg_base = gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0];
+ gaudi2_init_qman(hdev, reg_base, GAUDI2_QUEUE_ID_PDMA_0_0);
+
+ reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_PDMA1];
+ gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_PDMA1, false);
+
+ reg_base = gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0];
+ gaudi2_init_qman(hdev, reg_base, GAUDI2_QUEUE_ID_PDMA_1_0);
+
+ gaudi2->hw_cap_initialized |= HW_CAP_PDMA_MASK;
+}
+
+static void gaudi2_init_edma_instance(struct hl_device *hdev, u8 seq)
+{
+ u32 reg_base, base_edma_core_id, base_edma_qman_id;
+
+ base_edma_core_id = DMA_CORE_ID_EDMA0 + seq;
+ base_edma_qman_id = edma_stream_base[seq];
+
+ reg_base = gaudi2_dma_core_blocks_bases[base_edma_core_id];
+ gaudi2_init_dma_core(hdev, reg_base, base_edma_core_id, false);
+
+ reg_base = gaudi2_qm_blocks_bases[base_edma_qman_id];
+ gaudi2_init_qman(hdev, reg_base, base_edma_qman_id);
+}
+
+static void gaudi2_init_edma(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int dcore, inst;
+
+ if ((gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK) == HW_CAP_EDMA_MASK)
+ return;
+
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
+ u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
+
+ if (!(prop->edma_enabled_mask & BIT(seq)))
+ continue;
+
+ gaudi2_init_edma_instance(hdev, seq);
+
+ gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_EDMA_SHIFT + seq);
+ }
+ }
+}
+
+/*
+ * gaudi2_arm_monitors_for_virt_msix_db() - Arm monitors for writing to the virtual MSI-X doorbell.
+ * @hdev: pointer to habanalabs device structure.
+ * @sob_id: sync object ID.
+ * @first_mon_id: ID of first monitor out of 3 consecutive monitors.
+ * @interrupt_id: interrupt ID.
+ *
+ * Some initiators cannot have an HBW address in their completion address registers, and thus cannot
+ * write directly to the HBW host memory of the virtual MSI-X doorbell.
+ * Instead, they are configured to LBW write to a sync object, and a monitor will do the HBW write.
+ *
+ * The mechanism in the sync manager block is composed of a master monitor with 3 messages.
+ * In addition to the HBW write, the other 2 messages prepare the monitor for the next
+ * completion, by decrementing the sync object value and re-arming the monitor.
+ */
+static void gaudi2_arm_monitors_for_virt_msix_db(struct hl_device *hdev, u32 sob_id,
+ u32 first_mon_id, u32 interrupt_id)
+{
+ u32 sob_offset, first_mon_offset, mon_offset, payload, sob_group, mode, arm, config;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 addr;
+ u8 mask;
+
+ /* Reset the SOB value */
+ sob_offset = sob_id * sizeof(u32);
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0);
+
+ /* Configure 3 monitors:
+ * 1. Write interrupt ID to the virtual MSI-X doorbell (master monitor)
+ * 2. Decrement SOB value by 1.
+ * 3. Re-arm the master monitor.
+ */
+
+ first_mon_offset = first_mon_id * sizeof(u32);
+
+ /* 2nd monitor: Decrement SOB value by 1 */
+ mon_offset = first_mon_offset + sizeof(u32);
+
+ addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr));
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr));
+
+ payload = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 0x7FFF) | /* "-1" */
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK, 1) |
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1);
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload);
+
+ /* 3rd monitor: Re-arm the master monitor */
+ mon_offset = first_mon_offset + 2 * sizeof(u32);
+
+ addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + first_mon_offset;
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr));
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr));
+
+ sob_group = sob_id / 8;
+ mask = ~BIT(sob_id & 0x7);
+ mode = 0; /* comparison mode is "greater than or equal to" */
+ arm = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK, sob_group) |
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK, mask) |
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK, mode) |
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK, 1);
+
+ payload = arm;
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload);
+
+ /* 1st monitor (master): Write interrupt ID to the virtual MSI-X doorbell */
+ mon_offset = first_mon_offset;
+
+ config = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_WR_NUM_MASK, 2); /* "2": 3 writes */
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + mon_offset, config);
+
+ addr = gaudi2->virt_msix_db_dma_addr;
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr));
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr));
+
+ payload = interrupt_id;
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload);
+
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + mon_offset, arm);
+}
+
+static void gaudi2_prepare_sm_for_virt_msix_db(struct hl_device *hdev)
+{
+ u32 decoder_id, sob_id, first_mon_id, interrupt_id;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /* Decoder normal/abnormal interrupts */
+ for (decoder_id = 0 ; decoder_id < NUMBER_OF_DEC ; ++decoder_id) {
+ if (!(prop->decoder_enabled_mask & BIT(decoder_id)))
+ continue;
+
+ sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id;
+ first_mon_id = GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * decoder_id;
+ interrupt_id = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 2 * decoder_id;
+ gaudi2_arm_monitors_for_virt_msix_db(hdev, sob_id, first_mon_id, interrupt_id);
+
+ sob_id = GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + decoder_id;
+ first_mon_id = GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * decoder_id;
+ interrupt_id += 1;
+ gaudi2_arm_monitors_for_virt_msix_db(hdev, sob_id, first_mon_id, interrupt_id);
+ }
+}
+
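+/*
+ * Initialize the sync manager: enable CQ writes for the completion monitors,
+ * set up the CQ0 doorbell towards the (virtual) MSI-X doorbell, program the
+ * reserved CQ base addresses and sizes, and prepare the monitors used for the
+ * virtual MSI-X doorbell.
+ */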
+static void gaudi2_init_sm(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 cq_address;
+ u32 reg_val;
+ int i;
+
+ /* Enable HBW/LBW CQ for completion monitors */
+ reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1);
+ reg_val |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LBW_EN_MASK, 1);
+
+ for (i = 0 ; i < GAUDI2_MAX_PENDING_CS ; i++)
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val);
+
+ /* Enable only HBW CQ for KDMA completion monitor */
+ reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1);
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val);
+
+ /* Init CQ0 DB */
+ /* Configure the monitor to trigger MSI-X interrupt */
+ /* TODO:
+ * Remove the if statement when virtual MSI-X doorbell is supported in simulator (SW-93022)
+ * and in F/W (SW-93024).
+ */
+ if (!hdev->pdev || hdev->asic_prop.fw_security_enabled) {
+ u64 msix_db_reg = CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF;
+
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, lower_32_bits(msix_db_reg));
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, upper_32_bits(msix_db_reg));
+ } else {
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0,
+ lower_32_bits(gaudi2->virt_msix_db_dma_addr));
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0,
+ upper_32_bits(gaudi2->virt_msix_db_dma_addr));
+ }
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, GAUDI2_IRQ_NUM_COMPLETION);
+
+ for (i = 0 ; i < GAUDI2_RESERVED_CQ_NUMBER ; i++) {
+ cq_address =
+ hdev->completion_queue[i].bus_address;
+
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + (4 * i),
+ lower_32_bits(cq_address));
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + (4 * i),
+ upper_32_bits(cq_address));
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + (4 * i),
+ ilog2(HL_CQ_SIZE_IN_BYTES));
+ }
+
+ /* Configure kernel ASID and MMU BP */
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_SEC, 0x10000);
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV, 0);
+
+ /* Initialize sync objects and monitors which are used for the virtual MSI-X doorbell */
+ gaudi2_prepare_sm_for_virt_msix_db(hdev);
+}
+
+static void gaudi2_init_mme_acc(struct hl_device *hdev, u32 reg_base)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_val;
+ int i;
+
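+ /* Leave WBC error response interrupts unmasked and mask the AP source/result
+ * infinity and NaN indications
+ */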
+ reg_val = FIELD_PREP(MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK, 0);
+ reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_POS_INF_MASK, 1);
+ reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_NEG_INF_MASK, 1);
+ reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_NAN_MASK, 1);
+ reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_RESULT_POS_INF_MASK, 1);
+ reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_RESULT_NEG_INF_MASK, 1);
+
+ WREG32(reg_base + MME_ACC_INTR_MASK_OFFSET, reg_val);
+ WREG32(reg_base + MME_ACC_AP_LFSR_POLY_OFFSET, 0x80DEADAF);
+
+ for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) {
+ WREG32(reg_base + MME_ACC_AP_LFSR_SEED_SEL_OFFSET, i);
+ WREG32(reg_base + MME_ACC_AP_LFSR_SEED_WDATA_OFFSET, gaudi2->lfsr_rand_seeds[i]);
+ }
+}
+
+static void gaudi2_init_dcore_mme(struct hl_device *hdev, int dcore_id,
+ bool config_qman_only)
+{
+ u32 queue_id_base, reg_base, clk_en_addr = 0;
+
+ switch (dcore_id) {
+ case 0:
+ queue_id_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0;
+ break;
+ case 1:
+ queue_id_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0;
+ clk_en_addr = mmDCORE1_MME_CTRL_LO_QM_SLV_CLK_EN;
+ break;
+ case 2:
+ queue_id_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0;
+ break;
+ case 3:
+ queue_id_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0;
+ clk_en_addr = mmDCORE3_MME_CTRL_LO_QM_SLV_CLK_EN;
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid dcore id %u\n", dcore_id);
+ return;
+ }
+
+ if (clk_en_addr && !(hdev->fw_components & FW_TYPE_BOOT_CPU))
+ WREG32(clk_en_addr, 0x1);
+
+ if (!config_qman_only) {
+ reg_base = gaudi2_mme_acc_blocks_bases[dcore_id];
+ gaudi2_init_mme_acc(hdev, reg_base);
+ }
+
+ reg_base = gaudi2_qm_blocks_bases[queue_id_base];
+ gaudi2_init_qman(hdev, reg_base, queue_id_base);
+}
+
+static void gaudi2_init_mme(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int i;
+
+ if ((gaudi2->hw_cap_initialized & HW_CAP_MME_MASK) == HW_CAP_MME_MASK)
+ return;
+
+ for (i = 0 ; i < NUM_OF_DCORES ; i++) {
+ gaudi2_init_dcore_mme(hdev, i, false);
+
+ gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_MME_SHIFT + i);
+ }
+}
+
+static void gaudi2_init_tpc_cfg(struct hl_device *hdev, u32 reg_base)
+{
+ /* Mask arithmetic and QM interrupts in TPC */
+ WREG32(reg_base + TPC_CFG_TPC_INTR_MASK_OFFSET, 0x23FFFE);
+
+ /* Set 16 cache lines */
+ WREG32(reg_base + TPC_CFG_MSS_CONFIG_OFFSET,
+ 2 << DCORE0_TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT);
+}
+
+struct gaudi2_tpc_init_cfg_data {
+ enum gaudi2_queue_id dcore_tpc_qid_base[NUM_OF_DCORES];
+};
+
+static void gaudi2_init_tpc_config(struct hl_device *hdev, int dcore, int inst,
+ u32 offset, void *data)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_tpc_init_cfg_data *cfg_data = data;
+ u32 queue_id_base;
+ u8 seq;
+
+ queue_id_base = cfg_data->dcore_tpc_qid_base[dcore] + (inst * NUM_OF_PQ_PER_QMAN);
+
+ if (dcore == 0 && inst == (NUM_DCORE0_TPC - 1))
+ /* gets last sequence number */
+ seq = NUM_OF_DCORES * NUM_OF_TPC_PER_DCORE;
+ else
+ seq = dcore * NUM_OF_TPC_PER_DCORE + inst;
+
+ gaudi2_init_tpc_cfg(hdev, mmDCORE0_TPC0_CFG_BASE + offset);
+ gaudi2_init_qman(hdev, mmDCORE0_TPC0_QM_BASE + offset, queue_id_base);
+
+ gaudi2->tpc_hw_cap_initialized |= BIT_ULL(HW_CAP_TPC_SHIFT + seq);
+}
+
+static void gaudi2_init_tpc(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_tpc_init_cfg_data init_cfg_data;
+ struct iterate_module_ctx tpc_iter;
+
+ if (!hdev->asic_prop.tpc_enabled_mask)
+ return;
+
+ if ((gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK) == HW_CAP_TPC_MASK)
+ return;
+
+ init_cfg_data.dcore_tpc_qid_base[0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0;
+ init_cfg_data.dcore_tpc_qid_base[1] = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0;
+ init_cfg_data.dcore_tpc_qid_base[2] = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0;
+ init_cfg_data.dcore_tpc_qid_base[3] = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0;
+ tpc_iter.fn = &gaudi2_init_tpc_config;
+ tpc_iter.data = &init_cfg_data;
+ gaudi2_iterate_tpcs(hdev, &tpc_iter);
+}
+
+static void gaudi2_init_rotator(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 i, reg_base, queue_id;
+
+ queue_id = GAUDI2_QUEUE_ID_ROT_0_0;
+
+ for (i = 0 ; i < NUM_OF_ROT ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
+ reg_base = gaudi2_qm_blocks_bases[queue_id];
+ gaudi2_init_qman(hdev, reg_base, queue_id);
+
+ gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_ROT_SHIFT + i);
+ }
+}
+
+static void gaudi2_init_vdec_brdg_ctrl(struct hl_device *hdev, u64 base_addr, u32 decoder_id)
+{
+ u32 sob_id;
+
+ /* TODO:
+ * Remove when virtual MSI-X doorbell is supported in simulator (SW-93022) and in F/W
+ * (SW-93024).
+ */
+ if (!hdev->pdev || hdev->asic_prop.fw_security_enabled) {
+ u32 interrupt_id = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 2 * decoder_id;
+
+ WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_AWADDR, mmPCIE_DBI_MSIX_DOORBELL_OFF);
+ WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_WDATA, interrupt_id);
+ WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR, mmPCIE_DBI_MSIX_DOORBELL_OFF);
+ WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_WDATA, interrupt_id + 1);
+ return;
+ }
+
+ /* VCMD normal interrupt */
+ sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id;
+ WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_AWADDR,
+ mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_id * sizeof(u32));
+ WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_WDATA, GAUDI2_SOB_INCREMENT_BY_ONE);
+
+ /* VCMD abnormal interrupt */
+ sob_id = GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + decoder_id;
+ WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR,
+ mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_id * sizeof(u32));
+ WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_WDATA, GAUDI2_SOB_INCREMENT_BY_ONE);
+}
+
+static void gaudi2_init_dec(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 dcore_id, dec_id, dec_bit;
+ u64 base_addr;
+
+ if (!hdev->asic_prop.decoder_enabled_mask)
+ return;
+
+ if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == HW_CAP_DEC_MASK)
+ return;
+
+ for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++)
+ for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) {
+ dec_bit = dcore_id * NUM_OF_DEC_PER_DCORE + dec_id;
+
+ if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
+ continue;
+
+ base_addr = mmDCORE0_DEC0_CMD_BASE +
+ BRDG_CTRL_BLOCK_OFFSET +
+ dcore_id * DCORE_OFFSET +
+ dec_id * DCORE_VDEC_OFFSET;
+
+ gaudi2_init_vdec_brdg_ctrl(hdev, base_addr, dec_bit);
+
+ gaudi2->dec_hw_cap_initialized |= BIT_ULL(HW_CAP_DEC_SHIFT + dec_bit);
+ }
+
+ for (dec_id = 0 ; dec_id < NUM_OF_PCIE_VDEC ; dec_id++) {
+ dec_bit = PCIE_DEC_SHIFT + dec_id;
+ if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
+ continue;
+
+ base_addr = mmPCIE_DEC0_CMD_BASE + BRDG_CTRL_BLOCK_OFFSET +
+ dec_id * DCORE_VDEC_OFFSET;
+
+ gaudi2_init_vdec_brdg_ctrl(hdev, base_addr, dec_bit);
+
+ gaudi2->dec_hw_cap_initialized |= BIT_ULL(HW_CAP_DEC_SHIFT + dec_bit);
+ }
+}
+
+static void gaudi2_init_msix_gw_table(struct hl_device *hdev)
+{
+ u32 first_reg_offset, last_reg_offset, msix_gw_table_base;
+ u8 first_bit, last_bit;
+ int i;
+
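+ /* Each MSI-X gateway table register covers 32 interrupts, hence the register
+ * byte offset is (irq >> 5) << 2 and the bit index within it is irq % 32
+ */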
+ msix_gw_table_base = mmPCIE_WRAP_MSIX_GW_TABLE_0;
+ first_reg_offset = (GAUDI2_IRQ_NUM_USER_FIRST >> 5) << 2;
+ first_bit = GAUDI2_IRQ_NUM_USER_FIRST % 32;
+ last_reg_offset = (GAUDI2_IRQ_NUM_USER_LAST >> 5) << 2;
+ last_bit = GAUDI2_IRQ_NUM_USER_LAST % 32;
+
+ if (first_reg_offset == last_reg_offset) {
+ WREG32(msix_gw_table_base + first_reg_offset, GENMASK(last_bit, first_bit));
+ return;
+ }
+
+ WREG32(msix_gw_table_base + first_reg_offset, GENMASK(31, first_bit));
+ WREG32(msix_gw_table_base + last_reg_offset, GENMASK(last_bit, 0));
+
+ for (i = first_reg_offset + 4; i < last_reg_offset ; i += 4)
+ WREG32(msix_gw_table_base + i, 0xFFFFFFFF);
+}
+
+static int gaudi2_mmu_update_asid_hop0_addr(struct hl_device *hdev,
+ u32 stlb_base, u32 asid, u64 phys_addr)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm || !hdev->pdev)
+ timeout_usec = GAUDI2_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
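+ /* Program the ASID and its hop0 physical address into the STLB, set the busy
+ * bit and wait for H/W to clear it, indicating the entry was consumed
+ */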
+ WREG32(stlb_base + STLB_ASID_OFFSET, asid);
+ WREG32(stlb_base + STLB_HOP0_PA43_12_OFFSET, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+ WREG32(stlb_base + STLB_HOP0_PA63_44_OFFSET, phys_addr >> MMU_HOP0_PA63_44_SHIFT);
+ WREG32(stlb_base + STLB_BUSY_OFFSET, 0x80000000);
+
+ rc = hl_poll_timeout(
+ hdev,
+ stlb_base + STLB_BUSY_OFFSET,
+ status,
+ !(status & 0x80000000),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev, "Timeout during MMU hop0 config of asid %d\n", asid);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void gaudi2_mmu_send_invalidate_cache_cmd(struct hl_device *hdev, u32 stlb_base,
+ u32 start_offset, u32 inv_start_val,
+ u32 flags)
+{
+ /* clear PMMU mem line cache (only needed in mmu range invalidation) */
+ if (flags & MMU_OP_CLEAR_MEMCACHE)
+ WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INVALIDATION, 0x1);
+
+ if (flags & MMU_OP_SKIP_LOW_CACHE_INV)
+ return;
+
+ WREG32(stlb_base + start_offset, inv_start_val);
+}
+
+static int gaudi2_mmu_invalidate_cache_status_poll(struct hl_device *hdev, u32 stlb_base,
+ struct gaudi2_cache_invld_params *inv_params)
+{
+ u32 status, timeout_usec, start_offset;
+ int rc;
+
+ timeout_usec = (hdev->pldm) ? GAUDI2_PLDM_MMU_TIMEOUT_USEC :
+ GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC;
+
+ /* poll PMMU mem line cache (only needed in mmu range invalidation) */
+ if (inv_params->flags & MMU_OP_CLEAR_MEMCACHE) {
+ rc = hl_poll_timeout(
+ hdev,
+ mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS,
+ status,
+ status & 0x1,
+ 1000,
+ timeout_usec);
+
+ if (rc)
+ return rc;
+
+ /* Need to manually reset the status to 0 */
+ WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS, 0x0);
+ }
+
+ /* Lower cache does not work with cache lines, hence we can skip its
+ * invalidation upon map and invalidate only upon unmap
+ */
+ if (inv_params->flags & MMU_OP_SKIP_LOW_CACHE_INV)
+ return 0;
+
+ start_offset = inv_params->range_invalidation ?
+ STLB_RANGE_CACHE_INVALIDATION_OFFSET : STLB_INV_ALL_START_OFFSET;
+
+ rc = hl_poll_timeout(
+ hdev,
+ stlb_base + start_offset,
+ status,
+ !(status & 0x1),
+ 1000,
+ timeout_usec);
+
+ return rc;
+}
+
+bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 hw_cap;
+
+ hw_cap = HW_CAP_DCORE0_DMMU0 << (NUM_OF_HMMU_PER_DCORE * dcore_id + hmmu_id);
+
+ if (gaudi2->hw_cap_initialized & hw_cap)
+ return true;
+
+ return false;
+}
+
+ /* This function shall be called only for HMMUs whose capability bit is set */
+static inline u32 get_hmmu_stlb_base(int dcore_id, int hmmu_id)
+{
+ u32 offset;
+
+ offset = (u32) (dcore_id * DCORE_OFFSET + hmmu_id * DCORE_HMMU_OFFSET);
+ return (u32)(mmDCORE0_HMMU0_STLB_BASE + offset);
+}
+
+static void gaudi2_mmu_invalidate_cache_trigger(struct hl_device *hdev, u32 stlb_base,
+ struct gaudi2_cache_invld_params *inv_params)
+{
+ u32 start_offset;
+
+ if (inv_params->range_invalidation) {
+ /* Set the address range.
+ * Note: by design, the start address written to the register is not
+ * included in the invalidation range, so we program an address one
+ * lower than the first address we actually want to invalidate.
+ */
+ u64 start = inv_params->start_va - 1;
+
+ start_offset = STLB_RANGE_CACHE_INVALIDATION_OFFSET;
+
+ WREG32(stlb_base + STLB_RANGE_INV_START_LSB_OFFSET,
+ start >> MMU_RANGE_INV_VA_LSB_SHIFT);
+
+ WREG32(stlb_base + STLB_RANGE_INV_START_MSB_OFFSET,
+ start >> MMU_RANGE_INV_VA_MSB_SHIFT);
+
+ WREG32(stlb_base + STLB_RANGE_INV_END_LSB_OFFSET,
+ inv_params->end_va >> MMU_RANGE_INV_VA_LSB_SHIFT);
+
+ WREG32(stlb_base + STLB_RANGE_INV_END_MSB_OFFSET,
+ inv_params->end_va >> MMU_RANGE_INV_VA_MSB_SHIFT);
+ } else {
+ start_offset = STLB_INV_ALL_START_OFFSET;
+ }
+
+ gaudi2_mmu_send_invalidate_cache_cmd(hdev, stlb_base, start_offset,
+ inv_params->inv_start_val, inv_params->flags);
+}
+
+static inline void gaudi2_hmmu_invalidate_cache_trigger(struct hl_device *hdev,
+ int dcore_id, int hmmu_id,
+ struct gaudi2_cache_invld_params *inv_params)
+{
+ u32 stlb_base = get_hmmu_stlb_base(dcore_id, hmmu_id);
+
+ gaudi2_mmu_invalidate_cache_trigger(hdev, stlb_base, inv_params);
+}
+
+static inline int gaudi2_hmmu_invalidate_cache_status_poll(struct hl_device *hdev,
+ int dcore_id, int hmmu_id,
+ struct gaudi2_cache_invld_params *inv_params)
+{
+ u32 stlb_base = get_hmmu_stlb_base(dcore_id, hmmu_id);
+
+ return gaudi2_mmu_invalidate_cache_status_poll(hdev, stlb_base, inv_params);
+}
+
+static int gaudi2_hmmus_invalidate_cache(struct hl_device *hdev,
+ struct gaudi2_cache_invld_params *inv_params)
+{
+ int dcore_id, hmmu_id;
+
+ /* first send all invalidation commands */
+ for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
+ for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) {
+ if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id))
+ continue;
+
+ gaudi2_hmmu_invalidate_cache_trigger(hdev, dcore_id, hmmu_id, inv_params);
+ }
+ }
+
+ /* next, poll all invalidations status */
+ for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
+ for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) {
+ int rc;
+
+ if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id))
+ continue;
+
+ rc = gaudi2_hmmu_invalidate_cache_status_poll(hdev, dcore_id, hmmu_id,
+ inv_params);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int gaudi2_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_cache_invld_params invld_params;
+ int rc = 0;
+
+ if (hdev->reset_info.hard_reset_pending)
+ return rc;
+
+ invld_params.range_invalidation = false;
+ invld_params.inv_start_val = 1;
+
+ if ((flags & MMU_OP_USERPTR) && (gaudi2->hw_cap_initialized & HW_CAP_PMMU)) {
+ invld_params.flags = flags;
+ gaudi2_mmu_invalidate_cache_trigger(hdev, mmPMMU_HBW_STLB_BASE, &invld_params);
+ rc = gaudi2_mmu_invalidate_cache_status_poll(hdev, mmPMMU_HBW_STLB_BASE,
+ &invld_params);
+ } else if (flags & MMU_OP_PHYS_PACK) {
+ invld_params.flags = 0;
+ rc = gaudi2_hmmus_invalidate_cache(hdev, &invld_params);
+ }
+
+ return rc;
+}
+
+static int gaudi2_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
+ u32 flags, u32 asid, u64 va, u64 size)
+{
+ struct gaudi2_cache_invld_params invld_params = {0};
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 start_va, end_va;
+ u32 inv_start_val;
+ int rc = 0;
+
+ if (hdev->reset_info.hard_reset_pending)
+ return 0;
+
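+ /* Build the invalidation trigger value: enable range mode, enable ASID
+ * filtering and encode the target ASID
+ */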
+ inv_start_val = (1 << MMU_RANGE_INV_EN_SHIFT |
+ 1 << MMU_RANGE_INV_ASID_EN_SHIFT |
+ asid << MMU_RANGE_INV_ASID_SHIFT);
+ start_va = va;
+ end_va = start_va + size;
+
+ if ((flags & MMU_OP_USERPTR) && (gaudi2->hw_cap_initialized & HW_CAP_PMMU)) {
+ /* As range invalidation does not support a zero address, we do a
+ * full invalidation in this case
+ */
+ if (start_va) {
+ invld_params.range_invalidation = true;
+ invld_params.start_va = start_va;
+ invld_params.end_va = end_va;
+ invld_params.inv_start_val = inv_start_val;
+ invld_params.flags = flags | MMU_OP_CLEAR_MEMCACHE;
+ } else {
+ invld_params.range_invalidation = false;
+ invld_params.inv_start_val = 1;
+ invld_params.flags = flags;
+ }
+
+ gaudi2_mmu_invalidate_cache_trigger(hdev, mmPMMU_HBW_STLB_BASE, &invld_params);
+ rc = gaudi2_mmu_invalidate_cache_status_poll(hdev, mmPMMU_HBW_STLB_BASE,
+ &invld_params);
+ if (rc)
+ return rc;
+
+ } else if (flags & MMU_OP_PHYS_PACK) {
+ invld_params.start_va = gaudi2_mmu_scramble_addr(hdev, start_va);
+ invld_params.end_va = gaudi2_mmu_scramble_addr(hdev, end_va);
+ invld_params.inv_start_val = inv_start_val;
+ invld_params.flags = flags;
+ rc = gaudi2_hmmus_invalidate_cache(hdev, &invld_params);
+ }
+
+ return rc;
+}
+
+static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 hop0_addr;
+ u32 asid, max_asid = prop->max_asid;
+ int rc;
+
+ /* it takes too much time to init all of the ASIDs on palladium */
+ if (hdev->pldm)
+ max_asid = min((u32) 8, max_asid);
+
+ for (asid = 0 ; asid < max_asid ; asid++) {
+ hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr;
+ rc = gaudi2_mmu_update_asid_hop0_addr(hdev, stlb_base, asid, hop0_addr);
+ if (rc) {
+ dev_err(hdev->dev, "failed to set hop0 addr for asid %d\n", asid);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base,
+ u32 stlb_base)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm || !hdev->pdev)
+ timeout_usec = GAUDI2_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC;
+
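+ /* Kick a full cache invalidation now; it runs while we wait for SRAM init and
+ * program the hop0 addresses, and its completion is polled further below
+ */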
+ WREG32(stlb_base + STLB_INV_ALL_START_OFFSET, 1);
+
+ rc = hl_poll_timeout(
+ hdev,
+ stlb_base + STLB_SRAM_INIT_OFFSET,
+ status,
+ !status,
+ 1000,
+ timeout_usec);
+
+ if (rc)
+ dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU SRAM init\n");
+
+ rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base);
+ if (rc)
+ return rc;
+
+ WREG32(mmu_base + MMU_BYPASS_OFFSET, 0);
+ WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, 0xF);
+
+ rc = hl_poll_timeout(
+ hdev,
+ stlb_base + STLB_INV_ALL_START_OFFSET,
+ status,
+ !status,
+ 1000,
+ timeout_usec);
+
+ if (rc)
+ dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU invalidate all\n");
+
+ WREG32(mmu_base + MMU_ENABLE_OFFSET, 1);
+
+ return rc;
+}
+
+static int gaudi2_pci_mmu_init(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 mmu_base, stlb_base;
+ int rc;
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_PMMU)
+ return 0;
+
+ mmu_base = mmPMMU_HBW_MMU_BASE;
+ stlb_base = mmPMMU_HBW_STLB_BASE;
+
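+ /* PMMU hop configuration: first hop 0, last/follower hop 5, first lookup hop
+ * 5 for small pages and 4 for large pages
+ */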
+ RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET,
+ (0 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) |
+ (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_SHIFT) |
+ (4 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_SHIFT) |
+ (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT) |
+ (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_SHIFT),
+ PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK |
+ PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK |
+ PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK |
+ PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_MASK |
+ PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK);
+
+ WREG32(stlb_base + STLB_LL_LOOKUP_MASK_63_32_OFFSET, 0);
+
+ if (PAGE_SIZE == SZ_64K) {
+ /* Set page sizes to 64K on hop5 and 16M on hop4 + enable 8 bit hops */
+ RMWREG32(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET,
+ FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK, 4) |
+ FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK, 3) |
+ FIELD_PREP(
+ DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK,
+ 1),
+ DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK |
+ DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK |
+ DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK);
+ }
+
+ rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
+ if (rc)
+ return rc;
+
+ gaudi2->hw_cap_initialized |= HW_CAP_PMMU;
+
+ return 0;
+}
+
+static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id,
+ int hmmu_id)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 offset, mmu_base, stlb_base, hw_cap;
+ u8 dmmu_seq;
+ int rc;
+
+ dmmu_seq = NUM_OF_HMMU_PER_DCORE * dcore_id + hmmu_id;
+ hw_cap = HW_CAP_DCORE0_DMMU0 << dmmu_seq;
+
+ /*
+ * return if DMMU is already initialized or if it's not out of
+ * isolation (due to cluster binning)
+ */
+ if ((gaudi2->hw_cap_initialized & hw_cap) || !(prop->hmmu_hif_enabled_mask & BIT(dmmu_seq)))
+ return 0;
+
+ offset = (u32) (dcore_id * DCORE_OFFSET + hmmu_id * DCORE_HMMU_OFFSET);
+ mmu_base = mmDCORE0_HMMU0_MMU_BASE + offset;
+ stlb_base = mmDCORE0_HMMU0_STLB_BASE + offset;
+
+ RMWREG32(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, 5 /* 64MB */,
+ MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK);
+
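+ /* HMMU hop configuration: first hop 0, last/follower hop 3, first lookup hop
+ * 3 for both small and large pages
+ */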
+ RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET,
+ FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK, 0) |
+ FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK, 3) |
+ FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK, 3) |
+ FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK, 3) |
+ FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK, 3),
+ DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK |
+ DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK |
+ DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK |
+ DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK |
+ DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK);
+
+ RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, 1,
+ STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK);
+
+ rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
+ if (rc)
+ return rc;
+
+ gaudi2->hw_cap_initialized |= hw_cap;
+
+ return 0;
+}
+
+static int gaudi2_hbm_mmu_init(struct hl_device *hdev)
+{
+ int rc, dcore_id, hmmu_id;
+
+ for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++)
+ for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) {
+ rc = gaudi2_dcore_hmmu_init(hdev, dcore_id, hmmu_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gaudi2_mmu_init(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = gaudi2_pci_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_hbm_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int gaudi2_hw_init(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int rc;
+
+ /* Let's mark in the H/W that we have reached this point. We check
+ * this value in the reset_before_init function to understand whether
+ * we need to reset the chip before doing H/W init. This register is
+ * cleared by the H/W upon H/W reset
+ */
+ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
+
+ /* Perform read from the device to make sure device is up */
+ RREG32(mmHW_STATE);
+
+ /* If iATU is done by FW, the HBM bar ALWAYS points to DRAM_PHYS_BASE.
+ * So we set it here and if anyone tries to move it later to
+ * a different address, there will be an error
+ */
+ if (hdev->asic_prop.iatu_done_by_fw)
+ gaudi2->dram_bar_cur_addr = DRAM_PHYS_BASE;
+
+ /*
+ * Before pushing u-boot/linux to device, need to set the hbm bar to
+ * base address of dram
+ */
+ if (gaudi2_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
+ dev_err(hdev->dev, "failed to map HBM bar to DRAM base address\n");
+ return -EIO;
+ }
+
+ rc = gaudi2_init_cpu(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU\n");
+ return rc;
+ }
+
+ gaudi2_init_msix_gw_table(hdev);
+
+ gaudi2_init_scrambler_hbm(hdev);
+ gaudi2_init_kdma(hdev);
+
+ rc = gaudi2_init_cpu_queues(hdev, GAUDI2_CPU_TIMEOUT_USEC);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", rc);
+ return rc;
+ }
+
+ rc = gaudi2->cpucp_info_get(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get cpucp info\n");
+ return rc;
+ }
+
+ rc = gaudi2_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ gaudi2_init_pdma(hdev);
+ gaudi2_init_edma(hdev);
+ gaudi2_init_sm(hdev);
+ gaudi2_init_tpc(hdev);
+ gaudi2_init_mme(hdev);
+ gaudi2_init_rotator(hdev);
+ gaudi2_init_dec(hdev);
+ gaudi2_enable_timestamp(hdev);
+
+ rc = gaudi2_coresight_init(hdev);
+ if (rc)
+ goto disable_queues;
+
+ rc = gaudi2_enable_msix(hdev);
+ if (rc)
+ goto disable_queues;
+
+ /* Perform read from the device to flush all configuration */
+ RREG32(mmHW_STATE);
+
+ return 0;
+
+disable_queues:
+ gaudi2_disable_dma_qmans(hdev);
+ gaudi2_disable_mme_qmans(hdev);
+ gaudi2_disable_tpc_qmans(hdev);
+ gaudi2_disable_rot_qmans(hdev);
+ gaudi2_disable_nic_qmans(hdev);
+
+ gaudi2_disable_timestamp(hdev);
+
+ return rc;
+}
+
+/**
+ * gaudi2_send_hard_reset_cmd - common function to handle reset
+ *
+ * @hdev: pointer to the habanalabs device structure
+ *
+ * This function handles the various possible scenarios for reset.
+ * It considers whether the reset is handled by the driver or by FW, and which FW components are loaded
+ */
+static void gaudi2_send_hard_reset_cmd(struct hl_device *hdev)
+{
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ bool heartbeat_reset, preboot_only, cpu_initialized = false;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 cpu_boot_status;
+
+ preboot_only = (hdev->fw_loader.fw_comp_loaded == FW_TYPE_PREBOOT_CPU);
+ heartbeat_reset = (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT);
+
+ /*
+ * Handle the corner case where the failure occurred while loading the CPU
+ * management app and the driver didn't detect any failure while loading the FW.
+ * In that scenario the driver sends only HALT_MACHINE, but no one responds to
+ * the request since the FW is already back in preboot and cannot handle such
+ * a command.
+ * The next time the management app loads, it checks the events register, which
+ * still holds the halt indication, and reboots the device.
+ * The solution is to let preboot clear all relevant registers before the next
+ * boot, once the driver sends COMMS_RST_DEV.
+ */
+ cpu_boot_status = RREG32(mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS);
+
+ if (gaudi2 && (gaudi2->hw_cap_initialized & HW_CAP_CPU) &&
+ (cpu_boot_status == CPU_BOOT_STATUS_SRAM_AVAIL))
+ cpu_initialized = true;
+
+ /*
+ * When Linux/Bootfit exists, this write to the SP can be interpreted in 2 ways:
+ * 1. FW reset: FW initiates the reset sequence
+ * 2. driver reset: FW starts the HALT sequence (the preparations for the
+ * reset, but not the reset itself, as it is not implemented
+ * on its side) and LKD waits to let FW complete the
+ * sequence before issuing the reset
+ */
+ if (!preboot_only && cpu_initialized) {
+ WREG32(le32_to_cpu(dyn_regs->gic_host_halt_irq),
+ gaudi2_irq_map_table[GAUDI2_EVENT_CPU_HALT_MACHINE].cpu_id);
+
+ msleep(GAUDI2_CPU_RESET_WAIT_MSEC);
+ }
+
+ /*
+ * When working with preboot (without Linux/Boot fit) we can
+ * communicate only using the COMMS commands to issue halt/reset.
+ *
+ * For the case in which we are working with Linux/Bootfit this is a hail-mary
+ * attempt to revive the card in the small chance that the f/w has
+ * experienced a watchdog event, which caused it to return back to preboot.
+ * In that case, triggering reset through GIC won't help. We need to
+ * trigger the reset as if Linux wasn't loaded.
+ *
+ * We do it only if the reset cause was HB, because that would be the
+ * indication of such an event.
+ *
+ * In case watchdog hasn't expired but we still got HB, then this won't
+ * do any damage.
+ */
+
+ if (heartbeat_reset || preboot_only || !cpu_initialized) {
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ hl_fw_ask_hard_reset_without_linux(hdev);
+ else
+ hl_fw_ask_halt_machine_without_linux(hdev);
+ }
+}
+
+/**
+ * gaudi2_execute_hard_reset - execute hard reset by driver/FW
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @reset_sleep_ms: sleep time in msec after reset
+ *
+ * This function executes hard reset based on if driver/FW should do the reset
+ */
+static void gaudi2_execute_hard_reset(struct hl_device *hdev, u32 reset_sleep_ms)
+{
+ if (hdev->asic_prop.hard_reset_done_by_fw) {
+ gaudi2_send_hard_reset_cmd(hdev);
+ return;
+ }
+
+ /* Set device to handle FLR by H/W as we will put the device
+ * CPU to halt mode
+ */
+ WREG32(mmPCIE_AUX_FLR_CTRL,
+ (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
+
+ gaudi2_send_hard_reset_cmd(hdev);
+
+ WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
+}
+
+/**
+ * gaudi2_execute_soft_reset - execute soft reset by driver/FW
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @reset_sleep_ms: sleep time in msec after reset
+ * @driver_performs_reset: true if driver should perform reset instead of f/w.
+ *
+ * This function executes soft reset based on if driver/FW should do the reset
+ */
+static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms,
+ bool driver_performs_reset)
+{
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+
+ if (!driver_performs_reset) {
+ /* set SP to indicate reset request sent to FW */
+ WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
+
+ WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
+ gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
+ return;
+ }
+
+ /* Block access to engines, QMANs and SM during reset; these
+ * RRs will be reconfigured after the soft reset.
+ * PCIE_MSIX is left unsecured to allow NIC packet processing during the reset.
+ */
+ gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, NUM_LONG_LBW_RR - 1,
+ mmDCORE0_TPC0_QM_DCCM_BASE, mmPCIE_MSIX_BASE);
+
+ gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, NUM_LONG_LBW_RR - 2,
+ mmPCIE_MSIX_BASE + HL_BLOCK_SIZE,
+ mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE + HL_BLOCK_SIZE);
+
+ WREG32(mmPSOC_RESET_CONF_SOFT_RST, 1);
+}
+
+static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 reset_sleep_ms,
+ u32 poll_timeout_us)
+{
+ int i, rc = 0;
+ u32 reg_val;
+
+ /* without this sleep reset will not work */
+ msleep(reset_sleep_ms);
+
+ /* We poll the BTM done indication multiple times after reset due to
+ * HW erratum 'GAUDI2_0300'
+ */
+ for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_BTM_FSM,
+ reg_val,
+ reg_val == 0,
+ 1000,
+ poll_timeout_us);
+
+ if (rc)
+ dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", reg_val);
+}
+
+static void gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
+{
+ int i, rc = 0;
+ u32 reg_val;
+
+ for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
+ rc = hl_poll_timeout(
+ hdev,
+ mmCPU_RST_STATUS_TO_HOST,
+ reg_val,
+ reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
+ 1000,
+ poll_timeout_us);
+
+ if (rc)
+ dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
+ reg_val);
+}
+
+static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 poll_timeout_us, reset_sleep_ms;
+ bool driver_performs_reset = false;
+
+ if (hdev->pldm) {
+ reset_sleep_ms = hard_reset ? GAUDI2_PLDM_HRESET_TIMEOUT_MSEC :
+ GAUDI2_PLDM_SRESET_TIMEOUT_MSEC;
+ poll_timeout_us = GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC;
+ } else {
+ reset_sleep_ms = GAUDI2_RESET_TIMEOUT_MSEC;
+ poll_timeout_us = GAUDI2_RESET_POLL_TIMEOUT_USEC;
+ }
+
+ if (fw_reset)
+ goto skip_reset;
+
+ gaudi2_reset_arcs(hdev);
+
+ if (hard_reset) {
+ driver_performs_reset = !hdev->asic_prop.hard_reset_done_by_fw;
+ gaudi2_execute_hard_reset(hdev, reset_sleep_ms);
+ } else {
+ /*
+ * As we also have to support working with preboot only (which does not support
+ * soft reset), we have to make sure that security is disabled before letting the
+ * driver do the reset. The user shall control the BFE flags to avoid requesting
+ * a soft reset on a secured device that runs preboot only.
+ */
+ driver_performs_reset = (hdev->fw_components == FW_TYPE_PREBOOT_CPU &&
+ !hdev->asic_prop.fw_security_enabled);
+ gaudi2_execute_soft_reset(hdev, reset_sleep_ms, driver_performs_reset);
+ }
+
+skip_reset:
+ if (driver_performs_reset || hard_reset)
+ gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
+ else
+ gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
+
+ if (!gaudi2)
+ return;
+
+ gaudi2->dec_hw_cap_initialized &= ~(HW_CAP_DEC_MASK);
+ gaudi2->tpc_hw_cap_initialized &= ~(HW_CAP_TPC_MASK);
+
+ /*
+ * Clear NIC capability mask in order for driver to re-configure
+ * NIC QMANs. NIC ports will not be re-configured during soft
+ * reset as we call gaudi2_nic_init only during hard reset
+ */
+ gaudi2->nic_hw_cap_initialized &= ~(HW_CAP_NIC_MASK);
+
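+ /* Hard reset invalidates all capabilities, including CPU, MMUs and
+ * scramblers; soft reset keeps the CPU and MMU capabilities and clears
+ * mostly the engine-related ones
+ */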
+ if (hard_reset) {
+ gaudi2->hw_cap_initialized &=
+ ~(HW_CAP_DRAM | HW_CAP_CLK_GATE | HW_CAP_HBM_SCRAMBLER_MASK |
+ HW_CAP_PMMU | HW_CAP_CPU | HW_CAP_CPU_Q |
+ HW_CAP_SRAM_SCRAMBLER | HW_CAP_DMMU_MASK |
+ HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_KDMA |
+ HW_CAP_MME_MASK | HW_CAP_ROT_MASK);
+
+ memset(gaudi2->events_stat, 0, sizeof(gaudi2->events_stat));
+ } else {
+ gaudi2->hw_cap_initialized &=
+ ~(HW_CAP_CLK_GATE | HW_CAP_HBM_SCRAMBLER_SW_RESET |
+ HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_MME_MASK |
+ HW_CAP_ROT_MASK);
+ }
+}
+
+static int gaudi2_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
+ if (rc)
+ dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
+
+ return rc;
+}
+
+static int gaudi2_resume(struct hl_device *hdev)
+{
+ return gaudi2_init_iatu(hdev);
+}
+
+static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ int rc;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE;
+
+#ifdef _HAS_DMA_MMAP_COHERENT
+
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ if (rc)
+ dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
+ return rc;
+}
+
+static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 hw_cap_mask = 0;
+ u64 hw_tpc_cap_bit = 0;
+ u64 hw_nic_cap_bit = 0;
+ u64 hw_test_cap_bit = 0;
+
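+ /* Each QMAN exposes 4 PQs, hence the ">> 2" below converts a queue-ID offset
+ * into the engine index within the relevant capability mask
+ */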
+ switch (hw_queue_id) {
+ case GAUDI2_QUEUE_ID_PDMA_0_0 ... GAUDI2_QUEUE_ID_PDMA_1_1:
+ hw_cap_mask = HW_CAP_PDMA_MASK;
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
+ hw_test_cap_bit = HW_CAP_EDMA_SHIFT +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0) >> 2);
+ break;
+ case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3:
+ hw_test_cap_bit = HW_CAP_EDMA_SHIFT + NUM_OF_EDMA_PER_DCORE +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0) >> 2);
+ break;
+ case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3:
+ hw_test_cap_bit = HW_CAP_EDMA_SHIFT + 2 * NUM_OF_EDMA_PER_DCORE +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0) >> 2);
+ break;
+ case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3:
+ hw_test_cap_bit = HW_CAP_EDMA_SHIFT + 3 * NUM_OF_EDMA_PER_DCORE +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0) >> 2);
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE0_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE0_MME_0_3:
+ hw_test_cap_bit = HW_CAP_MME_SHIFT;
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE1_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE1_MME_0_3:
+ hw_test_cap_bit = HW_CAP_MME_SHIFT + 1;
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE2_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE2_MME_0_3:
+ hw_test_cap_bit = HW_CAP_MME_SHIFT + 2;
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE3_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE3_MME_0_3:
+ hw_test_cap_bit = HW_CAP_MME_SHIFT + 3;
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_5_3:
+ hw_tpc_cap_bit = HW_CAP_TPC_SHIFT +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_TPC_0_0) >> 2);
+
+ /* special case where cap bit refers to the first queue id */
+ if (!hw_tpc_cap_bit)
+ return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(0));
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE1_TPC_5_3:
+ hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + NUM_OF_TPC_PER_DCORE +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE1_TPC_0_0) >> 2);
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE2_TPC_5_3:
+ hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (2 * NUM_OF_TPC_PER_DCORE) +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE2_TPC_0_0) >> 2);
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE3_TPC_5_3:
+ hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (3 * NUM_OF_TPC_PER_DCORE) +
+ ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE3_TPC_0_0) >> 2);
+ break;
+
+ case GAUDI2_QUEUE_ID_DCORE0_TPC_6_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_6_3:
+ hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (4 * NUM_OF_TPC_PER_DCORE);
+ break;
+
+ case GAUDI2_QUEUE_ID_ROT_0_0 ... GAUDI2_QUEUE_ID_ROT_1_3:
+ hw_test_cap_bit = HW_CAP_ROT_SHIFT + ((hw_queue_id - GAUDI2_QUEUE_ID_ROT_0_0) >> 2);
+ break;
+
+ case GAUDI2_QUEUE_ID_NIC_0_0 ... GAUDI2_QUEUE_ID_NIC_23_3:
+ hw_nic_cap_bit = HW_CAP_NIC_SHIFT + ((hw_queue_id - GAUDI2_QUEUE_ID_NIC_0_0) >> 2);
+
+ /* special case where cap bit refers to the first queue id */
+ if (!hw_nic_cap_bit)
+ return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(0));
+ break;
+
+ case GAUDI2_QUEUE_ID_CPU_PQ:
+ return !!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q);
+
+ default:
+ return false;
+ }
+
+ if (hw_tpc_cap_bit)
+ return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(hw_tpc_cap_bit));
+
+ if (hw_nic_cap_bit)
+ return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(hw_nic_cap_bit));
+
+ if (hw_test_cap_bit)
+ hw_cap_mask = BIT_ULL(hw_test_cap_bit);
+
+ return !!(gaudi2->hw_cap_initialized & hw_cap_mask);
+}
+
+static bool gaudi2_is_arc_enabled(struct hl_device *hdev, u64 arc_id)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ switch (arc_id) {
+ case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5:
+ case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1:
+ return !!(gaudi2->active_hw_arc & BIT_ULL(arc_id));
+
+ case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
+ return !!(gaudi2->active_tpc_arc & BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0));
+
+ case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
+ return !!(gaudi2->active_nic_arc & BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0));
+
+ default:
+ return false;
+ }
+}
+
+static void gaudi2_clr_arc_id_cap(struct hl_device *hdev, u64 arc_id)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ switch (arc_id) {
+ case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5:
+ case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1:
+ gaudi2->active_hw_arc &= ~(BIT_ULL(arc_id));
+ break;
+
+ case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
+ gaudi2->active_tpc_arc &= ~(BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0));
+ break;
+
+ case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
+ gaudi2->active_nic_arc &= ~(BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0));
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ switch (arc_id) {
+ case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5:
+ case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1:
+ gaudi2->active_hw_arc |= BIT_ULL(arc_id);
+ break;
+
+ case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
+ gaudi2->active_tpc_arc |= BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0);
+ break;
+
+ case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
+ gaudi2->active_nic_arc |= BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0);
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void gaudi2_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+{
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ u32 pq_offset, reg_base, db_reg_offset, db_value;
+
+ if (hw_queue_id != GAUDI2_QUEUE_ID_CPU_PQ) {
+ /*
+ * QMAN has 4 successive PQ_PI registers, 1 for each of the QMAN PQs.
+ * Masking the H/W queue ID with 0x3 extracts the QMAN internal PQ
+ * number.
+ */
+ pq_offset = (hw_queue_id & 0x3) * 4;
+ reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
+ db_reg_offset = reg_base + QM_PQ_PI_0_OFFSET + pq_offset;
+ } else {
+ db_reg_offset = mmCPU_IF_PF_PQ_PI;
+ }
+
+ db_value = pi;
+
+ /* ring the doorbell */
+ WREG32(db_reg_offset, db_value);
+
+ if (hw_queue_id == GAUDI2_QUEUE_ID_CPU_PQ) {
+ /* make sure device CPU will read latest data from host */
+ mb();
+ WREG32(le32_to_cpu(dyn_regs->gic_host_pi_upd_irq),
+ gaudi2_irq_map_table[GAUDI2_EVENT_CPU_PI_UPDATE].cpu_id);
+ }
+}
+
+static void gaudi2_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
+{
+ __le64 *pbd = (__le64 *) bd;
+
+ /* The QMANs are on host memory so a simple copy suffices */
+ pqe[0] = pbd[0];
+ pqe[1] = pbd[1];
+}
+
+static void *gaudi2_dma_alloc_coherent(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
+}
+
+static void gaudi2_dma_free_coherent(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
+}
+
+static int gaudi2_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
+ u32 timeout, u64 *result)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) {
+ if (result)
+ *result = 0;
+ return 0;
+ }
+
+ if (!timeout)
+ timeout = GAUDI2_MSG_TO_CPU_TIMEOUT_USEC;
+
+ return hl_fw_send_cpu_message(hdev, GAUDI2_QUEUE_ID_CPU_PQ, msg, len, timeout, result);
+}
+
+static void *gaudi2_dma_pool_zalloc(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle)
+{
+ if (size > GAUDI2_DMA_POOL_BLK_SIZE)
+ return NULL;
+
+ return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+}
+
+static void gaudi2_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
+{
+ dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
+}
+
+static void *gaudi2_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+}
+
+static void gaudi2_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+{
+ hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
+}
+
+static dma_addr_t gaudi2_dma_map_single(struct hl_device *hdev, void *addr, int len,
+ enum dma_data_direction dir)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(&hdev->pdev->dev, addr, len, dir);
+ if (unlikely(dma_mapping_error(&hdev->pdev->dev, dma_addr)))
+ return 0;
+
+ return dma_addr;
+}
+
+static void gaudi2_dma_unmap_single(struct hl_device *hdev, dma_addr_t addr, int len,
+ enum dma_data_direction dir)
+{
+ dma_unmap_single(&hdev->pdev->dev, addr, len, dir);
+}
+
+static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+ struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
+ dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ return -EINVAL;
+ }
+
+ /* Just check if CB address is valid */
+
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->sram_user_base_address,
+ asic_prop->sram_end_address))
+ return 0;
+
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dram_user_base_address,
+ asic_prop->dram_end_address))
+ return 0;
+
+ if ((gaudi2->hw_cap_initialized & HW_CAP_DMMU_MASK) &&
+ hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dmmu.start_addr,
+ asic_prop->dmmu.end_addr))
+ return 0;
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_PMMU) {
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->pmmu.start_addr,
+ asic_prop->pmmu.end_addr) ||
+ hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->pmmu_huge.start_addr,
+ asic_prop->pmmu_huge.end_addr))
+ return 0;
+
+ } else if (gaudi2_host_phys_addr_valid((u64) (uintptr_t) parser->user_cb)) {
+ if (!hdev->pdev)
+ return 0;
+
+ if (!device_iommu_mapped(&hdev->pdev->dev))
+ return 0;
+ }
+
+ dev_err(hdev->dev, "CB address %p + 0x%x for internal QMAN is not valid\n",
+ parser->user_cb, parser->user_cb_size);
+
+ return -EFAULT;
+}
+
+static int gaudi2_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!parser->is_kernel_allocated_cb)
+ return gaudi2_validate_cb_address(hdev, parser);
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU)) {
+ dev_err(hdev->dev, "PMMU not initialized - Unsupported mode in Gaudi2\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gaudi2_send_heartbeat(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_send_heartbeat(hdev);
+}
+
+/* This is an internal helper function, used to update the KDMA mmu props.
+ * Should be called with a proper kdma lock.
+ */
+static void gaudi2_kdma_set_mmbp_asid(struct hl_device *hdev,
+ bool mmu_bypass, u32 asid)
+{
+ u32 rw_asid, rw_mmu_bp;
+
+ rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) |
+ (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT);
+
+ rw_mmu_bp = (!!mmu_bypass << ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_SHIFT) |
+ (!!mmu_bypass << ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_SHIFT);
+
+ WREG32(mmARC_FARM_KDMA_CTX_AXUSER_HB_ASID, rw_asid);
+ WREG32(mmARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP, rw_mmu_bp);
+}
+
+static void gaudi2_arm_cq_monitor(struct hl_device *hdev, u32 sob_id, u32 mon_id, u32 cq_id,
+ u32 mon_payload, u32 sync_value)
+{
+ u32 sob_offset, mon_offset, sync_group_id, mode, mon_arm;
+ u8 mask;
+
+ sob_offset = sob_id * 4;
+ mon_offset = mon_id * 4;
+
+ /* Reset the SOB value */
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0);
+
+ /* Configure this address with the CQ ID because CQ_EN is set */
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, cq_id);
+
+ /* Configure this address with CS index because CQ_EN is set */
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, mon_payload);
+
+ sync_group_id = sob_id / 8;
+ mask = ~(1 << (sob_id & 0x7));
+ mode = 1; /* comparison mode is "equal to" */
+
+ mon_arm = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK, sync_value);
+ mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK, mode);
+ mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK, mask);
+ mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK, sync_group_id);
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + mon_offset, mon_arm);
+}
+
+ /* This is an internal helper, used only for KDMA jobs that the driver itself initiates */
+static int gaudi2_send_job_to_kdma(struct hl_device *hdev,
+ u64 src_addr, u64 dst_addr,
+ u32 size, bool is_memset)
+{
+ u32 comp_val, commit_mask, *polling_addr, timeout, status = 0;
+ struct hl_cq_entry *cq_base;
+ struct hl_cq *cq;
+ u64 comp_addr;
+ int rc;
+
+ gaudi2_arm_cq_monitor(hdev, GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
+ GAUDI2_RESERVED_MON_KDMA_COMPLETION,
+ GAUDI2_RESERVED_CQ_KDMA_COMPLETION, 1, 1);
+
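+ /* On completion, KDMA writes comp_val to the reserved SOB (incrementing it
+ * to 1), which triggers the armed monitor to post an entry to the reserved CQ
+ */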
+ comp_addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (GAUDI2_RESERVED_SOB_KDMA_COMPLETION * sizeof(u32));
+
+ comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) |
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1);
+
+ WREG32(mmARC_FARM_KDMA_CTX_SRC_BASE_LO, lower_32_bits(src_addr));
+ WREG32(mmARC_FARM_KDMA_CTX_SRC_BASE_HI, upper_32_bits(src_addr));
+ WREG32(mmARC_FARM_KDMA_CTX_DST_BASE_LO, lower_32_bits(dst_addr));
+ WREG32(mmARC_FARM_KDMA_CTX_DST_BASE_HI, upper_32_bits(dst_addr));
+ WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO, lower_32_bits(comp_addr));
+ WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI, upper_32_bits(comp_addr));
+ WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_WDATA, comp_val);
+ WREG32(mmARC_FARM_KDMA_CTX_DST_TSIZE_0, size);
+
+ commit_mask = FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK, 1) |
+ FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK, 1);
+
+ if (is_memset)
+ commit_mask |= FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK, 1);
+
+ WREG32(mmARC_FARM_KDMA_CTX_COMMIT, commit_mask);
+
+ /* Wait for completion */
+ cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_KDMA_COMPLETION];
+ cq_base = cq->kernel_address;
+ polling_addr = (u32 *)&cq_base[cq->ci];
+
+ if (hdev->pldm)
+ /* for each 1MB, 20 seconds of timeout */
+ timeout = ((size / SZ_1M) + 1) * USEC_PER_SEC * 20;
+ else
+ timeout = KDMA_TIMEOUT_USEC;
+
+ /* Polling */
+ rc = hl_poll_timeout_memory(
+ hdev,
+ polling_addr,
+ status,
+ (status == 1),
+ 1000,
+ timeout,
+ true);
+
+ *polling_addr = 0;
+
+ if (rc) {
+ dev_err(hdev->dev, "Timeout while waiting for KDMA to be idle\n");
+ WREG32(mmARC_FARM_KDMA_CFG_1, 1 << ARC_FARM_KDMA_CFG_1_HALT_SHIFT);
+ return rc;
+ }
+
+ cq->ci = hl_cq_inc_ptr(cq->ci);
+
+ return 0;
+}
+
+static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val)
+{
+ u32 i;
+
+ for (i = 0 ; i < size ; i += sizeof(u32))
+ WREG32(addr + i, val);
+}
+
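+ /* In test mode the QMAN protection is set to trusted-for-test and the PQC is
+ * disabled; the regular trusted protection and PQC enable are restored when
+ * the test ends
+ */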
+static void gaudi2_qman_set_test_mode(struct hl_device *hdev, u32 hw_queue_id, bool enable)
+{
+ u32 reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
+
+ if (enable) {
+ WREG32(reg_base + QM_GLBL_PROT_OFFSET, QMAN_MAKE_TRUSTED_TEST_MODE);
+ WREG32(reg_base + QM_PQC_CFG_OFFSET, 0);
+ } else {
+ WREG32(reg_base + QM_GLBL_PROT_OFFSET, QMAN_MAKE_TRUSTED);
+ WREG32(reg_base + QM_PQC_CFG_OFFSET, 1 << PDMA0_QM_PQC_CFG_EN_SHIFT);
+ }
+}
+
+static int gaudi2_test_queue(struct hl_device *hdev, u32 hw_queue_id)
+{
+ u32 sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4;
+ u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
+ u32 timeout_usec, tmp, sob_base = 1, sob_val = 0x5a5a;
+ struct packet_msg_short *msg_short_pkt;
+ dma_addr_t pkt_dma_addr;
+ size_t pkt_size;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC;
+ else
+ timeout_usec = GAUDI2_TEST_QUEUE_WAIT_USEC;
+
+ pkt_size = sizeof(*msg_short_pkt);
+ msg_short_pkt = hl_asic_dma_pool_zalloc(hdev, pkt_size, GFP_KERNEL, &pkt_dma_addr);
+ if (!msg_short_pkt) {
+ dev_err(hdev->dev, "Failed to allocate packet for H/W queue %d testing\n",
+ hw_queue_id);
+ return -ENOMEM;
+ }
+
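+ /* Build a MSG_SHORT packet that writes sob_val to the selected SOB; the test
+ * then polls the SOB until that value appears
+ */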
+ tmp = (PACKET_MSG_SHORT << GAUDI2_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI2_PKT_CTL_EB_SHIFT) |
+ (1 << GAUDI2_PKT_CTL_MB_SHIFT) |
+ (sob_base << GAUDI2_PKT_SHORT_CTL_BASE_SHIFT) |
+ (sob_offset << GAUDI2_PKT_SHORT_CTL_ADDR_SHIFT);
+
+ msg_short_pkt->value = cpu_to_le32(sob_val);
+ msg_short_pkt->ctl = cpu_to_le32(tmp);
+
+ /* Reset the SOB value */
+ WREG32(sob_addr, 0);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send msg_short packet to H/W queue %d\n",
+ hw_queue_id);
+ goto free_pkt;
+ }
+
+ rc = hl_poll_timeout(
+ hdev,
+ sob_addr,
+ tmp,
+ (tmp == sob_val),
+ 1000,
+ timeout_usec);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
+ hw_queue_id, tmp);
+ rc = -EIO;
+ }
+
+ /* Reset the SOB value */
+ WREG32(sob_addr, 0);
+
+free_pkt:
+ hl_asic_dma_pool_free(hdev, (void *) msg_short_pkt, pkt_dma_addr);
+ return rc;
+}
+
+static int gaudi2_test_cpu_queue(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ /*
+ * Check the capability here, as send_cpu_message() won't update the
+ * result value if the capability isn't set
+ */
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_test_cpu_queue(hdev);
+}
+
+static int gaudi2_test_queues(struct hl_device *hdev)
+{
+ int i, rc, ret_val = 0;
+
+ for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
+ if (!gaudi2_is_queue_enabled(hdev, i))
+ continue;
+
+ gaudi2_qman_set_test_mode(hdev, i, true);
+ rc = gaudi2_test_queue(hdev, i);
+ gaudi2_qman_set_test_mode(hdev, i, false);
+
+ if (rc) {
+ ret_val = -EINVAL;
+ goto done;
+ }
+ }
+
+ rc = gaudi2_test_cpu_queue(hdev);
+ if (rc) {
+ ret_val = -EINVAL;
+ goto done;
+ }
+
+done:
+ return ret_val;
+}
+
+static int gaudi2_non_hard_reset_late_init(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ size_t irq_arr_size;
+
+ /* TODO: missing gaudi2_nic_resume.
+ * Until implemented nic_hw_cap_initialized will remain zeroed
+ */
+ gaudi2_init_arcs(hdev);
+ gaudi2_scrub_arcs_dccm(hdev);
+ gaudi2_init_security(hdev);
+
+ /* Unmask all IRQs since some could have been received during the soft reset */
+ irq_arr_size = gaudi2->num_of_valid_hw_events * sizeof(gaudi2->hw_events[0]);
+ return hl_fw_unmask_irq_arr(hdev, gaudi2->hw_events, irq_arr_size);
+}
+
+static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ void *data)
+{
+ struct gaudi2_tpc_idle_data *idle_data = (struct gaudi2_tpc_idle_data *)data;
+ u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_eng_idle;
+ int engine_idx;
+
+ if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1)))
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
+ else
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 +
+ dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst;
+
+ tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset);
+ qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
+ IS_TPC_IDLE(tpc_cfg_sts);
+ *(idle_data->is_idle) &= is_eng_idle;
+
+ if (idle_data->mask && !is_eng_idle)
+ set_bit(engine_idx, idle_data->mask);
+
+ if (idle_data->s)
+ seq_printf(idle_data->s, idle_data->tpc_fmt, dcore, inst,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
+}
+
+static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
+ u8 mask_len, struct seq_file *s)
+{
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_idle_ind_mask,
+ mme_arch_sts, dec_swreg15, dec_enabled_bit;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-12x%s\n";
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#x\n";
+ const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
+ const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
+ const char *pdma_fmt = "%-6d%-9s%#-14x%#x\n";
+ const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
+ const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
+ bool is_idle = true, is_eng_idle;
+ u64 offset;
+
+ struct gaudi2_tpc_idle_data tpc_idle_data = {
+ .tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
+ .s = s,
+ .mask = mask,
+ .is_idle = &is_idle,
+ };
+ struct iterate_module_ctx tpc_iter = {
+ .fn = &gaudi2_is_tpc_engine_idle,
+ .data = &tpc_idle_data,
+ };
+
+ int engine_idx, i, j;
+
+ /* EDMA, Two engines per Dcore */
+ if (s)
+ seq_puts(s,
+ "\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
+ "---- ---- ------- ------------ ----------------------\n");
+
+ for (i = 0; i < NUM_OF_DCORES; i++) {
+ for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) {
+ int seq = i * NUM_OF_EDMA_PER_DCORE + j;
+
+ if (!(prop->edma_enabled_mask & BIT(seq)))
+ continue;
+
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_EDMA_0 +
+ i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j;
+ offset = i * DCORE_OFFSET + j * DCORE_EDMA_OFFSET;
+
+ dma_core_idle_ind_mask =
+ RREG32(mmDCORE0_EDMA0_CORE_IDLE_IND_MASK + offset);
+
+ qm_glbl_sts0 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmDCORE0_EDMA0_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_idle_ind_mask);
+ is_idle &= is_eng_idle;
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+
+ if (s)
+ seq_printf(s, edma_fmt, i, j,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0,
+ dma_core_idle_ind_mask);
+ }
+ }
+
+ /* PDMA, Two engines in Full chip */
+ if (s)
+ seq_puts(s,
+ "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
+ "---- ------- ------------ ----------------------\n");
+
+ for (i = 0 ; i < NUM_OF_PDMA ; i++) {
+ engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i;
+ offset = i * PDMA_OFFSET;
+ dma_core_idle_ind_mask = RREG32(mmPDMA0_CORE_IDLE_IND_MASK + offset);
+
+ qm_glbl_sts0 = RREG32(mmPDMA0_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmPDMA0_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmPDMA0_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_idle_ind_mask);
+ is_idle &= is_eng_idle;
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+
+ if (s)
+ seq_printf(s, pdma_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ dma_core_idle_ind_mask);
+ }
+
+ /* NIC, twelve macros in Full chip */
+ if (s && hdev->nic_ports_mask)
+ seq_puts(s,
+ "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
+ "--- ------- ------------ ----------\n");
+
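+	/* NIC engines come in pairs per macro: an even index starts at the macro base offset,
+	 * an odd index adds the offset of the macro's second QMAN
+	 */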
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
+ if (!(i & 1))
+ offset = i / 2 * NIC_OFFSET;
+ else
+ offset += NIC_QM_OFFSET;
+
+ if (!(hdev->nic_ports_mask & BIT(i)))
+ continue;
+
+ engine_idx = GAUDI2_ENGINE_ID_NIC0_0 + i;
+
+ qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmNIC0_QM0_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
+ is_idle &= is_eng_idle;
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+
+ if (s)
+ seq_printf(s, nic_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ qm_cgm_sts);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
+ "--- ---- ------- ------------ ---------------\n");
+ /* MME, one per Dcore */
+ for (i = 0 ; i < NUM_OF_DCORES ; i++) {
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_MME + i * GAUDI2_ENGINE_ID_DCORE_OFFSET;
+ offset = i * DCORE_OFFSET;
+
+ qm_glbl_sts0 = RREG32(mmDCORE0_MME_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmDCORE0_MME_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmDCORE0_MME_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
+ is_idle &= is_eng_idle;
+
+ mme_arch_sts = RREG32(mmDCORE0_MME_CTRL_LO_ARCH_STATUS + offset);
+ is_eng_idle &= IS_MME_IDLE(mme_arch_sts);
+ is_idle &= is_eng_idle;
+
+ if (s)
+ seq_printf(s, mme_fmt, i, "N",
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0,
+ mme_arch_sts);
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+ }
+
+ /*
+ * TPC
+ */
+ if (s && prop->tpc_enabled_mask)
+ seq_puts(s,
+ "\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_IDLE_IND_MASK\n"
+ "---- --- -------- ------------ ---------- ----------------------\n");
+
+ gaudi2_iterate_tpcs(hdev, &tpc_iter);
+
+ /* Decoders, two each Dcore and two shared PCIe decoders */
+ if (s && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
+ seq_puts(s,
+ "\nCORE DEC is_idle VSI_CMD_SWREG15\n"
+ "---- --- ------- ---------------\n");
+
+ for (i = 0 ; i < NUM_OF_DCORES ; i++) {
+ for (j = 0 ; j < NUM_OF_DEC_PER_DCORE ; j++) {
+ dec_enabled_bit = 1 << (i * NUM_OF_DEC_PER_DCORE + j);
+ if (!(prop->decoder_enabled_mask & dec_enabled_bit))
+ continue;
+
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_DEC_0 +
+ i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j;
+ offset = i * DCORE_OFFSET + j * DCORE_DEC_OFFSET;
+
+ dec_swreg15 = RREG32(mmDCORE0_DEC0_CMD_SWREG15 + offset);
+ is_eng_idle = IS_DEC_IDLE(dec_swreg15);
+ is_idle &= is_eng_idle;
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+
+ if (s)
+ seq_printf(s, dec_fmt, i, j, is_eng_idle ? "Y" : "N", dec_swreg15);
+ }
+ }
+
+ if (s && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
+ seq_puts(s,
+ "\nPCIe DEC is_idle VSI_CMD_SWREG15\n"
+ "-------- ------- ---------------\n");
+
+ /* Check shared(PCIe) decoders */
+ for (i = 0 ; i < NUM_OF_DEC_PER_DCORE ; i++) {
+ dec_enabled_bit = PCIE_DEC_SHIFT + i;
+ if (!(prop->decoder_enabled_mask & BIT(dec_enabled_bit)))
+ continue;
+
+ engine_idx = GAUDI2_PCIE_ENGINE_ID_DEC_0 + i;
+ offset = i * DCORE_DEC_OFFSET;
+ dec_swreg15 = RREG32(mmPCIE_DEC0_CMD_SWREG15 + offset);
+ is_eng_idle = IS_DEC_IDLE(dec_swreg15);
+ is_idle &= is_eng_idle;
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+
+ if (s)
+ seq_printf(s, pcie_dec_fmt, i, is_eng_idle ? "Y" : "N", dec_swreg15);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nCORE ROT is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
+ "---- ---- ------- ------------ ---------- -------------\n");
+
+ for (i = 0 ; i < NUM_OF_ROT ; i++) {
+ engine_idx = GAUDI2_ENGINE_ID_ROT_0 + i;
+
+ offset = i * ROT_OFFSET;
+
+ qm_glbl_sts0 = RREG32(mmROT0_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmROT0_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmROT0_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
+ is_idle &= is_eng_idle;
+
+ if (mask && !is_eng_idle)
+ set_bit(engine_idx, mask);
+
+ if (s)
+ seq_printf(s, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, "-");
+ }
+
+ return is_idle;
+}
+
+static void gaudi2_hw_queues_lock(struct hl_device *hdev)
+ __acquires(&gaudi2->hw_queues_lock)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ spin_lock(&gaudi2->hw_queues_lock);
+}
+
+static void gaudi2_hw_queues_unlock(struct hl_device *hdev)
+ __releases(&gaudi2->hw_queues_lock)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ spin_unlock(&gaudi2->hw_queues_lock);
+}
+
+static void gaudi2_kdma_lock(struct hl_device *hdev, int dcore_id)
+ __acquires(&gaudi2->kdma_lock)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ spin_lock(&gaudi2->kdma_lock);
+}
+
+static void gaudi2_kdma_unlock(struct hl_device *hdev, int dcore_id)
+ __releases(&gaudi2->kdma_lock)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ spin_unlock(&gaudi2->kdma_lock);
+}
+
+static u32 gaudi2_get_pci_id(struct hl_device *hdev)
+{
+ return hdev->pdev->device;
+}
+
+static int gaudi2_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_get_eeprom_data(hdev, data, max_size);
+}
+
+static void gaudi2_update_eq_ci(struct hl_device *hdev, u32 val)
+{
+ WREG32(mmCPU_IF_EQ_RD_OFFS, val);
+}
+
+static void *gaudi2_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (aggregate) {
+ *size = (u32) sizeof(gaudi2->events_stat_aggregate);
+ return gaudi2->events_stat_aggregate;
+ }
+
+ *size = (u32) sizeof(gaudi2->events_stat);
+ return gaudi2->events_stat;
+}
+
+static void gaudi2_mmu_vdec_dcore_prepare(struct hl_device *hdev, int dcore_id,
+ int dcore_vdec_id, u32 rw_asid, u32 rw_mmu_bp)
+{
+ u32 offset = (mmDCORE0_VDEC1_BRDG_CTRL_BASE - mmDCORE0_VDEC0_BRDG_CTRL_BASE) *
+ dcore_vdec_id + DCORE_OFFSET * dcore_id;
+
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID + offset, rw_asid);
+
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID + offset, rw_asid);
+
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID + offset, rw_asid);
+
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID + offset, rw_asid);
+
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID + offset, rw_asid);
+}
+
+static void gaudi2_mmu_dcore_prepare(struct hl_device *hdev, int dcore_id, u32 asid)
+{
+ u32 rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) |
+ (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 dcore_offset = dcore_id * DCORE_OFFSET;
+ u32 vdec_id, i, ports_offset, reg_val;
+ u8 edma_seq_base;
+
+ /* EDMA */
+ edma_seq_base = dcore_id * NUM_OF_EDMA_PER_DCORE;
+ if (prop->edma_enabled_mask & BIT(edma_seq_base)) {
+ WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0);
+ WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid);
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0);
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_ASID + dcore_offset, rw_asid);
+ }
+
+ if (prop->edma_enabled_mask & BIT(edma_seq_base + 1)) {
+ WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0);
+ WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid);
+ WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_ASID + dcore_offset, rw_asid);
+ WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0);
+ }
+
+ /* Sync Mngr */
+ WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV + dcore_offset, asid);
+	/*
+	 * Sync Mngrs on dcores 1-3 are exposed to the user, so the user ASID must
+	 * be used for any access type
+	 */
+ if (dcore_id > 0) {
+ reg_val = (asid << DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_RD_SHIFT) |
+ (asid << DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_WR_SHIFT);
+ WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID + dcore_offset, reg_val);
+ WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP + dcore_offset, 0);
+ }
+
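+	/* MME */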
+ WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_MMU_BP + dcore_offset, 0);
+ WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_ASID + dcore_offset, rw_asid);
+
+ for (i = 0 ; i < NUM_OF_MME_SBTE_PORTS ; i++) {
+ ports_offset = i * DCORE_MME_SBTE_OFFSET;
+ WREG32(mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_MMU_BP +
+ dcore_offset + ports_offset, 0);
+ WREG32(mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_ASID +
+ dcore_offset + ports_offset, rw_asid);
+ }
+
+ for (i = 0 ; i < NUM_OF_MME_WB_PORTS ; i++) {
+ ports_offset = i * DCORE_MME_WB_OFFSET;
+ WREG32(mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_MMU_BP +
+ dcore_offset + ports_offset, 0);
+ WREG32(mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_ASID +
+ dcore_offset + ports_offset, rw_asid);
+ }
+
+ WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0);
+ WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid);
+
+ /*
+ * Decoders
+ */
+ for (vdec_id = 0 ; vdec_id < NUM_OF_DEC_PER_DCORE ; vdec_id++) {
+ if (prop->decoder_enabled_mask & BIT(dcore_id * NUM_OF_DEC_PER_DCORE + vdec_id))
+ gaudi2_mmu_vdec_dcore_prepare(hdev, dcore_id, vdec_id, rw_asid, 0);
+ }
+}
+
+static void gudi2_mmu_vdec_shared_prepare(struct hl_device *hdev,
+ int shared_vdec_id, u32 rw_asid, u32 rw_mmu_bp)
+{
+ u32 offset = (mmPCIE_VDEC1_BRDG_CTRL_BASE - mmPCIE_VDEC0_BRDG_CTRL_BASE) * shared_vdec_id;
+
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID + offset, rw_asid);
+
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID + offset, rw_asid);
+
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID + offset, rw_asid);
+
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID + offset, rw_asid);
+
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID + offset, rw_asid);
+}
+
+static void gudi2_mmu_arc_farm_arc_dup_eng_prepare(struct hl_device *hdev, int arc_farm_id,
+ u32 rw_asid, u32 rw_mmu_bp)
+{
+ u32 offset = (mmARC_FARM_ARC1_DUP_ENG_BASE - mmARC_FARM_ARC0_DUP_ENG_BASE) * arc_farm_id;
+
+ WREG32(mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_MMU_BP + offset, rw_mmu_bp);
+ WREG32(mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_ASID + offset, rw_asid);
+}
+
+static void gaudi2_arc_mmu_prepare(struct hl_device *hdev, u32 cpu_id, u32 asid)
+{
+ u32 reg_base, reg_offset, reg_val = 0;
+
+ reg_base = gaudi2_arc_blocks_bases[cpu_id];
+
+ /* Enable MMU and configure asid for all relevant ARC regions */
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK, 0);
+ reg_val |= FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_MASK, asid);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION3_GENERAL);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION4_HBM0_FW);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION5_HBM1_GC_DATA);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION6_HBM2_GC_DATA);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION7_HBM3_GC_DATA);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION9_PCIE);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION10_GENERAL);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION11_GENERAL);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION12_GENERAL);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION13_GENERAL);
+ WREG32(reg_base + reg_offset, reg_val);
+
+ reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION14_GENERAL);
+ WREG32(reg_base + reg_offset, reg_val);
+}
+
+static int gaudi2_arc_mmu_prepare_all(struct hl_device *hdev, u32 asid)
+{
+ int i;
+
+ if (hdev->fw_components & FW_TYPE_BOOT_CPU)
+ return hl_fw_cpucp_engine_core_asid_set(hdev, asid);
+
+ for (i = CPU_ID_SCHED_ARC0 ; i < NUM_OF_ARC_FARMS_ARC ; i++)
+ gaudi2_arc_mmu_prepare(hdev, i, asid);
+
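+	/* Iterate one QMAN at a time (each QMAN exposes 4 streams/queue IDs) */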
+ for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i += 4) {
+ if (!gaudi2_is_queue_enabled(hdev, i))
+ continue;
+
+ gaudi2_arc_mmu_prepare(hdev, gaudi2_queue_id_to_arc_id[i], asid);
+ }
+
+ return 0;
+}
+
+static int gaudi2_mmu_shared_prepare(struct hl_device *hdev, u32 asid)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 rw_asid, offset;
+ int rc, i;
+
+ rw_asid = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_MASK, asid) |
+ FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_MASK, asid);
+
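+	/* PDMA */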
+ WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_ASID, rw_asid);
+ WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP, 0);
+ WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_ASID, rw_asid);
+ WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_MMU_BP, 0);
+
+ WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_ASID, rw_asid);
+ WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP, 0);
+ WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_ASID, rw_asid);
+ WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_MMU_BP, 0);
+
+ /* ROT */
+ for (i = 0 ; i < NUM_OF_ROT ; i++) {
+ offset = i * ROT_OFFSET;
+ WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_ASID + offset, rw_asid);
+ WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0);
+ RMWREG32(mmROT0_CPL_QUEUE_AWUSER + offset, asid, MMUBP_ASID_MASK);
+ RMWREG32(mmROT0_DESC_HBW_ARUSER_LO + offset, asid, MMUBP_ASID_MASK);
+ RMWREG32(mmROT0_DESC_HBW_AWUSER_LO + offset, asid, MMUBP_ASID_MASK);
+ }
+
+ /* Shared Decoders are the last bits in the decoders mask */
+ if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 0))
+ gudi2_mmu_vdec_shared_prepare(hdev, 0, rw_asid, 0);
+
+ if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 1))
+ gudi2_mmu_vdec_shared_prepare(hdev, 1, rw_asid, 0);
+
+ /* arc farm arc dup eng */
+ for (i = 0 ; i < NUM_OF_ARC_FARMS_ARC ; i++)
+ gudi2_mmu_arc_farm_arc_dup_eng_prepare(hdev, i, rw_asid, 0);
+
+ rc = gaudi2_arc_mmu_prepare_all(hdev, asid);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static void gaudi2_tpc_mmu_prepare(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ void *data)
+{
+ struct gaudi2_tpc_mmu_data *mmu_data = (struct gaudi2_tpc_mmu_data *)data;
+
+ WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0);
+ WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_ASID + offset, mmu_data->rw_asid);
+ WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0);
+ WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_ASID + offset, mmu_data->rw_asid);
+}
+
+/* zero the MMUBP and set the ASID */
+static int gaudi2_mmu_prepare(struct hl_device *hdev, u32 asid)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct gaudi2_tpc_mmu_data tpc_mmu_data;
+ struct iterate_module_ctx tpc_iter = {
+ .fn = &gaudi2_tpc_mmu_prepare,
+ .data = &tpc_mmu_data,
+ };
+ int rc, i;
+
+ if (asid & ~DCORE0_HMMU0_STLB_ASID_ASID_MASK) {
+ dev_crit(hdev->dev, "asid %u is too big\n", asid);
+ return -EINVAL;
+ }
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_MMU_MASK))
+ return 0;
+
+ rc = gaudi2_mmu_shared_prepare(hdev, asid);
+ if (rc)
+ return rc;
+
+ /* configure DCORE MMUs */
+ tpc_mmu_data.rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) |
+ (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT);
+ gaudi2_iterate_tpcs(hdev, &tpc_iter);
+ for (i = 0 ; i < NUM_OF_DCORES ; i++)
+ gaudi2_mmu_dcore_prepare(hdev, i, asid);
+
+ return 0;
+}
+
+static inline bool is_info_event(u32 event)
+{
+ switch (event) {
+ case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void gaudi2_print_irq_info(struct hl_device *hdev, u16 event_type)
+{
+ char desc[64] = "";
+ bool event_valid = false;
+
+	/* Return in case of a NIC status event - these events are received periodically and are
+	 * not an indication of an error, thus not printed.
+	 */
+ if (event_type >= GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 &&
+ event_type <= GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1)
+ return;
+
+ if (gaudi2_irq_map_table[event_type].valid) {
+		snprintf(desc, sizeof(desc), "%s", gaudi2_irq_map_table[event_type].name);
+ event_valid = true;
+ }
+
+ if (!event_valid)
+ snprintf(desc, sizeof(desc), "N/A");
+
+ if (is_info_event(event_type))
+ dev_info_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ event_type, desc);
+ else
+ dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ event_type, desc);
+}
+
+static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_ecc_data *ecc_data)
+{
+ u64 ecc_address = 0, ecc_syndrom = 0;
+ u8 memory_wrapper_idx = 0;
+
+ ecc_address = le64_to_cpu(ecc_data->ecc_address);
+ ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
+ memory_wrapper_idx = ecc_data->memory_wrapper_idx;
+
+ dev_err(hdev->dev,
+		"ECC error detected. address: %#llx. Syndrome: %#llx. block id %u. critical %u.\n",
+ ecc_address, ecc_syndrom, memory_wrapper_idx, ecc_data->is_critical);
+
+ return !!ecc_data->is_critical;
+}
+
+/*
+ * gaudi2_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
+ *
+ * @idx: the current pi/ci value
+ * @q_len: the queue length (power of 2)
+ *
+ * @return the cyclically decremented index
+ */
+static inline u32 gaudi2_queue_idx_dec(u32 idx, u32 q_len)
+{
+ u32 mask = q_len - 1;
+
+ /*
+ * modular decrement is equivalent to adding (queue_size -1)
+ * later we take LSBs to make sure the value is in the
+ * range [0, queue_len - 1]
+ */
+ return (idx + q_len - 1) & mask;
+}
+
+/**
+ * gaudi2_print_sw_config_stream_data - print SW config stream data
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @stream: the QMAN's stream
+ * @qman_base: base address of QMAN registers block
+ */
+static void gaudi2_print_sw_config_stream_data(struct hl_device *hdev,
+ u32 stream, u64 qman_base)
+{
+ u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr;
+ u32 cq_ptr_lo_off, size;
+
+ cq_ptr_lo_off = mmDCORE0_TPC0_QM_CQ_PTR_LO_1 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0;
+
+ cq_ptr_lo = qman_base + (mmDCORE0_TPC0_QM_CQ_PTR_LO_0 - mmDCORE0_TPC0_QM_BASE) +
+ stream * cq_ptr_lo_off;
+
+ cq_ptr_hi = cq_ptr_lo + (mmDCORE0_TPC0_QM_CQ_PTR_HI_0 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0);
+
+ cq_tsize = cq_ptr_lo + (mmDCORE0_TPC0_QM_CQ_TSIZE_0 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0);
+
+ cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
+ size = RREG32(cq_tsize);
+ dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n",
+ stream, cq_ptr, size);
+}
+
+/**
+ * gaudi2_print_last_pqes_on_err - print last PQEs on error
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @qid_base: first QID of the QMAN (out of 4 streams)
+ * @stream: the QMAN's stream
+ * @qman_base: base address of QMAN registers block
+ * @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE)
+ */
+static void gaudi2_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base, u32 stream,
+ u64 qman_base, bool pr_sw_conf)
+{
+ u32 ci, qm_ci_stream_off;
+ struct hl_hw_queue *q;
+ u64 pq_ci;
+ int i;
+
+ q = &hdev->kernel_queues[qid_base + stream];
+
+ qm_ci_stream_off = mmDCORE0_TPC0_QM_PQ_CI_1 - mmDCORE0_TPC0_QM_PQ_CI_0;
+ pq_ci = qman_base + (mmDCORE0_TPC0_QM_PQ_CI_0 - mmDCORE0_TPC0_QM_BASE) +
+ stream * qm_ci_stream_off;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (pr_sw_conf)
+ gaudi2_print_sw_config_stream_data(hdev, stream, qman_base);
+
+ ci = RREG32(pq_ci);
+
+	/* we should start printing from ci - 1 */
+ ci = gaudi2_queue_idx_dec(ci, HL_QUEUE_LENGTH);
+
+ for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) {
+ struct hl_bd *bd;
+ u64 addr;
+ u32 len;
+
+ bd = q->kernel_address;
+ bd += ci;
+
+ len = le32_to_cpu(bd->len);
+		/* len 0 means an uninitialized entry - break */
+ if (!len)
+ break;
+
+ addr = le64_to_cpu(bd->ptr);
+
+ dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n",
+ stream, ci, addr, len);
+
+ /* get previous ci, wrap if needed */
+ ci = gaudi2_queue_idx_dec(ci, HL_QUEUE_LENGTH);
+ }
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+}
+
+/**
+ * print_qman_data_on_err - extract QMAN data on error
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @qid_base: first QID of the QMAN (out of 4 streams)
+ * @stream: the QMAN's stream
+ * @qman_base: base address of QMAN registers block
+ *
+ * This function attempts to extract as much data as possible on a QMAN error.
+ * On upper CP print the SW config stream data and last 8 PQEs.
+ * On lower CP print SW config data and last PQEs of ALL 4 upper CPs
+ */
+static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base, u32 stream, u64 qman_base)
+{
+ u32 i;
+
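+	/* stream == QMAN_STREAMS indicates the lower CP; otherwise dump a single upper CP stream */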
+ if (stream != QMAN_STREAMS) {
+ gaudi2_print_last_pqes_on_err(hdev, qid_base, stream, qman_base, true);
+ return;
+ }
+
+ gaudi2_print_sw_config_stream_data(hdev, stream, qman_base);
+
+ for (i = 0 ; i < QMAN_STREAMS ; i++)
+ gaudi2_print_last_pqes_on_err(hdev, qid_base, i, qman_base, false);
+}
+
+static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *qm_name,
+ u64 qman_base, u32 qid_base)
+{
+ u32 i, j, glbl_sts_val, arb_err_val, num_error_causes;
+ u64 glbl_sts_addr, arb_err_addr;
+ char reg_desc[32];
+
+ glbl_sts_addr = qman_base + (mmDCORE0_TPC0_QM_GLBL_ERR_STS_0 - mmDCORE0_TPC0_QM_BASE);
+ arb_err_addr = qman_base + (mmDCORE0_TPC0_QM_ARB_ERR_CAUSE - mmDCORE0_TPC0_QM_BASE);
+
+ /* Iterate through all stream GLBL_ERR_STS registers + Lower CP */
+ for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
+ glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);
+
+ if (!glbl_sts_val)
+ continue;
+
+ if (i == QMAN_STREAMS) {
+ snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
+ num_error_causes = GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE;
+ } else {
+ snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
+ num_error_causes = GAUDI2_NUM_OF_QM_ERR_CAUSE;
+ }
+
+ for (j = 0 ; j < num_error_causes ; j++)
+ if (glbl_sts_val & BIT(j))
+ dev_err_ratelimited(hdev->dev, "%s %s. err cause: %s\n",
+ qm_name, reg_desc,
+ i == QMAN_STREAMS ?
+ gaudi2_qman_lower_cp_error_cause[j] :
+ gaudi2_qman_error_cause[j]);
+
+ print_qman_data_on_err(hdev, qid_base, i, qman_base);
+ }
+
+ arb_err_val = RREG32(arb_err_addr);
+
+ if (!arb_err_val)
+ return;
+
+ for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
+ if (arb_err_val & BIT(j))
+ dev_err_ratelimited(hdev->dev, "%s ARB_ERR. err cause: %s\n",
+ qm_name, gaudi2_qman_arb_error_cause[j]);
+ }
+}
+
+static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
+ u64 rtr_mstr_if_base_addr, bool is_write, char *name,
+ bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info)
+{
+ u32 razwi_hi, razwi_lo, razwi_xy;
+
+ if (is_write) {
+ if (read_razwi_regs) {
+ razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI);
+ razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY);
+ } else {
+ razwi_hi = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_hi_reg);
+ razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg);
+ razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg);
+ }
+
+ dev_err_ratelimited(hdev->dev,
+ "%s-RAZWI SHARED RR HBW WR error, captured address HI 0x%x LO 0x%x, Initiator coordinates 0x%x\n",
+ name, razwi_hi, razwi_lo, razwi_xy);
+ } else {
+ if (read_razwi_regs) {
+ razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
+ razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY);
+ } else {
+ razwi_hi = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_hi_reg);
+ razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg);
+ razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg);
+ }
+
+ dev_err_ratelimited(hdev->dev,
+ "%s-RAZWI SHARED RR HBW AR error, captured address HI 0x%x LO 0x%x, Initiator coordinates 0x%x\n",
+ name, razwi_hi, razwi_lo, razwi_xy);
+ }
+}
+
+static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev,
+ u64 rtr_mstr_if_base_addr, bool is_write, char *name,
+ bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info)
+{
+ u32 razwi_addr, razwi_xy;
+
+ if (is_write) {
+ if (read_razwi_regs) {
+ razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY);
+ } else {
+ razwi_addr = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_reg);
+ razwi_xy = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_id_reg);
+ }
+
+ dev_err_ratelimited(hdev->dev,
+ "%s-RAZWI SHARED RR LBW WR error, mstr_if 0x%llx, captured address 0x%x, Initiator coordinates 0x%x\n",
+ name, rtr_mstr_if_base_addr, razwi_addr, razwi_xy);
+ } else {
+ if (read_razwi_regs) {
+ razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY);
+ } else {
+ razwi_addr = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_reg);
+ razwi_xy = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_id_reg);
+ }
+
+ dev_err_ratelimited(hdev->dev,
+ "%s-RAZWI SHARED RR LBW AR error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x\n",
+ name, rtr_mstr_if_base_addr, razwi_addr, razwi_xy);
+ }
+}
+
+/*
+ * This function handles RR (Range Register) hit events
+ * raised by initiators, not PSOC RAZWI.
+ */
+static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
+ enum razwi_event_sources module, u8 module_idx,
+ u8 module_sub_idx, struct hl_eq_razwi_info *razwi_info)
+{
+ bool via_sft = false, read_razwi_regs = false;
+ u32 rtr_id, dcore_id, dcore_rtr_id, sft_id;
+ u64 rtr_mstr_if_base_addr;
+ u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0;
+ u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0;
+ char initiator_name[64];
+
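+	/* On PLDM, when the F/W doesn't include Linux, or when no razwi_info was supplied,
+	 * read the RAZWI registers directly
+	 */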
+ if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX) || !razwi_info)
+ read_razwi_regs = true;
+
+ switch (module) {
+ case RAZWI_TPC:
+ rtr_id = gaudi2_tpc_initiator_rtr_id[module_idx];
+ sprintf(initiator_name, "TPC_%u", module_idx);
+ break;
+ case RAZWI_MME:
+ sprintf(initiator_name, "MME_%u", module_idx);
+ switch (module_sub_idx) {
+ case MME_WAP0:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0;
+ break;
+ case MME_WAP1:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1;
+ break;
+ case MME_WRITE:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write;
+ break;
+ case MME_READ:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read;
+ break;
+ case MME_SBTE0:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0;
+ break;
+ case MME_SBTE1:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1;
+ break;
+ case MME_SBTE2:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2;
+ break;
+ case MME_SBTE3:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3;
+ break;
+ case MME_SBTE4:
+ rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4;
+ break;
+ default:
+ return;
+ }
+ break;
+ case RAZWI_EDMA:
+ sft_id = gaudi2_edma_initiator_sft_id[module_idx].interface_id;
+ dcore_id = gaudi2_edma_initiator_sft_id[module_idx].dcore_id;
+ via_sft = true;
+ sprintf(initiator_name, "EDMA_%u", module_idx);
+ break;
+ case RAZWI_PDMA:
+ rtr_id = gaudi2_pdma_initiator_rtr_id[module_idx];
+ sprintf(initiator_name, "PDMA_%u", module_idx);
+ break;
+ case RAZWI_NIC:
+ rtr_id = gaudi2_nic_initiator_rtr_id[module_idx];
+ sprintf(initiator_name, "NIC_%u", module_idx);
+ break;
+ case RAZWI_DEC:
+ rtr_id = gaudi2_dec_initiator_rtr_id[module_idx];
+ sprintf(initiator_name, "DEC_%u", module_idx);
+ break;
+ case RAZWI_ROT:
+ rtr_id = gaudi2_rot_initiator_rtr_id[module_idx];
+ sprintf(initiator_name, "ROT_%u", module_idx);
+ break;
+ default:
+ return;
+ }
+
+ if (!read_razwi_regs) {
+ if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_HBW) {
+ hbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) &
+ RAZWI_HAPPENED_AW;
+ hbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) &
+ RAZWI_HAPPENED_AR;
+ } else if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_LBW) {
+ lbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) &
+ RAZWI_HAPPENED_AW;
+ lbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) &
+ RAZWI_HAPPENED_AR;
+ }
+ rtr_mstr_if_base_addr = 0;
+
+ goto dump_info;
+ }
+
+ /* Find router mstr_if register base */
+ if (via_sft) {
+ rtr_mstr_if_base_addr = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE +
+ dcore_id * SFT_DCORE_OFFSET +
+ sft_id * SFT_IF_OFFSET +
+ RTR_MSTR_IF_OFFSET;
+ } else {
+ dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE;
+ dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE;
+ rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE +
+ dcore_id * DCORE_OFFSET +
+ dcore_rtr_id * DCORE_RTR_OFFSET +
+ RTR_MSTR_IF_OFFSET;
+ }
+
+ /* Find out event cause by reading "RAZWI_HAPPENED" registers */
+ hbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED);
+
+ hbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED);
+
+ if (via_sft) {
+		/* SFT has a separate MSTR_IF for LBW; only there can we
+		 * read the LBW RAZWI related registers
+		 */
+ u64 base;
+
+ base = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE + dcore_id * SFT_DCORE_OFFSET +
+ RTR_LBW_MSTR_IF_OFFSET;
+
+ lbw_shrd_aw = RREG32(base + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
+
+ lbw_shrd_ar = RREG32(base + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
+ } else {
+ lbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
+
+ lbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
+ }
+
+dump_info:
+ /* check if there is no RR razwi indication at all */
+ if (!hbw_shrd_aw && !hbw_shrd_ar && !lbw_shrd_aw && !lbw_shrd_ar)
+ return;
+
+ if (hbw_shrd_aw) {
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true,
+ initiator_name, read_razwi_regs, razwi_info);
+
+ /* Clear event indication */
+ if (read_razwi_regs)
+ WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw);
+ }
+
+ if (hbw_shrd_ar) {
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false,
+ initiator_name, read_razwi_regs, razwi_info);
+
+ /* Clear event indication */
+ if (read_razwi_regs)
+ WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar);
+ }
+
+ if (lbw_shrd_aw) {
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true,
+ initiator_name, read_razwi_regs, razwi_info);
+
+ /* Clear event indication */
+ if (read_razwi_regs)
+ WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw);
+ }
+
+ if (lbw_shrd_ar) {
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false,
+ initiator_name, read_razwi_regs, razwi_info);
+
+ /* Clear event indication */
+ if (read_razwi_regs)
+ WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar);
+ }
+}
+
+static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u8 mod_idx, sub_mod;
+
+	/* check all TPCs (including the additional TPC in DCORE0, hence the extra iteration) */
+ for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) {
+ if (prop->tpc_enabled_mask & BIT(mod_idx))
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL);
+ }
+
+ /* check all MMEs */
+ for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
+ for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++)
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx,
+ sub_mod, NULL);
+
+ /* check all EDMAs */
+ for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
+ if (prop->edma_enabled_mask & BIT(mod_idx))
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL);
+
+ /* check all PDMAs */
+ for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++)
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL);
+
+ /* check all NICs */
+ for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++)
+ if (hdev->nic_ports_mask & BIT(mod_idx))
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0,
+ NULL);
+
+ /* check all DECs */
+ for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++)
+ if (prop->decoder_enabled_mask & BIT(mod_idx))
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL);
+
+ /* check all ROTs */
+ for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++)
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
+}
+
+static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev,
+ u64 rtr_ctrl_base_addr, bool is_write)
+{
+ u32 razwi_hi, razwi_lo;
+
+ if (is_write) {
+ razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_HI);
+ razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_LO);
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped HBW WR error, ctr_base 0x%llx, captured address HI 0x%x, LO 0x%x\n",
+ rtr_ctrl_base_addr, razwi_hi, razwi_lo);
+
+ /* Clear set indication */
+ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1);
+ } else {
+ razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_HI);
+
+ razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_LO);
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped HBW AR error, ctr_base 0x%llx, captured address HI 0x%x, LO 0x%x\n",
+ rtr_ctrl_base_addr, razwi_hi, razwi_lo);
+
+ /* Clear set indication */
+ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1);
+ }
+}
+
+static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev,
+ u64 rtr_ctrl_base_addr, bool is_write)
+{
+ u32 razwi_addr;
+
+ if (is_write) {
+ razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped LBW WR error, ctr_base 0x%llx, captured address 0x%x\n",
+ rtr_ctrl_base_addr, razwi_addr);
+
+ /* Clear set indication */
+ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1);
+ } else {
+ razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped LBW AR error, ctr_base 0x%llx, captured address 0x%x\n",
+ rtr_ctrl_base_addr, razwi_addr);
+
+ /* Clear set indication */
+ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1);
+ }
+}
+
+/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
+static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev)
+{
+ u32 hbw_aw_set, hbw_ar_set, lbw_aw_set, lbw_ar_set, rtr_id, dcore_id, dcore_rtr_id, xy,
+ razwi_mask_info, razwi_intr = 0;
+ int rtr_map_arr_len = NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES;
+ u64 rtr_ctrl_base_addr;
+
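+	/* The RAZWI interrupt register is read (and later cleared) only on PLDM or when the F/W
+	 * doesn't handle the interrupts
+	 */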
+ if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) {
+ razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT);
+ if (!razwi_intr)
+ return;
+ }
+
+ razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
+
+ xy = (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK)
+ >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_SHIFT;
+
+ dev_err_ratelimited(hdev->dev,
+ "PSOC RAZWI interrupt: Mask %d, WAS_AR %d, WAS_AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
+ (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK)
+ >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_SHIFT,
+ (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK)
+ >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_SHIFT,
+ (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK)
+ >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_SHIFT, xy,
+ (razwi_mask_info &
+ PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK)
+ >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_SHIFT);
+ if (xy == 0) {
+ dev_err_ratelimited(hdev->dev,
+ "PSOC RAZWI interrupt: received event from 0 rtr coordinates\n");
+ goto clear;
+ }
+
+ /* Find router id by router coordinates */
+ for (rtr_id = 0 ; rtr_id < rtr_map_arr_len ; rtr_id++)
+ if (rtr_coordinates_to_rtr_id[rtr_id] == xy)
+ break;
+
+ if (rtr_id == rtr_map_arr_len) {
+ dev_err_ratelimited(hdev->dev,
+ "PSOC RAZWI interrupt: invalid rtr coordinates (0x%x)\n", xy);
+ goto clear;
+ }
+
+ /* Find router mstr_if register base */
+ dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE;
+ dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE;
+ rtr_ctrl_base_addr = mmDCORE0_RTR0_CTRL_BASE + dcore_id * DCORE_OFFSET +
+ dcore_rtr_id * DCORE_RTR_OFFSET;
+
+ hbw_aw_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET);
+ hbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET);
+ lbw_aw_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET);
+ lbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET);
+
+ if (hbw_aw_set)
+ gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_ctrl_base_addr, true);
+
+ if (hbw_ar_set)
+ gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_ctrl_base_addr, false);
+
+ if (lbw_aw_set)
+ gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_ctrl_base_addr, true);
+
+ if (lbw_ar_set)
+ gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_ctrl_base_addr, false);
+
+clear:
+ /* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
+ if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX))
+ WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr);
+}
+
+static void _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base)
+{
+ u32 i, sts_val, sts_clr_val = 0;
+
+ sts_val = RREG32(qman_base + QM_SEI_STATUS_OFFSET);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ dev_err_ratelimited(hdev->dev, "QM SEI. err cause: %s\n",
+ gaudi2_qm_sei_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ }
+ }
+
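+	/* Write 1 to clear the reported error bits */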
+ WREG32(qman_base + QM_SEI_STATUS_OFFSET, sts_clr_val);
+}
+
+static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_razwi_info *razwi_info)
+{
+ u64 qman_base;
+ u8 index;
+
+ switch (event_type) {
+ case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC23_AXI_ERR_RSP:
+ index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP;
+ qman_base = mmDCORE0_TPC0_QM_BASE +
+ (index / NUM_OF_TPC_PER_DCORE) * DCORE_OFFSET +
+ (index % NUM_OF_TPC_PER_DCORE) * DCORE_TPC_OFFSET;
+ break;
+ case GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
+ qman_base = mmDCORE0_TPC6_QM_BASE;
+ break;
+ case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE:
+ index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) /
+ (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE -
+ GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE);
+ qman_base = mmDCORE0_MME_QM_BASE + index * DCORE_OFFSET;
+ break;
+ case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
+ case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
+ index = event_type - GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP;
+ qman_base = mmPDMA0_QM_BASE + index * PDMA_OFFSET;
+ break;
+ case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE:
+ index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
+ qman_base = mmROT0_QM_BASE + index * ROT_OFFSET;
+ break;
+ default:
+ return;
+ }
+
+ _gaudi2_handle_qm_sei_err(hdev, qman_base);
+
+	/* There is a single event per NIC macro, so we should check both of its QMAN blocks */
+ if (event_type >= GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE &&
+ event_type <= GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE)
+ _gaudi2_handle_qm_sei_err(hdev, qman_base + NIC_QM_OFFSET);
+
+ /* check if RAZWI happened */
+ if (razwi_info)
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, 0, 0, razwi_info);
+}
+
+static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type)
+{
+ u32 qid_base;
+ u64 qman_base;
+ char desc[32];
+ u8 index;
+
+ switch (event_type) {
+ case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_TPC5_QM:
+ index = event_type - GAUDI2_EVENT_TPC0_QM;
+ qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 + index * QMAN_STREAMS;
+ qman_base = mmDCORE0_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC%d_QM", index);
+ break;
+ case GAUDI2_EVENT_TPC6_QM ... GAUDI2_EVENT_TPC11_QM:
+ index = event_type - GAUDI2_EVENT_TPC6_QM;
+ qid_base = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 + index * QMAN_STREAMS;
+ qman_base = mmDCORE1_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE1_TPC%d_QM", index);
+ break;
+ case GAUDI2_EVENT_TPC12_QM ... GAUDI2_EVENT_TPC17_QM:
+ index = event_type - GAUDI2_EVENT_TPC12_QM;
+ qid_base = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 + index * QMAN_STREAMS;
+ qman_base = mmDCORE2_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE2_TPC%d_QM", index);
+ break;
+ case GAUDI2_EVENT_TPC18_QM ... GAUDI2_EVENT_TPC23_QM:
+ index = event_type - GAUDI2_EVENT_TPC18_QM;
+ qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 + index * QMAN_STREAMS;
+ qman_base = mmDCORE3_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE3_TPC%d_QM", index);
+ break;
+ case GAUDI2_EVENT_TPC24_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0;
+ qman_base = mmDCORE0_TPC6_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC6_QM");
+ break;
+ case GAUDI2_EVENT_MME0_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0;
+ qman_base = mmDCORE0_MME_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_MME_QM");
+ break;
+ case GAUDI2_EVENT_MME1_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0;
+ qman_base = mmDCORE1_MME_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE1_MME_QM");
+ break;
+ case GAUDI2_EVENT_MME2_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0;
+ qman_base = mmDCORE2_MME_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE2_MME_QM");
+ break;
+ case GAUDI2_EVENT_MME3_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0;
+ qman_base = mmDCORE3_MME_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE3_MME_QM");
+ break;
+ case GAUDI2_EVENT_HDMA0_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0;
+ qman_base = mmDCORE0_EDMA0_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA0_QM");
+ break;
+ case GAUDI2_EVENT_HDMA1_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0;
+ qman_base = mmDCORE0_EDMA1_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA1_QM");
+ break;
+ case GAUDI2_EVENT_HDMA2_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0;
+ qman_base = mmDCORE1_EDMA0_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA0_QM");
+ break;
+ case GAUDI2_EVENT_HDMA3_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0;
+ qman_base = mmDCORE1_EDMA1_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA1_QM");
+ break;
+ case GAUDI2_EVENT_HDMA4_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0;
+ qman_base = mmDCORE2_EDMA0_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA0_QM");
+ break;
+ case GAUDI2_EVENT_HDMA5_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0;
+ qman_base = mmDCORE2_EDMA1_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA1_QM");
+ break;
+ case GAUDI2_EVENT_HDMA6_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0;
+ qman_base = mmDCORE3_EDMA0_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA0_QM");
+ break;
+ case GAUDI2_EVENT_HDMA7_QM:
+ qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0;
+ qman_base = mmDCORE3_EDMA1_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA1_QM");
+ break;
+ case GAUDI2_EVENT_PDMA0_QM:
+ qid_base = GAUDI2_QUEUE_ID_PDMA_0_0;
+ qman_base = mmPDMA0_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "PDMA0_QM");
+ break;
+ case GAUDI2_EVENT_PDMA1_QM:
+ qid_base = GAUDI2_QUEUE_ID_PDMA_1_0;
+ qman_base = mmPDMA1_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "PDMA1_QM");
+ break;
+ case GAUDI2_EVENT_ROTATOR0_ROT0_QM:
+ qid_base = GAUDI2_QUEUE_ID_ROT_0_0;
+ qman_base = mmROT0_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "ROTATOR0_QM");
+ break;
+ case GAUDI2_EVENT_ROTATOR1_ROT1_QM:
+ qid_base = GAUDI2_QUEUE_ID_ROT_1_0;
+ qman_base = mmROT1_QM_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "ROTATOR1_QM");
+ break;
+ default:
+ return;
+ }
+
+ gaudi2_handle_qman_err_generic(hdev, desc, qman_base, qid_base);
+
+ /* Handle EDMA QM SEI here because there is no AXI error response event for EDMA */
+ if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM)
+ _gaudi2_handle_qm_sei_err(hdev, qman_base);
+}
+
+static void gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev)
+{
+ u32 i, sts_val, sts_clr_val = 0;
+
+ sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ dev_err_ratelimited(hdev->dev, "ARC SEI. err cause: %s\n",
+ gaudi2_arc_sei_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ }
+ }
+
+ WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR, sts_clr_val);
+}
+
+static void gaudi2_handle_cpu_sei_err(struct hl_device *hdev)
+{
+ u32 i, sts_val, sts_clr_val = 0;
+
+ sts_val = RREG32(mmCPU_IF_CPU_SEI_INTR_STS);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ dev_err_ratelimited(hdev->dev, "CPU SEI. err cause: %s\n",
+ gaudi2_cpu_sei_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ }
+ }
+
+ WREG32(mmCPU_IF_CPU_SEI_INTR_CLR, sts_clr_val);
+}
+
+static void gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index,
+ struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause)
+{
+ u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++)
+ if (intr_cause_data & BIT(i))
+ dev_err_ratelimited(hdev->dev, "ROT%u. err cause: %s\n",
+ rot_index, guadi2_rot_error_cause[i]);
+
+ /* check if RAZWI happened */
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0,
+ &razwi_with_intr_cause->razwi_info);
+}
+
+static void gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, char *interrupt_name,
+ struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause)
+{
+ u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++)
+ if (intr_cause_data & BIT(i))
+ dev_err_ratelimited(hdev->dev, "TPC%d_%s interrupt cause: %s\n",
+ tpc_index, interrupt_name, gaudi2_tpc_interrupts_cause[i]);
+
+ /* check if RAZWI happened */
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0,
+ &razwi_with_intr_cause->razwi_info);
+}
+
+static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const char *interrupt_name,
+ struct hl_eq_razwi_info *razwi_info)
+{
+ u32 sts_addr, sts_val, sts_clr_val = 0;
+ int i;
+
+ if (dec_index < NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES)
+ /* DCORE DEC */
+ sts_addr = mmDCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR +
+ DCORE_OFFSET * (dec_index / NUM_OF_DEC_PER_DCORE) +
+ DCORE_VDEC_OFFSET * (dec_index % NUM_OF_DEC_PER_DCORE);
+ else
+ /* PCIE DEC */
+ sts_addr = mmPCIE_VDEC0_BRDG_CTRL_CAUSE_INTR + PCIE_VDEC_OFFSET *
+ (dec_index - NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES);
+
+ sts_val = RREG32(sts_addr);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ dev_err_ratelimited(hdev->dev, "DEC%u_%s err cause: %s\n",
+ dec_index, interrupt_name, gaudi2_dec_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ }
+ }
+
+ /* check if RAZWI happened */
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info);
+
+	/* Write 1 to clear errors */
+ WREG32(sts_addr, sts_clr_val);
+}
+
+static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const char *interrupt_name,
+ struct hl_eq_razwi_info *razwi_info)
+{
+ u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0;
+ int i;
+
+ sts_addr = mmDCORE0_MME_CTRL_LO_INTR_CAUSE + DCORE_OFFSET * mme_index;
+ sts_clr_addr = mmDCORE0_MME_CTRL_LO_INTR_CLEAR + DCORE_OFFSET * mme_index;
+
+ sts_val = RREG32(sts_addr);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ dev_err_ratelimited(hdev->dev, "MME%u_%s err cause: %s\n",
+ mme_index, interrupt_name, guadi2_mme_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ }
+ }
+
+ /* check if RAZWI happened */
+ for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++)
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, razwi_info);
+
+ WREG32(sts_clr_addr, sts_clr_val);
+}
+
+static void gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u8 mme_index, u8 sbte_index,
+ u64 intr_cause_data)
+{
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE ; i++)
+ if (intr_cause_data & BIT(i))
+ dev_err_ratelimited(hdev->dev, "MME%uSBTE%u_AXI_ERR_RSP err cause: %s\n",
+ mme_index, sbte_index, guadi2_mme_sbte_error_cause[i]);
+}
+
+static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index,
+ struct hl_eq_razwi_info *razwi_info)
+{
+ u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0;
+ int i;
+
+ sts_addr = mmDCORE0_MME_ACC_INTR_CAUSE + DCORE_OFFSET * mme_index;
+ sts_clr_addr = mmDCORE0_MME_ACC_INTR_CLEAR + DCORE_OFFSET * mme_index;
+
+ sts_val = RREG32(sts_addr);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) {
+ if (sts_val & BIT(i)) {
+ dev_err_ratelimited(hdev->dev,
+ "MME%u_WAP_SOURCE_RESULT_INVALID err cause: %s\n",
+ mme_index, guadi2_mme_wap_error_cause[i]);
+ sts_clr_val |= BIT(i);
+ }
+ }
+
+ /* check if RAZWI happened on WAP0/1 */
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, razwi_info);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, razwi_info);
+
+ WREG32(sts_clr_addr, sts_clr_val);
+}
+
+static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause_data)
+{
+ int i;
+
+	/* If an AXI read or write error is received, an error is reported and an
+	 * interrupt message is sent. Due to an HW erratum, when reading the cause
+	 * register of the KDMA engine, the reported error is always HBW even if
+	 * the actual error was caused by an LBW KDMA transaction.
+	 */
+ for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
+ if (intr_cause_data & BIT(i))
+ dev_err_ratelimited(hdev->dev, "kdma core err cause: %s\n",
+ gaudi2_kdma_core_interrupts_cause[i]);
+}
+
+static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_data)
+{
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
+ if (intr_cause_data & BIT(i))
+ dev_err_ratelimited(hdev->dev, "dma core err cause: %s\n",
+ gaudi2_dma_core_interrupts_cause[i]);
+}
+
+static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data)
+{
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE; i++)
+ if (intr_cause_data & BIT_ULL(i))
+ dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
+ gaudi2_pcie_addr_dec_error_cause[i]);
+}
+
+static void gaudi2_handle_pif_fatal(struct hl_device *hdev, u64 intr_cause_data)
+{
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) {
+ if (intr_cause_data & BIT_ULL(i))
+ dev_err_ratelimited(hdev->dev, "PMMU PIF err cause: %s\n",
+ gaudi2_pmmu_fatal_interrupts_cause[i]);
+ }
+}
+
+static void gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data)
+{
+ u32 dcore_id, hif_id;
+ int i;
+
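+	/* Derive the DCORE and HIF indices from the event number (4 HIF instances per DCORE) */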
+ dcore_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) / 4;
+ hif_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) % 4;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) {
+ if (intr_cause_data & BIT_ULL(i))
+ dev_err_ratelimited(hdev->dev, "DCORE%u_HIF%u: %s\n", dcore_id, hif_id,
+ gaudi2_hif_fatal_interrupts_cause[i]);
+ }
+}
+
+static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu)
+{
+ u32 valid, val;
+ u64 addr;
+
+ valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID));
+
+ if (!(valid & DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_PAGE_ERR_VALID_ENTRY_MASK))
+ return;
+
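+	/* The capture register holds VA[63:32]; the separate VA register holds the lower 32 bits */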
+ val = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE));
+ addr = val & DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_63_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA));
+
+ dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n",
+ is_pmmu ? "PMMU" : "HMMU", addr);
+
+ WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0);
+}
+
+static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu)
+{
+ u32 valid, val;
+ u64 addr;
+
+ valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID));
+
+ if (!(valid & DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_ACCESS_ERR_VALID_ENTRY_MASK))
+ return;
+
+ val = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE));
+ addr = val & DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_63_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA));
+
+ dev_err_ratelimited(hdev->dev, "%s access error on va 0x%llx\n",
+ is_pmmu ? "PMMU" : "HMMU", addr);
+ WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE), 0);
+}
+
+static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char *mmu_name,
+ u64 mmu_base, bool is_pmmu)
+{
+ u32 spi_sei_cause, interrupt_clr = 0x0;
+ int i;
+
+ spi_sei_cause = RREG32(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) {
+ if (spi_sei_cause & BIT(i)) {
+ dev_err_ratelimited(hdev->dev, "%s SPI_SEI ERR. err cause: %s\n",
+ mmu_name, gaudi2_mmu_spi_sei[i].cause);
+
+ if (i == 0)
+ gaudi2_handle_page_error(hdev, mmu_base, is_pmmu);
+ else if (i == 1)
+ gaudi2_handle_access_error(hdev, mmu_base, is_pmmu);
+
+ if (gaudi2_mmu_spi_sei[i].clear_bit >= 0)
+ interrupt_clr |= BIT(gaudi2_mmu_spi_sei[i].clear_bit);
+ }
+ }
+
+ /* Clear cause */
+ WREG32_AND(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET, ~spi_sei_cause);
+
+ /* Clear interrupt */
+ WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr);
+}
+
+static bool gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
+{
+ u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log;
+ u32 cq_intr_addr, cq_intr_val, cq_intr_queue_index;
+ bool reset = true;
+ int i;
+
+ sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index;
+ cq_intr_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_INTR + DCORE_OFFSET * sm_index;
+
+ sei_cause_val = RREG32(sei_cause_addr);
+ sei_cause_cause = FIELD_GET(DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_CAUSE_MASK, sei_cause_val);
+ cq_intr_val = RREG32(cq_intr_addr);
+
+ /* SEI interrupt */
+ if (sei_cause_cause) {
+ /* There are corresponding SEI_CAUSE_log bits for every SEI_CAUSE_cause bit */
+ sei_cause_log = FIELD_GET(DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_LOG_MASK,
+ sei_cause_val);
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE ; i++) {
+ if (!(sei_cause_cause & BIT(i)))
+ continue;
+
+ dev_err_ratelimited(hdev->dev, "SM%u SEI ERR. err cause: %s. %s: 0x%X\n",
+ sm_index,
+ gaudi2_sm_sei_cause[i].cause_name,
+ gaudi2_sm_sei_cause[i].log_name,
+ sei_cause_log & gaudi2_sm_sei_cause[i].log_mask);
+
+ /* Due to a potential H/W issue, do not reset upon BRESP errors */
+ if (i == 2)
+ reset = false;
+ break;
+ }
+
+ /* Clear SM_SEI_CAUSE */
+ WREG32(sei_cause_addr, 0);
+ }
+
+ /* CQ interrupt */
+ if (cq_intr_val & DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_MASK) {
+ cq_intr_queue_index =
+ FIELD_GET(DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_INTR_QUEUE_INDEX_MASK,
+ cq_intr_val);
+
+ dev_err_ratelimited(hdev->dev, "SM%u err. err cause: CQ_INTR. queue index: %u\n",
+ sm_index, cq_intr_queue_index);
+
+ /* Clear CQ_INTR */
+ WREG32(cq_intr_addr, 0);
+ }
+
+ return reset;
+}
+
+static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type)
+{
+ bool is_pmmu = false;
+ char desc[32];
+ u64 mmu_base;
+ u8 index;
+
+ switch (event_type) {
+ case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR:
+ index = (event_type - GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM) / 3;
+ mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP:
+ index = (event_type - GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP);
+ mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR:
+ index = (event_type - GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM) / 3;
+ mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP:
+ index = (event_type - GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP);
+ mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR:
+ index = (event_type - GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM) / 3;
+ mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP:
+ index = (event_type - GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP);
+ mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
+ index = (event_type - GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM) / 3;
+ mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
+ index = (event_type - GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP);
+ mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
+ break;
+ case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
+ case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
+ is_pmmu = true;
+ mmu_base = mmPMMU_HBW_MMU_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "PMMU");
+ break;
+ default:
+ return;
+ }
+
+ gaudi2_handle_mmu_spi_sei_generic(hdev, desc, mmu_base, is_pmmu);
+}
+
+/* returns true if hard reset is required (ECC DERR or Read parity), false otherwise (ECC SERR) */
+static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
+ struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt)
+{
+ u32 addr, beat, beat_shift;
+ bool rc = false;
+
+ dev_err_ratelimited(hdev->dev,
+ "READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n",
+ FIELD_GET(HBM_ECC_SERR_CNTR_MASK, err_cnt),
+ FIELD_GET(HBM_ECC_DERR_CNTR_MASK, err_cnt),
+ FIELD_GET(HBM_RD_PARITY_CNTR_MASK, err_cnt));
+
+ addr = le32_to_cpu(rd_err_data->dbg_rd_err_addr.rd_addr_val);
+ dev_err_ratelimited(hdev->dev,
+ "READ ERROR address: sid(%u), bg(%u), ba(%u), col(%u), row(%u)\n",
+ FIELD_GET(HBM_RD_ADDR_SID_MASK, addr),
+ FIELD_GET(HBM_RD_ADDR_BG_MASK, addr),
+ FIELD_GET(HBM_RD_ADDR_BA_MASK, addr),
+ FIELD_GET(HBM_RD_ADDR_COL_MASK, addr),
+ FIELD_GET(HBM_RD_ADDR_ROW_MASK, addr));
+
+ /* For each beat (RDQS edge), look for possible errors and print relevant info */
+ for (beat = 0 ; beat < 4 ; beat++) {
+ if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
+ (HBM_RD_ERR_SERR_BEAT0_MASK << beat))
+ dev_err_ratelimited(hdev->dev, "Beat%d ECC SERR: DM: %#x, Syndrome: %#x\n",
+ beat,
+ le32_to_cpu(rd_err_data->dbg_rd_err_dm),
+ le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
+
+ if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
+ (HBM_RD_ERR_DERR_BEAT0_MASK << beat)) {
+ dev_err_ratelimited(hdev->dev, "Beat%d ECC DERR: DM: %#x, Syndrome: %#x\n",
+ beat,
+ le32_to_cpu(rd_err_data->dbg_rd_err_dm),
+ le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
+ rc |= true;
+ }
+
+ beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT;
+ if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
+ (HBM_RD_ERR_PAR_ERR_BEAT0_MASK << beat_shift)) {
+ dev_err_ratelimited(hdev->dev,
+ "Beat%d read PARITY: DM: %#x, PAR data: %#x\n",
+ beat,
+ le32_to_cpu(rd_err_data->dbg_rd_err_dm),
+ (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
+ (HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >>
+ (HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift));
+ rc |= true;
+ }
+
+ dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n", beat);
+ dev_err_ratelimited(hdev->dev, "\t0x%08x\n",
+ le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2]));
+ dev_err_ratelimited(hdev->dev, "\t0x%08x\n",
+ le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1]));
+ }
+
+ return rc;
+}
+
+static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev,
+ struct hl_eq_hbm_sei_wr_par_intr_info *wr_par_err_data, u32 err_cnt)
+{
+ struct hbm_sei_wr_cmd_address *wr_cmd_addr = wr_par_err_data->dbg_last_wr_cmds;
+ u32 i, curr_addr, derr = wr_par_err_data->dbg_derr;
+
+ dev_err_ratelimited(hdev->dev, "WRITE PARITY ERROR count: %d\n", err_cnt);
+
+ dev_err_ratelimited(hdev->dev, "CK-0 DERR: 0x%02x, CK-1 DERR: 0x%02x\n",
+ derr & 0x3, derr & 0xc);
+
+ /* JIRA H6-3286 - the following prints may not be valid */
+ dev_err_ratelimited(hdev->dev, "Last latched write commands addresses:\n");
+ for (i = 0 ; i < HBM_WR_PAR_CMD_LIFO_LEN ; i++) {
+ curr_addr = le32_to_cpu(wr_cmd_addr[i].dbg_wr_cmd_addr);
+ dev_err_ratelimited(hdev->dev,
+ "\twrite cmd[%u]: Address: SID(%u) BG(%u) BA(%u) COL(%u).\n",
+ i,
+ FIELD_GET(WR_PAR_LAST_CMD_SID_MASK, curr_addr),
+ FIELD_GET(WR_PAR_LAST_CMD_BG_MASK, curr_addr),
+ FIELD_GET(WR_PAR_LAST_CMD_BA_MASK, curr_addr),
+ FIELD_GET(WR_PAR_LAST_CMD_COL_MASK, curr_addr));
+ }
+}
+
+static void gaudi2_hbm_sei_print_ca_par_info(struct hl_device *hdev,
+ struct hl_eq_hbm_sei_ca_par_intr_info *ca_par_err_data, u32 err_cnt)
+{
+ __le32 *col_cmd = ca_par_err_data->dbg_col;
+ __le16 *row_cmd = ca_par_err_data->dbg_row;
+ u32 i;
+
+ dev_err_ratelimited(hdev->dev, "CA ERROR count: %d\n", err_cnt);
+
+ dev_err_ratelimited(hdev->dev, "Last latched C&R bus commands:\n");
+ for (i = 0 ; i < HBM_CA_ERR_CMD_LIFO_LEN ; i++)
+ dev_err_ratelimited(hdev->dev, "cmd%u: ROW(0x%04x) COL(0x%05x)\n", i,
+ le16_to_cpu(row_cmd[i]) & (u16)GENMASK(13, 0),
+ le32_to_cpu(col_cmd[i]) & (u32)GENMASK(17, 0));
+}
+
+/* Returns true if hard reset is needed or false otherwise */
+static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_hbm_sei_data *sei_data)
+{
+ bool require_hard_reset = false;
+ u32 hbm_id, mc_id, cause_idx;
+
+ hbm_id = (event_type - GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE) / 4;
+ mc_id = ((event_type - GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE) / 2) % 2;
+
+ cause_idx = sei_data->hdr.sei_cause;
+ if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) {
+ dev_err_ratelimited(hdev->dev, "Invalid HBM SEI event cause (%d) provided by FW\n",
+ cause_idx);
+ return true;
+ }
+
+ dev_err_ratelimited(hdev->dev,
+ "System Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Critical(%u). Error cause: %s\n",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ sei_data->hdr.is_critical, hbm_mc_sei_cause[cause_idx]);
+
+ /* Print error-specific info */
+ switch (cause_idx) {
+ case HBM_SEI_CATTRIP:
+ require_hard_reset = true;
+ break;
+
+ case HBM_SEI_CMD_PARITY_EVEN:
+ gaudi2_hbm_sei_print_ca_par_info(hdev, &sei_data->ca_parity_even_info,
+ le32_to_cpu(sei_data->hdr.cnt));
+ require_hard_reset = true;
+ break;
+
+ case HBM_SEI_CMD_PARITY_ODD:
+ gaudi2_hbm_sei_print_ca_par_info(hdev, &sei_data->ca_parity_odd_info,
+ le32_to_cpu(sei_data->hdr.cnt));
+ require_hard_reset = true;
+ break;
+
+ case HBM_SEI_WRITE_DATA_PARITY_ERR:
+ gaudi2_hbm_sei_print_wr_par_info(hdev, &sei_data->wr_parity_info,
+ le32_to_cpu(sei_data->hdr.cnt));
+ require_hard_reset = true;
+ break;
+
+ case HBM_SEI_READ_ERR:
+ /* Unlike other SEI events, read error requires further processing of the
+ * raw data in order to determine the root cause.
+ */
+ require_hard_reset = gaudi2_hbm_sei_handle_read_err(hdev,
+ &sei_data->read_err_info,
+ le32_to_cpu(sei_data->hdr.cnt));
+ break;
+
+ default:
+ break;
+ }
+
+ require_hard_reset |= !!sei_data->hdr.is_critical;
+
+ return require_hard_reset;
+}
+
+static void gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u64 intr_cause_data)
+{
+ dev_err(hdev->dev,
+ "HBM catastrophic temperature error (CATTRIP) cause %#llx\n",
+ intr_cause_data);
+}
+
+static void gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data)
+{
+ u32 i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++)
+ if (intr_cause_data & hbm_mc_spi[i].mask)
+ dev_dbg(hdev->dev, "HBM spi event: notification cause(%s)\n",
+ hbm_mc_spi[i].cause);
+}
+
+static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+{
+ ktime_t zero_time = ktime_set(0, 0);
+
+ mutex_lock(&hdev->clk_throttling.lock);
+
+ switch (event_type) {
+ case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S:
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
+ dev_info_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
+ break;
+
+ case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
+ dev_info_ratelimited(hdev->dev, "Power envelope is safe, back to optimal clock\n");
+ break;
+
+ case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+ dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
+ break;
+
+ case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+ dev_info_ratelimited(hdev->dev, "Thermal envelope is safe, back to optimal clock\n");
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&hdev->clk_throttling.lock);
+}
+
+static void gaudi2_print_out_of_sync_info(struct hl_device *hdev,
+ struct cpucp_pkt_sync_err *sync_err)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
+
+ dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
+ sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+}
+
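+/* Report and clear the PCIE P2P security and MSI-X gateway interrupts, if set */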
+static void gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev)
+{
+ u32 p2p_intr, msix_gw_intr;
+
+ p2p_intr = RREG32(mmPCIE_WRAP_P2P_INTR);
+ msix_gw_intr = RREG32(mmPCIE_WRAP_MSIX_GW_INTR);
+
+ if (p2p_intr) {
+ dev_err_ratelimited(hdev->dev,
+ "pcie p2p transaction terminated due to security, req_id(0x%x)\n",
+ RREG32(mmPCIE_WRAP_P2P_REQ_ID));
+
+ WREG32(mmPCIE_WRAP_P2P_INTR, 0x1);
+ }
+
+ if (msix_gw_intr) {
+ dev_err_ratelimited(hdev->dev,
+ "pcie msi-x gen denied due to vector num check failure, vec(0x%X)\n",
+ RREG32(mmPCIE_WRAP_MSIX_GW_VEC));
+
+ WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1);
+ }
+}
+
+static void gaudi2_handle_pcie_drain(struct hl_device *hdev,
+ struct hl_eq_pcie_drain_ind_data *drain_data)
+{
+ u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause;
+
+ cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data);
+ lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw);
+ lbw_wr = le64_to_cpu(drain_data->drain_wr_addr_lbw);
+ hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw);
+ hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw);
+
+ if (cause & BIT_ULL(0))
+ dev_err_ratelimited(hdev->dev,
+ "PCIE AXI drain LBW completed, read_err %u, write_err %u\n",
+ !!lbw_rd, !!lbw_wr);
+
+ if (cause & BIT_ULL(1))
+ dev_err_ratelimited(hdev->dev,
+ "PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n",
+ hbw_rd, hbw_wr);
+}
+
+static void gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data)
+{
+ int i;
+
+ for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) {
+ if (intr_cause_data & BIT_ULL(i))
+ dev_err_ratelimited(hdev->dev, "PSOC %s completed\n",
+ gaudi2_psoc_axi_drain_interrupts_cause[i]);
+ }
+}
+
+static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev,
+ struct cpucp_pkt_sync_err *sync_err)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
+
+ dev_warn(hdev->dev,
+ "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
+ sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+}
+
+static void hl_arc_event_handle(struct hl_device *hdev,
+ struct hl_eq_engine_arc_intr_data *data)
+{
+ struct hl_engine_arc_dccm_queue_full_irq *q;
+ u32 intr_type, engine_id;
+ u64 payload;
+
+ intr_type = le32_to_cpu(data->intr_type);
+ engine_id = le32_to_cpu(data->engine_id);
+ payload = le64_to_cpu(data->payload);
+
+ switch (intr_type) {
+ case ENGINE_ARC_DCCM_QUEUE_FULL_IRQ:
+ q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
+
+ dev_err_ratelimited(hdev->dev,
+ "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u\n",
+ engine_id, intr_type, q->queue_index);
+ break;
+ default:
+ dev_err_ratelimited(hdev->dev, "Unknown ARC event type\n");
+ }
+}
+
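+/*
+ * Top-level event queue handler: dispatch the event to the relevant handler,
+ * then either unmask the interrupt towards the FW or, when the event (or its
+ * handler) requires it, initiate a hard reset of the device.
+ */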
+static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
+{
+ u32 ctl, reset_flags = HL_DRV_RESET_HARD | HL_DRV_RESET_DELAY;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ bool reset_required = false, skip_reset = false;
+ int index, sbte_index;
+ u16 event_type;
+
+ ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT);
+
+ if (event_type >= GAUDI2_EVENT_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u\n",
+ event_type, GAUDI2_EVENT_SIZE - 1);
+ return;
+ }
+
+ gaudi2->events_stat[event_type]++;
+ gaudi2->events_stat_aggregate[event_type]++;
+
+ gaudi2_print_irq_info(hdev, event_type);
+
+ switch (event_type) {
+ case GAUDI2_EVENT_PCIE_CORE_SERR ... GAUDI2_EVENT_ARC0_ECC_DERR:
+ fallthrough;
+ case GAUDI2_EVENT_ROTATOR0_SERR ... GAUDI2_EVENT_ROTATOR1_DERR:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ break;
+
+ case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM:
+ fallthrough;
+ case GAUDI2_EVENT_ROTATOR0_ROT0_QM ... GAUDI2_EVENT_ROTATOR1_ROT1_QM:
+ fallthrough;
+ case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1:
+ gaudi2_handle_qman_err(hdev, event_type);
+ break;
+
+ case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ gaudi2_handle_arc_farm_sei_err(hdev);
+ break;
+
+ case GAUDI2_EVENT_CPU_AXI_ERR_RSP:
+ gaudi2_handle_cpu_sei_err(hdev);
+ break;
+
+ case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
+ case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info);
+ break;
+
+ case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE:
+ index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
+ gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause);
+ gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ break;
+
+ case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
+ index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP;
+ gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP",
+ &eq_entry->razwi_with_intr_cause);
+ gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ break;
+
+ case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE:
+ index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE;
+ gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info);
+ break;
+
+ case GAUDI2_EVENT_TPC0_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC1_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC2_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC3_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC4_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC5_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC6_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC7_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC8_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC9_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC10_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC11_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC12_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC13_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC14_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC15_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC16_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC17_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC18_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC19_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC20_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC21_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC22_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC23_KERNEL_ERR:
+ case GAUDI2_EVENT_TPC24_KERNEL_ERR:
+ index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) /
+ (GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR);
+ gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause);
+ break;
+
+ case GAUDI2_EVENT_DEC0_SPI:
+ case GAUDI2_EVENT_DEC1_SPI:
+ case GAUDI2_EVENT_DEC2_SPI:
+ case GAUDI2_EVENT_DEC3_SPI:
+ case GAUDI2_EVENT_DEC4_SPI:
+ case GAUDI2_EVENT_DEC5_SPI:
+ case GAUDI2_EVENT_DEC6_SPI:
+ case GAUDI2_EVENT_DEC7_SPI:
+ case GAUDI2_EVENT_DEC8_SPI:
+ case GAUDI2_EVENT_DEC9_SPI:
+ index = (event_type - GAUDI2_EVENT_DEC0_SPI) /
+ (GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI);
+ gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info);
+ break;
+
+ case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE:
+ case GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE:
+ index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) /
+ (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE -
+ GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE);
+ gaudi2_handle_mme_err(hdev, index,
+ "CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info);
+ gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ break;
+
+ case GAUDI2_EVENT_MME0_QMAN_SW_ERROR:
+ case GAUDI2_EVENT_MME1_QMAN_SW_ERROR:
+ case GAUDI2_EVENT_MME2_QMAN_SW_ERROR:
+ case GAUDI2_EVENT_MME3_QMAN_SW_ERROR:
+ index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) /
+ (GAUDI2_EVENT_MME1_QMAN_SW_ERROR -
+ GAUDI2_EVENT_MME0_QMAN_SW_ERROR);
+ gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info);
+ break;
+
+ case GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID:
+ case GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID:
+ case GAUDI2_EVENT_MME2_WAP_SOURCE_RESULT_INVALID:
+ case GAUDI2_EVENT_MME3_WAP_SOURCE_RESULT_INVALID:
+ index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) /
+ (GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID -
+ GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID);
+ gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info);
+ break;
+
+ case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
+ case GAUDI2_EVENT_KDMA0_CORE:
+ gaudi2_handle_kdma_core_event(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+
+ case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE:
+ gaudi2_handle_dma_core_event(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+
+ case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR:
+ gaudi2_print_pcie_addr_dec_info(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+
+ case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
+ case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
+ case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
+ case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
+ gaudi2_handle_mmu_spi_sei_err(hdev, event_type);
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+
+ case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL:
+ gaudi2_handle_hif_fatal(hdev, event_type,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+
+ case GAUDI2_EVENT_PMMU_FATAL_0:
+ gaudi2_handle_pif_fatal(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+
+ case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT:
+ gaudi2_ack_psoc_razwi_event_handler(hdev);
+ break;
+
+ case GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE ... GAUDI2_EVENT_HBM5_MC1_SEI_NON_SEVERE:
+ if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ reset_required = true;
+ }
+ break;
+
+ case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5:
+ gaudi2_handle_hbm_cattrip(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+
+ case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI:
+ gaudi2_handle_hbm_mc_spi(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+
+ case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE:
+ gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
+ break;
+
+ case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
+ gaudi2_handle_psoc_drain(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+
+ case GAUDI2_EVENT_CPU_AXI_ECC:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+ case GAUDI2_EVENT_CPU_L2_RAM_ECC:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+ case GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME0_SBTE4_AXI_ERR_RSP:
+ case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP:
+ case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP:
+ case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP:
+ index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) /
+ (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP -
+ GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
+ sbte_index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) %
+ (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP -
+ GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
+ gaudi2_handle_mme_sbte_err(hdev, index, sbte_index,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ break;
+ case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+ case GAUDI2_EVENT_PSOC_AXI_ERR_RSP:
+ case GAUDI2_EVENT_PSOC_PRSTN_FALL:
+ break;
+ case GAUDI2_EVENT_PCIE_APB_TIMEOUT:
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ break;
+ case GAUDI2_EVENT_PCIE_FATAL_ERR:
+ break;
+ case GAUDI2_EVENT_TPC0_BMON_SPMU:
+ case GAUDI2_EVENT_TPC1_BMON_SPMU:
+ case GAUDI2_EVENT_TPC2_BMON_SPMU:
+ case GAUDI2_EVENT_TPC3_BMON_SPMU:
+ case GAUDI2_EVENT_TPC4_BMON_SPMU:
+ case GAUDI2_EVENT_TPC5_BMON_SPMU:
+ case GAUDI2_EVENT_TPC6_BMON_SPMU:
+ case GAUDI2_EVENT_TPC7_BMON_SPMU:
+ case GAUDI2_EVENT_TPC8_BMON_SPMU:
+ case GAUDI2_EVENT_TPC9_BMON_SPMU:
+ case GAUDI2_EVENT_TPC10_BMON_SPMU:
+ case GAUDI2_EVENT_TPC11_BMON_SPMU:
+ case GAUDI2_EVENT_TPC12_BMON_SPMU:
+ case GAUDI2_EVENT_TPC13_BMON_SPMU:
+ case GAUDI2_EVENT_TPC14_BMON_SPMU:
+ case GAUDI2_EVENT_TPC15_BMON_SPMU:
+ case GAUDI2_EVENT_TPC16_BMON_SPMU:
+ case GAUDI2_EVENT_TPC17_BMON_SPMU:
+ case GAUDI2_EVENT_TPC18_BMON_SPMU:
+ case GAUDI2_EVENT_TPC19_BMON_SPMU:
+ case GAUDI2_EVENT_TPC20_BMON_SPMU:
+ case GAUDI2_EVENT_TPC21_BMON_SPMU:
+ case GAUDI2_EVENT_TPC22_BMON_SPMU:
+ case GAUDI2_EVENT_TPC23_BMON_SPMU:
+ case GAUDI2_EVENT_TPC24_BMON_SPMU:
+ case GAUDI2_EVENT_MME0_CTRL_BMON_SPMU:
+ case GAUDI2_EVENT_MME0_SBTE_BMON_SPMU:
+ case GAUDI2_EVENT_MME0_WAP_BMON_SPMU:
+ case GAUDI2_EVENT_MME1_CTRL_BMON_SPMU:
+ case GAUDI2_EVENT_MME1_SBTE_BMON_SPMU:
+ case GAUDI2_EVENT_MME1_WAP_BMON_SPMU:
+ case GAUDI2_EVENT_MME2_CTRL_BMON_SPMU:
+ case GAUDI2_EVENT_MME2_SBTE_BMON_SPMU:
+ case GAUDI2_EVENT_MME2_WAP_BMON_SPMU:
+ case GAUDI2_EVENT_MME3_CTRL_BMON_SPMU:
+ case GAUDI2_EVENT_MME3_SBTE_BMON_SPMU:
+ case GAUDI2_EVENT_MME3_WAP_BMON_SPMU:
+ case GAUDI2_EVENT_HDMA2_BM_SPMU ... GAUDI2_EVENT_PDMA1_BM_SPMU:
+ fallthrough;
+ case GAUDI2_EVENT_DEC0_BMON_SPMU:
+ case GAUDI2_EVENT_DEC1_BMON_SPMU:
+ case GAUDI2_EVENT_DEC2_BMON_SPMU:
+ case GAUDI2_EVENT_DEC3_BMON_SPMU:
+ case GAUDI2_EVENT_DEC4_BMON_SPMU:
+ case GAUDI2_EVENT_DEC5_BMON_SPMU:
+ case GAUDI2_EVENT_DEC6_BMON_SPMU:
+ case GAUDI2_EVENT_DEC7_BMON_SPMU:
+ case GAUDI2_EVENT_DEC8_BMON_SPMU:
+ case GAUDI2_EVENT_DEC9_BMON_SPMU:
+ case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU:
+ break;
+
+ case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S:
+ case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
+ case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
+ case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
+ gaudi2_print_clk_change_info(hdev, event_type);
+ break;
+
+ case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC:
+ gaudi2_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ break;
+
+ case GAUDI2_EVENT_PCIE_FLR_REQUESTED:
+ /* Do nothing - FW will handle it */
+ break;
+
+ case GAUDI2_EVENT_PCIE_P2P_MSIX:
+ gaudi2_handle_pcie_p2p_msix(hdev);
+ break;
+
+ case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE:
+ index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE;
+ skip_reset = !gaudi2_handle_sm_err(hdev, index);
+ break;
+
+ case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR:
+ break;
+
+ case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
+ dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n",
+ le64_to_cpu(eq_entry->data[0]));
+ break;
+ case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT:
+ dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
+ le64_to_cpu(eq_entry->data[0]));
+ break;
+
+ case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
+ gaudi2_print_cpu_pkt_failure_info(hdev, &eq_entry->pkt_sync_err);
+ break;
+
+ case GAUDI2_EVENT_ARC_DCCM_FULL:
+ hl_arc_event_handle(hdev, &eq_entry->arc_data);
+ break;
+
+ default:
+ if (gaudi2_irq_map_table[event_type].valid)
+ dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n",
+ event_type);
+ }
+
+ if ((gaudi2_irq_map_table[event_type].reset || reset_required) && !skip_reset)
+ goto reset_device;
+
+ /* Send unmask irq only for interrupts not classified as MSG */
+ if (!gaudi2_irq_map_table[event_type].msg)
+ hl_fw_unmask_irq(hdev, event_type);
+
+ return;
+
+reset_device:
+ if (hdev->hard_reset_on_fw_events) {
+ hl_device_reset(hdev, reset_flags);
+ } else {
+ if (!gaudi2_irq_map_table[event_type].msg)
+ hl_fw_unmask_irq(hdev, event_type);
+ }
+}
+
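+/*
+ * Fill a device memory range with a 64-bit value using the enabled EDMA
+ * engines in memset mode, with MMU bypass. Completion of each batch of
+ * transfers is tracked by a sync object that every engine increments via
+ * its write-completion message.
+ */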
+static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 comp_addr, cur_addr = addr, end_addr = addr + size;
+ u32 chunk_size, busy, dcore, edma_idx, sob_offset, sob_addr, comp_val, edma_commit;
+ u32 old_mmubp, mmubp;
+ int rc = 0;
+
+ sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4;
+ sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
+ comp_addr = CFG_BASE + sob_addr;
+ comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) |
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1);
+
+ edma_commit = FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK, 1) |
+ FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK, 1) |
+ FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK, 1);
+ mmubp = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_MASK, 1) |
+ FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_MASK, 1);
+
+ if (prop->edma_enabled_mask == 0) {
+ dev_info(hdev->dev, "none of the EDMA engines is enabled - skip DRAM scrubbing\n");
+ return -EIO;
+ }
+
+ /*
+ * Set MMU bypass for the scrubbing - all EDMAs are configured the same,
+ * so save only the first one to restore later
+ */
+ old_mmubp = RREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP);
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) {
+ u32 edma_offset = dcore * DCORE_OFFSET + edma_idx * DCORE_EDMA_OFFSET;
+ u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx;
+
+ if (!(prop->edma_enabled_mask & BIT(edma_bit)))
+ continue;
+
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP +
+ edma_offset, mmubp);
+ }
+ }
+
+ while (cur_addr < end_addr) {
+ int dma_num = 0;
+
+ WREG32(sob_addr, 0);
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) {
+ u32 edma_offset = dcore * DCORE_OFFSET +
+ edma_idx * DCORE_EDMA_OFFSET;
+ u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx;
+
+ if (!(prop->edma_enabled_mask & BIT(edma_bit)))
+ continue;
+
+ chunk_size = min_t(u64, SZ_2G, end_addr - cur_addr);
+
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_LO + edma_offset,
+ lower_32_bits(val));
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_HI + edma_offset,
+ upper_32_bits(val));
+
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_DST_BASE_LO + edma_offset,
+ lower_32_bits(cur_addr));
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_DST_BASE_HI + edma_offset,
+ upper_32_bits(cur_addr));
+
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset,
+ lower_32_bits(comp_addr));
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset,
+ upper_32_bits(comp_addr));
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset,
+ comp_val);
+
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_0 + edma_offset,
+ chunk_size);
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_COMMIT + edma_offset, edma_commit);
+
+ dma_num++;
+
+ cur_addr += chunk_size;
+
+ if (cur_addr == end_addr)
+ goto poll;
+ }
+ }
+poll:
+ rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000);
+ if (rc) {
+ dev_err(hdev->dev, "DMA Timeout during HBM scrubbing\n");
+ goto end;
+ }
+ }
+end:
+ for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
+ for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) {
+ u32 edma_offset = dcore * DCORE_OFFSET + edma_idx * DCORE_EDMA_OFFSET;
+ u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx;
+
+ if (!(prop->edma_enabled_mask & BIT(edma_bit)))
+ continue;
+
+ WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + edma_offset, old_mmubp);
+ }
+ }
+
+ WREG32(sob_addr, 0);
+ return rc;
+}
+
+static int gaudi2_scrub_device_dram(struct hl_device *hdev, u64 val)
+{
+ int rc;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 size = prop->dram_end_address - prop->dram_user_base_address;
+
+ rc = gaudi2_memset_device_memory(hdev, prop->dram_user_base_address, size, val);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to scrub dram, address: 0x%llx size: %llu\n",
+ prop->dram_user_base_address, size);
+ return rc;
+}
+
+static int gaudi2_scrub_device_mem(struct hl_device *hdev)
+{
+ int rc;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 val = hdev->memory_scrub_val;
+ u64 addr, size;
+
+ if (!hdev->memory_scrub)
+ return 0;
+
+ /* scrub SRAM */
+ addr = prop->sram_user_base_address;
+ size = hdev->pldm ? 0x10000 : (prop->sram_size - SRAM_USER_BASE_OFFSET);
+ dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx, val: 0x%llx\n",
+ addr, addr + size, val);
+ rc = gaudi2_memset_device_memory(hdev, addr, size, val);
+ if (rc) {
+ dev_err(hdev->dev, "scrubbing SRAM failed (%d)\n", rc);
+ return rc;
+ }
+
+ /* scrub DRAM */
+ rc = gaudi2_scrub_device_dram(hdev, val);
+ if (rc) {
+ dev_err(hdev->dev, "scrubbing DRAM failed (%d)\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
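+/*
+ * Restore the user-available sync manager resources (CQ configuration,
+ * monitors and sync objects) on all dcores to their initial values.
+ */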
+static void gaudi2_restore_user_sm_registers(struct hl_device *hdev)
+{
+ u64 addr, mon_sts_addr, mon_cfg_addr, cq_lbw_l_addr, cq_lbw_h_addr,
+ cq_lbw_data_addr, cq_base_l_addr, cq_base_h_addr, cq_size_addr;
+ u32 val, size, offset;
+ int dcore_id;
+
+ offset = hdev->asic_prop.first_available_cq[0] * 4;
+ cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + offset;
+ cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + offset;
+ cq_lbw_data_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 + offset;
+ cq_base_l_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + offset;
+ cq_base_h_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + offset;
+ cq_size_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + offset;
+ size = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 -
+ (mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + offset);
+
+ /* memset dcore0 CQ registers */
+ gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0);
+
+ cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + DCORE_OFFSET;
+ cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + DCORE_OFFSET;
+ cq_lbw_data_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 + DCORE_OFFSET;
+ cq_base_l_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + DCORE_OFFSET;
+ cq_base_h_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + DCORE_OFFSET;
+ cq_size_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + DCORE_OFFSET;
+ size = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 - mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0;
+
+ for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
+ gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0);
+ gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0);
+
+ cq_lbw_l_addr += DCORE_OFFSET;
+ cq_lbw_h_addr += DCORE_OFFSET;
+ cq_lbw_data_addr += DCORE_OFFSET;
+ cq_base_l_addr += DCORE_OFFSET;
+ cq_base_h_addr += DCORE_OFFSET;
+ cq_size_addr += DCORE_OFFSET;
+ }
+
+ offset = hdev->asic_prop.first_available_user_mon[0] * 4;
+ addr = mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + offset;
+ val = 1 << DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PROT_SHIFT;
+ size = mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - (mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + offset);
+
+ /* memset dcore0 monitors */
+ gaudi2_memset_device_lbw(hdev, addr, size, val);
+
+ addr = mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + offset;
+ gaudi2_memset_device_lbw(hdev, addr, size, 0);
+
+ mon_sts_addr = mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + DCORE_OFFSET;
+ mon_cfg_addr = mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + DCORE_OFFSET;
+ size = mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0;
+
+ for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
+ gaudi2_memset_device_lbw(hdev, mon_sts_addr, size, val);
+ gaudi2_memset_device_lbw(hdev, mon_cfg_addr, size, 0);
+ mon_sts_addr += DCORE_OFFSET;
+ mon_cfg_addr += DCORE_OFFSET;
+ }
+
+ offset = hdev->asic_prop.first_available_user_sob[0] * 4;
+ addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset;
+ val = 0;
+ size = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 -
+ (mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset);
+
+ /* memset dcore0 sobs */
+ gaudi2_memset_device_lbw(hdev, addr, size, val);
+
+ addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + DCORE_OFFSET;
+ size = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 - mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0;
+
+ for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
+ gaudi2_memset_device_lbw(hdev, addr, size, val);
+ addr += DCORE_OFFSET;
+ }
+
+ /* Flush all WREG to prevent race */
+ val = RREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset);
+}
+
+static void gaudi2_restore_user_qm_registers(struct hl_device *hdev)
+{
+ u32 reg_base, hw_queue_id;
+
+ for (hw_queue_id = GAUDI2_QUEUE_ID_PDMA_0_0 ; hw_queue_id <= GAUDI2_QUEUE_ID_ROT_1_0;
+ hw_queue_id += NUM_OF_PQ_PER_QMAN) {
+ if (!gaudi2_is_queue_enabled(hdev, hw_queue_id))
+ continue;
+
+ gaudi2_clear_qm_fence_counters_common(hdev, hw_queue_id, false);
+
+ reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
+ WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0);
+ }
+
+ /* Flush all WREG to prevent race */
+ RREG32(mmPDMA0_QM_ARB_CFG_0);
+}
+
+static void gaudi2_restore_nic_qm_registers(struct hl_device *hdev)
+{
+ u32 reg_base, hw_queue_id;
+
+ for (hw_queue_id = GAUDI2_QUEUE_ID_NIC_0_0 ; hw_queue_id <= GAUDI2_QUEUE_ID_NIC_23_3;
+ hw_queue_id += NUM_OF_PQ_PER_QMAN) {
+ if (!gaudi2_is_queue_enabled(hdev, hw_queue_id))
+ continue;
+
+ gaudi2_clear_qm_fence_counters_common(hdev, hw_queue_id, false);
+
+ reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
+ WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0);
+ }
+
+ /* Flush all WREG to prevent race */
+ RREG32(mmPDMA0_QM_ARB_CFG_0);
+}
+
+static int gaudi2_context_switch(struct hl_device *hdev, u32 asid)
+{
+ return 0;
+}
+
+static void gaudi2_restore_phase_topology(struct hl_device *hdev)
+{
+}
+
+static void gaudi2_init_block_instances(struct hl_device *hdev, u32 block_idx,
+ struct dup_block_ctx *cfg_ctx)
+{
+ u64 block_base = cfg_ctx->base + block_idx * cfg_ctx->block_off;
+ u8 seq;
+ int i;
+
+ for (i = 0 ; i < cfg_ctx->instances ; i++) {
+ seq = block_idx * cfg_ctx->instances + i;
+
+ /* skip disabled instance */
+ if (!(cfg_ctx->enabled_mask & BIT_ULL(seq)))
+ continue;
+
+ cfg_ctx->instance_cfg_fn(hdev, block_base + i * cfg_ctx->instance_off,
+ cfg_ctx->data);
+ }
+}
+
+static void gaudi2_init_blocks_with_mask(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx,
+ u64 mask)
+{
+ int i;
+
+ cfg_ctx->enabled_mask = mask;
+
+ for (i = 0 ; i < cfg_ctx->blocks ; i++)
+ gaudi2_init_block_instances(hdev, i, cfg_ctx);
+}
+
+void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx)
+{
+ gaudi2_init_blocks_with_mask(hdev, cfg_ctx, U64_MAX);
+}
+
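+/*
+ * Read 'size' bytes from device address 'addr' into 'blob_addr' by bouncing
+ * the data through a 2MB host buffer that is temporarily mapped to the
+ * compute context and filled by the KDMA engine, one chunk at a time.
+ */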
+static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
+{
+ void *host_mem_virtual_addr;
+ dma_addr_t host_mem_dma_addr;
+ u64 reserved_va_base;
+ u32 pos, size_left, size_to_dma;
+ struct hl_ctx *ctx;
+ int rc = 0;
+
+ /* Fetch the ctx */
+ ctx = hl_get_compute_ctx(hdev);
+ if (!ctx) {
+ dev_err(hdev->dev, "No ctx available\n");
+ return -EINVAL;
+ }
+
+ /* Allocate buffers for read and for poll */
+ host_mem_virtual_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &host_mem_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (host_mem_virtual_addr == NULL) {
+ dev_err(hdev->dev, "Failed to allocate memory for KDMA read\n");
+ rc = -ENOMEM;
+ goto put_ctx;
+ }
+
+ /* Reserve VM region on asic side */
+ reserved_va_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST, SZ_2M,
+ HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+ if (!reserved_va_base) {
+ dev_err(hdev->dev, "Failed to reserve vmem on asic\n");
+ rc = -ENOMEM;
+ goto free_data_buffer;
+ }
+
+ /* Create mapping on asic side */
+ mutex_lock(&ctx->mmu_lock);
+ rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
+ hl_mmu_invalidate_cache_range(hdev, false,
+ MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
+ ctx->asid, reserved_va_base, SZ_2M);
+ mutex_unlock(&ctx->mmu_lock);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
+ goto unreserve_va;
+ }
+
+ hdev->asic_funcs->kdma_lock(hdev, 0);
+
+ /* Enable MMU on KDMA */
+ gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
+
+ pos = 0;
+ size_left = size;
+ size_to_dma = SZ_2M;
+
+ while (size_left > 0) {
+ if (size_left < SZ_2M)
+ size_to_dma = size_left;
+
+ rc = gaudi2_send_job_to_kdma(hdev, addr, reserved_va_base, size_to_dma, false);
+ if (rc)
+ break;
+
+ memcpy(blob_addr + pos, host_mem_virtual_addr, size_to_dma);
+
+ if (size_left <= SZ_2M)
+ break;
+
+ pos += SZ_2M;
+ addr += SZ_2M;
+ size_left -= SZ_2M;
+ }
+
+ gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
+
+ hdev->asic_funcs->kdma_unlock(hdev, 0);
+
+ mutex_lock(&ctx->mmu_lock);
+ hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
+ hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
+ ctx->asid, reserved_va_base, SZ_2M);
+ mutex_unlock(&ctx->mmu_lock);
+unreserve_va:
+ hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
+free_data_buffer:
+ hl_asic_dma_free_coherent(hdev, SZ_2M, host_mem_virtual_addr, host_mem_dma_addr);
+put_ctx:
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
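+/*
+ * Allocate a host-resident pool for internal (signal/wait) command buffers
+ * and map it to the context through the PMMU.
+ */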
+static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int min_alloc_order, rc;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
+ return 0;
+
+ hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev,
+ HOST_SPACE_INTERNAL_CB_SZ,
+ &hdev->internal_cb_pool_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!hdev->internal_cb_pool_virt_addr)
+ return -ENOMEM;
+
+ min_alloc_order = ilog2(min(gaudi2_get_signal_cb_size(hdev),
+ gaudi2_get_wait_cb_size(hdev)));
+
+ hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1);
+ if (!hdev->internal_cb_pool) {
+ dev_err(hdev->dev, "Failed to create internal CB pool\n");
+ rc = -ENOMEM;
+ goto free_internal_cb_pool;
+ }
+
+ rc = gen_pool_add(hdev->internal_cb_pool, (uintptr_t) hdev->internal_cb_pool_virt_addr,
+ HOST_SPACE_INTERNAL_CB_SZ, -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to internal CB pool\n");
+ rc = -EFAULT;
+ goto destroy_internal_cb_pool;
+ }
+
+ hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
+ HOST_SPACE_INTERNAL_CB_SZ, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+
+ if (!hdev->internal_cb_va_base) {
+ rc = -ENOMEM;
+ goto destroy_internal_cb_pool;
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+ rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
+ HOST_SPACE_INTERNAL_CB_SZ);
+ hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
+ mutex_unlock(&ctx->mmu_lock);
+
+ if (rc)
+ goto unreserve_internal_cb_pool;
+
+ return 0;
+
+unreserve_internal_cb_pool:
+ hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
+destroy_internal_cb_pool:
+ gen_pool_destroy(hdev->internal_cb_pool);
+free_internal_cb_pool:
+ hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
+ hdev->internal_cb_pool_dma_addr);
+
+ return rc;
+}
+
+static void gaudi2_internal_cb_pool_fini(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
+ return;
+
+ mutex_lock(&ctx->mmu_lock);
+ hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
+ hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
+ mutex_unlock(&ctx->mmu_lock);
+
+ gen_pool_destroy(hdev->internal_cb_pool);
+
+ hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
+ hdev->internal_cb_pool_dma_addr);
+}
+
+static void gaudi2_restore_user_registers(struct hl_device *hdev)
+{
+ gaudi2_restore_user_sm_registers(hdev);
+ gaudi2_restore_user_qm_registers(hdev);
+}
+
+static int gaudi2_map_virtual_msix_doorbell_memory(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int rc;
+
+ rc = hl_mmu_map_page(ctx, RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START,
+ gaudi2->virt_msix_db_dma_addr, prop->pmmu.page_size, true);
+ if (rc)
+ dev_err(hdev->dev, "Failed to map VA %#llx for virtual MSI-X doorbell memory\n",
+ RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START);
+
+ return rc;
+}
+
+static void gaudi2_unmap_virtual_msix_doorbell_memory(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ rc = hl_mmu_unmap_page(ctx, RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START,
+ prop->pmmu.page_size, true);
+ if (rc)
+ dev_err(hdev->dev, "Failed to unmap VA %#llx of virtual MSI-X doorbell memory\n",
+ RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START);
+}
+
+static int gaudi2_ctx_init(struct hl_ctx *ctx)
+{
+ int rc;
+
+ rc = gaudi2_mmu_prepare(ctx->hdev, ctx->asid);
+ if (rc)
+ return rc;
+
+ /* No need to clear user registers if the device has just
+ * performed a reset; in that case we restore only the NIC QM registers
+ */
+ if (ctx->hdev->reset_upon_device_release)
+ gaudi2_restore_nic_qm_registers(ctx->hdev);
+ else
+ gaudi2_restore_user_registers(ctx->hdev);
+
+ rc = gaudi2_internal_cb_pool_init(ctx->hdev, ctx);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_map_virtual_msix_doorbell_memory(ctx);
+ if (rc)
+ gaudi2_internal_cb_pool_fini(ctx->hdev, ctx);
+
+ return rc;
+}
+
+static void gaudi2_ctx_fini(struct hl_ctx *ctx)
+{
+ if (ctx->asid == HL_KERNEL_ASID_ID)
+ return;
+
+ gaudi2_internal_cb_pool_fini(ctx->hdev, ctx);
+
+ gaudi2_unmap_virtual_msix_doorbell_memory(ctx);
+}
+
+static int gaudi2_pre_schedule_cs(struct hl_cs *cs)
+{
+ struct hl_device *hdev = cs->ctx->hdev;
+ int index = cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
+ u32 mon_payload, sob_id, mon_id;
+
+ if (!cs_needs_completion(cs))
+ return 0;
+
+ /*
+ * The first 64 SOB/MON are reserved for the driver's QMAN auto-completion
+ * mechanism. Each SOB/MON pair is used for a pending CS with the same
+ * cyclic index. The SOB value is increased when each of the CS jobs is
+ * completed. When the SOB reaches the number of CS jobs, the monitor
+ * generates an MSI-X interrupt.
+ */
+
+ sob_id = mon_id = index;
+ mon_payload = (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
+ (1 << CQ_ENTRY_READY_SHIFT) | index;
+
+ gaudi2_arm_cq_monitor(hdev, sob_id, mon_id, GAUDI2_RESERVED_CQ_CS_COMPLETION, mon_payload,
+ cs->jobs_cnt);
+
+ return 0;
+}
+
+static u32 gaudi2_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+{
+ return HL_INVALID_QUEUE;
+}
+
+static u32 gaudi2_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, u32 size, bool eb)
+{
+ struct hl_cb *cb = (struct hl_cb *) data;
+ struct packet_msg_short *pkt;
+ u32 value, ctl, pkt_size = sizeof(*pkt);
+
+ pkt = (struct packet_msg_short *) (uintptr_t) (cb->kernel_address + size);
+ memset(pkt, 0, pkt_size);
+
+ /* Inc by 1, Mode ADD */
+ value = FIELD_PREP(GAUDI2_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
+ value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_SOB_MOD_MASK, 1);
+
+ ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
+ ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 1); /* SOB base */
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, eb);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1);
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return size + pkt_size;
+}
+
+static u32 gaudi2_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, u16 addr)
+{
+ u32 ctl, pkt_size = sizeof(*pkt);
+
+ memset(pkt, 0, pkt_size);
+
+ ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 0);
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi2_add_arm_monitor_pkt(struct hl_device *hdev, struct packet_msg_short *pkt,
+ u16 sob_base, u8 sob_mask, u16 sob_val, u16 addr)
+{
+ u32 ctl, value, pkt_size = sizeof(*pkt);
+ u8 mask;
+
+ if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) {
+ dev_err(hdev->dev, "sob_base %u (mask %#x) is not valid\n", sob_base, sob_mask);
+ return 0;
+ }
+
+ memset(pkt, 0, pkt_size);
+
+ value = FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8);
+ value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
+ value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL */
+ value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MASK_MASK, mask);
+
+ ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1);
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi2_add_fence_pkt(struct packet_fence *pkt)
+{
+ u32 ctl, cfg, pkt_size = sizeof(*pkt);
+
+ memset(pkt, 0, pkt_size);
+
+ cfg = FIELD_PREP(GAUDI2_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI2_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI2_PKT_FENCE_CFG_ID_MASK, 2);
+
+ ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_FENCE);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1);
+
+ pkt->cfg = cpu_to_le32(cfg);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop)
+{
+ struct hl_cb *cb = (struct hl_cb *) prop->data;
+ void *buf = (void *) (uintptr_t) (cb->kernel_address);
+
+ u64 monitor_base, fence_addr = 0;
+ u32 stream_index, size = prop->size;
+ u16 msg_addr_offset;
+
+ stream_index = prop->q_idx % 4;
+ fence_addr = CFG_BASE + gaudi2_qm_blocks_bases[prop->q_idx] +
+ QM_FENCE2_OFFSET + stream_index * 4;
+
+ /*
+ * monitor_base should be the content of the base0 address registers,
+ * so it will be added to the msg short offsets
+ */
+ monitor_base = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
+
+ /* First monitor config packet: low address of the sync */
+ msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + prop->mon_id * 4) -
+ monitor_base;
+
+ size += gaudi2_add_mon_msg_short(buf + size, (u32) fence_addr, msg_addr_offset);
+
+ /* Second monitor config packet: high address of the sync */
+ msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + prop->mon_id * 4) -
+ monitor_base;
+
+ size += gaudi2_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32), msg_addr_offset);
+
+ /*
+ * Third monitor config packet: the payload, i.e. what to write when the
+ * sync triggers
+ */
+ msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + prop->mon_id * 4) -
+ monitor_base;
+
+ size += gaudi2_add_mon_msg_short(buf + size, 1, msg_addr_offset);
+
+ /* Fourth monitor config packet: bind the monitor to a sync object */
+ msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + prop->mon_id * 4) - monitor_base;
+
+ size += gaudi2_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, prop->sob_mask,
+ prop->sob_val, msg_addr_offset);
+
+ /* Fence packet */
+ size += gaudi2_add_fence_pkt(buf + size);
+
+ return size;
+}
+
+static void gaudi2_reset_sob(struct hl_device *hdev, void *data)
+{
+ struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+
+ dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, hw_sob->sob_id);
+
+ WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, 0);
+
+ kref_init(&hw_sob->kref);
+}
+
+static void gaudi2_reset_sob_group(struct hl_device *hdev, u16 sob_group)
+{
+}
+
+static u64 gaudi2_get_device_time(struct hl_device *hdev)
+{
+ u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
+
+ return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
+}
+
+static int gaudi2_collective_wait_init_cs(struct hl_cs *cs)
+{
+ return 0;
+}
+
+static int gaudi2_collective_wait_create_jobs(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct hl_cs *cs, u32 wait_queue_id,
+ u32 collective_engine_id, u32 encaps_signal_offset)
+{
+ return -EINVAL;
+}
+
+/*
+ * gaudi2_mmu_scramble_addr - converts a DRAM (non power of 2) page-size aligned
+ *                            address to a DMMU page-size (64MB) address before
+ *                            mapping it in the MMU.
+ * The operation is performed on both the virtual and physical addresses.
+ * For a device with 6 HBMs the scramble is:
+ * (addr[47:0] / 48M) * 64M + addr % 48M + addr[63:48]
+ *
+ * Example:
+ * =============================================================================
+ * Allocated DRAM Reserved VA scrambled VA for MMU mapping Scrambled PA
+ * Phys address in MMU last
+ * HOP
+ * =============================================================================
+ * PA1 0x3000000 VA1 0x9C000000 SVA1= (VA1/48M)*64M 0xD0000000 <- PA1/48M 0x1
+ * PA2 0x9000000 VA2 0x9F000000 SVA2= (VA2/48M)*64M 0xD4000000 <- PA2/48M 0x3
+ * =============================================================================
+ */
+static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 divisor, mod_va;
+ u64 div_va;
+
+ /* accept any address in the DRAM address space */
+ if (hl_mem_area_inside_range(raw_addr, sizeof(raw_addr), DRAM_PHYS_BASE,
+ VA_HBM_SPACE_END)) {
+
+ divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE;
+ div_va = div_u64_rem(raw_addr & GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK, divisor, &mod_va);
+ return (raw_addr & ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK) |
+ (div_va << GAUDI2_HBM_MMU_SCRM_DIV_SHIFT) |
+ (mod_va << GAUDI2_HBM_MMU_SCRM_MOD_SHIFT);
+ }
+
+ return raw_addr;
+}
+
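+/* Inverse of gaudi2_mmu_scramble_addr() - recover the original DRAM address */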
+static u64 gaudi2_mmu_descramble_addr(struct hl_device *hdev, u64 scrambled_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 divisor, mod_va;
+ u64 div_va;
+
+ /* accept any address in the DRAM address space */
+ if (hl_mem_area_inside_range(scrambled_addr, sizeof(scrambled_addr), DRAM_PHYS_BASE,
+ VA_HBM_SPACE_END)) {
+
+ divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE;
+ div_va = div_u64_rem(scrambled_addr & GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK,
+ PAGE_SIZE_64MB, &mod_va);
+
+ return ((scrambled_addr & ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK) +
+ (div_va * divisor + mod_va));
+ }
+
+ return scrambled_addr;
+}
+
+static u32 gaudi2_get_dec_base_addr(struct hl_device *hdev, u32 core_id)
+{
+ u32 base = 0, dcore_id, dec_id;
+
+ if (core_id >= NUMBER_OF_DEC) {
+ dev_err(hdev->dev, "Unexpected core number %d for DEC\n", core_id);
+ goto out;
+ }
+
+ if (core_id < 8) {
+ dcore_id = core_id / NUM_OF_DEC_PER_DCORE;
+ dec_id = core_id % NUM_OF_DEC_PER_DCORE;
+
+ base = mmDCORE0_DEC0_CMD_BASE + dcore_id * DCORE_OFFSET +
+ dec_id * DCORE_VDEC_OFFSET;
+ } else {
+ /* PCIe Shared Decoder */
+ base = mmPCIE_DEC0_CMD_BASE + ((core_id % 8) * PCIE_VDEC_OFFSET);
+ }
+out:
+ return base;
+}
+
+static int gaudi2_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
+ u32 *block_size, u32 *block_id)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ int i;
+
+ for (i = 0 ; i < NUM_USER_MAPPED_BLOCKS ; i++) {
+ if (block_addr == CFG_BASE + gaudi2->mapped_blocks[i].address) {
+ *block_id = i;
+ if (block_size)
+ *block_size = gaudi2->mapped_blocks[i].size;
+ return 0;
+ }
+ }
+
+ dev_err(hdev->dev, "Invalid block address %#llx\n", block_addr);
+
+ return -EINVAL;
+}
+
+static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u32 block_id, u32 block_size)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 offset_in_bar;
+ u64 address;
+ int rc;
+
+ if (block_id >= NUM_USER_MAPPED_BLOCKS) {
+ dev_err(hdev->dev, "Invalid block id %u\n", block_id);
+ return -EINVAL;
+ }
+
+ /* we allow mapping only an entire block */
+ if (block_size != gaudi2->mapped_blocks[block_id].size) {
+ dev_err(hdev->dev, "Invalid block size %u\n", block_size);
+ return -EINVAL;
+ }
+
+ offset_in_bar = CFG_BASE + gaudi2->mapped_blocks[block_id].address - STM_FLASH_BASE_ADDR;
+
+ address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE;
+
+ rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
+ block_size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d\n", rc);
+
+ return rc;
+}
+
+static void gaudi2_enable_events_from_fw(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ u32 irq_handler_offset = le32_to_cpu(dyn_regs->gic_host_ints_irq);
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)
+ WREG32(irq_handler_offset,
+ gaudi2_irq_map_table[GAUDI2_EVENT_CPU_INTS_REGISTER].cpu_id);
+}
+
+static int gaudi2_get_mmu_base(struct hl_device *hdev, u64 mmu_id, u32 *mmu_base)
+{
+ switch (mmu_id) {
+ case HW_CAP_DCORE0_DMMU0:
+ *mmu_base = mmDCORE0_HMMU0_MMU_BASE;
+ break;
+ case HW_CAP_DCORE0_DMMU1:
+ *mmu_base = mmDCORE0_HMMU1_MMU_BASE;
+ break;
+ case HW_CAP_DCORE0_DMMU2:
+ *mmu_base = mmDCORE0_HMMU2_MMU_BASE;
+ break;
+ case HW_CAP_DCORE0_DMMU3:
+ *mmu_base = mmDCORE0_HMMU3_MMU_BASE;
+ break;
+ case HW_CAP_DCORE1_DMMU0:
+ *mmu_base = mmDCORE1_HMMU0_MMU_BASE;
+ break;
+ case HW_CAP_DCORE1_DMMU1:
+ *mmu_base = mmDCORE1_HMMU1_MMU_BASE;
+ break;
+ case HW_CAP_DCORE1_DMMU2:
+ *mmu_base = mmDCORE1_HMMU2_MMU_BASE;
+ break;
+ case HW_CAP_DCORE1_DMMU3:
+ *mmu_base = mmDCORE1_HMMU3_MMU_BASE;
+ break;
+ case HW_CAP_DCORE2_DMMU0:
+ *mmu_base = mmDCORE2_HMMU0_MMU_BASE;
+ break;
+ case HW_CAP_DCORE2_DMMU1:
+ *mmu_base = mmDCORE2_HMMU1_MMU_BASE;
+ break;
+ case HW_CAP_DCORE2_DMMU2:
+ *mmu_base = mmDCORE2_HMMU2_MMU_BASE;
+ break;
+ case HW_CAP_DCORE2_DMMU3:
+ *mmu_base = mmDCORE2_HMMU3_MMU_BASE;
+ break;
+ case HW_CAP_DCORE3_DMMU0:
+ *mmu_base = mmDCORE3_HMMU0_MMU_BASE;
+ break;
+ case HW_CAP_DCORE3_DMMU1:
+ *mmu_base = mmDCORE3_HMMU1_MMU_BASE;
+ break;
+ case HW_CAP_DCORE3_DMMU2:
+ *mmu_base = mmDCORE3_HMMU2_MMU_BASE;
+ break;
+ case HW_CAP_DCORE3_DMMU3:
+ *mmu_base = mmDCORE3_HMMU3_MMU_BASE;
+ break;
+ case HW_CAP_PMMU:
+ *mmu_base = mmPMMU_HBW_MMU_BASE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void gaudi2_ack_mmu_error(struct hl_device *hdev, u64 mmu_id)
+{
+ bool is_pmmu = (mmu_id == HW_CAP_PMMU);
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 mmu_base;
+
+ if (!(gaudi2->hw_cap_initialized & mmu_id))
+ return;
+
+ if (gaudi2_get_mmu_base(hdev, mmu_id, &mmu_base))
+ return;
+
+ gaudi2_handle_page_error(hdev, mmu_base, is_pmmu);
+ gaudi2_handle_access_error(hdev, mmu_base, is_pmmu);
+}
+
+static int gaudi2_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
+{
+ u32 i, mmu_id, num_of_hmmus = NUM_OF_HMMU_PER_DCORE * NUM_OF_DCORES;
+
+ /* check all HMMUs */
+ for (i = 0 ; i < num_of_hmmus ; i++) {
+ mmu_id = HW_CAP_DCORE0_DMMU0 << i;
+
+ if (mmu_cap_mask & mmu_id)
+ gaudi2_ack_mmu_error(hdev, mmu_id);
+ }
+
+ /* check PMMU */
+ if (mmu_cap_mask & HW_CAP_PMMU)
+ gaudi2_ack_mmu_error(hdev, HW_CAP_PMMU);
+
+ return 0;
+}
+
+static void gaudi2_get_msi_info(__le32 *table)
+{
+ table[CPUCP_EVENT_QUEUE_MSI_TYPE] = cpu_to_le32(GAUDI2_EVENT_QUEUE_MSIX_IDX);
+}
+
+static int gaudi2_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GAUDI2_CPU_PLL: return CPU_PLL;
+ case HL_GAUDI2_PCI_PLL: return PCI_PLL;
+ case HL_GAUDI2_NIC_PLL: return NIC_PLL;
+ case HL_GAUDI2_DMA_PLL: return DMA_PLL;
+ case HL_GAUDI2_MESH_PLL: return MESH_PLL;
+ case HL_GAUDI2_MME_PLL: return MME_PLL;
+ case HL_GAUDI2_TPC_PLL: return TPC_PLL;
+ case HL_GAUDI2_IF_PLL: return IF_PLL;
+ case HL_GAUDI2_SRAM_PLL: return SRAM_PLL;
+ case HL_GAUDI2_HBM_PLL: return HBM_PLL;
+ case HL_GAUDI2_VID_PLL: return VID_PLL;
+ case HL_GAUDI2_MSS_PLL: return MSS_PLL;
+ default: return -EINVAL;
+ }
+}
+
+static int gaudi2_gen_sync_to_engine_map(struct hl_device *hdev, struct hl_sync_to_engine_map *map)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int gaudi2_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int gaudi2_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev, struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static int gaudi2_print_fences_single_engine(struct hl_device *hdev, u64 base_offset,
+ u64 status_base_offset, enum hl_sync_engine_type engine_type,
+ u32 engine_id, char **buf, size_t *size, size_t *offset)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static struct hl_state_dump_specs_funcs gaudi2_state_dump_funcs = {
+ .monitor_valid = gaudi2_monitor_valid,
+ .print_single_monitor = gaudi2_print_single_monitor,
+ .gen_sync_to_engine_map = gaudi2_gen_sync_to_engine_map,
+ .print_fences_single_engine = gaudi2_print_fences_single_engine,
+};
+
+static void gaudi2_state_dump_init(struct hl_device *hdev)
+{
+ /* Not implemented */
+ hdev->state_dump_specs.props = gaudi2_state_dump_specs_props;
+ hdev->state_dump_specs.funcs = gaudi2_state_dump_funcs;
+}
+
+static u32 gaudi2_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return 0;
+}
+
+static u32 *gaudi2_get_stream_master_qid_arr(void)
+{
+ return NULL;
+}
+
+static void gaudi2_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
+ struct attribute_group *dev_vrm_attr_grp)
+{
+ hl_sysfs_add_dev_clk_attr(hdev, dev_clk_attr_grp);
+ hl_sysfs_add_dev_vrm_attr(hdev, dev_vrm_attr_grp);
+}
+
+static int gaudi2_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /* for host pages the page size must be a multiple of the MMU page size */
+ if (!is_dram_addr) {
+ if (page_size % mmu_prop->page_size)
+ goto page_size_err;
+
+ *real_page_size = mmu_prop->page_size;
+ return 0;
+ }
+
+ if ((page_size % prop->dram_page_size) || (prop->dram_page_size > mmu_prop->page_size))
+ goto page_size_err;
+
+ /*
+ * MMU page size is different from DRAM page size (more precisely, the
+ * DMMU page size is greater than the DRAM page size).
+ * For this reason, work with the DRAM page size and let the MMU scrambling
+ * routine handle this mismatch when calculating the address to place in the
+ * MMU page table (in that case also make sure that the dram_page_size is not
+ * greater than the mmu page size).
+ */
+ *real_page_size = prop->dram_page_size;
+
+ return 0;
+
+page_size_err:
+ dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
+ page_size, mmu_prop->page_size >> 10);
+ return -EFAULT;
+}
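+
+/*
+ * Illustrative cases for the page size selection above (the numbers are only
+ * an example, not taken from the ASIC properties): a 16KB host mapping with a
+ * 4KB host MMU page size passes the modulo check and uses real_page_size of
+ * 4KB; a DRAM mapping whose page_size equals dram_page_size (say 48MB on a
+ * binned device) passes both checks against a 64MB DMMU page and uses
+ * real_page_size of dram_page_size, leaving the scrambling routine to bridge
+ * the size mismatch.
+ */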
+
+static int gaudi2_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct hl_asic_funcs gaudi2_funcs = {
+ .early_init = gaudi2_early_init,
+ .early_fini = gaudi2_early_fini,
+ .late_init = gaudi2_late_init,
+ .late_fini = gaudi2_late_fini,
+ .sw_init = gaudi2_sw_init,
+ .sw_fini = gaudi2_sw_fini,
+ .hw_init = gaudi2_hw_init,
+ .hw_fini = gaudi2_hw_fini,
+ .halt_engines = gaudi2_halt_engines,
+ .suspend = gaudi2_suspend,
+ .resume = gaudi2_resume,
+ .mmap = gaudi2_mmap,
+ .ring_doorbell = gaudi2_ring_doorbell,
+ .pqe_write = gaudi2_pqe_write,
+ .asic_dma_alloc_coherent = gaudi2_dma_alloc_coherent,
+ .asic_dma_free_coherent = gaudi2_dma_free_coherent,
+ .scrub_device_mem = gaudi2_scrub_device_mem,
+ .scrub_device_dram = gaudi2_scrub_device_dram,
+ .get_int_queue_base = NULL,
+ .test_queues = gaudi2_test_queues,
+ .asic_dma_pool_zalloc = gaudi2_dma_pool_zalloc,
+ .asic_dma_pool_free = gaudi2_dma_pool_free,
+ .cpu_accessible_dma_pool_alloc = gaudi2_cpu_accessible_dma_pool_alloc,
+ .cpu_accessible_dma_pool_free = gaudi2_cpu_accessible_dma_pool_free,
+ .asic_dma_unmap_single = gaudi2_dma_unmap_single,
+ .asic_dma_map_single = gaudi2_dma_map_single,
+ .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
+ .cs_parser = gaudi2_cs_parser,
+ .asic_dma_map_sgtable = hl_dma_map_sgtable,
+ .add_end_of_cb_packets = NULL,
+ .update_eq_ci = gaudi2_update_eq_ci,
+ .context_switch = gaudi2_context_switch,
+ .restore_phase_topology = gaudi2_restore_phase_topology,
+ .debugfs_read_dma = gaudi2_debugfs_read_dma,
+ .add_device_attr = gaudi2_add_device_attr,
+ .handle_eqe = gaudi2_handle_eqe,
+ .get_events_stat = gaudi2_get_events_stat,
+ .read_pte = NULL,
+ .write_pte = NULL,
+ .mmu_invalidate_cache = gaudi2_mmu_invalidate_cache,
+ .mmu_invalidate_cache_range = gaudi2_mmu_invalidate_cache_range,
+ .mmu_prefetch_cache_range = NULL,
+ .send_heartbeat = gaudi2_send_heartbeat,
+ .debug_coresight = gaudi2_debug_coresight,
+ .is_device_idle = gaudi2_is_device_idle,
+ .non_hard_reset_late_init = gaudi2_non_hard_reset_late_init,
+ .hw_queues_lock = gaudi2_hw_queues_lock,
+ .hw_queues_unlock = gaudi2_hw_queues_unlock,
+ .kdma_lock = gaudi2_kdma_lock,
+ .kdma_unlock = gaudi2_kdma_unlock,
+ .get_pci_id = gaudi2_get_pci_id,
+ .get_eeprom_data = gaudi2_get_eeprom_data,
+ .get_monitor_dump = gaudi2_get_monitor_dump,
+ .send_cpu_message = gaudi2_send_cpu_message,
+ .pci_bars_map = gaudi2_pci_bars_map,
+ .init_iatu = gaudi2_init_iatu,
+ .rreg = hl_rreg,
+ .wreg = hl_wreg,
+ .halt_coresight = gaudi2_halt_coresight,
+ .ctx_init = gaudi2_ctx_init,
+ .ctx_fini = gaudi2_ctx_fini,
+ .pre_schedule_cs = gaudi2_pre_schedule_cs,
+ .get_queue_id_for_cq = gaudi2_get_queue_id_for_cq,
+ .load_firmware_to_device = NULL,
+ .load_boot_fit_to_device = NULL,
+ .get_signal_cb_size = gaudi2_get_signal_cb_size,
+ .get_wait_cb_size = gaudi2_get_wait_cb_size,
+ .gen_signal_cb = gaudi2_gen_signal_cb,
+ .gen_wait_cb = gaudi2_gen_wait_cb,
+ .reset_sob = gaudi2_reset_sob,
+ .reset_sob_group = gaudi2_reset_sob_group,
+ .get_device_time = gaudi2_get_device_time,
+ .pb_print_security_errors = gaudi2_pb_print_security_errors,
+ .collective_wait_init_cs = gaudi2_collective_wait_init_cs,
+ .collective_wait_create_jobs = gaudi2_collective_wait_create_jobs,
+ .get_dec_base_addr = gaudi2_get_dec_base_addr,
+ .scramble_addr = gaudi2_mmu_scramble_addr,
+ .descramble_addr = gaudi2_mmu_descramble_addr,
+ .ack_protection_bits_errors = gaudi2_ack_protection_bits_errors,
+ .get_hw_block_id = gaudi2_get_hw_block_id,
+ .hw_block_mmap = gaudi2_block_mmap,
+ .enable_events_from_fw = gaudi2_enable_events_from_fw,
+ .ack_mmu_errors = gaudi2_ack_mmu_page_fault_or_access_error,
+ .get_msi_info = gaudi2_get_msi_info,
+ .map_pll_idx_to_fw_idx = gaudi2_map_pll_idx_to_fw_idx,
+ .init_firmware_preload_params = gaudi2_init_firmware_preload_params,
+ .init_firmware_loader = gaudi2_init_firmware_loader,
+ .init_cpu_scrambler_dram = gaudi2_init_scrambler_hbm,
+ .state_dump_init = gaudi2_state_dump_init,
+ .get_sob_addr = &gaudi2_get_sob_addr,
+ .set_pci_memory_regions = gaudi2_set_pci_memory_regions,
+ .get_stream_master_qid_arr = gaudi2_get_stream_master_qid_arr,
+ .check_if_razwi_happened = gaudi2_check_if_razwi_happened,
+ .mmu_get_real_page_size = gaudi2_mmu_get_real_page_size,
+ .access_dev_mem = hl_access_dev_mem,
+ .set_dram_bar_base = gaudi2_set_hbm_bar_base,
+};
+
+void gaudi2_set_asic_funcs(struct hl_device *hdev)
+{
+ hdev->asic_funcs = &gaudi2_funcs;
+}
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/misc/habanalabs/gaudi2/gaudi2P.h
new file mode 100644
index 000000000000..e4bc4009f05b
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2P.h
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI2P_H_
+#define GAUDI2P_H_
+
+#include <uapi/misc/habanalabs.h>
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/gaudi2/gaudi2.h"
+#include "../include/gaudi2/gaudi2_packets.h"
+#include "../include/gaudi2/gaudi2_fw_if.h"
+#include "../include/gaudi2/gaudi2_async_events.h"
+#include "../include/gaudi2/gaudi2_async_virt_events.h"
+
+#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
+#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
+
+#define MMU_PAGE_TABLES_INITIAL_SIZE 0x10000000 /* 256MB */
+
+#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */
+
+#define GAUDI2_FPGA_CPU_TIMEOUT 100000000 /* 100s */
+
+#define NUMBER_OF_PDMA_QUEUES 2
+#define NUMBER_OF_EDMA_QUEUES 8
+#define NUMBER_OF_MME_QUEUES 4
+#define NUMBER_OF_TPC_QUEUES 25
+#define NUMBER_OF_NIC_QUEUES 24
+#define NUMBER_OF_ROT_QUEUES 2
+#define NUMBER_OF_CPU_QUEUES 1
+
+#define NUMBER_OF_HW_QUEUES ((NUMBER_OF_PDMA_QUEUES + \
+ NUMBER_OF_EDMA_QUEUES + \
+ NUMBER_OF_MME_QUEUES + \
+ NUMBER_OF_TPC_QUEUES + \
+ NUMBER_OF_NIC_QUEUES + \
+ NUMBER_OF_ROT_QUEUES + \
+ NUMBER_OF_CPU_QUEUES) * \
+ NUM_OF_PQ_PER_QMAN)
+
+#define NUMBER_OF_QUEUES (NUMBER_OF_CPU_QUEUES + NUMBER_OF_HW_QUEUES)
+
+#define DCORE_NUM_OF_SOB \
+ (((mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8191 - \
+ mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
+
+#define DCORE_NUM_OF_MONITORS \
+ (((mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2047 - \
+ mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
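+
+/*
+ * Both counts above follow from the register map, assuming a 4-byte register
+ * stride: (last - first + 4) / 4 counts the registers inclusively, so
+ * SOB_OBJ_0..SOB_OBJ_8191 yields 8192 sync objects per dcore and
+ * MON_STATUS_0..MON_STATUS_2047 yields 2048 monitors per dcore.
+ */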
+
+#define NUMBER_OF_DEC ((NUM_OF_DEC_PER_DCORE * NUM_OF_DCORES) + NUMBER_OF_PCIE_DEC)
+
+/* Map all arcs dccm + arc schedulers acp blocks */
+#define NUM_OF_USER_ACP_BLOCKS (NUM_OF_SCHEDULER_ARC + 2)
+#define NUM_OF_USER_NIC_UMR_BLOCKS 15
+#define NUM_OF_EXPOSED_SM_BLOCKS ((NUM_OF_DCORES - 1) * 2)
+#define NUM_USER_MAPPED_BLOCKS \
+ (NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
+ NUM_OF_EXPOSED_SM_BLOCKS + \
+ (NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))
+
+/* Within the user mapped array, decoder entries start after all the
+ * ARC-related entries.
+ */
+#define USR_MAPPED_BLK_DEC_START_IDX \
+ (NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + \
+ (NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))
+
+#define USR_MAPPED_BLK_SM_START_IDX \
+ (NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
+ (NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))
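+
+/*
+ * Resulting index layout of the user mapped blocks array (a sketch derived
+ * from the start-index defines above; the exact ordering of the ARC, ACP and
+ * NIC UMR entries below the decoder index is set where the array is
+ * populated):
+ *   [0 .. USR_MAPPED_BLK_DEC_START_IDX - 1]  ARC DCCMs, ACP blocks, NIC UMRs
+ *   [USR_MAPPED_BLK_DEC_START_IDX ..]        NUMBER_OF_DEC decoder blocks
+ *   [USR_MAPPED_BLK_SM_START_IDX ..]         NUM_OF_EXPOSED_SM_BLOCKS SM blocks
+ */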
+
+#define SM_OBJS_BLOCK_SIZE (mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - \
+ mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0)
+
+#define GAUDI2_MAX_PENDING_CS 64
+
+#if !IS_MAX_PENDING_CS_VALID(GAUDI2_MAX_PENDING_CS)
+#error "GAUDI2_MAX_PENDING_CS must be a power of 2 and greater than 1"
+#endif
+
+#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define GAUDI2_PREBOOT_REQ_TIMEOUT_USEC 25000000 /* 25s */
+
+#define GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC 10000000 /* 10s */
+
+#define GAUDI2_NIC_CLK_FREQ 450000000ull /* 450 MHz */
+
+#define DC_POWER_DEFAULT 60000 /* 60W */
+
+#define GAUDI2_HBM_NUM 6
+
+#define DMA_MAX_TRANSFER_SIZE U32_MAX
+
+#define GAUDI2_DEFAULT_CARD_NAME "HL225"
+
+#define QMAN_STREAMS 4
+#define PQ_FETCHER_CACHE_SIZE 8
+#define NUM_OF_MME_SBTE_PORTS 5
+#define NUM_OF_MME_WB_PORTS 2
+
+#define GAUDI2_ENGINE_ID_DCORE_OFFSET \
+ (GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
+
+/* DRAM Memory Map */
+
+#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
+
+/* This define should be used only when working in a debug mode without dram.
+ * When working with dram, the driver size will be calculated dynamically.
+ */
+#define NIC_DEFAULT_DRV_SIZE 0x20000000 /* 512MB */
+
+#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+
+#define NIC_NUMBER_OF_PORTS NIC_NUMBER_OF_ENGINES
+
+#define NUMBER_OF_PCIE_DEC 2
+#define PCIE_DEC_SHIFT 8
+
+#define SRAM_USER_BASE_OFFSET 0
+
+/* cluster binning */
+#define MAX_FAULTY_HBMS 1
+#define GAUDI2_XBAR_EDGE_FULL_MASK 0xF
+#define GAUDI2_EDMA_FULL_MASK 0xFF
+#define GAUDI2_DRAM_FULL_MASK 0x3F
+
+/* Host virtual address space. */
+
+#define VA_HOST_SPACE_PAGE_START 0xFFF0000000000000ull
+#define VA_HOST_SPACE_PAGE_END 0xFFF0800000000000ull /* 140TB */
+
+#define VA_HOST_SPACE_HPAGE_START 0xFFF0800000000000ull
+#define VA_HOST_SPACE_HPAGE_END 0xFFF1000000000000ull /* 140TB */
+
+#define VA_HOST_SPACE_USER_MAPPED_CB_START 0xFFF1000000000000ull
+#define VA_HOST_SPACE_USER_MAPPED_CB_END 0xFFF1000100000000ull /* 4GB */
+
+/* 140TB */
+#define VA_HOST_SPACE_PAGE_SIZE (VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)
+
+/* 140TB */
+#define VA_HOST_SPACE_HPAGE_SIZE (VA_HOST_SPACE_HPAGE_END - VA_HOST_SPACE_HPAGE_START)
+
+#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_PAGE_SIZE + VA_HOST_SPACE_HPAGE_SIZE)
+
+#define HOST_SPACE_INTERNAL_CB_SZ SZ_2M
+
+/*
+ * HBM virtual address space
+ * Gaudi2 has 6 HBM devices, each supporting 16GB, for a total of at most 96GB.
+ * No core separation is supported, so we can have one chunk of virtual address
+ * space just above the physical one.
+ * The virtual address space starts immediately after the end of the physical
+ * address space, which is determined at run-time.
+ */
+#define VA_HBM_SPACE_END 0x1002000000000000ull
+
+#define HW_CAP_PLL BIT_ULL(0)
+#define HW_CAP_DRAM BIT_ULL(1)
+#define HW_CAP_PMMU BIT_ULL(2)
+#define HW_CAP_CPU BIT_ULL(3)
+#define HW_CAP_MSIX BIT_ULL(4)
+
+#define HW_CAP_CPU_Q BIT_ULL(5)
+#define HW_CAP_CPU_Q_SHIFT 5
+
+#define HW_CAP_CLK_GATE BIT_ULL(6)
+#define HW_CAP_KDMA BIT_ULL(7)
+#define HW_CAP_SRAM_SCRAMBLER BIT_ULL(8)
+
+#define HW_CAP_DCORE0_DMMU0 BIT_ULL(9)
+#define HW_CAP_DCORE0_DMMU1 BIT_ULL(10)
+#define HW_CAP_DCORE0_DMMU2 BIT_ULL(11)
+#define HW_CAP_DCORE0_DMMU3 BIT_ULL(12)
+#define HW_CAP_DCORE1_DMMU0 BIT_ULL(13)
+#define HW_CAP_DCORE1_DMMU1 BIT_ULL(14)
+#define HW_CAP_DCORE1_DMMU2 BIT_ULL(15)
+#define HW_CAP_DCORE1_DMMU3 BIT_ULL(16)
+#define HW_CAP_DCORE2_DMMU0 BIT_ULL(17)
+#define HW_CAP_DCORE2_DMMU1 BIT_ULL(18)
+#define HW_CAP_DCORE2_DMMU2 BIT_ULL(19)
+#define HW_CAP_DCORE2_DMMU3 BIT_ULL(20)
+#define HW_CAP_DCORE3_DMMU0 BIT_ULL(21)
+#define HW_CAP_DCORE3_DMMU1 BIT_ULL(22)
+#define HW_CAP_DCORE3_DMMU2 BIT_ULL(23)
+#define HW_CAP_DCORE3_DMMU3 BIT_ULL(24)
+#define HW_CAP_DMMU_MASK GENMASK_ULL(24, 9)
+#define HW_CAP_DMMU_SHIFT 9
+#define HW_CAP_PDMA_MASK BIT_ULL(26)
+#define HW_CAP_EDMA_MASK GENMASK_ULL(34, 27)
+#define HW_CAP_EDMA_SHIFT 27
+#define HW_CAP_MME_MASK GENMASK_ULL(38, 35)
+#define HW_CAP_MME_SHIFT 35
+#define HW_CAP_ROT_MASK GENMASK_ULL(40, 39)
+#define HW_CAP_ROT_SHIFT 39
+#define HW_CAP_HBM_SCRAMBLER_HW_RESET BIT_ULL(41)
+#define HW_CAP_HBM_SCRAMBLER_SW_RESET BIT_ULL(42)
+#define HW_CAP_HBM_SCRAMBLER_MASK (HW_CAP_HBM_SCRAMBLER_HW_RESET | \
+ HW_CAP_HBM_SCRAMBLER_SW_RESET)
+#define HW_CAP_HBM_SCRAMBLER_SHIFT 41
+#define HW_CAP_RESERVED BIT(43)
+#define HW_CAP_MMU_MASK (HW_CAP_PMMU | HW_CAP_DMMU_MASK)
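+
+/*
+ * The HMMU capability bit of a given dcore/HMMU pair can also be computed
+ * (a sketch assuming NUM_OF_HMMU_PER_DCORE is 4, matching the per-dcore
+ * defines above):
+ *
+ *	cap = BIT_ULL(HW_CAP_DMMU_SHIFT + (dcore_id * 4) + hmmu_id);
+ *
+ * e.g. dcore 2 / HMMU 1 gives bit 9 + 8 + 1 = 18, i.e. HW_CAP_DCORE2_DMMU1.
+ */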
+
+/* Range Registers */
+#define RR_TYPE_SHORT 0
+#define RR_TYPE_LONG 1
+#define RR_TYPE_SHORT_PRIV 2
+#define RR_TYPE_LONG_PRIV 3
+#define NUM_SHORT_LBW_RR 14
+#define NUM_LONG_LBW_RR 4
+#define NUM_SHORT_HBW_RR 6
+#define NUM_LONG_HBW_RR 4
+
+/* RAZWI initiator coordinates: X - 5 bits, Y - 4 bits */
+#define RAZWI_INITIATOR_X_SHIFT 0
+#define RAZWI_INITIATOR_X_MASK 0x1F
+#define RAZWI_INITIATOR_Y_SHIFT 5
+#define RAZWI_INITIATOR_Y_MASK 0xF
+
+#define RTR_ID_X_Y(x, y) \
+ ((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
+ (((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
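+
+/*
+ * Example: RTR_ID_X_Y(5, 3) packs X = 5 into bits [4:0] and Y = 3 into
+ * bits [8:5], producing (3 << 5) | 5 = 0x65.
+ */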
+
+/* decoders have a separate mask */
+#define HW_CAP_DEC_SHIFT 0
+#define HW_CAP_DEC_MASK GENMASK_ULL(9, 0)
+
+/* TPCs have a separate mask */
+#define HW_CAP_TPC_SHIFT 0
+#define HW_CAP_TPC_MASK GENMASK_ULL(24, 0)
+
+/* NICs have a separate mask */
+#define HW_CAP_NIC_SHIFT 0
+#define HW_CAP_NIC_MASK GENMASK_ULL(NIC_NUMBER_OF_ENGINES - 1, 0)
+
+#define GAUDI2_ARC_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 28)) >> 28)
+
+#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
+ FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
+
+enum gaudi2_reserved_sob_id {
+ GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
+ GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
+ GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
+ GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
+ GAUDI2_RESERVED_SOB_DEC_NRM_FIRST,
+ GAUDI2_RESERVED_SOB_DEC_NRM_LAST =
+ GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + NUMBER_OF_DEC - 1,
+ GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST,
+ GAUDI2_RESERVED_SOB_DEC_ABNRM_LAST =
+ GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + NUMBER_OF_DEC - 1,
+ GAUDI2_RESERVED_SOB_NUMBER
+};
+
+enum gaudi2_reserved_mon_id {
+ GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST,
+ GAUDI2_RESERVED_MON_CS_COMPLETION_LAST =
+ GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
+ GAUDI2_RESERVED_MON_KDMA_COMPLETION,
+ GAUDI2_RESERVED_MON_DEC_NRM_FIRST,
+ GAUDI2_RESERVED_MON_DEC_NRM_LAST =
+ GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * NUMBER_OF_DEC - 1,
+ GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST,
+ GAUDI2_RESERVED_MON_DEC_ABNRM_LAST =
+ GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * NUMBER_OF_DEC - 1,
+ GAUDI2_RESERVED_MON_NUMBER
+};
+
+enum gaudi2_reserved_cq_id {
+ GAUDI2_RESERVED_CQ_CS_COMPLETION,
+ GAUDI2_RESERVED_CQ_KDMA_COMPLETION,
+ GAUDI2_RESERVED_CQ_NUMBER
+};
+
+/*
+ * Gaudi2 substitute TPCs numbering
+ * At most two faulty TPCs are allowed.
+ * The first replacement for a faulty TPC will be TPC24, the second TPC23.
+ */
+enum substitude_tpc {
+ FAULTY_TPC_SUBTS_1_TPC_24,
+ FAULTY_TPC_SUBTS_2_TPC_23,
+ MAX_FAULTY_TPCS
+};
+
+enum gaudi2_dma_core_id {
+ DMA_CORE_ID_PDMA0, /* Dcore 0 */
+ DMA_CORE_ID_PDMA1, /* Dcore 0 */
+ DMA_CORE_ID_EDMA0, /* Dcore 0 */
+ DMA_CORE_ID_EDMA1, /* Dcore 0 */
+ DMA_CORE_ID_EDMA2, /* Dcore 1 */
+ DMA_CORE_ID_EDMA3, /* Dcore 1 */
+ DMA_CORE_ID_EDMA4, /* Dcore 2 */
+ DMA_CORE_ID_EDMA5, /* Dcore 2 */
+ DMA_CORE_ID_EDMA6, /* Dcore 3 */
+ DMA_CORE_ID_EDMA7, /* Dcore 3 */
+ DMA_CORE_ID_KDMA, /* Dcore 0 */
+ DMA_CORE_ID_SIZE
+};
+
+enum gaudi2_rotator_id {
+ ROTATOR_ID_0,
+ ROTATOR_ID_1,
+ ROTATOR_ID_SIZE,
+};
+
+enum gaudi2_mme_id {
+ MME_ID_DCORE0,
+ MME_ID_DCORE1,
+ MME_ID_DCORE2,
+ MME_ID_DCORE3,
+ MME_ID_SIZE,
+};
+
+enum gaudi2_tpc_id {
+ TPC_ID_DCORE0_TPC0,
+ TPC_ID_DCORE0_TPC1,
+ TPC_ID_DCORE0_TPC2,
+ TPC_ID_DCORE0_TPC3,
+ TPC_ID_DCORE0_TPC4,
+ TPC_ID_DCORE0_TPC5,
+ TPC_ID_DCORE1_TPC0,
+ TPC_ID_DCORE1_TPC1,
+ TPC_ID_DCORE1_TPC2,
+ TPC_ID_DCORE1_TPC3,
+ TPC_ID_DCORE1_TPC4,
+ TPC_ID_DCORE1_TPC5,
+ TPC_ID_DCORE2_TPC0,
+ TPC_ID_DCORE2_TPC1,
+ TPC_ID_DCORE2_TPC2,
+ TPC_ID_DCORE2_TPC3,
+ TPC_ID_DCORE2_TPC4,
+ TPC_ID_DCORE2_TPC5,
+ TPC_ID_DCORE3_TPC0,
+ TPC_ID_DCORE3_TPC1,
+ TPC_ID_DCORE3_TPC2,
+ TPC_ID_DCORE3_TPC3,
+ TPC_ID_DCORE3_TPC4,
+ TPC_ID_DCORE3_TPC5,
+ /* the PCI TPC is placed last (mapped like HW) */
+ TPC_ID_DCORE0_TPC6,
+ TPC_ID_SIZE,
+};
+
+enum gaudi2_dec_id {
+ DEC_ID_DCORE0_DEC0,
+ DEC_ID_DCORE0_DEC1,
+ DEC_ID_DCORE1_DEC0,
+ DEC_ID_DCORE1_DEC1,
+ DEC_ID_DCORE2_DEC0,
+ DEC_ID_DCORE2_DEC1,
+ DEC_ID_DCORE3_DEC0,
+ DEC_ID_DCORE3_DEC1,
+ DEC_ID_PCIE_VDEC0,
+ DEC_ID_PCIE_VDEC1,
+ DEC_ID_SIZE,
+};
+
+enum gaudi2_hbm_id {
+ HBM_ID0,
+ HBM_ID1,
+ HBM_ID2,
+ HBM_ID3,
+ HBM_ID4,
+ HBM_ID5,
+ HBM_ID_SIZE,
+};
+
+/* specific EDMA enumeration */
+enum gaudi2_edma_id {
+ EDMA_ID_DCORE0_INSTANCE0,
+ EDMA_ID_DCORE0_INSTANCE1,
+ EDMA_ID_DCORE1_INSTANCE0,
+ EDMA_ID_DCORE1_INSTANCE1,
+ EDMA_ID_DCORE2_INSTANCE0,
+ EDMA_ID_DCORE2_INSTANCE1,
+ EDMA_ID_DCORE3_INSTANCE0,
+ EDMA_ID_DCORE3_INSTANCE1,
+ EDMA_ID_SIZE,
+};
+
+/* User interrupt count is aligned with HW CQ count.
+ * We have 64 CQs per dcore; CQ0 in dcore 0 is reserved for legacy mode.
+ */
+#define GAUDI2_NUM_USER_INTERRUPTS 255
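+
+/*
+ * The value above follows from the comment: assuming NUM_OF_DCORES is 4,
+ * there are 4 * 64 = 256 completion queues, and reserving CQ0 of dcore 0 for
+ * legacy mode leaves 255 user interrupts.
+ */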
+
+enum gaudi2_irq_num {
+ GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
+ GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM,
+ GAUDI2_IRQ_NUM_DCORE0_DEC0_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE0_DEC1_NRM,
+ GAUDI2_IRQ_NUM_DCORE0_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE1_DEC0_NRM,
+ GAUDI2_IRQ_NUM_DCORE1_DEC0_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE1_DEC1_NRM,
+ GAUDI2_IRQ_NUM_DCORE1_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE2_DEC0_NRM,
+ GAUDI2_IRQ_NUM_DCORE2_DEC0_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE2_DEC1_NRM,
+ GAUDI2_IRQ_NUM_DCORE2_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE3_DEC0_NRM,
+ GAUDI2_IRQ_NUM_DCORE3_DEC0_ABNRM,
+ GAUDI2_IRQ_NUM_DCORE3_DEC1_NRM,
+ GAUDI2_IRQ_NUM_DCORE3_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_SHARED_DEC0_NRM,
+ GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
+ GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
+ GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_COMPLETION,
+ GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
+ GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
+ GAUDI2_IRQ_NUM_RESERVED_FIRST,
+ GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_USER_INTERRUPTS - 1),
+ GAUDI2_IRQ_NUM_USER_FIRST,
+ GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
+ GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
+};
+
+static_assert(GAUDI2_IRQ_NUM_USER_FIRST > GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM);
+
+/**
+ * struct dup_block_ctx - context to initialize unit instances across multiple
+ * blocks, where a block can be either a dcore or a
+ * duplicated common module. This code relies on constant
+ * offsets of blocks and unit instances in a block.
+ * @instance_cfg_fn: instance specific configuration function.
+ * @data: private configuration data.
+ * @base: base address of the first instance in the first block.
+ * @block_off: address spacing between subsequent blocks.
+ * @instance_off: address spacing between subsequent instances within a block.
+ * @enabled_mask: mask of enabled instances (1- enabled, 0- disabled).
+ * @blocks: number of blocks.
+ * @instances: unit instances per block.
+ */
+struct dup_block_ctx {
+ void (*instance_cfg_fn)(struct hl_device *hdev, u64 base, void *data);
+ void *data;
+ u64 base;
+ u64 block_off;
+ u64 instance_off;
+ u64 enabled_mask;
+ unsigned int blocks;
+ unsigned int instances;
+};
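+
+/*
+ * A minimal usage sketch (the callback and the base/spacing values below are
+ * hypothetical; real callers pass their own configuration routine and data):
+ *
+ *	static void cfg_one_instance(struct hl_device *hdev, u64 base, void *data)
+ *	{
+ *		// program the unit instance whose register block starts at 'base'
+ *	}
+ *
+ *	struct dup_block_ctx ctx = {
+ *		.instance_cfg_fn = cfg_one_instance,
+ *		.base = first_instance_base,
+ *		.block_off = DCORE_OFFSET,
+ *		.instance_off = instance_spacing,
+ *		.enabled_mask = GAUDI2_EDMA_FULL_MASK,
+ *		.blocks = NUM_OF_DCORES,
+ *		.instances = NUM_OF_EDMA_PER_DCORE,
+ *	};
+ *
+ *	gaudi2_init_blocks(hdev, &ctx);
+ */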
+
+/**
+ * struct gaudi2_device - ASIC specific manage structure.
+ * @cpucp_info_get: get information on device from CPU-CP
+ * @mapped_blocks: array that holds the base address and size of all blocks
+ * the user can map.
+ * @lfsr_rand_seeds: array of MME ACC random seeds to set.
+ * @hw_queues_lock: protects the H/W queues from concurrent access.
+ * @kdma_lock: protects the KDMA engine from concurrent access.
+ * @scratchpad_kernel_address: general purpose PAGE_SIZE contiguous memory,
+ * this memory region should be write-only.
+ * currently used for HBW QMAN writes which is
+ * redundant.
+ * @scratchpad_bus_address: scratchpad bus address
+ * @virt_msix_db_cpu_addr: host memory page for the virtual MSI-X doorbell.
+ * @virt_msix_db_dma_addr: bus address of the page for the virtual MSI-X doorbell.
+ * @dram_bar_cur_addr: current address of DRAM PCI bar.
+ * @hw_cap_initialized: This field contains a bit per H/W engine. When that
+ * engine is initialized, that bit is set by the driver to
+ * signal we can use this engine in later code paths.
+ * Each bit is cleared upon reset of its corresponding H/W
+ * engine.
+ * @active_hw_arc: This field contains a bit per ARC of an H/W engine with
+ * exception of TPC and NIC engines. Once an engine arc is
+ * initialized, its respective bit is set. Driver can uniquely
+ * identify each initialized ARC and use this information in
+ * later code paths. Each respective bit is cleared upon reset
+ * of its corresponding ARC of the H/W engine.
+ * @dec_hw_cap_initialized: This field contains a bit per decoder H/W engine.
+ * When that engine is initialized, that bit is set by
+ * the driver to signal we can use this engine in later
+ * code paths.
+ * Each bit is cleared upon reset of its corresponding H/W
+ * engine.
+ * @tpc_hw_cap_initialized: This field contains a bit per TPC H/W engine.
+ * When that engine is initialized, that bit is set by
+ * the driver to signal we can use this engine in later
+ * code paths.
+ * Each bit is cleared upon reset of its corresponding H/W
+ * engine.
+ * @active_tpc_arc: This field contains a bit per ARC of the TPC engines.
+ * Once an engine arc is initialized, its respective bit is
+ * set. Each respective bit is cleared upon reset of its
+ * corresponding ARC of the TPC engine.
+ * @nic_hw_cap_initialized: This field contains a bit per nic H/W engine.
+ * @active_nic_arc: This field contains a bit per ARC of the NIC engines.
+ * Once an engine arc is initialized, its respective bit is
+ * set. Each respective bit is cleared upon reset of its
+ * corresponding ARC of the NIC engine.
+ * @hw_events: array that holds all H/W events that are defined valid.
+ * @events_stat: array that holds histogram of all received events.
+ * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset.
+ * @num_of_valid_hw_events: used to hold the number of valid H/W events.
+ * @nic_ports: array that holds all NIC ports manage structures.
+ * @nic_macros: array that holds all NIC macro manage structures.
+ * @core_info: core info to be used by the Ethernet driver.
+ * @aux_ops: functions for core <-> aux drivers communication.
+ * @flush_db_fifo: flag to force flush DB FIFO after a write.
+ * @hbm_cfg: HBM subsystem settings
+ * @hw_queues_lock_mutex: used by simulator instead of hw_queues_lock.
+ * @kdma_lock_mutex: used by simulator instead of kdma_lock.
+ * @use_deprecated_event_mappings: use old event mappings which are about to be
+ * deprecated
+ */
+struct gaudi2_device {
+ int (*cpucp_info_get)(struct hl_device *hdev);
+
+ struct user_mapped_block mapped_blocks[NUM_USER_MAPPED_BLOCKS];
+ int lfsr_rand_seeds[MME_NUM_OF_LFSR_SEEDS];
+
+ spinlock_t hw_queues_lock;
+ spinlock_t kdma_lock;
+
+ void *scratchpad_kernel_address;
+ dma_addr_t scratchpad_bus_address;
+
+ void *virt_msix_db_cpu_addr;
+ dma_addr_t virt_msix_db_dma_addr;
+
+ u64 dram_bar_cur_addr;
+ u64 hw_cap_initialized;
+ u64 active_hw_arc;
+ u64 dec_hw_cap_initialized;
+ u64 tpc_hw_cap_initialized;
+ u64 active_tpc_arc;
+ u64 nic_hw_cap_initialized;
+ u64 active_nic_arc;
+ u32 hw_events[GAUDI2_EVENT_SIZE];
+ u32 events_stat[GAUDI2_EVENT_SIZE];
+ u32 events_stat_aggregate[GAUDI2_EVENT_SIZE];
+ u32 num_of_valid_hw_events;
+};
+
+extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE];
+extern const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE];
+extern const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE];
+extern const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE];
+extern const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES];
+extern const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE];
+
+void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx);
+int gaudi2_coresight_init(struct hl_device *hdev);
+int gaudi2_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
+void gaudi2_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
+void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx);
+bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id);
+void gaudi2_write_rr_to_all_lbw_rtrs(struct hl_device *hdev, u8 rr_type, u32 rr_index, u64 min_val,
+ u64 max_val);
+void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32 cause,
+ u32 offended_addr);
+int gaudi2_init_security(struct hl_device *hdev);
+void gaudi2_ack_protection_bits_errors(struct hl_device *hdev);
+
+#endif /* GAUDI2P_H_ */
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c
new file mode 100644
index 000000000000..56c6ab692482
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c
@@ -0,0 +1,2720 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2019-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+#include "gaudi2_coresight_regs.h"
+#include <uapi/misc/habanalabs.h>
+
+#define GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 2000)
+#define SPMU_MAX_COUNTERS 6
+
+#define COMPONENT_ID_INVALID ((u32)(-1))
+#define MAX_BMONS_PER_UNIT 8
+
+enum gaudi2_hif_hmmu_id {
+ HMMU_ID_DCORE0_HMMU0,
+ HMMU_ID_DCORE0_HMMU1,
+ HMMU_ID_DCORE0_HMMU2,
+ HMMU_ID_DCORE0_HMMU3,
+ HMMU_ID_DCORE1_HMMU0,
+ HMMU_ID_DCORE1_HMMU1,
+ HMMU_ID_DCORE1_HMMU2,
+ HMMU_ID_DCORE1_HMMU3,
+ HMMU_ID_DCORE2_HMMU0,
+ HMMU_ID_DCORE2_HMMU1,
+ HMMU_ID_DCORE2_HMMU2,
+ HMMU_ID_DCORE2_HMMU3,
+ HMMU_ID_DCORE3_HMMU0,
+ HMMU_ID_DCORE3_HMMU1,
+ HMMU_ID_DCORE3_HMMU2,
+ HMMU_ID_DCORE3_HMMU3,
+ HMMU_ID_SIZE,
+};
+
+enum gaudi2_xbar_edge_id {
+ XBAR_EDGE_ID_DCORE0,
+ XBAR_EDGE_ID_DCORE1,
+ XBAR_EDGE_ID_DCORE2,
+ XBAR_EDGE_ID_DCORE3,
+ XBAR_EDGE_ID_SIZE
+};
+
+/**
+ * struct component_config_offsets - per cs_dbg unit - view of all related component indices
+ * @funnel_id: funnel id - index in debug_funnel_regs
+ * @etf_id: etf id - index in debug_etf_regs
+ * @stm_id: stm id - index in debug_stm_regs
+ * @spmu_id: spmu_id - index in debug_spmu_regs
+ * @bmon_count: number of bmons per unit
+ * @bmon_ids: array of bmon id (max size - MAX_BMONS_PER_UNIT) index in debug_bmon_regs
+ */
+struct component_config_offsets {
+ u32 funnel_id;
+ u32 etf_id;
+ u32 stm_id;
+ u32 spmu_id;
+ u32 bmon_count;
+ u32 bmon_ids[MAX_BMONS_PER_UNIT];
+};
+
+static u64 debug_stm_regs[GAUDI2_STM_LAST + 1] = {
+ [GAUDI2_STM_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_STM_BASE,
+ [GAUDI2_STM_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_STM_BASE,
+ [GAUDI2_STM_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_STM_BASE,
+ [GAUDI2_STM_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_STM_BASE,
+ [GAUDI2_STM_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_STM_BASE,
+ [GAUDI2_STM_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_STM_BASE,
+ [GAUDI2_STM_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_STM_BASE,
+ [GAUDI2_STM_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_STM_BASE,
+ [GAUDI2_STM_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_STM_BASE,
+ [GAUDI2_STM_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_STM_BASE,
+ [GAUDI2_STM_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_STM_BASE,
+ [GAUDI2_STM_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_STM_BASE,
+ [GAUDI2_STM_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_STM_BASE,
+ [GAUDI2_STM_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_STM_BASE,
+ [GAUDI2_STM_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_STM_BASE,
+ [GAUDI2_STM_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_STM_BASE,
+ [GAUDI2_STM_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_STM_BASE,
+ [GAUDI2_STM_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_STM_BASE,
+ [GAUDI2_STM_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_STM_BASE,
+ [GAUDI2_STM_DCORE0_HMMU0_CS] = mmDCORE0_HMMU0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_HMMU1_CS] = mmDCORE0_HMMU1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_HMMU2_CS] = mmDCORE0_HMMU2_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_HMMU3_CS] = mmDCORE0_HMMU3_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_CTRL] = mmDCORE0_MME_CTRL_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_SBTE0] = mmDCORE0_MME_SBTE0_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_SBTE1] = mmDCORE0_MME_SBTE1_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_SBTE2] = mmDCORE0_MME_SBTE2_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_SBTE3] = mmDCORE0_MME_SBTE3_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_SBTE4] = mmDCORE0_MME_SBTE4_STM_BASE,
+ [GAUDI2_STM_DCORE0_MME_ACC] = mmDCORE0_MME_ACC_STM_BASE,
+ [GAUDI2_STM_DCORE0_SM] = mmDCORE0_SM_STM_BASE,
+ [GAUDI2_STM_DCORE0_EDMA0_CS] = mmDCORE0_EDMA0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_EDMA1_CS] = mmDCORE0_EDMA1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_VDEC0_CS] = mmDCORE0_VDEC0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE0_VDEC1_CS] = mmDCORE0_VDEC1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_HMMU0_CS] = mmDCORE1_HMMU0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_HMMU1_CS] = mmDCORE1_HMMU1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_HMMU2_CS] = mmDCORE1_HMMU2_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_HMMU3_CS] = mmDCORE1_HMMU3_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_CTRL] = mmDCORE1_MME_CTRL_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_SBTE0] = mmDCORE1_MME_SBTE0_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_SBTE1] = mmDCORE1_MME_SBTE1_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_SBTE2] = mmDCORE1_MME_SBTE2_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_SBTE3] = mmDCORE1_MME_SBTE3_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_SBTE4] = mmDCORE1_MME_SBTE4_STM_BASE,
+ [GAUDI2_STM_DCORE1_MME_ACC] = mmDCORE1_MME_ACC_STM_BASE,
+ [GAUDI2_STM_DCORE1_SM] = mmDCORE1_SM_STM_BASE,
+ [GAUDI2_STM_DCORE1_EDMA0_CS] = mmDCORE1_EDMA0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_EDMA1_CS] = mmDCORE1_EDMA1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_VDEC0_CS] = mmDCORE1_VDEC0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE1_VDEC1_CS] = mmDCORE1_VDEC1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_HMMU0_CS] = mmDCORE2_HMMU0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_HMMU1_CS] = mmDCORE2_HMMU1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_HMMU2_CS] = mmDCORE2_HMMU2_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_HMMU3_CS] = mmDCORE2_HMMU3_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_CTRL] = mmDCORE2_MME_CTRL_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_SBTE0] = mmDCORE2_MME_SBTE0_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_SBTE1] = mmDCORE2_MME_SBTE1_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_SBTE2] = mmDCORE2_MME_SBTE2_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_SBTE3] = mmDCORE2_MME_SBTE3_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_SBTE4] = mmDCORE2_MME_SBTE4_STM_BASE,
+ [GAUDI2_STM_DCORE2_MME_ACC] = mmDCORE2_MME_ACC_STM_BASE,
+ [GAUDI2_STM_DCORE2_SM] = mmDCORE2_SM_STM_BASE,
+ [GAUDI2_STM_DCORE2_EDMA0_CS] = mmDCORE2_EDMA0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_EDMA1_CS] = mmDCORE2_EDMA1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_VDEC0_CS] = mmDCORE2_VDEC0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE2_VDEC1_CS] = mmDCORE2_VDEC1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_HMMU0_CS] = mmDCORE3_HMMU0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_HMMU1_CS] = mmDCORE3_HMMU1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_HMMU2_CS] = mmDCORE3_HMMU2_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_HMMU3_CS] = mmDCORE3_HMMU3_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_CTRL] = mmDCORE3_MME_CTRL_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_SBTE0] = mmDCORE3_MME_SBTE0_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_SBTE1] = mmDCORE3_MME_SBTE1_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_SBTE2] = mmDCORE3_MME_SBTE2_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_SBTE3] = mmDCORE3_MME_SBTE3_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_SBTE4] = mmDCORE3_MME_SBTE4_STM_BASE,
+ [GAUDI2_STM_DCORE3_MME_ACC] = mmDCORE3_MME_ACC_STM_BASE,
+ [GAUDI2_STM_DCORE3_SM] = mmDCORE3_SM_STM_BASE,
+ [GAUDI2_STM_DCORE3_EDMA0_CS] = mmDCORE3_EDMA0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_EDMA1_CS] = mmDCORE3_EDMA1_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_VDEC0_CS] = mmDCORE3_VDEC0_CS_STM_BASE,
+ [GAUDI2_STM_DCORE3_VDEC1_CS] = mmDCORE3_VDEC1_CS_STM_BASE,
+ [GAUDI2_STM_PCIE] = mmPCIE_STM_BASE,
+ [GAUDI2_STM_PSOC] = mmPSOC_STM_BASE,
+ [GAUDI2_STM_PSOC_ARC0_CS] = mmPSOC_ARC0_CS_STM_BASE,
+ [GAUDI2_STM_PSOC_ARC1_CS] = mmPSOC_ARC1_CS_STM_BASE,
+ [GAUDI2_STM_PDMA0_CS] = mmPDMA0_CS_STM_BASE,
+ [GAUDI2_STM_PDMA1_CS] = mmPDMA1_CS_STM_BASE,
+ [GAUDI2_STM_CPU] = mmCPU_STM_BASE,
+ [GAUDI2_STM_PMMU_CS] = mmPMMU_CS_STM_BASE,
+ [GAUDI2_STM_ROT0_CS] = mmROT0_CS_STM_BASE,
+ [GAUDI2_STM_ROT1_CS] = mmROT1_CS_STM_BASE,
+ [GAUDI2_STM_ARC_FARM_CS] = mmARC_FARM_CS_STM_BASE,
+ [GAUDI2_STM_KDMA_CS] = mmKDMA_CS_STM_BASE,
+ [GAUDI2_STM_PCIE_VDEC0_CS] = mmPCIE_VDEC0_CS_STM_BASE,
+ [GAUDI2_STM_PCIE_VDEC1_CS] = mmPCIE_VDEC1_CS_STM_BASE,
+ [GAUDI2_STM_HBM0_MC0_CS] = mmHBM0_MC0_CS_STM_BASE,
+ [GAUDI2_STM_HBM0_MC1_CS] = mmHBM0_MC1_CS_STM_BASE,
+ [GAUDI2_STM_HBM1_MC0_CS] = mmHBM1_MC0_CS_STM_BASE,
+ [GAUDI2_STM_HBM1_MC1_CS] = mmHBM1_MC1_CS_STM_BASE,
+ [GAUDI2_STM_HBM2_MC0_CS] = mmHBM2_MC0_CS_STM_BASE,
+ [GAUDI2_STM_HBM2_MC1_CS] = mmHBM2_MC1_CS_STM_BASE,
+ [GAUDI2_STM_HBM3_MC0_CS] = mmHBM3_MC0_CS_STM_BASE,
+ [GAUDI2_STM_HBM3_MC1_CS] = mmHBM3_MC1_CS_STM_BASE,
+ [GAUDI2_STM_HBM4_MC0_CS] = mmHBM4_MC0_CS_STM_BASE,
+ [GAUDI2_STM_HBM4_MC1_CS] = mmHBM4_MC1_CS_STM_BASE,
+ [GAUDI2_STM_HBM5_MC0_CS] = mmHBM5_MC0_CS_STM_BASE,
+ [GAUDI2_STM_HBM5_MC1_CS] = mmHBM5_MC1_CS_STM_BASE,
+ [GAUDI2_STM_NIC0_DBG_0] = mmNIC0_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC0_DBG_1] = mmNIC0_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC1_DBG_0] = mmNIC1_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC1_DBG_1] = mmNIC1_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC2_DBG_0] = mmNIC2_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC2_DBG_1] = mmNIC2_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC3_DBG_0] = mmNIC3_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC3_DBG_1] = mmNIC3_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC4_DBG_0] = mmNIC4_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC4_DBG_1] = mmNIC4_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC5_DBG_0] = mmNIC5_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC5_DBG_1] = mmNIC5_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC6_DBG_0] = mmNIC6_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC6_DBG_1] = mmNIC6_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC7_DBG_0] = mmNIC7_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC7_DBG_1] = mmNIC7_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC8_DBG_0] = mmNIC8_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC8_DBG_1] = mmNIC8_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC9_DBG_0] = mmNIC9_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC9_DBG_1] = mmNIC9_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC10_DBG_0] = mmNIC10_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC10_DBG_1] = mmNIC10_DBG_STM_1_BASE,
+ [GAUDI2_STM_NIC11_DBG_0] = mmNIC11_DBG_STM_0_BASE,
+ [GAUDI2_STM_NIC11_DBG_1] = mmNIC11_DBG_STM_1_BASE
+};
+
+static u64 debug_etf_regs[GAUDI2_ETF_LAST + 1] = {
+ [GAUDI2_ETF_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_HMMU0_CS] = mmDCORE0_HMMU0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_HMMU1_CS] = mmDCORE0_HMMU1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_HMMU2_CS] = mmDCORE0_HMMU2_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_HMMU3_CS] = mmDCORE0_HMMU3_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_CTRL] = mmDCORE0_MME_CTRL_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_SBTE0] = mmDCORE0_MME_SBTE0_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_SBTE1] = mmDCORE0_MME_SBTE1_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_SBTE2] = mmDCORE0_MME_SBTE2_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_SBTE3] = mmDCORE0_MME_SBTE3_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_SBTE4] = mmDCORE0_MME_SBTE4_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_MME_ACC] = mmDCORE0_MME_ACC_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_SM] = mmDCORE0_SM_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_EDMA0_CS] = mmDCORE0_EDMA0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_EDMA1_CS] = mmDCORE0_EDMA1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_VDEC0_CS] = mmDCORE0_VDEC0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE0_VDEC1_CS] = mmDCORE0_VDEC1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_HMMU0_CS] = mmDCORE1_HMMU0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_HMMU1_CS] = mmDCORE1_HMMU1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_HMMU2_CS] = mmDCORE1_HMMU2_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_HMMU3_CS] = mmDCORE1_HMMU3_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_CTRL] = mmDCORE1_MME_CTRL_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_SBTE0] = mmDCORE1_MME_SBTE0_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_SBTE1] = mmDCORE1_MME_SBTE1_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_SBTE2] = mmDCORE1_MME_SBTE2_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_SBTE3] = mmDCORE1_MME_SBTE3_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_SBTE4] = mmDCORE1_MME_SBTE4_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_MME_ACC] = mmDCORE1_MME_ACC_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_SM] = mmDCORE1_SM_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_EDMA0_CS] = mmDCORE1_EDMA0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_EDMA1_CS] = mmDCORE1_EDMA1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_VDEC0_CS] = mmDCORE1_VDEC0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE1_VDEC1_CS] = mmDCORE1_VDEC1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_HMMU0_CS] = mmDCORE2_HMMU0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_HMMU1_CS] = mmDCORE2_HMMU1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_HMMU2_CS] = mmDCORE2_HMMU2_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_HMMU3_CS] = mmDCORE2_HMMU3_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_CTRL] = mmDCORE2_MME_CTRL_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_SBTE0] = mmDCORE2_MME_SBTE0_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_SBTE1] = mmDCORE2_MME_SBTE1_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_SBTE2] = mmDCORE2_MME_SBTE2_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_SBTE3] = mmDCORE2_MME_SBTE3_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_SBTE4] = mmDCORE2_MME_SBTE4_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_MME_ACC] = mmDCORE2_MME_ACC_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_SM] = mmDCORE2_SM_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_EDMA0_CS] = mmDCORE2_EDMA0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_EDMA1_CS] = mmDCORE2_EDMA1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_VDEC0_CS] = mmDCORE2_VDEC0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE2_VDEC1_CS] = mmDCORE2_VDEC1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_HMMU0_CS] = mmDCORE3_HMMU0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_HMMU1_CS] = mmDCORE3_HMMU1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_HMMU2_CS] = mmDCORE3_HMMU2_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_HMMU3_CS] = mmDCORE3_HMMU3_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_CTRL] = mmDCORE3_MME_CTRL_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_SBTE0] = mmDCORE3_MME_SBTE0_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_SBTE1] = mmDCORE3_MME_SBTE1_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_SBTE2] = mmDCORE3_MME_SBTE2_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_SBTE3] = mmDCORE3_MME_SBTE3_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_SBTE4] = mmDCORE3_MME_SBTE4_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_MME_ACC] = mmDCORE3_MME_ACC_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_SM] = mmDCORE3_SM_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_EDMA0_CS] = mmDCORE3_EDMA0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_EDMA1_CS] = mmDCORE3_EDMA1_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_VDEC0_CS] = mmDCORE3_VDEC0_CS_ETF_BASE,
+ [GAUDI2_ETF_DCORE3_VDEC1_CS] = mmDCORE3_VDEC1_CS_ETF_BASE,
+ [GAUDI2_ETF_PCIE] = mmPCIE_ETF_BASE,
+ [GAUDI2_ETF_PSOC] = mmPSOC_ETF_BASE,
+ [GAUDI2_ETF_PSOC_ARC0_CS] = mmPSOC_ARC0_CS_ETF_BASE,
+ [GAUDI2_ETF_PSOC_ARC1_CS] = mmPSOC_ARC1_CS_ETF_BASE,
+ [GAUDI2_ETF_PDMA0_CS] = mmPDMA0_CS_ETF_BASE,
+ [GAUDI2_ETF_PDMA1_CS] = mmPDMA1_CS_ETF_BASE,
+ [GAUDI2_ETF_CPU_0] = mmCPU_ETF_0_BASE,
+ [GAUDI2_ETF_CPU_1] = mmCPU_ETF_1_BASE,
+ [GAUDI2_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
+ [GAUDI2_ETF_PMMU_CS] = mmPMMU_CS_ETF_BASE,
+ [GAUDI2_ETF_ROT0_CS] = mmROT0_CS_ETF_BASE,
+ [GAUDI2_ETF_ROT1_CS] = mmROT1_CS_ETF_BASE,
+ [GAUDI2_ETF_ARC_FARM_CS] = mmARC_FARM_CS_ETF_BASE,
+ [GAUDI2_ETF_KDMA_CS] = mmKDMA_CS_ETF_BASE,
+ [GAUDI2_ETF_PCIE_VDEC0_CS] = mmPCIE_VDEC0_CS_ETF_BASE,
+ [GAUDI2_ETF_PCIE_VDEC1_CS] = mmPCIE_VDEC1_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM0_MC0_CS] = mmHBM0_MC0_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM0_MC1_CS] = mmHBM0_MC1_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM1_MC0_CS] = mmHBM1_MC0_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM1_MC1_CS] = mmHBM1_MC1_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM2_MC0_CS] = mmHBM2_MC0_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM2_MC1_CS] = mmHBM2_MC1_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM3_MC0_CS] = mmHBM3_MC0_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM3_MC1_CS] = mmHBM3_MC1_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM4_MC0_CS] = mmHBM4_MC0_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM4_MC1_CS] = mmHBM4_MC1_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM5_MC0_CS] = mmHBM5_MC0_CS_ETF_BASE,
+ [GAUDI2_ETF_HBM5_MC1_CS] = mmHBM5_MC1_CS_ETF_BASE,
+ [GAUDI2_ETF_NIC0_DBG_0] = mmNIC0_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC0_DBG_1] = mmNIC0_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC1_DBG_0] = mmNIC1_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC1_DBG_1] = mmNIC1_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC2_DBG_0] = mmNIC2_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC2_DBG_1] = mmNIC2_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC3_DBG_0] = mmNIC3_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC3_DBG_1] = mmNIC3_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC4_DBG_0] = mmNIC4_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC4_DBG_1] = mmNIC4_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC5_DBG_0] = mmNIC5_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC5_DBG_1] = mmNIC5_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC6_DBG_0] = mmNIC6_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC6_DBG_1] = mmNIC6_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC7_DBG_0] = mmNIC7_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC7_DBG_1] = mmNIC7_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC8_DBG_0] = mmNIC8_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC8_DBG_1] = mmNIC8_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC9_DBG_0] = mmNIC9_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC9_DBG_1] = mmNIC9_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC10_DBG_0] = mmNIC10_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC10_DBG_1] = mmNIC10_DBG_ETF_1_BASE,
+ [GAUDI2_ETF_NIC11_DBG_0] = mmNIC11_DBG_ETF_0_BASE,
+ [GAUDI2_ETF_NIC11_DBG_1] = mmNIC11_DBG_ETF_1_BASE
+};
+
+static u64 debug_funnel_regs[GAUDI2_FUNNEL_LAST + 1] = {
+ [GAUDI2_FUNNEL_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_XFT] = mmDCORE0_XFT_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TFT0] = mmDCORE0_TFT0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TFT1] = mmDCORE0_TFT1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_TFT2] = mmDCORE0_TFT2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR0] = mmDCORE0_RTR0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR1] = mmDCORE0_RTR1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR2] = mmDCORE0_RTR2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR3] = mmDCORE0_RTR3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR4] = mmDCORE0_RTR4_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_MIF0] = mmDCORE0_MIF0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR5] = mmDCORE0_RTR5_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_MIF1] = mmDCORE0_MIF1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR6] = mmDCORE0_RTR6_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_MIF2] = mmDCORE0_MIF2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_RTR7] = mmDCORE0_RTR7_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_MIF3] = mmDCORE0_MIF3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_XFT] = mmDCORE1_XFT_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TFT0] = mmDCORE1_TFT0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TFT1] = mmDCORE1_TFT1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_TFT2] = mmDCORE1_TFT2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR0] = mmDCORE1_RTR0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_MIF0] = mmDCORE1_MIF0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR1] = mmDCORE1_RTR1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_MIF1] = mmDCORE1_MIF1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR2] = mmDCORE1_RTR2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_MIF2] = mmDCORE1_MIF2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR3] = mmDCORE1_RTR3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_MIF3] = mmDCORE1_MIF3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR4] = mmDCORE1_RTR4_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR5] = mmDCORE1_RTR5_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR6] = mmDCORE1_RTR6_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_RTR7] = mmDCORE1_RTR7_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_XFT] = mmDCORE2_XFT_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TFT0] = mmDCORE2_TFT0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TFT1] = mmDCORE2_TFT1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_TFT2] = mmDCORE2_TFT2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR0] = mmDCORE2_RTR0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR1] = mmDCORE2_RTR1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR2] = mmDCORE2_RTR2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR3] = mmDCORE2_RTR3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR4] = mmDCORE2_RTR4_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_MIF0] = mmDCORE2_MIF0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR5] = mmDCORE2_RTR5_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_MIF1] = mmDCORE2_MIF1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR6] = mmDCORE2_RTR6_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_MIF2] = mmDCORE2_MIF2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_RTR7] = mmDCORE2_RTR7_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_MIF3] = mmDCORE2_MIF3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_XFT] = mmDCORE3_XFT_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TFT0] = mmDCORE3_TFT0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TFT1] = mmDCORE3_TFT1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_TFT2] = mmDCORE3_TFT2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR0] = mmDCORE3_RTR0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_MIF0] = mmDCORE3_MIF0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR1] = mmDCORE3_RTR1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_MIF1] = mmDCORE3_MIF1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR2] = mmDCORE3_RTR2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_MIF2] = mmDCORE3_MIF2_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR3] = mmDCORE3_RTR3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_MIF3] = mmDCORE3_MIF3_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR4] = mmDCORE3_RTR4_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR5] = mmDCORE3_RTR5_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR6] = mmDCORE3_RTR6_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_RTR7] = mmDCORE3_RTR7_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_PSOC_ARC0] = mmPSOC_ARC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_PSOC_ARC1] = mmPSOC_ARC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_XDMA] = mmXDMA_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_PMMU] = mmPMMU_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_PMMU_DEC] = mmPMMU_FUNNEL_DEC_BASE,
+ [GAUDI2_FUNNEL_DCORE0_XBAR_MID] = mmDCORE0_XBAR_MID_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE0_XBAR_EDGE] = mmDCORE0_XBAR_EDGE_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_XBAR_MID] = mmDCORE1_XBAR_MID_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE1_XBAR_EDGE] = mmDCORE1_XBAR_EDGE_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_XBAR_MID] = mmDCORE2_XBAR_MID_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE2_XBAR_EDGE] = mmDCORE2_XBAR_EDGE_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_XBAR_MID] = mmDCORE3_XBAR_MID_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_DCORE3_XBAR_EDGE] = mmDCORE3_XBAR_EDGE_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_ARC_FARM] = mmARC_FARM_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM0_MC0] = mmHBM0_MC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM0_MC1] = mmHBM0_MC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM1_MC0] = mmHBM1_MC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM1_MC1] = mmHBM1_MC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM2_MC0] = mmHBM2_MC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM2_MC1] = mmHBM2_MC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM3_MC0] = mmHBM3_MC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM3_MC1] = mmHBM3_MC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM4_MC0] = mmHBM4_MC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM4_MC1] = mmHBM4_MC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM5_MC0] = mmHBM5_MC0_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_HBM5_MC1] = mmHBM5_MC1_FUNNEL_BASE,
+ [GAUDI2_FUNNEL_NIC0_DBG_TX] = mmNIC0_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC0_DBG_NCH] = mmNIC0_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC1_DBG_TX] = mmNIC1_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC1_DBG_NCH] = mmNIC1_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC2_DBG_TX] = mmNIC2_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC2_DBG_NCH] = mmNIC2_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC3_DBG_TX] = mmNIC3_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC3_DBG_NCH] = mmNIC3_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC4_DBG_TX] = mmNIC4_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC4_DBG_NCH] = mmNIC4_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC5_DBG_TX] = mmNIC5_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC5_DBG_NCH] = mmNIC5_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC6_DBG_TX] = mmNIC6_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC6_DBG_NCH] = mmNIC6_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC7_DBG_TX] = mmNIC7_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC7_DBG_NCH] = mmNIC7_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC8_DBG_TX] = mmNIC8_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC8_DBG_NCH] = mmNIC8_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC9_DBG_TX] = mmNIC9_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC9_DBG_NCH] = mmNIC9_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC10_DBG_TX] = mmNIC10_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC10_DBG_NCH] = mmNIC10_DBG_FUNNEL_NCH_BASE,
+ [GAUDI2_FUNNEL_NIC11_DBG_TX] = mmNIC11_DBG_FUNNEL_TX_BASE,
+ [GAUDI2_FUNNEL_NIC11_DBG_NCH] = mmNIC11_DBG_FUNNEL_NCH_BASE
+};
+
+static u64 debug_bmon_regs[GAUDI2_BMON_LAST + 1] = {
+ [GAUDI2_BMON_DCORE0_TPC0_EML_0] = mmDCORE0_TPC0_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC0_EML_1] = mmDCORE0_TPC0_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC0_EML_2] = mmDCORE0_TPC0_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC0_EML_3] = mmDCORE0_TPC0_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_TPC1_EML_0] = mmDCORE0_TPC1_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC1_EML_1] = mmDCORE0_TPC1_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC1_EML_2] = mmDCORE0_TPC1_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC1_EML_3] = mmDCORE0_TPC1_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_TPC2_EML_0] = mmDCORE0_TPC2_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC2_EML_1] = mmDCORE0_TPC2_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC2_EML_2] = mmDCORE0_TPC2_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC2_EML_3] = mmDCORE0_TPC2_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_TPC3_EML_0] = mmDCORE0_TPC3_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC3_EML_1] = mmDCORE0_TPC3_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC3_EML_2] = mmDCORE0_TPC3_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC3_EML_3] = mmDCORE0_TPC3_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_TPC4_EML_0] = mmDCORE0_TPC4_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC4_EML_1] = mmDCORE0_TPC4_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC4_EML_2] = mmDCORE0_TPC4_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC4_EML_3] = mmDCORE0_TPC4_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_TPC5_EML_0] = mmDCORE0_TPC5_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC5_EML_1] = mmDCORE0_TPC5_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC5_EML_2] = mmDCORE0_TPC5_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC5_EML_3] = mmDCORE0_TPC5_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_TPC6_EML_0] = mmDCORE0_TPC6_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_TPC6_EML_1] = mmDCORE0_TPC6_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_TPC6_EML_2] = mmDCORE0_TPC6_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_TPC6_EML_3] = mmDCORE0_TPC6_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_TPC0_EML_0] = mmDCORE1_TPC0_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_TPC0_EML_1] = mmDCORE1_TPC0_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_TPC0_EML_2] = mmDCORE1_TPC0_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_TPC0_EML_3] = mmDCORE1_TPC0_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_TPC1_EML_0] = mmDCORE1_TPC1_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_TPC1_EML_1] = mmDCORE1_TPC1_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_TPC1_EML_2] = mmDCORE1_TPC1_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_TPC1_EML_3] = mmDCORE1_TPC1_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_TPC2_EML_0] = mmDCORE1_TPC2_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_TPC2_EML_1] = mmDCORE1_TPC2_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_TPC2_EML_2] = mmDCORE1_TPC2_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_TPC2_EML_3] = mmDCORE1_TPC2_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_TPC3_EML_0] = mmDCORE1_TPC3_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_TPC3_EML_1] = mmDCORE1_TPC3_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_TPC3_EML_2] = mmDCORE1_TPC3_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_TPC3_EML_3] = mmDCORE1_TPC3_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_TPC4_EML_0] = mmDCORE1_TPC4_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_TPC4_EML_1] = mmDCORE1_TPC4_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_TPC4_EML_2] = mmDCORE1_TPC4_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_TPC4_EML_3] = mmDCORE1_TPC4_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_TPC5_EML_0] = mmDCORE1_TPC5_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_TPC5_EML_1] = mmDCORE1_TPC5_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_TPC5_EML_2] = mmDCORE1_TPC5_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_TPC5_EML_3] = mmDCORE1_TPC5_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_TPC0_EML_0] = mmDCORE2_TPC0_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_TPC0_EML_1] = mmDCORE2_TPC0_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_TPC0_EML_2] = mmDCORE2_TPC0_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_TPC0_EML_3] = mmDCORE2_TPC0_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_TPC1_EML_0] = mmDCORE2_TPC1_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_TPC1_EML_1] = mmDCORE2_TPC1_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_TPC1_EML_2] = mmDCORE2_TPC1_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_TPC1_EML_3] = mmDCORE2_TPC1_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_TPC2_EML_0] = mmDCORE2_TPC2_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_TPC2_EML_1] = mmDCORE2_TPC2_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_TPC2_EML_2] = mmDCORE2_TPC2_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_TPC2_EML_3] = mmDCORE2_TPC2_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_TPC3_EML_0] = mmDCORE2_TPC3_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_TPC3_EML_1] = mmDCORE2_TPC3_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_TPC3_EML_2] = mmDCORE2_TPC3_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_TPC3_EML_3] = mmDCORE2_TPC3_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_TPC4_EML_0] = mmDCORE2_TPC4_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_TPC4_EML_1] = mmDCORE2_TPC4_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_TPC4_EML_2] = mmDCORE2_TPC4_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_TPC4_EML_3] = mmDCORE2_TPC4_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_TPC5_EML_0] = mmDCORE2_TPC5_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_TPC5_EML_1] = mmDCORE2_TPC5_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_TPC5_EML_2] = mmDCORE2_TPC5_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_TPC5_EML_3] = mmDCORE2_TPC5_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_TPC0_EML_0] = mmDCORE3_TPC0_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_TPC0_EML_1] = mmDCORE3_TPC0_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_TPC0_EML_2] = mmDCORE3_TPC0_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_TPC0_EML_3] = mmDCORE3_TPC0_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_TPC1_EML_0] = mmDCORE3_TPC1_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_TPC1_EML_1] = mmDCORE3_TPC1_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_TPC1_EML_2] = mmDCORE3_TPC1_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_TPC1_EML_3] = mmDCORE3_TPC1_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_TPC2_EML_0] = mmDCORE3_TPC2_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_TPC2_EML_1] = mmDCORE3_TPC2_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_TPC2_EML_2] = mmDCORE3_TPC2_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_TPC2_EML_3] = mmDCORE3_TPC2_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_TPC3_EML_0] = mmDCORE3_TPC3_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_TPC3_EML_1] = mmDCORE3_TPC3_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_TPC3_EML_2] = mmDCORE3_TPC3_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_TPC3_EML_3] = mmDCORE3_TPC3_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_TPC4_EML_0] = mmDCORE3_TPC4_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_TPC4_EML_1] = mmDCORE3_TPC4_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_TPC4_EML_2] = mmDCORE3_TPC4_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_TPC4_EML_3] = mmDCORE3_TPC4_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_TPC5_EML_0] = mmDCORE3_TPC5_EML_BUSMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_TPC5_EML_1] = mmDCORE3_TPC5_EML_BUSMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_TPC5_EML_2] = mmDCORE3_TPC5_EML_BUSMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_TPC5_EML_3] = mmDCORE3_TPC5_EML_BUSMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU0_0] = mmDCORE0_HMMU0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU0_1] = mmDCORE0_HMMU0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU0_3] = mmDCORE0_HMMU0_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU0_2] = mmDCORE0_HMMU0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU0_4] = mmDCORE0_HMMU0_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU1_0] = mmDCORE0_HMMU1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU1_1] = mmDCORE0_HMMU1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU1_3] = mmDCORE0_HMMU1_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU1_2] = mmDCORE0_HMMU1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU1_4] = mmDCORE0_HMMU1_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU2_0] = mmDCORE0_HMMU2_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU2_1] = mmDCORE0_HMMU2_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU2_3] = mmDCORE0_HMMU2_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU2_2] = mmDCORE0_HMMU2_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU2_4] = mmDCORE0_HMMU2_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU3_0] = mmDCORE0_HMMU3_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU3_1] = mmDCORE0_HMMU3_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU3_3] = mmDCORE0_HMMU3_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU3_2] = mmDCORE0_HMMU3_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_HMMU3_4] = mmDCORE0_HMMU3_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE0_MME_CTRL_0] = mmDCORE0_MME_CTRL_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_CTRL_1] = mmDCORE0_MME_CTRL_BMON1_BASE,
+ [GAUDI2_BMON_DCORE0_MME_CTRL_2] = mmDCORE0_MME_CTRL_BMON2_BASE,
+ [GAUDI2_BMON_DCORE0_MME_CTRL_3] = mmDCORE0_MME_CTRL_BMON3_BASE,
+ [GAUDI2_BMON_DCORE0_MME_SBTE0_0] = mmDCORE0_MME_SBTE0_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_SBTE1_0] = mmDCORE0_MME_SBTE1_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_SBTE2_0] = mmDCORE0_MME_SBTE2_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_SBTE3_0] = mmDCORE0_MME_SBTE3_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_SBTE4_0] = mmDCORE0_MME_SBTE4_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_ACC_0] = mmDCORE0_MME_ACC_BMON0_BASE,
+ [GAUDI2_BMON_DCORE0_MME_ACC_1] = mmDCORE0_MME_ACC_BMON1_BASE,
+ [GAUDI2_BMON_DCORE0_SM] = mmDCORE0_SM_BMON_BASE,
+ [GAUDI2_BMON_DCORE0_SM_1] = mmDCORE0_SM_BMON1_BASE,
+ [GAUDI2_BMON_DCORE0_EDMA0_0] = mmDCORE0_EDMA0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_EDMA0_1] = mmDCORE0_EDMA0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_EDMA1_0] = mmDCORE0_EDMA1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_EDMA1_1] = mmDCORE0_EDMA1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_VDEC0_0] = mmDCORE0_VDEC0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_VDEC0_1] = mmDCORE0_VDEC0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_VDEC0_2] = mmDCORE0_VDEC0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE0_VDEC1_0] = mmDCORE0_VDEC1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE0_VDEC1_1] = mmDCORE0_VDEC1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE0_VDEC1_2] = mmDCORE0_VDEC1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU0_0] = mmDCORE1_HMMU0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU0_1] = mmDCORE1_HMMU0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU0_3] = mmDCORE1_HMMU0_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU0_2] = mmDCORE1_HMMU0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU0_4] = mmDCORE1_HMMU0_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU1_0] = mmDCORE1_HMMU1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU1_1] = mmDCORE1_HMMU1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU1_3] = mmDCORE1_HMMU1_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU1_2] = mmDCORE1_HMMU1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU1_4] = mmDCORE1_HMMU1_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU2_0] = mmDCORE1_HMMU2_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU2_1] = mmDCORE1_HMMU2_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU2_3] = mmDCORE1_HMMU2_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU2_2] = mmDCORE1_HMMU2_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU2_4] = mmDCORE1_HMMU2_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU3_0] = mmDCORE1_HMMU3_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU3_1] = mmDCORE1_HMMU3_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU3_3] = mmDCORE1_HMMU3_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU3_2] = mmDCORE1_HMMU3_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_HMMU3_4] = mmDCORE1_HMMU3_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE1_MME_CTRL_0] = mmDCORE1_MME_CTRL_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_CTRL_1] = mmDCORE1_MME_CTRL_BMON1_BASE,
+ [GAUDI2_BMON_DCORE1_MME_CTRL_2] = mmDCORE1_MME_CTRL_BMON2_BASE,
+ [GAUDI2_BMON_DCORE1_MME_CTRL_3] = mmDCORE1_MME_CTRL_BMON3_BASE,
+ [GAUDI2_BMON_DCORE1_MME_SBTE0_0] = mmDCORE1_MME_SBTE0_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_SBTE1_0] = mmDCORE1_MME_SBTE1_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_SBTE2_0] = mmDCORE1_MME_SBTE2_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_SBTE3_0] = mmDCORE1_MME_SBTE3_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_SBTE4_0] = mmDCORE1_MME_SBTE4_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_ACC_0] = mmDCORE1_MME_ACC_BMON0_BASE,
+ [GAUDI2_BMON_DCORE1_MME_ACC_1] = mmDCORE1_MME_ACC_BMON1_BASE,
+ [GAUDI2_BMON_DCORE1_SM] = mmDCORE1_SM_BMON_BASE,
+ [GAUDI2_BMON_DCORE1_SM_1] = mmDCORE1_SM_BMON1_BASE,
+ [GAUDI2_BMON_DCORE1_EDMA0_0] = mmDCORE1_EDMA0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_EDMA0_1] = mmDCORE1_EDMA0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_EDMA1_0] = mmDCORE1_EDMA1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_EDMA1_1] = mmDCORE1_EDMA1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_VDEC0_0] = mmDCORE1_VDEC0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_VDEC0_1] = mmDCORE1_VDEC0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_VDEC0_2] = mmDCORE1_VDEC0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE1_VDEC1_0] = mmDCORE1_VDEC1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE1_VDEC1_1] = mmDCORE1_VDEC1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE1_VDEC1_2] = mmDCORE1_VDEC1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU0_0] = mmDCORE2_HMMU0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU0_1] = mmDCORE2_HMMU0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU0_3] = mmDCORE2_HMMU0_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU0_2] = mmDCORE2_HMMU0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU0_4] = mmDCORE2_HMMU0_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU1_0] = mmDCORE2_HMMU1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU1_1] = mmDCORE2_HMMU1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU1_3] = mmDCORE2_HMMU1_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU1_2] = mmDCORE2_HMMU1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU1_4] = mmDCORE2_HMMU1_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU2_0] = mmDCORE2_HMMU2_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU2_1] = mmDCORE2_HMMU2_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU2_3] = mmDCORE2_HMMU2_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU2_2] = mmDCORE2_HMMU2_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU2_4] = mmDCORE2_HMMU2_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU3_0] = mmDCORE2_HMMU3_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU3_1] = mmDCORE2_HMMU3_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU3_3] = mmDCORE2_HMMU3_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU3_2] = mmDCORE2_HMMU3_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_HMMU3_4] = mmDCORE2_HMMU3_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE2_MME_CTRL_0] = mmDCORE2_MME_CTRL_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_CTRL_1] = mmDCORE2_MME_CTRL_BMON1_BASE,
+ [GAUDI2_BMON_DCORE2_MME_CTRL_2] = mmDCORE2_MME_CTRL_BMON2_BASE,
+ [GAUDI2_BMON_DCORE2_MME_CTRL_3] = mmDCORE2_MME_CTRL_BMON3_BASE,
+ [GAUDI2_BMON_DCORE2_MME_SBTE0_0] = mmDCORE2_MME_SBTE0_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_SBTE1_0] = mmDCORE2_MME_SBTE1_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_SBTE2_0] = mmDCORE2_MME_SBTE2_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_SBTE3_0] = mmDCORE2_MME_SBTE3_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_SBTE4_0] = mmDCORE2_MME_SBTE4_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_ACC_0] = mmDCORE2_MME_ACC_BMON0_BASE,
+ [GAUDI2_BMON_DCORE2_MME_ACC_1] = mmDCORE2_MME_ACC_BMON1_BASE,
+ [GAUDI2_BMON_DCORE2_SM] = mmDCORE2_SM_BMON_BASE,
+ [GAUDI2_BMON_DCORE2_SM_1] = mmDCORE2_SM_BMON1_BASE,
+ [GAUDI2_BMON_DCORE2_EDMA0_0] = mmDCORE2_EDMA0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_EDMA0_1] = mmDCORE2_EDMA0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_EDMA1_0] = mmDCORE2_EDMA1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_EDMA1_1] = mmDCORE2_EDMA1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_VDEC0_0] = mmDCORE2_VDEC0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_VDEC0_1] = mmDCORE2_VDEC0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_VDEC0_2] = mmDCORE2_VDEC0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE2_VDEC1_0] = mmDCORE2_VDEC1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE2_VDEC1_1] = mmDCORE2_VDEC1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE2_VDEC1_2] = mmDCORE2_VDEC1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU0_0] = mmDCORE3_HMMU0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU0_1] = mmDCORE3_HMMU0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU0_3] = mmDCORE3_HMMU0_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU0_2] = mmDCORE3_HMMU0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU0_4] = mmDCORE3_HMMU0_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU1_0] = mmDCORE3_HMMU1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU1_1] = mmDCORE3_HMMU1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU1_3] = mmDCORE3_HMMU1_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU1_2] = mmDCORE3_HMMU1_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU1_4] = mmDCORE3_HMMU1_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU2_0] = mmDCORE3_HMMU2_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU2_1] = mmDCORE3_HMMU2_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU2_3] = mmDCORE3_HMMU2_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU2_2] = mmDCORE3_HMMU2_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU2_4] = mmDCORE3_HMMU2_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU3_0] = mmDCORE3_HMMU3_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU3_1] = mmDCORE3_HMMU3_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU3_3] = mmDCORE3_HMMU3_BMON_3_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU3_2] = mmDCORE3_HMMU3_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_HMMU3_4] = mmDCORE3_HMMU3_BMON_4_BASE,
+ [GAUDI2_BMON_DCORE3_MME_CTRL_0] = mmDCORE3_MME_CTRL_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_CTRL_1] = mmDCORE3_MME_CTRL_BMON1_BASE,
+ [GAUDI2_BMON_DCORE3_MME_CTRL_2] = mmDCORE3_MME_CTRL_BMON2_BASE,
+ [GAUDI2_BMON_DCORE3_MME_CTRL_3] = mmDCORE3_MME_CTRL_BMON3_BASE,
+ [GAUDI2_BMON_DCORE3_MME_SBTE0_0] = mmDCORE3_MME_SBTE0_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_SBTE1_0] = mmDCORE3_MME_SBTE1_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_SBTE2_0] = mmDCORE3_MME_SBTE2_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_SBTE3_0] = mmDCORE3_MME_SBTE3_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_SBTE4_0] = mmDCORE3_MME_SBTE4_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_ACC_0] = mmDCORE3_MME_ACC_BMON0_BASE,
+ [GAUDI2_BMON_DCORE3_MME_ACC_1] = mmDCORE3_MME_ACC_BMON1_BASE,
+ [GAUDI2_BMON_DCORE3_SM] = mmDCORE3_SM_BMON_BASE,
+ [GAUDI2_BMON_DCORE3_SM_1] = mmDCORE3_SM_BMON1_BASE,
+ [GAUDI2_BMON_DCORE3_EDMA0_0] = mmDCORE3_EDMA0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_EDMA0_1] = mmDCORE3_EDMA0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_EDMA1_0] = mmDCORE3_EDMA1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_EDMA1_1] = mmDCORE3_EDMA1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_VDEC0_0] = mmDCORE3_VDEC0_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_VDEC0_1] = mmDCORE3_VDEC0_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_VDEC0_2] = mmDCORE3_VDEC0_BMON_2_BASE,
+ [GAUDI2_BMON_DCORE3_VDEC1_0] = mmDCORE3_VDEC1_BMON_0_BASE,
+ [GAUDI2_BMON_DCORE3_VDEC1_1] = mmDCORE3_VDEC1_BMON_1_BASE,
+ [GAUDI2_BMON_DCORE3_VDEC1_2] = mmDCORE3_VDEC1_BMON_2_BASE,
+ [GAUDI2_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
+ [GAUDI2_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
+ [GAUDI2_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
+ [GAUDI2_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
+ [GAUDI2_BMON_PSOC_ARC0_0] = mmPSOC_ARC0_BMON_0_BASE,
+ [GAUDI2_BMON_PSOC_ARC0_1] = mmPSOC_ARC0_BMON_1_BASE,
+ [GAUDI2_BMON_PSOC_ARC1_0] = mmPSOC_ARC1_BMON_0_BASE,
+ [GAUDI2_BMON_PSOC_ARC1_1] = mmPSOC_ARC1_BMON_1_BASE,
+ [GAUDI2_BMON_PDMA0_0] = mmPDMA0_BMON_0_BASE,
+ [GAUDI2_BMON_PDMA0_1] = mmPDMA0_BMON_1_BASE,
+ [GAUDI2_BMON_PDMA1_0] = mmPDMA1_BMON_0_BASE,
+ [GAUDI2_BMON_PDMA1_1] = mmPDMA1_BMON_1_BASE,
+ [GAUDI2_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
+ [GAUDI2_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
+ [GAUDI2_BMON_PMMU_0] = mmPMMU_BMON_0_BASE,
+ [GAUDI2_BMON_PMMU_1] = mmPMMU_BMON_1_BASE,
+ [GAUDI2_BMON_PMMU_2] = mmPMMU_BMON_2_BASE,
+ [GAUDI2_BMON_PMMU_3] = mmPMMU_BMON_3_BASE,
+ [GAUDI2_BMON_PMMU_4] = mmPMMU_BMON_4_BASE,
+ [GAUDI2_BMON_ROT0_0] = mmROT0_BMON_0_BASE,
+ [GAUDI2_BMON_ROT0_1] = mmROT0_BMON_1_BASE,
+ [GAUDI2_BMON_ROT0_2] = mmROT0_BMON_2_BASE,
+ [GAUDI2_BMON_ROT0_3] = mmROT0_BMON_3_BASE,
+ [GAUDI2_BMON_ROT1_0] = mmROT1_BMON_0_BASE,
+ [GAUDI2_BMON_ROT1_1] = mmROT1_BMON_1_BASE,
+ [GAUDI2_BMON_ROT1_2] = mmROT1_BMON_2_BASE,
+ [GAUDI2_BMON_ROT1_3] = mmROT1_BMON_3_BASE,
+ [GAUDI2_BMON_ARC_FARM_0] = mmARC_FARM_BMON_0_BASE,
+ [GAUDI2_BMON_ARC_FARM_1] = mmARC_FARM_BMON_1_BASE,
+ [GAUDI2_BMON_ARC_FARM_2] = mmARC_FARM_BMON_2_BASE,
+ [GAUDI2_BMON_ARC_FARM_3] = mmARC_FARM_BMON_3_BASE,
+ [GAUDI2_BMON_KDMA_0] = mmKDMA_BMON_0_BASE,
+ [GAUDI2_BMON_KDMA_1] = mmKDMA_BMON_1_BASE,
+ [GAUDI2_BMON_KDMA_2] = mmKDMA_BMON_2_BASE,
+ [GAUDI2_BMON_KDMA_3] = mmKDMA_BMON_3_BASE,
+ [GAUDI2_BMON_PCIE_VDEC0_0] = mmPCIE_VDEC0_BMON_0_BASE,
+ [GAUDI2_BMON_PCIE_VDEC0_1] = mmPCIE_VDEC0_BMON_1_BASE,
+ [GAUDI2_BMON_PCIE_VDEC0_2] = mmPCIE_VDEC0_BMON_2_BASE,
+ [GAUDI2_BMON_PCIE_VDEC1_0] = mmPCIE_VDEC1_BMON_0_BASE,
+ [GAUDI2_BMON_PCIE_VDEC1_1] = mmPCIE_VDEC1_BMON_1_BASE,
+ [GAUDI2_BMON_PCIE_VDEC1_2] = mmPCIE_VDEC1_BMON_2_BASE,
+ [GAUDI2_BMON_NIC0_DBG_0_0] = mmNIC0_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC0_DBG_1_0] = mmNIC0_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC0_DBG_2_0] = mmNIC0_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC0_DBG_0_1] = mmNIC0_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC0_DBG_1_1] = mmNIC0_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC0_DBG_2_1] = mmNIC0_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC1_DBG_0_0] = mmNIC1_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC1_DBG_1_0] = mmNIC1_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC1_DBG_2_0] = mmNIC1_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC1_DBG_0_1] = mmNIC1_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC1_DBG_1_1] = mmNIC1_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC1_DBG_2_1] = mmNIC1_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC2_DBG_0_0] = mmNIC2_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC2_DBG_1_0] = mmNIC2_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC2_DBG_2_0] = mmNIC2_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC2_DBG_0_1] = mmNIC2_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC2_DBG_1_1] = mmNIC2_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC2_DBG_2_1] = mmNIC2_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC3_DBG_0_0] = mmNIC3_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC3_DBG_1_0] = mmNIC3_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC3_DBG_2_0] = mmNIC3_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC3_DBG_0_1] = mmNIC3_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC3_DBG_1_1] = mmNIC3_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC3_DBG_2_1] = mmNIC3_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC4_DBG_0_0] = mmNIC4_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC4_DBG_1_0] = mmNIC4_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC4_DBG_2_0] = mmNIC4_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC4_DBG_0_1] = mmNIC4_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC4_DBG_1_1] = mmNIC4_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC4_DBG_2_1] = mmNIC4_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC5_DBG_0_0] = mmNIC5_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC5_DBG_1_0] = mmNIC5_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC5_DBG_2_0] = mmNIC5_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC5_DBG_0_1] = mmNIC5_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC5_DBG_1_1] = mmNIC5_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC5_DBG_2_1] = mmNIC5_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC6_DBG_0_0] = mmNIC6_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC6_DBG_1_0] = mmNIC6_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC6_DBG_2_0] = mmNIC6_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC6_DBG_0_1] = mmNIC6_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC6_DBG_1_1] = mmNIC6_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC6_DBG_2_1] = mmNIC6_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC7_DBG_0_0] = mmNIC7_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC7_DBG_1_0] = mmNIC7_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC7_DBG_2_0] = mmNIC7_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC7_DBG_0_1] = mmNIC7_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC7_DBG_1_1] = mmNIC7_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC7_DBG_2_1] = mmNIC7_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC8_DBG_0_0] = mmNIC8_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC8_DBG_1_0] = mmNIC8_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC8_DBG_2_0] = mmNIC8_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC8_DBG_0_1] = mmNIC8_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC8_DBG_1_1] = mmNIC8_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC8_DBG_2_1] = mmNIC8_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC9_DBG_0_0] = mmNIC9_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC9_DBG_1_0] = mmNIC9_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC9_DBG_2_0] = mmNIC9_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC9_DBG_0_1] = mmNIC9_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC9_DBG_1_1] = mmNIC9_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC9_DBG_2_1] = mmNIC9_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC10_DBG_0_0] = mmNIC10_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC10_DBG_1_0] = mmNIC10_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC10_DBG_2_0] = mmNIC10_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC10_DBG_0_1] = mmNIC10_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC10_DBG_1_1] = mmNIC10_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC10_DBG_2_1] = mmNIC10_DBG_BMON2_1_BASE,
+ [GAUDI2_BMON_NIC11_DBG_0_0] = mmNIC11_DBG_BMON0_0_BASE,
+ [GAUDI2_BMON_NIC11_DBG_1_0] = mmNIC11_DBG_BMON1_0_BASE,
+ [GAUDI2_BMON_NIC11_DBG_2_0] = mmNIC11_DBG_BMON2_0_BASE,
+ [GAUDI2_BMON_NIC11_DBG_0_1] = mmNIC11_DBG_BMON0_1_BASE,
+ [GAUDI2_BMON_NIC11_DBG_1_1] = mmNIC11_DBG_BMON1_1_BASE,
+ [GAUDI2_BMON_NIC11_DBG_2_1] = mmNIC11_DBG_BMON2_1_BASE
+};
+
+static u64 debug_spmu_regs[GAUDI2_SPMU_LAST + 1] = {
+ [GAUDI2_SPMU_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_HMMU0_CS] = mmDCORE0_HMMU0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_HMMU1_CS] = mmDCORE0_HMMU1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_HMMU2_CS] = mmDCORE0_HMMU2_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_HMMU3_CS] = mmDCORE0_HMMU3_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_CTRL] = mmDCORE0_MME_CTRL_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_SBTE0] = mmDCORE0_MME_SBTE0_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_SBTE1] = mmDCORE0_MME_SBTE1_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_SBTE2] = mmDCORE0_MME_SBTE2_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_SBTE3] = mmDCORE0_MME_SBTE3_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_SBTE4] = mmDCORE0_MME_SBTE4_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_MME_ACC] = mmDCORE0_MME_ACC_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_SM] = mmDCORE0_SM_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_EDMA0_CS] = mmDCORE0_EDMA0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_EDMA1_CS] = mmDCORE0_EDMA1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_VDEC0_CS] = mmDCORE0_VDEC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE0_VDEC1_CS] = mmDCORE0_VDEC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_HMMU0_CS] = mmDCORE1_HMMU0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_HMMU1_CS] = mmDCORE1_HMMU1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_HMMU2_CS] = mmDCORE1_HMMU2_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_HMMU3_CS] = mmDCORE1_HMMU3_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_CTRL] = mmDCORE1_MME_CTRL_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_SBTE0] = mmDCORE1_MME_SBTE0_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_SBTE1] = mmDCORE1_MME_SBTE1_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_SBTE2] = mmDCORE1_MME_SBTE2_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_SBTE3] = mmDCORE1_MME_SBTE3_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_SBTE4] = mmDCORE1_MME_SBTE4_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_MME_ACC] = mmDCORE1_MME_ACC_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_SM] = mmDCORE1_SM_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_EDMA0_CS] = mmDCORE1_EDMA0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_EDMA1_CS] = mmDCORE1_EDMA1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_VDEC0_CS] = mmDCORE1_VDEC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE1_VDEC1_CS] = mmDCORE1_VDEC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_HMMU0_CS] = mmDCORE2_HMMU0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_HMMU1_CS] = mmDCORE2_HMMU1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_HMMU2_CS] = mmDCORE2_HMMU2_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_HMMU3_CS] = mmDCORE2_HMMU3_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_CTRL] = mmDCORE2_MME_CTRL_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_SBTE0] = mmDCORE2_MME_SBTE0_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_SBTE1] = mmDCORE2_MME_SBTE1_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_SBTE2] = mmDCORE2_MME_SBTE2_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_SBTE3] = mmDCORE2_MME_SBTE3_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_SBTE4] = mmDCORE2_MME_SBTE4_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_MME_ACC] = mmDCORE2_MME_ACC_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_SM] = mmDCORE2_SM_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_EDMA0_CS] = mmDCORE2_EDMA0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_EDMA1_CS] = mmDCORE2_EDMA1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_VDEC0_CS] = mmDCORE2_VDEC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE2_VDEC1_CS] = mmDCORE2_VDEC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_HMMU0_CS] = mmDCORE3_HMMU0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_HMMU1_CS] = mmDCORE3_HMMU1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_HMMU2_CS] = mmDCORE3_HMMU2_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_HMMU3_CS] = mmDCORE3_HMMU3_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_CTRL] = mmDCORE3_MME_CTRL_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_SBTE0] = mmDCORE3_MME_SBTE0_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_SBTE1] = mmDCORE3_MME_SBTE1_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_SBTE2] = mmDCORE3_MME_SBTE2_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_SBTE3] = mmDCORE3_MME_SBTE3_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_SBTE4] = mmDCORE3_MME_SBTE4_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_MME_ACC] = mmDCORE3_MME_ACC_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_SM] = mmDCORE3_SM_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_EDMA0_CS] = mmDCORE3_EDMA0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_EDMA1_CS] = mmDCORE3_EDMA1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_VDEC0_CS] = mmDCORE3_VDEC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_DCORE3_VDEC1_CS] = mmDCORE3_VDEC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PCIE] = mmPCIE_SPMU_BASE,
+ [GAUDI2_SPMU_PSOC_ARC0_CS] = mmPSOC_ARC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PSOC_ARC1_CS] = mmPSOC_ARC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PDMA0_CS] = mmPDMA0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PDMA1_CS] = mmPDMA1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PMMU_CS] = mmPMMU_CS_SPMU_BASE,
+ [GAUDI2_SPMU_ROT0_CS] = mmROT0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_ROT1_CS] = mmROT1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_ARC_FARM_CS] = mmARC_FARM_CS_SPMU_BASE,
+ [GAUDI2_SPMU_KDMA_CS] = mmKDMA_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PCIE_VDEC0_CS] = mmPCIE_VDEC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_PCIE_VDEC1_CS] = mmPCIE_VDEC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM0_MC0_CS] = mmHBM0_MC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM0_MC1_CS] = mmHBM0_MC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM1_MC0_CS] = mmHBM1_MC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM1_MC1_CS] = mmHBM1_MC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM2_MC0_CS] = mmHBM2_MC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM2_MC1_CS] = mmHBM2_MC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM3_MC0_CS] = mmHBM3_MC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM3_MC1_CS] = mmHBM3_MC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM4_MC0_CS] = mmHBM4_MC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM4_MC1_CS] = mmHBM4_MC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM5_MC0_CS] = mmHBM5_MC0_CS_SPMU_BASE,
+ [GAUDI2_SPMU_HBM5_MC1_CS] = mmHBM5_MC1_CS_SPMU_BASE,
+ [GAUDI2_SPMU_NIC0_DBG_0] = mmNIC0_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC0_DBG_1] = mmNIC0_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC1_DBG_0] = mmNIC1_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC1_DBG_1] = mmNIC1_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC2_DBG_0] = mmNIC2_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC2_DBG_1] = mmNIC2_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC3_DBG_0] = mmNIC3_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC3_DBG_1] = mmNIC3_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC4_DBG_0] = mmNIC4_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC4_DBG_1] = mmNIC4_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC5_DBG_0] = mmNIC5_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC5_DBG_1] = mmNIC5_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC6_DBG_0] = mmNIC6_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC6_DBG_1] = mmNIC6_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC7_DBG_0] = mmNIC7_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC7_DBG_1] = mmNIC7_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC8_DBG_0] = mmNIC8_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC8_DBG_1] = mmNIC8_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC9_DBG_0] = mmNIC9_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC9_DBG_1] = mmNIC9_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC10_DBG_0] = mmNIC10_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC10_DBG_1] = mmNIC10_DBG_SPMU_1_BASE,
+ [GAUDI2_SPMU_NIC11_DBG_0] = mmNIC11_DBG_SPMU_0_BASE,
+ [GAUDI2_SPMU_NIC11_DBG_1] = mmNIC11_DBG_SPMU_1_BASE
+};
+
+static struct component_config_offsets xbar_edge_binning_cfg_table[XBAR_EDGE_ID_SIZE] = {
+ [XBAR_EDGE_ID_DCORE0] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_XBAR_EDGE,
+ .etf_id = COMPONENT_ID_INVALID,
+ .stm_id = COMPONENT_ID_INVALID,
+ .spmu_id = COMPONENT_ID_INVALID,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [XBAR_EDGE_ID_DCORE1] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_XBAR_EDGE,
+ .etf_id = COMPONENT_ID_INVALID,
+ .stm_id = COMPONENT_ID_INVALID,
+ .spmu_id = COMPONENT_ID_INVALID,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [XBAR_EDGE_ID_DCORE2] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_XBAR_EDGE,
+ .etf_id = COMPONENT_ID_INVALID,
+ .stm_id = COMPONENT_ID_INVALID,
+ .spmu_id = COMPONENT_ID_INVALID,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [XBAR_EDGE_ID_DCORE3] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_XBAR_EDGE,
+ .etf_id = COMPONENT_ID_INVALID,
+ .stm_id = COMPONENT_ID_INVALID,
+ .spmu_id = COMPONENT_ID_INVALID,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+};
+
+static struct component_config_offsets hmmu_binning_cfg_table[HMMU_ID_SIZE] = {
+ [HMMU_ID_DCORE0_HMMU0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_HMMU0_CS,
+ .stm_id = GAUDI2_STM_DCORE0_HMMU0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_HMMU0_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_HMMU0_0,
+ GAUDI2_BMON_DCORE0_HMMU0_1,
+ GAUDI2_BMON_DCORE0_HMMU0_2,
+ GAUDI2_BMON_DCORE0_HMMU0_3,
+ GAUDI2_BMON_DCORE0_HMMU0_4,
+ }
+ },
+ [HMMU_ID_DCORE0_HMMU1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_HMMU1_CS,
+ .stm_id = GAUDI2_STM_DCORE0_HMMU1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_HMMU1_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_HMMU1_0,
+ GAUDI2_BMON_DCORE0_HMMU1_1,
+ GAUDI2_BMON_DCORE0_HMMU1_2,
+ GAUDI2_BMON_DCORE0_HMMU1_3,
+ GAUDI2_BMON_DCORE0_HMMU1_4,
+ }
+ },
+ [HMMU_ID_DCORE0_HMMU2] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_HMMU2_CS,
+ .stm_id = GAUDI2_STM_DCORE0_HMMU2_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_HMMU2_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_HMMU2_0,
+ GAUDI2_BMON_DCORE0_HMMU2_1,
+ GAUDI2_BMON_DCORE0_HMMU2_2,
+ GAUDI2_BMON_DCORE0_HMMU2_3,
+ GAUDI2_BMON_DCORE0_HMMU2_4,
+ }
+ },
+ [HMMU_ID_DCORE0_HMMU3] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_HMMU3_CS,
+ .stm_id = GAUDI2_STM_DCORE0_HMMU3_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_HMMU3_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_HMMU3_0,
+ GAUDI2_BMON_DCORE0_HMMU3_1,
+ GAUDI2_BMON_DCORE0_HMMU3_2,
+ GAUDI2_BMON_DCORE0_HMMU3_3,
+ GAUDI2_BMON_DCORE0_HMMU3_4,
+ }
+ },
+ [HMMU_ID_DCORE1_HMMU0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_HMMU0_CS,
+ .stm_id = GAUDI2_STM_DCORE1_HMMU0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_HMMU0_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_HMMU0_0,
+ GAUDI2_BMON_DCORE1_HMMU0_1,
+ GAUDI2_BMON_DCORE1_HMMU0_2,
+ GAUDI2_BMON_DCORE1_HMMU0_3,
+ GAUDI2_BMON_DCORE1_HMMU0_4,
+ }
+ },
+ [HMMU_ID_DCORE1_HMMU1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_HMMU1_CS,
+ .stm_id = GAUDI2_STM_DCORE1_HMMU1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_HMMU1_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_HMMU1_0,
+ GAUDI2_BMON_DCORE1_HMMU1_1,
+ GAUDI2_BMON_DCORE1_HMMU1_2,
+ GAUDI2_BMON_DCORE1_HMMU1_3,
+ GAUDI2_BMON_DCORE1_HMMU1_4,
+ }
+ },
+ [HMMU_ID_DCORE1_HMMU2] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_HMMU2_CS,
+ .stm_id = GAUDI2_STM_DCORE1_HMMU2_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_HMMU2_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_HMMU2_0,
+ GAUDI2_BMON_DCORE1_HMMU2_1,
+ GAUDI2_BMON_DCORE1_HMMU2_2,
+ GAUDI2_BMON_DCORE1_HMMU2_3,
+ GAUDI2_BMON_DCORE1_HMMU2_4,
+ }
+ },
+ [HMMU_ID_DCORE1_HMMU3] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_HMMU3_CS,
+ .stm_id = GAUDI2_STM_DCORE1_HMMU3_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_HMMU3_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_HMMU3_0,
+ GAUDI2_BMON_DCORE1_HMMU3_1,
+ GAUDI2_BMON_DCORE1_HMMU3_2,
+ GAUDI2_BMON_DCORE1_HMMU3_3,
+ GAUDI2_BMON_DCORE1_HMMU3_4,
+ }
+ },
+ [HMMU_ID_DCORE2_HMMU0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_HMMU0_CS,
+ .stm_id = GAUDI2_STM_DCORE2_HMMU0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_HMMU0_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_HMMU0_0,
+ GAUDI2_BMON_DCORE2_HMMU0_1,
+ GAUDI2_BMON_DCORE2_HMMU0_2,
+ GAUDI2_BMON_DCORE2_HMMU0_3,
+ GAUDI2_BMON_DCORE2_HMMU0_4,
+ }
+ },
+ [HMMU_ID_DCORE2_HMMU1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_HMMU1_CS,
+ .stm_id = GAUDI2_STM_DCORE2_HMMU1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_HMMU1_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_HMMU1_0,
+ GAUDI2_BMON_DCORE2_HMMU1_1,
+ GAUDI2_BMON_DCORE2_HMMU1_2,
+ GAUDI2_BMON_DCORE2_HMMU1_3,
+ GAUDI2_BMON_DCORE2_HMMU1_4,
+ }
+ },
+ [HMMU_ID_DCORE2_HMMU2] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_HMMU2_CS,
+ .stm_id = GAUDI2_STM_DCORE2_HMMU2_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_HMMU2_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_HMMU2_0,
+ GAUDI2_BMON_DCORE2_HMMU2_1,
+ GAUDI2_BMON_DCORE2_HMMU2_2,
+ GAUDI2_BMON_DCORE2_HMMU2_3,
+ GAUDI2_BMON_DCORE2_HMMU2_4,
+ }
+ },
+ [HMMU_ID_DCORE2_HMMU3] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_HMMU3_CS,
+ .stm_id = GAUDI2_STM_DCORE2_HMMU3_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_HMMU3_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_HMMU3_0,
+ GAUDI2_BMON_DCORE2_HMMU3_1,
+ GAUDI2_BMON_DCORE2_HMMU3_2,
+ GAUDI2_BMON_DCORE2_HMMU3_3,
+ GAUDI2_BMON_DCORE2_HMMU3_4,
+ }
+ },
+ [HMMU_ID_DCORE3_HMMU0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_HMMU0_CS,
+ .stm_id = GAUDI2_STM_DCORE3_HMMU0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_HMMU0_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_HMMU0_0,
+ GAUDI2_BMON_DCORE3_HMMU0_1,
+ GAUDI2_BMON_DCORE3_HMMU0_2,
+ GAUDI2_BMON_DCORE3_HMMU0_3,
+ GAUDI2_BMON_DCORE3_HMMU0_4,
+ }
+ },
+ [HMMU_ID_DCORE3_HMMU1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_HMMU1_CS,
+ .stm_id = GAUDI2_STM_DCORE3_HMMU1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_HMMU1_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_HMMU1_0,
+ GAUDI2_BMON_DCORE3_HMMU1_1,
+ GAUDI2_BMON_DCORE3_HMMU1_2,
+ GAUDI2_BMON_DCORE3_HMMU1_3,
+ GAUDI2_BMON_DCORE3_HMMU1_4,
+ }
+ },
+ [HMMU_ID_DCORE3_HMMU2] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_HMMU2_CS,
+ .stm_id = GAUDI2_STM_DCORE3_HMMU2_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_HMMU2_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_HMMU2_0,
+ GAUDI2_BMON_DCORE3_HMMU2_1,
+ GAUDI2_BMON_DCORE3_HMMU2_2,
+ GAUDI2_BMON_DCORE3_HMMU2_3,
+ GAUDI2_BMON_DCORE3_HMMU2_4,
+ }
+ },
+ [HMMU_ID_DCORE3_HMMU3] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_HMMU3_CS,
+ .stm_id = GAUDI2_STM_DCORE3_HMMU3_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_HMMU3_CS,
+ .bmon_count = 5,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_HMMU3_0,
+ GAUDI2_BMON_DCORE3_HMMU3_1,
+ GAUDI2_BMON_DCORE3_HMMU3_2,
+ GAUDI2_BMON_DCORE3_HMMU3_3,
+ GAUDI2_BMON_DCORE3_HMMU3_4,
+ }
+ },
+};
+
+static struct component_config_offsets hbm_mc0_binning_cfg_table[HBM_ID_SIZE] = {
+ [HBM_ID0] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM0_MC0,
+ .etf_id = GAUDI2_ETF_HBM0_MC0_CS,
+ .stm_id = GAUDI2_STM_HBM0_MC0_CS,
+ .spmu_id = GAUDI2_SPMU_HBM0_MC0_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID1] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM1_MC0,
+ .etf_id = GAUDI2_ETF_HBM1_MC0_CS,
+ .stm_id = GAUDI2_STM_HBM1_MC0_CS,
+ .spmu_id = GAUDI2_SPMU_HBM1_MC0_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID2] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM2_MC0,
+ .etf_id = GAUDI2_ETF_HBM2_MC0_CS,
+ .stm_id = GAUDI2_STM_HBM2_MC0_CS,
+ .spmu_id = GAUDI2_SPMU_HBM2_MC0_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID3] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM3_MC0,
+ .etf_id = GAUDI2_ETF_HBM3_MC0_CS,
+ .stm_id = GAUDI2_STM_HBM3_MC0_CS,
+ .spmu_id = GAUDI2_SPMU_HBM3_MC0_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID4] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM4_MC0,
+ .etf_id = GAUDI2_ETF_HBM4_MC0_CS,
+ .stm_id = GAUDI2_STM_HBM4_MC0_CS,
+ .spmu_id = GAUDI2_SPMU_HBM4_MC0_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID5] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM5_MC0,
+ .etf_id = GAUDI2_ETF_HBM5_MC0_CS,
+ .stm_id = GAUDI2_STM_HBM5_MC0_CS,
+ .spmu_id = GAUDI2_SPMU_HBM5_MC0_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+};
+
+static struct component_config_offsets hbm_mc1_binning_cfg_table[HBM_ID_SIZE] = {
+ [HBM_ID0] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM0_MC1,
+ .etf_id = GAUDI2_ETF_HBM0_MC1_CS,
+ .stm_id = GAUDI2_STM_HBM0_MC1_CS,
+ .spmu_id = GAUDI2_SPMU_HBM0_MC1_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID1] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM1_MC1,
+ .etf_id = GAUDI2_ETF_HBM1_MC1_CS,
+ .stm_id = GAUDI2_STM_HBM1_MC1_CS,
+ .spmu_id = GAUDI2_SPMU_HBM1_MC1_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID2] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM2_MC1,
+ .etf_id = GAUDI2_ETF_HBM2_MC1_CS,
+ .stm_id = GAUDI2_STM_HBM2_MC1_CS,
+ .spmu_id = GAUDI2_SPMU_HBM2_MC1_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID3] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM3_MC1,
+ .etf_id = GAUDI2_ETF_HBM3_MC1_CS,
+ .stm_id = GAUDI2_STM_HBM3_MC1_CS,
+ .spmu_id = GAUDI2_SPMU_HBM3_MC1_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID4] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM4_MC1,
+ .etf_id = GAUDI2_ETF_HBM4_MC1_CS,
+ .stm_id = GAUDI2_STM_HBM4_MC1_CS,
+ .spmu_id = GAUDI2_SPMU_HBM4_MC1_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+ [HBM_ID5] = {
+ .funnel_id = GAUDI2_FUNNEL_HBM5_MC1,
+ .etf_id = GAUDI2_ETF_HBM5_MC1_CS,
+ .stm_id = GAUDI2_STM_HBM5_MC1_CS,
+ .spmu_id = GAUDI2_SPMU_HBM5_MC1_CS,
+ .bmon_count = 0,
+ .bmon_ids = {COMPONENT_ID_INVALID}
+ },
+};
+
+static struct component_config_offsets decoder_binning_cfg_table[DEC_ID_SIZE] = {
+ [DEC_ID_DCORE0_DEC0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_VDEC0_CS,
+ .stm_id = GAUDI2_STM_DCORE0_VDEC0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_VDEC0_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_VDEC0_0,
+ GAUDI2_BMON_DCORE0_VDEC0_1,
+ GAUDI2_BMON_DCORE0_VDEC0_2,
+ }
+ },
+ [DEC_ID_DCORE0_DEC1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_VDEC1_CS,
+ .stm_id = GAUDI2_STM_DCORE0_VDEC1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_VDEC1_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_VDEC1_0,
+ GAUDI2_BMON_DCORE0_VDEC1_1,
+ GAUDI2_BMON_DCORE0_VDEC1_2,
+ }
+ },
+ [DEC_ID_DCORE1_DEC0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_VDEC0_CS,
+ .stm_id = GAUDI2_STM_DCORE1_VDEC0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_VDEC0_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_VDEC0_0,
+ GAUDI2_BMON_DCORE1_VDEC0_1,
+ GAUDI2_BMON_DCORE1_VDEC0_2,
+ }
+ },
+ [DEC_ID_DCORE1_DEC1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_VDEC1_CS,
+ .stm_id = GAUDI2_STM_DCORE1_VDEC1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_VDEC1_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_VDEC1_0,
+ GAUDI2_BMON_DCORE1_VDEC1_1,
+ GAUDI2_BMON_DCORE1_VDEC1_2,
+ }
+ },
+ [DEC_ID_DCORE2_DEC0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_VDEC0_CS,
+ .stm_id = GAUDI2_STM_DCORE2_VDEC0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_VDEC0_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_VDEC0_0,
+ GAUDI2_BMON_DCORE2_VDEC0_1,
+ GAUDI2_BMON_DCORE2_VDEC0_2,
+ }
+ },
+ [DEC_ID_DCORE2_DEC1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_VDEC1_CS,
+ .stm_id = GAUDI2_STM_DCORE2_VDEC1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_VDEC1_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_VDEC1_0,
+ GAUDI2_BMON_DCORE2_VDEC1_1,
+ GAUDI2_BMON_DCORE2_VDEC1_2,
+ }
+ },
+ [DEC_ID_DCORE3_DEC0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_VDEC0_CS,
+ .stm_id = GAUDI2_STM_DCORE3_VDEC0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_VDEC0_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_VDEC0_0,
+ GAUDI2_BMON_DCORE3_VDEC0_1,
+ GAUDI2_BMON_DCORE3_VDEC0_2,
+ }
+ },
+ [DEC_ID_DCORE3_DEC1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_VDEC1_CS,
+ .stm_id = GAUDI2_STM_DCORE3_VDEC1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_VDEC1_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_VDEC1_0,
+ GAUDI2_BMON_DCORE3_VDEC1_1,
+ GAUDI2_BMON_DCORE3_VDEC1_2,
+ }
+ },
+ [DEC_ID_PCIE_VDEC0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_PCIE_VDEC0_CS,
+ .stm_id = GAUDI2_STM_PCIE_VDEC0_CS,
+ .spmu_id = GAUDI2_SPMU_PCIE_VDEC0_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_PCIE_VDEC0_0,
+ GAUDI2_BMON_PCIE_VDEC0_1,
+ GAUDI2_BMON_PCIE_VDEC0_2,
+ }
+ },
+ [DEC_ID_PCIE_VDEC1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_PCIE_VDEC1_CS,
+ .stm_id = GAUDI2_STM_PCIE_VDEC1_CS,
+ .spmu_id = GAUDI2_SPMU_PCIE_VDEC1_CS,
+ .bmon_count = 3,
+ .bmon_ids = {
+ GAUDI2_BMON_PCIE_VDEC1_0,
+ GAUDI2_BMON_PCIE_VDEC1_1,
+ GAUDI2_BMON_PCIE_VDEC1_2,
+ }
+ },
+};
+
+static struct component_config_offsets edma_binning_cfg_table[EDMA_ID_SIZE] = {
+ [EDMA_ID_DCORE0_INSTANCE0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_EDMA0_CS,
+ .stm_id = GAUDI2_STM_DCORE0_EDMA0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_EDMA0_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_EDMA0_0,
+ GAUDI2_BMON_DCORE0_EDMA0_1,
+ }
+ },
+ [EDMA_ID_DCORE0_INSTANCE1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE0_EDMA1_CS,
+ .stm_id = GAUDI2_STM_DCORE0_EDMA1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE0_EDMA1_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_EDMA1_0,
+ GAUDI2_BMON_DCORE0_EDMA1_1,
+ }
+ },
+ [EDMA_ID_DCORE1_INSTANCE0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_EDMA0_CS,
+ .stm_id = GAUDI2_STM_DCORE1_EDMA0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_EDMA0_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_EDMA0_0,
+ GAUDI2_BMON_DCORE1_EDMA0_1,
+ }
+ },
+ [EDMA_ID_DCORE1_INSTANCE1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE1_EDMA1_CS,
+ .stm_id = GAUDI2_STM_DCORE1_EDMA1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE1_EDMA1_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_EDMA1_0,
+ GAUDI2_BMON_DCORE1_EDMA1_1,
+ }
+ },
+ [EDMA_ID_DCORE2_INSTANCE0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_EDMA0_CS,
+ .stm_id = GAUDI2_STM_DCORE2_EDMA0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_EDMA0_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_EDMA0_0,
+ GAUDI2_BMON_DCORE2_EDMA0_1,
+ }
+ },
+ [EDMA_ID_DCORE2_INSTANCE1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE2_EDMA1_CS,
+ .stm_id = GAUDI2_STM_DCORE2_EDMA1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE2_EDMA1_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_EDMA1_0,
+ GAUDI2_BMON_DCORE2_EDMA1_1,
+ }
+ },
+ [EDMA_ID_DCORE3_INSTANCE0] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_EDMA0_CS,
+ .stm_id = GAUDI2_STM_DCORE3_EDMA0_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_EDMA0_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_EDMA0_0,
+ GAUDI2_BMON_DCORE3_EDMA0_1,
+ }
+ },
+ [EDMA_ID_DCORE3_INSTANCE1] = {
+ .funnel_id = COMPONENT_ID_INVALID,
+ .etf_id = GAUDI2_ETF_DCORE3_EDMA1_CS,
+ .stm_id = GAUDI2_STM_DCORE3_EDMA1_CS,
+ .spmu_id = GAUDI2_SPMU_DCORE3_EDMA1_CS,
+ .bmon_count = 2,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_EDMA1_0,
+ GAUDI2_BMON_DCORE3_EDMA1_1,
+ }
+ },
+};
+
+static struct component_config_offsets tpc_binning_cfg_table[TPC_ID_SIZE] = {
+ [TPC_ID_DCORE0_TPC0] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC0_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC0_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC0_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC0_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC0_EML_0,
+ GAUDI2_BMON_DCORE0_TPC0_EML_1,
+ GAUDI2_BMON_DCORE0_TPC0_EML_2,
+ GAUDI2_BMON_DCORE0_TPC0_EML_3,
+ }
+ },
+ [TPC_ID_DCORE0_TPC1] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC1_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC1_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC1_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC1_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC1_EML_0,
+ GAUDI2_BMON_DCORE0_TPC1_EML_1,
+ GAUDI2_BMON_DCORE0_TPC1_EML_2,
+ GAUDI2_BMON_DCORE0_TPC1_EML_3,
+ }
+ },
+ [TPC_ID_DCORE0_TPC2] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC2_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC2_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC2_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC2_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC2_EML_0,
+ GAUDI2_BMON_DCORE0_TPC2_EML_1,
+ GAUDI2_BMON_DCORE0_TPC2_EML_2,
+ GAUDI2_BMON_DCORE0_TPC2_EML_3,
+ }
+ },
+ [TPC_ID_DCORE0_TPC3] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC3_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC3_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC3_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC3_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC3_EML_0,
+ GAUDI2_BMON_DCORE0_TPC3_EML_1,
+ GAUDI2_BMON_DCORE0_TPC3_EML_2,
+ GAUDI2_BMON_DCORE0_TPC3_EML_3,
+ }
+ },
+ [TPC_ID_DCORE0_TPC4] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC4_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC4_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC4_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC4_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC4_EML_0,
+ GAUDI2_BMON_DCORE0_TPC4_EML_1,
+ GAUDI2_BMON_DCORE0_TPC4_EML_2,
+ GAUDI2_BMON_DCORE0_TPC4_EML_3,
+ }
+ },
+ [TPC_ID_DCORE0_TPC5] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC5_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC5_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC5_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC5_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC5_EML_0,
+ GAUDI2_BMON_DCORE0_TPC5_EML_1,
+ GAUDI2_BMON_DCORE0_TPC5_EML_2,
+ GAUDI2_BMON_DCORE0_TPC5_EML_3,
+ }
+ },
+ [TPC_ID_DCORE1_TPC0] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_TPC0_EML,
+ .etf_id = GAUDI2_ETF_DCORE1_TPC0_EML,
+ .stm_id = GAUDI2_STM_DCORE1_TPC0_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE1_TPC0_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_TPC0_EML_0,
+ GAUDI2_BMON_DCORE1_TPC0_EML_1,
+ GAUDI2_BMON_DCORE1_TPC0_EML_2,
+ GAUDI2_BMON_DCORE1_TPC0_EML_3,
+ }
+ },
+ [TPC_ID_DCORE1_TPC1] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_TPC1_EML,
+ .etf_id = GAUDI2_ETF_DCORE1_TPC1_EML,
+ .stm_id = GAUDI2_STM_DCORE1_TPC1_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE1_TPC1_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_TPC1_EML_0,
+ GAUDI2_BMON_DCORE1_TPC1_EML_1,
+ GAUDI2_BMON_DCORE1_TPC1_EML_2,
+ GAUDI2_BMON_DCORE1_TPC1_EML_3,
+ }
+ },
+ [TPC_ID_DCORE1_TPC2] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_TPC2_EML,
+ .etf_id = GAUDI2_ETF_DCORE1_TPC2_EML,
+ .stm_id = GAUDI2_STM_DCORE1_TPC2_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE1_TPC2_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_TPC2_EML_0,
+ GAUDI2_BMON_DCORE1_TPC2_EML_1,
+ GAUDI2_BMON_DCORE1_TPC2_EML_2,
+ GAUDI2_BMON_DCORE1_TPC2_EML_3,
+ }
+ },
+ [TPC_ID_DCORE1_TPC3] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_TPC3_EML,
+ .etf_id = GAUDI2_ETF_DCORE1_TPC3_EML,
+ .stm_id = GAUDI2_STM_DCORE1_TPC3_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE1_TPC3_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_TPC3_EML_0,
+ GAUDI2_BMON_DCORE1_TPC3_EML_1,
+ GAUDI2_BMON_DCORE1_TPC3_EML_2,
+ GAUDI2_BMON_DCORE1_TPC3_EML_3,
+ }
+ },
+ [TPC_ID_DCORE1_TPC4] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_TPC4_EML,
+ .etf_id = GAUDI2_ETF_DCORE1_TPC4_EML,
+ .stm_id = GAUDI2_STM_DCORE1_TPC4_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE1_TPC4_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_TPC4_EML_0,
+ GAUDI2_BMON_DCORE1_TPC4_EML_1,
+ GAUDI2_BMON_DCORE1_TPC4_EML_2,
+ GAUDI2_BMON_DCORE1_TPC4_EML_3,
+ }
+ },
+ [TPC_ID_DCORE1_TPC5] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE1_TPC5_EML,
+ .etf_id = GAUDI2_ETF_DCORE1_TPC5_EML,
+ .stm_id = GAUDI2_STM_DCORE1_TPC5_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE1_TPC5_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE1_TPC5_EML_0,
+ GAUDI2_BMON_DCORE1_TPC5_EML_1,
+ GAUDI2_BMON_DCORE1_TPC5_EML_2,
+ GAUDI2_BMON_DCORE1_TPC5_EML_3,
+ }
+ },
+ [TPC_ID_DCORE2_TPC0] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_TPC0_EML,
+ .etf_id = GAUDI2_ETF_DCORE2_TPC0_EML,
+ .stm_id = GAUDI2_STM_DCORE2_TPC0_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE2_TPC0_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_TPC0_EML_0,
+ GAUDI2_BMON_DCORE2_TPC0_EML_1,
+ GAUDI2_BMON_DCORE2_TPC0_EML_2,
+ GAUDI2_BMON_DCORE2_TPC0_EML_3,
+ }
+ },
+ [TPC_ID_DCORE2_TPC1] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_TPC1_EML,
+ .etf_id = GAUDI2_ETF_DCORE2_TPC1_EML,
+ .stm_id = GAUDI2_STM_DCORE2_TPC1_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE2_TPC1_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_TPC1_EML_0,
+ GAUDI2_BMON_DCORE2_TPC1_EML_1,
+ GAUDI2_BMON_DCORE2_TPC1_EML_2,
+ GAUDI2_BMON_DCORE2_TPC1_EML_3,
+ }
+ },
+ [TPC_ID_DCORE2_TPC2] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_TPC2_EML,
+ .etf_id = GAUDI2_ETF_DCORE2_TPC2_EML,
+ .stm_id = GAUDI2_STM_DCORE2_TPC2_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE2_TPC2_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_TPC2_EML_0,
+ GAUDI2_BMON_DCORE2_TPC2_EML_1,
+ GAUDI2_BMON_DCORE2_TPC2_EML_2,
+ GAUDI2_BMON_DCORE2_TPC2_EML_3,
+ }
+ },
+ [TPC_ID_DCORE2_TPC3] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_TPC3_EML,
+ .etf_id = GAUDI2_ETF_DCORE2_TPC3_EML,
+ .stm_id = GAUDI2_STM_DCORE2_TPC3_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE2_TPC3_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_TPC3_EML_0,
+ GAUDI2_BMON_DCORE2_TPC3_EML_1,
+ GAUDI2_BMON_DCORE2_TPC3_EML_2,
+ GAUDI2_BMON_DCORE2_TPC3_EML_3,
+ }
+ },
+ [TPC_ID_DCORE2_TPC4] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_TPC4_EML,
+ .etf_id = GAUDI2_ETF_DCORE2_TPC4_EML,
+ .stm_id = GAUDI2_STM_DCORE2_TPC4_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE2_TPC4_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_TPC4_EML_0,
+ GAUDI2_BMON_DCORE2_TPC4_EML_1,
+ GAUDI2_BMON_DCORE2_TPC4_EML_2,
+ GAUDI2_BMON_DCORE2_TPC4_EML_3,
+ }
+ },
+ [TPC_ID_DCORE2_TPC5] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE2_TPC5_EML,
+ .etf_id = GAUDI2_ETF_DCORE2_TPC5_EML,
+ .stm_id = GAUDI2_STM_DCORE2_TPC5_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE2_TPC5_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE2_TPC5_EML_0,
+ GAUDI2_BMON_DCORE2_TPC5_EML_1,
+ GAUDI2_BMON_DCORE2_TPC5_EML_2,
+ GAUDI2_BMON_DCORE2_TPC5_EML_3,
+ }
+ },
+ [TPC_ID_DCORE3_TPC0] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_TPC0_EML,
+ .etf_id = GAUDI2_ETF_DCORE3_TPC0_EML,
+ .stm_id = GAUDI2_STM_DCORE3_TPC0_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE3_TPC0_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_TPC0_EML_0,
+ GAUDI2_BMON_DCORE3_TPC0_EML_1,
+ GAUDI2_BMON_DCORE3_TPC0_EML_2,
+ GAUDI2_BMON_DCORE3_TPC0_EML_3,
+ }
+ },
+ [TPC_ID_DCORE3_TPC1] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_TPC1_EML,
+ .etf_id = GAUDI2_ETF_DCORE3_TPC1_EML,
+ .stm_id = GAUDI2_STM_DCORE3_TPC1_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE3_TPC1_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_TPC1_EML_0,
+ GAUDI2_BMON_DCORE3_TPC1_EML_1,
+ GAUDI2_BMON_DCORE3_TPC1_EML_2,
+ GAUDI2_BMON_DCORE3_TPC1_EML_3,
+ }
+ },
+ [TPC_ID_DCORE3_TPC2] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_TPC2_EML,
+ .etf_id = GAUDI2_ETF_DCORE3_TPC2_EML,
+ .stm_id = GAUDI2_STM_DCORE3_TPC2_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE3_TPC2_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_TPC2_EML_0,
+ GAUDI2_BMON_DCORE3_TPC2_EML_1,
+ GAUDI2_BMON_DCORE3_TPC2_EML_2,
+ GAUDI2_BMON_DCORE3_TPC2_EML_3,
+ }
+ },
+ [TPC_ID_DCORE3_TPC3] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_TPC3_EML,
+ .etf_id = GAUDI2_ETF_DCORE3_TPC3_EML,
+ .stm_id = GAUDI2_STM_DCORE3_TPC3_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE3_TPC3_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_TPC3_EML_0,
+ GAUDI2_BMON_DCORE3_TPC3_EML_1,
+ GAUDI2_BMON_DCORE3_TPC3_EML_2,
+ GAUDI2_BMON_DCORE3_TPC3_EML_3,
+ }
+ },
+ [TPC_ID_DCORE3_TPC4] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_TPC4_EML,
+ .etf_id = GAUDI2_ETF_DCORE3_TPC4_EML,
+ .stm_id = GAUDI2_STM_DCORE3_TPC4_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE3_TPC4_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_TPC4_EML_0,
+ GAUDI2_BMON_DCORE3_TPC4_EML_1,
+ GAUDI2_BMON_DCORE3_TPC4_EML_2,
+ GAUDI2_BMON_DCORE3_TPC4_EML_3,
+ }
+ },
+ [TPC_ID_DCORE3_TPC5] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE3_TPC5_EML,
+ .etf_id = GAUDI2_ETF_DCORE3_TPC5_EML,
+ .stm_id = GAUDI2_STM_DCORE3_TPC5_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE3_TPC5_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE3_TPC5_EML_0,
+ GAUDI2_BMON_DCORE3_TPC5_EML_1,
+ GAUDI2_BMON_DCORE3_TPC5_EML_2,
+ GAUDI2_BMON_DCORE3_TPC5_EML_3,
+ }
+ },
+ [TPC_ID_DCORE0_TPC6] = {
+ .funnel_id = GAUDI2_FUNNEL_DCORE0_TPC6_EML,
+ .etf_id = GAUDI2_ETF_DCORE0_TPC6_EML,
+ .stm_id = GAUDI2_STM_DCORE0_TPC6_EML,
+ .spmu_id = GAUDI2_SPMU_DCORE0_TPC6_EML,
+ .bmon_count = 4,
+ .bmon_ids = {
+ GAUDI2_BMON_DCORE0_TPC6_EML_0,
+ GAUDI2_BMON_DCORE0_TPC6_EML_1,
+ GAUDI2_BMON_DCORE0_TPC6_EML_2,
+ GAUDI2_BMON_DCORE0_TPC6_EML_3,
+ }
+ }
+};
+
+static int gaudi2_coresight_timeout(struct hl_device *hdev, u64 addr,
+ int position, bool up)
+{
+ int rc;
+ u32 val, timeout_usec;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC;
+ else
+ timeout_usec = CORESIGHT_TIMEOUT_USEC;
+
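+	/* Poll until the bit at 'position' is set (up) or cleared (!up) */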
+ rc = hl_poll_timeout(
+ hdev,
+ addr,
+ val,
+ up ? val & BIT(position) : !(val & BIT(position)),
+ 1000,
+ timeout_usec);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
+ addr, position, up);
+
+ return rc;
+}
+
+static int gaudi2_unlock_coresight_unit(struct hl_device *hdev,
+ const u64 base_reg)
+{
+ int rc = 0;
+
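+	/* Write the unlock key, then wait for the lock-status bit (bit 1) to clear */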
+ WREG32(base_reg + mmCORESIGHT_UNLOCK_REGISTER_OFFSET, CORESIGHT_UNLOCK);
+
+ rc = gaudi2_coresight_timeout(hdev, base_reg + mmCORESIGHT_UNLOCK_STATUS_REGISTER_OFFSET,
+ 1, 0);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to unlock register base addr: 0x%llx , position: 1, up: 0\n",
+ base_reg);
+
+ return rc;
+}
+
+static int gaudi2_config_stm(struct hl_device *hdev, struct hl_debug_params *params)
+{
+ struct hl_debug_params_stm *input;
+ u64 base_reg;
+ u32 frequency;
+ u32 read_reg;
+ int rc;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
+ dev_err(hdev->dev, "Invalid register index in STM\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_stm_regs[params->reg_idx];
+
+ /* If the base register is 0x0, skip this configuration */
+ if (!base_reg)
+ return 0;
+
+ /* On PLDM, check whether the unit is a stub component by reading
+ * the STMDMAIDR register (offset 0xCFC); a stub returns 0x0
+ */
+ read_reg = RREG32(base_reg + mmSTM_STMDMAIDR_OFFSET);
+ if (hdev->pldm && read_reg == 0x0)
+ return 0;
+
+ rc = gaudi2_unlock_coresight_unit(hdev, base_reg);
+ if (rc)
+ return -EIO;
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 0x80004);
+ /* dummy read for pldm to flush outstanding writes */
+ if (hdev->pldm)
+ RREG32(base_reg + mmSTM_STMTCSR_OFFSET);
+
+ WREG32(base_reg + mmSTM_STMHEMCR_OFFSET, 7);
+ WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMHEER_OFFSET, lower_32_bits(input->he_mask));
+ WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 1);
+ WREG32(base_reg + mmSTM_STMHEER_OFFSET, upper_32_bits(input->he_mask));
+ WREG32(base_reg + mmSTM_STMSPTRIGCSR_OFFSET, 0x10);
+ WREG32(base_reg + mmSTM_STMSPSCR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMSPER_OFFSET, lower_32_bits(input->sp_mask));
+ WREG32(base_reg + mmSTM_STMITATBID_OFFSET, input->id);
+ WREG32(base_reg + mmSTM_STMHEMASTR_OFFSET, 0x80);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + mmSTM_STMTSFREQR_OFFSET, frequency);
+ WREG32(base_reg + mmSTM_STMSYNCR_OFFSET, 0x7FF);
+ WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 0x27 | (input->id << 16));
+ } else {
+ WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 4);
+ WREG32(base_reg + mmSTM_STMHEMCR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 1);
+ WREG32(base_reg + mmSTM_STMHEER_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMHETER_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMSPTER_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMSPER_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMHEMASTR_OFFSET, 0x80);
+ WREG32(base_reg + mmSTM_STMSPTRIGCSR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMSPSCR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMSPMSCR_OFFSET, 0);
+ WREG32(base_reg + mmSTM_STMTSFREQR_OFFSET, 0);
+
+ rc = gaudi2_coresight_timeout(hdev, base_reg + mmSTM_STMTCSR_OFFSET, 23, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to disable STM on timeout, error %d\n", rc);
+ return rc;
+ }
+
+ WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 4);
+ }
+
+ return 0;
+}
+
+static int gaudi2_config_etf(struct hl_device *hdev, struct hl_debug_params *params)
+{
+ struct hl_debug_params_etf *input;
+ u64 base_reg;
+ u32 read_reg;
+ u32 val;
+ int rc;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
+ dev_err(hdev->dev, "Invalid register index in ETF\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_etf_regs[params->reg_idx];
+
+ /* If the base register is 0x0, skip this configuration */
+ if (!base_reg)
+ return 0;
+
+
+ /* On PLDM we need to check whether the unit is a stub.
+ * For that, read the ETF STS register; if it returns 0x0,
+ * the unit is a stub, so skip the configuration and
+ * return 0 (success)
+ */
+ read_reg = RREG32(base_reg + mmETF_STS_OFFSET);
+ if (hdev->pldm && read_reg == 0x0)
+ return 0;
+
+ rc = gaudi2_unlock_coresight_unit(hdev, base_reg);
+ if (rc)
+ return -EIO;
+
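+ /* Request stop-on-flush (bit 12) and then a manual flush (bit 6) in
+ * the formatter control register, then wait for the flush to complete
+ * and for the unit to report it is ready
+ */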
+ val = RREG32(base_reg + mmETF_FFCR_OFFSET);
+ val |= 0x1000;
+ WREG32(base_reg + mmETF_FFCR_OFFSET, val);
+ val |= 0x40;
+ WREG32(base_reg + mmETF_FFCR_OFFSET, val);
+
+ rc = gaudi2_coresight_timeout(hdev, base_reg + mmETF_FFCR_OFFSET, 6, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = gaudi2_coresight_timeout(hdev, base_reg + mmETF_STS_OFFSET, 2, true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(base_reg + mmETF_CTL_OFFSET, 0);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + mmETF_BUFWM_OFFSET, 0x3FFC);
+ WREG32(base_reg + mmETF_MODE_OFFSET, input->sink_mode);
+ WREG32(base_reg + mmETF_FFCR_OFFSET, 0x4001);
+ WREG32(base_reg + mmETF_PSCR_OFFSET, 0x10);
+ WREG32(base_reg + mmETF_CTL_OFFSET, 1);
+ } else {
+ WREG32(base_reg + mmETF_BUFWM_OFFSET, 0);
+ WREG32(base_reg + mmETF_MODE_OFFSET, 0);
+ WREG32(base_reg + mmETF_FFCR_OFFSET, 0);
+ }
+
+ return 0;
+}
+
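+/* Check that the ETR buffer fits entirely inside an allowed range: the
+ * PMMU/DMMU virtual ranges when the PMMU is initialized, user DRAM
+ * otherwise, or SRAM in either case
+ */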
+static bool gaudi2_etr_validate_address(struct hl_device *hdev, u64 addr, u64 size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (addr > (addr + size)) {
+ dev_err(hdev->dev, "ETR buffer size %llu overflow\n", size);
+ return false;
+ }
+
+ if (gaudi2->hw_cap_initialized & HW_CAP_PMMU) {
+ if (hl_mem_area_inside_range(addr, size,
+ prop->pmmu.start_addr,
+ prop->pmmu.end_addr))
+ return true;
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->pmmu_huge.start_addr,
+ prop->pmmu_huge.end_addr))
+ return true;
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr))
+ return true;
+ } else {
+ if (hl_mem_area_inside_range(addr, size,
+ prop->dram_user_base_address,
+ prop->dram_end_address))
+ return true;
+ }
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->sram_user_base_address,
+ prop->sram_end_address))
+ return true;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
+ dev_err(hdev->dev, "ETR buffer should be in SRAM/DRAM\n");
+
+ return false;
+}
+
+static int gaudi2_config_etr(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etr *input;
+ u64 msb;
+ u32 val;
+ int rc;
+
+ rc = gaudi2_unlock_coresight_unit(hdev, mmPSOC_ETR_BASE);
+ if (rc)
+ return -EIO;
+
+ val = RREG32(mmPSOC_ETR_FFCR);
+ val |= 0x1000;
+ WREG32(mmPSOC_ETR_FFCR, val);
+ val |= 0x40;
+ WREG32(mmPSOC_ETR_FFCR, val);
+
+ rc = gaudi2_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = gaudi2_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(mmPSOC_ETR_CTL, 0);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->buffer_size == 0) {
+ dev_err(hdev->dev, "ETR buffer size should be bigger than 0\n");
+ return -EINVAL;
+ }
+
+ if (!gaudi2_etr_validate_address(hdev, input->buffer_address, input->buffer_size)) {
+ dev_err(hdev->dev, "ETR buffer address is invalid\n");
+ return -EINVAL;
+ }
+
+ RMWREG32(mmPSOC_GLOBAL_CONF_TRACE_AWUSER, ctx->asid, MMUBP_ASID_MASK);
+ RMWREG32(mmPSOC_GLOBAL_CONF_TRACE_ARUSER, ctx->asid, MMUBP_ASID_MASK);
+
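+ /* Bits 63:40 of the buffer address (its 24 MSBs) are kept in a
+ * global configuration register so they can be read back when
+ * tracing is stopped
+ */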
+ msb = upper_32_bits(input->buffer_address) >> 8;
+ WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
+
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ /* write the protection bits only if security is disabled */
+ if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
+ /* make ETR not privileged */
+ val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
+ /* make ETR non-secured (inverted logic) */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
+ /* burst size 16 */
+ val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, 0xF);
+ WREG32(mmPSOC_ETR_AXICTL, val);
+ }
+ WREG32(mmPSOC_ETR_DBALO, lower_32_bits(input->buffer_address));
+ WREG32(mmPSOC_ETR_DBAHI, upper_32_bits(input->buffer_address));
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0x10);
+ WREG32(mmPSOC_ETR_CTL, 1);
+ } else {
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
+
+ if (params->output_size >= sizeof(u64)) {
+ u32 rwp, rwphi;
+
+ /*
+ * The trace buffer address is 64 bits wide. The end of
+ * the buffer is set in the RWP register (lower 32
+ * bits), and in the RWPHI register (upper 8 bits).
+ * The 24 msb of the 64-bit address are stored in a
+ * global configuration register.
+ */
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
+ msb = RREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR);
+ *(u64 *) params->output = ((u64) msb << 40) | ((u64) rwphi << 32) | rwp;
+ }
+ }
+
+ return 0;
+}
+
+static int gaudi2_config_funnel(struct hl_device *hdev, struct hl_debug_params *params)
+{
+ u64 base_reg;
+ u32 val = params->enable ? 0xFFF : 0;
+ u32 read_reg;
+ int rc = 0;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
+ dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_funnel_regs[params->reg_idx];
+
+ /* If the base register is 0x0, skip this configuration */
+ if (!base_reg)
+ return 0;
+
+
+ /* On PLDM we need to check whether the unit is a stub.
+ * For that, read the DEVID register; if it returns 0x0,
+ * the unit is a stub, so skip the configuration and
+ * return 0 (success)
+ */
+ read_reg = RREG32(base_reg + mmFUNNEL_DEVID_OFFSET);
+ if (hdev->pldm && read_reg == 0x0)
+ return 0;
+
+ rc = gaudi2_unlock_coresight_unit(hdev, base_reg);
+ if (rc)
+ return -EIO;
+
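+ /* Writing 0xFFF to the funnel control register (offset 0x0) enables
+ * all of its input ports, while 0 disables them
+ */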
+ WREG32(base_reg, val);
+
+ return 0;
+}
+
+static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *params)
+{
+ struct hl_debug_params_bmon *input;
+ u64 base_reg;
+ u32 read_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
+ dev_err(hdev->dev, "Invalid register index in BMON\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_bmon_regs[params->reg_idx];
+
+ /* If the base register is 0x0, skip this configuration */
+ if (!base_reg)
+ return 0;
+
+
+ /* On PLDM we need to check whether the unit is a stub.
+ * For that, read the Control Register (offset 0x0); if it
+ * returns 0x0, the unit is a stub, so skip the configuration
+ * and return 0 (success)
+ */
+ read_reg = RREG32(base_reg + mmBMON_CR_OFFSET);
+ if (hdev->pldm && read_reg == 0x0)
+ return 0;
+
+ WREG32(base_reg + mmBMON_ATTREN_OFFSET, 1);
+ /* dummy read for pldm to flush outstanding writes */
+ if (hdev->pldm)
+ RREG32(base_reg + mmBMON_ATTREN_OFFSET);
+
+ /* Reset the AXI monitor via its write-only reset register */
+
+ WREG32(base_reg + mmBMON_RESET_OFFSET, 0x1);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + mmBMON_ADDRL_S0_OFFSET, lower_32_bits(input->start_addr0));
+ WREG32(base_reg + mmBMON_ADDRH_S0_OFFSET, upper_32_bits(input->start_addr0));
+ WREG32(base_reg + mmBMON_ADDRL_E0_OFFSET, lower_32_bits(input->addr_mask0));
+ WREG32(base_reg + mmBMON_ADDRH_E0_OFFSET, upper_32_bits(input->addr_mask0));
+ WREG32(base_reg + mmBMON_ADDRL_S1_OFFSET, lower_32_bits(input->start_addr1));
+ WREG32(base_reg + mmBMON_ADDRH_S1_OFFSET, upper_32_bits(input->start_addr1));
+ WREG32(base_reg + mmBMON_ADDRL_E1_OFFSET, lower_32_bits(input->addr_mask1));
+ WREG32(base_reg + mmBMON_ADDRH_E1_OFFSET, upper_32_bits(input->addr_mask1));
+ WREG32(base_reg + mmBMON_ADDRL_S2_OFFSET, lower_32_bits(input->start_addr2));
+ WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, upper_32_bits(input->start_addr2));
+ WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, lower_32_bits(input->end_addr2));
+ WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, upper_32_bits(input->end_addr2));
+ WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr2));
+ WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr2));
+ WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr2));
+ WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr2));
+
+ WREG32(base_reg + mmBMON_IDL_OFFSET, 0x0);
+ WREG32(base_reg + mmBMON_IDH_OFFSET, 0x0);
+
+ WREG32(base_reg + mmBMON_ATTREN_OFFSET, 0);
+ WREG32(base_reg + mmBMON_BW_WIN_OFFSET, input->bw_win);
+ WREG32(base_reg + mmBMON_WIN_CAPTURE_OFFSET, input->win_capture);
+ WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0x1 | (13 << 8));
+ WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (input->id << 8));
+ WREG32(base_reg + mmBMON_CR_OFFSET, input->control);
+ } else {
+ WREG32(base_reg + mmBMON_ADDRL_S0_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_S0_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_E0_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_E0_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_S1_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_S1_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_E1_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_E1_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_S2_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, 0);
+ WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
+ WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
+ WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ }
+
+ return 0;
+}
+
+static int gaudi2_config_spmu(struct hl_device *hdev, struct hl_debug_params *params)
+{
+ struct hl_debug_params_spmu *input = params->input;
+ u32 output_arr_len;
+ u32 cycle_cnt_idx;
+ u32 overflow_idx;
+ u32 events_num;
+ u32 event_mask;
+ u64 base_reg;
+ u32 read_reg;
+ u64 *output;
+ int i;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
+ dev_err(hdev->dev, "Invalid register index in SPMU\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_spmu_regs[params->reg_idx];
+
+ /* If the base register is 0x0, skip this configuration */
+ if (!base_reg)
+ return 0;
+
+ /* On PLDM we need to check whether the unit is a stub.
+ * For that, read the PMCR register; if it returns 0x0,
+ * the unit is a stub, so skip the configuration and
+ * return 0 (success)
+ */
+ read_reg = RREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET);
+ if (hdev->pldm && read_reg == 0x0)
+ return 0;
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->event_types_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev, "too many event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013046);
+ WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013040);
+
+ /* dummy read for pldm to flush outstanding writes */
+ if (hdev->pldm)
+ RREG32(base_reg);
+
+ for (i = 0 ; i < input->event_types_num ; i++)
+ WREG32(base_reg + mmSPMU_PMEVTYPER0_EL0_OFFSET + i * 4,
+ input->event_types[i]);
+
+ WREG32(base_reg + mmSPMU_PMTRC_OFFSET, input->pmtrc_val);
+ WREG32(base_reg + mmSPMU_TRC_CTRL_HOST_OFFSET, input->trc_ctrl_host_val);
+ WREG32(base_reg + mmSPMU_TRC_EN_HOST_OFFSET, input->trc_en_host_val);
+
+ WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013041);
+
+ /*
+ * set enabled events mask based on input->event_types_num
+ */
+ event_mask = 0x80000000;
+ event_mask |= GENMASK(input->event_types_num, 0);
+
+ WREG32(base_reg + mmSPMU_PMCNTENSET_EL0_OFFSET, event_mask);
+ } else {
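+ /* The output array holds one entry per event counter, followed by
+ * the overflow status and the 64-bit cycle counter
+ */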
+ output = params->output;
+ output_arr_len = params->output_size / 8;
+ events_num = output_arr_len - 2;
+ overflow_idx = output_arr_len - 2;
+ cycle_cnt_idx = output_arr_len - 1;
+
+ WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013040);
+
+ if (output && output_arr_len > 2) {
+
+ if (events_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev, "too many events values for SPMU disable\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < events_num ; i++) {
+ const u64 performance_counter_offset =
+ base_reg + mmSPMU_PMEVCNTR0_EL0_OFFSET + (i * 8);
+
+ output[i] = RREG32(performance_counter_offset);
+ }
+
+ output[overflow_idx] = RREG32(base_reg + mmSPMU_PMOVSSET_EL0_OFFSET);
+ output[cycle_cnt_idx] = RREG32(base_reg + mmSPMU_PMCCNTR_H_EL0_OFFSET);
+ output[cycle_cnt_idx] <<= 32;
+ output[cycle_cnt_idx] |= RREG32(base_reg + mmSPMU_PMCCNTR_L_EL0_OFFSET);
+ }
+
+ WREG32(base_reg + mmSPMU_PMOVSSET_EL0_OFFSET, 0);
+
+ /* restore PMTRC to its reset value */
+ WREG32(base_reg + mmSPMU_PMTRC_OFFSET, 0x100400);
+ }
+
+ return 0;
+}
+
+int gaudi2_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
+{
+ struct hl_debug_params *params = data;
+ int rc = 0;
+
+ switch (params->op) {
+ case HL_DEBUG_OP_STM:
+ rc = gaudi2_config_stm(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETF:
+ rc = gaudi2_config_etf(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETR:
+ rc = gaudi2_config_etr(hdev, ctx, params);
+ break;
+ case HL_DEBUG_OP_FUNNEL:
+ rc = gaudi2_config_funnel(hdev, params);
+ break;
+ case HL_DEBUG_OP_BMON:
+ rc = gaudi2_config_bmon(hdev, params);
+ break;
+ case HL_DEBUG_OP_SPMU:
+ rc = gaudi2_config_spmu(hdev, params);
+ break;
+ case HL_DEBUG_OP_TIMESTAMP:
+ /* Do nothing as this opcode is deprecated */
+ break;
+ default:
+ dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+void gaudi2_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_debug_params params = {};
+ int i, rc;
+
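+ /* params is zero-initialized, i.e. enable == false, so each config
+ * call below disables the relevant component
+ */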
+ /* in pldm attempting to access stubbed etfs can cause problems */
+ if (!hdev->pldm)
+ for (i = GAUDI2_ETF_FIRST ; i <= GAUDI2_ETF_LAST ; i++) {
+ params.reg_idx = i;
+ rc = gaudi2_config_etf(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
+ }
+
+ rc = gaudi2_config_etr(hdev, ctx, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
+}
+
+
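+/* Zero the coresight register offsets of every component whose bit is clear
+ * in enabled_mask, so that later configuration requests targeting it are
+ * silently ignored
+ */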
+static int gaudi2_coresight_set_disabled_components(struct hl_device *hdev, u32 unit_count,
+ u32 enabled_mask,
+ const struct component_config_offsets *binning_table)
+{
+ u32 component_idx = 0;
+ u32 disabled_mask;
+ u32 full_mask;
+
+ /* nothing to do if there are no units */
+ if (!unit_count)
+ return 0;
+
+ full_mask = GENMASK(unit_count - 1, 0);
+
+ /* set the disable bits on disabled mask */
+ disabled_mask = (~enabled_mask) & full_mask;
+
+ while (disabled_mask) {
+ u32 component_mask = 1 << component_idx;
+
+ if (component_idx >= unit_count) {
+ dev_err(hdev->dev, "index is out of range index(%u) >= units_count(%u)\n",
+ component_idx, unit_count);
+ return -EINVAL;
+ }
+
+ /*
+ * If the component's bit is set in the disabled mask, the driver must
+ * zero out its offsets, at the appropriate indices, in the following arrays:
+ * debug_funnel_regs - offsets of all cs_dbg FUNNELs
+ * debug_etf_regs - offsets of all cs_dbg ETFs
+ * debug_stm_regs - offsets of all cs_dbg STMs
+ * debug_spmu_regs - offsets of all cs_dbg SPMUs
+ * debug_bmon_regs - offsets of all cs_dbg BMONs
+ * An offset of COMPONENT_ID_INVALID means the component has no such register.
+ */
+
+ if (disabled_mask & component_mask) {
+ u32 bmon_idx;
+ const struct component_config_offsets *binned_component =
+ &(binning_table[component_idx]);
+
+ if (binned_component->funnel_id != COMPONENT_ID_INVALID)
+ debug_funnel_regs[binned_component->funnel_id] = 0x0;
+
+ if (binned_component->etf_id != COMPONENT_ID_INVALID)
+ debug_etf_regs[binned_component->etf_id] = 0x0;
+
+ if (binned_component->stm_id != COMPONENT_ID_INVALID)
+ debug_stm_regs[binned_component->stm_id] = 0x0;
+
+ if (binned_component->spmu_id != COMPONENT_ID_INVALID)
+ debug_spmu_regs[binned_component->spmu_id] = 0x0;
+
+ for (bmon_idx = 0; bmon_idx < binned_component->bmon_count; bmon_idx++)
+ debug_bmon_regs[binned_component->bmon_ids[bmon_idx]] = 0x0;
+
+ /* clear the component's bit in the disabled mask */
+ disabled_mask &= ~component_mask;
+ }
+
+ component_idx++;
+ }
+
+ return 0;
+}
+
+int gaudi2_coresight_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int ret;
+
+ /*
+ * Mask out the offsets of all disabled (binned) components, so that when
+ * the user requests to configure a binned or masked-out component, the
+ * driver ignores the request (this happens when the offset value is 0x0).
+ * The offsets are zeroed in gaudi2_coresight_set_disabled_components().
+ */
+
+ /* Set TPC disable components */
+ ret = gaudi2_coresight_set_disabled_components(hdev, TPC_ID_SIZE, prop->tpc_enabled_mask,
+ tpc_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for tpc coresight\n");
+ return ret;
+ }
+
+ /* Set decoder disable components */
+ ret = gaudi2_coresight_set_disabled_components(hdev, DEC_ID_SIZE,
+ prop->decoder_enabled_mask, decoder_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for decoder coresight\n");
+ return ret;
+ }
+
+ /* Set HBM (MC0 and MC1) disable components */
+ ret = gaudi2_coresight_set_disabled_components(hdev, HBM_ID_SIZE, prop->dram_enabled_mask,
+ hbm_mc0_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for hbm mc0 coresight\n");
+ return ret;
+ }
+
+ ret = gaudi2_coresight_set_disabled_components(hdev, HBM_ID_SIZE, prop->dram_enabled_mask,
+ hbm_mc1_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for hbm mc1 coresight\n");
+ return ret;
+ }
+
+ /* Set HIF_HMMU disable components */
+ ret = gaudi2_coresight_set_disabled_components(hdev, HMMU_ID_SIZE,
+ prop->hmmu_hif_enabled_mask, hmmu_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for hmmu coresight\n");
+ return ret;
+ }
+
+ /* Set XBAR_EDGE disable components */
+ ret = gaudi2_coresight_set_disabled_components(hdev, XBAR_EDGE_ID_SIZE,
+ prop->xbar_edge_enabled_mask, xbar_edge_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for xbar_edge coresight\n");
+ return ret;
+ }
+
+ /* Set EDMA disable components */
+ ret = gaudi2_coresight_set_disabled_components(hdev, EDMA_ID_SIZE, prop->edma_enabled_mask,
+ edma_binning_cfg_table);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to set disabled cs_dbg units for edma coresight\n");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h b/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h
new file mode 100644
index 000000000000..df8729286e06
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h
@@ -0,0 +1,1063 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+#ifndef GAUDI2_CORESIGHT_REGS_DRV_H_
+#define GAUDI2_CORESIGHT_REGS_DRV_H_
+
+#include "gaudi2_masks.h"
+#include "../include/gaudi2/gaudi2_coresight.h"
+#include "gaudi2P.h"
+
+/* FUNNEL Offsets - same offsets for all funnels */
+#define mmFUNNEL_CTRL_REG_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CTRL_REG - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PRIORITY_CTRL_REG_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PRIORITY_CTRL_REG - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_ITATBDATA0_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_ITATBDATA0 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_ITATBCTR2_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_ITATBCTR2 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_ITATBCTR1_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_ITATBCTR1 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_ITATBCTR0_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_ITATBCTR0 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_ITCTRL_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_ITCTRL - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_CLAIMSET_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CLAIMSET - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_CLAIMCLR_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CLAIMCLR - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_LOCKACCESS_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_LOCKACCESS - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_LOCKSTATUS_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_LOCKSTATUS - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_AUTHSTATUS_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_AUTHSTATUS - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_DEVID_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_DEVID - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_DEVTYPE_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_DEVTYPE - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PIDR4_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PIDR4 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PERIPHID5_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PERIPHID5 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PERIPHID6_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PERIPHID6 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PERIPHID7_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PERIPHID7 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PIDR0 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PIDR1 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PIDR2 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_PIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_PIDR3 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_CID0_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CID0 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_CID1_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CID1 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_CID2_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CID2 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+#define mmFUNNEL_CID3_OFFSET \
+ (mmDCORE0_TPC0_EML_FUNNEL_CID3 - \
+ mmDCORE0_TPC0_EML_FUNNEL_BASE)
+
+/* ETF Offsets - same offsets for all etfs */
+#define mmETF_RSZ_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_RSZ - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_STS_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_STS - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_RRD_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_RRD - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_RRP_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_RRP - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_RWP_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_RWP - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_TRG_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_TRG - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_CTL_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_CTL - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_RWD_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_RWD - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_MODE_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_MODE - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_LBUFLEVEL_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_LBUFLEVEL - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_CBUFLEVEL_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_CBUFLEVEL - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_BUFWM_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_BUFWM - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_FFSR_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_FFSR - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_FFCR_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_FFCR - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PSCR_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PSCR - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBMDATA0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBMDATA0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBMCTR2_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBMCTR2 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBMCTR1_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBMCTR1 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBMCTR0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBMCTR0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITMISCOP0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITMISCOP0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITTRFLIN_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITTRFLIN - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBDATA0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBDATA0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBCTR2_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBCTR2 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBCTR1_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBCTR1 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITATBCTR0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITATBCTR0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_ITCTRL_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_ITCTRL - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_CLAIMSET_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_CLAIMSET - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_CLAIMCLR_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_CLAIMCLR - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_LAR_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_LAR - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_LSR_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_LSR - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_AUTHSTATUS_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_AUTHSTATUS - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_DEVID_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_DEVID - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_DEVTYPE_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_DEVTYPE - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID4_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID4 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID5_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID5 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID6_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID6 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID7_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID7 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID1_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID1 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID2_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID2 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_PERIPHID3_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_PERIPHID3 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_COMPID0_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_COMPID0 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_COMPID1_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_COMPID1 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_COMPID2_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_COMPID2 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+#define mmETF_COMPID3_OFFSET \
+ (mmDCORE0_TPC0_EML_ETF_COMPID3 - \
+ mmDCORE0_TPC0_EML_ETF_BASE)
+
+
+/* STM OFFSETS - same offsets for all stms */
+#define mmSTM_STMDMASTARTR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDMASTARTR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDMASTOPR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDMASTOPR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDMASTATR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDMASTATR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDMACTLR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDMACTLR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDMAIDR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDMAIDR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHETER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHETER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEBSR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEBSR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEMCR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEMCR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEEXTMUXR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEEXTMUXR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEMASTR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEMASTR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEFEAT1R_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEFEAT1R - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMHEIDR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMHEIDR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPTER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPTER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPSCR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPSCR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPMSCR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPMSCR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPOVERRIDER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPOVERRIDER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPMOVERRIDER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPMOVERRIDER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSPTRIGCSR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSPTRIGCSR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMTCSR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMTCSR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMTSSTIMR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMTSSTIMR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMTSFREQR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMTSFREQR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMSYNCR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMSYNCR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMAUXCR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMAUXCR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMFEAT1R_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMFEAT1R - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMFEAT2R_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMFEAT2R - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMFEAT3R_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMFEAT3R - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMITTRIGGER_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMITTRIGGER - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMITATBDATA0_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMITATBDATA0 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMITATBCTR2_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMITATBCTR2 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMITATBID_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMITATBID - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMITATBCTR0_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMITATBCTR0 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMITCTRL_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMITCTRL - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMCLAIMSET_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMCLAIMSET - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMCLAIMCLR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMCLAIMCLR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMLAR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMLAR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMLSR_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMLSR - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMAUTHSTATUS_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMAUTHSTATUS - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDEVARCH_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDEVARCH - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDEVID_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDEVID - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMDEVTYPE_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMDEVTYPE - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR4_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR4 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR5_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR5 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR6_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR6 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR7_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR7 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR0 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR1 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR2 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMPIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMPIDR3 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMCIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMCIDR0 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMCIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMCIDR1 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMCIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMCIDR2 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+#define mmSTM_STMCIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_STM_STMCIDR3 - \
+ mmDCORE0_TPC0_EML_STM_BASE)
+
+
+/* SPMU OFFSETS - same offsets for all SPMUs */
+#define mmSPMU_PMEVCNTR0_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTR0_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTR1_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTR1_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTR2_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTR2_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTR3_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTR3_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTR4_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTR4_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTR5_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTR5_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCCNTR_L_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCCNTR_L_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCCNTR_H_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCCNTR_H_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMTRC_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMTRC - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_TRC_CTRL_HOST_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_TRC_CTRL_HOST - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_TRC_STAT_HOST_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_TRC_STAT_HOST - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_TRC_EN_HOST_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_TRC_EN_HOST - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVTYPER0_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVTYPER0_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVTYPER1_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVTYPER1_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVTYPER2_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVTYPER2_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVTYPER3_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVTYPER3_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVTYPER4_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVTYPER4_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVTYPER5_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVTYPER5_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMSSR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMSSR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMOVSSR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMOVSSR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCCNTSR_L_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCCNTSR_L - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCCNTSR_H_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCCNTSR_H - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTSR0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTSR1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTSR2_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR2 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTSR3_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR3 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTSR4_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR4 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMEVCNTSR5_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR5 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMSCR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMSCR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMSRR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMSRR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCNTENSET_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCNTENSET_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCNTENCLR_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCNTENCLR_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMINTENSET_EL1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMINTENSET_EL1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMINTENCLR_EL1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMINTENCLR_EL1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMOVSCLR_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMOVSCLR_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMSWINC_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMSWINC_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMOVSSET_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMOVSSET_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCFGR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCFGR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCR_EL0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCR_EL0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMITCTRL_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMITCTRL - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCLAIMSET_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCLAIMSET - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCLAIMCLR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCLAIMCLR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVAFF0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVAFF0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVAFF1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVAFF1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMLAR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMLAR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMLSR_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMLSR - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMAUTHSTATUS_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMAUTHSTATUS - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVARCH_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVARCH - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVID2_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVID2 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVID1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVID1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVID_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVID - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMDEVTYPE_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMDEVTYPE - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR4_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR4 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR5_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR5 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR6_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR6 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR7_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR7 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR2 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMPIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMPIDR3 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCIDR0 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCIDR1 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCIDR2 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+#define mmSPMU_PMCIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_SPMU_PMCIDR3 - \
+ mmDCORE0_TPC0_EML_SPMU_BASE)
+
+
+/* BMON OFFSETS - same offsets for all BMONs */
+#define mmBMON_CR_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CR - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_RESET_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_REG_RESET - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_INT_CLR_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_INT_CLR - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_TRIG_TH_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_TRIG_TH - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_S0_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S0 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_S0_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S0 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_E0_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E0 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_E0_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E0 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_S1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_S1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_E1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_E1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_S2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_S2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_E2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_E2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_S3_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S3 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_S3_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S3 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_E3_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E3 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_E3_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E3 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_REDUCTION_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_REDUCTION - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_IDL_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_IDL - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_IDH_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_IDH - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_IDENL_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_IDENL - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_IDENH_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_IDENH - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_LATENCY_SMP_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_LATENCY_SMP - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ATTR_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ATTR - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ATTREN_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ATTREN - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_USRENL_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_USRENL - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_USRL_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_USRL - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_USRENH_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_USRENH - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_USRH_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_USRH - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CAPTURE_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CAPTURE - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_RELEASE_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_RELEASE - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_WIN_CAPTURE_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_WIN_CAPTURE - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_BW_WIN_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_BW_WIN - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MATCH_CNT_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MATCH_CNT_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MATCH_CNT_WIN_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MATCH_CNT_WIN - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CYCCNT_L_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CYCCNT_L - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CYCCNT_H_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CYCCNT_H - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MAXLAT_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MAXLAT_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MINLAT_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MINLAT_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MAXBW_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MAXBW_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MINBW_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MINBW_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MAXOS_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MAXOS_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_MINOS_SOD_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_MINOS_SOD - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRL_SNAPSHOT_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_SNAPSHOT - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ADDRH_SNAPSHOT_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_SNAPSHOT - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_IDL_SNAPSHOT_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_IDL_SNAPSHOT - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_IDH_SNAPSHOT_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_IDH_SNAPSHOT - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_ATTR_SNAPSHOT_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_ATTR_SNAPSHOT - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_STM_TRC_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_STM_TRC - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_STM_TRC_DROP_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_STM_TRC_DROP - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_DEVARCH_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_DEVARCH - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PMDEVID2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PMDEVID2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PMDEVID1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PMDEVID1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PMDEVID_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PMDEVID - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_DEVTYPE_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_DEVTYPE - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR4_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR4 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR5_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR5 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR6_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR6 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR7_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR7 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR0 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_PIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_PIDR3 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CIDR0_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CIDR0 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CIDR1_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CIDR1 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CIDR2_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CIDR2 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+#define mmBMON_CIDR3_OFFSET \
+ (mmDCORE0_TPC0_EML_BUSMON_0_CIDR3 - \
+ mmDCORE0_TPC0_EML_BUSMON_0_BASE)
+
+
+/* Coresight unlock offset */
+#define mmCORESIGHT_UNLOCK_REGISTER_OFFSET mmSTM_STMLAR_OFFSET
+#define mmCORESIGHT_UNLOCK_STATUS_REGISTER_OFFSET mmSTM_STMLSR_OFFSET
+
+#endif /* GAUDI2_CORESIGHT_REGS_DRV_H_ */
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h b/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
new file mode 100644
index 000000000000..eed16d642a5a
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI2_MASKS_H_
+#define GAUDI2_MASKS_H_
+
+#include "../include/gaudi2/asic_reg/gaudi2_regs.h"
+
+/* Useful masks for bits in various registers */
+#define QMAN_GLBL_ERR_CFG_MSG_EN_MASK \
+ ((0xF << PDMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK \
+ ((0xF << PDMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT))
+
+#define QMAN_GLBL_ERR_CFG1_MSG_EN_MASK \
+ (0x1 << PDMA0_QM_GLBL_ERR_CFG1_CQF_ERR_MSG_EN_SHIFT)
+
+#define QMAN_GLBL_ERR_CFG1_STOP_ON_ERR_EN_MASK \
+ ((0x1 << PDMA0_QM_GLBL_ERR_CFG1_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_ERR_CFG1_ARC_STOP_ON_ERR_SHIFT))
+
+#define QM_PQC_LBW_WDATA \
+ ((1 << DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_SHIFT) | \
+ (1 << DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_SHIFT))
+
+#define QMAN_MAKE_TRUSTED \
+ ((0xF << PDMA0_QM_GLBL_PROT_PQF_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_PROT_ERR_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_PROT_PQC_SHIFT))
+
+#define QMAN_MAKE_TRUSTED_TEST_MODE \
+ ((0xF << PDMA0_QM_GLBL_PROT_PQF_SHIFT) | \
+ (0xF << PDMA0_QM_GLBL_PROT_CQF_SHIFT) | \
+ (0xF << PDMA0_QM_GLBL_PROT_CP_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_PROT_ERR_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_PROT_PQC_SHIFT))
+
+#define QMAN_ENABLE \
+ ((0xF << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
+
+#define PDMA1_QMAN_ENABLE \
+ ((0x3 << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
+
+/* QM_IDLE_MASK is valid for all engines QM idle check */
+#define QM_IDLE_MASK (DCORE0_EDMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ DCORE0_EDMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ DCORE0_EDMA0_QM_GLBL_STS0_CP_IDLE_MASK)
+
+#define QM_ARC_IDLE_MASK DCORE0_EDMA0_QM_GLBL_STS1_ARC_CQF_IDLE_MASK
+
+#define MME_ARCH_IDLE_MASK \
+ (DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_EMPTY_MASK | \
+ DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_COUT_SM_IDLE_MASK | \
+ DCORE0_MME_CTRL_LO_ARCH_STATUS_WBC_AXI_IDLE_MASK | \
+ DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_AXI_IDLE_MASK | \
+ DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_IDLE_MASK | \
+ DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_RDY_MASK)
+
+#define TPC_IDLE_MASK (DCORE0_TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
+ DCORE0_TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
+ DCORE0_TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
+ DCORE0_TPC0_CFG_STATUS_SB_EMPTY_MASK | \
+ DCORE0_TPC0_CFG_STATUS_QM_IDLE_MASK | \
+ DCORE0_TPC0_CFG_STATUS_QM_RDY_MASK)
+
+#define DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+
+/* CGM_IDLE_MASK is valid for all engines CGM idle check */
+#define CGM_IDLE_MASK DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK
+
+#define QM_GLBL_CFG1_PQF_STOP PDMA0_QM_GLBL_CFG1_PQF_STOP_MASK
+#define QM_GLBL_CFG1_CQF_STOP PDMA0_QM_GLBL_CFG1_CQF_STOP_MASK
+#define QM_GLBL_CFG1_CP_STOP PDMA0_QM_GLBL_CFG1_CP_STOP_MASK
+#define QM_GLBL_CFG1_PQF_FLUSH PDMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK
+#define QM_GLBL_CFG1_CQF_FLUSH PDMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK
+#define QM_GLBL_CFG1_CP_FLUSH PDMA0_QM_GLBL_CFG1_CP_FLUSH_MASK
+
+#define QM_GLBL_CFG2_ARC_CQF_STOP PDMA0_QM_GLBL_CFG2_ARC_CQF_STOP_MASK
+#define QM_GLBL_CFG2_ARC_CQF_FLUSH PDMA0_QM_GLBL_CFG2_ARC_CQF_FLUSH_MASK
+
+#define QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+#define QM_ARB_ERR_MSG_EN_MASK (\
+ QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
+ QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
+ QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+
+#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
+#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+
+#define MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK GENMASK(1, 0)
+#define MME_ACC_INTR_MASK_AP_SRC_POS_INF_MASK BIT(2)
+#define MME_ACC_INTR_MASK_AP_SRC_NEG_INF_MASK BIT(3)
+#define MME_ACC_INTR_MASK_AP_SRC_NAN_MASK BIT(4)
+#define MME_ACC_INTR_MASK_AP_RESULT_POS_INF_MASK BIT(5)
+#define MME_ACC_INTR_MASK_AP_RESULT_NEG_INF_MASK BIT(6)
+
+#define SM_CQ_L2H_MASK_VAL 0xFFFFFFFFFC000000ull
+#define SM_CQ_L2H_CMPR_VAL 0x1000007FFC000000ull
+#define SM_CQ_L2H_LOW_MASK GENMASK(31, 20)
+#define SM_CQ_L2H_LOW_SHIFT 20
+
+#define MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK \
+ REG_FIELD_MASK(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE, HOP4_PAGE_SIZE)
+#define STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK \
+ REG_FIELD_MASK(DCORE0_HMMU0_STLB_HOP_CONFIGURATION, ONLY_LARGE_PAGE)
+
+#define AXUSER_HB_SEC_ASID_MASK 0x3FF
+#define AXUSER_HB_SEC_MMBP_MASK 0x400
+
+#define MMUBP_ASID_MASK (AXUSER_HB_SEC_ASID_MASK | AXUSER_HB_SEC_MMBP_MASK)
+
+#define ROT_MSS_HALT_WBC_MASK BIT(0)
+#define ROT_MSS_HALT_RSB_MASK BIT(1)
+#define ROT_MSS_HALT_MRSB_MASK BIT(2)
+
+#define PCIE_DBI_MSIX_ADDRESS_MATCH_LOW_OFF_MSIX_ADDRESS_MATCH_EN_SHIFT 0
+#define PCIE_DBI_MSIX_ADDRESS_MATCH_LOW_OFF_MSIX_ADDRESS_MATCH_EN_MASK 0x1
+
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_SHIFT 15
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK 0x8000
+
+#endif /* GAUDI2_MASKS_H_ */
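
The QM/ARC/CGM idle masks defined above are meant to be applied uniformly across engine QMANs. As a minimal sketch of how they could be combined (illustrative only, not part of this patch; the function name is made up, and it assumes the three status words were already read from the engine's QM GLBL_STS0, GLBL_STS1 and CGM_STS registers):

	/* Illustrative only: decide idleness from already-read status values. */
	static bool example_qman_is_idle(u32 glbl_sts0, u32 glbl_sts1, u32 cgm_sts)
	{
		/* PQF/CQF/CP idle, ARC CQF idle and the clock-gating agent idle. */
		return ((glbl_sts0 & QM_IDLE_MASK) == QM_IDLE_MASK) &&
		       ((glbl_sts1 & QM_ARC_IDLE_MASK) == QM_ARC_IDLE_MASK) &&
		       ((cgm_sts & CGM_IDLE_MASK) == CGM_IDLE_MASK);
	}
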
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
new file mode 100644
index 000000000000..89a06ff5ba34
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
@@ -0,0 +1,3849 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudi2P.h"
+#include "../include/gaudi2/asic_reg/gaudi2_regs.h"
+
+#define UNSET_GLBL_SEC_BIT(array, b) ((array)[((b) / 32)] |= (1 << ((b) % 32)))
+
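The macro above marks a single register as unsecured by setting its bit in a per-block GLBL_SEC bitmap, where each u32 word covers 32 registers. A minimal sketch of its effect (illustrative only; the array size and register index are made up for the example):

	u32 glbl_sec[8] = { 0 };	/* protection bitmap: one bit per register */

	/* Mark register index 37 as unsecured: word 1 (37 / 32), bit 5 (37 % 32). */
	UNSET_GLBL_SEC_BIT(glbl_sec, 37);
	/* glbl_sec[1] now equals 0x20. */
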
+#define SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD \
+ PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR \
+ PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR_MASK
+#define SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR \
+ PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR_MASK
+
+/* LBW RR */
+#define SFT_NUM_OF_LBW_RTR 1
+#define SFT_LBW_RTR_OFFSET 0
+#define RR_LBW_LONG_MASK 0x7FFFFFFull
+#define RR_LBW_SHORT_MASK 0x7FFF000ull
+
+/* HBW RR */
+#define SFT_NUM_OF_HBW_RTR 2
+#define RR_HBW_SHORT_LO_MASK 0xFFFFFFFF000ull
+#define RR_HBW_SHORT_HI_MASK 0xF00000000000ull
+#define RR_HBW_LONG_LO_MASK 0xFFFFFFFF000ull
+#define RR_HBW_LONG_HI_MASK 0xFFFFF00000000000ull
+
+struct rr_config {
+ u64 min;
+ u64 max;
+ u32 index;
+ u8 type;
+};
+
+struct gaudi2_atypical_bp_blocks {
+ u32 mm_block_base_addr;
+ u32 block_size;
+ u32 glbl_sec_offset;
+ u32 glbl_sec_length;
+};
+
+static const struct gaudi2_atypical_bp_blocks gaudi2_pb_dcr0_sm_objs = {
+ mmDCORE0_SYNC_MNGR_OBJS_BASE,
+ 128 * 1024,
+ SM_OBJS_PROT_BITS_OFFS,
+ 640
+};
+
+static const u32 gaudi2_pb_sft0[] = {
+ mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE,
+ mmSFT0_HBW_RTR_IF0_RTR_H3_BASE,
+ mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT0_HBW_RTR_IF0_ADDR_DEC_HBW_BASE,
+ mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE,
+ mmSFT0_HBW_RTR_IF1_RTR_H3_BASE,
+ mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT0_HBW_RTR_IF1_ADDR_DEC_HBW_BASE,
+ mmSFT0_LBW_RTR_IF_RTR_CTRL_BASE,
+ mmSFT0_LBW_RTR_IF_RTR_H3_BASE,
+ mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT0_LBW_RTR_IF_ADDR_DEC_HBW_BASE,
+ mmSFT0_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_hif[] = {
+ mmDCORE0_HIF0_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_rtr0[] = {
+ mmDCORE0_RTR0_CTRL_BASE,
+ mmDCORE0_RTR0_H3_BASE,
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmDCORE0_RTR0_ADD_DEC_HBW_BASE,
+ mmDCORE0_RTR0_BASE,
+ mmDCORE0_RTR0_DBG_ADDR_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_hmmu0[] = {
+ mmDCORE0_HMMU0_MMU_BASE,
+ mmDCORE0_HMMU0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmDCORE0_HMMU0_SCRAMB_OUT_BASE,
+ mmDCORE0_HMMU0_STLB_BASE,
+};
+
+static const u32 gaudi2_pb_cpu_if[] = {
+ mmCPU_IF_BASE,
+};
+
+static const u32 gaudi2_pb_cpu[] = {
+ mmCPU_CA53_CFG_BASE,
+ mmCPU_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_kdma[] = {
+ mmARC_FARM_KDMA_BASE,
+ mmARC_FARM_KDMA_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_pdma0[] = {
+ mmPDMA0_CORE_BASE,
+ mmPDMA0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPDMA0_QM_BASE,
+};
+
+static const u32 gaudi2_pb_pdma0_arc[] = {
+ mmPDMA0_QM_ARC_AUX_BASE,
+};
+
+static const struct range gaudi2_pb_pdma0_arc_unsecured_regs[] = {
+ {mmPDMA0_QM_ARC_AUX_RUN_HALT_REQ, mmPDMA0_QM_ARC_AUX_RUN_HALT_ACK},
+ {mmPDMA0_QM_ARC_AUX_CLUSTER_NUM, mmPDMA0_QM_ARC_AUX_WAKE_UP_EVENT},
+ {mmPDMA0_QM_ARC_AUX_ARC_RST_REQ, mmPDMA0_QM_ARC_AUX_CID_OFFSET_7},
+ {mmPDMA0_QM_ARC_AUX_SCRATCHPAD_0, mmPDMA0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
+ {mmPDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmPDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
+ {mmPDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmPDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
+ {mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0, mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
+ {mmPDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT, mmPDMA0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
+ {mmPDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT, mmPDMA0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
+};
+
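The unsecured-register tables in this file come in two shapes: flat arrays of individual register addresses and struct range arrays such as the one above. A rough sketch of how a range table could be expanded into per-register protection bits (illustrative only; block_base, glbl_sec and the 4-byte register stride are assumptions, and the driver goes through its common security helpers rather than open-coding this):

	static void example_unsecure_ranges(u64 block_base, u32 *glbl_sec,
					    const struct range *ranges, int num)
	{
		int i;
		u64 reg;

		for (i = 0; i < num; i++) {
			/* Registers are 4 bytes apart; convert each address in the
			 * range to a bit offset inside the block's GLBL_SEC bitmap.
			 */
			for (reg = ranges[i].start; reg <= ranges[i].end; reg += 4)
				UNSET_GLBL_SEC_BIT(glbl_sec, (reg - block_base) >> 2);
		}
	}
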
+static const u32 gaudi2_pb_pdma0_unsecured_regs[] = {
+ mmPDMA0_CORE_CTX_AXUSER_HB_WR_REDUCTION,
+ mmPDMA0_CORE_CTX_WR_COMP_ADDR_HI,
+ mmPDMA0_CORE_CTX_WR_COMP_ADDR_LO,
+ mmPDMA0_CORE_CTX_WR_COMP_WDATA,
+ mmPDMA0_CORE_CTX_SRC_BASE_LO,
+ mmPDMA0_CORE_CTX_SRC_BASE_HI,
+ mmPDMA0_CORE_CTX_DST_BASE_LO,
+ mmPDMA0_CORE_CTX_DST_BASE_HI,
+ mmPDMA0_CORE_CTX_SRC_TSIZE_0,
+ mmPDMA0_CORE_CTX_SRC_TSIZE_1,
+ mmPDMA0_CORE_CTX_SRC_TSIZE_2,
+ mmPDMA0_CORE_CTX_SRC_TSIZE_3,
+ mmPDMA0_CORE_CTX_SRC_TSIZE_4,
+ mmPDMA0_CORE_CTX_SRC_STRIDE_1,
+ mmPDMA0_CORE_CTX_SRC_STRIDE_2,
+ mmPDMA0_CORE_CTX_SRC_STRIDE_3,
+ mmPDMA0_CORE_CTX_SRC_STRIDE_4,
+ mmPDMA0_CORE_CTX_SRC_OFFSET_LO,
+ mmPDMA0_CORE_CTX_SRC_OFFSET_HI,
+ mmPDMA0_CORE_CTX_DST_TSIZE_0,
+ mmPDMA0_CORE_CTX_DST_TSIZE_1,
+ mmPDMA0_CORE_CTX_DST_TSIZE_2,
+ mmPDMA0_CORE_CTX_DST_TSIZE_3,
+ mmPDMA0_CORE_CTX_DST_TSIZE_4,
+ mmPDMA0_CORE_CTX_DST_STRIDE_1,
+ mmPDMA0_CORE_CTX_DST_STRIDE_2,
+ mmPDMA0_CORE_CTX_DST_STRIDE_3,
+ mmPDMA0_CORE_CTX_DST_STRIDE_4,
+ mmPDMA0_CORE_CTX_DST_OFFSET_LO,
+ mmPDMA0_CORE_CTX_DST_OFFSET_HI,
+ mmPDMA0_CORE_CTX_COMMIT,
+ mmPDMA0_CORE_CTX_CTRL,
+ mmPDMA0_CORE_CTX_TE_NUMROWS,
+ mmPDMA0_CORE_CTX_IDX,
+ mmPDMA0_CORE_CTX_IDX_INC,
+ mmPDMA0_QM_CQ_CFG0_0,
+ mmPDMA0_QM_CQ_CFG0_1,
+ mmPDMA0_QM_CQ_CFG0_2,
+ mmPDMA0_QM_CQ_CFG0_3,
+ mmPDMA0_QM_CQ_CFG0_4,
+ mmPDMA0_QM_CP_FENCE0_RDATA_0,
+ mmPDMA0_QM_CP_FENCE0_RDATA_1,
+ mmPDMA0_QM_CP_FENCE0_RDATA_2,
+ mmPDMA0_QM_CP_FENCE0_RDATA_3,
+ mmPDMA0_QM_CP_FENCE0_RDATA_4,
+ mmPDMA0_QM_CP_FENCE1_RDATA_0,
+ mmPDMA0_QM_CP_FENCE1_RDATA_1,
+ mmPDMA0_QM_CP_FENCE1_RDATA_2,
+ mmPDMA0_QM_CP_FENCE1_RDATA_3,
+ mmPDMA0_QM_CP_FENCE1_RDATA_4,
+ mmPDMA0_QM_CP_FENCE2_RDATA_0,
+ mmPDMA0_QM_CP_FENCE2_RDATA_1,
+ mmPDMA0_QM_CP_FENCE2_RDATA_2,
+ mmPDMA0_QM_CP_FENCE2_RDATA_3,
+ mmPDMA0_QM_CP_FENCE2_RDATA_4,
+ mmPDMA0_QM_CP_FENCE3_RDATA_0,
+ mmPDMA0_QM_CP_FENCE3_RDATA_1,
+ mmPDMA0_QM_CP_FENCE3_RDATA_2,
+ mmPDMA0_QM_CP_FENCE3_RDATA_3,
+ mmPDMA0_QM_CP_FENCE3_RDATA_4,
+ mmPDMA0_QM_CP_FENCE0_CNT_0,
+ mmPDMA0_QM_CP_FENCE0_CNT_1,
+ mmPDMA0_QM_CP_FENCE0_CNT_2,
+ mmPDMA0_QM_CP_FENCE0_CNT_3,
+ mmPDMA0_QM_CP_FENCE0_CNT_4,
+ mmPDMA0_QM_CP_FENCE1_CNT_0,
+ mmPDMA0_QM_CP_FENCE1_CNT_1,
+ mmPDMA0_QM_CP_FENCE1_CNT_2,
+ mmPDMA0_QM_CP_FENCE1_CNT_3,
+ mmPDMA0_QM_CP_FENCE1_CNT_4,
+ mmPDMA0_QM_CP_FENCE2_CNT_0,
+ mmPDMA0_QM_CP_FENCE2_CNT_1,
+ mmPDMA0_QM_CP_FENCE2_CNT_2,
+ mmPDMA0_QM_CP_FENCE2_CNT_3,
+ mmPDMA0_QM_CP_FENCE2_CNT_4,
+ mmPDMA0_QM_CP_FENCE3_CNT_0,
+ mmPDMA0_QM_CP_FENCE3_CNT_1,
+ mmPDMA0_QM_CP_FENCE3_CNT_2,
+ mmPDMA0_QM_CP_FENCE3_CNT_3,
+ mmPDMA0_QM_CP_FENCE3_CNT_4,
+ mmPDMA0_QM_CQ_PTR_LO_0,
+ mmPDMA0_QM_CQ_PTR_HI_0,
+ mmPDMA0_QM_CQ_TSIZE_0,
+ mmPDMA0_QM_CQ_CTL_0,
+ mmPDMA0_QM_CQ_PTR_LO_1,
+ mmPDMA0_QM_CQ_PTR_HI_1,
+ mmPDMA0_QM_CQ_TSIZE_1,
+ mmPDMA0_QM_CQ_CTL_1,
+ mmPDMA0_QM_CQ_PTR_LO_2,
+ mmPDMA0_QM_CQ_PTR_HI_2,
+ mmPDMA0_QM_CQ_TSIZE_2,
+ mmPDMA0_QM_CQ_CTL_2,
+ mmPDMA0_QM_CQ_PTR_LO_3,
+ mmPDMA0_QM_CQ_PTR_HI_3,
+ mmPDMA0_QM_CQ_TSIZE_3,
+ mmPDMA0_QM_CQ_CTL_3,
+ mmPDMA0_QM_CQ_PTR_LO_4,
+ mmPDMA0_QM_CQ_PTR_HI_4,
+ mmPDMA0_QM_CQ_TSIZE_4,
+ mmPDMA0_QM_CQ_CTL_4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE,
+ mmPDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
+ mmPDMA0_QM_ARC_CQ_PTR_LO,
+ mmPDMA0_QM_ARC_CQ_PTR_LO_STS,
+ mmPDMA0_QM_ARC_CQ_PTR_HI,
+ mmPDMA0_QM_ARC_CQ_PTR_HI_STS,
+ mmPDMA0_QM_ARB_CFG_0,
+ mmPDMA0_QM_ARB_MST_QUIET_PER,
+ mmPDMA0_QM_ARB_CHOICE_Q_PUSH,
+ mmPDMA0_QM_ARB_WRR_WEIGHT_0,
+ mmPDMA0_QM_ARB_WRR_WEIGHT_1,
+ mmPDMA0_QM_ARB_WRR_WEIGHT_2,
+ mmPDMA0_QM_ARB_WRR_WEIGHT_3,
+ mmPDMA0_QM_ARB_BASE_LO,
+ mmPDMA0_QM_ARB_BASE_HI,
+ mmPDMA0_QM_ARB_MST_SLAVE_EN,
+ mmPDMA0_QM_ARB_MST_SLAVE_EN_1,
+ mmPDMA0_QM_ARB_MST_CRED_INC,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
+ mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
+ mmPDMA0_QM_ARB_SLV_ID,
+ mmPDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
+ mmPDMA0_QM_ARC_CQ_CFG0,
+ mmPDMA0_QM_CQ_IFIFO_CI_0,
+ mmPDMA0_QM_CQ_IFIFO_CI_1,
+ mmPDMA0_QM_CQ_IFIFO_CI_2,
+ mmPDMA0_QM_CQ_IFIFO_CI_3,
+ mmPDMA0_QM_CQ_IFIFO_CI_4,
+ mmPDMA0_QM_ARC_CQ_IFIFO_CI,
+ mmPDMA0_QM_CQ_CTL_CI_0,
+ mmPDMA0_QM_CQ_CTL_CI_1,
+ mmPDMA0_QM_CQ_CTL_CI_2,
+ mmPDMA0_QM_CQ_CTL_CI_3,
+ mmPDMA0_QM_CQ_CTL_CI_4,
+ mmPDMA0_QM_ARC_CQ_CTL_CI,
+ mmPDMA0_QM_ARC_CQ_TSIZE,
+ mmPDMA0_QM_ARC_CQ_CTL,
+ mmPDMA0_QM_CP_SWITCH_WD_SET,
+ mmPDMA0_QM_CP_EXT_SWITCH,
+ mmPDMA0_QM_CP_PRED_0,
+ mmPDMA0_QM_CP_PRED_1,
+ mmPDMA0_QM_CP_PRED_2,
+ mmPDMA0_QM_CP_PRED_3,
+ mmPDMA0_QM_CP_PRED_4,
+ mmPDMA0_QM_CP_PRED_UPEN_0,
+ mmPDMA0_QM_CP_PRED_UPEN_1,
+ mmPDMA0_QM_CP_PRED_UPEN_2,
+ mmPDMA0_QM_CP_PRED_UPEN_3,
+ mmPDMA0_QM_CP_PRED_UPEN_4,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_0,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_1,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_2,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_3,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_4,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_0,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_1,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_2,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_3,
+ mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_4,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_0,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_1,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_2,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_3,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_4,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_0,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_1,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_2,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_3,
+ mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_4,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_0,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_1,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_2,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_3,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_4,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_0,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_1,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_2,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_3,
+ mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_4,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_0,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_1,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_2,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_3,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_4,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_0,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_1,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_2,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_3,
+ mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_4,
+ mmPDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
+ mmPDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO,
+ mmPDMA0_QM_CQ_IFIFO_MSG_BASE_LO,
+ mmPDMA0_QM_CQ_CTL_MSG_BASE_LO
+};
+
+static const u32 gaudi2_pb_dcr0_edma0[] = {
+ mmDCORE0_EDMA0_CORE_BASE,
+ mmDCORE0_EDMA0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmDCORE0_EDMA0_QM_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_edma0_arc[] = {
+ mmDCORE0_EDMA0_QM_ARC_AUX_BASE,
+};
+
+static const struct range gaudi2_pb_dcr0_edma0_arc_unsecured_regs[] = {
+ {mmDCORE0_EDMA0_QM_ARC_AUX_RUN_HALT_REQ, mmDCORE0_EDMA0_QM_ARC_AUX_RUN_HALT_ACK},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_CLUSTER_NUM, mmDCORE0_EDMA0_QM_ARC_AUX_WAKE_UP_EVENT},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_ARC_RST_REQ, mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_7},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_0, mmDCORE0_EDMA0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN,
+ mmDCORE0_EDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN,
+ mmDCORE0_EDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0,
+ mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT,
+ mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
+ {mmDCORE0_EDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT,
+ mmDCORE0_EDMA0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
+};
+
+static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
+ mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_WR_REDUCTION,
+ mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI,
+ mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO,
+ mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_LO,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_HI,
+ mmDCORE0_EDMA0_CORE_CTX_DST_BASE_LO,
+ mmDCORE0_EDMA0_CORE_CTX_DST_BASE_HI,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_0,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_1,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_2,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_3,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_4,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_1,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_2,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_3,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_4,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_OFFSET_LO,
+ mmDCORE0_EDMA0_CORE_CTX_SRC_OFFSET_HI,
+ mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_0,
+ mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_1,
+ mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_2,
+ mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_3,
+ mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_4,
+ mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_1,
+ mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_2,
+ mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_3,
+ mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_4,
+ mmDCORE0_EDMA0_CORE_CTX_DST_OFFSET_LO,
+ mmDCORE0_EDMA0_CORE_CTX_DST_OFFSET_HI,
+ mmDCORE0_EDMA0_CORE_CTX_COMMIT,
+ mmDCORE0_EDMA0_CORE_CTX_CTRL,
+ mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
+ mmDCORE0_EDMA0_CORE_CTX_IDX,
+ mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_0,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_1,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_2,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_3,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_4,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_0,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_1,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_2,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_3,
+ mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_4,
+ mmDCORE0_EDMA0_QM_CQ_PTR_LO_0,
+ mmDCORE0_EDMA0_QM_CQ_PTR_HI_0,
+ mmDCORE0_EDMA0_QM_CQ_TSIZE_0,
+ mmDCORE0_EDMA0_QM_CQ_CTL_0,
+ mmDCORE0_EDMA0_QM_CQ_PTR_LO_1,
+ mmDCORE0_EDMA0_QM_CQ_PTR_HI_1,
+ mmDCORE0_EDMA0_QM_CQ_TSIZE_1,
+ mmDCORE0_EDMA0_QM_CQ_CTL_1,
+ mmDCORE0_EDMA0_QM_CQ_PTR_LO_2,
+ mmDCORE0_EDMA0_QM_CQ_PTR_HI_2,
+ mmDCORE0_EDMA0_QM_CQ_TSIZE_2,
+ mmDCORE0_EDMA0_QM_CQ_CTL_2,
+ mmDCORE0_EDMA0_QM_CQ_PTR_LO_3,
+ mmDCORE0_EDMA0_QM_CQ_PTR_HI_3,
+ mmDCORE0_EDMA0_QM_CQ_TSIZE_3,
+ mmDCORE0_EDMA0_QM_CQ_CTL_3,
+ mmDCORE0_EDMA0_QM_CQ_PTR_LO_4,
+ mmDCORE0_EDMA0_QM_CQ_PTR_HI_4,
+ mmDCORE0_EDMA0_QM_CQ_TSIZE_4,
+ mmDCORE0_EDMA0_QM_CQ_CTL_4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE,
+ mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
+ mmDCORE0_EDMA0_QM_ARC_CQ_PTR_LO,
+ mmDCORE0_EDMA0_QM_ARC_CQ_PTR_LO_STS,
+ mmDCORE0_EDMA0_QM_ARC_CQ_PTR_HI,
+ mmDCORE0_EDMA0_QM_ARC_CQ_PTR_HI_STS,
+ mmDCORE0_EDMA0_QM_ARB_CFG_0,
+ mmDCORE0_EDMA0_QM_ARB_MST_QUIET_PER,
+ mmDCORE0_EDMA0_QM_ARB_CHOICE_Q_PUSH,
+ mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_0,
+ mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_1,
+ mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_2,
+ mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_3,
+ mmDCORE0_EDMA0_QM_ARB_BASE_LO,
+ mmDCORE0_EDMA0_QM_ARB_BASE_HI,
+ mmDCORE0_EDMA0_QM_ARB_MST_SLAVE_EN,
+ mmDCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_1,
+ mmDCORE0_EDMA0_QM_ARB_MST_CRED_INC,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
+ mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
+ mmDCORE0_EDMA0_QM_ARB_SLV_ID,
+ mmDCORE0_EDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
+ mmDCORE0_EDMA0_QM_ARC_CQ_CFG0,
+ mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_0,
+ mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_1,
+ mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_2,
+ mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_3,
+ mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_4,
+ mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_CI,
+ mmDCORE0_EDMA0_QM_CQ_CTL_CI_0,
+ mmDCORE0_EDMA0_QM_CQ_CTL_CI_1,
+ mmDCORE0_EDMA0_QM_CQ_CTL_CI_2,
+ mmDCORE0_EDMA0_QM_CQ_CTL_CI_3,
+ mmDCORE0_EDMA0_QM_CQ_CTL_CI_4,
+ mmDCORE0_EDMA0_QM_ARC_CQ_CTL_CI,
+ mmDCORE0_EDMA0_QM_ARC_CQ_TSIZE,
+ mmDCORE0_EDMA0_QM_ARC_CQ_CTL,
+ mmDCORE0_EDMA0_QM_CP_SWITCH_WD_SET,
+ mmDCORE0_EDMA0_QM_CP_EXT_SWITCH,
+ mmDCORE0_EDMA0_QM_CP_PRED_0,
+ mmDCORE0_EDMA0_QM_CP_PRED_1,
+ mmDCORE0_EDMA0_QM_CP_PRED_2,
+ mmDCORE0_EDMA0_QM_CP_PRED_3,
+ mmDCORE0_EDMA0_QM_CP_PRED_4,
+ mmDCORE0_EDMA0_QM_CP_PRED_UPEN_0,
+ mmDCORE0_EDMA0_QM_CP_PRED_UPEN_1,
+ mmDCORE0_EDMA0_QM_CP_PRED_UPEN_2,
+ mmDCORE0_EDMA0_QM_CP_PRED_UPEN_3,
+ mmDCORE0_EDMA0_QM_CP_PRED_UPEN_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_4,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_0,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_1,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_2,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_3,
+ mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_4,
+ mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
+ mmDCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO,
+ mmDCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_LO,
+ mmDCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_LO
+};
+
+static const u32 gaudi2_pb_dcr0_mme_sbte[] = {
+ mmDCORE0_MME_SBTE0_BASE,
+ mmDCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_mme_qm[] = {
+ mmDCORE0_MME_QM_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_mme_eng[] = {
+ mmDCORE0_MME_ACC_BASE,
+ mmDCORE0_MME_CTRL_HI_BASE,
+ mmDCORE0_MME_CTRL_LO_BASE,
+ mmDCORE0_MME_CTRL_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmDCORE0_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmDCORE0_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_mme_arc[] = {
+ mmDCORE0_MME_QM_ARC_AUX_BASE,
+ mmDCORE0_MME_QM_ARC_DUP_ENG_BASE,
+};
+
+static const struct range gaudi2_pb_dcr0_mme_arc_unsecured_regs[] = {
+ {mmDCORE0_MME_QM_ARC_AUX_RUN_HALT_REQ, mmDCORE0_MME_QM_ARC_AUX_RUN_HALT_ACK},
+ {mmDCORE0_MME_QM_ARC_AUX_CLUSTER_NUM, mmDCORE0_MME_QM_ARC_AUX_WAKE_UP_EVENT},
+ {mmDCORE0_MME_QM_ARC_AUX_ARC_RST_REQ, mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_7},
+ {mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_0, mmDCORE0_MME_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
+ {mmDCORE0_MME_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmDCORE0_MME_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
+ {mmDCORE0_MME_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmDCORE0_MME_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
+ {mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0,
+ mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
+ {mmDCORE0_MME_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT,
+ mmDCORE0_MME_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
+ {mmDCORE0_MME_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT,
+ mmDCORE0_MME_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
+ {mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_0,
+ mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_63},
+ {mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_STRONG_ORDER,
+ mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_LB_OVRD},
+};
+
+static const u32 gaudi2_pb_dcr0_mme_qm_unsecured_regs[] = {
+ mmDCORE0_MME_QM_CQ_CFG0_0,
+ mmDCORE0_MME_QM_CQ_CFG0_1,
+ mmDCORE0_MME_QM_CQ_CFG0_2,
+ mmDCORE0_MME_QM_CQ_CFG0_3,
+ mmDCORE0_MME_QM_CQ_CFG0_4,
+ mmDCORE0_MME_QM_CP_FENCE0_RDATA_0,
+ mmDCORE0_MME_QM_CP_FENCE0_RDATA_1,
+ mmDCORE0_MME_QM_CP_FENCE0_RDATA_2,
+ mmDCORE0_MME_QM_CP_FENCE0_RDATA_3,
+ mmDCORE0_MME_QM_CP_FENCE0_RDATA_4,
+ mmDCORE0_MME_QM_CP_FENCE1_RDATA_0,
+ mmDCORE0_MME_QM_CP_FENCE1_RDATA_1,
+ mmDCORE0_MME_QM_CP_FENCE1_RDATA_2,
+ mmDCORE0_MME_QM_CP_FENCE1_RDATA_3,
+ mmDCORE0_MME_QM_CP_FENCE1_RDATA_4,
+ mmDCORE0_MME_QM_CP_FENCE2_RDATA_0,
+ mmDCORE0_MME_QM_CP_FENCE2_RDATA_1,
+ mmDCORE0_MME_QM_CP_FENCE2_RDATA_2,
+ mmDCORE0_MME_QM_CP_FENCE2_RDATA_3,
+ mmDCORE0_MME_QM_CP_FENCE2_RDATA_4,
+ mmDCORE0_MME_QM_CP_FENCE3_RDATA_0,
+ mmDCORE0_MME_QM_CP_FENCE3_RDATA_1,
+ mmDCORE0_MME_QM_CP_FENCE3_RDATA_2,
+ mmDCORE0_MME_QM_CP_FENCE3_RDATA_3,
+ mmDCORE0_MME_QM_CP_FENCE3_RDATA_4,
+ mmDCORE0_MME_QM_CP_FENCE0_CNT_0,
+ mmDCORE0_MME_QM_CP_FENCE0_CNT_1,
+ mmDCORE0_MME_QM_CP_FENCE0_CNT_2,
+ mmDCORE0_MME_QM_CP_FENCE0_CNT_3,
+ mmDCORE0_MME_QM_CP_FENCE0_CNT_4,
+ mmDCORE0_MME_QM_CP_FENCE1_CNT_0,
+ mmDCORE0_MME_QM_CP_FENCE1_CNT_1,
+ mmDCORE0_MME_QM_CP_FENCE1_CNT_2,
+ mmDCORE0_MME_QM_CP_FENCE1_CNT_3,
+ mmDCORE0_MME_QM_CP_FENCE1_CNT_4,
+ mmDCORE0_MME_QM_CP_FENCE2_CNT_0,
+ mmDCORE0_MME_QM_CP_FENCE2_CNT_1,
+ mmDCORE0_MME_QM_CP_FENCE2_CNT_2,
+ mmDCORE0_MME_QM_CP_FENCE2_CNT_3,
+ mmDCORE0_MME_QM_CP_FENCE2_CNT_4,
+ mmDCORE0_MME_QM_CP_FENCE3_CNT_0,
+ mmDCORE0_MME_QM_CP_FENCE3_CNT_1,
+ mmDCORE0_MME_QM_CP_FENCE3_CNT_2,
+ mmDCORE0_MME_QM_CP_FENCE3_CNT_3,
+ mmDCORE0_MME_QM_CP_FENCE3_CNT_4,
+ mmDCORE0_MME_QM_CQ_PTR_LO_0,
+ mmDCORE0_MME_QM_CQ_PTR_HI_0,
+ mmDCORE0_MME_QM_CQ_TSIZE_0,
+ mmDCORE0_MME_QM_CQ_CTL_0,
+ mmDCORE0_MME_QM_CQ_PTR_LO_1,
+ mmDCORE0_MME_QM_CQ_PTR_HI_1,
+ mmDCORE0_MME_QM_CQ_TSIZE_1,
+ mmDCORE0_MME_QM_CQ_CTL_1,
+ mmDCORE0_MME_QM_CQ_PTR_LO_2,
+ mmDCORE0_MME_QM_CQ_PTR_HI_2,
+ mmDCORE0_MME_QM_CQ_TSIZE_2,
+ mmDCORE0_MME_QM_CQ_CTL_2,
+ mmDCORE0_MME_QM_CQ_PTR_LO_3,
+ mmDCORE0_MME_QM_CQ_PTR_HI_3,
+ mmDCORE0_MME_QM_CQ_TSIZE_3,
+ mmDCORE0_MME_QM_CQ_CTL_3,
+ mmDCORE0_MME_QM_CQ_PTR_LO_4,
+ mmDCORE0_MME_QM_CQ_PTR_HI_4,
+ mmDCORE0_MME_QM_CQ_TSIZE_4,
+ mmDCORE0_MME_QM_CQ_CTL_4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_BASE,
+ mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
+ mmDCORE0_MME_QM_ARC_CQ_PTR_LO,
+ mmDCORE0_MME_QM_ARC_CQ_PTR_LO_STS,
+ mmDCORE0_MME_QM_ARC_CQ_PTR_HI,
+ mmDCORE0_MME_QM_ARC_CQ_PTR_HI_STS,
+ mmDCORE0_MME_QM_ARB_CFG_0,
+ mmDCORE0_MME_QM_ARB_MST_QUIET_PER,
+ mmDCORE0_MME_QM_ARB_CHOICE_Q_PUSH,
+ mmDCORE0_MME_QM_ARB_WRR_WEIGHT_0,
+ mmDCORE0_MME_QM_ARB_WRR_WEIGHT_1,
+ mmDCORE0_MME_QM_ARB_WRR_WEIGHT_2,
+ mmDCORE0_MME_QM_ARB_WRR_WEIGHT_3,
+ mmDCORE0_MME_QM_ARB_BASE_LO,
+ mmDCORE0_MME_QM_ARB_BASE_HI,
+ mmDCORE0_MME_QM_ARB_MST_SLAVE_EN,
+ mmDCORE0_MME_QM_ARB_MST_SLAVE_EN_1,
+ mmDCORE0_MME_QM_ARB_MST_CRED_INC,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_0,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_1,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_2,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_3,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_4,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_5,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_6,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_7,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_8,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_9,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_10,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_11,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_12,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_13,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_14,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_15,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_16,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_17,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_18,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_19,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_20,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_21,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_22,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_23,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_24,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_25,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_26,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_27,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_28,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_29,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_30,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_31,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_32,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_33,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_34,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_35,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_36,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_37,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_38,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_39,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_40,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_41,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_42,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_43,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_44,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_45,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_46,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_47,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_48,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_49,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_50,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_51,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_52,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_53,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_54,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_55,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_56,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_57,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_58,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_59,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_60,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_61,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_62,
+ mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_63,
+ mmDCORE0_MME_QM_ARB_SLV_ID,
+ mmDCORE0_MME_QM_ARB_SLV_MASTER_INC_CRED_OFST,
+ mmDCORE0_MME_QM_ARC_CQ_CFG0,
+ mmDCORE0_MME_QM_CQ_IFIFO_CI_0,
+ mmDCORE0_MME_QM_CQ_IFIFO_CI_1,
+ mmDCORE0_MME_QM_CQ_IFIFO_CI_2,
+ mmDCORE0_MME_QM_CQ_IFIFO_CI_3,
+ mmDCORE0_MME_QM_CQ_IFIFO_CI_4,
+ mmDCORE0_MME_QM_ARC_CQ_IFIFO_CI,
+ mmDCORE0_MME_QM_CQ_CTL_CI_0,
+ mmDCORE0_MME_QM_CQ_CTL_CI_1,
+ mmDCORE0_MME_QM_CQ_CTL_CI_2,
+ mmDCORE0_MME_QM_CQ_CTL_CI_3,
+ mmDCORE0_MME_QM_CQ_CTL_CI_4,
+ mmDCORE0_MME_QM_ARC_CQ_CTL_CI,
+ mmDCORE0_MME_QM_ARC_CQ_TSIZE,
+ mmDCORE0_MME_QM_ARC_CQ_CTL,
+ mmDCORE0_MME_QM_CP_SWITCH_WD_SET,
+ mmDCORE0_MME_QM_CP_EXT_SWITCH,
+ mmDCORE0_MME_QM_CP_PRED_0,
+ mmDCORE0_MME_QM_CP_PRED_1,
+ mmDCORE0_MME_QM_CP_PRED_2,
+ mmDCORE0_MME_QM_CP_PRED_3,
+ mmDCORE0_MME_QM_CP_PRED_4,
+ mmDCORE0_MME_QM_CP_PRED_UPEN_0,
+ mmDCORE0_MME_QM_CP_PRED_UPEN_1,
+ mmDCORE0_MME_QM_CP_PRED_UPEN_2,
+ mmDCORE0_MME_QM_CP_PRED_UPEN_3,
+ mmDCORE0_MME_QM_CP_PRED_UPEN_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_4,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_0,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_1,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_2,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_3,
+ mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_4,
+ mmDCORE0_MME_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
+ mmDCORE0_MME_QM_ARC_CQ_CTL_MSG_BASE_LO,
+ mmDCORE0_MME_QM_CQ_IFIFO_MSG_BASE_LO,
+ mmDCORE0_MME_QM_CQ_CTL_MSG_BASE_LO
+};
+
+static const u32 gaudi2_pb_dcr0_mme_eng_unsecured_regs[] = {
+ mmDCORE0_MME_CTRL_LO_CMD,
+ mmDCORE0_MME_CTRL_LO_AGU,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_MASTER,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_SLAVE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_OUTER_LOOP,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SB_REPEAT,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_FP8_BIAS,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_RATE_LIMITER,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_USER_DATA,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PERF_EVT_IN,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PERF_EVT_OUT,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PCU,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SLAVE_SYNC_OBJ0_ADDR,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SLAVE_SYNC_OBJ1_ADDR,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_POWER_LOOP,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE0_MASTER,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE1_MASTER,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE2_MASTER,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE3_MASTER,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE0_SLAVE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE1_SLAVE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE2_SLAVE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE3_SLAVE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_WKL_ID,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT1_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT1_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT0_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT0_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_A_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_A_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_B_LOW,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_B_HIGH,
+ mmDCORE0_MME_CTRL_LO_ARCH_STATUS,
+ mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0,
+ mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0,
+ mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0,
+ mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1,
+ mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1,
+ mmDCORE0_MME_CTRL_LO_ARCH_A_SS,
+ mmDCORE0_MME_CTRL_LO_ARCH_B_SS,
+ mmDCORE0_MME_CTRL_LO_ARCH_COUT_SS,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_BASE,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_4,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_0,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_1,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_2,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_3,
+ mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_4,
+ mmDCORE0_MME_ACC_AP_LFSR_POLY,
+ mmDCORE0_MME_ACC_AP_LFSR_SEED_WDATA,
+ mmDCORE0_MME_ACC_AP_LFSR_SEED_SEL,
+ mmDCORE0_MME_ACC_AP_LFSR_SEED_RDATA,
+ mmDCORE0_MME_ACC_AP_LFSR_CLOSE_CGATE_DLY,
+ mmDCORE0_MME_ACC_WBC_SRC_BP,
+};
+
+static const u32 gaudi2_pb_dcr0_tpc0[] = {
+ mmDCORE0_TPC0_QM_BASE,
+ mmDCORE0_TPC0_CFG_BASE,
+ mmDCORE0_TPC0_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_tpc0_arc[] = {
+ mmDCORE0_TPC0_QM_ARC_AUX_BASE,
+};
+
+static const struct range gaudi2_pb_dcr0_tpc0_arc_unsecured_regs[] = {
+ {mmDCORE0_TPC0_QM_ARC_AUX_RUN_HALT_REQ, mmDCORE0_TPC0_QM_ARC_AUX_RUN_HALT_ACK},
+ {mmDCORE0_TPC0_QM_ARC_AUX_CLUSTER_NUM, mmDCORE0_TPC0_QM_ARC_AUX_WAKE_UP_EVENT},
+ {mmDCORE0_TPC0_QM_ARC_AUX_ARC_RST_REQ, mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_7},
+ {mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_0, mmDCORE0_TPC0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
+ {mmDCORE0_TPC0_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmDCORE0_TPC0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
+ {mmDCORE0_TPC0_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmDCORE0_TPC0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
+ {mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0,
+ mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
+ {mmDCORE0_TPC0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT,
+ mmDCORE0_TPC0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
+ {mmDCORE0_TPC0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT,
+ mmDCORE0_TPC0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
+};
+
+static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
+ mmDCORE0_TPC0_QM_CQ_CFG0_0,
+ mmDCORE0_TPC0_QM_CQ_CFG0_1,
+ mmDCORE0_TPC0_QM_CQ_CFG0_2,
+ mmDCORE0_TPC0_QM_CQ_CFG0_3,
+ mmDCORE0_TPC0_QM_CQ_CFG0_4,
+ mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_0,
+ mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_1,
+ mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_2,
+ mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_3,
+ mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_4,
+ mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_0,
+ mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_1,
+ mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_2,
+ mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_3,
+ mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_4,
+ mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_0,
+ mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_1,
+ mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_2,
+ mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_3,
+ mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_4,
+ mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_0,
+ mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_1,
+ mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_2,
+ mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_3,
+ mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_4,
+ mmDCORE0_TPC0_QM_CP_FENCE0_CNT_0,
+ mmDCORE0_TPC0_QM_CP_FENCE0_CNT_1,
+ mmDCORE0_TPC0_QM_CP_FENCE0_CNT_2,
+ mmDCORE0_TPC0_QM_CP_FENCE0_CNT_3,
+ mmDCORE0_TPC0_QM_CP_FENCE0_CNT_4,
+ mmDCORE0_TPC0_QM_CP_FENCE1_CNT_0,
+ mmDCORE0_TPC0_QM_CP_FENCE1_CNT_1,
+ mmDCORE0_TPC0_QM_CP_FENCE1_CNT_2,
+ mmDCORE0_TPC0_QM_CP_FENCE1_CNT_3,
+ mmDCORE0_TPC0_QM_CP_FENCE1_CNT_4,
+ mmDCORE0_TPC0_QM_CP_FENCE2_CNT_0,
+ mmDCORE0_TPC0_QM_CP_FENCE2_CNT_1,
+ mmDCORE0_TPC0_QM_CP_FENCE2_CNT_2,
+ mmDCORE0_TPC0_QM_CP_FENCE2_CNT_3,
+ mmDCORE0_TPC0_QM_CP_FENCE2_CNT_4,
+ mmDCORE0_TPC0_QM_CP_FENCE3_CNT_0,
+ mmDCORE0_TPC0_QM_CP_FENCE3_CNT_1,
+ mmDCORE0_TPC0_QM_CP_FENCE3_CNT_2,
+ mmDCORE0_TPC0_QM_CP_FENCE3_CNT_3,
+ mmDCORE0_TPC0_QM_CP_FENCE3_CNT_4,
+ mmDCORE0_TPC0_QM_CQ_PTR_LO_0,
+ mmDCORE0_TPC0_QM_CQ_PTR_HI_0,
+ mmDCORE0_TPC0_QM_CQ_TSIZE_0,
+ mmDCORE0_TPC0_QM_CQ_CTL_0,
+ mmDCORE0_TPC0_QM_CQ_PTR_LO_1,
+ mmDCORE0_TPC0_QM_CQ_PTR_HI_1,
+ mmDCORE0_TPC0_QM_CQ_TSIZE_1,
+ mmDCORE0_TPC0_QM_CQ_CTL_1,
+ mmDCORE0_TPC0_QM_CQ_PTR_LO_2,
+ mmDCORE0_TPC0_QM_CQ_PTR_HI_2,
+ mmDCORE0_TPC0_QM_CQ_TSIZE_2,
+ mmDCORE0_TPC0_QM_CQ_CTL_2,
+ mmDCORE0_TPC0_QM_CQ_PTR_LO_3,
+ mmDCORE0_TPC0_QM_CQ_PTR_HI_3,
+ mmDCORE0_TPC0_QM_CQ_TSIZE_3,
+ mmDCORE0_TPC0_QM_CQ_CTL_3,
+ mmDCORE0_TPC0_QM_CQ_PTR_LO_4,
+ mmDCORE0_TPC0_QM_CQ_PTR_HI_4,
+ mmDCORE0_TPC0_QM_CQ_TSIZE_4,
+ mmDCORE0_TPC0_QM_CQ_CTL_4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE,
+ mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
+ mmDCORE0_TPC0_QM_ARC_CQ_PTR_LO,
+ mmDCORE0_TPC0_QM_ARC_CQ_PTR_LO_STS,
+ mmDCORE0_TPC0_QM_ARC_CQ_PTR_HI,
+ mmDCORE0_TPC0_QM_ARC_CQ_PTR_HI_STS,
+ mmDCORE0_TPC0_QM_ARB_CFG_0,
+ mmDCORE0_TPC0_QM_ARB_MST_QUIET_PER,
+ mmDCORE0_TPC0_QM_ARB_CHOICE_Q_PUSH,
+ mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_0,
+ mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_1,
+ mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_2,
+ mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_3,
+ mmDCORE0_TPC0_QM_ARB_BASE_LO,
+ mmDCORE0_TPC0_QM_ARB_BASE_HI,
+ mmDCORE0_TPC0_QM_ARB_MST_SLAVE_EN,
+ mmDCORE0_TPC0_QM_ARB_MST_SLAVE_EN_1,
+ mmDCORE0_TPC0_QM_ARB_MST_CRED_INC,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
+ mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
+ mmDCORE0_TPC0_QM_ARB_SLV_ID,
+ mmDCORE0_TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
+ mmDCORE0_TPC0_QM_ARC_CQ_CFG0,
+ mmDCORE0_TPC0_QM_CQ_IFIFO_CI_0,
+ mmDCORE0_TPC0_QM_CQ_IFIFO_CI_1,
+ mmDCORE0_TPC0_QM_CQ_IFIFO_CI_2,
+ mmDCORE0_TPC0_QM_CQ_IFIFO_CI_3,
+ mmDCORE0_TPC0_QM_CQ_IFIFO_CI_4,
+ mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_CI,
+ mmDCORE0_TPC0_QM_CQ_CTL_CI_0,
+ mmDCORE0_TPC0_QM_CQ_CTL_CI_1,
+ mmDCORE0_TPC0_QM_CQ_CTL_CI_2,
+ mmDCORE0_TPC0_QM_CQ_CTL_CI_3,
+ mmDCORE0_TPC0_QM_CQ_CTL_CI_4,
+ mmDCORE0_TPC0_QM_ARC_CQ_CTL_CI,
+ mmDCORE0_TPC0_QM_ARC_CQ_TSIZE,
+ mmDCORE0_TPC0_QM_ARC_CQ_CTL,
+ mmDCORE0_TPC0_QM_CP_SWITCH_WD_SET,
+ mmDCORE0_TPC0_QM_CP_EXT_SWITCH,
+ mmDCORE0_TPC0_QM_CP_PRED_0,
+ mmDCORE0_TPC0_QM_CP_PRED_1,
+ mmDCORE0_TPC0_QM_CP_PRED_2,
+ mmDCORE0_TPC0_QM_CP_PRED_3,
+ mmDCORE0_TPC0_QM_CP_PRED_4,
+ mmDCORE0_TPC0_QM_CP_PRED_UPEN_0,
+ mmDCORE0_TPC0_QM_CP_PRED_UPEN_1,
+ mmDCORE0_TPC0_QM_CP_PRED_UPEN_2,
+ mmDCORE0_TPC0_QM_CP_PRED_UPEN_3,
+ mmDCORE0_TPC0_QM_CP_PRED_UPEN_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_4,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_0,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_1,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_2,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_3,
+ mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_4,
+ mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
+ mmDCORE0_TPC0_QM_ARC_CQ_CTL_MSG_BASE_LO,
+ mmDCORE0_TPC0_QM_CQ_IFIFO_MSG_BASE_LO,
+ mmDCORE0_TPC0_QM_CQ_CTL_MSG_BASE_LO,
+ mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_MESSAGE,
+ mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_ADDR,
+ mmDCORE0_TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW,
+ mmDCORE0_TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_0,
+ mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_0,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_1,
+ mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_1,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_2,
+ mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_2,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_3,
+ mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_3,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_4,
+ mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_4,
+ mmDCORE0_TPC0_CFG_QM_KERNEL_CONFIG,
+ mmDCORE0_TPC0_CFG_QM_KERNEL_ID,
+ mmDCORE0_TPC0_CFG_QM_POWER_LOOP,
+ mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI,
+ mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_HI,
+ mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI,
+ mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI,
+ mmDCORE0_TPC0_CFG_ROUND_CSR,
+ mmDCORE0_TPC0_CFG_CONV_ROUND_CSR,
+ mmDCORE0_TPC0_CFG_SEMAPHORE,
+ mmDCORE0_TPC0_CFG_LFSR_POLYNOM,
+ mmDCORE0_TPC0_CFG_STATUS,
+ mmDCORE0_TPC0_CFG_TPC_CMD,
+ mmDCORE0_TPC0_CFG_TPC_EXECUTE,
+ mmDCORE0_TPC0_CFG_TPC_DCACHE_L0CD,
+ mmDCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_LOW,
+ mmDCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH,
+ mmDCORE0_TPC0_CFG_RD_RATE_LIMIT,
+ mmDCORE0_TPC0_CFG_WR_RATE_LIMIT,
+ mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI,
+ mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI,
+ mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI,
+ mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO,
+ mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_0,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_1,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_2,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_3,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_4,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_5,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_6,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_7,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_8,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_9,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_10,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_11,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_12,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_13,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_14,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_15,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_16,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_17,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_18,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_19,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_20,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_21,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_22,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_23,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_24,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_25,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_26,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_27,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_28,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_29,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
+ mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
+ mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_1,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_2,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_3,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_4,
+ mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_0,
+ mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_1,
+ mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_2,
+ mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_3
+};
+
+static const u32 gaudi2_pb_dcr0_tpc0_ktensor_unsecured_regs[] = {
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_PREF_STRIDE,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_STRIDE_HIGH,
+};
+
+static const u32 gaudi2_pb_dcr0_tpc0_qtensor_unsecured_regs[] = {
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_PADDING_VALUE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_PREF_STRIDE,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_STRIDE_HIGH,
+ mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_STRIDE_HIGH,
+};
+
+static const u32 gaudi2_pb_dcr0_sram0[] = {
+ mmDCORE0_SRAM0_BANK_BASE,
+ mmDCORE0_SRAM0_DBG_CNT_N_HBW_DBG_CNT_BASE,
+ mmDCORE0_SRAM0_RTR_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_sm_mstr_if[] = {
+ mmDCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_dcr0_sm_glbl[] = {
+ mmDCORE0_SYNC_MNGR_GLBL_BASE,
+};
+
+static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_1, mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
+};
+
+static const struct range gaudi2_pb_dcr_x_sm_glbl_unsecured_regs[] = {
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63},
+ {mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
+};
+
+static const u32 gaudi2_pb_arc_sched[] = {
+ mmARC_FARM_ARC0_AUX_BASE,
+ mmARC_FARM_ARC0_DUP_ENG_BASE,
+ mmARC_FARM_ARC0_ACP_ENG_BASE,
+};
+
+static const struct range gaudi2_pb_arc_sched_unsecured_regs[] = {
+ {mmARC_FARM_ARC0_AUX_RUN_HALT_REQ, mmARC_FARM_ARC0_AUX_RUN_HALT_ACK},
+ {mmARC_FARM_ARC0_AUX_CLUSTER_NUM, mmARC_FARM_ARC0_AUX_WAKE_UP_EVENT},
+ {mmARC_FARM_ARC0_AUX_ARC_RST_REQ, mmARC_FARM_ARC0_AUX_CID_OFFSET_7},
+ {mmARC_FARM_ARC0_AUX_SCRATCHPAD_0, mmARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT},
+ {mmARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN, mmARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN},
+ {mmARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN, mmARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN},
+ {mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_0, mmARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG},
+ {mmARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT, mmARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI},
+ {mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT, mmARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN},
+ {mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_0, mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_63},
+ {mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_STRONG_ORDER, mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_OVRD},
+ {mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_0, mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_REG},
+};
+
+static const u32 gaudi2_pb_xbar_mid[] = {
+ mmXBAR_MID_0_BASE,
+};
+
+static const u32 gaudi2_pb_xbar_mid_unsecured_regs[] = {
+ mmXBAR_MID_0_UPSCALE,
+ mmXBAR_MID_0_DOWN_CONV,
+ mmXBAR_MID_0_DOWN_CONV_LFSR_EN,
+ mmXBAR_MID_0_DOWN_CONV_LFSR_SET_VLD,
+ mmXBAR_MID_0_DOWN_CONV_LFSR_SET_VALUE,
+ mmXBAR_MID_0_DOWN_CONV_LFSR_CFG_POLY,
+};
+
+static const u32 gaudi2_pb_xbar_edge[] = {
+ mmXBAR_EDGE_0_BASE,
+};
+
+static const u32 gaudi2_pb_xbar_edge_unsecured_regs[] = {
+ mmXBAR_EDGE_0_UPSCALE,
+ mmXBAR_EDGE_0_DOWN_CONV,
+ mmXBAR_EDGE_0_DOWN_CONV_LFSR_EN,
+ mmXBAR_EDGE_0_DOWN_CONV_LFSR_SET_VLD,
+ mmXBAR_EDGE_0_DOWN_CONV_LFSR_SET_VALUE,
+ mmXBAR_EDGE_0_DOWN_CONV_LFSR_CFG_POLY,
+};
+
+static const u32 gaudi2_pb_nic0[] = {
+ mmNIC0_TMR_BASE,
+ mmNIC0_RXB_CORE_BASE,
+ mmNIC0_RXE0_BASE,
+ mmNIC0_RXE1_BASE,
+ mmNIC0_RXE0_AXUSER_AXUSER_CQ0_BASE,
+ mmNIC0_RXE1_AXUSER_AXUSER_CQ0_BASE,
+ mmNIC0_TXS0_BASE,
+ mmNIC0_TXS1_BASE,
+ mmNIC0_TXE0_BASE,
+ mmNIC0_TXE1_BASE,
+ mmNIC0_TXB_BASE,
+ mmNIC0_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_nic0_qm_qpc[] = {
+ mmNIC0_QM0_BASE,
+ mmNIC0_QPC0_BASE,
+};
+
+static const u32 gaudi2_pb_nic0_qm_arc_aux0[] = {
+ mmNIC0_QM_ARC_AUX0_BASE,
+};
+
+static const struct range gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs[] = {
+ {mmNIC0_QM_ARC_AUX0_RUN_HALT_REQ, mmNIC0_QM_ARC_AUX0_RUN_HALT_ACK},
+ {mmNIC0_QM_ARC_AUX0_CLUSTER_NUM, mmNIC0_QM_ARC_AUX0_WAKE_UP_EVENT},
+ {mmNIC0_QM_ARC_AUX0_ARC_RST_REQ, mmNIC0_QM_ARC_AUX0_CID_OFFSET_7},
+ {mmNIC0_QM_ARC_AUX0_SCRATCHPAD_0, mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_RD_CNT},
+ {mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN},
+ {mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_0, mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_ALERT_MSG},
+ {mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_CNT, mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_SHADOW_CI},
+ {mmNIC0_QM_ARC_AUX0_ARC_AXI_ORDERING_WR_IF_CNT, mmNIC0_QM_ARC_AUX0_MME_ARC_UPPER_DCCM_EN},
+};
+
+static const u32 gaudi2_pb_nic0_umr[] = {
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE,
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 1, /* UMR0_1 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 2, /* UMR0_2 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 3, /* UMR0_3 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 4, /* UMR0_4 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 5, /* UMR0_5 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 6, /* UMR0_6 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 7, /* UMR0_7 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 8, /* UMR0_8 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 9, /* UMR0_9 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 10, /* UMR0_10 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 11, /* UMR0_11 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 12, /* UMR0_12 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 13, /* UMR0_13 */
+ mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 14, /* UMR0_14 */
+};
+
+static const struct range gaudi2_pb_nic0_umr_unsecured_regs[] = {
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32,
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 1, /* UMR0_1 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 1},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 2, /* UMR0_2 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 2},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 3, /* UMR0_3 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 3},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 4, /* UMR0_4 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 4},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 5, /* UMR0_5 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 5},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 6, /* UMR0_6 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 6},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 7, /* UMR0_7 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 7},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 8, /* UMR0_8 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 8},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 9, /* UMR0_9 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 9},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 10, /* UMR0_10 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 10},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 11, /* UMR0_11 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 11},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 12, /* UMR0_12 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 12},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 13, /* UMR0_13 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 13},
+ {mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 14, /* UMR0_14 */
+ mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 14},
+};
+
+/*
+ * mmNIC0_QPC0_LINEAR_WQE_QPN and mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN are 32-bit
+ * registers, but since the user writes in 64-bit bulks we must also unsecure
+ * the following 32 bits (which is why the next 4 bytes were added to the
+ * table as well). In the RTL, as part of ECO (2874), writing to those next
+ * 4 bytes triggers a write to the SPECIAL_GLBL_SPARE register, so it must be
+ * unsecured as well.
+ */
+#define mmNIC0_QPC0_LINEAR_WQE_RSV (mmNIC0_QPC0_LINEAR_WQE_QPN + 4)
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_RSV (mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN + 4)
+#define mmNIC0_QPC0_SPECIAL_GLBL_SPARE 0x541FF60
+
+static const u32 gaudi2_pb_nic0_qm_qpc_unsecured_regs[] = {
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_0,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_1,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_2,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_3,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_4,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_5,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_6,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_7,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_8,
+ mmNIC0_QPC0_LINEAR_WQE_STATIC_9,
+ mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_0,
+ mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_1,
+ mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_2,
+ mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_3,
+ mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_4,
+ mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_5,
+ mmNIC0_QPC0_LINEAR_WQE_QPN,
+ mmNIC0_QPC0_LINEAR_WQE_RSV,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_0,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_1,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_2,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_3,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_4,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_5,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_6,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_7,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_8,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_9,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_10,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_11,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_12,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_13,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_14,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_15,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_16,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_17,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_0,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_1,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_2,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_3,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_4,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_5,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN,
+ mmNIC0_QPC0_MULTI_STRIDE_WQE_RSV,
+ mmNIC0_QPC0_QMAN_DOORBELL,
+ mmNIC0_QPC0_QMAN_DOORBELL_QPN,
+ mmNIC0_QPC0_SPECIAL_GLBL_SPARE,
+ mmNIC0_QM0_CQ_CFG0_0,
+ mmNIC0_QM0_CQ_CFG0_1,
+ mmNIC0_QM0_CQ_CFG0_2,
+ mmNIC0_QM0_CQ_CFG0_3,
+ mmNIC0_QM0_CQ_CFG0_4,
+ mmNIC0_QM0_CP_FENCE0_RDATA_0,
+ mmNIC0_QM0_CP_FENCE0_RDATA_1,
+ mmNIC0_QM0_CP_FENCE0_RDATA_2,
+ mmNIC0_QM0_CP_FENCE0_RDATA_3,
+ mmNIC0_QM0_CP_FENCE0_RDATA_4,
+ mmNIC0_QM0_CP_FENCE1_RDATA_0,
+ mmNIC0_QM0_CP_FENCE1_RDATA_1,
+ mmNIC0_QM0_CP_FENCE1_RDATA_2,
+ mmNIC0_QM0_CP_FENCE1_RDATA_3,
+ mmNIC0_QM0_CP_FENCE1_RDATA_4,
+ mmNIC0_QM0_CP_FENCE2_RDATA_0,
+ mmNIC0_QM0_CP_FENCE2_RDATA_1,
+ mmNIC0_QM0_CP_FENCE2_RDATA_2,
+ mmNIC0_QM0_CP_FENCE2_RDATA_3,
+ mmNIC0_QM0_CP_FENCE2_RDATA_4,
+ mmNIC0_QM0_CP_FENCE3_RDATA_0,
+ mmNIC0_QM0_CP_FENCE3_RDATA_1,
+ mmNIC0_QM0_CP_FENCE3_RDATA_2,
+ mmNIC0_QM0_CP_FENCE3_RDATA_3,
+ mmNIC0_QM0_CP_FENCE3_RDATA_4,
+ mmNIC0_QM0_CP_FENCE0_CNT_0,
+ mmNIC0_QM0_CP_FENCE0_CNT_1,
+ mmNIC0_QM0_CP_FENCE0_CNT_2,
+ mmNIC0_QM0_CP_FENCE0_CNT_3,
+ mmNIC0_QM0_CP_FENCE0_CNT_4,
+ mmNIC0_QM0_CP_FENCE1_CNT_0,
+ mmNIC0_QM0_CP_FENCE1_CNT_1,
+ mmNIC0_QM0_CP_FENCE1_CNT_2,
+ mmNIC0_QM0_CP_FENCE1_CNT_3,
+ mmNIC0_QM0_CP_FENCE1_CNT_4,
+ mmNIC0_QM0_CP_FENCE2_CNT_0,
+ mmNIC0_QM0_CP_FENCE2_CNT_1,
+ mmNIC0_QM0_CP_FENCE2_CNT_2,
+ mmNIC0_QM0_CP_FENCE2_CNT_3,
+ mmNIC0_QM0_CP_FENCE2_CNT_4,
+ mmNIC0_QM0_CP_FENCE3_CNT_0,
+ mmNIC0_QM0_CP_FENCE3_CNT_1,
+ mmNIC0_QM0_CP_FENCE3_CNT_2,
+ mmNIC0_QM0_CP_FENCE3_CNT_3,
+ mmNIC0_QM0_CP_FENCE3_CNT_4,
+ mmNIC0_QM0_CQ_PTR_LO_0,
+ mmNIC0_QM0_CQ_PTR_HI_0,
+ mmNIC0_QM0_CQ_TSIZE_0,
+ mmNIC0_QM0_CQ_CTL_0,
+ mmNIC0_QM0_CQ_PTR_LO_1,
+ mmNIC0_QM0_CQ_PTR_HI_1,
+ mmNIC0_QM0_CQ_TSIZE_1,
+ mmNIC0_QM0_CQ_CTL_1,
+ mmNIC0_QM0_CQ_PTR_LO_2,
+ mmNIC0_QM0_CQ_PTR_HI_2,
+ mmNIC0_QM0_CQ_TSIZE_2,
+ mmNIC0_QM0_CQ_CTL_2,
+ mmNIC0_QM0_CQ_PTR_LO_3,
+ mmNIC0_QM0_CQ_PTR_HI_3,
+ mmNIC0_QM0_CQ_TSIZE_3,
+ mmNIC0_QM0_CQ_CTL_3,
+ mmNIC0_QM0_CQ_PTR_LO_4,
+ mmNIC0_QM0_CQ_PTR_HI_4,
+ mmNIC0_QM0_CQ_TSIZE_4,
+ mmNIC0_QM0_CQ_CTL_4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR0_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR0_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR1_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR1_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR2_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR2_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR3_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR3_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR4_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR4_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR5_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR5_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR6_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR6_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR7_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR7_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR8_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR8_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR9_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR9_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR10_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR10_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR11_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR11_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR12_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR12_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR13_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR13_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR14_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR14_BASE + 4,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR15_BASE,
+ mmNIC0_QM0_QMAN_WR64_BASE_ADDR15_BASE + 4,
+ mmNIC0_QM0_ARC_CQ_PTR_LO,
+ mmNIC0_QM0_ARC_CQ_PTR_LO_STS,
+ mmNIC0_QM0_ARC_CQ_PTR_HI,
+ mmNIC0_QM0_ARC_CQ_PTR_HI_STS,
+ mmNIC0_QM0_ARB_CFG_0,
+ mmNIC0_QM0_ARB_MST_QUIET_PER,
+ mmNIC0_QM0_ARB_CHOICE_Q_PUSH,
+ mmNIC0_QM0_ARB_WRR_WEIGHT_0,
+ mmNIC0_QM0_ARB_WRR_WEIGHT_1,
+ mmNIC0_QM0_ARB_WRR_WEIGHT_2,
+ mmNIC0_QM0_ARB_WRR_WEIGHT_3,
+ mmNIC0_QM0_ARB_BASE_LO,
+ mmNIC0_QM0_ARB_BASE_HI,
+ mmNIC0_QM0_ARB_MST_SLAVE_EN,
+ mmNIC0_QM0_ARB_MST_SLAVE_EN_1,
+ mmNIC0_QM0_ARB_MST_CRED_INC,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_0,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_1,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_2,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_3,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_4,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_5,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_6,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_7,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_8,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_9,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_10,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_11,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_12,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_13,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_14,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_15,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_16,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_17,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_18,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_19,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_20,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_21,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_22,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_23,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_24,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_25,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_26,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_27,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_28,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_29,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_30,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_31,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_32,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_33,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_34,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_35,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_36,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_37,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_38,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_39,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_40,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_41,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_42,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_43,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_44,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_45,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_46,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_47,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_48,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_49,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_50,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_51,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_52,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_53,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_54,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_55,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_56,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_57,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_58,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_59,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_60,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_61,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_62,
+ mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_63,
+ mmNIC0_QM0_ARB_SLV_ID,
+ mmNIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST,
+ mmNIC0_QM0_ARC_CQ_CFG0,
+ mmNIC0_QM0_CQ_IFIFO_CI_0,
+ mmNIC0_QM0_CQ_IFIFO_CI_1,
+ mmNIC0_QM0_CQ_IFIFO_CI_2,
+ mmNIC0_QM0_CQ_IFIFO_CI_3,
+ mmNIC0_QM0_CQ_IFIFO_CI_4,
+ mmNIC0_QM0_ARC_CQ_IFIFO_CI,
+ mmNIC0_QM0_CQ_CTL_CI_0,
+ mmNIC0_QM0_CQ_CTL_CI_1,
+ mmNIC0_QM0_CQ_CTL_CI_2,
+ mmNIC0_QM0_CQ_CTL_CI_3,
+ mmNIC0_QM0_CQ_CTL_CI_4,
+ mmNIC0_QM0_ARC_CQ_CTL_CI,
+ mmNIC0_QM0_ARC_CQ_TSIZE,
+ mmNIC0_QM0_ARC_CQ_CTL,
+ mmNIC0_QM0_CP_SWITCH_WD_SET,
+ mmNIC0_QM0_CP_EXT_SWITCH,
+ mmNIC0_QM0_CP_PRED_0,
+ mmNIC0_QM0_CP_PRED_1,
+ mmNIC0_QM0_CP_PRED_2,
+ mmNIC0_QM0_CP_PRED_3,
+ mmNIC0_QM0_CP_PRED_4,
+ mmNIC0_QM0_CP_PRED_UPEN_0,
+ mmNIC0_QM0_CP_PRED_UPEN_1,
+ mmNIC0_QM0_CP_PRED_UPEN_2,
+ mmNIC0_QM0_CP_PRED_UPEN_3,
+ mmNIC0_QM0_CP_PRED_UPEN_4,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3,
+ mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3,
+ mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3,
+ mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3,
+ mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4,
+ mmNIC0_QM0_ARC_CQ_IFIFO_MSG_BASE_LO,
+ mmNIC0_QM0_ARC_CQ_CTL_MSG_BASE_LO,
+ mmNIC0_QM0_CQ_IFIFO_MSG_BASE_LO,
+ mmNIC0_QM0_CQ_CTL_MSG_BASE_LO
+};
+
+static const u32 gaudi2_pb_rot0[] = {
+ mmROT0_BASE,
+ mmROT0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmROT0_QM_BASE,
+};
+
+static const u32 gaudi2_pb_rot0_arc[] = {
+ mmROT0_QM_ARC_AUX_BASE
+};
+
+static const struct range gaudi2_pb_rot0_arc_unsecured_regs[] = {
+ {mmROT0_QM_ARC_AUX_RUN_HALT_REQ, mmROT0_QM_ARC_AUX_RUN_HALT_ACK},
+ {mmROT0_QM_ARC_AUX_CLUSTER_NUM, mmROT0_QM_ARC_AUX_WAKE_UP_EVENT},
+ {mmROT0_QM_ARC_AUX_ARC_RST_REQ, mmROT0_QM_ARC_AUX_CID_OFFSET_7},
+ {mmROT0_QM_ARC_AUX_SCRATCHPAD_0, mmROT0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
+ {mmROT0_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmROT0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
+ {mmROT0_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmROT0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
+ {mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0, mmROT0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
+ {mmROT0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT, mmROT0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
+ {mmROT0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT, mmROT0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
+};
+
+static const u32 gaudi2_pb_rot0_unsecured_regs[] = {
+ mmROT0_QM_CQ_CFG0_0,
+ mmROT0_QM_CQ_CFG0_1,
+ mmROT0_QM_CQ_CFG0_2,
+ mmROT0_QM_CQ_CFG0_3,
+ mmROT0_QM_CQ_CFG0_4,
+ mmROT0_QM_CP_FENCE0_RDATA_0,
+ mmROT0_QM_CP_FENCE0_RDATA_1,
+ mmROT0_QM_CP_FENCE0_RDATA_2,
+ mmROT0_QM_CP_FENCE0_RDATA_3,
+ mmROT0_QM_CP_FENCE0_RDATA_4,
+ mmROT0_QM_CP_FENCE1_RDATA_0,
+ mmROT0_QM_CP_FENCE1_RDATA_1,
+ mmROT0_QM_CP_FENCE1_RDATA_2,
+ mmROT0_QM_CP_FENCE1_RDATA_3,
+ mmROT0_QM_CP_FENCE1_RDATA_4,
+ mmROT0_QM_CP_FENCE2_RDATA_0,
+ mmROT0_QM_CP_FENCE2_RDATA_1,
+ mmROT0_QM_CP_FENCE2_RDATA_2,
+ mmROT0_QM_CP_FENCE2_RDATA_3,
+ mmROT0_QM_CP_FENCE2_RDATA_4,
+ mmROT0_QM_CP_FENCE3_RDATA_0,
+ mmROT0_QM_CP_FENCE3_RDATA_1,
+ mmROT0_QM_CP_FENCE3_RDATA_2,
+ mmROT0_QM_CP_FENCE3_RDATA_3,
+ mmROT0_QM_CP_FENCE3_RDATA_4,
+ mmROT0_QM_CP_FENCE0_CNT_0,
+ mmROT0_QM_CP_FENCE0_CNT_1,
+ mmROT0_QM_CP_FENCE0_CNT_2,
+ mmROT0_QM_CP_FENCE0_CNT_3,
+ mmROT0_QM_CP_FENCE0_CNT_4,
+ mmROT0_QM_CP_FENCE1_CNT_0,
+ mmROT0_QM_CP_FENCE1_CNT_1,
+ mmROT0_QM_CP_FENCE1_CNT_2,
+ mmROT0_QM_CP_FENCE1_CNT_3,
+ mmROT0_QM_CP_FENCE1_CNT_4,
+ mmROT0_QM_CP_FENCE2_CNT_0,
+ mmROT0_QM_CP_FENCE2_CNT_1,
+ mmROT0_QM_CP_FENCE2_CNT_2,
+ mmROT0_QM_CP_FENCE2_CNT_3,
+ mmROT0_QM_CP_FENCE2_CNT_4,
+ mmROT0_QM_CP_FENCE3_CNT_0,
+ mmROT0_QM_CP_FENCE3_CNT_1,
+ mmROT0_QM_CP_FENCE3_CNT_2,
+ mmROT0_QM_CP_FENCE3_CNT_3,
+ mmROT0_QM_CP_FENCE3_CNT_4,
+ mmROT0_QM_CQ_PTR_LO_0,
+ mmROT0_QM_CQ_PTR_HI_0,
+ mmROT0_QM_CQ_TSIZE_0,
+ mmROT0_QM_CQ_CTL_0,
+ mmROT0_QM_CQ_PTR_LO_1,
+ mmROT0_QM_CQ_PTR_HI_1,
+ mmROT0_QM_CQ_TSIZE_1,
+ mmROT0_QM_CQ_CTL_1,
+ mmROT0_QM_CQ_PTR_LO_2,
+ mmROT0_QM_CQ_PTR_HI_2,
+ mmROT0_QM_CQ_TSIZE_2,
+ mmROT0_QM_CQ_CTL_2,
+ mmROT0_QM_CQ_PTR_LO_3,
+ mmROT0_QM_CQ_PTR_HI_3,
+ mmROT0_QM_CQ_TSIZE_3,
+ mmROT0_QM_CQ_CTL_3,
+ mmROT0_QM_CQ_PTR_LO_4,
+ mmROT0_QM_CQ_PTR_HI_4,
+ mmROT0_QM_CQ_TSIZE_4,
+ mmROT0_QM_CQ_CTL_4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR0_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR1_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR2_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR3_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR4_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR5_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR6_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR7_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR8_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR9_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR10_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR11_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR12_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR13_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR14_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR15_BASE,
+ mmROT0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
+ mmROT0_QM_ARC_CQ_PTR_LO,
+ mmROT0_QM_ARC_CQ_PTR_LO_STS,
+ mmROT0_QM_ARC_CQ_PTR_HI,
+ mmROT0_QM_ARC_CQ_PTR_HI_STS,
+ mmROT0_QM_ARB_CFG_0,
+ mmROT0_QM_ARB_MST_QUIET_PER,
+ mmROT0_QM_ARB_CHOICE_Q_PUSH,
+ mmROT0_QM_ARB_WRR_WEIGHT_0,
+ mmROT0_QM_ARB_WRR_WEIGHT_1,
+ mmROT0_QM_ARB_WRR_WEIGHT_2,
+ mmROT0_QM_ARB_WRR_WEIGHT_3,
+ mmROT0_QM_ARB_BASE_LO,
+ mmROT0_QM_ARB_BASE_HI,
+ mmROT0_QM_ARB_MST_SLAVE_EN,
+ mmROT0_QM_ARB_MST_SLAVE_EN_1,
+ mmROT0_QM_ARB_MST_CRED_INC,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
+ mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
+ mmROT0_QM_ARB_SLV_ID,
+ mmROT0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
+ mmROT0_QM_ARC_CQ_CFG0,
+ mmROT0_QM_CQ_IFIFO_CI_0,
+ mmROT0_QM_CQ_IFIFO_CI_1,
+ mmROT0_QM_CQ_IFIFO_CI_2,
+ mmROT0_QM_CQ_IFIFO_CI_3,
+ mmROT0_QM_CQ_IFIFO_CI_4,
+ mmROT0_QM_ARC_CQ_IFIFO_CI,
+ mmROT0_QM_CQ_CTL_CI_0,
+ mmROT0_QM_CQ_CTL_CI_1,
+ mmROT0_QM_CQ_CTL_CI_2,
+ mmROT0_QM_CQ_CTL_CI_3,
+ mmROT0_QM_CQ_CTL_CI_4,
+ mmROT0_QM_ARC_CQ_CTL_CI,
+ mmROT0_QM_ARC_CQ_TSIZE,
+ mmROT0_QM_ARC_CQ_CTL,
+ mmROT0_QM_CP_SWITCH_WD_SET,
+ mmROT0_QM_CP_EXT_SWITCH,
+ mmROT0_QM_CP_PRED_0,
+ mmROT0_QM_CP_PRED_1,
+ mmROT0_QM_CP_PRED_2,
+ mmROT0_QM_CP_PRED_3,
+ mmROT0_QM_CP_PRED_4,
+ mmROT0_QM_CP_PRED_UPEN_0,
+ mmROT0_QM_CP_PRED_UPEN_1,
+ mmROT0_QM_CP_PRED_UPEN_2,
+ mmROT0_QM_CP_PRED_UPEN_3,
+ mmROT0_QM_CP_PRED_UPEN_4,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_LO_0,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_LO_1,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_LO_2,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_LO_3,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_LO_4,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_HI_0,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_HI_1,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_HI_2,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_HI_3,
+ mmROT0_QM_CP_MSG_BASE0_ADDR_HI_4,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_LO_0,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_LO_1,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_LO_2,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_LO_3,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_LO_4,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_HI_0,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_HI_1,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_HI_2,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_HI_3,
+ mmROT0_QM_CP_MSG_BASE1_ADDR_HI_4,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_LO_0,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_LO_1,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_LO_2,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_LO_3,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_LO_4,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_HI_0,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_HI_1,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_HI_2,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_HI_3,
+ mmROT0_QM_CP_MSG_BASE2_ADDR_HI_4,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_LO_0,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_LO_1,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_LO_2,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_LO_3,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_LO_4,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_HI_0,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_HI_1,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_HI_2,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_HI_3,
+ mmROT0_QM_CP_MSG_BASE3_ADDR_HI_4,
+ mmROT0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
+ mmROT0_QM_ARC_CQ_CTL_MSG_BASE_LO,
+ mmROT0_QM_CQ_IFIFO_MSG_BASE_LO,
+ mmROT0_QM_CQ_CTL_MSG_BASE_LO,
+ mmROT0_DESC_CONTEXT_ID,
+ mmROT0_DESC_IN_IMG_START_ADDR_L,
+ mmROT0_DESC_IN_IMG_START_ADDR_H,
+ mmROT0_DESC_OUT_IMG_START_ADDR_L,
+ mmROT0_DESC_OUT_IMG_START_ADDR_H,
+ mmROT0_DESC_CFG,
+ mmROT0_DESC_IM_READ_SLOPE,
+ mmROT0_DESC_SIN_D,
+ mmROT0_DESC_COS_D,
+ mmROT0_DESC_IN_IMG,
+ mmROT0_DESC_IN_STRIDE,
+ mmROT0_DESC_IN_STRIPE,
+ mmROT0_DESC_IN_CENTER,
+ mmROT0_DESC_OUT_IMG,
+ mmROT0_DESC_OUT_STRIDE,
+ mmROT0_DESC_OUT_STRIPE,
+ mmROT0_DESC_OUT_CENTER,
+ mmROT0_DESC_BACKGROUND,
+ mmROT0_DESC_CPL_MSG_EN,
+ mmROT0_DESC_IDLE_STATE,
+ mmROT0_DESC_CPL_MSG_ADDR,
+ mmROT0_DESC_CPL_MSG_DATA,
+ mmROT0_DESC_X_I_START_OFFSET,
+ mmROT0_DESC_X_I_START_OFFSET_FLIP,
+ mmROT0_DESC_X_I_FIRST,
+ mmROT0_DESC_Y_I_FIRST,
+ mmROT0_DESC_Y_I,
+ mmROT0_DESC_OUT_STRIPE_SIZE,
+ mmROT0_DESC_RSB_CFG_0,
+ mmROT0_DESC_RSB_PAD_VAL,
+ mmROT0_DESC_OWM_CFG,
+ mmROT0_DESC_CTRL_CFG,
+ mmROT0_DESC_PIXEL_PAD,
+ mmROT0_DESC_PREC_SHIFT,
+ mmROT0_DESC_MAX_VAL,
+ mmROT0_DESC_A0_M11,
+ mmROT0_DESC_A1_M12,
+ mmROT0_DESC_A2,
+ mmROT0_DESC_B0_M21,
+ mmROT0_DESC_B1_M22,
+ mmROT0_DESC_B2,
+ mmROT0_DESC_C0,
+ mmROT0_DESC_C1,
+ mmROT0_DESC_C2,
+ mmROT0_DESC_D0,
+ mmROT0_DESC_D1,
+ mmROT0_DESC_D2,
+ mmROT0_DESC_INV_PROC_SIZE_M_1,
+ mmROT0_DESC_MESH_IMG_START_ADDR_L,
+ mmROT0_DESC_MESH_IMG_START_ADDR_H,
+ mmROT0_DESC_MESH_IMG,
+ mmROT0_DESC_MESH_STRIDE,
+ mmROT0_DESC_MESH_STRIPE,
+ mmROT0_DESC_MESH_CTRL,
+ mmROT0_DESC_MESH_GH,
+ mmROT0_DESC_MESH_GV,
+ mmROT0_DESC_MRSB_CFG_0,
+ mmROT0_DESC_MRSB_PAD_VAL,
+ mmROT0_DESC_BUF_CFG,
+ mmROT0_DESC_CID_OFFSET,
+ mmROT0_DESC_PUSH_DESC
+};
+
+static const u32 gaudi2_pb_psoc_global_conf[] = {
+ mmPSOC_GLOBAL_CONF_BASE
+};
+
+static const u32 gaudi2_pb_psoc[] = {
+ mmPSOC_EFUSE_BASE,
+ mmPSOC_BTL_BASE,
+ mmPSOC_CS_TRACE_BASE,
+ mmPSOC_DFT_EFUSE_BASE,
+ mmPSOC_PID_BASE,
+ mmPSOC_ARC0_CFG_BASE,
+ mmPSOC_ARC0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPSOC_ARC0_AUX_BASE,
+ mmPSOC_ARC1_CFG_BASE,
+ mmPSOC_ARC1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPSOC_ARC1_AUX_BASE,
+ mmJT_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSMI_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmI2C_S_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPSOC_SVID0_BASE,
+ mmPSOC_SVID1_BASE,
+ mmPSOC_SVID2_BASE,
+ mmPSOC_AVS0_BASE,
+ mmPSOC_AVS1_BASE,
+ mmPSOC_AVS2_BASE,
+ mmPSOC_PWM0_BASE,
+ mmPSOC_PWM1_BASE,
+ mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE,
+};
+
+static const u32 gaudi2_pb_pmmu[] = {
+ mmPMMU_HBW_MMU_BASE,
+ mmPMMU_HBW_STLB_BASE,
+ mmPMMU_HBW_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPMMU_PIF_BASE,
+};
+
+static const u32 gaudi2_pb_psoc_pll[] = {
+ mmPSOC_MME_PLL_CTRL_BASE,
+ mmPSOC_CPU_PLL_CTRL_BASE,
+ mmPSOC_VID_PLL_CTRL_BASE
+};
+
+static const u32 gaudi2_pb_pmmu_pll[] = {
+ mmPMMU_MME_PLL_CTRL_BASE,
+ mmPMMU_VID_PLL_CTRL_BASE
+};
+
+static const u32 gaudi2_pb_xbar_pll[] = {
+ mmDCORE0_XBAR_DMA_PLL_CTRL_BASE,
+ mmDCORE0_XBAR_MMU_PLL_CTRL_BASE,
+ mmDCORE0_XBAR_IF_PLL_CTRL_BASE,
+ mmDCORE0_XBAR_MESH_PLL_CTRL_BASE,
+ mmDCORE1_XBAR_DMA_PLL_CTRL_BASE,
+ mmDCORE1_XBAR_MMU_PLL_CTRL_BASE,
+ mmDCORE1_XBAR_IF_PLL_CTRL_BASE,
+ mmDCORE1_XBAR_MESH_PLL_CTRL_BASE,
+ mmDCORE1_XBAR_HBM_PLL_CTRL_BASE,
+ mmDCORE2_XBAR_DMA_PLL_CTRL_BASE,
+ mmDCORE2_XBAR_MMU_PLL_CTRL_BASE,
+ mmDCORE2_XBAR_IF_PLL_CTRL_BASE,
+ mmDCORE2_XBAR_BANK_PLL_CTRL_BASE,
+ mmDCORE2_XBAR_HBM_PLL_CTRL_BASE,
+ mmDCORE3_XBAR_DMA_PLL_CTRL_BASE,
+ mmDCORE3_XBAR_MMU_PLL_CTRL_BASE,
+ mmDCORE3_XBAR_IF_PLL_CTRL_BASE,
+ mmDCORE3_XBAR_BANK_PLL_CTRL_BASE
+};
+
+static const u32 gaudi2_pb_xft_pll[] = {
+ mmDCORE0_HBM_PLL_CTRL_BASE,
+ mmDCORE0_TPC_PLL_CTRL_BASE,
+ mmDCORE0_PCI_PLL_CTRL_BASE,
+ mmDCORE1_HBM_PLL_CTRL_BASE,
+ mmDCORE1_TPC_PLL_CTRL_BASE,
+ mmDCORE1_NIC_PLL_CTRL_BASE,
+ mmDCORE2_HBM_PLL_CTRL_BASE,
+ mmDCORE2_TPC_PLL_CTRL_BASE,
+ mmDCORE3_HBM_PLL_CTRL_BASE,
+ mmDCORE3_TPC_PLL_CTRL_BASE,
+ mmDCORE3_NIC_PLL_CTRL_BASE,
+};
+
+static const u32 gaudi2_pb_pcie[] = {
+ mmPCIE_ELBI_RR_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPCIE_LBW_RR_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmPCIE_WRAP_BASE,
+};
+
+static const u32 gaudi2_pb_thermal_sensor0[] = {
+ mmDCORE0_XFT_BASE,
+ mmDCORE0_TSTDVS_BASE,
+};
+
+static const u32 gaudi2_pb_hbm[] = {
+ mmHBM0_MC0_BASE,
+ mmHBM0_MC1_BASE,
+};
+
+static const u32 gaudi2_pb_mme_qm_arc_acp_eng[] = {
+ mmDCORE0_MME_QM_ARC_ACP_ENG_BASE,
+};
+
+static const struct range gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs[] = {
+ {mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_0, mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_REG},
+};
+
+struct gaudi2_tpc_pb_data {
+ struct hl_block_glbl_sec *glbl_sec;
+ u32 block_array_size;
+};
+
+static void gaudi2_config_tpcs_glbl_sec(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ void *data)
+{
+ struct gaudi2_tpc_pb_data *pb_data = (struct gaudi2_tpc_pb_data *)data;
+
+ hl_config_glbl_sec(hdev, gaudi2_pb_dcr0_tpc0, pb_data->glbl_sec,
+ offset, pb_data->block_array_size);
+}
+
+static int gaudi2_init_pb_tpc(struct hl_device *hdev)
+{
+ u32 stride, kernel_tensor_stride, qm_tensor_stride, block_array_size;
+ struct gaudi2_tpc_pb_data tpc_pb_data;
+ struct hl_block_glbl_sec *glbl_sec;
+ struct iterate_module_ctx tpc_iter;
+ int i;
+
+ block_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0);
+
+ glbl_sec = kcalloc(block_array_size, sizeof(struct hl_block_glbl_sec), GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ kernel_tensor_stride = mmDCORE0_TPC0_CFG_KERNEL_TENSOR_1_BASE -
+ mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE;
+ qm_tensor_stride = mmDCORE0_TPC0_CFG_QM_TENSOR_1_BASE - mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE;
+
+ hl_secure_block(hdev, glbl_sec, block_array_size);
+ hl_unsecure_registers(hdev, gaudi2_pb_dcr0_tpc0_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_unsecured_regs),
+ 0, gaudi2_pb_dcr0_tpc0, glbl_sec,
+ block_array_size);
+
+ /* Unsecure all TPC kernel tensors */
+ for (i = 0 ; i < TPC_NUM_OF_KERNEL_TENSORS ; i++)
+ hl_unsecure_registers(hdev,
+ gaudi2_pb_dcr0_tpc0_ktensor_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_ktensor_unsecured_regs),
+ i * kernel_tensor_stride, gaudi2_pb_dcr0_tpc0,
+ glbl_sec, block_array_size);
+
+ /* Unsecure all TPC QM tensors */
+ for (i = 0 ; i < TPC_NUM_OF_QM_TENSORS ; i++)
+ hl_unsecure_registers(hdev,
+ gaudi2_pb_dcr0_tpc0_qtensor_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_qtensor_unsecured_regs),
+ i * qm_tensor_stride,
+ gaudi2_pb_dcr0_tpc0, glbl_sec, block_array_size);
+
+ /* unsecure all 32 TPC QM SRF regs */
+ stride = mmDCORE0_TPC0_CFG_QM_SRF_1 - mmDCORE0_TPC0_CFG_QM_SRF_0;
+ for (i = 0 ; i < 32 ; i++)
+ hl_unsecure_register(hdev, mmDCORE0_TPC0_CFG_QM_SRF_0,
+ i * stride, gaudi2_pb_dcr0_tpc0, glbl_sec,
+ block_array_size);
+
+ /* unsecure the 4 TPC LOCK VALUE regs */
+ stride = mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_1 - mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_0;
+ for (i = 0 ; i < 4 ; i++)
+ hl_unsecure_register(hdev, mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_0,
+ i * stride, gaudi2_pb_dcr0_tpc0, glbl_sec,
+ block_array_size);
+
+ /* prepare data for TPC iterator */
+ tpc_pb_data.glbl_sec = glbl_sec;
+ tpc_pb_data.block_array_size = block_array_size;
+ tpc_iter.fn = &gaudi2_config_tpcs_glbl_sec;
+ tpc_iter.data = &tpc_pb_data;
+ gaudi2_iterate_tpcs(hdev, &tpc_iter);
+
+ kfree(glbl_sec);
+
+ return 0;
+}
+
+struct gaudi2_tpc_arc_pb_data {
+ u32 unsecured_regs_arr_size;
+ u32 arc_regs_arr_size;
+ int rc;
+};
+
+static void gaudi2_config_tpcs_pb_ranges(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ void *data)
+{
+ struct gaudi2_tpc_arc_pb_data *pb_data = (struct gaudi2_tpc_arc_pb_data *)data;
+
+ pb_data->rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 1,
+ offset, gaudi2_pb_dcr0_tpc0_arc,
+ pb_data->arc_regs_arr_size,
+ gaudi2_pb_dcr0_tpc0_arc_unsecured_regs,
+ pb_data->unsecured_regs_arr_size);
+}
+
+static int gaudi2_init_pb_tpc_arc(struct hl_device *hdev)
+{
+ struct gaudi2_tpc_arc_pb_data tpc_arc_pb_data;
+ struct iterate_module_ctx tpc_iter;
+
+ tpc_arc_pb_data.arc_regs_arr_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc);
+ tpc_arc_pb_data.unsecured_regs_arr_size =
+ ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc_unsecured_regs);
+ tpc_arc_pb_data.rc = 0;
+ tpc_iter.fn = &gaudi2_config_tpcs_pb_ranges;
+ tpc_iter.data = &tpc_arc_pb_data;
+ gaudi2_iterate_tpcs(hdev, &tpc_iter);
+
+ return tpc_arc_pb_data.rc;
+}
+
+static int gaudi2_init_pb_sm_objs(struct hl_device *hdev)
+{
+ int i, j, glbl_sec_array_len = gaudi2_pb_dcr0_sm_objs.glbl_sec_length;
+ u32 sec_entry, *sec_array, array_base, first_sob, first_mon;
+
+ array_base = gaudi2_pb_dcr0_sm_objs.mm_block_base_addr +
+ gaudi2_pb_dcr0_sm_objs.glbl_sec_offset;
+
+ sec_array = kcalloc(glbl_sec_array_len, sizeof(u32), GFP_KERNEL);
+ if (!sec_array)
+ return -ENOMEM;
+
+ first_sob = GAUDI2_RESERVED_SOB_NUMBER;
+ first_mon = GAUDI2_RESERVED_MON_NUMBER;
+
+ /* 8192 SOB_OBJs skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (j = i = first_sob ; i < DCORE_NUM_OF_SOB ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* 2048 MON_PAY ADDR_L skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* 2048 MON_PAY ADDR_H skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* 2048 MON_PAY DATA skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* 2048 MON_ARM skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* 2048 MON_CONFIG skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* 2048 MON_STATUS skipping first GAUDI2_MAX_PENDING_CS of them */
+ for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
+ UNSET_GLBL_SEC_BIT(sec_array, j);
+
+ /* Unsecure selected Dcore0 registers */
+ for (i = 0 ; i < glbl_sec_array_len ; i++) {
+ sec_entry = array_base + i * sizeof(u32);
+ WREG32(sec_entry, sec_array[i]);
+ }
+
+ /* Unsecure Dcore1 - Dcore3 registers */
+ memset(sec_array, -1, glbl_sec_array_len * sizeof(u32));
+
+ for (i = 1 ; i < NUM_OF_DCORES ; i++) {
+ for (j = 0 ; j < glbl_sec_array_len ; j++) {
+ sec_entry = DCORE_OFFSET * i + array_base + j * sizeof(u32);
+ WREG32(sec_entry, sec_array[j]);
+ }
+ }
+
+ kfree(sec_array);
+
+ return 0;
+}
+
+static void gaudi2_write_lbw_range_register(struct hl_device *hdev, u64 base, void *data)
+{
+ u32 reg_min_offset, reg_max_offset, write_min, write_max;
+ struct rr_config *rr_cfg = (struct rr_config *) data;
+
+ switch (rr_cfg->type) {
+ case RR_TYPE_SHORT:
+ reg_min_offset = RR_LBW_SEC_RANGE_MIN_SHORT_0_OFFSET;
+ reg_max_offset = RR_LBW_SEC_RANGE_MAX_SHORT_0_OFFSET;
+ break;
+
+ case RR_TYPE_LONG:
+ reg_min_offset = RR_LBW_SEC_RANGE_MIN_0_OFFSET;
+ reg_max_offset = RR_LBW_SEC_RANGE_MAX_0_OFFSET;
+ break;
+
+ case RR_TYPE_SHORT_PRIV:
+ reg_min_offset = RR_LBW_PRIV_RANGE_MIN_SHORT_0_OFFSET;
+ reg_max_offset = RR_LBW_PRIV_RANGE_MAX_SHORT_0_OFFSET;
+ break;
+
+ case RR_TYPE_LONG_PRIV:
+ reg_min_offset = RR_LBW_PRIV_RANGE_MIN_0_OFFSET;
+ reg_max_offset = RR_LBW_PRIV_RANGE_MAX_0_OFFSET;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid LBW RR type %u\n", rr_cfg->type);
+ return;
+ }
+
+ reg_min_offset += rr_cfg->index * sizeof(u32);
+ reg_max_offset += rr_cfg->index * sizeof(u32);
+
+ if (rr_cfg->type == RR_TYPE_SHORT || rr_cfg->type == RR_TYPE_SHORT_PRIV) {
+ write_min = FIELD_GET(RR_LBW_SHORT_MASK, lower_32_bits(rr_cfg->min));
+ write_max = FIELD_GET(RR_LBW_SHORT_MASK, lower_32_bits(rr_cfg->max));
+
+ } else {
+ write_min = FIELD_GET(RR_LBW_LONG_MASK, lower_32_bits(rr_cfg->min));
+ write_max = FIELD_GET(RR_LBW_LONG_MASK, lower_32_bits(rr_cfg->max));
+ }
+
+ /* Configure LBW RR:
+ * Both RR types start blocking from base address 0x1000007FF8000000
+ * SHORT RRs address bits [26:12]
+ * LONG RRs address bits [26:0]
+ */
+ WREG32(base + reg_min_offset, write_min);
+ WREG32(base + reg_max_offset, write_max);
+}
+
+void gaudi2_write_rr_to_all_lbw_rtrs(struct hl_device *hdev, u8 rr_type, u32 rr_index, u64 min_val,
+ u64 max_val)
+{
+ struct dup_block_ctx block_ctx;
+ struct rr_config rr_cfg;
+
+ if ((rr_type == RR_TYPE_SHORT || rr_type == RR_TYPE_SHORT_PRIV) &&
+ rr_index >= NUM_SHORT_LBW_RR) {
+
+ dev_err(hdev->dev, "invalid short LBW %s range register index: %u",
+ rr_type == RR_TYPE_SHORT ? "secure" : "privileged", rr_index);
+ return;
+ }
+
+ if ((rr_type == RR_TYPE_LONG || rr_type == RR_TYPE_LONG_PRIV) &&
+ rr_index >= NUM_LONG_LBW_RR) {
+
+ dev_err(hdev->dev, "invalid long LBW %s range register index: %u",
+ rr_type == RR_TYPE_LONG ? "secure" : "privileged", rr_index);
+ return;
+ }
+
+ rr_cfg.type = rr_type;
+ rr_cfg.index = rr_index;
+ rr_cfg.min = min_val;
+ rr_cfg.max = max_val;
+
+ block_ctx.instance_cfg_fn = &gaudi2_write_lbw_range_register;
+ block_ctx.data = &rr_cfg;
+
+ /* SFT */
+ block_ctx.base = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_BASE;
+ block_ctx.blocks = NUM_OF_SFT;
+ block_ctx.block_off = SFT_OFFSET;
+ block_ctx.instances = SFT_NUM_OF_LBW_RTR;
+ block_ctx.instance_off = SFT_LBW_RTR_OFFSET;
+ gaudi2_init_blocks(hdev, &block_ctx);
+
+ /* SIF */
+ block_ctx.base = mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE;
+ block_ctx.blocks = NUM_OF_DCORES;
+ block_ctx.block_off = DCORE_OFFSET;
+ block_ctx.instances = NUM_OF_RTR_PER_DCORE;
+ block_ctx.instance_off = DCORE_RTR_OFFSET;
+ gaudi2_init_blocks(hdev, &block_ctx);
+
+ block_ctx.blocks = 1;
+ block_ctx.block_off = 0;
+ block_ctx.instances = 1;
+ block_ctx.instance_off = 0;
+
+ /* PCIE ELBI */
+ block_ctx.base = mmPCIE_ELBI_RR_MSTR_IF_RR_SHRD_LBW_BASE;
+ gaudi2_init_blocks(hdev, &block_ctx);
+
+ /* PCIE MSTR */
+ block_ctx.base = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_LBW_BASE;
+ gaudi2_init_blocks(hdev, &block_ctx);
+
+ /* PCIE LBW */
+ block_ctx.base = mmPCIE_LBW_RR_MSTR_IF_RR_SHRD_LBW_BASE;
+ gaudi2_init_blocks(hdev, &block_ctx);
+}
+
+static void gaudi2_init_lbw_range_registers_secure(struct hl_device *hdev)
+{
+ int i;
+
+ /* Up to 14 14-bit-address range registers.
+ *
+ * - range 0: NIC0_CFG
+ * - range 1: NIC1_CFG
+ * - range 2: NIC2_CFG
+ * - range 3: NIC3_CFG
+ * - range 4: NIC4_CFG
+ * - range 5: NIC5_CFG
+ * - range 6: NIC6_CFG
+ * - range 7: NIC7_CFG
+ * - range 8: NIC8_CFG
+ * - range 9: NIC9_CFG
+ * - range 10: NIC10_CFG
+ * - range 11: NIC11_CFG + *_DBG (not including TPC_DBG)
+ *
+ * If F/W security is not enabled:
+ * - ranges 12,13: PSOC_CFG (excluding PSOC_TIMESTAMP)
+ */
+ u64 lbw_range_min_short[] = {
+ mmNIC0_TX_AXUSER_BASE,
+ mmNIC1_TX_AXUSER_BASE,
+ mmNIC2_TX_AXUSER_BASE,
+ mmNIC3_TX_AXUSER_BASE,
+ mmNIC4_TX_AXUSER_BASE,
+ mmNIC5_TX_AXUSER_BASE,
+ mmNIC6_TX_AXUSER_BASE,
+ mmNIC7_TX_AXUSER_BASE,
+ mmNIC8_TX_AXUSER_BASE,
+ mmNIC9_TX_AXUSER_BASE,
+ mmNIC10_TX_AXUSER_BASE,
+ mmNIC11_TX_AXUSER_BASE,
+ mmPSOC_I2C_M0_BASE,
+ mmPSOC_EFUSE_BASE
+ };
+ u64 lbw_range_max_short[] = {
+ mmNIC0_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC1_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC2_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC3_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC4_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC5_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC6_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC7_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC8_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC9_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC10_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
+ mmNIC11_DBG_FUNNEL_NCH_BASE + HL_BLOCK_SIZE,
+ mmPSOC_WDOG_BASE + HL_BLOCK_SIZE,
+ mmSVID2_AC_BASE + HL_BLOCK_SIZE
+ };
+
+ /* Up to 4 26-bit-address range registers.
+ *
+ * - range 0: TPC_DBG
+ * - range 1: PCIE_DBI.MSIX_DOORBELL_OFF
+ * - ranges 2/3: used during soft reset to block access to several blocks; cleared here
+ */
+ u64 lbw_range_min_long[] = {
+ mmDCORE0_TPC0_ROM_TABLE_BASE,
+ mmPCIE_DBI_MSIX_DOORBELL_OFF,
+ 0x0,
+ 0x0
+ };
+ u64 lbw_range_max_long[] = {
+ mmDCORE3_TPC5_EML_CS_BASE + HL_BLOCK_SIZE,
+ mmPCIE_DBI_MSIX_DOORBELL_OFF + 0x4,
+ 0x0,
+ 0x0
+ };
+
+ /* write short range registers to all lbw rtrs */
+ for (i = 0 ; i < ARRAY_SIZE(lbw_range_min_short) ; i++) {
+ if ((lbw_range_min_short[i] == mmPSOC_I2C_M0_BASE ||
+ lbw_range_min_short[i] == mmPSOC_EFUSE_BASE) &&
+ hdev->asic_prop.fw_security_enabled)
+ continue;
+
+ gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_SHORT, i,
+ lbw_range_min_short[i], lbw_range_max_short[i]);
+ }
+
+ /* write long range registers to all lbw rtrs */
+ for (i = 0 ; i < ARRAY_SIZE(lbw_range_min_long) ; i++) {
+ gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, i,
+ lbw_range_min_long[i], lbw_range_max_long[i]);
+ }
+}
+
+static void gaudi2_init_lbw_range_registers(struct hl_device *hdev)
+{
+ gaudi2_init_lbw_range_registers_secure(hdev);
+}
+
+static void gaudi2_write_hbw_range_register(struct hl_device *hdev, u64 base, void *data)
+{
+ u32 min_lo_reg_offset, min_hi_reg_offset, max_lo_reg_offset, max_hi_reg_offset;
+ struct rr_config *rr_cfg = (struct rr_config *) data;
+ u64 val_min, val_max;
+
+ switch (rr_cfg->type) {
+ case RR_TYPE_SHORT:
+ min_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_0_OFFSET;
+ min_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_0_OFFSET;
+ max_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_0_OFFSET;
+ max_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_0_OFFSET;
+ break;
+
+ case RR_TYPE_LONG:
+ min_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_LO_0_OFFSET;
+ min_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_HI_0_OFFSET;
+ max_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_LO_0_OFFSET;
+ max_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_HI_0_OFFSET;
+ break;
+
+ case RR_TYPE_SHORT_PRIV:
+ min_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_0_OFFSET;
+ min_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_0_OFFSET;
+ max_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_0_OFFSET;
+ max_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_0_OFFSET;
+ break;
+
+ case RR_TYPE_LONG_PRIV:
+ min_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_LO_0_OFFSET;
+ min_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_HI_0_OFFSET;
+ max_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_LO_0_OFFSET;
+ max_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_HI_0_OFFSET;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid HBW RR type %u\n", rr_cfg->type);
+ return;
+ }
+
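+ /* Registers of consecutive RR indices are 4 bytes apart in each of the min/max lo/hi banks */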
+ min_lo_reg_offset += rr_cfg->index * sizeof(u32);
+ min_hi_reg_offset += rr_cfg->index * sizeof(u32);
+ max_lo_reg_offset += rr_cfg->index * sizeof(u32);
+ max_hi_reg_offset += rr_cfg->index * sizeof(u32);
+
+ if (rr_cfg->type == RR_TYPE_SHORT || rr_cfg->type == RR_TYPE_SHORT_PRIV) {
+ val_min = FIELD_GET(RR_HBW_SHORT_HI_MASK, rr_cfg->min) |
+ FIELD_GET(RR_HBW_SHORT_LO_MASK, rr_cfg->min);
+ val_max = FIELD_GET(RR_HBW_SHORT_HI_MASK, rr_cfg->max) |
+ FIELD_GET(RR_HBW_SHORT_LO_MASK, rr_cfg->max);
+ } else {
+ val_min = FIELD_GET(RR_HBW_LONG_HI_MASK, rr_cfg->min) |
+ FIELD_GET(RR_HBW_LONG_LO_MASK, rr_cfg->min);
+ val_max = FIELD_GET(RR_HBW_LONG_HI_MASK, rr_cfg->max) |
+ FIELD_GET(RR_HBW_LONG_LO_MASK, rr_cfg->max);
+ }
+
+ /* Configure HBW RR:
+ * SHORT RRs (0x1000_<36bits>000) - HI: address bits [47:44], LO: address bits [43:12]
+ * LONG RRs (0x<52bits>000) - HI: address bits [63:44], LO: address bits [43:12]
+ */
+ WREG32(base + min_lo_reg_offset, lower_32_bits(val_min));
+ WREG32(base + min_hi_reg_offset, upper_32_bits(val_min));
+ WREG32(base + max_lo_reg_offset, lower_32_bits(val_max));
+ WREG32(base + max_hi_reg_offset, upper_32_bits(val_max));
+}
+
+static void gaudi2_write_hbw_rr_to_all_mstr_if(struct hl_device *hdev, u8 rr_type, u32 rr_index,
+ u64 min_val, u64 max_val)
+{
+ struct dup_block_ctx block_ctx;
+ struct rr_config rr_cfg;
+
+ if ((rr_type == RR_TYPE_SHORT || rr_type == RR_TYPE_SHORT_PRIV) &&
+ rr_index >= NUM_SHORT_HBW_RR) {
+
+ dev_err(hdev->dev, "invalid short HBW %s range register index: %u",
+ rr_type == RR_TYPE_SHORT ? "secure" : "privileged", rr_index);
+ return;
+ }
+
+ if ((rr_type == RR_TYPE_LONG || rr_type == RR_TYPE_LONG_PRIV) &&
+ rr_index >= NUM_LONG_HBW_RR) {
+
+ dev_err(hdev->dev, "invalid long HBW %s range register index: %u",
+ rr_type == RR_TYPE_LONG ? "secure" : "privileged", rr_index);
+ return;
+ }
+
+ rr_cfg.type = rr_type;
+ rr_cfg.index = rr_index;
+ rr_cfg.min = min_val;
+ rr_cfg.max = max_val;
+
+ block_ctx.instance_cfg_fn = &gaudi2_write_hbw_range_register;
+ block_ctx.data = &rr_cfg;
+
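+ /* Apply the same RR configuration to all HBW MSTR_IFs: SFT, DCORE RTRs and PCIE MSTR */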
+ /* SFT */
+ block_ctx.base = mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE;
+ block_ctx.blocks = NUM_OF_SFT;
+ block_ctx.block_off = SFT_OFFSET;
+ block_ctx.instances = SFT_NUM_OF_HBW_RTR;
+ block_ctx.instance_off = SFT_IF_RTR_OFFSET;
+ gaudi2_init_blocks(hdev, &block_ctx);
+
+ /* SIF */
+ block_ctx.base = mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE;
+ block_ctx.blocks = NUM_OF_DCORES;
+ block_ctx.block_off = DCORE_OFFSET;
+ block_ctx.instances = NUM_OF_RTR_PER_DCORE;
+ block_ctx.instance_off = DCORE_RTR_OFFSET;
+ gaudi2_init_blocks(hdev, &block_ctx);
+
+ /* PCIE MSTR */
+ block_ctx.base = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE;
+ block_ctx.blocks = 1;
+ block_ctx.block_off = 0;
+ block_ctx.instances = 1;
+ block_ctx.instance_off = 0;
+ gaudi2_init_blocks(hdev, &block_ctx);
+}
+
+static void gaudi2_init_hbw_range_registers(struct hl_device *hdev)
+{
+ int i;
+
+ /* Up to 6 short RR (0x1000_<36bits>000) and 4 long RR (0x<52bits>000).
+ *
+ * - short range 0:
+ * SPI Flash, ARC0/1 ICCM/DCCM, Secure Boot ROM, PSOC_FW/Scratchpad/PCIE_FW SRAM
+ */
+ u64 hbw_range_min_short[] = {
+ SPI_FLASH_BASE_ADDR
+ };
+ u64 hbw_range_max_short[] = {
+ PCIE_FW_SRAM_ADDR + PCIE_FW_SRAM_SIZE
+ };
+
+ for (i = 0 ; i < ARRAY_SIZE(hbw_range_min_short) ; i++) {
+ gaudi2_write_hbw_rr_to_all_mstr_if(hdev, RR_TYPE_SHORT, i, hbw_range_min_short[i],
+ hbw_range_max_short[i]);
+ }
+}
+
+static void gaudi2_write_mmu_range_register(struct hl_device *hdev, u64 base,
+ struct rr_config *rr_cfg)
+{
+ u32 min_lo_reg_offset, min_hi_reg_offset, max_lo_reg_offset, max_hi_reg_offset;
+
+ switch (rr_cfg->type) {
+ case RR_TYPE_LONG:
+ min_lo_reg_offset = MMU_RR_SEC_MIN_31_0_0_OFFSET;
+ min_hi_reg_offset = MMU_RR_SEC_MIN_63_32_0_OFFSET;
+ max_lo_reg_offset = MMU_RR_SEC_MAX_31_0_0_OFFSET;
+ max_hi_reg_offset = MMU_RR_SEC_MAX_63_32_0_OFFSET;
+ break;
+
+ case RR_TYPE_LONG_PRIV:
+ min_lo_reg_offset = MMU_RR_PRIV_MIN_31_0_0_OFFSET;
+ min_hi_reg_offset = MMU_RR_PRIV_MIN_63_32_0_OFFSET;
+ max_lo_reg_offset = MMU_RR_PRIV_MAX_31_0_0_OFFSET;
+ max_hi_reg_offset = MMU_RR_PRIV_MAX_63_32_0_OFFSET;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid MMU RR type %u\n", rr_cfg->type);
+ return;
+ }
+
+ min_lo_reg_offset += rr_cfg->index * sizeof(u32);
+ min_hi_reg_offset += rr_cfg->index * sizeof(u32);
+ max_lo_reg_offset += rr_cfg->index * sizeof(u32);
+ max_hi_reg_offset += rr_cfg->index * sizeof(u32);
+
+ /* Configure MMU RR (address bits [63:0]) */
+ WREG32(base + min_lo_reg_offset, lower_32_bits(rr_cfg->min));
+ WREG32(base + min_hi_reg_offset, upper_32_bits(rr_cfg->min));
+ WREG32(base + max_lo_reg_offset, lower_32_bits(rr_cfg->max));
+ WREG32(base + max_hi_reg_offset, upper_32_bits(rr_cfg->max));
+}
+
+static void gaudi2_init_mmu_range_registers(struct hl_device *hdev)
+{
+ u32 dcore_id, hmmu_id, hmmu_base;
+ struct rr_config rr_cfg;
+
+ /* Up to 8 ranges [63:0].
+ *
+ * - range 0: Reserved HBM area for F/W and driver
+ */
+
+ /* The RRs are located after the HMMU, so the scrambled addresses must be used */
+ rr_cfg.min = hdev->asic_funcs->scramble_addr(hdev, DRAM_PHYS_BASE);
+ rr_cfg.max = hdev->asic_funcs->scramble_addr(hdev, hdev->asic_prop.dram_user_base_address);
+ rr_cfg.index = 0;
+ rr_cfg.type = RR_TYPE_LONG;
+
+ for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
+ for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) {
+ if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id))
+ continue;
+
+ hmmu_base = mmDCORE0_HMMU0_MMU_BASE + dcore_id * DCORE_OFFSET +
+ hmmu_id * DCORE_HMMU_OFFSET;
+
+ gaudi2_write_mmu_range_register(hdev, hmmu_base, &rr_cfg);
+ }
+ }
+}
+
+/**
+ * gaudi2_init_range_registers - Initialize range registers of all initiators
+ *
+ * @hdev: pointer to hl_device structure
+ */
+static void gaudi2_init_range_registers(struct hl_device *hdev)
+{
+ gaudi2_init_lbw_range_registers(hdev);
+ gaudi2_init_hbw_range_registers(hdev);
+ gaudi2_init_mmu_range_registers(hdev);
+}
+
+/**
+ * gaudi2_init_protection_bits - Initialize protection bits of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, meaning not protected. Each bit that
+ * belongs to a protected register must be cleared to 0.
+ *
+ */
+static int gaudi2_init_protection_bits(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 instance_offset;
+ int rc = 0;
+ u8 i;
+
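+ /* rc accumulates the return codes of all hl_init_pb* calls below */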
+ /* SFT */
+ instance_offset = mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE - mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE;
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
+ gaudi2_pb_sft0, ARRAY_SIZE(gaudi2_pb_sft0),
+ NULL, HL_PB_NA);
+
+ /* HIF */
+ instance_offset = mmDCORE0_HIF1_BASE - mmDCORE0_HIF0_BASE;
+ rc |= hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ NUM_OF_HIF_PER_DCORE, instance_offset,
+ gaudi2_pb_dcr0_hif, ARRAY_SIZE(gaudi2_pb_dcr0_hif),
+ NULL, HL_PB_NA, prop->hmmu_hif_enabled_mask);
+
+ /* RTR */
+ instance_offset = mmDCORE0_RTR1_CTRL_BASE - mmDCORE0_RTR0_CTRL_BASE;
+ rc |= hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
+ gaudi2_pb_dcr0_rtr0, ARRAY_SIZE(gaudi2_pb_dcr0_rtr0),
+ NULL, HL_PB_NA);
+
+ /* HMMU */
+ rc |= hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ NUM_OF_HMMU_PER_DCORE, DCORE_HMMU_OFFSET,
+ gaudi2_pb_dcr0_hmmu0, ARRAY_SIZE(gaudi2_pb_dcr0_hmmu0),
+ NULL, HL_PB_NA, prop->hmmu_hif_enabled_mask);
+
+ /* CPU.
+ * Except for CPU_IF, skip when security is enabled in F/W, because the blocks are protected
+ * by privileged RR.
+ */
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_cpu_if, ARRAY_SIZE(gaudi2_pb_cpu_if),
+ NULL, HL_PB_NA);
+
+ if (!hdev->asic_prop.fw_security_enabled)
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_cpu, ARRAY_SIZE(gaudi2_pb_cpu),
+ NULL, HL_PB_NA);
+
+ /* KDMA */
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_kdma, ARRAY_SIZE(gaudi2_pb_kdma),
+ NULL, HL_PB_NA);
+
+ /* PDMA */
+ instance_offset = mmPDMA1_CORE_BASE - mmPDMA0_CORE_BASE;
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, 2, instance_offset,
+ gaudi2_pb_pdma0, ARRAY_SIZE(gaudi2_pb_pdma0),
+ gaudi2_pb_pdma0_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_pdma0_unsecured_regs));
+
+ /* ARC PDMA */
+ rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 2,
+ instance_offset, gaudi2_pb_pdma0_arc,
+ ARRAY_SIZE(gaudi2_pb_pdma0_arc),
+ gaudi2_pb_pdma0_arc_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_pdma0_arc_unsecured_regs));
+
+ /* EDMA */
+ instance_offset = mmDCORE0_EDMA1_CORE_BASE - mmDCORE0_EDMA0_CORE_BASE;
+ rc |= hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
+ instance_offset, gaudi2_pb_dcr0_edma0,
+ ARRAY_SIZE(gaudi2_pb_dcr0_edma0),
+ gaudi2_pb_dcr0_edma0_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_edma0_unsecured_regs),
+ prop->edma_enabled_mask);
+
+ /* ARC EDMA */
+ rc |= hl_init_pb_ranges_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
+ instance_offset, gaudi2_pb_dcr0_edma0_arc,
+ ARRAY_SIZE(gaudi2_pb_dcr0_edma0_arc),
+ gaudi2_pb_dcr0_edma0_arc_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_edma0_arc_unsecured_regs),
+ prop->edma_enabled_mask);
+
+ /* MME */
+ instance_offset = mmDCORE0_MME_SBTE1_BASE - mmDCORE0_MME_SBTE0_BASE;
+
+ for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
+ /* MME SBTE */
+ rc |= hl_init_pb_single_dcore(hdev, (DCORE_OFFSET * i), 5,
+ instance_offset, gaudi2_pb_dcr0_mme_sbte,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_sbte), NULL,
+ HL_PB_NA);
+
+ /* MME */
+ rc |= hl_init_pb_single_dcore(hdev, (DCORE_OFFSET * i),
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_mme_eng,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_eng),
+ gaudi2_pb_dcr0_mme_eng_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_eng_unsecured_regs));
+ }
+
+ /*
+ * Separate iteration for the case in which we would like to configure
+ * the ARC/QMAN of stubbed MMEs as well
+ */
+ for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
+ /* MME QM */
+ rc |= hl_init_pb_single_dcore(hdev, (DCORE_OFFSET * i),
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_mme_qm,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_qm),
+ gaudi2_pb_dcr0_mme_qm_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_qm_unsecured_regs));
+
+ /* ARC MME */
+ rc |= hl_init_pb_ranges_single_dcore(hdev, (DCORE_OFFSET * i),
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_mme_arc,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_arc),
+ gaudi2_pb_dcr0_mme_arc_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_arc_unsecured_regs));
+ }
+
+ /* MME QM ARC ACP ENG */
+ rc |= hl_init_pb_ranges_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_mme_qm_arc_acp_eng,
+ ARRAY_SIZE(gaudi2_pb_mme_qm_arc_acp_eng),
+ gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs),
+ (BIT(NUM_OF_DCORES * NUM_OF_MME_PER_DCORE) - 1));
+
+ /* TPC */
+ rc |= gaudi2_init_pb_tpc(hdev);
+ rc |= gaudi2_init_pb_tpc_arc(hdev);
+
+ /* SRAM */
+ instance_offset = mmDCORE0_SRAM1_BANK_BASE - mmDCORE0_SRAM0_BANK_BASE;
+ rc |= hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
+ gaudi2_pb_dcr0_sram0, ARRAY_SIZE(gaudi2_pb_dcr0_sram0),
+ NULL, HL_PB_NA);
+
+ /* Sync Manager MSTR IF */
+ rc |= hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_sm_mstr_if,
+ ARRAY_SIZE(gaudi2_pb_dcr0_sm_mstr_if),
+ NULL, HL_PB_NA);
+
+ /* Sync Manager GLBL */
+
+ /* Unsecure all CQ registers */
+ rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_sm_glbl,
+ ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl),
+ gaudi2_pb_dcr_x_sm_glbl_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs));
+
+ /* Secure Dcore0 CQ0 registers */
+ rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_sm_glbl,
+ ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl),
+ gaudi2_pb_dcr0_sm_glbl_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl_unsecured_regs));
+
+ /* PSOC.
+ * Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are
+ * protected by privileged RR.
+ */
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_psoc_global_conf, ARRAY_SIZE(gaudi2_pb_psoc_global_conf),
+ NULL, HL_PB_NA);
+
+ if (!hdev->asic_prop.fw_security_enabled)
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_psoc, ARRAY_SIZE(gaudi2_pb_psoc),
+ NULL, HL_PB_NA);
+
+ /* PMMU */
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_pmmu, ARRAY_SIZE(gaudi2_pb_pmmu),
+ NULL, HL_PB_NA);
+
+ /* PLL.
+ * Skip PSOC/XFT PLL when security is enabled in F/W, because these blocks are protected by
+ * privileged RR.
+ */
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_pmmu_pll, ARRAY_SIZE(gaudi2_pb_pmmu_pll),
+ NULL, HL_PB_NA);
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_xbar_pll, ARRAY_SIZE(gaudi2_pb_xbar_pll),
+ NULL, HL_PB_NA);
+
+ if (!hdev->asic_prop.fw_security_enabled) {
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_psoc_pll, ARRAY_SIZE(gaudi2_pb_psoc_pll),
+ NULL, HL_PB_NA);
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_xft_pll, ARRAY_SIZE(gaudi2_pb_xft_pll),
+ NULL, HL_PB_NA);
+ }
+
+ /* PCIE */
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_pcie, ARRAY_SIZE(gaudi2_pb_pcie),
+ NULL, HL_PB_NA);
+
+ /* Thermal Sensor.
+ * Skip when security is enabled in F/W, because the blocks are protected by privileged RR.
+ */
+ if (!hdev->asic_prop.fw_security_enabled) {
+ instance_offset = mmDCORE1_XFT_BASE - mmDCORE0_XFT_BASE;
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
+ gaudi2_pb_thermal_sensor0,
+ ARRAY_SIZE(gaudi2_pb_thermal_sensor0), NULL, HL_PB_NA);
+ }
+
+ /* HBM */
+ /* Temporarily skip until SW-63348 is solved
+ * instance_offset = mmHBM1_MC0_BASE - mmHBM0_MC0_BASE;
+ * rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, GAUDI2_HBM_NUM,
+ * instance_offset, gaudi2_pb_hbm,
+ * ARRAY_SIZE(gaudi2_pb_hbm), NULL, HL_PB_NA,
+ * prop->dram_enabled_mask);
+ */
+
+ /* Scheduler ARCs */
+ instance_offset = mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE;
+ rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
+ NUM_OF_ARC_FARMS_ARC,
+ instance_offset, gaudi2_pb_arc_sched,
+ ARRAY_SIZE(gaudi2_pb_arc_sched),
+ gaudi2_pb_arc_sched_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_arc_sched_unsecured_regs));
+
+ /* XBAR MIDs */
+ instance_offset = mmXBAR_MID_1_BASE - mmXBAR_MID_0_BASE;
+ rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
+ instance_offset, gaudi2_pb_xbar_mid,
+ ARRAY_SIZE(gaudi2_pb_xbar_mid),
+ gaudi2_pb_xbar_mid_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_xbar_mid_unsecured_regs));
+
+ /* XBAR EDGEs */
+ instance_offset = mmXBAR_EDGE_1_BASE - mmXBAR_EDGE_0_BASE;
+ rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
+ instance_offset, gaudi2_pb_xbar_edge,
+ ARRAY_SIZE(gaudi2_pb_xbar_edge),
+ gaudi2_pb_xbar_edge_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_xbar_edge_unsecured_regs),
+ prop->xbar_edge_enabled_mask);
+
+ /* NIC */
+ rc |= hl_init_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_nic0, ARRAY_SIZE(gaudi2_pb_nic0),
+ NULL, HL_PB_NA, hdev->nic_ports_mask);
+
+ /* NIC QM and QPC */
+ rc |= hl_init_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET,
+ NIC_NUMBER_OF_QM_PER_MACRO, NIC_QM_OFFSET,
+ gaudi2_pb_nic0_qm_qpc, ARRAY_SIZE(gaudi2_pb_nic0_qm_qpc),
+ gaudi2_pb_nic0_qm_qpc_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_nic0_qm_qpc_unsecured_regs),
+ hdev->nic_ports_mask);
+
+ /* NIC QM ARC */
+ rc |= hl_init_pb_ranges_with_mask(hdev, NIC_NUMBER_OF_MACROS,
+ NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO, NIC_QM_OFFSET,
+ gaudi2_pb_nic0_qm_arc_aux0,
+ ARRAY_SIZE(gaudi2_pb_nic0_qm_arc_aux0),
+ gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs),
+ hdev->nic_ports_mask);
+
+ /* NIC UMR */
+ rc |= hl_init_pb_ranges_with_mask(hdev, NIC_NUMBER_OF_MACROS,
+ NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO, NIC_QM_OFFSET,
+ gaudi2_pb_nic0_umr,
+ ARRAY_SIZE(gaudi2_pb_nic0_umr),
+ gaudi2_pb_nic0_umr_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_nic0_umr_unsecured_regs),
+ hdev->nic_ports_mask);
+
+ /* Rotators */
+ instance_offset = mmROT1_BASE - mmROT0_BASE;
+ rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ROT,
+ instance_offset, gaudi2_pb_rot0,
+ ARRAY_SIZE(gaudi2_pb_rot0),
+ gaudi2_pb_rot0_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_rot0_unsecured_regs),
+ (BIT(NUM_OF_ROT) - 1));
+
+ /* Rotators ARCS */
+ rc |= hl_init_pb_ranges_with_mask(hdev, HL_PB_SHARED,
+ HL_PB_NA, NUM_OF_ROT, instance_offset,
+ gaudi2_pb_rot0_arc, ARRAY_SIZE(gaudi2_pb_rot0_arc),
+ gaudi2_pb_rot0_arc_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_rot0_arc_unsecured_regs),
+ (BIT(NUM_OF_ROT) - 1));
+
+ rc |= gaudi2_init_pb_sm_objs(hdev);
+
+ return rc;
+}
+
+/**
+ * gaudi2_init_security - Initialize security model
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the security model of the device.
+ * That includes range registers and a protection bit per register.
+ */
+int gaudi2_init_security(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = gaudi2_init_protection_bits(hdev);
+ if (rc)
+ return rc;
+
+ gaudi2_init_range_registers(hdev);
+
+ return 0;
+}
+
+struct gaudi2_ack_pb_tpc_data {
+ u32 tpc_regs_array_size;
+ u32 arc_tpc_regs_array_size;
+};
+
+static void gaudi2_ack_pb_tpc_config(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ void *data)
+{
+ struct gaudi2_ack_pb_tpc_data *pb_data = (struct gaudi2_ack_pb_tpc_data *)data;
+
+ hl_ack_pb_single_dcore(hdev, offset, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_tpc0, pb_data->tpc_regs_array_size);
+
+ hl_ack_pb_single_dcore(hdev, offset, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_tpc0_arc, pb_data->arc_tpc_regs_array_size);
+}
+
+static void gaudi2_ack_pb_tpc(struct hl_device *hdev)
+{
+ struct iterate_module_ctx tpc_iter = {
+ .fn = &gaudi2_ack_pb_tpc_config,
+ };
+ struct gaudi2_ack_pb_tpc_data data;
+
+ data.tpc_regs_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0);
+ data.arc_tpc_regs_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc);
+ tpc_iter.data = &data;
+
+ gaudi2_iterate_tpcs(hdev, &tpc_iter);
+}
+
+/**
+ * gaudi2_ack_protection_bits_errors - scan all blocks having protection bits
+ * and for every protection error found, display the appropriate error message
+ * and clear the error.
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, meaning not protected. Each bit that
+ * belongs to a protected register must be cleared to 0.
+ *
+ */
+void gaudi2_ack_protection_bits_errors(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 instance_offset;
+ u8 i;
+
+ /* SFT */
+ instance_offset = mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE - mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE;
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
+ gaudi2_pb_sft0, ARRAY_SIZE(gaudi2_pb_sft0));
+
+ /* HIF */
+ instance_offset = mmDCORE0_HIF1_BASE - mmDCORE0_HIF0_BASE;
+ hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ NUM_OF_HIF_PER_DCORE, instance_offset,
+ gaudi2_pb_dcr0_hif, ARRAY_SIZE(gaudi2_pb_dcr0_hif),
+ prop->hmmu_hif_enabled_mask);
+
+ /* RTR */
+ instance_offset = mmDCORE0_RTR1_CTRL_BASE - mmDCORE0_RTR0_CTRL_BASE;
+ hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
+ gaudi2_pb_dcr0_rtr0, ARRAY_SIZE(gaudi2_pb_dcr0_rtr0));
+
+ /* HMMU */
+ hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ NUM_OF_HMMU_PER_DCORE, DCORE_HMMU_OFFSET,
+ gaudi2_pb_dcr0_hmmu0, ARRAY_SIZE(gaudi2_pb_dcr0_hmmu0),
+ prop->hmmu_hif_enabled_mask);
+
+ /* CPU.
+ * Except for CPU_IF, skip when security is enabled in F/W, because the blocks are protected
+ * by privileged RR.
+ */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_cpu_if, ARRAY_SIZE(gaudi2_pb_cpu_if));
+ if (!hdev->asic_prop.fw_security_enabled)
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_cpu, ARRAY_SIZE(gaudi2_pb_cpu));
+
+ /* KDMA */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_kdma, ARRAY_SIZE(gaudi2_pb_kdma));
+
+ /* PDMA */
+ instance_offset = mmPDMA1_CORE_BASE - mmPDMA0_CORE_BASE;
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 2, instance_offset,
+ gaudi2_pb_pdma0, ARRAY_SIZE(gaudi2_pb_pdma0));
+
+ /* ARC PDMA */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 2, instance_offset,
+ gaudi2_pb_pdma0_arc, ARRAY_SIZE(gaudi2_pb_pdma0_arc));
+
+ /* EDMA */
+ instance_offset = mmDCORE0_EDMA1_CORE_BASE - mmDCORE0_EDMA0_CORE_BASE;
+ hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
+ instance_offset, gaudi2_pb_dcr0_edma0,
+ ARRAY_SIZE(gaudi2_pb_dcr0_edma0),
+ prop->edma_enabled_mask);
+
+ /* ARC EDMA */
+ hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
+ instance_offset, gaudi2_pb_dcr0_edma0_arc,
+ ARRAY_SIZE(gaudi2_pb_dcr0_edma0_arc),
+ prop->edma_enabled_mask);
+
+ /* MME */
+ instance_offset = mmDCORE0_MME_SBTE1_BASE - mmDCORE0_MME_SBTE0_BASE;
+
+ for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
+ /* MME SBTE */
+ hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i), 5,
+ instance_offset, gaudi2_pb_dcr0_mme_sbte,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_sbte));
+
+ /* MME */
+ hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i),
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_mme_eng,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_eng));
+ }
+
+ /*
+ * Separate iteration for the case in which we would like to configure
+ * the ARC/QMAN of stubbed MMEs as well
+ */
+ for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
+ /* MME QM */
+ hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i),
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_mme_qm,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_qm));
+
+ /* ARC MME */
+ hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i),
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_mme_arc,
+ ARRAY_SIZE(gaudi2_pb_dcr0_mme_arc));
+ }
+
+ /* MME QM ARC ACP ENG */
+ hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_mme_qm_arc_acp_eng,
+ ARRAY_SIZE(gaudi2_pb_mme_qm_arc_acp_eng),
+ (BIT(NUM_OF_DCORES * NUM_OF_MME_PER_DCORE) - 1));
+
+ /* TPC */
+ gaudi2_ack_pb_tpc(hdev);
+
+ /* SRAM */
+ instance_offset = mmDCORE0_SRAM1_BANK_BASE - mmDCORE0_SRAM0_BANK_BASE;
+ hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
+ gaudi2_pb_dcr0_sram0, ARRAY_SIZE(gaudi2_pb_dcr0_sram0));
+
+ /* Sync Manager MSTR IF */
+ hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_sm_mstr_if, ARRAY_SIZE(gaudi2_pb_dcr0_sm_mstr_if));
+
+ /* Sync Manager */
+ hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_sm_glbl, ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl));
+
+ hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr0_sm_mstr_if, ARRAY_SIZE(gaudi2_pb_dcr0_sm_mstr_if));
+
+ /* PSOC.
+ * Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are
+ * protected by privileged RR.
+ */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_psoc_global_conf, ARRAY_SIZE(gaudi2_pb_psoc_global_conf));
+ if (!hdev->asic_prop.fw_security_enabled)
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_psoc, ARRAY_SIZE(gaudi2_pb_psoc));
+
+ /* PMMU */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_pmmu, ARRAY_SIZE(gaudi2_pb_pmmu));
+
+ /* PLL.
+ * Skip PSOC/XFT PLL when security is enabled in F/W, because these blocks are protected by
+ * privileged RR.
+ */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_pmmu_pll, ARRAY_SIZE(gaudi2_pb_pmmu_pll));
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_xbar_pll, ARRAY_SIZE(gaudi2_pb_xbar_pll));
+ if (!hdev->asic_prop.fw_security_enabled) {
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_psoc_pll, ARRAY_SIZE(gaudi2_pb_psoc_pll));
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_xft_pll, ARRAY_SIZE(gaudi2_pb_xft_pll));
+ }
+
+ /* PCIE */
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_pcie, ARRAY_SIZE(gaudi2_pb_pcie));
+
+ /* Thermal Sensor.
+ * Skip when security is enabled in F/W, because the blocks are protected by privileged RR.
+ */
+ if (!hdev->asic_prop.fw_security_enabled) {
+ instance_offset = mmDCORE1_XFT_BASE - mmDCORE0_XFT_BASE;
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
+ gaudi2_pb_thermal_sensor0, ARRAY_SIZE(gaudi2_pb_thermal_sensor0));
+ }
+
+ /* HBM */
+ instance_offset = mmHBM1_MC0_BASE - mmHBM0_MC0_BASE;
+ hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, GAUDI2_HBM_NUM,
+ instance_offset, gaudi2_pb_hbm,
+ ARRAY_SIZE(gaudi2_pb_hbm), prop->dram_enabled_mask);
+
+ /* Scheduler ARCs */
+ instance_offset = mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE;
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ARC_FARMS_ARC,
+ instance_offset, gaudi2_pb_arc_sched,
+ ARRAY_SIZE(gaudi2_pb_arc_sched));
+
+ /* XBAR MIDs */
+ instance_offset = mmXBAR_MID_1_BASE - mmXBAR_MID_0_BASE;
+ hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
+ instance_offset, gaudi2_pb_xbar_mid,
+ ARRAY_SIZE(gaudi2_pb_xbar_mid));
+
+ /* XBAR EDGEs */
+ instance_offset = mmXBAR_EDGE_1_BASE - mmXBAR_EDGE_0_BASE;
+ hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
+ instance_offset, gaudi2_pb_xbar_edge,
+ ARRAY_SIZE(gaudi2_pb_xbar_edge), prop->xbar_edge_enabled_mask);
+
+ /* NIC */
+ hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_nic0, ARRAY_SIZE(gaudi2_pb_nic0), hdev->nic_ports_mask);
+
+ /* NIC QM and QPC */
+ hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO,
+ NIC_QM_OFFSET, gaudi2_pb_nic0_qm_qpc, ARRAY_SIZE(gaudi2_pb_nic0_qm_qpc),
+ hdev->nic_ports_mask);
+
+ /* NIC QM ARC */
+ hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO,
+ NIC_QM_OFFSET, gaudi2_pb_nic0_qm_arc_aux0,
+ ARRAY_SIZE(gaudi2_pb_nic0_qm_arc_aux0), hdev->nic_ports_mask);
+
+ /* NIC UMR */
+ hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO,
+ NIC_QM_OFFSET, gaudi2_pb_nic0_umr, ARRAY_SIZE(gaudi2_pb_nic0_umr),
+ hdev->nic_ports_mask);
+
+ /* Rotators */
+ instance_offset = mmROT1_BASE - mmROT0_BASE;
+ hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ROT, instance_offset,
+ gaudi2_pb_rot0, ARRAY_SIZE(gaudi2_pb_rot0), (BIT(NUM_OF_ROT) - 1));
+
+ /* Rotators ARCS */
+ hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ROT, instance_offset,
+ gaudi2_pb_rot0_arc, ARRAY_SIZE(gaudi2_pb_rot0_arc), (BIT(NUM_OF_ROT) - 1));
+}
+
+/*
+ * Print PB security errors
+ */
+
+void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32 cause,
+ u32 offended_addr)
+{
+ int i = 0;
+ const char *error_format =
+ "Security error at block 0x%x, offending address 0x%x\n"
+ "Cause 0x%x: %s %s %s %s %s %s %s %s\n";
+ char *mcause[8] = {"Unknown", "", "", "", "", "", "", "" };
+
+ if (!cause)
+ return;
+
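+ /* Fill one string per recognized cause bit; mcause[0] stays "Unknown" if none match */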
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD)
+ mcause[i++] = "APB_PRIV_RD";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD)
+ mcause[i++] = "APB_SEC_RD";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD)
+ mcause[i++] = "APB_UNMAPPED_RD";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR)
+ mcause[i++] = "APB_PRIV_WR";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR)
+ mcause[i++] = "APB_SEC_WR";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR)
+ mcause[i++] = "APB_UNMAPPED_WR";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR)
+ mcause[i++] = "EXT_SEC_WR";
+
+ if (cause & SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR)
+ mcause[i++] = "APB_EXT_UNMAPPED_WR";
+
+ dev_err_ratelimited(hdev->dev, error_format, block_addr, offended_addr,
+ cause, mcause[0], mcause[1], mcause[2], mcause[3],
+ mcause[4], mcause[5], mcause[6], mcause[7]);
+}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 4cde505a7416..db4487c33582 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -389,11 +389,12 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
}
+ prop->cfg_base_address = CFG_BASE;
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->host_base_address = HOST_PHYS_BASE;
prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
-
+ prop->completion_mode = HL_COMPLETION_MODE_JOB;
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
prop->dram_end_address = prop->dram_base_address + prop->dram_size;
@@ -470,7 +471,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
- prop->first_available_user_msix_interrupt = USHRT_MAX;
+ prop->first_available_user_interrupt = USHRT_MAX;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@@ -608,6 +609,7 @@ static int goya_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
+ resource_size_t pci_bar_size;
u32 fw_boot_status, val;
int rc;
@@ -618,24 +620,20 @@ static int goya_early_init(struct hl_device *hdev)
}
/* Check BAR sizes */
- if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
- dev_err(hdev->dev,
- "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
- SRAM_CFG_BAR_ID,
- (unsigned long long) pci_resource_len(pdev,
- SRAM_CFG_BAR_ID),
- CFG_BAR_SIZE);
+ pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID);
+
+ if (pci_bar_size != CFG_BAR_SIZE) {
+ dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
+ SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
- if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
- dev_err(hdev->dev,
- "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
- MSIX_BAR_ID,
- (unsigned long long) pci_resource_len(pdev,
- MSIX_BAR_ID),
- MSIX_BAR_SIZE);
+ pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID);
+
+ if (pci_bar_size != MSIX_BAR_SIZE) {
+ dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
+ MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
@@ -667,11 +665,7 @@ pci_init:
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
- rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
- mmCPU_BOOT_DEV_STS0,
- mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
- mmCPU_BOOT_ERR1,
- GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
+ rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
hdev->asic_funcs->hw_fini(hdev, true, false);
@@ -679,8 +673,7 @@ pci_init:
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
+ dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
hdev->asic_funcs->hw_fini(hdev, true, false);
}
@@ -894,7 +887,7 @@ int goya_late_init(struct hl_device *hdev)
*/
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI access from CPU %d\n", rc);
@@ -1012,11 +1005,9 @@ static int goya_sw_init(struct hl_device *hdev)
goto free_goya_device;
}
- hdev->cpu_accessible_dma_mem =
- hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- &hdev->cpu_accessible_dma_address,
- GFP_KERNEL | __GFP_ZERO);
+ hdev->cpu_accessible_dma_mem = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
+ &hdev->cpu_accessible_dma_address,
+ GFP_KERNEL | __GFP_ZERO);
if (!hdev->cpu_accessible_dma_mem) {
rc = -ENOMEM;
@@ -1046,7 +1037,7 @@ static int goya_sw_init(struct hl_device *hdev)
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
- hdev->asic_prop.supports_soft_reset = true;
+ hdev->asic_prop.supports_compute_reset = true;
hdev->asic_prop.allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
@@ -1066,10 +1057,8 @@ static int goya_sw_init(struct hl_device *hdev)
free_cpu_accessible_dma_pool:
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- hdev->cpu_accessible_dma_mem,
- hdev->cpu_accessible_dma_address);
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
free_dma_pool:
dma_pool_destroy(hdev->dma_pool);
free_goya_device:
@@ -1090,10 +1079,8 @@ static int goya_sw_fini(struct hl_device *hdev)
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_CPU_ACCESSIBLE_MEM_SIZE,
- hdev->cpu_accessible_dma_mem,
- hdev->cpu_accessible_dma_address);
+ hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
dma_pool_destroy(hdev->dma_pool);
@@ -2588,6 +2575,18 @@ static void goya_init_static_firmware_loader(struct hl_device *hdev)
static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
}
+static void goya_init_firmware_preload_params(struct hl_device *hdev)
+{
+ struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
+
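+ /* Parameters used when reading the preboot status (see hl_fw_read_preboot_status()) */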
+ pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
+ pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
+ pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
+ pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
+ pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
+ pre_fw_load->wait_for_preboot_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
+}
+
static void goya_init_firmware_loader(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -2878,7 +2877,7 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
- rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -3019,7 +3018,7 @@ static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}
-int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
+int goya_scrub_device_mem(struct hl_device *hdev)
{
return 0;
}
@@ -3102,8 +3101,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
return -EBUSY;
}
- fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
- &fence_dma_addr);
+ fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate fence memory for QMAN0\n");
@@ -3143,8 +3141,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
}
free_fence_ptr:
- hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
- fence_dma_addr);
+ hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
goya_qman0_set_security(hdev, false);
@@ -3180,8 +3177,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
fence_val = GOYA_QMAN0_FENCE_VAL;
- fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
- &fence_dma_addr);
+ fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate memory for H/W queue %d testing\n",
@@ -3191,9 +3187,8 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
*fence_ptr = 0;
- fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
- sizeof(struct packet_msg_prot),
- GFP_KERNEL, &pkt_dma_addr);
+ fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
+ &pkt_dma_addr);
if (!fence_pkt) {
dev_err(hdev->dev,
"Failed to allocate packet for H/W queue %d testing\n",
@@ -3232,11 +3227,9 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
}
free_pkt:
- hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
- pkt_dma_addr);
+ hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
free_fence_ptr:
- hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
- fence_dma_addr);
+ hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
return rc;
}
@@ -3403,7 +3396,7 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
{
u64 device_memory_addr, addr;
enum dma_data_direction dir;
- enum goya_dma_direction user_dir;
+ enum hl_goya_dma_direction user_dir;
bool sram_addr = true;
bool skip_host_mem_pin = false;
bool user_memset;
@@ -3419,7 +3412,7 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
switch (user_dir) {
- case DMA_HOST_TO_DRAM:
+ case HL_DMA_HOST_TO_DRAM:
dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
dir = DMA_TO_DEVICE;
sram_addr = false;
@@ -3429,7 +3422,7 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
skip_host_mem_pin = true;
break;
- case DMA_DRAM_TO_HOST:
+ case HL_DMA_DRAM_TO_HOST:
dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
dir = DMA_FROM_DEVICE;
sram_addr = false;
@@ -3437,7 +3430,7 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
break;
- case DMA_HOST_TO_SRAM:
+ case HL_DMA_HOST_TO_SRAM:
dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
dir = DMA_TO_DEVICE;
addr = le64_to_cpu(user_dma_pkt->src_addr);
@@ -3446,14 +3439,14 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
skip_host_mem_pin = true;
break;
- case DMA_SRAM_TO_HOST:
+ case HL_DMA_SRAM_TO_HOST:
dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
dir = DMA_FROM_DEVICE;
addr = le64_to_cpu(user_dma_pkt->dst_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
break;
default:
- dev_err(hdev->dev, "DMA direction is undefined\n");
+ dev_err(hdev->dev, "DMA direction %d is unsupported/undefined\n", user_dir);
return -EFAULT;
}
@@ -3505,14 +3498,14 @@ static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
struct packet_lin_dma *user_dma_pkt)
{
u64 sram_memory_addr, dram_memory_addr;
- enum goya_dma_direction user_dir;
+ enum hl_goya_dma_direction user_dir;
u32 ctl;
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
- if (user_dir == DMA_DRAM_TO_SRAM) {
+ if (user_dir == HL_DMA_DRAM_TO_SRAM) {
dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
@@ -3549,7 +3542,7 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt)
{
- enum goya_dma_direction user_dir;
+ enum hl_goya_dma_direction user_dir;
u32 ctl;
int rc;
@@ -3574,7 +3567,7 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
return -EINVAL;
}
- if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
+ if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM))
rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
else
rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
@@ -3781,7 +3774,7 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
u32 count, dma_desc_cnt;
u64 len, len_next;
dma_addr_t dma_addr, dma_addr_next;
- enum goya_dma_direction user_dir;
+ enum hl_goya_dma_direction user_dir;
u64 device_memory_addr, addr;
enum dma_data_direction dir;
struct sg_table *sgt;
@@ -3797,14 +3790,14 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
- if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
+ if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM) ||
(user_dma_pkt->tsize == 0)) {
memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
*new_dma_pkt_size = sizeof(*new_dma_pkt);
return 0;
}
- if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
+ if ((user_dir == HL_DMA_HOST_TO_DRAM) || (user_dir == HL_DMA_HOST_TO_SRAM)) {
addr = le64_to_cpu(user_dma_pkt->src_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
dir = DMA_TO_DEVICE;
@@ -4166,8 +4159,8 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
}
void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
- u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
- bool eb)
+ u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
+ u32 msix_vec, bool eb)
{
struct packet_msg_prot *cq_pkt;
u32 tmp;
@@ -4804,7 +4797,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
(1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
(1 << GOYA_PKT_CTL_RB_SHIFT) |
(1 << GOYA_PKT_CTL_MB_SHIFT));
- ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
+ ctl |= (is_dram ? HL_DMA_HOST_TO_DRAM : HL_DMA_HOST_TO_SRAM) <<
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
lin_dma_pkt->ctl = cpu_to_le32(ctl);
@@ -5268,6 +5261,11 @@ static int goya_ctx_init(struct hl_ctx *ctx)
return 0;
}
+static int goya_pre_schedule_cs(struct hl_cs *cs)
+{
+ return 0;
+}
+
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
return cq_idx;
@@ -5347,6 +5345,11 @@ static void goya_enable_events_from_fw(struct hl_device *hdev)
GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
}
+static int goya_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
+{
+ return -EINVAL;
+}
+
static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
{
switch (pll_idx) {
@@ -5417,15 +5420,13 @@ static u32 *goya_get_stream_master_qid_arr(void)
return NULL;
}
-static void goya_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
{
- /* set 0 since multiple pages are not supported */
- info->page_order_bitmask = 0;
+ return -EOPNOTSUPP;
}
-static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
+static void goya_check_if_razwi_happened(struct hl_device *hdev)
{
- return -EOPNOTSUPP;
}
static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
@@ -5461,7 +5462,6 @@ static const struct hl_asic_funcs goya_funcs = {
.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = goya_cs_parser,
.asic_dma_map_sgtable = hl_dma_map_sgtable,
- .get_dma_desc_list_size = goya_get_dma_desc_list_size,
.add_end_of_cb_packets = goya_add_end_of_cb_packets,
.update_eq_ci = goya_update_eq_ci,
.context_switch = goya_context_switch,
@@ -5481,6 +5481,8 @@ static const struct hl_asic_funcs goya_funcs = {
.non_hard_reset_late_init = goya_non_hard_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
.hw_queues_unlock = goya_hw_queues_unlock,
+ .kdma_lock = NULL,
+ .kdma_unlock = NULL,
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
.get_monitor_dump = goya_get_monitor_dump,
@@ -5492,6 +5494,7 @@ static const struct hl_asic_funcs goya_funcs = {
.halt_coresight = goya_halt_coresight,
.ctx_init = goya_ctx_init,
.ctx_fini = goya_ctx_fini,
+ .pre_schedule_cs = goya_pre_schedule_cs,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
@@ -5502,24 +5505,27 @@ static const struct hl_asic_funcs goya_funcs = {
.reset_sob = goya_reset_sob,
.reset_sob_group = goya_reset_sob_group,
.get_device_time = goya_get_device_time,
+ .pb_print_security_errors = NULL,
.collective_wait_init_cs = goya_collective_wait_init_cs,
.collective_wait_create_jobs = goya_collective_wait_create_jobs,
+ .get_dec_base_addr = NULL,
.scramble_addr = hl_mmu_scramble_addr,
.descramble_addr = hl_mmu_descramble_addr,
.ack_protection_bits_errors = goya_ack_protection_bits_errors,
.get_hw_block_id = goya_get_hw_block_id,
.hw_block_mmap = goya_block_mmap,
.enable_events_from_fw = goya_enable_events_from_fw,
+ .ack_mmu_errors = goya_ack_mmu_page_fault_or_access_error,
.map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
+ .init_firmware_preload_params = goya_init_firmware_preload_params,
.init_firmware_loader = goya_init_firmware_loader,
.init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
.state_dump_init = goya_state_dump_init,
.get_sob_addr = &goya_get_sob_addr,
.set_pci_memory_regions = goya_set_pci_memory_regions,
.get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
- .is_valid_dram_page_size = NULL,
+ .check_if_razwi_happened = goya_check_if_razwi_happened,
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
- .get_valid_dram_page_orders = goya_get_valid_dram_page_orders,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = goya_set_ddr_bar_base,
};
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 647f57402616..d6ec43d6f6b0 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -230,10 +230,10 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
- u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
- bool eb);
+ u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
+ u32 msix_vec, bool eb);
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
-int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size);
+int goya_scrub_device_mem(struct hl_device *hdev);
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 6580fc6a486a..b595721751c1 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -359,6 +359,7 @@ static struct attribute *goya_clk_dev_attrs[] = {
&dev_attr_pm_mng_profile.attr,
&dev_attr_tpc_clk.attr,
&dev_attr_tpc_clk_curr.attr,
+ NULL,
};
static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -375,6 +376,7 @@ static DEVICE_ATTR_RO(infineon_ver);
static struct attribute *goya_vrm_dev_attrs[] = {
&dev_attr_infineon_ver.attr,
+ NULL,
};
void goya_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 38e44b6cf581..abf40e1c4965 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2020-2021 HabanaLabs, Ltd.
+ * Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -68,7 +68,8 @@ struct hl_eq_ecc_data {
__le64 ecc_address;
__le64 ecc_syndrom;
__u8 memory_wrapper_idx;
- __u8 pad[7];
+ __u8 is_critical;
+ __u8 pad[6];
};
enum hl_sm_sei_cause {
@@ -98,27 +99,265 @@ struct hl_eq_fw_alive {
__u8 pad[7];
};
-enum hl_pcie_addr_dec_cause {
- PCIE_ADDR_DEC_HBW_ERR_RESP,
- PCIE_ADDR_DEC_LBW_ERR_RESP,
- PCIE_ADDR_DEC_TLP_BLOCKED_BY_RR
+struct hl_eq_intr_cause {
+ __le64 intr_cause_data;
};
-struct hl_eq_pcie_addr_dec_data {
- /* enum hl_pcie_addr_dec_cause */
- __u8 addr_dec_cause;
- __u8 pad[7];
+struct hl_eq_pcie_drain_ind_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 drain_wr_addr_lbw;
+ __le64 drain_rd_addr_lbw;
+ __le64 drain_wr_addr_hbw;
+ __le64 drain_rd_addr_hbw;
+};
+
+struct hl_eq_razwi_lbw_info_regs {
+ __le32 rr_aw_razwi_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+struct hl_eq_razwi_hbw_info_regs {
+ __le32 rr_aw_razwi_hi_reg;
+ __le32 rr_aw_razwi_lo_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_hi_reg;
+ __le32 rr_ar_razwi_lo_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+/* razwi_happened masks */
+#define RAZWI_HAPPENED_HBW 0x1
+#define RAZWI_HAPPENED_LBW 0x2
+#define RAZWI_HAPPENED_AW 0x4
+#define RAZWI_HAPPENED_AR 0x8
+
+struct hl_eq_razwi_info {
+ __le32 razwi_happened_mask;
+ union {
+ struct hl_eq_razwi_lbw_info_regs lbw;
+ struct hl_eq_razwi_hbw_info_regs hbw;
+ };
+ __le32 pad;
+};
+
+struct hl_eq_razwi_with_intr_cause {
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_intr_cause intr_cause;
+};
+
+#define HBM_CA_ERR_CMD_LIFO_LEN 8
+#define HBM_RD_ERR_DATA_LIFO_LEN 8
+#define HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+ * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_header fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
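+/* The masks above follow the hbm_rd_addr bit-field layout below */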
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
+
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * This struct represents the following SEI causes:
+ * command parity, ECC double error, ECC single error, DFI error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * Some only use the header while some have extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+ /* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TPC1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+ /* More info related to the interrupt e.g. queue index
+ * in case of DCCM_QUEUE_FULL interrupt.
+ */
+ __le64 payload;
+ __le64 pad[5];
};
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
struct hl_eq_ecc_data ecc_data;
- struct hl_eq_hbm_ecc_data hbm_ecc_data;
+ struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Gaudi1 HBM */
struct hl_eq_sm_sei_data sm_sei_data;
struct cpucp_pkt_sync_err pkt_sync_err;
struct hl_eq_fw_alive fw_alive;
- struct hl_eq_pcie_addr_dec_data pcie_addr_dec_data;
+ struct hl_eq_intr_cause intr_cause;
+ struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data;
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
+ struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
+ struct hl_eq_engine_arc_intr_data arc_data;
__le64 data[7];
};
};
@@ -792,10 +1031,23 @@ struct cpucp_security_info {
* @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
* @dram_size: available DRAM size.
* @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance
+ * (0 = functional, 1 = binned)
+ * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance
+ * (0 = functional, 1 = binned), maximum 1 per dcore
+ * @sram_binning: Categorize SRAM functionality
+ * (0 = fully functional, 1 = lower-half is not functional,
+ * 2 = upper-half is not functional)
* @sec_info: security information
* @pll_map: Bit map of supported PLLs for current ASIC version.
* @mme_binning_mask: MME binning mask,
- * (0 = functional, 1 = binned)
+ * bits [0:6] <==> dcore0 mme fma
+ * bits [7:13] <==> dcore1 mme fma
+ * bits [14:20] <==> dcore0 mme ima
+ * bits [21:27] <==> dcore1 mme ima
+ * For each group, if the 6th bit is set then the first 5 bits
+ * represent the column index [0-31]; otherwise these bits are
+ * ignored and column index 32 is binned. The 7th bit is don't-care.
* @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
* (0 = functional 1 = binned)
* @memory_repair_flag: eFuse flag indicating memory repair
@@ -803,6 +1055,8 @@ struct cpucp_security_info {
* (0 = functional 1 = binned)
* @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
* (0 = functional 1 = binned)
+ * @interposer_version: Interposer version programmed in eFuse
+ * @substrate_version: Substrate version programmed in eFuse
* @fw_os_version: Firmware OS Version
*/
struct cpucp_info {
@@ -819,16 +1073,18 @@ struct cpucp_info {
__le32 infineon_second_stage_version;
__le64 dram_size;
char card_name[CARD_NAME_MAX_LEN];
- __le64 reserved3;
- __le64 reserved4;
- __u8 reserved5;
+ __le64 tpc_binning_mask;
+ __le64 decoder_binning_mask;
+ __u8 sram_binning;
__u8 dram_binning_mask;
__u8 memory_repair_flag;
__u8 edma_binning_mask;
__u8 xbar_binning_mask;
- __u8 pad[3];
+ __u8 interposer_version;
+ __u8 substrate_version;
+ __u8 reserved2;
struct cpucp_security_info sec_info;
- __le32 reserved6;
+ __le32 reserved3;
__u8 pll_map[PLL_MAP_LEN];
__le64 mme_binning_mask;
__u8 fw_os_version[VERSION_MAX_LEN];
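A hedged sketch of decoding mme_binning_mask per the description above, assuming the four 7-bit groups sit at bit offsets 0, 7, 14 and 21 and that "6th bit" means bit 5 of the group (the helper name and that bit-numbering reading are assumptions):

	/* Illustrative decode of one 7-bit MME binning group (g = 0..3) */
	static u32 mme_binned_col(u64 mme_binning_mask, unsigned int g)
	{
		u32 grp = (mme_binning_mask >> (7 * g)) & 0x7F;

		/* bit 5 set: bits 0-4 give the binned column, else column 32 is binned */
		return (grp & (1u << 5)) ? (grp & 0x1F) : 32;
	}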
@@ -932,6 +1188,11 @@ struct cpucp_hbm_row_replaced_rows_info {
struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
};
+enum cpu_reset_status {
+ CPU_RST_STATUS_NA = 0,
+ CPU_RST_STATUS_SOFT_RST_DONE = 1,
+};
+
/*
* struct dcore_monitor_regs_data - DCORE monitor regs data.
* the structure follows sync manager block layout. relevant only to Gaudi.
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 15f91ae9de6e..a3594119bc51 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -525,6 +525,13 @@ struct lkd_fw_comms_msg {
struct {
__u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
};
+ struct {
+ __le64 tpc_binning_conf;
+ __le32 dec_binning_conf;
+ __le32 hbm_binning_conf;
+ __le32 edma_binning_conf;
+ __le32 mme_redundancy_conf; /* use MME_REDUNDANT_COLUMN */
+ };
};
};
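The new members carry per-engine binning configuration from the LKD to the firmware. A hedged sketch of filling them from the masks reported in cpucp_info, assuming the HBM configuration corresponds to the DRAM binning mask (the helper name and that mapping are assumptions, not taken from the driver):

	/* Illustrative: populate the binning members of the comms message */
	static void fill_binning_conf(struct lkd_fw_comms_msg *msg,
				      const struct cpucp_info *info)
	{
		msg->tpc_binning_conf = info->tpc_binning_mask;
		msg->dec_binning_conf = cpu_to_le32((u32)le64_to_cpu(info->decoder_binning_mask));
		msg->hbm_binning_conf = cpu_to_le32(info->dram_binning_mask);
		msg->edma_binning_conf = cpu_to_le32(info->edma_binning_mask);
		/* mme_redundancy_conf follows the MME_REDUNDANT_COLUMN convention
		 * referenced above (derivation not shown here).
		 */
	}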
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index acc85d3ed98b..880c57b26c63 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -444,6 +444,7 @@ enum axi_id {
#define QM_ARB_ERR_MSG_EN_MASK (\
QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
+ QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
diff --git a/drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h b/drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
new file mode 100644
index 000000000000..2cf30c206ac6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 HabanaLabs Ltd.
+ * All Rights Reserved.
+ */
+
+#ifndef __GAUDI2_ARC_COMMON_PACKETS_H__
+#define __GAUDI2_ARC_COMMON_PACKETS_H__
+
+/*
+ * CPU IDs of the ARC CPUs
+ */
+
+#define CPU_ID_SCHED_ARC0 0 /* FARM_ARC0 */
+#define CPU_ID_SCHED_ARC1 1 /* FARM_ARC1 */
+#define CPU_ID_SCHED_ARC2 2 /* FARM_ARC2 */
+#define CPU_ID_SCHED_ARC3 3 /* FARM_ARC3 */
+/* Dcore1 MME Engine ARC instance used as scheduler */
+#define CPU_ID_SCHED_ARC4 4 /* DCORE1_MME0 */
+/* Dcore3 MME Engine ARC instance used as scheduler */
+#define CPU_ID_SCHED_ARC5 5 /* DCORE3_MME0 */
+
+#define CPU_ID_TPC_QMAN_ARC0 6 /* DCORE0_TPC0 */
+#define CPU_ID_TPC_QMAN_ARC1 7 /* DCORE0_TPC1 */
+#define CPU_ID_TPC_QMAN_ARC2 8 /* DCORE0_TPC2 */
+#define CPU_ID_TPC_QMAN_ARC3 9 /* DCORE0_TPC3 */
+#define CPU_ID_TPC_QMAN_ARC4 10 /* DCORE0_TPC4 */
+#define CPU_ID_TPC_QMAN_ARC5 11 /* DCORE0_TPC5 */
+#define CPU_ID_TPC_QMAN_ARC6 12 /* DCORE1_TPC0 */
+#define CPU_ID_TPC_QMAN_ARC7 13 /* DCORE1_TPC1 */
+#define CPU_ID_TPC_QMAN_ARC8 14 /* DCORE1_TPC2 */
+#define CPU_ID_TPC_QMAN_ARC9 15 /* DCORE1_TPC3 */
+#define CPU_ID_TPC_QMAN_ARC10 16 /* DCORE1_TPC4 */
+#define CPU_ID_TPC_QMAN_ARC11 17 /* DCORE1_TPC5 */
+#define CPU_ID_TPC_QMAN_ARC12 18 /* DCORE2_TPC0 */
+#define CPU_ID_TPC_QMAN_ARC13 19 /* DCORE2_TPC1 */
+#define CPU_ID_TPC_QMAN_ARC14 20 /* DCORE2_TPC2 */
+#define CPU_ID_TPC_QMAN_ARC15 21 /* DCORE2_TPC3 */
+#define CPU_ID_TPC_QMAN_ARC16 22 /* DCORE2_TPC4 */
+#define CPU_ID_TPC_QMAN_ARC17 23 /* DCORE2_TPC5 */
+#define CPU_ID_TPC_QMAN_ARC18 24 /* DCORE3_TPC0 */
+#define CPU_ID_TPC_QMAN_ARC19 25 /* DCORE3_TPC1 */
+#define CPU_ID_TPC_QMAN_ARC20 26 /* DCORE3_TPC2 */
+#define CPU_ID_TPC_QMAN_ARC21 27 /* DCORE3_TPC3 */
+#define CPU_ID_TPC_QMAN_ARC22 28 /* DCORE3_TPC4 */
+#define CPU_ID_TPC_QMAN_ARC23 29 /* DCORE3_TPC5 */
+#define CPU_ID_TPC_QMAN_ARC24 30 /* DCORE0_TPC6 - Never present */
+
+#define CPU_ID_MME_QMAN_ARC0 31 /* DCORE0_MME0 */
+#define CPU_ID_MME_QMAN_ARC1 32 /* DCORE2_MME0 */
+
+#define CPU_ID_EDMA_QMAN_ARC0 33 /* DCORE0_EDMA0 */
+#define CPU_ID_EDMA_QMAN_ARC1 34 /* DCORE0_EDMA1 */
+#define CPU_ID_EDMA_QMAN_ARC2 35 /* DCORE1_EDMA0 */
+#define CPU_ID_EDMA_QMAN_ARC3 36 /* DCORE1_EDMA1 */
+#define CPU_ID_EDMA_QMAN_ARC4 37 /* DCORE2_EDMA0 */
+#define CPU_ID_EDMA_QMAN_ARC5 38 /* DCORE2_EDMA1 */
+#define CPU_ID_EDMA_QMAN_ARC6 39 /* DCORE3_EDMA0 */
+#define CPU_ID_EDMA_QMAN_ARC7 40 /* DCORE3_EDMA1 */
+
+#define CPU_ID_PDMA_QMAN_ARC0 41 /* DCORE0_PDMA0 */
+#define CPU_ID_PDMA_QMAN_ARC1 42 /* DCORE0_PDMA1 */
+
+#define CPU_ID_ROT_QMAN_ARC0 43 /* ROT0 */
+#define CPU_ID_ROT_QMAN_ARC1 44 /* ROT1 */
+
+#define CPU_ID_NIC_QMAN_ARC0 45 /* NIC0_0 */
+#define CPU_ID_NIC_QMAN_ARC1 46 /* NIC0_1 */
+#define CPU_ID_NIC_QMAN_ARC2 47 /* NIC1_0 */
+#define CPU_ID_NIC_QMAN_ARC3 48 /* NIC1_1 */
+#define CPU_ID_NIC_QMAN_ARC4 49 /* NIC2_0 */
+#define CPU_ID_NIC_QMAN_ARC5 50 /* NIC2_1 */
+#define CPU_ID_NIC_QMAN_ARC6 51 /* NIC3_0 */
+#define CPU_ID_NIC_QMAN_ARC7 52 /* NIC3_1 */
+#define CPU_ID_NIC_QMAN_ARC8 53 /* NIC4_0 */
+#define CPU_ID_NIC_QMAN_ARC9 54 /* NIC4_1 */
+#define CPU_ID_NIC_QMAN_ARC10 55 /* NIC5_0 */
+#define CPU_ID_NIC_QMAN_ARC11 56 /* NIC5_1 */
+#define CPU_ID_NIC_QMAN_ARC12 57 /* NIC6_0 */
+#define CPU_ID_NIC_QMAN_ARC13 58 /* NIC6_1 */
+#define CPU_ID_NIC_QMAN_ARC14 59 /* NIC7_0 */
+#define CPU_ID_NIC_QMAN_ARC15 60 /* NIC7_1 */
+#define CPU_ID_NIC_QMAN_ARC16 61 /* NIC8_0 */
+#define CPU_ID_NIC_QMAN_ARC17 62 /* NIC8_1 */
+#define CPU_ID_NIC_QMAN_ARC18 63 /* NIC9_0 */
+#define CPU_ID_NIC_QMAN_ARC19 64 /* NIC9_1 */
+#define CPU_ID_NIC_QMAN_ARC20 65 /* NIC10_0 */
+#define CPU_ID_NIC_QMAN_ARC21 66 /* NIC10_1 */
+#define CPU_ID_NIC_QMAN_ARC22 67 /* NIC11_0 */
+#define CPU_ID_NIC_QMAN_ARC23 68 /* NIC11_1 */
+
+#define CPU_ID_MAX 69
+#define CPU_ID_SCHED_MAX 6
+
+#define CPU_ID_ALL 0xFE
+#define CPU_ID_INVALID 0xFF
+
+enum arc_regions_t {
+ ARC_REGION0_UNSED = 0,
+ /*
+ * Extension registers
+ * None
+ */
+ ARC_REGION1_SRAM = 1,
+ /*
+ * Extension registers
+ * AUX_SRAM_LSB_ADDR
+ * AUX_SRAM_MSB_ADDR
+ * ARC Address: 0x1000_0000
+ */
+ ARC_REGION2_CFG = 2,
+ /*
+ * Extension registers
+ * AUX_CFG_LSB_ADDR
+ * AUX_CFG_MSB_ADDR
+ * ARC Address: 0x2000_0000
+ */
+ ARC_REGION3_GENERAL = 3,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_0
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_0
+ * ARC Address: 0x3000_0000
+ */
+ ARC_REGION4_HBM0_FW = 4,
+ /*
+ * Extension registers
+ * AUX_HBM0_LSB_ADDR
+ * AUX_HBM0_MSB_ADDR
+ * AUX_HBM0_OFFSET
+ * ARC Address: 0x4000_0000
+ */
+ ARC_REGION5_HBM1_GC_DATA = 5,
+ /*
+ * Extension registers
+ * AUX_HBM1_LSB_ADDR
+ * AUX_HBM1_MSB_ADDR
+ * AUX_HBM1_OFFSET
+ * ARC Address: 0x5000_0000
+ */
+ ARC_REGION6_HBM2_GC_DATA = 6,
+ /*
+ * Extension registers
+ * AUX_HBM2_LSB_ADDR
+ * AUX_HBM2_MSB_ADDR
+ * AUX_HBM2_OFFSET
+ * ARC Address: 0x6000_0000
+ */
+ ARC_REGION7_HBM3_GC_DATA = 7,
+ /*
+ * Extension registers
+ * AUX_HBM3_LSB_ADDR
+ * AUX_HBM3_MSB_ADDR
+ * AUX_HBM3_OFFSET
+ * ARC Address: 0x7000_0000
+ */
+ ARC_REGION8_DCCM = 8,
+ /*
+ * Extension registers
+ * None
+ * ARC Address: 0x8000_0000
+ */
+ ARC_REGION9_PCIE = 9,
+ /*
+ * Extension registers
+ * AUX_PCIE_LSB_ADDR
+ * AUX_PCIE_MSB_ADDR
+ * ARC Address: 0x9000_0000
+ */
+ ARC_REGION10_GENERAL = 10,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_1
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_1
+ * ARC Address: 0xA000_0000
+ */
+ ARC_REGION11_GENERAL = 11,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_2
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_2
+ * ARC Address: 0xB000_0000
+ */
+ ARC_REGION12_GENERAL = 12,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_3
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_3
+ * ARC Address: 0xC000_0000
+ */
+ ARC_REGION13_GENERAL = 13,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_4
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_4
+ * ARC Address: 0xD000_0000
+ */
+ ARC_REGION14_GENERAL = 14,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_5
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_5
+ * ARC Address: 0xE000_0000
+ */
+ ARC_REGION15_LBU = 15
+ /*
+ * Extension registers
+ * None
+ * ARC Address: 0xF000_0000
+ */
+};
+
+#endif /* __GAUDI2_ARC_COMMON_PACKETS_H__ */
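Per the address comments above, region N of the ARC address map starts at N << 28 (0x1000_0000 for ARC_REGION1_SRAM, 0x2000_0000 for ARC_REGION2_CFG, and so on up to 0xF000_0000 for ARC_REGION15_LBU). A small helper capturing that relationship, purely for illustration:

	/* Illustrative: base ARC address of a region, per the comments above */
	static inline u32 arc_region_base(enum arc_regions_t region)
	{
		return (u32)region << 28;	/* region 1 -> 0x10000000, ... */
	}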
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
new file mode 100644
index 000000000000..1974df13b5f9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_ARC0_ACP_ENG_REGS_H_
+#define ASIC_REG_ARC_FARM_ARC0_ACP_ENG_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_ARC0_ACP_ENG
+ * (Prototype: ARC_ACP_ENG)
+ *****************************************
+ */
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_0 0x4E8F000
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_1 0x4E8F004
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_2 0x4E8F008
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_3 0x4E8F00C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_4 0x4E8F010
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_5 0x4E8F014
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_6 0x4E8F018
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_7 0x4E8F01C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_8 0x4E8F020
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_9 0x4E8F024
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_10 0x4E8F028
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_11 0x4E8F02C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_12 0x4E8F030
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_13 0x4E8F034
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_14 0x4E8F038
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_15 0x4E8F03C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_16 0x4E8F040
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_17 0x4E8F044
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_18 0x4E8F048
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_19 0x4E8F04C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_20 0x4E8F050
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_21 0x4E8F054
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_22 0x4E8F058
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_23 0x4E8F05C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_24 0x4E8F060
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_25 0x4E8F064
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_26 0x4E8F068
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_27 0x4E8F06C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_28 0x4E8F070
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_29 0x4E8F074
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_30 0x4E8F078
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_31 0x4E8F07C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_32 0x4E8F080
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_33 0x4E8F084
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_34 0x4E8F088
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_35 0x4E8F08C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_36 0x4E8F090
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_37 0x4E8F094
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_38 0x4E8F098
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_39 0x4E8F09C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_40 0x4E8F0A0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_41 0x4E8F0A4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_42 0x4E8F0A8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_43 0x4E8F0AC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_44 0x4E8F0B0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_45 0x4E8F0B4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_46 0x4E8F0B8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_47 0x4E8F0BC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_48 0x4E8F0C0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_49 0x4E8F0C4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_50 0x4E8F0C8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_51 0x4E8F0CC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_52 0x4E8F0D0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_53 0x4E8F0D4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_54 0x4E8F0D8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_55 0x4E8F0DC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_56 0x4E8F0E0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_57 0x4E8F0E4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_58 0x4E8F0E8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_59 0x4E8F0EC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_60 0x4E8F0F0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_61 0x4E8F0F4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_62 0x4E8F0F8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_63 0x4E8F0FC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_0 0x4E8F100
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_1 0x4E8F104
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_2 0x4E8F108
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_3 0x4E8F10C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_4 0x4E8F110
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_5 0x4E8F114
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_6 0x4E8F118
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_7 0x4E8F11C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_8 0x4E8F120
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_9 0x4E8F124
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_10 0x4E8F128
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_11 0x4E8F12C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_12 0x4E8F130
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_13 0x4E8F134
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_14 0x4E8F138
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_15 0x4E8F13C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_16 0x4E8F140
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_17 0x4E8F144
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_18 0x4E8F148
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_19 0x4E8F14C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_20 0x4E8F150
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_21 0x4E8F154
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_22 0x4E8F158
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_23 0x4E8F15C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_24 0x4E8F160
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_25 0x4E8F164
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_26 0x4E8F168
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_27 0x4E8F16C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_28 0x4E8F170
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_29 0x4E8F174
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_30 0x4E8F178
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_31 0x4E8F17C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_32 0x4E8F180
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_33 0x4E8F184
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_34 0x4E8F188
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_35 0x4E8F18C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_36 0x4E8F190
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_37 0x4E8F194
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_38 0x4E8F198
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_39 0x4E8F19C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_40 0x4E8F1A0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_41 0x4E8F1A4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_42 0x4E8F1A8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_43 0x4E8F1AC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_44 0x4E8F1B0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_45 0x4E8F1B4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_46 0x4E8F1B8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_47 0x4E8F1BC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_48 0x4E8F1C0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_49 0x4E8F1C4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_50 0x4E8F1C8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_51 0x4E8F1CC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_52 0x4E8F1D0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_53 0x4E8F1D4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_54 0x4E8F1D8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_55 0x4E8F1DC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_56 0x4E8F1E0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_57 0x4E8F1E4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_58 0x4E8F1E8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_59 0x4E8F1EC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_60 0x4E8F1F0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_61 0x4E8F1F4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_62 0x4E8F1F8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_63 0x4E8F1FC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_0 0x4E8F200
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_1 0x4E8F204
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_2 0x4E8F208
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_3 0x4E8F20C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_4 0x4E8F210
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_5 0x4E8F214
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_6 0x4E8F218
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_7 0x4E8F21C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_8 0x4E8F220
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_9 0x4E8F224
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_10 0x4E8F228
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_11 0x4E8F22C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_12 0x4E8F230
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_13 0x4E8F234
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_14 0x4E8F238
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_15 0x4E8F23C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_16 0x4E8F240
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_17 0x4E8F244
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_18 0x4E8F248
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_19 0x4E8F24C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_20 0x4E8F250
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_21 0x4E8F254
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_22 0x4E8F258
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_23 0x4E8F25C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_24 0x4E8F260
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_25 0x4E8F264
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_26 0x4E8F268
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_27 0x4E8F26C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_28 0x4E8F270
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_29 0x4E8F274
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_30 0x4E8F278
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_31 0x4E8F27C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_32 0x4E8F280
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_33 0x4E8F284
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_34 0x4E8F288
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_35 0x4E8F28C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_36 0x4E8F290
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_37 0x4E8F294
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_38 0x4E8F298
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_39 0x4E8F29C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_40 0x4E8F2A0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_41 0x4E8F2A4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_42 0x4E8F2A8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_43 0x4E8F2AC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_44 0x4E8F2B0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_45 0x4E8F2B4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_46 0x4E8F2B8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_47 0x4E8F2BC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_48 0x4E8F2C0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_49 0x4E8F2C4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_50 0x4E8F2C8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_51 0x4E8F2CC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_52 0x4E8F2D0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_53 0x4E8F2D4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_54 0x4E8F2D8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_55 0x4E8F2DC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_56 0x4E8F2E0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_57 0x4E8F2E4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_58 0x4E8F2E8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_59 0x4E8F2EC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_60 0x4E8F2F0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_61 0x4E8F2F4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_62 0x4E8F2F8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_63 0x4E8F2FC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_0 0x4E8F300
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_1 0x4E8F304
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_2 0x4E8F308
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_3 0x4E8F30C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_4 0x4E8F310
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_5 0x4E8F314
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_6 0x4E8F318
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_7 0x4E8F31C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_8 0x4E8F320
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_9 0x4E8F324
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_10 0x4E8F328
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_11 0x4E8F32C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_12 0x4E8F330
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_13 0x4E8F334
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_14 0x4E8F338
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_15 0x4E8F33C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_16 0x4E8F340
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_17 0x4E8F344
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_18 0x4E8F348
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_19 0x4E8F34C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_20 0x4E8F350
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_21 0x4E8F354
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_22 0x4E8F358
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_23 0x4E8F35C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_24 0x4E8F360
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_25 0x4E8F364
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_26 0x4E8F368
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_27 0x4E8F36C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_28 0x4E8F370
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_29 0x4E8F374
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_30 0x4E8F378
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_31 0x4E8F37C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_32 0x4E8F380
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_33 0x4E8F384
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_34 0x4E8F388
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_35 0x4E8F38C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_36 0x4E8F390
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_37 0x4E8F394
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_38 0x4E8F398
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_39 0x4E8F39C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_40 0x4E8F3A0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_41 0x4E8F3A4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_42 0x4E8F3A8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_43 0x4E8F3AC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_44 0x4E8F3B0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_45 0x4E8F3B4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_46 0x4E8F3B8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_47 0x4E8F3BC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_48 0x4E8F3C0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_49 0x4E8F3C4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_50 0x4E8F3C8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_51 0x4E8F3CC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_52 0x4E8F3D0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_53 0x4E8F3D4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_54 0x4E8F3D8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_55 0x4E8F3DC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_56 0x4E8F3E0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_57 0x4E8F3E4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_58 0x4E8F3E8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_59 0x4E8F3EC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_60 0x4E8F3F0
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_61 0x4E8F3F4
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_62 0x4E8F3F8
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_63 0x4E8F3FC
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_SELECTED_QUEUE_ID 0x4E8F400
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_0 0x4E8F404
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_1 0x4E8F408
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_2 0x4E8F40C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_0 0x4E8F410
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_1 0x4E8F414
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_2 0x4E8F418
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_0 0x4E8F41C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_1 0x4E8F420
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_2 0x4E8F424
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_3 0x4E8F428
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_0 0x4E8F42C
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_1 0x4E8F430
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_2 0x4E8F434
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_3 0x4E8F438
+
+#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_REG 0x4E8F43C
+
+#endif /* ASIC_REG_ARC_FARM_ARC0_ACP_ENG_REGS_H_ */
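The four register banks above (PI, CI, PR and MK) are each 64 consecutive 32-bit registers, so entry n lives at entry 0 plus 4 * n; for example PI_REG_5 = 0x4E8F000 + 4 * 5 = 0x4E8F014, matching the listing. A hedged sketch of that addressing (the macro name is illustrative, not part of this header):

	/* Illustrative: per-index ACP register address, 4-byte stride */
	#define ACP_ENG_REG(reg0, n)	((reg0) + 4 * (n))

	/* ACP_ENG_REG(mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_0, 5) == mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_5 */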
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
new file mode 100644
index 000000000000..fc2c52af6509
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
@@ -0,0 +1,819 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_ARC0_AUX_MASKS_H_
+#define ASIC_REG_ARC_FARM_ARC0_AUX_MASKS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_ARC0_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+/* ARC_FARM_ARC0_AUX_RUN_HALT_REQ */
+#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK 0x1
+#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_SHIFT 1
+#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK 0x2
+
+/* ARC_FARM_ARC0_AUX_RUN_HALT_ACK */
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_SHIFT 0
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_MASK 0x1
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_SHIFT 4
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK 0x10
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_HALT_R_SHIFT 8
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_HALT_R_MASK 0x100
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_TF_HALT_R_SHIFT 12
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_TF_HALT_R_MASK 0x1000
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_R_SHIFT 16
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_R_MASK 0x10000
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_MODE_R_SHIFT 17
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_MODE_R_MASK 0xE0000
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_WATCHDOG_RESET_SHIFT 20
+#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_WATCHDOG_RESET_MASK 0x100000
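These definitions follow the file's usual convention: a field is written as (value << SHIFT) & MASK and read back as (reg & MASK) >> SHIFT. A hedged sketch of a halt request/ack handshake built from the two registers above; WREG32/RREG32 stand for the driver's MMIO accessors and the mm*_RUN_HALT_* address macros live in the matching *_regs.h header, both assumed to be in scope:

	/* Illustrative: request an ARC halt and test the acknowledge bit */
	u32 req = (1 << ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_SHIFT) &
		  ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK;

	WREG32(mmARC_FARM_ARC0_AUX_RUN_HALT_REQ, req);

	if (RREG32(mmARC_FARM_ARC0_AUX_RUN_HALT_ACK) &
	    ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK)
		; /* the ARC acknowledged the halt request */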
+
+/* ARC_FARM_ARC0_AUX_RST_VEC_ADDR */
+#define ARC_FARM_ARC0_AUX_RST_VEC_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_RST_VEC_ADDR_VAL_MASK 0x3FFFFF
+
+/* ARC_FARM_ARC0_AUX_DBG_MODE */
+#define ARC_FARM_ARC0_AUX_DBG_MODE_DBG_PROT_SEL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DBG_MODE_DBG_PROT_SEL_MASK 0x1
+#define ARC_FARM_ARC0_AUX_DBG_MODE_DBGEN_SHIFT 4
+#define ARC_FARM_ARC0_AUX_DBG_MODE_DBGEN_MASK 0x10
+#define ARC_FARM_ARC0_AUX_DBG_MODE_NIDEN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_DBG_MODE_NIDEN_MASK 0x100
+#define ARC_FARM_ARC0_AUX_DBG_MODE_CASHE_RST_DISABLE_SHIFT 12
+#define ARC_FARM_ARC0_AUX_DBG_MODE_CASHE_RST_DISABLE_MASK 0x1000
+#define ARC_FARM_ARC0_AUX_DBG_MODE_DDCM_DMI_PRIORITY_SHIFT 16
+#define ARC_FARM_ARC0_AUX_DBG_MODE_DDCM_DMI_PRIORITY_MASK 0x10000
+
+/* ARC_FARM_ARC0_AUX_CLUSTER_NUM */
+#define ARC_FARM_ARC0_AUX_CLUSTER_NUM_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CLUSTER_NUM_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_ARC_NUM */
+#define ARC_FARM_ARC0_AUX_ARC_NUM_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_NUM_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_WAKE_UP_EVENT */
+#define ARC_FARM_ARC0_AUX_WAKE_UP_EVENT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_WAKE_UP_EVENT_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE */
+#define ARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CTI_AP_STS */
+#define ARC_FARM_ARC0_AUX_CTI_AP_STS_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CTI_AP_STS_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL */
+#define ARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL_RUN_HALT_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL_RUN_HALT_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_ARC_RST */
+#define ARC_FARM_ARC0_AUX_ARC_RST_CORE_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_RST_CORE_MASK 0x1
+#define ARC_FARM_ARC0_AUX_ARC_RST_PRESETDBGN_SHIFT 4
+#define ARC_FARM_ARC0_AUX_ARC_RST_PRESETDBGN_MASK 0x10
+
+/* ARC_FARM_ARC0_AUX_ARC_RST_REQ */
+#define ARC_FARM_ARC0_AUX_ARC_RST_REQ_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_RST_REQ_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_SRAM_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_SRAM_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_SRAM_LSB_ADDR_VAL_MASK 0x3F
+
+/* ARC_FARM_ARC0_AUX_SRAM_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_SRAM_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_SRAM_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_PCIE_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_PCIE_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_PCIE_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_PCIE_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_PCIE_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_PCIE_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CFG_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_CFG_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_CFG_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_CFG_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM0_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM0_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM0_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_HBM0_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM0_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM0_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM1_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM1_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM1_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_HBM1_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM1_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM1_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM2_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM2_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM2_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_HBM2_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM2_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM2_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM3_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM3_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM3_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_HBM3_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_HBM3_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM3_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM0_OFFSET */
+#define ARC_FARM_ARC0_AUX_HBM0_OFFSET_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM0_OFFSET_VAL_MASK 0xFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM1_OFFSET */
+#define ARC_FARM_ARC0_AUX_HBM1_OFFSET_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM1_OFFSET_VAL_MASK 0xFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM2_OFFSET */
+#define ARC_FARM_ARC0_AUX_HBM2_OFFSET_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM2_OFFSET_VAL_MASK 0xFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_HBM3_OFFSET */
+#define ARC_FARM_ARC0_AUX_HBM3_OFFSET_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_HBM3_OFFSET_VAL_MASK 0xFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR */
+#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR */
+#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR */
+#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_MASK 0xF
+#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_EN_SHIFT 4
+#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_EN_MASK 0xF0
+
+/* ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR */
+#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_MASK 0xF
+#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_EN_SHIFT 4
+#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_EN_MASK 0xF0
+
+/* ARC_FARM_ARC0_AUX_CONTEXT_ID */
+#define ARC_FARM_ARC0_AUX_CONTEXT_ID_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CONTEXT_ID_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CID_OFFSET */
+#define ARC_FARM_ARC0_AUX_CID_OFFSET_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CID_OFFSET_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_SW_INTR */
+#define ARC_FARM_ARC0_AUX_SW_INTR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_SW_INTR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_IRQ_INTR_MASK */
+#define ARC_FARM_ARC0_AUX_IRQ_INTR_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_IRQ_INTR_MASK_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS */
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS_VAL_MASK 0x3FFF
+
+/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR */
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR_VAL_MASK 0x3FFF
+
+/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK */
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK_VAL_MASK 0x3FFF
+
+/* ARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE */
+#define ARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN */
+#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_INTR_EN_SHIFT 0
+#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_INTR_EN_MASK 0x1
+#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_HALT_EN_SHIFT 1
+#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_HALT_EN_MASK 0x2
+
+/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK */
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK_VAL_MASK 0x3FFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK */
+#define ARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK_VAL_MASK 0x3FFF
+
+/* ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS */
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_SERR_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_SERR_MASK 0x1
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_DERR_SHIFT 1
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_DERR_MASK 0x2
+
+/* ARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR */
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK */
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR */
+#define ARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME */
+#define ARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR */
+#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME */
+#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR */
+#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME */
+#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR */
+#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR */
+#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP */
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP */
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN */
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE */
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE_VAL_MASK 0x7
+
+/* ARC_FARM_ARC0_AUX_SCRATCHPAD */
+#define ARC_FARM_ARC0_AUX_SCRATCHPAD_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_SCRATCHPAD_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT */
+#define ARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT */
+#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT */
+#define ARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT */
+#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT */
+#define ARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT */
+#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT */
+#define ARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT */
+#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN */
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN */
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_VAL_MASK 0x3FF
+
+/* ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN */
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN_VAL_MASK 0x3FF
+
+/* ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_VAL_MASK 0x3FF
+
+/* ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN */
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN_VAL_MASK 0x3FF
+
+/* ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_READ_MASK 0xF
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WRITE_SHIFT 4
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WRITE_MASK 0xF0
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_RD_EN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_RD_EN_MASK 0xF00
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WR_EN_SHIFT 12
+#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WR_EN_MASK 0xF000
+
+/* ARC_FARM_ARC0_AUX_CBU_LOCK_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_READ_MASK 0x3
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WRITE_SHIFT 4
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WRITE_MASK 0x30
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_RD_EN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_RD_EN_MASK 0x300
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WR_EN_SHIFT 12
+#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WR_EN_MASK 0x3000
+
+/* ARC_FARM_ARC0_AUX_CBU_PROT_OVR */
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_READ_MASK 0x7
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WRITE_SHIFT 4
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WRITE_MASK 0x70
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_RD_EN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_RD_EN_MASK 0x700
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WR_EN_SHIFT 12
+#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WR_EN_MASK 0x7000
+
+/* ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING */
+#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_READ_MASK 0xFF
+#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_WRITE_SHIFT 8
+#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_WRITE_MASK 0xFF00
+
+/* ARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN */
+#define ARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN_CBU_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN_CBU_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK */
+#define ARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK_CBU_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK_CBU_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT */
+#define ARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID */
+#define ARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID_VAL_MASK 0x7F
+
+/* ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR */
+#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN */
+#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR */
+#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN */
+#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR */
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_READ_MASK 0xF
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WRITE_SHIFT 4
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WRITE_MASK 0xF0
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_RD_EN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_RD_EN_MASK 0xF00
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WR_EN_SHIFT 12
+#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WR_EN_MASK 0xF000
+
+/* ARC_FARM_ARC0_AUX_LBU_LOCK_OVR */
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_READ_MASK 0x3
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WRITE_SHIFT 4
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WRITE_MASK 0x30
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_RD_EN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_RD_EN_MASK 0x300
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WR_EN_SHIFT 12
+#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WR_EN_MASK 0x3000
+
+/* ARC_FARM_ARC0_AUX_LBU_PROT_OVR */
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_READ_MASK 0x7
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WRITE_SHIFT 4
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WRITE_MASK 0x70
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_RD_EN_SHIFT 8
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_RD_EN_MASK 0x700
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WR_EN_SHIFT 12
+#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WR_EN_MASK 0x7000
+
+/* ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING */
+#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_READ_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_READ_MASK 0xFF
+#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_WRITE_SHIFT 8
+#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_WRITE_MASK 0xFF00
+
+/* ARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN */
+#define ARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK */
+#define ARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT */
+#define ARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID */
+#define ARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID_VAL_MASK 0x3FF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_PI */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_CI */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK */
+#define ARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK */
+#define ARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK_VAL_MASK 0xFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG_VAL_MASK 0xFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG */
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG_VAL_MASK 0xFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT */
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT_VAL_MASK 0x7
+
+/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER */
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST */
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK */
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE */
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT */
+#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_LBW_SLV_AXI_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_LBW_SLV_AXI_MASK 0xF
+#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_GEN_AXI_SHIFT 4
+#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_GEN_AXI_MASK 0xF0
+
+/* ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG */
+#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG_VAL_MASK 0x1F
+
+/* ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT */
+#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT_VAL_MASK 0x1F
+
+/* ARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI */
+#define ARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI */
+#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI */
+#define ARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI */
+#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_AUX2APB_PROT */
+#define ARC_FARM_ARC0_AUX_AUX2APB_PROT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_AUX2APB_PROT_VAL_MASK 0x7
+
+/* ARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN */
+#define ARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0 */
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0 */
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1 */
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1 */
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0 */
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0 */
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1 */
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1 */
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0 */
+#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1 */
+#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK */
+#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR */
+#define ARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR */
+#define ARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR */
+#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN_VAL_MASK 0xF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB */
+#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB */
+#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP */
+#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP */
+#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_ARC_REGION_CFG */
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_1_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_1_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_2_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_2_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_3_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_3_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_4_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_4_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_5_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_5_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_6_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_6_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_7_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_7_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_8_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_8_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_9_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_9_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_10_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_10_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_11_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_11_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_12_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_12_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_13_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_13_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_14_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_14_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_15_ASID_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_15_ASID_MASK 0x3FF
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_SHIFT 12
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK 0x1000
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_SHIFT 16
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_MASK 0x70000
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_EN_SHIFT 20
+#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_EN_MASK 0x700000
+
+/* ARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR */
+#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR */
+#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP */
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP */
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN */
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN_VAL_MASK 0x1
+
+/* ARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION */
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION_VAL_MASK 0xFFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT */
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL */
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_ENABLE_BP_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_ENABLE_BP_MASK 0x1
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_RD_DELAY_CC_SHIFT 1
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_RD_DELAY_CC_MASK 0x3E
+
+/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK */
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR */
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_VAL_MASK 0x7FFFFFF
+
+/* ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER */
+#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER_VAL_MASK 0x3
+
+/* ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN */
+#define ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_SHIFT 0
+#define ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK 0x1
+
+#endif /* ASIC_REG_ARC_FARM_ARC0_AUX_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
new file mode 100644
index 000000000000..5345b5faa3a2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_ARC0_AUX_REGS_H_
+#define ASIC_REG_ARC_FARM_ARC0_AUX_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_ARC0_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmARC_FARM_ARC0_AUX_RUN_HALT_REQ 0x4E88100
+
+#define mmARC_FARM_ARC0_AUX_RUN_HALT_ACK 0x4E88104
+
+#define mmARC_FARM_ARC0_AUX_RST_VEC_ADDR 0x4E88108
+
+#define mmARC_FARM_ARC0_AUX_DBG_MODE 0x4E8810C
+
+#define mmARC_FARM_ARC0_AUX_CLUSTER_NUM 0x4E88110
+
+#define mmARC_FARM_ARC0_AUX_ARC_NUM 0x4E88114
+
+#define mmARC_FARM_ARC0_AUX_WAKE_UP_EVENT 0x4E88118
+
+#define mmARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE 0x4E8811C
+
+#define mmARC_FARM_ARC0_AUX_CTI_AP_STS 0x4E88120
+
+#define mmARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL 0x4E88124
+
+#define mmARC_FARM_ARC0_AUX_ARC_RST 0x4E88128
+
+#define mmARC_FARM_ARC0_AUX_ARC_RST_REQ 0x4E8812C
+
+#define mmARC_FARM_ARC0_AUX_SRAM_LSB_ADDR 0x4E88130
+
+#define mmARC_FARM_ARC0_AUX_SRAM_MSB_ADDR 0x4E88134
+
+#define mmARC_FARM_ARC0_AUX_PCIE_LSB_ADDR 0x4E88138
+
+#define mmARC_FARM_ARC0_AUX_PCIE_MSB_ADDR 0x4E8813C
+
+#define mmARC_FARM_ARC0_AUX_CFG_LSB_ADDR 0x4E88140
+
+#define mmARC_FARM_ARC0_AUX_CFG_MSB_ADDR 0x4E88144
+
+#define mmARC_FARM_ARC0_AUX_HBM0_LSB_ADDR 0x4E88150
+
+#define mmARC_FARM_ARC0_AUX_HBM0_MSB_ADDR 0x4E88154
+
+#define mmARC_FARM_ARC0_AUX_HBM1_LSB_ADDR 0x4E88158
+
+#define mmARC_FARM_ARC0_AUX_HBM1_MSB_ADDR 0x4E8815C
+
+#define mmARC_FARM_ARC0_AUX_HBM2_LSB_ADDR 0x4E88160
+
+#define mmARC_FARM_ARC0_AUX_HBM2_MSB_ADDR 0x4E88164
+
+#define mmARC_FARM_ARC0_AUX_HBM3_LSB_ADDR 0x4E88168
+
+#define mmARC_FARM_ARC0_AUX_HBM3_MSB_ADDR 0x4E8816C
+
+#define mmARC_FARM_ARC0_AUX_HBM0_OFFSET 0x4E88170
+
+#define mmARC_FARM_ARC0_AUX_HBM1_OFFSET 0x4E88174
+
+#define mmARC_FARM_ARC0_AUX_HBM2_OFFSET 0x4E88178
+
+#define mmARC_FARM_ARC0_AUX_HBM3_OFFSET 0x4E8817C
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x4E88180
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x4E88184
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x4E88188
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x4E8818C
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x4E88190
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x4E88194
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x4E88198
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x4E8819C
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x4E881A0
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x4E881A4
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x4E881A8
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x4E881AC
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x4E881B0
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x4E881B4
+
+#define mmARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR 0x4E881B8
+
+#define mmARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR 0x4E881BC
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_0 0x4E881C0
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_1 0x4E881C4
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_2 0x4E881C8
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_3 0x4E881CC
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_4 0x4E881D0
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_5 0x4E881D4
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_6 0x4E881D8
+
+#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_7 0x4E881DC
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_0 0x4E881E0
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_1 0x4E881E4
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_2 0x4E881E8
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_3 0x4E881EC
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_4 0x4E881F0
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_5 0x4E881F4
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_6 0x4E881F8
+
+#define mmARC_FARM_ARC0_AUX_CID_OFFSET_7 0x4E881FC
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_0 0x4E88200
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_1 0x4E88204
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_2 0x4E88208
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_3 0x4E8820C
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_4 0x4E88210
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_5 0x4E88214
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_6 0x4E88218
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_7 0x4E8821C
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_8 0x4E88220
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_9 0x4E88224
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_10 0x4E88228
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_11 0x4E8822C
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_12 0x4E88230
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_13 0x4E88234
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_14 0x4E88238
+
+#define mmARC_FARM_ARC0_AUX_SW_INTR_15 0x4E8823C
+
+#define mmARC_FARM_ARC0_AUX_IRQ_INTR_MASK_0 0x4E88280
+
+#define mmARC_FARM_ARC0_AUX_IRQ_INTR_MASK_1 0x4E88284
+
+#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS 0x4E88290
+
+#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR 0x4E88294
+
+#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK 0x4E88298
+
+#define mmARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE 0x4E8829C
+
+#define mmARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN 0x4E882A0
+
+#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK 0x4E882A4
+
+#define mmARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK 0x4E882A8
+
+#define mmARC_FARM_ARC0_AUX_ARC_REI_INTR_STS 0x4E882B0
+
+#define mmARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR 0x4E882B4
+
+#define mmARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK 0x4E882B8
+
+#define mmARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR 0x4E882BC
+
+#define mmARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME 0x4E882C0
+
+#define mmARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR 0x4E882C4
+
+#define mmARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME 0x4E882C8
+
+#define mmARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR 0x4E882CC
+
+#define mmARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME 0x4E882D0
+
+#define mmARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR 0x4E882E0
+
+#define mmARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR 0x4E882E4
+
+#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP 0x4E882E8
+
+#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP 0x4E882EC
+
+#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN 0x4E882F0
+
+#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE 0x4E882F4
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_0 0x4E88300
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_1 0x4E88304
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_2 0x4E88308
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_3 0x4E8830C
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_4 0x4E88310
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_5 0x4E88314
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_6 0x4E88318
+
+#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_7 0x4E8831C
+
+#define mmARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT 0x4E88320
+
+#define mmARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT 0x4E88324
+
+#define mmARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT 0x4E88328
+
+#define mmARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT 0x4E8832C
+
+#define mmARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT 0x4E88330
+
+#define mmARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT 0x4E88334
+
+#define mmARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT 0x4E88338
+
+#define mmARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT 0x4E8833C
+
+#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_OVR 0x4E88350
+
+#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN 0x4E88354
+
+#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_OVR 0x4E88358
+
+#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN 0x4E8835C
+
+#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR 0x4E88360
+
+#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN 0x4E88364
+
+#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR 0x4E88368
+
+#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN 0x4E8836C
+
+#define mmARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR 0x4E88370
+
+#define mmARC_FARM_ARC0_AUX_CBU_LOCK_OVR 0x4E88374
+
+#define mmARC_FARM_ARC0_AUX_CBU_PROT_OVR 0x4E88378
+
+#define mmARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING 0x4E8837C
+
+#define mmARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN 0x4E88380
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK 0x4E88384
+
+#define mmARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT 0x4E8838C
+
+#define mmARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID 0x4E88390
+
+#define mmARC_FARM_ARC0_AUX_LBU_ARUSER_OVR 0x4E88400
+
+#define mmARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN 0x4E88404
+
+#define mmARC_FARM_ARC0_AUX_LBU_AWUSER_OVR 0x4E88408
+
+#define mmARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN 0x4E8840C
+
+#define mmARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR 0x4E88420
+
+#define mmARC_FARM_ARC0_AUX_LBU_LOCK_OVR 0x4E88424
+
+#define mmARC_FARM_ARC0_AUX_LBU_PROT_OVR 0x4E88428
+
+#define mmARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING 0x4E8842C
+
+#define mmARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN 0x4E88430
+
+#define mmARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK 0x4E88434
+
+#define mmARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT 0x4E8843C
+
+#define mmARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID 0x4E88440
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_0 0x4E88500
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_1 0x4E88504
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_2 0x4E88508
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_3 0x4E8850C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_4 0x4E88510
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_5 0x4E88514
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_6 0x4E88518
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_7 0x4E8851C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_0 0x4E88520
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_1 0x4E88524
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_2 0x4E88528
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_3 0x4E8852C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_4 0x4E88530
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_5 0x4E88534
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_6 0x4E88538
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_7 0x4E8853C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_0 0x4E88540
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_1 0x4E88544
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_2 0x4E88548
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_3 0x4E8854C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_4 0x4E88550
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_5 0x4E88554
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_6 0x4E88558
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_7 0x4E8855C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_0 0x4E88560
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_1 0x4E88564
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_2 0x4E88568
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_3 0x4E8856C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_4 0x4E88570
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_5 0x4E88574
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_6 0x4E88578
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_7 0x4E8857C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_0 0x4E88580
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_1 0x4E88584
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_2 0x4E88588
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_3 0x4E8858C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_4 0x4E88590
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_5 0x4E88594
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_6 0x4E88598
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_7 0x4E8859C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x4E885A0
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x4E885A4
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x4E885A8
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x4E885AC
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x4E885B0
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x4E885B4
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x4E885B8
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x4E885BC
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x4E885C0
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x4E885C4
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x4E885C8
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x4E885CC
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x4E885D0
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x4E885D4
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x4E885D8
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x4E885DC
+
+#define mmARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x4E885E0
+
+#define mmARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK 0x4E885E4
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN 0x4E88620
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG 0x4E88624
+
+#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG 0x4E88628
+
+#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT 0x4E88630
+
+#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER 0x4E88634
+
+#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST 0x4E88638
+
+#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK 0x4E8863C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE 0x4E88640
+
+#define mmARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT 0x4E88644
+
+#define mmARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x4E88648
+
+#define mmARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT 0x4E8864C
+
+#define mmARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x4E88650
+
+#define mmARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x4E88654
+
+#define mmARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI 0x4E88658
+
+#define mmARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI 0x4E8865C
+
+#define mmARC_FARM_ARC0_AUX_AUX2APB_PROT 0x4E88700
+
+#define mmARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN 0x4E88704
+
+#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x4E88708
+
+#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x4E8870C
+
+#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x4E88710
+
+#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x4E88714
+
+#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0 0x4E88718
+
+#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0 0x4E8871C
+
+#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1 0x4E88720
+
+#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1 0x4E88724
+
+#define mmARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0 0x4E88728
+
+#define mmARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1 0x4E8872C
+
+#define mmARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x4E88730
+
+#define mmARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x4E88734
+
+#define mmARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x4E88738
+
+#define mmARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x4E8873C
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN 0x4E88740
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB 0x4E88750
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB 0x4E88754
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB 0x4E88758
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB 0x4E8875C
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB 0x4E88760
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB 0x4E88764
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB 0x4E88768
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB 0x4E8876C
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB 0x4E88770
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB 0x4E88774
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB 0x4E88778
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB 0x4E8877C
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB 0x4E88780
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB 0x4E88784
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB 0x4E88788
+
+#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB 0x4E8878C
+
+#define mmARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB 0x4E88790
+
+#define mmARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB 0x4E88794
+
+#define mmARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP 0x4E88798
+
+#define mmARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP 0x4E8879C
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_0 0x4E88800
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_1 0x4E88804
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_2 0x4E88808
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_3 0x4E8880C
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_4 0x4E88810
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_5 0x4E88814
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_6 0x4E88818
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_7 0x4E8881C
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_8 0x4E88820
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_9 0x4E88824
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_10 0x4E88828
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_11 0x4E8882C
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_12 0x4E88830
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_13 0x4E88834
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_14 0x4E88838
+
+#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_15 0x4E8883C
+
+#define mmARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR 0x4E88840
+
+#define mmARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR 0x4E88844
+
+#define mmARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP 0x4E88848
+
+#define mmARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP 0x4E8884C
+
+#define mmARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN 0x4E88850
+
+#define mmARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION 0x4E88854
+
+#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x4E88900
+
+#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL 0x4E88904
+
+#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x4E88908
+
+#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR 0x4E8890C
+
+#define mmARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER 0x4E88910
+
+#define mmARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN 0x4E88920
+
+#endif /* ASIC_REG_ARC_FARM_ARC0_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
new file mode 100644
index 000000000000..bde077eed285
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_
+#define ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_ARC0_DUP_ENG_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_ASID 0x4E89900
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_MMU_BP 0x4E89904
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_STRONG_ORDER 0x4E89908
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_NO_SNOOP 0x4E8990C
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_REDUCTION 0x4E89910
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_ATOMIC 0x4E89914
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_QOS 0x4E89918
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RSVD 0x4E8991C
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_EMEM_CPAGE 0x4E89920
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_CORE 0x4E89924
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_E2E_COORD 0x4E89928
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_OVRD_LO 0x4E89930
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_OVRD_HI 0x4E89934
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_OVRD_LO 0x4E89938
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_OVRD_HI 0x4E8993C
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_COORD 0x4E89940
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_LOCK 0x4E89944
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_RSVD 0x4E89948
+
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_OVRD 0x4E8994C
+
+#endif /* ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
new file mode 100644
index 000000000000..491af75c12c3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_ARC0_DUP_ENG_REGS_H_
+#define ASIC_REG_ARC_FARM_ARC0_DUP_ENG_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_ARC0_DUP_ENG
+ * (Prototype: ARC_DUP_ENG)
+ *****************************************
+ */
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_0 0x4E89000
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_1 0x4E89004
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_2 0x4E89008
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_3 0x4E8900C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_4 0x4E89010
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_5 0x4E89014
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_6 0x4E89018
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_7 0x4E8901C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_8 0x4E89020
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_9 0x4E89024
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_10 0x4E89028
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_11 0x4E8902C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_12 0x4E89030
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_13 0x4E89034
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_14 0x4E89038
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_15 0x4E8903C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_16 0x4E89040
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_17 0x4E89044
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_18 0x4E89048
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_19 0x4E8904C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_20 0x4E89050
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_21 0x4E89054
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_22 0x4E89058
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_23 0x4E8905C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_24 0x4E89060
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_0 0x4E89064
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_1 0x4E89068
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_2 0x4E8906C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_3 0x4E89070
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_0 0x4E89074
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_1 0x4E89078
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_2 0x4E8907C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_3 0x4E89080
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_4 0x4E89084
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_5 0x4E89088
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_6 0x4E8908C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_7 0x4E89090
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_8 0x4E89094
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_9 0x4E89098
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_10 0x4E8909C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_11 0x4E890A0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_12 0x4E890A4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_13 0x4E890A8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_14 0x4E890AC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_15 0x4E890B0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_16 0x4E890B4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_17 0x4E890B8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_18 0x4E890BC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_19 0x4E890C0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_20 0x4E890C4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_21 0x4E890C8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_22 0x4E890CC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_23 0x4E890D0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_0 0x4E890D4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_1 0x4E890D8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_2 0x4E890DC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_3 0x4E890E0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_4 0x4E890E4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_5 0x4E890E8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_6 0x4E890EC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_7 0x4E890F0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_PDMA_ENG_ADDR_0 0x4E890F4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_PDMA_ENG_ADDR_1 0x4E890F8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_ROT_ENG_ADDR_0 0x4E890FC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_ROT_ENG_ADDR_1 0x4E89100
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_0 0x4E89104
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_1 0x4E89108
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_2 0x4E8910C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_3 0x4E89110
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_4 0x4E89114
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_5 0x4E89118
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_6 0x4E8911C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_7 0x4E89120
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_8 0x4E89124
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_9 0x4E89128
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_10 0x4E8912C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_11 0x4E89130
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_12 0x4E89134
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_13 0x4E89138
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_14 0x4E8913C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_15 0x4E89140
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_MASK 0x4E89200
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_MASK 0x4E89204
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_MASK 0x4E89208
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_PDMA_ENG_MASK 0x4E8920C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_ROT_ENG_MASK 0x4E89210
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_MASK 0x4E89214
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_0 0x4E89218
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_1 0x4E8921C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_2 0x4E89220
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_3 0x4E89224
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_4 0x4E89228
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_5 0x4E8922C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_6 0x4E89230
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_7 0x4E89234
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_0 0x4E89238
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_1 0x4E8923C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_2 0x4E89240
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_3 0x4E89244
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_4 0x4E89248
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_5 0x4E8924C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_6 0x4E89250
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_7 0x4E89254
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_8 0x4E89258
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_9 0x4E8925C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_10 0x4E89260
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_11 0x4E89264
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_12 0x4E89268
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_13 0x4E8926C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_0 0x4E89288
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_1 0x4E8928C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_2 0x4E89290
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_3 0x4E89294
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_4 0x4E89298
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_5 0x4E8929C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_0 0x4E892A0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_1 0x4E892A4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_2 0x4E892A8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_3 0x4E892AC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_4 0x4E892B0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_5 0x4E892B4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_0 0x4E892B8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_1 0x4E892BC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_2 0x4E892C0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_3 0x4E892C4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_4 0x4E892C8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_5 0x4E892CC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GENERAL_CFG 0x4E892D0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_BP_CFG 0x4E892D4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_0 0x4E892D8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_1 0x4E892DC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_2 0x4E892E0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_3 0x4E892E4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_4 0x4E892E8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_5 0x4E892EC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_6 0x4E892F0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_7 0x4E892F4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_8 0x4E892F8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_9 0x4E892FC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_10 0x4E89300
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_11 0x4E89304
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_12 0x4E89308
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_13 0x4E8930C
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_IN_GRP_TRANS_0 0x4E894A0
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_IN_GRP_TRANS_1 0x4E894A4
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_IN_GRP_TRANS_2 0x4E894A8
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_STS 0x4E894AC
+
+#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_OUT_RQ_CNT 0x4E894B0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_0 0x4E894B4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_1 0x4E894B8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_2 0x4E894BC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_3 0x4E894C0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_4 0x4E894C4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_5 0x4E894C8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_6 0x4E894CC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_7 0x4E894D0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_8 0x4E894D4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_9 0x4E894D8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_10 0x4E894DC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_11 0x4E894E0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_12 0x4E894E4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_13 0x4E894E8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_14 0x4E894EC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_15 0x4E894F0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_16 0x4E894F4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_17 0x4E894F8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_18 0x4E894FC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_19 0x4E89500
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_20 0x4E89504
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_21 0x4E89508
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_22 0x4E8950C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_23 0x4E89510
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_24 0x4E89514
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_25 0x4E89518
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_26 0x4E8951C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_27 0x4E89520
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_28 0x4E89524
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_29 0x4E89528
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_30 0x4E8952C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_31 0x4E89530
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_32 0x4E89534
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_33 0x4E89538
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_34 0x4E8953C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_35 0x4E89540
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_36 0x4E89544
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_37 0x4E89548
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_38 0x4E8954C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_39 0x4E89550
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_40 0x4E89554
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_41 0x4E89558
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_42 0x4E8955C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_43 0x4E89560
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_44 0x4E89564
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_45 0x4E89568
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_46 0x4E8956C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_47 0x4E89570
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_48 0x4E89574
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_49 0x4E89578
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_50 0x4E8957C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_51 0x4E89580
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_52 0x4E89584
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_53 0x4E89588
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_54 0x4E8958C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_55 0x4E89590
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_56 0x4E89594
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_57 0x4E89598
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_58 0x4E8959C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_59 0x4E895A0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_60 0x4E895A4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_61 0x4E895A8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_62 0x4E895AC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_63 0x4E895B0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_0 0x4E895B4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_1 0x4E895B8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_2 0x4E895BC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_3 0x4E895C0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_4 0x4E895C4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_5 0x4E895C8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_6 0x4E895CC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_7 0x4E895D0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_8 0x4E895D4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_9 0x4E895D8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_10 0x4E895DC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_11 0x4E895E0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_12 0x4E895E4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_13 0x4E895E8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_14 0x4E895EC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_15 0x4E895F0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_16 0x4E895F4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_17 0x4E895F8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_18 0x4E895FC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_19 0x4E89600
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_20 0x4E89604
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_21 0x4E89608
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_22 0x4E8960C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_23 0x4E89610
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_24 0x4E89614
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_25 0x4E89618
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_26 0x4E8961C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_27 0x4E89620
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_28 0x4E89624
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_29 0x4E89628
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_30 0x4E8962C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_31 0x4E89630
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_32 0x4E89634
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_33 0x4E89638
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_34 0x4E8963C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_35 0x4E89640
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_36 0x4E89644
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_37 0x4E89648
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_38 0x4E8964C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_39 0x4E89650
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_40 0x4E89654
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_41 0x4E89658
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_42 0x4E8965C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_43 0x4E89660
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_44 0x4E89664
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_45 0x4E89668
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_46 0x4E8966C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_47 0x4E89670
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_48 0x4E89674
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_49 0x4E89678
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_50 0x4E8967C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_51 0x4E89680
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_52 0x4E89684
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_53 0x4E89688
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_54 0x4E8968C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_55 0x4E89690
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_56 0x4E89694
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_57 0x4E89698
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_58 0x4E8969C
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_59 0x4E896A0
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_60 0x4E896A4
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_61 0x4E896A8
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_62 0x4E896AC
+
+#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_63 0x4E896B0
+
+#endif /* ASIC_REG_ARC_FARM_ARC0_DUP_ENG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
new file mode 100644
index 000000000000..12d6a124a2e9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_MASKS_H_
+#define ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_MASKS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA_CTX_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_ASID */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_MASK 0x3FF
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT 16
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_MASK 0x3FF0000
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_MASK 0x10
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_WR_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_RD_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_RD_MASK 0x10
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_WR_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_RD_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_RD_MASK 0x10
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_IND_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_IND_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_DTYPE_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_DTYPE_MASK 0xF0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_OP_SHIFT 8
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_OP_MASK 0x300
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_ROUND_SHIFT 12
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_ROUND_MASK 0x3000
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_MAX_SHIFT 16
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_MAX_MASK 0x10000
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_IND_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_IND_MASK 0x3
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_ADDITION_SIZE_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_ADDITION_SIZE_MASK 0xFF0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_MSB_MASK_SHIFT 12
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_MSB_MASK_MASK 0x1F000
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_QOS */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_WR_MASK 0xF
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_RD_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_RD_MASK 0x70
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_27_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_27_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_28_SHIFT 1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_28_MASK 0x2
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_30_SHIFT 2
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_30_MASK 0x4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_31_SHIFT 3
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_31_MASK 0x8
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_WR_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_RD_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_RD_MASK 0x10
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_CORE */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_WR_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_WR_MASK 0x1
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_RD_SHIFT 4
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_RD_MASK 0x10
+
+/* ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD */
+#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_X_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_X_MASK 0x1F
+#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_Y_SHIFT 8
+#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_Y_MASK 0xF00
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI_VAL_MASK 0x3FF
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI */
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI_VAL_MASK 0x3FF
+
+/* ARC_FARM_KDMA_CTX_AXUSER_LB_COORD */
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_COORD_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_COORD_VAL_MASK 0x3FF
+
+/* ARC_FARM_KDMA_CTX_AXUSER_LB_LOCK */
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_LOCK_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_LOCK_VAL_MASK 0x1
+
+/* ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD */
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_21_11_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_21_11_MASK 0x7FF
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_22_SHIFT 12
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_22_MASK 0x1000
+
+/* ARC_FARM_KDMA_CTX_AXUSER_LB_OVRD */
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_OVRD_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_AXUSER_LB_OVRD_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
new file mode 100644
index 000000000000..23f9d2df52a7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_REGS_H_
+#define ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA_CTX_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_ASID 0x4E8B800
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP 0x4E8B804
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER 0x4E8B808
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP 0x4E8B80C
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION 0x4E8B810
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC 0x4E8B814
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_QOS 0x4E8B818
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RSVD 0x4E8B81C
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE 0x4E8B820
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_CORE 0x4E8B824
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_E2E_COORD 0x4E8B828
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO 0x4E8B830
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI 0x4E8B834
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO 0x4E8B838
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI 0x4E8B83C
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_LB_COORD 0x4E8B840
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_LB_LOCK 0x4E8B844
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_LB_RSVD 0x4E8B848
+
+#define mmARC_FARM_KDMA_CTX_AXUSER_LB_OVRD 0x4E8B84C
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
new file mode 100644
index 000000000000..bee4de0b28d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_MASKS_H_
+#define ASIC_REG_ARC_FARM_KDMA_CTX_MASKS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA_CTX
+ * (Prototype: DMA_CORE_CTX)
+ *****************************************
+ */
+
+/* ARC_FARM_KDMA_CTX_RATE_LIM_TKN */
+#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_RD_SHIFT 0
+#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_RD_MASK 0xFF
+#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_WR_SHIFT 16
+#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_WR_MASK 0xFF0000
+
+/* ARC_FARM_KDMA_CTX_PWRLP */
+#define ARC_FARM_KDMA_CTX_PWRLP_DATA_SHIFT 0
+#define ARC_FARM_KDMA_CTX_PWRLP_DATA_MASK 0xFF
+#define ARC_FARM_KDMA_CTX_PWRLP_EN_SHIFT 8
+#define ARC_FARM_KDMA_CTX_PWRLP_EN_MASK 0x100
+
+/* ARC_FARM_KDMA_CTX_TE_NUMROWS */
+#define ARC_FARM_KDMA_CTX_TE_NUMROWS_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_TE_NUMROWS_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_IDX */
+#define ARC_FARM_KDMA_CTX_IDX_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_IDX_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_CTX_IDX_INC */
+#define ARC_FARM_KDMA_CTX_IDX_INC_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_IDX_INC_VAL_MASK 0xFF
+
+/* ARC_FARM_KDMA_CTX_CTRL */
+#define ARC_FARM_KDMA_CTX_CTRL_TRANSPOSE_SHIFT 0
+#define ARC_FARM_KDMA_CTX_CTRL_TRANSPOSE_MASK 0x1
+#define ARC_FARM_KDMA_CTX_CTRL_DTYPE_SHIFT 4
+#define ARC_FARM_KDMA_CTX_CTRL_DTYPE_MASK 0x30
+#define ARC_FARM_KDMA_CTX_CTRL_COMPRESS_SHIFT 8
+#define ARC_FARM_KDMA_CTX_CTRL_COMPRESS_MASK 0x100
+#define ARC_FARM_KDMA_CTX_CTRL_DECOMPRESS_SHIFT 9
+#define ARC_FARM_KDMA_CTX_CTRL_DECOMPRESS_MASK 0x200
+#define ARC_FARM_KDMA_CTX_CTRL_RD_UNCACHEABLE_SHIFT 12
+#define ARC_FARM_KDMA_CTX_CTRL_RD_UNCACHEABLE_MASK 0x1000
+
+/* ARC_FARM_KDMA_CTX_SRC_TSIZE_0 */
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_0_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_TSIZE_1 */
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_1_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_STRIDE_1 */
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_1_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_TSIZE_2 */
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_2_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_STRIDE_2 */
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_2_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_TSIZE_3 */
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_3_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_STRIDE_3 */
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_3_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_TSIZE_4 */
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_4_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_STRIDE_4 */
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_4_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_TSIZE_1 */
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_1_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_STRIDE_1 */
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_1_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_TSIZE_2 */
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_2_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_STRIDE_2 */
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_2_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_TSIZE_3 */
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_3_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_STRIDE_3 */
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_3_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_TSIZE_4 */
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_4_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_STRIDE_4 */
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_4_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI */
+#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO */
+#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_WR_COMP_WDATA */
+#define ARC_FARM_KDMA_CTX_WR_COMP_WDATA_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_OFFSET_LO */
+#define ARC_FARM_KDMA_CTX_SRC_OFFSET_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_OFFSET_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_OFFSET_HI */
+#define ARC_FARM_KDMA_CTX_SRC_OFFSET_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_OFFSET_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_OFFSET_LO */
+#define ARC_FARM_KDMA_CTX_DST_OFFSET_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_OFFSET_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_OFFSET_HI */
+#define ARC_FARM_KDMA_CTX_DST_OFFSET_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_OFFSET_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_BASE_LO */
+#define ARC_FARM_KDMA_CTX_SRC_BASE_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_SRC_BASE_HI */
+#define ARC_FARM_KDMA_CTX_SRC_BASE_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_SRC_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_BASE_LO */
+#define ARC_FARM_KDMA_CTX_DST_BASE_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_BASE_HI */
+#define ARC_FARM_KDMA_CTX_DST_BASE_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_DST_TSIZE_0 */
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_0_VAL_SHIFT 0
+#define ARC_FARM_KDMA_CTX_DST_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_CTX_COMMIT */
+#define ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_SHIFT 0
+#define ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK 0x1
+#define ARC_FARM_KDMA_CTX_COMMIT_ENDIAN_SWAP_SHIFT 1
+#define ARC_FARM_KDMA_CTX_COMMIT_ENDIAN_SWAP_MASK 0x6
+#define ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_SHIFT 4
+#define ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK 0x10
+#define ARC_FARM_KDMA_CTX_COMMIT_BF16_SHIFT 6
+#define ARC_FARM_KDMA_CTX_COMMIT_BF16_MASK 0x40
+#define ARC_FARM_KDMA_CTX_COMMIT_FP16_SHIFT 7
+#define ARC_FARM_KDMA_CTX_COMMIT_FP16_MASK 0x80
+#define ARC_FARM_KDMA_CTX_COMMIT_CTX_ID_INC_SHIFT 8
+#define ARC_FARM_KDMA_CTX_COMMIT_CTX_ID_INC_MASK 0x100
+#define ARC_FARM_KDMA_CTX_COMMIT_ADD_OFFSET_0_SHIFT 9
+#define ARC_FARM_KDMA_CTX_COMMIT_ADD_OFFSET_0_MASK 0x200
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE0_FROM_DST_SIZE0_SHIFT 10
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE0_FROM_DST_SIZE0_MASK 0x400
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_OFST_FROM_DST_OFST_SHIFT 11
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_OFST_FROM_DST_OFST_MASK 0x800
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM1_SHIFT 12
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM1_MASK 0x1000
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM2_SHIFT 13
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM2_MASK 0x2000
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM3_SHIFT 14
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM3_MASK 0x4000
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM4_SHIFT 15
+#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM4_MASK 0x8000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE1_FROM_DST_SIZE1_SHIFT 16
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE1_FROM_DST_SIZE1_MASK 0x10000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE2_FROM_DST_SIZE2_SHIFT 17
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE2_FROM_DST_SIZE2_MASK 0x20000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE3_FROM_DST_SIZE3_SHIFT 18
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE3_FROM_DST_SIZE3_MASK 0x40000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE4_FROM_DST_SIZE4_SHIFT 19
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE4_FROM_DST_SIZE4_MASK 0x80000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD1_FROM_DST_STRD1_SHIFT 20
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD1_FROM_DST_STRD1_MASK 0x100000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD2_FROM_DST_STRD2_SHIFT 21
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD2_FROM_DST_STRD2_MASK 0x200000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD3_FROM_DST_STRD3_SHIFT 22
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD3_FROM_DST_STRD3_MASK 0x400000
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD4_FROM_DST_STRD4_SHIFT 23
+#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD4_FROM_DST_STRD4_MASK 0x800000
+#define ARC_FARM_KDMA_CTX_COMMIT_LIN_SHIFT 31
+#define ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK 0x80000000
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
new file mode 100644
index 000000000000..b9f09e8199e6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_REGS_H_
+#define ASIC_REG_ARC_FARM_KDMA_CTX_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA_CTX
+ * (Prototype: DMA_CORE_CTX)
+ *****************************************
+ */
+
+#define mmARC_FARM_KDMA_CTX_RATE_LIM_TKN 0x4E8B860
+
+#define mmARC_FARM_KDMA_CTX_PWRLP 0x4E8B864
+
+#define mmARC_FARM_KDMA_CTX_TE_NUMROWS 0x4E8B868
+
+#define mmARC_FARM_KDMA_CTX_IDX 0x4E8B86C
+
+#define mmARC_FARM_KDMA_CTX_IDX_INC 0x4E8B870
+
+#define mmARC_FARM_KDMA_CTX_CTRL 0x4E8B874
+
+#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_0 0x4E8B878
+
+#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_1 0x4E8B87C
+
+#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_1 0x4E8B880
+
+#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_2 0x4E8B884
+
+#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_2 0x4E8B888
+
+#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_3 0x4E8B88C
+
+#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_3 0x4E8B890
+
+#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_4 0x4E8B894
+
+#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_4 0x4E8B898
+
+#define mmARC_FARM_KDMA_CTX_DST_TSIZE_1 0x4E8B89C
+
+#define mmARC_FARM_KDMA_CTX_DST_STRIDE_1 0x4E8B8A0
+
+#define mmARC_FARM_KDMA_CTX_DST_TSIZE_2 0x4E8B8A4
+
+#define mmARC_FARM_KDMA_CTX_DST_STRIDE_2 0x4E8B8A8
+
+#define mmARC_FARM_KDMA_CTX_DST_TSIZE_3 0x4E8B8AC
+
+#define mmARC_FARM_KDMA_CTX_DST_STRIDE_3 0x4E8B8B0
+
+#define mmARC_FARM_KDMA_CTX_DST_TSIZE_4 0x4E8B8B4
+
+#define mmARC_FARM_KDMA_CTX_DST_STRIDE_4 0x4E8B8B8
+
+#define mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI 0x4E8B8BC
+
+#define mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO 0x4E8B8C0
+
+#define mmARC_FARM_KDMA_CTX_WR_COMP_WDATA 0x4E8B8C4
+
+#define mmARC_FARM_KDMA_CTX_SRC_OFFSET_LO 0x4E8B8C8
+
+#define mmARC_FARM_KDMA_CTX_SRC_OFFSET_HI 0x4E8B8CC
+
+#define mmARC_FARM_KDMA_CTX_DST_OFFSET_LO 0x4E8B8D0
+
+#define mmARC_FARM_KDMA_CTX_DST_OFFSET_HI 0x4E8B8D4
+
+#define mmARC_FARM_KDMA_CTX_SRC_BASE_LO 0x4E8B8D8
+
+#define mmARC_FARM_KDMA_CTX_SRC_BASE_HI 0x4E8B8DC
+
+#define mmARC_FARM_KDMA_CTX_DST_BASE_LO 0x4E8B8E0
+
+#define mmARC_FARM_KDMA_CTX_DST_BASE_HI 0x4E8B8E4
+
+#define mmARC_FARM_KDMA_CTX_DST_TSIZE_0 0x4E8B8E8
+
+#define mmARC_FARM_KDMA_CTX_COMMIT 0x4E8B8EC
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
new file mode 100644
index 000000000000..d6dd2c066fa9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_KDMA_CGM_REGS_H_
+#define ASIC_REG_ARC_FARM_KDMA_KDMA_CGM_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA_KDMA_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmARC_FARM_KDMA_KDMA_CGM_CFG 0x4E8BE00
+
+#define mmARC_FARM_KDMA_KDMA_CGM_STS 0x4E8BE04
+
+#define mmARC_FARM_KDMA_KDMA_CGM_CFG1 0x4E8BE08
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_KDMA_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
new file mode 100644
index 000000000000..5903dbacec80
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_MASKS_H_
+#define ASIC_REG_ARC_FARM_KDMA_MASKS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA
+ * (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+/* ARC_FARM_KDMA_CFG_0 */
+#define ARC_FARM_KDMA_CFG_0_EN_SHIFT 0
+#define ARC_FARM_KDMA_CFG_0_EN_MASK 0x1
+
+/* ARC_FARM_KDMA_CFG_1 */
+#define ARC_FARM_KDMA_CFG_1_HALT_SHIFT 0
+#define ARC_FARM_KDMA_CFG_1_HALT_MASK 0x1
+#define ARC_FARM_KDMA_CFG_1_FLUSH_SHIFT 1
+#define ARC_FARM_KDMA_CFG_1_FLUSH_MASK 0x2
+
+/* ARC_FARM_KDMA_PROT */
+#define ARC_FARM_KDMA_PROT_VAL_SHIFT 0
+#define ARC_FARM_KDMA_PROT_VAL_MASK 0x1
+#define ARC_FARM_KDMA_PROT_ERR_VAL_SHIFT 1
+#define ARC_FARM_KDMA_PROT_ERR_VAL_MASK 0x2
+
+/* ARC_FARM_KDMA_CKG */
+#define ARC_FARM_KDMA_CKG_HBW_RBUF_SHIFT 0
+#define ARC_FARM_KDMA_CKG_HBW_RBUF_MASK 0x1
+#define ARC_FARM_KDMA_CKG_LBW_RBUF_KDMA_SHIFT 1
+#define ARC_FARM_KDMA_CKG_LBW_RBUF_KDMA_MASK 0x2
+#define ARC_FARM_KDMA_CKG_TE_SHIFT 2
+#define ARC_FARM_KDMA_CKG_TE_MASK 0x4
+
+/* ARC_FARM_KDMA_RD_GLBL */
+#define ARC_FARM_KDMA_RD_GLBL_LBW_VIA_HBW_SHIFT 0
+#define ARC_FARM_KDMA_RD_GLBL_LBW_VIA_HBW_MASK 0x1
+#define ARC_FARM_KDMA_RD_GLBL_HBW_FORCE_MISS_SHIFT 4
+#define ARC_FARM_KDMA_RD_GLBL_HBW_FORCE_MISS_MASK 0x10
+#define ARC_FARM_KDMA_RD_GLBL_LBW_FORCE_MISS_SHIFT 5
+#define ARC_FARM_KDMA_RD_GLBL_LBW_FORCE_MISS_MASK 0x20
+
+/* ARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND */
+#define ARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define ARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* ARC_FARM_KDMA_RD_HBW_MAX_SIZE */
+#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_DATA_SHIFT 0
+#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_DATA_MASK 0xFFF
+#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_MD_SHIFT 16
+#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_MD_MASK 0xFFF0000
+
+/* ARC_FARM_KDMA_RD_HBW_ARCACHE */
+#define ARC_FARM_KDMA_RD_HBW_ARCACHE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_RD_HBW_ARCACHE_VAL_MASK 0xF
+
+/* ARC_FARM_KDMA_RD_HBW_INFLIGHTS */
+#define ARC_FARM_KDMA_RD_HBW_INFLIGHTS_VAL_SHIFT 0
+#define ARC_FARM_KDMA_RD_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG */
+#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_EN_SHIFT 31
+#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* ARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND */
+#define ARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define ARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* ARC_FARM_KDMA_RD_LBW_MAX_SIZE */
+#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_DATA_SHIFT 0
+#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_DATA_MASK 0xFFF
+#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_MD_SHIFT 16
+#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_MD_MASK 0xFFF0000
+
+/* ARC_FARM_KDMA_RD_LBW_ARCACHE */
+#define ARC_FARM_KDMA_RD_LBW_ARCACHE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_RD_LBW_ARCACHE_VAL_MASK 0xF
+
+/* ARC_FARM_KDMA_RD_LBW_INFLIGHTS */
+#define ARC_FARM_KDMA_RD_LBW_INFLIGHTS_VAL_SHIFT 0
+#define ARC_FARM_KDMA_RD_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG */
+#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_EN_SHIFT 31
+#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* ARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND */
+#define ARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_WR_HBW_MAX_AWID */
+#define ARC_FARM_KDMA_WR_HBW_MAX_AWID_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_HBW_MAX_AWID_VAL_MASK 0x3FFF
+
+/* ARC_FARM_KDMA_WR_HBW_AWCACHE */
+#define ARC_FARM_KDMA_WR_HBW_AWCACHE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_HBW_AWCACHE_VAL_MASK 0xF
+
+/* ARC_FARM_KDMA_WR_HBW_INFLIGHTS */
+#define ARC_FARM_KDMA_WR_HBW_INFLIGHTS_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG */
+#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_EN_SHIFT 31
+#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* ARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND */
+#define ARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_WR_LBW_MAX_AWID */
+#define ARC_FARM_KDMA_WR_LBW_MAX_AWID_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_LBW_MAX_AWID_VAL_MASK 0x7F
+
+/* ARC_FARM_KDMA_WR_LBW_AWCACHE */
+#define ARC_FARM_KDMA_WR_LBW_AWCACHE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_LBW_AWCACHE_VAL_MASK 0xF
+
+/* ARC_FARM_KDMA_WR_LBW_INFLIGHTS */
+#define ARC_FARM_KDMA_WR_LBW_INFLIGHTS_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG */
+#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_EN_SHIFT 31
+#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* ARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND */
+#define ARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND_VAL_MASK 0x1F
+
+/* ARC_FARM_KDMA_WR_COMP_AWUSER */
+#define ARC_FARM_KDMA_WR_COMP_AWUSER_VAL_SHIFT 0
+#define ARC_FARM_KDMA_WR_COMP_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_ERR_CFG */
+#define ARC_FARM_KDMA_ERR_CFG_ERR_MSG_EN_SHIFT 0
+#define ARC_FARM_KDMA_ERR_CFG_ERR_MSG_EN_MASK 0x1
+#define ARC_FARM_KDMA_ERR_CFG_STOP_ON_ERR_SHIFT 1
+#define ARC_FARM_KDMA_ERR_CFG_STOP_ON_ERR_MASK 0x2
+
+/* ARC_FARM_KDMA_ERR_CAUSE */
+#define ARC_FARM_KDMA_ERR_CAUSE_HBW_RD_ERR_SHIFT 0
+#define ARC_FARM_KDMA_ERR_CAUSE_HBW_RD_ERR_MASK 0x1
+#define ARC_FARM_KDMA_ERR_CAUSE_HBW_WR_ERR_SHIFT 1
+#define ARC_FARM_KDMA_ERR_CAUSE_HBW_WR_ERR_MASK 0x2
+#define ARC_FARM_KDMA_ERR_CAUSE_LBW_MSG_WR_ERR_SHIFT 2
+#define ARC_FARM_KDMA_ERR_CAUSE_LBW_MSG_WR_ERR_MASK 0x4
+#define ARC_FARM_KDMA_ERR_CAUSE_DESC_OVF_SHIFT 3
+#define ARC_FARM_KDMA_ERR_CAUSE_DESC_OVF_MASK 0x8
+#define ARC_FARM_KDMA_ERR_CAUSE_LBW_RD_ERR_SHIFT 4
+#define ARC_FARM_KDMA_ERR_CAUSE_LBW_RD_ERR_MASK 0x10
+#define ARC_FARM_KDMA_ERR_CAUSE_LBW_WR_ERR_SHIFT 5
+#define ARC_FARM_KDMA_ERR_CAUSE_LBW_WR_ERR_MASK 0x20
+#define ARC_FARM_KDMA_ERR_CAUSE_TE_DESC_FIFO_OVFL_SHIFT 6
+#define ARC_FARM_KDMA_ERR_CAUSE_TE_DESC_FIFO_OVFL_MASK 0x40
+#define ARC_FARM_KDMA_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_SHIFT 7
+#define ARC_FARM_KDMA_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_MASK 0x80
+
+/* ARC_FARM_KDMA_ERRMSG_ADDR_LO */
+#define ARC_FARM_KDMA_ERRMSG_ADDR_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_ERRMSG_ADDR_HI */
+#define ARC_FARM_KDMA_ERRMSG_ADDR_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_ERRMSG_WDATA */
+#define ARC_FARM_KDMA_ERRMSG_WDATA_VAL_SHIFT 0
+#define ARC_FARM_KDMA_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS0 */
+#define ARC_FARM_KDMA_STS0_RD_REQ_CNT_SHIFT 0
+#define ARC_FARM_KDMA_STS0_RD_REQ_CNT_MASK 0x7FFF
+#define ARC_FARM_KDMA_STS0_WR_REQ_CNT_SHIFT 16
+#define ARC_FARM_KDMA_STS0_WR_REQ_CNT_MASK 0x7FFF0000
+#define ARC_FARM_KDMA_STS0_BUSY_SHIFT 31
+#define ARC_FARM_KDMA_STS0_BUSY_MASK 0x80000000
+
+/* ARC_FARM_KDMA_STS1 */
+#define ARC_FARM_KDMA_STS1_IS_HALT_SHIFT 0
+#define ARC_FARM_KDMA_STS1_IS_HALT_MASK 0x1
+
+/* ARC_FARM_KDMA_STS_RD_CTX_SEL */
+#define ARC_FARM_KDMA_STS_RD_CTX_SEL_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_CTX_SEL_VAL_MASK 0x7
+#define ARC_FARM_KDMA_STS_RD_CTX_SEL_STRIDE_SHIFT 8
+#define ARC_FARM_KDMA_STS_RD_CTX_SEL_STRIDE_MASK 0x100
+
+/* ARC_FARM_KDMA_STS_RD_CTX_SIZE */
+#define ARC_FARM_KDMA_STS_RD_CTX_SIZE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_CTX_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_RD_CTX_BASE_LO */
+#define ARC_FARM_KDMA_STS_RD_CTX_BASE_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_RD_CTX_BASE_HI */
+#define ARC_FARM_KDMA_STS_RD_CTX_BASE_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_RD_CTX_ID */
+#define ARC_FARM_KDMA_STS_RD_CTX_ID_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_CTX_ID_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO */
+#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI */
+#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR */
+#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
+#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_RDY_SHIFT 30
+#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_RDY_MASK 0x40000000
+#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VLD_SHIFT 31
+#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VLD_MASK 0x80000000
+
+/* ARC_FARM_KDMA_STS_WR_CTX_SEL */
+#define ARC_FARM_KDMA_STS_WR_CTX_SEL_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_CTX_SEL_VAL_MASK 0x7
+#define ARC_FARM_KDMA_STS_WR_CTX_SEL_STRIDE_SHIFT 8
+#define ARC_FARM_KDMA_STS_WR_CTX_SEL_STRIDE_MASK 0x100
+
+/* ARC_FARM_KDMA_STS_WR_CTX_SIZE */
+#define ARC_FARM_KDMA_STS_WR_CTX_SIZE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_CTX_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_WR_CTX_BASE_LO */
+#define ARC_FARM_KDMA_STS_WR_CTX_BASE_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_WR_CTX_BASE_HI */
+#define ARC_FARM_KDMA_STS_WR_CTX_BASE_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_WR_CTX_ID */
+#define ARC_FARM_KDMA_STS_WR_CTX_ID_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_CTX_ID_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO */
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VAL_MASK 0x3FFFF
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_RDY_SHIFT 30
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_RDY_MASK 0x40000000
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VLD_SHIFT 31
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VLD_MASK 0x80000000
+
+/* ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI */
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VAL_MASK 0x3FFFF
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_RDY_SHIFT 30
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_RDY_MASK 0x40000000
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VLD_SHIFT 31
+#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VLD_MASK 0x80000000
+
+/* ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR */
+#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VAL_SHIFT 0
+#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
+#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_RDY_SHIFT 30
+#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_RDY_MASK 0x40000000
+#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VLD_SHIFT 31
+#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VLD_MASK 0x80000000
+
+/* ARC_FARM_KDMA_PWRLP_CFG */
+#define ARC_FARM_KDMA_PWRLP_CFG_GLBL_EN_SHIFT 0
+#define ARC_FARM_KDMA_PWRLP_CFG_GLBL_EN_MASK 0x1
+#define ARC_FARM_KDMA_PWRLP_CFG_CLR_SHIFT 4
+#define ARC_FARM_KDMA_PWRLP_CFG_CLR_MASK 0x10
+
+/* ARC_FARM_KDMA_PWRLP_STS */
+#define ARC_FARM_KDMA_PWRLP_STS_RLVL_SHIFT 0
+#define ARC_FARM_KDMA_PWRLP_STS_RLVL_MASK 0x7F
+#define ARC_FARM_KDMA_PWRLP_STS_WLVL_SHIFT 8
+#define ARC_FARM_KDMA_PWRLP_STS_WLVL_MASK 0x7F00
+#define ARC_FARM_KDMA_PWRLP_STS_RCNT_SHIFT 16
+#define ARC_FARM_KDMA_PWRLP_STS_RCNT_MASK 0x7F0000
+#define ARC_FARM_KDMA_PWRLP_STS_WCNT_SHIFT 23
+#define ARC_FARM_KDMA_PWRLP_STS_WCNT_MASK 0x3F800000
+#define ARC_FARM_KDMA_PWRLP_STS_RFULL_SHIFT 30
+#define ARC_FARM_KDMA_PWRLP_STS_RFULL_MASK 0x40000000
+#define ARC_FARM_KDMA_PWRLP_STS_WFULL_SHIFT 31
+#define ARC_FARM_KDMA_PWRLP_STS_WFULL_MASK 0x80000000
+
+/* ARC_FARM_KDMA_DBG_DESC_CNT */
+#define ARC_FARM_KDMA_DBG_DESC_CNT_VAL_SHIFT 0
+#define ARC_FARM_KDMA_DBG_DESC_CNT_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_DBG_STS */
+#define ARC_FARM_KDMA_DBG_STS_RD_CTX_FULL_SHIFT 0
+#define ARC_FARM_KDMA_DBG_STS_RD_CTX_FULL_MASK 0x1
+#define ARC_FARM_KDMA_DBG_STS_WR_CTX_FULL_SHIFT 1
+#define ARC_FARM_KDMA_DBG_STS_WR_CTX_FULL_MASK 0x2
+#define ARC_FARM_KDMA_DBG_STS_WR_COMP_FULL_SHIFT 2
+#define ARC_FARM_KDMA_DBG_STS_WR_COMP_FULL_MASK 0x4
+#define ARC_FARM_KDMA_DBG_STS_RD_CTX_EMPTY_SHIFT 3
+#define ARC_FARM_KDMA_DBG_STS_RD_CTX_EMPTY_MASK 0x8
+#define ARC_FARM_KDMA_DBG_STS_WR_CTX_EMPTY_SHIFT 4
+#define ARC_FARM_KDMA_DBG_STS_WR_CTX_EMPTY_MASK 0x10
+#define ARC_FARM_KDMA_DBG_STS_WR_COMP_EMPTY_SHIFT 5
+#define ARC_FARM_KDMA_DBG_STS_WR_COMP_EMPTY_MASK 0x20
+#define ARC_FARM_KDMA_DBG_STS_TE_EMPTY_SHIFT 6
+#define ARC_FARM_KDMA_DBG_STS_TE_EMPTY_MASK 0x40
+#define ARC_FARM_KDMA_DBG_STS_TE_BUSY_SHIFT 7
+#define ARC_FARM_KDMA_DBG_STS_TE_BUSY_MASK 0x80
+#define ARC_FARM_KDMA_DBG_STS_GSKT_EMPTY_SHIFT 8
+#define ARC_FARM_KDMA_DBG_STS_GSKT_EMPTY_MASK 0x100
+#define ARC_FARM_KDMA_DBG_STS_GSKT_FULL_SHIFT 9
+#define ARC_FARM_KDMA_DBG_STS_GSKT_FULL_MASK 0x200
+#define ARC_FARM_KDMA_DBG_STS_RD_AGU_CS_SHIFT 10
+#define ARC_FARM_KDMA_DBG_STS_RD_AGU_CS_MASK 0x400
+#define ARC_FARM_KDMA_DBG_STS_WR_AGU_CS_SHIFT 11
+#define ARC_FARM_KDMA_DBG_STS_WR_AGU_CS_MASK 0x800
+
+/* ARC_FARM_KDMA_DBG_BUF_STS */
+#define ARC_FARM_KDMA_DBG_BUF_STS_HBW_FULLNESS_SHIFT 0
+#define ARC_FARM_KDMA_DBG_BUF_STS_HBW_FULLNESS_MASK 0xFFF
+#define ARC_FARM_KDMA_DBG_BUF_STS_LBW_FULLNESS_SHIFT 16
+#define ARC_FARM_KDMA_DBG_BUF_STS_LBW_FULLNESS_MASK 0xFFF0000
+
+/* ARC_FARM_KDMA_DBG_RD_DESC_ID */
+#define ARC_FARM_KDMA_DBG_RD_DESC_ID_VAL_SHIFT 0
+#define ARC_FARM_KDMA_DBG_RD_DESC_ID_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_DBG_WR_DESC_ID */
+#define ARC_FARM_KDMA_DBG_WR_DESC_ID_VAL_SHIFT 0
+#define ARC_FARM_KDMA_DBG_WR_DESC_ID_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_APB_DMA_LBW_BASE */
+#define ARC_FARM_KDMA_APB_DMA_LBW_BASE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_APB_DMA_LBW_BASE_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE */
+#define ARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE_VAL_SHIFT 0
+#define ARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE_VAL_MASK 0xFFFF
+
+/* ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG */
+#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_Y_X_FORCE_SHIFT 0
+#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_Y_X_FORCE_MASK 0x1FF
+#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_FORCE_EN_SHIFT 9
+#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_FORCE_EN_MASK 0x200
+
+/* ARC_FARM_KDMA_DBG_APB_ENABLER */
+#define ARC_FARM_KDMA_DBG_APB_ENABLER_DIS_SHIFT 0
+#define ARC_FARM_KDMA_DBG_APB_ENABLER_DIS_MASK 0x1
+
+/* ARC_FARM_KDMA_L2H_CMPR_LO */
+#define ARC_FARM_KDMA_L2H_CMPR_LO_VAL_SHIFT 20
+#define ARC_FARM_KDMA_L2H_CMPR_LO_VAL_MASK 0xFFF00000
+
+/* ARC_FARM_KDMA_L2H_CMPR_HI */
+#define ARC_FARM_KDMA_L2H_CMPR_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_L2H_CMPR_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_L2H_MASK_LO */
+#define ARC_FARM_KDMA_L2H_MASK_LO_VAL_SHIFT 20
+#define ARC_FARM_KDMA_L2H_MASK_LO_VAL_MASK 0xFFF00000
+
+/* ARC_FARM_KDMA_L2H_MASK_HI */
+#define ARC_FARM_KDMA_L2H_MASK_HI_VAL_SHIFT 0
+#define ARC_FARM_KDMA_L2H_MASK_HI_VAL_MASK 0xFFFFFFFF
+
+/* ARC_FARM_KDMA_IDLE_IND_MASK */
+#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_SHIFT 0
+#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_MASK 0x1
+#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_SHIFT 1
+#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_MASK 0x2
+#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_SHIFT 2
+#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_MASK 0x4
+#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_SHIFT 3
+#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_MASK 0x8
+#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_CNT_STS_SHIFT 8
+#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_CNT_STS_MASK 0x1F00
+#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_CNT_STS_SHIFT 16
+#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_CNT_STS_MASK 0x1F0000
+#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_EMPTY_SHIFT 24
+#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_EMPTY_MASK 0x1000000
+#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_IDLE_STS_SHIFT 25
+#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_IDLE_STS_MASK 0x2000000
+
+/* ARC_FARM_KDMA_APB_ENABLER */
+#define ARC_FARM_KDMA_APB_ENABLER_DIS_SHIFT 0
+#define ARC_FARM_KDMA_APB_ENABLER_DIS_MASK 0x1
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
new file mode 100644
index 000000000000..e312cf810c0e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ARC_FARM_KDMA_REGS_H_
+#define ASIC_REG_ARC_FARM_KDMA_REGS_H_
+
+/*
+ *****************************************
+ * ARC_FARM_KDMA
+ * (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmARC_FARM_KDMA_CFG_0 0x4E8B000
+
+#define mmARC_FARM_KDMA_CFG_1 0x4E8B004
+
+#define mmARC_FARM_KDMA_PROT 0x4E8B008
+
+#define mmARC_FARM_KDMA_CKG 0x4E8B00C
+
+#define mmARC_FARM_KDMA_RD_GLBL 0x4E8B07C
+
+#define mmARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND 0x4E8B080
+
+#define mmARC_FARM_KDMA_RD_HBW_MAX_SIZE 0x4E8B084
+
+#define mmARC_FARM_KDMA_RD_HBW_ARCACHE 0x4E8B088
+
+#define mmARC_FARM_KDMA_RD_HBW_INFLIGHTS 0x4E8B090
+
+#define mmARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG 0x4E8B094
+
+#define mmARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND 0x4E8B0C0
+
+#define mmARC_FARM_KDMA_RD_LBW_MAX_SIZE 0x4E8B0C4
+
+#define mmARC_FARM_KDMA_RD_LBW_ARCACHE 0x4E8B0C8
+
+#define mmARC_FARM_KDMA_RD_LBW_INFLIGHTS 0x4E8B0D0
+
+#define mmARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG 0x4E8B0D4
+
+#define mmARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND 0x4E8B100
+
+#define mmARC_FARM_KDMA_WR_HBW_MAX_AWID 0x4E8B104
+
+#define mmARC_FARM_KDMA_WR_HBW_AWCACHE 0x4E8B108
+
+#define mmARC_FARM_KDMA_WR_HBW_INFLIGHTS 0x4E8B10C
+
+#define mmARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG 0x4E8B110
+
+#define mmARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND 0x4E8B140
+
+#define mmARC_FARM_KDMA_WR_LBW_MAX_AWID 0x4E8B144
+
+#define mmARC_FARM_KDMA_WR_LBW_AWCACHE 0x4E8B148
+
+#define mmARC_FARM_KDMA_WR_LBW_INFLIGHTS 0x4E8B14C
+
+#define mmARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG 0x4E8B150
+
+#define mmARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND 0x4E8B180
+
+#define mmARC_FARM_KDMA_WR_COMP_AWUSER 0x4E8B184
+
+#define mmARC_FARM_KDMA_ERR_CFG 0x4E8B300
+
+#define mmARC_FARM_KDMA_ERR_CAUSE 0x4E8B304
+
+#define mmARC_FARM_KDMA_ERRMSG_ADDR_LO 0x4E8B308
+
+#define mmARC_FARM_KDMA_ERRMSG_ADDR_HI 0x4E8B30C
+
+#define mmARC_FARM_KDMA_ERRMSG_WDATA 0x4E8B310
+
+#define mmARC_FARM_KDMA_STS0 0x4E8B380
+
+#define mmARC_FARM_KDMA_STS1 0x4E8B384
+
+#define mmARC_FARM_KDMA_STS_RD_CTX_SEL 0x4E8B400
+
+#define mmARC_FARM_KDMA_STS_RD_CTX_SIZE 0x4E8B404
+
+#define mmARC_FARM_KDMA_STS_RD_CTX_BASE_LO 0x4E8B408
+
+#define mmARC_FARM_KDMA_STS_RD_CTX_BASE_HI 0x4E8B40C
+
+#define mmARC_FARM_KDMA_STS_RD_CTX_ID 0x4E8B410
+
+#define mmARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO 0x4E8B414
+
+#define mmARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI 0x4E8B418
+
+#define mmARC_FARM_KDMA_STS_RD_LB_AXI_ADDR 0x4E8B41C
+
+#define mmARC_FARM_KDMA_STS_WR_CTX_SEL 0x4E8B420
+
+#define mmARC_FARM_KDMA_STS_WR_CTX_SIZE 0x4E8B424
+
+#define mmARC_FARM_KDMA_STS_WR_CTX_BASE_LO 0x4E8B428
+
+#define mmARC_FARM_KDMA_STS_WR_CTX_BASE_HI 0x4E8B42C
+
+#define mmARC_FARM_KDMA_STS_WR_CTX_ID 0x4E8B430
+
+#define mmARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO 0x4E8B434
+
+#define mmARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI 0x4E8B438
+
+#define mmARC_FARM_KDMA_STS_WR_LB_AXI_ADDR 0x4E8B43C
+
+#define mmARC_FARM_KDMA_PWRLP_CFG 0x4E8B700
+
+#define mmARC_FARM_KDMA_PWRLP_STS 0x4E8B704
+
+#define mmARC_FARM_KDMA_DBG_DESC_CNT 0x4E8B710
+
+#define mmARC_FARM_KDMA_DBG_STS 0x4E8B714
+
+#define mmARC_FARM_KDMA_DBG_BUF_STS 0x4E8B718
+
+#define mmARC_FARM_KDMA_DBG_RD_DESC_ID 0x4E8B720
+
+#define mmARC_FARM_KDMA_DBG_WR_DESC_ID 0x4E8B724
+
+#define mmARC_FARM_KDMA_APB_DMA_LBW_BASE 0x4E8B728
+
+#define mmARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE 0x4E8B72C
+
+#define mmARC_FARM_KDMA_E2E_CRED_ASYNC_CFG 0x4E8B730
+
+#define mmARC_FARM_KDMA_DBG_APB_ENABLER 0x4E8BE1C
+
+#define mmARC_FARM_KDMA_L2H_CMPR_LO 0x4E8BE20
+
+#define mmARC_FARM_KDMA_L2H_CMPR_HI 0x4E8BE24
+
+#define mmARC_FARM_KDMA_L2H_MASK_LO 0x4E8BE28
+
+#define mmARC_FARM_KDMA_L2H_MASK_HI 0x4E8BE2C
+
+#define mmARC_FARM_KDMA_IDLE_IND_MASK 0x4E8BE30
+
+#define mmARC_FARM_KDMA_APB_ENABLER 0x4E8BE34
+
+#endif /* ASIC_REG_ARC_FARM_KDMA_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
new file mode 100644
index 000000000000..9b3eceec9d5d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
@@ -0,0 +1,777 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_IF_REGS_H_
+#define ASIC_REG_CPU_IF_REGS_H_
+
+/*
+ *****************************************
+ * CPU_IF
+ * (Prototype: CPU_IF)
+ *****************************************
+ */
+
+#define mmCPU_IF_ARUSER_OVR 0x4CC1104
+
+#define mmCPU_IF_ARUSER_OVR_EN 0x4CC1108
+
+#define mmCPU_IF_AWUSER_OVR 0x4CC110C
+
+#define mmCPU_IF_AWUSER_OVR_EN 0x4CC1110
+
+#define mmCPU_IF_ARUSER_MSB_OVR 0x4CC1114
+
+#define mmCPU_IF_AWUSER_MSB_OVR 0x4CC1120
+
+#define mmCPU_IF_AXCACHE_OVR 0x4CC1128
+
+#define mmCPU_IF_LOCK_OVR 0x4CC112C
+
+#define mmCPU_IF_PROT_OVR 0x4CC1130
+
+#define mmCPU_IF_MAX_OUTSTANDING 0x4CC1134
+
+#define mmCPU_IF_EARLY_BRESP_EN 0x4CC1138
+
+#define mmCPU_IF_FORCE_RSP_OK 0x4CC113C
+
+#define mmCPU_IF_CPU_SEI_INTR_STS 0x4CC1140
+
+#define mmCPU_IF_CPU_SEI_INTR_CLR 0x4CC1144
+
+#define mmCPU_IF_CPU_SEI_INTR_MASK 0x4CC1148
+
+#define mmCPU_IF_AXI_SPLIT_NO_WR_INFLIGHT 0x4CC114C
+
+#define mmCPU_IF_AXI_SPLIT_SEI_INTR_ID 0x4CC1150
+
+#define mmCPU_IF_TOTAL_WR_CNT 0x4CC1154
+
+#define mmCPU_IF_INFLIGHT_WR_CNT 0x4CC1158
+
+#define mmCPU_IF_TOTAL_RD_CNT 0x4CC115C
+
+#define mmCPU_IF_INFLIGHT_RD_CNT 0x4CC1160
+
+#define mmCPU_IF_SRAM_MSB_ADDR 0x4CC1164
+
+#define mmCPU_IF_CFG_MSB_ADDR 0x4CC1168
+
+#define mmCPU_IF_HBM_MSB_ADDR 0x4CC116C
+
+#define mmCPU_IF_PCIE_MSB_ADDR 0x4CC1170
+
+#define mmCPU_IF_KMD_HW_DIRTY_STATUS 0x4CC1174
+
+#define mmCPU_IF_MSTR_IF_E2E_FORCE_BP 0x4CC1188
+
+#define mmCPU_IF_MSTR_IF_E2E_GRCFL_CLR 0x4CC118C
+
+#define mmCPU_IF_LBW_TERMINATE_AWADDR_ERR 0x4CC11A0
+
+#define mmCPU_IF_LBW_TERMINATE_ARADDR_ERR 0x4CC11A4
+
+#define mmCPU_IF_CFG_LBW_TERMINATE_BRESP 0x4CC11A8
+
+#define mmCPU_IF_CFG_LBW_TERMINATE_RRESP 0x4CC11AC
+
+#define mmCPU_IF_PF_PQ_PI 0x4CC1200
+
+#define mmCPU_IF_PQ_BASE_ADDR_LOW 0x4CC1204
+
+#define mmCPU_IF_PQ_BASE_ADDR_HIGH 0x4CC1208
+
+#define mmCPU_IF_PQ_LENGTH 0x4CC120C
+
+#define mmCPU_IF_CQ_BASE_ADDR_LOW 0x4CC1210
+
+#define mmCPU_IF_CQ_BASE_ADDR_HIGH 0x4CC1214
+
+#define mmCPU_IF_CQ_LENGTH 0x4CC1218
+
+#define mmCPU_IF_EQ_BASE_ADDR_LOW 0x4CC1220
+
+#define mmCPU_IF_EQ_BASE_ADDR_HIGH 0x4CC1224
+
+#define mmCPU_IF_EQ_LENGTH 0x4CC1228
+
+#define mmCPU_IF_EQ_RD_OFFS 0x4CC122C
+
+#define mmCPU_IF_QUEUE_INIT 0x4CC1230
+
+#define mmCPU_IF_TPC_SERR_INTR_STS 0x4CC1300
+
+#define mmCPU_IF_TPC_SERR_INTR_CLR 0x4CC1304
+
+#define mmCPU_IF_TPC_SERR_INTR_MASK 0x4CC1308
+
+#define mmCPU_IF_TPC_DERR_INTR_STS 0x4CC1310
+
+#define mmCPU_IF_TPC_DERR_INTR_CLR 0x4CC1314
+
+#define mmCPU_IF_TPC_DERR_INTR_MASK 0x4CC1318
+
+#define mmCPU_IF_MME_SERR_INTR_STS_0 0x4CC1320
+
+#define mmCPU_IF_MME_SERR_INTR_STS_1 0x4CC1324
+
+#define mmCPU_IF_MME_SERR_INTR_STS_2 0x4CC1328
+
+#define mmCPU_IF_MME_SERR_INTR_STS_3 0x4CC132C
+
+#define mmCPU_IF_MME_SERR_INTR_CLR_0 0x4CC1330
+
+#define mmCPU_IF_MME_SERR_INTR_CLR_1 0x4CC1334
+
+#define mmCPU_IF_MME_SERR_INTR_CLR_2 0x4CC1338
+
+#define mmCPU_IF_MME_SERR_INTR_CLR_3 0x4CC133C
+
+#define mmCPU_IF_MME_SERR_INTR_MASK_0 0x4CC1340
+
+#define mmCPU_IF_MME_SERR_INTR_MASK_1 0x4CC1344
+
+#define mmCPU_IF_MME_SERR_INTR_MASK_2 0x4CC1348
+
+#define mmCPU_IF_MME_SERR_INTR_MASK_3 0x4CC134C
+
+#define mmCPU_IF_MME_DERR_INTR_STS_0 0x4CC1350
+
+#define mmCPU_IF_MME_DERR_INTR_STS_1 0x4CC1354
+
+#define mmCPU_IF_MME_DERR_INTR_STS_2 0x4CC1358
+
+#define mmCPU_IF_MME_DERR_INTR_STS_3 0x4CC135C
+
+#define mmCPU_IF_MME_DERR_INTR_CLR_0 0x4CC1360
+
+#define mmCPU_IF_MME_DERR_INTR_CLR_1 0x4CC1364
+
+#define mmCPU_IF_MME_DERR_INTR_CLR_2 0x4CC1368
+
+#define mmCPU_IF_MME_DERR_INTR_CLR_3 0x4CC136C
+
+#define mmCPU_IF_MME_DERR_INTR_MASK_0 0x4CC1370
+
+#define mmCPU_IF_MME_DERR_INTR_MASK_1 0x4CC1374
+
+#define mmCPU_IF_MME_DERR_INTR_MASK_2 0x4CC1378
+
+#define mmCPU_IF_MME_DERR_INTR_MASK_3 0x4CC137C
+
+#define mmCPU_IF_HDMA_SERR_INTR_STS 0x4CC1380
+
+#define mmCPU_IF_HDMA_SERR_INTR_CLR 0x4CC1384
+
+#define mmCPU_IF_HDMA_SERR_INTR_MASK 0x4CC1388
+
+#define mmCPU_IF_HDMA_DERR_INTR_STS 0x4CC1390
+
+#define mmCPU_IF_HDMA_DERR_INTR_CLR 0x4CC1394
+
+#define mmCPU_IF_HDMA_DERR_INTR_MASK 0x4CC1398
+
+#define mmCPU_IF_PDMA_SERR_INTR_STS 0x4CC13A0
+
+#define mmCPU_IF_PDMA_SERR_INTR_CLR 0x4CC13A4
+
+#define mmCPU_IF_PDMA_SERR_INTR_MASK 0x4CC13A8
+
+#define mmCPU_IF_PDMA_DERR_INTR_STS 0x4CC13B0
+
+#define mmCPU_IF_PDMA_DERR_INTR_CLR 0x4CC13B4
+
+#define mmCPU_IF_PDMA_DERR_INTR_MASK 0x4CC13B8
+
+#define mmCPU_IF_SRAM_SERR_INTR_STS 0x4CC13C0
+
+#define mmCPU_IF_SRAM_SERR_INTR_CLR 0x4CC13C4
+
+#define mmCPU_IF_SRAM_SERR_INTR_MASK 0x4CC13C8
+
+#define mmCPU_IF_SRAM_DERR_INTR_STS 0x4CC13D0
+
+#define mmCPU_IF_SRAM_DERR_INTR_CLR 0x4CC13D4
+
+#define mmCPU_IF_SRAM_DERR_INTR_MASK 0x4CC13D8
+
+#define mmCPU_IF_HBM_SERR_INTR_STS 0x4CC13E0
+
+#define mmCPU_IF_HBM_SERR_INTR_CLR 0x4CC13E4
+
+#define mmCPU_IF_HBM_SERR_INTR_MASK 0x4CC13E8
+
+#define mmCPU_IF_HBM_DERR_INTR_STS 0x4CC13F0
+
+#define mmCPU_IF_HBM_DERR_INTR_CLR 0x4CC13F4
+
+#define mmCPU_IF_HBM_DERR_INTR_MASK 0x4CC13F8
+
+#define mmCPU_IF_HMMU_SERR_INTR_STS 0x4CC1400
+
+#define mmCPU_IF_HMMU_SERR_INTR_CLR 0x4CC1404
+
+#define mmCPU_IF_HMMU_SERR_INTR_MASK 0x4CC1408
+
+#define mmCPU_IF_HMMU_DERR_INTR_STS 0x4CC1410
+
+#define mmCPU_IF_HMMU_DERR_INTR_CLR 0x4CC1414
+
+#define mmCPU_IF_HMMU_DERR_INTR_MASK 0x4CC1418
+
+#define mmCPU_IF_DEC_SERR_INTR_STS 0x4CC1420
+
+#define mmCPU_IF_DEC_SERR_INTR_CLR 0x4CC1424
+
+#define mmCPU_IF_DEC_SERR_INTR_MASK 0x4CC1428
+
+#define mmCPU_IF_DEC_DERR_INTR_STS 0x4CC1430
+
+#define mmCPU_IF_DEC_DERR_INTR_CLR 0x4CC1434
+
+#define mmCPU_IF_DEC_DERR_INTR_MASK 0x4CC1438
+
+#define mmCPU_IF_NIC_SERR_INTR_STS 0x4CC1440
+
+#define mmCPU_IF_NIC_SERR_INTR_CLR 0x4CC1444
+
+#define mmCPU_IF_NIC_SERR_INTR_MASK 0x4CC1448
+
+#define mmCPU_IF_NIC_DERR_INTR_STS 0x4CC1450
+
+#define mmCPU_IF_NIC_DERR_INTR_CLR 0x4CC1454
+
+#define mmCPU_IF_NIC_DERR_INTR_MASK 0x4CC1458
+
+#define mmCPU_IF_SYNC_MNGR_SERR_INTR_STS 0x4CC1460
+
+#define mmCPU_IF_SYNC_MNGR_SERR_INTR_CLR 0x4CC1464
+
+#define mmCPU_IF_SYNC_MNGR_SERR_INTR_MASK 0x4CC1468
+
+#define mmCPU_IF_SYNC_MNGR_DERR_INTR_STS 0x4CC1470
+
+#define mmCPU_IF_SYNC_MNGR_DERR_INTR_CLR 0x4CC1474
+
+#define mmCPU_IF_SYNC_MNGR_DERR_INTR_MASK 0x4CC1478
+
+#define mmCPU_IF_HIF_SERR_INTR_STS 0x4CC1480
+
+#define mmCPU_IF_HIF_SERR_INTR_CLR 0x4CC1484
+
+#define mmCPU_IF_HIF_SERR_INTR_MASK 0x4CC1488
+
+#define mmCPU_IF_HIF_DERR_INTR_STS 0x4CC1490
+
+#define mmCPU_IF_HIF_DERR_INTR_CLR 0x4CC1494
+
+#define mmCPU_IF_HIF_DERR_INTR_MASK 0x4CC1498
+
+#define mmCPU_IF_XBAR_SERR_INTR_STS 0x4CC14A0
+
+#define mmCPU_IF_XBAR_SERR_INTR_CLR 0x4CC14A4
+
+#define mmCPU_IF_XBAR_SERR_INTR_MASK 0x4CC14A8
+
+#define mmCPU_IF_XBAR_DERR_INTR_STS 0x4CC14B0
+
+#define mmCPU_IF_XBAR_DERR_INTR_CLR 0x4CC14B4
+
+#define mmCPU_IF_XBAR_DERR_INTR_MASK 0x4CC14B8
+
+#define mmCPU_IF_TPC_SEI_INTR_STS 0x4CC14C0
+
+#define mmCPU_IF_TPC_SEI_INTR_CLR 0x4CC14C4
+
+#define mmCPU_IF_TPC_SEI_INTR_MASK 0x4CC14C8
+
+#define mmCPU_IF_MME_SEI_INTR_STS_0 0x4CC14D0
+
+#define mmCPU_IF_MME_SEI_INTR_STS_1 0x4CC14D4
+
+#define mmCPU_IF_MME_SEI_INTR_STS_2 0x4CC14D8
+
+#define mmCPU_IF_MME_SEI_INTR_STS_3 0x4CC14DC
+
+#define mmCPU_IF_MME_SEI_INTR_CLR_0 0x4CC14E0
+
+#define mmCPU_IF_MME_SEI_INTR_CLR_1 0x4CC14E4
+
+#define mmCPU_IF_MME_SEI_INTR_CLR_2 0x4CC14E8
+
+#define mmCPU_IF_MME_SEI_INTR_CLR_3 0x4CC14EC
+
+#define mmCPU_IF_MME_SEI_INTR_MASK_0 0x4CC14F0
+
+#define mmCPU_IF_MME_SEI_INTR_MASK_1 0x4CC14F4
+
+#define mmCPU_IF_MME_SEI_INTR_MASK_2 0x4CC14F8
+
+#define mmCPU_IF_MME_SEI_INTR_MASK_3 0x4CC14FC
+
+#define mmCPU_IF_PLL_LSB_SEI_INTR_STS 0x4CC1500
+
+#define mmCPU_IF_PLL_LSB_SEI_INTR_CLR 0x4CC1504
+
+#define mmCPU_IF_PLL_LSB_SEI_INTR_MASK 0x4CC1508
+
+#define mmCPU_IF_PLL_MSB_SEI_INTR_STS 0x4CC1510
+
+#define mmCPU_IF_PLL_MSB_SEI_INTR_CLR 0x4CC1514
+
+#define mmCPU_IF_PLL_MSB_SEI_INTR_MASK 0x4CC1518
+
+#define mmCPU_IF_HMMU_SEI_INTR_STS 0x4CC1520
+
+#define mmCPU_IF_HMMU_SEI_INTR_CLR 0x4CC1524
+
+#define mmCPU_IF_HMMU_SEI_INTR_MASK 0x4CC1528
+
+#define mmCPU_IF_HDMA_SEI_INTR_STS 0x4CC1530
+
+#define mmCPU_IF_HDMA_SEI_INTR_CLR 0x4CC1534
+
+#define mmCPU_IF_HDMA_SEI_INTR_MASK 0x4CC1538
+
+#define mmCPU_IF_PDMA_SEI_INTR_STS 0x4CC1540
+
+#define mmCPU_IF_PDMA_SEI_INTR_CLR 0x4CC1544
+
+#define mmCPU_IF_PDMA_SEI_INTR_MASK 0x4CC1548
+
+#define mmCPU_IF_HBM_SEI_INTR_STS 0x4CC1550
+
+#define mmCPU_IF_HBM_SEI_INTR_CLR 0x4CC1554
+
+#define mmCPU_IF_HBM_SEI_INTR_MASK 0x4CC1558
+
+#define mmCPU_IF_DEC_SEI_INTR_STS 0x4CC1560
+
+#define mmCPU_IF_DEC_SEI_INTR_CLR 0x4CC1564
+
+#define mmCPU_IF_DEC_SEI_INTR_MASK 0x4CC1568
+
+#define mmCPU_IF_HIF_SEI_INTR_STS 0x4CC1570
+
+#define mmCPU_IF_HIF_SEI_INTR_CLR 0x4CC1574
+
+#define mmCPU_IF_HIF_SEI_INTR_MASK 0x4CC1578
+
+#define mmCPU_IF_SYNC_MNGR_SEI_INTR_STS 0x4CC1580
+
+#define mmCPU_IF_SYNC_MNGR_SEI_INTR_CLR 0x4CC1584
+
+#define mmCPU_IF_SYNC_MNGR_SEI_INTR_MASK 0x4CC1588
+
+#define mmCPU_IF_NIC_SEI_INTR_STS 0x4CC1590
+
+#define mmCPU_IF_NIC_SEI_INTR_CLR 0x4CC1594
+
+#define mmCPU_IF_NIC_SEI_INTR_MASK 0x4CC1598
+
+#define mmCPU_IF_PCIE_SPI_INTR_STS 0x4CC1600
+
+#define mmCPU_IF_PCIE_SPI_INTR_CLR 0x4CC1604
+
+#define mmCPU_IF_PCIE_SPI_INTR_MASK 0x4CC1608
+
+#define mmCPU_IF_MME_SPI_INTR_STS_0 0x4CC1610
+
+#define mmCPU_IF_MME_SPI_INTR_STS_1 0x4CC1614
+
+#define mmCPU_IF_MME_SPI_INTR_STS_2 0x4CC1618
+
+#define mmCPU_IF_MME_SPI_INTR_STS_3 0x4CC161C
+
+#define mmCPU_IF_MME_SPI_INTR_CLR_0 0x4CC1620
+
+#define mmCPU_IF_MME_SPI_INTR_CLR_1 0x4CC1624
+
+#define mmCPU_IF_MME_SPI_INTR_CLR_2 0x4CC1628
+
+#define mmCPU_IF_MME_SPI_INTR_CLR_3 0x4CC162C
+
+#define mmCPU_IF_MME_SPI_INTR_MASK_0 0x4CC1630
+
+#define mmCPU_IF_MME_SPI_INTR_MASK_1 0x4CC1634
+
+#define mmCPU_IF_MME_SPI_INTR_MASK_2 0x4CC1638
+
+#define mmCPU_IF_MME_SPI_INTR_MASK_3 0x4CC163C
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_0 0x4CC1640
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_1 0x4CC1644
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_2 0x4CC1648
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_3 0x4CC164C
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_4 0x4CC1650
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_5 0x4CC1654
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_6 0x4CC1658
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_7 0x4CC165C
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_8 0x4CC1660
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_9 0x4CC1664
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_10 0x4CC1668
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_11 0x4CC166C
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_12 0x4CC1670
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_13 0x4CC1674
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_14 0x4CC1678
+
+#define mmCPU_IF_HMMU_SPI_INTR_STS_15 0x4CC167C
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_0 0x4CC1680
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_1 0x4CC1684
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_2 0x4CC1688
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_3 0x4CC168C
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_4 0x4CC1690
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_5 0x4CC1694
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_6 0x4CC1698
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_7 0x4CC169C
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_8 0x4CC16A0
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_9 0x4CC16A4
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_10 0x4CC16A8
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_11 0x4CC16AC
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_12 0x4CC16B0
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_13 0x4CC16B4
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_14 0x4CC16B8
+
+#define mmCPU_IF_HMMU_SPI_INTR_CLR_15 0x4CC16BC
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_0 0x4CC16C0
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_1 0x4CC16C4
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_2 0x4CC16C8
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_3 0x4CC16CC
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_4 0x4CC16D0
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_5 0x4CC16D4
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_6 0x4CC16D8
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_7 0x4CC16DC
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_8 0x4CC16E0
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_9 0x4CC16E4
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_10 0x4CC16E8
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_11 0x4CC16EC
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_12 0x4CC16F0
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_13 0x4CC16F4
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_14 0x4CC16F8
+
+#define mmCPU_IF_HMMU_SPI_INTR_MASK_15 0x4CC16FC
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_0 0x4CC1700
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_1 0x4CC1704
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_2 0x4CC1708
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_3 0x4CC170C
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_4 0x4CC1710
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_5 0x4CC1714
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_6 0x4CC1718
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_7 0x4CC171C
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_8 0x4CC1720
+
+#define mmCPU_IF_DEC_SPI_INTR_STS_9 0x4CC1724
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_0 0x4CC1730
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_1 0x4CC1734
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_2 0x4CC1738
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_3 0x4CC173C
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_4 0x4CC1740
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_5 0x4CC1744
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_6 0x4CC1748
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_7 0x4CC174C
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_8 0x4CC1750
+
+#define mmCPU_IF_DEC_SPI_INTR_CLR_9 0x4CC1754
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_0 0x4CC1760
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_1 0x4CC1764
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_2 0x4CC1768
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_3 0x4CC176C
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_4 0x4CC1770
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_5 0x4CC1774
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_6 0x4CC1778
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_7 0x4CC177C
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_8 0x4CC1780
+
+#define mmCPU_IF_DEC_SPI_INTR_MASK_9 0x4CC1784
+
+#define mmCPU_IF_HIF_SPI_INTR_STS 0x4CC17A0
+
+#define mmCPU_IF_HIF_SPI_INTR_CLR 0x4CC17A4
+
+#define mmCPU_IF_HIF_SPI_INTR_MASK 0x4CC17A8
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_0 0x4CC17B0
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_1 0x4CC17B4
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_2 0x4CC17B8
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_3 0x4CC17BC
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_4 0x4CC17C0
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_5 0x4CC17C4
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_6 0x4CC17C8
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_7 0x4CC17CC
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_8 0x4CC17D0
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_9 0x4CC17D4
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_10 0x4CC17D8
+
+#define mmCPU_IF_NIC_SPI_INTR_STS_11 0x4CC17DC
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_0 0x4CC17E0
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_1 0x4CC17E4
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_2 0x4CC17E8
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_3 0x4CC17EC
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_4 0x4CC17F0
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_5 0x4CC17F4
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_6 0x4CC17F8
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_7 0x4CC17FC
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_8 0x4CC1800
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_9 0x4CC1804
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_10 0x4CC1808
+
+#define mmCPU_IF_NIC_SPI_INTR_CLR_11 0x4CC180C
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_0 0x4CC1810
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_1 0x4CC1814
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_2 0x4CC1818
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_3 0x4CC181C
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_4 0x4CC1820
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_5 0x4CC1824
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_6 0x4CC1828
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_7 0x4CC182C
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_8 0x4CC1830
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_9 0x4CC1834
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_10 0x4CC1838
+
+#define mmCPU_IF_NIC_SPI_INTR_MASK_11 0x4CC183C
+
+#define mmCPU_IF_DEC_ECO_INTR_STS 0x4CC1840
+
+#define mmCPU_IF_DEC_ECO_INTR_CLR 0x4CC1844
+
+#define mmCPU_IF_DEC_ECO_INTR_MASK 0x4CC1848
+
+#define mmCPU_IF_HIF_ECO_INTR_STS 0x4CC1850
+
+#define mmCPU_IF_HIF_ECO_INTR_CLR 0x4CC1854
+
+#define mmCPU_IF_HIF_ECO_INTR_MASK 0x4CC1858
+
+#define mmCPU_IF_HMMU_ECO_INTR_STS 0x4CC1860
+
+#define mmCPU_IF_HMMU_ECO_INTR_CLR 0x4CC1864
+
+#define mmCPU_IF_HMMU_ECO_INTR_MASK 0x4CC1868
+
+#define mmCPU_IF_NIC_ECO_INTR_STS 0x4CC1870
+
+#define mmCPU_IF_NIC_ECO_INTR_CLR 0x4CC1874
+
+#define mmCPU_IF_NIC_ECO_INTR_MASK 0x4CC1878
+
+#define mmCPU_IF_MSI_X_INTR_STS_0 0x4CC1900
+
+#define mmCPU_IF_MSI_X_INTR_STS_1 0x4CC1904
+
+#define mmCPU_IF_MSI_X_INTR_STS_2 0x4CC1908
+
+#define mmCPU_IF_MSI_X_INTR_STS_3 0x4CC190C
+
+#define mmCPU_IF_MSI_X_INTR_STS_4 0x4CC1910
+
+#define mmCPU_IF_MSI_X_INTR_STS_5 0x4CC1914
+
+#define mmCPU_IF_MSI_X_INTR_STS_6 0x4CC1918
+
+#define mmCPU_IF_MSI_X_INTR_STS_7 0x4CC191C
+
+#define mmCPU_IF_MSI_X_INTR_STS_8 0x4CC1920
+
+#define mmCPU_IF_MSI_X_INTR_STS_9 0x4CC1924
+
+#define mmCPU_IF_MSI_X_INTR_STS_10 0x4CC1928
+
+#define mmCPU_IF_MSI_X_INTR_STS_11 0x4CC192C
+
+#define mmCPU_IF_MSI_X_INTR_STS_12 0x4CC1930
+
+#define mmCPU_IF_MSI_X_INTR_STS_13 0x4CC1934
+
+#define mmCPU_IF_MSI_X_INTR_STS_14 0x4CC1938
+
+#define mmCPU_IF_MSI_X_INTR_STS_15 0x4CC193C
+
+#define mmCPU_IF_MSI_X_INTR_CLR_0 0x4CC1940
+
+#define mmCPU_IF_MSI_X_INTR_CLR_1 0x4CC1944
+
+#define mmCPU_IF_MSI_X_INTR_CLR_2 0x4CC1948
+
+#define mmCPU_IF_MSI_X_INTR_CLR_3 0x4CC194C
+
+#define mmCPU_IF_MSI_X_INTR_CLR_4 0x4CC1950
+
+#define mmCPU_IF_MSI_X_INTR_CLR_5 0x4CC1954
+
+#define mmCPU_IF_MSI_X_INTR_CLR_6 0x4CC1958
+
+#define mmCPU_IF_MSI_X_INTR_CLR_7 0x4CC195C
+
+#define mmCPU_IF_MSI_X_INTR_CLR_8 0x4CC1960
+
+#define mmCPU_IF_MSI_X_INTR_CLR_9 0x4CC1964
+
+#define mmCPU_IF_MSI_X_INTR_CLR_10 0x4CC1968
+
+#define mmCPU_IF_MSI_X_INTR_CLR_11 0x4CC196C
+
+#define mmCPU_IF_MSI_X_INTR_CLR_12 0x4CC1970
+
+#define mmCPU_IF_MSI_X_INTR_CLR_13 0x4CC1974
+
+#define mmCPU_IF_MSI_X_INTR_CLR_14 0x4CC1978
+
+#define mmCPU_IF_MSI_X_INTR_CLR_15 0x4CC197C
+
+#define mmCPU_IF_MSI_X_INTR_MASK_0 0x4CC1980
+
+#define mmCPU_IF_MSI_X_INTR_MASK_1 0x4CC1984
+
+#define mmCPU_IF_MSI_X_INTR_MASK_2 0x4CC1988
+
+#define mmCPU_IF_MSI_X_INTR_MASK_3 0x4CC198C
+
+#define mmCPU_IF_MSI_X_INTR_MASK_4 0x4CC1990
+
+#define mmCPU_IF_MSI_X_INTR_MASK_5 0x4CC1994
+
+#define mmCPU_IF_MSI_X_INTR_MASK_6 0x4CC1998
+
+#define mmCPU_IF_MSI_X_INTR_MASK_7 0x4CC199C
+
+#define mmCPU_IF_MSI_X_INTR_MASK_8 0x4CC19A0
+
+#define mmCPU_IF_MSI_X_INTR_MASK_9 0x4CC19A4
+
+#define mmCPU_IF_MSI_X_INTR_MASK_10 0x4CC19A8
+
+#define mmCPU_IF_MSI_X_INTR_MASK_11 0x4CC19AC
+
+#define mmCPU_IF_MSI_X_INTR_MASK_12 0x4CC19B0
+
+#define mmCPU_IF_MSI_X_INTR_MASK_13 0x4CC19B4
+
+#define mmCPU_IF_MSI_X_INTR_MASK_14 0x4CC19B8
+
+#define mmCPU_IF_MSI_X_INTR_MASK_15 0x4CC19BC
+
+#define mmCPU_IF_MSI_X_BUSY_INTR_STS 0x4CC19C0
+
+#define mmCPU_IF_MSI_X_BUSY_INTR_CLR 0x4CC19C4
+
+#define mmCPU_IF_MSI_X_BUSY_INTR_MASK 0x4CC19C8
+
+#define mmCPU_IF_MSI_X_GEN_ADDR 0x4CC19D0
+
+#define mmCPU_IF_MSI_X_GEN_DATA 0x4CC19D4
+
+#define mmCPU_IF_MSI_X_GEN_AWPROT 0x4CC19D8
+
+#endif /* ASIC_REG_CPU_IF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
new file mode 100644
index 000000000000..296ab832013f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_DEC0_CMD_MASKS_H_
+#define ASIC_REG_DCORE0_DEC0_CMD_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_DEC0_CMD
+ * (Prototype: VSI_CMD)
+ *****************************************
+ */
+
+/* DCORE0_DEC0_CMD_SWREG0 */
+#define DCORE0_DEC0_CMD_SWREG0_SW_HW_VERSION_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG0_SW_HW_VERSION_MASK 0xFFFF
+#define DCORE0_DEC0_CMD_SWREG0_SW_HW_ID_SHIFT 16
+#define DCORE0_DEC0_CMD_SWREG0_SW_HW_ID_MASK 0xFFFF0000
+
+/* DCORE0_DEC0_CMD_SWREG1 */
+#define DCORE0_DEC0_CMD_SWREG1_SW_HW_BUILDDATE_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG1_SW_HW_BUILDDATE_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG2 */
+#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_NORM_INTR_SRC_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_NORM_INTR_SRC_MASK 0xFFFF
+#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_ABN_INTR_SRC_SHIFT 16
+#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_ABN_INTR_SRC_MASK 0xFFFF0000
+
+/* DCORE0_DEC0_CMD_SWREG3 */
+#define DCORE0_DEC0_CMD_SWREG3_SW_EXE_CMDBUF_COUNT_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG3_SW_EXE_CMDBUF_COUNT_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG4 */
+#define DCORE0_DEC0_CMD_SWREG4_SW_CMD_EXE_LSB_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG4_SW_CMD_EXE_LSB_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG5 */
+#define DCORE0_DEC0_CMD_SWREG5_SW_CMD_EXE_MSB_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG5_SW_CMD_EXE_MSB_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG6 */
+#define DCORE0_DEC0_CMD_SWREG6_SW_AXI_TOTALARLEN_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG6_SW_AXI_TOTALARLEN_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG7 */
+#define DCORE0_DEC0_CMD_SWREG7_SW_AXI_TOTALR_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG7_SW_AXI_TOTALR_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG8 */
+#define DCORE0_DEC0_CMD_SWREG8_SW_AXI_TOTALAR_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG8_SW_AXI_TOTALAR_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG9 */
+#define DCORE0_DEC0_CMD_SWREG9_SW_AXI_TOTALRLAST_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG9_SW_AXI_TOTALRLAST_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG10 */
+#define DCORE0_DEC0_CMD_SWREG10_SW_AXI_TOTALAWLEN_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG10_SW_AXI_TOTALAWLEN_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG11 */
+#define DCORE0_DEC0_CMD_SWREG11_SW_AXI_TOTALW_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG11_SW_AXI_TOTALW_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG12 */
+#define DCORE0_DEC0_CMD_SWREG12_SW_AXI_TOTALAW_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG12_SW_AXI_TOTALAW_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG13 */
+#define DCORE0_DEC0_CMD_SWREG13_SW_AXI_TOTALWLAST_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG13_SW_AXI_TOTALWLAST_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG14 */
+#define DCORE0_DEC0_CMD_SWREG14_SW_AXI_TOTALB_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG14_SW_AXI_TOTALB_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG15 */
+#define DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK 0x7
+#define DCORE0_DEC0_CMD_SWREG15_RSV_SHIFT 3
+#define DCORE0_DEC0_CMD_SWREG15_RSV_MASK 0x3FFFF8
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BREADY_SHIFT 22
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BREADY_MASK 0x400000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BVALID_SHIFT 23
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BVALID_MASK 0x800000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WREADY_SHIFT 24
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WREADY_MASK 0x1000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WVALID_SHIFT 25
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WVALID_MASK 0x2000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWREADY_SHIFT 26
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWREADY_MASK 0x4000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWVALID_SHIFT 27
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWVALID_MASK 0x8000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RREADY_SHIFT 28
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RREADY_MASK 0x10000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RVALID_SHIFT 29
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RVALID_MASK 0x20000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARREADY_SHIFT 30
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARREADY_MASK 0x40000000
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARVALID_SHIFT 31
+#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARVALID_MASK 0x80000000
+
+/* DCORE0_DEC0_CMD_SWREG16 */
+#define DCORE0_DEC0_CMD_SWREG16_SW_START_TRIGGER_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG16_SW_START_TRIGGER_MASK 0x1
+#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_ALL_SHIFT 1
+#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_ALL_MASK 0x2
+#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_CORE_SHIFT 2
+#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_CORE_MASK 0x4
+#define DCORE0_DEC0_CMD_SWREG16_SW_ABORT_MODE_SHIFT 3
+#define DCORE0_DEC0_CMD_SWREG16_SW_ABORT_MODE_MASK 0x8
+#define DCORE0_DEC0_CMD_SWREG16_SW_CORE_CLK_GATE_DISABLE_SHIFT 4
+#define DCORE0_DEC0_CMD_SWREG16_SW_CORE_CLK_GATE_DISABLE_MASK 0x10
+#define DCORE0_DEC0_CMD_SWREG16_SW_MASTER_OUT_CLK_GATE_DISABLE_SHIFT 5
+#define DCORE0_DEC0_CMD_SWREG16_SW_MASTER_OUT_CLK_GATE_DISABLE_MASK 0x20
+#define DCORE0_DEC0_CMD_SWREG16_SW_AXI_CLK_GATE_DISABLE_SHIFT 6
+#define DCORE0_DEC0_CMD_SWREG16_SW_AXI_CLK_GATE_DISABLE_MASK 0x40
+#define DCORE0_DEC0_CMD_SWREG16_RSV_SHIFT 7
+#define DCORE0_DEC0_CMD_SWREG16_RSV_MASK 0xFFFFFF80
+
+/* DCORE0_DEC0_CMD_SWREG17 */
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ENDCMD_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ENDCMD_MASK 0x1
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_BUSERR_SHIFT 1
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_BUSERR_MASK 0x2
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_TIMEOUT_SHIFT 2
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_TIMEOUT_MASK 0x4
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_CMDERR_SHIFT 3
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_CMDERR_MASK 0x8
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ABORT_SHIFT 4
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ABORT_MASK 0x10
+#define DCORE0_DEC0_CMD_SWREG17_RSV_1_SHIFT 5
+#define DCORE0_DEC0_CMD_SWREG17_RSV_1_MASK 0x20
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_JMP_SHIFT 6
+#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_JMP_MASK 0x40
+#define DCORE0_DEC0_CMD_SWREG17_RSV_SHIFT 7
+#define DCORE0_DEC0_CMD_SWREG17_RSV_MASK 0xFFFFFF80
+
+/* DCORE0_DEC0_CMD_SWREG18 */
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ENDCMD_EN_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ENDCMD_EN_MASK 0x1
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_BUSERR_EN_SHIFT 1
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_BUSERR_EN_MASK 0x2
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_TIMEOUT_EN_SHIFT 2
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_TIMEOUT_EN_MASK 0x4
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_CMDERR_EN_SHIFT 3
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_CMDERR_EN_MASK 0x8
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ABORT_EN_SHIFT 4
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ABORT_EN_MASK 0x10
+#define DCORE0_DEC0_CMD_SWREG18_RSV_1_SHIFT 5
+#define DCORE0_DEC0_CMD_SWREG18_RSV_1_MASK 0x20
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_JMP_EN_SHIFT 6
+#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_JMP_EN_MASK 0x40
+#define DCORE0_DEC0_CMD_SWREG18_RSV_SHIFT 7
+#define DCORE0_DEC0_CMD_SWREG18_RSV_MASK 0xFFFFFF80
+
+/* DCORE0_DEC0_CMD_SWREG19 */
+#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_CYCLES_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_CYCLES_MASK 0x7FFFFFFF
+#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_ENABLE_SHIFT 31
+#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_ENABLE_MASK 0x80000000
+
+/* DCORE0_DEC0_CMD_SWREG20 */
+#define DCORE0_DEC0_CMD_SWREG20_SW_CMDBUF_EXE_ADDR_LSB_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG20_SW_CMDBUF_EXE_ADDR_LSB_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG21 */
+#define DCORE0_DEC0_CMD_SWREG21_SW_CMDBUF_EXE_ADDR_MSB_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG21_SW_CMDBUF_EXE_ADDR_MSB_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG22 */
+#define DCORE0_DEC0_CMD_SWREG22_SW_CMDBUF_EXE_LENGTH_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG22_SW_CMDBUF_EXE_LENGTH_MASK 0xFFFF
+#define DCORE0_DEC0_CMD_SWREG22_RSV_SHIFT 16
+#define DCORE0_DEC0_CMD_SWREG22_RSV_MASK 0xFFFF0000
+
+/* DCORE0_DEC0_CMD_SWREG23 */
+#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_WR_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_WR_MASK 0xFF
+#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_RD_SHIFT 8
+#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_RD_MASK 0xFF00
+#define DCORE0_DEC0_CMD_SWREG23_SW_MAX_BURST_LEN_SHIFT 16
+#define DCORE0_DEC0_CMD_SWREG23_SW_MAX_BURST_LEN_MASK 0xFF0000
+#define DCORE0_DEC0_CMD_SWREG23_RSV_SHIFT 24
+#define DCORE0_DEC0_CMD_SWREG23_RSV_MASK 0xF000000
+#define DCORE0_DEC0_CMD_SWREG23_SW_CMD_SWAP_SHIFT 28
+#define DCORE0_DEC0_CMD_SWREG23_SW_CMD_SWAP_MASK 0xF0000000
+
+/* DCORE0_DEC0_CMD_SWREG24 */
+#define DCORE0_DEC0_CMD_SWREG24_SW_RDY_CMDBUF_COUNT_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG24_SW_RDY_CMDBUF_COUNT_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG25 */
+#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_NORM_INTR_GATE_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_NORM_INTR_GATE_MASK 0xFFFF
+#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_ABN_INTR_GATE_SHIFT 16
+#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_ABN_INTR_GATE_MASK 0xFFFF0000
+
+/* DCORE0_DEC0_CMD_SWREG26 */
+#define DCORE0_DEC0_CMD_SWREG26_SW_CMDBUF_EXE_ID_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG26_SW_CMDBUF_EXE_ID_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG64 */
+#define DCORE0_DEC0_CMD_SWREG64_SW_DUMMY0_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG64_SW_DUMMY0_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG65 */
+#define DCORE0_DEC0_CMD_SWREG65_SW_DUMMY1_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG65_SW_DUMMY1_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG66 */
+#define DCORE0_DEC0_CMD_SWREG66_SW_DUMMY2_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG66_SW_DUMMY2_MASK 0xFFFFFFFF
+
+/* DCORE0_DEC0_CMD_SWREG67 */
+#define DCORE0_DEC0_CMD_SWREG67_SW_DUMMY3_SHIFT 0
+#define DCORE0_DEC0_CMD_SWREG67_SW_DUMMY3_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_DEC0_CMD_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
new file mode 100644
index 000000000000..e26f0d77c9dc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_DEC0_CMD_REGS_H_
+#define ASIC_REG_DCORE0_DEC0_CMD_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_DEC0_CMD
+ * (Prototype: VSI_CMD)
+ *****************************************
+ */
+
+#define mmDCORE0_DEC0_CMD_SWREG0 0x41E0000
+
+#define mmDCORE0_DEC0_CMD_SWREG1 0x41E0004
+
+#define mmDCORE0_DEC0_CMD_SWREG2 0x41E0008
+
+#define mmDCORE0_DEC0_CMD_SWREG3 0x41E000C
+
+#define mmDCORE0_DEC0_CMD_SWREG4 0x41E0010
+
+#define mmDCORE0_DEC0_CMD_SWREG5 0x41E0014
+
+#define mmDCORE0_DEC0_CMD_SWREG6 0x41E0018
+
+#define mmDCORE0_DEC0_CMD_SWREG7 0x41E001C
+
+#define mmDCORE0_DEC0_CMD_SWREG8 0x41E0020
+
+#define mmDCORE0_DEC0_CMD_SWREG9 0x41E0024
+
+#define mmDCORE0_DEC0_CMD_SWREG10 0x41E0028
+
+#define mmDCORE0_DEC0_CMD_SWREG11 0x41E002C
+
+#define mmDCORE0_DEC0_CMD_SWREG12 0x41E0030
+
+#define mmDCORE0_DEC0_CMD_SWREG13 0x41E0034
+
+#define mmDCORE0_DEC0_CMD_SWREG14 0x41E0038
+
+#define mmDCORE0_DEC0_CMD_SWREG15 0x41E003C
+
+#define mmDCORE0_DEC0_CMD_SWREG16 0x41E0040
+
+#define mmDCORE0_DEC0_CMD_SWREG17 0x41E0044
+
+#define mmDCORE0_DEC0_CMD_SWREG18 0x41E0048
+
+#define mmDCORE0_DEC0_CMD_SWREG19 0x41E004C
+
+#define mmDCORE0_DEC0_CMD_SWREG20 0x41E0050
+
+#define mmDCORE0_DEC0_CMD_SWREG21 0x41E0054
+
+#define mmDCORE0_DEC0_CMD_SWREG22 0x41E0058
+
+#define mmDCORE0_DEC0_CMD_SWREG23 0x41E005C
+
+#define mmDCORE0_DEC0_CMD_SWREG24 0x41E0060
+
+#define mmDCORE0_DEC0_CMD_SWREG25 0x41E0064
+
+#define mmDCORE0_DEC0_CMD_SWREG26 0x41E0068
+
+#define mmDCORE0_DEC0_CMD_SWREG64 0x41E0100
+
+#define mmDCORE0_DEC0_CMD_SWREG65 0x41E0104
+
+#define mmDCORE0_DEC0_CMD_SWREG66 0x41E0108
+
+#define mmDCORE0_DEC0_CMD_SWREG67 0x41E010C
+
+#endif /* ASIC_REG_DCORE0_DEC0_CMD_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
new file mode 100644
index 000000000000..8de48939243b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_CORE_CTX_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_CORE_CTX_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_CORE_CTX_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_ASID 0x41CB800
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP 0x41CB804
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_STRONG_ORDER 0x41CB808
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_NO_SNOOP 0x41CB80C
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_WR_REDUCTION 0x41CB810
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_RD_ATOMIC 0x41CB814
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_QOS 0x41CB818
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_RSVD 0x41CB81C
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_EMEM_CPAGE 0x41CB820
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_CORE 0x41CB824
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_E2E_COORD 0x41CB828
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_WR_OVRD_LO 0x41CB830
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_WR_OVRD_HI 0x41CB834
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_RD_OVRD_LO 0x41CB838
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_RD_OVRD_HI 0x41CB83C
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_LB_COORD 0x41CB840
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_LB_LOCK 0x41CB844
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_LB_RSVD 0x41CB848
+
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_LB_OVRD 0x41CB84C
+
+#endif /* ASIC_REG_DCORE0_EDMA0_CORE_CTX_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
new file mode 100644
index 000000000000..f73e76c8f5bd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_CORE_CTX_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_CORE_CTX_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_CORE_CTX
+ * (Prototype: DMA_CORE_CTX)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_CORE_CTX_RATE_LIM_TKN 0x41CB860
+
+#define mmDCORE0_EDMA0_CORE_CTX_PWRLP 0x41CB864
+
+#define mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS 0x41CB868
+
+#define mmDCORE0_EDMA0_CORE_CTX_IDX 0x41CB86C
+
+#define mmDCORE0_EDMA0_CORE_CTX_IDX_INC 0x41CB870
+
+#define mmDCORE0_EDMA0_CORE_CTX_CTRL 0x41CB874
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_0 0x41CB878
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_1 0x41CB87C
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_1 0x41CB880
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_2 0x41CB884
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_2 0x41CB888
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_3 0x41CB88C
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_3 0x41CB890
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_4 0x41CB894
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_4 0x41CB898
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_1 0x41CB89C
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_1 0x41CB8A0
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_2 0x41CB8A4
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_2 0x41CB8A8
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_3 0x41CB8AC
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_3 0x41CB8B0
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_4 0x41CB8B4
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_4 0x41CB8B8
+
+#define mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI 0x41CB8BC
+
+#define mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO 0x41CB8C0
+
+#define mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA 0x41CB8C4
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_OFFSET_LO 0x41CB8C8
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_OFFSET_HI 0x41CB8CC
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_OFFSET_LO 0x41CB8D0
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_OFFSET_HI 0x41CB8D4
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_LO 0x41CB8D8
+
+#define mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_HI 0x41CB8DC
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_BASE_LO 0x41CB8E0
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_BASE_HI 0x41CB8E4
+
+#define mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_0 0x41CB8E8
+
+#define mmDCORE0_EDMA0_CORE_CTX_COMMIT 0x41CB8EC
+
+#endif /* ASIC_REG_DCORE0_EDMA0_CORE_CTX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
new file mode 100644
index 000000000000..d600f6bf70d8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_CORE_MASKS_H_
+#define ASIC_REG_DCORE0_EDMA0_CORE_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_CORE
+ * (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+/* DCORE0_EDMA0_CORE_CFG_0 */
+#define DCORE0_EDMA0_CORE_CFG_0_EN_SHIFT 0
+#define DCORE0_EDMA0_CORE_CFG_0_EN_MASK 0x1
+
+/* DCORE0_EDMA0_CORE_CFG_1 */
+#define DCORE0_EDMA0_CORE_CFG_1_HALT_SHIFT 0
+#define DCORE0_EDMA0_CORE_CFG_1_HALT_MASK 0x1
+#define DCORE0_EDMA0_CORE_CFG_1_FLUSH_SHIFT 1
+#define DCORE0_EDMA0_CORE_CFG_1_FLUSH_MASK 0x2
+
+/* DCORE0_EDMA0_CORE_PROT */
+#define DCORE0_EDMA0_CORE_PROT_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_PROT_VAL_MASK 0x1
+#define DCORE0_EDMA0_CORE_PROT_ERR_VAL_SHIFT 1
+#define DCORE0_EDMA0_CORE_PROT_ERR_VAL_MASK 0x2
+
+/* DCORE0_EDMA0_CORE_CKG */
+#define DCORE0_EDMA0_CORE_CKG_HBW_RBUF_SHIFT 0
+#define DCORE0_EDMA0_CORE_CKG_HBW_RBUF_MASK 0x1
+#define DCORE0_EDMA0_CORE_CKG_LBW_RBUF_KDMA_SHIFT 1
+#define DCORE0_EDMA0_CORE_CKG_LBW_RBUF_KDMA_MASK 0x2
+#define DCORE0_EDMA0_CORE_CKG_TE_SHIFT 2
+#define DCORE0_EDMA0_CORE_CKG_TE_MASK 0x4
+
+/* DCORE0_EDMA0_CORE_RD_GLBL */
+#define DCORE0_EDMA0_CORE_RD_GLBL_LBW_VIA_HBW_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_GLBL_LBW_VIA_HBW_MASK 0x1
+#define DCORE0_EDMA0_CORE_RD_GLBL_HBW_FORCE_MISS_SHIFT 4
+#define DCORE0_EDMA0_CORE_RD_GLBL_HBW_FORCE_MISS_MASK 0x10
+#define DCORE0_EDMA0_CORE_RD_GLBL_LBW_FORCE_MISS_SHIFT 5
+#define DCORE0_EDMA0_CORE_RD_GLBL_LBW_FORCE_MISS_MASK 0x20
+
+/* DCORE0_EDMA0_CORE_RD_HBW_MAX_OUTSTAND */
+#define DCORE0_EDMA0_CORE_RD_HBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_HBW_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* DCORE0_EDMA0_CORE_RD_HBW_MAX_SIZE */
+#define DCORE0_EDMA0_CORE_RD_HBW_MAX_SIZE_DATA_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_HBW_MAX_SIZE_DATA_MASK 0xFFF
+#define DCORE0_EDMA0_CORE_RD_HBW_MAX_SIZE_MD_SHIFT 16
+#define DCORE0_EDMA0_CORE_RD_HBW_MAX_SIZE_MD_MASK 0xFFF0000
+
+/* DCORE0_EDMA0_CORE_RD_HBW_ARCACHE */
+#define DCORE0_EDMA0_CORE_RD_HBW_ARCACHE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_HBW_ARCACHE_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_CORE_RD_HBW_INFLIGHTS */
+#define DCORE0_EDMA0_CORE_RD_HBW_INFLIGHTS_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG */
+#define DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG_EN_SHIFT 31
+#define DCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_RD_LBW_MAX_OUTSTAND */
+#define DCORE0_EDMA0_CORE_RD_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_LBW_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* DCORE0_EDMA0_CORE_RD_LBW_MAX_SIZE */
+#define DCORE0_EDMA0_CORE_RD_LBW_MAX_SIZE_DATA_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_LBW_MAX_SIZE_DATA_MASK 0xFFF
+#define DCORE0_EDMA0_CORE_RD_LBW_MAX_SIZE_MD_SHIFT 16
+#define DCORE0_EDMA0_CORE_RD_LBW_MAX_SIZE_MD_MASK 0xFFF0000
+
+/* DCORE0_EDMA0_CORE_RD_LBW_ARCACHE */
+#define DCORE0_EDMA0_CORE_RD_LBW_ARCACHE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_LBW_ARCACHE_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_CORE_RD_LBW_INFLIGHTS */
+#define DCORE0_EDMA0_CORE_RD_LBW_INFLIGHTS_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG */
+#define DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG_EN_SHIFT 31
+#define DCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_WR_HBW_MAX_OUTSTAND */
+#define DCORE0_EDMA0_CORE_WR_HBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_HBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_WR_HBW_MAX_AWID */
+#define DCORE0_EDMA0_CORE_WR_HBW_MAX_AWID_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_HBW_MAX_AWID_VAL_MASK 0x3FFF
+
+/* DCORE0_EDMA0_CORE_WR_HBW_AWCACHE */
+#define DCORE0_EDMA0_CORE_WR_HBW_AWCACHE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_HBW_AWCACHE_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_CORE_WR_HBW_INFLIGHTS */
+#define DCORE0_EDMA0_CORE_WR_HBW_INFLIGHTS_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG */
+#define DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG_EN_SHIFT 31
+#define DCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_WR_LBW_MAX_OUTSTAND */
+#define DCORE0_EDMA0_CORE_WR_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_LBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_WR_LBW_MAX_AWID */
+#define DCORE0_EDMA0_CORE_WR_LBW_MAX_AWID_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_LBW_MAX_AWID_VAL_MASK 0x7F
+
+/* DCORE0_EDMA0_CORE_WR_LBW_AWCACHE */
+#define DCORE0_EDMA0_CORE_WR_LBW_AWCACHE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_LBW_AWCACHE_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_CORE_WR_LBW_INFLIGHTS */
+#define DCORE0_EDMA0_CORE_WR_LBW_INFLIGHTS_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG */
+#define DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG_EN_SHIFT 31
+#define DCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND */
+#define DCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND_VAL_MASK 0x1F
+
+/* DCORE0_EDMA0_CORE_WR_COMP_AWUSER */
+#define DCORE0_EDMA0_CORE_WR_COMP_AWUSER_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_WR_COMP_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_ERR_CFG */
+#define DCORE0_EDMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT 0
+#define DCORE0_EDMA0_CORE_ERR_CFG_ERR_MSG_EN_MASK 0x1
+#define DCORE0_EDMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT 1
+#define DCORE0_EDMA0_CORE_ERR_CFG_STOP_ON_ERR_MASK 0x2
+
+/* DCORE0_EDMA0_CORE_ERR_CAUSE */
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_HBW_RD_ERR_SHIFT 0
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK 0x1
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_HBW_WR_ERR_SHIFT 1
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK 0x2
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LBW_MSG_WR_ERR_SHIFT 2
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LBW_MSG_WR_ERR_MASK 0x4
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_DESC_OVF_SHIFT 3
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_DESC_OVF_MASK 0x8
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LBW_RD_ERR_SHIFT 4
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LBW_RD_ERR_MASK 0x10
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LBW_WR_ERR_SHIFT 5
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LBW_WR_ERR_MASK 0x20
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_TE_DESC_FIFO_OVFL_SHIFT 6
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_TE_DESC_FIFO_OVFL_MASK 0x40
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_SHIFT 7
+#define DCORE0_EDMA0_CORE_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_MASK 0x80
+
+/* DCORE0_EDMA0_CORE_ERRMSG_ADDR_LO */
+#define DCORE0_EDMA0_CORE_ERRMSG_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_ERRMSG_ADDR_HI */
+#define DCORE0_EDMA0_CORE_ERRMSG_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_ERRMSG_WDATA */
+#define DCORE0_EDMA0_CORE_ERRMSG_WDATA_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS0 */
+#define DCORE0_EDMA0_CORE_STS0_RD_REQ_CNT_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS0_RD_REQ_CNT_MASK 0x7FFF
+#define DCORE0_EDMA0_CORE_STS0_WR_REQ_CNT_SHIFT 16
+#define DCORE0_EDMA0_CORE_STS0_WR_REQ_CNT_MASK 0x7FFF0000
+#define DCORE0_EDMA0_CORE_STS0_BUSY_SHIFT 31
+#define DCORE0_EDMA0_CORE_STS0_BUSY_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_STS1 */
+#define DCORE0_EDMA0_CORE_STS1_IS_HALT_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS1_IS_HALT_MASK 0x1
+
+/* DCORE0_EDMA0_CORE_STS_RD_CTX_SEL */
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_SEL_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_SEL_VAL_MASK 0x7
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_SEL_STRIDE_SHIFT 8
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_SEL_STRIDE_MASK 0x100
+
+/* DCORE0_EDMA0_CORE_STS_RD_CTX_SIZE */
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_SIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_RD_CTX_BASE_LO */
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_RD_CTX_BASE_HI */
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_RD_CTX_ID */
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_ID_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_CTX_ID_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_LO */
+#define DCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_HI */
+#define DCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR */
+#define DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
+#define DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR_RDY_SHIFT 30
+#define DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR_RDY_MASK 0x40000000
+#define DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR_VLD_SHIFT 31
+#define DCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR_VLD_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_STS_WR_CTX_SEL */
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_SEL_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_SEL_VAL_MASK 0x7
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_SEL_STRIDE_SHIFT 8
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_SEL_STRIDE_MASK 0x100
+
+/* DCORE0_EDMA0_CORE_STS_WR_CTX_SIZE */
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_SIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_WR_CTX_BASE_LO */
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_WR_CTX_BASE_HI */
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_WR_CTX_ID */
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_ID_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_CTX_ID_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO */
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VAL_MASK 0x3FFFF
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_RDY_SHIFT 30
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_RDY_MASK 0x40000000
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VLD_SHIFT 31
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VLD_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI */
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VAL_MASK 0x3FFFF
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_RDY_SHIFT 30
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_RDY_MASK 0x40000000
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VLD_SHIFT 31
+#define DCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VLD_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR */
+#define DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
+#define DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR_RDY_SHIFT 30
+#define DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR_RDY_MASK 0x40000000
+#define DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR_VLD_SHIFT 31
+#define DCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR_VLD_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_PWRLP_CFG */
+#define DCORE0_EDMA0_CORE_PWRLP_CFG_GLBL_EN_SHIFT 0
+#define DCORE0_EDMA0_CORE_PWRLP_CFG_GLBL_EN_MASK 0x1
+#define DCORE0_EDMA0_CORE_PWRLP_CFG_CLR_SHIFT 4
+#define DCORE0_EDMA0_CORE_PWRLP_CFG_CLR_MASK 0x10
+
+/* DCORE0_EDMA0_CORE_PWRLP_STS */
+#define DCORE0_EDMA0_CORE_PWRLP_STS_RLVL_SHIFT 0
+#define DCORE0_EDMA0_CORE_PWRLP_STS_RLVL_MASK 0x7F
+#define DCORE0_EDMA0_CORE_PWRLP_STS_WLVL_SHIFT 8
+#define DCORE0_EDMA0_CORE_PWRLP_STS_WLVL_MASK 0x7F00
+#define DCORE0_EDMA0_CORE_PWRLP_STS_RCNT_SHIFT 16
+#define DCORE0_EDMA0_CORE_PWRLP_STS_RCNT_MASK 0x7F0000
+#define DCORE0_EDMA0_CORE_PWRLP_STS_WCNT_SHIFT 23
+#define DCORE0_EDMA0_CORE_PWRLP_STS_WCNT_MASK 0x3F800000
+#define DCORE0_EDMA0_CORE_PWRLP_STS_RFULL_SHIFT 30
+#define DCORE0_EDMA0_CORE_PWRLP_STS_RFULL_MASK 0x40000000
+#define DCORE0_EDMA0_CORE_PWRLP_STS_WFULL_SHIFT 31
+#define DCORE0_EDMA0_CORE_PWRLP_STS_WFULL_MASK 0x80000000
+
+/* DCORE0_EDMA0_CORE_DBG_DESC_CNT */
+#define DCORE0_EDMA0_CORE_DBG_DESC_CNT_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_DBG_DESC_CNT_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_DBG_STS */
+#define DCORE0_EDMA0_CORE_DBG_STS_RD_CTX_FULL_SHIFT 0
+#define DCORE0_EDMA0_CORE_DBG_STS_RD_CTX_FULL_MASK 0x1
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_CTX_FULL_SHIFT 1
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_CTX_FULL_MASK 0x2
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_COMP_FULL_SHIFT 2
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_COMP_FULL_MASK 0x4
+#define DCORE0_EDMA0_CORE_DBG_STS_RD_CTX_EMPTY_SHIFT 3
+#define DCORE0_EDMA0_CORE_DBG_STS_RD_CTX_EMPTY_MASK 0x8
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_CTX_EMPTY_SHIFT 4
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_CTX_EMPTY_MASK 0x10
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_COMP_EMPTY_SHIFT 5
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_COMP_EMPTY_MASK 0x20
+#define DCORE0_EDMA0_CORE_DBG_STS_TE_EMPTY_SHIFT 6
+#define DCORE0_EDMA0_CORE_DBG_STS_TE_EMPTY_MASK 0x40
+#define DCORE0_EDMA0_CORE_DBG_STS_TE_BUSY_SHIFT 7
+#define DCORE0_EDMA0_CORE_DBG_STS_TE_BUSY_MASK 0x80
+#define DCORE0_EDMA0_CORE_DBG_STS_GSKT_EMPTY_SHIFT 8
+#define DCORE0_EDMA0_CORE_DBG_STS_GSKT_EMPTY_MASK 0x100
+#define DCORE0_EDMA0_CORE_DBG_STS_GSKT_FULL_SHIFT 9
+#define DCORE0_EDMA0_CORE_DBG_STS_GSKT_FULL_MASK 0x200
+#define DCORE0_EDMA0_CORE_DBG_STS_RD_AGU_CS_SHIFT 10
+#define DCORE0_EDMA0_CORE_DBG_STS_RD_AGU_CS_MASK 0x400
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_AGU_CS_SHIFT 11
+#define DCORE0_EDMA0_CORE_DBG_STS_WR_AGU_CS_MASK 0x800
+
+/* DCORE0_EDMA0_CORE_DBG_BUF_STS */
+#define DCORE0_EDMA0_CORE_DBG_BUF_STS_HBW_FULLNESS_SHIFT 0
+#define DCORE0_EDMA0_CORE_DBG_BUF_STS_HBW_FULLNESS_MASK 0xFFF
+#define DCORE0_EDMA0_CORE_DBG_BUF_STS_LBW_FULLNESS_SHIFT 16
+#define DCORE0_EDMA0_CORE_DBG_BUF_STS_LBW_FULLNESS_MASK 0xFFF0000
+
+/* DCORE0_EDMA0_CORE_DBG_RD_DESC_ID */
+#define DCORE0_EDMA0_CORE_DBG_RD_DESC_ID_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_DBG_RD_DESC_ID_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_DBG_WR_DESC_ID */
+#define DCORE0_EDMA0_CORE_DBG_WR_DESC_ID_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_DBG_WR_DESC_ID_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_APB_DMA_LBW_BASE */
+#define DCORE0_EDMA0_CORE_APB_DMA_LBW_BASE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_APB_DMA_LBW_BASE_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_APB_MSTR_IF_LBW_BASE */
+#define DCORE0_EDMA0_CORE_APB_MSTR_IF_LBW_BASE_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_APB_MSTR_IF_LBW_BASE_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_CORE_E2E_CRED_ASYNC_CFG */
+#define DCORE0_EDMA0_CORE_E2E_CRED_ASYNC_CFG_Y_X_FORCE_SHIFT 0
+#define DCORE0_EDMA0_CORE_E2E_CRED_ASYNC_CFG_Y_X_FORCE_MASK 0x1FF
+#define DCORE0_EDMA0_CORE_E2E_CRED_ASYNC_CFG_FORCE_EN_SHIFT 9
+#define DCORE0_EDMA0_CORE_E2E_CRED_ASYNC_CFG_FORCE_EN_MASK 0x200
+
+/* DCORE0_EDMA0_CORE_DBG_APB_ENABLER */
+#define DCORE0_EDMA0_CORE_DBG_APB_ENABLER_DIS_SHIFT 0
+#define DCORE0_EDMA0_CORE_DBG_APB_ENABLER_DIS_MASK 0x1
+
+/* DCORE0_EDMA0_CORE_L2H_CMPR_LO */
+#define DCORE0_EDMA0_CORE_L2H_CMPR_LO_VAL_SHIFT 20
+#define DCORE0_EDMA0_CORE_L2H_CMPR_LO_VAL_MASK 0xFFF00000
+
+/* DCORE0_EDMA0_CORE_L2H_CMPR_HI */
+#define DCORE0_EDMA0_CORE_L2H_CMPR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_L2H_CMPR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_L2H_MASK_LO */
+#define DCORE0_EDMA0_CORE_L2H_MASK_LO_VAL_SHIFT 20
+#define DCORE0_EDMA0_CORE_L2H_MASK_LO_VAL_MASK 0xFFF00000
+
+/* DCORE0_EDMA0_CORE_L2H_MASK_HI */
+#define DCORE0_EDMA0_CORE_L2H_MASK_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_CORE_L2H_MASK_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_CORE_IDLE_IND_MASK */
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_SHIFT 0
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_MASK 0x1
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_SHIFT 1
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_MASK 0x2
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_INSTAGE_SHIFT 2
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_INSTAGE_MASK 0x4
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_CORE_SHIFT 3
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_CORE_MASK 0x8
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_SHIFT 8
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_MASK 0x1F00
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_CNT_STS_SHIFT 16
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_CNT_STS_MASK 0x1F0000
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_INSTAGE_EMPTY_SHIFT 24
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_INSTAGE_EMPTY_MASK 0x1000000
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_CORE_IDLE_STS_SHIFT 25
+#define DCORE0_EDMA0_CORE_IDLE_IND_MASK_CORE_IDLE_STS_MASK 0x2000000
+
+/* DCORE0_EDMA0_CORE_APB_ENABLER */
+#define DCORE0_EDMA0_CORE_APB_ENABLER_DIS_SHIFT 0
+#define DCORE0_EDMA0_CORE_APB_ENABLER_DIS_MASK 0x1
+
+#endif /* ASIC_REG_DCORE0_EDMA0_CORE_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
new file mode 100644
index 000000000000..84f068e4c602
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_CORE_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_CORE
+ * (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_CORE_CFG_0 0x41CB000
+
+#define mmDCORE0_EDMA0_CORE_CFG_1 0x41CB004
+
+#define mmDCORE0_EDMA0_CORE_PROT 0x41CB008
+
+#define mmDCORE0_EDMA0_CORE_CKG 0x41CB00C
+
+#define mmDCORE0_EDMA0_CORE_RD_GLBL 0x41CB07C
+
+#define mmDCORE0_EDMA0_CORE_RD_HBW_MAX_OUTSTAND 0x41CB080
+
+#define mmDCORE0_EDMA0_CORE_RD_HBW_MAX_SIZE 0x41CB084
+
+#define mmDCORE0_EDMA0_CORE_RD_HBW_ARCACHE 0x41CB088
+
+#define mmDCORE0_EDMA0_CORE_RD_HBW_INFLIGHTS 0x41CB090
+
+#define mmDCORE0_EDMA0_CORE_RD_HBW_RATE_LIM_CFG 0x41CB094
+
+#define mmDCORE0_EDMA0_CORE_RD_LBW_MAX_OUTSTAND 0x41CB0C0
+
+#define mmDCORE0_EDMA0_CORE_RD_LBW_MAX_SIZE 0x41CB0C4
+
+#define mmDCORE0_EDMA0_CORE_RD_LBW_ARCACHE 0x41CB0C8
+
+#define mmDCORE0_EDMA0_CORE_RD_LBW_INFLIGHTS 0x41CB0D0
+
+#define mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG 0x41CB0D4
+
+#define mmDCORE0_EDMA0_CORE_WR_HBW_MAX_OUTSTAND 0x41CB100
+
+#define mmDCORE0_EDMA0_CORE_WR_HBW_MAX_AWID 0x41CB104
+
+#define mmDCORE0_EDMA0_CORE_WR_HBW_AWCACHE 0x41CB108
+
+#define mmDCORE0_EDMA0_CORE_WR_HBW_INFLIGHTS 0x41CB10C
+
+#define mmDCORE0_EDMA0_CORE_WR_HBW_RATE_LIM_CFG 0x41CB110
+
+#define mmDCORE0_EDMA0_CORE_WR_LBW_MAX_OUTSTAND 0x41CB140
+
+#define mmDCORE0_EDMA0_CORE_WR_LBW_MAX_AWID 0x41CB144
+
+#define mmDCORE0_EDMA0_CORE_WR_LBW_AWCACHE 0x41CB148
+
+#define mmDCORE0_EDMA0_CORE_WR_LBW_INFLIGHTS 0x41CB14C
+
+#define mmDCORE0_EDMA0_CORE_WR_LBW_RATE_LIM_CFG 0x41CB150
+
+#define mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND 0x41CB180
+
+#define mmDCORE0_EDMA0_CORE_WR_COMP_AWUSER 0x41CB184
+
+#define mmDCORE0_EDMA0_CORE_ERR_CFG 0x41CB300
+
+#define mmDCORE0_EDMA0_CORE_ERR_CAUSE 0x41CB304
+
+#define mmDCORE0_EDMA0_CORE_ERRMSG_ADDR_LO 0x41CB308
+
+#define mmDCORE0_EDMA0_CORE_ERRMSG_ADDR_HI 0x41CB30C
+
+#define mmDCORE0_EDMA0_CORE_ERRMSG_WDATA 0x41CB310
+
+#define mmDCORE0_EDMA0_CORE_STS0 0x41CB380
+
+#define mmDCORE0_EDMA0_CORE_STS1 0x41CB384
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_CTX_SEL 0x41CB400
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_CTX_SIZE 0x41CB404
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_CTX_BASE_LO 0x41CB408
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_CTX_BASE_HI 0x41CB40C
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_CTX_ID 0x41CB410
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_LO 0x41CB414
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_HB_AXI_ADDR_HI 0x41CB418
+
+#define mmDCORE0_EDMA0_CORE_STS_RD_LB_AXI_ADDR 0x41CB41C
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_CTX_SEL 0x41CB420
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_CTX_SIZE 0x41CB424
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_CTX_BASE_LO 0x41CB428
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_CTX_BASE_HI 0x41CB42C
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_CTX_ID 0x41CB430
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_LO 0x41CB434
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_HB_AXI_ADDR_HI 0x41CB438
+
+#define mmDCORE0_EDMA0_CORE_STS_WR_LB_AXI_ADDR 0x41CB43C
+
+#define mmDCORE0_EDMA0_CORE_PWRLP_CFG 0x41CB700
+
+#define mmDCORE0_EDMA0_CORE_PWRLP_STS 0x41CB704
+
+#define mmDCORE0_EDMA0_CORE_DBG_DESC_CNT 0x41CB710
+
+#define mmDCORE0_EDMA0_CORE_DBG_STS 0x41CB714
+
+#define mmDCORE0_EDMA0_CORE_DBG_BUF_STS 0x41CB718
+
+#define mmDCORE0_EDMA0_CORE_DBG_RD_DESC_ID 0x41CB720
+
+#define mmDCORE0_EDMA0_CORE_DBG_WR_DESC_ID 0x41CB724
+
+#define mmDCORE0_EDMA0_CORE_APB_DMA_LBW_BASE 0x41CB728
+
+#define mmDCORE0_EDMA0_CORE_APB_MSTR_IF_LBW_BASE 0x41CB72C
+
+#define mmDCORE0_EDMA0_CORE_E2E_CRED_ASYNC_CFG 0x41CB730
+
+#define mmDCORE0_EDMA0_CORE_DBG_APB_ENABLER 0x41CBE1C
+
+#define mmDCORE0_EDMA0_CORE_L2H_CMPR_LO 0x41CBE20
+
+#define mmDCORE0_EDMA0_CORE_L2H_CMPR_HI 0x41CBE24
+
+#define mmDCORE0_EDMA0_CORE_L2H_MASK_LO 0x41CBE28
+
+#define mmDCORE0_EDMA0_CORE_L2H_MASK_HI 0x41CBE2C
+
+#define mmDCORE0_EDMA0_CORE_IDLE_IND_MASK 0x41CBE30
+
+#define mmDCORE0_EDMA0_CORE_APB_ENABLER 0x41CBE34
+
+#endif /* ASIC_REG_DCORE0_EDMA0_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
new file mode 100644
index 000000000000..0fc45300df81
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_QM_ARC_AUX_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_QM_ARC_AUX_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_QM_ARC_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_RUN_HALT_REQ 0x41C8100
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_RUN_HALT_ACK 0x41C8104
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_RST_VEC_ADDR 0x41C8108
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DBG_MODE 0x41C810C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CLUSTER_NUM 0x41C8110
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_NUM 0x41C8114
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_WAKE_UP_EVENT 0x41C8118
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_SYS_ADDR_BASE 0x41C811C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CTI_AP_STS 0x41C8120
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CTI_CFG_MUX_SEL 0x41C8124
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_RST 0x41C8128
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_RST_REQ 0x41C812C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SRAM_LSB_ADDR 0x41C8130
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SRAM_MSB_ADDR 0x41C8134
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_PCIE_LSB_ADDR 0x41C8138
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_PCIE_MSB_ADDR 0x41C813C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_LSB_ADDR 0x41C8140
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_MSB_ADDR 0x41C8144
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM0_LSB_ADDR 0x41C8150
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM0_MSB_ADDR 0x41C8154
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM1_LSB_ADDR 0x41C8158
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM1_MSB_ADDR 0x41C815C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM2_LSB_ADDR 0x41C8160
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM2_MSB_ADDR 0x41C8164
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM3_LSB_ADDR 0x41C8168
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM3_MSB_ADDR 0x41C816C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM0_OFFSET 0x41C8170
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM1_OFFSET 0x41C8174
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM2_OFFSET 0x41C8178
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_HBM3_OFFSET 0x41C817C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x41C8180
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x41C8184
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x41C8188
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x41C818C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x41C8190
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x41C8194
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x41C8198
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x41C819C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x41C81A0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x41C81A4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x41C81A8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x41C81AC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x41C81B0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x41C81B4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_CBU_AWCACHE_OVR 0x41C81B8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_LBU_AWCACHE_OVR 0x41C81BC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_0 0x41C81C0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_1 0x41C81C4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_2 0x41C81C8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_3 0x41C81CC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_4 0x41C81D0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_5 0x41C81D4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_6 0x41C81D8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CONTEXT_ID_7 0x41C81DC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_0 0x41C81E0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_1 0x41C81E4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_2 0x41C81E8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_3 0x41C81EC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_4 0x41C81F0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_5 0x41C81F4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_6 0x41C81F8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_7 0x41C81FC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_0 0x41C8200
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_1 0x41C8204
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_2 0x41C8208
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_3 0x41C820C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_4 0x41C8210
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_5 0x41C8214
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_6 0x41C8218
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_7 0x41C821C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_8 0x41C8220
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_9 0x41C8224
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_10 0x41C8228
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_11 0x41C822C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_12 0x41C8230
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_13 0x41C8234
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_14 0x41C8238
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SW_INTR_15 0x41C823C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_IRQ_INTR_MASK_0 0x41C8280
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_IRQ_INTR_MASK_1 0x41C8284
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_SEI_INTR_STS 0x41C8290
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_SEI_INTR_CLR 0x41C8294
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_SEI_INTR_MASK 0x41C8298
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_EXCPTN_CAUSE 0x41C829C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SEI_INTR_HALT_EN 0x41C82A0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_SEI_INTR_HALT_MASK 0x41C82A4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_SEI_INTR_HALT_MASK 0x41C82A8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REI_INTR_STS 0x41C82B0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REI_INTR_CLR 0x41C82B4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REI_INTR_MASK 0x41C82B8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_ECC_ERR_ADDR 0x41C82BC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_ECC_SYNDROME 0x41C82C0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_I_CACHE_ECC_ERR_ADDR 0x41C82C4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_I_CACHE_ECC_SYNDROME 0x41C82C8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_D_CACHE_ECC_ERR_ADDR 0x41C82CC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_D_CACHE_ECC_SYNDROME 0x41C82D0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBW_TRMINATE_AWADDR_ERR 0x41C82E0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBW_TRMINATE_ARADDR_ERR 0x41C82E4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_BRESP 0x41C82E8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_RRESP 0x41C82EC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXLEN 0x41C82F0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXSIZE 0x41C82F4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_0 0x41C8300
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_1 0x41C8304
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_2 0x41C8308
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_3 0x41C830C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_4 0x41C8310
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_5 0x41C8314
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_6 0x41C8318
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_7 0x41C831C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_TOTAL_CBU_WR_CNT 0x41C8320
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_INFLIGHT_CBU_WR_CNT 0x41C8324
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_TOTAL_CBU_RD_CNT 0x41C8328
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_INFLIGHT_CBU_RD_CNT 0x41C832C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_TOTAL_LBU_WR_CNT 0x41C8330
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_INFLIGHT_LBU_WR_CNT 0x41C8334
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_TOTAL_LBU_RD_CNT 0x41C8338
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT 0x41C833C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_ARUSER_OVR 0x41C8350
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_ARUSER_OVR_EN 0x41C8354
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_AWUSER_OVR 0x41C8358
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_AWUSER_OVR_EN 0x41C835C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR 0x41C8360
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR_EN 0x41C8364
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR 0x41C8368
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR_EN 0x41C836C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_AXCACHE_OVR 0x41C8370
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_LOCK_OVR 0x41C8374
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_PROT_OVR 0x41C8378
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_MAX_OUTSTANDING 0x41C837C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN 0x41C8380
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORCE_RSP_OK 0x41C8384
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_NO_WR_INFLIGHT 0x41C838C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_SEI_INTR_ID 0x41C8390
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_ARUSER_OVR 0x41C8400
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_ARUSER_OVR_EN 0x41C8404
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_AWUSER_OVR 0x41C8408
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_AWUSER_OVR_EN 0x41C840C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_AXCACHE_OVR 0x41C8420
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_LOCK_OVR 0x41C8424
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_PROT_OVR 0x41C8428
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_MAX_OUTSTANDING 0x41C842C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN 0x41C8430
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_FORCE_RSP_OK 0x41C8434
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_NO_WR_INFLIGHT 0x41C843C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBU_SEI_INTR_ID 0x41C8440
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0 0x41C8500
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_1 0x41C8504
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_2 0x41C8508
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_3 0x41C850C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_4 0x41C8510
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_5 0x41C8514
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_6 0x41C8518
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_7 0x41C851C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_0 0x41C8520
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_1 0x41C8524
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_2 0x41C8528
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_3 0x41C852C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_4 0x41C8530
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_5 0x41C8534
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_6 0x41C8538
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_7 0x41C853C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_0 0x41C8540
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_1 0x41C8544
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_2 0x41C8548
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_3 0x41C854C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_4 0x41C8550
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_5 0x41C8554
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_6 0x41C8558
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_7 0x41C855C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_0 0x41C8560
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_1 0x41C8564
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_2 0x41C8568
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_3 0x41C856C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_4 0x41C8570
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_5 0x41C8574
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_6 0x41C8578
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_7 0x41C857C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_0 0x41C8580
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_1 0x41C8584
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_2 0x41C8588
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_3 0x41C858C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_4 0x41C8590
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_5 0x41C8594
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_6 0x41C8598
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_7 0x41C859C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x41C85A0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x41C85A4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x41C85A8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x41C85AC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x41C85B0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x41C85B4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x41C85B8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x41C85BC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x41C85C0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x41C85C4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x41C85C8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x41C85CC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x41C85D0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x41C85D4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x41C85D8
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x41C85DC
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x41C85E0
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_NIC_Q_VLD_ENTRY_MASK 0x41C85E4
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_DROP_EN 0x41C8620
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_WARN_MSG 0x41C8624
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG 0x41C8628
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWPROT 0x41C8630
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWUSER 0x41C8634
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWBURST 0x41C8638
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWLOCK 0x41C863C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWCACHE 0x41C8640
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_WRR_ARB_WEIGHT 0x41C8644
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x41C8648
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT 0x41C864C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x41C8650
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x41C8654
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_CQ_SHADOW_CI 0x41C8658
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI 0x41C865C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_AUX2APB_PROT 0x41C8700
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBW_FORK_WIN_EN 0x41C8704
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x41C8708
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x41C870C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x41C8710
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x41C8714
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR0 0x41C8718
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK0 0x41C871C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR1 0x41C8720
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK1 0x41C8724
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR0 0x41C8728
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR1 0x41C872C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x41C8730
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x41C8734
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x41C8738
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x41C873C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_WIN_EN 0x41C8740
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_LSB 0x41C8750
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_MSB 0x41C8754
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_LSB 0x41C8758
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_MSB 0x41C875C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_LSB 0x41C8760
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_MSB 0x41C8764
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_LSB 0x41C8768
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_MSB 0x41C876C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_LSB 0x41C8770
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_MSB 0x41C8774
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_LSB 0x41C8778
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_MSB 0x41C877C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_LSB 0x41C8780
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_MSB 0x41C8784
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_LSB 0x41C8788
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_MSB 0x41C878C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_LSB 0x41C8790
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_MSB 0x41C8794
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_CBU_TERMINATE_BRESP 0x41C8798
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_CBU_TERMINATE_RRESP 0x41C879C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_0 0x41C8800
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_1 0x41C8804
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_2 0x41C8808
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_3 0x41C880C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_4 0x41C8810
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_5 0x41C8814
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_6 0x41C8818
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_7 0x41C881C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_8 0x41C8820
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_9 0x41C8824
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_10 0x41C8828
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_11 0x41C882C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_12 0x41C8830
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_13 0x41C8834
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_14 0x41C8838
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_REGION_CFG_15 0x41C883C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_TRMINATE_AWADDR_ERR 0x41C8840
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_TRMINATE_ARADDR_ERR 0x41C8844
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_DCCM_TERMINATE_BRESP 0x41C8848
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_DCCM_TERMINATE_RRESP 0x41C884C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_DCCM_TERMINATE_EN 0x41C8850
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_CFG_DCCM_SECURE_REGION 0x41C8854
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x41C8900
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_CTL 0x41C8904
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x41C8908
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR 0x41C890C
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_ARC_ACC_ENGS_BUSER 0x41C8910
+
+#define mmDCORE0_EDMA0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN 0x41C8920
+
+#endif /* ASIC_REG_DCORE0_EDMA0_QM_ARC_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..88d2a133f129
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_ASID 0x41CAB80
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP 0x41CAB84
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x41CAB88
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x41CAB8C
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x41CAB90
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x41CAB94
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_QOS 0x41CAB98
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_RSVD 0x41CAB9C
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x41CABA0
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_CORE 0x41CABA4
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_E2E_COORD 0x41CABA8
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x41CABB0
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x41CABB4
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x41CABB8
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x41CABBC
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_LB_COORD 0x41CABC0
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_LB_LOCK 0x41CABC4
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_LB_RSVD 0x41CABC8
+
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_LB_OVRD 0x41CABCC
+
+#endif /* ASIC_REG_DCORE0_EDMA0_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
new file mode 100644
index 000000000000..0b0a76a5b2a0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_QM_CGM_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_QM_CGM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_QM_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_QM_CGM_CFG 0x41CAD80
+
+#define mmDCORE0_EDMA0_QM_CGM_STS 0x41CAD84
+
+#define mmDCORE0_EDMA0_QM_CGM_CFG1 0x41CAD88
+
+#endif /* ASIC_REG_DCORE0_EDMA0_QM_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
new file mode 100644
index 000000000000..102e2a65811c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
@@ -0,0 +1,1165 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_QM_MASKS_H_
+#define ASIC_REG_DCORE0_EDMA0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+/* DCORE0_EDMA0_QM_GLBL_CFG0 */
+#define DCORE0_EDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define DCORE0_EDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define DCORE0_EDMA0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+#define DCORE0_EDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_CFG0_ARC_CQF_EN_MASK 0x4000
+
+/* DCORE0_EDMA0_QM_GLBL_CFG1 */
+#define DCORE0_EDMA0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define DCORE0_EDMA0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define DCORE0_EDMA0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* DCORE0_EDMA0_QM_GLBL_CFG2 */
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_CQF_STOP_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_CQF_STOP_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_CQF_FLUSH_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_CQF_FLUSH_MASK 0x2
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_AWUSER_OVRD_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_AWUSER_OVRD_MASK 0x10
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_ARUSER_OVRD_SHIFT 5
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_ARUSER_OVRD_MASK 0x20
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_AWUSER_OVRD_SHIFT 6
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_AWUSER_OVRD_MASK 0x40
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_ARUSER_OVRD_SHIFT 7
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_ARUSER_OVRD_MASK 0x80
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_AWPROT_OVRD_SHIFT 8
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_AWPROT_OVRD_MASK 0x100
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_ARPROT_OVRD_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_ARPROT_OVRD_MASK 0x200
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_AWPROT_OVRD_SHIFT 10
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_AWPROT_OVRD_MASK 0x400
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_ARPROT_OVRD_SHIFT 11
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_ARPROT_OVRD_MASK 0x800
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_AWCACHE_OVRD_SHIFT 12
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_AWCACHE_OVRD_MASK 0x1000
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_ARCACHE_OVRD_SHIFT 13
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_HBW_ARCACHE_OVRD_MASK 0x2000
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_AWCACHE_OVRD_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_AWCACHE_OVRD_MASK 0x4000
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_ARCACHE_OVRD_SHIFT 15
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_ARCACHE_OVRD_MASK 0x8000
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_BUSER_OVRD_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_CFG2_ARC_LBW_BUSER_OVRD_MASK 0x10000
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_CFG */
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_CFG1 */
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG1_CQF_ERR_MSG_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG1_CQF_ERR_MSG_EN_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG1_CQF_STOP_ON_ERR_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG1_CQF_STOP_ON_ERR_MASK 0x2
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG1_ARC_STOP_ON_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_GLBL_ERR_CFG1_ARC_STOP_ON_ERR_MASK 0x4
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_ARC_HALT_EN */
+#define DCORE0_EDMA0_QM_GLBL_ERR_ARC_HALT_EN_ERR_IND_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_ARC_HALT_EN_ERR_IND_MASK 0xFFFFFF
+
+/* DCORE0_EDMA0_QM_GLBL_AXCACHE */
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_HBW_AR_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_HBW_AR_MASK 0xF
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_HBW_AW_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_HBW_AW_MASK 0xF0000
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_LBW_AW_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_LBW_AW_MASK 0xF00000
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_LBW_AR_SHIFT 24
+#define DCORE0_EDMA0_QM_GLBL_AXCACHE_LBW_AR_MASK 0xF000000
+
+/* DCORE0_EDMA0_QM_GLBL_STS0 */
+#define DCORE0_EDMA0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define DCORE0_EDMA0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define DCORE0_EDMA0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define DCORE0_EDMA0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define DCORE0_EDMA0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define DCORE0_EDMA0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define DCORE0_EDMA0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define DCORE0_EDMA0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define DCORE0_EDMA0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* DCORE0_EDMA0_QM_GLBL_STS1 */
+#define DCORE0_EDMA0_QM_GLBL_STS1_ARC_CQF_IDLE_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_STS1_ARC_CQF_IDLE_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_STS1_ARC_CQF_IS_STOP_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_STS1_ARC_CQF_IS_STOP_MASK 0x2
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_STS */
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_PQF_RD_ERR_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_PQF_RD_ERR_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CQF_RD_ERR_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CQF_RD_ERR_MASK 0x2
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_RD_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_RD_ERR_MASK 0x4
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_STOP_OP_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_STOP_OP_MASK 0x10
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_MSG_WR_ERR_SHIFT 5
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_MSG_WR_ERR_MASK 0x20
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_WREG_ERR_SHIFT 6
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_WREG_ERR_MASK 0x40
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CPDMA_UP_OVF_ERR_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_PQC_L2H_ERR_SHIFT 17
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_PQC_L2H_ERR_MASK 0x20000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_RSVD_18_24_SHIFT 18
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_RSVD_18_24_MASK 0x1FC0000
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_STS_4 */
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_RSVD0_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_RSVD0_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CQF_RD_ERR_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CQF_RD_ERR_MASK 0x2
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_RD_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_RD_ERR_MASK 0x4
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_STOP_OP_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_STOP_OP_MASK 0x10
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_MSG_WR_ERR_SHIFT 5
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_MSG_WR_ERR_MASK 0x20
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_WREG_ERR_SHIFT 6
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_WREG_ERR_MASK 0x40
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CPDMA_UP_OVF_ERR_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_RSVD17_SHIFT 17
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_RSVD17_MASK 0x20000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CQ_WR_IFIFO_CI_ERR_SHIFT 18
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CQ_WR_IFIFO_CI_ERR_MASK 0x40000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CQ_WR_CTL_CI_ERR_SHIFT 19
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CQ_WR_CTL_CI_ERR_MASK 0x80000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_CQF_RD_ERR_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_CQF_RD_ERR_MASK 0x100000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_IFIFO_CI_ERR_SHIFT 21
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_IFIFO_CI_ERR_MASK 0x200000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_CTL_CI_ERR_SHIFT 22
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_CTL_CI_ERR_MASK 0x400000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_AXI_ERR_SHIFT 23
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_ARC_AXI_ERR_MASK 0x800000
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_SWITCH_WDT_ERR_SHIFT 24
+#define DCORE0_EDMA0_QM_GLBL_ERR_STS_4_CP_SWITCH_WDT_ERR_MASK 0x1000000
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN */
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_RD_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_RD_ERR_MASK 0x4
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_STOP_OP_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_STOP_OP_MASK 0x10
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CPDMA_UP_OVF_ERR_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_PQC_L2H_ERR_SHIFT 17
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_PQC_L2H_ERR_MASK 0x20000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_RSVD_18_24_SHIFT 18
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_RSVD_18_24_MASK 0x1FC0000
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4 */
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD0_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD0_MASK 0x1
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CPDMA_UP_OVF_ERR_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD17_SHIFT 17
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD17_MASK 0x20000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_IFIFO_CI_ERR_SHIFT 18
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_IFIFO_CI_ERR_MASK 0x40000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_CTL_CI_ERR_SHIFT 19
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_CTL_CI_ERR_MASK 0x80000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQF_RD_ERR_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQF_RD_ERR_MASK 0x100000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_IFIFO_CI_ERR_SHIFT 21
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_IFIFO_CI_ERR_MASK 0x200000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_CTL_CI_ERR_SHIFT 22
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_CTL_CI_ERR_MASK 0x400000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_AXI_ERR_SHIFT 23
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_AXI_ERR_MASK 0x800000
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_SWITCH_WDT_ERR_SHIFT 24
+#define DCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4_CP_SWITCH_WDT_ERR_MASK 0x1000000
+
+/* DCORE0_EDMA0_QM_GLBL_PROT */
+#define DCORE0_EDMA0_QM_GLBL_PROT_PQF_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_PROT_PQF_MASK 0xF
+#define DCORE0_EDMA0_QM_GLBL_PROT_CQF_SHIFT 4
+#define DCORE0_EDMA0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define DCORE0_EDMA0_QM_GLBL_PROT_CP_SHIFT 9
+#define DCORE0_EDMA0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define DCORE0_EDMA0_QM_GLBL_PROT_ERR_SHIFT 14
+#define DCORE0_EDMA0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARB_SHIFT 15
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARB_MASK 0x8000
+#define DCORE0_EDMA0_QM_GLBL_PROT_PQC_SHIFT 16
+#define DCORE0_EDMA0_QM_GLBL_PROT_PQC_MASK 0x10000
+#define DCORE0_EDMA0_QM_GLBL_PROT_CQ_IFIFO_MSG_SHIFT 17
+#define DCORE0_EDMA0_QM_GLBL_PROT_CQ_IFIFO_MSG_MASK 0x20000
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CQ_IFIFO_MSG_SHIFT 18
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CQ_IFIFO_MSG_MASK 0x40000
+#define DCORE0_EDMA0_QM_GLBL_PROT_CQ_CTL_MSG_SHIFT 19
+#define DCORE0_EDMA0_QM_GLBL_PROT_CQ_CTL_MSG_MASK 0x80000
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CQ_CTL_MSG_SHIFT 20
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CQ_CTL_MSG_MASK 0x100000
+#define DCORE0_EDMA0_QM_GLBL_PROT_CP_WR_ARC_SHIFT 21
+#define DCORE0_EDMA0_QM_GLBL_PROT_CP_WR_ARC_MASK 0x200000
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CQF_SHIFT 22
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CQF_MASK 0x400000
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CORE_SHIFT 23
+#define DCORE0_EDMA0_QM_GLBL_PROT_ARC_CORE_MASK 0x800000
+
+/* DCORE0_EDMA0_QM_PQ_BASE_LO */
+#define DCORE0_EDMA0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQ_BASE_HI */
+#define DCORE0_EDMA0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQ_SIZE */
+#define DCORE0_EDMA0_QM_PQ_SIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_SIZE_VAL_MASK 0x1F
+
+/* DCORE0_EDMA0_QM_PQ_PI */
+#define DCORE0_EDMA0_QM_PQ_PI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQ_CI */
+#define DCORE0_EDMA0_QM_PQ_CI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQ_CFG0 */
+#define DCORE0_EDMA0_QM_PQ_CFG0_FORCE_STALL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_CFG0_FORCE_STALL_MASK 0x1
+
+/* DCORE0_EDMA0_QM_PQ_CFG1 */
+#define DCORE0_EDMA0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFF
+#define DCORE0_EDMA0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DCORE0_EDMA0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_PQ_STS0 */
+#define DCORE0_EDMA0_QM_PQ_STS0_CREDIT_CNT_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_STS0_CREDIT_CNT_MASK 0xFF
+#define DCORE0_EDMA0_QM_PQ_STS0_FREE_CNT_SHIFT 8
+#define DCORE0_EDMA0_QM_PQ_STS0_FREE_CNT_MASK 0xFF00
+#define DCORE0_EDMA0_QM_PQ_STS0_INFLIGHT_CNT_SHIFT 16
+#define DCORE0_EDMA0_QM_PQ_STS0_INFLIGHT_CNT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_PQ_STS1 */
+#define DCORE0_EDMA0_QM_PQ_STS1_BUF_EMPTY_SHIFT 0
+#define DCORE0_EDMA0_QM_PQ_STS1_BUF_EMPTY_MASK 0x1
+#define DCORE0_EDMA0_QM_PQ_STS1_BUSY_SHIFT 1
+#define DCORE0_EDMA0_QM_PQ_STS1_BUSY_MASK 0x2
+
+/* DCORE0_EDMA0_QM_CQ_CFG0 */
+#define DCORE0_EDMA0_QM_CQ_CFG0_IF_B2B_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_CFG0_IF_B2B_EN_MASK 0x1
+#define DCORE0_EDMA0_QM_CQ_CFG0_IF_MSG_EN_SHIFT 1
+#define DCORE0_EDMA0_QM_CQ_CFG0_IF_MSG_EN_MASK 0x2
+#define DCORE0_EDMA0_QM_CQ_CFG0_CTL_MSG_EN_SHIFT 2
+#define DCORE0_EDMA0_QM_CQ_CFG0_CTL_MSG_EN_MASK 0x4
+
+/* DCORE0_EDMA0_QM_CQ_STS0 */
+#define DCORE0_EDMA0_QM_CQ_STS0_CREDIT_CNT_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_STS0_CREDIT_CNT_MASK 0xFF
+#define DCORE0_EDMA0_QM_CQ_STS0_FREE_CNT_SHIFT 8
+#define DCORE0_EDMA0_QM_CQ_STS0_FREE_CNT_MASK 0xFF00
+#define DCORE0_EDMA0_QM_CQ_STS0_INFLIGHT_CNT_SHIFT 16
+#define DCORE0_EDMA0_QM_CQ_STS0_INFLIGHT_CNT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_CQ_CFG1 */
+#define DCORE0_EDMA0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFF
+#define DCORE0_EDMA0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DCORE0_EDMA0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_CQ_STS1 */
+#define DCORE0_EDMA0_QM_CQ_STS1_BUF_EMPTY_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_STS1_BUF_EMPTY_MASK 0x1
+#define DCORE0_EDMA0_QM_CQ_STS1_BUSY_SHIFT 1
+#define DCORE0_EDMA0_QM_CQ_STS1_BUSY_MASK 0x2
+
+/* DCORE0_EDMA0_QM_CQ_PTR_LO_0 */
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_HI_0 */
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_TSIZE_0 */
+#define DCORE0_EDMA0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_0 */
+#define DCORE0_EDMA0_QM_CQ_CTL_0_UP_SHIFT 28
+#define DCORE0_EDMA0_QM_CQ_CTL_0_UP_MASK 0xF0000000
+
+/* DCORE0_EDMA0_QM_CQ_PTR_LO_1 */
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_HI_1 */
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_TSIZE_1 */
+#define DCORE0_EDMA0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_1 */
+#define DCORE0_EDMA0_QM_CQ_CTL_1_UP_SHIFT 28
+#define DCORE0_EDMA0_QM_CQ_CTL_1_UP_MASK 0xF0000000
+
+/* DCORE0_EDMA0_QM_CQ_PTR_LO_2 */
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_HI_2 */
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_TSIZE_2 */
+#define DCORE0_EDMA0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_2 */
+#define DCORE0_EDMA0_QM_CQ_CTL_2_UP_SHIFT 28
+#define DCORE0_EDMA0_QM_CQ_CTL_2_UP_MASK 0xF0000000
+
+/* DCORE0_EDMA0_QM_CQ_PTR_LO_3 */
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_HI_3 */
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_TSIZE_3 */
+#define DCORE0_EDMA0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_3 */
+#define DCORE0_EDMA0_QM_CQ_CTL_3_UP_SHIFT 28
+#define DCORE0_EDMA0_QM_CQ_CTL_3_UP_MASK 0xF0000000
+
+/* DCORE0_EDMA0_QM_CQ_PTR_LO_4 */
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_HI_4 */
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_TSIZE_4 */
+#define DCORE0_EDMA0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_4 */
+#define DCORE0_EDMA0_QM_CQ_CTL_4_UP_SHIFT 28
+#define DCORE0_EDMA0_QM_CQ_CTL_4_UP_MASK 0xF0000000
+
+/* DCORE0_EDMA0_QM_CQ_TSIZE_STS */
+#define DCORE0_EDMA0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_LO_STS */
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_PTR_HI_STS */
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_IFIFO_STS */
+#define DCORE0_EDMA0_QM_CQ_IFIFO_STS_CNT_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_IFIFO_STS_CNT_MASK 0x7
+#define DCORE0_EDMA0_QM_CQ_IFIFO_STS_RDY_SHIFT 4
+#define DCORE0_EDMA0_QM_CQ_IFIFO_STS_RDY_MASK 0x10
+#define DCORE0_EDMA0_QM_CQ_IFIFO_STS_CTL_STALL_SHIFT 8
+#define DCORE0_EDMA0_QM_CQ_IFIFO_STS_CTL_STALL_MASK 0x100
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI */
+#define DCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_FENCE0_RDATA */
+#define DCORE0_EDMA0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_QM_CP_FENCE1_RDATA */
+#define DCORE0_EDMA0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_QM_CP_FENCE2_RDATA */
+#define DCORE0_EDMA0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_QM_CP_FENCE3_RDATA */
+#define DCORE0_EDMA0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_QM_CP_FENCE0_CNT */
+#define DCORE0_EDMA0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* DCORE0_EDMA0_QM_CP_FENCE1_CNT */
+#define DCORE0_EDMA0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* DCORE0_EDMA0_QM_CP_FENCE2_CNT */
+#define DCORE0_EDMA0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* DCORE0_EDMA0_QM_CP_FENCE3_CNT */
+#define DCORE0_EDMA0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* DCORE0_EDMA0_QM_CP_BARRIER_CFG */
+#define DCORE0_EDMA0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define DCORE0_EDMA0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define DCORE0_EDMA0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* DCORE0_EDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define DCORE0_EDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define DCORE0_EDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_LDMA_TSIZE_OFFSET */
+#define DCORE0_EDMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_0 */
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_0_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_0_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_1 */
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_1_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_1_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_2 */
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_2_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_2_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_3 */
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_3_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_3_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_4 */
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_4_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_4_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_CP_STS */
+#define DCORE0_EDMA0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFF
+#define DCORE0_EDMA0_QM_CP_STS_ERDY_SHIFT 8
+#define DCORE0_EDMA0_QM_CP_STS_ERDY_MASK 0x100
+#define DCORE0_EDMA0_QM_CP_STS_SWITCH_EN_SHIFT 9
+#define DCORE0_EDMA0_QM_CP_STS_SWITCH_EN_MASK 0x200
+#define DCORE0_EDMA0_QM_CP_STS_MRDY_SHIFT 10
+#define DCORE0_EDMA0_QM_CP_STS_MRDY_MASK 0x400
+#define DCORE0_EDMA0_QM_CP_STS_SW_STOP_SHIFT 11
+#define DCORE0_EDMA0_QM_CP_STS_SW_STOP_MASK 0x800
+#define DCORE0_EDMA0_QM_CP_STS_FENCE_ID_SHIFT 12
+#define DCORE0_EDMA0_QM_CP_STS_FENCE_ID_MASK 0x3000
+#define DCORE0_EDMA0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 14
+#define DCORE0_EDMA0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x4000
+#define DCORE0_EDMA0_QM_CP_STS_FENCE_TARGET_SHIFT 16
+#define DCORE0_EDMA0_QM_CP_STS_FENCE_TARGET_MASK 0x3FFF0000
+#define DCORE0_EDMA0_QM_CP_STS_CUR_CQ_SHIFT 30
+#define DCORE0_EDMA0_QM_CP_STS_CUR_CQ_MASK 0x40000000
+
+/* DCORE0_EDMA0_QM_CP_CURRENT_INST_LO */
+#define DCORE0_EDMA0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_CURRENT_INST_HI */
+#define DCORE0_EDMA0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_PRED */
+#define DCORE0_EDMA0_QM_CP_PRED_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_PRED_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_PRED_UPEN */
+#define DCORE0_EDMA0_QM_CP_PRED_UPEN_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_PRED_UPEN_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_DBG_0 */
+#define DCORE0_EDMA0_QM_CP_DBG_0_CS_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_DBG_0_CS_MASK 0x1F
+#define DCORE0_EDMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 5
+#define DCORE0_EDMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x20
+#define DCORE0_EDMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 6
+#define DCORE0_EDMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x40
+#define DCORE0_EDMA0_QM_CP_DBG_0_MREB_STALL_SHIFT 7
+#define DCORE0_EDMA0_QM_CP_DBG_0_MREB_STALL_MASK 0x80
+#define DCORE0_EDMA0_QM_CP_DBG_0_STALL_SHIFT 8
+#define DCORE0_EDMA0_QM_CP_DBG_0_STALL_MASK 0x100
+
+/* DCORE0_EDMA0_QM_CP_CPDMA_UP_CRED */
+#define DCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_TH_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_TH_MASK 0x3
+#define DCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_VAL_SHIFT 8
+#define DCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_VAL_MASK 0x300
+
+/* DCORE0_EDMA0_QM_CP_IN_DATA_LO */
+#define DCORE0_EDMA0_QM_CP_IN_DATA_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_IN_DATA_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_IN_DATA_HI */
+#define DCORE0_EDMA0_QM_CP_IN_DATA_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_IN_DATA_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_HBW_BASE_LO */
+#define DCORE0_EDMA0_QM_PQC_HBW_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_HBW_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_HBW_BASE_HI */
+#define DCORE0_EDMA0_QM_PQC_HBW_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_HBW_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_SIZE */
+#define DCORE0_EDMA0_QM_PQC_SIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_PI */
+#define DCORE0_EDMA0_QM_PQC_PI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_PI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_LBW_WDATA */
+#define DCORE0_EDMA0_QM_PQC_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_LBW_BASE_LO */
+#define DCORE0_EDMA0_QM_PQC_LBW_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_LBW_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_LBW_BASE_HI */
+#define DCORE0_EDMA0_QM_PQC_LBW_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_LBW_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PQC_CFG */
+#define DCORE0_EDMA0_QM_PQC_CFG_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_CFG_EN_MASK 0x1
+#define DCORE0_EDMA0_QM_PQC_CFG_DIRECT_SHIFT 4
+#define DCORE0_EDMA0_QM_PQC_CFG_DIRECT_MASK 0x10
+
+/* DCORE0_EDMA0_QM_PQC_SECURE_PUSH_IND */
+#define DCORE0_EDMA0_QM_PQC_SECURE_PUSH_IND_CP_NUM_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_SECURE_PUSH_IND_CP_NUM_MASK 0x3
+
+/* DCORE0_EDMA0_QM_ARB_MASK */
+#define DCORE0_EDMA0_QM_ARB_MASK_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MASK_VAL_MASK 0xF
+
+/* DCORE0_EDMA0_QM_ARB_CFG_0 */
+#define DCORE0_EDMA0_QM_ARB_CFG_0_PRIO_TYPE_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_CFG_0_PRIO_TYPE_MASK 0x1
+#define DCORE0_EDMA0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define DCORE0_EDMA0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define DCORE0_EDMA0_QM_ARB_CFG_0_EN_SHIFT 8
+#define DCORE0_EDMA0_QM_ARB_CFG_0_EN_MASK 0x100
+#define DCORE0_EDMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 9
+#define DCORE0_EDMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x200
+
+/* DCORE0_EDMA0_QM_ARB_CHOICE_Q_PUSH */
+#define DCORE0_EDMA0_QM_ARB_CHOICE_Q_PUSH_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_CHOICE_Q_PUSH_VAL_MASK 0x3
+
+/* DCORE0_EDMA0_QM_ARB_WRR_WEIGHT */
+#define DCORE0_EDMA0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFF
+
+/* DCORE0_EDMA0_QM_ARB_CFG_1 */
+#define DCORE0_EDMA0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* DCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED */
+#define DCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* DCORE0_EDMA0_QM_ARB_MST_CRED_INC */
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST */
+#define DCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define DCORE0_EDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_MST_SLAVE_EN */
+#define DCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_1 */
+#define DCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_1_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_1_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_SLV_CHOICE_WDT */
+#define DCORE0_EDMA0_QM_ARB_SLV_CHOICE_WDT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_SLV_CHOICE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_SLV_ID */
+#define DCORE0_EDMA0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_SLV_ID_VAL_MASK 0x7F
+
+/* DCORE0_EDMA0_QM_ARB_MST_QUIET_PER */
+#define DCORE0_EDMA0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_MSG_MAX_INFLIGHT */
+#define DCORE0_EDMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* DCORE0_EDMA0_QM_ARB_BASE_LO */
+#define DCORE0_EDMA0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_BASE_HI */
+#define DCORE0_EDMA0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_STATE_STS */
+#define DCORE0_EDMA0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARB_CHOICE_FULLNESS_STS */
+#define DCORE0_EDMA0_QM_ARB_CHOICE_FULLNESS_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_CHOICE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* DCORE0_EDMA0_QM_ARB_MSG_STS */
+#define DCORE0_EDMA0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define DCORE0_EDMA0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define DCORE0_EDMA0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* DCORE0_EDMA0_QM_ARB_SLV_CHOICE_Q_HEAD */
+#define DCORE0_EDMA0_QM_ARB_SLV_CHOICE_Q_HEAD_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_SLV_CHOICE_Q_HEAD_VAL_MASK 0x3
+
+/* DCORE0_EDMA0_QM_ARB_ERR_CAUSE */
+#define DCORE0_EDMA0_QM_ARB_ERR_CAUSE_CHOICE_OVF_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_ERR_CAUSE_CHOICE_OVF_MASK 0x1
+#define DCORE0_EDMA0_QM_ARB_ERR_CAUSE_CHOICE_WDT_SHIFT 1
+#define DCORE0_EDMA0_QM_ARB_ERR_CAUSE_CHOICE_WDT_MASK 0x2
+#define DCORE0_EDMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* DCORE0_EDMA0_QM_ARB_ERR_MSG_EN */
+#define DCORE0_EDMA0_QM_ARB_ERR_MSG_EN_CHOICE_OVF_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_ERR_MSG_EN_CHOICE_OVF_MASK 0x1
+#define DCORE0_EDMA0_QM_ARB_ERR_MSG_EN_CHOICE_WDT_SHIFT 1
+#define DCORE0_EDMA0_QM_ARB_ERR_MSG_EN_CHOICE_WDT_MASK 0x2
+#define DCORE0_EDMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define DCORE0_EDMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* DCORE0_EDMA0_QM_ARB_ERR_STS_DRP */
+#define DCORE0_EDMA0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* DCORE0_EDMA0_QM_ARB_MST_CRED_STS */
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_IDX_SHIFT 24
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_IDX_MASK 0x1F000000
+
+/* DCORE0_EDMA0_QM_ARB_MST_CRED_STS_1 */
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_1_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_1_VAL_MASK 0x7F
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_1_IDX_SHIFT 24
+#define DCORE0_EDMA0_QM_ARB_MST_CRED_STS_1_IDX_MASK 0x1F000000
+
+/* DCORE0_EDMA0_QM_CSMR_STRICT_PRIO_CFG */
+#define DCORE0_EDMA0_QM_CSMR_STRICT_PRIO_CFG_ARB_TYPE_SHIFT 0
+#define DCORE0_EDMA0_QM_CSMR_STRICT_PRIO_CFG_ARB_TYPE_MASK 0x1
+#define DCORE0_EDMA0_QM_CSMR_STRICT_PRIO_CFG_PER_ENTRY_SHIFT 4
+#define DCORE0_EDMA0_QM_CSMR_STRICT_PRIO_CFG_PER_ENTRY_MASK 0x10
+
+/* DCORE0_EDMA0_QM_ARC_CQ_CFG0 */
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG0_IF_B2B_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG0_IF_B2B_EN_MASK 0x1
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG0_IF_MSG_EN_SHIFT 1
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG0_IF_MSG_EN_MASK 0x2
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG0_CTL_MSG_EN_SHIFT 2
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG0_CTL_MSG_EN_MASK 0x4
+
+/* DCORE0_EDMA0_QM_ARC_CQ_CFG1 */
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG1_CREDIT_LIM_MASK 0xFF
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DCORE0_EDMA0_QM_ARC_CQ_CFG1_MAX_INFLIGHT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_ARC_CQ_PTR_LO */
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_PTR_HI */
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_TSIZE */
+#define DCORE0_EDMA0_QM_ARC_CQ_TSIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_CTL */
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_UP_SHIFT 28
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_UP_MASK 0xF0000000
+
+/* DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS */
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS_CNT_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS_CNT_MASK 0x7
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS_RDY_SHIFT 4
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS_RDY_MASK 0x10
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS_CTL_STALL_SHIFT 8
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS_CTL_STALL_MASK 0x100
+
+/* DCORE0_EDMA0_QM_ARC_CQ_STS0 */
+#define DCORE0_EDMA0_QM_ARC_CQ_STS0_CREDIT_CNT_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_STS0_CREDIT_CNT_MASK 0xFF
+#define DCORE0_EDMA0_QM_ARC_CQ_STS0_FREE_CNT_SHIFT 8
+#define DCORE0_EDMA0_QM_ARC_CQ_STS0_FREE_CNT_MASK 0xFF00
+#define DCORE0_EDMA0_QM_ARC_CQ_STS0_INFLIGHT_CNT_SHIFT 16
+#define DCORE0_EDMA0_QM_ARC_CQ_STS0_INFLIGHT_CNT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_ARC_CQ_STS1 */
+#define DCORE0_EDMA0_QM_ARC_CQ_STS1_BUF_EMPTY_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_STS1_BUF_EMPTY_MASK 0x1
+#define DCORE0_EDMA0_QM_ARC_CQ_STS1_BUSY_SHIFT 1
+#define DCORE0_EDMA0_QM_ARC_CQ_STS1_BUSY_MASK 0x2
+
+/* DCORE0_EDMA0_QM_ARC_CQ_TSIZE_STS */
+#define DCORE0_EDMA0_QM_ARC_CQ_TSIZE_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_PTR_LO_STS */
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_PTR_HI_STS */
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_WR_ARC_ADDR_HI */
+#define DCORE0_EDMA0_QM_CP_WR_ARC_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_WR_ARC_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_WR_ARC_ADDR_LO */
+#define DCORE0_EDMA0_QM_CP_WR_ARC_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_WR_ARC_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI */
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO */
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI */
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO */
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_HI */
+#define DCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_LO */
+#define DCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_HI */
+#define DCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_LO */
+#define DCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ADDR_OVRD */
+#define DCORE0_EDMA0_QM_ADDR_OVRD_IDX_SHIFT 0
+#define DCORE0_EDMA0_QM_ADDR_OVRD_IDX_MASK 0xFF
+
+/* DCORE0_EDMA0_QM_CQ_IFIFO_CI */
+#define DCORE0_EDMA0_QM_CQ_IFIFO_CI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_IFIFO_CI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_IFIFO_CI */
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_CI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_IFIFO_CI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CQ_CTL_CI */
+#define DCORE0_EDMA0_QM_CQ_CTL_CI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CQ_CTL_CI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_CQ_CTL_CI */
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_CI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_CQ_CTL_CI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_CFG */
+#define DCORE0_EDMA0_QM_CP_CFG_SWITCH_EN_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_CFG_SWITCH_EN_MASK 0x1
+#define DCORE0_EDMA0_QM_CP_CFG_SWITCH_WD_EN_SHIFT 1
+#define DCORE0_EDMA0_QM_CP_CFG_SWITCH_WD_EN_MASK 0x2
+
+/* DCORE0_EDMA0_QM_CP_EXT_SWITCH */
+#define DCORE0_EDMA0_QM_CP_EXT_SWITCH_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_EXT_SWITCH_VAL_MASK 0x1
+
+/* DCORE0_EDMA0_QM_CP_SWITCH_WD_SET */
+#define DCORE0_EDMA0_QM_CP_SWITCH_WD_SET_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_SWITCH_WD_SET_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_CP_SWITCH_WD */
+#define DCORE0_EDMA0_QM_CP_SWITCH_WD_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_CP_SWITCH_WD_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_LO */
+#define DCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_HI */
+#define DCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ENGINE_BASE_ADDR_HI */
+#define DCORE0_EDMA0_QM_ENGINE_BASE_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ENGINE_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ENGINE_BASE_ADDR_LO */
+#define DCORE0_EDMA0_QM_ENGINE_BASE_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ENGINE_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ENGINE_ADDR_RANGE_SIZE */
+#define DCORE0_EDMA0_QM_ENGINE_ADDR_RANGE_SIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_ENGINE_ADDR_RANGE_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI */
+#define DCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO */
+#define DCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_QM_BASE_ADDR_HI */
+#define DCORE0_EDMA0_QM_QM_BASE_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_QM_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_QM_BASE_ADDR_LO */
+#define DCORE0_EDMA0_QM_QM_BASE_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_QM_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_ARC_PQC_SECURE_PUSH_IND */
+#define DCORE0_EDMA0_QM_ARC_PQC_SECURE_PUSH_IND_CP_NUM_SHIFT 0
+#define DCORE0_EDMA0_QM_ARC_PQC_SECURE_PUSH_IND_CP_NUM_MASK 0x3
+
+/* DCORE0_EDMA0_QM_PQC_STS_0 */
+#define DCORE0_EDMA0_QM_PQC_STS_0_COMP_DATA_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_STS_0_COMP_DATA_MASK 0xFFFF
+#define DCORE0_EDMA0_QM_PQC_STS_0_COMP_OFST_SHIFT 16
+#define DCORE0_EDMA0_QM_PQC_STS_0_COMP_OFST_MASK 0xFFFF0000
+
+/* DCORE0_EDMA0_QM_PQC_STS_1 */
+#define DCORE0_EDMA0_QM_PQC_STS_1_COMP_FIFO_CNTR_SHIFT 0
+#define DCORE0_EDMA0_QM_PQC_STS_1_COMP_FIFO_CNTR_MASK 0xF
+#define DCORE0_EDMA0_QM_PQC_STS_1_COMP_FIFO_EMPTY_SHIFT 4
+#define DCORE0_EDMA0_QM_PQC_STS_1_COMP_FIFO_EMPTY_MASK 0x10
+#define DCORE0_EDMA0_QM_PQC_STS_1_COMP_FIFO_FULL_SHIFT 5
+#define DCORE0_EDMA0_QM_PQC_STS_1_COMP_FIFO_FULL_MASK 0x20
+
+/* DCORE0_EDMA0_QM_SEI_STATUS */
+#define DCORE0_EDMA0_QM_SEI_STATUS_QM_INT_SHIFT 0
+#define DCORE0_EDMA0_QM_SEI_STATUS_QM_INT_MASK 0x1
+#define DCORE0_EDMA0_QM_SEI_STATUS_ARC_INT_SHIFT 1
+#define DCORE0_EDMA0_QM_SEI_STATUS_ARC_INT_MASK 0x2
+
+/* DCORE0_EDMA0_QM_SEI_MASK */
+#define DCORE0_EDMA0_QM_SEI_MASK_QM_INT_SHIFT 0
+#define DCORE0_EDMA0_QM_SEI_MASK_QM_INT_MASK 0x1
+#define DCORE0_EDMA0_QM_SEI_MASK_ARC_INT_SHIFT 1
+#define DCORE0_EDMA0_QM_SEI_MASK_ARC_INT_MASK 0x2
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_ADDR_LO */
+#define DCORE0_EDMA0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_ADDR_HI */
+#define DCORE0_EDMA0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_GLBL_ERR_WDATA */
+#define DCORE0_EDMA0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_L2H_MASK_LO */
+#define DCORE0_EDMA0_QM_L2H_MASK_LO_VAL_SHIFT 20
+#define DCORE0_EDMA0_QM_L2H_MASK_LO_VAL_MASK 0xFFF00000
+
+/* DCORE0_EDMA0_QM_L2H_MASK_HI */
+#define DCORE0_EDMA0_QM_L2H_MASK_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_L2H_MASK_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_L2H_CMPR_LO */
+#define DCORE0_EDMA0_QM_L2H_CMPR_LO_VAL_SHIFT 20
+#define DCORE0_EDMA0_QM_L2H_CMPR_LO_VAL_MASK 0xFFF00000
+
+/* DCORE0_EDMA0_QM_L2H_CMPR_HI */
+#define DCORE0_EDMA0_QM_L2H_CMPR_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_L2H_CMPR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_LOCAL_RANGE_BASE */
+#define DCORE0_EDMA0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_LOCAL_RANGE_SIZE */
+#define DCORE0_EDMA0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DCORE0_EDMA0_QM_IND_GW_APB_CFG */
+#define DCORE0_EDMA0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define DCORE0_EDMA0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define DCORE0_EDMA0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define DCORE0_EDMA0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* DCORE0_EDMA0_QM_IND_GW_APB_WDATA */
+#define DCORE0_EDMA0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_IND_GW_APB_RDATA */
+#define DCORE0_EDMA0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_IND_GW_APB_STATUS */
+#define DCORE0_EDMA0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define DCORE0_EDMA0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define DCORE0_EDMA0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define DCORE0_EDMA0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* DCORE0_EDMA0_QM_PERF_CNT_FREE_LO */
+#define DCORE0_EDMA0_QM_PERF_CNT_FREE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PERF_CNT_FREE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PERF_CNT_FREE_HI */
+#define DCORE0_EDMA0_QM_PERF_CNT_FREE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PERF_CNT_FREE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PERF_CNT_IDLE_LO */
+#define DCORE0_EDMA0_QM_PERF_CNT_IDLE_LO_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PERF_CNT_IDLE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PERF_CNT_IDLE_HI */
+#define DCORE0_EDMA0_QM_PERF_CNT_IDLE_HI_VAL_SHIFT 0
+#define DCORE0_EDMA0_QM_PERF_CNT_IDLE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_EDMA0_QM_PERF_CNT_CFG */
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_PQ_MASK_SHIFT 0
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_PQ_MASK_MASK 0xF
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_CQ_MASK_SHIFT 8
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_CQ_MASK_MASK 0x1F00
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_CP_MASK_SHIFT 16
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_CP_MASK_MASK 0x1F0000
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_AGENT_MASK_SHIFT 24
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_AGENT_MASK_MASK 0x1000000
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_EN_FREE_SHIFT 30
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_EN_FREE_MASK 0x40000000
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_EN_IDLE_SHIFT 31
+#define DCORE0_EDMA0_QM_PERF_CNT_CFG_EN_IDLE_MASK 0x80000000
+
+#endif /* ASIC_REG_DCORE0_EDMA0_QM_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
new file mode 100644
index 000000000000..32d475b9ed11
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA0_QM_REGS_H_
+#define ASIC_REG_DCORE0_EDMA0_QM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA0_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA0_QM_GLBL_CFG0 0x41CA000
+
+#define mmDCORE0_EDMA0_QM_GLBL_CFG1 0x41CA004
+
+#define mmDCORE0_EDMA0_QM_GLBL_CFG2 0x41CA008
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_CFG 0x41CA00C
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_CFG1 0x41CA010
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_ARC_HALT_EN 0x41CA014
+
+#define mmDCORE0_EDMA0_QM_GLBL_AXCACHE 0x41CA018
+
+#define mmDCORE0_EDMA0_QM_GLBL_STS0 0x41CA01C
+
+#define mmDCORE0_EDMA0_QM_GLBL_STS1 0x41CA020
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_STS_0 0x41CA024
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_STS_1 0x41CA028
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_STS_2 0x41CA02C
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_STS_3 0x41CA030
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_STS_4 0x41CA034
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_0 0x41CA038
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_1 0x41CA03C
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_2 0x41CA040
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_3 0x41CA044
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_MSG_EN_4 0x41CA048
+
+#define mmDCORE0_EDMA0_QM_GLBL_PROT 0x41CA04C
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_LO_0 0x41CA050
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_LO_1 0x41CA054
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_LO_2 0x41CA058
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_LO_3 0x41CA05C
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_HI_0 0x41CA060
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_HI_1 0x41CA064
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_HI_2 0x41CA068
+
+#define mmDCORE0_EDMA0_QM_PQ_BASE_HI_3 0x41CA06C
+
+#define mmDCORE0_EDMA0_QM_PQ_SIZE_0 0x41CA070
+
+#define mmDCORE0_EDMA0_QM_PQ_SIZE_1 0x41CA074
+
+#define mmDCORE0_EDMA0_QM_PQ_SIZE_2 0x41CA078
+
+#define mmDCORE0_EDMA0_QM_PQ_SIZE_3 0x41CA07C
+
+#define mmDCORE0_EDMA0_QM_PQ_PI_0 0x41CA080
+
+#define mmDCORE0_EDMA0_QM_PQ_PI_1 0x41CA084
+
+#define mmDCORE0_EDMA0_QM_PQ_PI_2 0x41CA088
+
+#define mmDCORE0_EDMA0_QM_PQ_PI_3 0x41CA08C
+
+#define mmDCORE0_EDMA0_QM_PQ_CI_0 0x41CA090
+
+#define mmDCORE0_EDMA0_QM_PQ_CI_1 0x41CA094
+
+#define mmDCORE0_EDMA0_QM_PQ_CI_2 0x41CA098
+
+#define mmDCORE0_EDMA0_QM_PQ_CI_3 0x41CA09C
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG0_0 0x41CA0A0
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG0_1 0x41CA0A4
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG0_2 0x41CA0A8
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG0_3 0x41CA0AC
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG1_0 0x41CA0B0
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG1_1 0x41CA0B4
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG1_2 0x41CA0B8
+
+#define mmDCORE0_EDMA0_QM_PQ_CFG1_3 0x41CA0BC
+
+#define mmDCORE0_EDMA0_QM_PQ_STS0_0 0x41CA0C0
+
+#define mmDCORE0_EDMA0_QM_PQ_STS0_1 0x41CA0C4
+
+#define mmDCORE0_EDMA0_QM_PQ_STS0_2 0x41CA0C8
+
+#define mmDCORE0_EDMA0_QM_PQ_STS0_3 0x41CA0CC
+
+#define mmDCORE0_EDMA0_QM_PQ_STS1_0 0x41CA0D0
+
+#define mmDCORE0_EDMA0_QM_PQ_STS1_1 0x41CA0D4
+
+#define mmDCORE0_EDMA0_QM_PQ_STS1_2 0x41CA0D8
+
+#define mmDCORE0_EDMA0_QM_PQ_STS1_3 0x41CA0DC
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG0_0 0x41CA0E0
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG0_1 0x41CA0E4
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG0_2 0x41CA0E8
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG0_3 0x41CA0EC
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG0_4 0x41CA0F0
+
+#define mmDCORE0_EDMA0_QM_CQ_STS0_0 0x41CA0F4
+
+#define mmDCORE0_EDMA0_QM_CQ_STS0_1 0x41CA0F8
+
+#define mmDCORE0_EDMA0_QM_CQ_STS0_2 0x41CA0FC
+
+#define mmDCORE0_EDMA0_QM_CQ_STS0_3 0x41CA100
+
+#define mmDCORE0_EDMA0_QM_CQ_STS0_4 0x41CA104
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG1_0 0x41CA108
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG1_1 0x41CA10C
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG1_2 0x41CA110
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG1_3 0x41CA114
+
+#define mmDCORE0_EDMA0_QM_CQ_CFG1_4 0x41CA118
+
+#define mmDCORE0_EDMA0_QM_CQ_STS1_0 0x41CA11C
+
+#define mmDCORE0_EDMA0_QM_CQ_STS1_1 0x41CA120
+
+#define mmDCORE0_EDMA0_QM_CQ_STS1_2 0x41CA124
+
+#define mmDCORE0_EDMA0_QM_CQ_STS1_3 0x41CA128
+
+#define mmDCORE0_EDMA0_QM_CQ_STS1_4 0x41CA12C
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_0 0x41CA150
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_0 0x41CA154
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_0 0x41CA158
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_0 0x41CA15C
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_1 0x41CA160
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_1 0x41CA164
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_1 0x41CA168
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_1 0x41CA16C
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_2 0x41CA170
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_2 0x41CA174
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_2 0x41CA178
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_2 0x41CA17C
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_3 0x41CA180
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_3 0x41CA184
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_3 0x41CA188
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_3 0x41CA18C
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_4 0x41CA190
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_4 0x41CA194
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_4 0x41CA198
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_4 0x41CA19C
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_STS_0 0x41CA1A0
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_STS_1 0x41CA1A4
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_STS_2 0x41CA1A8
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_STS_3 0x41CA1AC
+
+#define mmDCORE0_EDMA0_QM_CQ_TSIZE_STS_4 0x41CA1B0
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_STS_0 0x41CA1B4
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_STS_1 0x41CA1B8
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_STS_2 0x41CA1BC
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_STS_3 0x41CA1C0
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_LO_STS_4 0x41CA1C4
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_STS_0 0x41CA1C8
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_STS_1 0x41CA1CC
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_STS_2 0x41CA1D0
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_STS_3 0x41CA1D4
+
+#define mmDCORE0_EDMA0_QM_CQ_PTR_HI_STS_4 0x41CA1D8
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_STS_0 0x41CA1DC
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_STS_1 0x41CA1E0
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_STS_2 0x41CA1E4
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_STS_3 0x41CA1E8
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_STS_4 0x41CA1EC
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 0x41CA1F0
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 0x41CA1F4
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 0x41CA1F8
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 0x41CA1FC
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 0x41CA200
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 0x41CA204
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 0x41CA208
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 0x41CA20C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 0x41CA210
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 0x41CA214
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 0x41CA218
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 0x41CA21C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 0x41CA220
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 0x41CA224
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 0x41CA228
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 0x41CA22C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 0x41CA230
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 0x41CA234
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 0x41CA238
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 0x41CA23C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 0x41CA240
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 0x41CA244
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 0x41CA248
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 0x41CA24C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 0x41CA250
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 0x41CA254
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 0x41CA258
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 0x41CA25C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 0x41CA260
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 0x41CA264
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 0x41CA268
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 0x41CA26C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 0x41CA270
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 0x41CA274
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 0x41CA278
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 0x41CA27C
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 0x41CA280
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 0x41CA284
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 0x41CA288
+
+#define mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 0x41CA28C
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_0 0x41CA290
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_1 0x41CA294
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_2 0x41CA298
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_3 0x41CA29C
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_4 0x41CA2A0
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_0 0x41CA2A4
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_1 0x41CA2A8
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_2 0x41CA2AC
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_3 0x41CA2B0
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_4 0x41CA2B4
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_0 0x41CA2B8
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_1 0x41CA2BC
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_2 0x41CA2C0
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_3 0x41CA2C4
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_4 0x41CA2C8
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_0 0x41CA2CC
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_1 0x41CA2D0
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_2 0x41CA2D4
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_3 0x41CA2D8
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_4 0x41CA2DC
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_0 0x41CA2E0
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_1 0x41CA2E4
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_2 0x41CA2E8
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_3 0x41CA2EC
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_4 0x41CA2F0
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_0 0x41CA2F4
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_1 0x41CA2F8
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_2 0x41CA2FC
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_3 0x41CA300
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_4 0x41CA304
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_0 0x41CA308
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_1 0x41CA30C
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_2 0x41CA310
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_3 0x41CA314
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_4 0x41CA318
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_0 0x41CA31C
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_1 0x41CA320
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_2 0x41CA324
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_3 0x41CA328
+
+#define mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_4 0x41CA32C
+
+#define mmDCORE0_EDMA0_QM_CP_BARRIER_CFG 0x41CA330
+
+#define mmDCORE0_EDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0x41CA334
+
+#define mmDCORE0_EDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0x41CA338
+
+#define mmDCORE0_EDMA0_QM_CP_LDMA_TSIZE_OFFSET 0x41CA33C
+
+#define mmDCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_0 0x41CA340
+
+#define mmDCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_1 0x41CA344
+
+#define mmDCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_2 0x41CA348
+
+#define mmDCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_3 0x41CA34C
+
+#define mmDCORE0_EDMA0_QM_CP_CQ_PTR_LO_OFFSET_4 0x41CA350
+
+#define mmDCORE0_EDMA0_QM_CP_STS_0 0x41CA368
+
+#define mmDCORE0_EDMA0_QM_CP_STS_1 0x41CA36C
+
+#define mmDCORE0_EDMA0_QM_CP_STS_2 0x41CA370
+
+#define mmDCORE0_EDMA0_QM_CP_STS_3 0x41CA374
+
+#define mmDCORE0_EDMA0_QM_CP_STS_4 0x41CA378
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_LO_0 0x41CA37C
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_LO_1 0x41CA380
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_LO_2 0x41CA384
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_LO_3 0x41CA388
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_LO_4 0x41CA38C
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_HI_0 0x41CA390
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_HI_1 0x41CA394
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_HI_2 0x41CA398
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_HI_3 0x41CA39C
+
+#define mmDCORE0_EDMA0_QM_CP_CURRENT_INST_HI_4 0x41CA3A0
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_0 0x41CA3A4
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_1 0x41CA3A8
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_2 0x41CA3AC
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_3 0x41CA3B0
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_4 0x41CA3B4
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_UPEN_0 0x41CA3B8
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_UPEN_1 0x41CA3BC
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_UPEN_2 0x41CA3C0
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_UPEN_3 0x41CA3C4
+
+#define mmDCORE0_EDMA0_QM_CP_PRED_UPEN_4 0x41CA3C8
+
+#define mmDCORE0_EDMA0_QM_CP_DBG_0_0 0x41CA3CC
+
+#define mmDCORE0_EDMA0_QM_CP_DBG_0_1 0x41CA3D0
+
+#define mmDCORE0_EDMA0_QM_CP_DBG_0_2 0x41CA3D4
+
+#define mmDCORE0_EDMA0_QM_CP_DBG_0_3 0x41CA3D8
+
+#define mmDCORE0_EDMA0_QM_CP_DBG_0_4 0x41CA3DC
+
+#define mmDCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_0 0x41CA3E0
+
+#define mmDCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_1 0x41CA3E4
+
+#define mmDCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_2 0x41CA3E8
+
+#define mmDCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_3 0x41CA3EC
+
+#define mmDCORE0_EDMA0_QM_CP_CPDMA_UP_CRED_4 0x41CA3F0
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_LO_0 0x41CA3F4
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_LO_1 0x41CA3F8
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_LO_2 0x41CA3FC
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_LO_3 0x41CA400
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_LO_4 0x41CA404
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_HI_0 0x41CA408
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_HI_1 0x41CA40C
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_HI_2 0x41CA410
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_HI_3 0x41CA414
+
+#define mmDCORE0_EDMA0_QM_CP_IN_DATA_HI_4 0x41CA418
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_LO_0 0x41CA41C
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_LO_1 0x41CA420
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_LO_2 0x41CA424
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_LO_3 0x41CA428
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_HI_0 0x41CA42C
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_HI_1 0x41CA430
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_HI_2 0x41CA434
+
+#define mmDCORE0_EDMA0_QM_PQC_HBW_BASE_HI_3 0x41CA438
+
+#define mmDCORE0_EDMA0_QM_PQC_SIZE_0 0x41CA43C
+
+#define mmDCORE0_EDMA0_QM_PQC_SIZE_1 0x41CA440
+
+#define mmDCORE0_EDMA0_QM_PQC_SIZE_2 0x41CA444
+
+#define mmDCORE0_EDMA0_QM_PQC_SIZE_3 0x41CA448
+
+#define mmDCORE0_EDMA0_QM_PQC_PI_0 0x41CA44C
+
+#define mmDCORE0_EDMA0_QM_PQC_PI_1 0x41CA450
+
+#define mmDCORE0_EDMA0_QM_PQC_PI_2 0x41CA454
+
+#define mmDCORE0_EDMA0_QM_PQC_PI_3 0x41CA458
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_WDATA_0 0x41CA45C
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_WDATA_1 0x41CA460
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_WDATA_2 0x41CA464
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_WDATA_3 0x41CA468
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_LO_0 0x41CA46C
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_LO_1 0x41CA470
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_LO_2 0x41CA474
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_LO_3 0x41CA478
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_HI_0 0x41CA47C
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_HI_1 0x41CA480
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_HI_2 0x41CA484
+
+#define mmDCORE0_EDMA0_QM_PQC_LBW_BASE_HI_3 0x41CA488
+
+#define mmDCORE0_EDMA0_QM_PQC_CFG 0x41CA48C
+
+#define mmDCORE0_EDMA0_QM_PQC_SECURE_PUSH_IND 0x41CA490
+
+#define mmDCORE0_EDMA0_QM_ARB_MASK 0x41CA4A0
+
+#define mmDCORE0_EDMA0_QM_ARB_CFG_0 0x41CA4A4
+
+#define mmDCORE0_EDMA0_QM_ARB_CHOICE_Q_PUSH 0x41CA4A8
+
+#define mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_0 0x41CA4AC
+
+#define mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_1 0x41CA4B0
+
+#define mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_2 0x41CA4B4
+
+#define mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_3 0x41CA4B8
+
+#define mmDCORE0_EDMA0_QM_ARB_CFG_1 0x41CA4BC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_0 0x41CA4C0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_1 0x41CA4C4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_2 0x41CA4C8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_3 0x41CA4CC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_4 0x41CA4D0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_5 0x41CA4D4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_6 0x41CA4D8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_7 0x41CA4DC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_8 0x41CA4E0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_9 0x41CA4E4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_10 0x41CA4E8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_11 0x41CA4EC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_12 0x41CA4F0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_13 0x41CA4F4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_14 0x41CA4F8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_15 0x41CA4FC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_16 0x41CA500
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_17 0x41CA504
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_18 0x41CA508
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_19 0x41CA50C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_20 0x41CA510
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_21 0x41CA514
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_22 0x41CA518
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_23 0x41CA51C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_24 0x41CA520
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_25 0x41CA524
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_26 0x41CA528
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_27 0x41CA52C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_28 0x41CA530
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_29 0x41CA534
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_30 0x41CA538
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_31 0x41CA53C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_32 0x41CA540
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_33 0x41CA544
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_34 0x41CA548
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_35 0x41CA54C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_36 0x41CA550
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_37 0x41CA554
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_38 0x41CA558
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_39 0x41CA55C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_40 0x41CA560
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_41 0x41CA564
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_42 0x41CA568
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_43 0x41CA56C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_44 0x41CA570
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_45 0x41CA574
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_46 0x41CA578
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_47 0x41CA57C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_48 0x41CA580
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_49 0x41CA584
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_50 0x41CA588
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_51 0x41CA58C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_52 0x41CA590
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_53 0x41CA594
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_54 0x41CA598
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_55 0x41CA59C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_56 0x41CA5A0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_57 0x41CA5A4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_58 0x41CA5A8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_59 0x41CA5AC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_60 0x41CA5B0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_61 0x41CA5B4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_62 0x41CA5B8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_AVAIL_CRED_63 0x41CA5BC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CRED_INC 0x41CA5E0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_0 0x41CA5E4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_1 0x41CA5E8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_2 0x41CA5EC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_3 0x41CA5F0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_4 0x41CA5F4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_5 0x41CA5F8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_6 0x41CA5FC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_7 0x41CA600
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_8 0x41CA604
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_9 0x41CA608
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_10 0x41CA60C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_11 0x41CA610
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_12 0x41CA614
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_13 0x41CA618
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_14 0x41CA61C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_15 0x41CA620
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_16 0x41CA624
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_17 0x41CA628
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_18 0x41CA62C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_19 0x41CA630
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_20 0x41CA634
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_21 0x41CA638
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_22 0x41CA63C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_23 0x41CA640
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_24 0x41CA644
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_25 0x41CA648
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_26 0x41CA64C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_27 0x41CA650
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_28 0x41CA654
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_29 0x41CA658
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_30 0x41CA65C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_31 0x41CA660
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_32 0x41CA664
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_33 0x41CA668
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_34 0x41CA66C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_35 0x41CA670
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_36 0x41CA674
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_37 0x41CA678
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_38 0x41CA67C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_39 0x41CA680
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_40 0x41CA684
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_41 0x41CA688
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_42 0x41CA68C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_43 0x41CA690
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_44 0x41CA694
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_45 0x41CA698
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_46 0x41CA69C
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_47 0x41CA6A0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_48 0x41CA6A4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_49 0x41CA6A8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_50 0x41CA6AC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_51 0x41CA6B0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_52 0x41CA6B4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_53 0x41CA6B8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_54 0x41CA6BC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_55 0x41CA6C0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_56 0x41CA6C4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_57 0x41CA6C8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_58 0x41CA6CC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_59 0x41CA6D0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_60 0x41CA6D4
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_61 0x41CA6D8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_62 0x41CA6DC
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_63 0x41CA6E0
+
+#define mmDCORE0_EDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x41CA704
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_SLAVE_EN 0x41CA708
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_1 0x41CA70C
+
+#define mmDCORE0_EDMA0_QM_ARB_SLV_CHOICE_WDT 0x41CA710
+
+#define mmDCORE0_EDMA0_QM_ARB_SLV_ID 0x41CA714
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_QUIET_PER 0x41CA718
+
+#define mmDCORE0_EDMA0_QM_ARB_MSG_MAX_INFLIGHT 0x41CA744
+
+#define mmDCORE0_EDMA0_QM_ARB_BASE_LO 0x41CA754
+
+#define mmDCORE0_EDMA0_QM_ARB_BASE_HI 0x41CA758
+
+#define mmDCORE0_EDMA0_QM_ARB_STATE_STS 0x41CA780
+
+#define mmDCORE0_EDMA0_QM_ARB_CHOICE_FULLNESS_STS 0x41CA784
+
+#define mmDCORE0_EDMA0_QM_ARB_MSG_STS 0x41CA788
+
+#define mmDCORE0_EDMA0_QM_ARB_SLV_CHOICE_Q_HEAD 0x41CA78C
+
+#define mmDCORE0_EDMA0_QM_ARB_ERR_CAUSE 0x41CA79C
+
+#define mmDCORE0_EDMA0_QM_ARB_ERR_MSG_EN 0x41CA7A0
+
+#define mmDCORE0_EDMA0_QM_ARB_ERR_STS_DRP 0x41CA7A8
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CRED_STS 0x41CA7B0
+
+#define mmDCORE0_EDMA0_QM_ARB_MST_CRED_STS_1 0x41CA7B4
+
+#define mmDCORE0_EDMA0_QM_CSMR_STRICT_PRIO_CFG 0x41CA7FC
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_CFG0 0x41CA800
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_CFG1 0x41CA804
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_PTR_LO 0x41CA808
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_PTR_HI 0x41CA80C
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_TSIZE 0x41CA810
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_CTL 0x41CA814
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_STS 0x41CA81C
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_STS0 0x41CA820
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_STS1 0x41CA824
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_TSIZE_STS 0x41CA828
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_PTR_LO_STS 0x41CA82C
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_PTR_HI_STS 0x41CA830
+
+#define mmDCORE0_EDMA0_QM_CP_WR_ARC_ADDR_HI 0x41CA834
+
+#define mmDCORE0_EDMA0_QM_CP_WR_ARC_ADDR_LO 0x41CA838
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI 0x41CA83C
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO 0x41CA840
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI 0x41CA844
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO 0x41CA848
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_HI 0x41CA84C
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_LO 0x41CA850
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_HI 0x41CA854
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_LO 0x41CA858
+
+#define mmDCORE0_EDMA0_QM_ADDR_OVRD 0x41CA85C
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_0 0x41CA860
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_1 0x41CA864
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_2 0x41CA868
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_3 0x41CA86C
+
+#define mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_4 0x41CA870
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_CI 0x41CA874
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_CI_0 0x41CA878
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_CI_1 0x41CA87C
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_CI_2 0x41CA880
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_CI_3 0x41CA884
+
+#define mmDCORE0_EDMA0_QM_CQ_CTL_CI_4 0x41CA888
+
+#define mmDCORE0_EDMA0_QM_ARC_CQ_CTL_CI 0x41CA88C
+
+#define mmDCORE0_EDMA0_QM_CP_CFG 0x41CA890
+
+#define mmDCORE0_EDMA0_QM_CP_EXT_SWITCH 0x41CA894
+
+#define mmDCORE0_EDMA0_QM_CP_SWITCH_WD_SET 0x41CA898
+
+#define mmDCORE0_EDMA0_QM_CP_SWITCH_WD 0x41CA89C
+
+#define mmDCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_LO 0x41CA8A4
+
+#define mmDCORE0_EDMA0_QM_ARC_LB_ADDR_BASE_HI 0x41CA8A8
+
+#define mmDCORE0_EDMA0_QM_ENGINE_BASE_ADDR_HI 0x41CA8AC
+
+#define mmDCORE0_EDMA0_QM_ENGINE_BASE_ADDR_LO 0x41CA8B0
+
+#define mmDCORE0_EDMA0_QM_ENGINE_ADDR_RANGE_SIZE 0x41CA8B4
+
+#define mmDCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI 0x41CA8B8
+
+#define mmDCORE0_EDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO 0x41CA8BC
+
+#define mmDCORE0_EDMA0_QM_QM_BASE_ADDR_HI 0x41CA8C0
+
+#define mmDCORE0_EDMA0_QM_QM_BASE_ADDR_LO 0x41CA8C4
+
+#define mmDCORE0_EDMA0_QM_ARC_PQC_SECURE_PUSH_IND 0x41CA8C8
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_0_0 0x41CA8D0
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_0_1 0x41CA8D4
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_0_2 0x41CA8D8
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_0_3 0x41CA8DC
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_1_0 0x41CA8E0
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_1_1 0x41CA8E4
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_1_2 0x41CA8E8
+
+#define mmDCORE0_EDMA0_QM_PQC_STS_1_3 0x41CA8EC
+
+#define mmDCORE0_EDMA0_QM_SEI_STATUS 0x41CA8F0
+
+#define mmDCORE0_EDMA0_QM_SEI_MASK 0x41CA8F4
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_ADDR_LO 0x41CAD00
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_ADDR_HI 0x41CAD04
+
+#define mmDCORE0_EDMA0_QM_GLBL_ERR_WDATA 0x41CAD08
+
+#define mmDCORE0_EDMA0_QM_L2H_MASK_LO 0x41CAD14
+
+#define mmDCORE0_EDMA0_QM_L2H_MASK_HI 0x41CAD18
+
+#define mmDCORE0_EDMA0_QM_L2H_CMPR_LO 0x41CAD1C
+
+#define mmDCORE0_EDMA0_QM_L2H_CMPR_HI 0x41CAD20
+
+#define mmDCORE0_EDMA0_QM_LOCAL_RANGE_BASE 0x41CAD24
+
+#define mmDCORE0_EDMA0_QM_LOCAL_RANGE_SIZE 0x41CAD28
+
+#define mmDCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_1 0x41CAD30
+
+#define mmDCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_0 0x41CAD34
+
+#define mmDCORE0_EDMA0_QM_LBW_WR_RATE_LIM_CFG_1 0x41CAD38
+
+#define mmDCORE0_EDMA0_QM_HBW_RD_RATE_LIM_CFG_0 0x41CAD3C
+
+#define mmDCORE0_EDMA0_QM_IND_GW_APB_CFG 0x41CAD40
+
+#define mmDCORE0_EDMA0_QM_IND_GW_APB_WDATA 0x41CAD44
+
+#define mmDCORE0_EDMA0_QM_IND_GW_APB_RDATA 0x41CAD48
+
+#define mmDCORE0_EDMA0_QM_IND_GW_APB_STATUS 0x41CAD4C
+
+#define mmDCORE0_EDMA0_QM_PERF_CNT_FREE_LO 0x41CAD60
+
+#define mmDCORE0_EDMA0_QM_PERF_CNT_FREE_HI 0x41CAD64
+
+#define mmDCORE0_EDMA0_QM_PERF_CNT_IDLE_LO 0x41CAD68
+
+#define mmDCORE0_EDMA0_QM_PERF_CNT_IDLE_HI 0x41CAD6C
+
+#define mmDCORE0_EDMA0_QM_PERF_CNT_CFG 0x41CAD70
+
+#endif /* ASIC_REG_DCORE0_EDMA0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
new file mode 100644
index 000000000000..b608a634562f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA1_CORE_CTX_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_EDMA1_CORE_CTX_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA1_CORE_CTX_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_ASID 0x41DB800
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_MMU_BP 0x41DB804
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_STRONG_ORDER 0x41DB808
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_NO_SNOOP 0x41DB80C
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_WR_REDUCTION 0x41DB810
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_RD_ATOMIC 0x41DB814
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_QOS 0x41DB818
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_RSVD 0x41DB81C
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_EMEM_CPAGE 0x41DB820
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_CORE 0x41DB824
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_E2E_COORD 0x41DB828
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_WR_OVRD_LO 0x41DB830
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_WR_OVRD_HI 0x41DB834
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_RD_OVRD_LO 0x41DB838
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_RD_OVRD_HI 0x41DB83C
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_LB_COORD 0x41DB840
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_LB_LOCK 0x41DB844
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_LB_RSVD 0x41DB848
+
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_LB_OVRD 0x41DB84C
+
+#endif /* ASIC_REG_DCORE0_EDMA1_CORE_CTX_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..c3a462f2a9ac
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_EDMA1_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_DCORE0_EDMA1_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_EDMA1_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_ASID 0x41DAB80
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP 0x41DAB84
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x41DAB88
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x41DAB8C
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x41DAB90
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x41DAB94
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_QOS 0x41DAB98
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_RSVD 0x41DAB9C
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x41DABA0
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_CORE 0x41DABA4
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_E2E_COORD 0x41DABA8
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x41DABB0
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x41DABB4
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x41DABB8
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x41DABBC
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_LB_COORD 0x41DABC0
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_LB_LOCK 0x41DABC4
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_LB_RSVD 0x41DABC8
+
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_LB_OVRD 0x41DABCC
+
+#endif /* ASIC_REG_DCORE0_EDMA1_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
new file mode 100644
index 000000000000..df51eac10dd7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_HMMU0_MMU_MASKS_H_
+#define ASIC_REG_DCORE0_HMMU0_MMU_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_HMMU0_MMU
+ * (Prototype: MMU)
+ *****************************************
+ */
+
+/* DCORE0_HMMU0_MMU_MMU_ENABLE */
+#define DCORE0_HMMU0_MMU_MMU_ENABLE_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_ENABLE_R_MASK 0x1
+
+/* DCORE0_HMMU0_MMU_FORCE_ORDERING */
+#define DCORE0_HMMU0_MMU_FORCE_ORDERING_WEAK_ORDERING_SHIFT 0
+#define DCORE0_HMMU0_MMU_FORCE_ORDERING_WEAK_ORDERING_MASK 0x1
+#define DCORE0_HMMU0_MMU_FORCE_ORDERING_STRONG_ORDERING_SHIFT 1
+#define DCORE0_HMMU0_MMU_FORCE_ORDERING_STRONG_ORDERING_MASK 0x2
+
+/* DCORE0_HMMU0_MMU_FEATURE_ENABLE */
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_VA_ORDERING_EN_SHIFT 0
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_VA_ORDERING_EN_MASK 0x1
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_SHIFT 1
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_MASK 0x2
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_HOP_OFFSET_EN_SHIFT 2
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_HOP_OFFSET_EN_MASK 0x4
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_OBI_ORDERING_EN_SHIFT 3
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_OBI_ORDERING_EN_MASK 0x8
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_SHIFT 4
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_MASK 0x10
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_TRACE_ENABLE_SHIFT 5
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_TRACE_ENABLE_MASK 0x20
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_TRACE_EV_MMU_OR_STLB_SHIFT 6
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_TRACE_EV_MMU_OR_STLB_MASK 0x40
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_TRACE_CLKH_EQUAL_CLKL_SHIFT 7
+#define DCORE0_HMMU0_MMU_FEATURE_ENABLE_TRACE_CLKH_EQUAL_CLKL_MASK 0x80
+
+/* DCORE0_HMMU0_MMU_VA_ORDERING_MASK_38_7 */
+#define DCORE0_HMMU0_MMU_VA_ORDERING_MASK_38_7_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_VA_ORDERING_MASK_38_7_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_VA_ORDERING_MASK_64_39 */
+#define DCORE0_HMMU0_MMU_VA_ORDERING_MASK_64_39_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_VA_ORDERING_MASK_64_39_R_MASK 0x3FFFFFF
+
+/* DCORE0_HMMU0_MMU_LOG2_DDR_SIZE */
+#define DCORE0_HMMU0_MMU_LOG2_DDR_SIZE_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_LOG2_DDR_SIZE_R_MASK 0xFF
+
+/* DCORE0_HMMU0_MMU_SCRAMBLER */
+#define DCORE0_HMMU0_MMU_SCRAMBLER_ADDR_BIT_SHIFT 0
+#define DCORE0_HMMU0_MMU_SCRAMBLER_ADDR_BIT_MASK 0x3F
+#define DCORE0_HMMU0_MMU_SCRAMBLER_SINGLE_DDR_EN_SHIFT 6
+#define DCORE0_HMMU0_MMU_SCRAMBLER_SINGLE_DDR_EN_MASK 0x40
+#define DCORE0_HMMU0_MMU_SCRAMBLER_SINGLE_DDR_ID_SHIFT 7
+#define DCORE0_HMMU0_MMU_SCRAMBLER_SINGLE_DDR_ID_MASK 0x80
+#define DCORE0_HMMU0_MMU_SCRAMBLER_DDR_CH_LSB_BIT_LOCATION_SHIFT 8
+#define DCORE0_HMMU0_MMU_SCRAMBLER_DDR_CH_LSB_BIT_LOCATION_MASK 0x7F00
+
+/* DCORE0_HMMU0_MMU_MEM_INIT_BUSY */
+#define DCORE0_HMMU0_MMU_MEM_INIT_BUSY_DATA_SHIFT 0
+#define DCORE0_HMMU0_MMU_MEM_INIT_BUSY_DATA_MASK 0x3
+#define DCORE0_HMMU0_MMU_MEM_INIT_BUSY_OBI0_SHIFT 2
+#define DCORE0_HMMU0_MMU_MEM_INIT_BUSY_OBI0_MASK 0x4
+#define DCORE0_HMMU0_MMU_MEM_INIT_BUSY_OBI1_SHIFT 3
+#define DCORE0_HMMU0_MMU_MEM_INIT_BUSY_OBI1_MASK 0x8
+
+/* DCORE0_HMMU0_MMU_SPI_SEI_MASK */
+#define DCORE0_HMMU0_MMU_SPI_SEI_MASK_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_SPI_SEI_MASK_R_MASK 0x7FFFF
+
+/* DCORE0_HMMU0_MMU_SPI_SEI_CAUSE */
+#define DCORE0_HMMU0_MMU_SPI_SEI_CAUSE_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_SPI_SEI_CAUSE_R_MASK 0x7FFFF
+
+/* DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE */
+#define DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_63_32_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA */
+#define DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE */
+#define DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_63_32_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA */
+#define DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID */
+#define DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_PAGE_ERR_VALID_ENTRY_SHIFT 0
+#define DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_PAGE_ERR_VALID_ENTRY_MASK 0x1
+#define DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_ACCESS_ERR_VALID_ENTRY_SHIFT 1
+#define DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_ACCESS_ERR_VALID_ENTRY_MASK 0x2
+
+/* DCORE0_HMMU0_MMU_INTERRUPT_CLR */
+#define DCORE0_HMMU0_MMU_INTERRUPT_CLR_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_INTERRUPT_CLR_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_INTERRUPT_MASK */
+#define DCORE0_HMMU0_MMU_INTERRUPT_MASK_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_INTERRUPT_MASK_R_MASK 0xFF
+
+/* DCORE0_HMMU0_MMU_DBG_MEM_WRAP_RM */
+#define DCORE0_HMMU0_MMU_DBG_MEM_WRAP_RM_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_DBG_MEM_WRAP_RM_R_MASK 0x3FFFFFFF
+
+/* DCORE0_HMMU0_MMU_SPI_CAUSE_CLR */
+#define DCORE0_HMMU0_MMU_SPI_CAUSE_CLR_CLR_SHIFT 0
+#define DCORE0_HMMU0_MMU_SPI_CAUSE_CLR_CLR_MASK 0x1
+
+/* DCORE0_HMMU0_MMU_PIPE_CREDIT */
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_READ_CREDIT_SHIFT 0
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_READ_CREDIT_MASK 0xF
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_READ_FORCE_FULL_SHIFT 7
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_READ_FORCE_FULL_MASK 0x80
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_WRITE_CREDIT_SHIFT 8
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_WRITE_CREDIT_MASK 0xF00
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_WRITE_FORCE_FULL_SHIFT 15
+#define DCORE0_HMMU0_MMU_PIPE_CREDIT_WRITE_FORCE_FULL_MASK 0x8000
+
+/* DCORE0_HMMU0_MMU_MMU_BYPASS */
+#define DCORE0_HMMU0_MMU_MMU_BYPASS_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_BYPASS_R_MASK 0x1
+
+/* DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE */
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_SHIFT 0
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK 0xF
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_SHIFT 4
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK 0xF0
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP3_PAGE_SIZE_SHIFT 8
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP3_PAGE_SIZE_MASK 0xF00
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP2_PAGE_SIZE_SHIFT 12
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP2_PAGE_SIZE_MASK 0xF000
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_SHIFT 16
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_MASK 0xF0000
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_SHIFT 20
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK \
+0x100000
+
+/* DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG */
+#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MASK_SHIFT 0
+#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MASK_MASK 0x1FF
+#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MIN_SHIFT 10
+#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MIN_MASK 0x7FC00
+#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MAX_SHIFT 20
+#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MAX_MASK 0x1FF00000
+
+/* DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT */
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_WRITE_CRED_SHIFT 0
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_WRITE_CRED_MASK 0x1FF
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_READ_CRED_SHIFT 9
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_READ_CRED_MASK 0x3FE00
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_TOTAL_SHIFT 18
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_TOTAL_MASK 0x7FC0000
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_FORCE_FULL_WRITE_SHIFT 27
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_FORCE_FULL_WRITE_MASK 0x8000000
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_FORCE_FULL_READ_SHIFT 28
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_FORCE_FULL_READ_MASK 0x10000000
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_FORCE_FULL_TOTAL_SHIFT 29
+#define DCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT_FORCE_FULL_TOTAL_MASK 0x20000000
+
+/* DCORE0_HMMU0_MMU_TOTAL_SLICE_CREDIT */
+#define DCORE0_HMMU0_MMU_TOTAL_SLICE_CREDIT_TOTAL_SHIFT 18
+#define DCORE0_HMMU0_MMU_TOTAL_SLICE_CREDIT_TOTAL_MASK 0x7FC0000
+#define DCORE0_HMMU0_MMU_TOTAL_SLICE_CREDIT_FORCE_FULL_TOTAL_SHIFT 29
+#define DCORE0_HMMU0_MMU_TOTAL_SLICE_CREDIT_FORCE_FULL_TOTAL_MASK 0x20000000
+
+/* DCORE0_HMMU0_MMU_PAGE_FAULT_ID_LSB */
+#define DCORE0_HMMU0_MMU_PAGE_FAULT_ID_LSB_PAGE_FAULT_ID_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_PAGE_FAULT_ID_LSB_PAGE_FAULT_ID_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_PAGE_FAULT_ID_MSB */
+#define DCORE0_HMMU0_MMU_PAGE_FAULT_ID_MSB_PAGE_FAULT_ID_42_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_PAGE_FAULT_ID_MSB_PAGE_FAULT_ID_42_32_MASK 0x7FF
+
+/* DCORE0_HMMU0_MMU_PAGE_ACCESS_ID_LSB */
+#define DCORE0_HMMU0_MMU_PAGE_ACCESS_ID_LSB_PAGE_ACCESS_ID_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_PAGE_ACCESS_ID_LSB_PAGE_ACCESS_ID_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_PAGE_ACCESS_ID_MSB */
+#define DCORE0_HMMU0_MMU_PAGE_ACCESS_ID_MSB_PAGE_ACCESS_ID_42_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_PAGE_ACCESS_ID_MSB_PAGE_ACCESS_ID_42_32_MASK 0x7FF
+
+/* DCORE0_HMMU0_MMU_DDR_RANGE_REG_ENABLE */
+#define DCORE0_HMMU0_MMU_DDR_RANGE_REG_ENABLE_ENABLE_SHIFT 0
+#define DCORE0_HMMU0_MMU_DDR_RANGE_REG_ENABLE_ENABLE_MASK 0x1
+
+/* DCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32 */
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_SEC_MIN_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_SEC_MIN_63_32_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0 */
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_SEC_MIN_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_SEC_MIN_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32 */
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_SEC_MAX_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_SEC_MAX_63_32_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0 */
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_SEC_MAX_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_SEC_MAX_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32 */
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_PRIV_MIN_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_PRIV_MIN_63_32_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0 */
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_PRIV_MIN_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_PRIV_MIN_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32 */
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_PRIV_MAX_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_PRIV_MAX_63_32_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0 */
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_PRIV_MAX_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_PRIV_MAX_31_0_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32 */
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK \
+0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0 */
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK \
+0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32 */
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK \
+0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0 */
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK \
+0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD */
+#define DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD_R_MASK 0x1
+
+/* DCORE0_HMMU0_MMU_RAZWI_WRITE_ID_31_0 */
+#define DCORE0_HMMU0_MMU_RAZWI_WRITE_ID_31_0_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_WRITE_ID_31_0_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_RAZWI_WRITE_ID_42_32 */
+#define DCORE0_HMMU0_MMU_RAZWI_WRITE_ID_42_32_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_WRITE_ID_42_32_R_MASK 0x7FF
+
+/* DCORE0_HMMU0_MMU_RAZWI_READ_VLD */
+#define DCORE0_HMMU0_MMU_RAZWI_READ_VLD_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_READ_VLD_R_MASK 0x1
+
+/* DCORE0_HMMU0_MMU_RAZWI_READ_ID_31_0 */
+#define DCORE0_HMMU0_MMU_RAZWI_READ_ID_31_0_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_READ_ID_31_0_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_RAZWI_READ_ID_42_32 */
+#define DCORE0_HMMU0_MMU_RAZWI_READ_ID_42_32_R_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_READ_ID_42_32_R_MASK 0x7FF
+
+/* DCORE0_HMMU0_MMU_MMU_SRC_NUM */
+#define DCORE0_HMMU0_MMU_MMU_SRC_NUM_OVERRIDE_SRC_NUM_EN_SHIFT 0
+#define DCORE0_HMMU0_MMU_MMU_SRC_NUM_OVERRIDE_SRC_NUM_EN_MASK 0x1
+#define DCORE0_HMMU0_MMU_MMU_SRC_NUM_SRC_NUM_SHIFT 1
+#define DCORE0_HMMU0_MMU_MMU_SRC_NUM_SRC_NUM_MASK 0x1E
+
+/* DCORE0_HMMU0_MMU_RAZWI_ADDR_LSB */
+#define DCORE0_HMMU0_MMU_RAZWI_ADDR_LSB_ADDR_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_ADDR_LSB_ADDR_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_MMU_RAZWI_ADDR_MSB */
+#define DCORE0_HMMU0_MMU_RAZWI_ADDR_MSB_ADDR_SHIFT 0
+#define DCORE0_HMMU0_MMU_RAZWI_ADDR_MSB_ADDR_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_HMMU0_MMU_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
new file mode 100644
index 000000000000..08ccd695ec89
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_HMMU0_MMU_REGS_H_
+#define ASIC_REG_DCORE0_HMMU0_MMU_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_HMMU0_MMU
+ * (Prototype: MMU)
+ *****************************************
+ */
+
+#define mmDCORE0_HMMU0_MMU_MMU_ENABLE 0x408000C
+
+#define mmDCORE0_HMMU0_MMU_FORCE_ORDERING 0x4080010
+
+#define mmDCORE0_HMMU0_MMU_FEATURE_ENABLE 0x4080014
+
+#define mmDCORE0_HMMU0_MMU_VA_ORDERING_MASK_38_7 0x4080018
+
+#define mmDCORE0_HMMU0_MMU_VA_ORDERING_MASK_64_39 0x408001C
+
+#define mmDCORE0_HMMU0_MMU_LOG2_DDR_SIZE 0x4080020
+
+#define mmDCORE0_HMMU0_MMU_SCRAMBLER 0x4080024
+
+#define mmDCORE0_HMMU0_MMU_MEM_INIT_BUSY 0x4080028
+
+#define mmDCORE0_HMMU0_MMU_SPI_SEI_MASK 0x408002C
+
+#define mmDCORE0_HMMU0_MMU_SPI_SEI_CAUSE 0x4080030
+
+#define mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE 0x4080034
+
+#define mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA 0x4080038
+
+#define mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE 0x408003C
+
+#define mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA 0x4080040
+
+#define mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID 0x4080044
+
+#define mmDCORE0_HMMU0_MMU_INTERRUPT_CLR 0x4080048
+
+#define mmDCORE0_HMMU0_MMU_INTERRUPT_MASK 0x408004C
+
+#define mmDCORE0_HMMU0_MMU_DBG_MEM_WRAP_RM 0x4080050
+
+#define mmDCORE0_HMMU0_MMU_SPI_CAUSE_CLR 0x4080054
+
+#define mmDCORE0_HMMU0_MMU_PIPE_CREDIT 0x4080058
+
+#define mmDCORE0_HMMU0_MMU_MMU_BYPASS 0x408006C
+
+#define mmDCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE 0x4080070
+
+#define mmDCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG 0x40800A0
+
+#define mmDCORE0_HMMU0_MMU_CORE_SEP_SLICE_CRDT 0x40800D0
+
+#define mmDCORE0_HMMU0_MMU_TOTAL_SLICE_CREDIT 0x40800F4
+
+#define mmDCORE0_HMMU0_MMU_PAGE_FAULT_ID_LSB 0x40800F8
+
+#define mmDCORE0_HMMU0_MMU_PAGE_FAULT_ID_MSB 0x40800FC
+
+#define mmDCORE0_HMMU0_MMU_PAGE_ACCESS_ID_LSB 0x4080100
+
+#define mmDCORE0_HMMU0_MMU_PAGE_ACCESS_ID_MSB 0x4080104
+
+#define mmDCORE0_HMMU0_MMU_DDR_RANGE_REG_ENABLE 0x4080108
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_0 0x4080110
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_1 0x4080114
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_2 0x4080118
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_3 0x408011C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_4 0x4080120
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_5 0x4080124
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_6 0x4080128
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_7 0x408012C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_0 0x4080140
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_1 0x4080144
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_2 0x4080148
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_3 0x408014C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_4 0x4080150
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_5 0x4080154
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_6 0x4080158
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_7 0x408015C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_0 0x4080170
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_1 0x4080174
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_2 0x4080178
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_3 0x408017C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_4 0x4080180
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_5 0x4080184
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_6 0x4080188
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_7 0x408018C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_0 0x40801A0
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_1 0x40801A4
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_2 0x40801A8
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_3 0x40801AC
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_4 0x40801B0
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_5 0x40801B4
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_6 0x40801B8
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_7 0x40801BC
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_0 0x40801D0
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_1 0x40801D4
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_2 0x40801D8
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_3 0x40801DC
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_4 0x40801E0
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_5 0x40801E4
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_6 0x40801E8
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_7 0x40801EC
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_0 0x4080200
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_1 0x4080204
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_2 0x4080208
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_3 0x408020C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_4 0x4080210
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_5 0x4080214
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_6 0x4080218
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_7 0x408021C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_0 0x4080230
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_1 0x4080234
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_2 0x4080238
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_3 0x408023C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_4 0x4080240
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_5 0x4080244
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_6 0x4080248
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_7 0x408024C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_0 0x4080260
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_1 0x4080264
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_2 0x4080268
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_3 0x408026C
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_4 0x4080270
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_5 0x4080274
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_6 0x4080278
+
+#define mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_7 0x408027C
+
+#define mmDCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32 0x4080290
+
+#define mmDCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0 0x4080294
+
+#define mmDCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32 0x4080298
+
+#define mmDCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0 0x408029C
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_WRITE_VLD 0x4080300
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_WRITE_ID_31_0 0x4080304
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_WRITE_ID_42_32 0x4080308
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_READ_VLD 0x408030C
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_READ_ID_31_0 0x4080310
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_READ_ID_42_32 0x4080314
+
+#define mmDCORE0_HMMU0_MMU_MMU_SRC_NUM 0x408031C
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_ADDR_LSB 0x4080320
+
+#define mmDCORE0_HMMU0_MMU_RAZWI_ADDR_MSB 0x4080324
+
+#endif /* ASIC_REG_DCORE0_HMMU0_MMU_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
new file mode 100644
index 000000000000..192eba5f07bb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_HMMU0_STLB_MASKS_H_
+#define ASIC_REG_DCORE0_HMMU0_STLB_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_HMMU0_STLB
+ * (Prototype: STLB)
+ *****************************************
+ */
+
+/* DCORE0_HMMU0_STLB_BUSY */
+#define DCORE0_HMMU0_STLB_BUSY_BUSY_SHIFT 0
+#define DCORE0_HMMU0_STLB_BUSY_BUSY_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_ASID */
+#define DCORE0_HMMU0_STLB_ASID_ASID_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_ASID_MASK 0x3FF
+
+/* DCORE0_HMMU0_STLB_HOP0_PA43_12 */
+#define DCORE0_HMMU0_STLB_HOP0_PA43_12_HOP0_PA43_12_SHIFT 0
+#define DCORE0_HMMU0_STLB_HOP0_PA43_12_HOP0_PA43_12_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_HOP0_PA63_44 */
+#define DCORE0_HMMU0_STLB_HOP0_PA63_44_HOP0_PA63_44_SHIFT 0
+#define DCORE0_HMMU0_STLB_HOP0_PA63_44_HOP0_PA63_44_MASK 0xFFFFF
+
+/* DCORE0_HMMU0_STLB_CACHE_INV */
+#define DCORE0_HMMU0_STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
+#define DCORE0_HMMU0_STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
+#define DCORE0_HMMU0_STLB_CACHE_INV_INDEX_MASK_SHIFT 8
+#define DCORE0_HMMU0_STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
+
+/* DCORE0_HMMU0_STLB_CACHE_INV_BASE_39_8 */
+#define DCORE0_HMMU0_STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0
+#define DCORE0_HMMU0_STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_CACHE_INV_BASE_63_40 */
+#define DCORE0_HMMU0_STLB_CACHE_INV_BASE_63_40_PA_SHIFT 0
+#define DCORE0_HMMU0_STLB_CACHE_INV_BASE_63_40_PA_MASK 0xFFFFFF
+
+/* DCORE0_HMMU0_STLB_STLB_FEATURE_EN */
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0x1F80
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_FOLLOWING_NUM_LIMIT_SHIFT 13
+#define DCORE0_HMMU0_STLB_STLB_FEATURE_EN_FOLLOWING_NUM_LIMIT_MASK 0xE000
+
+/* DCORE0_HMMU0_STLB_STLB_AXI_CACHE */
+#define DCORE0_HMMU0_STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0
+#define DCORE0_HMMU0_STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF
+#define DCORE0_HMMU0_STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4
+#define DCORE0_HMMU0_STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0
+#define DCORE0_HMMU0_STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8
+#define DCORE0_HMMU0_STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00
+
+/* DCORE0_HMMU0_STLB_HOP_CONFIGURATION */
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_SHIFT 4
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK 0x70
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_SHIFT 8
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK 0x700
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 12
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x7000
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_SHIFT 16
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK 0x70000
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_SHIFT 20
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK 0x100000
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_SHIFT 21
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK \
+0x7E00000
+
+/* DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32 */
+#define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_31_0 */
+#define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_INV_ALL_START */
+#define DCORE0_HMMU0_STLB_INV_ALL_START_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_INV_ALL_START_R_MASK 0x1
+
+/* DCORE0_HMMU0_STLB_INV_ALL_SET */
+#define DCORE0_HMMU0_STLB_INV_ALL_SET_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_INV_ALL_SET_R_MASK 0xFF
+
+/* DCORE0_HMMU0_STLB_INV_PS */
+#define DCORE0_HMMU0_STLB_INV_PS_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_INV_PS_R_MASK 0x3
+
+/* DCORE0_HMMU0_STLB_INV_CONSUMER_INDEX */
+#define DCORE0_HMMU0_STLB_INV_CONSUMER_INDEX_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_INV_CONSUMER_INDEX_R_MASK 0xFF
+
+/* DCORE0_HMMU0_STLB_INV_HIT_COUNT */
+#define DCORE0_HMMU0_STLB_INV_HIT_COUNT_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_INV_HIT_COUNT_R_MASK 0x7FF
+
+/* DCORE0_HMMU0_STLB_INV_SET */
+#define DCORE0_HMMU0_STLB_INV_SET_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_INV_SET_R_MASK 0xFF
+
+/* DCORE0_HMMU0_STLB_SRAM_INIT */
+#define DCORE0_HMMU0_STLB_SRAM_INIT_BUSY_TAG_SHIFT 0
+#define DCORE0_HMMU0_STLB_SRAM_INIT_BUSY_TAG_MASK 0x3
+#define DCORE0_HMMU0_STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2
+#define DCORE0_HMMU0_STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC
+#define DCORE0_HMMU0_STLB_SRAM_INIT_BUSY_DATA_SHIFT 4
+#define DCORE0_HMMU0_STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
+
+/* DCORE0_HMMU0_STLB_MEM_CACHE_INVALIDATION */
+
+/* DCORE0_HMMU0_STLB_MEM_CACHE_INV_STATUS */
+#define DCORE0_HMMU0_STLB_MEM_CACHE_INV_STATUS_INVALIDATE_DONE_SHIFT 0
+#define DCORE0_HMMU0_STLB_MEM_CACHE_INV_STATUS_INVALIDATE_DONE_MASK 0x1
+#define DCORE0_HMMU0_STLB_MEM_CACHE_INV_STATUS_CACHE_IDLE_SHIFT 1
+#define DCORE0_HMMU0_STLB_MEM_CACHE_INV_STATUS_CACHE_IDLE_MASK 0x2
+
+/* DCORE0_HMMU0_STLB_MEM_CACHE_BASE_38_7 */
+#define DCORE0_HMMU0_STLB_MEM_CACHE_BASE_38_7_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_MEM_CACHE_BASE_38_7_R_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_MEM_CACHE_BASE_63_39 */
+#define DCORE0_HMMU0_STLB_MEM_CACHE_BASE_63_39_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_MEM_CACHE_BASE_63_39_R_MASK 0x1FFFFFF
+
+/* DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG */
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_CACHE_HOP_EN_SHIFT 0
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_CACHE_HOP_EN_MASK 0x3F
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_CACHE_HOP_PREFETCH_EN_SHIFT 6
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_CACHE_HOP_PREFETCH_EN_MASK 0xFC0
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_BYPASS_EN_SHIFT 12
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_BYPASS_EN_MASK 0x1000
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_RELEASE_INVALIDATE_SHIFT 13
+#define DCORE0_HMMU0_STLB_MEM_CACHE_CONFIG_RELEASE_INVALIDATE_MASK 0x2000
+
+/* DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5 */
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5_MIN_SHIFT 0
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5_MIN_MASK 0x1FF
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5_MAX_SHIFT 9
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5_MAX_MASK 0x3FE00
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5_MASK_SHIFT 18
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5_MASK_MASK 0x7FC0000
+
+/* DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4 */
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4_MIN_SHIFT 0
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4_MIN_MASK 0x1FF
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4_MAX_SHIFT 9
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4_MAX_MASK 0x3FE00
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4_MASK_SHIFT 18
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4_MASK_MASK 0x7FC0000
+
+/* DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3 */
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3_MIN_SHIFT 0
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3_MIN_MASK 0x1FF
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3_MAX_SHIFT 9
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3_MAX_MASK 0x3FE00
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3_MASK_SHIFT 18
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3_MASK_MASK 0x7FC0000
+
+/* DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2 */
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2_MIN_SHIFT 0
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2_MIN_MASK 0x1FF
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2_MAX_SHIFT 9
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2_MAX_MASK 0x3FE00
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2_MASK_SHIFT 18
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2_MASK_MASK 0x7FC0000
+
+/* DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1 */
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1_MIN_SHIFT 0
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1_MIN_MASK 0x1FF
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1_MAX_SHIFT 9
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1_MAX_MASK 0x3FE00
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1_MASK_SHIFT 18
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1_MASK_MASK 0x7FC0000
+
+/* DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0 */
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0_MIN_SHIFT 0
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0_MIN_MASK 0x1FF
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0_MAX_SHIFT 9
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0_MAX_MASK 0x3FE00
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0_MASK_SHIFT 18
+#define DCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0_MASK_MASK 0x7FC0000
+
+/* DCORE0_HMMU0_STLB_MULTI_HIT_INTERRUPT_CLR */
+
+/* DCORE0_HMMU0_STLB_MULTI_HIT_INTERRUPT_MASK */
+#define DCORE0_HMMU0_STLB_MULTI_HIT_INTERRUPT_MASK_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_MULTI_HIT_INTERRUPT_MASK_R_MASK 0x1
+
+/* DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG */
+#define DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG_PLRU_EVICTION_SHIFT 0
+#define DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG_PLRU_EVICTION_MASK 0x1
+#define DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG_CACHE_STOP_SHIFT 1
+#define DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG_CACHE_STOP_MASK 0x2
+#define DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG_INV_WRITEBACK_SHIFT 2
+#define DCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG_INV_WRITEBACK_MASK 0x4
+
+/* DCORE0_HMMU0_STLB_MEM_READ_ARPROT */
+#define DCORE0_HMMU0_STLB_MEM_READ_ARPROT_R_SHIFT 0
+#define DCORE0_HMMU0_STLB_MEM_READ_ARPROT_R_MASK 0x7
+
+/* DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION */
+#define \
+DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT \
+0
+#define \
+DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
+0x1
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_MASK 0xFFC
+
+/* DCORE0_HMMU0_STLB_RANGE_INV_START_LSB */
+#define DCORE0_HMMU0_STLB_RANGE_INV_START_LSB_INV_START_LSB_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_INV_START_LSB_INV_START_LSB_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_RANGE_INV_START_MSB */
+#define DCORE0_HMMU0_STLB_RANGE_INV_START_MSB_INV_START_MSB_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_INV_START_MSB_INV_START_MSB_MASK 0xFFFFF
+
+/* DCORE0_HMMU0_STLB_RANGE_INV_END_LSB */
+#define DCORE0_HMMU0_STLB_RANGE_INV_END_LSB_INV_END_LSB_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_INV_END_LSB_INV_END_LSB_MASK 0xFFFFFFFF
+
+/* DCORE0_HMMU0_STLB_RANGE_INV_END_MSB */
+#define DCORE0_HMMU0_STLB_RANGE_INV_END_MSB_INV_END_MSB_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_INV_END_MSB_INV_END_MSB_MASK 0xFFFFF
+
+/* DCORE0_HMMU0_STLB_ASID_SCRAMBLER_CTRL */
+#define DCORE0_HMMU0_STLB_ASID_SCRAMBLER_CTRL_SCRAMBLER_SCRAM_EN_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCRAMBLER_CTRL_SCRAMBLER_SCRAM_EN_MASK 0x1
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK \
+0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_11 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_11_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_11_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_12 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_12_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_12_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_13 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_13_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_13_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_14 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_14_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_14_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_15 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_15_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_15_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_16 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_16_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_16_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_17 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_17_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_17_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_18 */
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_18_ASID_POLY_MATRIX_H3_SHIFT 0
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_18_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+#endif /* ASIC_REG_DCORE0_HMMU0_STLB_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
new file mode 100644
index 000000000000..864a259f68e2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_HMMU0_STLB_REGS_H_
+#define ASIC_REG_DCORE0_HMMU0_STLB_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_HMMU0_STLB
+ * (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmDCORE0_HMMU0_STLB_BUSY 0x4081000
+
+#define mmDCORE0_HMMU0_STLB_ASID 0x4081004
+
+#define mmDCORE0_HMMU0_STLB_HOP0_PA43_12 0x4081008
+
+#define mmDCORE0_HMMU0_STLB_HOP0_PA63_44 0x408100C
+
+#define mmDCORE0_HMMU0_STLB_CACHE_INV 0x4081010
+
+#define mmDCORE0_HMMU0_STLB_CACHE_INV_BASE_39_8 0x4081014
+
+#define mmDCORE0_HMMU0_STLB_CACHE_INV_BASE_63_40 0x4081018
+
+#define mmDCORE0_HMMU0_STLB_STLB_FEATURE_EN 0x408101C
+
+#define mmDCORE0_HMMU0_STLB_STLB_AXI_CACHE 0x4081020
+
+#define mmDCORE0_HMMU0_STLB_HOP_CONFIGURATION 0x4081024
+
+#define mmDCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32 0x4081028
+
+#define mmDCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_31_0 0x408102C
+
+#define mmDCORE0_HMMU0_STLB_INV_ALL_START 0x4081034
+
+#define mmDCORE0_HMMU0_STLB_INV_ALL_SET 0x4081038
+
+#define mmDCORE0_HMMU0_STLB_INV_PS 0x408103C
+
+#define mmDCORE0_HMMU0_STLB_INV_CONSUMER_INDEX 0x4081040
+
+#define mmDCORE0_HMMU0_STLB_INV_HIT_COUNT 0x4081044
+
+#define mmDCORE0_HMMU0_STLB_INV_SET 0x4081048
+
+#define mmDCORE0_HMMU0_STLB_SRAM_INIT 0x408104C
+
+#define mmDCORE0_HMMU0_STLB_MEM_CACHE_INVALIDATION 0x4081050
+
+#define mmDCORE0_HMMU0_STLB_MEM_CACHE_INV_STATUS 0x4081054
+
+#define mmDCORE0_HMMU0_STLB_MEM_CACHE_BASE_38_7 0x4081058
+
+#define mmDCORE0_HMMU0_STLB_MEM_CACHE_BASE_63_39 0x408105C
+
+#define mmDCORE0_HMMU0_STLB_MEM_CACHE_CONFIG 0x4081060
+
+#define mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP5 0x4081064
+
+#define mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP4 0x4081068
+
+#define mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3 0x408106C
+
+#define mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2 0x4081070
+
+#define mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1 0x4081074
+
+#define mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0 0x4081078
+
+#define mmDCORE0_HMMU0_STLB_MULTI_HIT_INTERRUPT_CLR 0x408107C
+
+#define mmDCORE0_HMMU0_STLB_MULTI_HIT_INTERRUPT_MASK 0x4081080
+
+#define mmDCORE0_HMMU0_STLB_MEM_L0_CACHE_CFG 0x4081084
+
+#define mmDCORE0_HMMU0_STLB_MEM_READ_ARPROT 0x4081088
+
+#define mmDCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION 0x408108C
+
+#define mmDCORE0_HMMU0_STLB_RANGE_INV_START_LSB 0x4081090
+
+#define mmDCORE0_HMMU0_STLB_RANGE_INV_START_MSB 0x4081094
+
+#define mmDCORE0_HMMU0_STLB_RANGE_INV_END_LSB 0x4081098
+
+#define mmDCORE0_HMMU0_STLB_RANGE_INV_END_MSB 0x408109C
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCRAMBLER_CTRL 0x4081100
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0 0x4081104
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1 0x4081108
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2 0x408110C
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3 0x4081110
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4 0x4081114
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5 0x4081118
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6 0x408111C
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7 0x4081120
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8 0x4081124
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9 0x4081128
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10 0x408112C
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_11 0x4081130
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_12 0x4081134
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_13 0x4081138
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_14 0x408113C
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_15 0x4081140
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_16 0x4081144
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_17 0x4081148
+
+#define mmDCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_18 0x408114C
+
+#endif /* ASIC_REG_DCORE0_HMMU0_STLB_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
new file mode 100644
index 000000000000..07bed3ec740e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_ACC_REGS_H_
+#define ASIC_REG_DCORE0_MME_ACC_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_ACC
+ * (Prototype: ACC)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_ACC_WBC0_AXI 0x40F8000
+
+#define mmDCORE0_MME_ACC_WBC1_AXI 0x40F8004
+
+#define mmDCORE0_MME_ACC_WBC0_RL 0x40F8008
+
+#define mmDCORE0_MME_ACC_WBC1_RL 0x40F800C
+
+#define mmDCORE0_MME_ACC_WBC_STALL 0x40F8010
+
+#define mmDCORE0_MME_ACC_AWCACHE 0x40F8014
+
+#define mmDCORE0_MME_ACC_AWPROT 0x40F8018
+
+#define mmDCORE0_MME_ACC_AP_LFSR_POLY 0x40F801C
+
+#define mmDCORE0_MME_ACC_AP_LFSR_SEED_WDATA 0x40F8020
+
+#define mmDCORE0_MME_ACC_AP_LFSR_SEED_SEL 0x40F8024
+
+#define mmDCORE0_MME_ACC_AP_LFSR_SEED_RDATA 0x40F8028
+
+#define mmDCORE0_MME_ACC_AP_LFSR_CLOSE_CGATE_DLY 0x40F802C
+
+#define mmDCORE0_MME_ACC_WBC_SRC_BP 0x40F8030
+
+#define mmDCORE0_MME_ACC_CLK_GATE_EN 0x40F8034
+
+#define mmDCORE0_MME_ACC_WBC_INFLIGHTS 0x40F8038
+
+#define mmDCORE0_MME_ACC_HBW_CLK_ENABLER_DIS 0x40F803C
+
+#define mmDCORE0_MME_ACC_E2E_CRDT_TOP0 0x40F8040
+
+#define mmDCORE0_MME_ACC_E2E_CRDT_TOP1 0x40F8044
+
+#define mmDCORE0_MME_ACC_INTR_CAUSE 0x40F8048
+
+#define mmDCORE0_MME_ACC_INTR_MASK 0x40F804C
+
+#define mmDCORE0_MME_ACC_INTR_CLEAR 0x40F8050
+
+#define mmDCORE0_MME_ACC_WR_AXI_AGG_COUT0 0x40F8054
+
+#define mmDCORE0_MME_ACC_WR_AXI_AGG_COUT1 0x40F8058
+
+#define mmDCORE0_MME_ACC_BIST 0x40F805C
+
+#define mmDCORE0_MME_ACC_WR_AXI_AGG_2P_BVALID 0x40F8060
+
+#endif /* ASIC_REG_DCORE0_MME_ACC_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
new file mode 100644
index 000000000000..c9043979fd69
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_0 0x40CB22C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_1 0x40CB230
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_2 0x40CB234
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_3 0x40CB238
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_4 0x40CB23C
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
new file mode 100644
index 000000000000..7d74aea4576f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_0 0x40CB240
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_1 0x40CB244
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_2 0x40CB248
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_3 0x40CB24C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_4 0x40CB250
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
new file mode 100644
index 000000000000..f6f519eb5f6f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_0 0x40CB254
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_1 0x40CB258
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_2 0x40CB25C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_3 0x40CB260
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_4 0x40CB264
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
new file mode 100644
index 000000000000..0e0c056ade9b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_0 0x40CB268
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_1 0x40CB26C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_2 0x40CB270
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_3 0x40CB274
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_4 0x40CB278
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
new file mode 100644
index 000000000000..34c6134a2f93
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_0 0x40CB15C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_1 0x40CB160
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_2 0x40CB164
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_3 0x40CB168
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_4 0x40CB16C
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
new file mode 100644
index 000000000000..55065032f87c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_0 0x40CB170
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_1 0x40CB174
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_2 0x40CB178
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_3 0x40CB17C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_4 0x40CB180
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
new file mode 100644
index 000000000000..6022b387eacf
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_0 0x40CB184
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_1 0x40CB188
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_2 0x40CB18C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_3 0x40CB190
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_4 0x40CB194
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
new file mode 100644
index 000000000000..f9c9b01f0d1a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_0 0x40CB198
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_1 0x40CB19C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_2 0x40CB1A0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_3 0x40CB1A4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_4 0x40CB1A8
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
new file mode 100644
index 000000000000..d96119b8c435
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_0 0x40CB1AC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_1 0x40CB1B0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_2 0x40CB1B4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_3 0x40CB1B8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_4 0x40CB1BC
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
new file mode 100644
index 000000000000..c80d6817efe1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_0 0x40CB1C0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_1 0x40CB1C4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_2 0x40CB1C8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_3 0x40CB1CC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_4 0x40CB1D0
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
new file mode 100644
index 000000000000..753b31dc1760
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_0 0x40CB1D4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_1 0x40CB1D8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_2 0x40CB1DC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_3 0x40CB1E0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_4 0x40CB1E4
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
new file mode 100644
index 000000000000..f68d043edcd9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_0 0x40CB1E8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_1 0x40CB1EC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_2 0x40CB1F0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_3 0x40CB1F4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_4 0x40CB1F8
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
new file mode 100644
index 000000000000..a6dce326bd74
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_0 0x40CB1FC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_1 0x40CB200
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_2 0x40CB204
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_3 0x40CB208
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_4 0x40CB20C
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
new file mode 100644
index 000000000000..5ace0f43cc78
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE
+ * (Prototype: MME_AGU_CORE)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_0 0x40CB210
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_1 0x40CB214
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_2 0x40CB218
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_3 0x40CB21C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_4 0x40CB220
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
new file mode 100644
index 000000000000..b375393dfdc0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_BASE_ADDR
+ * (Prototype: MME_ADDRESS_DESCRIPTOR)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT1_LOW 0x40CB008
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT1_HIGH 0x40CB00C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT0_LOW 0x40CB010
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT0_HIGH 0x40CB014
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_A_LOW 0x40CB018
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_A_HIGH 0x40CB01C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_B_LOW 0x40CB020
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_B_HIGH 0x40CB024
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
new file mode 100644
index 000000000000..7c22b9383f3c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END
+ * (Prototype: MME_NON_TENSOR_DESCRIPTOR)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 \
+0x40CB280
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_LOW 0x40CB284
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_HIGH 0x40CB288
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_OUTER_LOOP 0x40CB28C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 \
+0x40CB290
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SB_REPEAT 0x40CB294
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_FP8_BIAS 0x40CB298
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_RATE_LIMITER 0x40CB29C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_USER_DATA 0x40CB2A0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PERF_EVT_IN 0x40CB2A4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PERF_EVT_OUT 0x40CB2A8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PCU 0x40CB2AC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SLAVE_SYNC_OBJ0_ADDR 0x40CB2B0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SLAVE_SYNC_OBJ1_ADDR 0x40CB2B4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_POWER_LOOP 0x40CB2B8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE0_MASTER 0x40CB2BC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE1_MASTER 0x40CB2C0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE2_MASTER 0x40CB2C4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE3_MASTER 0x40CB2C8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE0_SLAVE 0x40CB2CC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE1_SLAVE 0x40CB2D0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE2_SLAVE 0x40CB2D4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE3_SLAVE 0x40CB2D8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_WKL_ID 0x40CB2DC
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
new file mode 100644
index 000000000000..d17c165faf8b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START
+ * (Prototype: MME_NON_TENSOR_DESCRIPTOR_START)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_LOW 0x40CB028
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_HIGH 0x40CB02C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_LOW 0x40CB030
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_HIGH 0x40CB034
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_MASTER 0x40CB038
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_SLAVE 0x40CB03C
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
new file mode 100644
index 000000000000..7b77884e0024
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_A_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_A_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_TENSOR_A
+ * (Prototype: MME_TENSOR)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_0 0x40CB040
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_1 0x40CB044
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_2 0x40CB048
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_3 0x40CB04C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_4 0x40CB050
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_0 0x40CB054
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_1 0x40CB058
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_2 0x40CB05C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_3 0x40CB060
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_4 0x40CB064
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_0 0x40CB068
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_1 0x40CB06C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_2 0x40CB070
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_3 0x40CB074
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_0 0x40CB078
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_1 0x40CB07C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_2 0x40CB080
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_3 0x40CB084
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_0 0x40CB088
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_1 0x40CB08C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_2 0x40CB090
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_3 0x40CB094
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_A_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
new file mode 100644
index 000000000000..a2a2ba454d6d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_B_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_B_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_TENSOR_B
+ * (Prototype: MME_TENSOR)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_0 0x40CB098
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_1 0x40CB09C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_2 0x40CB0A0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_3 0x40CB0A4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_4 0x40CB0A8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_0 0x40CB0AC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_1 0x40CB0B0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_2 0x40CB0B4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_3 0x40CB0B8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_4 0x40CB0BC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_0 0x40CB0C0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_1 0x40CB0C4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_2 0x40CB0C8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_3 0x40CB0CC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_0 0x40CB0D0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_1 0x40CB0D4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_2 0x40CB0D8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_3 0x40CB0DC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_0 0x40CB0E0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_1 0x40CB0E4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_2 0x40CB0E8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_3 0x40CB0EC
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_B_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
new file mode 100644
index 000000000000..7ad7b197cf87
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT
+ * (Prototype: MME_TENSOR)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_0 0x40CB0F0
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_1 0x40CB0F4
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_2 0x40CB0F8
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_3 0x40CB0FC
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_4 0x40CB100
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_0 0x40CB104
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_1 0x40CB108
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_2 0x40CB10C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_3 0x40CB110
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_4 0x40CB114
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_0 0x40CB118
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_1 0x40CB11C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_2 0x40CB120
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_3 0x40CB124
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_0 0x40CB128
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_1 0x40CB12C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_2 0x40CB130
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_3 0x40CB134
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_0 0x40CB138
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_1 0x40CB13C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_2 0x40CB140
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_3 0x40CB144
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
new file mode 100644
index 000000000000..f699661d76aa
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_MASKS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO
+ * (Prototype: MME_CTRL_LO)
+ *****************************************
+ */
+
+/* DCORE0_MME_CTRL_LO_ARCH_STATUS */
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_IN_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_IN_MASK 0x1F
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_EU_SHIFT 5
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_EU_MASK 0x20
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AP_SHIFT 6
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AP_MASK 0x40
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_COUT_SHIFT 7
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_COUT_MASK 0x180
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_EMPTY_SHIFT 9
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_EMPTY_MASK 0x3E00
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_COUT_SM_IDLE_SHIFT 14
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_COUT_SM_IDLE_MASK 0xC000
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_WBC_AXI_IDLE_SHIFT 16
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_WBC_AXI_IDLE_MASK 0x30000
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_AXI_IDLE_SHIFT 18
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_AXI_IDLE_MASK 0x7C0000
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_ACCUM_FREE_SHIFT 23
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_ACCUM_FREE_MASK 0x3800000
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_IDLE_SHIFT 30
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_IDLE_MASK 0x40000000
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_RDY_SHIFT 31
+#define DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_RDY_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_CMD */
+#define DCORE0_MME_CTRL_LO_CMD_AGU_IN_SHIFT 0
+#define DCORE0_MME_CTRL_LO_CMD_AGU_IN_MASK 0x1F
+#define DCORE0_MME_CTRL_LO_CMD_EU_SHIFT 5
+#define DCORE0_MME_CTRL_LO_CMD_EU_MASK 0x20
+#define DCORE0_MME_CTRL_LO_CMD_AP_SHIFT 6
+#define DCORE0_MME_CTRL_LO_CMD_AP_MASK 0x40
+#define DCORE0_MME_CTRL_LO_CMD_AGU_COUT_SHIFT 7
+#define DCORE0_MME_CTRL_LO_CMD_AGU_COUT_MASK 0x180
+#define DCORE0_MME_CTRL_LO_CMD_COPY_AND_INC_SHIFT 9
+#define DCORE0_MME_CTRL_LO_CMD_COPY_AND_INC_MASK 0x200
+#define DCORE0_MME_CTRL_LO_CMD_DESC_SEL_SHIFT 10
+#define DCORE0_MME_CTRL_LO_CMD_DESC_SEL_MASK 0xC00
+#define DCORE0_MME_CTRL_LO_CMD_MASK_IDLE_IND_SHIFT 12
+#define DCORE0_MME_CTRL_LO_CMD_MASK_IDLE_IND_MASK 0x1000
+#define DCORE0_MME_CTRL_LO_CMD_AGU_OUT1_FROM_AGU0_DW0_SHIFT 13
+#define DCORE0_MME_CTRL_LO_CMD_AGU_OUT1_FROM_AGU0_DW0_MASK 0x2000
+#define DCORE0_MME_CTRL_LO_CMD_AGU_OUT1_FROM_AGU0_DW1_4_SHIFT 14
+#define DCORE0_MME_CTRL_LO_CMD_AGU_OUT1_FROM_AGU0_DW1_4_MASK 0x4000
+#define DCORE0_MME_CTRL_LO_CMD_NULL_DESC_SHIFT 15
+#define DCORE0_MME_CTRL_LO_CMD_NULL_DESC_MASK 0x8000
+
+/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0 */
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_MASK0_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_MASK0_MASK 0x3F
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_EN0_SHIFT 6
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_EN0_MASK 0x40
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_MASK1_SHIFT 8
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_MASK1_MASK 0x3F00
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_EN1_SHIFT 14
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SIGNAL_EN1_MASK 0x4000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_SHIFT 15
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_MASK 0x8000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_SHIFT 16
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK \
+0x10000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_SHIFT 17
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_MASK 0x20000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_SLV_ADR_SHIFT 18
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_SLV_ADR_MASK 0x40000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_SHIFT 19
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_MASK 0x80000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_SHIFT 20
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK \
+0x100000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_SHIFT 21
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK \
+0x200000
+
+/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 */
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0 */
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_VALUE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_VALUE_MASK 0x7FFF
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_RESERVED_SHIFT 15
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_RESERVED_MASK 0x3FFF8000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_PERF_EN_SHIFT 30
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_PERF_EN_MASK 0x40000000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_OP_SHIFT 31
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0_SO_OP_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1 */
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1 */
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_VALUE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_VALUE_MASK 0x7FFF
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_RESERVED_SHIFT 15
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_RESERVED_MASK 0x3FFF8000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_PERF_EN_SHIFT 30
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_PERF_EN_MASK 0x40000000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_OP_SHIFT 31
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1_SO_OP_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_ARCH_A_SS */
+#define DCORE0_MME_CTRL_LO_ARCH_A_SS_MINUS_1_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_A_SS_MINUS_1_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_ARCH_B_SS */
+#define DCORE0_MME_CTRL_LO_ARCH_B_SS_MINUS_1_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_B_SS_MINUS_1_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_ARCH_COUT_SS */
+#define DCORE0_MME_CTRL_LO_ARCH_COUT_SS_MINUS_1_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ARCH_COUT_SS_MINUS_1_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_QM_STALL */
+#define DCORE0_MME_CTRL_LO_QM_STALL_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_QM_STALL_V_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_LOG_SHADOW_LO */
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_LO_MASK_0_SHIFT 0
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_LO_MASK_0_MASK 0x1FF
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_LO_MASK_1_SHIFT 9
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_LO_MASK_1_MASK 0x3FE00
+
+/* DCORE0_MME_CTRL_LO_LOG_SHADOW_HI */
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_HI_MASK_2_SHIFT 0
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_HI_MASK_2_MASK 0x1FF
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_HI_MASK_3_SHIFT 9
+#define DCORE0_MME_CTRL_LO_LOG_SHADOW_HI_MASK_3_MASK 0x3FE00
+
+/* DCORE0_MME_CTRL_LO_SYNC_OBJECT_FIFO_TH */
+#define DCORE0_MME_CTRL_LO_SYNC_OBJECT_FIFO_TH_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_SYNC_OBJECT_FIFO_TH_V_MASK 0x1F
+
+/* DCORE0_MME_CTRL_LO_REDUN */
+#define DCORE0_MME_CTRL_LO_REDUN_FMA_SHIFT 0
+#define DCORE0_MME_CTRL_LO_REDUN_FMA_MASK 0x3F
+
+/* DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH */
+#define DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH_FIFO0_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH_FIFO0_MASK 0x1F
+#define DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH_FIFO1_SHIFT 5
+#define DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH_FIFO1_MASK 0x3E0
+#define DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH_FIFO2_SHIFT 10
+#define DCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH_FIFO2_MASK 0x7C00
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0 */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_MASK 0xFF
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE0_SHIFT 8
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE0_MASK 0x1F00
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE1_SHIFT 13
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE1_MASK 0x3E000
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE2_SHIFT 18
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE2_MASK 0x7C0000
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE3_SHIFT 23
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0_FP_PE3_MASK 0xF800000
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1 */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1_FP_PE4_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1_FP_PE4_MASK 0x1F
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1_FP_PE_HI_SHIFT 5
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1_FP_PE_HI_MASK 0x3E0
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16 */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16_DLY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16_DLY_MASK 0xFFF
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16_EN_SHIFT 31
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16_EN_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8 */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8_DLY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8_DLY_MASK 0xFFF
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8_EN_SHIFT 31
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8_EN_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32 */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32_DLY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32_DLY_MASK 0xFFF
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32_EN_SHIFT 31
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32_EN_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I_DLY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I_DLY_MASK 0xFFF
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I_EN_SHIFT 31
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I_EN_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32 */
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32_DLY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32_DLY_MASK 0xFFF
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32_EN_SHIFT 31
+#define DCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32_EN_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_DESC0 */
+#define DCORE0_MME_CTRL_LO_PCU_RL_DESC0_RL_RST_TOKEN_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_DESC0_RL_RST_TOKEN_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_RL_DESC0_RL_TIMEOUT_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_RL_DESC0_RL_TIMEOUT_MASK 0xFF0000
+#define DCORE0_MME_CTRL_LO_PCU_RL_DESC0_RL_DUMMY2REAL_PERIOD_SHIFT 24
+#define DCORE0_MME_CTRL_LO_PCU_RL_DESC0_RL_DUMMY2REAL_PERIOD_MASK 0xFF000000
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE */
+#define DCORE0_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE_INC_VAL_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE_INC_VAL_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE_DEC_VAL_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE_DEC_VAL_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_TH */
+#define DCORE0_MME_CTRL_LO_PCU_RL_TH_POOL_TH_DEC_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_TH_POOL_TH_DEC_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_RL_TH_DUMMY_REAL_DIFF_TH_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_RL_TH_DUMMY_REAL_DIFF_TH_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_MIN */
+#define DCORE0_MME_CTRL_LO_PCU_RL_MIN_AVG_MIN_TO_FORCE_DUMMY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_MIN_AVG_MIN_TO_FORCE_DUMMY_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_RL_MIN_TOKEN_MIN_VAL_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_RL_MIN_TOKEN_MIN_VAL_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_CTRL_EN */
+#define DCORE0_MME_CTRL_LO_PCU_RL_CTRL_EN_PCU_DISABLE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_CTRL_EN_PCU_DISABLE_MASK 0x1
+#define DCORE0_MME_CTRL_LO_PCU_RL_CTRL_EN_MIN_VAL_PROT_EN_SHIFT 1
+#define DCORE0_MME_CTRL_LO_PCU_RL_CTRL_EN_MIN_VAL_PROT_EN_MASK 0x2
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE */
+#define DCORE0_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE_ALL_MACS_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE_ALL_MACS_MASK 0x7
+#define DCORE0_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE_REAL_MACS_SHIFT 3
+#define DCORE0_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE_REAL_MACS_MASK 0x18
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_A_BF16 */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_BF16_ODD_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_BF16_ODD_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_BF16_EVEN_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_BF16_EVEN_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_B_BF16 */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_BF16_ODD_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_BF16_ODD_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_BF16_EVEN_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_BF16_EVEN_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP16 */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP16_ODD_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP16_ODD_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP16_EVEN_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP16_EVEN_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP16 */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP16_ODD_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP16_ODD_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP16_EVEN_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP16_EVEN_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_F8 */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_A_VAL_ODD_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_A_VAL_ODD_MASK 0xFF
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_A_VAL_EVEN_SHIFT 8
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_A_VAL_EVEN_MASK 0xFF00
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_B_VAL_ODD_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_B_VAL_ODD_MASK 0xFF0000
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_B_VAL_EVEN_SHIFT 24
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_F8_B_VAL_EVEN_MASK 0xFF000000
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_ODD */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_ODD_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_ODD_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_EVEN */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_EVEN_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_EVEN_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_ODD */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_ODD_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_ODD_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_EVEN */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_EVEN_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_EVEN_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_ODD */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_ODD_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_ODD_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_EVEN */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_EVEN_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_EVEN_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_ODD */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_ODD_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_ODD_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_EVEN */
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_EVEN_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_EVEN_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PROT */
+#define DCORE0_MME_CTRL_LO_PROT_VALUE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PROT_VALUE_MASK 0x7
+
+/* DCORE0_MME_CTRL_LO_EU */
+#define DCORE0_MME_CTRL_LO_EU_POWER_SAVE_DISABLE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EU_POWER_SAVE_DISABLE_MASK 0x1
+#define DCORE0_MME_CTRL_LO_EU_FP_PYR_CLOSE_CGATE_EN_SHIFT 1
+#define DCORE0_MME_CTRL_LO_EU_FP_PYR_CLOSE_CGATE_EN_MASK 0x2
+#define DCORE0_MME_CTRL_LO_EU_FP_CLS_CLOSE_CGATE_EN_SHIFT 2
+#define DCORE0_MME_CTRL_LO_EU_FP_CLS_CLOSE_CGATE_EN_MASK 0x4
+#define DCORE0_MME_CTRL_LO_EU_FP_CLOSE_CGATE_DLY_SHIFT 8
+#define DCORE0_MME_CTRL_LO_EU_FP_CLOSE_CGATE_DLY_MASK 0xFFF00
+#define DCORE0_MME_CTRL_LO_EU_FP_CLOSE_CGATE_ON_DESC_SHIFT 20
+#define DCORE0_MME_CTRL_LO_EU_FP_CLOSE_CGATE_ON_DESC_MASK 0x100000
+#define DCORE0_MME_CTRL_LO_EU_FP_ROLLUP_CDC_STALL_DIS_SHIFT 21
+#define DCORE0_MME_CTRL_LO_EU_FP_ROLLUP_CDC_STALL_DIS_MASK 0x200000
+
+/* DCORE0_MME_CTRL_LO_SBTE */
+#define DCORE0_MME_CTRL_LO_SBTE_CLOSE_CGATE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_SBTE_CLOSE_CGATE_MASK 0x1F
+
+/* DCORE0_MME_CTRL_LO_AGU_SM_INFLIGHT_CNTR */
+#define DCORE0_MME_CTRL_LO_AGU_SM_INFLIGHT_CNTR_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_AGU_SM_INFLIGHT_CNTR_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_AGU_SM_TOTAL_CNTR */
+#define DCORE0_MME_CTRL_LO_AGU_SM_TOTAL_CNTR_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_AGU_SM_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_RL_SAT_SEC */
+#define DCORE0_MME_CTRL_LO_PCU_RL_SAT_SEC_VAL_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_RL_SAT_SEC_VAL_MASK 0xFFFFF
+#define DCORE0_MME_CTRL_LO_PCU_RL_SAT_SEC_SEL_SHIFT 31
+#define DCORE0_MME_CTRL_LO_PCU_RL_SAT_SEC_SEL_MASK 0x80000000
+
+/* DCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN32 */
+#define DCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN32_V_NMB__SHIFT 0
+#define DCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN32_V_NMB__MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN33 */
+#define DCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN33_V_NMB__SHIFT 0
+#define DCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN33_V_NMB__MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_EU_ISOLATION_DIS */
+#define DCORE0_MME_CTRL_LO_EU_ISOLATION_DIS_FMA_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EU_ISOLATION_DIS_FMA_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_QM_SLV_CLK_EN */
+#define DCORE0_MME_CTRL_LO_QM_SLV_CLK_EN_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_QM_SLV_CLK_EN_V_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_HBW_CLK_ENABLER_DIS */
+#define DCORE0_MME_CTRL_LO_HBW_CLK_ENABLER_DIS_AXI_SHIFT 0
+#define DCORE0_MME_CTRL_LO_HBW_CLK_ENABLER_DIS_AXI_MASK 0x1
+#define DCORE0_MME_CTRL_LO_HBW_CLK_ENABLER_DIS_APB_SHIFT 1
+#define DCORE0_MME_CTRL_LO_HBW_CLK_ENABLER_DIS_APB_MASK 0x2
+
+/* DCORE0_MME_CTRL_LO_AGU */
+#define DCORE0_MME_CTRL_LO_AGU_COUT_H_FROM_SPATIAL_LOOP_SHIFT 0
+#define DCORE0_MME_CTRL_LO_AGU_COUT_H_FROM_SPATIAL_LOOP_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_QM */
+#define DCORE0_MME_CTRL_LO_QM_STOP_ON_SBTE_ERR_SHIFT 0
+#define DCORE0_MME_CTRL_LO_QM_STOP_ON_SBTE_ERR_MASK 0x1
+#define DCORE0_MME_CTRL_LO_QM_EXT_ADDR_ERR_EN_SHIFT 1
+#define DCORE0_MME_CTRL_LO_QM_EXT_ADDR_ERR_EN_MASK 0x2
+
+/* DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS */
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_AGU_COUT0_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_AGU_COUT0_MASK 0xF
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_AGU_COUT1_SHIFT 4
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_AGU_COUT1_MASK 0xF0
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_AP_BRAIN_SHIFT 8
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_AP_BRAIN_MASK 0xF00
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_EU_BRAIN_SHIFT 12
+#define DCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS_EU_BRAIN_MASK 0xF000
+
+/* DCORE0_MME_CTRL_LO_INTR_CAUSE */
+#define DCORE0_MME_CTRL_LO_INTR_CAUSE_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_INTR_CAUSE_V_MASK 0xFFFF
+
+/* DCORE0_MME_CTRL_LO_INTR_MASK */
+#define DCORE0_MME_CTRL_LO_INTR_MASK_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_INTR_MASK_V_MASK 0x3FFFFF
+
+/* DCORE0_MME_CTRL_LO_INTR_CLEAR */
+#define DCORE0_MME_CTRL_LO_INTR_CLEAR_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_INTR_CLEAR_V_MASK 0xFFFF
+
+/* DCORE0_MME_CTRL_LO_REDUN_PSOC_SEL_SEC */
+#define DCORE0_MME_CTRL_LO_REDUN_PSOC_SEL_SEC_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_REDUN_PSOC_SEL_SEC_V_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_BIST */
+#define DCORE0_MME_CTRL_LO_BIST_FUNC_MODE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_BIST_FUNC_MODE_MASK 0x1
+#define DCORE0_MME_CTRL_LO_BIST_APB_SW_MODE_SHIFT 1
+#define DCORE0_MME_CTRL_LO_BIST_APB_SW_MODE_MASK 0x2
+
+/* DCORE0_MME_CTRL_LO_EU_RL_ENABLE */
+#define DCORE0_MME_CTRL_LO_EU_RL_ENABLE_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EU_RL_ENABLE_V_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_EU_RL_TOKEN_SEL */
+#define DCORE0_MME_CTRL_LO_EU_RL_TOKEN_SEL_STAT_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EU_RL_TOKEN_SEL_STAT_MASK 0x1
+
+/* DCORE0_MME_CTRL_LO_EU_RL_CFG */
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_RST_TOKEN_SHIFT 0
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_RST_TOKEN_MASK 0xFF
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_TIMEOUT_SHIFT 8
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_TIMEOUT_MASK 0xFF00
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_SATURATION_SHIFT 16
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_SATURATION_MASK 0xFF0000
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_DATA_SIZE_SHIFT 24
+#define DCORE0_MME_CTRL_LO_EU_RL_CFG_DATA_SIZE_MASK 0xFF000000
+
+/* DCORE0_MME_CTRL_LO_PCU_DBG_DW0 */
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW0_FSM_STATE_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW0_FSM_STATE_MASK 0x1
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW0_REAL_POOL_TOKENS_SHIFT 8
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW0_REAL_POOL_TOKENS_MASK 0xFFFFF00
+
+/* DCORE0_MME_CTRL_LO_PCU_DBG_DW1 */
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW1_ALL_POOL_TOKENS_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW1_ALL_POOL_TOKENS_MASK 0xFFFFF
+
+/* DCORE0_MME_CTRL_LO_PCU_DBG_DW2 */
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW2_BUBBLE_CYC_CNTR_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW2_BUBBLE_CYC_CNTR_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW2_DUMMY_CYC_CNTR_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW2_DUMMY_CYC_CNTR_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_DBG_DW3 */
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW3_REAL_MACS_HISTORY_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW3_REAL_MACS_HISTORY_MASK 0xFFFF
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW3_ALL_MACS_HISTORY_SHIFT 16
+#define DCORE0_MME_CTRL_LO_PCU_DBG_DW3_ALL_MACS_HISTORY_MASK 0xFFFF0000
+
+/* DCORE0_MME_CTRL_LO_PCU_DBG_WKL_ID */
+#define DCORE0_MME_CTRL_LO_PCU_DBG_WKL_ID_B_SHIFT 0
+#define DCORE0_MME_CTRL_LO_PCU_DBG_WKL_ID_B_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_CTRL_LO_ETF_MEM_WRAP_RM */
+#define DCORE0_MME_CTRL_LO_ETF_MEM_WRAP_RM_V_SHIFT 0
+#define DCORE0_MME_CTRL_LO_ETF_MEM_WRAP_RM_V_MASK 0x3FFFFFFF
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_MASKS_H_ */
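[Editor's note, not part of the patch: the *_SHIFT/*_MASK pairs in the header above each describe one field of a 32-bit register. A minimal sketch of how such a pair might be decoded in driver code follows; the helper name and the choice of FIELD_GET() are illustrative assumptions, not taken from this patch.]

#include <linux/bitfield.h>
#include <linux/types.h>

/*
 * Illustrative only: extract the AGU_IN field (bits 4:0, per the
 * ARCH_STATUS defines above) from a previously read status word.
 * FIELD_GET() shifts and masks according to the constant mask.
 */
static inline u32 mme_arch_status_agu_in(u32 status)
{
	return FIELD_GET(DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_IN_MASK, status);
}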
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
new file mode 100644
index 000000000000..a51617a6f1fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_MME_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_MME_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO_MME_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_ASID 0x40CBE00
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_MMU_BP 0x40CBE04
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_STRONG_ORDER 0x40CBE08
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_NO_SNOOP 0x40CBE0C
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_WR_REDUCTION 0x40CBE10
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_RD_ATOMIC 0x40CBE14
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_QOS 0x40CBE18
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_RSVD 0x40CBE1C
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_EMEM_CPAGE 0x40CBE20
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_CORE 0x40CBE24
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_E2E_COORD 0x40CBE28
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_WR_OVRD_LO 0x40CBE30
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_WR_OVRD_HI 0x40CBE34
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_RD_OVRD_LO 0x40CBE38
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_RD_OVRD_HI 0x40CBE3C
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_LB_COORD 0x40CBE40
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_LB_LOCK 0x40CBE44
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_LB_RSVD 0x40CBE48
+
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_LB_OVRD 0x40CBE4C
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_MME_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
new file mode 100644
index 000000000000..1b91c9c13132
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_CTRL_LO_REGS_H_
+#define ASIC_REG_DCORE0_MME_CTRL_LO_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_CTRL_LO
+ * (Prototype: MME_CTRL_LO)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_STATUS 0x40CB000
+
+#define mmDCORE0_MME_CTRL_LO_CMD 0x40CB004
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0 0x40CB148
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 0x40CB14C
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0 0x40CB150
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1 0x40CB154
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1 0x40CB158
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_A_SS 0x40CB224
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_B_SS 0x40CB228
+
+#define mmDCORE0_MME_CTRL_LO_ARCH_COUT_SS 0x40CB27C
+
+#define mmDCORE0_MME_CTRL_LO_QM_STALL 0x40CB400
+
+#define mmDCORE0_MME_CTRL_LO_LOG_SHADOW_LO 0x40CB404
+
+#define mmDCORE0_MME_CTRL_LO_LOG_SHADOW_HI 0x40CB408
+
+#define mmDCORE0_MME_CTRL_LO_SYNC_OBJECT_FIFO_TH 0x40CB40C
+
+#define mmDCORE0_MME_CTRL_LO_REDUN 0x40CB410
+
+#define mmDCORE0_MME_CTRL_LO_EUS_LOCAL_FIFO_TH 0x40CB414
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0 0x40CB418
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1 0x40CB41C
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16 0x40CB420
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8 0x40CB424
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32 0x40CB428
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I 0x40CB42C
+
+#define mmDCORE0_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32 0x40CB430
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_DESC0 0x40CB434
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE 0x40CB438
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_TH 0x40CB43C
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_MIN 0x40CB440
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_CTRL_EN 0x40CB444
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE 0x40CB448
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_A_BF16 0x40CB44C
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_B_BF16 0x40CB450
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP16 0x40CB454
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP16 0x40CB458
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_F8 0x40CB45C
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_ODD 0x40CB460
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_A_FP32_EVEN 0x40CB464
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_ODD 0x40CB468
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_B_FP32_EVEN 0x40CB46C
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_ODD 0x40CB470
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_A_TF32_EVEN 0x40CB474
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_ODD 0x40CB478
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DUMMY_B_TF32_EVEN 0x40CB47C
+
+#define mmDCORE0_MME_CTRL_LO_PROT 0x40CB480
+
+#define mmDCORE0_MME_CTRL_LO_EU 0x40CB484
+
+#define mmDCORE0_MME_CTRL_LO_SBTE 0x40CB488
+
+#define mmDCORE0_MME_CTRL_LO_AGU_SM_INFLIGHT_CNTR 0x40CB48C
+
+#define mmDCORE0_MME_CTRL_LO_AGU_SM_TOTAL_CNTR 0x40CB490
+
+#define mmDCORE0_MME_CTRL_LO_PCU_RL_SAT_SEC 0x40CB494
+
+#define mmDCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN32 0x40CB498
+
+#define mmDCORE0_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN33 0x40CB49C
+
+#define mmDCORE0_MME_CTRL_LO_EU_ISOLATION_DIS 0x40CB4A0
+
+#define mmDCORE0_MME_CTRL_LO_QM_SLV_CLK_EN 0x40CB4A4
+
+#define mmDCORE0_MME_CTRL_LO_HBW_CLK_ENABLER_DIS 0x40CB4A8
+
+#define mmDCORE0_MME_CTRL_LO_AGU 0x40CB4AC
+
+#define mmDCORE0_MME_CTRL_LO_QM 0x40CB4B0
+
+#define mmDCORE0_MME_CTRL_LO_EARLY_RELEASE_STATUS 0x40CB4B4
+
+#define mmDCORE0_MME_CTRL_LO_INTR_CAUSE 0x40CB4B8
+
+#define mmDCORE0_MME_CTRL_LO_INTR_MASK 0x40CB4BC
+
+#define mmDCORE0_MME_CTRL_LO_INTR_CLEAR 0x40CB4C0
+
+#define mmDCORE0_MME_CTRL_LO_REDUN_PSOC_SEL_SEC 0x40CB4C4
+
+#define mmDCORE0_MME_CTRL_LO_BIST 0x40CB4C8
+
+#define mmDCORE0_MME_CTRL_LO_EU_RL_ENABLE 0x40CB4CC
+
+#define mmDCORE0_MME_CTRL_LO_EU_RL_TOKEN_SEL 0x40CB4D0
+
+#define mmDCORE0_MME_CTRL_LO_EU_RL_CFG 0x40CB4D4
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DBG_DW0 0x40CB4D8
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DBG_DW1 0x40CB4DC
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DBG_DW2 0x40CB4E0
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DBG_DW3 0x40CB4E4
+
+#define mmDCORE0_MME_CTRL_LO_PCU_DBG_WKL_ID 0x40CB4E8
+
+#define mmDCORE0_MME_CTRL_LO_ETF_MEM_WRAP_RM 0x40CB4EC
+
+#endif /* ASIC_REG_DCORE0_MME_CTRL_LO_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
new file mode 100644
index 000000000000..f702fe6e9365
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_ARC_ACP_ENG_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_ARC_ACP_ENG_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_ARC_ACP_ENG
+ * (Prototype: ARC_ACP_ENG)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_0 0x40CF000
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_1 0x40CF004
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_2 0x40CF008
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_3 0x40CF00C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_4 0x40CF010
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_5 0x40CF014
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_6 0x40CF018
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_7 0x40CF01C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_8 0x40CF020
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_9 0x40CF024
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_10 0x40CF028
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_11 0x40CF02C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_12 0x40CF030
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_13 0x40CF034
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_14 0x40CF038
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_15 0x40CF03C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_16 0x40CF040
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_17 0x40CF044
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_18 0x40CF048
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_19 0x40CF04C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_20 0x40CF050
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_21 0x40CF054
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_22 0x40CF058
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_23 0x40CF05C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_24 0x40CF060
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_25 0x40CF064
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_26 0x40CF068
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_27 0x40CF06C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_28 0x40CF070
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_29 0x40CF074
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_30 0x40CF078
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_31 0x40CF07C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_32 0x40CF080
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_33 0x40CF084
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_34 0x40CF088
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_35 0x40CF08C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_36 0x40CF090
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_37 0x40CF094
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_38 0x40CF098
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_39 0x40CF09C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_40 0x40CF0A0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_41 0x40CF0A4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_42 0x40CF0A8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_43 0x40CF0AC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_44 0x40CF0B0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_45 0x40CF0B4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_46 0x40CF0B8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_47 0x40CF0BC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_48 0x40CF0C0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_49 0x40CF0C4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_50 0x40CF0C8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_51 0x40CF0CC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_52 0x40CF0D0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_53 0x40CF0D4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_54 0x40CF0D8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_55 0x40CF0DC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_56 0x40CF0E0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_57 0x40CF0E4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_58 0x40CF0E8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_59 0x40CF0EC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_60 0x40CF0F0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_61 0x40CF0F4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_62 0x40CF0F8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_63 0x40CF0FC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_0 0x40CF100
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_1 0x40CF104
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_2 0x40CF108
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_3 0x40CF10C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_4 0x40CF110
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_5 0x40CF114
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_6 0x40CF118
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_7 0x40CF11C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_8 0x40CF120
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_9 0x40CF124
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_10 0x40CF128
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_11 0x40CF12C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_12 0x40CF130
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_13 0x40CF134
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_14 0x40CF138
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_15 0x40CF13C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_16 0x40CF140
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_17 0x40CF144
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_18 0x40CF148
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_19 0x40CF14C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_20 0x40CF150
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_21 0x40CF154
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_22 0x40CF158
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_23 0x40CF15C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_24 0x40CF160
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_25 0x40CF164
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_26 0x40CF168
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_27 0x40CF16C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_28 0x40CF170
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_29 0x40CF174
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_30 0x40CF178
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_31 0x40CF17C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_32 0x40CF180
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_33 0x40CF184
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_34 0x40CF188
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_35 0x40CF18C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_36 0x40CF190
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_37 0x40CF194
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_38 0x40CF198
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_39 0x40CF19C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_40 0x40CF1A0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_41 0x40CF1A4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_42 0x40CF1A8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_43 0x40CF1AC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_44 0x40CF1B0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_45 0x40CF1B4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_46 0x40CF1B8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_47 0x40CF1BC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_48 0x40CF1C0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_49 0x40CF1C4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_50 0x40CF1C8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_51 0x40CF1CC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_52 0x40CF1D0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_53 0x40CF1D4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_54 0x40CF1D8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_55 0x40CF1DC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_56 0x40CF1E0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_57 0x40CF1E4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_58 0x40CF1E8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_59 0x40CF1EC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_60 0x40CF1F0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_61 0x40CF1F4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_62 0x40CF1F8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_CI_REG_63 0x40CF1FC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_0 0x40CF200
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_1 0x40CF204
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_2 0x40CF208
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_3 0x40CF20C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_4 0x40CF210
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_5 0x40CF214
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_6 0x40CF218
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_7 0x40CF21C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_8 0x40CF220
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_9 0x40CF224
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_10 0x40CF228
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_11 0x40CF22C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_12 0x40CF230
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_13 0x40CF234
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_14 0x40CF238
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_15 0x40CF23C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_16 0x40CF240
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_17 0x40CF244
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_18 0x40CF248
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_19 0x40CF24C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_20 0x40CF250
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_21 0x40CF254
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_22 0x40CF258
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_23 0x40CF25C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_24 0x40CF260
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_25 0x40CF264
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_26 0x40CF268
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_27 0x40CF26C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_28 0x40CF270
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_29 0x40CF274
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_30 0x40CF278
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_31 0x40CF27C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_32 0x40CF280
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_33 0x40CF284
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_34 0x40CF288
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_35 0x40CF28C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_36 0x40CF290
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_37 0x40CF294
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_38 0x40CF298
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_39 0x40CF29C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_40 0x40CF2A0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_41 0x40CF2A4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_42 0x40CF2A8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_43 0x40CF2AC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_44 0x40CF2B0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_45 0x40CF2B4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_46 0x40CF2B8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_47 0x40CF2BC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_48 0x40CF2C0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_49 0x40CF2C4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_50 0x40CF2C8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_51 0x40CF2CC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_52 0x40CF2D0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_53 0x40CF2D4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_54 0x40CF2D8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_55 0x40CF2DC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_56 0x40CF2E0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_57 0x40CF2E4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_58 0x40CF2E8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_59 0x40CF2EC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_60 0x40CF2F0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_61 0x40CF2F4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_62 0x40CF2F8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PR_REG_63 0x40CF2FC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_0 0x40CF300
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_1 0x40CF304
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_2 0x40CF308
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_3 0x40CF30C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_4 0x40CF310
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_5 0x40CF314
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_6 0x40CF318
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_7 0x40CF31C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_8 0x40CF320
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_9 0x40CF324
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_10 0x40CF328
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_11 0x40CF32C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_12 0x40CF330
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_13 0x40CF334
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_14 0x40CF338
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_15 0x40CF33C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_16 0x40CF340
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_17 0x40CF344
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_18 0x40CF348
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_19 0x40CF34C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_20 0x40CF350
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_21 0x40CF354
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_22 0x40CF358
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_23 0x40CF35C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_24 0x40CF360
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_25 0x40CF364
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_26 0x40CF368
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_27 0x40CF36C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_28 0x40CF370
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_29 0x40CF374
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_30 0x40CF378
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_31 0x40CF37C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_32 0x40CF380
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_33 0x40CF384
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_34 0x40CF388
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_35 0x40CF38C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_36 0x40CF390
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_37 0x40CF394
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_38 0x40CF398
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_39 0x40CF39C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_40 0x40CF3A0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_41 0x40CF3A4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_42 0x40CF3A8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_43 0x40CF3AC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_44 0x40CF3B0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_45 0x40CF3B4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_46 0x40CF3B8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_47 0x40CF3BC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_48 0x40CF3C0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_49 0x40CF3C4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_50 0x40CF3C8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_51 0x40CF3CC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_52 0x40CF3D0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_53 0x40CF3D4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_54 0x40CF3D8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_55 0x40CF3DC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_56 0x40CF3E0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_57 0x40CF3E4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_58 0x40CF3E8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_59 0x40CF3EC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_60 0x40CF3F0
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_61 0x40CF3F4
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_62 0x40CF3F8
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_MK_REG_63 0x40CF3FC
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_SELECTED_QUEUE_ID 0x40CF400
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_0 0x40CF404
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_1 0x40CF408
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_2 0x40CF40C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_0 0x40CF410
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_1 0x40CF414
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_2 0x40CF418
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_0 0x40CF41C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_1 0x40CF420
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_2 0x40CF424
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_3 0x40CF428
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_RD_CNT_0 0x40CF42C
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_RD_CNT_1 0x40CF430
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_RD_CNT_2 0x40CF434
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_PRIO_RD_CNT_3 0x40CF438
+
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_REG 0x40CF43C
+
+#endif /* ASIC_REG_DCORE0_MME_QM_ARC_ACP_ENG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
new file mode 100644
index 000000000000..917f8ab88373
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_ARC_AUX_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_ARC_AUX_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_ARC_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_ARC_AUX_RUN_HALT_REQ 0x40C8100
+
+#define mmDCORE0_MME_QM_ARC_AUX_RUN_HALT_ACK 0x40C8104
+
+#define mmDCORE0_MME_QM_ARC_AUX_RST_VEC_ADDR 0x40C8108
+
+#define mmDCORE0_MME_QM_ARC_AUX_DBG_MODE 0x40C810C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CLUSTER_NUM 0x40C8110
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_NUM 0x40C8114
+
+#define mmDCORE0_MME_QM_ARC_AUX_WAKE_UP_EVENT 0x40C8118
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_SYS_ADDR_BASE 0x40C811C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CTI_AP_STS 0x40C8120
+
+#define mmDCORE0_MME_QM_ARC_AUX_CTI_CFG_MUX_SEL 0x40C8124
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_RST 0x40C8128
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_RST_REQ 0x40C812C
+
+#define mmDCORE0_MME_QM_ARC_AUX_SRAM_LSB_ADDR 0x40C8130
+
+#define mmDCORE0_MME_QM_ARC_AUX_SRAM_MSB_ADDR 0x40C8134
+
+#define mmDCORE0_MME_QM_ARC_AUX_PCIE_LSB_ADDR 0x40C8138
+
+#define mmDCORE0_MME_QM_ARC_AUX_PCIE_MSB_ADDR 0x40C813C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_LSB_ADDR 0x40C8140
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_MSB_ADDR 0x40C8144
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM0_LSB_ADDR 0x40C8150
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM0_MSB_ADDR 0x40C8154
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM1_LSB_ADDR 0x40C8158
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM1_MSB_ADDR 0x40C815C
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM2_LSB_ADDR 0x40C8160
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM2_MSB_ADDR 0x40C8164
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM3_LSB_ADDR 0x40C8168
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM3_MSB_ADDR 0x40C816C
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM0_OFFSET 0x40C8170
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM1_OFFSET 0x40C8174
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM2_OFFSET 0x40C8178
+
+#define mmDCORE0_MME_QM_ARC_AUX_HBM3_OFFSET 0x40C817C
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x40C8180
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x40C8184
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x40C8188
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x40C818C
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x40C8190
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x40C8194
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x40C8198
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x40C819C
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x40C81A0
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x40C81A4
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x40C81A8
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x40C81AC
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x40C81B0
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x40C81B4
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_CBU_AWCACHE_OVR 0x40C81B8
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_LBU_AWCACHE_OVR 0x40C81BC
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_0 0x40C81C0
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_1 0x40C81C4
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_2 0x40C81C8
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_3 0x40C81CC
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_4 0x40C81D0
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_5 0x40C81D4
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_6 0x40C81D8
+
+#define mmDCORE0_MME_QM_ARC_AUX_CONTEXT_ID_7 0x40C81DC
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_0 0x40C81E0
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_1 0x40C81E4
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_2 0x40C81E8
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_3 0x40C81EC
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_4 0x40C81F0
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_5 0x40C81F4
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_6 0x40C81F8
+
+#define mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_7 0x40C81FC
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_0 0x40C8200
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_1 0x40C8204
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_2 0x40C8208
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_3 0x40C820C
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_4 0x40C8210
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_5 0x40C8214
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_6 0x40C8218
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_7 0x40C821C
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_8 0x40C8220
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_9 0x40C8224
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_10 0x40C8228
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_11 0x40C822C
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_12 0x40C8230
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_13 0x40C8234
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_14 0x40C8238
+
+#define mmDCORE0_MME_QM_ARC_AUX_SW_INTR_15 0x40C823C
+
+#define mmDCORE0_MME_QM_ARC_AUX_IRQ_INTR_MASK_0 0x40C8280
+
+#define mmDCORE0_MME_QM_ARC_AUX_IRQ_INTR_MASK_1 0x40C8284
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_SEI_INTR_STS 0x40C8290
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_SEI_INTR_CLR 0x40C8294
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_SEI_INTR_MASK 0x40C8298
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_EXCPTN_CAUSE 0x40C829C
+
+#define mmDCORE0_MME_QM_ARC_AUX_SEI_INTR_HALT_EN 0x40C82A0
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_SEI_INTR_HALT_MASK 0x40C82A4
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_SEI_INTR_HALT_MASK 0x40C82A8
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REI_INTR_STS 0x40C82B0
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REI_INTR_CLR 0x40C82B4
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REI_INTR_MASK 0x40C82B8
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_ECC_ERR_ADDR 0x40C82BC
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_ECC_SYNDROME 0x40C82C0
+
+#define mmDCORE0_MME_QM_ARC_AUX_I_CACHE_ECC_ERR_ADDR 0x40C82C4
+
+#define mmDCORE0_MME_QM_ARC_AUX_I_CACHE_ECC_SYNDROME 0x40C82C8
+
+#define mmDCORE0_MME_QM_ARC_AUX_D_CACHE_ECC_ERR_ADDR 0x40C82CC
+
+#define mmDCORE0_MME_QM_ARC_AUX_D_CACHE_ECC_SYNDROME 0x40C82D0
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBW_TRMINATE_AWADDR_ERR 0x40C82E0
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBW_TRMINATE_ARADDR_ERR 0x40C82E4
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_LBW_TERMINATE_BRESP 0x40C82E8
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_LBW_TERMINATE_RRESP 0x40C82EC
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_LBW_TERMINATE_AXLEN 0x40C82F0
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_LBW_TERMINATE_AXSIZE 0x40C82F4
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_0 0x40C8300
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_1 0x40C8304
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_2 0x40C8308
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_3 0x40C830C
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_4 0x40C8310
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_5 0x40C8314
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_6 0x40C8318
+
+#define mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_7 0x40C831C
+
+#define mmDCORE0_MME_QM_ARC_AUX_TOTAL_CBU_WR_CNT 0x40C8320
+
+#define mmDCORE0_MME_QM_ARC_AUX_INFLIGHT_CBU_WR_CNT 0x40C8324
+
+#define mmDCORE0_MME_QM_ARC_AUX_TOTAL_CBU_RD_CNT 0x40C8328
+
+#define mmDCORE0_MME_QM_ARC_AUX_INFLIGHT_CBU_RD_CNT 0x40C832C
+
+#define mmDCORE0_MME_QM_ARC_AUX_TOTAL_LBU_WR_CNT 0x40C8330
+
+#define mmDCORE0_MME_QM_ARC_AUX_INFLIGHT_LBU_WR_CNT 0x40C8334
+
+#define mmDCORE0_MME_QM_ARC_AUX_TOTAL_LBU_RD_CNT 0x40C8338
+
+#define mmDCORE0_MME_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT 0x40C833C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_ARUSER_OVR 0x40C8350
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_ARUSER_OVR_EN 0x40C8354
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_AWUSER_OVR 0x40C8358
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_AWUSER_OVR_EN 0x40C835C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_ARUSER_MSB_OVR 0x40C8360
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_ARUSER_MSB_OVR_EN 0x40C8364
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_AWUSER_MSB_OVR 0x40C8368
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_AWUSER_MSB_OVR_EN 0x40C836C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_AXCACHE_OVR 0x40C8370
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_LOCK_OVR 0x40C8374
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_PROT_OVR 0x40C8378
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_MAX_OUTSTANDING 0x40C837C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_EARLY_BRESP_EN 0x40C8380
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORCE_RSP_OK 0x40C8384
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_NO_WR_INFLIGHT 0x40C838C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_SEI_INTR_ID 0x40C8390
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_ARUSER_OVR 0x40C8400
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_ARUSER_OVR_EN 0x40C8404
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_AWUSER_OVR 0x40C8408
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_AWUSER_OVR_EN 0x40C840C
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_AXCACHE_OVR 0x40C8420
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_LOCK_OVR 0x40C8424
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_PROT_OVR 0x40C8428
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_MAX_OUTSTANDING 0x40C842C
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_EARLY_BRESP_EN 0x40C8430
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_FORCE_RSP_OK 0x40C8434
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_NO_WR_INFLIGHT 0x40C843C
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBU_SEI_INTR_ID 0x40C8440
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0 0x40C8500
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_1 0x40C8504
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_2 0x40C8508
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_3 0x40C850C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_4 0x40C8510
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_5 0x40C8514
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_6 0x40C8518
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_7 0x40C851C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_0 0x40C8520
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_1 0x40C8524
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_2 0x40C8528
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_3 0x40C852C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_4 0x40C8530
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_5 0x40C8534
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_6 0x40C8538
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_SIZE_7 0x40C853C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_0 0x40C8540
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_1 0x40C8544
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_2 0x40C8548
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_3 0x40C854C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_4 0x40C8550
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_5 0x40C8554
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_6 0x40C8558
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PI_7 0x40C855C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_0 0x40C8560
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_1 0x40C8564
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_2 0x40C8568
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_3 0x40C856C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_4 0x40C8570
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_5 0x40C8574
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_6 0x40C8578
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_CI_7 0x40C857C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_0 0x40C8580
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_1 0x40C8584
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_2 0x40C8588
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_3 0x40C858C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_4 0x40C8590
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_5 0x40C8594
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_6 0x40C8598
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_7 0x40C859C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x40C85A0
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x40C85A4
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x40C85A8
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x40C85AC
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x40C85B0
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x40C85B4
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x40C85B8
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x40C85BC
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x40C85C0
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x40C85C4
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x40C85C8
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x40C85CC
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x40C85D0
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x40C85D4
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x40C85D8
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x40C85DC
+
+#define mmDCORE0_MME_QM_ARC_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x40C85E0
+
+#define mmDCORE0_MME_QM_ARC_AUX_NIC_Q_VLD_ENTRY_MASK 0x40C85E4
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_DROP_EN 0x40C8620
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_WARN_MSG 0x40C8624
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG 0x40C8628
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_GEN_AXI_AWPROT 0x40C8630
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_GEN_AXI_AWUSER 0x40C8634
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_GEN_AXI_AWBURST 0x40C8638
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_GEN_AXI_AWLOCK 0x40C863C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_GEN_AXI_AWCACHE 0x40C8640
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_WRR_ARB_WEIGHT 0x40C8644
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x40C8648
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT 0x40C864C
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x40C8650
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x40C8654
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_CQ_SHADOW_CI 0x40C8658
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI 0x40C865C
+
+#define mmDCORE0_MME_QM_ARC_AUX_AUX2APB_PROT 0x40C8700
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBW_FORK_WIN_EN 0x40C8704
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x40C8708
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x40C870C
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x40C8710
+
+#define mmDCORE0_MME_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x40C8714
+
+#define mmDCORE0_MME_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR0 0x40C8718
+
+#define mmDCORE0_MME_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK0 0x40C871C
+
+#define mmDCORE0_MME_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR1 0x40C8720
+
+#define mmDCORE0_MME_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK1 0x40C8724
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR0 0x40C8728
+
+#define mmDCORE0_MME_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR1 0x40C872C
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x40C8730
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x40C8734
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x40C8738
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x40C873C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_WIN_EN 0x40C8740
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_LSB 0x40C8750
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_MSB 0x40C8754
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_LSB 0x40C8758
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_MSB 0x40C875C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_LSB 0x40C8760
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_MSB 0x40C8764
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_LSB 0x40C8768
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_MSB 0x40C876C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_LSB 0x40C8770
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_MSB 0x40C8774
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_LSB 0x40C8778
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_MSB 0x40C877C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_LSB 0x40C8780
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_MSB 0x40C8784
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_LSB 0x40C8788
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_MSB 0x40C878C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_TRMINATE_ARADDR_LSB 0x40C8790
+
+#define mmDCORE0_MME_QM_ARC_AUX_CBU_TRMINATE_ARADDR_MSB 0x40C8794
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_CBU_TERMINATE_BRESP 0x40C8798
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_CBU_TERMINATE_RRESP 0x40C879C
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_0 0x40C8800
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_1 0x40C8804
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_2 0x40C8808
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_3 0x40C880C
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_4 0x40C8810
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_5 0x40C8814
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_6 0x40C8818
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_7 0x40C881C
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_8 0x40C8820
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_9 0x40C8824
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_10 0x40C8828
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_11 0x40C882C
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_12 0x40C8830
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_13 0x40C8834
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_14 0x40C8838
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_REGION_CFG_15 0x40C883C
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_TRMINATE_AWADDR_ERR 0x40C8840
+
+#define mmDCORE0_MME_QM_ARC_AUX_DCCM_TRMINATE_ARADDR_ERR 0x40C8844
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_DCCM_TERMINATE_BRESP 0x40C8848
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_DCCM_TERMINATE_RRESP 0x40C884C
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_DCCM_TERMINATE_EN 0x40C8850
+
+#define mmDCORE0_MME_QM_ARC_AUX_CFG_DCCM_SECURE_REGION 0x40C8854
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x40C8900
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_AXI_ORDERING_CTL 0x40C8904
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x40C8908
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR 0x40C890C
+
+#define mmDCORE0_MME_QM_ARC_AUX_ARC_ACC_ENGS_BUSER 0x40C8910
+
+#define mmDCORE0_MME_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN 0x40C8920
+
+#endif /* ASIC_REG_DCORE0_MME_QM_ARC_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
new file mode 100644
index 000000000000..c7ebaf73c51e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_ARC_DUP_ENG_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_ARC_DUP_ENG_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_ARC_DUP_ENG_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_ASID 0x40C9900
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_MMU_BP 0x40C9904
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_STRONG_ORDER 0x40C9908
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_NO_SNOOP 0x40C990C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_WR_REDUCTION 0x40C9910
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_RD_ATOMIC 0x40C9914
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_QOS 0x40C9918
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_RSVD 0x40C991C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_EMEM_CPAGE 0x40C9920
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_CORE 0x40C9924
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_E2E_COORD 0x40C9928
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_WR_OVRD_LO 0x40C9930
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_WR_OVRD_HI 0x40C9934
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_RD_OVRD_LO 0x40C9938
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_RD_OVRD_HI 0x40C993C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_LB_COORD 0x40C9940
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_LB_LOCK 0x40C9944
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_LB_RSVD 0x40C9948
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_LB_OVRD 0x40C994C
+
+#endif /* ASIC_REG_DCORE0_MME_QM_ARC_DUP_ENG_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
new file mode 100644
index 000000000000..61654e37335b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_ARC_DUP_ENG_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_ARC_DUP_ENG_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_ARC_DUP_ENG
+ * (Prototype: ARC_DUP_ENG)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_0 0x40C9000
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_1 0x40C9004
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_2 0x40C9008
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_3 0x40C900C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_4 0x40C9010
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_5 0x40C9014
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_6 0x40C9018
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_7 0x40C901C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_8 0x40C9020
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_9 0x40C9024
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_10 0x40C9028
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_11 0x40C902C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_12 0x40C9030
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_13 0x40C9034
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_14 0x40C9038
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_15 0x40C903C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_16 0x40C9040
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_17 0x40C9044
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_18 0x40C9048
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_19 0x40C904C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_20 0x40C9050
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_21 0x40C9054
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_22 0x40C9058
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_23 0x40C905C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_24 0x40C9060
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_MME_ENG_ADDR_0 0x40C9064
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_MME_ENG_ADDR_1 0x40C9068
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_MME_ENG_ADDR_2 0x40C906C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_MME_ENG_ADDR_3 0x40C9070
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_0 0x40C9074
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_1 0x40C9078
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_2 0x40C907C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_3 0x40C9080
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_4 0x40C9084
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_5 0x40C9088
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_6 0x40C908C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_7 0x40C9090
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_8 0x40C9094
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_9 0x40C9098
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_10 0x40C909C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_11 0x40C90A0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_12 0x40C90A4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_13 0x40C90A8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_14 0x40C90AC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_15 0x40C90B0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_16 0x40C90B4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_17 0x40C90B8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_18 0x40C90BC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_19 0x40C90C0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_20 0x40C90C4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_21 0x40C90C8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_22 0x40C90CC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_ADDR_23 0x40C90D0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_0 0x40C90D4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_1 0x40C90D8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_2 0x40C90DC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_3 0x40C90E0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_4 0x40C90E4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_5 0x40C90E8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_6 0x40C90EC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_ADDR_7 0x40C90F0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_PDMA_ENG_ADDR_0 0x40C90F4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_PDMA_ENG_ADDR_1 0x40C90F8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_ROT_ENG_ADDR_0 0x40C90FC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_ROT_ENG_ADDR_1 0x40C9100
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_0 0x40C9104
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_1 0x40C9108
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_2 0x40C910C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_3 0x40C9110
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_4 0x40C9114
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_5 0x40C9118
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_6 0x40C911C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_7 0x40C9120
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_8 0x40C9124
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_9 0x40C9128
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_10 0x40C912C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_11 0x40C9130
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_12 0x40C9134
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_13 0x40C9138
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_14 0x40C913C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_ADDR_15 0x40C9140
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_MASK 0x40C9200
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_MME_ENG_MASK 0x40C9204
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_EDMA_ENG_MASK 0x40C9208
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_PDMA_ENG_MASK 0x40C920C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_ROT_ENG_MASK 0x40C9210
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_RSVD_ENG_MASK 0x40C9214
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_0 0x40C9218
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_1 0x40C921C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_2 0x40C9220
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_3 0x40C9224
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_4 0x40C9228
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_5 0x40C922C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_6 0x40C9230
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_NIC_ENG_MASK_7 0x40C9234
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_0 0x40C9238
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_1 0x40C923C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_2 0x40C9240
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_3 0x40C9244
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_4 0x40C9248
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_5 0x40C924C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_6 0x40C9250
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_7 0x40C9254
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_8 0x40C9258
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_9 0x40C925C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_10 0x40C9260
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_11 0x40C9264
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_12 0x40C9268
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_0_13 0x40C926C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_1_0 0x40C9288
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_1_1 0x40C928C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_1_2 0x40C9290
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_1_3 0x40C9294
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_1_4 0x40C9298
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_1_5 0x40C929C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_2_0 0x40C92A0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_2_1 0x40C92A4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_2_2 0x40C92A8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_2_3 0x40C92AC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_2_4 0x40C92B0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_2_5 0x40C92B4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_3_0 0x40C92B8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_3_1 0x40C92BC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_3_2 0x40C92C0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_3_3 0x40C92C4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_3_4 0x40C92C8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TRANS_DATA_Q_3_5 0x40C92CC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GENERAL_CFG 0x40C92D0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_BP_CFG 0x40C92D4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_0 0x40C92D8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_1 0x40C92DC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_2 0x40C92E0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_3 0x40C92E4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_4 0x40C92E8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_5 0x40C92EC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_6 0x40C92F0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_7 0x40C92F4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_8 0x40C92F8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_9 0x40C92FC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_10 0x40C9300
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_11 0x40C9304
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_12 0x40C9308
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_13 0x40C930C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_DBG_IN_GRP_TRANS_0 0x40C94A0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_DBG_IN_GRP_TRANS_1 0x40C94A4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_DBG_IN_GRP_TRANS_2 0x40C94A8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_DBG_STS 0x40C94AC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_DBG_OUT_RQ_CNT 0x40C94B0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_0 0x40C94B4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_1 0x40C94B8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_2 0x40C94BC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_3 0x40C94C0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_4 0x40C94C4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_5 0x40C94C8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_6 0x40C94CC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_7 0x40C94D0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_8 0x40C94D4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_9 0x40C94D8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_10 0x40C94DC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_11 0x40C94E0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_12 0x40C94E4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_13 0x40C94E8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_14 0x40C94EC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_15 0x40C94F0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_16 0x40C94F4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_17 0x40C94F8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_18 0x40C94FC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_19 0x40C9500
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_20 0x40C9504
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_21 0x40C9508
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_22 0x40C950C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_23 0x40C9510
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_24 0x40C9514
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_25 0x40C9518
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_26 0x40C951C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_27 0x40C9520
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_28 0x40C9524
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_29 0x40C9528
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_30 0x40C952C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_31 0x40C9530
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_32 0x40C9534
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_33 0x40C9538
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_34 0x40C953C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_35 0x40C9540
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_36 0x40C9544
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_37 0x40C9548
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_38 0x40C954C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_39 0x40C9550
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_40 0x40C9554
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_41 0x40C9558
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_42 0x40C955C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_43 0x40C9560
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_44 0x40C9564
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_45 0x40C9568
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_46 0x40C956C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_47 0x40C9570
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_48 0x40C9574
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_49 0x40C9578
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_50 0x40C957C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_51 0x40C9580
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_52 0x40C9584
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_53 0x40C9588
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_54 0x40C958C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_55 0x40C9590
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_56 0x40C9594
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_57 0x40C9598
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_58 0x40C959C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_59 0x40C95A0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_60 0x40C95A4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_61 0x40C95A8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_62 0x40C95AC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CONTEXT_ID_63 0x40C95B0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_0 0x40C95B4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_1 0x40C95B8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_2 0x40C95BC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_3 0x40C95C0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_4 0x40C95C4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_5 0x40C95C8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_6 0x40C95CC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_7 0x40C95D0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_8 0x40C95D4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_9 0x40C95D8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_10 0x40C95DC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_11 0x40C95E0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_12 0x40C95E4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_13 0x40C95E8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_14 0x40C95EC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_15 0x40C95F0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_16 0x40C95F4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_17 0x40C95F8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_18 0x40C95FC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_19 0x40C9600
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_20 0x40C9604
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_21 0x40C9608
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_22 0x40C960C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_23 0x40C9610
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_24 0x40C9614
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_25 0x40C9618
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_26 0x40C961C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_27 0x40C9620
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_28 0x40C9624
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_29 0x40C9628
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_30 0x40C962C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_31 0x40C9630
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_32 0x40C9634
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_33 0x40C9638
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_34 0x40C963C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_35 0x40C9640
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_36 0x40C9644
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_37 0x40C9648
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_38 0x40C964C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_39 0x40C9650
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_40 0x40C9654
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_41 0x40C9658
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_42 0x40C965C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_43 0x40C9660
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_44 0x40C9664
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_45 0x40C9668
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_46 0x40C966C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_47 0x40C9670
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_48 0x40C9674
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_49 0x40C9678
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_50 0x40C967C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_51 0x40C9680
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_52 0x40C9684
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_53 0x40C9688
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_54 0x40C968C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_55 0x40C9690
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_56 0x40C9694
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_57 0x40C9698
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_58 0x40C969C
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_59 0x40C96A0
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_60 0x40C96A4
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_61 0x40C96A8
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_62 0x40C96AC
+
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_63 0x40C96B0
+
+#endif /* ASIC_REG_DCORE0_MME_QM_ARC_DUP_ENG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..32089b8250ed
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_ASID 0x40CAB80
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_MMU_BP 0x40CAB84
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x40CAB88
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x40CAB8C
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x40CAB90
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x40CAB94
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_QOS 0x40CAB98
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_RSVD 0x40CAB9C
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x40CABA0
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_CORE 0x40CABA4
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_E2E_COORD 0x40CABA8
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x40CABB0
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x40CABB4
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x40CABB8
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x40CABBC
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_LB_COORD 0x40CABC0
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_LB_LOCK 0x40CABC4
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_LB_RSVD 0x40CABC8
+
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_LB_OVRD 0x40CABCC
+
+#endif /* ASIC_REG_DCORE0_MME_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
new file mode 100644
index 000000000000..e168c1cc2a7d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_AXUSER_SECURED_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_AXUSER_SECURED_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_AXUSER_SECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_ASID 0x40CAB00
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_MMU_BP 0x40CAB04
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_STRONG_ORDER 0x40CAB08
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_NO_SNOOP 0x40CAB0C
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_WR_REDUCTION 0x40CAB10
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_RD_ATOMIC 0x40CAB14
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_QOS 0x40CAB18
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_RSVD 0x40CAB1C
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_EMEM_CPAGE 0x40CAB20
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_CORE 0x40CAB24
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_E2E_COORD 0x40CAB28
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_WR_OVRD_LO 0x40CAB30
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_WR_OVRD_HI 0x40CAB34
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_RD_OVRD_LO 0x40CAB38
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_HB_RD_OVRD_HI 0x40CAB3C
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_LB_COORD 0x40CAB40
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_LB_LOCK 0x40CAB44
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_LB_RSVD 0x40CAB48
+
+#define mmDCORE0_MME_QM_AXUSER_SECURED_LB_OVRD 0x40CAB4C
+
+#endif /* ASIC_REG_DCORE0_MME_QM_AXUSER_SECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
new file mode 100644
index 000000000000..543aba18ef68
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_CGM_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_CGM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_CGM_CFG 0x40CAD80
+
+#define mmDCORE0_MME_QM_CGM_STS 0x40CAD84
+
+#define mmDCORE0_MME_QM_CGM_CFG1 0x40CAD88
+
+#endif /* ASIC_REG_DCORE0_MME_QM_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
new file mode 100644
index 000000000000..c45583fcc2cf
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_QM_REGS_H_
+#define ASIC_REG_DCORE0_MME_QM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_QM_GLBL_CFG0 0x40CA000
+
+#define mmDCORE0_MME_QM_GLBL_CFG1 0x40CA004
+
+#define mmDCORE0_MME_QM_GLBL_CFG2 0x40CA008
+
+#define mmDCORE0_MME_QM_GLBL_ERR_CFG 0x40CA00C
+
+#define mmDCORE0_MME_QM_GLBL_ERR_CFG1 0x40CA010
+
+#define mmDCORE0_MME_QM_GLBL_ERR_ARC_HALT_EN 0x40CA014
+
+#define mmDCORE0_MME_QM_GLBL_AXCACHE 0x40CA018
+
+#define mmDCORE0_MME_QM_GLBL_STS0 0x40CA01C
+
+#define mmDCORE0_MME_QM_GLBL_STS1 0x40CA020
+
+#define mmDCORE0_MME_QM_GLBL_ERR_STS_0 0x40CA024
+
+#define mmDCORE0_MME_QM_GLBL_ERR_STS_1 0x40CA028
+
+#define mmDCORE0_MME_QM_GLBL_ERR_STS_2 0x40CA02C
+
+#define mmDCORE0_MME_QM_GLBL_ERR_STS_3 0x40CA030
+
+#define mmDCORE0_MME_QM_GLBL_ERR_STS_4 0x40CA034
+
+#define mmDCORE0_MME_QM_GLBL_ERR_MSG_EN_0 0x40CA038
+
+#define mmDCORE0_MME_QM_GLBL_ERR_MSG_EN_1 0x40CA03C
+
+#define mmDCORE0_MME_QM_GLBL_ERR_MSG_EN_2 0x40CA040
+
+#define mmDCORE0_MME_QM_GLBL_ERR_MSG_EN_3 0x40CA044
+
+#define mmDCORE0_MME_QM_GLBL_ERR_MSG_EN_4 0x40CA048
+
+#define mmDCORE0_MME_QM_GLBL_PROT 0x40CA04C
+
+#define mmDCORE0_MME_QM_PQ_BASE_LO_0 0x40CA050
+
+#define mmDCORE0_MME_QM_PQ_BASE_LO_1 0x40CA054
+
+#define mmDCORE0_MME_QM_PQ_BASE_LO_2 0x40CA058
+
+#define mmDCORE0_MME_QM_PQ_BASE_LO_3 0x40CA05C
+
+#define mmDCORE0_MME_QM_PQ_BASE_HI_0 0x40CA060
+
+#define mmDCORE0_MME_QM_PQ_BASE_HI_1 0x40CA064
+
+#define mmDCORE0_MME_QM_PQ_BASE_HI_2 0x40CA068
+
+#define mmDCORE0_MME_QM_PQ_BASE_HI_3 0x40CA06C
+
+#define mmDCORE0_MME_QM_PQ_SIZE_0 0x40CA070
+
+#define mmDCORE0_MME_QM_PQ_SIZE_1 0x40CA074
+
+#define mmDCORE0_MME_QM_PQ_SIZE_2 0x40CA078
+
+#define mmDCORE0_MME_QM_PQ_SIZE_3 0x40CA07C
+
+#define mmDCORE0_MME_QM_PQ_PI_0 0x40CA080
+
+#define mmDCORE0_MME_QM_PQ_PI_1 0x40CA084
+
+#define mmDCORE0_MME_QM_PQ_PI_2 0x40CA088
+
+#define mmDCORE0_MME_QM_PQ_PI_3 0x40CA08C
+
+#define mmDCORE0_MME_QM_PQ_CI_0 0x40CA090
+
+#define mmDCORE0_MME_QM_PQ_CI_1 0x40CA094
+
+#define mmDCORE0_MME_QM_PQ_CI_2 0x40CA098
+
+#define mmDCORE0_MME_QM_PQ_CI_3 0x40CA09C
+
+#define mmDCORE0_MME_QM_PQ_CFG0_0 0x40CA0A0
+
+#define mmDCORE0_MME_QM_PQ_CFG0_1 0x40CA0A4
+
+#define mmDCORE0_MME_QM_PQ_CFG0_2 0x40CA0A8
+
+#define mmDCORE0_MME_QM_PQ_CFG0_3 0x40CA0AC
+
+#define mmDCORE0_MME_QM_PQ_CFG1_0 0x40CA0B0
+
+#define mmDCORE0_MME_QM_PQ_CFG1_1 0x40CA0B4
+
+#define mmDCORE0_MME_QM_PQ_CFG1_2 0x40CA0B8
+
+#define mmDCORE0_MME_QM_PQ_CFG1_3 0x40CA0BC
+
+#define mmDCORE0_MME_QM_PQ_STS0_0 0x40CA0C0
+
+#define mmDCORE0_MME_QM_PQ_STS0_1 0x40CA0C4
+
+#define mmDCORE0_MME_QM_PQ_STS0_2 0x40CA0C8
+
+#define mmDCORE0_MME_QM_PQ_STS0_3 0x40CA0CC
+
+#define mmDCORE0_MME_QM_PQ_STS1_0 0x40CA0D0
+
+#define mmDCORE0_MME_QM_PQ_STS1_1 0x40CA0D4
+
+#define mmDCORE0_MME_QM_PQ_STS1_2 0x40CA0D8
+
+#define mmDCORE0_MME_QM_PQ_STS1_3 0x40CA0DC
+
+#define mmDCORE0_MME_QM_CQ_CFG0_0 0x40CA0E0
+
+#define mmDCORE0_MME_QM_CQ_CFG0_1 0x40CA0E4
+
+#define mmDCORE0_MME_QM_CQ_CFG0_2 0x40CA0E8
+
+#define mmDCORE0_MME_QM_CQ_CFG0_3 0x40CA0EC
+
+#define mmDCORE0_MME_QM_CQ_CFG0_4 0x40CA0F0
+
+#define mmDCORE0_MME_QM_CQ_STS0_0 0x40CA0F4
+
+#define mmDCORE0_MME_QM_CQ_STS0_1 0x40CA0F8
+
+#define mmDCORE0_MME_QM_CQ_STS0_2 0x40CA0FC
+
+#define mmDCORE0_MME_QM_CQ_STS0_3 0x40CA100
+
+#define mmDCORE0_MME_QM_CQ_STS0_4 0x40CA104
+
+#define mmDCORE0_MME_QM_CQ_CFG1_0 0x40CA108
+
+#define mmDCORE0_MME_QM_CQ_CFG1_1 0x40CA10C
+
+#define mmDCORE0_MME_QM_CQ_CFG1_2 0x40CA110
+
+#define mmDCORE0_MME_QM_CQ_CFG1_3 0x40CA114
+
+#define mmDCORE0_MME_QM_CQ_CFG1_4 0x40CA118
+
+#define mmDCORE0_MME_QM_CQ_STS1_0 0x40CA11C
+
+#define mmDCORE0_MME_QM_CQ_STS1_1 0x40CA120
+
+#define mmDCORE0_MME_QM_CQ_STS1_2 0x40CA124
+
+#define mmDCORE0_MME_QM_CQ_STS1_3 0x40CA128
+
+#define mmDCORE0_MME_QM_CQ_STS1_4 0x40CA12C
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_0 0x40CA150
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_0 0x40CA154
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_0 0x40CA158
+
+#define mmDCORE0_MME_QM_CQ_CTL_0 0x40CA15C
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_1 0x40CA160
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_1 0x40CA164
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_1 0x40CA168
+
+#define mmDCORE0_MME_QM_CQ_CTL_1 0x40CA16C
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_2 0x40CA170
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_2 0x40CA174
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_2 0x40CA178
+
+#define mmDCORE0_MME_QM_CQ_CTL_2 0x40CA17C
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_3 0x40CA180
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_3 0x40CA184
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_3 0x40CA188
+
+#define mmDCORE0_MME_QM_CQ_CTL_3 0x40CA18C
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_4 0x40CA190
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_4 0x40CA194
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_4 0x40CA198
+
+#define mmDCORE0_MME_QM_CQ_CTL_4 0x40CA19C
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_STS_0 0x40CA1A0
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_STS_1 0x40CA1A4
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_STS_2 0x40CA1A8
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_STS_3 0x40CA1AC
+
+#define mmDCORE0_MME_QM_CQ_TSIZE_STS_4 0x40CA1B0
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_STS_0 0x40CA1B4
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_STS_1 0x40CA1B8
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_STS_2 0x40CA1BC
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_STS_3 0x40CA1C0
+
+#define mmDCORE0_MME_QM_CQ_PTR_LO_STS_4 0x40CA1C4
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_STS_0 0x40CA1C8
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_STS_1 0x40CA1CC
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_STS_2 0x40CA1D0
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_STS_3 0x40CA1D4
+
+#define mmDCORE0_MME_QM_CQ_PTR_HI_STS_4 0x40CA1D8
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_STS_0 0x40CA1DC
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_STS_1 0x40CA1E0
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_STS_2 0x40CA1E4
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_STS_3 0x40CA1E8
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_STS_4 0x40CA1EC
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_0 0x40CA1F0
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_1 0x40CA1F4
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_2 0x40CA1F8
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_3 0x40CA1FC
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_4 0x40CA200
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_0 0x40CA204
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_1 0x40CA208
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_2 0x40CA20C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_3 0x40CA210
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_4 0x40CA214
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_0 0x40CA218
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_1 0x40CA21C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_2 0x40CA220
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_3 0x40CA224
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_4 0x40CA228
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_0 0x40CA22C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_1 0x40CA230
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_2 0x40CA234
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_3 0x40CA238
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_4 0x40CA23C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_0 0x40CA240
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_1 0x40CA244
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_2 0x40CA248
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_3 0x40CA24C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_4 0x40CA250
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_0 0x40CA254
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_1 0x40CA258
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_2 0x40CA25C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_3 0x40CA260
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_4 0x40CA264
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_0 0x40CA268
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_1 0x40CA26C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_2 0x40CA270
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_3 0x40CA274
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_4 0x40CA278
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_0 0x40CA27C
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_1 0x40CA280
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_2 0x40CA284
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_3 0x40CA288
+
+#define mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_4 0x40CA28C
+
+#define mmDCORE0_MME_QM_CP_FENCE0_RDATA_0 0x40CA290
+
+#define mmDCORE0_MME_QM_CP_FENCE0_RDATA_1 0x40CA294
+
+#define mmDCORE0_MME_QM_CP_FENCE0_RDATA_2 0x40CA298
+
+#define mmDCORE0_MME_QM_CP_FENCE0_RDATA_3 0x40CA29C
+
+#define mmDCORE0_MME_QM_CP_FENCE0_RDATA_4 0x40CA2A0
+
+#define mmDCORE0_MME_QM_CP_FENCE1_RDATA_0 0x40CA2A4
+
+#define mmDCORE0_MME_QM_CP_FENCE1_RDATA_1 0x40CA2A8
+
+#define mmDCORE0_MME_QM_CP_FENCE1_RDATA_2 0x40CA2AC
+
+#define mmDCORE0_MME_QM_CP_FENCE1_RDATA_3 0x40CA2B0
+
+#define mmDCORE0_MME_QM_CP_FENCE1_RDATA_4 0x40CA2B4
+
+#define mmDCORE0_MME_QM_CP_FENCE2_RDATA_0 0x40CA2B8
+
+#define mmDCORE0_MME_QM_CP_FENCE2_RDATA_1 0x40CA2BC
+
+#define mmDCORE0_MME_QM_CP_FENCE2_RDATA_2 0x40CA2C0
+
+#define mmDCORE0_MME_QM_CP_FENCE2_RDATA_3 0x40CA2C4
+
+#define mmDCORE0_MME_QM_CP_FENCE2_RDATA_4 0x40CA2C8
+
+#define mmDCORE0_MME_QM_CP_FENCE3_RDATA_0 0x40CA2CC
+
+#define mmDCORE0_MME_QM_CP_FENCE3_RDATA_1 0x40CA2D0
+
+#define mmDCORE0_MME_QM_CP_FENCE3_RDATA_2 0x40CA2D4
+
+#define mmDCORE0_MME_QM_CP_FENCE3_RDATA_3 0x40CA2D8
+
+#define mmDCORE0_MME_QM_CP_FENCE3_RDATA_4 0x40CA2DC
+
+#define mmDCORE0_MME_QM_CP_FENCE0_CNT_0 0x40CA2E0
+
+#define mmDCORE0_MME_QM_CP_FENCE0_CNT_1 0x40CA2E4
+
+#define mmDCORE0_MME_QM_CP_FENCE0_CNT_2 0x40CA2E8
+
+#define mmDCORE0_MME_QM_CP_FENCE0_CNT_3 0x40CA2EC
+
+#define mmDCORE0_MME_QM_CP_FENCE0_CNT_4 0x40CA2F0
+
+#define mmDCORE0_MME_QM_CP_FENCE1_CNT_0 0x40CA2F4
+
+#define mmDCORE0_MME_QM_CP_FENCE1_CNT_1 0x40CA2F8
+
+#define mmDCORE0_MME_QM_CP_FENCE1_CNT_2 0x40CA2FC
+
+#define mmDCORE0_MME_QM_CP_FENCE1_CNT_3 0x40CA300
+
+#define mmDCORE0_MME_QM_CP_FENCE1_CNT_4 0x40CA304
+
+#define mmDCORE0_MME_QM_CP_FENCE2_CNT_0 0x40CA308
+
+#define mmDCORE0_MME_QM_CP_FENCE2_CNT_1 0x40CA30C
+
+#define mmDCORE0_MME_QM_CP_FENCE2_CNT_2 0x40CA310
+
+#define mmDCORE0_MME_QM_CP_FENCE2_CNT_3 0x40CA314
+
+#define mmDCORE0_MME_QM_CP_FENCE2_CNT_4 0x40CA318
+
+#define mmDCORE0_MME_QM_CP_FENCE3_CNT_0 0x40CA31C
+
+#define mmDCORE0_MME_QM_CP_FENCE3_CNT_1 0x40CA320
+
+#define mmDCORE0_MME_QM_CP_FENCE3_CNT_2 0x40CA324
+
+#define mmDCORE0_MME_QM_CP_FENCE3_CNT_3 0x40CA328
+
+#define mmDCORE0_MME_QM_CP_FENCE3_CNT_4 0x40CA32C
+
+#define mmDCORE0_MME_QM_CP_BARRIER_CFG 0x40CA330
+
+#define mmDCORE0_MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0x40CA334
+
+#define mmDCORE0_MME_QM_CP_LDMA_DST_BASE_LO_OFFSET 0x40CA338
+
+#define mmDCORE0_MME_QM_CP_LDMA_TSIZE_OFFSET 0x40CA33C
+
+#define mmDCORE0_MME_QM_CP_CQ_PTR_LO_OFFSET_0 0x40CA340
+
+#define mmDCORE0_MME_QM_CP_CQ_PTR_LO_OFFSET_1 0x40CA344
+
+#define mmDCORE0_MME_QM_CP_CQ_PTR_LO_OFFSET_2 0x40CA348
+
+#define mmDCORE0_MME_QM_CP_CQ_PTR_LO_OFFSET_3 0x40CA34C
+
+#define mmDCORE0_MME_QM_CP_CQ_PTR_LO_OFFSET_4 0x40CA350
+
+#define mmDCORE0_MME_QM_CP_STS_0 0x40CA368
+
+#define mmDCORE0_MME_QM_CP_STS_1 0x40CA36C
+
+#define mmDCORE0_MME_QM_CP_STS_2 0x40CA370
+
+#define mmDCORE0_MME_QM_CP_STS_3 0x40CA374
+
+#define mmDCORE0_MME_QM_CP_STS_4 0x40CA378
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_LO_0 0x40CA37C
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_LO_1 0x40CA380
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_LO_2 0x40CA384
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_LO_3 0x40CA388
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_LO_4 0x40CA38C
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_HI_0 0x40CA390
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_HI_1 0x40CA394
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_HI_2 0x40CA398
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_HI_3 0x40CA39C
+
+#define mmDCORE0_MME_QM_CP_CURRENT_INST_HI_4 0x40CA3A0
+
+#define mmDCORE0_MME_QM_CP_PRED_0 0x40CA3A4
+
+#define mmDCORE0_MME_QM_CP_PRED_1 0x40CA3A8
+
+#define mmDCORE0_MME_QM_CP_PRED_2 0x40CA3AC
+
+#define mmDCORE0_MME_QM_CP_PRED_3 0x40CA3B0
+
+#define mmDCORE0_MME_QM_CP_PRED_4 0x40CA3B4
+
+#define mmDCORE0_MME_QM_CP_PRED_UPEN_0 0x40CA3B8
+
+#define mmDCORE0_MME_QM_CP_PRED_UPEN_1 0x40CA3BC
+
+#define mmDCORE0_MME_QM_CP_PRED_UPEN_2 0x40CA3C0
+
+#define mmDCORE0_MME_QM_CP_PRED_UPEN_3 0x40CA3C4
+
+#define mmDCORE0_MME_QM_CP_PRED_UPEN_4 0x40CA3C8
+
+#define mmDCORE0_MME_QM_CP_DBG_0_0 0x40CA3CC
+
+#define mmDCORE0_MME_QM_CP_DBG_0_1 0x40CA3D0
+
+#define mmDCORE0_MME_QM_CP_DBG_0_2 0x40CA3D4
+
+#define mmDCORE0_MME_QM_CP_DBG_0_3 0x40CA3D8
+
+#define mmDCORE0_MME_QM_CP_DBG_0_4 0x40CA3DC
+
+#define mmDCORE0_MME_QM_CP_CPDMA_UP_CRED_0 0x40CA3E0
+
+#define mmDCORE0_MME_QM_CP_CPDMA_UP_CRED_1 0x40CA3E4
+
+#define mmDCORE0_MME_QM_CP_CPDMA_UP_CRED_2 0x40CA3E8
+
+#define mmDCORE0_MME_QM_CP_CPDMA_UP_CRED_3 0x40CA3EC
+
+#define mmDCORE0_MME_QM_CP_CPDMA_UP_CRED_4 0x40CA3F0
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_LO_0 0x40CA3F4
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_LO_1 0x40CA3F8
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_LO_2 0x40CA3FC
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_LO_3 0x40CA400
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_LO_4 0x40CA404
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_HI_0 0x40CA408
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_HI_1 0x40CA40C
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_HI_2 0x40CA410
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_HI_3 0x40CA414
+
+#define mmDCORE0_MME_QM_CP_IN_DATA_HI_4 0x40CA418
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_LO_0 0x40CA41C
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_LO_1 0x40CA420
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_LO_2 0x40CA424
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_LO_3 0x40CA428
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_HI_0 0x40CA42C
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_HI_1 0x40CA430
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_HI_2 0x40CA434
+
+#define mmDCORE0_MME_QM_PQC_HBW_BASE_HI_3 0x40CA438
+
+#define mmDCORE0_MME_QM_PQC_SIZE_0 0x40CA43C
+
+#define mmDCORE0_MME_QM_PQC_SIZE_1 0x40CA440
+
+#define mmDCORE0_MME_QM_PQC_SIZE_2 0x40CA444
+
+#define mmDCORE0_MME_QM_PQC_SIZE_3 0x40CA448
+
+#define mmDCORE0_MME_QM_PQC_PI_0 0x40CA44C
+
+#define mmDCORE0_MME_QM_PQC_PI_1 0x40CA450
+
+#define mmDCORE0_MME_QM_PQC_PI_2 0x40CA454
+
+#define mmDCORE0_MME_QM_PQC_PI_3 0x40CA458
+
+#define mmDCORE0_MME_QM_PQC_LBW_WDATA_0 0x40CA45C
+
+#define mmDCORE0_MME_QM_PQC_LBW_WDATA_1 0x40CA460
+
+#define mmDCORE0_MME_QM_PQC_LBW_WDATA_2 0x40CA464
+
+#define mmDCORE0_MME_QM_PQC_LBW_WDATA_3 0x40CA468
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_LO_0 0x40CA46C
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_LO_1 0x40CA470
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_LO_2 0x40CA474
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_LO_3 0x40CA478
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_HI_0 0x40CA47C
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_HI_1 0x40CA480
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_HI_2 0x40CA484
+
+#define mmDCORE0_MME_QM_PQC_LBW_BASE_HI_3 0x40CA488
+
+#define mmDCORE0_MME_QM_PQC_CFG 0x40CA48C
+
+#define mmDCORE0_MME_QM_PQC_SECURE_PUSH_IND 0x40CA490
+
+#define mmDCORE0_MME_QM_ARB_MASK 0x40CA4A0
+
+#define mmDCORE0_MME_QM_ARB_CFG_0 0x40CA4A4
+
+#define mmDCORE0_MME_QM_ARB_CHOICE_Q_PUSH 0x40CA4A8
+
+#define mmDCORE0_MME_QM_ARB_WRR_WEIGHT_0 0x40CA4AC
+
+#define mmDCORE0_MME_QM_ARB_WRR_WEIGHT_1 0x40CA4B0
+
+#define mmDCORE0_MME_QM_ARB_WRR_WEIGHT_2 0x40CA4B4
+
+#define mmDCORE0_MME_QM_ARB_WRR_WEIGHT_3 0x40CA4B8
+
+#define mmDCORE0_MME_QM_ARB_CFG_1 0x40CA4BC
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_0 0x40CA4C0
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_1 0x40CA4C4
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_2 0x40CA4C8
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_3 0x40CA4CC
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_4 0x40CA4D0
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_5 0x40CA4D4
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_6 0x40CA4D8
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_7 0x40CA4DC
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_8 0x40CA4E0
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_9 0x40CA4E4
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_10 0x40CA4E8
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_11 0x40CA4EC
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_12 0x40CA4F0
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_13 0x40CA4F4
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_14 0x40CA4F8
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_15 0x40CA4FC
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_16 0x40CA500
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_17 0x40CA504
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_18 0x40CA508
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_19 0x40CA50C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_20 0x40CA510
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_21 0x40CA514
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_22 0x40CA518
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_23 0x40CA51C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_24 0x40CA520
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_25 0x40CA524
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_26 0x40CA528
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_27 0x40CA52C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_28 0x40CA530
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_29 0x40CA534
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_30 0x40CA538
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_31 0x40CA53C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_32 0x40CA540
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_33 0x40CA544
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_34 0x40CA548
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_35 0x40CA54C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_36 0x40CA550
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_37 0x40CA554
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_38 0x40CA558
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_39 0x40CA55C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_40 0x40CA560
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_41 0x40CA564
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_42 0x40CA568
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_43 0x40CA56C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_44 0x40CA570
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_45 0x40CA574
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_46 0x40CA578
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_47 0x40CA57C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_48 0x40CA580
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_49 0x40CA584
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_50 0x40CA588
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_51 0x40CA58C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_52 0x40CA590
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_53 0x40CA594
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_54 0x40CA598
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_55 0x40CA59C
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_56 0x40CA5A0
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_57 0x40CA5A4
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_58 0x40CA5A8
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_59 0x40CA5AC
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_60 0x40CA5B0
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_61 0x40CA5B4
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_62 0x40CA5B8
+
+#define mmDCORE0_MME_QM_ARB_MST_AVAIL_CRED_63 0x40CA5BC
+
+#define mmDCORE0_MME_QM_ARB_MST_CRED_INC 0x40CA5E0
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_0 0x40CA5E4
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_1 0x40CA5E8
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_2 0x40CA5EC
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_3 0x40CA5F0
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_4 0x40CA5F4
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_5 0x40CA5F8
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_6 0x40CA5FC
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_7 0x40CA600
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_8 0x40CA604
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_9 0x40CA608
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_10 0x40CA60C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_11 0x40CA610
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_12 0x40CA614
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_13 0x40CA618
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_14 0x40CA61C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_15 0x40CA620
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_16 0x40CA624
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_17 0x40CA628
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_18 0x40CA62C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_19 0x40CA630
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_20 0x40CA634
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_21 0x40CA638
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_22 0x40CA63C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_23 0x40CA640
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_24 0x40CA644
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_25 0x40CA648
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_26 0x40CA64C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_27 0x40CA650
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_28 0x40CA654
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_29 0x40CA658
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_30 0x40CA65C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_31 0x40CA660
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_32 0x40CA664
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_33 0x40CA668
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_34 0x40CA66C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_35 0x40CA670
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_36 0x40CA674
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_37 0x40CA678
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_38 0x40CA67C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_39 0x40CA680
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_40 0x40CA684
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_41 0x40CA688
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_42 0x40CA68C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_43 0x40CA690
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_44 0x40CA694
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_45 0x40CA698
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_46 0x40CA69C
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_47 0x40CA6A0
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_48 0x40CA6A4
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_49 0x40CA6A8
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_50 0x40CA6AC
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_51 0x40CA6B0
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_52 0x40CA6B4
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_53 0x40CA6B8
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_54 0x40CA6BC
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_55 0x40CA6C0
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_56 0x40CA6C4
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_57 0x40CA6C8
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_58 0x40CA6CC
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_59 0x40CA6D0
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_60 0x40CA6D4
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_61 0x40CA6D8
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_62 0x40CA6DC
+
+#define mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_63 0x40CA6E0
+
+#define mmDCORE0_MME_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x40CA704
+
+#define mmDCORE0_MME_QM_ARB_MST_SLAVE_EN 0x40CA708
+
+#define mmDCORE0_MME_QM_ARB_MST_SLAVE_EN_1 0x40CA70C
+
+#define mmDCORE0_MME_QM_ARB_SLV_CHOICE_WDT 0x40CA710
+
+#define mmDCORE0_MME_QM_ARB_SLV_ID 0x40CA714
+
+#define mmDCORE0_MME_QM_ARB_MST_QUIET_PER 0x40CA718
+
+#define mmDCORE0_MME_QM_ARB_MSG_MAX_INFLIGHT 0x40CA744
+
+#define mmDCORE0_MME_QM_ARB_BASE_LO 0x40CA754
+
+#define mmDCORE0_MME_QM_ARB_BASE_HI 0x40CA758
+
+#define mmDCORE0_MME_QM_ARB_STATE_STS 0x40CA780
+
+#define mmDCORE0_MME_QM_ARB_CHOICE_FULLNESS_STS 0x40CA784
+
+#define mmDCORE0_MME_QM_ARB_MSG_STS 0x40CA788
+
+#define mmDCORE0_MME_QM_ARB_SLV_CHOICE_Q_HEAD 0x40CA78C
+
+#define mmDCORE0_MME_QM_ARB_ERR_CAUSE 0x40CA79C
+
+#define mmDCORE0_MME_QM_ARB_ERR_MSG_EN 0x40CA7A0
+
+#define mmDCORE0_MME_QM_ARB_ERR_STS_DRP 0x40CA7A8
+
+#define mmDCORE0_MME_QM_ARB_MST_CRED_STS 0x40CA7B0
+
+#define mmDCORE0_MME_QM_ARB_MST_CRED_STS_1 0x40CA7B4
+
+#define mmDCORE0_MME_QM_CSMR_STRICT_PRIO_CFG 0x40CA7FC
+
+#define mmDCORE0_MME_QM_ARC_CQ_CFG0 0x40CA800
+
+#define mmDCORE0_MME_QM_ARC_CQ_CFG1 0x40CA804
+
+#define mmDCORE0_MME_QM_ARC_CQ_PTR_LO 0x40CA808
+
+#define mmDCORE0_MME_QM_ARC_CQ_PTR_HI 0x40CA80C
+
+#define mmDCORE0_MME_QM_ARC_CQ_TSIZE 0x40CA810
+
+#define mmDCORE0_MME_QM_ARC_CQ_CTL 0x40CA814
+
+#define mmDCORE0_MME_QM_ARC_CQ_IFIFO_STS 0x40CA81C
+
+#define mmDCORE0_MME_QM_ARC_CQ_STS0 0x40CA820
+
+#define mmDCORE0_MME_QM_ARC_CQ_STS1 0x40CA824
+
+#define mmDCORE0_MME_QM_ARC_CQ_TSIZE_STS 0x40CA828
+
+#define mmDCORE0_MME_QM_ARC_CQ_PTR_LO_STS 0x40CA82C
+
+#define mmDCORE0_MME_QM_ARC_CQ_PTR_HI_STS 0x40CA830
+
+#define mmDCORE0_MME_QM_CP_WR_ARC_ADDR_HI 0x40CA834
+
+#define mmDCORE0_MME_QM_CP_WR_ARC_ADDR_LO 0x40CA838
+
+#define mmDCORE0_MME_QM_ARC_CQ_IFIFO_MSG_BASE_HI 0x40CA83C
+
+#define mmDCORE0_MME_QM_ARC_CQ_IFIFO_MSG_BASE_LO 0x40CA840
+
+#define mmDCORE0_MME_QM_ARC_CQ_CTL_MSG_BASE_HI 0x40CA844
+
+#define mmDCORE0_MME_QM_ARC_CQ_CTL_MSG_BASE_LO 0x40CA848
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_MSG_BASE_HI 0x40CA84C
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_MSG_BASE_LO 0x40CA850
+
+#define mmDCORE0_MME_QM_CQ_CTL_MSG_BASE_HI 0x40CA854
+
+#define mmDCORE0_MME_QM_CQ_CTL_MSG_BASE_LO 0x40CA858
+
+#define mmDCORE0_MME_QM_ADDR_OVRD 0x40CA85C
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_CI_0 0x40CA860
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_CI_1 0x40CA864
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_CI_2 0x40CA868
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_CI_3 0x40CA86C
+
+#define mmDCORE0_MME_QM_CQ_IFIFO_CI_4 0x40CA870
+
+#define mmDCORE0_MME_QM_ARC_CQ_IFIFO_CI 0x40CA874
+
+#define mmDCORE0_MME_QM_CQ_CTL_CI_0 0x40CA878
+
+#define mmDCORE0_MME_QM_CQ_CTL_CI_1 0x40CA87C
+
+#define mmDCORE0_MME_QM_CQ_CTL_CI_2 0x40CA880
+
+#define mmDCORE0_MME_QM_CQ_CTL_CI_3 0x40CA884
+
+#define mmDCORE0_MME_QM_CQ_CTL_CI_4 0x40CA888
+
+#define mmDCORE0_MME_QM_ARC_CQ_CTL_CI 0x40CA88C
+
+#define mmDCORE0_MME_QM_CP_CFG 0x40CA890
+
+#define mmDCORE0_MME_QM_CP_EXT_SWITCH 0x40CA894
+
+#define mmDCORE0_MME_QM_CP_SWITCH_WD_SET 0x40CA898
+
+#define mmDCORE0_MME_QM_CP_SWITCH_WD 0x40CA89C
+
+#define mmDCORE0_MME_QM_ARC_LB_ADDR_BASE_LO 0x40CA8A4
+
+#define mmDCORE0_MME_QM_ARC_LB_ADDR_BASE_HI 0x40CA8A8
+
+#define mmDCORE0_MME_QM_ENGINE_BASE_ADDR_HI 0x40CA8AC
+
+#define mmDCORE0_MME_QM_ENGINE_BASE_ADDR_LO 0x40CA8B0
+
+#define mmDCORE0_MME_QM_ENGINE_ADDR_RANGE_SIZE 0x40CA8B4
+
+#define mmDCORE0_MME_QM_QM_ARC_AUX_BASE_ADDR_HI 0x40CA8B8
+
+#define mmDCORE0_MME_QM_QM_ARC_AUX_BASE_ADDR_LO 0x40CA8BC
+
+#define mmDCORE0_MME_QM_QM_BASE_ADDR_HI 0x40CA8C0
+
+#define mmDCORE0_MME_QM_QM_BASE_ADDR_LO 0x40CA8C4
+
+#define mmDCORE0_MME_QM_ARC_PQC_SECURE_PUSH_IND 0x40CA8C8
+
+#define mmDCORE0_MME_QM_PQC_STS_0_0 0x40CA8D0
+
+#define mmDCORE0_MME_QM_PQC_STS_0_1 0x40CA8D4
+
+#define mmDCORE0_MME_QM_PQC_STS_0_2 0x40CA8D8
+
+#define mmDCORE0_MME_QM_PQC_STS_0_3 0x40CA8DC
+
+#define mmDCORE0_MME_QM_PQC_STS_1_0 0x40CA8E0
+
+#define mmDCORE0_MME_QM_PQC_STS_1_1 0x40CA8E4
+
+#define mmDCORE0_MME_QM_PQC_STS_1_2 0x40CA8E8
+
+#define mmDCORE0_MME_QM_PQC_STS_1_3 0x40CA8EC
+
+#define mmDCORE0_MME_QM_SEI_STATUS 0x40CA8F0
+
+#define mmDCORE0_MME_QM_SEI_MASK 0x40CA8F4
+
+#define mmDCORE0_MME_QM_GLBL_ERR_ADDR_LO 0x40CAD00
+
+#define mmDCORE0_MME_QM_GLBL_ERR_ADDR_HI 0x40CAD04
+
+#define mmDCORE0_MME_QM_GLBL_ERR_WDATA 0x40CAD08
+
+#define mmDCORE0_MME_QM_L2H_MASK_LO 0x40CAD14
+
+#define mmDCORE0_MME_QM_L2H_MASK_HI 0x40CAD18
+
+#define mmDCORE0_MME_QM_L2H_CMPR_LO 0x40CAD1C
+
+#define mmDCORE0_MME_QM_L2H_CMPR_HI 0x40CAD20
+
+#define mmDCORE0_MME_QM_LOCAL_RANGE_BASE 0x40CAD24
+
+#define mmDCORE0_MME_QM_LOCAL_RANGE_SIZE 0x40CAD28
+
+#define mmDCORE0_MME_QM_HBW_RD_RATE_LIM_CFG_1 0x40CAD30
+
+#define mmDCORE0_MME_QM_LBW_WR_RATE_LIM_CFG_0 0x40CAD34
+
+#define mmDCORE0_MME_QM_LBW_WR_RATE_LIM_CFG_1 0x40CAD38
+
+#define mmDCORE0_MME_QM_HBW_RD_RATE_LIM_CFG_0 0x40CAD3C
+
+#define mmDCORE0_MME_QM_IND_GW_APB_CFG 0x40CAD40
+
+#define mmDCORE0_MME_QM_IND_GW_APB_WDATA 0x40CAD44
+
+#define mmDCORE0_MME_QM_IND_GW_APB_RDATA 0x40CAD48
+
+#define mmDCORE0_MME_QM_IND_GW_APB_STATUS 0x40CAD4C
+
+#define mmDCORE0_MME_QM_PERF_CNT_FREE_LO 0x40CAD60
+
+#define mmDCORE0_MME_QM_PERF_CNT_FREE_HI 0x40CAD64
+
+#define mmDCORE0_MME_QM_PERF_CNT_IDLE_LO 0x40CAD68
+
+#define mmDCORE0_MME_QM_PERF_CNT_IDLE_HI 0x40CAD6C
+
+#define mmDCORE0_MME_QM_PERF_CNT_CFG 0x40CAD70
+
+#endif /* ASIC_REG_DCORE0_MME_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
new file mode 100644
index 000000000000..077ae5232790
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_SBTE0_MASKS_H_
+#define ASIC_REG_DCORE0_MME_SBTE0_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_SBTE0
+ * (Prototype: SB)
+ *****************************************
+ */
+
+/* DCORE0_MME_SBTE0_MAX_SIZE */
+#define DCORE0_MME_SBTE0_MAX_SIZE_DATA_SHIFT 0
+#define DCORE0_MME_SBTE0_MAX_SIZE_DATA_MASK 0xFFFF
+#define DCORE0_MME_SBTE0_MAX_SIZE_MD_SHIFT 16
+#define DCORE0_MME_SBTE0_MAX_SIZE_MD_MASK 0xFFFF0000
+
+/* DCORE0_MME_SBTE0_FORCE_MISS */
+#define DCORE0_MME_SBTE0_FORCE_MISS_R_SHIFT 0
+#define DCORE0_MME_SBTE0_FORCE_MISS_R_MASK 0x1
+
+/* DCORE0_MME_SBTE0_MAX */
+#define DCORE0_MME_SBTE0_MAX_OS_SHIFT 0
+#define DCORE0_MME_SBTE0_MAX_OS_MASK 0xFFFF
+
+/* DCORE0_MME_SBTE0_RL */
+#define DCORE0_MME_SBTE0_RL_SATURATION_SHIFT 0
+#define DCORE0_MME_SBTE0_RL_SATURATION_MASK 0xFF
+#define DCORE0_MME_SBTE0_RL_TIMEOUT_SHIFT 8
+#define DCORE0_MME_SBTE0_RL_TIMEOUT_MASK 0xFF00
+#define DCORE0_MME_SBTE0_RL_RATE_LIMITER_EN_SHIFT 16
+#define DCORE0_MME_SBTE0_RL_RATE_LIMITER_EN_MASK 0x10000
+
+/* DCORE0_MME_SBTE0_SB_STALL */
+#define DCORE0_MME_SBTE0_SB_STALL_R_SHIFT 0
+#define DCORE0_MME_SBTE0_SB_STALL_R_MASK 0x1
+
+/* DCORE0_MME_SBTE0_INTR */
+#define DCORE0_MME_SBTE0_INTR_I0_SHIFT 0
+#define DCORE0_MME_SBTE0_INTR_I0_MASK 0x1
+
+/* DCORE0_MME_SBTE0_ARUSER */
+#define DCORE0_MME_SBTE0_ARUSER_ASID_SHIFT 0
+#define DCORE0_MME_SBTE0_ARUSER_ASID_MASK 0x3FF
+#define DCORE0_MME_SBTE0_ARUSER_MMBP_SHIFT 10
+#define DCORE0_MME_SBTE0_ARUSER_MMBP_MASK 0x400
+#define DCORE0_MME_SBTE0_ARUSER_DUMMY_SHIFT 11
+#define DCORE0_MME_SBTE0_ARUSER_DUMMY_MASK 0xFFFFF800
+
+/* DCORE0_MME_SBTE0_ARCACHE */
+#define DCORE0_MME_SBTE0_ARCACHE_N_SHIFT 0
+#define DCORE0_MME_SBTE0_ARCACHE_N_MASK 0xF
+
+/* DCORE0_MME_SBTE0_STATUS */
+#define DCORE0_MME_SBTE0_STATUS_DROP_CNT_SHIFT 0
+#define DCORE0_MME_SBTE0_STATUS_DROP_CNT_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_SBTE0_PRTN */
+#define DCORE0_MME_SBTE0_PRTN_CLK_EN_SHIFT 0
+#define DCORE0_MME_SBTE0_PRTN_CLK_EN_MASK 0x1
+
+/* DCORE0_MME_SBTE0_CFG_SB_INFLIGHTS */
+#define DCORE0_MME_SBTE0_CFG_SB_INFLIGHTS_W_SHIFT 0
+#define DCORE0_MME_SBTE0_CFG_SB_INFLIGHTS_W_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_SBTE0_PROT */
+#define DCORE0_MME_SBTE0_PROT_W_SHIFT 0
+#define DCORE0_MME_SBTE0_PROT_W_MASK 0x7
+
+/* DCORE0_MME_SBTE0_INTR_MASK */
+#define DCORE0_MME_SBTE0_INTR_MASK_W_SHIFT 0
+#define DCORE0_MME_SBTE0_INTR_MASK_W_MASK 0x1
+
+/* DCORE0_MME_SBTE0_ARUSER_MSB */
+#define DCORE0_MME_SBTE0_ARUSER_MSB_VAL_SHIFT 0
+#define DCORE0_MME_SBTE0_ARUSER_MSB_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_SBTE0_CFG_SB_OCCUPIENCY */
+#define DCORE0_MME_SBTE0_CFG_SB_OCCUPIENCY_VAL_SHIFT 0
+#define DCORE0_MME_SBTE0_CFG_SB_OCCUPIENCY_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_SBTE0_ENABLE_CGATE */
+#define DCORE0_MME_SBTE0_ENABLE_CGATE_TE_EN_SHIFT 0
+#define DCORE0_MME_SBTE0_ENABLE_CGATE_TE_EN_MASK 0x1
+#define DCORE0_MME_SBTE0_ENABLE_CGATE_SB_EN_SHIFT 4
+#define DCORE0_MME_SBTE0_ENABLE_CGATE_SB_EN_MASK 0x10
+
+/* DCORE0_MME_SBTE0_INTF_VLD_DBG */
+#define DCORE0_MME_SBTE0_INTF_VLD_DBG_VLD_SHIFT 0
+#define DCORE0_MME_SBTE0_INTF_VLD_DBG_VLD_MASK 0xFFFFFFFF
+
+/* DCORE0_MME_SBTE0_INTF_RDY_DBG */
+#define DCORE0_MME_SBTE0_INTF_RDY_DBG_RDY_SHIFT 0
+#define DCORE0_MME_SBTE0_INTF_RDY_DBG_RDY_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_MME_SBTE0_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
new file mode 100644
index 000000000000..211fa2c2c35b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_SBTE0_MSTR_IF_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_MME_SBTE0_MSTR_IF_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_SBTE0_MSTR_IF_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_ASID 0x40D1A80
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_MMU_BP 0x40D1A84
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_STRONG_ORDER 0x40D1A88
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_NO_SNOOP 0x40D1A8C
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_WR_REDUCTION 0x40D1A90
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_RD_ATOMIC 0x40D1A94
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_QOS 0x40D1A98
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_RSVD 0x40D1A9C
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_EMEM_CPAGE 0x40D1AA0
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_CORE 0x40D1AA4
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_E2E_COORD 0x40D1AA8
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_WR_OVRD_LO 0x40D1AB0
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_WR_OVRD_HI 0x40D1AB4
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_RD_OVRD_LO 0x40D1AB8
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_RD_OVRD_HI 0x40D1ABC
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_LB_COORD 0x40D1AC0
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_LB_LOCK 0x40D1AC4
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_LB_RSVD 0x40D1AC8
+
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_LB_OVRD 0x40D1ACC
+
+#endif /* ASIC_REG_DCORE0_MME_SBTE0_MSTR_IF_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
new file mode 100644
index 000000000000..374a01d2b8d5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_MME_WB0_MSTR_IF_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_MME_WB0_MSTR_IF_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_MME_WB0_MSTR_IF_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_ASID 0x40F9A80
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_MMU_BP 0x40F9A84
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_STRONG_ORDER 0x40F9A88
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_NO_SNOOP 0x40F9A8C
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_WR_REDUCTION 0x40F9A90
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RD_ATOMIC 0x40F9A94
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_QOS 0x40F9A98
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RSVD 0x40F9A9C
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_EMEM_CPAGE 0x40F9AA0
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_CORE 0x40F9AA4
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_E2E_COORD 0x40F9AA8
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_WR_OVRD_LO 0x40F9AB0
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_WR_OVRD_HI 0x40F9AB4
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RD_OVRD_LO 0x40F9AB8
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_RD_OVRD_HI 0x40F9ABC
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_COORD 0x40F9AC0
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_LOCK 0x40F9AC4
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_RSVD 0x40F9AC8
+
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_LB_OVRD 0x40F9ACC
+
+#endif /* ASIC_REG_DCORE0_MME_WB0_MSTR_IF_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
new file mode 100644
index 000000000000..22f4d6c805c5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_RTR0_CTRL_REGS_H_
+#define ASIC_REG_DCORE0_RTR0_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_RTR0_CTRL
+ * (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDCORE0_RTR0_CTRL_MEM_NUM 0x4140100
+
+#define mmDCORE0_RTR0_CTRL_MEM_MAP 0x4140104
+
+#define mmDCORE0_RTR0_CTRL_WR_RL_MEM 0x4140108
+
+#define mmDCORE0_RTR0_CTRL_WR_RL_PCI 0x414010C
+
+#define mmDCORE0_RTR0_CTRL_WR_RL_SRAM 0x4140110
+
+#define mmDCORE0_RTR0_CTRL_RD_RL_MEM 0x4140114
+
+#define mmDCORE0_RTR0_CTRL_RD_RL_PCI 0x4140118
+
+#define mmDCORE0_RTR0_CTRL_RD_RL_SRAM 0x414011C
+
+#define mmDCORE0_RTR0_CTRL_WR_RL_MEM_RED 0x4140120
+
+#define mmDCORE0_RTR0_CTRL_RL_MEM_REDUCTION 0x4140124
+
+#define mmDCORE0_RTR0_CTRL_WR_RL_SRAM_RED 0x4140128
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_CFG_0 0x4140400
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_CFG_1 0x4140404
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_SHIFT_0 0x4140408
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_SHIFT_1 0x414040C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_0 0x4140410
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_1 0x4140414
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_2 0x4140418
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_3 0x414041C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_4 0x4140420
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_5 0x4140424
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_6 0x4140428
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_7 0x414042C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_8 0x4140430
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_9 0x4140434
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_10 0x4140438
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_11 0x414043C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_12 0x4140440
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_13 0x4140444
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_14 0x4140448
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_EXPECTED_LAT_15 0x414044C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_0 0x4140450
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_1 0x4140454
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_2 0x4140458
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_3 0x414045C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_4 0x4140460
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_5 0x4140464
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_6 0x4140468
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_7 0x414046C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_8 0x4140470
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_9 0x4140474
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_10 0x4140478
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_11 0x414047C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_12 0x4140480
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_13 0x4140484
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_14 0x4140488
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_TOKEN_15 0x414048C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_0 0x4140490
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_1 0x4140494
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_2 0x4140498
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_3 0x414049C
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_4 0x41404A0
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_5 0x41404A4
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_6 0x41404A8
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_7 0x41404AC
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_8 0x41404B0
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_9 0x41404B4
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_10 0x41404B8
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_11 0x41404BC
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_12 0x41404C0
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_13 0x41404C4
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_14 0x41404C8
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_BANK_ID_15 0x41404CC
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_WDT_0 0x41404D0
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_WDT_1 0x41404D4
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_DEC_TOKEN_0 0x41404D8
+
+#define mmDCORE0_RTR0_CTRL_RGL_SRAM_DEC_TOKEN_1 0x41404DC
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AW_HI_ADDR 0x4140AB8
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AW_LO_ADDR 0x4140ABC
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AW_SET 0x4140AC0
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AR_HI_ADDR 0x4140AC4
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AR_LO_ADDR 0x4140AC8
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AR_SET 0x4140ACC
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AW_ADDR 0x4140AD0
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AW_SET 0x4140AD4
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AR_ADDR 0x4140AD8
+
+#define mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AR_SET 0x4140ADC
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_CFG_0 0x4140AE4
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_CFG_1 0x4140AE8
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_SHIFT_0 0x4140AEC
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_SHIFT_1 0x4140AF0
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_0 0x4140AF4
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_1 0x4140AF8
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_2 0x4140AFC
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_3 0x4140B00
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_4 0x4140B04
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_5 0x4140B08
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_6 0x4140B0C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_7 0x4140B10
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_8 0x4140B14
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_9 0x4140B18
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_10 0x4140B1C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_11 0x4140B20
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_12 0x4140B24
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_13 0x4140B28
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_14 0x4140B2C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_EXPECTED_LAT_15 0x4140B30
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_0 0x4140B34
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_1 0x4140B38
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_2 0x4140B3C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_3 0x4140B40
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_4 0x4140B44
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_5 0x4140B48
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_6 0x4140B4C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_7 0x4140B50
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_8 0x4140B54
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_9 0x4140B58
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_10 0x4140B5C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_11 0x4140B60
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_12 0x4140B64
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_13 0x4140B68
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_14 0x4140B6C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_TOKEN_15 0x4140B70
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_0 0x4140B74
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_1 0x4140B78
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_2 0x4140B7C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_3 0x4140B80
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_4 0x4140B84
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_5 0x4140B88
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_6 0x4140B8C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_7 0x4140B90
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_8 0x4140B94
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_9 0x4140B98
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_10 0x4140B9C
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_11 0x4140BA0
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_12 0x4140BA4
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_13 0x4140BA8
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_14 0x4140BAC
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_ID_15 0x4140BB0
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_WDT_0 0x4140BB4
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_WDT_1 0x4140BB8
+
+#define mmDCORE0_RTR0_CTRL_RGL_WR_RED_CNT 0x4140BBC
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_DEC_TOKEN_0 0x4140BC0
+
+#define mmDCORE0_RTR0_CTRL_RGL_MEM_DEC_TOKEN_1 0x4140BC4
+
+#endif /* ASIC_REG_DCORE0_RTR0_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
new file mode 100644
index 000000000000..3a7290b3a5c9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_REGS_H_
+#define ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_RTR0_MSTR_IF_RR_PRVT_HBW
+ * (Prototype: RANGE_REG_HBW)
+ *****************************************
+ */
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_LO_0 0x4142200
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_LO_1 0x4142204
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_LO_2 0x4142208
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_LO_3 0x414220C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_LO_4 0x4142210
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_LO_5 0x4142214
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_HI_0 0x4142218
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_HI_1 0x414221C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_HI_2 0x4142220
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_HI_3 0x4142224
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_HI_4 0x4142228
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_SHORT_HI_5 0x414222C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_LO_0 0x4142230
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_LO_1 0x4142234
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_LO_2 0x4142238
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_LO_3 0x414223C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_LO_4 0x4142240
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_LO_5 0x4142244
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_HI_0 0x4142248
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_HI_1 0x414224C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_HI_2 0x4142250
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_HI_3 0x4142254
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_HI_4 0x4142258
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_SHORT_HI_5 0x414225C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_LO_0 0x4142260
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_LO_1 0x4142264
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_LO_2 0x4142268
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_LO_3 0x414226C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_LO_4 0x4142270
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_LO_5 0x4142274
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_HI_0 0x4142278
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_HI_1 0x414227C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_HI_2 0x4142280
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_HI_3 0x4142284
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_HI_4 0x4142288
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_SHORT_HI_5 0x414228C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_LO_0 0x4142290
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_LO_1 0x4142294
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_LO_2 0x4142298
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_LO_3 0x414229C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_LO_4 0x41422A0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_LO_5 0x41422A4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_HI_0 0x41422A8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_HI_1 0x41422AC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_HI_2 0x41422B0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_HI_3 0x41422B4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_HI_4 0x41422B8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_SHORT_HI_5 0x41422BC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_HI_0 0x41422C0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_HI_1 0x41422C4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_HI_2 0x41422C8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_HI_3 0x41422CC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_LO_0 0x41422D0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_LO_1 0x41422D4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_LO_2 0x41422D8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MIN_LO_3 0x41422DC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_HI_0 0x41422E0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_HI_1 0x41422E4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_HI_2 0x41422E8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_HI_3 0x41422EC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_LO_0 0x41422F0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_LO_1 0x41422F4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_LO_2 0x41422F8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_RANGE_MAX_LO_3 0x41422FC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_HI_0 0x4142300
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_HI_1 0x4142304
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_HI_2 0x4142308
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_HI_3 0x414230C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_LO_0 0x4142310
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_LO_1 0x4142314
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_LO_2 0x4142318
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MIN_LO_3 0x414231C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_HI_0 0x4142320
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_HI_1 0x4142324
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_HI_2 0x4142328
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_HI_3 0x414232C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_LO_0 0x4142330
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_LO_1 0x4142334
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_LO_2 0x4142338
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_RANGE_MAX_LO_3 0x414233C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_PCIE_EN 0x4142340
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_PCIE_EN 0x4142344
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_HIT_AW 0x4142348
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_HIT_AW 0x414234C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SEC_HIT_AR 0x4142350
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_PRIV_HIT_AR 0x4142354
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AW_RAZWI_HI 0x4142358
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AW_RAZWI_LO 0x414235C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AR_RAZWI_HI 0x4142360
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AR_RAZWI_LO 0x4142364
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AW_RAZWI_XY 0x4142368
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AR_RAZWI_XY 0x414236C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AW_RAZWI_HAPPENED 0x4142370
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_AR_RAZWI_HAPPENED 0x4142374
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_RAZWI_ERR_RESP 0x4142378
+
+#endif /* ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
new file mode 100644
index 000000000000..5b52b88fee0f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_REGS_H_
+#define ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_RTR0_MSTR_IF_RR_PRVT_LBW
+ * (Prototype: RANGE_REG_LBW)
+ *****************************************
+ */
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_0 0x4142600
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_1 0x4142604
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_2 0x4142608
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_3 0x414260C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_4 0x4142610
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_5 0x4142614
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_6 0x4142618
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_7 0x414261C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_8 0x4142620
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_9 0x4142624
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_10 0x4142628
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_11 0x414262C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_12 0x4142630
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_SHORT_13 0x4142634
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_0 0x4142638
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_1 0x414263C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_2 0x4142640
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_3 0x4142644
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_4 0x4142648
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_5 0x414264C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_6 0x4142650
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_7 0x4142654
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_8 0x4142658
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_9 0x414265C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_10 0x4142660
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_11 0x4142664
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_12 0x4142668
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_SHORT_13 0x414266C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_0 0x4142670
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_1 0x4142674
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_2 0x4142678
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_3 0x414267C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_4 0x4142680
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_5 0x4142684
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_6 0x4142688
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_7 0x414268C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_8 0x4142690
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_9 0x4142694
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_10 0x4142698
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_11 0x414269C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_12 0x41426A0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_SHORT_13 0x41426A4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_0 0x41426A8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_1 0x41426AC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_2 0x41426B0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_3 0x41426B4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_4 0x41426B8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_5 0x41426BC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_6 0x41426C0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_7 0x41426C4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_8 0x41426C8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_9 0x41426CC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_10 0x41426D0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_11 0x41426D4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_12 0x41426D8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_SHORT_13 0x41426DC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_0 0x41426E0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_1 0x41426E4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_2 0x41426E8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MIN_3 0x41426EC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_0 0x41426F0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_1 0x41426F4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_2 0x41426F8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_RANGE_MAX_3 0x41426FC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_0 0x4142700
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_1 0x4142704
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_2 0x4142708
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MIN_3 0x414270C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_0 0x4142710
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_1 0x4142714
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_2 0x4142718
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_RANGE_MAX_3 0x414271C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_HIT_AW 0x4142720
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_HIT_AW 0x4142724
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SEC_HIT_AR 0x4142728
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_PRIV_HIT_AR 0x414272C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_AW_RAZWI 0x4142730
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_AR_RAZWI 0x4142734
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_AW_RAZWI_XY 0x4142738
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_AR_RAZWI_XY 0x414273C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_AW_RAZWI_HAPPENED 0x4142740
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_AR_RAZWI_HAPPENED 0x4142744
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_RAZWI_ERR_RESP 0x4142748
+
+#endif /* ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
new file mode 100644
index 000000000000..d9b3f5cd392b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_REGS_H_
+#define ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_RTR0_MSTR_IF_RR_SHRD_HBW
+ * (Prototype: RANGE_REG_HBW)
+ *****************************************
+ */
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_0 0x4142000
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_1 0x4142004
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_2 0x4142008
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_3 0x414200C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_4 0x4142010
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_5 0x4142014
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_0 0x4142018
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_1 0x414201C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_2 0x4142020
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_3 0x4142024
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_4 0x4142028
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_5 0x414202C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_0 0x4142030
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_1 0x4142034
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_2 0x4142038
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_3 0x414203C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_4 0x4142040
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_5 0x4142044
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_0 0x4142048
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_1 0x414204C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_2 0x4142050
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_3 0x4142054
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_4 0x4142058
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_5 0x414205C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_0 0x4142060
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_1 0x4142064
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_2 0x4142068
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_3 0x414206C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_4 0x4142070
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_5 0x4142074
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_0 0x4142078
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_1 0x414207C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_2 0x4142080
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_3 0x4142084
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_4 0x4142088
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_5 0x414208C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_0 0x4142090
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_1 0x4142094
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_2 0x4142098
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_3 0x414209C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_4 0x41420A0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_5 0x41420A4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_0 0x41420A8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_1 0x41420AC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_2 0x41420B0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_3 0x41420B4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_4 0x41420B8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_5 0x41420BC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_HI_0 0x41420C0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_HI_1 0x41420C4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_HI_2 0x41420C8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_HI_3 0x41420CC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_LO_0 0x41420D0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_LO_1 0x41420D4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_LO_2 0x41420D8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_LO_3 0x41420DC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_HI_0 0x41420E0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_HI_1 0x41420E4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_HI_2 0x41420E8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_HI_3 0x41420EC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_LO_0 0x41420F0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_LO_1 0x41420F4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_LO_2 0x41420F8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_LO_3 0x41420FC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_HI_0 0x4142100
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_HI_1 0x4142104
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_HI_2 0x4142108
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_HI_3 0x414210C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_LO_0 0x4142110
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_LO_1 0x4142114
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_LO_2 0x4142118
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_LO_3 0x414211C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_HI_0 0x4142120
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_HI_1 0x4142124
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_HI_2 0x4142128
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_HI_3 0x414212C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_LO_0 0x4142130
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_LO_1 0x4142134
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_LO_2 0x4142138
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_LO_3 0x414213C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_PCIE_EN 0x4142140
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_PCIE_EN 0x4142144
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_HIT_AW 0x4142148
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_HIT_AW 0x414214C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_HIT_AR 0x4142150
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_HIT_AR 0x4142154
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_HI 0x4142158
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_LO 0x414215C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_HI 0x4142160
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_LO 0x4142164
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_XY 0x4142168
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_XY 0x414216C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_HAPPENED 0x4142170
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_HAPPENED 0x4142174
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_RAZWI_ERR_RESP 0x4142178
+
+#endif /* ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
new file mode 100644
index 000000000000..1bba940d3031
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_REGS_H_
+#define ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_RTR0_MSTR_IF_RR_SHRD_LBW
+ * (Prototype: RANGE_REG_LBW)
+ *****************************************
+ */
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_0 0x4142400
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_1 0x4142404
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_2 0x4142408
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_3 0x414240C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_4 0x4142410
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_5 0x4142414
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_6 0x4142418
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_7 0x414241C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_8 0x4142420
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_9 0x4142424
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_10 0x4142428
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_11 0x414242C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_12 0x4142430
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_13 0x4142434
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_0 0x4142438
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_1 0x414243C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_2 0x4142440
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_3 0x4142444
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_4 0x4142448
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_5 0x414244C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_6 0x4142450
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_7 0x4142454
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_8 0x4142458
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_9 0x414245C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_10 0x4142460
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_11 0x4142464
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_12 0x4142468
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_13 0x414246C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_0 0x4142470
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_1 0x4142474
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_2 0x4142478
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_3 0x414247C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_4 0x4142480
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_5 0x4142484
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_6 0x4142488
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_7 0x414248C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_8 0x4142490
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_9 0x4142494
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_10 0x4142498
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_11 0x414249C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_12 0x41424A0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_13 0x41424A4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_0 0x41424A8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_1 0x41424AC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_2 0x41424B0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_3 0x41424B4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_4 0x41424B8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_5 0x41424BC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_6 0x41424C0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_7 0x41424C4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_8 0x41424C8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_9 0x41424CC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_10 0x41424D0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_11 0x41424D4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_12 0x41424D8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_13 0x41424DC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_0 0x41424E0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_1 0x41424E4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_2 0x41424E8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_3 0x41424EC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_0 0x41424F0
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_1 0x41424F4
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_2 0x41424F8
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_3 0x41424FC
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_0 0x4142500
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_1 0x4142504
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_2 0x4142508
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_3 0x414250C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_0 0x4142510
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_1 0x4142514
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_2 0x4142518
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_3 0x414251C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_HIT_AW 0x4142520
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_HIT_AW 0x4142524
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_HIT_AR 0x4142528
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_HIT_AR 0x414252C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AW_RAZWI 0x4142530
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AR_RAZWI 0x4142534
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AW_RAZWI_XY 0x4142538
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AR_RAZWI_XY 0x414253C
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AW_RAZWI_HAPPENED 0x4142540
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AR_RAZWI_HAPPENED 0x4142544
+
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_RAZWI_ERR_RESP 0x4142548
+
+#endif /* ASIC_REG_DCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
new file mode 100644
index 000000000000..f21540501cdd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_SYNC_MNGR_GLBL_MASKS_H_
+#define ASIC_REG_DCORE0_SYNC_MNGR_GLBL_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_SYNC_MNGR_GLBL
+ * (Prototype: SOB_GLBL)
+ *****************************************
+ */
+
+/* DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK */
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK_SO_OVERFLOW_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK_SO_OVERFLOW_MASK 0x1
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK_MST_UNALIGN4B_SHIFT 1
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK_MST_UNALIGN4B_MASK 0x2
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK_MST_RSP_ERR_SHIFT 2
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK_MST_RSP_ERR_MASK 0x4
+
+/* DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE */
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_CAUSE_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_CAUSE_MASK 0x7
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_LOG_SHIFT 4
+#define DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_LOG_MASK 0xFFFF0
+
+/* DCORE0_SYNC_MNGR_GLBL_L2H_CPMR_L */
+#define DCORE0_SYNC_MNGR_GLBL_L2H_CPMR_L_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_L2H_CPMR_L_VAL_MASK 0xFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_L2H_CPMR_H */
+#define DCORE0_SYNC_MNGR_GLBL_L2H_CPMR_H_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_L2H_CPMR_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_L2H_MASK_L */
+#define DCORE0_SYNC_MNGR_GLBL_L2H_MASK_L_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_L2H_MASK_L_VAL_MASK 0xFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_L2H_MASK_H */
+#define DCORE0_SYNC_MNGR_GLBL_L2H_MASK_H_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_L2H_MASK_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_ASID_SEC */
+#define DCORE0_SYNC_MNGR_GLBL_ASID_SEC_ASID_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_ASID_SEC_ASID_MASK 0xFFFF
+#define DCORE0_SYNC_MNGR_GLBL_ASID_SEC_BP_MMU_SHIFT 16
+#define DCORE0_SYNC_MNGR_GLBL_ASID_SEC_BP_MMU_MASK 0x10000
+
+/* DCORE0_SYNC_MNGR_GLBL_ASID_PRIV_ONLY */
+#define DCORE0_SYNC_MNGR_GLBL_ASID_PRIV_ONLY_ASID_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_ASID_PRIV_ONLY_ASID_MASK 0xFFFF
+#define DCORE0_SYNC_MNGR_GLBL_ASID_PRIV_ONLY_BP_MMU_SHIFT 16
+#define DCORE0_SYNC_MNGR_GLBL_ASID_PRIV_ONLY_BP_MMU_MASK 0x10000
+
+/* DCORE0_SYNC_MNGR_GLBL_LBW_DELAY */
+#define DCORE0_SYNC_MNGR_GLBL_LBW_DELAY_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_LBW_DELAY_VAL_MASK 0xFFFF
+#define DCORE0_SYNC_MNGR_GLBL_LBW_DELAY_EN_SHIFT 16
+#define DCORE0_SYNC_MNGR_GLBL_LBW_DELAY_EN_MASK 0x10000
+
+/* DCORE0_SYNC_MNGR_GLBL_PI_SIZE */
+#define DCORE0_SYNC_MNGR_GLBL_PI_SIZE_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_PI_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_SOB_ONLY */
+#define DCORE0_SYNC_MNGR_GLBL_SOB_ONLY_EN_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_SOB_ONLY_EN_MASK 0x1
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_INTR */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_MASK 0x1
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_MASK_SHIFT 8
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_MASK_MASK 0x100
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_INTR_QUEUE_INDEX_SHIFT 16
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_INTR_QUEUE_INDEX_MASK 0x3F0000
+
+/* DCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV */
+#define DCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV_ASID_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV_ASID_MASK 0xFFFF
+#define DCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV_BP_MMU_SHIFT 16
+#define DCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV_BP_MMU_MASK 0x10000
+
+/* DCORE0_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE */
+#define DCORE0_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2 */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_VAL_MASK 0xFF
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_PI */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_PI_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_SEC */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_SEC_SEC_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_SEC_SEC_MASK 0x1
+#define DCORE0_SYNC_MNGR_GLBL_CQ_SEC_PRIV_SHIFT 4
+#define DCORE0_SYNC_MNGR_GLBL_CQ_SEC_PRIV_MASK 0x10
+
+/* DCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L */
+#define DCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_ADDRL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_ADDRL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H */
+#define DCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_ADDRH_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_ADDRH_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_LBW_DATA */
+#define DCORE0_SYNC_MNGR_GLBL_LBW_DATA_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_LBW_DATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE */
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_MODE_SHIFT 0
+#define DCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_MODE_MASK 0x1
+
+#endif /* ASIC_REG_DCORE0_SYNC_MNGR_GLBL_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
new file mode 100644
index 000000000000..c3c4991e6660
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
@@ -0,0 +1,1203 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_SYNC_MNGR_GLBL_REGS_H_
+#define ASIC_REG_DCORE0_SYNC_MNGR_GLBL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_SYNC_MNGR_GLBL
+ * (Prototype: SOB_GLBL)
+ *****************************************
+ */
+
+#define mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_MASK 0x411E000
+
+#define mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE 0x411E004
+
+#define mmDCORE0_SYNC_MNGR_GLBL_L2H_CPMR_L 0x411E008
+
+#define mmDCORE0_SYNC_MNGR_GLBL_L2H_CPMR_H 0x411E00C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_L2H_MASK_L 0x411E020
+
+#define mmDCORE0_SYNC_MNGR_GLBL_L2H_MASK_H 0x411E024
+
+#define mmDCORE0_SYNC_MNGR_GLBL_ASID_SEC 0x411E030
+
+#define mmDCORE0_SYNC_MNGR_GLBL_ASID_PRIV_ONLY 0x411E034
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DELAY 0x411E038
+
+#define mmDCORE0_SYNC_MNGR_GLBL_PI_SIZE 0x411E03C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_SOB_ONLY 0x411E040
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INTR 0x411E044
+
+#define mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV 0x411E048
+
+#define mmDCORE0_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE 0x411E04C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 0x411E050
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1 0x411E054
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_2 0x411E058
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_3 0x411E05C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_4 0x411E060
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_5 0x411E064
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_6 0x411E068
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_7 0x411E06C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_8 0x411E070
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_9 0x411E074
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_10 0x411E078
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_11 0x411E07C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_12 0x411E080
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_13 0x411E084
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_14 0x411E088
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_15 0x411E08C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_16 0x411E090
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_17 0x411E094
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_18 0x411E098
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_19 0x411E09C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_20 0x411E0A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_21 0x411E0A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_22 0x411E0A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_23 0x411E0AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_24 0x411E0B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_25 0x411E0B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_26 0x411E0B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_27 0x411E0BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_28 0x411E0C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_29 0x411E0C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_30 0x411E0C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_31 0x411E0CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_32 0x411E0D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_33 0x411E0D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_34 0x411E0D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_35 0x411E0DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_36 0x411E0E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_37 0x411E0E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_38 0x411E0E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_39 0x411E0EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_40 0x411E0F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_41 0x411E0F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_42 0x411E0F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_43 0x411E0FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_44 0x411E100
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_45 0x411E104
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_46 0x411E108
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_47 0x411E10C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_48 0x411E110
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_49 0x411E114
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_50 0x411E118
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_51 0x411E11C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_52 0x411E120
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_53 0x411E124
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_54 0x411E128
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_55 0x411E12C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_56 0x411E130
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_57 0x411E134
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_58 0x411E138
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_59 0x411E13C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_60 0x411E140
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_61 0x411E144
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_62 0x411E148
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63 0x411E14C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 0x411E150
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1 0x411E154
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_2 0x411E158
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_3 0x411E15C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_4 0x411E160
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_5 0x411E164
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_6 0x411E168
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_7 0x411E16C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_8 0x411E170
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_9 0x411E174
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_10 0x411E178
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_11 0x411E17C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_12 0x411E180
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_13 0x411E184
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_14 0x411E188
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_15 0x411E18C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_16 0x411E190
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_17 0x411E194
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_18 0x411E198
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_19 0x411E19C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_20 0x411E1A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_21 0x411E1A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_22 0x411E1A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_23 0x411E1AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_24 0x411E1B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_25 0x411E1B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_26 0x411E1B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_27 0x411E1BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_28 0x411E1C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_29 0x411E1C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_30 0x411E1C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_31 0x411E1CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_32 0x411E1D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_33 0x411E1D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_34 0x411E1D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_35 0x411E1DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_36 0x411E1E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_37 0x411E1E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_38 0x411E1E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_39 0x411E1EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_40 0x411E1F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_41 0x411E1F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_42 0x411E1F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_43 0x411E1FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_44 0x411E200
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_45 0x411E204
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_46 0x411E208
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_47 0x411E20C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_48 0x411E210
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_49 0x411E214
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_50 0x411E218
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_51 0x411E21C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_52 0x411E220
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_53 0x411E224
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_54 0x411E228
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_55 0x411E22C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_56 0x411E230
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_57 0x411E234
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_58 0x411E238
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_59 0x411E23C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_60 0x411E240
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_61 0x411E244
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_62 0x411E248
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63 0x411E24C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 0x411E250
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_1 0x411E254
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_2 0x411E258
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_3 0x411E25C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_4 0x411E260
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_5 0x411E264
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_6 0x411E268
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_7 0x411E26C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_8 0x411E270
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_9 0x411E274
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_10 0x411E278
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_11 0x411E27C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_12 0x411E280
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_13 0x411E284
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_14 0x411E288
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_15 0x411E28C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_16 0x411E290
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_17 0x411E294
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_18 0x411E298
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_19 0x411E29C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_20 0x411E2A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_21 0x411E2A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_22 0x411E2A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_23 0x411E2AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_24 0x411E2B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_25 0x411E2B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_26 0x411E2B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_27 0x411E2BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_28 0x411E2C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_29 0x411E2C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_30 0x411E2C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_31 0x411E2CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_32 0x411E2D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_33 0x411E2D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_34 0x411E2D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_35 0x411E2DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_36 0x411E2E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_37 0x411E2E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_38 0x411E2E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_39 0x411E2EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_40 0x411E2F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_41 0x411E2F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_42 0x411E2F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_43 0x411E2FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_44 0x411E300
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_45 0x411E304
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_46 0x411E308
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_47 0x411E30C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_48 0x411E310
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_49 0x411E314
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_50 0x411E318
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_51 0x411E31C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_52 0x411E320
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_53 0x411E324
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_54 0x411E328
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_55 0x411E32C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_56 0x411E330
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_57 0x411E334
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_58 0x411E338
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_59 0x411E33C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_60 0x411E340
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_61 0x411E344
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_62 0x411E348
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63 0x411E34C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_0 0x411E350
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_1 0x411E354
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_2 0x411E358
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_3 0x411E35C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_4 0x411E360
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_5 0x411E364
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_6 0x411E368
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_7 0x411E36C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_8 0x411E370
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_9 0x411E374
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_10 0x411E378
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_11 0x411E37C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_12 0x411E380
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_13 0x411E384
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_14 0x411E388
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_15 0x411E38C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_16 0x411E390
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_17 0x411E394
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_18 0x411E398
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_19 0x411E39C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_20 0x411E3A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_21 0x411E3A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_22 0x411E3A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_23 0x411E3AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_24 0x411E3B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_25 0x411E3B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_26 0x411E3B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_27 0x411E3BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_28 0x411E3C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_29 0x411E3C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_30 0x411E3C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_31 0x411E3CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_32 0x411E3D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_33 0x411E3D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_34 0x411E3D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_35 0x411E3DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_36 0x411E3E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_37 0x411E3E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_38 0x411E3E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_39 0x411E3EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_40 0x411E3F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_41 0x411E3F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_42 0x411E3F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_43 0x411E3FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_44 0x411E400
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_45 0x411E404
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_46 0x411E408
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_47 0x411E40C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_48 0x411E410
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_49 0x411E414
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_50 0x411E418
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_51 0x411E41C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_52 0x411E420
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_53 0x411E424
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_54 0x411E428
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_55 0x411E42C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_56 0x411E430
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_57 0x411E434
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_58 0x411E438
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_59 0x411E43C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_60 0x411E440
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_61 0x411E444
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_62 0x411E448
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63 0x411E44C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_0 0x411E450
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_1 0x411E454
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_2 0x411E458
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_3 0x411E45C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_4 0x411E460
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_5 0x411E464
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_6 0x411E468
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_7 0x411E46C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_8 0x411E470
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_9 0x411E474
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_10 0x411E478
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_11 0x411E47C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_12 0x411E480
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_13 0x411E484
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_14 0x411E488
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_15 0x411E48C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_16 0x411E490
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_17 0x411E494
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_18 0x411E498
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_19 0x411E49C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_20 0x411E4A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_21 0x411E4A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_22 0x411E4A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_23 0x411E4AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_24 0x411E4B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_25 0x411E4B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_26 0x411E4B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_27 0x411E4BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_28 0x411E4C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_29 0x411E4C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_30 0x411E4C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_31 0x411E4CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_32 0x411E4D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_33 0x411E4D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_34 0x411E4D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_35 0x411E4DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_36 0x411E4E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_37 0x411E4E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_38 0x411E4E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_39 0x411E4EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_40 0x411E4F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_41 0x411E4F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_42 0x411E4F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_43 0x411E4FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_44 0x411E500
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_45 0x411E504
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_46 0x411E508
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_47 0x411E50C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_48 0x411E510
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_49 0x411E514
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_50 0x411E518
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_51 0x411E51C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_52 0x411E520
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_53 0x411E524
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_54 0x411E528
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_55 0x411E52C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_56 0x411E530
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_57 0x411E534
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_58 0x411E538
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_59 0x411E53C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_60 0x411E540
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_61 0x411E544
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_62 0x411E548
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_SEC_63 0x411E54C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 0x411E550
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_1 0x411E554
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_2 0x411E558
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_3 0x411E55C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_4 0x411E560
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_5 0x411E564
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_6 0x411E568
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_7 0x411E56C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_8 0x411E570
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_9 0x411E574
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_10 0x411E578
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_11 0x411E57C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_12 0x411E580
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_13 0x411E584
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_14 0x411E588
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_15 0x411E58C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_16 0x411E590
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_17 0x411E594
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_18 0x411E598
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_19 0x411E59C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_20 0x411E5A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_21 0x411E5A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_22 0x411E5A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_23 0x411E5AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_24 0x411E5B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_25 0x411E5B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_26 0x411E5B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_27 0x411E5BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_28 0x411E5C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_29 0x411E5C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_30 0x411E5C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_31 0x411E5CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_32 0x411E5D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_33 0x411E5D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_34 0x411E5D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_35 0x411E5DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_36 0x411E5E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_37 0x411E5E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_38 0x411E5E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_39 0x411E5EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_40 0x411E5F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_41 0x411E5F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_42 0x411E5F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_43 0x411E5FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_44 0x411E600
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_45 0x411E604
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_46 0x411E608
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_47 0x411E60C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_48 0x411E610
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_49 0x411E614
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_50 0x411E618
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_51 0x411E61C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_52 0x411E620
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_53 0x411E624
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_54 0x411E628
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_55 0x411E62C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_56 0x411E630
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_57 0x411E634
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_58 0x411E638
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_59 0x411E63C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_60 0x411E640
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_61 0x411E644
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_62 0x411E648
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63 0x411E64C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 0x411E650
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_1 0x411E654
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_2 0x411E658
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_3 0x411E65C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_4 0x411E660
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_5 0x411E664
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_6 0x411E668
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_7 0x411E66C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_8 0x411E670
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_9 0x411E674
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_10 0x411E678
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_11 0x411E67C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_12 0x411E680
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_13 0x411E684
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_14 0x411E688
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_15 0x411E68C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_16 0x411E690
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_17 0x411E694
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_18 0x411E698
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_19 0x411E69C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_20 0x411E6A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_21 0x411E6A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_22 0x411E6A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_23 0x411E6AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_24 0x411E6B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_25 0x411E6B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_26 0x411E6B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_27 0x411E6BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_28 0x411E6C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_29 0x411E6C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_30 0x411E6C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_31 0x411E6CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_32 0x411E6D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_33 0x411E6D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_34 0x411E6D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_35 0x411E6DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_36 0x411E6E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_37 0x411E6E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_38 0x411E6E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_39 0x411E6EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_40 0x411E6F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_41 0x411E6F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_42 0x411E6F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_43 0x411E6FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_44 0x411E700
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_45 0x411E704
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_46 0x411E708
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_47 0x411E70C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_48 0x411E710
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_49 0x411E714
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_50 0x411E718
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_51 0x411E71C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_52 0x411E720
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_53 0x411E724
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_54 0x411E728
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_55 0x411E72C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_56 0x411E730
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_57 0x411E734
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_58 0x411E738
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_59 0x411E73C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_60 0x411E740
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_61 0x411E744
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_62 0x411E748
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63 0x411E74C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 0x411E750
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_1 0x411E754
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_2 0x411E758
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_3 0x411E75C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_4 0x411E760
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_5 0x411E764
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_6 0x411E768
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_7 0x411E76C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_8 0x411E770
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_9 0x411E774
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_10 0x411E778
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_11 0x411E77C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_12 0x411E780
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_13 0x411E784
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_14 0x411E788
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_15 0x411E78C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_16 0x411E790
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_17 0x411E794
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_18 0x411E798
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_19 0x411E79C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_20 0x411E7A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_21 0x411E7A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_22 0x411E7A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_23 0x411E7AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_24 0x411E7B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_25 0x411E7B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_26 0x411E7B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_27 0x411E7BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_28 0x411E7C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_29 0x411E7C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_30 0x411E7C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_31 0x411E7CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_32 0x411E7D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_33 0x411E7D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_34 0x411E7D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_35 0x411E7DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_36 0x411E7E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_37 0x411E7E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_38 0x411E7E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_39 0x411E7EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_40 0x411E7F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_41 0x411E7F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_42 0x411E7F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_43 0x411E7FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_44 0x411E800
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_45 0x411E804
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_46 0x411E808
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_47 0x411E80C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_48 0x411E810
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_49 0x411E814
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_50 0x411E818
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_51 0x411E81C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_52 0x411E820
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_53 0x411E824
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_54 0x411E828
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_55 0x411E82C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_56 0x411E830
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_57 0x411E834
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_58 0x411E838
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_59 0x411E83C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_60 0x411E840
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_61 0x411E844
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_62 0x411E848
+
+#define mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63 0x411E84C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_0 0x411E850
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_1 0x411E854
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_2 0x411E858
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_3 0x411E85C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_4 0x411E860
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_5 0x411E864
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_6 0x411E868
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_7 0x411E86C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_8 0x411E870
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_9 0x411E874
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_10 0x411E878
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_11 0x411E87C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_12 0x411E880
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_13 0x411E884
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_14 0x411E888
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_15 0x411E88C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_16 0x411E890
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_17 0x411E894
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_18 0x411E898
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_19 0x411E89C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_20 0x411E8A0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_21 0x411E8A4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_22 0x411E8A8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_23 0x411E8AC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_24 0x411E8B0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_25 0x411E8B4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_26 0x411E8B8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_27 0x411E8BC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_28 0x411E8C0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_29 0x411E8C4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_30 0x411E8C8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_31 0x411E8CC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_32 0x411E8D0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_33 0x411E8D4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_34 0x411E8D8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_35 0x411E8DC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_36 0x411E8E0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_37 0x411E8E4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_38 0x411E8E8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_39 0x411E8EC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_40 0x411E8F0
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_41 0x411E8F4
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_42 0x411E8F8
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_43 0x411E8FC
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_44 0x411E900
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_45 0x411E904
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_46 0x411E908
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_47 0x411E90C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_48 0x411E910
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_49 0x411E914
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_50 0x411E918
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_51 0x411E91C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_52 0x411E920
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_53 0x411E924
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_54 0x411E928
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_55 0x411E92C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_56 0x411E930
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_57 0x411E934
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_58 0x411E938
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_59 0x411E93C
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_60 0x411E940
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_61 0x411E944
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_62 0x411E948
+
+#define mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63 0x411E94C
+
+#endif /* ASIC_REG_DCORE0_SYNC_MNGR_GLBL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
new file mode 100644
index 000000000000..76b273a41255
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_MASKS_H_
+#define ASIC_REG_DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_SYNC_MNGR_MSTR_IF_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_WR_MASK 0x3FF
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_RD_SHIFT 16
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_RD_MASK 0x3FF0000
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP_WR_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP_RD_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP_RD_MASK 0x10
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_STRONG_ORDER */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_STRONG_ORDER_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_STRONG_ORDER_WR_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_STRONG_ORDER_RD_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_STRONG_ORDER_RD_MASK 0x10
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_NO_SNOOP */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_NO_SNOOP_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_NO_SNOOP_WR_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_NO_SNOOP_RD_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_NO_SNOOP_RD_MASK 0x10
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_IND_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_IND_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_DTYPE_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_DTYPE_MASK 0xF0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_OP_SHIFT 8
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_OP_MASK 0x300
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_ROUND_SHIFT 12
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_ROUND_MASK 0x3000
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_MAX_SHIFT 16
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION_MAX_MASK 0x10000
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC_IND_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC_IND_MASK 0x3
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC_ADDITION_SIZE_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC_ADDITION_SIZE_MASK 0xFF0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC_MSB_MASK_SHIFT 12
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC_MSB_MASK_MASK 0x1F000
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_QOS */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_QOS_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_QOS_WR_MASK 0xF
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_QOS_RD_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_QOS_RD_MASK 0x70
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_27_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_27_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_28_SHIFT 1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_28_MASK 0x2
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_30_SHIFT 2
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_30_MASK 0x4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_31_SHIFT 3
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD_WR_BIT_31_MASK 0x8
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_EMEM_CPAGE */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_EMEM_CPAGE_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_EMEM_CPAGE_WR_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_EMEM_CPAGE_RD_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_EMEM_CPAGE_RD_MASK 0x10
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_CORE */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_CORE_WR_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_CORE_WR_MASK 0x1
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_CORE_RD_SHIFT 4
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_CORE_RD_MASK 0x10
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_E2E_COORD */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_E2E_COORD_X_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_E2E_COORD_X_MASK 0x1F
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_E2E_COORD_Y_SHIFT 8
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_E2E_COORD_Y_MASK 0xF00
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_LO */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_LO_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_HI */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_HI_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_HI_VAL_MASK 0x3FF
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_LO */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_LO_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_LO_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_HI */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_HI_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_HI_VAL_MASK 0x3FF
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_COORD */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_COORD_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_COORD_VAL_MASK 0x3FF
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_LOCK */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_LOCK_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_LOCK_VAL_MASK 0x1
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_RSVD */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_RSVD_BIT_21_11_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_RSVD_BIT_21_11_MASK 0x7FF
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_RSVD_BIT_22_SHIFT 12
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_RSVD_BIT_22_MASK 0x1000
+
+/* DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_OVRD */
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_OVRD_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_OVRD_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
new file mode 100644
index 000000000000..0bddc734329f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_SYNC_MNGR_MSTR_IF_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID 0x411FA80
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP 0x411FA84
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_STRONG_ORDER 0x411FA88
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_NO_SNOOP 0x411FA8C
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_REDUCTION 0x411FA90
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_ATOMIC 0x411FA94
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_QOS 0x411FA98
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RSVD 0x411FA9C
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_EMEM_CPAGE 0x411FAA0
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_CORE 0x411FAA4
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_E2E_COORD 0x411FAA8
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_LO 0x411FAB0
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_WR_OVRD_HI 0x411FAB4
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_LO 0x411FAB8
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_RD_OVRD_HI 0x411FABC
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_COORD 0x411FAC0
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_LOCK 0x411FAC4
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_RSVD 0x411FAC8
+
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_LB_OVRD 0x411FACC
+
+#endif /* ASIC_REG_DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
new file mode 100644
index 000000000000..3a5b27df0ab4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_SYNC_MNGR_OBJS_MASKS_H_
+#define ASIC_REG_DCORE0_SYNC_MNGR_OBJS_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_SYNC_MNGR_OBJS
+ * (Prototype: SOB_OBJS)
+ *****************************************
+ */
+
+/* DCORE0_SYNC_MNGR_OBJS_SOB_OBJ */
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK 0x7FFF
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_LONG_SOB_SHIFT 24
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_LONG_SOB_MASK 0x1000000
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_TRACE_EVICT_SHIFT 30
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_TRACE_EVICT_MASK 0x40000000
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_SHIFT 31
+#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK 0x80000000
+
+/* DCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL */
+#define DCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_ADDRL_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_ADDRL_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH */
+#define DCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_ADDRH_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_ADDRH_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA */
+#define DCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_DATA_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_DATA_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_OBJS_MON_ARM */
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK 0xFF
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_SHIFT 8
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK 0xFF00
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_SHIFT 16
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK 0x10000
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_SHIFT 17
+#define DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK 0xFFFE0000
+
+/* DCORE0_SYNC_MNGR_OBJS_MON_CONFIG */
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LONG_SOB_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LONG_SOB_MASK 0x1
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_SHIFT 4
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK 0x10
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_WR_NUM_SHIFT 5
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_WR_NUM_MASK 0x60
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LBW_EN_SHIFT 8
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LBW_EN_MASK 0x100
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_MSB_SID_SHIFT 16
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_MSB_SID_MASK 0xF0000
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LONG_HIGH_GROUP_SHIFT 31
+#define DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LONG_HIGH_GROUP_MASK 0x80000000
+
+/* DCORE0_SYNC_MNGR_OBJS_MON_STATUS */
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_VALID_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_VALID_MASK 0x1
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PENDING_SHIFT 1
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PENDING_MASK 0x1FE
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PROT_SHIFT 9
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PROT_MASK 0x200
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PRIV_SHIFT 10
+#define DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PRIV_MASK 0x400
+
+/* DCORE0_SYNC_MNGR_OBJS_SM_SEC */
+#define DCORE0_SYNC_MNGR_OBJS_SM_SEC_SEC_VEC_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_SM_SEC_SEC_VEC_MASK 0xFFFFFFFF
+
+/* DCORE0_SYNC_MNGR_OBJS_SM_PRIV */
+#define DCORE0_SYNC_MNGR_OBJS_SM_PRIV_PRIV_SHIFT 0
+#define DCORE0_SYNC_MNGR_OBJS_SM_PRIV_PRIV_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_SYNC_MNGR_OBJS_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
new file mode 100644
index 000000000000..8f082a1c9b1b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
@@ -0,0 +1,43543 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_SYNC_MNGR_OBJS_REGS_H_
+#define ASIC_REG_DCORE0_SYNC_MNGR_OBJS_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_SYNC_MNGR_OBJS
+ * (Prototype: SOB_OBJS)
+ *****************************************
+ */
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4100000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4100004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2 0x4100008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3 0x410000C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4 0x4100010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5 0x4100014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6 0x4100018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7 0x410001C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8 0x4100020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_9 0x4100024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_10 0x4100028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_11 0x410002C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_12 0x4100030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_13 0x4100034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_14 0x4100038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_15 0x410003C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_16 0x4100040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_17 0x4100044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_18 0x4100048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_19 0x410004C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_20 0x4100050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_21 0x4100054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_22 0x4100058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_23 0x410005C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_24 0x4100060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_25 0x4100064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_26 0x4100068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_27 0x410006C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_28 0x4100070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_29 0x4100074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_30 0x4100078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_31 0x410007C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_32 0x4100080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_33 0x4100084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_34 0x4100088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_35 0x410008C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_36 0x4100090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_37 0x4100094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_38 0x4100098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_39 0x410009C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_40 0x41000A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_41 0x41000A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_42 0x41000A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_43 0x41000AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_44 0x41000B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_45 0x41000B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_46 0x41000B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_47 0x41000BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_48 0x41000C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_49 0x41000C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_50 0x41000C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_51 0x41000CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_52 0x41000D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_53 0x41000D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_54 0x41000D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_55 0x41000DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_56 0x41000E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_57 0x41000E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_58 0x41000E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_59 0x41000EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_60 0x41000F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_61 0x41000F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_62 0x41000F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_63 0x41000FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_64 0x4100100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_65 0x4100104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_66 0x4100108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_67 0x410010C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_68 0x4100110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_69 0x4100114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_70 0x4100118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_71 0x410011C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_72 0x4100120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_73 0x4100124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_74 0x4100128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_75 0x410012C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_76 0x4100130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_77 0x4100134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_78 0x4100138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_79 0x410013C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_80 0x4100140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_81 0x4100144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_82 0x4100148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_83 0x410014C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_84 0x4100150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_85 0x4100154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_86 0x4100158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_87 0x410015C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_88 0x4100160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_89 0x4100164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_90 0x4100168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_91 0x410016C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_92 0x4100170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_93 0x4100174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_94 0x4100178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_95 0x410017C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_96 0x4100180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_97 0x4100184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_98 0x4100188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_99 0x410018C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_100 0x4100190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_101 0x4100194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_102 0x4100198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_103 0x410019C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_104 0x41001A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_105 0x41001A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_106 0x41001A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_107 0x41001AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_108 0x41001B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_109 0x41001B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_110 0x41001B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_111 0x41001BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_112 0x41001C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_113 0x41001C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_114 0x41001C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_115 0x41001CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_116 0x41001D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_117 0x41001D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_118 0x41001D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_119 0x41001DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_120 0x41001E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_121 0x41001E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_122 0x41001E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_123 0x41001EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_124 0x41001F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_125 0x41001F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_126 0x41001F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_127 0x41001FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_128 0x4100200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_129 0x4100204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_130 0x4100208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_131 0x410020C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_132 0x4100210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_133 0x4100214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_134 0x4100218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_135 0x410021C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_136 0x4100220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_137 0x4100224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_138 0x4100228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_139 0x410022C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_140 0x4100230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_141 0x4100234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_142 0x4100238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_143 0x410023C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_144 0x4100240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_145 0x4100244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_146 0x4100248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_147 0x410024C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_148 0x4100250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_149 0x4100254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_150 0x4100258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_151 0x410025C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_152 0x4100260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_153 0x4100264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_154 0x4100268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_155 0x410026C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_156 0x4100270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_157 0x4100274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_158 0x4100278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_159 0x410027C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_160 0x4100280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_161 0x4100284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_162 0x4100288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_163 0x410028C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_164 0x4100290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_165 0x4100294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_166 0x4100298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_167 0x410029C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_168 0x41002A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_169 0x41002A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_170 0x41002A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_171 0x41002AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_172 0x41002B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_173 0x41002B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_174 0x41002B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_175 0x41002BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_176 0x41002C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_177 0x41002C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_178 0x41002C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_179 0x41002CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_180 0x41002D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_181 0x41002D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_182 0x41002D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_183 0x41002DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_184 0x41002E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_185 0x41002E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_186 0x41002E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_187 0x41002EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_188 0x41002F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_189 0x41002F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_190 0x41002F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_191 0x41002FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_192 0x4100300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_193 0x4100304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_194 0x4100308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_195 0x410030C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_196 0x4100310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_197 0x4100314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_198 0x4100318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_199 0x410031C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_200 0x4100320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_201 0x4100324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_202 0x4100328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_203 0x410032C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_204 0x4100330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_205 0x4100334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_206 0x4100338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_207 0x410033C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_208 0x4100340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_209 0x4100344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_210 0x4100348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_211 0x410034C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_212 0x4100350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_213 0x4100354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_214 0x4100358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_215 0x410035C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_216 0x4100360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_217 0x4100364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_218 0x4100368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_219 0x410036C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_220 0x4100370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_221 0x4100374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_222 0x4100378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_223 0x410037C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_224 0x4100380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_225 0x4100384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_226 0x4100388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_227 0x410038C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_228 0x4100390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_229 0x4100394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_230 0x4100398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_231 0x410039C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_232 0x41003A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_233 0x41003A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_234 0x41003A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_235 0x41003AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_236 0x41003B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_237 0x41003B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_238 0x41003B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_239 0x41003BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_240 0x41003C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_241 0x41003C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_242 0x41003C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_243 0x41003CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_244 0x41003D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_245 0x41003D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_246 0x41003D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_247 0x41003DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_248 0x41003E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_249 0x41003E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_250 0x41003E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_251 0x41003EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_252 0x41003F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_253 0x41003F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_254 0x41003F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_255 0x41003FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_256 0x4100400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_257 0x4100404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_258 0x4100408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_259 0x410040C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_260 0x4100410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_261 0x4100414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_262 0x4100418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_263 0x410041C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_264 0x4100420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_265 0x4100424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_266 0x4100428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_267 0x410042C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_268 0x4100430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_269 0x4100434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_270 0x4100438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_271 0x410043C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_272 0x4100440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_273 0x4100444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_274 0x4100448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_275 0x410044C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_276 0x4100450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_277 0x4100454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_278 0x4100458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_279 0x410045C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_280 0x4100460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_281 0x4100464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_282 0x4100468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_283 0x410046C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_284 0x4100470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_285 0x4100474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_286 0x4100478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_287 0x410047C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_288 0x4100480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_289 0x4100484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_290 0x4100488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_291 0x410048C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_292 0x4100490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_293 0x4100494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_294 0x4100498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_295 0x410049C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_296 0x41004A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_297 0x41004A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_298 0x41004A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_299 0x41004AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_300 0x41004B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_301 0x41004B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_302 0x41004B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_303 0x41004BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_304 0x41004C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_305 0x41004C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_306 0x41004C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_307 0x41004CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_308 0x41004D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_309 0x41004D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_310 0x41004D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_311 0x41004DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_312 0x41004E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_313 0x41004E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_314 0x41004E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_315 0x41004EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_316 0x41004F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_317 0x41004F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_318 0x41004F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_319 0x41004FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_320 0x4100500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_321 0x4100504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_322 0x4100508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_323 0x410050C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_324 0x4100510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_325 0x4100514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_326 0x4100518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_327 0x410051C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_328 0x4100520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_329 0x4100524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_330 0x4100528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_331 0x410052C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_332 0x4100530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_333 0x4100534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_334 0x4100538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_335 0x410053C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_336 0x4100540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_337 0x4100544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_338 0x4100548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_339 0x410054C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_340 0x4100550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_341 0x4100554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_342 0x4100558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_343 0x410055C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_344 0x4100560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_345 0x4100564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_346 0x4100568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_347 0x410056C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_348 0x4100570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_349 0x4100574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_350 0x4100578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_351 0x410057C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_352 0x4100580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_353 0x4100584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_354 0x4100588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_355 0x410058C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_356 0x4100590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_357 0x4100594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_358 0x4100598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_359 0x410059C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_360 0x41005A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_361 0x41005A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_362 0x41005A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_363 0x41005AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_364 0x41005B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_365 0x41005B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_366 0x41005B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_367 0x41005BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_368 0x41005C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_369 0x41005C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_370 0x41005C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_371 0x41005CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_372 0x41005D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_373 0x41005D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_374 0x41005D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_375 0x41005DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_376 0x41005E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_377 0x41005E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_378 0x41005E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_379 0x41005EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_380 0x41005F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_381 0x41005F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_382 0x41005F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_383 0x41005FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_384 0x4100600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_385 0x4100604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_386 0x4100608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_387 0x410060C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_388 0x4100610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_389 0x4100614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_390 0x4100618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_391 0x410061C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_392 0x4100620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_393 0x4100624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_394 0x4100628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_395 0x410062C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_396 0x4100630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_397 0x4100634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_398 0x4100638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_399 0x410063C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_400 0x4100640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_401 0x4100644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_402 0x4100648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_403 0x410064C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_404 0x4100650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_405 0x4100654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_406 0x4100658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_407 0x410065C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_408 0x4100660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_409 0x4100664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_410 0x4100668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_411 0x410066C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_412 0x4100670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_413 0x4100674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_414 0x4100678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_415 0x410067C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_416 0x4100680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_417 0x4100684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_418 0x4100688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_419 0x410068C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_420 0x4100690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_421 0x4100694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_422 0x4100698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_423 0x410069C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_424 0x41006A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_425 0x41006A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_426 0x41006A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_427 0x41006AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_428 0x41006B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_429 0x41006B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_430 0x41006B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_431 0x41006BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_432 0x41006C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_433 0x41006C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_434 0x41006C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_435 0x41006CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_436 0x41006D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_437 0x41006D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_438 0x41006D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_439 0x41006DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_440 0x41006E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_441 0x41006E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_442 0x41006E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_443 0x41006EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_444 0x41006F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_445 0x41006F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_446 0x41006F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_447 0x41006FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_448 0x4100700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_449 0x4100704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_450 0x4100708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_451 0x410070C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_452 0x4100710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_453 0x4100714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_454 0x4100718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_455 0x410071C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_456 0x4100720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_457 0x4100724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_458 0x4100728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_459 0x410072C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_460 0x4100730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_461 0x4100734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_462 0x4100738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_463 0x410073C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_464 0x4100740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_465 0x4100744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_466 0x4100748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_467 0x410074C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_468 0x4100750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_469 0x4100754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_470 0x4100758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_471 0x410075C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_472 0x4100760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_473 0x4100764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_474 0x4100768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_475 0x410076C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_476 0x4100770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_477 0x4100774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_478 0x4100778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_479 0x410077C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_480 0x4100780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_481 0x4100784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_482 0x4100788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_483 0x410078C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_484 0x4100790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_485 0x4100794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_486 0x4100798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_487 0x410079C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_488 0x41007A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_489 0x41007A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_490 0x41007A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_491 0x41007AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_492 0x41007B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_493 0x41007B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_494 0x41007B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_495 0x41007BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_496 0x41007C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_497 0x41007C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_498 0x41007C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_499 0x41007CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_500 0x41007D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_501 0x41007D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_502 0x41007D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_503 0x41007DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_504 0x41007E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_505 0x41007E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_506 0x41007E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_507 0x41007EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_508 0x41007F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_509 0x41007F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_510 0x41007F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_511 0x41007FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_512 0x4100800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_513 0x4100804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_514 0x4100808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_515 0x410080C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_516 0x4100810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_517 0x4100814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_518 0x4100818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_519 0x410081C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_520 0x4100820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_521 0x4100824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_522 0x4100828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_523 0x410082C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_524 0x4100830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_525 0x4100834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_526 0x4100838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_527 0x410083C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_528 0x4100840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_529 0x4100844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_530 0x4100848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_531 0x410084C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_532 0x4100850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_533 0x4100854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_534 0x4100858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_535 0x410085C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_536 0x4100860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_537 0x4100864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_538 0x4100868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_539 0x410086C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_540 0x4100870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_541 0x4100874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_542 0x4100878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_543 0x410087C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_544 0x4100880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_545 0x4100884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_546 0x4100888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_547 0x410088C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_548 0x4100890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_549 0x4100894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_550 0x4100898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_551 0x410089C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_552 0x41008A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_553 0x41008A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_554 0x41008A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_555 0x41008AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_556 0x41008B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_557 0x41008B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_558 0x41008B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_559 0x41008BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_560 0x41008C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_561 0x41008C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_562 0x41008C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_563 0x41008CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_564 0x41008D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_565 0x41008D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_566 0x41008D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_567 0x41008DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_568 0x41008E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_569 0x41008E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_570 0x41008E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_571 0x41008EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_572 0x41008F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_573 0x41008F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_574 0x41008F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_575 0x41008FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_576 0x4100900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_577 0x4100904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_578 0x4100908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_579 0x410090C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_580 0x4100910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_581 0x4100914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_582 0x4100918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_583 0x410091C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_584 0x4100920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_585 0x4100924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_586 0x4100928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_587 0x410092C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_588 0x4100930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_589 0x4100934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_590 0x4100938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_591 0x410093C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_592 0x4100940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_593 0x4100944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_594 0x4100948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_595 0x410094C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_596 0x4100950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_597 0x4100954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_598 0x4100958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_599 0x410095C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_600 0x4100960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_601 0x4100964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_602 0x4100968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_603 0x410096C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_604 0x4100970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_605 0x4100974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_606 0x4100978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_607 0x410097C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_608 0x4100980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_609 0x4100984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_610 0x4100988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_611 0x410098C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_612 0x4100990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_613 0x4100994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_614 0x4100998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_615 0x410099C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_616 0x41009A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_617 0x41009A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_618 0x41009A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_619 0x41009AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_620 0x41009B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_621 0x41009B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_622 0x41009B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_623 0x41009BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_624 0x41009C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_625 0x41009C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_626 0x41009C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_627 0x41009CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_628 0x41009D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_629 0x41009D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_630 0x41009D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_631 0x41009DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_632 0x41009E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_633 0x41009E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_634 0x41009E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_635 0x41009EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_636 0x41009F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_637 0x41009F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_638 0x41009F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_639 0x41009FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_640 0x4100A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_641 0x4100A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_642 0x4100A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_643 0x4100A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_644 0x4100A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_645 0x4100A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_646 0x4100A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_647 0x4100A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_648 0x4100A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_649 0x4100A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_650 0x4100A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_651 0x4100A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_652 0x4100A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_653 0x4100A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_654 0x4100A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_655 0x4100A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_656 0x4100A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_657 0x4100A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_658 0x4100A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_659 0x4100A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_660 0x4100A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_661 0x4100A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_662 0x4100A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_663 0x4100A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_664 0x4100A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_665 0x4100A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_666 0x4100A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_667 0x4100A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_668 0x4100A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_669 0x4100A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_670 0x4100A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_671 0x4100A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_672 0x4100A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_673 0x4100A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_674 0x4100A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_675 0x4100A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_676 0x4100A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_677 0x4100A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_678 0x4100A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_679 0x4100A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_680 0x4100AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_681 0x4100AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_682 0x4100AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_683 0x4100AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_684 0x4100AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_685 0x4100AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_686 0x4100AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_687 0x4100ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_688 0x4100AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_689 0x4100AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_690 0x4100AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_691 0x4100ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_692 0x4100AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_693 0x4100AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_694 0x4100AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_695 0x4100ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_696 0x4100AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_697 0x4100AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_698 0x4100AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_699 0x4100AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_700 0x4100AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_701 0x4100AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_702 0x4100AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_703 0x4100AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_704 0x4100B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_705 0x4100B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_706 0x4100B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_707 0x4100B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_708 0x4100B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_709 0x4100B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_710 0x4100B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_711 0x4100B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_712 0x4100B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_713 0x4100B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_714 0x4100B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_715 0x4100B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_716 0x4100B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_717 0x4100B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_718 0x4100B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_719 0x4100B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_720 0x4100B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_721 0x4100B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_722 0x4100B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_723 0x4100B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_724 0x4100B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_725 0x4100B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_726 0x4100B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_727 0x4100B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_728 0x4100B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_729 0x4100B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_730 0x4100B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_731 0x4100B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_732 0x4100B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_733 0x4100B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_734 0x4100B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_735 0x4100B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_736 0x4100B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_737 0x4100B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_738 0x4100B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_739 0x4100B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_740 0x4100B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_741 0x4100B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_742 0x4100B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_743 0x4100B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_744 0x4100BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_745 0x4100BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_746 0x4100BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_747 0x4100BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_748 0x4100BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_749 0x4100BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_750 0x4100BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_751 0x4100BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_752 0x4100BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_753 0x4100BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_754 0x4100BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_755 0x4100BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_756 0x4100BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_757 0x4100BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_758 0x4100BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_759 0x4100BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_760 0x4100BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_761 0x4100BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_762 0x4100BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_763 0x4100BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_764 0x4100BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_765 0x4100BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_766 0x4100BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_767 0x4100BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_768 0x4100C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_769 0x4100C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_770 0x4100C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_771 0x4100C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_772 0x4100C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_773 0x4100C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_774 0x4100C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_775 0x4100C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_776 0x4100C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_777 0x4100C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_778 0x4100C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_779 0x4100C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_780 0x4100C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_781 0x4100C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_782 0x4100C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_783 0x4100C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_784 0x4100C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_785 0x4100C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_786 0x4100C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_787 0x4100C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_788 0x4100C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_789 0x4100C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_790 0x4100C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_791 0x4100C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_792 0x4100C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_793 0x4100C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_794 0x4100C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_795 0x4100C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_796 0x4100C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_797 0x4100C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_798 0x4100C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_799 0x4100C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_800 0x4100C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_801 0x4100C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_802 0x4100C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_803 0x4100C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_804 0x4100C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_805 0x4100C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_806 0x4100C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_807 0x4100C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_808 0x4100CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_809 0x4100CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_810 0x4100CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_811 0x4100CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_812 0x4100CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_813 0x4100CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_814 0x4100CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_815 0x4100CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_816 0x4100CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_817 0x4100CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_818 0x4100CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_819 0x4100CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_820 0x4100CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_821 0x4100CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_822 0x4100CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_823 0x4100CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_824 0x4100CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_825 0x4100CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_826 0x4100CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_827 0x4100CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_828 0x4100CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_829 0x4100CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_830 0x4100CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_831 0x4100CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_832 0x4100D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_833 0x4100D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_834 0x4100D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_835 0x4100D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_836 0x4100D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_837 0x4100D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_838 0x4100D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_839 0x4100D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_840 0x4100D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_841 0x4100D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_842 0x4100D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_843 0x4100D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_844 0x4100D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_845 0x4100D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_846 0x4100D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_847 0x4100D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_848 0x4100D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_849 0x4100D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_850 0x4100D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_851 0x4100D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_852 0x4100D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_853 0x4100D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_854 0x4100D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_855 0x4100D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_856 0x4100D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_857 0x4100D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_858 0x4100D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_859 0x4100D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_860 0x4100D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_861 0x4100D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_862 0x4100D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_863 0x4100D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_864 0x4100D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_865 0x4100D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_866 0x4100D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_867 0x4100D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_868 0x4100D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_869 0x4100D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_870 0x4100D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_871 0x4100D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_872 0x4100DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_873 0x4100DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_874 0x4100DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_875 0x4100DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_876 0x4100DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_877 0x4100DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_878 0x4100DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_879 0x4100DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_880 0x4100DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_881 0x4100DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_882 0x4100DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_883 0x4100DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_884 0x4100DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_885 0x4100DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_886 0x4100DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_887 0x4100DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_888 0x4100DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_889 0x4100DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_890 0x4100DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_891 0x4100DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_892 0x4100DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_893 0x4100DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_894 0x4100DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_895 0x4100DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_896 0x4100E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_897 0x4100E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_898 0x4100E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_899 0x4100E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_900 0x4100E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_901 0x4100E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_902 0x4100E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_903 0x4100E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_904 0x4100E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_905 0x4100E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_906 0x4100E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_907 0x4100E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_908 0x4100E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_909 0x4100E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_910 0x4100E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_911 0x4100E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_912 0x4100E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_913 0x4100E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_914 0x4100E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_915 0x4100E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_916 0x4100E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_917 0x4100E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_918 0x4100E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_919 0x4100E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_920 0x4100E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_921 0x4100E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_922 0x4100E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_923 0x4100E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_924 0x4100E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_925 0x4100E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_926 0x4100E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_927 0x4100E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_928 0x4100E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_929 0x4100E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_930 0x4100E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_931 0x4100E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_932 0x4100E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_933 0x4100E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_934 0x4100E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_935 0x4100E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_936 0x4100EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_937 0x4100EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_938 0x4100EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_939 0x4100EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_940 0x4100EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_941 0x4100EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_942 0x4100EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_943 0x4100EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_944 0x4100EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_945 0x4100EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_946 0x4100EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_947 0x4100ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_948 0x4100ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_949 0x4100ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_950 0x4100ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_951 0x4100EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_952 0x4100EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_953 0x4100EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_954 0x4100EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_955 0x4100EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_956 0x4100EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_957 0x4100EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_958 0x4100EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_959 0x4100EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_960 0x4100F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_961 0x4100F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_962 0x4100F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_963 0x4100F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_964 0x4100F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_965 0x4100F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_966 0x4100F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_967 0x4100F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_968 0x4100F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_969 0x4100F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_970 0x4100F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_971 0x4100F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_972 0x4100F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_973 0x4100F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_974 0x4100F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_975 0x4100F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_976 0x4100F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_977 0x4100F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_978 0x4100F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_979 0x4100F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_980 0x4100F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_981 0x4100F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_982 0x4100F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_983 0x4100F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_984 0x4100F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_985 0x4100F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_986 0x4100F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_987 0x4100F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_988 0x4100F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_989 0x4100F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_990 0x4100F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_991 0x4100F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_992 0x4100F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_993 0x4100F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_994 0x4100F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_995 0x4100F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_996 0x4100F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_997 0x4100F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_998 0x4100F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_999 0x4100F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1000 0x4100FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1001 0x4100FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1002 0x4100FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1003 0x4100FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1004 0x4100FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1005 0x4100FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1006 0x4100FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1007 0x4100FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1008 0x4100FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1009 0x4100FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1010 0x4100FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1011 0x4100FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1012 0x4100FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1013 0x4100FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1014 0x4100FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1015 0x4100FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1016 0x4100FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1017 0x4100FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1018 0x4100FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1019 0x4100FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1020 0x4100FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1021 0x4100FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1022 0x4100FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1023 0x4100FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1024 0x4101000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1025 0x4101004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1026 0x4101008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1027 0x410100C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1028 0x4101010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1029 0x4101014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1030 0x4101018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1031 0x410101C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1032 0x4101020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1033 0x4101024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1034 0x4101028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1035 0x410102C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1036 0x4101030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1037 0x4101034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1038 0x4101038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1039 0x410103C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1040 0x4101040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1041 0x4101044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1042 0x4101048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1043 0x410104C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1044 0x4101050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1045 0x4101054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1046 0x4101058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1047 0x410105C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1048 0x4101060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1049 0x4101064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1050 0x4101068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1051 0x410106C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1052 0x4101070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1053 0x4101074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1054 0x4101078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1055 0x410107C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1056 0x4101080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1057 0x4101084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1058 0x4101088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1059 0x410108C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1060 0x4101090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1061 0x4101094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1062 0x4101098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1063 0x410109C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1064 0x41010A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1065 0x41010A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1066 0x41010A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1067 0x41010AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1068 0x41010B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1069 0x41010B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1070 0x41010B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1071 0x41010BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1072 0x41010C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1073 0x41010C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1074 0x41010C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1075 0x41010CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1076 0x41010D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1077 0x41010D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1078 0x41010D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1079 0x41010DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1080 0x41010E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1081 0x41010E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1082 0x41010E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1083 0x41010EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1084 0x41010F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1085 0x41010F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1086 0x41010F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1087 0x41010FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1088 0x4101100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1089 0x4101104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1090 0x4101108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1091 0x410110C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1092 0x4101110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1093 0x4101114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1094 0x4101118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1095 0x410111C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1096 0x4101120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1097 0x4101124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1098 0x4101128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1099 0x410112C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1100 0x4101130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1101 0x4101134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1102 0x4101138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1103 0x410113C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1104 0x4101140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1105 0x4101144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1106 0x4101148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1107 0x410114C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1108 0x4101150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1109 0x4101154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1110 0x4101158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1111 0x410115C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1112 0x4101160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1113 0x4101164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1114 0x4101168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1115 0x410116C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1116 0x4101170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1117 0x4101174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1118 0x4101178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1119 0x410117C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1120 0x4101180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1121 0x4101184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1122 0x4101188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1123 0x410118C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1124 0x4101190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1125 0x4101194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1126 0x4101198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1127 0x410119C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1128 0x41011A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1129 0x41011A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1130 0x41011A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1131 0x41011AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1132 0x41011B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1133 0x41011B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1134 0x41011B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1135 0x41011BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1136 0x41011C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1137 0x41011C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1138 0x41011C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1139 0x41011CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1140 0x41011D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1141 0x41011D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1142 0x41011D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1143 0x41011DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1144 0x41011E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1145 0x41011E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1146 0x41011E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1147 0x41011EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1148 0x41011F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1149 0x41011F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1150 0x41011F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1151 0x41011FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1152 0x4101200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1153 0x4101204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1154 0x4101208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1155 0x410120C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1156 0x4101210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1157 0x4101214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1158 0x4101218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1159 0x410121C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1160 0x4101220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1161 0x4101224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1162 0x4101228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1163 0x410122C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1164 0x4101230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1165 0x4101234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1166 0x4101238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1167 0x410123C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1168 0x4101240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1169 0x4101244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1170 0x4101248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1171 0x410124C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1172 0x4101250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1173 0x4101254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1174 0x4101258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1175 0x410125C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1176 0x4101260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1177 0x4101264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1178 0x4101268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1179 0x410126C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1180 0x4101270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1181 0x4101274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1182 0x4101278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1183 0x410127C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1184 0x4101280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1185 0x4101284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1186 0x4101288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1187 0x410128C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1188 0x4101290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1189 0x4101294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1190 0x4101298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1191 0x410129C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1192 0x41012A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1193 0x41012A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1194 0x41012A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1195 0x41012AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1196 0x41012B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1197 0x41012B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1198 0x41012B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1199 0x41012BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1200 0x41012C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1201 0x41012C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1202 0x41012C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1203 0x41012CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1204 0x41012D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1205 0x41012D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1206 0x41012D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1207 0x41012DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1208 0x41012E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1209 0x41012E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1210 0x41012E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1211 0x41012EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1212 0x41012F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1213 0x41012F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1214 0x41012F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1215 0x41012FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1216 0x4101300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1217 0x4101304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1218 0x4101308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1219 0x410130C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1220 0x4101310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1221 0x4101314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1222 0x4101318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1223 0x410131C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1224 0x4101320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1225 0x4101324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1226 0x4101328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1227 0x410132C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1228 0x4101330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1229 0x4101334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1230 0x4101338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1231 0x410133C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1232 0x4101340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1233 0x4101344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1234 0x4101348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1235 0x410134C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1236 0x4101350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1237 0x4101354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1238 0x4101358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1239 0x410135C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1240 0x4101360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1241 0x4101364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1242 0x4101368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1243 0x410136C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1244 0x4101370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1245 0x4101374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1246 0x4101378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1247 0x410137C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1248 0x4101380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1249 0x4101384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1250 0x4101388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1251 0x410138C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1252 0x4101390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1253 0x4101394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1254 0x4101398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1255 0x410139C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1256 0x41013A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1257 0x41013A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1258 0x41013A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1259 0x41013AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1260 0x41013B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1261 0x41013B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1262 0x41013B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1263 0x41013BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1264 0x41013C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1265 0x41013C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1266 0x41013C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1267 0x41013CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1268 0x41013D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1269 0x41013D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1270 0x41013D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1271 0x41013DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1272 0x41013E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1273 0x41013E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1274 0x41013E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1275 0x41013EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1276 0x41013F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1277 0x41013F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1278 0x41013F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1279 0x41013FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1280 0x4101400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1281 0x4101404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1282 0x4101408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1283 0x410140C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1284 0x4101410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1285 0x4101414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1286 0x4101418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1287 0x410141C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1288 0x4101420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1289 0x4101424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1290 0x4101428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1291 0x410142C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1292 0x4101430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1293 0x4101434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1294 0x4101438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1295 0x410143C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1296 0x4101440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1297 0x4101444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1298 0x4101448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1299 0x410144C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1300 0x4101450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1301 0x4101454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1302 0x4101458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1303 0x410145C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1304 0x4101460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1305 0x4101464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1306 0x4101468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1307 0x410146C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1308 0x4101470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1309 0x4101474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1310 0x4101478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1311 0x410147C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1312 0x4101480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1313 0x4101484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1314 0x4101488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1315 0x410148C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1316 0x4101490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1317 0x4101494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1318 0x4101498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1319 0x410149C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1320 0x41014A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1321 0x41014A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1322 0x41014A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1323 0x41014AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1324 0x41014B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1325 0x41014B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1326 0x41014B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1327 0x41014BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1328 0x41014C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1329 0x41014C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1330 0x41014C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1331 0x41014CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1332 0x41014D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1333 0x41014D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1334 0x41014D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1335 0x41014DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1336 0x41014E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1337 0x41014E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1338 0x41014E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1339 0x41014EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1340 0x41014F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1341 0x41014F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1342 0x41014F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1343 0x41014FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1344 0x4101500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1345 0x4101504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1346 0x4101508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1347 0x410150C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1348 0x4101510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1349 0x4101514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1350 0x4101518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1351 0x410151C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1352 0x4101520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1353 0x4101524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1354 0x4101528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1355 0x410152C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1356 0x4101530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1357 0x4101534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1358 0x4101538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1359 0x410153C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1360 0x4101540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1361 0x4101544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1362 0x4101548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1363 0x410154C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1364 0x4101550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1365 0x4101554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1366 0x4101558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1367 0x410155C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1368 0x4101560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1369 0x4101564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1370 0x4101568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1371 0x410156C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1372 0x4101570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1373 0x4101574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1374 0x4101578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1375 0x410157C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1376 0x4101580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1377 0x4101584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1378 0x4101588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1379 0x410158C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1380 0x4101590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1381 0x4101594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1382 0x4101598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1383 0x410159C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1384 0x41015A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1385 0x41015A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1386 0x41015A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1387 0x41015AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1388 0x41015B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1389 0x41015B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1390 0x41015B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1391 0x41015BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1392 0x41015C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1393 0x41015C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1394 0x41015C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1395 0x41015CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1396 0x41015D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1397 0x41015D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1398 0x41015D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1399 0x41015DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1400 0x41015E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1401 0x41015E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1402 0x41015E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1403 0x41015EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1404 0x41015F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1405 0x41015F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1406 0x41015F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1407 0x41015FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1408 0x4101600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1409 0x4101604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1410 0x4101608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1411 0x410160C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1412 0x4101610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1413 0x4101614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1414 0x4101618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1415 0x410161C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1416 0x4101620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1417 0x4101624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1418 0x4101628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1419 0x410162C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1420 0x4101630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1421 0x4101634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1422 0x4101638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1423 0x410163C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1424 0x4101640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1425 0x4101644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1426 0x4101648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1427 0x410164C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1428 0x4101650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1429 0x4101654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1430 0x4101658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1431 0x410165C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1432 0x4101660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1433 0x4101664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1434 0x4101668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1435 0x410166C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1436 0x4101670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1437 0x4101674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1438 0x4101678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1439 0x410167C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1440 0x4101680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1441 0x4101684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1442 0x4101688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1443 0x410168C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1444 0x4101690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1445 0x4101694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1446 0x4101698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1447 0x410169C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1448 0x41016A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1449 0x41016A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1450 0x41016A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1451 0x41016AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1452 0x41016B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1453 0x41016B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1454 0x41016B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1455 0x41016BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1456 0x41016C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1457 0x41016C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1458 0x41016C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1459 0x41016CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1460 0x41016D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1461 0x41016D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1462 0x41016D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1463 0x41016DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1464 0x41016E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1465 0x41016E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1466 0x41016E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1467 0x41016EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1468 0x41016F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1469 0x41016F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1470 0x41016F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1471 0x41016FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1472 0x4101700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1473 0x4101704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1474 0x4101708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1475 0x410170C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1476 0x4101710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1477 0x4101714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1478 0x4101718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1479 0x410171C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1480 0x4101720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1481 0x4101724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1482 0x4101728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1483 0x410172C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1484 0x4101730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1485 0x4101734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1486 0x4101738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1487 0x410173C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1488 0x4101740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1489 0x4101744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1490 0x4101748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1491 0x410174C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1492 0x4101750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1493 0x4101754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1494 0x4101758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1495 0x410175C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1496 0x4101760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1497 0x4101764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1498 0x4101768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1499 0x410176C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1500 0x4101770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1501 0x4101774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1502 0x4101778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1503 0x410177C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1504 0x4101780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1505 0x4101784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1506 0x4101788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1507 0x410178C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1508 0x4101790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1509 0x4101794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1510 0x4101798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1511 0x410179C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1512 0x41017A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1513 0x41017A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1514 0x41017A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1515 0x41017AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1516 0x41017B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1517 0x41017B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1518 0x41017B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1519 0x41017BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1520 0x41017C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1521 0x41017C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1522 0x41017C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1523 0x41017CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1524 0x41017D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1525 0x41017D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1526 0x41017D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1527 0x41017DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1528 0x41017E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1529 0x41017E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1530 0x41017E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1531 0x41017EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1532 0x41017F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1533 0x41017F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1534 0x41017F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1535 0x41017FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1536 0x4101800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1537 0x4101804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1538 0x4101808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1539 0x410180C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1540 0x4101810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1541 0x4101814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1542 0x4101818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1543 0x410181C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1544 0x4101820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1545 0x4101824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1546 0x4101828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1547 0x410182C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1548 0x4101830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1549 0x4101834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1550 0x4101838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1551 0x410183C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1552 0x4101840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1553 0x4101844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1554 0x4101848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1555 0x410184C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1556 0x4101850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1557 0x4101854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1558 0x4101858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1559 0x410185C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1560 0x4101860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1561 0x4101864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1562 0x4101868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1563 0x410186C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1564 0x4101870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1565 0x4101874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1566 0x4101878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1567 0x410187C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1568 0x4101880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1569 0x4101884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1570 0x4101888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1571 0x410188C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1572 0x4101890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1573 0x4101894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1574 0x4101898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1575 0x410189C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1576 0x41018A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1577 0x41018A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1578 0x41018A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1579 0x41018AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1580 0x41018B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1581 0x41018B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1582 0x41018B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1583 0x41018BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1584 0x41018C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1585 0x41018C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1586 0x41018C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1587 0x41018CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1588 0x41018D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1589 0x41018D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1590 0x41018D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1591 0x41018DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1592 0x41018E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1593 0x41018E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1594 0x41018E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1595 0x41018EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1596 0x41018F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1597 0x41018F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1598 0x41018F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1599 0x41018FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1600 0x4101900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1601 0x4101904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1602 0x4101908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1603 0x410190C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1604 0x4101910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1605 0x4101914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1606 0x4101918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1607 0x410191C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1608 0x4101920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1609 0x4101924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1610 0x4101928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1611 0x410192C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1612 0x4101930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1613 0x4101934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1614 0x4101938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1615 0x410193C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1616 0x4101940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1617 0x4101944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1618 0x4101948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1619 0x410194C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1620 0x4101950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1621 0x4101954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1622 0x4101958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1623 0x410195C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1624 0x4101960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1625 0x4101964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1626 0x4101968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1627 0x410196C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1628 0x4101970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1629 0x4101974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1630 0x4101978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1631 0x410197C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1632 0x4101980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1633 0x4101984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1634 0x4101988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1635 0x410198C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1636 0x4101990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1637 0x4101994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1638 0x4101998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1639 0x410199C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1640 0x41019A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1641 0x41019A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1642 0x41019A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1643 0x41019AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1644 0x41019B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1645 0x41019B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1646 0x41019B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1647 0x41019BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1648 0x41019C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1649 0x41019C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1650 0x41019C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1651 0x41019CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1652 0x41019D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1653 0x41019D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1654 0x41019D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1655 0x41019DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1656 0x41019E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1657 0x41019E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1658 0x41019E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1659 0x41019EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1660 0x41019F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1661 0x41019F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1662 0x41019F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1663 0x41019FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1664 0x4101A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1665 0x4101A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1666 0x4101A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1667 0x4101A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1668 0x4101A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1669 0x4101A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1670 0x4101A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1671 0x4101A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1672 0x4101A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1673 0x4101A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1674 0x4101A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1675 0x4101A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1676 0x4101A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1677 0x4101A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1678 0x4101A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1679 0x4101A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1680 0x4101A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1681 0x4101A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1682 0x4101A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1683 0x4101A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1684 0x4101A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1685 0x4101A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1686 0x4101A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1687 0x4101A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1688 0x4101A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1689 0x4101A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1690 0x4101A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1691 0x4101A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1692 0x4101A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1693 0x4101A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1694 0x4101A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1695 0x4101A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1696 0x4101A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1697 0x4101A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1698 0x4101A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1699 0x4101A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1700 0x4101A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1701 0x4101A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1702 0x4101A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1703 0x4101A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1704 0x4101AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1705 0x4101AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1706 0x4101AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1707 0x4101AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1708 0x4101AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1709 0x4101AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1710 0x4101AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1711 0x4101ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1712 0x4101AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1713 0x4101AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1714 0x4101AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1715 0x4101ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1716 0x4101AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1717 0x4101AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1718 0x4101AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1719 0x4101ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1720 0x4101AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1721 0x4101AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1722 0x4101AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1723 0x4101AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1724 0x4101AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1725 0x4101AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1726 0x4101AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1727 0x4101AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1728 0x4101B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1729 0x4101B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1730 0x4101B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1731 0x4101B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1732 0x4101B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1733 0x4101B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1734 0x4101B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1735 0x4101B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1736 0x4101B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1737 0x4101B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1738 0x4101B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1739 0x4101B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1740 0x4101B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1741 0x4101B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1742 0x4101B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1743 0x4101B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1744 0x4101B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1745 0x4101B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1746 0x4101B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1747 0x4101B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1748 0x4101B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1749 0x4101B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1750 0x4101B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1751 0x4101B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1752 0x4101B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1753 0x4101B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1754 0x4101B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1755 0x4101B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1756 0x4101B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1757 0x4101B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1758 0x4101B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1759 0x4101B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1760 0x4101B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1761 0x4101B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1762 0x4101B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1763 0x4101B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1764 0x4101B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1765 0x4101B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1766 0x4101B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1767 0x4101B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1768 0x4101BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1769 0x4101BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1770 0x4101BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1771 0x4101BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1772 0x4101BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1773 0x4101BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1774 0x4101BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1775 0x4101BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1776 0x4101BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1777 0x4101BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1778 0x4101BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1779 0x4101BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1780 0x4101BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1781 0x4101BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1782 0x4101BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1783 0x4101BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1784 0x4101BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1785 0x4101BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1786 0x4101BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1787 0x4101BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1788 0x4101BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1789 0x4101BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1790 0x4101BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1791 0x4101BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1792 0x4101C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1793 0x4101C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1794 0x4101C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1795 0x4101C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1796 0x4101C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1797 0x4101C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1798 0x4101C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1799 0x4101C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1800 0x4101C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1801 0x4101C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1802 0x4101C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1803 0x4101C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1804 0x4101C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1805 0x4101C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1806 0x4101C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1807 0x4101C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1808 0x4101C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1809 0x4101C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1810 0x4101C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1811 0x4101C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1812 0x4101C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1813 0x4101C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1814 0x4101C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1815 0x4101C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1816 0x4101C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1817 0x4101C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1818 0x4101C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1819 0x4101C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1820 0x4101C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1821 0x4101C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1822 0x4101C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1823 0x4101C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1824 0x4101C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1825 0x4101C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1826 0x4101C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1827 0x4101C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1828 0x4101C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1829 0x4101C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1830 0x4101C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1831 0x4101C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1832 0x4101CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1833 0x4101CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1834 0x4101CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1835 0x4101CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1836 0x4101CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1837 0x4101CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1838 0x4101CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1839 0x4101CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1840 0x4101CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1841 0x4101CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1842 0x4101CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1843 0x4101CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1844 0x4101CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1845 0x4101CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1846 0x4101CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1847 0x4101CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1848 0x4101CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1849 0x4101CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1850 0x4101CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1851 0x4101CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1852 0x4101CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1853 0x4101CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1854 0x4101CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1855 0x4101CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1856 0x4101D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1857 0x4101D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1858 0x4101D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1859 0x4101D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1860 0x4101D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1861 0x4101D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1862 0x4101D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1863 0x4101D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1864 0x4101D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1865 0x4101D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1866 0x4101D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1867 0x4101D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1868 0x4101D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1869 0x4101D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1870 0x4101D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1871 0x4101D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1872 0x4101D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1873 0x4101D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1874 0x4101D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1875 0x4101D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1876 0x4101D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1877 0x4101D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1878 0x4101D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1879 0x4101D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1880 0x4101D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1881 0x4101D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1882 0x4101D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1883 0x4101D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1884 0x4101D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1885 0x4101D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1886 0x4101D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1887 0x4101D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1888 0x4101D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1889 0x4101D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1890 0x4101D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1891 0x4101D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1892 0x4101D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1893 0x4101D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1894 0x4101D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1895 0x4101D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1896 0x4101DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1897 0x4101DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1898 0x4101DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1899 0x4101DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1900 0x4101DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1901 0x4101DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1902 0x4101DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1903 0x4101DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1904 0x4101DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1905 0x4101DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1906 0x4101DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1907 0x4101DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1908 0x4101DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1909 0x4101DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1910 0x4101DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1911 0x4101DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1912 0x4101DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1913 0x4101DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1914 0x4101DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1915 0x4101DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1916 0x4101DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1917 0x4101DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1918 0x4101DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1919 0x4101DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1920 0x4101E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1921 0x4101E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1922 0x4101E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1923 0x4101E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1924 0x4101E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1925 0x4101E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1926 0x4101E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1927 0x4101E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1928 0x4101E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1929 0x4101E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1930 0x4101E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1931 0x4101E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1932 0x4101E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1933 0x4101E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1934 0x4101E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1935 0x4101E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1936 0x4101E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1937 0x4101E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1938 0x4101E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1939 0x4101E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1940 0x4101E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1941 0x4101E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1942 0x4101E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1943 0x4101E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1944 0x4101E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1945 0x4101E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1946 0x4101E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1947 0x4101E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1948 0x4101E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1949 0x4101E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1950 0x4101E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1951 0x4101E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1952 0x4101E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1953 0x4101E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1954 0x4101E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1955 0x4101E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1956 0x4101E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1957 0x4101E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1958 0x4101E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1959 0x4101E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1960 0x4101EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1961 0x4101EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1962 0x4101EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1963 0x4101EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1964 0x4101EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1965 0x4101EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1966 0x4101EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1967 0x4101EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1968 0x4101EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1969 0x4101EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1970 0x4101EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1971 0x4101ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1972 0x4101ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1973 0x4101ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1974 0x4101ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1975 0x4101EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1976 0x4101EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1977 0x4101EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1978 0x4101EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1979 0x4101EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1980 0x4101EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1981 0x4101EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1982 0x4101EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1983 0x4101EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1984 0x4101F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1985 0x4101F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1986 0x4101F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1987 0x4101F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1988 0x4101F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1989 0x4101F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1990 0x4101F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1991 0x4101F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1992 0x4101F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1993 0x4101F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1994 0x4101F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1995 0x4101F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1996 0x4101F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1997 0x4101F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1998 0x4101F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_1999 0x4101F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2000 0x4101F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2001 0x4101F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2002 0x4101F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2003 0x4101F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2004 0x4101F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2005 0x4101F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2006 0x4101F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2007 0x4101F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2008 0x4101F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2009 0x4101F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2010 0x4101F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2011 0x4101F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2012 0x4101F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2013 0x4101F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2014 0x4101F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2015 0x4101F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2016 0x4101F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2017 0x4101F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2018 0x4101F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2019 0x4101F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2020 0x4101F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2021 0x4101F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2022 0x4101F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2023 0x4101F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2024 0x4101FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2025 0x4101FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2026 0x4101FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2027 0x4101FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2028 0x4101FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2029 0x4101FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2030 0x4101FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2031 0x4101FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2032 0x4101FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2033 0x4101FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2034 0x4101FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2035 0x4101FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2036 0x4101FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2037 0x4101FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2038 0x4101FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2039 0x4101FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2040 0x4101FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2041 0x4101FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2042 0x4101FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2043 0x4101FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2044 0x4101FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2045 0x4101FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2046 0x4101FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4101FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2048 0x4102000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2049 0x4102004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2050 0x4102008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2051 0x410200C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2052 0x4102010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2053 0x4102014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2054 0x4102018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2055 0x410201C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2056 0x4102020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2057 0x4102024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2058 0x4102028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2059 0x410202C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2060 0x4102030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2061 0x4102034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2062 0x4102038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2063 0x410203C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2064 0x4102040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2065 0x4102044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2066 0x4102048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2067 0x410204C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2068 0x4102050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2069 0x4102054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2070 0x4102058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2071 0x410205C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2072 0x4102060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2073 0x4102064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2074 0x4102068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2075 0x410206C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2076 0x4102070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2077 0x4102074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2078 0x4102078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2079 0x410207C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2080 0x4102080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2081 0x4102084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2082 0x4102088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2083 0x410208C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2084 0x4102090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2085 0x4102094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2086 0x4102098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2087 0x410209C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2088 0x41020A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2089 0x41020A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2090 0x41020A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2091 0x41020AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2092 0x41020B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2093 0x41020B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2094 0x41020B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2095 0x41020BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2096 0x41020C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2097 0x41020C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2098 0x41020C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2099 0x41020CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2100 0x41020D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2101 0x41020D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2102 0x41020D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2103 0x41020DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2104 0x41020E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2105 0x41020E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2106 0x41020E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2107 0x41020EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2108 0x41020F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2109 0x41020F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2110 0x41020F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2111 0x41020FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2112 0x4102100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2113 0x4102104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2114 0x4102108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2115 0x410210C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2116 0x4102110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2117 0x4102114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2118 0x4102118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2119 0x410211C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2120 0x4102120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2121 0x4102124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2122 0x4102128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2123 0x410212C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2124 0x4102130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2125 0x4102134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2126 0x4102138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2127 0x410213C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2128 0x4102140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2129 0x4102144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2130 0x4102148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2131 0x410214C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2132 0x4102150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2133 0x4102154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2134 0x4102158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2135 0x410215C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2136 0x4102160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2137 0x4102164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2138 0x4102168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2139 0x410216C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2140 0x4102170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2141 0x4102174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2142 0x4102178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2143 0x410217C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2144 0x4102180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2145 0x4102184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2146 0x4102188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2147 0x410218C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2148 0x4102190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2149 0x4102194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2150 0x4102198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2151 0x410219C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2152 0x41021A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2153 0x41021A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2154 0x41021A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2155 0x41021AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2156 0x41021B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2157 0x41021B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2158 0x41021B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2159 0x41021BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2160 0x41021C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2161 0x41021C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2162 0x41021C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2163 0x41021CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2164 0x41021D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2165 0x41021D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2166 0x41021D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2167 0x41021DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2168 0x41021E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2169 0x41021E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2170 0x41021E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2171 0x41021EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2172 0x41021F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2173 0x41021F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2174 0x41021F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2175 0x41021FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2176 0x4102200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2177 0x4102204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2178 0x4102208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2179 0x410220C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2180 0x4102210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2181 0x4102214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2182 0x4102218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2183 0x410221C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2184 0x4102220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2185 0x4102224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2186 0x4102228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2187 0x410222C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2188 0x4102230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2189 0x4102234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2190 0x4102238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2191 0x410223C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2192 0x4102240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2193 0x4102244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2194 0x4102248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2195 0x410224C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2196 0x4102250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2197 0x4102254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2198 0x4102258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2199 0x410225C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2200 0x4102260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2201 0x4102264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2202 0x4102268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2203 0x410226C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2204 0x4102270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2205 0x4102274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2206 0x4102278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2207 0x410227C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2208 0x4102280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2209 0x4102284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2210 0x4102288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2211 0x410228C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2212 0x4102290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2213 0x4102294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2214 0x4102298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2215 0x410229C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2216 0x41022A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2217 0x41022A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2218 0x41022A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2219 0x41022AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2220 0x41022B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2221 0x41022B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2222 0x41022B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2223 0x41022BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2224 0x41022C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2225 0x41022C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2226 0x41022C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2227 0x41022CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2228 0x41022D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2229 0x41022D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2230 0x41022D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2231 0x41022DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2232 0x41022E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2233 0x41022E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2234 0x41022E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2235 0x41022EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2236 0x41022F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2237 0x41022F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2238 0x41022F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2239 0x41022FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2240 0x4102300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2241 0x4102304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2242 0x4102308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2243 0x410230C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2244 0x4102310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2245 0x4102314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2246 0x4102318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2247 0x410231C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2248 0x4102320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2249 0x4102324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2250 0x4102328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2251 0x410232C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2252 0x4102330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2253 0x4102334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2254 0x4102338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2255 0x410233C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2256 0x4102340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2257 0x4102344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2258 0x4102348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2259 0x410234C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2260 0x4102350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2261 0x4102354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2262 0x4102358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2263 0x410235C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2264 0x4102360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2265 0x4102364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2266 0x4102368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2267 0x410236C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2268 0x4102370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2269 0x4102374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2270 0x4102378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2271 0x410237C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2272 0x4102380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2273 0x4102384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2274 0x4102388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2275 0x410238C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2276 0x4102390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2277 0x4102394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2278 0x4102398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2279 0x410239C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2280 0x41023A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2281 0x41023A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2282 0x41023A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2283 0x41023AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2284 0x41023B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2285 0x41023B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2286 0x41023B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2287 0x41023BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2288 0x41023C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2289 0x41023C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2290 0x41023C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2291 0x41023CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2292 0x41023D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2293 0x41023D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2294 0x41023D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2295 0x41023DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2296 0x41023E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2297 0x41023E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2298 0x41023E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2299 0x41023EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2300 0x41023F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2301 0x41023F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2302 0x41023F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2303 0x41023FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2304 0x4102400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2305 0x4102404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2306 0x4102408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2307 0x410240C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2308 0x4102410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2309 0x4102414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2310 0x4102418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2311 0x410241C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2312 0x4102420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2313 0x4102424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2314 0x4102428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2315 0x410242C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2316 0x4102430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2317 0x4102434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2318 0x4102438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2319 0x410243C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2320 0x4102440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2321 0x4102444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2322 0x4102448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2323 0x410244C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2324 0x4102450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2325 0x4102454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2326 0x4102458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2327 0x410245C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2328 0x4102460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2329 0x4102464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2330 0x4102468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2331 0x410246C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2332 0x4102470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2333 0x4102474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2334 0x4102478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2335 0x410247C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2336 0x4102480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2337 0x4102484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2338 0x4102488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2339 0x410248C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2340 0x4102490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2341 0x4102494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2342 0x4102498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2343 0x410249C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2344 0x41024A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2345 0x41024A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2346 0x41024A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2347 0x41024AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2348 0x41024B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2349 0x41024B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2350 0x41024B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2351 0x41024BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2352 0x41024C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2353 0x41024C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2354 0x41024C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2355 0x41024CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2356 0x41024D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2357 0x41024D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2358 0x41024D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2359 0x41024DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2360 0x41024E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2361 0x41024E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2362 0x41024E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2363 0x41024EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2364 0x41024F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2365 0x41024F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2366 0x41024F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2367 0x41024FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2368 0x4102500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2369 0x4102504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2370 0x4102508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2371 0x410250C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2372 0x4102510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2373 0x4102514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2374 0x4102518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2375 0x410251C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2376 0x4102520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2377 0x4102524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2378 0x4102528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2379 0x410252C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2380 0x4102530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2381 0x4102534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2382 0x4102538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2383 0x410253C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2384 0x4102540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2385 0x4102544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2386 0x4102548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2387 0x410254C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2388 0x4102550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2389 0x4102554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2390 0x4102558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2391 0x410255C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2392 0x4102560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2393 0x4102564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2394 0x4102568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2395 0x410256C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2396 0x4102570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2397 0x4102574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2398 0x4102578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2399 0x410257C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2400 0x4102580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2401 0x4102584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2402 0x4102588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2403 0x410258C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2404 0x4102590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2405 0x4102594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2406 0x4102598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2407 0x410259C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2408 0x41025A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2409 0x41025A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2410 0x41025A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2411 0x41025AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2412 0x41025B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2413 0x41025B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2414 0x41025B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2415 0x41025BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2416 0x41025C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2417 0x41025C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2418 0x41025C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2419 0x41025CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2420 0x41025D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2421 0x41025D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2422 0x41025D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2423 0x41025DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2424 0x41025E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2425 0x41025E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2426 0x41025E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2427 0x41025EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2428 0x41025F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2429 0x41025F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2430 0x41025F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2431 0x41025FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2432 0x4102600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2433 0x4102604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2434 0x4102608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2435 0x410260C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2436 0x4102610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2437 0x4102614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2438 0x4102618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2439 0x410261C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2440 0x4102620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2441 0x4102624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2442 0x4102628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2443 0x410262C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2444 0x4102630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2445 0x4102634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2446 0x4102638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2447 0x410263C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2448 0x4102640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2449 0x4102644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2450 0x4102648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2451 0x410264C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2452 0x4102650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2453 0x4102654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2454 0x4102658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2455 0x410265C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2456 0x4102660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2457 0x4102664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2458 0x4102668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2459 0x410266C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2460 0x4102670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2461 0x4102674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2462 0x4102678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2463 0x410267C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2464 0x4102680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2465 0x4102684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2466 0x4102688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2467 0x410268C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2468 0x4102690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2469 0x4102694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2470 0x4102698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2471 0x410269C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2472 0x41026A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2473 0x41026A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2474 0x41026A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2475 0x41026AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2476 0x41026B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2477 0x41026B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2478 0x41026B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2479 0x41026BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2480 0x41026C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2481 0x41026C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2482 0x41026C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2483 0x41026CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2484 0x41026D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2485 0x41026D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2486 0x41026D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2487 0x41026DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2488 0x41026E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2489 0x41026E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2490 0x41026E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2491 0x41026EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2492 0x41026F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2493 0x41026F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2494 0x41026F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2495 0x41026FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2496 0x4102700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2497 0x4102704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2498 0x4102708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2499 0x410270C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2500 0x4102710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2501 0x4102714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2502 0x4102718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2503 0x410271C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2504 0x4102720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2505 0x4102724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2506 0x4102728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2507 0x410272C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2508 0x4102730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2509 0x4102734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2510 0x4102738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2511 0x410273C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2512 0x4102740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2513 0x4102744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2514 0x4102748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2515 0x410274C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2516 0x4102750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2517 0x4102754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2518 0x4102758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2519 0x410275C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2520 0x4102760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2521 0x4102764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2522 0x4102768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2523 0x410276C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2524 0x4102770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2525 0x4102774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2526 0x4102778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2527 0x410277C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2528 0x4102780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2529 0x4102784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2530 0x4102788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2531 0x410278C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2532 0x4102790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2533 0x4102794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2534 0x4102798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2535 0x410279C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2536 0x41027A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2537 0x41027A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2538 0x41027A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2539 0x41027AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2540 0x41027B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2541 0x41027B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2542 0x41027B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2543 0x41027BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2544 0x41027C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2545 0x41027C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2546 0x41027C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2547 0x41027CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2548 0x41027D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2549 0x41027D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2550 0x41027D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2551 0x41027DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2552 0x41027E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2553 0x41027E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2554 0x41027E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2555 0x41027EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2556 0x41027F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2557 0x41027F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2558 0x41027F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2559 0x41027FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2560 0x4102800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2561 0x4102804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2562 0x4102808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2563 0x410280C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2564 0x4102810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2565 0x4102814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2566 0x4102818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2567 0x410281C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2568 0x4102820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2569 0x4102824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2570 0x4102828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2571 0x410282C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2572 0x4102830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2573 0x4102834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2574 0x4102838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2575 0x410283C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2576 0x4102840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2577 0x4102844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2578 0x4102848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2579 0x410284C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2580 0x4102850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2581 0x4102854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2582 0x4102858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2583 0x410285C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2584 0x4102860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2585 0x4102864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2586 0x4102868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2587 0x410286C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2588 0x4102870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2589 0x4102874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2590 0x4102878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2591 0x410287C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2592 0x4102880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2593 0x4102884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2594 0x4102888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2595 0x410288C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2596 0x4102890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2597 0x4102894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2598 0x4102898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2599 0x410289C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2600 0x41028A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2601 0x41028A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2602 0x41028A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2603 0x41028AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2604 0x41028B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2605 0x41028B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2606 0x41028B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2607 0x41028BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2608 0x41028C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2609 0x41028C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2610 0x41028C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2611 0x41028CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2612 0x41028D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2613 0x41028D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2614 0x41028D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2615 0x41028DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2616 0x41028E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2617 0x41028E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2618 0x41028E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2619 0x41028EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2620 0x41028F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2621 0x41028F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2622 0x41028F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2623 0x41028FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2624 0x4102900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2625 0x4102904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2626 0x4102908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2627 0x410290C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2628 0x4102910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2629 0x4102914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2630 0x4102918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2631 0x410291C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2632 0x4102920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2633 0x4102924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2634 0x4102928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2635 0x410292C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2636 0x4102930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2637 0x4102934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2638 0x4102938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2639 0x410293C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2640 0x4102940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2641 0x4102944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2642 0x4102948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2643 0x410294C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2644 0x4102950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2645 0x4102954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2646 0x4102958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2647 0x410295C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2648 0x4102960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2649 0x4102964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2650 0x4102968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2651 0x410296C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2652 0x4102970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2653 0x4102974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2654 0x4102978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2655 0x410297C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2656 0x4102980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2657 0x4102984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2658 0x4102988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2659 0x410298C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2660 0x4102990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2661 0x4102994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2662 0x4102998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2663 0x410299C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2664 0x41029A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2665 0x41029A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2666 0x41029A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2667 0x41029AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2668 0x41029B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2669 0x41029B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2670 0x41029B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2671 0x41029BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2672 0x41029C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2673 0x41029C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2674 0x41029C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2675 0x41029CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2676 0x41029D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2677 0x41029D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2678 0x41029D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2679 0x41029DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2680 0x41029E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2681 0x41029E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2682 0x41029E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2683 0x41029EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2684 0x41029F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2685 0x41029F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2686 0x41029F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2687 0x41029FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2688 0x4102A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2689 0x4102A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2690 0x4102A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2691 0x4102A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2692 0x4102A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2693 0x4102A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2694 0x4102A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2695 0x4102A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2696 0x4102A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2697 0x4102A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2698 0x4102A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2699 0x4102A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2700 0x4102A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2701 0x4102A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2702 0x4102A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2703 0x4102A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2704 0x4102A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2705 0x4102A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2706 0x4102A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2707 0x4102A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2708 0x4102A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2709 0x4102A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2710 0x4102A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2711 0x4102A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2712 0x4102A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2713 0x4102A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2714 0x4102A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2715 0x4102A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2716 0x4102A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2717 0x4102A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2718 0x4102A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2719 0x4102A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2720 0x4102A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2721 0x4102A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2722 0x4102A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2723 0x4102A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2724 0x4102A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2725 0x4102A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2726 0x4102A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2727 0x4102A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2728 0x4102AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2729 0x4102AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2730 0x4102AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2731 0x4102AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2732 0x4102AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2733 0x4102AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2734 0x4102AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2735 0x4102ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2736 0x4102AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2737 0x4102AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2738 0x4102AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2739 0x4102ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2740 0x4102AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2741 0x4102AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2742 0x4102AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2743 0x4102ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2744 0x4102AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2745 0x4102AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2746 0x4102AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2747 0x4102AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2748 0x4102AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2749 0x4102AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2750 0x4102AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2751 0x4102AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2752 0x4102B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2753 0x4102B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2754 0x4102B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2755 0x4102B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2756 0x4102B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2757 0x4102B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2758 0x4102B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2759 0x4102B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2760 0x4102B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2761 0x4102B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2762 0x4102B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2763 0x4102B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2764 0x4102B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2765 0x4102B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2766 0x4102B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2767 0x4102B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2768 0x4102B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2769 0x4102B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2770 0x4102B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2771 0x4102B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2772 0x4102B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2773 0x4102B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2774 0x4102B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2775 0x4102B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2776 0x4102B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2777 0x4102B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2778 0x4102B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2779 0x4102B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2780 0x4102B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2781 0x4102B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2782 0x4102B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2783 0x4102B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2784 0x4102B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2785 0x4102B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2786 0x4102B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2787 0x4102B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2788 0x4102B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2789 0x4102B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2790 0x4102B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2791 0x4102B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2792 0x4102BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2793 0x4102BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2794 0x4102BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2795 0x4102BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2796 0x4102BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2797 0x4102BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2798 0x4102BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2799 0x4102BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2800 0x4102BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2801 0x4102BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2802 0x4102BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2803 0x4102BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2804 0x4102BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2805 0x4102BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2806 0x4102BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2807 0x4102BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2808 0x4102BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2809 0x4102BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2810 0x4102BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2811 0x4102BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2812 0x4102BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2813 0x4102BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2814 0x4102BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2815 0x4102BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2816 0x4102C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2817 0x4102C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2818 0x4102C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2819 0x4102C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2820 0x4102C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2821 0x4102C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2822 0x4102C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2823 0x4102C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2824 0x4102C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2825 0x4102C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2826 0x4102C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2827 0x4102C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2828 0x4102C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2829 0x4102C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2830 0x4102C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2831 0x4102C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2832 0x4102C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2833 0x4102C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2834 0x4102C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2835 0x4102C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2836 0x4102C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2837 0x4102C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2838 0x4102C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2839 0x4102C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2840 0x4102C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2841 0x4102C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2842 0x4102C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2843 0x4102C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2844 0x4102C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2845 0x4102C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2846 0x4102C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2847 0x4102C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2848 0x4102C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2849 0x4102C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2850 0x4102C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2851 0x4102C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2852 0x4102C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2853 0x4102C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2854 0x4102C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2855 0x4102C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2856 0x4102CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2857 0x4102CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2858 0x4102CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2859 0x4102CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2860 0x4102CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2861 0x4102CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2862 0x4102CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2863 0x4102CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2864 0x4102CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2865 0x4102CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2866 0x4102CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2867 0x4102CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2868 0x4102CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2869 0x4102CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2870 0x4102CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2871 0x4102CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2872 0x4102CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2873 0x4102CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2874 0x4102CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2875 0x4102CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2876 0x4102CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2877 0x4102CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2878 0x4102CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2879 0x4102CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2880 0x4102D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2881 0x4102D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2882 0x4102D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2883 0x4102D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2884 0x4102D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2885 0x4102D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2886 0x4102D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2887 0x4102D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2888 0x4102D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2889 0x4102D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2890 0x4102D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2891 0x4102D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2892 0x4102D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2893 0x4102D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2894 0x4102D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2895 0x4102D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2896 0x4102D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2897 0x4102D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2898 0x4102D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2899 0x4102D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2900 0x4102D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2901 0x4102D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2902 0x4102D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2903 0x4102D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2904 0x4102D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2905 0x4102D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2906 0x4102D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2907 0x4102D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2908 0x4102D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2909 0x4102D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2910 0x4102D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2911 0x4102D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2912 0x4102D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2913 0x4102D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2914 0x4102D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2915 0x4102D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2916 0x4102D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2917 0x4102D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2918 0x4102D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2919 0x4102D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2920 0x4102DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2921 0x4102DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2922 0x4102DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2923 0x4102DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2924 0x4102DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2925 0x4102DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2926 0x4102DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2927 0x4102DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2928 0x4102DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2929 0x4102DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2930 0x4102DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2931 0x4102DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2932 0x4102DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2933 0x4102DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2934 0x4102DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2935 0x4102DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2936 0x4102DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2937 0x4102DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2938 0x4102DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2939 0x4102DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2940 0x4102DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2941 0x4102DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2942 0x4102DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2943 0x4102DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2944 0x4102E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2945 0x4102E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2946 0x4102E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2947 0x4102E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2948 0x4102E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2949 0x4102E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2950 0x4102E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2951 0x4102E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2952 0x4102E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2953 0x4102E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2954 0x4102E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2955 0x4102E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2956 0x4102E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2957 0x4102E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2958 0x4102E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2959 0x4102E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2960 0x4102E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2961 0x4102E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2962 0x4102E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2963 0x4102E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2964 0x4102E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2965 0x4102E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2966 0x4102E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2967 0x4102E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2968 0x4102E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2969 0x4102E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2970 0x4102E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2971 0x4102E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2972 0x4102E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2973 0x4102E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2974 0x4102E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2975 0x4102E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2976 0x4102E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2977 0x4102E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2978 0x4102E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2979 0x4102E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2980 0x4102E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2981 0x4102E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2982 0x4102E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2983 0x4102E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2984 0x4102EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2985 0x4102EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2986 0x4102EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2987 0x4102EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2988 0x4102EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2989 0x4102EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2990 0x4102EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2991 0x4102EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2992 0x4102EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2993 0x4102EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2994 0x4102EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2995 0x4102ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2996 0x4102ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2997 0x4102ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2998 0x4102ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_2999 0x4102EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3000 0x4102EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3001 0x4102EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3002 0x4102EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3003 0x4102EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3004 0x4102EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3005 0x4102EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3006 0x4102EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3007 0x4102EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3008 0x4102F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3009 0x4102F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3010 0x4102F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3011 0x4102F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3012 0x4102F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3013 0x4102F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3014 0x4102F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3015 0x4102F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3016 0x4102F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3017 0x4102F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3018 0x4102F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3019 0x4102F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3020 0x4102F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3021 0x4102F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3022 0x4102F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3023 0x4102F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3024 0x4102F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3025 0x4102F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3026 0x4102F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3027 0x4102F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3028 0x4102F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3029 0x4102F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3030 0x4102F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3031 0x4102F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3032 0x4102F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3033 0x4102F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3034 0x4102F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3035 0x4102F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3036 0x4102F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3037 0x4102F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3038 0x4102F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3039 0x4102F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3040 0x4102F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3041 0x4102F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3042 0x4102F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3043 0x4102F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3044 0x4102F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3045 0x4102F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3046 0x4102F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3047 0x4102F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3048 0x4102FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3049 0x4102FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3050 0x4102FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3051 0x4102FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3052 0x4102FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3053 0x4102FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3054 0x4102FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3055 0x4102FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3056 0x4102FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3057 0x4102FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3058 0x4102FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3059 0x4102FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3060 0x4102FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3061 0x4102FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3062 0x4102FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3063 0x4102FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3064 0x4102FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3065 0x4102FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3066 0x4102FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3067 0x4102FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3068 0x4102FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3069 0x4102FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3070 0x4102FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3071 0x4102FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3072 0x4103000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3073 0x4103004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3074 0x4103008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3075 0x410300C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3076 0x4103010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3077 0x4103014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3078 0x4103018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3079 0x410301C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3080 0x4103020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3081 0x4103024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3082 0x4103028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3083 0x410302C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3084 0x4103030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3085 0x4103034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3086 0x4103038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3087 0x410303C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3088 0x4103040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3089 0x4103044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3090 0x4103048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3091 0x410304C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3092 0x4103050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3093 0x4103054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3094 0x4103058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3095 0x410305C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3096 0x4103060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3097 0x4103064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3098 0x4103068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3099 0x410306C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3100 0x4103070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3101 0x4103074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3102 0x4103078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3103 0x410307C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3104 0x4103080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3105 0x4103084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3106 0x4103088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3107 0x410308C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3108 0x4103090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3109 0x4103094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3110 0x4103098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3111 0x410309C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3112 0x41030A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3113 0x41030A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3114 0x41030A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3115 0x41030AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3116 0x41030B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3117 0x41030B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3118 0x41030B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3119 0x41030BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3120 0x41030C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3121 0x41030C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3122 0x41030C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3123 0x41030CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3124 0x41030D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3125 0x41030D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3126 0x41030D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3127 0x41030DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3128 0x41030E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3129 0x41030E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3130 0x41030E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3131 0x41030EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3132 0x41030F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3133 0x41030F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3134 0x41030F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3135 0x41030FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3136 0x4103100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3137 0x4103104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3138 0x4103108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3139 0x410310C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3140 0x4103110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3141 0x4103114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3142 0x4103118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3143 0x410311C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3144 0x4103120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3145 0x4103124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3146 0x4103128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3147 0x410312C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3148 0x4103130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3149 0x4103134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3150 0x4103138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3151 0x410313C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3152 0x4103140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3153 0x4103144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3154 0x4103148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3155 0x410314C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3156 0x4103150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3157 0x4103154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3158 0x4103158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3159 0x410315C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3160 0x4103160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3161 0x4103164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3162 0x4103168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3163 0x410316C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3164 0x4103170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3165 0x4103174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3166 0x4103178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3167 0x410317C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3168 0x4103180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3169 0x4103184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3170 0x4103188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3171 0x410318C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3172 0x4103190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3173 0x4103194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3174 0x4103198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3175 0x410319C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3176 0x41031A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3177 0x41031A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3178 0x41031A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3179 0x41031AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3180 0x41031B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3181 0x41031B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3182 0x41031B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3183 0x41031BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3184 0x41031C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3185 0x41031C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3186 0x41031C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3187 0x41031CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3188 0x41031D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3189 0x41031D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3190 0x41031D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3191 0x41031DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3192 0x41031E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3193 0x41031E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3194 0x41031E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3195 0x41031EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3196 0x41031F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3197 0x41031F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3198 0x41031F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3199 0x41031FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3200 0x4103200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3201 0x4103204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3202 0x4103208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3203 0x410320C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3204 0x4103210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3205 0x4103214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3206 0x4103218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3207 0x410321C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3208 0x4103220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3209 0x4103224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3210 0x4103228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3211 0x410322C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3212 0x4103230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3213 0x4103234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3214 0x4103238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3215 0x410323C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3216 0x4103240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3217 0x4103244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3218 0x4103248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3219 0x410324C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3220 0x4103250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3221 0x4103254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3222 0x4103258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3223 0x410325C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3224 0x4103260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3225 0x4103264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3226 0x4103268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3227 0x410326C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3228 0x4103270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3229 0x4103274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3230 0x4103278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3231 0x410327C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3232 0x4103280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3233 0x4103284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3234 0x4103288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3235 0x410328C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3236 0x4103290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3237 0x4103294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3238 0x4103298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3239 0x410329C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3240 0x41032A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3241 0x41032A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3242 0x41032A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3243 0x41032AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3244 0x41032B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3245 0x41032B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3246 0x41032B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3247 0x41032BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3248 0x41032C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3249 0x41032C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3250 0x41032C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3251 0x41032CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3252 0x41032D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3253 0x41032D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3254 0x41032D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3255 0x41032DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3256 0x41032E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3257 0x41032E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3258 0x41032E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3259 0x41032EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3260 0x41032F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3261 0x41032F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3262 0x41032F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3263 0x41032FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3264 0x4103300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3265 0x4103304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3266 0x4103308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3267 0x410330C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3268 0x4103310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3269 0x4103314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3270 0x4103318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3271 0x410331C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3272 0x4103320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3273 0x4103324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3274 0x4103328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3275 0x410332C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3276 0x4103330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3277 0x4103334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3278 0x4103338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3279 0x410333C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3280 0x4103340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3281 0x4103344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3282 0x4103348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3283 0x410334C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3284 0x4103350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3285 0x4103354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3286 0x4103358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3287 0x410335C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3288 0x4103360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3289 0x4103364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3290 0x4103368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3291 0x410336C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3292 0x4103370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3293 0x4103374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3294 0x4103378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3295 0x410337C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3296 0x4103380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3297 0x4103384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3298 0x4103388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3299 0x410338C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3300 0x4103390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3301 0x4103394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3302 0x4103398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3303 0x410339C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3304 0x41033A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3305 0x41033A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3306 0x41033A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3307 0x41033AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3308 0x41033B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3309 0x41033B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3310 0x41033B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3311 0x41033BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3312 0x41033C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3313 0x41033C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3314 0x41033C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3315 0x41033CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3316 0x41033D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3317 0x41033D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3318 0x41033D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3319 0x41033DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3320 0x41033E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3321 0x41033E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3322 0x41033E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3323 0x41033EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3324 0x41033F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3325 0x41033F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3326 0x41033F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3327 0x41033FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3328 0x4103400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3329 0x4103404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3330 0x4103408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3331 0x410340C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3332 0x4103410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3333 0x4103414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3334 0x4103418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3335 0x410341C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3336 0x4103420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3337 0x4103424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3338 0x4103428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3339 0x410342C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3340 0x4103430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3341 0x4103434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3342 0x4103438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3343 0x410343C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3344 0x4103440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3345 0x4103444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3346 0x4103448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3347 0x410344C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3348 0x4103450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3349 0x4103454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3350 0x4103458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3351 0x410345C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3352 0x4103460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3353 0x4103464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3354 0x4103468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3355 0x410346C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3356 0x4103470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3357 0x4103474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3358 0x4103478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3359 0x410347C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3360 0x4103480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3361 0x4103484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3362 0x4103488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3363 0x410348C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3364 0x4103490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3365 0x4103494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3366 0x4103498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3367 0x410349C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3368 0x41034A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3369 0x41034A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3370 0x41034A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3371 0x41034AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3372 0x41034B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3373 0x41034B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3374 0x41034B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3375 0x41034BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3376 0x41034C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3377 0x41034C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3378 0x41034C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3379 0x41034CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3380 0x41034D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3381 0x41034D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3382 0x41034D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3383 0x41034DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3384 0x41034E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3385 0x41034E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3386 0x41034E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3387 0x41034EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3388 0x41034F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3389 0x41034F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3390 0x41034F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3391 0x41034FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3392 0x4103500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3393 0x4103504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3394 0x4103508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3395 0x410350C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3396 0x4103510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3397 0x4103514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3398 0x4103518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3399 0x410351C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3400 0x4103520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3401 0x4103524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3402 0x4103528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3403 0x410352C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3404 0x4103530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3405 0x4103534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3406 0x4103538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3407 0x410353C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3408 0x4103540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3409 0x4103544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3410 0x4103548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3411 0x410354C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3412 0x4103550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3413 0x4103554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3414 0x4103558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3415 0x410355C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3416 0x4103560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3417 0x4103564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3418 0x4103568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3419 0x410356C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3420 0x4103570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3421 0x4103574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3422 0x4103578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3423 0x410357C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3424 0x4103580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3425 0x4103584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3426 0x4103588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3427 0x410358C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3428 0x4103590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3429 0x4103594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3430 0x4103598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3431 0x410359C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3432 0x41035A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3433 0x41035A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3434 0x41035A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3435 0x41035AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3436 0x41035B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3437 0x41035B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3438 0x41035B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3439 0x41035BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3440 0x41035C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3441 0x41035C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3442 0x41035C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3443 0x41035CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3444 0x41035D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3445 0x41035D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3446 0x41035D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3447 0x41035DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3448 0x41035E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3449 0x41035E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3450 0x41035E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3451 0x41035EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3452 0x41035F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3453 0x41035F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3454 0x41035F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3455 0x41035FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3456 0x4103600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3457 0x4103604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3458 0x4103608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3459 0x410360C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3460 0x4103610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3461 0x4103614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3462 0x4103618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3463 0x410361C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3464 0x4103620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3465 0x4103624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3466 0x4103628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3467 0x410362C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3468 0x4103630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3469 0x4103634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3470 0x4103638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3471 0x410363C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3472 0x4103640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3473 0x4103644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3474 0x4103648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3475 0x410364C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3476 0x4103650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3477 0x4103654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3478 0x4103658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3479 0x410365C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3480 0x4103660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3481 0x4103664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3482 0x4103668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3483 0x410366C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3484 0x4103670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3485 0x4103674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3486 0x4103678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3487 0x410367C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3488 0x4103680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3489 0x4103684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3490 0x4103688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3491 0x410368C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3492 0x4103690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3493 0x4103694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3494 0x4103698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3495 0x410369C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3496 0x41036A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3497 0x41036A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3498 0x41036A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3499 0x41036AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3500 0x41036B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3501 0x41036B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3502 0x41036B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3503 0x41036BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3504 0x41036C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3505 0x41036C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3506 0x41036C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3507 0x41036CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3508 0x41036D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3509 0x41036D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3510 0x41036D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3511 0x41036DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3512 0x41036E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3513 0x41036E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3514 0x41036E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3515 0x41036EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3516 0x41036F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3517 0x41036F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3518 0x41036F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3519 0x41036FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3520 0x4103700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3521 0x4103704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3522 0x4103708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3523 0x410370C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3524 0x4103710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3525 0x4103714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3526 0x4103718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3527 0x410371C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3528 0x4103720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3529 0x4103724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3530 0x4103728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3531 0x410372C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3532 0x4103730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3533 0x4103734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3534 0x4103738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3535 0x410373C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3536 0x4103740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3537 0x4103744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3538 0x4103748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3539 0x410374C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3540 0x4103750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3541 0x4103754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3542 0x4103758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3543 0x410375C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3544 0x4103760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3545 0x4103764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3546 0x4103768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3547 0x410376C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3548 0x4103770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3549 0x4103774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3550 0x4103778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3551 0x410377C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3552 0x4103780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3553 0x4103784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3554 0x4103788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3555 0x410378C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3556 0x4103790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3557 0x4103794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3558 0x4103798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3559 0x410379C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3560 0x41037A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3561 0x41037A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3562 0x41037A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3563 0x41037AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3564 0x41037B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3565 0x41037B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3566 0x41037B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3567 0x41037BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3568 0x41037C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3569 0x41037C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3570 0x41037C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3571 0x41037CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3572 0x41037D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3573 0x41037D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3574 0x41037D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3575 0x41037DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3576 0x41037E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3577 0x41037E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3578 0x41037E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3579 0x41037EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3580 0x41037F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3581 0x41037F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3582 0x41037F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3583 0x41037FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3584 0x4103800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3585 0x4103804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3586 0x4103808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3587 0x410380C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3588 0x4103810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3589 0x4103814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3590 0x4103818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3591 0x410381C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3592 0x4103820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3593 0x4103824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3594 0x4103828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3595 0x410382C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3596 0x4103830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3597 0x4103834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3598 0x4103838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3599 0x410383C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3600 0x4103840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3601 0x4103844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3602 0x4103848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3603 0x410384C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3604 0x4103850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3605 0x4103854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3606 0x4103858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3607 0x410385C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3608 0x4103860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3609 0x4103864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3610 0x4103868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3611 0x410386C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3612 0x4103870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3613 0x4103874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3614 0x4103878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3615 0x410387C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3616 0x4103880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3617 0x4103884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3618 0x4103888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3619 0x410388C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3620 0x4103890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3621 0x4103894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3622 0x4103898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3623 0x410389C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3624 0x41038A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3625 0x41038A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3626 0x41038A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3627 0x41038AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3628 0x41038B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3629 0x41038B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3630 0x41038B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3631 0x41038BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3632 0x41038C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3633 0x41038C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3634 0x41038C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3635 0x41038CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3636 0x41038D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3637 0x41038D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3638 0x41038D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3639 0x41038DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3640 0x41038E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3641 0x41038E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3642 0x41038E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3643 0x41038EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3644 0x41038F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3645 0x41038F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3646 0x41038F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3647 0x41038FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3648 0x4103900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3649 0x4103904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3650 0x4103908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3651 0x410390C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3652 0x4103910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3653 0x4103914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3654 0x4103918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3655 0x410391C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3656 0x4103920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3657 0x4103924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3658 0x4103928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3659 0x410392C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3660 0x4103930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3661 0x4103934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3662 0x4103938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3663 0x410393C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3664 0x4103940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3665 0x4103944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3666 0x4103948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3667 0x410394C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3668 0x4103950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3669 0x4103954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3670 0x4103958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3671 0x410395C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3672 0x4103960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3673 0x4103964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3674 0x4103968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3675 0x410396C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3676 0x4103970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3677 0x4103974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3678 0x4103978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3679 0x410397C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3680 0x4103980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3681 0x4103984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3682 0x4103988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3683 0x410398C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3684 0x4103990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3685 0x4103994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3686 0x4103998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3687 0x410399C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3688 0x41039A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3689 0x41039A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3690 0x41039A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3691 0x41039AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3692 0x41039B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3693 0x41039B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3694 0x41039B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3695 0x41039BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3696 0x41039C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3697 0x41039C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3698 0x41039C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3699 0x41039CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3700 0x41039D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3701 0x41039D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3702 0x41039D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3703 0x41039DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3704 0x41039E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3705 0x41039E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3706 0x41039E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3707 0x41039EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3708 0x41039F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3709 0x41039F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3710 0x41039F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3711 0x41039FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3712 0x4103A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3713 0x4103A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3714 0x4103A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3715 0x4103A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3716 0x4103A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3717 0x4103A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3718 0x4103A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3719 0x4103A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3720 0x4103A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3721 0x4103A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3722 0x4103A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3723 0x4103A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3724 0x4103A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3725 0x4103A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3726 0x4103A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3727 0x4103A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3728 0x4103A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3729 0x4103A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3730 0x4103A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3731 0x4103A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3732 0x4103A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3733 0x4103A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3734 0x4103A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3735 0x4103A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3736 0x4103A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3737 0x4103A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3738 0x4103A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3739 0x4103A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3740 0x4103A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3741 0x4103A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3742 0x4103A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3743 0x4103A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3744 0x4103A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3745 0x4103A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3746 0x4103A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3747 0x4103A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3748 0x4103A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3749 0x4103A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3750 0x4103A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3751 0x4103A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3752 0x4103AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3753 0x4103AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3754 0x4103AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3755 0x4103AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3756 0x4103AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3757 0x4103AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3758 0x4103AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3759 0x4103ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3760 0x4103AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3761 0x4103AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3762 0x4103AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3763 0x4103ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3764 0x4103AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3765 0x4103AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3766 0x4103AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3767 0x4103ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3768 0x4103AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3769 0x4103AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3770 0x4103AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3771 0x4103AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3772 0x4103AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3773 0x4103AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3774 0x4103AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3775 0x4103AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3776 0x4103B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3777 0x4103B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3778 0x4103B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3779 0x4103B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3780 0x4103B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3781 0x4103B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3782 0x4103B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3783 0x4103B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3784 0x4103B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3785 0x4103B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3786 0x4103B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3787 0x4103B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3788 0x4103B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3789 0x4103B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3790 0x4103B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3791 0x4103B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3792 0x4103B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3793 0x4103B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3794 0x4103B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3795 0x4103B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3796 0x4103B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3797 0x4103B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3798 0x4103B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3799 0x4103B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3800 0x4103B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3801 0x4103B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3802 0x4103B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3803 0x4103B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3804 0x4103B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3805 0x4103B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3806 0x4103B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3807 0x4103B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3808 0x4103B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3809 0x4103B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3810 0x4103B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3811 0x4103B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3812 0x4103B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3813 0x4103B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3814 0x4103B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3815 0x4103B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3816 0x4103BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3817 0x4103BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3818 0x4103BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3819 0x4103BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3820 0x4103BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3821 0x4103BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3822 0x4103BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3823 0x4103BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3824 0x4103BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3825 0x4103BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3826 0x4103BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3827 0x4103BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3828 0x4103BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3829 0x4103BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3830 0x4103BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3831 0x4103BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3832 0x4103BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3833 0x4103BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3834 0x4103BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3835 0x4103BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3836 0x4103BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3837 0x4103BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3838 0x4103BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3839 0x4103BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3840 0x4103C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3841 0x4103C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3842 0x4103C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3843 0x4103C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3844 0x4103C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3845 0x4103C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3846 0x4103C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3847 0x4103C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3848 0x4103C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3849 0x4103C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3850 0x4103C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3851 0x4103C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3852 0x4103C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3853 0x4103C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3854 0x4103C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3855 0x4103C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3856 0x4103C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3857 0x4103C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3858 0x4103C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3859 0x4103C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3860 0x4103C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3861 0x4103C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3862 0x4103C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3863 0x4103C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3864 0x4103C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3865 0x4103C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3866 0x4103C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3867 0x4103C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3868 0x4103C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3869 0x4103C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3870 0x4103C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3871 0x4103C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3872 0x4103C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3873 0x4103C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3874 0x4103C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3875 0x4103C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3876 0x4103C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3877 0x4103C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3878 0x4103C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3879 0x4103C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3880 0x4103CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3881 0x4103CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3882 0x4103CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3883 0x4103CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3884 0x4103CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3885 0x4103CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3886 0x4103CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3887 0x4103CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3888 0x4103CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3889 0x4103CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3890 0x4103CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3891 0x4103CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3892 0x4103CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3893 0x4103CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3894 0x4103CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3895 0x4103CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3896 0x4103CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3897 0x4103CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3898 0x4103CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3899 0x4103CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3900 0x4103CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3901 0x4103CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3902 0x4103CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3903 0x4103CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3904 0x4103D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3905 0x4103D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3906 0x4103D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3907 0x4103D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3908 0x4103D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3909 0x4103D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3910 0x4103D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3911 0x4103D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3912 0x4103D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3913 0x4103D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3914 0x4103D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3915 0x4103D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3916 0x4103D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3917 0x4103D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3918 0x4103D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3919 0x4103D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3920 0x4103D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3921 0x4103D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3922 0x4103D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3923 0x4103D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3924 0x4103D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3925 0x4103D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3926 0x4103D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3927 0x4103D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3928 0x4103D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3929 0x4103D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3930 0x4103D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3931 0x4103D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3932 0x4103D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3933 0x4103D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3934 0x4103D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3935 0x4103D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3936 0x4103D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3937 0x4103D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3938 0x4103D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3939 0x4103D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3940 0x4103D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3941 0x4103D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3942 0x4103D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3943 0x4103D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3944 0x4103DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3945 0x4103DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3946 0x4103DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3947 0x4103DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3948 0x4103DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3949 0x4103DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3950 0x4103DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3951 0x4103DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3952 0x4103DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3953 0x4103DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3954 0x4103DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3955 0x4103DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3956 0x4103DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3957 0x4103DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3958 0x4103DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3959 0x4103DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3960 0x4103DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3961 0x4103DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3962 0x4103DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3963 0x4103DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3964 0x4103DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3965 0x4103DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3966 0x4103DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3967 0x4103DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3968 0x4103E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3969 0x4103E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3970 0x4103E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3971 0x4103E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3972 0x4103E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3973 0x4103E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3974 0x4103E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3975 0x4103E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3976 0x4103E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3977 0x4103E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3978 0x4103E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3979 0x4103E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3980 0x4103E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3981 0x4103E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3982 0x4103E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3983 0x4103E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3984 0x4103E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3985 0x4103E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3986 0x4103E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3987 0x4103E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3988 0x4103E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3989 0x4103E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3990 0x4103E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3991 0x4103E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3992 0x4103E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3993 0x4103E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3994 0x4103E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3995 0x4103E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3996 0x4103E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3997 0x4103E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3998 0x4103E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_3999 0x4103E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4000 0x4103E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4001 0x4103E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4002 0x4103E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4003 0x4103E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4004 0x4103E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4005 0x4103E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4006 0x4103E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4007 0x4103E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4008 0x4103EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4009 0x4103EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4010 0x4103EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4011 0x4103EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4012 0x4103EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4013 0x4103EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4014 0x4103EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4015 0x4103EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4016 0x4103EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4017 0x4103EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4018 0x4103EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4019 0x4103ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4020 0x4103ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4021 0x4103ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4022 0x4103ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4023 0x4103EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4024 0x4103EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4025 0x4103EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4026 0x4103EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4027 0x4103EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4028 0x4103EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4029 0x4103EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4030 0x4103EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4031 0x4103EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4032 0x4103F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4033 0x4103F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4034 0x4103F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4035 0x4103F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4036 0x4103F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4037 0x4103F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4038 0x4103F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4039 0x4103F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4040 0x4103F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4041 0x4103F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4042 0x4103F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4043 0x4103F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4044 0x4103F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4045 0x4103F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4046 0x4103F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4047 0x4103F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4048 0x4103F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4049 0x4103F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4050 0x4103F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4051 0x4103F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4052 0x4103F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4053 0x4103F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4054 0x4103F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4055 0x4103F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4056 0x4103F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4057 0x4103F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4058 0x4103F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4059 0x4103F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4060 0x4103F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4061 0x4103F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4062 0x4103F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4063 0x4103F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4064 0x4103F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4065 0x4103F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4066 0x4103F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4067 0x4103F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4068 0x4103F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4069 0x4103F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4070 0x4103F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4071 0x4103F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4072 0x4103FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4073 0x4103FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4074 0x4103FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4075 0x4103FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4076 0x4103FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4077 0x4103FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4078 0x4103FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4079 0x4103FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4080 0x4103FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4081 0x4103FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4082 0x4103FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4083 0x4103FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4084 0x4103FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4085 0x4103FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4086 0x4103FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4087 0x4103FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4088 0x4103FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4089 0x4103FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4090 0x4103FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4091 0x4103FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4092 0x4103FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4093 0x4103FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4094 0x4103FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4095 0x4103FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4096 0x4104000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4097 0x4104004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4098 0x4104008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4099 0x410400C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4100 0x4104010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4101 0x4104014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4102 0x4104018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4103 0x410401C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4104 0x4104020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4105 0x4104024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4106 0x4104028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4107 0x410402C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4108 0x4104030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4109 0x4104034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4110 0x4104038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4111 0x410403C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4112 0x4104040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4113 0x4104044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4114 0x4104048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4115 0x410404C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4116 0x4104050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4117 0x4104054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4118 0x4104058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4119 0x410405C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4120 0x4104060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4121 0x4104064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4122 0x4104068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4123 0x410406C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4124 0x4104070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4125 0x4104074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4126 0x4104078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4127 0x410407C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4128 0x4104080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4129 0x4104084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4130 0x4104088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4131 0x410408C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4132 0x4104090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4133 0x4104094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4134 0x4104098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4135 0x410409C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4136 0x41040A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4137 0x41040A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4138 0x41040A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4139 0x41040AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4140 0x41040B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4141 0x41040B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4142 0x41040B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4143 0x41040BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4144 0x41040C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4145 0x41040C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4146 0x41040C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4147 0x41040CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4148 0x41040D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4149 0x41040D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4150 0x41040D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4151 0x41040DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4152 0x41040E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4153 0x41040E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4154 0x41040E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4155 0x41040EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4156 0x41040F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4157 0x41040F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4158 0x41040F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4159 0x41040FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4160 0x4104100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4161 0x4104104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4162 0x4104108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4163 0x410410C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4164 0x4104110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4165 0x4104114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4166 0x4104118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4167 0x410411C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4168 0x4104120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4169 0x4104124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4170 0x4104128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4171 0x410412C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4172 0x4104130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4173 0x4104134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4174 0x4104138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4175 0x410413C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4176 0x4104140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4177 0x4104144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4178 0x4104148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4179 0x410414C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4180 0x4104150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4181 0x4104154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4182 0x4104158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4183 0x410415C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4184 0x4104160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4185 0x4104164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4186 0x4104168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4187 0x410416C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4188 0x4104170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4189 0x4104174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4190 0x4104178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4191 0x410417C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4192 0x4104180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4193 0x4104184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4194 0x4104188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4195 0x410418C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4196 0x4104190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4197 0x4104194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4198 0x4104198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4199 0x410419C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4200 0x41041A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4201 0x41041A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4202 0x41041A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4203 0x41041AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4204 0x41041B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4205 0x41041B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4206 0x41041B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4207 0x41041BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4208 0x41041C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4209 0x41041C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4210 0x41041C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4211 0x41041CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4212 0x41041D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4213 0x41041D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4214 0x41041D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4215 0x41041DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4216 0x41041E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4217 0x41041E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4218 0x41041E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4219 0x41041EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4220 0x41041F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4221 0x41041F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4222 0x41041F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4223 0x41041FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4224 0x4104200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4225 0x4104204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4226 0x4104208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4227 0x410420C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4228 0x4104210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4229 0x4104214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4230 0x4104218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4231 0x410421C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4232 0x4104220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4233 0x4104224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4234 0x4104228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4235 0x410422C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4236 0x4104230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4237 0x4104234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4238 0x4104238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4239 0x410423C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4240 0x4104240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4241 0x4104244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4242 0x4104248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4243 0x410424C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4244 0x4104250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4245 0x4104254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4246 0x4104258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4247 0x410425C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4248 0x4104260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4249 0x4104264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4250 0x4104268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4251 0x410426C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4252 0x4104270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4253 0x4104274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4254 0x4104278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4255 0x410427C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4256 0x4104280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4257 0x4104284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4258 0x4104288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4259 0x410428C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4260 0x4104290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4261 0x4104294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4262 0x4104298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4263 0x410429C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4264 0x41042A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4265 0x41042A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4266 0x41042A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4267 0x41042AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4268 0x41042B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4269 0x41042B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4270 0x41042B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4271 0x41042BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4272 0x41042C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4273 0x41042C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4274 0x41042C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4275 0x41042CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4276 0x41042D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4277 0x41042D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4278 0x41042D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4279 0x41042DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4280 0x41042E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4281 0x41042E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4282 0x41042E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4283 0x41042EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4284 0x41042F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4285 0x41042F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4286 0x41042F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4287 0x41042FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4288 0x4104300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4289 0x4104304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4290 0x4104308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4291 0x410430C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4292 0x4104310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4293 0x4104314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4294 0x4104318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4295 0x410431C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4296 0x4104320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4297 0x4104324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4298 0x4104328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4299 0x410432C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4300 0x4104330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4301 0x4104334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4302 0x4104338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4303 0x410433C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4304 0x4104340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4305 0x4104344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4306 0x4104348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4307 0x410434C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4308 0x4104350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4309 0x4104354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4310 0x4104358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4311 0x410435C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4312 0x4104360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4313 0x4104364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4314 0x4104368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4315 0x410436C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4316 0x4104370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4317 0x4104374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4318 0x4104378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4319 0x410437C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4320 0x4104380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4321 0x4104384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4322 0x4104388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4323 0x410438C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4324 0x4104390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4325 0x4104394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4326 0x4104398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4327 0x410439C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4328 0x41043A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4329 0x41043A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4330 0x41043A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4331 0x41043AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4332 0x41043B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4333 0x41043B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4334 0x41043B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4335 0x41043BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4336 0x41043C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4337 0x41043C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4338 0x41043C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4339 0x41043CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4340 0x41043D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4341 0x41043D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4342 0x41043D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4343 0x41043DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4344 0x41043E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4345 0x41043E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4346 0x41043E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4347 0x41043EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4348 0x41043F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4349 0x41043F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4350 0x41043F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4351 0x41043FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4352 0x4104400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4353 0x4104404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4354 0x4104408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4355 0x410440C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4356 0x4104410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4357 0x4104414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4358 0x4104418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4359 0x410441C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4360 0x4104420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4361 0x4104424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4362 0x4104428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4363 0x410442C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4364 0x4104430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4365 0x4104434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4366 0x4104438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4367 0x410443C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4368 0x4104440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4369 0x4104444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4370 0x4104448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4371 0x410444C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4372 0x4104450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4373 0x4104454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4374 0x4104458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4375 0x410445C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4376 0x4104460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4377 0x4104464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4378 0x4104468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4379 0x410446C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4380 0x4104470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4381 0x4104474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4382 0x4104478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4383 0x410447C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4384 0x4104480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4385 0x4104484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4386 0x4104488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4387 0x410448C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4388 0x4104490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4389 0x4104494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4390 0x4104498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4391 0x410449C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4392 0x41044A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4393 0x41044A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4394 0x41044A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4395 0x41044AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4396 0x41044B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4397 0x41044B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4398 0x41044B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4399 0x41044BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4400 0x41044C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4401 0x41044C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4402 0x41044C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4403 0x41044CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4404 0x41044D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4405 0x41044D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4406 0x41044D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4407 0x41044DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4408 0x41044E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4409 0x41044E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4410 0x41044E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4411 0x41044EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4412 0x41044F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4413 0x41044F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4414 0x41044F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4415 0x41044FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4416 0x4104500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4417 0x4104504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4418 0x4104508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4419 0x410450C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4420 0x4104510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4421 0x4104514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4422 0x4104518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4423 0x410451C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4424 0x4104520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4425 0x4104524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4426 0x4104528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4427 0x410452C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4428 0x4104530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4429 0x4104534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4430 0x4104538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4431 0x410453C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4432 0x4104540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4433 0x4104544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4434 0x4104548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4435 0x410454C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4436 0x4104550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4437 0x4104554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4438 0x4104558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4439 0x410455C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4440 0x4104560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4441 0x4104564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4442 0x4104568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4443 0x410456C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4444 0x4104570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4445 0x4104574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4446 0x4104578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4447 0x410457C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4448 0x4104580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4449 0x4104584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4450 0x4104588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4451 0x410458C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4452 0x4104590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4453 0x4104594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4454 0x4104598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4455 0x410459C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4456 0x41045A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4457 0x41045A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4458 0x41045A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4459 0x41045AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4460 0x41045B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4461 0x41045B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4462 0x41045B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4463 0x41045BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4464 0x41045C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4465 0x41045C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4466 0x41045C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4467 0x41045CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4468 0x41045D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4469 0x41045D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4470 0x41045D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4471 0x41045DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4472 0x41045E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4473 0x41045E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4474 0x41045E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4475 0x41045EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4476 0x41045F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4477 0x41045F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4478 0x41045F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4479 0x41045FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4480 0x4104600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4481 0x4104604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4482 0x4104608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4483 0x410460C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4484 0x4104610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4485 0x4104614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4486 0x4104618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4487 0x410461C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4488 0x4104620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4489 0x4104624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4490 0x4104628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4491 0x410462C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4492 0x4104630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4493 0x4104634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4494 0x4104638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4495 0x410463C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4496 0x4104640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4497 0x4104644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4498 0x4104648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4499 0x410464C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4500 0x4104650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4501 0x4104654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4502 0x4104658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4503 0x410465C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4504 0x4104660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4505 0x4104664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4506 0x4104668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4507 0x410466C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4508 0x4104670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4509 0x4104674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4510 0x4104678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4511 0x410467C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4512 0x4104680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4513 0x4104684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4514 0x4104688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4515 0x410468C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4516 0x4104690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4517 0x4104694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4518 0x4104698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4519 0x410469C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4520 0x41046A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4521 0x41046A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4522 0x41046A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4523 0x41046AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4524 0x41046B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4525 0x41046B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4526 0x41046B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4527 0x41046BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4528 0x41046C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4529 0x41046C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4530 0x41046C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4531 0x41046CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4532 0x41046D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4533 0x41046D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4534 0x41046D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4535 0x41046DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4536 0x41046E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4537 0x41046E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4538 0x41046E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4539 0x41046EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4540 0x41046F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4541 0x41046F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4542 0x41046F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4543 0x41046FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4544 0x4104700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4545 0x4104704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4546 0x4104708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4547 0x410470C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4548 0x4104710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4549 0x4104714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4550 0x4104718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4551 0x410471C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4552 0x4104720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4553 0x4104724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4554 0x4104728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4555 0x410472C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4556 0x4104730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4557 0x4104734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4558 0x4104738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4559 0x410473C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4560 0x4104740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4561 0x4104744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4562 0x4104748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4563 0x410474C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4564 0x4104750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4565 0x4104754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4566 0x4104758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4567 0x410475C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4568 0x4104760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4569 0x4104764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4570 0x4104768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4571 0x410476C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4572 0x4104770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4573 0x4104774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4574 0x4104778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4575 0x410477C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4576 0x4104780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4577 0x4104784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4578 0x4104788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4579 0x410478C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4580 0x4104790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4581 0x4104794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4582 0x4104798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4583 0x410479C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4584 0x41047A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4585 0x41047A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4586 0x41047A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4587 0x41047AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4588 0x41047B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4589 0x41047B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4590 0x41047B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4591 0x41047BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4592 0x41047C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4593 0x41047C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4594 0x41047C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4595 0x41047CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4596 0x41047D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4597 0x41047D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4598 0x41047D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4599 0x41047DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4600 0x41047E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4601 0x41047E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4602 0x41047E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4603 0x41047EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4604 0x41047F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4605 0x41047F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4606 0x41047F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4607 0x41047FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4608 0x4104800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4609 0x4104804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4610 0x4104808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4611 0x410480C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4612 0x4104810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4613 0x4104814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4614 0x4104818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4615 0x410481C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4616 0x4104820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4617 0x4104824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4618 0x4104828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4619 0x410482C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4620 0x4104830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4621 0x4104834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4622 0x4104838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4623 0x410483C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4624 0x4104840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4625 0x4104844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4626 0x4104848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4627 0x410484C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4628 0x4104850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4629 0x4104854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4630 0x4104858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4631 0x410485C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4632 0x4104860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4633 0x4104864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4634 0x4104868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4635 0x410486C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4636 0x4104870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4637 0x4104874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4638 0x4104878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4639 0x410487C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4640 0x4104880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4641 0x4104884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4642 0x4104888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4643 0x410488C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4644 0x4104890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4645 0x4104894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4646 0x4104898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4647 0x410489C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4648 0x41048A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4649 0x41048A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4650 0x41048A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4651 0x41048AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4652 0x41048B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4653 0x41048B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4654 0x41048B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4655 0x41048BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4656 0x41048C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4657 0x41048C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4658 0x41048C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4659 0x41048CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4660 0x41048D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4661 0x41048D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4662 0x41048D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4663 0x41048DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4664 0x41048E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4665 0x41048E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4666 0x41048E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4667 0x41048EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4668 0x41048F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4669 0x41048F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4670 0x41048F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4671 0x41048FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4672 0x4104900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4673 0x4104904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4674 0x4104908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4675 0x410490C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4676 0x4104910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4677 0x4104914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4678 0x4104918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4679 0x410491C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4680 0x4104920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4681 0x4104924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4682 0x4104928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4683 0x410492C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4684 0x4104930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4685 0x4104934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4686 0x4104938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4687 0x410493C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4688 0x4104940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4689 0x4104944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4690 0x4104948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4691 0x410494C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4692 0x4104950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4693 0x4104954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4694 0x4104958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4695 0x410495C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4696 0x4104960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4697 0x4104964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4698 0x4104968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4699 0x410496C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4700 0x4104970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4701 0x4104974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4702 0x4104978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4703 0x410497C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4704 0x4104980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4705 0x4104984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4706 0x4104988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4707 0x410498C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4708 0x4104990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4709 0x4104994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4710 0x4104998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4711 0x410499C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4712 0x41049A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4713 0x41049A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4714 0x41049A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4715 0x41049AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4716 0x41049B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4717 0x41049B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4718 0x41049B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4719 0x41049BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4720 0x41049C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4721 0x41049C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4722 0x41049C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4723 0x41049CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4724 0x41049D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4725 0x41049D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4726 0x41049D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4727 0x41049DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4728 0x41049E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4729 0x41049E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4730 0x41049E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4731 0x41049EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4732 0x41049F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4733 0x41049F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4734 0x41049F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4735 0x41049FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4736 0x4104A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4737 0x4104A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4738 0x4104A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4739 0x4104A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4740 0x4104A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4741 0x4104A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4742 0x4104A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4743 0x4104A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4744 0x4104A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4745 0x4104A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4746 0x4104A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4747 0x4104A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4748 0x4104A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4749 0x4104A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4750 0x4104A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4751 0x4104A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4752 0x4104A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4753 0x4104A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4754 0x4104A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4755 0x4104A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4756 0x4104A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4757 0x4104A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4758 0x4104A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4759 0x4104A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4760 0x4104A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4761 0x4104A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4762 0x4104A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4763 0x4104A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4764 0x4104A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4765 0x4104A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4766 0x4104A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4767 0x4104A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4768 0x4104A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4769 0x4104A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4770 0x4104A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4771 0x4104A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4772 0x4104A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4773 0x4104A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4774 0x4104A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4775 0x4104A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4776 0x4104AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4777 0x4104AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4778 0x4104AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4779 0x4104AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4780 0x4104AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4781 0x4104AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4782 0x4104AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4783 0x4104ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4784 0x4104AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4785 0x4104AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4786 0x4104AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4787 0x4104ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4788 0x4104AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4789 0x4104AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4790 0x4104AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4791 0x4104ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4792 0x4104AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4793 0x4104AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4794 0x4104AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4795 0x4104AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4796 0x4104AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4797 0x4104AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4798 0x4104AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4799 0x4104AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4800 0x4104B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4801 0x4104B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4802 0x4104B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4803 0x4104B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4804 0x4104B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4805 0x4104B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4806 0x4104B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4807 0x4104B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4808 0x4104B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4809 0x4104B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4810 0x4104B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4811 0x4104B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4812 0x4104B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4813 0x4104B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4814 0x4104B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4815 0x4104B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4816 0x4104B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4817 0x4104B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4818 0x4104B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4819 0x4104B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4820 0x4104B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4821 0x4104B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4822 0x4104B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4823 0x4104B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4824 0x4104B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4825 0x4104B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4826 0x4104B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4827 0x4104B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4828 0x4104B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4829 0x4104B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4830 0x4104B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4831 0x4104B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4832 0x4104B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4833 0x4104B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4834 0x4104B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4835 0x4104B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4836 0x4104B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4837 0x4104B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4838 0x4104B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4839 0x4104B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4840 0x4104BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4841 0x4104BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4842 0x4104BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4843 0x4104BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4844 0x4104BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4845 0x4104BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4846 0x4104BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4847 0x4104BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4848 0x4104BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4849 0x4104BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4850 0x4104BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4851 0x4104BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4852 0x4104BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4853 0x4104BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4854 0x4104BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4855 0x4104BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4856 0x4104BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4857 0x4104BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4858 0x4104BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4859 0x4104BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4860 0x4104BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4861 0x4104BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4862 0x4104BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4863 0x4104BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4864 0x4104C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4865 0x4104C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4866 0x4104C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4867 0x4104C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4868 0x4104C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4869 0x4104C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4870 0x4104C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4871 0x4104C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4872 0x4104C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4873 0x4104C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4874 0x4104C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4875 0x4104C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4876 0x4104C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4877 0x4104C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4878 0x4104C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4879 0x4104C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4880 0x4104C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4881 0x4104C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4882 0x4104C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4883 0x4104C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4884 0x4104C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4885 0x4104C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4886 0x4104C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4887 0x4104C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4888 0x4104C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4889 0x4104C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4890 0x4104C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4891 0x4104C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4892 0x4104C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4893 0x4104C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4894 0x4104C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4895 0x4104C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4896 0x4104C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4897 0x4104C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4898 0x4104C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4899 0x4104C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4900 0x4104C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4901 0x4104C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4902 0x4104C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4903 0x4104C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4904 0x4104CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4905 0x4104CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4906 0x4104CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4907 0x4104CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4908 0x4104CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4909 0x4104CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4910 0x4104CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4911 0x4104CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4912 0x4104CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4913 0x4104CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4914 0x4104CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4915 0x4104CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4916 0x4104CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4917 0x4104CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4918 0x4104CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4919 0x4104CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4920 0x4104CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4921 0x4104CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4922 0x4104CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4923 0x4104CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4924 0x4104CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4925 0x4104CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4926 0x4104CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4927 0x4104CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4928 0x4104D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4929 0x4104D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4930 0x4104D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4931 0x4104D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4932 0x4104D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4933 0x4104D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4934 0x4104D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4935 0x4104D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4936 0x4104D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4937 0x4104D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4938 0x4104D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4939 0x4104D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4940 0x4104D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4941 0x4104D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4942 0x4104D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4943 0x4104D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4944 0x4104D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4945 0x4104D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4946 0x4104D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4947 0x4104D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4948 0x4104D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4949 0x4104D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4950 0x4104D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4951 0x4104D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4952 0x4104D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4953 0x4104D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4954 0x4104D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4955 0x4104D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4956 0x4104D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4957 0x4104D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4958 0x4104D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4959 0x4104D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4960 0x4104D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4961 0x4104D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4962 0x4104D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4963 0x4104D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4964 0x4104D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4965 0x4104D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4966 0x4104D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4967 0x4104D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4968 0x4104DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4969 0x4104DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4970 0x4104DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4971 0x4104DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4972 0x4104DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4973 0x4104DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4974 0x4104DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4975 0x4104DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4976 0x4104DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4977 0x4104DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4978 0x4104DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4979 0x4104DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4980 0x4104DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4981 0x4104DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4982 0x4104DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4983 0x4104DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4984 0x4104DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4985 0x4104DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4986 0x4104DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4987 0x4104DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4988 0x4104DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4989 0x4104DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4990 0x4104DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4991 0x4104DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4992 0x4104E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4993 0x4104E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4994 0x4104E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4995 0x4104E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4996 0x4104E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4997 0x4104E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4998 0x4104E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_4999 0x4104E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5000 0x4104E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5001 0x4104E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5002 0x4104E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5003 0x4104E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5004 0x4104E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5005 0x4104E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5006 0x4104E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5007 0x4104E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5008 0x4104E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5009 0x4104E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5010 0x4104E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5011 0x4104E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5012 0x4104E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5013 0x4104E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5014 0x4104E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5015 0x4104E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5016 0x4104E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5017 0x4104E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5018 0x4104E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5019 0x4104E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5020 0x4104E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5021 0x4104E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5022 0x4104E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5023 0x4104E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5024 0x4104E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5025 0x4104E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5026 0x4104E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5027 0x4104E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5028 0x4104E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5029 0x4104E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5030 0x4104E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5031 0x4104E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5032 0x4104EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5033 0x4104EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5034 0x4104EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5035 0x4104EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5036 0x4104EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5037 0x4104EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5038 0x4104EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5039 0x4104EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5040 0x4104EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5041 0x4104EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5042 0x4104EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5043 0x4104ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5044 0x4104ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5045 0x4104ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5046 0x4104ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5047 0x4104EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5048 0x4104EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5049 0x4104EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5050 0x4104EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5051 0x4104EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5052 0x4104EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5053 0x4104EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5054 0x4104EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5055 0x4104EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5056 0x4104F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5057 0x4104F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5058 0x4104F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5059 0x4104F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5060 0x4104F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5061 0x4104F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5062 0x4104F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5063 0x4104F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5064 0x4104F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5065 0x4104F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5066 0x4104F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5067 0x4104F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5068 0x4104F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5069 0x4104F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5070 0x4104F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5071 0x4104F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5072 0x4104F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5073 0x4104F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5074 0x4104F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5075 0x4104F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5076 0x4104F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5077 0x4104F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5078 0x4104F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5079 0x4104F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5080 0x4104F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5081 0x4104F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5082 0x4104F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5083 0x4104F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5084 0x4104F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5085 0x4104F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5086 0x4104F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5087 0x4104F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5088 0x4104F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5089 0x4104F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5090 0x4104F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5091 0x4104F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5092 0x4104F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5093 0x4104F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5094 0x4104F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5095 0x4104F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5096 0x4104FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5097 0x4104FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5098 0x4104FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5099 0x4104FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5100 0x4104FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5101 0x4104FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5102 0x4104FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5103 0x4104FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5104 0x4104FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5105 0x4104FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5106 0x4104FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5107 0x4104FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5108 0x4104FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5109 0x4104FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5110 0x4104FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5111 0x4104FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5112 0x4104FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5113 0x4104FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5114 0x4104FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5115 0x4104FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5116 0x4104FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5117 0x4104FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5118 0x4104FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5119 0x4104FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5120 0x4105000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5121 0x4105004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5122 0x4105008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5123 0x410500C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5124 0x4105010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5125 0x4105014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5126 0x4105018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5127 0x410501C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5128 0x4105020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5129 0x4105024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5130 0x4105028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5131 0x410502C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5132 0x4105030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5133 0x4105034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5134 0x4105038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5135 0x410503C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5136 0x4105040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5137 0x4105044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5138 0x4105048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5139 0x410504C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5140 0x4105050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5141 0x4105054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5142 0x4105058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5143 0x410505C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5144 0x4105060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5145 0x4105064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5146 0x4105068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5147 0x410506C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5148 0x4105070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5149 0x4105074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5150 0x4105078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5151 0x410507C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5152 0x4105080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5153 0x4105084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5154 0x4105088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5155 0x410508C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5156 0x4105090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5157 0x4105094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5158 0x4105098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5159 0x410509C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5160 0x41050A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5161 0x41050A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5162 0x41050A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5163 0x41050AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5164 0x41050B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5165 0x41050B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5166 0x41050B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5167 0x41050BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5168 0x41050C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5169 0x41050C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5170 0x41050C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5171 0x41050CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5172 0x41050D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5173 0x41050D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5174 0x41050D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5175 0x41050DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5176 0x41050E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5177 0x41050E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5178 0x41050E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5179 0x41050EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5180 0x41050F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5181 0x41050F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5182 0x41050F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5183 0x41050FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5184 0x4105100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5185 0x4105104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5186 0x4105108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5187 0x410510C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5188 0x4105110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5189 0x4105114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5190 0x4105118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5191 0x410511C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5192 0x4105120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5193 0x4105124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5194 0x4105128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5195 0x410512C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5196 0x4105130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5197 0x4105134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5198 0x4105138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5199 0x410513C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5200 0x4105140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5201 0x4105144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5202 0x4105148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5203 0x410514C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5204 0x4105150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5205 0x4105154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5206 0x4105158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5207 0x410515C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5208 0x4105160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5209 0x4105164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5210 0x4105168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5211 0x410516C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5212 0x4105170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5213 0x4105174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5214 0x4105178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5215 0x410517C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5216 0x4105180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5217 0x4105184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5218 0x4105188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5219 0x410518C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5220 0x4105190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5221 0x4105194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5222 0x4105198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5223 0x410519C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5224 0x41051A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5225 0x41051A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5226 0x41051A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5227 0x41051AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5228 0x41051B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5229 0x41051B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5230 0x41051B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5231 0x41051BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5232 0x41051C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5233 0x41051C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5234 0x41051C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5235 0x41051CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5236 0x41051D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5237 0x41051D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5238 0x41051D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5239 0x41051DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5240 0x41051E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5241 0x41051E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5242 0x41051E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5243 0x41051EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5244 0x41051F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5245 0x41051F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5246 0x41051F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5247 0x41051FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5248 0x4105200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5249 0x4105204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5250 0x4105208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5251 0x410520C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5252 0x4105210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5253 0x4105214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5254 0x4105218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5255 0x410521C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5256 0x4105220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5257 0x4105224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5258 0x4105228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5259 0x410522C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5260 0x4105230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5261 0x4105234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5262 0x4105238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5263 0x410523C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5264 0x4105240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5265 0x4105244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5266 0x4105248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5267 0x410524C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5268 0x4105250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5269 0x4105254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5270 0x4105258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5271 0x410525C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5272 0x4105260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5273 0x4105264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5274 0x4105268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5275 0x410526C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5276 0x4105270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5277 0x4105274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5278 0x4105278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5279 0x410527C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5280 0x4105280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5281 0x4105284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5282 0x4105288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5283 0x410528C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5284 0x4105290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5285 0x4105294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5286 0x4105298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5287 0x410529C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5288 0x41052A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5289 0x41052A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5290 0x41052A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5291 0x41052AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5292 0x41052B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5293 0x41052B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5294 0x41052B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5295 0x41052BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5296 0x41052C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5297 0x41052C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5298 0x41052C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5299 0x41052CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5300 0x41052D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5301 0x41052D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5302 0x41052D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5303 0x41052DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5304 0x41052E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5305 0x41052E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5306 0x41052E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5307 0x41052EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5308 0x41052F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5309 0x41052F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5310 0x41052F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5311 0x41052FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5312 0x4105300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5313 0x4105304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5314 0x4105308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5315 0x410530C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5316 0x4105310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5317 0x4105314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5318 0x4105318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5319 0x410531C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5320 0x4105320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5321 0x4105324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5322 0x4105328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5323 0x410532C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5324 0x4105330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5325 0x4105334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5326 0x4105338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5327 0x410533C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5328 0x4105340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5329 0x4105344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5330 0x4105348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5331 0x410534C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5332 0x4105350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5333 0x4105354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5334 0x4105358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5335 0x410535C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5336 0x4105360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5337 0x4105364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5338 0x4105368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5339 0x410536C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5340 0x4105370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5341 0x4105374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5342 0x4105378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5343 0x410537C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5344 0x4105380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5345 0x4105384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5346 0x4105388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5347 0x410538C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5348 0x4105390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5349 0x4105394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5350 0x4105398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5351 0x410539C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5352 0x41053A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5353 0x41053A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5354 0x41053A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5355 0x41053AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5356 0x41053B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5357 0x41053B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5358 0x41053B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5359 0x41053BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5360 0x41053C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5361 0x41053C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5362 0x41053C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5363 0x41053CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5364 0x41053D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5365 0x41053D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5366 0x41053D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5367 0x41053DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5368 0x41053E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5369 0x41053E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5370 0x41053E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5371 0x41053EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5372 0x41053F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5373 0x41053F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5374 0x41053F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5375 0x41053FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5376 0x4105400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5377 0x4105404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5378 0x4105408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5379 0x410540C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5380 0x4105410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5381 0x4105414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5382 0x4105418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5383 0x410541C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5384 0x4105420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5385 0x4105424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5386 0x4105428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5387 0x410542C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5388 0x4105430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5389 0x4105434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5390 0x4105438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5391 0x410543C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5392 0x4105440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5393 0x4105444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5394 0x4105448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5395 0x410544C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5396 0x4105450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5397 0x4105454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5398 0x4105458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5399 0x410545C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5400 0x4105460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5401 0x4105464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5402 0x4105468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5403 0x410546C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5404 0x4105470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5405 0x4105474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5406 0x4105478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5407 0x410547C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5408 0x4105480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5409 0x4105484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5410 0x4105488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5411 0x410548C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5412 0x4105490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5413 0x4105494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5414 0x4105498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5415 0x410549C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5416 0x41054A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5417 0x41054A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5418 0x41054A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5419 0x41054AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5420 0x41054B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5421 0x41054B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5422 0x41054B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5423 0x41054BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5424 0x41054C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5425 0x41054C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5426 0x41054C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5427 0x41054CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5428 0x41054D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5429 0x41054D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5430 0x41054D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5431 0x41054DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5432 0x41054E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5433 0x41054E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5434 0x41054E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5435 0x41054EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5436 0x41054F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5437 0x41054F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5438 0x41054F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5439 0x41054FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5440 0x4105500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5441 0x4105504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5442 0x4105508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5443 0x410550C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5444 0x4105510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5445 0x4105514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5446 0x4105518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5447 0x410551C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5448 0x4105520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5449 0x4105524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5450 0x4105528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5451 0x410552C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5452 0x4105530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5453 0x4105534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5454 0x4105538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5455 0x410553C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5456 0x4105540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5457 0x4105544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5458 0x4105548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5459 0x410554C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5460 0x4105550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5461 0x4105554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5462 0x4105558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5463 0x410555C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5464 0x4105560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5465 0x4105564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5466 0x4105568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5467 0x410556C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5468 0x4105570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5469 0x4105574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5470 0x4105578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5471 0x410557C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5472 0x4105580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5473 0x4105584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5474 0x4105588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5475 0x410558C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5476 0x4105590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5477 0x4105594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5478 0x4105598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5479 0x410559C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5480 0x41055A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5481 0x41055A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5482 0x41055A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5483 0x41055AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5484 0x41055B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5485 0x41055B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5486 0x41055B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5487 0x41055BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5488 0x41055C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5489 0x41055C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5490 0x41055C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5491 0x41055CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5492 0x41055D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5493 0x41055D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5494 0x41055D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5495 0x41055DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5496 0x41055E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5497 0x41055E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5498 0x41055E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5499 0x41055EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5500 0x41055F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5501 0x41055F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5502 0x41055F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5503 0x41055FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5504 0x4105600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5505 0x4105604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5506 0x4105608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5507 0x410560C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5508 0x4105610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5509 0x4105614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5510 0x4105618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5511 0x410561C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5512 0x4105620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5513 0x4105624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5514 0x4105628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5515 0x410562C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5516 0x4105630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5517 0x4105634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5518 0x4105638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5519 0x410563C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5520 0x4105640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5521 0x4105644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5522 0x4105648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5523 0x410564C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5524 0x4105650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5525 0x4105654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5526 0x4105658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5527 0x410565C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5528 0x4105660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5529 0x4105664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5530 0x4105668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5531 0x410566C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5532 0x4105670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5533 0x4105674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5534 0x4105678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5535 0x410567C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5536 0x4105680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5537 0x4105684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5538 0x4105688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5539 0x410568C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5540 0x4105690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5541 0x4105694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5542 0x4105698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5543 0x410569C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5544 0x41056A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5545 0x41056A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5546 0x41056A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5547 0x41056AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5548 0x41056B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5549 0x41056B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5550 0x41056B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5551 0x41056BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5552 0x41056C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5553 0x41056C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5554 0x41056C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5555 0x41056CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5556 0x41056D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5557 0x41056D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5558 0x41056D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5559 0x41056DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5560 0x41056E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5561 0x41056E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5562 0x41056E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5563 0x41056EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5564 0x41056F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5565 0x41056F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5566 0x41056F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5567 0x41056FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5568 0x4105700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5569 0x4105704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5570 0x4105708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5571 0x410570C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5572 0x4105710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5573 0x4105714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5574 0x4105718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5575 0x410571C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5576 0x4105720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5577 0x4105724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5578 0x4105728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5579 0x410572C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5580 0x4105730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5581 0x4105734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5582 0x4105738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5583 0x410573C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5584 0x4105740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5585 0x4105744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5586 0x4105748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5587 0x410574C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5588 0x4105750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5589 0x4105754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5590 0x4105758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5591 0x410575C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5592 0x4105760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5593 0x4105764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5594 0x4105768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5595 0x410576C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5596 0x4105770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5597 0x4105774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5598 0x4105778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5599 0x410577C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5600 0x4105780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5601 0x4105784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5602 0x4105788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5603 0x410578C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5604 0x4105790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5605 0x4105794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5606 0x4105798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5607 0x410579C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5608 0x41057A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5609 0x41057A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5610 0x41057A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5611 0x41057AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5612 0x41057B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5613 0x41057B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5614 0x41057B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5615 0x41057BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5616 0x41057C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5617 0x41057C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5618 0x41057C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5619 0x41057CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5620 0x41057D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5621 0x41057D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5622 0x41057D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5623 0x41057DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5624 0x41057E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5625 0x41057E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5626 0x41057E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5627 0x41057EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5628 0x41057F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5629 0x41057F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5630 0x41057F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5631 0x41057FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5632 0x4105800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5633 0x4105804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5634 0x4105808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5635 0x410580C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5636 0x4105810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5637 0x4105814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5638 0x4105818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5639 0x410581C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5640 0x4105820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5641 0x4105824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5642 0x4105828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5643 0x410582C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5644 0x4105830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5645 0x4105834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5646 0x4105838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5647 0x410583C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5648 0x4105840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5649 0x4105844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5650 0x4105848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5651 0x410584C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5652 0x4105850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5653 0x4105854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5654 0x4105858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5655 0x410585C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5656 0x4105860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5657 0x4105864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5658 0x4105868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5659 0x410586C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5660 0x4105870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5661 0x4105874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5662 0x4105878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5663 0x410587C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5664 0x4105880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5665 0x4105884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5666 0x4105888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5667 0x410588C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5668 0x4105890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5669 0x4105894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5670 0x4105898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5671 0x410589C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5672 0x41058A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5673 0x41058A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5674 0x41058A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5675 0x41058AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5676 0x41058B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5677 0x41058B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5678 0x41058B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5679 0x41058BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5680 0x41058C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5681 0x41058C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5682 0x41058C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5683 0x41058CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5684 0x41058D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5685 0x41058D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5686 0x41058D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5687 0x41058DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5688 0x41058E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5689 0x41058E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5690 0x41058E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5691 0x41058EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5692 0x41058F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5693 0x41058F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5694 0x41058F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5695 0x41058FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5696 0x4105900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5697 0x4105904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5698 0x4105908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5699 0x410590C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5700 0x4105910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5701 0x4105914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5702 0x4105918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5703 0x410591C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5704 0x4105920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5705 0x4105924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5706 0x4105928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5707 0x410592C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5708 0x4105930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5709 0x4105934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5710 0x4105938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5711 0x410593C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5712 0x4105940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5713 0x4105944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5714 0x4105948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5715 0x410594C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5716 0x4105950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5717 0x4105954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5718 0x4105958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5719 0x410595C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5720 0x4105960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5721 0x4105964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5722 0x4105968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5723 0x410596C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5724 0x4105970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5725 0x4105974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5726 0x4105978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5727 0x410597C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5728 0x4105980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5729 0x4105984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5730 0x4105988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5731 0x410598C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5732 0x4105990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5733 0x4105994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5734 0x4105998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5735 0x410599C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5736 0x41059A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5737 0x41059A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5738 0x41059A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5739 0x41059AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5740 0x41059B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5741 0x41059B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5742 0x41059B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5743 0x41059BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5744 0x41059C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5745 0x41059C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5746 0x41059C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5747 0x41059CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5748 0x41059D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5749 0x41059D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5750 0x41059D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5751 0x41059DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5752 0x41059E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5753 0x41059E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5754 0x41059E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5755 0x41059EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5756 0x41059F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5757 0x41059F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5758 0x41059F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5759 0x41059FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5760 0x4105A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5761 0x4105A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5762 0x4105A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5763 0x4105A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5764 0x4105A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5765 0x4105A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5766 0x4105A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5767 0x4105A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5768 0x4105A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5769 0x4105A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5770 0x4105A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5771 0x4105A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5772 0x4105A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5773 0x4105A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5774 0x4105A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5775 0x4105A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5776 0x4105A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5777 0x4105A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5778 0x4105A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5779 0x4105A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5780 0x4105A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5781 0x4105A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5782 0x4105A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5783 0x4105A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5784 0x4105A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5785 0x4105A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5786 0x4105A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5787 0x4105A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5788 0x4105A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5789 0x4105A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5790 0x4105A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5791 0x4105A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5792 0x4105A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5793 0x4105A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5794 0x4105A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5795 0x4105A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5796 0x4105A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5797 0x4105A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5798 0x4105A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5799 0x4105A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5800 0x4105AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5801 0x4105AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5802 0x4105AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5803 0x4105AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5804 0x4105AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5805 0x4105AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5806 0x4105AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5807 0x4105ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5808 0x4105AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5809 0x4105AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5810 0x4105AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5811 0x4105ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5812 0x4105AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5813 0x4105AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5814 0x4105AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5815 0x4105ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5816 0x4105AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5817 0x4105AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5818 0x4105AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5819 0x4105AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5820 0x4105AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5821 0x4105AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5822 0x4105AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5823 0x4105AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5824 0x4105B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5825 0x4105B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5826 0x4105B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5827 0x4105B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5828 0x4105B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5829 0x4105B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5830 0x4105B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5831 0x4105B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5832 0x4105B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5833 0x4105B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5834 0x4105B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5835 0x4105B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5836 0x4105B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5837 0x4105B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5838 0x4105B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5839 0x4105B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5840 0x4105B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5841 0x4105B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5842 0x4105B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5843 0x4105B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5844 0x4105B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5845 0x4105B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5846 0x4105B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5847 0x4105B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5848 0x4105B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5849 0x4105B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5850 0x4105B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5851 0x4105B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5852 0x4105B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5853 0x4105B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5854 0x4105B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5855 0x4105B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5856 0x4105B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5857 0x4105B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5858 0x4105B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5859 0x4105B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5860 0x4105B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5861 0x4105B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5862 0x4105B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5863 0x4105B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5864 0x4105BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5865 0x4105BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5866 0x4105BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5867 0x4105BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5868 0x4105BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5869 0x4105BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5870 0x4105BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5871 0x4105BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5872 0x4105BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5873 0x4105BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5874 0x4105BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5875 0x4105BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5876 0x4105BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5877 0x4105BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5878 0x4105BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5879 0x4105BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5880 0x4105BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5881 0x4105BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5882 0x4105BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5883 0x4105BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5884 0x4105BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5885 0x4105BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5886 0x4105BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5887 0x4105BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5888 0x4105C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5889 0x4105C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5890 0x4105C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5891 0x4105C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5892 0x4105C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5893 0x4105C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5894 0x4105C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5895 0x4105C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5896 0x4105C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5897 0x4105C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5898 0x4105C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5899 0x4105C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5900 0x4105C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5901 0x4105C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5902 0x4105C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5903 0x4105C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5904 0x4105C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5905 0x4105C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5906 0x4105C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5907 0x4105C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5908 0x4105C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5909 0x4105C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5910 0x4105C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5911 0x4105C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5912 0x4105C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5913 0x4105C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5914 0x4105C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5915 0x4105C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5916 0x4105C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5917 0x4105C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5918 0x4105C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5919 0x4105C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5920 0x4105C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5921 0x4105C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5922 0x4105C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5923 0x4105C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5924 0x4105C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5925 0x4105C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5926 0x4105C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5927 0x4105C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5928 0x4105CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5929 0x4105CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5930 0x4105CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5931 0x4105CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5932 0x4105CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5933 0x4105CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5934 0x4105CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5935 0x4105CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5936 0x4105CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5937 0x4105CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5938 0x4105CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5939 0x4105CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5940 0x4105CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5941 0x4105CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5942 0x4105CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5943 0x4105CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5944 0x4105CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5945 0x4105CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5946 0x4105CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5947 0x4105CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5948 0x4105CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5949 0x4105CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5950 0x4105CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5951 0x4105CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5952 0x4105D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5953 0x4105D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5954 0x4105D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5955 0x4105D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5956 0x4105D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5957 0x4105D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5958 0x4105D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5959 0x4105D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5960 0x4105D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5961 0x4105D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5962 0x4105D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5963 0x4105D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5964 0x4105D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5965 0x4105D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5966 0x4105D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5967 0x4105D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5968 0x4105D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5969 0x4105D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5970 0x4105D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5971 0x4105D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5972 0x4105D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5973 0x4105D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5974 0x4105D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5975 0x4105D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5976 0x4105D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5977 0x4105D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5978 0x4105D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5979 0x4105D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5980 0x4105D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5981 0x4105D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5982 0x4105D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5983 0x4105D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5984 0x4105D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5985 0x4105D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5986 0x4105D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5987 0x4105D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5988 0x4105D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5989 0x4105D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5990 0x4105D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5991 0x4105D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5992 0x4105DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5993 0x4105DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5994 0x4105DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5995 0x4105DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5996 0x4105DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5997 0x4105DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5998 0x4105DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_5999 0x4105DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6000 0x4105DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6001 0x4105DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6002 0x4105DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6003 0x4105DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6004 0x4105DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6005 0x4105DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6006 0x4105DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6007 0x4105DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6008 0x4105DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6009 0x4105DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6010 0x4105DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6011 0x4105DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6012 0x4105DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6013 0x4105DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6014 0x4105DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6015 0x4105DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6016 0x4105E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6017 0x4105E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6018 0x4105E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6019 0x4105E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6020 0x4105E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6021 0x4105E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6022 0x4105E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6023 0x4105E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6024 0x4105E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6025 0x4105E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6026 0x4105E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6027 0x4105E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6028 0x4105E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6029 0x4105E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6030 0x4105E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6031 0x4105E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6032 0x4105E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6033 0x4105E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6034 0x4105E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6035 0x4105E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6036 0x4105E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6037 0x4105E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6038 0x4105E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6039 0x4105E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6040 0x4105E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6041 0x4105E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6042 0x4105E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6043 0x4105E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6044 0x4105E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6045 0x4105E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6046 0x4105E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6047 0x4105E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6048 0x4105E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6049 0x4105E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6050 0x4105E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6051 0x4105E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6052 0x4105E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6053 0x4105E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6054 0x4105E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6055 0x4105E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6056 0x4105EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6057 0x4105EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6058 0x4105EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6059 0x4105EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6060 0x4105EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6061 0x4105EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6062 0x4105EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6063 0x4105EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6064 0x4105EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6065 0x4105EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6066 0x4105EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6067 0x4105ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6068 0x4105ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6069 0x4105ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6070 0x4105ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6071 0x4105EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6072 0x4105EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6073 0x4105EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6074 0x4105EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6075 0x4105EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6076 0x4105EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6077 0x4105EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6078 0x4105EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6079 0x4105EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6080 0x4105F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6081 0x4105F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6082 0x4105F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6083 0x4105F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6084 0x4105F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6085 0x4105F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6086 0x4105F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6087 0x4105F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6088 0x4105F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6089 0x4105F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6090 0x4105F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6091 0x4105F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6092 0x4105F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6093 0x4105F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6094 0x4105F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6095 0x4105F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6096 0x4105F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6097 0x4105F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6098 0x4105F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6099 0x4105F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6100 0x4105F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6101 0x4105F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6102 0x4105F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6103 0x4105F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6104 0x4105F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6105 0x4105F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6106 0x4105F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6107 0x4105F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6108 0x4105F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6109 0x4105F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6110 0x4105F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6111 0x4105F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6112 0x4105F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6113 0x4105F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6114 0x4105F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6115 0x4105F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6116 0x4105F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6117 0x4105F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6118 0x4105F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6119 0x4105F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6120 0x4105FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6121 0x4105FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6122 0x4105FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6123 0x4105FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6124 0x4105FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6125 0x4105FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6126 0x4105FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6127 0x4105FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6128 0x4105FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6129 0x4105FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6130 0x4105FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6131 0x4105FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6132 0x4105FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6133 0x4105FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6134 0x4105FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6135 0x4105FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6136 0x4105FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6137 0x4105FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6138 0x4105FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6139 0x4105FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6140 0x4105FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6141 0x4105FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6142 0x4105FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6143 0x4105FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6144 0x4106000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6145 0x4106004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6146 0x4106008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6147 0x410600C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6148 0x4106010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6149 0x4106014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6150 0x4106018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6151 0x410601C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6152 0x4106020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6153 0x4106024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6154 0x4106028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6155 0x410602C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6156 0x4106030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6157 0x4106034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6158 0x4106038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6159 0x410603C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6160 0x4106040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6161 0x4106044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6162 0x4106048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6163 0x410604C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6164 0x4106050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6165 0x4106054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6166 0x4106058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6167 0x410605C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6168 0x4106060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6169 0x4106064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6170 0x4106068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6171 0x410606C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6172 0x4106070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6173 0x4106074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6174 0x4106078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6175 0x410607C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6176 0x4106080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6177 0x4106084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6178 0x4106088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6179 0x410608C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6180 0x4106090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6181 0x4106094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6182 0x4106098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6183 0x410609C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6184 0x41060A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6185 0x41060A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6186 0x41060A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6187 0x41060AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6188 0x41060B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6189 0x41060B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6190 0x41060B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6191 0x41060BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6192 0x41060C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6193 0x41060C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6194 0x41060C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6195 0x41060CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6196 0x41060D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6197 0x41060D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6198 0x41060D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6199 0x41060DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6200 0x41060E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6201 0x41060E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6202 0x41060E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6203 0x41060EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6204 0x41060F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6205 0x41060F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6206 0x41060F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6207 0x41060FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6208 0x4106100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6209 0x4106104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6210 0x4106108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6211 0x410610C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6212 0x4106110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6213 0x4106114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6214 0x4106118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6215 0x410611C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6216 0x4106120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6217 0x4106124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6218 0x4106128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6219 0x410612C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6220 0x4106130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6221 0x4106134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6222 0x4106138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6223 0x410613C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6224 0x4106140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6225 0x4106144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6226 0x4106148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6227 0x410614C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6228 0x4106150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6229 0x4106154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6230 0x4106158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6231 0x410615C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6232 0x4106160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6233 0x4106164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6234 0x4106168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6235 0x410616C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6236 0x4106170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6237 0x4106174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6238 0x4106178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6239 0x410617C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6240 0x4106180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6241 0x4106184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6242 0x4106188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6243 0x410618C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6244 0x4106190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6245 0x4106194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6246 0x4106198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6247 0x410619C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6248 0x41061A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6249 0x41061A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6250 0x41061A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6251 0x41061AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6252 0x41061B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6253 0x41061B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6254 0x41061B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6255 0x41061BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6256 0x41061C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6257 0x41061C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6258 0x41061C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6259 0x41061CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6260 0x41061D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6261 0x41061D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6262 0x41061D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6263 0x41061DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6264 0x41061E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6265 0x41061E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6266 0x41061E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6267 0x41061EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6268 0x41061F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6269 0x41061F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6270 0x41061F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6271 0x41061FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6272 0x4106200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6273 0x4106204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6274 0x4106208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6275 0x410620C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6276 0x4106210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6277 0x4106214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6278 0x4106218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6279 0x410621C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6280 0x4106220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6281 0x4106224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6282 0x4106228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6283 0x410622C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6284 0x4106230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6285 0x4106234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6286 0x4106238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6287 0x410623C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6288 0x4106240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6289 0x4106244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6290 0x4106248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6291 0x410624C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6292 0x4106250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6293 0x4106254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6294 0x4106258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6295 0x410625C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6296 0x4106260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6297 0x4106264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6298 0x4106268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6299 0x410626C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6300 0x4106270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6301 0x4106274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6302 0x4106278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6303 0x410627C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6304 0x4106280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6305 0x4106284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6306 0x4106288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6307 0x410628C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6308 0x4106290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6309 0x4106294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6310 0x4106298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6311 0x410629C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6312 0x41062A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6313 0x41062A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6314 0x41062A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6315 0x41062AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6316 0x41062B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6317 0x41062B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6318 0x41062B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6319 0x41062BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6320 0x41062C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6321 0x41062C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6322 0x41062C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6323 0x41062CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6324 0x41062D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6325 0x41062D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6326 0x41062D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6327 0x41062DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6328 0x41062E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6329 0x41062E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6330 0x41062E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6331 0x41062EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6332 0x41062F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6333 0x41062F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6334 0x41062F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6335 0x41062FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6336 0x4106300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6337 0x4106304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6338 0x4106308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6339 0x410630C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6340 0x4106310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6341 0x4106314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6342 0x4106318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6343 0x410631C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6344 0x4106320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6345 0x4106324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6346 0x4106328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6347 0x410632C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6348 0x4106330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6349 0x4106334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6350 0x4106338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6351 0x410633C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6352 0x4106340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6353 0x4106344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6354 0x4106348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6355 0x410634C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6356 0x4106350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6357 0x4106354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6358 0x4106358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6359 0x410635C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6360 0x4106360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6361 0x4106364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6362 0x4106368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6363 0x410636C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6364 0x4106370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6365 0x4106374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6366 0x4106378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6367 0x410637C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6368 0x4106380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6369 0x4106384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6370 0x4106388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6371 0x410638C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6372 0x4106390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6373 0x4106394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6374 0x4106398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6375 0x410639C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6376 0x41063A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6377 0x41063A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6378 0x41063A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6379 0x41063AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6380 0x41063B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6381 0x41063B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6382 0x41063B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6383 0x41063BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6384 0x41063C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6385 0x41063C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6386 0x41063C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6387 0x41063CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6388 0x41063D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6389 0x41063D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6390 0x41063D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6391 0x41063DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6392 0x41063E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6393 0x41063E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6394 0x41063E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6395 0x41063EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6396 0x41063F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6397 0x41063F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6398 0x41063F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6399 0x41063FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6400 0x4106400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6401 0x4106404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6402 0x4106408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6403 0x410640C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6404 0x4106410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6405 0x4106414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6406 0x4106418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6407 0x410641C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6408 0x4106420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6409 0x4106424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6410 0x4106428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6411 0x410642C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6412 0x4106430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6413 0x4106434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6414 0x4106438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6415 0x410643C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6416 0x4106440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6417 0x4106444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6418 0x4106448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6419 0x410644C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6420 0x4106450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6421 0x4106454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6422 0x4106458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6423 0x410645C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6424 0x4106460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6425 0x4106464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6426 0x4106468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6427 0x410646C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6428 0x4106470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6429 0x4106474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6430 0x4106478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6431 0x410647C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6432 0x4106480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6433 0x4106484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6434 0x4106488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6435 0x410648C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6436 0x4106490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6437 0x4106494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6438 0x4106498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6439 0x410649C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6440 0x41064A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6441 0x41064A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6442 0x41064A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6443 0x41064AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6444 0x41064B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6445 0x41064B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6446 0x41064B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6447 0x41064BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6448 0x41064C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6449 0x41064C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6450 0x41064C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6451 0x41064CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6452 0x41064D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6453 0x41064D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6454 0x41064D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6455 0x41064DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6456 0x41064E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6457 0x41064E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6458 0x41064E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6459 0x41064EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6460 0x41064F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6461 0x41064F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6462 0x41064F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6463 0x41064FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6464 0x4106500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6465 0x4106504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6466 0x4106508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6467 0x410650C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6468 0x4106510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6469 0x4106514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6470 0x4106518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6471 0x410651C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6472 0x4106520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6473 0x4106524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6474 0x4106528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6475 0x410652C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6476 0x4106530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6477 0x4106534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6478 0x4106538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6479 0x410653C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6480 0x4106540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6481 0x4106544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6482 0x4106548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6483 0x410654C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6484 0x4106550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6485 0x4106554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6486 0x4106558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6487 0x410655C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6488 0x4106560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6489 0x4106564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6490 0x4106568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6491 0x410656C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6492 0x4106570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6493 0x4106574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6494 0x4106578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6495 0x410657C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6496 0x4106580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6497 0x4106584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6498 0x4106588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6499 0x410658C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6500 0x4106590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6501 0x4106594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6502 0x4106598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6503 0x410659C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6504 0x41065A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6505 0x41065A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6506 0x41065A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6507 0x41065AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6508 0x41065B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6509 0x41065B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6510 0x41065B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6511 0x41065BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6512 0x41065C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6513 0x41065C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6514 0x41065C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6515 0x41065CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6516 0x41065D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6517 0x41065D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6518 0x41065D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6519 0x41065DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6520 0x41065E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6521 0x41065E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6522 0x41065E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6523 0x41065EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6524 0x41065F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6525 0x41065F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6526 0x41065F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6527 0x41065FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6528 0x4106600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6529 0x4106604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6530 0x4106608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6531 0x410660C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6532 0x4106610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6533 0x4106614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6534 0x4106618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6535 0x410661C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6536 0x4106620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6537 0x4106624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6538 0x4106628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6539 0x410662C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6540 0x4106630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6541 0x4106634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6542 0x4106638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6543 0x410663C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6544 0x4106640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6545 0x4106644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6546 0x4106648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6547 0x410664C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6548 0x4106650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6549 0x4106654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6550 0x4106658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6551 0x410665C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6552 0x4106660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6553 0x4106664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6554 0x4106668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6555 0x410666C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6556 0x4106670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6557 0x4106674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6558 0x4106678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6559 0x410667C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6560 0x4106680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6561 0x4106684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6562 0x4106688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6563 0x410668C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6564 0x4106690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6565 0x4106694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6566 0x4106698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6567 0x410669C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6568 0x41066A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6569 0x41066A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6570 0x41066A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6571 0x41066AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6572 0x41066B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6573 0x41066B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6574 0x41066B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6575 0x41066BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6576 0x41066C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6577 0x41066C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6578 0x41066C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6579 0x41066CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6580 0x41066D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6581 0x41066D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6582 0x41066D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6583 0x41066DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6584 0x41066E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6585 0x41066E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6586 0x41066E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6587 0x41066EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6588 0x41066F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6589 0x41066F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6590 0x41066F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6591 0x41066FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6592 0x4106700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6593 0x4106704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6594 0x4106708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6595 0x410670C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6596 0x4106710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6597 0x4106714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6598 0x4106718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6599 0x410671C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6600 0x4106720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6601 0x4106724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6602 0x4106728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6603 0x410672C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6604 0x4106730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6605 0x4106734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6606 0x4106738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6607 0x410673C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6608 0x4106740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6609 0x4106744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6610 0x4106748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6611 0x410674C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6612 0x4106750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6613 0x4106754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6614 0x4106758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6615 0x410675C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6616 0x4106760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6617 0x4106764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6618 0x4106768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6619 0x410676C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6620 0x4106770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6621 0x4106774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6622 0x4106778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6623 0x410677C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6624 0x4106780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6625 0x4106784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6626 0x4106788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6627 0x410678C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6628 0x4106790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6629 0x4106794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6630 0x4106798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6631 0x410679C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6632 0x41067A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6633 0x41067A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6634 0x41067A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6635 0x41067AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6636 0x41067B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6637 0x41067B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6638 0x41067B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6639 0x41067BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6640 0x41067C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6641 0x41067C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6642 0x41067C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6643 0x41067CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6644 0x41067D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6645 0x41067D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6646 0x41067D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6647 0x41067DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6648 0x41067E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6649 0x41067E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6650 0x41067E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6651 0x41067EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6652 0x41067F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6653 0x41067F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6654 0x41067F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6655 0x41067FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6656 0x4106800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6657 0x4106804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6658 0x4106808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6659 0x410680C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6660 0x4106810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6661 0x4106814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6662 0x4106818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6663 0x410681C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6664 0x4106820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6665 0x4106824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6666 0x4106828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6667 0x410682C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6668 0x4106830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6669 0x4106834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6670 0x4106838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6671 0x410683C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6672 0x4106840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6673 0x4106844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6674 0x4106848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6675 0x410684C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6676 0x4106850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6677 0x4106854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6678 0x4106858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6679 0x410685C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6680 0x4106860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6681 0x4106864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6682 0x4106868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6683 0x410686C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6684 0x4106870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6685 0x4106874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6686 0x4106878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6687 0x410687C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6688 0x4106880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6689 0x4106884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6690 0x4106888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6691 0x410688C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6692 0x4106890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6693 0x4106894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6694 0x4106898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6695 0x410689C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6696 0x41068A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6697 0x41068A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6698 0x41068A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6699 0x41068AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6700 0x41068B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6701 0x41068B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6702 0x41068B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6703 0x41068BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6704 0x41068C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6705 0x41068C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6706 0x41068C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6707 0x41068CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6708 0x41068D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6709 0x41068D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6710 0x41068D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6711 0x41068DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6712 0x41068E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6713 0x41068E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6714 0x41068E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6715 0x41068EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6716 0x41068F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6717 0x41068F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6718 0x41068F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6719 0x41068FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6720 0x4106900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6721 0x4106904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6722 0x4106908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6723 0x410690C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6724 0x4106910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6725 0x4106914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6726 0x4106918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6727 0x410691C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6728 0x4106920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6729 0x4106924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6730 0x4106928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6731 0x410692C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6732 0x4106930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6733 0x4106934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6734 0x4106938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6735 0x410693C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6736 0x4106940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6737 0x4106944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6738 0x4106948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6739 0x410694C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6740 0x4106950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6741 0x4106954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6742 0x4106958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6743 0x410695C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6744 0x4106960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6745 0x4106964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6746 0x4106968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6747 0x410696C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6748 0x4106970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6749 0x4106974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6750 0x4106978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6751 0x410697C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6752 0x4106980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6753 0x4106984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6754 0x4106988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6755 0x410698C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6756 0x4106990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6757 0x4106994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6758 0x4106998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6759 0x410699C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6760 0x41069A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6761 0x41069A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6762 0x41069A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6763 0x41069AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6764 0x41069B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6765 0x41069B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6766 0x41069B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6767 0x41069BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6768 0x41069C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6769 0x41069C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6770 0x41069C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6771 0x41069CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6772 0x41069D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6773 0x41069D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6774 0x41069D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6775 0x41069DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6776 0x41069E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6777 0x41069E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6778 0x41069E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6779 0x41069EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6780 0x41069F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6781 0x41069F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6782 0x41069F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6783 0x41069FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6784 0x4106A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6785 0x4106A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6786 0x4106A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6787 0x4106A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6788 0x4106A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6789 0x4106A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6790 0x4106A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6791 0x4106A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6792 0x4106A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6793 0x4106A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6794 0x4106A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6795 0x4106A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6796 0x4106A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6797 0x4106A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6798 0x4106A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6799 0x4106A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6800 0x4106A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6801 0x4106A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6802 0x4106A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6803 0x4106A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6804 0x4106A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6805 0x4106A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6806 0x4106A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6807 0x4106A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6808 0x4106A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6809 0x4106A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6810 0x4106A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6811 0x4106A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6812 0x4106A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6813 0x4106A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6814 0x4106A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6815 0x4106A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6816 0x4106A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6817 0x4106A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6818 0x4106A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6819 0x4106A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6820 0x4106A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6821 0x4106A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6822 0x4106A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6823 0x4106A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6824 0x4106AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6825 0x4106AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6826 0x4106AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6827 0x4106AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6828 0x4106AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6829 0x4106AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6830 0x4106AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6831 0x4106ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6832 0x4106AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6833 0x4106AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6834 0x4106AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6835 0x4106ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6836 0x4106AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6837 0x4106AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6838 0x4106AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6839 0x4106ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6840 0x4106AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6841 0x4106AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6842 0x4106AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6843 0x4106AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6844 0x4106AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6845 0x4106AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6846 0x4106AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6847 0x4106AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6848 0x4106B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6849 0x4106B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6850 0x4106B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6851 0x4106B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6852 0x4106B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6853 0x4106B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6854 0x4106B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6855 0x4106B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6856 0x4106B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6857 0x4106B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6858 0x4106B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6859 0x4106B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6860 0x4106B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6861 0x4106B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6862 0x4106B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6863 0x4106B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6864 0x4106B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6865 0x4106B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6866 0x4106B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6867 0x4106B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6868 0x4106B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6869 0x4106B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6870 0x4106B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6871 0x4106B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6872 0x4106B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6873 0x4106B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6874 0x4106B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6875 0x4106B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6876 0x4106B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6877 0x4106B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6878 0x4106B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6879 0x4106B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6880 0x4106B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6881 0x4106B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6882 0x4106B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6883 0x4106B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6884 0x4106B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6885 0x4106B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6886 0x4106B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6887 0x4106B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6888 0x4106BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6889 0x4106BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6890 0x4106BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6891 0x4106BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6892 0x4106BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6893 0x4106BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6894 0x4106BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6895 0x4106BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6896 0x4106BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6897 0x4106BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6898 0x4106BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6899 0x4106BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6900 0x4106BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6901 0x4106BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6902 0x4106BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6903 0x4106BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6904 0x4106BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6905 0x4106BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6906 0x4106BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6907 0x4106BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6908 0x4106BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6909 0x4106BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6910 0x4106BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6911 0x4106BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6912 0x4106C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6913 0x4106C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6914 0x4106C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6915 0x4106C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6916 0x4106C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6917 0x4106C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6918 0x4106C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6919 0x4106C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6920 0x4106C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6921 0x4106C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6922 0x4106C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6923 0x4106C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6924 0x4106C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6925 0x4106C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6926 0x4106C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6927 0x4106C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6928 0x4106C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6929 0x4106C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6930 0x4106C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6931 0x4106C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6932 0x4106C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6933 0x4106C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6934 0x4106C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6935 0x4106C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6936 0x4106C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6937 0x4106C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6938 0x4106C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6939 0x4106C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6940 0x4106C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6941 0x4106C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6942 0x4106C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6943 0x4106C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6944 0x4106C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6945 0x4106C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6946 0x4106C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6947 0x4106C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6948 0x4106C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6949 0x4106C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6950 0x4106C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6951 0x4106C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6952 0x4106CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6953 0x4106CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6954 0x4106CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6955 0x4106CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6956 0x4106CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6957 0x4106CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6958 0x4106CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6959 0x4106CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6960 0x4106CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6961 0x4106CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6962 0x4106CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6963 0x4106CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6964 0x4106CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6965 0x4106CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6966 0x4106CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6967 0x4106CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6968 0x4106CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6969 0x4106CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6970 0x4106CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6971 0x4106CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6972 0x4106CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6973 0x4106CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6974 0x4106CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6975 0x4106CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6976 0x4106D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6977 0x4106D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6978 0x4106D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6979 0x4106D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6980 0x4106D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6981 0x4106D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6982 0x4106D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6983 0x4106D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6984 0x4106D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6985 0x4106D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6986 0x4106D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6987 0x4106D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6988 0x4106D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6989 0x4106D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6990 0x4106D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6991 0x4106D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6992 0x4106D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6993 0x4106D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6994 0x4106D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6995 0x4106D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6996 0x4106D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6997 0x4106D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6998 0x4106D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_6999 0x4106D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7000 0x4106D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7001 0x4106D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7002 0x4106D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7003 0x4106D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7004 0x4106D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7005 0x4106D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7006 0x4106D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7007 0x4106D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7008 0x4106D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7009 0x4106D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7010 0x4106D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7011 0x4106D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7012 0x4106D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7013 0x4106D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7014 0x4106D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7015 0x4106D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7016 0x4106DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7017 0x4106DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7018 0x4106DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7019 0x4106DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7020 0x4106DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7021 0x4106DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7022 0x4106DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7023 0x4106DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7024 0x4106DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7025 0x4106DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7026 0x4106DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7027 0x4106DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7028 0x4106DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7029 0x4106DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7030 0x4106DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7031 0x4106DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7032 0x4106DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7033 0x4106DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7034 0x4106DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7035 0x4106DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7036 0x4106DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7037 0x4106DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7038 0x4106DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7039 0x4106DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7040 0x4106E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7041 0x4106E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7042 0x4106E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7043 0x4106E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7044 0x4106E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7045 0x4106E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7046 0x4106E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7047 0x4106E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7048 0x4106E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7049 0x4106E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7050 0x4106E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7051 0x4106E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7052 0x4106E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7053 0x4106E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7054 0x4106E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7055 0x4106E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7056 0x4106E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7057 0x4106E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7058 0x4106E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7059 0x4106E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7060 0x4106E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7061 0x4106E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7062 0x4106E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7063 0x4106E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7064 0x4106E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7065 0x4106E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7066 0x4106E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7067 0x4106E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7068 0x4106E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7069 0x4106E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7070 0x4106E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7071 0x4106E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7072 0x4106E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7073 0x4106E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7074 0x4106E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7075 0x4106E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7076 0x4106E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7077 0x4106E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7078 0x4106E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7079 0x4106E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7080 0x4106EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7081 0x4106EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7082 0x4106EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7083 0x4106EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7084 0x4106EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7085 0x4106EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7086 0x4106EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7087 0x4106EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7088 0x4106EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7089 0x4106EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7090 0x4106EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7091 0x4106ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7092 0x4106ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7093 0x4106ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7094 0x4106ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7095 0x4106EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7096 0x4106EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7097 0x4106EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7098 0x4106EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7099 0x4106EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7100 0x4106EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7101 0x4106EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7102 0x4106EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7103 0x4106EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7104 0x4106F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7105 0x4106F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7106 0x4106F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7107 0x4106F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7108 0x4106F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7109 0x4106F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7110 0x4106F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7111 0x4106F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7112 0x4106F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7113 0x4106F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7114 0x4106F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7115 0x4106F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7116 0x4106F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7117 0x4106F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7118 0x4106F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7119 0x4106F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7120 0x4106F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7121 0x4106F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7122 0x4106F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7123 0x4106F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7124 0x4106F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7125 0x4106F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7126 0x4106F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7127 0x4106F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7128 0x4106F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7129 0x4106F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7130 0x4106F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7131 0x4106F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7132 0x4106F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7133 0x4106F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7134 0x4106F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7135 0x4106F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7136 0x4106F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7137 0x4106F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7138 0x4106F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7139 0x4106F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7140 0x4106F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7141 0x4106F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7142 0x4106F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7143 0x4106F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7144 0x4106FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7145 0x4106FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7146 0x4106FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7147 0x4106FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7148 0x4106FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7149 0x4106FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7150 0x4106FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7151 0x4106FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7152 0x4106FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7153 0x4106FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7154 0x4106FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7155 0x4106FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7156 0x4106FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7157 0x4106FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7158 0x4106FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7159 0x4106FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7160 0x4106FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7161 0x4106FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7162 0x4106FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7163 0x4106FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7164 0x4106FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7165 0x4106FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7166 0x4106FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7167 0x4106FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7168 0x4107000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7169 0x4107004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7170 0x4107008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7171 0x410700C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7172 0x4107010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7173 0x4107014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7174 0x4107018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7175 0x410701C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7176 0x4107020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7177 0x4107024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7178 0x4107028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7179 0x410702C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7180 0x4107030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7181 0x4107034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7182 0x4107038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7183 0x410703C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7184 0x4107040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7185 0x4107044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7186 0x4107048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7187 0x410704C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7188 0x4107050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7189 0x4107054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7190 0x4107058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7191 0x410705C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7192 0x4107060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7193 0x4107064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7194 0x4107068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7195 0x410706C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7196 0x4107070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7197 0x4107074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7198 0x4107078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7199 0x410707C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7200 0x4107080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7201 0x4107084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7202 0x4107088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7203 0x410708C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7204 0x4107090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7205 0x4107094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7206 0x4107098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7207 0x410709C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7208 0x41070A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7209 0x41070A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7210 0x41070A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7211 0x41070AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7212 0x41070B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7213 0x41070B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7214 0x41070B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7215 0x41070BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7216 0x41070C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7217 0x41070C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7218 0x41070C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7219 0x41070CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7220 0x41070D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7221 0x41070D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7222 0x41070D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7223 0x41070DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7224 0x41070E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7225 0x41070E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7226 0x41070E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7227 0x41070EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7228 0x41070F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7229 0x41070F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7230 0x41070F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7231 0x41070FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7232 0x4107100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7233 0x4107104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7234 0x4107108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7235 0x410710C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7236 0x4107110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7237 0x4107114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7238 0x4107118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7239 0x410711C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7240 0x4107120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7241 0x4107124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7242 0x4107128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7243 0x410712C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7244 0x4107130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7245 0x4107134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7246 0x4107138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7247 0x410713C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7248 0x4107140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7249 0x4107144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7250 0x4107148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7251 0x410714C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7252 0x4107150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7253 0x4107154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7254 0x4107158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7255 0x410715C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7256 0x4107160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7257 0x4107164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7258 0x4107168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7259 0x410716C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7260 0x4107170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7261 0x4107174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7262 0x4107178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7263 0x410717C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7264 0x4107180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7265 0x4107184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7266 0x4107188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7267 0x410718C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7268 0x4107190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7269 0x4107194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7270 0x4107198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7271 0x410719C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7272 0x41071A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7273 0x41071A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7274 0x41071A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7275 0x41071AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7276 0x41071B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7277 0x41071B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7278 0x41071B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7279 0x41071BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7280 0x41071C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7281 0x41071C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7282 0x41071C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7283 0x41071CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7284 0x41071D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7285 0x41071D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7286 0x41071D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7287 0x41071DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7288 0x41071E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7289 0x41071E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7290 0x41071E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7291 0x41071EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7292 0x41071F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7293 0x41071F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7294 0x41071F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7295 0x41071FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7296 0x4107200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7297 0x4107204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7298 0x4107208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7299 0x410720C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7300 0x4107210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7301 0x4107214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7302 0x4107218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7303 0x410721C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7304 0x4107220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7305 0x4107224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7306 0x4107228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7307 0x410722C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7308 0x4107230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7309 0x4107234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7310 0x4107238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7311 0x410723C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7312 0x4107240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7313 0x4107244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7314 0x4107248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7315 0x410724C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7316 0x4107250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7317 0x4107254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7318 0x4107258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7319 0x410725C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7320 0x4107260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7321 0x4107264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7322 0x4107268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7323 0x410726C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7324 0x4107270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7325 0x4107274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7326 0x4107278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7327 0x410727C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7328 0x4107280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7329 0x4107284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7330 0x4107288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7331 0x410728C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7332 0x4107290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7333 0x4107294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7334 0x4107298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7335 0x410729C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7336 0x41072A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7337 0x41072A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7338 0x41072A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7339 0x41072AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7340 0x41072B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7341 0x41072B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7342 0x41072B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7343 0x41072BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7344 0x41072C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7345 0x41072C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7346 0x41072C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7347 0x41072CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7348 0x41072D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7349 0x41072D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7350 0x41072D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7351 0x41072DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7352 0x41072E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7353 0x41072E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7354 0x41072E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7355 0x41072EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7356 0x41072F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7357 0x41072F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7358 0x41072F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7359 0x41072FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7360 0x4107300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7361 0x4107304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7362 0x4107308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7363 0x410730C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7364 0x4107310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7365 0x4107314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7366 0x4107318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7367 0x410731C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7368 0x4107320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7369 0x4107324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7370 0x4107328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7371 0x410732C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7372 0x4107330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7373 0x4107334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7374 0x4107338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7375 0x410733C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7376 0x4107340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7377 0x4107344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7378 0x4107348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7379 0x410734C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7380 0x4107350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7381 0x4107354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7382 0x4107358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7383 0x410735C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7384 0x4107360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7385 0x4107364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7386 0x4107368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7387 0x410736C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7388 0x4107370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7389 0x4107374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7390 0x4107378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7391 0x410737C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7392 0x4107380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7393 0x4107384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7394 0x4107388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7395 0x410738C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7396 0x4107390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7397 0x4107394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7398 0x4107398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7399 0x410739C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7400 0x41073A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7401 0x41073A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7402 0x41073A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7403 0x41073AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7404 0x41073B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7405 0x41073B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7406 0x41073B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7407 0x41073BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7408 0x41073C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7409 0x41073C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7410 0x41073C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7411 0x41073CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7412 0x41073D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7413 0x41073D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7414 0x41073D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7415 0x41073DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7416 0x41073E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7417 0x41073E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7418 0x41073E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7419 0x41073EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7420 0x41073F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7421 0x41073F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7422 0x41073F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7423 0x41073FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7424 0x4107400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7425 0x4107404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7426 0x4107408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7427 0x410740C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7428 0x4107410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7429 0x4107414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7430 0x4107418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7431 0x410741C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7432 0x4107420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7433 0x4107424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7434 0x4107428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7435 0x410742C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7436 0x4107430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7437 0x4107434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7438 0x4107438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7439 0x410743C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7440 0x4107440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7441 0x4107444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7442 0x4107448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7443 0x410744C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7444 0x4107450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7445 0x4107454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7446 0x4107458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7447 0x410745C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7448 0x4107460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7449 0x4107464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7450 0x4107468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7451 0x410746C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7452 0x4107470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7453 0x4107474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7454 0x4107478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7455 0x410747C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7456 0x4107480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7457 0x4107484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7458 0x4107488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7459 0x410748C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7460 0x4107490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7461 0x4107494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7462 0x4107498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7463 0x410749C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7464 0x41074A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7465 0x41074A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7466 0x41074A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7467 0x41074AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7468 0x41074B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7469 0x41074B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7470 0x41074B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7471 0x41074BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7472 0x41074C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7473 0x41074C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7474 0x41074C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7475 0x41074CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7476 0x41074D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7477 0x41074D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7478 0x41074D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7479 0x41074DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7480 0x41074E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7481 0x41074E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7482 0x41074E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7483 0x41074EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7484 0x41074F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7485 0x41074F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7486 0x41074F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7487 0x41074FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7488 0x4107500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7489 0x4107504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7490 0x4107508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7491 0x410750C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7492 0x4107510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7493 0x4107514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7494 0x4107518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7495 0x410751C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7496 0x4107520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7497 0x4107524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7498 0x4107528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7499 0x410752C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7500 0x4107530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7501 0x4107534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7502 0x4107538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7503 0x410753C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7504 0x4107540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7505 0x4107544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7506 0x4107548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7507 0x410754C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7508 0x4107550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7509 0x4107554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7510 0x4107558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7511 0x410755C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7512 0x4107560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7513 0x4107564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7514 0x4107568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7515 0x410756C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7516 0x4107570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7517 0x4107574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7518 0x4107578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7519 0x410757C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7520 0x4107580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7521 0x4107584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7522 0x4107588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7523 0x410758C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7524 0x4107590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7525 0x4107594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7526 0x4107598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7527 0x410759C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7528 0x41075A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7529 0x41075A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7530 0x41075A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7531 0x41075AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7532 0x41075B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7533 0x41075B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7534 0x41075B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7535 0x41075BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7536 0x41075C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7537 0x41075C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7538 0x41075C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7539 0x41075CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7540 0x41075D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7541 0x41075D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7542 0x41075D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7543 0x41075DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7544 0x41075E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7545 0x41075E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7546 0x41075E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7547 0x41075EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7548 0x41075F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7549 0x41075F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7550 0x41075F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7551 0x41075FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7552 0x4107600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7553 0x4107604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7554 0x4107608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7555 0x410760C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7556 0x4107610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7557 0x4107614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7558 0x4107618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7559 0x410761C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7560 0x4107620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7561 0x4107624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7562 0x4107628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7563 0x410762C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7564 0x4107630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7565 0x4107634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7566 0x4107638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7567 0x410763C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7568 0x4107640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7569 0x4107644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7570 0x4107648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7571 0x410764C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7572 0x4107650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7573 0x4107654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7574 0x4107658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7575 0x410765C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7576 0x4107660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7577 0x4107664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7578 0x4107668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7579 0x410766C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7580 0x4107670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7581 0x4107674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7582 0x4107678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7583 0x410767C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7584 0x4107680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7585 0x4107684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7586 0x4107688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7587 0x410768C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7588 0x4107690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7589 0x4107694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7590 0x4107698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7591 0x410769C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7592 0x41076A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7593 0x41076A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7594 0x41076A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7595 0x41076AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7596 0x41076B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7597 0x41076B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7598 0x41076B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7599 0x41076BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7600 0x41076C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7601 0x41076C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7602 0x41076C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7603 0x41076CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7604 0x41076D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7605 0x41076D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7606 0x41076D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7607 0x41076DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7608 0x41076E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7609 0x41076E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7610 0x41076E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7611 0x41076EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7612 0x41076F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7613 0x41076F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7614 0x41076F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7615 0x41076FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7616 0x4107700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7617 0x4107704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7618 0x4107708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7619 0x410770C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7620 0x4107710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7621 0x4107714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7622 0x4107718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7623 0x410771C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7624 0x4107720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7625 0x4107724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7626 0x4107728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7627 0x410772C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7628 0x4107730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7629 0x4107734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7630 0x4107738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7631 0x410773C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7632 0x4107740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7633 0x4107744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7634 0x4107748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7635 0x410774C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7636 0x4107750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7637 0x4107754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7638 0x4107758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7639 0x410775C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7640 0x4107760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7641 0x4107764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7642 0x4107768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7643 0x410776C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7644 0x4107770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7645 0x4107774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7646 0x4107778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7647 0x410777C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7648 0x4107780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7649 0x4107784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7650 0x4107788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7651 0x410778C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7652 0x4107790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7653 0x4107794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7654 0x4107798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7655 0x410779C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7656 0x41077A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7657 0x41077A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7658 0x41077A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7659 0x41077AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7660 0x41077B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7661 0x41077B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7662 0x41077B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7663 0x41077BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7664 0x41077C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7665 0x41077C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7666 0x41077C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7667 0x41077CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7668 0x41077D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7669 0x41077D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7670 0x41077D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7671 0x41077DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7672 0x41077E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7673 0x41077E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7674 0x41077E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7675 0x41077EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7676 0x41077F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7677 0x41077F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7678 0x41077F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7679 0x41077FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7680 0x4107800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7681 0x4107804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7682 0x4107808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7683 0x410780C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7684 0x4107810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7685 0x4107814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7686 0x4107818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7687 0x410781C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7688 0x4107820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7689 0x4107824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7690 0x4107828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7691 0x410782C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7692 0x4107830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7693 0x4107834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7694 0x4107838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7695 0x410783C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7696 0x4107840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7697 0x4107844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7698 0x4107848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7699 0x410784C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7700 0x4107850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7701 0x4107854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7702 0x4107858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7703 0x410785C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7704 0x4107860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7705 0x4107864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7706 0x4107868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7707 0x410786C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7708 0x4107870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7709 0x4107874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7710 0x4107878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7711 0x410787C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7712 0x4107880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7713 0x4107884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7714 0x4107888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7715 0x410788C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7716 0x4107890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7717 0x4107894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7718 0x4107898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7719 0x410789C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7720 0x41078A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7721 0x41078A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7722 0x41078A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7723 0x41078AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7724 0x41078B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7725 0x41078B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7726 0x41078B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7727 0x41078BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7728 0x41078C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7729 0x41078C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7730 0x41078C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7731 0x41078CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7732 0x41078D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7733 0x41078D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7734 0x41078D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7735 0x41078DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7736 0x41078E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7737 0x41078E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7738 0x41078E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7739 0x41078EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7740 0x41078F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7741 0x41078F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7742 0x41078F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7743 0x41078FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7744 0x4107900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7745 0x4107904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7746 0x4107908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7747 0x410790C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7748 0x4107910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7749 0x4107914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7750 0x4107918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7751 0x410791C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7752 0x4107920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7753 0x4107924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7754 0x4107928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7755 0x410792C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7756 0x4107930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7757 0x4107934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7758 0x4107938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7759 0x410793C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7760 0x4107940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7761 0x4107944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7762 0x4107948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7763 0x410794C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7764 0x4107950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7765 0x4107954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7766 0x4107958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7767 0x410795C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7768 0x4107960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7769 0x4107964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7770 0x4107968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7771 0x410796C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7772 0x4107970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7773 0x4107974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7774 0x4107978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7775 0x410797C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7776 0x4107980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7777 0x4107984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7778 0x4107988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7779 0x410798C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7780 0x4107990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7781 0x4107994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7782 0x4107998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7783 0x410799C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7784 0x41079A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7785 0x41079A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7786 0x41079A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7787 0x41079AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7788 0x41079B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7789 0x41079B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7790 0x41079B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7791 0x41079BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7792 0x41079C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7793 0x41079C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7794 0x41079C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7795 0x41079CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7796 0x41079D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7797 0x41079D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7798 0x41079D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7799 0x41079DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7800 0x41079E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7801 0x41079E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7802 0x41079E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7803 0x41079EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7804 0x41079F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7805 0x41079F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7806 0x41079F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7807 0x41079FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7808 0x4107A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7809 0x4107A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7810 0x4107A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7811 0x4107A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7812 0x4107A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7813 0x4107A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7814 0x4107A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7815 0x4107A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7816 0x4107A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7817 0x4107A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7818 0x4107A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7819 0x4107A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7820 0x4107A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7821 0x4107A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7822 0x4107A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7823 0x4107A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7824 0x4107A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7825 0x4107A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7826 0x4107A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7827 0x4107A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7828 0x4107A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7829 0x4107A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7830 0x4107A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7831 0x4107A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7832 0x4107A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7833 0x4107A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7834 0x4107A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7835 0x4107A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7836 0x4107A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7837 0x4107A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7838 0x4107A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7839 0x4107A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7840 0x4107A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7841 0x4107A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7842 0x4107A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7843 0x4107A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7844 0x4107A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7845 0x4107A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7846 0x4107A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7847 0x4107A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7848 0x4107AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7849 0x4107AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7850 0x4107AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7851 0x4107AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7852 0x4107AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7853 0x4107AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7854 0x4107AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7855 0x4107ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7856 0x4107AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7857 0x4107AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7858 0x4107AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7859 0x4107ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7860 0x4107AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7861 0x4107AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7862 0x4107AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7863 0x4107ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7864 0x4107AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7865 0x4107AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7866 0x4107AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7867 0x4107AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7868 0x4107AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7869 0x4107AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7870 0x4107AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7871 0x4107AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7872 0x4107B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7873 0x4107B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7874 0x4107B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7875 0x4107B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7876 0x4107B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7877 0x4107B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7878 0x4107B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7879 0x4107B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7880 0x4107B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7881 0x4107B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7882 0x4107B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7883 0x4107B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7884 0x4107B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7885 0x4107B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7886 0x4107B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7887 0x4107B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7888 0x4107B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7889 0x4107B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7890 0x4107B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7891 0x4107B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7892 0x4107B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7893 0x4107B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7894 0x4107B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7895 0x4107B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7896 0x4107B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7897 0x4107B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7898 0x4107B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7899 0x4107B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7900 0x4107B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7901 0x4107B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7902 0x4107B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7903 0x4107B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7904 0x4107B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7905 0x4107B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7906 0x4107B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7907 0x4107B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7908 0x4107B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7909 0x4107B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7910 0x4107B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7911 0x4107B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7912 0x4107BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7913 0x4107BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7914 0x4107BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7915 0x4107BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7916 0x4107BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7917 0x4107BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7918 0x4107BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7919 0x4107BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7920 0x4107BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7921 0x4107BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7922 0x4107BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7923 0x4107BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7924 0x4107BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7925 0x4107BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7926 0x4107BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7927 0x4107BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7928 0x4107BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7929 0x4107BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7930 0x4107BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7931 0x4107BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7932 0x4107BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7933 0x4107BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7934 0x4107BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7935 0x4107BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7936 0x4107C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7937 0x4107C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7938 0x4107C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7939 0x4107C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7940 0x4107C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7941 0x4107C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7942 0x4107C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7943 0x4107C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7944 0x4107C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7945 0x4107C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7946 0x4107C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7947 0x4107C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7948 0x4107C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7949 0x4107C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7950 0x4107C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7951 0x4107C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7952 0x4107C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7953 0x4107C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7954 0x4107C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7955 0x4107C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7956 0x4107C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7957 0x4107C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7958 0x4107C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7959 0x4107C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7960 0x4107C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7961 0x4107C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7962 0x4107C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7963 0x4107C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7964 0x4107C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7965 0x4107C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7966 0x4107C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7967 0x4107C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7968 0x4107C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7969 0x4107C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7970 0x4107C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7971 0x4107C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7972 0x4107C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7973 0x4107C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7974 0x4107C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7975 0x4107C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7976 0x4107CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7977 0x4107CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7978 0x4107CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7979 0x4107CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7980 0x4107CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7981 0x4107CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7982 0x4107CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7983 0x4107CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7984 0x4107CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7985 0x4107CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7986 0x4107CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7987 0x4107CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7988 0x4107CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7989 0x4107CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7990 0x4107CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7991 0x4107CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7992 0x4107CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7993 0x4107CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7994 0x4107CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7995 0x4107CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7996 0x4107CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7997 0x4107CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7998 0x4107CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_7999 0x4107CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8000 0x4107D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8001 0x4107D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8002 0x4107D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8003 0x4107D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8004 0x4107D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8005 0x4107D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8006 0x4107D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8007 0x4107D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8008 0x4107D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8009 0x4107D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8010 0x4107D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8011 0x4107D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8012 0x4107D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8013 0x4107D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8014 0x4107D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8015 0x4107D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8016 0x4107D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8017 0x4107D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8018 0x4107D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8019 0x4107D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8020 0x4107D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8021 0x4107D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8022 0x4107D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8023 0x4107D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8024 0x4107D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8025 0x4107D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8026 0x4107D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8027 0x4107D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8028 0x4107D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8029 0x4107D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8030 0x4107D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8031 0x4107D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8032 0x4107D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8033 0x4107D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8034 0x4107D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8035 0x4107D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8036 0x4107D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8037 0x4107D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8038 0x4107D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8039 0x4107D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8040 0x4107DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8041 0x4107DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8042 0x4107DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8043 0x4107DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8044 0x4107DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8045 0x4107DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8046 0x4107DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8047 0x4107DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8048 0x4107DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8049 0x4107DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8050 0x4107DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8051 0x4107DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8052 0x4107DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8053 0x4107DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8054 0x4107DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8055 0x4107DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8056 0x4107DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8057 0x4107DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8058 0x4107DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8059 0x4107DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8060 0x4107DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8061 0x4107DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8062 0x4107DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8063 0x4107DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8064 0x4107E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8065 0x4107E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8066 0x4107E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8067 0x4107E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8068 0x4107E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8069 0x4107E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8070 0x4107E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8071 0x4107E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8072 0x4107E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8073 0x4107E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8074 0x4107E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8075 0x4107E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8076 0x4107E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8077 0x4107E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8078 0x4107E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8079 0x4107E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8080 0x4107E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8081 0x4107E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8082 0x4107E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8083 0x4107E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8084 0x4107E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8085 0x4107E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8086 0x4107E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8087 0x4107E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8088 0x4107E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8089 0x4107E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8090 0x4107E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8091 0x4107E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8092 0x4107E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8093 0x4107E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8094 0x4107E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8095 0x4107E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8096 0x4107E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8097 0x4107E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8098 0x4107E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8099 0x4107E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8100 0x4107E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8101 0x4107E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8102 0x4107E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8103 0x4107E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8104 0x4107EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8105 0x4107EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8106 0x4107EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8107 0x4107EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8108 0x4107EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8109 0x4107EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8110 0x4107EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8111 0x4107EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8112 0x4107EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8113 0x4107EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8114 0x4107EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8115 0x4107ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8116 0x4107ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8117 0x4107ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8118 0x4107ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8119 0x4107EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8120 0x4107EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8121 0x4107EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8122 0x4107EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8123 0x4107EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8124 0x4107EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8125 0x4107EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8126 0x4107EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8127 0x4107EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8128 0x4107F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8129 0x4107F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8130 0x4107F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8131 0x4107F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8132 0x4107F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8133 0x4107F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8134 0x4107F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8135 0x4107F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8136 0x4107F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8137 0x4107F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8138 0x4107F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8139 0x4107F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8140 0x4107F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8141 0x4107F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8142 0x4107F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8143 0x4107F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8144 0x4107F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8145 0x4107F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8146 0x4107F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8147 0x4107F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8148 0x4107F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8149 0x4107F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8150 0x4107F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8151 0x4107F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8152 0x4107F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8153 0x4107F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8154 0x4107F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8155 0x4107F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8156 0x4107F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8157 0x4107F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8158 0x4107F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8159 0x4107F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8160 0x4107F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8161 0x4107F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8162 0x4107F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8163 0x4107F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8164 0x4107F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8165 0x4107F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8166 0x4107F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8167 0x4107F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8168 0x4107FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8169 0x4107FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8170 0x4107FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8171 0x4107FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8172 0x4107FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8173 0x4107FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8174 0x4107FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8175 0x4107FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8176 0x4107FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8177 0x4107FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8178 0x4107FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8179 0x4107FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8180 0x4107FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8181 0x4107FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8182 0x4107FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8183 0x4107FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8184 0x4107FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8185 0x4107FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8186 0x4107FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8187 0x4107FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8188 0x4107FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8189 0x4107FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8190 0x4107FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8191 0x4107FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4108000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1 0x4108004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2 0x4108008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_3 0x410800C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_4 0x4108010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_5 0x4108014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_6 0x4108018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_7 0x410801C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_8 0x4108020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_9 0x4108024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_10 0x4108028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_11 0x410802C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_12 0x4108030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_13 0x4108034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_14 0x4108038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_15 0x410803C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_16 0x4108040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_17 0x4108044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_18 0x4108048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_19 0x410804C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_20 0x4108050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_21 0x4108054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_22 0x4108058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_23 0x410805C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_24 0x4108060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_25 0x4108064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_26 0x4108068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_27 0x410806C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_28 0x4108070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_29 0x4108074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_30 0x4108078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_31 0x410807C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_32 0x4108080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_33 0x4108084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_34 0x4108088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_35 0x410808C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_36 0x4108090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_37 0x4108094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_38 0x4108098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_39 0x410809C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_40 0x41080A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_41 0x41080A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_42 0x41080A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_43 0x41080AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_44 0x41080B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_45 0x41080B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_46 0x41080B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_47 0x41080BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_48 0x41080C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_49 0x41080C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_50 0x41080C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_51 0x41080CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_52 0x41080D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_53 0x41080D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_54 0x41080D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_55 0x41080DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_56 0x41080E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_57 0x41080E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_58 0x41080E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_59 0x41080EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_60 0x41080F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_61 0x41080F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_62 0x41080F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_63 0x41080FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_64 0x4108100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_65 0x4108104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_66 0x4108108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_67 0x410810C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_68 0x4108110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_69 0x4108114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_70 0x4108118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_71 0x410811C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_72 0x4108120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_73 0x4108124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_74 0x4108128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_75 0x410812C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_76 0x4108130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_77 0x4108134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_78 0x4108138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_79 0x410813C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_80 0x4108140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_81 0x4108144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_82 0x4108148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_83 0x410814C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_84 0x4108150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_85 0x4108154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_86 0x4108158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_87 0x410815C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_88 0x4108160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_89 0x4108164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_90 0x4108168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_91 0x410816C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_92 0x4108170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_93 0x4108174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_94 0x4108178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_95 0x410817C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_96 0x4108180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_97 0x4108184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_98 0x4108188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_99 0x410818C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_100 0x4108190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_101 0x4108194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_102 0x4108198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_103 0x410819C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_104 0x41081A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_105 0x41081A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_106 0x41081A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_107 0x41081AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_108 0x41081B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_109 0x41081B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_110 0x41081B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_111 0x41081BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_112 0x41081C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_113 0x41081C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_114 0x41081C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_115 0x41081CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_116 0x41081D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_117 0x41081D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_118 0x41081D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_119 0x41081DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_120 0x41081E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_121 0x41081E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_122 0x41081E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_123 0x41081EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_124 0x41081F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_125 0x41081F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_126 0x41081F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_127 0x41081FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_128 0x4108200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_129 0x4108204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_130 0x4108208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_131 0x410820C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_132 0x4108210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_133 0x4108214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_134 0x4108218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_135 0x410821C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_136 0x4108220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_137 0x4108224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_138 0x4108228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_139 0x410822C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_140 0x4108230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_141 0x4108234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_142 0x4108238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_143 0x410823C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_144 0x4108240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_145 0x4108244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_146 0x4108248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_147 0x410824C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_148 0x4108250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_149 0x4108254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_150 0x4108258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_151 0x410825C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_152 0x4108260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_153 0x4108264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_154 0x4108268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_155 0x410826C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_156 0x4108270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_157 0x4108274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_158 0x4108278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_159 0x410827C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_160 0x4108280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_161 0x4108284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_162 0x4108288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_163 0x410828C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_164 0x4108290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_165 0x4108294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_166 0x4108298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_167 0x410829C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_168 0x41082A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_169 0x41082A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_170 0x41082A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_171 0x41082AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_172 0x41082B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_173 0x41082B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_174 0x41082B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_175 0x41082BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_176 0x41082C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_177 0x41082C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_178 0x41082C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_179 0x41082CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_180 0x41082D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_181 0x41082D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_182 0x41082D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_183 0x41082DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_184 0x41082E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_185 0x41082E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_186 0x41082E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_187 0x41082EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_188 0x41082F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_189 0x41082F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_190 0x41082F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_191 0x41082FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_192 0x4108300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_193 0x4108304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_194 0x4108308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_195 0x410830C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_196 0x4108310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_197 0x4108314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_198 0x4108318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_199 0x410831C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_200 0x4108320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_201 0x4108324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_202 0x4108328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_203 0x410832C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_204 0x4108330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_205 0x4108334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_206 0x4108338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_207 0x410833C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_208 0x4108340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_209 0x4108344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_210 0x4108348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_211 0x410834C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_212 0x4108350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_213 0x4108354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_214 0x4108358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_215 0x410835C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_216 0x4108360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_217 0x4108364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_218 0x4108368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_219 0x410836C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_220 0x4108370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_221 0x4108374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_222 0x4108378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_223 0x410837C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_224 0x4108380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_225 0x4108384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_226 0x4108388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_227 0x410838C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_228 0x4108390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_229 0x4108394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_230 0x4108398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_231 0x410839C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_232 0x41083A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_233 0x41083A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_234 0x41083A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_235 0x41083AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_236 0x41083B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_237 0x41083B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_238 0x41083B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_239 0x41083BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_240 0x41083C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_241 0x41083C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_242 0x41083C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_243 0x41083CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_244 0x41083D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_245 0x41083D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_246 0x41083D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_247 0x41083DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_248 0x41083E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_249 0x41083E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_250 0x41083E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_251 0x41083EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_252 0x41083F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_253 0x41083F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_254 0x41083F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_255 0x41083FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_256 0x4108400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_257 0x4108404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_258 0x4108408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_259 0x410840C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_260 0x4108410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_261 0x4108414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_262 0x4108418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_263 0x410841C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_264 0x4108420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_265 0x4108424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_266 0x4108428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_267 0x410842C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_268 0x4108430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_269 0x4108434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_270 0x4108438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_271 0x410843C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_272 0x4108440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_273 0x4108444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_274 0x4108448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_275 0x410844C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_276 0x4108450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_277 0x4108454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_278 0x4108458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_279 0x410845C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_280 0x4108460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_281 0x4108464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_282 0x4108468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_283 0x410846C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_284 0x4108470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_285 0x4108474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_286 0x4108478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_287 0x410847C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_288 0x4108480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_289 0x4108484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_290 0x4108488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_291 0x410848C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_292 0x4108490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_293 0x4108494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_294 0x4108498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_295 0x410849C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_296 0x41084A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_297 0x41084A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_298 0x41084A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_299 0x41084AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_300 0x41084B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_301 0x41084B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_302 0x41084B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_303 0x41084BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_304 0x41084C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_305 0x41084C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_306 0x41084C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_307 0x41084CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_308 0x41084D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_309 0x41084D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_310 0x41084D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_311 0x41084DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_312 0x41084E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_313 0x41084E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_314 0x41084E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_315 0x41084EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_316 0x41084F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_317 0x41084F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_318 0x41084F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_319 0x41084FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_320 0x4108500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_321 0x4108504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_322 0x4108508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_323 0x410850C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_324 0x4108510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_325 0x4108514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_326 0x4108518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_327 0x410851C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_328 0x4108520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_329 0x4108524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_330 0x4108528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_331 0x410852C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_332 0x4108530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_333 0x4108534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_334 0x4108538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_335 0x410853C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_336 0x4108540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_337 0x4108544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_338 0x4108548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_339 0x410854C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_340 0x4108550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_341 0x4108554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_342 0x4108558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_343 0x410855C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_344 0x4108560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_345 0x4108564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_346 0x4108568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_347 0x410856C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_348 0x4108570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_349 0x4108574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_350 0x4108578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_351 0x410857C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_352 0x4108580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_353 0x4108584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_354 0x4108588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_355 0x410858C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_356 0x4108590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_357 0x4108594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_358 0x4108598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_359 0x410859C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_360 0x41085A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_361 0x41085A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_362 0x41085A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_363 0x41085AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_364 0x41085B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_365 0x41085B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_366 0x41085B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_367 0x41085BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_368 0x41085C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_369 0x41085C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_370 0x41085C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_371 0x41085CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_372 0x41085D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_373 0x41085D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_374 0x41085D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_375 0x41085DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_376 0x41085E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_377 0x41085E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_378 0x41085E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_379 0x41085EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_380 0x41085F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_381 0x41085F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_382 0x41085F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_383 0x41085FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_384 0x4108600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_385 0x4108604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_386 0x4108608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_387 0x410860C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_388 0x4108610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_389 0x4108614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_390 0x4108618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_391 0x410861C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_392 0x4108620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_393 0x4108624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_394 0x4108628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_395 0x410862C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_396 0x4108630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_397 0x4108634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_398 0x4108638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_399 0x410863C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_400 0x4108640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_401 0x4108644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_402 0x4108648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_403 0x410864C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_404 0x4108650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_405 0x4108654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_406 0x4108658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_407 0x410865C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_408 0x4108660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_409 0x4108664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_410 0x4108668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_411 0x410866C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_412 0x4108670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_413 0x4108674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_414 0x4108678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_415 0x410867C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_416 0x4108680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_417 0x4108684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_418 0x4108688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_419 0x410868C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_420 0x4108690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_421 0x4108694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_422 0x4108698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_423 0x410869C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_424 0x41086A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_425 0x41086A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_426 0x41086A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_427 0x41086AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_428 0x41086B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_429 0x41086B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_430 0x41086B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_431 0x41086BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_432 0x41086C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_433 0x41086C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_434 0x41086C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_435 0x41086CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_436 0x41086D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_437 0x41086D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_438 0x41086D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_439 0x41086DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_440 0x41086E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_441 0x41086E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_442 0x41086E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_443 0x41086EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_444 0x41086F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_445 0x41086F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_446 0x41086F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_447 0x41086FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_448 0x4108700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_449 0x4108704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_450 0x4108708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_451 0x410870C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_452 0x4108710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_453 0x4108714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_454 0x4108718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_455 0x410871C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_456 0x4108720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_457 0x4108724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_458 0x4108728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_459 0x410872C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_460 0x4108730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_461 0x4108734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_462 0x4108738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_463 0x410873C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_464 0x4108740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_465 0x4108744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_466 0x4108748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_467 0x410874C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_468 0x4108750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_469 0x4108754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_470 0x4108758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_471 0x410875C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_472 0x4108760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_473 0x4108764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_474 0x4108768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_475 0x410876C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_476 0x4108770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_477 0x4108774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_478 0x4108778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_479 0x410877C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_480 0x4108780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_481 0x4108784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_482 0x4108788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_483 0x410878C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_484 0x4108790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_485 0x4108794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_486 0x4108798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_487 0x410879C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_488 0x41087A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_489 0x41087A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_490 0x41087A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_491 0x41087AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_492 0x41087B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_493 0x41087B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_494 0x41087B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_495 0x41087BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_496 0x41087C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_497 0x41087C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_498 0x41087C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_499 0x41087CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_500 0x41087D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_501 0x41087D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_502 0x41087D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_503 0x41087DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_504 0x41087E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_505 0x41087E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_506 0x41087E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_507 0x41087EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_508 0x41087F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_509 0x41087F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_510 0x41087F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_511 0x41087FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_512 0x4108800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_513 0x4108804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_514 0x4108808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_515 0x410880C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_516 0x4108810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_517 0x4108814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_518 0x4108818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_519 0x410881C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_520 0x4108820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_521 0x4108824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_522 0x4108828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_523 0x410882C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_524 0x4108830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_525 0x4108834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_526 0x4108838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_527 0x410883C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_528 0x4108840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_529 0x4108844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_530 0x4108848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_531 0x410884C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_532 0x4108850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_533 0x4108854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_534 0x4108858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_535 0x410885C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_536 0x4108860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_537 0x4108864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_538 0x4108868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_539 0x410886C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_540 0x4108870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_541 0x4108874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_542 0x4108878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_543 0x410887C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_544 0x4108880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_545 0x4108884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_546 0x4108888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_547 0x410888C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_548 0x4108890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_549 0x4108894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_550 0x4108898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_551 0x410889C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_552 0x41088A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_553 0x41088A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_554 0x41088A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_555 0x41088AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_556 0x41088B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_557 0x41088B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_558 0x41088B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_559 0x41088BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_560 0x41088C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_561 0x41088C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_562 0x41088C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_563 0x41088CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_564 0x41088D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_565 0x41088D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_566 0x41088D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_567 0x41088DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_568 0x41088E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_569 0x41088E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_570 0x41088E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_571 0x41088EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_572 0x41088F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_573 0x41088F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_574 0x41088F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_575 0x41088FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_576 0x4108900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_577 0x4108904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_578 0x4108908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_579 0x410890C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_580 0x4108910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_581 0x4108914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_582 0x4108918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_583 0x410891C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_584 0x4108920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_585 0x4108924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_586 0x4108928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_587 0x410892C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_588 0x4108930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_589 0x4108934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_590 0x4108938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_591 0x410893C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_592 0x4108940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_593 0x4108944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_594 0x4108948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_595 0x410894C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_596 0x4108950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_597 0x4108954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_598 0x4108958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_599 0x410895C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_600 0x4108960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_601 0x4108964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_602 0x4108968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_603 0x410896C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_604 0x4108970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_605 0x4108974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_606 0x4108978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_607 0x410897C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_608 0x4108980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_609 0x4108984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_610 0x4108988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_611 0x410898C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_612 0x4108990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_613 0x4108994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_614 0x4108998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_615 0x410899C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_616 0x41089A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_617 0x41089A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_618 0x41089A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_619 0x41089AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_620 0x41089B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_621 0x41089B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_622 0x41089B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_623 0x41089BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_624 0x41089C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_625 0x41089C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_626 0x41089C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_627 0x41089CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_628 0x41089D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_629 0x41089D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_630 0x41089D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_631 0x41089DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_632 0x41089E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_633 0x41089E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_634 0x41089E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_635 0x41089EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_636 0x41089F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_637 0x41089F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_638 0x41089F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_639 0x41089FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_640 0x4108A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_641 0x4108A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_642 0x4108A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_643 0x4108A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_644 0x4108A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_645 0x4108A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_646 0x4108A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_647 0x4108A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_648 0x4108A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_649 0x4108A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_650 0x4108A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_651 0x4108A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_652 0x4108A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_653 0x4108A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_654 0x4108A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_655 0x4108A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_656 0x4108A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_657 0x4108A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_658 0x4108A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_659 0x4108A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_660 0x4108A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_661 0x4108A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_662 0x4108A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_663 0x4108A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_664 0x4108A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_665 0x4108A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_666 0x4108A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_667 0x4108A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_668 0x4108A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_669 0x4108A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_670 0x4108A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_671 0x4108A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_672 0x4108A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_673 0x4108A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_674 0x4108A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_675 0x4108A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_676 0x4108A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_677 0x4108A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_678 0x4108A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_679 0x4108A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_680 0x4108AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_681 0x4108AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_682 0x4108AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_683 0x4108AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_684 0x4108AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_685 0x4108AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_686 0x4108AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_687 0x4108ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_688 0x4108AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_689 0x4108AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_690 0x4108AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_691 0x4108ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_692 0x4108AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_693 0x4108AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_694 0x4108AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_695 0x4108ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_696 0x4108AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_697 0x4108AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_698 0x4108AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_699 0x4108AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_700 0x4108AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_701 0x4108AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_702 0x4108AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_703 0x4108AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_704 0x4108B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_705 0x4108B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_706 0x4108B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_707 0x4108B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_708 0x4108B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_709 0x4108B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_710 0x4108B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_711 0x4108B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_712 0x4108B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_713 0x4108B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_714 0x4108B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_715 0x4108B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_716 0x4108B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_717 0x4108B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_718 0x4108B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_719 0x4108B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_720 0x4108B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_721 0x4108B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_722 0x4108B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_723 0x4108B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_724 0x4108B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_725 0x4108B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_726 0x4108B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_727 0x4108B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_728 0x4108B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_729 0x4108B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_730 0x4108B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_731 0x4108B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_732 0x4108B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_733 0x4108B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_734 0x4108B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_735 0x4108B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_736 0x4108B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_737 0x4108B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_738 0x4108B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_739 0x4108B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_740 0x4108B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_741 0x4108B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_742 0x4108B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_743 0x4108B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_744 0x4108BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_745 0x4108BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_746 0x4108BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_747 0x4108BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_748 0x4108BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_749 0x4108BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_750 0x4108BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_751 0x4108BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_752 0x4108BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_753 0x4108BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_754 0x4108BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_755 0x4108BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_756 0x4108BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_757 0x4108BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_758 0x4108BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_759 0x4108BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_760 0x4108BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_761 0x4108BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_762 0x4108BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_763 0x4108BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_764 0x4108BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_765 0x4108BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_766 0x4108BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_767 0x4108BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_768 0x4108C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_769 0x4108C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_770 0x4108C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_771 0x4108C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_772 0x4108C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_773 0x4108C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_774 0x4108C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_775 0x4108C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_776 0x4108C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_777 0x4108C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_778 0x4108C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_779 0x4108C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_780 0x4108C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_781 0x4108C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_782 0x4108C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_783 0x4108C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_784 0x4108C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_785 0x4108C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_786 0x4108C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_787 0x4108C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_788 0x4108C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_789 0x4108C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_790 0x4108C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_791 0x4108C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_792 0x4108C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_793 0x4108C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_794 0x4108C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_795 0x4108C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_796 0x4108C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_797 0x4108C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_798 0x4108C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_799 0x4108C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_800 0x4108C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_801 0x4108C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_802 0x4108C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_803 0x4108C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_804 0x4108C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_805 0x4108C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_806 0x4108C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_807 0x4108C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_808 0x4108CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_809 0x4108CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_810 0x4108CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_811 0x4108CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_812 0x4108CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_813 0x4108CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_814 0x4108CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_815 0x4108CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_816 0x4108CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_817 0x4108CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_818 0x4108CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_819 0x4108CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_820 0x4108CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_821 0x4108CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_822 0x4108CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_823 0x4108CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_824 0x4108CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_825 0x4108CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_826 0x4108CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_827 0x4108CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_828 0x4108CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_829 0x4108CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_830 0x4108CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_831 0x4108CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_832 0x4108D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_833 0x4108D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_834 0x4108D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_835 0x4108D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_836 0x4108D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_837 0x4108D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_838 0x4108D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_839 0x4108D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_840 0x4108D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_841 0x4108D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_842 0x4108D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_843 0x4108D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_844 0x4108D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_845 0x4108D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_846 0x4108D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_847 0x4108D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_848 0x4108D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_849 0x4108D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_850 0x4108D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_851 0x4108D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_852 0x4108D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_853 0x4108D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_854 0x4108D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_855 0x4108D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_856 0x4108D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_857 0x4108D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_858 0x4108D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_859 0x4108D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_860 0x4108D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_861 0x4108D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_862 0x4108D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_863 0x4108D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_864 0x4108D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_865 0x4108D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_866 0x4108D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_867 0x4108D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_868 0x4108D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_869 0x4108D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_870 0x4108D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_871 0x4108D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_872 0x4108DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_873 0x4108DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_874 0x4108DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_875 0x4108DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_876 0x4108DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_877 0x4108DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_878 0x4108DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_879 0x4108DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_880 0x4108DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_881 0x4108DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_882 0x4108DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_883 0x4108DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_884 0x4108DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_885 0x4108DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_886 0x4108DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_887 0x4108DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_888 0x4108DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_889 0x4108DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_890 0x4108DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_891 0x4108DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_892 0x4108DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_893 0x4108DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_894 0x4108DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_895 0x4108DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_896 0x4108E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_897 0x4108E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_898 0x4108E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_899 0x4108E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_900 0x4108E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_901 0x4108E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_902 0x4108E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_903 0x4108E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_904 0x4108E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_905 0x4108E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_906 0x4108E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_907 0x4108E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_908 0x4108E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_909 0x4108E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_910 0x4108E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_911 0x4108E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_912 0x4108E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_913 0x4108E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_914 0x4108E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_915 0x4108E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_916 0x4108E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_917 0x4108E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_918 0x4108E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_919 0x4108E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_920 0x4108E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_921 0x4108E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_922 0x4108E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_923 0x4108E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_924 0x4108E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_925 0x4108E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_926 0x4108E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_927 0x4108E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_928 0x4108E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_929 0x4108E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_930 0x4108E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_931 0x4108E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_932 0x4108E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_933 0x4108E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_934 0x4108E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_935 0x4108E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_936 0x4108EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_937 0x4108EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_938 0x4108EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_939 0x4108EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_940 0x4108EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_941 0x4108EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_942 0x4108EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_943 0x4108EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_944 0x4108EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_945 0x4108EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_946 0x4108EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_947 0x4108ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_948 0x4108ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_949 0x4108ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_950 0x4108ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_951 0x4108EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_952 0x4108EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_953 0x4108EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_954 0x4108EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_955 0x4108EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_956 0x4108EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_957 0x4108EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_958 0x4108EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_959 0x4108EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_960 0x4108F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_961 0x4108F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_962 0x4108F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_963 0x4108F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_964 0x4108F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_965 0x4108F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_966 0x4108F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_967 0x4108F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_968 0x4108F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_969 0x4108F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_970 0x4108F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_971 0x4108F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_972 0x4108F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_973 0x4108F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_974 0x4108F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_975 0x4108F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_976 0x4108F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_977 0x4108F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_978 0x4108F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_979 0x4108F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_980 0x4108F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_981 0x4108F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_982 0x4108F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_983 0x4108F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_984 0x4108F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_985 0x4108F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_986 0x4108F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_987 0x4108F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_988 0x4108F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_989 0x4108F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_990 0x4108F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_991 0x4108F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_992 0x4108F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_993 0x4108F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_994 0x4108F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_995 0x4108F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_996 0x4108F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_997 0x4108F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_998 0x4108F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_999 0x4108F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1000 0x4108FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1001 0x4108FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1002 0x4108FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1003 0x4108FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1004 0x4108FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1005 0x4108FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1006 0x4108FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1007 0x4108FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1008 0x4108FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1009 0x4108FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1010 0x4108FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1011 0x4108FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1012 0x4108FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1013 0x4108FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1014 0x4108FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1015 0x4108FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1016 0x4108FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1017 0x4108FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1018 0x4108FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1019 0x4108FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1020 0x4108FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1021 0x4108FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1022 0x4108FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1023 0x4108FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1024 0x4109000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1025 0x4109004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1026 0x4109008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1027 0x410900C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1028 0x4109010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1029 0x4109014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1030 0x4109018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1031 0x410901C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1032 0x4109020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1033 0x4109024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1034 0x4109028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1035 0x410902C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1036 0x4109030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1037 0x4109034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1038 0x4109038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1039 0x410903C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1040 0x4109040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1041 0x4109044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1042 0x4109048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1043 0x410904C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1044 0x4109050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1045 0x4109054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1046 0x4109058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1047 0x410905C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1048 0x4109060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1049 0x4109064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1050 0x4109068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1051 0x410906C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1052 0x4109070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1053 0x4109074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1054 0x4109078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1055 0x410907C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1056 0x4109080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1057 0x4109084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1058 0x4109088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1059 0x410908C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1060 0x4109090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1061 0x4109094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1062 0x4109098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1063 0x410909C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1064 0x41090A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1065 0x41090A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1066 0x41090A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1067 0x41090AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1068 0x41090B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1069 0x41090B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1070 0x41090B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1071 0x41090BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1072 0x41090C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1073 0x41090C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1074 0x41090C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1075 0x41090CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1076 0x41090D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1077 0x41090D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1078 0x41090D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1079 0x41090DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1080 0x41090E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1081 0x41090E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1082 0x41090E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1083 0x41090EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1084 0x41090F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1085 0x41090F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1086 0x41090F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1087 0x41090FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1088 0x4109100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1089 0x4109104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1090 0x4109108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1091 0x410910C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1092 0x4109110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1093 0x4109114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1094 0x4109118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1095 0x410911C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1096 0x4109120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1097 0x4109124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1098 0x4109128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1099 0x410912C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1100 0x4109130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1101 0x4109134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1102 0x4109138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1103 0x410913C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1104 0x4109140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1105 0x4109144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1106 0x4109148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1107 0x410914C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1108 0x4109150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1109 0x4109154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1110 0x4109158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1111 0x410915C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1112 0x4109160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1113 0x4109164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1114 0x4109168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1115 0x410916C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1116 0x4109170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1117 0x4109174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1118 0x4109178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1119 0x410917C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1120 0x4109180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1121 0x4109184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1122 0x4109188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1123 0x410918C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1124 0x4109190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1125 0x4109194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1126 0x4109198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1127 0x410919C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1128 0x41091A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1129 0x41091A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1130 0x41091A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1131 0x41091AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1132 0x41091B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1133 0x41091B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1134 0x41091B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1135 0x41091BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1136 0x41091C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1137 0x41091C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1138 0x41091C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1139 0x41091CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1140 0x41091D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1141 0x41091D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1142 0x41091D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1143 0x41091DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1144 0x41091E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1145 0x41091E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1146 0x41091E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1147 0x41091EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1148 0x41091F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1149 0x41091F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1150 0x41091F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1151 0x41091FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1152 0x4109200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1153 0x4109204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1154 0x4109208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1155 0x410920C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1156 0x4109210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1157 0x4109214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1158 0x4109218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1159 0x410921C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1160 0x4109220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1161 0x4109224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1162 0x4109228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1163 0x410922C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1164 0x4109230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1165 0x4109234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1166 0x4109238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1167 0x410923C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1168 0x4109240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1169 0x4109244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1170 0x4109248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1171 0x410924C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1172 0x4109250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1173 0x4109254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1174 0x4109258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1175 0x410925C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1176 0x4109260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1177 0x4109264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1178 0x4109268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1179 0x410926C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1180 0x4109270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1181 0x4109274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1182 0x4109278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1183 0x410927C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1184 0x4109280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1185 0x4109284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1186 0x4109288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1187 0x410928C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1188 0x4109290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1189 0x4109294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1190 0x4109298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1191 0x410929C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1192 0x41092A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1193 0x41092A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1194 0x41092A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1195 0x41092AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1196 0x41092B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1197 0x41092B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1198 0x41092B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1199 0x41092BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1200 0x41092C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1201 0x41092C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1202 0x41092C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1203 0x41092CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1204 0x41092D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1205 0x41092D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1206 0x41092D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1207 0x41092DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1208 0x41092E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1209 0x41092E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1210 0x41092E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1211 0x41092EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1212 0x41092F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1213 0x41092F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1214 0x41092F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1215 0x41092FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1216 0x4109300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1217 0x4109304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1218 0x4109308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1219 0x410930C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1220 0x4109310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1221 0x4109314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1222 0x4109318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1223 0x410931C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1224 0x4109320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1225 0x4109324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1226 0x4109328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1227 0x410932C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1228 0x4109330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1229 0x4109334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1230 0x4109338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1231 0x410933C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1232 0x4109340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1233 0x4109344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1234 0x4109348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1235 0x410934C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1236 0x4109350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1237 0x4109354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1238 0x4109358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1239 0x410935C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1240 0x4109360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1241 0x4109364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1242 0x4109368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1243 0x410936C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1244 0x4109370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1245 0x4109374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1246 0x4109378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1247 0x410937C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1248 0x4109380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1249 0x4109384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1250 0x4109388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1251 0x410938C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1252 0x4109390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1253 0x4109394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1254 0x4109398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1255 0x410939C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1256 0x41093A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1257 0x41093A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1258 0x41093A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1259 0x41093AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1260 0x41093B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1261 0x41093B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1262 0x41093B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1263 0x41093BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1264 0x41093C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1265 0x41093C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1266 0x41093C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1267 0x41093CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1268 0x41093D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1269 0x41093D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1270 0x41093D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1271 0x41093DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1272 0x41093E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1273 0x41093E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1274 0x41093E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1275 0x41093EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1276 0x41093F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1277 0x41093F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1278 0x41093F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1279 0x41093FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1280 0x4109400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1281 0x4109404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1282 0x4109408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1283 0x410940C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1284 0x4109410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1285 0x4109414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1286 0x4109418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1287 0x410941C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1288 0x4109420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1289 0x4109424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1290 0x4109428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1291 0x410942C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1292 0x4109430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1293 0x4109434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1294 0x4109438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1295 0x410943C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1296 0x4109440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1297 0x4109444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1298 0x4109448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1299 0x410944C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1300 0x4109450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1301 0x4109454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1302 0x4109458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1303 0x410945C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1304 0x4109460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1305 0x4109464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1306 0x4109468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1307 0x410946C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1308 0x4109470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1309 0x4109474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1310 0x4109478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1311 0x410947C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1312 0x4109480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1313 0x4109484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1314 0x4109488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1315 0x410948C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1316 0x4109490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1317 0x4109494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1318 0x4109498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1319 0x410949C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1320 0x41094A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1321 0x41094A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1322 0x41094A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1323 0x41094AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1324 0x41094B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1325 0x41094B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1326 0x41094B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1327 0x41094BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1328 0x41094C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1329 0x41094C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1330 0x41094C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1331 0x41094CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1332 0x41094D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1333 0x41094D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1334 0x41094D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1335 0x41094DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1336 0x41094E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1337 0x41094E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1338 0x41094E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1339 0x41094EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1340 0x41094F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1341 0x41094F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1342 0x41094F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1343 0x41094FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1344 0x4109500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1345 0x4109504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1346 0x4109508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1347 0x410950C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1348 0x4109510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1349 0x4109514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1350 0x4109518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1351 0x410951C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1352 0x4109520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1353 0x4109524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1354 0x4109528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1355 0x410952C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1356 0x4109530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1357 0x4109534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1358 0x4109538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1359 0x410953C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1360 0x4109540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1361 0x4109544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1362 0x4109548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1363 0x410954C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1364 0x4109550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1365 0x4109554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1366 0x4109558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1367 0x410955C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1368 0x4109560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1369 0x4109564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1370 0x4109568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1371 0x410956C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1372 0x4109570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1373 0x4109574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1374 0x4109578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1375 0x410957C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1376 0x4109580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1377 0x4109584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1378 0x4109588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1379 0x410958C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1380 0x4109590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1381 0x4109594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1382 0x4109598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1383 0x410959C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1384 0x41095A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1385 0x41095A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1386 0x41095A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1387 0x41095AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1388 0x41095B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1389 0x41095B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1390 0x41095B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1391 0x41095BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1392 0x41095C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1393 0x41095C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1394 0x41095C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1395 0x41095CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1396 0x41095D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1397 0x41095D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1398 0x41095D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1399 0x41095DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1400 0x41095E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1401 0x41095E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1402 0x41095E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1403 0x41095EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1404 0x41095F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1405 0x41095F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1406 0x41095F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1407 0x41095FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1408 0x4109600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1409 0x4109604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1410 0x4109608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1411 0x410960C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1412 0x4109610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1413 0x4109614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1414 0x4109618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1415 0x410961C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1416 0x4109620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1417 0x4109624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1418 0x4109628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1419 0x410962C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1420 0x4109630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1421 0x4109634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1422 0x4109638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1423 0x410963C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1424 0x4109640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1425 0x4109644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1426 0x4109648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1427 0x410964C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1428 0x4109650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1429 0x4109654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1430 0x4109658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1431 0x410965C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1432 0x4109660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1433 0x4109664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1434 0x4109668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1435 0x410966C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1436 0x4109670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1437 0x4109674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1438 0x4109678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1439 0x410967C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1440 0x4109680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1441 0x4109684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1442 0x4109688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1443 0x410968C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1444 0x4109690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1445 0x4109694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1446 0x4109698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1447 0x410969C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1448 0x41096A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1449 0x41096A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1450 0x41096A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1451 0x41096AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1452 0x41096B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1453 0x41096B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1454 0x41096B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1455 0x41096BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1456 0x41096C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1457 0x41096C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1458 0x41096C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1459 0x41096CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1460 0x41096D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1461 0x41096D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1462 0x41096D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1463 0x41096DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1464 0x41096E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1465 0x41096E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1466 0x41096E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1467 0x41096EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1468 0x41096F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1469 0x41096F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1470 0x41096F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1471 0x41096FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1472 0x4109700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1473 0x4109704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1474 0x4109708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1475 0x410970C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1476 0x4109710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1477 0x4109714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1478 0x4109718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1479 0x410971C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1480 0x4109720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1481 0x4109724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1482 0x4109728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1483 0x410972C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1484 0x4109730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1485 0x4109734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1486 0x4109738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1487 0x410973C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1488 0x4109740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1489 0x4109744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1490 0x4109748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1491 0x410974C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1492 0x4109750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1493 0x4109754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1494 0x4109758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1495 0x410975C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1496 0x4109760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1497 0x4109764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1498 0x4109768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1499 0x410976C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1500 0x4109770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1501 0x4109774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1502 0x4109778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1503 0x410977C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1504 0x4109780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1505 0x4109784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1506 0x4109788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1507 0x410978C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1508 0x4109790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1509 0x4109794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1510 0x4109798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1511 0x410979C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1512 0x41097A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1513 0x41097A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1514 0x41097A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1515 0x41097AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1516 0x41097B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1517 0x41097B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1518 0x41097B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1519 0x41097BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1520 0x41097C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1521 0x41097C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1522 0x41097C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1523 0x41097CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1524 0x41097D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1525 0x41097D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1526 0x41097D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1527 0x41097DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1528 0x41097E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1529 0x41097E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1530 0x41097E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1531 0x41097EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1532 0x41097F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1533 0x41097F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1534 0x41097F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1535 0x41097FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1536 0x4109800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1537 0x4109804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1538 0x4109808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1539 0x410980C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1540 0x4109810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1541 0x4109814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1542 0x4109818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1543 0x410981C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1544 0x4109820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1545 0x4109824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1546 0x4109828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1547 0x410982C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1548 0x4109830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1549 0x4109834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1550 0x4109838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1551 0x410983C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1552 0x4109840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1553 0x4109844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1554 0x4109848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1555 0x410984C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1556 0x4109850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1557 0x4109854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1558 0x4109858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1559 0x410985C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1560 0x4109860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1561 0x4109864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1562 0x4109868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1563 0x410986C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1564 0x4109870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1565 0x4109874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1566 0x4109878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1567 0x410987C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1568 0x4109880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1569 0x4109884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1570 0x4109888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1571 0x410988C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1572 0x4109890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1573 0x4109894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1574 0x4109898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1575 0x410989C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1576 0x41098A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1577 0x41098A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1578 0x41098A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1579 0x41098AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1580 0x41098B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1581 0x41098B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1582 0x41098B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1583 0x41098BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1584 0x41098C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1585 0x41098C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1586 0x41098C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1587 0x41098CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1588 0x41098D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1589 0x41098D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1590 0x41098D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1591 0x41098DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1592 0x41098E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1593 0x41098E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1594 0x41098E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1595 0x41098EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1596 0x41098F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1597 0x41098F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1598 0x41098F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1599 0x41098FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1600 0x4109900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1601 0x4109904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1602 0x4109908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1603 0x410990C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1604 0x4109910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1605 0x4109914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1606 0x4109918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1607 0x410991C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1608 0x4109920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1609 0x4109924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1610 0x4109928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1611 0x410992C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1612 0x4109930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1613 0x4109934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1614 0x4109938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1615 0x410993C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1616 0x4109940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1617 0x4109944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1618 0x4109948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1619 0x410994C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1620 0x4109950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1621 0x4109954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1622 0x4109958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1623 0x410995C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1624 0x4109960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1625 0x4109964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1626 0x4109968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1627 0x410996C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1628 0x4109970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1629 0x4109974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1630 0x4109978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1631 0x410997C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1632 0x4109980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1633 0x4109984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1634 0x4109988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1635 0x410998C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1636 0x4109990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1637 0x4109994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1638 0x4109998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1639 0x410999C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1640 0x41099A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1641 0x41099A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1642 0x41099A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1643 0x41099AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1644 0x41099B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1645 0x41099B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1646 0x41099B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1647 0x41099BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1648 0x41099C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1649 0x41099C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1650 0x41099C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1651 0x41099CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1652 0x41099D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1653 0x41099D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1654 0x41099D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1655 0x41099DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1656 0x41099E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1657 0x41099E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1658 0x41099E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1659 0x41099EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1660 0x41099F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1661 0x41099F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1662 0x41099F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1663 0x41099FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1664 0x4109A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1665 0x4109A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1666 0x4109A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1667 0x4109A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1668 0x4109A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1669 0x4109A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1670 0x4109A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1671 0x4109A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1672 0x4109A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1673 0x4109A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1674 0x4109A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1675 0x4109A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1676 0x4109A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1677 0x4109A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1678 0x4109A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1679 0x4109A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1680 0x4109A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1681 0x4109A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1682 0x4109A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1683 0x4109A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1684 0x4109A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1685 0x4109A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1686 0x4109A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1687 0x4109A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1688 0x4109A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1689 0x4109A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1690 0x4109A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1691 0x4109A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1692 0x4109A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1693 0x4109A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1694 0x4109A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1695 0x4109A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1696 0x4109A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1697 0x4109A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1698 0x4109A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1699 0x4109A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1700 0x4109A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1701 0x4109A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1702 0x4109A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1703 0x4109A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1704 0x4109AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1705 0x4109AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1706 0x4109AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1707 0x4109AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1708 0x4109AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1709 0x4109AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1710 0x4109AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1711 0x4109ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1712 0x4109AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1713 0x4109AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1714 0x4109AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1715 0x4109ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1716 0x4109AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1717 0x4109AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1718 0x4109AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1719 0x4109ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1720 0x4109AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1721 0x4109AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1722 0x4109AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1723 0x4109AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1724 0x4109AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1725 0x4109AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1726 0x4109AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1727 0x4109AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1728 0x4109B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1729 0x4109B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1730 0x4109B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1731 0x4109B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1732 0x4109B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1733 0x4109B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1734 0x4109B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1735 0x4109B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1736 0x4109B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1737 0x4109B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1738 0x4109B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1739 0x4109B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1740 0x4109B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1741 0x4109B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1742 0x4109B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1743 0x4109B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1744 0x4109B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1745 0x4109B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1746 0x4109B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1747 0x4109B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1748 0x4109B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1749 0x4109B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1750 0x4109B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1751 0x4109B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1752 0x4109B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1753 0x4109B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1754 0x4109B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1755 0x4109B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1756 0x4109B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1757 0x4109B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1758 0x4109B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1759 0x4109B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1760 0x4109B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1761 0x4109B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1762 0x4109B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1763 0x4109B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1764 0x4109B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1765 0x4109B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1766 0x4109B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1767 0x4109B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1768 0x4109BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1769 0x4109BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1770 0x4109BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1771 0x4109BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1772 0x4109BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1773 0x4109BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1774 0x4109BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1775 0x4109BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1776 0x4109BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1777 0x4109BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1778 0x4109BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1779 0x4109BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1780 0x4109BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1781 0x4109BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1782 0x4109BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1783 0x4109BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1784 0x4109BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1785 0x4109BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1786 0x4109BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1787 0x4109BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1788 0x4109BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1789 0x4109BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1790 0x4109BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1791 0x4109BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1792 0x4109C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1793 0x4109C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1794 0x4109C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1795 0x4109C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1796 0x4109C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1797 0x4109C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1798 0x4109C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1799 0x4109C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1800 0x4109C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1801 0x4109C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1802 0x4109C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1803 0x4109C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1804 0x4109C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1805 0x4109C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1806 0x4109C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1807 0x4109C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1808 0x4109C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1809 0x4109C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1810 0x4109C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1811 0x4109C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1812 0x4109C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1813 0x4109C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1814 0x4109C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1815 0x4109C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1816 0x4109C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1817 0x4109C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1818 0x4109C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1819 0x4109C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1820 0x4109C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1821 0x4109C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1822 0x4109C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1823 0x4109C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1824 0x4109C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1825 0x4109C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1826 0x4109C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1827 0x4109C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1828 0x4109C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1829 0x4109C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1830 0x4109C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1831 0x4109C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1832 0x4109CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1833 0x4109CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1834 0x4109CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1835 0x4109CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1836 0x4109CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1837 0x4109CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1838 0x4109CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1839 0x4109CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1840 0x4109CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1841 0x4109CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1842 0x4109CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1843 0x4109CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1844 0x4109CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1845 0x4109CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1846 0x4109CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1847 0x4109CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1848 0x4109CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1849 0x4109CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1850 0x4109CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1851 0x4109CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1852 0x4109CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1853 0x4109CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1854 0x4109CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1855 0x4109CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1856 0x4109D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1857 0x4109D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1858 0x4109D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1859 0x4109D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1860 0x4109D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1861 0x4109D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1862 0x4109D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1863 0x4109D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1864 0x4109D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1865 0x4109D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1866 0x4109D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1867 0x4109D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1868 0x4109D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1869 0x4109D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1870 0x4109D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1871 0x4109D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1872 0x4109D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1873 0x4109D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1874 0x4109D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1875 0x4109D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1876 0x4109D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1877 0x4109D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1878 0x4109D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1879 0x4109D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1880 0x4109D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1881 0x4109D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1882 0x4109D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1883 0x4109D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1884 0x4109D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1885 0x4109D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1886 0x4109D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1887 0x4109D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1888 0x4109D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1889 0x4109D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1890 0x4109D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1891 0x4109D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1892 0x4109D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1893 0x4109D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1894 0x4109D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1895 0x4109D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1896 0x4109DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1897 0x4109DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1898 0x4109DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1899 0x4109DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1900 0x4109DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1901 0x4109DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1902 0x4109DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1903 0x4109DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1904 0x4109DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1905 0x4109DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1906 0x4109DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1907 0x4109DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1908 0x4109DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1909 0x4109DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1910 0x4109DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1911 0x4109DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1912 0x4109DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1913 0x4109DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1914 0x4109DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1915 0x4109DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1916 0x4109DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1917 0x4109DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1918 0x4109DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1919 0x4109DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1920 0x4109E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1921 0x4109E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1922 0x4109E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1923 0x4109E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1924 0x4109E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1925 0x4109E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1926 0x4109E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1927 0x4109E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1928 0x4109E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1929 0x4109E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1930 0x4109E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1931 0x4109E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1932 0x4109E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1933 0x4109E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1934 0x4109E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1935 0x4109E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1936 0x4109E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1937 0x4109E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1938 0x4109E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1939 0x4109E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1940 0x4109E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1941 0x4109E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1942 0x4109E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1943 0x4109E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1944 0x4109E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1945 0x4109E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1946 0x4109E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1947 0x4109E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1948 0x4109E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1949 0x4109E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1950 0x4109E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1951 0x4109E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1952 0x4109E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1953 0x4109E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1954 0x4109E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1955 0x4109E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1956 0x4109E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1957 0x4109E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1958 0x4109E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1959 0x4109E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1960 0x4109EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1961 0x4109EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1962 0x4109EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1963 0x4109EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1964 0x4109EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1965 0x4109EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1966 0x4109EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1967 0x4109EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1968 0x4109EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1969 0x4109EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1970 0x4109EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1971 0x4109ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1972 0x4109ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1973 0x4109ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1974 0x4109ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1975 0x4109EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1976 0x4109EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1977 0x4109EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1978 0x4109EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1979 0x4109EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1980 0x4109EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1981 0x4109EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1982 0x4109EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1983 0x4109EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1984 0x4109F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1985 0x4109F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1986 0x4109F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1987 0x4109F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1988 0x4109F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1989 0x4109F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1990 0x4109F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1991 0x4109F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1992 0x4109F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1993 0x4109F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1994 0x4109F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1995 0x4109F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1996 0x4109F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1997 0x4109F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1998 0x4109F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_1999 0x4109F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2000 0x4109F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2001 0x4109F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2002 0x4109F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2003 0x4109F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2004 0x4109F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2005 0x4109F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2006 0x4109F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2007 0x4109F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2008 0x4109F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2009 0x4109F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2010 0x4109F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2011 0x4109F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2012 0x4109F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2013 0x4109F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2014 0x4109F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2015 0x4109F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2016 0x4109F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2017 0x4109F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2018 0x4109F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2019 0x4109F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2020 0x4109F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2021 0x4109F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2022 0x4109F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2023 0x4109F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2024 0x4109FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2025 0x4109FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2026 0x4109FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2027 0x4109FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2028 0x4109FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2029 0x4109FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2030 0x4109FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2031 0x4109FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2032 0x4109FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2033 0x4109FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2034 0x4109FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2035 0x4109FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2036 0x4109FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2037 0x4109FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2038 0x4109FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2039 0x4109FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2040 0x4109FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2041 0x4109FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2042 0x4109FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2043 0x4109FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2044 0x4109FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2045 0x4109FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2046 0x4109FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_2047 0x4109FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x410A000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1 0x410A004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2 0x410A008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_3 0x410A00C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_4 0x410A010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_5 0x410A014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_6 0x410A018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_7 0x410A01C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_8 0x410A020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_9 0x410A024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_10 0x410A028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_11 0x410A02C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_12 0x410A030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_13 0x410A034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_14 0x410A038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_15 0x410A03C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_16 0x410A040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_17 0x410A044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_18 0x410A048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_19 0x410A04C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_20 0x410A050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_21 0x410A054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_22 0x410A058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_23 0x410A05C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_24 0x410A060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_25 0x410A064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_26 0x410A068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_27 0x410A06C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_28 0x410A070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_29 0x410A074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_30 0x410A078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_31 0x410A07C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_32 0x410A080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_33 0x410A084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_34 0x410A088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_35 0x410A08C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_36 0x410A090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_37 0x410A094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_38 0x410A098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_39 0x410A09C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_40 0x410A0A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_41 0x410A0A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_42 0x410A0A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_43 0x410A0AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_44 0x410A0B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_45 0x410A0B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_46 0x410A0B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_47 0x410A0BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_48 0x410A0C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_49 0x410A0C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_50 0x410A0C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_51 0x410A0CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_52 0x410A0D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_53 0x410A0D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_54 0x410A0D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_55 0x410A0DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_56 0x410A0E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_57 0x410A0E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_58 0x410A0E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_59 0x410A0EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_60 0x410A0F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_61 0x410A0F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_62 0x410A0F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_63 0x410A0FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_64 0x410A100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_65 0x410A104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_66 0x410A108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_67 0x410A10C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_68 0x410A110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_69 0x410A114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_70 0x410A118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_71 0x410A11C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_72 0x410A120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_73 0x410A124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_74 0x410A128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_75 0x410A12C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_76 0x410A130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_77 0x410A134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_78 0x410A138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_79 0x410A13C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_80 0x410A140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_81 0x410A144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_82 0x410A148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_83 0x410A14C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_84 0x410A150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_85 0x410A154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_86 0x410A158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_87 0x410A15C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_88 0x410A160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_89 0x410A164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_90 0x410A168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_91 0x410A16C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_92 0x410A170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_93 0x410A174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_94 0x410A178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_95 0x410A17C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_96 0x410A180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_97 0x410A184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_98 0x410A188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_99 0x410A18C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_100 0x410A190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_101 0x410A194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_102 0x410A198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_103 0x410A19C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_104 0x410A1A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_105 0x410A1A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_106 0x410A1A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_107 0x410A1AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_108 0x410A1B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_109 0x410A1B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_110 0x410A1B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_111 0x410A1BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_112 0x410A1C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_113 0x410A1C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_114 0x410A1C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_115 0x410A1CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_116 0x410A1D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_117 0x410A1D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_118 0x410A1D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_119 0x410A1DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_120 0x410A1E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_121 0x410A1E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_122 0x410A1E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_123 0x410A1EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_124 0x410A1F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_125 0x410A1F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_126 0x410A1F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_127 0x410A1FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_128 0x410A200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_129 0x410A204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_130 0x410A208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_131 0x410A20C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_132 0x410A210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_133 0x410A214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_134 0x410A218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_135 0x410A21C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_136 0x410A220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_137 0x410A224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_138 0x410A228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_139 0x410A22C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_140 0x410A230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_141 0x410A234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_142 0x410A238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_143 0x410A23C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_144 0x410A240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_145 0x410A244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_146 0x410A248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_147 0x410A24C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_148 0x410A250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_149 0x410A254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_150 0x410A258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_151 0x410A25C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_152 0x410A260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_153 0x410A264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_154 0x410A268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_155 0x410A26C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_156 0x410A270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_157 0x410A274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_158 0x410A278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_159 0x410A27C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_160 0x410A280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_161 0x410A284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_162 0x410A288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_163 0x410A28C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_164 0x410A290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_165 0x410A294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_166 0x410A298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_167 0x410A29C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_168 0x410A2A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_169 0x410A2A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_170 0x410A2A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_171 0x410A2AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_172 0x410A2B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_173 0x410A2B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_174 0x410A2B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_175 0x410A2BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_176 0x410A2C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_177 0x410A2C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_178 0x410A2C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_179 0x410A2CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_180 0x410A2D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_181 0x410A2D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_182 0x410A2D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_183 0x410A2DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_184 0x410A2E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_185 0x410A2E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_186 0x410A2E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_187 0x410A2EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_188 0x410A2F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_189 0x410A2F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_190 0x410A2F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_191 0x410A2FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_192 0x410A300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_193 0x410A304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_194 0x410A308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_195 0x410A30C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_196 0x410A310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_197 0x410A314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_198 0x410A318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_199 0x410A31C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_200 0x410A320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_201 0x410A324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_202 0x410A328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_203 0x410A32C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_204 0x410A330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_205 0x410A334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_206 0x410A338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_207 0x410A33C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_208 0x410A340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_209 0x410A344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_210 0x410A348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_211 0x410A34C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_212 0x410A350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_213 0x410A354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_214 0x410A358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_215 0x410A35C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_216 0x410A360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_217 0x410A364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_218 0x410A368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_219 0x410A36C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_220 0x410A370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_221 0x410A374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_222 0x410A378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_223 0x410A37C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_224 0x410A380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_225 0x410A384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_226 0x410A388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_227 0x410A38C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_228 0x410A390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_229 0x410A394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_230 0x410A398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_231 0x410A39C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_232 0x410A3A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_233 0x410A3A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_234 0x410A3A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_235 0x410A3AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_236 0x410A3B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_237 0x410A3B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_238 0x410A3B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_239 0x410A3BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_240 0x410A3C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_241 0x410A3C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_242 0x410A3C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_243 0x410A3CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_244 0x410A3D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_245 0x410A3D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_246 0x410A3D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_247 0x410A3DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_248 0x410A3E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_249 0x410A3E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_250 0x410A3E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_251 0x410A3EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_252 0x410A3F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_253 0x410A3F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_254 0x410A3F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_255 0x410A3FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_256 0x410A400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_257 0x410A404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_258 0x410A408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_259 0x410A40C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_260 0x410A410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_261 0x410A414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_262 0x410A418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_263 0x410A41C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_264 0x410A420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_265 0x410A424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_266 0x410A428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_267 0x410A42C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_268 0x410A430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_269 0x410A434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_270 0x410A438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_271 0x410A43C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_272 0x410A440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_273 0x410A444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_274 0x410A448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_275 0x410A44C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_276 0x410A450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_277 0x410A454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_278 0x410A458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_279 0x410A45C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_280 0x410A460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_281 0x410A464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_282 0x410A468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_283 0x410A46C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_284 0x410A470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_285 0x410A474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_286 0x410A478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_287 0x410A47C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_288 0x410A480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_289 0x410A484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_290 0x410A488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_291 0x410A48C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_292 0x410A490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_293 0x410A494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_294 0x410A498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_295 0x410A49C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_296 0x410A4A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_297 0x410A4A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_298 0x410A4A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_299 0x410A4AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_300 0x410A4B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_301 0x410A4B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_302 0x410A4B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_303 0x410A4BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_304 0x410A4C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_305 0x410A4C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_306 0x410A4C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_307 0x410A4CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_308 0x410A4D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_309 0x410A4D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_310 0x410A4D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_311 0x410A4DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_312 0x410A4E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_313 0x410A4E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_314 0x410A4E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_315 0x410A4EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_316 0x410A4F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_317 0x410A4F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_318 0x410A4F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_319 0x410A4FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_320 0x410A500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_321 0x410A504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_322 0x410A508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_323 0x410A50C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_324 0x410A510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_325 0x410A514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_326 0x410A518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_327 0x410A51C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_328 0x410A520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_329 0x410A524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_330 0x410A528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_331 0x410A52C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_332 0x410A530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_333 0x410A534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_334 0x410A538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_335 0x410A53C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_336 0x410A540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_337 0x410A544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_338 0x410A548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_339 0x410A54C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_340 0x410A550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_341 0x410A554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_342 0x410A558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_343 0x410A55C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_344 0x410A560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_345 0x410A564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_346 0x410A568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_347 0x410A56C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_348 0x410A570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_349 0x410A574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_350 0x410A578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_351 0x410A57C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_352 0x410A580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_353 0x410A584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_354 0x410A588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_355 0x410A58C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_356 0x410A590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_357 0x410A594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_358 0x410A598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_359 0x410A59C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_360 0x410A5A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_361 0x410A5A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_362 0x410A5A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_363 0x410A5AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_364 0x410A5B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_365 0x410A5B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_366 0x410A5B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_367 0x410A5BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_368 0x410A5C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_369 0x410A5C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_370 0x410A5C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_371 0x410A5CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_372 0x410A5D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_373 0x410A5D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_374 0x410A5D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_375 0x410A5DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_376 0x410A5E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_377 0x410A5E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_378 0x410A5E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_379 0x410A5EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_380 0x410A5F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_381 0x410A5F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_382 0x410A5F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_383 0x410A5FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_384 0x410A600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_385 0x410A604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_386 0x410A608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_387 0x410A60C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_388 0x410A610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_389 0x410A614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_390 0x410A618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_391 0x410A61C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_392 0x410A620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_393 0x410A624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_394 0x410A628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_395 0x410A62C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_396 0x410A630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_397 0x410A634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_398 0x410A638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_399 0x410A63C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_400 0x410A640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_401 0x410A644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_402 0x410A648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_403 0x410A64C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_404 0x410A650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_405 0x410A654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_406 0x410A658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_407 0x410A65C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_408 0x410A660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_409 0x410A664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_410 0x410A668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_411 0x410A66C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_412 0x410A670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_413 0x410A674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_414 0x410A678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_415 0x410A67C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_416 0x410A680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_417 0x410A684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_418 0x410A688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_419 0x410A68C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_420 0x410A690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_421 0x410A694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_422 0x410A698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_423 0x410A69C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_424 0x410A6A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_425 0x410A6A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_426 0x410A6A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_427 0x410A6AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_428 0x410A6B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_429 0x410A6B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_430 0x410A6B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_431 0x410A6BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_432 0x410A6C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_433 0x410A6C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_434 0x410A6C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_435 0x410A6CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_436 0x410A6D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_437 0x410A6D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_438 0x410A6D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_439 0x410A6DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_440 0x410A6E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_441 0x410A6E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_442 0x410A6E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_443 0x410A6EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_444 0x410A6F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_445 0x410A6F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_446 0x410A6F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_447 0x410A6FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_448 0x410A700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_449 0x410A704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_450 0x410A708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_451 0x410A70C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_452 0x410A710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_453 0x410A714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_454 0x410A718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_455 0x410A71C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_456 0x410A720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_457 0x410A724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_458 0x410A728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_459 0x410A72C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_460 0x410A730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_461 0x410A734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_462 0x410A738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_463 0x410A73C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_464 0x410A740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_465 0x410A744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_466 0x410A748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_467 0x410A74C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_468 0x410A750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_469 0x410A754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_470 0x410A758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_471 0x410A75C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_472 0x410A760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_473 0x410A764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_474 0x410A768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_475 0x410A76C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_476 0x410A770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_477 0x410A774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_478 0x410A778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_479 0x410A77C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_480 0x410A780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_481 0x410A784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_482 0x410A788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_483 0x410A78C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_484 0x410A790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_485 0x410A794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_486 0x410A798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_487 0x410A79C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_488 0x410A7A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_489 0x410A7A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_490 0x410A7A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_491 0x410A7AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_492 0x410A7B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_493 0x410A7B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_494 0x410A7B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_495 0x410A7BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_496 0x410A7C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_497 0x410A7C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_498 0x410A7C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_499 0x410A7CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_500 0x410A7D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_501 0x410A7D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_502 0x410A7D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_503 0x410A7DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_504 0x410A7E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_505 0x410A7E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_506 0x410A7E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_507 0x410A7EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_508 0x410A7F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_509 0x410A7F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_510 0x410A7F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_511 0x410A7FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_512 0x410A800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_513 0x410A804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_514 0x410A808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_515 0x410A80C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_516 0x410A810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_517 0x410A814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_518 0x410A818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_519 0x410A81C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_520 0x410A820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_521 0x410A824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_522 0x410A828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_523 0x410A82C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_524 0x410A830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_525 0x410A834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_526 0x410A838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_527 0x410A83C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_528 0x410A840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_529 0x410A844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_530 0x410A848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_531 0x410A84C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_532 0x410A850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_533 0x410A854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_534 0x410A858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_535 0x410A85C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_536 0x410A860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_537 0x410A864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_538 0x410A868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_539 0x410A86C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_540 0x410A870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_541 0x410A874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_542 0x410A878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_543 0x410A87C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_544 0x410A880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_545 0x410A884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_546 0x410A888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_547 0x410A88C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_548 0x410A890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_549 0x410A894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_550 0x410A898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_551 0x410A89C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_552 0x410A8A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_553 0x410A8A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_554 0x410A8A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_555 0x410A8AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_556 0x410A8B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_557 0x410A8B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_558 0x410A8B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_559 0x410A8BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_560 0x410A8C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_561 0x410A8C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_562 0x410A8C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_563 0x410A8CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_564 0x410A8D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_565 0x410A8D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_566 0x410A8D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_567 0x410A8DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_568 0x410A8E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_569 0x410A8E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_570 0x410A8E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_571 0x410A8EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_572 0x410A8F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_573 0x410A8F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_574 0x410A8F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_575 0x410A8FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_576 0x410A900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_577 0x410A904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_578 0x410A908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_579 0x410A90C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_580 0x410A910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_581 0x410A914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_582 0x410A918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_583 0x410A91C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_584 0x410A920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_585 0x410A924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_586 0x410A928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_587 0x410A92C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_588 0x410A930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_589 0x410A934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_590 0x410A938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_591 0x410A93C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_592 0x410A940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_593 0x410A944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_594 0x410A948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_595 0x410A94C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_596 0x410A950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_597 0x410A954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_598 0x410A958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_599 0x410A95C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_600 0x410A960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_601 0x410A964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_602 0x410A968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_603 0x410A96C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_604 0x410A970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_605 0x410A974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_606 0x410A978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_607 0x410A97C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_608 0x410A980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_609 0x410A984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_610 0x410A988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_611 0x410A98C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_612 0x410A990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_613 0x410A994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_614 0x410A998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_615 0x410A99C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_616 0x410A9A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_617 0x410A9A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_618 0x410A9A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_619 0x410A9AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_620 0x410A9B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_621 0x410A9B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_622 0x410A9B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_623 0x410A9BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_624 0x410A9C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_625 0x410A9C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_626 0x410A9C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_627 0x410A9CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_628 0x410A9D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_629 0x410A9D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_630 0x410A9D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_631 0x410A9DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_632 0x410A9E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_633 0x410A9E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_634 0x410A9E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_635 0x410A9EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_636 0x410A9F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_637 0x410A9F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_638 0x410A9F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_639 0x410A9FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_640 0x410AA00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_641 0x410AA04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_642 0x410AA08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_643 0x410AA0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_644 0x410AA10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_645 0x410AA14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_646 0x410AA18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_647 0x410AA1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_648 0x410AA20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_649 0x410AA24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_650 0x410AA28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_651 0x410AA2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_652 0x410AA30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_653 0x410AA34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_654 0x410AA38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_655 0x410AA3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_656 0x410AA40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_657 0x410AA44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_658 0x410AA48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_659 0x410AA4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_660 0x410AA50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_661 0x410AA54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_662 0x410AA58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_663 0x410AA5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_664 0x410AA60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_665 0x410AA64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_666 0x410AA68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_667 0x410AA6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_668 0x410AA70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_669 0x410AA74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_670 0x410AA78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_671 0x410AA7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_672 0x410AA80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_673 0x410AA84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_674 0x410AA88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_675 0x410AA8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_676 0x410AA90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_677 0x410AA94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_678 0x410AA98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_679 0x410AA9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_680 0x410AAA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_681 0x410AAA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_682 0x410AAA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_683 0x410AAAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_684 0x410AAB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_685 0x410AAB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_686 0x410AAB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_687 0x410AABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_688 0x410AAC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_689 0x410AAC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_690 0x410AAC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_691 0x410AACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_692 0x410AAD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_693 0x410AAD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_694 0x410AAD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_695 0x410AADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_696 0x410AAE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_697 0x410AAE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_698 0x410AAE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_699 0x410AAEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_700 0x410AAF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_701 0x410AAF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_702 0x410AAF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_703 0x410AAFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_704 0x410AB00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_705 0x410AB04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_706 0x410AB08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_707 0x410AB0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_708 0x410AB10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_709 0x410AB14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_710 0x410AB18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_711 0x410AB1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_712 0x410AB20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_713 0x410AB24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_714 0x410AB28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_715 0x410AB2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_716 0x410AB30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_717 0x410AB34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_718 0x410AB38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_719 0x410AB3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_720 0x410AB40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_721 0x410AB44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_722 0x410AB48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_723 0x410AB4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_724 0x410AB50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_725 0x410AB54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_726 0x410AB58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_727 0x410AB5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_728 0x410AB60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_729 0x410AB64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_730 0x410AB68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_731 0x410AB6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_732 0x410AB70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_733 0x410AB74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_734 0x410AB78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_735 0x410AB7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_736 0x410AB80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_737 0x410AB84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_738 0x410AB88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_739 0x410AB8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_740 0x410AB90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_741 0x410AB94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_742 0x410AB98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_743 0x410AB9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_744 0x410ABA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_745 0x410ABA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_746 0x410ABA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_747 0x410ABAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_748 0x410ABB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_749 0x410ABB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_750 0x410ABB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_751 0x410ABBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_752 0x410ABC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_753 0x410ABC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_754 0x410ABC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_755 0x410ABCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_756 0x410ABD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_757 0x410ABD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_758 0x410ABD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_759 0x410ABDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_760 0x410ABE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_761 0x410ABE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_762 0x410ABE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_763 0x410ABEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_764 0x410ABF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_765 0x410ABF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_766 0x410ABF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_767 0x410ABFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_768 0x410AC00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_769 0x410AC04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_770 0x410AC08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_771 0x410AC0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_772 0x410AC10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_773 0x410AC14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_774 0x410AC18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_775 0x410AC1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_776 0x410AC20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_777 0x410AC24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_778 0x410AC28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_779 0x410AC2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_780 0x410AC30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_781 0x410AC34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_782 0x410AC38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_783 0x410AC3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_784 0x410AC40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_785 0x410AC44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_786 0x410AC48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_787 0x410AC4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_788 0x410AC50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_789 0x410AC54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_790 0x410AC58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_791 0x410AC5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_792 0x410AC60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_793 0x410AC64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_794 0x410AC68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_795 0x410AC6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_796 0x410AC70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_797 0x410AC74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_798 0x410AC78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_799 0x410AC7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_800 0x410AC80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_801 0x410AC84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_802 0x410AC88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_803 0x410AC8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_804 0x410AC90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_805 0x410AC94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_806 0x410AC98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_807 0x410AC9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_808 0x410ACA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_809 0x410ACA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_810 0x410ACA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_811 0x410ACAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_812 0x410ACB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_813 0x410ACB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_814 0x410ACB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_815 0x410ACBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_816 0x410ACC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_817 0x410ACC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_818 0x410ACC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_819 0x410ACCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_820 0x410ACD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_821 0x410ACD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_822 0x410ACD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_823 0x410ACDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_824 0x410ACE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_825 0x410ACE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_826 0x410ACE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_827 0x410ACEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_828 0x410ACF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_829 0x410ACF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_830 0x410ACF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_831 0x410ACFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_832 0x410AD00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_833 0x410AD04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_834 0x410AD08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_835 0x410AD0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_836 0x410AD10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_837 0x410AD14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_838 0x410AD18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_839 0x410AD1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_840 0x410AD20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_841 0x410AD24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_842 0x410AD28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_843 0x410AD2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_844 0x410AD30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_845 0x410AD34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_846 0x410AD38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_847 0x410AD3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_848 0x410AD40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_849 0x410AD44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_850 0x410AD48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_851 0x410AD4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_852 0x410AD50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_853 0x410AD54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_854 0x410AD58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_855 0x410AD5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_856 0x410AD60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_857 0x410AD64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_858 0x410AD68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_859 0x410AD6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_860 0x410AD70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_861 0x410AD74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_862 0x410AD78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_863 0x410AD7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_864 0x410AD80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_865 0x410AD84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_866 0x410AD88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_867 0x410AD8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_868 0x410AD90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_869 0x410AD94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_870 0x410AD98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_871 0x410AD9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_872 0x410ADA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_873 0x410ADA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_874 0x410ADA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_875 0x410ADAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_876 0x410ADB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_877 0x410ADB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_878 0x410ADB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_879 0x410ADBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_880 0x410ADC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_881 0x410ADC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_882 0x410ADC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_883 0x410ADCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_884 0x410ADD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_885 0x410ADD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_886 0x410ADD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_887 0x410ADDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_888 0x410ADE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_889 0x410ADE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_890 0x410ADE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_891 0x410ADEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_892 0x410ADF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_893 0x410ADF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_894 0x410ADF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_895 0x410ADFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_896 0x410AE00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_897 0x410AE04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_898 0x410AE08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_899 0x410AE0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_900 0x410AE10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_901 0x410AE14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_902 0x410AE18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_903 0x410AE1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_904 0x410AE20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_905 0x410AE24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_906 0x410AE28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_907 0x410AE2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_908 0x410AE30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_909 0x410AE34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_910 0x410AE38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_911 0x410AE3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_912 0x410AE40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_913 0x410AE44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_914 0x410AE48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_915 0x410AE4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_916 0x410AE50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_917 0x410AE54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_918 0x410AE58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_919 0x410AE5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_920 0x410AE60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_921 0x410AE64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_922 0x410AE68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_923 0x410AE6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_924 0x410AE70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_925 0x410AE74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_926 0x410AE78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_927 0x410AE7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_928 0x410AE80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_929 0x410AE84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_930 0x410AE88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_931 0x410AE8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_932 0x410AE90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_933 0x410AE94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_934 0x410AE98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_935 0x410AE9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_936 0x410AEA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_937 0x410AEA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_938 0x410AEA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_939 0x410AEAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_940 0x410AEB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_941 0x410AEB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_942 0x410AEB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_943 0x410AEBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_944 0x410AEC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_945 0x410AEC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_946 0x410AEC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_947 0x410AECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_948 0x410AED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_949 0x410AED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_950 0x410AED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_951 0x410AEDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_952 0x410AEE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_953 0x410AEE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_954 0x410AEE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_955 0x410AEEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_956 0x410AEF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_957 0x410AEF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_958 0x410AEF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_959 0x410AEFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_960 0x410AF00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_961 0x410AF04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_962 0x410AF08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_963 0x410AF0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_964 0x410AF10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_965 0x410AF14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_966 0x410AF18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_967 0x410AF1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_968 0x410AF20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_969 0x410AF24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_970 0x410AF28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_971 0x410AF2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_972 0x410AF30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_973 0x410AF34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_974 0x410AF38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_975 0x410AF3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_976 0x410AF40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_977 0x410AF44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_978 0x410AF48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_979 0x410AF4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_980 0x410AF50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_981 0x410AF54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_982 0x410AF58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_983 0x410AF5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_984 0x410AF60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_985 0x410AF64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_986 0x410AF68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_987 0x410AF6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_988 0x410AF70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_989 0x410AF74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_990 0x410AF78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_991 0x410AF7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_992 0x410AF80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_993 0x410AF84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_994 0x410AF88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_995 0x410AF8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_996 0x410AF90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_997 0x410AF94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_998 0x410AF98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_999 0x410AF9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1000 0x410AFA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1001 0x410AFA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1002 0x410AFA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1003 0x410AFAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1004 0x410AFB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1005 0x410AFB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1006 0x410AFB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1007 0x410AFBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1008 0x410AFC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1009 0x410AFC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1010 0x410AFC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1011 0x410AFCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1012 0x410AFD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1013 0x410AFD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1014 0x410AFD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1015 0x410AFDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1016 0x410AFE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1017 0x410AFE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1018 0x410AFE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1019 0x410AFEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1020 0x410AFF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1021 0x410AFF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1022 0x410AFF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1023 0x410AFFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1024 0x410B000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1025 0x410B004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1026 0x410B008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1027 0x410B00C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1028 0x410B010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1029 0x410B014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1030 0x410B018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1031 0x410B01C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1032 0x410B020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1033 0x410B024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1034 0x410B028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1035 0x410B02C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1036 0x410B030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1037 0x410B034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1038 0x410B038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1039 0x410B03C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1040 0x410B040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1041 0x410B044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1042 0x410B048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1043 0x410B04C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1044 0x410B050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1045 0x410B054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1046 0x410B058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1047 0x410B05C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1048 0x410B060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1049 0x410B064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1050 0x410B068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1051 0x410B06C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1052 0x410B070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1053 0x410B074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1054 0x410B078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1055 0x410B07C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1056 0x410B080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1057 0x410B084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1058 0x410B088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1059 0x410B08C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1060 0x410B090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1061 0x410B094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1062 0x410B098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1063 0x410B09C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1064 0x410B0A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1065 0x410B0A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1066 0x410B0A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1067 0x410B0AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1068 0x410B0B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1069 0x410B0B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1070 0x410B0B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1071 0x410B0BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1072 0x410B0C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1073 0x410B0C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1074 0x410B0C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1075 0x410B0CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1076 0x410B0D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1077 0x410B0D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1078 0x410B0D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1079 0x410B0DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1080 0x410B0E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1081 0x410B0E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1082 0x410B0E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1083 0x410B0EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1084 0x410B0F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1085 0x410B0F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1086 0x410B0F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1087 0x410B0FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1088 0x410B100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1089 0x410B104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1090 0x410B108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1091 0x410B10C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1092 0x410B110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1093 0x410B114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1094 0x410B118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1095 0x410B11C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1096 0x410B120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1097 0x410B124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1098 0x410B128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1099 0x410B12C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1100 0x410B130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1101 0x410B134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1102 0x410B138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1103 0x410B13C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1104 0x410B140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1105 0x410B144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1106 0x410B148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1107 0x410B14C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1108 0x410B150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1109 0x410B154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1110 0x410B158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1111 0x410B15C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1112 0x410B160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1113 0x410B164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1114 0x410B168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1115 0x410B16C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1116 0x410B170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1117 0x410B174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1118 0x410B178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1119 0x410B17C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1120 0x410B180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1121 0x410B184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1122 0x410B188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1123 0x410B18C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1124 0x410B190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1125 0x410B194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1126 0x410B198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1127 0x410B19C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1128 0x410B1A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1129 0x410B1A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1130 0x410B1A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1131 0x410B1AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1132 0x410B1B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1133 0x410B1B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1134 0x410B1B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1135 0x410B1BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1136 0x410B1C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1137 0x410B1C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1138 0x410B1C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1139 0x410B1CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1140 0x410B1D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1141 0x410B1D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1142 0x410B1D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1143 0x410B1DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1144 0x410B1E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1145 0x410B1E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1146 0x410B1E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1147 0x410B1EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1148 0x410B1F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1149 0x410B1F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1150 0x410B1F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1151 0x410B1FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1152 0x410B200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1153 0x410B204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1154 0x410B208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1155 0x410B20C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1156 0x410B210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1157 0x410B214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1158 0x410B218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1159 0x410B21C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1160 0x410B220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1161 0x410B224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1162 0x410B228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1163 0x410B22C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1164 0x410B230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1165 0x410B234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1166 0x410B238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1167 0x410B23C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1168 0x410B240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1169 0x410B244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1170 0x410B248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1171 0x410B24C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1172 0x410B250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1173 0x410B254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1174 0x410B258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1175 0x410B25C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1176 0x410B260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1177 0x410B264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1178 0x410B268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1179 0x410B26C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1180 0x410B270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1181 0x410B274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1182 0x410B278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1183 0x410B27C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1184 0x410B280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1185 0x410B284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1186 0x410B288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1187 0x410B28C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1188 0x410B290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1189 0x410B294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1190 0x410B298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1191 0x410B29C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1192 0x410B2A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1193 0x410B2A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1194 0x410B2A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1195 0x410B2AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1196 0x410B2B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1197 0x410B2B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1198 0x410B2B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1199 0x410B2BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1200 0x410B2C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1201 0x410B2C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1202 0x410B2C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1203 0x410B2CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1204 0x410B2D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1205 0x410B2D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1206 0x410B2D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1207 0x410B2DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1208 0x410B2E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1209 0x410B2E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1210 0x410B2E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1211 0x410B2EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1212 0x410B2F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1213 0x410B2F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1214 0x410B2F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1215 0x410B2FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1216 0x410B300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1217 0x410B304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1218 0x410B308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1219 0x410B30C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1220 0x410B310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1221 0x410B314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1222 0x410B318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1223 0x410B31C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1224 0x410B320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1225 0x410B324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1226 0x410B328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1227 0x410B32C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1228 0x410B330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1229 0x410B334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1230 0x410B338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1231 0x410B33C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1232 0x410B340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1233 0x410B344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1234 0x410B348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1235 0x410B34C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1236 0x410B350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1237 0x410B354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1238 0x410B358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1239 0x410B35C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1240 0x410B360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1241 0x410B364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1242 0x410B368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1243 0x410B36C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1244 0x410B370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1245 0x410B374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1246 0x410B378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1247 0x410B37C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1248 0x410B380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1249 0x410B384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1250 0x410B388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1251 0x410B38C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1252 0x410B390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1253 0x410B394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1254 0x410B398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1255 0x410B39C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1256 0x410B3A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1257 0x410B3A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1258 0x410B3A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1259 0x410B3AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1260 0x410B3B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1261 0x410B3B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1262 0x410B3B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1263 0x410B3BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1264 0x410B3C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1265 0x410B3C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1266 0x410B3C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1267 0x410B3CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1268 0x410B3D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1269 0x410B3D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1270 0x410B3D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1271 0x410B3DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1272 0x410B3E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1273 0x410B3E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1274 0x410B3E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1275 0x410B3EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1276 0x410B3F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1277 0x410B3F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1278 0x410B3F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1279 0x410B3FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1280 0x410B400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1281 0x410B404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1282 0x410B408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1283 0x410B40C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1284 0x410B410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1285 0x410B414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1286 0x410B418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1287 0x410B41C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1288 0x410B420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1289 0x410B424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1290 0x410B428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1291 0x410B42C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1292 0x410B430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1293 0x410B434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1294 0x410B438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1295 0x410B43C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1296 0x410B440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1297 0x410B444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1298 0x410B448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1299 0x410B44C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1300 0x410B450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1301 0x410B454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1302 0x410B458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1303 0x410B45C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1304 0x410B460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1305 0x410B464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1306 0x410B468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1307 0x410B46C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1308 0x410B470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1309 0x410B474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1310 0x410B478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1311 0x410B47C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1312 0x410B480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1313 0x410B484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1314 0x410B488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1315 0x410B48C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1316 0x410B490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1317 0x410B494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1318 0x410B498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1319 0x410B49C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1320 0x410B4A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1321 0x410B4A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1322 0x410B4A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1323 0x410B4AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1324 0x410B4B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1325 0x410B4B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1326 0x410B4B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1327 0x410B4BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1328 0x410B4C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1329 0x410B4C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1330 0x410B4C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1331 0x410B4CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1332 0x410B4D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1333 0x410B4D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1334 0x410B4D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1335 0x410B4DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1336 0x410B4E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1337 0x410B4E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1338 0x410B4E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1339 0x410B4EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1340 0x410B4F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1341 0x410B4F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1342 0x410B4F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1343 0x410B4FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1344 0x410B500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1345 0x410B504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1346 0x410B508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1347 0x410B50C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1348 0x410B510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1349 0x410B514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1350 0x410B518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1351 0x410B51C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1352 0x410B520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1353 0x410B524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1354 0x410B528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1355 0x410B52C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1356 0x410B530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1357 0x410B534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1358 0x410B538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1359 0x410B53C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1360 0x410B540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1361 0x410B544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1362 0x410B548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1363 0x410B54C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1364 0x410B550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1365 0x410B554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1366 0x410B558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1367 0x410B55C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1368 0x410B560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1369 0x410B564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1370 0x410B568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1371 0x410B56C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1372 0x410B570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1373 0x410B574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1374 0x410B578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1375 0x410B57C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1376 0x410B580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1377 0x410B584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1378 0x410B588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1379 0x410B58C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1380 0x410B590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1381 0x410B594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1382 0x410B598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1383 0x410B59C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1384 0x410B5A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1385 0x410B5A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1386 0x410B5A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1387 0x410B5AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1388 0x410B5B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1389 0x410B5B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1390 0x410B5B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1391 0x410B5BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1392 0x410B5C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1393 0x410B5C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1394 0x410B5C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1395 0x410B5CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1396 0x410B5D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1397 0x410B5D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1398 0x410B5D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1399 0x410B5DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1400 0x410B5E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1401 0x410B5E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1402 0x410B5E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1403 0x410B5EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1404 0x410B5F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1405 0x410B5F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1406 0x410B5F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1407 0x410B5FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1408 0x410B600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1409 0x410B604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1410 0x410B608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1411 0x410B60C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1412 0x410B610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1413 0x410B614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1414 0x410B618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1415 0x410B61C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1416 0x410B620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1417 0x410B624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1418 0x410B628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1419 0x410B62C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1420 0x410B630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1421 0x410B634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1422 0x410B638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1423 0x410B63C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1424 0x410B640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1425 0x410B644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1426 0x410B648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1427 0x410B64C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1428 0x410B650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1429 0x410B654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1430 0x410B658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1431 0x410B65C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1432 0x410B660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1433 0x410B664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1434 0x410B668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1435 0x410B66C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1436 0x410B670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1437 0x410B674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1438 0x410B678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1439 0x410B67C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1440 0x410B680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1441 0x410B684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1442 0x410B688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1443 0x410B68C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1444 0x410B690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1445 0x410B694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1446 0x410B698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1447 0x410B69C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1448 0x410B6A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1449 0x410B6A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1450 0x410B6A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1451 0x410B6AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1452 0x410B6B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1453 0x410B6B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1454 0x410B6B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1455 0x410B6BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1456 0x410B6C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1457 0x410B6C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1458 0x410B6C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1459 0x410B6CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1460 0x410B6D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1461 0x410B6D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1462 0x410B6D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1463 0x410B6DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1464 0x410B6E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1465 0x410B6E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1466 0x410B6E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1467 0x410B6EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1468 0x410B6F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1469 0x410B6F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1470 0x410B6F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1471 0x410B6FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1472 0x410B700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1473 0x410B704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1474 0x410B708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1475 0x410B70C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1476 0x410B710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1477 0x410B714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1478 0x410B718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1479 0x410B71C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1480 0x410B720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1481 0x410B724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1482 0x410B728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1483 0x410B72C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1484 0x410B730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1485 0x410B734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1486 0x410B738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1487 0x410B73C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1488 0x410B740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1489 0x410B744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1490 0x410B748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1491 0x410B74C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1492 0x410B750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1493 0x410B754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1494 0x410B758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1495 0x410B75C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1496 0x410B760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1497 0x410B764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1498 0x410B768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1499 0x410B76C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1500 0x410B770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1501 0x410B774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1502 0x410B778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1503 0x410B77C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1504 0x410B780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1505 0x410B784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1506 0x410B788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1507 0x410B78C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1508 0x410B790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1509 0x410B794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1510 0x410B798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1511 0x410B79C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1512 0x410B7A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1513 0x410B7A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1514 0x410B7A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1515 0x410B7AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1516 0x410B7B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1517 0x410B7B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1518 0x410B7B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1519 0x410B7BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1520 0x410B7C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1521 0x410B7C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1522 0x410B7C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1523 0x410B7CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1524 0x410B7D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1525 0x410B7D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1526 0x410B7D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1527 0x410B7DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1528 0x410B7E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1529 0x410B7E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1530 0x410B7E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1531 0x410B7EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1532 0x410B7F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1533 0x410B7F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1534 0x410B7F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1535 0x410B7FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1536 0x410B800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1537 0x410B804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1538 0x410B808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1539 0x410B80C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1540 0x410B810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1541 0x410B814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1542 0x410B818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1543 0x410B81C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1544 0x410B820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1545 0x410B824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1546 0x410B828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1547 0x410B82C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1548 0x410B830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1549 0x410B834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1550 0x410B838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1551 0x410B83C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1552 0x410B840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1553 0x410B844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1554 0x410B848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1555 0x410B84C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1556 0x410B850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1557 0x410B854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1558 0x410B858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1559 0x410B85C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1560 0x410B860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1561 0x410B864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1562 0x410B868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1563 0x410B86C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1564 0x410B870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1565 0x410B874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1566 0x410B878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1567 0x410B87C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1568 0x410B880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1569 0x410B884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1570 0x410B888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1571 0x410B88C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1572 0x410B890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1573 0x410B894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1574 0x410B898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1575 0x410B89C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1576 0x410B8A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1577 0x410B8A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1578 0x410B8A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1579 0x410B8AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1580 0x410B8B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1581 0x410B8B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1582 0x410B8B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1583 0x410B8BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1584 0x410B8C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1585 0x410B8C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1586 0x410B8C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1587 0x410B8CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1588 0x410B8D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1589 0x410B8D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1590 0x410B8D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1591 0x410B8DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1592 0x410B8E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1593 0x410B8E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1594 0x410B8E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1595 0x410B8EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1596 0x410B8F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1597 0x410B8F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1598 0x410B8F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1599 0x410B8FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1600 0x410B900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1601 0x410B904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1602 0x410B908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1603 0x410B90C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1604 0x410B910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1605 0x410B914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1606 0x410B918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1607 0x410B91C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1608 0x410B920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1609 0x410B924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1610 0x410B928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1611 0x410B92C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1612 0x410B930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1613 0x410B934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1614 0x410B938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1615 0x410B93C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1616 0x410B940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1617 0x410B944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1618 0x410B948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1619 0x410B94C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1620 0x410B950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1621 0x410B954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1622 0x410B958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1623 0x410B95C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1624 0x410B960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1625 0x410B964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1626 0x410B968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1627 0x410B96C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1628 0x410B970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1629 0x410B974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1630 0x410B978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1631 0x410B97C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1632 0x410B980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1633 0x410B984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1634 0x410B988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1635 0x410B98C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1636 0x410B990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1637 0x410B994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1638 0x410B998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1639 0x410B99C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1640 0x410B9A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1641 0x410B9A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1642 0x410B9A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1643 0x410B9AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1644 0x410B9B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1645 0x410B9B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1646 0x410B9B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1647 0x410B9BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1648 0x410B9C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1649 0x410B9C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1650 0x410B9C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1651 0x410B9CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1652 0x410B9D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1653 0x410B9D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1654 0x410B9D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1655 0x410B9DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1656 0x410B9E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1657 0x410B9E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1658 0x410B9E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1659 0x410B9EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1660 0x410B9F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1661 0x410B9F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1662 0x410B9F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1663 0x410B9FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1664 0x410BA00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1665 0x410BA04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1666 0x410BA08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1667 0x410BA0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1668 0x410BA10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1669 0x410BA14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1670 0x410BA18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1671 0x410BA1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1672 0x410BA20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1673 0x410BA24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1674 0x410BA28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1675 0x410BA2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1676 0x410BA30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1677 0x410BA34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1678 0x410BA38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1679 0x410BA3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1680 0x410BA40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1681 0x410BA44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1682 0x410BA48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1683 0x410BA4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1684 0x410BA50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1685 0x410BA54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1686 0x410BA58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1687 0x410BA5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1688 0x410BA60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1689 0x410BA64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1690 0x410BA68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1691 0x410BA6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1692 0x410BA70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1693 0x410BA74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1694 0x410BA78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1695 0x410BA7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1696 0x410BA80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1697 0x410BA84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1698 0x410BA88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1699 0x410BA8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1700 0x410BA90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1701 0x410BA94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1702 0x410BA98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1703 0x410BA9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1704 0x410BAA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1705 0x410BAA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1706 0x410BAA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1707 0x410BAAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1708 0x410BAB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1709 0x410BAB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1710 0x410BAB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1711 0x410BABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1712 0x410BAC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1713 0x410BAC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1714 0x410BAC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1715 0x410BACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1716 0x410BAD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1717 0x410BAD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1718 0x410BAD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1719 0x410BADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1720 0x410BAE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1721 0x410BAE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1722 0x410BAE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1723 0x410BAEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1724 0x410BAF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1725 0x410BAF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1726 0x410BAF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1727 0x410BAFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1728 0x410BB00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1729 0x410BB04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1730 0x410BB08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1731 0x410BB0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1732 0x410BB10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1733 0x410BB14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1734 0x410BB18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1735 0x410BB1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1736 0x410BB20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1737 0x410BB24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1738 0x410BB28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1739 0x410BB2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1740 0x410BB30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1741 0x410BB34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1742 0x410BB38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1743 0x410BB3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1744 0x410BB40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1745 0x410BB44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1746 0x410BB48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1747 0x410BB4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1748 0x410BB50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1749 0x410BB54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1750 0x410BB58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1751 0x410BB5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1752 0x410BB60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1753 0x410BB64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1754 0x410BB68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1755 0x410BB6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1756 0x410BB70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1757 0x410BB74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1758 0x410BB78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1759 0x410BB7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1760 0x410BB80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1761 0x410BB84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1762 0x410BB88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1763 0x410BB8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1764 0x410BB90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1765 0x410BB94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1766 0x410BB98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1767 0x410BB9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1768 0x410BBA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1769 0x410BBA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1770 0x410BBA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1771 0x410BBAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1772 0x410BBB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1773 0x410BBB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1774 0x410BBB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1775 0x410BBBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1776 0x410BBC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1777 0x410BBC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1778 0x410BBC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1779 0x410BBCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1780 0x410BBD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1781 0x410BBD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1782 0x410BBD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1783 0x410BBDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1784 0x410BBE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1785 0x410BBE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1786 0x410BBE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1787 0x410BBEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1788 0x410BBF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1789 0x410BBF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1790 0x410BBF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1791 0x410BBFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1792 0x410BC00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1793 0x410BC04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1794 0x410BC08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1795 0x410BC0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1796 0x410BC10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1797 0x410BC14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1798 0x410BC18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1799 0x410BC1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1800 0x410BC20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1801 0x410BC24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1802 0x410BC28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1803 0x410BC2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1804 0x410BC30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1805 0x410BC34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1806 0x410BC38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1807 0x410BC3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1808 0x410BC40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1809 0x410BC44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1810 0x410BC48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1811 0x410BC4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1812 0x410BC50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1813 0x410BC54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1814 0x410BC58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1815 0x410BC5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1816 0x410BC60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1817 0x410BC64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1818 0x410BC68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1819 0x410BC6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1820 0x410BC70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1821 0x410BC74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1822 0x410BC78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1823 0x410BC7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1824 0x410BC80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1825 0x410BC84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1826 0x410BC88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1827 0x410BC8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1828 0x410BC90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1829 0x410BC94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1830 0x410BC98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1831 0x410BC9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1832 0x410BCA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1833 0x410BCA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1834 0x410BCA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1835 0x410BCAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1836 0x410BCB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1837 0x410BCB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1838 0x410BCB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1839 0x410BCBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1840 0x410BCC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1841 0x410BCC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1842 0x410BCC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1843 0x410BCCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1844 0x410BCD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1845 0x410BCD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1846 0x410BCD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1847 0x410BCDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1848 0x410BCE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1849 0x410BCE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1850 0x410BCE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1851 0x410BCEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1852 0x410BCF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1853 0x410BCF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1854 0x410BCF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1855 0x410BCFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1856 0x410BD00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1857 0x410BD04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1858 0x410BD08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1859 0x410BD0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1860 0x410BD10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1861 0x410BD14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1862 0x410BD18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1863 0x410BD1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1864 0x410BD20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1865 0x410BD24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1866 0x410BD28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1867 0x410BD2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1868 0x410BD30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1869 0x410BD34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1870 0x410BD38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1871 0x410BD3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1872 0x410BD40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1873 0x410BD44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1874 0x410BD48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1875 0x410BD4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1876 0x410BD50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1877 0x410BD54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1878 0x410BD58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1879 0x410BD5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1880 0x410BD60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1881 0x410BD64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1882 0x410BD68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1883 0x410BD6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1884 0x410BD70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1885 0x410BD74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1886 0x410BD78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1887 0x410BD7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1888 0x410BD80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1889 0x410BD84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1890 0x410BD88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1891 0x410BD8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1892 0x410BD90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1893 0x410BD94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1894 0x410BD98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1895 0x410BD9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1896 0x410BDA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1897 0x410BDA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1898 0x410BDA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1899 0x410BDAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1900 0x410BDB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1901 0x410BDB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1902 0x410BDB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1903 0x410BDBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1904 0x410BDC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1905 0x410BDC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1906 0x410BDC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1907 0x410BDCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1908 0x410BDD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1909 0x410BDD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1910 0x410BDD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1911 0x410BDDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1912 0x410BDE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1913 0x410BDE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1914 0x410BDE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1915 0x410BDEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1916 0x410BDF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1917 0x410BDF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1918 0x410BDF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1919 0x410BDFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1920 0x410BE00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1921 0x410BE04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1922 0x410BE08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1923 0x410BE0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1924 0x410BE10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1925 0x410BE14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1926 0x410BE18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1927 0x410BE1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1928 0x410BE20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1929 0x410BE24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1930 0x410BE28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1931 0x410BE2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1932 0x410BE30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1933 0x410BE34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1934 0x410BE38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1935 0x410BE3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1936 0x410BE40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1937 0x410BE44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1938 0x410BE48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1939 0x410BE4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1940 0x410BE50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1941 0x410BE54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1942 0x410BE58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1943 0x410BE5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1944 0x410BE60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1945 0x410BE64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1946 0x410BE68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1947 0x410BE6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1948 0x410BE70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1949 0x410BE74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1950 0x410BE78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1951 0x410BE7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1952 0x410BE80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1953 0x410BE84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1954 0x410BE88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1955 0x410BE8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1956 0x410BE90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1957 0x410BE94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1958 0x410BE98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1959 0x410BE9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1960 0x410BEA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1961 0x410BEA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1962 0x410BEA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1963 0x410BEAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1964 0x410BEB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1965 0x410BEB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1966 0x410BEB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1967 0x410BEBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1968 0x410BEC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1969 0x410BEC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1970 0x410BEC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1971 0x410BECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1972 0x410BED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1973 0x410BED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1974 0x410BED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1975 0x410BEDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1976 0x410BEE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1977 0x410BEE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1978 0x410BEE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1979 0x410BEEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1980 0x410BEF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1981 0x410BEF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1982 0x410BEF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1983 0x410BEFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1984 0x410BF00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1985 0x410BF04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1986 0x410BF08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1987 0x410BF0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1988 0x410BF10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1989 0x410BF14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1990 0x410BF18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1991 0x410BF1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1992 0x410BF20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1993 0x410BF24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1994 0x410BF28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1995 0x410BF2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1996 0x410BF30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1997 0x410BF34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1998 0x410BF38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_1999 0x410BF3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2000 0x410BF40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2001 0x410BF44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2002 0x410BF48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2003 0x410BF4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2004 0x410BF50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2005 0x410BF54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2006 0x410BF58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2007 0x410BF5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2008 0x410BF60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2009 0x410BF64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2010 0x410BF68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2011 0x410BF6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2012 0x410BF70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2013 0x410BF74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2014 0x410BF78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2015 0x410BF7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2016 0x410BF80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2017 0x410BF84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2018 0x410BF88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2019 0x410BF8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2020 0x410BF90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2021 0x410BF94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2022 0x410BF98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2023 0x410BF9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2024 0x410BFA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2025 0x410BFA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2026 0x410BFA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2027 0x410BFAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2028 0x410BFB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2029 0x410BFB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2030 0x410BFB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2031 0x410BFBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2032 0x410BFC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2033 0x410BFC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2034 0x410BFC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2035 0x410BFCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2036 0x410BFD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2037 0x410BFD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2038 0x410BFD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2039 0x410BFDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2040 0x410BFE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2041 0x410BFE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2042 0x410BFE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2043 0x410BFEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2044 0x410BFF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2045 0x410BFF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2046 0x410BFF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_2047 0x410BFFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x410C000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1 0x410C004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2 0x410C008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_3 0x410C00C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_4 0x410C010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_5 0x410C014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_6 0x410C018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_7 0x410C01C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_8 0x410C020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_9 0x410C024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_10 0x410C028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_11 0x410C02C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_12 0x410C030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_13 0x410C034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_14 0x410C038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_15 0x410C03C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_16 0x410C040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_17 0x410C044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_18 0x410C048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_19 0x410C04C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_20 0x410C050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_21 0x410C054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_22 0x410C058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_23 0x410C05C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_24 0x410C060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_25 0x410C064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_26 0x410C068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_27 0x410C06C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_28 0x410C070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_29 0x410C074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_30 0x410C078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_31 0x410C07C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_32 0x410C080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_33 0x410C084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_34 0x410C088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_35 0x410C08C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_36 0x410C090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_37 0x410C094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_38 0x410C098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_39 0x410C09C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_40 0x410C0A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_41 0x410C0A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_42 0x410C0A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_43 0x410C0AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_44 0x410C0B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_45 0x410C0B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_46 0x410C0B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_47 0x410C0BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_48 0x410C0C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_49 0x410C0C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_50 0x410C0C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_51 0x410C0CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_52 0x410C0D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_53 0x410C0D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_54 0x410C0D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_55 0x410C0DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_56 0x410C0E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_57 0x410C0E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_58 0x410C0E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_59 0x410C0EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_60 0x410C0F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_61 0x410C0F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_62 0x410C0F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_63 0x410C0FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_64 0x410C100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_65 0x410C104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_66 0x410C108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_67 0x410C10C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_68 0x410C110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_69 0x410C114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_70 0x410C118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_71 0x410C11C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_72 0x410C120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_73 0x410C124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_74 0x410C128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_75 0x410C12C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_76 0x410C130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_77 0x410C134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_78 0x410C138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_79 0x410C13C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_80 0x410C140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_81 0x410C144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_82 0x410C148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_83 0x410C14C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_84 0x410C150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_85 0x410C154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_86 0x410C158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_87 0x410C15C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_88 0x410C160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_89 0x410C164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_90 0x410C168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_91 0x410C16C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_92 0x410C170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_93 0x410C174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_94 0x410C178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_95 0x410C17C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_96 0x410C180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_97 0x410C184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_98 0x410C188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_99 0x410C18C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_100 0x410C190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_101 0x410C194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_102 0x410C198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_103 0x410C19C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_104 0x410C1A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_105 0x410C1A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_106 0x410C1A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_107 0x410C1AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_108 0x410C1B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_109 0x410C1B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_110 0x410C1B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_111 0x410C1BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_112 0x410C1C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_113 0x410C1C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_114 0x410C1C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_115 0x410C1CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_116 0x410C1D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_117 0x410C1D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_118 0x410C1D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_119 0x410C1DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_120 0x410C1E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_121 0x410C1E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_122 0x410C1E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_123 0x410C1EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_124 0x410C1F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_125 0x410C1F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_126 0x410C1F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_127 0x410C1FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_128 0x410C200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_129 0x410C204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_130 0x410C208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_131 0x410C20C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_132 0x410C210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_133 0x410C214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_134 0x410C218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_135 0x410C21C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_136 0x410C220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_137 0x410C224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_138 0x410C228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_139 0x410C22C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_140 0x410C230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_141 0x410C234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_142 0x410C238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_143 0x410C23C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_144 0x410C240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_145 0x410C244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_146 0x410C248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_147 0x410C24C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_148 0x410C250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_149 0x410C254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_150 0x410C258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_151 0x410C25C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_152 0x410C260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_153 0x410C264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_154 0x410C268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_155 0x410C26C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_156 0x410C270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_157 0x410C274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_158 0x410C278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_159 0x410C27C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_160 0x410C280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_161 0x410C284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_162 0x410C288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_163 0x410C28C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_164 0x410C290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_165 0x410C294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_166 0x410C298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_167 0x410C29C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_168 0x410C2A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_169 0x410C2A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_170 0x410C2A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_171 0x410C2AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_172 0x410C2B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_173 0x410C2B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_174 0x410C2B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_175 0x410C2BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_176 0x410C2C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_177 0x410C2C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_178 0x410C2C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_179 0x410C2CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_180 0x410C2D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_181 0x410C2D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_182 0x410C2D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_183 0x410C2DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_184 0x410C2E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_185 0x410C2E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_186 0x410C2E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_187 0x410C2EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_188 0x410C2F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_189 0x410C2F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_190 0x410C2F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_191 0x410C2FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_192 0x410C300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_193 0x410C304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_194 0x410C308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_195 0x410C30C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_196 0x410C310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_197 0x410C314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_198 0x410C318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_199 0x410C31C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_200 0x410C320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_201 0x410C324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_202 0x410C328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_203 0x410C32C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_204 0x410C330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_205 0x410C334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_206 0x410C338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_207 0x410C33C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_208 0x410C340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_209 0x410C344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_210 0x410C348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_211 0x410C34C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_212 0x410C350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_213 0x410C354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_214 0x410C358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_215 0x410C35C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_216 0x410C360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_217 0x410C364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_218 0x410C368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_219 0x410C36C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_220 0x410C370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_221 0x410C374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_222 0x410C378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_223 0x410C37C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_224 0x410C380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_225 0x410C384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_226 0x410C388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_227 0x410C38C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_228 0x410C390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_229 0x410C394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_230 0x410C398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_231 0x410C39C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_232 0x410C3A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_233 0x410C3A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_234 0x410C3A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_235 0x410C3AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_236 0x410C3B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_237 0x410C3B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_238 0x410C3B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_239 0x410C3BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_240 0x410C3C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_241 0x410C3C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_242 0x410C3C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_243 0x410C3CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_244 0x410C3D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_245 0x410C3D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_246 0x410C3D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_247 0x410C3DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_248 0x410C3E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_249 0x410C3E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_250 0x410C3E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_251 0x410C3EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_252 0x410C3F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_253 0x410C3F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_254 0x410C3F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_255 0x410C3FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_256 0x410C400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_257 0x410C404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_258 0x410C408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_259 0x410C40C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_260 0x410C410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_261 0x410C414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_262 0x410C418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_263 0x410C41C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_264 0x410C420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_265 0x410C424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_266 0x410C428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_267 0x410C42C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_268 0x410C430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_269 0x410C434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_270 0x410C438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_271 0x410C43C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_272 0x410C440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_273 0x410C444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_274 0x410C448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_275 0x410C44C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_276 0x410C450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_277 0x410C454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_278 0x410C458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_279 0x410C45C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_280 0x410C460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_281 0x410C464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_282 0x410C468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_283 0x410C46C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_284 0x410C470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_285 0x410C474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_286 0x410C478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_287 0x410C47C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_288 0x410C480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_289 0x410C484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_290 0x410C488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_291 0x410C48C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_292 0x410C490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_293 0x410C494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_294 0x410C498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_295 0x410C49C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_296 0x410C4A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_297 0x410C4A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_298 0x410C4A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_299 0x410C4AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_300 0x410C4B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_301 0x410C4B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_302 0x410C4B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_303 0x410C4BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_304 0x410C4C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_305 0x410C4C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_306 0x410C4C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_307 0x410C4CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_308 0x410C4D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_309 0x410C4D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_310 0x410C4D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_311 0x410C4DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_312 0x410C4E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_313 0x410C4E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_314 0x410C4E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_315 0x410C4EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_316 0x410C4F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_317 0x410C4F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_318 0x410C4F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_319 0x410C4FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_320 0x410C500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_321 0x410C504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_322 0x410C508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_323 0x410C50C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_324 0x410C510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_325 0x410C514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_326 0x410C518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_327 0x410C51C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_328 0x410C520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_329 0x410C524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_330 0x410C528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_331 0x410C52C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_332 0x410C530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_333 0x410C534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_334 0x410C538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_335 0x410C53C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_336 0x410C540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_337 0x410C544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_338 0x410C548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_339 0x410C54C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_340 0x410C550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_341 0x410C554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_342 0x410C558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_343 0x410C55C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_344 0x410C560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_345 0x410C564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_346 0x410C568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_347 0x410C56C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_348 0x410C570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_349 0x410C574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_350 0x410C578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_351 0x410C57C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_352 0x410C580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_353 0x410C584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_354 0x410C588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_355 0x410C58C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_356 0x410C590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_357 0x410C594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_358 0x410C598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_359 0x410C59C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_360 0x410C5A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_361 0x410C5A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_362 0x410C5A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_363 0x410C5AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_364 0x410C5B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_365 0x410C5B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_366 0x410C5B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_367 0x410C5BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_368 0x410C5C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_369 0x410C5C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_370 0x410C5C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_371 0x410C5CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_372 0x410C5D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_373 0x410C5D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_374 0x410C5D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_375 0x410C5DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_376 0x410C5E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_377 0x410C5E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_378 0x410C5E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_379 0x410C5EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_380 0x410C5F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_381 0x410C5F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_382 0x410C5F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_383 0x410C5FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_384 0x410C600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_385 0x410C604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_386 0x410C608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_387 0x410C60C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_388 0x410C610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_389 0x410C614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_390 0x410C618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_391 0x410C61C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_392 0x410C620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_393 0x410C624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_394 0x410C628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_395 0x410C62C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_396 0x410C630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_397 0x410C634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_398 0x410C638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_399 0x410C63C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_400 0x410C640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_401 0x410C644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_402 0x410C648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_403 0x410C64C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_404 0x410C650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_405 0x410C654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_406 0x410C658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_407 0x410C65C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_408 0x410C660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_409 0x410C664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_410 0x410C668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_411 0x410C66C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_412 0x410C670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_413 0x410C674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_414 0x410C678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_415 0x410C67C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_416 0x410C680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_417 0x410C684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_418 0x410C688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_419 0x410C68C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_420 0x410C690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_421 0x410C694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_422 0x410C698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_423 0x410C69C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_424 0x410C6A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_425 0x410C6A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_426 0x410C6A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_427 0x410C6AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_428 0x410C6B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_429 0x410C6B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_430 0x410C6B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_431 0x410C6BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_432 0x410C6C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_433 0x410C6C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_434 0x410C6C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_435 0x410C6CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_436 0x410C6D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_437 0x410C6D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_438 0x410C6D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_439 0x410C6DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_440 0x410C6E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_441 0x410C6E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_442 0x410C6E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_443 0x410C6EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_444 0x410C6F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_445 0x410C6F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_446 0x410C6F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_447 0x410C6FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_448 0x410C700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_449 0x410C704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_450 0x410C708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_451 0x410C70C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_452 0x410C710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_453 0x410C714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_454 0x410C718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_455 0x410C71C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_456 0x410C720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_457 0x410C724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_458 0x410C728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_459 0x410C72C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_460 0x410C730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_461 0x410C734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_462 0x410C738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_463 0x410C73C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_464 0x410C740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_465 0x410C744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_466 0x410C748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_467 0x410C74C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_468 0x410C750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_469 0x410C754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_470 0x410C758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_471 0x410C75C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_472 0x410C760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_473 0x410C764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_474 0x410C768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_475 0x410C76C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_476 0x410C770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_477 0x410C774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_478 0x410C778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_479 0x410C77C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_480 0x410C780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_481 0x410C784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_482 0x410C788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_483 0x410C78C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_484 0x410C790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_485 0x410C794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_486 0x410C798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_487 0x410C79C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_488 0x410C7A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_489 0x410C7A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_490 0x410C7A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_491 0x410C7AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_492 0x410C7B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_493 0x410C7B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_494 0x410C7B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_495 0x410C7BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_496 0x410C7C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_497 0x410C7C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_498 0x410C7C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_499 0x410C7CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_500 0x410C7D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_501 0x410C7D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_502 0x410C7D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_503 0x410C7DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_504 0x410C7E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_505 0x410C7E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_506 0x410C7E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_507 0x410C7EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_508 0x410C7F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_509 0x410C7F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_510 0x410C7F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_511 0x410C7FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_512 0x410C800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_513 0x410C804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_514 0x410C808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_515 0x410C80C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_516 0x410C810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_517 0x410C814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_518 0x410C818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_519 0x410C81C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_520 0x410C820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_521 0x410C824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_522 0x410C828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_523 0x410C82C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_524 0x410C830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_525 0x410C834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_526 0x410C838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_527 0x410C83C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_528 0x410C840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_529 0x410C844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_530 0x410C848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_531 0x410C84C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_532 0x410C850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_533 0x410C854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_534 0x410C858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_535 0x410C85C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_536 0x410C860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_537 0x410C864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_538 0x410C868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_539 0x410C86C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_540 0x410C870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_541 0x410C874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_542 0x410C878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_543 0x410C87C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_544 0x410C880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_545 0x410C884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_546 0x410C888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_547 0x410C88C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_548 0x410C890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_549 0x410C894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_550 0x410C898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_551 0x410C89C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_552 0x410C8A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_553 0x410C8A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_554 0x410C8A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_555 0x410C8AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_556 0x410C8B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_557 0x410C8B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_558 0x410C8B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_559 0x410C8BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_560 0x410C8C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_561 0x410C8C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_562 0x410C8C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_563 0x410C8CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_564 0x410C8D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_565 0x410C8D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_566 0x410C8D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_567 0x410C8DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_568 0x410C8E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_569 0x410C8E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_570 0x410C8E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_571 0x410C8EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_572 0x410C8F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_573 0x410C8F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_574 0x410C8F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_575 0x410C8FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_576 0x410C900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_577 0x410C904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_578 0x410C908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_579 0x410C90C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_580 0x410C910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_581 0x410C914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_582 0x410C918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_583 0x410C91C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_584 0x410C920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_585 0x410C924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_586 0x410C928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_587 0x410C92C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_588 0x410C930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_589 0x410C934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_590 0x410C938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_591 0x410C93C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_592 0x410C940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_593 0x410C944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_594 0x410C948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_595 0x410C94C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_596 0x410C950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_597 0x410C954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_598 0x410C958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_599 0x410C95C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_600 0x410C960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_601 0x410C964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_602 0x410C968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_603 0x410C96C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_604 0x410C970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_605 0x410C974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_606 0x410C978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_607 0x410C97C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_608 0x410C980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_609 0x410C984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_610 0x410C988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_611 0x410C98C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_612 0x410C990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_613 0x410C994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_614 0x410C998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_615 0x410C99C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_616 0x410C9A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_617 0x410C9A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_618 0x410C9A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_619 0x410C9AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_620 0x410C9B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_621 0x410C9B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_622 0x410C9B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_623 0x410C9BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_624 0x410C9C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_625 0x410C9C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_626 0x410C9C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_627 0x410C9CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_628 0x410C9D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_629 0x410C9D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_630 0x410C9D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_631 0x410C9DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_632 0x410C9E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_633 0x410C9E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_634 0x410C9E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_635 0x410C9EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_636 0x410C9F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_637 0x410C9F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_638 0x410C9F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_639 0x410C9FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_640 0x410CA00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_641 0x410CA04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_642 0x410CA08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_643 0x410CA0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_644 0x410CA10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_645 0x410CA14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_646 0x410CA18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_647 0x410CA1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_648 0x410CA20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_649 0x410CA24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_650 0x410CA28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_651 0x410CA2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_652 0x410CA30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_653 0x410CA34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_654 0x410CA38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_655 0x410CA3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_656 0x410CA40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_657 0x410CA44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_658 0x410CA48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_659 0x410CA4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_660 0x410CA50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_661 0x410CA54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_662 0x410CA58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_663 0x410CA5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_664 0x410CA60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_665 0x410CA64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_666 0x410CA68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_667 0x410CA6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_668 0x410CA70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_669 0x410CA74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_670 0x410CA78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_671 0x410CA7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_672 0x410CA80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_673 0x410CA84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_674 0x410CA88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_675 0x410CA8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_676 0x410CA90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_677 0x410CA94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_678 0x410CA98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_679 0x410CA9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_680 0x410CAA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_681 0x410CAA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_682 0x410CAA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_683 0x410CAAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_684 0x410CAB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_685 0x410CAB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_686 0x410CAB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_687 0x410CABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_688 0x410CAC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_689 0x410CAC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_690 0x410CAC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_691 0x410CACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_692 0x410CAD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_693 0x410CAD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_694 0x410CAD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_695 0x410CADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_696 0x410CAE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_697 0x410CAE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_698 0x410CAE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_699 0x410CAEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_700 0x410CAF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_701 0x410CAF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_702 0x410CAF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_703 0x410CAFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_704 0x410CB00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_705 0x410CB04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_706 0x410CB08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_707 0x410CB0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_708 0x410CB10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_709 0x410CB14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_710 0x410CB18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_711 0x410CB1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_712 0x410CB20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_713 0x410CB24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_714 0x410CB28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_715 0x410CB2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_716 0x410CB30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_717 0x410CB34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_718 0x410CB38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_719 0x410CB3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_720 0x410CB40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_721 0x410CB44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_722 0x410CB48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_723 0x410CB4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_724 0x410CB50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_725 0x410CB54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_726 0x410CB58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_727 0x410CB5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_728 0x410CB60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_729 0x410CB64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_730 0x410CB68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_731 0x410CB6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_732 0x410CB70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_733 0x410CB74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_734 0x410CB78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_735 0x410CB7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_736 0x410CB80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_737 0x410CB84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_738 0x410CB88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_739 0x410CB8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_740 0x410CB90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_741 0x410CB94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_742 0x410CB98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_743 0x410CB9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_744 0x410CBA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_745 0x410CBA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_746 0x410CBA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_747 0x410CBAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_748 0x410CBB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_749 0x410CBB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_750 0x410CBB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_751 0x410CBBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_752 0x410CBC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_753 0x410CBC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_754 0x410CBC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_755 0x410CBCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_756 0x410CBD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_757 0x410CBD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_758 0x410CBD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_759 0x410CBDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_760 0x410CBE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_761 0x410CBE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_762 0x410CBE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_763 0x410CBEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_764 0x410CBF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_765 0x410CBF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_766 0x410CBF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_767 0x410CBFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_768 0x410CC00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_769 0x410CC04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_770 0x410CC08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_771 0x410CC0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_772 0x410CC10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_773 0x410CC14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_774 0x410CC18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_775 0x410CC1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_776 0x410CC20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_777 0x410CC24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_778 0x410CC28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_779 0x410CC2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_780 0x410CC30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_781 0x410CC34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_782 0x410CC38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_783 0x410CC3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_784 0x410CC40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_785 0x410CC44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_786 0x410CC48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_787 0x410CC4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_788 0x410CC50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_789 0x410CC54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_790 0x410CC58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_791 0x410CC5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_792 0x410CC60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_793 0x410CC64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_794 0x410CC68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_795 0x410CC6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_796 0x410CC70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_797 0x410CC74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_798 0x410CC78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_799 0x410CC7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_800 0x410CC80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_801 0x410CC84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_802 0x410CC88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_803 0x410CC8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_804 0x410CC90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_805 0x410CC94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_806 0x410CC98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_807 0x410CC9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_808 0x410CCA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_809 0x410CCA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_810 0x410CCA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_811 0x410CCAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_812 0x410CCB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_813 0x410CCB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_814 0x410CCB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_815 0x410CCBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_816 0x410CCC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_817 0x410CCC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_818 0x410CCC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_819 0x410CCCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_820 0x410CCD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_821 0x410CCD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_822 0x410CCD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_823 0x410CCDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_824 0x410CCE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_825 0x410CCE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_826 0x410CCE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_827 0x410CCEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_828 0x410CCF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_829 0x410CCF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_830 0x410CCF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_831 0x410CCFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_832 0x410CD00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_833 0x410CD04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_834 0x410CD08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_835 0x410CD0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_836 0x410CD10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_837 0x410CD14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_838 0x410CD18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_839 0x410CD1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_840 0x410CD20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_841 0x410CD24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_842 0x410CD28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_843 0x410CD2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_844 0x410CD30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_845 0x410CD34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_846 0x410CD38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_847 0x410CD3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_848 0x410CD40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_849 0x410CD44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_850 0x410CD48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_851 0x410CD4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_852 0x410CD50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_853 0x410CD54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_854 0x410CD58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_855 0x410CD5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_856 0x410CD60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_857 0x410CD64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_858 0x410CD68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_859 0x410CD6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_860 0x410CD70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_861 0x410CD74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_862 0x410CD78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_863 0x410CD7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_864 0x410CD80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_865 0x410CD84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_866 0x410CD88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_867 0x410CD8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_868 0x410CD90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_869 0x410CD94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_870 0x410CD98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_871 0x410CD9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_872 0x410CDA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_873 0x410CDA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_874 0x410CDA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_875 0x410CDAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_876 0x410CDB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_877 0x410CDB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_878 0x410CDB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_879 0x410CDBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_880 0x410CDC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_881 0x410CDC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_882 0x410CDC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_883 0x410CDCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_884 0x410CDD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_885 0x410CDD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_886 0x410CDD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_887 0x410CDDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_888 0x410CDE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_889 0x410CDE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_890 0x410CDE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_891 0x410CDEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_892 0x410CDF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_893 0x410CDF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_894 0x410CDF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_895 0x410CDFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_896 0x410CE00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_897 0x410CE04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_898 0x410CE08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_899 0x410CE0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_900 0x410CE10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_901 0x410CE14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_902 0x410CE18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_903 0x410CE1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_904 0x410CE20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_905 0x410CE24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_906 0x410CE28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_907 0x410CE2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_908 0x410CE30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_909 0x410CE34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_910 0x410CE38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_911 0x410CE3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_912 0x410CE40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_913 0x410CE44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_914 0x410CE48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_915 0x410CE4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_916 0x410CE50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_917 0x410CE54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_918 0x410CE58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_919 0x410CE5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_920 0x410CE60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_921 0x410CE64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_922 0x410CE68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_923 0x410CE6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_924 0x410CE70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_925 0x410CE74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_926 0x410CE78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_927 0x410CE7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_928 0x410CE80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_929 0x410CE84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_930 0x410CE88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_931 0x410CE8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_932 0x410CE90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_933 0x410CE94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_934 0x410CE98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_935 0x410CE9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_936 0x410CEA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_937 0x410CEA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_938 0x410CEA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_939 0x410CEAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_940 0x410CEB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_941 0x410CEB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_942 0x410CEB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_943 0x410CEBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_944 0x410CEC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_945 0x410CEC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_946 0x410CEC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_947 0x410CECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_948 0x410CED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_949 0x410CED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_950 0x410CED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_951 0x410CEDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_952 0x410CEE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_953 0x410CEE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_954 0x410CEE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_955 0x410CEEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_956 0x410CEF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_957 0x410CEF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_958 0x410CEF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_959 0x410CEFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_960 0x410CF00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_961 0x410CF04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_962 0x410CF08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_963 0x410CF0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_964 0x410CF10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_965 0x410CF14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_966 0x410CF18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_967 0x410CF1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_968 0x410CF20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_969 0x410CF24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_970 0x410CF28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_971 0x410CF2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_972 0x410CF30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_973 0x410CF34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_974 0x410CF38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_975 0x410CF3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_976 0x410CF40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_977 0x410CF44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_978 0x410CF48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_979 0x410CF4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_980 0x410CF50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_981 0x410CF54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_982 0x410CF58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_983 0x410CF5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_984 0x410CF60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_985 0x410CF64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_986 0x410CF68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_987 0x410CF6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_988 0x410CF70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_989 0x410CF74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_990 0x410CF78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_991 0x410CF7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_992 0x410CF80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_993 0x410CF84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_994 0x410CF88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_995 0x410CF8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_996 0x410CF90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_997 0x410CF94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_998 0x410CF98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_999 0x410CF9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1000 0x410CFA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1001 0x410CFA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1002 0x410CFA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1003 0x410CFAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1004 0x410CFB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1005 0x410CFB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1006 0x410CFB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1007 0x410CFBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1008 0x410CFC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1009 0x410CFC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1010 0x410CFC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1011 0x410CFCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1012 0x410CFD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1013 0x410CFD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1014 0x410CFD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1015 0x410CFDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1016 0x410CFE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1017 0x410CFE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1018 0x410CFE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1019 0x410CFEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1020 0x410CFF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1021 0x410CFF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1022 0x410CFF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1023 0x410CFFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1024 0x410D000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1025 0x410D004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1026 0x410D008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1027 0x410D00C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1028 0x410D010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1029 0x410D014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1030 0x410D018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1031 0x410D01C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1032 0x410D020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1033 0x410D024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1034 0x410D028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1035 0x410D02C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1036 0x410D030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1037 0x410D034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1038 0x410D038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1039 0x410D03C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1040 0x410D040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1041 0x410D044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1042 0x410D048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1043 0x410D04C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1044 0x410D050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1045 0x410D054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1046 0x410D058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1047 0x410D05C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1048 0x410D060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1049 0x410D064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1050 0x410D068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1051 0x410D06C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1052 0x410D070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1053 0x410D074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1054 0x410D078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1055 0x410D07C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1056 0x410D080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1057 0x410D084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1058 0x410D088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1059 0x410D08C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1060 0x410D090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1061 0x410D094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1062 0x410D098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1063 0x410D09C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1064 0x410D0A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1065 0x410D0A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1066 0x410D0A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1067 0x410D0AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1068 0x410D0B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1069 0x410D0B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1070 0x410D0B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1071 0x410D0BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1072 0x410D0C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1073 0x410D0C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1074 0x410D0C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1075 0x410D0CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1076 0x410D0D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1077 0x410D0D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1078 0x410D0D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1079 0x410D0DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1080 0x410D0E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1081 0x410D0E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1082 0x410D0E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1083 0x410D0EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1084 0x410D0F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1085 0x410D0F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1086 0x410D0F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1087 0x410D0FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1088 0x410D100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1089 0x410D104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1090 0x410D108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1091 0x410D10C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1092 0x410D110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1093 0x410D114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1094 0x410D118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1095 0x410D11C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1096 0x410D120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1097 0x410D124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1098 0x410D128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1099 0x410D12C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1100 0x410D130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1101 0x410D134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1102 0x410D138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1103 0x410D13C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1104 0x410D140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1105 0x410D144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1106 0x410D148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1107 0x410D14C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1108 0x410D150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1109 0x410D154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1110 0x410D158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1111 0x410D15C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1112 0x410D160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1113 0x410D164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1114 0x410D168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1115 0x410D16C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1116 0x410D170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1117 0x410D174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1118 0x410D178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1119 0x410D17C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1120 0x410D180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1121 0x410D184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1122 0x410D188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1123 0x410D18C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1124 0x410D190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1125 0x410D194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1126 0x410D198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1127 0x410D19C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1128 0x410D1A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1129 0x410D1A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1130 0x410D1A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1131 0x410D1AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1132 0x410D1B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1133 0x410D1B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1134 0x410D1B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1135 0x410D1BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1136 0x410D1C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1137 0x410D1C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1138 0x410D1C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1139 0x410D1CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1140 0x410D1D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1141 0x410D1D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1142 0x410D1D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1143 0x410D1DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1144 0x410D1E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1145 0x410D1E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1146 0x410D1E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1147 0x410D1EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1148 0x410D1F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1149 0x410D1F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1150 0x410D1F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1151 0x410D1FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1152 0x410D200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1153 0x410D204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1154 0x410D208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1155 0x410D20C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1156 0x410D210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1157 0x410D214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1158 0x410D218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1159 0x410D21C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1160 0x410D220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1161 0x410D224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1162 0x410D228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1163 0x410D22C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1164 0x410D230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1165 0x410D234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1166 0x410D238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1167 0x410D23C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1168 0x410D240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1169 0x410D244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1170 0x410D248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1171 0x410D24C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1172 0x410D250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1173 0x410D254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1174 0x410D258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1175 0x410D25C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1176 0x410D260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1177 0x410D264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1178 0x410D268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1179 0x410D26C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1180 0x410D270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1181 0x410D274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1182 0x410D278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1183 0x410D27C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1184 0x410D280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1185 0x410D284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1186 0x410D288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1187 0x410D28C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1188 0x410D290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1189 0x410D294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1190 0x410D298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1191 0x410D29C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1192 0x410D2A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1193 0x410D2A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1194 0x410D2A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1195 0x410D2AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1196 0x410D2B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1197 0x410D2B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1198 0x410D2B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1199 0x410D2BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1200 0x410D2C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1201 0x410D2C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1202 0x410D2C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1203 0x410D2CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1204 0x410D2D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1205 0x410D2D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1206 0x410D2D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1207 0x410D2DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1208 0x410D2E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1209 0x410D2E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1210 0x410D2E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1211 0x410D2EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1212 0x410D2F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1213 0x410D2F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1214 0x410D2F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1215 0x410D2FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1216 0x410D300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1217 0x410D304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1218 0x410D308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1219 0x410D30C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1220 0x410D310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1221 0x410D314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1222 0x410D318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1223 0x410D31C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1224 0x410D320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1225 0x410D324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1226 0x410D328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1227 0x410D32C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1228 0x410D330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1229 0x410D334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1230 0x410D338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1231 0x410D33C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1232 0x410D340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1233 0x410D344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1234 0x410D348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1235 0x410D34C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1236 0x410D350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1237 0x410D354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1238 0x410D358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1239 0x410D35C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1240 0x410D360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1241 0x410D364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1242 0x410D368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1243 0x410D36C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1244 0x410D370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1245 0x410D374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1246 0x410D378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1247 0x410D37C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1248 0x410D380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1249 0x410D384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1250 0x410D388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1251 0x410D38C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1252 0x410D390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1253 0x410D394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1254 0x410D398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1255 0x410D39C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1256 0x410D3A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1257 0x410D3A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1258 0x410D3A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1259 0x410D3AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1260 0x410D3B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1261 0x410D3B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1262 0x410D3B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1263 0x410D3BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1264 0x410D3C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1265 0x410D3C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1266 0x410D3C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1267 0x410D3CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1268 0x410D3D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1269 0x410D3D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1270 0x410D3D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1271 0x410D3DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1272 0x410D3E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1273 0x410D3E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1274 0x410D3E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1275 0x410D3EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1276 0x410D3F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1277 0x410D3F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1278 0x410D3F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1279 0x410D3FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1280 0x410D400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1281 0x410D404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1282 0x410D408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1283 0x410D40C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1284 0x410D410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1285 0x410D414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1286 0x410D418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1287 0x410D41C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1288 0x410D420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1289 0x410D424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1290 0x410D428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1291 0x410D42C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1292 0x410D430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1293 0x410D434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1294 0x410D438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1295 0x410D43C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1296 0x410D440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1297 0x410D444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1298 0x410D448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1299 0x410D44C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1300 0x410D450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1301 0x410D454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1302 0x410D458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1303 0x410D45C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1304 0x410D460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1305 0x410D464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1306 0x410D468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1307 0x410D46C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1308 0x410D470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1309 0x410D474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1310 0x410D478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1311 0x410D47C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1312 0x410D480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1313 0x410D484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1314 0x410D488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1315 0x410D48C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1316 0x410D490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1317 0x410D494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1318 0x410D498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1319 0x410D49C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1320 0x410D4A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1321 0x410D4A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1322 0x410D4A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1323 0x410D4AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1324 0x410D4B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1325 0x410D4B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1326 0x410D4B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1327 0x410D4BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1328 0x410D4C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1329 0x410D4C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1330 0x410D4C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1331 0x410D4CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1332 0x410D4D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1333 0x410D4D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1334 0x410D4D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1335 0x410D4DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1336 0x410D4E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1337 0x410D4E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1338 0x410D4E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1339 0x410D4EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1340 0x410D4F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1341 0x410D4F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1342 0x410D4F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1343 0x410D4FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1344 0x410D500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1345 0x410D504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1346 0x410D508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1347 0x410D50C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1348 0x410D510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1349 0x410D514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1350 0x410D518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1351 0x410D51C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1352 0x410D520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1353 0x410D524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1354 0x410D528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1355 0x410D52C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1356 0x410D530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1357 0x410D534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1358 0x410D538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1359 0x410D53C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1360 0x410D540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1361 0x410D544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1362 0x410D548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1363 0x410D54C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1364 0x410D550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1365 0x410D554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1366 0x410D558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1367 0x410D55C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1368 0x410D560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1369 0x410D564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1370 0x410D568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1371 0x410D56C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1372 0x410D570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1373 0x410D574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1374 0x410D578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1375 0x410D57C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1376 0x410D580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1377 0x410D584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1378 0x410D588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1379 0x410D58C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1380 0x410D590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1381 0x410D594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1382 0x410D598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1383 0x410D59C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1384 0x410D5A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1385 0x410D5A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1386 0x410D5A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1387 0x410D5AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1388 0x410D5B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1389 0x410D5B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1390 0x410D5B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1391 0x410D5BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1392 0x410D5C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1393 0x410D5C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1394 0x410D5C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1395 0x410D5CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1396 0x410D5D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1397 0x410D5D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1398 0x410D5D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1399 0x410D5DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1400 0x410D5E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1401 0x410D5E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1402 0x410D5E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1403 0x410D5EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1404 0x410D5F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1405 0x410D5F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1406 0x410D5F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1407 0x410D5FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1408 0x410D600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1409 0x410D604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1410 0x410D608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1411 0x410D60C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1412 0x410D610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1413 0x410D614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1414 0x410D618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1415 0x410D61C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1416 0x410D620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1417 0x410D624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1418 0x410D628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1419 0x410D62C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1420 0x410D630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1421 0x410D634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1422 0x410D638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1423 0x410D63C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1424 0x410D640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1425 0x410D644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1426 0x410D648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1427 0x410D64C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1428 0x410D650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1429 0x410D654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1430 0x410D658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1431 0x410D65C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1432 0x410D660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1433 0x410D664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1434 0x410D668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1435 0x410D66C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1436 0x410D670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1437 0x410D674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1438 0x410D678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1439 0x410D67C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1440 0x410D680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1441 0x410D684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1442 0x410D688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1443 0x410D68C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1444 0x410D690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1445 0x410D694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1446 0x410D698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1447 0x410D69C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1448 0x410D6A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1449 0x410D6A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1450 0x410D6A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1451 0x410D6AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1452 0x410D6B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1453 0x410D6B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1454 0x410D6B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1455 0x410D6BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1456 0x410D6C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1457 0x410D6C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1458 0x410D6C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1459 0x410D6CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1460 0x410D6D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1461 0x410D6D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1462 0x410D6D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1463 0x410D6DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1464 0x410D6E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1465 0x410D6E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1466 0x410D6E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1467 0x410D6EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1468 0x410D6F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1469 0x410D6F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1470 0x410D6F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1471 0x410D6FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1472 0x410D700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1473 0x410D704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1474 0x410D708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1475 0x410D70C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1476 0x410D710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1477 0x410D714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1478 0x410D718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1479 0x410D71C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1480 0x410D720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1481 0x410D724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1482 0x410D728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1483 0x410D72C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1484 0x410D730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1485 0x410D734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1486 0x410D738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1487 0x410D73C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1488 0x410D740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1489 0x410D744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1490 0x410D748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1491 0x410D74C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1492 0x410D750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1493 0x410D754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1494 0x410D758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1495 0x410D75C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1496 0x410D760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1497 0x410D764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1498 0x410D768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1499 0x410D76C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1500 0x410D770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1501 0x410D774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1502 0x410D778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1503 0x410D77C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1504 0x410D780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1505 0x410D784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1506 0x410D788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1507 0x410D78C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1508 0x410D790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1509 0x410D794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1510 0x410D798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1511 0x410D79C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1512 0x410D7A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1513 0x410D7A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1514 0x410D7A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1515 0x410D7AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1516 0x410D7B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1517 0x410D7B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1518 0x410D7B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1519 0x410D7BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1520 0x410D7C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1521 0x410D7C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1522 0x410D7C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1523 0x410D7CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1524 0x410D7D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1525 0x410D7D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1526 0x410D7D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1527 0x410D7DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1528 0x410D7E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1529 0x410D7E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1530 0x410D7E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1531 0x410D7EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1532 0x410D7F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1533 0x410D7F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1534 0x410D7F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1535 0x410D7FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1536 0x410D800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1537 0x410D804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1538 0x410D808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1539 0x410D80C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1540 0x410D810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1541 0x410D814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1542 0x410D818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1543 0x410D81C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1544 0x410D820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1545 0x410D824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1546 0x410D828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1547 0x410D82C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1548 0x410D830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1549 0x410D834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1550 0x410D838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1551 0x410D83C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1552 0x410D840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1553 0x410D844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1554 0x410D848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1555 0x410D84C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1556 0x410D850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1557 0x410D854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1558 0x410D858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1559 0x410D85C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1560 0x410D860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1561 0x410D864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1562 0x410D868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1563 0x410D86C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1564 0x410D870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1565 0x410D874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1566 0x410D878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1567 0x410D87C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1568 0x410D880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1569 0x410D884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1570 0x410D888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1571 0x410D88C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1572 0x410D890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1573 0x410D894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1574 0x410D898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1575 0x410D89C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1576 0x410D8A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1577 0x410D8A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1578 0x410D8A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1579 0x410D8AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1580 0x410D8B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1581 0x410D8B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1582 0x410D8B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1583 0x410D8BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1584 0x410D8C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1585 0x410D8C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1586 0x410D8C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1587 0x410D8CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1588 0x410D8D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1589 0x410D8D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1590 0x410D8D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1591 0x410D8DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1592 0x410D8E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1593 0x410D8E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1594 0x410D8E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1595 0x410D8EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1596 0x410D8F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1597 0x410D8F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1598 0x410D8F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1599 0x410D8FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1600 0x410D900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1601 0x410D904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1602 0x410D908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1603 0x410D90C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1604 0x410D910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1605 0x410D914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1606 0x410D918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1607 0x410D91C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1608 0x410D920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1609 0x410D924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1610 0x410D928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1611 0x410D92C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1612 0x410D930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1613 0x410D934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1614 0x410D938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1615 0x410D93C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1616 0x410D940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1617 0x410D944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1618 0x410D948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1619 0x410D94C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1620 0x410D950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1621 0x410D954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1622 0x410D958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1623 0x410D95C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1624 0x410D960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1625 0x410D964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1626 0x410D968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1627 0x410D96C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1628 0x410D970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1629 0x410D974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1630 0x410D978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1631 0x410D97C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1632 0x410D980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1633 0x410D984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1634 0x410D988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1635 0x410D98C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1636 0x410D990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1637 0x410D994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1638 0x410D998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1639 0x410D99C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1640 0x410D9A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1641 0x410D9A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1642 0x410D9A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1643 0x410D9AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1644 0x410D9B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1645 0x410D9B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1646 0x410D9B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1647 0x410D9BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1648 0x410D9C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1649 0x410D9C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1650 0x410D9C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1651 0x410D9CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1652 0x410D9D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1653 0x410D9D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1654 0x410D9D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1655 0x410D9DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1656 0x410D9E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1657 0x410D9E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1658 0x410D9E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1659 0x410D9EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1660 0x410D9F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1661 0x410D9F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1662 0x410D9F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1663 0x410D9FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1664 0x410DA00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1665 0x410DA04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1666 0x410DA08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1667 0x410DA0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1668 0x410DA10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1669 0x410DA14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1670 0x410DA18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1671 0x410DA1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1672 0x410DA20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1673 0x410DA24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1674 0x410DA28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1675 0x410DA2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1676 0x410DA30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1677 0x410DA34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1678 0x410DA38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1679 0x410DA3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1680 0x410DA40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1681 0x410DA44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1682 0x410DA48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1683 0x410DA4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1684 0x410DA50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1685 0x410DA54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1686 0x410DA58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1687 0x410DA5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1688 0x410DA60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1689 0x410DA64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1690 0x410DA68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1691 0x410DA6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1692 0x410DA70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1693 0x410DA74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1694 0x410DA78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1695 0x410DA7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1696 0x410DA80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1697 0x410DA84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1698 0x410DA88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1699 0x410DA8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1700 0x410DA90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1701 0x410DA94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1702 0x410DA98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1703 0x410DA9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1704 0x410DAA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1705 0x410DAA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1706 0x410DAA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1707 0x410DAAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1708 0x410DAB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1709 0x410DAB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1710 0x410DAB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1711 0x410DABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1712 0x410DAC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1713 0x410DAC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1714 0x410DAC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1715 0x410DACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1716 0x410DAD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1717 0x410DAD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1718 0x410DAD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1719 0x410DADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1720 0x410DAE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1721 0x410DAE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1722 0x410DAE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1723 0x410DAEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1724 0x410DAF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1725 0x410DAF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1726 0x410DAF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1727 0x410DAFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1728 0x410DB00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1729 0x410DB04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1730 0x410DB08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1731 0x410DB0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1732 0x410DB10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1733 0x410DB14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1734 0x410DB18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1735 0x410DB1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1736 0x410DB20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1737 0x410DB24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1738 0x410DB28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1739 0x410DB2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1740 0x410DB30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1741 0x410DB34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1742 0x410DB38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1743 0x410DB3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1744 0x410DB40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1745 0x410DB44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1746 0x410DB48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1747 0x410DB4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1748 0x410DB50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1749 0x410DB54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1750 0x410DB58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1751 0x410DB5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1752 0x410DB60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1753 0x410DB64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1754 0x410DB68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1755 0x410DB6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1756 0x410DB70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1757 0x410DB74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1758 0x410DB78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1759 0x410DB7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1760 0x410DB80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1761 0x410DB84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1762 0x410DB88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1763 0x410DB8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1764 0x410DB90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1765 0x410DB94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1766 0x410DB98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1767 0x410DB9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1768 0x410DBA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1769 0x410DBA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1770 0x410DBA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1771 0x410DBAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1772 0x410DBB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1773 0x410DBB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1774 0x410DBB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1775 0x410DBBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1776 0x410DBC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1777 0x410DBC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1778 0x410DBC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1779 0x410DBCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1780 0x410DBD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1781 0x410DBD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1782 0x410DBD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1783 0x410DBDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1784 0x410DBE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1785 0x410DBE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1786 0x410DBE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1787 0x410DBEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1788 0x410DBF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1789 0x410DBF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1790 0x410DBF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1791 0x410DBFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1792 0x410DC00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1793 0x410DC04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1794 0x410DC08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1795 0x410DC0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1796 0x410DC10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1797 0x410DC14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1798 0x410DC18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1799 0x410DC1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1800 0x410DC20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1801 0x410DC24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1802 0x410DC28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1803 0x410DC2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1804 0x410DC30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1805 0x410DC34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1806 0x410DC38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1807 0x410DC3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1808 0x410DC40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1809 0x410DC44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1810 0x410DC48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1811 0x410DC4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1812 0x410DC50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1813 0x410DC54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1814 0x410DC58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1815 0x410DC5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1816 0x410DC60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1817 0x410DC64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1818 0x410DC68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1819 0x410DC6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1820 0x410DC70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1821 0x410DC74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1822 0x410DC78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1823 0x410DC7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1824 0x410DC80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1825 0x410DC84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1826 0x410DC88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1827 0x410DC8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1828 0x410DC90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1829 0x410DC94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1830 0x410DC98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1831 0x410DC9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1832 0x410DCA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1833 0x410DCA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1834 0x410DCA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1835 0x410DCAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1836 0x410DCB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1837 0x410DCB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1838 0x410DCB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1839 0x410DCBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1840 0x410DCC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1841 0x410DCC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1842 0x410DCC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1843 0x410DCCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1844 0x410DCD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1845 0x410DCD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1846 0x410DCD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1847 0x410DCDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1848 0x410DCE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1849 0x410DCE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1850 0x410DCE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1851 0x410DCEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1852 0x410DCF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1853 0x410DCF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1854 0x410DCF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1855 0x410DCFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1856 0x410DD00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1857 0x410DD04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1858 0x410DD08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1859 0x410DD0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1860 0x410DD10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1861 0x410DD14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1862 0x410DD18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1863 0x410DD1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1864 0x410DD20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1865 0x410DD24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1866 0x410DD28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1867 0x410DD2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1868 0x410DD30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1869 0x410DD34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1870 0x410DD38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1871 0x410DD3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1872 0x410DD40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1873 0x410DD44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1874 0x410DD48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1875 0x410DD4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1876 0x410DD50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1877 0x410DD54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1878 0x410DD58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1879 0x410DD5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1880 0x410DD60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1881 0x410DD64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1882 0x410DD68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1883 0x410DD6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1884 0x410DD70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1885 0x410DD74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1886 0x410DD78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1887 0x410DD7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1888 0x410DD80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1889 0x410DD84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1890 0x410DD88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1891 0x410DD8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1892 0x410DD90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1893 0x410DD94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1894 0x410DD98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1895 0x410DD9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1896 0x410DDA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1897 0x410DDA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1898 0x410DDA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1899 0x410DDAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1900 0x410DDB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1901 0x410DDB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1902 0x410DDB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1903 0x410DDBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1904 0x410DDC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1905 0x410DDC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1906 0x410DDC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1907 0x410DDCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1908 0x410DDD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1909 0x410DDD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1910 0x410DDD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1911 0x410DDDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1912 0x410DDE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1913 0x410DDE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1914 0x410DDE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1915 0x410DDEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1916 0x410DDF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1917 0x410DDF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1918 0x410DDF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1919 0x410DDFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1920 0x410DE00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1921 0x410DE04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1922 0x410DE08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1923 0x410DE0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1924 0x410DE10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1925 0x410DE14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1926 0x410DE18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1927 0x410DE1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1928 0x410DE20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1929 0x410DE24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1930 0x410DE28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1931 0x410DE2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1932 0x410DE30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1933 0x410DE34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1934 0x410DE38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1935 0x410DE3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1936 0x410DE40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1937 0x410DE44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1938 0x410DE48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1939 0x410DE4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1940 0x410DE50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1941 0x410DE54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1942 0x410DE58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1943 0x410DE5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1944 0x410DE60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1945 0x410DE64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1946 0x410DE68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1947 0x410DE6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1948 0x410DE70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1949 0x410DE74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1950 0x410DE78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1951 0x410DE7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1952 0x410DE80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1953 0x410DE84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1954 0x410DE88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1955 0x410DE8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1956 0x410DE90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1957 0x410DE94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1958 0x410DE98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1959 0x410DE9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1960 0x410DEA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1961 0x410DEA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1962 0x410DEA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1963 0x410DEAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1964 0x410DEB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1965 0x410DEB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1966 0x410DEB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1967 0x410DEBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1968 0x410DEC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1969 0x410DEC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1970 0x410DEC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1971 0x410DECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1972 0x410DED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1973 0x410DED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1974 0x410DED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1975 0x410DEDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1976 0x410DEE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1977 0x410DEE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1978 0x410DEE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1979 0x410DEEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1980 0x410DEF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1981 0x410DEF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1982 0x410DEF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1983 0x410DEFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1984 0x410DF00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1985 0x410DF04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1986 0x410DF08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1987 0x410DF0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1988 0x410DF10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1989 0x410DF14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1990 0x410DF18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1991 0x410DF1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1992 0x410DF20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1993 0x410DF24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1994 0x410DF28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1995 0x410DF2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1996 0x410DF30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1997 0x410DF34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1998 0x410DF38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_1999 0x410DF3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2000 0x410DF40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2001 0x410DF44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2002 0x410DF48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2003 0x410DF4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2004 0x410DF50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2005 0x410DF54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2006 0x410DF58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2007 0x410DF5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2008 0x410DF60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2009 0x410DF64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2010 0x410DF68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2011 0x410DF6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2012 0x410DF70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2013 0x410DF74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2014 0x410DF78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2015 0x410DF7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2016 0x410DF80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2017 0x410DF84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2018 0x410DF88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2019 0x410DF8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2020 0x410DF90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2021 0x410DF94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2022 0x410DF98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2023 0x410DF9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2024 0x410DFA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2025 0x410DFA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2026 0x410DFA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2027 0x410DFAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2028 0x410DFB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2029 0x410DFB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2030 0x410DFB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2031 0x410DFBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2032 0x410DFC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2033 0x410DFC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2034 0x410DFC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2035 0x410DFCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2036 0x410DFD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2037 0x410DFD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2038 0x410DFD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2039 0x410DFDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2040 0x410DFE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2041 0x410DFE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2042 0x410DFE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2043 0x410DFEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2044 0x410DFF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2045 0x410DFF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2046 0x410DFF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_2047 0x410DFFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 0x410E000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1 0x410E004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2 0x410E008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_3 0x410E00C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_4 0x410E010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_5 0x410E014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_6 0x410E018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_7 0x410E01C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_8 0x410E020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_9 0x410E024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_10 0x410E028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_11 0x410E02C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_12 0x410E030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_13 0x410E034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_14 0x410E038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_15 0x410E03C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_16 0x410E040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_17 0x410E044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_18 0x410E048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_19 0x410E04C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_20 0x410E050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_21 0x410E054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_22 0x410E058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_23 0x410E05C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_24 0x410E060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_25 0x410E064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_26 0x410E068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_27 0x410E06C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_28 0x410E070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_29 0x410E074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_30 0x410E078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_31 0x410E07C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_32 0x410E080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_33 0x410E084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_34 0x410E088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_35 0x410E08C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_36 0x410E090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_37 0x410E094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_38 0x410E098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_39 0x410E09C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_40 0x410E0A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_41 0x410E0A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_42 0x410E0A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_43 0x410E0AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_44 0x410E0B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_45 0x410E0B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_46 0x410E0B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_47 0x410E0BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_48 0x410E0C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_49 0x410E0C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_50 0x410E0C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_51 0x410E0CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_52 0x410E0D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_53 0x410E0D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_54 0x410E0D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_55 0x410E0DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_56 0x410E0E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_57 0x410E0E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_58 0x410E0E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_59 0x410E0EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_60 0x410E0F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_61 0x410E0F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_62 0x410E0F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_63 0x410E0FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_64 0x410E100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_65 0x410E104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_66 0x410E108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_67 0x410E10C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_68 0x410E110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_69 0x410E114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_70 0x410E118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_71 0x410E11C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_72 0x410E120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_73 0x410E124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_74 0x410E128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_75 0x410E12C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_76 0x410E130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_77 0x410E134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_78 0x410E138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_79 0x410E13C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_80 0x410E140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_81 0x410E144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_82 0x410E148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_83 0x410E14C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_84 0x410E150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_85 0x410E154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_86 0x410E158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_87 0x410E15C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_88 0x410E160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_89 0x410E164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_90 0x410E168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_91 0x410E16C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_92 0x410E170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_93 0x410E174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_94 0x410E178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_95 0x410E17C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_96 0x410E180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_97 0x410E184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_98 0x410E188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_99 0x410E18C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_100 0x410E190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_101 0x410E194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_102 0x410E198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_103 0x410E19C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_104 0x410E1A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_105 0x410E1A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_106 0x410E1A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_107 0x410E1AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_108 0x410E1B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_109 0x410E1B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_110 0x410E1B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_111 0x410E1BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_112 0x410E1C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_113 0x410E1C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_114 0x410E1C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_115 0x410E1CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_116 0x410E1D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_117 0x410E1D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_118 0x410E1D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_119 0x410E1DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_120 0x410E1E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_121 0x410E1E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_122 0x410E1E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_123 0x410E1EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_124 0x410E1F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_125 0x410E1F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_126 0x410E1F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_127 0x410E1FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_128 0x410E200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_129 0x410E204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_130 0x410E208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_131 0x410E20C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_132 0x410E210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_133 0x410E214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_134 0x410E218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_135 0x410E21C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_136 0x410E220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_137 0x410E224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_138 0x410E228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_139 0x410E22C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_140 0x410E230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_141 0x410E234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_142 0x410E238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_143 0x410E23C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_144 0x410E240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_145 0x410E244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_146 0x410E248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_147 0x410E24C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_148 0x410E250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_149 0x410E254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_150 0x410E258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_151 0x410E25C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_152 0x410E260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_153 0x410E264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_154 0x410E268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_155 0x410E26C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_156 0x410E270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_157 0x410E274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_158 0x410E278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_159 0x410E27C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_160 0x410E280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_161 0x410E284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_162 0x410E288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_163 0x410E28C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_164 0x410E290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_165 0x410E294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_166 0x410E298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_167 0x410E29C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_168 0x410E2A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_169 0x410E2A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_170 0x410E2A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_171 0x410E2AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_172 0x410E2B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_173 0x410E2B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_174 0x410E2B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_175 0x410E2BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_176 0x410E2C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_177 0x410E2C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_178 0x410E2C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_179 0x410E2CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_180 0x410E2D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_181 0x410E2D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_182 0x410E2D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_183 0x410E2DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_184 0x410E2E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_185 0x410E2E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_186 0x410E2E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_187 0x410E2EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_188 0x410E2F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_189 0x410E2F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_190 0x410E2F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_191 0x410E2FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_192 0x410E300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_193 0x410E304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_194 0x410E308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_195 0x410E30C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_196 0x410E310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_197 0x410E314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_198 0x410E318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_199 0x410E31C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_200 0x410E320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_201 0x410E324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_202 0x410E328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_203 0x410E32C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_204 0x410E330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_205 0x410E334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_206 0x410E338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_207 0x410E33C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_208 0x410E340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_209 0x410E344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_210 0x410E348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_211 0x410E34C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_212 0x410E350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_213 0x410E354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_214 0x410E358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_215 0x410E35C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_216 0x410E360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_217 0x410E364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_218 0x410E368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_219 0x410E36C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_220 0x410E370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_221 0x410E374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_222 0x410E378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_223 0x410E37C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_224 0x410E380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_225 0x410E384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_226 0x410E388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_227 0x410E38C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_228 0x410E390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_229 0x410E394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_230 0x410E398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_231 0x410E39C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_232 0x410E3A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_233 0x410E3A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_234 0x410E3A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_235 0x410E3AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_236 0x410E3B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_237 0x410E3B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_238 0x410E3B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_239 0x410E3BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_240 0x410E3C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_241 0x410E3C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_242 0x410E3C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_243 0x410E3CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_244 0x410E3D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_245 0x410E3D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_246 0x410E3D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_247 0x410E3DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_248 0x410E3E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_249 0x410E3E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_250 0x410E3E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_251 0x410E3EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_252 0x410E3F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_253 0x410E3F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_254 0x410E3F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_255 0x410E3FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_256 0x410E400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_257 0x410E404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_258 0x410E408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_259 0x410E40C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_260 0x410E410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_261 0x410E414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_262 0x410E418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_263 0x410E41C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_264 0x410E420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_265 0x410E424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_266 0x410E428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_267 0x410E42C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_268 0x410E430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_269 0x410E434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_270 0x410E438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_271 0x410E43C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_272 0x410E440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_273 0x410E444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_274 0x410E448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_275 0x410E44C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_276 0x410E450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_277 0x410E454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_278 0x410E458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_279 0x410E45C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_280 0x410E460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_281 0x410E464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_282 0x410E468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_283 0x410E46C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_284 0x410E470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_285 0x410E474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_286 0x410E478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_287 0x410E47C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_288 0x410E480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_289 0x410E484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_290 0x410E488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_291 0x410E48C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_292 0x410E490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_293 0x410E494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_294 0x410E498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_295 0x410E49C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_296 0x410E4A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_297 0x410E4A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_298 0x410E4A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_299 0x410E4AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_300 0x410E4B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_301 0x410E4B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_302 0x410E4B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_303 0x410E4BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_304 0x410E4C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_305 0x410E4C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_306 0x410E4C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_307 0x410E4CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_308 0x410E4D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_309 0x410E4D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_310 0x410E4D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_311 0x410E4DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_312 0x410E4E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_313 0x410E4E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_314 0x410E4E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_315 0x410E4EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_316 0x410E4F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_317 0x410E4F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_318 0x410E4F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_319 0x410E4FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_320 0x410E500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_321 0x410E504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_322 0x410E508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_323 0x410E50C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_324 0x410E510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_325 0x410E514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_326 0x410E518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_327 0x410E51C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_328 0x410E520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_329 0x410E524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_330 0x410E528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_331 0x410E52C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_332 0x410E530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_333 0x410E534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_334 0x410E538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_335 0x410E53C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_336 0x410E540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_337 0x410E544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_338 0x410E548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_339 0x410E54C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_340 0x410E550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_341 0x410E554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_342 0x410E558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_343 0x410E55C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_344 0x410E560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_345 0x410E564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_346 0x410E568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_347 0x410E56C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_348 0x410E570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_349 0x410E574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_350 0x410E578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_351 0x410E57C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_352 0x410E580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_353 0x410E584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_354 0x410E588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_355 0x410E58C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_356 0x410E590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_357 0x410E594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_358 0x410E598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_359 0x410E59C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_360 0x410E5A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_361 0x410E5A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_362 0x410E5A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_363 0x410E5AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_364 0x410E5B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_365 0x410E5B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_366 0x410E5B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_367 0x410E5BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_368 0x410E5C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_369 0x410E5C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_370 0x410E5C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_371 0x410E5CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_372 0x410E5D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_373 0x410E5D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_374 0x410E5D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_375 0x410E5DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_376 0x410E5E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_377 0x410E5E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_378 0x410E5E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_379 0x410E5EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_380 0x410E5F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_381 0x410E5F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_382 0x410E5F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_383 0x410E5FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_384 0x410E600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_385 0x410E604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_386 0x410E608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_387 0x410E60C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_388 0x410E610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_389 0x410E614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_390 0x410E618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_391 0x410E61C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_392 0x410E620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_393 0x410E624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_394 0x410E628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_395 0x410E62C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_396 0x410E630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_397 0x410E634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_398 0x410E638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_399 0x410E63C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_400 0x410E640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_401 0x410E644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_402 0x410E648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_403 0x410E64C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_404 0x410E650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_405 0x410E654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_406 0x410E658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_407 0x410E65C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_408 0x410E660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_409 0x410E664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_410 0x410E668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_411 0x410E66C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_412 0x410E670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_413 0x410E674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_414 0x410E678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_415 0x410E67C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_416 0x410E680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_417 0x410E684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_418 0x410E688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_419 0x410E68C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_420 0x410E690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_421 0x410E694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_422 0x410E698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_423 0x410E69C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_424 0x410E6A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_425 0x410E6A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_426 0x410E6A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_427 0x410E6AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_428 0x410E6B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_429 0x410E6B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_430 0x410E6B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_431 0x410E6BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_432 0x410E6C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_433 0x410E6C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_434 0x410E6C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_435 0x410E6CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_436 0x410E6D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_437 0x410E6D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_438 0x410E6D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_439 0x410E6DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_440 0x410E6E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_441 0x410E6E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_442 0x410E6E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_443 0x410E6EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_444 0x410E6F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_445 0x410E6F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_446 0x410E6F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_447 0x410E6FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_448 0x410E700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_449 0x410E704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_450 0x410E708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_451 0x410E70C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_452 0x410E710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_453 0x410E714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_454 0x410E718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_455 0x410E71C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_456 0x410E720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_457 0x410E724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_458 0x410E728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_459 0x410E72C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_460 0x410E730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_461 0x410E734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_462 0x410E738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_463 0x410E73C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_464 0x410E740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_465 0x410E744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_466 0x410E748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_467 0x410E74C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_468 0x410E750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_469 0x410E754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_470 0x410E758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_471 0x410E75C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_472 0x410E760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_473 0x410E764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_474 0x410E768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_475 0x410E76C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_476 0x410E770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_477 0x410E774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_478 0x410E778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_479 0x410E77C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_480 0x410E780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_481 0x410E784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_482 0x410E788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_483 0x410E78C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_484 0x410E790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_485 0x410E794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_486 0x410E798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_487 0x410E79C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_488 0x410E7A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_489 0x410E7A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_490 0x410E7A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_491 0x410E7AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_492 0x410E7B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_493 0x410E7B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_494 0x410E7B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_495 0x410E7BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_496 0x410E7C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_497 0x410E7C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_498 0x410E7C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_499 0x410E7CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_500 0x410E7D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_501 0x410E7D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_502 0x410E7D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_503 0x410E7DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_504 0x410E7E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_505 0x410E7E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_506 0x410E7E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_507 0x410E7EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_508 0x410E7F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_509 0x410E7F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_510 0x410E7F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_511 0x410E7FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_512 0x410E800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_513 0x410E804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_514 0x410E808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_515 0x410E80C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_516 0x410E810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_517 0x410E814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_518 0x410E818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_519 0x410E81C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_520 0x410E820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_521 0x410E824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_522 0x410E828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_523 0x410E82C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_524 0x410E830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_525 0x410E834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_526 0x410E838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_527 0x410E83C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_528 0x410E840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_529 0x410E844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_530 0x410E848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_531 0x410E84C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_532 0x410E850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_533 0x410E854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_534 0x410E858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_535 0x410E85C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_536 0x410E860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_537 0x410E864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_538 0x410E868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_539 0x410E86C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_540 0x410E870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_541 0x410E874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_542 0x410E878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_543 0x410E87C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_544 0x410E880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_545 0x410E884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_546 0x410E888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_547 0x410E88C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_548 0x410E890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_549 0x410E894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_550 0x410E898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_551 0x410E89C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_552 0x410E8A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_553 0x410E8A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_554 0x410E8A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_555 0x410E8AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_556 0x410E8B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_557 0x410E8B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_558 0x410E8B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_559 0x410E8BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_560 0x410E8C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_561 0x410E8C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_562 0x410E8C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_563 0x410E8CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_564 0x410E8D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_565 0x410E8D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_566 0x410E8D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_567 0x410E8DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_568 0x410E8E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_569 0x410E8E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_570 0x410E8E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_571 0x410E8EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_572 0x410E8F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_573 0x410E8F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_574 0x410E8F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_575 0x410E8FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_576 0x410E900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_577 0x410E904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_578 0x410E908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_579 0x410E90C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_580 0x410E910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_581 0x410E914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_582 0x410E918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_583 0x410E91C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_584 0x410E920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_585 0x410E924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_586 0x410E928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_587 0x410E92C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_588 0x410E930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_589 0x410E934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_590 0x410E938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_591 0x410E93C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_592 0x410E940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_593 0x410E944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_594 0x410E948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_595 0x410E94C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_596 0x410E950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_597 0x410E954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_598 0x410E958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_599 0x410E95C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_600 0x410E960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_601 0x410E964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_602 0x410E968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_603 0x410E96C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_604 0x410E970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_605 0x410E974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_606 0x410E978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_607 0x410E97C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_608 0x410E980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_609 0x410E984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_610 0x410E988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_611 0x410E98C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_612 0x410E990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_613 0x410E994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_614 0x410E998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_615 0x410E99C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_616 0x410E9A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_617 0x410E9A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_618 0x410E9A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_619 0x410E9AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_620 0x410E9B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_621 0x410E9B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_622 0x410E9B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_623 0x410E9BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_624 0x410E9C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_625 0x410E9C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_626 0x410E9C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_627 0x410E9CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_628 0x410E9D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_629 0x410E9D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_630 0x410E9D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_631 0x410E9DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_632 0x410E9E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_633 0x410E9E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_634 0x410E9E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_635 0x410E9EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_636 0x410E9F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_637 0x410E9F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_638 0x410E9F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_639 0x410E9FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_640 0x410EA00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_641 0x410EA04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_642 0x410EA08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_643 0x410EA0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_644 0x410EA10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_645 0x410EA14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_646 0x410EA18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_647 0x410EA1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_648 0x410EA20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_649 0x410EA24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_650 0x410EA28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_651 0x410EA2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_652 0x410EA30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_653 0x410EA34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_654 0x410EA38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_655 0x410EA3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_656 0x410EA40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_657 0x410EA44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_658 0x410EA48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_659 0x410EA4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_660 0x410EA50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_661 0x410EA54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_662 0x410EA58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_663 0x410EA5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_664 0x410EA60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_665 0x410EA64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_666 0x410EA68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_667 0x410EA6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_668 0x410EA70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_669 0x410EA74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_670 0x410EA78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_671 0x410EA7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_672 0x410EA80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_673 0x410EA84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_674 0x410EA88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_675 0x410EA8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_676 0x410EA90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_677 0x410EA94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_678 0x410EA98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_679 0x410EA9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_680 0x410EAA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_681 0x410EAA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_682 0x410EAA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_683 0x410EAAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_684 0x410EAB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_685 0x410EAB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_686 0x410EAB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_687 0x410EABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_688 0x410EAC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_689 0x410EAC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_690 0x410EAC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_691 0x410EACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_692 0x410EAD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_693 0x410EAD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_694 0x410EAD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_695 0x410EADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_696 0x410EAE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_697 0x410EAE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_698 0x410EAE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_699 0x410EAEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_700 0x410EAF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_701 0x410EAF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_702 0x410EAF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_703 0x410EAFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_704 0x410EB00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_705 0x410EB04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_706 0x410EB08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_707 0x410EB0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_708 0x410EB10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_709 0x410EB14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_710 0x410EB18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_711 0x410EB1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_712 0x410EB20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_713 0x410EB24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_714 0x410EB28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_715 0x410EB2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_716 0x410EB30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_717 0x410EB34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_718 0x410EB38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_719 0x410EB3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_720 0x410EB40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_721 0x410EB44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_722 0x410EB48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_723 0x410EB4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_724 0x410EB50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_725 0x410EB54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_726 0x410EB58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_727 0x410EB5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_728 0x410EB60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_729 0x410EB64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_730 0x410EB68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_731 0x410EB6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_732 0x410EB70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_733 0x410EB74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_734 0x410EB78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_735 0x410EB7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_736 0x410EB80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_737 0x410EB84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_738 0x410EB88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_739 0x410EB8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_740 0x410EB90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_741 0x410EB94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_742 0x410EB98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_743 0x410EB9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_744 0x410EBA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_745 0x410EBA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_746 0x410EBA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_747 0x410EBAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_748 0x410EBB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_749 0x410EBB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_750 0x410EBB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_751 0x410EBBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_752 0x410EBC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_753 0x410EBC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_754 0x410EBC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_755 0x410EBCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_756 0x410EBD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_757 0x410EBD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_758 0x410EBD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_759 0x410EBDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_760 0x410EBE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_761 0x410EBE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_762 0x410EBE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_763 0x410EBEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_764 0x410EBF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_765 0x410EBF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_766 0x410EBF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_767 0x410EBFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_768 0x410EC00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_769 0x410EC04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_770 0x410EC08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_771 0x410EC0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_772 0x410EC10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_773 0x410EC14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_774 0x410EC18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_775 0x410EC1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_776 0x410EC20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_777 0x410EC24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_778 0x410EC28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_779 0x410EC2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_780 0x410EC30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_781 0x410EC34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_782 0x410EC38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_783 0x410EC3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_784 0x410EC40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_785 0x410EC44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_786 0x410EC48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_787 0x410EC4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_788 0x410EC50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_789 0x410EC54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_790 0x410EC58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_791 0x410EC5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_792 0x410EC60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_793 0x410EC64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_794 0x410EC68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_795 0x410EC6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_796 0x410EC70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_797 0x410EC74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_798 0x410EC78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_799 0x410EC7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_800 0x410EC80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_801 0x410EC84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_802 0x410EC88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_803 0x410EC8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_804 0x410EC90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_805 0x410EC94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_806 0x410EC98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_807 0x410EC9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_808 0x410ECA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_809 0x410ECA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_810 0x410ECA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_811 0x410ECAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_812 0x410ECB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_813 0x410ECB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_814 0x410ECB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_815 0x410ECBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_816 0x410ECC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_817 0x410ECC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_818 0x410ECC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_819 0x410ECCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_820 0x410ECD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_821 0x410ECD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_822 0x410ECD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_823 0x410ECDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_824 0x410ECE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_825 0x410ECE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_826 0x410ECE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_827 0x410ECEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_828 0x410ECF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_829 0x410ECF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_830 0x410ECF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_831 0x410ECFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_832 0x410ED00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_833 0x410ED04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_834 0x410ED08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_835 0x410ED0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_836 0x410ED10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_837 0x410ED14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_838 0x410ED18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_839 0x410ED1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_840 0x410ED20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_841 0x410ED24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_842 0x410ED28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_843 0x410ED2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_844 0x410ED30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_845 0x410ED34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_846 0x410ED38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_847 0x410ED3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_848 0x410ED40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_849 0x410ED44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_850 0x410ED48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_851 0x410ED4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_852 0x410ED50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_853 0x410ED54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_854 0x410ED58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_855 0x410ED5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_856 0x410ED60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_857 0x410ED64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_858 0x410ED68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_859 0x410ED6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_860 0x410ED70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_861 0x410ED74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_862 0x410ED78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_863 0x410ED7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_864 0x410ED80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_865 0x410ED84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_866 0x410ED88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_867 0x410ED8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_868 0x410ED90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_869 0x410ED94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_870 0x410ED98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_871 0x410ED9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_872 0x410EDA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_873 0x410EDA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_874 0x410EDA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_875 0x410EDAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_876 0x410EDB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_877 0x410EDB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_878 0x410EDB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_879 0x410EDBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_880 0x410EDC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_881 0x410EDC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_882 0x410EDC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_883 0x410EDCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_884 0x410EDD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_885 0x410EDD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_886 0x410EDD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_887 0x410EDDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_888 0x410EDE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_889 0x410EDE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_890 0x410EDE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_891 0x410EDEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_892 0x410EDF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_893 0x410EDF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_894 0x410EDF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_895 0x410EDFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_896 0x410EE00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_897 0x410EE04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_898 0x410EE08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_899 0x410EE0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_900 0x410EE10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_901 0x410EE14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_902 0x410EE18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_903 0x410EE1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_904 0x410EE20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_905 0x410EE24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_906 0x410EE28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_907 0x410EE2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_908 0x410EE30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_909 0x410EE34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_910 0x410EE38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_911 0x410EE3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_912 0x410EE40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_913 0x410EE44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_914 0x410EE48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_915 0x410EE4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_916 0x410EE50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_917 0x410EE54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_918 0x410EE58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_919 0x410EE5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_920 0x410EE60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_921 0x410EE64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_922 0x410EE68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_923 0x410EE6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_924 0x410EE70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_925 0x410EE74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_926 0x410EE78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_927 0x410EE7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_928 0x410EE80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_929 0x410EE84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_930 0x410EE88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_931 0x410EE8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_932 0x410EE90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_933 0x410EE94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_934 0x410EE98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_935 0x410EE9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_936 0x410EEA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_937 0x410EEA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_938 0x410EEA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_939 0x410EEAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_940 0x410EEB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_941 0x410EEB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_942 0x410EEB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_943 0x410EEBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_944 0x410EEC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_945 0x410EEC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_946 0x410EEC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_947 0x410EECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_948 0x410EED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_949 0x410EED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_950 0x410EED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_951 0x410EEDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_952 0x410EEE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_953 0x410EEE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_954 0x410EEE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_955 0x410EEEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_956 0x410EEF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_957 0x410EEF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_958 0x410EEF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_959 0x410EEFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_960 0x410EF00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_961 0x410EF04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_962 0x410EF08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_963 0x410EF0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_964 0x410EF10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_965 0x410EF14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_966 0x410EF18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_967 0x410EF1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_968 0x410EF20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_969 0x410EF24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_970 0x410EF28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_971 0x410EF2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_972 0x410EF30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_973 0x410EF34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_974 0x410EF38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_975 0x410EF3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_976 0x410EF40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_977 0x410EF44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_978 0x410EF48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_979 0x410EF4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_980 0x410EF50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_981 0x410EF54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_982 0x410EF58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_983 0x410EF5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_984 0x410EF60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_985 0x410EF64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_986 0x410EF68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_987 0x410EF6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_988 0x410EF70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_989 0x410EF74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_990 0x410EF78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_991 0x410EF7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_992 0x410EF80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_993 0x410EF84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_994 0x410EF88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_995 0x410EF8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_996 0x410EF90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_997 0x410EF94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_998 0x410EF98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_999 0x410EF9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1000 0x410EFA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1001 0x410EFA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1002 0x410EFA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1003 0x410EFAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1004 0x410EFB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1005 0x410EFB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1006 0x410EFB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1007 0x410EFBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1008 0x410EFC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1009 0x410EFC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1010 0x410EFC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1011 0x410EFCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1012 0x410EFD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1013 0x410EFD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1014 0x410EFD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1015 0x410EFDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1016 0x410EFE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1017 0x410EFE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1018 0x410EFE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1019 0x410EFEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1020 0x410EFF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1021 0x410EFF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1022 0x410EFF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1023 0x410EFFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1024 0x410F000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1025 0x410F004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1026 0x410F008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1027 0x410F00C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1028 0x410F010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1029 0x410F014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1030 0x410F018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1031 0x410F01C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1032 0x410F020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1033 0x410F024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1034 0x410F028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1035 0x410F02C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1036 0x410F030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1037 0x410F034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1038 0x410F038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1039 0x410F03C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1040 0x410F040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1041 0x410F044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1042 0x410F048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1043 0x410F04C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1044 0x410F050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1045 0x410F054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1046 0x410F058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1047 0x410F05C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1048 0x410F060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1049 0x410F064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1050 0x410F068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1051 0x410F06C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1052 0x410F070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1053 0x410F074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1054 0x410F078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1055 0x410F07C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1056 0x410F080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1057 0x410F084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1058 0x410F088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1059 0x410F08C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1060 0x410F090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1061 0x410F094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1062 0x410F098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1063 0x410F09C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1064 0x410F0A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1065 0x410F0A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1066 0x410F0A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1067 0x410F0AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1068 0x410F0B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1069 0x410F0B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1070 0x410F0B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1071 0x410F0BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1072 0x410F0C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1073 0x410F0C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1074 0x410F0C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1075 0x410F0CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1076 0x410F0D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1077 0x410F0D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1078 0x410F0D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1079 0x410F0DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1080 0x410F0E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1081 0x410F0E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1082 0x410F0E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1083 0x410F0EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1084 0x410F0F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1085 0x410F0F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1086 0x410F0F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1087 0x410F0FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1088 0x410F100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1089 0x410F104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1090 0x410F108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1091 0x410F10C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1092 0x410F110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1093 0x410F114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1094 0x410F118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1095 0x410F11C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1096 0x410F120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1097 0x410F124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1098 0x410F128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1099 0x410F12C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1100 0x410F130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1101 0x410F134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1102 0x410F138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1103 0x410F13C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1104 0x410F140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1105 0x410F144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1106 0x410F148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1107 0x410F14C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1108 0x410F150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1109 0x410F154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1110 0x410F158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1111 0x410F15C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1112 0x410F160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1113 0x410F164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1114 0x410F168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1115 0x410F16C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1116 0x410F170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1117 0x410F174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1118 0x410F178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1119 0x410F17C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1120 0x410F180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1121 0x410F184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1122 0x410F188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1123 0x410F18C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1124 0x410F190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1125 0x410F194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1126 0x410F198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1127 0x410F19C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1128 0x410F1A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1129 0x410F1A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1130 0x410F1A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1131 0x410F1AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1132 0x410F1B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1133 0x410F1B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1134 0x410F1B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1135 0x410F1BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1136 0x410F1C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1137 0x410F1C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1138 0x410F1C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1139 0x410F1CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1140 0x410F1D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1141 0x410F1D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1142 0x410F1D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1143 0x410F1DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1144 0x410F1E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1145 0x410F1E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1146 0x410F1E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1147 0x410F1EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1148 0x410F1F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1149 0x410F1F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1150 0x410F1F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1151 0x410F1FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1152 0x410F200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1153 0x410F204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1154 0x410F208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1155 0x410F20C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1156 0x410F210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1157 0x410F214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1158 0x410F218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1159 0x410F21C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1160 0x410F220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1161 0x410F224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1162 0x410F228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1163 0x410F22C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1164 0x410F230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1165 0x410F234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1166 0x410F238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1167 0x410F23C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1168 0x410F240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1169 0x410F244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1170 0x410F248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1171 0x410F24C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1172 0x410F250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1173 0x410F254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1174 0x410F258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1175 0x410F25C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1176 0x410F260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1177 0x410F264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1178 0x410F268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1179 0x410F26C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1180 0x410F270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1181 0x410F274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1182 0x410F278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1183 0x410F27C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1184 0x410F280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1185 0x410F284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1186 0x410F288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1187 0x410F28C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1188 0x410F290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1189 0x410F294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1190 0x410F298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1191 0x410F29C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1192 0x410F2A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1193 0x410F2A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1194 0x410F2A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1195 0x410F2AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1196 0x410F2B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1197 0x410F2B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1198 0x410F2B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1199 0x410F2BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1200 0x410F2C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1201 0x410F2C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1202 0x410F2C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1203 0x410F2CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1204 0x410F2D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1205 0x410F2D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1206 0x410F2D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1207 0x410F2DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1208 0x410F2E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1209 0x410F2E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1210 0x410F2E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1211 0x410F2EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1212 0x410F2F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1213 0x410F2F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1214 0x410F2F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1215 0x410F2FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1216 0x410F300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1217 0x410F304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1218 0x410F308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1219 0x410F30C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1220 0x410F310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1221 0x410F314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1222 0x410F318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1223 0x410F31C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1224 0x410F320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1225 0x410F324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1226 0x410F328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1227 0x410F32C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1228 0x410F330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1229 0x410F334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1230 0x410F338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1231 0x410F33C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1232 0x410F340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1233 0x410F344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1234 0x410F348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1235 0x410F34C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1236 0x410F350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1237 0x410F354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1238 0x410F358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1239 0x410F35C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1240 0x410F360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1241 0x410F364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1242 0x410F368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1243 0x410F36C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1244 0x410F370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1245 0x410F374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1246 0x410F378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1247 0x410F37C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1248 0x410F380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1249 0x410F384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1250 0x410F388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1251 0x410F38C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1252 0x410F390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1253 0x410F394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1254 0x410F398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1255 0x410F39C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1256 0x410F3A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1257 0x410F3A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1258 0x410F3A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1259 0x410F3AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1260 0x410F3B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1261 0x410F3B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1262 0x410F3B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1263 0x410F3BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1264 0x410F3C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1265 0x410F3C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1266 0x410F3C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1267 0x410F3CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1268 0x410F3D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1269 0x410F3D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1270 0x410F3D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1271 0x410F3DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1272 0x410F3E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1273 0x410F3E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1274 0x410F3E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1275 0x410F3EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1276 0x410F3F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1277 0x410F3F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1278 0x410F3F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1279 0x410F3FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1280 0x410F400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1281 0x410F404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1282 0x410F408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1283 0x410F40C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1284 0x410F410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1285 0x410F414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1286 0x410F418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1287 0x410F41C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1288 0x410F420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1289 0x410F424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1290 0x410F428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1291 0x410F42C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1292 0x410F430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1293 0x410F434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1294 0x410F438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1295 0x410F43C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1296 0x410F440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1297 0x410F444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1298 0x410F448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1299 0x410F44C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1300 0x410F450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1301 0x410F454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1302 0x410F458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1303 0x410F45C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1304 0x410F460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1305 0x410F464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1306 0x410F468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1307 0x410F46C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1308 0x410F470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1309 0x410F474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1310 0x410F478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1311 0x410F47C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1312 0x410F480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1313 0x410F484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1314 0x410F488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1315 0x410F48C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1316 0x410F490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1317 0x410F494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1318 0x410F498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1319 0x410F49C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1320 0x410F4A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1321 0x410F4A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1322 0x410F4A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1323 0x410F4AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1324 0x410F4B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1325 0x410F4B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1326 0x410F4B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1327 0x410F4BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1328 0x410F4C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1329 0x410F4C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1330 0x410F4C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1331 0x410F4CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1332 0x410F4D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1333 0x410F4D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1334 0x410F4D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1335 0x410F4DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1336 0x410F4E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1337 0x410F4E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1338 0x410F4E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1339 0x410F4EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1340 0x410F4F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1341 0x410F4F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1342 0x410F4F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1343 0x410F4FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1344 0x410F500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1345 0x410F504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1346 0x410F508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1347 0x410F50C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1348 0x410F510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1349 0x410F514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1350 0x410F518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1351 0x410F51C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1352 0x410F520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1353 0x410F524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1354 0x410F528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1355 0x410F52C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1356 0x410F530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1357 0x410F534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1358 0x410F538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1359 0x410F53C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1360 0x410F540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1361 0x410F544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1362 0x410F548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1363 0x410F54C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1364 0x410F550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1365 0x410F554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1366 0x410F558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1367 0x410F55C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1368 0x410F560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1369 0x410F564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1370 0x410F568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1371 0x410F56C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1372 0x410F570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1373 0x410F574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1374 0x410F578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1375 0x410F57C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1376 0x410F580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1377 0x410F584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1378 0x410F588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1379 0x410F58C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1380 0x410F590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1381 0x410F594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1382 0x410F598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1383 0x410F59C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1384 0x410F5A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1385 0x410F5A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1386 0x410F5A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1387 0x410F5AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1388 0x410F5B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1389 0x410F5B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1390 0x410F5B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1391 0x410F5BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1392 0x410F5C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1393 0x410F5C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1394 0x410F5C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1395 0x410F5CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1396 0x410F5D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1397 0x410F5D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1398 0x410F5D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1399 0x410F5DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1400 0x410F5E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1401 0x410F5E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1402 0x410F5E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1403 0x410F5EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1404 0x410F5F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1405 0x410F5F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1406 0x410F5F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1407 0x410F5FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1408 0x410F600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1409 0x410F604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1410 0x410F608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1411 0x410F60C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1412 0x410F610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1413 0x410F614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1414 0x410F618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1415 0x410F61C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1416 0x410F620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1417 0x410F624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1418 0x410F628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1419 0x410F62C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1420 0x410F630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1421 0x410F634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1422 0x410F638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1423 0x410F63C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1424 0x410F640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1425 0x410F644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1426 0x410F648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1427 0x410F64C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1428 0x410F650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1429 0x410F654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1430 0x410F658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1431 0x410F65C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1432 0x410F660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1433 0x410F664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1434 0x410F668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1435 0x410F66C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1436 0x410F670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1437 0x410F674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1438 0x410F678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1439 0x410F67C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1440 0x410F680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1441 0x410F684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1442 0x410F688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1443 0x410F68C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1444 0x410F690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1445 0x410F694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1446 0x410F698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1447 0x410F69C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1448 0x410F6A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1449 0x410F6A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1450 0x410F6A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1451 0x410F6AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1452 0x410F6B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1453 0x410F6B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1454 0x410F6B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1455 0x410F6BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1456 0x410F6C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1457 0x410F6C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1458 0x410F6C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1459 0x410F6CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1460 0x410F6D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1461 0x410F6D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1462 0x410F6D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1463 0x410F6DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1464 0x410F6E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1465 0x410F6E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1466 0x410F6E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1467 0x410F6EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1468 0x410F6F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1469 0x410F6F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1470 0x410F6F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1471 0x410F6FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1472 0x410F700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1473 0x410F704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1474 0x410F708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1475 0x410F70C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1476 0x410F710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1477 0x410F714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1478 0x410F718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1479 0x410F71C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1480 0x410F720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1481 0x410F724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1482 0x410F728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1483 0x410F72C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1484 0x410F730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1485 0x410F734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1486 0x410F738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1487 0x410F73C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1488 0x410F740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1489 0x410F744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1490 0x410F748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1491 0x410F74C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1492 0x410F750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1493 0x410F754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1494 0x410F758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1495 0x410F75C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1496 0x410F760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1497 0x410F764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1498 0x410F768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1499 0x410F76C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1500 0x410F770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1501 0x410F774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1502 0x410F778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1503 0x410F77C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1504 0x410F780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1505 0x410F784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1506 0x410F788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1507 0x410F78C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1508 0x410F790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1509 0x410F794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1510 0x410F798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1511 0x410F79C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1512 0x410F7A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1513 0x410F7A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1514 0x410F7A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1515 0x410F7AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1516 0x410F7B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1517 0x410F7B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1518 0x410F7B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1519 0x410F7BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1520 0x410F7C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1521 0x410F7C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1522 0x410F7C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1523 0x410F7CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1524 0x410F7D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1525 0x410F7D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1526 0x410F7D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1527 0x410F7DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1528 0x410F7E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1529 0x410F7E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1530 0x410F7E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1531 0x410F7EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1532 0x410F7F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1533 0x410F7F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1534 0x410F7F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1535 0x410F7FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1536 0x410F800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1537 0x410F804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1538 0x410F808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1539 0x410F80C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1540 0x410F810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1541 0x410F814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1542 0x410F818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1543 0x410F81C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1544 0x410F820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1545 0x410F824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1546 0x410F828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1547 0x410F82C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1548 0x410F830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1549 0x410F834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1550 0x410F838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1551 0x410F83C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1552 0x410F840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1553 0x410F844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1554 0x410F848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1555 0x410F84C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1556 0x410F850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1557 0x410F854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1558 0x410F858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1559 0x410F85C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1560 0x410F860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1561 0x410F864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1562 0x410F868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1563 0x410F86C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1564 0x410F870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1565 0x410F874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1566 0x410F878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1567 0x410F87C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1568 0x410F880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1569 0x410F884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1570 0x410F888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1571 0x410F88C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1572 0x410F890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1573 0x410F894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1574 0x410F898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1575 0x410F89C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1576 0x410F8A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1577 0x410F8A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1578 0x410F8A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1579 0x410F8AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1580 0x410F8B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1581 0x410F8B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1582 0x410F8B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1583 0x410F8BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1584 0x410F8C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1585 0x410F8C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1586 0x410F8C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1587 0x410F8CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1588 0x410F8D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1589 0x410F8D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1590 0x410F8D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1591 0x410F8DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1592 0x410F8E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1593 0x410F8E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1594 0x410F8E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1595 0x410F8EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1596 0x410F8F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1597 0x410F8F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1598 0x410F8F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1599 0x410F8FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1600 0x410F900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1601 0x410F904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1602 0x410F908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1603 0x410F90C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1604 0x410F910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1605 0x410F914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1606 0x410F918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1607 0x410F91C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1608 0x410F920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1609 0x410F924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1610 0x410F928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1611 0x410F92C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1612 0x410F930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1613 0x410F934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1614 0x410F938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1615 0x410F93C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1616 0x410F940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1617 0x410F944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1618 0x410F948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1619 0x410F94C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1620 0x410F950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1621 0x410F954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1622 0x410F958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1623 0x410F95C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1624 0x410F960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1625 0x410F964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1626 0x410F968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1627 0x410F96C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1628 0x410F970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1629 0x410F974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1630 0x410F978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1631 0x410F97C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1632 0x410F980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1633 0x410F984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1634 0x410F988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1635 0x410F98C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1636 0x410F990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1637 0x410F994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1638 0x410F998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1639 0x410F99C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1640 0x410F9A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1641 0x410F9A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1642 0x410F9A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1643 0x410F9AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1644 0x410F9B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1645 0x410F9B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1646 0x410F9B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1647 0x410F9BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1648 0x410F9C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1649 0x410F9C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1650 0x410F9C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1651 0x410F9CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1652 0x410F9D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1653 0x410F9D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1654 0x410F9D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1655 0x410F9DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1656 0x410F9E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1657 0x410F9E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1658 0x410F9E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1659 0x410F9EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1660 0x410F9F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1661 0x410F9F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1662 0x410F9F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1663 0x410F9FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1664 0x410FA00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1665 0x410FA04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1666 0x410FA08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1667 0x410FA0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1668 0x410FA10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1669 0x410FA14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1670 0x410FA18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1671 0x410FA1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1672 0x410FA20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1673 0x410FA24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1674 0x410FA28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1675 0x410FA2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1676 0x410FA30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1677 0x410FA34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1678 0x410FA38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1679 0x410FA3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1680 0x410FA40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1681 0x410FA44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1682 0x410FA48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1683 0x410FA4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1684 0x410FA50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1685 0x410FA54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1686 0x410FA58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1687 0x410FA5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1688 0x410FA60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1689 0x410FA64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1690 0x410FA68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1691 0x410FA6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1692 0x410FA70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1693 0x410FA74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1694 0x410FA78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1695 0x410FA7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1696 0x410FA80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1697 0x410FA84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1698 0x410FA88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1699 0x410FA8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1700 0x410FA90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1701 0x410FA94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1702 0x410FA98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1703 0x410FA9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1704 0x410FAA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1705 0x410FAA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1706 0x410FAA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1707 0x410FAAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1708 0x410FAB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1709 0x410FAB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1710 0x410FAB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1711 0x410FABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1712 0x410FAC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1713 0x410FAC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1714 0x410FAC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1715 0x410FACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1716 0x410FAD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1717 0x410FAD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1718 0x410FAD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1719 0x410FADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1720 0x410FAE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1721 0x410FAE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1722 0x410FAE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1723 0x410FAEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1724 0x410FAF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1725 0x410FAF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1726 0x410FAF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1727 0x410FAFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1728 0x410FB00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1729 0x410FB04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1730 0x410FB08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1731 0x410FB0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1732 0x410FB10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1733 0x410FB14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1734 0x410FB18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1735 0x410FB1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1736 0x410FB20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1737 0x410FB24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1738 0x410FB28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1739 0x410FB2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1740 0x410FB30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1741 0x410FB34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1742 0x410FB38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1743 0x410FB3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1744 0x410FB40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1745 0x410FB44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1746 0x410FB48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1747 0x410FB4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1748 0x410FB50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1749 0x410FB54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1750 0x410FB58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1751 0x410FB5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1752 0x410FB60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1753 0x410FB64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1754 0x410FB68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1755 0x410FB6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1756 0x410FB70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1757 0x410FB74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1758 0x410FB78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1759 0x410FB7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1760 0x410FB80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1761 0x410FB84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1762 0x410FB88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1763 0x410FB8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1764 0x410FB90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1765 0x410FB94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1766 0x410FB98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1767 0x410FB9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1768 0x410FBA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1769 0x410FBA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1770 0x410FBA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1771 0x410FBAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1772 0x410FBB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1773 0x410FBB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1774 0x410FBB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1775 0x410FBBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1776 0x410FBC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1777 0x410FBC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1778 0x410FBC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1779 0x410FBCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1780 0x410FBD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1781 0x410FBD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1782 0x410FBD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1783 0x410FBDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1784 0x410FBE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1785 0x410FBE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1786 0x410FBE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1787 0x410FBEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1788 0x410FBF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1789 0x410FBF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1790 0x410FBF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1791 0x410FBFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1792 0x410FC00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1793 0x410FC04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1794 0x410FC08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1795 0x410FC0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1796 0x410FC10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1797 0x410FC14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1798 0x410FC18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1799 0x410FC1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1800 0x410FC20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1801 0x410FC24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1802 0x410FC28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1803 0x410FC2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1804 0x410FC30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1805 0x410FC34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1806 0x410FC38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1807 0x410FC3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1808 0x410FC40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1809 0x410FC44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1810 0x410FC48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1811 0x410FC4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1812 0x410FC50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1813 0x410FC54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1814 0x410FC58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1815 0x410FC5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1816 0x410FC60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1817 0x410FC64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1818 0x410FC68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1819 0x410FC6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1820 0x410FC70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1821 0x410FC74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1822 0x410FC78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1823 0x410FC7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1824 0x410FC80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1825 0x410FC84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1826 0x410FC88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1827 0x410FC8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1828 0x410FC90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1829 0x410FC94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1830 0x410FC98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1831 0x410FC9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1832 0x410FCA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1833 0x410FCA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1834 0x410FCA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1835 0x410FCAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1836 0x410FCB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1837 0x410FCB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1838 0x410FCB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1839 0x410FCBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1840 0x410FCC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1841 0x410FCC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1842 0x410FCC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1843 0x410FCCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1844 0x410FCD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1845 0x410FCD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1846 0x410FCD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1847 0x410FCDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1848 0x410FCE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1849 0x410FCE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1850 0x410FCE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1851 0x410FCEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1852 0x410FCF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1853 0x410FCF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1854 0x410FCF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1855 0x410FCFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1856 0x410FD00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1857 0x410FD04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1858 0x410FD08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1859 0x410FD0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1860 0x410FD10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1861 0x410FD14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1862 0x410FD18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1863 0x410FD1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1864 0x410FD20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1865 0x410FD24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1866 0x410FD28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1867 0x410FD2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1868 0x410FD30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1869 0x410FD34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1870 0x410FD38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1871 0x410FD3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1872 0x410FD40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1873 0x410FD44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1874 0x410FD48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1875 0x410FD4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1876 0x410FD50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1877 0x410FD54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1878 0x410FD58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1879 0x410FD5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1880 0x410FD60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1881 0x410FD64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1882 0x410FD68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1883 0x410FD6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1884 0x410FD70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1885 0x410FD74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1886 0x410FD78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1887 0x410FD7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1888 0x410FD80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1889 0x410FD84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1890 0x410FD88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1891 0x410FD8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1892 0x410FD90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1893 0x410FD94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1894 0x410FD98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1895 0x410FD9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1896 0x410FDA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1897 0x410FDA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1898 0x410FDA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1899 0x410FDAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1900 0x410FDB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1901 0x410FDB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1902 0x410FDB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1903 0x410FDBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1904 0x410FDC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1905 0x410FDC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1906 0x410FDC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1907 0x410FDCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1908 0x410FDD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1909 0x410FDD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1910 0x410FDD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1911 0x410FDDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1912 0x410FDE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1913 0x410FDE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1914 0x410FDE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1915 0x410FDEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1916 0x410FDF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1917 0x410FDF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1918 0x410FDF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1919 0x410FDFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1920 0x410FE00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1921 0x410FE04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1922 0x410FE08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1923 0x410FE0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1924 0x410FE10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1925 0x410FE14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1926 0x410FE18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1927 0x410FE1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1928 0x410FE20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1929 0x410FE24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1930 0x410FE28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1931 0x410FE2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1932 0x410FE30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1933 0x410FE34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1934 0x410FE38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1935 0x410FE3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1936 0x410FE40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1937 0x410FE44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1938 0x410FE48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1939 0x410FE4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1940 0x410FE50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1941 0x410FE54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1942 0x410FE58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1943 0x410FE5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1944 0x410FE60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1945 0x410FE64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1946 0x410FE68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1947 0x410FE6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1948 0x410FE70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1949 0x410FE74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1950 0x410FE78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1951 0x410FE7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1952 0x410FE80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1953 0x410FE84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1954 0x410FE88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1955 0x410FE8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1956 0x410FE90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1957 0x410FE94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1958 0x410FE98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1959 0x410FE9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1960 0x410FEA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1961 0x410FEA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1962 0x410FEA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1963 0x410FEAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1964 0x410FEB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1965 0x410FEB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1966 0x410FEB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1967 0x410FEBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1968 0x410FEC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1969 0x410FEC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1970 0x410FEC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1971 0x410FECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1972 0x410FED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1973 0x410FED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1974 0x410FED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1975 0x410FEDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1976 0x410FEE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1977 0x410FEE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1978 0x410FEE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1979 0x410FEEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1980 0x410FEF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1981 0x410FEF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1982 0x410FEF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1983 0x410FEFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1984 0x410FF00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1985 0x410FF04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1986 0x410FF08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1987 0x410FF0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1988 0x410FF10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1989 0x410FF14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1990 0x410FF18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1991 0x410FF1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1992 0x410FF20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1993 0x410FF24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1994 0x410FF28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1995 0x410FF2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1996 0x410FF30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1997 0x410FF34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1998 0x410FF38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_1999 0x410FF3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2000 0x410FF40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2001 0x410FF44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2002 0x410FF48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2003 0x410FF4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2004 0x410FF50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2005 0x410FF54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2006 0x410FF58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2007 0x410FF5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2008 0x410FF60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2009 0x410FF64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2010 0x410FF68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2011 0x410FF6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2012 0x410FF70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2013 0x410FF74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2014 0x410FF78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2015 0x410FF7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2016 0x410FF80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2017 0x410FF84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2018 0x410FF88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2019 0x410FF8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2020 0x410FF90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2021 0x410FF94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2022 0x410FF98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2023 0x410FF9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2024 0x410FFA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2025 0x410FFA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2026 0x410FFA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2027 0x410FFAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2028 0x410FFB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2029 0x410FFB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2030 0x410FFB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2031 0x410FFBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2032 0x410FFC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2033 0x410FFC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2034 0x410FFC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2035 0x410FFCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2036 0x410FFD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2037 0x410FFD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2038 0x410FFD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2039 0x410FFDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2040 0x410FFE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2041 0x410FFE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2042 0x410FFE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2043 0x410FFEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2044 0x410FFF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2045 0x410FFF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2046 0x410FFF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_2047 0x410FFFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 0x4110000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1 0x4110004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2 0x4110008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_3 0x411000C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_4 0x4110010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_5 0x4110014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_6 0x4110018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_7 0x411001C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_8 0x4110020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_9 0x4110024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_10 0x4110028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_11 0x411002C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_12 0x4110030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_13 0x4110034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_14 0x4110038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_15 0x411003C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_16 0x4110040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_17 0x4110044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_18 0x4110048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_19 0x411004C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_20 0x4110050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_21 0x4110054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_22 0x4110058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_23 0x411005C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_24 0x4110060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_25 0x4110064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_26 0x4110068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_27 0x411006C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_28 0x4110070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_29 0x4110074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_30 0x4110078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_31 0x411007C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_32 0x4110080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_33 0x4110084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_34 0x4110088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_35 0x411008C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_36 0x4110090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_37 0x4110094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_38 0x4110098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_39 0x411009C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_40 0x41100A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_41 0x41100A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_42 0x41100A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_43 0x41100AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_44 0x41100B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_45 0x41100B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_46 0x41100B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_47 0x41100BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_48 0x41100C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_49 0x41100C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_50 0x41100C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_51 0x41100CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_52 0x41100D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_53 0x41100D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_54 0x41100D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_55 0x41100DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_56 0x41100E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_57 0x41100E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_58 0x41100E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_59 0x41100EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_60 0x41100F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_61 0x41100F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_62 0x41100F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_63 0x41100FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_64 0x4110100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_65 0x4110104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_66 0x4110108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_67 0x411010C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_68 0x4110110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_69 0x4110114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_70 0x4110118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_71 0x411011C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_72 0x4110120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_73 0x4110124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_74 0x4110128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_75 0x411012C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_76 0x4110130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_77 0x4110134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_78 0x4110138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_79 0x411013C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_80 0x4110140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_81 0x4110144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_82 0x4110148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_83 0x411014C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_84 0x4110150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_85 0x4110154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_86 0x4110158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_87 0x411015C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_88 0x4110160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_89 0x4110164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_90 0x4110168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_91 0x411016C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_92 0x4110170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_93 0x4110174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_94 0x4110178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_95 0x411017C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_96 0x4110180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_97 0x4110184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_98 0x4110188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_99 0x411018C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_100 0x4110190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_101 0x4110194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_102 0x4110198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_103 0x411019C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_104 0x41101A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_105 0x41101A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_106 0x41101A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_107 0x41101AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_108 0x41101B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_109 0x41101B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_110 0x41101B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_111 0x41101BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_112 0x41101C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_113 0x41101C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_114 0x41101C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_115 0x41101CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_116 0x41101D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_117 0x41101D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_118 0x41101D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_119 0x41101DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_120 0x41101E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_121 0x41101E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_122 0x41101E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_123 0x41101EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_124 0x41101F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_125 0x41101F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_126 0x41101F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_127 0x41101FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_128 0x4110200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_129 0x4110204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_130 0x4110208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_131 0x411020C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_132 0x4110210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_133 0x4110214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_134 0x4110218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_135 0x411021C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_136 0x4110220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_137 0x4110224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_138 0x4110228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_139 0x411022C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_140 0x4110230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_141 0x4110234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_142 0x4110238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_143 0x411023C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_144 0x4110240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_145 0x4110244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_146 0x4110248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_147 0x411024C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_148 0x4110250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_149 0x4110254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_150 0x4110258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_151 0x411025C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_152 0x4110260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_153 0x4110264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_154 0x4110268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_155 0x411026C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_156 0x4110270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_157 0x4110274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_158 0x4110278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_159 0x411027C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_160 0x4110280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_161 0x4110284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_162 0x4110288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_163 0x411028C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_164 0x4110290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_165 0x4110294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_166 0x4110298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_167 0x411029C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_168 0x41102A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_169 0x41102A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_170 0x41102A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_171 0x41102AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_172 0x41102B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_173 0x41102B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_174 0x41102B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_175 0x41102BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_176 0x41102C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_177 0x41102C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_178 0x41102C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_179 0x41102CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_180 0x41102D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_181 0x41102D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_182 0x41102D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_183 0x41102DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_184 0x41102E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_185 0x41102E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_186 0x41102E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_187 0x41102EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_188 0x41102F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_189 0x41102F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_190 0x41102F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_191 0x41102FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_192 0x4110300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_193 0x4110304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_194 0x4110308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_195 0x411030C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_196 0x4110310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_197 0x4110314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_198 0x4110318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_199 0x411031C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_200 0x4110320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_201 0x4110324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_202 0x4110328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_203 0x411032C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_204 0x4110330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_205 0x4110334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_206 0x4110338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_207 0x411033C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_208 0x4110340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_209 0x4110344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_210 0x4110348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_211 0x411034C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_212 0x4110350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_213 0x4110354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_214 0x4110358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_215 0x411035C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_216 0x4110360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_217 0x4110364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_218 0x4110368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_219 0x411036C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_220 0x4110370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_221 0x4110374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_222 0x4110378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_223 0x411037C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_224 0x4110380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_225 0x4110384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_226 0x4110388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_227 0x411038C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_228 0x4110390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_229 0x4110394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_230 0x4110398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_231 0x411039C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_232 0x41103A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_233 0x41103A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_234 0x41103A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_235 0x41103AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_236 0x41103B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_237 0x41103B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_238 0x41103B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_239 0x41103BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_240 0x41103C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_241 0x41103C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_242 0x41103C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_243 0x41103CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_244 0x41103D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_245 0x41103D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_246 0x41103D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_247 0x41103DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_248 0x41103E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_249 0x41103E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_250 0x41103E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_251 0x41103EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_252 0x41103F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_253 0x41103F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_254 0x41103F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_255 0x41103FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_256 0x4110400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_257 0x4110404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_258 0x4110408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_259 0x411040C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_260 0x4110410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_261 0x4110414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_262 0x4110418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_263 0x411041C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_264 0x4110420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_265 0x4110424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_266 0x4110428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_267 0x411042C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_268 0x4110430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_269 0x4110434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_270 0x4110438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_271 0x411043C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_272 0x4110440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_273 0x4110444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_274 0x4110448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_275 0x411044C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_276 0x4110450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_277 0x4110454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_278 0x4110458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_279 0x411045C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_280 0x4110460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_281 0x4110464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_282 0x4110468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_283 0x411046C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_284 0x4110470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_285 0x4110474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_286 0x4110478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_287 0x411047C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_288 0x4110480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_289 0x4110484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_290 0x4110488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_291 0x411048C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_292 0x4110490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_293 0x4110494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_294 0x4110498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_295 0x411049C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_296 0x41104A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_297 0x41104A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_298 0x41104A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_299 0x41104AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_300 0x41104B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_301 0x41104B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_302 0x41104B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_303 0x41104BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_304 0x41104C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_305 0x41104C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_306 0x41104C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_307 0x41104CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_308 0x41104D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_309 0x41104D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_310 0x41104D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_311 0x41104DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_312 0x41104E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_313 0x41104E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_314 0x41104E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_315 0x41104EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_316 0x41104F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_317 0x41104F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_318 0x41104F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_319 0x41104FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_320 0x4110500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_321 0x4110504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_322 0x4110508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_323 0x411050C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_324 0x4110510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_325 0x4110514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_326 0x4110518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_327 0x411051C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_328 0x4110520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_329 0x4110524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_330 0x4110528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_331 0x411052C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_332 0x4110530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_333 0x4110534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_334 0x4110538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_335 0x411053C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_336 0x4110540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_337 0x4110544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_338 0x4110548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_339 0x411054C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_340 0x4110550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_341 0x4110554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_342 0x4110558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_343 0x411055C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_344 0x4110560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_345 0x4110564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_346 0x4110568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_347 0x411056C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_348 0x4110570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_349 0x4110574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_350 0x4110578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_351 0x411057C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_352 0x4110580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_353 0x4110584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_354 0x4110588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_355 0x411058C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_356 0x4110590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_357 0x4110594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_358 0x4110598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_359 0x411059C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_360 0x41105A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_361 0x41105A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_362 0x41105A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_363 0x41105AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_364 0x41105B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_365 0x41105B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_366 0x41105B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_367 0x41105BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_368 0x41105C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_369 0x41105C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_370 0x41105C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_371 0x41105CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_372 0x41105D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_373 0x41105D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_374 0x41105D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_375 0x41105DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_376 0x41105E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_377 0x41105E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_378 0x41105E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_379 0x41105EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_380 0x41105F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_381 0x41105F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_382 0x41105F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_383 0x41105FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_384 0x4110600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_385 0x4110604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_386 0x4110608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_387 0x411060C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_388 0x4110610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_389 0x4110614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_390 0x4110618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_391 0x411061C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_392 0x4110620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_393 0x4110624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_394 0x4110628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_395 0x411062C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_396 0x4110630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_397 0x4110634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_398 0x4110638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_399 0x411063C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_400 0x4110640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_401 0x4110644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_402 0x4110648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_403 0x411064C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_404 0x4110650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_405 0x4110654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_406 0x4110658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_407 0x411065C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_408 0x4110660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_409 0x4110664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_410 0x4110668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_411 0x411066C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_412 0x4110670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_413 0x4110674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_414 0x4110678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_415 0x411067C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_416 0x4110680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_417 0x4110684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_418 0x4110688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_419 0x411068C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_420 0x4110690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_421 0x4110694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_422 0x4110698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_423 0x411069C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_424 0x41106A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_425 0x41106A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_426 0x41106A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_427 0x41106AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_428 0x41106B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_429 0x41106B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_430 0x41106B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_431 0x41106BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_432 0x41106C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_433 0x41106C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_434 0x41106C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_435 0x41106CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_436 0x41106D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_437 0x41106D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_438 0x41106D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_439 0x41106DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_440 0x41106E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_441 0x41106E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_442 0x41106E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_443 0x41106EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_444 0x41106F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_445 0x41106F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_446 0x41106F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_447 0x41106FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_448 0x4110700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_449 0x4110704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_450 0x4110708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_451 0x411070C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_452 0x4110710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_453 0x4110714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_454 0x4110718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_455 0x411071C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_456 0x4110720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_457 0x4110724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_458 0x4110728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_459 0x411072C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_460 0x4110730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_461 0x4110734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_462 0x4110738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_463 0x411073C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_464 0x4110740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_465 0x4110744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_466 0x4110748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_467 0x411074C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_468 0x4110750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_469 0x4110754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_470 0x4110758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_471 0x411075C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_472 0x4110760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_473 0x4110764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_474 0x4110768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_475 0x411076C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_476 0x4110770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_477 0x4110774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_478 0x4110778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_479 0x411077C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_480 0x4110780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_481 0x4110784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_482 0x4110788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_483 0x411078C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_484 0x4110790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_485 0x4110794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_486 0x4110798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_487 0x411079C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_488 0x41107A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_489 0x41107A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_490 0x41107A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_491 0x41107AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_492 0x41107B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_493 0x41107B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_494 0x41107B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_495 0x41107BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_496 0x41107C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_497 0x41107C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_498 0x41107C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_499 0x41107CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_500 0x41107D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_501 0x41107D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_502 0x41107D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_503 0x41107DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_504 0x41107E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_505 0x41107E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_506 0x41107E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_507 0x41107EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_508 0x41107F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_509 0x41107F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_510 0x41107F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_511 0x41107FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_512 0x4110800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_513 0x4110804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_514 0x4110808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_515 0x411080C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_516 0x4110810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_517 0x4110814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_518 0x4110818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_519 0x411081C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_520 0x4110820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_521 0x4110824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_522 0x4110828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_523 0x411082C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_524 0x4110830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_525 0x4110834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_526 0x4110838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_527 0x411083C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_528 0x4110840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_529 0x4110844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_530 0x4110848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_531 0x411084C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_532 0x4110850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_533 0x4110854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_534 0x4110858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_535 0x411085C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_536 0x4110860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_537 0x4110864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_538 0x4110868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_539 0x411086C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_540 0x4110870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_541 0x4110874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_542 0x4110878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_543 0x411087C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_544 0x4110880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_545 0x4110884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_546 0x4110888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_547 0x411088C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_548 0x4110890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_549 0x4110894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_550 0x4110898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_551 0x411089C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_552 0x41108A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_553 0x41108A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_554 0x41108A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_555 0x41108AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_556 0x41108B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_557 0x41108B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_558 0x41108B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_559 0x41108BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_560 0x41108C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_561 0x41108C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_562 0x41108C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_563 0x41108CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_564 0x41108D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_565 0x41108D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_566 0x41108D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_567 0x41108DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_568 0x41108E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_569 0x41108E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_570 0x41108E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_571 0x41108EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_572 0x41108F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_573 0x41108F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_574 0x41108F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_575 0x41108FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_576 0x4110900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_577 0x4110904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_578 0x4110908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_579 0x411090C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_580 0x4110910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_581 0x4110914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_582 0x4110918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_583 0x411091C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_584 0x4110920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_585 0x4110924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_586 0x4110928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_587 0x411092C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_588 0x4110930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_589 0x4110934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_590 0x4110938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_591 0x411093C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_592 0x4110940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_593 0x4110944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_594 0x4110948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_595 0x411094C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_596 0x4110950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_597 0x4110954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_598 0x4110958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_599 0x411095C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_600 0x4110960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_601 0x4110964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_602 0x4110968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_603 0x411096C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_604 0x4110970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_605 0x4110974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_606 0x4110978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_607 0x411097C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_608 0x4110980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_609 0x4110984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_610 0x4110988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_611 0x411098C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_612 0x4110990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_613 0x4110994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_614 0x4110998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_615 0x411099C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_616 0x41109A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_617 0x41109A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_618 0x41109A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_619 0x41109AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_620 0x41109B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_621 0x41109B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_622 0x41109B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_623 0x41109BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_624 0x41109C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_625 0x41109C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_626 0x41109C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_627 0x41109CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_628 0x41109D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_629 0x41109D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_630 0x41109D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_631 0x41109DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_632 0x41109E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_633 0x41109E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_634 0x41109E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_635 0x41109EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_636 0x41109F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_637 0x41109F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_638 0x41109F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_639 0x41109FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_640 0x4110A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_641 0x4110A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_642 0x4110A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_643 0x4110A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_644 0x4110A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_645 0x4110A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_646 0x4110A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_647 0x4110A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_648 0x4110A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_649 0x4110A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_650 0x4110A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_651 0x4110A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_652 0x4110A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_653 0x4110A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_654 0x4110A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_655 0x4110A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_656 0x4110A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_657 0x4110A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_658 0x4110A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_659 0x4110A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_660 0x4110A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_661 0x4110A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_662 0x4110A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_663 0x4110A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_664 0x4110A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_665 0x4110A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_666 0x4110A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_667 0x4110A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_668 0x4110A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_669 0x4110A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_670 0x4110A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_671 0x4110A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_672 0x4110A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_673 0x4110A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_674 0x4110A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_675 0x4110A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_676 0x4110A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_677 0x4110A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_678 0x4110A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_679 0x4110A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_680 0x4110AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_681 0x4110AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_682 0x4110AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_683 0x4110AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_684 0x4110AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_685 0x4110AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_686 0x4110AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_687 0x4110ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_688 0x4110AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_689 0x4110AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_690 0x4110AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_691 0x4110ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_692 0x4110AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_693 0x4110AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_694 0x4110AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_695 0x4110ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_696 0x4110AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_697 0x4110AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_698 0x4110AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_699 0x4110AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_700 0x4110AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_701 0x4110AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_702 0x4110AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_703 0x4110AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_704 0x4110B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_705 0x4110B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_706 0x4110B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_707 0x4110B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_708 0x4110B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_709 0x4110B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_710 0x4110B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_711 0x4110B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_712 0x4110B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_713 0x4110B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_714 0x4110B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_715 0x4110B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_716 0x4110B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_717 0x4110B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_718 0x4110B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_719 0x4110B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_720 0x4110B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_721 0x4110B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_722 0x4110B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_723 0x4110B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_724 0x4110B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_725 0x4110B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_726 0x4110B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_727 0x4110B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_728 0x4110B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_729 0x4110B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_730 0x4110B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_731 0x4110B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_732 0x4110B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_733 0x4110B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_734 0x4110B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_735 0x4110B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_736 0x4110B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_737 0x4110B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_738 0x4110B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_739 0x4110B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_740 0x4110B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_741 0x4110B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_742 0x4110B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_743 0x4110B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_744 0x4110BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_745 0x4110BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_746 0x4110BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_747 0x4110BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_748 0x4110BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_749 0x4110BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_750 0x4110BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_751 0x4110BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_752 0x4110BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_753 0x4110BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_754 0x4110BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_755 0x4110BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_756 0x4110BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_757 0x4110BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_758 0x4110BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_759 0x4110BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_760 0x4110BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_761 0x4110BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_762 0x4110BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_763 0x4110BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_764 0x4110BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_765 0x4110BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_766 0x4110BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_767 0x4110BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_768 0x4110C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_769 0x4110C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_770 0x4110C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_771 0x4110C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_772 0x4110C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_773 0x4110C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_774 0x4110C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_775 0x4110C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_776 0x4110C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_777 0x4110C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_778 0x4110C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_779 0x4110C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_780 0x4110C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_781 0x4110C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_782 0x4110C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_783 0x4110C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_784 0x4110C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_785 0x4110C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_786 0x4110C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_787 0x4110C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_788 0x4110C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_789 0x4110C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_790 0x4110C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_791 0x4110C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_792 0x4110C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_793 0x4110C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_794 0x4110C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_795 0x4110C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_796 0x4110C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_797 0x4110C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_798 0x4110C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_799 0x4110C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_800 0x4110C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_801 0x4110C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_802 0x4110C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_803 0x4110C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_804 0x4110C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_805 0x4110C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_806 0x4110C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_807 0x4110C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_808 0x4110CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_809 0x4110CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_810 0x4110CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_811 0x4110CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_812 0x4110CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_813 0x4110CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_814 0x4110CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_815 0x4110CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_816 0x4110CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_817 0x4110CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_818 0x4110CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_819 0x4110CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_820 0x4110CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_821 0x4110CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_822 0x4110CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_823 0x4110CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_824 0x4110CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_825 0x4110CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_826 0x4110CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_827 0x4110CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_828 0x4110CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_829 0x4110CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_830 0x4110CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_831 0x4110CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_832 0x4110D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_833 0x4110D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_834 0x4110D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_835 0x4110D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_836 0x4110D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_837 0x4110D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_838 0x4110D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_839 0x4110D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_840 0x4110D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_841 0x4110D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_842 0x4110D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_843 0x4110D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_844 0x4110D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_845 0x4110D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_846 0x4110D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_847 0x4110D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_848 0x4110D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_849 0x4110D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_850 0x4110D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_851 0x4110D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_852 0x4110D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_853 0x4110D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_854 0x4110D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_855 0x4110D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_856 0x4110D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_857 0x4110D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_858 0x4110D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_859 0x4110D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_860 0x4110D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_861 0x4110D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_862 0x4110D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_863 0x4110D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_864 0x4110D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_865 0x4110D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_866 0x4110D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_867 0x4110D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_868 0x4110D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_869 0x4110D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_870 0x4110D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_871 0x4110D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_872 0x4110DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_873 0x4110DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_874 0x4110DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_875 0x4110DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_876 0x4110DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_877 0x4110DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_878 0x4110DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_879 0x4110DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_880 0x4110DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_881 0x4110DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_882 0x4110DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_883 0x4110DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_884 0x4110DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_885 0x4110DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_886 0x4110DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_887 0x4110DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_888 0x4110DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_889 0x4110DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_890 0x4110DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_891 0x4110DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_892 0x4110DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_893 0x4110DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_894 0x4110DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_895 0x4110DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_896 0x4110E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_897 0x4110E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_898 0x4110E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_899 0x4110E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_900 0x4110E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_901 0x4110E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_902 0x4110E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_903 0x4110E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_904 0x4110E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_905 0x4110E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_906 0x4110E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_907 0x4110E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_908 0x4110E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_909 0x4110E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_910 0x4110E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_911 0x4110E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_912 0x4110E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_913 0x4110E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_914 0x4110E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_915 0x4110E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_916 0x4110E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_917 0x4110E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_918 0x4110E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_919 0x4110E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_920 0x4110E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_921 0x4110E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_922 0x4110E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_923 0x4110E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_924 0x4110E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_925 0x4110E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_926 0x4110E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_927 0x4110E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_928 0x4110E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_929 0x4110E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_930 0x4110E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_931 0x4110E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_932 0x4110E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_933 0x4110E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_934 0x4110E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_935 0x4110E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_936 0x4110EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_937 0x4110EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_938 0x4110EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_939 0x4110EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_940 0x4110EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_941 0x4110EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_942 0x4110EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_943 0x4110EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_944 0x4110EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_945 0x4110EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_946 0x4110EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_947 0x4110ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_948 0x4110ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_949 0x4110ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_950 0x4110ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_951 0x4110EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_952 0x4110EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_953 0x4110EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_954 0x4110EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_955 0x4110EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_956 0x4110EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_957 0x4110EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_958 0x4110EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_959 0x4110EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_960 0x4110F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_961 0x4110F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_962 0x4110F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_963 0x4110F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_964 0x4110F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_965 0x4110F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_966 0x4110F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_967 0x4110F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_968 0x4110F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_969 0x4110F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_970 0x4110F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_971 0x4110F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_972 0x4110F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_973 0x4110F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_974 0x4110F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_975 0x4110F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_976 0x4110F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_977 0x4110F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_978 0x4110F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_979 0x4110F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_980 0x4110F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_981 0x4110F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_982 0x4110F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_983 0x4110F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_984 0x4110F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_985 0x4110F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_986 0x4110F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_987 0x4110F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_988 0x4110F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_989 0x4110F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_990 0x4110F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_991 0x4110F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_992 0x4110F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_993 0x4110F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_994 0x4110F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_995 0x4110F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_996 0x4110F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_997 0x4110F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_998 0x4110F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_999 0x4110F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1000 0x4110FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1001 0x4110FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1002 0x4110FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1003 0x4110FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1004 0x4110FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1005 0x4110FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1006 0x4110FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1007 0x4110FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1008 0x4110FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1009 0x4110FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1010 0x4110FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1011 0x4110FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1012 0x4110FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1013 0x4110FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1014 0x4110FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1015 0x4110FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1016 0x4110FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1017 0x4110FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1018 0x4110FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1019 0x4110FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1020 0x4110FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1021 0x4110FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1022 0x4110FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1023 0x4110FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1024 0x4111000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1025 0x4111004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1026 0x4111008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1027 0x411100C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1028 0x4111010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1029 0x4111014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1030 0x4111018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1031 0x411101C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1032 0x4111020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1033 0x4111024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1034 0x4111028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1035 0x411102C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1036 0x4111030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1037 0x4111034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1038 0x4111038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1039 0x411103C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1040 0x4111040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1041 0x4111044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1042 0x4111048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1043 0x411104C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1044 0x4111050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1045 0x4111054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1046 0x4111058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1047 0x411105C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1048 0x4111060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1049 0x4111064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1050 0x4111068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1051 0x411106C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1052 0x4111070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1053 0x4111074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1054 0x4111078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1055 0x411107C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1056 0x4111080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1057 0x4111084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1058 0x4111088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1059 0x411108C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1060 0x4111090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1061 0x4111094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1062 0x4111098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1063 0x411109C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1064 0x41110A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1065 0x41110A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1066 0x41110A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1067 0x41110AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1068 0x41110B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1069 0x41110B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1070 0x41110B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1071 0x41110BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1072 0x41110C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1073 0x41110C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1074 0x41110C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1075 0x41110CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1076 0x41110D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1077 0x41110D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1078 0x41110D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1079 0x41110DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1080 0x41110E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1081 0x41110E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1082 0x41110E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1083 0x41110EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1084 0x41110F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1085 0x41110F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1086 0x41110F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1087 0x41110FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1088 0x4111100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1089 0x4111104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1090 0x4111108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1091 0x411110C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1092 0x4111110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1093 0x4111114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1094 0x4111118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1095 0x411111C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1096 0x4111120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1097 0x4111124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1098 0x4111128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1099 0x411112C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1100 0x4111130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1101 0x4111134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1102 0x4111138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1103 0x411113C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1104 0x4111140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1105 0x4111144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1106 0x4111148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1107 0x411114C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1108 0x4111150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1109 0x4111154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1110 0x4111158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1111 0x411115C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1112 0x4111160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1113 0x4111164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1114 0x4111168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1115 0x411116C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1116 0x4111170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1117 0x4111174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1118 0x4111178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1119 0x411117C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1120 0x4111180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1121 0x4111184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1122 0x4111188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1123 0x411118C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1124 0x4111190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1125 0x4111194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1126 0x4111198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1127 0x411119C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1128 0x41111A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1129 0x41111A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1130 0x41111A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1131 0x41111AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1132 0x41111B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1133 0x41111B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1134 0x41111B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1135 0x41111BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1136 0x41111C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1137 0x41111C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1138 0x41111C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1139 0x41111CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1140 0x41111D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1141 0x41111D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1142 0x41111D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1143 0x41111DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1144 0x41111E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1145 0x41111E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1146 0x41111E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1147 0x41111EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1148 0x41111F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1149 0x41111F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1150 0x41111F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1151 0x41111FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1152 0x4111200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1153 0x4111204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1154 0x4111208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1155 0x411120C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1156 0x4111210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1157 0x4111214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1158 0x4111218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1159 0x411121C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1160 0x4111220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1161 0x4111224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1162 0x4111228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1163 0x411122C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1164 0x4111230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1165 0x4111234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1166 0x4111238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1167 0x411123C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1168 0x4111240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1169 0x4111244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1170 0x4111248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1171 0x411124C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1172 0x4111250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1173 0x4111254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1174 0x4111258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1175 0x411125C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1176 0x4111260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1177 0x4111264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1178 0x4111268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1179 0x411126C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1180 0x4111270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1181 0x4111274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1182 0x4111278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1183 0x411127C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1184 0x4111280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1185 0x4111284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1186 0x4111288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1187 0x411128C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1188 0x4111290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1189 0x4111294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1190 0x4111298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1191 0x411129C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1192 0x41112A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1193 0x41112A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1194 0x41112A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1195 0x41112AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1196 0x41112B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1197 0x41112B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1198 0x41112B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1199 0x41112BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1200 0x41112C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1201 0x41112C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1202 0x41112C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1203 0x41112CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1204 0x41112D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1205 0x41112D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1206 0x41112D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1207 0x41112DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1208 0x41112E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1209 0x41112E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1210 0x41112E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1211 0x41112EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1212 0x41112F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1213 0x41112F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1214 0x41112F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1215 0x41112FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1216 0x4111300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1217 0x4111304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1218 0x4111308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1219 0x411130C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1220 0x4111310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1221 0x4111314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1222 0x4111318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1223 0x411131C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1224 0x4111320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1225 0x4111324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1226 0x4111328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1227 0x411132C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1228 0x4111330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1229 0x4111334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1230 0x4111338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1231 0x411133C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1232 0x4111340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1233 0x4111344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1234 0x4111348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1235 0x411134C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1236 0x4111350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1237 0x4111354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1238 0x4111358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1239 0x411135C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1240 0x4111360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1241 0x4111364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1242 0x4111368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1243 0x411136C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1244 0x4111370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1245 0x4111374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1246 0x4111378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1247 0x411137C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1248 0x4111380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1249 0x4111384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1250 0x4111388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1251 0x411138C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1252 0x4111390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1253 0x4111394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1254 0x4111398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1255 0x411139C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1256 0x41113A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1257 0x41113A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1258 0x41113A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1259 0x41113AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1260 0x41113B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1261 0x41113B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1262 0x41113B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1263 0x41113BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1264 0x41113C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1265 0x41113C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1266 0x41113C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1267 0x41113CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1268 0x41113D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1269 0x41113D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1270 0x41113D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1271 0x41113DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1272 0x41113E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1273 0x41113E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1274 0x41113E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1275 0x41113EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1276 0x41113F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1277 0x41113F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1278 0x41113F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1279 0x41113FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1280 0x4111400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1281 0x4111404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1282 0x4111408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1283 0x411140C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1284 0x4111410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1285 0x4111414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1286 0x4111418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1287 0x411141C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1288 0x4111420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1289 0x4111424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1290 0x4111428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1291 0x411142C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1292 0x4111430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1293 0x4111434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1294 0x4111438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1295 0x411143C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1296 0x4111440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1297 0x4111444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1298 0x4111448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1299 0x411144C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1300 0x4111450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1301 0x4111454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1302 0x4111458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1303 0x411145C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1304 0x4111460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1305 0x4111464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1306 0x4111468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1307 0x411146C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1308 0x4111470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1309 0x4111474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1310 0x4111478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1311 0x411147C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1312 0x4111480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1313 0x4111484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1314 0x4111488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1315 0x411148C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1316 0x4111490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1317 0x4111494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1318 0x4111498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1319 0x411149C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1320 0x41114A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1321 0x41114A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1322 0x41114A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1323 0x41114AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1324 0x41114B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1325 0x41114B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1326 0x41114B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1327 0x41114BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1328 0x41114C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1329 0x41114C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1330 0x41114C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1331 0x41114CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1332 0x41114D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1333 0x41114D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1334 0x41114D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1335 0x41114DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1336 0x41114E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1337 0x41114E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1338 0x41114E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1339 0x41114EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1340 0x41114F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1341 0x41114F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1342 0x41114F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1343 0x41114FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1344 0x4111500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1345 0x4111504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1346 0x4111508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1347 0x411150C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1348 0x4111510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1349 0x4111514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1350 0x4111518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1351 0x411151C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1352 0x4111520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1353 0x4111524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1354 0x4111528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1355 0x411152C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1356 0x4111530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1357 0x4111534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1358 0x4111538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1359 0x411153C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1360 0x4111540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1361 0x4111544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1362 0x4111548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1363 0x411154C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1364 0x4111550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1365 0x4111554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1366 0x4111558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1367 0x411155C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1368 0x4111560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1369 0x4111564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1370 0x4111568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1371 0x411156C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1372 0x4111570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1373 0x4111574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1374 0x4111578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1375 0x411157C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1376 0x4111580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1377 0x4111584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1378 0x4111588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1379 0x411158C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1380 0x4111590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1381 0x4111594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1382 0x4111598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1383 0x411159C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1384 0x41115A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1385 0x41115A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1386 0x41115A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1387 0x41115AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1388 0x41115B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1389 0x41115B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1390 0x41115B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1391 0x41115BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1392 0x41115C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1393 0x41115C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1394 0x41115C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1395 0x41115CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1396 0x41115D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1397 0x41115D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1398 0x41115D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1399 0x41115DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1400 0x41115E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1401 0x41115E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1402 0x41115E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1403 0x41115EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1404 0x41115F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1405 0x41115F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1406 0x41115F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1407 0x41115FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1408 0x4111600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1409 0x4111604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1410 0x4111608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1411 0x411160C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1412 0x4111610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1413 0x4111614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1414 0x4111618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1415 0x411161C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1416 0x4111620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1417 0x4111624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1418 0x4111628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1419 0x411162C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1420 0x4111630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1421 0x4111634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1422 0x4111638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1423 0x411163C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1424 0x4111640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1425 0x4111644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1426 0x4111648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1427 0x411164C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1428 0x4111650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1429 0x4111654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1430 0x4111658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1431 0x411165C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1432 0x4111660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1433 0x4111664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1434 0x4111668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1435 0x411166C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1436 0x4111670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1437 0x4111674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1438 0x4111678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1439 0x411167C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1440 0x4111680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1441 0x4111684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1442 0x4111688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1443 0x411168C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1444 0x4111690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1445 0x4111694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1446 0x4111698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1447 0x411169C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1448 0x41116A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1449 0x41116A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1450 0x41116A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1451 0x41116AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1452 0x41116B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1453 0x41116B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1454 0x41116B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1455 0x41116BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1456 0x41116C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1457 0x41116C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1458 0x41116C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1459 0x41116CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1460 0x41116D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1461 0x41116D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1462 0x41116D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1463 0x41116DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1464 0x41116E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1465 0x41116E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1466 0x41116E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1467 0x41116EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1468 0x41116F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1469 0x41116F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1470 0x41116F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1471 0x41116FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1472 0x4111700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1473 0x4111704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1474 0x4111708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1475 0x411170C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1476 0x4111710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1477 0x4111714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1478 0x4111718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1479 0x411171C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1480 0x4111720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1481 0x4111724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1482 0x4111728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1483 0x411172C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1484 0x4111730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1485 0x4111734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1486 0x4111738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1487 0x411173C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1488 0x4111740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1489 0x4111744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1490 0x4111748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1491 0x411174C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1492 0x4111750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1493 0x4111754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1494 0x4111758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1495 0x411175C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1496 0x4111760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1497 0x4111764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1498 0x4111768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1499 0x411176C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1500 0x4111770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1501 0x4111774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1502 0x4111778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1503 0x411177C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1504 0x4111780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1505 0x4111784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1506 0x4111788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1507 0x411178C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1508 0x4111790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1509 0x4111794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1510 0x4111798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1511 0x411179C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1512 0x41117A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1513 0x41117A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1514 0x41117A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1515 0x41117AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1516 0x41117B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1517 0x41117B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1518 0x41117B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1519 0x41117BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1520 0x41117C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1521 0x41117C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1522 0x41117C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1523 0x41117CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1524 0x41117D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1525 0x41117D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1526 0x41117D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1527 0x41117DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1528 0x41117E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1529 0x41117E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1530 0x41117E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1531 0x41117EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1532 0x41117F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1533 0x41117F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1534 0x41117F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1535 0x41117FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1536 0x4111800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1537 0x4111804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1538 0x4111808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1539 0x411180C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1540 0x4111810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1541 0x4111814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1542 0x4111818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1543 0x411181C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1544 0x4111820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1545 0x4111824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1546 0x4111828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1547 0x411182C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1548 0x4111830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1549 0x4111834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1550 0x4111838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1551 0x411183C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1552 0x4111840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1553 0x4111844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1554 0x4111848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1555 0x411184C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1556 0x4111850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1557 0x4111854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1558 0x4111858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1559 0x411185C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1560 0x4111860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1561 0x4111864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1562 0x4111868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1563 0x411186C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1564 0x4111870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1565 0x4111874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1566 0x4111878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1567 0x411187C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1568 0x4111880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1569 0x4111884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1570 0x4111888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1571 0x411188C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1572 0x4111890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1573 0x4111894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1574 0x4111898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1575 0x411189C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1576 0x41118A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1577 0x41118A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1578 0x41118A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1579 0x41118AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1580 0x41118B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1581 0x41118B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1582 0x41118B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1583 0x41118BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1584 0x41118C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1585 0x41118C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1586 0x41118C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1587 0x41118CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1588 0x41118D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1589 0x41118D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1590 0x41118D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1591 0x41118DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1592 0x41118E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1593 0x41118E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1594 0x41118E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1595 0x41118EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1596 0x41118F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1597 0x41118F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1598 0x41118F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1599 0x41118FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1600 0x4111900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1601 0x4111904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1602 0x4111908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1603 0x411190C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1604 0x4111910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1605 0x4111914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1606 0x4111918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1607 0x411191C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1608 0x4111920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1609 0x4111924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1610 0x4111928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1611 0x411192C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1612 0x4111930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1613 0x4111934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1614 0x4111938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1615 0x411193C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1616 0x4111940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1617 0x4111944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1618 0x4111948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1619 0x411194C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1620 0x4111950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1621 0x4111954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1622 0x4111958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1623 0x411195C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1624 0x4111960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1625 0x4111964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1626 0x4111968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1627 0x411196C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1628 0x4111970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1629 0x4111974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1630 0x4111978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1631 0x411197C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1632 0x4111980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1633 0x4111984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1634 0x4111988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1635 0x411198C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1636 0x4111990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1637 0x4111994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1638 0x4111998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1639 0x411199C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1640 0x41119A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1641 0x41119A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1642 0x41119A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1643 0x41119AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1644 0x41119B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1645 0x41119B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1646 0x41119B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1647 0x41119BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1648 0x41119C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1649 0x41119C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1650 0x41119C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1651 0x41119CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1652 0x41119D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1653 0x41119D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1654 0x41119D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1655 0x41119DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1656 0x41119E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1657 0x41119E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1658 0x41119E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1659 0x41119EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1660 0x41119F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1661 0x41119F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1662 0x41119F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1663 0x41119FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1664 0x4111A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1665 0x4111A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1666 0x4111A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1667 0x4111A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1668 0x4111A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1669 0x4111A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1670 0x4111A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1671 0x4111A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1672 0x4111A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1673 0x4111A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1674 0x4111A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1675 0x4111A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1676 0x4111A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1677 0x4111A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1678 0x4111A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1679 0x4111A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1680 0x4111A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1681 0x4111A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1682 0x4111A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1683 0x4111A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1684 0x4111A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1685 0x4111A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1686 0x4111A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1687 0x4111A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1688 0x4111A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1689 0x4111A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1690 0x4111A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1691 0x4111A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1692 0x4111A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1693 0x4111A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1694 0x4111A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1695 0x4111A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1696 0x4111A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1697 0x4111A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1698 0x4111A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1699 0x4111A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1700 0x4111A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1701 0x4111A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1702 0x4111A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1703 0x4111A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1704 0x4111AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1705 0x4111AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1706 0x4111AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1707 0x4111AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1708 0x4111AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1709 0x4111AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1710 0x4111AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1711 0x4111ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1712 0x4111AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1713 0x4111AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1714 0x4111AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1715 0x4111ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1716 0x4111AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1717 0x4111AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1718 0x4111AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1719 0x4111ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1720 0x4111AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1721 0x4111AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1722 0x4111AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1723 0x4111AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1724 0x4111AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1725 0x4111AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1726 0x4111AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1727 0x4111AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1728 0x4111B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1729 0x4111B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1730 0x4111B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1731 0x4111B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1732 0x4111B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1733 0x4111B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1734 0x4111B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1735 0x4111B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1736 0x4111B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1737 0x4111B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1738 0x4111B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1739 0x4111B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1740 0x4111B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1741 0x4111B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1742 0x4111B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1743 0x4111B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1744 0x4111B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1745 0x4111B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1746 0x4111B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1747 0x4111B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1748 0x4111B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1749 0x4111B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1750 0x4111B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1751 0x4111B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1752 0x4111B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1753 0x4111B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1754 0x4111B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1755 0x4111B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1756 0x4111B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1757 0x4111B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1758 0x4111B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1759 0x4111B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1760 0x4111B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1761 0x4111B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1762 0x4111B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1763 0x4111B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1764 0x4111B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1765 0x4111B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1766 0x4111B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1767 0x4111B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1768 0x4111BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1769 0x4111BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1770 0x4111BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1771 0x4111BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1772 0x4111BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1773 0x4111BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1774 0x4111BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1775 0x4111BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1776 0x4111BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1777 0x4111BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1778 0x4111BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1779 0x4111BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1780 0x4111BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1781 0x4111BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1782 0x4111BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1783 0x4111BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1784 0x4111BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1785 0x4111BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1786 0x4111BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1787 0x4111BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1788 0x4111BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1789 0x4111BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1790 0x4111BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1791 0x4111BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1792 0x4111C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1793 0x4111C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1794 0x4111C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1795 0x4111C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1796 0x4111C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1797 0x4111C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1798 0x4111C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1799 0x4111C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1800 0x4111C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1801 0x4111C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1802 0x4111C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1803 0x4111C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1804 0x4111C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1805 0x4111C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1806 0x4111C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1807 0x4111C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1808 0x4111C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1809 0x4111C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1810 0x4111C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1811 0x4111C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1812 0x4111C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1813 0x4111C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1814 0x4111C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1815 0x4111C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1816 0x4111C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1817 0x4111C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1818 0x4111C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1819 0x4111C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1820 0x4111C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1821 0x4111C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1822 0x4111C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1823 0x4111C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1824 0x4111C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1825 0x4111C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1826 0x4111C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1827 0x4111C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1828 0x4111C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1829 0x4111C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1830 0x4111C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1831 0x4111C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1832 0x4111CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1833 0x4111CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1834 0x4111CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1835 0x4111CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1836 0x4111CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1837 0x4111CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1838 0x4111CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1839 0x4111CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1840 0x4111CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1841 0x4111CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1842 0x4111CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1843 0x4111CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1844 0x4111CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1845 0x4111CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1846 0x4111CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1847 0x4111CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1848 0x4111CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1849 0x4111CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1850 0x4111CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1851 0x4111CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1852 0x4111CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1853 0x4111CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1854 0x4111CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1855 0x4111CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1856 0x4111D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1857 0x4111D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1858 0x4111D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1859 0x4111D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1860 0x4111D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1861 0x4111D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1862 0x4111D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1863 0x4111D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1864 0x4111D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1865 0x4111D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1866 0x4111D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1867 0x4111D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1868 0x4111D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1869 0x4111D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1870 0x4111D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1871 0x4111D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1872 0x4111D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1873 0x4111D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1874 0x4111D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1875 0x4111D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1876 0x4111D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1877 0x4111D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1878 0x4111D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1879 0x4111D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1880 0x4111D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1881 0x4111D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1882 0x4111D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1883 0x4111D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1884 0x4111D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1885 0x4111D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1886 0x4111D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1887 0x4111D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1888 0x4111D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1889 0x4111D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1890 0x4111D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1891 0x4111D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1892 0x4111D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1893 0x4111D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1894 0x4111D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1895 0x4111D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1896 0x4111DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1897 0x4111DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1898 0x4111DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1899 0x4111DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1900 0x4111DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1901 0x4111DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1902 0x4111DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1903 0x4111DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1904 0x4111DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1905 0x4111DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1906 0x4111DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1907 0x4111DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1908 0x4111DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1909 0x4111DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1910 0x4111DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1911 0x4111DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1912 0x4111DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1913 0x4111DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1914 0x4111DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1915 0x4111DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1916 0x4111DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1917 0x4111DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1918 0x4111DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1919 0x4111DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1920 0x4111E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1921 0x4111E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1922 0x4111E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1923 0x4111E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1924 0x4111E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1925 0x4111E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1926 0x4111E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1927 0x4111E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1928 0x4111E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1929 0x4111E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1930 0x4111E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1931 0x4111E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1932 0x4111E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1933 0x4111E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1934 0x4111E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1935 0x4111E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1936 0x4111E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1937 0x4111E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1938 0x4111E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1939 0x4111E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1940 0x4111E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1941 0x4111E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1942 0x4111E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1943 0x4111E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1944 0x4111E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1945 0x4111E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1946 0x4111E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1947 0x4111E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1948 0x4111E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1949 0x4111E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1950 0x4111E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1951 0x4111E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1952 0x4111E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1953 0x4111E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1954 0x4111E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1955 0x4111E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1956 0x4111E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1957 0x4111E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1958 0x4111E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1959 0x4111E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1960 0x4111EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1961 0x4111EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1962 0x4111EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1963 0x4111EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1964 0x4111EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1965 0x4111EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1966 0x4111EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1967 0x4111EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1968 0x4111EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1969 0x4111EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1970 0x4111EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1971 0x4111ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1972 0x4111ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1973 0x4111ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1974 0x4111ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1975 0x4111EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1976 0x4111EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1977 0x4111EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1978 0x4111EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1979 0x4111EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1980 0x4111EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1981 0x4111EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1982 0x4111EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1983 0x4111EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1984 0x4111F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1985 0x4111F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1986 0x4111F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1987 0x4111F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1988 0x4111F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1989 0x4111F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1990 0x4111F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1991 0x4111F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1992 0x4111F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1993 0x4111F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1994 0x4111F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1995 0x4111F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1996 0x4111F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1997 0x4111F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1998 0x4111F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_1999 0x4111F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2000 0x4111F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2001 0x4111F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2002 0x4111F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2003 0x4111F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2004 0x4111F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2005 0x4111F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2006 0x4111F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2007 0x4111F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2008 0x4111F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2009 0x4111F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2010 0x4111F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2011 0x4111F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2012 0x4111F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2013 0x4111F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2014 0x4111F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2015 0x4111F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2016 0x4111F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2017 0x4111F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2018 0x4111F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2019 0x4111F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2020 0x4111F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2021 0x4111F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2022 0x4111F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2023 0x4111F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2024 0x4111FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2025 0x4111FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2026 0x4111FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2027 0x4111FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2028 0x4111FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2029 0x4111FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2030 0x4111FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2031 0x4111FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2032 0x4111FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2033 0x4111FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2034 0x4111FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2035 0x4111FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2036 0x4111FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2037 0x4111FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2038 0x4111FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2039 0x4111FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2040 0x4111FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2041 0x4111FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2042 0x4111FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2043 0x4111FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2044 0x4111FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2045 0x4111FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2046 0x4111FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_2047 0x4111FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 0x4112000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1 0x4112004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2 0x4112008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_3 0x411200C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_4 0x4112010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_5 0x4112014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_6 0x4112018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_7 0x411201C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_8 0x4112020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_9 0x4112024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_10 0x4112028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_11 0x411202C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_12 0x4112030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_13 0x4112034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_14 0x4112038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_15 0x411203C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_16 0x4112040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_17 0x4112044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_18 0x4112048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_19 0x411204C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_20 0x4112050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_21 0x4112054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_22 0x4112058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_23 0x411205C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_24 0x4112060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_25 0x4112064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_26 0x4112068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_27 0x411206C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_28 0x4112070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_29 0x4112074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_30 0x4112078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_31 0x411207C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_32 0x4112080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_33 0x4112084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_34 0x4112088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_35 0x411208C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_36 0x4112090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_37 0x4112094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_38 0x4112098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_39 0x411209C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_40 0x41120A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_41 0x41120A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_42 0x41120A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_43 0x41120AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_44 0x41120B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_45 0x41120B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_46 0x41120B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_47 0x41120BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_48 0x41120C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_49 0x41120C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_50 0x41120C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_51 0x41120CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_52 0x41120D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_53 0x41120D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_54 0x41120D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_55 0x41120DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_56 0x41120E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_57 0x41120E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_58 0x41120E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_59 0x41120EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_60 0x41120F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_61 0x41120F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_62 0x41120F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_63 0x41120FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_64 0x4112100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_65 0x4112104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_66 0x4112108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_67 0x411210C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_68 0x4112110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_69 0x4112114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_70 0x4112118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_71 0x411211C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_72 0x4112120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_73 0x4112124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_74 0x4112128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_75 0x411212C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_76 0x4112130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_77 0x4112134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_78 0x4112138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_79 0x411213C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_80 0x4112140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_81 0x4112144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_82 0x4112148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_83 0x411214C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_84 0x4112150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_85 0x4112154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_86 0x4112158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_87 0x411215C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_88 0x4112160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_89 0x4112164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_90 0x4112168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_91 0x411216C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_92 0x4112170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_93 0x4112174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_94 0x4112178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_95 0x411217C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_96 0x4112180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_97 0x4112184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_98 0x4112188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_99 0x411218C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_100 0x4112190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_101 0x4112194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_102 0x4112198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_103 0x411219C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_104 0x41121A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_105 0x41121A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_106 0x41121A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_107 0x41121AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_108 0x41121B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_109 0x41121B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_110 0x41121B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_111 0x41121BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_112 0x41121C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_113 0x41121C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_114 0x41121C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_115 0x41121CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_116 0x41121D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_117 0x41121D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_118 0x41121D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_119 0x41121DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_120 0x41121E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_121 0x41121E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_122 0x41121E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_123 0x41121EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_124 0x41121F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_125 0x41121F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_126 0x41121F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_127 0x41121FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_128 0x4112200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_129 0x4112204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_130 0x4112208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_131 0x411220C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_132 0x4112210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_133 0x4112214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_134 0x4112218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_135 0x411221C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_136 0x4112220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_137 0x4112224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_138 0x4112228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_139 0x411222C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_140 0x4112230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_141 0x4112234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_142 0x4112238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_143 0x411223C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_144 0x4112240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_145 0x4112244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_146 0x4112248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_147 0x411224C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_148 0x4112250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_149 0x4112254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_150 0x4112258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_151 0x411225C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_152 0x4112260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_153 0x4112264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_154 0x4112268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_155 0x411226C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_156 0x4112270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_157 0x4112274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_158 0x4112278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_159 0x411227C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_160 0x4112280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_161 0x4112284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_162 0x4112288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_163 0x411228C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_164 0x4112290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_165 0x4112294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_166 0x4112298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_167 0x411229C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_168 0x41122A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_169 0x41122A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_170 0x41122A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_171 0x41122AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_172 0x41122B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_173 0x41122B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_174 0x41122B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_175 0x41122BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_176 0x41122C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_177 0x41122C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_178 0x41122C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_179 0x41122CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_180 0x41122D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_181 0x41122D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_182 0x41122D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_183 0x41122DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_184 0x41122E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_185 0x41122E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_186 0x41122E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_187 0x41122EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_188 0x41122F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_189 0x41122F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_190 0x41122F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_191 0x41122FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_192 0x4112300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_193 0x4112304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_194 0x4112308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_195 0x411230C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_196 0x4112310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_197 0x4112314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_198 0x4112318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_199 0x411231C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_200 0x4112320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_201 0x4112324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_202 0x4112328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_203 0x411232C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_204 0x4112330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_205 0x4112334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_206 0x4112338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_207 0x411233C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_208 0x4112340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_209 0x4112344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_210 0x4112348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_211 0x411234C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_212 0x4112350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_213 0x4112354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_214 0x4112358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_215 0x411235C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_216 0x4112360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_217 0x4112364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_218 0x4112368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_219 0x411236C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_220 0x4112370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_221 0x4112374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_222 0x4112378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_223 0x411237C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_224 0x4112380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_225 0x4112384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_226 0x4112388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_227 0x411238C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_228 0x4112390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_229 0x4112394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_230 0x4112398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_231 0x411239C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_232 0x41123A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_233 0x41123A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_234 0x41123A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_235 0x41123AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_236 0x41123B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_237 0x41123B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_238 0x41123B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_239 0x41123BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_240 0x41123C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_241 0x41123C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_242 0x41123C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_243 0x41123CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_244 0x41123D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_245 0x41123D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_246 0x41123D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_247 0x41123DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_248 0x41123E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_249 0x41123E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_250 0x41123E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_251 0x41123EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_252 0x41123F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_253 0x41123F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_254 0x41123F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_255 0x41123FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_256 0x4112400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_257 0x4112404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_258 0x4112408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_259 0x411240C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_260 0x4112410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_261 0x4112414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_262 0x4112418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_263 0x411241C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_264 0x4112420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_265 0x4112424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_266 0x4112428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_267 0x411242C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_268 0x4112430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_269 0x4112434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_270 0x4112438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_271 0x411243C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_272 0x4112440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_273 0x4112444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_274 0x4112448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_275 0x411244C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_276 0x4112450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_277 0x4112454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_278 0x4112458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_279 0x411245C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_280 0x4112460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_281 0x4112464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_282 0x4112468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_283 0x411246C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_284 0x4112470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_285 0x4112474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_286 0x4112478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_287 0x411247C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_288 0x4112480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_289 0x4112484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_290 0x4112488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_291 0x411248C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_292 0x4112490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_293 0x4112494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_294 0x4112498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_295 0x411249C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_296 0x41124A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_297 0x41124A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_298 0x41124A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_299 0x41124AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_300 0x41124B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_301 0x41124B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_302 0x41124B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_303 0x41124BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_304 0x41124C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_305 0x41124C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_306 0x41124C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_307 0x41124CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_308 0x41124D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_309 0x41124D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_310 0x41124D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_311 0x41124DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_312 0x41124E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_313 0x41124E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_314 0x41124E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_315 0x41124EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_316 0x41124F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_317 0x41124F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_318 0x41124F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_319 0x41124FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_320 0x4112500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_321 0x4112504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_322 0x4112508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_323 0x411250C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_324 0x4112510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_325 0x4112514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_326 0x4112518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_327 0x411251C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_328 0x4112520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_329 0x4112524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_330 0x4112528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_331 0x411252C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_332 0x4112530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_333 0x4112534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_334 0x4112538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_335 0x411253C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_336 0x4112540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_337 0x4112544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_338 0x4112548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_339 0x411254C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_340 0x4112550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_341 0x4112554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_342 0x4112558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_343 0x411255C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_344 0x4112560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_345 0x4112564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_346 0x4112568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_347 0x411256C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_348 0x4112570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_349 0x4112574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_350 0x4112578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_351 0x411257C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_352 0x4112580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_353 0x4112584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_354 0x4112588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_355 0x411258C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_356 0x4112590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_357 0x4112594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_358 0x4112598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_359 0x411259C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_360 0x41125A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_361 0x41125A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_362 0x41125A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_363 0x41125AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_364 0x41125B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_365 0x41125B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_366 0x41125B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_367 0x41125BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_368 0x41125C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_369 0x41125C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_370 0x41125C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_371 0x41125CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_372 0x41125D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_373 0x41125D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_374 0x41125D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_375 0x41125DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_376 0x41125E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_377 0x41125E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_378 0x41125E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_379 0x41125EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_380 0x41125F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_381 0x41125F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_382 0x41125F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_383 0x41125FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_384 0x4112600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_385 0x4112604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_386 0x4112608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_387 0x411260C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_388 0x4112610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_389 0x4112614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_390 0x4112618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_391 0x411261C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_392 0x4112620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_393 0x4112624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_394 0x4112628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_395 0x411262C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_396 0x4112630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_397 0x4112634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_398 0x4112638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_399 0x411263C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_400 0x4112640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_401 0x4112644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_402 0x4112648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_403 0x411264C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_404 0x4112650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_405 0x4112654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_406 0x4112658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_407 0x411265C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_408 0x4112660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_409 0x4112664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_410 0x4112668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_411 0x411266C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_412 0x4112670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_413 0x4112674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_414 0x4112678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_415 0x411267C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_416 0x4112680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_417 0x4112684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_418 0x4112688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_419 0x411268C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_420 0x4112690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_421 0x4112694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_422 0x4112698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_423 0x411269C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_424 0x41126A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_425 0x41126A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_426 0x41126A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_427 0x41126AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_428 0x41126B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_429 0x41126B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_430 0x41126B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_431 0x41126BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_432 0x41126C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_433 0x41126C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_434 0x41126C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_435 0x41126CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_436 0x41126D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_437 0x41126D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_438 0x41126D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_439 0x41126DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_440 0x41126E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_441 0x41126E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_442 0x41126E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_443 0x41126EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_444 0x41126F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_445 0x41126F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_446 0x41126F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_447 0x41126FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_448 0x4112700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_449 0x4112704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_450 0x4112708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_451 0x411270C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_452 0x4112710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_453 0x4112714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_454 0x4112718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_455 0x411271C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_456 0x4112720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_457 0x4112724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_458 0x4112728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_459 0x411272C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_460 0x4112730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_461 0x4112734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_462 0x4112738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_463 0x411273C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_464 0x4112740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_465 0x4112744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_466 0x4112748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_467 0x411274C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_468 0x4112750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_469 0x4112754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_470 0x4112758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_471 0x411275C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_472 0x4112760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_473 0x4112764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_474 0x4112768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_475 0x411276C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_476 0x4112770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_477 0x4112774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_478 0x4112778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_479 0x411277C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_480 0x4112780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_481 0x4112784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_482 0x4112788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_483 0x411278C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_484 0x4112790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_485 0x4112794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_486 0x4112798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_487 0x411279C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_488 0x41127A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_489 0x41127A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_490 0x41127A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_491 0x41127AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_492 0x41127B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_493 0x41127B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_494 0x41127B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_495 0x41127BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_496 0x41127C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_497 0x41127C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_498 0x41127C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_499 0x41127CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_500 0x41127D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_501 0x41127D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_502 0x41127D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_503 0x41127DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_504 0x41127E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_505 0x41127E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_506 0x41127E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_507 0x41127EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_508 0x41127F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_509 0x41127F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_510 0x41127F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_511 0x41127FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_512 0x4112800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_513 0x4112804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_514 0x4112808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_515 0x411280C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_516 0x4112810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_517 0x4112814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_518 0x4112818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_519 0x411281C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_520 0x4112820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_521 0x4112824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_522 0x4112828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_523 0x411282C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_524 0x4112830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_525 0x4112834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_526 0x4112838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_527 0x411283C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_528 0x4112840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_529 0x4112844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_530 0x4112848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_531 0x411284C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_532 0x4112850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_533 0x4112854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_534 0x4112858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_535 0x411285C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_536 0x4112860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_537 0x4112864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_538 0x4112868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_539 0x411286C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_540 0x4112870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_541 0x4112874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_542 0x4112878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_543 0x411287C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_544 0x4112880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_545 0x4112884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_546 0x4112888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_547 0x411288C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_548 0x4112890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_549 0x4112894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_550 0x4112898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_551 0x411289C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_552 0x41128A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_553 0x41128A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_554 0x41128A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_555 0x41128AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_556 0x41128B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_557 0x41128B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_558 0x41128B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_559 0x41128BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_560 0x41128C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_561 0x41128C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_562 0x41128C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_563 0x41128CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_564 0x41128D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_565 0x41128D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_566 0x41128D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_567 0x41128DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_568 0x41128E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_569 0x41128E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_570 0x41128E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_571 0x41128EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_572 0x41128F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_573 0x41128F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_574 0x41128F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_575 0x41128FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_576 0x4112900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_577 0x4112904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_578 0x4112908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_579 0x411290C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_580 0x4112910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_581 0x4112914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_582 0x4112918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_583 0x411291C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_584 0x4112920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_585 0x4112924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_586 0x4112928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_587 0x411292C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_588 0x4112930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_589 0x4112934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_590 0x4112938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_591 0x411293C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_592 0x4112940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_593 0x4112944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_594 0x4112948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_595 0x411294C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_596 0x4112950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_597 0x4112954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_598 0x4112958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_599 0x411295C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_600 0x4112960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_601 0x4112964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_602 0x4112968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_603 0x411296C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_604 0x4112970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_605 0x4112974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_606 0x4112978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_607 0x411297C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_608 0x4112980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_609 0x4112984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_610 0x4112988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_611 0x411298C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_612 0x4112990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_613 0x4112994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_614 0x4112998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_615 0x411299C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_616 0x41129A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_617 0x41129A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_618 0x41129A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_619 0x41129AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_620 0x41129B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_621 0x41129B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_622 0x41129B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_623 0x41129BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_624 0x41129C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_625 0x41129C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_626 0x41129C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_627 0x41129CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_628 0x41129D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_629 0x41129D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_630 0x41129D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_631 0x41129DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_632 0x41129E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_633 0x41129E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_634 0x41129E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_635 0x41129EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_636 0x41129F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_637 0x41129F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_638 0x41129F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_639 0x41129FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_640 0x4112A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_641 0x4112A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_642 0x4112A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_643 0x4112A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_644 0x4112A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_645 0x4112A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_646 0x4112A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_647 0x4112A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_648 0x4112A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_649 0x4112A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_650 0x4112A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_651 0x4112A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_652 0x4112A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_653 0x4112A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_654 0x4112A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_655 0x4112A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_656 0x4112A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_657 0x4112A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_658 0x4112A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_659 0x4112A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_660 0x4112A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_661 0x4112A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_662 0x4112A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_663 0x4112A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_664 0x4112A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_665 0x4112A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_666 0x4112A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_667 0x4112A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_668 0x4112A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_669 0x4112A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_670 0x4112A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_671 0x4112A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_672 0x4112A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_673 0x4112A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_674 0x4112A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_675 0x4112A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_676 0x4112A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_677 0x4112A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_678 0x4112A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_679 0x4112A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_680 0x4112AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_681 0x4112AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_682 0x4112AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_683 0x4112AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_684 0x4112AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_685 0x4112AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_686 0x4112AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_687 0x4112ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_688 0x4112AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_689 0x4112AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_690 0x4112AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_691 0x4112ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_692 0x4112AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_693 0x4112AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_694 0x4112AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_695 0x4112ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_696 0x4112AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_697 0x4112AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_698 0x4112AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_699 0x4112AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_700 0x4112AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_701 0x4112AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_702 0x4112AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_703 0x4112AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_704 0x4112B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_705 0x4112B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_706 0x4112B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_707 0x4112B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_708 0x4112B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_709 0x4112B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_710 0x4112B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_711 0x4112B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_712 0x4112B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_713 0x4112B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_714 0x4112B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_715 0x4112B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_716 0x4112B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_717 0x4112B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_718 0x4112B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_719 0x4112B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_720 0x4112B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_721 0x4112B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_722 0x4112B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_723 0x4112B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_724 0x4112B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_725 0x4112B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_726 0x4112B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_727 0x4112B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_728 0x4112B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_729 0x4112B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_730 0x4112B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_731 0x4112B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_732 0x4112B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_733 0x4112B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_734 0x4112B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_735 0x4112B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_736 0x4112B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_737 0x4112B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_738 0x4112B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_739 0x4112B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_740 0x4112B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_741 0x4112B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_742 0x4112B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_743 0x4112B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_744 0x4112BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_745 0x4112BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_746 0x4112BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_747 0x4112BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_748 0x4112BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_749 0x4112BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_750 0x4112BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_751 0x4112BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_752 0x4112BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_753 0x4112BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_754 0x4112BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_755 0x4112BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_756 0x4112BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_757 0x4112BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_758 0x4112BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_759 0x4112BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_760 0x4112BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_761 0x4112BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_762 0x4112BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_763 0x4112BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_764 0x4112BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_765 0x4112BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_766 0x4112BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_767 0x4112BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_768 0x4112C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_769 0x4112C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_770 0x4112C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_771 0x4112C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_772 0x4112C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_773 0x4112C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_774 0x4112C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_775 0x4112C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_776 0x4112C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_777 0x4112C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_778 0x4112C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_779 0x4112C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_780 0x4112C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_781 0x4112C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_782 0x4112C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_783 0x4112C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_784 0x4112C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_785 0x4112C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_786 0x4112C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_787 0x4112C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_788 0x4112C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_789 0x4112C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_790 0x4112C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_791 0x4112C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_792 0x4112C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_793 0x4112C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_794 0x4112C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_795 0x4112C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_796 0x4112C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_797 0x4112C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_798 0x4112C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_799 0x4112C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_800 0x4112C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_801 0x4112C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_802 0x4112C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_803 0x4112C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_804 0x4112C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_805 0x4112C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_806 0x4112C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_807 0x4112C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_808 0x4112CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_809 0x4112CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_810 0x4112CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_811 0x4112CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_812 0x4112CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_813 0x4112CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_814 0x4112CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_815 0x4112CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_816 0x4112CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_817 0x4112CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_818 0x4112CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_819 0x4112CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_820 0x4112CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_821 0x4112CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_822 0x4112CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_823 0x4112CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_824 0x4112CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_825 0x4112CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_826 0x4112CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_827 0x4112CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_828 0x4112CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_829 0x4112CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_830 0x4112CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_831 0x4112CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_832 0x4112D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_833 0x4112D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_834 0x4112D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_835 0x4112D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_836 0x4112D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_837 0x4112D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_838 0x4112D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_839 0x4112D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_840 0x4112D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_841 0x4112D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_842 0x4112D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_843 0x4112D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_844 0x4112D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_845 0x4112D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_846 0x4112D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_847 0x4112D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_848 0x4112D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_849 0x4112D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_850 0x4112D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_851 0x4112D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_852 0x4112D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_853 0x4112D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_854 0x4112D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_855 0x4112D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_856 0x4112D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_857 0x4112D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_858 0x4112D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_859 0x4112D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_860 0x4112D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_861 0x4112D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_862 0x4112D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_863 0x4112D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_864 0x4112D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_865 0x4112D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_866 0x4112D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_867 0x4112D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_868 0x4112D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_869 0x4112D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_870 0x4112D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_871 0x4112D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_872 0x4112DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_873 0x4112DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_874 0x4112DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_875 0x4112DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_876 0x4112DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_877 0x4112DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_878 0x4112DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_879 0x4112DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_880 0x4112DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_881 0x4112DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_882 0x4112DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_883 0x4112DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_884 0x4112DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_885 0x4112DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_886 0x4112DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_887 0x4112DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_888 0x4112DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_889 0x4112DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_890 0x4112DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_891 0x4112DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_892 0x4112DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_893 0x4112DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_894 0x4112DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_895 0x4112DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_896 0x4112E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_897 0x4112E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_898 0x4112E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_899 0x4112E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_900 0x4112E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_901 0x4112E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_902 0x4112E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_903 0x4112E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_904 0x4112E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_905 0x4112E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_906 0x4112E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_907 0x4112E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_908 0x4112E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_909 0x4112E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_910 0x4112E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_911 0x4112E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_912 0x4112E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_913 0x4112E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_914 0x4112E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_915 0x4112E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_916 0x4112E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_917 0x4112E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_918 0x4112E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_919 0x4112E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_920 0x4112E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_921 0x4112E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_922 0x4112E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_923 0x4112E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_924 0x4112E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_925 0x4112E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_926 0x4112E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_927 0x4112E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_928 0x4112E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_929 0x4112E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_930 0x4112E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_931 0x4112E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_932 0x4112E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_933 0x4112E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_934 0x4112E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_935 0x4112E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_936 0x4112EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_937 0x4112EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_938 0x4112EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_939 0x4112EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_940 0x4112EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_941 0x4112EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_942 0x4112EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_943 0x4112EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_944 0x4112EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_945 0x4112EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_946 0x4112EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_947 0x4112ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_948 0x4112ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_949 0x4112ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_950 0x4112ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_951 0x4112EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_952 0x4112EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_953 0x4112EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_954 0x4112EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_955 0x4112EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_956 0x4112EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_957 0x4112EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_958 0x4112EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_959 0x4112EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_960 0x4112F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_961 0x4112F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_962 0x4112F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_963 0x4112F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_964 0x4112F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_965 0x4112F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_966 0x4112F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_967 0x4112F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_968 0x4112F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_969 0x4112F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_970 0x4112F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_971 0x4112F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_972 0x4112F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_973 0x4112F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_974 0x4112F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_975 0x4112F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_976 0x4112F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_977 0x4112F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_978 0x4112F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_979 0x4112F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_980 0x4112F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_981 0x4112F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_982 0x4112F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_983 0x4112F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_984 0x4112F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_985 0x4112F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_986 0x4112F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_987 0x4112F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_988 0x4112F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_989 0x4112F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_990 0x4112F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_991 0x4112F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_992 0x4112F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_993 0x4112F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_994 0x4112F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_995 0x4112F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_996 0x4112F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_997 0x4112F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_998 0x4112F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_999 0x4112F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1000 0x4112FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1001 0x4112FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1002 0x4112FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1003 0x4112FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1004 0x4112FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1005 0x4112FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1006 0x4112FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1007 0x4112FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1008 0x4112FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1009 0x4112FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1010 0x4112FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1011 0x4112FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1012 0x4112FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1013 0x4112FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1014 0x4112FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1015 0x4112FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1016 0x4112FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1017 0x4112FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1018 0x4112FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1019 0x4112FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1020 0x4112FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1021 0x4112FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1022 0x4112FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1023 0x4112FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1024 0x4113000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1025 0x4113004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1026 0x4113008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1027 0x411300C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1028 0x4113010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1029 0x4113014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1030 0x4113018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1031 0x411301C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1032 0x4113020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1033 0x4113024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1034 0x4113028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1035 0x411302C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1036 0x4113030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1037 0x4113034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1038 0x4113038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1039 0x411303C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1040 0x4113040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1041 0x4113044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1042 0x4113048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1043 0x411304C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1044 0x4113050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1045 0x4113054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1046 0x4113058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1047 0x411305C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1048 0x4113060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1049 0x4113064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1050 0x4113068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1051 0x411306C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1052 0x4113070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1053 0x4113074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1054 0x4113078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1055 0x411307C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1056 0x4113080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1057 0x4113084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1058 0x4113088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1059 0x411308C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1060 0x4113090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1061 0x4113094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1062 0x4113098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1063 0x411309C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1064 0x41130A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1065 0x41130A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1066 0x41130A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1067 0x41130AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1068 0x41130B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1069 0x41130B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1070 0x41130B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1071 0x41130BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1072 0x41130C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1073 0x41130C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1074 0x41130C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1075 0x41130CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1076 0x41130D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1077 0x41130D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1078 0x41130D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1079 0x41130DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1080 0x41130E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1081 0x41130E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1082 0x41130E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1083 0x41130EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1084 0x41130F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1085 0x41130F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1086 0x41130F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1087 0x41130FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1088 0x4113100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1089 0x4113104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1090 0x4113108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1091 0x411310C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1092 0x4113110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1093 0x4113114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1094 0x4113118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1095 0x411311C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1096 0x4113120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1097 0x4113124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1098 0x4113128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1099 0x411312C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1100 0x4113130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1101 0x4113134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1102 0x4113138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1103 0x411313C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1104 0x4113140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1105 0x4113144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1106 0x4113148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1107 0x411314C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1108 0x4113150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1109 0x4113154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1110 0x4113158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1111 0x411315C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1112 0x4113160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1113 0x4113164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1114 0x4113168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1115 0x411316C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1116 0x4113170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1117 0x4113174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1118 0x4113178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1119 0x411317C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1120 0x4113180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1121 0x4113184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1122 0x4113188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1123 0x411318C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1124 0x4113190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1125 0x4113194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1126 0x4113198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1127 0x411319C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1128 0x41131A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1129 0x41131A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1130 0x41131A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1131 0x41131AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1132 0x41131B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1133 0x41131B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1134 0x41131B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1135 0x41131BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1136 0x41131C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1137 0x41131C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1138 0x41131C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1139 0x41131CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1140 0x41131D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1141 0x41131D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1142 0x41131D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1143 0x41131DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1144 0x41131E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1145 0x41131E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1146 0x41131E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1147 0x41131EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1148 0x41131F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1149 0x41131F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1150 0x41131F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1151 0x41131FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1152 0x4113200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1153 0x4113204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1154 0x4113208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1155 0x411320C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1156 0x4113210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1157 0x4113214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1158 0x4113218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1159 0x411321C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1160 0x4113220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1161 0x4113224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1162 0x4113228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1163 0x411322C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1164 0x4113230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1165 0x4113234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1166 0x4113238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1167 0x411323C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1168 0x4113240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1169 0x4113244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1170 0x4113248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1171 0x411324C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1172 0x4113250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1173 0x4113254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1174 0x4113258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1175 0x411325C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1176 0x4113260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1177 0x4113264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1178 0x4113268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1179 0x411326C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1180 0x4113270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1181 0x4113274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1182 0x4113278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1183 0x411327C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1184 0x4113280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1185 0x4113284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1186 0x4113288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1187 0x411328C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1188 0x4113290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1189 0x4113294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1190 0x4113298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1191 0x411329C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1192 0x41132A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1193 0x41132A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1194 0x41132A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1195 0x41132AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1196 0x41132B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1197 0x41132B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1198 0x41132B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1199 0x41132BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1200 0x41132C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1201 0x41132C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1202 0x41132C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1203 0x41132CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1204 0x41132D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1205 0x41132D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1206 0x41132D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1207 0x41132DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1208 0x41132E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1209 0x41132E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1210 0x41132E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1211 0x41132EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1212 0x41132F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1213 0x41132F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1214 0x41132F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1215 0x41132FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1216 0x4113300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1217 0x4113304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1218 0x4113308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1219 0x411330C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1220 0x4113310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1221 0x4113314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1222 0x4113318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1223 0x411331C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1224 0x4113320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1225 0x4113324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1226 0x4113328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1227 0x411332C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1228 0x4113330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1229 0x4113334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1230 0x4113338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1231 0x411333C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1232 0x4113340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1233 0x4113344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1234 0x4113348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1235 0x411334C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1236 0x4113350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1237 0x4113354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1238 0x4113358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1239 0x411335C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1240 0x4113360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1241 0x4113364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1242 0x4113368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1243 0x411336C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1244 0x4113370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1245 0x4113374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1246 0x4113378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1247 0x411337C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1248 0x4113380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1249 0x4113384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1250 0x4113388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1251 0x411338C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1252 0x4113390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1253 0x4113394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1254 0x4113398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1255 0x411339C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1256 0x41133A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1257 0x41133A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1258 0x41133A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1259 0x41133AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1260 0x41133B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1261 0x41133B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1262 0x41133B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1263 0x41133BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1264 0x41133C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1265 0x41133C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1266 0x41133C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1267 0x41133CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1268 0x41133D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1269 0x41133D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1270 0x41133D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1271 0x41133DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1272 0x41133E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1273 0x41133E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1274 0x41133E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1275 0x41133EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1276 0x41133F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1277 0x41133F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1278 0x41133F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1279 0x41133FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1280 0x4113400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1281 0x4113404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1282 0x4113408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1283 0x411340C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1284 0x4113410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1285 0x4113414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1286 0x4113418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1287 0x411341C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1288 0x4113420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1289 0x4113424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1290 0x4113428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1291 0x411342C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1292 0x4113430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1293 0x4113434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1294 0x4113438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1295 0x411343C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1296 0x4113440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1297 0x4113444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1298 0x4113448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1299 0x411344C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1300 0x4113450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1301 0x4113454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1302 0x4113458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1303 0x411345C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1304 0x4113460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1305 0x4113464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1306 0x4113468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1307 0x411346C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1308 0x4113470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1309 0x4113474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1310 0x4113478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1311 0x411347C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1312 0x4113480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1313 0x4113484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1314 0x4113488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1315 0x411348C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1316 0x4113490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1317 0x4113494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1318 0x4113498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1319 0x411349C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1320 0x41134A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1321 0x41134A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1322 0x41134A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1323 0x41134AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1324 0x41134B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1325 0x41134B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1326 0x41134B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1327 0x41134BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1328 0x41134C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1329 0x41134C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1330 0x41134C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1331 0x41134CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1332 0x41134D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1333 0x41134D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1334 0x41134D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1335 0x41134DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1336 0x41134E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1337 0x41134E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1338 0x41134E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1339 0x41134EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1340 0x41134F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1341 0x41134F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1342 0x41134F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1343 0x41134FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1344 0x4113500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1345 0x4113504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1346 0x4113508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1347 0x411350C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1348 0x4113510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1349 0x4113514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1350 0x4113518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1351 0x411351C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1352 0x4113520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1353 0x4113524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1354 0x4113528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1355 0x411352C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1356 0x4113530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1357 0x4113534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1358 0x4113538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1359 0x411353C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1360 0x4113540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1361 0x4113544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1362 0x4113548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1363 0x411354C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1364 0x4113550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1365 0x4113554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1366 0x4113558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1367 0x411355C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1368 0x4113560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1369 0x4113564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1370 0x4113568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1371 0x411356C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1372 0x4113570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1373 0x4113574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1374 0x4113578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1375 0x411357C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1376 0x4113580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1377 0x4113584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1378 0x4113588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1379 0x411358C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1380 0x4113590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1381 0x4113594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1382 0x4113598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1383 0x411359C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1384 0x41135A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1385 0x41135A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1386 0x41135A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1387 0x41135AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1388 0x41135B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1389 0x41135B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1390 0x41135B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1391 0x41135BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1392 0x41135C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1393 0x41135C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1394 0x41135C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1395 0x41135CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1396 0x41135D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1397 0x41135D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1398 0x41135D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1399 0x41135DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1400 0x41135E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1401 0x41135E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1402 0x41135E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1403 0x41135EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1404 0x41135F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1405 0x41135F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1406 0x41135F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1407 0x41135FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1408 0x4113600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1409 0x4113604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1410 0x4113608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1411 0x411360C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1412 0x4113610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1413 0x4113614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1414 0x4113618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1415 0x411361C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1416 0x4113620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1417 0x4113624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1418 0x4113628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1419 0x411362C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1420 0x4113630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1421 0x4113634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1422 0x4113638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1423 0x411363C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1424 0x4113640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1425 0x4113644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1426 0x4113648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1427 0x411364C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1428 0x4113650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1429 0x4113654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1430 0x4113658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1431 0x411365C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1432 0x4113660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1433 0x4113664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1434 0x4113668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1435 0x411366C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1436 0x4113670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1437 0x4113674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1438 0x4113678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1439 0x411367C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1440 0x4113680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1441 0x4113684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1442 0x4113688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1443 0x411368C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1444 0x4113690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1445 0x4113694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1446 0x4113698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1447 0x411369C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1448 0x41136A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1449 0x41136A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1450 0x41136A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1451 0x41136AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1452 0x41136B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1453 0x41136B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1454 0x41136B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1455 0x41136BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1456 0x41136C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1457 0x41136C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1458 0x41136C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1459 0x41136CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1460 0x41136D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1461 0x41136D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1462 0x41136D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1463 0x41136DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1464 0x41136E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1465 0x41136E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1466 0x41136E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1467 0x41136EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1468 0x41136F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1469 0x41136F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1470 0x41136F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1471 0x41136FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1472 0x4113700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1473 0x4113704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1474 0x4113708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1475 0x411370C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1476 0x4113710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1477 0x4113714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1478 0x4113718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1479 0x411371C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1480 0x4113720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1481 0x4113724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1482 0x4113728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1483 0x411372C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1484 0x4113730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1485 0x4113734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1486 0x4113738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1487 0x411373C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1488 0x4113740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1489 0x4113744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1490 0x4113748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1491 0x411374C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1492 0x4113750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1493 0x4113754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1494 0x4113758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1495 0x411375C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1496 0x4113760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1497 0x4113764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1498 0x4113768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1499 0x411376C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1500 0x4113770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1501 0x4113774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1502 0x4113778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1503 0x411377C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1504 0x4113780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1505 0x4113784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1506 0x4113788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1507 0x411378C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1508 0x4113790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1509 0x4113794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1510 0x4113798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1511 0x411379C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1512 0x41137A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1513 0x41137A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1514 0x41137A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1515 0x41137AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1516 0x41137B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1517 0x41137B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1518 0x41137B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1519 0x41137BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1520 0x41137C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1521 0x41137C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1522 0x41137C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1523 0x41137CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1524 0x41137D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1525 0x41137D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1526 0x41137D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1527 0x41137DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1528 0x41137E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1529 0x41137E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1530 0x41137E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1531 0x41137EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1532 0x41137F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1533 0x41137F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1534 0x41137F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1535 0x41137FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1536 0x4113800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1537 0x4113804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1538 0x4113808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1539 0x411380C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1540 0x4113810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1541 0x4113814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1542 0x4113818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1543 0x411381C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1544 0x4113820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1545 0x4113824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1546 0x4113828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1547 0x411382C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1548 0x4113830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1549 0x4113834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1550 0x4113838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1551 0x411383C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1552 0x4113840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1553 0x4113844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1554 0x4113848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1555 0x411384C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1556 0x4113850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1557 0x4113854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1558 0x4113858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1559 0x411385C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1560 0x4113860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1561 0x4113864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1562 0x4113868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1563 0x411386C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1564 0x4113870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1565 0x4113874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1566 0x4113878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1567 0x411387C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1568 0x4113880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1569 0x4113884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1570 0x4113888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1571 0x411388C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1572 0x4113890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1573 0x4113894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1574 0x4113898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1575 0x411389C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1576 0x41138A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1577 0x41138A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1578 0x41138A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1579 0x41138AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1580 0x41138B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1581 0x41138B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1582 0x41138B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1583 0x41138BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1584 0x41138C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1585 0x41138C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1586 0x41138C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1587 0x41138CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1588 0x41138D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1589 0x41138D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1590 0x41138D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1591 0x41138DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1592 0x41138E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1593 0x41138E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1594 0x41138E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1595 0x41138EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1596 0x41138F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1597 0x41138F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1598 0x41138F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1599 0x41138FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1600 0x4113900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1601 0x4113904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1602 0x4113908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1603 0x411390C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1604 0x4113910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1605 0x4113914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1606 0x4113918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1607 0x411391C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1608 0x4113920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1609 0x4113924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1610 0x4113928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1611 0x411392C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1612 0x4113930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1613 0x4113934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1614 0x4113938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1615 0x411393C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1616 0x4113940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1617 0x4113944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1618 0x4113948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1619 0x411394C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1620 0x4113950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1621 0x4113954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1622 0x4113958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1623 0x411395C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1624 0x4113960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1625 0x4113964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1626 0x4113968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1627 0x411396C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1628 0x4113970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1629 0x4113974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1630 0x4113978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1631 0x411397C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1632 0x4113980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1633 0x4113984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1634 0x4113988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1635 0x411398C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1636 0x4113990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1637 0x4113994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1638 0x4113998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1639 0x411399C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1640 0x41139A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1641 0x41139A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1642 0x41139A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1643 0x41139AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1644 0x41139B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1645 0x41139B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1646 0x41139B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1647 0x41139BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1648 0x41139C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1649 0x41139C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1650 0x41139C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1651 0x41139CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1652 0x41139D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1653 0x41139D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1654 0x41139D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1655 0x41139DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1656 0x41139E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1657 0x41139E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1658 0x41139E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1659 0x41139EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1660 0x41139F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1661 0x41139F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1662 0x41139F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1663 0x41139FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1664 0x4113A00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1665 0x4113A04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1666 0x4113A08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1667 0x4113A0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1668 0x4113A10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1669 0x4113A14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1670 0x4113A18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1671 0x4113A1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1672 0x4113A20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1673 0x4113A24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1674 0x4113A28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1675 0x4113A2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1676 0x4113A30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1677 0x4113A34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1678 0x4113A38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1679 0x4113A3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1680 0x4113A40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1681 0x4113A44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1682 0x4113A48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1683 0x4113A4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1684 0x4113A50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1685 0x4113A54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1686 0x4113A58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1687 0x4113A5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1688 0x4113A60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1689 0x4113A64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1690 0x4113A68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1691 0x4113A6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1692 0x4113A70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1693 0x4113A74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1694 0x4113A78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1695 0x4113A7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1696 0x4113A80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1697 0x4113A84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1698 0x4113A88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1699 0x4113A8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1700 0x4113A90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1701 0x4113A94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1702 0x4113A98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1703 0x4113A9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1704 0x4113AA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1705 0x4113AA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1706 0x4113AA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1707 0x4113AAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1708 0x4113AB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1709 0x4113AB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1710 0x4113AB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1711 0x4113ABC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1712 0x4113AC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1713 0x4113AC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1714 0x4113AC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1715 0x4113ACC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1716 0x4113AD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1717 0x4113AD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1718 0x4113AD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1719 0x4113ADC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1720 0x4113AE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1721 0x4113AE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1722 0x4113AE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1723 0x4113AEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1724 0x4113AF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1725 0x4113AF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1726 0x4113AF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1727 0x4113AFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1728 0x4113B00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1729 0x4113B04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1730 0x4113B08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1731 0x4113B0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1732 0x4113B10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1733 0x4113B14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1734 0x4113B18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1735 0x4113B1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1736 0x4113B20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1737 0x4113B24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1738 0x4113B28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1739 0x4113B2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1740 0x4113B30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1741 0x4113B34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1742 0x4113B38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1743 0x4113B3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1744 0x4113B40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1745 0x4113B44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1746 0x4113B48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1747 0x4113B4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1748 0x4113B50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1749 0x4113B54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1750 0x4113B58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1751 0x4113B5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1752 0x4113B60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1753 0x4113B64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1754 0x4113B68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1755 0x4113B6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1756 0x4113B70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1757 0x4113B74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1758 0x4113B78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1759 0x4113B7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1760 0x4113B80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1761 0x4113B84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1762 0x4113B88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1763 0x4113B8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1764 0x4113B90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1765 0x4113B94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1766 0x4113B98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1767 0x4113B9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1768 0x4113BA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1769 0x4113BA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1770 0x4113BA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1771 0x4113BAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1772 0x4113BB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1773 0x4113BB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1774 0x4113BB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1775 0x4113BBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1776 0x4113BC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1777 0x4113BC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1778 0x4113BC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1779 0x4113BCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1780 0x4113BD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1781 0x4113BD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1782 0x4113BD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1783 0x4113BDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1784 0x4113BE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1785 0x4113BE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1786 0x4113BE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1787 0x4113BEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1788 0x4113BF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1789 0x4113BF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1790 0x4113BF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1791 0x4113BFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1792 0x4113C00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1793 0x4113C04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1794 0x4113C08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1795 0x4113C0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1796 0x4113C10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1797 0x4113C14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1798 0x4113C18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1799 0x4113C1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1800 0x4113C20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1801 0x4113C24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1802 0x4113C28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1803 0x4113C2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1804 0x4113C30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1805 0x4113C34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1806 0x4113C38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1807 0x4113C3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1808 0x4113C40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1809 0x4113C44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1810 0x4113C48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1811 0x4113C4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1812 0x4113C50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1813 0x4113C54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1814 0x4113C58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1815 0x4113C5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1816 0x4113C60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1817 0x4113C64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1818 0x4113C68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1819 0x4113C6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1820 0x4113C70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1821 0x4113C74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1822 0x4113C78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1823 0x4113C7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1824 0x4113C80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1825 0x4113C84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1826 0x4113C88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1827 0x4113C8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1828 0x4113C90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1829 0x4113C94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1830 0x4113C98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1831 0x4113C9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1832 0x4113CA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1833 0x4113CA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1834 0x4113CA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1835 0x4113CAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1836 0x4113CB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1837 0x4113CB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1838 0x4113CB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1839 0x4113CBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1840 0x4113CC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1841 0x4113CC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1842 0x4113CC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1843 0x4113CCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1844 0x4113CD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1845 0x4113CD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1846 0x4113CD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1847 0x4113CDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1848 0x4113CE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1849 0x4113CE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1850 0x4113CE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1851 0x4113CEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1852 0x4113CF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1853 0x4113CF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1854 0x4113CF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1855 0x4113CFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1856 0x4113D00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1857 0x4113D04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1858 0x4113D08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1859 0x4113D0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1860 0x4113D10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1861 0x4113D14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1862 0x4113D18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1863 0x4113D1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1864 0x4113D20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1865 0x4113D24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1866 0x4113D28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1867 0x4113D2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1868 0x4113D30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1869 0x4113D34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1870 0x4113D38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1871 0x4113D3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1872 0x4113D40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1873 0x4113D44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1874 0x4113D48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1875 0x4113D4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1876 0x4113D50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1877 0x4113D54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1878 0x4113D58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1879 0x4113D5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1880 0x4113D60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1881 0x4113D64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1882 0x4113D68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1883 0x4113D6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1884 0x4113D70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1885 0x4113D74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1886 0x4113D78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1887 0x4113D7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1888 0x4113D80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1889 0x4113D84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1890 0x4113D88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1891 0x4113D8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1892 0x4113D90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1893 0x4113D94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1894 0x4113D98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1895 0x4113D9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1896 0x4113DA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1897 0x4113DA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1898 0x4113DA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1899 0x4113DAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1900 0x4113DB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1901 0x4113DB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1902 0x4113DB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1903 0x4113DBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1904 0x4113DC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1905 0x4113DC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1906 0x4113DC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1907 0x4113DCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1908 0x4113DD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1909 0x4113DD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1910 0x4113DD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1911 0x4113DDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1912 0x4113DE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1913 0x4113DE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1914 0x4113DE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1915 0x4113DEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1916 0x4113DF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1917 0x4113DF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1918 0x4113DF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1919 0x4113DFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1920 0x4113E00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1921 0x4113E04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1922 0x4113E08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1923 0x4113E0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1924 0x4113E10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1925 0x4113E14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1926 0x4113E18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1927 0x4113E1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1928 0x4113E20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1929 0x4113E24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1930 0x4113E28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1931 0x4113E2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1932 0x4113E30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1933 0x4113E34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1934 0x4113E38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1935 0x4113E3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1936 0x4113E40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1937 0x4113E44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1938 0x4113E48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1939 0x4113E4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1940 0x4113E50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1941 0x4113E54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1942 0x4113E58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1943 0x4113E5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1944 0x4113E60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1945 0x4113E64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1946 0x4113E68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1947 0x4113E6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1948 0x4113E70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1949 0x4113E74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1950 0x4113E78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1951 0x4113E7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1952 0x4113E80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1953 0x4113E84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1954 0x4113E88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1955 0x4113E8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1956 0x4113E90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1957 0x4113E94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1958 0x4113E98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1959 0x4113E9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1960 0x4113EA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1961 0x4113EA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1962 0x4113EA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1963 0x4113EAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1964 0x4113EB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1965 0x4113EB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1966 0x4113EB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1967 0x4113EBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1968 0x4113EC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1969 0x4113EC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1970 0x4113EC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1971 0x4113ECC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1972 0x4113ED0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1973 0x4113ED4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1974 0x4113ED8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1975 0x4113EDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1976 0x4113EE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1977 0x4113EE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1978 0x4113EE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1979 0x4113EEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1980 0x4113EF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1981 0x4113EF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1982 0x4113EF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1983 0x4113EFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1984 0x4113F00
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1985 0x4113F04
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1986 0x4113F08
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1987 0x4113F0C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1988 0x4113F10
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1989 0x4113F14
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1990 0x4113F18
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1991 0x4113F1C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1992 0x4113F20
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1993 0x4113F24
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1994 0x4113F28
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1995 0x4113F2C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1996 0x4113F30
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1997 0x4113F34
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1998 0x4113F38
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_1999 0x4113F3C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2000 0x4113F40
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2001 0x4113F44
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2002 0x4113F48
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2003 0x4113F4C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2004 0x4113F50
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2005 0x4113F54
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2006 0x4113F58
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2007 0x4113F5C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2008 0x4113F60
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2009 0x4113F64
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2010 0x4113F68
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2011 0x4113F6C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2012 0x4113F70
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2013 0x4113F74
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2014 0x4113F78
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2015 0x4113F7C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2016 0x4113F80
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2017 0x4113F84
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2018 0x4113F88
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2019 0x4113F8C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2020 0x4113F90
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2021 0x4113F94
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2022 0x4113F98
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2023 0x4113F9C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2024 0x4113FA0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2025 0x4113FA4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2026 0x4113FA8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2027 0x4113FAC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2028 0x4113FB0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2029 0x4113FB4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2030 0x4113FB8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2031 0x4113FBC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2032 0x4113FC0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2033 0x4113FC4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2034 0x4113FC8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2035 0x4113FCC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2036 0x4113FD0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2037 0x4113FD4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2038 0x4113FD8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2039 0x4113FDC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2040 0x4113FE0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2041 0x4113FE4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2042 0x4113FE8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2043 0x4113FEC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2044 0x4113FF0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2045 0x4113FF4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2046 0x4113FF8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2047 0x4113FFC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 0x4114000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_1 0x4114004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_2 0x4114008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_3 0x411400C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_4 0x4114010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_5 0x4114014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_6 0x4114018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_7 0x411401C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_8 0x4114020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_9 0x4114024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_10 0x4114028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_11 0x411402C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_12 0x4114030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_13 0x4114034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_14 0x4114038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_15 0x411403C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_16 0x4114040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_17 0x4114044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_18 0x4114048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_19 0x411404C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_20 0x4114050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_21 0x4114054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_22 0x4114058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_23 0x411405C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_24 0x4114060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_25 0x4114064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_26 0x4114068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_27 0x411406C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_28 0x4114070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_29 0x4114074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_30 0x4114078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_31 0x411407C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_32 0x4114080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_33 0x4114084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_34 0x4114088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_35 0x411408C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_36 0x4114090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_37 0x4114094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_38 0x4114098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_39 0x411409C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_40 0x41140A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_41 0x41140A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_42 0x41140A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_43 0x41140AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_44 0x41140B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_45 0x41140B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_46 0x41140B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_47 0x41140BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_48 0x41140C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_49 0x41140C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_50 0x41140C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_51 0x41140CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_52 0x41140D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_53 0x41140D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_54 0x41140D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_55 0x41140DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_56 0x41140E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_57 0x41140E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_58 0x41140E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_59 0x41140EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_60 0x41140F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_61 0x41140F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_62 0x41140F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_63 0x41140FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_64 0x4114100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_65 0x4114104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_66 0x4114108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_67 0x411410C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_68 0x4114110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_69 0x4114114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_70 0x4114118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_71 0x411411C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_72 0x4114120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_73 0x4114124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_74 0x4114128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_75 0x411412C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_76 0x4114130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_77 0x4114134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_78 0x4114138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_79 0x411413C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_80 0x4114140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_81 0x4114144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_82 0x4114148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_83 0x411414C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_84 0x4114150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_85 0x4114154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_86 0x4114158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_87 0x411415C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_88 0x4114160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_89 0x4114164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_90 0x4114168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_91 0x411416C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_92 0x4114170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_93 0x4114174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_94 0x4114178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_95 0x411417C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_96 0x4114180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_97 0x4114184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_98 0x4114188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_99 0x411418C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_100 0x4114190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_101 0x4114194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_102 0x4114198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_103 0x411419C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_104 0x41141A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_105 0x41141A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_106 0x41141A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_107 0x41141AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_108 0x41141B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_109 0x41141B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_110 0x41141B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_111 0x41141BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_112 0x41141C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_113 0x41141C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_114 0x41141C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_115 0x41141CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_116 0x41141D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_117 0x41141D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_118 0x41141D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_119 0x41141DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_120 0x41141E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_121 0x41141E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_122 0x41141E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_123 0x41141EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_124 0x41141F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_125 0x41141F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_126 0x41141F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_127 0x41141FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_128 0x4114200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_129 0x4114204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_130 0x4114208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_131 0x411420C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_132 0x4114210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_133 0x4114214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_134 0x4114218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_135 0x411421C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_136 0x4114220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_137 0x4114224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_138 0x4114228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_139 0x411422C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_140 0x4114230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_141 0x4114234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_142 0x4114238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_143 0x411423C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_144 0x4114240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_145 0x4114244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_146 0x4114248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_147 0x411424C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_148 0x4114250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_149 0x4114254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_150 0x4114258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_151 0x411425C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_152 0x4114260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_153 0x4114264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_154 0x4114268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_155 0x411426C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_156 0x4114270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_157 0x4114274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_158 0x4114278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_159 0x411427C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_160 0x4114280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_161 0x4114284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_162 0x4114288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_163 0x411428C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_164 0x4114290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_165 0x4114294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_166 0x4114298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_167 0x411429C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_168 0x41142A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_169 0x41142A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_170 0x41142A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_171 0x41142AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_172 0x41142B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_173 0x41142B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_174 0x41142B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_175 0x41142BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_176 0x41142C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_177 0x41142C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_178 0x41142C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_179 0x41142CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_180 0x41142D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_181 0x41142D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_182 0x41142D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_183 0x41142DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_184 0x41142E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_185 0x41142E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_186 0x41142E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_187 0x41142EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_188 0x41142F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_189 0x41142F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_190 0x41142F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_191 0x41142FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_192 0x4114300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_193 0x4114304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_194 0x4114308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_195 0x411430C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_196 0x4114310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_197 0x4114314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_198 0x4114318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_199 0x411431C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_200 0x4114320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_201 0x4114324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_202 0x4114328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_203 0x411432C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_204 0x4114330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_205 0x4114334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_206 0x4114338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_207 0x411433C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_208 0x4114340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_209 0x4114344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_210 0x4114348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_211 0x411434C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_212 0x4114350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_213 0x4114354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_214 0x4114358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_215 0x411435C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_216 0x4114360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_217 0x4114364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_218 0x4114368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_219 0x411436C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_220 0x4114370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_221 0x4114374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_222 0x4114378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_223 0x411437C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_224 0x4114380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_225 0x4114384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_226 0x4114388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_227 0x411438C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_228 0x4114390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_229 0x4114394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_230 0x4114398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_231 0x411439C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_232 0x41143A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_233 0x41143A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_234 0x41143A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_235 0x41143AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_236 0x41143B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_237 0x41143B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_238 0x41143B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_239 0x41143BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_240 0x41143C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_241 0x41143C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_242 0x41143C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_243 0x41143CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_244 0x41143D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_245 0x41143D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_246 0x41143D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_247 0x41143DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_248 0x41143E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_249 0x41143E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_250 0x41143E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_251 0x41143EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_252 0x41143F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_253 0x41143F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_254 0x41143F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_255 0x41143FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_256 0x4114400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_257 0x4114404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_258 0x4114408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_259 0x411440C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_260 0x4114410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_261 0x4114414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_262 0x4114418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_263 0x411441C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_264 0x4114420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_265 0x4114424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_266 0x4114428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_267 0x411442C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_268 0x4114430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_269 0x4114434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_270 0x4114438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_271 0x411443C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_272 0x4114440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_273 0x4114444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_274 0x4114448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_275 0x411444C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_276 0x4114450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_277 0x4114454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_278 0x4114458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_279 0x411445C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_280 0x4114460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_281 0x4114464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_282 0x4114468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_283 0x411446C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_284 0x4114470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_285 0x4114474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_286 0x4114478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_287 0x411447C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_288 0x4114480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_289 0x4114484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_290 0x4114488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_291 0x411448C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_292 0x4114490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_293 0x4114494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_294 0x4114498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_295 0x411449C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_296 0x41144A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_297 0x41144A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_298 0x41144A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_299 0x41144AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_300 0x41144B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_301 0x41144B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_302 0x41144B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_303 0x41144BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_304 0x41144C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_305 0x41144C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_306 0x41144C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_307 0x41144CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_308 0x41144D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_309 0x41144D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_310 0x41144D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_311 0x41144DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_312 0x41144E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_313 0x41144E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_314 0x41144E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_315 0x41144EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_316 0x41144F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_317 0x41144F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_318 0x41144F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_319 0x41144FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_320 0x4114500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_321 0x4114504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_322 0x4114508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_323 0x411450C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_324 0x4114510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_325 0x4114514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_326 0x4114518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_327 0x411451C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_328 0x4114520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_329 0x4114524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_330 0x4114528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_331 0x411452C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_332 0x4114530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_333 0x4114534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_334 0x4114538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_335 0x411453C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_336 0x4114540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_337 0x4114544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_338 0x4114548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_339 0x411454C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_340 0x4114550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_341 0x4114554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_342 0x4114558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_343 0x411455C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_344 0x4114560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_345 0x4114564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_346 0x4114568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_347 0x411456C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_348 0x4114570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_349 0x4114574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_350 0x4114578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_351 0x411457C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_352 0x4114580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_353 0x4114584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_354 0x4114588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_355 0x411458C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_356 0x4114590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_357 0x4114594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_358 0x4114598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_359 0x411459C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_360 0x41145A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_361 0x41145A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_362 0x41145A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_363 0x41145AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_364 0x41145B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_365 0x41145B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_366 0x41145B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_367 0x41145BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_368 0x41145C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_369 0x41145C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_370 0x41145C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_371 0x41145CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_372 0x41145D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_373 0x41145D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_374 0x41145D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_375 0x41145DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_376 0x41145E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_377 0x41145E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_378 0x41145E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_379 0x41145EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_380 0x41145F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_381 0x41145F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_382 0x41145F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_383 0x41145FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_384 0x4114600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_385 0x4114604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_386 0x4114608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_387 0x411460C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_388 0x4114610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_389 0x4114614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_390 0x4114618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_391 0x411461C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_392 0x4114620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_393 0x4114624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_394 0x4114628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_395 0x411462C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_396 0x4114630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_397 0x4114634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_398 0x4114638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_399 0x411463C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_400 0x4114640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_401 0x4114644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_402 0x4114648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_403 0x411464C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_404 0x4114650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_405 0x4114654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_406 0x4114658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_407 0x411465C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_408 0x4114660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_409 0x4114664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_410 0x4114668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_411 0x411466C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_412 0x4114670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_413 0x4114674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_414 0x4114678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_415 0x411467C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_416 0x4114680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_417 0x4114684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_418 0x4114688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_419 0x411468C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_420 0x4114690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_421 0x4114694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_422 0x4114698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_423 0x411469C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_424 0x41146A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_425 0x41146A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_426 0x41146A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_427 0x41146AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_428 0x41146B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_429 0x41146B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_430 0x41146B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_431 0x41146BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_432 0x41146C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_433 0x41146C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_434 0x41146C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_435 0x41146CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_436 0x41146D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_437 0x41146D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_438 0x41146D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_439 0x41146DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_440 0x41146E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_441 0x41146E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_442 0x41146E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_443 0x41146EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_444 0x41146F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_445 0x41146F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_446 0x41146F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_447 0x41146FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_448 0x4114700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_449 0x4114704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_450 0x4114708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_451 0x411470C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_452 0x4114710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_453 0x4114714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_454 0x4114718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_455 0x411471C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_456 0x4114720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_457 0x4114724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_458 0x4114728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_459 0x411472C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_460 0x4114730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_461 0x4114734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_462 0x4114738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_463 0x411473C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_464 0x4114740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_465 0x4114744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_466 0x4114748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_467 0x411474C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_468 0x4114750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_469 0x4114754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_470 0x4114758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_471 0x411475C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_472 0x4114760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_473 0x4114764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_474 0x4114768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_475 0x411476C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_476 0x4114770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_477 0x4114774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_478 0x4114778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_479 0x411477C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_480 0x4114780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_481 0x4114784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_482 0x4114788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_483 0x411478C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_484 0x4114790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_485 0x4114794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_486 0x4114798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_487 0x411479C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_488 0x41147A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_489 0x41147A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_490 0x41147A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_491 0x41147AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_492 0x41147B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_493 0x41147B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_494 0x41147B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_495 0x41147BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_496 0x41147C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_497 0x41147C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_498 0x41147C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_499 0x41147CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_500 0x41147D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_501 0x41147D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_502 0x41147D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_503 0x41147DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_504 0x41147E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_505 0x41147E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_506 0x41147E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_507 0x41147EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_508 0x41147F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_509 0x41147F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_510 0x41147F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_511 0x41147FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_512 0x4114800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_513 0x4114804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_514 0x4114808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_515 0x411480C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_516 0x4114810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_517 0x4114814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_518 0x4114818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_519 0x411481C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_520 0x4114820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_521 0x4114824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_522 0x4114828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_523 0x411482C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_524 0x4114830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_525 0x4114834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_526 0x4114838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_527 0x411483C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_528 0x4114840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_529 0x4114844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_530 0x4114848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_531 0x411484C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_532 0x4114850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_533 0x4114854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_534 0x4114858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_535 0x411485C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_536 0x4114860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_537 0x4114864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_538 0x4114868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_539 0x411486C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_540 0x4114870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_541 0x4114874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_542 0x4114878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_543 0x411487C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_544 0x4114880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_545 0x4114884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_546 0x4114888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_547 0x411488C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_548 0x4114890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_549 0x4114894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_550 0x4114898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_551 0x411489C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_552 0x41148A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_553 0x41148A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_554 0x41148A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_555 0x41148AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_556 0x41148B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_557 0x41148B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_558 0x41148B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_559 0x41148BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_560 0x41148C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_561 0x41148C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_562 0x41148C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_563 0x41148CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_564 0x41148D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_565 0x41148D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_566 0x41148D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_567 0x41148DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_568 0x41148E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_569 0x41148E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_570 0x41148E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_571 0x41148EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_572 0x41148F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_573 0x41148F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_574 0x41148F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_575 0x41148FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_576 0x4114900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_577 0x4114904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_578 0x4114908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_579 0x411490C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_580 0x4114910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_581 0x4114914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_582 0x4114918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_583 0x411491C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_584 0x4114920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_585 0x4114924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_586 0x4114928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_587 0x411492C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_588 0x4114930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_589 0x4114934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_590 0x4114938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_591 0x411493C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_592 0x4114940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_593 0x4114944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_594 0x4114948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_595 0x411494C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_596 0x4114950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_597 0x4114954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_598 0x4114958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_599 0x411495C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_600 0x4114960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_601 0x4114964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_602 0x4114968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_603 0x411496C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_604 0x4114970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_605 0x4114974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_606 0x4114978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_607 0x411497C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_608 0x4114980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_609 0x4114984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_610 0x4114988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_611 0x411498C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_612 0x4114990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_613 0x4114994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_614 0x4114998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_615 0x411499C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_616 0x41149A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_617 0x41149A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_618 0x41149A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_619 0x41149AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_620 0x41149B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_621 0x41149B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_622 0x41149B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_623 0x41149BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_624 0x41149C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_625 0x41149C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_626 0x41149C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_627 0x41149CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_628 0x41149D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_629 0x41149D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_630 0x41149D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_631 0x41149DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_632 0x41149E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_633 0x41149E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_634 0x41149E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_635 0x41149EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_636 0x41149F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_637 0x41149F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_638 0x41149F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_639 0x41149FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_0 0x4115000
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_1 0x4115004
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_2 0x4115008
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_3 0x411500C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_4 0x4115010
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_5 0x4115014
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_6 0x4115018
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_7 0x411501C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_8 0x4115020
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_9 0x4115024
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_10 0x4115028
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_11 0x411502C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_12 0x4115030
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_13 0x4115034
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_14 0x4115038
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_15 0x411503C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_16 0x4115040
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_17 0x4115044
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_18 0x4115048
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_19 0x411504C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_20 0x4115050
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_21 0x4115054
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_22 0x4115058
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_23 0x411505C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_24 0x4115060
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_25 0x4115064
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_26 0x4115068
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_27 0x411506C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_28 0x4115070
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_29 0x4115074
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_30 0x4115078
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_31 0x411507C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_32 0x4115080
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_33 0x4115084
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_34 0x4115088
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_35 0x411508C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_36 0x4115090
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_37 0x4115094
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_38 0x4115098
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_39 0x411509C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_40 0x41150A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_41 0x41150A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_42 0x41150A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_43 0x41150AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_44 0x41150B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_45 0x41150B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_46 0x41150B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_47 0x41150BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_48 0x41150C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_49 0x41150C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_50 0x41150C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_51 0x41150CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_52 0x41150D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_53 0x41150D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_54 0x41150D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_55 0x41150DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_56 0x41150E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_57 0x41150E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_58 0x41150E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_59 0x41150EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_60 0x41150F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_61 0x41150F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_62 0x41150F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_63 0x41150FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_64 0x4115100
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_65 0x4115104
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_66 0x4115108
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_67 0x411510C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_68 0x4115110
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_69 0x4115114
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_70 0x4115118
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_71 0x411511C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_72 0x4115120
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_73 0x4115124
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_74 0x4115128
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_75 0x411512C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_76 0x4115130
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_77 0x4115134
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_78 0x4115138
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_79 0x411513C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_80 0x4115140
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_81 0x4115144
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_82 0x4115148
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_83 0x411514C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_84 0x4115150
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_85 0x4115154
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_86 0x4115158
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_87 0x411515C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_88 0x4115160
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_89 0x4115164
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_90 0x4115168
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_91 0x411516C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_92 0x4115170
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_93 0x4115174
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_94 0x4115178
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_95 0x411517C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_96 0x4115180
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_97 0x4115184
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_98 0x4115188
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_99 0x411518C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_100 0x4115190
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_101 0x4115194
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_102 0x4115198
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_103 0x411519C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_104 0x41151A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_105 0x41151A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_106 0x41151A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_107 0x41151AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_108 0x41151B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_109 0x41151B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_110 0x41151B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_111 0x41151BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_112 0x41151C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_113 0x41151C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_114 0x41151C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_115 0x41151CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_116 0x41151D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_117 0x41151D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_118 0x41151D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_119 0x41151DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_120 0x41151E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_121 0x41151E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_122 0x41151E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_123 0x41151EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_124 0x41151F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_125 0x41151F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_126 0x41151F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_127 0x41151FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_128 0x4115200
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_129 0x4115204
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_130 0x4115208
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_131 0x411520C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_132 0x4115210
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_133 0x4115214
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_134 0x4115218
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_135 0x411521C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_136 0x4115220
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_137 0x4115224
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_138 0x4115228
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_139 0x411522C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_140 0x4115230
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_141 0x4115234
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_142 0x4115238
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_143 0x411523C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_144 0x4115240
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_145 0x4115244
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_146 0x4115248
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_147 0x411524C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_148 0x4115250
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_149 0x4115254
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_150 0x4115258
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_151 0x411525C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_152 0x4115260
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_153 0x4115264
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_154 0x4115268
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_155 0x411526C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_156 0x4115270
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_157 0x4115274
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_158 0x4115278
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_159 0x411527C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_160 0x4115280
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_161 0x4115284
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_162 0x4115288
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_163 0x411528C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_164 0x4115290
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_165 0x4115294
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_166 0x4115298
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_167 0x411529C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_168 0x41152A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_169 0x41152A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_170 0x41152A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_171 0x41152AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_172 0x41152B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_173 0x41152B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_174 0x41152B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_175 0x41152BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_176 0x41152C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_177 0x41152C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_178 0x41152C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_179 0x41152CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_180 0x41152D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_181 0x41152D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_182 0x41152D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_183 0x41152DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_184 0x41152E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_185 0x41152E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_186 0x41152E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_187 0x41152EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_188 0x41152F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_189 0x41152F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_190 0x41152F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_191 0x41152FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_192 0x4115300
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_193 0x4115304
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_194 0x4115308
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_195 0x411530C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_196 0x4115310
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_197 0x4115314
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_198 0x4115318
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_199 0x411531C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_200 0x4115320
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_201 0x4115324
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_202 0x4115328
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_203 0x411532C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_204 0x4115330
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_205 0x4115334
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_206 0x4115338
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_207 0x411533C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_208 0x4115340
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_209 0x4115344
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_210 0x4115348
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_211 0x411534C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_212 0x4115350
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_213 0x4115354
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_214 0x4115358
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_215 0x411535C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_216 0x4115360
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_217 0x4115364
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_218 0x4115368
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_219 0x411536C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_220 0x4115370
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_221 0x4115374
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_222 0x4115378
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_223 0x411537C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_224 0x4115380
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_225 0x4115384
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_226 0x4115388
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_227 0x411538C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_228 0x4115390
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_229 0x4115394
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_230 0x4115398
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_231 0x411539C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_232 0x41153A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_233 0x41153A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_234 0x41153A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_235 0x41153AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_236 0x41153B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_237 0x41153B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_238 0x41153B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_239 0x41153BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_240 0x41153C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_241 0x41153C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_242 0x41153C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_243 0x41153CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_244 0x41153D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_245 0x41153D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_246 0x41153D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_247 0x41153DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_248 0x41153E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_249 0x41153E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_250 0x41153E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_251 0x41153EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_252 0x41153F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_253 0x41153F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_254 0x41153F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_255 0x41153FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_256 0x4115400
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_257 0x4115404
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_258 0x4115408
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_259 0x411540C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_260 0x4115410
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_261 0x4115414
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_262 0x4115418
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_263 0x411541C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_264 0x4115420
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_265 0x4115424
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_266 0x4115428
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_267 0x411542C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_268 0x4115430
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_269 0x4115434
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_270 0x4115438
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_271 0x411543C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_272 0x4115440
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_273 0x4115444
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_274 0x4115448
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_275 0x411544C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_276 0x4115450
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_277 0x4115454
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_278 0x4115458
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_279 0x411545C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_280 0x4115460
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_281 0x4115464
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_282 0x4115468
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_283 0x411546C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_284 0x4115470
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_285 0x4115474
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_286 0x4115478
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_287 0x411547C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_288 0x4115480
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_289 0x4115484
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_290 0x4115488
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_291 0x411548C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_292 0x4115490
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_293 0x4115494
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_294 0x4115498
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_295 0x411549C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_296 0x41154A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_297 0x41154A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_298 0x41154A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_299 0x41154AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_300 0x41154B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_301 0x41154B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_302 0x41154B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_303 0x41154BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_304 0x41154C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_305 0x41154C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_306 0x41154C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_307 0x41154CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_308 0x41154D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_309 0x41154D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_310 0x41154D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_311 0x41154DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_312 0x41154E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_313 0x41154E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_314 0x41154E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_315 0x41154EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_316 0x41154F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_317 0x41154F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_318 0x41154F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_319 0x41154FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_320 0x4115500
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_321 0x4115504
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_322 0x4115508
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_323 0x411550C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_324 0x4115510
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_325 0x4115514
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_326 0x4115518
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_327 0x411551C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_328 0x4115520
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_329 0x4115524
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_330 0x4115528
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_331 0x411552C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_332 0x4115530
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_333 0x4115534
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_334 0x4115538
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_335 0x411553C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_336 0x4115540
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_337 0x4115544
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_338 0x4115548
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_339 0x411554C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_340 0x4115550
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_341 0x4115554
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_342 0x4115558
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_343 0x411555C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_344 0x4115560
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_345 0x4115564
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_346 0x4115568
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_347 0x411556C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_348 0x4115570
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_349 0x4115574
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_350 0x4115578
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_351 0x411557C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_352 0x4115580
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_353 0x4115584
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_354 0x4115588
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_355 0x411558C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_356 0x4115590
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_357 0x4115594
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_358 0x4115598
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_359 0x411559C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_360 0x41155A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_361 0x41155A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_362 0x41155A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_363 0x41155AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_364 0x41155B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_365 0x41155B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_366 0x41155B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_367 0x41155BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_368 0x41155C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_369 0x41155C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_370 0x41155C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_371 0x41155CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_372 0x41155D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_373 0x41155D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_374 0x41155D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_375 0x41155DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_376 0x41155E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_377 0x41155E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_378 0x41155E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_379 0x41155EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_380 0x41155F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_381 0x41155F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_382 0x41155F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_383 0x41155FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_384 0x4115600
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_385 0x4115604
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_386 0x4115608
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_387 0x411560C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_388 0x4115610
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_389 0x4115614
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_390 0x4115618
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_391 0x411561C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_392 0x4115620
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_393 0x4115624
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_394 0x4115628
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_395 0x411562C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_396 0x4115630
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_397 0x4115634
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_398 0x4115638
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_399 0x411563C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_400 0x4115640
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_401 0x4115644
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_402 0x4115648
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_403 0x411564C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_404 0x4115650
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_405 0x4115654
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_406 0x4115658
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_407 0x411565C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_408 0x4115660
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_409 0x4115664
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_410 0x4115668
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_411 0x411566C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_412 0x4115670
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_413 0x4115674
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_414 0x4115678
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_415 0x411567C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_416 0x4115680
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_417 0x4115684
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_418 0x4115688
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_419 0x411568C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_420 0x4115690
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_421 0x4115694
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_422 0x4115698
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_423 0x411569C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_424 0x41156A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_425 0x41156A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_426 0x41156A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_427 0x41156AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_428 0x41156B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_429 0x41156B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_430 0x41156B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_431 0x41156BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_432 0x41156C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_433 0x41156C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_434 0x41156C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_435 0x41156CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_436 0x41156D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_437 0x41156D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_438 0x41156D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_439 0x41156DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_440 0x41156E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_441 0x41156E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_442 0x41156E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_443 0x41156EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_444 0x41156F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_445 0x41156F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_446 0x41156F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_447 0x41156FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_448 0x4115700
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_449 0x4115704
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_450 0x4115708
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_451 0x411570C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_452 0x4115710
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_453 0x4115714
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_454 0x4115718
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_455 0x411571C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_456 0x4115720
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_457 0x4115724
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_458 0x4115728
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_459 0x411572C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_460 0x4115730
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_461 0x4115734
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_462 0x4115738
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_463 0x411573C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_464 0x4115740
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_465 0x4115744
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_466 0x4115748
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_467 0x411574C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_468 0x4115750
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_469 0x4115754
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_470 0x4115758
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_471 0x411575C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_472 0x4115760
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_473 0x4115764
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_474 0x4115768
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_475 0x411576C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_476 0x4115770
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_477 0x4115774
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_478 0x4115778
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_479 0x411577C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_480 0x4115780
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_481 0x4115784
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_482 0x4115788
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_483 0x411578C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_484 0x4115790
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_485 0x4115794
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_486 0x4115798
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_487 0x411579C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_488 0x41157A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_489 0x41157A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_490 0x41157A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_491 0x41157AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_492 0x41157B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_493 0x41157B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_494 0x41157B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_495 0x41157BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_496 0x41157C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_497 0x41157C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_498 0x41157C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_499 0x41157CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_500 0x41157D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_501 0x41157D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_502 0x41157D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_503 0x41157DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_504 0x41157E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_505 0x41157E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_506 0x41157E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_507 0x41157EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_508 0x41157F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_509 0x41157F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_510 0x41157F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_511 0x41157FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_512 0x4115800
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_513 0x4115804
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_514 0x4115808
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_515 0x411580C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_516 0x4115810
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_517 0x4115814
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_518 0x4115818
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_519 0x411581C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_520 0x4115820
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_521 0x4115824
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_522 0x4115828
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_523 0x411582C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_524 0x4115830
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_525 0x4115834
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_526 0x4115838
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_527 0x411583C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_528 0x4115840
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_529 0x4115844
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_530 0x4115848
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_531 0x411584C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_532 0x4115850
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_533 0x4115854
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_534 0x4115858
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_535 0x411585C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_536 0x4115860
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_537 0x4115864
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_538 0x4115868
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_539 0x411586C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_540 0x4115870
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_541 0x4115874
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_542 0x4115878
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_543 0x411587C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_544 0x4115880
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_545 0x4115884
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_546 0x4115888
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_547 0x411588C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_548 0x4115890
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_549 0x4115894
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_550 0x4115898
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_551 0x411589C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_552 0x41158A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_553 0x41158A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_554 0x41158A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_555 0x41158AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_556 0x41158B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_557 0x41158B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_558 0x41158B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_559 0x41158BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_560 0x41158C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_561 0x41158C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_562 0x41158C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_563 0x41158CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_564 0x41158D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_565 0x41158D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_566 0x41158D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_567 0x41158DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_568 0x41158E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_569 0x41158E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_570 0x41158E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_571 0x41158EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_572 0x41158F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_573 0x41158F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_574 0x41158F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_575 0x41158FC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_576 0x4115900
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_577 0x4115904
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_578 0x4115908
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_579 0x411590C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_580 0x4115910
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_581 0x4115914
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_582 0x4115918
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_583 0x411591C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_584 0x4115920
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_585 0x4115924
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_586 0x4115928
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_587 0x411592C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_588 0x4115930
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_589 0x4115934
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_590 0x4115938
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_591 0x411593C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_592 0x4115940
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_593 0x4115944
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_594 0x4115948
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_595 0x411594C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_596 0x4115950
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_597 0x4115954
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_598 0x4115958
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_599 0x411595C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_600 0x4115960
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_601 0x4115964
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_602 0x4115968
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_603 0x411596C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_604 0x4115970
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_605 0x4115974
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_606 0x4115978
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_607 0x411597C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_608 0x4115980
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_609 0x4115984
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_610 0x4115988
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_611 0x411598C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_612 0x4115990
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_613 0x4115994
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_614 0x4115998
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_615 0x411599C
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_616 0x41159A0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_617 0x41159A4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_618 0x41159A8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_619 0x41159AC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_620 0x41159B0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_621 0x41159B4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_622 0x41159B8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_623 0x41159BC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_624 0x41159C0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_625 0x41159C4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_626 0x41159C8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_627 0x41159CC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_628 0x41159D0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_629 0x41159D4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_630 0x41159D8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_631 0x41159DC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_632 0x41159E0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_633 0x41159E4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_634 0x41159E8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_635 0x41159EC
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_636 0x41159F0
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_637 0x41159F4
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_638 0x41159F8
+
+#define mmDCORE0_SYNC_MNGR_OBJS_SM_PRIV_639 0x41159FC
+
+#endif /* ASIC_REG_DCORE0_SYNC_MNGR_OBJS_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
new file mode 100644
index 000000000000..2d4a22680a23
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_AXUSER_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_ASID 0x400BE00
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP 0x400BE04
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_STRONG_ORDER 0x400BE08
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_NO_SNOOP 0x400BE0C
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_WR_REDUCTION 0x400BE10
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_RD_ATOMIC 0x400BE14
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_QOS 0x400BE18
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_RSVD 0x400BE1C
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_EMEM_CPAGE 0x400BE20
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_CORE 0x400BE24
+
+#define mmDCORE0_TPC0_CFG_AXUSER_E2E_COORD 0x400BE28
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_WR_OVRD_LO 0x400BE30
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_WR_OVRD_HI 0x400BE34
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_RD_OVRD_LO 0x400BE38
+
+#define mmDCORE0_TPC0_CFG_AXUSER_HB_RD_OVRD_HI 0x400BE3C
+
+#define mmDCORE0_TPC0_CFG_AXUSER_LB_COORD 0x400BE40
+
+#define mmDCORE0_TPC0_CFG_AXUSER_LB_LOCK 0x400BE44
+
+#define mmDCORE0_TPC0_CFG_AXUSER_LB_RSVD 0x400BE48
+
+#define mmDCORE0_TPC0_CFG_AXUSER_LB_OVRD 0x400BE4C
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
new file mode 100644
index 000000000000..cdab39debd2c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_KERNEL_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_KERNEL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_KERNEL
+ * (Prototype: TPC_NON_TENSOR_DESCRIPTOR)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0x400B508
+
+#define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0x400B50C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_0 0x400B510
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_0 0x400B514
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_1 0x400B518
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_1 0x400B51C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_2 0x400B520
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_2 0x400B524
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_3 0x400B528
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_3 0x400B52C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_DIM_4 0x400B530
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_SIZE_DIM_4 0x400B534
+
+#define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_CONFIG 0x400B538
+
+#define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_ID 0x400B53C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_POWER_LOOP 0x400B540
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_0 0x400B544
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_1 0x400B548
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_2 0x400B54C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_3 0x400B550
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_4 0x400B554
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_5 0x400B558
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_6 0x400B55C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_7 0x400B560
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_8 0x400B564
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_9 0x400B568
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_10 0x400B56C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_11 0x400B570
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_12 0x400B574
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_13 0x400B578
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_14 0x400B57C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_15 0x400B580
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_16 0x400B584
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_17 0x400B588
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_18 0x400B58C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_19 0x400B590
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_20 0x400B594
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_21 0x400B598
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_22 0x400B59C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_23 0x400B5A0
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_24 0x400B5A4
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_25 0x400B5A8
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_26 0x400B5AC
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_27 0x400B5B0
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_28 0x400B5B4
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_29 0x400B5B8
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_30 0x400B5BC
+
+#define mmDCORE0_TPC0_CFG_KERNEL_SRF_31 0x400B5C0
+
+#define mmDCORE0_TPC0_CFG_KERNEL_KERNEL_ID_INC 0x400B5C4
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_0 0x400B5C8
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_1 0x400B5CC
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_2 0x400B5D0
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_3 0x400B5D4
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TID_BASE_SIZE_HIGH_DIM_4 0x400B5D8
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_KERNEL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
new file mode 100644
index 000000000000..4ef1c1edc5f7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_KERNEL_TENSOR_0_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_KERNEL_TENSOR_0_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_KERNEL_TENSOR_0
+ * (Prototype: TPC_TENSOR)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0x400B000
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0x400B004
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0x400B008
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0x400B00C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0x400B010
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0x400B014
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0x400B018
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0x400B01C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0x400B020
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0x400B024
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0x400B028
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0x400B02C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0x400B030
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0x400B034
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_PREF_STRIDE 0x400B038
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_STRIDE_HIGH 0x400B03C
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_STRIDE_HIGH 0x400B040
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_STRIDE_HIGH 0x400B044
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_STRIDE_HIGH 0x400B048
+
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_STRIDE_HIGH 0x400B04C
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_KERNEL_TENSOR_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
new file mode 100644
index 000000000000..cdecbd0f9d84
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_MASKS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG
+ * (Prototype: TPC)
+ *****************************************
+ */
+
+/* DCORE0_TPC0_CFG_TPC_COUNT */
+#define DCORE0_TPC0_CFG_TPC_COUNT_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_COUNT_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TPC_ID */
+#define DCORE0_TPC0_CFG_TPC_ID_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_ID_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_STALL_ON_ERR */
+#define DCORE0_TPC0_CFG_STALL_ON_ERR_V_SHIFT 0
+#define DCORE0_TPC0_CFG_STALL_ON_ERR_V_MASK 0x1
+
+/* DCORE0_TPC0_CFG_CLK_EN */
+#define DCORE0_TPC0_CFG_CLK_EN_LBW_CFG_DIS_SHIFT 0
+#define DCORE0_TPC0_CFG_CLK_EN_LBW_CFG_DIS_MASK 0x1
+#define DCORE0_TPC0_CFG_CLK_EN_DBG_CFG_DIS_SHIFT 4
+#define DCORE0_TPC0_CFG_CLK_EN_DBG_CFG_DIS_MASK 0x10
+
+/* DCORE0_TPC0_CFG_IQ_RL_EN */
+#define DCORE0_TPC0_CFG_IQ_RL_EN_V_SHIFT 0
+#define DCORE0_TPC0_CFG_IQ_RL_EN_V_MASK 0x1
+
+/* DCORE0_TPC0_CFG_IQ_RL_SAT */
+#define DCORE0_TPC0_CFG_IQ_RL_SAT_V_SHIFT 0
+#define DCORE0_TPC0_CFG_IQ_RL_SAT_V_MASK 0xFF
+
+/* DCORE0_TPC0_CFG_IQ_RL_RST_TOKEN */
+#define DCORE0_TPC0_CFG_IQ_RL_RST_TOKEN_V_SHIFT 0
+#define DCORE0_TPC0_CFG_IQ_RL_RST_TOKEN_V_MASK 0xFF
+
+/* DCORE0_TPC0_CFG_IQ_RL_TIMEOUT */
+#define DCORE0_TPC0_CFG_IQ_RL_TIMEOUT_V_SHIFT 0
+#define DCORE0_TPC0_CFG_IQ_RL_TIMEOUT_V_MASK 0xFF
+
+/* DCORE0_TPC0_CFG_TSB_CFG_MTRR_2 */
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_2_PHY_BASE_ADD_LO_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_2_PHY_BASE_ADD_LO_MASK 0xFFFFFF
+
+/* DCORE0_TPC0_CFG_IQ_LBW_CLK_EN */
+#define DCORE0_TPC0_CFG_IQ_LBW_CLK_EN_V_SHIFT 0
+#define DCORE0_TPC0_CFG_IQ_LBW_CLK_EN_V_MASK 0x1
+
+/* DCORE0_TPC0_CFG_TPC_LOCK_VALUE */
+#define DCORE0_TPC0_CFG_TPC_LOCK_VALUE_VALUE_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_LOCK_VALUE_VALUE_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TPC_LOCK */
+#define DCORE0_TPC0_CFG_TPC_LOCK_LOCK_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_LOCK_LOCK_MASK 0x1
+
+/* DCORE0_TPC0_CFG_CGU_SB */
+#define DCORE0_TPC0_CFG_CGU_SB_TSB_DISABLE_SHIFT 0
+#define DCORE0_TPC0_CFG_CGU_SB_TSB_DISABLE_MASK 0x1
+
+/* DCORE0_TPC0_CFG_CGU_CNT */
+#define DCORE0_TPC0_CFG_CGU_CNT_DCACHE_DISABLE_SHIFT 0
+#define DCORE0_TPC0_CFG_CGU_CNT_DCACHE_DISABLE_MASK 0x1
+#define DCORE0_TPC0_CFG_CGU_CNT_WQ_DISABLE_SHIFT 1
+#define DCORE0_TPC0_CFG_CGU_CNT_WQ_DISABLE_MASK 0x2
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_0_DISABLE_SHIFT 2
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_0_DISABLE_MASK 0x4
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_1_DISABLE_SHIFT 3
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_1_DISABLE_MASK 0x8
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_2_DISABLE_SHIFT 4
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_2_DISABLE_MASK 0x10
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_3_DISABLE_SHIFT 5
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_3_DISABLE_MASK 0x20
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_4_DISABLE_SHIFT 6
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_ADDSUB_4_DISABLE_MASK 0x40
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_0_DISABLE_SHIFT 7
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_0_DISABLE_MASK 0x80
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_1_DISABLE_SHIFT 8
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_1_DISABLE_MASK 0x100
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_2_DISABLE_SHIFT 9
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_2_DISABLE_MASK 0x200
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_3_DISABLE_SHIFT 10
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_3_DISABLE_MASK 0x400
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_4_DISABLE_SHIFT 11
+#define DCORE0_TPC0_CFG_CGU_CNT_SPU_AGU_CMP_4_DISABLE_MASK 0x800
+#define DCORE0_TPC0_CFG_CGU_CNT_MSAC_DISABLE_SHIFT 12
+#define DCORE0_TPC0_CFG_CGU_CNT_MSAC_DISABLE_MASK 0x1000
+#define DCORE0_TPC0_CFG_CGU_CNT_CONV_DISABLE_SHIFT 13
+#define DCORE0_TPC0_CFG_CGU_CNT_CONV_DISABLE_MASK 0x2000
+#define DCORE0_TPC0_CFG_CGU_CNT_NEARBYINT_DISABLE_SHIFT 14
+#define DCORE0_TPC0_CFG_CGU_CNT_NEARBYINT_DISABLE_MASK 0x4000
+#define DCORE0_TPC0_CFG_CGU_CNT_CMP_DISABLE_SHIFT 15
+#define DCORE0_TPC0_CFG_CGU_CNT_CMP_DISABLE_MASK 0x8000
+#define DCORE0_TPC0_CFG_CGU_CNT_FP_MAC_DISABLE_SHIFT 16
+#define DCORE0_TPC0_CFG_CGU_CNT_FP_MAC_DISABLE_MASK 0x10000
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_SRC_A_D2_DISABLE_SHIFT 17
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_SRC_A_D2_DISABLE_MASK 0x20000
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_SRC_B_D2_DISABLE_SHIFT 18
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_SRC_B_D2_DISABLE_MASK 0x40000
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_SRC_E_D2_DISABLE_SHIFT 19
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_SRC_E_D2_DISABLE_MASK 0x80000
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_FMA_SRC_C_E1_DISABLE_SHIFT 20
+#define DCORE0_TPC0_CFG_CGU_CNT_SOPS_FMA_SRC_C_E1_DISABLE_MASK 0x100000
+#define DCORE0_TPC0_CFG_CGU_CNT_LD_SOPS_SRC_A_D2_DISABLE_SHIFT 21
+#define DCORE0_TPC0_CFG_CGU_CNT_LD_SOPS_SRC_A_D2_DISABLE_MASK 0x200000
+#define DCORE0_TPC0_CFG_CGU_CNT_ST_SOPS_SRC_A_D2_DISABLE_SHIFT 22
+#define DCORE0_TPC0_CFG_CGU_CNT_ST_SOPS_SRC_A_D2_DISABLE_MASK 0x400000
+#define DCORE0_TPC0_CFG_CGU_CNT_FP_ADDSUB_DISABLE_SHIFT 23
+#define DCORE0_TPC0_CFG_CGU_CNT_FP_ADDSUB_DISABLE_MASK 0x800000
+
+/* DCORE0_TPC0_CFG_CGU_CPE */
+#define DCORE0_TPC0_CFG_CGU_CPE_NEARBYINT_DISABLE_SHIFT 0
+#define DCORE0_TPC0_CFG_CGU_CPE_NEARBYINT_DISABLE_MASK 0x1
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_A_DISABLE_SHIFT 1
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_A_DISABLE_MASK 0x2
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_B_DISABLE_SHIFT 2
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_B_DISABLE_MASK 0x4
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_E_DISABLE_SHIFT 3
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_E_DISABLE_MASK 0x8
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_D_DISABLE_SHIFT 4
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_D_DISABLE_MASK 0x10
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_C_DISABLE_SHIFT 5
+#define DCORE0_TPC0_CFG_CGU_CPE_SOPS_SRC_C_DISABLE_MASK 0x20
+#define DCORE0_TPC0_CFG_CGU_CPE_LD_SOPS_SRC_A_DISABLE_SHIFT 6
+#define DCORE0_TPC0_CFG_CGU_CPE_LD_SOPS_SRC_A_DISABLE_MASK 0x40
+#define DCORE0_TPC0_CFG_CGU_CPE_MSAC_DISABLE_SHIFT 7
+#define DCORE0_TPC0_CFG_CGU_CPE_MSAC_DISABLE_MASK 0x80
+#define DCORE0_TPC0_CFG_CGU_CPE_ADDSUB_DISABLE_SHIFT 8
+#define DCORE0_TPC0_CFG_CGU_CPE_ADDSUB_DISABLE_MASK 0x100
+#define DCORE0_TPC0_CFG_CGU_CPE_SHIFT_DISABLE_SHIFT 9
+#define DCORE0_TPC0_CFG_CGU_CPE_SHIFT_DISABLE_MASK 0x200
+#define DCORE0_TPC0_CFG_CGU_CPE_GLE_DISABLE_SHIFT 10
+#define DCORE0_TPC0_CFG_CGU_CPE_GLE_DISABLE_MASK 0x400
+#define DCORE0_TPC0_CFG_CGU_CPE_CMP_DISABLE_SHIFT 11
+#define DCORE0_TPC0_CFG_CGU_CPE_CMP_DISABLE_MASK 0x800
+#define DCORE0_TPC0_CFG_CGU_CPE_CONV_DISABLE_SHIFT 12
+#define DCORE0_TPC0_CFG_CGU_CPE_CONV_DISABLE_MASK 0x1000
+#define DCORE0_TPC0_CFG_CGU_CPE_SB_DISABLE_SHIFT 13
+#define DCORE0_TPC0_CFG_CGU_CPE_SB_DISABLE_MASK 0x2000
+#define DCORE0_TPC0_CFG_CGU_CPE_TBUF_DISABLE_SHIFT 14
+#define DCORE0_TPC0_CFG_CGU_CPE_TBUF_DISABLE_MASK 0x4000
+#define DCORE0_TPC0_CFG_CGU_CPE_ST_G_DISABLE_SHIFT 15
+#define DCORE0_TPC0_CFG_CGU_CPE_ST_G_DISABLE_MASK 0x8000
+#define DCORE0_TPC0_CFG_CGU_CPE_FP_MAC_0_DISABLE_SHIFT 16
+#define DCORE0_TPC0_CFG_CGU_CPE_FP_MAC_0_DISABLE_MASK 0x10000
+#define DCORE0_TPC0_CFG_CGU_CPE_FP_MAC_1_DISABLE_SHIFT 17
+#define DCORE0_TPC0_CFG_CGU_CPE_FP_MAC_1_DISABLE_MASK 0x20000
+#define DCORE0_TPC0_CFG_CGU_CPE_FP_ADDSUB_DISABLE_SHIFT 18
+#define DCORE0_TPC0_CFG_CGU_CPE_FP_ADDSUB_DISABLE_MASK 0x40000
+#define DCORE0_TPC0_CFG_CGU_CPE_ST_SOPS_SRC_C_DISABLE_SHIFT 19
+#define DCORE0_TPC0_CFG_CGU_CPE_ST_SOPS_SRC_C_DISABLE_MASK 0x80000
+
+/* DCORE0_TPC0_CFG_FP16_FTZ_IN */
+#define DCORE0_TPC0_CFG_FP16_FTZ_IN_MODE_SHIFT 0
+#define DCORE0_TPC0_CFG_FP16_FTZ_IN_MODE_MASK 0x1
+
+/* DCORE0_TPC0_CFG_DCACHE_CFG */
+#define DCORE0_TPC0_CFG_DCACHE_CFG_G_PREF_DIS_SHIFT 0
+#define DCORE0_TPC0_CFG_DCACHE_CFG_G_PREF_DIS_MASK 0x1
+#define DCORE0_TPC0_CFG_DCACHE_CFG_G_PREF_VLD_CLR_SHIFT 1
+#define DCORE0_TPC0_CFG_DCACHE_CFG_G_PREF_VLD_CLR_MASK 0x2
+#define DCORE0_TPC0_CFG_DCACHE_CFG_HALT_FLUSH_SHIFT 2
+#define DCORE0_TPC0_CFG_DCACHE_CFG_HALT_FLUSH_MASK 0x4
+#define DCORE0_TPC0_CFG_DCACHE_CFG_DEALIGN_DIS_SHIFT 3
+#define DCORE0_TPC0_CFG_DCACHE_CFG_DEALIGN_DIS_MASK 0x8
+
+/* DCORE0_TPC0_CFG_E2E_CRDT_TOP */
+#define DCORE0_TPC0_CFG_E2E_CRDT_TOP_FORCE_EN_SHIFT 0
+#define DCORE0_TPC0_CFG_E2E_CRDT_TOP_FORCE_EN_MASK 0x1
+#define DCORE0_TPC0_CFG_E2E_CRDT_TOP_Y_X_FORCE_SHIFT 4
+#define DCORE0_TPC0_CFG_E2E_CRDT_TOP_Y_X_FORCE_MASK 0x1FF0
+
+/* DCORE0_TPC0_CFG_TPC_DCACHE_L0CD */
+#define DCORE0_TPC0_CFG_TPC_DCACHE_L0CD_VAL_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_DCACHE_L0CD_VAL_MASK 0x1
+
+/* DCORE0_TPC0_CFG_TPC_SB_L0CD */
+#define DCORE0_TPC0_CFG_TPC_SB_L0CD_VAL_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_SB_L0CD_VAL_MASK 0x1
+
+/* DCORE0_TPC0_CFG_CONV_ROUND_CSR */
+#define DCORE0_TPC0_CFG_CONV_ROUND_CSR_MODE_SHIFT 0
+#define DCORE0_TPC0_CFG_CONV_ROUND_CSR_MODE_MASK 0x7
+
+/* DCORE0_TPC0_CFG_TSB_OCCUPANCY */
+#define DCORE0_TPC0_CFG_TSB_OCCUPANCY_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_OCCUPANCY_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_ARB_QNT_HBW_WEIGHT */
+#define DCORE0_TPC0_CFG_ARB_QNT_HBW_WEIGHT_AR_SHIFT 0
+#define DCORE0_TPC0_CFG_ARB_QNT_HBW_WEIGHT_AR_MASK 0xFFF
+#define DCORE0_TPC0_CFG_ARB_QNT_HBW_WEIGHT_AW_SHIFT 12
+#define DCORE0_TPC0_CFG_ARB_QNT_HBW_WEIGHT_AW_MASK 0xFF000
+
+/* DCORE0_TPC0_CFG_ARB_QNT_LBW_WEIGHT */
+#define DCORE0_TPC0_CFG_ARB_QNT_LBW_WEIGHT_AW_SHIFT 0
+#define DCORE0_TPC0_CFG_ARB_QNT_LBW_WEIGHT_AW_MASK 0xFF
+#define DCORE0_TPC0_CFG_ARB_QNT_LBW_WEIGHT_AR_SHIFT 8
+#define DCORE0_TPC0_CFG_ARB_QNT_LBW_WEIGHT_AR_MASK 0xFF00
+
+/* DCORE0_TPC0_CFG_ARB_CNT_HBW_WEIGHT */
+#define DCORE0_TPC0_CFG_ARB_CNT_HBW_WEIGHT_AR_SHIFT 0
+#define DCORE0_TPC0_CFG_ARB_CNT_HBW_WEIGHT_AR_MASK 0xFFF
+#define DCORE0_TPC0_CFG_ARB_CNT_HBW_WEIGHT_AW_SHIFT 12
+#define DCORE0_TPC0_CFG_ARB_CNT_HBW_WEIGHT_AW_MASK 0xFFF000
+
+/* DCORE0_TPC0_CFG_ARB_CNT_LBW_WEIGHT */
+#define DCORE0_TPC0_CFG_ARB_CNT_LBW_WEIGHT_AR_SHIFT 0
+#define DCORE0_TPC0_CFG_ARB_CNT_LBW_WEIGHT_AR_MASK 0xFF
+#define DCORE0_TPC0_CFG_ARB_CNT_LBW_WEIGHT_AW_SHIFT 8
+#define DCORE0_TPC0_CFG_ARB_CNT_LBW_WEIGHT_AW_MASK 0xFFF00
+
+/* DCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_SPE_LFSR_POLYNOM */
+#define DCORE0_TPC0_CFG_SPE_LFSR_POLYNOM_V_SHIFT 0
+#define DCORE0_TPC0_CFG_SPE_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TSB_CFG_MTRR_GLBL */
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_GLBL_EN_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_GLBL_EN_MASK 0x1
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_GLBL_DEFAULT_MEMORY_TYPE_SHIFT 4
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_GLBL_DEFAULT_MEMORY_TYPE_MASK 0x10
+
+/* DCORE0_TPC0_CFG_TSB_CFG_MTRR */
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_VALID_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_VALID_MASK 0x1
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_MEMORY_TYPE_SHIFT 4
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_MEMORY_TYPE_MASK 0x10
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_PHY_BASE_ADD_SHIFT 8
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_PHY_BASE_ADD_MASK 0xFFFF00
+
+/* DCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO */
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI */
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI_V_MASK 0xFF
+
+/* DCORE0_TPC0_CFG_FP8_143_BIAS */
+#define DCORE0_TPC0_CFG_FP8_143_BIAS_BIAS_143_SHIFT 0
+#define DCORE0_TPC0_CFG_FP8_143_BIAS_BIAS_143_MASK 0xF
+
+/* DCORE0_TPC0_CFG_ROUND_CSR */
+#define DCORE0_TPC0_CFG_ROUND_CSR_MODE_SHIFT 0
+#define DCORE0_TPC0_CFG_ROUND_CSR_MODE_MASK 0x7
+
+/* DCORE0_TPC0_CFG_HB_PROT */
+#define DCORE0_TPC0_CFG_HB_PROT_AWPROT_SHIFT 0
+#define DCORE0_TPC0_CFG_HB_PROT_AWPROT_MASK 0x7
+#define DCORE0_TPC0_CFG_HB_PROT_ARPROT_SHIFT 3
+#define DCORE0_TPC0_CFG_HB_PROT_ARPROT_MASK 0x38
+
+/* DCORE0_TPC0_CFG_LB_PROT */
+#define DCORE0_TPC0_CFG_LB_PROT_AWPROT_SHIFT 0
+#define DCORE0_TPC0_CFG_LB_PROT_AWPROT_MASK 0x7
+#define DCORE0_TPC0_CFG_LB_PROT_ARPROT_SHIFT 3
+#define DCORE0_TPC0_CFG_LB_PROT_ARPROT_MASK 0x38
+
+/* DCORE0_TPC0_CFG_SEMAPHORE */
+#define DCORE0_TPC0_CFG_SEMAPHORE_V_SHIFT 0
+#define DCORE0_TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_VFLAGS */
+#define DCORE0_TPC0_CFG_VFLAGS_V_SHIFT 0
+#define DCORE0_TPC0_CFG_VFLAGS_V_MASK 0x7F
+
+/* DCORE0_TPC0_CFG_SFLAGS */
+#define DCORE0_TPC0_CFG_SFLAGS_V_SHIFT 0
+#define DCORE0_TPC0_CFG_SFLAGS_V_MASK 0x7F
+
+/* DCORE0_TPC0_CFG_LFSR_POLYNOM */
+#define DCORE0_TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_STATUS */
+#define DCORE0_TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1
+#define DCORE0_TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2
+#define DCORE0_TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2
+#define DCORE0_TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4
+#define DCORE0_TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3
+#define DCORE0_TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8
+#define DCORE0_TPC0_CFG_STATUS_SB_EMPTY_SHIFT 5
+#define DCORE0_TPC0_CFG_STATUS_SB_EMPTY_MASK 0x20
+#define DCORE0_TPC0_CFG_STATUS_QM_IDLE_SHIFT 6
+#define DCORE0_TPC0_CFG_STATUS_QM_IDLE_MASK 0x40
+#define DCORE0_TPC0_CFG_STATUS_QM_RDY_SHIFT 7
+#define DCORE0_TPC0_CFG_STATUS_QM_RDY_MASK 0x80
+
+/* DCORE0_TPC0_CFG_CFG_BASE_ADDRESS_HIGH */
+#define DCORE0_TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define DCORE0_TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_CFG_SUBTRACT_VALUE */
+#define DCORE0_TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0
+#define DCORE0_TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_SM_BASE_ADDRESS_HIGH */
+#define DCORE0_TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define DCORE0_TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TPC_CMD */
+#define DCORE0_TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1
+#define DCORE0_TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1
+#define DCORE0_TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2
+#define DCORE0_TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2
+#define DCORE0_TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4
+#define DCORE0_TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3
+#define DCORE0_TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8
+#define DCORE0_TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4
+#define DCORE0_TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10
+#define DCORE0_TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5
+#define DCORE0_TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20
+#define DCORE0_TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6
+#define DCORE0_TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40
+
+/* DCORE0_TPC0_CFG_TPC_EXECUTE */
+#define DCORE0_TPC0_CFG_TPC_EXECUTE_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_EXECUTE_V_MASK 0x1
+
+/* DCORE0_TPC0_CFG_TPC_STALL */
+#define DCORE0_TPC0_CFG_TPC_STALL_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_STALL_V_MASK 0x1
+
+/* DCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */
+#define DCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0
+#define DCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */
+#define DCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0
+#define DCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_RD_RATE_LIMIT */
+#define DCORE0_TPC0_CFG_RD_RATE_LIMIT_ENABLE_SHIFT 0
+#define DCORE0_TPC0_CFG_RD_RATE_LIMIT_ENABLE_MASK 0x1
+#define DCORE0_TPC0_CFG_RD_RATE_LIMIT_SATURATION_SHIFT 1
+#define DCORE0_TPC0_CFG_RD_RATE_LIMIT_SATURATION_MASK 0x1FE
+#define DCORE0_TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_SHIFT 9
+#define DCORE0_TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_MASK 0x1FE00
+
+/* DCORE0_TPC0_CFG_WR_RATE_LIMIT */
+#define DCORE0_TPC0_CFG_WR_RATE_LIMIT_ENABLE_SHIFT 0
+#define DCORE0_TPC0_CFG_WR_RATE_LIMIT_ENABLE_MASK 0x1
+#define DCORE0_TPC0_CFG_WR_RATE_LIMIT_SATURATION_SHIFT 1
+#define DCORE0_TPC0_CFG_WR_RATE_LIMIT_SATURATION_MASK 0x1FE
+#define DCORE0_TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_SHIFT 9
+#define DCORE0_TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_MASK 0x1FE00
+
+/* DCORE0_TPC0_CFG_MSS_CONFIG */
+#define DCORE0_TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0
+#define DCORE0_TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF
+#define DCORE0_TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4
+#define DCORE0_TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0
+#define DCORE0_TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8
+#define DCORE0_TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300
+#define DCORE0_TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10
+#define DCORE0_TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400
+#define DCORE0_TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_SHIFT 11
+#define DCORE0_TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_MASK 0x800
+
+/* DCORE0_TPC0_CFG_TPC_INTR_CAUSE */
+#define DCORE0_TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TPC_INTR_MASK */
+#define DCORE0_TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0
+#define DCORE0_TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_WQ_CREDITS */
+#define DCORE0_TPC0_CFG_WQ_CREDITS_ST_G_SHIFT 0
+#define DCORE0_TPC0_CFG_WQ_CREDITS_ST_G_MASK 0xF
+#define DCORE0_TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_SHIFT 4
+#define DCORE0_TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_MASK 0x70
+
+/* DCORE0_TPC0_CFG_OPCODE_EXEC */
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_SPU_OP_SHIFT 0
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_SPU_OP_MASK 0x7F
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_SPU_EN_SHIFT 7
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_SPU_EN_MASK 0x80
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_VPU_OP_SHIFT 8
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_VPU_OP_MASK 0x7F00
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_VPU_EN_SHIFT 15
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_VPU_EN_MASK 0x8000
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_LD_OP_SHIFT 16
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_LD_OP_MASK 0x7F0000
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_LD_EN_SHIFT 23
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_LD_EN_MASK 0x800000
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_ST_OP_SHIFT 24
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_ST_OP_MASK 0x7F000000
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_ST_EN_SHIFT 31
+#define DCORE0_TPC0_CFG_OPCODE_EXEC_ST_EN_MASK 0x80000000
+
+/* DCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO */
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI */
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_SHIFT 0
+#define DCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_TSB_CFG_MAX_SIZE */
+#define DCORE0_TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_MASK 0xFFFF
+#define DCORE0_TPC0_CFG_TSB_CFG_MAX_SIZE_MD_SHIFT 16
+#define DCORE0_TPC0_CFG_TSB_CFG_MAX_SIZE_MD_MASK 0xFFFF0000
+
+/* DCORE0_TPC0_CFG_TSB_CFG */
+#define DCORE0_TPC0_CFG_TSB_CFG_CACHE_DISABLE_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_CFG_CACHE_DISABLE_MASK 0x1
+#define DCORE0_TPC0_CFG_TSB_CFG_MAX_OS_SHIFT 1
+#define DCORE0_TPC0_CFG_TSB_CFG_MAX_OS_MASK 0x1FFFE
+#define DCORE0_TPC0_CFG_TSB_CFG_ENABLE_CGATE_SHIFT 17
+#define DCORE0_TPC0_CFG_TSB_CFG_ENABLE_CGATE_MASK 0x20000
+
+/* DCORE0_TPC0_CFG_TSB_INFLIGHT_CNTR */
+#define DCORE0_TPC0_CFG_TSB_INFLIGHT_CNTR_V_SHIFT 0
+#define DCORE0_TPC0_CFG_TSB_INFLIGHT_CNTR_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_WQ_INFLIGHT_CNTR */
+#define DCORE0_TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_SHIFT 0
+#define DCORE0_TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_MASK 0xFFFF
+#define DCORE0_TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_SHIFT 16
+#define DCORE0_TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_MASK 0x1FF0000
+
+/* DCORE0_TPC0_CFG_WQ_LBW_TOTAL_CNTR */
+#define DCORE0_TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_SHIFT 0
+#define DCORE0_TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_WQ_HBW_TOTAL_CNTR */
+#define DCORE0_TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_SHIFT 0
+#define DCORE0_TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* DCORE0_TPC0_CFG_IRQ_OCCOUPY_CNTR */
+#define DCORE0_TPC0_CFG_IRQ_OCCOUPY_CNTR_V_SHIFT 0
+#define DCORE0_TPC0_CFG_IRQ_OCCOUPY_CNTR_V_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
new file mode 100644
index 000000000000..4cd9e26a150f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_QM_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_QM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_QM
+ * (Prototype: TPC_NON_TENSOR_DESCRIPTOR)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0x400BAE4
+
+#define mmDCORE0_TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0x400BAE8
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_0 0x400BAEC
+
+#define mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_0 0x400BAF0
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_1 0x400BAF4
+
+#define mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_1 0x400BAF8
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_2 0x400BAFC
+
+#define mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_2 0x400BB00
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_3 0x400BB04
+
+#define mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_3 0x400BB08
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_4 0x400BB0C
+
+#define mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_4 0x400BB10
+
+#define mmDCORE0_TPC0_CFG_QM_KERNEL_CONFIG 0x400BB14
+
+#define mmDCORE0_TPC0_CFG_QM_KERNEL_ID 0x400BB18
+
+#define mmDCORE0_TPC0_CFG_QM_POWER_LOOP 0x400BB1C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_0 0x400BB20
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_1 0x400BB24
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_2 0x400BB28
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_3 0x400BB2C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_4 0x400BB30
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_5 0x400BB34
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_6 0x400BB38
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_7 0x400BB3C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_8 0x400BB40
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_9 0x400BB44
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_10 0x400BB48
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_11 0x400BB4C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_12 0x400BB50
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_13 0x400BB54
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_14 0x400BB58
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_15 0x400BB5C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_16 0x400BB60
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_17 0x400BB64
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_18 0x400BB68
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_19 0x400BB6C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_20 0x400BB70
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_21 0x400BB74
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_22 0x400BB78
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_23 0x400BB7C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_24 0x400BB80
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_25 0x400BB84
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_26 0x400BB88
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_27 0x400BB8C
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_28 0x400BB90
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_29 0x400BB94
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_30 0x400BB98
+
+#define mmDCORE0_TPC0_CFG_QM_SRF_31 0x400BB9C
+
+#define mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC 0x400BBA0
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0 0x400BBA4
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_1 0x400BBA8
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_2 0x400BBAC
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_3 0x400BBB0
+
+#define mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_4 0x400BBB4
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
new file mode 100644
index 000000000000..8da278a3f3fe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_QM_SYNC_OBJECT_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_QM_SYNC_OBJECT_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_QM_SYNC_OBJECT
+ * (Prototype: SYNC_OBJECT)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0x400BADC
+
+#define mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_ADDR 0x400BAE0
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_QM_SYNC_OBJECT_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
new file mode 100644
index 000000000000..2e4ff06e4858
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_QM_TENSOR_0_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_QM_TENSOR_0_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_QM_TENSOR_0
+ * (Prototype: TPC_TENSOR)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0x400B5DC
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0x400B5E0
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0x400B5E4
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0x400B5E8
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0x400B5EC
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0x400B5F0
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0x400B5F4
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0x400B5F8
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0x400B5FC
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0x400B600
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0x400B604
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0x400B608
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0x400B60C
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0x400B610
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_PREF_STRIDE 0x400B614
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_STRIDE_HIGH 0x400B618
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_STRIDE_HIGH 0x400B61C
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_STRIDE_HIGH 0x400B620
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_STRIDE_HIGH 0x400B624
+
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_STRIDE_HIGH 0x400B628
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_QM_TENSOR_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
new file mode 100644
index 000000000000..4d48f0c6880b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG
+ * (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_TPC_COUNT 0x400BC18
+
+#define mmDCORE0_TPC0_CFG_TPC_ID 0x400BC1C
+
+#define mmDCORE0_TPC0_CFG_STALL_ON_ERR 0x400BC20
+
+#define mmDCORE0_TPC0_CFG_CLK_EN 0x400BC24
+
+#define mmDCORE0_TPC0_CFG_IQ_RL_EN 0x400BC28
+
+#define mmDCORE0_TPC0_CFG_IQ_RL_SAT 0x400BC2C
+
+#define mmDCORE0_TPC0_CFG_IQ_RL_RST_TOKEN 0x400BC30
+
+#define mmDCORE0_TPC0_CFG_IQ_RL_TIMEOUT 0x400BC34
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_0 0x400BC38
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_1 0x400BC3C
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_2 0x400BC40
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_3 0x400BC44
+
+#define mmDCORE0_TPC0_CFG_IQ_LBW_CLK_EN 0x400BC48
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_0 0x400BC4C
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_1 0x400BC50
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_2 0x400BC54
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_3 0x400BC58
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_0 0x400BC5C
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_1 0x400BC60
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_2 0x400BC64
+
+#define mmDCORE0_TPC0_CFG_TPC_LOCK_3 0x400BC68
+
+#define mmDCORE0_TPC0_CFG_CGU_SB 0x400BC6C
+
+#define mmDCORE0_TPC0_CFG_CGU_CNT 0x400BC70
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_0 0x400BC74
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_1 0x400BC78
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_2 0x400BC7C
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_3 0x400BC80
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_4 0x400BC84
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_5 0x400BC88
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_6 0x400BC8C
+
+#define mmDCORE0_TPC0_CFG_CGU_CPE_7 0x400BC90
+
+#define mmDCORE0_TPC0_CFG_FP16_FTZ_IN 0x400BC94
+
+#define mmDCORE0_TPC0_CFG_DCACHE_CFG 0x400BC98
+
+#define mmDCORE0_TPC0_CFG_E2E_CRDT_TOP 0x400BC9C
+
+#define mmDCORE0_TPC0_CFG_TPC_DCACHE_L0CD 0x400BCA0
+
+#define mmDCORE0_TPC0_CFG_TPC_SB_L0CD 0x400BCA4
+
+#define mmDCORE0_TPC0_CFG_CONV_ROUND_CSR 0x400BCA8
+
+#define mmDCORE0_TPC0_CFG_TSB_OCCUPANCY 0x400BCAC
+
+#define mmDCORE0_TPC0_CFG_ARB_QNT_HBW_WEIGHT 0x400BCB0
+
+#define mmDCORE0_TPC0_CFG_ARB_QNT_LBW_WEIGHT 0x400BCB4
+
+#define mmDCORE0_TPC0_CFG_ARB_CNT_HBW_WEIGHT 0x400BCB8
+
+#define mmDCORE0_TPC0_CFG_ARB_CNT_LBW_WEIGHT 0x400BCBC
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO 0x400BCC0
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI 0x400BCC4
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO 0x400BCC8
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_HI 0x400BCCC
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_LO 0x400BCD0
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI 0x400BCD4
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO 0x400BCD8
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI 0x400BCDC
+
+#define mmDCORE0_TPC0_CFG_SPE_LFSR_POLYNOM 0x400BCE0
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_GLBL 0x400BCE4
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_0 0x400BCE8
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_1 0x400BCEC
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2 0x400BCF0
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_3 0x400BCF4
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO_0 0x400BCF8
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO_1 0x400BCFC
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO_2 0x400BD00
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_LO_3 0x400BD04
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI_0 0x400BD08
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI_1 0x400BD0C
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI_2 0x400BD10
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_MASK_HI_3 0x400BD14
+
+#define mmDCORE0_TPC0_CFG_FP8_143_BIAS 0x400BD64
+
+#define mmDCORE0_TPC0_CFG_ROUND_CSR 0x400BD68
+
+#define mmDCORE0_TPC0_CFG_HB_PROT 0x400BD6C
+
+#define mmDCORE0_TPC0_CFG_LB_PROT 0x400BD70
+
+#define mmDCORE0_TPC0_CFG_SEMAPHORE 0x400BD74
+
+#define mmDCORE0_TPC0_CFG_VFLAGS 0x400BD78
+
+#define mmDCORE0_TPC0_CFG_SFLAGS 0x400BD7C
+
+#define mmDCORE0_TPC0_CFG_LFSR_POLYNOM 0x400BD80
+
+#define mmDCORE0_TPC0_CFG_STATUS 0x400BD84
+
+#define mmDCORE0_TPC0_CFG_CFG_BASE_ADDRESS_HIGH 0x400BD88
+
+#define mmDCORE0_TPC0_CFG_CFG_SUBTRACT_VALUE 0x400BD8C
+
+#define mmDCORE0_TPC0_CFG_SM_BASE_ADDRESS_HIGH 0x400BD90
+
+#define mmDCORE0_TPC0_CFG_TPC_CMD 0x400BD94
+
+#define mmDCORE0_TPC0_CFG_TPC_EXECUTE 0x400BD98
+
+#define mmDCORE0_TPC0_CFG_TPC_STALL 0x400BD9C
+
+#define mmDCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0x400BDA0
+
+#define mmDCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0x400BDA4
+
+#define mmDCORE0_TPC0_CFG_RD_RATE_LIMIT 0x400BDA8
+
+#define mmDCORE0_TPC0_CFG_WR_RATE_LIMIT 0x400BDAC
+
+#define mmDCORE0_TPC0_CFG_MSS_CONFIG 0x400BDB0
+
+#define mmDCORE0_TPC0_CFG_TPC_INTR_CAUSE 0x400BDB4
+
+#define mmDCORE0_TPC0_CFG_TPC_INTR_MASK 0x400BDB8
+
+#define mmDCORE0_TPC0_CFG_WQ_CREDITS 0x400BDBC
+
+#define mmDCORE0_TPC0_CFG_OPCODE_EXEC 0x400BDC0
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO 0x400BDC4
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI 0x400BDC8
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO 0x400BDCC
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI 0x400BDD0
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO 0x400BDD4
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI 0x400BDD8
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO 0x400BDDC
+
+#define mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI 0x400BDE0
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG_MAX_SIZE 0x400BDE4
+
+#define mmDCORE0_TPC0_CFG_TSB_CFG 0x400BDE8
+
+#define mmDCORE0_TPC0_CFG_TSB_INFLIGHT_CNTR 0x400BDEC
+
+#define mmDCORE0_TPC0_CFG_WQ_INFLIGHT_CNTR 0x400BDF0
+
+#define mmDCORE0_TPC0_CFG_WQ_LBW_TOTAL_CNTR 0x400BDF4
+
+#define mmDCORE0_TPC0_CFG_WQ_HBW_TOTAL_CNTR 0x400BDF8
+
+#define mmDCORE0_TPC0_CFG_IRQ_OCCOUPY_CNTR 0x400BDFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
new file mode 100644
index 000000000000..76ab8a1a7f31
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_CFG_SPECIAL_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_CFG_SPECIAL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_CFG_SPECIAL
+ * (Prototype: SPECIAL_REGS)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_0 0x400BE80
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_1 0x400BE84
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_2 0x400BE88
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_3 0x400BE8C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_4 0x400BE90
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_5 0x400BE94
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_6 0x400BE98
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_7 0x400BE9C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_8 0x400BEA0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_9 0x400BEA4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_10 0x400BEA8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_11 0x400BEAC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_12 0x400BEB0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_13 0x400BEB4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_14 0x400BEB8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_15 0x400BEBC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_16 0x400BEC0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_17 0x400BEC4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_18 0x400BEC8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_19 0x400BECC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_20 0x400BED0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_21 0x400BED4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_22 0x400BED8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_23 0x400BEDC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_24 0x400BEE0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_25 0x400BEE4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_26 0x400BEE8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_27 0x400BEEC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_28 0x400BEF0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_29 0x400BEF4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_30 0x400BEF8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_PRIV_31 0x400BEFC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_GW_DATA 0x400BF00
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_GW_REQ 0x400BF04
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_NUMOF 0x400BF0C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_ECC_SEL 0x400BF10
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_ECC_CTL 0x400BF14
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_ECC_ERR_MASK 0x400BF18
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_ECC_GLBL_ERR_MASK 0x400BF1C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_ECC_ERR_STS 0x400BF20
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_ECC_ERR_ADDR 0x400BF24
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_MEM_RM 0x400BF28
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_ERR_MASK 0x400BF40
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_ERR_ADDR 0x400BF44
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_ERR_CAUSE 0x400BF48
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_0 0x400BF60
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_1 0x400BF64
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_2 0x400BF68
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_3 0x400BF6C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_0 0x400BF80
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_1 0x400BF84
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_2 0x400BF88
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_3 0x400BF8C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_4 0x400BF90
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_5 0x400BF94
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_6 0x400BF98
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_7 0x400BF9C
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_8 0x400BFA0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_9 0x400BFA4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_10 0x400BFA8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_11 0x400BFAC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_12 0x400BFB0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_13 0x400BFB4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_14 0x400BFB8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_15 0x400BFBC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_16 0x400BFC0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_17 0x400BFC4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_18 0x400BFC8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_19 0x400BFCC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_20 0x400BFD0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_21 0x400BFD4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_22 0x400BFD8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_23 0x400BFDC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_24 0x400BFE0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_25 0x400BFE4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_26 0x400BFE8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_27 0x400BFEC
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_28 0x400BFF0
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_29 0x400BFF4
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_30 0x400BFF8
+
+#define mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SEC_31 0x400BFFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_CFG_SPECIAL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
new file mode 100644
index 000000000000..f07da4a24f06
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_EML_BUSMON_0_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_EML_BUSMON_0_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_EML_BUSMON_0
+ * (Prototype: BMON)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CR 0x7000
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_REG_RESET 0x7004
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_INT_CLR 0x7008
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_TRIG_TH 0x700C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S0 0x7020
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S0 0x7024
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E0 0x7028
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E0 0x702C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S1 0x7030
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S1 0x7034
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E1 0x7038
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E1 0x703C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S2 0x7040
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S2 0x7044
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E2 0x7048
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E2 0x704C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_S3 0x7050
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_S3 0x7054
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_E3 0x7058
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_E3 0x705C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_REDUCTION 0x7060
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_IDL 0x7070
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_IDH 0x7074
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_IDENL 0x7078
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_IDENH 0x707C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_LATENCY_SMP 0x7090
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ATTR 0x7100
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ATTREN 0x7104
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_USRENL 0x7108
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_USRL 0x710C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_USRENH 0x7120
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_USRH 0x7124
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CAPTURE 0x7200
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_RELEASE 0x7204
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_WIN_CAPTURE 0x7208
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_BW_WIN 0x720C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MATCH_CNT_SOD 0x7220
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MATCH_CNT_WIN 0x7224
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CYCCNT_L 0x7228
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CYCCNT_H 0x722C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MAXLAT_SOD 0x7304
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MINLAT_SOD 0x7308
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MAXBW_SOD 0x7310
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MINBW_SOD 0x7314
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MAXOS_SOD 0x7320
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_MINOS_SOD 0x7324
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRL_SNAPSHOT 0x7400
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ADDRH_SNAPSHOT 0x7404
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_IDL_SNAPSHOT 0x7408
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_IDH_SNAPSHOT 0x740C
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_ATTR_SNAPSHOT 0x7410
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_STM_TRC 0x7420
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_STM_TRC_DROP 0x7424
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_DEVARCH 0x7FBC
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PMDEVID2 0x7FC0
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PMDEVID1 0x7FC4
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PMDEVID 0x7FC8
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_DEVTYPE 0x7FCC
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR4 0x7FD0
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR5 0x7FD4
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR6 0x7FD8
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR7 0x7FDC
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR0 0x7FE0
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR1 0x7FE4
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR2 0x7FE8
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_PIDR3 0x7FEC
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CIDR0 0x7FF0
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CIDR1 0x7FF4
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CIDR2 0x7FF8
+
+#define mmDCORE0_TPC0_EML_BUSMON_0_CIDR3 0x7FFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_EML_BUSMON_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
new file mode 100644
index 000000000000..aee9cbc78c3d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_EML_ETF_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_EML_ETF_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_EML_ETF
+ * (Prototype: ETF_1KB)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_EML_ETF_RSZ 0x2004
+
+#define mmDCORE0_TPC0_EML_ETF_STS 0x200C
+
+#define mmDCORE0_TPC0_EML_ETF_RRD 0x2010
+
+#define mmDCORE0_TPC0_EML_ETF_RRP 0x2014
+
+#define mmDCORE0_TPC0_EML_ETF_RWP 0x2018
+
+#define mmDCORE0_TPC0_EML_ETF_TRG 0x201C
+
+#define mmDCORE0_TPC0_EML_ETF_CTL 0x2020
+
+#define mmDCORE0_TPC0_EML_ETF_RWD 0x2024
+
+#define mmDCORE0_TPC0_EML_ETF_MODE 0x2028
+
+#define mmDCORE0_TPC0_EML_ETF_LBUFLEVEL 0x202C
+
+#define mmDCORE0_TPC0_EML_ETF_CBUFLEVEL 0x2030
+
+#define mmDCORE0_TPC0_EML_ETF_BUFWM 0x2034
+
+#define mmDCORE0_TPC0_EML_ETF_FFSR 0x2300
+
+#define mmDCORE0_TPC0_EML_ETF_FFCR 0x2304
+
+#define mmDCORE0_TPC0_EML_ETF_PSCR 0x2308
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBMDATA0 0x2ED0
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBMCTR2 0x2ED4
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBMCTR1 0x2ED8
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBMCTR0 0x2EDC
+
+#define mmDCORE0_TPC0_EML_ETF_ITMISCOP0 0x2EE0
+
+#define mmDCORE0_TPC0_EML_ETF_ITTRFLIN 0x2EE8
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBDATA0 0x2EEC
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBCTR2 0x2EF0
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBCTR1 0x2EF4
+
+#define mmDCORE0_TPC0_EML_ETF_ITATBCTR0 0x2EF8
+
+#define mmDCORE0_TPC0_EML_ETF_ITCTRL 0x2F00
+
+#define mmDCORE0_TPC0_EML_ETF_CLAIMSET 0x2FA0
+
+#define mmDCORE0_TPC0_EML_ETF_CLAIMCLR 0x2FA4
+
+#define mmDCORE0_TPC0_EML_ETF_LAR 0x2FB0
+
+#define mmDCORE0_TPC0_EML_ETF_LSR 0x2FB4
+
+#define mmDCORE0_TPC0_EML_ETF_AUTHSTATUS 0x2FB8
+
+#define mmDCORE0_TPC0_EML_ETF_DEVID 0x2FC8
+
+#define mmDCORE0_TPC0_EML_ETF_DEVTYPE 0x2FCC
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID4 0x2FD0
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID5 0x2FD4
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID6 0x2FD8
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID7 0x2FDC
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID0 0x2FE0
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID1 0x2FE4
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID2 0x2FE8
+
+#define mmDCORE0_TPC0_EML_ETF_PERIPHID3 0x2FEC
+
+#define mmDCORE0_TPC0_EML_ETF_COMPID0 0x2FF0
+
+#define mmDCORE0_TPC0_EML_ETF_COMPID1 0x2FF4
+
+#define mmDCORE0_TPC0_EML_ETF_COMPID2 0x2FF8
+
+#define mmDCORE0_TPC0_EML_ETF_COMPID3 0x2FFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_EML_ETF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
new file mode 100644
index 000000000000..dee670b666ee
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_EML_FUNNEL_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_EML_FUNNEL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_EML_FUNNEL
+ * (Prototype: FUNNEL_2X1)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CTRL_REG 0x6000
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PRIORITY_CTRL_REG 0x6004
+
+#define mmDCORE0_TPC0_EML_FUNNEL_ITATBDATA0 0x6EEC
+
+#define mmDCORE0_TPC0_EML_FUNNEL_ITATBCTR2 0x6EF0
+
+#define mmDCORE0_TPC0_EML_FUNNEL_ITATBCTR1 0x6EF4
+
+#define mmDCORE0_TPC0_EML_FUNNEL_ITATBCTR0 0x6EF8
+
+#define mmDCORE0_TPC0_EML_FUNNEL_ITCTRL 0x6F00
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CLAIMSET 0x6FA0
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CLAIMCLR 0x6FA4
+
+#define mmDCORE0_TPC0_EML_FUNNEL_LOCKACCESS 0x6FB0
+
+#define mmDCORE0_TPC0_EML_FUNNEL_LOCKSTATUS 0x6FB4
+
+#define mmDCORE0_TPC0_EML_FUNNEL_AUTHSTATUS 0x6FB8
+
+#define mmDCORE0_TPC0_EML_FUNNEL_DEVID 0x6FC8
+
+#define mmDCORE0_TPC0_EML_FUNNEL_DEVTYPE 0x6FCC
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PIDR4 0x6FD0
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PERIPHID5 0x6FD4
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PERIPHID6 0x6FD8
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PERIPHID7 0x6FDC
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PIDR0 0x6FE0
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PIDR1 0x6FE4
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PIDR2 0x6FE8
+
+#define mmDCORE0_TPC0_EML_FUNNEL_PIDR3 0x6FEC
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CID0 0x6FF0
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CID1 0x6FF4
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CID2 0x6FF8
+
+#define mmDCORE0_TPC0_EML_FUNNEL_CID3 0x6FFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_EML_FUNNEL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
new file mode 100644
index 000000000000..580ae57476bd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_EML_SPMU_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_EML_SPMU_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_EML_SPMU
+ * (Prototype: SPMU)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTR0_EL0 0x1000
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTR1_EL0 0x1008
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTR2_EL0 0x1010
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTR3_EL0 0x1018
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTR4_EL0 0x1020
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTR5_EL0 0x1028
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCCNTR_L_EL0 0x10F8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCCNTR_H_EL0 0x10FC
+
+#define mmDCORE0_TPC0_EML_SPMU_PMTRC 0x1200
+
+#define mmDCORE0_TPC0_EML_SPMU_TRC_CTRL_HOST 0x1204
+
+#define mmDCORE0_TPC0_EML_SPMU_TRC_STAT_HOST 0x1208
+
+#define mmDCORE0_TPC0_EML_SPMU_TRC_EN_HOST 0x120C
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVTYPER0_EL0 0x1400
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVTYPER1_EL0 0x1404
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVTYPER2_EL0 0x1408
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVTYPER3_EL0 0x140C
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVTYPER4_EL0 0x1410
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVTYPER5_EL0 0x1414
+
+#define mmDCORE0_TPC0_EML_SPMU_PMSSR 0x1610
+
+#define mmDCORE0_TPC0_EML_SPMU_PMOVSSR 0x1614
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCCNTSR_L 0x1618
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCCNTSR_H 0x161C
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR0 0x1620
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR1 0x1624
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR2 0x1628
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR3 0x162C
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR4 0x1630
+
+#define mmDCORE0_TPC0_EML_SPMU_PMEVCNTSR5 0x1634
+
+#define mmDCORE0_TPC0_EML_SPMU_PMSCR 0x16F0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMSRR 0x16F4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCNTENSET_EL0 0x1C00
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCNTENCLR_EL0 0x1C20
+
+#define mmDCORE0_TPC0_EML_SPMU_PMINTENSET_EL1 0x1C40
+
+#define mmDCORE0_TPC0_EML_SPMU_PMINTENCLR_EL1 0x1C60
+
+#define mmDCORE0_TPC0_EML_SPMU_PMOVSCLR_EL0 0x1C80
+
+#define mmDCORE0_TPC0_EML_SPMU_PMSWINC_EL0 0x1CA0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMOVSSET_EL0 0x1CC0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCFGR 0x1E00
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCR_EL0 0x1E04
+
+#define mmDCORE0_TPC0_EML_SPMU_PMITCTRL 0x1F00
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCLAIMSET 0x1FA0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCLAIMCLR 0x1FA4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVAFF0 0x1FA8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVAFF1 0x1FAC
+
+#define mmDCORE0_TPC0_EML_SPMU_PMLAR 0x1FB0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMLSR 0x1FB4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMAUTHSTATUS 0x1FB8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVARCH 0x1FBC
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVID2 0x1FC0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVID1 0x1FC4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVID 0x1FC8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMDEVTYPE 0x1FCC
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR4 0x1FD0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR5 0x1FD4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR6 0x1FD8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR7 0x1FDC
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR0 0x1FE0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR1 0x1FE4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR2 0x1FE8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMPIDR3 0x1FEC
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCIDR0 0x1FF0
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCIDR1 0x1FF4
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCIDR2 0x1FF8
+
+#define mmDCORE0_TPC0_EML_SPMU_PMCIDR3 0x1FFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_EML_SPMU_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
new file mode 100644
index 000000000000..91686c563fe5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_EML_STM_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_EML_STM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_EML_STM
+ * (Prototype: STM)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_EML_STM_STMDMASTARTR 0x3C04
+
+#define mmDCORE0_TPC0_EML_STM_STMDMASTOPR 0x3C08
+
+#define mmDCORE0_TPC0_EML_STM_STMDMASTATR 0x3C0C
+
+#define mmDCORE0_TPC0_EML_STM_STMDMACTLR 0x3C10
+
+#define mmDCORE0_TPC0_EML_STM_STMDMAIDR 0x3CFC
+
+#define mmDCORE0_TPC0_EML_STM_STMHEER 0x3D00
+
+#define mmDCORE0_TPC0_EML_STM_STMHETER 0x3D20
+
+#define mmDCORE0_TPC0_EML_STM_STMHEBSR 0x3D60
+
+#define mmDCORE0_TPC0_EML_STM_STMHEMCR 0x3D64
+
+#define mmDCORE0_TPC0_EML_STM_STMHEEXTMUXR 0x3D68
+
+#define mmDCORE0_TPC0_EML_STM_STMHEMASTR 0x3DF4
+
+#define mmDCORE0_TPC0_EML_STM_STMHEFEAT1R 0x3DF8
+
+#define mmDCORE0_TPC0_EML_STM_STMHEIDR 0x3DFC
+
+#define mmDCORE0_TPC0_EML_STM_STMSPER 0x3E00
+
+#define mmDCORE0_TPC0_EML_STM_STMSPTER 0x3E20
+
+#define mmDCORE0_TPC0_EML_STM_STMSPSCR 0x3E60
+
+#define mmDCORE0_TPC0_EML_STM_STMSPMSCR 0x3E64
+
+#define mmDCORE0_TPC0_EML_STM_STMSPOVERRIDER 0x3E68
+
+#define mmDCORE0_TPC0_EML_STM_STMSPMOVERRIDER 0x3E6C
+
+#define mmDCORE0_TPC0_EML_STM_STMSPTRIGCSR 0x3E70
+
+#define mmDCORE0_TPC0_EML_STM_STMTCSR 0x3E80
+
+#define mmDCORE0_TPC0_EML_STM_STMTSSTIMR 0x3E84
+
+#define mmDCORE0_TPC0_EML_STM_STMTSFREQR 0x3E8C
+
+#define mmDCORE0_TPC0_EML_STM_STMSYNCR 0x3E90
+
+#define mmDCORE0_TPC0_EML_STM_STMAUXCR 0x3E94
+
+#define mmDCORE0_TPC0_EML_STM_STMFEAT1R 0x3EA0
+
+#define mmDCORE0_TPC0_EML_STM_STMFEAT2R 0x3EA4
+
+#define mmDCORE0_TPC0_EML_STM_STMFEAT3R 0x3EA8
+
+#define mmDCORE0_TPC0_EML_STM_STMITTRIGGER 0x3EE8
+
+#define mmDCORE0_TPC0_EML_STM_STMITATBDATA0 0x3EEC
+
+#define mmDCORE0_TPC0_EML_STM_STMITATBCTR2 0x3EF0
+
+#define mmDCORE0_TPC0_EML_STM_STMITATBID 0x3EF4
+
+#define mmDCORE0_TPC0_EML_STM_STMITATBCTR0 0x3EF8
+
+#define mmDCORE0_TPC0_EML_STM_STMITCTRL 0x3F00
+
+#define mmDCORE0_TPC0_EML_STM_STMCLAIMSET 0x3FA0
+
+#define mmDCORE0_TPC0_EML_STM_STMCLAIMCLR 0x3FA4
+
+#define mmDCORE0_TPC0_EML_STM_STMLAR 0x3FB0
+
+#define mmDCORE0_TPC0_EML_STM_STMLSR 0x3FB4
+
+#define mmDCORE0_TPC0_EML_STM_STMAUTHSTATUS 0x3FB8
+
+#define mmDCORE0_TPC0_EML_STM_STMDEVARCH 0x3FBC
+
+#define mmDCORE0_TPC0_EML_STM_STMDEVID 0x3FC8
+
+#define mmDCORE0_TPC0_EML_STM_STMDEVTYPE 0x3FCC
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR4 0x3FD0
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR5 0x3FD4
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR6 0x3FD8
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR7 0x3FDC
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR0 0x3FE0
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR1 0x3FE4
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR2 0x3FE8
+
+#define mmDCORE0_TPC0_EML_STM_STMPIDR3 0x3FEC
+
+#define mmDCORE0_TPC0_EML_STM_STMCIDR0 0x3FF0
+
+#define mmDCORE0_TPC0_EML_STM_STMCIDR1 0x3FF4
+
+#define mmDCORE0_TPC0_EML_STM_STMCIDR2 0x3FF8
+
+#define mmDCORE0_TPC0_EML_STM_STMCIDR3 0x3FFC
+
+#endif /* ASIC_REG_DCORE0_TPC0_EML_STM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
new file mode 100644
index 000000000000..e007dabc5382
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_QM_ARC_AUX_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_QM_ARC_AUX_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_QM_ARC_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_RUN_HALT_REQ 0x4008100
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_RUN_HALT_ACK 0x4008104
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_RST_VEC_ADDR 0x4008108
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DBG_MODE 0x400810C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CLUSTER_NUM 0x4008110
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_NUM 0x4008114
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_WAKE_UP_EVENT 0x4008118
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_SYS_ADDR_BASE 0x400811C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CTI_AP_STS 0x4008120
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CTI_CFG_MUX_SEL 0x4008124
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_RST 0x4008128
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_RST_REQ 0x400812C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SRAM_LSB_ADDR 0x4008130
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SRAM_MSB_ADDR 0x4008134
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_PCIE_LSB_ADDR 0x4008138
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_PCIE_MSB_ADDR 0x400813C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_LSB_ADDR 0x4008140
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_MSB_ADDR 0x4008144
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM0_LSB_ADDR 0x4008150
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM0_MSB_ADDR 0x4008154
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM1_LSB_ADDR 0x4008158
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM1_MSB_ADDR 0x400815C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM2_LSB_ADDR 0x4008160
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM2_MSB_ADDR 0x4008164
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM3_LSB_ADDR 0x4008168
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM3_MSB_ADDR 0x400816C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM0_OFFSET 0x4008170
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM1_OFFSET 0x4008174
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM2_OFFSET 0x4008178
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_HBM3_OFFSET 0x400817C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x4008180
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x4008184
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x4008188
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x400818C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x4008190
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x4008194
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x4008198
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x400819C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x40081A0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x40081A4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x40081A8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x40081AC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x40081B0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x40081B4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_CBU_AWCACHE_OVR 0x40081B8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_LBU_AWCACHE_OVR 0x40081BC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_0 0x40081C0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_1 0x40081C4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_2 0x40081C8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_3 0x40081CC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_4 0x40081D0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_5 0x40081D4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_6 0x40081D8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CONTEXT_ID_7 0x40081DC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_0 0x40081E0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_1 0x40081E4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_2 0x40081E8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_3 0x40081EC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_4 0x40081F0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_5 0x40081F4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_6 0x40081F8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_7 0x40081FC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_0 0x4008200
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_1 0x4008204
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_2 0x4008208
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_3 0x400820C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_4 0x4008210
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_5 0x4008214
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_6 0x4008218
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_7 0x400821C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_8 0x4008220
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_9 0x4008224
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_10 0x4008228
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_11 0x400822C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_12 0x4008230
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_13 0x4008234
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_14 0x4008238
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SW_INTR_15 0x400823C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_IRQ_INTR_MASK_0 0x4008280
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_IRQ_INTR_MASK_1 0x4008284
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_SEI_INTR_STS 0x4008290
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_SEI_INTR_CLR 0x4008294
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_SEI_INTR_MASK 0x4008298
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_EXCPTN_CAUSE 0x400829C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SEI_INTR_HALT_EN 0x40082A0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_SEI_INTR_HALT_MASK 0x40082A4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_SEI_INTR_HALT_MASK 0x40082A8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REI_INTR_STS 0x40082B0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REI_INTR_CLR 0x40082B4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REI_INTR_MASK 0x40082B8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_ECC_ERR_ADDR 0x40082BC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_ECC_SYNDROME 0x40082C0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_I_CACHE_ECC_ERR_ADDR 0x40082C4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_I_CACHE_ECC_SYNDROME 0x40082C8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_D_CACHE_ECC_ERR_ADDR 0x40082CC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_D_CACHE_ECC_SYNDROME 0x40082D0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBW_TRMINATE_AWADDR_ERR 0x40082E0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBW_TRMINATE_ARADDR_ERR 0x40082E4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_LBW_TERMINATE_BRESP 0x40082E8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_LBW_TERMINATE_RRESP 0x40082EC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXLEN 0x40082F0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXSIZE 0x40082F4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_0 0x4008300
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_1 0x4008304
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_2 0x4008308
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_3 0x400830C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_4 0x4008310
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_5 0x4008314
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_6 0x4008318
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_7 0x400831C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_TOTAL_CBU_WR_CNT 0x4008320
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_INFLIGHT_CBU_WR_CNT 0x4008324
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_TOTAL_CBU_RD_CNT 0x4008328
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_INFLIGHT_CBU_RD_CNT 0x400832C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_TOTAL_LBU_WR_CNT 0x4008330
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_INFLIGHT_LBU_WR_CNT 0x4008334
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_TOTAL_LBU_RD_CNT 0x4008338
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT 0x400833C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_ARUSER_OVR 0x4008350
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_ARUSER_OVR_EN 0x4008354
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_AWUSER_OVR 0x4008358
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_AWUSER_OVR_EN 0x400835C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR 0x4008360
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR_EN 0x4008364
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR 0x4008368
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR_EN 0x400836C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_AXCACHE_OVR 0x4008370
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_LOCK_OVR 0x4008374
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_PROT_OVR 0x4008378
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_MAX_OUTSTANDING 0x400837C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_EARLY_BRESP_EN 0x4008380
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORCE_RSP_OK 0x4008384
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_NO_WR_INFLIGHT 0x400838C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_SEI_INTR_ID 0x4008390
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_ARUSER_OVR 0x4008400
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_ARUSER_OVR_EN 0x4008404
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_AWUSER_OVR 0x4008408
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_AWUSER_OVR_EN 0x400840C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_AXCACHE_OVR 0x4008420
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_LOCK_OVR 0x4008424
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_PROT_OVR 0x4008428
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_MAX_OUTSTANDING 0x400842C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_EARLY_BRESP_EN 0x4008430
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_FORCE_RSP_OK 0x4008434
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_NO_WR_INFLIGHT 0x400843C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBU_SEI_INTR_ID 0x4008440
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0 0x4008500
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_1 0x4008504
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_2 0x4008508
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_3 0x400850C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_4 0x4008510
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_5 0x4008514
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_6 0x4008518
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_7 0x400851C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_0 0x4008520
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_1 0x4008524
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_2 0x4008528
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_3 0x400852C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_4 0x4008530
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_5 0x4008534
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_6 0x4008538
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_SIZE_7 0x400853C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_0 0x4008540
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_1 0x4008544
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_2 0x4008548
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_3 0x400854C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_4 0x4008550
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_5 0x4008554
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_6 0x4008558
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PI_7 0x400855C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_0 0x4008560
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_1 0x4008564
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_2 0x4008568
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_3 0x400856C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_4 0x4008570
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_5 0x4008574
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_6 0x4008578
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_CI_7 0x400857C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_0 0x4008580
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_1 0x4008584
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_2 0x4008588
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_3 0x400858C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_4 0x4008590
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_5 0x4008594
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_6 0x4008598
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_7 0x400859C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x40085A0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x40085A4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x40085A8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x40085AC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x40085B0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x40085B4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x40085B8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x40085BC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x40085C0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x40085C4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x40085C8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x40085CC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x40085D0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x40085D4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x40085D8
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x40085DC
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x40085E0
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_NIC_Q_VLD_ENTRY_MASK 0x40085E4
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_DROP_EN 0x4008620
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_WARN_MSG 0x4008624
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG 0x4008628
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_GEN_AXI_AWPROT 0x4008630
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_GEN_AXI_AWUSER 0x4008634
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_GEN_AXI_AWBURST 0x4008638
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_GEN_AXI_AWLOCK 0x400863C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_GEN_AXI_AWCACHE 0x4008640
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_WRR_ARB_WEIGHT 0x4008644
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x4008648
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT 0x400864C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x4008650
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x4008654
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_CQ_SHADOW_CI 0x4008658
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI 0x400865C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_AUX2APB_PROT 0x4008700
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBW_FORK_WIN_EN 0x4008704
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x4008708
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x400870C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x4008710
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x4008714
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR0 0x4008718
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK0 0x400871C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR1 0x4008720
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK1 0x4008724
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR0 0x4008728
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR1 0x400872C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x4008730
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x4008734
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x4008738
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x400873C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_WIN_EN 0x4008740
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_LSB 0x4008750
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_MSB 0x4008754
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_LSB 0x4008758
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_MSB 0x400875C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_LSB 0x4008760
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_MSB 0x4008764
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_LSB 0x4008768
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_MSB 0x400876C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_LSB 0x4008770
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_MSB 0x4008774
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_LSB 0x4008778
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_MSB 0x400877C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_LSB 0x4008780
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_MSB 0x4008784
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_LSB 0x4008788
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_MSB 0x400878C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_LSB 0x4008790
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_MSB 0x4008794
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_CBU_TERMINATE_BRESP 0x4008798
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_CBU_TERMINATE_RRESP 0x400879C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_0 0x4008800
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_1 0x4008804
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_2 0x4008808
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_3 0x400880C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_4 0x4008810
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_5 0x4008814
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_6 0x4008818
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_7 0x400881C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_8 0x4008820
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_9 0x4008824
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_10 0x4008828
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_11 0x400882C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_12 0x4008830
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_13 0x4008834
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_14 0x4008838
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_REGION_CFG_15 0x400883C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_TRMINATE_AWADDR_ERR 0x4008840
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_DCCM_TRMINATE_ARADDR_ERR 0x4008844
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_DCCM_TERMINATE_BRESP 0x4008848
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_DCCM_TERMINATE_RRESP 0x400884C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_DCCM_TERMINATE_EN 0x4008850
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_CFG_DCCM_SECURE_REGION 0x4008854
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x4008900
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_AXI_ORDERING_CTL 0x4008904
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x4008908
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR 0x400890C
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_ARC_ACC_ENGS_BUSER 0x4008910
+
+#define mmDCORE0_TPC0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN 0x4008920
+
+#endif /* ASIC_REG_DCORE0_TPC0_QM_ARC_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..149b85f5f045
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_ASID 0x400AB80
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_MMU_BP 0x400AB84
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x400AB88
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x400AB8C
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x400AB90
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x400AB94
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_QOS 0x400AB98
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_RSVD 0x400AB9C
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x400ABA0
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_CORE 0x400ABA4
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_E2E_COORD 0x400ABA8
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x400ABB0
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x400ABB4
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x400ABB8
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x400ABBC
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_LB_COORD 0x400ABC0
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_LB_LOCK 0x400ABC4
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_LB_RSVD 0x400ABC8
+
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_LB_OVRD 0x400ABCC
+
+#endif /* ASIC_REG_DCORE0_TPC0_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
new file mode 100644
index 000000000000..d4aad1875ad6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_QM_CGM_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_QM_CGM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_QM_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_QM_CGM_CFG 0x400AD80
+
+#define mmDCORE0_TPC0_QM_CGM_STS 0x400AD84
+
+#define mmDCORE0_TPC0_QM_CGM_CFG1 0x400AD88
+
+#endif /* ASIC_REG_DCORE0_TPC0_QM_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
new file mode 100644
index 000000000000..cca8683cbca1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_TPC0_QM_REGS_H_
+#define ASIC_REG_DCORE0_TPC0_QM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_TPC0_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDCORE0_TPC0_QM_GLBL_CFG0 0x400A000
+
+#define mmDCORE0_TPC0_QM_GLBL_CFG1 0x400A004
+
+#define mmDCORE0_TPC0_QM_GLBL_CFG2 0x400A008
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_CFG 0x400A00C
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_CFG1 0x400A010
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_ARC_HALT_EN 0x400A014
+
+#define mmDCORE0_TPC0_QM_GLBL_AXCACHE 0x400A018
+
+#define mmDCORE0_TPC0_QM_GLBL_STS0 0x400A01C
+
+#define mmDCORE0_TPC0_QM_GLBL_STS1 0x400A020
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_STS_0 0x400A024
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_STS_1 0x400A028
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_STS_2 0x400A02C
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_STS_3 0x400A030
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_STS_4 0x400A034
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_MSG_EN_0 0x400A038
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_MSG_EN_1 0x400A03C
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_MSG_EN_2 0x400A040
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_MSG_EN_3 0x400A044
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_MSG_EN_4 0x400A048
+
+#define mmDCORE0_TPC0_QM_GLBL_PROT 0x400A04C
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_LO_0 0x400A050
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_LO_1 0x400A054
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_LO_2 0x400A058
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_LO_3 0x400A05C
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_HI_0 0x400A060
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_HI_1 0x400A064
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_HI_2 0x400A068
+
+#define mmDCORE0_TPC0_QM_PQ_BASE_HI_3 0x400A06C
+
+#define mmDCORE0_TPC0_QM_PQ_SIZE_0 0x400A070
+
+#define mmDCORE0_TPC0_QM_PQ_SIZE_1 0x400A074
+
+#define mmDCORE0_TPC0_QM_PQ_SIZE_2 0x400A078
+
+#define mmDCORE0_TPC0_QM_PQ_SIZE_3 0x400A07C
+
+#define mmDCORE0_TPC0_QM_PQ_PI_0 0x400A080
+
+#define mmDCORE0_TPC0_QM_PQ_PI_1 0x400A084
+
+#define mmDCORE0_TPC0_QM_PQ_PI_2 0x400A088
+
+#define mmDCORE0_TPC0_QM_PQ_PI_3 0x400A08C
+
+#define mmDCORE0_TPC0_QM_PQ_CI_0 0x400A090
+
+#define mmDCORE0_TPC0_QM_PQ_CI_1 0x400A094
+
+#define mmDCORE0_TPC0_QM_PQ_CI_2 0x400A098
+
+#define mmDCORE0_TPC0_QM_PQ_CI_3 0x400A09C
+
+#define mmDCORE0_TPC0_QM_PQ_CFG0_0 0x400A0A0
+
+#define mmDCORE0_TPC0_QM_PQ_CFG0_1 0x400A0A4
+
+#define mmDCORE0_TPC0_QM_PQ_CFG0_2 0x400A0A8
+
+#define mmDCORE0_TPC0_QM_PQ_CFG0_3 0x400A0AC
+
+#define mmDCORE0_TPC0_QM_PQ_CFG1_0 0x400A0B0
+
+#define mmDCORE0_TPC0_QM_PQ_CFG1_1 0x400A0B4
+
+#define mmDCORE0_TPC0_QM_PQ_CFG1_2 0x400A0B8
+
+#define mmDCORE0_TPC0_QM_PQ_CFG1_3 0x400A0BC
+
+#define mmDCORE0_TPC0_QM_PQ_STS0_0 0x400A0C0
+
+#define mmDCORE0_TPC0_QM_PQ_STS0_1 0x400A0C4
+
+#define mmDCORE0_TPC0_QM_PQ_STS0_2 0x400A0C8
+
+#define mmDCORE0_TPC0_QM_PQ_STS0_3 0x400A0CC
+
+#define mmDCORE0_TPC0_QM_PQ_STS1_0 0x400A0D0
+
+#define mmDCORE0_TPC0_QM_PQ_STS1_1 0x400A0D4
+
+#define mmDCORE0_TPC0_QM_PQ_STS1_2 0x400A0D8
+
+#define mmDCORE0_TPC0_QM_PQ_STS1_3 0x400A0DC
+
+#define mmDCORE0_TPC0_QM_CQ_CFG0_0 0x400A0E0
+
+#define mmDCORE0_TPC0_QM_CQ_CFG0_1 0x400A0E4
+
+#define mmDCORE0_TPC0_QM_CQ_CFG0_2 0x400A0E8
+
+#define mmDCORE0_TPC0_QM_CQ_CFG0_3 0x400A0EC
+
+#define mmDCORE0_TPC0_QM_CQ_CFG0_4 0x400A0F0
+
+#define mmDCORE0_TPC0_QM_CQ_STS0_0 0x400A0F4
+
+#define mmDCORE0_TPC0_QM_CQ_STS0_1 0x400A0F8
+
+#define mmDCORE0_TPC0_QM_CQ_STS0_2 0x400A0FC
+
+#define mmDCORE0_TPC0_QM_CQ_STS0_3 0x400A100
+
+#define mmDCORE0_TPC0_QM_CQ_STS0_4 0x400A104
+
+#define mmDCORE0_TPC0_QM_CQ_CFG1_0 0x400A108
+
+#define mmDCORE0_TPC0_QM_CQ_CFG1_1 0x400A10C
+
+#define mmDCORE0_TPC0_QM_CQ_CFG1_2 0x400A110
+
+#define mmDCORE0_TPC0_QM_CQ_CFG1_3 0x400A114
+
+#define mmDCORE0_TPC0_QM_CQ_CFG1_4 0x400A118
+
+#define mmDCORE0_TPC0_QM_CQ_STS1_0 0x400A11C
+
+#define mmDCORE0_TPC0_QM_CQ_STS1_1 0x400A120
+
+#define mmDCORE0_TPC0_QM_CQ_STS1_2 0x400A124
+
+#define mmDCORE0_TPC0_QM_CQ_STS1_3 0x400A128
+
+#define mmDCORE0_TPC0_QM_CQ_STS1_4 0x400A12C
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_0 0x400A150
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_0 0x400A154
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_0 0x400A158
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_0 0x400A15C
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_1 0x400A160
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_1 0x400A164
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_1 0x400A168
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_1 0x400A16C
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_2 0x400A170
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_2 0x400A174
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_2 0x400A178
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_2 0x400A17C
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_3 0x400A180
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_3 0x400A184
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_3 0x400A188
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_3 0x400A18C
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_4 0x400A190
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_4 0x400A194
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_4 0x400A198
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_4 0x400A19C
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_STS_0 0x400A1A0
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_STS_1 0x400A1A4
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_STS_2 0x400A1A8
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_STS_3 0x400A1AC
+
+#define mmDCORE0_TPC0_QM_CQ_TSIZE_STS_4 0x400A1B0
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_STS_0 0x400A1B4
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_STS_1 0x400A1B8
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_STS_2 0x400A1BC
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_STS_3 0x400A1C0
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_LO_STS_4 0x400A1C4
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_STS_0 0x400A1C8
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_STS_1 0x400A1CC
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_STS_2 0x400A1D0
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_STS_3 0x400A1D4
+
+#define mmDCORE0_TPC0_QM_CQ_PTR_HI_STS_4 0x400A1D8
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_STS_0 0x400A1DC
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_STS_1 0x400A1E0
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_STS_2 0x400A1E4
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_STS_3 0x400A1E8
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_STS_4 0x400A1EC
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_0 0x400A1F0
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_1 0x400A1F4
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_2 0x400A1F8
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_3 0x400A1FC
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_4 0x400A200
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_0 0x400A204
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_1 0x400A208
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_2 0x400A20C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_3 0x400A210
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_4 0x400A214
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_0 0x400A218
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_1 0x400A21C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_2 0x400A220
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_3 0x400A224
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_4 0x400A228
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_0 0x400A22C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_1 0x400A230
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_2 0x400A234
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_3 0x400A238
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_4 0x400A23C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_0 0x400A240
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_1 0x400A244
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_2 0x400A248
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_3 0x400A24C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_4 0x400A250
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_0 0x400A254
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_1 0x400A258
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_2 0x400A25C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_3 0x400A260
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_4 0x400A264
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_0 0x400A268
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_1 0x400A26C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_2 0x400A270
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_3 0x400A274
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_4 0x400A278
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_0 0x400A27C
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_1 0x400A280
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_2 0x400A284
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_3 0x400A288
+
+#define mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_4 0x400A28C
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_0 0x400A290
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_1 0x400A294
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_2 0x400A298
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_3 0x400A29C
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_4 0x400A2A0
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_0 0x400A2A4
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_1 0x400A2A8
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_2 0x400A2AC
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_3 0x400A2B0
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_4 0x400A2B4
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_0 0x400A2B8
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_1 0x400A2BC
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_2 0x400A2C0
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_3 0x400A2C4
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_4 0x400A2C8
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_0 0x400A2CC
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_1 0x400A2D0
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_2 0x400A2D4
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_3 0x400A2D8
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_4 0x400A2DC
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_CNT_0 0x400A2E0
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_CNT_1 0x400A2E4
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_CNT_2 0x400A2E8
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_CNT_3 0x400A2EC
+
+#define mmDCORE0_TPC0_QM_CP_FENCE0_CNT_4 0x400A2F0
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_CNT_0 0x400A2F4
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_CNT_1 0x400A2F8
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_CNT_2 0x400A2FC
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_CNT_3 0x400A300
+
+#define mmDCORE0_TPC0_QM_CP_FENCE1_CNT_4 0x400A304
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_CNT_0 0x400A308
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_CNT_1 0x400A30C
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_CNT_2 0x400A310
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_CNT_3 0x400A314
+
+#define mmDCORE0_TPC0_QM_CP_FENCE2_CNT_4 0x400A318
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_CNT_0 0x400A31C
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_CNT_1 0x400A320
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_CNT_2 0x400A324
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_CNT_3 0x400A328
+
+#define mmDCORE0_TPC0_QM_CP_FENCE3_CNT_4 0x400A32C
+
+#define mmDCORE0_TPC0_QM_CP_BARRIER_CFG 0x400A330
+
+#define mmDCORE0_TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0x400A334
+
+#define mmDCORE0_TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0x400A338
+
+#define mmDCORE0_TPC0_QM_CP_LDMA_TSIZE_OFFSET 0x400A33C
+
+#define mmDCORE0_TPC0_QM_CP_CQ_PTR_LO_OFFSET_0 0x400A340
+
+#define mmDCORE0_TPC0_QM_CP_CQ_PTR_LO_OFFSET_1 0x400A344
+
+#define mmDCORE0_TPC0_QM_CP_CQ_PTR_LO_OFFSET_2 0x400A348
+
+#define mmDCORE0_TPC0_QM_CP_CQ_PTR_LO_OFFSET_3 0x400A34C
+
+#define mmDCORE0_TPC0_QM_CP_CQ_PTR_LO_OFFSET_4 0x400A350
+
+#define mmDCORE0_TPC0_QM_CP_STS_0 0x400A368
+
+#define mmDCORE0_TPC0_QM_CP_STS_1 0x400A36C
+
+#define mmDCORE0_TPC0_QM_CP_STS_2 0x400A370
+
+#define mmDCORE0_TPC0_QM_CP_STS_3 0x400A374
+
+#define mmDCORE0_TPC0_QM_CP_STS_4 0x400A378
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_LO_0 0x400A37C
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_LO_1 0x400A380
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_LO_2 0x400A384
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_LO_3 0x400A388
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_LO_4 0x400A38C
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_HI_0 0x400A390
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_HI_1 0x400A394
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_HI_2 0x400A398
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_HI_3 0x400A39C
+
+#define mmDCORE0_TPC0_QM_CP_CURRENT_INST_HI_4 0x400A3A0
+
+#define mmDCORE0_TPC0_QM_CP_PRED_0 0x400A3A4
+
+#define mmDCORE0_TPC0_QM_CP_PRED_1 0x400A3A8
+
+#define mmDCORE0_TPC0_QM_CP_PRED_2 0x400A3AC
+
+#define mmDCORE0_TPC0_QM_CP_PRED_3 0x400A3B0
+
+#define mmDCORE0_TPC0_QM_CP_PRED_4 0x400A3B4
+
+#define mmDCORE0_TPC0_QM_CP_PRED_UPEN_0 0x400A3B8
+
+#define mmDCORE0_TPC0_QM_CP_PRED_UPEN_1 0x400A3BC
+
+#define mmDCORE0_TPC0_QM_CP_PRED_UPEN_2 0x400A3C0
+
+#define mmDCORE0_TPC0_QM_CP_PRED_UPEN_3 0x400A3C4
+
+#define mmDCORE0_TPC0_QM_CP_PRED_UPEN_4 0x400A3C8
+
+#define mmDCORE0_TPC0_QM_CP_DBG_0_0 0x400A3CC
+
+#define mmDCORE0_TPC0_QM_CP_DBG_0_1 0x400A3D0
+
+#define mmDCORE0_TPC0_QM_CP_DBG_0_2 0x400A3D4
+
+#define mmDCORE0_TPC0_QM_CP_DBG_0_3 0x400A3D8
+
+#define mmDCORE0_TPC0_QM_CP_DBG_0_4 0x400A3DC
+
+#define mmDCORE0_TPC0_QM_CP_CPDMA_UP_CRED_0 0x400A3E0
+
+#define mmDCORE0_TPC0_QM_CP_CPDMA_UP_CRED_1 0x400A3E4
+
+#define mmDCORE0_TPC0_QM_CP_CPDMA_UP_CRED_2 0x400A3E8
+
+#define mmDCORE0_TPC0_QM_CP_CPDMA_UP_CRED_3 0x400A3EC
+
+#define mmDCORE0_TPC0_QM_CP_CPDMA_UP_CRED_4 0x400A3F0
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_LO_0 0x400A3F4
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_LO_1 0x400A3F8
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_LO_2 0x400A3FC
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_LO_3 0x400A400
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_LO_4 0x400A404
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_HI_0 0x400A408
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_HI_1 0x400A40C
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_HI_2 0x400A410
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_HI_3 0x400A414
+
+#define mmDCORE0_TPC0_QM_CP_IN_DATA_HI_4 0x400A418
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_LO_0 0x400A41C
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_LO_1 0x400A420
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_LO_2 0x400A424
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_LO_3 0x400A428
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_HI_0 0x400A42C
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_HI_1 0x400A430
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_HI_2 0x400A434
+
+#define mmDCORE0_TPC0_QM_PQC_HBW_BASE_HI_3 0x400A438
+
+#define mmDCORE0_TPC0_QM_PQC_SIZE_0 0x400A43C
+
+#define mmDCORE0_TPC0_QM_PQC_SIZE_1 0x400A440
+
+#define mmDCORE0_TPC0_QM_PQC_SIZE_2 0x400A444
+
+#define mmDCORE0_TPC0_QM_PQC_SIZE_3 0x400A448
+
+#define mmDCORE0_TPC0_QM_PQC_PI_0 0x400A44C
+
+#define mmDCORE0_TPC0_QM_PQC_PI_1 0x400A450
+
+#define mmDCORE0_TPC0_QM_PQC_PI_2 0x400A454
+
+#define mmDCORE0_TPC0_QM_PQC_PI_3 0x400A458
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_WDATA_0 0x400A45C
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_WDATA_1 0x400A460
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_WDATA_2 0x400A464
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_WDATA_3 0x400A468
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_LO_0 0x400A46C
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_LO_1 0x400A470
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_LO_2 0x400A474
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_LO_3 0x400A478
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_HI_0 0x400A47C
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_HI_1 0x400A480
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_HI_2 0x400A484
+
+#define mmDCORE0_TPC0_QM_PQC_LBW_BASE_HI_3 0x400A488
+
+#define mmDCORE0_TPC0_QM_PQC_CFG 0x400A48C
+
+#define mmDCORE0_TPC0_QM_PQC_SECURE_PUSH_IND 0x400A490
+
+#define mmDCORE0_TPC0_QM_ARB_MASK 0x400A4A0
+
+#define mmDCORE0_TPC0_QM_ARB_CFG_0 0x400A4A4
+
+#define mmDCORE0_TPC0_QM_ARB_CHOICE_Q_PUSH 0x400A4A8
+
+#define mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_0 0x400A4AC
+
+#define mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_1 0x400A4B0
+
+#define mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_2 0x400A4B4
+
+#define mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_3 0x400A4B8
+
+#define mmDCORE0_TPC0_QM_ARB_CFG_1 0x400A4BC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_0 0x400A4C0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_1 0x400A4C4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_2 0x400A4C8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_3 0x400A4CC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_4 0x400A4D0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_5 0x400A4D4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_6 0x400A4D8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_7 0x400A4DC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_8 0x400A4E0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_9 0x400A4E4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_10 0x400A4E8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_11 0x400A4EC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_12 0x400A4F0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_13 0x400A4F4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_14 0x400A4F8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_15 0x400A4FC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_16 0x400A500
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_17 0x400A504
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_18 0x400A508
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_19 0x400A50C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_20 0x400A510
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_21 0x400A514
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_22 0x400A518
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_23 0x400A51C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_24 0x400A520
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_25 0x400A524
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_26 0x400A528
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_27 0x400A52C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_28 0x400A530
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_29 0x400A534
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_30 0x400A538
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_31 0x400A53C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_32 0x400A540
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_33 0x400A544
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_34 0x400A548
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_35 0x400A54C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_36 0x400A550
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_37 0x400A554
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_38 0x400A558
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_39 0x400A55C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_40 0x400A560
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_41 0x400A564
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_42 0x400A568
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_43 0x400A56C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_44 0x400A570
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_45 0x400A574
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_46 0x400A578
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_47 0x400A57C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_48 0x400A580
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_49 0x400A584
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_50 0x400A588
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_51 0x400A58C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_52 0x400A590
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_53 0x400A594
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_54 0x400A598
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_55 0x400A59C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_56 0x400A5A0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_57 0x400A5A4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_58 0x400A5A8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_59 0x400A5AC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_60 0x400A5B0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_61 0x400A5B4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_62 0x400A5B8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_AVAIL_CRED_63 0x400A5BC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CRED_INC 0x400A5E0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_0 0x400A5E4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_1 0x400A5E8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_2 0x400A5EC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_3 0x400A5F0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_4 0x400A5F4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_5 0x400A5F8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_6 0x400A5FC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_7 0x400A600
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_8 0x400A604
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_9 0x400A608
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_10 0x400A60C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_11 0x400A610
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_12 0x400A614
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_13 0x400A618
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_14 0x400A61C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_15 0x400A620
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_16 0x400A624
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_17 0x400A628
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_18 0x400A62C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_19 0x400A630
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_20 0x400A634
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_21 0x400A638
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_22 0x400A63C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_23 0x400A640
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_24 0x400A644
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_25 0x400A648
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_26 0x400A64C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_27 0x400A650
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_28 0x400A654
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_29 0x400A658
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_30 0x400A65C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_31 0x400A660
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_32 0x400A664
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_33 0x400A668
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_34 0x400A66C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_35 0x400A670
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_36 0x400A674
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_37 0x400A678
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_38 0x400A67C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_39 0x400A680
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_40 0x400A684
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_41 0x400A688
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_42 0x400A68C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_43 0x400A690
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_44 0x400A694
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_45 0x400A698
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_46 0x400A69C
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_47 0x400A6A0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_48 0x400A6A4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_49 0x400A6A8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_50 0x400A6AC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_51 0x400A6B0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_52 0x400A6B4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_53 0x400A6B8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_54 0x400A6BC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_55 0x400A6C0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_56 0x400A6C4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_57 0x400A6C8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_58 0x400A6CC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_59 0x400A6D0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_60 0x400A6D4
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_61 0x400A6D8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_62 0x400A6DC
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_63 0x400A6E0
+
+#define mmDCORE0_TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x400A704
+
+#define mmDCORE0_TPC0_QM_ARB_MST_SLAVE_EN 0x400A708
+
+#define mmDCORE0_TPC0_QM_ARB_MST_SLAVE_EN_1 0x400A70C
+
+#define mmDCORE0_TPC0_QM_ARB_SLV_CHOICE_WDT 0x400A710
+
+#define mmDCORE0_TPC0_QM_ARB_SLV_ID 0x400A714
+
+#define mmDCORE0_TPC0_QM_ARB_MST_QUIET_PER 0x400A718
+
+#define mmDCORE0_TPC0_QM_ARB_MSG_MAX_INFLIGHT 0x400A744
+
+#define mmDCORE0_TPC0_QM_ARB_BASE_LO 0x400A754
+
+#define mmDCORE0_TPC0_QM_ARB_BASE_HI 0x400A758
+
+#define mmDCORE0_TPC0_QM_ARB_STATE_STS 0x400A780
+
+#define mmDCORE0_TPC0_QM_ARB_CHOICE_FULLNESS_STS 0x400A784
+
+#define mmDCORE0_TPC0_QM_ARB_MSG_STS 0x400A788
+
+#define mmDCORE0_TPC0_QM_ARB_SLV_CHOICE_Q_HEAD 0x400A78C
+
+#define mmDCORE0_TPC0_QM_ARB_ERR_CAUSE 0x400A79C
+
+#define mmDCORE0_TPC0_QM_ARB_ERR_MSG_EN 0x400A7A0
+
+#define mmDCORE0_TPC0_QM_ARB_ERR_STS_DRP 0x400A7A8
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CRED_STS 0x400A7B0
+
+#define mmDCORE0_TPC0_QM_ARB_MST_CRED_STS_1 0x400A7B4
+
+#define mmDCORE0_TPC0_QM_CSMR_STRICT_PRIO_CFG 0x400A7FC
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_CFG0 0x400A800
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_CFG1 0x400A804
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_PTR_LO 0x400A808
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_PTR_HI 0x400A80C
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_TSIZE 0x400A810
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_CTL 0x400A814
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_STS 0x400A81C
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_STS0 0x400A820
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_STS1 0x400A824
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_TSIZE_STS 0x400A828
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_PTR_LO_STS 0x400A82C
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_PTR_HI_STS 0x400A830
+
+#define mmDCORE0_TPC0_QM_CP_WR_ARC_ADDR_HI 0x400A834
+
+#define mmDCORE0_TPC0_QM_CP_WR_ARC_ADDR_LO 0x400A838
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_MSG_BASE_HI 0x400A83C
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_MSG_BASE_LO 0x400A840
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_CTL_MSG_BASE_HI 0x400A844
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_CTL_MSG_BASE_LO 0x400A848
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_MSG_BASE_HI 0x400A84C
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_MSG_BASE_LO 0x400A850
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_MSG_BASE_HI 0x400A854
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_MSG_BASE_LO 0x400A858
+
+#define mmDCORE0_TPC0_QM_ADDR_OVRD 0x400A85C
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_CI_0 0x400A860
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_CI_1 0x400A864
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_CI_2 0x400A868
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_CI_3 0x400A86C
+
+#define mmDCORE0_TPC0_QM_CQ_IFIFO_CI_4 0x400A870
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_CI 0x400A874
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_CI_0 0x400A878
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_CI_1 0x400A87C
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_CI_2 0x400A880
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_CI_3 0x400A884
+
+#define mmDCORE0_TPC0_QM_CQ_CTL_CI_4 0x400A888
+
+#define mmDCORE0_TPC0_QM_ARC_CQ_CTL_CI 0x400A88C
+
+#define mmDCORE0_TPC0_QM_CP_CFG 0x400A890
+
+#define mmDCORE0_TPC0_QM_CP_EXT_SWITCH 0x400A894
+
+#define mmDCORE0_TPC0_QM_CP_SWITCH_WD_SET 0x400A898
+
+#define mmDCORE0_TPC0_QM_CP_SWITCH_WD 0x400A89C
+
+#define mmDCORE0_TPC0_QM_ARC_LB_ADDR_BASE_LO 0x400A8A4
+
+#define mmDCORE0_TPC0_QM_ARC_LB_ADDR_BASE_HI 0x400A8A8
+
+#define mmDCORE0_TPC0_QM_ENGINE_BASE_ADDR_HI 0x400A8AC
+
+#define mmDCORE0_TPC0_QM_ENGINE_BASE_ADDR_LO 0x400A8B0
+
+#define mmDCORE0_TPC0_QM_ENGINE_ADDR_RANGE_SIZE 0x400A8B4
+
+#define mmDCORE0_TPC0_QM_QM_ARC_AUX_BASE_ADDR_HI 0x400A8B8
+
+#define mmDCORE0_TPC0_QM_QM_ARC_AUX_BASE_ADDR_LO 0x400A8BC
+
+#define mmDCORE0_TPC0_QM_QM_BASE_ADDR_HI 0x400A8C0
+
+#define mmDCORE0_TPC0_QM_QM_BASE_ADDR_LO 0x400A8C4
+
+#define mmDCORE0_TPC0_QM_ARC_PQC_SECURE_PUSH_IND 0x400A8C8
+
+#define mmDCORE0_TPC0_QM_PQC_STS_0_0 0x400A8D0
+
+#define mmDCORE0_TPC0_QM_PQC_STS_0_1 0x400A8D4
+
+#define mmDCORE0_TPC0_QM_PQC_STS_0_2 0x400A8D8
+
+#define mmDCORE0_TPC0_QM_PQC_STS_0_3 0x400A8DC
+
+#define mmDCORE0_TPC0_QM_PQC_STS_1_0 0x400A8E0
+
+#define mmDCORE0_TPC0_QM_PQC_STS_1_1 0x400A8E4
+
+#define mmDCORE0_TPC0_QM_PQC_STS_1_2 0x400A8E8
+
+#define mmDCORE0_TPC0_QM_PQC_STS_1_3 0x400A8EC
+
+#define mmDCORE0_TPC0_QM_SEI_STATUS 0x400A8F0
+
+#define mmDCORE0_TPC0_QM_SEI_MASK 0x400A8F4
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_ADDR_LO 0x400AD00
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_ADDR_HI 0x400AD04
+
+#define mmDCORE0_TPC0_QM_GLBL_ERR_WDATA 0x400AD08
+
+#define mmDCORE0_TPC0_QM_L2H_MASK_LO 0x400AD14
+
+#define mmDCORE0_TPC0_QM_L2H_MASK_HI 0x400AD18
+
+#define mmDCORE0_TPC0_QM_L2H_CMPR_LO 0x400AD1C
+
+#define mmDCORE0_TPC0_QM_L2H_CMPR_HI 0x400AD20
+
+#define mmDCORE0_TPC0_QM_LOCAL_RANGE_BASE 0x400AD24
+
+#define mmDCORE0_TPC0_QM_LOCAL_RANGE_SIZE 0x400AD28
+
+#define mmDCORE0_TPC0_QM_HBW_RD_RATE_LIM_CFG_1 0x400AD30
+
+#define mmDCORE0_TPC0_QM_LBW_WR_RATE_LIM_CFG_0 0x400AD34
+
+#define mmDCORE0_TPC0_QM_LBW_WR_RATE_LIM_CFG_1 0x400AD38
+
+#define mmDCORE0_TPC0_QM_HBW_RD_RATE_LIM_CFG_0 0x400AD3C
+
+#define mmDCORE0_TPC0_QM_IND_GW_APB_CFG 0x400AD40
+
+#define mmDCORE0_TPC0_QM_IND_GW_APB_WDATA 0x400AD44
+
+#define mmDCORE0_TPC0_QM_IND_GW_APB_RDATA 0x400AD48
+
+#define mmDCORE0_TPC0_QM_IND_GW_APB_STATUS 0x400AD4C
+
+#define mmDCORE0_TPC0_QM_PERF_CNT_FREE_LO 0x400AD60
+
+#define mmDCORE0_TPC0_QM_PERF_CNT_FREE_HI 0x400AD64
+
+#define mmDCORE0_TPC0_QM_PERF_CNT_IDLE_LO 0x400AD68
+
+#define mmDCORE0_TPC0_QM_PERF_CNT_IDLE_HI 0x400AD6C
+
+#define mmDCORE0_TPC0_QM_PERF_CNT_CFG 0x400AD70
+
+#endif /* ASIC_REG_DCORE0_TPC0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
new file mode 100644
index 000000000000..e68667cc795a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID 0x41E3C00
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP 0x41E3C04
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_STRONG_ORDER 0x41E3C08
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_NO_SNOOP 0x41E3C0C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_WR_REDUCTION 0x41E3C10
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RD_ATOMIC 0x41E3C14
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_QOS 0x41E3C18
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RSVD 0x41E3C1C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_EMEM_CPAGE 0x41E3C20
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_CORE 0x41E3C24
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_E2E_COORD 0x41E3C28
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_WR_OVRD_LO 0x41E3C30
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_WR_OVRD_HI 0x41E3C34
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RD_OVRD_LO 0x41E3C38
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RD_OVRD_HI 0x41E3C3C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_COORD 0x41E3C40
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_LOCK 0x41E3C44
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_RSVD 0x41E3C48
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_OVRD 0x41E3C4C
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
new file mode 100644
index 000000000000..f7ffdcbd1a76
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID 0x41E3B00
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP 0x41E3B04
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_STRONG_ORDER 0x41E3B08
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_NO_SNOOP 0x41E3B0C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_WR_REDUCTION 0x41E3B10
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RD_ATOMIC 0x41E3B14
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_QOS 0x41E3B18
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RSVD 0x41E3B1C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_EMEM_CPAGE 0x41E3B20
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_CORE 0x41E3B24
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_E2E_COORD 0x41E3B28
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_WR_OVRD_LO 0x41E3B30
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_WR_OVRD_HI 0x41E3B34
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RD_OVRD_LO 0x41E3B38
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RD_OVRD_HI 0x41E3B3C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_COORD 0x41E3B40
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_LOCK 0x41E3B44
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_RSVD 0x41E3B48
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_OVRD 0x41E3B4C
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
new file mode 100644
index 000000000000..4c1bb5306cba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID 0x41E3900
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP 0x41E3904
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_STRONG_ORDER 0x41E3908
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_NO_SNOOP 0x41E390C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_WR_REDUCTION 0x41E3910
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RD_ATOMIC 0x41E3914
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_QOS 0x41E3918
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RSVD 0x41E391C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_EMEM_CPAGE 0x41E3920
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_CORE 0x41E3924
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_E2E_COORD 0x41E3928
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_WR_OVRD_LO 0x41E3930
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_WR_OVRD_HI 0x41E3934
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RD_OVRD_LO 0x41E3938
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RD_OVRD_HI 0x41E393C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_COORD 0x41E3940
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_LOCK 0x41E3944
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_RSVD 0x41E3948
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_OVRD 0x41E394C
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
new file mode 100644
index 000000000000..e413905ffe25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID 0x41E3A00
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP 0x41E3A04
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_STRONG_ORDER 0x41E3A08
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_NO_SNOOP 0x41E3A0C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_WR_REDUCTION 0x41E3A10
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RD_ATOMIC 0x41E3A14
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_QOS 0x41E3A18
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RSVD 0x41E3A1C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_EMEM_CPAGE 0x41E3A20
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_CORE 0x41E3A24
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_E2E_COORD 0x41E3A28
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_WR_OVRD_LO 0x41E3A30
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_WR_OVRD_HI 0x41E3A34
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RD_OVRD_LO 0x41E3A38
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RD_OVRD_HI 0x41E3A3C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_COORD 0x41E3A40
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_LOCK 0x41E3A44
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_RSVD 0x41E3A48
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_OVRD 0x41E3A4C
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
new file mode 100644
index 000000000000..bce75ac6e279
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID 0x41E3800
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP 0x41E3804
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_STRONG_ORDER 0x41E3808
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_NO_SNOOP 0x41E380C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_WR_REDUCTION 0x41E3810
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RD_ATOMIC 0x41E3814
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_QOS 0x41E3818
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RSVD 0x41E381C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_EMEM_CPAGE 0x41E3820
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_CORE 0x41E3824
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_E2E_COORD 0x41E3828
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_WR_OVRD_LO 0x41E3830
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_WR_OVRD_HI 0x41E3834
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RD_OVRD_LO 0x41E3838
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RD_OVRD_HI 0x41E383C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_COORD 0x41E3840
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_LOCK 0x41E3844
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_RSVD 0x41E3848
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_OVRD 0x41E384C
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
new file mode 100644
index 000000000000..68dd98459c86
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_MASKS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_MASKS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL
+ * (Prototype: VDEC_BRDG_CTRL)
+ *****************************************
+ */
+
+/* DCORE0_VDEC0_BRDG_CTRL_CGM_DISABLE */
+#define DCORE0_VDEC0_BRDG_CTRL_CGM_DISABLE_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_CGM_DISABLE_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_IDLE_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_IDLE_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_IDLE_MASK_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_APB_CGM_CNT */
+#define DCORE0_VDEC0_BRDG_CTRL_APB_CGM_CNT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_APB_CGM_CNT_VAL_MASK 0xFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT */
+#define DCORE0_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT_VAL_MASK 0xFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_GRACEFUL */
+#define DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_PEND_SHIFT 4
+#define DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK 0x10
+
+/* DCORE0_VDEC0_BRDG_CTRL_IDLE_CGM_CNT */
+#define DCORE0_VDEC0_BRDG_CTRL_IDLE_CGM_CNT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_IDLE_CGM_CNT_VAL_MASK 0xFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR */
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_HBW_SEI_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_HBW_SEI_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_HBW_SEI_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_HBW_SEI_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_HBW_SEI_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_HBW_SEI_MASK 0x4
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_HBW_SEI_SHIFT 3
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_HBW_SEI_MASK 0x8
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_LBW_SEI_SHIFT 4
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_LBW_SEI_MASK 0x10
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_LBW_SEI_SHIFT 5
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_LBW_SEI_MASK 0x20
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_LBW_SEI_SHIFT 6
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_LBW_SEI_MASK 0x40
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_LBW_SEI_SHIFT 7
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_LBW_SEI_MASK 0x80
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_VCD_LBW_SEI_SHIFT 8
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_VCD_LBW_SEI_MASK 0x100
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_L2C_LBW_SEI_SHIFT 9
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_L2C_LBW_SEI_MASK 0x200
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_NRM_LBW_SEI_SHIFT 10
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_NRM_LBW_SEI_MASK 0x400
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_ABNRM_LBW_SEI_SHIFT 11
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_ABNRM_LBW_SEI_MASK 0x800
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_SEI_SHIFT 12
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_SEI_MASK 0x1000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_APB_SEI_SHIFT 13
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_APB_SEI_MASK 0x2000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_TRC_APB_SEI_SHIFT 14
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_TRC_APB_SEI_MASK 0x4000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_MSTR_IF_SEI_SHIFT 15
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_MSTR_IF_SEI_MASK 0x8000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_AXI_SPLIT_BRESP_ERR_SEI_SHIFT 16
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_AXI_SPLIT_BRESP_ERR_SEI_MASK 0x10000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_WR_VIOL_SEI_SHIFT 17
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_WR_VIOL_SEI_MASK 0x20000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_RD_VIOL_SEI_SHIFT 18
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_RD_VIOL_SEI_MASK 0x40000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_WR_VIOL_SEI_SHIFT 19
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_WR_VIOL_SEI_MASK 0x80000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_RD_VIOL_SEI_SHIFT 20
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_RD_VIOL_SEI_MASK 0x100000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_VCD_SPI_SHIFT 21
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_VCD_SPI_MASK 0x200000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_L2C_SPI_SHIFT 22
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_L2C_SPI_MASK 0x400000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_NRM_SPI_SHIFT 23
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_NRM_SPI_MASK 0x800000
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_ABNRM_SPI_SHIFT 24
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_ABNRM_SPI_MASK 0x1000000
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLEN_GT_31_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLEN_GT_31_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_MASK 0x4
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_SHIFT 3
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK \
+0x8
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 4
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x10
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_SHIFT 5
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_MASK 0x20
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLOCK_VIOL_SHIFT 6
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLOCK_VIOL_MASK 0x40
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
+0x100
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
+
+/* DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE */
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_READ_ACCESS_VIOL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_READ_ACCESS_VIOL_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWADDR_ALIGN_VIOL_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWADDR_ALIGN_VIOL_MASK 0x4
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 3
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x8
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLEN_VIOL_SHIFT 4
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLEN_VIOL_MASK 0x10
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_WSTRB_VIOL_SHIFT 5
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_WSTRB_VIOL_MASK 0x20
+
+/* DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM */
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AW_VIOL_CLR_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AW_VIOL_CLR_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AR_VIOL_CLR_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AR_VIOL_CLR_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_LBW_AW_VIOL_CLR_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_LBW_AW_VIOL_CLR_MASK 0x4
+
+/* DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK_MASK_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK_MASK_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK_MASK_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK_MASK_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_AWSIZE_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_AWSIZE_MASK 0x7
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_ARSIZE_SHIFT 3
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_ARSIZE_MASK 0x38
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_LBW_LEGAL_AWSIZE_SHIFT 6
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_LBW_LEGAL_AWSIZE_MASK 0x1C0
+
+/* DCORE0_VDEC0_BRDG_CTRL_ARC_MSG_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_ARC_MSG_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ARC_MSG_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA */
+#define DCORE0_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA_VAL_MASK 0xFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA */
+#define DCORE0_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA_VAL_MASK 0xFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL */
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L_IND_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L_IND_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H_IND_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H_IND_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L_IND_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L_IND_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H_IND_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H_IND_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_CNTR_EN */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_CNTR_EN_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_CNTR_EN_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA */
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA */
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA */
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR */
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA */
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID */
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID_ID_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID_ID_MASK 0xFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG */
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_RESP_OK_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_RESP_OK_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_WR_BUF_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_WR_BUF_MASK 0x2
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_RD_OS_SHIFT 8
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_RD_OS_MASK 0xFF00
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_WR_OS_SHIFT 16
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_WR_OS_MASK 0xFF0000
+
+/* DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT */
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT_VAL_MASK 0x1
+
+/* DCORE0_VDEC0_BRDG_CTRL_HWEVENT_MASK */
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_MASK_MASK_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_MASK_MASK_MASK 0x2
+
+/* DCORE0_VDEC0_BRDG_CTRL_HWEVENT_CNTXT */
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_CNTXT_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HWEVENT_CNTXT_VAL_MASK 0xFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP */
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP_ERR_RESP_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP_ERR_RESP_MASK 0x3
+
+/* DCORE0_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP */
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP_ERR_RESP_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP_ERR_RESP_MASK 0x3
+
+/* DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP */
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_WR_ERR_RESP_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_WR_ERR_RESP_MASK 0x3
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_RD_ERR_RESP_SHIFT 2
+#define DCORE0_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_RD_ERR_RESP_MASK 0xC
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AW_STA_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AW_STA_MASK 0x1
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AR_STA_SHIFT 1
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AR_STA_MASK 0x2
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L_VAL_MASK 0xFFFFFFFF
+
+/* DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H */
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H_VAL_SHIFT 0
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
new file mode 100644
index 000000000000..d2844307a6bf
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_BRDG_CTRL
+ * (Prototype: VDEC_BRDG_CTRL)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_CGM_DISABLE 0x41E3100
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_IDLE_MASK 0x41E3104
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_APB_CGM_CNT 0x41E3108
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT 0x41E310C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL 0x41E3110
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_IDLE_CGM_CNT 0x41E3114
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR 0x41E3120
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE 0x41E3124
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE 0x41E3128
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM 0x41E312C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK 0x41E3130
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK 0x41E3134
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK 0x41E3138
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK 0x41E3160
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK 0x41E3170
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK 0x41E3180
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK 0x41E3190
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT 0x41E31A0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT 0x41E31A4
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT 0x41E31B0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT 0x41E31B4
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT 0x41E31C0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT 0x41E31C4
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE 0x41E31D0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ARC_MSG_MASK 0x41E3200
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA 0x41E3230
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA 0x41E3260
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL 0x41E3270
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR 0x41E3280
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L 0x41E3290
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H 0x41E3294
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L 0x41E32A0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H 0x41E32A4
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L 0x41E32B0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H 0x41E32B4
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L 0x41E32C0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H 0x41E32C4
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_CNTR_EN 0x41E32D0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_INTR_MASK 0x41E3300
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK 0x41E3310
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR 0x41E3320
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR 0x41E3330
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR 0x41E3334
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR 0x41E3338
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR 0x41E3340
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR 0x41E3350
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA 0x41E3360
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT 0x41E3380
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L 0x41E3390
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H 0x41E3394
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT 0x41E33C0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR 0x41E33D0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA 0x41E33E0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_INTR_MASK 0x41E3400
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK 0x41E3410
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR 0x41E3420
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR 0x41E3430
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR 0x41E3434
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR 0x41E3438
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR 0x41E3440
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR 0x41E3450
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA 0x41E3460
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT 0x41E3480
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L 0x41E3490
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H 0x41E3494
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT 0x41E34C0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR 0x41E34D0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA 0x41E34E0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_INTR_MASK 0x41E3500
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK 0x41E3510
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR 0x41E3520
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR 0x41E3530
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR 0x41E3534
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR 0x41E3538
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR 0x41E3540
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR 0x41E3550
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA 0x41E3560
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT 0x41E3580
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L 0x41E3590
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H 0x41E3594
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT 0x41E35C0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR 0x41E35D0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA 0x41E35E0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK 0x41E3600
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK 0x41E3610
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR 0x41E3620
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR 0x41E3630
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR 0x41E3634
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR 0x41E3638
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR 0x41E3640
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR 0x41E3650
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA 0x41E3660
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT 0x41E3680
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L 0x41E3690
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H 0x41E3694
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT 0x41E36C0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR 0x41E36D0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA 0x41E36E0
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID 0x41E3700
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG 0x41E3704
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT 0x41E3708
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HWEVENT_MASK 0x41E370C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HWEVENT_CNTXT 0x41E3714
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP 0x41E3718
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP 0x41E371C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP 0x41E3720
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS 0x41E3724
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L 0x41E3728
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H 0x41E372C
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L 0x41E3730
+
+#define mmDCORE0_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H 0x41E3734
+
+#endif /* ASIC_REG_DCORE0_VDEC0_BRDG_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
new file mode 100644
index 000000000000..89b522b12998
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE0_VDEC0_CTRL_SPECIAL_REGS_H_
+#define ASIC_REG_DCORE0_VDEC0_CTRL_SPECIAL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE0_VDEC0_CTRL_SPECIAL
+ * (Prototype: SPECIAL_REGS)
+ *****************************************
+ */
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_0 0x41E4E80
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_1 0x41E4E84
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_2 0x41E4E88
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_3 0x41E4E8C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_4 0x41E4E90
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_5 0x41E4E94
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_6 0x41E4E98
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_7 0x41E4E9C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_8 0x41E4EA0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_9 0x41E4EA4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_10 0x41E4EA8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_11 0x41E4EAC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_12 0x41E4EB0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_13 0x41E4EB4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_14 0x41E4EB8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_15 0x41E4EBC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_16 0x41E4EC0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_17 0x41E4EC4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_18 0x41E4EC8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_19 0x41E4ECC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_20 0x41E4ED0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_21 0x41E4ED4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_22 0x41E4ED8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_23 0x41E4EDC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_24 0x41E4EE0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_25 0x41E4EE4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_26 0x41E4EE8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_27 0x41E4EEC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_28 0x41E4EF0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_29 0x41E4EF4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_30 0x41E4EF8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_PRIV_31 0x41E4EFC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_GW_DATA 0x41E4F00
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_GW_REQ 0x41E4F04
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_NUMOF 0x41E4F0C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_ECC_SEL 0x41E4F10
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_ECC_CTL 0x41E4F14
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_ECC_ERR_MASK 0x41E4F18
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_ECC_GLBL_ERR_MASK 0x41E4F1C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_ECC_ERR_STS 0x41E4F20
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_ECC_ERR_ADDR 0x41E4F24
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_MEM_RM 0x41E4F28
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_ERR_MASK 0x41E4F40
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_ERR_ADDR 0x41E4F44
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_ERR_CAUSE 0x41E4F48
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SPARE_0 0x41E4F60
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SPARE_1 0x41E4F64
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SPARE_2 0x41E4F68
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SPARE_3 0x41E4F6C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_0 0x41E4F80
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_1 0x41E4F84
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_2 0x41E4F88
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_3 0x41E4F8C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_4 0x41E4F90
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_5 0x41E4F94
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_6 0x41E4F98
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_7 0x41E4F9C
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_8 0x41E4FA0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_9 0x41E4FA4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_10 0x41E4FA8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_11 0x41E4FAC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_12 0x41E4FB0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_13 0x41E4FB4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_14 0x41E4FB8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_15 0x41E4FBC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_16 0x41E4FC0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_17 0x41E4FC4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_18 0x41E4FC8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_19 0x41E4FCC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_20 0x41E4FD0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_21 0x41E4FD4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_22 0x41E4FD8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_23 0x41E4FDC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_24 0x41E4FE0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_25 0x41E4FE4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_26 0x41E4FE8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_27 0x41E4FEC
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_28 0x41E4FF0
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_29 0x41E4FF4
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_30 0x41E4FF8
+
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_GLBL_SEC_31 0x41E4FFC
+
+#endif /* ASIC_REG_DCORE0_VDEC0_CTRL_SPECIAL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
new file mode 100644
index 000000000000..622613dc76fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE1_MME_CTRL_LO_REGS_H_
+#define ASIC_REG_DCORE1_MME_CTRL_LO_REGS_H_
+
+/*
+ *****************************************
+ * DCORE1_MME_CTRL_LO
+ * (Prototype: MME_CTRL_LO)
+ *****************************************
+ */
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_STATUS 0x42CB000
+
+#define mmDCORE1_MME_CTRL_LO_CMD 0x42CB004
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0 0x42CB148
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 0x42CB14C
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0 0x42CB150
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1 0x42CB154
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1 0x42CB158
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_A_SS 0x42CB224
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_B_SS 0x42CB228
+
+#define mmDCORE1_MME_CTRL_LO_ARCH_COUT_SS 0x42CB27C
+
+#define mmDCORE1_MME_CTRL_LO_QM_STALL 0x42CB400
+
+#define mmDCORE1_MME_CTRL_LO_LOG_SHADOW_LO 0x42CB404
+
+#define mmDCORE1_MME_CTRL_LO_LOG_SHADOW_HI 0x42CB408
+
+#define mmDCORE1_MME_CTRL_LO_SYNC_OBJECT_FIFO_TH 0x42CB40C
+
+#define mmDCORE1_MME_CTRL_LO_REDUN 0x42CB410
+
+#define mmDCORE1_MME_CTRL_LO_EUS_LOCAL_FIFO_TH 0x42CB414
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0 0x42CB418
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1 0x42CB41C
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16 0x42CB420
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8 0x42CB424
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32 0x42CB428
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I 0x42CB42C
+
+#define mmDCORE1_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32 0x42CB430
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_DESC0 0x42CB434
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE 0x42CB438
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_TH 0x42CB43C
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_MIN 0x42CB440
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_CTRL_EN 0x42CB444
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE 0x42CB448
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_A_BF16 0x42CB44C
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_B_BF16 0x42CB450
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_A_FP16 0x42CB454
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_B_FP16 0x42CB458
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_F8 0x42CB45C
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_A_FP32_ODD 0x42CB460
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_A_FP32_EVEN 0x42CB464
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_B_FP32_ODD 0x42CB468
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_B_FP32_EVEN 0x42CB46C
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_A_TF32_ODD 0x42CB470
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_A_TF32_EVEN 0x42CB474
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_B_TF32_ODD 0x42CB478
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DUMMY_B_TF32_EVEN 0x42CB47C
+
+#define mmDCORE1_MME_CTRL_LO_PROT 0x42CB480
+
+#define mmDCORE1_MME_CTRL_LO_EU 0x42CB484
+
+#define mmDCORE1_MME_CTRL_LO_SBTE 0x42CB488
+
+#define mmDCORE1_MME_CTRL_LO_AGU_SM_INFLIGHT_CNTR 0x42CB48C
+
+#define mmDCORE1_MME_CTRL_LO_AGU_SM_TOTAL_CNTR 0x42CB490
+
+#define mmDCORE1_MME_CTRL_LO_PCU_RL_SAT_SEC 0x42CB494
+
+#define mmDCORE1_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN32 0x42CB498
+
+#define mmDCORE1_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN33 0x42CB49C
+
+#define mmDCORE1_MME_CTRL_LO_EU_ISOLATION_DIS 0x42CB4A0
+
+#define mmDCORE1_MME_CTRL_LO_QM_SLV_CLK_EN 0x42CB4A4
+
+#define mmDCORE1_MME_CTRL_LO_HBW_CLK_ENABLER_DIS 0x42CB4A8
+
+#define mmDCORE1_MME_CTRL_LO_AGU 0x42CB4AC
+
+#define mmDCORE1_MME_CTRL_LO_QM 0x42CB4B0
+
+#define mmDCORE1_MME_CTRL_LO_EARLY_RELEASE_STATUS 0x42CB4B4
+
+#define mmDCORE1_MME_CTRL_LO_INTR_CAUSE 0x42CB4B8
+
+#define mmDCORE1_MME_CTRL_LO_INTR_MASK 0x42CB4BC
+
+#define mmDCORE1_MME_CTRL_LO_INTR_CLEAR 0x42CB4C0
+
+#define mmDCORE1_MME_CTRL_LO_REDUN_PSOC_SEL_SEC 0x42CB4C4
+
+#define mmDCORE1_MME_CTRL_LO_BIST 0x42CB4C8
+
+#define mmDCORE1_MME_CTRL_LO_EU_RL_ENABLE 0x42CB4CC
+
+#define mmDCORE1_MME_CTRL_LO_EU_RL_TOKEN_SEL 0x42CB4D0
+
+#define mmDCORE1_MME_CTRL_LO_EU_RL_CFG 0x42CB4D4
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DBG_DW0 0x42CB4D8
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DBG_DW1 0x42CB4DC
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DBG_DW2 0x42CB4E0
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DBG_DW3 0x42CB4E4
+
+#define mmDCORE1_MME_CTRL_LO_PCU_DBG_WKL_ID 0x42CB4E8
+
+#define mmDCORE1_MME_CTRL_LO_ETF_MEM_WRAP_RM 0x42CB4EC
+
+#endif /* ASIC_REG_DCORE1_MME_CTRL_LO_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
new file mode 100644
index 000000000000..b06469f5a279
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE3_MME_CTRL_LO_REGS_H_
+#define ASIC_REG_DCORE3_MME_CTRL_LO_REGS_H_
+
+/*
+ *****************************************
+ * DCORE3_MME_CTRL_LO
+ * (Prototype: MME_CTRL_LO)
+ *****************************************
+ */
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_STATUS 0x46CB000
+
+#define mmDCORE3_MME_CTRL_LO_CMD 0x46CB004
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0 0x46CB148
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 0x46CB14C
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0 0x46CB150
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1 0x46CB154
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1 0x46CB158
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_A_SS 0x46CB224
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_B_SS 0x46CB228
+
+#define mmDCORE3_MME_CTRL_LO_ARCH_COUT_SS 0x46CB27C
+
+#define mmDCORE3_MME_CTRL_LO_QM_STALL 0x46CB400
+
+#define mmDCORE3_MME_CTRL_LO_LOG_SHADOW_LO 0x46CB404
+
+#define mmDCORE3_MME_CTRL_LO_LOG_SHADOW_HI 0x46CB408
+
+#define mmDCORE3_MME_CTRL_LO_SYNC_OBJECT_FIFO_TH 0x46CB40C
+
+#define mmDCORE3_MME_CTRL_LO_REDUN 0x46CB410
+
+#define mmDCORE3_MME_CTRL_LO_EUS_LOCAL_FIFO_TH 0x46CB414
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_DLY_DW0 0x46CB418
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_DLY_DW1 0x46CB41C
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F16 0x46CB420
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_F8 0x46CB424
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32 0x46CB428
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_FP32I 0x46CB42C
+
+#define mmDCORE3_MME_CTRL_LO_EUS_ROLLUP_CD_PROT_TF32 0x46CB430
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_DESC0 0x46CB434
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_TOKEN_UPDATE 0x46CB438
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_TH 0x46CB43C
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_MIN 0x46CB440
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_CTRL_EN 0x46CB444
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_HISTORY_LOG_SIZE 0x46CB448
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_A_BF16 0x46CB44C
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_B_BF16 0x46CB450
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_A_FP16 0x46CB454
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_B_FP16 0x46CB458
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_F8 0x46CB45C
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_A_FP32_ODD 0x46CB460
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_A_FP32_EVEN 0x46CB464
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_B_FP32_ODD 0x46CB468
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_B_FP32_EVEN 0x46CB46C
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_A_TF32_ODD 0x46CB470
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_A_TF32_EVEN 0x46CB474
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_B_TF32_ODD 0x46CB478
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DUMMY_B_TF32_EVEN 0x46CB47C
+
+#define mmDCORE3_MME_CTRL_LO_PROT 0x46CB480
+
+#define mmDCORE3_MME_CTRL_LO_EU 0x46CB484
+
+#define mmDCORE3_MME_CTRL_LO_SBTE 0x46CB488
+
+#define mmDCORE3_MME_CTRL_LO_AGU_SM_INFLIGHT_CNTR 0x46CB48C
+
+#define mmDCORE3_MME_CTRL_LO_AGU_SM_TOTAL_CNTR 0x46CB490
+
+#define mmDCORE3_MME_CTRL_LO_PCU_RL_SAT_SEC 0x46CB494
+
+#define mmDCORE3_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN32 0x46CB498
+
+#define mmDCORE3_MME_CTRL_LO_FMA_FUNC_REDUN_CLK_EN33 0x46CB49C
+
+#define mmDCORE3_MME_CTRL_LO_EU_ISOLATION_DIS 0x46CB4A0
+
+#define mmDCORE3_MME_CTRL_LO_QM_SLV_CLK_EN 0x46CB4A4
+
+#define mmDCORE3_MME_CTRL_LO_HBW_CLK_ENABLER_DIS 0x46CB4A8
+
+#define mmDCORE3_MME_CTRL_LO_AGU 0x46CB4AC
+
+#define mmDCORE3_MME_CTRL_LO_QM 0x46CB4B0
+
+#define mmDCORE3_MME_CTRL_LO_EARLY_RELEASE_STATUS 0x46CB4B4
+
+#define mmDCORE3_MME_CTRL_LO_INTR_CAUSE 0x46CB4B8
+
+#define mmDCORE3_MME_CTRL_LO_INTR_MASK 0x46CB4BC
+
+#define mmDCORE3_MME_CTRL_LO_INTR_CLEAR 0x46CB4C0
+
+#define mmDCORE3_MME_CTRL_LO_REDUN_PSOC_SEL_SEC 0x46CB4C4
+
+#define mmDCORE3_MME_CTRL_LO_BIST 0x46CB4C8
+
+#define mmDCORE3_MME_CTRL_LO_EU_RL_ENABLE 0x46CB4CC
+
+#define mmDCORE3_MME_CTRL_LO_EU_RL_TOKEN_SEL 0x46CB4D0
+
+#define mmDCORE3_MME_CTRL_LO_EU_RL_CFG 0x46CB4D4
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DBG_DW0 0x46CB4D8
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DBG_DW1 0x46CB4DC
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DBG_DW2 0x46CB4E0
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DBG_DW3 0x46CB4E4
+
+#define mmDCORE3_MME_CTRL_LO_PCU_DBG_WKL_ID 0x46CB4E8
+
+#define mmDCORE3_MME_CTRL_LO_ETF_MEM_WRAP_RM 0x46CB4EC
+
+#endif /* ASIC_REG_DCORE3_MME_CTRL_LO_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
new file mode 100644
index 000000000000..3caee4515ad6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
@@ -0,0 +1,45067 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef GAUDI2_BLOCKS_LINUX_DRIVER_H_
+#define GAUDI2_BLOCKS_LINUX_DRIVER_H_
+
+#define mmDCORE0_TPC0_ROM_TABLE_BASE 0x0ull
+#define DCORE0_TPC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_SPMU_BASE 0x1000ull
+#define DCORE0_TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_ETF_BASE 0x2000ull
+#define DCORE0_TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_STM_BASE 0x3000ull
+#define DCORE0_TPC0_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC0_EML_CTI_BASE 0x5000ull
+#define DCORE0_TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_FUNNEL_BASE 0x6000ull
+#define DCORE0_TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_BUSMON_0_BASE 0x7000ull
+#define DCORE0_TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_BUSMON_1_BASE 0x8000ull
+#define DCORE0_TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_BUSMON_2_BASE 0x9000ull
+#define DCORE0_TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_BUSMON_3_BASE 0xA000ull
+#define DCORE0_TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC0_QM_ARC_RTT_BASE 0xB000ull
+#define DCORE0_TPC0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC0_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC0_EML_CFG_BASE 0x40000ull
+#define DCORE0_TPC0_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC0_EML_CFG_SPECIAL_BASE 0x40E80ull
+#define DCORE0_TPC0_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x41000ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC0_EML_TPC_CFG_BASE 0x41000ull
+#define DCORE0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x41050ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x410A0ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x410F0ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x41140ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x41190ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x411E0ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x41230ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x41280ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x412D0ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x41320ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x41370ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x413C0ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x41410ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x41460ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x414B0ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x41500ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_CFG_KERNEL_BASE 0x41508ull
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC0_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_0_BASE 0x415DCull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_1_BASE 0x4162Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_2_BASE 0x4167Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_3_BASE 0x416CCull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_4_BASE 0x4171Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_5_BASE 0x4176Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_6_BASE 0x417BCull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_7_BASE 0x4180Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_8_BASE 0x4185Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_9_BASE 0x418ACull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_10_BASE 0x418FCull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_11_BASE 0x4194Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_12_BASE 0x4199Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_13_BASE 0x419ECull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_14_BASE 0x41A3Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_15_BASE 0x41A8Cull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x41ADCull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_CFG_QM_BASE 0x41AE4ull
+#define DCORE0_TPC0_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC0_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC0_EML_TPC_CFG_AXUSER_BASE 0x41E00ull
+#define DCORE0_TPC0_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_CFG_SPECIAL_BASE 0x41E80ull
+#define DCORE0_TPC0_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC0_EML_QM_DCCM_BASE 0x42000ull
+#define DCORE0_TPC0_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC0_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_QM_ARCAUX_BASE 0x4A000ull
+#define DCORE0_TPC0_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC0_EML_QM_ARCAUX_SPECIAL_BASE 0x4AE80ull
+#define DCORE0_TPC0_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC0_EML_TPC_QM_BASE 0x4C000ull
+#define DCORE0_TPC0_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x4C900ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x4C908ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x4C910ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x4C918ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x4C920ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x4C928ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x4C930ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x4C938ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x4C940ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x4C948ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x4C950ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x4C958ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x4C960ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x4C968ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x4C970ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x4C978ull
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC0_EML_TPC_QM_AXUSER_SECURED_BASE 0x4CB00ull
+#define DCORE0_TPC0_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x4CB80ull
+#define DCORE0_TPC0_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_DBG_HBW_BASE 0x4CC00ull
+#define DCORE0_TPC0_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC0_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC0_EML_TPC_QM_DBG_LBW_BASE 0x4CC80ull
+#define DCORE0_TPC0_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC0_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_TPC_QM_CGM_BASE 0x4CD80ull
+#define DCORE0_TPC0_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC0_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC0_EML_TPC_QM_SPECIAL_BASE 0x4CE80ull
+#define DCORE0_TPC0_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC0_EML_CS_BASE 0x1FF000ull
+#define DCORE0_TPC0_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_EML_CS_SECTION 0x1000
+#define mmDCORE0_TPC1_ROM_TABLE_BASE 0x200000ull
+#define DCORE0_TPC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_SPMU_BASE 0x201000ull
+#define DCORE0_TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_ETF_BASE 0x202000ull
+#define DCORE0_TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_STM_BASE 0x203000ull
+#define DCORE0_TPC1_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC1_EML_CTI_BASE 0x205000ull
+#define DCORE0_TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_FUNNEL_BASE 0x206000ull
+#define DCORE0_TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_BUSMON_0_BASE 0x207000ull
+#define DCORE0_TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_BUSMON_1_BASE 0x208000ull
+#define DCORE0_TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_BUSMON_2_BASE 0x209000ull
+#define DCORE0_TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_BUSMON_3_BASE 0x20A000ull
+#define DCORE0_TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC1_QM_ARC_RTT_BASE 0x20B000ull
+#define DCORE0_TPC1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC1_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC1_EML_CFG_BASE 0x240000ull
+#define DCORE0_TPC1_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC1_EML_CFG_SPECIAL_BASE 0x240E80ull
+#define DCORE0_TPC1_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x241000ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC1_EML_TPC_CFG_BASE 0x241000ull
+#define DCORE0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x241050ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x2410A0ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x2410F0ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x241140ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x241190ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x2411E0ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x241230ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x241280ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x2412D0ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x241320ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x241370ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x2413C0ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x241410ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x241460ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x2414B0ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x241500ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_CFG_KERNEL_BASE 0x241508ull
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC1_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_0_BASE 0x2415DCull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_1_BASE 0x24162Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_2_BASE 0x24167Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_3_BASE 0x2416CCull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_4_BASE 0x24171Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_5_BASE 0x24176Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_6_BASE 0x2417BCull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_7_BASE 0x24180Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_8_BASE 0x24185Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_9_BASE 0x2418ACull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_10_BASE 0x2418FCull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_11_BASE 0x24194Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_12_BASE 0x24199Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_13_BASE 0x2419ECull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_14_BASE 0x241A3Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_15_BASE 0x241A8Cull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x241ADCull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_CFG_QM_BASE 0x241AE4ull
+#define DCORE0_TPC1_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC1_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC1_EML_TPC_CFG_AXUSER_BASE 0x241E00ull
+#define DCORE0_TPC1_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_CFG_SPECIAL_BASE 0x241E80ull
+#define DCORE0_TPC1_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC1_EML_QM_DCCM_BASE 0x242000ull
+#define DCORE0_TPC1_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC1_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_QM_ARCAUX_BASE 0x24A000ull
+#define DCORE0_TPC1_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC1_EML_QM_ARCAUX_SPECIAL_BASE 0x24AE80ull
+#define DCORE0_TPC1_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC1_EML_TPC_QM_BASE 0x24C000ull
+#define DCORE0_TPC1_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x24C900ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x24C908ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x24C910ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x24C918ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x24C920ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x24C928ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x24C930ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x24C938ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x24C940ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x24C948ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x24C950ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x24C958ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x24C960ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x24C968ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x24C970ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x24C978ull
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC1_EML_TPC_QM_AXUSER_SECURED_BASE 0x24CB00ull
+#define DCORE0_TPC1_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x24CB80ull
+#define DCORE0_TPC1_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_DBG_HBW_BASE 0x24CC00ull
+#define DCORE0_TPC1_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC1_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC1_EML_TPC_QM_DBG_LBW_BASE 0x24CC80ull
+#define DCORE0_TPC1_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC1_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_TPC_QM_CGM_BASE 0x24CD80ull
+#define DCORE0_TPC1_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC1_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC1_EML_TPC_QM_SPECIAL_BASE 0x24CE80ull
+#define DCORE0_TPC1_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC1_EML_CS_BASE 0x3FF000ull
+#define DCORE0_TPC1_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_EML_CS_SECTION 0x1000
+#define mmDCORE0_TPC2_ROM_TABLE_BASE 0x400000ull
+#define DCORE0_TPC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_SPMU_BASE 0x401000ull
+#define DCORE0_TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_ETF_BASE 0x402000ull
+#define DCORE0_TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_STM_BASE 0x403000ull
+#define DCORE0_TPC2_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC2_EML_CTI_BASE 0x405000ull
+#define DCORE0_TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_FUNNEL_BASE 0x406000ull
+#define DCORE0_TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_BUSMON_0_BASE 0x407000ull
+#define DCORE0_TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_BUSMON_1_BASE 0x408000ull
+#define DCORE0_TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_BUSMON_2_BASE 0x409000ull
+#define DCORE0_TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_BUSMON_3_BASE 0x40A000ull
+#define DCORE0_TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC2_QM_ARC_RTT_BASE 0x40B000ull
+#define DCORE0_TPC2_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC2_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC2_EML_CFG_BASE 0x440000ull
+#define DCORE0_TPC2_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC2_EML_CFG_SPECIAL_BASE 0x440E80ull
+#define DCORE0_TPC2_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x441000ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC2_EML_TPC_CFG_BASE 0x441000ull
+#define DCORE0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x441050ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x4410A0ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x4410F0ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x441140ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x441190ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x4411E0ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x441230ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x441280ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x4412D0ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x441320ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x441370ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x4413C0ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x441410ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x441460ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x4414B0ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x441500ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_CFG_KERNEL_BASE 0x441508ull
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC2_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_0_BASE 0x4415DCull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_1_BASE 0x44162Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_2_BASE 0x44167Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_3_BASE 0x4416CCull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_4_BASE 0x44171Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_5_BASE 0x44176Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_6_BASE 0x4417BCull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_7_BASE 0x44180Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_8_BASE 0x44185Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_9_BASE 0x4418ACull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_10_BASE 0x4418FCull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_11_BASE 0x44194Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_12_BASE 0x44199Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_13_BASE 0x4419ECull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_14_BASE 0x441A3Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_15_BASE 0x441A8Cull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x441ADCull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_CFG_QM_BASE 0x441AE4ull
+#define DCORE0_TPC2_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC2_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC2_EML_TPC_CFG_AXUSER_BASE 0x441E00ull
+#define DCORE0_TPC2_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_CFG_SPECIAL_BASE 0x441E80ull
+#define DCORE0_TPC2_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC2_EML_QM_DCCM_BASE 0x442000ull
+#define DCORE0_TPC2_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC2_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_QM_ARCAUX_BASE 0x44A000ull
+#define DCORE0_TPC2_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC2_EML_QM_ARCAUX_SPECIAL_BASE 0x44AE80ull
+#define DCORE0_TPC2_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC2_EML_TPC_QM_BASE 0x44C000ull
+#define DCORE0_TPC2_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x44C900ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x44C908ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x44C910ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x44C918ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x44C920ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x44C928ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x44C930ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x44C938ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x44C940ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x44C948ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x44C950ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x44C958ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x44C960ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x44C968ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x44C970ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x44C978ull
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC2_EML_TPC_QM_AXUSER_SECURED_BASE 0x44CB00ull
+#define DCORE0_TPC2_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x44CB80ull
+#define DCORE0_TPC2_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_DBG_HBW_BASE 0x44CC00ull
+#define DCORE0_TPC2_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC2_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC2_EML_TPC_QM_DBG_LBW_BASE 0x44CC80ull
+#define DCORE0_TPC2_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC2_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_TPC_QM_CGM_BASE 0x44CD80ull
+#define DCORE0_TPC2_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC2_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC2_EML_TPC_QM_SPECIAL_BASE 0x44CE80ull
+#define DCORE0_TPC2_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC2_EML_CS_BASE 0x5FF000ull
+#define DCORE0_TPC2_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_EML_CS_SECTION 0x1000
+#define mmDCORE0_TPC3_ROM_TABLE_BASE 0x600000ull
+#define DCORE0_TPC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_SPMU_BASE 0x601000ull
+#define DCORE0_TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_ETF_BASE 0x602000ull
+#define DCORE0_TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_STM_BASE 0x603000ull
+#define DCORE0_TPC3_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC3_EML_CTI_BASE 0x605000ull
+#define DCORE0_TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_FUNNEL_BASE 0x606000ull
+#define DCORE0_TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_BUSMON_0_BASE 0x607000ull
+#define DCORE0_TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_BUSMON_1_BASE 0x608000ull
+#define DCORE0_TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_BUSMON_2_BASE 0x609000ull
+#define DCORE0_TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_BUSMON_3_BASE 0x60A000ull
+#define DCORE0_TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC3_QM_ARC_RTT_BASE 0x60B000ull
+#define DCORE0_TPC3_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC3_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC3_EML_CFG_BASE 0x640000ull
+#define DCORE0_TPC3_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC3_EML_CFG_SPECIAL_BASE 0x640E80ull
+#define DCORE0_TPC3_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x641000ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC3_EML_TPC_CFG_BASE 0x641000ull
+#define DCORE0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x641050ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x6410A0ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x6410F0ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x641140ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x641190ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x6411E0ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x641230ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x641280ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x6412D0ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x641320ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x641370ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x6413C0ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x641410ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x641460ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x6414B0ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x641500ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_CFG_KERNEL_BASE 0x641508ull
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC3_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_0_BASE 0x6415DCull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_1_BASE 0x64162Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_2_BASE 0x64167Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_3_BASE 0x6416CCull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_4_BASE 0x64171Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_5_BASE 0x64176Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_6_BASE 0x6417BCull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_7_BASE 0x64180Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_8_BASE 0x64185Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_9_BASE 0x6418ACull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_10_BASE 0x6418FCull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_11_BASE 0x64194Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_12_BASE 0x64199Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_13_BASE 0x6419ECull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_14_BASE 0x641A3Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_15_BASE 0x641A8Cull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x641ADCull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_CFG_QM_BASE 0x641AE4ull
+#define DCORE0_TPC3_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC3_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC3_EML_TPC_CFG_AXUSER_BASE 0x641E00ull
+#define DCORE0_TPC3_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_CFG_SPECIAL_BASE 0x641E80ull
+#define DCORE0_TPC3_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC3_EML_QM_DCCM_BASE 0x642000ull
+#define DCORE0_TPC3_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC3_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_QM_ARCAUX_BASE 0x64A000ull
+#define DCORE0_TPC3_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC3_EML_QM_ARCAUX_SPECIAL_BASE 0x64AE80ull
+#define DCORE0_TPC3_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC3_EML_TPC_QM_BASE 0x64C000ull
+#define DCORE0_TPC3_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x64C900ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x64C908ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x64C910ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x64C918ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x64C920ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x64C928ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x64C930ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x64C938ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x64C940ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x64C948ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x64C950ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x64C958ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x64C960ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x64C968ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x64C970ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x64C978ull
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC3_EML_TPC_QM_AXUSER_SECURED_BASE 0x64CB00ull
+#define DCORE0_TPC3_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x64CB80ull
+#define DCORE0_TPC3_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_DBG_HBW_BASE 0x64CC00ull
+#define DCORE0_TPC3_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC3_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC3_EML_TPC_QM_DBG_LBW_BASE 0x64CC80ull
+#define DCORE0_TPC3_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC3_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_TPC_QM_CGM_BASE 0x64CD80ull
+#define DCORE0_TPC3_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC3_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC3_EML_TPC_QM_SPECIAL_BASE 0x64CE80ull
+#define DCORE0_TPC3_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC3_EML_CS_BASE 0x7FF000ull
+#define DCORE0_TPC3_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_EML_CS_SECTION 0x1000
+#define mmDCORE0_TPC4_ROM_TABLE_BASE 0x800000ull
+#define DCORE0_TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_SPMU_BASE 0x801000ull
+#define DCORE0_TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_ETF_BASE 0x802000ull
+#define DCORE0_TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_STM_BASE 0x803000ull
+#define DCORE0_TPC4_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC4_EML_CTI_BASE 0x805000ull
+#define DCORE0_TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_FUNNEL_BASE 0x806000ull
+#define DCORE0_TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_BUSMON_0_BASE 0x807000ull
+#define DCORE0_TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_BUSMON_1_BASE 0x808000ull
+#define DCORE0_TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_BUSMON_2_BASE 0x809000ull
+#define DCORE0_TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_BUSMON_3_BASE 0x80A000ull
+#define DCORE0_TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC4_QM_ARC_RTT_BASE 0x80B000ull
+#define DCORE0_TPC4_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC4_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC4_EML_CFG_BASE 0x840000ull
+#define DCORE0_TPC4_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC4_EML_CFG_SPECIAL_BASE 0x840E80ull
+#define DCORE0_TPC4_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x841000ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC4_EML_TPC_CFG_BASE 0x841000ull
+#define DCORE0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x841050ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x8410A0ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x8410F0ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x841140ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x841190ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x8411E0ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x841230ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x841280ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x8412D0ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x841320ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x841370ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x8413C0ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x841410ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x841460ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x8414B0ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x841500ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_CFG_KERNEL_BASE 0x841508ull
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC4_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_0_BASE 0x8415DCull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_1_BASE 0x84162Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_2_BASE 0x84167Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_3_BASE 0x8416CCull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_4_BASE 0x84171Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_5_BASE 0x84176Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_6_BASE 0x8417BCull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_7_BASE 0x84180Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_8_BASE 0x84185Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_9_BASE 0x8418ACull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_10_BASE 0x8418FCull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_11_BASE 0x84194Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_12_BASE 0x84199Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_13_BASE 0x8419ECull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_14_BASE 0x841A3Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_15_BASE 0x841A8Cull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x841ADCull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_CFG_QM_BASE 0x841AE4ull
+#define DCORE0_TPC4_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC4_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC4_EML_TPC_CFG_AXUSER_BASE 0x841E00ull
+#define DCORE0_TPC4_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_CFG_SPECIAL_BASE 0x841E80ull
+#define DCORE0_TPC4_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC4_EML_QM_DCCM_BASE 0x842000ull
+#define DCORE0_TPC4_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC4_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_QM_ARCAUX_BASE 0x84A000ull
+#define DCORE0_TPC4_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC4_EML_QM_ARCAUX_SPECIAL_BASE 0x84AE80ull
+#define DCORE0_TPC4_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC4_EML_TPC_QM_BASE 0x84C000ull
+#define DCORE0_TPC4_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x84C900ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x84C908ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x84C910ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x84C918ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x84C920ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x84C928ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x84C930ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x84C938ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x84C940ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x84C948ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x84C950ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x84C958ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x84C960ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x84C968ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x84C970ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x84C978ull
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC4_EML_TPC_QM_AXUSER_SECURED_BASE 0x84CB00ull
+#define DCORE0_TPC4_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x84CB80ull
+#define DCORE0_TPC4_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_DBG_HBW_BASE 0x84CC00ull
+#define DCORE0_TPC4_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC4_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC4_EML_TPC_QM_DBG_LBW_BASE 0x84CC80ull
+#define DCORE0_TPC4_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC4_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_TPC_QM_CGM_BASE 0x84CD80ull
+#define DCORE0_TPC4_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC4_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC4_EML_TPC_QM_SPECIAL_BASE 0x84CE80ull
+#define DCORE0_TPC4_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC4_EML_CS_BASE 0x9FF000ull
+#define DCORE0_TPC4_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_EML_CS_SECTION 0x1000
+#define mmDCORE0_TPC5_ROM_TABLE_BASE 0xA00000ull
+#define DCORE0_TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_SPMU_BASE 0xA01000ull
+#define DCORE0_TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_ETF_BASE 0xA02000ull
+#define DCORE0_TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_STM_BASE 0xA03000ull
+#define DCORE0_TPC5_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC5_EML_CTI_BASE 0xA05000ull
+#define DCORE0_TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_FUNNEL_BASE 0xA06000ull
+#define DCORE0_TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_BUSMON_0_BASE 0xA07000ull
+#define DCORE0_TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_BUSMON_1_BASE 0xA08000ull
+#define DCORE0_TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_BUSMON_2_BASE 0xA09000ull
+#define DCORE0_TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_BUSMON_3_BASE 0xA0A000ull
+#define DCORE0_TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC5_QM_ARC_RTT_BASE 0xA0B000ull
+#define DCORE0_TPC5_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC5_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC5_EML_CFG_BASE 0xA40000ull
+#define DCORE0_TPC5_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC5_EML_CFG_SPECIAL_BASE 0xA40E80ull
+#define DCORE0_TPC5_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0xA41000ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC5_EML_TPC_CFG_BASE 0xA41000ull
+#define DCORE0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0xA41050ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0xA410A0ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0xA410F0ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0xA41140ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0xA41190ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0xA411E0ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0xA41230ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0xA41280ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0xA412D0ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0xA41320ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0xA41370ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0xA413C0ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0xA41410ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0xA41460ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0xA414B0ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0xA41500ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_CFG_KERNEL_BASE 0xA41508ull
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC5_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_0_BASE 0xA415DCull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_1_BASE 0xA4162Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_2_BASE 0xA4167Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_3_BASE 0xA416CCull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_4_BASE 0xA4171Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_5_BASE 0xA4176Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_6_BASE 0xA417BCull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_7_BASE 0xA4180Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_8_BASE 0xA4185Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_9_BASE 0xA418ACull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_10_BASE 0xA418FCull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_11_BASE 0xA4194Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_12_BASE 0xA4199Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_13_BASE 0xA419ECull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_14_BASE 0xA41A3Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_15_BASE 0xA41A8Cull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0xA41ADCull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_CFG_QM_BASE 0xA41AE4ull
+#define DCORE0_TPC5_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC5_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC5_EML_TPC_CFG_AXUSER_BASE 0xA41E00ull
+#define DCORE0_TPC5_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_CFG_SPECIAL_BASE 0xA41E80ull
+#define DCORE0_TPC5_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC5_EML_QM_DCCM_BASE 0xA42000ull
+#define DCORE0_TPC5_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC5_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_QM_ARCAUX_BASE 0xA4A000ull
+#define DCORE0_TPC5_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC5_EML_QM_ARCAUX_SPECIAL_BASE 0xA4AE80ull
+#define DCORE0_TPC5_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC5_EML_TPC_QM_BASE 0xA4C000ull
+#define DCORE0_TPC5_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0xA4C900ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0xA4C908ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0xA4C910ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0xA4C918ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0xA4C920ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0xA4C928ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0xA4C930ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0xA4C938ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0xA4C940ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0xA4C948ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0xA4C950ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0xA4C958ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0xA4C960ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0xA4C968ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0xA4C970ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0xA4C978ull
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC5_EML_TPC_QM_AXUSER_SECURED_BASE 0xA4CB00ull
+#define DCORE0_TPC5_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_AXUSER_NONSECURED_BASE 0xA4CB80ull
+#define DCORE0_TPC5_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_DBG_HBW_BASE 0xA4CC00ull
+#define DCORE0_TPC5_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC5_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC5_EML_TPC_QM_DBG_LBW_BASE 0xA4CC80ull
+#define DCORE0_TPC5_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC5_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_TPC_QM_CGM_BASE 0xA4CD80ull
+#define DCORE0_TPC5_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC5_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC5_EML_TPC_QM_SPECIAL_BASE 0xA4CE80ull
+#define DCORE0_TPC5_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC5_EML_CS_BASE 0xBFF000ull
+#define DCORE0_TPC5_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_EML_CS_SECTION 0x1000
+#define mmDCORE0_TPC6_ROM_TABLE_BASE 0xC00000ull
+#define DCORE0_TPC6_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_SPMU_BASE 0xC01000ull
+#define DCORE0_TPC6_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_SPMU_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_ETF_BASE 0xC02000ull
+#define DCORE0_TPC6_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_ETF_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_STM_BASE 0xC03000ull
+#define DCORE0_TPC6_EML_STM_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_STM_SECTION 0x2000
+#define mmDCORE0_TPC6_EML_CTI_BASE 0xC05000ull
+#define DCORE0_TPC6_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_CTI_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_FUNNEL_BASE 0xC06000ull
+#define DCORE0_TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_BUSMON_0_BASE 0xC07000ull
+#define DCORE0_TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_BUSMON_1_BASE 0xC08000ull
+#define DCORE0_TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_BUSMON_2_BASE 0xC09000ull
+#define DCORE0_TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_BUSMON_3_BASE 0xC0A000ull
+#define DCORE0_TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE0_TPC6_QM_ARC_RTT_BASE 0xC0B000ull
+#define DCORE0_TPC6_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_TPC6_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE0_TPC6_EML_CFG_BASE 0xC40000ull
+#define DCORE0_TPC6_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_CFG_SECTION 0xE800
+#define mmDCORE0_TPC6_EML_CFG_SPECIAL_BASE 0xC40E80ull
+#define DCORE0_TPC6_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0xC41000ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC6_EML_TPC_CFG_BASE 0xC41000ull
+#define DCORE0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0xC41050ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0xC410A0ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0xC410F0ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0xC41140ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0xC41190ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0xC411E0ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0xC41230ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0xC41280ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0xC412D0ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0xC41320ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0xC41370ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0xC413C0ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0xC41410ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0xC41460ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0xC414B0ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0xC41500ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_CFG_KERNEL_BASE 0xC41508ull
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC6_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_0_BASE 0xC415DCull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_1_BASE 0xC4162Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_2_BASE 0xC4167Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_3_BASE 0xC416CCull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_4_BASE 0xC4171Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_5_BASE 0xC4176Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_6_BASE 0xC417BCull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_7_BASE 0xC4180Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_8_BASE 0xC4185Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_9_BASE 0xC418ACull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_10_BASE 0xC418FCull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_11_BASE 0xC4194Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_12_BASE 0xC4199Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_13_BASE 0xC419ECull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_14_BASE 0xC41A3Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_15_BASE 0xC41A8Cull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0xC41ADCull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_CFG_QM_BASE 0xC41AE4ull
+#define DCORE0_TPC6_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC6_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC6_EML_TPC_CFG_AXUSER_BASE 0xC41E00ull
+#define DCORE0_TPC6_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_CFG_SPECIAL_BASE 0xC41E80ull
+#define DCORE0_TPC6_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC6_EML_QM_DCCM_BASE 0xC42000ull
+#define DCORE0_TPC6_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC6_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_QM_ARCAUX_BASE 0xC4A000ull
+#define DCORE0_TPC6_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE0_TPC6_EML_QM_ARCAUX_SPECIAL_BASE 0xC4AE80ull
+#define DCORE0_TPC6_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC6_EML_TPC_QM_BASE 0xC4C000ull
+#define DCORE0_TPC6_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0xC4C900ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0xC4C908ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0xC4C910ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0xC4C918ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0xC4C920ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0xC4C928ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0xC4C930ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0xC4C938ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0xC4C940ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0xC4C948ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0xC4C950ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0xC4C958ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0xC4C960ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0xC4C968ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0xC4C970ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0xC4C978ull
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC6_EML_TPC_QM_AXUSER_SECURED_BASE 0xC4CB00ull
+#define DCORE0_TPC6_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_AXUSER_NONSECURED_BASE 0xC4CB80ull
+#define DCORE0_TPC6_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_DBG_HBW_BASE 0xC4CC00ull
+#define DCORE0_TPC6_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC6_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC6_EML_TPC_QM_DBG_LBW_BASE 0xC4CC80ull
+#define DCORE0_TPC6_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC6_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_TPC_QM_CGM_BASE 0xC4CD80ull
+#define DCORE0_TPC6_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC6_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC6_EML_TPC_QM_SPECIAL_BASE 0xC4CE80ull
+#define DCORE0_TPC6_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE0_TPC6_EML_CS_BASE 0xDFF000ull
+#define DCORE0_TPC6_EML_CS_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_EML_CS_SECTION 0x201000
+#define mmDCORE1_TPC0_ROM_TABLE_BASE 0x1000000ull
+#define DCORE1_TPC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_SPMU_BASE 0x1001000ull
+#define DCORE1_TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_SPMU_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_ETF_BASE 0x1002000ull
+#define DCORE1_TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_ETF_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_STM_BASE 0x1003000ull
+#define DCORE1_TPC0_EML_STM_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_STM_SECTION 0x2000
+#define mmDCORE1_TPC0_EML_CTI_BASE 0x1005000ull
+#define DCORE1_TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_CTI_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_FUNNEL_BASE 0x1006000ull
+#define DCORE1_TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_BUSMON_0_BASE 0x1007000ull
+#define DCORE1_TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_BUSMON_1_BASE 0x1008000ull
+#define DCORE1_TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_BUSMON_2_BASE 0x1009000ull
+#define DCORE1_TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_BUSMON_3_BASE 0x100A000ull
+#define DCORE1_TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE1_TPC0_QM_ARC_RTT_BASE 0x100B000ull
+#define DCORE1_TPC0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_TPC0_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE1_TPC0_EML_CFG_BASE 0x1040000ull
+#define DCORE1_TPC0_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_CFG_SECTION 0xE800
+#define mmDCORE1_TPC0_EML_CFG_SPECIAL_BASE 0x1040E80ull
+#define DCORE1_TPC0_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x1041000ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC0_EML_TPC_CFG_BASE 0x1041000ull
+#define DCORE1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x1041050ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x10410A0ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x10410F0ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x1041140ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x1041190ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x10411E0ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x1041230ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x1041280ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x10412D0ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x1041320ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x1041370ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x10413C0ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x1041410ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x1041460ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x10414B0ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x1041500ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_CFG_KERNEL_BASE 0x1041508ull
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC0_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_0_BASE 0x10415DCull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_1_BASE 0x104162Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_2_BASE 0x104167Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_3_BASE 0x10416CCull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_4_BASE 0x104171Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_5_BASE 0x104176Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_6_BASE 0x10417BCull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_7_BASE 0x104180Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_8_BASE 0x104185Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_9_BASE 0x10418ACull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_10_BASE 0x10418FCull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_11_BASE 0x104194Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_12_BASE 0x104199Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_13_BASE 0x10419ECull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_14_BASE 0x1041A3Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_15_BASE 0x1041A8Cull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x1041ADCull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_CFG_QM_BASE 0x1041AE4ull
+#define DCORE1_TPC0_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC0_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC0_EML_TPC_CFG_AXUSER_BASE 0x1041E00ull
+#define DCORE1_TPC0_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_CFG_SPECIAL_BASE 0x1041E80ull
+#define DCORE1_TPC0_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC0_EML_QM_DCCM_BASE 0x1042000ull
+#define DCORE1_TPC0_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC0_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_QM_ARCAUX_BASE 0x104A000ull
+#define DCORE1_TPC0_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE1_TPC0_EML_QM_ARCAUX_SPECIAL_BASE 0x104AE80ull
+#define DCORE1_TPC0_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC0_EML_TPC_QM_BASE 0x104C000ull
+#define DCORE1_TPC0_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x104C900ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x104C908ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x104C910ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x104C918ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x104C920ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x104C928ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x104C930ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x104C938ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x104C940ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x104C948ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x104C950ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x104C958ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x104C960ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x104C968ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x104C970ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x104C978ull
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC0_EML_TPC_QM_AXUSER_SECURED_BASE 0x104CB00ull
+#define DCORE1_TPC0_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x104CB80ull
+#define DCORE1_TPC0_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_DBG_HBW_BASE 0x104CC00ull
+#define DCORE1_TPC0_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC0_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC0_EML_TPC_QM_DBG_LBW_BASE 0x104CC80ull
+#define DCORE1_TPC0_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC0_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_TPC_QM_CGM_BASE 0x104CD80ull
+#define DCORE1_TPC0_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC0_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC0_EML_TPC_QM_SPECIAL_BASE 0x104CE80ull
+#define DCORE1_TPC0_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE1_TPC0_EML_CS_BASE 0x11FF000ull
+#define DCORE1_TPC0_EML_CS_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_EML_CS_SECTION 0x1000
+#define mmDCORE1_TPC1_ROM_TABLE_BASE 0x1200000ull
+#define DCORE1_TPC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_SPMU_BASE 0x1201000ull
+#define DCORE1_TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_SPMU_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_ETF_BASE 0x1202000ull
+#define DCORE1_TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_ETF_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_STM_BASE 0x1203000ull
+#define DCORE1_TPC1_EML_STM_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_STM_SECTION 0x2000
+#define mmDCORE1_TPC1_EML_CTI_BASE 0x1205000ull
+#define DCORE1_TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_CTI_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_FUNNEL_BASE 0x1206000ull
+#define DCORE1_TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_BUSMON_0_BASE 0x1207000ull
+#define DCORE1_TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_BUSMON_1_BASE 0x1208000ull
+#define DCORE1_TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_BUSMON_2_BASE 0x1209000ull
+#define DCORE1_TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_BUSMON_3_BASE 0x120A000ull
+#define DCORE1_TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE1_TPC1_QM_ARC_RTT_BASE 0x120B000ull
+#define DCORE1_TPC1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_TPC1_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE1_TPC1_EML_CFG_BASE 0x1240000ull
+#define DCORE1_TPC1_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_CFG_SECTION 0xE800
+#define mmDCORE1_TPC1_EML_CFG_SPECIAL_BASE 0x1240E80ull
+#define DCORE1_TPC1_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x1241000ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC1_EML_TPC_CFG_BASE 0x1241000ull
+#define DCORE1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x1241050ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x12410A0ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x12410F0ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x1241140ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x1241190ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x12411E0ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x1241230ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x1241280ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x12412D0ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x1241320ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x1241370ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x12413C0ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x1241410ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x1241460ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x12414B0ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x1241500ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_CFG_KERNEL_BASE 0x1241508ull
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC1_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_0_BASE 0x12415DCull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_1_BASE 0x124162Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_2_BASE 0x124167Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_3_BASE 0x12416CCull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_4_BASE 0x124171Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_5_BASE 0x124176Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_6_BASE 0x12417BCull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_7_BASE 0x124180Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_8_BASE 0x124185Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_9_BASE 0x12418ACull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_10_BASE 0x12418FCull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_11_BASE 0x124194Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_12_BASE 0x124199Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_13_BASE 0x12419ECull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_14_BASE 0x1241A3Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_15_BASE 0x1241A8Cull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x1241ADCull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_CFG_QM_BASE 0x1241AE4ull
+#define DCORE1_TPC1_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC1_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC1_EML_TPC_CFG_AXUSER_BASE 0x1241E00ull
+#define DCORE1_TPC1_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_CFG_SPECIAL_BASE 0x1241E80ull
+#define DCORE1_TPC1_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC1_EML_QM_DCCM_BASE 0x1242000ull
+#define DCORE1_TPC1_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC1_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_QM_ARCAUX_BASE 0x124A000ull
+#define DCORE1_TPC1_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE1_TPC1_EML_QM_ARCAUX_SPECIAL_BASE 0x124AE80ull
+#define DCORE1_TPC1_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC1_EML_TPC_QM_BASE 0x124C000ull
+#define DCORE1_TPC1_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x124C900ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x124C908ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x124C910ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x124C918ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x124C920ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x124C928ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x124C930ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x124C938ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x124C940ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x124C948ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x124C950ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x124C958ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x124C960ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x124C968ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x124C970ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x124C978ull
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC1_EML_TPC_QM_AXUSER_SECURED_BASE 0x124CB00ull
+#define DCORE1_TPC1_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x124CB80ull
+#define DCORE1_TPC1_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_DBG_HBW_BASE 0x124CC00ull
+#define DCORE1_TPC1_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC1_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC1_EML_TPC_QM_DBG_LBW_BASE 0x124CC80ull
+#define DCORE1_TPC1_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC1_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_TPC_QM_CGM_BASE 0x124CD80ull
+#define DCORE1_TPC1_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC1_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC1_EML_TPC_QM_SPECIAL_BASE 0x124CE80ull
+#define DCORE1_TPC1_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE1_TPC1_EML_CS_BASE 0x13FF000ull
+#define DCORE1_TPC1_EML_CS_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_EML_CS_SECTION 0x1000
+#define mmDCORE1_TPC2_ROM_TABLE_BASE 0x1400000ull
+#define DCORE1_TPC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_SPMU_BASE 0x1401000ull
+#define DCORE1_TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_SPMU_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_ETF_BASE 0x1402000ull
+#define DCORE1_TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_ETF_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_STM_BASE 0x1403000ull
+#define DCORE1_TPC2_EML_STM_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_STM_SECTION 0x2000
+#define mmDCORE1_TPC2_EML_CTI_BASE 0x1405000ull
+#define DCORE1_TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_CTI_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_FUNNEL_BASE 0x1406000ull
+#define DCORE1_TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_BUSMON_0_BASE 0x1407000ull
+#define DCORE1_TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_BUSMON_1_BASE 0x1408000ull
+#define DCORE1_TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_BUSMON_2_BASE 0x1409000ull
+#define DCORE1_TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_BUSMON_3_BASE 0x140A000ull
+#define DCORE1_TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE1_TPC2_QM_ARC_RTT_BASE 0x140B000ull
+#define DCORE1_TPC2_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_TPC2_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE1_TPC2_EML_CFG_BASE 0x1440000ull
+#define DCORE1_TPC2_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_CFG_SECTION 0xE800
+#define mmDCORE1_TPC2_EML_CFG_SPECIAL_BASE 0x1440E80ull
+#define DCORE1_TPC2_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x1441000ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC2_EML_TPC_CFG_BASE 0x1441000ull
+#define DCORE1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x1441050ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x14410A0ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x14410F0ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x1441140ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x1441190ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x14411E0ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x1441230ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x1441280ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x14412D0ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x1441320ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x1441370ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x14413C0ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x1441410ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x1441460ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x14414B0ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x1441500ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_CFG_KERNEL_BASE 0x1441508ull
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC2_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_0_BASE 0x14415DCull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_1_BASE 0x144162Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_2_BASE 0x144167Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_3_BASE 0x14416CCull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_4_BASE 0x144171Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_5_BASE 0x144176Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_6_BASE 0x14417BCull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_7_BASE 0x144180Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_8_BASE 0x144185Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_9_BASE 0x14418ACull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_10_BASE 0x14418FCull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_11_BASE 0x144194Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_12_BASE 0x144199Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_13_BASE 0x14419ECull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_14_BASE 0x1441A3Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_15_BASE 0x1441A8Cull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x1441ADCull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_CFG_QM_BASE 0x1441AE4ull
+#define DCORE1_TPC2_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC2_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC2_EML_TPC_CFG_AXUSER_BASE 0x1441E00ull
+#define DCORE1_TPC2_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_CFG_SPECIAL_BASE 0x1441E80ull
+#define DCORE1_TPC2_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC2_EML_QM_DCCM_BASE 0x1442000ull
+#define DCORE1_TPC2_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC2_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_QM_ARCAUX_BASE 0x144A000ull
+#define DCORE1_TPC2_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE1_TPC2_EML_QM_ARCAUX_SPECIAL_BASE 0x144AE80ull
+#define DCORE1_TPC2_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC2_EML_TPC_QM_BASE 0x144C000ull
+#define DCORE1_TPC2_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x144C900ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x144C908ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x144C910ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x144C918ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x144C920ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x144C928ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x144C930ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x144C938ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x144C940ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x144C948ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x144C950ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x144C958ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x144C960ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x144C968ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x144C970ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x144C978ull
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC2_EML_TPC_QM_AXUSER_SECURED_BASE 0x144CB00ull
+#define DCORE1_TPC2_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x144CB80ull
+#define DCORE1_TPC2_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_DBG_HBW_BASE 0x144CC00ull
+#define DCORE1_TPC2_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC2_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC2_EML_TPC_QM_DBG_LBW_BASE 0x144CC80ull
+#define DCORE1_TPC2_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC2_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_TPC_QM_CGM_BASE 0x144CD80ull
+#define DCORE1_TPC2_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC2_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC2_EML_TPC_QM_SPECIAL_BASE 0x144CE80ull
+#define DCORE1_TPC2_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE1_TPC2_EML_CS_BASE 0x15FF000ull
+#define DCORE1_TPC2_EML_CS_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_EML_CS_SECTION 0x1000
+#define mmDCORE1_TPC3_ROM_TABLE_BASE 0x1600000ull
+#define DCORE1_TPC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_SPMU_BASE 0x1601000ull
+#define DCORE1_TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_SPMU_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_ETF_BASE 0x1602000ull
+#define DCORE1_TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_ETF_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_STM_BASE 0x1603000ull
+#define DCORE1_TPC3_EML_STM_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_STM_SECTION 0x2000
+#define mmDCORE1_TPC3_EML_CTI_BASE 0x1605000ull
+#define DCORE1_TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_CTI_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_FUNNEL_BASE 0x1606000ull
+#define DCORE1_TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_BUSMON_0_BASE 0x1607000ull
+#define DCORE1_TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_BUSMON_1_BASE 0x1608000ull
+#define DCORE1_TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_BUSMON_2_BASE 0x1609000ull
+#define DCORE1_TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_BUSMON_3_BASE 0x160A000ull
+#define DCORE1_TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE1_TPC3_QM_ARC_RTT_BASE 0x160B000ull
+#define DCORE1_TPC3_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_TPC3_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE1_TPC3_EML_CFG_BASE 0x1640000ull
+#define DCORE1_TPC3_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_CFG_SECTION 0xE800
+#define mmDCORE1_TPC3_EML_CFG_SPECIAL_BASE 0x1640E80ull
+#define DCORE1_TPC3_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x1641000ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC3_EML_TPC_CFG_BASE 0x1641000ull
+#define DCORE1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x1641050ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x16410A0ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x16410F0ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x1641140ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x1641190ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x16411E0ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x1641230ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x1641280ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x16412D0ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x1641320ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x1641370ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x16413C0ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x1641410ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x1641460ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x16414B0ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x1641500ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_CFG_KERNEL_BASE 0x1641508ull
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC3_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_0_BASE 0x16415DCull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_1_BASE 0x164162Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_2_BASE 0x164167Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_3_BASE 0x16416CCull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_4_BASE 0x164171Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_5_BASE 0x164176Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_6_BASE 0x16417BCull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_7_BASE 0x164180Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_8_BASE 0x164185Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_9_BASE 0x16418ACull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_10_BASE 0x16418FCull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_11_BASE 0x164194Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_12_BASE 0x164199Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_13_BASE 0x16419ECull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_14_BASE 0x1641A3Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_15_BASE 0x1641A8Cull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x1641ADCull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_CFG_QM_BASE 0x1641AE4ull
+#define DCORE1_TPC3_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC3_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC3_EML_TPC_CFG_AXUSER_BASE 0x1641E00ull
+#define DCORE1_TPC3_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_CFG_SPECIAL_BASE 0x1641E80ull
+#define DCORE1_TPC3_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC3_EML_QM_DCCM_BASE 0x1642000ull
+#define DCORE1_TPC3_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC3_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_QM_ARCAUX_BASE 0x164A000ull
+#define DCORE1_TPC3_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE1_TPC3_EML_QM_ARCAUX_SPECIAL_BASE 0x164AE80ull
+#define DCORE1_TPC3_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC3_EML_TPC_QM_BASE 0x164C000ull
+#define DCORE1_TPC3_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x164C900ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x164C908ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x164C910ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x164C918ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x164C920ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x164C928ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x164C930ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x164C938ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x164C940ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x164C948ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x164C950ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x164C958ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x164C960ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x164C968ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x164C970ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x164C978ull
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC3_EML_TPC_QM_AXUSER_SECURED_BASE 0x164CB00ull
+#define DCORE1_TPC3_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x164CB80ull
+#define DCORE1_TPC3_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_DBG_HBW_BASE 0x164CC00ull
+#define DCORE1_TPC3_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC3_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC3_EML_TPC_QM_DBG_LBW_BASE 0x164CC80ull
+#define DCORE1_TPC3_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC3_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_TPC_QM_CGM_BASE 0x164CD80ull
+#define DCORE1_TPC3_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC3_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC3_EML_TPC_QM_SPECIAL_BASE 0x164CE80ull
+#define DCORE1_TPC3_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE1_TPC3_EML_CS_BASE 0x17FF000ull
+#define DCORE1_TPC3_EML_CS_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_EML_CS_SECTION 0x1000
+#define mmDCORE1_TPC4_ROM_TABLE_BASE 0x1800000ull
+#define DCORE1_TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_SPMU_BASE 0x1801000ull
+#define DCORE1_TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_SPMU_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_ETF_BASE 0x1802000ull
+#define DCORE1_TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_ETF_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_STM_BASE 0x1803000ull
+#define DCORE1_TPC4_EML_STM_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_STM_SECTION 0x2000
+#define mmDCORE1_TPC4_EML_CTI_BASE 0x1805000ull
+#define DCORE1_TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_CTI_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_FUNNEL_BASE 0x1806000ull
+#define DCORE1_TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_BUSMON_0_BASE 0x1807000ull
+#define DCORE1_TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_BUSMON_1_BASE 0x1808000ull
+#define DCORE1_TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_BUSMON_2_BASE 0x1809000ull
+#define DCORE1_TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_BUSMON_3_BASE 0x180A000ull
+#define DCORE1_TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE1_TPC4_QM_ARC_RTT_BASE 0x180B000ull
+#define DCORE1_TPC4_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_TPC4_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE1_TPC4_EML_CFG_BASE 0x1840000ull
+#define DCORE1_TPC4_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_CFG_SECTION 0xE800
+#define mmDCORE1_TPC4_EML_CFG_SPECIAL_BASE 0x1840E80ull
+#define DCORE1_TPC4_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x1841000ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC4_EML_TPC_CFG_BASE 0x1841000ull
+#define DCORE1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x1841050ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x18410A0ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x18410F0ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x1841140ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x1841190ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x18411E0ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x1841230ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x1841280ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x18412D0ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x1841320ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x1841370ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x18413C0ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x1841410ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x1841460ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x18414B0ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x1841500ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_CFG_KERNEL_BASE 0x1841508ull
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC4_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_0_BASE 0x18415DCull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_1_BASE 0x184162Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_2_BASE 0x184167Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_3_BASE 0x18416CCull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_4_BASE 0x184171Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_5_BASE 0x184176Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_6_BASE 0x18417BCull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_7_BASE 0x184180Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_8_BASE 0x184185Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_9_BASE 0x18418ACull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_10_BASE 0x18418FCull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_11_BASE 0x184194Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_12_BASE 0x184199Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_13_BASE 0x18419ECull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_14_BASE 0x1841A3Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_15_BASE 0x1841A8Cull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x1841ADCull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_CFG_QM_BASE 0x1841AE4ull
+#define DCORE1_TPC4_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC4_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC4_EML_TPC_CFG_AXUSER_BASE 0x1841E00ull
+#define DCORE1_TPC4_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_CFG_SPECIAL_BASE 0x1841E80ull
+#define DCORE1_TPC4_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC4_EML_QM_DCCM_BASE 0x1842000ull
+#define DCORE1_TPC4_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC4_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_QM_ARCAUX_BASE 0x184A000ull
+#define DCORE1_TPC4_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE1_TPC4_EML_QM_ARCAUX_SPECIAL_BASE 0x184AE80ull
+#define DCORE1_TPC4_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC4_EML_TPC_QM_BASE 0x184C000ull
+#define DCORE1_TPC4_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x184C900ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x184C908ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x184C910ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x184C918ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x184C920ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x184C928ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x184C930ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x184C938ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x184C940ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x184C948ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x184C950ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x184C958ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x184C960ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x184C968ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x184C970ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x184C978ull
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC4_EML_TPC_QM_AXUSER_SECURED_BASE 0x184CB00ull
+#define DCORE1_TPC4_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x184CB80ull
+#define DCORE1_TPC4_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_DBG_HBW_BASE 0x184CC00ull
+#define DCORE1_TPC4_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC4_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC4_EML_TPC_QM_DBG_LBW_BASE 0x184CC80ull
+#define DCORE1_TPC4_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC4_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_TPC_QM_CGM_BASE 0x184CD80ull
+#define DCORE1_TPC4_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC4_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC4_EML_TPC_QM_SPECIAL_BASE 0x184CE80ull
+#define DCORE1_TPC4_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE1_TPC4_EML_CS_BASE 0x19FF000ull
+#define DCORE1_TPC4_EML_CS_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_EML_CS_SECTION 0x1000
+#define mmDCORE1_TPC5_ROM_TABLE_BASE 0x1A00000ull
+#define DCORE1_TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_SPMU_BASE 0x1A01000ull
+#define DCORE1_TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_SPMU_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_ETF_BASE 0x1A02000ull
+#define DCORE1_TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_ETF_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_STM_BASE 0x1A03000ull
+#define DCORE1_TPC5_EML_STM_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_STM_SECTION 0x2000
+#define mmDCORE1_TPC5_EML_CTI_BASE 0x1A05000ull
+#define DCORE1_TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_CTI_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_FUNNEL_BASE 0x1A06000ull
+#define DCORE1_TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_BUSMON_0_BASE 0x1A07000ull
+#define DCORE1_TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_BUSMON_1_BASE 0x1A08000ull
+#define DCORE1_TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_BUSMON_2_BASE 0x1A09000ull
+#define DCORE1_TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_BUSMON_3_BASE 0x1A0A000ull
+#define DCORE1_TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE1_TPC5_QM_ARC_RTT_BASE 0x1A0B000ull
+#define DCORE1_TPC5_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_TPC5_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE1_TPC5_EML_CFG_BASE 0x1A40000ull
+#define DCORE1_TPC5_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_CFG_SECTION 0xE800
+#define mmDCORE1_TPC5_EML_CFG_SPECIAL_BASE 0x1A40E80ull
+#define DCORE1_TPC5_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x1A41000ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC5_EML_TPC_CFG_BASE 0x1A41000ull
+#define DCORE1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x1A41050ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x1A410A0ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x1A410F0ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x1A41140ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x1A41190ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x1A411E0ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x1A41230ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x1A41280ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x1A412D0ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x1A41320ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x1A41370ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x1A413C0ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x1A41410ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x1A41460ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x1A414B0ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x1A41500ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_CFG_KERNEL_BASE 0x1A41508ull
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC5_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_0_BASE 0x1A415DCull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_1_BASE 0x1A4162Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_2_BASE 0x1A4167Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_3_BASE 0x1A416CCull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_4_BASE 0x1A4171Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_5_BASE 0x1A4176Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_6_BASE 0x1A417BCull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_7_BASE 0x1A4180Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_8_BASE 0x1A4185Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_9_BASE 0x1A418ACull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_10_BASE 0x1A418FCull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_11_BASE 0x1A4194Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_12_BASE 0x1A4199Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_13_BASE 0x1A419ECull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_14_BASE 0x1A41A3Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_15_BASE 0x1A41A8Cull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x1A41ADCull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_CFG_QM_BASE 0x1A41AE4ull
+#define DCORE1_TPC5_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC5_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC5_EML_TPC_CFG_AXUSER_BASE 0x1A41E00ull
+#define DCORE1_TPC5_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_CFG_SPECIAL_BASE 0x1A41E80ull
+#define DCORE1_TPC5_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC5_EML_QM_DCCM_BASE 0x1A42000ull
+#define DCORE1_TPC5_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC5_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_QM_ARCAUX_BASE 0x1A4A000ull
+#define DCORE1_TPC5_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE1_TPC5_EML_QM_ARCAUX_SPECIAL_BASE 0x1A4AE80ull
+#define DCORE1_TPC5_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC5_EML_TPC_QM_BASE 0x1A4C000ull
+#define DCORE1_TPC5_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x1A4C900ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x1A4C908ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x1A4C910ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x1A4C918ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x1A4C920ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x1A4C928ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x1A4C930ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x1A4C938ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x1A4C940ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x1A4C948ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x1A4C950ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x1A4C958ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x1A4C960ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x1A4C968ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x1A4C970ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x1A4C978ull
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC5_EML_TPC_QM_AXUSER_SECURED_BASE 0x1A4CB00ull
+#define DCORE1_TPC5_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x1A4CB80ull
+#define DCORE1_TPC5_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_DBG_HBW_BASE 0x1A4CC00ull
+#define DCORE1_TPC5_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC5_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC5_EML_TPC_QM_DBG_LBW_BASE 0x1A4CC80ull
+#define DCORE1_TPC5_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC5_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_TPC_QM_CGM_BASE 0x1A4CD80ull
+#define DCORE1_TPC5_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC5_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC5_EML_TPC_QM_SPECIAL_BASE 0x1A4CE80ull
+#define DCORE1_TPC5_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE1_TPC5_EML_CS_BASE 0x1BFF000ull
+#define DCORE1_TPC5_EML_CS_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_EML_CS_SECTION 0x401000
+#define mmDCORE2_TPC0_ROM_TABLE_BASE 0x2000000ull
+#define DCORE2_TPC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_SPMU_BASE 0x2001000ull
+#define DCORE2_TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_SPMU_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_ETF_BASE 0x2002000ull
+#define DCORE2_TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_ETF_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_STM_BASE 0x2003000ull
+#define DCORE2_TPC0_EML_STM_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_STM_SECTION 0x2000
+#define mmDCORE2_TPC0_EML_CTI_BASE 0x2005000ull
+#define DCORE2_TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_CTI_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_FUNNEL_BASE 0x2006000ull
+#define DCORE2_TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_BUSMON_0_BASE 0x2007000ull
+#define DCORE2_TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_BUSMON_1_BASE 0x2008000ull
+#define DCORE2_TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_BUSMON_2_BASE 0x2009000ull
+#define DCORE2_TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_BUSMON_3_BASE 0x200A000ull
+#define DCORE2_TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE2_TPC0_QM_ARC_RTT_BASE 0x200B000ull
+#define DCORE2_TPC0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_TPC0_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE2_TPC0_EML_CFG_BASE 0x2040000ull
+#define DCORE2_TPC0_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_CFG_SECTION 0xE800
+#define mmDCORE2_TPC0_EML_CFG_SPECIAL_BASE 0x2040E80ull
+#define DCORE2_TPC0_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x2041000ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC0_EML_TPC_CFG_BASE 0x2041000ull
+#define DCORE2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x2041050ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x20410A0ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x20410F0ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x2041140ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x2041190ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x20411E0ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x2041230ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x2041280ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x20412D0ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x2041320ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x2041370ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x20413C0ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x2041410ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x2041460ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x20414B0ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x2041500ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_CFG_KERNEL_BASE 0x2041508ull
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC0_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_0_BASE 0x20415DCull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_1_BASE 0x204162Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_2_BASE 0x204167Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_3_BASE 0x20416CCull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_4_BASE 0x204171Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_5_BASE 0x204176Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_6_BASE 0x20417BCull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_7_BASE 0x204180Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_8_BASE 0x204185Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_9_BASE 0x20418ACull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_10_BASE 0x20418FCull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_11_BASE 0x204194Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_12_BASE 0x204199Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_13_BASE 0x20419ECull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_14_BASE 0x2041A3Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_15_BASE 0x2041A8Cull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x2041ADCull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_CFG_QM_BASE 0x2041AE4ull
+#define DCORE2_TPC0_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC0_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC0_EML_TPC_CFG_AXUSER_BASE 0x2041E00ull
+#define DCORE2_TPC0_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_CFG_SPECIAL_BASE 0x2041E80ull
+#define DCORE2_TPC0_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC0_EML_QM_DCCM_BASE 0x2042000ull
+#define DCORE2_TPC0_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC0_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_QM_ARCAUX_BASE 0x204A000ull
+#define DCORE2_TPC0_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE2_TPC0_EML_QM_ARCAUX_SPECIAL_BASE 0x204AE80ull
+#define DCORE2_TPC0_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC0_EML_TPC_QM_BASE 0x204C000ull
+#define DCORE2_TPC0_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x204C900ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x204C908ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x204C910ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x204C918ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x204C920ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x204C928ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x204C930ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x204C938ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x204C940ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x204C948ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x204C950ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x204C958ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x204C960ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x204C968ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x204C970ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x204C978ull
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC0_EML_TPC_QM_AXUSER_SECURED_BASE 0x204CB00ull
+#define DCORE2_TPC0_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x204CB80ull
+#define DCORE2_TPC0_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_DBG_HBW_BASE 0x204CC00ull
+#define DCORE2_TPC0_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC0_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC0_EML_TPC_QM_DBG_LBW_BASE 0x204CC80ull
+#define DCORE2_TPC0_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC0_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_TPC_QM_CGM_BASE 0x204CD80ull
+#define DCORE2_TPC0_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC0_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC0_EML_TPC_QM_SPECIAL_BASE 0x204CE80ull
+#define DCORE2_TPC0_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE2_TPC0_EML_CS_BASE 0x21FF000ull
+#define DCORE2_TPC0_EML_CS_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_EML_CS_SECTION 0x1000
+#define mmDCORE2_TPC1_ROM_TABLE_BASE 0x2200000ull
+#define DCORE2_TPC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_SPMU_BASE 0x2201000ull
+#define DCORE2_TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_SPMU_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_ETF_BASE 0x2202000ull
+#define DCORE2_TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_ETF_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_STM_BASE 0x2203000ull
+#define DCORE2_TPC1_EML_STM_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_STM_SECTION 0x2000
+#define mmDCORE2_TPC1_EML_CTI_BASE 0x2205000ull
+#define DCORE2_TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_CTI_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_FUNNEL_BASE 0x2206000ull
+#define DCORE2_TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_BUSMON_0_BASE 0x2207000ull
+#define DCORE2_TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_BUSMON_1_BASE 0x2208000ull
+#define DCORE2_TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_BUSMON_2_BASE 0x2209000ull
+#define DCORE2_TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_BUSMON_3_BASE 0x220A000ull
+#define DCORE2_TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE2_TPC1_QM_ARC_RTT_BASE 0x220B000ull
+#define DCORE2_TPC1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_TPC1_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE2_TPC1_EML_CFG_BASE 0x2240000ull
+#define DCORE2_TPC1_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_CFG_SECTION 0xE800
+#define mmDCORE2_TPC1_EML_CFG_SPECIAL_BASE 0x2240E80ull
+#define DCORE2_TPC1_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x2241000ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC1_EML_TPC_CFG_BASE 0x2241000ull
+#define DCORE2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x2241050ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x22410A0ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x22410F0ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x2241140ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x2241190ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x22411E0ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x2241230ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x2241280ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x22412D0ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x2241320ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x2241370ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x22413C0ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x2241410ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x2241460ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x22414B0ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x2241500ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_CFG_KERNEL_BASE 0x2241508ull
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC1_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_0_BASE 0x22415DCull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_1_BASE 0x224162Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_2_BASE 0x224167Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_3_BASE 0x22416CCull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_4_BASE 0x224171Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_5_BASE 0x224176Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_6_BASE 0x22417BCull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_7_BASE 0x224180Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_8_BASE 0x224185Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_9_BASE 0x22418ACull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_10_BASE 0x22418FCull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_11_BASE 0x224194Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_12_BASE 0x224199Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_13_BASE 0x22419ECull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_14_BASE 0x2241A3Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_15_BASE 0x2241A8Cull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x2241ADCull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_CFG_QM_BASE 0x2241AE4ull
+#define DCORE2_TPC1_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC1_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC1_EML_TPC_CFG_AXUSER_BASE 0x2241E00ull
+#define DCORE2_TPC1_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_CFG_SPECIAL_BASE 0x2241E80ull
+#define DCORE2_TPC1_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC1_EML_QM_DCCM_BASE 0x2242000ull
+#define DCORE2_TPC1_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC1_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_QM_ARCAUX_BASE 0x224A000ull
+#define DCORE2_TPC1_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE2_TPC1_EML_QM_ARCAUX_SPECIAL_BASE 0x224AE80ull
+#define DCORE2_TPC1_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC1_EML_TPC_QM_BASE 0x224C000ull
+#define DCORE2_TPC1_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x224C900ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x224C908ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x224C910ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x224C918ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x224C920ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x224C928ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x224C930ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x224C938ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x224C940ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x224C948ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x224C950ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x224C958ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x224C960ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x224C968ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x224C970ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x224C978ull
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC1_EML_TPC_QM_AXUSER_SECURED_BASE 0x224CB00ull
+#define DCORE2_TPC1_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x224CB80ull
+#define DCORE2_TPC1_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_DBG_HBW_BASE 0x224CC00ull
+#define DCORE2_TPC1_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC1_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC1_EML_TPC_QM_DBG_LBW_BASE 0x224CC80ull
+#define DCORE2_TPC1_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC1_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_TPC_QM_CGM_BASE 0x224CD80ull
+#define DCORE2_TPC1_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC1_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC1_EML_TPC_QM_SPECIAL_BASE 0x224CE80ull
+#define DCORE2_TPC1_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE2_TPC1_EML_CS_BASE 0x23FF000ull
+#define DCORE2_TPC1_EML_CS_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_EML_CS_SECTION 0x1000
+#define mmDCORE2_TPC2_ROM_TABLE_BASE 0x2400000ull
+#define DCORE2_TPC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_SPMU_BASE 0x2401000ull
+#define DCORE2_TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_SPMU_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_ETF_BASE 0x2402000ull
+#define DCORE2_TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_ETF_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_STM_BASE 0x2403000ull
+#define DCORE2_TPC2_EML_STM_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_STM_SECTION 0x2000
+#define mmDCORE2_TPC2_EML_CTI_BASE 0x2405000ull
+#define DCORE2_TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_CTI_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_FUNNEL_BASE 0x2406000ull
+#define DCORE2_TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_BUSMON_0_BASE 0x2407000ull
+#define DCORE2_TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_BUSMON_1_BASE 0x2408000ull
+#define DCORE2_TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_BUSMON_2_BASE 0x2409000ull
+#define DCORE2_TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_BUSMON_3_BASE 0x240A000ull
+#define DCORE2_TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE2_TPC2_QM_ARC_RTT_BASE 0x240B000ull
+#define DCORE2_TPC2_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_TPC2_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE2_TPC2_EML_CFG_BASE 0x2440000ull
+#define DCORE2_TPC2_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_CFG_SECTION 0xE800
+#define mmDCORE2_TPC2_EML_CFG_SPECIAL_BASE 0x2440E80ull
+#define DCORE2_TPC2_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x2441000ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC2_EML_TPC_CFG_BASE 0x2441000ull
+#define DCORE2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x2441050ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x24410A0ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x24410F0ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x2441140ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x2441190ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x24411E0ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x2441230ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x2441280ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x24412D0ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x2441320ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x2441370ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x24413C0ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x2441410ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x2441460ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x24414B0ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x2441500ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_CFG_KERNEL_BASE 0x2441508ull
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC2_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_0_BASE 0x24415DCull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_1_BASE 0x244162Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_2_BASE 0x244167Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_3_BASE 0x24416CCull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_4_BASE 0x244171Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_5_BASE 0x244176Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_6_BASE 0x24417BCull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_7_BASE 0x244180Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_8_BASE 0x244185Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_9_BASE 0x24418ACull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_10_BASE 0x24418FCull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_11_BASE 0x244194Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_12_BASE 0x244199Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_13_BASE 0x24419ECull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_14_BASE 0x2441A3Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_15_BASE 0x2441A8Cull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x2441ADCull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_CFG_QM_BASE 0x2441AE4ull
+#define DCORE2_TPC2_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC2_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC2_EML_TPC_CFG_AXUSER_BASE 0x2441E00ull
+#define DCORE2_TPC2_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_CFG_SPECIAL_BASE 0x2441E80ull
+#define DCORE2_TPC2_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC2_EML_QM_DCCM_BASE 0x2442000ull
+#define DCORE2_TPC2_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC2_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_QM_ARCAUX_BASE 0x244A000ull
+#define DCORE2_TPC2_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE2_TPC2_EML_QM_ARCAUX_SPECIAL_BASE 0x244AE80ull
+#define DCORE2_TPC2_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC2_EML_TPC_QM_BASE 0x244C000ull
+#define DCORE2_TPC2_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x244C900ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x244C908ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x244C910ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x244C918ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x244C920ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x244C928ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x244C930ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x244C938ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x244C940ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x244C948ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x244C950ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x244C958ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x244C960ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x244C968ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x244C970ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x244C978ull
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC2_EML_TPC_QM_AXUSER_SECURED_BASE 0x244CB00ull
+#define DCORE2_TPC2_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x244CB80ull
+#define DCORE2_TPC2_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_DBG_HBW_BASE 0x244CC00ull
+#define DCORE2_TPC2_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC2_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC2_EML_TPC_QM_DBG_LBW_BASE 0x244CC80ull
+#define DCORE2_TPC2_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC2_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_TPC_QM_CGM_BASE 0x244CD80ull
+#define DCORE2_TPC2_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC2_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC2_EML_TPC_QM_SPECIAL_BASE 0x244CE80ull
+#define DCORE2_TPC2_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE2_TPC2_EML_CS_BASE 0x25FF000ull
+#define DCORE2_TPC2_EML_CS_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_EML_CS_SECTION 0x1000
+#define mmDCORE2_TPC3_ROM_TABLE_BASE 0x2600000ull
+#define DCORE2_TPC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_SPMU_BASE 0x2601000ull
+#define DCORE2_TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_SPMU_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_ETF_BASE 0x2602000ull
+#define DCORE2_TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_ETF_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_STM_BASE 0x2603000ull
+#define DCORE2_TPC3_EML_STM_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_STM_SECTION 0x2000
+#define mmDCORE2_TPC3_EML_CTI_BASE 0x2605000ull
+#define DCORE2_TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_CTI_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_FUNNEL_BASE 0x2606000ull
+#define DCORE2_TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_BUSMON_0_BASE 0x2607000ull
+#define DCORE2_TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_BUSMON_1_BASE 0x2608000ull
+#define DCORE2_TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_BUSMON_2_BASE 0x2609000ull
+#define DCORE2_TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_BUSMON_3_BASE 0x260A000ull
+#define DCORE2_TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE2_TPC3_QM_ARC_RTT_BASE 0x260B000ull
+#define DCORE2_TPC3_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_TPC3_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE2_TPC3_EML_CFG_BASE 0x2640000ull
+#define DCORE2_TPC3_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_CFG_SECTION 0xE800
+#define mmDCORE2_TPC3_EML_CFG_SPECIAL_BASE 0x2640E80ull
+#define DCORE2_TPC3_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x2641000ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC3_EML_TPC_CFG_BASE 0x2641000ull
+#define DCORE2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x2641050ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x26410A0ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x26410F0ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x2641140ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x2641190ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x26411E0ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x2641230ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x2641280ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x26412D0ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x2641320ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x2641370ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x26413C0ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x2641410ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x2641460ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x26414B0ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x2641500ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_CFG_KERNEL_BASE 0x2641508ull
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC3_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_0_BASE 0x26415DCull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_1_BASE 0x264162Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_2_BASE 0x264167Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_3_BASE 0x26416CCull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_4_BASE 0x264171Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_5_BASE 0x264176Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_6_BASE 0x26417BCull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_7_BASE 0x264180Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_8_BASE 0x264185Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_9_BASE 0x26418ACull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_10_BASE 0x26418FCull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_11_BASE 0x264194Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_12_BASE 0x264199Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_13_BASE 0x26419ECull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_14_BASE 0x2641A3Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_15_BASE 0x2641A8Cull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x2641ADCull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_CFG_QM_BASE 0x2641AE4ull
+#define DCORE2_TPC3_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC3_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC3_EML_TPC_CFG_AXUSER_BASE 0x2641E00ull
+#define DCORE2_TPC3_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_CFG_SPECIAL_BASE 0x2641E80ull
+#define DCORE2_TPC3_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC3_EML_QM_DCCM_BASE 0x2642000ull
+#define DCORE2_TPC3_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC3_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_QM_ARCAUX_BASE 0x264A000ull
+#define DCORE2_TPC3_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE2_TPC3_EML_QM_ARCAUX_SPECIAL_BASE 0x264AE80ull
+#define DCORE2_TPC3_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC3_EML_TPC_QM_BASE 0x264C000ull
+#define DCORE2_TPC3_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x264C900ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x264C908ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x264C910ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x264C918ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x264C920ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x264C928ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x264C930ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x264C938ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x264C940ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x264C948ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x264C950ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x264C958ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x264C960ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x264C968ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x264C970ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x264C978ull
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC3_EML_TPC_QM_AXUSER_SECURED_BASE 0x264CB00ull
+#define DCORE2_TPC3_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x264CB80ull
+#define DCORE2_TPC3_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_DBG_HBW_BASE 0x264CC00ull
+#define DCORE2_TPC3_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC3_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC3_EML_TPC_QM_DBG_LBW_BASE 0x264CC80ull
+#define DCORE2_TPC3_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC3_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_TPC_QM_CGM_BASE 0x264CD80ull
+#define DCORE2_TPC3_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC3_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC3_EML_TPC_QM_SPECIAL_BASE 0x264CE80ull
+#define DCORE2_TPC3_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE2_TPC3_EML_CS_BASE 0x27FF000ull
+#define DCORE2_TPC3_EML_CS_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_EML_CS_SECTION 0x1000
+#define mmDCORE2_TPC4_ROM_TABLE_BASE 0x2800000ull
+#define DCORE2_TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_SPMU_BASE 0x2801000ull
+#define DCORE2_TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_SPMU_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_ETF_BASE 0x2802000ull
+#define DCORE2_TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_ETF_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_STM_BASE 0x2803000ull
+#define DCORE2_TPC4_EML_STM_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_STM_SECTION 0x2000
+#define mmDCORE2_TPC4_EML_CTI_BASE 0x2805000ull
+#define DCORE2_TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_CTI_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_FUNNEL_BASE 0x2806000ull
+#define DCORE2_TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_BUSMON_0_BASE 0x2807000ull
+#define DCORE2_TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_BUSMON_1_BASE 0x2808000ull
+#define DCORE2_TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_BUSMON_2_BASE 0x2809000ull
+#define DCORE2_TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_BUSMON_3_BASE 0x280A000ull
+#define DCORE2_TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE2_TPC4_QM_ARC_RTT_BASE 0x280B000ull
+#define DCORE2_TPC4_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_TPC4_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE2_TPC4_EML_CFG_BASE 0x2840000ull
+#define DCORE2_TPC4_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_CFG_SECTION 0xE800
+#define mmDCORE2_TPC4_EML_CFG_SPECIAL_BASE 0x2840E80ull
+#define DCORE2_TPC4_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x2841000ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC4_EML_TPC_CFG_BASE 0x2841000ull
+#define DCORE2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x2841050ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x28410A0ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x28410F0ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x2841140ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x2841190ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x28411E0ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x2841230ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x2841280ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x28412D0ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x2841320ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x2841370ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x28413C0ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x2841410ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x2841460ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x28414B0ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x2841500ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_CFG_KERNEL_BASE 0x2841508ull
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC4_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_0_BASE 0x28415DCull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_1_BASE 0x284162Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_2_BASE 0x284167Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_3_BASE 0x28416CCull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_4_BASE 0x284171Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_5_BASE 0x284176Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_6_BASE 0x28417BCull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_7_BASE 0x284180Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_8_BASE 0x284185Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_9_BASE 0x28418ACull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_10_BASE 0x28418FCull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_11_BASE 0x284194Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_12_BASE 0x284199Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_13_BASE 0x28419ECull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_14_BASE 0x2841A3Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_15_BASE 0x2841A8Cull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x2841ADCull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_CFG_QM_BASE 0x2841AE4ull
+#define DCORE2_TPC4_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC4_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC4_EML_TPC_CFG_AXUSER_BASE 0x2841E00ull
+#define DCORE2_TPC4_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_CFG_SPECIAL_BASE 0x2841E80ull
+#define DCORE2_TPC4_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC4_EML_QM_DCCM_BASE 0x2842000ull
+#define DCORE2_TPC4_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC4_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_QM_ARCAUX_BASE 0x284A000ull
+#define DCORE2_TPC4_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE2_TPC4_EML_QM_ARCAUX_SPECIAL_BASE 0x284AE80ull
+#define DCORE2_TPC4_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC4_EML_TPC_QM_BASE 0x284C000ull
+#define DCORE2_TPC4_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x284C900ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x284C908ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x284C910ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x284C918ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x284C920ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x284C928ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x284C930ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x284C938ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x284C940ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x284C948ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x284C950ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x284C958ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x284C960ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x284C968ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x284C970ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x284C978ull
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC4_EML_TPC_QM_AXUSER_SECURED_BASE 0x284CB00ull
+#define DCORE2_TPC4_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x284CB80ull
+#define DCORE2_TPC4_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_DBG_HBW_BASE 0x284CC00ull
+#define DCORE2_TPC4_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC4_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC4_EML_TPC_QM_DBG_LBW_BASE 0x284CC80ull
+#define DCORE2_TPC4_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC4_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_TPC_QM_CGM_BASE 0x284CD80ull
+#define DCORE2_TPC4_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC4_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC4_EML_TPC_QM_SPECIAL_BASE 0x284CE80ull
+#define DCORE2_TPC4_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE2_TPC4_EML_CS_BASE 0x29FF000ull
+#define DCORE2_TPC4_EML_CS_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_EML_CS_SECTION 0x1000
+#define mmDCORE2_TPC5_ROM_TABLE_BASE 0x2A00000ull
+#define DCORE2_TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_SPMU_BASE 0x2A01000ull
+#define DCORE2_TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_SPMU_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_ETF_BASE 0x2A02000ull
+#define DCORE2_TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_ETF_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_STM_BASE 0x2A03000ull
+#define DCORE2_TPC5_EML_STM_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_STM_SECTION 0x2000
+#define mmDCORE2_TPC5_EML_CTI_BASE 0x2A05000ull
+#define DCORE2_TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_CTI_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_FUNNEL_BASE 0x2A06000ull
+#define DCORE2_TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_BUSMON_0_BASE 0x2A07000ull
+#define DCORE2_TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_BUSMON_1_BASE 0x2A08000ull
+#define DCORE2_TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_BUSMON_2_BASE 0x2A09000ull
+#define DCORE2_TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_BUSMON_3_BASE 0x2A0A000ull
+#define DCORE2_TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE2_TPC5_QM_ARC_RTT_BASE 0x2A0B000ull
+#define DCORE2_TPC5_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_TPC5_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE2_TPC5_EML_CFG_BASE 0x2A40000ull
+#define DCORE2_TPC5_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_CFG_SECTION 0xE800
+#define mmDCORE2_TPC5_EML_CFG_SPECIAL_BASE 0x2A40E80ull
+#define DCORE2_TPC5_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x2A41000ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC5_EML_TPC_CFG_BASE 0x2A41000ull
+#define DCORE2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x2A41050ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x2A410A0ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x2A410F0ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x2A41140ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x2A41190ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x2A411E0ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x2A41230ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x2A41280ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x2A412D0ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x2A41320ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x2A41370ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x2A413C0ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x2A41410ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x2A41460ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x2A414B0ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x2A41500ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_CFG_KERNEL_BASE 0x2A41508ull
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC5_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_0_BASE 0x2A415DCull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_1_BASE 0x2A4162Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_2_BASE 0x2A4167Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_3_BASE 0x2A416CCull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_4_BASE 0x2A4171Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_5_BASE 0x2A4176Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_6_BASE 0x2A417BCull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_7_BASE 0x2A4180Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_8_BASE 0x2A4185Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_9_BASE 0x2A418ACull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_10_BASE 0x2A418FCull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_11_BASE 0x2A4194Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_12_BASE 0x2A4199Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_13_BASE 0x2A419ECull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_14_BASE 0x2A41A3Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_15_BASE 0x2A41A8Cull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x2A41ADCull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_CFG_QM_BASE 0x2A41AE4ull
+#define DCORE2_TPC5_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC5_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC5_EML_TPC_CFG_AXUSER_BASE 0x2A41E00ull
+#define DCORE2_TPC5_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_CFG_SPECIAL_BASE 0x2A41E80ull
+#define DCORE2_TPC5_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC5_EML_QM_DCCM_BASE 0x2A42000ull
+#define DCORE2_TPC5_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC5_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_QM_ARCAUX_BASE 0x2A4A000ull
+#define DCORE2_TPC5_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE2_TPC5_EML_QM_ARCAUX_SPECIAL_BASE 0x2A4AE80ull
+#define DCORE2_TPC5_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC5_EML_TPC_QM_BASE 0x2A4C000ull
+#define DCORE2_TPC5_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x2A4C900ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x2A4C908ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x2A4C910ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x2A4C918ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x2A4C920ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x2A4C928ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x2A4C930ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x2A4C938ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x2A4C940ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x2A4C948ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x2A4C950ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x2A4C958ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x2A4C960ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x2A4C968ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x2A4C970ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x2A4C978ull
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC5_EML_TPC_QM_AXUSER_SECURED_BASE 0x2A4CB00ull
+#define DCORE2_TPC5_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x2A4CB80ull
+#define DCORE2_TPC5_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_DBG_HBW_BASE 0x2A4CC00ull
+#define DCORE2_TPC5_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC5_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC5_EML_TPC_QM_DBG_LBW_BASE 0x2A4CC80ull
+#define DCORE2_TPC5_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC5_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_TPC_QM_CGM_BASE 0x2A4CD80ull
+#define DCORE2_TPC5_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC5_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC5_EML_TPC_QM_SPECIAL_BASE 0x2A4CE80ull
+#define DCORE2_TPC5_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE2_TPC5_EML_CS_BASE 0x2BFF000ull
+#define DCORE2_TPC5_EML_CS_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_EML_CS_SECTION 0x401000
+#define mmDCORE3_TPC0_ROM_TABLE_BASE 0x3000000ull
+#define DCORE3_TPC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_SPMU_BASE 0x3001000ull
+#define DCORE3_TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_SPMU_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_ETF_BASE 0x3002000ull
+#define DCORE3_TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_ETF_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_STM_BASE 0x3003000ull
+#define DCORE3_TPC0_EML_STM_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_STM_SECTION 0x2000
+#define mmDCORE3_TPC0_EML_CTI_BASE 0x3005000ull
+#define DCORE3_TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_CTI_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_FUNNEL_BASE 0x3006000ull
+#define DCORE3_TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_BUSMON_0_BASE 0x3007000ull
+#define DCORE3_TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_BUSMON_1_BASE 0x3008000ull
+#define DCORE3_TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_BUSMON_2_BASE 0x3009000ull
+#define DCORE3_TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_BUSMON_3_BASE 0x300A000ull
+#define DCORE3_TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE3_TPC0_QM_ARC_RTT_BASE 0x300B000ull
+#define DCORE3_TPC0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_TPC0_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE3_TPC0_EML_CFG_BASE 0x3040000ull
+#define DCORE3_TPC0_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_CFG_SECTION 0xE800
+#define mmDCORE3_TPC0_EML_CFG_SPECIAL_BASE 0x3040E80ull
+#define DCORE3_TPC0_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x3041000ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC0_EML_TPC_CFG_BASE 0x3041000ull
+#define DCORE3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x3041050ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x30410A0ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x30410F0ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x3041140ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x3041190ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x30411E0ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x3041230ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x3041280ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x30412D0ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x3041320ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x3041370ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x30413C0ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x3041410ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x3041460ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x30414B0ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x3041500ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_CFG_KERNEL_BASE 0x3041508ull
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC0_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_0_BASE 0x30415DCull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_1_BASE 0x304162Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_2_BASE 0x304167Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_3_BASE 0x30416CCull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_4_BASE 0x304171Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_5_BASE 0x304176Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_6_BASE 0x30417BCull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_7_BASE 0x304180Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_8_BASE 0x304185Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_9_BASE 0x30418ACull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_10_BASE 0x30418FCull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_11_BASE 0x304194Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_12_BASE 0x304199Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_13_BASE 0x30419ECull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_14_BASE 0x3041A3Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_15_BASE 0x3041A8Cull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x3041ADCull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_CFG_QM_BASE 0x3041AE4ull
+#define DCORE3_TPC0_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC0_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC0_EML_TPC_CFG_AXUSER_BASE 0x3041E00ull
+#define DCORE3_TPC0_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_CFG_SPECIAL_BASE 0x3041E80ull
+#define DCORE3_TPC0_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC0_EML_QM_DCCM_BASE 0x3042000ull
+#define DCORE3_TPC0_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC0_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_QM_ARCAUX_BASE 0x304A000ull
+#define DCORE3_TPC0_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE3_TPC0_EML_QM_ARCAUX_SPECIAL_BASE 0x304AE80ull
+#define DCORE3_TPC0_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC0_EML_TPC_QM_BASE 0x304C000ull
+#define DCORE3_TPC0_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x304C900ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x304C908ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x304C910ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x304C918ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x304C920ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x304C928ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x304C930ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x304C938ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x304C940ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x304C948ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x304C950ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x304C958ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x304C960ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x304C968ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x304C970ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x304C978ull
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC0_EML_TPC_QM_AXUSER_SECURED_BASE 0x304CB00ull
+#define DCORE3_TPC0_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x304CB80ull
+#define DCORE3_TPC0_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_DBG_HBW_BASE 0x304CC00ull
+#define DCORE3_TPC0_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC0_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC0_EML_TPC_QM_DBG_LBW_BASE 0x304CC80ull
+#define DCORE3_TPC0_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC0_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_TPC_QM_CGM_BASE 0x304CD80ull
+#define DCORE3_TPC0_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC0_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC0_EML_TPC_QM_SPECIAL_BASE 0x304CE80ull
+#define DCORE3_TPC0_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE3_TPC0_EML_CS_BASE 0x31FF000ull
+#define DCORE3_TPC0_EML_CS_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_EML_CS_SECTION 0x1000
+#define mmDCORE3_TPC1_ROM_TABLE_BASE 0x3200000ull
+#define DCORE3_TPC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_SPMU_BASE 0x3201000ull
+#define DCORE3_TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_SPMU_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_ETF_BASE 0x3202000ull
+#define DCORE3_TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_ETF_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_STM_BASE 0x3203000ull
+#define DCORE3_TPC1_EML_STM_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_STM_SECTION 0x2000
+#define mmDCORE3_TPC1_EML_CTI_BASE 0x3205000ull
+#define DCORE3_TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_CTI_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_FUNNEL_BASE 0x3206000ull
+#define DCORE3_TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_BUSMON_0_BASE 0x3207000ull
+#define DCORE3_TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_BUSMON_1_BASE 0x3208000ull
+#define DCORE3_TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_BUSMON_2_BASE 0x3209000ull
+#define DCORE3_TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_BUSMON_3_BASE 0x320A000ull
+#define DCORE3_TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE3_TPC1_QM_ARC_RTT_BASE 0x320B000ull
+#define DCORE3_TPC1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_TPC1_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE3_TPC1_EML_CFG_BASE 0x3240000ull
+#define DCORE3_TPC1_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_CFG_SECTION 0xE800
+#define mmDCORE3_TPC1_EML_CFG_SPECIAL_BASE 0x3240E80ull
+#define DCORE3_TPC1_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x3241000ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_BASE 0x3241000ull
+#define DCORE3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x3241050ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x32410A0ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x32410F0ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x3241140ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x3241190ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x32411E0ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x3241230ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x3241280ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x32412D0ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x3241320ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x3241370ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x32413C0ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x3241410ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x3241460ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x32414B0ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x3241500ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_CFG_KERNEL_BASE 0x3241508ull
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC1_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_0_BASE 0x32415DCull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_1_BASE 0x324162Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_2_BASE 0x324167Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_3_BASE 0x32416CCull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_4_BASE 0x324171Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_5_BASE 0x324176Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_6_BASE 0x32417BCull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_7_BASE 0x324180Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_8_BASE 0x324185Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_9_BASE 0x32418ACull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_10_BASE 0x32418FCull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_11_BASE 0x324194Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_12_BASE 0x324199Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_13_BASE 0x32419ECull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_14_BASE 0x3241A3Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_15_BASE 0x3241A8Cull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x3241ADCull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_CFG_QM_BASE 0x3241AE4ull
+#define DCORE3_TPC1_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC1_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC1_EML_TPC_CFG_AXUSER_BASE 0x3241E00ull
+#define DCORE3_TPC1_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_CFG_SPECIAL_BASE 0x3241E80ull
+#define DCORE3_TPC1_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC1_EML_QM_DCCM_BASE 0x3242000ull
+#define DCORE3_TPC1_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC1_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_QM_ARCAUX_BASE 0x324A000ull
+#define DCORE3_TPC1_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE3_TPC1_EML_QM_ARCAUX_SPECIAL_BASE 0x324AE80ull
+#define DCORE3_TPC1_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC1_EML_TPC_QM_BASE 0x324C000ull
+#define DCORE3_TPC1_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x324C900ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x324C908ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x324C910ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x324C918ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x324C920ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x324C928ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x324C930ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x324C938ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x324C940ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x324C948ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x324C950ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x324C958ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x324C960ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x324C968ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x324C970ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x324C978ull
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC1_EML_TPC_QM_AXUSER_SECURED_BASE 0x324CB00ull
+#define DCORE3_TPC1_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x324CB80ull
+#define DCORE3_TPC1_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_DBG_HBW_BASE 0x324CC00ull
+#define DCORE3_TPC1_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC1_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC1_EML_TPC_QM_DBG_LBW_BASE 0x324CC80ull
+#define DCORE3_TPC1_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC1_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_TPC_QM_CGM_BASE 0x324CD80ull
+#define DCORE3_TPC1_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC1_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC1_EML_TPC_QM_SPECIAL_BASE 0x324CE80ull
+#define DCORE3_TPC1_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE3_TPC1_EML_CS_BASE 0x33FF000ull
+#define DCORE3_TPC1_EML_CS_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_EML_CS_SECTION 0x1000
+#define mmDCORE3_TPC2_ROM_TABLE_BASE 0x3400000ull
+#define DCORE3_TPC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_SPMU_BASE 0x3401000ull
+#define DCORE3_TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_SPMU_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_ETF_BASE 0x3402000ull
+#define DCORE3_TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_ETF_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_STM_BASE 0x3403000ull
+#define DCORE3_TPC2_EML_STM_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_STM_SECTION 0x2000
+#define mmDCORE3_TPC2_EML_CTI_BASE 0x3405000ull
+#define DCORE3_TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_CTI_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_FUNNEL_BASE 0x3406000ull
+#define DCORE3_TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_BUSMON_0_BASE 0x3407000ull
+#define DCORE3_TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_BUSMON_1_BASE 0x3408000ull
+#define DCORE3_TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_BUSMON_2_BASE 0x3409000ull
+#define DCORE3_TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_BUSMON_3_BASE 0x340A000ull
+#define DCORE3_TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE3_TPC2_QM_ARC_RTT_BASE 0x340B000ull
+#define DCORE3_TPC2_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_TPC2_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE3_TPC2_EML_CFG_BASE 0x3440000ull
+#define DCORE3_TPC2_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_CFG_SECTION 0xE800
+#define mmDCORE3_TPC2_EML_CFG_SPECIAL_BASE 0x3440E80ull
+#define DCORE3_TPC2_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x3441000ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_BASE 0x3441000ull
+#define DCORE3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x3441050ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x34410A0ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x34410F0ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x3441140ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x3441190ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x34411E0ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x3441230ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x3441280ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x34412D0ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x3441320ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x3441370ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x34413C0ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x3441410ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x3441460ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x34414B0ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x3441500ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_CFG_KERNEL_BASE 0x3441508ull
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC2_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_0_BASE 0x34415DCull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_1_BASE 0x344162Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_2_BASE 0x344167Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_3_BASE 0x34416CCull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_4_BASE 0x344171Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_5_BASE 0x344176Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_6_BASE 0x34417BCull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_7_BASE 0x344180Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_8_BASE 0x344185Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_9_BASE 0x34418ACull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_10_BASE 0x34418FCull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_11_BASE 0x344194Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_12_BASE 0x344199Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_13_BASE 0x34419ECull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_14_BASE 0x3441A3Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_15_BASE 0x3441A8Cull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x3441ADCull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_CFG_QM_BASE 0x3441AE4ull
+#define DCORE3_TPC2_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC2_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC2_EML_TPC_CFG_AXUSER_BASE 0x3441E00ull
+#define DCORE3_TPC2_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_CFG_SPECIAL_BASE 0x3441E80ull
+#define DCORE3_TPC2_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC2_EML_QM_DCCM_BASE 0x3442000ull
+#define DCORE3_TPC2_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC2_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_QM_ARCAUX_BASE 0x344A000ull
+#define DCORE3_TPC2_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE3_TPC2_EML_QM_ARCAUX_SPECIAL_BASE 0x344AE80ull
+#define DCORE3_TPC2_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC2_EML_TPC_QM_BASE 0x344C000ull
+#define DCORE3_TPC2_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x344C900ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x344C908ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x344C910ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x344C918ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x344C920ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x344C928ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x344C930ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x344C938ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x344C940ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x344C948ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x344C950ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x344C958ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x344C960ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x344C968ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x344C970ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x344C978ull
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC2_EML_TPC_QM_AXUSER_SECURED_BASE 0x344CB00ull
+#define DCORE3_TPC2_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x344CB80ull
+#define DCORE3_TPC2_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_DBG_HBW_BASE 0x344CC00ull
+#define DCORE3_TPC2_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC2_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC2_EML_TPC_QM_DBG_LBW_BASE 0x344CC80ull
+#define DCORE3_TPC2_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC2_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_TPC_QM_CGM_BASE 0x344CD80ull
+#define DCORE3_TPC2_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC2_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC2_EML_TPC_QM_SPECIAL_BASE 0x344CE80ull
+#define DCORE3_TPC2_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE3_TPC2_EML_CS_BASE 0x35FF000ull
+#define DCORE3_TPC2_EML_CS_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_EML_CS_SECTION 0x1000
+#define mmDCORE3_TPC3_ROM_TABLE_BASE 0x3600000ull
+#define DCORE3_TPC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_SPMU_BASE 0x3601000ull
+#define DCORE3_TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_SPMU_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_ETF_BASE 0x3602000ull
+#define DCORE3_TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_ETF_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_STM_BASE 0x3603000ull
+#define DCORE3_TPC3_EML_STM_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_STM_SECTION 0x2000
+#define mmDCORE3_TPC3_EML_CTI_BASE 0x3605000ull
+#define DCORE3_TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_CTI_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_FUNNEL_BASE 0x3606000ull
+#define DCORE3_TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_BUSMON_0_BASE 0x3607000ull
+#define DCORE3_TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_BUSMON_1_BASE 0x3608000ull
+#define DCORE3_TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_BUSMON_2_BASE 0x3609000ull
+#define DCORE3_TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_BUSMON_3_BASE 0x360A000ull
+#define DCORE3_TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE3_TPC3_QM_ARC_RTT_BASE 0x360B000ull
+#define DCORE3_TPC3_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_TPC3_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE3_TPC3_EML_CFG_BASE 0x3640000ull
+#define DCORE3_TPC3_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_CFG_SECTION 0xE800
+#define mmDCORE3_TPC3_EML_CFG_SPECIAL_BASE 0x3640E80ull
+#define DCORE3_TPC3_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x3641000ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_BASE 0x3641000ull
+#define DCORE3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x3641050ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x36410A0ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x36410F0ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x3641140ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x3641190ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x36411E0ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x3641230ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x3641280ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x36412D0ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x3641320ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x3641370ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x36413C0ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x3641410ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x3641460ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x36414B0ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x3641500ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_CFG_KERNEL_BASE 0x3641508ull
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC3_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_0_BASE 0x36415DCull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_1_BASE 0x364162Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_2_BASE 0x364167Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_3_BASE 0x36416CCull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_4_BASE 0x364171Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_5_BASE 0x364176Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_6_BASE 0x36417BCull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_7_BASE 0x364180Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_8_BASE 0x364185Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_9_BASE 0x36418ACull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_10_BASE 0x36418FCull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_11_BASE 0x364194Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_12_BASE 0x364199Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_13_BASE 0x36419ECull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_14_BASE 0x3641A3Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_15_BASE 0x3641A8Cull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x3641ADCull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_CFG_QM_BASE 0x3641AE4ull
+#define DCORE3_TPC3_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC3_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC3_EML_TPC_CFG_AXUSER_BASE 0x3641E00ull
+#define DCORE3_TPC3_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_CFG_SPECIAL_BASE 0x3641E80ull
+#define DCORE3_TPC3_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC3_EML_QM_DCCM_BASE 0x3642000ull
+#define DCORE3_TPC3_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC3_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_QM_ARCAUX_BASE 0x364A000ull
+#define DCORE3_TPC3_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE3_TPC3_EML_QM_ARCAUX_SPECIAL_BASE 0x364AE80ull
+#define DCORE3_TPC3_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC3_EML_TPC_QM_BASE 0x364C000ull
+#define DCORE3_TPC3_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x364C900ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x364C908ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x364C910ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x364C918ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x364C920ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x364C928ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x364C930ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x364C938ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x364C940ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x364C948ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x364C950ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x364C958ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x364C960ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x364C968ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x364C970ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x364C978ull
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC3_EML_TPC_QM_AXUSER_SECURED_BASE 0x364CB00ull
+#define DCORE3_TPC3_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x364CB80ull
+#define DCORE3_TPC3_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_DBG_HBW_BASE 0x364CC00ull
+#define DCORE3_TPC3_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC3_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC3_EML_TPC_QM_DBG_LBW_BASE 0x364CC80ull
+#define DCORE3_TPC3_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC3_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_TPC_QM_CGM_BASE 0x364CD80ull
+#define DCORE3_TPC3_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC3_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC3_EML_TPC_QM_SPECIAL_BASE 0x364CE80ull
+#define DCORE3_TPC3_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE3_TPC3_EML_CS_BASE 0x37FF000ull
+#define DCORE3_TPC3_EML_CS_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_EML_CS_SECTION 0x1000
+#define mmDCORE3_TPC4_ROM_TABLE_BASE 0x3800000ull
+#define DCORE3_TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_SPMU_BASE 0x3801000ull
+#define DCORE3_TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_SPMU_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_ETF_BASE 0x3802000ull
+#define DCORE3_TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_ETF_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_STM_BASE 0x3803000ull
+#define DCORE3_TPC4_EML_STM_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_STM_SECTION 0x2000
+#define mmDCORE3_TPC4_EML_CTI_BASE 0x3805000ull
+#define DCORE3_TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_CTI_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_FUNNEL_BASE 0x3806000ull
+#define DCORE3_TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_BUSMON_0_BASE 0x3807000ull
+#define DCORE3_TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_BUSMON_1_BASE 0x3808000ull
+#define DCORE3_TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_BUSMON_2_BASE 0x3809000ull
+#define DCORE3_TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_BUSMON_3_BASE 0x380A000ull
+#define DCORE3_TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE3_TPC4_QM_ARC_RTT_BASE 0x380B000ull
+#define DCORE3_TPC4_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_TPC4_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE3_TPC4_EML_CFG_BASE 0x3840000ull
+#define DCORE3_TPC4_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_CFG_SECTION 0xE800
+#define mmDCORE3_TPC4_EML_CFG_SPECIAL_BASE 0x3840E80ull
+#define DCORE3_TPC4_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x3841000ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC4_EML_TPC_CFG_BASE 0x3841000ull
+#define DCORE3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x3841050ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x38410A0ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x38410F0ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x3841140ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x3841190ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x38411E0ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x3841230ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x3841280ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x38412D0ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x3841320ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x3841370ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x38413C0ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x3841410ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x3841460ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x38414B0ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x3841500ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_CFG_KERNEL_BASE 0x3841508ull
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC4_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_0_BASE 0x38415DCull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_1_BASE 0x384162Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_2_BASE 0x384167Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_3_BASE 0x38416CCull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_4_BASE 0x384171Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_5_BASE 0x384176Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_6_BASE 0x38417BCull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_7_BASE 0x384180Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_8_BASE 0x384185Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_9_BASE 0x38418ACull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_10_BASE 0x38418FCull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_11_BASE 0x384194Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_12_BASE 0x384199Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_13_BASE 0x38419ECull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_14_BASE 0x3841A3Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_15_BASE 0x3841A8Cull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x3841ADCull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_CFG_QM_BASE 0x3841AE4ull
+#define DCORE3_TPC4_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC4_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC4_EML_TPC_CFG_AXUSER_BASE 0x3841E00ull
+#define DCORE3_TPC4_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_CFG_SPECIAL_BASE 0x3841E80ull
+#define DCORE3_TPC4_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC4_EML_QM_DCCM_BASE 0x3842000ull
+#define DCORE3_TPC4_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC4_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_QM_ARCAUX_BASE 0x384A000ull
+#define DCORE3_TPC4_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE3_TPC4_EML_QM_ARCAUX_SPECIAL_BASE 0x384AE80ull
+#define DCORE3_TPC4_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC4_EML_TPC_QM_BASE 0x384C000ull
+#define DCORE3_TPC4_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x384C900ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x384C908ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x384C910ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x384C918ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x384C920ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x384C928ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x384C930ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x384C938ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x384C940ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x384C948ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x384C950ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x384C958ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x384C960ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x384C968ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x384C970ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x384C978ull
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC4_EML_TPC_QM_AXUSER_SECURED_BASE 0x384CB00ull
+#define DCORE3_TPC4_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x384CB80ull
+#define DCORE3_TPC4_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_DBG_HBW_BASE 0x384CC00ull
+#define DCORE3_TPC4_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC4_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC4_EML_TPC_QM_DBG_LBW_BASE 0x384CC80ull
+#define DCORE3_TPC4_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC4_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_TPC_QM_CGM_BASE 0x384CD80ull
+#define DCORE3_TPC4_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC4_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC4_EML_TPC_QM_SPECIAL_BASE 0x384CE80ull
+#define DCORE3_TPC4_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE3_TPC4_EML_CS_BASE 0x39FF000ull
+#define DCORE3_TPC4_EML_CS_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_EML_CS_SECTION 0x1000
+#define mmDCORE3_TPC5_ROM_TABLE_BASE 0x3A00000ull
+#define DCORE3_TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_SPMU_BASE 0x3A01000ull
+#define DCORE3_TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_SPMU_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_ETF_BASE 0x3A02000ull
+#define DCORE3_TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_ETF_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_STM_BASE 0x3A03000ull
+#define DCORE3_TPC5_EML_STM_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_STM_SECTION 0x2000
+#define mmDCORE3_TPC5_EML_CTI_BASE 0x3A05000ull
+#define DCORE3_TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_CTI_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_FUNNEL_BASE 0x3A06000ull
+#define DCORE3_TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_BUSMON_0_BASE 0x3A07000ull
+#define DCORE3_TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_BUSMON_1_BASE 0x3A08000ull
+#define DCORE3_TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_BUSMON_2_BASE 0x3A09000ull
+#define DCORE3_TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_BUSMON_3_BASE 0x3A0A000ull
+#define DCORE3_TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_BUSMON_3_SECTION 0x1000
+#define mmDCORE3_TPC5_QM_ARC_RTT_BASE 0x3A0B000ull
+#define DCORE3_TPC5_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_TPC5_QM_ARC_RTT_SECTION 0x35000
+#define mmDCORE3_TPC5_EML_CFG_BASE 0x3A40000ull
+#define DCORE3_TPC5_EML_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_CFG_SECTION 0xE800
+#define mmDCORE3_TPC5_EML_CFG_SPECIAL_BASE 0x3A40E80ull
+#define DCORE3_TPC5_EML_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_BASE 0x3A41000ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC5_EML_TPC_CFG_BASE 0x3A41000ull
+#define DCORE3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_TPC_CFG_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_BASE 0x3A41050ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_BASE 0x3A410A0ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_BASE 0x3A410F0ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_BASE 0x3A41140ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_BASE 0x3A41190ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_BASE 0x3A411E0ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_BASE 0x3A41230ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_BASE 0x3A41280ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_BASE 0x3A412D0ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_BASE 0x3A41320ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_BASE 0x3A41370ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_BASE 0x3A413C0ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_BASE 0x3A41410ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_BASE 0x3A41460ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_BASE 0x3A414B0ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_BASE 0x3A41500ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_CFG_KERNEL_BASE 0x3A41508ull
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC5_EML_TPC_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_0_BASE 0x3A415DCull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_1_BASE 0x3A4162Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_2_BASE 0x3A4167Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_3_BASE 0x3A416CCull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_4_BASE 0x3A4171Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_5_BASE 0x3A4176Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_6_BASE 0x3A417BCull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_7_BASE 0x3A4180Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_8_BASE 0x3A4185Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_9_BASE 0x3A418ACull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_10_BASE 0x3A418FCull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_11_BASE 0x3A4194Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_12_BASE 0x3A4199Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_13_BASE 0x3A419ECull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_14_BASE 0x3A41A3Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_15_BASE 0x3A41A8Cull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_BASE 0x3A41ADCull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_CFG_QM_BASE 0x3A41AE4ull
+#define DCORE3_TPC5_EML_TPC_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC5_EML_TPC_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC5_EML_TPC_CFG_AXUSER_BASE 0x3A41E00ull
+#define DCORE3_TPC5_EML_TPC_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_CFG_SPECIAL_BASE 0x3A41E80ull
+#define DCORE3_TPC5_EML_TPC_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_EML_TPC_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC5_EML_QM_DCCM_BASE 0x3A42000ull
+#define DCORE3_TPC5_EML_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC5_EML_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_QM_ARCAUX_BASE 0x3A4A000ull
+#define DCORE3_TPC5_EML_QM_ARCAUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_QM_ARCAUX_SECTION 0xE800
+#define mmDCORE3_TPC5_EML_QM_ARCAUX_SPECIAL_BASE 0x3A4AE80ull
+#define DCORE3_TPC5_EML_QM_ARCAUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_EML_QM_ARCAUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC5_EML_TPC_QM_BASE 0x3A4C000ull
+#define DCORE3_TPC5_EML_TPC_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_TPC_QM_SECTION 0x9000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_BASE 0x3A4C900ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_BASE 0x3A4C908ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_BASE 0x3A4C910ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_BASE 0x3A4C918ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_BASE 0x3A4C920ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_BASE 0x3A4C928ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_BASE 0x3A4C930ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_BASE 0x3A4C938ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_BASE 0x3A4C940ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_BASE 0x3A4C948ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_BASE 0x3A4C950ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_BASE 0x3A4C958ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_BASE 0x3A4C960ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_BASE 0x3A4C968ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_BASE 0x3A4C970ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_BASE 0x3A4C978ull
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_EML_TPC_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC5_EML_TPC_QM_AXUSER_SECURED_BASE 0x3A4CB00ull
+#define DCORE3_TPC5_EML_TPC_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_AXUSER_NONSECURED_BASE 0x3A4CB80ull
+#define DCORE3_TPC5_EML_TPC_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_EML_TPC_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_DBG_HBW_BASE 0x3A4CC00ull
+#define DCORE3_TPC5_EML_TPC_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC5_EML_TPC_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC5_EML_TPC_QM_DBG_LBW_BASE 0x3A4CC80ull
+#define DCORE3_TPC5_EML_TPC_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC5_EML_TPC_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_TPC_QM_CGM_BASE 0x3A4CD80ull
+#define DCORE3_TPC5_EML_TPC_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC5_EML_TPC_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC5_EML_TPC_QM_SPECIAL_BASE 0x3A4CE80ull
+#define DCORE3_TPC5_EML_TPC_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_EML_TPC_QM_SPECIAL_SECTION 0x1B2180
+#define mmDCORE3_TPC5_EML_CS_BASE 0x3BFF000ull
+#define DCORE3_TPC5_EML_CS_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_EML_CS_SECTION 0x401000
+#define mmDCORE0_TPC0_QM_DCCM_BASE 0x4000000ull
+#define DCORE0_TPC0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC0_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_ARC_AUX_BASE 0x4008000ull
+#define DCORE0_TPC0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC0_QM_ARC_AUX_SPECIAL_BASE 0x4008E80ull
+#define DCORE0_TPC0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC0_QM_BASE 0x400A000ull
+#define DCORE0_TPC0_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_QM_SECTION 0x9000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x400A900ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x400A908ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x400A910ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x400A918ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x400A920ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x400A928ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x400A930ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x400A938ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x400A940ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x400A948ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x400A950ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x400A958ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x400A960ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x400A968ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x400A970ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x400A978ull
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC0_QM_AXUSER_SECURED_BASE 0x400AB00ull
+#define DCORE0_TPC0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_AXUSER_NONSECURED_BASE 0x400AB80ull
+#define DCORE0_TPC0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_DBG_HBW_BASE 0x400AC00ull
+#define DCORE0_TPC0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC0_QM_DBG_LBW_BASE 0x400AC80ull
+#define DCORE0_TPC0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC0_QM_CGM_BASE 0x400AD80ull
+#define DCORE0_TPC0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC0_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC0_QM_SPECIAL_BASE 0x400AE80ull
+#define DCORE0_TPC0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE 0x400B000ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC0_CFG_BASE 0x400B000ull
+#define DCORE0_TPC0_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC0_CFG_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_1_BASE 0x400B050ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_2_BASE 0x400B0A0ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_3_BASE 0x400B0F0ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_4_BASE 0x400B140ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_5_BASE 0x400B190ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_6_BASE 0x400B1E0ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_7_BASE 0x400B230ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_8_BASE 0x400B280ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_9_BASE 0x400B2D0ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_10_BASE 0x400B320ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_11_BASE 0x400B370ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_12_BASE 0x400B3C0ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_13_BASE 0x400B410ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_14_BASE 0x400B460ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_TENSOR_15_BASE 0x400B4B0ull
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_KERNEL_SYNC_OBJECT_BASE 0x400B500ull
+#define DCORE0_TPC0_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC0_CFG_KERNEL_BASE 0x400B508ull
+#define DCORE0_TPC0_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC0_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE 0x400B5DCull
+#define DCORE0_TPC0_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_1_BASE 0x400B62Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_2_BASE 0x400B67Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_3_BASE 0x400B6CCull
+#define DCORE0_TPC0_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_4_BASE 0x400B71Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_5_BASE 0x400B76Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_6_BASE 0x400B7BCull
+#define DCORE0_TPC0_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_7_BASE 0x400B80Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_8_BASE 0x400B85Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_9_BASE 0x400B8ACull
+#define DCORE0_TPC0_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_10_BASE 0x400B8FCull
+#define DCORE0_TPC0_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_11_BASE 0x400B94Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_12_BASE 0x400B99Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_13_BASE 0x400B9ECull
+#define DCORE0_TPC0_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_14_BASE 0x400BA3Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_TENSOR_15_BASE 0x400BA8Cull
+#define DCORE0_TPC0_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_BASE 0x400BADCull
+#define DCORE0_TPC0_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC0_CFG_QM_BASE 0x400BAE4ull
+#define DCORE0_TPC0_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC0_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC0_CFG_AXUSER_BASE 0x400BE00ull
+#define DCORE0_TPC0_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC0_CFG_SPECIAL_BASE 0x400BE80ull
+#define DCORE0_TPC0_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC0_MSTR_IF_RR_SHRD_HBW_BASE 0x400C000ull
+#define DCORE0_TPC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC0_MSTR_IF_RR_PRVT_HBW_BASE 0x400C200ull
+#define DCORE0_TPC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC0_MSTR_IF_RR_SHRD_LBW_BASE 0x400C400ull
+#define DCORE0_TPC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC0_MSTR_IF_RR_PRVT_LBW_BASE 0x400C600ull
+#define DCORE0_TPC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC0_MSTR_IF_E2E_CRDT_BASE 0x400C800ull
+#define DCORE0_TPC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC0_MSTR_IF_AXUSER_BASE 0x400CA80ull
+#define DCORE0_TPC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC0_MSTR_IF_DBG_HBW_BASE 0x400CB00ull
+#define DCORE0_TPC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC0_MSTR_IF_DBG_LBW_BASE 0x400CB80ull
+#define DCORE0_TPC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC0_MSTR_IF_CORE_HBW_BASE 0x400CC00ull
+#define DCORE0_TPC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC0_MSTR_IF_CORE_LBW_BASE 0x400CD80ull
+#define DCORE0_TPC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC0_MSTR_IF_SPECIAL_BASE 0x400CE80ull
+#define DCORE0_TPC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_TPC1_QM_DCCM_BASE 0x4010000ull
+#define DCORE0_TPC1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC1_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_ARC_AUX_BASE 0x4018000ull
+#define DCORE0_TPC1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC1_QM_ARC_AUX_SPECIAL_BASE 0x4018E80ull
+#define DCORE0_TPC1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC1_QM_BASE 0x401A000ull
+#define DCORE0_TPC1_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_QM_SECTION 0x9000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x401A900ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x401A908ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x401A910ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x401A918ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x401A920ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x401A928ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x401A930ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x401A938ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x401A940ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x401A948ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x401A950ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x401A958ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x401A960ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x401A968ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x401A970ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x401A978ull
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC1_QM_AXUSER_SECURED_BASE 0x401AB00ull
+#define DCORE0_TPC1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_AXUSER_NONSECURED_BASE 0x401AB80ull
+#define DCORE0_TPC1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_DBG_HBW_BASE 0x401AC00ull
+#define DCORE0_TPC1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC1_QM_DBG_LBW_BASE 0x401AC80ull
+#define DCORE0_TPC1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC1_QM_CGM_BASE 0x401AD80ull
+#define DCORE0_TPC1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC1_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC1_QM_SPECIAL_BASE 0x401AE80ull
+#define DCORE0_TPC1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_0_BASE 0x401B000ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC1_CFG_BASE 0x401B000ull
+#define DCORE0_TPC1_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC1_CFG_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_1_BASE 0x401B050ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_2_BASE 0x401B0A0ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_3_BASE 0x401B0F0ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_4_BASE 0x401B140ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_5_BASE 0x401B190ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_6_BASE 0x401B1E0ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_7_BASE 0x401B230ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_8_BASE 0x401B280ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_9_BASE 0x401B2D0ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_10_BASE 0x401B320ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_11_BASE 0x401B370ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_12_BASE 0x401B3C0ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_13_BASE 0x401B410ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_14_BASE 0x401B460ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_TENSOR_15_BASE 0x401B4B0ull
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_KERNEL_SYNC_OBJECT_BASE 0x401B500ull
+#define DCORE0_TPC1_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC1_CFG_KERNEL_BASE 0x401B508ull
+#define DCORE0_TPC1_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC1_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_0_BASE 0x401B5DCull
+#define DCORE0_TPC1_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_1_BASE 0x401B62Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_2_BASE 0x401B67Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_3_BASE 0x401B6CCull
+#define DCORE0_TPC1_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_4_BASE 0x401B71Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_5_BASE 0x401B76Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_6_BASE 0x401B7BCull
+#define DCORE0_TPC1_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_7_BASE 0x401B80Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_8_BASE 0x401B85Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_9_BASE 0x401B8ACull
+#define DCORE0_TPC1_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_10_BASE 0x401B8FCull
+#define DCORE0_TPC1_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_11_BASE 0x401B94Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_12_BASE 0x401B99Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_13_BASE 0x401B9ECull
+#define DCORE0_TPC1_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_14_BASE 0x401BA3Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_TENSOR_15_BASE 0x401BA8Cull
+#define DCORE0_TPC1_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC1_CFG_QM_SYNC_OBJECT_BASE 0x401BADCull
+#define DCORE0_TPC1_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC1_CFG_QM_BASE 0x401BAE4ull
+#define DCORE0_TPC1_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC1_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC1_CFG_AXUSER_BASE 0x401BE00ull
+#define DCORE0_TPC1_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC1_CFG_SPECIAL_BASE 0x401BE80ull
+#define DCORE0_TPC1_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC1_MSTR_IF_RR_SHRD_HBW_BASE 0x401C000ull
+#define DCORE0_TPC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC1_MSTR_IF_RR_PRVT_HBW_BASE 0x401C200ull
+#define DCORE0_TPC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC1_MSTR_IF_RR_SHRD_LBW_BASE 0x401C400ull
+#define DCORE0_TPC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC1_MSTR_IF_RR_PRVT_LBW_BASE 0x401C600ull
+#define DCORE0_TPC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC1_MSTR_IF_E2E_CRDT_BASE 0x401C800ull
+#define DCORE0_TPC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC1_MSTR_IF_AXUSER_BASE 0x401CA80ull
+#define DCORE0_TPC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC1_MSTR_IF_DBG_HBW_BASE 0x401CB00ull
+#define DCORE0_TPC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC1_MSTR_IF_DBG_LBW_BASE 0x401CB80ull
+#define DCORE0_TPC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC1_MSTR_IF_CORE_HBW_BASE 0x401CC00ull
+#define DCORE0_TPC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC1_MSTR_IF_CORE_LBW_BASE 0x401CD80ull
+#define DCORE0_TPC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC1_MSTR_IF_SPECIAL_BASE 0x401CE80ull
+#define DCORE0_TPC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_TPC2_QM_DCCM_BASE 0x4020000ull
+#define DCORE0_TPC2_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC2_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_ARC_AUX_BASE 0x4028000ull
+#define DCORE0_TPC2_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC2_QM_ARC_AUX_SPECIAL_BASE 0x4028E80ull
+#define DCORE0_TPC2_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC2_QM_BASE 0x402A000ull
+#define DCORE0_TPC2_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_QM_SECTION 0x9000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR0_BASE 0x402A900ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR1_BASE 0x402A908ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR2_BASE 0x402A910ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR3_BASE 0x402A918ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR4_BASE 0x402A920ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR5_BASE 0x402A928ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR6_BASE 0x402A930ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR7_BASE 0x402A938ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR8_BASE 0x402A940ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR9_BASE 0x402A948ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR10_BASE 0x402A950ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR11_BASE 0x402A958ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR12_BASE 0x402A960ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR13_BASE 0x402A968ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR14_BASE 0x402A970ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR15_BASE 0x402A978ull
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC2_QM_AXUSER_SECURED_BASE 0x402AB00ull
+#define DCORE0_TPC2_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_AXUSER_NONSECURED_BASE 0x402AB80ull
+#define DCORE0_TPC2_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_DBG_HBW_BASE 0x402AC00ull
+#define DCORE0_TPC2_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC2_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC2_QM_DBG_LBW_BASE 0x402AC80ull
+#define DCORE0_TPC2_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC2_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC2_QM_CGM_BASE 0x402AD80ull
+#define DCORE0_TPC2_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC2_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC2_QM_SPECIAL_BASE 0x402AE80ull
+#define DCORE0_TPC2_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_0_BASE 0x402B000ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC2_CFG_BASE 0x402B000ull
+#define DCORE0_TPC2_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC2_CFG_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_1_BASE 0x402B050ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_2_BASE 0x402B0A0ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_3_BASE 0x402B0F0ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_4_BASE 0x402B140ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_5_BASE 0x402B190ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_6_BASE 0x402B1E0ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_7_BASE 0x402B230ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_8_BASE 0x402B280ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_9_BASE 0x402B2D0ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_10_BASE 0x402B320ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_11_BASE 0x402B370ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_12_BASE 0x402B3C0ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_13_BASE 0x402B410ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_14_BASE 0x402B460ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_TENSOR_15_BASE 0x402B4B0ull
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_KERNEL_SYNC_OBJECT_BASE 0x402B500ull
+#define DCORE0_TPC2_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC2_CFG_KERNEL_BASE 0x402B508ull
+#define DCORE0_TPC2_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC2_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_0_BASE 0x402B5DCull
+#define DCORE0_TPC2_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_1_BASE 0x402B62Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_2_BASE 0x402B67Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_3_BASE 0x402B6CCull
+#define DCORE0_TPC2_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_4_BASE 0x402B71Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_5_BASE 0x402B76Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_6_BASE 0x402B7BCull
+#define DCORE0_TPC2_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_7_BASE 0x402B80Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_8_BASE 0x402B85Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_9_BASE 0x402B8ACull
+#define DCORE0_TPC2_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_10_BASE 0x402B8FCull
+#define DCORE0_TPC2_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_11_BASE 0x402B94Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_12_BASE 0x402B99Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_13_BASE 0x402B9ECull
+#define DCORE0_TPC2_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_14_BASE 0x402BA3Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_TENSOR_15_BASE 0x402BA8Cull
+#define DCORE0_TPC2_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC2_CFG_QM_SYNC_OBJECT_BASE 0x402BADCull
+#define DCORE0_TPC2_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC2_CFG_QM_BASE 0x402BAE4ull
+#define DCORE0_TPC2_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC2_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC2_CFG_AXUSER_BASE 0x402BE00ull
+#define DCORE0_TPC2_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC2_CFG_SPECIAL_BASE 0x402BE80ull
+#define DCORE0_TPC2_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC2_MSTR_IF_RR_SHRD_HBW_BASE 0x402C000ull
+#define DCORE0_TPC2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC2_MSTR_IF_RR_PRVT_HBW_BASE 0x402C200ull
+#define DCORE0_TPC2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC2_MSTR_IF_RR_SHRD_LBW_BASE 0x402C400ull
+#define DCORE0_TPC2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC2_MSTR_IF_RR_PRVT_LBW_BASE 0x402C600ull
+#define DCORE0_TPC2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC2_MSTR_IF_E2E_CRDT_BASE 0x402C800ull
+#define DCORE0_TPC2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC2_MSTR_IF_AXUSER_BASE 0x402CA80ull
+#define DCORE0_TPC2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC2_MSTR_IF_DBG_HBW_BASE 0x402CB00ull
+#define DCORE0_TPC2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC2_MSTR_IF_DBG_LBW_BASE 0x402CB80ull
+#define DCORE0_TPC2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC2_MSTR_IF_CORE_HBW_BASE 0x402CC00ull
+#define DCORE0_TPC2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC2_MSTR_IF_CORE_LBW_BASE 0x402CD80ull
+#define DCORE0_TPC2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC2_MSTR_IF_SPECIAL_BASE 0x402CE80ull
+#define DCORE0_TPC2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC2_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_TPC3_QM_DCCM_BASE 0x4030000ull
+#define DCORE0_TPC3_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC3_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_ARC_AUX_BASE 0x4038000ull
+#define DCORE0_TPC3_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC3_QM_ARC_AUX_SPECIAL_BASE 0x4038E80ull
+#define DCORE0_TPC3_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC3_QM_BASE 0x403A000ull
+#define DCORE0_TPC3_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_QM_SECTION 0x9000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR0_BASE 0x403A900ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR1_BASE 0x403A908ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR2_BASE 0x403A910ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR3_BASE 0x403A918ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR4_BASE 0x403A920ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR5_BASE 0x403A928ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR6_BASE 0x403A930ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR7_BASE 0x403A938ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR8_BASE 0x403A940ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR9_BASE 0x403A948ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR10_BASE 0x403A950ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR11_BASE 0x403A958ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR12_BASE 0x403A960ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR13_BASE 0x403A968ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR14_BASE 0x403A970ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR15_BASE 0x403A978ull
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC3_QM_AXUSER_SECURED_BASE 0x403AB00ull
+#define DCORE0_TPC3_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_AXUSER_NONSECURED_BASE 0x403AB80ull
+#define DCORE0_TPC3_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_DBG_HBW_BASE 0x403AC00ull
+#define DCORE0_TPC3_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC3_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC3_QM_DBG_LBW_BASE 0x403AC80ull
+#define DCORE0_TPC3_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC3_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC3_QM_CGM_BASE 0x403AD80ull
+#define DCORE0_TPC3_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC3_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC3_QM_SPECIAL_BASE 0x403AE80ull
+#define DCORE0_TPC3_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_0_BASE 0x403B000ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC3_CFG_BASE 0x403B000ull
+#define DCORE0_TPC3_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC3_CFG_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_1_BASE 0x403B050ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_2_BASE 0x403B0A0ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_3_BASE 0x403B0F0ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_4_BASE 0x403B140ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_5_BASE 0x403B190ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_6_BASE 0x403B1E0ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_7_BASE 0x403B230ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_8_BASE 0x403B280ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_9_BASE 0x403B2D0ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_10_BASE 0x403B320ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_11_BASE 0x403B370ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_12_BASE 0x403B3C0ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_13_BASE 0x403B410ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_14_BASE 0x403B460ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_TENSOR_15_BASE 0x403B4B0ull
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_KERNEL_SYNC_OBJECT_BASE 0x403B500ull
+#define DCORE0_TPC3_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC3_CFG_KERNEL_BASE 0x403B508ull
+#define DCORE0_TPC3_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC3_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_0_BASE 0x403B5DCull
+#define DCORE0_TPC3_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_1_BASE 0x403B62Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_2_BASE 0x403B67Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_3_BASE 0x403B6CCull
+#define DCORE0_TPC3_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_4_BASE 0x403B71Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_5_BASE 0x403B76Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_6_BASE 0x403B7BCull
+#define DCORE0_TPC3_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_7_BASE 0x403B80Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_8_BASE 0x403B85Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_9_BASE 0x403B8ACull
+#define DCORE0_TPC3_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_10_BASE 0x403B8FCull
+#define DCORE0_TPC3_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_11_BASE 0x403B94Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_12_BASE 0x403B99Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_13_BASE 0x403B9ECull
+#define DCORE0_TPC3_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_14_BASE 0x403BA3Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_TENSOR_15_BASE 0x403BA8Cull
+#define DCORE0_TPC3_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC3_CFG_QM_SYNC_OBJECT_BASE 0x403BADCull
+#define DCORE0_TPC3_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC3_CFG_QM_BASE 0x403BAE4ull
+#define DCORE0_TPC3_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC3_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC3_CFG_AXUSER_BASE 0x403BE00ull
+#define DCORE0_TPC3_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC3_CFG_SPECIAL_BASE 0x403BE80ull
+#define DCORE0_TPC3_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC3_MSTR_IF_RR_SHRD_HBW_BASE 0x403C000ull
+#define DCORE0_TPC3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC3_MSTR_IF_RR_PRVT_HBW_BASE 0x403C200ull
+#define DCORE0_TPC3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC3_MSTR_IF_RR_SHRD_LBW_BASE 0x403C400ull
+#define DCORE0_TPC3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC3_MSTR_IF_RR_PRVT_LBW_BASE 0x403C600ull
+#define DCORE0_TPC3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC3_MSTR_IF_E2E_CRDT_BASE 0x403C800ull
+#define DCORE0_TPC3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC3_MSTR_IF_AXUSER_BASE 0x403CA80ull
+#define DCORE0_TPC3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC3_MSTR_IF_DBG_HBW_BASE 0x403CB00ull
+#define DCORE0_TPC3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC3_MSTR_IF_DBG_LBW_BASE 0x403CB80ull
+#define DCORE0_TPC3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC3_MSTR_IF_CORE_HBW_BASE 0x403CC00ull
+#define DCORE0_TPC3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC3_MSTR_IF_CORE_LBW_BASE 0x403CD80ull
+#define DCORE0_TPC3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC3_MSTR_IF_SPECIAL_BASE 0x403CE80ull
+#define DCORE0_TPC3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC3_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_TPC4_QM_DCCM_BASE 0x4040000ull
+#define DCORE0_TPC4_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC4_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_ARC_AUX_BASE 0x4048000ull
+#define DCORE0_TPC4_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC4_QM_ARC_AUX_SPECIAL_BASE 0x4048E80ull
+#define DCORE0_TPC4_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC4_QM_BASE 0x404A000ull
+#define DCORE0_TPC4_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_QM_SECTION 0x9000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR0_BASE 0x404A900ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR1_BASE 0x404A908ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR2_BASE 0x404A910ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR3_BASE 0x404A918ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR4_BASE 0x404A920ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR5_BASE 0x404A928ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR6_BASE 0x404A930ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR7_BASE 0x404A938ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR8_BASE 0x404A940ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR9_BASE 0x404A948ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR10_BASE 0x404A950ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR11_BASE 0x404A958ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR12_BASE 0x404A960ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR13_BASE 0x404A968ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR14_BASE 0x404A970ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR15_BASE 0x404A978ull
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC4_QM_AXUSER_SECURED_BASE 0x404AB00ull
+#define DCORE0_TPC4_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_AXUSER_NONSECURED_BASE 0x404AB80ull
+#define DCORE0_TPC4_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_DBG_HBW_BASE 0x404AC00ull
+#define DCORE0_TPC4_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC4_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC4_QM_DBG_LBW_BASE 0x404AC80ull
+#define DCORE0_TPC4_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC4_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC4_QM_CGM_BASE 0x404AD80ull
+#define DCORE0_TPC4_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC4_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC4_QM_SPECIAL_BASE 0x404AE80ull
+#define DCORE0_TPC4_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_0_BASE 0x404B000ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC4_CFG_BASE 0x404B000ull
+#define DCORE0_TPC4_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC4_CFG_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_1_BASE 0x404B050ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_2_BASE 0x404B0A0ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_3_BASE 0x404B0F0ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_4_BASE 0x404B140ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_5_BASE 0x404B190ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_6_BASE 0x404B1E0ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_7_BASE 0x404B230ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_8_BASE 0x404B280ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_9_BASE 0x404B2D0ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_10_BASE 0x404B320ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_11_BASE 0x404B370ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_12_BASE 0x404B3C0ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_13_BASE 0x404B410ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_14_BASE 0x404B460ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_TENSOR_15_BASE 0x404B4B0ull
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_KERNEL_SYNC_OBJECT_BASE 0x404B500ull
+#define DCORE0_TPC4_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC4_CFG_KERNEL_BASE 0x404B508ull
+#define DCORE0_TPC4_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC4_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_0_BASE 0x404B5DCull
+#define DCORE0_TPC4_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_1_BASE 0x404B62Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_2_BASE 0x404B67Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_3_BASE 0x404B6CCull
+#define DCORE0_TPC4_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_4_BASE 0x404B71Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_5_BASE 0x404B76Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_6_BASE 0x404B7BCull
+#define DCORE0_TPC4_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_7_BASE 0x404B80Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_8_BASE 0x404B85Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_9_BASE 0x404B8ACull
+#define DCORE0_TPC4_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_10_BASE 0x404B8FCull
+#define DCORE0_TPC4_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_11_BASE 0x404B94Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_12_BASE 0x404B99Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_13_BASE 0x404B9ECull
+#define DCORE0_TPC4_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_14_BASE 0x404BA3Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_TENSOR_15_BASE 0x404BA8Cull
+#define DCORE0_TPC4_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC4_CFG_QM_SYNC_OBJECT_BASE 0x404BADCull
+#define DCORE0_TPC4_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC4_CFG_QM_BASE 0x404BAE4ull
+#define DCORE0_TPC4_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC4_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC4_CFG_AXUSER_BASE 0x404BE00ull
+#define DCORE0_TPC4_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC4_CFG_SPECIAL_BASE 0x404BE80ull
+#define DCORE0_TPC4_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC4_MSTR_IF_RR_SHRD_HBW_BASE 0x404C000ull
+#define DCORE0_TPC4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC4_MSTR_IF_RR_PRVT_HBW_BASE 0x404C200ull
+#define DCORE0_TPC4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC4_MSTR_IF_RR_SHRD_LBW_BASE 0x404C400ull
+#define DCORE0_TPC4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC4_MSTR_IF_RR_PRVT_LBW_BASE 0x404C600ull
+#define DCORE0_TPC4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC4_MSTR_IF_E2E_CRDT_BASE 0x404C800ull
+#define DCORE0_TPC4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC4_MSTR_IF_AXUSER_BASE 0x404CA80ull
+#define DCORE0_TPC4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC4_MSTR_IF_DBG_HBW_BASE 0x404CB00ull
+#define DCORE0_TPC4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC4_MSTR_IF_DBG_LBW_BASE 0x404CB80ull
+#define DCORE0_TPC4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC4_MSTR_IF_CORE_HBW_BASE 0x404CC00ull
+#define DCORE0_TPC4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC4_MSTR_IF_CORE_LBW_BASE 0x404CD80ull
+#define DCORE0_TPC4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC4_MSTR_IF_SPECIAL_BASE 0x404CE80ull
+#define DCORE0_TPC4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC4_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_TPC5_QM_DCCM_BASE 0x4050000ull
+#define DCORE0_TPC5_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC5_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_ARC_AUX_BASE 0x4058000ull
+#define DCORE0_TPC5_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC5_QM_ARC_AUX_SPECIAL_BASE 0x4058E80ull
+#define DCORE0_TPC5_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC5_QM_BASE 0x405A000ull
+#define DCORE0_TPC5_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_QM_SECTION 0x9000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR0_BASE 0x405A900ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR1_BASE 0x405A908ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR2_BASE 0x405A910ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR3_BASE 0x405A918ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR4_BASE 0x405A920ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR5_BASE 0x405A928ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR6_BASE 0x405A930ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR7_BASE 0x405A938ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR8_BASE 0x405A940ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR9_BASE 0x405A948ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR10_BASE 0x405A950ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR11_BASE 0x405A958ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR12_BASE 0x405A960ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR13_BASE 0x405A968ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR14_BASE 0x405A970ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR15_BASE 0x405A978ull
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC5_QM_AXUSER_SECURED_BASE 0x405AB00ull
+#define DCORE0_TPC5_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_AXUSER_NONSECURED_BASE 0x405AB80ull
+#define DCORE0_TPC5_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_DBG_HBW_BASE 0x405AC00ull
+#define DCORE0_TPC5_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC5_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC5_QM_DBG_LBW_BASE 0x405AC80ull
+#define DCORE0_TPC5_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC5_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC5_QM_CGM_BASE 0x405AD80ull
+#define DCORE0_TPC5_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC5_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC5_QM_SPECIAL_BASE 0x405AE80ull
+#define DCORE0_TPC5_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_0_BASE 0x405B000ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC5_CFG_BASE 0x405B000ull
+#define DCORE0_TPC5_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC5_CFG_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_1_BASE 0x405B050ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_2_BASE 0x405B0A0ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_3_BASE 0x405B0F0ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_4_BASE 0x405B140ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_5_BASE 0x405B190ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_6_BASE 0x405B1E0ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_7_BASE 0x405B230ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_8_BASE 0x405B280ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_9_BASE 0x405B2D0ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_10_BASE 0x405B320ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_11_BASE 0x405B370ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_12_BASE 0x405B3C0ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_13_BASE 0x405B410ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_14_BASE 0x405B460ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_TENSOR_15_BASE 0x405B4B0ull
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_KERNEL_SYNC_OBJECT_BASE 0x405B500ull
+#define DCORE0_TPC5_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC5_CFG_KERNEL_BASE 0x405B508ull
+#define DCORE0_TPC5_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC5_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_0_BASE 0x405B5DCull
+#define DCORE0_TPC5_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_1_BASE 0x405B62Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_2_BASE 0x405B67Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_3_BASE 0x405B6CCull
+#define DCORE0_TPC5_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_4_BASE 0x405B71Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_5_BASE 0x405B76Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_6_BASE 0x405B7BCull
+#define DCORE0_TPC5_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_7_BASE 0x405B80Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_8_BASE 0x405B85Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_9_BASE 0x405B8ACull
+#define DCORE0_TPC5_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_10_BASE 0x405B8FCull
+#define DCORE0_TPC5_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_11_BASE 0x405B94Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_12_BASE 0x405B99Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_13_BASE 0x405B9ECull
+#define DCORE0_TPC5_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_14_BASE 0x405BA3Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_TENSOR_15_BASE 0x405BA8Cull
+#define DCORE0_TPC5_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC5_CFG_QM_SYNC_OBJECT_BASE 0x405BADCull
+#define DCORE0_TPC5_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC5_CFG_QM_BASE 0x405BAE4ull
+#define DCORE0_TPC5_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC5_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC5_CFG_AXUSER_BASE 0x405BE00ull
+#define DCORE0_TPC5_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC5_CFG_SPECIAL_BASE 0x405BE80ull
+#define DCORE0_TPC5_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC5_MSTR_IF_RR_SHRD_HBW_BASE 0x405C000ull
+#define DCORE0_TPC5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC5_MSTR_IF_RR_PRVT_HBW_BASE 0x405C200ull
+#define DCORE0_TPC5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC5_MSTR_IF_RR_SHRD_LBW_BASE 0x405C400ull
+#define DCORE0_TPC5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC5_MSTR_IF_RR_PRVT_LBW_BASE 0x405C600ull
+#define DCORE0_TPC5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC5_MSTR_IF_E2E_CRDT_BASE 0x405C800ull
+#define DCORE0_TPC5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC5_MSTR_IF_AXUSER_BASE 0x405CA80ull
+#define DCORE0_TPC5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC5_MSTR_IF_DBG_HBW_BASE 0x405CB00ull
+#define DCORE0_TPC5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC5_MSTR_IF_DBG_LBW_BASE 0x405CB80ull
+#define DCORE0_TPC5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC5_MSTR_IF_CORE_HBW_BASE 0x405CC00ull
+#define DCORE0_TPC5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC5_MSTR_IF_CORE_LBW_BASE 0x405CD80ull
+#define DCORE0_TPC5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC5_MSTR_IF_SPECIAL_BASE 0x405CE80ull
+#define DCORE0_TPC5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC5_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_TPC6_QM_DCCM_BASE 0x4060000ull
+#define DCORE0_TPC6_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_TPC6_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_ARC_AUX_BASE 0x4068000ull
+#define DCORE0_TPC6_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_TPC6_QM_ARC_AUX_SPECIAL_BASE 0x4068E80ull
+#define DCORE0_TPC6_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TPC6_QM_BASE 0x406A000ull
+#define DCORE0_TPC6_QM_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_QM_SECTION 0x9000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR0_BASE 0x406A900ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR1_BASE 0x406A908ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR2_BASE 0x406A910ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR3_BASE 0x406A918ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR4_BASE 0x406A920ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR5_BASE 0x406A928ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR6_BASE 0x406A930ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR7_BASE 0x406A938ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR8_BASE 0x406A940ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR9_BASE 0x406A948ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR10_BASE 0x406A950ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR11_BASE 0x406A958ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR12_BASE 0x406A960ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR13_BASE 0x406A968ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR14_BASE 0x406A970ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR15_BASE 0x406A978ull
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_TPC6_QM_AXUSER_SECURED_BASE 0x406AB00ull
+#define DCORE0_TPC6_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_AXUSER_NONSECURED_BASE 0x406AB80ull
+#define DCORE0_TPC6_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_DBG_HBW_BASE 0x406AC00ull
+#define DCORE0_TPC6_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC6_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC6_QM_DBG_LBW_BASE 0x406AC80ull
+#define DCORE0_TPC6_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC6_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_TPC6_QM_CGM_BASE 0x406AD80ull
+#define DCORE0_TPC6_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_TPC6_QM_CGM_SECTION 0x1000
+#define mmDCORE0_TPC6_QM_SPECIAL_BASE 0x406AE80ull
+#define DCORE0_TPC6_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_0_BASE 0x406B000ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC6_CFG_BASE 0x406B000ull
+#define DCORE0_TPC6_CFG_MAX_OFFSET 0x1000
+#define DCORE0_TPC6_CFG_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_1_BASE 0x406B050ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_2_BASE 0x406B0A0ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_3_BASE 0x406B0F0ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_4_BASE 0x406B140ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_5_BASE 0x406B190ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_6_BASE 0x406B1E0ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_7_BASE 0x406B230ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_8_BASE 0x406B280ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_9_BASE 0x406B2D0ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_10_BASE 0x406B320ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_11_BASE 0x406B370ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_12_BASE 0x406B3C0ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_13_BASE 0x406B410ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_14_BASE 0x406B460ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_TENSOR_15_BASE 0x406B4B0ull
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_KERNEL_SYNC_OBJECT_BASE 0x406B500ull
+#define DCORE0_TPC6_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC6_CFG_KERNEL_BASE 0x406B508ull
+#define DCORE0_TPC6_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE0_TPC6_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_0_BASE 0x406B5DCull
+#define DCORE0_TPC6_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_1_BASE 0x406B62Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_2_BASE 0x406B67Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_3_BASE 0x406B6CCull
+#define DCORE0_TPC6_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_4_BASE 0x406B71Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_5_BASE 0x406B76Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_6_BASE 0x406B7BCull
+#define DCORE0_TPC6_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_7_BASE 0x406B80Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_8_BASE 0x406B85Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_9_BASE 0x406B8ACull
+#define DCORE0_TPC6_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_10_BASE 0x406B8FCull
+#define DCORE0_TPC6_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_11_BASE 0x406B94Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_12_BASE 0x406B99Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_13_BASE 0x406B9ECull
+#define DCORE0_TPC6_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_14_BASE 0x406BA3Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_TENSOR_15_BASE 0x406BA8Cull
+#define DCORE0_TPC6_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE0_TPC6_CFG_QM_SYNC_OBJECT_BASE 0x406BADCull
+#define DCORE0_TPC6_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE0_TPC6_CFG_QM_BASE 0x406BAE4ull
+#define DCORE0_TPC6_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE0_TPC6_CFG_QM_SECTION 0x31C0
+#define mmDCORE0_TPC6_CFG_AXUSER_BASE 0x406BE00ull
+#define DCORE0_TPC6_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC6_CFG_SPECIAL_BASE 0x406BE80ull
+#define DCORE0_TPC6_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC6_MSTR_IF_RR_SHRD_HBW_BASE 0x406C000ull
+#define DCORE0_TPC6_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC6_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_TPC6_MSTR_IF_RR_PRVT_HBW_BASE 0x406C200ull
+#define DCORE0_TPC6_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_TPC6_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_TPC6_MSTR_IF_RR_SHRD_LBW_BASE 0x406C400ull
+#define DCORE0_TPC6_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC6_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_TPC6_MSTR_IF_RR_PRVT_LBW_BASE 0x406C600ull
+#define DCORE0_TPC6_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_TPC6_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_TPC6_MSTR_IF_E2E_CRDT_BASE 0x406C800ull
+#define DCORE0_TPC6_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_TPC6_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_TPC6_MSTR_IF_AXUSER_BASE 0x406CA80ull
+#define DCORE0_TPC6_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_TPC6_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_TPC6_MSTR_IF_DBG_HBW_BASE 0x406CB00ull
+#define DCORE0_TPC6_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC6_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_TPC6_MSTR_IF_DBG_LBW_BASE 0x406CB80ull
+#define DCORE0_TPC6_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_TPC6_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_TPC6_MSTR_IF_CORE_HBW_BASE 0x406CC00ull
+#define DCORE0_TPC6_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_TPC6_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_TPC6_MSTR_IF_CORE_LBW_BASE 0x406CD80ull
+#define DCORE0_TPC6_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_TPC6_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_TPC6_MSTR_IF_SPECIAL_BASE 0x406CE80ull
+#define DCORE0_TPC6_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC6_MSTR_IF_SPECIAL_SECTION 0x13180
+#define mmDCORE0_HMMU0_MMU_BASE 0x4080000ull
+#define DCORE0_HMMU0_MMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_MMU_SECTION 0xE800
+#define mmDCORE0_HMMU0_MMU_SPECIAL_BASE 0x4080E80ull
+#define DCORE0_HMMU0_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU0_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU0_STLB_BASE 0x4081000ull
+#define DCORE0_HMMU0_STLB_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_STLB_SECTION 0xE800
+#define mmDCORE0_HMMU0_STLB_SPECIAL_BASE 0x4081E80ull
+#define DCORE0_HMMU0_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU0_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE0_HMMU0_SCRAMB_OUT_BASE 0x4083000ull
+#define DCORE0_HMMU0_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE0_HMMU0_SCRAMB_OUT_SPECIAL_BASE 0x4083E80ull
+#define DCORE0_HMMU0_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU0_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU0_MSTR_IF_RR_SHRD_HBW_BASE 0x4084000ull
+#define DCORE0_HMMU0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU0_MSTR_IF_RR_PRVT_HBW_BASE 0x4084200ull
+#define DCORE0_HMMU0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU0_MSTR_IF_RR_SHRD_LBW_BASE 0x4084400ull
+#define DCORE0_HMMU0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU0_MSTR_IF_RR_PRVT_LBW_BASE 0x4084600ull
+#define DCORE0_HMMU0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU0_MSTR_IF_E2E_CRDT_BASE 0x4084800ull
+#define DCORE0_HMMU0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_HMMU0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_HMMU0_MSTR_IF_AXUSER_BASE 0x4084A80ull
+#define DCORE0_HMMU0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_HMMU0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_HMMU0_MSTR_IF_DBG_HBW_BASE 0x4084B00ull
+#define DCORE0_HMMU0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_HMMU0_MSTR_IF_DBG_LBW_BASE 0x4084B80ull
+#define DCORE0_HMMU0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_HMMU0_MSTR_IF_CORE_HBW_BASE 0x4084C00ull
+#define DCORE0_HMMU0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_HMMU0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_HMMU0_MSTR_IF_CORE_LBW_BASE 0x4084D80ull
+#define DCORE0_HMMU0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_HMMU0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_HMMU0_MSTR_IF_SPECIAL_BASE 0x4084E80ull
+#define DCORE0_HMMU0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU0_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE0_HMMU1_MMU_BASE 0x4090000ull
+#define DCORE0_HMMU1_MMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_MMU_SECTION 0xE800
+#define mmDCORE0_HMMU1_MMU_SPECIAL_BASE 0x4090E80ull
+#define DCORE0_HMMU1_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU1_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU1_STLB_BASE 0x4091000ull
+#define DCORE0_HMMU1_STLB_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_STLB_SECTION 0xE800
+#define mmDCORE0_HMMU1_STLB_SPECIAL_BASE 0x4091E80ull
+#define DCORE0_HMMU1_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU1_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE0_HMMU1_SCRAMB_OUT_BASE 0x4093000ull
+#define DCORE0_HMMU1_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE0_HMMU1_SCRAMB_OUT_SPECIAL_BASE 0x4093E80ull
+#define DCORE0_HMMU1_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU1_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU1_MSTR_IF_RR_SHRD_HBW_BASE 0x4094000ull
+#define DCORE0_HMMU1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU1_MSTR_IF_RR_PRVT_HBW_BASE 0x4094200ull
+#define DCORE0_HMMU1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU1_MSTR_IF_RR_SHRD_LBW_BASE 0x4094400ull
+#define DCORE0_HMMU1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU1_MSTR_IF_RR_PRVT_LBW_BASE 0x4094600ull
+#define DCORE0_HMMU1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU1_MSTR_IF_E2E_CRDT_BASE 0x4094800ull
+#define DCORE0_HMMU1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_HMMU1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_HMMU1_MSTR_IF_AXUSER_BASE 0x4094A80ull
+#define DCORE0_HMMU1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_HMMU1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_HMMU1_MSTR_IF_DBG_HBW_BASE 0x4094B00ull
+#define DCORE0_HMMU1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_HMMU1_MSTR_IF_DBG_LBW_BASE 0x4094B80ull
+#define DCORE0_HMMU1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_HMMU1_MSTR_IF_CORE_HBW_BASE 0x4094C00ull
+#define DCORE0_HMMU1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_HMMU1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_HMMU1_MSTR_IF_CORE_LBW_BASE 0x4094D80ull
+#define DCORE0_HMMU1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_HMMU1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_HMMU1_MSTR_IF_SPECIAL_BASE 0x4094E80ull
+#define DCORE0_HMMU1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU1_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE0_HMMU2_MMU_BASE 0x40A0000ull
+#define DCORE0_HMMU2_MMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_MMU_SECTION 0xE800
+#define mmDCORE0_HMMU2_MMU_SPECIAL_BASE 0x40A0E80ull
+#define DCORE0_HMMU2_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU2_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU2_STLB_BASE 0x40A1000ull
+#define DCORE0_HMMU2_STLB_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_STLB_SECTION 0xE800
+#define mmDCORE0_HMMU2_STLB_SPECIAL_BASE 0x40A1E80ull
+#define DCORE0_HMMU2_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU2_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE0_HMMU2_SCRAMB_OUT_BASE 0x40A3000ull
+#define DCORE0_HMMU2_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE0_HMMU2_SCRAMB_OUT_SPECIAL_BASE 0x40A3E80ull
+#define DCORE0_HMMU2_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU2_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU2_MSTR_IF_RR_SHRD_HBW_BASE 0x40A4000ull
+#define DCORE0_HMMU2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU2_MSTR_IF_RR_PRVT_HBW_BASE 0x40A4200ull
+#define DCORE0_HMMU2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU2_MSTR_IF_RR_SHRD_LBW_BASE 0x40A4400ull
+#define DCORE0_HMMU2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU2_MSTR_IF_RR_PRVT_LBW_BASE 0x40A4600ull
+#define DCORE0_HMMU2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU2_MSTR_IF_E2E_CRDT_BASE 0x40A4800ull
+#define DCORE0_HMMU2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_HMMU2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_HMMU2_MSTR_IF_AXUSER_BASE 0x40A4A80ull
+#define DCORE0_HMMU2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_HMMU2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_HMMU2_MSTR_IF_DBG_HBW_BASE 0x40A4B00ull
+#define DCORE0_HMMU2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_HMMU2_MSTR_IF_DBG_LBW_BASE 0x40A4B80ull
+#define DCORE0_HMMU2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_HMMU2_MSTR_IF_CORE_HBW_BASE 0x40A4C00ull
+#define DCORE0_HMMU2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_HMMU2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_HMMU2_MSTR_IF_CORE_LBW_BASE 0x40A4D80ull
+#define DCORE0_HMMU2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_HMMU2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_HMMU2_MSTR_IF_SPECIAL_BASE 0x40A4E80ull
+#define DCORE0_HMMU2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU2_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE0_HMMU3_MMU_BASE 0x40B0000ull
+#define DCORE0_HMMU3_MMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_MMU_SECTION 0xE800
+#define mmDCORE0_HMMU3_MMU_SPECIAL_BASE 0x40B0E80ull
+#define DCORE0_HMMU3_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU3_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU3_STLB_BASE 0x40B1000ull
+#define DCORE0_HMMU3_STLB_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_STLB_SECTION 0xE800
+#define mmDCORE0_HMMU3_STLB_SPECIAL_BASE 0x40B1E80ull
+#define DCORE0_HMMU3_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU3_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE0_HMMU3_SCRAMB_OUT_BASE 0x40B3000ull
+#define DCORE0_HMMU3_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE0_HMMU3_SCRAMB_OUT_SPECIAL_BASE 0x40B3E80ull
+#define DCORE0_HMMU3_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU3_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HMMU3_MSTR_IF_RR_SHRD_HBW_BASE 0x40B4000ull
+#define DCORE0_HMMU3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU3_MSTR_IF_RR_PRVT_HBW_BASE 0x40B4200ull
+#define DCORE0_HMMU3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_HMMU3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_HMMU3_MSTR_IF_RR_SHRD_LBW_BASE 0x40B4400ull
+#define DCORE0_HMMU3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU3_MSTR_IF_RR_PRVT_LBW_BASE 0x40B4600ull
+#define DCORE0_HMMU3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_HMMU3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_HMMU3_MSTR_IF_E2E_CRDT_BASE 0x40B4800ull
+#define DCORE0_HMMU3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_HMMU3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_HMMU3_MSTR_IF_AXUSER_BASE 0x40B4A80ull
+#define DCORE0_HMMU3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_HMMU3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_HMMU3_MSTR_IF_DBG_HBW_BASE 0x40B4B00ull
+#define DCORE0_HMMU3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_HMMU3_MSTR_IF_DBG_LBW_BASE 0x40B4B80ull
+#define DCORE0_HMMU3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_HMMU3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_HMMU3_MSTR_IF_CORE_HBW_BASE 0x40B4C00ull
+#define DCORE0_HMMU3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_HMMU3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_HMMU3_MSTR_IF_CORE_LBW_BASE 0x40B4D80ull
+#define DCORE0_HMMU3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_HMMU3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_HMMU3_MSTR_IF_SPECIAL_BASE 0x40B4E80ull
+#define DCORE0_HMMU3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HMMU3_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE0_MME_QM_ARC_DCCM_BASE 0x40C0000ull
+#define DCORE0_MME_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_MME_QM_ARC_DCCM_SECTION 0x8000
+#define mmDCORE0_MME_QM_ARC_AUX_BASE 0x40C8000ull
+#define DCORE0_MME_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_MME_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_MME_QM_ARC_AUX_SPECIAL_BASE 0x40C8E80ull
+#define DCORE0_MME_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_QM_ARC_AUX_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_BASE 0x40C9000ull
+#define DCORE0_MME_QM_ARC_DUP_ENG_MAX_OFFSET 0x1000
+#define DCORE0_MME_QM_ARC_DUP_ENG_SECTION 0x9000
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_BASE 0x40C9900ull
+#define DCORE0_MME_QM_ARC_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_QM_ARC_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmDCORE0_MME_QM_ARC_DUP_ENG_SPECIAL_BASE 0x40C9E80ull
+#define DCORE0_MME_QM_ARC_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_QM_ARC_DUP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_QM_BASE 0x40CA000ull
+#define DCORE0_MME_QM_MAX_OFFSET 0x1000
+#define DCORE0_MME_QM_SECTION 0x9000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_BASE 0x40CA900ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_BASE 0x40CA908ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_BASE 0x40CA910ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_BASE 0x40CA918ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_BASE 0x40CA920ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_BASE 0x40CA928ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_BASE 0x40CA930ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_BASE 0x40CA938ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_BASE 0x40CA940ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_BASE 0x40CA948ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_BASE 0x40CA950ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_BASE 0x40CA958ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_BASE 0x40CA960ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_BASE 0x40CA968ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_BASE 0x40CA970ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_BASE 0x40CA978ull
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_MME_QM_AXUSER_SECURED_BASE 0x40CAB00ull
+#define DCORE0_MME_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_MME_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_MME_QM_AXUSER_NONSECURED_BASE 0x40CAB80ull
+#define DCORE0_MME_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_MME_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_MME_QM_DBG_HBW_BASE 0x40CAC00ull
+#define DCORE0_MME_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_QM_DBG_LBW_BASE 0x40CAC80ull
+#define DCORE0_MME_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_MME_QM_CGM_BASE 0x40CAD80ull
+#define DCORE0_MME_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_MME_QM_CGM_SECTION 0x1000
+#define mmDCORE0_MME_QM_SPECIAL_BASE 0x40CAE80ull
+#define DCORE0_MME_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_LO_BASE 0x40CB000ull
+#define DCORE0_MME_CTRL_LO_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_LO_SECTION 0x8000
+#define mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_BASE 0x40CB008ull
+#define DCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BASE 0x40CB028ull
+#define DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_BASE 0x40CB040ull
+#define DCORE0_MME_CTRL_LO_ARCH_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_BASE 0x40CB098ull
+#define DCORE0_MME_CTRL_LO_ARCH_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_BASE 0x40CB0F0ull
+#define DCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_BASE 0x40CB15Cull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_BASE 0x40CB170ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_BASE 0x40CB184ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_BASE 0x40CB198ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_BASE 0x40CB1ACull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_BASE 0x40CB1C0ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_BASE 0x40CB1D4ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_BASE 0x40CB1E8ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_BASE 0x40CB1FCull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_BASE 0x40CB210ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_BASE 0x40CB22Cull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_BASE 0x40CB240ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_BASE 0x40CB254ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_BASE 0x40CB268ull
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_BASE 0x40CB280ull
+#define DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SECTION 0xB800
+#define mmDCORE0_MME_CTRL_LO_MME_AXUSER_BASE 0x40CBE00ull
+#define DCORE0_MME_CTRL_LO_MME_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_CTRL_LO_MME_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_CTRL_LO_SPECIAL_BASE 0x40CBE80ull
+#define DCORE0_MME_CTRL_LO_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_LO_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_BASE 0x40CC000ull
+#define DCORE0_MME_CTRL_HI_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_HI_SECTION 0x8000
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_BASE_ADDR_BASE 0x40CC008ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE0_MME_CTRL_HI_SHADOW_0_BASE_ADDR_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_BASE 0x40CC028ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_A_BASE 0x40CC040ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_A_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_B_BASE 0x40CC098ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_B_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_BASE 0x40CC0F0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_BASE 0x40CC15Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_BASE 0x40CC170ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_BASE 0x40CC184ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_BASE 0x40CC198ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_BASE 0x40CC1ACull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_BASE 0x40CC1C0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_BASE 0x40CC1D4ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_BASE 0x40CC1E8ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_BASE 0x40CC1FCull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_BASE 0x40CC210ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_BASE 0x40CC22Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_BASE 0x40CC240ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_BASE 0x40CC254ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_BASE 0x40CC268ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_BASE 0x40CC280ull
+#define DCORE0_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE0_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_BASE_ADDR_BASE 0x40CC308ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE0_MME_CTRL_HI_SHADOW_1_BASE_ADDR_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_BASE 0x40CC328ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_A_BASE 0x40CC340ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_A_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_B_BASE 0x40CC398ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_B_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_BASE 0x40CC3F0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_BASE 0x40CC45Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_BASE 0x40CC470ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_BASE 0x40CC484ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_BASE 0x40CC498ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_BASE 0x40CC4ACull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_BASE 0x40CC4C0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_BASE 0x40CC4D4ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_BASE 0x40CC4E8ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_BASE 0x40CC4FCull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_BASE 0x40CC510ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_BASE 0x40CC52Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_BASE 0x40CC540ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_BASE 0x40CC554ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_BASE 0x40CC568ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_BASE 0x40CC580ull
+#define DCORE0_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE0_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_BASE_ADDR_BASE 0x40CC608ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE0_MME_CTRL_HI_SHADOW_2_BASE_ADDR_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_BASE 0x40CC628ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_A_BASE 0x40CC640ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_A_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_B_BASE 0x40CC698ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_B_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_BASE 0x40CC6F0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_BASE 0x40CC75Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_BASE 0x40CC770ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_BASE 0x40CC784ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_BASE 0x40CC798ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_BASE 0x40CC7ACull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_BASE 0x40CC7C0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_BASE 0x40CC7D4ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_BASE 0x40CC7E8ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_BASE 0x40CC7FCull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_BASE 0x40CC810ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_BASE 0x40CC82Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_BASE 0x40CC840ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_BASE 0x40CC854ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_BASE 0x40CC868ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_BASE 0x40CC880ull
+#define DCORE0_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE0_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_BASE_ADDR_BASE 0x40CC908ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE0_MME_CTRL_HI_SHADOW_3_BASE_ADDR_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_BASE 0x40CC928ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_A_BASE 0x40CC940ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_A_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_B_BASE 0x40CC998ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_B_SECTION 0x5800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_BASE 0x40CC9F0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_BASE 0x40CCA5Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_BASE 0x40CCA70ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_BASE 0x40CCA84ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_BASE 0x40CCA98ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_BASE 0x40CCAACull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_BASE 0x40CCAC0ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_BASE 0x40CCAD4ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_BASE 0x40CCAE8ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_BASE 0x40CCAFCull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_BASE 0x40CCB10ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_BASE 0x40CCB2Cull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_BASE 0x40CCB40ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_BASE 0x40CCB54ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_BASE 0x40CCB68ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_BASE 0x40CCB80ull
+#define DCORE0_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE0_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_SECTION 0x3000
+#define mmDCORE0_MME_CTRL_HI_SPECIAL_BASE 0x40CCE80ull
+#define DCORE0_MME_CTRL_HI_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_HI_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_EU_BIST_BASE 0x40CD000ull
+#define DCORE0_MME_EU_BIST_MAX_OFFSET 0x1000
+#define DCORE0_MME_EU_BIST_SECTION 0xE800
+#define mmDCORE0_MME_EU_BIST_SPECIAL_BASE 0x40CDE80ull
+#define DCORE0_MME_EU_BIST_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_EU_BIST_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_MSTR_IF_RR_SHRD_HBW_BASE 0x40CE000ull
+#define DCORE0_MME_CTRL_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_CTRL_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_MSTR_IF_RR_PRVT_HBW_BASE 0x40CE200ull
+#define DCORE0_MME_CTRL_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_CTRL_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_MSTR_IF_RR_SHRD_LBW_BASE 0x40CE400ull
+#define DCORE0_MME_CTRL_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_CTRL_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_MSTR_IF_RR_PRVT_LBW_BASE 0x40CE600ull
+#define DCORE0_MME_CTRL_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_CTRL_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_CTRL_MSTR_IF_E2E_CRDT_BASE 0x40CE800ull
+#define DCORE0_MME_CTRL_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_CTRL_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_CTRL_MSTR_IF_AXUSER_BASE 0x40CEA80ull
+#define DCORE0_MME_CTRL_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_CTRL_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_CTRL_MSTR_IF_DBG_HBW_BASE 0x40CEB00ull
+#define DCORE0_MME_CTRL_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_CTRL_MSTR_IF_DBG_LBW_BASE 0x40CEB80ull
+#define DCORE0_MME_CTRL_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_CTRL_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_CTRL_MSTR_IF_CORE_HBW_BASE 0x40CEC00ull
+#define DCORE0_MME_CTRL_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_CTRL_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_CTRL_MSTR_IF_CORE_LBW_BASE 0x40CED80ull
+#define DCORE0_MME_CTRL_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_CTRL_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_MSTR_IF_SPECIAL_BASE 0x40CEE80ull
+#define DCORE0_MME_CTRL_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_CTRL_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_BASE 0x40CF000ull
+#define DCORE0_MME_QM_ARC_ACP_ENG_MAX_OFFSET 0x1000
+#define DCORE0_MME_QM_ARC_ACP_ENG_SECTION 0xE800
+#define mmDCORE0_MME_QM_ARC_ACP_ENG_SPECIAL_BASE 0x40CFE80ull
+#define DCORE0_MME_QM_ARC_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_QM_ARC_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_SBTE0_BASE 0x40D0000ull
+#define DCORE0_MME_SBTE0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_SECTION 0xE800
+#define mmDCORE0_MME_SBTE0_SPECIAL_BASE 0x40D0E80ull
+#define DCORE0_MME_SBTE0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE0_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE 0x40D1000ull
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_BASE 0x40D1200ull
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_BASE 0x40D1400ull
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_BASE 0x40D1600ull
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_E2E_CRDT_BASE 0x40D1800ull
+#define DCORE0_MME_SBTE0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_SBTE0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_BASE 0x40D1A80ull
+#define DCORE0_MME_SBTE0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_SBTE0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_DBG_HBW_BASE 0x40D1B00ull
+#define DCORE0_MME_SBTE0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_DBG_LBW_BASE 0x40D1B80ull
+#define DCORE0_MME_SBTE0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_CORE_HBW_BASE 0x40D1C00ull
+#define DCORE0_MME_SBTE0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_SBTE0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_SBTE0_MSTR_IF_CORE_LBW_BASE 0x40D1D80ull
+#define DCORE0_MME_SBTE0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_SBTE0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_MSTR_IF_SPECIAL_BASE 0x40D1E80ull
+#define DCORE0_MME_SBTE0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE0_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE0_MME_SBTE1_BASE 0x40D8000ull
+#define DCORE0_MME_SBTE1_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_SECTION 0xE800
+#define mmDCORE0_MME_SBTE1_SPECIAL_BASE 0x40D8E80ull
+#define DCORE0_MME_SBTE1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE1_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_BASE 0x40D9000ull
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_BASE 0x40D9200ull
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_BASE 0x40D9400ull
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_BASE 0x40D9600ull
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_E2E_CRDT_BASE 0x40D9800ull
+#define DCORE0_MME_SBTE1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_SBTE1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_SBTE1_MSTR_IF_AXUSER_BASE 0x40D9A80ull
+#define DCORE0_MME_SBTE1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_SBTE1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_DBG_HBW_BASE 0x40D9B00ull
+#define DCORE0_MME_SBTE1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_DBG_LBW_BASE 0x40D9B80ull
+#define DCORE0_MME_SBTE1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_CORE_HBW_BASE 0x40D9C00ull
+#define DCORE0_MME_SBTE1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_SBTE1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_SBTE1_MSTR_IF_CORE_LBW_BASE 0x40D9D80ull
+#define DCORE0_MME_SBTE1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_SBTE1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_MSTR_IF_SPECIAL_BASE 0x40D9E80ull
+#define DCORE0_MME_SBTE1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE1_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE0_MME_SBTE2_BASE 0x40E0000ull
+#define DCORE0_MME_SBTE2_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_SECTION 0xE800
+#define mmDCORE0_MME_SBTE2_SPECIAL_BASE 0x40E0E80ull
+#define DCORE0_MME_SBTE2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE2_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_BASE 0x40E1000ull
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_BASE 0x40E1200ull
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_BASE 0x40E1400ull
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_BASE 0x40E1600ull
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_E2E_CRDT_BASE 0x40E1800ull
+#define DCORE0_MME_SBTE2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_SBTE2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_SBTE2_MSTR_IF_AXUSER_BASE 0x40E1A80ull
+#define DCORE0_MME_SBTE2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_SBTE2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_DBG_HBW_BASE 0x40E1B00ull
+#define DCORE0_MME_SBTE2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_DBG_LBW_BASE 0x40E1B80ull
+#define DCORE0_MME_SBTE2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_CORE_HBW_BASE 0x40E1C00ull
+#define DCORE0_MME_SBTE2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_SBTE2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_SBTE2_MSTR_IF_CORE_LBW_BASE 0x40E1D80ull
+#define DCORE0_MME_SBTE2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_SBTE2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_MSTR_IF_SPECIAL_BASE 0x40E1E80ull
+#define DCORE0_MME_SBTE2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE2_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE0_MME_SBTE3_BASE 0x40E8000ull
+#define DCORE0_MME_SBTE3_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_SECTION 0xE800
+#define mmDCORE0_MME_SBTE3_SPECIAL_BASE 0x40E8E80ull
+#define DCORE0_MME_SBTE3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_BASE 0x40E9000ull
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_BASE 0x40E9200ull
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_BASE 0x40E9400ull
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_BASE 0x40E9600ull
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_E2E_CRDT_BASE 0x40E9800ull
+#define DCORE0_MME_SBTE3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_SBTE3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_SBTE3_MSTR_IF_AXUSER_BASE 0x40E9A80ull
+#define DCORE0_MME_SBTE3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_SBTE3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_DBG_HBW_BASE 0x40E9B00ull
+#define DCORE0_MME_SBTE3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_DBG_LBW_BASE 0x40E9B80ull
+#define DCORE0_MME_SBTE3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_CORE_HBW_BASE 0x40E9C00ull
+#define DCORE0_MME_SBTE3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_SBTE3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_SBTE3_MSTR_IF_CORE_LBW_BASE 0x40E9D80ull
+#define DCORE0_MME_SBTE3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_SBTE3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_MSTR_IF_SPECIAL_BASE 0x40E9E80ull
+#define DCORE0_MME_SBTE3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE3_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE0_MME_SBTE4_BASE 0x40F0000ull
+#define DCORE0_MME_SBTE4_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_SECTION 0xE800
+#define mmDCORE0_MME_SBTE4_SPECIAL_BASE 0x40F0E80ull
+#define DCORE0_MME_SBTE4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE4_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_BASE 0x40F1000ull
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_BASE 0x40F1200ull
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_BASE 0x40F1400ull
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_BASE 0x40F1600ull
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_E2E_CRDT_BASE 0x40F1800ull
+#define DCORE0_MME_SBTE4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_SBTE4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_SBTE4_MSTR_IF_AXUSER_BASE 0x40F1A80ull
+#define DCORE0_MME_SBTE4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_SBTE4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_DBG_HBW_BASE 0x40F1B00ull
+#define DCORE0_MME_SBTE4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_DBG_LBW_BASE 0x40F1B80ull
+#define DCORE0_MME_SBTE4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_SBTE4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_CORE_HBW_BASE 0x40F1C00ull
+#define DCORE0_MME_SBTE4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_SBTE4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_SBTE4_MSTR_IF_CORE_LBW_BASE 0x40F1D80ull
+#define DCORE0_MME_SBTE4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_SBTE4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_MSTR_IF_SPECIAL_BASE 0x40F1E80ull
+#define DCORE0_MME_SBTE4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_SBTE4_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE0_MME_ACC_BASE 0x40F8000ull
+#define DCORE0_MME_ACC_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_SECTION 0xE800
+#define mmDCORE0_MME_ACC_SPECIAL_BASE 0x40F8E80ull
+#define DCORE0_MME_ACC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_ACC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE 0x40F9000ull
+#define DCORE0_MME_WB0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_WB0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_WB0_MSTR_IF_RR_PRVT_HBW_BASE 0x40F9200ull
+#define DCORE0_MME_WB0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_WB0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_WB0_MSTR_IF_RR_SHRD_LBW_BASE 0x40F9400ull
+#define DCORE0_MME_WB0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_WB0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_WB0_MSTR_IF_RR_PRVT_LBW_BASE 0x40F9600ull
+#define DCORE0_MME_WB0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_WB0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_WB0_MSTR_IF_E2E_CRDT_BASE 0x40F9800ull
+#define DCORE0_MME_WB0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_WB0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_WB0_MSTR_IF_AXUSER_BASE 0x40F9A80ull
+#define DCORE0_MME_WB0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_WB0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_WB0_MSTR_IF_DBG_HBW_BASE 0x40F9B00ull
+#define DCORE0_MME_WB0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_WB0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_WB0_MSTR_IF_DBG_LBW_BASE 0x40F9B80ull
+#define DCORE0_MME_WB0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_WB0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_WB0_MSTR_IF_CORE_HBW_BASE 0x40F9C00ull
+#define DCORE0_MME_WB0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_WB0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_WB0_MSTR_IF_CORE_LBW_BASE 0x40F9D80ull
+#define DCORE0_MME_WB0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_WB0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_WB0_MSTR_IF_SPECIAL_BASE 0x40F9E80ull
+#define DCORE0_MME_WB0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_WB0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE 0x40FA000ull
+#define DCORE0_MME_WB1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_WB1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_MME_WB1_MSTR_IF_RR_PRVT_HBW_BASE 0x40FA200ull
+#define DCORE0_MME_WB1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_MME_WB1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_MME_WB1_MSTR_IF_RR_SHRD_LBW_BASE 0x40FA400ull
+#define DCORE0_MME_WB1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_WB1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_MME_WB1_MSTR_IF_RR_PRVT_LBW_BASE 0x40FA600ull
+#define DCORE0_MME_WB1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_MME_WB1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_MME_WB1_MSTR_IF_E2E_CRDT_BASE 0x40FA800ull
+#define DCORE0_MME_WB1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_MME_WB1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_MME_WB1_MSTR_IF_AXUSER_BASE 0x40FAA80ull
+#define DCORE0_MME_WB1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_MME_WB1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_MME_WB1_MSTR_IF_DBG_HBW_BASE 0x40FAB00ull
+#define DCORE0_MME_WB1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_WB1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_MME_WB1_MSTR_IF_DBG_LBW_BASE 0x40FAB80ull
+#define DCORE0_MME_WB1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_MME_WB1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_MME_WB1_MSTR_IF_CORE_HBW_BASE 0x40FAC00ull
+#define DCORE0_MME_WB1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_MME_WB1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_MME_WB1_MSTR_IF_CORE_LBW_BASE 0x40FAD80ull
+#define DCORE0_MME_WB1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_MME_WB1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_MME_WB1_MSTR_IF_SPECIAL_BASE 0x40FAE80ull
+#define DCORE0_MME_WB1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_MME_WB1_MSTR_IF_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SYNC_MNGR_OBJS_BASE 0x4100000ull
+#define DCORE0_SYNC_MNGR_OBJS_MAX_OFFSET 0x15A00
+#define DCORE0_SYNC_MNGR_OBJS_SECTION 0x1E000
+#define mmDCORE0_SYNC_MNGR_GLBL_BASE 0x411E000ull
+#define DCORE0_SYNC_MNGR_GLBL_MAX_OFFSET 0x1000
+#define DCORE0_SYNC_MNGR_GLBL_SECTION 0xE800
+#define mmDCORE0_SYNC_MNGR_GLBL_SPECIAL_BASE 0x411EE80ull
+#define DCORE0_SYNC_MNGR_GLBL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SYNC_MNGR_GLBL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_BASE 0x411F000ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_BASE 0x411F200ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_BASE 0x411F400ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_BASE 0x411F600ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_E2E_CRDT_BASE 0x411F800ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_SYNC_MNGR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_BASE 0x411FA80ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_DBG_HBW_BASE 0x411FB00ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_SYNC_MNGR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_DBG_LBW_BASE 0x411FB80ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_SYNC_MNGR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_CORE_HBW_BASE 0x411FC00ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_SYNC_MNGR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_CORE_LBW_BASE 0x411FD80ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_SYNC_MNGR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_SYNC_MNGR_MSTR_IF_SPECIAL_BASE 0x411FE80ull
+#define DCORE0_SYNC_MNGR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SYNC_MNGR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HIF0_BASE 0x4120000ull
+#define DCORE0_HIF0_MAX_OFFSET 0x1000
+#define DCORE0_HIF0_SECTION 0xE800
+#define mmDCORE0_HIF0_SPECIAL_BASE 0x4120E80ull
+#define DCORE0_HIF0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HIF0_SPECIAL_SECTION 0x3180
+#define mmDCORE0_HIF1_BASE 0x4124000ull
+#define DCORE0_HIF1_MAX_OFFSET 0x1000
+#define DCORE0_HIF1_SECTION 0xE800
+#define mmDCORE0_HIF1_SPECIAL_BASE 0x4124E80ull
+#define DCORE0_HIF1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HIF1_SPECIAL_SECTION 0x3180
+#define mmDCORE0_HIF2_BASE 0x4128000ull
+#define DCORE0_HIF2_MAX_OFFSET 0x1000
+#define DCORE0_HIF2_SECTION 0xE800
+#define mmDCORE0_HIF2_SPECIAL_BASE 0x4128E80ull
+#define DCORE0_HIF2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HIF2_SPECIAL_SECTION 0x3180
+#define mmDCORE0_HIF3_BASE 0x412C000ull
+#define DCORE0_HIF3_MAX_OFFSET 0x1000
+#define DCORE0_HIF3_SECTION 0xE800
+#define mmDCORE0_HIF3_SPECIAL_BASE 0x412CE80ull
+#define DCORE0_HIF3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HIF3_SPECIAL_SECTION 0x13180
+#define mmDCORE0_RTR0_CTRL_BASE 0x4140000ull
+#define DCORE0_RTR0_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR0_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR0_CTRL_SPECIAL_BASE 0x4140E80ull
+#define DCORE0_RTR0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR0_H3_BASE 0x4141000ull
+#define DCORE0_RTR0_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR0_H3_SECTION 0xE800
+#define mmDCORE0_RTR0_H3_SPECIAL_BASE 0x4141E80ull
+#define DCORE0_RTR0_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR0_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE 0x4142000ull
+#define DCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_BASE 0x4142200ull
+#define DCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE 0x4142400ull
+#define DCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_BASE 0x4142600ull
+#define DCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR0_MSTR_IF_E2E_CRDT_BASE 0x4142800ull
+#define DCORE0_RTR0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR0_MSTR_IF_AXUSER_BASE 0x4142A80ull
+#define DCORE0_RTR0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR0_MSTR_IF_DBG_HBW_BASE 0x4142B00ull
+#define DCORE0_RTR0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR0_MSTR_IF_DBG_LBW_BASE 0x4142B80ull
+#define DCORE0_RTR0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR0_MSTR_IF_CORE_HBW_BASE 0x4142C00ull
+#define DCORE0_RTR0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR0_MSTR_IF_CORE_LBW_BASE 0x4142D80ull
+#define DCORE0_RTR0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR0_MSTR_IF_SPECIAL_BASE 0x4142E80ull
+#define DCORE0_RTR0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR0_ADD_DEC_HBW_BASE 0x4143000ull
+#define DCORE0_RTR0_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR0_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR0_ADD_DEC_LBW_BASE 0x4143400ull
+#define DCORE0_RTR0_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR0_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR0_ADD_DEC_SPECIAL_BASE 0x4143E80ull
+#define DCORE0_RTR0_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR0_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR0_BASE 0x4144000ull
+#define DCORE0_RTR0_MAX_OFFSET 0x1000
+#define DCORE0_RTR0_SECTION 0x3000
+#define mmDCORE0_RTR0_HBW_RD_RQ_LL_STAT_BASE 0x4144300ull
+#define DCORE0_RTR0_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_HBW_RD_RS_LL_STAT_BASE 0x4144340ull
+#define DCORE0_RTR0_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_HBW_WR_RQ_LL_STAT_BASE 0x4144380ull
+#define DCORE0_RTR0_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_HBW_WR_RS_LL_STAT_BASE 0x41443C0ull
+#define DCORE0_RTR0_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_LBW_RD_RQ_LL_STAT_BASE 0x4144400ull
+#define DCORE0_RTR0_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_LBW_RD_RS_LL_STAT_BASE 0x4144440ull
+#define DCORE0_RTR0_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_LBW_WR_RQ_LL_STAT_BASE 0x4144480ull
+#define DCORE0_RTR0_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_LBW_WR_RS_LL_STAT_BASE 0x41444C0ull
+#define DCORE0_RTR0_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_HBW_MFIFO_BASE 0x4144500ull
+#define DCORE0_RTR0_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR0_E2E_RD_LL_STAT_BASE 0x4144540ull
+#define DCORE0_RTR0_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR0_E2E_WR_LL_STAT_BASE 0x4144580ull
+#define DCORE0_RTR0_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR0_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR0_RTR_HBW_XACT_STAT_BASE 0x4144600ull
+#define DCORE0_RTR0_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR0_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR0_RTR_LBW_XACT_STAT_BASE 0x4144680ull
+#define DCORE0_RTR0_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR0_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR0_RTR_E2E_XACT_STAT_BASE 0x4144700ull
+#define DCORE0_RTR0_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR0_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR0_SPECIAL_BASE 0x4144E80ull
+#define DCORE0_RTR0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR0_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR0_DBG_ADDR_BASE 0x4145000ull
+#define DCORE0_RTR0_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR0_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR0_DBG_ADDR_SPECIAL_BASE 0x4145E80ull
+#define DCORE0_RTR0_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR0_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR1_CTRL_BASE 0x4148000ull
+#define DCORE0_RTR1_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR1_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR1_CTRL_SPECIAL_BASE 0x4148E80ull
+#define DCORE0_RTR1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR1_H3_BASE 0x4149000ull
+#define DCORE0_RTR1_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR1_H3_SECTION 0xE800
+#define mmDCORE0_RTR1_H3_SPECIAL_BASE 0x4149E80ull
+#define DCORE0_RTR1_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR1_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR1_MSTR_IF_RR_SHRD_HBW_BASE 0x414A000ull
+#define DCORE0_RTR1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR1_MSTR_IF_RR_PRVT_HBW_BASE 0x414A200ull
+#define DCORE0_RTR1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR1_MSTR_IF_RR_SHRD_LBW_BASE 0x414A400ull
+#define DCORE0_RTR1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR1_MSTR_IF_RR_PRVT_LBW_BASE 0x414A600ull
+#define DCORE0_RTR1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR1_MSTR_IF_E2E_CRDT_BASE 0x414A800ull
+#define DCORE0_RTR1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR1_MSTR_IF_AXUSER_BASE 0x414AA80ull
+#define DCORE0_RTR1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR1_MSTR_IF_DBG_HBW_BASE 0x414AB00ull
+#define DCORE0_RTR1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR1_MSTR_IF_DBG_LBW_BASE 0x414AB80ull
+#define DCORE0_RTR1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR1_MSTR_IF_CORE_HBW_BASE 0x414AC00ull
+#define DCORE0_RTR1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR1_MSTR_IF_CORE_LBW_BASE 0x414AD80ull
+#define DCORE0_RTR1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR1_MSTR_IF_SPECIAL_BASE 0x414AE80ull
+#define DCORE0_RTR1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR1_ADD_DEC_HBW_BASE 0x414B000ull
+#define DCORE0_RTR1_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR1_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR1_ADD_DEC_LBW_BASE 0x414B400ull
+#define DCORE0_RTR1_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR1_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR1_ADD_DEC_SPECIAL_BASE 0x414BE80ull
+#define DCORE0_RTR1_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR1_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR1_BASE 0x414C000ull
+#define DCORE0_RTR1_MAX_OFFSET 0x1000
+#define DCORE0_RTR1_SECTION 0x3000
+#define mmDCORE0_RTR1_HBW_RD_RQ_LL_STAT_BASE 0x414C300ull
+#define DCORE0_RTR1_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_HBW_RD_RS_LL_STAT_BASE 0x414C340ull
+#define DCORE0_RTR1_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_HBW_WR_RQ_LL_STAT_BASE 0x414C380ull
+#define DCORE0_RTR1_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_HBW_WR_RS_LL_STAT_BASE 0x414C3C0ull
+#define DCORE0_RTR1_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_LBW_RD_RQ_LL_STAT_BASE 0x414C400ull
+#define DCORE0_RTR1_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_LBW_RD_RS_LL_STAT_BASE 0x414C440ull
+#define DCORE0_RTR1_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_LBW_WR_RQ_LL_STAT_BASE 0x414C480ull
+#define DCORE0_RTR1_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_LBW_WR_RS_LL_STAT_BASE 0x414C4C0ull
+#define DCORE0_RTR1_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_HBW_MFIFO_BASE 0x414C500ull
+#define DCORE0_RTR1_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR1_E2E_RD_LL_STAT_BASE 0x414C540ull
+#define DCORE0_RTR1_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR1_E2E_WR_LL_STAT_BASE 0x414C580ull
+#define DCORE0_RTR1_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR1_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR1_RTR_HBW_XACT_STAT_BASE 0x414C600ull
+#define DCORE0_RTR1_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR1_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR1_RTR_LBW_XACT_STAT_BASE 0x414C680ull
+#define DCORE0_RTR1_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR1_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR1_RTR_E2E_XACT_STAT_BASE 0x414C700ull
+#define DCORE0_RTR1_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR1_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR1_SPECIAL_BASE 0x414CE80ull
+#define DCORE0_RTR1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR1_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR1_DBG_ADDR_BASE 0x414D000ull
+#define DCORE0_RTR1_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR1_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR1_DBG_ADDR_SPECIAL_BASE 0x414DE80ull
+#define DCORE0_RTR1_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR1_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR2_CTRL_BASE 0x4150000ull
+#define DCORE0_RTR2_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR2_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR2_CTRL_SPECIAL_BASE 0x4150E80ull
+#define DCORE0_RTR2_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR2_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR2_H3_BASE 0x4151000ull
+#define DCORE0_RTR2_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR2_H3_SECTION 0xE800
+#define mmDCORE0_RTR2_H3_SPECIAL_BASE 0x4151E80ull
+#define DCORE0_RTR2_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR2_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR2_MSTR_IF_RR_SHRD_HBW_BASE 0x4152000ull
+#define DCORE0_RTR2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR2_MSTR_IF_RR_PRVT_HBW_BASE 0x4152200ull
+#define DCORE0_RTR2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR2_MSTR_IF_RR_SHRD_LBW_BASE 0x4152400ull
+#define DCORE0_RTR2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR2_MSTR_IF_RR_PRVT_LBW_BASE 0x4152600ull
+#define DCORE0_RTR2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR2_MSTR_IF_E2E_CRDT_BASE 0x4152800ull
+#define DCORE0_RTR2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR2_MSTR_IF_AXUSER_BASE 0x4152A80ull
+#define DCORE0_RTR2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR2_MSTR_IF_DBG_HBW_BASE 0x4152B00ull
+#define DCORE0_RTR2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR2_MSTR_IF_DBG_LBW_BASE 0x4152B80ull
+#define DCORE0_RTR2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR2_MSTR_IF_CORE_HBW_BASE 0x4152C00ull
+#define DCORE0_RTR2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR2_MSTR_IF_CORE_LBW_BASE 0x4152D80ull
+#define DCORE0_RTR2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR2_MSTR_IF_SPECIAL_BASE 0x4152E80ull
+#define DCORE0_RTR2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR2_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR2_ADD_DEC_HBW_BASE 0x4153000ull
+#define DCORE0_RTR2_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR2_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR2_ADD_DEC_LBW_BASE 0x4153400ull
+#define DCORE0_RTR2_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR2_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR2_ADD_DEC_SPECIAL_BASE 0x4153E80ull
+#define DCORE0_RTR2_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR2_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR2_BASE 0x4154000ull
+#define DCORE0_RTR2_MAX_OFFSET 0x1000
+#define DCORE0_RTR2_SECTION 0x3000
+#define mmDCORE0_RTR2_HBW_RD_RQ_LL_STAT_BASE 0x4154300ull
+#define DCORE0_RTR2_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_HBW_RD_RS_LL_STAT_BASE 0x4154340ull
+#define DCORE0_RTR2_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_HBW_WR_RQ_LL_STAT_BASE 0x4154380ull
+#define DCORE0_RTR2_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_HBW_WR_RS_LL_STAT_BASE 0x41543C0ull
+#define DCORE0_RTR2_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_LBW_RD_RQ_LL_STAT_BASE 0x4154400ull
+#define DCORE0_RTR2_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_LBW_RD_RS_LL_STAT_BASE 0x4154440ull
+#define DCORE0_RTR2_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_LBW_WR_RQ_LL_STAT_BASE 0x4154480ull
+#define DCORE0_RTR2_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_LBW_WR_RS_LL_STAT_BASE 0x41544C0ull
+#define DCORE0_RTR2_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_HBW_MFIFO_BASE 0x4154500ull
+#define DCORE0_RTR2_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR2_E2E_RD_LL_STAT_BASE 0x4154540ull
+#define DCORE0_RTR2_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR2_E2E_WR_LL_STAT_BASE 0x4154580ull
+#define DCORE0_RTR2_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR2_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR2_RTR_HBW_XACT_STAT_BASE 0x4154600ull
+#define DCORE0_RTR2_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR2_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR2_RTR_LBW_XACT_STAT_BASE 0x4154680ull
+#define DCORE0_RTR2_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR2_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR2_RTR_E2E_XACT_STAT_BASE 0x4154700ull
+#define DCORE0_RTR2_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR2_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR2_SPECIAL_BASE 0x4154E80ull
+#define DCORE0_RTR2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR2_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR2_DBG_ADDR_BASE 0x4155000ull
+#define DCORE0_RTR2_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR2_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR2_DBG_ADDR_SPECIAL_BASE 0x4155E80ull
+#define DCORE0_RTR2_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR2_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR3_CTRL_BASE 0x4158000ull
+#define DCORE0_RTR3_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR3_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR3_CTRL_SPECIAL_BASE 0x4158E80ull
+#define DCORE0_RTR3_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR3_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR3_H3_BASE 0x4159000ull
+#define DCORE0_RTR3_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR3_H3_SECTION 0xE800
+#define mmDCORE0_RTR3_H3_SPECIAL_BASE 0x4159E80ull
+#define DCORE0_RTR3_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR3_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR3_MSTR_IF_RR_SHRD_HBW_BASE 0x415A000ull
+#define DCORE0_RTR3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR3_MSTR_IF_RR_PRVT_HBW_BASE 0x415A200ull
+#define DCORE0_RTR3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR3_MSTR_IF_RR_SHRD_LBW_BASE 0x415A400ull
+#define DCORE0_RTR3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR3_MSTR_IF_RR_PRVT_LBW_BASE 0x415A600ull
+#define DCORE0_RTR3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR3_MSTR_IF_E2E_CRDT_BASE 0x415A800ull
+#define DCORE0_RTR3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR3_MSTR_IF_AXUSER_BASE 0x415AA80ull
+#define DCORE0_RTR3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR3_MSTR_IF_DBG_HBW_BASE 0x415AB00ull
+#define DCORE0_RTR3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR3_MSTR_IF_DBG_LBW_BASE 0x415AB80ull
+#define DCORE0_RTR3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR3_MSTR_IF_CORE_HBW_BASE 0x415AC00ull
+#define DCORE0_RTR3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR3_MSTR_IF_CORE_LBW_BASE 0x415AD80ull
+#define DCORE0_RTR3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR3_MSTR_IF_SPECIAL_BASE 0x415AE80ull
+#define DCORE0_RTR3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR3_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR3_ADD_DEC_HBW_BASE 0x415B000ull
+#define DCORE0_RTR3_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR3_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR3_ADD_DEC_LBW_BASE 0x415B400ull
+#define DCORE0_RTR3_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR3_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR3_ADD_DEC_SPECIAL_BASE 0x415BE80ull
+#define DCORE0_RTR3_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR3_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR3_BASE 0x415C000ull
+#define DCORE0_RTR3_MAX_OFFSET 0x1000
+#define DCORE0_RTR3_SECTION 0x3000
+#define mmDCORE0_RTR3_HBW_RD_RQ_LL_STAT_BASE 0x415C300ull
+#define DCORE0_RTR3_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_HBW_RD_RS_LL_STAT_BASE 0x415C340ull
+#define DCORE0_RTR3_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_HBW_WR_RQ_LL_STAT_BASE 0x415C380ull
+#define DCORE0_RTR3_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_HBW_WR_RS_LL_STAT_BASE 0x415C3C0ull
+#define DCORE0_RTR3_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_LBW_RD_RQ_LL_STAT_BASE 0x415C400ull
+#define DCORE0_RTR3_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_LBW_RD_RS_LL_STAT_BASE 0x415C440ull
+#define DCORE0_RTR3_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_LBW_WR_RQ_LL_STAT_BASE 0x415C480ull
+#define DCORE0_RTR3_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_LBW_WR_RS_LL_STAT_BASE 0x415C4C0ull
+#define DCORE0_RTR3_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_HBW_MFIFO_BASE 0x415C500ull
+#define DCORE0_RTR3_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR3_E2E_RD_LL_STAT_BASE 0x415C540ull
+#define DCORE0_RTR3_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR3_E2E_WR_LL_STAT_BASE 0x415C580ull
+#define DCORE0_RTR3_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR3_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR3_RTR_HBW_XACT_STAT_BASE 0x415C600ull
+#define DCORE0_RTR3_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR3_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR3_RTR_LBW_XACT_STAT_BASE 0x415C680ull
+#define DCORE0_RTR3_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR3_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR3_RTR_E2E_XACT_STAT_BASE 0x415C700ull
+#define DCORE0_RTR3_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR3_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR3_SPECIAL_BASE 0x415CE80ull
+#define DCORE0_RTR3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR3_DBG_ADDR_BASE 0x415D000ull
+#define DCORE0_RTR3_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR3_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR3_DBG_ADDR_SPECIAL_BASE 0x415DE80ull
+#define DCORE0_RTR3_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR3_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR4_CTRL_BASE 0x4160000ull
+#define DCORE0_RTR4_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR4_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR4_CTRL_SPECIAL_BASE 0x4160E80ull
+#define DCORE0_RTR4_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR4_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR4_H3_BASE 0x4161000ull
+#define DCORE0_RTR4_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR4_H3_SECTION 0xE800
+#define mmDCORE0_RTR4_H3_SPECIAL_BASE 0x4161E80ull
+#define DCORE0_RTR4_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR4_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR4_MSTR_IF_RR_SHRD_HBW_BASE 0x4162000ull
+#define DCORE0_RTR4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR4_MSTR_IF_RR_PRVT_HBW_BASE 0x4162200ull
+#define DCORE0_RTR4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR4_MSTR_IF_RR_SHRD_LBW_BASE 0x4162400ull
+#define DCORE0_RTR4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR4_MSTR_IF_RR_PRVT_LBW_BASE 0x4162600ull
+#define DCORE0_RTR4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR4_MSTR_IF_E2E_CRDT_BASE 0x4162800ull
+#define DCORE0_RTR4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR4_MSTR_IF_AXUSER_BASE 0x4162A80ull
+#define DCORE0_RTR4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR4_MSTR_IF_DBG_HBW_BASE 0x4162B00ull
+#define DCORE0_RTR4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR4_MSTR_IF_DBG_LBW_BASE 0x4162B80ull
+#define DCORE0_RTR4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR4_MSTR_IF_CORE_HBW_BASE 0x4162C00ull
+#define DCORE0_RTR4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR4_MSTR_IF_CORE_LBW_BASE 0x4162D80ull
+#define DCORE0_RTR4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR4_MSTR_IF_SPECIAL_BASE 0x4162E80ull
+#define DCORE0_RTR4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR4_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR4_ADD_DEC_HBW_BASE 0x4163000ull
+#define DCORE0_RTR4_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR4_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR4_ADD_DEC_LBW_BASE 0x4163400ull
+#define DCORE0_RTR4_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR4_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR4_ADD_DEC_SPECIAL_BASE 0x4163E80ull
+#define DCORE0_RTR4_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR4_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR4_BASE 0x4164000ull
+#define DCORE0_RTR4_MAX_OFFSET 0x1000
+#define DCORE0_RTR4_SECTION 0x3000
+#define mmDCORE0_RTR4_HBW_RD_RQ_LL_STAT_BASE 0x4164300ull
+#define DCORE0_RTR4_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_HBW_RD_RS_LL_STAT_BASE 0x4164340ull
+#define DCORE0_RTR4_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_HBW_WR_RQ_LL_STAT_BASE 0x4164380ull
+#define DCORE0_RTR4_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_HBW_WR_RS_LL_STAT_BASE 0x41643C0ull
+#define DCORE0_RTR4_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_LBW_RD_RQ_LL_STAT_BASE 0x4164400ull
+#define DCORE0_RTR4_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_LBW_RD_RS_LL_STAT_BASE 0x4164440ull
+#define DCORE0_RTR4_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_LBW_WR_RQ_LL_STAT_BASE 0x4164480ull
+#define DCORE0_RTR4_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_LBW_WR_RS_LL_STAT_BASE 0x41644C0ull
+#define DCORE0_RTR4_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_HBW_MFIFO_BASE 0x4164500ull
+#define DCORE0_RTR4_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR4_E2E_RD_LL_STAT_BASE 0x4164540ull
+#define DCORE0_RTR4_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR4_E2E_WR_LL_STAT_BASE 0x4164580ull
+#define DCORE0_RTR4_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR4_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR4_RTR_HBW_XACT_STAT_BASE 0x4164600ull
+#define DCORE0_RTR4_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR4_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR4_RTR_LBW_XACT_STAT_BASE 0x4164680ull
+#define DCORE0_RTR4_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR4_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR4_RTR_E2E_XACT_STAT_BASE 0x4164700ull
+#define DCORE0_RTR4_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR4_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR4_SPECIAL_BASE 0x4164E80ull
+#define DCORE0_RTR4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR4_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR4_DBG_ADDR_BASE 0x4165000ull
+#define DCORE0_RTR4_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR4_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR4_DBG_ADDR_SPECIAL_BASE 0x4165E80ull
+#define DCORE0_RTR4_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR4_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR5_CTRL_BASE 0x4168000ull
+#define DCORE0_RTR5_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR5_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR5_CTRL_SPECIAL_BASE 0x4168E80ull
+#define DCORE0_RTR5_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR5_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR5_H3_BASE 0x4169000ull
+#define DCORE0_RTR5_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR5_H3_SECTION 0xE800
+#define mmDCORE0_RTR5_H3_SPECIAL_BASE 0x4169E80ull
+#define DCORE0_RTR5_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR5_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR5_MSTR_IF_RR_SHRD_HBW_BASE 0x416A000ull
+#define DCORE0_RTR5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR5_MSTR_IF_RR_PRVT_HBW_BASE 0x416A200ull
+#define DCORE0_RTR5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR5_MSTR_IF_RR_SHRD_LBW_BASE 0x416A400ull
+#define DCORE0_RTR5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR5_MSTR_IF_RR_PRVT_LBW_BASE 0x416A600ull
+#define DCORE0_RTR5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR5_MSTR_IF_E2E_CRDT_BASE 0x416A800ull
+#define DCORE0_RTR5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR5_MSTR_IF_AXUSER_BASE 0x416AA80ull
+#define DCORE0_RTR5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR5_MSTR_IF_DBG_HBW_BASE 0x416AB00ull
+#define DCORE0_RTR5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR5_MSTR_IF_DBG_LBW_BASE 0x416AB80ull
+#define DCORE0_RTR5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR5_MSTR_IF_CORE_HBW_BASE 0x416AC00ull
+#define DCORE0_RTR5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR5_MSTR_IF_CORE_LBW_BASE 0x416AD80ull
+#define DCORE0_RTR5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR5_MSTR_IF_SPECIAL_BASE 0x416AE80ull
+#define DCORE0_RTR5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR5_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR5_ADD_DEC_HBW_BASE 0x416B000ull
+#define DCORE0_RTR5_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR5_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR5_ADD_DEC_LBW_BASE 0x416B400ull
+#define DCORE0_RTR5_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR5_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR5_ADD_DEC_SPECIAL_BASE 0x416BE80ull
+#define DCORE0_RTR5_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR5_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR5_BASE 0x416C000ull
+#define DCORE0_RTR5_MAX_OFFSET 0x1000
+#define DCORE0_RTR5_SECTION 0x3000
+#define mmDCORE0_RTR5_HBW_RD_RQ_LL_STAT_BASE 0x416C300ull
+#define DCORE0_RTR5_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_HBW_RD_RS_LL_STAT_BASE 0x416C340ull
+#define DCORE0_RTR5_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_HBW_WR_RQ_LL_STAT_BASE 0x416C380ull
+#define DCORE0_RTR5_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_HBW_WR_RS_LL_STAT_BASE 0x416C3C0ull
+#define DCORE0_RTR5_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_LBW_RD_RQ_LL_STAT_BASE 0x416C400ull
+#define DCORE0_RTR5_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_LBW_RD_RS_LL_STAT_BASE 0x416C440ull
+#define DCORE0_RTR5_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_LBW_WR_RQ_LL_STAT_BASE 0x416C480ull
+#define DCORE0_RTR5_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_LBW_WR_RS_LL_STAT_BASE 0x416C4C0ull
+#define DCORE0_RTR5_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_HBW_MFIFO_BASE 0x416C500ull
+#define DCORE0_RTR5_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR5_E2E_RD_LL_STAT_BASE 0x416C540ull
+#define DCORE0_RTR5_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR5_E2E_WR_LL_STAT_BASE 0x416C580ull
+#define DCORE0_RTR5_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR5_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR5_RTR_HBW_XACT_STAT_BASE 0x416C600ull
+#define DCORE0_RTR5_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR5_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR5_RTR_LBW_XACT_STAT_BASE 0x416C680ull
+#define DCORE0_RTR5_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR5_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR5_RTR_E2E_XACT_STAT_BASE 0x416C700ull
+#define DCORE0_RTR5_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR5_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR5_SPECIAL_BASE 0x416CE80ull
+#define DCORE0_RTR5_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR5_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR5_DBG_ADDR_BASE 0x416D000ull
+#define DCORE0_RTR5_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR5_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR5_DBG_ADDR_SPECIAL_BASE 0x416DE80ull
+#define DCORE0_RTR5_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR5_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR6_CTRL_BASE 0x4170000ull
+#define DCORE0_RTR6_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR6_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR6_CTRL_SPECIAL_BASE 0x4170E80ull
+#define DCORE0_RTR6_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR6_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR6_H3_BASE 0x4171000ull
+#define DCORE0_RTR6_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR6_H3_SECTION 0xE800
+#define mmDCORE0_RTR6_H3_SPECIAL_BASE 0x4171E80ull
+#define DCORE0_RTR6_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR6_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR6_MSTR_IF_RR_SHRD_HBW_BASE 0x4172000ull
+#define DCORE0_RTR6_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR6_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR6_MSTR_IF_RR_PRVT_HBW_BASE 0x4172200ull
+#define DCORE0_RTR6_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR6_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR6_MSTR_IF_RR_SHRD_LBW_BASE 0x4172400ull
+#define DCORE0_RTR6_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR6_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR6_MSTR_IF_RR_PRVT_LBW_BASE 0x4172600ull
+#define DCORE0_RTR6_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR6_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR6_MSTR_IF_E2E_CRDT_BASE 0x4172800ull
+#define DCORE0_RTR6_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR6_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR6_MSTR_IF_AXUSER_BASE 0x4172A80ull
+#define DCORE0_RTR6_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR6_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR6_MSTR_IF_DBG_HBW_BASE 0x4172B00ull
+#define DCORE0_RTR6_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR6_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR6_MSTR_IF_DBG_LBW_BASE 0x4172B80ull
+#define DCORE0_RTR6_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR6_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR6_MSTR_IF_CORE_HBW_BASE 0x4172C00ull
+#define DCORE0_RTR6_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR6_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR6_MSTR_IF_CORE_LBW_BASE 0x4172D80ull
+#define DCORE0_RTR6_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR6_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR6_MSTR_IF_SPECIAL_BASE 0x4172E80ull
+#define DCORE0_RTR6_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR6_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR6_ADD_DEC_HBW_BASE 0x4173000ull
+#define DCORE0_RTR6_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR6_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR6_ADD_DEC_LBW_BASE 0x4173400ull
+#define DCORE0_RTR6_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR6_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR6_ADD_DEC_SPECIAL_BASE 0x4173E80ull
+#define DCORE0_RTR6_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR6_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR6_BASE 0x4174000ull
+#define DCORE0_RTR6_MAX_OFFSET 0x1000
+#define DCORE0_RTR6_SECTION 0x3000
+#define mmDCORE0_RTR6_HBW_RD_RQ_LL_STAT_BASE 0x4174300ull
+#define DCORE0_RTR6_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_HBW_RD_RS_LL_STAT_BASE 0x4174340ull
+#define DCORE0_RTR6_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_HBW_WR_RQ_LL_STAT_BASE 0x4174380ull
+#define DCORE0_RTR6_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_HBW_WR_RS_LL_STAT_BASE 0x41743C0ull
+#define DCORE0_RTR6_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_LBW_RD_RQ_LL_STAT_BASE 0x4174400ull
+#define DCORE0_RTR6_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_LBW_RD_RS_LL_STAT_BASE 0x4174440ull
+#define DCORE0_RTR6_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_LBW_WR_RQ_LL_STAT_BASE 0x4174480ull
+#define DCORE0_RTR6_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_LBW_WR_RS_LL_STAT_BASE 0x41744C0ull
+#define DCORE0_RTR6_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_HBW_MFIFO_BASE 0x4174500ull
+#define DCORE0_RTR6_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR6_E2E_RD_LL_STAT_BASE 0x4174540ull
+#define DCORE0_RTR6_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR6_E2E_WR_LL_STAT_BASE 0x4174580ull
+#define DCORE0_RTR6_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR6_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR6_RTR_HBW_XACT_STAT_BASE 0x4174600ull
+#define DCORE0_RTR6_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR6_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR6_RTR_LBW_XACT_STAT_BASE 0x4174680ull
+#define DCORE0_RTR6_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR6_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR6_RTR_E2E_XACT_STAT_BASE 0x4174700ull
+#define DCORE0_RTR6_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR6_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR6_SPECIAL_BASE 0x4174E80ull
+#define DCORE0_RTR6_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR6_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR6_DBG_ADDR_BASE 0x4175000ull
+#define DCORE0_RTR6_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR6_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR6_DBG_ADDR_SPECIAL_BASE 0x4175E80ull
+#define DCORE0_RTR6_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR6_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_RTR7_CTRL_BASE 0x4178000ull
+#define DCORE0_RTR7_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_RTR7_CTRL_SECTION 0xE800
+#define mmDCORE0_RTR7_CTRL_SPECIAL_BASE 0x4178E80ull
+#define DCORE0_RTR7_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR7_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR7_H3_BASE 0x4179000ull
+#define DCORE0_RTR7_H3_MAX_OFFSET 0x1000
+#define DCORE0_RTR7_H3_SECTION 0xE800
+#define mmDCORE0_RTR7_H3_SPECIAL_BASE 0x4179E80ull
+#define DCORE0_RTR7_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR7_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR7_MSTR_IF_RR_SHRD_HBW_BASE 0x417A000ull
+#define DCORE0_RTR7_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR7_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_RTR7_MSTR_IF_RR_PRVT_HBW_BASE 0x417A200ull
+#define DCORE0_RTR7_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_RTR7_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_RTR7_MSTR_IF_RR_SHRD_LBW_BASE 0x417A400ull
+#define DCORE0_RTR7_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR7_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_RTR7_MSTR_IF_RR_PRVT_LBW_BASE 0x417A600ull
+#define DCORE0_RTR7_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_RTR7_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_RTR7_MSTR_IF_E2E_CRDT_BASE 0x417A800ull
+#define DCORE0_RTR7_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_RTR7_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_RTR7_MSTR_IF_AXUSER_BASE 0x417AA80ull
+#define DCORE0_RTR7_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_RTR7_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_RTR7_MSTR_IF_DBG_HBW_BASE 0x417AB00ull
+#define DCORE0_RTR7_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR7_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_RTR7_MSTR_IF_DBG_LBW_BASE 0x417AB80ull
+#define DCORE0_RTR7_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_RTR7_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_RTR7_MSTR_IF_CORE_HBW_BASE 0x417AC00ull
+#define DCORE0_RTR7_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_RTR7_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_RTR7_MSTR_IF_CORE_LBW_BASE 0x417AD80ull
+#define DCORE0_RTR7_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_RTR7_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_RTR7_MSTR_IF_SPECIAL_BASE 0x417AE80ull
+#define DCORE0_RTR7_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR7_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR7_ADD_DEC_HBW_BASE 0x417B000ull
+#define DCORE0_RTR7_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE0_RTR7_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE0_RTR7_ADD_DEC_LBW_BASE 0x417B400ull
+#define DCORE0_RTR7_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE0_RTR7_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE0_RTR7_ADD_DEC_SPECIAL_BASE 0x417BE80ull
+#define DCORE0_RTR7_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR7_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR7_BASE 0x417C000ull
+#define DCORE0_RTR7_MAX_OFFSET 0x1000
+#define DCORE0_RTR7_SECTION 0x3000
+#define mmDCORE0_RTR7_HBW_RD_RQ_LL_STAT_BASE 0x417C300ull
+#define DCORE0_RTR7_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_HBW_RD_RS_LL_STAT_BASE 0x417C340ull
+#define DCORE0_RTR7_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_HBW_WR_RQ_LL_STAT_BASE 0x417C380ull
+#define DCORE0_RTR7_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_HBW_WR_RS_LL_STAT_BASE 0x417C3C0ull
+#define DCORE0_RTR7_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_LBW_RD_RQ_LL_STAT_BASE 0x417C400ull
+#define DCORE0_RTR7_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_LBW_RD_RS_LL_STAT_BASE 0x417C440ull
+#define DCORE0_RTR7_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_LBW_WR_RQ_LL_STAT_BASE 0x417C480ull
+#define DCORE0_RTR7_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_LBW_WR_RS_LL_STAT_BASE 0x417C4C0ull
+#define DCORE0_RTR7_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_HBW_MFIFO_BASE 0x417C500ull
+#define DCORE0_RTR7_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE0_RTR7_E2E_RD_LL_STAT_BASE 0x417C540ull
+#define DCORE0_RTR7_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE0_RTR7_E2E_WR_LL_STAT_BASE 0x417C580ull
+#define DCORE0_RTR7_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE0_RTR7_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE0_RTR7_RTR_HBW_XACT_STAT_BASE 0x417C600ull
+#define DCORE0_RTR7_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR7_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR7_RTR_LBW_XACT_STAT_BASE 0x417C680ull
+#define DCORE0_RTR7_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR7_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE0_RTR7_RTR_E2E_XACT_STAT_BASE 0x417C700ull
+#define DCORE0_RTR7_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE0_RTR7_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE0_RTR7_SPECIAL_BASE 0x417CE80ull
+#define DCORE0_RTR7_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR7_SPECIAL_SECTION 0x1800
+#define mmDCORE0_RTR7_DBG_ADDR_BASE 0x417D000ull
+#define DCORE0_RTR7_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE0_RTR7_DBG_ADDR_SECTION 0xE800
+#define mmDCORE0_RTR7_DBG_ADDR_SPECIAL_BASE 0x417DE80ull
+#define DCORE0_RTR7_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_RTR7_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE0_SRAM0_BANK_BASE 0x4180000ull
+#define DCORE0_SRAM0_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM0_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM0_BANK_SPECIAL_BASE 0x4180E80ull
+#define DCORE0_SRAM0_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM0_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM0_RTR_BASE 0x4181000ull
+#define DCORE0_SRAM0_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM0_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM0_RTR_SPECIAL_BASE 0x4181E80ull
+#define DCORE0_SRAM0_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM0_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM0_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4182000ull
+#define DCORE0_SRAM0_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4182100ull
+#define DCORE0_SRAM0_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4182200ull
+#define DCORE0_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4182300ull
+#define DCORE0_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4182400ull
+#define DCORE0_SRAM0_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4182500ull
+#define DCORE0_SRAM0_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4182600ull
+#define DCORE0_SRAM0_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM0_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4182700ull
+#define DCORE0_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4182780ull
+#define DCORE0_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4182800ull
+#define DCORE0_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4182880ull
+#define DCORE0_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4182900ull
+#define DCORE0_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4182980ull
+#define DCORE0_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4182A00ull
+#define DCORE0_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4182A80ull
+#define DCORE0_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM0_DBG_CNT_SPECIAL_BASE 0x4182E80ull
+#define DCORE0_SRAM0_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM0_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM1_BANK_BASE 0x4188000ull
+#define DCORE0_SRAM1_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM1_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM1_BANK_SPECIAL_BASE 0x4188E80ull
+#define DCORE0_SRAM1_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM1_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM1_RTR_BASE 0x4189000ull
+#define DCORE0_SRAM1_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM1_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM1_RTR_SPECIAL_BASE 0x4189E80ull
+#define DCORE0_SRAM1_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM1_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM1_DBG_CNT_N_HBW_DBG_CNT_BASE 0x418A000ull
+#define DCORE0_SRAM1_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_S_HBW_DBG_CNT_BASE 0x418A100ull
+#define DCORE0_SRAM1_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x418A200ull
+#define DCORE0_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x418A300ull
+#define DCORE0_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_N_LBW_DBG_CNT_BASE 0x418A400ull
+#define DCORE0_SRAM1_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_S_LBW_DBG_CNT_BASE 0x418A500ull
+#define DCORE0_SRAM1_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_L_LBW_DBG_CNT_BASE 0x418A600ull
+#define DCORE0_SRAM1_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM1_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x418A700ull
+#define DCORE0_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x418A780ull
+#define DCORE0_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x418A800ull
+#define DCORE0_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x418A880ull
+#define DCORE0_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x418A900ull
+#define DCORE0_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x418A980ull
+#define DCORE0_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x418AA00ull
+#define DCORE0_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x418AA80ull
+#define DCORE0_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM1_DBG_CNT_SPECIAL_BASE 0x418AE80ull
+#define DCORE0_SRAM1_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM1_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM2_BANK_BASE 0x4190000ull
+#define DCORE0_SRAM2_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM2_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM2_BANK_SPECIAL_BASE 0x4190E80ull
+#define DCORE0_SRAM2_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM2_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM2_RTR_BASE 0x4191000ull
+#define DCORE0_SRAM2_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM2_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM2_RTR_SPECIAL_BASE 0x4191E80ull
+#define DCORE0_SRAM2_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM2_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM2_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4192000ull
+#define DCORE0_SRAM2_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4192100ull
+#define DCORE0_SRAM2_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4192200ull
+#define DCORE0_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4192300ull
+#define DCORE0_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4192400ull
+#define DCORE0_SRAM2_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4192500ull
+#define DCORE0_SRAM2_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4192600ull
+#define DCORE0_SRAM2_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM2_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4192700ull
+#define DCORE0_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4192780ull
+#define DCORE0_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4192800ull
+#define DCORE0_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4192880ull
+#define DCORE0_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4192900ull
+#define DCORE0_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4192980ull
+#define DCORE0_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4192A00ull
+#define DCORE0_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4192A80ull
+#define DCORE0_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM2_DBG_CNT_SPECIAL_BASE 0x4192E80ull
+#define DCORE0_SRAM2_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM2_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM3_BANK_BASE 0x4198000ull
+#define DCORE0_SRAM3_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM3_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM3_BANK_SPECIAL_BASE 0x4198E80ull
+#define DCORE0_SRAM3_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM3_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM3_RTR_BASE 0x4199000ull
+#define DCORE0_SRAM3_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM3_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM3_RTR_SPECIAL_BASE 0x4199E80ull
+#define DCORE0_SRAM3_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM3_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM3_DBG_CNT_N_HBW_DBG_CNT_BASE 0x419A000ull
+#define DCORE0_SRAM3_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_S_HBW_DBG_CNT_BASE 0x419A100ull
+#define DCORE0_SRAM3_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x419A200ull
+#define DCORE0_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x419A300ull
+#define DCORE0_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_N_LBW_DBG_CNT_BASE 0x419A400ull
+#define DCORE0_SRAM3_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_S_LBW_DBG_CNT_BASE 0x419A500ull
+#define DCORE0_SRAM3_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_L_LBW_DBG_CNT_BASE 0x419A600ull
+#define DCORE0_SRAM3_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM3_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x419A700ull
+#define DCORE0_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x419A780ull
+#define DCORE0_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x419A800ull
+#define DCORE0_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x419A880ull
+#define DCORE0_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x419A900ull
+#define DCORE0_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x419A980ull
+#define DCORE0_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x419AA00ull
+#define DCORE0_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x419AA80ull
+#define DCORE0_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM3_DBG_CNT_SPECIAL_BASE 0x419AE80ull
+#define DCORE0_SRAM3_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM3_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM4_BANK_BASE 0x41A0000ull
+#define DCORE0_SRAM4_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM4_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM4_BANK_SPECIAL_BASE 0x41A0E80ull
+#define DCORE0_SRAM4_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM4_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM4_RTR_BASE 0x41A1000ull
+#define DCORE0_SRAM4_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM4_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM4_RTR_SPECIAL_BASE 0x41A1E80ull
+#define DCORE0_SRAM4_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM4_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM4_DBG_CNT_N_HBW_DBG_CNT_BASE 0x41A2000ull
+#define DCORE0_SRAM4_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_S_HBW_DBG_CNT_BASE 0x41A2100ull
+#define DCORE0_SRAM4_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x41A2200ull
+#define DCORE0_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x41A2300ull
+#define DCORE0_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_N_LBW_DBG_CNT_BASE 0x41A2400ull
+#define DCORE0_SRAM4_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_S_LBW_DBG_CNT_BASE 0x41A2500ull
+#define DCORE0_SRAM4_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_L_LBW_DBG_CNT_BASE 0x41A2600ull
+#define DCORE0_SRAM4_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM4_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x41A2700ull
+#define DCORE0_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x41A2780ull
+#define DCORE0_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x41A2800ull
+#define DCORE0_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x41A2880ull
+#define DCORE0_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x41A2900ull
+#define DCORE0_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x41A2980ull
+#define DCORE0_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x41A2A00ull
+#define DCORE0_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x41A2A80ull
+#define DCORE0_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM4_DBG_CNT_SPECIAL_BASE 0x41A2E80ull
+#define DCORE0_SRAM4_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM4_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM5_BANK_BASE 0x41A8000ull
+#define DCORE0_SRAM5_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM5_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM5_BANK_SPECIAL_BASE 0x41A8E80ull
+#define DCORE0_SRAM5_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM5_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM5_RTR_BASE 0x41A9000ull
+#define DCORE0_SRAM5_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM5_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM5_RTR_SPECIAL_BASE 0x41A9E80ull
+#define DCORE0_SRAM5_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM5_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM5_DBG_CNT_N_HBW_DBG_CNT_BASE 0x41AA000ull
+#define DCORE0_SRAM5_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_S_HBW_DBG_CNT_BASE 0x41AA100ull
+#define DCORE0_SRAM5_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x41AA200ull
+#define DCORE0_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x41AA300ull
+#define DCORE0_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_N_LBW_DBG_CNT_BASE 0x41AA400ull
+#define DCORE0_SRAM5_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_S_LBW_DBG_CNT_BASE 0x41AA500ull
+#define DCORE0_SRAM5_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_L_LBW_DBG_CNT_BASE 0x41AA600ull
+#define DCORE0_SRAM5_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM5_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x41AA700ull
+#define DCORE0_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x41AA780ull
+#define DCORE0_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x41AA800ull
+#define DCORE0_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x41AA880ull
+#define DCORE0_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x41AA900ull
+#define DCORE0_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x41AA980ull
+#define DCORE0_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x41AAA00ull
+#define DCORE0_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x41AAA80ull
+#define DCORE0_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM5_DBG_CNT_SPECIAL_BASE 0x41AAE80ull
+#define DCORE0_SRAM5_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM5_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM6_BANK_BASE 0x41B0000ull
+#define DCORE0_SRAM6_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM6_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM6_BANK_SPECIAL_BASE 0x41B0E80ull
+#define DCORE0_SRAM6_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM6_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM6_RTR_BASE 0x41B1000ull
+#define DCORE0_SRAM6_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM6_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM6_RTR_SPECIAL_BASE 0x41B1E80ull
+#define DCORE0_SRAM6_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM6_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM6_DBG_CNT_N_HBW_DBG_CNT_BASE 0x41B2000ull
+#define DCORE0_SRAM6_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_S_HBW_DBG_CNT_BASE 0x41B2100ull
+#define DCORE0_SRAM6_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x41B2200ull
+#define DCORE0_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x41B2300ull
+#define DCORE0_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_N_LBW_DBG_CNT_BASE 0x41B2400ull
+#define DCORE0_SRAM6_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_S_LBW_DBG_CNT_BASE 0x41B2500ull
+#define DCORE0_SRAM6_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_L_LBW_DBG_CNT_BASE 0x41B2600ull
+#define DCORE0_SRAM6_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM6_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x41B2700ull
+#define DCORE0_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x41B2780ull
+#define DCORE0_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x41B2800ull
+#define DCORE0_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x41B2880ull
+#define DCORE0_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x41B2900ull
+#define DCORE0_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x41B2980ull
+#define DCORE0_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x41B2A00ull
+#define DCORE0_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x41B2A80ull
+#define DCORE0_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM6_DBG_CNT_SPECIAL_BASE 0x41B2E80ull
+#define DCORE0_SRAM6_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM6_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_SRAM7_BANK_BASE 0x41B8000ull
+#define DCORE0_SRAM7_BANK_MAX_OFFSET 0x1000
+#define DCORE0_SRAM7_BANK_SECTION 0xE800
+#define mmDCORE0_SRAM7_BANK_SPECIAL_BASE 0x41B8E80ull
+#define DCORE0_SRAM7_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM7_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM7_RTR_BASE 0x41B9000ull
+#define DCORE0_SRAM7_RTR_MAX_OFFSET 0x1000
+#define DCORE0_SRAM7_RTR_SECTION 0xE800
+#define mmDCORE0_SRAM7_RTR_SPECIAL_BASE 0x41B9E80ull
+#define DCORE0_SRAM7_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM7_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE0_SRAM7_DBG_CNT_N_HBW_DBG_CNT_BASE 0x41BA000ull
+#define DCORE0_SRAM7_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_S_HBW_DBG_CNT_BASE 0x41BA100ull
+#define DCORE0_SRAM7_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x41BA200ull
+#define DCORE0_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x41BA300ull
+#define DCORE0_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_N_LBW_DBG_CNT_BASE 0x41BA400ull
+#define DCORE0_SRAM7_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_S_LBW_DBG_CNT_BASE 0x41BA500ull
+#define DCORE0_SRAM7_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_L_LBW_DBG_CNT_BASE 0x41BA600ull
+#define DCORE0_SRAM7_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE0_SRAM7_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE0_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x41BA700ull
+#define DCORE0_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x41BA780ull
+#define DCORE0_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x41BA800ull
+#define DCORE0_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x41BA880ull
+#define DCORE0_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x41BA900ull
+#define DCORE0_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x41BA980ull
+#define DCORE0_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x41BAA00ull
+#define DCORE0_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE0_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x41BAA80ull
+#define DCORE0_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE0_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE0_SRAM7_DBG_CNT_SPECIAL_BASE 0x41BAE80ull
+#define DCORE0_SRAM7_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_SRAM7_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE0_EDMA0_QM_DCCM_BASE 0x41C0000ull
+#define DCORE0_EDMA0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_EDMA0_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_ARC_AUX_BASE 0x41C8000ull
+#define DCORE0_EDMA0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_EDMA0_QM_ARC_AUX_SPECIAL_BASE 0x41C8E80ull
+#define DCORE0_EDMA0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_EDMA0_QM_BASE 0x41CA000ull
+#define DCORE0_EDMA0_QM_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_QM_SECTION 0x9000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x41CA900ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x41CA908ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x41CA910ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x41CA918ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x41CA920ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x41CA928ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x41CA930ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x41CA938ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x41CA940ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x41CA948ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x41CA950ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x41CA958ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x41CA960ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x41CA968ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x41CA970ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x41CA978ull
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_EDMA0_QM_AXUSER_SECURED_BASE 0x41CAB00ull
+#define DCORE0_EDMA0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_EDMA0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_BASE 0x41CAB80ull
+#define DCORE0_EDMA0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_EDMA0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_DBG_HBW_BASE 0x41CAC00ull
+#define DCORE0_EDMA0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_EDMA0_QM_DBG_LBW_BASE 0x41CAC80ull
+#define DCORE0_EDMA0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_EDMA0_QM_CGM_BASE 0x41CAD80ull
+#define DCORE0_EDMA0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_EDMA0_QM_CGM_SECTION 0x1000
+#define mmDCORE0_EDMA0_QM_SPECIAL_BASE 0x41CAE80ull
+#define DCORE0_EDMA0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_EDMA0_CORE_BASE 0x41CB000ull
+#define DCORE0_EDMA0_CORE_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_CORE_SECTION 0x8000
+#define mmDCORE0_EDMA0_CORE_CTX_AXUSER_BASE 0x41CB800ull
+#define DCORE0_EDMA0_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_EDMA0_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE0_EDMA0_CORE_CTX_BASE 0x41CB860ull
+#define DCORE0_EDMA0_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE0_EDMA0_CORE_CTX_SECTION 0x5A00
+#define mmDCORE0_EDMA0_CORE_KDMA_CGM_BASE 0x41CBE00ull
+#define DCORE0_EDMA0_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE0_EDMA0_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE0_EDMA0_CORE_SPECIAL_BASE 0x41CBE80ull
+#define DCORE0_EDMA0_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA0_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE0_EDMA0_MSTR_IF_RR_SHRD_HBW_BASE 0x41CC000ull
+#define DCORE0_EDMA0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_EDMA0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_EDMA0_MSTR_IF_RR_PRVT_HBW_BASE 0x41CC200ull
+#define DCORE0_EDMA0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_EDMA0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_EDMA0_MSTR_IF_RR_SHRD_LBW_BASE 0x41CC400ull
+#define DCORE0_EDMA0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_EDMA0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_EDMA0_MSTR_IF_RR_PRVT_LBW_BASE 0x41CC600ull
+#define DCORE0_EDMA0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_EDMA0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_EDMA0_MSTR_IF_E2E_CRDT_BASE 0x41CC800ull
+#define DCORE0_EDMA0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_EDMA0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_EDMA0_MSTR_IF_AXUSER_BASE 0x41CCA80ull
+#define DCORE0_EDMA0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_EDMA0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_EDMA0_MSTR_IF_DBG_HBW_BASE 0x41CCB00ull
+#define DCORE0_EDMA0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_EDMA0_MSTR_IF_DBG_LBW_BASE 0x41CCB80ull
+#define DCORE0_EDMA0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_EDMA0_MSTR_IF_CORE_HBW_BASE 0x41CCC00ull
+#define DCORE0_EDMA0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_EDMA0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_EDMA0_MSTR_IF_CORE_LBW_BASE 0x41CCD80ull
+#define DCORE0_EDMA0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_EDMA0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_EDMA0_MSTR_IF_SPECIAL_BASE 0x41CCE80ull
+#define DCORE0_EDMA0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_EDMA1_QM_DCCM_BASE 0x41D0000ull
+#define DCORE0_EDMA1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE0_EDMA1_QM_DCCM_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_ARC_AUX_BASE 0x41D8000ull
+#define DCORE0_EDMA1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE0_EDMA1_QM_ARC_AUX_SPECIAL_BASE 0x41D8E80ull
+#define DCORE0_EDMA1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE0_EDMA1_QM_BASE 0x41DA000ull
+#define DCORE0_EDMA1_QM_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_QM_SECTION 0x9000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x41DA900ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x41DA908ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x41DA910ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x41DA918ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x41DA920ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x41DA928ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x41DA930ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x41DA938ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x41DA940ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x41DA948ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x41DA950ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x41DA958ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x41DA960ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x41DA968ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x41DA970ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x41DA978ull
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE0_EDMA1_QM_AXUSER_SECURED_BASE 0x41DAB00ull
+#define DCORE0_EDMA1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE0_EDMA1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_BASE 0x41DAB80ull
+#define DCORE0_EDMA1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE0_EDMA1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_DBG_HBW_BASE 0x41DAC00ull
+#define DCORE0_EDMA1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_EDMA1_QM_DBG_LBW_BASE 0x41DAC80ull
+#define DCORE0_EDMA1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE0_EDMA1_QM_CGM_BASE 0x41DAD80ull
+#define DCORE0_EDMA1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE0_EDMA1_QM_CGM_SECTION 0x1000
+#define mmDCORE0_EDMA1_QM_SPECIAL_BASE 0x41DAE80ull
+#define DCORE0_EDMA1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE0_EDMA1_CORE_BASE 0x41DB000ull
+#define DCORE0_EDMA1_CORE_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_CORE_SECTION 0x8000
+#define mmDCORE0_EDMA1_CORE_CTX_AXUSER_BASE 0x41DB800ull
+#define DCORE0_EDMA1_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_EDMA1_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE0_EDMA1_CORE_CTX_BASE 0x41DB860ull
+#define DCORE0_EDMA1_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE0_EDMA1_CORE_CTX_SECTION 0x5A00
+#define mmDCORE0_EDMA1_CORE_KDMA_CGM_BASE 0x41DBE00ull
+#define DCORE0_EDMA1_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE0_EDMA1_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE0_EDMA1_CORE_SPECIAL_BASE 0x41DBE80ull
+#define DCORE0_EDMA1_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA1_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE0_EDMA1_MSTR_IF_RR_SHRD_HBW_BASE 0x41DC000ull
+#define DCORE0_EDMA1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_EDMA1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_EDMA1_MSTR_IF_RR_PRVT_HBW_BASE 0x41DC200ull
+#define DCORE0_EDMA1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_EDMA1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_EDMA1_MSTR_IF_RR_SHRD_LBW_BASE 0x41DC400ull
+#define DCORE0_EDMA1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_EDMA1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_EDMA1_MSTR_IF_RR_PRVT_LBW_BASE 0x41DC600ull
+#define DCORE0_EDMA1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_EDMA1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_EDMA1_MSTR_IF_E2E_CRDT_BASE 0x41DC800ull
+#define DCORE0_EDMA1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_EDMA1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_EDMA1_MSTR_IF_AXUSER_BASE 0x41DCA80ull
+#define DCORE0_EDMA1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_EDMA1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_EDMA1_MSTR_IF_DBG_HBW_BASE 0x41DCB00ull
+#define DCORE0_EDMA1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_EDMA1_MSTR_IF_DBG_LBW_BASE 0x41DCB80ull
+#define DCORE0_EDMA1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_EDMA1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_EDMA1_MSTR_IF_CORE_HBW_BASE 0x41DCC00ull
+#define DCORE0_EDMA1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_EDMA1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_EDMA1_MSTR_IF_CORE_LBW_BASE 0x41DCD80ull
+#define DCORE0_EDMA1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_EDMA1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_EDMA1_MSTR_IF_SPECIAL_BASE 0x41DCE80ull
+#define DCORE0_EDMA1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_EDMA1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE0_DEC0_CMD_BASE 0x41E0000ull
+#define DCORE0_DEC0_CMD_MAX_OFFSET 0x1100
+#define DCORE0_DEC0_CMD_SECTION 0x1000
+#define mmDCORE0_DEC0_VSI_BASE 0x41E1000ull
+#define DCORE0_DEC0_VSI_MAX_OFFSET 0x6FC0
+#define DCORE0_DEC0_VSI_SECTION 0x1000
+#define mmDCORE0_DEC0_L2C_BASE 0x41E2000ull
+#define DCORE0_DEC0_L2C_MAX_OFFSET 0x39C0
+#define DCORE0_DEC0_L2C_SECTION 0x1000
+#define mmDCORE0_VDEC0_BRDG_CTRL_BASE 0x41E3000ull
+#define DCORE0_VDEC0_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x41E3800ull
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x41E3900ull
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x41E3A00ull
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x41E3B00ull
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_BASE 0x41E3C00ull
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE0_VDEC0_BRDG_CTRL_SPECIAL_BASE 0x41E3E80ull
+#define DCORE0_VDEC0_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_VDEC0_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_VDEC0_CTRL_BASE 0x41E4000ull
+#define DCORE0_VDEC0_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_CTRL_SECTION 0xE800
+#define mmDCORE0_VDEC0_CTRL_SPECIAL_BASE 0x41E4E80ull
+#define DCORE0_VDEC0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_VDEC0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE 0x41E5000ull
+#define DCORE0_VDEC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_VDEC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_VDEC0_MSTR_IF_RR_PRVT_HBW_BASE 0x41E5200ull
+#define DCORE0_VDEC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_VDEC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_VDEC0_MSTR_IF_RR_SHRD_LBW_BASE 0x41E5400ull
+#define DCORE0_VDEC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_VDEC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_VDEC0_MSTR_IF_RR_PRVT_LBW_BASE 0x41E5600ull
+#define DCORE0_VDEC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_VDEC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_VDEC0_MSTR_IF_E2E_CRDT_BASE 0x41E5800ull
+#define DCORE0_VDEC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_VDEC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_VDEC0_MSTR_IF_AXUSER_BASE 0x41E5A80ull
+#define DCORE0_VDEC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_VDEC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_VDEC0_MSTR_IF_DBG_HBW_BASE 0x41E5B00ull
+#define DCORE0_VDEC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_VDEC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_VDEC0_MSTR_IF_DBG_LBW_BASE 0x41E5B80ull
+#define DCORE0_VDEC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_VDEC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_VDEC0_MSTR_IF_CORE_HBW_BASE 0x41E5C00ull
+#define DCORE0_VDEC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_VDEC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_VDEC0_MSTR_IF_CORE_LBW_BASE 0x41E5D80ull
+#define DCORE0_VDEC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_VDEC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_VDEC0_MSTR_IF_SPECIAL_BASE 0x41E5E80ull
+#define DCORE0_VDEC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_VDEC0_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE0_DEC1_CMD_BASE 0x41F0000ull
+#define DCORE0_DEC1_CMD_MAX_OFFSET 0x1100
+#define DCORE0_DEC1_CMD_SECTION 0x1000
+#define mmDCORE0_DEC1_VSI_BASE 0x41F1000ull
+#define DCORE0_DEC1_VSI_MAX_OFFSET 0x6FC0
+#define DCORE0_DEC1_VSI_SECTION 0x1000
+#define mmDCORE0_DEC1_L2C_BASE 0x41F2000ull
+#define DCORE0_DEC1_L2C_MAX_OFFSET 0x39C0
+#define DCORE0_DEC1_L2C_SECTION 0x1000
+#define mmDCORE0_VDEC1_BRDG_CTRL_BASE 0x41F3000ull
+#define DCORE0_VDEC1_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x41F3800ull
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x41F3900ull
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x41F3A00ull
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x41F3B00ull
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE0_VDEC1_BRDG_CTRL_AXUSER_DEC_BASE 0x41F3C00ull
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE0_VDEC1_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE0_VDEC1_BRDG_CTRL_SPECIAL_BASE 0x41F3E80ull
+#define DCORE0_VDEC1_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_VDEC1_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_VDEC1_CTRL_BASE 0x41F4000ull
+#define DCORE0_VDEC1_CTRL_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_CTRL_SECTION 0xE800
+#define mmDCORE0_VDEC1_CTRL_SPECIAL_BASE 0x41F4E80ull
+#define DCORE0_VDEC1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_VDEC1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE 0x41F5000ull
+#define DCORE0_VDEC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_VDEC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE0_VDEC1_MSTR_IF_RR_PRVT_HBW_BASE 0x41F5200ull
+#define DCORE0_VDEC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE0_VDEC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE0_VDEC1_MSTR_IF_RR_SHRD_LBW_BASE 0x41F5400ull
+#define DCORE0_VDEC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_VDEC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE0_VDEC1_MSTR_IF_RR_PRVT_LBW_BASE 0x41F5600ull
+#define DCORE0_VDEC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE0_VDEC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE0_VDEC1_MSTR_IF_E2E_CRDT_BASE 0x41F5800ull
+#define DCORE0_VDEC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE0_VDEC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE0_VDEC1_MSTR_IF_AXUSER_BASE 0x41F5A80ull
+#define DCORE0_VDEC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE0_VDEC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE0_VDEC1_MSTR_IF_DBG_HBW_BASE 0x41F5B00ull
+#define DCORE0_VDEC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE0_VDEC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE0_VDEC1_MSTR_IF_DBG_LBW_BASE 0x41F5B80ull
+#define DCORE0_VDEC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE0_VDEC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE0_VDEC1_MSTR_IF_CORE_HBW_BASE 0x41F5C00ull
+#define DCORE0_VDEC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE0_VDEC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE0_VDEC1_MSTR_IF_CORE_LBW_BASE 0x41F5D80ull
+#define DCORE0_VDEC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE0_VDEC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE0_VDEC1_MSTR_IF_SPECIAL_BASE 0x41F5E80ull
+#define DCORE0_VDEC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_VDEC1_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE1_TPC0_QM_DCCM_BASE 0x4200000ull
+#define DCORE1_TPC0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC0_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_ARC_AUX_BASE 0x4208000ull
+#define DCORE1_TPC0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_TPC0_QM_ARC_AUX_SPECIAL_BASE 0x4208E80ull
+#define DCORE1_TPC0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC0_QM_BASE 0x420A000ull
+#define DCORE1_TPC0_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_QM_SECTION 0x9000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x420A900ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x420A908ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x420A910ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x420A918ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x420A920ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x420A928ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x420A930ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x420A938ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x420A940ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x420A948ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x420A950ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x420A958ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x420A960ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x420A968ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x420A970ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x420A978ull
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC0_QM_AXUSER_SECURED_BASE 0x420AB00ull
+#define DCORE1_TPC0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_AXUSER_NONSECURED_BASE 0x420AB80ull
+#define DCORE1_TPC0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_DBG_HBW_BASE 0x420AC00ull
+#define DCORE1_TPC0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC0_QM_DBG_LBW_BASE 0x420AC80ull
+#define DCORE1_TPC0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC0_QM_CGM_BASE 0x420AD80ull
+#define DCORE1_TPC0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC0_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC0_QM_SPECIAL_BASE 0x420AE80ull
+#define DCORE1_TPC0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_0_BASE 0x420B000ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_BASE 0x420B000ull
+#define DCORE1_TPC0_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC0_CFG_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_1_BASE 0x420B050ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_2_BASE 0x420B0A0ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_3_BASE 0x420B0F0ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_4_BASE 0x420B140ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_5_BASE 0x420B190ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_6_BASE 0x420B1E0ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_7_BASE 0x420B230ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_8_BASE 0x420B280ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_9_BASE 0x420B2D0ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_10_BASE 0x420B320ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_11_BASE 0x420B370ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_12_BASE 0x420B3C0ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_13_BASE 0x420B410ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_14_BASE 0x420B460ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_TENSOR_15_BASE 0x420B4B0ull
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_KERNEL_SYNC_OBJECT_BASE 0x420B500ull
+#define DCORE1_TPC0_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC0_CFG_KERNEL_BASE 0x420B508ull
+#define DCORE1_TPC0_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC0_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_0_BASE 0x420B5DCull
+#define DCORE1_TPC0_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_1_BASE 0x420B62Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_2_BASE 0x420B67Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_3_BASE 0x420B6CCull
+#define DCORE1_TPC0_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_4_BASE 0x420B71Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_5_BASE 0x420B76Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_6_BASE 0x420B7BCull
+#define DCORE1_TPC0_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_7_BASE 0x420B80Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_8_BASE 0x420B85Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_9_BASE 0x420B8ACull
+#define DCORE1_TPC0_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_10_BASE 0x420B8FCull
+#define DCORE1_TPC0_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_11_BASE 0x420B94Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_12_BASE 0x420B99Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_13_BASE 0x420B9ECull
+#define DCORE1_TPC0_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_14_BASE 0x420BA3Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_TENSOR_15_BASE 0x420BA8Cull
+#define DCORE1_TPC0_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC0_CFG_QM_SYNC_OBJECT_BASE 0x420BADCull
+#define DCORE1_TPC0_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC0_CFG_QM_BASE 0x420BAE4ull
+#define DCORE1_TPC0_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC0_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC0_CFG_AXUSER_BASE 0x420BE00ull
+#define DCORE1_TPC0_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC0_CFG_SPECIAL_BASE 0x420BE80ull
+#define DCORE1_TPC0_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC0_MSTR_IF_RR_SHRD_HBW_BASE 0x420C000ull
+#define DCORE1_TPC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_TPC0_MSTR_IF_RR_PRVT_HBW_BASE 0x420C200ull
+#define DCORE1_TPC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_TPC0_MSTR_IF_RR_SHRD_LBW_BASE 0x420C400ull
+#define DCORE1_TPC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_TPC0_MSTR_IF_RR_PRVT_LBW_BASE 0x420C600ull
+#define DCORE1_TPC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_TPC0_MSTR_IF_E2E_CRDT_BASE 0x420C800ull
+#define DCORE1_TPC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_TPC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_TPC0_MSTR_IF_AXUSER_BASE 0x420CA80ull
+#define DCORE1_TPC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC0_MSTR_IF_DBG_HBW_BASE 0x420CB00ull
+#define DCORE1_TPC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC0_MSTR_IF_DBG_LBW_BASE 0x420CB80ull
+#define DCORE1_TPC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_TPC0_MSTR_IF_CORE_HBW_BASE 0x420CC00ull
+#define DCORE1_TPC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_TPC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_TPC0_MSTR_IF_CORE_LBW_BASE 0x420CD80ull
+#define DCORE1_TPC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_TPC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_TPC0_MSTR_IF_SPECIAL_BASE 0x420CE80ull
+#define DCORE1_TPC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_TPC1_QM_DCCM_BASE 0x4210000ull
+#define DCORE1_TPC1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC1_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_ARC_AUX_BASE 0x4218000ull
+#define DCORE1_TPC1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_TPC1_QM_ARC_AUX_SPECIAL_BASE 0x4218E80ull
+#define DCORE1_TPC1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC1_QM_BASE 0x421A000ull
+#define DCORE1_TPC1_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_QM_SECTION 0x9000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x421A900ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x421A908ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x421A910ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x421A918ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x421A920ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x421A928ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x421A930ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x421A938ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x421A940ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x421A948ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x421A950ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x421A958ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x421A960ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x421A968ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x421A970ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x421A978ull
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC1_QM_AXUSER_SECURED_BASE 0x421AB00ull
+#define DCORE1_TPC1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_AXUSER_NONSECURED_BASE 0x421AB80ull
+#define DCORE1_TPC1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_DBG_HBW_BASE 0x421AC00ull
+#define DCORE1_TPC1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC1_QM_DBG_LBW_BASE 0x421AC80ull
+#define DCORE1_TPC1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC1_QM_CGM_BASE 0x421AD80ull
+#define DCORE1_TPC1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC1_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC1_QM_SPECIAL_BASE 0x421AE80ull
+#define DCORE1_TPC1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_0_BASE 0x421B000ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC1_CFG_BASE 0x421B000ull
+#define DCORE1_TPC1_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC1_CFG_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_1_BASE 0x421B050ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_2_BASE 0x421B0A0ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_3_BASE 0x421B0F0ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_4_BASE 0x421B140ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_5_BASE 0x421B190ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_6_BASE 0x421B1E0ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_7_BASE 0x421B230ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_8_BASE 0x421B280ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_9_BASE 0x421B2D0ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_10_BASE 0x421B320ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_11_BASE 0x421B370ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_12_BASE 0x421B3C0ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_13_BASE 0x421B410ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_14_BASE 0x421B460ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_TENSOR_15_BASE 0x421B4B0ull
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_KERNEL_SYNC_OBJECT_BASE 0x421B500ull
+#define DCORE1_TPC1_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC1_CFG_KERNEL_BASE 0x421B508ull
+#define DCORE1_TPC1_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC1_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_0_BASE 0x421B5DCull
+#define DCORE1_TPC1_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_1_BASE 0x421B62Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_2_BASE 0x421B67Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_3_BASE 0x421B6CCull
+#define DCORE1_TPC1_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_4_BASE 0x421B71Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_5_BASE 0x421B76Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_6_BASE 0x421B7BCull
+#define DCORE1_TPC1_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_7_BASE 0x421B80Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_8_BASE 0x421B85Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_9_BASE 0x421B8ACull
+#define DCORE1_TPC1_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_10_BASE 0x421B8FCull
+#define DCORE1_TPC1_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_11_BASE 0x421B94Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_12_BASE 0x421B99Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_13_BASE 0x421B9ECull
+#define DCORE1_TPC1_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_14_BASE 0x421BA3Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_TENSOR_15_BASE 0x421BA8Cull
+#define DCORE1_TPC1_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC1_CFG_QM_SYNC_OBJECT_BASE 0x421BADCull
+#define DCORE1_TPC1_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC1_CFG_QM_BASE 0x421BAE4ull
+#define DCORE1_TPC1_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC1_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC1_CFG_AXUSER_BASE 0x421BE00ull
+#define DCORE1_TPC1_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC1_CFG_SPECIAL_BASE 0x421BE80ull
+#define DCORE1_TPC1_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC1_MSTR_IF_RR_SHRD_HBW_BASE 0x421C000ull
+#define DCORE1_TPC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_TPC1_MSTR_IF_RR_PRVT_HBW_BASE 0x421C200ull
+#define DCORE1_TPC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_TPC1_MSTR_IF_RR_SHRD_LBW_BASE 0x421C400ull
+#define DCORE1_TPC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_TPC1_MSTR_IF_RR_PRVT_LBW_BASE 0x421C600ull
+#define DCORE1_TPC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_TPC1_MSTR_IF_E2E_CRDT_BASE 0x421C800ull
+#define DCORE1_TPC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_TPC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_TPC1_MSTR_IF_AXUSER_BASE 0x421CA80ull
+#define DCORE1_TPC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC1_MSTR_IF_DBG_HBW_BASE 0x421CB00ull
+#define DCORE1_TPC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC1_MSTR_IF_DBG_LBW_BASE 0x421CB80ull
+#define DCORE1_TPC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_TPC1_MSTR_IF_CORE_HBW_BASE 0x421CC00ull
+#define DCORE1_TPC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_TPC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_TPC1_MSTR_IF_CORE_LBW_BASE 0x421CD80ull
+#define DCORE1_TPC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_TPC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_TPC1_MSTR_IF_SPECIAL_BASE 0x421CE80ull
+#define DCORE1_TPC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_TPC2_QM_DCCM_BASE 0x4220000ull
+#define DCORE1_TPC2_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC2_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_ARC_AUX_BASE 0x4228000ull
+#define DCORE1_TPC2_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_TPC2_QM_ARC_AUX_SPECIAL_BASE 0x4228E80ull
+#define DCORE1_TPC2_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC2_QM_BASE 0x422A000ull
+#define DCORE1_TPC2_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_QM_SECTION 0x9000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR0_BASE 0x422A900ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR1_BASE 0x422A908ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR2_BASE 0x422A910ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR3_BASE 0x422A918ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR4_BASE 0x422A920ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR5_BASE 0x422A928ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR6_BASE 0x422A930ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR7_BASE 0x422A938ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR8_BASE 0x422A940ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR9_BASE 0x422A948ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR10_BASE 0x422A950ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR11_BASE 0x422A958ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR12_BASE 0x422A960ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR13_BASE 0x422A968ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR14_BASE 0x422A970ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR15_BASE 0x422A978ull
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC2_QM_AXUSER_SECURED_BASE 0x422AB00ull
+#define DCORE1_TPC2_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_AXUSER_NONSECURED_BASE 0x422AB80ull
+#define DCORE1_TPC2_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_DBG_HBW_BASE 0x422AC00ull
+#define DCORE1_TPC2_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC2_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC2_QM_DBG_LBW_BASE 0x422AC80ull
+#define DCORE1_TPC2_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC2_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC2_QM_CGM_BASE 0x422AD80ull
+#define DCORE1_TPC2_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC2_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC2_QM_SPECIAL_BASE 0x422AE80ull
+#define DCORE1_TPC2_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_0_BASE 0x422B000ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC2_CFG_BASE 0x422B000ull
+#define DCORE1_TPC2_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC2_CFG_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_1_BASE 0x422B050ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_2_BASE 0x422B0A0ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_3_BASE 0x422B0F0ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_4_BASE 0x422B140ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_5_BASE 0x422B190ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_6_BASE 0x422B1E0ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_7_BASE 0x422B230ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_8_BASE 0x422B280ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_9_BASE 0x422B2D0ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_10_BASE 0x422B320ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_11_BASE 0x422B370ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_12_BASE 0x422B3C0ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_13_BASE 0x422B410ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_14_BASE 0x422B460ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_TENSOR_15_BASE 0x422B4B0ull
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_KERNEL_SYNC_OBJECT_BASE 0x422B500ull
+#define DCORE1_TPC2_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC2_CFG_KERNEL_BASE 0x422B508ull
+#define DCORE1_TPC2_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC2_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_0_BASE 0x422B5DCull
+#define DCORE1_TPC2_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_1_BASE 0x422B62Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_2_BASE 0x422B67Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_3_BASE 0x422B6CCull
+#define DCORE1_TPC2_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_4_BASE 0x422B71Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_5_BASE 0x422B76Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_6_BASE 0x422B7BCull
+#define DCORE1_TPC2_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_7_BASE 0x422B80Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_8_BASE 0x422B85Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_9_BASE 0x422B8ACull
+#define DCORE1_TPC2_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_10_BASE 0x422B8FCull
+#define DCORE1_TPC2_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_11_BASE 0x422B94Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_12_BASE 0x422B99Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_13_BASE 0x422B9ECull
+#define DCORE1_TPC2_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_14_BASE 0x422BA3Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_TENSOR_15_BASE 0x422BA8Cull
+#define DCORE1_TPC2_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC2_CFG_QM_SYNC_OBJECT_BASE 0x422BADCull
+#define DCORE1_TPC2_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC2_CFG_QM_BASE 0x422BAE4ull
+#define DCORE1_TPC2_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC2_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC2_CFG_AXUSER_BASE 0x422BE00ull
+#define DCORE1_TPC2_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC2_CFG_SPECIAL_BASE 0x422BE80ull
+#define DCORE1_TPC2_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC2_MSTR_IF_RR_SHRD_HBW_BASE 0x422C000ull
+#define DCORE1_TPC2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_TPC2_MSTR_IF_RR_PRVT_HBW_BASE 0x422C200ull
+#define DCORE1_TPC2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_TPC2_MSTR_IF_RR_SHRD_LBW_BASE 0x422C400ull
+#define DCORE1_TPC2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_TPC2_MSTR_IF_RR_PRVT_LBW_BASE 0x422C600ull
+#define DCORE1_TPC2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_TPC2_MSTR_IF_E2E_CRDT_BASE 0x422C800ull
+#define DCORE1_TPC2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_TPC2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_TPC2_MSTR_IF_AXUSER_BASE 0x422CA80ull
+#define DCORE1_TPC2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC2_MSTR_IF_DBG_HBW_BASE 0x422CB00ull
+#define DCORE1_TPC2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC2_MSTR_IF_DBG_LBW_BASE 0x422CB80ull
+#define DCORE1_TPC2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_TPC2_MSTR_IF_CORE_HBW_BASE 0x422CC00ull
+#define DCORE1_TPC2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_TPC2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_TPC2_MSTR_IF_CORE_LBW_BASE 0x422CD80ull
+#define DCORE1_TPC2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_TPC2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_TPC2_MSTR_IF_SPECIAL_BASE 0x422CE80ull
+#define DCORE1_TPC2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC2_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_TPC3_QM_DCCM_BASE 0x4230000ull
+#define DCORE1_TPC3_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC3_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_ARC_AUX_BASE 0x4238000ull
+#define DCORE1_TPC3_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_TPC3_QM_ARC_AUX_SPECIAL_BASE 0x4238E80ull
+#define DCORE1_TPC3_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC3_QM_BASE 0x423A000ull
+#define DCORE1_TPC3_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_QM_SECTION 0x9000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR0_BASE 0x423A900ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR1_BASE 0x423A908ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR2_BASE 0x423A910ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR3_BASE 0x423A918ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR4_BASE 0x423A920ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR5_BASE 0x423A928ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR6_BASE 0x423A930ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR7_BASE 0x423A938ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR8_BASE 0x423A940ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR9_BASE 0x423A948ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR10_BASE 0x423A950ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR11_BASE 0x423A958ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR12_BASE 0x423A960ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR13_BASE 0x423A968ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR14_BASE 0x423A970ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR15_BASE 0x423A978ull
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC3_QM_AXUSER_SECURED_BASE 0x423AB00ull
+#define DCORE1_TPC3_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_AXUSER_NONSECURED_BASE 0x423AB80ull
+#define DCORE1_TPC3_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_DBG_HBW_BASE 0x423AC00ull
+#define DCORE1_TPC3_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC3_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC3_QM_DBG_LBW_BASE 0x423AC80ull
+#define DCORE1_TPC3_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC3_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC3_QM_CGM_BASE 0x423AD80ull
+#define DCORE1_TPC3_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC3_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC3_QM_SPECIAL_BASE 0x423AE80ull
+#define DCORE1_TPC3_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_0_BASE 0x423B000ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC3_CFG_BASE 0x423B000ull
+#define DCORE1_TPC3_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC3_CFG_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_1_BASE 0x423B050ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_2_BASE 0x423B0A0ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_3_BASE 0x423B0F0ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_4_BASE 0x423B140ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_5_BASE 0x423B190ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_6_BASE 0x423B1E0ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_7_BASE 0x423B230ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_8_BASE 0x423B280ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_9_BASE 0x423B2D0ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_10_BASE 0x423B320ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_11_BASE 0x423B370ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_12_BASE 0x423B3C0ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_13_BASE 0x423B410ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_14_BASE 0x423B460ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_TENSOR_15_BASE 0x423B4B0ull
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_KERNEL_SYNC_OBJECT_BASE 0x423B500ull
+#define DCORE1_TPC3_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC3_CFG_KERNEL_BASE 0x423B508ull
+#define DCORE1_TPC3_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC3_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_0_BASE 0x423B5DCull
+#define DCORE1_TPC3_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_1_BASE 0x423B62Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_2_BASE 0x423B67Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_3_BASE 0x423B6CCull
+#define DCORE1_TPC3_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_4_BASE 0x423B71Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_5_BASE 0x423B76Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_6_BASE 0x423B7BCull
+#define DCORE1_TPC3_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_7_BASE 0x423B80Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_8_BASE 0x423B85Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_9_BASE 0x423B8ACull
+#define DCORE1_TPC3_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_10_BASE 0x423B8FCull
+#define DCORE1_TPC3_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_11_BASE 0x423B94Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_12_BASE 0x423B99Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_13_BASE 0x423B9ECull
+#define DCORE1_TPC3_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_14_BASE 0x423BA3Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_TENSOR_15_BASE 0x423BA8Cull
+#define DCORE1_TPC3_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC3_CFG_QM_SYNC_OBJECT_BASE 0x423BADCull
+#define DCORE1_TPC3_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC3_CFG_QM_BASE 0x423BAE4ull
+#define DCORE1_TPC3_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC3_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC3_CFG_AXUSER_BASE 0x423BE00ull
+#define DCORE1_TPC3_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC3_CFG_SPECIAL_BASE 0x423BE80ull
+#define DCORE1_TPC3_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC3_MSTR_IF_RR_SHRD_HBW_BASE 0x423C000ull
+#define DCORE1_TPC3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_TPC3_MSTR_IF_RR_PRVT_HBW_BASE 0x423C200ull
+#define DCORE1_TPC3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_TPC3_MSTR_IF_RR_SHRD_LBW_BASE 0x423C400ull
+#define DCORE1_TPC3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_TPC3_MSTR_IF_RR_PRVT_LBW_BASE 0x423C600ull
+#define DCORE1_TPC3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_TPC3_MSTR_IF_E2E_CRDT_BASE 0x423C800ull
+#define DCORE1_TPC3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_TPC3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_TPC3_MSTR_IF_AXUSER_BASE 0x423CA80ull
+#define DCORE1_TPC3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC3_MSTR_IF_DBG_HBW_BASE 0x423CB00ull
+#define DCORE1_TPC3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC3_MSTR_IF_DBG_LBW_BASE 0x423CB80ull
+#define DCORE1_TPC3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_TPC3_MSTR_IF_CORE_HBW_BASE 0x423CC00ull
+#define DCORE1_TPC3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_TPC3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_TPC3_MSTR_IF_CORE_LBW_BASE 0x423CD80ull
+#define DCORE1_TPC3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_TPC3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_TPC3_MSTR_IF_SPECIAL_BASE 0x423CE80ull
+#define DCORE1_TPC3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC3_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_TPC4_QM_DCCM_BASE 0x4240000ull
+#define DCORE1_TPC4_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC4_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_ARC_AUX_BASE 0x4248000ull
+#define DCORE1_TPC4_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_TPC4_QM_ARC_AUX_SPECIAL_BASE 0x4248E80ull
+#define DCORE1_TPC4_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC4_QM_BASE 0x424A000ull
+#define DCORE1_TPC4_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_QM_SECTION 0x9000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR0_BASE 0x424A900ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR1_BASE 0x424A908ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR2_BASE 0x424A910ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR3_BASE 0x424A918ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR4_BASE 0x424A920ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR5_BASE 0x424A928ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR6_BASE 0x424A930ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR7_BASE 0x424A938ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR8_BASE 0x424A940ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR9_BASE 0x424A948ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR10_BASE 0x424A950ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR11_BASE 0x424A958ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR12_BASE 0x424A960ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR13_BASE 0x424A968ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR14_BASE 0x424A970ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR15_BASE 0x424A978ull
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC4_QM_AXUSER_SECURED_BASE 0x424AB00ull
+#define DCORE1_TPC4_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_AXUSER_NONSECURED_BASE 0x424AB80ull
+#define DCORE1_TPC4_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_DBG_HBW_BASE 0x424AC00ull
+#define DCORE1_TPC4_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC4_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC4_QM_DBG_LBW_BASE 0x424AC80ull
+#define DCORE1_TPC4_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC4_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC4_QM_CGM_BASE 0x424AD80ull
+#define DCORE1_TPC4_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC4_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC4_QM_SPECIAL_BASE 0x424AE80ull
+#define DCORE1_TPC4_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_0_BASE 0x424B000ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC4_CFG_BASE 0x424B000ull
+#define DCORE1_TPC4_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC4_CFG_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_1_BASE 0x424B050ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_2_BASE 0x424B0A0ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_3_BASE 0x424B0F0ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_4_BASE 0x424B140ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_5_BASE 0x424B190ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_6_BASE 0x424B1E0ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_7_BASE 0x424B230ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_8_BASE 0x424B280ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_9_BASE 0x424B2D0ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_10_BASE 0x424B320ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_11_BASE 0x424B370ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_12_BASE 0x424B3C0ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_13_BASE 0x424B410ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_14_BASE 0x424B460ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_TENSOR_15_BASE 0x424B4B0ull
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_KERNEL_SYNC_OBJECT_BASE 0x424B500ull
+#define DCORE1_TPC4_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC4_CFG_KERNEL_BASE 0x424B508ull
+#define DCORE1_TPC4_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC4_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_0_BASE 0x424B5DCull
+#define DCORE1_TPC4_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_1_BASE 0x424B62Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_2_BASE 0x424B67Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_3_BASE 0x424B6CCull
+#define DCORE1_TPC4_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_4_BASE 0x424B71Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_5_BASE 0x424B76Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_6_BASE 0x424B7BCull
+#define DCORE1_TPC4_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_7_BASE 0x424B80Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_8_BASE 0x424B85Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_9_BASE 0x424B8ACull
+#define DCORE1_TPC4_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_10_BASE 0x424B8FCull
+#define DCORE1_TPC4_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_11_BASE 0x424B94Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_12_BASE 0x424B99Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_13_BASE 0x424B9ECull
+#define DCORE1_TPC4_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_14_BASE 0x424BA3Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_TENSOR_15_BASE 0x424BA8Cull
+#define DCORE1_TPC4_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC4_CFG_QM_SYNC_OBJECT_BASE 0x424BADCull
+#define DCORE1_TPC4_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC4_CFG_QM_BASE 0x424BAE4ull
+#define DCORE1_TPC4_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC4_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC4_CFG_AXUSER_BASE 0x424BE00ull
+#define DCORE1_TPC4_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC4_CFG_SPECIAL_BASE 0x424BE80ull
+#define DCORE1_TPC4_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC4_MSTR_IF_RR_SHRD_HBW_BASE 0x424C000ull
+#define DCORE1_TPC4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_TPC4_MSTR_IF_RR_PRVT_HBW_BASE 0x424C200ull
+#define DCORE1_TPC4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_TPC4_MSTR_IF_RR_SHRD_LBW_BASE 0x424C400ull
+#define DCORE1_TPC4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_TPC4_MSTR_IF_RR_PRVT_LBW_BASE 0x424C600ull
+#define DCORE1_TPC4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_TPC4_MSTR_IF_E2E_CRDT_BASE 0x424C800ull
+#define DCORE1_TPC4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_TPC4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_TPC4_MSTR_IF_AXUSER_BASE 0x424CA80ull
+#define DCORE1_TPC4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC4_MSTR_IF_DBG_HBW_BASE 0x424CB00ull
+#define DCORE1_TPC4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC4_MSTR_IF_DBG_LBW_BASE 0x424CB80ull
+#define DCORE1_TPC4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_TPC4_MSTR_IF_CORE_HBW_BASE 0x424CC00ull
+#define DCORE1_TPC4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_TPC4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_TPC4_MSTR_IF_CORE_LBW_BASE 0x424CD80ull
+#define DCORE1_TPC4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_TPC4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_TPC4_MSTR_IF_SPECIAL_BASE 0x424CE80ull
+#define DCORE1_TPC4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC4_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_TPC5_QM_DCCM_BASE 0x4250000ull
+#define DCORE1_TPC5_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_TPC5_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_ARC_AUX_BASE 0x4258000ull
+#define DCORE1_TPC5_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_TPC5_QM_ARC_AUX_SPECIAL_BASE 0x4258E80ull
+#define DCORE1_TPC5_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TPC5_QM_BASE 0x425A000ull
+#define DCORE1_TPC5_QM_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_QM_SECTION 0x9000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR0_BASE 0x425A900ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR1_BASE 0x425A908ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR2_BASE 0x425A910ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR3_BASE 0x425A918ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR4_BASE 0x425A920ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR5_BASE 0x425A928ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR6_BASE 0x425A930ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR7_BASE 0x425A938ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR8_BASE 0x425A940ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR9_BASE 0x425A948ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR10_BASE 0x425A950ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR11_BASE 0x425A958ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR12_BASE 0x425A960ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR13_BASE 0x425A968ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR14_BASE 0x425A970ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR15_BASE 0x425A978ull
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_TPC5_QM_AXUSER_SECURED_BASE 0x425AB00ull
+#define DCORE1_TPC5_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_AXUSER_NONSECURED_BASE 0x425AB80ull
+#define DCORE1_TPC5_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_DBG_HBW_BASE 0x425AC00ull
+#define DCORE1_TPC5_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC5_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC5_QM_DBG_LBW_BASE 0x425AC80ull
+#define DCORE1_TPC5_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC5_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_TPC5_QM_CGM_BASE 0x425AD80ull
+#define DCORE1_TPC5_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_TPC5_QM_CGM_SECTION 0x1000
+#define mmDCORE1_TPC5_QM_SPECIAL_BASE 0x425AE80ull
+#define DCORE1_TPC5_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_0_BASE 0x425B000ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_BASE 0x425B000ull
+#define DCORE1_TPC5_CFG_MAX_OFFSET 0x1000
+#define DCORE1_TPC5_CFG_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_1_BASE 0x425B050ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_2_BASE 0x425B0A0ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_3_BASE 0x425B0F0ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_4_BASE 0x425B140ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_5_BASE 0x425B190ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_6_BASE 0x425B1E0ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_7_BASE 0x425B230ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_8_BASE 0x425B280ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_9_BASE 0x425B2D0ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_10_BASE 0x425B320ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_11_BASE 0x425B370ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_12_BASE 0x425B3C0ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_13_BASE 0x425B410ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_14_BASE 0x425B460ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_TENSOR_15_BASE 0x425B4B0ull
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_KERNEL_SYNC_OBJECT_BASE 0x425B500ull
+#define DCORE1_TPC5_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC5_CFG_KERNEL_BASE 0x425B508ull
+#define DCORE1_TPC5_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE1_TPC5_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_0_BASE 0x425B5DCull
+#define DCORE1_TPC5_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_1_BASE 0x425B62Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_2_BASE 0x425B67Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_3_BASE 0x425B6CCull
+#define DCORE1_TPC5_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_4_BASE 0x425B71Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_5_BASE 0x425B76Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_6_BASE 0x425B7BCull
+#define DCORE1_TPC5_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_7_BASE 0x425B80Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_8_BASE 0x425B85Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_9_BASE 0x425B8ACull
+#define DCORE1_TPC5_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_10_BASE 0x425B8FCull
+#define DCORE1_TPC5_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_11_BASE 0x425B94Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_12_BASE 0x425B99Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_13_BASE 0x425B9ECull
+#define DCORE1_TPC5_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_14_BASE 0x425BA3Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_TENSOR_15_BASE 0x425BA8Cull
+#define DCORE1_TPC5_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE1_TPC5_CFG_QM_SYNC_OBJECT_BASE 0x425BADCull
+#define DCORE1_TPC5_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE1_TPC5_CFG_QM_BASE 0x425BAE4ull
+#define DCORE1_TPC5_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE1_TPC5_CFG_QM_SECTION 0x31C0
+#define mmDCORE1_TPC5_CFG_AXUSER_BASE 0x425BE00ull
+#define DCORE1_TPC5_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC5_CFG_SPECIAL_BASE 0x425BE80ull
+#define DCORE1_TPC5_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC5_MSTR_IF_RR_SHRD_HBW_BASE 0x425C000ull
+#define DCORE1_TPC5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_TPC5_MSTR_IF_RR_PRVT_HBW_BASE 0x425C200ull
+#define DCORE1_TPC5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_TPC5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_TPC5_MSTR_IF_RR_SHRD_LBW_BASE 0x425C400ull
+#define DCORE1_TPC5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_TPC5_MSTR_IF_RR_PRVT_LBW_BASE 0x425C600ull
+#define DCORE1_TPC5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_TPC5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_TPC5_MSTR_IF_E2E_CRDT_BASE 0x425C800ull
+#define DCORE1_TPC5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_TPC5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_TPC5_MSTR_IF_AXUSER_BASE 0x425CA80ull
+#define DCORE1_TPC5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_TPC5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_TPC5_MSTR_IF_DBG_HBW_BASE 0x425CB00ull
+#define DCORE1_TPC5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_TPC5_MSTR_IF_DBG_LBW_BASE 0x425CB80ull
+#define DCORE1_TPC5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_TPC5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_TPC5_MSTR_IF_CORE_HBW_BASE 0x425CC00ull
+#define DCORE1_TPC5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_TPC5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_TPC5_MSTR_IF_CORE_LBW_BASE 0x425CD80ull
+#define DCORE1_TPC5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_TPC5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_TPC5_MSTR_IF_SPECIAL_BASE 0x425CE80ull
+#define DCORE1_TPC5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC5_MSTR_IF_SPECIAL_SECTION 0x23180
+#define mmDCORE1_HMMU0_MMU_BASE 0x4280000ull
+#define DCORE1_HMMU0_MMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_MMU_SECTION 0xE800
+#define mmDCORE1_HMMU0_MMU_SPECIAL_BASE 0x4280E80ull
+#define DCORE1_HMMU0_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU0_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU0_STLB_BASE 0x4281000ull
+#define DCORE1_HMMU0_STLB_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_STLB_SECTION 0xE800
+#define mmDCORE1_HMMU0_STLB_SPECIAL_BASE 0x4281E80ull
+#define DCORE1_HMMU0_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU0_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE1_HMMU0_SCRAMB_OUT_BASE 0x4283000ull
+#define DCORE1_HMMU0_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE1_HMMU0_SCRAMB_OUT_SPECIAL_BASE 0x4283E80ull
+#define DCORE1_HMMU0_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU0_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU0_MSTR_IF_RR_SHRD_HBW_BASE 0x4284000ull
+#define DCORE1_HMMU0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU0_MSTR_IF_RR_PRVT_HBW_BASE 0x4284200ull
+#define DCORE1_HMMU0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU0_MSTR_IF_RR_SHRD_LBW_BASE 0x4284400ull
+#define DCORE1_HMMU0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU0_MSTR_IF_RR_PRVT_LBW_BASE 0x4284600ull
+#define DCORE1_HMMU0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU0_MSTR_IF_E2E_CRDT_BASE 0x4284800ull
+#define DCORE1_HMMU0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_HMMU0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_HMMU0_MSTR_IF_AXUSER_BASE 0x4284A80ull
+#define DCORE1_HMMU0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_HMMU0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_HMMU0_MSTR_IF_DBG_HBW_BASE 0x4284B00ull
+#define DCORE1_HMMU0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_HMMU0_MSTR_IF_DBG_LBW_BASE 0x4284B80ull
+#define DCORE1_HMMU0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_HMMU0_MSTR_IF_CORE_HBW_BASE 0x4284C00ull
+#define DCORE1_HMMU0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_HMMU0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_HMMU0_MSTR_IF_CORE_LBW_BASE 0x4284D80ull
+#define DCORE1_HMMU0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_HMMU0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_HMMU0_MSTR_IF_SPECIAL_BASE 0x4284E80ull
+#define DCORE1_HMMU0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU0_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE1_HMMU1_MMU_BASE 0x4290000ull
+#define DCORE1_HMMU1_MMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_MMU_SECTION 0xE800
+#define mmDCORE1_HMMU1_MMU_SPECIAL_BASE 0x4290E80ull
+#define DCORE1_HMMU1_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU1_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU1_STLB_BASE 0x4291000ull
+#define DCORE1_HMMU1_STLB_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_STLB_SECTION 0xE800
+#define mmDCORE1_HMMU1_STLB_SPECIAL_BASE 0x4291E80ull
+#define DCORE1_HMMU1_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU1_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE1_HMMU1_SCRAMB_OUT_BASE 0x4293000ull
+#define DCORE1_HMMU1_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE1_HMMU1_SCRAMB_OUT_SPECIAL_BASE 0x4293E80ull
+#define DCORE1_HMMU1_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU1_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU1_MSTR_IF_RR_SHRD_HBW_BASE 0x4294000ull
+#define DCORE1_HMMU1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU1_MSTR_IF_RR_PRVT_HBW_BASE 0x4294200ull
+#define DCORE1_HMMU1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU1_MSTR_IF_RR_SHRD_LBW_BASE 0x4294400ull
+#define DCORE1_HMMU1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU1_MSTR_IF_RR_PRVT_LBW_BASE 0x4294600ull
+#define DCORE1_HMMU1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU1_MSTR_IF_E2E_CRDT_BASE 0x4294800ull
+#define DCORE1_HMMU1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_HMMU1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_HMMU1_MSTR_IF_AXUSER_BASE 0x4294A80ull
+#define DCORE1_HMMU1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_HMMU1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_HMMU1_MSTR_IF_DBG_HBW_BASE 0x4294B00ull
+#define DCORE1_HMMU1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_HMMU1_MSTR_IF_DBG_LBW_BASE 0x4294B80ull
+#define DCORE1_HMMU1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_HMMU1_MSTR_IF_CORE_HBW_BASE 0x4294C00ull
+#define DCORE1_HMMU1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_HMMU1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_HMMU1_MSTR_IF_CORE_LBW_BASE 0x4294D80ull
+#define DCORE1_HMMU1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_HMMU1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_HMMU1_MSTR_IF_SPECIAL_BASE 0x4294E80ull
+#define DCORE1_HMMU1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU1_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE1_HMMU2_MMU_BASE 0x42A0000ull
+#define DCORE1_HMMU2_MMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_MMU_SECTION 0xE800
+#define mmDCORE1_HMMU2_MMU_SPECIAL_BASE 0x42A0E80ull
+#define DCORE1_HMMU2_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU2_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU2_STLB_BASE 0x42A1000ull
+#define DCORE1_HMMU2_STLB_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_STLB_SECTION 0xE800
+#define mmDCORE1_HMMU2_STLB_SPECIAL_BASE 0x42A1E80ull
+#define DCORE1_HMMU2_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU2_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE1_HMMU2_SCRAMB_OUT_BASE 0x42A3000ull
+#define DCORE1_HMMU2_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE1_HMMU2_SCRAMB_OUT_SPECIAL_BASE 0x42A3E80ull
+#define DCORE1_HMMU2_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU2_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU2_MSTR_IF_RR_SHRD_HBW_BASE 0x42A4000ull
+#define DCORE1_HMMU2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU2_MSTR_IF_RR_PRVT_HBW_BASE 0x42A4200ull
+#define DCORE1_HMMU2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU2_MSTR_IF_RR_SHRD_LBW_BASE 0x42A4400ull
+#define DCORE1_HMMU2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU2_MSTR_IF_RR_PRVT_LBW_BASE 0x42A4600ull
+#define DCORE1_HMMU2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU2_MSTR_IF_E2E_CRDT_BASE 0x42A4800ull
+#define DCORE1_HMMU2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_HMMU2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_HMMU2_MSTR_IF_AXUSER_BASE 0x42A4A80ull
+#define DCORE1_HMMU2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_HMMU2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_HMMU2_MSTR_IF_DBG_HBW_BASE 0x42A4B00ull
+#define DCORE1_HMMU2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_HMMU2_MSTR_IF_DBG_LBW_BASE 0x42A4B80ull
+#define DCORE1_HMMU2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_HMMU2_MSTR_IF_CORE_HBW_BASE 0x42A4C00ull
+#define DCORE1_HMMU2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_HMMU2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_HMMU2_MSTR_IF_CORE_LBW_BASE 0x42A4D80ull
+#define DCORE1_HMMU2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_HMMU2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_HMMU2_MSTR_IF_SPECIAL_BASE 0x42A4E80ull
+#define DCORE1_HMMU2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU2_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE1_HMMU3_MMU_BASE 0x42B0000ull
+#define DCORE1_HMMU3_MMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_MMU_SECTION 0xE800
+#define mmDCORE1_HMMU3_MMU_SPECIAL_BASE 0x42B0E80ull
+#define DCORE1_HMMU3_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU3_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU3_STLB_BASE 0x42B1000ull
+#define DCORE1_HMMU3_STLB_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_STLB_SECTION 0xE800
+#define mmDCORE1_HMMU3_STLB_SPECIAL_BASE 0x42B1E80ull
+#define DCORE1_HMMU3_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU3_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE1_HMMU3_SCRAMB_OUT_BASE 0x42B3000ull
+#define DCORE1_HMMU3_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE1_HMMU3_SCRAMB_OUT_SPECIAL_BASE 0x42B3E80ull
+#define DCORE1_HMMU3_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU3_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HMMU3_MSTR_IF_RR_SHRD_HBW_BASE 0x42B4000ull
+#define DCORE1_HMMU3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU3_MSTR_IF_RR_PRVT_HBW_BASE 0x42B4200ull
+#define DCORE1_HMMU3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_HMMU3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_HMMU3_MSTR_IF_RR_SHRD_LBW_BASE 0x42B4400ull
+#define DCORE1_HMMU3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU3_MSTR_IF_RR_PRVT_LBW_BASE 0x42B4600ull
+#define DCORE1_HMMU3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_HMMU3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_HMMU3_MSTR_IF_E2E_CRDT_BASE 0x42B4800ull
+#define DCORE1_HMMU3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_HMMU3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_HMMU3_MSTR_IF_AXUSER_BASE 0x42B4A80ull
+#define DCORE1_HMMU3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_HMMU3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_HMMU3_MSTR_IF_DBG_HBW_BASE 0x42B4B00ull
+#define DCORE1_HMMU3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_HMMU3_MSTR_IF_DBG_LBW_BASE 0x42B4B80ull
+#define DCORE1_HMMU3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_HMMU3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_HMMU3_MSTR_IF_CORE_HBW_BASE 0x42B4C00ull
+#define DCORE1_HMMU3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_HMMU3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_HMMU3_MSTR_IF_CORE_LBW_BASE 0x42B4D80ull
+#define DCORE1_HMMU3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_HMMU3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_HMMU3_MSTR_IF_SPECIAL_BASE 0x42B4E80ull
+#define DCORE1_HMMU3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HMMU3_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE1_MME_QM_ARC_DCCM_BASE 0x42C0000ull
+#define DCORE1_MME_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_MME_QM_ARC_DCCM_SECTION 0x8000
+#define mmDCORE1_MME_QM_ARC_AUX_BASE 0x42C8000ull
+#define DCORE1_MME_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_MME_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_MME_QM_ARC_AUX_SPECIAL_BASE 0x42C8E80ull
+#define DCORE1_MME_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_QM_ARC_AUX_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_QM_ARC_DUP_ENG_BASE 0x42C9000ull
+#define DCORE1_MME_QM_ARC_DUP_ENG_MAX_OFFSET 0x1000
+#define DCORE1_MME_QM_ARC_DUP_ENG_SECTION 0x9000
+#define mmDCORE1_MME_QM_ARC_DUP_ENG_AXUSER_BASE 0x42C9900ull
+#define DCORE1_MME_QM_ARC_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_QM_ARC_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmDCORE1_MME_QM_ARC_DUP_ENG_SPECIAL_BASE 0x42C9E80ull
+#define DCORE1_MME_QM_ARC_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_QM_ARC_DUP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_QM_BASE 0x42CA000ull
+#define DCORE1_MME_QM_MAX_OFFSET 0x1000
+#define DCORE1_MME_QM_SECTION 0x9000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR0_BASE 0x42CA900ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR1_BASE 0x42CA908ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR2_BASE 0x42CA910ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR3_BASE 0x42CA918ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR4_BASE 0x42CA920ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR5_BASE 0x42CA928ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR6_BASE 0x42CA930ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR7_BASE 0x42CA938ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR8_BASE 0x42CA940ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR9_BASE 0x42CA948ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR10_BASE 0x42CA950ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR11_BASE 0x42CA958ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR12_BASE 0x42CA960ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR13_BASE 0x42CA968ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR14_BASE 0x42CA970ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_MME_QM_QMAN_WR64_BASE_ADDR15_BASE 0x42CA978ull
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_MME_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_MME_QM_AXUSER_SECURED_BASE 0x42CAB00ull
+#define DCORE1_MME_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_MME_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_MME_QM_AXUSER_NONSECURED_BASE 0x42CAB80ull
+#define DCORE1_MME_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_MME_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_MME_QM_DBG_HBW_BASE 0x42CAC00ull
+#define DCORE1_MME_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_QM_DBG_LBW_BASE 0x42CAC80ull
+#define DCORE1_MME_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_MME_QM_CGM_BASE 0x42CAD80ull
+#define DCORE1_MME_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_MME_QM_CGM_SECTION 0x1000
+#define mmDCORE1_MME_QM_SPECIAL_BASE 0x42CAE80ull
+#define DCORE1_MME_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_LO_BASE 0x42CB000ull
+#define DCORE1_MME_CTRL_LO_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_LO_SECTION 0x8000
+#define mmDCORE1_MME_CTRL_LO_ARCH_BASE_ADDR_BASE 0x42CB008ull
+#define DCORE1_MME_CTRL_LO_ARCH_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE1_MME_CTRL_LO_ARCH_BASE_ADDR_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_LO_ARCH_NON_TENSOR_START_BASE 0x42CB028ull
+#define DCORE1_MME_CTRL_LO_ARCH_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_LO_ARCH_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_LO_ARCH_TENSOR_A_BASE 0x42CB040ull
+#define DCORE1_MME_CTRL_LO_ARCH_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_LO_ARCH_TENSOR_A_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_LO_ARCH_TENSOR_B_BASE 0x42CB098ull
+#define DCORE1_MME_CTRL_LO_ARCH_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_LO_ARCH_TENSOR_B_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_LO_ARCH_TENSOR_COUT_BASE 0x42CB0F0ull
+#define DCORE1_MME_CTRL_LO_ARCH_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_LO_ARCH_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_BASE 0x42CB15Cull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_BASE 0x42CB170ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_BASE 0x42CB184ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_BASE 0x42CB198ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_BASE 0x42CB1ACull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_BASE 0x42CB1C0ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_BASE 0x42CB1D4ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_BASE 0x42CB1E8ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_BASE 0x42CB1FCull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_BASE 0x42CB210ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_BASE 0x42CB22Cull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_BASE 0x42CB240ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_BASE 0x42CB254ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_BASE 0x42CB268ull
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_LO_ARCH_NON_TENSOR_END_BASE 0x42CB280ull
+#define DCORE1_MME_CTRL_LO_ARCH_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE1_MME_CTRL_LO_ARCH_NON_TENSOR_END_SECTION 0xB800
+#define mmDCORE1_MME_CTRL_LO_MME_AXUSER_BASE 0x42CBE00ull
+#define DCORE1_MME_CTRL_LO_MME_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_CTRL_LO_MME_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_CTRL_LO_SPECIAL_BASE 0x42CBE80ull
+#define DCORE1_MME_CTRL_LO_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_LO_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_BASE 0x42CC000ull
+#define DCORE1_MME_CTRL_HI_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_HI_SECTION 0x8000
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_BASE_ADDR_BASE 0x42CC008ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE1_MME_CTRL_HI_SHADOW_0_BASE_ADDR_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_BASE 0x42CC028ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_A_BASE 0x42CC040ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_A_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_B_BASE 0x42CC098ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_B_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_BASE 0x42CC0F0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_BASE 0x42CC15Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_BASE 0x42CC170ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_BASE 0x42CC184ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_BASE 0x42CC198ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_BASE 0x42CC1ACull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_BASE 0x42CC1C0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_BASE 0x42CC1D4ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_BASE 0x42CC1E8ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_BASE 0x42CC1FCull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_BASE 0x42CC210ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_BASE 0x42CC22Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_BASE 0x42CC240ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_BASE 0x42CC254ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_BASE 0x42CC268ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_BASE 0x42CC280ull
+#define DCORE1_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE1_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_BASE_ADDR_BASE 0x42CC308ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE1_MME_CTRL_HI_SHADOW_1_BASE_ADDR_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_BASE 0x42CC328ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_A_BASE 0x42CC340ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_A_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_B_BASE 0x42CC398ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_B_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_BASE 0x42CC3F0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_BASE 0x42CC45Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_BASE 0x42CC470ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_BASE 0x42CC484ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_BASE 0x42CC498ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_BASE 0x42CC4ACull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_BASE 0x42CC4C0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_BASE 0x42CC4D4ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_BASE 0x42CC4E8ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_BASE 0x42CC4FCull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_BASE 0x42CC510ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_BASE 0x42CC52Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_BASE 0x42CC540ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_BASE 0x42CC554ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_BASE 0x42CC568ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_BASE 0x42CC580ull
+#define DCORE1_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE1_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_BASE_ADDR_BASE 0x42CC608ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE1_MME_CTRL_HI_SHADOW_2_BASE_ADDR_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_BASE 0x42CC628ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_A_BASE 0x42CC640ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_A_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_B_BASE 0x42CC698ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_B_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_BASE 0x42CC6F0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_BASE 0x42CC75Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_BASE 0x42CC770ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_BASE 0x42CC784ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_BASE 0x42CC798ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_BASE 0x42CC7ACull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_BASE 0x42CC7C0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_BASE 0x42CC7D4ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_BASE 0x42CC7E8ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_BASE 0x42CC7FCull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_BASE 0x42CC810ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_BASE 0x42CC82Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_BASE 0x42CC840ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_BASE 0x42CC854ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_BASE 0x42CC868ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_BASE 0x42CC880ull
+#define DCORE1_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE1_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_BASE_ADDR_BASE 0x42CC908ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE1_MME_CTRL_HI_SHADOW_3_BASE_ADDR_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_BASE 0x42CC928ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_A_BASE 0x42CC940ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_A_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_B_BASE 0x42CC998ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_B_SECTION 0x5800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_BASE 0x42CC9F0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_BASE 0x42CCA5Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_BASE 0x42CCA70ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_BASE 0x42CCA84ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_BASE 0x42CCA98ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_BASE 0x42CCAACull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_BASE 0x42CCAC0ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_BASE 0x42CCAD4ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_BASE 0x42CCAE8ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_BASE 0x42CCAFCull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_BASE 0x42CCB10ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_BASE 0x42CCB2Cull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_BASE 0x42CCB40ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_BASE 0x42CCB54ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_BASE 0x42CCB68ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_BASE 0x42CCB80ull
+#define DCORE1_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE1_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_SECTION 0x3000
+#define mmDCORE1_MME_CTRL_HI_SPECIAL_BASE 0x42CCE80ull
+#define DCORE1_MME_CTRL_HI_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_HI_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_EU_BIST_BASE 0x42CD000ull
+#define DCORE1_MME_EU_BIST_MAX_OFFSET 0x1000
+#define DCORE1_MME_EU_BIST_SECTION 0xE800
+#define mmDCORE1_MME_EU_BIST_SPECIAL_BASE 0x42CDE80ull
+#define DCORE1_MME_EU_BIST_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_EU_BIST_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_MSTR_IF_RR_SHRD_HBW_BASE 0x42CE000ull
+#define DCORE1_MME_CTRL_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_CTRL_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_MSTR_IF_RR_PRVT_HBW_BASE 0x42CE200ull
+#define DCORE1_MME_CTRL_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_CTRL_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_MSTR_IF_RR_SHRD_LBW_BASE 0x42CE400ull
+#define DCORE1_MME_CTRL_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_CTRL_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_MSTR_IF_RR_PRVT_LBW_BASE 0x42CE600ull
+#define DCORE1_MME_CTRL_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_CTRL_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_CTRL_MSTR_IF_E2E_CRDT_BASE 0x42CE800ull
+#define DCORE1_MME_CTRL_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_CTRL_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_CTRL_MSTR_IF_AXUSER_BASE 0x42CEA80ull
+#define DCORE1_MME_CTRL_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_CTRL_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_CTRL_MSTR_IF_DBG_HBW_BASE 0x42CEB00ull
+#define DCORE1_MME_CTRL_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_CTRL_MSTR_IF_DBG_LBW_BASE 0x42CEB80ull
+#define DCORE1_MME_CTRL_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_CTRL_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_CTRL_MSTR_IF_CORE_HBW_BASE 0x42CEC00ull
+#define DCORE1_MME_CTRL_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_CTRL_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_CTRL_MSTR_IF_CORE_LBW_BASE 0x42CED80ull
+#define DCORE1_MME_CTRL_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_CTRL_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_MSTR_IF_SPECIAL_BASE 0x42CEE80ull
+#define DCORE1_MME_CTRL_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_CTRL_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_QM_ARC_ACP_ENG_BASE 0x42CF000ull
+#define DCORE1_MME_QM_ARC_ACP_ENG_MAX_OFFSET 0x1000
+#define DCORE1_MME_QM_ARC_ACP_ENG_SECTION 0xE800
+#define mmDCORE1_MME_QM_ARC_ACP_ENG_SPECIAL_BASE 0x42CFE80ull
+#define DCORE1_MME_QM_ARC_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_QM_ARC_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_SBTE0_BASE 0x42D0000ull
+#define DCORE1_MME_SBTE0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_SECTION 0xE800
+#define mmDCORE1_MME_SBTE0_SPECIAL_BASE 0x42D0E80ull
+#define DCORE1_MME_SBTE0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE0_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE 0x42D1000ull
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_BASE 0x42D1200ull
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_BASE 0x42D1400ull
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_BASE 0x42D1600ull
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_E2E_CRDT_BASE 0x42D1800ull
+#define DCORE1_MME_SBTE0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_SBTE0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_SBTE0_MSTR_IF_AXUSER_BASE 0x42D1A80ull
+#define DCORE1_MME_SBTE0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_SBTE0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_DBG_HBW_BASE 0x42D1B00ull
+#define DCORE1_MME_SBTE0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_DBG_LBW_BASE 0x42D1B80ull
+#define DCORE1_MME_SBTE0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_CORE_HBW_BASE 0x42D1C00ull
+#define DCORE1_MME_SBTE0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_SBTE0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_SBTE0_MSTR_IF_CORE_LBW_BASE 0x42D1D80ull
+#define DCORE1_MME_SBTE0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_SBTE0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_MSTR_IF_SPECIAL_BASE 0x42D1E80ull
+#define DCORE1_MME_SBTE0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE0_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE1_MME_SBTE1_BASE 0x42D8000ull
+#define DCORE1_MME_SBTE1_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_SECTION 0xE800
+#define mmDCORE1_MME_SBTE1_SPECIAL_BASE 0x42D8E80ull
+#define DCORE1_MME_SBTE1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE1_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_BASE 0x42D9000ull
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_BASE 0x42D9200ull
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_BASE 0x42D9400ull
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_BASE 0x42D9600ull
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_E2E_CRDT_BASE 0x42D9800ull
+#define DCORE1_MME_SBTE1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_SBTE1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_SBTE1_MSTR_IF_AXUSER_BASE 0x42D9A80ull
+#define DCORE1_MME_SBTE1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_SBTE1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_DBG_HBW_BASE 0x42D9B00ull
+#define DCORE1_MME_SBTE1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_DBG_LBW_BASE 0x42D9B80ull
+#define DCORE1_MME_SBTE1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_CORE_HBW_BASE 0x42D9C00ull
+#define DCORE1_MME_SBTE1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_SBTE1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_SBTE1_MSTR_IF_CORE_LBW_BASE 0x42D9D80ull
+#define DCORE1_MME_SBTE1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_SBTE1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_MSTR_IF_SPECIAL_BASE 0x42D9E80ull
+#define DCORE1_MME_SBTE1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE1_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE1_MME_SBTE2_BASE 0x42E0000ull
+#define DCORE1_MME_SBTE2_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_SECTION 0xE800
+#define mmDCORE1_MME_SBTE2_SPECIAL_BASE 0x42E0E80ull
+#define DCORE1_MME_SBTE2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE2_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_BASE 0x42E1000ull
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_BASE 0x42E1200ull
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_BASE 0x42E1400ull
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_BASE 0x42E1600ull
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_E2E_CRDT_BASE 0x42E1800ull
+#define DCORE1_MME_SBTE2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_SBTE2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_SBTE2_MSTR_IF_AXUSER_BASE 0x42E1A80ull
+#define DCORE1_MME_SBTE2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_SBTE2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_DBG_HBW_BASE 0x42E1B00ull
+#define DCORE1_MME_SBTE2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_DBG_LBW_BASE 0x42E1B80ull
+#define DCORE1_MME_SBTE2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_CORE_HBW_BASE 0x42E1C00ull
+#define DCORE1_MME_SBTE2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_SBTE2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_SBTE2_MSTR_IF_CORE_LBW_BASE 0x42E1D80ull
+#define DCORE1_MME_SBTE2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_SBTE2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_MSTR_IF_SPECIAL_BASE 0x42E1E80ull
+#define DCORE1_MME_SBTE2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE2_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE1_MME_SBTE3_BASE 0x42E8000ull
+#define DCORE1_MME_SBTE3_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_SECTION 0xE800
+#define mmDCORE1_MME_SBTE3_SPECIAL_BASE 0x42E8E80ull
+#define DCORE1_MME_SBTE3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_BASE 0x42E9000ull
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_BASE 0x42E9200ull
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_BASE 0x42E9400ull
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_BASE 0x42E9600ull
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_E2E_CRDT_BASE 0x42E9800ull
+#define DCORE1_MME_SBTE3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_SBTE3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_SBTE3_MSTR_IF_AXUSER_BASE 0x42E9A80ull
+#define DCORE1_MME_SBTE3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_SBTE3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_DBG_HBW_BASE 0x42E9B00ull
+#define DCORE1_MME_SBTE3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_DBG_LBW_BASE 0x42E9B80ull
+#define DCORE1_MME_SBTE3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_CORE_HBW_BASE 0x42E9C00ull
+#define DCORE1_MME_SBTE3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_SBTE3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_SBTE3_MSTR_IF_CORE_LBW_BASE 0x42E9D80ull
+#define DCORE1_MME_SBTE3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_SBTE3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_MSTR_IF_SPECIAL_BASE 0x42E9E80ull
+#define DCORE1_MME_SBTE3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE3_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE1_MME_SBTE4_BASE 0x42F0000ull
+#define DCORE1_MME_SBTE4_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_SECTION 0xE800
+#define mmDCORE1_MME_SBTE4_SPECIAL_BASE 0x42F0E80ull
+#define DCORE1_MME_SBTE4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE4_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_BASE 0x42F1000ull
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_BASE 0x42F1200ull
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_BASE 0x42F1400ull
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_BASE 0x42F1600ull
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_E2E_CRDT_BASE 0x42F1800ull
+#define DCORE1_MME_SBTE4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_SBTE4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_SBTE4_MSTR_IF_AXUSER_BASE 0x42F1A80ull
+#define DCORE1_MME_SBTE4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_SBTE4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_DBG_HBW_BASE 0x42F1B00ull
+#define DCORE1_MME_SBTE4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_DBG_LBW_BASE 0x42F1B80ull
+#define DCORE1_MME_SBTE4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_SBTE4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_CORE_HBW_BASE 0x42F1C00ull
+#define DCORE1_MME_SBTE4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_SBTE4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_SBTE4_MSTR_IF_CORE_LBW_BASE 0x42F1D80ull
+#define DCORE1_MME_SBTE4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_SBTE4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_MSTR_IF_SPECIAL_BASE 0x42F1E80ull
+#define DCORE1_MME_SBTE4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_SBTE4_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE1_MME_ACC_BASE 0x42F8000ull
+#define DCORE1_MME_ACC_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_SECTION 0xE800
+#define mmDCORE1_MME_ACC_SPECIAL_BASE 0x42F8E80ull
+#define DCORE1_MME_ACC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_ACC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE 0x42F9000ull
+#define DCORE1_MME_WB0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_WB0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_WB0_MSTR_IF_RR_PRVT_HBW_BASE 0x42F9200ull
+#define DCORE1_MME_WB0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_WB0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_WB0_MSTR_IF_RR_SHRD_LBW_BASE 0x42F9400ull
+#define DCORE1_MME_WB0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_WB0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_WB0_MSTR_IF_RR_PRVT_LBW_BASE 0x42F9600ull
+#define DCORE1_MME_WB0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_WB0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_WB0_MSTR_IF_E2E_CRDT_BASE 0x42F9800ull
+#define DCORE1_MME_WB0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_WB0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_WB0_MSTR_IF_AXUSER_BASE 0x42F9A80ull
+#define DCORE1_MME_WB0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_WB0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_WB0_MSTR_IF_DBG_HBW_BASE 0x42F9B00ull
+#define DCORE1_MME_WB0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_WB0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_WB0_MSTR_IF_DBG_LBW_BASE 0x42F9B80ull
+#define DCORE1_MME_WB0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_WB0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_WB0_MSTR_IF_CORE_HBW_BASE 0x42F9C00ull
+#define DCORE1_MME_WB0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_WB0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_WB0_MSTR_IF_CORE_LBW_BASE 0x42F9D80ull
+#define DCORE1_MME_WB0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_WB0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_WB0_MSTR_IF_SPECIAL_BASE 0x42F9E80ull
+#define DCORE1_MME_WB0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_WB0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE 0x42FA000ull
+#define DCORE1_MME_WB1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_WB1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_MME_WB1_MSTR_IF_RR_PRVT_HBW_BASE 0x42FA200ull
+#define DCORE1_MME_WB1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_MME_WB1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_MME_WB1_MSTR_IF_RR_SHRD_LBW_BASE 0x42FA400ull
+#define DCORE1_MME_WB1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_WB1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_MME_WB1_MSTR_IF_RR_PRVT_LBW_BASE 0x42FA600ull
+#define DCORE1_MME_WB1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_MME_WB1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_MME_WB1_MSTR_IF_E2E_CRDT_BASE 0x42FA800ull
+#define DCORE1_MME_WB1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_MME_WB1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_MME_WB1_MSTR_IF_AXUSER_BASE 0x42FAA80ull
+#define DCORE1_MME_WB1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_MME_WB1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_MME_WB1_MSTR_IF_DBG_HBW_BASE 0x42FAB00ull
+#define DCORE1_MME_WB1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_WB1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_MME_WB1_MSTR_IF_DBG_LBW_BASE 0x42FAB80ull
+#define DCORE1_MME_WB1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_MME_WB1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_MME_WB1_MSTR_IF_CORE_HBW_BASE 0x42FAC00ull
+#define DCORE1_MME_WB1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_MME_WB1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_MME_WB1_MSTR_IF_CORE_LBW_BASE 0x42FAD80ull
+#define DCORE1_MME_WB1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_MME_WB1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_MME_WB1_MSTR_IF_SPECIAL_BASE 0x42FAE80ull
+#define DCORE1_MME_WB1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_MME_WB1_MSTR_IF_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SYNC_MNGR_OBJS_BASE 0x4300000ull
+#define DCORE1_SYNC_MNGR_OBJS_MAX_OFFSET 0x15A00
+#define DCORE1_SYNC_MNGR_OBJS_SECTION 0x1E000
+#define mmDCORE1_SYNC_MNGR_GLBL_BASE 0x431E000ull
+#define DCORE1_SYNC_MNGR_GLBL_MAX_OFFSET 0x1000
+#define DCORE1_SYNC_MNGR_GLBL_SECTION 0xE800
+#define mmDCORE1_SYNC_MNGR_GLBL_SPECIAL_BASE 0x431EE80ull
+#define DCORE1_SYNC_MNGR_GLBL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SYNC_MNGR_GLBL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_BASE 0x431F000ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_BASE 0x431F200ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_BASE 0x431F400ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_BASE 0x431F600ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_E2E_CRDT_BASE 0x431F800ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_SYNC_MNGR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_AXUSER_BASE 0x431FA80ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_SYNC_MNGR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_DBG_HBW_BASE 0x431FB00ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_SYNC_MNGR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_DBG_LBW_BASE 0x431FB80ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_SYNC_MNGR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_CORE_HBW_BASE 0x431FC00ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_SYNC_MNGR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_CORE_LBW_BASE 0x431FD80ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_SYNC_MNGR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_SYNC_MNGR_MSTR_IF_SPECIAL_BASE 0x431FE80ull
+#define DCORE1_SYNC_MNGR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SYNC_MNGR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HIF0_BASE 0x4320000ull
+#define DCORE1_HIF0_MAX_OFFSET 0x1000
+#define DCORE1_HIF0_SECTION 0xE800
+#define mmDCORE1_HIF0_SPECIAL_BASE 0x4320E80ull
+#define DCORE1_HIF0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HIF0_SPECIAL_SECTION 0x3180
+#define mmDCORE1_HIF1_BASE 0x4324000ull
+#define DCORE1_HIF1_MAX_OFFSET 0x1000
+#define DCORE1_HIF1_SECTION 0xE800
+#define mmDCORE1_HIF1_SPECIAL_BASE 0x4324E80ull
+#define DCORE1_HIF1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HIF1_SPECIAL_SECTION 0x3180
+#define mmDCORE1_HIF2_BASE 0x4328000ull
+#define DCORE1_HIF2_MAX_OFFSET 0x1000
+#define DCORE1_HIF2_SECTION 0xE800
+#define mmDCORE1_HIF2_SPECIAL_BASE 0x4328E80ull
+#define DCORE1_HIF2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HIF2_SPECIAL_SECTION 0x3180
+#define mmDCORE1_HIF3_BASE 0x432C000ull
+#define DCORE1_HIF3_MAX_OFFSET 0x1000
+#define DCORE1_HIF3_SECTION 0xE800
+#define mmDCORE1_HIF3_SPECIAL_BASE 0x432CE80ull
+#define DCORE1_HIF3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HIF3_SPECIAL_SECTION 0x13180
+#define mmDCORE1_RTR0_CTRL_BASE 0x4340000ull
+#define DCORE1_RTR0_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR0_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR0_CTRL_SPECIAL_BASE 0x4340E80ull
+#define DCORE1_RTR0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR0_H3_BASE 0x4341000ull
+#define DCORE1_RTR0_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR0_H3_SECTION 0xE800
+#define mmDCORE1_RTR0_H3_SPECIAL_BASE 0x4341E80ull
+#define DCORE1_RTR0_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR0_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR0_MSTR_IF_RR_SHRD_HBW_BASE 0x4342000ull
+#define DCORE1_RTR0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR0_MSTR_IF_RR_PRVT_HBW_BASE 0x4342200ull
+#define DCORE1_RTR0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR0_MSTR_IF_RR_SHRD_LBW_BASE 0x4342400ull
+#define DCORE1_RTR0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR0_MSTR_IF_RR_PRVT_LBW_BASE 0x4342600ull
+#define DCORE1_RTR0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR0_MSTR_IF_E2E_CRDT_BASE 0x4342800ull
+#define DCORE1_RTR0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR0_MSTR_IF_AXUSER_BASE 0x4342A80ull
+#define DCORE1_RTR0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR0_MSTR_IF_DBG_HBW_BASE 0x4342B00ull
+#define DCORE1_RTR0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR0_MSTR_IF_DBG_LBW_BASE 0x4342B80ull
+#define DCORE1_RTR0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR0_MSTR_IF_CORE_HBW_BASE 0x4342C00ull
+#define DCORE1_RTR0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR0_MSTR_IF_CORE_LBW_BASE 0x4342D80ull
+#define DCORE1_RTR0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR0_MSTR_IF_SPECIAL_BASE 0x4342E80ull
+#define DCORE1_RTR0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR0_ADD_DEC_HBW_BASE 0x4343000ull
+#define DCORE1_RTR0_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR0_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR0_ADD_DEC_LBW_BASE 0x4343400ull
+#define DCORE1_RTR0_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR0_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR0_ADD_DEC_SPECIAL_BASE 0x4343E80ull
+#define DCORE1_RTR0_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR0_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR0_BASE 0x4344000ull
+#define DCORE1_RTR0_MAX_OFFSET 0x1000
+#define DCORE1_RTR0_SECTION 0x3000
+#define mmDCORE1_RTR0_HBW_RD_RQ_LL_STAT_BASE 0x4344300ull
+#define DCORE1_RTR0_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_HBW_RD_RS_LL_STAT_BASE 0x4344340ull
+#define DCORE1_RTR0_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_HBW_WR_RQ_LL_STAT_BASE 0x4344380ull
+#define DCORE1_RTR0_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_HBW_WR_RS_LL_STAT_BASE 0x43443C0ull
+#define DCORE1_RTR0_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_LBW_RD_RQ_LL_STAT_BASE 0x4344400ull
+#define DCORE1_RTR0_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_LBW_RD_RS_LL_STAT_BASE 0x4344440ull
+#define DCORE1_RTR0_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_LBW_WR_RQ_LL_STAT_BASE 0x4344480ull
+#define DCORE1_RTR0_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_LBW_WR_RS_LL_STAT_BASE 0x43444C0ull
+#define DCORE1_RTR0_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_HBW_MFIFO_BASE 0x4344500ull
+#define DCORE1_RTR0_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR0_E2E_RD_LL_STAT_BASE 0x4344540ull
+#define DCORE1_RTR0_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR0_E2E_WR_LL_STAT_BASE 0x4344580ull
+#define DCORE1_RTR0_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR0_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR0_RTR_HBW_XACT_STAT_BASE 0x4344600ull
+#define DCORE1_RTR0_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR0_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR0_RTR_LBW_XACT_STAT_BASE 0x4344680ull
+#define DCORE1_RTR0_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR0_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR0_RTR_E2E_XACT_STAT_BASE 0x4344700ull
+#define DCORE1_RTR0_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR0_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR0_SPECIAL_BASE 0x4344E80ull
+#define DCORE1_RTR0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR0_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR0_DBG_ADDR_BASE 0x4345000ull
+#define DCORE1_RTR0_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR0_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR0_DBG_ADDR_SPECIAL_BASE 0x4345E80ull
+#define DCORE1_RTR0_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR0_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR1_CTRL_BASE 0x4348000ull
+#define DCORE1_RTR1_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR1_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR1_CTRL_SPECIAL_BASE 0x4348E80ull
+#define DCORE1_RTR1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR1_H3_BASE 0x4349000ull
+#define DCORE1_RTR1_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR1_H3_SECTION 0xE800
+#define mmDCORE1_RTR1_H3_SPECIAL_BASE 0x4349E80ull
+#define DCORE1_RTR1_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR1_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR1_MSTR_IF_RR_SHRD_HBW_BASE 0x434A000ull
+#define DCORE1_RTR1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR1_MSTR_IF_RR_PRVT_HBW_BASE 0x434A200ull
+#define DCORE1_RTR1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR1_MSTR_IF_RR_SHRD_LBW_BASE 0x434A400ull
+#define DCORE1_RTR1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR1_MSTR_IF_RR_PRVT_LBW_BASE 0x434A600ull
+#define DCORE1_RTR1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR1_MSTR_IF_E2E_CRDT_BASE 0x434A800ull
+#define DCORE1_RTR1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR1_MSTR_IF_AXUSER_BASE 0x434AA80ull
+#define DCORE1_RTR1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR1_MSTR_IF_DBG_HBW_BASE 0x434AB00ull
+#define DCORE1_RTR1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR1_MSTR_IF_DBG_LBW_BASE 0x434AB80ull
+#define DCORE1_RTR1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR1_MSTR_IF_CORE_HBW_BASE 0x434AC00ull
+#define DCORE1_RTR1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR1_MSTR_IF_CORE_LBW_BASE 0x434AD80ull
+#define DCORE1_RTR1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR1_MSTR_IF_SPECIAL_BASE 0x434AE80ull
+#define DCORE1_RTR1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR1_ADD_DEC_HBW_BASE 0x434B000ull
+#define DCORE1_RTR1_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR1_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR1_ADD_DEC_LBW_BASE 0x434B400ull
+#define DCORE1_RTR1_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR1_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR1_ADD_DEC_SPECIAL_BASE 0x434BE80ull
+#define DCORE1_RTR1_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR1_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR1_BASE 0x434C000ull
+#define DCORE1_RTR1_MAX_OFFSET 0x1000
+#define DCORE1_RTR1_SECTION 0x3000
+#define mmDCORE1_RTR1_HBW_RD_RQ_LL_STAT_BASE 0x434C300ull
+#define DCORE1_RTR1_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_HBW_RD_RS_LL_STAT_BASE 0x434C340ull
+#define DCORE1_RTR1_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_HBW_WR_RQ_LL_STAT_BASE 0x434C380ull
+#define DCORE1_RTR1_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_HBW_WR_RS_LL_STAT_BASE 0x434C3C0ull
+#define DCORE1_RTR1_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_LBW_RD_RQ_LL_STAT_BASE 0x434C400ull
+#define DCORE1_RTR1_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_LBW_RD_RS_LL_STAT_BASE 0x434C440ull
+#define DCORE1_RTR1_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_LBW_WR_RQ_LL_STAT_BASE 0x434C480ull
+#define DCORE1_RTR1_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_LBW_WR_RS_LL_STAT_BASE 0x434C4C0ull
+#define DCORE1_RTR1_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_HBW_MFIFO_BASE 0x434C500ull
+#define DCORE1_RTR1_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR1_E2E_RD_LL_STAT_BASE 0x434C540ull
+#define DCORE1_RTR1_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR1_E2E_WR_LL_STAT_BASE 0x434C580ull
+#define DCORE1_RTR1_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR1_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR1_RTR_HBW_XACT_STAT_BASE 0x434C600ull
+#define DCORE1_RTR1_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR1_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR1_RTR_LBW_XACT_STAT_BASE 0x434C680ull
+#define DCORE1_RTR1_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR1_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR1_RTR_E2E_XACT_STAT_BASE 0x434C700ull
+#define DCORE1_RTR1_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR1_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR1_SPECIAL_BASE 0x434CE80ull
+#define DCORE1_RTR1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR1_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR1_DBG_ADDR_BASE 0x434D000ull
+#define DCORE1_RTR1_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR1_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR1_DBG_ADDR_SPECIAL_BASE 0x434DE80ull
+#define DCORE1_RTR1_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR1_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR2_CTRL_BASE 0x4350000ull
+#define DCORE1_RTR2_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR2_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR2_CTRL_SPECIAL_BASE 0x4350E80ull
+#define DCORE1_RTR2_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR2_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR2_H3_BASE 0x4351000ull
+#define DCORE1_RTR2_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR2_H3_SECTION 0xE800
+#define mmDCORE1_RTR2_H3_SPECIAL_BASE 0x4351E80ull
+#define DCORE1_RTR2_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR2_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR2_MSTR_IF_RR_SHRD_HBW_BASE 0x4352000ull
+#define DCORE1_RTR2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR2_MSTR_IF_RR_PRVT_HBW_BASE 0x4352200ull
+#define DCORE1_RTR2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR2_MSTR_IF_RR_SHRD_LBW_BASE 0x4352400ull
+#define DCORE1_RTR2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR2_MSTR_IF_RR_PRVT_LBW_BASE 0x4352600ull
+#define DCORE1_RTR2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR2_MSTR_IF_E2E_CRDT_BASE 0x4352800ull
+#define DCORE1_RTR2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR2_MSTR_IF_AXUSER_BASE 0x4352A80ull
+#define DCORE1_RTR2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR2_MSTR_IF_DBG_HBW_BASE 0x4352B00ull
+#define DCORE1_RTR2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR2_MSTR_IF_DBG_LBW_BASE 0x4352B80ull
+#define DCORE1_RTR2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR2_MSTR_IF_CORE_HBW_BASE 0x4352C00ull
+#define DCORE1_RTR2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR2_MSTR_IF_CORE_LBW_BASE 0x4352D80ull
+#define DCORE1_RTR2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR2_MSTR_IF_SPECIAL_BASE 0x4352E80ull
+#define DCORE1_RTR2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR2_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR2_ADD_DEC_HBW_BASE 0x4353000ull
+#define DCORE1_RTR2_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR2_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR2_ADD_DEC_LBW_BASE 0x4353400ull
+#define DCORE1_RTR2_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR2_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR2_ADD_DEC_SPECIAL_BASE 0x4353E80ull
+#define DCORE1_RTR2_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR2_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR2_BASE 0x4354000ull
+#define DCORE1_RTR2_MAX_OFFSET 0x1000
+#define DCORE1_RTR2_SECTION 0x3000
+#define mmDCORE1_RTR2_HBW_RD_RQ_LL_STAT_BASE 0x4354300ull
+#define DCORE1_RTR2_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_HBW_RD_RS_LL_STAT_BASE 0x4354340ull
+#define DCORE1_RTR2_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_HBW_WR_RQ_LL_STAT_BASE 0x4354380ull
+#define DCORE1_RTR2_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_HBW_WR_RS_LL_STAT_BASE 0x43543C0ull
+#define DCORE1_RTR2_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_LBW_RD_RQ_LL_STAT_BASE 0x4354400ull
+#define DCORE1_RTR2_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_LBW_RD_RS_LL_STAT_BASE 0x4354440ull
+#define DCORE1_RTR2_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_LBW_WR_RQ_LL_STAT_BASE 0x4354480ull
+#define DCORE1_RTR2_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_LBW_WR_RS_LL_STAT_BASE 0x43544C0ull
+#define DCORE1_RTR2_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_HBW_MFIFO_BASE 0x4354500ull
+#define DCORE1_RTR2_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR2_E2E_RD_LL_STAT_BASE 0x4354540ull
+#define DCORE1_RTR2_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR2_E2E_WR_LL_STAT_BASE 0x4354580ull
+#define DCORE1_RTR2_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR2_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR2_RTR_HBW_XACT_STAT_BASE 0x4354600ull
+#define DCORE1_RTR2_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR2_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR2_RTR_LBW_XACT_STAT_BASE 0x4354680ull
+#define DCORE1_RTR2_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR2_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR2_RTR_E2E_XACT_STAT_BASE 0x4354700ull
+#define DCORE1_RTR2_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR2_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR2_SPECIAL_BASE 0x4354E80ull
+#define DCORE1_RTR2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR2_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR2_DBG_ADDR_BASE 0x4355000ull
+#define DCORE1_RTR2_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR2_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR2_DBG_ADDR_SPECIAL_BASE 0x4355E80ull
+#define DCORE1_RTR2_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR2_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR3_CTRL_BASE 0x4358000ull
+#define DCORE1_RTR3_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR3_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR3_CTRL_SPECIAL_BASE 0x4358E80ull
+#define DCORE1_RTR3_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR3_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR3_H3_BASE 0x4359000ull
+#define DCORE1_RTR3_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR3_H3_SECTION 0xE800
+#define mmDCORE1_RTR3_H3_SPECIAL_BASE 0x4359E80ull
+#define DCORE1_RTR3_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR3_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR3_MSTR_IF_RR_SHRD_HBW_BASE 0x435A000ull
+#define DCORE1_RTR3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR3_MSTR_IF_RR_PRVT_HBW_BASE 0x435A200ull
+#define DCORE1_RTR3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR3_MSTR_IF_RR_SHRD_LBW_BASE 0x435A400ull
+#define DCORE1_RTR3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR3_MSTR_IF_RR_PRVT_LBW_BASE 0x435A600ull
+#define DCORE1_RTR3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR3_MSTR_IF_E2E_CRDT_BASE 0x435A800ull
+#define DCORE1_RTR3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR3_MSTR_IF_AXUSER_BASE 0x435AA80ull
+#define DCORE1_RTR3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR3_MSTR_IF_DBG_HBW_BASE 0x435AB00ull
+#define DCORE1_RTR3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR3_MSTR_IF_DBG_LBW_BASE 0x435AB80ull
+#define DCORE1_RTR3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR3_MSTR_IF_CORE_HBW_BASE 0x435AC00ull
+#define DCORE1_RTR3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR3_MSTR_IF_CORE_LBW_BASE 0x435AD80ull
+#define DCORE1_RTR3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR3_MSTR_IF_SPECIAL_BASE 0x435AE80ull
+#define DCORE1_RTR3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR3_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR3_ADD_DEC_HBW_BASE 0x435B000ull
+#define DCORE1_RTR3_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR3_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR3_ADD_DEC_LBW_BASE 0x435B400ull
+#define DCORE1_RTR3_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR3_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR3_ADD_DEC_SPECIAL_BASE 0x435BE80ull
+#define DCORE1_RTR3_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR3_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR3_BASE 0x435C000ull
+#define DCORE1_RTR3_MAX_OFFSET 0x1000
+#define DCORE1_RTR3_SECTION 0x3000
+#define mmDCORE1_RTR3_HBW_RD_RQ_LL_STAT_BASE 0x435C300ull
+#define DCORE1_RTR3_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_HBW_RD_RS_LL_STAT_BASE 0x435C340ull
+#define DCORE1_RTR3_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_HBW_WR_RQ_LL_STAT_BASE 0x435C380ull
+#define DCORE1_RTR3_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_HBW_WR_RS_LL_STAT_BASE 0x435C3C0ull
+#define DCORE1_RTR3_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_LBW_RD_RQ_LL_STAT_BASE 0x435C400ull
+#define DCORE1_RTR3_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_LBW_RD_RS_LL_STAT_BASE 0x435C440ull
+#define DCORE1_RTR3_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_LBW_WR_RQ_LL_STAT_BASE 0x435C480ull
+#define DCORE1_RTR3_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_LBW_WR_RS_LL_STAT_BASE 0x435C4C0ull
+#define DCORE1_RTR3_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_HBW_MFIFO_BASE 0x435C500ull
+#define DCORE1_RTR3_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR3_E2E_RD_LL_STAT_BASE 0x435C540ull
+#define DCORE1_RTR3_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR3_E2E_WR_LL_STAT_BASE 0x435C580ull
+#define DCORE1_RTR3_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR3_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR3_RTR_HBW_XACT_STAT_BASE 0x435C600ull
+#define DCORE1_RTR3_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR3_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR3_RTR_LBW_XACT_STAT_BASE 0x435C680ull
+#define DCORE1_RTR3_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR3_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR3_RTR_E2E_XACT_STAT_BASE 0x435C700ull
+#define DCORE1_RTR3_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR3_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR3_SPECIAL_BASE 0x435CE80ull
+#define DCORE1_RTR3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR3_DBG_ADDR_BASE 0x435D000ull
+#define DCORE1_RTR3_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR3_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR3_DBG_ADDR_SPECIAL_BASE 0x435DE80ull
+#define DCORE1_RTR3_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR3_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR4_CTRL_BASE 0x4360000ull
+#define DCORE1_RTR4_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR4_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR4_CTRL_SPECIAL_BASE 0x4360E80ull
+#define DCORE1_RTR4_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR4_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR4_H3_BASE 0x4361000ull
+#define DCORE1_RTR4_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR4_H3_SECTION 0xE800
+#define mmDCORE1_RTR4_H3_SPECIAL_BASE 0x4361E80ull
+#define DCORE1_RTR4_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR4_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR4_MSTR_IF_RR_SHRD_HBW_BASE 0x4362000ull
+#define DCORE1_RTR4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR4_MSTR_IF_RR_PRVT_HBW_BASE 0x4362200ull
+#define DCORE1_RTR4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR4_MSTR_IF_RR_SHRD_LBW_BASE 0x4362400ull
+#define DCORE1_RTR4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR4_MSTR_IF_RR_PRVT_LBW_BASE 0x4362600ull
+#define DCORE1_RTR4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR4_MSTR_IF_E2E_CRDT_BASE 0x4362800ull
+#define DCORE1_RTR4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR4_MSTR_IF_AXUSER_BASE 0x4362A80ull
+#define DCORE1_RTR4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR4_MSTR_IF_DBG_HBW_BASE 0x4362B00ull
+#define DCORE1_RTR4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR4_MSTR_IF_DBG_LBW_BASE 0x4362B80ull
+#define DCORE1_RTR4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR4_MSTR_IF_CORE_HBW_BASE 0x4362C00ull
+#define DCORE1_RTR4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR4_MSTR_IF_CORE_LBW_BASE 0x4362D80ull
+#define DCORE1_RTR4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR4_MSTR_IF_SPECIAL_BASE 0x4362E80ull
+#define DCORE1_RTR4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR4_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR4_ADD_DEC_HBW_BASE 0x4363000ull
+#define DCORE1_RTR4_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR4_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR4_ADD_DEC_LBW_BASE 0x4363400ull
+#define DCORE1_RTR4_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR4_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR4_ADD_DEC_SPECIAL_BASE 0x4363E80ull
+#define DCORE1_RTR4_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR4_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR4_BASE 0x4364000ull
+#define DCORE1_RTR4_MAX_OFFSET 0x1000
+#define DCORE1_RTR4_SECTION 0x3000
+#define mmDCORE1_RTR4_HBW_RD_RQ_LL_STAT_BASE 0x4364300ull
+#define DCORE1_RTR4_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_HBW_RD_RS_LL_STAT_BASE 0x4364340ull
+#define DCORE1_RTR4_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_HBW_WR_RQ_LL_STAT_BASE 0x4364380ull
+#define DCORE1_RTR4_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_HBW_WR_RS_LL_STAT_BASE 0x43643C0ull
+#define DCORE1_RTR4_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_LBW_RD_RQ_LL_STAT_BASE 0x4364400ull
+#define DCORE1_RTR4_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_LBW_RD_RS_LL_STAT_BASE 0x4364440ull
+#define DCORE1_RTR4_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_LBW_WR_RQ_LL_STAT_BASE 0x4364480ull
+#define DCORE1_RTR4_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_LBW_WR_RS_LL_STAT_BASE 0x43644C0ull
+#define DCORE1_RTR4_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_HBW_MFIFO_BASE 0x4364500ull
+#define DCORE1_RTR4_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR4_E2E_RD_LL_STAT_BASE 0x4364540ull
+#define DCORE1_RTR4_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR4_E2E_WR_LL_STAT_BASE 0x4364580ull
+#define DCORE1_RTR4_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR4_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR4_RTR_HBW_XACT_STAT_BASE 0x4364600ull
+#define DCORE1_RTR4_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR4_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR4_RTR_LBW_XACT_STAT_BASE 0x4364680ull
+#define DCORE1_RTR4_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR4_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR4_RTR_E2E_XACT_STAT_BASE 0x4364700ull
+#define DCORE1_RTR4_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR4_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR4_SPECIAL_BASE 0x4364E80ull
+#define DCORE1_RTR4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR4_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR4_DBG_ADDR_BASE 0x4365000ull
+#define DCORE1_RTR4_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR4_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR4_DBG_ADDR_SPECIAL_BASE 0x4365E80ull
+#define DCORE1_RTR4_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR4_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR5_CTRL_BASE 0x4368000ull
+#define DCORE1_RTR5_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR5_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR5_CTRL_SPECIAL_BASE 0x4368E80ull
+#define DCORE1_RTR5_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR5_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR5_H3_BASE 0x4369000ull
+#define DCORE1_RTR5_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR5_H3_SECTION 0xE800
+#define mmDCORE1_RTR5_H3_SPECIAL_BASE 0x4369E80ull
+#define DCORE1_RTR5_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR5_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR5_MSTR_IF_RR_SHRD_HBW_BASE 0x436A000ull
+#define DCORE1_RTR5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR5_MSTR_IF_RR_PRVT_HBW_BASE 0x436A200ull
+#define DCORE1_RTR5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR5_MSTR_IF_RR_SHRD_LBW_BASE 0x436A400ull
+#define DCORE1_RTR5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR5_MSTR_IF_RR_PRVT_LBW_BASE 0x436A600ull
+#define DCORE1_RTR5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR5_MSTR_IF_E2E_CRDT_BASE 0x436A800ull
+#define DCORE1_RTR5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR5_MSTR_IF_AXUSER_BASE 0x436AA80ull
+#define DCORE1_RTR5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR5_MSTR_IF_DBG_HBW_BASE 0x436AB00ull
+#define DCORE1_RTR5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR5_MSTR_IF_DBG_LBW_BASE 0x436AB80ull
+#define DCORE1_RTR5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR5_MSTR_IF_CORE_HBW_BASE 0x436AC00ull
+#define DCORE1_RTR5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR5_MSTR_IF_CORE_LBW_BASE 0x436AD80ull
+#define DCORE1_RTR5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR5_MSTR_IF_SPECIAL_BASE 0x436AE80ull
+#define DCORE1_RTR5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR5_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR5_ADD_DEC_HBW_BASE 0x436B000ull
+#define DCORE1_RTR5_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR5_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR5_ADD_DEC_LBW_BASE 0x436B400ull
+#define DCORE1_RTR5_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR5_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR5_ADD_DEC_SPECIAL_BASE 0x436BE80ull
+#define DCORE1_RTR5_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR5_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR5_BASE 0x436C000ull
+#define DCORE1_RTR5_MAX_OFFSET 0x1000
+#define DCORE1_RTR5_SECTION 0x3000
+#define mmDCORE1_RTR5_HBW_RD_RQ_LL_STAT_BASE 0x436C300ull
+#define DCORE1_RTR5_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_HBW_RD_RS_LL_STAT_BASE 0x436C340ull
+#define DCORE1_RTR5_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_HBW_WR_RQ_LL_STAT_BASE 0x436C380ull
+#define DCORE1_RTR5_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_HBW_WR_RS_LL_STAT_BASE 0x436C3C0ull
+#define DCORE1_RTR5_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_LBW_RD_RQ_LL_STAT_BASE 0x436C400ull
+#define DCORE1_RTR5_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_LBW_RD_RS_LL_STAT_BASE 0x436C440ull
+#define DCORE1_RTR5_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_LBW_WR_RQ_LL_STAT_BASE 0x436C480ull
+#define DCORE1_RTR5_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_LBW_WR_RS_LL_STAT_BASE 0x436C4C0ull
+#define DCORE1_RTR5_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_HBW_MFIFO_BASE 0x436C500ull
+#define DCORE1_RTR5_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR5_E2E_RD_LL_STAT_BASE 0x436C540ull
+#define DCORE1_RTR5_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR5_E2E_WR_LL_STAT_BASE 0x436C580ull
+#define DCORE1_RTR5_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR5_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR5_RTR_HBW_XACT_STAT_BASE 0x436C600ull
+#define DCORE1_RTR5_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR5_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR5_RTR_LBW_XACT_STAT_BASE 0x436C680ull
+#define DCORE1_RTR5_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR5_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR5_RTR_E2E_XACT_STAT_BASE 0x436C700ull
+#define DCORE1_RTR5_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR5_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR5_SPECIAL_BASE 0x436CE80ull
+#define DCORE1_RTR5_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR5_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR5_DBG_ADDR_BASE 0x436D000ull
+#define DCORE1_RTR5_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR5_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR5_DBG_ADDR_SPECIAL_BASE 0x436DE80ull
+#define DCORE1_RTR5_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR5_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR6_CTRL_BASE 0x4370000ull
+#define DCORE1_RTR6_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR6_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR6_CTRL_SPECIAL_BASE 0x4370E80ull
+#define DCORE1_RTR6_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR6_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR6_H3_BASE 0x4371000ull
+#define DCORE1_RTR6_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR6_H3_SECTION 0xE800
+#define mmDCORE1_RTR6_H3_SPECIAL_BASE 0x4371E80ull
+#define DCORE1_RTR6_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR6_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR6_MSTR_IF_RR_SHRD_HBW_BASE 0x4372000ull
+#define DCORE1_RTR6_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR6_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR6_MSTR_IF_RR_PRVT_HBW_BASE 0x4372200ull
+#define DCORE1_RTR6_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR6_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR6_MSTR_IF_RR_SHRD_LBW_BASE 0x4372400ull
+#define DCORE1_RTR6_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR6_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR6_MSTR_IF_RR_PRVT_LBW_BASE 0x4372600ull
+#define DCORE1_RTR6_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR6_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR6_MSTR_IF_E2E_CRDT_BASE 0x4372800ull
+#define DCORE1_RTR6_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR6_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR6_MSTR_IF_AXUSER_BASE 0x4372A80ull
+#define DCORE1_RTR6_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR6_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR6_MSTR_IF_DBG_HBW_BASE 0x4372B00ull
+#define DCORE1_RTR6_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR6_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR6_MSTR_IF_DBG_LBW_BASE 0x4372B80ull
+#define DCORE1_RTR6_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR6_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR6_MSTR_IF_CORE_HBW_BASE 0x4372C00ull
+#define DCORE1_RTR6_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR6_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR6_MSTR_IF_CORE_LBW_BASE 0x4372D80ull
+#define DCORE1_RTR6_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR6_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR6_MSTR_IF_SPECIAL_BASE 0x4372E80ull
+#define DCORE1_RTR6_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR6_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR6_ADD_DEC_HBW_BASE 0x4373000ull
+#define DCORE1_RTR6_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR6_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR6_ADD_DEC_LBW_BASE 0x4373400ull
+#define DCORE1_RTR6_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR6_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR6_ADD_DEC_SPECIAL_BASE 0x4373E80ull
+#define DCORE1_RTR6_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR6_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR6_BASE 0x4374000ull
+#define DCORE1_RTR6_MAX_OFFSET 0x1000
+#define DCORE1_RTR6_SECTION 0x3000
+#define mmDCORE1_RTR6_HBW_RD_RQ_LL_STAT_BASE 0x4374300ull
+#define DCORE1_RTR6_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_HBW_RD_RS_LL_STAT_BASE 0x4374340ull
+#define DCORE1_RTR6_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_HBW_WR_RQ_LL_STAT_BASE 0x4374380ull
+#define DCORE1_RTR6_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_HBW_WR_RS_LL_STAT_BASE 0x43743C0ull
+#define DCORE1_RTR6_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_LBW_RD_RQ_LL_STAT_BASE 0x4374400ull
+#define DCORE1_RTR6_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_LBW_RD_RS_LL_STAT_BASE 0x4374440ull
+#define DCORE1_RTR6_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_LBW_WR_RQ_LL_STAT_BASE 0x4374480ull
+#define DCORE1_RTR6_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_LBW_WR_RS_LL_STAT_BASE 0x43744C0ull
+#define DCORE1_RTR6_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_HBW_MFIFO_BASE 0x4374500ull
+#define DCORE1_RTR6_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR6_E2E_RD_LL_STAT_BASE 0x4374540ull
+#define DCORE1_RTR6_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR6_E2E_WR_LL_STAT_BASE 0x4374580ull
+#define DCORE1_RTR6_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR6_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR6_RTR_HBW_XACT_STAT_BASE 0x4374600ull
+#define DCORE1_RTR6_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR6_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR6_RTR_LBW_XACT_STAT_BASE 0x4374680ull
+#define DCORE1_RTR6_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR6_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR6_RTR_E2E_XACT_STAT_BASE 0x4374700ull
+#define DCORE1_RTR6_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR6_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR6_SPECIAL_BASE 0x4374E80ull
+#define DCORE1_RTR6_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR6_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR6_DBG_ADDR_BASE 0x4375000ull
+#define DCORE1_RTR6_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR6_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR6_DBG_ADDR_SPECIAL_BASE 0x4375E80ull
+#define DCORE1_RTR6_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR6_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_RTR7_CTRL_BASE 0x4378000ull
+#define DCORE1_RTR7_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_RTR7_CTRL_SECTION 0xE800
+#define mmDCORE1_RTR7_CTRL_SPECIAL_BASE 0x4378E80ull
+#define DCORE1_RTR7_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR7_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR7_H3_BASE 0x4379000ull
+#define DCORE1_RTR7_H3_MAX_OFFSET 0x1000
+#define DCORE1_RTR7_H3_SECTION 0xE800
+#define mmDCORE1_RTR7_H3_SPECIAL_BASE 0x4379E80ull
+#define DCORE1_RTR7_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR7_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR7_MSTR_IF_RR_SHRD_HBW_BASE 0x437A000ull
+#define DCORE1_RTR7_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR7_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_RTR7_MSTR_IF_RR_PRVT_HBW_BASE 0x437A200ull
+#define DCORE1_RTR7_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_RTR7_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_RTR7_MSTR_IF_RR_SHRD_LBW_BASE 0x437A400ull
+#define DCORE1_RTR7_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR7_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_RTR7_MSTR_IF_RR_PRVT_LBW_BASE 0x437A600ull
+#define DCORE1_RTR7_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_RTR7_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_RTR7_MSTR_IF_E2E_CRDT_BASE 0x437A800ull
+#define DCORE1_RTR7_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_RTR7_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_RTR7_MSTR_IF_AXUSER_BASE 0x437AA80ull
+#define DCORE1_RTR7_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_RTR7_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_RTR7_MSTR_IF_DBG_HBW_BASE 0x437AB00ull
+#define DCORE1_RTR7_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR7_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_RTR7_MSTR_IF_DBG_LBW_BASE 0x437AB80ull
+#define DCORE1_RTR7_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_RTR7_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_RTR7_MSTR_IF_CORE_HBW_BASE 0x437AC00ull
+#define DCORE1_RTR7_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_RTR7_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_RTR7_MSTR_IF_CORE_LBW_BASE 0x437AD80ull
+#define DCORE1_RTR7_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_RTR7_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_RTR7_MSTR_IF_SPECIAL_BASE 0x437AE80ull
+#define DCORE1_RTR7_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR7_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR7_ADD_DEC_HBW_BASE 0x437B000ull
+#define DCORE1_RTR7_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE1_RTR7_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE1_RTR7_ADD_DEC_LBW_BASE 0x437B400ull
+#define DCORE1_RTR7_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE1_RTR7_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE1_RTR7_ADD_DEC_SPECIAL_BASE 0x437BE80ull
+#define DCORE1_RTR7_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR7_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR7_BASE 0x437C000ull
+#define DCORE1_RTR7_MAX_OFFSET 0x1000
+#define DCORE1_RTR7_SECTION 0x3000
+#define mmDCORE1_RTR7_HBW_RD_RQ_LL_STAT_BASE 0x437C300ull
+#define DCORE1_RTR7_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_HBW_RD_RS_LL_STAT_BASE 0x437C340ull
+#define DCORE1_RTR7_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_HBW_WR_RQ_LL_STAT_BASE 0x437C380ull
+#define DCORE1_RTR7_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_HBW_WR_RS_LL_STAT_BASE 0x437C3C0ull
+#define DCORE1_RTR7_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_LBW_RD_RQ_LL_STAT_BASE 0x437C400ull
+#define DCORE1_RTR7_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_LBW_RD_RS_LL_STAT_BASE 0x437C440ull
+#define DCORE1_RTR7_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_LBW_WR_RQ_LL_STAT_BASE 0x437C480ull
+#define DCORE1_RTR7_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_LBW_WR_RS_LL_STAT_BASE 0x437C4C0ull
+#define DCORE1_RTR7_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_HBW_MFIFO_BASE 0x437C500ull
+#define DCORE1_RTR7_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE1_RTR7_E2E_RD_LL_STAT_BASE 0x437C540ull
+#define DCORE1_RTR7_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE1_RTR7_E2E_WR_LL_STAT_BASE 0x437C580ull
+#define DCORE1_RTR7_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE1_RTR7_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE1_RTR7_RTR_HBW_XACT_STAT_BASE 0x437C600ull
+#define DCORE1_RTR7_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR7_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR7_RTR_LBW_XACT_STAT_BASE 0x437C680ull
+#define DCORE1_RTR7_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR7_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE1_RTR7_RTR_E2E_XACT_STAT_BASE 0x437C700ull
+#define DCORE1_RTR7_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE1_RTR7_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE1_RTR7_SPECIAL_BASE 0x437CE80ull
+#define DCORE1_RTR7_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR7_SPECIAL_SECTION 0x1800
+#define mmDCORE1_RTR7_DBG_ADDR_BASE 0x437D000ull
+#define DCORE1_RTR7_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE1_RTR7_DBG_ADDR_SECTION 0xE800
+#define mmDCORE1_RTR7_DBG_ADDR_SPECIAL_BASE 0x437DE80ull
+#define DCORE1_RTR7_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_RTR7_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE1_SRAM0_BANK_BASE 0x4380000ull
+#define DCORE1_SRAM0_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM0_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM0_BANK_SPECIAL_BASE 0x4380E80ull
+#define DCORE1_SRAM0_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM0_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM0_RTR_BASE 0x4381000ull
+#define DCORE1_SRAM0_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM0_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM0_RTR_SPECIAL_BASE 0x4381E80ull
+#define DCORE1_SRAM0_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM0_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM0_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4382000ull
+#define DCORE1_SRAM0_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4382100ull
+#define DCORE1_SRAM0_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4382200ull
+#define DCORE1_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4382300ull
+#define DCORE1_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4382400ull
+#define DCORE1_SRAM0_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4382500ull
+#define DCORE1_SRAM0_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4382600ull
+#define DCORE1_SRAM0_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM0_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4382700ull
+#define DCORE1_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4382780ull
+#define DCORE1_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4382800ull
+#define DCORE1_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4382880ull
+#define DCORE1_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4382900ull
+#define DCORE1_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4382980ull
+#define DCORE1_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4382A00ull
+#define DCORE1_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4382A80ull
+#define DCORE1_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM0_DBG_CNT_SPECIAL_BASE 0x4382E80ull
+#define DCORE1_SRAM0_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM0_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM1_BANK_BASE 0x4388000ull
+#define DCORE1_SRAM1_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM1_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM1_BANK_SPECIAL_BASE 0x4388E80ull
+#define DCORE1_SRAM1_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM1_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM1_RTR_BASE 0x4389000ull
+#define DCORE1_SRAM1_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM1_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM1_RTR_SPECIAL_BASE 0x4389E80ull
+#define DCORE1_SRAM1_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM1_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM1_DBG_CNT_N_HBW_DBG_CNT_BASE 0x438A000ull
+#define DCORE1_SRAM1_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_S_HBW_DBG_CNT_BASE 0x438A100ull
+#define DCORE1_SRAM1_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x438A200ull
+#define DCORE1_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x438A300ull
+#define DCORE1_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_N_LBW_DBG_CNT_BASE 0x438A400ull
+#define DCORE1_SRAM1_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_S_LBW_DBG_CNT_BASE 0x438A500ull
+#define DCORE1_SRAM1_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_L_LBW_DBG_CNT_BASE 0x438A600ull
+#define DCORE1_SRAM1_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM1_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x438A700ull
+#define DCORE1_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x438A780ull
+#define DCORE1_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x438A800ull
+#define DCORE1_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x438A880ull
+#define DCORE1_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x438A900ull
+#define DCORE1_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x438A980ull
+#define DCORE1_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x438AA00ull
+#define DCORE1_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x438AA80ull
+#define DCORE1_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM1_DBG_CNT_SPECIAL_BASE 0x438AE80ull
+#define DCORE1_SRAM1_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM1_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM2_BANK_BASE 0x4390000ull
+#define DCORE1_SRAM2_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM2_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM2_BANK_SPECIAL_BASE 0x4390E80ull
+#define DCORE1_SRAM2_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM2_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM2_RTR_BASE 0x4391000ull
+#define DCORE1_SRAM2_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM2_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM2_RTR_SPECIAL_BASE 0x4391E80ull
+#define DCORE1_SRAM2_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM2_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM2_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4392000ull
+#define DCORE1_SRAM2_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4392100ull
+#define DCORE1_SRAM2_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4392200ull
+#define DCORE1_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4392300ull
+#define DCORE1_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4392400ull
+#define DCORE1_SRAM2_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4392500ull
+#define DCORE1_SRAM2_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4392600ull
+#define DCORE1_SRAM2_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM2_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4392700ull
+#define DCORE1_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4392780ull
+#define DCORE1_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4392800ull
+#define DCORE1_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4392880ull
+#define DCORE1_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4392900ull
+#define DCORE1_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4392980ull
+#define DCORE1_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4392A00ull
+#define DCORE1_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4392A80ull
+#define DCORE1_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM2_DBG_CNT_SPECIAL_BASE 0x4392E80ull
+#define DCORE1_SRAM2_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM2_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM3_BANK_BASE 0x4398000ull
+#define DCORE1_SRAM3_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM3_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM3_BANK_SPECIAL_BASE 0x4398E80ull
+#define DCORE1_SRAM3_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM3_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM3_RTR_BASE 0x4399000ull
+#define DCORE1_SRAM3_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM3_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM3_RTR_SPECIAL_BASE 0x4399E80ull
+#define DCORE1_SRAM3_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM3_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM3_DBG_CNT_N_HBW_DBG_CNT_BASE 0x439A000ull
+#define DCORE1_SRAM3_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_S_HBW_DBG_CNT_BASE 0x439A100ull
+#define DCORE1_SRAM3_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x439A200ull
+#define DCORE1_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x439A300ull
+#define DCORE1_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_N_LBW_DBG_CNT_BASE 0x439A400ull
+#define DCORE1_SRAM3_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_S_LBW_DBG_CNT_BASE 0x439A500ull
+#define DCORE1_SRAM3_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_L_LBW_DBG_CNT_BASE 0x439A600ull
+#define DCORE1_SRAM3_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM3_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x439A700ull
+#define DCORE1_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x439A780ull
+#define DCORE1_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x439A800ull
+#define DCORE1_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x439A880ull
+#define DCORE1_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x439A900ull
+#define DCORE1_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x439A980ull
+#define DCORE1_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x439AA00ull
+#define DCORE1_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x439AA80ull
+#define DCORE1_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM3_DBG_CNT_SPECIAL_BASE 0x439AE80ull
+#define DCORE1_SRAM3_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM3_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM4_BANK_BASE 0x43A0000ull
+#define DCORE1_SRAM4_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM4_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM4_BANK_SPECIAL_BASE 0x43A0E80ull
+#define DCORE1_SRAM4_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM4_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM4_RTR_BASE 0x43A1000ull
+#define DCORE1_SRAM4_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM4_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM4_RTR_SPECIAL_BASE 0x43A1E80ull
+#define DCORE1_SRAM4_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM4_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM4_DBG_CNT_N_HBW_DBG_CNT_BASE 0x43A2000ull
+#define DCORE1_SRAM4_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_S_HBW_DBG_CNT_BASE 0x43A2100ull
+#define DCORE1_SRAM4_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x43A2200ull
+#define DCORE1_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x43A2300ull
+#define DCORE1_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_N_LBW_DBG_CNT_BASE 0x43A2400ull
+#define DCORE1_SRAM4_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_S_LBW_DBG_CNT_BASE 0x43A2500ull
+#define DCORE1_SRAM4_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_L_LBW_DBG_CNT_BASE 0x43A2600ull
+#define DCORE1_SRAM4_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM4_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x43A2700ull
+#define DCORE1_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x43A2780ull
+#define DCORE1_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x43A2800ull
+#define DCORE1_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x43A2880ull
+#define DCORE1_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x43A2900ull
+#define DCORE1_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x43A2980ull
+#define DCORE1_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x43A2A00ull
+#define DCORE1_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x43A2A80ull
+#define DCORE1_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM4_DBG_CNT_SPECIAL_BASE 0x43A2E80ull
+#define DCORE1_SRAM4_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM4_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM5_BANK_BASE 0x43A8000ull
+#define DCORE1_SRAM5_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM5_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM5_BANK_SPECIAL_BASE 0x43A8E80ull
+#define DCORE1_SRAM5_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM5_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM5_RTR_BASE 0x43A9000ull
+#define DCORE1_SRAM5_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM5_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM5_RTR_SPECIAL_BASE 0x43A9E80ull
+#define DCORE1_SRAM5_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM5_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM5_DBG_CNT_N_HBW_DBG_CNT_BASE 0x43AA000ull
+#define DCORE1_SRAM5_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_S_HBW_DBG_CNT_BASE 0x43AA100ull
+#define DCORE1_SRAM5_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x43AA200ull
+#define DCORE1_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x43AA300ull
+#define DCORE1_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_N_LBW_DBG_CNT_BASE 0x43AA400ull
+#define DCORE1_SRAM5_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_S_LBW_DBG_CNT_BASE 0x43AA500ull
+#define DCORE1_SRAM5_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_L_LBW_DBG_CNT_BASE 0x43AA600ull
+#define DCORE1_SRAM5_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM5_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x43AA700ull
+#define DCORE1_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x43AA780ull
+#define DCORE1_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x43AA800ull
+#define DCORE1_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x43AA880ull
+#define DCORE1_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x43AA900ull
+#define DCORE1_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x43AA980ull
+#define DCORE1_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x43AAA00ull
+#define DCORE1_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x43AAA80ull
+#define DCORE1_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM5_DBG_CNT_SPECIAL_BASE 0x43AAE80ull
+#define DCORE1_SRAM5_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM5_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM6_BANK_BASE 0x43B0000ull
+#define DCORE1_SRAM6_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM6_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM6_BANK_SPECIAL_BASE 0x43B0E80ull
+#define DCORE1_SRAM6_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM6_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM6_RTR_BASE 0x43B1000ull
+#define DCORE1_SRAM6_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM6_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM6_RTR_SPECIAL_BASE 0x43B1E80ull
+#define DCORE1_SRAM6_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM6_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM6_DBG_CNT_N_HBW_DBG_CNT_BASE 0x43B2000ull
+#define DCORE1_SRAM6_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_S_HBW_DBG_CNT_BASE 0x43B2100ull
+#define DCORE1_SRAM6_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x43B2200ull
+#define DCORE1_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x43B2300ull
+#define DCORE1_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_N_LBW_DBG_CNT_BASE 0x43B2400ull
+#define DCORE1_SRAM6_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_S_LBW_DBG_CNT_BASE 0x43B2500ull
+#define DCORE1_SRAM6_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_L_LBW_DBG_CNT_BASE 0x43B2600ull
+#define DCORE1_SRAM6_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM6_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x43B2700ull
+#define DCORE1_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x43B2780ull
+#define DCORE1_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x43B2800ull
+#define DCORE1_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x43B2880ull
+#define DCORE1_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x43B2900ull
+#define DCORE1_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x43B2980ull
+#define DCORE1_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x43B2A00ull
+#define DCORE1_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x43B2A80ull
+#define DCORE1_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM6_DBG_CNT_SPECIAL_BASE 0x43B2E80ull
+#define DCORE1_SRAM6_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM6_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_SRAM7_BANK_BASE 0x43B8000ull
+#define DCORE1_SRAM7_BANK_MAX_OFFSET 0x1000
+#define DCORE1_SRAM7_BANK_SECTION 0xE800
+#define mmDCORE1_SRAM7_BANK_SPECIAL_BASE 0x43B8E80ull
+#define DCORE1_SRAM7_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM7_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM7_RTR_BASE 0x43B9000ull
+#define DCORE1_SRAM7_RTR_MAX_OFFSET 0x1000
+#define DCORE1_SRAM7_RTR_SECTION 0xE800
+#define mmDCORE1_SRAM7_RTR_SPECIAL_BASE 0x43B9E80ull
+#define DCORE1_SRAM7_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM7_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE1_SRAM7_DBG_CNT_N_HBW_DBG_CNT_BASE 0x43BA000ull
+#define DCORE1_SRAM7_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_S_HBW_DBG_CNT_BASE 0x43BA100ull
+#define DCORE1_SRAM7_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x43BA200ull
+#define DCORE1_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x43BA300ull
+#define DCORE1_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_N_LBW_DBG_CNT_BASE 0x43BA400ull
+#define DCORE1_SRAM7_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_S_LBW_DBG_CNT_BASE 0x43BA500ull
+#define DCORE1_SRAM7_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_L_LBW_DBG_CNT_BASE 0x43BA600ull
+#define DCORE1_SRAM7_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE1_SRAM7_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE1_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x43BA700ull
+#define DCORE1_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x43BA780ull
+#define DCORE1_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x43BA800ull
+#define DCORE1_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x43BA880ull
+#define DCORE1_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x43BA900ull
+#define DCORE1_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x43BA980ull
+#define DCORE1_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x43BAA00ull
+#define DCORE1_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE1_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x43BAA80ull
+#define DCORE1_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE1_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE1_SRAM7_DBG_CNT_SPECIAL_BASE 0x43BAE80ull
+#define DCORE1_SRAM7_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_SRAM7_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE1_EDMA0_QM_DCCM_BASE 0x43C0000ull
+#define DCORE1_EDMA0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_EDMA0_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_ARC_AUX_BASE 0x43C8000ull
+#define DCORE1_EDMA0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_EDMA0_QM_ARC_AUX_SPECIAL_BASE 0x43C8E80ull
+#define DCORE1_EDMA0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_EDMA0_QM_BASE 0x43CA000ull
+#define DCORE1_EDMA0_QM_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_QM_SECTION 0x9000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x43CA900ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x43CA908ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x43CA910ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x43CA918ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x43CA920ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x43CA928ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x43CA930ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x43CA938ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x43CA940ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x43CA948ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x43CA950ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x43CA958ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x43CA960ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x43CA968ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x43CA970ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x43CA978ull
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_EDMA0_QM_AXUSER_SECURED_BASE 0x43CAB00ull
+#define DCORE1_EDMA0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_EDMA0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_AXUSER_NONSECURED_BASE 0x43CAB80ull
+#define DCORE1_EDMA0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_EDMA0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_DBG_HBW_BASE 0x43CAC00ull
+#define DCORE1_EDMA0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_EDMA0_QM_DBG_LBW_BASE 0x43CAC80ull
+#define DCORE1_EDMA0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_EDMA0_QM_CGM_BASE 0x43CAD80ull
+#define DCORE1_EDMA0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_EDMA0_QM_CGM_SECTION 0x1000
+#define mmDCORE1_EDMA0_QM_SPECIAL_BASE 0x43CAE80ull
+#define DCORE1_EDMA0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_EDMA0_CORE_BASE 0x43CB000ull
+#define DCORE1_EDMA0_CORE_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_CORE_SECTION 0x8000
+#define mmDCORE1_EDMA0_CORE_CTX_AXUSER_BASE 0x43CB800ull
+#define DCORE1_EDMA0_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_EDMA0_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE1_EDMA0_CORE_CTX_BASE 0x43CB860ull
+#define DCORE1_EDMA0_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE1_EDMA0_CORE_CTX_SECTION 0x5A00
+#define mmDCORE1_EDMA0_CORE_KDMA_CGM_BASE 0x43CBE00ull
+#define DCORE1_EDMA0_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE1_EDMA0_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE1_EDMA0_CORE_SPECIAL_BASE 0x43CBE80ull
+#define DCORE1_EDMA0_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA0_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE1_EDMA0_MSTR_IF_RR_SHRD_HBW_BASE 0x43CC000ull
+#define DCORE1_EDMA0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_EDMA0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_EDMA0_MSTR_IF_RR_PRVT_HBW_BASE 0x43CC200ull
+#define DCORE1_EDMA0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_EDMA0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_EDMA0_MSTR_IF_RR_SHRD_LBW_BASE 0x43CC400ull
+#define DCORE1_EDMA0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_EDMA0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_EDMA0_MSTR_IF_RR_PRVT_LBW_BASE 0x43CC600ull
+#define DCORE1_EDMA0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_EDMA0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_EDMA0_MSTR_IF_E2E_CRDT_BASE 0x43CC800ull
+#define DCORE1_EDMA0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_EDMA0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_EDMA0_MSTR_IF_AXUSER_BASE 0x43CCA80ull
+#define DCORE1_EDMA0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_EDMA0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_EDMA0_MSTR_IF_DBG_HBW_BASE 0x43CCB00ull
+#define DCORE1_EDMA0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_EDMA0_MSTR_IF_DBG_LBW_BASE 0x43CCB80ull
+#define DCORE1_EDMA0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_EDMA0_MSTR_IF_CORE_HBW_BASE 0x43CCC00ull
+#define DCORE1_EDMA0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_EDMA0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_EDMA0_MSTR_IF_CORE_LBW_BASE 0x43CCD80ull
+#define DCORE1_EDMA0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_EDMA0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_EDMA0_MSTR_IF_SPECIAL_BASE 0x43CCE80ull
+#define DCORE1_EDMA0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_EDMA1_QM_DCCM_BASE 0x43D0000ull
+#define DCORE1_EDMA1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE1_EDMA1_QM_DCCM_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_ARC_AUX_BASE 0x43D8000ull
+#define DCORE1_EDMA1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE1_EDMA1_QM_ARC_AUX_SPECIAL_BASE 0x43D8E80ull
+#define DCORE1_EDMA1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE1_EDMA1_QM_BASE 0x43DA000ull
+#define DCORE1_EDMA1_QM_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_QM_SECTION 0x9000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x43DA900ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x43DA908ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x43DA910ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x43DA918ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x43DA920ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x43DA928ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x43DA930ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x43DA938ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x43DA940ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x43DA948ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x43DA950ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x43DA958ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x43DA960ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x43DA968ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x43DA970ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x43DA978ull
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE1_EDMA1_QM_AXUSER_SECURED_BASE 0x43DAB00ull
+#define DCORE1_EDMA1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE1_EDMA1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_AXUSER_NONSECURED_BASE 0x43DAB80ull
+#define DCORE1_EDMA1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE1_EDMA1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_DBG_HBW_BASE 0x43DAC00ull
+#define DCORE1_EDMA1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_EDMA1_QM_DBG_LBW_BASE 0x43DAC80ull
+#define DCORE1_EDMA1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE1_EDMA1_QM_CGM_BASE 0x43DAD80ull
+#define DCORE1_EDMA1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE1_EDMA1_QM_CGM_SECTION 0x1000
+#define mmDCORE1_EDMA1_QM_SPECIAL_BASE 0x43DAE80ull
+#define DCORE1_EDMA1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE1_EDMA1_CORE_BASE 0x43DB000ull
+#define DCORE1_EDMA1_CORE_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_CORE_SECTION 0x8000
+#define mmDCORE1_EDMA1_CORE_CTX_AXUSER_BASE 0x43DB800ull
+#define DCORE1_EDMA1_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_EDMA1_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE1_EDMA1_CORE_CTX_BASE 0x43DB860ull
+#define DCORE1_EDMA1_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE1_EDMA1_CORE_CTX_SECTION 0x5A00
+#define mmDCORE1_EDMA1_CORE_KDMA_CGM_BASE 0x43DBE00ull
+#define DCORE1_EDMA1_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE1_EDMA1_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE1_EDMA1_CORE_SPECIAL_BASE 0x43DBE80ull
+#define DCORE1_EDMA1_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA1_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE1_EDMA1_MSTR_IF_RR_SHRD_HBW_BASE 0x43DC000ull
+#define DCORE1_EDMA1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_EDMA1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_EDMA1_MSTR_IF_RR_PRVT_HBW_BASE 0x43DC200ull
+#define DCORE1_EDMA1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_EDMA1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_EDMA1_MSTR_IF_RR_SHRD_LBW_BASE 0x43DC400ull
+#define DCORE1_EDMA1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_EDMA1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_EDMA1_MSTR_IF_RR_PRVT_LBW_BASE 0x43DC600ull
+#define DCORE1_EDMA1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_EDMA1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_EDMA1_MSTR_IF_E2E_CRDT_BASE 0x43DC800ull
+#define DCORE1_EDMA1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_EDMA1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_EDMA1_MSTR_IF_AXUSER_BASE 0x43DCA80ull
+#define DCORE1_EDMA1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_EDMA1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_EDMA1_MSTR_IF_DBG_HBW_BASE 0x43DCB00ull
+#define DCORE1_EDMA1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_EDMA1_MSTR_IF_DBG_LBW_BASE 0x43DCB80ull
+#define DCORE1_EDMA1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_EDMA1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_EDMA1_MSTR_IF_CORE_HBW_BASE 0x43DCC00ull
+#define DCORE1_EDMA1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_EDMA1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_EDMA1_MSTR_IF_CORE_LBW_BASE 0x43DCD80ull
+#define DCORE1_EDMA1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_EDMA1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_EDMA1_MSTR_IF_SPECIAL_BASE 0x43DCE80ull
+#define DCORE1_EDMA1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_EDMA1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE1_DEC0_CMD_BASE 0x43E0000ull
+#define DCORE1_DEC0_CMD_MAX_OFFSET 0x1100
+#define DCORE1_DEC0_CMD_SECTION 0x1000
+#define mmDCORE1_DEC0_VSI_BASE 0x43E1000ull
+#define DCORE1_DEC0_VSI_MAX_OFFSET 0x6FC0
+#define DCORE1_DEC0_VSI_SECTION 0x1000
+#define mmDCORE1_DEC0_L2C_BASE 0x43E2000ull
+#define DCORE1_DEC0_L2C_MAX_OFFSET 0x39C0
+#define DCORE1_DEC0_L2C_SECTION 0x1000
+#define mmDCORE1_VDEC0_BRDG_CTRL_BASE 0x43E3000ull
+#define DCORE1_VDEC0_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x43E3800ull
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x43E3900ull
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x43E3A00ull
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x43E3B00ull
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE1_VDEC0_BRDG_CTRL_AXUSER_DEC_BASE 0x43E3C00ull
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE1_VDEC0_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE1_VDEC0_BRDG_CTRL_SPECIAL_BASE 0x43E3E80ull
+#define DCORE1_VDEC0_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_VDEC0_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_VDEC0_CTRL_BASE 0x43E4000ull
+#define DCORE1_VDEC0_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_CTRL_SECTION 0xE800
+#define mmDCORE1_VDEC0_CTRL_SPECIAL_BASE 0x43E4E80ull
+#define DCORE1_VDEC0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_VDEC0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE 0x43E5000ull
+#define DCORE1_VDEC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_VDEC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_VDEC0_MSTR_IF_RR_PRVT_HBW_BASE 0x43E5200ull
+#define DCORE1_VDEC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_VDEC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_VDEC0_MSTR_IF_RR_SHRD_LBW_BASE 0x43E5400ull
+#define DCORE1_VDEC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_VDEC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_VDEC0_MSTR_IF_RR_PRVT_LBW_BASE 0x43E5600ull
+#define DCORE1_VDEC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_VDEC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_VDEC0_MSTR_IF_E2E_CRDT_BASE 0x43E5800ull
+#define DCORE1_VDEC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_VDEC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_VDEC0_MSTR_IF_AXUSER_BASE 0x43E5A80ull
+#define DCORE1_VDEC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_VDEC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_VDEC0_MSTR_IF_DBG_HBW_BASE 0x43E5B00ull
+#define DCORE1_VDEC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_VDEC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_VDEC0_MSTR_IF_DBG_LBW_BASE 0x43E5B80ull
+#define DCORE1_VDEC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_VDEC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_VDEC0_MSTR_IF_CORE_HBW_BASE 0x43E5C00ull
+#define DCORE1_VDEC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_VDEC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_VDEC0_MSTR_IF_CORE_LBW_BASE 0x43E5D80ull
+#define DCORE1_VDEC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_VDEC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_VDEC0_MSTR_IF_SPECIAL_BASE 0x43E5E80ull
+#define DCORE1_VDEC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_VDEC0_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE1_DEC1_CMD_BASE 0x43F0000ull
+#define DCORE1_DEC1_CMD_MAX_OFFSET 0x1100
+#define DCORE1_DEC1_CMD_SECTION 0x1000
+#define mmDCORE1_DEC1_VSI_BASE 0x43F1000ull
+#define DCORE1_DEC1_VSI_MAX_OFFSET 0x6FC0
+#define DCORE1_DEC1_VSI_SECTION 0x1000
+#define mmDCORE1_DEC1_L2C_BASE 0x43F2000ull
+#define DCORE1_DEC1_L2C_MAX_OFFSET 0x39C0
+#define DCORE1_DEC1_L2C_SECTION 0x1000
+#define mmDCORE1_VDEC1_BRDG_CTRL_BASE 0x43F3000ull
+#define DCORE1_VDEC1_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x43F3800ull
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x43F3900ull
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x43F3A00ull
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x43F3B00ull
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE1_VDEC1_BRDG_CTRL_AXUSER_DEC_BASE 0x43F3C00ull
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE1_VDEC1_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE1_VDEC1_BRDG_CTRL_SPECIAL_BASE 0x43F3E80ull
+#define DCORE1_VDEC1_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_VDEC1_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_VDEC1_CTRL_BASE 0x43F4000ull
+#define DCORE1_VDEC1_CTRL_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_CTRL_SECTION 0xE800
+#define mmDCORE1_VDEC1_CTRL_SPECIAL_BASE 0x43F4E80ull
+#define DCORE1_VDEC1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_VDEC1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE 0x43F5000ull
+#define DCORE1_VDEC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_VDEC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE1_VDEC1_MSTR_IF_RR_PRVT_HBW_BASE 0x43F5200ull
+#define DCORE1_VDEC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE1_VDEC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE1_VDEC1_MSTR_IF_RR_SHRD_LBW_BASE 0x43F5400ull
+#define DCORE1_VDEC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_VDEC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE1_VDEC1_MSTR_IF_RR_PRVT_LBW_BASE 0x43F5600ull
+#define DCORE1_VDEC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE1_VDEC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE1_VDEC1_MSTR_IF_E2E_CRDT_BASE 0x43F5800ull
+#define DCORE1_VDEC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE1_VDEC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE1_VDEC1_MSTR_IF_AXUSER_BASE 0x43F5A80ull
+#define DCORE1_VDEC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE1_VDEC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE1_VDEC1_MSTR_IF_DBG_HBW_BASE 0x43F5B00ull
+#define DCORE1_VDEC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE1_VDEC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE1_VDEC1_MSTR_IF_DBG_LBW_BASE 0x43F5B80ull
+#define DCORE1_VDEC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE1_VDEC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE1_VDEC1_MSTR_IF_CORE_HBW_BASE 0x43F5C00ull
+#define DCORE1_VDEC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE1_VDEC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE1_VDEC1_MSTR_IF_CORE_LBW_BASE 0x43F5D80ull
+#define DCORE1_VDEC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE1_VDEC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE1_VDEC1_MSTR_IF_SPECIAL_BASE 0x43F5E80ull
+#define DCORE1_VDEC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_VDEC1_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE2_TPC0_QM_DCCM_BASE 0x4400000ull
+#define DCORE2_TPC0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC0_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_ARC_AUX_BASE 0x4408000ull
+#define DCORE2_TPC0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_TPC0_QM_ARC_AUX_SPECIAL_BASE 0x4408E80ull
+#define DCORE2_TPC0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC0_QM_BASE 0x440A000ull
+#define DCORE2_TPC0_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_QM_SECTION 0x9000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x440A900ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x440A908ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x440A910ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x440A918ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x440A920ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x440A928ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x440A930ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x440A938ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x440A940ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x440A948ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x440A950ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x440A958ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x440A960ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x440A968ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x440A970ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x440A978ull
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC0_QM_AXUSER_SECURED_BASE 0x440AB00ull
+#define DCORE2_TPC0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_AXUSER_NONSECURED_BASE 0x440AB80ull
+#define DCORE2_TPC0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_DBG_HBW_BASE 0x440AC00ull
+#define DCORE2_TPC0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC0_QM_DBG_LBW_BASE 0x440AC80ull
+#define DCORE2_TPC0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC0_QM_CGM_BASE 0x440AD80ull
+#define DCORE2_TPC0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC0_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC0_QM_SPECIAL_BASE 0x440AE80ull
+#define DCORE2_TPC0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_0_BASE 0x440B000ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC0_CFG_BASE 0x440B000ull
+#define DCORE2_TPC0_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC0_CFG_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_1_BASE 0x440B050ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_2_BASE 0x440B0A0ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_3_BASE 0x440B0F0ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_4_BASE 0x440B140ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_5_BASE 0x440B190ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_6_BASE 0x440B1E0ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_7_BASE 0x440B230ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_8_BASE 0x440B280ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_9_BASE 0x440B2D0ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_10_BASE 0x440B320ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_11_BASE 0x440B370ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_12_BASE 0x440B3C0ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_13_BASE 0x440B410ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_14_BASE 0x440B460ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_TENSOR_15_BASE 0x440B4B0ull
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_KERNEL_SYNC_OBJECT_BASE 0x440B500ull
+#define DCORE2_TPC0_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC0_CFG_KERNEL_BASE 0x440B508ull
+#define DCORE2_TPC0_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC0_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_0_BASE 0x440B5DCull
+#define DCORE2_TPC0_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_1_BASE 0x440B62Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_2_BASE 0x440B67Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_3_BASE 0x440B6CCull
+#define DCORE2_TPC0_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_4_BASE 0x440B71Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_5_BASE 0x440B76Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_6_BASE 0x440B7BCull
+#define DCORE2_TPC0_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_7_BASE 0x440B80Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_8_BASE 0x440B85Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_9_BASE 0x440B8ACull
+#define DCORE2_TPC0_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_10_BASE 0x440B8FCull
+#define DCORE2_TPC0_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_11_BASE 0x440B94Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_12_BASE 0x440B99Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_13_BASE 0x440B9ECull
+#define DCORE2_TPC0_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_14_BASE 0x440BA3Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_TENSOR_15_BASE 0x440BA8Cull
+#define DCORE2_TPC0_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC0_CFG_QM_SYNC_OBJECT_BASE 0x440BADCull
+#define DCORE2_TPC0_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC0_CFG_QM_BASE 0x440BAE4ull
+#define DCORE2_TPC0_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC0_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC0_CFG_AXUSER_BASE 0x440BE00ull
+#define DCORE2_TPC0_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC0_CFG_SPECIAL_BASE 0x440BE80ull
+#define DCORE2_TPC0_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC0_MSTR_IF_RR_SHRD_HBW_BASE 0x440C000ull
+#define DCORE2_TPC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_TPC0_MSTR_IF_RR_PRVT_HBW_BASE 0x440C200ull
+#define DCORE2_TPC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_TPC0_MSTR_IF_RR_SHRD_LBW_BASE 0x440C400ull
+#define DCORE2_TPC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_TPC0_MSTR_IF_RR_PRVT_LBW_BASE 0x440C600ull
+#define DCORE2_TPC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_TPC0_MSTR_IF_E2E_CRDT_BASE 0x440C800ull
+#define DCORE2_TPC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_TPC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_TPC0_MSTR_IF_AXUSER_BASE 0x440CA80ull
+#define DCORE2_TPC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC0_MSTR_IF_DBG_HBW_BASE 0x440CB00ull
+#define DCORE2_TPC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC0_MSTR_IF_DBG_LBW_BASE 0x440CB80ull
+#define DCORE2_TPC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_TPC0_MSTR_IF_CORE_HBW_BASE 0x440CC00ull
+#define DCORE2_TPC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_TPC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_TPC0_MSTR_IF_CORE_LBW_BASE 0x440CD80ull
+#define DCORE2_TPC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_TPC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_TPC0_MSTR_IF_SPECIAL_BASE 0x440CE80ull
+#define DCORE2_TPC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_TPC1_QM_DCCM_BASE 0x4410000ull
+#define DCORE2_TPC1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC1_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_ARC_AUX_BASE 0x4418000ull
+#define DCORE2_TPC1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_TPC1_QM_ARC_AUX_SPECIAL_BASE 0x4418E80ull
+#define DCORE2_TPC1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC1_QM_BASE 0x441A000ull
+#define DCORE2_TPC1_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_QM_SECTION 0x9000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x441A900ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x441A908ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x441A910ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x441A918ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x441A920ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x441A928ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x441A930ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x441A938ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x441A940ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x441A948ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x441A950ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x441A958ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x441A960ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x441A968ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x441A970ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x441A978ull
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC1_QM_AXUSER_SECURED_BASE 0x441AB00ull
+#define DCORE2_TPC1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_AXUSER_NONSECURED_BASE 0x441AB80ull
+#define DCORE2_TPC1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_DBG_HBW_BASE 0x441AC00ull
+#define DCORE2_TPC1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC1_QM_DBG_LBW_BASE 0x441AC80ull
+#define DCORE2_TPC1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC1_QM_CGM_BASE 0x441AD80ull
+#define DCORE2_TPC1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC1_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC1_QM_SPECIAL_BASE 0x441AE80ull
+#define DCORE2_TPC1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_0_BASE 0x441B000ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC1_CFG_BASE 0x441B000ull
+#define DCORE2_TPC1_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC1_CFG_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_1_BASE 0x441B050ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_2_BASE 0x441B0A0ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_3_BASE 0x441B0F0ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_4_BASE 0x441B140ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_5_BASE 0x441B190ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_6_BASE 0x441B1E0ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_7_BASE 0x441B230ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_8_BASE 0x441B280ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_9_BASE 0x441B2D0ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_10_BASE 0x441B320ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_11_BASE 0x441B370ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_12_BASE 0x441B3C0ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_13_BASE 0x441B410ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_14_BASE 0x441B460ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_TENSOR_15_BASE 0x441B4B0ull
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_KERNEL_SYNC_OBJECT_BASE 0x441B500ull
+#define DCORE2_TPC1_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC1_CFG_KERNEL_BASE 0x441B508ull
+#define DCORE2_TPC1_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC1_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_0_BASE 0x441B5DCull
+#define DCORE2_TPC1_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_1_BASE 0x441B62Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_2_BASE 0x441B67Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_3_BASE 0x441B6CCull
+#define DCORE2_TPC1_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_4_BASE 0x441B71Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_5_BASE 0x441B76Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_6_BASE 0x441B7BCull
+#define DCORE2_TPC1_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_7_BASE 0x441B80Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_8_BASE 0x441B85Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_9_BASE 0x441B8ACull
+#define DCORE2_TPC1_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_10_BASE 0x441B8FCull
+#define DCORE2_TPC1_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_11_BASE 0x441B94Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_12_BASE 0x441B99Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_13_BASE 0x441B9ECull
+#define DCORE2_TPC1_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_14_BASE 0x441BA3Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_TENSOR_15_BASE 0x441BA8Cull
+#define DCORE2_TPC1_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC1_CFG_QM_SYNC_OBJECT_BASE 0x441BADCull
+#define DCORE2_TPC1_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC1_CFG_QM_BASE 0x441BAE4ull
+#define DCORE2_TPC1_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC1_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC1_CFG_AXUSER_BASE 0x441BE00ull
+#define DCORE2_TPC1_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC1_CFG_SPECIAL_BASE 0x441BE80ull
+#define DCORE2_TPC1_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC1_MSTR_IF_RR_SHRD_HBW_BASE 0x441C000ull
+#define DCORE2_TPC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_TPC1_MSTR_IF_RR_PRVT_HBW_BASE 0x441C200ull
+#define DCORE2_TPC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_TPC1_MSTR_IF_RR_SHRD_LBW_BASE 0x441C400ull
+#define DCORE2_TPC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_TPC1_MSTR_IF_RR_PRVT_LBW_BASE 0x441C600ull
+#define DCORE2_TPC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_TPC1_MSTR_IF_E2E_CRDT_BASE 0x441C800ull
+#define DCORE2_TPC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_TPC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_TPC1_MSTR_IF_AXUSER_BASE 0x441CA80ull
+#define DCORE2_TPC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC1_MSTR_IF_DBG_HBW_BASE 0x441CB00ull
+#define DCORE2_TPC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC1_MSTR_IF_DBG_LBW_BASE 0x441CB80ull
+#define DCORE2_TPC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_TPC1_MSTR_IF_CORE_HBW_BASE 0x441CC00ull
+#define DCORE2_TPC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_TPC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_TPC1_MSTR_IF_CORE_LBW_BASE 0x441CD80ull
+#define DCORE2_TPC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_TPC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_TPC1_MSTR_IF_SPECIAL_BASE 0x441CE80ull
+#define DCORE2_TPC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_TPC2_QM_DCCM_BASE 0x4420000ull
+#define DCORE2_TPC2_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC2_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_ARC_AUX_BASE 0x4428000ull
+#define DCORE2_TPC2_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_TPC2_QM_ARC_AUX_SPECIAL_BASE 0x4428E80ull
+#define DCORE2_TPC2_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC2_QM_BASE 0x442A000ull
+#define DCORE2_TPC2_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_QM_SECTION 0x9000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR0_BASE 0x442A900ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR1_BASE 0x442A908ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR2_BASE 0x442A910ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR3_BASE 0x442A918ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR4_BASE 0x442A920ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR5_BASE 0x442A928ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR6_BASE 0x442A930ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR7_BASE 0x442A938ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR8_BASE 0x442A940ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR9_BASE 0x442A948ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR10_BASE 0x442A950ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR11_BASE 0x442A958ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR12_BASE 0x442A960ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR13_BASE 0x442A968ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR14_BASE 0x442A970ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR15_BASE 0x442A978ull
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC2_QM_AXUSER_SECURED_BASE 0x442AB00ull
+#define DCORE2_TPC2_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_AXUSER_NONSECURED_BASE 0x442AB80ull
+#define DCORE2_TPC2_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_DBG_HBW_BASE 0x442AC00ull
+#define DCORE2_TPC2_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC2_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC2_QM_DBG_LBW_BASE 0x442AC80ull
+#define DCORE2_TPC2_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC2_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC2_QM_CGM_BASE 0x442AD80ull
+#define DCORE2_TPC2_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC2_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC2_QM_SPECIAL_BASE 0x442AE80ull
+#define DCORE2_TPC2_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_0_BASE 0x442B000ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC2_CFG_BASE 0x442B000ull
+#define DCORE2_TPC2_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC2_CFG_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_1_BASE 0x442B050ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_2_BASE 0x442B0A0ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_3_BASE 0x442B0F0ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_4_BASE 0x442B140ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_5_BASE 0x442B190ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_6_BASE 0x442B1E0ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_7_BASE 0x442B230ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_8_BASE 0x442B280ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_9_BASE 0x442B2D0ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_10_BASE 0x442B320ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_11_BASE 0x442B370ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_12_BASE 0x442B3C0ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_13_BASE 0x442B410ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_14_BASE 0x442B460ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_TENSOR_15_BASE 0x442B4B0ull
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_KERNEL_SYNC_OBJECT_BASE 0x442B500ull
+#define DCORE2_TPC2_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC2_CFG_KERNEL_BASE 0x442B508ull
+#define DCORE2_TPC2_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC2_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_0_BASE 0x442B5DCull
+#define DCORE2_TPC2_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_1_BASE 0x442B62Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_2_BASE 0x442B67Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_3_BASE 0x442B6CCull
+#define DCORE2_TPC2_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_4_BASE 0x442B71Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_5_BASE 0x442B76Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_6_BASE 0x442B7BCull
+#define DCORE2_TPC2_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_7_BASE 0x442B80Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_8_BASE 0x442B85Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_9_BASE 0x442B8ACull
+#define DCORE2_TPC2_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_10_BASE 0x442B8FCull
+#define DCORE2_TPC2_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_11_BASE 0x442B94Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_12_BASE 0x442B99Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_13_BASE 0x442B9ECull
+#define DCORE2_TPC2_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_14_BASE 0x442BA3Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_TENSOR_15_BASE 0x442BA8Cull
+#define DCORE2_TPC2_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC2_CFG_QM_SYNC_OBJECT_BASE 0x442BADCull
+#define DCORE2_TPC2_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC2_CFG_QM_BASE 0x442BAE4ull
+#define DCORE2_TPC2_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC2_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC2_CFG_AXUSER_BASE 0x442BE00ull
+#define DCORE2_TPC2_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC2_CFG_SPECIAL_BASE 0x442BE80ull
+#define DCORE2_TPC2_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC2_MSTR_IF_RR_SHRD_HBW_BASE 0x442C000ull
+#define DCORE2_TPC2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_TPC2_MSTR_IF_RR_PRVT_HBW_BASE 0x442C200ull
+#define DCORE2_TPC2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_TPC2_MSTR_IF_RR_SHRD_LBW_BASE 0x442C400ull
+#define DCORE2_TPC2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_TPC2_MSTR_IF_RR_PRVT_LBW_BASE 0x442C600ull
+#define DCORE2_TPC2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_TPC2_MSTR_IF_E2E_CRDT_BASE 0x442C800ull
+#define DCORE2_TPC2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_TPC2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_TPC2_MSTR_IF_AXUSER_BASE 0x442CA80ull
+#define DCORE2_TPC2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC2_MSTR_IF_DBG_HBW_BASE 0x442CB00ull
+#define DCORE2_TPC2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC2_MSTR_IF_DBG_LBW_BASE 0x442CB80ull
+#define DCORE2_TPC2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_TPC2_MSTR_IF_CORE_HBW_BASE 0x442CC00ull
+#define DCORE2_TPC2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_TPC2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_TPC2_MSTR_IF_CORE_LBW_BASE 0x442CD80ull
+#define DCORE2_TPC2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_TPC2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_TPC2_MSTR_IF_SPECIAL_BASE 0x442CE80ull
+#define DCORE2_TPC2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC2_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_TPC3_QM_DCCM_BASE 0x4430000ull
+#define DCORE2_TPC3_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC3_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_ARC_AUX_BASE 0x4438000ull
+#define DCORE2_TPC3_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_TPC3_QM_ARC_AUX_SPECIAL_BASE 0x4438E80ull
+#define DCORE2_TPC3_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC3_QM_BASE 0x443A000ull
+#define DCORE2_TPC3_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_QM_SECTION 0x9000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR0_BASE 0x443A900ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR1_BASE 0x443A908ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR2_BASE 0x443A910ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR3_BASE 0x443A918ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR4_BASE 0x443A920ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR5_BASE 0x443A928ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR6_BASE 0x443A930ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR7_BASE 0x443A938ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR8_BASE 0x443A940ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR9_BASE 0x443A948ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR10_BASE 0x443A950ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR11_BASE 0x443A958ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR12_BASE 0x443A960ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR13_BASE 0x443A968ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR14_BASE 0x443A970ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR15_BASE 0x443A978ull
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC3_QM_AXUSER_SECURED_BASE 0x443AB00ull
+#define DCORE2_TPC3_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_AXUSER_NONSECURED_BASE 0x443AB80ull
+#define DCORE2_TPC3_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_DBG_HBW_BASE 0x443AC00ull
+#define DCORE2_TPC3_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC3_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC3_QM_DBG_LBW_BASE 0x443AC80ull
+#define DCORE2_TPC3_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC3_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC3_QM_CGM_BASE 0x443AD80ull
+#define DCORE2_TPC3_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC3_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC3_QM_SPECIAL_BASE 0x443AE80ull
+#define DCORE2_TPC3_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_0_BASE 0x443B000ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC3_CFG_BASE 0x443B000ull
+#define DCORE2_TPC3_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC3_CFG_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_1_BASE 0x443B050ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_2_BASE 0x443B0A0ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_3_BASE 0x443B0F0ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_4_BASE 0x443B140ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_5_BASE 0x443B190ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_6_BASE 0x443B1E0ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_7_BASE 0x443B230ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_8_BASE 0x443B280ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_9_BASE 0x443B2D0ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_10_BASE 0x443B320ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_11_BASE 0x443B370ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_12_BASE 0x443B3C0ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_13_BASE 0x443B410ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_14_BASE 0x443B460ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_TENSOR_15_BASE 0x443B4B0ull
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_KERNEL_SYNC_OBJECT_BASE 0x443B500ull
+#define DCORE2_TPC3_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC3_CFG_KERNEL_BASE 0x443B508ull
+#define DCORE2_TPC3_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC3_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_0_BASE 0x443B5DCull
+#define DCORE2_TPC3_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_1_BASE 0x443B62Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_2_BASE 0x443B67Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_3_BASE 0x443B6CCull
+#define DCORE2_TPC3_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_4_BASE 0x443B71Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_5_BASE 0x443B76Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_6_BASE 0x443B7BCull
+#define DCORE2_TPC3_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_7_BASE 0x443B80Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_8_BASE 0x443B85Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_9_BASE 0x443B8ACull
+#define DCORE2_TPC3_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_10_BASE 0x443B8FCull
+#define DCORE2_TPC3_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_11_BASE 0x443B94Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_12_BASE 0x443B99Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_13_BASE 0x443B9ECull
+#define DCORE2_TPC3_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_14_BASE 0x443BA3Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_TENSOR_15_BASE 0x443BA8Cull
+#define DCORE2_TPC3_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC3_CFG_QM_SYNC_OBJECT_BASE 0x443BADCull
+#define DCORE2_TPC3_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC3_CFG_QM_BASE 0x443BAE4ull
+#define DCORE2_TPC3_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC3_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC3_CFG_AXUSER_BASE 0x443BE00ull
+#define DCORE2_TPC3_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC3_CFG_SPECIAL_BASE 0x443BE80ull
+#define DCORE2_TPC3_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC3_MSTR_IF_RR_SHRD_HBW_BASE 0x443C000ull
+#define DCORE2_TPC3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_TPC3_MSTR_IF_RR_PRVT_HBW_BASE 0x443C200ull
+#define DCORE2_TPC3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_TPC3_MSTR_IF_RR_SHRD_LBW_BASE 0x443C400ull
+#define DCORE2_TPC3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_TPC3_MSTR_IF_RR_PRVT_LBW_BASE 0x443C600ull
+#define DCORE2_TPC3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_TPC3_MSTR_IF_E2E_CRDT_BASE 0x443C800ull
+#define DCORE2_TPC3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_TPC3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_TPC3_MSTR_IF_AXUSER_BASE 0x443CA80ull
+#define DCORE2_TPC3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC3_MSTR_IF_DBG_HBW_BASE 0x443CB00ull
+#define DCORE2_TPC3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC3_MSTR_IF_DBG_LBW_BASE 0x443CB80ull
+#define DCORE2_TPC3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_TPC3_MSTR_IF_CORE_HBW_BASE 0x443CC00ull
+#define DCORE2_TPC3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_TPC3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_TPC3_MSTR_IF_CORE_LBW_BASE 0x443CD80ull
+#define DCORE2_TPC3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_TPC3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_TPC3_MSTR_IF_SPECIAL_BASE 0x443CE80ull
+#define DCORE2_TPC3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC3_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_TPC4_QM_DCCM_BASE 0x4440000ull
+#define DCORE2_TPC4_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC4_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_ARC_AUX_BASE 0x4448000ull
+#define DCORE2_TPC4_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_TPC4_QM_ARC_AUX_SPECIAL_BASE 0x4448E80ull
+#define DCORE2_TPC4_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC4_QM_BASE 0x444A000ull
+#define DCORE2_TPC4_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_QM_SECTION 0x9000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR0_BASE 0x444A900ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR1_BASE 0x444A908ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR2_BASE 0x444A910ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR3_BASE 0x444A918ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR4_BASE 0x444A920ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR5_BASE 0x444A928ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR6_BASE 0x444A930ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR7_BASE 0x444A938ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR8_BASE 0x444A940ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR9_BASE 0x444A948ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR10_BASE 0x444A950ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR11_BASE 0x444A958ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR12_BASE 0x444A960ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR13_BASE 0x444A968ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR14_BASE 0x444A970ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR15_BASE 0x444A978ull
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC4_QM_AXUSER_SECURED_BASE 0x444AB00ull
+#define DCORE2_TPC4_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_AXUSER_NONSECURED_BASE 0x444AB80ull
+#define DCORE2_TPC4_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_DBG_HBW_BASE 0x444AC00ull
+#define DCORE2_TPC4_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC4_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC4_QM_DBG_LBW_BASE 0x444AC80ull
+#define DCORE2_TPC4_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC4_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC4_QM_CGM_BASE 0x444AD80ull
+#define DCORE2_TPC4_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC4_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC4_QM_SPECIAL_BASE 0x444AE80ull
+#define DCORE2_TPC4_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_0_BASE 0x444B000ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC4_CFG_BASE 0x444B000ull
+#define DCORE2_TPC4_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC4_CFG_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_1_BASE 0x444B050ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_2_BASE 0x444B0A0ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_3_BASE 0x444B0F0ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_4_BASE 0x444B140ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_5_BASE 0x444B190ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_6_BASE 0x444B1E0ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_7_BASE 0x444B230ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_8_BASE 0x444B280ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_9_BASE 0x444B2D0ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_10_BASE 0x444B320ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_11_BASE 0x444B370ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_12_BASE 0x444B3C0ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_13_BASE 0x444B410ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_14_BASE 0x444B460ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_TENSOR_15_BASE 0x444B4B0ull
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_KERNEL_SYNC_OBJECT_BASE 0x444B500ull
+#define DCORE2_TPC4_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC4_CFG_KERNEL_BASE 0x444B508ull
+#define DCORE2_TPC4_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC4_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_0_BASE 0x444B5DCull
+#define DCORE2_TPC4_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_1_BASE 0x444B62Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_2_BASE 0x444B67Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_3_BASE 0x444B6CCull
+#define DCORE2_TPC4_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_4_BASE 0x444B71Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_5_BASE 0x444B76Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_6_BASE 0x444B7BCull
+#define DCORE2_TPC4_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_7_BASE 0x444B80Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_8_BASE 0x444B85Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_9_BASE 0x444B8ACull
+#define DCORE2_TPC4_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_10_BASE 0x444B8FCull
+#define DCORE2_TPC4_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_11_BASE 0x444B94Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_12_BASE 0x444B99Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_13_BASE 0x444B9ECull
+#define DCORE2_TPC4_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_14_BASE 0x444BA3Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_TENSOR_15_BASE 0x444BA8Cull
+#define DCORE2_TPC4_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC4_CFG_QM_SYNC_OBJECT_BASE 0x444BADCull
+#define DCORE2_TPC4_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC4_CFG_QM_BASE 0x444BAE4ull
+#define DCORE2_TPC4_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC4_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC4_CFG_AXUSER_BASE 0x444BE00ull
+#define DCORE2_TPC4_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC4_CFG_SPECIAL_BASE 0x444BE80ull
+#define DCORE2_TPC4_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC4_MSTR_IF_RR_SHRD_HBW_BASE 0x444C000ull
+#define DCORE2_TPC4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_TPC4_MSTR_IF_RR_PRVT_HBW_BASE 0x444C200ull
+#define DCORE2_TPC4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_TPC4_MSTR_IF_RR_SHRD_LBW_BASE 0x444C400ull
+#define DCORE2_TPC4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_TPC4_MSTR_IF_RR_PRVT_LBW_BASE 0x444C600ull
+#define DCORE2_TPC4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_TPC4_MSTR_IF_E2E_CRDT_BASE 0x444C800ull
+#define DCORE2_TPC4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_TPC4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_TPC4_MSTR_IF_AXUSER_BASE 0x444CA80ull
+#define DCORE2_TPC4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC4_MSTR_IF_DBG_HBW_BASE 0x444CB00ull
+#define DCORE2_TPC4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC4_MSTR_IF_DBG_LBW_BASE 0x444CB80ull
+#define DCORE2_TPC4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_TPC4_MSTR_IF_CORE_HBW_BASE 0x444CC00ull
+#define DCORE2_TPC4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_TPC4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_TPC4_MSTR_IF_CORE_LBW_BASE 0x444CD80ull
+#define DCORE2_TPC4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_TPC4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_TPC4_MSTR_IF_SPECIAL_BASE 0x444CE80ull
+#define DCORE2_TPC4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC4_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_TPC5_QM_DCCM_BASE 0x4450000ull
+#define DCORE2_TPC5_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_TPC5_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_ARC_AUX_BASE 0x4458000ull
+#define DCORE2_TPC5_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_TPC5_QM_ARC_AUX_SPECIAL_BASE 0x4458E80ull
+#define DCORE2_TPC5_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_TPC5_QM_BASE 0x445A000ull
+#define DCORE2_TPC5_QM_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_QM_SECTION 0x9000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR0_BASE 0x445A900ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR1_BASE 0x445A908ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR2_BASE 0x445A910ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR3_BASE 0x445A918ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR4_BASE 0x445A920ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR5_BASE 0x445A928ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR6_BASE 0x445A930ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR7_BASE 0x445A938ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR8_BASE 0x445A940ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR9_BASE 0x445A948ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR10_BASE 0x445A950ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR11_BASE 0x445A958ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR12_BASE 0x445A960ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR13_BASE 0x445A968ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR14_BASE 0x445A970ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR15_BASE 0x445A978ull
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_TPC5_QM_AXUSER_SECURED_BASE 0x445AB00ull
+#define DCORE2_TPC5_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_AXUSER_NONSECURED_BASE 0x445AB80ull
+#define DCORE2_TPC5_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_DBG_HBW_BASE 0x445AC00ull
+#define DCORE2_TPC5_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC5_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC5_QM_DBG_LBW_BASE 0x445AC80ull
+#define DCORE2_TPC5_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC5_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_TPC5_QM_CGM_BASE 0x445AD80ull
+#define DCORE2_TPC5_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_TPC5_QM_CGM_SECTION 0x1000
+#define mmDCORE2_TPC5_QM_SPECIAL_BASE 0x445AE80ull
+#define DCORE2_TPC5_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_0_BASE 0x445B000ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_BASE 0x445B000ull
+#define DCORE2_TPC5_CFG_MAX_OFFSET 0x1000
+#define DCORE2_TPC5_CFG_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_1_BASE 0x445B050ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_2_BASE 0x445B0A0ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_3_BASE 0x445B0F0ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_4_BASE 0x445B140ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_5_BASE 0x445B190ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_6_BASE 0x445B1E0ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_7_BASE 0x445B230ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_8_BASE 0x445B280ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_9_BASE 0x445B2D0ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_10_BASE 0x445B320ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_11_BASE 0x445B370ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_12_BASE 0x445B3C0ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_13_BASE 0x445B410ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_14_BASE 0x445B460ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_TENSOR_15_BASE 0x445B4B0ull
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_KERNEL_SYNC_OBJECT_BASE 0x445B500ull
+#define DCORE2_TPC5_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC5_CFG_KERNEL_BASE 0x445B508ull
+#define DCORE2_TPC5_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE2_TPC5_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_0_BASE 0x445B5DCull
+#define DCORE2_TPC5_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_1_BASE 0x445B62Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_2_BASE 0x445B67Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_3_BASE 0x445B6CCull
+#define DCORE2_TPC5_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_4_BASE 0x445B71Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_5_BASE 0x445B76Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_6_BASE 0x445B7BCull
+#define DCORE2_TPC5_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_7_BASE 0x445B80Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_8_BASE 0x445B85Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_9_BASE 0x445B8ACull
+#define DCORE2_TPC5_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_10_BASE 0x445B8FCull
+#define DCORE2_TPC5_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_11_BASE 0x445B94Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_12_BASE 0x445B99Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_13_BASE 0x445B9ECull
+#define DCORE2_TPC5_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_14_BASE 0x445BA3Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_TENSOR_15_BASE 0x445BA8Cull
+#define DCORE2_TPC5_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE2_TPC5_CFG_QM_SYNC_OBJECT_BASE 0x445BADCull
+#define DCORE2_TPC5_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE2_TPC5_CFG_QM_BASE 0x445BAE4ull
+#define DCORE2_TPC5_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE2_TPC5_CFG_QM_SECTION 0x31C0
+#define mmDCORE2_TPC5_CFG_AXUSER_BASE 0x445BE00ull
+#define DCORE2_TPC5_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC5_CFG_SPECIAL_BASE 0x445BE80ull
+#define DCORE2_TPC5_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC5_MSTR_IF_RR_SHRD_HBW_BASE 0x445C000ull
+#define DCORE2_TPC5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_TPC5_MSTR_IF_RR_PRVT_HBW_BASE 0x445C200ull
+#define DCORE2_TPC5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_TPC5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_TPC5_MSTR_IF_RR_SHRD_LBW_BASE 0x445C400ull
+#define DCORE2_TPC5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_TPC5_MSTR_IF_RR_PRVT_LBW_BASE 0x445C600ull
+#define DCORE2_TPC5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_TPC5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_TPC5_MSTR_IF_E2E_CRDT_BASE 0x445C800ull
+#define DCORE2_TPC5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_TPC5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_TPC5_MSTR_IF_AXUSER_BASE 0x445CA80ull
+#define DCORE2_TPC5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_TPC5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_TPC5_MSTR_IF_DBG_HBW_BASE 0x445CB00ull
+#define DCORE2_TPC5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_TPC5_MSTR_IF_DBG_LBW_BASE 0x445CB80ull
+#define DCORE2_TPC5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_TPC5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_TPC5_MSTR_IF_CORE_HBW_BASE 0x445CC00ull
+#define DCORE2_TPC5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_TPC5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_TPC5_MSTR_IF_CORE_LBW_BASE 0x445CD80ull
+#define DCORE2_TPC5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_TPC5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_TPC5_MSTR_IF_SPECIAL_BASE 0x445CE80ull
+#define DCORE2_TPC5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC5_MSTR_IF_SPECIAL_SECTION 0x23180
+#define mmDCORE2_HMMU0_MMU_BASE 0x4480000ull
+#define DCORE2_HMMU0_MMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_MMU_SECTION 0xE800
+#define mmDCORE2_HMMU0_MMU_SPECIAL_BASE 0x4480E80ull
+#define DCORE2_HMMU0_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU0_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU0_STLB_BASE 0x4481000ull
+#define DCORE2_HMMU0_STLB_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_STLB_SECTION 0xE800
+#define mmDCORE2_HMMU0_STLB_SPECIAL_BASE 0x4481E80ull
+#define DCORE2_HMMU0_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU0_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE2_HMMU0_SCRAMB_OUT_BASE 0x4483000ull
+#define DCORE2_HMMU0_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE2_HMMU0_SCRAMB_OUT_SPECIAL_BASE 0x4483E80ull
+#define DCORE2_HMMU0_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU0_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU0_MSTR_IF_RR_SHRD_HBW_BASE 0x4484000ull
+#define DCORE2_HMMU0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU0_MSTR_IF_RR_PRVT_HBW_BASE 0x4484200ull
+#define DCORE2_HMMU0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU0_MSTR_IF_RR_SHRD_LBW_BASE 0x4484400ull
+#define DCORE2_HMMU0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU0_MSTR_IF_RR_PRVT_LBW_BASE 0x4484600ull
+#define DCORE2_HMMU0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU0_MSTR_IF_E2E_CRDT_BASE 0x4484800ull
+#define DCORE2_HMMU0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_HMMU0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_HMMU0_MSTR_IF_AXUSER_BASE 0x4484A80ull
+#define DCORE2_HMMU0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_HMMU0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_HMMU0_MSTR_IF_DBG_HBW_BASE 0x4484B00ull
+#define DCORE2_HMMU0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_HMMU0_MSTR_IF_DBG_LBW_BASE 0x4484B80ull
+#define DCORE2_HMMU0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_HMMU0_MSTR_IF_CORE_HBW_BASE 0x4484C00ull
+#define DCORE2_HMMU0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_HMMU0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_HMMU0_MSTR_IF_CORE_LBW_BASE 0x4484D80ull
+#define DCORE2_HMMU0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_HMMU0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_HMMU0_MSTR_IF_SPECIAL_BASE 0x4484E80ull
+#define DCORE2_HMMU0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU0_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE2_HMMU1_MMU_BASE 0x4490000ull
+#define DCORE2_HMMU1_MMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_MMU_SECTION 0xE800
+#define mmDCORE2_HMMU1_MMU_SPECIAL_BASE 0x4490E80ull
+#define DCORE2_HMMU1_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU1_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU1_STLB_BASE 0x4491000ull
+#define DCORE2_HMMU1_STLB_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_STLB_SECTION 0xE800
+#define mmDCORE2_HMMU1_STLB_SPECIAL_BASE 0x4491E80ull
+#define DCORE2_HMMU1_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU1_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE2_HMMU1_SCRAMB_OUT_BASE 0x4493000ull
+#define DCORE2_HMMU1_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE2_HMMU1_SCRAMB_OUT_SPECIAL_BASE 0x4493E80ull
+#define DCORE2_HMMU1_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU1_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU1_MSTR_IF_RR_SHRD_HBW_BASE 0x4494000ull
+#define DCORE2_HMMU1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU1_MSTR_IF_RR_PRVT_HBW_BASE 0x4494200ull
+#define DCORE2_HMMU1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU1_MSTR_IF_RR_SHRD_LBW_BASE 0x4494400ull
+#define DCORE2_HMMU1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU1_MSTR_IF_RR_PRVT_LBW_BASE 0x4494600ull
+#define DCORE2_HMMU1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU1_MSTR_IF_E2E_CRDT_BASE 0x4494800ull
+#define DCORE2_HMMU1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_HMMU1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_HMMU1_MSTR_IF_AXUSER_BASE 0x4494A80ull
+#define DCORE2_HMMU1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_HMMU1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_HMMU1_MSTR_IF_DBG_HBW_BASE 0x4494B00ull
+#define DCORE2_HMMU1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_HMMU1_MSTR_IF_DBG_LBW_BASE 0x4494B80ull
+#define DCORE2_HMMU1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_HMMU1_MSTR_IF_CORE_HBW_BASE 0x4494C00ull
+#define DCORE2_HMMU1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_HMMU1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_HMMU1_MSTR_IF_CORE_LBW_BASE 0x4494D80ull
+#define DCORE2_HMMU1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_HMMU1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_HMMU1_MSTR_IF_SPECIAL_BASE 0x4494E80ull
+#define DCORE2_HMMU1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU1_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE2_HMMU2_MMU_BASE 0x44A0000ull
+#define DCORE2_HMMU2_MMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_MMU_SECTION 0xE800
+#define mmDCORE2_HMMU2_MMU_SPECIAL_BASE 0x44A0E80ull
+#define DCORE2_HMMU2_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU2_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU2_STLB_BASE 0x44A1000ull
+#define DCORE2_HMMU2_STLB_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_STLB_SECTION 0xE800
+#define mmDCORE2_HMMU2_STLB_SPECIAL_BASE 0x44A1E80ull
+#define DCORE2_HMMU2_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU2_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE2_HMMU2_SCRAMB_OUT_BASE 0x44A3000ull
+#define DCORE2_HMMU2_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE2_HMMU2_SCRAMB_OUT_SPECIAL_BASE 0x44A3E80ull
+#define DCORE2_HMMU2_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU2_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU2_MSTR_IF_RR_SHRD_HBW_BASE 0x44A4000ull
+#define DCORE2_HMMU2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU2_MSTR_IF_RR_PRVT_HBW_BASE 0x44A4200ull
+#define DCORE2_HMMU2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU2_MSTR_IF_RR_SHRD_LBW_BASE 0x44A4400ull
+#define DCORE2_HMMU2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU2_MSTR_IF_RR_PRVT_LBW_BASE 0x44A4600ull
+#define DCORE2_HMMU2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU2_MSTR_IF_E2E_CRDT_BASE 0x44A4800ull
+#define DCORE2_HMMU2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_HMMU2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_HMMU2_MSTR_IF_AXUSER_BASE 0x44A4A80ull
+#define DCORE2_HMMU2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_HMMU2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_HMMU2_MSTR_IF_DBG_HBW_BASE 0x44A4B00ull
+#define DCORE2_HMMU2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_HMMU2_MSTR_IF_DBG_LBW_BASE 0x44A4B80ull
+#define DCORE2_HMMU2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_HMMU2_MSTR_IF_CORE_HBW_BASE 0x44A4C00ull
+#define DCORE2_HMMU2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_HMMU2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_HMMU2_MSTR_IF_CORE_LBW_BASE 0x44A4D80ull
+#define DCORE2_HMMU2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_HMMU2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_HMMU2_MSTR_IF_SPECIAL_BASE 0x44A4E80ull
+#define DCORE2_HMMU2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU2_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE2_HMMU3_MMU_BASE 0x44B0000ull
+#define DCORE2_HMMU3_MMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_MMU_SECTION 0xE800
+#define mmDCORE2_HMMU3_MMU_SPECIAL_BASE 0x44B0E80ull
+#define DCORE2_HMMU3_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU3_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU3_STLB_BASE 0x44B1000ull
+#define DCORE2_HMMU3_STLB_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_STLB_SECTION 0xE800
+#define mmDCORE2_HMMU3_STLB_SPECIAL_BASE 0x44B1E80ull
+#define DCORE2_HMMU3_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU3_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE2_HMMU3_SCRAMB_OUT_BASE 0x44B3000ull
+#define DCORE2_HMMU3_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE2_HMMU3_SCRAMB_OUT_SPECIAL_BASE 0x44B3E80ull
+#define DCORE2_HMMU3_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU3_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HMMU3_MSTR_IF_RR_SHRD_HBW_BASE 0x44B4000ull
+#define DCORE2_HMMU3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU3_MSTR_IF_RR_PRVT_HBW_BASE 0x44B4200ull
+#define DCORE2_HMMU3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_HMMU3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_HMMU3_MSTR_IF_RR_SHRD_LBW_BASE 0x44B4400ull
+#define DCORE2_HMMU3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU3_MSTR_IF_RR_PRVT_LBW_BASE 0x44B4600ull
+#define DCORE2_HMMU3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_HMMU3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_HMMU3_MSTR_IF_E2E_CRDT_BASE 0x44B4800ull
+#define DCORE2_HMMU3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_HMMU3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_HMMU3_MSTR_IF_AXUSER_BASE 0x44B4A80ull
+#define DCORE2_HMMU3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_HMMU3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_HMMU3_MSTR_IF_DBG_HBW_BASE 0x44B4B00ull
+#define DCORE2_HMMU3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_HMMU3_MSTR_IF_DBG_LBW_BASE 0x44B4B80ull
+#define DCORE2_HMMU3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_HMMU3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_HMMU3_MSTR_IF_CORE_HBW_BASE 0x44B4C00ull
+#define DCORE2_HMMU3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_HMMU3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_HMMU3_MSTR_IF_CORE_LBW_BASE 0x44B4D80ull
+#define DCORE2_HMMU3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_HMMU3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_HMMU3_MSTR_IF_SPECIAL_BASE 0x44B4E80ull
+#define DCORE2_HMMU3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HMMU3_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE2_MME_QM_ARC_DCCM_BASE 0x44C0000ull
+#define DCORE2_MME_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_MME_QM_ARC_DCCM_SECTION 0x8000
+#define mmDCORE2_MME_QM_ARC_AUX_BASE 0x44C8000ull
+#define DCORE2_MME_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_MME_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_MME_QM_ARC_AUX_SPECIAL_BASE 0x44C8E80ull
+#define DCORE2_MME_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_QM_ARC_AUX_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_QM_ARC_DUP_ENG_BASE 0x44C9000ull
+#define DCORE2_MME_QM_ARC_DUP_ENG_MAX_OFFSET 0x1000
+#define DCORE2_MME_QM_ARC_DUP_ENG_SECTION 0x9000
+#define mmDCORE2_MME_QM_ARC_DUP_ENG_AXUSER_BASE 0x44C9900ull
+#define DCORE2_MME_QM_ARC_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_QM_ARC_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmDCORE2_MME_QM_ARC_DUP_ENG_SPECIAL_BASE 0x44C9E80ull
+#define DCORE2_MME_QM_ARC_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_QM_ARC_DUP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_QM_BASE 0x44CA000ull
+#define DCORE2_MME_QM_MAX_OFFSET 0x1000
+#define DCORE2_MME_QM_SECTION 0x9000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR0_BASE 0x44CA900ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR1_BASE 0x44CA908ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR2_BASE 0x44CA910ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR3_BASE 0x44CA918ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR4_BASE 0x44CA920ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR5_BASE 0x44CA928ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR6_BASE 0x44CA930ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR7_BASE 0x44CA938ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR8_BASE 0x44CA940ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR9_BASE 0x44CA948ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR10_BASE 0x44CA950ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR11_BASE 0x44CA958ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR12_BASE 0x44CA960ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR13_BASE 0x44CA968ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR14_BASE 0x44CA970ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_MME_QM_QMAN_WR64_BASE_ADDR15_BASE 0x44CA978ull
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_MME_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_MME_QM_AXUSER_SECURED_BASE 0x44CAB00ull
+#define DCORE2_MME_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_MME_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_MME_QM_AXUSER_NONSECURED_BASE 0x44CAB80ull
+#define DCORE2_MME_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_MME_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_MME_QM_DBG_HBW_BASE 0x44CAC00ull
+#define DCORE2_MME_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_QM_DBG_LBW_BASE 0x44CAC80ull
+#define DCORE2_MME_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_MME_QM_CGM_BASE 0x44CAD80ull
+#define DCORE2_MME_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_MME_QM_CGM_SECTION 0x1000
+#define mmDCORE2_MME_QM_SPECIAL_BASE 0x44CAE80ull
+#define DCORE2_MME_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_LO_BASE 0x44CB000ull
+#define DCORE2_MME_CTRL_LO_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_LO_SECTION 0x8000
+#define mmDCORE2_MME_CTRL_LO_ARCH_BASE_ADDR_BASE 0x44CB008ull
+#define DCORE2_MME_CTRL_LO_ARCH_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE2_MME_CTRL_LO_ARCH_BASE_ADDR_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_LO_ARCH_NON_TENSOR_START_BASE 0x44CB028ull
+#define DCORE2_MME_CTRL_LO_ARCH_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_LO_ARCH_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_LO_ARCH_TENSOR_A_BASE 0x44CB040ull
+#define DCORE2_MME_CTRL_LO_ARCH_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_LO_ARCH_TENSOR_A_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_LO_ARCH_TENSOR_B_BASE 0x44CB098ull
+#define DCORE2_MME_CTRL_LO_ARCH_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_LO_ARCH_TENSOR_B_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_LO_ARCH_TENSOR_COUT_BASE 0x44CB0F0ull
+#define DCORE2_MME_CTRL_LO_ARCH_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_LO_ARCH_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_BASE 0x44CB15Cull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_BASE 0x44CB170ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_BASE 0x44CB184ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_BASE 0x44CB198ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_BASE 0x44CB1ACull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_BASE 0x44CB1C0ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_BASE 0x44CB1D4ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_BASE 0x44CB1E8ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_BASE 0x44CB1FCull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_BASE 0x44CB210ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_BASE 0x44CB22Cull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_BASE 0x44CB240ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_BASE 0x44CB254ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_BASE 0x44CB268ull
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_LO_ARCH_NON_TENSOR_END_BASE 0x44CB280ull
+#define DCORE2_MME_CTRL_LO_ARCH_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE2_MME_CTRL_LO_ARCH_NON_TENSOR_END_SECTION 0xB800
+#define mmDCORE2_MME_CTRL_LO_MME_AXUSER_BASE 0x44CBE00ull
+#define DCORE2_MME_CTRL_LO_MME_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_CTRL_LO_MME_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_CTRL_LO_SPECIAL_BASE 0x44CBE80ull
+#define DCORE2_MME_CTRL_LO_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_LO_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_BASE 0x44CC000ull
+#define DCORE2_MME_CTRL_HI_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_HI_SECTION 0x8000
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_BASE_ADDR_BASE 0x44CC008ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE2_MME_CTRL_HI_SHADOW_0_BASE_ADDR_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_BASE 0x44CC028ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_A_BASE 0x44CC040ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_A_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_B_BASE 0x44CC098ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_B_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_BASE 0x44CC0F0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_BASE 0x44CC15Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_BASE 0x44CC170ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_BASE 0x44CC184ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_BASE 0x44CC198ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_BASE 0x44CC1ACull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_BASE 0x44CC1C0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_BASE 0x44CC1D4ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_BASE 0x44CC1E8ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_BASE 0x44CC1FCull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_BASE 0x44CC210ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_BASE 0x44CC22Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_BASE 0x44CC240ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_BASE 0x44CC254ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_BASE 0x44CC268ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_BASE 0x44CC280ull
+#define DCORE2_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE2_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_BASE_ADDR_BASE 0x44CC308ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE2_MME_CTRL_HI_SHADOW_1_BASE_ADDR_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_BASE 0x44CC328ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_A_BASE 0x44CC340ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_A_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_B_BASE 0x44CC398ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_B_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_BASE 0x44CC3F0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_BASE 0x44CC45Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_BASE 0x44CC470ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_BASE 0x44CC484ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_BASE 0x44CC498ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_BASE 0x44CC4ACull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_BASE 0x44CC4C0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_BASE 0x44CC4D4ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_BASE 0x44CC4E8ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_BASE 0x44CC4FCull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_BASE 0x44CC510ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_BASE 0x44CC52Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_BASE 0x44CC540ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_BASE 0x44CC554ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_BASE 0x44CC568ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_BASE 0x44CC580ull
+#define DCORE2_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE2_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_BASE_ADDR_BASE 0x44CC608ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE2_MME_CTRL_HI_SHADOW_2_BASE_ADDR_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_BASE 0x44CC628ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_A_BASE 0x44CC640ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_A_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_B_BASE 0x44CC698ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_B_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_BASE 0x44CC6F0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_BASE 0x44CC75Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_BASE 0x44CC770ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_BASE 0x44CC784ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_BASE 0x44CC798ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_BASE 0x44CC7ACull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_BASE 0x44CC7C0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_BASE 0x44CC7D4ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_BASE 0x44CC7E8ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_BASE 0x44CC7FCull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_BASE 0x44CC810ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_BASE 0x44CC82Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_BASE 0x44CC840ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_BASE 0x44CC854ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_BASE 0x44CC868ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_BASE 0x44CC880ull
+#define DCORE2_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE2_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_BASE_ADDR_BASE 0x44CC908ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE2_MME_CTRL_HI_SHADOW_3_BASE_ADDR_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_BASE 0x44CC928ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_A_BASE 0x44CC940ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_A_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_B_BASE 0x44CC998ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_B_SECTION 0x5800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_BASE 0x44CC9F0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_BASE 0x44CCA5Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_BASE 0x44CCA70ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_BASE 0x44CCA84ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_BASE 0x44CCA98ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_BASE 0x44CCAACull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_BASE 0x44CCAC0ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_BASE 0x44CCAD4ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_BASE 0x44CCAE8ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_BASE 0x44CCAFCull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_BASE 0x44CCB10ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_BASE 0x44CCB2Cull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_BASE 0x44CCB40ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_BASE 0x44CCB54ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_BASE 0x44CCB68ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_BASE 0x44CCB80ull
+#define DCORE2_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE2_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_SECTION 0x3000
+#define mmDCORE2_MME_CTRL_HI_SPECIAL_BASE 0x44CCE80ull
+#define DCORE2_MME_CTRL_HI_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_HI_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_EU_BIST_BASE 0x44CD000ull
+#define DCORE2_MME_EU_BIST_MAX_OFFSET 0x1000
+#define DCORE2_MME_EU_BIST_SECTION 0xE800
+#define mmDCORE2_MME_EU_BIST_SPECIAL_BASE 0x44CDE80ull
+#define DCORE2_MME_EU_BIST_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_EU_BIST_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_MSTR_IF_RR_SHRD_HBW_BASE 0x44CE000ull
+#define DCORE2_MME_CTRL_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_CTRL_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_MSTR_IF_RR_PRVT_HBW_BASE 0x44CE200ull
+#define DCORE2_MME_CTRL_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_CTRL_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_MSTR_IF_RR_SHRD_LBW_BASE 0x44CE400ull
+#define DCORE2_MME_CTRL_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_CTRL_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_MSTR_IF_RR_PRVT_LBW_BASE 0x44CE600ull
+#define DCORE2_MME_CTRL_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_CTRL_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_CTRL_MSTR_IF_E2E_CRDT_BASE 0x44CE800ull
+#define DCORE2_MME_CTRL_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_CTRL_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_CTRL_MSTR_IF_AXUSER_BASE 0x44CEA80ull
+#define DCORE2_MME_CTRL_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_CTRL_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_CTRL_MSTR_IF_DBG_HBW_BASE 0x44CEB00ull
+#define DCORE2_MME_CTRL_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_CTRL_MSTR_IF_DBG_LBW_BASE 0x44CEB80ull
+#define DCORE2_MME_CTRL_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_CTRL_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_CTRL_MSTR_IF_CORE_HBW_BASE 0x44CEC00ull
+#define DCORE2_MME_CTRL_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_CTRL_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_CTRL_MSTR_IF_CORE_LBW_BASE 0x44CED80ull
+#define DCORE2_MME_CTRL_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_CTRL_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_MSTR_IF_SPECIAL_BASE 0x44CEE80ull
+#define DCORE2_MME_CTRL_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_CTRL_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_QM_ARC_ACP_ENG_BASE 0x44CF000ull
+#define DCORE2_MME_QM_ARC_ACP_ENG_MAX_OFFSET 0x1000
+#define DCORE2_MME_QM_ARC_ACP_ENG_SECTION 0xE800
+#define mmDCORE2_MME_QM_ARC_ACP_ENG_SPECIAL_BASE 0x44CFE80ull
+#define DCORE2_MME_QM_ARC_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_QM_ARC_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_SBTE0_BASE 0x44D0000ull
+#define DCORE2_MME_SBTE0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_SECTION 0xE800
+#define mmDCORE2_MME_SBTE0_SPECIAL_BASE 0x44D0E80ull
+#define DCORE2_MME_SBTE0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE0_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE 0x44D1000ull
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_BASE 0x44D1200ull
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_BASE 0x44D1400ull
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_BASE 0x44D1600ull
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_E2E_CRDT_BASE 0x44D1800ull
+#define DCORE2_MME_SBTE0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_SBTE0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_SBTE0_MSTR_IF_AXUSER_BASE 0x44D1A80ull
+#define DCORE2_MME_SBTE0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_SBTE0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_DBG_HBW_BASE 0x44D1B00ull
+#define DCORE2_MME_SBTE0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_DBG_LBW_BASE 0x44D1B80ull
+#define DCORE2_MME_SBTE0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_CORE_HBW_BASE 0x44D1C00ull
+#define DCORE2_MME_SBTE0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_SBTE0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_SBTE0_MSTR_IF_CORE_LBW_BASE 0x44D1D80ull
+#define DCORE2_MME_SBTE0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_SBTE0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_MSTR_IF_SPECIAL_BASE 0x44D1E80ull
+#define DCORE2_MME_SBTE0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE0_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE2_MME_SBTE1_BASE 0x44D8000ull
+#define DCORE2_MME_SBTE1_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_SECTION 0xE800
+#define mmDCORE2_MME_SBTE1_SPECIAL_BASE 0x44D8E80ull
+#define DCORE2_MME_SBTE1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE1_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_BASE 0x44D9000ull
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_BASE 0x44D9200ull
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_BASE 0x44D9400ull
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_BASE 0x44D9600ull
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_E2E_CRDT_BASE 0x44D9800ull
+#define DCORE2_MME_SBTE1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_SBTE1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_SBTE1_MSTR_IF_AXUSER_BASE 0x44D9A80ull
+#define DCORE2_MME_SBTE1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_SBTE1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_DBG_HBW_BASE 0x44D9B00ull
+#define DCORE2_MME_SBTE1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_DBG_LBW_BASE 0x44D9B80ull
+#define DCORE2_MME_SBTE1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_CORE_HBW_BASE 0x44D9C00ull
+#define DCORE2_MME_SBTE1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_SBTE1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_SBTE1_MSTR_IF_CORE_LBW_BASE 0x44D9D80ull
+#define DCORE2_MME_SBTE1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_SBTE1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_MSTR_IF_SPECIAL_BASE 0x44D9E80ull
+#define DCORE2_MME_SBTE1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE1_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE2_MME_SBTE2_BASE 0x44E0000ull
+#define DCORE2_MME_SBTE2_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_SECTION 0xE800
+#define mmDCORE2_MME_SBTE2_SPECIAL_BASE 0x44E0E80ull
+#define DCORE2_MME_SBTE2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE2_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_BASE 0x44E1000ull
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_BASE 0x44E1200ull
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_BASE 0x44E1400ull
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_BASE 0x44E1600ull
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_E2E_CRDT_BASE 0x44E1800ull
+#define DCORE2_MME_SBTE2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_SBTE2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_SBTE2_MSTR_IF_AXUSER_BASE 0x44E1A80ull
+#define DCORE2_MME_SBTE2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_SBTE2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_DBG_HBW_BASE 0x44E1B00ull
+#define DCORE2_MME_SBTE2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_DBG_LBW_BASE 0x44E1B80ull
+#define DCORE2_MME_SBTE2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_CORE_HBW_BASE 0x44E1C00ull
+#define DCORE2_MME_SBTE2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_SBTE2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_SBTE2_MSTR_IF_CORE_LBW_BASE 0x44E1D80ull
+#define DCORE2_MME_SBTE2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_SBTE2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_MSTR_IF_SPECIAL_BASE 0x44E1E80ull
+#define DCORE2_MME_SBTE2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE2_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE2_MME_SBTE3_BASE 0x44E8000ull
+#define DCORE2_MME_SBTE3_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_SECTION 0xE800
+#define mmDCORE2_MME_SBTE3_SPECIAL_BASE 0x44E8E80ull
+#define DCORE2_MME_SBTE3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_BASE 0x44E9000ull
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_BASE 0x44E9200ull
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_BASE 0x44E9400ull
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_BASE 0x44E9600ull
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_E2E_CRDT_BASE 0x44E9800ull
+#define DCORE2_MME_SBTE3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_SBTE3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_SBTE3_MSTR_IF_AXUSER_BASE 0x44E9A80ull
+#define DCORE2_MME_SBTE3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_SBTE3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_DBG_HBW_BASE 0x44E9B00ull
+#define DCORE2_MME_SBTE3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_DBG_LBW_BASE 0x44E9B80ull
+#define DCORE2_MME_SBTE3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_CORE_HBW_BASE 0x44E9C00ull
+#define DCORE2_MME_SBTE3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_SBTE3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_SBTE3_MSTR_IF_CORE_LBW_BASE 0x44E9D80ull
+#define DCORE2_MME_SBTE3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_SBTE3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_MSTR_IF_SPECIAL_BASE 0x44E9E80ull
+#define DCORE2_MME_SBTE3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE3_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE2_MME_SBTE4_BASE 0x44F0000ull
+#define DCORE2_MME_SBTE4_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_SECTION 0xE800
+#define mmDCORE2_MME_SBTE4_SPECIAL_BASE 0x44F0E80ull
+#define DCORE2_MME_SBTE4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE4_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_BASE 0x44F1000ull
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_BASE 0x44F1200ull
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_BASE 0x44F1400ull
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_BASE 0x44F1600ull
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_E2E_CRDT_BASE 0x44F1800ull
+#define DCORE2_MME_SBTE4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_SBTE4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_SBTE4_MSTR_IF_AXUSER_BASE 0x44F1A80ull
+#define DCORE2_MME_SBTE4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_SBTE4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_DBG_HBW_BASE 0x44F1B00ull
+#define DCORE2_MME_SBTE4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_DBG_LBW_BASE 0x44F1B80ull
+#define DCORE2_MME_SBTE4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_SBTE4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_CORE_HBW_BASE 0x44F1C00ull
+#define DCORE2_MME_SBTE4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_SBTE4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_SBTE4_MSTR_IF_CORE_LBW_BASE 0x44F1D80ull
+#define DCORE2_MME_SBTE4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_SBTE4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_MSTR_IF_SPECIAL_BASE 0x44F1E80ull
+#define DCORE2_MME_SBTE4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_SBTE4_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE2_MME_ACC_BASE 0x44F8000ull
+#define DCORE2_MME_ACC_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_SECTION 0xE800
+#define mmDCORE2_MME_ACC_SPECIAL_BASE 0x44F8E80ull
+#define DCORE2_MME_ACC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_ACC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE 0x44F9000ull
+#define DCORE2_MME_WB0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_WB0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_WB0_MSTR_IF_RR_PRVT_HBW_BASE 0x44F9200ull
+#define DCORE2_MME_WB0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_WB0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_WB0_MSTR_IF_RR_SHRD_LBW_BASE 0x44F9400ull
+#define DCORE2_MME_WB0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_WB0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_WB0_MSTR_IF_RR_PRVT_LBW_BASE 0x44F9600ull
+#define DCORE2_MME_WB0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_WB0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_WB0_MSTR_IF_E2E_CRDT_BASE 0x44F9800ull
+#define DCORE2_MME_WB0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_WB0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_WB0_MSTR_IF_AXUSER_BASE 0x44F9A80ull
+#define DCORE2_MME_WB0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_WB0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_WB0_MSTR_IF_DBG_HBW_BASE 0x44F9B00ull
+#define DCORE2_MME_WB0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_WB0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_WB0_MSTR_IF_DBG_LBW_BASE 0x44F9B80ull
+#define DCORE2_MME_WB0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_WB0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_WB0_MSTR_IF_CORE_HBW_BASE 0x44F9C00ull
+#define DCORE2_MME_WB0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_WB0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_WB0_MSTR_IF_CORE_LBW_BASE 0x44F9D80ull
+#define DCORE2_MME_WB0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_WB0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_WB0_MSTR_IF_SPECIAL_BASE 0x44F9E80ull
+#define DCORE2_MME_WB0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_WB0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE 0x44FA000ull
+#define DCORE2_MME_WB1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_WB1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_MME_WB1_MSTR_IF_RR_PRVT_HBW_BASE 0x44FA200ull
+#define DCORE2_MME_WB1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_MME_WB1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_MME_WB1_MSTR_IF_RR_SHRD_LBW_BASE 0x44FA400ull
+#define DCORE2_MME_WB1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_WB1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_MME_WB1_MSTR_IF_RR_PRVT_LBW_BASE 0x44FA600ull
+#define DCORE2_MME_WB1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_MME_WB1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_MME_WB1_MSTR_IF_E2E_CRDT_BASE 0x44FA800ull
+#define DCORE2_MME_WB1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_MME_WB1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_MME_WB1_MSTR_IF_AXUSER_BASE 0x44FAA80ull
+#define DCORE2_MME_WB1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_MME_WB1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_MME_WB1_MSTR_IF_DBG_HBW_BASE 0x44FAB00ull
+#define DCORE2_MME_WB1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_WB1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_MME_WB1_MSTR_IF_DBG_LBW_BASE 0x44FAB80ull
+#define DCORE2_MME_WB1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_MME_WB1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_MME_WB1_MSTR_IF_CORE_HBW_BASE 0x44FAC00ull
+#define DCORE2_MME_WB1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_MME_WB1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_MME_WB1_MSTR_IF_CORE_LBW_BASE 0x44FAD80ull
+#define DCORE2_MME_WB1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_MME_WB1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_MME_WB1_MSTR_IF_SPECIAL_BASE 0x44FAE80ull
+#define DCORE2_MME_WB1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_MME_WB1_MSTR_IF_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SYNC_MNGR_OBJS_BASE 0x4500000ull
+#define DCORE2_SYNC_MNGR_OBJS_MAX_OFFSET 0x15A00
+#define DCORE2_SYNC_MNGR_OBJS_SECTION 0x1E000
+#define mmDCORE2_SYNC_MNGR_GLBL_BASE 0x451E000ull
+#define DCORE2_SYNC_MNGR_GLBL_MAX_OFFSET 0x1000
+#define DCORE2_SYNC_MNGR_GLBL_SECTION 0xE800
+#define mmDCORE2_SYNC_MNGR_GLBL_SPECIAL_BASE 0x451EE80ull
+#define DCORE2_SYNC_MNGR_GLBL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SYNC_MNGR_GLBL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_BASE 0x451F000ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_BASE 0x451F200ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_BASE 0x451F400ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_BASE 0x451F600ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_E2E_CRDT_BASE 0x451F800ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_SYNC_MNGR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_AXUSER_BASE 0x451FA80ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_SYNC_MNGR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_DBG_HBW_BASE 0x451FB00ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_SYNC_MNGR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_DBG_LBW_BASE 0x451FB80ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_SYNC_MNGR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_CORE_HBW_BASE 0x451FC00ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_SYNC_MNGR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_CORE_LBW_BASE 0x451FD80ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_SYNC_MNGR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_SYNC_MNGR_MSTR_IF_SPECIAL_BASE 0x451FE80ull
+#define DCORE2_SYNC_MNGR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SYNC_MNGR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HIF0_BASE 0x4520000ull
+#define DCORE2_HIF0_MAX_OFFSET 0x1000
+#define DCORE2_HIF0_SECTION 0xE800
+#define mmDCORE2_HIF0_SPECIAL_BASE 0x4520E80ull
+#define DCORE2_HIF0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HIF0_SPECIAL_SECTION 0x3180
+#define mmDCORE2_HIF1_BASE 0x4524000ull
+#define DCORE2_HIF1_MAX_OFFSET 0x1000
+#define DCORE2_HIF1_SECTION 0xE800
+#define mmDCORE2_HIF1_SPECIAL_BASE 0x4524E80ull
+#define DCORE2_HIF1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HIF1_SPECIAL_SECTION 0x3180
+#define mmDCORE2_HIF2_BASE 0x4528000ull
+#define DCORE2_HIF2_MAX_OFFSET 0x1000
+#define DCORE2_HIF2_SECTION 0xE800
+#define mmDCORE2_HIF2_SPECIAL_BASE 0x4528E80ull
+#define DCORE2_HIF2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HIF2_SPECIAL_SECTION 0x3180
+#define mmDCORE2_HIF3_BASE 0x452C000ull
+#define DCORE2_HIF3_MAX_OFFSET 0x1000
+#define DCORE2_HIF3_SECTION 0xE800
+#define mmDCORE2_HIF3_SPECIAL_BASE 0x452CE80ull
+#define DCORE2_HIF3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HIF3_SPECIAL_SECTION 0x13180
+#define mmDCORE2_RTR0_CTRL_BASE 0x4540000ull
+#define DCORE2_RTR0_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR0_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR0_CTRL_SPECIAL_BASE 0x4540E80ull
+#define DCORE2_RTR0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR0_H3_BASE 0x4541000ull
+#define DCORE2_RTR0_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR0_H3_SECTION 0xE800
+#define mmDCORE2_RTR0_H3_SPECIAL_BASE 0x4541E80ull
+#define DCORE2_RTR0_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR0_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR0_MSTR_IF_RR_SHRD_HBW_BASE 0x4542000ull
+#define DCORE2_RTR0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR0_MSTR_IF_RR_PRVT_HBW_BASE 0x4542200ull
+#define DCORE2_RTR0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR0_MSTR_IF_RR_SHRD_LBW_BASE 0x4542400ull
+#define DCORE2_RTR0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR0_MSTR_IF_RR_PRVT_LBW_BASE 0x4542600ull
+#define DCORE2_RTR0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR0_MSTR_IF_E2E_CRDT_BASE 0x4542800ull
+#define DCORE2_RTR0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR0_MSTR_IF_AXUSER_BASE 0x4542A80ull
+#define DCORE2_RTR0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR0_MSTR_IF_DBG_HBW_BASE 0x4542B00ull
+#define DCORE2_RTR0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR0_MSTR_IF_DBG_LBW_BASE 0x4542B80ull
+#define DCORE2_RTR0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR0_MSTR_IF_CORE_HBW_BASE 0x4542C00ull
+#define DCORE2_RTR0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR0_MSTR_IF_CORE_LBW_BASE 0x4542D80ull
+#define DCORE2_RTR0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR0_MSTR_IF_SPECIAL_BASE 0x4542E80ull
+#define DCORE2_RTR0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR0_ADD_DEC_HBW_BASE 0x4543000ull
+#define DCORE2_RTR0_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR0_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR0_ADD_DEC_LBW_BASE 0x4543400ull
+#define DCORE2_RTR0_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR0_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR0_ADD_DEC_SPECIAL_BASE 0x4543E80ull
+#define DCORE2_RTR0_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR0_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR0_BASE 0x4544000ull
+#define DCORE2_RTR0_MAX_OFFSET 0x1000
+#define DCORE2_RTR0_SECTION 0x3000
+#define mmDCORE2_RTR0_HBW_RD_RQ_LL_STAT_BASE 0x4544300ull
+#define DCORE2_RTR0_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_HBW_RD_RS_LL_STAT_BASE 0x4544340ull
+#define DCORE2_RTR0_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_HBW_WR_RQ_LL_STAT_BASE 0x4544380ull
+#define DCORE2_RTR0_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_HBW_WR_RS_LL_STAT_BASE 0x45443C0ull
+#define DCORE2_RTR0_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_LBW_RD_RQ_LL_STAT_BASE 0x4544400ull
+#define DCORE2_RTR0_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_LBW_RD_RS_LL_STAT_BASE 0x4544440ull
+#define DCORE2_RTR0_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_LBW_WR_RQ_LL_STAT_BASE 0x4544480ull
+#define DCORE2_RTR0_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_LBW_WR_RS_LL_STAT_BASE 0x45444C0ull
+#define DCORE2_RTR0_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_HBW_MFIFO_BASE 0x4544500ull
+#define DCORE2_RTR0_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR0_E2E_RD_LL_STAT_BASE 0x4544540ull
+#define DCORE2_RTR0_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR0_E2E_WR_LL_STAT_BASE 0x4544580ull
+#define DCORE2_RTR0_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR0_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR0_RTR_HBW_XACT_STAT_BASE 0x4544600ull
+#define DCORE2_RTR0_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR0_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR0_RTR_LBW_XACT_STAT_BASE 0x4544680ull
+#define DCORE2_RTR0_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR0_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR0_RTR_E2E_XACT_STAT_BASE 0x4544700ull
+#define DCORE2_RTR0_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR0_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR0_SPECIAL_BASE 0x4544E80ull
+#define DCORE2_RTR0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR0_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR0_DBG_ADDR_BASE 0x4545000ull
+#define DCORE2_RTR0_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR0_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR0_DBG_ADDR_SPECIAL_BASE 0x4545E80ull
+#define DCORE2_RTR0_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR0_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR1_CTRL_BASE 0x4548000ull
+#define DCORE2_RTR1_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR1_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR1_CTRL_SPECIAL_BASE 0x4548E80ull
+#define DCORE2_RTR1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR1_H3_BASE 0x4549000ull
+#define DCORE2_RTR1_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR1_H3_SECTION 0xE800
+#define mmDCORE2_RTR1_H3_SPECIAL_BASE 0x4549E80ull
+#define DCORE2_RTR1_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR1_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR1_MSTR_IF_RR_SHRD_HBW_BASE 0x454A000ull
+#define DCORE2_RTR1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR1_MSTR_IF_RR_PRVT_HBW_BASE 0x454A200ull
+#define DCORE2_RTR1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR1_MSTR_IF_RR_SHRD_LBW_BASE 0x454A400ull
+#define DCORE2_RTR1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR1_MSTR_IF_RR_PRVT_LBW_BASE 0x454A600ull
+#define DCORE2_RTR1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR1_MSTR_IF_E2E_CRDT_BASE 0x454A800ull
+#define DCORE2_RTR1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR1_MSTR_IF_AXUSER_BASE 0x454AA80ull
+#define DCORE2_RTR1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR1_MSTR_IF_DBG_HBW_BASE 0x454AB00ull
+#define DCORE2_RTR1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR1_MSTR_IF_DBG_LBW_BASE 0x454AB80ull
+#define DCORE2_RTR1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR1_MSTR_IF_CORE_HBW_BASE 0x454AC00ull
+#define DCORE2_RTR1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR1_MSTR_IF_CORE_LBW_BASE 0x454AD80ull
+#define DCORE2_RTR1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR1_MSTR_IF_SPECIAL_BASE 0x454AE80ull
+#define DCORE2_RTR1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR1_ADD_DEC_HBW_BASE 0x454B000ull
+#define DCORE2_RTR1_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR1_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR1_ADD_DEC_LBW_BASE 0x454B400ull
+#define DCORE2_RTR1_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR1_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR1_ADD_DEC_SPECIAL_BASE 0x454BE80ull
+#define DCORE2_RTR1_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR1_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR1_BASE 0x454C000ull
+#define DCORE2_RTR1_MAX_OFFSET 0x1000
+#define DCORE2_RTR1_SECTION 0x3000
+#define mmDCORE2_RTR1_HBW_RD_RQ_LL_STAT_BASE 0x454C300ull
+#define DCORE2_RTR1_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_HBW_RD_RS_LL_STAT_BASE 0x454C340ull
+#define DCORE2_RTR1_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_HBW_WR_RQ_LL_STAT_BASE 0x454C380ull
+#define DCORE2_RTR1_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_HBW_WR_RS_LL_STAT_BASE 0x454C3C0ull
+#define DCORE2_RTR1_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_LBW_RD_RQ_LL_STAT_BASE 0x454C400ull
+#define DCORE2_RTR1_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_LBW_RD_RS_LL_STAT_BASE 0x454C440ull
+#define DCORE2_RTR1_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_LBW_WR_RQ_LL_STAT_BASE 0x454C480ull
+#define DCORE2_RTR1_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_LBW_WR_RS_LL_STAT_BASE 0x454C4C0ull
+#define DCORE2_RTR1_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_HBW_MFIFO_BASE 0x454C500ull
+#define DCORE2_RTR1_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR1_E2E_RD_LL_STAT_BASE 0x454C540ull
+#define DCORE2_RTR1_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR1_E2E_WR_LL_STAT_BASE 0x454C580ull
+#define DCORE2_RTR1_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR1_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR1_RTR_HBW_XACT_STAT_BASE 0x454C600ull
+#define DCORE2_RTR1_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR1_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR1_RTR_LBW_XACT_STAT_BASE 0x454C680ull
+#define DCORE2_RTR1_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR1_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR1_RTR_E2E_XACT_STAT_BASE 0x454C700ull
+#define DCORE2_RTR1_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR1_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR1_SPECIAL_BASE 0x454CE80ull
+#define DCORE2_RTR1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR1_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR1_DBG_ADDR_BASE 0x454D000ull
+#define DCORE2_RTR1_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR1_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR1_DBG_ADDR_SPECIAL_BASE 0x454DE80ull
+#define DCORE2_RTR1_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR1_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR2_CTRL_BASE 0x4550000ull
+#define DCORE2_RTR2_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR2_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR2_CTRL_SPECIAL_BASE 0x4550E80ull
+#define DCORE2_RTR2_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR2_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR2_H3_BASE 0x4551000ull
+#define DCORE2_RTR2_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR2_H3_SECTION 0xE800
+#define mmDCORE2_RTR2_H3_SPECIAL_BASE 0x4551E80ull
+#define DCORE2_RTR2_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR2_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR2_MSTR_IF_RR_SHRD_HBW_BASE 0x4552000ull
+#define DCORE2_RTR2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR2_MSTR_IF_RR_PRVT_HBW_BASE 0x4552200ull
+#define DCORE2_RTR2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR2_MSTR_IF_RR_SHRD_LBW_BASE 0x4552400ull
+#define DCORE2_RTR2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR2_MSTR_IF_RR_PRVT_LBW_BASE 0x4552600ull
+#define DCORE2_RTR2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR2_MSTR_IF_E2E_CRDT_BASE 0x4552800ull
+#define DCORE2_RTR2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR2_MSTR_IF_AXUSER_BASE 0x4552A80ull
+#define DCORE2_RTR2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR2_MSTR_IF_DBG_HBW_BASE 0x4552B00ull
+#define DCORE2_RTR2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR2_MSTR_IF_DBG_LBW_BASE 0x4552B80ull
+#define DCORE2_RTR2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR2_MSTR_IF_CORE_HBW_BASE 0x4552C00ull
+#define DCORE2_RTR2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR2_MSTR_IF_CORE_LBW_BASE 0x4552D80ull
+#define DCORE2_RTR2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR2_MSTR_IF_SPECIAL_BASE 0x4552E80ull
+#define DCORE2_RTR2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR2_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR2_ADD_DEC_HBW_BASE 0x4553000ull
+#define DCORE2_RTR2_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR2_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR2_ADD_DEC_LBW_BASE 0x4553400ull
+#define DCORE2_RTR2_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR2_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR2_ADD_DEC_SPECIAL_BASE 0x4553E80ull
+#define DCORE2_RTR2_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR2_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR2_BASE 0x4554000ull
+#define DCORE2_RTR2_MAX_OFFSET 0x1000
+#define DCORE2_RTR2_SECTION 0x3000
+#define mmDCORE2_RTR2_HBW_RD_RQ_LL_STAT_BASE 0x4554300ull
+#define DCORE2_RTR2_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_HBW_RD_RS_LL_STAT_BASE 0x4554340ull
+#define DCORE2_RTR2_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_HBW_WR_RQ_LL_STAT_BASE 0x4554380ull
+#define DCORE2_RTR2_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_HBW_WR_RS_LL_STAT_BASE 0x45543C0ull
+#define DCORE2_RTR2_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_LBW_RD_RQ_LL_STAT_BASE 0x4554400ull
+#define DCORE2_RTR2_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_LBW_RD_RS_LL_STAT_BASE 0x4554440ull
+#define DCORE2_RTR2_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_LBW_WR_RQ_LL_STAT_BASE 0x4554480ull
+#define DCORE2_RTR2_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_LBW_WR_RS_LL_STAT_BASE 0x45544C0ull
+#define DCORE2_RTR2_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_HBW_MFIFO_BASE 0x4554500ull
+#define DCORE2_RTR2_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR2_E2E_RD_LL_STAT_BASE 0x4554540ull
+#define DCORE2_RTR2_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR2_E2E_WR_LL_STAT_BASE 0x4554580ull
+#define DCORE2_RTR2_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR2_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR2_RTR_HBW_XACT_STAT_BASE 0x4554600ull
+#define DCORE2_RTR2_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR2_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR2_RTR_LBW_XACT_STAT_BASE 0x4554680ull
+#define DCORE2_RTR2_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR2_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR2_RTR_E2E_XACT_STAT_BASE 0x4554700ull
+#define DCORE2_RTR2_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR2_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR2_SPECIAL_BASE 0x4554E80ull
+#define DCORE2_RTR2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR2_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR2_DBG_ADDR_BASE 0x4555000ull
+#define DCORE2_RTR2_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR2_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR2_DBG_ADDR_SPECIAL_BASE 0x4555E80ull
+#define DCORE2_RTR2_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR2_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR3_CTRL_BASE 0x4558000ull
+#define DCORE2_RTR3_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR3_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR3_CTRL_SPECIAL_BASE 0x4558E80ull
+#define DCORE2_RTR3_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR3_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR3_H3_BASE 0x4559000ull
+#define DCORE2_RTR3_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR3_H3_SECTION 0xE800
+#define mmDCORE2_RTR3_H3_SPECIAL_BASE 0x4559E80ull
+#define DCORE2_RTR3_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR3_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR3_MSTR_IF_RR_SHRD_HBW_BASE 0x455A000ull
+#define DCORE2_RTR3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR3_MSTR_IF_RR_PRVT_HBW_BASE 0x455A200ull
+#define DCORE2_RTR3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR3_MSTR_IF_RR_SHRD_LBW_BASE 0x455A400ull
+#define DCORE2_RTR3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR3_MSTR_IF_RR_PRVT_LBW_BASE 0x455A600ull
+#define DCORE2_RTR3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR3_MSTR_IF_E2E_CRDT_BASE 0x455A800ull
+#define DCORE2_RTR3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR3_MSTR_IF_AXUSER_BASE 0x455AA80ull
+#define DCORE2_RTR3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR3_MSTR_IF_DBG_HBW_BASE 0x455AB00ull
+#define DCORE2_RTR3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR3_MSTR_IF_DBG_LBW_BASE 0x455AB80ull
+#define DCORE2_RTR3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR3_MSTR_IF_CORE_HBW_BASE 0x455AC00ull
+#define DCORE2_RTR3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR3_MSTR_IF_CORE_LBW_BASE 0x455AD80ull
+#define DCORE2_RTR3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR3_MSTR_IF_SPECIAL_BASE 0x455AE80ull
+#define DCORE2_RTR3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR3_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR3_ADD_DEC_HBW_BASE 0x455B000ull
+#define DCORE2_RTR3_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR3_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR3_ADD_DEC_LBW_BASE 0x455B400ull
+#define DCORE2_RTR3_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR3_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR3_ADD_DEC_SPECIAL_BASE 0x455BE80ull
+#define DCORE2_RTR3_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR3_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR3_BASE 0x455C000ull
+#define DCORE2_RTR3_MAX_OFFSET 0x1000
+#define DCORE2_RTR3_SECTION 0x3000
+#define mmDCORE2_RTR3_HBW_RD_RQ_LL_STAT_BASE 0x455C300ull
+#define DCORE2_RTR3_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_HBW_RD_RS_LL_STAT_BASE 0x455C340ull
+#define DCORE2_RTR3_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_HBW_WR_RQ_LL_STAT_BASE 0x455C380ull
+#define DCORE2_RTR3_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_HBW_WR_RS_LL_STAT_BASE 0x455C3C0ull
+#define DCORE2_RTR3_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_LBW_RD_RQ_LL_STAT_BASE 0x455C400ull
+#define DCORE2_RTR3_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_LBW_RD_RS_LL_STAT_BASE 0x455C440ull
+#define DCORE2_RTR3_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_LBW_WR_RQ_LL_STAT_BASE 0x455C480ull
+#define DCORE2_RTR3_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_LBW_WR_RS_LL_STAT_BASE 0x455C4C0ull
+#define DCORE2_RTR3_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_HBW_MFIFO_BASE 0x455C500ull
+#define DCORE2_RTR3_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR3_E2E_RD_LL_STAT_BASE 0x455C540ull
+#define DCORE2_RTR3_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR3_E2E_WR_LL_STAT_BASE 0x455C580ull
+#define DCORE2_RTR3_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR3_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR3_RTR_HBW_XACT_STAT_BASE 0x455C600ull
+#define DCORE2_RTR3_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR3_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR3_RTR_LBW_XACT_STAT_BASE 0x455C680ull
+#define DCORE2_RTR3_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR3_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR3_RTR_E2E_XACT_STAT_BASE 0x455C700ull
+#define DCORE2_RTR3_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR3_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR3_SPECIAL_BASE 0x455CE80ull
+#define DCORE2_RTR3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR3_DBG_ADDR_BASE 0x455D000ull
+#define DCORE2_RTR3_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR3_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR3_DBG_ADDR_SPECIAL_BASE 0x455DE80ull
+#define DCORE2_RTR3_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR3_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR4_CTRL_BASE 0x4560000ull
+#define DCORE2_RTR4_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR4_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR4_CTRL_SPECIAL_BASE 0x4560E80ull
+#define DCORE2_RTR4_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR4_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR4_H3_BASE 0x4561000ull
+#define DCORE2_RTR4_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR4_H3_SECTION 0xE800
+#define mmDCORE2_RTR4_H3_SPECIAL_BASE 0x4561E80ull
+#define DCORE2_RTR4_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR4_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR4_MSTR_IF_RR_SHRD_HBW_BASE 0x4562000ull
+#define DCORE2_RTR4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR4_MSTR_IF_RR_PRVT_HBW_BASE 0x4562200ull
+#define DCORE2_RTR4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR4_MSTR_IF_RR_SHRD_LBW_BASE 0x4562400ull
+#define DCORE2_RTR4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR4_MSTR_IF_RR_PRVT_LBW_BASE 0x4562600ull
+#define DCORE2_RTR4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR4_MSTR_IF_E2E_CRDT_BASE 0x4562800ull
+#define DCORE2_RTR4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR4_MSTR_IF_AXUSER_BASE 0x4562A80ull
+#define DCORE2_RTR4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR4_MSTR_IF_DBG_HBW_BASE 0x4562B00ull
+#define DCORE2_RTR4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR4_MSTR_IF_DBG_LBW_BASE 0x4562B80ull
+#define DCORE2_RTR4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR4_MSTR_IF_CORE_HBW_BASE 0x4562C00ull
+#define DCORE2_RTR4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR4_MSTR_IF_CORE_LBW_BASE 0x4562D80ull
+#define DCORE2_RTR4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR4_MSTR_IF_SPECIAL_BASE 0x4562E80ull
+#define DCORE2_RTR4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR4_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR4_ADD_DEC_HBW_BASE 0x4563000ull
+#define DCORE2_RTR4_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR4_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR4_ADD_DEC_LBW_BASE 0x4563400ull
+#define DCORE2_RTR4_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR4_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR4_ADD_DEC_SPECIAL_BASE 0x4563E80ull
+#define DCORE2_RTR4_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR4_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR4_BASE 0x4564000ull
+#define DCORE2_RTR4_MAX_OFFSET 0x1000
+#define DCORE2_RTR4_SECTION 0x3000
+#define mmDCORE2_RTR4_HBW_RD_RQ_LL_STAT_BASE 0x4564300ull
+#define DCORE2_RTR4_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_HBW_RD_RS_LL_STAT_BASE 0x4564340ull
+#define DCORE2_RTR4_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_HBW_WR_RQ_LL_STAT_BASE 0x4564380ull
+#define DCORE2_RTR4_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_HBW_WR_RS_LL_STAT_BASE 0x45643C0ull
+#define DCORE2_RTR4_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_LBW_RD_RQ_LL_STAT_BASE 0x4564400ull
+#define DCORE2_RTR4_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_LBW_RD_RS_LL_STAT_BASE 0x4564440ull
+#define DCORE2_RTR4_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_LBW_WR_RQ_LL_STAT_BASE 0x4564480ull
+#define DCORE2_RTR4_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_LBW_WR_RS_LL_STAT_BASE 0x45644C0ull
+#define DCORE2_RTR4_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_HBW_MFIFO_BASE 0x4564500ull
+#define DCORE2_RTR4_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR4_E2E_RD_LL_STAT_BASE 0x4564540ull
+#define DCORE2_RTR4_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR4_E2E_WR_LL_STAT_BASE 0x4564580ull
+#define DCORE2_RTR4_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR4_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR4_RTR_HBW_XACT_STAT_BASE 0x4564600ull
+#define DCORE2_RTR4_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR4_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR4_RTR_LBW_XACT_STAT_BASE 0x4564680ull
+#define DCORE2_RTR4_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR4_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR4_RTR_E2E_XACT_STAT_BASE 0x4564700ull
+#define DCORE2_RTR4_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR4_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR4_SPECIAL_BASE 0x4564E80ull
+#define DCORE2_RTR4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR4_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR4_DBG_ADDR_BASE 0x4565000ull
+#define DCORE2_RTR4_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR4_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR4_DBG_ADDR_SPECIAL_BASE 0x4565E80ull
+#define DCORE2_RTR4_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR4_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR5_CTRL_BASE 0x4568000ull
+#define DCORE2_RTR5_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR5_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR5_CTRL_SPECIAL_BASE 0x4568E80ull
+#define DCORE2_RTR5_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR5_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR5_H3_BASE 0x4569000ull
+#define DCORE2_RTR5_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR5_H3_SECTION 0xE800
+#define mmDCORE2_RTR5_H3_SPECIAL_BASE 0x4569E80ull
+#define DCORE2_RTR5_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR5_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR5_MSTR_IF_RR_SHRD_HBW_BASE 0x456A000ull
+#define DCORE2_RTR5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR5_MSTR_IF_RR_PRVT_HBW_BASE 0x456A200ull
+#define DCORE2_RTR5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR5_MSTR_IF_RR_SHRD_LBW_BASE 0x456A400ull
+#define DCORE2_RTR5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR5_MSTR_IF_RR_PRVT_LBW_BASE 0x456A600ull
+#define DCORE2_RTR5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR5_MSTR_IF_E2E_CRDT_BASE 0x456A800ull
+#define DCORE2_RTR5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR5_MSTR_IF_AXUSER_BASE 0x456AA80ull
+#define DCORE2_RTR5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR5_MSTR_IF_DBG_HBW_BASE 0x456AB00ull
+#define DCORE2_RTR5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR5_MSTR_IF_DBG_LBW_BASE 0x456AB80ull
+#define DCORE2_RTR5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR5_MSTR_IF_CORE_HBW_BASE 0x456AC00ull
+#define DCORE2_RTR5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR5_MSTR_IF_CORE_LBW_BASE 0x456AD80ull
+#define DCORE2_RTR5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR5_MSTR_IF_SPECIAL_BASE 0x456AE80ull
+#define DCORE2_RTR5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR5_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR5_ADD_DEC_HBW_BASE 0x456B000ull
+#define DCORE2_RTR5_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR5_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR5_ADD_DEC_LBW_BASE 0x456B400ull
+#define DCORE2_RTR5_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR5_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR5_ADD_DEC_SPECIAL_BASE 0x456BE80ull
+#define DCORE2_RTR5_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR5_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR5_BASE 0x456C000ull
+#define DCORE2_RTR5_MAX_OFFSET 0x1000
+#define DCORE2_RTR5_SECTION 0x3000
+#define mmDCORE2_RTR5_HBW_RD_RQ_LL_STAT_BASE 0x456C300ull
+#define DCORE2_RTR5_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_HBW_RD_RS_LL_STAT_BASE 0x456C340ull
+#define DCORE2_RTR5_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_HBW_WR_RQ_LL_STAT_BASE 0x456C380ull
+#define DCORE2_RTR5_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_HBW_WR_RS_LL_STAT_BASE 0x456C3C0ull
+#define DCORE2_RTR5_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_LBW_RD_RQ_LL_STAT_BASE 0x456C400ull
+#define DCORE2_RTR5_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_LBW_RD_RS_LL_STAT_BASE 0x456C440ull
+#define DCORE2_RTR5_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_LBW_WR_RQ_LL_STAT_BASE 0x456C480ull
+#define DCORE2_RTR5_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_LBW_WR_RS_LL_STAT_BASE 0x456C4C0ull
+#define DCORE2_RTR5_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_HBW_MFIFO_BASE 0x456C500ull
+#define DCORE2_RTR5_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR5_E2E_RD_LL_STAT_BASE 0x456C540ull
+#define DCORE2_RTR5_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR5_E2E_WR_LL_STAT_BASE 0x456C580ull
+#define DCORE2_RTR5_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR5_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR5_RTR_HBW_XACT_STAT_BASE 0x456C600ull
+#define DCORE2_RTR5_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR5_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR5_RTR_LBW_XACT_STAT_BASE 0x456C680ull
+#define DCORE2_RTR5_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR5_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR5_RTR_E2E_XACT_STAT_BASE 0x456C700ull
+#define DCORE2_RTR5_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR5_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR5_SPECIAL_BASE 0x456CE80ull
+#define DCORE2_RTR5_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR5_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR5_DBG_ADDR_BASE 0x456D000ull
+#define DCORE2_RTR5_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR5_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR5_DBG_ADDR_SPECIAL_BASE 0x456DE80ull
+#define DCORE2_RTR5_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR5_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR6_CTRL_BASE 0x4570000ull
+#define DCORE2_RTR6_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR6_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR6_CTRL_SPECIAL_BASE 0x4570E80ull
+#define DCORE2_RTR6_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR6_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR6_H3_BASE 0x4571000ull
+#define DCORE2_RTR6_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR6_H3_SECTION 0xE800
+#define mmDCORE2_RTR6_H3_SPECIAL_BASE 0x4571E80ull
+#define DCORE2_RTR6_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR6_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR6_MSTR_IF_RR_SHRD_HBW_BASE 0x4572000ull
+#define DCORE2_RTR6_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR6_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR6_MSTR_IF_RR_PRVT_HBW_BASE 0x4572200ull
+#define DCORE2_RTR6_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR6_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR6_MSTR_IF_RR_SHRD_LBW_BASE 0x4572400ull
+#define DCORE2_RTR6_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR6_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR6_MSTR_IF_RR_PRVT_LBW_BASE 0x4572600ull
+#define DCORE2_RTR6_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR6_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR6_MSTR_IF_E2E_CRDT_BASE 0x4572800ull
+#define DCORE2_RTR6_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR6_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR6_MSTR_IF_AXUSER_BASE 0x4572A80ull
+#define DCORE2_RTR6_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR6_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR6_MSTR_IF_DBG_HBW_BASE 0x4572B00ull
+#define DCORE2_RTR6_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR6_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR6_MSTR_IF_DBG_LBW_BASE 0x4572B80ull
+#define DCORE2_RTR6_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR6_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR6_MSTR_IF_CORE_HBW_BASE 0x4572C00ull
+#define DCORE2_RTR6_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR6_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR6_MSTR_IF_CORE_LBW_BASE 0x4572D80ull
+#define DCORE2_RTR6_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR6_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR6_MSTR_IF_SPECIAL_BASE 0x4572E80ull
+#define DCORE2_RTR6_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR6_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR6_ADD_DEC_HBW_BASE 0x4573000ull
+#define DCORE2_RTR6_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR6_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR6_ADD_DEC_LBW_BASE 0x4573400ull
+#define DCORE2_RTR6_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR6_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR6_ADD_DEC_SPECIAL_BASE 0x4573E80ull
+#define DCORE2_RTR6_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR6_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR6_BASE 0x4574000ull
+#define DCORE2_RTR6_MAX_OFFSET 0x1000
+#define DCORE2_RTR6_SECTION 0x3000
+#define mmDCORE2_RTR6_HBW_RD_RQ_LL_STAT_BASE 0x4574300ull
+#define DCORE2_RTR6_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_HBW_RD_RS_LL_STAT_BASE 0x4574340ull
+#define DCORE2_RTR6_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_HBW_WR_RQ_LL_STAT_BASE 0x4574380ull
+#define DCORE2_RTR6_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_HBW_WR_RS_LL_STAT_BASE 0x45743C0ull
+#define DCORE2_RTR6_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_LBW_RD_RQ_LL_STAT_BASE 0x4574400ull
+#define DCORE2_RTR6_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_LBW_RD_RS_LL_STAT_BASE 0x4574440ull
+#define DCORE2_RTR6_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_LBW_WR_RQ_LL_STAT_BASE 0x4574480ull
+#define DCORE2_RTR6_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_LBW_WR_RS_LL_STAT_BASE 0x45744C0ull
+#define DCORE2_RTR6_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_HBW_MFIFO_BASE 0x4574500ull
+#define DCORE2_RTR6_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR6_E2E_RD_LL_STAT_BASE 0x4574540ull
+#define DCORE2_RTR6_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR6_E2E_WR_LL_STAT_BASE 0x4574580ull
+#define DCORE2_RTR6_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR6_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR6_RTR_HBW_XACT_STAT_BASE 0x4574600ull
+#define DCORE2_RTR6_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR6_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR6_RTR_LBW_XACT_STAT_BASE 0x4574680ull
+#define DCORE2_RTR6_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR6_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR6_RTR_E2E_XACT_STAT_BASE 0x4574700ull
+#define DCORE2_RTR6_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR6_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR6_SPECIAL_BASE 0x4574E80ull
+#define DCORE2_RTR6_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR6_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR6_DBG_ADDR_BASE 0x4575000ull
+#define DCORE2_RTR6_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR6_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR6_DBG_ADDR_SPECIAL_BASE 0x4575E80ull
+#define DCORE2_RTR6_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR6_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_RTR7_CTRL_BASE 0x4578000ull
+#define DCORE2_RTR7_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_RTR7_CTRL_SECTION 0xE800
+#define mmDCORE2_RTR7_CTRL_SPECIAL_BASE 0x4578E80ull
+#define DCORE2_RTR7_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR7_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR7_H3_BASE 0x4579000ull
+#define DCORE2_RTR7_H3_MAX_OFFSET 0x1000
+#define DCORE2_RTR7_H3_SECTION 0xE800
+#define mmDCORE2_RTR7_H3_SPECIAL_BASE 0x4579E80ull
+#define DCORE2_RTR7_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR7_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR7_MSTR_IF_RR_SHRD_HBW_BASE 0x457A000ull
+#define DCORE2_RTR7_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR7_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_RTR7_MSTR_IF_RR_PRVT_HBW_BASE 0x457A200ull
+#define DCORE2_RTR7_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_RTR7_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_RTR7_MSTR_IF_RR_SHRD_LBW_BASE 0x457A400ull
+#define DCORE2_RTR7_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR7_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_RTR7_MSTR_IF_RR_PRVT_LBW_BASE 0x457A600ull
+#define DCORE2_RTR7_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_RTR7_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_RTR7_MSTR_IF_E2E_CRDT_BASE 0x457A800ull
+#define DCORE2_RTR7_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_RTR7_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_RTR7_MSTR_IF_AXUSER_BASE 0x457AA80ull
+#define DCORE2_RTR7_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_RTR7_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_RTR7_MSTR_IF_DBG_HBW_BASE 0x457AB00ull
+#define DCORE2_RTR7_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR7_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_RTR7_MSTR_IF_DBG_LBW_BASE 0x457AB80ull
+#define DCORE2_RTR7_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_RTR7_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_RTR7_MSTR_IF_CORE_HBW_BASE 0x457AC00ull
+#define DCORE2_RTR7_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_RTR7_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_RTR7_MSTR_IF_CORE_LBW_BASE 0x457AD80ull
+#define DCORE2_RTR7_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_RTR7_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_RTR7_MSTR_IF_SPECIAL_BASE 0x457AE80ull
+#define DCORE2_RTR7_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR7_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR7_ADD_DEC_HBW_BASE 0x457B000ull
+#define DCORE2_RTR7_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE2_RTR7_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE2_RTR7_ADD_DEC_LBW_BASE 0x457B400ull
+#define DCORE2_RTR7_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE2_RTR7_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE2_RTR7_ADD_DEC_SPECIAL_BASE 0x457BE80ull
+#define DCORE2_RTR7_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR7_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR7_BASE 0x457C000ull
+#define DCORE2_RTR7_MAX_OFFSET 0x1000
+#define DCORE2_RTR7_SECTION 0x3000
+#define mmDCORE2_RTR7_HBW_RD_RQ_LL_STAT_BASE 0x457C300ull
+#define DCORE2_RTR7_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_HBW_RD_RS_LL_STAT_BASE 0x457C340ull
+#define DCORE2_RTR7_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_HBW_WR_RQ_LL_STAT_BASE 0x457C380ull
+#define DCORE2_RTR7_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_HBW_WR_RS_LL_STAT_BASE 0x457C3C0ull
+#define DCORE2_RTR7_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_LBW_RD_RQ_LL_STAT_BASE 0x457C400ull
+#define DCORE2_RTR7_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_LBW_RD_RS_LL_STAT_BASE 0x457C440ull
+#define DCORE2_RTR7_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_LBW_WR_RQ_LL_STAT_BASE 0x457C480ull
+#define DCORE2_RTR7_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_LBW_WR_RS_LL_STAT_BASE 0x457C4C0ull
+#define DCORE2_RTR7_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_HBW_MFIFO_BASE 0x457C500ull
+#define DCORE2_RTR7_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE2_RTR7_E2E_RD_LL_STAT_BASE 0x457C540ull
+#define DCORE2_RTR7_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE2_RTR7_E2E_WR_LL_STAT_BASE 0x457C580ull
+#define DCORE2_RTR7_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE2_RTR7_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE2_RTR7_RTR_HBW_XACT_STAT_BASE 0x457C600ull
+#define DCORE2_RTR7_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR7_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR7_RTR_LBW_XACT_STAT_BASE 0x457C680ull
+#define DCORE2_RTR7_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR7_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE2_RTR7_RTR_E2E_XACT_STAT_BASE 0x457C700ull
+#define DCORE2_RTR7_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE2_RTR7_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE2_RTR7_SPECIAL_BASE 0x457CE80ull
+#define DCORE2_RTR7_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR7_SPECIAL_SECTION 0x1800
+#define mmDCORE2_RTR7_DBG_ADDR_BASE 0x457D000ull
+#define DCORE2_RTR7_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE2_RTR7_DBG_ADDR_SECTION 0xE800
+#define mmDCORE2_RTR7_DBG_ADDR_SPECIAL_BASE 0x457DE80ull
+#define DCORE2_RTR7_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_RTR7_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE2_SRAM0_BANK_BASE 0x4580000ull
+#define DCORE2_SRAM0_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM0_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM0_BANK_SPECIAL_BASE 0x4580E80ull
+#define DCORE2_SRAM0_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM0_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM0_RTR_BASE 0x4581000ull
+#define DCORE2_SRAM0_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM0_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM0_RTR_SPECIAL_BASE 0x4581E80ull
+#define DCORE2_SRAM0_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM0_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM0_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4582000ull
+#define DCORE2_SRAM0_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4582100ull
+#define DCORE2_SRAM0_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4582200ull
+#define DCORE2_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4582300ull
+#define DCORE2_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4582400ull
+#define DCORE2_SRAM0_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4582500ull
+#define DCORE2_SRAM0_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4582600ull
+#define DCORE2_SRAM0_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM0_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4582700ull
+#define DCORE2_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4582780ull
+#define DCORE2_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4582800ull
+#define DCORE2_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4582880ull
+#define DCORE2_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4582900ull
+#define DCORE2_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4582980ull
+#define DCORE2_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4582A00ull
+#define DCORE2_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4582A80ull
+#define DCORE2_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM0_DBG_CNT_SPECIAL_BASE 0x4582E80ull
+#define DCORE2_SRAM0_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM0_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM1_BANK_BASE 0x4588000ull
+#define DCORE2_SRAM1_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM1_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM1_BANK_SPECIAL_BASE 0x4588E80ull
+#define DCORE2_SRAM1_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM1_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM1_RTR_BASE 0x4589000ull
+#define DCORE2_SRAM1_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM1_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM1_RTR_SPECIAL_BASE 0x4589E80ull
+#define DCORE2_SRAM1_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM1_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM1_DBG_CNT_N_HBW_DBG_CNT_BASE 0x458A000ull
+#define DCORE2_SRAM1_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_S_HBW_DBG_CNT_BASE 0x458A100ull
+#define DCORE2_SRAM1_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x458A200ull
+#define DCORE2_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x458A300ull
+#define DCORE2_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_N_LBW_DBG_CNT_BASE 0x458A400ull
+#define DCORE2_SRAM1_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_S_LBW_DBG_CNT_BASE 0x458A500ull
+#define DCORE2_SRAM1_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_L_LBW_DBG_CNT_BASE 0x458A600ull
+#define DCORE2_SRAM1_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM1_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x458A700ull
+#define DCORE2_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x458A780ull
+#define DCORE2_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x458A800ull
+#define DCORE2_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x458A880ull
+#define DCORE2_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x458A900ull
+#define DCORE2_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x458A980ull
+#define DCORE2_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x458AA00ull
+#define DCORE2_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x458AA80ull
+#define DCORE2_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM1_DBG_CNT_SPECIAL_BASE 0x458AE80ull
+#define DCORE2_SRAM1_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM1_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM2_BANK_BASE 0x4590000ull
+#define DCORE2_SRAM2_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM2_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM2_BANK_SPECIAL_BASE 0x4590E80ull
+#define DCORE2_SRAM2_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM2_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM2_RTR_BASE 0x4591000ull
+#define DCORE2_SRAM2_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM2_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM2_RTR_SPECIAL_BASE 0x4591E80ull
+#define DCORE2_SRAM2_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM2_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM2_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4592000ull
+#define DCORE2_SRAM2_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4592100ull
+#define DCORE2_SRAM2_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4592200ull
+#define DCORE2_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4592300ull
+#define DCORE2_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4592400ull
+#define DCORE2_SRAM2_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4592500ull
+#define DCORE2_SRAM2_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4592600ull
+#define DCORE2_SRAM2_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM2_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4592700ull
+#define DCORE2_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4592780ull
+#define DCORE2_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4592800ull
+#define DCORE2_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4592880ull
+#define DCORE2_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4592900ull
+#define DCORE2_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4592980ull
+#define DCORE2_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4592A00ull
+#define DCORE2_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4592A80ull
+#define DCORE2_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM2_DBG_CNT_SPECIAL_BASE 0x4592E80ull
+#define DCORE2_SRAM2_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM2_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM3_BANK_BASE 0x4598000ull
+#define DCORE2_SRAM3_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM3_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM3_BANK_SPECIAL_BASE 0x4598E80ull
+#define DCORE2_SRAM3_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM3_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM3_RTR_BASE 0x4599000ull
+#define DCORE2_SRAM3_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM3_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM3_RTR_SPECIAL_BASE 0x4599E80ull
+#define DCORE2_SRAM3_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM3_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM3_DBG_CNT_N_HBW_DBG_CNT_BASE 0x459A000ull
+#define DCORE2_SRAM3_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_S_HBW_DBG_CNT_BASE 0x459A100ull
+#define DCORE2_SRAM3_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x459A200ull
+#define DCORE2_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x459A300ull
+#define DCORE2_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_N_LBW_DBG_CNT_BASE 0x459A400ull
+#define DCORE2_SRAM3_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_S_LBW_DBG_CNT_BASE 0x459A500ull
+#define DCORE2_SRAM3_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_L_LBW_DBG_CNT_BASE 0x459A600ull
+#define DCORE2_SRAM3_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM3_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x459A700ull
+#define DCORE2_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x459A780ull
+#define DCORE2_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x459A800ull
+#define DCORE2_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x459A880ull
+#define DCORE2_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x459A900ull
+#define DCORE2_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x459A980ull
+#define DCORE2_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x459AA00ull
+#define DCORE2_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x459AA80ull
+#define DCORE2_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM3_DBG_CNT_SPECIAL_BASE 0x459AE80ull
+#define DCORE2_SRAM3_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM3_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM4_BANK_BASE 0x45A0000ull
+#define DCORE2_SRAM4_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM4_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM4_BANK_SPECIAL_BASE 0x45A0E80ull
+#define DCORE2_SRAM4_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM4_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM4_RTR_BASE 0x45A1000ull
+#define DCORE2_SRAM4_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM4_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM4_RTR_SPECIAL_BASE 0x45A1E80ull
+#define DCORE2_SRAM4_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM4_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM4_DBG_CNT_N_HBW_DBG_CNT_BASE 0x45A2000ull
+#define DCORE2_SRAM4_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_S_HBW_DBG_CNT_BASE 0x45A2100ull
+#define DCORE2_SRAM4_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x45A2200ull
+#define DCORE2_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x45A2300ull
+#define DCORE2_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_N_LBW_DBG_CNT_BASE 0x45A2400ull
+#define DCORE2_SRAM4_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_S_LBW_DBG_CNT_BASE 0x45A2500ull
+#define DCORE2_SRAM4_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_L_LBW_DBG_CNT_BASE 0x45A2600ull
+#define DCORE2_SRAM4_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM4_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x45A2700ull
+#define DCORE2_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x45A2780ull
+#define DCORE2_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x45A2800ull
+#define DCORE2_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x45A2880ull
+#define DCORE2_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x45A2900ull
+#define DCORE2_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x45A2980ull
+#define DCORE2_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x45A2A00ull
+#define DCORE2_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x45A2A80ull
+#define DCORE2_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM4_DBG_CNT_SPECIAL_BASE 0x45A2E80ull
+#define DCORE2_SRAM4_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM4_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM5_BANK_BASE 0x45A8000ull
+#define DCORE2_SRAM5_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM5_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM5_BANK_SPECIAL_BASE 0x45A8E80ull
+#define DCORE2_SRAM5_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM5_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM5_RTR_BASE 0x45A9000ull
+#define DCORE2_SRAM5_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM5_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM5_RTR_SPECIAL_BASE 0x45A9E80ull
+#define DCORE2_SRAM5_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM5_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM5_DBG_CNT_N_HBW_DBG_CNT_BASE 0x45AA000ull
+#define DCORE2_SRAM5_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_S_HBW_DBG_CNT_BASE 0x45AA100ull
+#define DCORE2_SRAM5_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x45AA200ull
+#define DCORE2_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x45AA300ull
+#define DCORE2_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_N_LBW_DBG_CNT_BASE 0x45AA400ull
+#define DCORE2_SRAM5_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_S_LBW_DBG_CNT_BASE 0x45AA500ull
+#define DCORE2_SRAM5_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_L_LBW_DBG_CNT_BASE 0x45AA600ull
+#define DCORE2_SRAM5_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM5_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x45AA700ull
+#define DCORE2_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x45AA780ull
+#define DCORE2_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x45AA800ull
+#define DCORE2_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x45AA880ull
+#define DCORE2_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x45AA900ull
+#define DCORE2_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x45AA980ull
+#define DCORE2_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x45AAA00ull
+#define DCORE2_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x45AAA80ull
+#define DCORE2_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM5_DBG_CNT_SPECIAL_BASE 0x45AAE80ull
+#define DCORE2_SRAM5_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM5_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM6_BANK_BASE 0x45B0000ull
+#define DCORE2_SRAM6_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM6_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM6_BANK_SPECIAL_BASE 0x45B0E80ull
+#define DCORE2_SRAM6_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM6_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM6_RTR_BASE 0x45B1000ull
+#define DCORE2_SRAM6_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM6_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM6_RTR_SPECIAL_BASE 0x45B1E80ull
+#define DCORE2_SRAM6_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM6_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM6_DBG_CNT_N_HBW_DBG_CNT_BASE 0x45B2000ull
+#define DCORE2_SRAM6_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_S_HBW_DBG_CNT_BASE 0x45B2100ull
+#define DCORE2_SRAM6_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x45B2200ull
+#define DCORE2_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x45B2300ull
+#define DCORE2_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_N_LBW_DBG_CNT_BASE 0x45B2400ull
+#define DCORE2_SRAM6_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_S_LBW_DBG_CNT_BASE 0x45B2500ull
+#define DCORE2_SRAM6_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_L_LBW_DBG_CNT_BASE 0x45B2600ull
+#define DCORE2_SRAM6_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM6_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x45B2700ull
+#define DCORE2_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x45B2780ull
+#define DCORE2_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x45B2800ull
+#define DCORE2_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x45B2880ull
+#define DCORE2_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x45B2900ull
+#define DCORE2_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x45B2980ull
+#define DCORE2_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x45B2A00ull
+#define DCORE2_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x45B2A80ull
+#define DCORE2_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM6_DBG_CNT_SPECIAL_BASE 0x45B2E80ull
+#define DCORE2_SRAM6_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM6_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_SRAM7_BANK_BASE 0x45B8000ull
+#define DCORE2_SRAM7_BANK_MAX_OFFSET 0x1000
+#define DCORE2_SRAM7_BANK_SECTION 0xE800
+#define mmDCORE2_SRAM7_BANK_SPECIAL_BASE 0x45B8E80ull
+#define DCORE2_SRAM7_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM7_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM7_RTR_BASE 0x45B9000ull
+#define DCORE2_SRAM7_RTR_MAX_OFFSET 0x1000
+#define DCORE2_SRAM7_RTR_SECTION 0xE800
+#define mmDCORE2_SRAM7_RTR_SPECIAL_BASE 0x45B9E80ull
+#define DCORE2_SRAM7_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM7_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE2_SRAM7_DBG_CNT_N_HBW_DBG_CNT_BASE 0x45BA000ull
+#define DCORE2_SRAM7_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_S_HBW_DBG_CNT_BASE 0x45BA100ull
+#define DCORE2_SRAM7_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x45BA200ull
+#define DCORE2_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x45BA300ull
+#define DCORE2_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_N_LBW_DBG_CNT_BASE 0x45BA400ull
+#define DCORE2_SRAM7_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_S_LBW_DBG_CNT_BASE 0x45BA500ull
+#define DCORE2_SRAM7_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_L_LBW_DBG_CNT_BASE 0x45BA600ull
+#define DCORE2_SRAM7_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE2_SRAM7_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE2_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x45BA700ull
+#define DCORE2_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x45BA780ull
+#define DCORE2_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x45BA800ull
+#define DCORE2_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x45BA880ull
+#define DCORE2_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x45BA900ull
+#define DCORE2_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x45BA980ull
+#define DCORE2_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x45BAA00ull
+#define DCORE2_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE2_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x45BAA80ull
+#define DCORE2_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE2_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE2_SRAM7_DBG_CNT_SPECIAL_BASE 0x45BAE80ull
+#define DCORE2_SRAM7_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_SRAM7_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE2_EDMA0_QM_DCCM_BASE 0x45C0000ull
+#define DCORE2_EDMA0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_EDMA0_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_ARC_AUX_BASE 0x45C8000ull
+#define DCORE2_EDMA0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_EDMA0_QM_ARC_AUX_SPECIAL_BASE 0x45C8E80ull
+#define DCORE2_EDMA0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_EDMA0_QM_BASE 0x45CA000ull
+#define DCORE2_EDMA0_QM_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_QM_SECTION 0x9000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x45CA900ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x45CA908ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x45CA910ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x45CA918ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x45CA920ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x45CA928ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x45CA930ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x45CA938ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x45CA940ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x45CA948ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x45CA950ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x45CA958ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x45CA960ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x45CA968ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x45CA970ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x45CA978ull
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_EDMA0_QM_AXUSER_SECURED_BASE 0x45CAB00ull
+#define DCORE2_EDMA0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_EDMA0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_AXUSER_NONSECURED_BASE 0x45CAB80ull
+#define DCORE2_EDMA0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_EDMA0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_DBG_HBW_BASE 0x45CAC00ull
+#define DCORE2_EDMA0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_EDMA0_QM_DBG_LBW_BASE 0x45CAC80ull
+#define DCORE2_EDMA0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_EDMA0_QM_CGM_BASE 0x45CAD80ull
+#define DCORE2_EDMA0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_EDMA0_QM_CGM_SECTION 0x1000
+#define mmDCORE2_EDMA0_QM_SPECIAL_BASE 0x45CAE80ull
+#define DCORE2_EDMA0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_EDMA0_CORE_BASE 0x45CB000ull
+#define DCORE2_EDMA0_CORE_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_CORE_SECTION 0x8000
+#define mmDCORE2_EDMA0_CORE_CTX_AXUSER_BASE 0x45CB800ull
+#define DCORE2_EDMA0_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_EDMA0_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE2_EDMA0_CORE_CTX_BASE 0x45CB860ull
+#define DCORE2_EDMA0_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE2_EDMA0_CORE_CTX_SECTION 0x5A00
+#define mmDCORE2_EDMA0_CORE_KDMA_CGM_BASE 0x45CBE00ull
+#define DCORE2_EDMA0_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE2_EDMA0_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE2_EDMA0_CORE_SPECIAL_BASE 0x45CBE80ull
+#define DCORE2_EDMA0_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA0_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE2_EDMA0_MSTR_IF_RR_SHRD_HBW_BASE 0x45CC000ull
+#define DCORE2_EDMA0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_EDMA0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_EDMA0_MSTR_IF_RR_PRVT_HBW_BASE 0x45CC200ull
+#define DCORE2_EDMA0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_EDMA0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_EDMA0_MSTR_IF_RR_SHRD_LBW_BASE 0x45CC400ull
+#define DCORE2_EDMA0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_EDMA0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_EDMA0_MSTR_IF_RR_PRVT_LBW_BASE 0x45CC600ull
+#define DCORE2_EDMA0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_EDMA0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_EDMA0_MSTR_IF_E2E_CRDT_BASE 0x45CC800ull
+#define DCORE2_EDMA0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_EDMA0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_EDMA0_MSTR_IF_AXUSER_BASE 0x45CCA80ull
+#define DCORE2_EDMA0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_EDMA0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_EDMA0_MSTR_IF_DBG_HBW_BASE 0x45CCB00ull
+#define DCORE2_EDMA0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_EDMA0_MSTR_IF_DBG_LBW_BASE 0x45CCB80ull
+#define DCORE2_EDMA0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_EDMA0_MSTR_IF_CORE_HBW_BASE 0x45CCC00ull
+#define DCORE2_EDMA0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_EDMA0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_EDMA0_MSTR_IF_CORE_LBW_BASE 0x45CCD80ull
+#define DCORE2_EDMA0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_EDMA0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_EDMA0_MSTR_IF_SPECIAL_BASE 0x45CCE80ull
+#define DCORE2_EDMA0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_EDMA1_QM_DCCM_BASE 0x45D0000ull
+#define DCORE2_EDMA1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE2_EDMA1_QM_DCCM_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_ARC_AUX_BASE 0x45D8000ull
+#define DCORE2_EDMA1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE2_EDMA1_QM_ARC_AUX_SPECIAL_BASE 0x45D8E80ull
+#define DCORE2_EDMA1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE2_EDMA1_QM_BASE 0x45DA000ull
+#define DCORE2_EDMA1_QM_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_QM_SECTION 0x9000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x45DA900ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x45DA908ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x45DA910ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x45DA918ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x45DA920ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x45DA928ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x45DA930ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x45DA938ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x45DA940ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x45DA948ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x45DA950ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x45DA958ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x45DA960ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x45DA968ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x45DA970ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x45DA978ull
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE2_EDMA1_QM_AXUSER_SECURED_BASE 0x45DAB00ull
+#define DCORE2_EDMA1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE2_EDMA1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_AXUSER_NONSECURED_BASE 0x45DAB80ull
+#define DCORE2_EDMA1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE2_EDMA1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_DBG_HBW_BASE 0x45DAC00ull
+#define DCORE2_EDMA1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_EDMA1_QM_DBG_LBW_BASE 0x45DAC80ull
+#define DCORE2_EDMA1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE2_EDMA1_QM_CGM_BASE 0x45DAD80ull
+#define DCORE2_EDMA1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE2_EDMA1_QM_CGM_SECTION 0x1000
+#define mmDCORE2_EDMA1_QM_SPECIAL_BASE 0x45DAE80ull
+#define DCORE2_EDMA1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE2_EDMA1_CORE_BASE 0x45DB000ull
+#define DCORE2_EDMA1_CORE_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_CORE_SECTION 0x8000
+#define mmDCORE2_EDMA1_CORE_CTX_AXUSER_BASE 0x45DB800ull
+#define DCORE2_EDMA1_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_EDMA1_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE2_EDMA1_CORE_CTX_BASE 0x45DB860ull
+#define DCORE2_EDMA1_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE2_EDMA1_CORE_CTX_SECTION 0x5A00
+#define mmDCORE2_EDMA1_CORE_KDMA_CGM_BASE 0x45DBE00ull
+#define DCORE2_EDMA1_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE2_EDMA1_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE2_EDMA1_CORE_SPECIAL_BASE 0x45DBE80ull
+#define DCORE2_EDMA1_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA1_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE2_EDMA1_MSTR_IF_RR_SHRD_HBW_BASE 0x45DC000ull
+#define DCORE2_EDMA1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_EDMA1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_EDMA1_MSTR_IF_RR_PRVT_HBW_BASE 0x45DC200ull
+#define DCORE2_EDMA1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_EDMA1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_EDMA1_MSTR_IF_RR_SHRD_LBW_BASE 0x45DC400ull
+#define DCORE2_EDMA1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_EDMA1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_EDMA1_MSTR_IF_RR_PRVT_LBW_BASE 0x45DC600ull
+#define DCORE2_EDMA1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_EDMA1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_EDMA1_MSTR_IF_E2E_CRDT_BASE 0x45DC800ull
+#define DCORE2_EDMA1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_EDMA1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_EDMA1_MSTR_IF_AXUSER_BASE 0x45DCA80ull
+#define DCORE2_EDMA1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_EDMA1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_EDMA1_MSTR_IF_DBG_HBW_BASE 0x45DCB00ull
+#define DCORE2_EDMA1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_EDMA1_MSTR_IF_DBG_LBW_BASE 0x45DCB80ull
+#define DCORE2_EDMA1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_EDMA1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_EDMA1_MSTR_IF_CORE_HBW_BASE 0x45DCC00ull
+#define DCORE2_EDMA1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_EDMA1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_EDMA1_MSTR_IF_CORE_LBW_BASE 0x45DCD80ull
+#define DCORE2_EDMA1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_EDMA1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_EDMA1_MSTR_IF_SPECIAL_BASE 0x45DCE80ull
+#define DCORE2_EDMA1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_EDMA1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE2_DEC0_CMD_BASE 0x45E0000ull
+#define DCORE2_DEC0_CMD_MAX_OFFSET 0x1100
+#define DCORE2_DEC0_CMD_SECTION 0x1000
+#define mmDCORE2_DEC0_VSI_BASE 0x45E1000ull
+#define DCORE2_DEC0_VSI_MAX_OFFSET 0x6FC0
+#define DCORE2_DEC0_VSI_SECTION 0x1000
+#define mmDCORE2_DEC0_L2C_BASE 0x45E2000ull
+#define DCORE2_DEC0_L2C_MAX_OFFSET 0x39C0
+#define DCORE2_DEC0_L2C_SECTION 0x1000
+#define mmDCORE2_VDEC0_BRDG_CTRL_BASE 0x45E3000ull
+#define DCORE2_VDEC0_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x45E3800ull
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x45E3900ull
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x45E3A00ull
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x45E3B00ull
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE2_VDEC0_BRDG_CTRL_AXUSER_DEC_BASE 0x45E3C00ull
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE2_VDEC0_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE2_VDEC0_BRDG_CTRL_SPECIAL_BASE 0x45E3E80ull
+#define DCORE2_VDEC0_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_VDEC0_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_VDEC0_CTRL_BASE 0x45E4000ull
+#define DCORE2_VDEC0_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_CTRL_SECTION 0xE800
+#define mmDCORE2_VDEC0_CTRL_SPECIAL_BASE 0x45E4E80ull
+#define DCORE2_VDEC0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_VDEC0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE 0x45E5000ull
+#define DCORE2_VDEC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_VDEC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_VDEC0_MSTR_IF_RR_PRVT_HBW_BASE 0x45E5200ull
+#define DCORE2_VDEC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_VDEC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_VDEC0_MSTR_IF_RR_SHRD_LBW_BASE 0x45E5400ull
+#define DCORE2_VDEC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_VDEC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_VDEC0_MSTR_IF_RR_PRVT_LBW_BASE 0x45E5600ull
+#define DCORE2_VDEC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_VDEC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_VDEC0_MSTR_IF_E2E_CRDT_BASE 0x45E5800ull
+#define DCORE2_VDEC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_VDEC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_VDEC0_MSTR_IF_AXUSER_BASE 0x45E5A80ull
+#define DCORE2_VDEC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_VDEC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_VDEC0_MSTR_IF_DBG_HBW_BASE 0x45E5B00ull
+#define DCORE2_VDEC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_VDEC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_VDEC0_MSTR_IF_DBG_LBW_BASE 0x45E5B80ull
+#define DCORE2_VDEC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_VDEC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_VDEC0_MSTR_IF_CORE_HBW_BASE 0x45E5C00ull
+#define DCORE2_VDEC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_VDEC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_VDEC0_MSTR_IF_CORE_LBW_BASE 0x45E5D80ull
+#define DCORE2_VDEC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_VDEC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_VDEC0_MSTR_IF_SPECIAL_BASE 0x45E5E80ull
+#define DCORE2_VDEC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_VDEC0_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE2_DEC1_CMD_BASE 0x45F0000ull
+#define DCORE2_DEC1_CMD_MAX_OFFSET 0x1100
+#define DCORE2_DEC1_CMD_SECTION 0x1000
+#define mmDCORE2_DEC1_VSI_BASE 0x45F1000ull
+#define DCORE2_DEC1_VSI_MAX_OFFSET 0x6FC0
+#define DCORE2_DEC1_VSI_SECTION 0x1000
+#define mmDCORE2_DEC1_L2C_BASE 0x45F2000ull
+#define DCORE2_DEC1_L2C_MAX_OFFSET 0x39C0
+#define DCORE2_DEC1_L2C_SECTION 0x1000
+#define mmDCORE2_VDEC1_BRDG_CTRL_BASE 0x45F3000ull
+#define DCORE2_VDEC1_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x45F3800ull
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x45F3900ull
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x45F3A00ull
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x45F3B00ull
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE2_VDEC1_BRDG_CTRL_AXUSER_DEC_BASE 0x45F3C00ull
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE2_VDEC1_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE2_VDEC1_BRDG_CTRL_SPECIAL_BASE 0x45F3E80ull
+#define DCORE2_VDEC1_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_VDEC1_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_VDEC1_CTRL_BASE 0x45F4000ull
+#define DCORE2_VDEC1_CTRL_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_CTRL_SECTION 0xE800
+#define mmDCORE2_VDEC1_CTRL_SPECIAL_BASE 0x45F4E80ull
+#define DCORE2_VDEC1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_VDEC1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE 0x45F5000ull
+#define DCORE2_VDEC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_VDEC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE2_VDEC1_MSTR_IF_RR_PRVT_HBW_BASE 0x45F5200ull
+#define DCORE2_VDEC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE2_VDEC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE2_VDEC1_MSTR_IF_RR_SHRD_LBW_BASE 0x45F5400ull
+#define DCORE2_VDEC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_VDEC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE2_VDEC1_MSTR_IF_RR_PRVT_LBW_BASE 0x45F5600ull
+#define DCORE2_VDEC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE2_VDEC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE2_VDEC1_MSTR_IF_E2E_CRDT_BASE 0x45F5800ull
+#define DCORE2_VDEC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE2_VDEC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE2_VDEC1_MSTR_IF_AXUSER_BASE 0x45F5A80ull
+#define DCORE2_VDEC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE2_VDEC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE2_VDEC1_MSTR_IF_DBG_HBW_BASE 0x45F5B00ull
+#define DCORE2_VDEC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE2_VDEC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE2_VDEC1_MSTR_IF_DBG_LBW_BASE 0x45F5B80ull
+#define DCORE2_VDEC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE2_VDEC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE2_VDEC1_MSTR_IF_CORE_HBW_BASE 0x45F5C00ull
+#define DCORE2_VDEC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE2_VDEC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE2_VDEC1_MSTR_IF_CORE_LBW_BASE 0x45F5D80ull
+#define DCORE2_VDEC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE2_VDEC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE2_VDEC1_MSTR_IF_SPECIAL_BASE 0x45F5E80ull
+#define DCORE2_VDEC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_VDEC1_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE3_TPC0_QM_DCCM_BASE 0x4600000ull
+#define DCORE3_TPC0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC0_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_ARC_AUX_BASE 0x4608000ull
+#define DCORE3_TPC0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_TPC0_QM_ARC_AUX_SPECIAL_BASE 0x4608E80ull
+#define DCORE3_TPC0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC0_QM_BASE 0x460A000ull
+#define DCORE3_TPC0_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_QM_SECTION 0x9000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x460A900ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x460A908ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x460A910ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x460A918ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x460A920ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x460A928ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x460A930ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x460A938ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x460A940ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x460A948ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x460A950ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x460A958ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x460A960ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x460A968ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x460A970ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x460A978ull
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC0_QM_AXUSER_SECURED_BASE 0x460AB00ull
+#define DCORE3_TPC0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_AXUSER_NONSECURED_BASE 0x460AB80ull
+#define DCORE3_TPC0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_DBG_HBW_BASE 0x460AC00ull
+#define DCORE3_TPC0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC0_QM_DBG_LBW_BASE 0x460AC80ull
+#define DCORE3_TPC0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC0_QM_CGM_BASE 0x460AD80ull
+#define DCORE3_TPC0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC0_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC0_QM_SPECIAL_BASE 0x460AE80ull
+#define DCORE3_TPC0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_0_BASE 0x460B000ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC0_CFG_BASE 0x460B000ull
+#define DCORE3_TPC0_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC0_CFG_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_1_BASE 0x460B050ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_2_BASE 0x460B0A0ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_3_BASE 0x460B0F0ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_4_BASE 0x460B140ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_5_BASE 0x460B190ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_6_BASE 0x460B1E0ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_7_BASE 0x460B230ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_8_BASE 0x460B280ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_9_BASE 0x460B2D0ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_10_BASE 0x460B320ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_11_BASE 0x460B370ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_12_BASE 0x460B3C0ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_13_BASE 0x460B410ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_14_BASE 0x460B460ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_TENSOR_15_BASE 0x460B4B0ull
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_KERNEL_SYNC_OBJECT_BASE 0x460B500ull
+#define DCORE3_TPC0_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC0_CFG_KERNEL_BASE 0x460B508ull
+#define DCORE3_TPC0_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC0_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_0_BASE 0x460B5DCull
+#define DCORE3_TPC0_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_1_BASE 0x460B62Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_2_BASE 0x460B67Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_3_BASE 0x460B6CCull
+#define DCORE3_TPC0_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_4_BASE 0x460B71Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_5_BASE 0x460B76Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_6_BASE 0x460B7BCull
+#define DCORE3_TPC0_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_7_BASE 0x460B80Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_8_BASE 0x460B85Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_9_BASE 0x460B8ACull
+#define DCORE3_TPC0_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_10_BASE 0x460B8FCull
+#define DCORE3_TPC0_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_11_BASE 0x460B94Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_12_BASE 0x460B99Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_13_BASE 0x460B9ECull
+#define DCORE3_TPC0_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_14_BASE 0x460BA3Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_TENSOR_15_BASE 0x460BA8Cull
+#define DCORE3_TPC0_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC0_CFG_QM_SYNC_OBJECT_BASE 0x460BADCull
+#define DCORE3_TPC0_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC0_CFG_QM_BASE 0x460BAE4ull
+#define DCORE3_TPC0_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC0_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC0_CFG_AXUSER_BASE 0x460BE00ull
+#define DCORE3_TPC0_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC0_CFG_SPECIAL_BASE 0x460BE80ull
+#define DCORE3_TPC0_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC0_MSTR_IF_RR_SHRD_HBW_BASE 0x460C000ull
+#define DCORE3_TPC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_TPC0_MSTR_IF_RR_PRVT_HBW_BASE 0x460C200ull
+#define DCORE3_TPC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_TPC0_MSTR_IF_RR_SHRD_LBW_BASE 0x460C400ull
+#define DCORE3_TPC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_TPC0_MSTR_IF_RR_PRVT_LBW_BASE 0x460C600ull
+#define DCORE3_TPC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_TPC0_MSTR_IF_E2E_CRDT_BASE 0x460C800ull
+#define DCORE3_TPC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_TPC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_TPC0_MSTR_IF_AXUSER_BASE 0x460CA80ull
+#define DCORE3_TPC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC0_MSTR_IF_DBG_HBW_BASE 0x460CB00ull
+#define DCORE3_TPC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC0_MSTR_IF_DBG_LBW_BASE 0x460CB80ull
+#define DCORE3_TPC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_TPC0_MSTR_IF_CORE_HBW_BASE 0x460CC00ull
+#define DCORE3_TPC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_TPC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_TPC0_MSTR_IF_CORE_LBW_BASE 0x460CD80ull
+#define DCORE3_TPC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_TPC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_TPC0_MSTR_IF_SPECIAL_BASE 0x460CE80ull
+#define DCORE3_TPC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_TPC1_QM_DCCM_BASE 0x4610000ull
+#define DCORE3_TPC1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC1_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_ARC_AUX_BASE 0x4618000ull
+#define DCORE3_TPC1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_TPC1_QM_ARC_AUX_SPECIAL_BASE 0x4618E80ull
+#define DCORE3_TPC1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC1_QM_BASE 0x461A000ull
+#define DCORE3_TPC1_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_QM_SECTION 0x9000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x461A900ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x461A908ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x461A910ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x461A918ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x461A920ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x461A928ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x461A930ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x461A938ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x461A940ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x461A948ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x461A950ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x461A958ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x461A960ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x461A968ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x461A970ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x461A978ull
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC1_QM_AXUSER_SECURED_BASE 0x461AB00ull
+#define DCORE3_TPC1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_AXUSER_NONSECURED_BASE 0x461AB80ull
+#define DCORE3_TPC1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_DBG_HBW_BASE 0x461AC00ull
+#define DCORE3_TPC1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC1_QM_DBG_LBW_BASE 0x461AC80ull
+#define DCORE3_TPC1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC1_QM_CGM_BASE 0x461AD80ull
+#define DCORE3_TPC1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC1_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC1_QM_SPECIAL_BASE 0x461AE80ull
+#define DCORE3_TPC1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_0_BASE 0x461B000ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC1_CFG_BASE 0x461B000ull
+#define DCORE3_TPC1_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC1_CFG_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_1_BASE 0x461B050ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_2_BASE 0x461B0A0ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_3_BASE 0x461B0F0ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_4_BASE 0x461B140ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_5_BASE 0x461B190ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_6_BASE 0x461B1E0ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_7_BASE 0x461B230ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_8_BASE 0x461B280ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_9_BASE 0x461B2D0ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_10_BASE 0x461B320ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_11_BASE 0x461B370ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_12_BASE 0x461B3C0ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_13_BASE 0x461B410ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_14_BASE 0x461B460ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_TENSOR_15_BASE 0x461B4B0ull
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_KERNEL_SYNC_OBJECT_BASE 0x461B500ull
+#define DCORE3_TPC1_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC1_CFG_KERNEL_BASE 0x461B508ull
+#define DCORE3_TPC1_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC1_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_0_BASE 0x461B5DCull
+#define DCORE3_TPC1_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_1_BASE 0x461B62Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_2_BASE 0x461B67Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_3_BASE 0x461B6CCull
+#define DCORE3_TPC1_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_4_BASE 0x461B71Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_5_BASE 0x461B76Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_6_BASE 0x461B7BCull
+#define DCORE3_TPC1_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_7_BASE 0x461B80Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_8_BASE 0x461B85Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_9_BASE 0x461B8ACull
+#define DCORE3_TPC1_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_10_BASE 0x461B8FCull
+#define DCORE3_TPC1_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_11_BASE 0x461B94Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_12_BASE 0x461B99Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_13_BASE 0x461B9ECull
+#define DCORE3_TPC1_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_14_BASE 0x461BA3Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_TENSOR_15_BASE 0x461BA8Cull
+#define DCORE3_TPC1_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC1_CFG_QM_SYNC_OBJECT_BASE 0x461BADCull
+#define DCORE3_TPC1_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC1_CFG_QM_BASE 0x461BAE4ull
+#define DCORE3_TPC1_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC1_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC1_CFG_AXUSER_BASE 0x461BE00ull
+#define DCORE3_TPC1_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC1_CFG_SPECIAL_BASE 0x461BE80ull
+#define DCORE3_TPC1_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC1_MSTR_IF_RR_SHRD_HBW_BASE 0x461C000ull
+#define DCORE3_TPC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_TPC1_MSTR_IF_RR_PRVT_HBW_BASE 0x461C200ull
+#define DCORE3_TPC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_TPC1_MSTR_IF_RR_SHRD_LBW_BASE 0x461C400ull
+#define DCORE3_TPC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_TPC1_MSTR_IF_RR_PRVT_LBW_BASE 0x461C600ull
+#define DCORE3_TPC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_TPC1_MSTR_IF_E2E_CRDT_BASE 0x461C800ull
+#define DCORE3_TPC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_TPC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_TPC1_MSTR_IF_AXUSER_BASE 0x461CA80ull
+#define DCORE3_TPC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC1_MSTR_IF_DBG_HBW_BASE 0x461CB00ull
+#define DCORE3_TPC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC1_MSTR_IF_DBG_LBW_BASE 0x461CB80ull
+#define DCORE3_TPC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_TPC1_MSTR_IF_CORE_HBW_BASE 0x461CC00ull
+#define DCORE3_TPC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_TPC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_TPC1_MSTR_IF_CORE_LBW_BASE 0x461CD80ull
+#define DCORE3_TPC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_TPC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_TPC1_MSTR_IF_SPECIAL_BASE 0x461CE80ull
+#define DCORE3_TPC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_TPC2_QM_DCCM_BASE 0x4620000ull
+#define DCORE3_TPC2_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC2_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_ARC_AUX_BASE 0x4628000ull
+#define DCORE3_TPC2_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_TPC2_QM_ARC_AUX_SPECIAL_BASE 0x4628E80ull
+#define DCORE3_TPC2_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC2_QM_BASE 0x462A000ull
+#define DCORE3_TPC2_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_QM_SECTION 0x9000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR0_BASE 0x462A900ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR1_BASE 0x462A908ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR2_BASE 0x462A910ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR3_BASE 0x462A918ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR4_BASE 0x462A920ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR5_BASE 0x462A928ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR6_BASE 0x462A930ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR7_BASE 0x462A938ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR8_BASE 0x462A940ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR9_BASE 0x462A948ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR10_BASE 0x462A950ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR11_BASE 0x462A958ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR12_BASE 0x462A960ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR13_BASE 0x462A968ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR14_BASE 0x462A970ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR15_BASE 0x462A978ull
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC2_QM_AXUSER_SECURED_BASE 0x462AB00ull
+#define DCORE3_TPC2_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_AXUSER_NONSECURED_BASE 0x462AB80ull
+#define DCORE3_TPC2_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_DBG_HBW_BASE 0x462AC00ull
+#define DCORE3_TPC2_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC2_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC2_QM_DBG_LBW_BASE 0x462AC80ull
+#define DCORE3_TPC2_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC2_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC2_QM_CGM_BASE 0x462AD80ull
+#define DCORE3_TPC2_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC2_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC2_QM_SPECIAL_BASE 0x462AE80ull
+#define DCORE3_TPC2_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_0_BASE 0x462B000ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC2_CFG_BASE 0x462B000ull
+#define DCORE3_TPC2_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC2_CFG_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_1_BASE 0x462B050ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_2_BASE 0x462B0A0ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_3_BASE 0x462B0F0ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_4_BASE 0x462B140ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_5_BASE 0x462B190ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_6_BASE 0x462B1E0ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_7_BASE 0x462B230ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_8_BASE 0x462B280ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_9_BASE 0x462B2D0ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_10_BASE 0x462B320ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_11_BASE 0x462B370ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_12_BASE 0x462B3C0ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_13_BASE 0x462B410ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_14_BASE 0x462B460ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_TENSOR_15_BASE 0x462B4B0ull
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_KERNEL_SYNC_OBJECT_BASE 0x462B500ull
+#define DCORE3_TPC2_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC2_CFG_KERNEL_BASE 0x462B508ull
+#define DCORE3_TPC2_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC2_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_0_BASE 0x462B5DCull
+#define DCORE3_TPC2_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_1_BASE 0x462B62Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_2_BASE 0x462B67Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_3_BASE 0x462B6CCull
+#define DCORE3_TPC2_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_4_BASE 0x462B71Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_5_BASE 0x462B76Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_6_BASE 0x462B7BCull
+#define DCORE3_TPC2_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_7_BASE 0x462B80Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_8_BASE 0x462B85Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_9_BASE 0x462B8ACull
+#define DCORE3_TPC2_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_10_BASE 0x462B8FCull
+#define DCORE3_TPC2_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_11_BASE 0x462B94Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_12_BASE 0x462B99Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_13_BASE 0x462B9ECull
+#define DCORE3_TPC2_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_14_BASE 0x462BA3Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_TENSOR_15_BASE 0x462BA8Cull
+#define DCORE3_TPC2_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC2_CFG_QM_SYNC_OBJECT_BASE 0x462BADCull
+#define DCORE3_TPC2_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC2_CFG_QM_BASE 0x462BAE4ull
+#define DCORE3_TPC2_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC2_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC2_CFG_AXUSER_BASE 0x462BE00ull
+#define DCORE3_TPC2_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC2_CFG_SPECIAL_BASE 0x462BE80ull
+#define DCORE3_TPC2_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC2_MSTR_IF_RR_SHRD_HBW_BASE 0x462C000ull
+#define DCORE3_TPC2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_TPC2_MSTR_IF_RR_PRVT_HBW_BASE 0x462C200ull
+#define DCORE3_TPC2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_TPC2_MSTR_IF_RR_SHRD_LBW_BASE 0x462C400ull
+#define DCORE3_TPC2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_TPC2_MSTR_IF_RR_PRVT_LBW_BASE 0x462C600ull
+#define DCORE3_TPC2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_TPC2_MSTR_IF_E2E_CRDT_BASE 0x462C800ull
+#define DCORE3_TPC2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_TPC2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_TPC2_MSTR_IF_AXUSER_BASE 0x462CA80ull
+#define DCORE3_TPC2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC2_MSTR_IF_DBG_HBW_BASE 0x462CB00ull
+#define DCORE3_TPC2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC2_MSTR_IF_DBG_LBW_BASE 0x462CB80ull
+#define DCORE3_TPC2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_TPC2_MSTR_IF_CORE_HBW_BASE 0x462CC00ull
+#define DCORE3_TPC2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_TPC2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_TPC2_MSTR_IF_CORE_LBW_BASE 0x462CD80ull
+#define DCORE3_TPC2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_TPC2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_TPC2_MSTR_IF_SPECIAL_BASE 0x462CE80ull
+#define DCORE3_TPC2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC2_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_TPC3_QM_DCCM_BASE 0x4630000ull
+#define DCORE3_TPC3_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC3_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_ARC_AUX_BASE 0x4638000ull
+#define DCORE3_TPC3_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_TPC3_QM_ARC_AUX_SPECIAL_BASE 0x4638E80ull
+#define DCORE3_TPC3_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC3_QM_BASE 0x463A000ull
+#define DCORE3_TPC3_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_QM_SECTION 0x9000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR0_BASE 0x463A900ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR1_BASE 0x463A908ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR2_BASE 0x463A910ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR3_BASE 0x463A918ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR4_BASE 0x463A920ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR5_BASE 0x463A928ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR6_BASE 0x463A930ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR7_BASE 0x463A938ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR8_BASE 0x463A940ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR9_BASE 0x463A948ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR10_BASE 0x463A950ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR11_BASE 0x463A958ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR12_BASE 0x463A960ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR13_BASE 0x463A968ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR14_BASE 0x463A970ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR15_BASE 0x463A978ull
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC3_QM_AXUSER_SECURED_BASE 0x463AB00ull
+#define DCORE3_TPC3_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_AXUSER_NONSECURED_BASE 0x463AB80ull
+#define DCORE3_TPC3_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_DBG_HBW_BASE 0x463AC00ull
+#define DCORE3_TPC3_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC3_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC3_QM_DBG_LBW_BASE 0x463AC80ull
+#define DCORE3_TPC3_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC3_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC3_QM_CGM_BASE 0x463AD80ull
+#define DCORE3_TPC3_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC3_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC3_QM_SPECIAL_BASE 0x463AE80ull
+#define DCORE3_TPC3_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_0_BASE 0x463B000ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC3_CFG_BASE 0x463B000ull
+#define DCORE3_TPC3_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC3_CFG_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_1_BASE 0x463B050ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_2_BASE 0x463B0A0ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_3_BASE 0x463B0F0ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_4_BASE 0x463B140ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_5_BASE 0x463B190ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_6_BASE 0x463B1E0ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_7_BASE 0x463B230ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_8_BASE 0x463B280ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_9_BASE 0x463B2D0ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_10_BASE 0x463B320ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_11_BASE 0x463B370ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_12_BASE 0x463B3C0ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_13_BASE 0x463B410ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_14_BASE 0x463B460ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_TENSOR_15_BASE 0x463B4B0ull
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_KERNEL_SYNC_OBJECT_BASE 0x463B500ull
+#define DCORE3_TPC3_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC3_CFG_KERNEL_BASE 0x463B508ull
+#define DCORE3_TPC3_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC3_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_0_BASE 0x463B5DCull
+#define DCORE3_TPC3_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_1_BASE 0x463B62Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_2_BASE 0x463B67Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_3_BASE 0x463B6CCull
+#define DCORE3_TPC3_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_4_BASE 0x463B71Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_5_BASE 0x463B76Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_6_BASE 0x463B7BCull
+#define DCORE3_TPC3_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_7_BASE 0x463B80Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_8_BASE 0x463B85Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_9_BASE 0x463B8ACull
+#define DCORE3_TPC3_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_10_BASE 0x463B8FCull
+#define DCORE3_TPC3_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_11_BASE 0x463B94Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_12_BASE 0x463B99Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_13_BASE 0x463B9ECull
+#define DCORE3_TPC3_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_14_BASE 0x463BA3Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_TENSOR_15_BASE 0x463BA8Cull
+#define DCORE3_TPC3_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC3_CFG_QM_SYNC_OBJECT_BASE 0x463BADCull
+#define DCORE3_TPC3_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC3_CFG_QM_BASE 0x463BAE4ull
+#define DCORE3_TPC3_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC3_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC3_CFG_AXUSER_BASE 0x463BE00ull
+#define DCORE3_TPC3_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC3_CFG_SPECIAL_BASE 0x463BE80ull
+#define DCORE3_TPC3_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC3_MSTR_IF_RR_SHRD_HBW_BASE 0x463C000ull
+#define DCORE3_TPC3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_TPC3_MSTR_IF_RR_PRVT_HBW_BASE 0x463C200ull
+#define DCORE3_TPC3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_TPC3_MSTR_IF_RR_SHRD_LBW_BASE 0x463C400ull
+#define DCORE3_TPC3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_TPC3_MSTR_IF_RR_PRVT_LBW_BASE 0x463C600ull
+#define DCORE3_TPC3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_TPC3_MSTR_IF_E2E_CRDT_BASE 0x463C800ull
+#define DCORE3_TPC3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_TPC3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_TPC3_MSTR_IF_AXUSER_BASE 0x463CA80ull
+#define DCORE3_TPC3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC3_MSTR_IF_DBG_HBW_BASE 0x463CB00ull
+#define DCORE3_TPC3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC3_MSTR_IF_DBG_LBW_BASE 0x463CB80ull
+#define DCORE3_TPC3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_TPC3_MSTR_IF_CORE_HBW_BASE 0x463CC00ull
+#define DCORE3_TPC3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_TPC3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_TPC3_MSTR_IF_CORE_LBW_BASE 0x463CD80ull
+#define DCORE3_TPC3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_TPC3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_TPC3_MSTR_IF_SPECIAL_BASE 0x463CE80ull
+#define DCORE3_TPC3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC3_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_TPC4_QM_DCCM_BASE 0x4640000ull
+#define DCORE3_TPC4_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC4_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_ARC_AUX_BASE 0x4648000ull
+#define DCORE3_TPC4_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_TPC4_QM_ARC_AUX_SPECIAL_BASE 0x4648E80ull
+#define DCORE3_TPC4_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC4_QM_BASE 0x464A000ull
+#define DCORE3_TPC4_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_QM_SECTION 0x9000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR0_BASE 0x464A900ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR1_BASE 0x464A908ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR2_BASE 0x464A910ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR3_BASE 0x464A918ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR4_BASE 0x464A920ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR5_BASE 0x464A928ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR6_BASE 0x464A930ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR7_BASE 0x464A938ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR8_BASE 0x464A940ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR9_BASE 0x464A948ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR10_BASE 0x464A950ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR11_BASE 0x464A958ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR12_BASE 0x464A960ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR13_BASE 0x464A968ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR14_BASE 0x464A970ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR15_BASE 0x464A978ull
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC4_QM_AXUSER_SECURED_BASE 0x464AB00ull
+#define DCORE3_TPC4_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_AXUSER_NONSECURED_BASE 0x464AB80ull
+#define DCORE3_TPC4_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_DBG_HBW_BASE 0x464AC00ull
+#define DCORE3_TPC4_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC4_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC4_QM_DBG_LBW_BASE 0x464AC80ull
+#define DCORE3_TPC4_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC4_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC4_QM_CGM_BASE 0x464AD80ull
+#define DCORE3_TPC4_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC4_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC4_QM_SPECIAL_BASE 0x464AE80ull
+#define DCORE3_TPC4_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_0_BASE 0x464B000ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC4_CFG_BASE 0x464B000ull
+#define DCORE3_TPC4_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC4_CFG_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_1_BASE 0x464B050ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_2_BASE 0x464B0A0ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_3_BASE 0x464B0F0ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_4_BASE 0x464B140ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_5_BASE 0x464B190ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_6_BASE 0x464B1E0ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_7_BASE 0x464B230ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_8_BASE 0x464B280ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_9_BASE 0x464B2D0ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_10_BASE 0x464B320ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_11_BASE 0x464B370ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_12_BASE 0x464B3C0ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_13_BASE 0x464B410ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_14_BASE 0x464B460ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_TENSOR_15_BASE 0x464B4B0ull
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_KERNEL_SYNC_OBJECT_BASE 0x464B500ull
+#define DCORE3_TPC4_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC4_CFG_KERNEL_BASE 0x464B508ull
+#define DCORE3_TPC4_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC4_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_0_BASE 0x464B5DCull
+#define DCORE3_TPC4_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_1_BASE 0x464B62Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_2_BASE 0x464B67Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_3_BASE 0x464B6CCull
+#define DCORE3_TPC4_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_4_BASE 0x464B71Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_5_BASE 0x464B76Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_6_BASE 0x464B7BCull
+#define DCORE3_TPC4_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_7_BASE 0x464B80Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_8_BASE 0x464B85Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_9_BASE 0x464B8ACull
+#define DCORE3_TPC4_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_10_BASE 0x464B8FCull
+#define DCORE3_TPC4_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_11_BASE 0x464B94Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_12_BASE 0x464B99Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_13_BASE 0x464B9ECull
+#define DCORE3_TPC4_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_14_BASE 0x464BA3Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_TENSOR_15_BASE 0x464BA8Cull
+#define DCORE3_TPC4_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC4_CFG_QM_SYNC_OBJECT_BASE 0x464BADCull
+#define DCORE3_TPC4_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC4_CFG_QM_BASE 0x464BAE4ull
+#define DCORE3_TPC4_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC4_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC4_CFG_AXUSER_BASE 0x464BE00ull
+#define DCORE3_TPC4_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC4_CFG_SPECIAL_BASE 0x464BE80ull
+#define DCORE3_TPC4_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC4_MSTR_IF_RR_SHRD_HBW_BASE 0x464C000ull
+#define DCORE3_TPC4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_TPC4_MSTR_IF_RR_PRVT_HBW_BASE 0x464C200ull
+#define DCORE3_TPC4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_TPC4_MSTR_IF_RR_SHRD_LBW_BASE 0x464C400ull
+#define DCORE3_TPC4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_TPC4_MSTR_IF_RR_PRVT_LBW_BASE 0x464C600ull
+#define DCORE3_TPC4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_TPC4_MSTR_IF_E2E_CRDT_BASE 0x464C800ull
+#define DCORE3_TPC4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_TPC4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_TPC4_MSTR_IF_AXUSER_BASE 0x464CA80ull
+#define DCORE3_TPC4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC4_MSTR_IF_DBG_HBW_BASE 0x464CB00ull
+#define DCORE3_TPC4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC4_MSTR_IF_DBG_LBW_BASE 0x464CB80ull
+#define DCORE3_TPC4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_TPC4_MSTR_IF_CORE_HBW_BASE 0x464CC00ull
+#define DCORE3_TPC4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_TPC4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_TPC4_MSTR_IF_CORE_LBW_BASE 0x464CD80ull
+#define DCORE3_TPC4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_TPC4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_TPC4_MSTR_IF_SPECIAL_BASE 0x464CE80ull
+#define DCORE3_TPC4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC4_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_TPC5_QM_DCCM_BASE 0x4650000ull
+#define DCORE3_TPC5_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_TPC5_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_ARC_AUX_BASE 0x4658000ull
+#define DCORE3_TPC5_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_TPC5_QM_ARC_AUX_SPECIAL_BASE 0x4658E80ull
+#define DCORE3_TPC5_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TPC5_QM_BASE 0x465A000ull
+#define DCORE3_TPC5_QM_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_QM_SECTION 0x9000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR0_BASE 0x465A900ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR1_BASE 0x465A908ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR2_BASE 0x465A910ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR3_BASE 0x465A918ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR4_BASE 0x465A920ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR5_BASE 0x465A928ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR6_BASE 0x465A930ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR7_BASE 0x465A938ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR8_BASE 0x465A940ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR9_BASE 0x465A948ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR10_BASE 0x465A950ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR11_BASE 0x465A958ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR12_BASE 0x465A960ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR13_BASE 0x465A968ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR14_BASE 0x465A970ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR15_BASE 0x465A978ull
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_TPC5_QM_AXUSER_SECURED_BASE 0x465AB00ull
+#define DCORE3_TPC5_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_AXUSER_NONSECURED_BASE 0x465AB80ull
+#define DCORE3_TPC5_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_DBG_HBW_BASE 0x465AC00ull
+#define DCORE3_TPC5_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC5_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC5_QM_DBG_LBW_BASE 0x465AC80ull
+#define DCORE3_TPC5_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC5_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_TPC5_QM_CGM_BASE 0x465AD80ull
+#define DCORE3_TPC5_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_TPC5_QM_CGM_SECTION 0x1000
+#define mmDCORE3_TPC5_QM_SPECIAL_BASE 0x465AE80ull
+#define DCORE3_TPC5_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_0_BASE 0x465B000ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC5_CFG_BASE 0x465B000ull
+#define DCORE3_TPC5_CFG_MAX_OFFSET 0x1000
+#define DCORE3_TPC5_CFG_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_1_BASE 0x465B050ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_2_BASE 0x465B0A0ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_3_BASE 0x465B0F0ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_4_BASE 0x465B140ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_5_BASE 0x465B190ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_6_BASE 0x465B1E0ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_7_BASE 0x465B230ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_8_BASE 0x465B280ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_9_BASE 0x465B2D0ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_10_BASE 0x465B320ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_11_BASE 0x465B370ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_12_BASE 0x465B3C0ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_13_BASE 0x465B410ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_14_BASE 0x465B460ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_TENSOR_15_BASE 0x465B4B0ull
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_KERNEL_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_KERNEL_SYNC_OBJECT_BASE 0x465B500ull
+#define DCORE3_TPC5_CFG_KERNEL_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_CFG_KERNEL_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC5_CFG_KERNEL_BASE 0x465B508ull
+#define DCORE3_TPC5_CFG_KERNEL_MAX_OFFSET 0xD400
+#define DCORE3_TPC5_CFG_KERNEL_SECTION 0xD400
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_0_BASE 0x465B5DCull
+#define DCORE3_TPC5_CFG_QM_TENSOR_0_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_0_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_1_BASE 0x465B62Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_1_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_1_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_2_BASE 0x465B67Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_2_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_2_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_3_BASE 0x465B6CCull
+#define DCORE3_TPC5_CFG_QM_TENSOR_3_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_3_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_4_BASE 0x465B71Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_4_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_4_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_5_BASE 0x465B76Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_5_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_5_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_6_BASE 0x465B7BCull
+#define DCORE3_TPC5_CFG_QM_TENSOR_6_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_6_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_7_BASE 0x465B80Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_7_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_7_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_8_BASE 0x465B85Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_8_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_8_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_9_BASE 0x465B8ACull
+#define DCORE3_TPC5_CFG_QM_TENSOR_9_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_9_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_10_BASE 0x465B8FCull
+#define DCORE3_TPC5_CFG_QM_TENSOR_10_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_10_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_11_BASE 0x465B94Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_11_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_11_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_12_BASE 0x465B99Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_12_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_12_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_13_BASE 0x465B9ECull
+#define DCORE3_TPC5_CFG_QM_TENSOR_13_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_13_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_14_BASE 0x465BA3Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_14_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_14_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_TENSOR_15_BASE 0x465BA8Cull
+#define DCORE3_TPC5_CFG_QM_TENSOR_15_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_QM_TENSOR_15_SECTION 0x5000
+#define mmDCORE3_TPC5_CFG_QM_SYNC_OBJECT_BASE 0x465BADCull
+#define DCORE3_TPC5_CFG_QM_SYNC_OBJECT_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_CFG_QM_SYNC_OBJECT_SECTION 0x8000
+#define mmDCORE3_TPC5_CFG_QM_BASE 0x465BAE4ull
+#define DCORE3_TPC5_CFG_QM_MAX_OFFSET 0xD400
+#define DCORE3_TPC5_CFG_QM_SECTION 0x31C0
+#define mmDCORE3_TPC5_CFG_AXUSER_BASE 0x465BE00ull
+#define DCORE3_TPC5_CFG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_CFG_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC5_CFG_SPECIAL_BASE 0x465BE80ull
+#define DCORE3_TPC5_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_CFG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC5_MSTR_IF_RR_SHRD_HBW_BASE 0x465C000ull
+#define DCORE3_TPC5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_TPC5_MSTR_IF_RR_PRVT_HBW_BASE 0x465C200ull
+#define DCORE3_TPC5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_TPC5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_TPC5_MSTR_IF_RR_SHRD_LBW_BASE 0x465C400ull
+#define DCORE3_TPC5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_TPC5_MSTR_IF_RR_PRVT_LBW_BASE 0x465C600ull
+#define DCORE3_TPC5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_TPC5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_TPC5_MSTR_IF_E2E_CRDT_BASE 0x465C800ull
+#define DCORE3_TPC5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_TPC5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_TPC5_MSTR_IF_AXUSER_BASE 0x465CA80ull
+#define DCORE3_TPC5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_TPC5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_TPC5_MSTR_IF_DBG_HBW_BASE 0x465CB00ull
+#define DCORE3_TPC5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_TPC5_MSTR_IF_DBG_LBW_BASE 0x465CB80ull
+#define DCORE3_TPC5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_TPC5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_TPC5_MSTR_IF_CORE_HBW_BASE 0x465CC00ull
+#define DCORE3_TPC5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_TPC5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_TPC5_MSTR_IF_CORE_LBW_BASE 0x465CD80ull
+#define DCORE3_TPC5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_TPC5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_TPC5_MSTR_IF_SPECIAL_BASE 0x465CE80ull
+#define DCORE3_TPC5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC5_MSTR_IF_SPECIAL_SECTION 0x23180
+#define mmDCORE3_HMMU0_MMU_BASE 0x4680000ull
+#define DCORE3_HMMU0_MMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_MMU_SECTION 0xE800
+#define mmDCORE3_HMMU0_MMU_SPECIAL_BASE 0x4680E80ull
+#define DCORE3_HMMU0_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU0_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU0_STLB_BASE 0x4681000ull
+#define DCORE3_HMMU0_STLB_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_STLB_SECTION 0xE800
+#define mmDCORE3_HMMU0_STLB_SPECIAL_BASE 0x4681E80ull
+#define DCORE3_HMMU0_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU0_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE3_HMMU0_SCRAMB_OUT_BASE 0x4683000ull
+#define DCORE3_HMMU0_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE3_HMMU0_SCRAMB_OUT_SPECIAL_BASE 0x4683E80ull
+#define DCORE3_HMMU0_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU0_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU0_MSTR_IF_RR_SHRD_HBW_BASE 0x4684000ull
+#define DCORE3_HMMU0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU0_MSTR_IF_RR_PRVT_HBW_BASE 0x4684200ull
+#define DCORE3_HMMU0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU0_MSTR_IF_RR_SHRD_LBW_BASE 0x4684400ull
+#define DCORE3_HMMU0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU0_MSTR_IF_RR_PRVT_LBW_BASE 0x4684600ull
+#define DCORE3_HMMU0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU0_MSTR_IF_E2E_CRDT_BASE 0x4684800ull
+#define DCORE3_HMMU0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_HMMU0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_HMMU0_MSTR_IF_AXUSER_BASE 0x4684A80ull
+#define DCORE3_HMMU0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_HMMU0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_HMMU0_MSTR_IF_DBG_HBW_BASE 0x4684B00ull
+#define DCORE3_HMMU0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_HMMU0_MSTR_IF_DBG_LBW_BASE 0x4684B80ull
+#define DCORE3_HMMU0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_HMMU0_MSTR_IF_CORE_HBW_BASE 0x4684C00ull
+#define DCORE3_HMMU0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_HMMU0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_HMMU0_MSTR_IF_CORE_LBW_BASE 0x4684D80ull
+#define DCORE3_HMMU0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_HMMU0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_HMMU0_MSTR_IF_SPECIAL_BASE 0x4684E80ull
+#define DCORE3_HMMU0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU0_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE3_HMMU1_MMU_BASE 0x4690000ull
+#define DCORE3_HMMU1_MMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_MMU_SECTION 0xE800
+#define mmDCORE3_HMMU1_MMU_SPECIAL_BASE 0x4690E80ull
+#define DCORE3_HMMU1_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU1_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU1_STLB_BASE 0x4691000ull
+#define DCORE3_HMMU1_STLB_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_STLB_SECTION 0xE800
+#define mmDCORE3_HMMU1_STLB_SPECIAL_BASE 0x4691E80ull
+#define DCORE3_HMMU1_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU1_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE3_HMMU1_SCRAMB_OUT_BASE 0x4693000ull
+#define DCORE3_HMMU1_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE3_HMMU1_SCRAMB_OUT_SPECIAL_BASE 0x4693E80ull
+#define DCORE3_HMMU1_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU1_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU1_MSTR_IF_RR_SHRD_HBW_BASE 0x4694000ull
+#define DCORE3_HMMU1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU1_MSTR_IF_RR_PRVT_HBW_BASE 0x4694200ull
+#define DCORE3_HMMU1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU1_MSTR_IF_RR_SHRD_LBW_BASE 0x4694400ull
+#define DCORE3_HMMU1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU1_MSTR_IF_RR_PRVT_LBW_BASE 0x4694600ull
+#define DCORE3_HMMU1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU1_MSTR_IF_E2E_CRDT_BASE 0x4694800ull
+#define DCORE3_HMMU1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_HMMU1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_HMMU1_MSTR_IF_AXUSER_BASE 0x4694A80ull
+#define DCORE3_HMMU1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_HMMU1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_HMMU1_MSTR_IF_DBG_HBW_BASE 0x4694B00ull
+#define DCORE3_HMMU1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_HMMU1_MSTR_IF_DBG_LBW_BASE 0x4694B80ull
+#define DCORE3_HMMU1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_HMMU1_MSTR_IF_CORE_HBW_BASE 0x4694C00ull
+#define DCORE3_HMMU1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_HMMU1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_HMMU1_MSTR_IF_CORE_LBW_BASE 0x4694D80ull
+#define DCORE3_HMMU1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_HMMU1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_HMMU1_MSTR_IF_SPECIAL_BASE 0x4694E80ull
+#define DCORE3_HMMU1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU1_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE3_HMMU2_MMU_BASE 0x46A0000ull
+#define DCORE3_HMMU2_MMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_MMU_SECTION 0xE800
+#define mmDCORE3_HMMU2_MMU_SPECIAL_BASE 0x46A0E80ull
+#define DCORE3_HMMU2_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU2_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU2_STLB_BASE 0x46A1000ull
+#define DCORE3_HMMU2_STLB_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_STLB_SECTION 0xE800
+#define mmDCORE3_HMMU2_STLB_SPECIAL_BASE 0x46A1E80ull
+#define DCORE3_HMMU2_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU2_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE3_HMMU2_SCRAMB_OUT_BASE 0x46A3000ull
+#define DCORE3_HMMU2_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE3_HMMU2_SCRAMB_OUT_SPECIAL_BASE 0x46A3E80ull
+#define DCORE3_HMMU2_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU2_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU2_MSTR_IF_RR_SHRD_HBW_BASE 0x46A4000ull
+#define DCORE3_HMMU2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU2_MSTR_IF_RR_PRVT_HBW_BASE 0x46A4200ull
+#define DCORE3_HMMU2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU2_MSTR_IF_RR_SHRD_LBW_BASE 0x46A4400ull
+#define DCORE3_HMMU2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU2_MSTR_IF_RR_PRVT_LBW_BASE 0x46A4600ull
+#define DCORE3_HMMU2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU2_MSTR_IF_E2E_CRDT_BASE 0x46A4800ull
+#define DCORE3_HMMU2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_HMMU2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_HMMU2_MSTR_IF_AXUSER_BASE 0x46A4A80ull
+#define DCORE3_HMMU2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_HMMU2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_HMMU2_MSTR_IF_DBG_HBW_BASE 0x46A4B00ull
+#define DCORE3_HMMU2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_HMMU2_MSTR_IF_DBG_LBW_BASE 0x46A4B80ull
+#define DCORE3_HMMU2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_HMMU2_MSTR_IF_CORE_HBW_BASE 0x46A4C00ull
+#define DCORE3_HMMU2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_HMMU2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_HMMU2_MSTR_IF_CORE_LBW_BASE 0x46A4D80ull
+#define DCORE3_HMMU2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_HMMU2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_HMMU2_MSTR_IF_SPECIAL_BASE 0x46A4E80ull
+#define DCORE3_HMMU2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU2_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE3_HMMU3_MMU_BASE 0x46B0000ull
+#define DCORE3_HMMU3_MMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_MMU_SECTION 0xE800
+#define mmDCORE3_HMMU3_MMU_SPECIAL_BASE 0x46B0E80ull
+#define DCORE3_HMMU3_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU3_MMU_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU3_STLB_BASE 0x46B1000ull
+#define DCORE3_HMMU3_STLB_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_STLB_SECTION 0xE800
+#define mmDCORE3_HMMU3_STLB_SPECIAL_BASE 0x46B1E80ull
+#define DCORE3_HMMU3_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU3_STLB_SPECIAL_SECTION 0x1180
+#define mmDCORE3_HMMU3_SCRAMB_OUT_BASE 0x46B3000ull
+#define DCORE3_HMMU3_SCRAMB_OUT_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_SCRAMB_OUT_SECTION 0xE800
+#define mmDCORE3_HMMU3_SCRAMB_OUT_SPECIAL_BASE 0x46B3E80ull
+#define DCORE3_HMMU3_SCRAMB_OUT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU3_SCRAMB_OUT_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HMMU3_MSTR_IF_RR_SHRD_HBW_BASE 0x46B4000ull
+#define DCORE3_HMMU3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU3_MSTR_IF_RR_PRVT_HBW_BASE 0x46B4200ull
+#define DCORE3_HMMU3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_HMMU3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_HMMU3_MSTR_IF_RR_SHRD_LBW_BASE 0x46B4400ull
+#define DCORE3_HMMU3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU3_MSTR_IF_RR_PRVT_LBW_BASE 0x46B4600ull
+#define DCORE3_HMMU3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_HMMU3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_HMMU3_MSTR_IF_E2E_CRDT_BASE 0x46B4800ull
+#define DCORE3_HMMU3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_HMMU3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_HMMU3_MSTR_IF_AXUSER_BASE 0x46B4A80ull
+#define DCORE3_HMMU3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_HMMU3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_HMMU3_MSTR_IF_DBG_HBW_BASE 0x46B4B00ull
+#define DCORE3_HMMU3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_HMMU3_MSTR_IF_DBG_LBW_BASE 0x46B4B80ull
+#define DCORE3_HMMU3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_HMMU3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_HMMU3_MSTR_IF_CORE_HBW_BASE 0x46B4C00ull
+#define DCORE3_HMMU3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_HMMU3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_HMMU3_MSTR_IF_CORE_LBW_BASE 0x46B4D80ull
+#define DCORE3_HMMU3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_HMMU3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_HMMU3_MSTR_IF_SPECIAL_BASE 0x46B4E80ull
+#define DCORE3_HMMU3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HMMU3_MSTR_IF_SPECIAL_SECTION 0xB180
+#define mmDCORE3_MME_QM_ARC_DCCM_BASE 0x46C0000ull
+#define DCORE3_MME_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_MME_QM_ARC_DCCM_SECTION 0x8000
+#define mmDCORE3_MME_QM_ARC_AUX_BASE 0x46C8000ull
+#define DCORE3_MME_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_MME_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_MME_QM_ARC_AUX_SPECIAL_BASE 0x46C8E80ull
+#define DCORE3_MME_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_QM_ARC_AUX_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_QM_ARC_DUP_ENG_BASE 0x46C9000ull
+#define DCORE3_MME_QM_ARC_DUP_ENG_MAX_OFFSET 0x1000
+#define DCORE3_MME_QM_ARC_DUP_ENG_SECTION 0x9000
+#define mmDCORE3_MME_QM_ARC_DUP_ENG_AXUSER_BASE 0x46C9900ull
+#define DCORE3_MME_QM_ARC_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_QM_ARC_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmDCORE3_MME_QM_ARC_DUP_ENG_SPECIAL_BASE 0x46C9E80ull
+#define DCORE3_MME_QM_ARC_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_QM_ARC_DUP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_QM_BASE 0x46CA000ull
+#define DCORE3_MME_QM_MAX_OFFSET 0x1000
+#define DCORE3_MME_QM_SECTION 0x9000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR0_BASE 0x46CA900ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR1_BASE 0x46CA908ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR2_BASE 0x46CA910ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR3_BASE 0x46CA918ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR4_BASE 0x46CA920ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR5_BASE 0x46CA928ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR6_BASE 0x46CA930ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR7_BASE 0x46CA938ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR8_BASE 0x46CA940ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR9_BASE 0x46CA948ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR10_BASE 0x46CA950ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR11_BASE 0x46CA958ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR12_BASE 0x46CA960ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR13_BASE 0x46CA968ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR14_BASE 0x46CA970ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_MME_QM_QMAN_WR64_BASE_ADDR15_BASE 0x46CA978ull
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_MME_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_MME_QM_AXUSER_SECURED_BASE 0x46CAB00ull
+#define DCORE3_MME_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_MME_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_MME_QM_AXUSER_NONSECURED_BASE 0x46CAB80ull
+#define DCORE3_MME_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_MME_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_MME_QM_DBG_HBW_BASE 0x46CAC00ull
+#define DCORE3_MME_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_QM_DBG_LBW_BASE 0x46CAC80ull
+#define DCORE3_MME_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_MME_QM_CGM_BASE 0x46CAD80ull
+#define DCORE3_MME_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_MME_QM_CGM_SECTION 0x1000
+#define mmDCORE3_MME_QM_SPECIAL_BASE 0x46CAE80ull
+#define DCORE3_MME_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_LO_BASE 0x46CB000ull
+#define DCORE3_MME_CTRL_LO_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_LO_SECTION 0x8000
+#define mmDCORE3_MME_CTRL_LO_ARCH_BASE_ADDR_BASE 0x46CB008ull
+#define DCORE3_MME_CTRL_LO_ARCH_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE3_MME_CTRL_LO_ARCH_BASE_ADDR_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_LO_ARCH_NON_TENSOR_START_BASE 0x46CB028ull
+#define DCORE3_MME_CTRL_LO_ARCH_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_LO_ARCH_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_LO_ARCH_TENSOR_A_BASE 0x46CB040ull
+#define DCORE3_MME_CTRL_LO_ARCH_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_LO_ARCH_TENSOR_A_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_LO_ARCH_TENSOR_B_BASE 0x46CB098ull
+#define DCORE3_MME_CTRL_LO_ARCH_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_LO_ARCH_TENSOR_B_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_LO_ARCH_TENSOR_COUT_BASE 0x46CB0F0ull
+#define DCORE3_MME_CTRL_LO_ARCH_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_LO_ARCH_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_BASE 0x46CB15Cull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_BASE 0x46CB170ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_BASE 0x46CB184ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_BASE 0x46CB198ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_BASE 0x46CB1ACull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_BASE 0x46CB1C0ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_BASE 0x46CB1D4ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_BASE 0x46CB1E8ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_BASE 0x46CB1FCull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_BASE 0x46CB210ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_BASE 0x46CB22Cull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_BASE 0x46CB240ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_BASE 0x46CB254ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_BASE 0x46CB268ull
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_LO_ARCH_NON_TENSOR_END_BASE 0x46CB280ull
+#define DCORE3_MME_CTRL_LO_ARCH_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE3_MME_CTRL_LO_ARCH_NON_TENSOR_END_SECTION 0xB800
+#define mmDCORE3_MME_CTRL_LO_MME_AXUSER_BASE 0x46CBE00ull
+#define DCORE3_MME_CTRL_LO_MME_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_CTRL_LO_MME_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_CTRL_LO_SPECIAL_BASE 0x46CBE80ull
+#define DCORE3_MME_CTRL_LO_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_LO_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_BASE 0x46CC000ull
+#define DCORE3_MME_CTRL_HI_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_HI_SECTION 0x8000
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_BASE_ADDR_BASE 0x46CC008ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE3_MME_CTRL_HI_SHADOW_0_BASE_ADDR_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_BASE 0x46CC028ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_HI_SHADOW_0_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_A_BASE 0x46CC040ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_A_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_B_BASE 0x46CC098ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_B_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_BASE 0x46CC0F0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_0_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_BASE 0x46CC15Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_BASE 0x46CC170ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_BASE 0x46CC184ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_BASE 0x46CC198ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_BASE 0x46CC1ACull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_BASE 0x46CC1C0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_BASE 0x46CC1D4ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_BASE 0x46CC1E8ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_BASE 0x46CC1FCull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_BASE 0x46CC210ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_BASE 0x46CC22Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_BASE 0x46CC240ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_BASE 0x46CC254ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_BASE 0x46CC268ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_0_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_BASE 0x46CC280ull
+#define DCORE3_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE3_MME_CTRL_HI_SHADOW_0_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_BASE_ADDR_BASE 0x46CC308ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE3_MME_CTRL_HI_SHADOW_1_BASE_ADDR_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_BASE 0x46CC328ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_HI_SHADOW_1_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_A_BASE 0x46CC340ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_A_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_B_BASE 0x46CC398ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_B_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_BASE 0x46CC3F0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_1_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_BASE 0x46CC45Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_BASE 0x46CC470ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_BASE 0x46CC484ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_BASE 0x46CC498ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_BASE 0x46CC4ACull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_BASE 0x46CC4C0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_BASE 0x46CC4D4ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_BASE 0x46CC4E8ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_BASE 0x46CC4FCull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_BASE 0x46CC510ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_BASE 0x46CC52Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_BASE 0x46CC540ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_BASE 0x46CC554ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_BASE 0x46CC568ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_1_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_BASE 0x46CC580ull
+#define DCORE3_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE3_MME_CTRL_HI_SHADOW_1_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_BASE_ADDR_BASE 0x46CC608ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE3_MME_CTRL_HI_SHADOW_2_BASE_ADDR_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_BASE 0x46CC628ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_HI_SHADOW_2_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_A_BASE 0x46CC640ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_A_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_B_BASE 0x46CC698ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_B_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_BASE 0x46CC6F0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_2_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_BASE 0x46CC75Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_BASE 0x46CC770ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_BASE 0x46CC784ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_BASE 0x46CC798ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_BASE 0x46CC7ACull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_BASE 0x46CC7C0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_BASE 0x46CC7D4ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_BASE 0x46CC7E8ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_BASE 0x46CC7FCull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_BASE 0x46CC810ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_BASE 0x46CC82Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_BASE 0x46CC840ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_BASE 0x46CC854ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_BASE 0x46CC868ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_2_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_BASE 0x46CC880ull
+#define DCORE3_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE3_MME_CTRL_HI_SHADOW_2_NON_TENSOR_END_SECTION 0x8800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_BASE_ADDR_BASE 0x46CC908ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_BASE_ADDR_MAX_OFFSET 0x2000
+#define DCORE3_MME_CTRL_HI_SHADOW_3_BASE_ADDR_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_BASE 0x46CC928ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_HI_SHADOW_3_NON_TENSOR_START_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_A_BASE 0x46CC940ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_A_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_A_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_B_BASE 0x46CC998ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_B_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_B_SECTION 0x5800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_BASE 0x46CC9F0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_HI_SHADOW_3_TENSOR_COUT_SECTION 0x6C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_BASE 0x46CCA5Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_BASE 0x46CCA70ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_BASE 0x46CCA84ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_BASE 0x46CCA98ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN1_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_BASE 0x46CCAACull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN2_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_BASE 0x46CCAC0ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN2_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_BASE 0x46CCAD4ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN3_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_BASE 0x46CCAE8ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN3_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_BASE 0x46CCAFCull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN4_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_BASE 0x46CCB10ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_IN4_SLAVE_SECTION 0x1C00
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_BASE 0x46CCB2Cull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT0_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_BASE 0x46CCB40ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT0_SLAVE_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_BASE 0x46CCB54ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT1_MASTER_SECTION 0x1400
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_BASE 0x46CCB68ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_HI_SHADOW_3_AGU_COUT1_SLAVE_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_BASE 0x46CCB80ull
+#define DCORE3_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_MAX_OFFSET 0x6000
+#define DCORE3_MME_CTRL_HI_SHADOW_3_NON_TENSOR_END_SECTION 0x3000
+#define mmDCORE3_MME_CTRL_HI_SPECIAL_BASE 0x46CCE80ull
+#define DCORE3_MME_CTRL_HI_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_HI_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_EU_BIST_BASE 0x46CD000ull
+#define DCORE3_MME_EU_BIST_MAX_OFFSET 0x1000
+#define DCORE3_MME_EU_BIST_SECTION 0xE800
+#define mmDCORE3_MME_EU_BIST_SPECIAL_BASE 0x46CDE80ull
+#define DCORE3_MME_EU_BIST_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_EU_BIST_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_MSTR_IF_RR_SHRD_HBW_BASE 0x46CE000ull
+#define DCORE3_MME_CTRL_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_CTRL_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_MSTR_IF_RR_PRVT_HBW_BASE 0x46CE200ull
+#define DCORE3_MME_CTRL_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_CTRL_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_MSTR_IF_RR_SHRD_LBW_BASE 0x46CE400ull
+#define DCORE3_MME_CTRL_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_CTRL_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_MSTR_IF_RR_PRVT_LBW_BASE 0x46CE600ull
+#define DCORE3_MME_CTRL_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_CTRL_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_CTRL_MSTR_IF_E2E_CRDT_BASE 0x46CE800ull
+#define DCORE3_MME_CTRL_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_CTRL_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_CTRL_MSTR_IF_AXUSER_BASE 0x46CEA80ull
+#define DCORE3_MME_CTRL_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_CTRL_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_CTRL_MSTR_IF_DBG_HBW_BASE 0x46CEB00ull
+#define DCORE3_MME_CTRL_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_CTRL_MSTR_IF_DBG_LBW_BASE 0x46CEB80ull
+#define DCORE3_MME_CTRL_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_CTRL_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_CTRL_MSTR_IF_CORE_HBW_BASE 0x46CEC00ull
+#define DCORE3_MME_CTRL_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_CTRL_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_CTRL_MSTR_IF_CORE_LBW_BASE 0x46CED80ull
+#define DCORE3_MME_CTRL_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_CTRL_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_MSTR_IF_SPECIAL_BASE 0x46CEE80ull
+#define DCORE3_MME_CTRL_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_CTRL_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_QM_ARC_ACP_ENG_BASE 0x46CF000ull
+#define DCORE3_MME_QM_ARC_ACP_ENG_MAX_OFFSET 0x1000
+#define DCORE3_MME_QM_ARC_ACP_ENG_SECTION 0xE800
+#define mmDCORE3_MME_QM_ARC_ACP_ENG_SPECIAL_BASE 0x46CFE80ull
+#define DCORE3_MME_QM_ARC_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_QM_ARC_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_SBTE0_BASE 0x46D0000ull
+#define DCORE3_MME_SBTE0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_SECTION 0xE800
+#define mmDCORE3_MME_SBTE0_SPECIAL_BASE 0x46D0E80ull
+#define DCORE3_MME_SBTE0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE0_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE 0x46D1000ull
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_BASE 0x46D1200ull
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_BASE 0x46D1400ull
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_BASE 0x46D1600ull
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_E2E_CRDT_BASE 0x46D1800ull
+#define DCORE3_MME_SBTE0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_SBTE0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_SBTE0_MSTR_IF_AXUSER_BASE 0x46D1A80ull
+#define DCORE3_MME_SBTE0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_SBTE0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_DBG_HBW_BASE 0x46D1B00ull
+#define DCORE3_MME_SBTE0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_DBG_LBW_BASE 0x46D1B80ull
+#define DCORE3_MME_SBTE0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_CORE_HBW_BASE 0x46D1C00ull
+#define DCORE3_MME_SBTE0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_SBTE0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_SBTE0_MSTR_IF_CORE_LBW_BASE 0x46D1D80ull
+#define DCORE3_MME_SBTE0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_SBTE0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_MSTR_IF_SPECIAL_BASE 0x46D1E80ull
+#define DCORE3_MME_SBTE0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE0_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE3_MME_SBTE1_BASE 0x46D8000ull
+#define DCORE3_MME_SBTE1_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_SECTION 0xE800
+#define mmDCORE3_MME_SBTE1_SPECIAL_BASE 0x46D8E80ull
+#define DCORE3_MME_SBTE1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE1_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_BASE 0x46D9000ull
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_BASE 0x46D9200ull
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_BASE 0x46D9400ull
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_BASE 0x46D9600ull
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_E2E_CRDT_BASE 0x46D9800ull
+#define DCORE3_MME_SBTE1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_SBTE1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_SBTE1_MSTR_IF_AXUSER_BASE 0x46D9A80ull
+#define DCORE3_MME_SBTE1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_SBTE1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_DBG_HBW_BASE 0x46D9B00ull
+#define DCORE3_MME_SBTE1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_DBG_LBW_BASE 0x46D9B80ull
+#define DCORE3_MME_SBTE1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_CORE_HBW_BASE 0x46D9C00ull
+#define DCORE3_MME_SBTE1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_SBTE1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_SBTE1_MSTR_IF_CORE_LBW_BASE 0x46D9D80ull
+#define DCORE3_MME_SBTE1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_SBTE1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_MSTR_IF_SPECIAL_BASE 0x46D9E80ull
+#define DCORE3_MME_SBTE1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE1_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE3_MME_SBTE2_BASE 0x46E0000ull
+#define DCORE3_MME_SBTE2_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_SECTION 0xE800
+#define mmDCORE3_MME_SBTE2_SPECIAL_BASE 0x46E0E80ull
+#define DCORE3_MME_SBTE2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE2_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_BASE 0x46E1000ull
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_BASE 0x46E1200ull
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_BASE 0x46E1400ull
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_BASE 0x46E1600ull
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_E2E_CRDT_BASE 0x46E1800ull
+#define DCORE3_MME_SBTE2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_SBTE2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_SBTE2_MSTR_IF_AXUSER_BASE 0x46E1A80ull
+#define DCORE3_MME_SBTE2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_SBTE2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_DBG_HBW_BASE 0x46E1B00ull
+#define DCORE3_MME_SBTE2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_DBG_LBW_BASE 0x46E1B80ull
+#define DCORE3_MME_SBTE2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_CORE_HBW_BASE 0x46E1C00ull
+#define DCORE3_MME_SBTE2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_SBTE2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_SBTE2_MSTR_IF_CORE_LBW_BASE 0x46E1D80ull
+#define DCORE3_MME_SBTE2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_SBTE2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_MSTR_IF_SPECIAL_BASE 0x46E1E80ull
+#define DCORE3_MME_SBTE2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE2_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE3_MME_SBTE3_BASE 0x46E8000ull
+#define DCORE3_MME_SBTE3_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_SECTION 0xE800
+#define mmDCORE3_MME_SBTE3_SPECIAL_BASE 0x46E8E80ull
+#define DCORE3_MME_SBTE3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_BASE 0x46E9000ull
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_BASE 0x46E9200ull
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_BASE 0x46E9400ull
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_BASE 0x46E9600ull
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_E2E_CRDT_BASE 0x46E9800ull
+#define DCORE3_MME_SBTE3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_SBTE3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_SBTE3_MSTR_IF_AXUSER_BASE 0x46E9A80ull
+#define DCORE3_MME_SBTE3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_SBTE3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_DBG_HBW_BASE 0x46E9B00ull
+#define DCORE3_MME_SBTE3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_DBG_LBW_BASE 0x46E9B80ull
+#define DCORE3_MME_SBTE3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_CORE_HBW_BASE 0x46E9C00ull
+#define DCORE3_MME_SBTE3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_SBTE3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_SBTE3_MSTR_IF_CORE_LBW_BASE 0x46E9D80ull
+#define DCORE3_MME_SBTE3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_SBTE3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_MSTR_IF_SPECIAL_BASE 0x46E9E80ull
+#define DCORE3_MME_SBTE3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE3_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE3_MME_SBTE4_BASE 0x46F0000ull
+#define DCORE3_MME_SBTE4_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_SECTION 0xE800
+#define mmDCORE3_MME_SBTE4_SPECIAL_BASE 0x46F0E80ull
+#define DCORE3_MME_SBTE4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE4_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_BASE 0x46F1000ull
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_BASE 0x46F1200ull
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_BASE 0x46F1400ull
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_BASE 0x46F1600ull
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_SBTE4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_E2E_CRDT_BASE 0x46F1800ull
+#define DCORE3_MME_SBTE4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_SBTE4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_SBTE4_MSTR_IF_AXUSER_BASE 0x46F1A80ull
+#define DCORE3_MME_SBTE4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_SBTE4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_DBG_HBW_BASE 0x46F1B00ull
+#define DCORE3_MME_SBTE4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_DBG_LBW_BASE 0x46F1B80ull
+#define DCORE3_MME_SBTE4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_SBTE4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_CORE_HBW_BASE 0x46F1C00ull
+#define DCORE3_MME_SBTE4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_SBTE4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_SBTE4_MSTR_IF_CORE_LBW_BASE 0x46F1D80ull
+#define DCORE3_MME_SBTE4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_SBTE4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_MSTR_IF_SPECIAL_BASE 0x46F1E80ull
+#define DCORE3_MME_SBTE4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_SBTE4_MSTR_IF_SPECIAL_SECTION 0x6180
+#define mmDCORE3_MME_ACC_BASE 0x46F8000ull
+#define DCORE3_MME_ACC_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_SECTION 0xE800
+#define mmDCORE3_MME_ACC_SPECIAL_BASE 0x46F8E80ull
+#define DCORE3_MME_ACC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_ACC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE 0x46F9000ull
+#define DCORE3_MME_WB0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_WB0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_WB0_MSTR_IF_RR_PRVT_HBW_BASE 0x46F9200ull
+#define DCORE3_MME_WB0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_WB0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_WB0_MSTR_IF_RR_SHRD_LBW_BASE 0x46F9400ull
+#define DCORE3_MME_WB0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_WB0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_WB0_MSTR_IF_RR_PRVT_LBW_BASE 0x46F9600ull
+#define DCORE3_MME_WB0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_WB0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_WB0_MSTR_IF_E2E_CRDT_BASE 0x46F9800ull
+#define DCORE3_MME_WB0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_WB0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_WB0_MSTR_IF_AXUSER_BASE 0x46F9A80ull
+#define DCORE3_MME_WB0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_WB0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_WB0_MSTR_IF_DBG_HBW_BASE 0x46F9B00ull
+#define DCORE3_MME_WB0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_WB0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_WB0_MSTR_IF_DBG_LBW_BASE 0x46F9B80ull
+#define DCORE3_MME_WB0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_WB0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_WB0_MSTR_IF_CORE_HBW_BASE 0x46F9C00ull
+#define DCORE3_MME_WB0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_WB0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_WB0_MSTR_IF_CORE_LBW_BASE 0x46F9D80ull
+#define DCORE3_MME_WB0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_WB0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_WB0_MSTR_IF_SPECIAL_BASE 0x46F9E80ull
+#define DCORE3_MME_WB0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_WB0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE 0x46FA000ull
+#define DCORE3_MME_WB1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_WB1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_MME_WB1_MSTR_IF_RR_PRVT_HBW_BASE 0x46FA200ull
+#define DCORE3_MME_WB1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_MME_WB1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_MME_WB1_MSTR_IF_RR_SHRD_LBW_BASE 0x46FA400ull
+#define DCORE3_MME_WB1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_WB1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_MME_WB1_MSTR_IF_RR_PRVT_LBW_BASE 0x46FA600ull
+#define DCORE3_MME_WB1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_MME_WB1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_MME_WB1_MSTR_IF_E2E_CRDT_BASE 0x46FA800ull
+#define DCORE3_MME_WB1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_MME_WB1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_MME_WB1_MSTR_IF_AXUSER_BASE 0x46FAA80ull
+#define DCORE3_MME_WB1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_MME_WB1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_MME_WB1_MSTR_IF_DBG_HBW_BASE 0x46FAB00ull
+#define DCORE3_MME_WB1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_WB1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_MME_WB1_MSTR_IF_DBG_LBW_BASE 0x46FAB80ull
+#define DCORE3_MME_WB1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_MME_WB1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_MME_WB1_MSTR_IF_CORE_HBW_BASE 0x46FAC00ull
+#define DCORE3_MME_WB1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_MME_WB1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_MME_WB1_MSTR_IF_CORE_LBW_BASE 0x46FAD80ull
+#define DCORE3_MME_WB1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_MME_WB1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_MME_WB1_MSTR_IF_SPECIAL_BASE 0x46FAE80ull
+#define DCORE3_MME_WB1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_MME_WB1_MSTR_IF_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SYNC_MNGR_OBJS_BASE 0x4700000ull
+#define DCORE3_SYNC_MNGR_OBJS_MAX_OFFSET 0x15A00
+#define DCORE3_SYNC_MNGR_OBJS_SECTION 0x1E000
+#define mmDCORE3_SYNC_MNGR_GLBL_BASE 0x471E000ull
+#define DCORE3_SYNC_MNGR_GLBL_MAX_OFFSET 0x1000
+#define DCORE3_SYNC_MNGR_GLBL_SECTION 0xE800
+#define mmDCORE3_SYNC_MNGR_GLBL_SPECIAL_BASE 0x471EE80ull
+#define DCORE3_SYNC_MNGR_GLBL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SYNC_MNGR_GLBL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_BASE 0x471F000ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_BASE 0x471F200ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_BASE 0x471F400ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_BASE 0x471F600ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_SYNC_MNGR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_E2E_CRDT_BASE 0x471F800ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_SYNC_MNGR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_AXUSER_BASE 0x471FA80ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_SYNC_MNGR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_DBG_HBW_BASE 0x471FB00ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_SYNC_MNGR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_DBG_LBW_BASE 0x471FB80ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_SYNC_MNGR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_CORE_HBW_BASE 0x471FC00ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_SYNC_MNGR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_CORE_LBW_BASE 0x471FD80ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_SYNC_MNGR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_SYNC_MNGR_MSTR_IF_SPECIAL_BASE 0x471FE80ull
+#define DCORE3_SYNC_MNGR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SYNC_MNGR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HIF0_BASE 0x4720000ull
+#define DCORE3_HIF0_MAX_OFFSET 0x1000
+#define DCORE3_HIF0_SECTION 0xE800
+#define mmDCORE3_HIF0_SPECIAL_BASE 0x4720E80ull
+#define DCORE3_HIF0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HIF0_SPECIAL_SECTION 0x3180
+#define mmDCORE3_HIF1_BASE 0x4724000ull
+#define DCORE3_HIF1_MAX_OFFSET 0x1000
+#define DCORE3_HIF1_SECTION 0xE800
+#define mmDCORE3_HIF1_SPECIAL_BASE 0x4724E80ull
+#define DCORE3_HIF1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HIF1_SPECIAL_SECTION 0x3180
+#define mmDCORE3_HIF2_BASE 0x4728000ull
+#define DCORE3_HIF2_MAX_OFFSET 0x1000
+#define DCORE3_HIF2_SECTION 0xE800
+#define mmDCORE3_HIF2_SPECIAL_BASE 0x4728E80ull
+#define DCORE3_HIF2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HIF2_SPECIAL_SECTION 0x3180
+#define mmDCORE3_HIF3_BASE 0x472C000ull
+#define DCORE3_HIF3_MAX_OFFSET 0x1000
+#define DCORE3_HIF3_SECTION 0xE800
+#define mmDCORE3_HIF3_SPECIAL_BASE 0x472CE80ull
+#define DCORE3_HIF3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HIF3_SPECIAL_SECTION 0x13180
+#define mmDCORE3_RTR0_CTRL_BASE 0x4740000ull
+#define DCORE3_RTR0_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR0_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR0_CTRL_SPECIAL_BASE 0x4740E80ull
+#define DCORE3_RTR0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR0_H3_BASE 0x4741000ull
+#define DCORE3_RTR0_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR0_H3_SECTION 0xE800
+#define mmDCORE3_RTR0_H3_SPECIAL_BASE 0x4741E80ull
+#define DCORE3_RTR0_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR0_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR0_MSTR_IF_RR_SHRD_HBW_BASE 0x4742000ull
+#define DCORE3_RTR0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR0_MSTR_IF_RR_PRVT_HBW_BASE 0x4742200ull
+#define DCORE3_RTR0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR0_MSTR_IF_RR_SHRD_LBW_BASE 0x4742400ull
+#define DCORE3_RTR0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR0_MSTR_IF_RR_PRVT_LBW_BASE 0x4742600ull
+#define DCORE3_RTR0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR0_MSTR_IF_E2E_CRDT_BASE 0x4742800ull
+#define DCORE3_RTR0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR0_MSTR_IF_AXUSER_BASE 0x4742A80ull
+#define DCORE3_RTR0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR0_MSTR_IF_DBG_HBW_BASE 0x4742B00ull
+#define DCORE3_RTR0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR0_MSTR_IF_DBG_LBW_BASE 0x4742B80ull
+#define DCORE3_RTR0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR0_MSTR_IF_CORE_HBW_BASE 0x4742C00ull
+#define DCORE3_RTR0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR0_MSTR_IF_CORE_LBW_BASE 0x4742D80ull
+#define DCORE3_RTR0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR0_MSTR_IF_SPECIAL_BASE 0x4742E80ull
+#define DCORE3_RTR0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR0_ADD_DEC_HBW_BASE 0x4743000ull
+#define DCORE3_RTR0_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR0_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR0_ADD_DEC_LBW_BASE 0x4743400ull
+#define DCORE3_RTR0_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR0_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR0_ADD_DEC_SPECIAL_BASE 0x4743E80ull
+#define DCORE3_RTR0_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR0_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR0_BASE 0x4744000ull
+#define DCORE3_RTR0_MAX_OFFSET 0x1000
+#define DCORE3_RTR0_SECTION 0x3000
+#define mmDCORE3_RTR0_HBW_RD_RQ_LL_STAT_BASE 0x4744300ull
+#define DCORE3_RTR0_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_HBW_RD_RS_LL_STAT_BASE 0x4744340ull
+#define DCORE3_RTR0_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_HBW_WR_RQ_LL_STAT_BASE 0x4744380ull
+#define DCORE3_RTR0_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_HBW_WR_RS_LL_STAT_BASE 0x47443C0ull
+#define DCORE3_RTR0_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_LBW_RD_RQ_LL_STAT_BASE 0x4744400ull
+#define DCORE3_RTR0_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_LBW_RD_RS_LL_STAT_BASE 0x4744440ull
+#define DCORE3_RTR0_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_LBW_WR_RQ_LL_STAT_BASE 0x4744480ull
+#define DCORE3_RTR0_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_LBW_WR_RS_LL_STAT_BASE 0x47444C0ull
+#define DCORE3_RTR0_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_HBW_MFIFO_BASE 0x4744500ull
+#define DCORE3_RTR0_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR0_E2E_RD_LL_STAT_BASE 0x4744540ull
+#define DCORE3_RTR0_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR0_E2E_WR_LL_STAT_BASE 0x4744580ull
+#define DCORE3_RTR0_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR0_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR0_RTR_HBW_XACT_STAT_BASE 0x4744600ull
+#define DCORE3_RTR0_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR0_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR0_RTR_LBW_XACT_STAT_BASE 0x4744680ull
+#define DCORE3_RTR0_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR0_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR0_RTR_E2E_XACT_STAT_BASE 0x4744700ull
+#define DCORE3_RTR0_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR0_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR0_SPECIAL_BASE 0x4744E80ull
+#define DCORE3_RTR0_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR0_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR0_DBG_ADDR_BASE 0x4745000ull
+#define DCORE3_RTR0_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR0_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR0_DBG_ADDR_SPECIAL_BASE 0x4745E80ull
+#define DCORE3_RTR0_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR0_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR1_CTRL_BASE 0x4748000ull
+#define DCORE3_RTR1_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR1_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR1_CTRL_SPECIAL_BASE 0x4748E80ull
+#define DCORE3_RTR1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR1_H3_BASE 0x4749000ull
+#define DCORE3_RTR1_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR1_H3_SECTION 0xE800
+#define mmDCORE3_RTR1_H3_SPECIAL_BASE 0x4749E80ull
+#define DCORE3_RTR1_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR1_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR1_MSTR_IF_RR_SHRD_HBW_BASE 0x474A000ull
+#define DCORE3_RTR1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR1_MSTR_IF_RR_PRVT_HBW_BASE 0x474A200ull
+#define DCORE3_RTR1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR1_MSTR_IF_RR_SHRD_LBW_BASE 0x474A400ull
+#define DCORE3_RTR1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR1_MSTR_IF_RR_PRVT_LBW_BASE 0x474A600ull
+#define DCORE3_RTR1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR1_MSTR_IF_E2E_CRDT_BASE 0x474A800ull
+#define DCORE3_RTR1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR1_MSTR_IF_AXUSER_BASE 0x474AA80ull
+#define DCORE3_RTR1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR1_MSTR_IF_DBG_HBW_BASE 0x474AB00ull
+#define DCORE3_RTR1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR1_MSTR_IF_DBG_LBW_BASE 0x474AB80ull
+#define DCORE3_RTR1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR1_MSTR_IF_CORE_HBW_BASE 0x474AC00ull
+#define DCORE3_RTR1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR1_MSTR_IF_CORE_LBW_BASE 0x474AD80ull
+#define DCORE3_RTR1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR1_MSTR_IF_SPECIAL_BASE 0x474AE80ull
+#define DCORE3_RTR1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR1_ADD_DEC_HBW_BASE 0x474B000ull
+#define DCORE3_RTR1_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR1_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR1_ADD_DEC_LBW_BASE 0x474B400ull
+#define DCORE3_RTR1_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR1_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR1_ADD_DEC_SPECIAL_BASE 0x474BE80ull
+#define DCORE3_RTR1_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR1_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR1_BASE 0x474C000ull
+#define DCORE3_RTR1_MAX_OFFSET 0x1000
+#define DCORE3_RTR1_SECTION 0x3000
+#define mmDCORE3_RTR1_HBW_RD_RQ_LL_STAT_BASE 0x474C300ull
+#define DCORE3_RTR1_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_HBW_RD_RS_LL_STAT_BASE 0x474C340ull
+#define DCORE3_RTR1_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_HBW_WR_RQ_LL_STAT_BASE 0x474C380ull
+#define DCORE3_RTR1_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_HBW_WR_RS_LL_STAT_BASE 0x474C3C0ull
+#define DCORE3_RTR1_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_LBW_RD_RQ_LL_STAT_BASE 0x474C400ull
+#define DCORE3_RTR1_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_LBW_RD_RS_LL_STAT_BASE 0x474C440ull
+#define DCORE3_RTR1_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_LBW_WR_RQ_LL_STAT_BASE 0x474C480ull
+#define DCORE3_RTR1_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_LBW_WR_RS_LL_STAT_BASE 0x474C4C0ull
+#define DCORE3_RTR1_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_HBW_MFIFO_BASE 0x474C500ull
+#define DCORE3_RTR1_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR1_E2E_RD_LL_STAT_BASE 0x474C540ull
+#define DCORE3_RTR1_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR1_E2E_WR_LL_STAT_BASE 0x474C580ull
+#define DCORE3_RTR1_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR1_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR1_RTR_HBW_XACT_STAT_BASE 0x474C600ull
+#define DCORE3_RTR1_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR1_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR1_RTR_LBW_XACT_STAT_BASE 0x474C680ull
+#define DCORE3_RTR1_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR1_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR1_RTR_E2E_XACT_STAT_BASE 0x474C700ull
+#define DCORE3_RTR1_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR1_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR1_SPECIAL_BASE 0x474CE80ull
+#define DCORE3_RTR1_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR1_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR1_DBG_ADDR_BASE 0x474D000ull
+#define DCORE3_RTR1_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR1_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR1_DBG_ADDR_SPECIAL_BASE 0x474DE80ull
+#define DCORE3_RTR1_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR1_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR2_CTRL_BASE 0x4750000ull
+#define DCORE3_RTR2_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR2_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR2_CTRL_SPECIAL_BASE 0x4750E80ull
+#define DCORE3_RTR2_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR2_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR2_H3_BASE 0x4751000ull
+#define DCORE3_RTR2_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR2_H3_SECTION 0xE800
+#define mmDCORE3_RTR2_H3_SPECIAL_BASE 0x4751E80ull
+#define DCORE3_RTR2_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR2_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR2_MSTR_IF_RR_SHRD_HBW_BASE 0x4752000ull
+#define DCORE3_RTR2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR2_MSTR_IF_RR_PRVT_HBW_BASE 0x4752200ull
+#define DCORE3_RTR2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR2_MSTR_IF_RR_SHRD_LBW_BASE 0x4752400ull
+#define DCORE3_RTR2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR2_MSTR_IF_RR_PRVT_LBW_BASE 0x4752600ull
+#define DCORE3_RTR2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR2_MSTR_IF_E2E_CRDT_BASE 0x4752800ull
+#define DCORE3_RTR2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR2_MSTR_IF_AXUSER_BASE 0x4752A80ull
+#define DCORE3_RTR2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR2_MSTR_IF_DBG_HBW_BASE 0x4752B00ull
+#define DCORE3_RTR2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR2_MSTR_IF_DBG_LBW_BASE 0x4752B80ull
+#define DCORE3_RTR2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR2_MSTR_IF_CORE_HBW_BASE 0x4752C00ull
+#define DCORE3_RTR2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR2_MSTR_IF_CORE_LBW_BASE 0x4752D80ull
+#define DCORE3_RTR2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR2_MSTR_IF_SPECIAL_BASE 0x4752E80ull
+#define DCORE3_RTR2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR2_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR2_ADD_DEC_HBW_BASE 0x4753000ull
+#define DCORE3_RTR2_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR2_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR2_ADD_DEC_LBW_BASE 0x4753400ull
+#define DCORE3_RTR2_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR2_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR2_ADD_DEC_SPECIAL_BASE 0x4753E80ull
+#define DCORE3_RTR2_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR2_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR2_BASE 0x4754000ull
+#define DCORE3_RTR2_MAX_OFFSET 0x1000
+#define DCORE3_RTR2_SECTION 0x3000
+#define mmDCORE3_RTR2_HBW_RD_RQ_LL_STAT_BASE 0x4754300ull
+#define DCORE3_RTR2_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_HBW_RD_RS_LL_STAT_BASE 0x4754340ull
+#define DCORE3_RTR2_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_HBW_WR_RQ_LL_STAT_BASE 0x4754380ull
+#define DCORE3_RTR2_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_HBW_WR_RS_LL_STAT_BASE 0x47543C0ull
+#define DCORE3_RTR2_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_LBW_RD_RQ_LL_STAT_BASE 0x4754400ull
+#define DCORE3_RTR2_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_LBW_RD_RS_LL_STAT_BASE 0x4754440ull
+#define DCORE3_RTR2_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_LBW_WR_RQ_LL_STAT_BASE 0x4754480ull
+#define DCORE3_RTR2_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_LBW_WR_RS_LL_STAT_BASE 0x47544C0ull
+#define DCORE3_RTR2_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_HBW_MFIFO_BASE 0x4754500ull
+#define DCORE3_RTR2_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR2_E2E_RD_LL_STAT_BASE 0x4754540ull
+#define DCORE3_RTR2_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR2_E2E_WR_LL_STAT_BASE 0x4754580ull
+#define DCORE3_RTR2_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR2_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR2_RTR_HBW_XACT_STAT_BASE 0x4754600ull
+#define DCORE3_RTR2_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR2_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR2_RTR_LBW_XACT_STAT_BASE 0x4754680ull
+#define DCORE3_RTR2_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR2_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR2_RTR_E2E_XACT_STAT_BASE 0x4754700ull
+#define DCORE3_RTR2_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR2_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR2_SPECIAL_BASE 0x4754E80ull
+#define DCORE3_RTR2_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR2_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR2_DBG_ADDR_BASE 0x4755000ull
+#define DCORE3_RTR2_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR2_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR2_DBG_ADDR_SPECIAL_BASE 0x4755E80ull
+#define DCORE3_RTR2_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR2_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR3_CTRL_BASE 0x4758000ull
+#define DCORE3_RTR3_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR3_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR3_CTRL_SPECIAL_BASE 0x4758E80ull
+#define DCORE3_RTR3_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR3_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR3_H3_BASE 0x4759000ull
+#define DCORE3_RTR3_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR3_H3_SECTION 0xE800
+#define mmDCORE3_RTR3_H3_SPECIAL_BASE 0x4759E80ull
+#define DCORE3_RTR3_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR3_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR3_MSTR_IF_RR_SHRD_HBW_BASE 0x475A000ull
+#define DCORE3_RTR3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR3_MSTR_IF_RR_PRVT_HBW_BASE 0x475A200ull
+#define DCORE3_RTR3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR3_MSTR_IF_RR_SHRD_LBW_BASE 0x475A400ull
+#define DCORE3_RTR3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR3_MSTR_IF_RR_PRVT_LBW_BASE 0x475A600ull
+#define DCORE3_RTR3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR3_MSTR_IF_E2E_CRDT_BASE 0x475A800ull
+#define DCORE3_RTR3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR3_MSTR_IF_AXUSER_BASE 0x475AA80ull
+#define DCORE3_RTR3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR3_MSTR_IF_DBG_HBW_BASE 0x475AB00ull
+#define DCORE3_RTR3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR3_MSTR_IF_DBG_LBW_BASE 0x475AB80ull
+#define DCORE3_RTR3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR3_MSTR_IF_CORE_HBW_BASE 0x475AC00ull
+#define DCORE3_RTR3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR3_MSTR_IF_CORE_LBW_BASE 0x475AD80ull
+#define DCORE3_RTR3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR3_MSTR_IF_SPECIAL_BASE 0x475AE80ull
+#define DCORE3_RTR3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR3_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR3_ADD_DEC_HBW_BASE 0x475B000ull
+#define DCORE3_RTR3_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR3_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR3_ADD_DEC_LBW_BASE 0x475B400ull
+#define DCORE3_RTR3_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR3_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR3_ADD_DEC_SPECIAL_BASE 0x475BE80ull
+#define DCORE3_RTR3_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR3_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR3_BASE 0x475C000ull
+#define DCORE3_RTR3_MAX_OFFSET 0x1000
+#define DCORE3_RTR3_SECTION 0x3000
+#define mmDCORE3_RTR3_HBW_RD_RQ_LL_STAT_BASE 0x475C300ull
+#define DCORE3_RTR3_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_HBW_RD_RS_LL_STAT_BASE 0x475C340ull
+#define DCORE3_RTR3_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_HBW_WR_RQ_LL_STAT_BASE 0x475C380ull
+#define DCORE3_RTR3_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_HBW_WR_RS_LL_STAT_BASE 0x475C3C0ull
+#define DCORE3_RTR3_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_LBW_RD_RQ_LL_STAT_BASE 0x475C400ull
+#define DCORE3_RTR3_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_LBW_RD_RS_LL_STAT_BASE 0x475C440ull
+#define DCORE3_RTR3_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_LBW_WR_RQ_LL_STAT_BASE 0x475C480ull
+#define DCORE3_RTR3_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_LBW_WR_RS_LL_STAT_BASE 0x475C4C0ull
+#define DCORE3_RTR3_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_HBW_MFIFO_BASE 0x475C500ull
+#define DCORE3_RTR3_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR3_E2E_RD_LL_STAT_BASE 0x475C540ull
+#define DCORE3_RTR3_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR3_E2E_WR_LL_STAT_BASE 0x475C580ull
+#define DCORE3_RTR3_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR3_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR3_RTR_HBW_XACT_STAT_BASE 0x475C600ull
+#define DCORE3_RTR3_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR3_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR3_RTR_LBW_XACT_STAT_BASE 0x475C680ull
+#define DCORE3_RTR3_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR3_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR3_RTR_E2E_XACT_STAT_BASE 0x475C700ull
+#define DCORE3_RTR3_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR3_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR3_SPECIAL_BASE 0x475CE80ull
+#define DCORE3_RTR3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR3_DBG_ADDR_BASE 0x475D000ull
+#define DCORE3_RTR3_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR3_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR3_DBG_ADDR_SPECIAL_BASE 0x475DE80ull
+#define DCORE3_RTR3_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR3_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR4_CTRL_BASE 0x4760000ull
+#define DCORE3_RTR4_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR4_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR4_CTRL_SPECIAL_BASE 0x4760E80ull
+#define DCORE3_RTR4_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR4_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR4_H3_BASE 0x4761000ull
+#define DCORE3_RTR4_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR4_H3_SECTION 0xE800
+#define mmDCORE3_RTR4_H3_SPECIAL_BASE 0x4761E80ull
+#define DCORE3_RTR4_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR4_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR4_MSTR_IF_RR_SHRD_HBW_BASE 0x4762000ull
+#define DCORE3_RTR4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR4_MSTR_IF_RR_PRVT_HBW_BASE 0x4762200ull
+#define DCORE3_RTR4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR4_MSTR_IF_RR_SHRD_LBW_BASE 0x4762400ull
+#define DCORE3_RTR4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR4_MSTR_IF_RR_PRVT_LBW_BASE 0x4762600ull
+#define DCORE3_RTR4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR4_MSTR_IF_E2E_CRDT_BASE 0x4762800ull
+#define DCORE3_RTR4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR4_MSTR_IF_AXUSER_BASE 0x4762A80ull
+#define DCORE3_RTR4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR4_MSTR_IF_DBG_HBW_BASE 0x4762B00ull
+#define DCORE3_RTR4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR4_MSTR_IF_DBG_LBW_BASE 0x4762B80ull
+#define DCORE3_RTR4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR4_MSTR_IF_CORE_HBW_BASE 0x4762C00ull
+#define DCORE3_RTR4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR4_MSTR_IF_CORE_LBW_BASE 0x4762D80ull
+#define DCORE3_RTR4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR4_MSTR_IF_SPECIAL_BASE 0x4762E80ull
+#define DCORE3_RTR4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR4_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR4_ADD_DEC_HBW_BASE 0x4763000ull
+#define DCORE3_RTR4_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR4_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR4_ADD_DEC_LBW_BASE 0x4763400ull
+#define DCORE3_RTR4_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR4_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR4_ADD_DEC_SPECIAL_BASE 0x4763E80ull
+#define DCORE3_RTR4_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR4_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR4_BASE 0x4764000ull
+#define DCORE3_RTR4_MAX_OFFSET 0x1000
+#define DCORE3_RTR4_SECTION 0x3000
+#define mmDCORE3_RTR4_HBW_RD_RQ_LL_STAT_BASE 0x4764300ull
+#define DCORE3_RTR4_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_HBW_RD_RS_LL_STAT_BASE 0x4764340ull
+#define DCORE3_RTR4_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_HBW_WR_RQ_LL_STAT_BASE 0x4764380ull
+#define DCORE3_RTR4_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_HBW_WR_RS_LL_STAT_BASE 0x47643C0ull
+#define DCORE3_RTR4_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_LBW_RD_RQ_LL_STAT_BASE 0x4764400ull
+#define DCORE3_RTR4_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_LBW_RD_RS_LL_STAT_BASE 0x4764440ull
+#define DCORE3_RTR4_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_LBW_WR_RQ_LL_STAT_BASE 0x4764480ull
+#define DCORE3_RTR4_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_LBW_WR_RS_LL_STAT_BASE 0x47644C0ull
+#define DCORE3_RTR4_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_HBW_MFIFO_BASE 0x4764500ull
+#define DCORE3_RTR4_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR4_E2E_RD_LL_STAT_BASE 0x4764540ull
+#define DCORE3_RTR4_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR4_E2E_WR_LL_STAT_BASE 0x4764580ull
+#define DCORE3_RTR4_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR4_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR4_RTR_HBW_XACT_STAT_BASE 0x4764600ull
+#define DCORE3_RTR4_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR4_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR4_RTR_LBW_XACT_STAT_BASE 0x4764680ull
+#define DCORE3_RTR4_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR4_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR4_RTR_E2E_XACT_STAT_BASE 0x4764700ull
+#define DCORE3_RTR4_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR4_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR4_SPECIAL_BASE 0x4764E80ull
+#define DCORE3_RTR4_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR4_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR4_DBG_ADDR_BASE 0x4765000ull
+#define DCORE3_RTR4_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR4_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR4_DBG_ADDR_SPECIAL_BASE 0x4765E80ull
+#define DCORE3_RTR4_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR4_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR5_CTRL_BASE 0x4768000ull
+#define DCORE3_RTR5_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR5_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR5_CTRL_SPECIAL_BASE 0x4768E80ull
+#define DCORE3_RTR5_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR5_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR5_H3_BASE 0x4769000ull
+#define DCORE3_RTR5_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR5_H3_SECTION 0xE800
+#define mmDCORE3_RTR5_H3_SPECIAL_BASE 0x4769E80ull
+#define DCORE3_RTR5_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR5_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR5_MSTR_IF_RR_SHRD_HBW_BASE 0x476A000ull
+#define DCORE3_RTR5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR5_MSTR_IF_RR_PRVT_HBW_BASE 0x476A200ull
+#define DCORE3_RTR5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR5_MSTR_IF_RR_SHRD_LBW_BASE 0x476A400ull
+#define DCORE3_RTR5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR5_MSTR_IF_RR_PRVT_LBW_BASE 0x476A600ull
+#define DCORE3_RTR5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR5_MSTR_IF_E2E_CRDT_BASE 0x476A800ull
+#define DCORE3_RTR5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR5_MSTR_IF_AXUSER_BASE 0x476AA80ull
+#define DCORE3_RTR5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR5_MSTR_IF_DBG_HBW_BASE 0x476AB00ull
+#define DCORE3_RTR5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR5_MSTR_IF_DBG_LBW_BASE 0x476AB80ull
+#define DCORE3_RTR5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR5_MSTR_IF_CORE_HBW_BASE 0x476AC00ull
+#define DCORE3_RTR5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR5_MSTR_IF_CORE_LBW_BASE 0x476AD80ull
+#define DCORE3_RTR5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR5_MSTR_IF_SPECIAL_BASE 0x476AE80ull
+#define DCORE3_RTR5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR5_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR5_ADD_DEC_HBW_BASE 0x476B000ull
+#define DCORE3_RTR5_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR5_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR5_ADD_DEC_LBW_BASE 0x476B400ull
+#define DCORE3_RTR5_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR5_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR5_ADD_DEC_SPECIAL_BASE 0x476BE80ull
+#define DCORE3_RTR5_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR5_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR5_BASE 0x476C000ull
+#define DCORE3_RTR5_MAX_OFFSET 0x1000
+#define DCORE3_RTR5_SECTION 0x3000
+#define mmDCORE3_RTR5_HBW_RD_RQ_LL_STAT_BASE 0x476C300ull
+#define DCORE3_RTR5_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_HBW_RD_RS_LL_STAT_BASE 0x476C340ull
+#define DCORE3_RTR5_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_HBW_WR_RQ_LL_STAT_BASE 0x476C380ull
+#define DCORE3_RTR5_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_HBW_WR_RS_LL_STAT_BASE 0x476C3C0ull
+#define DCORE3_RTR5_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_LBW_RD_RQ_LL_STAT_BASE 0x476C400ull
+#define DCORE3_RTR5_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_LBW_RD_RS_LL_STAT_BASE 0x476C440ull
+#define DCORE3_RTR5_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_LBW_WR_RQ_LL_STAT_BASE 0x476C480ull
+#define DCORE3_RTR5_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_LBW_WR_RS_LL_STAT_BASE 0x476C4C0ull
+#define DCORE3_RTR5_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_HBW_MFIFO_BASE 0x476C500ull
+#define DCORE3_RTR5_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR5_E2E_RD_LL_STAT_BASE 0x476C540ull
+#define DCORE3_RTR5_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR5_E2E_WR_LL_STAT_BASE 0x476C580ull
+#define DCORE3_RTR5_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR5_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR5_RTR_HBW_XACT_STAT_BASE 0x476C600ull
+#define DCORE3_RTR5_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR5_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR5_RTR_LBW_XACT_STAT_BASE 0x476C680ull
+#define DCORE3_RTR5_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR5_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR5_RTR_E2E_XACT_STAT_BASE 0x476C700ull
+#define DCORE3_RTR5_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR5_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR5_SPECIAL_BASE 0x476CE80ull
+#define DCORE3_RTR5_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR5_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR5_DBG_ADDR_BASE 0x476D000ull
+#define DCORE3_RTR5_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR5_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR5_DBG_ADDR_SPECIAL_BASE 0x476DE80ull
+#define DCORE3_RTR5_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR5_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR6_CTRL_BASE 0x4770000ull
+#define DCORE3_RTR6_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR6_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR6_CTRL_SPECIAL_BASE 0x4770E80ull
+#define DCORE3_RTR6_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR6_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR6_H3_BASE 0x4771000ull
+#define DCORE3_RTR6_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR6_H3_SECTION 0xE800
+#define mmDCORE3_RTR6_H3_SPECIAL_BASE 0x4771E80ull
+#define DCORE3_RTR6_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR6_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR6_MSTR_IF_RR_SHRD_HBW_BASE 0x4772000ull
+#define DCORE3_RTR6_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR6_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR6_MSTR_IF_RR_PRVT_HBW_BASE 0x4772200ull
+#define DCORE3_RTR6_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR6_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR6_MSTR_IF_RR_SHRD_LBW_BASE 0x4772400ull
+#define DCORE3_RTR6_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR6_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR6_MSTR_IF_RR_PRVT_LBW_BASE 0x4772600ull
+#define DCORE3_RTR6_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR6_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR6_MSTR_IF_E2E_CRDT_BASE 0x4772800ull
+#define DCORE3_RTR6_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR6_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR6_MSTR_IF_AXUSER_BASE 0x4772A80ull
+#define DCORE3_RTR6_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR6_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR6_MSTR_IF_DBG_HBW_BASE 0x4772B00ull
+#define DCORE3_RTR6_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR6_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR6_MSTR_IF_DBG_LBW_BASE 0x4772B80ull
+#define DCORE3_RTR6_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR6_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR6_MSTR_IF_CORE_HBW_BASE 0x4772C00ull
+#define DCORE3_RTR6_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR6_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR6_MSTR_IF_CORE_LBW_BASE 0x4772D80ull
+#define DCORE3_RTR6_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR6_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR6_MSTR_IF_SPECIAL_BASE 0x4772E80ull
+#define DCORE3_RTR6_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR6_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR6_ADD_DEC_HBW_BASE 0x4773000ull
+#define DCORE3_RTR6_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR6_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR6_ADD_DEC_LBW_BASE 0x4773400ull
+#define DCORE3_RTR6_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR6_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR6_ADD_DEC_SPECIAL_BASE 0x4773E80ull
+#define DCORE3_RTR6_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR6_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR6_BASE 0x4774000ull
+#define DCORE3_RTR6_MAX_OFFSET 0x1000
+#define DCORE3_RTR6_SECTION 0x3000
+#define mmDCORE3_RTR6_HBW_RD_RQ_LL_STAT_BASE 0x4774300ull
+#define DCORE3_RTR6_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_HBW_RD_RS_LL_STAT_BASE 0x4774340ull
+#define DCORE3_RTR6_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_HBW_WR_RQ_LL_STAT_BASE 0x4774380ull
+#define DCORE3_RTR6_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_HBW_WR_RS_LL_STAT_BASE 0x47743C0ull
+#define DCORE3_RTR6_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_LBW_RD_RQ_LL_STAT_BASE 0x4774400ull
+#define DCORE3_RTR6_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_LBW_RD_RS_LL_STAT_BASE 0x4774440ull
+#define DCORE3_RTR6_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_LBW_WR_RQ_LL_STAT_BASE 0x4774480ull
+#define DCORE3_RTR6_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_LBW_WR_RS_LL_STAT_BASE 0x47744C0ull
+#define DCORE3_RTR6_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_HBW_MFIFO_BASE 0x4774500ull
+#define DCORE3_RTR6_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR6_E2E_RD_LL_STAT_BASE 0x4774540ull
+#define DCORE3_RTR6_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR6_E2E_WR_LL_STAT_BASE 0x4774580ull
+#define DCORE3_RTR6_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR6_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR6_RTR_HBW_XACT_STAT_BASE 0x4774600ull
+#define DCORE3_RTR6_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR6_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR6_RTR_LBW_XACT_STAT_BASE 0x4774680ull
+#define DCORE3_RTR6_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR6_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR6_RTR_E2E_XACT_STAT_BASE 0x4774700ull
+#define DCORE3_RTR6_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR6_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR6_SPECIAL_BASE 0x4774E80ull
+#define DCORE3_RTR6_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR6_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR6_DBG_ADDR_BASE 0x4775000ull
+#define DCORE3_RTR6_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR6_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR6_DBG_ADDR_SPECIAL_BASE 0x4775E80ull
+#define DCORE3_RTR6_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR6_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_RTR7_CTRL_BASE 0x4778000ull
+#define DCORE3_RTR7_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_RTR7_CTRL_SECTION 0xE800
+#define mmDCORE3_RTR7_CTRL_SPECIAL_BASE 0x4778E80ull
+#define DCORE3_RTR7_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR7_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR7_H3_BASE 0x4779000ull
+#define DCORE3_RTR7_H3_MAX_OFFSET 0x1000
+#define DCORE3_RTR7_H3_SECTION 0xE800
+#define mmDCORE3_RTR7_H3_SPECIAL_BASE 0x4779E80ull
+#define DCORE3_RTR7_H3_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR7_H3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR7_MSTR_IF_RR_SHRD_HBW_BASE 0x477A000ull
+#define DCORE3_RTR7_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR7_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_RTR7_MSTR_IF_RR_PRVT_HBW_BASE 0x477A200ull
+#define DCORE3_RTR7_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_RTR7_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_RTR7_MSTR_IF_RR_SHRD_LBW_BASE 0x477A400ull
+#define DCORE3_RTR7_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR7_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_RTR7_MSTR_IF_RR_PRVT_LBW_BASE 0x477A600ull
+#define DCORE3_RTR7_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_RTR7_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_RTR7_MSTR_IF_E2E_CRDT_BASE 0x477A800ull
+#define DCORE3_RTR7_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_RTR7_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_RTR7_MSTR_IF_AXUSER_BASE 0x477AA80ull
+#define DCORE3_RTR7_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_RTR7_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_RTR7_MSTR_IF_DBG_HBW_BASE 0x477AB00ull
+#define DCORE3_RTR7_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR7_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_RTR7_MSTR_IF_DBG_LBW_BASE 0x477AB80ull
+#define DCORE3_RTR7_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_RTR7_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_RTR7_MSTR_IF_CORE_HBW_BASE 0x477AC00ull
+#define DCORE3_RTR7_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_RTR7_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_RTR7_MSTR_IF_CORE_LBW_BASE 0x477AD80ull
+#define DCORE3_RTR7_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_RTR7_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_RTR7_MSTR_IF_SPECIAL_BASE 0x477AE80ull
+#define DCORE3_RTR7_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR7_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR7_ADD_DEC_HBW_BASE 0x477B000ull
+#define DCORE3_RTR7_ADD_DEC_HBW_MAX_OFFSET 0x4000
+#define DCORE3_RTR7_ADD_DEC_HBW_SECTION 0x4000
+#define mmDCORE3_RTR7_ADD_DEC_LBW_BASE 0x477B400ull
+#define DCORE3_RTR7_ADD_DEC_LBW_MAX_OFFSET 0xA600
+#define DCORE3_RTR7_ADD_DEC_LBW_SECTION 0xA800
+#define mmDCORE3_RTR7_ADD_DEC_SPECIAL_BASE 0x477BE80ull
+#define DCORE3_RTR7_ADD_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR7_ADD_DEC_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR7_BASE 0x477C000ull
+#define DCORE3_RTR7_MAX_OFFSET 0x1000
+#define DCORE3_RTR7_SECTION 0x3000
+#define mmDCORE3_RTR7_HBW_RD_RQ_LL_STAT_BASE 0x477C300ull
+#define DCORE3_RTR7_HBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_HBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_HBW_RD_RS_LL_STAT_BASE 0x477C340ull
+#define DCORE3_RTR7_HBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_HBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_HBW_WR_RQ_LL_STAT_BASE 0x477C380ull
+#define DCORE3_RTR7_HBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_HBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_HBW_WR_RS_LL_STAT_BASE 0x477C3C0ull
+#define DCORE3_RTR7_HBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_HBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_LBW_RD_RQ_LL_STAT_BASE 0x477C400ull
+#define DCORE3_RTR7_LBW_RD_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_LBW_RD_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_LBW_RD_RS_LL_STAT_BASE 0x477C440ull
+#define DCORE3_RTR7_LBW_RD_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_LBW_RD_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_LBW_WR_RQ_LL_STAT_BASE 0x477C480ull
+#define DCORE3_RTR7_LBW_WR_RQ_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_LBW_WR_RQ_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_LBW_WR_RS_LL_STAT_BASE 0x477C4C0ull
+#define DCORE3_RTR7_LBW_WR_RS_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_LBW_WR_RS_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_HBW_MFIFO_BASE 0x477C500ull
+#define DCORE3_RTR7_HBW_MFIFO_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_HBW_MFIFO_SECTION 0x4000
+#define mmDCORE3_RTR7_E2E_RD_LL_STAT_BASE 0x477C540ull
+#define DCORE3_RTR7_E2E_RD_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_E2E_RD_LL_STAT_SECTION 0x4000
+#define mmDCORE3_RTR7_E2E_WR_LL_STAT_BASE 0x477C580ull
+#define DCORE3_RTR7_E2E_WR_LL_STAT_MAX_OFFSET 0x3000
+#define DCORE3_RTR7_E2E_WR_LL_STAT_SECTION 0x8000
+#define mmDCORE3_RTR7_RTR_HBW_XACT_STAT_BASE 0x477C600ull
+#define DCORE3_RTR7_RTR_HBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR7_RTR_HBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR7_RTR_LBW_XACT_STAT_BASE 0x477C680ull
+#define DCORE3_RTR7_RTR_LBW_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR7_RTR_LBW_XACT_STAT_SECTION 0x8000
+#define mmDCORE3_RTR7_RTR_E2E_XACT_STAT_BASE 0x477C700ull
+#define DCORE3_RTR7_RTR_E2E_XACT_STAT_MAX_OFFSET 0x5000
+#define DCORE3_RTR7_RTR_E2E_XACT_STAT_SECTION 0x7800
+#define mmDCORE3_RTR7_SPECIAL_BASE 0x477CE80ull
+#define DCORE3_RTR7_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR7_SPECIAL_SECTION 0x1800
+#define mmDCORE3_RTR7_DBG_ADDR_BASE 0x477D000ull
+#define DCORE3_RTR7_DBG_ADDR_MAX_OFFSET 0x1000
+#define DCORE3_RTR7_DBG_ADDR_SECTION 0xE800
+#define mmDCORE3_RTR7_DBG_ADDR_SPECIAL_BASE 0x477DE80ull
+#define DCORE3_RTR7_DBG_ADDR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_RTR7_DBG_ADDR_SPECIAL_SECTION 0x2180
+#define mmDCORE3_SRAM0_BANK_BASE 0x4780000ull
+#define DCORE3_SRAM0_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM0_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM0_BANK_SPECIAL_BASE 0x4780E80ull
+#define DCORE3_SRAM0_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM0_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM0_RTR_BASE 0x4781000ull
+#define DCORE3_SRAM0_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM0_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM0_RTR_SPECIAL_BASE 0x4781E80ull
+#define DCORE3_SRAM0_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM0_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM0_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4782000ull
+#define DCORE3_SRAM0_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4782100ull
+#define DCORE3_SRAM0_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4782200ull
+#define DCORE3_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4782300ull
+#define DCORE3_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4782400ull
+#define DCORE3_SRAM0_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4782500ull
+#define DCORE3_SRAM0_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4782600ull
+#define DCORE3_SRAM0_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM0_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4782700ull
+#define DCORE3_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4782780ull
+#define DCORE3_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4782800ull
+#define DCORE3_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4782880ull
+#define DCORE3_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4782900ull
+#define DCORE3_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4782980ull
+#define DCORE3_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4782A00ull
+#define DCORE3_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4782A80ull
+#define DCORE3_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM0_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM0_DBG_CNT_SPECIAL_BASE 0x4782E80ull
+#define DCORE3_SRAM0_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM0_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM1_BANK_BASE 0x4788000ull
+#define DCORE3_SRAM1_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM1_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM1_BANK_SPECIAL_BASE 0x4788E80ull
+#define DCORE3_SRAM1_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM1_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM1_RTR_BASE 0x4789000ull
+#define DCORE3_SRAM1_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM1_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM1_RTR_SPECIAL_BASE 0x4789E80ull
+#define DCORE3_SRAM1_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM1_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM1_DBG_CNT_N_HBW_DBG_CNT_BASE 0x478A000ull
+#define DCORE3_SRAM1_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_S_HBW_DBG_CNT_BASE 0x478A100ull
+#define DCORE3_SRAM1_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x478A200ull
+#define DCORE3_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x478A300ull
+#define DCORE3_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_N_LBW_DBG_CNT_BASE 0x478A400ull
+#define DCORE3_SRAM1_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_S_LBW_DBG_CNT_BASE 0x478A500ull
+#define DCORE3_SRAM1_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_L_LBW_DBG_CNT_BASE 0x478A600ull
+#define DCORE3_SRAM1_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM1_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x478A700ull
+#define DCORE3_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x478A780ull
+#define DCORE3_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x478A800ull
+#define DCORE3_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x478A880ull
+#define DCORE3_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x478A900ull
+#define DCORE3_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x478A980ull
+#define DCORE3_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x478AA00ull
+#define DCORE3_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x478AA80ull
+#define DCORE3_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM1_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM1_DBG_CNT_SPECIAL_BASE 0x478AE80ull
+#define DCORE3_SRAM1_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM1_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM2_BANK_BASE 0x4790000ull
+#define DCORE3_SRAM2_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM2_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM2_BANK_SPECIAL_BASE 0x4790E80ull
+#define DCORE3_SRAM2_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM2_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM2_RTR_BASE 0x4791000ull
+#define DCORE3_SRAM2_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM2_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM2_RTR_SPECIAL_BASE 0x4791E80ull
+#define DCORE3_SRAM2_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM2_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM2_DBG_CNT_N_HBW_DBG_CNT_BASE 0x4792000ull
+#define DCORE3_SRAM2_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_S_HBW_DBG_CNT_BASE 0x4792100ull
+#define DCORE3_SRAM2_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x4792200ull
+#define DCORE3_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x4792300ull
+#define DCORE3_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_N_LBW_DBG_CNT_BASE 0x4792400ull
+#define DCORE3_SRAM2_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_S_LBW_DBG_CNT_BASE 0x4792500ull
+#define DCORE3_SRAM2_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_L_LBW_DBG_CNT_BASE 0x4792600ull
+#define DCORE3_SRAM2_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM2_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x4792700ull
+#define DCORE3_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x4792780ull
+#define DCORE3_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x4792800ull
+#define DCORE3_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x4792880ull
+#define DCORE3_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x4792900ull
+#define DCORE3_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x4792980ull
+#define DCORE3_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x4792A00ull
+#define DCORE3_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x4792A80ull
+#define DCORE3_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM2_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM2_DBG_CNT_SPECIAL_BASE 0x4792E80ull
+#define DCORE3_SRAM2_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM2_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM3_BANK_BASE 0x4798000ull
+#define DCORE3_SRAM3_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM3_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM3_BANK_SPECIAL_BASE 0x4798E80ull
+#define DCORE3_SRAM3_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM3_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM3_RTR_BASE 0x4799000ull
+#define DCORE3_SRAM3_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM3_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM3_RTR_SPECIAL_BASE 0x4799E80ull
+#define DCORE3_SRAM3_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM3_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM3_DBG_CNT_N_HBW_DBG_CNT_BASE 0x479A000ull
+#define DCORE3_SRAM3_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_S_HBW_DBG_CNT_BASE 0x479A100ull
+#define DCORE3_SRAM3_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x479A200ull
+#define DCORE3_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x479A300ull
+#define DCORE3_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_N_LBW_DBG_CNT_BASE 0x479A400ull
+#define DCORE3_SRAM3_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_S_LBW_DBG_CNT_BASE 0x479A500ull
+#define DCORE3_SRAM3_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_L_LBW_DBG_CNT_BASE 0x479A600ull
+#define DCORE3_SRAM3_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM3_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x479A700ull
+#define DCORE3_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x479A780ull
+#define DCORE3_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x479A800ull
+#define DCORE3_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x479A880ull
+#define DCORE3_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x479A900ull
+#define DCORE3_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x479A980ull
+#define DCORE3_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x479AA00ull
+#define DCORE3_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x479AA80ull
+#define DCORE3_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM3_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM3_DBG_CNT_SPECIAL_BASE 0x479AE80ull
+#define DCORE3_SRAM3_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM3_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM4_BANK_BASE 0x47A0000ull
+#define DCORE3_SRAM4_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM4_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM4_BANK_SPECIAL_BASE 0x47A0E80ull
+#define DCORE3_SRAM4_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM4_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM4_RTR_BASE 0x47A1000ull
+#define DCORE3_SRAM4_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM4_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM4_RTR_SPECIAL_BASE 0x47A1E80ull
+#define DCORE3_SRAM4_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM4_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM4_DBG_CNT_N_HBW_DBG_CNT_BASE 0x47A2000ull
+#define DCORE3_SRAM4_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_S_HBW_DBG_CNT_BASE 0x47A2100ull
+#define DCORE3_SRAM4_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x47A2200ull
+#define DCORE3_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x47A2300ull
+#define DCORE3_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_N_LBW_DBG_CNT_BASE 0x47A2400ull
+#define DCORE3_SRAM4_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_S_LBW_DBG_CNT_BASE 0x47A2500ull
+#define DCORE3_SRAM4_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_L_LBW_DBG_CNT_BASE 0x47A2600ull
+#define DCORE3_SRAM4_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM4_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x47A2700ull
+#define DCORE3_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x47A2780ull
+#define DCORE3_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x47A2800ull
+#define DCORE3_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x47A2880ull
+#define DCORE3_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x47A2900ull
+#define DCORE3_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x47A2980ull
+#define DCORE3_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x47A2A00ull
+#define DCORE3_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x47A2A80ull
+#define DCORE3_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM4_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM4_DBG_CNT_SPECIAL_BASE 0x47A2E80ull
+#define DCORE3_SRAM4_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM4_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM5_BANK_BASE 0x47A8000ull
+#define DCORE3_SRAM5_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM5_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM5_BANK_SPECIAL_BASE 0x47A8E80ull
+#define DCORE3_SRAM5_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM5_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM5_RTR_BASE 0x47A9000ull
+#define DCORE3_SRAM5_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM5_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM5_RTR_SPECIAL_BASE 0x47A9E80ull
+#define DCORE3_SRAM5_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM5_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM5_DBG_CNT_N_HBW_DBG_CNT_BASE 0x47AA000ull
+#define DCORE3_SRAM5_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_S_HBW_DBG_CNT_BASE 0x47AA100ull
+#define DCORE3_SRAM5_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x47AA200ull
+#define DCORE3_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x47AA300ull
+#define DCORE3_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_N_LBW_DBG_CNT_BASE 0x47AA400ull
+#define DCORE3_SRAM5_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_S_LBW_DBG_CNT_BASE 0x47AA500ull
+#define DCORE3_SRAM5_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_L_LBW_DBG_CNT_BASE 0x47AA600ull
+#define DCORE3_SRAM5_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM5_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x47AA700ull
+#define DCORE3_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x47AA780ull
+#define DCORE3_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x47AA800ull
+#define DCORE3_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x47AA880ull
+#define DCORE3_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x47AA900ull
+#define DCORE3_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x47AA980ull
+#define DCORE3_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x47AAA00ull
+#define DCORE3_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x47AAA80ull
+#define DCORE3_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM5_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM5_DBG_CNT_SPECIAL_BASE 0x47AAE80ull
+#define DCORE3_SRAM5_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM5_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM6_BANK_BASE 0x47B0000ull
+#define DCORE3_SRAM6_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM6_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM6_BANK_SPECIAL_BASE 0x47B0E80ull
+#define DCORE3_SRAM6_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM6_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM6_RTR_BASE 0x47B1000ull
+#define DCORE3_SRAM6_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM6_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM6_RTR_SPECIAL_BASE 0x47B1E80ull
+#define DCORE3_SRAM6_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM6_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM6_DBG_CNT_N_HBW_DBG_CNT_BASE 0x47B2000ull
+#define DCORE3_SRAM6_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_S_HBW_DBG_CNT_BASE 0x47B2100ull
+#define DCORE3_SRAM6_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x47B2200ull
+#define DCORE3_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x47B2300ull
+#define DCORE3_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_N_LBW_DBG_CNT_BASE 0x47B2400ull
+#define DCORE3_SRAM6_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_S_LBW_DBG_CNT_BASE 0x47B2500ull
+#define DCORE3_SRAM6_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_L_LBW_DBG_CNT_BASE 0x47B2600ull
+#define DCORE3_SRAM6_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM6_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x47B2700ull
+#define DCORE3_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x47B2780ull
+#define DCORE3_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x47B2800ull
+#define DCORE3_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x47B2880ull
+#define DCORE3_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x47B2900ull
+#define DCORE3_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x47B2980ull
+#define DCORE3_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x47B2A00ull
+#define DCORE3_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x47B2A80ull
+#define DCORE3_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM6_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM6_DBG_CNT_SPECIAL_BASE 0x47B2E80ull
+#define DCORE3_SRAM6_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM6_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_SRAM7_BANK_BASE 0x47B8000ull
+#define DCORE3_SRAM7_BANK_MAX_OFFSET 0x1000
+#define DCORE3_SRAM7_BANK_SECTION 0xE800
+#define mmDCORE3_SRAM7_BANK_SPECIAL_BASE 0x47B8E80ull
+#define DCORE3_SRAM7_BANK_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM7_BANK_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM7_RTR_BASE 0x47B9000ull
+#define DCORE3_SRAM7_RTR_MAX_OFFSET 0x1000
+#define DCORE3_SRAM7_RTR_SECTION 0xE800
+#define mmDCORE3_SRAM7_RTR_SPECIAL_BASE 0x47B9E80ull
+#define DCORE3_SRAM7_RTR_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM7_RTR_SPECIAL_SECTION 0x1800
+#define mmDCORE3_SRAM7_DBG_CNT_N_HBW_DBG_CNT_BASE 0x47BA000ull
+#define DCORE3_SRAM7_DBG_CNT_N_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_N_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_S_HBW_DBG_CNT_BASE 0x47BA100ull
+#define DCORE3_SRAM7_DBG_CNT_S_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_S_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_BASE 0x47BA200ull
+#define DCORE3_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_L_BANK0_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_BASE 0x47BA300ull
+#define DCORE3_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_L_BANK1_HBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_N_LBW_DBG_CNT_BASE 0x47BA400ull
+#define DCORE3_SRAM7_DBG_CNT_N_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_N_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_S_LBW_DBG_CNT_BASE 0x47BA500ull
+#define DCORE3_SRAM7_DBG_CNT_S_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_S_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_L_LBW_DBG_CNT_BASE 0x47BA600ull
+#define DCORE3_SRAM7_DBG_CNT_L_LBW_DBG_CNT_MAX_OFFSET 0x5800
+#define DCORE3_SRAM7_DBG_CNT_L_LBW_DBG_CNT_SECTION 0x1000
+#define mmDCORE3_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_BASE 0x47BA700ull
+#define DCORE3_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_HBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_BASE 0x47BA780ull
+#define DCORE3_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_HBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_BASE 0x47BA800ull
+#define DCORE3_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_HBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_BASE 0x47BA880ull
+#define DCORE3_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_HBW_WR_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_BASE 0x47BA900ull
+#define DCORE3_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_LBW_RD_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_BASE 0x47BA980ull
+#define DCORE3_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_LBW_WR_RQ_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_BASE 0x47BAA00ull
+#define DCORE3_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_LBW_RD_RS_LL_STAT_CNT_SECTION 0x8000
+#define mmDCORE3_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_BASE 0x47BAA80ull
+#define DCORE3_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_MAX_OFFSET 0x3000
+#define DCORE3_SRAM7_DBG_CNT_LBW_WR_RS_LL_STAT_CNT_SECTION 0x4000
+#define mmDCORE3_SRAM7_DBG_CNT_SPECIAL_BASE 0x47BAE80ull
+#define DCORE3_SRAM7_DBG_CNT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_SRAM7_DBG_CNT_SPECIAL_SECTION 0x5180
+#define mmDCORE3_EDMA0_QM_DCCM_BASE 0x47C0000ull
+#define DCORE3_EDMA0_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_EDMA0_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_ARC_AUX_BASE 0x47C8000ull
+#define DCORE3_EDMA0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_EDMA0_QM_ARC_AUX_SPECIAL_BASE 0x47C8E80ull
+#define DCORE3_EDMA0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_EDMA0_QM_BASE 0x47CA000ull
+#define DCORE3_EDMA0_QM_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_QM_SECTION 0x9000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x47CA900ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x47CA908ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x47CA910ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x47CA918ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x47CA920ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x47CA928ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x47CA930ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x47CA938ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x47CA940ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x47CA948ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x47CA950ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x47CA958ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x47CA960ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x47CA968ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x47CA970ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x47CA978ull
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_EDMA0_QM_AXUSER_SECURED_BASE 0x47CAB00ull
+#define DCORE3_EDMA0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_EDMA0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_AXUSER_NONSECURED_BASE 0x47CAB80ull
+#define DCORE3_EDMA0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_EDMA0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_DBG_HBW_BASE 0x47CAC00ull
+#define DCORE3_EDMA0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA0_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_EDMA0_QM_DBG_LBW_BASE 0x47CAC80ull
+#define DCORE3_EDMA0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA0_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_EDMA0_QM_CGM_BASE 0x47CAD80ull
+#define DCORE3_EDMA0_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_EDMA0_QM_CGM_SECTION 0x1000
+#define mmDCORE3_EDMA0_QM_SPECIAL_BASE 0x47CAE80ull
+#define DCORE3_EDMA0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA0_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_EDMA0_CORE_BASE 0x47CB000ull
+#define DCORE3_EDMA0_CORE_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_CORE_SECTION 0x8000
+#define mmDCORE3_EDMA0_CORE_CTX_AXUSER_BASE 0x47CB800ull
+#define DCORE3_EDMA0_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_EDMA0_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE3_EDMA0_CORE_CTX_BASE 0x47CB860ull
+#define DCORE3_EDMA0_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE3_EDMA0_CORE_CTX_SECTION 0x5A00
+#define mmDCORE3_EDMA0_CORE_KDMA_CGM_BASE 0x47CBE00ull
+#define DCORE3_EDMA0_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE3_EDMA0_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE3_EDMA0_CORE_SPECIAL_BASE 0x47CBE80ull
+#define DCORE3_EDMA0_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA0_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE3_EDMA0_MSTR_IF_RR_SHRD_HBW_BASE 0x47CC000ull
+#define DCORE3_EDMA0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_EDMA0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_EDMA0_MSTR_IF_RR_PRVT_HBW_BASE 0x47CC200ull
+#define DCORE3_EDMA0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_EDMA0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_EDMA0_MSTR_IF_RR_SHRD_LBW_BASE 0x47CC400ull
+#define DCORE3_EDMA0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_EDMA0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_EDMA0_MSTR_IF_RR_PRVT_LBW_BASE 0x47CC600ull
+#define DCORE3_EDMA0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_EDMA0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_EDMA0_MSTR_IF_E2E_CRDT_BASE 0x47CC800ull
+#define DCORE3_EDMA0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_EDMA0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_EDMA0_MSTR_IF_AXUSER_BASE 0x47CCA80ull
+#define DCORE3_EDMA0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_EDMA0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_EDMA0_MSTR_IF_DBG_HBW_BASE 0x47CCB00ull
+#define DCORE3_EDMA0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_EDMA0_MSTR_IF_DBG_LBW_BASE 0x47CCB80ull
+#define DCORE3_EDMA0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_EDMA0_MSTR_IF_CORE_HBW_BASE 0x47CCC00ull
+#define DCORE3_EDMA0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_EDMA0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_EDMA0_MSTR_IF_CORE_LBW_BASE 0x47CCD80ull
+#define DCORE3_EDMA0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_EDMA0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_EDMA0_MSTR_IF_SPECIAL_BASE 0x47CCE80ull
+#define DCORE3_EDMA0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_EDMA1_QM_DCCM_BASE 0x47D0000ull
+#define DCORE3_EDMA1_QM_DCCM_MAX_OFFSET 0x4000
+#define DCORE3_EDMA1_QM_DCCM_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_ARC_AUX_BASE 0x47D8000ull
+#define DCORE3_EDMA1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_QM_ARC_AUX_SECTION 0xE800
+#define mmDCORE3_EDMA1_QM_ARC_AUX_SPECIAL_BASE 0x47D8E80ull
+#define DCORE3_EDMA1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmDCORE3_EDMA1_QM_BASE 0x47DA000ull
+#define DCORE3_EDMA1_QM_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_QM_SECTION 0x9000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x47DA900ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x47DA908ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x47DA910ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x47DA918ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x47DA920ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x47DA928ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x47DA930ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x47DA938ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x47DA940ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x47DA948ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x47DA950ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x47DA958ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x47DA960ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x47DA968ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x47DA970ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x47DA978ull
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmDCORE3_EDMA1_QM_AXUSER_SECURED_BASE 0x47DAB00ull
+#define DCORE3_EDMA1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define DCORE3_EDMA1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_AXUSER_NONSECURED_BASE 0x47DAB80ull
+#define DCORE3_EDMA1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define DCORE3_EDMA1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_DBG_HBW_BASE 0x47DAC00ull
+#define DCORE3_EDMA1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA1_QM_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_EDMA1_QM_DBG_LBW_BASE 0x47DAC80ull
+#define DCORE3_EDMA1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA1_QM_DBG_LBW_SECTION 0x1000
+#define mmDCORE3_EDMA1_QM_CGM_BASE 0x47DAD80ull
+#define DCORE3_EDMA1_QM_CGM_MAX_OFFSET 0xC000
+#define DCORE3_EDMA1_QM_CGM_SECTION 0x1000
+#define mmDCORE3_EDMA1_QM_SPECIAL_BASE 0x47DAE80ull
+#define DCORE3_EDMA1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA1_QM_SPECIAL_SECTION 0x1800
+#define mmDCORE3_EDMA1_CORE_BASE 0x47DB000ull
+#define DCORE3_EDMA1_CORE_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_CORE_SECTION 0x8000
+#define mmDCORE3_EDMA1_CORE_CTX_AXUSER_BASE 0x47DB800ull
+#define DCORE3_EDMA1_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_EDMA1_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmDCORE3_EDMA1_CORE_CTX_BASE 0x47DB860ull
+#define DCORE3_EDMA1_CORE_CTX_MAX_OFFSET 0x9000
+#define DCORE3_EDMA1_CORE_CTX_SECTION 0x5A00
+#define mmDCORE3_EDMA1_CORE_KDMA_CGM_BASE 0x47DBE00ull
+#define DCORE3_EDMA1_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define DCORE3_EDMA1_CORE_KDMA_CGM_SECTION 0x8000
+#define mmDCORE3_EDMA1_CORE_SPECIAL_BASE 0x47DBE80ull
+#define DCORE3_EDMA1_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA1_CORE_SPECIAL_SECTION 0x1800
+#define mmDCORE3_EDMA1_MSTR_IF_RR_SHRD_HBW_BASE 0x47DC000ull
+#define DCORE3_EDMA1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_EDMA1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_EDMA1_MSTR_IF_RR_PRVT_HBW_BASE 0x47DC200ull
+#define DCORE3_EDMA1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_EDMA1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_EDMA1_MSTR_IF_RR_SHRD_LBW_BASE 0x47DC400ull
+#define DCORE3_EDMA1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_EDMA1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_EDMA1_MSTR_IF_RR_PRVT_LBW_BASE 0x47DC600ull
+#define DCORE3_EDMA1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_EDMA1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_EDMA1_MSTR_IF_E2E_CRDT_BASE 0x47DC800ull
+#define DCORE3_EDMA1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_EDMA1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_EDMA1_MSTR_IF_AXUSER_BASE 0x47DCA80ull
+#define DCORE3_EDMA1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_EDMA1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_EDMA1_MSTR_IF_DBG_HBW_BASE 0x47DCB00ull
+#define DCORE3_EDMA1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_EDMA1_MSTR_IF_DBG_LBW_BASE 0x47DCB80ull
+#define DCORE3_EDMA1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_EDMA1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_EDMA1_MSTR_IF_CORE_HBW_BASE 0x47DCC00ull
+#define DCORE3_EDMA1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_EDMA1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_EDMA1_MSTR_IF_CORE_LBW_BASE 0x47DCD80ull
+#define DCORE3_EDMA1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_EDMA1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_EDMA1_MSTR_IF_SPECIAL_BASE 0x47DCE80ull
+#define DCORE3_EDMA1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_EDMA1_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmDCORE3_DEC0_CMD_BASE 0x47E0000ull
+#define DCORE3_DEC0_CMD_MAX_OFFSET 0x1100
+#define DCORE3_DEC0_CMD_SECTION 0x1000
+#define mmDCORE3_DEC0_VSI_BASE 0x47E1000ull
+#define DCORE3_DEC0_VSI_MAX_OFFSET 0x6FC0
+#define DCORE3_DEC0_VSI_SECTION 0x1000
+#define mmDCORE3_DEC0_L2C_BASE 0x47E2000ull
+#define DCORE3_DEC0_L2C_MAX_OFFSET 0x39C0
+#define DCORE3_DEC0_L2C_SECTION 0x1000
+#define mmDCORE3_VDEC0_BRDG_CTRL_BASE 0x47E3000ull
+#define DCORE3_VDEC0_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x47E3800ull
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x47E3900ull
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x47E3A00ull
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x47E3B00ull
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE3_VDEC0_BRDG_CTRL_AXUSER_DEC_BASE 0x47E3C00ull
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE3_VDEC0_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE3_VDEC0_BRDG_CTRL_SPECIAL_BASE 0x47E3E80ull
+#define DCORE3_VDEC0_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_VDEC0_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_VDEC0_CTRL_BASE 0x47E4000ull
+#define DCORE3_VDEC0_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_CTRL_SECTION 0xE800
+#define mmDCORE3_VDEC0_CTRL_SPECIAL_BASE 0x47E4E80ull
+#define DCORE3_VDEC0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_VDEC0_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE 0x47E5000ull
+#define DCORE3_VDEC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_VDEC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_VDEC0_MSTR_IF_RR_PRVT_HBW_BASE 0x47E5200ull
+#define DCORE3_VDEC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_VDEC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_VDEC0_MSTR_IF_RR_SHRD_LBW_BASE 0x47E5400ull
+#define DCORE3_VDEC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_VDEC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_VDEC0_MSTR_IF_RR_PRVT_LBW_BASE 0x47E5600ull
+#define DCORE3_VDEC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_VDEC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_VDEC0_MSTR_IF_E2E_CRDT_BASE 0x47E5800ull
+#define DCORE3_VDEC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_VDEC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_VDEC0_MSTR_IF_AXUSER_BASE 0x47E5A80ull
+#define DCORE3_VDEC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_VDEC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_VDEC0_MSTR_IF_DBG_HBW_BASE 0x47E5B00ull
+#define DCORE3_VDEC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_VDEC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_VDEC0_MSTR_IF_DBG_LBW_BASE 0x47E5B80ull
+#define DCORE3_VDEC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_VDEC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_VDEC0_MSTR_IF_CORE_HBW_BASE 0x47E5C00ull
+#define DCORE3_VDEC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_VDEC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_VDEC0_MSTR_IF_CORE_LBW_BASE 0x47E5D80ull
+#define DCORE3_VDEC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_VDEC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_VDEC0_MSTR_IF_SPECIAL_BASE 0x47E5E80ull
+#define DCORE3_VDEC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_VDEC0_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmDCORE3_DEC1_CMD_BASE 0x47F0000ull
+#define DCORE3_DEC1_CMD_MAX_OFFSET 0x1100
+#define DCORE3_DEC1_CMD_SECTION 0x1000
+#define mmDCORE3_DEC1_VSI_BASE 0x47F1000ull
+#define DCORE3_DEC1_VSI_MAX_OFFSET 0x6FC0
+#define DCORE3_DEC1_VSI_SECTION 0x1000
+#define mmDCORE3_DEC1_L2C_BASE 0x47F2000ull
+#define DCORE3_DEC1_L2C_MAX_OFFSET 0x39C0
+#define DCORE3_DEC1_L2C_SECTION 0x1000
+#define mmDCORE3_VDEC1_BRDG_CTRL_BASE 0x47F3000ull
+#define DCORE3_VDEC1_BRDG_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_BRDG_CTRL_SECTION 0x8000
+#define mmDCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x47F3800ull
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmDCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x47F3900ull
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmDCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x47F3A00ull
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmDCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x47F3B00ull
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmDCORE3_VDEC1_BRDG_CTRL_AXUSER_DEC_BASE 0x47F3C00ull
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define DCORE3_VDEC1_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmDCORE3_VDEC1_BRDG_CTRL_SPECIAL_BASE 0x47F3E80ull
+#define DCORE3_VDEC1_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_VDEC1_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_VDEC1_CTRL_BASE 0x47F4000ull
+#define DCORE3_VDEC1_CTRL_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_CTRL_SECTION 0xE800
+#define mmDCORE3_VDEC1_CTRL_SPECIAL_BASE 0x47F4E80ull
+#define DCORE3_VDEC1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_VDEC1_CTRL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE 0x47F5000ull
+#define DCORE3_VDEC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_VDEC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmDCORE3_VDEC1_MSTR_IF_RR_PRVT_HBW_BASE 0x47F5200ull
+#define DCORE3_VDEC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define DCORE3_VDEC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmDCORE3_VDEC1_MSTR_IF_RR_SHRD_LBW_BASE 0x47F5400ull
+#define DCORE3_VDEC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_VDEC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmDCORE3_VDEC1_MSTR_IF_RR_PRVT_LBW_BASE 0x47F5600ull
+#define DCORE3_VDEC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define DCORE3_VDEC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmDCORE3_VDEC1_MSTR_IF_E2E_CRDT_BASE 0x47F5800ull
+#define DCORE3_VDEC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define DCORE3_VDEC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmDCORE3_VDEC1_MSTR_IF_AXUSER_BASE 0x47F5A80ull
+#define DCORE3_VDEC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define DCORE3_VDEC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmDCORE3_VDEC1_MSTR_IF_DBG_HBW_BASE 0x47F5B00ull
+#define DCORE3_VDEC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define DCORE3_VDEC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmDCORE3_VDEC1_MSTR_IF_DBG_LBW_BASE 0x47F5B80ull
+#define DCORE3_VDEC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define DCORE3_VDEC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmDCORE3_VDEC1_MSTR_IF_CORE_HBW_BASE 0x47F5C00ull
+#define DCORE3_VDEC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define DCORE3_VDEC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmDCORE3_VDEC1_MSTR_IF_CORE_LBW_BASE 0x47F5D80ull
+#define DCORE3_VDEC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define DCORE3_VDEC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmDCORE3_VDEC1_MSTR_IF_SPECIAL_BASE 0x47F5E80ull
+#define DCORE3_VDEC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_VDEC1_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmGIC_BASE 0x4800000ull
+#define GIC_MAX_OFFSET 0x10000
+#define GIC_SECTION 0x401000
+#define mmPCIE_WRAP_BASE 0x4C01000ull
+#define PCIE_WRAP_MAX_OFFSET 0x1000
+#define PCIE_WRAP_SECTION 0xE800
+#define mmPCIE_WRAP_SPECIAL_BASE 0x4C01E80ull
+#define PCIE_WRAP_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_WRAP_SPECIAL_SECTION 0x1800
+#define mmPCIE_DBI_BASE 0x4C02000ull
+#define PCIE_DBI_MAX_OFFSET 0xC040
+#define PCIE_DBI_SECTION 0x2000
+#define mmPCIE_CORE_BASE 0x4C04000ull
+#define PCIE_CORE_MAX_OFFSET 0x1000
+#define PCIE_CORE_SECTION 0xE800
+#define mmPCIE_CORE_SPECIAL_BASE 0x4C04E80ull
+#define PCIE_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_CORE_SPECIAL_SECTION 0x2180
+#define mmPCIE_AUX_BASE 0x4C07000ull
+#define PCIE_AUX_MAX_OFFSET 0x1000
+#define PCIE_AUX_SECTION 0xE800
+#define mmPCIE_AUX_SPECIAL_BASE 0x4C07E80ull
+#define PCIE_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_AUX_SPECIAL_SECTION 0x8180
+#define mmPCIE_PHY_BASE 0x4C10000ull
+#define PCIE_PHY_MAX_OFFSET 0x1000
+#define PCIE_PHY_SECTION 0xE800
+#define mmPCIE_PHY_SPECIAL_BASE 0x4C10E80ull
+#define PCIE_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_PHY_SPECIAL_SECTION 0x2180
+#define mmPCIE_MSI_BASE 0x4C13000ull
+#define PCIE_MSI_MAX_OFFSET 0x8000
+#define PCIE_MSI_SECTION 0x1000
+#define mmPCIE_ELBI_RR_MSTR_IF_RR_SHRD_HBW_BASE 0x4C14000ull
+#define PCIE_ELBI_RR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PCIE_ELBI_RR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPCIE_ELBI_RR_MSTR_IF_RR_PRVT_HBW_BASE 0x4C14200ull
+#define PCIE_ELBI_RR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PCIE_ELBI_RR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPCIE_ELBI_RR_MSTR_IF_RR_SHRD_LBW_BASE 0x4C14400ull
+#define PCIE_ELBI_RR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PCIE_ELBI_RR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPCIE_ELBI_RR_MSTR_IF_RR_PRVT_LBW_BASE 0x4C14600ull
+#define PCIE_ELBI_RR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PCIE_ELBI_RR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPCIE_ELBI_RR_MSTR_IF_E2E_CRDT_BASE 0x4C14800ull
+#define PCIE_ELBI_RR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PCIE_ELBI_RR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPCIE_ELBI_RR_MSTR_IF_AXUSER_BASE 0x4C14A80ull
+#define PCIE_ELBI_RR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PCIE_ELBI_RR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPCIE_ELBI_RR_MSTR_IF_DBG_HBW_BASE 0x4C14B00ull
+#define PCIE_ELBI_RR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PCIE_ELBI_RR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPCIE_ELBI_RR_MSTR_IF_DBG_LBW_BASE 0x4C14B80ull
+#define PCIE_ELBI_RR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PCIE_ELBI_RR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPCIE_ELBI_RR_MSTR_IF_CORE_HBW_BASE 0x4C14C00ull
+#define PCIE_ELBI_RR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PCIE_ELBI_RR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPCIE_ELBI_RR_MSTR_IF_CORE_LBW_BASE 0x4C14D80ull
+#define PCIE_ELBI_RR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PCIE_ELBI_RR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPCIE_ELBI_RR_MSTR_IF_SPECIAL_BASE 0x4C14E80ull
+#define PCIE_ELBI_RR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_ELBI_RR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE 0x4C15000ull
+#define PCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPCIE_MSTR_RR_MSTR_IF_RR_PRVT_HBW_BASE 0x4C15200ull
+#define PCIE_MSTR_RR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PCIE_MSTR_RR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_LBW_BASE 0x4C15400ull
+#define PCIE_MSTR_RR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PCIE_MSTR_RR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPCIE_MSTR_RR_MSTR_IF_RR_PRVT_LBW_BASE 0x4C15600ull
+#define PCIE_MSTR_RR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PCIE_MSTR_RR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPCIE_MSTR_RR_MSTR_IF_E2E_CRDT_BASE 0x4C15800ull
+#define PCIE_MSTR_RR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PCIE_MSTR_RR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPCIE_MSTR_RR_MSTR_IF_AXUSER_BASE 0x4C15A80ull
+#define PCIE_MSTR_RR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PCIE_MSTR_RR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPCIE_MSTR_RR_MSTR_IF_DBG_HBW_BASE 0x4C15B00ull
+#define PCIE_MSTR_RR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PCIE_MSTR_RR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPCIE_MSTR_RR_MSTR_IF_DBG_LBW_BASE 0x4C15B80ull
+#define PCIE_MSTR_RR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PCIE_MSTR_RR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPCIE_MSTR_RR_MSTR_IF_CORE_HBW_BASE 0x4C15C00ull
+#define PCIE_MSTR_RR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PCIE_MSTR_RR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPCIE_MSTR_RR_MSTR_IF_CORE_LBW_BASE 0x4C15D80ull
+#define PCIE_MSTR_RR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PCIE_MSTR_RR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPCIE_MSTR_RR_MSTR_IF_SPECIAL_BASE 0x4C15E80ull
+#define PCIE_MSTR_RR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_MSTR_RR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPCIE_LBW_RR_MSTR_IF_RR_SHRD_HBW_BASE 0x4C16000ull
+#define PCIE_LBW_RR_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PCIE_LBW_RR_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPCIE_LBW_RR_MSTR_IF_RR_PRVT_HBW_BASE 0x4C16200ull
+#define PCIE_LBW_RR_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PCIE_LBW_RR_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPCIE_LBW_RR_MSTR_IF_RR_SHRD_LBW_BASE 0x4C16400ull
+#define PCIE_LBW_RR_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PCIE_LBW_RR_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPCIE_LBW_RR_MSTR_IF_RR_PRVT_LBW_BASE 0x4C16600ull
+#define PCIE_LBW_RR_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PCIE_LBW_RR_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPCIE_LBW_RR_MSTR_IF_E2E_CRDT_BASE 0x4C16800ull
+#define PCIE_LBW_RR_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PCIE_LBW_RR_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPCIE_LBW_RR_MSTR_IF_AXUSER_BASE 0x4C16A80ull
+#define PCIE_LBW_RR_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PCIE_LBW_RR_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPCIE_LBW_RR_MSTR_IF_DBG_HBW_BASE 0x4C16B00ull
+#define PCIE_LBW_RR_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PCIE_LBW_RR_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPCIE_LBW_RR_MSTR_IF_DBG_LBW_BASE 0x4C16B80ull
+#define PCIE_LBW_RR_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PCIE_LBW_RR_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPCIE_LBW_RR_MSTR_IF_CORE_HBW_BASE 0x4C16C00ull
+#define PCIE_LBW_RR_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PCIE_LBW_RR_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPCIE_LBW_RR_MSTR_IF_CORE_LBW_BASE 0x4C16D80ull
+#define PCIE_LBW_RR_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PCIE_LBW_RR_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPCIE_LBW_RR_MSTR_IF_SPECIAL_BASE 0x4C16E80ull
+#define PCIE_LBW_RR_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_LBW_RR_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPCIE_MSIX_BASE 0x4C17000ull
+#define PCIE_MSIX_MAX_OFFSET 0x4000
+#define PCIE_MSIX_SECTION 0x29000
+#define mmPSOC_I2C_M0_BASE 0x4C40000ull
+#define PSOC_I2C_M0_MAX_OFFSET 0x1000
+#define PSOC_I2C_M0_SECTION 0x1000
+#define mmPSOC_I2C_M1_BASE 0x4C41000ull
+#define PSOC_I2C_M1_MAX_OFFSET 0x1000
+#define PSOC_I2C_M1_SECTION 0x1000
+#define mmPSOC_I2C_S_BASE 0x4C42000ull
+#define PSOC_I2C_S_MAX_OFFSET 0x1000
+#define PSOC_I2C_S_SECTION 0x1000
+#define mmPSOC_SPI_BASE 0x4C43000ull
+#define PSOC_SPI_MAX_OFFSET 0x1000
+#define PSOC_SPI_SECTION 0x1000
+#define mmPSOC_QSPI_BASE 0x4C44000ull
+#define PSOC_QSPI_MAX_OFFSET 0x1000
+#define PSOC_QSPI_SECTION 0x1000
+#define mmPSOC_UART_0_BASE 0x4C45000ull
+#define PSOC_UART_0_MAX_OFFSET 0x1000
+#define PSOC_UART_0_SECTION 0x1000
+#define mmPSOC_UART_1_BASE 0x4C46000ull
+#define PSOC_UART_1_MAX_OFFSET 0x1000
+#define PSOC_UART_1_SECTION 0x1000
+#define mmPSOC_TIMER_BASE 0x4C47000ull
+#define PSOC_TIMER_MAX_OFFSET 0x1000
+#define PSOC_TIMER_SECTION 0x1000
+#define mmPSOC_WDOG_BASE 0x4C48000ull
+#define PSOC_WDOG_MAX_OFFSET 0x1000
+#define PSOC_WDOG_SECTION 0x1000
+#define mmPSOC_TIMESTAMP_BASE 0x4C49000ull
+#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
+#define PSOC_TIMESTAMP_SECTION 0x1000
+#define mmPSOC_EFUSE_BASE 0x4C4A000ull
+#define PSOC_EFUSE_MAX_OFFSET 0x1000
+#define PSOC_EFUSE_SECTION 0xE800
+#define mmPSOC_EFUSE_SPECIAL_BASE 0x4C4AE80ull
+#define PSOC_EFUSE_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_EFUSE_SPECIAL_SECTION 0x1800
+#define mmPSOC_GLOBAL_CONF_BASE 0x4C4B000ull
+#define PSOC_GLOBAL_CONF_MAX_OFFSET 0x1000
+#define PSOC_GLOBAL_CONF_SECTION 0xE800
+#define mmPSOC_GLOBAL_CONF_SPECIAL_BASE 0x4C4BE80ull
+#define PSOC_GLOBAL_CONF_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_GLOBAL_CONF_SPECIAL_SECTION 0x1800
+#define mmPSOC_GPIO0_BASE 0x4C4C000ull
+#define PSOC_GPIO0_MAX_OFFSET 0x1000
+#define PSOC_GPIO0_SECTION 0x1000
+#define mmPSOC_GPIO1_BASE 0x4C4D000ull
+#define PSOC_GPIO1_MAX_OFFSET 0x1000
+#define PSOC_GPIO1_SECTION 0x1000
+#define mmPSOC_BTL_BASE 0x4C4E000ull
+#define PSOC_BTL_MAX_OFFSET 0x1000
+#define PSOC_BTL_SECTION 0xE800
+#define mmPSOC_BTL_SPECIAL_BASE 0x4C4EE80ull
+#define PSOC_BTL_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_BTL_SPECIAL_SECTION 0x1800
+#define mmPSOC_CS_TRACE_BASE 0x4C4F000ull
+#define PSOC_CS_TRACE_MAX_OFFSET 0x1000
+#define PSOC_CS_TRACE_SECTION 0xE800
+#define mmPSOC_CS_TRACE_SPECIAL_BASE 0x4C4FE80ull
+#define PSOC_CS_TRACE_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_CS_TRACE_SPECIAL_SECTION 0x1800
+#define mmPSOC_GPIO2_BASE 0x4C50000ull
+#define PSOC_GPIO2_MAX_OFFSET 0x1000
+#define PSOC_GPIO2_SECTION 0x1000
+#define mmPSOC_GPIO3_BASE 0x4C51000ull
+#define PSOC_GPIO3_MAX_OFFSET 0x1000
+#define PSOC_GPIO3_SECTION 0x2000
+#define mmPSOC_DFT_EFUSE_BASE 0x4C53000ull
+#define PSOC_DFT_EFUSE_MAX_OFFSET 0x1000
+#define PSOC_DFT_EFUSE_SECTION 0xE800
+#define mmPSOC_DFT_EFUSE_SPECIAL_BASE 0x4C53E80ull
+#define PSOC_DFT_EFUSE_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_DFT_EFUSE_SPECIAL_SECTION 0x1800
+#define mmPSOC_RPM_0_BASE 0x4C54000ull
+#define PSOC_RPM_0_MAX_OFFSET 0x1000
+#define PSOC_RPM_0_SECTION 0xE800
+#define mmPSOC_RPM_0_SPECIAL_BASE 0x4C54E80ull
+#define PSOC_RPM_0_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_RPM_0_SPECIAL_SECTION 0x1800
+#define mmPSOC_RPM_1_BASE 0x4C55000ull
+#define PSOC_RPM_1_MAX_OFFSET 0x1000
+#define PSOC_RPM_1_SECTION 0xE800
+#define mmPSOC_RPM_1_SPECIAL_BASE 0x4C55E80ull
+#define PSOC_RPM_1_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_RPM_1_SPECIAL_SECTION 0x1800
+#define mmPSOC_GPIO4_BASE 0x4C56000ull
+#define PSOC_GPIO4_MAX_OFFSET 0x1000
+#define PSOC_GPIO4_SECTION 0x1000
+#define mmPSOC_GPIO5_BASE 0x4C57000ull
+#define PSOC_GPIO5_MAX_OFFSET 0x1000
+#define PSOC_GPIO5_SECTION 0x1000
+#define mmPSOC_PID_BASE 0x4C58000ull
+#define PSOC_PID_MAX_OFFSET 0x1000
+#define PSOC_PID_SECTION 0xE800
+#define mmPSOC_PID_SPECIAL_BASE 0x4C58E80ull
+#define PSOC_PID_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_PID_SPECIAL_SECTION 0x1800
+#define mmPSOC_ARC0_CFG_BASE 0x4C59000ull
+#define PSOC_ARC0_CFG_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CFG_SECTION 0xE800
+#define mmPSOC_ARC0_CFG_SPECIAL_BASE 0x4C59E80ull
+#define PSOC_ARC0_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_ARC0_CFG_SPECIAL_SECTION 0x1800
+#define mmPSOC_ARC0_MSTR_IF_RR_SHRD_HBW_BASE 0x4C5A000ull
+#define PSOC_ARC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PSOC_ARC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPSOC_ARC0_MSTR_IF_RR_PRVT_HBW_BASE 0x4C5A200ull
+#define PSOC_ARC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PSOC_ARC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPSOC_ARC0_MSTR_IF_RR_SHRD_LBW_BASE 0x4C5A400ull
+#define PSOC_ARC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PSOC_ARC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPSOC_ARC0_MSTR_IF_RR_PRVT_LBW_BASE 0x4C5A600ull
+#define PSOC_ARC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PSOC_ARC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPSOC_ARC0_MSTR_IF_E2E_CRDT_BASE 0x4C5A800ull
+#define PSOC_ARC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PSOC_ARC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPSOC_ARC0_MSTR_IF_AXUSER_BASE 0x4C5AA80ull
+#define PSOC_ARC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PSOC_ARC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPSOC_ARC0_MSTR_IF_DBG_HBW_BASE 0x4C5AB00ull
+#define PSOC_ARC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PSOC_ARC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPSOC_ARC0_MSTR_IF_DBG_LBW_BASE 0x4C5AB80ull
+#define PSOC_ARC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PSOC_ARC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPSOC_ARC0_MSTR_IF_CORE_HBW_BASE 0x4C5AC00ull
+#define PSOC_ARC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PSOC_ARC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPSOC_ARC0_MSTR_IF_CORE_LBW_BASE 0x4C5AD80ull
+#define PSOC_ARC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PSOC_ARC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPSOC_ARC0_MSTR_IF_SPECIAL_BASE 0x4C5AE80ull
+#define PSOC_ARC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_ARC0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPSOC_ARC0_AUX_BASE 0x4C5B000ull
+#define PSOC_ARC0_AUX_MAX_OFFSET 0x1000
+#define PSOC_ARC0_AUX_SECTION 0xE800
+#define mmPSOC_ARC0_AUX_SPECIAL_BASE 0x4C5BE80ull
+#define PSOC_ARC0_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_ARC0_AUX_SPECIAL_SECTION 0x1800
+#define mmPSOC_ARC1_CFG_BASE 0x4C5C000ull
+#define PSOC_ARC1_CFG_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CFG_SECTION 0xE800
+#define mmPSOC_ARC1_CFG_SPECIAL_BASE 0x4C5CE80ull
+#define PSOC_ARC1_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_ARC1_CFG_SPECIAL_SECTION 0x1800
+#define mmPSOC_ARC1_MSTR_IF_RR_SHRD_HBW_BASE 0x4C5D000ull
+#define PSOC_ARC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PSOC_ARC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPSOC_ARC1_MSTR_IF_RR_PRVT_HBW_BASE 0x4C5D200ull
+#define PSOC_ARC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PSOC_ARC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPSOC_ARC1_MSTR_IF_RR_SHRD_LBW_BASE 0x4C5D400ull
+#define PSOC_ARC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PSOC_ARC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPSOC_ARC1_MSTR_IF_RR_PRVT_LBW_BASE 0x4C5D600ull
+#define PSOC_ARC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PSOC_ARC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPSOC_ARC1_MSTR_IF_E2E_CRDT_BASE 0x4C5D800ull
+#define PSOC_ARC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PSOC_ARC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPSOC_ARC1_MSTR_IF_AXUSER_BASE 0x4C5DA80ull
+#define PSOC_ARC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PSOC_ARC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPSOC_ARC1_MSTR_IF_DBG_HBW_BASE 0x4C5DB00ull
+#define PSOC_ARC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PSOC_ARC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPSOC_ARC1_MSTR_IF_DBG_LBW_BASE 0x4C5DB80ull
+#define PSOC_ARC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PSOC_ARC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPSOC_ARC1_MSTR_IF_CORE_HBW_BASE 0x4C5DC00ull
+#define PSOC_ARC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PSOC_ARC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPSOC_ARC1_MSTR_IF_CORE_LBW_BASE 0x4C5DD80ull
+#define PSOC_ARC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PSOC_ARC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPSOC_ARC1_MSTR_IF_SPECIAL_BASE 0x4C5DE80ull
+#define PSOC_ARC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_ARC1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPSOC_ARC1_AUX_BASE 0x4C5E000ull
+#define PSOC_ARC1_AUX_MAX_OFFSET 0x1000
+#define PSOC_ARC1_AUX_SECTION 0xE800
+#define mmPSOC_ARC1_AUX_SPECIAL_BASE 0x4C5EE80ull
+#define PSOC_ARC1_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_ARC1_AUX_SPECIAL_SECTION 0x1180
+#define mmPSOC_SECURITY_BASE 0x4C60000ull
+#define PSOC_SECURITY_MAX_OFFSET 0x1000
+#define PSOC_SECURITY_SECTION 0xE800
+#define mmPSOC_SECURITY_SPECIAL_BASE 0x4C60E80ull
+#define PSOC_SECURITY_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_SECURITY_SPECIAL_SECTION 0x1800
+#define mmJT_MSTR_IF_RR_SHRD_HBW_BASE 0x4C61000ull
+#define JT_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define JT_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmJT_MSTR_IF_RR_PRVT_HBW_BASE 0x4C61200ull
+#define JT_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define JT_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmJT_MSTR_IF_RR_SHRD_LBW_BASE 0x4C61400ull
+#define JT_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define JT_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmJT_MSTR_IF_RR_PRVT_LBW_BASE 0x4C61600ull
+#define JT_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define JT_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmJT_MSTR_IF_E2E_CRDT_BASE 0x4C61800ull
+#define JT_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define JT_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmJT_MSTR_IF_AXUSER_BASE 0x4C61A80ull
+#define JT_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define JT_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmJT_MSTR_IF_DBG_HBW_BASE 0x4C61B00ull
+#define JT_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define JT_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmJT_MSTR_IF_DBG_LBW_BASE 0x4C61B80ull
+#define JT_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define JT_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmJT_MSTR_IF_CORE_HBW_BASE 0x4C61C00ull
+#define JT_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define JT_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmJT_MSTR_IF_CORE_LBW_BASE 0x4C61D80ull
+#define JT_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define JT_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmJT_MSTR_IF_SPECIAL_BASE 0x4C61E80ull
+#define JT_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define JT_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSMI_MSTR_IF_RR_SHRD_HBW_BASE 0x4C62000ull
+#define SMI_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SMI_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSMI_MSTR_IF_RR_PRVT_HBW_BASE 0x4C62200ull
+#define SMI_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SMI_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSMI_MSTR_IF_RR_SHRD_LBW_BASE 0x4C62400ull
+#define SMI_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SMI_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSMI_MSTR_IF_RR_PRVT_LBW_BASE 0x4C62600ull
+#define SMI_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SMI_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSMI_MSTR_IF_E2E_CRDT_BASE 0x4C62800ull
+#define SMI_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SMI_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSMI_MSTR_IF_AXUSER_BASE 0x4C62A80ull
+#define SMI_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SMI_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSMI_MSTR_IF_DBG_HBW_BASE 0x4C62B00ull
+#define SMI_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SMI_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSMI_MSTR_IF_DBG_LBW_BASE 0x4C62B80ull
+#define SMI_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SMI_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSMI_MSTR_IF_CORE_HBW_BASE 0x4C62C00ull
+#define SMI_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SMI_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSMI_MSTR_IF_CORE_LBW_BASE 0x4C62D80ull
+#define SMI_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SMI_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSMI_MSTR_IF_SPECIAL_BASE 0x4C62E80ull
+#define SMI_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SMI_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmI2C_S_MSTR_IF_RR_SHRD_HBW_BASE 0x4C63000ull
+#define I2C_S_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define I2C_S_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmI2C_S_MSTR_IF_RR_PRVT_HBW_BASE 0x4C63200ull
+#define I2C_S_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define I2C_S_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmI2C_S_MSTR_IF_RR_SHRD_LBW_BASE 0x4C63400ull
+#define I2C_S_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define I2C_S_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmI2C_S_MSTR_IF_RR_PRVT_LBW_BASE 0x4C63600ull
+#define I2C_S_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define I2C_S_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmI2C_S_MSTR_IF_E2E_CRDT_BASE 0x4C63800ull
+#define I2C_S_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define I2C_S_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmI2C_S_MSTR_IF_AXUSER_BASE 0x4C63A80ull
+#define I2C_S_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define I2C_S_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmI2C_S_MSTR_IF_DBG_HBW_BASE 0x4C63B00ull
+#define I2C_S_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define I2C_S_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmI2C_S_MSTR_IF_DBG_LBW_BASE 0x4C63B80ull
+#define I2C_S_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define I2C_S_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmI2C_S_MSTR_IF_CORE_HBW_BASE 0x4C63C00ull
+#define I2C_S_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define I2C_S_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmI2C_S_MSTR_IF_CORE_LBW_BASE 0x4C63D80ull
+#define I2C_S_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define I2C_S_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmI2C_S_MSTR_IF_SPECIAL_BASE 0x4C63E80ull
+#define I2C_S_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define I2C_S_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPSOC_SVID0_BASE 0x4C64000ull
+#define PSOC_SVID0_MAX_OFFSET 0x1000
+#define PSOC_SVID0_SECTION 0xE800
+#define mmPSOC_SVID0_SPECIAL_BASE 0x4C64E80ull
+#define PSOC_SVID0_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_SVID0_SPECIAL_SECTION 0x1800
+#define mmPSOC_SVID1_BASE 0x4C65000ull
+#define PSOC_SVID1_MAX_OFFSET 0x1000
+#define PSOC_SVID1_SECTION 0xE800
+#define mmPSOC_SVID1_SPECIAL_BASE 0x4C65E80ull
+#define PSOC_SVID1_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_SVID1_SPECIAL_SECTION 0x1800
+#define mmPSOC_SVID2_BASE 0x4C66000ull
+#define PSOC_SVID2_MAX_OFFSET 0x1000
+#define PSOC_SVID2_SECTION 0xE800
+#define mmPSOC_SVID2_SPECIAL_BASE 0x4C66E80ull
+#define PSOC_SVID2_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_SVID2_SPECIAL_SECTION 0x5180
+#define mmPSOC_MME_PLL_CTRL_BASE 0x4C6C000ull
+#define PSOC_MME_PLL_CTRL_MAX_OFFSET 0x3540
+#define PSOC_MME_PLL_CTRL_SECTION 0x3600
+#define mmPSOC_MME_PLL_ASIF_SLV_BASE 0x4C6C360ull
+#define PSOC_MME_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define PSOC_MME_PLL_ASIF_SLV_SECTION 0xA000
+#define mmPSOC_MME_PLL_DIV_0_RLX_BASE 0x4C6C400ull
+#define PSOC_MME_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define PSOC_MME_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmPSOC_MME_PLL_DIV_1_RLX_BASE 0x4C6C800ull
+#define PSOC_MME_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define PSOC_MME_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmPSOC_MME_PLL_DIV_2_RLX_BASE 0x4C6CA00ull
+#define PSOC_MME_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define PSOC_MME_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmPSOC_MME_PLL_DIV_3_RLX_BASE 0x4C6CC00ull
+#define PSOC_MME_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define PSOC_MME_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmPSOC_MME_PLL_SPECIAL_BASE 0x4C6CE80ull
+#define PSOC_MME_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_MME_PLL_SPECIAL_SECTION 0x1800
+#define mmPSOC_CPU_PLL_CTRL_BASE 0x4C6D000ull
+#define PSOC_CPU_PLL_CTRL_MAX_OFFSET 0x3540
+#define PSOC_CPU_PLL_CTRL_SECTION 0x3600
+#define mmPSOC_CPU_PLL_ASIF_SLV_BASE 0x4C6D360ull
+#define PSOC_CPU_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define PSOC_CPU_PLL_ASIF_SLV_SECTION 0xA000
+#define mmPSOC_CPU_PLL_DIV_0_RLX_BASE 0x4C6D400ull
+#define PSOC_CPU_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define PSOC_CPU_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmPSOC_CPU_PLL_DIV_1_RLX_BASE 0x4C6D800ull
+#define PSOC_CPU_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define PSOC_CPU_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmPSOC_CPU_PLL_DIV_2_RLX_BASE 0x4C6DA00ull
+#define PSOC_CPU_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define PSOC_CPU_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmPSOC_CPU_PLL_DIV_3_RLX_BASE 0x4C6DC00ull
+#define PSOC_CPU_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define PSOC_CPU_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmPSOC_CPU_PLL_SPECIAL_BASE 0x4C6DE80ull
+#define PSOC_CPU_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_CPU_PLL_SPECIAL_SECTION 0x1800
+#define mmPSOC_VID_PLL_CTRL_BASE 0x4C6E000ull
+#define PSOC_VID_PLL_CTRL_MAX_OFFSET 0x3540
+#define PSOC_VID_PLL_CTRL_SECTION 0x3600
+#define mmPSOC_VID_PLL_ASIF_SLV_BASE 0x4C6E360ull
+#define PSOC_VID_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define PSOC_VID_PLL_ASIF_SLV_SECTION 0xA000
+#define mmPSOC_VID_PLL_DIV_0_RLX_BASE 0x4C6E400ull
+#define PSOC_VID_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define PSOC_VID_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmPSOC_VID_PLL_DIV_1_RLX_BASE 0x4C6E800ull
+#define PSOC_VID_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define PSOC_VID_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmPSOC_VID_PLL_DIV_2_RLX_BASE 0x4C6EA00ull
+#define PSOC_VID_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define PSOC_VID_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmPSOC_VID_PLL_DIV_3_RLX_BASE 0x4C6EC00ull
+#define PSOC_VID_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define PSOC_VID_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmPSOC_VID_PLL_SPECIAL_BASE 0x4C6EE80ull
+#define PSOC_VID_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_VID_PLL_SPECIAL_SECTION 0x5180
+#define mmPSOC_RESET_CONF_BASE 0x4C74000ull
+#define PSOC_RESET_CONF_MAX_OFFSET 0x1000
+#define PSOC_RESET_CONF_SECTION 0xE800
+#define mmPSOC_RESET_CONF_SPECIAL_BASE 0x4C74E80ull
+#define PSOC_RESET_CONF_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_RESET_CONF_SPECIAL_SECTION 0x1800
+#define mmPSOC_DFT_APB_BASE 0x4C75000ull
+#define PSOC_DFT_APB_MAX_OFFSET 0x8000
+#define PSOC_DFT_APB_SECTION 0x1000
+#define mmPSOC_AVS0_BASE 0x4C76000ull
+#define PSOC_AVS0_MAX_OFFSET 0x1000
+#define PSOC_AVS0_SECTION 0xE800
+#define mmPSOC_AVS0_SPECIAL_BASE 0x4C76E80ull
+#define PSOC_AVS0_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_AVS0_SPECIAL_SECTION 0x1800
+#define mmPSOC_AVS1_BASE 0x4C77000ull
+#define PSOC_AVS1_MAX_OFFSET 0x1000
+#define PSOC_AVS1_SECTION 0xE800
+#define mmPSOC_AVS1_SPECIAL_BASE 0x4C77E80ull
+#define PSOC_AVS1_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_AVS1_SPECIAL_SECTION 0x1800
+#define mmPSOC_AVS2_BASE 0x4C78000ull
+#define PSOC_AVS2_MAX_OFFSET 0x1000
+#define PSOC_AVS2_SECTION 0xE800
+#define mmPSOC_AVS2_SPECIAL_BASE 0x4C78E80ull
+#define PSOC_AVS2_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_AVS2_SPECIAL_SECTION 0x1800
+#define mmPSOC_PWM0_BASE 0x4C79000ull
+#define PSOC_PWM0_MAX_OFFSET 0x1000
+#define PSOC_PWM0_SECTION 0xE800
+#define mmPSOC_PWM0_SPECIAL_BASE 0x4C79E80ull
+#define PSOC_PWM0_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_PWM0_SPECIAL_SECTION 0x1800
+#define mmPSOC_PWM1_BASE 0x4C7A000ull
+#define PSOC_PWM1_MAX_OFFSET 0x1000
+#define PSOC_PWM1_SECTION 0xE800
+#define mmPSOC_PWM1_SPECIAL_BASE 0x4C7AE80ull
+#define PSOC_PWM1_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_PWM1_SPECIAL_SECTION 0x1800
+#define mmSVID0_AC_BASE 0x4C7B000ull
+#define SVID0_AC_MAX_OFFSET 0x1000
+#define SVID0_AC_SECTION 0xE800
+#define mmSVID0_AC_SPECIAL_BASE 0x4C7BE80ull
+#define SVID0_AC_SPECIAL_MAX_OFFSET 0x1800
+#define SVID0_AC_SPECIAL_SECTION 0x1800
+#define mmSVID1_AC_BASE 0x4C7C000ull
+#define SVID1_AC_MAX_OFFSET 0x1000
+#define SVID1_AC_SECTION 0xE800
+#define mmSVID1_AC_SPECIAL_BASE 0x4C7CE80ull
+#define SVID1_AC_SPECIAL_MAX_OFFSET 0x1800
+#define SVID1_AC_SPECIAL_SECTION 0x1800
+#define mmSVID2_AC_BASE 0x4C7D000ull
+#define SVID2_AC_MAX_OFFSET 0x1000
+#define SVID2_AC_SECTION 0xE800
+#define mmSVID2_AC_SPECIAL_BASE 0x4C7DE80ull
+#define SVID2_AC_SPECIAL_MAX_OFFSET 0x1800
+#define SVID2_AC_SPECIAL_SECTION 0x1180
+#define mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE 0x4C7F000ull
+#define PSOC_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PSOC_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPSOC_MSTR_IF_RR_PRVT_HBW_BASE 0x4C7F200ull
+#define PSOC_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PSOC_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPSOC_MSTR_IF_RR_SHRD_LBW_BASE 0x4C7F400ull
+#define PSOC_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PSOC_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPSOC_MSTR_IF_RR_PRVT_LBW_BASE 0x4C7F600ull
+#define PSOC_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PSOC_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPSOC_MSTR_IF_E2E_CRDT_BASE 0x4C7F800ull
+#define PSOC_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PSOC_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPSOC_MSTR_IF_AXUSER_BASE 0x4C7FA80ull
+#define PSOC_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PSOC_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPSOC_MSTR_IF_DBG_HBW_BASE 0x4C7FB00ull
+#define PSOC_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PSOC_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPSOC_MSTR_IF_DBG_LBW_BASE 0x4C7FB80ull
+#define PSOC_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PSOC_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPSOC_MSTR_IF_CORE_HBW_BASE 0x4C7FC00ull
+#define PSOC_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PSOC_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPSOC_MSTR_IF_CORE_LBW_BASE 0x4C7FD80ull
+#define PSOC_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PSOC_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPSOC_MSTR_IF_SPECIAL_BASE 0x4C7FE80ull
+#define PSOC_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PSOC_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPDMA0_QM_ARC_DCCM_BASE 0x4C80000ull
+#define PDMA0_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define PDMA0_QM_ARC_DCCM_SECTION 0x8000
+#define mmPDMA0_QM_ARC_AUX_BASE 0x4C88000ull
+#define PDMA0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define PDMA0_QM_ARC_AUX_SECTION 0xE800
+#define mmPDMA0_QM_ARC_AUX_SPECIAL_BASE 0x4C88E80ull
+#define PDMA0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmPDMA0_QM_BASE 0x4C8A000ull
+#define PDMA0_QM_MAX_OFFSET 0x1000
+#define PDMA0_QM_SECTION 0x9000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x4C8A900ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x4C8A908ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x4C8A910ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x4C8A918ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x4C8A920ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x4C8A928ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x4C8A930ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x4C8A938ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x4C8A940ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x4C8A948ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x4C8A950ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x4C8A958ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x4C8A960ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x4C8A968ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x4C8A970ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmPDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x4C8A978ull
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define PDMA0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmPDMA0_QM_AXUSER_SECURED_BASE 0x4C8AB00ull
+#define PDMA0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define PDMA0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmPDMA0_QM_AXUSER_NONSECURED_BASE 0x4C8AB80ull
+#define PDMA0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define PDMA0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmPDMA0_QM_DBG_HBW_BASE 0x4C8AC00ull
+#define PDMA0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define PDMA0_QM_DBG_HBW_SECTION 0x8000
+#define mmPDMA0_QM_DBG_LBW_BASE 0x4C8AC80ull
+#define PDMA0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define PDMA0_QM_DBG_LBW_SECTION 0x1000
+#define mmPDMA0_QM_CGM_BASE 0x4C8AD80ull
+#define PDMA0_QM_CGM_MAX_OFFSET 0xC000
+#define PDMA0_QM_CGM_SECTION 0x1000
+#define mmPDMA0_QM_SPECIAL_BASE 0x4C8AE80ull
+#define PDMA0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA0_QM_SPECIAL_SECTION 0x1800
+#define mmPDMA0_CORE_BASE 0x4C8B000ull
+#define PDMA0_CORE_MAX_OFFSET 0x1000
+#define PDMA0_CORE_SECTION 0x8000
+#define mmPDMA0_CORE_CTX_AXUSER_BASE 0x4C8B800ull
+#define PDMA0_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define PDMA0_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmPDMA0_CORE_CTX_BASE 0x4C8B860ull
+#define PDMA0_CORE_CTX_MAX_OFFSET 0x9000
+#define PDMA0_CORE_CTX_SECTION 0x5A00
+#define mmPDMA0_CORE_KDMA_CGM_BASE 0x4C8BE00ull
+#define PDMA0_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define PDMA0_CORE_KDMA_CGM_SECTION 0x8000
+#define mmPDMA0_CORE_SPECIAL_BASE 0x4C8BE80ull
+#define PDMA0_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA0_CORE_SPECIAL_SECTION 0x1800
+#define mmPDMA0_MSTR_IF_RR_SHRD_HBW_BASE 0x4C8C000ull
+#define PDMA0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PDMA0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPDMA0_MSTR_IF_RR_PRVT_HBW_BASE 0x4C8C200ull
+#define PDMA0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PDMA0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPDMA0_MSTR_IF_RR_SHRD_LBW_BASE 0x4C8C400ull
+#define PDMA0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PDMA0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPDMA0_MSTR_IF_RR_PRVT_LBW_BASE 0x4C8C600ull
+#define PDMA0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PDMA0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPDMA0_MSTR_IF_E2E_CRDT_BASE 0x4C8C800ull
+#define PDMA0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PDMA0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPDMA0_MSTR_IF_AXUSER_BASE 0x4C8CA80ull
+#define PDMA0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PDMA0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPDMA0_MSTR_IF_DBG_HBW_BASE 0x4C8CB00ull
+#define PDMA0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PDMA0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPDMA0_MSTR_IF_DBG_LBW_BASE 0x4C8CB80ull
+#define PDMA0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PDMA0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPDMA0_MSTR_IF_CORE_HBW_BASE 0x4C8CC00ull
+#define PDMA0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PDMA0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPDMA0_MSTR_IF_CORE_LBW_BASE 0x4C8CD80ull
+#define PDMA0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PDMA0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPDMA0_MSTR_IF_SPECIAL_BASE 0x4C8CE80ull
+#define PDMA0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmPDMA1_QM_ARC_DCCM_BASE 0x4C90000ull
+#define PDMA1_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define PDMA1_QM_ARC_DCCM_SECTION 0x8000
+#define mmPDMA1_QM_ARC_AUX_BASE 0x4C98000ull
+#define PDMA1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define PDMA1_QM_ARC_AUX_SECTION 0xE800
+#define mmPDMA1_QM_ARC_AUX_SPECIAL_BASE 0x4C98E80ull
+#define PDMA1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmPDMA1_QM_BASE 0x4C9A000ull
+#define PDMA1_QM_MAX_OFFSET 0x1000
+#define PDMA1_QM_SECTION 0x9000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x4C9A900ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x4C9A908ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x4C9A910ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x4C9A918ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x4C9A920ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x4C9A928ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x4C9A930ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x4C9A938ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x4C9A940ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x4C9A948ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x4C9A950ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x4C9A958ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x4C9A960ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x4C9A968ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x4C9A970ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmPDMA1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x4C9A978ull
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define PDMA1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmPDMA1_QM_AXUSER_SECURED_BASE 0x4C9AB00ull
+#define PDMA1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define PDMA1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmPDMA1_QM_AXUSER_NONSECURED_BASE 0x4C9AB80ull
+#define PDMA1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define PDMA1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmPDMA1_QM_DBG_HBW_BASE 0x4C9AC00ull
+#define PDMA1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define PDMA1_QM_DBG_HBW_SECTION 0x8000
+#define mmPDMA1_QM_DBG_LBW_BASE 0x4C9AC80ull
+#define PDMA1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define PDMA1_QM_DBG_LBW_SECTION 0x1000
+#define mmPDMA1_QM_CGM_BASE 0x4C9AD80ull
+#define PDMA1_QM_CGM_MAX_OFFSET 0xC000
+#define PDMA1_QM_CGM_SECTION 0x1000
+#define mmPDMA1_QM_SPECIAL_BASE 0x4C9AE80ull
+#define PDMA1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA1_QM_SPECIAL_SECTION 0x1800
+#define mmPDMA1_CORE_BASE 0x4C9B000ull
+#define PDMA1_CORE_MAX_OFFSET 0x1000
+#define PDMA1_CORE_SECTION 0x8000
+#define mmPDMA1_CORE_CTX_AXUSER_BASE 0x4C9B800ull
+#define PDMA1_CORE_CTX_AXUSER_MAX_OFFSET 0x5000
+#define PDMA1_CORE_CTX_AXUSER_SECTION 0x6000
+#define mmPDMA1_CORE_CTX_BASE 0x4C9B860ull
+#define PDMA1_CORE_CTX_MAX_OFFSET 0x9000
+#define PDMA1_CORE_CTX_SECTION 0x5A00
+#define mmPDMA1_CORE_KDMA_CGM_BASE 0x4C9BE00ull
+#define PDMA1_CORE_KDMA_CGM_MAX_OFFSET 0xC000
+#define PDMA1_CORE_KDMA_CGM_SECTION 0x8000
+#define mmPDMA1_CORE_SPECIAL_BASE 0x4C9BE80ull
+#define PDMA1_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA1_CORE_SPECIAL_SECTION 0x1800
+#define mmPDMA1_MSTR_IF_RR_SHRD_HBW_BASE 0x4C9C000ull
+#define PDMA1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PDMA1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPDMA1_MSTR_IF_RR_PRVT_HBW_BASE 0x4C9C200ull
+#define PDMA1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PDMA1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPDMA1_MSTR_IF_RR_SHRD_LBW_BASE 0x4C9C400ull
+#define PDMA1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PDMA1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPDMA1_MSTR_IF_RR_PRVT_LBW_BASE 0x4C9C600ull
+#define PDMA1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PDMA1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPDMA1_MSTR_IF_E2E_CRDT_BASE 0x4C9C800ull
+#define PDMA1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PDMA1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPDMA1_MSTR_IF_AXUSER_BASE 0x4C9CA80ull
+#define PDMA1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PDMA1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPDMA1_MSTR_IF_DBG_HBW_BASE 0x4C9CB00ull
+#define PDMA1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PDMA1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPDMA1_MSTR_IF_DBG_LBW_BASE 0x4C9CB80ull
+#define PDMA1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PDMA1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPDMA1_MSTR_IF_CORE_HBW_BASE 0x4C9CC00ull
+#define PDMA1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PDMA1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPDMA1_MSTR_IF_CORE_LBW_BASE 0x4C9CD80ull
+#define PDMA1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PDMA1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPDMA1_MSTR_IF_SPECIAL_BASE 0x4C9CE80ull
+#define PDMA1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PDMA1_MSTR_IF_SPECIAL_SECTION 0x23180
+#define mmCPU_CA53_CFG_BASE 0x4CC0000ull
+#define CPU_CA53_CFG_MAX_OFFSET 0x1000
+#define CPU_CA53_CFG_SECTION 0xE800
+#define mmCPU_CA53_CFG_SPECIAL_BASE 0x4CC0E80ull
+#define CPU_CA53_CFG_SPECIAL_MAX_OFFSET 0x1800
+#define CPU_CA53_CFG_SPECIAL_SECTION 0x1800
+#define mmCPU_IF_BASE 0x4CC1000ull
+#define CPU_IF_MAX_OFFSET 0x1000
+#define CPU_IF_SECTION 0xE800
+#define mmCPU_IF_SPECIAL_BASE 0x4CC1E80ull
+#define CPU_IF_SPECIAL_MAX_OFFSET 0x1800
+#define CPU_IF_SPECIAL_SECTION 0x1800
+#define mmCPU_TIMESTAMP_BASE 0x4CC2000ull
+#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
+#define CPU_TIMESTAMP_SECTION 0x1000
+#define mmCPU_MSTR_IF_RR_SHRD_HBW_BASE 0x4CC3000ull
+#define CPU_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define CPU_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmCPU_MSTR_IF_RR_PRVT_HBW_BASE 0x4CC3200ull
+#define CPU_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define CPU_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmCPU_MSTR_IF_RR_SHRD_LBW_BASE 0x4CC3400ull
+#define CPU_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define CPU_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmCPU_MSTR_IF_RR_PRVT_LBW_BASE 0x4CC3600ull
+#define CPU_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define CPU_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmCPU_MSTR_IF_E2E_CRDT_BASE 0x4CC3800ull
+#define CPU_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define CPU_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmCPU_MSTR_IF_AXUSER_BASE 0x4CC3A80ull
+#define CPU_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define CPU_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmCPU_MSTR_IF_DBG_HBW_BASE 0x4CC3B00ull
+#define CPU_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define CPU_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmCPU_MSTR_IF_DBG_LBW_BASE 0x4CC3B80ull
+#define CPU_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define CPU_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmCPU_MSTR_IF_CORE_HBW_BASE 0x4CC3C00ull
+#define CPU_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define CPU_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmCPU_MSTR_IF_CORE_LBW_BASE 0x4CC3D80ull
+#define CPU_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define CPU_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmCPU_MSTR_IF_SPECIAL_BASE 0x4CC3E80ull
+#define CPU_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define CPU_MSTR_IF_SPECIAL_SECTION 0x3C180
+#define mmPMMU_HBW_MMU_BASE 0x4D00000ull
+#define PMMU_HBW_MMU_MAX_OFFSET 0x1000
+#define PMMU_HBW_MMU_SECTION 0xE800
+#define mmPMMU_HBW_MMU_SPECIAL_BASE 0x4D00E80ull
+#define PMMU_HBW_MMU_SPECIAL_MAX_OFFSET 0x1800
+#define PMMU_HBW_MMU_SPECIAL_SECTION 0x1800
+#define mmPMMU_HBW_STLB_BASE 0x4D01000ull
+#define PMMU_HBW_STLB_MAX_OFFSET 0x1000
+#define PMMU_HBW_STLB_SECTION 0xE800
+#define mmPMMU_HBW_STLB_SPECIAL_BASE 0x4D01E80ull
+#define PMMU_HBW_STLB_SPECIAL_MAX_OFFSET 0x1800
+#define PMMU_HBW_STLB_SPECIAL_SECTION 0x1800
+#define mmPMMU_HBW_MSTR_IF_RR_SHRD_HBW_BASE 0x4D02000ull
+#define PMMU_HBW_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PMMU_HBW_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPMMU_HBW_MSTR_IF_RR_PRVT_HBW_BASE 0x4D02200ull
+#define PMMU_HBW_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PMMU_HBW_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPMMU_HBW_MSTR_IF_RR_SHRD_LBW_BASE 0x4D02400ull
+#define PMMU_HBW_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PMMU_HBW_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPMMU_HBW_MSTR_IF_RR_PRVT_LBW_BASE 0x4D02600ull
+#define PMMU_HBW_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PMMU_HBW_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPMMU_HBW_MSTR_IF_E2E_CRDT_BASE 0x4D02800ull
+#define PMMU_HBW_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PMMU_HBW_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPMMU_HBW_MSTR_IF_AXUSER_BASE 0x4D02A80ull
+#define PMMU_HBW_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PMMU_HBW_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPMMU_HBW_MSTR_IF_DBG_HBW_BASE 0x4D02B00ull
+#define PMMU_HBW_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PMMU_HBW_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPMMU_HBW_MSTR_IF_DBG_LBW_BASE 0x4D02B80ull
+#define PMMU_HBW_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PMMU_HBW_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPMMU_HBW_MSTR_IF_CORE_HBW_BASE 0x4D02C00ull
+#define PMMU_HBW_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PMMU_HBW_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPMMU_HBW_MSTR_IF_CORE_LBW_BASE 0x4D02D80ull
+#define PMMU_HBW_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PMMU_HBW_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPMMU_HBW_MSTR_IF_SPECIAL_BASE 0x4D02E80ull
+#define PMMU_HBW_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PMMU_HBW_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmPMMU_PIF_BASE 0x4D03000ull
+#define PMMU_PIF_MAX_OFFSET 0x1000
+#define PMMU_PIF_SECTION 0xE800
+#define mmPMMU_PIF_SPECIAL_BASE 0x4D03E80ull
+#define PMMU_PIF_SPECIAL_MAX_OFFSET 0x1800
+#define PMMU_PIF_SPECIAL_SECTION 0x1800
+#define mmPMMU_MME_PLL_CTRL_BASE 0x4D04000ull
+#define PMMU_MME_PLL_CTRL_MAX_OFFSET 0x3540
+#define PMMU_MME_PLL_CTRL_SECTION 0x3600
+#define mmPMMU_MME_PLL_ASIF_SLV_BASE 0x4D04360ull
+#define PMMU_MME_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define PMMU_MME_PLL_ASIF_SLV_SECTION 0xA000
+#define mmPMMU_MME_PLL_DIV_0_RLX_BASE 0x4D04400ull
+#define PMMU_MME_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define PMMU_MME_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmPMMU_MME_PLL_DIV_1_RLX_BASE 0x4D04800ull
+#define PMMU_MME_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define PMMU_MME_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmPMMU_MME_PLL_DIV_2_RLX_BASE 0x4D04A00ull
+#define PMMU_MME_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define PMMU_MME_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmPMMU_MME_PLL_DIV_3_RLX_BASE 0x4D04C00ull
+#define PMMU_MME_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define PMMU_MME_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmPMMU_MME_PLL_SPECIAL_BASE 0x4D04E80ull
+#define PMMU_MME_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define PMMU_MME_PLL_SPECIAL_SECTION 0x1800
+#define mmPMMU_VID_PLL_CTRL_BASE 0x4D05000ull
+#define PMMU_VID_PLL_CTRL_MAX_OFFSET 0x3540
+#define PMMU_VID_PLL_CTRL_SECTION 0x3600
+#define mmPMMU_VID_PLL_ASIF_SLV_BASE 0x4D05360ull
+#define PMMU_VID_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define PMMU_VID_PLL_ASIF_SLV_SECTION 0xA000
+#define mmPMMU_VID_PLL_DIV_0_RLX_BASE 0x4D05400ull
+#define PMMU_VID_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define PMMU_VID_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmPMMU_VID_PLL_DIV_1_RLX_BASE 0x4D05800ull
+#define PMMU_VID_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define PMMU_VID_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmPMMU_VID_PLL_DIV_2_RLX_BASE 0x4D05A00ull
+#define PMMU_VID_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define PMMU_VID_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmPMMU_VID_PLL_DIV_3_RLX_BASE 0x4D05C00ull
+#define PMMU_VID_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define PMMU_VID_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmPMMU_VID_PLL_SPECIAL_BASE 0x4D05E80ull
+#define PMMU_VID_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define PMMU_VID_PLL_SPECIAL_SECTION 0x3A180
+#define mmXBAR_MID_0_BASE 0x4D40000ull
+#define XBAR_MID_0_MAX_OFFSET 0x1000
+#define XBAR_MID_0_SECTION 0xE800
+#define mmXBAR_MID_0_SPECIAL_BASE 0x4D40E80ull
+#define XBAR_MID_0_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_MID_0_SPECIAL_SECTION 0x1800
+#define mmDCORE0_XBAR_DMA_PLL_CTRL_BASE 0x4D41000ull
+#define DCORE0_XBAR_DMA_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_XBAR_DMA_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_XBAR_DMA_PLL_ASIF_SLV_BASE 0x4D41360ull
+#define DCORE0_XBAR_DMA_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_XBAR_DMA_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_XBAR_DMA_PLL_DIV_0_RLX_BASE 0x4D41400ull
+#define DCORE0_XBAR_DMA_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_DMA_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_XBAR_DMA_PLL_DIV_1_RLX_BASE 0x4D41800ull
+#define DCORE0_XBAR_DMA_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_DMA_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_DMA_PLL_DIV_2_RLX_BASE 0x4D41A00ull
+#define DCORE0_XBAR_DMA_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_DMA_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_DMA_PLL_DIV_3_RLX_BASE 0x4D41C00ull
+#define DCORE0_XBAR_DMA_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_DMA_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_XBAR_DMA_PLL_SPECIAL_BASE 0x4D41E80ull
+#define DCORE0_XBAR_DMA_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_DMA_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_XBAR_MMU_PLL_CTRL_BASE 0x4D42000ull
+#define DCORE0_XBAR_MMU_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_XBAR_MMU_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_XBAR_MMU_PLL_ASIF_SLV_BASE 0x4D42360ull
+#define DCORE0_XBAR_MMU_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_XBAR_MMU_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_XBAR_MMU_PLL_DIV_0_RLX_BASE 0x4D42400ull
+#define DCORE0_XBAR_MMU_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_MMU_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_XBAR_MMU_PLL_DIV_1_RLX_BASE 0x4D42800ull
+#define DCORE0_XBAR_MMU_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_MMU_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_MMU_PLL_DIV_2_RLX_BASE 0x4D42A00ull
+#define DCORE0_XBAR_MMU_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_MMU_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_MMU_PLL_DIV_3_RLX_BASE 0x4D42C00ull
+#define DCORE0_XBAR_MMU_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_MMU_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_XBAR_MMU_PLL_SPECIAL_BASE 0x4D42E80ull
+#define DCORE0_XBAR_MMU_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_MMU_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_XBAR_IF_PLL_CTRL_BASE 0x4D43000ull
+#define DCORE0_XBAR_IF_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_XBAR_IF_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_XBAR_IF_PLL_ASIF_SLV_BASE 0x4D43360ull
+#define DCORE0_XBAR_IF_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_XBAR_IF_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_XBAR_IF_PLL_DIV_0_RLX_BASE 0x4D43400ull
+#define DCORE0_XBAR_IF_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_IF_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_XBAR_IF_PLL_DIV_1_RLX_BASE 0x4D43800ull
+#define DCORE0_XBAR_IF_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_IF_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_IF_PLL_DIV_2_RLX_BASE 0x4D43A00ull
+#define DCORE0_XBAR_IF_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_IF_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_IF_PLL_DIV_3_RLX_BASE 0x4D43C00ull
+#define DCORE0_XBAR_IF_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_IF_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_XBAR_IF_PLL_SPECIAL_BASE 0x4D43E80ull
+#define DCORE0_XBAR_IF_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_IF_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_XBAR_MESH_PLL_CTRL_BASE 0x4D44000ull
+#define DCORE0_XBAR_MESH_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_XBAR_MESH_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_XBAR_MESH_PLL_ASIF_SLV_BASE 0x4D44360ull
+#define DCORE0_XBAR_MESH_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_XBAR_MESH_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_XBAR_MESH_PLL_DIV_0_RLX_BASE 0x4D44400ull
+#define DCORE0_XBAR_MESH_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_MESH_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_XBAR_MESH_PLL_DIV_1_RLX_BASE 0x4D44800ull
+#define DCORE0_XBAR_MESH_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_MESH_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_MESH_PLL_DIV_2_RLX_BASE 0x4D44A00ull
+#define DCORE0_XBAR_MESH_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_MESH_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_XBAR_MESH_PLL_DIV_3_RLX_BASE 0x4D44C00ull
+#define DCORE0_XBAR_MESH_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_XBAR_MESH_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_XBAR_MESH_PLL_SPECIAL_BASE 0x4D44E80ull
+#define DCORE0_XBAR_MESH_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_XBAR_MESH_PLL_SPECIAL_SECTION 0x3180
+#define mmXBAR_EDGE_0_BASE 0x4D48000ull
+#define XBAR_EDGE_0_MAX_OFFSET 0x1000
+#define XBAR_EDGE_0_SECTION 0xE800
+#define mmXBAR_EDGE_0_SPECIAL_BASE 0x4D48E80ull
+#define XBAR_EDGE_0_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_EDGE_0_SPECIAL_SECTION 0x7180
+#define mmXBAR_MID_1_BASE 0x4D50000ull
+#define XBAR_MID_1_MAX_OFFSET 0x1000
+#define XBAR_MID_1_SECTION 0xE800
+#define mmXBAR_MID_1_SPECIAL_BASE 0x4D50E80ull
+#define XBAR_MID_1_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_MID_1_SPECIAL_SECTION 0x1800
+#define mmDCORE1_XBAR_DMA_PLL_CTRL_BASE 0x4D51000ull
+#define DCORE1_XBAR_DMA_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_XBAR_DMA_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_XBAR_DMA_PLL_ASIF_SLV_BASE 0x4D51360ull
+#define DCORE1_XBAR_DMA_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_XBAR_DMA_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_XBAR_DMA_PLL_DIV_0_RLX_BASE 0x4D51400ull
+#define DCORE1_XBAR_DMA_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_DMA_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_XBAR_DMA_PLL_DIV_1_RLX_BASE 0x4D51800ull
+#define DCORE1_XBAR_DMA_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_DMA_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_DMA_PLL_DIV_2_RLX_BASE 0x4D51A00ull
+#define DCORE1_XBAR_DMA_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_DMA_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_DMA_PLL_DIV_3_RLX_BASE 0x4D51C00ull
+#define DCORE1_XBAR_DMA_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_DMA_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_XBAR_DMA_PLL_SPECIAL_BASE 0x4D51E80ull
+#define DCORE1_XBAR_DMA_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_DMA_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_XBAR_MMU_PLL_CTRL_BASE 0x4D52000ull
+#define DCORE1_XBAR_MMU_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_XBAR_MMU_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_XBAR_MMU_PLL_ASIF_SLV_BASE 0x4D52360ull
+#define DCORE1_XBAR_MMU_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_XBAR_MMU_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_XBAR_MMU_PLL_DIV_0_RLX_BASE 0x4D52400ull
+#define DCORE1_XBAR_MMU_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_MMU_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_XBAR_MMU_PLL_DIV_1_RLX_BASE 0x4D52800ull
+#define DCORE1_XBAR_MMU_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_MMU_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_MMU_PLL_DIV_2_RLX_BASE 0x4D52A00ull
+#define DCORE1_XBAR_MMU_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_MMU_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_MMU_PLL_DIV_3_RLX_BASE 0x4D52C00ull
+#define DCORE1_XBAR_MMU_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_MMU_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_XBAR_MMU_PLL_SPECIAL_BASE 0x4D52E80ull
+#define DCORE1_XBAR_MMU_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_MMU_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_XBAR_IF_PLL_CTRL_BASE 0x4D53000ull
+#define DCORE1_XBAR_IF_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_XBAR_IF_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_XBAR_IF_PLL_ASIF_SLV_BASE 0x4D53360ull
+#define DCORE1_XBAR_IF_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_XBAR_IF_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_XBAR_IF_PLL_DIV_0_RLX_BASE 0x4D53400ull
+#define DCORE1_XBAR_IF_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_IF_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_XBAR_IF_PLL_DIV_1_RLX_BASE 0x4D53800ull
+#define DCORE1_XBAR_IF_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_IF_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_IF_PLL_DIV_2_RLX_BASE 0x4D53A00ull
+#define DCORE1_XBAR_IF_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_IF_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_IF_PLL_DIV_3_RLX_BASE 0x4D53C00ull
+#define DCORE1_XBAR_IF_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_IF_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_XBAR_IF_PLL_SPECIAL_BASE 0x4D53E80ull
+#define DCORE1_XBAR_IF_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_IF_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_XBAR_MESH_PLL_CTRL_BASE 0x4D54000ull
+#define DCORE1_XBAR_MESH_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_XBAR_MESH_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_XBAR_MESH_PLL_ASIF_SLV_BASE 0x4D54360ull
+#define DCORE1_XBAR_MESH_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_XBAR_MESH_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_XBAR_MESH_PLL_DIV_0_RLX_BASE 0x4D54400ull
+#define DCORE1_XBAR_MESH_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_MESH_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_XBAR_MESH_PLL_DIV_1_RLX_BASE 0x4D54800ull
+#define DCORE1_XBAR_MESH_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_MESH_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_MESH_PLL_DIV_2_RLX_BASE 0x4D54A00ull
+#define DCORE1_XBAR_MESH_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_MESH_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_MESH_PLL_DIV_3_RLX_BASE 0x4D54C00ull
+#define DCORE1_XBAR_MESH_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_MESH_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_XBAR_MESH_PLL_SPECIAL_BASE 0x4D54E80ull
+#define DCORE1_XBAR_MESH_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_MESH_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_XBAR_HBM_PLL_CTRL_BASE 0x4D55000ull
+#define DCORE1_XBAR_HBM_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_XBAR_HBM_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_XBAR_HBM_PLL_ASIF_SLV_BASE 0x4D55360ull
+#define DCORE1_XBAR_HBM_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_XBAR_HBM_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_XBAR_HBM_PLL_DIV_0_RLX_BASE 0x4D55400ull
+#define DCORE1_XBAR_HBM_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_HBM_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_XBAR_HBM_PLL_DIV_1_RLX_BASE 0x4D55800ull
+#define DCORE1_XBAR_HBM_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_HBM_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_HBM_PLL_DIV_2_RLX_BASE 0x4D55A00ull
+#define DCORE1_XBAR_HBM_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_HBM_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_XBAR_HBM_PLL_DIV_3_RLX_BASE 0x4D55C00ull
+#define DCORE1_XBAR_HBM_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_XBAR_HBM_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_XBAR_HBM_PLL_SPECIAL_BASE 0x4D55E80ull
+#define DCORE1_XBAR_HBM_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_XBAR_HBM_PLL_SPECIAL_SECTION 0x2180
+#define mmXBAR_EDGE_1_BASE 0x4D58000ull
+#define XBAR_EDGE_1_MAX_OFFSET 0x1000
+#define XBAR_EDGE_1_SECTION 0xE800
+#define mmXBAR_EDGE_1_SPECIAL_BASE 0x4D58E80ull
+#define XBAR_EDGE_1_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_EDGE_1_SPECIAL_SECTION 0x7180
+#define mmXBAR_MID_2_BASE 0x4D60000ull
+#define XBAR_MID_2_MAX_OFFSET 0x1000
+#define XBAR_MID_2_SECTION 0xE800
+#define mmXBAR_MID_2_SPECIAL_BASE 0x4D60E80ull
+#define XBAR_MID_2_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_MID_2_SPECIAL_SECTION 0x1800
+#define mmDCORE2_XBAR_DMA_PLL_CTRL_BASE 0x4D61000ull
+#define DCORE2_XBAR_DMA_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_XBAR_DMA_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_XBAR_DMA_PLL_ASIF_SLV_BASE 0x4D61360ull
+#define DCORE2_XBAR_DMA_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_XBAR_DMA_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_XBAR_DMA_PLL_DIV_0_RLX_BASE 0x4D61400ull
+#define DCORE2_XBAR_DMA_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_DMA_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_XBAR_DMA_PLL_DIV_1_RLX_BASE 0x4D61800ull
+#define DCORE2_XBAR_DMA_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_DMA_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_DMA_PLL_DIV_2_RLX_BASE 0x4D61A00ull
+#define DCORE2_XBAR_DMA_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_DMA_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_DMA_PLL_DIV_3_RLX_BASE 0x4D61C00ull
+#define DCORE2_XBAR_DMA_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_DMA_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_XBAR_DMA_PLL_SPECIAL_BASE 0x4D61E80ull
+#define DCORE2_XBAR_DMA_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_DMA_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_XBAR_MMU_PLL_CTRL_BASE 0x4D62000ull
+#define DCORE2_XBAR_MMU_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_XBAR_MMU_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_XBAR_MMU_PLL_ASIF_SLV_BASE 0x4D62360ull
+#define DCORE2_XBAR_MMU_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_XBAR_MMU_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_XBAR_MMU_PLL_DIV_0_RLX_BASE 0x4D62400ull
+#define DCORE2_XBAR_MMU_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_MMU_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_XBAR_MMU_PLL_DIV_1_RLX_BASE 0x4D62800ull
+#define DCORE2_XBAR_MMU_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_MMU_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_MMU_PLL_DIV_2_RLX_BASE 0x4D62A00ull
+#define DCORE2_XBAR_MMU_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_MMU_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_MMU_PLL_DIV_3_RLX_BASE 0x4D62C00ull
+#define DCORE2_XBAR_MMU_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_MMU_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_XBAR_MMU_PLL_SPECIAL_BASE 0x4D62E80ull
+#define DCORE2_XBAR_MMU_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_MMU_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_XBAR_IF_PLL_CTRL_BASE 0x4D63000ull
+#define DCORE2_XBAR_IF_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_XBAR_IF_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_XBAR_IF_PLL_ASIF_SLV_BASE 0x4D63360ull
+#define DCORE2_XBAR_IF_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_XBAR_IF_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_XBAR_IF_PLL_DIV_0_RLX_BASE 0x4D63400ull
+#define DCORE2_XBAR_IF_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_IF_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_XBAR_IF_PLL_DIV_1_RLX_BASE 0x4D63800ull
+#define DCORE2_XBAR_IF_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_IF_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_IF_PLL_DIV_2_RLX_BASE 0x4D63A00ull
+#define DCORE2_XBAR_IF_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_IF_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_IF_PLL_DIV_3_RLX_BASE 0x4D63C00ull
+#define DCORE2_XBAR_IF_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_IF_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_XBAR_IF_PLL_SPECIAL_BASE 0x4D63E80ull
+#define DCORE2_XBAR_IF_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_IF_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_XBAR_BANK_PLL_CTRL_BASE 0x4D64000ull
+#define DCORE2_XBAR_BANK_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_XBAR_BANK_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_XBAR_BANK_PLL_ASIF_SLV_BASE 0x4D64360ull
+#define DCORE2_XBAR_BANK_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_XBAR_BANK_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_XBAR_BANK_PLL_DIV_0_RLX_BASE 0x4D64400ull
+#define DCORE2_XBAR_BANK_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_BANK_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_XBAR_BANK_PLL_DIV_1_RLX_BASE 0x4D64800ull
+#define DCORE2_XBAR_BANK_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_BANK_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_BANK_PLL_DIV_2_RLX_BASE 0x4D64A00ull
+#define DCORE2_XBAR_BANK_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_BANK_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_BANK_PLL_DIV_3_RLX_BASE 0x4D64C00ull
+#define DCORE2_XBAR_BANK_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_BANK_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_XBAR_BANK_PLL_SPECIAL_BASE 0x4D64E80ull
+#define DCORE2_XBAR_BANK_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_BANK_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_XBAR_HBM_PLL_CTRL_BASE 0x4D65000ull
+#define DCORE2_XBAR_HBM_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_XBAR_HBM_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_XBAR_HBM_PLL_ASIF_SLV_BASE 0x4D65360ull
+#define DCORE2_XBAR_HBM_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_XBAR_HBM_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_XBAR_HBM_PLL_DIV_0_RLX_BASE 0x4D65400ull
+#define DCORE2_XBAR_HBM_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_HBM_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_XBAR_HBM_PLL_DIV_1_RLX_BASE 0x4D65800ull
+#define DCORE2_XBAR_HBM_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_HBM_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_HBM_PLL_DIV_2_RLX_BASE 0x4D65A00ull
+#define DCORE2_XBAR_HBM_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_HBM_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_XBAR_HBM_PLL_DIV_3_RLX_BASE 0x4D65C00ull
+#define DCORE2_XBAR_HBM_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_XBAR_HBM_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_XBAR_HBM_PLL_SPECIAL_BASE 0x4D65E80ull
+#define DCORE2_XBAR_HBM_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_XBAR_HBM_PLL_SPECIAL_SECTION 0x2180
+#define mmXBAR_EDGE_2_BASE 0x4D68000ull
+#define XBAR_EDGE_2_MAX_OFFSET 0x1000
+#define XBAR_EDGE_2_SECTION 0xE800
+#define mmXBAR_EDGE_2_SPECIAL_BASE 0x4D68E80ull
+#define XBAR_EDGE_2_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_EDGE_2_SPECIAL_SECTION 0x7180
+#define mmXBAR_MID_3_BASE 0x4D70000ull
+#define XBAR_MID_3_MAX_OFFSET 0x1000
+#define XBAR_MID_3_SECTION 0xE800
+#define mmXBAR_MID_3_SPECIAL_BASE 0x4D70E80ull
+#define XBAR_MID_3_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_MID_3_SPECIAL_SECTION 0x1800
+#define mmDCORE3_XBAR_DMA_PLL_CTRL_BASE 0x4D71000ull
+#define DCORE3_XBAR_DMA_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_XBAR_DMA_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_XBAR_DMA_PLL_ASIF_SLV_BASE 0x4D71360ull
+#define DCORE3_XBAR_DMA_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_XBAR_DMA_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_XBAR_DMA_PLL_DIV_0_RLX_BASE 0x4D71400ull
+#define DCORE3_XBAR_DMA_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_DMA_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_XBAR_DMA_PLL_DIV_1_RLX_BASE 0x4D71800ull
+#define DCORE3_XBAR_DMA_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_DMA_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_DMA_PLL_DIV_2_RLX_BASE 0x4D71A00ull
+#define DCORE3_XBAR_DMA_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_DMA_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_DMA_PLL_DIV_3_RLX_BASE 0x4D71C00ull
+#define DCORE3_XBAR_DMA_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_DMA_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_XBAR_DMA_PLL_SPECIAL_BASE 0x4D71E80ull
+#define DCORE3_XBAR_DMA_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_DMA_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_XBAR_MMU_PLL_CTRL_BASE 0x4D72000ull
+#define DCORE3_XBAR_MMU_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_XBAR_MMU_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_XBAR_MMU_PLL_ASIF_SLV_BASE 0x4D72360ull
+#define DCORE3_XBAR_MMU_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_XBAR_MMU_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_XBAR_MMU_PLL_DIV_0_RLX_BASE 0x4D72400ull
+#define DCORE3_XBAR_MMU_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_MMU_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_XBAR_MMU_PLL_DIV_1_RLX_BASE 0x4D72800ull
+#define DCORE3_XBAR_MMU_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_MMU_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_MMU_PLL_DIV_2_RLX_BASE 0x4D72A00ull
+#define DCORE3_XBAR_MMU_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_MMU_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_MMU_PLL_DIV_3_RLX_BASE 0x4D72C00ull
+#define DCORE3_XBAR_MMU_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_MMU_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_XBAR_MMU_PLL_SPECIAL_BASE 0x4D72E80ull
+#define DCORE3_XBAR_MMU_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_MMU_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_XBAR_IF_PLL_CTRL_BASE 0x4D73000ull
+#define DCORE3_XBAR_IF_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_XBAR_IF_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_XBAR_IF_PLL_ASIF_SLV_BASE 0x4D73360ull
+#define DCORE3_XBAR_IF_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_XBAR_IF_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_XBAR_IF_PLL_DIV_0_RLX_BASE 0x4D73400ull
+#define DCORE3_XBAR_IF_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_IF_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_XBAR_IF_PLL_DIV_1_RLX_BASE 0x4D73800ull
+#define DCORE3_XBAR_IF_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_IF_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_IF_PLL_DIV_2_RLX_BASE 0x4D73A00ull
+#define DCORE3_XBAR_IF_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_IF_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_IF_PLL_DIV_3_RLX_BASE 0x4D73C00ull
+#define DCORE3_XBAR_IF_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_IF_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_XBAR_IF_PLL_SPECIAL_BASE 0x4D73E80ull
+#define DCORE3_XBAR_IF_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_IF_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_XBAR_BANK_PLL_CTRL_BASE 0x4D74000ull
+#define DCORE3_XBAR_BANK_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_XBAR_BANK_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_XBAR_BANK_PLL_ASIF_SLV_BASE 0x4D74360ull
+#define DCORE3_XBAR_BANK_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_XBAR_BANK_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_XBAR_BANK_PLL_DIV_0_RLX_BASE 0x4D74400ull
+#define DCORE3_XBAR_BANK_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_BANK_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_XBAR_BANK_PLL_DIV_1_RLX_BASE 0x4D74800ull
+#define DCORE3_XBAR_BANK_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_BANK_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_BANK_PLL_DIV_2_RLX_BASE 0x4D74A00ull
+#define DCORE3_XBAR_BANK_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_BANK_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_XBAR_BANK_PLL_DIV_3_RLX_BASE 0x4D74C00ull
+#define DCORE3_XBAR_BANK_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_XBAR_BANK_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_XBAR_BANK_PLL_SPECIAL_BASE 0x4D74E80ull
+#define DCORE3_XBAR_BANK_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_XBAR_BANK_PLL_SPECIAL_SECTION 0x3180
+#define mmXBAR_EDGE_3_BASE 0x4D78000ull
+#define XBAR_EDGE_3_MAX_OFFSET 0x1000
+#define XBAR_EDGE_3_SECTION 0xE800
+#define mmXBAR_EDGE_3_SPECIAL_BASE 0x4D78E80ull
+#define XBAR_EDGE_3_SPECIAL_MAX_OFFSET 0x1800
+#define XBAR_EDGE_3_SPECIAL_SECTION 0x7180
+#define mmPCIE_PMA_0_BASE 0x4D80000ull
+#define PCIE_PMA_0_MAX_OFFSET 0x40000
+#define PCIE_PMA_0_SECTION 0x40000
+#define mmPCIE_PMA_1_BASE 0x4DC0000ull
+#define PCIE_PMA_1_MAX_OFFSET 0x40000
+#define PCIE_PMA_1_SECTION 0x40000
+#define mmROT0_QM_ARC_DCCM_BASE 0x4E00000ull
+#define ROT0_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define ROT0_QM_ARC_DCCM_SECTION 0x8000
+#define mmROT0_QM_ARC_AUX_BASE 0x4E08000ull
+#define ROT0_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define ROT0_QM_ARC_AUX_SECTION 0xE800
+#define mmROT0_QM_ARC_AUX_SPECIAL_BASE 0x4E08E80ull
+#define ROT0_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define ROT0_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmROT0_QM_BASE 0x4E0A000ull
+#define ROT0_QM_MAX_OFFSET 0x1000
+#define ROT0_QM_SECTION 0x9000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR0_BASE 0x4E0A900ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR1_BASE 0x4E0A908ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR2_BASE 0x4E0A910ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR3_BASE 0x4E0A918ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR4_BASE 0x4E0A920ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR5_BASE 0x4E0A928ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR6_BASE 0x4E0A930ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR7_BASE 0x4E0A938ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR8_BASE 0x4E0A940ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR9_BASE 0x4E0A948ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR10_BASE 0x4E0A950ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR11_BASE 0x4E0A958ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR12_BASE 0x4E0A960ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR13_BASE 0x4E0A968ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR14_BASE 0x4E0A970ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmROT0_QM_QMAN_WR64_BASE_ADDR15_BASE 0x4E0A978ull
+#define ROT0_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define ROT0_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmROT0_QM_AXUSER_SECURED_BASE 0x4E0AB00ull
+#define ROT0_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define ROT0_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmROT0_QM_AXUSER_NONSECURED_BASE 0x4E0AB80ull
+#define ROT0_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define ROT0_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmROT0_QM_DBG_HBW_BASE 0x4E0AC00ull
+#define ROT0_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define ROT0_QM_DBG_HBW_SECTION 0x8000
+#define mmROT0_QM_DBG_LBW_BASE 0x4E0AC80ull
+#define ROT0_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define ROT0_QM_DBG_LBW_SECTION 0x1000
+#define mmROT0_QM_CGM_BASE 0x4E0AD80ull
+#define ROT0_QM_CGM_MAX_OFFSET 0xC000
+#define ROT0_QM_CGM_SECTION 0x1000
+#define mmROT0_QM_SPECIAL_BASE 0x4E0AE80ull
+#define ROT0_QM_SPECIAL_MAX_OFFSET 0x1800
+#define ROT0_QM_SPECIAL_SECTION 0x1800
+#define mmROT0_BASE 0x4E0B000ull
+#define ROT0_MAX_OFFSET 0x1000
+#define ROT0_SECTION 0x1000
+#define mmROT0_DESC_BASE 0x4E0B100ull
+#define ROT0_DESC_MAX_OFFSET 0x1080
+#define ROT0_DESC_SECTION 0xD800
+#define mmROT0_SPECIAL_BASE 0x4E0BE80ull
+#define ROT0_SPECIAL_MAX_OFFSET 0x1800
+#define ROT0_SPECIAL_SECTION 0x1800
+#define mmROT0_MSTR_IF_RR_SHRD_HBW_BASE 0x4E0C000ull
+#define ROT0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define ROT0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmROT0_MSTR_IF_RR_PRVT_HBW_BASE 0x4E0C200ull
+#define ROT0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define ROT0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmROT0_MSTR_IF_RR_SHRD_LBW_BASE 0x4E0C400ull
+#define ROT0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define ROT0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmROT0_MSTR_IF_RR_PRVT_LBW_BASE 0x4E0C600ull
+#define ROT0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define ROT0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmROT0_MSTR_IF_E2E_CRDT_BASE 0x4E0C800ull
+#define ROT0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define ROT0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmROT0_MSTR_IF_AXUSER_BASE 0x4E0CA80ull
+#define ROT0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define ROT0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmROT0_MSTR_IF_DBG_HBW_BASE 0x4E0CB00ull
+#define ROT0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define ROT0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmROT0_MSTR_IF_DBG_LBW_BASE 0x4E0CB80ull
+#define ROT0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define ROT0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmROT0_MSTR_IF_CORE_HBW_BASE 0x4E0CC00ull
+#define ROT0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define ROT0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmROT0_MSTR_IF_CORE_LBW_BASE 0x4E0CD80ull
+#define ROT0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define ROT0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmROT0_MSTR_IF_SPECIAL_BASE 0x4E0CE80ull
+#define ROT0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define ROT0_MSTR_IF_SPECIAL_SECTION 0x3180
+#define mmROT1_QM_ARC_DCCM_BASE 0x4E10000ull
+#define ROT1_QM_ARC_DCCM_MAX_OFFSET 0x4000
+#define ROT1_QM_ARC_DCCM_SECTION 0x8000
+#define mmROT1_QM_ARC_AUX_BASE 0x4E18000ull
+#define ROT1_QM_ARC_AUX_MAX_OFFSET 0x1000
+#define ROT1_QM_ARC_AUX_SECTION 0xE800
+#define mmROT1_QM_ARC_AUX_SPECIAL_BASE 0x4E18E80ull
+#define ROT1_QM_ARC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define ROT1_QM_ARC_AUX_SPECIAL_SECTION 0x1180
+#define mmROT1_QM_BASE 0x4E1A000ull
+#define ROT1_QM_MAX_OFFSET 0x1000
+#define ROT1_QM_SECTION 0x9000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR0_BASE 0x4E1A900ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR1_BASE 0x4E1A908ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR2_BASE 0x4E1A910ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR3_BASE 0x4E1A918ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR4_BASE 0x4E1A920ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR5_BASE 0x4E1A928ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR6_BASE 0x4E1A930ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR7_BASE 0x4E1A938ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR8_BASE 0x4E1A940ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR9_BASE 0x4E1A948ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR10_BASE 0x4E1A950ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR11_BASE 0x4E1A958ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR12_BASE 0x4E1A960ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR13_BASE 0x4E1A968ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR14_BASE 0x4E1A970ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmROT1_QM_QMAN_WR64_BASE_ADDR15_BASE 0x4E1A978ull
+#define ROT1_QM_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define ROT1_QM_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmROT1_QM_AXUSER_SECURED_BASE 0x4E1AB00ull
+#define ROT1_QM_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define ROT1_QM_AXUSER_SECURED_SECTION 0x8000
+#define mmROT1_QM_AXUSER_NONSECURED_BASE 0x4E1AB80ull
+#define ROT1_QM_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define ROT1_QM_AXUSER_NONSECURED_SECTION 0x8000
+#define mmROT1_QM_DBG_HBW_BASE 0x4E1AC00ull
+#define ROT1_QM_DBG_HBW_MAX_OFFSET 0x5800
+#define ROT1_QM_DBG_HBW_SECTION 0x8000
+#define mmROT1_QM_DBG_LBW_BASE 0x4E1AC80ull
+#define ROT1_QM_DBG_LBW_MAX_OFFSET 0x5800
+#define ROT1_QM_DBG_LBW_SECTION 0x1000
+#define mmROT1_QM_CGM_BASE 0x4E1AD80ull
+#define ROT1_QM_CGM_MAX_OFFSET 0xC000
+#define ROT1_QM_CGM_SECTION 0x1000
+#define mmROT1_QM_SPECIAL_BASE 0x4E1AE80ull
+#define ROT1_QM_SPECIAL_MAX_OFFSET 0x1800
+#define ROT1_QM_SPECIAL_SECTION 0x1800
+#define mmROT1_BASE 0x4E1B000ull
+#define ROT1_MAX_OFFSET 0x1000
+#define ROT1_SECTION 0x1000
+#define mmROT1_DESC_BASE 0x4E1B100ull
+#define ROT1_DESC_MAX_OFFSET 0x1080
+#define ROT1_DESC_SECTION 0xD800
+#define mmROT1_SPECIAL_BASE 0x4E1BE80ull
+#define ROT1_SPECIAL_MAX_OFFSET 0x1800
+#define ROT1_SPECIAL_SECTION 0x1800
+#define mmROT1_MSTR_IF_RR_SHRD_HBW_BASE 0x4E1C000ull
+#define ROT1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define ROT1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmROT1_MSTR_IF_RR_PRVT_HBW_BASE 0x4E1C200ull
+#define ROT1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define ROT1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmROT1_MSTR_IF_RR_SHRD_LBW_BASE 0x4E1C400ull
+#define ROT1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define ROT1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmROT1_MSTR_IF_RR_PRVT_LBW_BASE 0x4E1C600ull
+#define ROT1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define ROT1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmROT1_MSTR_IF_E2E_CRDT_BASE 0x4E1C800ull
+#define ROT1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define ROT1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmROT1_MSTR_IF_AXUSER_BASE 0x4E1CA80ull
+#define ROT1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define ROT1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmROT1_MSTR_IF_DBG_HBW_BASE 0x4E1CB00ull
+#define ROT1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define ROT1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmROT1_MSTR_IF_DBG_LBW_BASE 0x4E1CB80ull
+#define ROT1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define ROT1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmROT1_MSTR_IF_CORE_HBW_BASE 0x4E1CC00ull
+#define ROT1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define ROT1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmROT1_MSTR_IF_CORE_LBW_BASE 0x4E1CD80ull
+#define ROT1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define ROT1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmROT1_MSTR_IF_SPECIAL_BASE 0x4E1CE80ull
+#define ROT1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define ROT1_MSTR_IF_SPECIAL_SECTION 0x23180
+#define mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE 0x4E40000ull
+#define SFT0_HBW_RTR_IF0_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT0_HBW_RTR_IF0_RTR_CTRL_SECTION 0xE800
+#define mmSFT0_HBW_RTR_IF0_RTR_CTRL_SPECIAL_BASE 0x4E40E80ull
+#define SFT0_HBW_RTR_IF0_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF0_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF0_RTR_H3_BASE 0x4E41000ull
+#define SFT0_HBW_RTR_IF0_RTR_H3_MAX_OFFSET 0x1000
+#define SFT0_HBW_RTR_IF0_RTR_H3_SECTION 0xE800
+#define mmSFT0_HBW_RTR_IF0_RTR_H3_SPECIAL_BASE 0x4E41E80ull
+#define SFT0_HBW_RTR_IF0_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF0_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE 0x4E42000ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_BASE 0x4E42200ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_BASE 0x4E42400ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_BASE 0x4E42600ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT0_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_BASE 0x4E42800ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT0_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_AXUSER_BASE 0x4E42A80ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT0_HBW_RTR_IF0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_DBG_HBW_BASE 0x4E42B00ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT0_HBW_RTR_IF0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_DBG_LBW_BASE 0x4E42B80ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT0_HBW_RTR_IF0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_CORE_HBW_BASE 0x4E42C00ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT0_HBW_RTR_IF0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_CORE_LBW_BASE 0x4E42D80ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT0_HBW_RTR_IF0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT0_HBW_RTR_IF0_MSTR_IF_SPECIAL_BASE 0x4E42E80ull
+#define SFT0_HBW_RTR_IF0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF0_ADDR_DEC_HBW_BASE 0x4E43000ull
+#define SFT0_HBW_RTR_IF0_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT0_HBW_RTR_IF0_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT0_HBW_RTR_IF0_ADDR_DEC_LBW_BASE 0x4E43400ull
+#define SFT0_HBW_RTR_IF0_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT0_HBW_RTR_IF0_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT0_HBW_RTR_IF0_ADDR_DEC_SPECIAL_BASE 0x4E43E80ull
+#define SFT0_HBW_RTR_IF0_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF0_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE 0x4E44000ull
+#define SFT0_HBW_RTR_IF1_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT0_HBW_RTR_IF1_RTR_CTRL_SECTION 0xE800
+#define mmSFT0_HBW_RTR_IF1_RTR_CTRL_SPECIAL_BASE 0x4E44E80ull
+#define SFT0_HBW_RTR_IF1_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF1_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF1_RTR_H3_BASE 0x4E45000ull
+#define SFT0_HBW_RTR_IF1_RTR_H3_MAX_OFFSET 0x1000
+#define SFT0_HBW_RTR_IF1_RTR_H3_SECTION 0xE800
+#define mmSFT0_HBW_RTR_IF1_RTR_H3_SPECIAL_BASE 0x4E45E80ull
+#define SFT0_HBW_RTR_IF1_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF1_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE 0x4E46000ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_BASE 0x4E46200ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_BASE 0x4E46400ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_BASE 0x4E46600ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT0_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_BASE 0x4E46800ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT0_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_AXUSER_BASE 0x4E46A80ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT0_HBW_RTR_IF1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_DBG_HBW_BASE 0x4E46B00ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT0_HBW_RTR_IF1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_DBG_LBW_BASE 0x4E46B80ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT0_HBW_RTR_IF1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_CORE_HBW_BASE 0x4E46C00ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT0_HBW_RTR_IF1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_CORE_LBW_BASE 0x4E46D80ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT0_HBW_RTR_IF1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT0_HBW_RTR_IF1_MSTR_IF_SPECIAL_BASE 0x4E46E80ull
+#define SFT0_HBW_RTR_IF1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT0_HBW_RTR_IF1_ADDR_DEC_HBW_BASE 0x4E47000ull
+#define SFT0_HBW_RTR_IF1_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT0_HBW_RTR_IF1_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT0_HBW_RTR_IF1_ADDR_DEC_LBW_BASE 0x4E47400ull
+#define SFT0_HBW_RTR_IF1_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT0_HBW_RTR_IF1_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT0_HBW_RTR_IF1_ADDR_DEC_SPECIAL_BASE 0x4E47E80ull
+#define SFT0_HBW_RTR_IF1_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_HBW_RTR_IF1_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT0_LBW_RTR_IF_RTR_CTRL_BASE 0x4E48000ull
+#define SFT0_LBW_RTR_IF_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT0_LBW_RTR_IF_RTR_CTRL_SECTION 0xE800
+#define mmSFT0_LBW_RTR_IF_RTR_CTRL_SPECIAL_BASE 0x4E48E80ull
+#define SFT0_LBW_RTR_IF_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_LBW_RTR_IF_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT0_LBW_RTR_IF_RTR_H3_BASE 0x4E49000ull
+#define SFT0_LBW_RTR_IF_RTR_H3_MAX_OFFSET 0x1000
+#define SFT0_LBW_RTR_IF_RTR_H3_SECTION 0xE800
+#define mmSFT0_LBW_RTR_IF_RTR_H3_SPECIAL_BASE 0x4E49E80ull
+#define SFT0_LBW_RTR_IF_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_LBW_RTR_IF_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE 0x4E4A000ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_BASE 0x4E4A200ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_BASE 0x4E4A400ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_BASE 0x4E4A600ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT0_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_E2E_CRDT_BASE 0x4E4A800ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT0_LBW_RTR_IF_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_AXUSER_BASE 0x4E4AA80ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT0_LBW_RTR_IF_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_DBG_HBW_BASE 0x4E4AB00ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT0_LBW_RTR_IF_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_DBG_LBW_BASE 0x4E4AB80ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT0_LBW_RTR_IF_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_CORE_HBW_BASE 0x4E4AC00ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT0_LBW_RTR_IF_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_CORE_LBW_BASE 0x4E4AD80ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT0_LBW_RTR_IF_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT0_LBW_RTR_IF_MSTR_IF_SPECIAL_BASE 0x4E4AE80ull
+#define SFT0_LBW_RTR_IF_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_LBW_RTR_IF_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT0_LBW_RTR_IF_ADDR_DEC_HBW_BASE 0x4E4B000ull
+#define SFT0_LBW_RTR_IF_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT0_LBW_RTR_IF_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT0_LBW_RTR_IF_ADDR_DEC_LBW_BASE 0x4E4B400ull
+#define SFT0_LBW_RTR_IF_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT0_LBW_RTR_IF_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT0_LBW_RTR_IF_ADDR_DEC_SPECIAL_BASE 0x4E4BE80ull
+#define SFT0_LBW_RTR_IF_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_LBW_RTR_IF_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT0_BASE 0x4E4C000ull
+#define SFT0_MAX_OFFSET 0x1000
+#define SFT0_SECTION 0xE800
+#define mmSFT0_SPECIAL_BASE 0x4E4CE80ull
+#define SFT0_SPECIAL_MAX_OFFSET 0x1800
+#define SFT0_SPECIAL_SECTION 0x3180
+#define mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE 0x4E50000ull
+#define SFT1_HBW_RTR_IF0_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT1_HBW_RTR_IF0_RTR_CTRL_SECTION 0xE800
+#define mmSFT1_HBW_RTR_IF0_RTR_CTRL_SPECIAL_BASE 0x4E50E80ull
+#define SFT1_HBW_RTR_IF0_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF0_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF0_RTR_H3_BASE 0x4E51000ull
+#define SFT1_HBW_RTR_IF0_RTR_H3_MAX_OFFSET 0x1000
+#define SFT1_HBW_RTR_IF0_RTR_H3_SECTION 0xE800
+#define mmSFT1_HBW_RTR_IF0_RTR_H3_SPECIAL_BASE 0x4E51E80ull
+#define SFT1_HBW_RTR_IF0_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF0_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE 0x4E52000ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_BASE 0x4E52200ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_BASE 0x4E52400ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_BASE 0x4E52600ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT1_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_BASE 0x4E52800ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT1_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_AXUSER_BASE 0x4E52A80ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT1_HBW_RTR_IF0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_DBG_HBW_BASE 0x4E52B00ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT1_HBW_RTR_IF0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_DBG_LBW_BASE 0x4E52B80ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT1_HBW_RTR_IF0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_CORE_HBW_BASE 0x4E52C00ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT1_HBW_RTR_IF0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_CORE_LBW_BASE 0x4E52D80ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT1_HBW_RTR_IF0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT1_HBW_RTR_IF0_MSTR_IF_SPECIAL_BASE 0x4E52E80ull
+#define SFT1_HBW_RTR_IF0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF0_ADDR_DEC_HBW_BASE 0x4E53000ull
+#define SFT1_HBW_RTR_IF0_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT1_HBW_RTR_IF0_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT1_HBW_RTR_IF0_ADDR_DEC_LBW_BASE 0x4E53400ull
+#define SFT1_HBW_RTR_IF0_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT1_HBW_RTR_IF0_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT1_HBW_RTR_IF0_ADDR_DEC_SPECIAL_BASE 0x4E53E80ull
+#define SFT1_HBW_RTR_IF0_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF0_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF1_RTR_CTRL_BASE 0x4E54000ull
+#define SFT1_HBW_RTR_IF1_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT1_HBW_RTR_IF1_RTR_CTRL_SECTION 0xE800
+#define mmSFT1_HBW_RTR_IF1_RTR_CTRL_SPECIAL_BASE 0x4E54E80ull
+#define SFT1_HBW_RTR_IF1_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF1_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF1_RTR_H3_BASE 0x4E55000ull
+#define SFT1_HBW_RTR_IF1_RTR_H3_MAX_OFFSET 0x1000
+#define SFT1_HBW_RTR_IF1_RTR_H3_SECTION 0xE800
+#define mmSFT1_HBW_RTR_IF1_RTR_H3_SPECIAL_BASE 0x4E55E80ull
+#define SFT1_HBW_RTR_IF1_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF1_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE 0x4E56000ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_BASE 0x4E56200ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_BASE 0x4E56400ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_BASE 0x4E56600ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT1_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_BASE 0x4E56800ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT1_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_AXUSER_BASE 0x4E56A80ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT1_HBW_RTR_IF1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_DBG_HBW_BASE 0x4E56B00ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT1_HBW_RTR_IF1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_DBG_LBW_BASE 0x4E56B80ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT1_HBW_RTR_IF1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_CORE_HBW_BASE 0x4E56C00ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT1_HBW_RTR_IF1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_CORE_LBW_BASE 0x4E56D80ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT1_HBW_RTR_IF1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT1_HBW_RTR_IF1_MSTR_IF_SPECIAL_BASE 0x4E56E80ull
+#define SFT1_HBW_RTR_IF1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT1_HBW_RTR_IF1_ADDR_DEC_HBW_BASE 0x4E57000ull
+#define SFT1_HBW_RTR_IF1_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT1_HBW_RTR_IF1_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT1_HBW_RTR_IF1_ADDR_DEC_LBW_BASE 0x4E57400ull
+#define SFT1_HBW_RTR_IF1_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT1_HBW_RTR_IF1_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT1_HBW_RTR_IF1_ADDR_DEC_SPECIAL_BASE 0x4E57E80ull
+#define SFT1_HBW_RTR_IF1_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_HBW_RTR_IF1_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT1_LBW_RTR_IF_RTR_CTRL_BASE 0x4E58000ull
+#define SFT1_LBW_RTR_IF_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT1_LBW_RTR_IF_RTR_CTRL_SECTION 0xE800
+#define mmSFT1_LBW_RTR_IF_RTR_CTRL_SPECIAL_BASE 0x4E58E80ull
+#define SFT1_LBW_RTR_IF_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_LBW_RTR_IF_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT1_LBW_RTR_IF_RTR_H3_BASE 0x4E59000ull
+#define SFT1_LBW_RTR_IF_RTR_H3_MAX_OFFSET 0x1000
+#define SFT1_LBW_RTR_IF_RTR_H3_SECTION 0xE800
+#define mmSFT1_LBW_RTR_IF_RTR_H3_SPECIAL_BASE 0x4E59E80ull
+#define SFT1_LBW_RTR_IF_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_LBW_RTR_IF_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE 0x4E5A000ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_BASE 0x4E5A200ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_BASE 0x4E5A400ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_BASE 0x4E5A600ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT1_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_E2E_CRDT_BASE 0x4E5A800ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT1_LBW_RTR_IF_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_AXUSER_BASE 0x4E5AA80ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT1_LBW_RTR_IF_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_DBG_HBW_BASE 0x4E5AB00ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT1_LBW_RTR_IF_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_DBG_LBW_BASE 0x4E5AB80ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT1_LBW_RTR_IF_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_CORE_HBW_BASE 0x4E5AC00ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT1_LBW_RTR_IF_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_CORE_LBW_BASE 0x4E5AD80ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT1_LBW_RTR_IF_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT1_LBW_RTR_IF_MSTR_IF_SPECIAL_BASE 0x4E5AE80ull
+#define SFT1_LBW_RTR_IF_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_LBW_RTR_IF_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT1_LBW_RTR_IF_ADDR_DEC_HBW_BASE 0x4E5B000ull
+#define SFT1_LBW_RTR_IF_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT1_LBW_RTR_IF_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT1_LBW_RTR_IF_ADDR_DEC_LBW_BASE 0x4E5B400ull
+#define SFT1_LBW_RTR_IF_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT1_LBW_RTR_IF_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT1_LBW_RTR_IF_ADDR_DEC_SPECIAL_BASE 0x4E5BE80ull
+#define SFT1_LBW_RTR_IF_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_LBW_RTR_IF_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT1_BASE 0x4E5C000ull
+#define SFT1_MAX_OFFSET 0x1000
+#define SFT1_SECTION 0xE800
+#define mmSFT1_SPECIAL_BASE 0x4E5CE80ull
+#define SFT1_SPECIAL_MAX_OFFSET 0x1800
+#define SFT1_SPECIAL_SECTION 0x3180
+#define mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE 0x4E60000ull
+#define SFT2_HBW_RTR_IF0_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT2_HBW_RTR_IF0_RTR_CTRL_SECTION 0xE800
+#define mmSFT2_HBW_RTR_IF0_RTR_CTRL_SPECIAL_BASE 0x4E60E80ull
+#define SFT2_HBW_RTR_IF0_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF0_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF0_RTR_H3_BASE 0x4E61000ull
+#define SFT2_HBW_RTR_IF0_RTR_H3_MAX_OFFSET 0x1000
+#define SFT2_HBW_RTR_IF0_RTR_H3_SECTION 0xE800
+#define mmSFT2_HBW_RTR_IF0_RTR_H3_SPECIAL_BASE 0x4E61E80ull
+#define SFT2_HBW_RTR_IF0_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF0_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE 0x4E62000ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_BASE 0x4E62200ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_BASE 0x4E62400ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_BASE 0x4E62600ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT2_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_BASE 0x4E62800ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT2_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_AXUSER_BASE 0x4E62A80ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT2_HBW_RTR_IF0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_DBG_HBW_BASE 0x4E62B00ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT2_HBW_RTR_IF0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_DBG_LBW_BASE 0x4E62B80ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT2_HBW_RTR_IF0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_CORE_HBW_BASE 0x4E62C00ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT2_HBW_RTR_IF0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_CORE_LBW_BASE 0x4E62D80ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT2_HBW_RTR_IF0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT2_HBW_RTR_IF0_MSTR_IF_SPECIAL_BASE 0x4E62E80ull
+#define SFT2_HBW_RTR_IF0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF0_ADDR_DEC_HBW_BASE 0x4E63000ull
+#define SFT2_HBW_RTR_IF0_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT2_HBW_RTR_IF0_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT2_HBW_RTR_IF0_ADDR_DEC_LBW_BASE 0x4E63400ull
+#define SFT2_HBW_RTR_IF0_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT2_HBW_RTR_IF0_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT2_HBW_RTR_IF0_ADDR_DEC_SPECIAL_BASE 0x4E63E80ull
+#define SFT2_HBW_RTR_IF0_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF0_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE 0x4E64000ull
+#define SFT2_HBW_RTR_IF1_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT2_HBW_RTR_IF1_RTR_CTRL_SECTION 0xE800
+#define mmSFT2_HBW_RTR_IF1_RTR_CTRL_SPECIAL_BASE 0x4E64E80ull
+#define SFT2_HBW_RTR_IF1_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF1_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF1_RTR_H3_BASE 0x4E65000ull
+#define SFT2_HBW_RTR_IF1_RTR_H3_MAX_OFFSET 0x1000
+#define SFT2_HBW_RTR_IF1_RTR_H3_SECTION 0xE800
+#define mmSFT2_HBW_RTR_IF1_RTR_H3_SPECIAL_BASE 0x4E65E80ull
+#define SFT2_HBW_RTR_IF1_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF1_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE 0x4E66000ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_BASE 0x4E66200ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_BASE 0x4E66400ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_BASE 0x4E66600ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT2_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_BASE 0x4E66800ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT2_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_AXUSER_BASE 0x4E66A80ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT2_HBW_RTR_IF1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_DBG_HBW_BASE 0x4E66B00ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT2_HBW_RTR_IF1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_DBG_LBW_BASE 0x4E66B80ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT2_HBW_RTR_IF1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_CORE_HBW_BASE 0x4E66C00ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT2_HBW_RTR_IF1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_CORE_LBW_BASE 0x4E66D80ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT2_HBW_RTR_IF1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT2_HBW_RTR_IF1_MSTR_IF_SPECIAL_BASE 0x4E66E80ull
+#define SFT2_HBW_RTR_IF1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT2_HBW_RTR_IF1_ADDR_DEC_HBW_BASE 0x4E67000ull
+#define SFT2_HBW_RTR_IF1_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT2_HBW_RTR_IF1_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT2_HBW_RTR_IF1_ADDR_DEC_LBW_BASE 0x4E67400ull
+#define SFT2_HBW_RTR_IF1_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT2_HBW_RTR_IF1_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT2_HBW_RTR_IF1_ADDR_DEC_SPECIAL_BASE 0x4E67E80ull
+#define SFT2_HBW_RTR_IF1_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_HBW_RTR_IF1_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT2_LBW_RTR_IF_RTR_CTRL_BASE 0x4E68000ull
+#define SFT2_LBW_RTR_IF_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT2_LBW_RTR_IF_RTR_CTRL_SECTION 0xE800
+#define mmSFT2_LBW_RTR_IF_RTR_CTRL_SPECIAL_BASE 0x4E68E80ull
+#define SFT2_LBW_RTR_IF_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_LBW_RTR_IF_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT2_LBW_RTR_IF_RTR_H3_BASE 0x4E69000ull
+#define SFT2_LBW_RTR_IF_RTR_H3_MAX_OFFSET 0x1000
+#define SFT2_LBW_RTR_IF_RTR_H3_SECTION 0xE800
+#define mmSFT2_LBW_RTR_IF_RTR_H3_SPECIAL_BASE 0x4E69E80ull
+#define SFT2_LBW_RTR_IF_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_LBW_RTR_IF_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE 0x4E6A000ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_BASE 0x4E6A200ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_BASE 0x4E6A400ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_BASE 0x4E6A600ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT2_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_E2E_CRDT_BASE 0x4E6A800ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT2_LBW_RTR_IF_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_AXUSER_BASE 0x4E6AA80ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT2_LBW_RTR_IF_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_DBG_HBW_BASE 0x4E6AB00ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT2_LBW_RTR_IF_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_DBG_LBW_BASE 0x4E6AB80ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT2_LBW_RTR_IF_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_CORE_HBW_BASE 0x4E6AC00ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT2_LBW_RTR_IF_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_CORE_LBW_BASE 0x4E6AD80ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT2_LBW_RTR_IF_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT2_LBW_RTR_IF_MSTR_IF_SPECIAL_BASE 0x4E6AE80ull
+#define SFT2_LBW_RTR_IF_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_LBW_RTR_IF_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT2_LBW_RTR_IF_ADDR_DEC_HBW_BASE 0x4E6B000ull
+#define SFT2_LBW_RTR_IF_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT2_LBW_RTR_IF_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT2_LBW_RTR_IF_ADDR_DEC_LBW_BASE 0x4E6B400ull
+#define SFT2_LBW_RTR_IF_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT2_LBW_RTR_IF_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT2_LBW_RTR_IF_ADDR_DEC_SPECIAL_BASE 0x4E6BE80ull
+#define SFT2_LBW_RTR_IF_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_LBW_RTR_IF_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT2_BASE 0x4E6C000ull
+#define SFT2_MAX_OFFSET 0x1000
+#define SFT2_SECTION 0xE800
+#define mmSFT2_SPECIAL_BASE 0x4E6CE80ull
+#define SFT2_SPECIAL_MAX_OFFSET 0x1800
+#define SFT2_SPECIAL_SECTION 0x3180
+#define mmSFT3_HBW_RTR_IF0_RTR_CTRL_BASE 0x4E70000ull
+#define SFT3_HBW_RTR_IF0_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT3_HBW_RTR_IF0_RTR_CTRL_SECTION 0xE800
+#define mmSFT3_HBW_RTR_IF0_RTR_CTRL_SPECIAL_BASE 0x4E70E80ull
+#define SFT3_HBW_RTR_IF0_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF0_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF0_RTR_H3_BASE 0x4E71000ull
+#define SFT3_HBW_RTR_IF0_RTR_H3_MAX_OFFSET 0x1000
+#define SFT3_HBW_RTR_IF0_RTR_H3_SECTION 0xE800
+#define mmSFT3_HBW_RTR_IF0_RTR_H3_SPECIAL_BASE 0x4E71E80ull
+#define SFT3_HBW_RTR_IF0_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF0_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE 0x4E72000ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_BASE 0x4E72200ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_BASE 0x4E72400ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_BASE 0x4E72600ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT3_HBW_RTR_IF0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_BASE 0x4E72800ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT3_HBW_RTR_IF0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_AXUSER_BASE 0x4E72A80ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT3_HBW_RTR_IF0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_DBG_HBW_BASE 0x4E72B00ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT3_HBW_RTR_IF0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_DBG_LBW_BASE 0x4E72B80ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT3_HBW_RTR_IF0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_CORE_HBW_BASE 0x4E72C00ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT3_HBW_RTR_IF0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_CORE_LBW_BASE 0x4E72D80ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT3_HBW_RTR_IF0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT3_HBW_RTR_IF0_MSTR_IF_SPECIAL_BASE 0x4E72E80ull
+#define SFT3_HBW_RTR_IF0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF0_ADDR_DEC_HBW_BASE 0x4E73000ull
+#define SFT3_HBW_RTR_IF0_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT3_HBW_RTR_IF0_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT3_HBW_RTR_IF0_ADDR_DEC_LBW_BASE 0x4E73400ull
+#define SFT3_HBW_RTR_IF0_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT3_HBW_RTR_IF0_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT3_HBW_RTR_IF0_ADDR_DEC_SPECIAL_BASE 0x4E73E80ull
+#define SFT3_HBW_RTR_IF0_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF0_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF1_RTR_CTRL_BASE 0x4E74000ull
+#define SFT3_HBW_RTR_IF1_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT3_HBW_RTR_IF1_RTR_CTRL_SECTION 0xE800
+#define mmSFT3_HBW_RTR_IF1_RTR_CTRL_SPECIAL_BASE 0x4E74E80ull
+#define SFT3_HBW_RTR_IF1_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF1_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF1_RTR_H3_BASE 0x4E75000ull
+#define SFT3_HBW_RTR_IF1_RTR_H3_MAX_OFFSET 0x1000
+#define SFT3_HBW_RTR_IF1_RTR_H3_SECTION 0xE800
+#define mmSFT3_HBW_RTR_IF1_RTR_H3_SPECIAL_BASE 0x4E75E80ull
+#define SFT3_HBW_RTR_IF1_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF1_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE 0x4E76000ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_BASE 0x4E76200ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_BASE 0x4E76400ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_BASE 0x4E76600ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT3_HBW_RTR_IF1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_BASE 0x4E76800ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT3_HBW_RTR_IF1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_AXUSER_BASE 0x4E76A80ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT3_HBW_RTR_IF1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_DBG_HBW_BASE 0x4E76B00ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT3_HBW_RTR_IF1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_DBG_LBW_BASE 0x4E76B80ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT3_HBW_RTR_IF1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_CORE_HBW_BASE 0x4E76C00ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT3_HBW_RTR_IF1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_CORE_LBW_BASE 0x4E76D80ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT3_HBW_RTR_IF1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT3_HBW_RTR_IF1_MSTR_IF_SPECIAL_BASE 0x4E76E80ull
+#define SFT3_HBW_RTR_IF1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT3_HBW_RTR_IF1_ADDR_DEC_HBW_BASE 0x4E77000ull
+#define SFT3_HBW_RTR_IF1_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT3_HBW_RTR_IF1_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT3_HBW_RTR_IF1_ADDR_DEC_LBW_BASE 0x4E77400ull
+#define SFT3_HBW_RTR_IF1_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT3_HBW_RTR_IF1_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT3_HBW_RTR_IF1_ADDR_DEC_SPECIAL_BASE 0x4E77E80ull
+#define SFT3_HBW_RTR_IF1_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_HBW_RTR_IF1_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT3_LBW_RTR_IF_RTR_CTRL_BASE 0x4E78000ull
+#define SFT3_LBW_RTR_IF_RTR_CTRL_MAX_OFFSET 0x1000
+#define SFT3_LBW_RTR_IF_RTR_CTRL_SECTION 0xE800
+#define mmSFT3_LBW_RTR_IF_RTR_CTRL_SPECIAL_BASE 0x4E78E80ull
+#define SFT3_LBW_RTR_IF_RTR_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_LBW_RTR_IF_RTR_CTRL_SPECIAL_SECTION 0x1800
+#define mmSFT3_LBW_RTR_IF_RTR_H3_BASE 0x4E79000ull
+#define SFT3_LBW_RTR_IF_RTR_H3_MAX_OFFSET 0x1000
+#define SFT3_LBW_RTR_IF_RTR_H3_SECTION 0xE800
+#define mmSFT3_LBW_RTR_IF_RTR_H3_SPECIAL_BASE 0x4E79E80ull
+#define SFT3_LBW_RTR_IF_RTR_H3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_LBW_RTR_IF_RTR_H3_SPECIAL_SECTION 0x1800
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE 0x4E7A000ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_BASE 0x4E7A200ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_BASE 0x4E7A400ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_BASE 0x4E7A600ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define SFT3_LBW_RTR_IF_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_E2E_CRDT_BASE 0x4E7A800ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define SFT3_LBW_RTR_IF_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_AXUSER_BASE 0x4E7AA80ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define SFT3_LBW_RTR_IF_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_DBG_HBW_BASE 0x4E7AB00ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define SFT3_LBW_RTR_IF_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_DBG_LBW_BASE 0x4E7AB80ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define SFT3_LBW_RTR_IF_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_CORE_HBW_BASE 0x4E7AC00ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define SFT3_LBW_RTR_IF_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_CORE_LBW_BASE 0x4E7AD80ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define SFT3_LBW_RTR_IF_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmSFT3_LBW_RTR_IF_MSTR_IF_SPECIAL_BASE 0x4E7AE80ull
+#define SFT3_LBW_RTR_IF_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_LBW_RTR_IF_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmSFT3_LBW_RTR_IF_ADDR_DEC_HBW_BASE 0x4E7B000ull
+#define SFT3_LBW_RTR_IF_ADDR_DEC_HBW_MAX_OFFSET 0x4000
+#define SFT3_LBW_RTR_IF_ADDR_DEC_HBW_SECTION 0x4000
+#define mmSFT3_LBW_RTR_IF_ADDR_DEC_LBW_BASE 0x4E7B400ull
+#define SFT3_LBW_RTR_IF_ADDR_DEC_LBW_MAX_OFFSET 0xA600
+#define SFT3_LBW_RTR_IF_ADDR_DEC_LBW_SECTION 0xA800
+#define mmSFT3_LBW_RTR_IF_ADDR_DEC_SPECIAL_BASE 0x4E7BE80ull
+#define SFT3_LBW_RTR_IF_ADDR_DEC_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_LBW_RTR_IF_ADDR_DEC_SPECIAL_SECTION 0x1800
+#define mmSFT3_BASE 0x4E7C000ull
+#define SFT3_MAX_OFFSET 0x1000
+#define SFT3_SECTION 0xE800
+#define mmSFT3_SPECIAL_BASE 0x4E7CE80ull
+#define SFT3_SPECIAL_MAX_OFFSET 0x1800
+#define SFT3_SPECIAL_SECTION 0x4180
+#define mmARC_FARM_FARM_BASE 0x4E81000ull
+#define ARC_FARM_FARM_MAX_OFFSET 0x1000
+#define ARC_FARM_FARM_SECTION 0xE800
+#define mmARC_FARM_FARM_SPECIAL_BASE 0x4E81E80ull
+#define ARC_FARM_FARM_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_FARM_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_FARM_MSTR_IF_RR_SHRD_HBW_BASE 0x4E82000ull
+#define ARC_FARM_FARM_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define ARC_FARM_FARM_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmARC_FARM_FARM_MSTR_IF_RR_PRVT_HBW_BASE 0x4E82200ull
+#define ARC_FARM_FARM_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define ARC_FARM_FARM_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmARC_FARM_FARM_MSTR_IF_RR_SHRD_LBW_BASE 0x4E82400ull
+#define ARC_FARM_FARM_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define ARC_FARM_FARM_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmARC_FARM_FARM_MSTR_IF_RR_PRVT_LBW_BASE 0x4E82600ull
+#define ARC_FARM_FARM_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define ARC_FARM_FARM_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmARC_FARM_FARM_MSTR_IF_E2E_CRDT_BASE 0x4E82800ull
+#define ARC_FARM_FARM_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define ARC_FARM_FARM_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmARC_FARM_FARM_MSTR_IF_AXUSER_BASE 0x4E82A80ull
+#define ARC_FARM_FARM_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_FARM_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmARC_FARM_FARM_MSTR_IF_DBG_HBW_BASE 0x4E82B00ull
+#define ARC_FARM_FARM_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define ARC_FARM_FARM_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmARC_FARM_FARM_MSTR_IF_DBG_LBW_BASE 0x4E82B80ull
+#define ARC_FARM_FARM_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define ARC_FARM_FARM_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmARC_FARM_FARM_MSTR_IF_CORE_HBW_BASE 0x4E82C00ull
+#define ARC_FARM_FARM_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define ARC_FARM_FARM_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmARC_FARM_FARM_MSTR_IF_CORE_LBW_BASE 0x4E82D80ull
+#define ARC_FARM_FARM_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define ARC_FARM_FARM_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmARC_FARM_FARM_MSTR_IF_SPECIAL_BASE 0x4E82E80ull
+#define ARC_FARM_FARM_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_FARM_MSTR_IF_SPECIAL_SECTION 0x5180
+#define mmARC_FARM_ARC0_AUX_BASE 0x4E88000ull
+#define ARC_FARM_ARC0_AUX_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC0_AUX_SECTION 0xE800
+#define mmARC_FARM_ARC0_AUX_SPECIAL_BASE 0x4E88E80ull
+#define ARC_FARM_ARC0_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC0_AUX_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC0_DUP_ENG_BASE 0x4E89000ull
+#define ARC_FARM_ARC0_DUP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC0_DUP_ENG_SECTION 0x9000
+#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_BASE 0x4E89900ull
+#define ARC_FARM_ARC0_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_ARC0_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmARC_FARM_ARC0_DUP_ENG_SPECIAL_BASE 0x4E89E80ull
+#define ARC_FARM_ARC0_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC0_DUP_ENG_SPECIAL_SECTION 0x1180
+#define mmARC_FARM_KDMA_BASE 0x4E8B000ull
+#define ARC_FARM_KDMA_MAX_OFFSET 0x1000
+#define ARC_FARM_KDMA_SECTION 0x8000
+#define mmARC_FARM_KDMA_CTX_AXUSER_BASE 0x4E8B800ull
+#define ARC_FARM_KDMA_CTX_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_KDMA_CTX_AXUSER_SECTION 0x6000
+#define mmARC_FARM_KDMA_CTX_BASE 0x4E8B860ull
+#define ARC_FARM_KDMA_CTX_MAX_OFFSET 0x9000
+#define ARC_FARM_KDMA_CTX_SECTION 0x5A00
+#define mmARC_FARM_KDMA_KDMA_CGM_BASE 0x4E8BE00ull
+#define ARC_FARM_KDMA_KDMA_CGM_MAX_OFFSET 0xC000
+#define ARC_FARM_KDMA_KDMA_CGM_SECTION 0x8000
+#define mmARC_FARM_KDMA_SPECIAL_BASE 0x4E8BE80ull
+#define ARC_FARM_KDMA_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_KDMA_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_KDMA_MSTR_IF_RR_SHRD_HBW_BASE 0x4E8C000ull
+#define ARC_FARM_KDMA_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define ARC_FARM_KDMA_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmARC_FARM_KDMA_MSTR_IF_RR_PRVT_HBW_BASE 0x4E8C200ull
+#define ARC_FARM_KDMA_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define ARC_FARM_KDMA_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmARC_FARM_KDMA_MSTR_IF_RR_SHRD_LBW_BASE 0x4E8C400ull
+#define ARC_FARM_KDMA_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define ARC_FARM_KDMA_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmARC_FARM_KDMA_MSTR_IF_RR_PRVT_LBW_BASE 0x4E8C600ull
+#define ARC_FARM_KDMA_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define ARC_FARM_KDMA_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmARC_FARM_KDMA_MSTR_IF_E2E_CRDT_BASE 0x4E8C800ull
+#define ARC_FARM_KDMA_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define ARC_FARM_KDMA_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmARC_FARM_KDMA_MSTR_IF_AXUSER_BASE 0x4E8CA80ull
+#define ARC_FARM_KDMA_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_KDMA_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmARC_FARM_KDMA_MSTR_IF_DBG_HBW_BASE 0x4E8CB00ull
+#define ARC_FARM_KDMA_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define ARC_FARM_KDMA_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmARC_FARM_KDMA_MSTR_IF_DBG_LBW_BASE 0x4E8CB80ull
+#define ARC_FARM_KDMA_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define ARC_FARM_KDMA_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmARC_FARM_KDMA_MSTR_IF_CORE_HBW_BASE 0x4E8CC00ull
+#define ARC_FARM_KDMA_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define ARC_FARM_KDMA_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmARC_FARM_KDMA_MSTR_IF_CORE_LBW_BASE 0x4E8CD80ull
+#define ARC_FARM_KDMA_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define ARC_FARM_KDMA_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmARC_FARM_KDMA_MSTR_IF_SPECIAL_BASE 0x4E8CE80ull
+#define ARC_FARM_KDMA_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_KDMA_MSTR_IF_SPECIAL_SECTION 0x2180
+#define mmARC_FARM_ARC0_ACP_ENG_BASE 0x4E8F000ull
+#define ARC_FARM_ARC0_ACP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC0_ACP_ENG_SECTION 0xE800
+#define mmARC_FARM_ARC0_ACP_ENG_SPECIAL_BASE 0x4E8FE80ull
+#define ARC_FARM_ARC0_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC0_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC0_DCCM0_BASE 0x4E90000ull
+#define ARC_FARM_ARC0_DCCM0_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC0_DCCM0_SECTION 0x8000
+#define mmARC_FARM_ARC0_DCCM1_BASE 0x4E98000ull
+#define ARC_FARM_ARC0_DCCM1_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC0_DCCM1_SECTION 0x10000
+#define mmARC_FARM_ARC1_AUX_BASE 0x4EA8000ull
+#define ARC_FARM_ARC1_AUX_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC1_AUX_SECTION 0xE800
+#define mmARC_FARM_ARC1_AUX_SPECIAL_BASE 0x4EA8E80ull
+#define ARC_FARM_ARC1_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC1_AUX_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC1_DUP_ENG_BASE 0x4EA9000ull
+#define ARC_FARM_ARC1_DUP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC1_DUP_ENG_SECTION 0x9000
+#define mmARC_FARM_ARC1_DUP_ENG_AXUSER_BASE 0x4EA9900ull
+#define ARC_FARM_ARC1_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_ARC1_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmARC_FARM_ARC1_DUP_ENG_SPECIAL_BASE 0x4EA9E80ull
+#define ARC_FARM_ARC1_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC1_DUP_ENG_SPECIAL_SECTION 0x5180
+#define mmARC_FARM_ARC1_ACP_ENG_BASE 0x4EAF000ull
+#define ARC_FARM_ARC1_ACP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC1_ACP_ENG_SECTION 0xE800
+#define mmARC_FARM_ARC1_ACP_ENG_SPECIAL_BASE 0x4EAFE80ull
+#define ARC_FARM_ARC1_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC1_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC1_DCCM0_BASE 0x4EB0000ull
+#define ARC_FARM_ARC1_DCCM0_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC1_DCCM0_SECTION 0x8000
+#define mmARC_FARM_ARC1_DCCM1_BASE 0x4EB8000ull
+#define ARC_FARM_ARC1_DCCM1_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC1_DCCM1_SECTION 0x10000
+#define mmARC_FARM_ARC2_AUX_BASE 0x4EC8000ull
+#define ARC_FARM_ARC2_AUX_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC2_AUX_SECTION 0xE800
+#define mmARC_FARM_ARC2_AUX_SPECIAL_BASE 0x4EC8E80ull
+#define ARC_FARM_ARC2_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC2_AUX_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC2_DUP_ENG_BASE 0x4EC9000ull
+#define ARC_FARM_ARC2_DUP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC2_DUP_ENG_SECTION 0x9000
+#define mmARC_FARM_ARC2_DUP_ENG_AXUSER_BASE 0x4EC9900ull
+#define ARC_FARM_ARC2_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_ARC2_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmARC_FARM_ARC2_DUP_ENG_SPECIAL_BASE 0x4EC9E80ull
+#define ARC_FARM_ARC2_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC2_DUP_ENG_SPECIAL_SECTION 0x5180
+#define mmARC_FARM_ARC2_ACP_ENG_BASE 0x4ECF000ull
+#define ARC_FARM_ARC2_ACP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC2_ACP_ENG_SECTION 0xE800
+#define mmARC_FARM_ARC2_ACP_ENG_SPECIAL_BASE 0x4ECFE80ull
+#define ARC_FARM_ARC2_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC2_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC2_DCCM0_BASE 0x4ED0000ull
+#define ARC_FARM_ARC2_DCCM0_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC2_DCCM0_SECTION 0x8000
+#define mmARC_FARM_ARC2_DCCM1_BASE 0x4ED8000ull
+#define ARC_FARM_ARC2_DCCM1_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC2_DCCM1_SECTION 0x10000
+#define mmARC_FARM_ARC3_AUX_BASE 0x4EE8000ull
+#define ARC_FARM_ARC3_AUX_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC3_AUX_SECTION 0xE800
+#define mmARC_FARM_ARC3_AUX_SPECIAL_BASE 0x4EE8E80ull
+#define ARC_FARM_ARC3_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC3_AUX_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC3_DUP_ENG_BASE 0x4EE9000ull
+#define ARC_FARM_ARC3_DUP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC3_DUP_ENG_SECTION 0x9000
+#define mmARC_FARM_ARC3_DUP_ENG_AXUSER_BASE 0x4EE9900ull
+#define ARC_FARM_ARC3_DUP_ENG_AXUSER_MAX_OFFSET 0x5000
+#define ARC_FARM_ARC3_DUP_ENG_AXUSER_SECTION 0x5800
+#define mmARC_FARM_ARC3_DUP_ENG_SPECIAL_BASE 0x4EE9E80ull
+#define ARC_FARM_ARC3_DUP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC3_DUP_ENG_SPECIAL_SECTION 0x5180
+#define mmARC_FARM_ARC3_ACP_ENG_BASE 0x4EEF000ull
+#define ARC_FARM_ARC3_ACP_ENG_MAX_OFFSET 0x1000
+#define ARC_FARM_ARC3_ACP_ENG_SECTION 0xE800
+#define mmARC_FARM_ARC3_ACP_ENG_SPECIAL_BASE 0x4EEFE80ull
+#define ARC_FARM_ARC3_ACP_ENG_SPECIAL_MAX_OFFSET 0x1800
+#define ARC_FARM_ARC3_ACP_ENG_SPECIAL_SECTION 0x1800
+#define mmARC_FARM_ARC3_DCCM0_BASE 0x4EF0000ull
+#define ARC_FARM_ARC3_DCCM0_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC3_DCCM0_SECTION 0x8000
+#define mmARC_FARM_ARC3_DCCM1_BASE 0x4EF8000ull
+#define ARC_FARM_ARC3_DCCM1_MAX_OFFSET 0x4000
+#define ARC_FARM_ARC3_DCCM1_SECTION 0x8000
+#define mmPCIE_DEC0_CMD_BASE 0x4F00000ull
+#define PCIE_DEC0_CMD_MAX_OFFSET 0x1100
+#define PCIE_DEC0_CMD_SECTION 0x1000
+#define mmPCIE_DEC0_VSI_BASE 0x4F01000ull
+#define PCIE_DEC0_VSI_MAX_OFFSET 0x6FC0
+#define PCIE_DEC0_VSI_SECTION 0x1000
+#define mmPCIE_DEC0_L2C_BASE 0x4F02000ull
+#define PCIE_DEC0_L2C_MAX_OFFSET 0x39C0
+#define PCIE_DEC0_L2C_SECTION 0x1000
+#define mmPCIE_VDEC0_BRDG_CTRL_BASE 0x4F03000ull
+#define PCIE_VDEC0_BRDG_CTRL_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_BRDG_CTRL_SECTION 0x8000
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x4F03800ull
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x4F03900ull
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x4F03A00ull
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x4F03B00ull
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_BASE 0x4F03C00ull
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define PCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmPCIE_VDEC0_BRDG_CTRL_SPECIAL_BASE 0x4F03E80ull
+#define PCIE_VDEC0_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_VDEC0_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmPCIE_VDEC0_CTRL_BASE 0x4F04000ull
+#define PCIE_VDEC0_CTRL_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_CTRL_SECTION 0xE800
+#define mmPCIE_VDEC0_CTRL_SPECIAL_BASE 0x4F04E80ull
+#define PCIE_VDEC0_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_VDEC0_CTRL_SPECIAL_SECTION 0x1800
+#define mmPCIE_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE 0x4F05000ull
+#define PCIE_VDEC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PCIE_VDEC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPCIE_VDEC0_MSTR_IF_RR_PRVT_HBW_BASE 0x4F05200ull
+#define PCIE_VDEC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PCIE_VDEC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPCIE_VDEC0_MSTR_IF_RR_SHRD_LBW_BASE 0x4F05400ull
+#define PCIE_VDEC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PCIE_VDEC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPCIE_VDEC0_MSTR_IF_RR_PRVT_LBW_BASE 0x4F05600ull
+#define PCIE_VDEC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PCIE_VDEC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPCIE_VDEC0_MSTR_IF_E2E_CRDT_BASE 0x4F05800ull
+#define PCIE_VDEC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PCIE_VDEC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPCIE_VDEC0_MSTR_IF_AXUSER_BASE 0x4F05A80ull
+#define PCIE_VDEC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PCIE_VDEC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPCIE_VDEC0_MSTR_IF_DBG_HBW_BASE 0x4F05B00ull
+#define PCIE_VDEC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PCIE_VDEC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPCIE_VDEC0_MSTR_IF_DBG_LBW_BASE 0x4F05B80ull
+#define PCIE_VDEC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PCIE_VDEC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPCIE_VDEC0_MSTR_IF_CORE_HBW_BASE 0x4F05C00ull
+#define PCIE_VDEC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PCIE_VDEC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPCIE_VDEC0_MSTR_IF_CORE_LBW_BASE 0x4F05D80ull
+#define PCIE_VDEC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PCIE_VDEC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPCIE_VDEC0_MSTR_IF_SPECIAL_BASE 0x4F05E80ull
+#define PCIE_VDEC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_VDEC0_MSTR_IF_SPECIAL_SECTION 0xA180
+#define mmPCIE_DEC1_CMD_BASE 0x4F10000ull
+#define PCIE_DEC1_CMD_MAX_OFFSET 0x1100
+#define PCIE_DEC1_CMD_SECTION 0x1000
+#define mmPCIE_DEC1_VSI_BASE 0x4F11000ull
+#define PCIE_DEC1_VSI_MAX_OFFSET 0x6FC0
+#define PCIE_DEC1_VSI_SECTION 0x1000
+#define mmPCIE_DEC1_L2C_BASE 0x4F12000ull
+#define PCIE_DEC1_L2C_MAX_OFFSET 0x39C0
+#define PCIE_DEC1_L2C_SECTION 0x1000
+#define mmPCIE_VDEC1_BRDG_CTRL_BASE 0x4F13000ull
+#define PCIE_VDEC1_BRDG_CTRL_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_BRDG_CTRL_SECTION 0x8000
+#define mmPCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_BASE 0x4F13800ull
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_MAX_OFFSET 0x5000
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_VCD_SECTION 0x1000
+#define mmPCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_BASE 0x4F13900ull
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_MAX_OFFSET 0x5000
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_L2C_SECTION 0x1000
+#define mmPCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_BASE 0x4F13A00ull
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_MAX_OFFSET 0x5000
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_NRM_SECTION 0x1000
+#define mmPCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_BASE 0x4F13B00ull
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_MAX_OFFSET 0x5000
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_MSIX_ABNRM_SECTION 0x1000
+#define mmPCIE_VDEC1_BRDG_CTRL_AXUSER_DEC_BASE 0x4F13C00ull
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_DEC_MAX_OFFSET 0x5000
+#define PCIE_VDEC1_BRDG_CTRL_AXUSER_DEC_SECTION 0x2800
+#define mmPCIE_VDEC1_BRDG_CTRL_SPECIAL_BASE 0x4F13E80ull
+#define PCIE_VDEC1_BRDG_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_VDEC1_BRDG_CTRL_SPECIAL_SECTION 0x1800
+#define mmPCIE_VDEC1_CTRL_BASE 0x4F14000ull
+#define PCIE_VDEC1_CTRL_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_CTRL_SECTION 0xE800
+#define mmPCIE_VDEC1_CTRL_SPECIAL_BASE 0x4F14E80ull
+#define PCIE_VDEC1_CTRL_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_VDEC1_CTRL_SPECIAL_SECTION 0x1800
+#define mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE 0x4F15000ull
+#define PCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define PCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmPCIE_VDEC1_MSTR_IF_RR_PRVT_HBW_BASE 0x4F15200ull
+#define PCIE_VDEC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define PCIE_VDEC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmPCIE_VDEC1_MSTR_IF_RR_SHRD_LBW_BASE 0x4F15400ull
+#define PCIE_VDEC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define PCIE_VDEC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmPCIE_VDEC1_MSTR_IF_RR_PRVT_LBW_BASE 0x4F15600ull
+#define PCIE_VDEC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define PCIE_VDEC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmPCIE_VDEC1_MSTR_IF_E2E_CRDT_BASE 0x4F15800ull
+#define PCIE_VDEC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define PCIE_VDEC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmPCIE_VDEC1_MSTR_IF_AXUSER_BASE 0x4F15A80ull
+#define PCIE_VDEC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define PCIE_VDEC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmPCIE_VDEC1_MSTR_IF_DBG_HBW_BASE 0x4F15B00ull
+#define PCIE_VDEC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define PCIE_VDEC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmPCIE_VDEC1_MSTR_IF_DBG_LBW_BASE 0x4F15B80ull
+#define PCIE_VDEC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define PCIE_VDEC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmPCIE_VDEC1_MSTR_IF_CORE_HBW_BASE 0x4F15C00ull
+#define PCIE_VDEC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define PCIE_VDEC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmPCIE_VDEC1_MSTR_IF_CORE_LBW_BASE 0x4F15D80ull
+#define PCIE_VDEC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define PCIE_VDEC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmPCIE_VDEC1_MSTR_IF_SPECIAL_BASE 0x4F15E80ull
+#define PCIE_VDEC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define PCIE_VDEC1_MSTR_IF_SPECIAL_SECTION 0x2A180
+#define mmDCORE0_XFT_BASE 0x4F40000ull
+#define DCORE0_XFT_MAX_OFFSET 0x1000
+#define DCORE0_XFT_SECTION 0xE800
+#define mmDCORE0_XFT_SPECIAL_BASE 0x4F40E80ull
+#define DCORE0_XFT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_XFT_SPECIAL_SECTION 0x1800
+#define mmDCORE0_HBM_PLL_CTRL_BASE 0x4F41000ull
+#define DCORE0_HBM_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_HBM_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_HBM_PLL_ASIF_SLV_BASE 0x4F41360ull
+#define DCORE0_HBM_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_HBM_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_HBM_PLL_DIV_0_RLX_BASE 0x4F41400ull
+#define DCORE0_HBM_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_HBM_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_HBM_PLL_DIV_1_RLX_BASE 0x4F41800ull
+#define DCORE0_HBM_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_HBM_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_HBM_PLL_DIV_2_RLX_BASE 0x4F41A00ull
+#define DCORE0_HBM_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_HBM_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_HBM_PLL_DIV_3_RLX_BASE 0x4F41C00ull
+#define DCORE0_HBM_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_HBM_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_HBM_PLL_SPECIAL_BASE 0x4F41E80ull
+#define DCORE0_HBM_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_HBM_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_TPC_PLL_CTRL_BASE 0x4F42000ull
+#define DCORE0_TPC_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_TPC_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_TPC_PLL_ASIF_SLV_BASE 0x4F42360ull
+#define DCORE0_TPC_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_TPC_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_TPC_PLL_DIV_0_RLX_BASE 0x4F42400ull
+#define DCORE0_TPC_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_TPC_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_TPC_PLL_DIV_1_RLX_BASE 0x4F42800ull
+#define DCORE0_TPC_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_TPC_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_TPC_PLL_DIV_2_RLX_BASE 0x4F42A00ull
+#define DCORE0_TPC_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_TPC_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_TPC_PLL_DIV_3_RLX_BASE 0x4F42C00ull
+#define DCORE0_TPC_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_TPC_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_TPC_PLL_SPECIAL_BASE 0x4F42E80ull
+#define DCORE0_TPC_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_TPC_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE0_PCI_PLL_CTRL_BASE 0x4F43000ull
+#define DCORE0_PCI_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE0_PCI_PLL_CTRL_SECTION 0x3600
+#define mmDCORE0_PCI_PLL_ASIF_SLV_BASE 0x4F43360ull
+#define DCORE0_PCI_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_PCI_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE0_PCI_PLL_DIV_0_RLX_BASE 0x4F43400ull
+#define DCORE0_PCI_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE0_PCI_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE0_PCI_PLL_DIV_1_RLX_BASE 0x4F43800ull
+#define DCORE0_PCI_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE0_PCI_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE0_PCI_PLL_DIV_2_RLX_BASE 0x4F43A00ull
+#define DCORE0_PCI_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE0_PCI_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE0_PCI_PLL_DIV_3_RLX_BASE 0x4F43C00ull
+#define DCORE0_PCI_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE0_PCI_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE0_PCI_PLL_SPECIAL_BASE 0x4F43E80ull
+#define DCORE0_PCI_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE0_PCI_PLL_SPECIAL_SECTION 0x1180
+#define mmDCORE0_TSTDVS_BASE 0x4F45000ull
+#define DCORE0_TSTDVS_MAX_OFFSET 0x7800
+#define DCORE0_TSTDVS_SECTION 0x1000
+#define mmDCORE0_TS_WRAP_BASE 0x4F46000ull
+#define DCORE0_TS_WRAP_MAX_OFFSET 0x2380
+#define DCORE0_TS_WRAP_SECTION 0x2000
+#define mmDCORE0_TS_WRAP_ASIF_SLV_BASE 0x4F46200ull
+#define DCORE0_TS_WRAP_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE0_TS_WRAP_ASIF_SLV_SECTION 0x9E00
+#define mmDCORE1_XFT_BASE 0x4F50000ull
+#define DCORE1_XFT_MAX_OFFSET 0x1000
+#define DCORE1_XFT_SECTION 0xE800
+#define mmDCORE1_XFT_SPECIAL_BASE 0x4F50E80ull
+#define DCORE1_XFT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_XFT_SPECIAL_SECTION 0x1800
+#define mmDCORE1_HBM_PLL_CTRL_BASE 0x4F51000ull
+#define DCORE1_HBM_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_HBM_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_HBM_PLL_ASIF_SLV_BASE 0x4F51360ull
+#define DCORE1_HBM_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_HBM_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_HBM_PLL_DIV_0_RLX_BASE 0x4F51400ull
+#define DCORE1_HBM_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_HBM_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_HBM_PLL_DIV_1_RLX_BASE 0x4F51800ull
+#define DCORE1_HBM_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_HBM_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_HBM_PLL_DIV_2_RLX_BASE 0x4F51A00ull
+#define DCORE1_HBM_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_HBM_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_HBM_PLL_DIV_3_RLX_BASE 0x4F51C00ull
+#define DCORE1_HBM_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_HBM_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_HBM_PLL_SPECIAL_BASE 0x4F51E80ull
+#define DCORE1_HBM_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_HBM_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_TPC_PLL_CTRL_BASE 0x4F52000ull
+#define DCORE1_TPC_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_TPC_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_TPC_PLL_ASIF_SLV_BASE 0x4F52360ull
+#define DCORE1_TPC_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_TPC_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_TPC_PLL_DIV_0_RLX_BASE 0x4F52400ull
+#define DCORE1_TPC_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_TPC_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_TPC_PLL_DIV_1_RLX_BASE 0x4F52800ull
+#define DCORE1_TPC_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_TPC_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_TPC_PLL_DIV_2_RLX_BASE 0x4F52A00ull
+#define DCORE1_TPC_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_TPC_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_TPC_PLL_DIV_3_RLX_BASE 0x4F52C00ull
+#define DCORE1_TPC_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_TPC_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_TPC_PLL_SPECIAL_BASE 0x4F52E80ull
+#define DCORE1_TPC_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_TPC_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE1_NIC_PLL_CTRL_BASE 0x4F53000ull
+#define DCORE1_NIC_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE1_NIC_PLL_CTRL_SECTION 0x3600
+#define mmDCORE1_NIC_PLL_ASIF_SLV_BASE 0x4F53360ull
+#define DCORE1_NIC_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_NIC_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE1_NIC_PLL_DIV_0_RLX_BASE 0x4F53400ull
+#define DCORE1_NIC_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE1_NIC_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE1_NIC_PLL_DIV_1_RLX_BASE 0x4F53800ull
+#define DCORE1_NIC_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE1_NIC_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE1_NIC_PLL_DIV_2_RLX_BASE 0x4F53A00ull
+#define DCORE1_NIC_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE1_NIC_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE1_NIC_PLL_DIV_3_RLX_BASE 0x4F53C00ull
+#define DCORE1_NIC_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE1_NIC_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE1_NIC_PLL_SPECIAL_BASE 0x4F53E80ull
+#define DCORE1_NIC_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE1_NIC_PLL_SPECIAL_SECTION 0x1180
+#define mmDCORE1_TSTDVS_BASE 0x4F55000ull
+#define DCORE1_TSTDVS_MAX_OFFSET 0x7800
+#define DCORE1_TSTDVS_SECTION 0x1000
+#define mmDCORE1_TS_WRAP_BASE 0x4F56000ull
+#define DCORE1_TS_WRAP_MAX_OFFSET 0x2380
+#define DCORE1_TS_WRAP_SECTION 0x2000
+#define mmDCORE1_TS_WRAP_ASIF_SLV_BASE 0x4F56200ull
+#define DCORE1_TS_WRAP_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE1_TS_WRAP_ASIF_SLV_SECTION 0x9E00
+#define mmDCORE2_XFT_BASE 0x4F60000ull
+#define DCORE2_XFT_MAX_OFFSET 0x1000
+#define DCORE2_XFT_SECTION 0xE800
+#define mmDCORE2_XFT_SPECIAL_BASE 0x4F60E80ull
+#define DCORE2_XFT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_XFT_SPECIAL_SECTION 0x1800
+#define mmDCORE2_HBM_PLL_CTRL_BASE 0x4F61000ull
+#define DCORE2_HBM_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_HBM_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_HBM_PLL_ASIF_SLV_BASE 0x4F61360ull
+#define DCORE2_HBM_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_HBM_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_HBM_PLL_DIV_0_RLX_BASE 0x4F61400ull
+#define DCORE2_HBM_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_HBM_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_HBM_PLL_DIV_1_RLX_BASE 0x4F61800ull
+#define DCORE2_HBM_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_HBM_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_HBM_PLL_DIV_2_RLX_BASE 0x4F61A00ull
+#define DCORE2_HBM_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_HBM_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_HBM_PLL_DIV_3_RLX_BASE 0x4F61C00ull
+#define DCORE2_HBM_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_HBM_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_HBM_PLL_SPECIAL_BASE 0x4F61E80ull
+#define DCORE2_HBM_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_HBM_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE2_TPC_PLL_CTRL_BASE 0x4F62000ull
+#define DCORE2_TPC_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE2_TPC_PLL_CTRL_SECTION 0x3600
+#define mmDCORE2_TPC_PLL_ASIF_SLV_BASE 0x4F62360ull
+#define DCORE2_TPC_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_TPC_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE2_TPC_PLL_DIV_0_RLX_BASE 0x4F62400ull
+#define DCORE2_TPC_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE2_TPC_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE2_TPC_PLL_DIV_1_RLX_BASE 0x4F62800ull
+#define DCORE2_TPC_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE2_TPC_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE2_TPC_PLL_DIV_2_RLX_BASE 0x4F62A00ull
+#define DCORE2_TPC_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE2_TPC_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE2_TPC_PLL_DIV_3_RLX_BASE 0x4F62C00ull
+#define DCORE2_TPC_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE2_TPC_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE2_TPC_PLL_SPECIAL_BASE 0x4F62E80ull
+#define DCORE2_TPC_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE2_TPC_PLL_SPECIAL_SECTION 0x2180
+#define mmDCORE2_TSTDVS_BASE 0x4F65000ull
+#define DCORE2_TSTDVS_MAX_OFFSET 0x7800
+#define DCORE2_TSTDVS_SECTION 0x1000
+#define mmDCORE2_TS_WRAP_BASE 0x4F66000ull
+#define DCORE2_TS_WRAP_MAX_OFFSET 0x2380
+#define DCORE2_TS_WRAP_SECTION 0x2000
+#define mmDCORE2_TS_WRAP_ASIF_SLV_BASE 0x4F66200ull
+#define DCORE2_TS_WRAP_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE2_TS_WRAP_ASIF_SLV_SECTION 0x9E00
+#define mmDCORE3_XFT_BASE 0x4F70000ull
+#define DCORE3_XFT_MAX_OFFSET 0x1000
+#define DCORE3_XFT_SECTION 0xE800
+#define mmDCORE3_XFT_SPECIAL_BASE 0x4F70E80ull
+#define DCORE3_XFT_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_XFT_SPECIAL_SECTION 0x1800
+#define mmDCORE3_HBM_PLL_CTRL_BASE 0x4F71000ull
+#define DCORE3_HBM_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_HBM_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_HBM_PLL_ASIF_SLV_BASE 0x4F71360ull
+#define DCORE3_HBM_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_HBM_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_HBM_PLL_DIV_0_RLX_BASE 0x4F71400ull
+#define DCORE3_HBM_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_HBM_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_HBM_PLL_DIV_1_RLX_BASE 0x4F71800ull
+#define DCORE3_HBM_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_HBM_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_HBM_PLL_DIV_2_RLX_BASE 0x4F71A00ull
+#define DCORE3_HBM_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_HBM_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_HBM_PLL_DIV_3_RLX_BASE 0x4F71C00ull
+#define DCORE3_HBM_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_HBM_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_HBM_PLL_SPECIAL_BASE 0x4F71E80ull
+#define DCORE3_HBM_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_HBM_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_TPC_PLL_CTRL_BASE 0x4F72000ull
+#define DCORE3_TPC_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_TPC_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_TPC_PLL_ASIF_SLV_BASE 0x4F72360ull
+#define DCORE3_TPC_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_TPC_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_TPC_PLL_DIV_0_RLX_BASE 0x4F72400ull
+#define DCORE3_TPC_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_TPC_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_TPC_PLL_DIV_1_RLX_BASE 0x4F72800ull
+#define DCORE3_TPC_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_TPC_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_TPC_PLL_DIV_2_RLX_BASE 0x4F72A00ull
+#define DCORE3_TPC_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_TPC_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_TPC_PLL_DIV_3_RLX_BASE 0x4F72C00ull
+#define DCORE3_TPC_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_TPC_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_TPC_PLL_SPECIAL_BASE 0x4F72E80ull
+#define DCORE3_TPC_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_TPC_PLL_SPECIAL_SECTION 0x1800
+#define mmDCORE3_NIC_PLL_CTRL_BASE 0x4F73000ull
+#define DCORE3_NIC_PLL_CTRL_MAX_OFFSET 0x3540
+#define DCORE3_NIC_PLL_CTRL_SECTION 0x3600
+#define mmDCORE3_NIC_PLL_ASIF_SLV_BASE 0x4F73360ull
+#define DCORE3_NIC_PLL_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_NIC_PLL_ASIF_SLV_SECTION 0xA000
+#define mmDCORE3_NIC_PLL_DIV_0_RLX_BASE 0x4F73400ull
+#define DCORE3_NIC_PLL_DIV_0_RLX_MAX_OFFSET 0x1800
+#define DCORE3_NIC_PLL_DIV_0_RLX_SECTION 0x4000
+#define mmDCORE3_NIC_PLL_DIV_1_RLX_BASE 0x4F73800ull
+#define DCORE3_NIC_PLL_DIV_1_RLX_MAX_OFFSET 0xC000
+#define DCORE3_NIC_PLL_DIV_1_RLX_SECTION 0x2000
+#define mmDCORE3_NIC_PLL_DIV_2_RLX_BASE 0x4F73A00ull
+#define DCORE3_NIC_PLL_DIV_2_RLX_MAX_OFFSET 0xC000
+#define DCORE3_NIC_PLL_DIV_2_RLX_SECTION 0x2000
+#define mmDCORE3_NIC_PLL_DIV_3_RLX_BASE 0x4F73C00ull
+#define DCORE3_NIC_PLL_DIV_3_RLX_MAX_OFFSET 0xC000
+#define DCORE3_NIC_PLL_DIV_3_RLX_SECTION 0x2800
+#define mmDCORE3_NIC_PLL_SPECIAL_BASE 0x4F73E80ull
+#define DCORE3_NIC_PLL_SPECIAL_MAX_OFFSET 0x1800
+#define DCORE3_NIC_PLL_SPECIAL_SECTION 0x1180
+#define mmDCORE3_TSTDVS_BASE 0x4F75000ull
+#define DCORE3_TSTDVS_MAX_OFFSET 0x7800
+#define DCORE3_TSTDVS_SECTION 0x1000
+#define mmDCORE3_TS_WRAP_BASE 0x4F76000ull
+#define DCORE3_TS_WRAP_MAX_OFFSET 0x2380
+#define DCORE3_TS_WRAP_SECTION 0x2000
+#define mmDCORE3_TS_WRAP_ASIF_SLV_BASE 0x4F76200ull
+#define DCORE3_TS_WRAP_ASIF_SLV_MAX_OFFSET 0x3800
+#define DCORE3_TS_WRAP_ASIF_SLV_SECTION 0x9E00
+#define mmPCIE_PMA_2_BASE 0x4F80000ull
+#define PCIE_PMA_2_MAX_OFFSET 0x40000
+#define PCIE_PMA_2_SECTION 0x40000
+#define mmPCIE_PMA_3_BASE 0x4FC0000ull
+#define PCIE_PMA_3_MAX_OFFSET 0x40000
+#define PCIE_PMA_3_SECTION 0x40000
+#define mmHBM0_MC0_BASE 0x5000000ull
+#define HBM0_MC0_MAX_OFFSET 0x1000
+#define HBM0_MC0_SECTION 0xE800
+#define mmHBM0_MC0_SPECIAL_BASE 0x5000E80ull
+#define HBM0_MC0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST0_BASE 0x5001000ull
+#define HBM0_MC0BIST0_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST0_SECTION 0xE800
+#define mmHBM0_MC0BIST0_SPECIAL_BASE 0x5001E80ull
+#define HBM0_MC0BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST1_BASE 0x5002000ull
+#define HBM0_MC0BIST1_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST1_SECTION 0xE800
+#define mmHBM0_MC0BIST1_SPECIAL_BASE 0x5002E80ull
+#define HBM0_MC0BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST2_BASE 0x5003000ull
+#define HBM0_MC0BIST2_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST2_SECTION 0xE800
+#define mmHBM0_MC0BIST2_SPECIAL_BASE 0x5003E80ull
+#define HBM0_MC0BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST3_BASE 0x5004000ull
+#define HBM0_MC0BIST3_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST3_SECTION 0xE800
+#define mmHBM0_MC0BIST3_SPECIAL_BASE 0x5004E80ull
+#define HBM0_MC0BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST4_BASE 0x5005000ull
+#define HBM0_MC0BIST4_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST4_SECTION 0xE800
+#define mmHBM0_MC0BIST4_SPECIAL_BASE 0x5005E80ull
+#define HBM0_MC0BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST5_BASE 0x5006000ull
+#define HBM0_MC0BIST5_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST5_SECTION 0xE800
+#define mmHBM0_MC0BIST5_SPECIAL_BASE 0x5006E80ull
+#define HBM0_MC0BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST6_BASE 0x5007000ull
+#define HBM0_MC0BIST6_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST6_SECTION 0xE800
+#define mmHBM0_MC0BIST6_SPECIAL_BASE 0x5007E80ull
+#define HBM0_MC0BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST7_BASE 0x5008000ull
+#define HBM0_MC0BIST7_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST7_SECTION 0xE800
+#define mmHBM0_MC0BIST7_SPECIAL_BASE 0x5008E80ull
+#define HBM0_MC0BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC0BIST8_MEM_BASE 0x5009000ull
+#define HBM0_MC0BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM0_MC0BIST8_MEM_SECTION 0xE800
+#define mmHBM0_MC0BIST8_MEM_SPECIAL_BASE 0x5009E80ull
+#define HBM0_MC0BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC0BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM0_MC1_BASE 0x5020000ull
+#define HBM0_MC1_MAX_OFFSET 0x1000
+#define HBM0_MC1_SECTION 0xE800
+#define mmHBM0_MC1_SPECIAL_BASE 0x5020E80ull
+#define HBM0_MC1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST0_BASE 0x5021000ull
+#define HBM0_MC1BIST0_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST0_SECTION 0xE800
+#define mmHBM0_MC1BIST0_SPECIAL_BASE 0x5021E80ull
+#define HBM0_MC1BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST1_BASE 0x5022000ull
+#define HBM0_MC1BIST1_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST1_SECTION 0xE800
+#define mmHBM0_MC1BIST1_SPECIAL_BASE 0x5022E80ull
+#define HBM0_MC1BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST2_BASE 0x5023000ull
+#define HBM0_MC1BIST2_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST2_SECTION 0xE800
+#define mmHBM0_MC1BIST2_SPECIAL_BASE 0x5023E80ull
+#define HBM0_MC1BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST3_BASE 0x5024000ull
+#define HBM0_MC1BIST3_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST3_SECTION 0xE800
+#define mmHBM0_MC1BIST3_SPECIAL_BASE 0x5024E80ull
+#define HBM0_MC1BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST4_BASE 0x5025000ull
+#define HBM0_MC1BIST4_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST4_SECTION 0xE800
+#define mmHBM0_MC1BIST4_SPECIAL_BASE 0x5025E80ull
+#define HBM0_MC1BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST5_BASE 0x5026000ull
+#define HBM0_MC1BIST5_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST5_SECTION 0xE800
+#define mmHBM0_MC1BIST5_SPECIAL_BASE 0x5026E80ull
+#define HBM0_MC1BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST6_BASE 0x5027000ull
+#define HBM0_MC1BIST6_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST6_SECTION 0xE800
+#define mmHBM0_MC1BIST6_SPECIAL_BASE 0x5027E80ull
+#define HBM0_MC1BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST7_BASE 0x5028000ull
+#define HBM0_MC1BIST7_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST7_SECTION 0xE800
+#define mmHBM0_MC1BIST7_SPECIAL_BASE 0x5028E80ull
+#define HBM0_MC1BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM0_MC1BIST8_MEM_BASE 0x5029000ull
+#define HBM0_MC1BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM0_MC1BIST8_MEM_SECTION 0xE800
+#define mmHBM0_MC1BIST8_MEM_SPECIAL_BASE 0x5029E80ull
+#define HBM0_MC1BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM0_MC1BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM0_PHY_BASE 0x5040000ull
+#define HBM0_PHY_MAX_OFFSET 0x4000
+#define HBM0_PHY_SECTION 0x40000
+#define mmHBM1_MC0_BASE 0x5080000ull
+#define HBM1_MC0_MAX_OFFSET 0x1000
+#define HBM1_MC0_SECTION 0xE800
+#define mmHBM1_MC0_SPECIAL_BASE 0x5080E80ull
+#define HBM1_MC0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST0_BASE 0x5081000ull
+#define HBM1_MC0BIST0_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST0_SECTION 0xE800
+#define mmHBM1_MC0BIST0_SPECIAL_BASE 0x5081E80ull
+#define HBM1_MC0BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST1_BASE 0x5082000ull
+#define HBM1_MC0BIST1_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST1_SECTION 0xE800
+#define mmHBM1_MC0BIST1_SPECIAL_BASE 0x5082E80ull
+#define HBM1_MC0BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST2_BASE 0x5083000ull
+#define HBM1_MC0BIST2_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST2_SECTION 0xE800
+#define mmHBM1_MC0BIST2_SPECIAL_BASE 0x5083E80ull
+#define HBM1_MC0BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST3_BASE 0x5084000ull
+#define HBM1_MC0BIST3_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST3_SECTION 0xE800
+#define mmHBM1_MC0BIST3_SPECIAL_BASE 0x5084E80ull
+#define HBM1_MC0BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST4_BASE 0x5085000ull
+#define HBM1_MC0BIST4_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST4_SECTION 0xE800
+#define mmHBM1_MC0BIST4_SPECIAL_BASE 0x5085E80ull
+#define HBM1_MC0BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST5_BASE 0x5086000ull
+#define HBM1_MC0BIST5_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST5_SECTION 0xE800
+#define mmHBM1_MC0BIST5_SPECIAL_BASE 0x5086E80ull
+#define HBM1_MC0BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST6_BASE 0x5087000ull
+#define HBM1_MC0BIST6_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST6_SECTION 0xE800
+#define mmHBM1_MC0BIST6_SPECIAL_BASE 0x5087E80ull
+#define HBM1_MC0BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST7_BASE 0x5088000ull
+#define HBM1_MC0BIST7_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST7_SECTION 0xE800
+#define mmHBM1_MC0BIST7_SPECIAL_BASE 0x5088E80ull
+#define HBM1_MC0BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC0BIST8_MEM_BASE 0x5089000ull
+#define HBM1_MC0BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM1_MC0BIST8_MEM_SECTION 0xE800
+#define mmHBM1_MC0BIST8_MEM_SPECIAL_BASE 0x5089E80ull
+#define HBM1_MC0BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC0BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM1_MC1_BASE 0x50A0000ull
+#define HBM1_MC1_MAX_OFFSET 0x1000
+#define HBM1_MC1_SECTION 0xE800
+#define mmHBM1_MC1_SPECIAL_BASE 0x50A0E80ull
+#define HBM1_MC1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST0_BASE 0x50A1000ull
+#define HBM1_MC1BIST0_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST0_SECTION 0xE800
+#define mmHBM1_MC1BIST0_SPECIAL_BASE 0x50A1E80ull
+#define HBM1_MC1BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST1_BASE 0x50A2000ull
+#define HBM1_MC1BIST1_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST1_SECTION 0xE800
+#define mmHBM1_MC1BIST1_SPECIAL_BASE 0x50A2E80ull
+#define HBM1_MC1BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST2_BASE 0x50A3000ull
+#define HBM1_MC1BIST2_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST2_SECTION 0xE800
+#define mmHBM1_MC1BIST2_SPECIAL_BASE 0x50A3E80ull
+#define HBM1_MC1BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST3_BASE 0x50A4000ull
+#define HBM1_MC1BIST3_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST3_SECTION 0xE800
+#define mmHBM1_MC1BIST3_SPECIAL_BASE 0x50A4E80ull
+#define HBM1_MC1BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST4_BASE 0x50A5000ull
+#define HBM1_MC1BIST4_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST4_SECTION 0xE800
+#define mmHBM1_MC1BIST4_SPECIAL_BASE 0x50A5E80ull
+#define HBM1_MC1BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST5_BASE 0x50A6000ull
+#define HBM1_MC1BIST5_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST5_SECTION 0xE800
+#define mmHBM1_MC1BIST5_SPECIAL_BASE 0x50A6E80ull
+#define HBM1_MC1BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST6_BASE 0x50A7000ull
+#define HBM1_MC1BIST6_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST6_SECTION 0xE800
+#define mmHBM1_MC1BIST6_SPECIAL_BASE 0x50A7E80ull
+#define HBM1_MC1BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST7_BASE 0x50A8000ull
+#define HBM1_MC1BIST7_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST7_SECTION 0xE800
+#define mmHBM1_MC1BIST7_SPECIAL_BASE 0x50A8E80ull
+#define HBM1_MC1BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM1_MC1BIST8_MEM_BASE 0x50A9000ull
+#define HBM1_MC1BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM1_MC1BIST8_MEM_SECTION 0xE800
+#define mmHBM1_MC1BIST8_MEM_SPECIAL_BASE 0x50A9E80ull
+#define HBM1_MC1BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM1_MC1BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM1_PHY_BASE 0x50C0000ull
+#define HBM1_PHY_MAX_OFFSET 0x4000
+#define HBM1_PHY_SECTION 0x40000
+#define mmHBM2_MC0_BASE 0x5100000ull
+#define HBM2_MC0_MAX_OFFSET 0x1000
+#define HBM2_MC0_SECTION 0xE800
+#define mmHBM2_MC0_SPECIAL_BASE 0x5100E80ull
+#define HBM2_MC0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST0_BASE 0x5101000ull
+#define HBM2_MC0BIST0_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST0_SECTION 0xE800
+#define mmHBM2_MC0BIST0_SPECIAL_BASE 0x5101E80ull
+#define HBM2_MC0BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST1_BASE 0x5102000ull
+#define HBM2_MC0BIST1_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST1_SECTION 0xE800
+#define mmHBM2_MC0BIST1_SPECIAL_BASE 0x5102E80ull
+#define HBM2_MC0BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST2_BASE 0x5103000ull
+#define HBM2_MC0BIST2_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST2_SECTION 0xE800
+#define mmHBM2_MC0BIST2_SPECIAL_BASE 0x5103E80ull
+#define HBM2_MC0BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST3_BASE 0x5104000ull
+#define HBM2_MC0BIST3_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST3_SECTION 0xE800
+#define mmHBM2_MC0BIST3_SPECIAL_BASE 0x5104E80ull
+#define HBM2_MC0BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST4_BASE 0x5105000ull
+#define HBM2_MC0BIST4_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST4_SECTION 0xE800
+#define mmHBM2_MC0BIST4_SPECIAL_BASE 0x5105E80ull
+#define HBM2_MC0BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST5_BASE 0x5106000ull
+#define HBM2_MC0BIST5_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST5_SECTION 0xE800
+#define mmHBM2_MC0BIST5_SPECIAL_BASE 0x5106E80ull
+#define HBM2_MC0BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST6_BASE 0x5107000ull
+#define HBM2_MC0BIST6_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST6_SECTION 0xE800
+#define mmHBM2_MC0BIST6_SPECIAL_BASE 0x5107E80ull
+#define HBM2_MC0BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST7_BASE 0x5108000ull
+#define HBM2_MC0BIST7_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST7_SECTION 0xE800
+#define mmHBM2_MC0BIST7_SPECIAL_BASE 0x5108E80ull
+#define HBM2_MC0BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC0BIST8_MEM_BASE 0x5109000ull
+#define HBM2_MC0BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM2_MC0BIST8_MEM_SECTION 0xE800
+#define mmHBM2_MC0BIST8_MEM_SPECIAL_BASE 0x5109E80ull
+#define HBM2_MC0BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC0BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM2_MC1_BASE 0x5120000ull
+#define HBM2_MC1_MAX_OFFSET 0x1000
+#define HBM2_MC1_SECTION 0xE800
+#define mmHBM2_MC1_SPECIAL_BASE 0x5120E80ull
+#define HBM2_MC1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST0_BASE 0x5121000ull
+#define HBM2_MC1BIST0_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST0_SECTION 0xE800
+#define mmHBM2_MC1BIST0_SPECIAL_BASE 0x5121E80ull
+#define HBM2_MC1BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST1_BASE 0x5122000ull
+#define HBM2_MC1BIST1_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST1_SECTION 0xE800
+#define mmHBM2_MC1BIST1_SPECIAL_BASE 0x5122E80ull
+#define HBM2_MC1BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST2_BASE 0x5123000ull
+#define HBM2_MC1BIST2_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST2_SECTION 0xE800
+#define mmHBM2_MC1BIST2_SPECIAL_BASE 0x5123E80ull
+#define HBM2_MC1BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST3_BASE 0x5124000ull
+#define HBM2_MC1BIST3_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST3_SECTION 0xE800
+#define mmHBM2_MC1BIST3_SPECIAL_BASE 0x5124E80ull
+#define HBM2_MC1BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST4_BASE 0x5125000ull
+#define HBM2_MC1BIST4_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST4_SECTION 0xE800
+#define mmHBM2_MC1BIST4_SPECIAL_BASE 0x5125E80ull
+#define HBM2_MC1BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST5_BASE 0x5126000ull
+#define HBM2_MC1BIST5_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST5_SECTION 0xE800
+#define mmHBM2_MC1BIST5_SPECIAL_BASE 0x5126E80ull
+#define HBM2_MC1BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST6_BASE 0x5127000ull
+#define HBM2_MC1BIST6_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST6_SECTION 0xE800
+#define mmHBM2_MC1BIST6_SPECIAL_BASE 0x5127E80ull
+#define HBM2_MC1BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST7_BASE 0x5128000ull
+#define HBM2_MC1BIST7_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST7_SECTION 0xE800
+#define mmHBM2_MC1BIST7_SPECIAL_BASE 0x5128E80ull
+#define HBM2_MC1BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM2_MC1BIST8_MEM_BASE 0x5129000ull
+#define HBM2_MC1BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM2_MC1BIST8_MEM_SECTION 0xE800
+#define mmHBM2_MC1BIST8_MEM_SPECIAL_BASE 0x5129E80ull
+#define HBM2_MC1BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM2_MC1BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM2_PHY_BASE 0x5140000ull
+#define HBM2_PHY_MAX_OFFSET 0x4000
+#define HBM2_PHY_SECTION 0x40000
+#define mmHBM3_MC0_BASE 0x5180000ull
+#define HBM3_MC0_MAX_OFFSET 0x1000
+#define HBM3_MC0_SECTION 0xE800
+#define mmHBM3_MC0_SPECIAL_BASE 0x5180E80ull
+#define HBM3_MC0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST0_BASE 0x5181000ull
+#define HBM3_MC0BIST0_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST0_SECTION 0xE800
+#define mmHBM3_MC0BIST0_SPECIAL_BASE 0x5181E80ull
+#define HBM3_MC0BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST1_BASE 0x5182000ull
+#define HBM3_MC0BIST1_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST1_SECTION 0xE800
+#define mmHBM3_MC0BIST1_SPECIAL_BASE 0x5182E80ull
+#define HBM3_MC0BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST2_BASE 0x5183000ull
+#define HBM3_MC0BIST2_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST2_SECTION 0xE800
+#define mmHBM3_MC0BIST2_SPECIAL_BASE 0x5183E80ull
+#define HBM3_MC0BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST3_BASE 0x5184000ull
+#define HBM3_MC0BIST3_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST3_SECTION 0xE800
+#define mmHBM3_MC0BIST3_SPECIAL_BASE 0x5184E80ull
+#define HBM3_MC0BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST4_BASE 0x5185000ull
+#define HBM3_MC0BIST4_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST4_SECTION 0xE800
+#define mmHBM3_MC0BIST4_SPECIAL_BASE 0x5185E80ull
+#define HBM3_MC0BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST5_BASE 0x5186000ull
+#define HBM3_MC0BIST5_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST5_SECTION 0xE800
+#define mmHBM3_MC0BIST5_SPECIAL_BASE 0x5186E80ull
+#define HBM3_MC0BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST6_BASE 0x5187000ull
+#define HBM3_MC0BIST6_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST6_SECTION 0xE800
+#define mmHBM3_MC0BIST6_SPECIAL_BASE 0x5187E80ull
+#define HBM3_MC0BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST7_BASE 0x5188000ull
+#define HBM3_MC0BIST7_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST7_SECTION 0xE800
+#define mmHBM3_MC0BIST7_SPECIAL_BASE 0x5188E80ull
+#define HBM3_MC0BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC0BIST8_MEM_BASE 0x5189000ull
+#define HBM3_MC0BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM3_MC0BIST8_MEM_SECTION 0xE800
+#define mmHBM3_MC0BIST8_MEM_SPECIAL_BASE 0x5189E80ull
+#define HBM3_MC0BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC0BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM3_MC1_BASE 0x51A0000ull
+#define HBM3_MC1_MAX_OFFSET 0x1000
+#define HBM3_MC1_SECTION 0xE800
+#define mmHBM3_MC1_SPECIAL_BASE 0x51A0E80ull
+#define HBM3_MC1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST0_BASE 0x51A1000ull
+#define HBM3_MC1BIST0_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST0_SECTION 0xE800
+#define mmHBM3_MC1BIST0_SPECIAL_BASE 0x51A1E80ull
+#define HBM3_MC1BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST1_BASE 0x51A2000ull
+#define HBM3_MC1BIST1_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST1_SECTION 0xE800
+#define mmHBM3_MC1BIST1_SPECIAL_BASE 0x51A2E80ull
+#define HBM3_MC1BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST2_BASE 0x51A3000ull
+#define HBM3_MC1BIST2_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST2_SECTION 0xE800
+#define mmHBM3_MC1BIST2_SPECIAL_BASE 0x51A3E80ull
+#define HBM3_MC1BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST3_BASE 0x51A4000ull
+#define HBM3_MC1BIST3_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST3_SECTION 0xE800
+#define mmHBM3_MC1BIST3_SPECIAL_BASE 0x51A4E80ull
+#define HBM3_MC1BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST4_BASE 0x51A5000ull
+#define HBM3_MC1BIST4_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST4_SECTION 0xE800
+#define mmHBM3_MC1BIST4_SPECIAL_BASE 0x51A5E80ull
+#define HBM3_MC1BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST5_BASE 0x51A6000ull
+#define HBM3_MC1BIST5_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST5_SECTION 0xE800
+#define mmHBM3_MC1BIST5_SPECIAL_BASE 0x51A6E80ull
+#define HBM3_MC1BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST6_BASE 0x51A7000ull
+#define HBM3_MC1BIST6_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST6_SECTION 0xE800
+#define mmHBM3_MC1BIST6_SPECIAL_BASE 0x51A7E80ull
+#define HBM3_MC1BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST7_BASE 0x51A8000ull
+#define HBM3_MC1BIST7_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST7_SECTION 0xE800
+#define mmHBM3_MC1BIST7_SPECIAL_BASE 0x51A8E80ull
+#define HBM3_MC1BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM3_MC1BIST8_MEM_BASE 0x51A9000ull
+#define HBM3_MC1BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM3_MC1BIST8_MEM_SECTION 0xE800
+#define mmHBM3_MC1BIST8_MEM_SPECIAL_BASE 0x51A9E80ull
+#define HBM3_MC1BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM3_MC1BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM3_PHY_BASE 0x51C0000ull
+#define HBM3_PHY_MAX_OFFSET 0x4000
+#define HBM3_PHY_SECTION 0x40000
+#define mmHBM4_MC0_BASE 0x5200000ull
+#define HBM4_MC0_MAX_OFFSET 0x1000
+#define HBM4_MC0_SECTION 0xE800
+#define mmHBM4_MC0_SPECIAL_BASE 0x5200E80ull
+#define HBM4_MC0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST0_BASE 0x5201000ull
+#define HBM4_MC0BIST0_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST0_SECTION 0xE800
+#define mmHBM4_MC0BIST0_SPECIAL_BASE 0x5201E80ull
+#define HBM4_MC0BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST1_BASE 0x5202000ull
+#define HBM4_MC0BIST1_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST1_SECTION 0xE800
+#define mmHBM4_MC0BIST1_SPECIAL_BASE 0x5202E80ull
+#define HBM4_MC0BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST2_BASE 0x5203000ull
+#define HBM4_MC0BIST2_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST2_SECTION 0xE800
+#define mmHBM4_MC0BIST2_SPECIAL_BASE 0x5203E80ull
+#define HBM4_MC0BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST3_BASE 0x5204000ull
+#define HBM4_MC0BIST3_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST3_SECTION 0xE800
+#define mmHBM4_MC0BIST3_SPECIAL_BASE 0x5204E80ull
+#define HBM4_MC0BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST4_BASE 0x5205000ull
+#define HBM4_MC0BIST4_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST4_SECTION 0xE800
+#define mmHBM4_MC0BIST4_SPECIAL_BASE 0x5205E80ull
+#define HBM4_MC0BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST5_BASE 0x5206000ull
+#define HBM4_MC0BIST5_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST5_SECTION 0xE800
+#define mmHBM4_MC0BIST5_SPECIAL_BASE 0x5206E80ull
+#define HBM4_MC0BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST6_BASE 0x5207000ull
+#define HBM4_MC0BIST6_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST6_SECTION 0xE800
+#define mmHBM4_MC0BIST6_SPECIAL_BASE 0x5207E80ull
+#define HBM4_MC0BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST7_BASE 0x5208000ull
+#define HBM4_MC0BIST7_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST7_SECTION 0xE800
+#define mmHBM4_MC0BIST7_SPECIAL_BASE 0x5208E80ull
+#define HBM4_MC0BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC0BIST8_MEM_BASE 0x5209000ull
+#define HBM4_MC0BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM4_MC0BIST8_MEM_SECTION 0xE800
+#define mmHBM4_MC0BIST8_MEM_SPECIAL_BASE 0x5209E80ull
+#define HBM4_MC0BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC0BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM4_MC1_BASE 0x5220000ull
+#define HBM4_MC1_MAX_OFFSET 0x1000
+#define HBM4_MC1_SECTION 0xE800
+#define mmHBM4_MC1_SPECIAL_BASE 0x5220E80ull
+#define HBM4_MC1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST0_BASE 0x5221000ull
+#define HBM4_MC1BIST0_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST0_SECTION 0xE800
+#define mmHBM4_MC1BIST0_SPECIAL_BASE 0x5221E80ull
+#define HBM4_MC1BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST1_BASE 0x5222000ull
+#define HBM4_MC1BIST1_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST1_SECTION 0xE800
+#define mmHBM4_MC1BIST1_SPECIAL_BASE 0x5222E80ull
+#define HBM4_MC1BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST2_BASE 0x5223000ull
+#define HBM4_MC1BIST2_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST2_SECTION 0xE800
+#define mmHBM4_MC1BIST2_SPECIAL_BASE 0x5223E80ull
+#define HBM4_MC1BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST3_BASE 0x5224000ull
+#define HBM4_MC1BIST3_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST3_SECTION 0xE800
+#define mmHBM4_MC1BIST3_SPECIAL_BASE 0x5224E80ull
+#define HBM4_MC1BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST4_BASE 0x5225000ull
+#define HBM4_MC1BIST4_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST4_SECTION 0xE800
+#define mmHBM4_MC1BIST4_SPECIAL_BASE 0x5225E80ull
+#define HBM4_MC1BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST5_BASE 0x5226000ull
+#define HBM4_MC1BIST5_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST5_SECTION 0xE800
+#define mmHBM4_MC1BIST5_SPECIAL_BASE 0x5226E80ull
+#define HBM4_MC1BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST6_BASE 0x5227000ull
+#define HBM4_MC1BIST6_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST6_SECTION 0xE800
+#define mmHBM4_MC1BIST6_SPECIAL_BASE 0x5227E80ull
+#define HBM4_MC1BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST7_BASE 0x5228000ull
+#define HBM4_MC1BIST7_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST7_SECTION 0xE800
+#define mmHBM4_MC1BIST7_SPECIAL_BASE 0x5228E80ull
+#define HBM4_MC1BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM4_MC1BIST8_MEM_BASE 0x5229000ull
+#define HBM4_MC1BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM4_MC1BIST8_MEM_SECTION 0xE800
+#define mmHBM4_MC1BIST8_MEM_SPECIAL_BASE 0x5229E80ull
+#define HBM4_MC1BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM4_MC1BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM4_PHY_BASE 0x5240000ull
+#define HBM4_PHY_MAX_OFFSET 0x4000
+#define HBM4_PHY_SECTION 0x40000
+#define mmHBM5_MC0_BASE 0x5280000ull
+#define HBM5_MC0_MAX_OFFSET 0x1000
+#define HBM5_MC0_SECTION 0xE800
+#define mmHBM5_MC0_SPECIAL_BASE 0x5280E80ull
+#define HBM5_MC0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST0_BASE 0x5281000ull
+#define HBM5_MC0BIST0_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST0_SECTION 0xE800
+#define mmHBM5_MC0BIST0_SPECIAL_BASE 0x5281E80ull
+#define HBM5_MC0BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST1_BASE 0x5282000ull
+#define HBM5_MC0BIST1_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST1_SECTION 0xE800
+#define mmHBM5_MC0BIST1_SPECIAL_BASE 0x5282E80ull
+#define HBM5_MC0BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST2_BASE 0x5283000ull
+#define HBM5_MC0BIST2_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST2_SECTION 0xE800
+#define mmHBM5_MC0BIST2_SPECIAL_BASE 0x5283E80ull
+#define HBM5_MC0BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST3_BASE 0x5284000ull
+#define HBM5_MC0BIST3_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST3_SECTION 0xE800
+#define mmHBM5_MC0BIST3_SPECIAL_BASE 0x5284E80ull
+#define HBM5_MC0BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST4_BASE 0x5285000ull
+#define HBM5_MC0BIST4_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST4_SECTION 0xE800
+#define mmHBM5_MC0BIST4_SPECIAL_BASE 0x5285E80ull
+#define HBM5_MC0BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST5_BASE 0x5286000ull
+#define HBM5_MC0BIST5_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST5_SECTION 0xE800
+#define mmHBM5_MC0BIST5_SPECIAL_BASE 0x5286E80ull
+#define HBM5_MC0BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST6_BASE 0x5287000ull
+#define HBM5_MC0BIST6_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST6_SECTION 0xE800
+#define mmHBM5_MC0BIST6_SPECIAL_BASE 0x5287E80ull
+#define HBM5_MC0BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST7_BASE 0x5288000ull
+#define HBM5_MC0BIST7_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST7_SECTION 0xE800
+#define mmHBM5_MC0BIST7_SPECIAL_BASE 0x5288E80ull
+#define HBM5_MC0BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC0BIST8_MEM_BASE 0x5289000ull
+#define HBM5_MC0BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM5_MC0BIST8_MEM_SECTION 0xE800
+#define mmHBM5_MC0BIST8_MEM_SPECIAL_BASE 0x5289E80ull
+#define HBM5_MC0BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC0BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM5_MC1_BASE 0x52A0000ull
+#define HBM5_MC1_MAX_OFFSET 0x1000
+#define HBM5_MC1_SECTION 0xE800
+#define mmHBM5_MC1_SPECIAL_BASE 0x52A0E80ull
+#define HBM5_MC1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST0_BASE 0x52A1000ull
+#define HBM5_MC1BIST0_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST0_SECTION 0xE800
+#define mmHBM5_MC1BIST0_SPECIAL_BASE 0x52A1E80ull
+#define HBM5_MC1BIST0_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST0_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST1_BASE 0x52A2000ull
+#define HBM5_MC1BIST1_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST1_SECTION 0xE800
+#define mmHBM5_MC1BIST1_SPECIAL_BASE 0x52A2E80ull
+#define HBM5_MC1BIST1_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST1_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST2_BASE 0x52A3000ull
+#define HBM5_MC1BIST2_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST2_SECTION 0xE800
+#define mmHBM5_MC1BIST2_SPECIAL_BASE 0x52A3E80ull
+#define HBM5_MC1BIST2_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST2_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST3_BASE 0x52A4000ull
+#define HBM5_MC1BIST3_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST3_SECTION 0xE800
+#define mmHBM5_MC1BIST3_SPECIAL_BASE 0x52A4E80ull
+#define HBM5_MC1BIST3_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST3_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST4_BASE 0x52A5000ull
+#define HBM5_MC1BIST4_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST4_SECTION 0xE800
+#define mmHBM5_MC1BIST4_SPECIAL_BASE 0x52A5E80ull
+#define HBM5_MC1BIST4_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST4_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST5_BASE 0x52A6000ull
+#define HBM5_MC1BIST5_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST5_SECTION 0xE800
+#define mmHBM5_MC1BIST5_SPECIAL_BASE 0x52A6E80ull
+#define HBM5_MC1BIST5_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST5_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST6_BASE 0x52A7000ull
+#define HBM5_MC1BIST6_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST6_SECTION 0xE800
+#define mmHBM5_MC1BIST6_SPECIAL_BASE 0x52A7E80ull
+#define HBM5_MC1BIST6_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST6_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST7_BASE 0x52A8000ull
+#define HBM5_MC1BIST7_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST7_SECTION 0xE800
+#define mmHBM5_MC1BIST7_SPECIAL_BASE 0x52A8E80ull
+#define HBM5_MC1BIST7_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST7_SPECIAL_SECTION 0x1800
+#define mmHBM5_MC1BIST8_MEM_BASE 0x52A9000ull
+#define HBM5_MC1BIST8_MEM_MAX_OFFSET 0x1000
+#define HBM5_MC1BIST8_MEM_SECTION 0xE800
+#define mmHBM5_MC1BIST8_MEM_SPECIAL_BASE 0x52A9E80ull
+#define HBM5_MC1BIST8_MEM_SPECIAL_MAX_OFFSET 0x1800
+#define HBM5_MC1BIST8_MEM_SPECIAL_SECTION 0x16180
+#define mmHBM5_PHY_BASE 0x52C0000ull
+#define HBM5_PHY_MAX_OFFSET 0x4000
+#define HBM5_PHY_SECTION 0x140000
+#define mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5400000ull
+#define NIC0_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5400080ull
+#define NIC0_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5400100ull
+#define NIC0_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5400180ull
+#define NIC0_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_0_SPECIAL_BASE 0x5400E80ull
+#define NIC0_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5401000ull
+#define NIC0_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5401080ull
+#define NIC0_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5401100ull
+#define NIC0_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5401180ull
+#define NIC0_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_1_SPECIAL_BASE 0x5401E80ull
+#define NIC0_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5402000ull
+#define NIC0_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5402080ull
+#define NIC0_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5402100ull
+#define NIC0_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5402180ull
+#define NIC0_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_2_SPECIAL_BASE 0x5402E80ull
+#define NIC0_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5403000ull
+#define NIC0_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5403080ull
+#define NIC0_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5403100ull
+#define NIC0_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5403180ull
+#define NIC0_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_3_SPECIAL_BASE 0x5403E80ull
+#define NIC0_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5404000ull
+#define NIC0_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5404080ull
+#define NIC0_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5404100ull
+#define NIC0_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5404180ull
+#define NIC0_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_4_SPECIAL_BASE 0x5404E80ull
+#define NIC0_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5405000ull
+#define NIC0_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5405080ull
+#define NIC0_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5405100ull
+#define NIC0_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5405180ull
+#define NIC0_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_5_SPECIAL_BASE 0x5405E80ull
+#define NIC0_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5406000ull
+#define NIC0_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5406080ull
+#define NIC0_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5406100ull
+#define NIC0_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5406180ull
+#define NIC0_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_6_SPECIAL_BASE 0x5406E80ull
+#define NIC0_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5407000ull
+#define NIC0_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5407080ull
+#define NIC0_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5407100ull
+#define NIC0_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5407180ull
+#define NIC0_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_7_SPECIAL_BASE 0x5407E80ull
+#define NIC0_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5408000ull
+#define NIC0_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5408080ull
+#define NIC0_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5408100ull
+#define NIC0_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5408180ull
+#define NIC0_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_8_SPECIAL_BASE 0x5408E80ull
+#define NIC0_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5409000ull
+#define NIC0_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5409080ull
+#define NIC0_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5409100ull
+#define NIC0_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5409180ull
+#define NIC0_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_9_SPECIAL_BASE 0x5409E80ull
+#define NIC0_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_10_UNSECURE_DOORBELL0_BASE 0x540A000ull
+#define NIC0_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_10_UNSECURE_DOORBELL1_BASE 0x540A080ull
+#define NIC0_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x540A100ull
+#define NIC0_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x540A180ull
+#define NIC0_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_10_SPECIAL_BASE 0x540AE80ull
+#define NIC0_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_11_UNSECURE_DOORBELL0_BASE 0x540B000ull
+#define NIC0_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_11_UNSECURE_DOORBELL1_BASE 0x540B080ull
+#define NIC0_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x540B100ull
+#define NIC0_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x540B180ull
+#define NIC0_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_11_SPECIAL_BASE 0x540BE80ull
+#define NIC0_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_12_UNSECURE_DOORBELL0_BASE 0x540C000ull
+#define NIC0_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_12_UNSECURE_DOORBELL1_BASE 0x540C080ull
+#define NIC0_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x540C100ull
+#define NIC0_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x540C180ull
+#define NIC0_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_12_SPECIAL_BASE 0x540CE80ull
+#define NIC0_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_13_UNSECURE_DOORBELL0_BASE 0x540D000ull
+#define NIC0_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_13_UNSECURE_DOORBELL1_BASE 0x540D080ull
+#define NIC0_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x540D100ull
+#define NIC0_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x540D180ull
+#define NIC0_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_13_SPECIAL_BASE 0x540DE80ull
+#define NIC0_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR0_14_UNSECURE_DOORBELL0_BASE 0x540E000ull
+#define NIC0_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR0_14_UNSECURE_DOORBELL1_BASE 0x540E080ull
+#define NIC0_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x540E100ull
+#define NIC0_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x540E180ull
+#define NIC0_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR0_14_SPECIAL_BASE 0x540EE80ull
+#define NIC0_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC0_QM_DCCM0_BASE 0x5410000ull
+#define NIC0_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC0_QM_DCCM0_SECTION 0x8000
+#define mmNIC0_QM_ARC_AUX0_BASE 0x5418000ull
+#define NIC0_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC0_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC0_QM_ARC_AUX0_SPECIAL_BASE 0x5418E80ull
+#define NIC0_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC0_QM0_BASE 0x541A000ull
+#define NIC0_QM0_MAX_OFFSET 0x1000
+#define NIC0_QM0_SECTION 0x9000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x541A900ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x541A908ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x541A910ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x541A918ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x541A920ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x541A928ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x541A930ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x541A938ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x541A940ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x541A948ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x541A950ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x541A958ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x541A960ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x541A968ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x541A970ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC0_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x541A978ull
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC0_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC0_QM0_AXUSER_SECURED_BASE 0x541AB00ull
+#define NIC0_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC0_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC0_QM0_AXUSER_NONSECURED_BASE 0x541AB80ull
+#define NIC0_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC0_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC0_QM0_DBG_HBW_BASE 0x541AC00ull
+#define NIC0_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC0_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC0_QM0_DBG_LBW_BASE 0x541AC80ull
+#define NIC0_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC0_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC0_QM0_CGM_BASE 0x541AD80ull
+#define NIC0_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC0_QM0_CGM_SECTION 0x1000
+#define mmNIC0_QM0_SPECIAL_BASE 0x541AE80ull
+#define NIC0_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC0_QPC0_BASE 0x541F000ull
+#define NIC0_QPC0_MAX_OFFSET 0x1000
+#define NIC0_QPC0_SECTION 0x7200
+#define mmNIC0_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x541F720ull
+#define NIC0_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x541F728ull
+#define NIC0_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x541F730ull
+#define NIC0_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x541F738ull
+#define NIC0_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x541F740ull
+#define NIC0_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x541F748ull
+#define NIC0_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x541F750ull
+#define NIC0_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x541F758ull
+#define NIC0_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x541F760ull
+#define NIC0_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x541F768ull
+#define NIC0_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x541F770ull
+#define NIC0_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x541F778ull
+#define NIC0_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x541F780ull
+#define NIC0_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x541F788ull
+#define NIC0_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x541F790ull
+#define NIC0_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x541F798ull
+#define NIC0_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x541F7A0ull
+#define NIC0_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x541F7A8ull
+#define NIC0_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x541F7B0ull
+#define NIC0_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x541F7B8ull
+#define NIC0_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x541F7C0ull
+#define NIC0_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x541F7C8ull
+#define NIC0_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x541F7D0ull
+#define NIC0_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x541F7D8ull
+#define NIC0_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x541F7E0ull
+#define NIC0_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x541F7E8ull
+#define NIC0_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x541F7F0ull
+#define NIC0_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x541F7F8ull
+#define NIC0_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x541F800ull
+#define NIC0_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x541F808ull
+#define NIC0_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x541F810ull
+#define NIC0_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x541F818ull
+#define NIC0_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC0_QPC0_AXUSER_CONG_QUE_BASE 0x541FB80ull
+#define NIC0_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_RXWQE_BASE 0x541FBE0ull
+#define NIC0_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x541FC40ull
+#define NIC0_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_DB_FIFO_BASE 0x541FCA0ull
+#define NIC0_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x541FD00ull
+#define NIC0_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_ERR_FIFO_BASE 0x541FD60ull
+#define NIC0_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_QPC_RESP_BASE 0x541FDC0ull
+#define NIC0_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC0_QPC0_AXUSER_QPC_REQ_BASE 0x541FE20ull
+#define NIC0_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC0_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC0_QPC0_SPECIAL_BASE 0x541FE80ull
+#define NIC0_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_0_UNSECURE_DOORBELL0_BASE 0x5420000ull
+#define NIC0_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_0_UNSECURE_DOORBELL1_BASE 0x5420080ull
+#define NIC0_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x5420100ull
+#define NIC0_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x5420180ull
+#define NIC0_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_0_SPECIAL_BASE 0x5420E80ull
+#define NIC0_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_1_UNSECURE_DOORBELL0_BASE 0x5421000ull
+#define NIC0_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_1_UNSECURE_DOORBELL1_BASE 0x5421080ull
+#define NIC0_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x5421100ull
+#define NIC0_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x5421180ull
+#define NIC0_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_1_SPECIAL_BASE 0x5421E80ull
+#define NIC0_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_2_UNSECURE_DOORBELL0_BASE 0x5422000ull
+#define NIC0_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_2_UNSECURE_DOORBELL1_BASE 0x5422080ull
+#define NIC0_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x5422100ull
+#define NIC0_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x5422180ull
+#define NIC0_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_2_SPECIAL_BASE 0x5422E80ull
+#define NIC0_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_3_UNSECURE_DOORBELL0_BASE 0x5423000ull
+#define NIC0_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_3_UNSECURE_DOORBELL1_BASE 0x5423080ull
+#define NIC0_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x5423100ull
+#define NIC0_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x5423180ull
+#define NIC0_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_3_SPECIAL_BASE 0x5423E80ull
+#define NIC0_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_4_UNSECURE_DOORBELL0_BASE 0x5424000ull
+#define NIC0_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_4_UNSECURE_DOORBELL1_BASE 0x5424080ull
+#define NIC0_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x5424100ull
+#define NIC0_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x5424180ull
+#define NIC0_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_4_SPECIAL_BASE 0x5424E80ull
+#define NIC0_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_5_UNSECURE_DOORBELL0_BASE 0x5425000ull
+#define NIC0_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_5_UNSECURE_DOORBELL1_BASE 0x5425080ull
+#define NIC0_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x5425100ull
+#define NIC0_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x5425180ull
+#define NIC0_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_5_SPECIAL_BASE 0x5425E80ull
+#define NIC0_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_6_UNSECURE_DOORBELL0_BASE 0x5426000ull
+#define NIC0_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_6_UNSECURE_DOORBELL1_BASE 0x5426080ull
+#define NIC0_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x5426100ull
+#define NIC0_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x5426180ull
+#define NIC0_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_6_SPECIAL_BASE 0x5426E80ull
+#define NIC0_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_7_UNSECURE_DOORBELL0_BASE 0x5427000ull
+#define NIC0_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_7_UNSECURE_DOORBELL1_BASE 0x5427080ull
+#define NIC0_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x5427100ull
+#define NIC0_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x5427180ull
+#define NIC0_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_7_SPECIAL_BASE 0x5427E80ull
+#define NIC0_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_8_UNSECURE_DOORBELL0_BASE 0x5428000ull
+#define NIC0_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_8_UNSECURE_DOORBELL1_BASE 0x5428080ull
+#define NIC0_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x5428100ull
+#define NIC0_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x5428180ull
+#define NIC0_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_8_SPECIAL_BASE 0x5428E80ull
+#define NIC0_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_9_UNSECURE_DOORBELL0_BASE 0x5429000ull
+#define NIC0_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_9_UNSECURE_DOORBELL1_BASE 0x5429080ull
+#define NIC0_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x5429100ull
+#define NIC0_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x5429180ull
+#define NIC0_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_9_SPECIAL_BASE 0x5429E80ull
+#define NIC0_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_10_UNSECURE_DOORBELL0_BASE 0x542A000ull
+#define NIC0_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_10_UNSECURE_DOORBELL1_BASE 0x542A080ull
+#define NIC0_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x542A100ull
+#define NIC0_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x542A180ull
+#define NIC0_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_10_SPECIAL_BASE 0x542AE80ull
+#define NIC0_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_11_UNSECURE_DOORBELL0_BASE 0x542B000ull
+#define NIC0_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_11_UNSECURE_DOORBELL1_BASE 0x542B080ull
+#define NIC0_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x542B100ull
+#define NIC0_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x542B180ull
+#define NIC0_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_11_SPECIAL_BASE 0x542BE80ull
+#define NIC0_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_12_UNSECURE_DOORBELL0_BASE 0x542C000ull
+#define NIC0_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_12_UNSECURE_DOORBELL1_BASE 0x542C080ull
+#define NIC0_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x542C100ull
+#define NIC0_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x542C180ull
+#define NIC0_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_12_SPECIAL_BASE 0x542CE80ull
+#define NIC0_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_13_UNSECURE_DOORBELL0_BASE 0x542D000ull
+#define NIC0_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_13_UNSECURE_DOORBELL1_BASE 0x542D080ull
+#define NIC0_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x542D100ull
+#define NIC0_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x542D180ull
+#define NIC0_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_13_SPECIAL_BASE 0x542DE80ull
+#define NIC0_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC0_UMR1_14_UNSECURE_DOORBELL0_BASE 0x542E000ull
+#define NIC0_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC0_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC0_UMR1_14_UNSECURE_DOORBELL1_BASE 0x542E080ull
+#define NIC0_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC0_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC0_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x542E100ull
+#define NIC0_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC0_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC0_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x542E180ull
+#define NIC0_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC0_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC0_UMR1_14_SPECIAL_BASE 0x542EE80ull
+#define NIC0_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC0_QM_DCCM1_BASE 0x5430000ull
+#define NIC0_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC0_QM_DCCM1_SECTION 0x8000
+#define mmNIC0_QM_ARC_AUX1_BASE 0x5438000ull
+#define NIC0_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC0_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC0_QM_ARC_AUX1_SPECIAL_BASE 0x5438E80ull
+#define NIC0_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC0_QM1_BASE 0x543A000ull
+#define NIC0_QM1_MAX_OFFSET 0x1000
+#define NIC0_QM1_SECTION 0x9000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x543A900ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x543A908ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x543A910ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x543A918ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x543A920ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x543A928ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x543A930ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x543A938ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x543A940ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x543A948ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x543A950ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x543A958ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x543A960ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x543A968ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x543A970ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC0_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x543A978ull
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC0_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC0_QM1_AXUSER_SECURED_BASE 0x543AB00ull
+#define NIC0_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC0_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC0_QM1_AXUSER_NONSECURED_BASE 0x543AB80ull
+#define NIC0_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC0_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC0_QM1_DBG_HBW_BASE 0x543AC00ull
+#define NIC0_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC0_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC0_QM1_DBG_LBW_BASE 0x543AC80ull
+#define NIC0_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC0_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC0_QM1_CGM_BASE 0x543AD80ull
+#define NIC0_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC0_QM1_CGM_SECTION 0x1000
+#define mmNIC0_QM1_SPECIAL_BASE 0x543AE80ull
+#define NIC0_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC0_QPC1_BASE 0x543F000ull
+#define NIC0_QPC1_MAX_OFFSET 0x1000
+#define NIC0_QPC1_SECTION 0x7200
+#define mmNIC0_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x543F720ull
+#define NIC0_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x543F728ull
+#define NIC0_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x543F730ull
+#define NIC0_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x543F738ull
+#define NIC0_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x543F740ull
+#define NIC0_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x543F748ull
+#define NIC0_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x543F750ull
+#define NIC0_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x543F758ull
+#define NIC0_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x543F760ull
+#define NIC0_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x543F768ull
+#define NIC0_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x543F770ull
+#define NIC0_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x543F778ull
+#define NIC0_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x543F780ull
+#define NIC0_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x543F788ull
+#define NIC0_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x543F790ull
+#define NIC0_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x543F798ull
+#define NIC0_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x543F7A0ull
+#define NIC0_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x543F7A8ull
+#define NIC0_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x543F7B0ull
+#define NIC0_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x543F7B8ull
+#define NIC0_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x543F7C0ull
+#define NIC0_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x543F7C8ull
+#define NIC0_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x543F7D0ull
+#define NIC0_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x543F7D8ull
+#define NIC0_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x543F7E0ull
+#define NIC0_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x543F7E8ull
+#define NIC0_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x543F7F0ull
+#define NIC0_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x543F7F8ull
+#define NIC0_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x543F800ull
+#define NIC0_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x543F808ull
+#define NIC0_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x543F810ull
+#define NIC0_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC0_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x543F818ull
+#define NIC0_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC0_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC0_QPC1_AXUSER_CONG_QUE_BASE 0x543FB80ull
+#define NIC0_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_RXWQE_BASE 0x543FBE0ull
+#define NIC0_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x543FC40ull
+#define NIC0_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_DB_FIFO_BASE 0x543FCA0ull
+#define NIC0_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x543FD00ull
+#define NIC0_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_ERR_FIFO_BASE 0x543FD60ull
+#define NIC0_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_QPC_RESP_BASE 0x543FDC0ull
+#define NIC0_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC0_QPC1_AXUSER_QPC_REQ_BASE 0x543FE20ull
+#define NIC0_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC0_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC0_QPC1_SPECIAL_BASE 0x543FE80ull
+#define NIC0_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC0_TMR_BASE 0x5448000ull
+#define NIC0_TMR_MAX_OFFSET 0x1000
+#define NIC0_TMR_SECTION 0xD600
+#define mmNIC0_TMR_AXUSER_TMR_FREE_LIST_BASE 0x5448D60ull
+#define NIC0_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC0_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC0_TMR_AXUSER_TMR_FIFO_BASE 0x5448DC0ull
+#define NIC0_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC0_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC0_TMR_AXUSER_TMR_FSM_BASE 0x5448E20ull
+#define NIC0_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC0_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC0_TMR_SPECIAL_BASE 0x5448E80ull
+#define NIC0_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC0_RXB_CORE_BASE 0x5449000ull
+#define NIC0_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC0_RXB_CORE_SECTION 0x6100
+#define mmNIC0_RXB_CORE_SCT_AWUSER_BASE 0x5449610ull
+#define NIC0_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC0_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC0_RXB_CORE_SPECIAL_BASE 0x5449E80ull
+#define NIC0_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC0_RXE0_BASE 0x544A000ull
+#define NIC0_RXE0_MAX_OFFSET 0x1000
+#define NIC0_RXE0_SECTION 0x9000
+#define mmNIC0_RXE0_WQE_ARUSER_BASE 0x544A900ull
+#define NIC0_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC0_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC0_RXE0_SPECIAL_BASE 0x544AE80ull
+#define NIC0_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC0_RXE1_BASE 0x544B000ull
+#define NIC0_RXE1_MAX_OFFSET 0x1000
+#define NIC0_RXE1_SECTION 0x9000
+#define mmNIC0_RXE1_WQE_ARUSER_BASE 0x544B900ull
+#define NIC0_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC0_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC0_RXE1_SPECIAL_BASE 0x544BE80ull
+#define NIC0_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ0_BASE 0x544C000ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ1_BASE 0x544C050ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ2_BASE 0x544C0A0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ3_BASE 0x544C0F0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ4_BASE 0x544C140ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ5_BASE 0x544C190ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ6_BASE 0x544C1E0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ7_BASE 0x544C230ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ8_BASE 0x544C280ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ9_BASE 0x544C2D0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ10_BASE 0x544C320ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ11_BASE 0x544C370ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ12_BASE 0x544C3C0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ13_BASE 0x544C410ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ14_BASE 0x544C460ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ15_BASE 0x544C4B0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ16_BASE 0x544C500ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ17_BASE 0x544C550ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ18_BASE 0x544C5A0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ19_BASE 0x544C5F0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ20_BASE 0x544C640ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ21_BASE 0x544C690ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ22_BASE 0x544C6E0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ23_BASE 0x544C730ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ24_BASE 0x544C780ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ25_BASE 0x544C7D0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ26_BASE 0x544C820ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ27_BASE 0x544C870ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ28_BASE 0x544C8C0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ29_BASE 0x544C910ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ30_BASE 0x544C960ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC0_RXE0_AXUSER_AXUSER_CQ31_BASE 0x544C9B0ull
+#define NIC0_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC0_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC0_RXE0_AXUSER_SPECIAL_BASE 0x544CE80ull
+#define NIC0_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ0_BASE 0x544D000ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ1_BASE 0x544D050ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ2_BASE 0x544D0A0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ3_BASE 0x544D0F0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ4_BASE 0x544D140ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ5_BASE 0x544D190ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ6_BASE 0x544D1E0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ7_BASE 0x544D230ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ8_BASE 0x544D280ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ9_BASE 0x544D2D0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ10_BASE 0x544D320ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ11_BASE 0x544D370ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ12_BASE 0x544D3C0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ13_BASE 0x544D410ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ14_BASE 0x544D460ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ15_BASE 0x544D4B0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ16_BASE 0x544D500ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ17_BASE 0x544D550ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ18_BASE 0x544D5A0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ19_BASE 0x544D5F0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ20_BASE 0x544D640ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ21_BASE 0x544D690ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ22_BASE 0x544D6E0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ23_BASE 0x544D730ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ24_BASE 0x544D780ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ25_BASE 0x544D7D0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ26_BASE 0x544D820ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ27_BASE 0x544D870ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ28_BASE 0x544D8C0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ29_BASE 0x544D910ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ30_BASE 0x544D960ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC0_RXE1_AXUSER_AXUSER_CQ31_BASE 0x544D9B0ull
+#define NIC0_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC0_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC0_RXE1_AXUSER_SPECIAL_BASE 0x544DE80ull
+#define NIC0_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC0_TXS0_BASE 0x5450000ull
+#define NIC0_TXS0_MAX_OFFSET 0x1000
+#define NIC0_TXS0_SECTION 0xE800
+#define mmNIC0_TXS0_SPECIAL_BASE 0x5450E80ull
+#define NIC0_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC0_TXS1_BASE 0x5451000ull
+#define NIC0_TXS1_MAX_OFFSET 0x1000
+#define NIC0_TXS1_SECTION 0xE800
+#define mmNIC0_TXS1_SPECIAL_BASE 0x5451E80ull
+#define NIC0_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC0_TXE0_BASE 0x5452000ull
+#define NIC0_TXE0_MAX_OFFSET 0x1000
+#define NIC0_TXE0_SECTION 0xE800
+#define mmNIC0_TXE0_SPECIAL_BASE 0x5452E80ull
+#define NIC0_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC0_TXE1_BASE 0x5453000ull
+#define NIC0_TXE1_MAX_OFFSET 0x1000
+#define NIC0_TXE1_SECTION 0xE800
+#define mmNIC0_TXE1_SPECIAL_BASE 0x5453E80ull
+#define NIC0_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC0_TXB_BASE 0x5454000ull
+#define NIC0_TXB_MAX_OFFSET 0x1000
+#define NIC0_TXB_SECTION 0xE800
+#define mmNIC0_TXB_SPECIAL_BASE 0x5454E80ull
+#define NIC0_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC0_MSTR_IF_RR_SHRD_HBW_BASE 0x5455000ull
+#define NIC0_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC0_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC0_MSTR_IF_RR_PRVT_HBW_BASE 0x5455200ull
+#define NIC0_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC0_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC0_MSTR_IF_RR_SHRD_LBW_BASE 0x5455400ull
+#define NIC0_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC0_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC0_MSTR_IF_RR_PRVT_LBW_BASE 0x5455600ull
+#define NIC0_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC0_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC0_MSTR_IF_E2E_CRDT_BASE 0x5455800ull
+#define NIC0_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC0_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC0_MSTR_IF_AXUSER_BASE 0x5455A80ull
+#define NIC0_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC0_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC0_MSTR_IF_DBG_HBW_BASE 0x5455B00ull
+#define NIC0_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC0_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC0_MSTR_IF_DBG_LBW_BASE 0x5455B80ull
+#define NIC0_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC0_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC0_MSTR_IF_CORE_HBW_BASE 0x5455C00ull
+#define NIC0_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC0_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC0_MSTR_IF_CORE_LBW_BASE 0x5455D80ull
+#define NIC0_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC0_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC0_MSTR_IF_SPECIAL_BASE 0x5455E80ull
+#define NIC0_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC0_TX_AXUSER_BASE 0x5456000ull
+#define NIC0_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC0_TX_AXUSER_SECTION 0x2000
+#define mmNIC0_SERDES0_BASE 0x5458000ull
+#define NIC0_SERDES0_MAX_OFFSET 0x3E40
+#define NIC0_SERDES0_SECTION 0x4000
+#define mmNIC0_SERDES1_BASE 0x545C000ull
+#define NIC0_SERDES1_MAX_OFFSET 0x3E40
+#define NIC0_SERDES1_SECTION 0x4000
+#define mmNIC0_PHY_BASE 0x5460000ull
+#define NIC0_PHY_MAX_OFFSET 0x1000
+#define NIC0_PHY_SECTION 0xE800
+#define mmNIC0_PHY_SPECIAL_BASE 0x5460E80ull
+#define NIC0_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC0_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT0_MAC_AUX_BASE 0x5468000ull
+#define PRT0_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT0_MAC_AUX_SECTION 0xE800
+#define mmPRT0_MAC_AUX_SPECIAL_BASE 0x5468E80ull
+#define PRT0_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT0_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT0_MAC_CORE_BASE 0x5469000ull
+#define PRT0_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT0_MAC_CORE_SECTION 0xE800
+#define mmPRT0_MAC_CORE_SPECIAL_BASE 0x5469E80ull
+#define PRT0_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT0_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC0_MAC_RS_FEC_BASE 0x546A000ull
+#define NIC0_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC0_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC0_MAC_GLOB_STAT_CONTROL_REG_BASE 0x546B000ull
+#define NIC0_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC0_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC0_MAC_GLOB_STAT_RX0_BASE 0x546B100ull
+#define NIC0_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC0_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC0_MAC_GLOB_STAT_RX1_BASE 0x546B18Cull
+#define NIC0_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC0_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC0_MAC_GLOB_STAT_RX2_BASE 0x546B218ull
+#define NIC0_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC0_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC0_MAC_GLOB_STAT_RX3_BASE 0x546B2A4ull
+#define NIC0_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC0_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC0_MAC_GLOB_STAT_TX0_BASE 0x546B330ull
+#define NIC0_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC0_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC0_MAC_GLOB_STAT_TX1_BASE 0x546B398ull
+#define NIC0_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC0_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC0_MAC_GLOB_STAT_TX2_BASE 0x546B400ull
+#define NIC0_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC0_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC0_MAC_GLOB_STAT_TX3_BASE 0x546B468ull
+#define NIC0_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC0_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC0_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x546B800ull
+#define NIC0_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC0_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC0_MAC_CH0_MAC_PCS_BASE 0x546C000ull
+#define NIC0_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC0_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC0_MAC_CH0_MAC_128_BASE 0x546C400ull
+#define NIC0_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC0_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC0_MAC_CH0_MAC_AN_BASE 0x546C800ull
+#define NIC0_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC0_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC0_MAC_CH1_MAC_PCS_BASE 0x546D000ull
+#define NIC0_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC0_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC0_MAC_CH1_MAC_128_BASE 0x546D400ull
+#define NIC0_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC0_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC0_MAC_CH1_MAC_AN_BASE 0x546D800ull
+#define NIC0_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC0_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC0_MAC_CH2_MAC_PCS_BASE 0x546E000ull
+#define NIC0_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC0_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC0_MAC_CH2_MAC_128_BASE 0x546E400ull
+#define NIC0_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC0_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC0_MAC_CH2_MAC_AN_BASE 0x546E800ull
+#define NIC0_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC0_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC0_MAC_CH3_MAC_PCS_BASE 0x546F000ull
+#define NIC0_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC0_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC0_MAC_CH3_MAC_128_BASE 0x546F400ull
+#define NIC0_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC0_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC0_MAC_CH3_MAC_AN_BASE 0x546F800ull
+#define NIC0_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC0_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC1_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5480000ull
+#define NIC1_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5480080ull
+#define NIC1_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5480100ull
+#define NIC1_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5480180ull
+#define NIC1_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_0_SPECIAL_BASE 0x5480E80ull
+#define NIC1_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5481000ull
+#define NIC1_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5481080ull
+#define NIC1_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5481100ull
+#define NIC1_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5481180ull
+#define NIC1_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_1_SPECIAL_BASE 0x5481E80ull
+#define NIC1_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5482000ull
+#define NIC1_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5482080ull
+#define NIC1_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5482100ull
+#define NIC1_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5482180ull
+#define NIC1_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_2_SPECIAL_BASE 0x5482E80ull
+#define NIC1_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5483000ull
+#define NIC1_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5483080ull
+#define NIC1_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5483100ull
+#define NIC1_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5483180ull
+#define NIC1_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_3_SPECIAL_BASE 0x5483E80ull
+#define NIC1_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5484000ull
+#define NIC1_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5484080ull
+#define NIC1_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5484100ull
+#define NIC1_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5484180ull
+#define NIC1_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_4_SPECIAL_BASE 0x5484E80ull
+#define NIC1_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5485000ull
+#define NIC1_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5485080ull
+#define NIC1_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5485100ull
+#define NIC1_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5485180ull
+#define NIC1_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_5_SPECIAL_BASE 0x5485E80ull
+#define NIC1_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5486000ull
+#define NIC1_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5486080ull
+#define NIC1_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5486100ull
+#define NIC1_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5486180ull
+#define NIC1_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_6_SPECIAL_BASE 0x5486E80ull
+#define NIC1_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5487000ull
+#define NIC1_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5487080ull
+#define NIC1_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5487100ull
+#define NIC1_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5487180ull
+#define NIC1_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_7_SPECIAL_BASE 0x5487E80ull
+#define NIC1_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5488000ull
+#define NIC1_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5488080ull
+#define NIC1_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5488100ull
+#define NIC1_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5488180ull
+#define NIC1_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_8_SPECIAL_BASE 0x5488E80ull
+#define NIC1_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5489000ull
+#define NIC1_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5489080ull
+#define NIC1_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5489100ull
+#define NIC1_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5489180ull
+#define NIC1_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_9_SPECIAL_BASE 0x5489E80ull
+#define NIC1_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_10_UNSECURE_DOORBELL0_BASE 0x548A000ull
+#define NIC1_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_10_UNSECURE_DOORBELL1_BASE 0x548A080ull
+#define NIC1_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x548A100ull
+#define NIC1_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x548A180ull
+#define NIC1_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_10_SPECIAL_BASE 0x548AE80ull
+#define NIC1_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_11_UNSECURE_DOORBELL0_BASE 0x548B000ull
+#define NIC1_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_11_UNSECURE_DOORBELL1_BASE 0x548B080ull
+#define NIC1_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x548B100ull
+#define NIC1_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x548B180ull
+#define NIC1_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_11_SPECIAL_BASE 0x548BE80ull
+#define NIC1_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_12_UNSECURE_DOORBELL0_BASE 0x548C000ull
+#define NIC1_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_12_UNSECURE_DOORBELL1_BASE 0x548C080ull
+#define NIC1_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x548C100ull
+#define NIC1_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x548C180ull
+#define NIC1_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_12_SPECIAL_BASE 0x548CE80ull
+#define NIC1_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_13_UNSECURE_DOORBELL0_BASE 0x548D000ull
+#define NIC1_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_13_UNSECURE_DOORBELL1_BASE 0x548D080ull
+#define NIC1_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x548D100ull
+#define NIC1_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x548D180ull
+#define NIC1_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_13_SPECIAL_BASE 0x548DE80ull
+#define NIC1_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR0_14_UNSECURE_DOORBELL0_BASE 0x548E000ull
+#define NIC1_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR0_14_UNSECURE_DOORBELL1_BASE 0x548E080ull
+#define NIC1_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x548E100ull
+#define NIC1_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x548E180ull
+#define NIC1_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR0_14_SPECIAL_BASE 0x548EE80ull
+#define NIC1_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC1_QM_DCCM0_BASE 0x5490000ull
+#define NIC1_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC1_QM_DCCM0_SECTION 0x8000
+#define mmNIC1_QM_ARC_AUX0_BASE 0x5498000ull
+#define NIC1_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC1_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC1_QM_ARC_AUX0_SPECIAL_BASE 0x5498E80ull
+#define NIC1_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC1_QM0_BASE 0x549A000ull
+#define NIC1_QM0_MAX_OFFSET 0x1000
+#define NIC1_QM0_SECTION 0x9000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x549A900ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x549A908ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x549A910ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x549A918ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x549A920ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x549A928ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x549A930ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x549A938ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x549A940ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x549A948ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x549A950ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x549A958ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x549A960ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x549A968ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x549A970ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC1_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x549A978ull
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC1_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC1_QM0_AXUSER_SECURED_BASE 0x549AB00ull
+#define NIC1_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC1_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC1_QM0_AXUSER_NONSECURED_BASE 0x549AB80ull
+#define NIC1_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC1_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC1_QM0_DBG_HBW_BASE 0x549AC00ull
+#define NIC1_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC1_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC1_QM0_DBG_LBW_BASE 0x549AC80ull
+#define NIC1_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC1_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC1_QM0_CGM_BASE 0x549AD80ull
+#define NIC1_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC1_QM0_CGM_SECTION 0x1000
+#define mmNIC1_QM0_SPECIAL_BASE 0x549AE80ull
+#define NIC1_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC1_QPC0_BASE 0x549F000ull
+#define NIC1_QPC0_MAX_OFFSET 0x1000
+#define NIC1_QPC0_SECTION 0x7200
+#define mmNIC1_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x549F720ull
+#define NIC1_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x549F728ull
+#define NIC1_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x549F730ull
+#define NIC1_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x549F738ull
+#define NIC1_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x549F740ull
+#define NIC1_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x549F748ull
+#define NIC1_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x549F750ull
+#define NIC1_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x549F758ull
+#define NIC1_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x549F760ull
+#define NIC1_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x549F768ull
+#define NIC1_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x549F770ull
+#define NIC1_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x549F778ull
+#define NIC1_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x549F780ull
+#define NIC1_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x549F788ull
+#define NIC1_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x549F790ull
+#define NIC1_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x549F798ull
+#define NIC1_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x549F7A0ull
+#define NIC1_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x549F7A8ull
+#define NIC1_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x549F7B0ull
+#define NIC1_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x549F7B8ull
+#define NIC1_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x549F7C0ull
+#define NIC1_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x549F7C8ull
+#define NIC1_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x549F7D0ull
+#define NIC1_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x549F7D8ull
+#define NIC1_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x549F7E0ull
+#define NIC1_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x549F7E8ull
+#define NIC1_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x549F7F0ull
+#define NIC1_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x549F7F8ull
+#define NIC1_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x549F800ull
+#define NIC1_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x549F808ull
+#define NIC1_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x549F810ull
+#define NIC1_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x549F818ull
+#define NIC1_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC1_QPC0_AXUSER_CONG_QUE_BASE 0x549FB80ull
+#define NIC1_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_RXWQE_BASE 0x549FBE0ull
+#define NIC1_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x549FC40ull
+#define NIC1_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_DB_FIFO_BASE 0x549FCA0ull
+#define NIC1_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x549FD00ull
+#define NIC1_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_ERR_FIFO_BASE 0x549FD60ull
+#define NIC1_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_QPC_RESP_BASE 0x549FDC0ull
+#define NIC1_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC1_QPC0_AXUSER_QPC_REQ_BASE 0x549FE20ull
+#define NIC1_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC1_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC1_QPC0_SPECIAL_BASE 0x549FE80ull
+#define NIC1_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_0_UNSECURE_DOORBELL0_BASE 0x54A0000ull
+#define NIC1_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_0_UNSECURE_DOORBELL1_BASE 0x54A0080ull
+#define NIC1_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x54A0100ull
+#define NIC1_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x54A0180ull
+#define NIC1_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_0_SPECIAL_BASE 0x54A0E80ull
+#define NIC1_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_1_UNSECURE_DOORBELL0_BASE 0x54A1000ull
+#define NIC1_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_1_UNSECURE_DOORBELL1_BASE 0x54A1080ull
+#define NIC1_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x54A1100ull
+#define NIC1_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x54A1180ull
+#define NIC1_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_1_SPECIAL_BASE 0x54A1E80ull
+#define NIC1_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_2_UNSECURE_DOORBELL0_BASE 0x54A2000ull
+#define NIC1_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_2_UNSECURE_DOORBELL1_BASE 0x54A2080ull
+#define NIC1_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x54A2100ull
+#define NIC1_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x54A2180ull
+#define NIC1_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_2_SPECIAL_BASE 0x54A2E80ull
+#define NIC1_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_3_UNSECURE_DOORBELL0_BASE 0x54A3000ull
+#define NIC1_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_3_UNSECURE_DOORBELL1_BASE 0x54A3080ull
+#define NIC1_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x54A3100ull
+#define NIC1_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x54A3180ull
+#define NIC1_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_3_SPECIAL_BASE 0x54A3E80ull
+#define NIC1_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_4_UNSECURE_DOORBELL0_BASE 0x54A4000ull
+#define NIC1_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_4_UNSECURE_DOORBELL1_BASE 0x54A4080ull
+#define NIC1_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x54A4100ull
+#define NIC1_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x54A4180ull
+#define NIC1_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_4_SPECIAL_BASE 0x54A4E80ull
+#define NIC1_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_5_UNSECURE_DOORBELL0_BASE 0x54A5000ull
+#define NIC1_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_5_UNSECURE_DOORBELL1_BASE 0x54A5080ull
+#define NIC1_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x54A5100ull
+#define NIC1_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x54A5180ull
+#define NIC1_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_5_SPECIAL_BASE 0x54A5E80ull
+#define NIC1_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_6_UNSECURE_DOORBELL0_BASE 0x54A6000ull
+#define NIC1_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_6_UNSECURE_DOORBELL1_BASE 0x54A6080ull
+#define NIC1_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x54A6100ull
+#define NIC1_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x54A6180ull
+#define NIC1_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_6_SPECIAL_BASE 0x54A6E80ull
+#define NIC1_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_7_UNSECURE_DOORBELL0_BASE 0x54A7000ull
+#define NIC1_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_7_UNSECURE_DOORBELL1_BASE 0x54A7080ull
+#define NIC1_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x54A7100ull
+#define NIC1_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x54A7180ull
+#define NIC1_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_7_SPECIAL_BASE 0x54A7E80ull
+#define NIC1_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_8_UNSECURE_DOORBELL0_BASE 0x54A8000ull
+#define NIC1_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_8_UNSECURE_DOORBELL1_BASE 0x54A8080ull
+#define NIC1_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x54A8100ull
+#define NIC1_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x54A8180ull
+#define NIC1_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_8_SPECIAL_BASE 0x54A8E80ull
+#define NIC1_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_9_UNSECURE_DOORBELL0_BASE 0x54A9000ull
+#define NIC1_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_9_UNSECURE_DOORBELL1_BASE 0x54A9080ull
+#define NIC1_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x54A9100ull
+#define NIC1_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x54A9180ull
+#define NIC1_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_9_SPECIAL_BASE 0x54A9E80ull
+#define NIC1_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_10_UNSECURE_DOORBELL0_BASE 0x54AA000ull
+#define NIC1_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_10_UNSECURE_DOORBELL1_BASE 0x54AA080ull
+#define NIC1_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x54AA100ull
+#define NIC1_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x54AA180ull
+#define NIC1_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_10_SPECIAL_BASE 0x54AAE80ull
+#define NIC1_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_11_UNSECURE_DOORBELL0_BASE 0x54AB000ull
+#define NIC1_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_11_UNSECURE_DOORBELL1_BASE 0x54AB080ull
+#define NIC1_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x54AB100ull
+#define NIC1_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x54AB180ull
+#define NIC1_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_11_SPECIAL_BASE 0x54ABE80ull
+#define NIC1_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_12_UNSECURE_DOORBELL0_BASE 0x54AC000ull
+#define NIC1_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_12_UNSECURE_DOORBELL1_BASE 0x54AC080ull
+#define NIC1_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x54AC100ull
+#define NIC1_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x54AC180ull
+#define NIC1_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_12_SPECIAL_BASE 0x54ACE80ull
+#define NIC1_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_13_UNSECURE_DOORBELL0_BASE 0x54AD000ull
+#define NIC1_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_13_UNSECURE_DOORBELL1_BASE 0x54AD080ull
+#define NIC1_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x54AD100ull
+#define NIC1_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x54AD180ull
+#define NIC1_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_13_SPECIAL_BASE 0x54ADE80ull
+#define NIC1_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC1_UMR1_14_UNSECURE_DOORBELL0_BASE 0x54AE000ull
+#define NIC1_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC1_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC1_UMR1_14_UNSECURE_DOORBELL1_BASE 0x54AE080ull
+#define NIC1_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC1_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC1_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x54AE100ull
+#define NIC1_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC1_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC1_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x54AE180ull
+#define NIC1_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC1_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC1_UMR1_14_SPECIAL_BASE 0x54AEE80ull
+#define NIC1_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC1_QM_DCCM1_BASE 0x54B0000ull
+#define NIC1_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC1_QM_DCCM1_SECTION 0x8000
+#define mmNIC1_QM_ARC_AUX1_BASE 0x54B8000ull
+#define NIC1_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC1_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC1_QM_ARC_AUX1_SPECIAL_BASE 0x54B8E80ull
+#define NIC1_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC1_QM1_BASE 0x54BA000ull
+#define NIC1_QM1_MAX_OFFSET 0x1000
+#define NIC1_QM1_SECTION 0x9000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x54BA900ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x54BA908ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x54BA910ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x54BA918ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x54BA920ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x54BA928ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x54BA930ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x54BA938ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x54BA940ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x54BA948ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x54BA950ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x54BA958ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x54BA960ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x54BA968ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x54BA970ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC1_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x54BA978ull
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC1_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC1_QM1_AXUSER_SECURED_BASE 0x54BAB00ull
+#define NIC1_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC1_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC1_QM1_AXUSER_NONSECURED_BASE 0x54BAB80ull
+#define NIC1_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC1_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC1_QM1_DBG_HBW_BASE 0x54BAC00ull
+#define NIC1_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC1_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC1_QM1_DBG_LBW_BASE 0x54BAC80ull
+#define NIC1_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC1_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC1_QM1_CGM_BASE 0x54BAD80ull
+#define NIC1_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC1_QM1_CGM_SECTION 0x1000
+#define mmNIC1_QM1_SPECIAL_BASE 0x54BAE80ull
+#define NIC1_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC1_QPC1_BASE 0x54BF000ull
+#define NIC1_QPC1_MAX_OFFSET 0x1000
+#define NIC1_QPC1_SECTION 0x7200
+#define mmNIC1_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x54BF720ull
+#define NIC1_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x54BF728ull
+#define NIC1_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x54BF730ull
+#define NIC1_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x54BF738ull
+#define NIC1_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x54BF740ull
+#define NIC1_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x54BF748ull
+#define NIC1_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x54BF750ull
+#define NIC1_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x54BF758ull
+#define NIC1_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x54BF760ull
+#define NIC1_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x54BF768ull
+#define NIC1_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x54BF770ull
+#define NIC1_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x54BF778ull
+#define NIC1_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x54BF780ull
+#define NIC1_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x54BF788ull
+#define NIC1_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x54BF790ull
+#define NIC1_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x54BF798ull
+#define NIC1_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x54BF7A0ull
+#define NIC1_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x54BF7A8ull
+#define NIC1_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x54BF7B0ull
+#define NIC1_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x54BF7B8ull
+#define NIC1_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x54BF7C0ull
+#define NIC1_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x54BF7C8ull
+#define NIC1_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x54BF7D0ull
+#define NIC1_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x54BF7D8ull
+#define NIC1_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x54BF7E0ull
+#define NIC1_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x54BF7E8ull
+#define NIC1_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x54BF7F0ull
+#define NIC1_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x54BF7F8ull
+#define NIC1_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x54BF800ull
+#define NIC1_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x54BF808ull
+#define NIC1_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x54BF810ull
+#define NIC1_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC1_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x54BF818ull
+#define NIC1_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC1_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC1_QPC1_AXUSER_CONG_QUE_BASE 0x54BFB80ull
+#define NIC1_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_RXWQE_BASE 0x54BFBE0ull
+#define NIC1_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x54BFC40ull
+#define NIC1_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_DB_FIFO_BASE 0x54BFCA0ull
+#define NIC1_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x54BFD00ull
+#define NIC1_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_ERR_FIFO_BASE 0x54BFD60ull
+#define NIC1_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_QPC_RESP_BASE 0x54BFDC0ull
+#define NIC1_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC1_QPC1_AXUSER_QPC_REQ_BASE 0x54BFE20ull
+#define NIC1_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC1_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC1_QPC1_SPECIAL_BASE 0x54BFE80ull
+#define NIC1_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC1_TMR_BASE 0x54C8000ull
+#define NIC1_TMR_MAX_OFFSET 0x1000
+#define NIC1_TMR_SECTION 0xD600
+#define mmNIC1_TMR_AXUSER_TMR_FREE_LIST_BASE 0x54C8D60ull
+#define NIC1_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC1_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC1_TMR_AXUSER_TMR_FIFO_BASE 0x54C8DC0ull
+#define NIC1_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC1_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC1_TMR_AXUSER_TMR_FSM_BASE 0x54C8E20ull
+#define NIC1_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC1_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC1_TMR_SPECIAL_BASE 0x54C8E80ull
+#define NIC1_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC1_RXB_CORE_BASE 0x54C9000ull
+#define NIC1_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC1_RXB_CORE_SECTION 0x6100
+#define mmNIC1_RXB_CORE_SCT_AWUSER_BASE 0x54C9610ull
+#define NIC1_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC1_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC1_RXB_CORE_SPECIAL_BASE 0x54C9E80ull
+#define NIC1_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC1_RXE0_BASE 0x54CA000ull
+#define NIC1_RXE0_MAX_OFFSET 0x1000
+#define NIC1_RXE0_SECTION 0x9000
+#define mmNIC1_RXE0_WQE_ARUSER_BASE 0x54CA900ull
+#define NIC1_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC1_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC1_RXE0_SPECIAL_BASE 0x54CAE80ull
+#define NIC1_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC1_RXE1_BASE 0x54CB000ull
+#define NIC1_RXE1_MAX_OFFSET 0x1000
+#define NIC1_RXE1_SECTION 0x9000
+#define mmNIC1_RXE1_WQE_ARUSER_BASE 0x54CB900ull
+#define NIC1_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC1_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC1_RXE1_SPECIAL_BASE 0x54CBE80ull
+#define NIC1_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ0_BASE 0x54CC000ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ1_BASE 0x54CC050ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ2_BASE 0x54CC0A0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ3_BASE 0x54CC0F0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ4_BASE 0x54CC140ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ5_BASE 0x54CC190ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ6_BASE 0x54CC1E0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ7_BASE 0x54CC230ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ8_BASE 0x54CC280ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ9_BASE 0x54CC2D0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ10_BASE 0x54CC320ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ11_BASE 0x54CC370ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ12_BASE 0x54CC3C0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ13_BASE 0x54CC410ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ14_BASE 0x54CC460ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ15_BASE 0x54CC4B0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ16_BASE 0x54CC500ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ17_BASE 0x54CC550ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ18_BASE 0x54CC5A0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ19_BASE 0x54CC5F0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ20_BASE 0x54CC640ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ21_BASE 0x54CC690ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ22_BASE 0x54CC6E0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ23_BASE 0x54CC730ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ24_BASE 0x54CC780ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ25_BASE 0x54CC7D0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ26_BASE 0x54CC820ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ27_BASE 0x54CC870ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ28_BASE 0x54CC8C0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ29_BASE 0x54CC910ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ30_BASE 0x54CC960ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC1_RXE0_AXUSER_AXUSER_CQ31_BASE 0x54CC9B0ull
+#define NIC1_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC1_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC1_RXE0_AXUSER_SPECIAL_BASE 0x54CCE80ull
+#define NIC1_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ0_BASE 0x54CD000ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ1_BASE 0x54CD050ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ2_BASE 0x54CD0A0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ3_BASE 0x54CD0F0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ4_BASE 0x54CD140ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ5_BASE 0x54CD190ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ6_BASE 0x54CD1E0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ7_BASE 0x54CD230ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ8_BASE 0x54CD280ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ9_BASE 0x54CD2D0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ10_BASE 0x54CD320ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ11_BASE 0x54CD370ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ12_BASE 0x54CD3C0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ13_BASE 0x54CD410ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ14_BASE 0x54CD460ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ15_BASE 0x54CD4B0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ16_BASE 0x54CD500ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ17_BASE 0x54CD550ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ18_BASE 0x54CD5A0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ19_BASE 0x54CD5F0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ20_BASE 0x54CD640ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ21_BASE 0x54CD690ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ22_BASE 0x54CD6E0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ23_BASE 0x54CD730ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ24_BASE 0x54CD780ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ25_BASE 0x54CD7D0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ26_BASE 0x54CD820ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ27_BASE 0x54CD870ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ28_BASE 0x54CD8C0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ29_BASE 0x54CD910ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ30_BASE 0x54CD960ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC1_RXE1_AXUSER_AXUSER_CQ31_BASE 0x54CD9B0ull
+#define NIC1_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC1_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC1_RXE1_AXUSER_SPECIAL_BASE 0x54CDE80ull
+#define NIC1_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC1_TXS0_BASE 0x54D0000ull
+#define NIC1_TXS0_MAX_OFFSET 0x1000
+#define NIC1_TXS0_SECTION 0xE800
+#define mmNIC1_TXS0_SPECIAL_BASE 0x54D0E80ull
+#define NIC1_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC1_TXS1_BASE 0x54D1000ull
+#define NIC1_TXS1_MAX_OFFSET 0x1000
+#define NIC1_TXS1_SECTION 0xE800
+#define mmNIC1_TXS1_SPECIAL_BASE 0x54D1E80ull
+#define NIC1_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC1_TXE0_BASE 0x54D2000ull
+#define NIC1_TXE0_MAX_OFFSET 0x1000
+#define NIC1_TXE0_SECTION 0xE800
+#define mmNIC1_TXE0_SPECIAL_BASE 0x54D2E80ull
+#define NIC1_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC1_TXE1_BASE 0x54D3000ull
+#define NIC1_TXE1_MAX_OFFSET 0x1000
+#define NIC1_TXE1_SECTION 0xE800
+#define mmNIC1_TXE1_SPECIAL_BASE 0x54D3E80ull
+#define NIC1_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC1_TXB_BASE 0x54D4000ull
+#define NIC1_TXB_MAX_OFFSET 0x1000
+#define NIC1_TXB_SECTION 0xE800
+#define mmNIC1_TXB_SPECIAL_BASE 0x54D4E80ull
+#define NIC1_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC1_MSTR_IF_RR_SHRD_HBW_BASE 0x54D5000ull
+#define NIC1_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC1_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC1_MSTR_IF_RR_PRVT_HBW_BASE 0x54D5200ull
+#define NIC1_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC1_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC1_MSTR_IF_RR_SHRD_LBW_BASE 0x54D5400ull
+#define NIC1_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC1_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC1_MSTR_IF_RR_PRVT_LBW_BASE 0x54D5600ull
+#define NIC1_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC1_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC1_MSTR_IF_E2E_CRDT_BASE 0x54D5800ull
+#define NIC1_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC1_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC1_MSTR_IF_AXUSER_BASE 0x54D5A80ull
+#define NIC1_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC1_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC1_MSTR_IF_DBG_HBW_BASE 0x54D5B00ull
+#define NIC1_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC1_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC1_MSTR_IF_DBG_LBW_BASE 0x54D5B80ull
+#define NIC1_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC1_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC1_MSTR_IF_CORE_HBW_BASE 0x54D5C00ull
+#define NIC1_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC1_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC1_MSTR_IF_CORE_LBW_BASE 0x54D5D80ull
+#define NIC1_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC1_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC1_MSTR_IF_SPECIAL_BASE 0x54D5E80ull
+#define NIC1_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC1_TX_AXUSER_BASE 0x54D6000ull
+#define NIC1_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC1_TX_AXUSER_SECTION 0x2000
+#define mmNIC1_SERDES0_BASE 0x54D8000ull
+#define NIC1_SERDES0_MAX_OFFSET 0x3E40
+#define NIC1_SERDES0_SECTION 0x4000
+#define mmNIC1_SERDES1_BASE 0x54DC000ull
+#define NIC1_SERDES1_MAX_OFFSET 0x3E40
+#define NIC1_SERDES1_SECTION 0x4000
+#define mmNIC1_PHY_BASE 0x54E0000ull
+#define NIC1_PHY_MAX_OFFSET 0x1000
+#define NIC1_PHY_SECTION 0xE800
+#define mmNIC1_PHY_SPECIAL_BASE 0x54E0E80ull
+#define NIC1_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC1_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT1_MAC_AUX_BASE 0x54E8000ull
+#define PRT1_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT1_MAC_AUX_SECTION 0xE800
+#define mmPRT1_MAC_AUX_SPECIAL_BASE 0x54E8E80ull
+#define PRT1_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT1_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT1_MAC_CORE_BASE 0x54E9000ull
+#define PRT1_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT1_MAC_CORE_SECTION 0xE800
+#define mmPRT1_MAC_CORE_SPECIAL_BASE 0x54E9E80ull
+#define PRT1_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT1_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC1_MAC_RS_FEC_BASE 0x54EA000ull
+#define NIC1_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC1_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC1_MAC_GLOB_STAT_CONTROL_REG_BASE 0x54EB000ull
+#define NIC1_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC1_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC1_MAC_GLOB_STAT_RX0_BASE 0x54EB100ull
+#define NIC1_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC1_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC1_MAC_GLOB_STAT_RX1_BASE 0x54EB18Cull
+#define NIC1_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC1_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC1_MAC_GLOB_STAT_RX2_BASE 0x54EB218ull
+#define NIC1_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC1_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC1_MAC_GLOB_STAT_RX3_BASE 0x54EB2A4ull
+#define NIC1_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC1_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC1_MAC_GLOB_STAT_TX0_BASE 0x54EB330ull
+#define NIC1_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC1_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC1_MAC_GLOB_STAT_TX1_BASE 0x54EB398ull
+#define NIC1_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC1_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC1_MAC_GLOB_STAT_TX2_BASE 0x54EB400ull
+#define NIC1_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC1_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC1_MAC_GLOB_STAT_TX3_BASE 0x54EB468ull
+#define NIC1_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC1_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC1_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x54EB800ull
+#define NIC1_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC1_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC1_MAC_CH0_MAC_PCS_BASE 0x54EC000ull
+#define NIC1_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC1_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC1_MAC_CH0_MAC_128_BASE 0x54EC400ull
+#define NIC1_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC1_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC1_MAC_CH0_MAC_AN_BASE 0x54EC800ull
+#define NIC1_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC1_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC1_MAC_CH1_MAC_PCS_BASE 0x54ED000ull
+#define NIC1_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC1_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC1_MAC_CH1_MAC_128_BASE 0x54ED400ull
+#define NIC1_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC1_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC1_MAC_CH1_MAC_AN_BASE 0x54ED800ull
+#define NIC1_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC1_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC1_MAC_CH2_MAC_PCS_BASE 0x54EE000ull
+#define NIC1_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC1_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC1_MAC_CH2_MAC_128_BASE 0x54EE400ull
+#define NIC1_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC1_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC1_MAC_CH2_MAC_AN_BASE 0x54EE800ull
+#define NIC1_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC1_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC1_MAC_CH3_MAC_PCS_BASE 0x54EF000ull
+#define NIC1_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC1_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC1_MAC_CH3_MAC_128_BASE 0x54EF400ull
+#define NIC1_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC1_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC1_MAC_CH3_MAC_AN_BASE 0x54EF800ull
+#define NIC1_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC1_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC2_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5500000ull
+#define NIC2_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5500080ull
+#define NIC2_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5500100ull
+#define NIC2_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5500180ull
+#define NIC2_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_0_SPECIAL_BASE 0x5500E80ull
+#define NIC2_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5501000ull
+#define NIC2_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5501080ull
+#define NIC2_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5501100ull
+#define NIC2_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5501180ull
+#define NIC2_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_1_SPECIAL_BASE 0x5501E80ull
+#define NIC2_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5502000ull
+#define NIC2_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5502080ull
+#define NIC2_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5502100ull
+#define NIC2_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5502180ull
+#define NIC2_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_2_SPECIAL_BASE 0x5502E80ull
+#define NIC2_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5503000ull
+#define NIC2_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5503080ull
+#define NIC2_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5503100ull
+#define NIC2_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5503180ull
+#define NIC2_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_3_SPECIAL_BASE 0x5503E80ull
+#define NIC2_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5504000ull
+#define NIC2_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5504080ull
+#define NIC2_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5504100ull
+#define NIC2_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5504180ull
+#define NIC2_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_4_SPECIAL_BASE 0x5504E80ull
+#define NIC2_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5505000ull
+#define NIC2_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5505080ull
+#define NIC2_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5505100ull
+#define NIC2_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5505180ull
+#define NIC2_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_5_SPECIAL_BASE 0x5505E80ull
+#define NIC2_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5506000ull
+#define NIC2_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5506080ull
+#define NIC2_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5506100ull
+#define NIC2_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5506180ull
+#define NIC2_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_6_SPECIAL_BASE 0x5506E80ull
+#define NIC2_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5507000ull
+#define NIC2_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5507080ull
+#define NIC2_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5507100ull
+#define NIC2_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5507180ull
+#define NIC2_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_7_SPECIAL_BASE 0x5507E80ull
+#define NIC2_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5508000ull
+#define NIC2_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5508080ull
+#define NIC2_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5508100ull
+#define NIC2_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5508180ull
+#define NIC2_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_8_SPECIAL_BASE 0x5508E80ull
+#define NIC2_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5509000ull
+#define NIC2_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5509080ull
+#define NIC2_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5509100ull
+#define NIC2_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5509180ull
+#define NIC2_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_9_SPECIAL_BASE 0x5509E80ull
+#define NIC2_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_10_UNSECURE_DOORBELL0_BASE 0x550A000ull
+#define NIC2_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_10_UNSECURE_DOORBELL1_BASE 0x550A080ull
+#define NIC2_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x550A100ull
+#define NIC2_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x550A180ull
+#define NIC2_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_10_SPECIAL_BASE 0x550AE80ull
+#define NIC2_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_11_UNSECURE_DOORBELL0_BASE 0x550B000ull
+#define NIC2_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_11_UNSECURE_DOORBELL1_BASE 0x550B080ull
+#define NIC2_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x550B100ull
+#define NIC2_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x550B180ull
+#define NIC2_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_11_SPECIAL_BASE 0x550BE80ull
+#define NIC2_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_12_UNSECURE_DOORBELL0_BASE 0x550C000ull
+#define NIC2_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_12_UNSECURE_DOORBELL1_BASE 0x550C080ull
+#define NIC2_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x550C100ull
+#define NIC2_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x550C180ull
+#define NIC2_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_12_SPECIAL_BASE 0x550CE80ull
+#define NIC2_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_13_UNSECURE_DOORBELL0_BASE 0x550D000ull
+#define NIC2_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_13_UNSECURE_DOORBELL1_BASE 0x550D080ull
+#define NIC2_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x550D100ull
+#define NIC2_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x550D180ull
+#define NIC2_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_13_SPECIAL_BASE 0x550DE80ull
+#define NIC2_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR0_14_UNSECURE_DOORBELL0_BASE 0x550E000ull
+#define NIC2_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR0_14_UNSECURE_DOORBELL1_BASE 0x550E080ull
+#define NIC2_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x550E100ull
+#define NIC2_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x550E180ull
+#define NIC2_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR0_14_SPECIAL_BASE 0x550EE80ull
+#define NIC2_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC2_QM_DCCM0_BASE 0x5510000ull
+#define NIC2_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC2_QM_DCCM0_SECTION 0x8000
+#define mmNIC2_QM_ARC_AUX0_BASE 0x5518000ull
+#define NIC2_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC2_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC2_QM_ARC_AUX0_SPECIAL_BASE 0x5518E80ull
+#define NIC2_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC2_QM0_BASE 0x551A000ull
+#define NIC2_QM0_MAX_OFFSET 0x1000
+#define NIC2_QM0_SECTION 0x9000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x551A900ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x551A908ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x551A910ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x551A918ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x551A920ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x551A928ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x551A930ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x551A938ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x551A940ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x551A948ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x551A950ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x551A958ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x551A960ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x551A968ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x551A970ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC2_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x551A978ull
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC2_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC2_QM0_AXUSER_SECURED_BASE 0x551AB00ull
+#define NIC2_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC2_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC2_QM0_AXUSER_NONSECURED_BASE 0x551AB80ull
+#define NIC2_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC2_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC2_QM0_DBG_HBW_BASE 0x551AC00ull
+#define NIC2_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC2_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC2_QM0_DBG_LBW_BASE 0x551AC80ull
+#define NIC2_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC2_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC2_QM0_CGM_BASE 0x551AD80ull
+#define NIC2_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC2_QM0_CGM_SECTION 0x1000
+#define mmNIC2_QM0_SPECIAL_BASE 0x551AE80ull
+#define NIC2_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC2_QPC0_BASE 0x551F000ull
+#define NIC2_QPC0_MAX_OFFSET 0x1000
+#define NIC2_QPC0_SECTION 0x7200
+#define mmNIC2_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x551F720ull
+#define NIC2_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x551F728ull
+#define NIC2_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x551F730ull
+#define NIC2_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x551F738ull
+#define NIC2_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x551F740ull
+#define NIC2_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x551F748ull
+#define NIC2_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x551F750ull
+#define NIC2_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x551F758ull
+#define NIC2_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x551F760ull
+#define NIC2_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x551F768ull
+#define NIC2_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x551F770ull
+#define NIC2_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x551F778ull
+#define NIC2_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x551F780ull
+#define NIC2_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x551F788ull
+#define NIC2_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x551F790ull
+#define NIC2_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x551F798ull
+#define NIC2_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x551F7A0ull
+#define NIC2_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x551F7A8ull
+#define NIC2_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x551F7B0ull
+#define NIC2_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x551F7B8ull
+#define NIC2_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x551F7C0ull
+#define NIC2_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x551F7C8ull
+#define NIC2_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x551F7D0ull
+#define NIC2_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x551F7D8ull
+#define NIC2_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x551F7E0ull
+#define NIC2_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x551F7E8ull
+#define NIC2_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x551F7F0ull
+#define NIC2_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x551F7F8ull
+#define NIC2_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x551F800ull
+#define NIC2_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x551F808ull
+#define NIC2_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x551F810ull
+#define NIC2_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x551F818ull
+#define NIC2_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC2_QPC0_AXUSER_CONG_QUE_BASE 0x551FB80ull
+#define NIC2_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_RXWQE_BASE 0x551FBE0ull
+#define NIC2_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x551FC40ull
+#define NIC2_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_DB_FIFO_BASE 0x551FCA0ull
+#define NIC2_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x551FD00ull
+#define NIC2_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_ERR_FIFO_BASE 0x551FD60ull
+#define NIC2_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_QPC_RESP_BASE 0x551FDC0ull
+#define NIC2_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC2_QPC0_AXUSER_QPC_REQ_BASE 0x551FE20ull
+#define NIC2_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC2_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC2_QPC0_SPECIAL_BASE 0x551FE80ull
+#define NIC2_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_0_UNSECURE_DOORBELL0_BASE 0x5520000ull
+#define NIC2_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_0_UNSECURE_DOORBELL1_BASE 0x5520080ull
+#define NIC2_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x5520100ull
+#define NIC2_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x5520180ull
+#define NIC2_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_0_SPECIAL_BASE 0x5520E80ull
+#define NIC2_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_1_UNSECURE_DOORBELL0_BASE 0x5521000ull
+#define NIC2_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_1_UNSECURE_DOORBELL1_BASE 0x5521080ull
+#define NIC2_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x5521100ull
+#define NIC2_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x5521180ull
+#define NIC2_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_1_SPECIAL_BASE 0x5521E80ull
+#define NIC2_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_2_UNSECURE_DOORBELL0_BASE 0x5522000ull
+#define NIC2_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_2_UNSECURE_DOORBELL1_BASE 0x5522080ull
+#define NIC2_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x5522100ull
+#define NIC2_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x5522180ull
+#define NIC2_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_2_SPECIAL_BASE 0x5522E80ull
+#define NIC2_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_3_UNSECURE_DOORBELL0_BASE 0x5523000ull
+#define NIC2_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_3_UNSECURE_DOORBELL1_BASE 0x5523080ull
+#define NIC2_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x5523100ull
+#define NIC2_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x5523180ull
+#define NIC2_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_3_SPECIAL_BASE 0x5523E80ull
+#define NIC2_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_4_UNSECURE_DOORBELL0_BASE 0x5524000ull
+#define NIC2_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_4_UNSECURE_DOORBELL1_BASE 0x5524080ull
+#define NIC2_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x5524100ull
+#define NIC2_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x5524180ull
+#define NIC2_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_4_SPECIAL_BASE 0x5524E80ull
+#define NIC2_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_5_UNSECURE_DOORBELL0_BASE 0x5525000ull
+#define NIC2_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_5_UNSECURE_DOORBELL1_BASE 0x5525080ull
+#define NIC2_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x5525100ull
+#define NIC2_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x5525180ull
+#define NIC2_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_5_SPECIAL_BASE 0x5525E80ull
+#define NIC2_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_6_UNSECURE_DOORBELL0_BASE 0x5526000ull
+#define NIC2_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_6_UNSECURE_DOORBELL1_BASE 0x5526080ull
+#define NIC2_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x5526100ull
+#define NIC2_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x5526180ull
+#define NIC2_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_6_SPECIAL_BASE 0x5526E80ull
+#define NIC2_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_7_UNSECURE_DOORBELL0_BASE 0x5527000ull
+#define NIC2_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_7_UNSECURE_DOORBELL1_BASE 0x5527080ull
+#define NIC2_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x5527100ull
+#define NIC2_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x5527180ull
+#define NIC2_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_7_SPECIAL_BASE 0x5527E80ull
+#define NIC2_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_8_UNSECURE_DOORBELL0_BASE 0x5528000ull
+#define NIC2_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_8_UNSECURE_DOORBELL1_BASE 0x5528080ull
+#define NIC2_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x5528100ull
+#define NIC2_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x5528180ull
+#define NIC2_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_8_SPECIAL_BASE 0x5528E80ull
+#define NIC2_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_9_UNSECURE_DOORBELL0_BASE 0x5529000ull
+#define NIC2_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_9_UNSECURE_DOORBELL1_BASE 0x5529080ull
+#define NIC2_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x5529100ull
+#define NIC2_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x5529180ull
+#define NIC2_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_9_SPECIAL_BASE 0x5529E80ull
+#define NIC2_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_10_UNSECURE_DOORBELL0_BASE 0x552A000ull
+#define NIC2_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_10_UNSECURE_DOORBELL1_BASE 0x552A080ull
+#define NIC2_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x552A100ull
+#define NIC2_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x552A180ull
+#define NIC2_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_10_SPECIAL_BASE 0x552AE80ull
+#define NIC2_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_11_UNSECURE_DOORBELL0_BASE 0x552B000ull
+#define NIC2_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_11_UNSECURE_DOORBELL1_BASE 0x552B080ull
+#define NIC2_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x552B100ull
+#define NIC2_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x552B180ull
+#define NIC2_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_11_SPECIAL_BASE 0x552BE80ull
+#define NIC2_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_12_UNSECURE_DOORBELL0_BASE 0x552C000ull
+#define NIC2_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_12_UNSECURE_DOORBELL1_BASE 0x552C080ull
+#define NIC2_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x552C100ull
+#define NIC2_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x552C180ull
+#define NIC2_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_12_SPECIAL_BASE 0x552CE80ull
+#define NIC2_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_13_UNSECURE_DOORBELL0_BASE 0x552D000ull
+#define NIC2_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_13_UNSECURE_DOORBELL1_BASE 0x552D080ull
+#define NIC2_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x552D100ull
+#define NIC2_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x552D180ull
+#define NIC2_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_13_SPECIAL_BASE 0x552DE80ull
+#define NIC2_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC2_UMR1_14_UNSECURE_DOORBELL0_BASE 0x552E000ull
+#define NIC2_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC2_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC2_UMR1_14_UNSECURE_DOORBELL1_BASE 0x552E080ull
+#define NIC2_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC2_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC2_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x552E100ull
+#define NIC2_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC2_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC2_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x552E180ull
+#define NIC2_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC2_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC2_UMR1_14_SPECIAL_BASE 0x552EE80ull
+#define NIC2_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC2_QM_DCCM1_BASE 0x5530000ull
+#define NIC2_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC2_QM_DCCM1_SECTION 0x8000
+#define mmNIC2_QM_ARC_AUX1_BASE 0x5538000ull
+#define NIC2_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC2_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC2_QM_ARC_AUX1_SPECIAL_BASE 0x5538E80ull
+#define NIC2_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC2_QM1_BASE 0x553A000ull
+#define NIC2_QM1_MAX_OFFSET 0x1000
+#define NIC2_QM1_SECTION 0x9000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x553A900ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x553A908ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x553A910ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x553A918ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x553A920ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x553A928ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x553A930ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x553A938ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x553A940ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x553A948ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x553A950ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x553A958ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x553A960ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x553A968ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x553A970ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC2_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x553A978ull
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC2_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC2_QM1_AXUSER_SECURED_BASE 0x553AB00ull
+#define NIC2_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC2_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC2_QM1_AXUSER_NONSECURED_BASE 0x553AB80ull
+#define NIC2_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC2_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC2_QM1_DBG_HBW_BASE 0x553AC00ull
+#define NIC2_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC2_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC2_QM1_DBG_LBW_BASE 0x553AC80ull
+#define NIC2_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC2_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC2_QM1_CGM_BASE 0x553AD80ull
+#define NIC2_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC2_QM1_CGM_SECTION 0x1000
+#define mmNIC2_QM1_SPECIAL_BASE 0x553AE80ull
+#define NIC2_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC2_QPC1_BASE 0x553F000ull
+#define NIC2_QPC1_MAX_OFFSET 0x1000
+#define NIC2_QPC1_SECTION 0x7200
+#define mmNIC2_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x553F720ull
+#define NIC2_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x553F728ull
+#define NIC2_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x553F730ull
+#define NIC2_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x553F738ull
+#define NIC2_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x553F740ull
+#define NIC2_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x553F748ull
+#define NIC2_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x553F750ull
+#define NIC2_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x553F758ull
+#define NIC2_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x553F760ull
+#define NIC2_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x553F768ull
+#define NIC2_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x553F770ull
+#define NIC2_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x553F778ull
+#define NIC2_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x553F780ull
+#define NIC2_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x553F788ull
+#define NIC2_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x553F790ull
+#define NIC2_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x553F798ull
+#define NIC2_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x553F7A0ull
+#define NIC2_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x553F7A8ull
+#define NIC2_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x553F7B0ull
+#define NIC2_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x553F7B8ull
+#define NIC2_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x553F7C0ull
+#define NIC2_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x553F7C8ull
+#define NIC2_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x553F7D0ull
+#define NIC2_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x553F7D8ull
+#define NIC2_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x553F7E0ull
+#define NIC2_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x553F7E8ull
+#define NIC2_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x553F7F0ull
+#define NIC2_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x553F7F8ull
+#define NIC2_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x553F800ull
+#define NIC2_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x553F808ull
+#define NIC2_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x553F810ull
+#define NIC2_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC2_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x553F818ull
+#define NIC2_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC2_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC2_QPC1_AXUSER_CONG_QUE_BASE 0x553FB80ull
+#define NIC2_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_RXWQE_BASE 0x553FBE0ull
+#define NIC2_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x553FC40ull
+#define NIC2_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_DB_FIFO_BASE 0x553FCA0ull
+#define NIC2_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x553FD00ull
+#define NIC2_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_ERR_FIFO_BASE 0x553FD60ull
+#define NIC2_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_QPC_RESP_BASE 0x553FDC0ull
+#define NIC2_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC2_QPC1_AXUSER_QPC_REQ_BASE 0x553FE20ull
+#define NIC2_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC2_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC2_QPC1_SPECIAL_BASE 0x553FE80ull
+#define NIC2_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC2_TMR_BASE 0x5548000ull
+#define NIC2_TMR_MAX_OFFSET 0x1000
+#define NIC2_TMR_SECTION 0xD600
+#define mmNIC2_TMR_AXUSER_TMR_FREE_LIST_BASE 0x5548D60ull
+#define NIC2_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC2_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC2_TMR_AXUSER_TMR_FIFO_BASE 0x5548DC0ull
+#define NIC2_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC2_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC2_TMR_AXUSER_TMR_FSM_BASE 0x5548E20ull
+#define NIC2_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC2_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC2_TMR_SPECIAL_BASE 0x5548E80ull
+#define NIC2_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC2_RXB_CORE_BASE 0x5549000ull
+#define NIC2_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC2_RXB_CORE_SECTION 0x6100
+#define mmNIC2_RXB_CORE_SCT_AWUSER_BASE 0x5549610ull
+#define NIC2_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC2_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC2_RXB_CORE_SPECIAL_BASE 0x5549E80ull
+#define NIC2_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC2_RXE0_BASE 0x554A000ull
+#define NIC2_RXE0_MAX_OFFSET 0x1000
+#define NIC2_RXE0_SECTION 0x9000
+#define mmNIC2_RXE0_WQE_ARUSER_BASE 0x554A900ull
+#define NIC2_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC2_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC2_RXE0_SPECIAL_BASE 0x554AE80ull
+#define NIC2_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC2_RXE1_BASE 0x554B000ull
+#define NIC2_RXE1_MAX_OFFSET 0x1000
+#define NIC2_RXE1_SECTION 0x9000
+#define mmNIC2_RXE1_WQE_ARUSER_BASE 0x554B900ull
+#define NIC2_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC2_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC2_RXE1_SPECIAL_BASE 0x554BE80ull
+#define NIC2_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ0_BASE 0x554C000ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ1_BASE 0x554C050ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ2_BASE 0x554C0A0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ3_BASE 0x554C0F0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ4_BASE 0x554C140ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ5_BASE 0x554C190ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ6_BASE 0x554C1E0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ7_BASE 0x554C230ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ8_BASE 0x554C280ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ9_BASE 0x554C2D0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ10_BASE 0x554C320ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ11_BASE 0x554C370ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ12_BASE 0x554C3C0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ13_BASE 0x554C410ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ14_BASE 0x554C460ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ15_BASE 0x554C4B0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ16_BASE 0x554C500ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ17_BASE 0x554C550ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ18_BASE 0x554C5A0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ19_BASE 0x554C5F0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ20_BASE 0x554C640ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ21_BASE 0x554C690ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ22_BASE 0x554C6E0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ23_BASE 0x554C730ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ24_BASE 0x554C780ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ25_BASE 0x554C7D0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ26_BASE 0x554C820ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ27_BASE 0x554C870ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ28_BASE 0x554C8C0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ29_BASE 0x554C910ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ30_BASE 0x554C960ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC2_RXE0_AXUSER_AXUSER_CQ31_BASE 0x554C9B0ull
+#define NIC2_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC2_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC2_RXE0_AXUSER_SPECIAL_BASE 0x554CE80ull
+#define NIC2_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ0_BASE 0x554D000ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ1_BASE 0x554D050ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ2_BASE 0x554D0A0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ3_BASE 0x554D0F0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ4_BASE 0x554D140ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ5_BASE 0x554D190ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ6_BASE 0x554D1E0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ7_BASE 0x554D230ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ8_BASE 0x554D280ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ9_BASE 0x554D2D0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ10_BASE 0x554D320ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ11_BASE 0x554D370ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ12_BASE 0x554D3C0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ13_BASE 0x554D410ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ14_BASE 0x554D460ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ15_BASE 0x554D4B0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ16_BASE 0x554D500ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ17_BASE 0x554D550ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ18_BASE 0x554D5A0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ19_BASE 0x554D5F0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ20_BASE 0x554D640ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ21_BASE 0x554D690ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ22_BASE 0x554D6E0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ23_BASE 0x554D730ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ24_BASE 0x554D780ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ25_BASE 0x554D7D0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ26_BASE 0x554D820ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ27_BASE 0x554D870ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ28_BASE 0x554D8C0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ29_BASE 0x554D910ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ30_BASE 0x554D960ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC2_RXE1_AXUSER_AXUSER_CQ31_BASE 0x554D9B0ull
+#define NIC2_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC2_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC2_RXE1_AXUSER_SPECIAL_BASE 0x554DE80ull
+#define NIC2_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC2_TXS0_BASE 0x5550000ull
+#define NIC2_TXS0_MAX_OFFSET 0x1000
+#define NIC2_TXS0_SECTION 0xE800
+#define mmNIC2_TXS0_SPECIAL_BASE 0x5550E80ull
+#define NIC2_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC2_TXS1_BASE 0x5551000ull
+#define NIC2_TXS1_MAX_OFFSET 0x1000
+#define NIC2_TXS1_SECTION 0xE800
+#define mmNIC2_TXS1_SPECIAL_BASE 0x5551E80ull
+#define NIC2_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC2_TXE0_BASE 0x5552000ull
+#define NIC2_TXE0_MAX_OFFSET 0x1000
+#define NIC2_TXE0_SECTION 0xE800
+#define mmNIC2_TXE0_SPECIAL_BASE 0x5552E80ull
+#define NIC2_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC2_TXE1_BASE 0x5553000ull
+#define NIC2_TXE1_MAX_OFFSET 0x1000
+#define NIC2_TXE1_SECTION 0xE800
+#define mmNIC2_TXE1_SPECIAL_BASE 0x5553E80ull
+#define NIC2_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC2_TXB_BASE 0x5554000ull
+#define NIC2_TXB_MAX_OFFSET 0x1000
+#define NIC2_TXB_SECTION 0xE800
+#define mmNIC2_TXB_SPECIAL_BASE 0x5554E80ull
+#define NIC2_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC2_MSTR_IF_RR_SHRD_HBW_BASE 0x5555000ull
+#define NIC2_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC2_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC2_MSTR_IF_RR_PRVT_HBW_BASE 0x5555200ull
+#define NIC2_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC2_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC2_MSTR_IF_RR_SHRD_LBW_BASE 0x5555400ull
+#define NIC2_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC2_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC2_MSTR_IF_RR_PRVT_LBW_BASE 0x5555600ull
+#define NIC2_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC2_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC2_MSTR_IF_E2E_CRDT_BASE 0x5555800ull
+#define NIC2_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC2_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC2_MSTR_IF_AXUSER_BASE 0x5555A80ull
+#define NIC2_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC2_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC2_MSTR_IF_DBG_HBW_BASE 0x5555B00ull
+#define NIC2_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC2_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC2_MSTR_IF_DBG_LBW_BASE 0x5555B80ull
+#define NIC2_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC2_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC2_MSTR_IF_CORE_HBW_BASE 0x5555C00ull
+#define NIC2_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC2_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC2_MSTR_IF_CORE_LBW_BASE 0x5555D80ull
+#define NIC2_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC2_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC2_MSTR_IF_SPECIAL_BASE 0x5555E80ull
+#define NIC2_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC2_TX_AXUSER_BASE 0x5556000ull
+#define NIC2_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC2_TX_AXUSER_SECTION 0x2000
+#define mmNIC2_SERDES0_BASE 0x5558000ull
+#define NIC2_SERDES0_MAX_OFFSET 0x3E40
+#define NIC2_SERDES0_SECTION 0x4000
+#define mmNIC2_SERDES1_BASE 0x555C000ull
+#define NIC2_SERDES1_MAX_OFFSET 0x3E40
+#define NIC2_SERDES1_SECTION 0x4000
+#define mmNIC2_PHY_BASE 0x5560000ull
+#define NIC2_PHY_MAX_OFFSET 0x1000
+#define NIC2_PHY_SECTION 0xE800
+#define mmNIC2_PHY_SPECIAL_BASE 0x5560E80ull
+#define NIC2_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC2_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT2_MAC_AUX_BASE 0x5568000ull
+#define PRT2_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT2_MAC_AUX_SECTION 0xE800
+#define mmPRT2_MAC_AUX_SPECIAL_BASE 0x5568E80ull
+#define PRT2_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT2_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT2_MAC_CORE_BASE 0x5569000ull
+#define PRT2_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT2_MAC_CORE_SECTION 0xE800
+#define mmPRT2_MAC_CORE_SPECIAL_BASE 0x5569E80ull
+#define PRT2_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT2_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC2_MAC_RS_FEC_BASE 0x556A000ull
+#define NIC2_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC2_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC2_MAC_GLOB_STAT_CONTROL_REG_BASE 0x556B000ull
+#define NIC2_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC2_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC2_MAC_GLOB_STAT_RX0_BASE 0x556B100ull
+#define NIC2_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC2_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC2_MAC_GLOB_STAT_RX1_BASE 0x556B18Cull
+#define NIC2_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC2_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC2_MAC_GLOB_STAT_RX2_BASE 0x556B218ull
+#define NIC2_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC2_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC2_MAC_GLOB_STAT_RX3_BASE 0x556B2A4ull
+#define NIC2_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC2_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC2_MAC_GLOB_STAT_TX0_BASE 0x556B330ull
+#define NIC2_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC2_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC2_MAC_GLOB_STAT_TX1_BASE 0x556B398ull
+#define NIC2_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC2_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC2_MAC_GLOB_STAT_TX2_BASE 0x556B400ull
+#define NIC2_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC2_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC2_MAC_GLOB_STAT_TX3_BASE 0x556B468ull
+#define NIC2_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC2_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC2_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x556B800ull
+#define NIC2_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC2_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC2_MAC_CH0_MAC_PCS_BASE 0x556C000ull
+#define NIC2_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC2_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC2_MAC_CH0_MAC_128_BASE 0x556C400ull
+#define NIC2_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC2_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC2_MAC_CH0_MAC_AN_BASE 0x556C800ull
+#define NIC2_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC2_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC2_MAC_CH1_MAC_PCS_BASE 0x556D000ull
+#define NIC2_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC2_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC2_MAC_CH1_MAC_128_BASE 0x556D400ull
+#define NIC2_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC2_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC2_MAC_CH1_MAC_AN_BASE 0x556D800ull
+#define NIC2_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC2_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC2_MAC_CH2_MAC_PCS_BASE 0x556E000ull
+#define NIC2_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC2_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC2_MAC_CH2_MAC_128_BASE 0x556E400ull
+#define NIC2_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC2_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC2_MAC_CH2_MAC_AN_BASE 0x556E800ull
+#define NIC2_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC2_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC2_MAC_CH3_MAC_PCS_BASE 0x556F000ull
+#define NIC2_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC2_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC2_MAC_CH3_MAC_128_BASE 0x556F400ull
+#define NIC2_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC2_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC2_MAC_CH3_MAC_AN_BASE 0x556F800ull
+#define NIC2_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC2_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC3_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5580000ull
+#define NIC3_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5580080ull
+#define NIC3_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5580100ull
+#define NIC3_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5580180ull
+#define NIC3_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_0_SPECIAL_BASE 0x5580E80ull
+#define NIC3_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5581000ull
+#define NIC3_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5581080ull
+#define NIC3_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5581100ull
+#define NIC3_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5581180ull
+#define NIC3_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_1_SPECIAL_BASE 0x5581E80ull
+#define NIC3_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5582000ull
+#define NIC3_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5582080ull
+#define NIC3_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5582100ull
+#define NIC3_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5582180ull
+#define NIC3_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_2_SPECIAL_BASE 0x5582E80ull
+#define NIC3_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5583000ull
+#define NIC3_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5583080ull
+#define NIC3_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5583100ull
+#define NIC3_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5583180ull
+#define NIC3_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_3_SPECIAL_BASE 0x5583E80ull
+#define NIC3_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5584000ull
+#define NIC3_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5584080ull
+#define NIC3_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5584100ull
+#define NIC3_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5584180ull
+#define NIC3_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_4_SPECIAL_BASE 0x5584E80ull
+#define NIC3_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5585000ull
+#define NIC3_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5585080ull
+#define NIC3_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5585100ull
+#define NIC3_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5585180ull
+#define NIC3_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_5_SPECIAL_BASE 0x5585E80ull
+#define NIC3_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5586000ull
+#define NIC3_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5586080ull
+#define NIC3_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5586100ull
+#define NIC3_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5586180ull
+#define NIC3_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_6_SPECIAL_BASE 0x5586E80ull
+#define NIC3_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5587000ull
+#define NIC3_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5587080ull
+#define NIC3_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5587100ull
+#define NIC3_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5587180ull
+#define NIC3_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_7_SPECIAL_BASE 0x5587E80ull
+#define NIC3_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5588000ull
+#define NIC3_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5588080ull
+#define NIC3_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5588100ull
+#define NIC3_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5588180ull
+#define NIC3_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_8_SPECIAL_BASE 0x5588E80ull
+#define NIC3_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5589000ull
+#define NIC3_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5589080ull
+#define NIC3_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5589100ull
+#define NIC3_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5589180ull
+#define NIC3_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_9_SPECIAL_BASE 0x5589E80ull
+#define NIC3_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_10_UNSECURE_DOORBELL0_BASE 0x558A000ull
+#define NIC3_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_10_UNSECURE_DOORBELL1_BASE 0x558A080ull
+#define NIC3_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x558A100ull
+#define NIC3_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x558A180ull
+#define NIC3_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_10_SPECIAL_BASE 0x558AE80ull
+#define NIC3_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_11_UNSECURE_DOORBELL0_BASE 0x558B000ull
+#define NIC3_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_11_UNSECURE_DOORBELL1_BASE 0x558B080ull
+#define NIC3_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x558B100ull
+#define NIC3_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x558B180ull
+#define NIC3_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_11_SPECIAL_BASE 0x558BE80ull
+#define NIC3_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_12_UNSECURE_DOORBELL0_BASE 0x558C000ull
+#define NIC3_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_12_UNSECURE_DOORBELL1_BASE 0x558C080ull
+#define NIC3_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x558C100ull
+#define NIC3_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x558C180ull
+#define NIC3_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_12_SPECIAL_BASE 0x558CE80ull
+#define NIC3_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_13_UNSECURE_DOORBELL0_BASE 0x558D000ull
+#define NIC3_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_13_UNSECURE_DOORBELL1_BASE 0x558D080ull
+#define NIC3_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x558D100ull
+#define NIC3_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x558D180ull
+#define NIC3_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_13_SPECIAL_BASE 0x558DE80ull
+#define NIC3_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR0_14_UNSECURE_DOORBELL0_BASE 0x558E000ull
+#define NIC3_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR0_14_UNSECURE_DOORBELL1_BASE 0x558E080ull
+#define NIC3_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x558E100ull
+#define NIC3_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x558E180ull
+#define NIC3_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR0_14_SPECIAL_BASE 0x558EE80ull
+#define NIC3_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC3_QM_DCCM0_BASE 0x5590000ull
+#define NIC3_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC3_QM_DCCM0_SECTION 0x8000
+#define mmNIC3_QM_ARC_AUX0_BASE 0x5598000ull
+#define NIC3_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC3_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC3_QM_ARC_AUX0_SPECIAL_BASE 0x5598E80ull
+#define NIC3_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC3_QM0_BASE 0x559A000ull
+#define NIC3_QM0_MAX_OFFSET 0x1000
+#define NIC3_QM0_SECTION 0x9000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x559A900ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x559A908ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x559A910ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x559A918ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x559A920ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x559A928ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x559A930ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x559A938ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x559A940ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x559A948ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x559A950ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x559A958ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x559A960ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x559A968ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x559A970ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC3_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x559A978ull
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC3_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC3_QM0_AXUSER_SECURED_BASE 0x559AB00ull
+#define NIC3_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC3_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC3_QM0_AXUSER_NONSECURED_BASE 0x559AB80ull
+#define NIC3_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC3_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC3_QM0_DBG_HBW_BASE 0x559AC00ull
+#define NIC3_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC3_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC3_QM0_DBG_LBW_BASE 0x559AC80ull
+#define NIC3_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC3_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC3_QM0_CGM_BASE 0x559AD80ull
+#define NIC3_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC3_QM0_CGM_SECTION 0x1000
+#define mmNIC3_QM0_SPECIAL_BASE 0x559AE80ull
+#define NIC3_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC3_QPC0_BASE 0x559F000ull
+#define NIC3_QPC0_MAX_OFFSET 0x1000
+#define NIC3_QPC0_SECTION 0x7200
+#define mmNIC3_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x559F720ull
+#define NIC3_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x559F728ull
+#define NIC3_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x559F730ull
+#define NIC3_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x559F738ull
+#define NIC3_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x559F740ull
+#define NIC3_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x559F748ull
+#define NIC3_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x559F750ull
+#define NIC3_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x559F758ull
+#define NIC3_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x559F760ull
+#define NIC3_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x559F768ull
+#define NIC3_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x559F770ull
+#define NIC3_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x559F778ull
+#define NIC3_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x559F780ull
+#define NIC3_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x559F788ull
+#define NIC3_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x559F790ull
+#define NIC3_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x559F798ull
+#define NIC3_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x559F7A0ull
+#define NIC3_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x559F7A8ull
+#define NIC3_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x559F7B0ull
+#define NIC3_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x559F7B8ull
+#define NIC3_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x559F7C0ull
+#define NIC3_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x559F7C8ull
+#define NIC3_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x559F7D0ull
+#define NIC3_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x559F7D8ull
+#define NIC3_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x559F7E0ull
+#define NIC3_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x559F7E8ull
+#define NIC3_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x559F7F0ull
+#define NIC3_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x559F7F8ull
+#define NIC3_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x559F800ull
+#define NIC3_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x559F808ull
+#define NIC3_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x559F810ull
+#define NIC3_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x559F818ull
+#define NIC3_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC3_QPC0_AXUSER_CONG_QUE_BASE 0x559FB80ull
+#define NIC3_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_RXWQE_BASE 0x559FBE0ull
+#define NIC3_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x559FC40ull
+#define NIC3_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_DB_FIFO_BASE 0x559FCA0ull
+#define NIC3_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x559FD00ull
+#define NIC3_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_ERR_FIFO_BASE 0x559FD60ull
+#define NIC3_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_QPC_RESP_BASE 0x559FDC0ull
+#define NIC3_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC3_QPC0_AXUSER_QPC_REQ_BASE 0x559FE20ull
+#define NIC3_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC3_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC3_QPC0_SPECIAL_BASE 0x559FE80ull
+#define NIC3_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_0_UNSECURE_DOORBELL0_BASE 0x55A0000ull
+#define NIC3_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_0_UNSECURE_DOORBELL1_BASE 0x55A0080ull
+#define NIC3_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x55A0100ull
+#define NIC3_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x55A0180ull
+#define NIC3_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_0_SPECIAL_BASE 0x55A0E80ull
+#define NIC3_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_1_UNSECURE_DOORBELL0_BASE 0x55A1000ull
+#define NIC3_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_1_UNSECURE_DOORBELL1_BASE 0x55A1080ull
+#define NIC3_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x55A1100ull
+#define NIC3_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x55A1180ull
+#define NIC3_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_1_SPECIAL_BASE 0x55A1E80ull
+#define NIC3_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_2_UNSECURE_DOORBELL0_BASE 0x55A2000ull
+#define NIC3_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_2_UNSECURE_DOORBELL1_BASE 0x55A2080ull
+#define NIC3_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x55A2100ull
+#define NIC3_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x55A2180ull
+#define NIC3_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_2_SPECIAL_BASE 0x55A2E80ull
+#define NIC3_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_3_UNSECURE_DOORBELL0_BASE 0x55A3000ull
+#define NIC3_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_3_UNSECURE_DOORBELL1_BASE 0x55A3080ull
+#define NIC3_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x55A3100ull
+#define NIC3_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x55A3180ull
+#define NIC3_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_3_SPECIAL_BASE 0x55A3E80ull
+#define NIC3_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_4_UNSECURE_DOORBELL0_BASE 0x55A4000ull
+#define NIC3_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_4_UNSECURE_DOORBELL1_BASE 0x55A4080ull
+#define NIC3_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x55A4100ull
+#define NIC3_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x55A4180ull
+#define NIC3_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_4_SPECIAL_BASE 0x55A4E80ull
+#define NIC3_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_5_UNSECURE_DOORBELL0_BASE 0x55A5000ull
+#define NIC3_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_5_UNSECURE_DOORBELL1_BASE 0x55A5080ull
+#define NIC3_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x55A5100ull
+#define NIC3_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x55A5180ull
+#define NIC3_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_5_SPECIAL_BASE 0x55A5E80ull
+#define NIC3_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_6_UNSECURE_DOORBELL0_BASE 0x55A6000ull
+#define NIC3_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_6_UNSECURE_DOORBELL1_BASE 0x55A6080ull
+#define NIC3_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x55A6100ull
+#define NIC3_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x55A6180ull
+#define NIC3_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_6_SPECIAL_BASE 0x55A6E80ull
+#define NIC3_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_7_UNSECURE_DOORBELL0_BASE 0x55A7000ull
+#define NIC3_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_7_UNSECURE_DOORBELL1_BASE 0x55A7080ull
+#define NIC3_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x55A7100ull
+#define NIC3_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x55A7180ull
+#define NIC3_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_7_SPECIAL_BASE 0x55A7E80ull
+#define NIC3_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_8_UNSECURE_DOORBELL0_BASE 0x55A8000ull
+#define NIC3_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_8_UNSECURE_DOORBELL1_BASE 0x55A8080ull
+#define NIC3_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x55A8100ull
+#define NIC3_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x55A8180ull
+#define NIC3_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_8_SPECIAL_BASE 0x55A8E80ull
+#define NIC3_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_9_UNSECURE_DOORBELL0_BASE 0x55A9000ull
+#define NIC3_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_9_UNSECURE_DOORBELL1_BASE 0x55A9080ull
+#define NIC3_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x55A9100ull
+#define NIC3_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x55A9180ull
+#define NIC3_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_9_SPECIAL_BASE 0x55A9E80ull
+#define NIC3_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_10_UNSECURE_DOORBELL0_BASE 0x55AA000ull
+#define NIC3_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_10_UNSECURE_DOORBELL1_BASE 0x55AA080ull
+#define NIC3_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x55AA100ull
+#define NIC3_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x55AA180ull
+#define NIC3_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_10_SPECIAL_BASE 0x55AAE80ull
+#define NIC3_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_11_UNSECURE_DOORBELL0_BASE 0x55AB000ull
+#define NIC3_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_11_UNSECURE_DOORBELL1_BASE 0x55AB080ull
+#define NIC3_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x55AB100ull
+#define NIC3_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x55AB180ull
+#define NIC3_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_11_SPECIAL_BASE 0x55ABE80ull
+#define NIC3_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_12_UNSECURE_DOORBELL0_BASE 0x55AC000ull
+#define NIC3_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_12_UNSECURE_DOORBELL1_BASE 0x55AC080ull
+#define NIC3_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x55AC100ull
+#define NIC3_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x55AC180ull
+#define NIC3_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_12_SPECIAL_BASE 0x55ACE80ull
+#define NIC3_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_13_UNSECURE_DOORBELL0_BASE 0x55AD000ull
+#define NIC3_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_13_UNSECURE_DOORBELL1_BASE 0x55AD080ull
+#define NIC3_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x55AD100ull
+#define NIC3_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x55AD180ull
+#define NIC3_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_13_SPECIAL_BASE 0x55ADE80ull
+#define NIC3_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC3_UMR1_14_UNSECURE_DOORBELL0_BASE 0x55AE000ull
+#define NIC3_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC3_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC3_UMR1_14_UNSECURE_DOORBELL1_BASE 0x55AE080ull
+#define NIC3_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC3_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC3_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x55AE100ull
+#define NIC3_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC3_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC3_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x55AE180ull
+#define NIC3_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC3_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC3_UMR1_14_SPECIAL_BASE 0x55AEE80ull
+#define NIC3_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC3_QM_DCCM1_BASE 0x55B0000ull
+#define NIC3_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC3_QM_DCCM1_SECTION 0x8000
+#define mmNIC3_QM_ARC_AUX1_BASE 0x55B8000ull
+#define NIC3_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC3_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC3_QM_ARC_AUX1_SPECIAL_BASE 0x55B8E80ull
+#define NIC3_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC3_QM1_BASE 0x55BA000ull
+#define NIC3_QM1_MAX_OFFSET 0x1000
+#define NIC3_QM1_SECTION 0x9000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x55BA900ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x55BA908ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x55BA910ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x55BA918ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x55BA920ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x55BA928ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x55BA930ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x55BA938ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x55BA940ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x55BA948ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x55BA950ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x55BA958ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x55BA960ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x55BA968ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x55BA970ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC3_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x55BA978ull
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC3_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC3_QM1_AXUSER_SECURED_BASE 0x55BAB00ull
+#define NIC3_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC3_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC3_QM1_AXUSER_NONSECURED_BASE 0x55BAB80ull
+#define NIC3_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC3_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC3_QM1_DBG_HBW_BASE 0x55BAC00ull
+#define NIC3_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC3_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC3_QM1_DBG_LBW_BASE 0x55BAC80ull
+#define NIC3_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC3_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC3_QM1_CGM_BASE 0x55BAD80ull
+#define NIC3_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC3_QM1_CGM_SECTION 0x1000
+#define mmNIC3_QM1_SPECIAL_BASE 0x55BAE80ull
+#define NIC3_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC3_QPC1_BASE 0x55BF000ull
+#define NIC3_QPC1_MAX_OFFSET 0x1000
+#define NIC3_QPC1_SECTION 0x7200
+#define mmNIC3_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x55BF720ull
+#define NIC3_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x55BF728ull
+#define NIC3_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x55BF730ull
+#define NIC3_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x55BF738ull
+#define NIC3_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x55BF740ull
+#define NIC3_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x55BF748ull
+#define NIC3_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x55BF750ull
+#define NIC3_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x55BF758ull
+#define NIC3_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x55BF760ull
+#define NIC3_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x55BF768ull
+#define NIC3_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x55BF770ull
+#define NIC3_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x55BF778ull
+#define NIC3_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x55BF780ull
+#define NIC3_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x55BF788ull
+#define NIC3_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x55BF790ull
+#define NIC3_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x55BF798ull
+#define NIC3_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x55BF7A0ull
+#define NIC3_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x55BF7A8ull
+#define NIC3_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x55BF7B0ull
+#define NIC3_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x55BF7B8ull
+#define NIC3_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x55BF7C0ull
+#define NIC3_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x55BF7C8ull
+#define NIC3_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x55BF7D0ull
+#define NIC3_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x55BF7D8ull
+#define NIC3_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x55BF7E0ull
+#define NIC3_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x55BF7E8ull
+#define NIC3_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x55BF7F0ull
+#define NIC3_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x55BF7F8ull
+#define NIC3_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x55BF800ull
+#define NIC3_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x55BF808ull
+#define NIC3_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x55BF810ull
+#define NIC3_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC3_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x55BF818ull
+#define NIC3_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC3_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC3_QPC1_AXUSER_CONG_QUE_BASE 0x55BFB80ull
+#define NIC3_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_RXWQE_BASE 0x55BFBE0ull
+#define NIC3_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x55BFC40ull
+#define NIC3_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_DB_FIFO_BASE 0x55BFCA0ull
+#define NIC3_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x55BFD00ull
+#define NIC3_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_ERR_FIFO_BASE 0x55BFD60ull
+#define NIC3_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_QPC_RESP_BASE 0x55BFDC0ull
+#define NIC3_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC3_QPC1_AXUSER_QPC_REQ_BASE 0x55BFE20ull
+#define NIC3_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC3_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC3_QPC1_SPECIAL_BASE 0x55BFE80ull
+#define NIC3_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC3_TMR_BASE 0x55C8000ull
+#define NIC3_TMR_MAX_OFFSET 0x1000
+#define NIC3_TMR_SECTION 0xD600
+#define mmNIC3_TMR_AXUSER_TMR_FREE_LIST_BASE 0x55C8D60ull
+#define NIC3_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC3_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC3_TMR_AXUSER_TMR_FIFO_BASE 0x55C8DC0ull
+#define NIC3_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC3_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC3_TMR_AXUSER_TMR_FSM_BASE 0x55C8E20ull
+#define NIC3_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC3_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC3_TMR_SPECIAL_BASE 0x55C8E80ull
+#define NIC3_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC3_RXB_CORE_BASE 0x55C9000ull
+#define NIC3_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC3_RXB_CORE_SECTION 0x6100
+#define mmNIC3_RXB_CORE_SCT_AWUSER_BASE 0x55C9610ull
+#define NIC3_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC3_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC3_RXB_CORE_SPECIAL_BASE 0x55C9E80ull
+#define NIC3_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC3_RXE0_BASE 0x55CA000ull
+#define NIC3_RXE0_MAX_OFFSET 0x1000
+#define NIC3_RXE0_SECTION 0x9000
+#define mmNIC3_RXE0_WQE_ARUSER_BASE 0x55CA900ull
+#define NIC3_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC3_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC3_RXE0_SPECIAL_BASE 0x55CAE80ull
+#define NIC3_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC3_RXE1_BASE 0x55CB000ull
+#define NIC3_RXE1_MAX_OFFSET 0x1000
+#define NIC3_RXE1_SECTION 0x9000
+#define mmNIC3_RXE1_WQE_ARUSER_BASE 0x55CB900ull
+#define NIC3_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC3_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC3_RXE1_SPECIAL_BASE 0x55CBE80ull
+#define NIC3_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ0_BASE 0x55CC000ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ1_BASE 0x55CC050ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ2_BASE 0x55CC0A0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ3_BASE 0x55CC0F0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ4_BASE 0x55CC140ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ5_BASE 0x55CC190ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ6_BASE 0x55CC1E0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ7_BASE 0x55CC230ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ8_BASE 0x55CC280ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ9_BASE 0x55CC2D0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ10_BASE 0x55CC320ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ11_BASE 0x55CC370ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ12_BASE 0x55CC3C0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ13_BASE 0x55CC410ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ14_BASE 0x55CC460ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ15_BASE 0x55CC4B0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ16_BASE 0x55CC500ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ17_BASE 0x55CC550ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ18_BASE 0x55CC5A0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ19_BASE 0x55CC5F0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ20_BASE 0x55CC640ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ21_BASE 0x55CC690ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ22_BASE 0x55CC6E0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ23_BASE 0x55CC730ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ24_BASE 0x55CC780ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ25_BASE 0x55CC7D0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ26_BASE 0x55CC820ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ27_BASE 0x55CC870ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ28_BASE 0x55CC8C0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ29_BASE 0x55CC910ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ30_BASE 0x55CC960ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC3_RXE0_AXUSER_AXUSER_CQ31_BASE 0x55CC9B0ull
+#define NIC3_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC3_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC3_RXE0_AXUSER_SPECIAL_BASE 0x55CCE80ull
+#define NIC3_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ0_BASE 0x55CD000ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ1_BASE 0x55CD050ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ2_BASE 0x55CD0A0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ3_BASE 0x55CD0F0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ4_BASE 0x55CD140ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ5_BASE 0x55CD190ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ6_BASE 0x55CD1E0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ7_BASE 0x55CD230ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ8_BASE 0x55CD280ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ9_BASE 0x55CD2D0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ10_BASE 0x55CD320ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ11_BASE 0x55CD370ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ12_BASE 0x55CD3C0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ13_BASE 0x55CD410ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ14_BASE 0x55CD460ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ15_BASE 0x55CD4B0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ16_BASE 0x55CD500ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ17_BASE 0x55CD550ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ18_BASE 0x55CD5A0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ19_BASE 0x55CD5F0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ20_BASE 0x55CD640ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ21_BASE 0x55CD690ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ22_BASE 0x55CD6E0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ23_BASE 0x55CD730ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ24_BASE 0x55CD780ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ25_BASE 0x55CD7D0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ26_BASE 0x55CD820ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ27_BASE 0x55CD870ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ28_BASE 0x55CD8C0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ29_BASE 0x55CD910ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ30_BASE 0x55CD960ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC3_RXE1_AXUSER_AXUSER_CQ31_BASE 0x55CD9B0ull
+#define NIC3_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC3_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC3_RXE1_AXUSER_SPECIAL_BASE 0x55CDE80ull
+#define NIC3_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC3_TXS0_BASE 0x55D0000ull
+#define NIC3_TXS0_MAX_OFFSET 0x1000
+#define NIC3_TXS0_SECTION 0xE800
+#define mmNIC3_TXS0_SPECIAL_BASE 0x55D0E80ull
+#define NIC3_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC3_TXS1_BASE 0x55D1000ull
+#define NIC3_TXS1_MAX_OFFSET 0x1000
+#define NIC3_TXS1_SECTION 0xE800
+#define mmNIC3_TXS1_SPECIAL_BASE 0x55D1E80ull
+#define NIC3_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC3_TXE0_BASE 0x55D2000ull
+#define NIC3_TXE0_MAX_OFFSET 0x1000
+#define NIC3_TXE0_SECTION 0xE800
+#define mmNIC3_TXE0_SPECIAL_BASE 0x55D2E80ull
+#define NIC3_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC3_TXE1_BASE 0x55D3000ull
+#define NIC3_TXE1_MAX_OFFSET 0x1000
+#define NIC3_TXE1_SECTION 0xE800
+#define mmNIC3_TXE1_SPECIAL_BASE 0x55D3E80ull
+#define NIC3_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC3_TXB_BASE 0x55D4000ull
+#define NIC3_TXB_MAX_OFFSET 0x1000
+#define NIC3_TXB_SECTION 0xE800
+#define mmNIC3_TXB_SPECIAL_BASE 0x55D4E80ull
+#define NIC3_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC3_MSTR_IF_RR_SHRD_HBW_BASE 0x55D5000ull
+#define NIC3_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC3_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC3_MSTR_IF_RR_PRVT_HBW_BASE 0x55D5200ull
+#define NIC3_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC3_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC3_MSTR_IF_RR_SHRD_LBW_BASE 0x55D5400ull
+#define NIC3_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC3_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC3_MSTR_IF_RR_PRVT_LBW_BASE 0x55D5600ull
+#define NIC3_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC3_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC3_MSTR_IF_E2E_CRDT_BASE 0x55D5800ull
+#define NIC3_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC3_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC3_MSTR_IF_AXUSER_BASE 0x55D5A80ull
+#define NIC3_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC3_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC3_MSTR_IF_DBG_HBW_BASE 0x55D5B00ull
+#define NIC3_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC3_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC3_MSTR_IF_DBG_LBW_BASE 0x55D5B80ull
+#define NIC3_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC3_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC3_MSTR_IF_CORE_HBW_BASE 0x55D5C00ull
+#define NIC3_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC3_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC3_MSTR_IF_CORE_LBW_BASE 0x55D5D80ull
+#define NIC3_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC3_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC3_MSTR_IF_SPECIAL_BASE 0x55D5E80ull
+#define NIC3_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC3_TX_AXUSER_BASE 0x55D6000ull
+#define NIC3_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC3_TX_AXUSER_SECTION 0x2000
+#define mmNIC3_SERDES0_BASE 0x55D8000ull
+#define NIC3_SERDES0_MAX_OFFSET 0x3E40
+#define NIC3_SERDES0_SECTION 0x4000
+#define mmNIC3_SERDES1_BASE 0x55DC000ull
+#define NIC3_SERDES1_MAX_OFFSET 0x3E40
+#define NIC3_SERDES1_SECTION 0x4000
+#define mmNIC3_PHY_BASE 0x55E0000ull
+#define NIC3_PHY_MAX_OFFSET 0x1000
+#define NIC3_PHY_SECTION 0xE800
+#define mmNIC3_PHY_SPECIAL_BASE 0x55E0E80ull
+#define NIC3_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC3_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT3_MAC_AUX_BASE 0x55E8000ull
+#define PRT3_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT3_MAC_AUX_SECTION 0xE800
+#define mmPRT3_MAC_AUX_SPECIAL_BASE 0x55E8E80ull
+#define PRT3_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT3_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT3_MAC_CORE_BASE 0x55E9000ull
+#define PRT3_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT3_MAC_CORE_SECTION 0xE800
+#define mmPRT3_MAC_CORE_SPECIAL_BASE 0x55E9E80ull
+#define PRT3_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT3_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC3_MAC_RS_FEC_BASE 0x55EA000ull
+#define NIC3_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC3_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC3_MAC_GLOB_STAT_CONTROL_REG_BASE 0x55EB000ull
+#define NIC3_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC3_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC3_MAC_GLOB_STAT_RX0_BASE 0x55EB100ull
+#define NIC3_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC3_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC3_MAC_GLOB_STAT_RX1_BASE 0x55EB18Cull
+#define NIC3_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC3_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC3_MAC_GLOB_STAT_RX2_BASE 0x55EB218ull
+#define NIC3_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC3_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC3_MAC_GLOB_STAT_RX3_BASE 0x55EB2A4ull
+#define NIC3_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC3_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC3_MAC_GLOB_STAT_TX0_BASE 0x55EB330ull
+#define NIC3_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC3_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC3_MAC_GLOB_STAT_TX1_BASE 0x55EB398ull
+#define NIC3_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC3_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC3_MAC_GLOB_STAT_TX2_BASE 0x55EB400ull
+#define NIC3_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC3_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC3_MAC_GLOB_STAT_TX3_BASE 0x55EB468ull
+#define NIC3_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC3_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC3_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x55EB800ull
+#define NIC3_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC3_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC3_MAC_CH0_MAC_PCS_BASE 0x55EC000ull
+#define NIC3_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC3_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC3_MAC_CH0_MAC_128_BASE 0x55EC400ull
+#define NIC3_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC3_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC3_MAC_CH0_MAC_AN_BASE 0x55EC800ull
+#define NIC3_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC3_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC3_MAC_CH1_MAC_PCS_BASE 0x55ED000ull
+#define NIC3_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC3_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC3_MAC_CH1_MAC_128_BASE 0x55ED400ull
+#define NIC3_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC3_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC3_MAC_CH1_MAC_AN_BASE 0x55ED800ull
+#define NIC3_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC3_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC3_MAC_CH2_MAC_PCS_BASE 0x55EE000ull
+#define NIC3_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC3_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC3_MAC_CH2_MAC_128_BASE 0x55EE400ull
+#define NIC3_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC3_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC3_MAC_CH2_MAC_AN_BASE 0x55EE800ull
+#define NIC3_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC3_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC3_MAC_CH3_MAC_PCS_BASE 0x55EF000ull
+#define NIC3_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC3_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC3_MAC_CH3_MAC_128_BASE 0x55EF400ull
+#define NIC3_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC3_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC3_MAC_CH3_MAC_AN_BASE 0x55EF800ull
+#define NIC3_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC3_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC4_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5600000ull
+#define NIC4_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5600080ull
+#define NIC4_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5600100ull
+#define NIC4_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5600180ull
+#define NIC4_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_0_SPECIAL_BASE 0x5600E80ull
+#define NIC4_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5601000ull
+#define NIC4_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5601080ull
+#define NIC4_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5601100ull
+#define NIC4_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5601180ull
+#define NIC4_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_1_SPECIAL_BASE 0x5601E80ull
+#define NIC4_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5602000ull
+#define NIC4_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5602080ull
+#define NIC4_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5602100ull
+#define NIC4_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5602180ull
+#define NIC4_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_2_SPECIAL_BASE 0x5602E80ull
+#define NIC4_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5603000ull
+#define NIC4_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5603080ull
+#define NIC4_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5603100ull
+#define NIC4_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5603180ull
+#define NIC4_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_3_SPECIAL_BASE 0x5603E80ull
+#define NIC4_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5604000ull
+#define NIC4_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5604080ull
+#define NIC4_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5604100ull
+#define NIC4_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5604180ull
+#define NIC4_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_4_SPECIAL_BASE 0x5604E80ull
+#define NIC4_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5605000ull
+#define NIC4_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5605080ull
+#define NIC4_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5605100ull
+#define NIC4_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5605180ull
+#define NIC4_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_5_SPECIAL_BASE 0x5605E80ull
+#define NIC4_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5606000ull
+#define NIC4_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5606080ull
+#define NIC4_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5606100ull
+#define NIC4_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5606180ull
+#define NIC4_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_6_SPECIAL_BASE 0x5606E80ull
+#define NIC4_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5607000ull
+#define NIC4_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5607080ull
+#define NIC4_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5607100ull
+#define NIC4_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5607180ull
+#define NIC4_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_7_SPECIAL_BASE 0x5607E80ull
+#define NIC4_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5608000ull
+#define NIC4_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5608080ull
+#define NIC4_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5608100ull
+#define NIC4_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5608180ull
+#define NIC4_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_8_SPECIAL_BASE 0x5608E80ull
+#define NIC4_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5609000ull
+#define NIC4_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5609080ull
+#define NIC4_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5609100ull
+#define NIC4_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5609180ull
+#define NIC4_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_9_SPECIAL_BASE 0x5609E80ull
+#define NIC4_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_10_UNSECURE_DOORBELL0_BASE 0x560A000ull
+#define NIC4_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_10_UNSECURE_DOORBELL1_BASE 0x560A080ull
+#define NIC4_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x560A100ull
+#define NIC4_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x560A180ull
+#define NIC4_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_10_SPECIAL_BASE 0x560AE80ull
+#define NIC4_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_11_UNSECURE_DOORBELL0_BASE 0x560B000ull
+#define NIC4_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_11_UNSECURE_DOORBELL1_BASE 0x560B080ull
+#define NIC4_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x560B100ull
+#define NIC4_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x560B180ull
+#define NIC4_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_11_SPECIAL_BASE 0x560BE80ull
+#define NIC4_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_12_UNSECURE_DOORBELL0_BASE 0x560C000ull
+#define NIC4_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_12_UNSECURE_DOORBELL1_BASE 0x560C080ull
+#define NIC4_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x560C100ull
+#define NIC4_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x560C180ull
+#define NIC4_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_12_SPECIAL_BASE 0x560CE80ull
+#define NIC4_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_13_UNSECURE_DOORBELL0_BASE 0x560D000ull
+#define NIC4_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_13_UNSECURE_DOORBELL1_BASE 0x560D080ull
+#define NIC4_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x560D100ull
+#define NIC4_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x560D180ull
+#define NIC4_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_13_SPECIAL_BASE 0x560DE80ull
+#define NIC4_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR0_14_UNSECURE_DOORBELL0_BASE 0x560E000ull
+#define NIC4_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR0_14_UNSECURE_DOORBELL1_BASE 0x560E080ull
+#define NIC4_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x560E100ull
+#define NIC4_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x560E180ull
+#define NIC4_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR0_14_SPECIAL_BASE 0x560EE80ull
+#define NIC4_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC4_QM_DCCM0_BASE 0x5610000ull
+#define NIC4_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC4_QM_DCCM0_SECTION 0x8000
+#define mmNIC4_QM_ARC_AUX0_BASE 0x5618000ull
+#define NIC4_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC4_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC4_QM_ARC_AUX0_SPECIAL_BASE 0x5618E80ull
+#define NIC4_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC4_QM0_BASE 0x561A000ull
+#define NIC4_QM0_MAX_OFFSET 0x1000
+#define NIC4_QM0_SECTION 0x9000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x561A900ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x561A908ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x561A910ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x561A918ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x561A920ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x561A928ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x561A930ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x561A938ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x561A940ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x561A948ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x561A950ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x561A958ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x561A960ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x561A968ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x561A970ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC4_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x561A978ull
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC4_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC4_QM0_AXUSER_SECURED_BASE 0x561AB00ull
+#define NIC4_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC4_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC4_QM0_AXUSER_NONSECURED_BASE 0x561AB80ull
+#define NIC4_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC4_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC4_QM0_DBG_HBW_BASE 0x561AC00ull
+#define NIC4_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC4_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC4_QM0_DBG_LBW_BASE 0x561AC80ull
+#define NIC4_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC4_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC4_QM0_CGM_BASE 0x561AD80ull
+#define NIC4_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC4_QM0_CGM_SECTION 0x1000
+#define mmNIC4_QM0_SPECIAL_BASE 0x561AE80ull
+#define NIC4_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC4_QPC0_BASE 0x561F000ull
+#define NIC4_QPC0_MAX_OFFSET 0x1000
+#define NIC4_QPC0_SECTION 0x7200
+#define mmNIC4_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x561F720ull
+#define NIC4_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x561F728ull
+#define NIC4_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x561F730ull
+#define NIC4_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x561F738ull
+#define NIC4_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x561F740ull
+#define NIC4_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x561F748ull
+#define NIC4_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x561F750ull
+#define NIC4_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x561F758ull
+#define NIC4_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x561F760ull
+#define NIC4_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x561F768ull
+#define NIC4_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x561F770ull
+#define NIC4_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x561F778ull
+#define NIC4_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x561F780ull
+#define NIC4_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x561F788ull
+#define NIC4_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x561F790ull
+#define NIC4_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x561F798ull
+#define NIC4_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x561F7A0ull
+#define NIC4_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x561F7A8ull
+#define NIC4_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x561F7B0ull
+#define NIC4_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x561F7B8ull
+#define NIC4_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x561F7C0ull
+#define NIC4_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x561F7C8ull
+#define NIC4_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x561F7D0ull
+#define NIC4_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x561F7D8ull
+#define NIC4_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x561F7E0ull
+#define NIC4_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x561F7E8ull
+#define NIC4_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x561F7F0ull
+#define NIC4_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x561F7F8ull
+#define NIC4_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x561F800ull
+#define NIC4_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x561F808ull
+#define NIC4_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x561F810ull
+#define NIC4_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x561F818ull
+#define NIC4_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC4_QPC0_AXUSER_CONG_QUE_BASE 0x561FB80ull
+#define NIC4_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_RXWQE_BASE 0x561FBE0ull
+#define NIC4_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x561FC40ull
+#define NIC4_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_DB_FIFO_BASE 0x561FCA0ull
+#define NIC4_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x561FD00ull
+#define NIC4_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_ERR_FIFO_BASE 0x561FD60ull
+#define NIC4_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_QPC_RESP_BASE 0x561FDC0ull
+#define NIC4_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC4_QPC0_AXUSER_QPC_REQ_BASE 0x561FE20ull
+#define NIC4_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC4_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC4_QPC0_SPECIAL_BASE 0x561FE80ull
+#define NIC4_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_0_UNSECURE_DOORBELL0_BASE 0x5620000ull
+#define NIC4_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_0_UNSECURE_DOORBELL1_BASE 0x5620080ull
+#define NIC4_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x5620100ull
+#define NIC4_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x5620180ull
+#define NIC4_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_0_SPECIAL_BASE 0x5620E80ull
+#define NIC4_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_1_UNSECURE_DOORBELL0_BASE 0x5621000ull
+#define NIC4_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_1_UNSECURE_DOORBELL1_BASE 0x5621080ull
+#define NIC4_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x5621100ull
+#define NIC4_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x5621180ull
+#define NIC4_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_1_SPECIAL_BASE 0x5621E80ull
+#define NIC4_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_2_UNSECURE_DOORBELL0_BASE 0x5622000ull
+#define NIC4_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_2_UNSECURE_DOORBELL1_BASE 0x5622080ull
+#define NIC4_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x5622100ull
+#define NIC4_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x5622180ull
+#define NIC4_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_2_SPECIAL_BASE 0x5622E80ull
+#define NIC4_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_3_UNSECURE_DOORBELL0_BASE 0x5623000ull
+#define NIC4_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_3_UNSECURE_DOORBELL1_BASE 0x5623080ull
+#define NIC4_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x5623100ull
+#define NIC4_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x5623180ull
+#define NIC4_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_3_SPECIAL_BASE 0x5623E80ull
+#define NIC4_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_4_UNSECURE_DOORBELL0_BASE 0x5624000ull
+#define NIC4_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_4_UNSECURE_DOORBELL1_BASE 0x5624080ull
+#define NIC4_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x5624100ull
+#define NIC4_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x5624180ull
+#define NIC4_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_4_SPECIAL_BASE 0x5624E80ull
+#define NIC4_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_5_UNSECURE_DOORBELL0_BASE 0x5625000ull
+#define NIC4_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_5_UNSECURE_DOORBELL1_BASE 0x5625080ull
+#define NIC4_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x5625100ull
+#define NIC4_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x5625180ull
+#define NIC4_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_5_SPECIAL_BASE 0x5625E80ull
+#define NIC4_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_6_UNSECURE_DOORBELL0_BASE 0x5626000ull
+#define NIC4_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_6_UNSECURE_DOORBELL1_BASE 0x5626080ull
+#define NIC4_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x5626100ull
+#define NIC4_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x5626180ull
+#define NIC4_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_6_SPECIAL_BASE 0x5626E80ull
+#define NIC4_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_7_UNSECURE_DOORBELL0_BASE 0x5627000ull
+#define NIC4_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_7_UNSECURE_DOORBELL1_BASE 0x5627080ull
+#define NIC4_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x5627100ull
+#define NIC4_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x5627180ull
+#define NIC4_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_7_SPECIAL_BASE 0x5627E80ull
+#define NIC4_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_8_UNSECURE_DOORBELL0_BASE 0x5628000ull
+#define NIC4_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_8_UNSECURE_DOORBELL1_BASE 0x5628080ull
+#define NIC4_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x5628100ull
+#define NIC4_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x5628180ull
+#define NIC4_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_8_SPECIAL_BASE 0x5628E80ull
+#define NIC4_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_9_UNSECURE_DOORBELL0_BASE 0x5629000ull
+#define NIC4_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_9_UNSECURE_DOORBELL1_BASE 0x5629080ull
+#define NIC4_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x5629100ull
+#define NIC4_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x5629180ull
+#define NIC4_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_9_SPECIAL_BASE 0x5629E80ull
+#define NIC4_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_10_UNSECURE_DOORBELL0_BASE 0x562A000ull
+#define NIC4_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_10_UNSECURE_DOORBELL1_BASE 0x562A080ull
+#define NIC4_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x562A100ull
+#define NIC4_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x562A180ull
+#define NIC4_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_10_SPECIAL_BASE 0x562AE80ull
+#define NIC4_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_11_UNSECURE_DOORBELL0_BASE 0x562B000ull
+#define NIC4_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_11_UNSECURE_DOORBELL1_BASE 0x562B080ull
+#define NIC4_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x562B100ull
+#define NIC4_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x562B180ull
+#define NIC4_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_11_SPECIAL_BASE 0x562BE80ull
+#define NIC4_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_12_UNSECURE_DOORBELL0_BASE 0x562C000ull
+#define NIC4_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_12_UNSECURE_DOORBELL1_BASE 0x562C080ull
+#define NIC4_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x562C100ull
+#define NIC4_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x562C180ull
+#define NIC4_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_12_SPECIAL_BASE 0x562CE80ull
+#define NIC4_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_13_UNSECURE_DOORBELL0_BASE 0x562D000ull
+#define NIC4_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_13_UNSECURE_DOORBELL1_BASE 0x562D080ull
+#define NIC4_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x562D100ull
+#define NIC4_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x562D180ull
+#define NIC4_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_13_SPECIAL_BASE 0x562DE80ull
+#define NIC4_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC4_UMR1_14_UNSECURE_DOORBELL0_BASE 0x562E000ull
+#define NIC4_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC4_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC4_UMR1_14_UNSECURE_DOORBELL1_BASE 0x562E080ull
+#define NIC4_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC4_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC4_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x562E100ull
+#define NIC4_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC4_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC4_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x562E180ull
+#define NIC4_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC4_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC4_UMR1_14_SPECIAL_BASE 0x562EE80ull
+#define NIC4_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC4_QM_DCCM1_BASE 0x5630000ull
+#define NIC4_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC4_QM_DCCM1_SECTION 0x8000
+#define mmNIC4_QM_ARC_AUX1_BASE 0x5638000ull
+#define NIC4_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC4_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC4_QM_ARC_AUX1_SPECIAL_BASE 0x5638E80ull
+#define NIC4_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC4_QM1_BASE 0x563A000ull
+#define NIC4_QM1_MAX_OFFSET 0x1000
+#define NIC4_QM1_SECTION 0x9000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x563A900ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x563A908ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x563A910ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x563A918ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x563A920ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x563A928ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x563A930ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x563A938ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x563A940ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x563A948ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x563A950ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x563A958ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x563A960ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x563A968ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x563A970ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC4_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x563A978ull
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC4_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC4_QM1_AXUSER_SECURED_BASE 0x563AB00ull
+#define NIC4_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC4_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC4_QM1_AXUSER_NONSECURED_BASE 0x563AB80ull
+#define NIC4_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC4_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC4_QM1_DBG_HBW_BASE 0x563AC00ull
+#define NIC4_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC4_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC4_QM1_DBG_LBW_BASE 0x563AC80ull
+#define NIC4_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC4_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC4_QM1_CGM_BASE 0x563AD80ull
+#define NIC4_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC4_QM1_CGM_SECTION 0x1000
+#define mmNIC4_QM1_SPECIAL_BASE 0x563AE80ull
+#define NIC4_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC4_QPC1_BASE 0x563F000ull
+#define NIC4_QPC1_MAX_OFFSET 0x1000
+#define NIC4_QPC1_SECTION 0x7200
+#define mmNIC4_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x563F720ull
+#define NIC4_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x563F728ull
+#define NIC4_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x563F730ull
+#define NIC4_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x563F738ull
+#define NIC4_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x563F740ull
+#define NIC4_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x563F748ull
+#define NIC4_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x563F750ull
+#define NIC4_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x563F758ull
+#define NIC4_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x563F760ull
+#define NIC4_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x563F768ull
+#define NIC4_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x563F770ull
+#define NIC4_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x563F778ull
+#define NIC4_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x563F780ull
+#define NIC4_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x563F788ull
+#define NIC4_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x563F790ull
+#define NIC4_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x563F798ull
+#define NIC4_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x563F7A0ull
+#define NIC4_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x563F7A8ull
+#define NIC4_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x563F7B0ull
+#define NIC4_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x563F7B8ull
+#define NIC4_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x563F7C0ull
+#define NIC4_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x563F7C8ull
+#define NIC4_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x563F7D0ull
+#define NIC4_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x563F7D8ull
+#define NIC4_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x563F7E0ull
+#define NIC4_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x563F7E8ull
+#define NIC4_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x563F7F0ull
+#define NIC4_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x563F7F8ull
+#define NIC4_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x563F800ull
+#define NIC4_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x563F808ull
+#define NIC4_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x563F810ull
+#define NIC4_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC4_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x563F818ull
+#define NIC4_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC4_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC4_QPC1_AXUSER_CONG_QUE_BASE 0x563FB80ull
+#define NIC4_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_RXWQE_BASE 0x563FBE0ull
+#define NIC4_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x563FC40ull
+#define NIC4_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_DB_FIFO_BASE 0x563FCA0ull
+#define NIC4_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x563FD00ull
+#define NIC4_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_ERR_FIFO_BASE 0x563FD60ull
+#define NIC4_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_QPC_RESP_BASE 0x563FDC0ull
+#define NIC4_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC4_QPC1_AXUSER_QPC_REQ_BASE 0x563FE20ull
+#define NIC4_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC4_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC4_QPC1_SPECIAL_BASE 0x563FE80ull
+#define NIC4_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC4_TMR_BASE 0x5648000ull
+#define NIC4_TMR_MAX_OFFSET 0x1000
+#define NIC4_TMR_SECTION 0xD600
+#define mmNIC4_TMR_AXUSER_TMR_FREE_LIST_BASE 0x5648D60ull
+#define NIC4_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC4_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC4_TMR_AXUSER_TMR_FIFO_BASE 0x5648DC0ull
+#define NIC4_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC4_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC4_TMR_AXUSER_TMR_FSM_BASE 0x5648E20ull
+#define NIC4_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC4_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC4_TMR_SPECIAL_BASE 0x5648E80ull
+#define NIC4_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC4_RXB_CORE_BASE 0x5649000ull
+#define NIC4_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC4_RXB_CORE_SECTION 0x6100
+#define mmNIC4_RXB_CORE_SCT_AWUSER_BASE 0x5649610ull
+#define NIC4_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC4_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC4_RXB_CORE_SPECIAL_BASE 0x5649E80ull
+#define NIC4_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC4_RXE0_BASE 0x564A000ull
+#define NIC4_RXE0_MAX_OFFSET 0x1000
+#define NIC4_RXE0_SECTION 0x9000
+#define mmNIC4_RXE0_WQE_ARUSER_BASE 0x564A900ull
+#define NIC4_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC4_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC4_RXE0_SPECIAL_BASE 0x564AE80ull
+#define NIC4_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC4_RXE1_BASE 0x564B000ull
+#define NIC4_RXE1_MAX_OFFSET 0x1000
+#define NIC4_RXE1_SECTION 0x9000
+#define mmNIC4_RXE1_WQE_ARUSER_BASE 0x564B900ull
+#define NIC4_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC4_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC4_RXE1_SPECIAL_BASE 0x564BE80ull
+#define NIC4_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ0_BASE 0x564C000ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ1_BASE 0x564C050ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ2_BASE 0x564C0A0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ3_BASE 0x564C0F0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ4_BASE 0x564C140ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ5_BASE 0x564C190ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ6_BASE 0x564C1E0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ7_BASE 0x564C230ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ8_BASE 0x564C280ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ9_BASE 0x564C2D0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ10_BASE 0x564C320ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ11_BASE 0x564C370ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ12_BASE 0x564C3C0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ13_BASE 0x564C410ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ14_BASE 0x564C460ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ15_BASE 0x564C4B0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ16_BASE 0x564C500ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ17_BASE 0x564C550ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ18_BASE 0x564C5A0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ19_BASE 0x564C5F0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ20_BASE 0x564C640ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ21_BASE 0x564C690ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ22_BASE 0x564C6E0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ23_BASE 0x564C730ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ24_BASE 0x564C780ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ25_BASE 0x564C7D0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ26_BASE 0x564C820ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ27_BASE 0x564C870ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ28_BASE 0x564C8C0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ29_BASE 0x564C910ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ30_BASE 0x564C960ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC4_RXE0_AXUSER_AXUSER_CQ31_BASE 0x564C9B0ull
+#define NIC4_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC4_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC4_RXE0_AXUSER_SPECIAL_BASE 0x564CE80ull
+#define NIC4_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ0_BASE 0x564D000ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ1_BASE 0x564D050ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ2_BASE 0x564D0A0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ3_BASE 0x564D0F0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ4_BASE 0x564D140ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ5_BASE 0x564D190ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ6_BASE 0x564D1E0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ7_BASE 0x564D230ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ8_BASE 0x564D280ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ9_BASE 0x564D2D0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ10_BASE 0x564D320ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ11_BASE 0x564D370ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ12_BASE 0x564D3C0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ13_BASE 0x564D410ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ14_BASE 0x564D460ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ15_BASE 0x564D4B0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ16_BASE 0x564D500ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ17_BASE 0x564D550ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ18_BASE 0x564D5A0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ19_BASE 0x564D5F0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ20_BASE 0x564D640ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ21_BASE 0x564D690ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ22_BASE 0x564D6E0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ23_BASE 0x564D730ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ24_BASE 0x564D780ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ25_BASE 0x564D7D0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ26_BASE 0x564D820ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ27_BASE 0x564D870ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ28_BASE 0x564D8C0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ29_BASE 0x564D910ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ30_BASE 0x564D960ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC4_RXE1_AXUSER_AXUSER_CQ31_BASE 0x564D9B0ull
+#define NIC4_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC4_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC4_RXE1_AXUSER_SPECIAL_BASE 0x564DE80ull
+#define NIC4_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC4_TXS0_BASE 0x5650000ull
+#define NIC4_TXS0_MAX_OFFSET 0x1000
+#define NIC4_TXS0_SECTION 0xE800
+#define mmNIC4_TXS0_SPECIAL_BASE 0x5650E80ull
+#define NIC4_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC4_TXS1_BASE 0x5651000ull
+#define NIC4_TXS1_MAX_OFFSET 0x1000
+#define NIC4_TXS1_SECTION 0xE800
+#define mmNIC4_TXS1_SPECIAL_BASE 0x5651E80ull
+#define NIC4_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC4_TXE0_BASE 0x5652000ull
+#define NIC4_TXE0_MAX_OFFSET 0x1000
+#define NIC4_TXE0_SECTION 0xE800
+#define mmNIC4_TXE0_SPECIAL_BASE 0x5652E80ull
+#define NIC4_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC4_TXE1_BASE 0x5653000ull
+#define NIC4_TXE1_MAX_OFFSET 0x1000
+#define NIC4_TXE1_SECTION 0xE800
+#define mmNIC4_TXE1_SPECIAL_BASE 0x5653E80ull
+#define NIC4_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC4_TXB_BASE 0x5654000ull
+#define NIC4_TXB_MAX_OFFSET 0x1000
+#define NIC4_TXB_SECTION 0xE800
+#define mmNIC4_TXB_SPECIAL_BASE 0x5654E80ull
+#define NIC4_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC4_MSTR_IF_RR_SHRD_HBW_BASE 0x5655000ull
+#define NIC4_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC4_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC4_MSTR_IF_RR_PRVT_HBW_BASE 0x5655200ull
+#define NIC4_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC4_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC4_MSTR_IF_RR_SHRD_LBW_BASE 0x5655400ull
+#define NIC4_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC4_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC4_MSTR_IF_RR_PRVT_LBW_BASE 0x5655600ull
+#define NIC4_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC4_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC4_MSTR_IF_E2E_CRDT_BASE 0x5655800ull
+#define NIC4_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC4_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC4_MSTR_IF_AXUSER_BASE 0x5655A80ull
+#define NIC4_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC4_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC4_MSTR_IF_DBG_HBW_BASE 0x5655B00ull
+#define NIC4_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC4_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC4_MSTR_IF_DBG_LBW_BASE 0x5655B80ull
+#define NIC4_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC4_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC4_MSTR_IF_CORE_HBW_BASE 0x5655C00ull
+#define NIC4_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC4_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC4_MSTR_IF_CORE_LBW_BASE 0x5655D80ull
+#define NIC4_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC4_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC4_MSTR_IF_SPECIAL_BASE 0x5655E80ull
+#define NIC4_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC4_TX_AXUSER_BASE 0x5656000ull
+#define NIC4_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC4_TX_AXUSER_SECTION 0x2000
+#define mmNIC4_SERDES0_BASE 0x5658000ull
+#define NIC4_SERDES0_MAX_OFFSET 0x3E40
+#define NIC4_SERDES0_SECTION 0x4000
+#define mmNIC4_SERDES1_BASE 0x565C000ull
+#define NIC4_SERDES1_MAX_OFFSET 0x3E40
+#define NIC4_SERDES1_SECTION 0x4000
+#define mmNIC4_PHY_BASE 0x5660000ull
+#define NIC4_PHY_MAX_OFFSET 0x1000
+#define NIC4_PHY_SECTION 0xE800
+#define mmNIC4_PHY_SPECIAL_BASE 0x5660E80ull
+#define NIC4_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC4_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT4_MAC_AUX_BASE 0x5668000ull
+#define PRT4_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT4_MAC_AUX_SECTION 0xE800
+#define mmPRT4_MAC_AUX_SPECIAL_BASE 0x5668E80ull
+#define PRT4_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT4_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT4_MAC_CORE_BASE 0x5669000ull
+#define PRT4_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT4_MAC_CORE_SECTION 0xE800
+#define mmPRT4_MAC_CORE_SPECIAL_BASE 0x5669E80ull
+#define PRT4_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT4_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC4_MAC_RS_FEC_BASE 0x566A000ull
+#define NIC4_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC4_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC4_MAC_GLOB_STAT_CONTROL_REG_BASE 0x566B000ull
+#define NIC4_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC4_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC4_MAC_GLOB_STAT_RX0_BASE 0x566B100ull
+#define NIC4_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC4_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC4_MAC_GLOB_STAT_RX1_BASE 0x566B18Cull
+#define NIC4_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC4_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC4_MAC_GLOB_STAT_RX2_BASE 0x566B218ull
+#define NIC4_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC4_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC4_MAC_GLOB_STAT_RX3_BASE 0x566B2A4ull
+#define NIC4_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC4_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC4_MAC_GLOB_STAT_TX0_BASE 0x566B330ull
+#define NIC4_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC4_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC4_MAC_GLOB_STAT_TX1_BASE 0x566B398ull
+#define NIC4_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC4_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC4_MAC_GLOB_STAT_TX2_BASE 0x566B400ull
+#define NIC4_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC4_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC4_MAC_GLOB_STAT_TX3_BASE 0x566B468ull
+#define NIC4_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC4_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC4_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x566B800ull
+#define NIC4_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC4_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC4_MAC_CH0_MAC_PCS_BASE 0x566C000ull
+#define NIC4_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC4_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC4_MAC_CH0_MAC_128_BASE 0x566C400ull
+#define NIC4_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC4_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC4_MAC_CH0_MAC_AN_BASE 0x566C800ull
+#define NIC4_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC4_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC4_MAC_CH1_MAC_PCS_BASE 0x566D000ull
+#define NIC4_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC4_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC4_MAC_CH1_MAC_128_BASE 0x566D400ull
+#define NIC4_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC4_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC4_MAC_CH1_MAC_AN_BASE 0x566D800ull
+#define NIC4_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC4_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC4_MAC_CH2_MAC_PCS_BASE 0x566E000ull
+#define NIC4_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC4_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC4_MAC_CH2_MAC_128_BASE 0x566E400ull
+#define NIC4_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC4_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC4_MAC_CH2_MAC_AN_BASE 0x566E800ull
+#define NIC4_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC4_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC4_MAC_CH3_MAC_PCS_BASE 0x566F000ull
+#define NIC4_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC4_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC4_MAC_CH3_MAC_128_BASE 0x566F400ull
+#define NIC4_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC4_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC4_MAC_CH3_MAC_AN_BASE 0x566F800ull
+#define NIC4_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC4_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC5_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5680000ull
+#define NIC5_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5680080ull
+#define NIC5_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5680100ull
+#define NIC5_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5680180ull
+#define NIC5_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_0_SPECIAL_BASE 0x5680E80ull
+#define NIC5_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5681000ull
+#define NIC5_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5681080ull
+#define NIC5_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5681100ull
+#define NIC5_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5681180ull
+#define NIC5_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_1_SPECIAL_BASE 0x5681E80ull
+#define NIC5_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5682000ull
+#define NIC5_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5682080ull
+#define NIC5_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5682100ull
+#define NIC5_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5682180ull
+#define NIC5_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_2_SPECIAL_BASE 0x5682E80ull
+#define NIC5_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5683000ull
+#define NIC5_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5683080ull
+#define NIC5_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5683100ull
+#define NIC5_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5683180ull
+#define NIC5_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_3_SPECIAL_BASE 0x5683E80ull
+#define NIC5_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5684000ull
+#define NIC5_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5684080ull
+#define NIC5_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5684100ull
+#define NIC5_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5684180ull
+#define NIC5_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_4_SPECIAL_BASE 0x5684E80ull
+#define NIC5_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5685000ull
+#define NIC5_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5685080ull
+#define NIC5_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5685100ull
+#define NIC5_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5685180ull
+#define NIC5_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_5_SPECIAL_BASE 0x5685E80ull
+#define NIC5_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5686000ull
+#define NIC5_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5686080ull
+#define NIC5_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5686100ull
+#define NIC5_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5686180ull
+#define NIC5_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_6_SPECIAL_BASE 0x5686E80ull
+#define NIC5_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5687000ull
+#define NIC5_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5687080ull
+#define NIC5_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5687100ull
+#define NIC5_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5687180ull
+#define NIC5_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_7_SPECIAL_BASE 0x5687E80ull
+#define NIC5_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5688000ull
+#define NIC5_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5688080ull
+#define NIC5_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5688100ull
+#define NIC5_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5688180ull
+#define NIC5_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_8_SPECIAL_BASE 0x5688E80ull
+#define NIC5_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5689000ull
+#define NIC5_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5689080ull
+#define NIC5_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5689100ull
+#define NIC5_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5689180ull
+#define NIC5_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_9_SPECIAL_BASE 0x5689E80ull
+#define NIC5_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_10_UNSECURE_DOORBELL0_BASE 0x568A000ull
+#define NIC5_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_10_UNSECURE_DOORBELL1_BASE 0x568A080ull
+#define NIC5_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x568A100ull
+#define NIC5_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x568A180ull
+#define NIC5_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_10_SPECIAL_BASE 0x568AE80ull
+#define NIC5_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_11_UNSECURE_DOORBELL0_BASE 0x568B000ull
+#define NIC5_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_11_UNSECURE_DOORBELL1_BASE 0x568B080ull
+#define NIC5_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x568B100ull
+#define NIC5_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x568B180ull
+#define NIC5_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_11_SPECIAL_BASE 0x568BE80ull
+#define NIC5_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_12_UNSECURE_DOORBELL0_BASE 0x568C000ull
+#define NIC5_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_12_UNSECURE_DOORBELL1_BASE 0x568C080ull
+#define NIC5_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x568C100ull
+#define NIC5_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x568C180ull
+#define NIC5_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_12_SPECIAL_BASE 0x568CE80ull
+#define NIC5_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_13_UNSECURE_DOORBELL0_BASE 0x568D000ull
+#define NIC5_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_13_UNSECURE_DOORBELL1_BASE 0x568D080ull
+#define NIC5_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x568D100ull
+#define NIC5_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x568D180ull
+#define NIC5_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_13_SPECIAL_BASE 0x568DE80ull
+#define NIC5_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR0_14_UNSECURE_DOORBELL0_BASE 0x568E000ull
+#define NIC5_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR0_14_UNSECURE_DOORBELL1_BASE 0x568E080ull
+#define NIC5_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x568E100ull
+#define NIC5_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x568E180ull
+#define NIC5_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR0_14_SPECIAL_BASE 0x568EE80ull
+#define NIC5_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC5_QM_DCCM0_BASE 0x5690000ull
+#define NIC5_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC5_QM_DCCM0_SECTION 0x8000
+#define mmNIC5_QM_ARC_AUX0_BASE 0x5698000ull
+#define NIC5_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC5_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC5_QM_ARC_AUX0_SPECIAL_BASE 0x5698E80ull
+#define NIC5_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC5_QM0_BASE 0x569A000ull
+#define NIC5_QM0_MAX_OFFSET 0x1000
+#define NIC5_QM0_SECTION 0x9000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x569A900ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x569A908ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x569A910ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x569A918ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x569A920ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x569A928ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x569A930ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x569A938ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x569A940ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x569A948ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x569A950ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x569A958ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x569A960ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x569A968ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x569A970ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC5_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x569A978ull
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC5_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC5_QM0_AXUSER_SECURED_BASE 0x569AB00ull
+#define NIC5_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC5_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC5_QM0_AXUSER_NONSECURED_BASE 0x569AB80ull
+#define NIC5_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC5_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC5_QM0_DBG_HBW_BASE 0x569AC00ull
+#define NIC5_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC5_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC5_QM0_DBG_LBW_BASE 0x569AC80ull
+#define NIC5_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC5_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC5_QM0_CGM_BASE 0x569AD80ull
+#define NIC5_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC5_QM0_CGM_SECTION 0x1000
+#define mmNIC5_QM0_SPECIAL_BASE 0x569AE80ull
+#define NIC5_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC5_QPC0_BASE 0x569F000ull
+#define NIC5_QPC0_MAX_OFFSET 0x1000
+#define NIC5_QPC0_SECTION 0x7200
+#define mmNIC5_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x569F720ull
+#define NIC5_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x569F728ull
+#define NIC5_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x569F730ull
+#define NIC5_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x569F738ull
+#define NIC5_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x569F740ull
+#define NIC5_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x569F748ull
+#define NIC5_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x569F750ull
+#define NIC5_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x569F758ull
+#define NIC5_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x569F760ull
+#define NIC5_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x569F768ull
+#define NIC5_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x569F770ull
+#define NIC5_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x569F778ull
+#define NIC5_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x569F780ull
+#define NIC5_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x569F788ull
+#define NIC5_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x569F790ull
+#define NIC5_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x569F798ull
+#define NIC5_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x569F7A0ull
+#define NIC5_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x569F7A8ull
+#define NIC5_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x569F7B0ull
+#define NIC5_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x569F7B8ull
+#define NIC5_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x569F7C0ull
+#define NIC5_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x569F7C8ull
+#define NIC5_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x569F7D0ull
+#define NIC5_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x569F7D8ull
+#define NIC5_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x569F7E0ull
+#define NIC5_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x569F7E8ull
+#define NIC5_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x569F7F0ull
+#define NIC5_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x569F7F8ull
+#define NIC5_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x569F800ull
+#define NIC5_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x569F808ull
+#define NIC5_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x569F810ull
+#define NIC5_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x569F818ull
+#define NIC5_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC5_QPC0_AXUSER_CONG_QUE_BASE 0x569FB80ull
+#define NIC5_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_RXWQE_BASE 0x569FBE0ull
+#define NIC5_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x569FC40ull
+#define NIC5_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_DB_FIFO_BASE 0x569FCA0ull
+#define NIC5_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x569FD00ull
+#define NIC5_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_ERR_FIFO_BASE 0x569FD60ull
+#define NIC5_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_QPC_RESP_BASE 0x569FDC0ull
+#define NIC5_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC5_QPC0_AXUSER_QPC_REQ_BASE 0x569FE20ull
+#define NIC5_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC5_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC5_QPC0_SPECIAL_BASE 0x569FE80ull
+#define NIC5_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_0_UNSECURE_DOORBELL0_BASE 0x56A0000ull
+#define NIC5_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_0_UNSECURE_DOORBELL1_BASE 0x56A0080ull
+#define NIC5_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x56A0100ull
+#define NIC5_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x56A0180ull
+#define NIC5_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_0_SPECIAL_BASE 0x56A0E80ull
+#define NIC5_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_1_UNSECURE_DOORBELL0_BASE 0x56A1000ull
+#define NIC5_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_1_UNSECURE_DOORBELL1_BASE 0x56A1080ull
+#define NIC5_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x56A1100ull
+#define NIC5_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x56A1180ull
+#define NIC5_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_1_SPECIAL_BASE 0x56A1E80ull
+#define NIC5_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_2_UNSECURE_DOORBELL0_BASE 0x56A2000ull
+#define NIC5_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_2_UNSECURE_DOORBELL1_BASE 0x56A2080ull
+#define NIC5_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x56A2100ull
+#define NIC5_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x56A2180ull
+#define NIC5_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_2_SPECIAL_BASE 0x56A2E80ull
+#define NIC5_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_3_UNSECURE_DOORBELL0_BASE 0x56A3000ull
+#define NIC5_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_3_UNSECURE_DOORBELL1_BASE 0x56A3080ull
+#define NIC5_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x56A3100ull
+#define NIC5_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x56A3180ull
+#define NIC5_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_3_SPECIAL_BASE 0x56A3E80ull
+#define NIC5_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_4_UNSECURE_DOORBELL0_BASE 0x56A4000ull
+#define NIC5_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_4_UNSECURE_DOORBELL1_BASE 0x56A4080ull
+#define NIC5_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x56A4100ull
+#define NIC5_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x56A4180ull
+#define NIC5_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_4_SPECIAL_BASE 0x56A4E80ull
+#define NIC5_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_5_UNSECURE_DOORBELL0_BASE 0x56A5000ull
+#define NIC5_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_5_UNSECURE_DOORBELL1_BASE 0x56A5080ull
+#define NIC5_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x56A5100ull
+#define NIC5_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x56A5180ull
+#define NIC5_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_5_SPECIAL_BASE 0x56A5E80ull
+#define NIC5_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_6_UNSECURE_DOORBELL0_BASE 0x56A6000ull
+#define NIC5_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_6_UNSECURE_DOORBELL1_BASE 0x56A6080ull
+#define NIC5_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x56A6100ull
+#define NIC5_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x56A6180ull
+#define NIC5_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_6_SPECIAL_BASE 0x56A6E80ull
+#define NIC5_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_7_UNSECURE_DOORBELL0_BASE 0x56A7000ull
+#define NIC5_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_7_UNSECURE_DOORBELL1_BASE 0x56A7080ull
+#define NIC5_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x56A7100ull
+#define NIC5_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x56A7180ull
+#define NIC5_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_7_SPECIAL_BASE 0x56A7E80ull
+#define NIC5_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_8_UNSECURE_DOORBELL0_BASE 0x56A8000ull
+#define NIC5_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_8_UNSECURE_DOORBELL1_BASE 0x56A8080ull
+#define NIC5_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x56A8100ull
+#define NIC5_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x56A8180ull
+#define NIC5_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_8_SPECIAL_BASE 0x56A8E80ull
+#define NIC5_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_9_UNSECURE_DOORBELL0_BASE 0x56A9000ull
+#define NIC5_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_9_UNSECURE_DOORBELL1_BASE 0x56A9080ull
+#define NIC5_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x56A9100ull
+#define NIC5_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x56A9180ull
+#define NIC5_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_9_SPECIAL_BASE 0x56A9E80ull
+#define NIC5_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_10_UNSECURE_DOORBELL0_BASE 0x56AA000ull
+#define NIC5_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_10_UNSECURE_DOORBELL1_BASE 0x56AA080ull
+#define NIC5_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x56AA100ull
+#define NIC5_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x56AA180ull
+#define NIC5_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_10_SPECIAL_BASE 0x56AAE80ull
+#define NIC5_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_11_UNSECURE_DOORBELL0_BASE 0x56AB000ull
+#define NIC5_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_11_UNSECURE_DOORBELL1_BASE 0x56AB080ull
+#define NIC5_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x56AB100ull
+#define NIC5_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x56AB180ull
+#define NIC5_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_11_SPECIAL_BASE 0x56ABE80ull
+#define NIC5_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_12_UNSECURE_DOORBELL0_BASE 0x56AC000ull
+#define NIC5_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_12_UNSECURE_DOORBELL1_BASE 0x56AC080ull
+#define NIC5_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x56AC100ull
+#define NIC5_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x56AC180ull
+#define NIC5_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_12_SPECIAL_BASE 0x56ACE80ull
+#define NIC5_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_13_UNSECURE_DOORBELL0_BASE 0x56AD000ull
+#define NIC5_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_13_UNSECURE_DOORBELL1_BASE 0x56AD080ull
+#define NIC5_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x56AD100ull
+#define NIC5_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x56AD180ull
+#define NIC5_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_13_SPECIAL_BASE 0x56ADE80ull
+#define NIC5_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC5_UMR1_14_UNSECURE_DOORBELL0_BASE 0x56AE000ull
+#define NIC5_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC5_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC5_UMR1_14_UNSECURE_DOORBELL1_BASE 0x56AE080ull
+#define NIC5_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC5_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC5_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x56AE100ull
+#define NIC5_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC5_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC5_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x56AE180ull
+#define NIC5_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC5_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC5_UMR1_14_SPECIAL_BASE 0x56AEE80ull
+#define NIC5_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC5_QM_DCCM1_BASE 0x56B0000ull
+#define NIC5_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC5_QM_DCCM1_SECTION 0x8000
+#define mmNIC5_QM_ARC_AUX1_BASE 0x56B8000ull
+#define NIC5_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC5_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC5_QM_ARC_AUX1_SPECIAL_BASE 0x56B8E80ull
+#define NIC5_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC5_QM1_BASE 0x56BA000ull
+#define NIC5_QM1_MAX_OFFSET 0x1000
+#define NIC5_QM1_SECTION 0x9000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x56BA900ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x56BA908ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x56BA910ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x56BA918ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x56BA920ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x56BA928ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x56BA930ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x56BA938ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x56BA940ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x56BA948ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x56BA950ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x56BA958ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x56BA960ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x56BA968ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x56BA970ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC5_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x56BA978ull
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC5_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC5_QM1_AXUSER_SECURED_BASE 0x56BAB00ull
+#define NIC5_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC5_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC5_QM1_AXUSER_NONSECURED_BASE 0x56BAB80ull
+#define NIC5_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC5_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC5_QM1_DBG_HBW_BASE 0x56BAC00ull
+#define NIC5_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC5_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC5_QM1_DBG_LBW_BASE 0x56BAC80ull
+#define NIC5_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC5_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC5_QM1_CGM_BASE 0x56BAD80ull
+#define NIC5_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC5_QM1_CGM_SECTION 0x1000
+#define mmNIC5_QM1_SPECIAL_BASE 0x56BAE80ull
+#define NIC5_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC5_QPC1_BASE 0x56BF000ull
+#define NIC5_QPC1_MAX_OFFSET 0x1000
+#define NIC5_QPC1_SECTION 0x7200
+#define mmNIC5_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x56BF720ull
+#define NIC5_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x56BF728ull
+#define NIC5_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x56BF730ull
+#define NIC5_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x56BF738ull
+#define NIC5_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x56BF740ull
+#define NIC5_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x56BF748ull
+#define NIC5_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x56BF750ull
+#define NIC5_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x56BF758ull
+#define NIC5_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x56BF760ull
+#define NIC5_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x56BF768ull
+#define NIC5_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x56BF770ull
+#define NIC5_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x56BF778ull
+#define NIC5_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x56BF780ull
+#define NIC5_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x56BF788ull
+#define NIC5_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x56BF790ull
+#define NIC5_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x56BF798ull
+#define NIC5_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x56BF7A0ull
+#define NIC5_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x56BF7A8ull
+#define NIC5_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x56BF7B0ull
+#define NIC5_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x56BF7B8ull
+#define NIC5_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x56BF7C0ull
+#define NIC5_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x56BF7C8ull
+#define NIC5_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x56BF7D0ull
+#define NIC5_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x56BF7D8ull
+#define NIC5_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x56BF7E0ull
+#define NIC5_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x56BF7E8ull
+#define NIC5_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x56BF7F0ull
+#define NIC5_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x56BF7F8ull
+#define NIC5_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x56BF800ull
+#define NIC5_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x56BF808ull
+#define NIC5_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x56BF810ull
+#define NIC5_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC5_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x56BF818ull
+#define NIC5_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC5_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC5_QPC1_AXUSER_CONG_QUE_BASE 0x56BFB80ull
+#define NIC5_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_RXWQE_BASE 0x56BFBE0ull
+#define NIC5_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x56BFC40ull
+#define NIC5_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_DB_FIFO_BASE 0x56BFCA0ull
+#define NIC5_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x56BFD00ull
+#define NIC5_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_ERR_FIFO_BASE 0x56BFD60ull
+#define NIC5_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_QPC_RESP_BASE 0x56BFDC0ull
+#define NIC5_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC5_QPC1_AXUSER_QPC_REQ_BASE 0x56BFE20ull
+#define NIC5_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC5_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC5_QPC1_SPECIAL_BASE 0x56BFE80ull
+#define NIC5_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC5_TMR_BASE 0x56C8000ull
+#define NIC5_TMR_MAX_OFFSET 0x1000
+#define NIC5_TMR_SECTION 0xD600
+#define mmNIC5_TMR_AXUSER_TMR_FREE_LIST_BASE 0x56C8D60ull
+#define NIC5_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC5_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC5_TMR_AXUSER_TMR_FIFO_BASE 0x56C8DC0ull
+#define NIC5_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC5_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC5_TMR_AXUSER_TMR_FSM_BASE 0x56C8E20ull
+#define NIC5_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC5_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC5_TMR_SPECIAL_BASE 0x56C8E80ull
+#define NIC5_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC5_RXB_CORE_BASE 0x56C9000ull
+#define NIC5_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC5_RXB_CORE_SECTION 0x6100
+#define mmNIC5_RXB_CORE_SCT_AWUSER_BASE 0x56C9610ull
+#define NIC5_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC5_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC5_RXB_CORE_SPECIAL_BASE 0x56C9E80ull
+#define NIC5_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC5_RXE0_BASE 0x56CA000ull
+#define NIC5_RXE0_MAX_OFFSET 0x1000
+#define NIC5_RXE0_SECTION 0x9000
+#define mmNIC5_RXE0_WQE_ARUSER_BASE 0x56CA900ull
+#define NIC5_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC5_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC5_RXE0_SPECIAL_BASE 0x56CAE80ull
+#define NIC5_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC5_RXE1_BASE 0x56CB000ull
+#define NIC5_RXE1_MAX_OFFSET 0x1000
+#define NIC5_RXE1_SECTION 0x9000
+#define mmNIC5_RXE1_WQE_ARUSER_BASE 0x56CB900ull
+#define NIC5_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC5_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC5_RXE1_SPECIAL_BASE 0x56CBE80ull
+#define NIC5_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ0_BASE 0x56CC000ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ1_BASE 0x56CC050ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ2_BASE 0x56CC0A0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ3_BASE 0x56CC0F0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ4_BASE 0x56CC140ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ5_BASE 0x56CC190ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ6_BASE 0x56CC1E0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ7_BASE 0x56CC230ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ8_BASE 0x56CC280ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ9_BASE 0x56CC2D0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ10_BASE 0x56CC320ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ11_BASE 0x56CC370ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ12_BASE 0x56CC3C0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ13_BASE 0x56CC410ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ14_BASE 0x56CC460ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ15_BASE 0x56CC4B0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ16_BASE 0x56CC500ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ17_BASE 0x56CC550ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ18_BASE 0x56CC5A0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ19_BASE 0x56CC5F0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ20_BASE 0x56CC640ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ21_BASE 0x56CC690ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ22_BASE 0x56CC6E0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ23_BASE 0x56CC730ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ24_BASE 0x56CC780ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ25_BASE 0x56CC7D0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ26_BASE 0x56CC820ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ27_BASE 0x56CC870ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ28_BASE 0x56CC8C0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ29_BASE 0x56CC910ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ30_BASE 0x56CC960ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC5_RXE0_AXUSER_AXUSER_CQ31_BASE 0x56CC9B0ull
+#define NIC5_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC5_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC5_RXE0_AXUSER_SPECIAL_BASE 0x56CCE80ull
+#define NIC5_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ0_BASE 0x56CD000ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ1_BASE 0x56CD050ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ2_BASE 0x56CD0A0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ3_BASE 0x56CD0F0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ4_BASE 0x56CD140ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ5_BASE 0x56CD190ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ6_BASE 0x56CD1E0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ7_BASE 0x56CD230ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ8_BASE 0x56CD280ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ9_BASE 0x56CD2D0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ10_BASE 0x56CD320ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ11_BASE 0x56CD370ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ12_BASE 0x56CD3C0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ13_BASE 0x56CD410ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ14_BASE 0x56CD460ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ15_BASE 0x56CD4B0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ16_BASE 0x56CD500ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ17_BASE 0x56CD550ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ18_BASE 0x56CD5A0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ19_BASE 0x56CD5F0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ20_BASE 0x56CD640ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ21_BASE 0x56CD690ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ22_BASE 0x56CD6E0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ23_BASE 0x56CD730ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ24_BASE 0x56CD780ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ25_BASE 0x56CD7D0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ26_BASE 0x56CD820ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ27_BASE 0x56CD870ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ28_BASE 0x56CD8C0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ29_BASE 0x56CD910ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ30_BASE 0x56CD960ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC5_RXE1_AXUSER_AXUSER_CQ31_BASE 0x56CD9B0ull
+#define NIC5_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC5_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC5_RXE1_AXUSER_SPECIAL_BASE 0x56CDE80ull
+#define NIC5_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC5_TXS0_BASE 0x56D0000ull
+#define NIC5_TXS0_MAX_OFFSET 0x1000
+#define NIC5_TXS0_SECTION 0xE800
+#define mmNIC5_TXS0_SPECIAL_BASE 0x56D0E80ull
+#define NIC5_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC5_TXS1_BASE 0x56D1000ull
+#define NIC5_TXS1_MAX_OFFSET 0x1000
+#define NIC5_TXS1_SECTION 0xE800
+#define mmNIC5_TXS1_SPECIAL_BASE 0x56D1E80ull
+#define NIC5_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC5_TXE0_BASE 0x56D2000ull
+#define NIC5_TXE0_MAX_OFFSET 0x1000
+#define NIC5_TXE0_SECTION 0xE800
+#define mmNIC5_TXE0_SPECIAL_BASE 0x56D2E80ull
+#define NIC5_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC5_TXE1_BASE 0x56D3000ull
+#define NIC5_TXE1_MAX_OFFSET 0x1000
+#define NIC5_TXE1_SECTION 0xE800
+#define mmNIC5_TXE1_SPECIAL_BASE 0x56D3E80ull
+#define NIC5_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC5_TXB_BASE 0x56D4000ull
+#define NIC5_TXB_MAX_OFFSET 0x1000
+#define NIC5_TXB_SECTION 0xE800
+#define mmNIC5_TXB_SPECIAL_BASE 0x56D4E80ull
+#define NIC5_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC5_MSTR_IF_RR_SHRD_HBW_BASE 0x56D5000ull
+#define NIC5_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC5_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC5_MSTR_IF_RR_PRVT_HBW_BASE 0x56D5200ull
+#define NIC5_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC5_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC5_MSTR_IF_RR_SHRD_LBW_BASE 0x56D5400ull
+#define NIC5_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC5_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC5_MSTR_IF_RR_PRVT_LBW_BASE 0x56D5600ull
+#define NIC5_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC5_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC5_MSTR_IF_E2E_CRDT_BASE 0x56D5800ull
+#define NIC5_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC5_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC5_MSTR_IF_AXUSER_BASE 0x56D5A80ull
+#define NIC5_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC5_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC5_MSTR_IF_DBG_HBW_BASE 0x56D5B00ull
+#define NIC5_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC5_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC5_MSTR_IF_DBG_LBW_BASE 0x56D5B80ull
+#define NIC5_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC5_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC5_MSTR_IF_CORE_HBW_BASE 0x56D5C00ull
+#define NIC5_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC5_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC5_MSTR_IF_CORE_LBW_BASE 0x56D5D80ull
+#define NIC5_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC5_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC5_MSTR_IF_SPECIAL_BASE 0x56D5E80ull
+#define NIC5_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC5_TX_AXUSER_BASE 0x56D6000ull
+#define NIC5_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC5_TX_AXUSER_SECTION 0x2000
+#define mmNIC5_SERDES0_BASE 0x56D8000ull
+#define NIC5_SERDES0_MAX_OFFSET 0x3E40
+#define NIC5_SERDES0_SECTION 0x4000
+#define mmNIC5_SERDES1_BASE 0x56DC000ull
+#define NIC5_SERDES1_MAX_OFFSET 0x3E40
+#define NIC5_SERDES1_SECTION 0x4000
+#define mmNIC5_PHY_BASE 0x56E0000ull
+#define NIC5_PHY_MAX_OFFSET 0x1000
+#define NIC5_PHY_SECTION 0xE800
+#define mmNIC5_PHY_SPECIAL_BASE 0x56E0E80ull
+#define NIC5_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC5_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT5_MAC_AUX_BASE 0x56E8000ull
+#define PRT5_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT5_MAC_AUX_SECTION 0xE800
+#define mmPRT5_MAC_AUX_SPECIAL_BASE 0x56E8E80ull
+#define PRT5_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT5_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT5_MAC_CORE_BASE 0x56E9000ull
+#define PRT5_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT5_MAC_CORE_SECTION 0xE800
+#define mmPRT5_MAC_CORE_SPECIAL_BASE 0x56E9E80ull
+#define PRT5_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT5_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC5_MAC_RS_FEC_BASE 0x56EA000ull
+#define NIC5_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC5_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC5_MAC_GLOB_STAT_CONTROL_REG_BASE 0x56EB000ull
+#define NIC5_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC5_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC5_MAC_GLOB_STAT_RX0_BASE 0x56EB100ull
+#define NIC5_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC5_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC5_MAC_GLOB_STAT_RX1_BASE 0x56EB18Cull
+#define NIC5_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC5_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC5_MAC_GLOB_STAT_RX2_BASE 0x56EB218ull
+#define NIC5_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC5_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC5_MAC_GLOB_STAT_RX3_BASE 0x56EB2A4ull
+#define NIC5_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC5_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC5_MAC_GLOB_STAT_TX0_BASE 0x56EB330ull
+#define NIC5_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC5_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC5_MAC_GLOB_STAT_TX1_BASE 0x56EB398ull
+#define NIC5_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC5_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC5_MAC_GLOB_STAT_TX2_BASE 0x56EB400ull
+#define NIC5_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC5_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC5_MAC_GLOB_STAT_TX3_BASE 0x56EB468ull
+#define NIC5_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC5_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC5_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x56EB800ull
+#define NIC5_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC5_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC5_MAC_CH0_MAC_PCS_BASE 0x56EC000ull
+#define NIC5_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC5_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC5_MAC_CH0_MAC_128_BASE 0x56EC400ull
+#define NIC5_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC5_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC5_MAC_CH0_MAC_AN_BASE 0x56EC800ull
+#define NIC5_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC5_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC5_MAC_CH1_MAC_PCS_BASE 0x56ED000ull
+#define NIC5_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC5_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC5_MAC_CH1_MAC_128_BASE 0x56ED400ull
+#define NIC5_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC5_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC5_MAC_CH1_MAC_AN_BASE 0x56ED800ull
+#define NIC5_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC5_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC5_MAC_CH2_MAC_PCS_BASE 0x56EE000ull
+#define NIC5_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC5_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC5_MAC_CH2_MAC_128_BASE 0x56EE400ull
+#define NIC5_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC5_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC5_MAC_CH2_MAC_AN_BASE 0x56EE800ull
+#define NIC5_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC5_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC5_MAC_CH3_MAC_PCS_BASE 0x56EF000ull
+#define NIC5_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC5_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC5_MAC_CH3_MAC_128_BASE 0x56EF400ull
+#define NIC5_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC5_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC5_MAC_CH3_MAC_AN_BASE 0x56EF800ull
+#define NIC5_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC5_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC6_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5700000ull
+#define NIC6_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5700080ull
+#define NIC6_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5700100ull
+#define NIC6_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5700180ull
+#define NIC6_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_0_SPECIAL_BASE 0x5700E80ull
+#define NIC6_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5701000ull
+#define NIC6_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5701080ull
+#define NIC6_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5701100ull
+#define NIC6_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5701180ull
+#define NIC6_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_1_SPECIAL_BASE 0x5701E80ull
+#define NIC6_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5702000ull
+#define NIC6_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5702080ull
+#define NIC6_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5702100ull
+#define NIC6_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5702180ull
+#define NIC6_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_2_SPECIAL_BASE 0x5702E80ull
+#define NIC6_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5703000ull
+#define NIC6_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5703080ull
+#define NIC6_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5703100ull
+#define NIC6_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5703180ull
+#define NIC6_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_3_SPECIAL_BASE 0x5703E80ull
+#define NIC6_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5704000ull
+#define NIC6_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5704080ull
+#define NIC6_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5704100ull
+#define NIC6_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5704180ull
+#define NIC6_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_4_SPECIAL_BASE 0x5704E80ull
+#define NIC6_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5705000ull
+#define NIC6_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5705080ull
+#define NIC6_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5705100ull
+#define NIC6_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5705180ull
+#define NIC6_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_5_SPECIAL_BASE 0x5705E80ull
+#define NIC6_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5706000ull
+#define NIC6_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5706080ull
+#define NIC6_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5706100ull
+#define NIC6_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5706180ull
+#define NIC6_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_6_SPECIAL_BASE 0x5706E80ull
+#define NIC6_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5707000ull
+#define NIC6_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5707080ull
+#define NIC6_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5707100ull
+#define NIC6_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5707180ull
+#define NIC6_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_7_SPECIAL_BASE 0x5707E80ull
+#define NIC6_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5708000ull
+#define NIC6_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5708080ull
+#define NIC6_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5708100ull
+#define NIC6_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5708180ull
+#define NIC6_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_8_SPECIAL_BASE 0x5708E80ull
+#define NIC6_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5709000ull
+#define NIC6_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5709080ull
+#define NIC6_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5709100ull
+#define NIC6_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5709180ull
+#define NIC6_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_9_SPECIAL_BASE 0x5709E80ull
+#define NIC6_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_10_UNSECURE_DOORBELL0_BASE 0x570A000ull
+#define NIC6_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_10_UNSECURE_DOORBELL1_BASE 0x570A080ull
+#define NIC6_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x570A100ull
+#define NIC6_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x570A180ull
+#define NIC6_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_10_SPECIAL_BASE 0x570AE80ull
+#define NIC6_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_11_UNSECURE_DOORBELL0_BASE 0x570B000ull
+#define NIC6_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_11_UNSECURE_DOORBELL1_BASE 0x570B080ull
+#define NIC6_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x570B100ull
+#define NIC6_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x570B180ull
+#define NIC6_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_11_SPECIAL_BASE 0x570BE80ull
+#define NIC6_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_12_UNSECURE_DOORBELL0_BASE 0x570C000ull
+#define NIC6_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_12_UNSECURE_DOORBELL1_BASE 0x570C080ull
+#define NIC6_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x570C100ull
+#define NIC6_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x570C180ull
+#define NIC6_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_12_SPECIAL_BASE 0x570CE80ull
+#define NIC6_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_13_UNSECURE_DOORBELL0_BASE 0x570D000ull
+#define NIC6_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_13_UNSECURE_DOORBELL1_BASE 0x570D080ull
+#define NIC6_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x570D100ull
+#define NIC6_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x570D180ull
+#define NIC6_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_13_SPECIAL_BASE 0x570DE80ull
+#define NIC6_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR0_14_UNSECURE_DOORBELL0_BASE 0x570E000ull
+#define NIC6_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR0_14_UNSECURE_DOORBELL1_BASE 0x570E080ull
+#define NIC6_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x570E100ull
+#define NIC6_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x570E180ull
+#define NIC6_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR0_14_SPECIAL_BASE 0x570EE80ull
+#define NIC6_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC6_QM_DCCM0_BASE 0x5710000ull
+#define NIC6_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC6_QM_DCCM0_SECTION 0x8000
+#define mmNIC6_QM_ARC_AUX0_BASE 0x5718000ull
+#define NIC6_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC6_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC6_QM_ARC_AUX0_SPECIAL_BASE 0x5718E80ull
+#define NIC6_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC6_QM0_BASE 0x571A000ull
+#define NIC6_QM0_MAX_OFFSET 0x1000
+#define NIC6_QM0_SECTION 0x9000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x571A900ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x571A908ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x571A910ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x571A918ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x571A920ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x571A928ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x571A930ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x571A938ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x571A940ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x571A948ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x571A950ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x571A958ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x571A960ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x571A968ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x571A970ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC6_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x571A978ull
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC6_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC6_QM0_AXUSER_SECURED_BASE 0x571AB00ull
+#define NIC6_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC6_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC6_QM0_AXUSER_NONSECURED_BASE 0x571AB80ull
+#define NIC6_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC6_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC6_QM0_DBG_HBW_BASE 0x571AC00ull
+#define NIC6_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC6_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC6_QM0_DBG_LBW_BASE 0x571AC80ull
+#define NIC6_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC6_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC6_QM0_CGM_BASE 0x571AD80ull
+#define NIC6_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC6_QM0_CGM_SECTION 0x1000
+#define mmNIC6_QM0_SPECIAL_BASE 0x571AE80ull
+#define NIC6_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC6_QPC0_BASE 0x571F000ull
+#define NIC6_QPC0_MAX_OFFSET 0x1000
+#define NIC6_QPC0_SECTION 0x7200
+#define mmNIC6_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x571F720ull
+#define NIC6_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x571F728ull
+#define NIC6_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x571F730ull
+#define NIC6_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x571F738ull
+#define NIC6_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x571F740ull
+#define NIC6_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x571F748ull
+#define NIC6_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x571F750ull
+#define NIC6_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x571F758ull
+#define NIC6_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x571F760ull
+#define NIC6_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x571F768ull
+#define NIC6_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x571F770ull
+#define NIC6_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x571F778ull
+#define NIC6_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x571F780ull
+#define NIC6_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x571F788ull
+#define NIC6_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x571F790ull
+#define NIC6_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x571F798ull
+#define NIC6_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x571F7A0ull
+#define NIC6_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x571F7A8ull
+#define NIC6_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x571F7B0ull
+#define NIC6_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x571F7B8ull
+#define NIC6_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x571F7C0ull
+#define NIC6_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x571F7C8ull
+#define NIC6_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x571F7D0ull
+#define NIC6_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x571F7D8ull
+#define NIC6_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x571F7E0ull
+#define NIC6_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x571F7E8ull
+#define NIC6_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x571F7F0ull
+#define NIC6_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x571F7F8ull
+#define NIC6_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x571F800ull
+#define NIC6_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x571F808ull
+#define NIC6_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x571F810ull
+#define NIC6_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x571F818ull
+#define NIC6_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC6_QPC0_AXUSER_CONG_QUE_BASE 0x571FB80ull
+#define NIC6_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_RXWQE_BASE 0x571FBE0ull
+#define NIC6_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x571FC40ull
+#define NIC6_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_DB_FIFO_BASE 0x571FCA0ull
+#define NIC6_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x571FD00ull
+#define NIC6_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_ERR_FIFO_BASE 0x571FD60ull
+#define NIC6_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_QPC_RESP_BASE 0x571FDC0ull
+#define NIC6_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC6_QPC0_AXUSER_QPC_REQ_BASE 0x571FE20ull
+#define NIC6_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC6_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC6_QPC0_SPECIAL_BASE 0x571FE80ull
+#define NIC6_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_0_UNSECURE_DOORBELL0_BASE 0x5720000ull
+#define NIC6_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_0_UNSECURE_DOORBELL1_BASE 0x5720080ull
+#define NIC6_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x5720100ull
+#define NIC6_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x5720180ull
+#define NIC6_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_0_SPECIAL_BASE 0x5720E80ull
+#define NIC6_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_1_UNSECURE_DOORBELL0_BASE 0x5721000ull
+#define NIC6_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_1_UNSECURE_DOORBELL1_BASE 0x5721080ull
+#define NIC6_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x5721100ull
+#define NIC6_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x5721180ull
+#define NIC6_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_1_SPECIAL_BASE 0x5721E80ull
+#define NIC6_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_2_UNSECURE_DOORBELL0_BASE 0x5722000ull
+#define NIC6_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_2_UNSECURE_DOORBELL1_BASE 0x5722080ull
+#define NIC6_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x5722100ull
+#define NIC6_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x5722180ull
+#define NIC6_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_2_SPECIAL_BASE 0x5722E80ull
+#define NIC6_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_3_UNSECURE_DOORBELL0_BASE 0x5723000ull
+#define NIC6_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_3_UNSECURE_DOORBELL1_BASE 0x5723080ull
+#define NIC6_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x5723100ull
+#define NIC6_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x5723180ull
+#define NIC6_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_3_SPECIAL_BASE 0x5723E80ull
+#define NIC6_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_4_UNSECURE_DOORBELL0_BASE 0x5724000ull
+#define NIC6_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_4_UNSECURE_DOORBELL1_BASE 0x5724080ull
+#define NIC6_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x5724100ull
+#define NIC6_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x5724180ull
+#define NIC6_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_4_SPECIAL_BASE 0x5724E80ull
+#define NIC6_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_5_UNSECURE_DOORBELL0_BASE 0x5725000ull
+#define NIC6_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_5_UNSECURE_DOORBELL1_BASE 0x5725080ull
+#define NIC6_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x5725100ull
+#define NIC6_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x5725180ull
+#define NIC6_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_5_SPECIAL_BASE 0x5725E80ull
+#define NIC6_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_6_UNSECURE_DOORBELL0_BASE 0x5726000ull
+#define NIC6_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_6_UNSECURE_DOORBELL1_BASE 0x5726080ull
+#define NIC6_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x5726100ull
+#define NIC6_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x5726180ull
+#define NIC6_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_6_SPECIAL_BASE 0x5726E80ull
+#define NIC6_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_7_UNSECURE_DOORBELL0_BASE 0x5727000ull
+#define NIC6_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_7_UNSECURE_DOORBELL1_BASE 0x5727080ull
+#define NIC6_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x5727100ull
+#define NIC6_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x5727180ull
+#define NIC6_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_7_SPECIAL_BASE 0x5727E80ull
+#define NIC6_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_8_UNSECURE_DOORBELL0_BASE 0x5728000ull
+#define NIC6_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_8_UNSECURE_DOORBELL1_BASE 0x5728080ull
+#define NIC6_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x5728100ull
+#define NIC6_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x5728180ull
+#define NIC6_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_8_SPECIAL_BASE 0x5728E80ull
+#define NIC6_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_9_UNSECURE_DOORBELL0_BASE 0x5729000ull
+#define NIC6_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_9_UNSECURE_DOORBELL1_BASE 0x5729080ull
+#define NIC6_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x5729100ull
+#define NIC6_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x5729180ull
+#define NIC6_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_9_SPECIAL_BASE 0x5729E80ull
+#define NIC6_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_10_UNSECURE_DOORBELL0_BASE 0x572A000ull
+#define NIC6_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_10_UNSECURE_DOORBELL1_BASE 0x572A080ull
+#define NIC6_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x572A100ull
+#define NIC6_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x572A180ull
+#define NIC6_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_10_SPECIAL_BASE 0x572AE80ull
+#define NIC6_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_11_UNSECURE_DOORBELL0_BASE 0x572B000ull
+#define NIC6_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_11_UNSECURE_DOORBELL1_BASE 0x572B080ull
+#define NIC6_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x572B100ull
+#define NIC6_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x572B180ull
+#define NIC6_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_11_SPECIAL_BASE 0x572BE80ull
+#define NIC6_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_12_UNSECURE_DOORBELL0_BASE 0x572C000ull
+#define NIC6_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_12_UNSECURE_DOORBELL1_BASE 0x572C080ull
+#define NIC6_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x572C100ull
+#define NIC6_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x572C180ull
+#define NIC6_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_12_SPECIAL_BASE 0x572CE80ull
+#define NIC6_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_13_UNSECURE_DOORBELL0_BASE 0x572D000ull
+#define NIC6_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_13_UNSECURE_DOORBELL1_BASE 0x572D080ull
+#define NIC6_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x572D100ull
+#define NIC6_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x572D180ull
+#define NIC6_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_13_SPECIAL_BASE 0x572DE80ull
+#define NIC6_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC6_UMR1_14_UNSECURE_DOORBELL0_BASE 0x572E000ull
+#define NIC6_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC6_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC6_UMR1_14_UNSECURE_DOORBELL1_BASE 0x572E080ull
+#define NIC6_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC6_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC6_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x572E100ull
+#define NIC6_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC6_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC6_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x572E180ull
+#define NIC6_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC6_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC6_UMR1_14_SPECIAL_BASE 0x572EE80ull
+#define NIC6_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC6_QM_DCCM1_BASE 0x5730000ull
+#define NIC6_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC6_QM_DCCM1_SECTION 0x8000
+#define mmNIC6_QM_ARC_AUX1_BASE 0x5738000ull
+#define NIC6_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC6_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC6_QM_ARC_AUX1_SPECIAL_BASE 0x5738E80ull
+#define NIC6_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC6_QM1_BASE 0x573A000ull
+#define NIC6_QM1_MAX_OFFSET 0x1000
+#define NIC6_QM1_SECTION 0x9000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x573A900ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x573A908ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x573A910ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x573A918ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x573A920ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x573A928ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x573A930ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x573A938ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x573A940ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x573A948ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x573A950ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x573A958ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x573A960ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x573A968ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x573A970ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC6_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x573A978ull
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC6_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC6_QM1_AXUSER_SECURED_BASE 0x573AB00ull
+#define NIC6_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC6_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC6_QM1_AXUSER_NONSECURED_BASE 0x573AB80ull
+#define NIC6_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC6_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC6_QM1_DBG_HBW_BASE 0x573AC00ull
+#define NIC6_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC6_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC6_QM1_DBG_LBW_BASE 0x573AC80ull
+#define NIC6_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC6_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC6_QM1_CGM_BASE 0x573AD80ull
+#define NIC6_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC6_QM1_CGM_SECTION 0x1000
+#define mmNIC6_QM1_SPECIAL_BASE 0x573AE80ull
+#define NIC6_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC6_QPC1_BASE 0x573F000ull
+#define NIC6_QPC1_MAX_OFFSET 0x1000
+#define NIC6_QPC1_SECTION 0x7200
+#define mmNIC6_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x573F720ull
+#define NIC6_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x573F728ull
+#define NIC6_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x573F730ull
+#define NIC6_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x573F738ull
+#define NIC6_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x573F740ull
+#define NIC6_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x573F748ull
+#define NIC6_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x573F750ull
+#define NIC6_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x573F758ull
+#define NIC6_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x573F760ull
+#define NIC6_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x573F768ull
+#define NIC6_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x573F770ull
+#define NIC6_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x573F778ull
+#define NIC6_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x573F780ull
+#define NIC6_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x573F788ull
+#define NIC6_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x573F790ull
+#define NIC6_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x573F798ull
+#define NIC6_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x573F7A0ull
+#define NIC6_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x573F7A8ull
+#define NIC6_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x573F7B0ull
+#define NIC6_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x573F7B8ull
+#define NIC6_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x573F7C0ull
+#define NIC6_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x573F7C8ull
+#define NIC6_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x573F7D0ull
+#define NIC6_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x573F7D8ull
+#define NIC6_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x573F7E0ull
+#define NIC6_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x573F7E8ull
+#define NIC6_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x573F7F0ull
+#define NIC6_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x573F7F8ull
+#define NIC6_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x573F800ull
+#define NIC6_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x573F808ull
+#define NIC6_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x573F810ull
+#define NIC6_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC6_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x573F818ull
+#define NIC6_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC6_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC6_QPC1_AXUSER_CONG_QUE_BASE 0x573FB80ull
+#define NIC6_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_RXWQE_BASE 0x573FBE0ull
+#define NIC6_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x573FC40ull
+#define NIC6_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_DB_FIFO_BASE 0x573FCA0ull
+#define NIC6_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x573FD00ull
+#define NIC6_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_ERR_FIFO_BASE 0x573FD60ull
+#define NIC6_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_QPC_RESP_BASE 0x573FDC0ull
+#define NIC6_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC6_QPC1_AXUSER_QPC_REQ_BASE 0x573FE20ull
+#define NIC6_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC6_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC6_QPC1_SPECIAL_BASE 0x573FE80ull
+#define NIC6_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC6_TMR_BASE 0x5748000ull
+#define NIC6_TMR_MAX_OFFSET 0x1000
+#define NIC6_TMR_SECTION 0xD600
+#define mmNIC6_TMR_AXUSER_TMR_FREE_LIST_BASE 0x5748D60ull
+#define NIC6_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC6_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC6_TMR_AXUSER_TMR_FIFO_BASE 0x5748DC0ull
+#define NIC6_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC6_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC6_TMR_AXUSER_TMR_FSM_BASE 0x5748E20ull
+#define NIC6_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC6_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC6_TMR_SPECIAL_BASE 0x5748E80ull
+#define NIC6_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC6_RXB_CORE_BASE 0x5749000ull
+#define NIC6_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC6_RXB_CORE_SECTION 0x6100
+#define mmNIC6_RXB_CORE_SCT_AWUSER_BASE 0x5749610ull
+#define NIC6_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC6_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC6_RXB_CORE_SPECIAL_BASE 0x5749E80ull
+#define NIC6_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC6_RXE0_BASE 0x574A000ull
+#define NIC6_RXE0_MAX_OFFSET 0x1000
+#define NIC6_RXE0_SECTION 0x9000
+#define mmNIC6_RXE0_WQE_ARUSER_BASE 0x574A900ull
+#define NIC6_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC6_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC6_RXE0_SPECIAL_BASE 0x574AE80ull
+#define NIC6_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC6_RXE1_BASE 0x574B000ull
+#define NIC6_RXE1_MAX_OFFSET 0x1000
+#define NIC6_RXE1_SECTION 0x9000
+#define mmNIC6_RXE1_WQE_ARUSER_BASE 0x574B900ull
+#define NIC6_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC6_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC6_RXE1_SPECIAL_BASE 0x574BE80ull
+#define NIC6_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ0_BASE 0x574C000ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ1_BASE 0x574C050ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ2_BASE 0x574C0A0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ3_BASE 0x574C0F0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ4_BASE 0x574C140ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ5_BASE 0x574C190ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ6_BASE 0x574C1E0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ7_BASE 0x574C230ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ8_BASE 0x574C280ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ9_BASE 0x574C2D0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ10_BASE 0x574C320ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ11_BASE 0x574C370ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ12_BASE 0x574C3C0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ13_BASE 0x574C410ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ14_BASE 0x574C460ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ15_BASE 0x574C4B0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ16_BASE 0x574C500ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ17_BASE 0x574C550ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ18_BASE 0x574C5A0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ19_BASE 0x574C5F0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ20_BASE 0x574C640ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ21_BASE 0x574C690ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ22_BASE 0x574C6E0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ23_BASE 0x574C730ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ24_BASE 0x574C780ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ25_BASE 0x574C7D0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ26_BASE 0x574C820ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ27_BASE 0x574C870ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ28_BASE 0x574C8C0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ29_BASE 0x574C910ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ30_BASE 0x574C960ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC6_RXE0_AXUSER_AXUSER_CQ31_BASE 0x574C9B0ull
+#define NIC6_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC6_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC6_RXE0_AXUSER_SPECIAL_BASE 0x574CE80ull
+#define NIC6_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ0_BASE 0x574D000ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ1_BASE 0x574D050ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ2_BASE 0x574D0A0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ3_BASE 0x574D0F0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ4_BASE 0x574D140ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ5_BASE 0x574D190ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ6_BASE 0x574D1E0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ7_BASE 0x574D230ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ8_BASE 0x574D280ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ9_BASE 0x574D2D0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ10_BASE 0x574D320ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ11_BASE 0x574D370ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ12_BASE 0x574D3C0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ13_BASE 0x574D410ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ14_BASE 0x574D460ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ15_BASE 0x574D4B0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ16_BASE 0x574D500ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ17_BASE 0x574D550ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ18_BASE 0x574D5A0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ19_BASE 0x574D5F0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ20_BASE 0x574D640ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ21_BASE 0x574D690ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ22_BASE 0x574D6E0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ23_BASE 0x574D730ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ24_BASE 0x574D780ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ25_BASE 0x574D7D0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ26_BASE 0x574D820ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ27_BASE 0x574D870ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ28_BASE 0x574D8C0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ29_BASE 0x574D910ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ30_BASE 0x574D960ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC6_RXE1_AXUSER_AXUSER_CQ31_BASE 0x574D9B0ull
+#define NIC6_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC6_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC6_RXE1_AXUSER_SPECIAL_BASE 0x574DE80ull
+#define NIC6_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC6_TXS0_BASE 0x5750000ull
+#define NIC6_TXS0_MAX_OFFSET 0x1000
+#define NIC6_TXS0_SECTION 0xE800
+#define mmNIC6_TXS0_SPECIAL_BASE 0x5750E80ull
+#define NIC6_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC6_TXS1_BASE 0x5751000ull
+#define NIC6_TXS1_MAX_OFFSET 0x1000
+#define NIC6_TXS1_SECTION 0xE800
+#define mmNIC6_TXS1_SPECIAL_BASE 0x5751E80ull
+#define NIC6_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC6_TXE0_BASE 0x5752000ull
+#define NIC6_TXE0_MAX_OFFSET 0x1000
+#define NIC6_TXE0_SECTION 0xE800
+#define mmNIC6_TXE0_SPECIAL_BASE 0x5752E80ull
+#define NIC6_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC6_TXE1_BASE 0x5753000ull
+#define NIC6_TXE1_MAX_OFFSET 0x1000
+#define NIC6_TXE1_SECTION 0xE800
+#define mmNIC6_TXE1_SPECIAL_BASE 0x5753E80ull
+#define NIC6_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC6_TXB_BASE 0x5754000ull
+#define NIC6_TXB_MAX_OFFSET 0x1000
+#define NIC6_TXB_SECTION 0xE800
+#define mmNIC6_TXB_SPECIAL_BASE 0x5754E80ull
+#define NIC6_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC6_MSTR_IF_RR_SHRD_HBW_BASE 0x5755000ull
+#define NIC6_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC6_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC6_MSTR_IF_RR_PRVT_HBW_BASE 0x5755200ull
+#define NIC6_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC6_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC6_MSTR_IF_RR_SHRD_LBW_BASE 0x5755400ull
+#define NIC6_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC6_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC6_MSTR_IF_RR_PRVT_LBW_BASE 0x5755600ull
+#define NIC6_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC6_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC6_MSTR_IF_E2E_CRDT_BASE 0x5755800ull
+#define NIC6_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC6_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC6_MSTR_IF_AXUSER_BASE 0x5755A80ull
+#define NIC6_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC6_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC6_MSTR_IF_DBG_HBW_BASE 0x5755B00ull
+#define NIC6_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC6_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC6_MSTR_IF_DBG_LBW_BASE 0x5755B80ull
+#define NIC6_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC6_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC6_MSTR_IF_CORE_HBW_BASE 0x5755C00ull
+#define NIC6_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC6_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC6_MSTR_IF_CORE_LBW_BASE 0x5755D80ull
+#define NIC6_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC6_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC6_MSTR_IF_SPECIAL_BASE 0x5755E80ull
+#define NIC6_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC6_TX_AXUSER_BASE 0x5756000ull
+#define NIC6_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC6_TX_AXUSER_SECTION 0x2000
+#define mmNIC6_SERDES0_BASE 0x5758000ull
+#define NIC6_SERDES0_MAX_OFFSET 0x3E40
+#define NIC6_SERDES0_SECTION 0x4000
+#define mmNIC6_SERDES1_BASE 0x575C000ull
+#define NIC6_SERDES1_MAX_OFFSET 0x3E40
+#define NIC6_SERDES1_SECTION 0x4000
+#define mmNIC6_PHY_BASE 0x5760000ull
+#define NIC6_PHY_MAX_OFFSET 0x1000
+#define NIC6_PHY_SECTION 0xE800
+#define mmNIC6_PHY_SPECIAL_BASE 0x5760E80ull
+#define NIC6_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC6_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT6_MAC_AUX_BASE 0x5768000ull
+#define PRT6_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT6_MAC_AUX_SECTION 0xE800
+#define mmPRT6_MAC_AUX_SPECIAL_BASE 0x5768E80ull
+#define PRT6_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT6_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT6_MAC_CORE_BASE 0x5769000ull
+#define PRT6_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT6_MAC_CORE_SECTION 0xE800
+#define mmPRT6_MAC_CORE_SPECIAL_BASE 0x5769E80ull
+#define PRT6_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT6_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC6_MAC_RS_FEC_BASE 0x576A000ull
+#define NIC6_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC6_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC6_MAC_GLOB_STAT_CONTROL_REG_BASE 0x576B000ull
+#define NIC6_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC6_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC6_MAC_GLOB_STAT_RX0_BASE 0x576B100ull
+#define NIC6_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC6_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC6_MAC_GLOB_STAT_RX1_BASE 0x576B18Cull
+#define NIC6_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC6_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC6_MAC_GLOB_STAT_RX2_BASE 0x576B218ull
+#define NIC6_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC6_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC6_MAC_GLOB_STAT_RX3_BASE 0x576B2A4ull
+#define NIC6_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC6_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC6_MAC_GLOB_STAT_TX0_BASE 0x576B330ull
+#define NIC6_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC6_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC6_MAC_GLOB_STAT_TX1_BASE 0x576B398ull
+#define NIC6_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC6_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC6_MAC_GLOB_STAT_TX2_BASE 0x576B400ull
+#define NIC6_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC6_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC6_MAC_GLOB_STAT_TX3_BASE 0x576B468ull
+#define NIC6_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC6_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC6_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x576B800ull
+#define NIC6_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC6_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC6_MAC_CH0_MAC_PCS_BASE 0x576C000ull
+#define NIC6_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC6_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC6_MAC_CH0_MAC_128_BASE 0x576C400ull
+#define NIC6_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC6_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC6_MAC_CH0_MAC_AN_BASE 0x576C800ull
+#define NIC6_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC6_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC6_MAC_CH1_MAC_PCS_BASE 0x576D000ull
+#define NIC6_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC6_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC6_MAC_CH1_MAC_128_BASE 0x576D400ull
+#define NIC6_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC6_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC6_MAC_CH1_MAC_AN_BASE 0x576D800ull
+#define NIC6_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC6_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC6_MAC_CH2_MAC_PCS_BASE 0x576E000ull
+#define NIC6_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC6_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC6_MAC_CH2_MAC_128_BASE 0x576E400ull
+#define NIC6_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC6_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC6_MAC_CH2_MAC_AN_BASE 0x576E800ull
+#define NIC6_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC6_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC6_MAC_CH3_MAC_PCS_BASE 0x576F000ull
+#define NIC6_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC6_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC6_MAC_CH3_MAC_128_BASE 0x576F400ull
+#define NIC6_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC6_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC6_MAC_CH3_MAC_AN_BASE 0x576F800ull
+#define NIC6_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC6_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC7_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5780000ull
+#define NIC7_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5780080ull
+#define NIC7_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5780100ull
+#define NIC7_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5780180ull
+#define NIC7_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_0_SPECIAL_BASE 0x5780E80ull
+#define NIC7_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5781000ull
+#define NIC7_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5781080ull
+#define NIC7_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5781100ull
+#define NIC7_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5781180ull
+#define NIC7_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_1_SPECIAL_BASE 0x5781E80ull
+#define NIC7_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5782000ull
+#define NIC7_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5782080ull
+#define NIC7_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5782100ull
+#define NIC7_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5782180ull
+#define NIC7_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_2_SPECIAL_BASE 0x5782E80ull
+#define NIC7_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5783000ull
+#define NIC7_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5783080ull
+#define NIC7_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5783100ull
+#define NIC7_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5783180ull
+#define NIC7_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_3_SPECIAL_BASE 0x5783E80ull
+#define NIC7_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5784000ull
+#define NIC7_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5784080ull
+#define NIC7_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5784100ull
+#define NIC7_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5784180ull
+#define NIC7_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_4_SPECIAL_BASE 0x5784E80ull
+#define NIC7_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5785000ull
+#define NIC7_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5785080ull
+#define NIC7_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5785100ull
+#define NIC7_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5785180ull
+#define NIC7_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_5_SPECIAL_BASE 0x5785E80ull
+#define NIC7_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5786000ull
+#define NIC7_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5786080ull
+#define NIC7_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5786100ull
+#define NIC7_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5786180ull
+#define NIC7_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_6_SPECIAL_BASE 0x5786E80ull
+#define NIC7_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5787000ull
+#define NIC7_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5787080ull
+#define NIC7_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5787100ull
+#define NIC7_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5787180ull
+#define NIC7_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_7_SPECIAL_BASE 0x5787E80ull
+#define NIC7_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5788000ull
+#define NIC7_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5788080ull
+#define NIC7_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5788100ull
+#define NIC7_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5788180ull
+#define NIC7_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_8_SPECIAL_BASE 0x5788E80ull
+#define NIC7_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5789000ull
+#define NIC7_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5789080ull
+#define NIC7_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5789100ull
+#define NIC7_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5789180ull
+#define NIC7_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_9_SPECIAL_BASE 0x5789E80ull
+#define NIC7_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_10_UNSECURE_DOORBELL0_BASE 0x578A000ull
+#define NIC7_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_10_UNSECURE_DOORBELL1_BASE 0x578A080ull
+#define NIC7_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x578A100ull
+#define NIC7_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x578A180ull
+#define NIC7_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_10_SPECIAL_BASE 0x578AE80ull
+#define NIC7_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_11_UNSECURE_DOORBELL0_BASE 0x578B000ull
+#define NIC7_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_11_UNSECURE_DOORBELL1_BASE 0x578B080ull
+#define NIC7_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x578B100ull
+#define NIC7_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x578B180ull
+#define NIC7_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_11_SPECIAL_BASE 0x578BE80ull
+#define NIC7_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_12_UNSECURE_DOORBELL0_BASE 0x578C000ull
+#define NIC7_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_12_UNSECURE_DOORBELL1_BASE 0x578C080ull
+#define NIC7_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x578C100ull
+#define NIC7_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x578C180ull
+#define NIC7_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_12_SPECIAL_BASE 0x578CE80ull
+#define NIC7_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_13_UNSECURE_DOORBELL0_BASE 0x578D000ull
+#define NIC7_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_13_UNSECURE_DOORBELL1_BASE 0x578D080ull
+#define NIC7_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x578D100ull
+#define NIC7_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x578D180ull
+#define NIC7_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_13_SPECIAL_BASE 0x578DE80ull
+#define NIC7_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR0_14_UNSECURE_DOORBELL0_BASE 0x578E000ull
+#define NIC7_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR0_14_UNSECURE_DOORBELL1_BASE 0x578E080ull
+#define NIC7_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x578E100ull
+#define NIC7_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x578E180ull
+#define NIC7_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR0_14_SPECIAL_BASE 0x578EE80ull
+#define NIC7_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC7_QM_DCCM0_BASE 0x5790000ull
+#define NIC7_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC7_QM_DCCM0_SECTION 0x8000
+#define mmNIC7_QM_ARC_AUX0_BASE 0x5798000ull
+#define NIC7_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC7_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC7_QM_ARC_AUX0_SPECIAL_BASE 0x5798E80ull
+#define NIC7_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC7_QM0_BASE 0x579A000ull
+#define NIC7_QM0_MAX_OFFSET 0x1000
+#define NIC7_QM0_SECTION 0x9000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x579A900ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x579A908ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x579A910ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x579A918ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x579A920ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x579A928ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x579A930ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x579A938ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x579A940ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x579A948ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x579A950ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x579A958ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x579A960ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x579A968ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x579A970ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC7_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x579A978ull
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC7_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC7_QM0_AXUSER_SECURED_BASE 0x579AB00ull
+#define NIC7_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC7_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC7_QM0_AXUSER_NONSECURED_BASE 0x579AB80ull
+#define NIC7_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC7_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC7_QM0_DBG_HBW_BASE 0x579AC00ull
+#define NIC7_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC7_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC7_QM0_DBG_LBW_BASE 0x579AC80ull
+#define NIC7_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC7_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC7_QM0_CGM_BASE 0x579AD80ull
+#define NIC7_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC7_QM0_CGM_SECTION 0x1000
+#define mmNIC7_QM0_SPECIAL_BASE 0x579AE80ull
+#define NIC7_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC7_QPC0_BASE 0x579F000ull
+#define NIC7_QPC0_MAX_OFFSET 0x1000
+#define NIC7_QPC0_SECTION 0x7200
+#define mmNIC7_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x579F720ull
+#define NIC7_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x579F728ull
+#define NIC7_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x579F730ull
+#define NIC7_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x579F738ull
+#define NIC7_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x579F740ull
+#define NIC7_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x579F748ull
+#define NIC7_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x579F750ull
+#define NIC7_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x579F758ull
+#define NIC7_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x579F760ull
+#define NIC7_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x579F768ull
+#define NIC7_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x579F770ull
+#define NIC7_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x579F778ull
+#define NIC7_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x579F780ull
+#define NIC7_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x579F788ull
+#define NIC7_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x579F790ull
+#define NIC7_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x579F798ull
+#define NIC7_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x579F7A0ull
+#define NIC7_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x579F7A8ull
+#define NIC7_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x579F7B0ull
+#define NIC7_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x579F7B8ull
+#define NIC7_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x579F7C0ull
+#define NIC7_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x579F7C8ull
+#define NIC7_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x579F7D0ull
+#define NIC7_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x579F7D8ull
+#define NIC7_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x579F7E0ull
+#define NIC7_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x579F7E8ull
+#define NIC7_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x579F7F0ull
+#define NIC7_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x579F7F8ull
+#define NIC7_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x579F800ull
+#define NIC7_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x579F808ull
+#define NIC7_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x579F810ull
+#define NIC7_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x579F818ull
+#define NIC7_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC7_QPC0_AXUSER_CONG_QUE_BASE 0x579FB80ull
+#define NIC7_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_RXWQE_BASE 0x579FBE0ull
+#define NIC7_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x579FC40ull
+#define NIC7_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_DB_FIFO_BASE 0x579FCA0ull
+#define NIC7_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x579FD00ull
+#define NIC7_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_ERR_FIFO_BASE 0x579FD60ull
+#define NIC7_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_QPC_RESP_BASE 0x579FDC0ull
+#define NIC7_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC7_QPC0_AXUSER_QPC_REQ_BASE 0x579FE20ull
+#define NIC7_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC7_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC7_QPC0_SPECIAL_BASE 0x579FE80ull
+#define NIC7_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_0_UNSECURE_DOORBELL0_BASE 0x57A0000ull
+#define NIC7_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_0_UNSECURE_DOORBELL1_BASE 0x57A0080ull
+#define NIC7_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x57A0100ull
+#define NIC7_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x57A0180ull
+#define NIC7_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_0_SPECIAL_BASE 0x57A0E80ull
+#define NIC7_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_1_UNSECURE_DOORBELL0_BASE 0x57A1000ull
+#define NIC7_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_1_UNSECURE_DOORBELL1_BASE 0x57A1080ull
+#define NIC7_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x57A1100ull
+#define NIC7_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x57A1180ull
+#define NIC7_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_1_SPECIAL_BASE 0x57A1E80ull
+#define NIC7_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_2_UNSECURE_DOORBELL0_BASE 0x57A2000ull
+#define NIC7_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_2_UNSECURE_DOORBELL1_BASE 0x57A2080ull
+#define NIC7_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x57A2100ull
+#define NIC7_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x57A2180ull
+#define NIC7_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_2_SPECIAL_BASE 0x57A2E80ull
+#define NIC7_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_3_UNSECURE_DOORBELL0_BASE 0x57A3000ull
+#define NIC7_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_3_UNSECURE_DOORBELL1_BASE 0x57A3080ull
+#define NIC7_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x57A3100ull
+#define NIC7_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x57A3180ull
+#define NIC7_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_3_SPECIAL_BASE 0x57A3E80ull
+#define NIC7_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_4_UNSECURE_DOORBELL0_BASE 0x57A4000ull
+#define NIC7_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_4_UNSECURE_DOORBELL1_BASE 0x57A4080ull
+#define NIC7_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x57A4100ull
+#define NIC7_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x57A4180ull
+#define NIC7_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_4_SPECIAL_BASE 0x57A4E80ull
+#define NIC7_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_5_UNSECURE_DOORBELL0_BASE 0x57A5000ull
+#define NIC7_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_5_UNSECURE_DOORBELL1_BASE 0x57A5080ull
+#define NIC7_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x57A5100ull
+#define NIC7_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x57A5180ull
+#define NIC7_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_5_SPECIAL_BASE 0x57A5E80ull
+#define NIC7_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_6_UNSECURE_DOORBELL0_BASE 0x57A6000ull
+#define NIC7_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_6_UNSECURE_DOORBELL1_BASE 0x57A6080ull
+#define NIC7_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x57A6100ull
+#define NIC7_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x57A6180ull
+#define NIC7_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_6_SPECIAL_BASE 0x57A6E80ull
+#define NIC7_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_7_UNSECURE_DOORBELL0_BASE 0x57A7000ull
+#define NIC7_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_7_UNSECURE_DOORBELL1_BASE 0x57A7080ull
+#define NIC7_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x57A7100ull
+#define NIC7_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x57A7180ull
+#define NIC7_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_7_SPECIAL_BASE 0x57A7E80ull
+#define NIC7_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_8_UNSECURE_DOORBELL0_BASE 0x57A8000ull
+#define NIC7_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_8_UNSECURE_DOORBELL1_BASE 0x57A8080ull
+#define NIC7_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x57A8100ull
+#define NIC7_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x57A8180ull
+#define NIC7_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_8_SPECIAL_BASE 0x57A8E80ull
+#define NIC7_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_9_UNSECURE_DOORBELL0_BASE 0x57A9000ull
+#define NIC7_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_9_UNSECURE_DOORBELL1_BASE 0x57A9080ull
+#define NIC7_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x57A9100ull
+#define NIC7_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x57A9180ull
+#define NIC7_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_9_SPECIAL_BASE 0x57A9E80ull
+#define NIC7_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_10_UNSECURE_DOORBELL0_BASE 0x57AA000ull
+#define NIC7_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_10_UNSECURE_DOORBELL1_BASE 0x57AA080ull
+#define NIC7_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x57AA100ull
+#define NIC7_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x57AA180ull
+#define NIC7_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_10_SPECIAL_BASE 0x57AAE80ull
+#define NIC7_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_11_UNSECURE_DOORBELL0_BASE 0x57AB000ull
+#define NIC7_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_11_UNSECURE_DOORBELL1_BASE 0x57AB080ull
+#define NIC7_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x57AB100ull
+#define NIC7_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x57AB180ull
+#define NIC7_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_11_SPECIAL_BASE 0x57ABE80ull
+#define NIC7_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_12_UNSECURE_DOORBELL0_BASE 0x57AC000ull
+#define NIC7_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_12_UNSECURE_DOORBELL1_BASE 0x57AC080ull
+#define NIC7_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x57AC100ull
+#define NIC7_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x57AC180ull
+#define NIC7_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_12_SPECIAL_BASE 0x57ACE80ull
+#define NIC7_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_13_UNSECURE_DOORBELL0_BASE 0x57AD000ull
+#define NIC7_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_13_UNSECURE_DOORBELL1_BASE 0x57AD080ull
+#define NIC7_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x57AD100ull
+#define NIC7_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x57AD180ull
+#define NIC7_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_13_SPECIAL_BASE 0x57ADE80ull
+#define NIC7_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC7_UMR1_14_UNSECURE_DOORBELL0_BASE 0x57AE000ull
+#define NIC7_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC7_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC7_UMR1_14_UNSECURE_DOORBELL1_BASE 0x57AE080ull
+#define NIC7_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC7_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC7_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x57AE100ull
+#define NIC7_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC7_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC7_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x57AE180ull
+#define NIC7_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC7_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC7_UMR1_14_SPECIAL_BASE 0x57AEE80ull
+#define NIC7_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC7_QM_DCCM1_BASE 0x57B0000ull
+#define NIC7_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC7_QM_DCCM1_SECTION 0x8000
+#define mmNIC7_QM_ARC_AUX1_BASE 0x57B8000ull
+#define NIC7_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC7_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC7_QM_ARC_AUX1_SPECIAL_BASE 0x57B8E80ull
+#define NIC7_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC7_QM1_BASE 0x57BA000ull
+#define NIC7_QM1_MAX_OFFSET 0x1000
+#define NIC7_QM1_SECTION 0x9000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x57BA900ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x57BA908ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x57BA910ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x57BA918ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x57BA920ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x57BA928ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x57BA930ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x57BA938ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x57BA940ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x57BA948ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x57BA950ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x57BA958ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x57BA960ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x57BA968ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x57BA970ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC7_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x57BA978ull
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC7_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC7_QM1_AXUSER_SECURED_BASE 0x57BAB00ull
+#define NIC7_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC7_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC7_QM1_AXUSER_NONSECURED_BASE 0x57BAB80ull
+#define NIC7_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC7_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC7_QM1_DBG_HBW_BASE 0x57BAC00ull
+#define NIC7_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC7_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC7_QM1_DBG_LBW_BASE 0x57BAC80ull
+#define NIC7_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC7_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC7_QM1_CGM_BASE 0x57BAD80ull
+#define NIC7_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC7_QM1_CGM_SECTION 0x1000
+#define mmNIC7_QM1_SPECIAL_BASE 0x57BAE80ull
+#define NIC7_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC7_QPC1_BASE 0x57BF000ull
+#define NIC7_QPC1_MAX_OFFSET 0x1000
+#define NIC7_QPC1_SECTION 0x7200
+#define mmNIC7_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x57BF720ull
+#define NIC7_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x57BF728ull
+#define NIC7_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x57BF730ull
+#define NIC7_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x57BF738ull
+#define NIC7_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x57BF740ull
+#define NIC7_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x57BF748ull
+#define NIC7_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x57BF750ull
+#define NIC7_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x57BF758ull
+#define NIC7_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x57BF760ull
+#define NIC7_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x57BF768ull
+#define NIC7_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x57BF770ull
+#define NIC7_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x57BF778ull
+#define NIC7_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x57BF780ull
+#define NIC7_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x57BF788ull
+#define NIC7_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x57BF790ull
+#define NIC7_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x57BF798ull
+#define NIC7_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x57BF7A0ull
+#define NIC7_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x57BF7A8ull
+#define NIC7_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x57BF7B0ull
+#define NIC7_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x57BF7B8ull
+#define NIC7_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x57BF7C0ull
+#define NIC7_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x57BF7C8ull
+#define NIC7_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x57BF7D0ull
+#define NIC7_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x57BF7D8ull
+#define NIC7_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x57BF7E0ull
+#define NIC7_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x57BF7E8ull
+#define NIC7_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x57BF7F0ull
+#define NIC7_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x57BF7F8ull
+#define NIC7_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x57BF800ull
+#define NIC7_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x57BF808ull
+#define NIC7_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x57BF810ull
+#define NIC7_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC7_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x57BF818ull
+#define NIC7_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC7_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC7_QPC1_AXUSER_CONG_QUE_BASE 0x57BFB80ull
+#define NIC7_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_RXWQE_BASE 0x57BFBE0ull
+#define NIC7_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x57BFC40ull
+#define NIC7_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_DB_FIFO_BASE 0x57BFCA0ull
+#define NIC7_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x57BFD00ull
+#define NIC7_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_ERR_FIFO_BASE 0x57BFD60ull
+#define NIC7_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_QPC_RESP_BASE 0x57BFDC0ull
+#define NIC7_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC7_QPC1_AXUSER_QPC_REQ_BASE 0x57BFE20ull
+#define NIC7_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC7_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC7_QPC1_SPECIAL_BASE 0x57BFE80ull
+#define NIC7_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC7_TMR_BASE 0x57C8000ull
+#define NIC7_TMR_MAX_OFFSET 0x1000
+#define NIC7_TMR_SECTION 0xD600
+#define mmNIC7_TMR_AXUSER_TMR_FREE_LIST_BASE 0x57C8D60ull
+#define NIC7_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC7_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC7_TMR_AXUSER_TMR_FIFO_BASE 0x57C8DC0ull
+#define NIC7_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC7_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC7_TMR_AXUSER_TMR_FSM_BASE 0x57C8E20ull
+#define NIC7_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC7_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC7_TMR_SPECIAL_BASE 0x57C8E80ull
+#define NIC7_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC7_RXB_CORE_BASE 0x57C9000ull
+#define NIC7_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC7_RXB_CORE_SECTION 0x6100
+#define mmNIC7_RXB_CORE_SCT_AWUSER_BASE 0x57C9610ull
+#define NIC7_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC7_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC7_RXB_CORE_SPECIAL_BASE 0x57C9E80ull
+#define NIC7_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC7_RXE0_BASE 0x57CA000ull
+#define NIC7_RXE0_MAX_OFFSET 0x1000
+#define NIC7_RXE0_SECTION 0x9000
+#define mmNIC7_RXE0_WQE_ARUSER_BASE 0x57CA900ull
+#define NIC7_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC7_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC7_RXE0_SPECIAL_BASE 0x57CAE80ull
+#define NIC7_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC7_RXE1_BASE 0x57CB000ull
+#define NIC7_RXE1_MAX_OFFSET 0x1000
+#define NIC7_RXE1_SECTION 0x9000
+#define mmNIC7_RXE1_WQE_ARUSER_BASE 0x57CB900ull
+#define NIC7_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC7_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC7_RXE1_SPECIAL_BASE 0x57CBE80ull
+#define NIC7_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ0_BASE 0x57CC000ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ1_BASE 0x57CC050ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ2_BASE 0x57CC0A0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ3_BASE 0x57CC0F0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ4_BASE 0x57CC140ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ5_BASE 0x57CC190ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ6_BASE 0x57CC1E0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ7_BASE 0x57CC230ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ8_BASE 0x57CC280ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ9_BASE 0x57CC2D0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ10_BASE 0x57CC320ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ11_BASE 0x57CC370ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ12_BASE 0x57CC3C0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ13_BASE 0x57CC410ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ14_BASE 0x57CC460ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ15_BASE 0x57CC4B0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ16_BASE 0x57CC500ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ17_BASE 0x57CC550ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ18_BASE 0x57CC5A0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ19_BASE 0x57CC5F0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ20_BASE 0x57CC640ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ21_BASE 0x57CC690ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ22_BASE 0x57CC6E0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ23_BASE 0x57CC730ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ24_BASE 0x57CC780ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ25_BASE 0x57CC7D0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ26_BASE 0x57CC820ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ27_BASE 0x57CC870ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ28_BASE 0x57CC8C0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ29_BASE 0x57CC910ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ30_BASE 0x57CC960ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC7_RXE0_AXUSER_AXUSER_CQ31_BASE 0x57CC9B0ull
+#define NIC7_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC7_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC7_RXE0_AXUSER_SPECIAL_BASE 0x57CCE80ull
+#define NIC7_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ0_BASE 0x57CD000ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ1_BASE 0x57CD050ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ2_BASE 0x57CD0A0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ3_BASE 0x57CD0F0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ4_BASE 0x57CD140ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ5_BASE 0x57CD190ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ6_BASE 0x57CD1E0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ7_BASE 0x57CD230ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ8_BASE 0x57CD280ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ9_BASE 0x57CD2D0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ10_BASE 0x57CD320ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ11_BASE 0x57CD370ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ12_BASE 0x57CD3C0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ13_BASE 0x57CD410ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ14_BASE 0x57CD460ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ15_BASE 0x57CD4B0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ16_BASE 0x57CD500ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ17_BASE 0x57CD550ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ18_BASE 0x57CD5A0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ19_BASE 0x57CD5F0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ20_BASE 0x57CD640ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ21_BASE 0x57CD690ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ22_BASE 0x57CD6E0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ23_BASE 0x57CD730ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ24_BASE 0x57CD780ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ25_BASE 0x57CD7D0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ26_BASE 0x57CD820ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ27_BASE 0x57CD870ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ28_BASE 0x57CD8C0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ29_BASE 0x57CD910ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ30_BASE 0x57CD960ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC7_RXE1_AXUSER_AXUSER_CQ31_BASE 0x57CD9B0ull
+#define NIC7_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC7_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC7_RXE1_AXUSER_SPECIAL_BASE 0x57CDE80ull
+#define NIC7_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC7_TXS0_BASE 0x57D0000ull
+#define NIC7_TXS0_MAX_OFFSET 0x1000
+#define NIC7_TXS0_SECTION 0xE800
+#define mmNIC7_TXS0_SPECIAL_BASE 0x57D0E80ull
+#define NIC7_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC7_TXS1_BASE 0x57D1000ull
+#define NIC7_TXS1_MAX_OFFSET 0x1000
+#define NIC7_TXS1_SECTION 0xE800
+#define mmNIC7_TXS1_SPECIAL_BASE 0x57D1E80ull
+#define NIC7_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC7_TXE0_BASE 0x57D2000ull
+#define NIC7_TXE0_MAX_OFFSET 0x1000
+#define NIC7_TXE0_SECTION 0xE800
+#define mmNIC7_TXE0_SPECIAL_BASE 0x57D2E80ull
+#define NIC7_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC7_TXE1_BASE 0x57D3000ull
+#define NIC7_TXE1_MAX_OFFSET 0x1000
+#define NIC7_TXE1_SECTION 0xE800
+#define mmNIC7_TXE1_SPECIAL_BASE 0x57D3E80ull
+#define NIC7_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC7_TXB_BASE 0x57D4000ull
+#define NIC7_TXB_MAX_OFFSET 0x1000
+#define NIC7_TXB_SECTION 0xE800
+#define mmNIC7_TXB_SPECIAL_BASE 0x57D4E80ull
+#define NIC7_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC7_MSTR_IF_RR_SHRD_HBW_BASE 0x57D5000ull
+#define NIC7_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC7_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC7_MSTR_IF_RR_PRVT_HBW_BASE 0x57D5200ull
+#define NIC7_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC7_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC7_MSTR_IF_RR_SHRD_LBW_BASE 0x57D5400ull
+#define NIC7_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC7_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC7_MSTR_IF_RR_PRVT_LBW_BASE 0x57D5600ull
+#define NIC7_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC7_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC7_MSTR_IF_E2E_CRDT_BASE 0x57D5800ull
+#define NIC7_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC7_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC7_MSTR_IF_AXUSER_BASE 0x57D5A80ull
+#define NIC7_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC7_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC7_MSTR_IF_DBG_HBW_BASE 0x57D5B00ull
+#define NIC7_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC7_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC7_MSTR_IF_DBG_LBW_BASE 0x57D5B80ull
+#define NIC7_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC7_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC7_MSTR_IF_CORE_HBW_BASE 0x57D5C00ull
+#define NIC7_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC7_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC7_MSTR_IF_CORE_LBW_BASE 0x57D5D80ull
+#define NIC7_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC7_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC7_MSTR_IF_SPECIAL_BASE 0x57D5E80ull
+#define NIC7_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC7_TX_AXUSER_BASE 0x57D6000ull
+#define NIC7_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC7_TX_AXUSER_SECTION 0x2000
+#define mmNIC7_SERDES0_BASE 0x57D8000ull
+#define NIC7_SERDES0_MAX_OFFSET 0x3E40
+#define NIC7_SERDES0_SECTION 0x4000
+#define mmNIC7_SERDES1_BASE 0x57DC000ull
+#define NIC7_SERDES1_MAX_OFFSET 0x3E40
+#define NIC7_SERDES1_SECTION 0x4000
+#define mmNIC7_PHY_BASE 0x57E0000ull
+#define NIC7_PHY_MAX_OFFSET 0x1000
+#define NIC7_PHY_SECTION 0xE800
+#define mmNIC7_PHY_SPECIAL_BASE 0x57E0E80ull
+#define NIC7_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC7_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT7_MAC_AUX_BASE 0x57E8000ull
+#define PRT7_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT7_MAC_AUX_SECTION 0xE800
+#define mmPRT7_MAC_AUX_SPECIAL_BASE 0x57E8E80ull
+#define PRT7_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT7_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT7_MAC_CORE_BASE 0x57E9000ull
+#define PRT7_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT7_MAC_CORE_SECTION 0xE800
+#define mmPRT7_MAC_CORE_SPECIAL_BASE 0x57E9E80ull
+#define PRT7_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT7_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC7_MAC_RS_FEC_BASE 0x57EA000ull
+#define NIC7_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC7_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC7_MAC_GLOB_STAT_CONTROL_REG_BASE 0x57EB000ull
+#define NIC7_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC7_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC7_MAC_GLOB_STAT_RX0_BASE 0x57EB100ull
+#define NIC7_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC7_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC7_MAC_GLOB_STAT_RX1_BASE 0x57EB18Cull
+#define NIC7_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC7_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC7_MAC_GLOB_STAT_RX2_BASE 0x57EB218ull
+#define NIC7_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC7_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC7_MAC_GLOB_STAT_RX3_BASE 0x57EB2A4ull
+#define NIC7_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC7_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC7_MAC_GLOB_STAT_TX0_BASE 0x57EB330ull
+#define NIC7_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC7_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC7_MAC_GLOB_STAT_TX1_BASE 0x57EB398ull
+#define NIC7_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC7_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC7_MAC_GLOB_STAT_TX2_BASE 0x57EB400ull
+#define NIC7_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC7_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC7_MAC_GLOB_STAT_TX3_BASE 0x57EB468ull
+#define NIC7_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC7_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC7_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x57EB800ull
+#define NIC7_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC7_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC7_MAC_CH0_MAC_PCS_BASE 0x57EC000ull
+#define NIC7_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC7_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC7_MAC_CH0_MAC_128_BASE 0x57EC400ull
+#define NIC7_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC7_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC7_MAC_CH0_MAC_AN_BASE 0x57EC800ull
+#define NIC7_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC7_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC7_MAC_CH1_MAC_PCS_BASE 0x57ED000ull
+#define NIC7_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC7_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC7_MAC_CH1_MAC_128_BASE 0x57ED400ull
+#define NIC7_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC7_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC7_MAC_CH1_MAC_AN_BASE 0x57ED800ull
+#define NIC7_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC7_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC7_MAC_CH2_MAC_PCS_BASE 0x57EE000ull
+#define NIC7_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC7_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC7_MAC_CH2_MAC_128_BASE 0x57EE400ull
+#define NIC7_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC7_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC7_MAC_CH2_MAC_AN_BASE 0x57EE800ull
+#define NIC7_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC7_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC7_MAC_CH3_MAC_PCS_BASE 0x57EF000ull
+#define NIC7_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC7_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC7_MAC_CH3_MAC_128_BASE 0x57EF400ull
+#define NIC7_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC7_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC7_MAC_CH3_MAC_AN_BASE 0x57EF800ull
+#define NIC7_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC7_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC8_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5800000ull
+#define NIC8_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5800080ull
+#define NIC8_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5800100ull
+#define NIC8_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5800180ull
+#define NIC8_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_0_SPECIAL_BASE 0x5800E80ull
+#define NIC8_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5801000ull
+#define NIC8_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5801080ull
+#define NIC8_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5801100ull
+#define NIC8_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5801180ull
+#define NIC8_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_1_SPECIAL_BASE 0x5801E80ull
+#define NIC8_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5802000ull
+#define NIC8_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5802080ull
+#define NIC8_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5802100ull
+#define NIC8_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5802180ull
+#define NIC8_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_2_SPECIAL_BASE 0x5802E80ull
+#define NIC8_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5803000ull
+#define NIC8_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5803080ull
+#define NIC8_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5803100ull
+#define NIC8_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5803180ull
+#define NIC8_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_3_SPECIAL_BASE 0x5803E80ull
+#define NIC8_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5804000ull
+#define NIC8_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5804080ull
+#define NIC8_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5804100ull
+#define NIC8_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5804180ull
+#define NIC8_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_4_SPECIAL_BASE 0x5804E80ull
+#define NIC8_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5805000ull
+#define NIC8_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5805080ull
+#define NIC8_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5805100ull
+#define NIC8_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5805180ull
+#define NIC8_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_5_SPECIAL_BASE 0x5805E80ull
+#define NIC8_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5806000ull
+#define NIC8_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5806080ull
+#define NIC8_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5806100ull
+#define NIC8_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5806180ull
+#define NIC8_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_6_SPECIAL_BASE 0x5806E80ull
+#define NIC8_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5807000ull
+#define NIC8_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5807080ull
+#define NIC8_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5807100ull
+#define NIC8_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5807180ull
+#define NIC8_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_7_SPECIAL_BASE 0x5807E80ull
+#define NIC8_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5808000ull
+#define NIC8_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5808080ull
+#define NIC8_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5808100ull
+#define NIC8_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5808180ull
+#define NIC8_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_8_SPECIAL_BASE 0x5808E80ull
+#define NIC8_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5809000ull
+#define NIC8_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5809080ull
+#define NIC8_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5809100ull
+#define NIC8_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5809180ull
+#define NIC8_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_9_SPECIAL_BASE 0x5809E80ull
+#define NIC8_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_10_UNSECURE_DOORBELL0_BASE 0x580A000ull
+#define NIC8_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_10_UNSECURE_DOORBELL1_BASE 0x580A080ull
+#define NIC8_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x580A100ull
+#define NIC8_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x580A180ull
+#define NIC8_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_10_SPECIAL_BASE 0x580AE80ull
+#define NIC8_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_11_UNSECURE_DOORBELL0_BASE 0x580B000ull
+#define NIC8_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_11_UNSECURE_DOORBELL1_BASE 0x580B080ull
+#define NIC8_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x580B100ull
+#define NIC8_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x580B180ull
+#define NIC8_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_11_SPECIAL_BASE 0x580BE80ull
+#define NIC8_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_12_UNSECURE_DOORBELL0_BASE 0x580C000ull
+#define NIC8_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_12_UNSECURE_DOORBELL1_BASE 0x580C080ull
+#define NIC8_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x580C100ull
+#define NIC8_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x580C180ull
+#define NIC8_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_12_SPECIAL_BASE 0x580CE80ull
+#define NIC8_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_13_UNSECURE_DOORBELL0_BASE 0x580D000ull
+#define NIC8_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_13_UNSECURE_DOORBELL1_BASE 0x580D080ull
+#define NIC8_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x580D100ull
+#define NIC8_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x580D180ull
+#define NIC8_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_13_SPECIAL_BASE 0x580DE80ull
+#define NIC8_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR0_14_UNSECURE_DOORBELL0_BASE 0x580E000ull
+#define NIC8_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR0_14_UNSECURE_DOORBELL1_BASE 0x580E080ull
+#define NIC8_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x580E100ull
+#define NIC8_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x580E180ull
+#define NIC8_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR0_14_SPECIAL_BASE 0x580EE80ull
+#define NIC8_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC8_QM_DCCM0_BASE 0x5810000ull
+#define NIC8_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC8_QM_DCCM0_SECTION 0x8000
+#define mmNIC8_QM_ARC_AUX0_BASE 0x5818000ull
+#define NIC8_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC8_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC8_QM_ARC_AUX0_SPECIAL_BASE 0x5818E80ull
+#define NIC8_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC8_QM0_BASE 0x581A000ull
+#define NIC8_QM0_MAX_OFFSET 0x1000
+#define NIC8_QM0_SECTION 0x9000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x581A900ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x581A908ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x581A910ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x581A918ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x581A920ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x581A928ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x581A930ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x581A938ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x581A940ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x581A948ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x581A950ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x581A958ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x581A960ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x581A968ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x581A970ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC8_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x581A978ull
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC8_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC8_QM0_AXUSER_SECURED_BASE 0x581AB00ull
+#define NIC8_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC8_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC8_QM0_AXUSER_NONSECURED_BASE 0x581AB80ull
+#define NIC8_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC8_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC8_QM0_DBG_HBW_BASE 0x581AC00ull
+#define NIC8_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC8_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC8_QM0_DBG_LBW_BASE 0x581AC80ull
+#define NIC8_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC8_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC8_QM0_CGM_BASE 0x581AD80ull
+#define NIC8_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC8_QM0_CGM_SECTION 0x1000
+#define mmNIC8_QM0_SPECIAL_BASE 0x581AE80ull
+#define NIC8_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC8_QPC0_BASE 0x581F000ull
+#define NIC8_QPC0_MAX_OFFSET 0x1000
+#define NIC8_QPC0_SECTION 0x7200
+#define mmNIC8_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x581F720ull
+#define NIC8_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x581F728ull
+#define NIC8_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x581F730ull
+#define NIC8_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x581F738ull
+#define NIC8_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x581F740ull
+#define NIC8_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x581F748ull
+#define NIC8_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x581F750ull
+#define NIC8_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x581F758ull
+#define NIC8_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x581F760ull
+#define NIC8_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x581F768ull
+#define NIC8_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x581F770ull
+#define NIC8_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x581F778ull
+#define NIC8_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x581F780ull
+#define NIC8_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x581F788ull
+#define NIC8_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x581F790ull
+#define NIC8_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x581F798ull
+#define NIC8_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x581F7A0ull
+#define NIC8_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x581F7A8ull
+#define NIC8_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x581F7B0ull
+#define NIC8_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x581F7B8ull
+#define NIC8_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x581F7C0ull
+#define NIC8_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x581F7C8ull
+#define NIC8_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x581F7D0ull
+#define NIC8_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x581F7D8ull
+#define NIC8_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x581F7E0ull
+#define NIC8_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x581F7E8ull
+#define NIC8_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x581F7F0ull
+#define NIC8_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x581F7F8ull
+#define NIC8_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x581F800ull
+#define NIC8_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x581F808ull
+#define NIC8_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x581F810ull
+#define NIC8_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x581F818ull
+#define NIC8_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC8_QPC0_AXUSER_CONG_QUE_BASE 0x581FB80ull
+#define NIC8_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_RXWQE_BASE 0x581FBE0ull
+#define NIC8_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x581FC40ull
+#define NIC8_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_DB_FIFO_BASE 0x581FCA0ull
+#define NIC8_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x581FD00ull
+#define NIC8_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_ERR_FIFO_BASE 0x581FD60ull
+#define NIC8_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_QPC_RESP_BASE 0x581FDC0ull
+#define NIC8_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC8_QPC0_AXUSER_QPC_REQ_BASE 0x581FE20ull
+#define NIC8_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC8_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC8_QPC0_SPECIAL_BASE 0x581FE80ull
+#define NIC8_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_0_UNSECURE_DOORBELL0_BASE 0x5820000ull
+#define NIC8_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_0_UNSECURE_DOORBELL1_BASE 0x5820080ull
+#define NIC8_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x5820100ull
+#define NIC8_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x5820180ull
+#define NIC8_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_0_SPECIAL_BASE 0x5820E80ull
+#define NIC8_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_1_UNSECURE_DOORBELL0_BASE 0x5821000ull
+#define NIC8_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_1_UNSECURE_DOORBELL1_BASE 0x5821080ull
+#define NIC8_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x5821100ull
+#define NIC8_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x5821180ull
+#define NIC8_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_1_SPECIAL_BASE 0x5821E80ull
+#define NIC8_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_2_UNSECURE_DOORBELL0_BASE 0x5822000ull
+#define NIC8_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_2_UNSECURE_DOORBELL1_BASE 0x5822080ull
+#define NIC8_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x5822100ull
+#define NIC8_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x5822180ull
+#define NIC8_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_2_SPECIAL_BASE 0x5822E80ull
+#define NIC8_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_3_UNSECURE_DOORBELL0_BASE 0x5823000ull
+#define NIC8_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_3_UNSECURE_DOORBELL1_BASE 0x5823080ull
+#define NIC8_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x5823100ull
+#define NIC8_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x5823180ull
+#define NIC8_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_3_SPECIAL_BASE 0x5823E80ull
+#define NIC8_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_4_UNSECURE_DOORBELL0_BASE 0x5824000ull
+#define NIC8_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_4_UNSECURE_DOORBELL1_BASE 0x5824080ull
+#define NIC8_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x5824100ull
+#define NIC8_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x5824180ull
+#define NIC8_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_4_SPECIAL_BASE 0x5824E80ull
+#define NIC8_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_5_UNSECURE_DOORBELL0_BASE 0x5825000ull
+#define NIC8_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_5_UNSECURE_DOORBELL1_BASE 0x5825080ull
+#define NIC8_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x5825100ull
+#define NIC8_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x5825180ull
+#define NIC8_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_5_SPECIAL_BASE 0x5825E80ull
+#define NIC8_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_6_UNSECURE_DOORBELL0_BASE 0x5826000ull
+#define NIC8_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_6_UNSECURE_DOORBELL1_BASE 0x5826080ull
+#define NIC8_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x5826100ull
+#define NIC8_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x5826180ull
+#define NIC8_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_6_SPECIAL_BASE 0x5826E80ull
+#define NIC8_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_7_UNSECURE_DOORBELL0_BASE 0x5827000ull
+#define NIC8_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_7_UNSECURE_DOORBELL1_BASE 0x5827080ull
+#define NIC8_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x5827100ull
+#define NIC8_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x5827180ull
+#define NIC8_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_7_SPECIAL_BASE 0x5827E80ull
+#define NIC8_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_8_UNSECURE_DOORBELL0_BASE 0x5828000ull
+#define NIC8_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_8_UNSECURE_DOORBELL1_BASE 0x5828080ull
+#define NIC8_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x5828100ull
+#define NIC8_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x5828180ull
+#define NIC8_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_8_SPECIAL_BASE 0x5828E80ull
+#define NIC8_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_9_UNSECURE_DOORBELL0_BASE 0x5829000ull
+#define NIC8_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_9_UNSECURE_DOORBELL1_BASE 0x5829080ull
+#define NIC8_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x5829100ull
+#define NIC8_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x5829180ull
+#define NIC8_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_9_SPECIAL_BASE 0x5829E80ull
+#define NIC8_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_10_UNSECURE_DOORBELL0_BASE 0x582A000ull
+#define NIC8_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_10_UNSECURE_DOORBELL1_BASE 0x582A080ull
+#define NIC8_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x582A100ull
+#define NIC8_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x582A180ull
+#define NIC8_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_10_SPECIAL_BASE 0x582AE80ull
+#define NIC8_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_11_UNSECURE_DOORBELL0_BASE 0x582B000ull
+#define NIC8_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_11_UNSECURE_DOORBELL1_BASE 0x582B080ull
+#define NIC8_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x582B100ull
+#define NIC8_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x582B180ull
+#define NIC8_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_11_SPECIAL_BASE 0x582BE80ull
+#define NIC8_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_12_UNSECURE_DOORBELL0_BASE 0x582C000ull
+#define NIC8_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_12_UNSECURE_DOORBELL1_BASE 0x582C080ull
+#define NIC8_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x582C100ull
+#define NIC8_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x582C180ull
+#define NIC8_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_12_SPECIAL_BASE 0x582CE80ull
+#define NIC8_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_13_UNSECURE_DOORBELL0_BASE 0x582D000ull
+#define NIC8_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_13_UNSECURE_DOORBELL1_BASE 0x582D080ull
+#define NIC8_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x582D100ull
+#define NIC8_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x582D180ull
+#define NIC8_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_13_SPECIAL_BASE 0x582DE80ull
+#define NIC8_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC8_UMR1_14_UNSECURE_DOORBELL0_BASE 0x582E000ull
+#define NIC8_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC8_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC8_UMR1_14_UNSECURE_DOORBELL1_BASE 0x582E080ull
+#define NIC8_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC8_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC8_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x582E100ull
+#define NIC8_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC8_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC8_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x582E180ull
+#define NIC8_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC8_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC8_UMR1_14_SPECIAL_BASE 0x582EE80ull
+#define NIC8_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC8_QM_DCCM1_BASE 0x5830000ull
+#define NIC8_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC8_QM_DCCM1_SECTION 0x8000
+#define mmNIC8_QM_ARC_AUX1_BASE 0x5838000ull
+#define NIC8_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC8_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC8_QM_ARC_AUX1_SPECIAL_BASE 0x5838E80ull
+#define NIC8_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC8_QM1_BASE 0x583A000ull
+#define NIC8_QM1_MAX_OFFSET 0x1000
+#define NIC8_QM1_SECTION 0x9000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x583A900ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x583A908ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x583A910ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x583A918ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x583A920ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x583A928ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x583A930ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x583A938ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x583A940ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x583A948ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x583A950ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x583A958ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x583A960ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x583A968ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x583A970ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC8_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x583A978ull
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC8_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC8_QM1_AXUSER_SECURED_BASE 0x583AB00ull
+#define NIC8_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC8_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC8_QM1_AXUSER_NONSECURED_BASE 0x583AB80ull
+#define NIC8_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC8_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC8_QM1_DBG_HBW_BASE 0x583AC00ull
+#define NIC8_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC8_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC8_QM1_DBG_LBW_BASE 0x583AC80ull
+#define NIC8_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC8_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC8_QM1_CGM_BASE 0x583AD80ull
+#define NIC8_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC8_QM1_CGM_SECTION 0x1000
+#define mmNIC8_QM1_SPECIAL_BASE 0x583AE80ull
+#define NIC8_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC8_QPC1_BASE 0x583F000ull
+#define NIC8_QPC1_MAX_OFFSET 0x1000
+#define NIC8_QPC1_SECTION 0x7200
+#define mmNIC8_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x583F720ull
+#define NIC8_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x583F728ull
+#define NIC8_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x583F730ull
+#define NIC8_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x583F738ull
+#define NIC8_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x583F740ull
+#define NIC8_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x583F748ull
+#define NIC8_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x583F750ull
+#define NIC8_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x583F758ull
+#define NIC8_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x583F760ull
+#define NIC8_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x583F768ull
+#define NIC8_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x583F770ull
+#define NIC8_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x583F778ull
+#define NIC8_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x583F780ull
+#define NIC8_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x583F788ull
+#define NIC8_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x583F790ull
+#define NIC8_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x583F798ull
+#define NIC8_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x583F7A0ull
+#define NIC8_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x583F7A8ull
+#define NIC8_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x583F7B0ull
+#define NIC8_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x583F7B8ull
+#define NIC8_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x583F7C0ull
+#define NIC8_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x583F7C8ull
+#define NIC8_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x583F7D0ull
+#define NIC8_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x583F7D8ull
+#define NIC8_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x583F7E0ull
+#define NIC8_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x583F7E8ull
+#define NIC8_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x583F7F0ull
+#define NIC8_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x583F7F8ull
+#define NIC8_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x583F800ull
+#define NIC8_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x583F808ull
+#define NIC8_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x583F810ull
+#define NIC8_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC8_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x583F818ull
+#define NIC8_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC8_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC8_QPC1_AXUSER_CONG_QUE_BASE 0x583FB80ull
+#define NIC8_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_RXWQE_BASE 0x583FBE0ull
+#define NIC8_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x583FC40ull
+#define NIC8_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_DB_FIFO_BASE 0x583FCA0ull
+#define NIC8_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x583FD00ull
+#define NIC8_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_ERR_FIFO_BASE 0x583FD60ull
+#define NIC8_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_QPC_RESP_BASE 0x583FDC0ull
+#define NIC8_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC8_QPC1_AXUSER_QPC_REQ_BASE 0x583FE20ull
+#define NIC8_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC8_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC8_QPC1_SPECIAL_BASE 0x583FE80ull
+#define NIC8_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC8_TMR_BASE 0x5848000ull
+#define NIC8_TMR_MAX_OFFSET 0x1000
+#define NIC8_TMR_SECTION 0xD600
+#define mmNIC8_TMR_AXUSER_TMR_FREE_LIST_BASE 0x5848D60ull
+#define NIC8_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC8_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC8_TMR_AXUSER_TMR_FIFO_BASE 0x5848DC0ull
+#define NIC8_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC8_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC8_TMR_AXUSER_TMR_FSM_BASE 0x5848E20ull
+#define NIC8_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC8_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC8_TMR_SPECIAL_BASE 0x5848E80ull
+#define NIC8_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC8_RXB_CORE_BASE 0x5849000ull
+#define NIC8_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC8_RXB_CORE_SECTION 0x6100
+#define mmNIC8_RXB_CORE_SCT_AWUSER_BASE 0x5849610ull
+#define NIC8_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC8_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC8_RXB_CORE_SPECIAL_BASE 0x5849E80ull
+#define NIC8_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC8_RXE0_BASE 0x584A000ull
+#define NIC8_RXE0_MAX_OFFSET 0x1000
+#define NIC8_RXE0_SECTION 0x9000
+#define mmNIC8_RXE0_WQE_ARUSER_BASE 0x584A900ull
+#define NIC8_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC8_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC8_RXE0_SPECIAL_BASE 0x584AE80ull
+#define NIC8_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC8_RXE1_BASE 0x584B000ull
+#define NIC8_RXE1_MAX_OFFSET 0x1000
+#define NIC8_RXE1_SECTION 0x9000
+#define mmNIC8_RXE1_WQE_ARUSER_BASE 0x584B900ull
+#define NIC8_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC8_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC8_RXE1_SPECIAL_BASE 0x584BE80ull
+#define NIC8_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ0_BASE 0x584C000ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ1_BASE 0x584C050ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ2_BASE 0x584C0A0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ3_BASE 0x584C0F0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ4_BASE 0x584C140ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ5_BASE 0x584C190ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ6_BASE 0x584C1E0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ7_BASE 0x584C230ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ8_BASE 0x584C280ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ9_BASE 0x584C2D0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ10_BASE 0x584C320ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ11_BASE 0x584C370ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ12_BASE 0x584C3C0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ13_BASE 0x584C410ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ14_BASE 0x584C460ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ15_BASE 0x584C4B0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ16_BASE 0x584C500ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ17_BASE 0x584C550ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ18_BASE 0x584C5A0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ19_BASE 0x584C5F0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ20_BASE 0x584C640ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ21_BASE 0x584C690ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ22_BASE 0x584C6E0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ23_BASE 0x584C730ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ24_BASE 0x584C780ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ25_BASE 0x584C7D0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ26_BASE 0x584C820ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ27_BASE 0x584C870ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ28_BASE 0x584C8C0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ29_BASE 0x584C910ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ30_BASE 0x584C960ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC8_RXE0_AXUSER_AXUSER_CQ31_BASE 0x584C9B0ull
+#define NIC8_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC8_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC8_RXE0_AXUSER_SPECIAL_BASE 0x584CE80ull
+#define NIC8_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ0_BASE 0x584D000ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ1_BASE 0x584D050ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ2_BASE 0x584D0A0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ3_BASE 0x584D0F0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ4_BASE 0x584D140ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ5_BASE 0x584D190ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ6_BASE 0x584D1E0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ7_BASE 0x584D230ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ8_BASE 0x584D280ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ9_BASE 0x584D2D0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ10_BASE 0x584D320ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ11_BASE 0x584D370ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ12_BASE 0x584D3C0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ13_BASE 0x584D410ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ14_BASE 0x584D460ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ15_BASE 0x584D4B0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ16_BASE 0x584D500ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ17_BASE 0x584D550ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ18_BASE 0x584D5A0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ19_BASE 0x584D5F0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ20_BASE 0x584D640ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ21_BASE 0x584D690ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ22_BASE 0x584D6E0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ23_BASE 0x584D730ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ24_BASE 0x584D780ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ25_BASE 0x584D7D0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ26_BASE 0x584D820ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ27_BASE 0x584D870ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ28_BASE 0x584D8C0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ29_BASE 0x584D910ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ30_BASE 0x584D960ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC8_RXE1_AXUSER_AXUSER_CQ31_BASE 0x584D9B0ull
+#define NIC8_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC8_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC8_RXE1_AXUSER_SPECIAL_BASE 0x584DE80ull
+#define NIC8_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC8_TXS0_BASE 0x5850000ull
+#define NIC8_TXS0_MAX_OFFSET 0x1000
+#define NIC8_TXS0_SECTION 0xE800
+#define mmNIC8_TXS0_SPECIAL_BASE 0x5850E80ull
+#define NIC8_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC8_TXS1_BASE 0x5851000ull
+#define NIC8_TXS1_MAX_OFFSET 0x1000
+#define NIC8_TXS1_SECTION 0xE800
+#define mmNIC8_TXS1_SPECIAL_BASE 0x5851E80ull
+#define NIC8_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC8_TXE0_BASE 0x5852000ull
+#define NIC8_TXE0_MAX_OFFSET 0x1000
+#define NIC8_TXE0_SECTION 0xE800
+#define mmNIC8_TXE0_SPECIAL_BASE 0x5852E80ull
+#define NIC8_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC8_TXE1_BASE 0x5853000ull
+#define NIC8_TXE1_MAX_OFFSET 0x1000
+#define NIC8_TXE1_SECTION 0xE800
+#define mmNIC8_TXE1_SPECIAL_BASE 0x5853E80ull
+#define NIC8_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC8_TXB_BASE 0x5854000ull
+#define NIC8_TXB_MAX_OFFSET 0x1000
+#define NIC8_TXB_SECTION 0xE800
+#define mmNIC8_TXB_SPECIAL_BASE 0x5854E80ull
+#define NIC8_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC8_MSTR_IF_RR_SHRD_HBW_BASE 0x5855000ull
+#define NIC8_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC8_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC8_MSTR_IF_RR_PRVT_HBW_BASE 0x5855200ull
+#define NIC8_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC8_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC8_MSTR_IF_RR_SHRD_LBW_BASE 0x5855400ull
+#define NIC8_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC8_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC8_MSTR_IF_RR_PRVT_LBW_BASE 0x5855600ull
+#define NIC8_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC8_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC8_MSTR_IF_E2E_CRDT_BASE 0x5855800ull
+#define NIC8_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC8_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC8_MSTR_IF_AXUSER_BASE 0x5855A80ull
+#define NIC8_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC8_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC8_MSTR_IF_DBG_HBW_BASE 0x5855B00ull
+#define NIC8_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC8_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC8_MSTR_IF_DBG_LBW_BASE 0x5855B80ull
+#define NIC8_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC8_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC8_MSTR_IF_CORE_HBW_BASE 0x5855C00ull
+#define NIC8_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC8_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC8_MSTR_IF_CORE_LBW_BASE 0x5855D80ull
+#define NIC8_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC8_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC8_MSTR_IF_SPECIAL_BASE 0x5855E80ull
+#define NIC8_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC8_TX_AXUSER_BASE 0x5856000ull
+#define NIC8_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC8_TX_AXUSER_SECTION 0x2000
+#define mmNIC8_SERDES0_BASE 0x5858000ull
+#define NIC8_SERDES0_MAX_OFFSET 0x3E40
+#define NIC8_SERDES0_SECTION 0x4000
+#define mmNIC8_SERDES1_BASE 0x585C000ull
+#define NIC8_SERDES1_MAX_OFFSET 0x3E40
+#define NIC8_SERDES1_SECTION 0x4000
+#define mmNIC8_PHY_BASE 0x5860000ull
+#define NIC8_PHY_MAX_OFFSET 0x1000
+#define NIC8_PHY_SECTION 0xE800
+#define mmNIC8_PHY_SPECIAL_BASE 0x5860E80ull
+#define NIC8_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC8_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT8_MAC_AUX_BASE 0x5868000ull
+#define PRT8_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT8_MAC_AUX_SECTION 0xE800
+#define mmPRT8_MAC_AUX_SPECIAL_BASE 0x5868E80ull
+#define PRT8_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT8_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT8_MAC_CORE_BASE 0x5869000ull
+#define PRT8_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT8_MAC_CORE_SECTION 0xE800
+#define mmPRT8_MAC_CORE_SPECIAL_BASE 0x5869E80ull
+#define PRT8_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT8_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC8_MAC_RS_FEC_BASE 0x586A000ull
+#define NIC8_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC8_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC8_MAC_GLOB_STAT_CONTROL_REG_BASE 0x586B000ull
+#define NIC8_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC8_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC8_MAC_GLOB_STAT_RX0_BASE 0x586B100ull
+#define NIC8_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC8_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC8_MAC_GLOB_STAT_RX1_BASE 0x586B18Cull
+#define NIC8_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC8_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC8_MAC_GLOB_STAT_RX2_BASE 0x586B218ull
+#define NIC8_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC8_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC8_MAC_GLOB_STAT_RX3_BASE 0x586B2A4ull
+#define NIC8_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC8_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC8_MAC_GLOB_STAT_TX0_BASE 0x586B330ull
+#define NIC8_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC8_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC8_MAC_GLOB_STAT_TX1_BASE 0x586B398ull
+#define NIC8_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC8_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC8_MAC_GLOB_STAT_TX2_BASE 0x586B400ull
+#define NIC8_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC8_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC8_MAC_GLOB_STAT_TX3_BASE 0x586B468ull
+#define NIC8_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC8_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC8_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x586B800ull
+#define NIC8_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC8_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC8_MAC_CH0_MAC_PCS_BASE 0x586C000ull
+#define NIC8_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC8_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC8_MAC_CH0_MAC_128_BASE 0x586C400ull
+#define NIC8_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC8_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC8_MAC_CH0_MAC_AN_BASE 0x586C800ull
+#define NIC8_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC8_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC8_MAC_CH1_MAC_PCS_BASE 0x586D000ull
+#define NIC8_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC8_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC8_MAC_CH1_MAC_128_BASE 0x586D400ull
+#define NIC8_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC8_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC8_MAC_CH1_MAC_AN_BASE 0x586D800ull
+#define NIC8_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC8_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC8_MAC_CH2_MAC_PCS_BASE 0x586E000ull
+#define NIC8_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC8_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC8_MAC_CH2_MAC_128_BASE 0x586E400ull
+#define NIC8_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC8_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC8_MAC_CH2_MAC_AN_BASE 0x586E800ull
+#define NIC8_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC8_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC8_MAC_CH3_MAC_PCS_BASE 0x586F000ull
+#define NIC8_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC8_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC8_MAC_CH3_MAC_128_BASE 0x586F400ull
+#define NIC8_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC8_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC8_MAC_CH3_MAC_AN_BASE 0x586F800ull
+#define NIC8_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC8_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC9_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5880000ull
+#define NIC9_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5880080ull
+#define NIC9_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5880100ull
+#define NIC9_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5880180ull
+#define NIC9_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_0_SPECIAL_BASE 0x5880E80ull
+#define NIC9_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5881000ull
+#define NIC9_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5881080ull
+#define NIC9_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5881100ull
+#define NIC9_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5881180ull
+#define NIC9_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_1_SPECIAL_BASE 0x5881E80ull
+#define NIC9_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5882000ull
+#define NIC9_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5882080ull
+#define NIC9_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5882100ull
+#define NIC9_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5882180ull
+#define NIC9_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_2_SPECIAL_BASE 0x5882E80ull
+#define NIC9_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5883000ull
+#define NIC9_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5883080ull
+#define NIC9_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5883100ull
+#define NIC9_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5883180ull
+#define NIC9_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_3_SPECIAL_BASE 0x5883E80ull
+#define NIC9_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5884000ull
+#define NIC9_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5884080ull
+#define NIC9_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5884100ull
+#define NIC9_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5884180ull
+#define NIC9_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_4_SPECIAL_BASE 0x5884E80ull
+#define NIC9_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5885000ull
+#define NIC9_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5885080ull
+#define NIC9_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5885100ull
+#define NIC9_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5885180ull
+#define NIC9_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_5_SPECIAL_BASE 0x5885E80ull
+#define NIC9_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5886000ull
+#define NIC9_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5886080ull
+#define NIC9_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5886100ull
+#define NIC9_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5886180ull
+#define NIC9_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_6_SPECIAL_BASE 0x5886E80ull
+#define NIC9_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5887000ull
+#define NIC9_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5887080ull
+#define NIC9_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5887100ull
+#define NIC9_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5887180ull
+#define NIC9_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_7_SPECIAL_BASE 0x5887E80ull
+#define NIC9_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5888000ull
+#define NIC9_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5888080ull
+#define NIC9_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5888100ull
+#define NIC9_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5888180ull
+#define NIC9_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_8_SPECIAL_BASE 0x5888E80ull
+#define NIC9_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5889000ull
+#define NIC9_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5889080ull
+#define NIC9_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5889100ull
+#define NIC9_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5889180ull
+#define NIC9_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_9_SPECIAL_BASE 0x5889E80ull
+#define NIC9_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_10_UNSECURE_DOORBELL0_BASE 0x588A000ull
+#define NIC9_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_10_UNSECURE_DOORBELL1_BASE 0x588A080ull
+#define NIC9_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x588A100ull
+#define NIC9_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x588A180ull
+#define NIC9_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_10_SPECIAL_BASE 0x588AE80ull
+#define NIC9_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_11_UNSECURE_DOORBELL0_BASE 0x588B000ull
+#define NIC9_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_11_UNSECURE_DOORBELL1_BASE 0x588B080ull
+#define NIC9_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x588B100ull
+#define NIC9_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x588B180ull
+#define NIC9_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_11_SPECIAL_BASE 0x588BE80ull
+#define NIC9_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_12_UNSECURE_DOORBELL0_BASE 0x588C000ull
+#define NIC9_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_12_UNSECURE_DOORBELL1_BASE 0x588C080ull
+#define NIC9_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x588C100ull
+#define NIC9_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x588C180ull
+#define NIC9_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_12_SPECIAL_BASE 0x588CE80ull
+#define NIC9_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_13_UNSECURE_DOORBELL0_BASE 0x588D000ull
+#define NIC9_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_13_UNSECURE_DOORBELL1_BASE 0x588D080ull
+#define NIC9_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x588D100ull
+#define NIC9_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x588D180ull
+#define NIC9_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_13_SPECIAL_BASE 0x588DE80ull
+#define NIC9_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR0_14_UNSECURE_DOORBELL0_BASE 0x588E000ull
+#define NIC9_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR0_14_UNSECURE_DOORBELL1_BASE 0x588E080ull
+#define NIC9_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x588E100ull
+#define NIC9_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x588E180ull
+#define NIC9_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR0_14_SPECIAL_BASE 0x588EE80ull
+#define NIC9_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC9_QM_DCCM0_BASE 0x5890000ull
+#define NIC9_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC9_QM_DCCM0_SECTION 0x8000
+#define mmNIC9_QM_ARC_AUX0_BASE 0x5898000ull
+#define NIC9_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC9_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC9_QM_ARC_AUX0_SPECIAL_BASE 0x5898E80ull
+#define NIC9_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC9_QM0_BASE 0x589A000ull
+#define NIC9_QM0_MAX_OFFSET 0x1000
+#define NIC9_QM0_SECTION 0x9000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x589A900ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x589A908ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x589A910ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x589A918ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x589A920ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x589A928ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x589A930ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x589A938ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x589A940ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x589A948ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x589A950ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x589A958ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x589A960ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x589A968ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x589A970ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC9_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x589A978ull
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC9_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC9_QM0_AXUSER_SECURED_BASE 0x589AB00ull
+#define NIC9_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC9_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC9_QM0_AXUSER_NONSECURED_BASE 0x589AB80ull
+#define NIC9_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC9_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC9_QM0_DBG_HBW_BASE 0x589AC00ull
+#define NIC9_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC9_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC9_QM0_DBG_LBW_BASE 0x589AC80ull
+#define NIC9_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC9_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC9_QM0_CGM_BASE 0x589AD80ull
+#define NIC9_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC9_QM0_CGM_SECTION 0x1000
+#define mmNIC9_QM0_SPECIAL_BASE 0x589AE80ull
+#define NIC9_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC9_QPC0_BASE 0x589F000ull
+#define NIC9_QPC0_MAX_OFFSET 0x1000
+#define NIC9_QPC0_SECTION 0x7200
+#define mmNIC9_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x589F720ull
+#define NIC9_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x589F728ull
+#define NIC9_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x589F730ull
+#define NIC9_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x589F738ull
+#define NIC9_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x589F740ull
+#define NIC9_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x589F748ull
+#define NIC9_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x589F750ull
+#define NIC9_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x589F758ull
+#define NIC9_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x589F760ull
+#define NIC9_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x589F768ull
+#define NIC9_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x589F770ull
+#define NIC9_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x589F778ull
+#define NIC9_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x589F780ull
+#define NIC9_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x589F788ull
+#define NIC9_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x589F790ull
+#define NIC9_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x589F798ull
+#define NIC9_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x589F7A0ull
+#define NIC9_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x589F7A8ull
+#define NIC9_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x589F7B0ull
+#define NIC9_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x589F7B8ull
+#define NIC9_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x589F7C0ull
+#define NIC9_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x589F7C8ull
+#define NIC9_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x589F7D0ull
+#define NIC9_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x589F7D8ull
+#define NIC9_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x589F7E0ull
+#define NIC9_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x589F7E8ull
+#define NIC9_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x589F7F0ull
+#define NIC9_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x589F7F8ull
+#define NIC9_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x589F800ull
+#define NIC9_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x589F808ull
+#define NIC9_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x589F810ull
+#define NIC9_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x589F818ull
+#define NIC9_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC9_QPC0_AXUSER_CONG_QUE_BASE 0x589FB80ull
+#define NIC9_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_RXWQE_BASE 0x589FBE0ull
+#define NIC9_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x589FC40ull
+#define NIC9_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_DB_FIFO_BASE 0x589FCA0ull
+#define NIC9_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x589FD00ull
+#define NIC9_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_ERR_FIFO_BASE 0x589FD60ull
+#define NIC9_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_QPC_RESP_BASE 0x589FDC0ull
+#define NIC9_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC9_QPC0_AXUSER_QPC_REQ_BASE 0x589FE20ull
+#define NIC9_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC9_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC9_QPC0_SPECIAL_BASE 0x589FE80ull
+#define NIC9_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_0_UNSECURE_DOORBELL0_BASE 0x58A0000ull
+#define NIC9_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_0_UNSECURE_DOORBELL1_BASE 0x58A0080ull
+#define NIC9_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x58A0100ull
+#define NIC9_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x58A0180ull
+#define NIC9_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_0_SPECIAL_BASE 0x58A0E80ull
+#define NIC9_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_1_UNSECURE_DOORBELL0_BASE 0x58A1000ull
+#define NIC9_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_1_UNSECURE_DOORBELL1_BASE 0x58A1080ull
+#define NIC9_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x58A1100ull
+#define NIC9_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x58A1180ull
+#define NIC9_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_1_SPECIAL_BASE 0x58A1E80ull
+#define NIC9_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_2_UNSECURE_DOORBELL0_BASE 0x58A2000ull
+#define NIC9_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_2_UNSECURE_DOORBELL1_BASE 0x58A2080ull
+#define NIC9_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x58A2100ull
+#define NIC9_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x58A2180ull
+#define NIC9_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_2_SPECIAL_BASE 0x58A2E80ull
+#define NIC9_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_3_UNSECURE_DOORBELL0_BASE 0x58A3000ull
+#define NIC9_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_3_UNSECURE_DOORBELL1_BASE 0x58A3080ull
+#define NIC9_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x58A3100ull
+#define NIC9_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x58A3180ull
+#define NIC9_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_3_SPECIAL_BASE 0x58A3E80ull
+#define NIC9_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_4_UNSECURE_DOORBELL0_BASE 0x58A4000ull
+#define NIC9_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_4_UNSECURE_DOORBELL1_BASE 0x58A4080ull
+#define NIC9_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x58A4100ull
+#define NIC9_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x58A4180ull
+#define NIC9_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_4_SPECIAL_BASE 0x58A4E80ull
+#define NIC9_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_5_UNSECURE_DOORBELL0_BASE 0x58A5000ull
+#define NIC9_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_5_UNSECURE_DOORBELL1_BASE 0x58A5080ull
+#define NIC9_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x58A5100ull
+#define NIC9_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x58A5180ull
+#define NIC9_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_5_SPECIAL_BASE 0x58A5E80ull
+#define NIC9_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_6_UNSECURE_DOORBELL0_BASE 0x58A6000ull
+#define NIC9_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_6_UNSECURE_DOORBELL1_BASE 0x58A6080ull
+#define NIC9_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x58A6100ull
+#define NIC9_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x58A6180ull
+#define NIC9_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_6_SPECIAL_BASE 0x58A6E80ull
+#define NIC9_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_7_UNSECURE_DOORBELL0_BASE 0x58A7000ull
+#define NIC9_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_7_UNSECURE_DOORBELL1_BASE 0x58A7080ull
+#define NIC9_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x58A7100ull
+#define NIC9_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x58A7180ull
+#define NIC9_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_7_SPECIAL_BASE 0x58A7E80ull
+#define NIC9_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_8_UNSECURE_DOORBELL0_BASE 0x58A8000ull
+#define NIC9_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_8_UNSECURE_DOORBELL1_BASE 0x58A8080ull
+#define NIC9_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x58A8100ull
+#define NIC9_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x58A8180ull
+#define NIC9_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_8_SPECIAL_BASE 0x58A8E80ull
+#define NIC9_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_9_UNSECURE_DOORBELL0_BASE 0x58A9000ull
+#define NIC9_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_9_UNSECURE_DOORBELL1_BASE 0x58A9080ull
+#define NIC9_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x58A9100ull
+#define NIC9_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x58A9180ull
+#define NIC9_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_9_SPECIAL_BASE 0x58A9E80ull
+#define NIC9_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_10_UNSECURE_DOORBELL0_BASE 0x58AA000ull
+#define NIC9_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_10_UNSECURE_DOORBELL1_BASE 0x58AA080ull
+#define NIC9_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x58AA100ull
+#define NIC9_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x58AA180ull
+#define NIC9_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_10_SPECIAL_BASE 0x58AAE80ull
+#define NIC9_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_11_UNSECURE_DOORBELL0_BASE 0x58AB000ull
+#define NIC9_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_11_UNSECURE_DOORBELL1_BASE 0x58AB080ull
+#define NIC9_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x58AB100ull
+#define NIC9_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x58AB180ull
+#define NIC9_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_11_SPECIAL_BASE 0x58ABE80ull
+#define NIC9_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_12_UNSECURE_DOORBELL0_BASE 0x58AC000ull
+#define NIC9_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_12_UNSECURE_DOORBELL1_BASE 0x58AC080ull
+#define NIC9_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x58AC100ull
+#define NIC9_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x58AC180ull
+#define NIC9_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_12_SPECIAL_BASE 0x58ACE80ull
+#define NIC9_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_13_UNSECURE_DOORBELL0_BASE 0x58AD000ull
+#define NIC9_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_13_UNSECURE_DOORBELL1_BASE 0x58AD080ull
+#define NIC9_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x58AD100ull
+#define NIC9_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x58AD180ull
+#define NIC9_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_13_SPECIAL_BASE 0x58ADE80ull
+#define NIC9_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC9_UMR1_14_UNSECURE_DOORBELL0_BASE 0x58AE000ull
+#define NIC9_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC9_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC9_UMR1_14_UNSECURE_DOORBELL1_BASE 0x58AE080ull
+#define NIC9_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC9_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC9_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x58AE100ull
+#define NIC9_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC9_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC9_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x58AE180ull
+#define NIC9_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC9_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC9_UMR1_14_SPECIAL_BASE 0x58AEE80ull
+#define NIC9_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC9_QM_DCCM1_BASE 0x58B0000ull
+#define NIC9_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC9_QM_DCCM1_SECTION 0x8000
+#define mmNIC9_QM_ARC_AUX1_BASE 0x58B8000ull
+#define NIC9_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC9_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC9_QM_ARC_AUX1_SPECIAL_BASE 0x58B8E80ull
+#define NIC9_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC9_QM1_BASE 0x58BA000ull
+#define NIC9_QM1_MAX_OFFSET 0x1000
+#define NIC9_QM1_SECTION 0x9000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x58BA900ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x58BA908ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x58BA910ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x58BA918ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x58BA920ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x58BA928ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x58BA930ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x58BA938ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x58BA940ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x58BA948ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x58BA950ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x58BA958ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x58BA960ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x58BA968ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x58BA970ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC9_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x58BA978ull
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC9_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC9_QM1_AXUSER_SECURED_BASE 0x58BAB00ull
+#define NIC9_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC9_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC9_QM1_AXUSER_NONSECURED_BASE 0x58BAB80ull
+#define NIC9_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC9_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC9_QM1_DBG_HBW_BASE 0x58BAC00ull
+#define NIC9_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC9_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC9_QM1_DBG_LBW_BASE 0x58BAC80ull
+#define NIC9_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC9_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC9_QM1_CGM_BASE 0x58BAD80ull
+#define NIC9_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC9_QM1_CGM_SECTION 0x1000
+#define mmNIC9_QM1_SPECIAL_BASE 0x58BAE80ull
+#define NIC9_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC9_QPC1_BASE 0x58BF000ull
+#define NIC9_QPC1_MAX_OFFSET 0x1000
+#define NIC9_QPC1_SECTION 0x7200
+#define mmNIC9_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x58BF720ull
+#define NIC9_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x58BF728ull
+#define NIC9_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x58BF730ull
+#define NIC9_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x58BF738ull
+#define NIC9_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x58BF740ull
+#define NIC9_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x58BF748ull
+#define NIC9_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x58BF750ull
+#define NIC9_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x58BF758ull
+#define NIC9_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x58BF760ull
+#define NIC9_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x58BF768ull
+#define NIC9_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x58BF770ull
+#define NIC9_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x58BF778ull
+#define NIC9_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x58BF780ull
+#define NIC9_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x58BF788ull
+#define NIC9_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x58BF790ull
+#define NIC9_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x58BF798ull
+#define NIC9_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x58BF7A0ull
+#define NIC9_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x58BF7A8ull
+#define NIC9_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x58BF7B0ull
+#define NIC9_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x58BF7B8ull
+#define NIC9_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x58BF7C0ull
+#define NIC9_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x58BF7C8ull
+#define NIC9_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x58BF7D0ull
+#define NIC9_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x58BF7D8ull
+#define NIC9_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x58BF7E0ull
+#define NIC9_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x58BF7E8ull
+#define NIC9_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x58BF7F0ull
+#define NIC9_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x58BF7F8ull
+#define NIC9_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x58BF800ull
+#define NIC9_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x58BF808ull
+#define NIC9_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x58BF810ull
+#define NIC9_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC9_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x58BF818ull
+#define NIC9_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC9_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC9_QPC1_AXUSER_CONG_QUE_BASE 0x58BFB80ull
+#define NIC9_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_RXWQE_BASE 0x58BFBE0ull
+#define NIC9_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x58BFC40ull
+#define NIC9_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_DB_FIFO_BASE 0x58BFCA0ull
+#define NIC9_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x58BFD00ull
+#define NIC9_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_ERR_FIFO_BASE 0x58BFD60ull
+#define NIC9_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_QPC_RESP_BASE 0x58BFDC0ull
+#define NIC9_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC9_QPC1_AXUSER_QPC_REQ_BASE 0x58BFE20ull
+#define NIC9_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC9_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC9_QPC1_SPECIAL_BASE 0x58BFE80ull
+#define NIC9_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC9_TMR_BASE 0x58C8000ull
+#define NIC9_TMR_MAX_OFFSET 0x1000
+#define NIC9_TMR_SECTION 0xD600
+#define mmNIC9_TMR_AXUSER_TMR_FREE_LIST_BASE 0x58C8D60ull
+#define NIC9_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC9_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC9_TMR_AXUSER_TMR_FIFO_BASE 0x58C8DC0ull
+#define NIC9_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC9_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC9_TMR_AXUSER_TMR_FSM_BASE 0x58C8E20ull
+#define NIC9_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC9_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC9_TMR_SPECIAL_BASE 0x58C8E80ull
+#define NIC9_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC9_RXB_CORE_BASE 0x58C9000ull
+#define NIC9_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC9_RXB_CORE_SECTION 0x6100
+#define mmNIC9_RXB_CORE_SCT_AWUSER_BASE 0x58C9610ull
+#define NIC9_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC9_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC9_RXB_CORE_SPECIAL_BASE 0x58C9E80ull
+#define NIC9_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC9_RXE0_BASE 0x58CA000ull
+#define NIC9_RXE0_MAX_OFFSET 0x1000
+#define NIC9_RXE0_SECTION 0x9000
+#define mmNIC9_RXE0_WQE_ARUSER_BASE 0x58CA900ull
+#define NIC9_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC9_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC9_RXE0_SPECIAL_BASE 0x58CAE80ull
+#define NIC9_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC9_RXE1_BASE 0x58CB000ull
+#define NIC9_RXE1_MAX_OFFSET 0x1000
+#define NIC9_RXE1_SECTION 0x9000
+#define mmNIC9_RXE1_WQE_ARUSER_BASE 0x58CB900ull
+#define NIC9_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC9_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC9_RXE1_SPECIAL_BASE 0x58CBE80ull
+#define NIC9_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ0_BASE 0x58CC000ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ1_BASE 0x58CC050ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ2_BASE 0x58CC0A0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ3_BASE 0x58CC0F0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ4_BASE 0x58CC140ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ5_BASE 0x58CC190ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ6_BASE 0x58CC1E0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ7_BASE 0x58CC230ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ8_BASE 0x58CC280ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ9_BASE 0x58CC2D0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ10_BASE 0x58CC320ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ11_BASE 0x58CC370ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ12_BASE 0x58CC3C0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ13_BASE 0x58CC410ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ14_BASE 0x58CC460ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ15_BASE 0x58CC4B0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ16_BASE 0x58CC500ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ17_BASE 0x58CC550ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ18_BASE 0x58CC5A0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ19_BASE 0x58CC5F0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ20_BASE 0x58CC640ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ21_BASE 0x58CC690ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ22_BASE 0x58CC6E0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ23_BASE 0x58CC730ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ24_BASE 0x58CC780ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ25_BASE 0x58CC7D0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ26_BASE 0x58CC820ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ27_BASE 0x58CC870ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ28_BASE 0x58CC8C0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ29_BASE 0x58CC910ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ30_BASE 0x58CC960ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC9_RXE0_AXUSER_AXUSER_CQ31_BASE 0x58CC9B0ull
+#define NIC9_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC9_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC9_RXE0_AXUSER_SPECIAL_BASE 0x58CCE80ull
+#define NIC9_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ0_BASE 0x58CD000ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ1_BASE 0x58CD050ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ2_BASE 0x58CD0A0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ3_BASE 0x58CD0F0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ4_BASE 0x58CD140ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ5_BASE 0x58CD190ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ6_BASE 0x58CD1E0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ7_BASE 0x58CD230ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ8_BASE 0x58CD280ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ9_BASE 0x58CD2D0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ10_BASE 0x58CD320ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ11_BASE 0x58CD370ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ12_BASE 0x58CD3C0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ13_BASE 0x58CD410ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ14_BASE 0x58CD460ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ15_BASE 0x58CD4B0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ16_BASE 0x58CD500ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ17_BASE 0x58CD550ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ18_BASE 0x58CD5A0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ19_BASE 0x58CD5F0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ20_BASE 0x58CD640ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ21_BASE 0x58CD690ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ22_BASE 0x58CD6E0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ23_BASE 0x58CD730ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ24_BASE 0x58CD780ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ25_BASE 0x58CD7D0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ26_BASE 0x58CD820ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ27_BASE 0x58CD870ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ28_BASE 0x58CD8C0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ29_BASE 0x58CD910ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ30_BASE 0x58CD960ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC9_RXE1_AXUSER_AXUSER_CQ31_BASE 0x58CD9B0ull
+#define NIC9_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC9_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC9_RXE1_AXUSER_SPECIAL_BASE 0x58CDE80ull
+#define NIC9_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC9_TXS0_BASE 0x58D0000ull
+#define NIC9_TXS0_MAX_OFFSET 0x1000
+#define NIC9_TXS0_SECTION 0xE800
+#define mmNIC9_TXS0_SPECIAL_BASE 0x58D0E80ull
+#define NIC9_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC9_TXS1_BASE 0x58D1000ull
+#define NIC9_TXS1_MAX_OFFSET 0x1000
+#define NIC9_TXS1_SECTION 0xE800
+#define mmNIC9_TXS1_SPECIAL_BASE 0x58D1E80ull
+#define NIC9_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC9_TXE0_BASE 0x58D2000ull
+#define NIC9_TXE0_MAX_OFFSET 0x1000
+#define NIC9_TXE0_SECTION 0xE800
+#define mmNIC9_TXE0_SPECIAL_BASE 0x58D2E80ull
+#define NIC9_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC9_TXE1_BASE 0x58D3000ull
+#define NIC9_TXE1_MAX_OFFSET 0x1000
+#define NIC9_TXE1_SECTION 0xE800
+#define mmNIC9_TXE1_SPECIAL_BASE 0x58D3E80ull
+#define NIC9_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC9_TXB_BASE 0x58D4000ull
+#define NIC9_TXB_MAX_OFFSET 0x1000
+#define NIC9_TXB_SECTION 0xE800
+#define mmNIC9_TXB_SPECIAL_BASE 0x58D4E80ull
+#define NIC9_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC9_MSTR_IF_RR_SHRD_HBW_BASE 0x58D5000ull
+#define NIC9_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC9_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC9_MSTR_IF_RR_PRVT_HBW_BASE 0x58D5200ull
+#define NIC9_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC9_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC9_MSTR_IF_RR_SHRD_LBW_BASE 0x58D5400ull
+#define NIC9_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC9_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC9_MSTR_IF_RR_PRVT_LBW_BASE 0x58D5600ull
+#define NIC9_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC9_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC9_MSTR_IF_E2E_CRDT_BASE 0x58D5800ull
+#define NIC9_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC9_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC9_MSTR_IF_AXUSER_BASE 0x58D5A80ull
+#define NIC9_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC9_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC9_MSTR_IF_DBG_HBW_BASE 0x58D5B00ull
+#define NIC9_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC9_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC9_MSTR_IF_DBG_LBW_BASE 0x58D5B80ull
+#define NIC9_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC9_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC9_MSTR_IF_CORE_HBW_BASE 0x58D5C00ull
+#define NIC9_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC9_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC9_MSTR_IF_CORE_LBW_BASE 0x58D5D80ull
+#define NIC9_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC9_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC9_MSTR_IF_SPECIAL_BASE 0x58D5E80ull
+#define NIC9_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC9_TX_AXUSER_BASE 0x58D6000ull
+#define NIC9_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC9_TX_AXUSER_SECTION 0x2000
+#define mmNIC9_SERDES0_BASE 0x58D8000ull
+#define NIC9_SERDES0_MAX_OFFSET 0x3E40
+#define NIC9_SERDES0_SECTION 0x4000
+#define mmNIC9_SERDES1_BASE 0x58DC000ull
+#define NIC9_SERDES1_MAX_OFFSET 0x3E40
+#define NIC9_SERDES1_SECTION 0x4000
+#define mmNIC9_PHY_BASE 0x58E0000ull
+#define NIC9_PHY_MAX_OFFSET 0x1000
+#define NIC9_PHY_SECTION 0xE800
+#define mmNIC9_PHY_SPECIAL_BASE 0x58E0E80ull
+#define NIC9_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC9_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT9_MAC_AUX_BASE 0x58E8000ull
+#define PRT9_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT9_MAC_AUX_SECTION 0xE800
+#define mmPRT9_MAC_AUX_SPECIAL_BASE 0x58E8E80ull
+#define PRT9_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT9_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT9_MAC_CORE_BASE 0x58E9000ull
+#define PRT9_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT9_MAC_CORE_SECTION 0xE800
+#define mmPRT9_MAC_CORE_SPECIAL_BASE 0x58E9E80ull
+#define PRT9_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT9_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC9_MAC_RS_FEC_BASE 0x58EA000ull
+#define NIC9_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC9_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC9_MAC_GLOB_STAT_CONTROL_REG_BASE 0x58EB000ull
+#define NIC9_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC9_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC9_MAC_GLOB_STAT_RX0_BASE 0x58EB100ull
+#define NIC9_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC9_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC9_MAC_GLOB_STAT_RX1_BASE 0x58EB18Cull
+#define NIC9_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC9_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC9_MAC_GLOB_STAT_RX2_BASE 0x58EB218ull
+#define NIC9_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC9_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC9_MAC_GLOB_STAT_RX3_BASE 0x58EB2A4ull
+#define NIC9_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC9_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC9_MAC_GLOB_STAT_TX0_BASE 0x58EB330ull
+#define NIC9_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC9_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC9_MAC_GLOB_STAT_TX1_BASE 0x58EB398ull
+#define NIC9_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC9_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC9_MAC_GLOB_STAT_TX2_BASE 0x58EB400ull
+#define NIC9_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC9_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC9_MAC_GLOB_STAT_TX3_BASE 0x58EB468ull
+#define NIC9_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC9_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC9_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x58EB800ull
+#define NIC9_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC9_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC9_MAC_CH0_MAC_PCS_BASE 0x58EC000ull
+#define NIC9_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC9_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC9_MAC_CH0_MAC_128_BASE 0x58EC400ull
+#define NIC9_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC9_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC9_MAC_CH0_MAC_AN_BASE 0x58EC800ull
+#define NIC9_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC9_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC9_MAC_CH1_MAC_PCS_BASE 0x58ED000ull
+#define NIC9_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC9_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC9_MAC_CH1_MAC_128_BASE 0x58ED400ull
+#define NIC9_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC9_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC9_MAC_CH1_MAC_AN_BASE 0x58ED800ull
+#define NIC9_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC9_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC9_MAC_CH2_MAC_PCS_BASE 0x58EE000ull
+#define NIC9_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC9_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC9_MAC_CH2_MAC_128_BASE 0x58EE400ull
+#define NIC9_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC9_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC9_MAC_CH2_MAC_AN_BASE 0x58EE800ull
+#define NIC9_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC9_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC9_MAC_CH3_MAC_PCS_BASE 0x58EF000ull
+#define NIC9_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC9_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC9_MAC_CH3_MAC_128_BASE 0x58EF400ull
+#define NIC9_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC9_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC9_MAC_CH3_MAC_AN_BASE 0x58EF800ull
+#define NIC9_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC9_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC10_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5900000ull
+#define NIC10_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5900080ull
+#define NIC10_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5900100ull
+#define NIC10_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5900180ull
+#define NIC10_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_0_SPECIAL_BASE 0x5900E80ull
+#define NIC10_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5901000ull
+#define NIC10_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5901080ull
+#define NIC10_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5901100ull
+#define NIC10_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5901180ull
+#define NIC10_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_1_SPECIAL_BASE 0x5901E80ull
+#define NIC10_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5902000ull
+#define NIC10_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5902080ull
+#define NIC10_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5902100ull
+#define NIC10_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5902180ull
+#define NIC10_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_2_SPECIAL_BASE 0x5902E80ull
+#define NIC10_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5903000ull
+#define NIC10_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5903080ull
+#define NIC10_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5903100ull
+#define NIC10_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5903180ull
+#define NIC10_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_3_SPECIAL_BASE 0x5903E80ull
+#define NIC10_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5904000ull
+#define NIC10_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5904080ull
+#define NIC10_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5904100ull
+#define NIC10_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5904180ull
+#define NIC10_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_4_SPECIAL_BASE 0x5904E80ull
+#define NIC10_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5905000ull
+#define NIC10_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5905080ull
+#define NIC10_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5905100ull
+#define NIC10_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5905180ull
+#define NIC10_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_5_SPECIAL_BASE 0x5905E80ull
+#define NIC10_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5906000ull
+#define NIC10_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5906080ull
+#define NIC10_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5906100ull
+#define NIC10_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5906180ull
+#define NIC10_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_6_SPECIAL_BASE 0x5906E80ull
+#define NIC10_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5907000ull
+#define NIC10_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5907080ull
+#define NIC10_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5907100ull
+#define NIC10_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5907180ull
+#define NIC10_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_7_SPECIAL_BASE 0x5907E80ull
+#define NIC10_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5908000ull
+#define NIC10_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5908080ull
+#define NIC10_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5908100ull
+#define NIC10_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5908180ull
+#define NIC10_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_8_SPECIAL_BASE 0x5908E80ull
+#define NIC10_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5909000ull
+#define NIC10_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5909080ull
+#define NIC10_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5909100ull
+#define NIC10_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5909180ull
+#define NIC10_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_9_SPECIAL_BASE 0x5909E80ull
+#define NIC10_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_10_UNSECURE_DOORBELL0_BASE 0x590A000ull
+#define NIC10_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_10_UNSECURE_DOORBELL1_BASE 0x590A080ull
+#define NIC10_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x590A100ull
+#define NIC10_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x590A180ull
+#define NIC10_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_10_SPECIAL_BASE 0x590AE80ull
+#define NIC10_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_11_UNSECURE_DOORBELL0_BASE 0x590B000ull
+#define NIC10_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_11_UNSECURE_DOORBELL1_BASE 0x590B080ull
+#define NIC10_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x590B100ull
+#define NIC10_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x590B180ull
+#define NIC10_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_11_SPECIAL_BASE 0x590BE80ull
+#define NIC10_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_12_UNSECURE_DOORBELL0_BASE 0x590C000ull
+#define NIC10_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_12_UNSECURE_DOORBELL1_BASE 0x590C080ull
+#define NIC10_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x590C100ull
+#define NIC10_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x590C180ull
+#define NIC10_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_12_SPECIAL_BASE 0x590CE80ull
+#define NIC10_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_13_UNSECURE_DOORBELL0_BASE 0x590D000ull
+#define NIC10_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_13_UNSECURE_DOORBELL1_BASE 0x590D080ull
+#define NIC10_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x590D100ull
+#define NIC10_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x590D180ull
+#define NIC10_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_13_SPECIAL_BASE 0x590DE80ull
+#define NIC10_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR0_14_UNSECURE_DOORBELL0_BASE 0x590E000ull
+#define NIC10_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR0_14_UNSECURE_DOORBELL1_BASE 0x590E080ull
+#define NIC10_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x590E100ull
+#define NIC10_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x590E180ull
+#define NIC10_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR0_14_SPECIAL_BASE 0x590EE80ull
+#define NIC10_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC10_QM_DCCM0_BASE 0x5910000ull
+#define NIC10_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC10_QM_DCCM0_SECTION 0x8000
+#define mmNIC10_QM_ARC_AUX0_BASE 0x5918000ull
+#define NIC10_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC10_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC10_QM_ARC_AUX0_SPECIAL_BASE 0x5918E80ull
+#define NIC10_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC10_QM0_BASE 0x591A000ull
+#define NIC10_QM0_MAX_OFFSET 0x1000
+#define NIC10_QM0_SECTION 0x9000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x591A900ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x591A908ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x591A910ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x591A918ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x591A920ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x591A928ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x591A930ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x591A938ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x591A940ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x591A948ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x591A950ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x591A958ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x591A960ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x591A968ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x591A970ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC10_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x591A978ull
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC10_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC10_QM0_AXUSER_SECURED_BASE 0x591AB00ull
+#define NIC10_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC10_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC10_QM0_AXUSER_NONSECURED_BASE 0x591AB80ull
+#define NIC10_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC10_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC10_QM0_DBG_HBW_BASE 0x591AC00ull
+#define NIC10_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC10_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC10_QM0_DBG_LBW_BASE 0x591AC80ull
+#define NIC10_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC10_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC10_QM0_CGM_BASE 0x591AD80ull
+#define NIC10_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC10_QM0_CGM_SECTION 0x1000
+#define mmNIC10_QM0_SPECIAL_BASE 0x591AE80ull
+#define NIC10_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC10_QPC0_BASE 0x591F000ull
+#define NIC10_QPC0_MAX_OFFSET 0x1000
+#define NIC10_QPC0_SECTION 0x7200
+#define mmNIC10_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x591F720ull
+#define NIC10_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x591F728ull
+#define NIC10_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x591F730ull
+#define NIC10_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x591F738ull
+#define NIC10_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x591F740ull
+#define NIC10_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x591F748ull
+#define NIC10_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x591F750ull
+#define NIC10_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x591F758ull
+#define NIC10_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x591F760ull
+#define NIC10_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x591F768ull
+#define NIC10_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x591F770ull
+#define NIC10_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x591F778ull
+#define NIC10_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x591F780ull
+#define NIC10_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x591F788ull
+#define NIC10_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x591F790ull
+#define NIC10_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x591F798ull
+#define NIC10_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x591F7A0ull
+#define NIC10_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x591F7A8ull
+#define NIC10_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x591F7B0ull
+#define NIC10_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x591F7B8ull
+#define NIC10_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x591F7C0ull
+#define NIC10_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x591F7C8ull
+#define NIC10_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x591F7D0ull
+#define NIC10_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x591F7D8ull
+#define NIC10_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x591F7E0ull
+#define NIC10_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x591F7E8ull
+#define NIC10_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x591F7F0ull
+#define NIC10_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x591F7F8ull
+#define NIC10_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x591F800ull
+#define NIC10_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x591F808ull
+#define NIC10_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x591F810ull
+#define NIC10_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x591F818ull
+#define NIC10_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC10_QPC0_AXUSER_CONG_QUE_BASE 0x591FB80ull
+#define NIC10_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_RXWQE_BASE 0x591FBE0ull
+#define NIC10_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x591FC40ull
+#define NIC10_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_DB_FIFO_BASE 0x591FCA0ull
+#define NIC10_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x591FD00ull
+#define NIC10_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_ERR_FIFO_BASE 0x591FD60ull
+#define NIC10_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_QPC_RESP_BASE 0x591FDC0ull
+#define NIC10_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC10_QPC0_AXUSER_QPC_REQ_BASE 0x591FE20ull
+#define NIC10_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC10_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC10_QPC0_SPECIAL_BASE 0x591FE80ull
+#define NIC10_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_0_UNSECURE_DOORBELL0_BASE 0x5920000ull
+#define NIC10_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_0_UNSECURE_DOORBELL1_BASE 0x5920080ull
+#define NIC10_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x5920100ull
+#define NIC10_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x5920180ull
+#define NIC10_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_0_SPECIAL_BASE 0x5920E80ull
+#define NIC10_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_1_UNSECURE_DOORBELL0_BASE 0x5921000ull
+#define NIC10_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_1_UNSECURE_DOORBELL1_BASE 0x5921080ull
+#define NIC10_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x5921100ull
+#define NIC10_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x5921180ull
+#define NIC10_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_1_SPECIAL_BASE 0x5921E80ull
+#define NIC10_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_2_UNSECURE_DOORBELL0_BASE 0x5922000ull
+#define NIC10_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_2_UNSECURE_DOORBELL1_BASE 0x5922080ull
+#define NIC10_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x5922100ull
+#define NIC10_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x5922180ull
+#define NIC10_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_2_SPECIAL_BASE 0x5922E80ull
+#define NIC10_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_3_UNSECURE_DOORBELL0_BASE 0x5923000ull
+#define NIC10_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_3_UNSECURE_DOORBELL1_BASE 0x5923080ull
+#define NIC10_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x5923100ull
+#define NIC10_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x5923180ull
+#define NIC10_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_3_SPECIAL_BASE 0x5923E80ull
+#define NIC10_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_4_UNSECURE_DOORBELL0_BASE 0x5924000ull
+#define NIC10_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_4_UNSECURE_DOORBELL1_BASE 0x5924080ull
+#define NIC10_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x5924100ull
+#define NIC10_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x5924180ull
+#define NIC10_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_4_SPECIAL_BASE 0x5924E80ull
+#define NIC10_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_5_UNSECURE_DOORBELL0_BASE 0x5925000ull
+#define NIC10_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_5_UNSECURE_DOORBELL1_BASE 0x5925080ull
+#define NIC10_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x5925100ull
+#define NIC10_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x5925180ull
+#define NIC10_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_5_SPECIAL_BASE 0x5925E80ull
+#define NIC10_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_6_UNSECURE_DOORBELL0_BASE 0x5926000ull
+#define NIC10_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_6_UNSECURE_DOORBELL1_BASE 0x5926080ull
+#define NIC10_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x5926100ull
+#define NIC10_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x5926180ull
+#define NIC10_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_6_SPECIAL_BASE 0x5926E80ull
+#define NIC10_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_7_UNSECURE_DOORBELL0_BASE 0x5927000ull
+#define NIC10_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_7_UNSECURE_DOORBELL1_BASE 0x5927080ull
+#define NIC10_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x5927100ull
+#define NIC10_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x5927180ull
+#define NIC10_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_7_SPECIAL_BASE 0x5927E80ull
+#define NIC10_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_8_UNSECURE_DOORBELL0_BASE 0x5928000ull
+#define NIC10_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_8_UNSECURE_DOORBELL1_BASE 0x5928080ull
+#define NIC10_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x5928100ull
+#define NIC10_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x5928180ull
+#define NIC10_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_8_SPECIAL_BASE 0x5928E80ull
+#define NIC10_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_9_UNSECURE_DOORBELL0_BASE 0x5929000ull
+#define NIC10_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_9_UNSECURE_DOORBELL1_BASE 0x5929080ull
+#define NIC10_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x5929100ull
+#define NIC10_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x5929180ull
+#define NIC10_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_9_SPECIAL_BASE 0x5929E80ull
+#define NIC10_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_10_UNSECURE_DOORBELL0_BASE 0x592A000ull
+#define NIC10_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_10_UNSECURE_DOORBELL1_BASE 0x592A080ull
+#define NIC10_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x592A100ull
+#define NIC10_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x592A180ull
+#define NIC10_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_10_SPECIAL_BASE 0x592AE80ull
+#define NIC10_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_11_UNSECURE_DOORBELL0_BASE 0x592B000ull
+#define NIC10_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_11_UNSECURE_DOORBELL1_BASE 0x592B080ull
+#define NIC10_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x592B100ull
+#define NIC10_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x592B180ull
+#define NIC10_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_11_SPECIAL_BASE 0x592BE80ull
+#define NIC10_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_12_UNSECURE_DOORBELL0_BASE 0x592C000ull
+#define NIC10_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_12_UNSECURE_DOORBELL1_BASE 0x592C080ull
+#define NIC10_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x592C100ull
+#define NIC10_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x592C180ull
+#define NIC10_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_12_SPECIAL_BASE 0x592CE80ull
+#define NIC10_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_13_UNSECURE_DOORBELL0_BASE 0x592D000ull
+#define NIC10_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_13_UNSECURE_DOORBELL1_BASE 0x592D080ull
+#define NIC10_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x592D100ull
+#define NIC10_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x592D180ull
+#define NIC10_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_13_SPECIAL_BASE 0x592DE80ull
+#define NIC10_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC10_UMR1_14_UNSECURE_DOORBELL0_BASE 0x592E000ull
+#define NIC10_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC10_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC10_UMR1_14_UNSECURE_DOORBELL1_BASE 0x592E080ull
+#define NIC10_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC10_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC10_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x592E100ull
+#define NIC10_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC10_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC10_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x592E180ull
+#define NIC10_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC10_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC10_UMR1_14_SPECIAL_BASE 0x592EE80ull
+#define NIC10_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC10_QM_DCCM1_BASE 0x5930000ull
+#define NIC10_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC10_QM_DCCM1_SECTION 0x8000
+#define mmNIC10_QM_ARC_AUX1_BASE 0x5938000ull
+#define NIC10_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC10_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC10_QM_ARC_AUX1_SPECIAL_BASE 0x5938E80ull
+#define NIC10_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC10_QM1_BASE 0x593A000ull
+#define NIC10_QM1_MAX_OFFSET 0x1000
+#define NIC10_QM1_SECTION 0x9000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x593A900ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x593A908ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x593A910ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x593A918ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x593A920ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x593A928ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x593A930ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x593A938ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x593A940ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x593A948ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x593A950ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x593A958ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x593A960ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x593A968ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x593A970ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC10_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x593A978ull
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC10_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC10_QM1_AXUSER_SECURED_BASE 0x593AB00ull
+#define NIC10_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC10_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC10_QM1_AXUSER_NONSECURED_BASE 0x593AB80ull
+#define NIC10_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC10_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC10_QM1_DBG_HBW_BASE 0x593AC00ull
+#define NIC10_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC10_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC10_QM1_DBG_LBW_BASE 0x593AC80ull
+#define NIC10_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC10_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC10_QM1_CGM_BASE 0x593AD80ull
+#define NIC10_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC10_QM1_CGM_SECTION 0x1000
+#define mmNIC10_QM1_SPECIAL_BASE 0x593AE80ull
+#define NIC10_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC10_QPC1_BASE 0x593F000ull
+#define NIC10_QPC1_MAX_OFFSET 0x1000
+#define NIC10_QPC1_SECTION 0x7200
+#define mmNIC10_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x593F720ull
+#define NIC10_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x593F728ull
+#define NIC10_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x593F730ull
+#define NIC10_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x593F738ull
+#define NIC10_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x593F740ull
+#define NIC10_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x593F748ull
+#define NIC10_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x593F750ull
+#define NIC10_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x593F758ull
+#define NIC10_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x593F760ull
+#define NIC10_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x593F768ull
+#define NIC10_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x593F770ull
+#define NIC10_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x593F778ull
+#define NIC10_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x593F780ull
+#define NIC10_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x593F788ull
+#define NIC10_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x593F790ull
+#define NIC10_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x593F798ull
+#define NIC10_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x593F7A0ull
+#define NIC10_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x593F7A8ull
+#define NIC10_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x593F7B0ull
+#define NIC10_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x593F7B8ull
+#define NIC10_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x593F7C0ull
+#define NIC10_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x593F7C8ull
+#define NIC10_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x593F7D0ull
+#define NIC10_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x593F7D8ull
+#define NIC10_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x593F7E0ull
+#define NIC10_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x593F7E8ull
+#define NIC10_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x593F7F0ull
+#define NIC10_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x593F7F8ull
+#define NIC10_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x593F800ull
+#define NIC10_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x593F808ull
+#define NIC10_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x593F810ull
+#define NIC10_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC10_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x593F818ull
+#define NIC10_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC10_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC10_QPC1_AXUSER_CONG_QUE_BASE 0x593FB80ull
+#define NIC10_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_RXWQE_BASE 0x593FBE0ull
+#define NIC10_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x593FC40ull
+#define NIC10_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_DB_FIFO_BASE 0x593FCA0ull
+#define NIC10_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x593FD00ull
+#define NIC10_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_ERR_FIFO_BASE 0x593FD60ull
+#define NIC10_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_QPC_RESP_BASE 0x593FDC0ull
+#define NIC10_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC10_QPC1_AXUSER_QPC_REQ_BASE 0x593FE20ull
+#define NIC10_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC10_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC10_QPC1_SPECIAL_BASE 0x593FE80ull
+#define NIC10_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC10_TMR_BASE 0x5948000ull
+#define NIC10_TMR_MAX_OFFSET 0x1000
+#define NIC10_TMR_SECTION 0xD600
+#define mmNIC10_TMR_AXUSER_TMR_FREE_LIST_BASE 0x5948D60ull
+#define NIC10_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC10_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC10_TMR_AXUSER_TMR_FIFO_BASE 0x5948DC0ull
+#define NIC10_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC10_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC10_TMR_AXUSER_TMR_FSM_BASE 0x5948E20ull
+#define NIC10_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC10_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC10_TMR_SPECIAL_BASE 0x5948E80ull
+#define NIC10_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC10_RXB_CORE_BASE 0x5949000ull
+#define NIC10_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC10_RXB_CORE_SECTION 0x6100
+#define mmNIC10_RXB_CORE_SCT_AWUSER_BASE 0x5949610ull
+#define NIC10_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC10_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC10_RXB_CORE_SPECIAL_BASE 0x5949E80ull
+#define NIC10_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC10_RXE0_BASE 0x594A000ull
+#define NIC10_RXE0_MAX_OFFSET 0x1000
+#define NIC10_RXE0_SECTION 0x9000
+#define mmNIC10_RXE0_WQE_ARUSER_BASE 0x594A900ull
+#define NIC10_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC10_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC10_RXE0_SPECIAL_BASE 0x594AE80ull
+#define NIC10_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC10_RXE1_BASE 0x594B000ull
+#define NIC10_RXE1_MAX_OFFSET 0x1000
+#define NIC10_RXE1_SECTION 0x9000
+#define mmNIC10_RXE1_WQE_ARUSER_BASE 0x594B900ull
+#define NIC10_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC10_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC10_RXE1_SPECIAL_BASE 0x594BE80ull
+#define NIC10_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ0_BASE 0x594C000ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ1_BASE 0x594C050ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ2_BASE 0x594C0A0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ3_BASE 0x594C0F0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ4_BASE 0x594C140ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ5_BASE 0x594C190ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ6_BASE 0x594C1E0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ7_BASE 0x594C230ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ8_BASE 0x594C280ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ9_BASE 0x594C2D0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ10_BASE 0x594C320ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ11_BASE 0x594C370ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ12_BASE 0x594C3C0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ13_BASE 0x594C410ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ14_BASE 0x594C460ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ15_BASE 0x594C4B0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ16_BASE 0x594C500ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ17_BASE 0x594C550ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ18_BASE 0x594C5A0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ19_BASE 0x594C5F0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ20_BASE 0x594C640ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ21_BASE 0x594C690ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ22_BASE 0x594C6E0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ23_BASE 0x594C730ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ24_BASE 0x594C780ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ25_BASE 0x594C7D0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ26_BASE 0x594C820ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ27_BASE 0x594C870ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ28_BASE 0x594C8C0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ29_BASE 0x594C910ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ30_BASE 0x594C960ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC10_RXE0_AXUSER_AXUSER_CQ31_BASE 0x594C9B0ull
+#define NIC10_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC10_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC10_RXE0_AXUSER_SPECIAL_BASE 0x594CE80ull
+#define NIC10_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ0_BASE 0x594D000ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ1_BASE 0x594D050ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ2_BASE 0x594D0A0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ3_BASE 0x594D0F0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ4_BASE 0x594D140ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ5_BASE 0x594D190ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ6_BASE 0x594D1E0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ7_BASE 0x594D230ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ8_BASE 0x594D280ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ9_BASE 0x594D2D0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ10_BASE 0x594D320ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ11_BASE 0x594D370ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ12_BASE 0x594D3C0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ13_BASE 0x594D410ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ14_BASE 0x594D460ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ15_BASE 0x594D4B0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ16_BASE 0x594D500ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ17_BASE 0x594D550ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ18_BASE 0x594D5A0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ19_BASE 0x594D5F0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ20_BASE 0x594D640ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ21_BASE 0x594D690ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ22_BASE 0x594D6E0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ23_BASE 0x594D730ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ24_BASE 0x594D780ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ25_BASE 0x594D7D0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ26_BASE 0x594D820ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ27_BASE 0x594D870ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ28_BASE 0x594D8C0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ29_BASE 0x594D910ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ30_BASE 0x594D960ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC10_RXE1_AXUSER_AXUSER_CQ31_BASE 0x594D9B0ull
+#define NIC10_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC10_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC10_RXE1_AXUSER_SPECIAL_BASE 0x594DE80ull
+#define NIC10_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC10_TXS0_BASE 0x5950000ull
+#define NIC10_TXS0_MAX_OFFSET 0x1000
+#define NIC10_TXS0_SECTION 0xE800
+#define mmNIC10_TXS0_SPECIAL_BASE 0x5950E80ull
+#define NIC10_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC10_TXS1_BASE 0x5951000ull
+#define NIC10_TXS1_MAX_OFFSET 0x1000
+#define NIC10_TXS1_SECTION 0xE800
+#define mmNIC10_TXS1_SPECIAL_BASE 0x5951E80ull
+#define NIC10_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC10_TXE0_BASE 0x5952000ull
+#define NIC10_TXE0_MAX_OFFSET 0x1000
+#define NIC10_TXE0_SECTION 0xE800
+#define mmNIC10_TXE0_SPECIAL_BASE 0x5952E80ull
+#define NIC10_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC10_TXE1_BASE 0x5953000ull
+#define NIC10_TXE1_MAX_OFFSET 0x1000
+#define NIC10_TXE1_SECTION 0xE800
+#define mmNIC10_TXE1_SPECIAL_BASE 0x5953E80ull
+#define NIC10_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC10_TXB_BASE 0x5954000ull
+#define NIC10_TXB_MAX_OFFSET 0x1000
+#define NIC10_TXB_SECTION 0xE800
+#define mmNIC10_TXB_SPECIAL_BASE 0x5954E80ull
+#define NIC10_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC10_MSTR_IF_RR_SHRD_HBW_BASE 0x5955000ull
+#define NIC10_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC10_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC10_MSTR_IF_RR_PRVT_HBW_BASE 0x5955200ull
+#define NIC10_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC10_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC10_MSTR_IF_RR_SHRD_LBW_BASE 0x5955400ull
+#define NIC10_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC10_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC10_MSTR_IF_RR_PRVT_LBW_BASE 0x5955600ull
+#define NIC10_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC10_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC10_MSTR_IF_E2E_CRDT_BASE 0x5955800ull
+#define NIC10_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC10_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC10_MSTR_IF_AXUSER_BASE 0x5955A80ull
+#define NIC10_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC10_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC10_MSTR_IF_DBG_HBW_BASE 0x5955B00ull
+#define NIC10_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC10_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC10_MSTR_IF_DBG_LBW_BASE 0x5955B80ull
+#define NIC10_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC10_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC10_MSTR_IF_CORE_HBW_BASE 0x5955C00ull
+#define NIC10_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC10_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC10_MSTR_IF_CORE_LBW_BASE 0x5955D80ull
+#define NIC10_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC10_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC10_MSTR_IF_SPECIAL_BASE 0x5955E80ull
+#define NIC10_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC10_TX_AXUSER_BASE 0x5956000ull
+#define NIC10_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC10_TX_AXUSER_SECTION 0x2000
+#define mmNIC10_SERDES0_BASE 0x5958000ull
+#define NIC10_SERDES0_MAX_OFFSET 0x3E40
+#define NIC10_SERDES0_SECTION 0x4000
+#define mmNIC10_SERDES1_BASE 0x595C000ull
+#define NIC10_SERDES1_MAX_OFFSET 0x3E40
+#define NIC10_SERDES1_SECTION 0x4000
+#define mmNIC10_PHY_BASE 0x5960000ull
+#define NIC10_PHY_MAX_OFFSET 0x1000
+#define NIC10_PHY_SECTION 0xE800
+#define mmNIC10_PHY_SPECIAL_BASE 0x5960E80ull
+#define NIC10_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC10_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT10_MAC_AUX_BASE 0x5968000ull
+#define PRT10_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT10_MAC_AUX_SECTION 0xE800
+#define mmPRT10_MAC_AUX_SPECIAL_BASE 0x5968E80ull
+#define PRT10_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT10_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT10_MAC_CORE_BASE 0x5969000ull
+#define PRT10_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT10_MAC_CORE_SECTION 0xE800
+#define mmPRT10_MAC_CORE_SPECIAL_BASE 0x5969E80ull
+#define PRT10_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT10_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC10_MAC_RS_FEC_BASE 0x596A000ull
+#define NIC10_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC10_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC10_MAC_GLOB_STAT_CONTROL_REG_BASE 0x596B000ull
+#define NIC10_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC10_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC10_MAC_GLOB_STAT_RX0_BASE 0x596B100ull
+#define NIC10_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC10_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC10_MAC_GLOB_STAT_RX1_BASE 0x596B18Cull
+#define NIC10_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC10_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC10_MAC_GLOB_STAT_RX2_BASE 0x596B218ull
+#define NIC10_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC10_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC10_MAC_GLOB_STAT_RX3_BASE 0x596B2A4ull
+#define NIC10_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC10_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC10_MAC_GLOB_STAT_TX0_BASE 0x596B330ull
+#define NIC10_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC10_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC10_MAC_GLOB_STAT_TX1_BASE 0x596B398ull
+#define NIC10_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC10_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC10_MAC_GLOB_STAT_TX2_BASE 0x596B400ull
+#define NIC10_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC10_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC10_MAC_GLOB_STAT_TX3_BASE 0x596B468ull
+#define NIC10_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC10_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC10_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x596B800ull
+#define NIC10_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC10_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC10_MAC_CH0_MAC_PCS_BASE 0x596C000ull
+#define NIC10_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC10_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC10_MAC_CH0_MAC_128_BASE 0x596C400ull
+#define NIC10_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC10_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC10_MAC_CH0_MAC_AN_BASE 0x596C800ull
+#define NIC10_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC10_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC10_MAC_CH1_MAC_PCS_BASE 0x596D000ull
+#define NIC10_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC10_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC10_MAC_CH1_MAC_128_BASE 0x596D400ull
+#define NIC10_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC10_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC10_MAC_CH1_MAC_AN_BASE 0x596D800ull
+#define NIC10_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC10_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC10_MAC_CH2_MAC_PCS_BASE 0x596E000ull
+#define NIC10_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC10_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC10_MAC_CH2_MAC_128_BASE 0x596E400ull
+#define NIC10_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC10_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC10_MAC_CH2_MAC_AN_BASE 0x596E800ull
+#define NIC10_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC10_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC10_MAC_CH3_MAC_PCS_BASE 0x596F000ull
+#define NIC10_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC10_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC10_MAC_CH3_MAC_128_BASE 0x596F400ull
+#define NIC10_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC10_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC10_MAC_CH3_MAC_AN_BASE 0x596F800ull
+#define NIC10_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC10_MAC_CH3_MAC_AN_SECTION 0x10800
+#define mmNIC11_UMR0_0_UNSECURE_DOORBELL0_BASE 0x5980000ull
+#define NIC11_UMR0_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_0_UNSECURE_DOORBELL1_BASE 0x5980080ull
+#define NIC11_UMR0_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_0_COMPLETION_QUEUE_CI_0_BASE 0x5980100ull
+#define NIC11_UMR0_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_0_COMPLETION_QUEUE_CI_1_BASE 0x5980180ull
+#define NIC11_UMR0_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_0_SPECIAL_BASE 0x5980E80ull
+#define NIC11_UMR0_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_0_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_1_UNSECURE_DOORBELL0_BASE 0x5981000ull
+#define NIC11_UMR0_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_1_UNSECURE_DOORBELL1_BASE 0x5981080ull
+#define NIC11_UMR0_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_1_COMPLETION_QUEUE_CI_0_BASE 0x5981100ull
+#define NIC11_UMR0_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_1_COMPLETION_QUEUE_CI_1_BASE 0x5981180ull
+#define NIC11_UMR0_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_1_SPECIAL_BASE 0x5981E80ull
+#define NIC11_UMR0_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_1_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_2_UNSECURE_DOORBELL0_BASE 0x5982000ull
+#define NIC11_UMR0_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_2_UNSECURE_DOORBELL1_BASE 0x5982080ull
+#define NIC11_UMR0_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_2_COMPLETION_QUEUE_CI_0_BASE 0x5982100ull
+#define NIC11_UMR0_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_2_COMPLETION_QUEUE_CI_1_BASE 0x5982180ull
+#define NIC11_UMR0_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_2_SPECIAL_BASE 0x5982E80ull
+#define NIC11_UMR0_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_2_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_3_UNSECURE_DOORBELL0_BASE 0x5983000ull
+#define NIC11_UMR0_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_3_UNSECURE_DOORBELL1_BASE 0x5983080ull
+#define NIC11_UMR0_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_3_COMPLETION_QUEUE_CI_0_BASE 0x5983100ull
+#define NIC11_UMR0_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_3_COMPLETION_QUEUE_CI_1_BASE 0x5983180ull
+#define NIC11_UMR0_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_3_SPECIAL_BASE 0x5983E80ull
+#define NIC11_UMR0_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_3_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_4_UNSECURE_DOORBELL0_BASE 0x5984000ull
+#define NIC11_UMR0_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_4_UNSECURE_DOORBELL1_BASE 0x5984080ull
+#define NIC11_UMR0_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_4_COMPLETION_QUEUE_CI_0_BASE 0x5984100ull
+#define NIC11_UMR0_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_4_COMPLETION_QUEUE_CI_1_BASE 0x5984180ull
+#define NIC11_UMR0_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_4_SPECIAL_BASE 0x5984E80ull
+#define NIC11_UMR0_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_4_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_5_UNSECURE_DOORBELL0_BASE 0x5985000ull
+#define NIC11_UMR0_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_5_UNSECURE_DOORBELL1_BASE 0x5985080ull
+#define NIC11_UMR0_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_5_COMPLETION_QUEUE_CI_0_BASE 0x5985100ull
+#define NIC11_UMR0_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_5_COMPLETION_QUEUE_CI_1_BASE 0x5985180ull
+#define NIC11_UMR0_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_5_SPECIAL_BASE 0x5985E80ull
+#define NIC11_UMR0_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_5_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_6_UNSECURE_DOORBELL0_BASE 0x5986000ull
+#define NIC11_UMR0_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_6_UNSECURE_DOORBELL1_BASE 0x5986080ull
+#define NIC11_UMR0_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_6_COMPLETION_QUEUE_CI_0_BASE 0x5986100ull
+#define NIC11_UMR0_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_6_COMPLETION_QUEUE_CI_1_BASE 0x5986180ull
+#define NIC11_UMR0_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_6_SPECIAL_BASE 0x5986E80ull
+#define NIC11_UMR0_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_6_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_7_UNSECURE_DOORBELL0_BASE 0x5987000ull
+#define NIC11_UMR0_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_7_UNSECURE_DOORBELL1_BASE 0x5987080ull
+#define NIC11_UMR0_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_7_COMPLETION_QUEUE_CI_0_BASE 0x5987100ull
+#define NIC11_UMR0_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_7_COMPLETION_QUEUE_CI_1_BASE 0x5987180ull
+#define NIC11_UMR0_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_7_SPECIAL_BASE 0x5987E80ull
+#define NIC11_UMR0_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_7_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_8_UNSECURE_DOORBELL0_BASE 0x5988000ull
+#define NIC11_UMR0_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_8_UNSECURE_DOORBELL1_BASE 0x5988080ull
+#define NIC11_UMR0_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_8_COMPLETION_QUEUE_CI_0_BASE 0x5988100ull
+#define NIC11_UMR0_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_8_COMPLETION_QUEUE_CI_1_BASE 0x5988180ull
+#define NIC11_UMR0_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_8_SPECIAL_BASE 0x5988E80ull
+#define NIC11_UMR0_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_8_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_9_UNSECURE_DOORBELL0_BASE 0x5989000ull
+#define NIC11_UMR0_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_9_UNSECURE_DOORBELL1_BASE 0x5989080ull
+#define NIC11_UMR0_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_9_COMPLETION_QUEUE_CI_0_BASE 0x5989100ull
+#define NIC11_UMR0_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_9_COMPLETION_QUEUE_CI_1_BASE 0x5989180ull
+#define NIC11_UMR0_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_9_SPECIAL_BASE 0x5989E80ull
+#define NIC11_UMR0_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_9_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_10_UNSECURE_DOORBELL0_BASE 0x598A000ull
+#define NIC11_UMR0_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_10_UNSECURE_DOORBELL1_BASE 0x598A080ull
+#define NIC11_UMR0_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_10_COMPLETION_QUEUE_CI_0_BASE 0x598A100ull
+#define NIC11_UMR0_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_10_COMPLETION_QUEUE_CI_1_BASE 0x598A180ull
+#define NIC11_UMR0_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_10_SPECIAL_BASE 0x598AE80ull
+#define NIC11_UMR0_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_10_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_11_UNSECURE_DOORBELL0_BASE 0x598B000ull
+#define NIC11_UMR0_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_11_UNSECURE_DOORBELL1_BASE 0x598B080ull
+#define NIC11_UMR0_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_11_COMPLETION_QUEUE_CI_0_BASE 0x598B100ull
+#define NIC11_UMR0_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_11_COMPLETION_QUEUE_CI_1_BASE 0x598B180ull
+#define NIC11_UMR0_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_11_SPECIAL_BASE 0x598BE80ull
+#define NIC11_UMR0_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_11_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_12_UNSECURE_DOORBELL0_BASE 0x598C000ull
+#define NIC11_UMR0_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_12_UNSECURE_DOORBELL1_BASE 0x598C080ull
+#define NIC11_UMR0_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_12_COMPLETION_QUEUE_CI_0_BASE 0x598C100ull
+#define NIC11_UMR0_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_12_COMPLETION_QUEUE_CI_1_BASE 0x598C180ull
+#define NIC11_UMR0_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_12_SPECIAL_BASE 0x598CE80ull
+#define NIC11_UMR0_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_12_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_13_UNSECURE_DOORBELL0_BASE 0x598D000ull
+#define NIC11_UMR0_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_13_UNSECURE_DOORBELL1_BASE 0x598D080ull
+#define NIC11_UMR0_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_13_COMPLETION_QUEUE_CI_0_BASE 0x598D100ull
+#define NIC11_UMR0_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_13_COMPLETION_QUEUE_CI_1_BASE 0x598D180ull
+#define NIC11_UMR0_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_13_SPECIAL_BASE 0x598DE80ull
+#define NIC11_UMR0_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_13_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR0_14_UNSECURE_DOORBELL0_BASE 0x598E000ull
+#define NIC11_UMR0_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR0_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR0_14_UNSECURE_DOORBELL1_BASE 0x598E080ull
+#define NIC11_UMR0_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR0_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR0_14_COMPLETION_QUEUE_CI_0_BASE 0x598E100ull
+#define NIC11_UMR0_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR0_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR0_14_COMPLETION_QUEUE_CI_1_BASE 0x598E180ull
+#define NIC11_UMR0_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR0_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR0_14_SPECIAL_BASE 0x598EE80ull
+#define NIC11_UMR0_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR0_14_SPECIAL_SECTION 0x1180
+#define mmNIC11_QM_DCCM0_BASE 0x5990000ull
+#define NIC11_QM_DCCM0_MAX_OFFSET 0x4000
+#define NIC11_QM_DCCM0_SECTION 0x8000
+#define mmNIC11_QM_ARC_AUX0_BASE 0x5998000ull
+#define NIC11_QM_ARC_AUX0_MAX_OFFSET 0x1000
+#define NIC11_QM_ARC_AUX0_SECTION 0xE800
+#define mmNIC11_QM_ARC_AUX0_SPECIAL_BASE 0x5998E80ull
+#define NIC11_QM_ARC_AUX0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_QM_ARC_AUX0_SPECIAL_SECTION 0x1180
+#define mmNIC11_QM0_BASE 0x599A000ull
+#define NIC11_QM0_MAX_OFFSET 0x1000
+#define NIC11_QM0_SECTION 0x9000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR0_BASE 0x599A900ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR1_BASE 0x599A908ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR2_BASE 0x599A910ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR3_BASE 0x599A918ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR4_BASE 0x599A920ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR5_BASE 0x599A928ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR6_BASE 0x599A930ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR7_BASE 0x599A938ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR8_BASE 0x599A940ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR9_BASE 0x599A948ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR10_BASE 0x599A950ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR11_BASE 0x599A958ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR12_BASE 0x599A960ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR13_BASE 0x599A968ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR14_BASE 0x599A970ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC11_QM0_QMAN_WR64_BASE_ADDR15_BASE 0x599A978ull
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC11_QM0_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC11_QM0_AXUSER_SECURED_BASE 0x599AB00ull
+#define NIC11_QM0_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC11_QM0_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC11_QM0_AXUSER_NONSECURED_BASE 0x599AB80ull
+#define NIC11_QM0_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC11_QM0_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC11_QM0_DBG_HBW_BASE 0x599AC00ull
+#define NIC11_QM0_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC11_QM0_DBG_HBW_SECTION 0x8000
+#define mmNIC11_QM0_DBG_LBW_BASE 0x599AC80ull
+#define NIC11_QM0_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC11_QM0_DBG_LBW_SECTION 0x1000
+#define mmNIC11_QM0_CGM_BASE 0x599AD80ull
+#define NIC11_QM0_CGM_MAX_OFFSET 0xC000
+#define NIC11_QM0_CGM_SECTION 0x1000
+#define mmNIC11_QM0_SPECIAL_BASE 0x599AE80ull
+#define NIC11_QM0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_QM0_SPECIAL_SECTION 0x4180
+#define mmNIC11_QPC0_BASE 0x599F000ull
+#define NIC11_QPC0_MAX_OFFSET 0x1000
+#define NIC11_QPC0_SECTION 0x7200
+#define mmNIC11_QPC0_DBFIFO0_CI_UPD_ADDR_BASE 0x599F720ull
+#define NIC11_QPC0_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO1_CI_UPD_ADDR_BASE 0x599F728ull
+#define NIC11_QPC0_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO2_CI_UPD_ADDR_BASE 0x599F730ull
+#define NIC11_QPC0_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO3_CI_UPD_ADDR_BASE 0x599F738ull
+#define NIC11_QPC0_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO4_CI_UPD_ADDR_BASE 0x599F740ull
+#define NIC11_QPC0_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO5_CI_UPD_ADDR_BASE 0x599F748ull
+#define NIC11_QPC0_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO6_CI_UPD_ADDR_BASE 0x599F750ull
+#define NIC11_QPC0_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO7_CI_UPD_ADDR_BASE 0x599F758ull
+#define NIC11_QPC0_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO8_CI_UPD_ADDR_BASE 0x599F760ull
+#define NIC11_QPC0_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO9_CI_UPD_ADDR_BASE 0x599F768ull
+#define NIC11_QPC0_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO10_CI_UPD_ADDR_BASE 0x599F770ull
+#define NIC11_QPC0_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO11_CI_UPD_ADDR_BASE 0x599F778ull
+#define NIC11_QPC0_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO12_CI_UPD_ADDR_BASE 0x599F780ull
+#define NIC11_QPC0_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO13_CI_UPD_ADDR_BASE 0x599F788ull
+#define NIC11_QPC0_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO14_CI_UPD_ADDR_BASE 0x599F790ull
+#define NIC11_QPC0_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO15_CI_UPD_ADDR_BASE 0x599F798ull
+#define NIC11_QPC0_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO16_CI_UPD_ADDR_BASE 0x599F7A0ull
+#define NIC11_QPC0_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO17_CI_UPD_ADDR_BASE 0x599F7A8ull
+#define NIC11_QPC0_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO18_CI_UPD_ADDR_BASE 0x599F7B0ull
+#define NIC11_QPC0_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO19_CI_UPD_ADDR_BASE 0x599F7B8ull
+#define NIC11_QPC0_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO20_CI_UPD_ADDR_BASE 0x599F7C0ull
+#define NIC11_QPC0_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO21_CI_UPD_ADDR_BASE 0x599F7C8ull
+#define NIC11_QPC0_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO22_CI_UPD_ADDR_BASE 0x599F7D0ull
+#define NIC11_QPC0_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO23_CI_UPD_ADDR_BASE 0x599F7D8ull
+#define NIC11_QPC0_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO24_CI_UPD_ADDR_BASE 0x599F7E0ull
+#define NIC11_QPC0_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO25_CI_UPD_ADDR_BASE 0x599F7E8ull
+#define NIC11_QPC0_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO26_CI_UPD_ADDR_BASE 0x599F7F0ull
+#define NIC11_QPC0_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO27_CI_UPD_ADDR_BASE 0x599F7F8ull
+#define NIC11_QPC0_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO28_CI_UPD_ADDR_BASE 0x599F800ull
+#define NIC11_QPC0_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFO29_CI_UPD_ADDR_BASE 0x599F808ull
+#define NIC11_QPC0_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x599F810ull
+#define NIC11_QPC0_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x599F818ull
+#define NIC11_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC0_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC11_QPC0_AXUSER_CONG_QUE_BASE 0x599FB80ull
+#define NIC11_QPC0_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_RXWQE_BASE 0x599FBE0ull
+#define NIC11_QPC0_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x599FC40ull
+#define NIC11_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_DB_FIFO_BASE 0x599FCA0ull
+#define NIC11_QPC0_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_EV_QUE_LBW_INTR_BASE 0x599FD00ull
+#define NIC11_QPC0_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_ERR_FIFO_BASE 0x599FD60ull
+#define NIC11_QPC0_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_QPC_RESP_BASE 0x599FDC0ull
+#define NIC11_QPC0_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC11_QPC0_AXUSER_QPC_REQ_BASE 0x599FE20ull
+#define NIC11_QPC0_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC11_QPC0_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC11_QPC0_SPECIAL_BASE 0x599FE80ull
+#define NIC11_QPC0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_QPC0_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_0_UNSECURE_DOORBELL0_BASE 0x59A0000ull
+#define NIC11_UMR1_0_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_0_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_0_UNSECURE_DOORBELL1_BASE 0x59A0080ull
+#define NIC11_UMR1_0_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_0_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_0_COMPLETION_QUEUE_CI_0_BASE 0x59A0100ull
+#define NIC11_UMR1_0_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_0_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_0_COMPLETION_QUEUE_CI_1_BASE 0x59A0180ull
+#define NIC11_UMR1_0_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_0_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_0_SPECIAL_BASE 0x59A0E80ull
+#define NIC11_UMR1_0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_0_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_1_UNSECURE_DOORBELL0_BASE 0x59A1000ull
+#define NIC11_UMR1_1_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_1_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_1_UNSECURE_DOORBELL1_BASE 0x59A1080ull
+#define NIC11_UMR1_1_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_1_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_1_COMPLETION_QUEUE_CI_0_BASE 0x59A1100ull
+#define NIC11_UMR1_1_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_1_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_1_COMPLETION_QUEUE_CI_1_BASE 0x59A1180ull
+#define NIC11_UMR1_1_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_1_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_1_SPECIAL_BASE 0x59A1E80ull
+#define NIC11_UMR1_1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_1_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_2_UNSECURE_DOORBELL0_BASE 0x59A2000ull
+#define NIC11_UMR1_2_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_2_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_2_UNSECURE_DOORBELL1_BASE 0x59A2080ull
+#define NIC11_UMR1_2_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_2_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_2_COMPLETION_QUEUE_CI_0_BASE 0x59A2100ull
+#define NIC11_UMR1_2_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_2_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_2_COMPLETION_QUEUE_CI_1_BASE 0x59A2180ull
+#define NIC11_UMR1_2_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_2_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_2_SPECIAL_BASE 0x59A2E80ull
+#define NIC11_UMR1_2_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_2_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_3_UNSECURE_DOORBELL0_BASE 0x59A3000ull
+#define NIC11_UMR1_3_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_3_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_3_UNSECURE_DOORBELL1_BASE 0x59A3080ull
+#define NIC11_UMR1_3_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_3_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_3_COMPLETION_QUEUE_CI_0_BASE 0x59A3100ull
+#define NIC11_UMR1_3_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_3_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_3_COMPLETION_QUEUE_CI_1_BASE 0x59A3180ull
+#define NIC11_UMR1_3_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_3_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_3_SPECIAL_BASE 0x59A3E80ull
+#define NIC11_UMR1_3_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_3_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_4_UNSECURE_DOORBELL0_BASE 0x59A4000ull
+#define NIC11_UMR1_4_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_4_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_4_UNSECURE_DOORBELL1_BASE 0x59A4080ull
+#define NIC11_UMR1_4_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_4_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_4_COMPLETION_QUEUE_CI_0_BASE 0x59A4100ull
+#define NIC11_UMR1_4_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_4_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_4_COMPLETION_QUEUE_CI_1_BASE 0x59A4180ull
+#define NIC11_UMR1_4_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_4_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_4_SPECIAL_BASE 0x59A4E80ull
+#define NIC11_UMR1_4_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_4_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_5_UNSECURE_DOORBELL0_BASE 0x59A5000ull
+#define NIC11_UMR1_5_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_5_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_5_UNSECURE_DOORBELL1_BASE 0x59A5080ull
+#define NIC11_UMR1_5_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_5_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_5_COMPLETION_QUEUE_CI_0_BASE 0x59A5100ull
+#define NIC11_UMR1_5_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_5_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_5_COMPLETION_QUEUE_CI_1_BASE 0x59A5180ull
+#define NIC11_UMR1_5_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_5_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_5_SPECIAL_BASE 0x59A5E80ull
+#define NIC11_UMR1_5_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_5_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_6_UNSECURE_DOORBELL0_BASE 0x59A6000ull
+#define NIC11_UMR1_6_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_6_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_6_UNSECURE_DOORBELL1_BASE 0x59A6080ull
+#define NIC11_UMR1_6_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_6_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_6_COMPLETION_QUEUE_CI_0_BASE 0x59A6100ull
+#define NIC11_UMR1_6_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_6_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_6_COMPLETION_QUEUE_CI_1_BASE 0x59A6180ull
+#define NIC11_UMR1_6_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_6_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_6_SPECIAL_BASE 0x59A6E80ull
+#define NIC11_UMR1_6_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_6_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_7_UNSECURE_DOORBELL0_BASE 0x59A7000ull
+#define NIC11_UMR1_7_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_7_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_7_UNSECURE_DOORBELL1_BASE 0x59A7080ull
+#define NIC11_UMR1_7_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_7_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_7_COMPLETION_QUEUE_CI_0_BASE 0x59A7100ull
+#define NIC11_UMR1_7_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_7_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_7_COMPLETION_QUEUE_CI_1_BASE 0x59A7180ull
+#define NIC11_UMR1_7_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_7_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_7_SPECIAL_BASE 0x59A7E80ull
+#define NIC11_UMR1_7_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_7_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_8_UNSECURE_DOORBELL0_BASE 0x59A8000ull
+#define NIC11_UMR1_8_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_8_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_8_UNSECURE_DOORBELL1_BASE 0x59A8080ull
+#define NIC11_UMR1_8_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_8_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_8_COMPLETION_QUEUE_CI_0_BASE 0x59A8100ull
+#define NIC11_UMR1_8_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_8_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_8_COMPLETION_QUEUE_CI_1_BASE 0x59A8180ull
+#define NIC11_UMR1_8_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_8_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_8_SPECIAL_BASE 0x59A8E80ull
+#define NIC11_UMR1_8_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_8_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_9_UNSECURE_DOORBELL0_BASE 0x59A9000ull
+#define NIC11_UMR1_9_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_9_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_9_UNSECURE_DOORBELL1_BASE 0x59A9080ull
+#define NIC11_UMR1_9_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_9_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_9_COMPLETION_QUEUE_CI_0_BASE 0x59A9100ull
+#define NIC11_UMR1_9_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_9_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_9_COMPLETION_QUEUE_CI_1_BASE 0x59A9180ull
+#define NIC11_UMR1_9_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_9_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_9_SPECIAL_BASE 0x59A9E80ull
+#define NIC11_UMR1_9_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_9_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_10_UNSECURE_DOORBELL0_BASE 0x59AA000ull
+#define NIC11_UMR1_10_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_10_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_10_UNSECURE_DOORBELL1_BASE 0x59AA080ull
+#define NIC11_UMR1_10_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_10_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_10_COMPLETION_QUEUE_CI_0_BASE 0x59AA100ull
+#define NIC11_UMR1_10_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_10_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_10_COMPLETION_QUEUE_CI_1_BASE 0x59AA180ull
+#define NIC11_UMR1_10_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_10_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_10_SPECIAL_BASE 0x59AAE80ull
+#define NIC11_UMR1_10_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_10_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_11_UNSECURE_DOORBELL0_BASE 0x59AB000ull
+#define NIC11_UMR1_11_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_11_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_11_UNSECURE_DOORBELL1_BASE 0x59AB080ull
+#define NIC11_UMR1_11_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_11_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_11_COMPLETION_QUEUE_CI_0_BASE 0x59AB100ull
+#define NIC11_UMR1_11_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_11_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_11_COMPLETION_QUEUE_CI_1_BASE 0x59AB180ull
+#define NIC11_UMR1_11_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_11_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_11_SPECIAL_BASE 0x59ABE80ull
+#define NIC11_UMR1_11_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_11_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_12_UNSECURE_DOORBELL0_BASE 0x59AC000ull
+#define NIC11_UMR1_12_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_12_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_12_UNSECURE_DOORBELL1_BASE 0x59AC080ull
+#define NIC11_UMR1_12_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_12_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_12_COMPLETION_QUEUE_CI_0_BASE 0x59AC100ull
+#define NIC11_UMR1_12_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_12_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_12_COMPLETION_QUEUE_CI_1_BASE 0x59AC180ull
+#define NIC11_UMR1_12_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_12_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_12_SPECIAL_BASE 0x59ACE80ull
+#define NIC11_UMR1_12_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_12_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_13_UNSECURE_DOORBELL0_BASE 0x59AD000ull
+#define NIC11_UMR1_13_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_13_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_13_UNSECURE_DOORBELL1_BASE 0x59AD080ull
+#define NIC11_UMR1_13_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_13_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_13_COMPLETION_QUEUE_CI_0_BASE 0x59AD100ull
+#define NIC11_UMR1_13_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_13_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_13_COMPLETION_QUEUE_CI_1_BASE 0x59AD180ull
+#define NIC11_UMR1_13_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_13_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_13_SPECIAL_BASE 0x59ADE80ull
+#define NIC11_UMR1_13_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_13_SPECIAL_SECTION 0x1800
+#define mmNIC11_UMR1_14_UNSECURE_DOORBELL0_BASE 0x59AE000ull
+#define NIC11_UMR1_14_UNSECURE_DOORBELL0_MAX_OFFSET 0x1000
+#define NIC11_UMR1_14_UNSECURE_DOORBELL0_SECTION 0x8000
+#define mmNIC11_UMR1_14_UNSECURE_DOORBELL1_BASE 0x59AE080ull
+#define NIC11_UMR1_14_UNSECURE_DOORBELL1_MAX_OFFSET 0x1000
+#define NIC11_UMR1_14_UNSECURE_DOORBELL1_SECTION 0x8000
+#define mmNIC11_UMR1_14_COMPLETION_QUEUE_CI_0_BASE 0x59AE100ull
+#define NIC11_UMR1_14_COMPLETION_QUEUE_CI_0_MAX_OFFSET 0x8000
+#define NIC11_UMR1_14_COMPLETION_QUEUE_CI_0_SECTION 0x8000
+#define mmNIC11_UMR1_14_COMPLETION_QUEUE_CI_1_BASE 0x59AE180ull
+#define NIC11_UMR1_14_COMPLETION_QUEUE_CI_1_MAX_OFFSET 0x8000
+#define NIC11_UMR1_14_COMPLETION_QUEUE_CI_1_SECTION 0xD000
+#define mmNIC11_UMR1_14_SPECIAL_BASE 0x59AEE80ull
+#define NIC11_UMR1_14_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_UMR1_14_SPECIAL_SECTION 0x1180
+#define mmNIC11_QM_DCCM1_BASE 0x59B0000ull
+#define NIC11_QM_DCCM1_MAX_OFFSET 0x4000
+#define NIC11_QM_DCCM1_SECTION 0x8000
+#define mmNIC11_QM_ARC_AUX1_BASE 0x59B8000ull
+#define NIC11_QM_ARC_AUX1_MAX_OFFSET 0x1000
+#define NIC11_QM_ARC_AUX1_SECTION 0xE800
+#define mmNIC11_QM_ARC_AUX1_SPECIAL_BASE 0x59B8E80ull
+#define NIC11_QM_ARC_AUX1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_QM_ARC_AUX1_SPECIAL_SECTION 0x1180
+#define mmNIC11_QM1_BASE 0x59BA000ull
+#define NIC11_QM1_MAX_OFFSET 0x1000
+#define NIC11_QM1_SECTION 0x9000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR0_BASE 0x59BA900ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR0_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR0_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR1_BASE 0x59BA908ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR1_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR1_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR2_BASE 0x59BA910ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR2_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR2_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR3_BASE 0x59BA918ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR3_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR3_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR4_BASE 0x59BA920ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR4_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR4_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR5_BASE 0x59BA928ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR5_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR5_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR6_BASE 0x59BA930ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR6_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR6_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR7_BASE 0x59BA938ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR7_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR7_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR8_BASE 0x59BA940ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR8_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR8_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR9_BASE 0x59BA948ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR9_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR9_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR10_BASE 0x59BA950ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR10_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR10_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR11_BASE 0x59BA958ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR11_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR11_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR12_BASE 0x59BA960ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR12_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR12_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR13_BASE 0x59BA968ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR13_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR13_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR14_BASE 0x59BA970ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR14_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR14_SECTION 0x8000
+#define mmNIC11_QM1_QMAN_WR64_BASE_ADDR15_BASE 0x59BA978ull
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR15_MAX_OFFSET 0x8000
+#define NIC11_QM1_QMAN_WR64_BASE_ADDR15_SECTION 0x1880
+#define mmNIC11_QM1_AXUSER_SECURED_BASE 0x59BAB00ull
+#define NIC11_QM1_AXUSER_SECURED_MAX_OFFSET 0x5000
+#define NIC11_QM1_AXUSER_SECURED_SECTION 0x8000
+#define mmNIC11_QM1_AXUSER_NONSECURED_BASE 0x59BAB80ull
+#define NIC11_QM1_AXUSER_NONSECURED_MAX_OFFSET 0x5000
+#define NIC11_QM1_AXUSER_NONSECURED_SECTION 0x8000
+#define mmNIC11_QM1_DBG_HBW_BASE 0x59BAC00ull
+#define NIC11_QM1_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC11_QM1_DBG_HBW_SECTION 0x8000
+#define mmNIC11_QM1_DBG_LBW_BASE 0x59BAC80ull
+#define NIC11_QM1_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC11_QM1_DBG_LBW_SECTION 0x1000
+#define mmNIC11_QM1_CGM_BASE 0x59BAD80ull
+#define NIC11_QM1_CGM_MAX_OFFSET 0xC000
+#define NIC11_QM1_CGM_SECTION 0x1000
+#define mmNIC11_QM1_SPECIAL_BASE 0x59BAE80ull
+#define NIC11_QM1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_QM1_SPECIAL_SECTION 0x4180
+#define mmNIC11_QPC1_BASE 0x59BF000ull
+#define NIC11_QPC1_MAX_OFFSET 0x1000
+#define NIC11_QPC1_SECTION 0x7200
+#define mmNIC11_QPC1_DBFIFO0_CI_UPD_ADDR_BASE 0x59BF720ull
+#define NIC11_QPC1_DBFIFO0_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO0_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO1_CI_UPD_ADDR_BASE 0x59BF728ull
+#define NIC11_QPC1_DBFIFO1_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO1_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO2_CI_UPD_ADDR_BASE 0x59BF730ull
+#define NIC11_QPC1_DBFIFO2_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO2_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO3_CI_UPD_ADDR_BASE 0x59BF738ull
+#define NIC11_QPC1_DBFIFO3_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO3_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO4_CI_UPD_ADDR_BASE 0x59BF740ull
+#define NIC11_QPC1_DBFIFO4_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO4_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO5_CI_UPD_ADDR_BASE 0x59BF748ull
+#define NIC11_QPC1_DBFIFO5_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO5_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO6_CI_UPD_ADDR_BASE 0x59BF750ull
+#define NIC11_QPC1_DBFIFO6_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO6_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO7_CI_UPD_ADDR_BASE 0x59BF758ull
+#define NIC11_QPC1_DBFIFO7_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO7_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO8_CI_UPD_ADDR_BASE 0x59BF760ull
+#define NIC11_QPC1_DBFIFO8_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO8_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO9_CI_UPD_ADDR_BASE 0x59BF768ull
+#define NIC11_QPC1_DBFIFO9_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO9_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO10_CI_UPD_ADDR_BASE 0x59BF770ull
+#define NIC11_QPC1_DBFIFO10_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO10_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO11_CI_UPD_ADDR_BASE 0x59BF778ull
+#define NIC11_QPC1_DBFIFO11_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO11_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO12_CI_UPD_ADDR_BASE 0x59BF780ull
+#define NIC11_QPC1_DBFIFO12_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO12_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO13_CI_UPD_ADDR_BASE 0x59BF788ull
+#define NIC11_QPC1_DBFIFO13_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO13_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO14_CI_UPD_ADDR_BASE 0x59BF790ull
+#define NIC11_QPC1_DBFIFO14_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO14_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO15_CI_UPD_ADDR_BASE 0x59BF798ull
+#define NIC11_QPC1_DBFIFO15_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO15_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO16_CI_UPD_ADDR_BASE 0x59BF7A0ull
+#define NIC11_QPC1_DBFIFO16_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO16_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO17_CI_UPD_ADDR_BASE 0x59BF7A8ull
+#define NIC11_QPC1_DBFIFO17_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO17_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO18_CI_UPD_ADDR_BASE 0x59BF7B0ull
+#define NIC11_QPC1_DBFIFO18_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO18_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO19_CI_UPD_ADDR_BASE 0x59BF7B8ull
+#define NIC11_QPC1_DBFIFO19_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO19_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO20_CI_UPD_ADDR_BASE 0x59BF7C0ull
+#define NIC11_QPC1_DBFIFO20_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO20_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO21_CI_UPD_ADDR_BASE 0x59BF7C8ull
+#define NIC11_QPC1_DBFIFO21_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO21_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO22_CI_UPD_ADDR_BASE 0x59BF7D0ull
+#define NIC11_QPC1_DBFIFO22_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO22_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO23_CI_UPD_ADDR_BASE 0x59BF7D8ull
+#define NIC11_QPC1_DBFIFO23_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO23_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO24_CI_UPD_ADDR_BASE 0x59BF7E0ull
+#define NIC11_QPC1_DBFIFO24_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO24_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO25_CI_UPD_ADDR_BASE 0x59BF7E8ull
+#define NIC11_QPC1_DBFIFO25_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO25_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO26_CI_UPD_ADDR_BASE 0x59BF7F0ull
+#define NIC11_QPC1_DBFIFO26_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO26_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO27_CI_UPD_ADDR_BASE 0x59BF7F8ull
+#define NIC11_QPC1_DBFIFO27_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO27_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO28_CI_UPD_ADDR_BASE 0x59BF800ull
+#define NIC11_QPC1_DBFIFO28_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO28_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFO29_CI_UPD_ADDR_BASE 0x59BF808ull
+#define NIC11_QPC1_DBFIFO29_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFO29_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFOSECUR_CI_UPD_ADDR_BASE 0x59BF810ull
+#define NIC11_QPC1_DBFIFOSECUR_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFOSECUR_CI_UPD_ADDR_SECTION 0x8000
+#define mmNIC11_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_BASE 0x59BF818ull
+#define NIC11_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_MAX_OFFSET 0x8000
+#define NIC11_QPC1_DBFIFOPRIVIL_CI_UPD_ADDR_SECTION 0x3680
+#define mmNIC11_QPC1_AXUSER_CONG_QUE_BASE 0x59BFB80ull
+#define NIC11_QPC1_AXUSER_CONG_QUE_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_CONG_QUE_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_RXWQE_BASE 0x59BFBE0ull
+#define NIC11_QPC1_AXUSER_RXWQE_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_RXWQE_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_BASE 0x59BFC40ull
+#define NIC11_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_TXWQE_LBW_QMAN_BP_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_DB_FIFO_BASE 0x59BFCA0ull
+#define NIC11_QPC1_AXUSER_DB_FIFO_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_DB_FIFO_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_EV_QUE_LBW_INTR_BASE 0x59BFD00ull
+#define NIC11_QPC1_AXUSER_EV_QUE_LBW_INTR_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_EV_QUE_LBW_INTR_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_ERR_FIFO_BASE 0x59BFD60ull
+#define NIC11_QPC1_AXUSER_ERR_FIFO_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_ERR_FIFO_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_QPC_RESP_BASE 0x59BFDC0ull
+#define NIC11_QPC1_AXUSER_QPC_RESP_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_QPC_RESP_SECTION 0x6000
+#define mmNIC11_QPC1_AXUSER_QPC_REQ_BASE 0x59BFE20ull
+#define NIC11_QPC1_AXUSER_QPC_REQ_MAX_OFFSET 0x5000
+#define NIC11_QPC1_AXUSER_QPC_REQ_SECTION 0x6000
+#define mmNIC11_QPC1_SPECIAL_BASE 0x59BFE80ull
+#define NIC11_QPC1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_QPC1_SPECIAL_SECTION 0x8180
+#define mmNIC11_TMR_BASE 0x59C8000ull
+#define NIC11_TMR_MAX_OFFSET 0x1000
+#define NIC11_TMR_SECTION 0xD600
+#define mmNIC11_TMR_AXUSER_TMR_FREE_LIST_BASE 0x59C8D60ull
+#define NIC11_TMR_AXUSER_TMR_FREE_LIST_MAX_OFFSET 0x5000
+#define NIC11_TMR_AXUSER_TMR_FREE_LIST_SECTION 0x6000
+#define mmNIC11_TMR_AXUSER_TMR_FIFO_BASE 0x59C8DC0ull
+#define NIC11_TMR_AXUSER_TMR_FIFO_MAX_OFFSET 0x5000
+#define NIC11_TMR_AXUSER_TMR_FIFO_SECTION 0x6000
+#define mmNIC11_TMR_AXUSER_TMR_FSM_BASE 0x59C8E20ull
+#define NIC11_TMR_AXUSER_TMR_FSM_MAX_OFFSET 0x5000
+#define NIC11_TMR_AXUSER_TMR_FSM_SECTION 0x6000
+#define mmNIC11_TMR_SPECIAL_BASE 0x59C8E80ull
+#define NIC11_TMR_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_TMR_SPECIAL_SECTION 0x1800
+#define mmNIC11_RXB_CORE_BASE 0x59C9000ull
+#define NIC11_RXB_CORE_MAX_OFFSET 0x1000
+#define NIC11_RXB_CORE_SECTION 0x6100
+#define mmNIC11_RXB_CORE_SCT_AWUSER_BASE 0x59C9610ull
+#define NIC11_RXB_CORE_SCT_AWUSER_MAX_OFFSET 0x5000
+#define NIC11_RXB_CORE_SCT_AWUSER_SECTION 0x8700
+#define mmNIC11_RXB_CORE_SPECIAL_BASE 0x59C9E80ull
+#define NIC11_RXB_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_RXB_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC11_RXE0_BASE 0x59CA000ull
+#define NIC11_RXE0_MAX_OFFSET 0x1000
+#define NIC11_RXE0_SECTION 0x9000
+#define mmNIC11_RXE0_WQE_ARUSER_BASE 0x59CA900ull
+#define NIC11_RXE0_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC11_RXE0_WQE_ARUSER_SECTION 0x5800
+#define mmNIC11_RXE0_SPECIAL_BASE 0x59CAE80ull
+#define NIC11_RXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_RXE0_SPECIAL_SECTION 0x1800
+#define mmNIC11_RXE1_BASE 0x59CB000ull
+#define NIC11_RXE1_MAX_OFFSET 0x1000
+#define NIC11_RXE1_SECTION 0x9000
+#define mmNIC11_RXE1_WQE_ARUSER_BASE 0x59CB900ull
+#define NIC11_RXE1_WQE_ARUSER_MAX_OFFSET 0x5000
+#define NIC11_RXE1_WQE_ARUSER_SECTION 0x5800
+#define mmNIC11_RXE1_SPECIAL_BASE 0x59CBE80ull
+#define NIC11_RXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_RXE1_SPECIAL_SECTION 0x1800
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ0_BASE 0x59CC000ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ1_BASE 0x59CC050ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ2_BASE 0x59CC0A0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ3_BASE 0x59CC0F0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ4_BASE 0x59CC140ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ5_BASE 0x59CC190ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ6_BASE 0x59CC1E0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ7_BASE 0x59CC230ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ8_BASE 0x59CC280ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ9_BASE 0x59CC2D0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ10_BASE 0x59CC320ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ11_BASE 0x59CC370ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ12_BASE 0x59CC3C0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ13_BASE 0x59CC410ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ14_BASE 0x59CC460ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ15_BASE 0x59CC4B0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ16_BASE 0x59CC500ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ17_BASE 0x59CC550ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ18_BASE 0x59CC5A0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ19_BASE 0x59CC5F0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ20_BASE 0x59CC640ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ21_BASE 0x59CC690ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ22_BASE 0x59CC6E0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ23_BASE 0x59CC730ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ24_BASE 0x59CC780ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ25_BASE 0x59CC7D0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ26_BASE 0x59CC820ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ27_BASE 0x59CC870ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ28_BASE 0x59CC8C0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ29_BASE 0x59CC910ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ30_BASE 0x59CC960ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC11_RXE0_AXUSER_AXUSER_CQ31_BASE 0x59CC9B0ull
+#define NIC11_RXE0_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC11_RXE0_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC11_RXE0_AXUSER_SPECIAL_BASE 0x59CCE80ull
+#define NIC11_RXE0_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_RXE0_AXUSER_SPECIAL_SECTION 0x1800
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ0_BASE 0x59CD000ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ0_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ0_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ1_BASE 0x59CD050ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ1_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ1_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ2_BASE 0x59CD0A0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ2_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ2_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ3_BASE 0x59CD0F0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ3_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ3_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ4_BASE 0x59CD140ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ4_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ4_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ5_BASE 0x59CD190ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ5_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ5_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ6_BASE 0x59CD1E0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ6_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ6_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ7_BASE 0x59CD230ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ7_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ7_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ8_BASE 0x59CD280ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ8_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ8_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ9_BASE 0x59CD2D0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ9_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ9_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ10_BASE 0x59CD320ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ10_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ10_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ11_BASE 0x59CD370ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ11_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ11_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ12_BASE 0x59CD3C0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ12_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ12_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ13_BASE 0x59CD410ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ13_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ13_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ14_BASE 0x59CD460ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ14_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ14_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ15_BASE 0x59CD4B0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ15_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ15_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ16_BASE 0x59CD500ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ16_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ16_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ17_BASE 0x59CD550ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ17_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ17_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ18_BASE 0x59CD5A0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ18_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ18_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ19_BASE 0x59CD5F0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ19_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ19_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ20_BASE 0x59CD640ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ20_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ20_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ21_BASE 0x59CD690ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ21_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ21_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ22_BASE 0x59CD6E0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ22_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ22_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ23_BASE 0x59CD730ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ23_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ23_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ24_BASE 0x59CD780ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ24_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ24_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ25_BASE 0x59CD7D0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ25_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ25_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ26_BASE 0x59CD820ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ26_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ26_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ27_BASE 0x59CD870ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ27_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ27_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ28_BASE 0x59CD8C0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ28_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ28_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ29_BASE 0x59CD910ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ29_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ29_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ30_BASE 0x59CD960ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ30_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ30_SECTION 0x5000
+#define mmNIC11_RXE1_AXUSER_AXUSER_CQ31_BASE 0x59CD9B0ull
+#define NIC11_RXE1_AXUSER_AXUSER_CQ31_MAX_OFFSET 0x5000
+#define NIC11_RXE1_AXUSER_AXUSER_CQ31_SECTION 0x4D00
+#define mmNIC11_RXE1_AXUSER_SPECIAL_BASE 0x59CDE80ull
+#define NIC11_RXE1_AXUSER_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_RXE1_AXUSER_SPECIAL_SECTION 0x2180
+#define mmNIC11_TXS0_BASE 0x59D0000ull
+#define NIC11_TXS0_MAX_OFFSET 0x1000
+#define NIC11_TXS0_SECTION 0xE800
+#define mmNIC11_TXS0_SPECIAL_BASE 0x59D0E80ull
+#define NIC11_TXS0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_TXS0_SPECIAL_SECTION 0x1800
+#define mmNIC11_TXS1_BASE 0x59D1000ull
+#define NIC11_TXS1_MAX_OFFSET 0x1000
+#define NIC11_TXS1_SECTION 0xE800
+#define mmNIC11_TXS1_SPECIAL_BASE 0x59D1E80ull
+#define NIC11_TXS1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_TXS1_SPECIAL_SECTION 0x1800
+#define mmNIC11_TXE0_BASE 0x59D2000ull
+#define NIC11_TXE0_MAX_OFFSET 0x1000
+#define NIC11_TXE0_SECTION 0xE800
+#define mmNIC11_TXE0_SPECIAL_BASE 0x59D2E80ull
+#define NIC11_TXE0_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_TXE0_SPECIAL_SECTION 0x1800
+#define mmNIC11_TXE1_BASE 0x59D3000ull
+#define NIC11_TXE1_MAX_OFFSET 0x1000
+#define NIC11_TXE1_SECTION 0xE800
+#define mmNIC11_TXE1_SPECIAL_BASE 0x59D3E80ull
+#define NIC11_TXE1_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_TXE1_SPECIAL_SECTION 0x1800
+#define mmNIC11_TXB_BASE 0x59D4000ull
+#define NIC11_TXB_MAX_OFFSET 0x1000
+#define NIC11_TXB_SECTION 0xE800
+#define mmNIC11_TXB_SPECIAL_BASE 0x59D4E80ull
+#define NIC11_TXB_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_TXB_SPECIAL_SECTION 0x1800
+#define mmNIC11_MSTR_IF_RR_SHRD_HBW_BASE 0x59D5000ull
+#define NIC11_MSTR_IF_RR_SHRD_HBW_MAX_OFFSET 0x17C0
+#define NIC11_MSTR_IF_RR_SHRD_HBW_SECTION 0x2000
+#define mmNIC11_MSTR_IF_RR_PRVT_HBW_BASE 0x59D5200ull
+#define NIC11_MSTR_IF_RR_PRVT_HBW_MAX_OFFSET 0x17C0
+#define NIC11_MSTR_IF_RR_PRVT_HBW_SECTION 0x2000
+#define mmNIC11_MSTR_IF_RR_SHRD_LBW_BASE 0x59D5400ull
+#define NIC11_MSTR_IF_RR_SHRD_LBW_MAX_OFFSET 0x14C0
+#define NIC11_MSTR_IF_RR_SHRD_LBW_SECTION 0x2000
+#define mmNIC11_MSTR_IF_RR_PRVT_LBW_BASE 0x59D5600ull
+#define NIC11_MSTR_IF_RR_PRVT_LBW_MAX_OFFSET 0x14C0
+#define NIC11_MSTR_IF_RR_PRVT_LBW_SECTION 0x2000
+#define mmNIC11_MSTR_IF_E2E_CRDT_BASE 0x59D5800ull
+#define NIC11_MSTR_IF_E2E_CRDT_MAX_OFFSET 0x2400
+#define NIC11_MSTR_IF_E2E_CRDT_SECTION 0x2800
+#define mmNIC11_MSTR_IF_AXUSER_BASE 0x59D5A80ull
+#define NIC11_MSTR_IF_AXUSER_MAX_OFFSET 0x5000
+#define NIC11_MSTR_IF_AXUSER_SECTION 0x8000
+#define mmNIC11_MSTR_IF_DBG_HBW_BASE 0x59D5B00ull
+#define NIC11_MSTR_IF_DBG_HBW_MAX_OFFSET 0x5800
+#define NIC11_MSTR_IF_DBG_HBW_SECTION 0x8000
+#define mmNIC11_MSTR_IF_DBG_LBW_BASE 0x59D5B80ull
+#define NIC11_MSTR_IF_DBG_LBW_MAX_OFFSET 0x5800
+#define NIC11_MSTR_IF_DBG_LBW_SECTION 0x8000
+#define mmNIC11_MSTR_IF_CORE_HBW_BASE 0x59D5C00ull
+#define NIC11_MSTR_IF_CORE_HBW_MAX_OFFSET 0x1200
+#define NIC11_MSTR_IF_CORE_HBW_SECTION 0x1800
+#define mmNIC11_MSTR_IF_CORE_LBW_BASE 0x59D5D80ull
+#define NIC11_MSTR_IF_CORE_LBW_MAX_OFFSET 0x8000
+#define NIC11_MSTR_IF_CORE_LBW_SECTION 0x1000
+#define mmNIC11_MSTR_IF_SPECIAL_BASE 0x59D5E80ull
+#define NIC11_MSTR_IF_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_MSTR_IF_SPECIAL_SECTION 0x1800
+#define mmNIC11_TX_AXUSER_BASE 0x59D6000ull
+#define NIC11_TX_AXUSER_MAX_OFFSET 0x5000
+#define NIC11_TX_AXUSER_SECTION 0x2000
+#define mmNIC11_SERDES0_BASE 0x59D8000ull
+#define NIC11_SERDES0_MAX_OFFSET 0x3E40
+#define NIC11_SERDES0_SECTION 0x4000
+#define mmNIC11_SERDES1_BASE 0x59DC000ull
+#define NIC11_SERDES1_MAX_OFFSET 0x3E40
+#define NIC11_SERDES1_SECTION 0x4000
+#define mmNIC11_PHY_BASE 0x59E0000ull
+#define NIC11_PHY_MAX_OFFSET 0x1000
+#define NIC11_PHY_SECTION 0xE800
+#define mmNIC11_PHY_SPECIAL_BASE 0x59E0E80ull
+#define NIC11_PHY_SPECIAL_MAX_OFFSET 0x1800
+#define NIC11_PHY_SPECIAL_SECTION 0x7180
+#define mmPRT11_MAC_AUX_BASE 0x59E8000ull
+#define PRT11_MAC_AUX_MAX_OFFSET 0x1000
+#define PRT11_MAC_AUX_SECTION 0xE800
+#define mmPRT11_MAC_AUX_SPECIAL_BASE 0x59E8E80ull
+#define PRT11_MAC_AUX_SPECIAL_MAX_OFFSET 0x1800
+#define PRT11_MAC_AUX_SPECIAL_SECTION 0x1800
+#define mmPRT11_MAC_CORE_BASE 0x59E9000ull
+#define PRT11_MAC_CORE_MAX_OFFSET 0x1000
+#define PRT11_MAC_CORE_SECTION 0xE800
+#define mmPRT11_MAC_CORE_SPECIAL_BASE 0x59E9E80ull
+#define PRT11_MAC_CORE_SPECIAL_MAX_OFFSET 0x1800
+#define PRT11_MAC_CORE_SPECIAL_SECTION 0x1800
+#define mmNIC11_MAC_RS_FEC_BASE 0x59EA000ull
+#define NIC11_MAC_RS_FEC_MAX_OFFSET 0x2DC0
+#define NIC11_MAC_RS_FEC_SECTION 0x1000
+#define mmNIC11_MAC_GLOB_STAT_CONTROL_REG_BASE 0x59EB000ull
+#define NIC11_MAC_GLOB_STAT_CONTROL_REG_MAX_OFFSET 0x2000
+#define NIC11_MAC_GLOB_STAT_CONTROL_REG_SECTION 0x1000
+#define mmNIC11_MAC_GLOB_STAT_RX0_BASE 0x59EB100ull
+#define NIC11_MAC_GLOB_STAT_RX0_MAX_OFFSET 0x8C00
+#define NIC11_MAC_GLOB_STAT_RX0_SECTION 0x8C00
+#define mmNIC11_MAC_GLOB_STAT_RX1_BASE 0x59EB18Cull
+#define NIC11_MAC_GLOB_STAT_RX1_MAX_OFFSET 0x8C00
+#define NIC11_MAC_GLOB_STAT_RX1_SECTION 0x8C00
+#define mmNIC11_MAC_GLOB_STAT_RX2_BASE 0x59EB218ull
+#define NIC11_MAC_GLOB_STAT_RX2_MAX_OFFSET 0x8C00
+#define NIC11_MAC_GLOB_STAT_RX2_SECTION 0x8C00
+#define mmNIC11_MAC_GLOB_STAT_RX3_BASE 0x59EB2A4ull
+#define NIC11_MAC_GLOB_STAT_RX3_MAX_OFFSET 0x8C00
+#define NIC11_MAC_GLOB_STAT_RX3_SECTION 0x8C00
+#define mmNIC11_MAC_GLOB_STAT_TX0_BASE 0x59EB330ull
+#define NIC11_MAC_GLOB_STAT_TX0_MAX_OFFSET 0x6800
+#define NIC11_MAC_GLOB_STAT_TX0_SECTION 0x6800
+#define mmNIC11_MAC_GLOB_STAT_TX1_BASE 0x59EB398ull
+#define NIC11_MAC_GLOB_STAT_TX1_MAX_OFFSET 0x6800
+#define NIC11_MAC_GLOB_STAT_TX1_SECTION 0x6800
+#define mmNIC11_MAC_GLOB_STAT_TX2_BASE 0x59EB400ull
+#define NIC11_MAC_GLOB_STAT_TX2_MAX_OFFSET 0x6800
+#define NIC11_MAC_GLOB_STAT_TX2_SECTION 0x6800
+#define mmNIC11_MAC_GLOB_STAT_TX3_BASE 0x59EB468ull
+#define NIC11_MAC_GLOB_STAT_TX3_MAX_OFFSET 0x6800
+#define NIC11_MAC_GLOB_STAT_TX3_SECTION 0x3980
+#define mmNIC11_MAC_GLOB_STAT_RSFEC_STATS_BASE 0x59EB800ull
+#define NIC11_MAC_GLOB_STAT_RSFEC_STATS_MAX_OFFSET 0x1EC0
+#define NIC11_MAC_GLOB_STAT_RSFEC_STATS_SECTION 0x8000
+#define mmNIC11_MAC_CH0_MAC_PCS_BASE 0x59EC000ull
+#define NIC11_MAC_CH0_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC11_MAC_CH0_MAC_PCS_SECTION 0x4000
+#define mmNIC11_MAC_CH0_MAC_128_BASE 0x59EC400ull
+#define NIC11_MAC_CH0_MAC_128_MAX_OFFSET 0xA400
+#define NIC11_MAC_CH0_MAC_128_SECTION 0x4000
+#define mmNIC11_MAC_CH0_MAC_AN_BASE 0x59EC800ull
+#define NIC11_MAC_CH0_MAC_AN_MAX_OFFSET 0x4400
+#define NIC11_MAC_CH0_MAC_AN_SECTION 0x8000
+#define mmNIC11_MAC_CH1_MAC_PCS_BASE 0x59ED000ull
+#define NIC11_MAC_CH1_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC11_MAC_CH1_MAC_PCS_SECTION 0x4000
+#define mmNIC11_MAC_CH1_MAC_128_BASE 0x59ED400ull
+#define NIC11_MAC_CH1_MAC_128_MAX_OFFSET 0xA400
+#define NIC11_MAC_CH1_MAC_128_SECTION 0x4000
+#define mmNIC11_MAC_CH1_MAC_AN_BASE 0x59ED800ull
+#define NIC11_MAC_CH1_MAC_AN_MAX_OFFSET 0x4400
+#define NIC11_MAC_CH1_MAC_AN_SECTION 0x8000
+#define mmNIC11_MAC_CH2_MAC_PCS_BASE 0x59EE000ull
+#define NIC11_MAC_CH2_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC11_MAC_CH2_MAC_PCS_SECTION 0x4000
+#define mmNIC11_MAC_CH2_MAC_128_BASE 0x59EE400ull
+#define NIC11_MAC_CH2_MAC_128_MAX_OFFSET 0xA400
+#define NIC11_MAC_CH2_MAC_128_SECTION 0x4000
+#define mmNIC11_MAC_CH2_MAC_AN_BASE 0x59EE800ull
+#define NIC11_MAC_CH2_MAC_AN_MAX_OFFSET 0x4400
+#define NIC11_MAC_CH2_MAC_AN_SECTION 0x8000
+#define mmNIC11_MAC_CH3_MAC_PCS_BASE 0x59EF000ull
+#define NIC11_MAC_CH3_MAC_PCS_MAX_OFFSET 0x31C0
+#define NIC11_MAC_CH3_MAC_PCS_SECTION 0x4000
+#define mmNIC11_MAC_CH3_MAC_128_BASE 0x59EF400ull
+#define NIC11_MAC_CH3_MAC_128_MAX_OFFSET 0xA400
+#define NIC11_MAC_CH3_MAC_128_SECTION 0x4000
+#define mmNIC11_MAC_CH3_MAC_AN_BASE 0x59EF800ull
+#define NIC11_MAC_CH3_MAC_AN_MAX_OFFSET 0x4400
+#define NIC11_MAC_CH3_MAC_AN_SECTION 0x610800
+#define mmDCORE0_ROM_TABLE_L_BASE 0x6000000ull
+#define DCORE0_ROM_TABLE_L_MAX_OFFSET 0x1000
+#define DCORE0_ROM_TABLE_L_SECTION 0x80000
+#define mmDCORE0_HMMU0_CS_ROM_TBL_BASE 0x6080000ull
+#define DCORE0_HMMU0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_HMMU0_CS_STM_BASE 0x6081000ull
+#define DCORE0_HMMU0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_CS_STM_SECTION 0x1000
+#define mmDCORE0_HMMU0_CS_CTI_BASE 0x6082000ull
+#define DCORE0_HMMU0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_CS_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU0_CS_ETF_BASE 0x6083000ull
+#define DCORE0_HMMU0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_CS_ETF_SECTION 0x1000
+#define mmDCORE0_HMMU0_CS_SPMU_BASE 0x6084000ull
+#define DCORE0_HMMU0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_HMMU0_BMON_CTI_BASE 0x6085000ull
+#define DCORE0_HMMU0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU0_USER_CTI_BASE 0x6086000ull
+#define DCORE0_HMMU0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_USER_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU0_BMON_0_BASE 0x6087000ull
+#define DCORE0_HMMU0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_BMON_0_SECTION 0x1000
+#define mmDCORE0_HMMU0_BMON_1_BASE 0x6088000ull
+#define DCORE0_HMMU0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_BMON_1_SECTION 0x1000
+#define mmDCORE0_HMMU0_BMON_3_BASE 0x6089000ull
+#define DCORE0_HMMU0_BMON_3_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_BMON_3_SECTION 0x1000
+#define mmDCORE0_HMMU0_BMON_2_BASE 0x608A000ull
+#define DCORE0_HMMU0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_BMON_2_SECTION 0x1000
+#define mmDCORE0_HMMU0_BMON_4_BASE 0x608B000ull
+#define DCORE0_HMMU0_BMON_4_MAX_OFFSET 0x1000
+#define DCORE0_HMMU0_BMON_4_SECTION 0x5000
+#define mmDCORE0_HMMU1_CS_ROM_TBL_BASE 0x6090000ull
+#define DCORE0_HMMU1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_HMMU1_CS_STM_BASE 0x6091000ull
+#define DCORE0_HMMU1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_CS_STM_SECTION 0x1000
+#define mmDCORE0_HMMU1_CS_CTI_BASE 0x6092000ull
+#define DCORE0_HMMU1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_CS_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU1_CS_ETF_BASE 0x6093000ull
+#define DCORE0_HMMU1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_CS_ETF_SECTION 0x1000
+#define mmDCORE0_HMMU1_CS_SPMU_BASE 0x6094000ull
+#define DCORE0_HMMU1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_HMMU1_BMON_CTI_BASE 0x6095000ull
+#define DCORE0_HMMU1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU1_USER_CTI_BASE 0x6096000ull
+#define DCORE0_HMMU1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_USER_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU1_BMON_0_BASE 0x6097000ull
+#define DCORE0_HMMU1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_BMON_0_SECTION 0x1000
+#define mmDCORE0_HMMU1_BMON_1_BASE 0x6098000ull
+#define DCORE0_HMMU1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_BMON_1_SECTION 0x1000
+#define mmDCORE0_HMMU1_BMON_3_BASE 0x6099000ull
+#define DCORE0_HMMU1_BMON_3_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_BMON_3_SECTION 0x1000
+#define mmDCORE0_HMMU1_BMON_2_BASE 0x609A000ull
+#define DCORE0_HMMU1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_BMON_2_SECTION 0x1000
+#define mmDCORE0_HMMU1_BMON_4_BASE 0x609B000ull
+#define DCORE0_HMMU1_BMON_4_MAX_OFFSET 0x1000
+#define DCORE0_HMMU1_BMON_4_SECTION 0x5000
+#define mmDCORE0_HMMU2_CS_ROM_TBL_BASE 0x60A0000ull
+#define DCORE0_HMMU2_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_HMMU2_CS_STM_BASE 0x60A1000ull
+#define DCORE0_HMMU2_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_CS_STM_SECTION 0x1000
+#define mmDCORE0_HMMU2_CS_CTI_BASE 0x60A2000ull
+#define DCORE0_HMMU2_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_CS_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU2_CS_ETF_BASE 0x60A3000ull
+#define DCORE0_HMMU2_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_CS_ETF_SECTION 0x1000
+#define mmDCORE0_HMMU2_CS_SPMU_BASE 0x60A4000ull
+#define DCORE0_HMMU2_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_HMMU2_BMON_CTI_BASE 0x60A5000ull
+#define DCORE0_HMMU2_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU2_USER_CTI_BASE 0x60A6000ull
+#define DCORE0_HMMU2_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_USER_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU2_BMON_0_BASE 0x60A7000ull
+#define DCORE0_HMMU2_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_BMON_0_SECTION 0x1000
+#define mmDCORE0_HMMU2_BMON_1_BASE 0x60A8000ull
+#define DCORE0_HMMU2_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_BMON_1_SECTION 0x1000
+#define mmDCORE0_HMMU2_BMON_3_BASE 0x60A9000ull
+#define DCORE0_HMMU2_BMON_3_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_BMON_3_SECTION 0x1000
+#define mmDCORE0_HMMU2_BMON_2_BASE 0x60AA000ull
+#define DCORE0_HMMU2_BMON_2_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_BMON_2_SECTION 0x1000
+#define mmDCORE0_HMMU2_BMON_4_BASE 0x60AB000ull
+#define DCORE0_HMMU2_BMON_4_MAX_OFFSET 0x1000
+#define DCORE0_HMMU2_BMON_4_SECTION 0x5000
+#define mmDCORE0_HMMU3_CS_ROM_TBL_BASE 0x60B0000ull
+#define DCORE0_HMMU3_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_HMMU3_CS_STM_BASE 0x60B1000ull
+#define DCORE0_HMMU3_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_CS_STM_SECTION 0x1000
+#define mmDCORE0_HMMU3_CS_CTI_BASE 0x60B2000ull
+#define DCORE0_HMMU3_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_CS_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU3_CS_ETF_BASE 0x60B3000ull
+#define DCORE0_HMMU3_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_CS_ETF_SECTION 0x1000
+#define mmDCORE0_HMMU3_CS_SPMU_BASE 0x60B4000ull
+#define DCORE0_HMMU3_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_HMMU3_BMON_CTI_BASE 0x60B5000ull
+#define DCORE0_HMMU3_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU3_USER_CTI_BASE 0x60B6000ull
+#define DCORE0_HMMU3_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_USER_CTI_SECTION 0x1000
+#define mmDCORE0_HMMU3_BMON_0_BASE 0x60B7000ull
+#define DCORE0_HMMU3_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_BMON_0_SECTION 0x1000
+#define mmDCORE0_HMMU3_BMON_1_BASE 0x60B8000ull
+#define DCORE0_HMMU3_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_BMON_1_SECTION 0x1000
+#define mmDCORE0_HMMU3_BMON_3_BASE 0x60B9000ull
+#define DCORE0_HMMU3_BMON_3_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_BMON_3_SECTION 0x1000
+#define mmDCORE0_HMMU3_BMON_2_BASE 0x60BA000ull
+#define DCORE0_HMMU3_BMON_2_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_BMON_2_SECTION 0x1000
+#define mmDCORE0_HMMU3_BMON_4_BASE 0x60BB000ull
+#define DCORE0_HMMU3_BMON_4_MAX_OFFSET 0x1000
+#define DCORE0_HMMU3_BMON_4_SECTION 0x5000
+#define mmDCORE0_MME_CTRL_ROM_TABLE_BASE 0x60C0000ull
+#define DCORE0_MME_CTRL_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_ROM_TABLE_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_STM_BASE 0x60C1000ull
+#define DCORE0_MME_CTRL_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_STM_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_CTI_BASE 0x60C2000ull
+#define DCORE0_MME_CTRL_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_CTI_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_ETF_BASE 0x60C3000ull
+#define DCORE0_MME_CTRL_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_ETF_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_SPMU_BASE 0x60C4000ull
+#define DCORE0_MME_CTRL_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_CTI0_BASE 0x60C5000ull
+#define DCORE0_MME_CTRL_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_CTI1_BASE 0x60C6000ull
+#define DCORE0_MME_CTRL_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_BMON0_BASE 0x60C7000ull
+#define DCORE0_MME_CTRL_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_BMON0_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_BMON1_BASE 0x60C8000ull
+#define DCORE0_MME_CTRL_BMON1_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_BMON1_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_BMON2_BASE 0x60C9000ull
+#define DCORE0_MME_CTRL_BMON2_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_BMON2_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_BMON3_BASE 0x60CA000ull
+#define DCORE0_MME_CTRL_BMON3_MAX_OFFSET 0x1000
+#define DCORE0_MME_CTRL_BMON3_SECTION 0x1000
+#define mmDCORE0_MME_CTRL_ARC_RTT_BASE 0x60CB000ull
+#define DCORE0_MME_CTRL_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_MME_CTRL_ARC_RTT_SECTION 0x5000
+#define mmDCORE0_MME_SBTE0_ROM_TBL_BASE 0x60D0000ull
+#define DCORE0_MME_SBTE0_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_STM_BASE 0x60D1000ull
+#define DCORE0_MME_SBTE0_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_STM_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_CTI_BASE 0x60D2000ull
+#define DCORE0_MME_SBTE0_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_CTI_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_ETF_BASE 0x60D3000ull
+#define DCORE0_MME_SBTE0_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_ETF_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_SPMU_BASE 0x60D4000ull
+#define DCORE0_MME_SBTE0_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_CTI0_BASE 0x60D5000ull
+#define DCORE0_MME_SBTE0_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_CTI1_BASE 0x60D6000ull
+#define DCORE0_MME_SBTE0_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_SBTE0_BMON0_BASE 0x60D7000ull
+#define DCORE0_MME_SBTE0_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE0_BMON0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_ROM_TBL_BASE 0x60D8000ull
+#define DCORE0_MME_SBTE1_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_STM_BASE 0x60D9000ull
+#define DCORE0_MME_SBTE1_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_STM_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_CTI_BASE 0x60DA000ull
+#define DCORE0_MME_SBTE1_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_CTI_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_ETF_BASE 0x60DB000ull
+#define DCORE0_MME_SBTE1_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_ETF_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_SPMU_BASE 0x60DC000ull
+#define DCORE0_MME_SBTE1_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_CTI0_BASE 0x60DD000ull
+#define DCORE0_MME_SBTE1_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_CTI1_BASE 0x60DE000ull
+#define DCORE0_MME_SBTE1_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_SBTE1_BMON0_BASE 0x60DF000ull
+#define DCORE0_MME_SBTE1_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE1_BMON0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_ROM_TBL_BASE 0x60E0000ull
+#define DCORE0_MME_SBTE2_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_STM_BASE 0x60E1000ull
+#define DCORE0_MME_SBTE2_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_STM_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_CTI_BASE 0x60E2000ull
+#define DCORE0_MME_SBTE2_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_CTI_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_ETF_BASE 0x60E3000ull
+#define DCORE0_MME_SBTE2_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_ETF_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_SPMU_BASE 0x60E4000ull
+#define DCORE0_MME_SBTE2_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_CTI0_BASE 0x60E5000ull
+#define DCORE0_MME_SBTE2_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_CTI1_BASE 0x60E6000ull
+#define DCORE0_MME_SBTE2_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_SBTE2_BMON0_BASE 0x60E7000ull
+#define DCORE0_MME_SBTE2_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE2_BMON0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_ROM_TBL_BASE 0x60E8000ull
+#define DCORE0_MME_SBTE3_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_STM_BASE 0x60E9000ull
+#define DCORE0_MME_SBTE3_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_STM_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_CTI_BASE 0x60EA000ull
+#define DCORE0_MME_SBTE3_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_CTI_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_ETF_BASE 0x60EB000ull
+#define DCORE0_MME_SBTE3_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_ETF_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_SPMU_BASE 0x60EC000ull
+#define DCORE0_MME_SBTE3_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_CTI0_BASE 0x60ED000ull
+#define DCORE0_MME_SBTE3_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_CTI1_BASE 0x60EE000ull
+#define DCORE0_MME_SBTE3_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_SBTE3_BMON0_BASE 0x60EF000ull
+#define DCORE0_MME_SBTE3_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE3_BMON0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_ROM_TBL_BASE 0x60F0000ull
+#define DCORE0_MME_SBTE4_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_STM_BASE 0x60F1000ull
+#define DCORE0_MME_SBTE4_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_STM_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_CTI_BASE 0x60F2000ull
+#define DCORE0_MME_SBTE4_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_CTI_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_ETF_BASE 0x60F3000ull
+#define DCORE0_MME_SBTE4_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_ETF_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_SPMU_BASE 0x60F4000ull
+#define DCORE0_MME_SBTE4_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_CTI0_BASE 0x60F5000ull
+#define DCORE0_MME_SBTE4_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_CTI1_BASE 0x60F6000ull
+#define DCORE0_MME_SBTE4_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_SBTE4_BMON0_BASE 0x60F7000ull
+#define DCORE0_MME_SBTE4_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_SBTE4_BMON0_SECTION 0x9000
+#define mmDCORE0_MME_ACC_CS_ROM_TBL_BASE 0x6100000ull
+#define DCORE0_MME_ACC_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_MME_ACC_STM_BASE 0x6101000ull
+#define DCORE0_MME_ACC_STM_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_STM_SECTION 0x1000
+#define mmDCORE0_MME_ACC_CTI_BASE 0x6102000ull
+#define DCORE0_MME_ACC_CTI_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_CTI_SECTION 0x1000
+#define mmDCORE0_MME_ACC_ETF_BASE 0x6103000ull
+#define DCORE0_MME_ACC_ETF_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_ETF_SECTION 0x1000
+#define mmDCORE0_MME_ACC_SPMU_BASE 0x6104000ull
+#define DCORE0_MME_ACC_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_SPMU_SECTION 0x1000
+#define mmDCORE0_MME_ACC_CTI0_BASE 0x6105000ull
+#define DCORE0_MME_ACC_CTI0_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_CTI0_SECTION 0x1000
+#define mmDCORE0_MME_ACC_CTI1_BASE 0x6106000ull
+#define DCORE0_MME_ACC_CTI1_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_CTI1_SECTION 0x1000
+#define mmDCORE0_MME_ACC_BMON0_BASE 0x6107000ull
+#define DCORE0_MME_ACC_BMON0_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_BMON0_SECTION 0x1000
+#define mmDCORE0_MME_ACC_BMON1_BASE 0x6108000ull
+#define DCORE0_MME_ACC_BMON1_MAX_OFFSET 0x1000
+#define DCORE0_MME_ACC_BMON1_SECTION 0x8000
+#define mmDCORE0_SM_CS_DBG_ROM_TBL_BASE 0x6110000ull
+#define DCORE0_SM_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_SM_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_SM_STM_BASE 0x6111000ull
+#define DCORE0_SM_STM_MAX_OFFSET 0x1000
+#define DCORE0_SM_STM_SECTION 0x1000
+#define mmDCORE0_SM_CTI_BASE 0x6112000ull
+#define DCORE0_SM_CTI_MAX_OFFSET 0x1000
+#define DCORE0_SM_CTI_SECTION 0x1000
+#define mmDCORE0_SM_ETF_BASE 0x6113000ull
+#define DCORE0_SM_ETF_MAX_OFFSET 0x1000
+#define DCORE0_SM_ETF_SECTION 0x1000
+#define mmDCORE0_SM_SPMU_BASE 0x6114000ull
+#define DCORE0_SM_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_SM_SPMU_SECTION 0x1000
+#define mmDCORE0_SM_BMON_CTI_BASE 0x6115000ull
+#define DCORE0_SM_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_SM_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_SM_USER_CTI_BASE 0x6116000ull
+#define DCORE0_SM_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_SM_USER_CTI_SECTION 0x1000
+#define mmDCORE0_SM_BMON_BASE 0x6117000ull
+#define DCORE0_SM_BMON_MAX_OFFSET 0x1000
+#define DCORE0_SM_BMON_SECTION 0x1000
+#define mmDCORE0_SM_BMON1_BASE 0x6118000ull
+#define DCORE0_SM_BMON1_MAX_OFFSET 0x1000
+#define DCORE0_SM_BMON1_SECTION 0x18000
+#define mmDCORE0_XFT_FUNNEL_BASE 0x6130000ull
+#define DCORE0_XFT_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_XFT_FUNNEL_SECTION 0x8000
+#define mmDCORE0_TFT0_FUNNEL_BASE 0x6138000ull
+#define DCORE0_TFT0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TFT0_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TFT1_FUNNEL_BASE 0x6139000ull
+#define DCORE0_TFT1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TFT1_FUNNEL_SECTION 0x1000
+#define mmDCORE0_TFT2_FUNNEL_BASE 0x613A000ull
+#define DCORE0_TFT2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_TFT2_FUNNEL_SECTION 0x7000
+#define mmDCORE0_RTR0_FUNNEL_BASE 0x6141000ull
+#define DCORE0_RTR0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR0_FUNNEL_SECTION 0x8000
+#define mmDCORE0_RTR1_FUNNEL_BASE 0x6149000ull
+#define DCORE0_RTR1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR1_FUNNEL_SECTION 0x8000
+#define mmDCORE0_RTR2_FUNNEL_BASE 0x6151000ull
+#define DCORE0_RTR2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR2_FUNNEL_SECTION 0x8000
+#define mmDCORE0_RTR3_FUNNEL_BASE 0x6159000ull
+#define DCORE0_RTR3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR3_FUNNEL_SECTION 0x8000
+#define mmDCORE0_RTR4_FUNNEL_BASE 0x6161000ull
+#define DCORE0_RTR4_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR4_FUNNEL_SECTION 0x4000
+#define mmDCORE0_MIF0_FUNNEL_BASE 0x6165000ull
+#define DCORE0_MIF0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_MIF0_FUNNEL_SECTION 0x4000
+#define mmDCORE0_RTR5_FUNNEL_BASE 0x6169000ull
+#define DCORE0_RTR5_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR5_FUNNEL_SECTION 0x4000
+#define mmDCORE0_MIF1_FUNNEL_BASE 0x616D000ull
+#define DCORE0_MIF1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_MIF1_FUNNEL_SECTION 0x4000
+#define mmDCORE0_RTR6_FUNNEL_BASE 0x6171000ull
+#define DCORE0_RTR6_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR6_FUNNEL_SECTION 0x4000
+#define mmDCORE0_MIF2_FUNNEL_BASE 0x6175000ull
+#define DCORE0_MIF2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_MIF2_FUNNEL_SECTION 0x4000
+#define mmDCORE0_RTR7_FUNNEL_BASE 0x6179000ull
+#define DCORE0_RTR7_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_RTR7_FUNNEL_SECTION 0x4000
+#define mmDCORE0_MIF3_FUNNEL_BASE 0x617D000ull
+#define DCORE0_MIF3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_MIF3_FUNNEL_SECTION 0x43000
+#define mmDCORE0_EDMA0_CS_ROM_TBL_BASE 0x61C0000ull
+#define DCORE0_EDMA0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_EDMA0_CS_STM_BASE 0x61C1000ull
+#define DCORE0_EDMA0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_CS_STM_SECTION 0x1000
+#define mmDCORE0_EDMA0_CS_CTI_BASE 0x61C2000ull
+#define DCORE0_EDMA0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_CS_CTI_SECTION 0x1000
+#define mmDCORE0_EDMA0_CS_ETF_BASE 0x61C3000ull
+#define DCORE0_EDMA0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_CS_ETF_SECTION 0x1000
+#define mmDCORE0_EDMA0_CS_SPMU_BASE 0x61C4000ull
+#define DCORE0_EDMA0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_EDMA0_BMON_CTI_BASE 0x61C5000ull
+#define DCORE0_EDMA0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_EDMA0_USER_CTI_BASE 0x61C6000ull
+#define DCORE0_EDMA0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_USER_CTI_SECTION 0x1000
+#define mmDCORE0_EDMA0_BMON_0_BASE 0x61C7000ull
+#define DCORE0_EDMA0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_BMON_0_SECTION 0x1000
+#define mmDCORE0_EDMA0_BMON_1_BASE 0x61C8000ull
+#define DCORE0_EDMA0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_EDMA0_BMON_1_SECTION 0x1000
+#define mmDCORE0_EDMA0_QM_ARC_RTT_BASE 0x61C9000ull
+#define DCORE0_EDMA0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_EDMA0_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE0_EDMA1_CS_ROM_TBL_BASE 0x61D0000ull
+#define DCORE0_EDMA1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_EDMA1_CS_STM_BASE 0x61D1000ull
+#define DCORE0_EDMA1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_CS_STM_SECTION 0x1000
+#define mmDCORE0_EDMA1_CS_CTI_BASE 0x61D2000ull
+#define DCORE0_EDMA1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_CS_CTI_SECTION 0x1000
+#define mmDCORE0_EDMA1_CS_ETF_BASE 0x61D3000ull
+#define DCORE0_EDMA1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_CS_ETF_SECTION 0x1000
+#define mmDCORE0_EDMA1_CS_SPMU_BASE 0x61D4000ull
+#define DCORE0_EDMA1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_EDMA1_BMON_CTI_BASE 0x61D5000ull
+#define DCORE0_EDMA1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_EDMA1_USER_CTI_BASE 0x61D6000ull
+#define DCORE0_EDMA1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_USER_CTI_SECTION 0x1000
+#define mmDCORE0_EDMA1_BMON_0_BASE 0x61D7000ull
+#define DCORE0_EDMA1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_BMON_0_SECTION 0x1000
+#define mmDCORE0_EDMA1_BMON_1_BASE 0x61D8000ull
+#define DCORE0_EDMA1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_EDMA1_BMON_1_SECTION 0x1000
+#define mmDCORE0_EDMA1_QM_ARC_RTT_BASE 0x61D9000ull
+#define DCORE0_EDMA1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE0_EDMA1_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE0_VDEC0_CS_ROM_TBL_BASE 0x61E0000ull
+#define DCORE0_VDEC0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_VDEC0_CS_STM_BASE 0x61E1000ull
+#define DCORE0_VDEC0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_CS_STM_SECTION 0x1000
+#define mmDCORE0_VDEC0_CS_CTI_BASE 0x61E2000ull
+#define DCORE0_VDEC0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_CS_CTI_SECTION 0x1000
+#define mmDCORE0_VDEC0_CS_ETF_BASE 0x61E3000ull
+#define DCORE0_VDEC0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_CS_ETF_SECTION 0x1000
+#define mmDCORE0_VDEC0_CS_SPMU_BASE 0x61E4000ull
+#define DCORE0_VDEC0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_VDEC0_BMON_CTI_BASE 0x61E5000ull
+#define DCORE0_VDEC0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_VDEC0_USER_CTI_BASE 0x61E6000ull
+#define DCORE0_VDEC0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_USER_CTI_SECTION 0x1000
+#define mmDCORE0_VDEC0_BMON_0_BASE 0x61E7000ull
+#define DCORE0_VDEC0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_BMON_0_SECTION 0x1000
+#define mmDCORE0_VDEC0_BMON_1_BASE 0x61E8000ull
+#define DCORE0_VDEC0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_BMON_1_SECTION 0x1000
+#define mmDCORE0_VDEC0_BMON_2_BASE 0x61E9000ull
+#define DCORE0_VDEC0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE0_VDEC0_BMON_2_SECTION 0x7000
+#define mmDCORE0_VDEC1_CS_ROM_TBL_BASE 0x61F0000ull
+#define DCORE0_VDEC1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE0_VDEC1_CS_STM_BASE 0x61F1000ull
+#define DCORE0_VDEC1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_CS_STM_SECTION 0x1000
+#define mmDCORE0_VDEC1_CS_CTI_BASE 0x61F2000ull
+#define DCORE0_VDEC1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_CS_CTI_SECTION 0x1000
+#define mmDCORE0_VDEC1_CS_ETF_BASE 0x61F3000ull
+#define DCORE0_VDEC1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_CS_ETF_SECTION 0x1000
+#define mmDCORE0_VDEC1_CS_SPMU_BASE 0x61F4000ull
+#define DCORE0_VDEC1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_CS_SPMU_SECTION 0x1000
+#define mmDCORE0_VDEC1_BMON_CTI_BASE 0x61F5000ull
+#define DCORE0_VDEC1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_BMON_CTI_SECTION 0x1000
+#define mmDCORE0_VDEC1_USER_CTI_BASE 0x61F6000ull
+#define DCORE0_VDEC1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_USER_CTI_SECTION 0x1000
+#define mmDCORE0_VDEC1_BMON_0_BASE 0x61F7000ull
+#define DCORE0_VDEC1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_BMON_0_SECTION 0x1000
+#define mmDCORE0_VDEC1_BMON_1_BASE 0x61F8000ull
+#define DCORE0_VDEC1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_BMON_1_SECTION 0x1000
+#define mmDCORE0_VDEC1_BMON_2_BASE 0x61F9000ull
+#define DCORE0_VDEC1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE0_VDEC1_BMON_2_SECTION 0x7000
+#define mmDCORE1_ROM_TABLE_L_BASE 0x6200000ull
+#define DCORE1_ROM_TABLE_L_MAX_OFFSET 0x1000
+#define DCORE1_ROM_TABLE_L_SECTION 0x80000
+#define mmDCORE1_HMMU0_CS_ROM_TBL_BASE 0x6280000ull
+#define DCORE1_HMMU0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_HMMU0_CS_STM_BASE 0x6281000ull
+#define DCORE1_HMMU0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_CS_STM_SECTION 0x1000
+#define mmDCORE1_HMMU0_CS_CTI_BASE 0x6282000ull
+#define DCORE1_HMMU0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_CS_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU0_CS_ETF_BASE 0x6283000ull
+#define DCORE1_HMMU0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_CS_ETF_SECTION 0x1000
+#define mmDCORE1_HMMU0_CS_SPMU_BASE 0x6284000ull
+#define DCORE1_HMMU0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_HMMU0_BMON_CTI_BASE 0x6285000ull
+#define DCORE1_HMMU0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU0_USER_CTI_BASE 0x6286000ull
+#define DCORE1_HMMU0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_USER_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU0_BMON_0_BASE 0x6287000ull
+#define DCORE1_HMMU0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_BMON_0_SECTION 0x1000
+#define mmDCORE1_HMMU0_BMON_1_BASE 0x6288000ull
+#define DCORE1_HMMU0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_BMON_1_SECTION 0x1000
+#define mmDCORE1_HMMU0_BMON_3_BASE 0x6289000ull
+#define DCORE1_HMMU0_BMON_3_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_BMON_3_SECTION 0x1000
+#define mmDCORE1_HMMU0_BMON_2_BASE 0x628A000ull
+#define DCORE1_HMMU0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_BMON_2_SECTION 0x1000
+#define mmDCORE1_HMMU0_BMON_4_BASE 0x628B000ull
+#define DCORE1_HMMU0_BMON_4_MAX_OFFSET 0x1000
+#define DCORE1_HMMU0_BMON_4_SECTION 0x5000
+#define mmDCORE1_HMMU1_CS_ROM_TBL_BASE 0x6290000ull
+#define DCORE1_HMMU1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_HMMU1_CS_STM_BASE 0x6291000ull
+#define DCORE1_HMMU1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_CS_STM_SECTION 0x1000
+#define mmDCORE1_HMMU1_CS_CTI_BASE 0x6292000ull
+#define DCORE1_HMMU1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_CS_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU1_CS_ETF_BASE 0x6293000ull
+#define DCORE1_HMMU1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_CS_ETF_SECTION 0x1000
+#define mmDCORE1_HMMU1_CS_SPMU_BASE 0x6294000ull
+#define DCORE1_HMMU1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_HMMU1_BMON_CTI_BASE 0x6295000ull
+#define DCORE1_HMMU1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU1_USER_CTI_BASE 0x6296000ull
+#define DCORE1_HMMU1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_USER_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU1_BMON_0_BASE 0x6297000ull
+#define DCORE1_HMMU1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_BMON_0_SECTION 0x1000
+#define mmDCORE1_HMMU1_BMON_1_BASE 0x6298000ull
+#define DCORE1_HMMU1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_BMON_1_SECTION 0x1000
+#define mmDCORE1_HMMU1_BMON_3_BASE 0x6299000ull
+#define DCORE1_HMMU1_BMON_3_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_BMON_3_SECTION 0x1000
+#define mmDCORE1_HMMU1_BMON_2_BASE 0x629A000ull
+#define DCORE1_HMMU1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_BMON_2_SECTION 0x1000
+#define mmDCORE1_HMMU1_BMON_4_BASE 0x629B000ull
+#define DCORE1_HMMU1_BMON_4_MAX_OFFSET 0x1000
+#define DCORE1_HMMU1_BMON_4_SECTION 0x5000
+#define mmDCORE1_HMMU2_CS_ROM_TBL_BASE 0x62A0000ull
+#define DCORE1_HMMU2_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_HMMU2_CS_STM_BASE 0x62A1000ull
+#define DCORE1_HMMU2_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_CS_STM_SECTION 0x1000
+#define mmDCORE1_HMMU2_CS_CTI_BASE 0x62A2000ull
+#define DCORE1_HMMU2_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_CS_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU2_CS_ETF_BASE 0x62A3000ull
+#define DCORE1_HMMU2_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_CS_ETF_SECTION 0x1000
+#define mmDCORE1_HMMU2_CS_SPMU_BASE 0x62A4000ull
+#define DCORE1_HMMU2_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_HMMU2_BMON_CTI_BASE 0x62A5000ull
+#define DCORE1_HMMU2_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU2_USER_CTI_BASE 0x62A6000ull
+#define DCORE1_HMMU2_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_USER_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU2_BMON_0_BASE 0x62A7000ull
+#define DCORE1_HMMU2_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_BMON_0_SECTION 0x1000
+#define mmDCORE1_HMMU2_BMON_1_BASE 0x62A8000ull
+#define DCORE1_HMMU2_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_BMON_1_SECTION 0x1000
+#define mmDCORE1_HMMU2_BMON_3_BASE 0x62A9000ull
+#define DCORE1_HMMU2_BMON_3_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_BMON_3_SECTION 0x1000
+#define mmDCORE1_HMMU2_BMON_2_BASE 0x62AA000ull
+#define DCORE1_HMMU2_BMON_2_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_BMON_2_SECTION 0x1000
+#define mmDCORE1_HMMU2_BMON_4_BASE 0x62AB000ull
+#define DCORE1_HMMU2_BMON_4_MAX_OFFSET 0x1000
+#define DCORE1_HMMU2_BMON_4_SECTION 0x5000
+#define mmDCORE1_HMMU3_CS_ROM_TBL_BASE 0x62B0000ull
+#define DCORE1_HMMU3_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_HMMU3_CS_STM_BASE 0x62B1000ull
+#define DCORE1_HMMU3_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_CS_STM_SECTION 0x1000
+#define mmDCORE1_HMMU3_CS_CTI_BASE 0x62B2000ull
+#define DCORE1_HMMU3_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_CS_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU3_CS_ETF_BASE 0x62B3000ull
+#define DCORE1_HMMU3_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_CS_ETF_SECTION 0x1000
+#define mmDCORE1_HMMU3_CS_SPMU_BASE 0x62B4000ull
+#define DCORE1_HMMU3_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_HMMU3_BMON_CTI_BASE 0x62B5000ull
+#define DCORE1_HMMU3_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU3_USER_CTI_BASE 0x62B6000ull
+#define DCORE1_HMMU3_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_USER_CTI_SECTION 0x1000
+#define mmDCORE1_HMMU3_BMON_0_BASE 0x62B7000ull
+#define DCORE1_HMMU3_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_BMON_0_SECTION 0x1000
+#define mmDCORE1_HMMU3_BMON_1_BASE 0x62B8000ull
+#define DCORE1_HMMU3_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_BMON_1_SECTION 0x1000
+#define mmDCORE1_HMMU3_BMON_3_BASE 0x62B9000ull
+#define DCORE1_HMMU3_BMON_3_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_BMON_3_SECTION 0x1000
+#define mmDCORE1_HMMU3_BMON_2_BASE 0x62BA000ull
+#define DCORE1_HMMU3_BMON_2_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_BMON_2_SECTION 0x1000
+#define mmDCORE1_HMMU3_BMON_4_BASE 0x62BB000ull
+#define DCORE1_HMMU3_BMON_4_MAX_OFFSET 0x1000
+#define DCORE1_HMMU3_BMON_4_SECTION 0x5000
+#define mmDCORE1_MME_CTRL_ROM_TABLE_BASE 0x62C0000ull
+#define DCORE1_MME_CTRL_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_ROM_TABLE_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_STM_BASE 0x62C1000ull
+#define DCORE1_MME_CTRL_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_STM_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_CTI_BASE 0x62C2000ull
+#define DCORE1_MME_CTRL_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_CTI_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_ETF_BASE 0x62C3000ull
+#define DCORE1_MME_CTRL_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_ETF_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_SPMU_BASE 0x62C4000ull
+#define DCORE1_MME_CTRL_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_CTI0_BASE 0x62C5000ull
+#define DCORE1_MME_CTRL_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_CTI1_BASE 0x62C6000ull
+#define DCORE1_MME_CTRL_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_BMON0_BASE 0x62C7000ull
+#define DCORE1_MME_CTRL_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_BMON0_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_BMON1_BASE 0x62C8000ull
+#define DCORE1_MME_CTRL_BMON1_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_BMON1_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_BMON2_BASE 0x62C9000ull
+#define DCORE1_MME_CTRL_BMON2_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_BMON2_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_BMON3_BASE 0x62CA000ull
+#define DCORE1_MME_CTRL_BMON3_MAX_OFFSET 0x1000
+#define DCORE1_MME_CTRL_BMON3_SECTION 0x1000
+#define mmDCORE1_MME_CTRL_ARC_RTT_BASE 0x62CB000ull
+#define DCORE1_MME_CTRL_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_MME_CTRL_ARC_RTT_SECTION 0x5000
+#define mmDCORE1_MME_SBTE0_ROM_TBL_BASE 0x62D0000ull
+#define DCORE1_MME_SBTE0_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_STM_BASE 0x62D1000ull
+#define DCORE1_MME_SBTE0_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_STM_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_CTI_BASE 0x62D2000ull
+#define DCORE1_MME_SBTE0_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_CTI_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_ETF_BASE 0x62D3000ull
+#define DCORE1_MME_SBTE0_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_ETF_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_SPMU_BASE 0x62D4000ull
+#define DCORE1_MME_SBTE0_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_CTI0_BASE 0x62D5000ull
+#define DCORE1_MME_SBTE0_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_CTI1_BASE 0x62D6000ull
+#define DCORE1_MME_SBTE0_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_SBTE0_BMON0_BASE 0x62D7000ull
+#define DCORE1_MME_SBTE0_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE0_BMON0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_ROM_TBL_BASE 0x62D8000ull
+#define DCORE1_MME_SBTE1_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_STM_BASE 0x62D9000ull
+#define DCORE1_MME_SBTE1_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_STM_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_CTI_BASE 0x62DA000ull
+#define DCORE1_MME_SBTE1_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_CTI_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_ETF_BASE 0x62DB000ull
+#define DCORE1_MME_SBTE1_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_ETF_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_SPMU_BASE 0x62DC000ull
+#define DCORE1_MME_SBTE1_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_CTI0_BASE 0x62DD000ull
+#define DCORE1_MME_SBTE1_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_CTI1_BASE 0x62DE000ull
+#define DCORE1_MME_SBTE1_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_SBTE1_BMON0_BASE 0x62DF000ull
+#define DCORE1_MME_SBTE1_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE1_BMON0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_ROM_TBL_BASE 0x62E0000ull
+#define DCORE1_MME_SBTE2_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_STM_BASE 0x62E1000ull
+#define DCORE1_MME_SBTE2_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_STM_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_CTI_BASE 0x62E2000ull
+#define DCORE1_MME_SBTE2_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_CTI_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_ETF_BASE 0x62E3000ull
+#define DCORE1_MME_SBTE2_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_ETF_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_SPMU_BASE 0x62E4000ull
+#define DCORE1_MME_SBTE2_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_CTI0_BASE 0x62E5000ull
+#define DCORE1_MME_SBTE2_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_CTI1_BASE 0x62E6000ull
+#define DCORE1_MME_SBTE2_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_SBTE2_BMON0_BASE 0x62E7000ull
+#define DCORE1_MME_SBTE2_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE2_BMON0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_ROM_TBL_BASE 0x62E8000ull
+#define DCORE1_MME_SBTE3_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_STM_BASE 0x62E9000ull
+#define DCORE1_MME_SBTE3_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_STM_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_CTI_BASE 0x62EA000ull
+#define DCORE1_MME_SBTE3_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_CTI_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_ETF_BASE 0x62EB000ull
+#define DCORE1_MME_SBTE3_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_ETF_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_SPMU_BASE 0x62EC000ull
+#define DCORE1_MME_SBTE3_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_CTI0_BASE 0x62ED000ull
+#define DCORE1_MME_SBTE3_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_CTI1_BASE 0x62EE000ull
+#define DCORE1_MME_SBTE3_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_SBTE3_BMON0_BASE 0x62EF000ull
+#define DCORE1_MME_SBTE3_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE3_BMON0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_ROM_TBL_BASE 0x62F0000ull
+#define DCORE1_MME_SBTE4_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_STM_BASE 0x62F1000ull
+#define DCORE1_MME_SBTE4_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_STM_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_CTI_BASE 0x62F2000ull
+#define DCORE1_MME_SBTE4_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_CTI_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_ETF_BASE 0x62F3000ull
+#define DCORE1_MME_SBTE4_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_ETF_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_SPMU_BASE 0x62F4000ull
+#define DCORE1_MME_SBTE4_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_CTI0_BASE 0x62F5000ull
+#define DCORE1_MME_SBTE4_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_CTI1_BASE 0x62F6000ull
+#define DCORE1_MME_SBTE4_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_SBTE4_BMON0_BASE 0x62F7000ull
+#define DCORE1_MME_SBTE4_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_SBTE4_BMON0_SECTION 0x9000
+#define mmDCORE1_MME_ACC_CS_ROM_TBL_BASE 0x6300000ull
+#define DCORE1_MME_ACC_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_MME_ACC_STM_BASE 0x6301000ull
+#define DCORE1_MME_ACC_STM_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_STM_SECTION 0x1000
+#define mmDCORE1_MME_ACC_CTI_BASE 0x6302000ull
+#define DCORE1_MME_ACC_CTI_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_CTI_SECTION 0x1000
+#define mmDCORE1_MME_ACC_ETF_BASE 0x6303000ull
+#define DCORE1_MME_ACC_ETF_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_ETF_SECTION 0x1000
+#define mmDCORE1_MME_ACC_SPMU_BASE 0x6304000ull
+#define DCORE1_MME_ACC_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_SPMU_SECTION 0x1000
+#define mmDCORE1_MME_ACC_CTI0_BASE 0x6305000ull
+#define DCORE1_MME_ACC_CTI0_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_CTI0_SECTION 0x1000
+#define mmDCORE1_MME_ACC_CTI1_BASE 0x6306000ull
+#define DCORE1_MME_ACC_CTI1_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_CTI1_SECTION 0x1000
+#define mmDCORE1_MME_ACC_BMON0_BASE 0x6307000ull
+#define DCORE1_MME_ACC_BMON0_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_BMON0_SECTION 0x1000
+#define mmDCORE1_MME_ACC_BMON1_BASE 0x6308000ull
+#define DCORE1_MME_ACC_BMON1_MAX_OFFSET 0x1000
+#define DCORE1_MME_ACC_BMON1_SECTION 0x8000
+#define mmDCORE1_SM_CS_DBG_ROM_TBL_BASE 0x6310000ull
+#define DCORE1_SM_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_SM_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_SM_STM_BASE 0x6311000ull
+#define DCORE1_SM_STM_MAX_OFFSET 0x1000
+#define DCORE1_SM_STM_SECTION 0x1000
+#define mmDCORE1_SM_CTI_BASE 0x6312000ull
+#define DCORE1_SM_CTI_MAX_OFFSET 0x1000
+#define DCORE1_SM_CTI_SECTION 0x1000
+#define mmDCORE1_SM_ETF_BASE 0x6313000ull
+#define DCORE1_SM_ETF_MAX_OFFSET 0x1000
+#define DCORE1_SM_ETF_SECTION 0x1000
+#define mmDCORE1_SM_SPMU_BASE 0x6314000ull
+#define DCORE1_SM_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_SM_SPMU_SECTION 0x1000
+#define mmDCORE1_SM_BMON_CTI_BASE 0x6315000ull
+#define DCORE1_SM_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_SM_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_SM_USER_CTI_BASE 0x6316000ull
+#define DCORE1_SM_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_SM_USER_CTI_SECTION 0x1000
+#define mmDCORE1_SM_BMON_BASE 0x6317000ull
+#define DCORE1_SM_BMON_MAX_OFFSET 0x1000
+#define DCORE1_SM_BMON_SECTION 0x1000
+#define mmDCORE1_SM_BMON1_BASE 0x6318000ull
+#define DCORE1_SM_BMON1_MAX_OFFSET 0x1000
+#define DCORE1_SM_BMON1_SECTION 0x18000
+#define mmDCORE1_XFT_FUNNEL_BASE 0x6330000ull
+#define DCORE1_XFT_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_XFT_FUNNEL_SECTION 0x8000
+#define mmDCORE1_TFT0_FUNNEL_BASE 0x6338000ull
+#define DCORE1_TFT0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TFT0_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TFT1_FUNNEL_BASE 0x6339000ull
+#define DCORE1_TFT1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TFT1_FUNNEL_SECTION 0x1000
+#define mmDCORE1_TFT2_FUNNEL_BASE 0x633A000ull
+#define DCORE1_TFT2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_TFT2_FUNNEL_SECTION 0x7000
+#define mmDCORE1_RTR0_FUNNEL_BASE 0x6341000ull
+#define DCORE1_RTR0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR0_FUNNEL_SECTION 0x4000
+#define mmDCORE1_MIF0_FUNNEL_BASE 0x6345000ull
+#define DCORE1_MIF0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_MIF0_FUNNEL_SECTION 0x4000
+#define mmDCORE1_RTR1_FUNNEL_BASE 0x6349000ull
+#define DCORE1_RTR1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR1_FUNNEL_SECTION 0x4000
+#define mmDCORE1_MIF1_FUNNEL_BASE 0x634D000ull
+#define DCORE1_MIF1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_MIF1_FUNNEL_SECTION 0x4000
+#define mmDCORE1_RTR2_FUNNEL_BASE 0x6351000ull
+#define DCORE1_RTR2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR2_FUNNEL_SECTION 0x4000
+#define mmDCORE1_MIF2_FUNNEL_BASE 0x6355000ull
+#define DCORE1_MIF2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_MIF2_FUNNEL_SECTION 0x4000
+#define mmDCORE1_RTR3_FUNNEL_BASE 0x6359000ull
+#define DCORE1_RTR3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR3_FUNNEL_SECTION 0x4000
+#define mmDCORE1_MIF3_FUNNEL_BASE 0x635D000ull
+#define DCORE1_MIF3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_MIF3_FUNNEL_SECTION 0x4000
+#define mmDCORE1_RTR4_FUNNEL_BASE 0x6361000ull
+#define DCORE1_RTR4_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR4_FUNNEL_SECTION 0x8000
+#define mmDCORE1_RTR5_FUNNEL_BASE 0x6369000ull
+#define DCORE1_RTR5_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR5_FUNNEL_SECTION 0x8000
+#define mmDCORE1_RTR6_FUNNEL_BASE 0x6371000ull
+#define DCORE1_RTR6_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR6_FUNNEL_SECTION 0x8000
+#define mmDCORE1_RTR7_FUNNEL_BASE 0x6379000ull
+#define DCORE1_RTR7_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_RTR7_FUNNEL_SECTION 0x47000
+#define mmDCORE1_EDMA0_CS_ROM_TBL_BASE 0x63C0000ull
+#define DCORE1_EDMA0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_EDMA0_CS_STM_BASE 0x63C1000ull
+#define DCORE1_EDMA0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_CS_STM_SECTION 0x1000
+#define mmDCORE1_EDMA0_CS_CTI_BASE 0x63C2000ull
+#define DCORE1_EDMA0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_CS_CTI_SECTION 0x1000
+#define mmDCORE1_EDMA0_CS_ETF_BASE 0x63C3000ull
+#define DCORE1_EDMA0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_CS_ETF_SECTION 0x1000
+#define mmDCORE1_EDMA0_CS_SPMU_BASE 0x63C4000ull
+#define DCORE1_EDMA0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_EDMA0_BMON_CTI_BASE 0x63C5000ull
+#define DCORE1_EDMA0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_EDMA0_USER_CTI_BASE 0x63C6000ull
+#define DCORE1_EDMA0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_USER_CTI_SECTION 0x1000
+#define mmDCORE1_EDMA0_BMON_0_BASE 0x63C7000ull
+#define DCORE1_EDMA0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_BMON_0_SECTION 0x1000
+#define mmDCORE1_EDMA0_BMON_1_BASE 0x63C8000ull
+#define DCORE1_EDMA0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_EDMA0_BMON_1_SECTION 0x1000
+#define mmDCORE1_EDMA0_QM_ARC_RTT_BASE 0x63C9000ull
+#define DCORE1_EDMA0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_EDMA0_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE1_EDMA1_CS_ROM_TBL_BASE 0x63D0000ull
+#define DCORE1_EDMA1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_EDMA1_CS_STM_BASE 0x63D1000ull
+#define DCORE1_EDMA1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_CS_STM_SECTION 0x1000
+#define mmDCORE1_EDMA1_CS_CTI_BASE 0x63D2000ull
+#define DCORE1_EDMA1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_CS_CTI_SECTION 0x1000
+#define mmDCORE1_EDMA1_CS_ETF_BASE 0x63D3000ull
+#define DCORE1_EDMA1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_CS_ETF_SECTION 0x1000
+#define mmDCORE1_EDMA1_CS_SPMU_BASE 0x63D4000ull
+#define DCORE1_EDMA1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_EDMA1_BMON_CTI_BASE 0x63D5000ull
+#define DCORE1_EDMA1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_EDMA1_USER_CTI_BASE 0x63D6000ull
+#define DCORE1_EDMA1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_USER_CTI_SECTION 0x1000
+#define mmDCORE1_EDMA1_BMON_0_BASE 0x63D7000ull
+#define DCORE1_EDMA1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_BMON_0_SECTION 0x1000
+#define mmDCORE1_EDMA1_BMON_1_BASE 0x63D8000ull
+#define DCORE1_EDMA1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_EDMA1_BMON_1_SECTION 0x1000
+#define mmDCORE1_EDMA1_QM_ARC_RTT_BASE 0x63D9000ull
+#define DCORE1_EDMA1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE1_EDMA1_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE1_VDEC0_CS_ROM_TBL_BASE 0x63E0000ull
+#define DCORE1_VDEC0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_VDEC0_CS_STM_BASE 0x63E1000ull
+#define DCORE1_VDEC0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_CS_STM_SECTION 0x1000
+#define mmDCORE1_VDEC0_CS_CTI_BASE 0x63E2000ull
+#define DCORE1_VDEC0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_CS_CTI_SECTION 0x1000
+#define mmDCORE1_VDEC0_CS_ETF_BASE 0x63E3000ull
+#define DCORE1_VDEC0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_CS_ETF_SECTION 0x1000
+#define mmDCORE1_VDEC0_CS_SPMU_BASE 0x63E4000ull
+#define DCORE1_VDEC0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_VDEC0_BMON_CTI_BASE 0x63E5000ull
+#define DCORE1_VDEC0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_VDEC0_USER_CTI_BASE 0x63E6000ull
+#define DCORE1_VDEC0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_USER_CTI_SECTION 0x1000
+#define mmDCORE1_VDEC0_BMON_0_BASE 0x63E7000ull
+#define DCORE1_VDEC0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_BMON_0_SECTION 0x1000
+#define mmDCORE1_VDEC0_BMON_1_BASE 0x63E8000ull
+#define DCORE1_VDEC0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_BMON_1_SECTION 0x1000
+#define mmDCORE1_VDEC0_BMON_2_BASE 0x63E9000ull
+#define DCORE1_VDEC0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE1_VDEC0_BMON_2_SECTION 0x7000
+#define mmDCORE1_VDEC1_CS_ROM_TBL_BASE 0x63F0000ull
+#define DCORE1_VDEC1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE1_VDEC1_CS_STM_BASE 0x63F1000ull
+#define DCORE1_VDEC1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_CS_STM_SECTION 0x1000
+#define mmDCORE1_VDEC1_CS_CTI_BASE 0x63F2000ull
+#define DCORE1_VDEC1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_CS_CTI_SECTION 0x1000
+#define mmDCORE1_VDEC1_CS_ETF_BASE 0x63F3000ull
+#define DCORE1_VDEC1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_CS_ETF_SECTION 0x1000
+#define mmDCORE1_VDEC1_CS_SPMU_BASE 0x63F4000ull
+#define DCORE1_VDEC1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_CS_SPMU_SECTION 0x1000
+#define mmDCORE1_VDEC1_BMON_CTI_BASE 0x63F5000ull
+#define DCORE1_VDEC1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_BMON_CTI_SECTION 0x1000
+#define mmDCORE1_VDEC1_USER_CTI_BASE 0x63F6000ull
+#define DCORE1_VDEC1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_USER_CTI_SECTION 0x1000
+#define mmDCORE1_VDEC1_BMON_0_BASE 0x63F7000ull
+#define DCORE1_VDEC1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_BMON_0_SECTION 0x1000
+#define mmDCORE1_VDEC1_BMON_1_BASE 0x63F8000ull
+#define DCORE1_VDEC1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_BMON_1_SECTION 0x1000
+#define mmDCORE1_VDEC1_BMON_2_BASE 0x63F9000ull
+#define DCORE1_VDEC1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE1_VDEC1_BMON_2_SECTION 0x7000
+#define mmDCORE2_ROM_TABLE_L_BASE 0x6400000ull
+#define DCORE2_ROM_TABLE_L_MAX_OFFSET 0x1000
+#define DCORE2_ROM_TABLE_L_SECTION 0x80000
+#define mmDCORE2_HMMU0_CS_ROM_TBL_BASE 0x6480000ull
+#define DCORE2_HMMU0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_HMMU0_CS_STM_BASE 0x6481000ull
+#define DCORE2_HMMU0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_CS_STM_SECTION 0x1000
+#define mmDCORE2_HMMU0_CS_CTI_BASE 0x6482000ull
+#define DCORE2_HMMU0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_CS_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU0_CS_ETF_BASE 0x6483000ull
+#define DCORE2_HMMU0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_CS_ETF_SECTION 0x1000
+#define mmDCORE2_HMMU0_CS_SPMU_BASE 0x6484000ull
+#define DCORE2_HMMU0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_HMMU0_BMON_CTI_BASE 0x6485000ull
+#define DCORE2_HMMU0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU0_USER_CTI_BASE 0x6486000ull
+#define DCORE2_HMMU0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_USER_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU0_BMON_0_BASE 0x6487000ull
+#define DCORE2_HMMU0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_BMON_0_SECTION 0x1000
+#define mmDCORE2_HMMU0_BMON_1_BASE 0x6488000ull
+#define DCORE2_HMMU0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_BMON_1_SECTION 0x1000
+#define mmDCORE2_HMMU0_BMON_3_BASE 0x6489000ull
+#define DCORE2_HMMU0_BMON_3_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_BMON_3_SECTION 0x1000
+#define mmDCORE2_HMMU0_BMON_2_BASE 0x648A000ull
+#define DCORE2_HMMU0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_BMON_2_SECTION 0x1000
+#define mmDCORE2_HMMU0_BMON_4_BASE 0x648B000ull
+#define DCORE2_HMMU0_BMON_4_MAX_OFFSET 0x1000
+#define DCORE2_HMMU0_BMON_4_SECTION 0x5000
+#define mmDCORE2_HMMU1_CS_ROM_TBL_BASE 0x6490000ull
+#define DCORE2_HMMU1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_HMMU1_CS_STM_BASE 0x6491000ull
+#define DCORE2_HMMU1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_CS_STM_SECTION 0x1000
+#define mmDCORE2_HMMU1_CS_CTI_BASE 0x6492000ull
+#define DCORE2_HMMU1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_CS_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU1_CS_ETF_BASE 0x6493000ull
+#define DCORE2_HMMU1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_CS_ETF_SECTION 0x1000
+#define mmDCORE2_HMMU1_CS_SPMU_BASE 0x6494000ull
+#define DCORE2_HMMU1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_HMMU1_BMON_CTI_BASE 0x6495000ull
+#define DCORE2_HMMU1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU1_USER_CTI_BASE 0x6496000ull
+#define DCORE2_HMMU1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_USER_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU1_BMON_0_BASE 0x6497000ull
+#define DCORE2_HMMU1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_BMON_0_SECTION 0x1000
+#define mmDCORE2_HMMU1_BMON_1_BASE 0x6498000ull
+#define DCORE2_HMMU1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_BMON_1_SECTION 0x1000
+#define mmDCORE2_HMMU1_BMON_3_BASE 0x6499000ull
+#define DCORE2_HMMU1_BMON_3_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_BMON_3_SECTION 0x1000
+#define mmDCORE2_HMMU1_BMON_2_BASE 0x649A000ull
+#define DCORE2_HMMU1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_BMON_2_SECTION 0x1000
+#define mmDCORE2_HMMU1_BMON_4_BASE 0x649B000ull
+#define DCORE2_HMMU1_BMON_4_MAX_OFFSET 0x1000
+#define DCORE2_HMMU1_BMON_4_SECTION 0x5000
+#define mmDCORE2_HMMU2_CS_ROM_TBL_BASE 0x64A0000ull
+#define DCORE2_HMMU2_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_HMMU2_CS_STM_BASE 0x64A1000ull
+#define DCORE2_HMMU2_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_CS_STM_SECTION 0x1000
+#define mmDCORE2_HMMU2_CS_CTI_BASE 0x64A2000ull
+#define DCORE2_HMMU2_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_CS_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU2_CS_ETF_BASE 0x64A3000ull
+#define DCORE2_HMMU2_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_CS_ETF_SECTION 0x1000
+#define mmDCORE2_HMMU2_CS_SPMU_BASE 0x64A4000ull
+#define DCORE2_HMMU2_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_HMMU2_BMON_CTI_BASE 0x64A5000ull
+#define DCORE2_HMMU2_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU2_USER_CTI_BASE 0x64A6000ull
+#define DCORE2_HMMU2_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_USER_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU2_BMON_0_BASE 0x64A7000ull
+#define DCORE2_HMMU2_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_BMON_0_SECTION 0x1000
+#define mmDCORE2_HMMU2_BMON_1_BASE 0x64A8000ull
+#define DCORE2_HMMU2_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_BMON_1_SECTION 0x1000
+#define mmDCORE2_HMMU2_BMON_3_BASE 0x64A9000ull
+#define DCORE2_HMMU2_BMON_3_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_BMON_3_SECTION 0x1000
+#define mmDCORE2_HMMU2_BMON_2_BASE 0x64AA000ull
+#define DCORE2_HMMU2_BMON_2_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_BMON_2_SECTION 0x1000
+#define mmDCORE2_HMMU2_BMON_4_BASE 0x64AB000ull
+#define DCORE2_HMMU2_BMON_4_MAX_OFFSET 0x1000
+#define DCORE2_HMMU2_BMON_4_SECTION 0x5000
+#define mmDCORE2_HMMU3_CS_ROM_TBL_BASE 0x64B0000ull
+#define DCORE2_HMMU3_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_HMMU3_CS_STM_BASE 0x64B1000ull
+#define DCORE2_HMMU3_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_CS_STM_SECTION 0x1000
+#define mmDCORE2_HMMU3_CS_CTI_BASE 0x64B2000ull
+#define DCORE2_HMMU3_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_CS_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU3_CS_ETF_BASE 0x64B3000ull
+#define DCORE2_HMMU3_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_CS_ETF_SECTION 0x1000
+#define mmDCORE2_HMMU3_CS_SPMU_BASE 0x64B4000ull
+#define DCORE2_HMMU3_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_HMMU3_BMON_CTI_BASE 0x64B5000ull
+#define DCORE2_HMMU3_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU3_USER_CTI_BASE 0x64B6000ull
+#define DCORE2_HMMU3_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_USER_CTI_SECTION 0x1000
+#define mmDCORE2_HMMU3_BMON_0_BASE 0x64B7000ull
+#define DCORE2_HMMU3_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_BMON_0_SECTION 0x1000
+#define mmDCORE2_HMMU3_BMON_1_BASE 0x64B8000ull
+#define DCORE2_HMMU3_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_BMON_1_SECTION 0x1000
+#define mmDCORE2_HMMU3_BMON_3_BASE 0x64B9000ull
+#define DCORE2_HMMU3_BMON_3_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_BMON_3_SECTION 0x1000
+#define mmDCORE2_HMMU3_BMON_2_BASE 0x64BA000ull
+#define DCORE2_HMMU3_BMON_2_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_BMON_2_SECTION 0x1000
+#define mmDCORE2_HMMU3_BMON_4_BASE 0x64BB000ull
+#define DCORE2_HMMU3_BMON_4_MAX_OFFSET 0x1000
+#define DCORE2_HMMU3_BMON_4_SECTION 0x5000
+#define mmDCORE2_MME_CTRL_ROM_TABLE_BASE 0x64C0000ull
+#define DCORE2_MME_CTRL_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_ROM_TABLE_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_STM_BASE 0x64C1000ull
+#define DCORE2_MME_CTRL_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_STM_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_CTI_BASE 0x64C2000ull
+#define DCORE2_MME_CTRL_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_CTI_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_ETF_BASE 0x64C3000ull
+#define DCORE2_MME_CTRL_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_ETF_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_SPMU_BASE 0x64C4000ull
+#define DCORE2_MME_CTRL_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_CTI0_BASE 0x64C5000ull
+#define DCORE2_MME_CTRL_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_CTI1_BASE 0x64C6000ull
+#define DCORE2_MME_CTRL_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_BMON0_BASE 0x64C7000ull
+#define DCORE2_MME_CTRL_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_BMON0_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_BMON1_BASE 0x64C8000ull
+#define DCORE2_MME_CTRL_BMON1_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_BMON1_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_BMON2_BASE 0x64C9000ull
+#define DCORE2_MME_CTRL_BMON2_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_BMON2_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_BMON3_BASE 0x64CA000ull
+#define DCORE2_MME_CTRL_BMON3_MAX_OFFSET 0x1000
+#define DCORE2_MME_CTRL_BMON3_SECTION 0x1000
+#define mmDCORE2_MME_CTRL_ARC_RTT_BASE 0x64CB000ull
+#define DCORE2_MME_CTRL_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_MME_CTRL_ARC_RTT_SECTION 0x5000
+#define mmDCORE2_MME_SBTE0_ROM_TBL_BASE 0x64D0000ull
+#define DCORE2_MME_SBTE0_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_STM_BASE 0x64D1000ull
+#define DCORE2_MME_SBTE0_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_STM_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_CTI_BASE 0x64D2000ull
+#define DCORE2_MME_SBTE0_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_CTI_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_ETF_BASE 0x64D3000ull
+#define DCORE2_MME_SBTE0_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_ETF_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_SPMU_BASE 0x64D4000ull
+#define DCORE2_MME_SBTE0_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_CTI0_BASE 0x64D5000ull
+#define DCORE2_MME_SBTE0_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_CTI1_BASE 0x64D6000ull
+#define DCORE2_MME_SBTE0_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_SBTE0_BMON0_BASE 0x64D7000ull
+#define DCORE2_MME_SBTE0_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE0_BMON0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_ROM_TBL_BASE 0x64D8000ull
+#define DCORE2_MME_SBTE1_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_STM_BASE 0x64D9000ull
+#define DCORE2_MME_SBTE1_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_STM_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_CTI_BASE 0x64DA000ull
+#define DCORE2_MME_SBTE1_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_CTI_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_ETF_BASE 0x64DB000ull
+#define DCORE2_MME_SBTE1_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_ETF_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_SPMU_BASE 0x64DC000ull
+#define DCORE2_MME_SBTE1_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_CTI0_BASE 0x64DD000ull
+#define DCORE2_MME_SBTE1_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_CTI1_BASE 0x64DE000ull
+#define DCORE2_MME_SBTE1_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_SBTE1_BMON0_BASE 0x64DF000ull
+#define DCORE2_MME_SBTE1_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE1_BMON0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_ROM_TBL_BASE 0x64E0000ull
+#define DCORE2_MME_SBTE2_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_STM_BASE 0x64E1000ull
+#define DCORE2_MME_SBTE2_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_STM_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_CTI_BASE 0x64E2000ull
+#define DCORE2_MME_SBTE2_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_CTI_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_ETF_BASE 0x64E3000ull
+#define DCORE2_MME_SBTE2_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_ETF_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_SPMU_BASE 0x64E4000ull
+#define DCORE2_MME_SBTE2_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_CTI0_BASE 0x64E5000ull
+#define DCORE2_MME_SBTE2_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_CTI1_BASE 0x64E6000ull
+#define DCORE2_MME_SBTE2_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_SBTE2_BMON0_BASE 0x64E7000ull
+#define DCORE2_MME_SBTE2_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE2_BMON0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_ROM_TBL_BASE 0x64E8000ull
+#define DCORE2_MME_SBTE3_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_STM_BASE 0x64E9000ull
+#define DCORE2_MME_SBTE3_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_STM_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_CTI_BASE 0x64EA000ull
+#define DCORE2_MME_SBTE3_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_CTI_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_ETF_BASE 0x64EB000ull
+#define DCORE2_MME_SBTE3_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_ETF_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_SPMU_BASE 0x64EC000ull
+#define DCORE2_MME_SBTE3_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_CTI0_BASE 0x64ED000ull
+#define DCORE2_MME_SBTE3_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_CTI1_BASE 0x64EE000ull
+#define DCORE2_MME_SBTE3_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_SBTE3_BMON0_BASE 0x64EF000ull
+#define DCORE2_MME_SBTE3_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE3_BMON0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_ROM_TBL_BASE 0x64F0000ull
+#define DCORE2_MME_SBTE4_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_STM_BASE 0x64F1000ull
+#define DCORE2_MME_SBTE4_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_STM_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_CTI_BASE 0x64F2000ull
+#define DCORE2_MME_SBTE4_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_CTI_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_ETF_BASE 0x64F3000ull
+#define DCORE2_MME_SBTE4_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_ETF_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_SPMU_BASE 0x64F4000ull
+#define DCORE2_MME_SBTE4_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_CTI0_BASE 0x64F5000ull
+#define DCORE2_MME_SBTE4_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_CTI1_BASE 0x64F6000ull
+#define DCORE2_MME_SBTE4_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_SBTE4_BMON0_BASE 0x64F7000ull
+#define DCORE2_MME_SBTE4_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_SBTE4_BMON0_SECTION 0x9000
+#define mmDCORE2_MME_ACC_CS_ROM_TBL_BASE 0x6500000ull
+#define DCORE2_MME_ACC_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_MME_ACC_STM_BASE 0x6501000ull
+#define DCORE2_MME_ACC_STM_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_STM_SECTION 0x1000
+#define mmDCORE2_MME_ACC_CTI_BASE 0x6502000ull
+#define DCORE2_MME_ACC_CTI_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_CTI_SECTION 0x1000
+#define mmDCORE2_MME_ACC_ETF_BASE 0x6503000ull
+#define DCORE2_MME_ACC_ETF_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_ETF_SECTION 0x1000
+#define mmDCORE2_MME_ACC_SPMU_BASE 0x6504000ull
+#define DCORE2_MME_ACC_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_SPMU_SECTION 0x1000
+#define mmDCORE2_MME_ACC_CTI0_BASE 0x6505000ull
+#define DCORE2_MME_ACC_CTI0_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_CTI0_SECTION 0x1000
+#define mmDCORE2_MME_ACC_CTI1_BASE 0x6506000ull
+#define DCORE2_MME_ACC_CTI1_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_CTI1_SECTION 0x1000
+#define mmDCORE2_MME_ACC_BMON0_BASE 0x6507000ull
+#define DCORE2_MME_ACC_BMON0_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_BMON0_SECTION 0x1000
+#define mmDCORE2_MME_ACC_BMON1_BASE 0x6508000ull
+#define DCORE2_MME_ACC_BMON1_MAX_OFFSET 0x1000
+#define DCORE2_MME_ACC_BMON1_SECTION 0x8000
+#define mmDCORE2_SM_CS_DBG_ROM_TBL_BASE 0x6510000ull
+#define DCORE2_SM_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_SM_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_SM_STM_BASE 0x6511000ull
+#define DCORE2_SM_STM_MAX_OFFSET 0x1000
+#define DCORE2_SM_STM_SECTION 0x1000
+#define mmDCORE2_SM_CTI_BASE 0x6512000ull
+#define DCORE2_SM_CTI_MAX_OFFSET 0x1000
+#define DCORE2_SM_CTI_SECTION 0x1000
+#define mmDCORE2_SM_ETF_BASE 0x6513000ull
+#define DCORE2_SM_ETF_MAX_OFFSET 0x1000
+#define DCORE2_SM_ETF_SECTION 0x1000
+#define mmDCORE2_SM_SPMU_BASE 0x6514000ull
+#define DCORE2_SM_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_SM_SPMU_SECTION 0x1000
+#define mmDCORE2_SM_BMON_CTI_BASE 0x6515000ull
+#define DCORE2_SM_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_SM_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_SM_USER_CTI_BASE 0x6516000ull
+#define DCORE2_SM_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_SM_USER_CTI_SECTION 0x1000
+#define mmDCORE2_SM_BMON_BASE 0x6517000ull
+#define DCORE2_SM_BMON_MAX_OFFSET 0x1000
+#define DCORE2_SM_BMON_SECTION 0x1000
+#define mmDCORE2_SM_BMON1_BASE 0x6518000ull
+#define DCORE2_SM_BMON1_MAX_OFFSET 0x1000
+#define DCORE2_SM_BMON1_SECTION 0x18000
+#define mmDCORE2_XFT_FUNNEL_BASE 0x6530000ull
+#define DCORE2_XFT_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_XFT_FUNNEL_SECTION 0x8000
+#define mmDCORE2_TFT0_FUNNEL_BASE 0x6538000ull
+#define DCORE2_TFT0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TFT0_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TFT1_FUNNEL_BASE 0x6539000ull
+#define DCORE2_TFT1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TFT1_FUNNEL_SECTION 0x1000
+#define mmDCORE2_TFT2_FUNNEL_BASE 0x653A000ull
+#define DCORE2_TFT2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_TFT2_FUNNEL_SECTION 0x7000
+#define mmDCORE2_RTR0_FUNNEL_BASE 0x6541000ull
+#define DCORE2_RTR0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR0_FUNNEL_SECTION 0x8000
+#define mmDCORE2_RTR1_FUNNEL_BASE 0x6549000ull
+#define DCORE2_RTR1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR1_FUNNEL_SECTION 0x8000
+#define mmDCORE2_RTR2_FUNNEL_BASE 0x6551000ull
+#define DCORE2_RTR2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR2_FUNNEL_SECTION 0x8000
+#define mmDCORE2_RTR3_FUNNEL_BASE 0x6559000ull
+#define DCORE2_RTR3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR3_FUNNEL_SECTION 0x8000
+#define mmDCORE2_RTR4_FUNNEL_BASE 0x6561000ull
+#define DCORE2_RTR4_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR4_FUNNEL_SECTION 0x4000
+#define mmDCORE2_MIF0_FUNNEL_BASE 0x6565000ull
+#define DCORE2_MIF0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_MIF0_FUNNEL_SECTION 0x4000
+#define mmDCORE2_RTR5_FUNNEL_BASE 0x6569000ull
+#define DCORE2_RTR5_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR5_FUNNEL_SECTION 0x4000
+#define mmDCORE2_MIF1_FUNNEL_BASE 0x656D000ull
+#define DCORE2_MIF1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_MIF1_FUNNEL_SECTION 0x4000
+#define mmDCORE2_RTR6_FUNNEL_BASE 0x6571000ull
+#define DCORE2_RTR6_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR6_FUNNEL_SECTION 0x4000
+#define mmDCORE2_MIF2_FUNNEL_BASE 0x6575000ull
+#define DCORE2_MIF2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_MIF2_FUNNEL_SECTION 0x4000
+#define mmDCORE2_RTR7_FUNNEL_BASE 0x6579000ull
+#define DCORE2_RTR7_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_RTR7_FUNNEL_SECTION 0x4000
+#define mmDCORE2_MIF3_FUNNEL_BASE 0x657D000ull
+#define DCORE2_MIF3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_MIF3_FUNNEL_SECTION 0x43000
+#define mmDCORE2_EDMA0_CS_ROM_TBL_BASE 0x65C0000ull
+#define DCORE2_EDMA0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_EDMA0_CS_STM_BASE 0x65C1000ull
+#define DCORE2_EDMA0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_CS_STM_SECTION 0x1000
+#define mmDCORE2_EDMA0_CS_CTI_BASE 0x65C2000ull
+#define DCORE2_EDMA0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_CS_CTI_SECTION 0x1000
+#define mmDCORE2_EDMA0_CS_ETF_BASE 0x65C3000ull
+#define DCORE2_EDMA0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_CS_ETF_SECTION 0x1000
+#define mmDCORE2_EDMA0_CS_SPMU_BASE 0x65C4000ull
+#define DCORE2_EDMA0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_EDMA0_BMON_CTI_BASE 0x65C5000ull
+#define DCORE2_EDMA0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_EDMA0_USER_CTI_BASE 0x65C6000ull
+#define DCORE2_EDMA0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_USER_CTI_SECTION 0x1000
+#define mmDCORE2_EDMA0_BMON_0_BASE 0x65C7000ull
+#define DCORE2_EDMA0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_BMON_0_SECTION 0x1000
+#define mmDCORE2_EDMA0_BMON_1_BASE 0x65C8000ull
+#define DCORE2_EDMA0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_EDMA0_BMON_1_SECTION 0x1000
+#define mmDCORE2_EDMA0_QM_ARC_RTT_BASE 0x65C9000ull
+#define DCORE2_EDMA0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_EDMA0_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE2_EDMA1_CS_ROM_TBL_BASE 0x65D0000ull
+#define DCORE2_EDMA1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_EDMA1_CS_STM_BASE 0x65D1000ull
+#define DCORE2_EDMA1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_CS_STM_SECTION 0x1000
+#define mmDCORE2_EDMA1_CS_CTI_BASE 0x65D2000ull
+#define DCORE2_EDMA1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_CS_CTI_SECTION 0x1000
+#define mmDCORE2_EDMA1_CS_ETF_BASE 0x65D3000ull
+#define DCORE2_EDMA1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_CS_ETF_SECTION 0x1000
+#define mmDCORE2_EDMA1_CS_SPMU_BASE 0x65D4000ull
+#define DCORE2_EDMA1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_EDMA1_BMON_CTI_BASE 0x65D5000ull
+#define DCORE2_EDMA1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_EDMA1_USER_CTI_BASE 0x65D6000ull
+#define DCORE2_EDMA1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_USER_CTI_SECTION 0x1000
+#define mmDCORE2_EDMA1_BMON_0_BASE 0x65D7000ull
+#define DCORE2_EDMA1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_BMON_0_SECTION 0x1000
+#define mmDCORE2_EDMA1_BMON_1_BASE 0x65D8000ull
+#define DCORE2_EDMA1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_EDMA1_BMON_1_SECTION 0x1000
+#define mmDCORE2_EDMA1_QM_ARC_RTT_BASE 0x65D9000ull
+#define DCORE2_EDMA1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE2_EDMA1_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE2_VDEC0_CS_ROM_TBL_BASE 0x65E0000ull
+#define DCORE2_VDEC0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_VDEC0_CS_STM_BASE 0x65E1000ull
+#define DCORE2_VDEC0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_CS_STM_SECTION 0x1000
+#define mmDCORE2_VDEC0_CS_CTI_BASE 0x65E2000ull
+#define DCORE2_VDEC0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_CS_CTI_SECTION 0x1000
+#define mmDCORE2_VDEC0_CS_ETF_BASE 0x65E3000ull
+#define DCORE2_VDEC0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_CS_ETF_SECTION 0x1000
+#define mmDCORE2_VDEC0_CS_SPMU_BASE 0x65E4000ull
+#define DCORE2_VDEC0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_VDEC0_BMON_CTI_BASE 0x65E5000ull
+#define DCORE2_VDEC0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_VDEC0_USER_CTI_BASE 0x65E6000ull
+#define DCORE2_VDEC0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_USER_CTI_SECTION 0x1000
+#define mmDCORE2_VDEC0_BMON_0_BASE 0x65E7000ull
+#define DCORE2_VDEC0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_BMON_0_SECTION 0x1000
+#define mmDCORE2_VDEC0_BMON_1_BASE 0x65E8000ull
+#define DCORE2_VDEC0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_BMON_1_SECTION 0x1000
+#define mmDCORE2_VDEC0_BMON_2_BASE 0x65E9000ull
+#define DCORE2_VDEC0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE2_VDEC0_BMON_2_SECTION 0x7000
+#define mmDCORE2_VDEC1_CS_ROM_TBL_BASE 0x65F0000ull
+#define DCORE2_VDEC1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE2_VDEC1_CS_STM_BASE 0x65F1000ull
+#define DCORE2_VDEC1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_CS_STM_SECTION 0x1000
+#define mmDCORE2_VDEC1_CS_CTI_BASE 0x65F2000ull
+#define DCORE2_VDEC1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_CS_CTI_SECTION 0x1000
+#define mmDCORE2_VDEC1_CS_ETF_BASE 0x65F3000ull
+#define DCORE2_VDEC1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_CS_ETF_SECTION 0x1000
+#define mmDCORE2_VDEC1_CS_SPMU_BASE 0x65F4000ull
+#define DCORE2_VDEC1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_CS_SPMU_SECTION 0x1000
+#define mmDCORE2_VDEC1_BMON_CTI_BASE 0x65F5000ull
+#define DCORE2_VDEC1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_BMON_CTI_SECTION 0x1000
+#define mmDCORE2_VDEC1_USER_CTI_BASE 0x65F6000ull
+#define DCORE2_VDEC1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_USER_CTI_SECTION 0x1000
+#define mmDCORE2_VDEC1_BMON_0_BASE 0x65F7000ull
+#define DCORE2_VDEC1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_BMON_0_SECTION 0x1000
+#define mmDCORE2_VDEC1_BMON_1_BASE 0x65F8000ull
+#define DCORE2_VDEC1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_BMON_1_SECTION 0x1000
+#define mmDCORE2_VDEC1_BMON_2_BASE 0x65F9000ull
+#define DCORE2_VDEC1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE2_VDEC1_BMON_2_SECTION 0x7000
+#define mmDCORE3_ROM_TABLE_L_BASE 0x6600000ull
+#define DCORE3_ROM_TABLE_L_MAX_OFFSET 0x1000
+#define DCORE3_ROM_TABLE_L_SECTION 0x80000
+#define mmDCORE3_HMMU0_CS_ROM_TBL_BASE 0x6680000ull
+#define DCORE3_HMMU0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_HMMU0_CS_STM_BASE 0x6681000ull
+#define DCORE3_HMMU0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_CS_STM_SECTION 0x1000
+#define mmDCORE3_HMMU0_CS_CTI_BASE 0x6682000ull
+#define DCORE3_HMMU0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_CS_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU0_CS_ETF_BASE 0x6683000ull
+#define DCORE3_HMMU0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_CS_ETF_SECTION 0x1000
+#define mmDCORE3_HMMU0_CS_SPMU_BASE 0x6684000ull
+#define DCORE3_HMMU0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_HMMU0_BMON_CTI_BASE 0x6685000ull
+#define DCORE3_HMMU0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU0_USER_CTI_BASE 0x6686000ull
+#define DCORE3_HMMU0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_USER_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU0_BMON_0_BASE 0x6687000ull
+#define DCORE3_HMMU0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_BMON_0_SECTION 0x1000
+#define mmDCORE3_HMMU0_BMON_1_BASE 0x6688000ull
+#define DCORE3_HMMU0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_BMON_1_SECTION 0x1000
+#define mmDCORE3_HMMU0_BMON_3_BASE 0x6689000ull
+#define DCORE3_HMMU0_BMON_3_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_BMON_3_SECTION 0x1000
+#define mmDCORE3_HMMU0_BMON_2_BASE 0x668A000ull
+#define DCORE3_HMMU0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_BMON_2_SECTION 0x1000
+#define mmDCORE3_HMMU0_BMON_4_BASE 0x668B000ull
+#define DCORE3_HMMU0_BMON_4_MAX_OFFSET 0x1000
+#define DCORE3_HMMU0_BMON_4_SECTION 0x5000
+#define mmDCORE3_HMMU1_CS_ROM_TBL_BASE 0x6690000ull
+#define DCORE3_HMMU1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_HMMU1_CS_STM_BASE 0x6691000ull
+#define DCORE3_HMMU1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_CS_STM_SECTION 0x1000
+#define mmDCORE3_HMMU1_CS_CTI_BASE 0x6692000ull
+#define DCORE3_HMMU1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_CS_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU1_CS_ETF_BASE 0x6693000ull
+#define DCORE3_HMMU1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_CS_ETF_SECTION 0x1000
+#define mmDCORE3_HMMU1_CS_SPMU_BASE 0x6694000ull
+#define DCORE3_HMMU1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_HMMU1_BMON_CTI_BASE 0x6695000ull
+#define DCORE3_HMMU1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU1_USER_CTI_BASE 0x6696000ull
+#define DCORE3_HMMU1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_USER_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU1_BMON_0_BASE 0x6697000ull
+#define DCORE3_HMMU1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_BMON_0_SECTION 0x1000
+#define mmDCORE3_HMMU1_BMON_1_BASE 0x6698000ull
+#define DCORE3_HMMU1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_BMON_1_SECTION 0x1000
+#define mmDCORE3_HMMU1_BMON_3_BASE 0x6699000ull
+#define DCORE3_HMMU1_BMON_3_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_BMON_3_SECTION 0x1000
+#define mmDCORE3_HMMU1_BMON_2_BASE 0x669A000ull
+#define DCORE3_HMMU1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_BMON_2_SECTION 0x1000
+#define mmDCORE3_HMMU1_BMON_4_BASE 0x669B000ull
+#define DCORE3_HMMU1_BMON_4_MAX_OFFSET 0x1000
+#define DCORE3_HMMU1_BMON_4_SECTION 0x5000
+#define mmDCORE3_HMMU2_CS_ROM_TBL_BASE 0x66A0000ull
+#define DCORE3_HMMU2_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_HMMU2_CS_STM_BASE 0x66A1000ull
+#define DCORE3_HMMU2_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_CS_STM_SECTION 0x1000
+#define mmDCORE3_HMMU2_CS_CTI_BASE 0x66A2000ull
+#define DCORE3_HMMU2_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_CS_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU2_CS_ETF_BASE 0x66A3000ull
+#define DCORE3_HMMU2_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_CS_ETF_SECTION 0x1000
+#define mmDCORE3_HMMU2_CS_SPMU_BASE 0x66A4000ull
+#define DCORE3_HMMU2_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_HMMU2_BMON_CTI_BASE 0x66A5000ull
+#define DCORE3_HMMU2_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU2_USER_CTI_BASE 0x66A6000ull
+#define DCORE3_HMMU2_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_USER_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU2_BMON_0_BASE 0x66A7000ull
+#define DCORE3_HMMU2_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_BMON_0_SECTION 0x1000
+#define mmDCORE3_HMMU2_BMON_1_BASE 0x66A8000ull
+#define DCORE3_HMMU2_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_BMON_1_SECTION 0x1000
+#define mmDCORE3_HMMU2_BMON_3_BASE 0x66A9000ull
+#define DCORE3_HMMU2_BMON_3_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_BMON_3_SECTION 0x1000
+#define mmDCORE3_HMMU2_BMON_2_BASE 0x66AA000ull
+#define DCORE3_HMMU2_BMON_2_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_BMON_2_SECTION 0x1000
+#define mmDCORE3_HMMU2_BMON_4_BASE 0x66AB000ull
+#define DCORE3_HMMU2_BMON_4_MAX_OFFSET 0x1000
+#define DCORE3_HMMU2_BMON_4_SECTION 0x5000
+#define mmDCORE3_HMMU3_CS_ROM_TBL_BASE 0x66B0000ull
+#define DCORE3_HMMU3_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_HMMU3_CS_STM_BASE 0x66B1000ull
+#define DCORE3_HMMU3_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_CS_STM_SECTION 0x1000
+#define mmDCORE3_HMMU3_CS_CTI_BASE 0x66B2000ull
+#define DCORE3_HMMU3_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_CS_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU3_CS_ETF_BASE 0x66B3000ull
+#define DCORE3_HMMU3_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_CS_ETF_SECTION 0x1000
+#define mmDCORE3_HMMU3_CS_SPMU_BASE 0x66B4000ull
+#define DCORE3_HMMU3_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_HMMU3_BMON_CTI_BASE 0x66B5000ull
+#define DCORE3_HMMU3_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU3_USER_CTI_BASE 0x66B6000ull
+#define DCORE3_HMMU3_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_USER_CTI_SECTION 0x1000
+#define mmDCORE3_HMMU3_BMON_0_BASE 0x66B7000ull
+#define DCORE3_HMMU3_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_BMON_0_SECTION 0x1000
+#define mmDCORE3_HMMU3_BMON_1_BASE 0x66B8000ull
+#define DCORE3_HMMU3_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_BMON_1_SECTION 0x1000
+#define mmDCORE3_HMMU3_BMON_3_BASE 0x66B9000ull
+#define DCORE3_HMMU3_BMON_3_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_BMON_3_SECTION 0x1000
+#define mmDCORE3_HMMU3_BMON_2_BASE 0x66BA000ull
+#define DCORE3_HMMU3_BMON_2_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_BMON_2_SECTION 0x1000
+#define mmDCORE3_HMMU3_BMON_4_BASE 0x66BB000ull
+#define DCORE3_HMMU3_BMON_4_MAX_OFFSET 0x1000
+#define DCORE3_HMMU3_BMON_4_SECTION 0x5000
+#define mmDCORE3_MME_CTRL_ROM_TABLE_BASE 0x66C0000ull
+#define DCORE3_MME_CTRL_ROM_TABLE_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_ROM_TABLE_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_STM_BASE 0x66C1000ull
+#define DCORE3_MME_CTRL_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_STM_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_CTI_BASE 0x66C2000ull
+#define DCORE3_MME_CTRL_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_CTI_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_ETF_BASE 0x66C3000ull
+#define DCORE3_MME_CTRL_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_ETF_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_SPMU_BASE 0x66C4000ull
+#define DCORE3_MME_CTRL_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_CTI0_BASE 0x66C5000ull
+#define DCORE3_MME_CTRL_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_CTI1_BASE 0x66C6000ull
+#define DCORE3_MME_CTRL_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_BMON0_BASE 0x66C7000ull
+#define DCORE3_MME_CTRL_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_BMON0_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_BMON1_BASE 0x66C8000ull
+#define DCORE3_MME_CTRL_BMON1_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_BMON1_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_BMON2_BASE 0x66C9000ull
+#define DCORE3_MME_CTRL_BMON2_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_BMON2_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_BMON3_BASE 0x66CA000ull
+#define DCORE3_MME_CTRL_BMON3_MAX_OFFSET 0x1000
+#define DCORE3_MME_CTRL_BMON3_SECTION 0x1000
+#define mmDCORE3_MME_CTRL_ARC_RTT_BASE 0x66CB000ull
+#define DCORE3_MME_CTRL_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_MME_CTRL_ARC_RTT_SECTION 0x5000
+#define mmDCORE3_MME_SBTE0_ROM_TBL_BASE 0x66D0000ull
+#define DCORE3_MME_SBTE0_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_STM_BASE 0x66D1000ull
+#define DCORE3_MME_SBTE0_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_STM_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_CTI_BASE 0x66D2000ull
+#define DCORE3_MME_SBTE0_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_CTI_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_ETF_BASE 0x66D3000ull
+#define DCORE3_MME_SBTE0_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_ETF_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_SPMU_BASE 0x66D4000ull
+#define DCORE3_MME_SBTE0_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_CTI0_BASE 0x66D5000ull
+#define DCORE3_MME_SBTE0_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_CTI1_BASE 0x66D6000ull
+#define DCORE3_MME_SBTE0_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_SBTE0_BMON0_BASE 0x66D7000ull
+#define DCORE3_MME_SBTE0_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE0_BMON0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_ROM_TBL_BASE 0x66D8000ull
+#define DCORE3_MME_SBTE1_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_STM_BASE 0x66D9000ull
+#define DCORE3_MME_SBTE1_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_STM_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_CTI_BASE 0x66DA000ull
+#define DCORE3_MME_SBTE1_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_CTI_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_ETF_BASE 0x66DB000ull
+#define DCORE3_MME_SBTE1_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_ETF_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_SPMU_BASE 0x66DC000ull
+#define DCORE3_MME_SBTE1_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_CTI0_BASE 0x66DD000ull
+#define DCORE3_MME_SBTE1_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_CTI1_BASE 0x66DE000ull
+#define DCORE3_MME_SBTE1_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_SBTE1_BMON0_BASE 0x66DF000ull
+#define DCORE3_MME_SBTE1_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE1_BMON0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_ROM_TBL_BASE 0x66E0000ull
+#define DCORE3_MME_SBTE2_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_STM_BASE 0x66E1000ull
+#define DCORE3_MME_SBTE2_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_STM_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_CTI_BASE 0x66E2000ull
+#define DCORE3_MME_SBTE2_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_CTI_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_ETF_BASE 0x66E3000ull
+#define DCORE3_MME_SBTE2_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_ETF_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_SPMU_BASE 0x66E4000ull
+#define DCORE3_MME_SBTE2_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_CTI0_BASE 0x66E5000ull
+#define DCORE3_MME_SBTE2_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_CTI1_BASE 0x66E6000ull
+#define DCORE3_MME_SBTE2_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_SBTE2_BMON0_BASE 0x66E7000ull
+#define DCORE3_MME_SBTE2_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE2_BMON0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_ROM_TBL_BASE 0x66E8000ull
+#define DCORE3_MME_SBTE3_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_STM_BASE 0x66E9000ull
+#define DCORE3_MME_SBTE3_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_STM_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_CTI_BASE 0x66EA000ull
+#define DCORE3_MME_SBTE3_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_CTI_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_ETF_BASE 0x66EB000ull
+#define DCORE3_MME_SBTE3_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_ETF_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_SPMU_BASE 0x66EC000ull
+#define DCORE3_MME_SBTE3_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_CTI0_BASE 0x66ED000ull
+#define DCORE3_MME_SBTE3_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_CTI1_BASE 0x66EE000ull
+#define DCORE3_MME_SBTE3_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_SBTE3_BMON0_BASE 0x66EF000ull
+#define DCORE3_MME_SBTE3_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE3_BMON0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_ROM_TBL_BASE 0x66F0000ull
+#define DCORE3_MME_SBTE4_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_STM_BASE 0x66F1000ull
+#define DCORE3_MME_SBTE4_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_STM_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_CTI_BASE 0x66F2000ull
+#define DCORE3_MME_SBTE4_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_CTI_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_ETF_BASE 0x66F3000ull
+#define DCORE3_MME_SBTE4_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_ETF_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_SPMU_BASE 0x66F4000ull
+#define DCORE3_MME_SBTE4_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_CTI0_BASE 0x66F5000ull
+#define DCORE3_MME_SBTE4_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_CTI1_BASE 0x66F6000ull
+#define DCORE3_MME_SBTE4_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_SBTE4_BMON0_BASE 0x66F7000ull
+#define DCORE3_MME_SBTE4_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_SBTE4_BMON0_SECTION 0x9000
+#define mmDCORE3_MME_ACC_CS_ROM_TBL_BASE 0x6700000ull
+#define DCORE3_MME_ACC_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_MME_ACC_STM_BASE 0x6701000ull
+#define DCORE3_MME_ACC_STM_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_STM_SECTION 0x1000
+#define mmDCORE3_MME_ACC_CTI_BASE 0x6702000ull
+#define DCORE3_MME_ACC_CTI_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_CTI_SECTION 0x1000
+#define mmDCORE3_MME_ACC_ETF_BASE 0x6703000ull
+#define DCORE3_MME_ACC_ETF_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_ETF_SECTION 0x1000
+#define mmDCORE3_MME_ACC_SPMU_BASE 0x6704000ull
+#define DCORE3_MME_ACC_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_SPMU_SECTION 0x1000
+#define mmDCORE3_MME_ACC_CTI0_BASE 0x6705000ull
+#define DCORE3_MME_ACC_CTI0_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_CTI0_SECTION 0x1000
+#define mmDCORE3_MME_ACC_CTI1_BASE 0x6706000ull
+#define DCORE3_MME_ACC_CTI1_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_CTI1_SECTION 0x1000
+#define mmDCORE3_MME_ACC_BMON0_BASE 0x6707000ull
+#define DCORE3_MME_ACC_BMON0_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_BMON0_SECTION 0x1000
+#define mmDCORE3_MME_ACC_BMON1_BASE 0x6708000ull
+#define DCORE3_MME_ACC_BMON1_MAX_OFFSET 0x1000
+#define DCORE3_MME_ACC_BMON1_SECTION 0x8000
+#define mmDCORE3_SM_CS_DBG_ROM_TBL_BASE 0x6710000ull
+#define DCORE3_SM_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_SM_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_SM_STM_BASE 0x6711000ull
+#define DCORE3_SM_STM_MAX_OFFSET 0x1000
+#define DCORE3_SM_STM_SECTION 0x1000
+#define mmDCORE3_SM_CTI_BASE 0x6712000ull
+#define DCORE3_SM_CTI_MAX_OFFSET 0x1000
+#define DCORE3_SM_CTI_SECTION 0x1000
+#define mmDCORE3_SM_ETF_BASE 0x6713000ull
+#define DCORE3_SM_ETF_MAX_OFFSET 0x1000
+#define DCORE3_SM_ETF_SECTION 0x1000
+#define mmDCORE3_SM_SPMU_BASE 0x6714000ull
+#define DCORE3_SM_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_SM_SPMU_SECTION 0x1000
+#define mmDCORE3_SM_BMON_CTI_BASE 0x6715000ull
+#define DCORE3_SM_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_SM_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_SM_USER_CTI_BASE 0x6716000ull
+#define DCORE3_SM_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_SM_USER_CTI_SECTION 0x1000
+#define mmDCORE3_SM_BMON_BASE 0x6717000ull
+#define DCORE3_SM_BMON_MAX_OFFSET 0x1000
+#define DCORE3_SM_BMON_SECTION 0x1000
+#define mmDCORE3_SM_BMON1_BASE 0x6718000ull
+#define DCORE3_SM_BMON1_MAX_OFFSET 0x1000
+#define DCORE3_SM_BMON1_SECTION 0x18000
+#define mmDCORE3_XFT_FUNNEL_BASE 0x6730000ull
+#define DCORE3_XFT_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_XFT_FUNNEL_SECTION 0x8000
+#define mmDCORE3_TFT0_FUNNEL_BASE 0x6738000ull
+#define DCORE3_TFT0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TFT0_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TFT1_FUNNEL_BASE 0x6739000ull
+#define DCORE3_TFT1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TFT1_FUNNEL_SECTION 0x1000
+#define mmDCORE3_TFT2_FUNNEL_BASE 0x673A000ull
+#define DCORE3_TFT2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_TFT2_FUNNEL_SECTION 0x7000
+#define mmDCORE3_RTR0_FUNNEL_BASE 0x6741000ull
+#define DCORE3_RTR0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR0_FUNNEL_SECTION 0x4000
+#define mmDCORE3_MIF0_FUNNEL_BASE 0x6745000ull
+#define DCORE3_MIF0_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_MIF0_FUNNEL_SECTION 0x4000
+#define mmDCORE3_RTR1_FUNNEL_BASE 0x6749000ull
+#define DCORE3_RTR1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR1_FUNNEL_SECTION 0x4000
+#define mmDCORE3_MIF1_FUNNEL_BASE 0x674D000ull
+#define DCORE3_MIF1_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_MIF1_FUNNEL_SECTION 0x4000
+#define mmDCORE3_RTR2_FUNNEL_BASE 0x6751000ull
+#define DCORE3_RTR2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR2_FUNNEL_SECTION 0x4000
+#define mmDCORE3_MIF2_FUNNEL_BASE 0x6755000ull
+#define DCORE3_MIF2_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_MIF2_FUNNEL_SECTION 0x4000
+#define mmDCORE3_RTR3_FUNNEL_BASE 0x6759000ull
+#define DCORE3_RTR3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR3_FUNNEL_SECTION 0x4000
+#define mmDCORE3_MIF3_FUNNEL_BASE 0x675D000ull
+#define DCORE3_MIF3_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_MIF3_FUNNEL_SECTION 0x4000
+#define mmDCORE3_RTR4_FUNNEL_BASE 0x6761000ull
+#define DCORE3_RTR4_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR4_FUNNEL_SECTION 0x8000
+#define mmDCORE3_RTR5_FUNNEL_BASE 0x6769000ull
+#define DCORE3_RTR5_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR5_FUNNEL_SECTION 0x8000
+#define mmDCORE3_RTR6_FUNNEL_BASE 0x6771000ull
+#define DCORE3_RTR6_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR6_FUNNEL_SECTION 0x8000
+#define mmDCORE3_RTR7_FUNNEL_BASE 0x6779000ull
+#define DCORE3_RTR7_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_RTR7_FUNNEL_SECTION 0x47000
+#define mmDCORE3_EDMA0_CS_ROM_TBL_BASE 0x67C0000ull
+#define DCORE3_EDMA0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_EDMA0_CS_STM_BASE 0x67C1000ull
+#define DCORE3_EDMA0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_CS_STM_SECTION 0x1000
+#define mmDCORE3_EDMA0_CS_CTI_BASE 0x67C2000ull
+#define DCORE3_EDMA0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_CS_CTI_SECTION 0x1000
+#define mmDCORE3_EDMA0_CS_ETF_BASE 0x67C3000ull
+#define DCORE3_EDMA0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_CS_ETF_SECTION 0x1000
+#define mmDCORE3_EDMA0_CS_SPMU_BASE 0x67C4000ull
+#define DCORE3_EDMA0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_EDMA0_BMON_CTI_BASE 0x67C5000ull
+#define DCORE3_EDMA0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_EDMA0_USER_CTI_BASE 0x67C6000ull
+#define DCORE3_EDMA0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_USER_CTI_SECTION 0x1000
+#define mmDCORE3_EDMA0_BMON_0_BASE 0x67C7000ull
+#define DCORE3_EDMA0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_BMON_0_SECTION 0x1000
+#define mmDCORE3_EDMA0_BMON_1_BASE 0x67C8000ull
+#define DCORE3_EDMA0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_EDMA0_BMON_1_SECTION 0x1000
+#define mmDCORE3_EDMA0_QM_ARC_RTT_BASE 0x67C9000ull
+#define DCORE3_EDMA0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_EDMA0_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE3_EDMA1_CS_ROM_TBL_BASE 0x67D0000ull
+#define DCORE3_EDMA1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_EDMA1_CS_STM_BASE 0x67D1000ull
+#define DCORE3_EDMA1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_CS_STM_SECTION 0x1000
+#define mmDCORE3_EDMA1_CS_CTI_BASE 0x67D2000ull
+#define DCORE3_EDMA1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_CS_CTI_SECTION 0x1000
+#define mmDCORE3_EDMA1_CS_ETF_BASE 0x67D3000ull
+#define DCORE3_EDMA1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_CS_ETF_SECTION 0x1000
+#define mmDCORE3_EDMA1_CS_SPMU_BASE 0x67D4000ull
+#define DCORE3_EDMA1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_EDMA1_BMON_CTI_BASE 0x67D5000ull
+#define DCORE3_EDMA1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_EDMA1_USER_CTI_BASE 0x67D6000ull
+#define DCORE3_EDMA1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_USER_CTI_SECTION 0x1000
+#define mmDCORE3_EDMA1_BMON_0_BASE 0x67D7000ull
+#define DCORE3_EDMA1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_BMON_0_SECTION 0x1000
+#define mmDCORE3_EDMA1_BMON_1_BASE 0x67D8000ull
+#define DCORE3_EDMA1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_EDMA1_BMON_1_SECTION 0x1000
+#define mmDCORE3_EDMA1_QM_ARC_RTT_BASE 0x67D9000ull
+#define DCORE3_EDMA1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define DCORE3_EDMA1_QM_ARC_RTT_SECTION 0x7000
+#define mmDCORE3_VDEC0_CS_ROM_TBL_BASE 0x67E0000ull
+#define DCORE3_VDEC0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_VDEC0_CS_STM_BASE 0x67E1000ull
+#define DCORE3_VDEC0_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_CS_STM_SECTION 0x1000
+#define mmDCORE3_VDEC0_CS_CTI_BASE 0x67E2000ull
+#define DCORE3_VDEC0_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_CS_CTI_SECTION 0x1000
+#define mmDCORE3_VDEC0_CS_ETF_BASE 0x67E3000ull
+#define DCORE3_VDEC0_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_CS_ETF_SECTION 0x1000
+#define mmDCORE3_VDEC0_CS_SPMU_BASE 0x67E4000ull
+#define DCORE3_VDEC0_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_VDEC0_BMON_CTI_BASE 0x67E5000ull
+#define DCORE3_VDEC0_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_VDEC0_USER_CTI_BASE 0x67E6000ull
+#define DCORE3_VDEC0_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_USER_CTI_SECTION 0x1000
+#define mmDCORE3_VDEC0_BMON_0_BASE 0x67E7000ull
+#define DCORE3_VDEC0_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_BMON_0_SECTION 0x1000
+#define mmDCORE3_VDEC0_BMON_1_BASE 0x67E8000ull
+#define DCORE3_VDEC0_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_BMON_1_SECTION 0x1000
+#define mmDCORE3_VDEC0_BMON_2_BASE 0x67E9000ull
+#define DCORE3_VDEC0_BMON_2_MAX_OFFSET 0x1000
+#define DCORE3_VDEC0_BMON_2_SECTION 0x7000
+#define mmDCORE3_VDEC1_CS_ROM_TBL_BASE 0x67F0000ull
+#define DCORE3_VDEC1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_CS_ROM_TBL_SECTION 0x1000
+#define mmDCORE3_VDEC1_CS_STM_BASE 0x67F1000ull
+#define DCORE3_VDEC1_CS_STM_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_CS_STM_SECTION 0x1000
+#define mmDCORE3_VDEC1_CS_CTI_BASE 0x67F2000ull
+#define DCORE3_VDEC1_CS_CTI_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_CS_CTI_SECTION 0x1000
+#define mmDCORE3_VDEC1_CS_ETF_BASE 0x67F3000ull
+#define DCORE3_VDEC1_CS_ETF_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_CS_ETF_SECTION 0x1000
+#define mmDCORE3_VDEC1_CS_SPMU_BASE 0x67F4000ull
+#define DCORE3_VDEC1_CS_SPMU_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_CS_SPMU_SECTION 0x1000
+#define mmDCORE3_VDEC1_BMON_CTI_BASE 0x67F5000ull
+#define DCORE3_VDEC1_BMON_CTI_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_BMON_CTI_SECTION 0x1000
+#define mmDCORE3_VDEC1_USER_CTI_BASE 0x67F6000ull
+#define DCORE3_VDEC1_USER_CTI_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_USER_CTI_SECTION 0x1000
+#define mmDCORE3_VDEC1_BMON_0_BASE 0x67F7000ull
+#define DCORE3_VDEC1_BMON_0_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_BMON_0_SECTION 0x1000
+#define mmDCORE3_VDEC1_BMON_1_BASE 0x67F8000ull
+#define DCORE3_VDEC1_BMON_1_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_BMON_1_SECTION 0x1000
+#define mmDCORE3_VDEC1_BMON_2_BASE 0x67F9000ull
+#define DCORE3_VDEC1_BMON_2_MAX_OFFSET 0x1000
+#define DCORE3_VDEC1_BMON_2_SECTION 0x7000
+#define mmCA53_BASE 0x6800000ull
+#define CA53_MAX_OFFSET 0x141000
+#define CA53_SECTION 0x400000
+#define mmPCI_ROM_TABLE_BASE 0x6C00000ull
+#define PCI_ROM_TABLE_MAX_OFFSET 0x1000
+#define PCI_ROM_TABLE_SECTION 0x1000
+#define mmPCIE_STM_BASE 0x6C01000ull
+#define PCIE_STM_MAX_OFFSET 0x1000
+#define PCIE_STM_SECTION 0x1000
+#define mmPCIE_ETF_BASE 0x6C02000ull
+#define PCIE_ETF_MAX_OFFSET 0x1000
+#define PCIE_ETF_SECTION 0x1000
+#define mmPCIE_CTI_0_BASE 0x6C03000ull
+#define PCIE_CTI_0_MAX_OFFSET 0x1000
+#define PCIE_CTI_0_SECTION 0x1000
+#define mmPCIE_SPMU_BASE 0x6C04000ull
+#define PCIE_SPMU_MAX_OFFSET 0x1000
+#define PCIE_SPMU_SECTION 0x1000
+#define mmPCIE_CTI_1_BASE 0x6C05000ull
+#define PCIE_CTI_1_MAX_OFFSET 0x1000
+#define PCIE_CTI_1_SECTION 0x2000
+#define mmPCIE_BMON_MSTR_WR_BASE 0x6C07000ull
+#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_WR_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_RD_BASE 0x6C08000ull
+#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_RD_SECTION 0x1000
+#define mmPCIE_BMON_SLV_WR_BASE 0x6C09000ull
+#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_WR_SECTION 0x1000
+#define mmPCIE_BMON_SLV_RD_BASE 0x6C0A000ull
+#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_RD_SECTION 0x36000
+#define mmTOP_ROM_TABLE_BASE 0x6C40000ull
+#define TOP_ROM_TABLE_MAX_OFFSET 0x1000
+#define TOP_ROM_TABLE_SECTION 0x1000
+#define mmPSOC_CTI_BASE 0x6C41000ull
+#define PSOC_CTI_MAX_OFFSET 0x1000
+#define PSOC_CTI_SECTION 0x1000
+#define mmPSOC_STM_BASE 0x6C42000ull
+#define PSOC_STM_MAX_OFFSET 0x1000
+#define PSOC_STM_SECTION 0x1000
+#define mmPSOC_FUNNEL_BASE 0x6C43000ull
+#define PSOC_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_FUNNEL_SECTION 0x1000
+#define mmPSOC_ETR_BASE 0x6C44000ull
+#define PSOC_ETR_MAX_OFFSET 0x1000
+#define PSOC_ETR_SECTION 0x1000
+#define mmPSOC_ETF_BASE 0x6C45000ull
+#define PSOC_ETF_MAX_OFFSET 0x1000
+#define PSOC_ETF_SECTION 0x1000
+#define mmPSOC_TS_CTI_BASE 0x6C46000ull
+#define PSOC_TS_CTI_MAX_OFFSET 0x1000
+#define PSOC_TS_CTI_SECTION 0xA000
+#define mmPSOC_ARC0_CS_DBG_ROM_TBL_BASE 0x6C50000ull
+#define PSOC_ARC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmPSOC_ARC0_CS_STM_BASE 0x6C51000ull
+#define PSOC_ARC0_CS_STM_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CS_STM_SECTION 0x1000
+#define mmPSOC_ARC0_CS_CTI_BASE 0x6C52000ull
+#define PSOC_ARC0_CS_CTI_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CS_CTI_SECTION 0x1000
+#define mmPSOC_ARC0_CS_ETF_BASE 0x6C53000ull
+#define PSOC_ARC0_CS_ETF_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CS_ETF_SECTION 0x1000
+#define mmPSOC_ARC0_CS_SPMU_BASE 0x6C54000ull
+#define PSOC_ARC0_CS_SPMU_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CS_SPMU_SECTION 0x1000
+#define mmPSOC_ARC0_BMON_CTI_BASE 0x6C55000ull
+#define PSOC_ARC0_BMON_CTI_MAX_OFFSET 0x1000
+#define PSOC_ARC0_BMON_CTI_SECTION 0x1000
+#define mmPSOC_ARC0_USER_CTI_BASE 0x6C56000ull
+#define PSOC_ARC0_USER_CTI_MAX_OFFSET 0x1000
+#define PSOC_ARC0_USER_CTI_SECTION 0x1000
+#define mmPSOC_ARC0_BMON_0_BASE 0x6C57000ull
+#define PSOC_ARC0_BMON_0_MAX_OFFSET 0x1000
+#define PSOC_ARC0_BMON_0_SECTION 0x1000
+#define mmPSOC_ARC0_BMON_1_BASE 0x6C58000ull
+#define PSOC_ARC0_BMON_1_MAX_OFFSET 0x1000
+#define PSOC_ARC0_BMON_1_SECTION 0x6000
+#define mmPSOC_ARC0_RTT_BASE 0x6C5E000ull
+#define PSOC_ARC0_RTT_MAX_OFFSET 0x1400
+#define PSOC_ARC0_RTT_SECTION 0x1000
+#define mmPSOC_ARC0_FUNNEL_BASE 0x6C5F000ull
+#define PSOC_ARC0_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_ARC0_FUNNEL_SECTION 0x1000
+#define mmPSOC_ARC1_CS_DBG_ROM_TBL_BASE 0x6C60000ull
+#define PSOC_ARC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmPSOC_ARC1_CS_STM_BASE 0x6C61000ull
+#define PSOC_ARC1_CS_STM_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CS_STM_SECTION 0x1000
+#define mmPSOC_ARC1_CS_CTI_BASE 0x6C62000ull
+#define PSOC_ARC1_CS_CTI_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CS_CTI_SECTION 0x1000
+#define mmPSOC_ARC1_CS_ETF_BASE 0x6C63000ull
+#define PSOC_ARC1_CS_ETF_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CS_ETF_SECTION 0x1000
+#define mmPSOC_ARC1_CS_SPMU_BASE 0x6C64000ull
+#define PSOC_ARC1_CS_SPMU_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CS_SPMU_SECTION 0x1000
+#define mmPSOC_ARC1_BMON_CTI_BASE 0x6C65000ull
+#define PSOC_ARC1_BMON_CTI_MAX_OFFSET 0x1000
+#define PSOC_ARC1_BMON_CTI_SECTION 0x1000
+#define mmPSOC_ARC1_USER_CTI_BASE 0x6C66000ull
+#define PSOC_ARC1_USER_CTI_MAX_OFFSET 0x1000
+#define PSOC_ARC1_USER_CTI_SECTION 0x1000
+#define mmPSOC_ARC1_BMON_0_BASE 0x6C67000ull
+#define PSOC_ARC1_BMON_0_MAX_OFFSET 0x1000
+#define PSOC_ARC1_BMON_0_SECTION 0x1000
+#define mmPSOC_ARC1_BMON_1_BASE 0x6C68000ull
+#define PSOC_ARC1_BMON_1_MAX_OFFSET 0x1000
+#define PSOC_ARC1_BMON_1_SECTION 0x6000
+#define mmPSOC_ARC1_RTT_BASE 0x6C6E000ull
+#define PSOC_ARC1_RTT_MAX_OFFSET 0x1400
+#define PSOC_ARC1_RTT_SECTION 0x1000
+#define mmPSOC_ARC1_FUNNEL_BASE 0x6C6F000ull
+#define PSOC_ARC1_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_ARC1_FUNNEL_SECTION 0x1000
+#define mmPSOC_ARC0_CTI0_BASE 0x6C70000ull
+#define PSOC_ARC0_CTI0_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CTI0_SECTION 0x1000
+#define mmPSOC_ARC0_CTI1_BASE 0x6C71000ull
+#define PSOC_ARC0_CTI1_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CTI1_SECTION 0x1000
+#define mmPSOC_ARC0_CTI2_BASE 0x6C72000ull
+#define PSOC_ARC0_CTI2_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CTI2_SECTION 0x1000
+#define mmPSOC_ARC0_CTI3_BASE 0x6C73000ull
+#define PSOC_ARC0_CTI3_MAX_OFFSET 0x1000
+#define PSOC_ARC0_CTI3_SECTION 0x1000
+#define mmPSOC_ARC1_CTI0_BASE 0x6C74000ull
+#define PSOC_ARC1_CTI0_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CTI0_SECTION 0x1000
+#define mmPSOC_ARC1_CTI1_BASE 0x6C75000ull
+#define PSOC_ARC1_CTI1_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CTI1_SECTION 0x1000
+#define mmPSOC_ARC1_CTI2_BASE 0x6C76000ull
+#define PSOC_ARC1_CTI2_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CTI2_SECTION 0x1000
+#define mmPSOC_ARC1_CTI3_BASE 0x6C77000ull
+#define PSOC_ARC1_CTI3_MAX_OFFSET 0x1000
+#define PSOC_ARC1_CTI3_SECTION 0x9000
+#define mmPDMA0_CS_ROM_TBL_BASE 0x6C80000ull
+#define PDMA0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define PDMA0_CS_ROM_TBL_SECTION 0x1000
+#define mmPDMA0_CS_STM_BASE 0x6C81000ull
+#define PDMA0_CS_STM_MAX_OFFSET 0x1000
+#define PDMA0_CS_STM_SECTION 0x1000
+#define mmPDMA0_CS_CTI_BASE 0x6C82000ull
+#define PDMA0_CS_CTI_MAX_OFFSET 0x1000
+#define PDMA0_CS_CTI_SECTION 0x1000
+#define mmPDMA0_CS_ETF_BASE 0x6C83000ull
+#define PDMA0_CS_ETF_MAX_OFFSET 0x1000
+#define PDMA0_CS_ETF_SECTION 0x1000
+#define mmPDMA0_CS_SPMU_BASE 0x6C84000ull
+#define PDMA0_CS_SPMU_MAX_OFFSET 0x1000
+#define PDMA0_CS_SPMU_SECTION 0x1000
+#define mmPDMA0_BMON_CTI_BASE 0x6C85000ull
+#define PDMA0_BMON_CTI_MAX_OFFSET 0x1000
+#define PDMA0_BMON_CTI_SECTION 0x1000
+#define mmPDMA0_USER_CTI_BASE 0x6C86000ull
+#define PDMA0_USER_CTI_MAX_OFFSET 0x1000
+#define PDMA0_USER_CTI_SECTION 0x1000
+#define mmPDMA0_BMON_0_BASE 0x6C87000ull
+#define PDMA0_BMON_0_MAX_OFFSET 0x1000
+#define PDMA0_BMON_0_SECTION 0x1000
+#define mmPDMA0_BMON_1_BASE 0x6C88000ull
+#define PDMA0_BMON_1_MAX_OFFSET 0x1000
+#define PDMA0_BMON_1_SECTION 0x1000
+#define mmPDMA0_QM_ARC_RTT_BASE 0x6C89000ull
+#define PDMA0_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define PDMA0_QM_ARC_RTT_SECTION 0x7000
+#define mmPDMA1_CS_ROM_TBL_BASE 0x6C90000ull
+#define PDMA1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define PDMA1_CS_ROM_TBL_SECTION 0x1000
+#define mmPDMA1_CS_STM_BASE 0x6C91000ull
+#define PDMA1_CS_STM_MAX_OFFSET 0x1000
+#define PDMA1_CS_STM_SECTION 0x1000
+#define mmPDMA1_CS_CTI_BASE 0x6C92000ull
+#define PDMA1_CS_CTI_MAX_OFFSET 0x1000
+#define PDMA1_CS_CTI_SECTION 0x1000
+#define mmPDMA1_CS_ETF_BASE 0x6C93000ull
+#define PDMA1_CS_ETF_MAX_OFFSET 0x1000
+#define PDMA1_CS_ETF_SECTION 0x1000
+#define mmPDMA1_CS_SPMU_BASE 0x6C94000ull
+#define PDMA1_CS_SPMU_MAX_OFFSET 0x1000
+#define PDMA1_CS_SPMU_SECTION 0x1000
+#define mmPDMA1_BMON_CTI_BASE 0x6C95000ull
+#define PDMA1_BMON_CTI_MAX_OFFSET 0x1000
+#define PDMA1_BMON_CTI_SECTION 0x1000
+#define mmPDMA1_USER_CTI_BASE 0x6C96000ull
+#define PDMA1_USER_CTI_MAX_OFFSET 0x1000
+#define PDMA1_USER_CTI_SECTION 0x1000
+#define mmPDMA1_BMON_0_BASE 0x6C97000ull
+#define PDMA1_BMON_0_MAX_OFFSET 0x1000
+#define PDMA1_BMON_0_SECTION 0x1000
+#define mmPDMA1_BMON_1_BASE 0x6C98000ull
+#define PDMA1_BMON_1_MAX_OFFSET 0x1000
+#define PDMA1_BMON_1_SECTION 0x1000
+#define mmPDMA1_QM_ARC_RTT_BASE 0x6C99000ull
+#define PDMA1_QM_ARC_RTT_MAX_OFFSET 0x1400
+#define PDMA1_QM_ARC_RTT_SECTION 0x7000
+#define mmXDMA_FUNNEL_BASE 0x6CA0000ull
+#define XDMA_FUNNEL_MAX_OFFSET 0x1000
+#define XDMA_FUNNEL_SECTION 0x21000
+#define mmCPU_ETF_0_BASE 0x6CC1000ull
+#define CPU_ETF_0_MAX_OFFSET 0x1000
+#define CPU_ETF_0_SECTION 0x1000
+#define mmCPU_ETF_1_BASE 0x6CC2000ull
+#define CPU_ETF_1_MAX_OFFSET 0x1000
+#define CPU_ETF_1_SECTION 0x2000
+#define mmCPU_CTI_BASE 0x6CC4000ull
+#define CPU_CTI_MAX_OFFSET 0x1000
+#define CPU_CTI_SECTION 0x1000
+#define mmCPU_FUNNEL_BASE 0x6CC5000ull
+#define CPU_FUNNEL_MAX_OFFSET 0x1000
+#define CPU_FUNNEL_SECTION 0x1000
+#define mmCPU_STM_BASE 0x6CC6000ull
+#define CPU_STM_MAX_OFFSET 0x1000
+#define CPU_STM_SECTION 0x1000
+#define mmCPU_CTI_TRACE_BASE 0x6CC7000ull
+#define CPU_CTI_TRACE_MAX_OFFSET 0x1000
+#define CPU_CTI_TRACE_SECTION 0x1000
+#define mmCPU_ETF_TRACE_BASE 0x6CC8000ull
+#define CPU_ETF_TRACE_MAX_OFFSET 0x1000
+#define CPU_ETF_TRACE_SECTION 0x1000
+#define mmCPU_WR_BMON_BASE 0x6CC9000ull
+#define CPU_WR_BMON_MAX_OFFSET 0x1000
+#define CPU_WR_BMON_SECTION 0x1000
+#define mmCPU_RD_BMON_BASE 0x6CCA000ull
+#define CPU_RD_BMON_MAX_OFFSET 0x1000
+#define CPU_RD_BMON_SECTION 0x36000
+#define mmPMMU_CS_DBG_ROM_TBL_BASE 0x6D00000ull
+#define PMMU_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define PMMU_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmPMMU_CS_STM_BASE 0x6D01000ull
+#define PMMU_CS_STM_MAX_OFFSET 0x1000
+#define PMMU_CS_STM_SECTION 0x1000
+#define mmPMMU_CS_CTI_BASE 0x6D02000ull
+#define PMMU_CS_CTI_MAX_OFFSET 0x1000
+#define PMMU_CS_CTI_SECTION 0x1000
+#define mmPMMU_CS_ETF_BASE 0x6D03000ull
+#define PMMU_CS_ETF_MAX_OFFSET 0x1000
+#define PMMU_CS_ETF_SECTION 0x1000
+#define mmPMMU_CS_SPMU_BASE 0x6D04000ull
+#define PMMU_CS_SPMU_MAX_OFFSET 0x1000
+#define PMMU_CS_SPMU_SECTION 0x1000
+#define mmPMMU_BMON_CTI_BASE 0x6D05000ull
+#define PMMU_BMON_CTI_MAX_OFFSET 0x1000
+#define PMMU_BMON_CTI_SECTION 0x1000
+#define mmPMMU_USER_CTI_BASE 0x6D06000ull
+#define PMMU_USER_CTI_MAX_OFFSET 0x1000
+#define PMMU_USER_CTI_SECTION 0x1000
+#define mmPMMU_BMON_0_BASE 0x6D07000ull
+#define PMMU_BMON_0_MAX_OFFSET 0x1000
+#define PMMU_BMON_0_SECTION 0x1000
+#define mmPMMU_BMON_1_BASE 0x6D08000ull
+#define PMMU_BMON_1_MAX_OFFSET 0x1000
+#define PMMU_BMON_1_SECTION 0x1000
+#define mmPMMU_BMON_2_BASE 0x6D09000ull
+#define PMMU_BMON_2_MAX_OFFSET 0x1000
+#define PMMU_BMON_2_SECTION 0x1000
+#define mmPMMU_BMON_3_BASE 0x6D0A000ull
+#define PMMU_BMON_3_MAX_OFFSET 0x1000
+#define PMMU_BMON_3_SECTION 0x1000
+#define mmPMMU_BMON_4_BASE 0x6D0B000ull
+#define PMMU_BMON_4_MAX_OFFSET 0x1000
+#define PMMU_BMON_4_SECTION 0x1000
+#define mmPMMU_FUNNEL_BASE 0x6D0C000ull
+#define PMMU_FUNNEL_MAX_OFFSET 0x1000
+#define PMMU_FUNNEL_SECTION 0x1000
+#define mmPMMU_FUNNEL_DEC_BASE 0x6D0D000ull
+#define PMMU_FUNNEL_DEC_MAX_OFFSET 0x1000
+#define PMMU_FUNNEL_DEC_SECTION 0x33000
+#define mmDCORE0_XBAR_MID_FUNNEL_BASE 0x6D40000ull
+#define DCORE0_XBAR_MID_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_XBAR_MID_FUNNEL_SECTION 0x8000
+#define mmDCORE0_XBAR_EDGE_FUNNEL_BASE 0x6D48000ull
+#define DCORE0_XBAR_EDGE_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE0_XBAR_EDGE_FUNNEL_SECTION 0x8000
+#define mmDCORE1_XBAR_MID_FUNNEL_BASE 0x6D50000ull
+#define DCORE1_XBAR_MID_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_XBAR_MID_FUNNEL_SECTION 0x8000
+#define mmDCORE1_XBAR_EDGE_FUNNEL_BASE 0x6D58000ull
+#define DCORE1_XBAR_EDGE_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE1_XBAR_EDGE_FUNNEL_SECTION 0x8000
+#define mmDCORE2_XBAR_MID_FUNNEL_BASE 0x6D60000ull
+#define DCORE2_XBAR_MID_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_XBAR_MID_FUNNEL_SECTION 0x8000
+#define mmDCORE2_XBAR_EDGE_FUNNEL_BASE 0x6D68000ull
+#define DCORE2_XBAR_EDGE_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE2_XBAR_EDGE_FUNNEL_SECTION 0x8000
+#define mmDCORE3_XBAR_MID_FUNNEL_BASE 0x6D70000ull
+#define DCORE3_XBAR_MID_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_XBAR_MID_FUNNEL_SECTION 0x8000
+#define mmDCORE3_XBAR_EDGE_FUNNEL_BASE 0x6D78000ull
+#define DCORE3_XBAR_EDGE_FUNNEL_MAX_OFFSET 0x1000
+#define DCORE3_XBAR_EDGE_FUNNEL_SECTION 0x88000
+#define mmROT0_CS_ROM_TBL_BASE 0x6E00000ull
+#define ROT0_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define ROT0_CS_ROM_TBL_SECTION 0x1000
+#define mmROT0_CS_STM_BASE 0x6E01000ull
+#define ROT0_CS_STM_MAX_OFFSET 0x1000
+#define ROT0_CS_STM_SECTION 0x1000
+#define mmROT0_CS_CTI_BASE 0x6E02000ull
+#define ROT0_CS_CTI_MAX_OFFSET 0x1000
+#define ROT0_CS_CTI_SECTION 0x1000
+#define mmROT0_CS_ETF_BASE 0x6E03000ull
+#define ROT0_CS_ETF_MAX_OFFSET 0x1000
+#define ROT0_CS_ETF_SECTION 0x1000
+#define mmROT0_CS_SPMU_BASE 0x6E04000ull
+#define ROT0_CS_SPMU_MAX_OFFSET 0x1000
+#define ROT0_CS_SPMU_SECTION 0x1000
+#define mmROT0_BMON_CTI_BASE 0x6E05000ull
+#define ROT0_BMON_CTI_MAX_OFFSET 0x1000
+#define ROT0_BMON_CTI_SECTION 0x1000
+#define mmROT0_USER_CTI_BASE 0x6E06000ull
+#define ROT0_USER_CTI_MAX_OFFSET 0x1000
+#define ROT0_USER_CTI_SECTION 0x1000
+#define mmROT0_BMON_0_BASE 0x6E07000ull
+#define ROT0_BMON_0_MAX_OFFSET 0x1000
+#define ROT0_BMON_0_SECTION 0x1000
+#define mmROT0_BMON_1_BASE 0x6E08000ull
+#define ROT0_BMON_1_MAX_OFFSET 0x1000
+#define ROT0_BMON_1_SECTION 0x1000
+#define mmROT0_BMON_2_BASE 0x6E09000ull
+#define ROT0_BMON_2_MAX_OFFSET 0x1000
+#define ROT0_BMON_2_SECTION 0x1000
+#define mmROT0_BMON_3_BASE 0x6E0A000ull
+#define ROT0_BMON_3_MAX_OFFSET 0x1000
+#define ROT0_BMON_3_SECTION 0x1000
+#define mmROT0_ARC_RTT_BASE 0x6E0B000ull
+#define ROT0_ARC_RTT_MAX_OFFSET 0x1400
+#define ROT0_ARC_RTT_SECTION 0x5000
+#define mmROT1_CS_ROM_TBL_BASE 0x6E10000ull
+#define ROT1_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define ROT1_CS_ROM_TBL_SECTION 0x1000
+#define mmROT1_CS_STM_BASE 0x6E11000ull
+#define ROT1_CS_STM_MAX_OFFSET 0x1000
+#define ROT1_CS_STM_SECTION 0x1000
+#define mmROT1_CS_CTI_BASE 0x6E12000ull
+#define ROT1_CS_CTI_MAX_OFFSET 0x1000
+#define ROT1_CS_CTI_SECTION 0x1000
+#define mmROT1_CS_ETF_BASE 0x6E13000ull
+#define ROT1_CS_ETF_MAX_OFFSET 0x1000
+#define ROT1_CS_ETF_SECTION 0x1000
+#define mmROT1_CS_SPMU_BASE 0x6E14000ull
+#define ROT1_CS_SPMU_MAX_OFFSET 0x1000
+#define ROT1_CS_SPMU_SECTION 0x1000
+#define mmROT1_BMON_CTI_BASE 0x6E15000ull
+#define ROT1_BMON_CTI_MAX_OFFSET 0x1000
+#define ROT1_BMON_CTI_SECTION 0x1000
+#define mmROT1_USER_CTI_BASE 0x6E16000ull
+#define ROT1_USER_CTI_MAX_OFFSET 0x1000
+#define ROT1_USER_CTI_SECTION 0x1000
+#define mmROT1_BMON_0_BASE 0x6E17000ull
+#define ROT1_BMON_0_MAX_OFFSET 0x1000
+#define ROT1_BMON_0_SECTION 0x1000
+#define mmROT1_BMON_1_BASE 0x6E18000ull
+#define ROT1_BMON_1_MAX_OFFSET 0x1000
+#define ROT1_BMON_1_SECTION 0x1000
+#define mmROT1_BMON_2_BASE 0x6E19000ull
+#define ROT1_BMON_2_MAX_OFFSET 0x1000
+#define ROT1_BMON_2_SECTION 0x1000
+#define mmROT1_BMON_3_BASE 0x6E1A000ull
+#define ROT1_BMON_3_MAX_OFFSET 0x1000
+#define ROT1_BMON_3_SECTION 0x1000
+#define mmROT1_ARC_RTT_BASE 0x6E1B000ull
+#define ROT1_ARC_RTT_MAX_OFFSET 0x1400
+#define ROT1_ARC_RTT_SECTION 0x65000
+#define mmARC_FARM_ARC0_RTT_BASE 0x6E80000ull
+#define ARC_FARM_ARC0_RTT_MAX_OFFSET 0x1400
+#define ARC_FARM_ARC0_RTT_SECTION 0x1000
+#define mmARC_FARM_ARC1_RTT_BASE 0x6E81000ull
+#define ARC_FARM_ARC1_RTT_MAX_OFFSET 0x1400
+#define ARC_FARM_ARC1_RTT_SECTION 0x1000
+#define mmARC_FARM_ARC2_RTT_BASE 0x6E82000ull
+#define ARC_FARM_ARC2_RTT_MAX_OFFSET 0x1400
+#define ARC_FARM_ARC2_RTT_SECTION 0x1000
+#define mmARC_FARM_ARC3_RTT_BASE 0x6E83000ull
+#define ARC_FARM_ARC3_RTT_MAX_OFFSET 0x1400
+#define ARC_FARM_ARC3_RTT_SECTION 0xD000
+#define mmARC_FARM_CS_ROM_TBL_BASE 0x6E90000ull
+#define ARC_FARM_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define ARC_FARM_CS_ROM_TBL_SECTION 0x1000
+#define mmARC_FARM_CS_STM_BASE 0x6E91000ull
+#define ARC_FARM_CS_STM_MAX_OFFSET 0x1000
+#define ARC_FARM_CS_STM_SECTION 0x1000
+#define mmARC_FARM_CS_CTI_BASE 0x6E92000ull
+#define ARC_FARM_CS_CTI_MAX_OFFSET 0x1000
+#define ARC_FARM_CS_CTI_SECTION 0x1000
+#define mmARC_FARM_CS_ETF_BASE 0x6E93000ull
+#define ARC_FARM_CS_ETF_MAX_OFFSET 0x1000
+#define ARC_FARM_CS_ETF_SECTION 0x1000
+#define mmARC_FARM_CS_SPMU_BASE 0x6E94000ull
+#define ARC_FARM_CS_SPMU_MAX_OFFSET 0x1000
+#define ARC_FARM_CS_SPMU_SECTION 0x1000
+#define mmARC_FARM_BMON_CTI_BASE 0x6E95000ull
+#define ARC_FARM_BMON_CTI_MAX_OFFSET 0x1000
+#define ARC_FARM_BMON_CTI_SECTION 0x1000
+#define mmARC_FARM_USER_CTI_BASE 0x6E96000ull
+#define ARC_FARM_USER_CTI_MAX_OFFSET 0x1000
+#define ARC_FARM_USER_CTI_SECTION 0x1000
+#define mmARC_FARM_BMON_0_BASE 0x6E97000ull
+#define ARC_FARM_BMON_0_MAX_OFFSET 0x1000
+#define ARC_FARM_BMON_0_SECTION 0x1000
+#define mmARC_FARM_BMON_1_BASE 0x6E98000ull
+#define ARC_FARM_BMON_1_MAX_OFFSET 0x1000
+#define ARC_FARM_BMON_1_SECTION 0x1000
+#define mmARC_FARM_BMON_2_BASE 0x6E99000ull
+#define ARC_FARM_BMON_2_MAX_OFFSET 0x1000
+#define ARC_FARM_BMON_2_SECTION 0x1000
+#define mmARC_FARM_BMON_3_BASE 0x6E9A000ull
+#define ARC_FARM_BMON_3_MAX_OFFSET 0x1000
+#define ARC_FARM_BMON_3_SECTION 0x1000
+#define mmARC_FARM_CTI_BASE 0x6E9B000ull
+#define ARC_FARM_CTI_MAX_OFFSET 0x1000
+#define ARC_FARM_CTI_SECTION 0x1000
+#define mmARC_FARM_FUNNEL_BASE 0x6E9C000ull
+#define ARC_FARM_FUNNEL_MAX_OFFSET 0x1000
+#define ARC_FARM_FUNNEL_SECTION 0x4000
+#define mmKDMA_CS_ROM_TBL_BASE 0x6EA0000ull
+#define KDMA_CS_ROM_TBL_MAX_OFFSET 0x1000
+#define KDMA_CS_ROM_TBL_SECTION 0x1000
+#define mmKDMA_CS_STM_BASE 0x6EA1000ull
+#define KDMA_CS_STM_MAX_OFFSET 0x1000
+#define KDMA_CS_STM_SECTION 0x1000
+#define mmKDMA_CS_CTI_BASE 0x6EA2000ull
+#define KDMA_CS_CTI_MAX_OFFSET 0x1000
+#define KDMA_CS_CTI_SECTION 0x1000
+#define mmKDMA_CS_ETF_BASE 0x6EA3000ull
+#define KDMA_CS_ETF_MAX_OFFSET 0x1000
+#define KDMA_CS_ETF_SECTION 0x1000
+#define mmKDMA_CS_SPMU_BASE 0x6EA4000ull
+#define KDMA_CS_SPMU_MAX_OFFSET 0x1000
+#define KDMA_CS_SPMU_SECTION 0x1000
+#define mmKDMA_BMON_CTI_BASE 0x6EA5000ull
+#define KDMA_BMON_CTI_MAX_OFFSET 0x1000
+#define KDMA_BMON_CTI_SECTION 0x1000
+#define mmKDMA_USER_CTI_BASE 0x6EA6000ull
+#define KDMA_USER_CTI_MAX_OFFSET 0x1000
+#define KDMA_USER_CTI_SECTION 0x1000
+#define mmKDMA_BMON_0_BASE 0x6EA7000ull
+#define KDMA_BMON_0_MAX_OFFSET 0x1000
+#define KDMA_BMON_0_SECTION 0x1000
+#define mmKDMA_BMON_1_BASE 0x6EA8000ull
+#define KDMA_BMON_1_MAX_OFFSET 0x1000
+#define KDMA_BMON_1_SECTION 0x1000
+#define mmKDMA_BMON_2_BASE 0x6EA9000ull
+#define KDMA_BMON_2_MAX_OFFSET 0x1000
+#define KDMA_BMON_2_SECTION 0x1000
+#define mmKDMA_BMON_3_BASE 0x6EAA000ull
+#define KDMA_BMON_3_MAX_OFFSET 0x1000
+#define KDMA_BMON_3_SECTION 0x56000
+#define mmPCIE_VDEC0_CS_DBG_ROM_TBL_BASE 0x6F00000ull
+#define PCIE_VDEC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmPCIE_VDEC0_CS_STM_BASE 0x6F01000ull
+#define PCIE_VDEC0_CS_STM_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_CS_STM_SECTION 0x1000
+#define mmPCIE_VDEC0_CS_CTI_BASE 0x6F02000ull
+#define PCIE_VDEC0_CS_CTI_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_CS_CTI_SECTION 0x1000
+#define mmPCIE_VDEC0_CS_ETF_BASE 0x6F03000ull
+#define PCIE_VDEC0_CS_ETF_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_CS_ETF_SECTION 0x1000
+#define mmPCIE_VDEC0_CS_SPMU_BASE 0x6F04000ull
+#define PCIE_VDEC0_CS_SPMU_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_CS_SPMU_SECTION 0x1000
+#define mmPCIE_VDEC0_BMON_CTI_BASE 0x6F05000ull
+#define PCIE_VDEC0_BMON_CTI_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_BMON_CTI_SECTION 0x1000
+#define mmPCIE_VDEC0_USER_CTI_BASE 0x6F06000ull
+#define PCIE_VDEC0_USER_CTI_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_USER_CTI_SECTION 0x1000
+#define mmPCIE_VDEC0_BMON_0_BASE 0x6F07000ull
+#define PCIE_VDEC0_BMON_0_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_BMON_0_SECTION 0x1000
+#define mmPCIE_VDEC0_BMON_1_BASE 0x6F08000ull
+#define PCIE_VDEC0_BMON_1_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_BMON_1_SECTION 0x1000
+#define mmPCIE_VDEC0_BMON_2_BASE 0x6F09000ull
+#define PCIE_VDEC0_BMON_2_MAX_OFFSET 0x1000
+#define PCIE_VDEC0_BMON_2_SECTION 0x7000
+#define mmPCIE_VDEC1_CS_DBG_ROM_TBL_BASE 0x6F10000ull
+#define PCIE_VDEC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmPCIE_VDEC1_CS_STM_BASE 0x6F11000ull
+#define PCIE_VDEC1_CS_STM_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_CS_STM_SECTION 0x1000
+#define mmPCIE_VDEC1_CS_CTI_BASE 0x6F12000ull
+#define PCIE_VDEC1_CS_CTI_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_CS_CTI_SECTION 0x1000
+#define mmPCIE_VDEC1_CS_ETF_BASE 0x6F13000ull
+#define PCIE_VDEC1_CS_ETF_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_CS_ETF_SECTION 0x1000
+#define mmPCIE_VDEC1_CS_SPMU_BASE 0x6F14000ull
+#define PCIE_VDEC1_CS_SPMU_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_CS_SPMU_SECTION 0x1000
+#define mmPCIE_VDEC1_BMON_CTI_BASE 0x6F15000ull
+#define PCIE_VDEC1_BMON_CTI_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_BMON_CTI_SECTION 0x1000
+#define mmPCIE_VDEC1_USER_CTI_BASE 0x6F16000ull
+#define PCIE_VDEC1_USER_CTI_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_USER_CTI_SECTION 0x1000
+#define mmPCIE_VDEC1_BMON_0_BASE 0x6F17000ull
+#define PCIE_VDEC1_BMON_0_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_BMON_0_SECTION 0x1000
+#define mmPCIE_VDEC1_BMON_1_BASE 0x6F18000ull
+#define PCIE_VDEC1_BMON_1_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_BMON_1_SECTION 0x1000
+#define mmPCIE_VDEC1_BMON_2_BASE 0x6F19000ull
+#define PCIE_VDEC1_BMON_2_MAX_OFFSET 0x1000
+#define PCIE_VDEC1_BMON_2_SECTION 0xF7000
+#define mmHBM0_MC0_CS_DBG_ROM_TBL_BASE 0x7010000ull
+#define HBM0_MC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM0_MC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM0_MC0_CS_STM_BASE 0x7011000ull
+#define HBM0_MC0_CS_STM_MAX_OFFSET 0x1000
+#define HBM0_MC0_CS_STM_SECTION 0x1000
+#define mmHBM0_MC0_CS_CTI_BASE 0x7012000ull
+#define HBM0_MC0_CS_CTI_MAX_OFFSET 0x1000
+#define HBM0_MC0_CS_CTI_SECTION 0x1000
+#define mmHBM0_MC0_CS_ETF_BASE 0x7013000ull
+#define HBM0_MC0_CS_ETF_MAX_OFFSET 0x1000
+#define HBM0_MC0_CS_ETF_SECTION 0x1000
+#define mmHBM0_MC0_CS_SPMU_BASE 0x7014000ull
+#define HBM0_MC0_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM0_MC0_CS_SPMU_SECTION 0x1000
+#define mmHBM0_MC0_BMON_CTI_BASE 0x7015000ull
+#define HBM0_MC0_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM0_MC0_BMON_CTI_SECTION 0x1000
+#define mmHBM0_MC0_USER_CTI_BASE 0x7016000ull
+#define HBM0_MC0_USER_CTI_MAX_OFFSET 0x1000
+#define HBM0_MC0_USER_CTI_SECTION 0xA000
+#define mmHBM0_MC0_FUNNEL_BASE 0x7020000ull
+#define HBM0_MC0_FUNNEL_MAX_OFFSET 0x1000
+#define HBM0_MC0_FUNNEL_SECTION 0x30000
+#define mmHBM0_MC1_CS_DBG_ROM_TBL_BASE 0x7050000ull
+#define HBM0_MC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM0_MC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM0_MC1_CS_STM_BASE 0x7051000ull
+#define HBM0_MC1_CS_STM_MAX_OFFSET 0x1000
+#define HBM0_MC1_CS_STM_SECTION 0x1000
+#define mmHBM0_MC1_CS_CTI_BASE 0x7052000ull
+#define HBM0_MC1_CS_CTI_MAX_OFFSET 0x1000
+#define HBM0_MC1_CS_CTI_SECTION 0x1000
+#define mmHBM0_MC1_CS_ETF_BASE 0x7053000ull
+#define HBM0_MC1_CS_ETF_MAX_OFFSET 0x1000
+#define HBM0_MC1_CS_ETF_SECTION 0x1000
+#define mmHBM0_MC1_CS_SPMU_BASE 0x7054000ull
+#define HBM0_MC1_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM0_MC1_CS_SPMU_SECTION 0x1000
+#define mmHBM0_MC1_BMON_CTI_BASE 0x7055000ull
+#define HBM0_MC1_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM0_MC1_BMON_CTI_SECTION 0x1000
+#define mmHBM0_MC1_USER_CTI_BASE 0x7056000ull
+#define HBM0_MC1_USER_CTI_MAX_OFFSET 0x1000
+#define HBM0_MC1_USER_CTI_SECTION 0xA000
+#define mmHBM0_MC1_FUNNEL_BASE 0x7060000ull
+#define HBM0_MC1_FUNNEL_MAX_OFFSET 0x1000
+#define HBM0_MC1_FUNNEL_SECTION 0x30000
+#define mmHBM1_MC0_CS_DBG_ROM_TBL_BASE 0x7090000ull
+#define HBM1_MC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM1_MC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM1_MC0_CS_STM_BASE 0x7091000ull
+#define HBM1_MC0_CS_STM_MAX_OFFSET 0x1000
+#define HBM1_MC0_CS_STM_SECTION 0x1000
+#define mmHBM1_MC0_CS_CTI_BASE 0x7092000ull
+#define HBM1_MC0_CS_CTI_MAX_OFFSET 0x1000
+#define HBM1_MC0_CS_CTI_SECTION 0x1000
+#define mmHBM1_MC0_CS_ETF_BASE 0x7093000ull
+#define HBM1_MC0_CS_ETF_MAX_OFFSET 0x1000
+#define HBM1_MC0_CS_ETF_SECTION 0x1000
+#define mmHBM1_MC0_CS_SPMU_BASE 0x7094000ull
+#define HBM1_MC0_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM1_MC0_CS_SPMU_SECTION 0x1000
+#define mmHBM1_MC0_BMON_CTI_BASE 0x7095000ull
+#define HBM1_MC0_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM1_MC0_BMON_CTI_SECTION 0x1000
+#define mmHBM1_MC0_USER_CTI_BASE 0x7096000ull
+#define HBM1_MC0_USER_CTI_MAX_OFFSET 0x1000
+#define HBM1_MC0_USER_CTI_SECTION 0xA000
+#define mmHBM1_MC0_FUNNEL_BASE 0x70A0000ull
+#define HBM1_MC0_FUNNEL_MAX_OFFSET 0x1000
+#define HBM1_MC0_FUNNEL_SECTION 0x30000
+#define mmHBM1_MC1_CS_DBG_ROM_TBL_BASE 0x70D0000ull
+#define HBM1_MC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM1_MC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM1_MC1_CS_STM_BASE 0x70D1000ull
+#define HBM1_MC1_CS_STM_MAX_OFFSET 0x1000
+#define HBM1_MC1_CS_STM_SECTION 0x1000
+#define mmHBM1_MC1_CS_CTI_BASE 0x70D2000ull
+#define HBM1_MC1_CS_CTI_MAX_OFFSET 0x1000
+#define HBM1_MC1_CS_CTI_SECTION 0x1000
+#define mmHBM1_MC1_CS_ETF_BASE 0x70D3000ull
+#define HBM1_MC1_CS_ETF_MAX_OFFSET 0x1000
+#define HBM1_MC1_CS_ETF_SECTION 0x1000
+#define mmHBM1_MC1_CS_SPMU_BASE 0x70D4000ull
+#define HBM1_MC1_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM1_MC1_CS_SPMU_SECTION 0x1000
+#define mmHBM1_MC1_BMON_CTI_BASE 0x70D5000ull
+#define HBM1_MC1_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM1_MC1_BMON_CTI_SECTION 0x1000
+#define mmHBM1_MC1_USER_CTI_BASE 0x70D6000ull
+#define HBM1_MC1_USER_CTI_MAX_OFFSET 0x1000
+#define HBM1_MC1_USER_CTI_SECTION 0xA000
+#define mmHBM1_MC1_FUNNEL_BASE 0x70E0000ull
+#define HBM1_MC1_FUNNEL_MAX_OFFSET 0x1000
+#define HBM1_MC1_FUNNEL_SECTION 0x30000
+#define mmHBM2_MC0_CS_DBG_ROM_TBL_BASE 0x7110000ull
+#define HBM2_MC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM2_MC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM2_MC0_CS_STM_BASE 0x7111000ull
+#define HBM2_MC0_CS_STM_MAX_OFFSET 0x1000
+#define HBM2_MC0_CS_STM_SECTION 0x1000
+#define mmHBM2_MC0_CS_CTI_BASE 0x7112000ull
+#define HBM2_MC0_CS_CTI_MAX_OFFSET 0x1000
+#define HBM2_MC0_CS_CTI_SECTION 0x1000
+#define mmHBM2_MC0_CS_ETF_BASE 0x7113000ull
+#define HBM2_MC0_CS_ETF_MAX_OFFSET 0x1000
+#define HBM2_MC0_CS_ETF_SECTION 0x1000
+#define mmHBM2_MC0_CS_SPMU_BASE 0x7114000ull
+#define HBM2_MC0_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM2_MC0_CS_SPMU_SECTION 0x1000
+#define mmHBM2_MC0_BMON_CTI_BASE 0x7115000ull
+#define HBM2_MC0_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM2_MC0_BMON_CTI_SECTION 0x1000
+#define mmHBM2_MC0_USER_CTI_BASE 0x7116000ull
+#define HBM2_MC0_USER_CTI_MAX_OFFSET 0x1000
+#define HBM2_MC0_USER_CTI_SECTION 0xA000
+#define mmHBM2_MC0_FUNNEL_BASE 0x7120000ull
+#define HBM2_MC0_FUNNEL_MAX_OFFSET 0x1000
+#define HBM2_MC0_FUNNEL_SECTION 0x30000
+#define mmHBM2_MC1_CS_DBG_ROM_TBL_BASE 0x7150000ull
+#define HBM2_MC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM2_MC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM2_MC1_CS_STM_BASE 0x7151000ull
+#define HBM2_MC1_CS_STM_MAX_OFFSET 0x1000
+#define HBM2_MC1_CS_STM_SECTION 0x1000
+#define mmHBM2_MC1_CS_CTI_BASE 0x7152000ull
+#define HBM2_MC1_CS_CTI_MAX_OFFSET 0x1000
+#define HBM2_MC1_CS_CTI_SECTION 0x1000
+#define mmHBM2_MC1_CS_ETF_BASE 0x7153000ull
+#define HBM2_MC1_CS_ETF_MAX_OFFSET 0x1000
+#define HBM2_MC1_CS_ETF_SECTION 0x1000
+#define mmHBM2_MC1_CS_SPMU_BASE 0x7154000ull
+#define HBM2_MC1_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM2_MC1_CS_SPMU_SECTION 0x1000
+#define mmHBM2_MC1_BMON_CTI_BASE 0x7155000ull
+#define HBM2_MC1_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM2_MC1_BMON_CTI_SECTION 0x1000
+#define mmHBM2_MC1_USER_CTI_BASE 0x7156000ull
+#define HBM2_MC1_USER_CTI_MAX_OFFSET 0x1000
+#define HBM2_MC1_USER_CTI_SECTION 0xA000
+#define mmHBM2_MC1_FUNNEL_BASE 0x7160000ull
+#define HBM2_MC1_FUNNEL_MAX_OFFSET 0x1000
+#define HBM2_MC1_FUNNEL_SECTION 0x30000
+#define mmHBM3_MC0_CS_DBG_ROM_TBL_BASE 0x7190000ull
+#define HBM3_MC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM3_MC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM3_MC0_CS_STM_BASE 0x7191000ull
+#define HBM3_MC0_CS_STM_MAX_OFFSET 0x1000
+#define HBM3_MC0_CS_STM_SECTION 0x1000
+#define mmHBM3_MC0_CS_CTI_BASE 0x7192000ull
+#define HBM3_MC0_CS_CTI_MAX_OFFSET 0x1000
+#define HBM3_MC0_CS_CTI_SECTION 0x1000
+#define mmHBM3_MC0_CS_ETF_BASE 0x7193000ull
+#define HBM3_MC0_CS_ETF_MAX_OFFSET 0x1000
+#define HBM3_MC0_CS_ETF_SECTION 0x1000
+#define mmHBM3_MC0_CS_SPMU_BASE 0x7194000ull
+#define HBM3_MC0_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM3_MC0_CS_SPMU_SECTION 0x1000
+#define mmHBM3_MC0_BMON_CTI_BASE 0x7195000ull
+#define HBM3_MC0_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM3_MC0_BMON_CTI_SECTION 0x1000
+#define mmHBM3_MC0_USER_CTI_BASE 0x7196000ull
+#define HBM3_MC0_USER_CTI_MAX_OFFSET 0x1000
+#define HBM3_MC0_USER_CTI_SECTION 0xA000
+#define mmHBM3_MC0_FUNNEL_BASE 0x71A0000ull
+#define HBM3_MC0_FUNNEL_MAX_OFFSET 0x1000
+#define HBM3_MC0_FUNNEL_SECTION 0x30000
+#define mmHBM3_MC1_CS_DBG_ROM_TBL_BASE 0x71D0000ull
+#define HBM3_MC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM3_MC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM3_MC1_CS_STM_BASE 0x71D1000ull
+#define HBM3_MC1_CS_STM_MAX_OFFSET 0x1000
+#define HBM3_MC1_CS_STM_SECTION 0x1000
+#define mmHBM3_MC1_CS_CTI_BASE 0x71D2000ull
+#define HBM3_MC1_CS_CTI_MAX_OFFSET 0x1000
+#define HBM3_MC1_CS_CTI_SECTION 0x1000
+#define mmHBM3_MC1_CS_ETF_BASE 0x71D3000ull
+#define HBM3_MC1_CS_ETF_MAX_OFFSET 0x1000
+#define HBM3_MC1_CS_ETF_SECTION 0x1000
+#define mmHBM3_MC1_CS_SPMU_BASE 0x71D4000ull
+#define HBM3_MC1_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM3_MC1_CS_SPMU_SECTION 0x1000
+#define mmHBM3_MC1_BMON_CTI_BASE 0x71D5000ull
+#define HBM3_MC1_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM3_MC1_BMON_CTI_SECTION 0x1000
+#define mmHBM3_MC1_USER_CTI_BASE 0x71D6000ull
+#define HBM3_MC1_USER_CTI_MAX_OFFSET 0x1000
+#define HBM3_MC1_USER_CTI_SECTION 0xA000
+#define mmHBM3_MC1_FUNNEL_BASE 0x71E0000ull
+#define HBM3_MC1_FUNNEL_MAX_OFFSET 0x1000
+#define HBM3_MC1_FUNNEL_SECTION 0x30000
+#define mmHBM4_MC0_CS_DBG_ROM_TBL_BASE 0x7210000ull
+#define HBM4_MC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM4_MC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM4_MC0_CS_STM_BASE 0x7211000ull
+#define HBM4_MC0_CS_STM_MAX_OFFSET 0x1000
+#define HBM4_MC0_CS_STM_SECTION 0x1000
+#define mmHBM4_MC0_CS_CTI_BASE 0x7212000ull
+#define HBM4_MC0_CS_CTI_MAX_OFFSET 0x1000
+#define HBM4_MC0_CS_CTI_SECTION 0x1000
+#define mmHBM4_MC0_CS_ETF_BASE 0x7213000ull
+#define HBM4_MC0_CS_ETF_MAX_OFFSET 0x1000
+#define HBM4_MC0_CS_ETF_SECTION 0x1000
+#define mmHBM4_MC0_CS_SPMU_BASE 0x7214000ull
+#define HBM4_MC0_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM4_MC0_CS_SPMU_SECTION 0x1000
+#define mmHBM4_MC0_BMON_CTI_BASE 0x7215000ull
+#define HBM4_MC0_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM4_MC0_BMON_CTI_SECTION 0x1000
+#define mmHBM4_MC0_USER_CTI_BASE 0x7216000ull
+#define HBM4_MC0_USER_CTI_MAX_OFFSET 0x1000
+#define HBM4_MC0_USER_CTI_SECTION 0xA000
+#define mmHBM4_MC0_FUNNEL_BASE 0x7220000ull
+#define HBM4_MC0_FUNNEL_MAX_OFFSET 0x1000
+#define HBM4_MC0_FUNNEL_SECTION 0x30000
+#define mmHBM4_MC1_CS_DBG_ROM_TBL_BASE 0x7250000ull
+#define HBM4_MC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM4_MC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM4_MC1_CS_STM_BASE 0x7251000ull
+#define HBM4_MC1_CS_STM_MAX_OFFSET 0x1000
+#define HBM4_MC1_CS_STM_SECTION 0x1000
+#define mmHBM4_MC1_CS_CTI_BASE 0x7252000ull
+#define HBM4_MC1_CS_CTI_MAX_OFFSET 0x1000
+#define HBM4_MC1_CS_CTI_SECTION 0x1000
+#define mmHBM4_MC1_CS_ETF_BASE 0x7253000ull
+#define HBM4_MC1_CS_ETF_MAX_OFFSET 0x1000
+#define HBM4_MC1_CS_ETF_SECTION 0x1000
+#define mmHBM4_MC1_CS_SPMU_BASE 0x7254000ull
+#define HBM4_MC1_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM4_MC1_CS_SPMU_SECTION 0x1000
+#define mmHBM4_MC1_BMON_CTI_BASE 0x7255000ull
+#define HBM4_MC1_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM4_MC1_BMON_CTI_SECTION 0x1000
+#define mmHBM4_MC1_USER_CTI_BASE 0x7256000ull
+#define HBM4_MC1_USER_CTI_MAX_OFFSET 0x1000
+#define HBM4_MC1_USER_CTI_SECTION 0xA000
+#define mmHBM4_MC1_FUNNEL_BASE 0x7260000ull
+#define HBM4_MC1_FUNNEL_MAX_OFFSET 0x1000
+#define HBM4_MC1_FUNNEL_SECTION 0x30000
+#define mmHBM5_MC0_CS_DBG_ROM_TBL_BASE 0x7290000ull
+#define HBM5_MC0_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM5_MC0_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM5_MC0_CS_STM_BASE 0x7291000ull
+#define HBM5_MC0_CS_STM_MAX_OFFSET 0x1000
+#define HBM5_MC0_CS_STM_SECTION 0x1000
+#define mmHBM5_MC0_CS_CTI_BASE 0x7292000ull
+#define HBM5_MC0_CS_CTI_MAX_OFFSET 0x1000
+#define HBM5_MC0_CS_CTI_SECTION 0x1000
+#define mmHBM5_MC0_CS_ETF_BASE 0x7293000ull
+#define HBM5_MC0_CS_ETF_MAX_OFFSET 0x1000
+#define HBM5_MC0_CS_ETF_SECTION 0x1000
+#define mmHBM5_MC0_CS_SPMU_BASE 0x7294000ull
+#define HBM5_MC0_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM5_MC0_CS_SPMU_SECTION 0x1000
+#define mmHBM5_MC0_BMON_CTI_BASE 0x7295000ull
+#define HBM5_MC0_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM5_MC0_BMON_CTI_SECTION 0x1000
+#define mmHBM5_MC0_USER_CTI_BASE 0x7296000ull
+#define HBM5_MC0_USER_CTI_MAX_OFFSET 0x1000
+#define HBM5_MC0_USER_CTI_SECTION 0xA000
+#define mmHBM5_MC0_FUNNEL_BASE 0x72A0000ull
+#define HBM5_MC0_FUNNEL_MAX_OFFSET 0x1000
+#define HBM5_MC0_FUNNEL_SECTION 0x30000
+#define mmHBM5_MC1_CS_DBG_ROM_TBL_BASE 0x72D0000ull
+#define HBM5_MC1_CS_DBG_ROM_TBL_MAX_OFFSET 0x1000
+#define HBM5_MC1_CS_DBG_ROM_TBL_SECTION 0x1000
+#define mmHBM5_MC1_CS_STM_BASE 0x72D1000ull
+#define HBM5_MC1_CS_STM_MAX_OFFSET 0x1000
+#define HBM5_MC1_CS_STM_SECTION 0x1000
+#define mmHBM5_MC1_CS_CTI_BASE 0x72D2000ull
+#define HBM5_MC1_CS_CTI_MAX_OFFSET 0x1000
+#define HBM5_MC1_CS_CTI_SECTION 0x1000
+#define mmHBM5_MC1_CS_ETF_BASE 0x72D3000ull
+#define HBM5_MC1_CS_ETF_MAX_OFFSET 0x1000
+#define HBM5_MC1_CS_ETF_SECTION 0x1000
+#define mmHBM5_MC1_CS_SPMU_BASE 0x72D4000ull
+#define HBM5_MC1_CS_SPMU_MAX_OFFSET 0x1000
+#define HBM5_MC1_CS_SPMU_SECTION 0x1000
+#define mmHBM5_MC1_BMON_CTI_BASE 0x72D5000ull
+#define HBM5_MC1_BMON_CTI_MAX_OFFSET 0x1000
+#define HBM5_MC1_BMON_CTI_SECTION 0x1000
+#define mmHBM5_MC1_USER_CTI_BASE 0x72D6000ull
+#define HBM5_MC1_USER_CTI_MAX_OFFSET 0x1000
+#define HBM5_MC1_USER_CTI_SECTION 0xA000
+#define mmHBM5_MC1_FUNNEL_BASE 0x72E0000ull
+#define HBM5_MC1_FUNNEL_MAX_OFFSET 0x1000
+#define HBM5_MC1_FUNNEL_SECTION 0x20000
+#define mmNIC0_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7300000ull
+#define NIC0_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC0_DBG_STM_0_BASE 0x7301000ull
+#define NIC0_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_STM_0_SECTION 0x1000
+#define mmNIC0_DBG_CTI_0_BASE 0x7302000ull
+#define NIC0_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_CTI_0_SECTION 0x1000
+#define mmNIC0_DBG_ETF_0_BASE 0x7303000ull
+#define NIC0_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_ETF_0_SECTION 0x1000
+#define mmNIC0_DBG_SPMU_0_BASE 0x7304000ull
+#define NIC0_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC0_DBG_USER_CTI_0_BASE 0x7305000ull
+#define NIC0_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC0_DBG_BMON_CTI_0_BASE 0x7306000ull
+#define NIC0_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC0_DBG_BMON0_0_BASE 0x7307000ull
+#define NIC0_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC0_DBG_BMON1_0_BASE 0x7308000ull
+#define NIC0_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC0_DBG_BMON2_0_BASE 0x7309000ull
+#define NIC0_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC0_DBG_ARC_RTT0_BASE 0x7310000ull
+#define NIC0_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC0_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC0_DBG_CS_DBG_ROM_TABLE_1_BASE 0x7320000ull
+#define NIC0_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC0_DBG_STM_1_BASE 0x7321000ull
+#define NIC0_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_STM_1_SECTION 0x1000
+#define mmNIC0_DBG_CTI_1_BASE 0x7322000ull
+#define NIC0_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_CTI_1_SECTION 0x1000
+#define mmNIC0_DBG_ETF_1_BASE 0x7323000ull
+#define NIC0_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_ETF_1_SECTION 0x1000
+#define mmNIC0_DBG_SPMU_1_BASE 0x7324000ull
+#define NIC0_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC0_DBG_USER_CTI_1_BASE 0x7325000ull
+#define NIC0_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC0_DBG_BMON_CTI_1_BASE 0x7326000ull
+#define NIC0_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC0_DBG_BMON0_1_BASE 0x7327000ull
+#define NIC0_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC0_DBG_BMON1_1_BASE 0x7328000ull
+#define NIC0_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC0_DBG_BMON2_1_BASE 0x7329000ull
+#define NIC0_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC0_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC0_DBG_ARC_RTT1_BASE 0x7330000ull
+#define NIC0_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC0_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC0_DBG_FUNNEL_TX_BASE 0x7338000ull
+#define NIC0_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC0_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC0_DBG_FUNNEL_NCH_BASE 0x7339000ull
+#define NIC0_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC0_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC1_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7340000ull
+#define NIC1_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC1_DBG_STM_0_BASE 0x7341000ull
+#define NIC1_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_STM_0_SECTION 0x1000
+#define mmNIC1_DBG_CTI_0_BASE 0x7342000ull
+#define NIC1_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_CTI_0_SECTION 0x1000
+#define mmNIC1_DBG_ETF_0_BASE 0x7343000ull
+#define NIC1_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_ETF_0_SECTION 0x1000
+#define mmNIC1_DBG_SPMU_0_BASE 0x7344000ull
+#define NIC1_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC1_DBG_USER_CTI_0_BASE 0x7345000ull
+#define NIC1_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC1_DBG_BMON_CTI_0_BASE 0x7346000ull
+#define NIC1_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC1_DBG_BMON0_0_BASE 0x7347000ull
+#define NIC1_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC1_DBG_BMON1_0_BASE 0x7348000ull
+#define NIC1_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC1_DBG_BMON2_0_BASE 0x7349000ull
+#define NIC1_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC1_DBG_ARC_RTT0_BASE 0x7350000ull
+#define NIC1_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC1_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC1_DBG_CS_DBG_ROM_TABLE_1_BASE 0x7360000ull
+#define NIC1_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC1_DBG_STM_1_BASE 0x7361000ull
+#define NIC1_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_STM_1_SECTION 0x1000
+#define mmNIC1_DBG_CTI_1_BASE 0x7362000ull
+#define NIC1_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_CTI_1_SECTION 0x1000
+#define mmNIC1_DBG_ETF_1_BASE 0x7363000ull
+#define NIC1_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_ETF_1_SECTION 0x1000
+#define mmNIC1_DBG_SPMU_1_BASE 0x7364000ull
+#define NIC1_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC1_DBG_USER_CTI_1_BASE 0x7365000ull
+#define NIC1_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC1_DBG_BMON_CTI_1_BASE 0x7366000ull
+#define NIC1_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC1_DBG_BMON0_1_BASE 0x7367000ull
+#define NIC1_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC1_DBG_BMON1_1_BASE 0x7368000ull
+#define NIC1_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC1_DBG_BMON2_1_BASE 0x7369000ull
+#define NIC1_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC1_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC1_DBG_ARC_RTT1_BASE 0x7370000ull
+#define NIC1_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC1_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC1_DBG_FUNNEL_TX_BASE 0x7378000ull
+#define NIC1_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC1_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC1_DBG_FUNNEL_NCH_BASE 0x7379000ull
+#define NIC1_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC1_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC2_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7380000ull
+#define NIC2_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC2_DBG_STM_0_BASE 0x7381000ull
+#define NIC2_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_STM_0_SECTION 0x1000
+#define mmNIC2_DBG_CTI_0_BASE 0x7382000ull
+#define NIC2_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_CTI_0_SECTION 0x1000
+#define mmNIC2_DBG_ETF_0_BASE 0x7383000ull
+#define NIC2_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_ETF_0_SECTION 0x1000
+#define mmNIC2_DBG_SPMU_0_BASE 0x7384000ull
+#define NIC2_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC2_DBG_USER_CTI_0_BASE 0x7385000ull
+#define NIC2_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC2_DBG_BMON_CTI_0_BASE 0x7386000ull
+#define NIC2_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC2_DBG_BMON0_0_BASE 0x7387000ull
+#define NIC2_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC2_DBG_BMON1_0_BASE 0x7388000ull
+#define NIC2_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC2_DBG_BMON2_0_BASE 0x7389000ull
+#define NIC2_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC2_DBG_ARC_RTT0_BASE 0x7390000ull
+#define NIC2_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC2_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC2_DBG_CS_DBG_ROM_TABLE_1_BASE 0x73A0000ull
+#define NIC2_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC2_DBG_STM_1_BASE 0x73A1000ull
+#define NIC2_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_STM_1_SECTION 0x1000
+#define mmNIC2_DBG_CTI_1_BASE 0x73A2000ull
+#define NIC2_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_CTI_1_SECTION 0x1000
+#define mmNIC2_DBG_ETF_1_BASE 0x73A3000ull
+#define NIC2_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_ETF_1_SECTION 0x1000
+#define mmNIC2_DBG_SPMU_1_BASE 0x73A4000ull
+#define NIC2_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC2_DBG_USER_CTI_1_BASE 0x73A5000ull
+#define NIC2_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC2_DBG_BMON_CTI_1_BASE 0x73A6000ull
+#define NIC2_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC2_DBG_BMON0_1_BASE 0x73A7000ull
+#define NIC2_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC2_DBG_BMON1_1_BASE 0x73A8000ull
+#define NIC2_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC2_DBG_BMON2_1_BASE 0x73A9000ull
+#define NIC2_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC2_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC2_DBG_ARC_RTT1_BASE 0x73B0000ull
+#define NIC2_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC2_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC2_DBG_FUNNEL_TX_BASE 0x73B8000ull
+#define NIC2_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC2_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC2_DBG_FUNNEL_NCH_BASE 0x73B9000ull
+#define NIC2_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC2_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC3_DBG_CS_DBG_ROM_TABLE_0_BASE 0x73C0000ull
+#define NIC3_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC3_DBG_STM_0_BASE 0x73C1000ull
+#define NIC3_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_STM_0_SECTION 0x1000
+#define mmNIC3_DBG_CTI_0_BASE 0x73C2000ull
+#define NIC3_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_CTI_0_SECTION 0x1000
+#define mmNIC3_DBG_ETF_0_BASE 0x73C3000ull
+#define NIC3_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_ETF_0_SECTION 0x1000
+#define mmNIC3_DBG_SPMU_0_BASE 0x73C4000ull
+#define NIC3_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC3_DBG_USER_CTI_0_BASE 0x73C5000ull
+#define NIC3_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC3_DBG_BMON_CTI_0_BASE 0x73C6000ull
+#define NIC3_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC3_DBG_BMON0_0_BASE 0x73C7000ull
+#define NIC3_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC3_DBG_BMON1_0_BASE 0x73C8000ull
+#define NIC3_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC3_DBG_BMON2_0_BASE 0x73C9000ull
+#define NIC3_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC3_DBG_ARC_RTT0_BASE 0x73D0000ull
+#define NIC3_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC3_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC3_DBG_CS_DBG_ROM_TABLE_1_BASE 0x73E0000ull
+#define NIC3_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC3_DBG_STM_1_BASE 0x73E1000ull
+#define NIC3_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_STM_1_SECTION 0x1000
+#define mmNIC3_DBG_CTI_1_BASE 0x73E2000ull
+#define NIC3_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_CTI_1_SECTION 0x1000
+#define mmNIC3_DBG_ETF_1_BASE 0x73E3000ull
+#define NIC3_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_ETF_1_SECTION 0x1000
+#define mmNIC3_DBG_SPMU_1_BASE 0x73E4000ull
+#define NIC3_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC3_DBG_USER_CTI_1_BASE 0x73E5000ull
+#define NIC3_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC3_DBG_BMON_CTI_1_BASE 0x73E6000ull
+#define NIC3_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC3_DBG_BMON0_1_BASE 0x73E7000ull
+#define NIC3_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC3_DBG_BMON1_1_BASE 0x73E8000ull
+#define NIC3_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC3_DBG_BMON2_1_BASE 0x73E9000ull
+#define NIC3_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC3_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC3_DBG_ARC_RTT1_BASE 0x73F0000ull
+#define NIC3_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC3_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC3_DBG_FUNNEL_TX_BASE 0x73F8000ull
+#define NIC3_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC3_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC3_DBG_FUNNEL_NCH_BASE 0x73F9000ull
+#define NIC3_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC3_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC4_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7400000ull
+#define NIC4_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC4_DBG_STM_0_BASE 0x7401000ull
+#define NIC4_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_STM_0_SECTION 0x1000
+#define mmNIC4_DBG_CTI_0_BASE 0x7402000ull
+#define NIC4_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_CTI_0_SECTION 0x1000
+#define mmNIC4_DBG_ETF_0_BASE 0x7403000ull
+#define NIC4_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_ETF_0_SECTION 0x1000
+#define mmNIC4_DBG_SPMU_0_BASE 0x7404000ull
+#define NIC4_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC4_DBG_USER_CTI_0_BASE 0x7405000ull
+#define NIC4_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC4_DBG_BMON_CTI_0_BASE 0x7406000ull
+#define NIC4_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC4_DBG_BMON0_0_BASE 0x7407000ull
+#define NIC4_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC4_DBG_BMON1_0_BASE 0x7408000ull
+#define NIC4_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC4_DBG_BMON2_0_BASE 0x7409000ull
+#define NIC4_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC4_DBG_ARC_RTT0_BASE 0x7410000ull
+#define NIC4_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC4_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC4_DBG_CS_DBG_ROM_TABLE_1_BASE 0x7420000ull
+#define NIC4_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC4_DBG_STM_1_BASE 0x7421000ull
+#define NIC4_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_STM_1_SECTION 0x1000
+#define mmNIC4_DBG_CTI_1_BASE 0x7422000ull
+#define NIC4_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_CTI_1_SECTION 0x1000
+#define mmNIC4_DBG_ETF_1_BASE 0x7423000ull
+#define NIC4_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_ETF_1_SECTION 0x1000
+#define mmNIC4_DBG_SPMU_1_BASE 0x7424000ull
+#define NIC4_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC4_DBG_USER_CTI_1_BASE 0x7425000ull
+#define NIC4_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC4_DBG_BMON_CTI_1_BASE 0x7426000ull
+#define NIC4_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC4_DBG_BMON0_1_BASE 0x7427000ull
+#define NIC4_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC4_DBG_BMON1_1_BASE 0x7428000ull
+#define NIC4_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC4_DBG_BMON2_1_BASE 0x7429000ull
+#define NIC4_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC4_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC4_DBG_ARC_RTT1_BASE 0x7430000ull
+#define NIC4_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC4_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC4_DBG_FUNNEL_TX_BASE 0x7438000ull
+#define NIC4_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC4_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC4_DBG_FUNNEL_NCH_BASE 0x7439000ull
+#define NIC4_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC4_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC5_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7440000ull
+#define NIC5_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC5_DBG_STM_0_BASE 0x7441000ull
+#define NIC5_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_STM_0_SECTION 0x1000
+#define mmNIC5_DBG_CTI_0_BASE 0x7442000ull
+#define NIC5_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_CTI_0_SECTION 0x1000
+#define mmNIC5_DBG_ETF_0_BASE 0x7443000ull
+#define NIC5_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_ETF_0_SECTION 0x1000
+#define mmNIC5_DBG_SPMU_0_BASE 0x7444000ull
+#define NIC5_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC5_DBG_USER_CTI_0_BASE 0x7445000ull
+#define NIC5_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC5_DBG_BMON_CTI_0_BASE 0x7446000ull
+#define NIC5_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC5_DBG_BMON0_0_BASE 0x7447000ull
+#define NIC5_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC5_DBG_BMON1_0_BASE 0x7448000ull
+#define NIC5_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC5_DBG_BMON2_0_BASE 0x7449000ull
+#define NIC5_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC5_DBG_ARC_RTT0_BASE 0x7450000ull
+#define NIC5_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC5_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC5_DBG_CS_DBG_ROM_TABLE_1_BASE 0x7460000ull
+#define NIC5_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC5_DBG_STM_1_BASE 0x7461000ull
+#define NIC5_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_STM_1_SECTION 0x1000
+#define mmNIC5_DBG_CTI_1_BASE 0x7462000ull
+#define NIC5_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_CTI_1_SECTION 0x1000
+#define mmNIC5_DBG_ETF_1_BASE 0x7463000ull
+#define NIC5_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_ETF_1_SECTION 0x1000
+#define mmNIC5_DBG_SPMU_1_BASE 0x7464000ull
+#define NIC5_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC5_DBG_USER_CTI_1_BASE 0x7465000ull
+#define NIC5_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC5_DBG_BMON_CTI_1_BASE 0x7466000ull
+#define NIC5_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC5_DBG_BMON0_1_BASE 0x7467000ull
+#define NIC5_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC5_DBG_BMON1_1_BASE 0x7468000ull
+#define NIC5_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC5_DBG_BMON2_1_BASE 0x7469000ull
+#define NIC5_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC5_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC5_DBG_ARC_RTT1_BASE 0x7470000ull
+#define NIC5_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC5_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC5_DBG_FUNNEL_TX_BASE 0x7478000ull
+#define NIC5_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC5_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC5_DBG_FUNNEL_NCH_BASE 0x7479000ull
+#define NIC5_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC5_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC6_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7480000ull
+#define NIC6_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC6_DBG_STM_0_BASE 0x7481000ull
+#define NIC6_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_STM_0_SECTION 0x1000
+#define mmNIC6_DBG_CTI_0_BASE 0x7482000ull
+#define NIC6_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_CTI_0_SECTION 0x1000
+#define mmNIC6_DBG_ETF_0_BASE 0x7483000ull
+#define NIC6_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_ETF_0_SECTION 0x1000
+#define mmNIC6_DBG_SPMU_0_BASE 0x7484000ull
+#define NIC6_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC6_DBG_USER_CTI_0_BASE 0x7485000ull
+#define NIC6_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC6_DBG_BMON_CTI_0_BASE 0x7486000ull
+#define NIC6_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC6_DBG_BMON0_0_BASE 0x7487000ull
+#define NIC6_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC6_DBG_BMON1_0_BASE 0x7488000ull
+#define NIC6_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC6_DBG_BMON2_0_BASE 0x7489000ull
+#define NIC6_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC6_DBG_ARC_RTT0_BASE 0x7490000ull
+#define NIC6_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC6_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC6_DBG_CS_DBG_ROM_TABLE_1_BASE 0x74A0000ull
+#define NIC6_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC6_DBG_STM_1_BASE 0x74A1000ull
+#define NIC6_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_STM_1_SECTION 0x1000
+#define mmNIC6_DBG_CTI_1_BASE 0x74A2000ull
+#define NIC6_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_CTI_1_SECTION 0x1000
+#define mmNIC6_DBG_ETF_1_BASE 0x74A3000ull
+#define NIC6_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_ETF_1_SECTION 0x1000
+#define mmNIC6_DBG_SPMU_1_BASE 0x74A4000ull
+#define NIC6_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC6_DBG_USER_CTI_1_BASE 0x74A5000ull
+#define NIC6_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC6_DBG_BMON_CTI_1_BASE 0x74A6000ull
+#define NIC6_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC6_DBG_BMON0_1_BASE 0x74A7000ull
+#define NIC6_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC6_DBG_BMON1_1_BASE 0x74A8000ull
+#define NIC6_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC6_DBG_BMON2_1_BASE 0x74A9000ull
+#define NIC6_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC6_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC6_DBG_ARC_RTT1_BASE 0x74B0000ull
+#define NIC6_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC6_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC6_DBG_FUNNEL_TX_BASE 0x74B8000ull
+#define NIC6_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC6_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC6_DBG_FUNNEL_NCH_BASE 0x74B9000ull
+#define NIC6_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC6_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC7_DBG_CS_DBG_ROM_TABLE_0_BASE 0x74C0000ull
+#define NIC7_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC7_DBG_STM_0_BASE 0x74C1000ull
+#define NIC7_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_STM_0_SECTION 0x1000
+#define mmNIC7_DBG_CTI_0_BASE 0x74C2000ull
+#define NIC7_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_CTI_0_SECTION 0x1000
+#define mmNIC7_DBG_ETF_0_BASE 0x74C3000ull
+#define NIC7_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_ETF_0_SECTION 0x1000
+#define mmNIC7_DBG_SPMU_0_BASE 0x74C4000ull
+#define NIC7_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC7_DBG_USER_CTI_0_BASE 0x74C5000ull
+#define NIC7_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC7_DBG_BMON_CTI_0_BASE 0x74C6000ull
+#define NIC7_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC7_DBG_BMON0_0_BASE 0x74C7000ull
+#define NIC7_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC7_DBG_BMON1_0_BASE 0x74C8000ull
+#define NIC7_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC7_DBG_BMON2_0_BASE 0x74C9000ull
+#define NIC7_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC7_DBG_ARC_RTT0_BASE 0x74D0000ull
+#define NIC7_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC7_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC7_DBG_CS_DBG_ROM_TABLE_1_BASE 0x74E0000ull
+#define NIC7_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC7_DBG_STM_1_BASE 0x74E1000ull
+#define NIC7_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_STM_1_SECTION 0x1000
+#define mmNIC7_DBG_CTI_1_BASE 0x74E2000ull
+#define NIC7_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_CTI_1_SECTION 0x1000
+#define mmNIC7_DBG_ETF_1_BASE 0x74E3000ull
+#define NIC7_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_ETF_1_SECTION 0x1000
+#define mmNIC7_DBG_SPMU_1_BASE 0x74E4000ull
+#define NIC7_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC7_DBG_USER_CTI_1_BASE 0x74E5000ull
+#define NIC7_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC7_DBG_BMON_CTI_1_BASE 0x74E6000ull
+#define NIC7_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC7_DBG_BMON0_1_BASE 0x74E7000ull
+#define NIC7_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC7_DBG_BMON1_1_BASE 0x74E8000ull
+#define NIC7_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC7_DBG_BMON2_1_BASE 0x74E9000ull
+#define NIC7_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC7_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC7_DBG_ARC_RTT1_BASE 0x74F0000ull
+#define NIC7_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC7_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC7_DBG_FUNNEL_TX_BASE 0x74F8000ull
+#define NIC7_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC7_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC7_DBG_FUNNEL_NCH_BASE 0x74F9000ull
+#define NIC7_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC7_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC8_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7500000ull
+#define NIC8_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC8_DBG_STM_0_BASE 0x7501000ull
+#define NIC8_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_STM_0_SECTION 0x1000
+#define mmNIC8_DBG_CTI_0_BASE 0x7502000ull
+#define NIC8_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_CTI_0_SECTION 0x1000
+#define mmNIC8_DBG_ETF_0_BASE 0x7503000ull
+#define NIC8_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_ETF_0_SECTION 0x1000
+#define mmNIC8_DBG_SPMU_0_BASE 0x7504000ull
+#define NIC8_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC8_DBG_USER_CTI_0_BASE 0x7505000ull
+#define NIC8_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC8_DBG_BMON_CTI_0_BASE 0x7506000ull
+#define NIC8_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC8_DBG_BMON0_0_BASE 0x7507000ull
+#define NIC8_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC8_DBG_BMON1_0_BASE 0x7508000ull
+#define NIC8_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC8_DBG_BMON2_0_BASE 0x7509000ull
+#define NIC8_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC8_DBG_ARC_RTT0_BASE 0x7510000ull
+#define NIC8_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC8_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC8_DBG_CS_DBG_ROM_TABLE_1_BASE 0x7520000ull
+#define NIC8_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC8_DBG_STM_1_BASE 0x7521000ull
+#define NIC8_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_STM_1_SECTION 0x1000
+#define mmNIC8_DBG_CTI_1_BASE 0x7522000ull
+#define NIC8_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_CTI_1_SECTION 0x1000
+#define mmNIC8_DBG_ETF_1_BASE 0x7523000ull
+#define NIC8_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_ETF_1_SECTION 0x1000
+#define mmNIC8_DBG_SPMU_1_BASE 0x7524000ull
+#define NIC8_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC8_DBG_USER_CTI_1_BASE 0x7525000ull
+#define NIC8_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC8_DBG_BMON_CTI_1_BASE 0x7526000ull
+#define NIC8_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC8_DBG_BMON0_1_BASE 0x7527000ull
+#define NIC8_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC8_DBG_BMON1_1_BASE 0x7528000ull
+#define NIC8_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC8_DBG_BMON2_1_BASE 0x7529000ull
+#define NIC8_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC8_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC8_DBG_ARC_RTT1_BASE 0x7530000ull
+#define NIC8_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC8_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC8_DBG_FUNNEL_TX_BASE 0x7538000ull
+#define NIC8_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC8_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC8_DBG_FUNNEL_NCH_BASE 0x7539000ull
+#define NIC8_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC8_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC9_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7540000ull
+#define NIC9_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC9_DBG_STM_0_BASE 0x7541000ull
+#define NIC9_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_STM_0_SECTION 0x1000
+#define mmNIC9_DBG_CTI_0_BASE 0x7542000ull
+#define NIC9_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_CTI_0_SECTION 0x1000
+#define mmNIC9_DBG_ETF_0_BASE 0x7543000ull
+#define NIC9_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_ETF_0_SECTION 0x1000
+#define mmNIC9_DBG_SPMU_0_BASE 0x7544000ull
+#define NIC9_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC9_DBG_USER_CTI_0_BASE 0x7545000ull
+#define NIC9_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC9_DBG_BMON_CTI_0_BASE 0x7546000ull
+#define NIC9_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC9_DBG_BMON0_0_BASE 0x7547000ull
+#define NIC9_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC9_DBG_BMON1_0_BASE 0x7548000ull
+#define NIC9_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC9_DBG_BMON2_0_BASE 0x7549000ull
+#define NIC9_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC9_DBG_ARC_RTT0_BASE 0x7550000ull
+#define NIC9_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC9_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC9_DBG_CS_DBG_ROM_TABLE_1_BASE 0x7560000ull
+#define NIC9_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC9_DBG_STM_1_BASE 0x7561000ull
+#define NIC9_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_STM_1_SECTION 0x1000
+#define mmNIC9_DBG_CTI_1_BASE 0x7562000ull
+#define NIC9_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_CTI_1_SECTION 0x1000
+#define mmNIC9_DBG_ETF_1_BASE 0x7563000ull
+#define NIC9_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_ETF_1_SECTION 0x1000
+#define mmNIC9_DBG_SPMU_1_BASE 0x7564000ull
+#define NIC9_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC9_DBG_USER_CTI_1_BASE 0x7565000ull
+#define NIC9_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC9_DBG_BMON_CTI_1_BASE 0x7566000ull
+#define NIC9_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC9_DBG_BMON0_1_BASE 0x7567000ull
+#define NIC9_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC9_DBG_BMON1_1_BASE 0x7568000ull
+#define NIC9_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC9_DBG_BMON2_1_BASE 0x7569000ull
+#define NIC9_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC9_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC9_DBG_ARC_RTT1_BASE 0x7570000ull
+#define NIC9_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC9_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC9_DBG_FUNNEL_TX_BASE 0x7578000ull
+#define NIC9_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC9_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC9_DBG_FUNNEL_NCH_BASE 0x7579000ull
+#define NIC9_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC9_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC10_DBG_CS_DBG_ROM_TABLE_0_BASE 0x7580000ull
+#define NIC10_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC10_DBG_STM_0_BASE 0x7581000ull
+#define NIC10_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_STM_0_SECTION 0x1000
+#define mmNIC10_DBG_CTI_0_BASE 0x7582000ull
+#define NIC10_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_CTI_0_SECTION 0x1000
+#define mmNIC10_DBG_ETF_0_BASE 0x7583000ull
+#define NIC10_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_ETF_0_SECTION 0x1000
+#define mmNIC10_DBG_SPMU_0_BASE 0x7584000ull
+#define NIC10_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC10_DBG_USER_CTI_0_BASE 0x7585000ull
+#define NIC10_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC10_DBG_BMON_CTI_0_BASE 0x7586000ull
+#define NIC10_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC10_DBG_BMON0_0_BASE 0x7587000ull
+#define NIC10_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC10_DBG_BMON1_0_BASE 0x7588000ull
+#define NIC10_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC10_DBG_BMON2_0_BASE 0x7589000ull
+#define NIC10_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC10_DBG_ARC_RTT0_BASE 0x7590000ull
+#define NIC10_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC10_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC10_DBG_CS_DBG_ROM_TABLE_1_BASE 0x75A0000ull
+#define NIC10_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC10_DBG_STM_1_BASE 0x75A1000ull
+#define NIC10_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_STM_1_SECTION 0x1000
+#define mmNIC10_DBG_CTI_1_BASE 0x75A2000ull
+#define NIC10_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_CTI_1_SECTION 0x1000
+#define mmNIC10_DBG_ETF_1_BASE 0x75A3000ull
+#define NIC10_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_ETF_1_SECTION 0x1000
+#define mmNIC10_DBG_SPMU_1_BASE 0x75A4000ull
+#define NIC10_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC10_DBG_USER_CTI_1_BASE 0x75A5000ull
+#define NIC10_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC10_DBG_BMON_CTI_1_BASE 0x75A6000ull
+#define NIC10_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC10_DBG_BMON0_1_BASE 0x75A7000ull
+#define NIC10_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC10_DBG_BMON1_1_BASE 0x75A8000ull
+#define NIC10_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC10_DBG_BMON2_1_BASE 0x75A9000ull
+#define NIC10_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC10_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC10_DBG_ARC_RTT1_BASE 0x75B0000ull
+#define NIC10_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC10_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC10_DBG_FUNNEL_TX_BASE 0x75B8000ull
+#define NIC10_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC10_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC10_DBG_FUNNEL_NCH_BASE 0x75B9000ull
+#define NIC10_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+#define NIC10_DBG_FUNNEL_NCH_SECTION 0x7000
+#define mmNIC11_DBG_CS_DBG_ROM_TABLE_0_BASE 0x75C0000ull
+#define NIC11_DBG_CS_DBG_ROM_TABLE_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_CS_DBG_ROM_TABLE_0_SECTION 0x1000
+#define mmNIC11_DBG_STM_0_BASE 0x75C1000ull
+#define NIC11_DBG_STM_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_STM_0_SECTION 0x1000
+#define mmNIC11_DBG_CTI_0_BASE 0x75C2000ull
+#define NIC11_DBG_CTI_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_CTI_0_SECTION 0x1000
+#define mmNIC11_DBG_ETF_0_BASE 0x75C3000ull
+#define NIC11_DBG_ETF_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_ETF_0_SECTION 0x1000
+#define mmNIC11_DBG_SPMU_0_BASE 0x75C4000ull
+#define NIC11_DBG_SPMU_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_SPMU_0_SECTION 0x1000
+#define mmNIC11_DBG_USER_CTI_0_BASE 0x75C5000ull
+#define NIC11_DBG_USER_CTI_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_USER_CTI_0_SECTION 0x1000
+#define mmNIC11_DBG_BMON_CTI_0_BASE 0x75C6000ull
+#define NIC11_DBG_BMON_CTI_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON_CTI_0_SECTION 0x1000
+#define mmNIC11_DBG_BMON0_0_BASE 0x75C7000ull
+#define NIC11_DBG_BMON0_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON0_0_SECTION 0x1000
+#define mmNIC11_DBG_BMON1_0_BASE 0x75C8000ull
+#define NIC11_DBG_BMON1_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON1_0_SECTION 0x1000
+#define mmNIC11_DBG_BMON2_0_BASE 0x75C9000ull
+#define NIC11_DBG_BMON2_0_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON2_0_SECTION 0x7000
+#define mmNIC11_DBG_ARC_RTT0_BASE 0x75D0000ull
+#define NIC11_DBG_ARC_RTT0_MAX_OFFSET 0x1400
+#define NIC11_DBG_ARC_RTT0_SECTION 0x10000
+#define mmNIC11_DBG_CS_DBG_ROM_TABLE_1_BASE 0x75E0000ull
+#define NIC11_DBG_CS_DBG_ROM_TABLE_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_CS_DBG_ROM_TABLE_1_SECTION 0x1000
+#define mmNIC11_DBG_STM_1_BASE 0x75E1000ull
+#define NIC11_DBG_STM_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_STM_1_SECTION 0x1000
+#define mmNIC11_DBG_CTI_1_BASE 0x75E2000ull
+#define NIC11_DBG_CTI_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_CTI_1_SECTION 0x1000
+#define mmNIC11_DBG_ETF_1_BASE 0x75E3000ull
+#define NIC11_DBG_ETF_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_ETF_1_SECTION 0x1000
+#define mmNIC11_DBG_SPMU_1_BASE 0x75E4000ull
+#define NIC11_DBG_SPMU_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_SPMU_1_SECTION 0x1000
+#define mmNIC11_DBG_USER_CTI_1_BASE 0x75E5000ull
+#define NIC11_DBG_USER_CTI_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_USER_CTI_1_SECTION 0x1000
+#define mmNIC11_DBG_BMON_CTI_1_BASE 0x75E6000ull
+#define NIC11_DBG_BMON_CTI_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON_CTI_1_SECTION 0x1000
+#define mmNIC11_DBG_BMON0_1_BASE 0x75E7000ull
+#define NIC11_DBG_BMON0_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON0_1_SECTION 0x1000
+#define mmNIC11_DBG_BMON1_1_BASE 0x75E8000ull
+#define NIC11_DBG_BMON1_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON1_1_SECTION 0x1000
+#define mmNIC11_DBG_BMON2_1_BASE 0x75E9000ull
+#define NIC11_DBG_BMON2_1_MAX_OFFSET 0x1000
+#define NIC11_DBG_BMON2_1_SECTION 0x7000
+#define mmNIC11_DBG_ARC_RTT1_BASE 0x75F0000ull
+#define NIC11_DBG_ARC_RTT1_MAX_OFFSET 0x1400
+#define NIC11_DBG_ARC_RTT1_SECTION 0x8000
+#define mmNIC11_DBG_FUNNEL_TX_BASE 0x75F8000ull
+#define NIC11_DBG_FUNNEL_TX_MAX_OFFSET 0x1000
+#define NIC11_DBG_FUNNEL_TX_SECTION 0x1000
+#define mmNIC11_DBG_FUNNEL_NCH_BASE 0x75F9000ull
+#define NIC11_DBG_FUNNEL_NCH_MAX_OFFSET 0x1000
+
+#endif /* GAUDI2_BLOCKS_LINUX_DRIVER_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
new file mode 100644
index 000000000000..d0e2c68a639f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -0,0 +1,550 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GAUDI2_REGS_H_
+#define ASIC_REG_GAUDI2_REGS_H_
+
+#include "gaudi2_blocks_linux_driver.h"
+#include "psoc_reset_conf_regs.h"
+#include "psoc_global_conf_regs.h"
+#include "cpu_if_regs.h"
+#include "pcie_aux_regs.h"
+#include "pcie_dbi_regs.h"
+#include "pcie_wrap_regs.h"
+#include "pmmu_hbw_stlb_regs.h"
+#include "psoc_timestamp_regs.h"
+#include "psoc_etr_regs.h"
+#include "xbar_edge_0_regs.h"
+#include "xbar_mid_0_regs.h"
+#include "arc_farm_kdma_regs.h"
+#include "arc_farm_kdma_ctx_regs.h"
+#include "arc_farm_kdma_kdma_cgm_regs.h"
+#include "arc_farm_arc0_aux_regs.h"
+#include "arc_farm_arc0_acp_eng_regs.h"
+#include "arc_farm_kdma_ctx_axuser_regs.h"
+#include "arc_farm_arc0_dup_eng_axuser_regs.h"
+#include "arc_farm_arc0_dup_eng_regs.h"
+#include "dcore0_sync_mngr_objs_regs.h"
+#include "dcore0_sync_mngr_glbl_regs.h"
+#include "dcore0_sync_mngr_mstr_if_axuser_regs.h"
+#include "pdma0_qm_arc_aux_regs.h"
+#include "pdma0_core_ctx_regs.h"
+#include "pdma0_core_regs.h"
+#include "pdma0_qm_axuser_secured_regs.h"
+#include "pdma0_qm_regs.h"
+#include "pdma0_qm_cgm_regs.h"
+#include "pdma0_core_ctx_axuser_regs.h"
+#include "pdma1_core_ctx_axuser_regs.h"
+#include "pdma0_qm_axuser_nonsecured_regs.h"
+#include "pdma1_qm_axuser_nonsecured_regs.h"
+#include "dcore0_tpc0_qm_regs.h"
+#include "dcore0_tpc0_qm_cgm_regs.h"
+#include "dcore0_tpc0_qm_axuser_nonsecured_regs.h"
+#include "dcore0_tpc0_qm_arc_aux_regs.h"
+#include "dcore0_tpc0_cfg_regs.h"
+#include "dcore0_tpc0_cfg_qm_regs.h"
+#include "dcore0_tpc0_cfg_axuser_regs.h"
+#include "dcore0_tpc0_cfg_qm_sync_object_regs.h"
+#include "dcore0_tpc0_cfg_kernel_regs.h"
+#include "dcore0_tpc0_cfg_kernel_tensor_0_regs.h"
+#include "dcore0_tpc0_cfg_qm_tensor_0_regs.h"
+#include "dcore0_tpc0_cfg_special_regs.h"
+#include "dcore0_tpc0_eml_funnel_regs.h"
+#include "dcore0_tpc0_eml_etf_regs.h"
+#include "dcore0_tpc0_eml_stm_regs.h"
+#include "dcore0_tpc0_eml_busmon_0_regs.h"
+#include "dcore0_tpc0_eml_spmu_regs.h"
+#include "pmmu_pif_regs.h"
+#include "dcore0_edma0_qm_cgm_regs.h"
+#include "dcore0_edma0_core_regs.h"
+#include "dcore0_edma0_qm_regs.h"
+#include "dcore0_edma0_qm_arc_aux_regs.h"
+#include "dcore0_edma0_core_ctx_regs.h"
+#include "dcore0_edma0_core_ctx_axuser_regs.h"
+#include "dcore0_edma0_qm_axuser_nonsecured_regs.h"
+#include "dcore0_edma1_core_ctx_axuser_regs.h"
+#include "dcore0_edma1_qm_axuser_nonsecured_regs.h"
+#include "dcore0_hmmu0_stlb_regs.h"
+#include "dcore0_hmmu0_mmu_regs.h"
+#include "rot0_qm_regs.h"
+#include "rot0_qm_cgm_regs.h"
+#include "rot0_qm_arc_aux_regs.h"
+#include "rot0_regs.h"
+#include "rot0_desc_regs.h"
+#include "rot0_qm_axuser_nonsecured_regs.h"
+#include "dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h"
+#include "dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h"
+#include "dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h"
+#include "dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h"
+#include "dcore0_rtr0_ctrl_regs.h"
+#include "dcore0_dec0_cmd_regs.h"
+#include "dcore0_vdec0_brdg_ctrl_regs.h"
+#include "dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h"
+#include "dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h"
+#include "dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h"
+#include "dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h"
+#include "dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h"
+#include "dcore0_vdec0_ctrl_special_regs.h"
+#include "pcie_vdec0_brdg_ctrl_axuser_dec_regs.h"
+#include "pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h"
+#include "pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h"
+#include "pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h"
+#include "pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h"
+#include "pcie_dec0_cmd_regs.h"
+#include "pcie_vdec0_brdg_ctrl_regs.h"
+#include "pcie_vdec0_ctrl_special_regs.h"
+#include "dcore0_mme_qm_regs.h"
+#include "dcore0_mme_qm_arc_aux_regs.h"
+#include "dcore0_mme_qm_axuser_secured_regs.h"
+#include "dcore0_mme_qm_cgm_regs.h"
+#include "dcore0_mme_qm_arc_acp_eng_regs.h"
+#include "dcore0_mme_qm_axuser_nonsecured_regs.h"
+#include "dcore0_mme_qm_arc_dup_eng_regs.h"
+#include "dcore0_mme_qm_arc_dup_eng_axuser_regs.h"
+#include "dcore0_mme_sbte0_mstr_if_axuser_regs.h"
+#include "dcore0_mme_wb0_mstr_if_axuser_regs.h"
+#include "dcore0_mme_acc_regs.h"
+#include "dcore0_mme_ctrl_lo_regs.h"
+#include "dcore1_mme_ctrl_lo_regs.h"
+#include "dcore3_mme_ctrl_lo_regs.h"
+#include "dcore0_mme_ctrl_lo_mme_axuser_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_base_addr_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_tensor_a_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_tensor_b_regs.h"
+#include "dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h"
+
+#include "pdma0_qm_masks.h"
+#include "pdma0_core_masks.h"
+#include "pdma0_core_special_masks.h"
+#include "psoc_global_conf_masks.h"
+#include "psoc_reset_conf_masks.h"
+#include "arc_farm_kdma_masks.h"
+#include "arc_farm_kdma_ctx_masks.h"
+#include "arc_farm_arc0_aux_masks.h"
+#include "arc_farm_kdma_ctx_axuser_masks.h"
+#include "dcore0_sync_mngr_objs_masks.h"
+#include "dcore0_sync_mngr_glbl_masks.h"
+#include "dcore0_sync_mngr_mstr_if_axuser_masks.h"
+#include "dcore0_tpc0_cfg_masks.h"
+#include "dcore0_mme_ctrl_lo_masks.h"
+#include "dcore0_mme_sbte0_masks.h"
+#include "dcore0_edma0_qm_masks.h"
+#include "dcore0_edma0_core_masks.h"
+#include "dcore0_hmmu0_stlb_masks.h"
+#include "dcore0_hmmu0_mmu_masks.h"
+#include "dcore0_dec0_cmd_masks.h"
+#include "dcore0_vdec0_brdg_ctrl_masks.h"
+#include "pcie_dec0_cmd_masks.h"
+#include "pcie_vdec0_brdg_ctrl_masks.h"
+#include "rot0_masks.h"
+#include "pmmu_hbw_stlb_masks.h"
+#include "psoc_etr_masks.h"
+
+#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x4800040
+
+#define SM_OBJS_PROT_BITS_OFFS 0x14000
+
+#define DCORE_OFFSET (mmDCORE1_TPC0_QM_BASE - mmDCORE0_TPC0_QM_BASE)
+#define DCORE_EDMA_OFFSET (mmDCORE0_EDMA1_QM_BASE - mmDCORE0_EDMA0_QM_BASE)
+#define DCORE_TPC_OFFSET (mmDCORE0_TPC1_QM_BASE - mmDCORE0_TPC0_QM_BASE)
+#define DCORE_DEC_OFFSET (mmDCORE0_DEC1_VSI_BASE - mmDCORE0_DEC0_VSI_BASE)
+#define DCORE_HMMU_OFFSET (mmDCORE0_HMMU1_MMU_BASE - mmDCORE0_HMMU0_MMU_BASE)
+#define NIC_QM_OFFSET (mmNIC0_QM1_BASE - mmNIC0_QM0_BASE)
+#define PDMA_OFFSET (mmPDMA1_QM_BASE - mmPDMA0_QM_BASE)
+#define ROT_OFFSET (mmROT1_BASE - mmROT0_BASE)
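+
+/*
+ * Example (illustrative): the *_OFFSET strides above are added to a
+ * DCORE0/NIC0/PDMA0/ROT0 register address to reach the same register in
+ * another engine instance, e.g. the QMAN block of TPC "tpc_id" in dcore
+ * "dcore_id" (both zero-based) sits at:
+ *
+ *   qm_base = mmDCORE0_TPC0_QM_BASE +
+ *             dcore_id * DCORE_OFFSET + tpc_id * DCORE_TPC_OFFSET;
+ *
+ * The helper actually used by the driver may differ; this only sketches
+ * how the strides compose.
+ */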
+
+#define TPC_CFG_BASE_ADDRESS_HIGH_OFFSET \
+ (mmDCORE0_TPC0_CFG_CFG_BASE_ADDRESS_HIGH - mmDCORE0_TPC0_CFG_BASE)
+
+#define TPC_CFG_SM_BASE_ADDRESS_HIGH_OFFSET \
+ (mmDCORE0_TPC0_CFG_SM_BASE_ADDRESS_HIGH - mmDCORE0_TPC0_CFG_BASE)
+
+#define TPC_CFG_STALL_OFFSET (mmDCORE0_TPC0_CFG_TPC_STALL - mmDCORE0_TPC0_CFG_BASE)
+#define TPC_CFG_STALL_ON_ERR_OFFSET (mmDCORE0_TPC0_CFG_STALL_ON_ERR - mmDCORE0_TPC0_CFG_BASE)
+#define TPC_CFG_TPC_INTR_MASK_OFFSET (mmDCORE0_TPC0_CFG_TPC_INTR_MASK - mmDCORE0_TPC0_CFG_BASE)
+#define TPC_CFG_MSS_CONFIG_OFFSET (mmDCORE0_TPC0_CFG_MSS_CONFIG - mmDCORE0_TPC0_CFG_BASE)
+
+#define MME_ACC_INTR_MASK_OFFSET (mmDCORE0_MME_ACC_INTR_MASK - mmDCORE0_MME_ACC_BASE)
+#define MME_ACC_WR_AXI_AGG_COUT0_OFFSET (mmDCORE0_MME_ACC_WR_AXI_AGG_COUT0 - mmDCORE0_MME_ACC_BASE)
+#define MME_ACC_WR_AXI_AGG_COUT1_OFFSET (mmDCORE0_MME_ACC_WR_AXI_AGG_COUT1 - mmDCORE0_MME_ACC_BASE)
+#define MME_ACC_AP_LFSR_POLY_OFFSET (mmDCORE0_MME_ACC_AP_LFSR_POLY - mmDCORE0_MME_ACC_BASE)
+#define MME_ACC_AP_LFSR_SEED_SEL_OFFSET (mmDCORE0_MME_ACC_AP_LFSR_SEED_SEL - mmDCORE0_MME_ACC_BASE)
+#define MME_ACC_AP_LFSR_SEED_WDATA_OFFSET \
+ (mmDCORE0_MME_ACC_AP_LFSR_SEED_WDATA - mmDCORE0_MME_ACC_BASE)
+
+#define DMA_CORE_CFG_0_OFFSET (mmARC_FARM_KDMA_CFG_0 - mmARC_FARM_KDMA_BASE)
+#define DMA_CORE_CFG_1_OFFSET (mmARC_FARM_KDMA_CFG_1 - mmARC_FARM_KDMA_BASE)
+#define DMA_CORE_PROT_OFFSET (mmARC_FARM_KDMA_PROT - mmARC_FARM_KDMA_BASE)
+#define DMA_CORE_ERRMSG_ADDR_LO_OFFSET (mmARC_FARM_KDMA_ERRMSG_ADDR_LO - mmARC_FARM_KDMA_BASE)
+#define DMA_CORE_ERRMSG_ADDR_HI_OFFSET (mmARC_FARM_KDMA_ERRMSG_ADDR_HI - mmARC_FARM_KDMA_BASE)
+#define DMA_CORE_ERRMSG_WDATA_OFFSET (mmARC_FARM_KDMA_ERRMSG_WDATA - mmARC_FARM_KDMA_BASE)
+
+#define QM_PQ_BASE_LO_0_OFFSET (mmPDMA0_QM_PQ_BASE_LO_0 - mmPDMA0_QM_BASE)
+#define QM_PQ_BASE_HI_0_OFFSET (mmPDMA0_QM_PQ_BASE_HI_0 - mmPDMA0_QM_BASE)
+#define QM_PQ_SIZE_0_OFFSET (mmPDMA0_QM_PQ_SIZE_0 - mmPDMA0_QM_BASE)
+#define QM_PQ_PI_0_OFFSET (mmPDMA0_QM_PQ_PI_0 - mmPDMA0_QM_BASE)
+#define QM_PQ_CI_0_OFFSET (mmPDMA0_QM_PQ_CI_0 - mmPDMA0_QM_BASE)
+#define QM_CP_FENCE0_CNT_0_OFFSET (mmPDMA0_QM_CP_FENCE0_CNT_0 - mmPDMA0_QM_BASE)
+
+#define QM_CP_MSG_BASE0_ADDR_LO_0_OFFSET (mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 - mmPDMA0_QM_BASE)
+#define QM_CP_MSG_BASE0_ADDR_HI_0_OFFSET (mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 - mmPDMA0_QM_BASE)
+#define QM_CP_MSG_BASE1_ADDR_LO_0_OFFSET (mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 - mmPDMA0_QM_BASE)
+#define QM_CP_MSG_BASE1_ADDR_HI_0_OFFSET (mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 - mmPDMA0_QM_BASE)
+
+#define QM_CP_CFG_OFFSET (mmPDMA0_QM_CP_CFG - mmPDMA0_QM_BASE)
+#define QM_PQC_HBW_BASE_LO_0_OFFSET (mmPDMA0_QM_PQC_HBW_BASE_LO_0 - mmPDMA0_QM_BASE)
+#define QM_PQC_HBW_BASE_HI_0_OFFSET (mmPDMA0_QM_PQC_HBW_BASE_HI_0 - mmPDMA0_QM_BASE)
+#define QM_PQC_SIZE_0_OFFSET (mmPDMA0_QM_PQC_SIZE_0 - mmPDMA0_QM_BASE)
+#define QM_PQC_PI_0_OFFSET (mmPDMA0_QM_PQC_PI_0 - mmPDMA0_QM_BASE)
+#define QM_PQC_LBW_WDATA_0_OFFSET (mmPDMA0_QM_PQC_LBW_WDATA_0 - mmPDMA0_QM_BASE)
+#define QM_PQC_LBW_BASE_LO_0_OFFSET (mmPDMA0_QM_PQC_LBW_BASE_LO_0 - mmPDMA0_QM_BASE)
+#define QM_PQC_LBW_BASE_HI_0_OFFSET (mmPDMA0_QM_PQC_LBW_BASE_HI_0 - mmPDMA0_QM_BASE)
+#define QM_GLBL_ERR_ADDR_LO_OFFSET (mmPDMA0_QM_GLBL_ERR_ADDR_LO - mmPDMA0_QM_BASE)
+#define QM_PQC_CFG_OFFSET (mmPDMA0_QM_PQC_CFG - mmPDMA0_QM_BASE)
+#define QM_ARB_CFG_0_OFFSET (mmPDMA0_QM_ARB_CFG_0 - mmPDMA0_QM_BASE)
+#define QM_GLBL_CFG0_OFFSET (mmPDMA0_QM_GLBL_CFG0 - mmPDMA0_QM_BASE)
+#define QM_GLBL_CFG1_OFFSET (mmPDMA0_QM_GLBL_CFG1 - mmPDMA0_QM_BASE)
+#define QM_GLBL_CFG2_OFFSET (mmPDMA0_QM_GLBL_CFG2 - mmPDMA0_QM_BASE)
+#define QM_GLBL_PROT_OFFSET (mmPDMA0_QM_GLBL_PROT - mmPDMA0_QM_BASE)
+#define QM_GLBL_ERR_CFG_OFFSET (mmPDMA0_QM_GLBL_ERR_CFG - mmPDMA0_QM_BASE)
+#define QM_GLBL_ERR_CFG1_OFFSET (mmPDMA0_QM_GLBL_ERR_CFG1 - mmPDMA0_QM_BASE)
+#define QM_GLBL_ERR_ADDR_HI_OFFSET (mmPDMA0_QM_GLBL_ERR_ADDR_HI - mmPDMA0_QM_BASE)
+#define QM_GLBL_ERR_WDATA_OFFSET (mmPDMA0_QM_GLBL_ERR_WDATA - mmPDMA0_QM_BASE)
+#define QM_ARB_ERR_MSG_EN_OFFSET (mmPDMA0_QM_ARB_ERR_MSG_EN - mmPDMA0_QM_BASE)
+#define QM_ARB_SLV_CHOISE_WDT_OFFSET (mmPDMA0_QM_ARB_SLV_CHOICE_WDT - mmPDMA0_QM_BASE)
+#define QM_FENCE2_OFFSET (mmPDMA0_QM_CP_FENCE2_RDATA_0 - mmPDMA0_QM_BASE)
+#define QM_SEI_STATUS_OFFSET (mmPDMA0_QM_SEI_STATUS - mmPDMA0_QM_BASE)
+
+#define SFT_OFFSET (mmSFT1_HBW_RTR_IF0_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE)
+#define SFT_IF_RTR_OFFSET (mmSFT0_HBW_RTR_IF1_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE)
+
+#define ARC_HALT_REQ_OFFSET (mmARC_FARM_ARC0_AUX_RUN_HALT_REQ - mmARC_FARM_ARC0_AUX_BASE)
+
+#define ARC_REGION_CFG_OFFSET(region) \
+ (mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_0 + (region * 4) - mmARC_FARM_ARC0_AUX_BASE)
+
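+/*
+ * Example (illustrative): the region config registers are 4 bytes apart, so
+ * ARC_REGION_CFG_OFFSET(3) evaluates to
+ * (mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_0 - mmARC_FARM_ARC0_AUX_BASE) + 12,
+ * i.e. the offset of region 3's config register relative to the ARC AUX
+ * block, which can then be applied to any engine's ARC AUX base.
+ */
+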
+#define ARC_DCCM_UPPER_EN_OFFSET \
+ (mmARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN - mmARC_FARM_ARC0_AUX_BASE)
+
+#define PCIE_VDEC_OFFSET \
+ (mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE - mmPCIE_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define DCORE_MME_SBTE_OFFSET \
+ (mmDCORE0_MME_SBTE1_MSTR_IF_RR_SHRD_HBW_BASE - mmDCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define DCORE_MME_WB_OFFSET \
+ (mmDCORE0_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE - mmDCORE0_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define DCORE_RTR_OFFSET \
+ (mmDCORE0_RTR1_MSTR_IF_RR_SHRD_HBW_BASE - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define DCORE_VDEC_OFFSET \
+ (mmDCORE0_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE - mmDCORE0_VDEC0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define MMU_OFFSET(REG) (REG - mmDCORE0_HMMU0_MMU_BASE)
+#define MMU_BYPASS_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_BYPASS)
+#define MMU_SPI_SEI_MASK_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_SPI_SEI_MASK)
+#define MMU_SPI_SEI_CAUSE_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_SPI_SEI_CAUSE)
+#define MMU_ENABLE_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_ENABLE)
+#define MMU_DDR_RANGE_REG_ENABLE MMU_OFFSET(mmDCORE0_HMMU0_MMU_DDR_RANGE_REG_ENABLE)
+#define MMU_RR_SEC_MIN_63_32_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_63_32_0)
+#define MMU_RR_SEC_MIN_31_0_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MIN_31_0_0)
+#define MMU_RR_SEC_MAX_63_32_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_63_32_0)
+#define MMU_RR_SEC_MAX_31_0_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_SEC_MAX_31_0_0)
+#define MMU_RR_PRIV_MIN_63_32_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_63_32_0)
+#define MMU_RR_PRIV_MIN_31_0_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MIN_31_0_0)
+#define MMU_RR_PRIV_MAX_63_32_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_63_32_0)
+#define MMU_RR_PRIV_MAX_31_0_0_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_MMU_RR_PRIV_MAX_31_0_0)
+#define MMU_INTERRUPT_CLR_OFFSET MMU_OFFSET(mmDCORE0_HMMU0_MMU_INTERRUPT_CLR)
+
+#define STLB_OFFSET(REG) (REG - mmDCORE0_HMMU0_STLB_BASE)
+#define STLB_BUSY_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_BUSY)
+#define STLB_ASID_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_ASID)
+#define STLB_HOP0_PA43_12_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_HOP0_PA43_12)
+#define STLB_HOP0_PA63_44_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_HOP0_PA63_44)
+#define STLB_HOP_CONFIGURATION_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_HOP_CONFIGURATION)
+#define STLB_INV_ALL_START_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_INV_ALL_START)
+#define STLB_SRAM_INIT_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_SRAM_INIT)
+#define STLB_SET_THRESHOLD_HOP3_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP3)
+#define STLB_SET_THRESHOLD_HOP2_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP2)
+#define STLB_SET_THRESHOLD_HOP1_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP1)
+#define STLB_SET_THRESHOLD_HOP0_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_SET_THRESHOLD_HOP0)
+#define STLB_RANGE_INV_START_LSB_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_RANGE_INV_START_LSB)
+#define STLB_RANGE_INV_START_MSB_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_RANGE_INV_START_MSB)
+#define STLB_RANGE_INV_END_LSB_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_RANGE_INV_END_LSB)
+#define STLB_RANGE_INV_END_MSB_OFFSET STLB_OFFSET(mmDCORE0_HMMU0_STLB_RANGE_INV_END_MSB)
+
+#define STLB_LL_LOOKUP_MASK_63_32_OFFSET \
+ STLB_OFFSET(mmDCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32)
+
+#define STLB_RANGE_CACHE_INVALIDATION_OFFSET \
+ STLB_OFFSET(mmDCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION)
+
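+/*
+ * Example (illustrative): MMU_OFFSET()/STLB_OFFSET() strip the DCORE0 HMMU0
+ * base from an absolute register address, leaving a block-relative offset
+ * that can be applied to any HMMU instance, e.g.:
+ *
+ *   cause_reg = mmDCORE0_HMMU1_MMU_BASE + MMU_SPI_SEI_CAUSE_OFFSET;
+ *
+ * This only sketches how the offsets are meant to be consumed.
+ */
+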
+/* RTR CTRL RAZWI related offsets */
+#define RTR_MSTR_IF_OFFSET (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE - mmDCORE0_RTR0_CTRL_BASE)
+
+#define RTR_LBW_MSTR_IF_OFFSET \
+ (mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured hbw aw addr high */
+#define DEC_RAZWI_HBW_AW_ADDR_HI \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AW_HI_ADDR - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured hbw aw addr low */
+#define DEC_RAZWI_HBW_AW_ADDR_LO \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AW_LO_ADDR - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured hbw aw set */
+#define DEC_RAZWI_HBW_AW_SET \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AW_SET - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured hbw ar addr high */
+#define DEC_RAZWI_HBW_AR_ADDR_HI \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AR_HI_ADDR - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured hbw ar addr low */
+#define DEC_RAZWI_HBW_AR_ADDR_LO \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AR_LO_ADDR - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured hbw ar set */
+#define DEC_RAZWI_HBW_AR_SET \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_HBW_AR_SET - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured lbw aw addr */
+#define DEC_RAZWI_LBW_AW_ADDR \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AW_ADDR - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured lbw aw set */
+#define DEC_RAZWI_LBW_AW_SET \
+	(mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AW_SET - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured lbw ar addr */
+#define DEC_RAZWI_LBW_AR_ADDR \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AR_ADDR - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured lbw ar set */
+#define DEC_RAZWI_LBW_AR_SET \
+ (mmDCORE0_RTR0_CTRL_DEC_RAZWI_LBW_AR_SET - mmDCORE0_RTR0_CTRL_BASE)
+
+/* RAZWI captured shared hbw aw addr high */
+#define RR_SHRD_HBW_AW_RAZWI_HI \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_HI - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared hbw aw addr low */
+#define RR_SHRD_HBW_AW_RAZWI_LO \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_LO - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared hbw ar addr high */
+#define RR_SHRD_HBW_AR_RAZWI_HI \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_HI - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared hbw ar addr low */
+#define RR_SHRD_HBW_AR_RAZWI_LO \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_LO - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared aw XY coordinates */
+#define RR_SHRD_HBW_AW_RAZWI_XY \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_XY - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared ar XY coordinates */
+#define RR_SHRD_HBW_AR_RAZWI_XY \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_XY - mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI hbw shared occurred due to write access */
+#define RR_SHRD_HBW_AW_RAZWI_HAPPENED \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AW_RAZWI_HAPPENED - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI hbw shared occurred due to read access */
+#define RR_SHRD_HBW_AR_RAZWI_HAPPENED \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_AR_RAZWI_HAPPENED - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared lbw aw addr */
+#define RR_SHRD_LBW_AW_RAZWI \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AW_RAZWI - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared lbw ar addr */
+#define RR_SHRD_LBW_AR_RAZWI \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AR_RAZWI - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared lbw aw XY coordinates */
+#define RR_SHRD_LBW_AW_RAZWI_XY \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AW_RAZWI_XY - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI captured shared lbw ar XY coordinates */
+#define RR_SHRD_LBW_AR_RAZWI_XY \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AR_RAZWI_XY - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI lbw shared occurred due to write access */
+#define RR_SHRD_LBW_AW_RAZWI_HAPPENED \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AW_RAZWI_HAPPENED - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+/* RAZWI lbw shared occurred due to read access */
+#define RR_SHRD_LBW_AR_RAZWI_HAPPENED \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_AR_RAZWI_HAPPENED - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define BRDG_CTRL_BLOCK_OFFSET (mmDCORE0_VDEC0_BRDG_CTRL_BASE - mmDCORE0_DEC0_CMD_BASE)
+#define SPECIAL_BLOCK_OFFSET (mmDCORE0_VDEC0_BRDG_CTRL_SPECIAL_BASE - mmDCORE0_DEC0_CMD_BASE)
+#define SFT_DCORE_OFFSET (mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE - mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE)
+#define SFT_IF_OFFSET (mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE - mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE)
+
+#define BRDG_CTRL_NRM_MSIX_LBW_AWADDR \
+ (mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR - mmDCORE0_VDEC0_BRDG_CTRL_BASE)
+
+#define BRDG_CTRL_NRM_MSIX_LBW_WDATA \
+ (mmDCORE0_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA - mmDCORE0_VDEC0_BRDG_CTRL_BASE)
+
+#define BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR \
+ (mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR - mmDCORE0_VDEC0_BRDG_CTRL_BASE)
+
+#define BRDG_CTRL_ABNRM_MSIX_LBW_WDATA \
+ (mmDCORE0_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA - mmDCORE0_VDEC0_BRDG_CTRL_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MIN_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MIN_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MIN_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MAX_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_SEC_RANGE_MAX_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_SEC_RANGE_MAX_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MIN_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MIN_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MIN_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MAX_HI_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_HI_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_SHRD_HBW_PRIV_RANGE_MAX_LO_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_PRIV_RANGE_MAX_LO_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define RR_LBW_SEC_RANGE_MIN_SHORT_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_SHORT_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_SEC_RANGE_MAX_SHORT_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_SHORT_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_PRIV_RANGE_MIN_SHORT_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_SHORT_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_PRIV_RANGE_MAX_SHORT_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_SHORT_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_SEC_RANGE_MIN_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MIN_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_SEC_RANGE_MAX_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_SEC_RANGE_MAX_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_PRIV_RANGE_MIN_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MIN_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define RR_LBW_PRIV_RANGE_MAX_0_OFFSET \
+ (mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_PRIV_RANGE_MAX_0 - \
+ mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE)
+
+#define ARC_AUX_DCCM_QUEUE_PUSH_REG_0_OFFSET \
+ (mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_0 - mmARC_FARM_ARC0_AUX_BASE)
+
+#define MMU_STATIC_MULTI_PAGE_SIZE_OFFSET \
+ (mmDCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE - mmDCORE0_HMMU0_MMU_BASE)
+
+#define HBM_MC_SPI_TEMP_PIN_CHG_MASK BIT(0)
+#define HBM_MC_SPI_THR_ENG_MASK BIT(1)
+#define HBM_MC_SPI_THR_DIS_ENG_MASK BIT(2)
+#define HBM_MC_SPI_IEEE1500_COMP_MASK BIT(3)
+#define HBM_MC_SPI_IEEE1500_PAUSED_MASK BIT(4)
+
+#include "nic0_qpc0_regs.h"
+#include "nic0_qm0_regs.h"
+#include "nic0_qm_arc_aux0_regs.h"
+#include "nic0_qm0_cgm_regs.h"
+#include "nic0_umr0_0_completion_queue_ci_1_regs.h"
+#include "nic0_umr0_0_unsecure_doorbell0_regs.h"
+
+#define NIC_OFFSET (mmNIC1_MSTR_IF_RR_SHRD_HBW_BASE - mmNIC0_MSTR_IF_RR_SHRD_HBW_BASE)
+
+#define NIC_UMR_OFFSET \
+ (mmNIC0_UMR0_1_UNSECURE_DOORBELL0_BASE - mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE)
+
+#endif /* ASIC_REG_GAUDI2_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
new file mode 100644
index 000000000000..d49906a68511
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QM0_CGM_REGS_H_
+#define ASIC_REG_NIC0_QM0_CGM_REGS_H_
+
+/*
+ *****************************************
+ * NIC0_QM0_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmNIC0_QM0_CGM_CFG 0x541AD80
+
+#define mmNIC0_QM0_CGM_STS 0x541AD84
+
+#define mmNIC0_QM0_CGM_CFG1 0x541AD88
+
+#endif /* ASIC_REG_NIC0_QM0_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
new file mode 100644
index 000000000000..acb19c1cd4bd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QM0_REGS_H_
+#define ASIC_REG_NIC0_QM0_REGS_H_
+
+/*
+ *****************************************
+ * NIC0_QM0
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC0_QM0_GLBL_CFG0 0x541A000
+
+#define mmNIC0_QM0_GLBL_CFG1 0x541A004
+
+#define mmNIC0_QM0_GLBL_CFG2 0x541A008
+
+#define mmNIC0_QM0_GLBL_ERR_CFG 0x541A00C
+
+#define mmNIC0_QM0_GLBL_ERR_CFG1 0x541A010
+
+#define mmNIC0_QM0_GLBL_ERR_ARC_HALT_EN 0x541A014
+
+#define mmNIC0_QM0_GLBL_AXCACHE 0x541A018
+
+#define mmNIC0_QM0_GLBL_STS0 0x541A01C
+
+#define mmNIC0_QM0_GLBL_STS1 0x541A020
+
+#define mmNIC0_QM0_GLBL_ERR_STS_0 0x541A024
+
+#define mmNIC0_QM0_GLBL_ERR_STS_1 0x541A028
+
+#define mmNIC0_QM0_GLBL_ERR_STS_2 0x541A02C
+
+#define mmNIC0_QM0_GLBL_ERR_STS_3 0x541A030
+
+#define mmNIC0_QM0_GLBL_ERR_STS_4 0x541A034
+
+#define mmNIC0_QM0_GLBL_ERR_MSG_EN_0 0x541A038
+
+#define mmNIC0_QM0_GLBL_ERR_MSG_EN_1 0x541A03C
+
+#define mmNIC0_QM0_GLBL_ERR_MSG_EN_2 0x541A040
+
+#define mmNIC0_QM0_GLBL_ERR_MSG_EN_3 0x541A044
+
+#define mmNIC0_QM0_GLBL_ERR_MSG_EN_4 0x541A048
+
+#define mmNIC0_QM0_GLBL_PROT 0x541A04C
+
+#define mmNIC0_QM0_PQ_BASE_LO_0 0x541A050
+
+#define mmNIC0_QM0_PQ_BASE_LO_1 0x541A054
+
+#define mmNIC0_QM0_PQ_BASE_LO_2 0x541A058
+
+#define mmNIC0_QM0_PQ_BASE_LO_3 0x541A05C
+
+#define mmNIC0_QM0_PQ_BASE_HI_0 0x541A060
+
+#define mmNIC0_QM0_PQ_BASE_HI_1 0x541A064
+
+#define mmNIC0_QM0_PQ_BASE_HI_2 0x541A068
+
+#define mmNIC0_QM0_PQ_BASE_HI_3 0x541A06C
+
+#define mmNIC0_QM0_PQ_SIZE_0 0x541A070
+
+#define mmNIC0_QM0_PQ_SIZE_1 0x541A074
+
+#define mmNIC0_QM0_PQ_SIZE_2 0x541A078
+
+#define mmNIC0_QM0_PQ_SIZE_3 0x541A07C
+
+#define mmNIC0_QM0_PQ_PI_0 0x541A080
+
+#define mmNIC0_QM0_PQ_PI_1 0x541A084
+
+#define mmNIC0_QM0_PQ_PI_2 0x541A088
+
+#define mmNIC0_QM0_PQ_PI_3 0x541A08C
+
+#define mmNIC0_QM0_PQ_CI_0 0x541A090
+
+#define mmNIC0_QM0_PQ_CI_1 0x541A094
+
+#define mmNIC0_QM0_PQ_CI_2 0x541A098
+
+#define mmNIC0_QM0_PQ_CI_3 0x541A09C
+
+#define mmNIC0_QM0_PQ_CFG0_0 0x541A0A0
+
+#define mmNIC0_QM0_PQ_CFG0_1 0x541A0A4
+
+#define mmNIC0_QM0_PQ_CFG0_2 0x541A0A8
+
+#define mmNIC0_QM0_PQ_CFG0_3 0x541A0AC
+
+#define mmNIC0_QM0_PQ_CFG1_0 0x541A0B0
+
+#define mmNIC0_QM0_PQ_CFG1_1 0x541A0B4
+
+#define mmNIC0_QM0_PQ_CFG1_2 0x541A0B8
+
+#define mmNIC0_QM0_PQ_CFG1_3 0x541A0BC
+
+#define mmNIC0_QM0_PQ_STS0_0 0x541A0C0
+
+#define mmNIC0_QM0_PQ_STS0_1 0x541A0C4
+
+#define mmNIC0_QM0_PQ_STS0_2 0x541A0C8
+
+#define mmNIC0_QM0_PQ_STS0_3 0x541A0CC
+
+#define mmNIC0_QM0_PQ_STS1_0 0x541A0D0
+
+#define mmNIC0_QM0_PQ_STS1_1 0x541A0D4
+
+#define mmNIC0_QM0_PQ_STS1_2 0x541A0D8
+
+#define mmNIC0_QM0_PQ_STS1_3 0x541A0DC
+
+#define mmNIC0_QM0_CQ_CFG0_0 0x541A0E0
+
+#define mmNIC0_QM0_CQ_CFG0_1 0x541A0E4
+
+#define mmNIC0_QM0_CQ_CFG0_2 0x541A0E8
+
+#define mmNIC0_QM0_CQ_CFG0_3 0x541A0EC
+
+#define mmNIC0_QM0_CQ_CFG0_4 0x541A0F0
+
+#define mmNIC0_QM0_CQ_STS0_0 0x541A0F4
+
+#define mmNIC0_QM0_CQ_STS0_1 0x541A0F8
+
+#define mmNIC0_QM0_CQ_STS0_2 0x541A0FC
+
+#define mmNIC0_QM0_CQ_STS0_3 0x541A100
+
+#define mmNIC0_QM0_CQ_STS0_4 0x541A104
+
+#define mmNIC0_QM0_CQ_CFG1_0 0x541A108
+
+#define mmNIC0_QM0_CQ_CFG1_1 0x541A10C
+
+#define mmNIC0_QM0_CQ_CFG1_2 0x541A110
+
+#define mmNIC0_QM0_CQ_CFG1_3 0x541A114
+
+#define mmNIC0_QM0_CQ_CFG1_4 0x541A118
+
+#define mmNIC0_QM0_CQ_STS1_0 0x541A11C
+
+#define mmNIC0_QM0_CQ_STS1_1 0x541A120
+
+#define mmNIC0_QM0_CQ_STS1_2 0x541A124
+
+#define mmNIC0_QM0_CQ_STS1_3 0x541A128
+
+#define mmNIC0_QM0_CQ_STS1_4 0x541A12C
+
+#define mmNIC0_QM0_CQ_PTR_LO_0 0x541A150
+
+#define mmNIC0_QM0_CQ_PTR_HI_0 0x541A154
+
+#define mmNIC0_QM0_CQ_TSIZE_0 0x541A158
+
+#define mmNIC0_QM0_CQ_CTL_0 0x541A15C
+
+#define mmNIC0_QM0_CQ_PTR_LO_1 0x541A160
+
+#define mmNIC0_QM0_CQ_PTR_HI_1 0x541A164
+
+#define mmNIC0_QM0_CQ_TSIZE_1 0x541A168
+
+#define mmNIC0_QM0_CQ_CTL_1 0x541A16C
+
+#define mmNIC0_QM0_CQ_PTR_LO_2 0x541A170
+
+#define mmNIC0_QM0_CQ_PTR_HI_2 0x541A174
+
+#define mmNIC0_QM0_CQ_TSIZE_2 0x541A178
+
+#define mmNIC0_QM0_CQ_CTL_2 0x541A17C
+
+#define mmNIC0_QM0_CQ_PTR_LO_3 0x541A180
+
+#define mmNIC0_QM0_CQ_PTR_HI_3 0x541A184
+
+#define mmNIC0_QM0_CQ_TSIZE_3 0x541A188
+
+#define mmNIC0_QM0_CQ_CTL_3 0x541A18C
+
+#define mmNIC0_QM0_CQ_PTR_LO_4 0x541A190
+
+#define mmNIC0_QM0_CQ_PTR_HI_4 0x541A194
+
+#define mmNIC0_QM0_CQ_TSIZE_4 0x541A198
+
+#define mmNIC0_QM0_CQ_CTL_4 0x541A19C
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_0 0x541A1A0
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_1 0x541A1A4
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_2 0x541A1A8
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_3 0x541A1AC
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_4 0x541A1B0
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_0 0x541A1B4
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_1 0x541A1B8
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_2 0x541A1BC
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_3 0x541A1C0
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_4 0x541A1C4
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_0 0x541A1C8
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_1 0x541A1CC
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_2 0x541A1D0
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_3 0x541A1D4
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_4 0x541A1D8
+
+#define mmNIC0_QM0_CQ_IFIFO_STS_0 0x541A1DC
+
+#define mmNIC0_QM0_CQ_IFIFO_STS_1 0x541A1E0
+
+#define mmNIC0_QM0_CQ_IFIFO_STS_2 0x541A1E4
+
+#define mmNIC0_QM0_CQ_IFIFO_STS_3 0x541A1E8
+
+#define mmNIC0_QM0_CQ_IFIFO_STS_4 0x541A1EC
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 0x541A1F0
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 0x541A1F4
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 0x541A1F8
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 0x541A1FC
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 0x541A200
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 0x541A204
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 0x541A208
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 0x541A20C
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 0x541A210
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 0x541A214
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 0x541A218
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 0x541A21C
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 0x541A220
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 0x541A224
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 0x541A228
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 0x541A22C
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 0x541A230
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 0x541A234
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 0x541A238
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 0x541A23C
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 0x541A240
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 0x541A244
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 0x541A248
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 0x541A24C
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 0x541A250
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 0x541A254
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 0x541A258
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 0x541A25C
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 0x541A260
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 0x541A264
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 0x541A268
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 0x541A26C
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 0x541A270
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 0x541A274
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 0x541A278
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 0x541A27C
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 0x541A280
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 0x541A284
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 0x541A288
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 0x541A28C
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_0 0x541A290
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_1 0x541A294
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_2 0x541A298
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_3 0x541A29C
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_4 0x541A2A0
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_0 0x541A2A4
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_1 0x541A2A8
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_2 0x541A2AC
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_3 0x541A2B0
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_4 0x541A2B4
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_0 0x541A2B8
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_1 0x541A2BC
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_2 0x541A2C0
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_3 0x541A2C4
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_4 0x541A2C8
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_0 0x541A2CC
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_1 0x541A2D0
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_2 0x541A2D4
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_3 0x541A2D8
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_4 0x541A2DC
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_0 0x541A2E0
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_1 0x541A2E4
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_2 0x541A2E8
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_3 0x541A2EC
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_4 0x541A2F0
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_0 0x541A2F4
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_1 0x541A2F8
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_2 0x541A2FC
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_3 0x541A300
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_4 0x541A304
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_0 0x541A308
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_1 0x541A30C
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_2 0x541A310
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_3 0x541A314
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_4 0x541A318
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_0 0x541A31C
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_1 0x541A320
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_2 0x541A324
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_3 0x541A328
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_4 0x541A32C
+
+#define mmNIC0_QM0_CP_BARRIER_CFG 0x541A330
+
+#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET 0x541A334
+
+#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET 0x541A338
+
+#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET 0x541A33C
+
+#define mmNIC0_QM0_CP_CQ_PTR_LO_OFFSET_0 0x541A340
+
+#define mmNIC0_QM0_CP_CQ_PTR_LO_OFFSET_1 0x541A344
+
+#define mmNIC0_QM0_CP_CQ_PTR_LO_OFFSET_2 0x541A348
+
+#define mmNIC0_QM0_CP_CQ_PTR_LO_OFFSET_3 0x541A34C
+
+#define mmNIC0_QM0_CP_CQ_PTR_LO_OFFSET_4 0x541A350
+
+#define mmNIC0_QM0_CP_STS_0 0x541A368
+
+#define mmNIC0_QM0_CP_STS_1 0x541A36C
+
+#define mmNIC0_QM0_CP_STS_2 0x541A370
+
+#define mmNIC0_QM0_CP_STS_3 0x541A374
+
+#define mmNIC0_QM0_CP_STS_4 0x541A378
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_0 0x541A37C
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_1 0x541A380
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_2 0x541A384
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_3 0x541A388
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_4 0x541A38C
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_0 0x541A390
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_1 0x541A394
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_2 0x541A398
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_3 0x541A39C
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_4 0x541A3A0
+
+#define mmNIC0_QM0_CP_PRED_0 0x541A3A4
+
+#define mmNIC0_QM0_CP_PRED_1 0x541A3A8
+
+#define mmNIC0_QM0_CP_PRED_2 0x541A3AC
+
+#define mmNIC0_QM0_CP_PRED_3 0x541A3B0
+
+#define mmNIC0_QM0_CP_PRED_4 0x541A3B4
+
+#define mmNIC0_QM0_CP_PRED_UPEN_0 0x541A3B8
+
+#define mmNIC0_QM0_CP_PRED_UPEN_1 0x541A3BC
+
+#define mmNIC0_QM0_CP_PRED_UPEN_2 0x541A3C0
+
+#define mmNIC0_QM0_CP_PRED_UPEN_3 0x541A3C4
+
+#define mmNIC0_QM0_CP_PRED_UPEN_4 0x541A3C8
+
+#define mmNIC0_QM0_CP_DBG_0_0 0x541A3CC
+
+#define mmNIC0_QM0_CP_DBG_0_1 0x541A3D0
+
+#define mmNIC0_QM0_CP_DBG_0_2 0x541A3D4
+
+#define mmNIC0_QM0_CP_DBG_0_3 0x541A3D8
+
+#define mmNIC0_QM0_CP_DBG_0_4 0x541A3DC
+
+#define mmNIC0_QM0_CP_CPDMA_UP_CRED_0 0x541A3E0
+
+#define mmNIC0_QM0_CP_CPDMA_UP_CRED_1 0x541A3E4
+
+#define mmNIC0_QM0_CP_CPDMA_UP_CRED_2 0x541A3E8
+
+#define mmNIC0_QM0_CP_CPDMA_UP_CRED_3 0x541A3EC
+
+#define mmNIC0_QM0_CP_CPDMA_UP_CRED_4 0x541A3F0
+
+#define mmNIC0_QM0_CP_IN_DATA_LO_0 0x541A3F4
+
+#define mmNIC0_QM0_CP_IN_DATA_LO_1 0x541A3F8
+
+#define mmNIC0_QM0_CP_IN_DATA_LO_2 0x541A3FC
+
+#define mmNIC0_QM0_CP_IN_DATA_LO_3 0x541A400
+
+#define mmNIC0_QM0_CP_IN_DATA_LO_4 0x541A404
+
+#define mmNIC0_QM0_CP_IN_DATA_HI_0 0x541A408
+
+#define mmNIC0_QM0_CP_IN_DATA_HI_1 0x541A40C
+
+#define mmNIC0_QM0_CP_IN_DATA_HI_2 0x541A410
+
+#define mmNIC0_QM0_CP_IN_DATA_HI_3 0x541A414
+
+#define mmNIC0_QM0_CP_IN_DATA_HI_4 0x541A418
+
+#define mmNIC0_QM0_PQC_HBW_BASE_LO_0 0x541A41C
+
+#define mmNIC0_QM0_PQC_HBW_BASE_LO_1 0x541A420
+
+#define mmNIC0_QM0_PQC_HBW_BASE_LO_2 0x541A424
+
+#define mmNIC0_QM0_PQC_HBW_BASE_LO_3 0x541A428
+
+#define mmNIC0_QM0_PQC_HBW_BASE_HI_0 0x541A42C
+
+#define mmNIC0_QM0_PQC_HBW_BASE_HI_1 0x541A430
+
+#define mmNIC0_QM0_PQC_HBW_BASE_HI_2 0x541A434
+
+#define mmNIC0_QM0_PQC_HBW_BASE_HI_3 0x541A438
+
+#define mmNIC0_QM0_PQC_SIZE_0 0x541A43C
+
+#define mmNIC0_QM0_PQC_SIZE_1 0x541A440
+
+#define mmNIC0_QM0_PQC_SIZE_2 0x541A444
+
+#define mmNIC0_QM0_PQC_SIZE_3 0x541A448
+
+#define mmNIC0_QM0_PQC_PI_0 0x541A44C
+
+#define mmNIC0_QM0_PQC_PI_1 0x541A450
+
+#define mmNIC0_QM0_PQC_PI_2 0x541A454
+
+#define mmNIC0_QM0_PQC_PI_3 0x541A458
+
+#define mmNIC0_QM0_PQC_LBW_WDATA_0 0x541A45C
+
+#define mmNIC0_QM0_PQC_LBW_WDATA_1 0x541A460
+
+#define mmNIC0_QM0_PQC_LBW_WDATA_2 0x541A464
+
+#define mmNIC0_QM0_PQC_LBW_WDATA_3 0x541A468
+
+#define mmNIC0_QM0_PQC_LBW_BASE_LO_0 0x541A46C
+
+#define mmNIC0_QM0_PQC_LBW_BASE_LO_1 0x541A470
+
+#define mmNIC0_QM0_PQC_LBW_BASE_LO_2 0x541A474
+
+#define mmNIC0_QM0_PQC_LBW_BASE_LO_3 0x541A478
+
+#define mmNIC0_QM0_PQC_LBW_BASE_HI_0 0x541A47C
+
+#define mmNIC0_QM0_PQC_LBW_BASE_HI_1 0x541A480
+
+#define mmNIC0_QM0_PQC_LBW_BASE_HI_2 0x541A484
+
+#define mmNIC0_QM0_PQC_LBW_BASE_HI_3 0x541A488
+
+#define mmNIC0_QM0_PQC_CFG 0x541A48C
+
+#define mmNIC0_QM0_PQC_SECURE_PUSH_IND 0x541A490
+
+#define mmNIC0_QM0_ARB_MASK 0x541A4A0
+
+#define mmNIC0_QM0_ARB_CFG_0 0x541A4A4
+
+#define mmNIC0_QM0_ARB_CHOICE_Q_PUSH 0x541A4A8
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_0 0x541A4AC
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_1 0x541A4B0
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_2 0x541A4B4
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_3 0x541A4B8
+
+#define mmNIC0_QM0_ARB_CFG_1 0x541A4BC
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 0x541A4C0
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 0x541A4C4
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 0x541A4C8
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 0x541A4CC
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 0x541A4D0
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 0x541A4D4
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 0x541A4D8
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 0x541A4DC
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 0x541A4E0
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 0x541A4E4
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 0x541A4E8
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 0x541A4EC
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 0x541A4F0
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 0x541A4F4
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 0x541A4F8
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 0x541A4FC
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 0x541A500
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 0x541A504
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 0x541A508
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 0x541A50C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 0x541A510
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 0x541A514
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 0x541A518
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 0x541A51C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 0x541A520
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 0x541A524
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 0x541A528
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 0x541A52C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 0x541A530
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 0x541A534
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 0x541A538
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 0x541A53C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_32 0x541A540
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_33 0x541A544
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_34 0x541A548
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_35 0x541A54C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_36 0x541A550
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_37 0x541A554
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_38 0x541A558
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_39 0x541A55C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_40 0x541A560
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_41 0x541A564
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_42 0x541A568
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_43 0x541A56C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_44 0x541A570
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_45 0x541A574
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_46 0x541A578
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_47 0x541A57C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_48 0x541A580
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_49 0x541A584
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_50 0x541A588
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_51 0x541A58C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_52 0x541A590
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_53 0x541A594
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_54 0x541A598
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_55 0x541A59C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_56 0x541A5A0
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_57 0x541A5A4
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_58 0x541A5A8
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_59 0x541A5AC
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_60 0x541A5B0
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_61 0x541A5B4
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_62 0x541A5B8
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_63 0x541A5BC
+
+#define mmNIC0_QM0_ARB_MST_CRED_INC 0x541A5E0
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_0 0x541A5E4
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_1 0x541A5E8
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_2 0x541A5EC
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_3 0x541A5F0
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_4 0x541A5F4
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_5 0x541A5F8
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_6 0x541A5FC
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_7 0x541A600
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_8 0x541A604
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_9 0x541A608
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_10 0x541A60C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_11 0x541A610
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_12 0x541A614
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_13 0x541A618
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_14 0x541A61C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_15 0x541A620
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_16 0x541A624
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_17 0x541A628
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_18 0x541A62C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_19 0x541A630
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_20 0x541A634
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_21 0x541A638
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_22 0x541A63C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_23 0x541A640
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_24 0x541A644
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_25 0x541A648
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_26 0x541A64C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_27 0x541A650
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_28 0x541A654
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_29 0x541A658
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_30 0x541A65C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_31 0x541A660
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_32 0x541A664
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_33 0x541A668
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_34 0x541A66C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_35 0x541A670
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_36 0x541A674
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_37 0x541A678
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_38 0x541A67C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_39 0x541A680
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_40 0x541A684
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_41 0x541A688
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_42 0x541A68C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_43 0x541A690
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_44 0x541A694
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_45 0x541A698
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_46 0x541A69C
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_47 0x541A6A0
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_48 0x541A6A4
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_49 0x541A6A8
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_50 0x541A6AC
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_51 0x541A6B0
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_52 0x541A6B4
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_53 0x541A6B8
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_54 0x541A6BC
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_55 0x541A6C0
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_56 0x541A6C4
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_57 0x541A6C8
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_58 0x541A6CC
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_59 0x541A6D0
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_60 0x541A6D4
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_61 0x541A6D8
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_62 0x541A6DC
+
+#define mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_63 0x541A6E0
+
+#define mmNIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0x541A704
+
+#define mmNIC0_QM0_ARB_MST_SLAVE_EN 0x541A708
+
+#define mmNIC0_QM0_ARB_MST_SLAVE_EN_1 0x541A70C
+
+#define mmNIC0_QM0_ARB_SLV_CHOICE_WDT 0x541A710
+
+#define mmNIC0_QM0_ARB_SLV_ID 0x541A714
+
+#define mmNIC0_QM0_ARB_MST_QUIET_PER 0x541A718
+
+#define mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT 0x541A744
+
+#define mmNIC0_QM0_ARB_BASE_LO 0x541A754
+
+#define mmNIC0_QM0_ARB_BASE_HI 0x541A758
+
+#define mmNIC0_QM0_ARB_STATE_STS 0x541A780
+
+#define mmNIC0_QM0_ARB_CHOICE_FULLNESS_STS 0x541A784
+
+#define mmNIC0_QM0_ARB_MSG_STS 0x541A788
+
+#define mmNIC0_QM0_ARB_SLV_CHOICE_Q_HEAD 0x541A78C
+
+#define mmNIC0_QM0_ARB_ERR_CAUSE 0x541A79C
+
+#define mmNIC0_QM0_ARB_ERR_MSG_EN 0x541A7A0
+
+#define mmNIC0_QM0_ARB_ERR_STS_DRP 0x541A7A8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS 0x541A7B0
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_1 0x541A7B4
+
+#define mmNIC0_QM0_CSMR_STRICT_PRIO_CFG 0x541A7FC
+
+#define mmNIC0_QM0_ARC_CQ_CFG0 0x541A800
+
+#define mmNIC0_QM0_ARC_CQ_CFG1 0x541A804
+
+#define mmNIC0_QM0_ARC_CQ_PTR_LO 0x541A808
+
+#define mmNIC0_QM0_ARC_CQ_PTR_HI 0x541A80C
+
+#define mmNIC0_QM0_ARC_CQ_TSIZE 0x541A810
+
+#define mmNIC0_QM0_ARC_CQ_CTL 0x541A814
+
+#define mmNIC0_QM0_ARC_CQ_IFIFO_STS 0x541A81C
+
+#define mmNIC0_QM0_ARC_CQ_STS0 0x541A820
+
+#define mmNIC0_QM0_ARC_CQ_STS1 0x541A824
+
+#define mmNIC0_QM0_ARC_CQ_TSIZE_STS 0x541A828
+
+#define mmNIC0_QM0_ARC_CQ_PTR_LO_STS 0x541A82C
+
+#define mmNIC0_QM0_ARC_CQ_PTR_HI_STS 0x541A830
+
+#define mmNIC0_QM0_CP_WR_ARC_ADDR_HI 0x541A834
+
+#define mmNIC0_QM0_CP_WR_ARC_ADDR_LO 0x541A838
+
+#define mmNIC0_QM0_ARC_CQ_IFIFO_MSG_BASE_HI 0x541A83C
+
+#define mmNIC0_QM0_ARC_CQ_IFIFO_MSG_BASE_LO 0x541A840
+
+#define mmNIC0_QM0_ARC_CQ_CTL_MSG_BASE_HI 0x541A844
+
+#define mmNIC0_QM0_ARC_CQ_CTL_MSG_BASE_LO 0x541A848
+
+#define mmNIC0_QM0_CQ_IFIFO_MSG_BASE_HI 0x541A84C
+
+#define mmNIC0_QM0_CQ_IFIFO_MSG_BASE_LO 0x541A850
+
+#define mmNIC0_QM0_CQ_CTL_MSG_BASE_HI 0x541A854
+
+#define mmNIC0_QM0_CQ_CTL_MSG_BASE_LO 0x541A858
+
+#define mmNIC0_QM0_ADDR_OVRD 0x541A85C
+
+#define mmNIC0_QM0_CQ_IFIFO_CI_0 0x541A860
+
+#define mmNIC0_QM0_CQ_IFIFO_CI_1 0x541A864
+
+#define mmNIC0_QM0_CQ_IFIFO_CI_2 0x541A868
+
+#define mmNIC0_QM0_CQ_IFIFO_CI_3 0x541A86C
+
+#define mmNIC0_QM0_CQ_IFIFO_CI_4 0x541A870
+
+#define mmNIC0_QM0_ARC_CQ_IFIFO_CI 0x541A874
+
+#define mmNIC0_QM0_CQ_CTL_CI_0 0x541A878
+
+#define mmNIC0_QM0_CQ_CTL_CI_1 0x541A87C
+
+#define mmNIC0_QM0_CQ_CTL_CI_2 0x541A880
+
+#define mmNIC0_QM0_CQ_CTL_CI_3 0x541A884
+
+#define mmNIC0_QM0_CQ_CTL_CI_4 0x541A888
+
+#define mmNIC0_QM0_ARC_CQ_CTL_CI 0x541A88C
+
+#define mmNIC0_QM0_CP_CFG 0x541A890
+
+#define mmNIC0_QM0_CP_EXT_SWITCH 0x541A894
+
+#define mmNIC0_QM0_CP_SWITCH_WD_SET 0x541A898
+
+#define mmNIC0_QM0_CP_SWITCH_WD 0x541A89C
+
+#define mmNIC0_QM0_ARC_LB_ADDR_BASE_LO 0x541A8A4
+
+#define mmNIC0_QM0_ARC_LB_ADDR_BASE_HI 0x541A8A8
+
+#define mmNIC0_QM0_ENGINE_BASE_ADDR_HI 0x541A8AC
+
+#define mmNIC0_QM0_ENGINE_BASE_ADDR_LO 0x541A8B0
+
+#define mmNIC0_QM0_ENGINE_ADDR_RANGE_SIZE 0x541A8B4
+
+#define mmNIC0_QM0_QM_ARC_AUX_BASE_ADDR_HI 0x541A8B8
+
+#define mmNIC0_QM0_QM_ARC_AUX_BASE_ADDR_LO 0x541A8BC
+
+#define mmNIC0_QM0_QM_BASE_ADDR_HI 0x541A8C0
+
+#define mmNIC0_QM0_QM_BASE_ADDR_LO 0x541A8C4
+
+#define mmNIC0_QM0_ARC_PQC_SECURE_PUSH_IND 0x541A8C8
+
+#define mmNIC0_QM0_PQC_STS_0_0 0x541A8D0
+
+#define mmNIC0_QM0_PQC_STS_0_1 0x541A8D4
+
+#define mmNIC0_QM0_PQC_STS_0_2 0x541A8D8
+
+#define mmNIC0_QM0_PQC_STS_0_3 0x541A8DC
+
+#define mmNIC0_QM0_PQC_STS_1_0 0x541A8E0
+
+#define mmNIC0_QM0_PQC_STS_1_1 0x541A8E4
+
+#define mmNIC0_QM0_PQC_STS_1_2 0x541A8E8
+
+#define mmNIC0_QM0_PQC_STS_1_3 0x541A8EC
+
+#define mmNIC0_QM0_SEI_STATUS 0x541A8F0
+
+#define mmNIC0_QM0_SEI_MASK 0x541A8F4
+
+#define mmNIC0_QM0_GLBL_ERR_ADDR_LO 0x541AD00
+
+#define mmNIC0_QM0_GLBL_ERR_ADDR_HI 0x541AD04
+
+#define mmNIC0_QM0_GLBL_ERR_WDATA 0x541AD08
+
+#define mmNIC0_QM0_L2H_MASK_LO 0x541AD14
+
+#define mmNIC0_QM0_L2H_MASK_HI 0x541AD18
+
+#define mmNIC0_QM0_L2H_CMPR_LO 0x541AD1C
+
+#define mmNIC0_QM0_L2H_CMPR_HI 0x541AD20
+
+#define mmNIC0_QM0_LOCAL_RANGE_BASE 0x541AD24
+
+#define mmNIC0_QM0_LOCAL_RANGE_SIZE 0x541AD28
+
+#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 0x541AD30
+
+#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 0x541AD34
+
+#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 0x541AD38
+
+#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 0x541AD3C
+
+#define mmNIC0_QM0_IND_GW_APB_CFG 0x541AD40
+
+#define mmNIC0_QM0_IND_GW_APB_WDATA 0x541AD44
+
+#define mmNIC0_QM0_IND_GW_APB_RDATA 0x541AD48
+
+#define mmNIC0_QM0_IND_GW_APB_STATUS 0x541AD4C
+
+#define mmNIC0_QM0_PERF_CNT_FREE_LO 0x541AD60
+
+#define mmNIC0_QM0_PERF_CNT_FREE_HI 0x541AD64
+
+#define mmNIC0_QM0_PERF_CNT_IDLE_LO 0x541AD68
+
+#define mmNIC0_QM0_PERF_CNT_IDLE_HI 0x541AD6C
+
+#define mmNIC0_QM0_PERF_CNT_CFG 0x541AD70
+
+#endif /* ASIC_REG_NIC0_QM0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
new file mode 100644
index 000000000000..5f380a44dd21
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QM_ARC_AUX0_REGS_H_
+#define ASIC_REG_NIC0_QM_ARC_AUX0_REGS_H_
+
+/*
+ *****************************************
+ * NIC0_QM_ARC_AUX0
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmNIC0_QM_ARC_AUX0_RUN_HALT_REQ 0x5418100
+
+#define mmNIC0_QM_ARC_AUX0_RUN_HALT_ACK 0x5418104
+
+#define mmNIC0_QM_ARC_AUX0_RST_VEC_ADDR 0x5418108
+
+#define mmNIC0_QM_ARC_AUX0_DBG_MODE 0x541810C
+
+#define mmNIC0_QM_ARC_AUX0_CLUSTER_NUM 0x5418110
+
+#define mmNIC0_QM_ARC_AUX0_ARC_NUM 0x5418114
+
+#define mmNIC0_QM_ARC_AUX0_WAKE_UP_EVENT 0x5418118
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_SYS_ADDR_BASE 0x541811C
+
+#define mmNIC0_QM_ARC_AUX0_CTI_AP_STS 0x5418120
+
+#define mmNIC0_QM_ARC_AUX0_CTI_CFG_MUX_SEL 0x5418124
+
+#define mmNIC0_QM_ARC_AUX0_ARC_RST 0x5418128
+
+#define mmNIC0_QM_ARC_AUX0_ARC_RST_REQ 0x541812C
+
+#define mmNIC0_QM_ARC_AUX0_SRAM_LSB_ADDR 0x5418130
+
+#define mmNIC0_QM_ARC_AUX0_SRAM_MSB_ADDR 0x5418134
+
+#define mmNIC0_QM_ARC_AUX0_PCIE_LSB_ADDR 0x5418138
+
+#define mmNIC0_QM_ARC_AUX0_PCIE_MSB_ADDR 0x541813C
+
+#define mmNIC0_QM_ARC_AUX0_CFG_LSB_ADDR 0x5418140
+
+#define mmNIC0_QM_ARC_AUX0_CFG_MSB_ADDR 0x5418144
+
+#define mmNIC0_QM_ARC_AUX0_HBM0_LSB_ADDR 0x5418150
+
+#define mmNIC0_QM_ARC_AUX0_HBM0_MSB_ADDR 0x5418154
+
+#define mmNIC0_QM_ARC_AUX0_HBM1_LSB_ADDR 0x5418158
+
+#define mmNIC0_QM_ARC_AUX0_HBM1_MSB_ADDR 0x541815C
+
+#define mmNIC0_QM_ARC_AUX0_HBM2_LSB_ADDR 0x5418160
+
+#define mmNIC0_QM_ARC_AUX0_HBM2_MSB_ADDR 0x5418164
+
+#define mmNIC0_QM_ARC_AUX0_HBM3_LSB_ADDR 0x5418168
+
+#define mmNIC0_QM_ARC_AUX0_HBM3_MSB_ADDR 0x541816C
+
+#define mmNIC0_QM_ARC_AUX0_HBM0_OFFSET 0x5418170
+
+#define mmNIC0_QM_ARC_AUX0_HBM1_OFFSET 0x5418174
+
+#define mmNIC0_QM_ARC_AUX0_HBM2_OFFSET 0x5418178
+
+#define mmNIC0_QM_ARC_AUX0_HBM3_OFFSET 0x541817C
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_0 0x5418180
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_1 0x5418184
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_2 0x5418188
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_3 0x541818C
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_4 0x5418190
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_5 0x5418194
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_LSB_ADDR_6 0x5418198
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_0 0x541819C
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_1 0x54181A0
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_2 0x54181A4
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_3 0x54181A8
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_4 0x54181AC
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_5 0x54181B0
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_PURPOSE_MSB_ADDR_6 0x54181B4
+
+#define mmNIC0_QM_ARC_AUX0_ARC_CBU_AWCACHE_OVR 0x54181B8
+
+#define mmNIC0_QM_ARC_AUX0_ARC_LBU_AWCACHE_OVR 0x54181BC
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_0 0x54181C0
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_1 0x54181C4
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_2 0x54181C8
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_3 0x54181CC
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_4 0x54181D0
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_5 0x54181D4
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_6 0x54181D8
+
+#define mmNIC0_QM_ARC_AUX0_CONTEXT_ID_7 0x54181DC
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_0 0x54181E0
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_1 0x54181E4
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_2 0x54181E8
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_3 0x54181EC
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_4 0x54181F0
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_5 0x54181F4
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_6 0x54181F8
+
+#define mmNIC0_QM_ARC_AUX0_CID_OFFSET_7 0x54181FC
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_0 0x5418200
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_1 0x5418204
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_2 0x5418208
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_3 0x541820C
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_4 0x5418210
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_5 0x5418214
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_6 0x5418218
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_7 0x541821C
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_8 0x5418220
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_9 0x5418224
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_10 0x5418228
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_11 0x541822C
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_12 0x5418230
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_13 0x5418234
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_14 0x5418238
+
+#define mmNIC0_QM_ARC_AUX0_SW_INTR_15 0x541823C
+
+#define mmNIC0_QM_ARC_AUX0_IRQ_INTR_MASK_0 0x5418280
+
+#define mmNIC0_QM_ARC_AUX0_IRQ_INTR_MASK_1 0x5418284
+
+#define mmNIC0_QM_ARC_AUX0_ARC_SEI_INTR_STS 0x5418290
+
+#define mmNIC0_QM_ARC_AUX0_ARC_SEI_INTR_CLR 0x5418294
+
+#define mmNIC0_QM_ARC_AUX0_ARC_SEI_INTR_MASK 0x5418298
+
+#define mmNIC0_QM_ARC_AUX0_ARC_EXCPTN_CAUSE 0x541829C
+
+#define mmNIC0_QM_ARC_AUX0_SEI_INTR_HALT_EN 0x54182A0
+
+#define mmNIC0_QM_ARC_AUX0_ARC_SEI_INTR_HALT_MASK 0x54182A4
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_SEI_INTR_HALT_MASK 0x54182A8
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REI_INTR_STS 0x54182B0
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REI_INTR_CLR 0x54182B4
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REI_INTR_MASK 0x54182B8
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_ECC_ERR_ADDR 0x54182BC
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_ECC_SYNDROME 0x54182C0
+
+#define mmNIC0_QM_ARC_AUX0_I_CACHE_ECC_ERR_ADDR 0x54182C4
+
+#define mmNIC0_QM_ARC_AUX0_I_CACHE_ECC_SYNDROME 0x54182C8
+
+#define mmNIC0_QM_ARC_AUX0_D_CACHE_ECC_ERR_ADDR 0x54182CC
+
+#define mmNIC0_QM_ARC_AUX0_D_CACHE_ECC_SYNDROME 0x54182D0
+
+#define mmNIC0_QM_ARC_AUX0_LBW_TRMINATE_AWADDR_ERR 0x54182E0
+
+#define mmNIC0_QM_ARC_AUX0_LBW_TRMINATE_ARADDR_ERR 0x54182E4
+
+#define mmNIC0_QM_ARC_AUX0_CFG_LBW_TERMINATE_BRESP 0x54182E8
+
+#define mmNIC0_QM_ARC_AUX0_CFG_LBW_TERMINATE_RRESP 0x54182EC
+
+#define mmNIC0_QM_ARC_AUX0_CFG_LBW_TERMINATE_AXLEN 0x54182F0
+
+#define mmNIC0_QM_ARC_AUX0_CFG_LBW_TERMINATE_AXSIZE 0x54182F4
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_0 0x5418300
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_1 0x5418304
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_2 0x5418308
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_3 0x541830C
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_4 0x5418310
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_5 0x5418314
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_6 0x5418318
+
+#define mmNIC0_QM_ARC_AUX0_SCRATCHPAD_7 0x541831C
+
+#define mmNIC0_QM_ARC_AUX0_TOTAL_CBU_WR_CNT 0x5418320
+
+#define mmNIC0_QM_ARC_AUX0_INFLIGHT_CBU_WR_CNT 0x5418324
+
+#define mmNIC0_QM_ARC_AUX0_TOTAL_CBU_RD_CNT 0x5418328
+
+#define mmNIC0_QM_ARC_AUX0_INFLIGHT_CBU_RD_CNT 0x541832C
+
+#define mmNIC0_QM_ARC_AUX0_TOTAL_LBU_WR_CNT 0x5418330
+
+#define mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_WR_CNT 0x5418334
+
+#define mmNIC0_QM_ARC_AUX0_TOTAL_LBU_RD_CNT 0x5418338
+
+#define mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_RD_CNT 0x541833C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_ARUSER_OVR 0x5418350
+
+#define mmNIC0_QM_ARC_AUX0_CBU_ARUSER_OVR_EN 0x5418354
+
+#define mmNIC0_QM_ARC_AUX0_CBU_AWUSER_OVR 0x5418358
+
+#define mmNIC0_QM_ARC_AUX0_CBU_AWUSER_OVR_EN 0x541835C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_ARUSER_MSB_OVR 0x5418360
+
+#define mmNIC0_QM_ARC_AUX0_CBU_ARUSER_MSB_OVR_EN 0x5418364
+
+#define mmNIC0_QM_ARC_AUX0_CBU_AWUSER_MSB_OVR 0x5418368
+
+#define mmNIC0_QM_ARC_AUX0_CBU_AWUSER_MSB_OVR_EN 0x541836C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_AXCACHE_OVR 0x5418370
+
+#define mmNIC0_QM_ARC_AUX0_CBU_LOCK_OVR 0x5418374
+
+#define mmNIC0_QM_ARC_AUX0_CBU_PROT_OVR 0x5418378
+
+#define mmNIC0_QM_ARC_AUX0_CBU_MAX_OUTSTANDING 0x541837C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN 0x5418380
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORCE_RSP_OK 0x5418384
+
+#define mmNIC0_QM_ARC_AUX0_CBU_NO_WR_INFLIGHT 0x541838C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_SEI_INTR_ID 0x5418390
+
+#define mmNIC0_QM_ARC_AUX0_LBU_ARUSER_OVR 0x5418400
+
+#define mmNIC0_QM_ARC_AUX0_LBU_ARUSER_OVR_EN 0x5418404
+
+#define mmNIC0_QM_ARC_AUX0_LBU_AWUSER_OVR 0x5418408
+
+#define mmNIC0_QM_ARC_AUX0_LBU_AWUSER_OVR_EN 0x541840C
+
+#define mmNIC0_QM_ARC_AUX0_LBU_AXCACHE_OVR 0x5418420
+
+#define mmNIC0_QM_ARC_AUX0_LBU_LOCK_OVR 0x5418424
+
+#define mmNIC0_QM_ARC_AUX0_LBU_PROT_OVR 0x5418428
+
+#define mmNIC0_QM_ARC_AUX0_LBU_MAX_OUTSTANDING 0x541842C
+
+#define mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN 0x5418430
+
+#define mmNIC0_QM_ARC_AUX0_LBU_FORCE_RSP_OK 0x5418434
+
+#define mmNIC0_QM_ARC_AUX0_LBU_NO_WR_INFLIGHT 0x541843C
+
+#define mmNIC0_QM_ARC_AUX0_LBU_SEI_INTR_ID 0x5418440
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_0 0x5418500
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_1 0x5418504
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_2 0x5418508
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_3 0x541850C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_4 0x5418510
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_5 0x5418514
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_6 0x5418518
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_7 0x541851C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_0 0x5418520
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_1 0x5418524
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_2 0x5418528
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_3 0x541852C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_4 0x5418530
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_5 0x5418534
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_6 0x5418538
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_SIZE_7 0x541853C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_0 0x5418540
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_1 0x5418544
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_2 0x5418548
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_3 0x541854C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_4 0x5418550
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_5 0x5418554
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_6 0x5418558
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PI_7 0x541855C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_0 0x5418560
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_1 0x5418564
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_2 0x5418568
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_3 0x541856C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_4 0x5418570
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_5 0x5418574
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_6 0x5418578
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_CI_7 0x541857C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_0 0x5418580
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_1 0x5418584
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_2 0x5418588
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_3 0x541858C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_4 0x5418590
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_5 0x5418594
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_6 0x5418598
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_PUSH_REG_7 0x541859C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_0 0x54185A0
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_1 0x54185A4
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_2 0x54185A8
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_3 0x54185AC
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_4 0x54185B0
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_5 0x54185B4
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_6 0x54185B8
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_MAX_OCCUPANCY_7 0x54185BC
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_0 0x54185C0
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_1 0x54185C4
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_2 0x54185C8
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_3 0x54185CC
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_4 0x54185D0
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_5 0x54185D4
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_6 0x54185D8
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_VALID_ENTRIES_7 0x54185DC
+
+#define mmNIC0_QM_ARC_AUX0_GENERAL_Q_VLD_ENTRY_MASK 0x54185E0
+
+#define mmNIC0_QM_ARC_AUX0_NIC_Q_VLD_ENTRY_MASK 0x54185E4
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_DROP_EN 0x5418620
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_WARN_MSG 0x5418624
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_ALERT_MSG 0x5418628
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_GEN_AXI_AWPROT 0x5418630
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_GEN_AXI_AWUSER 0x5418634
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_GEN_AXI_AWBURST 0x5418638
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_GEN_AXI_AWLOCK 0x541863C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_GEN_AXI_AWCACHE 0x5418640
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_WRR_ARB_WEIGHT 0x5418644
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_FULL_CFG 0x5418648
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_CNT 0x541864C
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_CQ_IFIFO_SHADOW_CI 0x5418650
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x5418654
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_CQ_SHADOW_CI 0x5418658
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_SHADOW_CI 0x541865C
+
+#define mmNIC0_QM_ARC_AUX0_AUX2APB_PROT 0x5418700
+
+#define mmNIC0_QM_ARC_AUX0_LBW_FORK_WIN_EN 0x5418704
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_LBW_FORK_BASE_ADDR0 0x5418708
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_LBW_FORK_ADDR_MASK0 0x541870C
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_LBW_FORK_BASE_ADDR1 0x5418710
+
+#define mmNIC0_QM_ARC_AUX0_QMAN_LBW_FORK_ADDR_MASK1 0x5418714
+
+#define mmNIC0_QM_ARC_AUX0_FARM_LBW_FORK_BASE_ADDR0 0x5418718
+
+#define mmNIC0_QM_ARC_AUX0_FARM_LBW_FORK_ADDR_MASK0 0x541871C
+
+#define mmNIC0_QM_ARC_AUX0_FARM_LBW_FORK_BASE_ADDR1 0x5418720
+
+#define mmNIC0_QM_ARC_AUX0_FARM_LBW_FORK_ADDR_MASK1 0x5418724
+
+#define mmNIC0_QM_ARC_AUX0_LBW_APB_FORK_MAX_ADDR0 0x5418728
+
+#define mmNIC0_QM_ARC_AUX0_LBW_APB_FORK_MAX_ADDR1 0x541872C
+
+#define mmNIC0_QM_ARC_AUX0_ARC_ACC_ENGS_LBW_FORK_MASK 0x5418730
+
+#define mmNIC0_QM_ARC_AUX0_ARC_DUP_ENG_LBW_FORK_ADDR 0x5418734
+
+#define mmNIC0_QM_ARC_AUX0_ARC_ACP_ENG_LBW_FORK_ADDR 0x5418738
+
+#define mmNIC0_QM_ARC_AUX0_ARC_ACC_ENGS_VIRTUAL_ADDR 0x541873C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_WIN_EN 0x5418740
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR0_LSB 0x5418750
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR0_MSB 0x5418754
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK0_LSB 0x5418758
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK0_MSB 0x541875C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR1_LSB 0x5418760
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR1_MSB 0x5418764
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK1_LSB 0x5418768
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK1_MSB 0x541876C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR2_LSB 0x5418770
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR2_MSB 0x5418774
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK2_LSB 0x5418778
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK2_MSB 0x541877C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR3_LSB 0x5418780
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_BASE_ADDR3_MSB 0x5418784
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK3_LSB 0x5418788
+
+#define mmNIC0_QM_ARC_AUX0_CBU_FORK_ADDR_MASK3_MSB 0x541878C
+
+#define mmNIC0_QM_ARC_AUX0_CBU_TRMINATE_ARADDR_LSB 0x5418790
+
+#define mmNIC0_QM_ARC_AUX0_CBU_TRMINATE_ARADDR_MSB 0x5418794
+
+#define mmNIC0_QM_ARC_AUX0_CFG_CBU_TERMINATE_BRESP 0x5418798
+
+#define mmNIC0_QM_ARC_AUX0_CFG_CBU_TERMINATE_RRESP 0x541879C
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_0 0x5418800
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_1 0x5418804
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_2 0x5418808
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_3 0x541880C
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_4 0x5418810
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_5 0x5418814
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_6 0x5418818
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_7 0x541881C
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_8 0x5418820
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_9 0x5418824
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_10 0x5418828
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_11 0x541882C
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_12 0x5418830
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_13 0x5418834
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_14 0x5418838
+
+#define mmNIC0_QM_ARC_AUX0_ARC_REGION_CFG_15 0x541883C
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_TRMINATE_AWADDR_ERR 0x5418840
+
+#define mmNIC0_QM_ARC_AUX0_DCCM_TRMINATE_ARADDR_ERR 0x5418844
+
+#define mmNIC0_QM_ARC_AUX0_CFG_DCCM_TERMINATE_BRESP 0x5418848
+
+#define mmNIC0_QM_ARC_AUX0_CFG_DCCM_TERMINATE_RRESP 0x541884C
+
+#define mmNIC0_QM_ARC_AUX0_CFG_DCCM_TERMINATE_EN 0x5418850
+
+#define mmNIC0_QM_ARC_AUX0_CFG_DCCM_SECURE_REGION 0x5418854
+
+#define mmNIC0_QM_ARC_AUX0_ARC_AXI_ORDERING_WR_IF_CNT 0x5418900
+
+#define mmNIC0_QM_ARC_AUX0_ARC_AXI_ORDERING_CTL 0x5418904
+
+#define mmNIC0_QM_ARC_AUX0_ARC_AXI_ORDERING_ADDR_MSK 0x5418908
+
+#define mmNIC0_QM_ARC_AUX0_ARC_AXI_ORDERING_ADDR 0x541890C
+
+#define mmNIC0_QM_ARC_AUX0_ARC_ACC_ENGS_BUSER 0x5418910
+
+#define mmNIC0_QM_ARC_AUX0_MME_ARC_UPPER_DCCM_EN 0x5418920
+
+#endif /* ASIC_REG_NIC0_QM_ARC_AUX0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
new file mode 100644
index 000000000000..eaee29da4244
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
@@ -0,0 +1,905 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QPC0_REGS_H_
+#define ASIC_REG_NIC0_QPC0_REGS_H_
+
+/*
+ *****************************************
+ * NIC0_QPC0
+ * (Prototype: NIC_QPC)
+ *****************************************
+ */
+
+#define mmNIC0_QPC0_REQ_QPC_CACHE_INVALIDATE 0x541F000
+
+#define mmNIC0_QPC0_REQ_QPC_CACHE_INV_STATUS 0x541F004
+
+#define mmNIC0_QPC0_REQ_STATIC_CONFIG 0x541F008
+
+#define mmNIC0_QPC0_REQ_BASE_ADDRESS_63_32 0x541F00C
+
+#define mmNIC0_QPC0_REQ_BASE_ADDRESS_31_7 0x541F010
+
+#define mmNIC0_QPC0_REQ_CLEAN_LINK_LIST 0x541F014
+
+#define mmNIC0_QPC0_REQ_ERR_FIFO_PUSH_63_32 0x541F018
+
+#define mmNIC0_QPC0_REQ_ERR_FIFO_PUSH_31_0 0x541F01C
+
+#define mmNIC0_QPC0_REQ_ERR_QP_STATE_63_32 0x541F020
+
+#define mmNIC0_QPC0_REQ_ERR_QP_STATE_31_0 0x541F024
+
+#define mmNIC0_QPC0_RETRY_COUNT_MAX 0x541F028
+
+#define mmNIC0_QPC0_AXI_PROT 0x541F030
+
+#define mmNIC0_QPC0_RES_QPC_CACHE_INVALIDATE 0x541F034
+
+#define mmNIC0_QPC0_RES_QPC_CACHE_INV_STATUS 0x541F038
+
+#define mmNIC0_QPC0_RES_STATIC_CONFIG 0x541F03C
+
+#define mmNIC0_QPC0_RES_BASE_ADDRESS_63_32 0x541F040
+
+#define mmNIC0_QPC0_RES_BASE_ADDRESS_31_7 0x541F044
+
+#define mmNIC0_QPC0_RES_CLEAN_LINK_LIST 0x541F048
+
+#define mmNIC0_QPC0_ERR_FIFO_WRITE_INDEX 0x541F050
+
+#define mmNIC0_QPC0_ERR_FIFO_PRODUCER_INDEX 0x541F054
+
+#define mmNIC0_QPC0_ERR_FIFO_CONSUMER_INDEX 0x541F058
+
+#define mmNIC0_QPC0_ERR_FIFO_MASK 0x541F05C
+
+#define mmNIC0_QPC0_ERR_FIFO_CREDIT 0x541F060
+
+#define mmNIC0_QPC0_ERR_FIFO_CFG 0x541F064
+
+#define mmNIC0_QPC0_ERR_FIFO_INTR_MASK 0x541F068
+
+#define mmNIC0_QPC0_ERR_FIFO_BASE_ADDR_63_32 0x541F06C
+
+#define mmNIC0_QPC0_ERR_FIFO_BASE_ADDR_31_7 0x541F070
+
+#define mmNIC0_QPC0_GW_BUSY 0x541F080
+
+#define mmNIC0_QPC0_GW_CTRL 0x541F084
+
+#define mmNIC0_QPC0_GW_DATA_0 0x541F08C
+
+#define mmNIC0_QPC0_GW_DATA_1 0x541F090
+
+#define mmNIC0_QPC0_GW_DATA_2 0x541F094
+
+#define mmNIC0_QPC0_GW_DATA_3 0x541F098
+
+#define mmNIC0_QPC0_GW_DATA_4 0x541F09C
+
+#define mmNIC0_QPC0_GW_DATA_5 0x541F0A0
+
+#define mmNIC0_QPC0_GW_DATA_6 0x541F0A4
+
+#define mmNIC0_QPC0_GW_DATA_7 0x541F0A8
+
+#define mmNIC0_QPC0_GW_DATA_8 0x541F0AC
+
+#define mmNIC0_QPC0_GW_DATA_9 0x541F0B0
+
+#define mmNIC0_QPC0_GW_DATA_10 0x541F0B4
+
+#define mmNIC0_QPC0_GW_DATA_11 0x541F0B8
+
+#define mmNIC0_QPC0_GW_DATA_12 0x541F0BC
+
+#define mmNIC0_QPC0_GW_DATA_13 0x541F0C0
+
+#define mmNIC0_QPC0_GW_DATA_14 0x541F0C4
+
+#define mmNIC0_QPC0_GW_DATA_15 0x541F0C8
+
+#define mmNIC0_QPC0_GW_DATA_16 0x541F0CC
+
+#define mmNIC0_QPC0_GW_DATA_17 0x541F0D0
+
+#define mmNIC0_QPC0_GW_DATA_18 0x541F0D4
+
+#define mmNIC0_QPC0_GW_DATA_19 0x541F0D8
+
+#define mmNIC0_QPC0_GW_DATA_20 0x541F0DC
+
+#define mmNIC0_QPC0_GW_DATA_21 0x541F0E0
+
+#define mmNIC0_QPC0_GW_DATA_22 0x541F0E4
+
+#define mmNIC0_QPC0_GW_DATA_23 0x541F0E8
+
+#define mmNIC0_QPC0_GW_DATA_24 0x541F0EC
+
+#define mmNIC0_QPC0_GW_DATA_25 0x541F0F0
+
+#define mmNIC0_QPC0_GW_DATA_26 0x541F0F4
+
+#define mmNIC0_QPC0_GW_DATA_27 0x541F0F8
+
+#define mmNIC0_QPC0_GW_DATA_28 0x541F0FC
+
+#define mmNIC0_QPC0_GW_DATA_29 0x541F100
+
+#define mmNIC0_QPC0_GW_DATA_30 0x541F104
+
+#define mmNIC0_QPC0_GW_DATA_31 0x541F108
+
+#define mmNIC0_QPC0_GW_MASK_0 0x541F124
+
+#define mmNIC0_QPC0_GW_MASK_1 0x541F128
+
+#define mmNIC0_QPC0_GW_MASK_2 0x541F12C
+
+#define mmNIC0_QPC0_GW_MASK_3 0x541F130
+
+#define mmNIC0_QPC0_GW_MASK_4 0x541F134
+
+#define mmNIC0_QPC0_GW_MASK_5 0x541F138
+
+#define mmNIC0_QPC0_GW_MASK_6 0x541F13C
+
+#define mmNIC0_QPC0_GW_MASK_7 0x541F140
+
+#define mmNIC0_QPC0_GW_MASK_8 0x541F144
+
+#define mmNIC0_QPC0_GW_MASK_9 0x541F148
+
+#define mmNIC0_QPC0_GW_MASK_10 0x541F14C
+
+#define mmNIC0_QPC0_GW_MASK_11 0x541F150
+
+#define mmNIC0_QPC0_GW_MASK_12 0x541F154
+
+#define mmNIC0_QPC0_GW_MASK_13 0x541F158
+
+#define mmNIC0_QPC0_GW_MASK_14 0x541F15C
+
+#define mmNIC0_QPC0_GW_MASK_15 0x541F160
+
+#define mmNIC0_QPC0_GW_MASK_16 0x541F164
+
+#define mmNIC0_QPC0_GW_MASK_17 0x541F168
+
+#define mmNIC0_QPC0_GW_MASK_18 0x541F16C
+
+#define mmNIC0_QPC0_GW_MASK_19 0x541F170
+
+#define mmNIC0_QPC0_GW_MASK_20 0x541F174
+
+#define mmNIC0_QPC0_GW_MASK_21 0x541F178
+
+#define mmNIC0_QPC0_GW_MASK_22 0x541F17C
+
+#define mmNIC0_QPC0_GW_MASK_23 0x541F180
+
+#define mmNIC0_QPC0_GW_MASK_24 0x541F184
+
+#define mmNIC0_QPC0_GW_MASK_25 0x541F188
+
+#define mmNIC0_QPC0_GW_MASK_26 0x541F18C
+
+#define mmNIC0_QPC0_GW_MASK_27 0x541F190
+
+#define mmNIC0_QPC0_GW_MASK_28 0x541F194
+
+#define mmNIC0_QPC0_GW_MASK_29 0x541F198
+
+#define mmNIC0_QPC0_GW_MASK_30 0x541F19C
+
+#define mmNIC0_QPC0_GW_MASK_31 0x541F1A0
+
+#define mmNIC0_QPC0_CC_TIMEOUT 0x541F1B0
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_EN 0x541F1FC
+
+#define mmNIC0_QPC0_CC_TICK_WRAP 0x541F200
+
+#define mmNIC0_QPC0_CC_ROLLBACK 0x541F204
+
+#define mmNIC0_QPC0_CC_MAX_WINDOW_SIZE 0x541F208
+
+#define mmNIC0_QPC0_CC_MIN_WINDOW_SIZE 0x541F20C
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_0 0x541F210
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_1 0x541F214
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_2 0x541F218
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_3 0x541F21C
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_4 0x541F220
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_5 0x541F224
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_6 0x541F228
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_7 0x541F22C
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_8 0x541F230
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_9 0x541F234
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_10 0x541F238
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_11 0x541F23C
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_12 0x541F240
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_13 0x541F244
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_14 0x541F248
+
+#define mmNIC0_QPC0_CC_ALPHA_LINEAR_15 0x541F24C
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_0 0x541F250
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_1 0x541F254
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_2 0x541F258
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_3 0x541F25C
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_4 0x541F260
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_5 0x541F264
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_6 0x541F268
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_7 0x541F26C
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_8 0x541F270
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_9 0x541F274
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_10 0x541F278
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_11 0x541F27C
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_12 0x541F280
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_13 0x541F284
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_14 0x541F288
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_15 0x541F28C
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_0 0x541F290
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_1 0x541F294
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_2 0x541F298
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_3 0x541F29C
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_4 0x541F2A0
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_5 0x541F2A4
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_6 0x541F2A8
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_7 0x541F2AC
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_8 0x541F2B0
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_9 0x541F2B4
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_10 0x541F2B8
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_11 0x541F2BC
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_12 0x541F2C0
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_13 0x541F2C4
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_14 0x541F2C8
+
+#define mmNIC0_QPC0_CC_ALPHA_LOG_THRESHOLD_15 0x541F2CC
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_0 0x541F2D0
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_1 0x541F2D4
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_2 0x541F2D8
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_3 0x541F2DC
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_4 0x541F2E0
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_5 0x541F2E4
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_6 0x541F2E8
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_7 0x541F2EC
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_8 0x541F2F0
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_9 0x541F2F4
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_10 0x541F2F8
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_11 0x541F2FC
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_12 0x541F300
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_13 0x541F304
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_14 0x541F308
+
+#define mmNIC0_QPC0_CC_WINDOW_INC_15 0x541F30C
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_0 0x541F310
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_1 0x541F314
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_2 0x541F318
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_3 0x541F31C
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_4 0x541F320
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_5 0x541F324
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_6 0x541F328
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_7 0x541F32C
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_8 0x541F330
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_9 0x541F334
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_10 0x541F338
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_11 0x541F33C
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_12 0x541F340
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_13 0x541F344
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_14 0x541F348
+
+#define mmNIC0_QPC0_CC_WINDOW_IN_THRESHOLD_15 0x541F34C
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_0 0x541F360
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_1 0x541F364
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_2 0x541F368
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_3 0x541F36C
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_4 0x541F370
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_5 0x541F374
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_6 0x541F378
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_7 0x541F37C
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_8 0x541F380
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_9 0x541F384
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_10 0x541F388
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_11 0x541F38C
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_12 0x541F390
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_13 0x541F394
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_14 0x541F398
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_15 0x541F39C
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_16 0x541F3A0
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_17 0x541F3A4
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_18 0x541F3A8
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_19 0x541F3AC
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_20 0x541F3B0
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_21 0x541F3B4
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_22 0x541F3B8
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_23 0x541F3BC
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_24 0x541F3C0
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_25 0x541F3C4
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_26 0x541F3C8
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_27 0x541F3CC
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_28 0x541F3D0
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_29 0x541F3D4
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_30 0x541F3D8
+
+#define mmNIC0_QPC0_DB_FIFO_USER_OVRD_31 0x541F3DC
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_0 0x541F3E0
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_1 0x541F3E4
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_2 0x541F3E8
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_3 0x541F3EC
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_4 0x541F3F0
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_5 0x541F3F4
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_6 0x541F3F8
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_7 0x541F3FC
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_8 0x541F400
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_9 0x541F404
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_10 0x541F408
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_11 0x541F40C
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_12 0x541F410
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_13 0x541F414
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_14 0x541F418
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_15 0x541F41C
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_16 0x541F420
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_17 0x541F424
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_18 0x541F428
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_19 0x541F42C
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_20 0x541F430
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_21 0x541F434
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_22 0x541F438
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_23 0x541F43C
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_24 0x541F440
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_25 0x541F444
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_26 0x541F448
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_27 0x541F44C
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_28 0x541F450
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_29 0x541F454
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_30 0x541F458
+
+#define mmNIC0_QPC0_DB_FIFO_CFG_31 0x541F45C
+
+#define mmNIC0_QPC0_SECURED_DB_FIRST32 0x541F460
+
+#define mmNIC0_QPC0_SECURED_DB_SECOND32 0x541F464
+
+#define mmNIC0_QPC0_SECURED_DB_THIRD32 0x541F468
+
+#define mmNIC0_QPC0_SECURED_DB_FOURTH32 0x541F46C
+
+#define mmNIC0_QPC0_PRIVILEGE_DB_FIRST32 0x541F470
+
+#define mmNIC0_QPC0_PRIVILEGE_DB_SECOND32 0x541F474
+
+#define mmNIC0_QPC0_PRIVILEGE_DB_THIRD32 0x541F478
+
+#define mmNIC0_QPC0_PRIVILEGE_DB_FOURTH32 0x541F47C
+
+#define mmNIC0_QPC0_DBG_INDICATION 0x541F480
+
+#define mmNIC0_QPC0_WTD_WC_FSM 0x541F484
+
+#define mmNIC0_QPC0_WTD_SLICE_FSM 0x541F488
+
+#define mmNIC0_QPC0_REQ_TX_EMPTY_CNT 0x541F48C
+
+#define mmNIC0_QPC0_RES_TX_EMPTY_CNT 0x541F490
+
+#define mmNIC0_QPC0_NUM_ROLLBACKS 0x541F494
+
+#define mmNIC0_QPC0_LAST_QP_ROLLED_BACK 0x541F498
+
+#define mmNIC0_QPC0_NUM_TIMEOUTS 0x541F49C
+
+#define mmNIC0_QPC0_LAST_QP_TIMED_OUT 0x541F4A0
+
+#define mmNIC0_QPC0_WTD_SLICE_FSM_HI 0x541F4A4
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_0 0x541F4B0
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_1 0x541F4B4
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_2 0x541F4B8
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_3 0x541F4BC
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_4 0x541F4C0
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_5 0x541F4C4
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_6 0x541F4C8
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_7 0x541F4CC
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_8 0x541F4D0
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_9 0x541F4D4
+
+#define mmNIC0_QPC0_INTERRUPT_BASE_10 0x541F4D8
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_0 0x541F4DC
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_1 0x541F4E0
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_2 0x541F4E4
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_3 0x541F4E8
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_4 0x541F4EC
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_5 0x541F4F0
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_6 0x541F4F4
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_7 0x541F4F8
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_8 0x541F4FC
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_9 0x541F500
+
+#define mmNIC0_QPC0_INTERRUPT_DATA_10 0x541F504
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_0 0x541F600
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_1 0x541F604
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_2 0x541F608
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_3 0x541F60C
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_4 0x541F610
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_5 0x541F614
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_6 0x541F618
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_7 0x541F61C
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_8 0x541F620
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_9 0x541F624
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_10 0x541F628
+
+#define mmNIC0_QPC0_DBG_COUNT_SELECT_11 0x541F62C
+
+#define mmNIC0_QPC0_DOORBELL_SECURITY 0x541F648
+
+#define mmNIC0_QPC0_DBG_CFG 0x541F64C
+
+#define mmNIC0_QPC0_RES_RING0_PI 0x541F650
+
+#define mmNIC0_QPC0_RES_RING0_CI 0x541F654
+
+#define mmNIC0_QPC0_RES_RING0_CFG 0x541F658
+
+#define mmNIC0_QPC0_RES_RING1_PI 0x541F65C
+
+#define mmNIC0_QPC0_RES_RING1_CI 0x541F660
+
+#define mmNIC0_QPC0_RES_RING1_CFG 0x541F664
+
+#define mmNIC0_QPC0_RES_RING2_PI 0x541F668
+
+#define mmNIC0_QPC0_RES_RING2_CI 0x541F66C
+
+#define mmNIC0_QPC0_RES_RING2_CFG 0x541F670
+
+#define mmNIC0_QPC0_RES_RING3_PI 0x541F674
+
+#define mmNIC0_QPC0_RES_RING3_CI 0x541F678
+
+#define mmNIC0_QPC0_RES_RING3_CFG 0x541F67C
+
+#define mmNIC0_QPC0_REQ_RING0_CI 0x541F680
+
+#define mmNIC0_QPC0_REQ_RING1_CI 0x541F684
+
+#define mmNIC0_QPC0_REQ_RING2_CI 0x541F688
+
+#define mmNIC0_QPC0_REQ_RING3_CI 0x541F68C
+
+#define mmNIC0_QPC0_INTERRUPT_CAUSE 0x541F690
+
+#define mmNIC0_QPC0_INTERRUPT_MASK 0x541F694
+
+#define mmNIC0_QPC0_INTERRUPT_CLR 0x541F698
+
+#define mmNIC0_QPC0_INTERRUPT_EN 0x541F69C
+
+#define mmNIC0_QPC0_INTERRUPT_CFG 0x541F6F0
+
+#define mmNIC0_QPC0_INTERRUPT_RESP_ERR_CAUSE 0x541F6F4
+
+#define mmNIC0_QPC0_INTERRUPT_RESP_ERR_MASK 0x541F6F8
+
+#define mmNIC0_QPC0_INTERRUPR_RESP_ERR_CLR 0x541F700
+
+#define mmNIC0_QPC0_TMR_GW_VALID 0x541F704
+
+#define mmNIC0_QPC0_TMR_GW_DATA0 0x541F708
+
+#define mmNIC0_QPC0_TMR_GW_DATA1 0x541F70C
+
+#define mmNIC0_QPC0_RNR_RETRY_COUNT_EN 0x541F710
+
+#define mmNIC0_QPC0_EVENT_QUE_BASE_ADDR_63_32 0x541F830
+
+#define mmNIC0_QPC0_EVENT_QUE_BASE_ADDR_31_7 0x541F834
+
+#define mmNIC0_QPC0_EVENT_QUE_LOG_SIZE 0x541F838
+
+#define mmNIC0_QPC0_EVENT_QUE_WRITE_INDEX 0x541F83C
+
+#define mmNIC0_QPC0_EVENT_QUE_PRODUCER_INDEX 0x541F840
+
+#define mmNIC0_QPC0_EVENT_QUE_PI_ADDR_63_32 0x541F844
+
+#define mmNIC0_QPC0_EVENT_QUE_PI_ADDR_31_7 0x541F848
+
+#define mmNIC0_QPC0_EVENT_QUE_CONSUMER_INDEX_CB 0x541F84C
+
+#define mmNIC0_QPC0_EVENT_QUE_CFG 0x541F850
+
+#define mmNIC0_QPC0_LBW_PROT 0x541F858
+
+#define mmNIC0_QPC0_MEM_WRITE_INIT 0x541F85C
+
+#define mmNIC0_QPC0_QMAN_DOORBELL 0x541F8E8
+
+#define mmNIC0_QPC0_QMAN_DOORBELL_QPN 0x541F8EC
+
+#define mmNIC0_QPC0_SECURED_CQ_NUMBER 0x541F8F0
+
+#define mmNIC0_QPC0_SECURED_CQ_CONSUMER_INDEX 0x541F8F4
+
+#define mmNIC0_QPC0_PRIVILEGE_CQ_NUMBER 0x541F8F8
+
+#define mmNIC0_QPC0_PRIVILEGE_CQ_CONSUMER_INDEX 0x541F8FC
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_63_32_0 0x541F900
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_63_32_1 0x541F904
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_63_32_2 0x541F908
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_63_32_3 0x541F90C
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_31_0_0 0x541F910
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_31_0_1 0x541F914
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_31_0_2 0x541F918
+
+#define mmNIC0_QPC0_TX_WQ_BASE_ADDR_31_0_3 0x541F91C
+
+#define mmNIC0_QPC0_LOG_MAX_TX_WQ_SIZE_0 0x541F920
+
+#define mmNIC0_QPC0_LOG_MAX_TX_WQ_SIZE_1 0x541F924
+
+#define mmNIC0_QPC0_LOG_MAX_TX_WQ_SIZE_2 0x541F928
+
+#define mmNIC0_QPC0_LOG_MAX_TX_WQ_SIZE_3 0x541F92C
+
+#define mmNIC0_QPC0_MMU_BYPASS_TX_WQ_0 0x541F930
+
+#define mmNIC0_QPC0_MMU_BYPASS_TX_WQ_1 0x541F934
+
+#define mmNIC0_QPC0_MMU_BYPASS_TX_WQ_2 0x541F938
+
+#define mmNIC0_QPC0_MMU_BYPASS_TX_WQ_3 0x541F93C
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_63_32_0 0x541F940
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_63_32_1 0x541F944
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_63_32_2 0x541F948
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_63_32_3 0x541F94C
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_31_0_0 0x541F950
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_31_0_1 0x541F954
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_31_0_2 0x541F958
+
+#define mmNIC0_QPC0_RX_WQ_BASE_ADDR_31_0_3 0x541F95C
+
+#define mmNIC0_QPC0_LOG_MAX_RX_WQ_SIZE_0 0x541F960
+
+#define mmNIC0_QPC0_LOG_MAX_RX_WQ_SIZE_1 0x541F964
+
+#define mmNIC0_QPC0_LOG_MAX_RX_WQ_SIZE_2 0x541F968
+
+#define mmNIC0_QPC0_LOG_MAX_RX_WQ_SIZE_3 0x541F96C
+
+#define mmNIC0_QPC0_MMU_BYPASS_RX_WQ_0 0x541F970
+
+#define mmNIC0_QPC0_MMU_BYPASS_RX_WQ_1 0x541F974
+
+#define mmNIC0_QPC0_MMU_BYPASS_RX_WQ_2 0x541F978
+
+#define mmNIC0_QPC0_MMU_BYPASS_RX_WQ_3 0x541F97C
+
+#define mmNIC0_QPC0_WQE_MEM_WRITE_AXI_PROT 0x541F980
+
+#define mmNIC0_QPC0_WQ_UPPER_THRESHOLD 0x541F984
+
+#define mmNIC0_QPC0_WQ_LOWER_THRESHOLD 0x541F988
+
+#define mmNIC0_QPC0_WQ_BP_2ARC_ADDR 0x541F98C
+
+#define mmNIC0_QPC0_WQ_BP_2QMAN_ADDR 0x541F990
+
+#define mmNIC0_QPC0_WTD_CONFIG 0x541F994
+
+#define mmNIC0_QPC0_REQTX_ERR_FIFO_PUSH_63_32 0x541F998
+
+#define mmNIC0_QPC0_REQTX_ERR_FIFO_PUSH_31_0 0x541F99C
+
+#define mmNIC0_QPC0_REQTX_ERR_QP_STATE_63_32 0x541F9A0
+
+#define mmNIC0_QPC0_REQTX_ERR_QP_STATE_31_0 0x541F9A4
+
+#define mmNIC0_QPC0_EVENT_QUE_CONSUMER_INDEX 0x541F9A8
+
+#define mmNIC0_QPC0_ARM_CQ_NUM 0x541F9AC
+
+#define mmNIC0_QPC0_ARM_CQ_INDEX 0x541F9B0
+
+#define mmNIC0_QPC0_QPC_CLOCK_GATE 0x541F9B4
+
+#define mmNIC0_QPC0_QPC_CLOCK_GATE_DIS 0x541F9B8
+
+#define mmNIC0_QPC0_CONG_QUE_BASE_ADDR_63_32 0x541F9BC
+
+#define mmNIC0_QPC0_CONG_QUE_BASE_ADDR_31_7 0x541F9C0
+
+#define mmNIC0_QPC0_CONG_QUE_LOG_SIZE 0x541F9C4
+
+#define mmNIC0_QPC0_CONG_QUE_WRITE_INDEX 0x541F9C8
+
+#define mmNIC0_QPC0_CONG_QUE_PRODUCER_INDEX 0x541F9CC
+
+#define mmNIC0_QPC0_CONG_QUE_PI_ADDR_63_32 0x541F9D0
+
+#define mmNIC0_QPC0_CONG_QUE_PI_ADDR_31_7 0x541F9D4
+
+#define mmNIC0_QPC0_CONG_QUE_CONSUMER_INDEX_CB 0x541F9D8
+
+#define mmNIC0_QPC0_CONG_QUE_CFG 0x541F9DC
+
+#define mmNIC0_QPC0_CONG_QUE_CONSUMER_INDEX 0x541F9E0
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_0 0x541FA00
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_1 0x541FA04
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_2 0x541FA08
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_3 0x541FA0C
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_4 0x541FA10
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_5 0x541FA14
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_6 0x541FA18
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_7 0x541FA1C
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_8 0x541FA20
+
+#define mmNIC0_QPC0_LINEAR_WQE_STATIC_9 0x541FA24
+
+#define mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_0 0x541FA40
+
+#define mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_1 0x541FA44
+
+#define mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_2 0x541FA48
+
+#define mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_3 0x541FA4C
+
+#define mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_4 0x541FA50
+
+#define mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_5 0x541FA54
+
+#define mmNIC0_QPC0_LINEAR_WQE_QPN 0x541FA58
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_0 0x541FA80
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_1 0x541FA84
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_2 0x541FA88
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_3 0x541FA8C
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_4 0x541FA90
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_5 0x541FA94
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_6 0x541FA98
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_7 0x541FA9C
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_8 0x541FAA0
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_9 0x541FAA4
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_10 0x541FAA8
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_11 0x541FAAC
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_12 0x541FAB0
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_13 0x541FAB4
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_14 0x541FAB8
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_15 0x541FABC
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_16 0x541FAC0
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_17 0x541FAC4
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_0 0x541FAE0
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_1 0x541FAE4
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_2 0x541FAE8
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_3 0x541FAEC
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_4 0x541FAF0
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_5 0x541FAF4
+
+#define mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN 0x541FAF8
+
+#endif /* ASIC_REG_NIC0_QPC0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
new file mode 100644
index 000000000000..2153319a50a0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_UMR0_0_COMPLETION_QUEUE_CI_1_REGS_H_
+#define ASIC_REG_NIC0_UMR0_0_COMPLETION_QUEUE_CI_1_REGS_H_
+
+/*
+ *****************************************
+ * NIC0_UMR0_0_COMPLETION_QUEUE_CI_1
+ * (Prototype: COMPLETION_QUEUE_CI)
+ *****************************************
+ */
+
+#define mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_NUMBER 0x5400180
+
+#define mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX 0x5400184
+
+#endif /* ASIC_REG_NIC0_UMR0_0_COMPLETION_QUEUE_CI_1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
new file mode 100644
index 000000000000..de8eac74c2fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_UMR0_0_UNSECURE_DOORBELL0_REGS_H_
+#define ASIC_REG_NIC0_UMR0_0_UNSECURE_DOORBELL0_REGS_H_
+
+/*
+ *****************************************
+ * NIC0_UMR0_0_UNSECURE_DOORBELL0
+ * (Prototype: NIC_UNSEC_DBELL)
+ *****************************************
+ */
+
+#define mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 0x5400000
+
+#define mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_SECOND32 0x5400004
+
+#define mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_THIRD32 0x5400008
+
+#define mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FOURTH32 0x540000C
+
+#endif /* ASIC_REG_NIC0_UMR0_0_UNSECURE_DOORBELL0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
new file mode 100644
index 000000000000..44182fc18234
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_AUX_REGS_H_
+#define ASIC_REG_PCIE_AUX_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_AUX
+ * (Prototype: PCIE_AUX)
+ *****************************************
+ */
+
+#define mmPCIE_AUX_APB_TIMEOUT 0x4C07004
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_0 0x4C07008
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_1 0x4C0700C
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_2 0x4C07010
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_3 0x4C07014
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_4 0x4C07018
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_5 0x4C0701C
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_6 0x4C07020
+
+#define mmPCIE_AUX_SW_GENERAL_PURPOSE_7 0x4C07024
+
+#define mmPCIE_AUX_PHY_INIT 0x4C07100
+
+#define mmPCIE_AUX_LTR_MAX_LATENCY 0x4C07138
+
+#define mmPCIE_AUX_BAR0_START_L 0x4C07160
+
+#define mmPCIE_AUX_BAR0_START_H 0x4C07164
+
+#define mmPCIE_AUX_BAR1_START 0x4C07168
+
+#define mmPCIE_AUX_BAR2_START_L 0x4C0716C
+
+#define mmPCIE_AUX_BAR2_START_H 0x4C07170
+
+#define mmPCIE_AUX_BAR3_START 0x4C07174
+
+#define mmPCIE_AUX_BAR4_START_L 0x4C07178
+
+#define mmPCIE_AUX_BAR4_START_H 0x4C0717C
+
+#define mmPCIE_AUX_BAR5_START 0x4C07180
+
+#define mmPCIE_AUX_BAR0_LIMIT_L 0x4C07184
+
+#define mmPCIE_AUX_BAR0_LIMIT_H 0x4C07188
+
+#define mmPCIE_AUX_BAR1_LIMIT 0x4C0718C
+
+#define mmPCIE_AUX_BAR2_LIMIT_L 0x4C07190
+
+#define mmPCIE_AUX_BAR2_LIMIT_H 0x4C07194
+
+#define mmPCIE_AUX_BAR3_LIMIT 0x4C07198
+
+#define mmPCIE_AUX_BAR4_LIMIT_L 0x4C0719C
+
+#define mmPCIE_AUX_BAR4_LIMIT_H 0x4C07200
+
+#define mmPCIE_AUX_BAR5_LIMIT 0x4C07204
+
+#define mmPCIE_AUX_BUS_MASTER_EN 0x4C07208
+
+#define mmPCIE_AUX_MEM_SPACE_EN 0x4C0720C
+
+#define mmPCIE_AUX_MAX_RD_REQ_SIZE 0x4C07210
+
+#define mmPCIE_AUX_MAX_PAYLOAD_SIZE 0x4C07214
+
+#define mmPCIE_AUX_EXT_TAG_EN 0x4C07218
+
+#define mmPCIE_AUX_RCB 0x4C0721C
+
+#define mmPCIE_AUX_PM_NO_SOFT_RST 0x4C07220
+
+#define mmPCIE_AUX_PBUS_NUM 0x4C07224
+
+#define mmPCIE_AUX_PBUS_DEV_NUM 0x4C07228
+
+#define mmPCIE_AUX_NO_SNOOP_EN 0x4C0722C
+
+#define mmPCIE_AUX_RELAX_ORDER_EN 0x4C07230
+
+#define mmPCIE_AUX_HP_SLOT_CTRL_ACCESS 0x4C07234
+
+#define mmPCIE_AUX_DLL_STATE_CHGED_EN 0x4C07238
+
+#define mmPCIE_AUX_CMP_CPLED_INT_EN 0x4C0723C
+
+#define mmPCIE_AUX_HP_INT_EN 0x4C07340
+
+#define mmPCIE_AUX_PRE_DET_CHGEN_EN 0x4C07344
+
+#define mmPCIE_AUX_MRL_SENSOR_CHGED_EN 0x4C07348
+
+#define mmPCIE_AUX_PWR_FAULT_DET_EN 0x4C0734C
+
+#define mmPCIE_AUX_ATTEN_BUTTON_PRESSED_EN 0x4C07350
+
+#define mmPCIE_AUX_PF_FLR_ACTIVE 0x4C07360
+
+#define mmPCIE_AUX_PF_FLR_DONE 0x4C07364
+
+#define mmPCIE_AUX_FLR_INT 0x4C07390
+
+#define mmPCIE_AUX_FLR_CTRL 0x4C07394
+
+#define mmPCIE_AUX_LTR_M_EN 0x4C073B0
+
+#define mmPCIE_AUX_LTSSM_EN 0x4C07428
+
+#define mmPCIE_AUX_SYS_INTR 0x4C07440
+
+#define mmPCIE_AUX_INT_DISABLE 0x4C07444
+
+#define mmPCIE_AUX_SMLH_LINK_UP 0x4C07448
+
+#define mmPCIE_AUX_PM_CURR_STATE 0x4C07450
+
+#define mmPCIE_AUX_RDLH_LINK_UP 0x4C07458
+
+#define mmPCIE_AUX_BRDG_SLV_XFER_PENDING 0x4C0745C
+
+#define mmPCIE_AUX_BRDG_DBI_XFER_PENDING 0x4C07460
+
+#define mmPCIE_AUX_AUTO_SP_DIS 0x4C07478
+
+#define mmPCIE_AUX_DBI 0x4C07490
+
+#define mmPCIE_AUX_DBI_32 0x4C07494
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_0 0x4C074A4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_1 0x4C074A8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_2 0x4C074AC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_3 0x4C074B0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_4 0x4C074B4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_5 0x4C074B8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_6 0x4C074BC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_7 0x4C074C0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_8 0x4C074C4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_9 0x4C074C8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_10 0x4C074CC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_11 0x4C074D0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_12 0x4C074D4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_13 0x4C074D8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_14 0x4C074DC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_15 0x4C074E0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_16 0x4C074E4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_17 0x4C074E8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_18 0x4C074EC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_19 0x4C074F0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_20 0x4C074F4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_21 0x4C074F8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_22 0x4C074FC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_23 0x4C07500
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_24 0x4C07504
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_25 0x4C07508
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_26 0x4C0750C
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_27 0x4C07510
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_28 0x4C07514
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_0 0x4C07640
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_1 0x4C07644
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_2 0x4C07648
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_3 0x4C0764C
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_4 0x4C07650
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_5 0x4C07654
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_6 0x4C07658
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_7 0x4C0765C
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_8 0x4C07660
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_9 0x4C07664
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_10 0x4C07668
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_11 0x4C0766C
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_12 0x4C07670
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_13 0x4C07674
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_14 0x4C07678
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_15 0x4C0767C
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_0 0x4C07744
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_1 0x4C07748
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_2 0x4C0774C
+
+#define mmPCIE_AUX_APP_RAS_DES_TBA_CTRL 0x4C07774
+
+#define mmPCIE_AUX_PM_MASTER_STATE 0x4C07838
+
+#define mmPCIE_AUX_PM_SLAVE_STATE 0x4C0783C
+
+#define mmPCIE_AUX_PM_DSTATE 0x4C07840
+
+#define mmPCIE_AUX_PM_PME_EN 0x4C07844
+
+#define mmPCIE_AUX_PM_LINKST_IN_L0S 0x4C07848
+
+#define mmPCIE_AUX_PM_LINKST_IN_L1 0x4C0784C
+
+#define mmPCIE_AUX_PM_LINKST_IN_L2 0x4C07850
+
+#define mmPCIE_AUX_PM_LINKST_L2_EXIT 0x4C07854
+
+#define mmPCIE_AUX_PM_STATUS 0x4C07858
+
+#define mmPCIE_AUX_APP_READY_ENTER_L23 0x4C0785C
+
+#define mmPCIE_AUX_APP_XFER_PENDING 0x4C07860
+
+#define mmPCIE_AUX_APP_REQ_L1 0x4C07930
+
+#define mmPCIE_AUX_AUX_PM_EN 0x4C07934
+
+#define mmPCIE_AUX_APPS_PM_XMT_PME 0x4C07938
+
+#define mmPCIE_AUX_OUTBAND_PWRUP_CMD 0x4C07940
+
+#define mmPCIE_AUX_PERST 0x4C079B8
+
+#define mmPCIE_AUX_DBI_RO_WR_DISABLE 0x4C079BC
+
+#define mmPCIE_AUX_HOLD_PHY_RST 0x4C079C0
+
+#define mmPCIE_AUX_TLP_INTERNAL_ERR_REP 0x4C079C4
+
+#define mmPCIE_AUX_APP_SRIS_MODE 0x4C079C8
+
+#define mmPCIE_AUX_BUS_MSTR_EN_CLR_INTR 0x4C079CC
+
+#define mmPCIE_AUX_BUS_MSTR_EN_CLR_INTR_MASK 0x4C079D0
+
+#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
new file mode 100644
index 000000000000..cc5842ec6ceb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_DBI_REGS_H_
+#define ASIC_REG_PCIE_DBI_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_DBI
+ * (Prototype: PCIE_DBI)
+ *****************************************
+ */
+
+#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0x4C02000
+
+#define mmPCIE_DBI_STATUS_COMMAND_REG 0x4C02004
+
+#define mmPCIE_DBI_CLASS_CODE_REVISION_ID 0x4C02008
+
+#define mmPCIE_DBI_BIST_HEADER_TYPE_LATENCY_CACHE_LINE_SIZE_REG 0x4C0200C
+
+#define mmPCIE_DBI_BAR0_REG 0x4C02010
+
+#define mmPCIE_DBI_BAR1_REG 0x4C02014
+
+#define mmPCIE_DBI_BAR2_REG 0x4C02018
+
+#define mmPCIE_DBI_BAR3_REG 0x4C0201C
+
+#define mmPCIE_DBI_BAR4_REG 0x4C02020
+
+#define mmPCIE_DBI_BAR5_REG 0x4C02024
+
+#define mmPCIE_DBI_CARDBUS_CIS_PTR_REG 0x4C02028
+
+#define mmPCIE_DBI_SUBSYSTEM_ID_SUBSYSTEM_VENDOR_ID_REG 0x4C0202C
+
+#define mmPCIE_DBI_EXP_ROM_BASE_ADDR_REG 0x4C02030
+
+#define mmPCIE_DBI_PCI_CAP_PTR_REG 0x4C02034
+
+#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG \
+0x4C0203C
+
+#define mmPCIE_DBI_CAP_ID_NXT_PTR_REG 0x4C02040
+
+#define mmPCIE_DBI_CON_STATUS_REG 0x4C02044
+
+#define mmPCIE_DBI_PCI_MSI_CAP_ID_NEXT_CTRL_REG 0x4C02050
+
+#define mmPCIE_DBI_MSI_CAP_OFF_04H_REG 0x4C02054
+
+#define mmPCIE_DBI_MSI_CAP_OFF_08H_REG 0x4C02058
+
+#define mmPCIE_DBI_MSI_CAP_OFF_0CH_REG 0x4C0205C
+
+#define mmPCIE_DBI_MSI_CAP_OFF_10H_REG 0x4C02060
+
+#define mmPCIE_DBI_MSI_CAP_OFF_14H_REG 0x4C02064
+
+#define mmPCIE_DBI_PCIE_CAP_ID_PCIE_NEXT_CAP_PTR_PCIE_CAP_REG 0x4C02070
+
+#define mmPCIE_DBI_DEVICE_CAPABILITIES_REG 0x4C02074
+
+#define mmPCIE_DBI_DEVICE_CONTROL_DEVICE_STATUS 0x4C02078
+
+#define mmPCIE_DBI_LINK_CAPABILITIES_REG 0x4C0207C
+
+#define mmPCIE_DBI_LINK_CONTROL_LINK_STATUS_REG 0x4C02080
+
+#define mmPCIE_DBI_DEVICE_CAPABILITIES2_REG 0x4C02094
+
+#define mmPCIE_DBI_DEVICE_CONTROL2_DEVICE_STATUS2_REG 0x4C02098
+
+#define mmPCIE_DBI_LINK_CAPABILITIES2_REG 0x4C0209C
+
+#define mmPCIE_DBI_LINK_CONTROL2_LINK_STATUS2_REG 0x4C020A0
+
+#define mmPCIE_DBI_PCI_MSIX_CAP_ID_NEXT_CTRL_REG 0x4C020B0
+
+#define mmPCIE_DBI_MSIX_TABLE_OFFSET_REG 0x4C020B4
+
+#define mmPCIE_DBI_MSIX_PBA_OFFSET_REG 0x4C020B8
+
+#define mmPCIE_DBI_AER_EXT_CAP_HDR_OFF 0x4C02100
+
+#define mmPCIE_DBI_UNCORR_ERR_STATUS_OFF 0x4C02104
+
+#define mmPCIE_DBI_UNCORR_ERR_MASK_OFF 0x4C02108
+
+#define mmPCIE_DBI_UNCORR_ERR_SEV_OFF 0x4C0210C
+
+#define mmPCIE_DBI_CORR_ERR_STATUS_OFF 0x4C02110
+
+#define mmPCIE_DBI_CORR_ERR_MASK_OFF 0x4C02114
+
+#define mmPCIE_DBI_ADV_ERR_CAP_CTRL_OFF 0x4C02118
+
+#define mmPCIE_DBI_HDR_LOG_0_OFF 0x4C0211C
+
+#define mmPCIE_DBI_HDR_LOG_1_OFF 0x4C02120
+
+#define mmPCIE_DBI_HDR_LOG_2_OFF 0x4C02124
+
+#define mmPCIE_DBI_HDR_LOG_3_OFF 0x4C02128
+
+#define mmPCIE_DBI_TLP_PREFIX_LOG_1_OFF 0x4C02138
+
+#define mmPCIE_DBI_TLP_PREFIX_LOG_2_OFF 0x4C0213C
+
+#define mmPCIE_DBI_TLP_PREFIX_LOG_3_OFF 0x4C02140
+
+#define mmPCIE_DBI_TLP_PREFIX_LOG_4_OFF 0x4C02144
+
+#define mmPCIE_DBI_SPCIE_CAP_HEADER_REG 0x4C02148
+
+#define mmPCIE_DBI_LINK_CONTROL3_REG 0x4C0214C
+
+#define mmPCIE_DBI_LANE_ERR_STATUS_REG 0x4C02150
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_0CH_REG 0x4C02154
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_10H_REG 0x4C02158
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_14H_REG 0x4C0215C
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_18H_REG 0x4C02160
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_1CH_REG 0x4C02164
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_20H_REG 0x4C02168
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_24H_REG 0x4C0216C
+
+#define mmPCIE_DBI_SPCIE_CAP_OFF_28H_REG 0x4C02170
+
+#define mmPCIE_DBI_PL16G_EXT_CAP_HDR_REG 0x4C02178
+
+#define mmPCIE_DBI_PL16G_CAPABILITY_REG 0x4C0217C
+
+#define mmPCIE_DBI_PL16G_CONTROL_REG 0x4C02180
+
+#define mmPCIE_DBI_PL16G_STATUS_REG 0x4C02184
+
+#define mmPCIE_DBI_PL16G_LC_DPAR_STATUS_REG 0x4C02188
+
+#define mmPCIE_DBI_PL16G_FIRST_RETIMER_DPAR_STATUS_REG 0x4C0218C
+
+#define mmPCIE_DBI_PL16G_SECOND_RETIMER_DPAR_STATUS_REG 0x4C02190
+
+#define mmPCIE_DBI_PL16G_CAP_OFF_20H_REG 0x4C02198
+
+#define mmPCIE_DBI_PL16G_CAP_OFF_24H_REG 0x4C0219C
+
+#define mmPCIE_DBI_PL16G_CAP_OFF_28H_REG 0x4C021A0
+
+#define mmPCIE_DBI_PL16G_CAP_OFF_2CH_REG 0x4C021A4
+
+#define mmPCIE_DBI_MARGIN_EXT_CAP_HDR_REG 0x4C021A8
+
+#define mmPCIE_DBI_MARGIN_PORT_CAPABILITIES_STATUS_REG 0x4C021AC
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS0_REG 0x4C021B0
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS1_REG 0x4C021B4
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS2_REG 0x4C021B8
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS3_REG 0x4C021BC
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS4_REG 0x4C021C0
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS5_REG 0x4C021C4
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS6_REG 0x4C021C8
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS7_REG 0x4C021CC
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS8_REG 0x4C021D0
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS9_REG 0x4C021D4
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS10_REG 0x4C021D8
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS11_REG 0x4C021DC
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS12_REG 0x4C021E0
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS13_REG 0x4C021E4
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS14_REG 0x4C021E8
+
+#define mmPCIE_DBI_MARGIN_LANE_CNTRL_STATUS15_REG 0x4C021EC
+
+#define mmPCIE_DBI_LTR_CAP_HDR_REG 0x4C021F0
+
+#define mmPCIE_DBI_LTR_LATENCY_REG 0x4C021F4
+
+#define mmPCIE_DBI_RAS_DES_CAP_HEADER_REG 0x4C021F8
+
+#define mmPCIE_DBI_VENDOR_SPECIFIC_HEADER_REG 0x4C021FC
+
+#define mmPCIE_DBI_EVENT_COUNTER_CONTROL_REG 0x4C02200
+
+#define mmPCIE_DBI_EVENT_COUNTER_DATA_REG 0x4C02204
+
+#define mmPCIE_DBI_TIME_BASED_ANALYSIS_CONTROL_REG 0x4C02208
+
+#define mmPCIE_DBI_TIME_BASED_ANALYSIS_DATA_REG 0x4C0220C
+
+#define mmPCIE_DBI_TIME_BASED_ANALYSIS_DATA_63_32_REG 0x4C02210
+
+#define mmPCIE_DBI_EINJ_ENABLE_REG 0x4C02228
+
+#define mmPCIE_DBI_EINJ0_CRC_REG 0x4C0222C
+
+#define mmPCIE_DBI_EINJ1_SEQNUM_REG 0x4C02230
+
+#define mmPCIE_DBI_EINJ2_DLLP_REG 0x4C02234
+
+#define mmPCIE_DBI_EINJ3_SYMBOL_REG 0x4C02238
+
+#define mmPCIE_DBI_EINJ4_FC_REG 0x4C0223C
+
+#define mmPCIE_DBI_EINJ5_SP_TLP_REG 0x4C02240
+
+#define mmPCIE_DBI_EINJ6_COMPARE_POINT_H0_REG 0x4C02244
+
+#define mmPCIE_DBI_EINJ6_COMPARE_POINT_H1_REG 0x4C02248
+
+#define mmPCIE_DBI_EINJ6_COMPARE_POINT_H2_REG 0x4C0224C
+
+#define mmPCIE_DBI_EINJ6_COMPARE_POINT_H3_REG 0x4C02250
+
+#define mmPCIE_DBI_EINJ6_COMPARE_VALUE_H0_REG 0x4C02254
+
+#define mmPCIE_DBI_EINJ6_COMPARE_VALUE_H1_REG 0x4C02258
+
+#define mmPCIE_DBI_EINJ6_COMPARE_VALUE_H2_REG 0x4C0225C
+
+#define mmPCIE_DBI_EINJ6_COMPARE_VALUE_H3_REG 0x4C02260
+
+#define mmPCIE_DBI_EINJ6_CHANGE_POINT_H0_REG 0x4C02264
+
+#define mmPCIE_DBI_EINJ6_CHANGE_POINT_H1_REG 0x4C02268
+
+#define mmPCIE_DBI_EINJ6_CHANGE_POINT_H2_REG 0x4C0226C
+
+#define mmPCIE_DBI_EINJ6_CHANGE_POINT_H3_REG 0x4C02270
+
+#define mmPCIE_DBI_EINJ6_CHANGE_VALUE_H0_REG 0x4C02274
+
+#define mmPCIE_DBI_EINJ6_CHANGE_VALUE_H1_REG 0x4C02278
+
+#define mmPCIE_DBI_EINJ6_CHANGE_VALUE_H2_REG 0x4C0227C
+
+#define mmPCIE_DBI_EINJ6_CHANGE_VALUE_H3_REG 0x4C02280
+
+#define mmPCIE_DBI_EINJ6_TLP_REG 0x4C02284
+
+#define mmPCIE_DBI_SD_CONTROL1_REG 0x4C02298
+
+#define mmPCIE_DBI_SD_CONTROL2_REG 0x4C0229C
+
+#define mmPCIE_DBI_SD_STATUS_L1LANE_REG 0x4C022A8
+
+#define mmPCIE_DBI_SD_STATUS_L1LTSSM_REG 0x4C022AC
+
+#define mmPCIE_DBI_SD_STATUS_PM_REG 0x4C022B0
+
+#define mmPCIE_DBI_SD_STATUS_L2_REG 0x4C022B4
+
+#define mmPCIE_DBI_SD_STATUS_L3FC_REG 0x4C022B8
+
+#define mmPCIE_DBI_SD_STATUS_L3_REG 0x4C022BC
+
+#define mmPCIE_DBI_SD_EQ_CONTROL1_REG 0x4C022C8
+
+#define mmPCIE_DBI_SD_EQ_CONTROL2_REG 0x4C022CC
+
+#define mmPCIE_DBI_SD_EQ_CONTROL3_REG 0x4C022D0
+
+#define mmPCIE_DBI_SD_EQ_STATUS1_REG 0x4C022D8
+
+#define mmPCIE_DBI_SD_EQ_STATUS2_REG 0x4C022DC
+
+#define mmPCIE_DBI_SD_EQ_STATUS3_REG 0x4C022E0
+
+#define mmPCIE_DBI_DATA_LINK_FEATURE_EXT_HDR_OFF 0x4C022F8
+
+#define mmPCIE_DBI_DATA_LINK_FEATURE_CAP_OFF 0x4C022FC
+
+#define mmPCIE_DBI_DATA_LINK_FEATURE_STATUS_OFF 0x4C02300
+
+#define mmPCIE_DBI_ACK_LATENCY_TIMER_OFF 0x4C02700
+
+#define mmPCIE_DBI_VENDOR_SPEC_DLLP_OFF 0x4C02704
+
+#define mmPCIE_DBI_PORT_FORCE_OFF 0x4C02708
+
+#define mmPCIE_DBI_ACK_F_ASPM_CTRL_OFF 0x4C0270C
+
+#define mmPCIE_DBI_PORT_LINK_CTRL_OFF 0x4C02710
+
+#define mmPCIE_DBI_LANE_SKEW_OFF 0x4C02714
+
+#define mmPCIE_DBI_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x4C02718
+
+#define mmPCIE_DBI_SYMBOL_TIMER_FILTER_1_OFF 0x4C0271C
+
+#define mmPCIE_DBI_FILTER_MASK_2_OFF 0x4C02720
+
+#define mmPCIE_DBI_AMBA_MUL_OB_DECOMP_NP_SUB_REQ_CTRL_OFF 0x4C02724
+
+#define mmPCIE_DBI_PL_DEBUG0_OFF 0x4C02728
+
+#define mmPCIE_DBI_PL_DEBUG1_OFF 0x4C0272C
+
+#define mmPCIE_DBI_TX_P_FC_CREDIT_STATUS_OFF 0x4C02730
+
+#define mmPCIE_DBI_TX_NP_FC_CREDIT_STATUS_OFF 0x4C02734
+
+#define mmPCIE_DBI_TX_CPL_FC_CREDIT_STATUS_OFF 0x4C02738
+
+#define mmPCIE_DBI_QUEUE_STATUS_OFF 0x4C0273C
+
+#define mmPCIE_DBI_VC_TX_ARBI_1_OFF 0x4C02740
+
+#define mmPCIE_DBI_VC_TX_ARBI_2_OFF 0x4C02744
+
+#define mmPCIE_DBI_VC0_P_RX_Q_CTRL_OFF 0x4C02748
+
+#define mmPCIE_DBI_VC0_NP_RX_Q_CTRL_OFF 0x4C0274C
+
+#define mmPCIE_DBI_VC0_CPL_RX_Q_CTRL_OFF 0x4C02750
+
+#define mmPCIE_DBI_GEN2_CTRL_OFF 0x4C0280C
+
+#define mmPCIE_DBI_PHY_STATUS_OFF 0x4C02810
+
+#define mmPCIE_DBI_PHY_CONTROL_OFF 0x4C02814
+
+#define mmPCIE_DBI_TRGT_MAP_CTRL_OFF 0x4C0281C
+
+#define mmPCIE_DBI_CLOCK_GATING_CTRL_OFF 0x4C0288C
+
+#define mmPCIE_DBI_GEN3_RELATED_OFF 0x4C02890
+
+#define mmPCIE_DBI_GEN3_EQ_CONTROL_OFF 0x4C028A8
+
+#define mmPCIE_DBI_GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x4C028AC
+
+#define mmPCIE_DBI_ORDER_RULE_CTRL_OFF 0x4C028B4
+
+#define mmPCIE_DBI_PIPE_LOOPBACK_CONTROL_OFF 0x4C028B8
+
+#define mmPCIE_DBI_MISC_CONTROL_1_OFF 0x4C028BC
+
+#define mmPCIE_DBI_MULTI_LANE_CONTROL_OFF 0x4C028C0
+
+#define mmPCIE_DBI_PHY_INTEROP_CTRL_OFF 0x4C028C4
+
+#define mmPCIE_DBI_TRGT_CPL_LUT_DELETE_ENTRY_OFF 0x4C028C8
+
+#define mmPCIE_DBI_LINK_FLUSH_CONTROL_OFF 0x4C028CC
+
+#define mmPCIE_DBI_AMBA_ERROR_RESPONSE_DEFAULT_OFF 0x4C028D0
+
+#define mmPCIE_DBI_AMBA_LINK_TIMEOUT_OFF 0x4C028D4
+
+#define mmPCIE_DBI_AMBA_ORDERING_CTRL_OFF 0x4C028D8
+
+#define mmPCIE_DBI_COHERENCY_CONTROL_1_OFF 0x4C028E0
+
+#define mmPCIE_DBI_COHERENCY_CONTROL_2_OFF 0x4C028E4
+
+#define mmPCIE_DBI_COHERENCY_CONTROL_3_OFF 0x4C028E8
+
+#define mmPCIE_DBI_AXI_MSTR_MSG_ADDR_LOW_OFF 0x4C028F0
+
+#define mmPCIE_DBI_AXI_MSTR_MSG_ADDR_HIGH_OFF 0x4C028F4
+
+#define mmPCIE_DBI_PCIE_VERSION_NUMBER_OFF 0x4C028F8
+
+#define mmPCIE_DBI_PCIE_VERSION_TYPE_OFF 0x4C028FC
+
+#define mmPCIE_DBI_MSIX_ADDRESS_MATCH_LOW_OFF 0x4C02940
+
+#define mmPCIE_DBI_MSIX_ADDRESS_MATCH_HIGH_OFF 0x4C02944
+
+#define mmPCIE_DBI_MSIX_DOORBELL_OFF 0x4C02948
+
+#define mmPCIE_DBI_MSIX_RAM_CTRL_OFF 0x4C0294C
+
+#define mmPCIE_DBI_PL_LTR_LATENCY_OFF 0x4C02B30
+
+#define mmPCIE_DBI_AUX_CLK_FREQ_OFF 0x4C02B40
+
+#define mmPCIE_DBI_POWERDOWN_CTRL_STATUS_OFF 0x4C02B48
+
+#define mmPCIE_DBI_PHY_VIEWPORT_CTLSTS_OFF 0x4C02B70
+
+#define mmPCIE_DBI_PHY_VIEWPORT_DATA_OFF 0x4C02B74
+
+#define mmPCIE_DBI_GEN4_LANE_MARGINING_1_OFF 0x4C02B80
+
+#define mmPCIE_DBI_GEN4_LANE_MARGINING_2_OFF 0x4C02B84
+
+#define mmPCIE_DBI_PIPE_RELATED_OFF 0x4C02B90
+
+#define mmPCIE_DBI_RX_SERIALIZATION_Q_CTRL_OFF 0x4C02C00
+
+#endif /* ASIC_REG_PCIE_DBI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
new file mode 100644
index 000000000000..2b5af010c7a5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_DEC0_CMD_MASKS_H_
+#define ASIC_REG_PCIE_DEC0_CMD_MASKS_H_
+
+/*
+ *****************************************
+ * PCIE_DEC0_CMD
+ * (Prototype: VSI_CMD)
+ *****************************************
+ */
+
+/* PCIE_DEC0_CMD_SWREG0 */
+#define PCIE_DEC0_CMD_SWREG0_SW_HW_VERSION_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG0_SW_HW_VERSION_MASK 0xFFFF
+#define PCIE_DEC0_CMD_SWREG0_SW_HW_ID_SHIFT 16
+#define PCIE_DEC0_CMD_SWREG0_SW_HW_ID_MASK 0xFFFF0000
+
+/* PCIE_DEC0_CMD_SWREG1 */
+#define PCIE_DEC0_CMD_SWREG1_SW_HW_BUILDDATE_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG1_SW_HW_BUILDDATE_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG2 */
+#define PCIE_DEC0_CMD_SWREG2_SW_EXT_NORM_INTR_SRC_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG2_SW_EXT_NORM_INTR_SRC_MASK 0xFFFF
+#define PCIE_DEC0_CMD_SWREG2_SW_EXT_ABN_INTR_SRC_SHIFT 16
+#define PCIE_DEC0_CMD_SWREG2_SW_EXT_ABN_INTR_SRC_MASK 0xFFFF0000
+
+/* PCIE_DEC0_CMD_SWREG3 */
+#define PCIE_DEC0_CMD_SWREG3_SW_EXE_CMDBUF_COUNT_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG3_SW_EXE_CMDBUF_COUNT_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG4 */
+#define PCIE_DEC0_CMD_SWREG4_SW_CMD_EXE_LSB_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG4_SW_CMD_EXE_LSB_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG5 */
+#define PCIE_DEC0_CMD_SWREG5_SW_CMD_EXE_MSB_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG5_SW_CMD_EXE_MSB_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG6 */
+#define PCIE_DEC0_CMD_SWREG6_SW_AXI_TOTALARLEN_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG6_SW_AXI_TOTALARLEN_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG7 */
+#define PCIE_DEC0_CMD_SWREG7_SW_AXI_TOTALR_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG7_SW_AXI_TOTALR_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG8 */
+#define PCIE_DEC0_CMD_SWREG8_SW_AXI_TOTALAR_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG8_SW_AXI_TOTALAR_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG9 */
+#define PCIE_DEC0_CMD_SWREG9_SW_AXI_TOTALRLAST_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG9_SW_AXI_TOTALRLAST_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG10 */
+#define PCIE_DEC0_CMD_SWREG10_SW_AXI_TOTALAWLEN_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG10_SW_AXI_TOTALAWLEN_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG11 */
+#define PCIE_DEC0_CMD_SWREG11_SW_AXI_TOTALW_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG11_SW_AXI_TOTALW_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG12 */
+#define PCIE_DEC0_CMD_SWREG12_SW_AXI_TOTALAW_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG12_SW_AXI_TOTALAW_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG13 */
+#define PCIE_DEC0_CMD_SWREG13_SW_AXI_TOTALWLAST_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG13_SW_AXI_TOTALWLAST_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG14 */
+#define PCIE_DEC0_CMD_SWREG14_SW_AXI_TOTALB_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG14_SW_AXI_TOTALB_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG15 */
+#define PCIE_DEC0_CMD_SWREG15_SW_WORK_STATE_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK 0x7
+#define PCIE_DEC0_CMD_SWREG15_RSV_SHIFT 3
+#define PCIE_DEC0_CMD_SWREG15_RSV_MASK 0x3FFFF8
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_BREADY_SHIFT 22
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_BREADY_MASK 0x400000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_BVALID_SHIFT 23
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_BVALID_MASK 0x800000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_WREADY_SHIFT 24
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_WREADY_MASK 0x1000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_WVALID_SHIFT 25
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_WVALID_MASK 0x2000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_AWREADY_SHIFT 26
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_AWREADY_MASK 0x4000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_AWVALID_SHIFT 27
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_AWVALID_MASK 0x8000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_RREADY_SHIFT 28
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_RREADY_MASK 0x10000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_RVALID_SHIFT 29
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_RVALID_MASK 0x20000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_ARREADY_SHIFT 30
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_ARREADY_MASK 0x40000000
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_ARVALID_SHIFT 31
+#define PCIE_DEC0_CMD_SWREG15_SW_AXI_ARVALID_MASK 0x80000000
+
+/* PCIE_DEC0_CMD_SWREG16 */
+#define PCIE_DEC0_CMD_SWREG16_SW_START_TRIGGER_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG16_SW_START_TRIGGER_MASK 0x1
+#define PCIE_DEC0_CMD_SWREG16_SW_RESET_ALL_SHIFT 1
+#define PCIE_DEC0_CMD_SWREG16_SW_RESET_ALL_MASK 0x2
+#define PCIE_DEC0_CMD_SWREG16_SW_RESET_CORE_SHIFT 2
+#define PCIE_DEC0_CMD_SWREG16_SW_RESET_CORE_MASK 0x4
+#define PCIE_DEC0_CMD_SWREG16_SW_ABORT_MODE_SHIFT 3
+#define PCIE_DEC0_CMD_SWREG16_SW_ABORT_MODE_MASK 0x8
+#define PCIE_DEC0_CMD_SWREG16_SW_CORE_CLK_GATE_DISABLE_SHIFT 4
+#define PCIE_DEC0_CMD_SWREG16_SW_CORE_CLK_GATE_DISABLE_MASK 0x10
+#define PCIE_DEC0_CMD_SWREG16_SW_MASTER_OUT_CLK_GATE_DISABLE_SHIFT 5
+#define PCIE_DEC0_CMD_SWREG16_SW_MASTER_OUT_CLK_GATE_DISABLE_MASK 0x20
+#define PCIE_DEC0_CMD_SWREG16_SW_AXI_CLK_GATE_DISABLE_SHIFT 6
+#define PCIE_DEC0_CMD_SWREG16_SW_AXI_CLK_GATE_DISABLE_MASK 0x40
+#define PCIE_DEC0_CMD_SWREG16_RSV_SHIFT 7
+#define PCIE_DEC0_CMD_SWREG16_RSV_MASK 0xFFFFFF80
+
+/* PCIE_DEC0_CMD_SWREG17 */
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_ENDCMD_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_ENDCMD_MASK 0x1
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_BUSERR_SHIFT 1
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_BUSERR_MASK 0x2
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_TIMEOUT_SHIFT 2
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_TIMEOUT_MASK 0x4
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_CMDERR_SHIFT 3
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_CMDERR_MASK 0x8
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_ABORT_SHIFT 4
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_ABORT_MASK 0x10
+#define PCIE_DEC0_CMD_SWREG17_RSV_1_SHIFT 5
+#define PCIE_DEC0_CMD_SWREG17_RSV_1_MASK 0x20
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_JMP_SHIFT 6
+#define PCIE_DEC0_CMD_SWREG17_SW_IRQ_JMP_MASK 0x40
+#define PCIE_DEC0_CMD_SWREG17_RSV_SHIFT 7
+#define PCIE_DEC0_CMD_SWREG17_RSV_MASK 0xFFFFFF80
+
+/* PCIE_DEC0_CMD_SWREG18 */
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_ENDCMD_EN_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_ENDCMD_EN_MASK 0x1
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_BUSERR_EN_SHIFT 1
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_BUSERR_EN_MASK 0x2
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_TIMEOUT_EN_SHIFT 2
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_TIMEOUT_EN_MASK 0x4
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_CMDERR_EN_SHIFT 3
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_CMDERR_EN_MASK 0x8
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_ABORT_EN_SHIFT 4
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_ABORT_EN_MASK 0x10
+#define PCIE_DEC0_CMD_SWREG18_RSV_1_SHIFT 5
+#define PCIE_DEC0_CMD_SWREG18_RSV_1_MASK 0x20
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_JMP_EN_SHIFT 6
+#define PCIE_DEC0_CMD_SWREG18_SW_IRQ_JMP_EN_MASK 0x40
+#define PCIE_DEC0_CMD_SWREG18_RSV_SHIFT 7
+#define PCIE_DEC0_CMD_SWREG18_RSV_MASK 0xFFFFFF80
+
+/* PCIE_DEC0_CMD_SWREG19 */
+#define PCIE_DEC0_CMD_SWREG19_SW_TIMEOUT_CYCLES_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG19_SW_TIMEOUT_CYCLES_MASK 0x7FFFFFFF
+#define PCIE_DEC0_CMD_SWREG19_SW_TIMEOUT_ENABLE_SHIFT 31
+#define PCIE_DEC0_CMD_SWREG19_SW_TIMEOUT_ENABLE_MASK 0x80000000
+
+/* PCIE_DEC0_CMD_SWREG20 */
+#define PCIE_DEC0_CMD_SWREG20_SW_CMDBUF_EXE_ADDR_LSB_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG20_SW_CMDBUF_EXE_ADDR_LSB_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG21 */
+#define PCIE_DEC0_CMD_SWREG21_SW_CMDBUF_EXE_ADDR_MSB_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG21_SW_CMDBUF_EXE_ADDR_MSB_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG22 */
+#define PCIE_DEC0_CMD_SWREG22_SW_CMDBUF_EXE_LENGTH_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG22_SW_CMDBUF_EXE_LENGTH_MASK 0xFFFF
+#define PCIE_DEC0_CMD_SWREG22_RSV_SHIFT 16
+#define PCIE_DEC0_CMD_SWREG22_RSV_MASK 0xFFFF0000
+
+/* PCIE_DEC0_CMD_SWREG23 */
+#define PCIE_DEC0_CMD_SWREG23_SW_AXI_ID_WR_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG23_SW_AXI_ID_WR_MASK 0xFF
+#define PCIE_DEC0_CMD_SWREG23_SW_AXI_ID_RD_SHIFT 8
+#define PCIE_DEC0_CMD_SWREG23_SW_AXI_ID_RD_MASK 0xFF00
+#define PCIE_DEC0_CMD_SWREG23_SW_MAX_BURST_LEN_SHIFT 16
+#define PCIE_DEC0_CMD_SWREG23_SW_MAX_BURST_LEN_MASK 0xFF0000
+#define PCIE_DEC0_CMD_SWREG23_RSV_SHIFT 24
+#define PCIE_DEC0_CMD_SWREG23_RSV_MASK 0xF000000
+#define PCIE_DEC0_CMD_SWREG23_SW_CMD_SWAP_SHIFT 28
+#define PCIE_DEC0_CMD_SWREG23_SW_CMD_SWAP_MASK 0xF0000000
+
+/* PCIE_DEC0_CMD_SWREG24 */
+#define PCIE_DEC0_CMD_SWREG24_SW_RDY_CMDBUF_COUNT_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG24_SW_RDY_CMDBUF_COUNT_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG25 */
+#define PCIE_DEC0_CMD_SWREG25_SW_EXT_NORM_INTR_GATE_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG25_SW_EXT_NORM_INTR_GATE_MASK 0xFFFF
+#define PCIE_DEC0_CMD_SWREG25_SW_EXT_ABN_INTR_GATE_SHIFT 16
+#define PCIE_DEC0_CMD_SWREG25_SW_EXT_ABN_INTR_GATE_MASK 0xFFFF0000
+
+/* PCIE_DEC0_CMD_SWREG26 */
+#define PCIE_DEC0_CMD_SWREG26_SW_CMDBUF_EXE_ID_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG26_SW_CMDBUF_EXE_ID_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG64 */
+#define PCIE_DEC0_CMD_SWREG64_SW_DUMMY0_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG64_SW_DUMMY0_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG65 */
+#define PCIE_DEC0_CMD_SWREG65_SW_DUMMY1_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG65_SW_DUMMY1_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG66 */
+#define PCIE_DEC0_CMD_SWREG66_SW_DUMMY2_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG66_SW_DUMMY2_MASK 0xFFFFFFFF
+
+/* PCIE_DEC0_CMD_SWREG67 */
+#define PCIE_DEC0_CMD_SWREG67_SW_DUMMY3_SHIFT 0
+#define PCIE_DEC0_CMD_SWREG67_SW_DUMMY3_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_PCIE_DEC0_CMD_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
new file mode 100644
index 000000000000..dc7d3f6a4b50
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_DEC0_CMD_REGS_H_
+#define ASIC_REG_PCIE_DEC0_CMD_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_DEC0_CMD
+ * (Prototype: VSI_CMD)
+ *****************************************
+ */
+
+#define mmPCIE_DEC0_CMD_SWREG0 0x4F00000
+
+#define mmPCIE_DEC0_CMD_SWREG1 0x4F00004
+
+#define mmPCIE_DEC0_CMD_SWREG2 0x4F00008
+
+#define mmPCIE_DEC0_CMD_SWREG3 0x4F0000C
+
+#define mmPCIE_DEC0_CMD_SWREG4 0x4F00010
+
+#define mmPCIE_DEC0_CMD_SWREG5 0x4F00014
+
+#define mmPCIE_DEC0_CMD_SWREG6 0x4F00018
+
+#define mmPCIE_DEC0_CMD_SWREG7 0x4F0001C
+
+#define mmPCIE_DEC0_CMD_SWREG8 0x4F00020
+
+#define mmPCIE_DEC0_CMD_SWREG9 0x4F00024
+
+#define mmPCIE_DEC0_CMD_SWREG10 0x4F00028
+
+#define mmPCIE_DEC0_CMD_SWREG11 0x4F0002C
+
+#define mmPCIE_DEC0_CMD_SWREG12 0x4F00030
+
+#define mmPCIE_DEC0_CMD_SWREG13 0x4F00034
+
+#define mmPCIE_DEC0_CMD_SWREG14 0x4F00038
+
+#define mmPCIE_DEC0_CMD_SWREG15 0x4F0003C
+
+#define mmPCIE_DEC0_CMD_SWREG16 0x4F00040
+
+#define mmPCIE_DEC0_CMD_SWREG17 0x4F00044
+
+#define mmPCIE_DEC0_CMD_SWREG18 0x4F00048
+
+#define mmPCIE_DEC0_CMD_SWREG19 0x4F0004C
+
+#define mmPCIE_DEC0_CMD_SWREG20 0x4F00050
+
+#define mmPCIE_DEC0_CMD_SWREG21 0x4F00054
+
+#define mmPCIE_DEC0_CMD_SWREG22 0x4F00058
+
+#define mmPCIE_DEC0_CMD_SWREG23 0x4F0005C
+
+#define mmPCIE_DEC0_CMD_SWREG24 0x4F00060
+
+#define mmPCIE_DEC0_CMD_SWREG25 0x4F00064
+
+#define mmPCIE_DEC0_CMD_SWREG26 0x4F00068
+
+#define mmPCIE_DEC0_CMD_SWREG64 0x4F00100
+
+#define mmPCIE_DEC0_CMD_SWREG65 0x4F00104
+
+#define mmPCIE_DEC0_CMD_SWREG66 0x4F00108
+
+#define mmPCIE_DEC0_CMD_SWREG67 0x4F0010C
+
+#endif /* ASIC_REG_PCIE_DEC0_CMD_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
new file mode 100644
index 000000000000..242c6525bd71
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL_AXUSER_DEC
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID 0x4F03C00
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP 0x4F03C04
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_STRONG_ORDER 0x4F03C08
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_NO_SNOOP 0x4F03C0C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_WR_REDUCTION 0x4F03C10
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RD_ATOMIC 0x4F03C14
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_QOS 0x4F03C18
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RSVD 0x4F03C1C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_EMEM_CPAGE 0x4F03C20
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_CORE 0x4F03C24
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_E2E_COORD 0x4F03C28
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_WR_OVRD_LO 0x4F03C30
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_WR_OVRD_HI 0x4F03C34
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RD_OVRD_LO 0x4F03C38
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_RD_OVRD_HI 0x4F03C3C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_COORD 0x4F03C40
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_LOCK 0x4F03C44
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_RSVD 0x4F03C48
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_LB_OVRD 0x4F03C4C
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
new file mode 100644
index 000000000000..98d035463561
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID 0x4F03B00
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP 0x4F03B04
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_STRONG_ORDER 0x4F03B08
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_NO_SNOOP 0x4F03B0C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_WR_REDUCTION 0x4F03B10
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RD_ATOMIC 0x4F03B14
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_QOS 0x4F03B18
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RSVD 0x4F03B1C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_EMEM_CPAGE 0x4F03B20
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_CORE 0x4F03B24
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_E2E_COORD 0x4F03B28
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_WR_OVRD_LO 0x4F03B30
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_WR_OVRD_HI 0x4F03B34
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RD_OVRD_LO 0x4F03B38
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_RD_OVRD_HI 0x4F03B3C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_COORD 0x4F03B40
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_LOCK 0x4F03B44
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_RSVD 0x4F03B48
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_LB_OVRD 0x4F03B4C
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
new file mode 100644
index 000000000000..33ef37619417
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID 0x4F03900
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP 0x4F03904
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_STRONG_ORDER 0x4F03908
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_NO_SNOOP 0x4F0390C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_WR_REDUCTION 0x4F03910
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RD_ATOMIC 0x4F03914
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_QOS 0x4F03918
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RSVD 0x4F0391C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_EMEM_CPAGE 0x4F03920
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_CORE 0x4F03924
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_E2E_COORD 0x4F03928
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_WR_OVRD_LO 0x4F03930
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_WR_OVRD_HI 0x4F03934
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RD_OVRD_LO 0x4F03938
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_RD_OVRD_HI 0x4F0393C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_COORD 0x4F03940
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_LOCK 0x4F03944
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_RSVD 0x4F03948
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_LB_OVRD 0x4F0394C
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
new file mode 100644
index 000000000000..c4587d5d6406
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID 0x4F03A00
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP 0x4F03A04
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_STRONG_ORDER 0x4F03A08
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_NO_SNOOP 0x4F03A0C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_WR_REDUCTION 0x4F03A10
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RD_ATOMIC 0x4F03A14
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_QOS 0x4F03A18
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RSVD 0x4F03A1C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_EMEM_CPAGE 0x4F03A20
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_CORE 0x4F03A24
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_E2E_COORD 0x4F03A28
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_WR_OVRD_LO 0x4F03A30
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_WR_OVRD_HI 0x4F03A34
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RD_OVRD_LO 0x4F03A38
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_RD_OVRD_HI 0x4F03A3C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_COORD 0x4F03A40
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_LOCK 0x4F03A44
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_RSVD 0x4F03A48
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_LB_OVRD 0x4F03A4C
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
new file mode 100644
index 000000000000..35349ad375d0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID 0x4F03800
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP 0x4F03804
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_STRONG_ORDER 0x4F03808
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_NO_SNOOP 0x4F0380C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_WR_REDUCTION 0x4F03810
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RD_ATOMIC 0x4F03814
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_QOS 0x4F03818
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RSVD 0x4F0381C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_EMEM_CPAGE 0x4F03820
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_CORE 0x4F03824
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_E2E_COORD 0x4F03828
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_WR_OVRD_LO 0x4F03830
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_WR_OVRD_HI 0x4F03834
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RD_OVRD_LO 0x4F03838
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_RD_OVRD_HI 0x4F0383C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_COORD 0x4F03840
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_LOCK 0x4F03844
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_RSVD 0x4F03848
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_LB_OVRD 0x4F0384C
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
new file mode 100644
index 000000000000..d29837883216
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
@@ -0,0 +1,580 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_MASKS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_MASKS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL
+ * (Prototype: VDEC_BRDG_CTRL)
+ *****************************************
+ */
+
+/* PCIE_VDEC0_BRDG_CTRL_CGM_DISABLE */
+#define PCIE_VDEC0_BRDG_CTRL_CGM_DISABLE_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_CGM_DISABLE_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_IDLE_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_IDLE_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_IDLE_MASK_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_APB_CGM_CNT */
+#define PCIE_VDEC0_BRDG_CTRL_APB_CGM_CNT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_APB_CGM_CNT_VAL_MASK 0xFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT */
+#define PCIE_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT_VAL_MASK 0xFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_GRACEFUL */
+#define PCIE_VDEC0_BRDG_CTRL_GRACEFUL_STOP_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_GRACEFUL_PEND_SHIFT 4
+#define PCIE_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK 0x10
+
+/* PCIE_VDEC0_BRDG_CTRL_IDLE_CGM_CNT */
+#define PCIE_VDEC0_BRDG_CTRL_IDLE_CGM_CNT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_IDLE_CGM_CNT_VAL_MASK 0xFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR */
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_HBW_SEI_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_HBW_SEI_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_HBW_SEI_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_HBW_SEI_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_HBW_SEI_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_HBW_SEI_MASK 0x4
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_HBW_SEI_SHIFT 3
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_HBW_SEI_MASK 0x8
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_LBW_SEI_SHIFT 4
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_VCD_LBW_SEI_MASK 0x10
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_LBW_SEI_SHIFT 5
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_L2C_LBW_SEI_MASK 0x20
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_LBW_SEI_SHIFT 6
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_NRM_LBW_SEI_MASK 0x40
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_LBW_SEI_SHIFT 7
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MSIX_ABNRM_LBW_SEI_MASK 0x80
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_VCD_LBW_SEI_SHIFT 8
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_VCD_LBW_SEI_MASK 0x100
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_L2C_LBW_SEI_SHIFT 9
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_L2C_LBW_SEI_MASK 0x200
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_NRM_LBW_SEI_SHIFT 10
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_NRM_LBW_SEI_MASK 0x400
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_ABNRM_LBW_SEI_SHIFT 11
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_APB_ABNRM_LBW_SEI_MASK 0x800
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_SEI_SHIFT 12
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_SEI_MASK 0x1000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_APB_SEI_SHIFT 13
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_DEC_APB_SEI_MASK 0x2000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_TRC_APB_SEI_SHIFT 14
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_TRC_APB_SEI_MASK 0x4000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_MSTR_IF_SEI_SHIFT 15
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_MSTR_IF_SEI_MASK 0x8000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_AXI_SPLIT_BRESP_ERR_SEI_SHIFT 16
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_AXI_SPLIT_BRESP_ERR_SEI_MASK 0x10000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_WR_VIOL_SEI_SHIFT 17
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_WR_VIOL_SEI_MASK 0x20000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_RD_VIOL_SEI_SHIFT 18
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_HBW_AXI_RD_VIOL_SEI_MASK 0x40000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_WR_VIOL_SEI_SHIFT 19
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_WR_VIOL_SEI_MASK 0x80000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_RD_VIOL_SEI_SHIFT 20
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_LBW_AXI_RD_VIOL_SEI_MASK 0x100000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_VCD_SPI_SHIFT 21
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_VCD_SPI_MASK 0x200000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_L2C_SPI_SHIFT 22
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_L2C_SPI_MASK 0x400000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_NRM_SPI_SHIFT 23
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_NRM_SPI_MASK 0x800000
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_ABNRM_SPI_SHIFT 24
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_ABNRM_SPI_MASK 0x1000000
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLEN_GT_31_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLEN_GT_31_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_MASK 0x4
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_SHIFT 3
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK 0x8
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 4
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x10
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_SHIFT 5
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_MASK 0x20
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLOCK_VIOL_SHIFT 6
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLOCK_VIOL_MASK 0x40
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
+0x100
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
+
+/* PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE */
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_READ_ACCESS_VIOL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_READ_ACCESS_VIOL_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLOCK_VIOL_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWADDR_ALIGN_VIOL_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWADDR_ALIGN_VIOL_MASK 0x4
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 3
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x8
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLEN_VIOL_SHIFT 4
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_AWLEN_VIOL_MASK 0x10
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_WSTRB_VIOL_SHIFT 5
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE_WSTRB_VIOL_MASK 0x20
+
+/* PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM */
+#define PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AW_VIOL_CLR_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AW_VIOL_CLR_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AR_VIOL_CLR_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_HBW_AR_VIOL_CLR_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_LBW_AW_VIOL_CLR_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM_LBW_AW_VIOL_CLR_MASK 0x4
+
+/* PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK_MASK_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK_MASK_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK_MASK_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK_MASK_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_AWSIZE_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_AWSIZE_MASK 0x7
+#define PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_ARSIZE_SHIFT 3
+#define PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_HBW_LEGAL_ARSIZE_MASK 0x38
+#define PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_LBW_LEGAL_AWSIZE_SHIFT 6
+#define PCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE_LBW_LEGAL_AWSIZE_MASK 0x1C0
+
+/* PCIE_VDEC0_BRDG_CTRL_ARC_MSG_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_ARC_MSG_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ARC_MSG_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA */
+#define PCIE_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA_VAL_MASK 0xFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA */
+#define PCIE_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA_VAL_MASK 0xFF
+
+/* PCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL */
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L_IND_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L_IND_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H_IND_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H_IND_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L_IND_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L_IND_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H_IND_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H_IND_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_CNTR_EN */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_CNTR_EN_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_CNTR_EN_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA */
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA */
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA */
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_WR_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_WR_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_RD_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_APB_RD_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_LBW_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK_LBW_MASK 0x4
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR */
+#define PCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR_VAL_MASK 0xFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT_VAL_MASK 0x7
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA */
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID */
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID_ID_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID_ID_MASK 0xFF
+
+/* PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG */
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_RESP_OK_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_RESP_OK_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_WR_BUF_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_FORCE_WR_BUF_MASK 0x2
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_RD_OS_SHIFT 8
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_RD_OS_MASK 0xFF00
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_WR_OS_SHIFT 16
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG_NUM_WR_OS_MASK 0xFF0000
+
+/* PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT */
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT_VAL_MASK 0x1
+
+/* PCIE_VDEC0_BRDG_CTRL_HWEVENT_MASK */
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_MASK_MASK_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_MASK_MASK_MASK 0x2
+
+/* PCIE_VDEC0_BRDG_CTRL_HWEVENT_CNTXT */
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_CNTXT_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HWEVENT_CNTXT_VAL_MASK 0xFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP */
+#define PCIE_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP_ERR_RESP_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP_ERR_RESP_MASK 0x3
+
+/* PCIE_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP */
+#define PCIE_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP_ERR_RESP_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP_ERR_RESP_MASK 0x3
+
+/* PCIE_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP */
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_WR_ERR_RESP_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_WR_ERR_RESP_MASK 0x3
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_RD_ERR_RESP_SHIFT 2
+#define PCIE_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP_RD_ERR_RESP_MASK 0xC
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AW_STA_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AW_STA_MASK 0x1
+#define PCIE_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AR_STA_SHIFT 1
+#define PCIE_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS_AR_STA_MASK 0x2
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L_VAL_MASK 0xFFFFFFFF
+
+/* PCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H */
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H_VAL_SHIFT 0
+#define PCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
new file mode 100644
index 000000000000..c7badd212f2b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_BRDG_CTRL_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_BRDG_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_BRDG_CTRL
+ * (Prototype: VDEC_BRDG_CTRL)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_BRDG_CTRL_CGM_DISABLE 0x4F03100
+
+#define mmPCIE_VDEC0_BRDG_CTRL_IDLE_MASK 0x4F03104
+
+#define mmPCIE_VDEC0_BRDG_CTRL_APB_CGM_CNT 0x4F03108
+
+#define mmPCIE_VDEC0_BRDG_CTRL_APB_ARB_WDOG_CNT 0x4F0310C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL 0x4F03110
+
+#define mmPCIE_VDEC0_BRDG_CTRL_IDLE_CGM_CNT 0x4F03114
+
+#define mmPCIE_VDEC0_BRDG_CTRL_CAUSE_INTR 0x4F03120
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE 0x4F03124
+
+#define mmPCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_CAUSE 0x4F03128
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXI_VIOL_CLR_STICKY_TERM 0x4F0312C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_CAUSE_INTR_MASK 0x4F03130
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_MASK 0x4F03134
+
+#define mmPCIE_VDEC0_BRDG_CTRL_LBW_AXI_VIOL_MASK 0x4F03138
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_GIC_INTR_MASK 0x4F03160
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_GIC_INTR_MASK 0x4F03170
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_GIC_INTR_MASK 0x4F03180
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_GIC_INTR_MASK 0x4F03190
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_HBW_AWPROT 0x4F031A0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_HBW_ARPROT 0x4F031A4
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_LBW_AWPROT 0x4F031B0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_LBW_ARPROT 0x4F031B4
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_AWPROT 0x4F031C0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_LBW_SLV_ARPROT 0x4F031C4
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_AXI_LEGAL_AXSIZE 0x4F031D0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ARC_MSG_MASK 0x4F03200
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ARC_START_LBW_WDATA 0x4F03230
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ARC_FINISH_LBW_WDATA 0x4F03260
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_SEL 0x4F03270
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HWEVENT_TRACE_ADDR 0x4F03280
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_L 0x4F03290
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_CNT_H 0x4F03294
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_L 0x4F032A0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_FREE_RUN_SET_VALUE_H 0x4F032A4
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_L 0x4F032B0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_BUSY_CNT_H 0x4F032B4
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_L 0x4F032C0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_BUSY_SET_VALUE_H 0x4F032C4
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_CNTR_EN 0x4F032D0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_INTR_MASK 0x4F03300
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_MSIX_FLOW_MASK 0x4F03310
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_WAIT_CNTR 0x4F03320
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_MSIX_WAIT_CNTR 0x4F03330
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_VCD_WAIT_CNTR 0x4F03334
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_VCD_MSIX_WAIT_CNTR 0x4F03338
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_SWREG1_ADDR 0x4F03340
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_ADDR 0x4F03350
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_APB_WR_DATA 0x4F03360
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWPROT 0x4F03380
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_L 0x4F03390
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_CPLQ_HBW_AWADDR_H 0x4F03394
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWPROT 0x4F033C0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_AWADDR 0x4F033D0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_VCD_MSIX_LBW_WDATA 0x4F033E0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_INTR_MASK 0x4F03400
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_MSIX_FLOW_MASK 0x4F03410
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_WAIT_CNTR 0x4F03420
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_MSIX_WAIT_CNTR 0x4F03430
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_L2C_WAIT_CNTR 0x4F03434
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_L2C_MSIX_WAIT_CNTR 0x4F03438
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_SWREG1_ADDR 0x4F03440
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_ADDR 0x4F03450
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_APB_WR_DATA 0x4F03460
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWPROT 0x4F03480
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_L 0x4F03490
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_CPLQ_HBW_AWADDR_H 0x4F03494
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWPROT 0x4F034C0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_AWADDR 0x4F034D0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_L2C_MSIX_LBW_WDATA 0x4F034E0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_INTR_MASK 0x4F03500
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_MSIX_FLOW_MASK 0x4F03510
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_WAIT_CNTR 0x4F03520
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_MSIX_WAIT_CNTR 0x4F03530
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_NRM_WAIT_CNTR 0x4F03534
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_NRM_MSIX_WAIT_CNTR 0x4F03538
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_SWREG1_ADDR 0x4F03540
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_ADDR 0x4F03550
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_APB_WR_DATA 0x4F03560
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWPROT 0x4F03580
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_L 0x4F03590
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_CPLQ_HBW_AWADDR_H 0x4F03594
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWPROT 0x4F035C0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_AWADDR 0x4F035D0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_NRM_MSIX_LBW_WDATA 0x4F035E0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_INTR_MASK 0x4F03600
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_FLOW_MASK 0x4F03610
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_WAIT_CNTR 0x4F03620
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_WAIT_CNTR 0x4F03630
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_WAIT_CNTR 0x4F03634
+
+#define mmPCIE_VDEC0_BRDG_CTRL_STAT_ABNRM_MSIX_WAIT_CNTR 0x4F03638
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_SWREG1_ADDR 0x4F03640
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_ADDR 0x4F03650
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_APB_WR_DATA 0x4F03660
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWPROT 0x4F03680
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_L 0x4F03690
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_CPLQ_HBW_AWADDR_H 0x4F03694
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWPROT 0x4F036C0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR 0x4F036D0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_ABNRM_MSIX_LBW_WDATA 0x4F036E0
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_BRESP_ERR_ID 0x4F03700
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_CFG 0x4F03704
+
+#define mmPCIE_VDEC0_BRDG_CTRL_AXI_SPLIT_NO_WR_INFLIGHT 0x4F03708
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HWEVENT_MASK 0x4F0370C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HWEVENT_CNTXT 0x4F03714
+
+#define mmPCIE_VDEC0_BRDG_CTRL_LBW_SLV_TERM_ERR_RESP 0x4F03718
+
+#define mmPCIE_VDEC0_BRDG_CTRL_LBW_MSTR_TERM_ERR_RESP 0x4F0371C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_DEC_HBW_MSTR_ERR_RESP 0x4F03720
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_VIOL_TERM_STATUS 0x4F03724
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_L 0x4F03728
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_LAST_AWADDR_TERM_H 0x4F0372C
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_L 0x4F03730
+
+#define mmPCIE_VDEC0_BRDG_CTRL_HBW_LAST_ARADDR_TERM_H 0x4F03734
+
+#endif /* ASIC_REG_PCIE_VDEC0_BRDG_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
new file mode 100644
index 000000000000..491b0cd935af
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_VDEC0_CTRL_SPECIAL_REGS_H_
+#define ASIC_REG_PCIE_VDEC0_CTRL_SPECIAL_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_VDEC0_CTRL_SPECIAL
+ * (Prototype: SPECIAL_REGS)
+ *****************************************
+ */
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_0 0x4F04E80
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_1 0x4F04E84
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_2 0x4F04E88
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_3 0x4F04E8C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_4 0x4F04E90
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_5 0x4F04E94
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_6 0x4F04E98
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_7 0x4F04E9C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_8 0x4F04EA0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_9 0x4F04EA4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_10 0x4F04EA8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_11 0x4F04EAC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_12 0x4F04EB0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_13 0x4F04EB4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_14 0x4F04EB8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_15 0x4F04EBC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_16 0x4F04EC0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_17 0x4F04EC4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_18 0x4F04EC8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_19 0x4F04ECC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_20 0x4F04ED0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_21 0x4F04ED4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_22 0x4F04ED8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_23 0x4F04EDC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_24 0x4F04EE0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_25 0x4F04EE4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_26 0x4F04EE8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_27 0x4F04EEC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_28 0x4F04EF0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_29 0x4F04EF4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_30 0x4F04EF8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_PRIV_31 0x4F04EFC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_GW_DATA 0x4F04F00
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_GW_REQ 0x4F04F04
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_NUMOF 0x4F04F0C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_ECC_SEL 0x4F04F10
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_ECC_CTL 0x4F04F14
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_ECC_ERR_MASK 0x4F04F18
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_ECC_GLBL_ERR_MASK 0x4F04F1C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_ECC_ERR_STS 0x4F04F20
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_ECC_ERR_ADDR 0x4F04F24
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_MEM_RM 0x4F04F28
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_ERR_MASK 0x4F04F40
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_ERR_ADDR 0x4F04F44
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_ERR_CAUSE 0x4F04F48
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SPARE_0 0x4F04F60
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SPARE_1 0x4F04F64
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SPARE_2 0x4F04F68
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SPARE_3 0x4F04F6C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_0 0x4F04F80
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_1 0x4F04F84
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_2 0x4F04F88
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_3 0x4F04F8C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_4 0x4F04F90
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_5 0x4F04F94
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_6 0x4F04F98
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_7 0x4F04F9C
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_8 0x4F04FA0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_9 0x4F04FA4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_10 0x4F04FA8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_11 0x4F04FAC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_12 0x4F04FB0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_13 0x4F04FB4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_14 0x4F04FB8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_15 0x4F04FBC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_16 0x4F04FC0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_17 0x4F04FC4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_18 0x4F04FC8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_19 0x4F04FCC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_20 0x4F04FD0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_21 0x4F04FD4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_22 0x4F04FD8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_23 0x4F04FDC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_24 0x4F04FE0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_25 0x4F04FE4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_26 0x4F04FE8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_27 0x4F04FEC
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_28 0x4F04FF0
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_29 0x4F04FF4
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_30 0x4F04FF8
+
+#define mmPCIE_VDEC0_CTRL_SPECIAL_GLBL_SEC_31 0x4F04FFC
+
+#endif /* ASIC_REG_PCIE_VDEC0_CTRL_SPECIAL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
new file mode 100644
index 000000000000..a09422f2f281
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_WRAP_REGS_H_
+#define ASIC_REG_PCIE_WRAP_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_WRAP
+ * (Prototype: PCIE_WRAP)
+ *****************************************
+ */
+
+#define mmPCIE_WRAP_INTR_GEN_MASK_MIN_ADDR_0 0x4C01000
+
+#define mmPCIE_WRAP_INTR_GEN_MASK_MIN_ADDR_1 0x4C01004
+
+#define mmPCIE_WRAP_INTR_GEN_MASK_MAX_ADDR_0 0x4C01008
+
+#define mmPCIE_WRAP_INTR_GEN_MASK_MAX_ADDR_1 0x4C0100C
+
+#define mmPCIE_WRAP_INTR_GEN_MASK_TIMER 0x4C01010
+
+#define mmPCIE_WRAP_INTR_GEN_MASK_CTRL 0x4C01014
+
+#define mmPCIE_WRAP_MSIX_DOORBELL_OFF_ADDR 0x4C01018
+
+#define mmPCIE_WRAP_MSIX_MASK_CTRL 0x4C0101C
+
+#define mmPCIE_WRAP_PHY_FW_SRAM_ADDR_L_0 0x4C01020
+
+#define mmPCIE_WRAP_PHY_FW_SRAM_ADDR_L_1 0x4C01024
+
+#define mmPCIE_WRAP_PHY_FW_SRAM_ADDR_H_0 0x4C01028
+
+#define mmPCIE_WRAP_PHY_FW_SRAM_ADDR_H_1 0x4C0102C
+
+#define mmPCIE_WRAP_PHY_FW_SRAM_CFG_ADDR 0x4C01030
+
+#define mmPCIE_WRAP_MSIX_GW 0x4C01034
+
+#define mmPCIE_WRAP_MSIX_GW_VEC 0x4C01038
+
+#define mmPCIE_WRAP_MSIX_GW_INTR 0x4C0103C
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_0 0x4C01040
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_1 0x4C01044
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_2 0x4C01048
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_3 0x4C0104C
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_4 0x4C01050
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_5 0x4C01054
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_6 0x4C01058
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_7 0x4C0105C
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_8 0x4C01060
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_9 0x4C01064
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_10 0x4C01068
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_11 0x4C0106C
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_12 0x4C01070
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_13 0x4C01074
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_14 0x4C01078
+
+#define mmPCIE_WRAP_MSIX_GW_TABLE_15 0x4C0107C
+
+#define mmPCIE_WRAP_VUART_RX_0 0x4C01100
+
+#define mmPCIE_WRAP_VUART_RX_1 0x4C01104
+
+#define mmPCIE_WRAP_VUART_RX_2 0x4C01108
+
+#define mmPCIE_WRAP_VUART_TX_0 0x4C0110C
+
+#define mmPCIE_WRAP_VUART_TX_1 0x4C01110
+
+#define mmPCIE_WRAP_VUART_TX_2 0x4C01114
+
+#define mmPCIE_WRAP_MSI_GW_BLOCK 0x4C01120
+
+#define mmPCIE_WRAP_PHY_FW_FSM_SIZE 0x4C0120C
+
+#define mmPCIE_WRAP_HOST_ACCESS_TERMINATION 0x4C01210
+
+#define mmPCIE_WRAP_ILLEGAL_LBW_REQ_CTRL 0x4C01214
+
+#define mmPCIE_WRAP_ILLEGAL_LBW_REQ_ADDR_0 0x4C01218
+
+#define mmPCIE_WRAP_ILLEGAL_LBW_REQ_ADDR_1 0x4C0121C
+
+#define mmPCIE_WRAP_ILLEGAL_LBW_REQ_INTR 0x4C01220
+
+#define mmPCIE_WRAP_OUTBOUND_ADDR_LSB 0x4C01224
+
+#define mmPCIE_WRAP_LBW_WSTRB_OVRD 0x4C01228
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_0 0x4C01304
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_1 0x4C01308
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_2 0x4C0130C
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_3 0x4C01310
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_4 0x4C01314
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_5 0x4C01318
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_6 0x4C0131C
+
+#define mmPCIE_WRAP_LBW_GW_ADDR_7 0x4C01320
+
+#define mmPCIE_WRAP_LBW_GW_DATA_0 0x4C01324
+
+#define mmPCIE_WRAP_LBW_GW_DATA_1 0x4C01328
+
+#define mmPCIE_WRAP_LBW_GW_DATA_2 0x4C0132C
+
+#define mmPCIE_WRAP_LBW_GW_DATA_3 0x4C01330
+
+#define mmPCIE_WRAP_LBW_GW_DATA_4 0x4C01334
+
+#define mmPCIE_WRAP_LBW_GW_DATA_5 0x4C01338
+
+#define mmPCIE_WRAP_LBW_GW_DATA_6 0x4C0133C
+
+#define mmPCIE_WRAP_LBW_GW_DATA_7 0x4C01340
+
+#define mmPCIE_WRAP_LBW_GW_GO_0 0x4C01344
+
+#define mmPCIE_WRAP_LBW_GW_GO_1 0x4C01348
+
+#define mmPCIE_WRAP_LBW_GW_GO_2 0x4C0134C
+
+#define mmPCIE_WRAP_LBW_GW_GO_3 0x4C01350
+
+#define mmPCIE_WRAP_LBW_GW_GO_4 0x4C01354
+
+#define mmPCIE_WRAP_LBW_GW_GO_5 0x4C01358
+
+#define mmPCIE_WRAP_LBW_GW_GO_6 0x4C0135C
+
+#define mmPCIE_WRAP_LBW_GW_GO_7 0x4C01360
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_0 0x4C01364
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_1 0x4C01368
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_2 0x4C0136C
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_3 0x4C01370
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_4 0x4C01374
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_5 0x4C01378
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_6 0x4C0137C
+
+#define mmPCIE_WRAP_LBW_GW_STATUS_7 0x4C01380
+
+#define mmPCIE_WRAP_OUTBOUND_OUTSTANDING 0x4C013F4
+
+#define mmPCIE_WRAP_MASK_REQ 0x4C01404
+
+#define mmPCIE_WRAP_ONE_IN_FLIGHT 0x4C01408
+
+#define mmPCIE_WRAP_IND_AWPROT 0x4C0140C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO 0x4C01500
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_0 0x4C01504
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_1 0x4C01508
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_P_TAG 0x4C0150C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_ATU_BYPAS 0x4C01510
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_FUNC_NUM 0x4C01514
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_ACT 0x4C01518
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_NUM 0x4C0151C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_TLPPRFX 0x4C01520
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO 0x4C01524
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_TLPPRFX 0x4C01528
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_ATU_BYP 0x4C0152C
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_FUNC_NUM 0x4C01530
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_ACT 0x4C01534
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_NUM 0x4C01538
+
+#define mmPCIE_WRAP_MESO_FIFO_CTRL_0 0x4C01640
+
+#define mmPCIE_WRAP_MESO_FIFO_CTRL_1 0x4C01644
+
+#define mmPCIE_WRAP_MESO_FIFO_W_LFSR_POLY_0 0x4C01648
+
+#define mmPCIE_WRAP_MESO_FIFO_W_LFSR_POLY_1 0x4C0164C
+
+#define mmPCIE_WRAP_MESO_FIFO_R_LFSR_POLY_0 0x4C01650
+
+#define mmPCIE_WRAP_MESO_FIFO_R_LFSR_POLY_1 0x4C01654
+
+#define mmPCIE_WRAP_MESO_FIFO_W_PUSH_CNT_0 0x4C01658
+
+#define mmPCIE_WRAP_MESO_FIFO_W_PUSH_CNT_1 0x4C0165C
+
+#define mmPCIE_WRAP_MESO_FIFO_W_BP_CNT_0 0x4C01660
+
+#define mmPCIE_WRAP_MESO_FIFO_W_BP_CNT_1 0x4C01664
+
+#define mmPCIE_WRAP_MESO_FIFO_R_ERR_CNT_0 0x4C01668
+
+#define mmPCIE_WRAP_MESO_FIFO_R_ERR_CNT_1 0x4C0166C
+
+#define mmPCIE_WRAP_MESO_FIFO_R_POP_CNT_0 0x4C01670
+
+#define mmPCIE_WRAP_MESO_FIFO_R_POP_CNT_1 0x4C01674
+
+#define mmPCIE_WRAP_MESO_FIFO_W_LFSR_0 0x4C01678
+
+#define mmPCIE_WRAP_MESO_FIFO_W_LFSR_1 0x4C0167C
+
+#define mmPCIE_WRAP_MESO_FIFO_R_LFSR_0 0x4C01680
+
+#define mmPCIE_WRAP_MESO_FIFO_R_LFSR_1 0x4C01684
+
+#define mmPCIE_WRAP_MESO_FIFO_W_PUSH_LFSR_0 0x4C01688
+
+#define mmPCIE_WRAP_MESO_FIFO_W_PUSH_LFSR_1 0x4C0168C
+
+#define mmPCIE_WRAP_MESO_FIFO_R_POP_LFSR_0 0x4C01690
+
+#define mmPCIE_WRAP_MESO_FIFO_R_POP_LFSR_1 0x4C01694
+
+#define mmPCIE_WRAP_MESO_FIFO_W_BP_PERIOD_0 0x4C01698
+
+#define mmPCIE_WRAP_MESO_FIFO_W_BP_PERIOD_1 0x4C0169C
+
+#define mmPCIE_WRAP_MESO_FIFO_R_BP_PERIOD_0 0x4C016A0
+
+#define mmPCIE_WRAP_MESO_FIFO_R_BP_PERIOD_1 0x4C016A4
+
+#define mmPCIE_WRAP_MESO_FIFO_W_USED_CNT_0 0x4C016A8
+
+#define mmPCIE_WRAP_MESO_FIFO_W_USED_CNT_1 0x4C016AC
+
+#define mmPCIE_WRAP_MESO_FIFO_R_USED_CNT_0 0x4C016B0
+
+#define mmPCIE_WRAP_MESO_FIFO_R_USED_CNT_1 0x4C016B4
+
+#define mmPCIE_WRAP_P2P_TABLE_0 0x4C01900
+
+#define mmPCIE_WRAP_P2P_TABLE_1 0x4C01904
+
+#define mmPCIE_WRAP_P2P_TABLE_2 0x4C01908
+
+#define mmPCIE_WRAP_P2P_TABLE_3 0x4C0190C
+
+#define mmPCIE_WRAP_P2P_TABLE_4 0x4C01910
+
+#define mmPCIE_WRAP_P2P_TABLE_5 0x4C01914
+
+#define mmPCIE_WRAP_P2P_TABLE_6 0x4C01918
+
+#define mmPCIE_WRAP_P2P_TABLE_7 0x4C0191C
+
+#define mmPCIE_WRAP_P2P_TABLE_8 0x4C01920
+
+#define mmPCIE_WRAP_P2P_TABLE_9 0x4C01924
+
+#define mmPCIE_WRAP_P2P_TABLE_10 0x4C01928
+
+#define mmPCIE_WRAP_P2P_TABLE_11 0x4C0192C
+
+#define mmPCIE_WRAP_P2P_TABLE_12 0x4C01930
+
+#define mmPCIE_WRAP_P2P_TABLE_13 0x4C01934
+
+#define mmPCIE_WRAP_P2P_TABLE_14 0x4C01938
+
+#define mmPCIE_WRAP_P2P_TABLE_15 0x4C0193C
+
+#define mmPCIE_WRAP_P2P_TABLE_16 0x4C01940
+
+#define mmPCIE_WRAP_P2P_TABLE_17 0x4C01944
+
+#define mmPCIE_WRAP_P2P_TABLE_18 0x4C01948
+
+#define mmPCIE_WRAP_P2P_TABLE_19 0x4C0194C
+
+#define mmPCIE_WRAP_P2P_TABLE_20 0x4C01950
+
+#define mmPCIE_WRAP_P2P_TABLE_21 0x4C01954
+
+#define mmPCIE_WRAP_P2P_TABLE_22 0x4C01958
+
+#define mmPCIE_WRAP_P2P_TABLE_23 0x4C0195C
+
+#define mmPCIE_WRAP_P2P_TABLE_24 0x4C01960
+
+#define mmPCIE_WRAP_P2P_TABLE_25 0x4C01964
+
+#define mmPCIE_WRAP_P2P_TABLE_26 0x4C01968
+
+#define mmPCIE_WRAP_P2P_TABLE_27 0x4C0196C
+
+#define mmPCIE_WRAP_P2P_TABLE_28 0x4C01970
+
+#define mmPCIE_WRAP_P2P_TABLE_29 0x4C01974
+
+#define mmPCIE_WRAP_P2P_TABLE_30 0x4C01978
+
+#define mmPCIE_WRAP_P2P_TABLE_31 0x4C0197C
+
+#define mmPCIE_WRAP_P2P_TABLE_32 0x4C01980
+
+#define mmPCIE_WRAP_P2P_TABLE_33 0x4C01984
+
+#define mmPCIE_WRAP_P2P_TABLE_34 0x4C01988
+
+#define mmPCIE_WRAP_P2P_TABLE_35 0x4C0198C
+
+#define mmPCIE_WRAP_P2P_TABLE_36 0x4C01990
+
+#define mmPCIE_WRAP_P2P_TABLE_37 0x4C01994
+
+#define mmPCIE_WRAP_P2P_TABLE_38 0x4C01998
+
+#define mmPCIE_WRAP_P2P_TABLE_39 0x4C0199C
+
+#define mmPCIE_WRAP_P2P_TABLE_40 0x4C019A0
+
+#define mmPCIE_WRAP_P2P_TABLE_41 0x4C019A4
+
+#define mmPCIE_WRAP_P2P_TABLE_42 0x4C019A8
+
+#define mmPCIE_WRAP_P2P_TABLE_43 0x4C019AC
+
+#define mmPCIE_WRAP_P2P_TABLE_44 0x4C019B0
+
+#define mmPCIE_WRAP_P2P_TABLE_45 0x4C019B4
+
+#define mmPCIE_WRAP_P2P_TABLE_46 0x4C019B8
+
+#define mmPCIE_WRAP_P2P_TABLE_47 0x4C019BC
+
+#define mmPCIE_WRAP_P2P_TABLE_48 0x4C019C0
+
+#define mmPCIE_WRAP_P2P_TABLE_49 0x4C019C4
+
+#define mmPCIE_WRAP_P2P_TABLE_50 0x4C019C8
+
+#define mmPCIE_WRAP_P2P_TABLE_51 0x4C019CC
+
+#define mmPCIE_WRAP_P2P_TABLE_52 0x4C019D0
+
+#define mmPCIE_WRAP_P2P_TABLE_53 0x4C019D4
+
+#define mmPCIE_WRAP_P2P_TABLE_54 0x4C019D8
+
+#define mmPCIE_WRAP_P2P_TABLE_55 0x4C019DC
+
+#define mmPCIE_WRAP_P2P_TABLE_56 0x4C019E0
+
+#define mmPCIE_WRAP_P2P_TABLE_57 0x4C019E4
+
+#define mmPCIE_WRAP_P2P_TABLE_58 0x4C019E8
+
+#define mmPCIE_WRAP_P2P_TABLE_59 0x4C019EC
+
+#define mmPCIE_WRAP_P2P_TABLE_60 0x4C019F0
+
+#define mmPCIE_WRAP_P2P_TABLE_61 0x4C019F4
+
+#define mmPCIE_WRAP_P2P_TABLE_62 0x4C019F8
+
+#define mmPCIE_WRAP_P2P_TABLE_63 0x4C019FC
+
+#define mmPCIE_WRAP_P2P_EN 0x4C01A00
+
+#define mmPCIE_WRAP_P2P_REQ_ID 0x4C01A04
+
+#define mmPCIE_WRAP_P2P_INTR 0x4C01A08
+
+#define mmPCIE_WRAP_P2P_TERMINATE_RESP 0x4C01A0C
+
+#define mmPCIE_WRAP_GIC_INTR_TERMINATE_CTRL 0x4C01A10
+
+#define mmPCIE_WRAP_GIC_INTR_TERMINATE_CNT 0x4C01A14
+
+#define mmPCIE_WRAP_CPU_HOT_RST 0x4C01AE0
+
+#define mmPCIE_WRAP_LBW_AXI_SPLIT_MAX_OUTSTAN 0x4C01B2C
+
+#define mmPCIE_WRAP_AXI_SPLIT_NO_WR_INFLIGHT 0x4C01B30
+
+#define mmPCIE_WRAP_PCIE_WR_BUF 0x4C01B34
+
+#define mmPCIE_WRAP_PCIE_CACHE_OVR 0x4C01B38
+
+#define mmPCIE_WRAP_PCIE_LOCK_OVR 0x4C01B3C
+
+#define mmPCIE_WRAP_PCIE_PROT_OVR 0x4C01B40
+
+#define mmPCIE_WRAP_PCIE_ARUSER_OVR_0 0x4C01B44
+
+#define mmPCIE_WRAP_PCIE_ARUSER_OVR_1 0x4C01B48
+
+#define mmPCIE_WRAP_PCIE_AWUSER_OVR_0 0x4C01B4C
+
+#define mmPCIE_WRAP_PCIE_AWUSER_OVR_1 0x4C01B50
+
+#define mmPCIE_WRAP_PCIE_ARUSER_OVR_EN_0 0x4C01B54
+
+#define mmPCIE_WRAP_PCIE_ARUSER_OVR_EN_1 0x4C01B58
+
+#define mmPCIE_WRAP_PCIE_AWUSER_OVR_EN_0 0x4C01B5C
+
+#define mmPCIE_WRAP_PCIE_AWUSER_OVR_EN_1 0x4C01B60
+
+#define mmPCIE_WRAP_PCIE_MAX_OUTSTAND 0x4C01B64
+
+#define mmPCIE_WRAP_PCIE_MST_IN 0x4C01B68
+
+#define mmPCIE_WRAP_PCIE_RSP_OK 0x4C01B6C
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_0 0x4C01B70
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_1 0x4C01B74
+
+#define mmPCIE_WRAP_AXI_DRAIN_MSTR_IF_CFG_0 0x4C01B7C
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_0 0x4C01B80
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_1 0x4C01B84
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_2 0x4C01B88
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_3 0x4C01B8C
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_4 0x4C01B90
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_5 0x4C01B94
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_6 0x4C01B98
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_7 0x4C01B9C
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_8 0x4C01BA0
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_9 0x4C01BA4
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_10 0x4C01BA8
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_11 0x4C01BAC
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_12 0x4C01BB0
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_13 0x4C01BB4
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_14 0x4C01BB8
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_15 0x4C01BBC
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_16 0x4C01BC0
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_17 0x4C01BC4
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_18 0x4C01BC8
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_19 0x4C01BCC
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_20 0x4C01BD0
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_21 0x4C01BD4
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_22 0x4C01BD8
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_23 0x4C01BDC
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_24 0x4C01BE0
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_25 0x4C01BE4
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_26 0x4C01BE8
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_27 0x4C01BEC
+
+#define mmPCIE_WRAP_AXI_DRAIN_EXTMEM_POLY_H3_28 0x4C01BF0
+
+#define mmPCIE_WRAP_AXI_DRAIN_ACTIVE 0x4C01D48
+
+#define mmPCIE_WRAP_AXI_DRAIN_IND 0x4C01D4C
+
+#define mmPCIE_WRAP_HBW_DRAIN_TIMEOUT 0x4C01D50
+
+#define mmPCIE_WRAP_HBW_DRAIN_CFG 0x4C01D54
+
+#define mmPCIE_WRAP_LBW_DRAIN_TIMEOUT 0x4C01D58
+
+#define mmPCIE_WRAP_LBW_DRAIN_CFG 0x4C01D5C
+
+#define mmPCIE_WRAP_LBW_DRAIN_DELAY_EN_CNT 0x4C01D60
+
+#define mmPCIE_WRAP_PHY_FW_FSM 0x4C01D64
+
+#define mmPCIE_WRAP_PCIE_PHY_BASE_ADDR_L 0x4C01D68
+
+#define mmPCIE_WRAP_PCIE_PHY_BASE_ADDR_H 0x4C01D6C
+
+#define mmPCIE_WRAP_PCIE_CORE_BASE_ADDR_L 0x4C01D70
+
+#define mmPCIE_WRAP_PCIE_CORE_BASE_ADDR_H 0x4C01D74
+
+#define mmPCIE_WRAP_SPMU_INTR 0x4C01DE4
+
+#define mmPCIE_WRAP_AXI_INTR 0x4C01DE8
+
+#define mmPCIE_WRAP_PCIE_IC_SEI_INTR_IND 0x4C01DEC
+
+#define mmPCIE_WRAP_PMMU_RTR_CFG 0x4C01DF0
+
+#define mmPCIE_WRAP_PSOC_RST_CTRL 0x4C01DF4
+
+#define mmPCIE_WRAP_PSOC_BOOT_MNG_DONE 0x4C01DF8
+
+#define mmPCIE_WRAP_ASID_MOD_CTRL 0x4C01DFC
+
+#define mmPCIE_WRAP_ASID_MOD_ADDR_L_0 0x4C01E00
+
+#define mmPCIE_WRAP_ASID_MOD_ADDR_L_1 0x4C01E04
+
+#define mmPCIE_WRAP_ASID_MOD_ADDR_H_0 0x4C01E08
+
+#define mmPCIE_WRAP_ASID_MOD_ADDR_H_1 0x4C01E0C
+
+#define mmPCIE_WRAP_CS_TRACE_AXI_CTRL 0x4C01E10
+
+#define mmPCIE_WRAP_FLR_FSM_CTRL 0x4C01E14
+
+#define mmPCIE_WRAP_HBW_DRAIN_WR_ADDR_0 0x4C01E18
+
+#define mmPCIE_WRAP_HBW_DRAIN_WR_ADDR_1 0x4C01E1C
+
+#define mmPCIE_WRAP_HBW_DRAIN_RD_ADDR_0 0x4C01E20
+
+#define mmPCIE_WRAP_HBW_DRAIN_RD_ADDR_1 0x4C01E24
+
+#define mmPCIE_WRAP_HBW_DRAIN_STAMP 0x4C01E28
+
+#define mmPCIE_WRAP_LBW_DRAIN_WR_ADDR_0 0x4C01E2C
+
+#define mmPCIE_WRAP_LBW_DRAIN_WR_ADDR_1 0x4C01E30
+
+#define mmPCIE_WRAP_LBW_DRAIN_RD_ADDR_0 0x4C01E34
+
+#define mmPCIE_WRAP_LBW_DRAIN_RD_ADDR_1 0x4C01E38
+
+#define mmPCIE_WRAP_LBW_DRAIN_STAMP 0x4C01E3C
+
+#define mmPCIE_WRAP_EXTMEM_HBM_LOC 0x4C01E40
+
+#define mmPCIE_WRAP_EXTMEM_PC_LOC 0x4C01E44
+
+#define mmPCIE_WRAP_EXTMEM_NONLIN_HBM 0x4C01E48
+
+#define mmPCIE_WRAP_EXTMEM_NONLIN_PC 0x4C01E4C
+
+#define mmPCIE_WRAP_EXTMEM_NONLIN_HBM_NUM 0x4C01E50
+
+#define mmPCIE_WRAP_EXTMEM_NONLIN_HBM_MAP 0x4C01E54
+
+#endif /* ASIC_REG_PCIE_WRAP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
new file mode 100644
index 000000000000..bacbe4c6fc3c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_CORE_CTX_AXUSER_REGS_H_
+#define ASIC_REG_PDMA0_CORE_CTX_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_CORE_CTX_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_ASID 0x4C8B800
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_MMU_BP 0x4C8B804
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_STRONG_ORDER 0x4C8B808
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_NO_SNOOP 0x4C8B80C
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_WR_REDUCTION 0x4C8B810
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_RD_ATOMIC 0x4C8B814
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_QOS 0x4C8B818
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_RSVD 0x4C8B81C
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_EMEM_CPAGE 0x4C8B820
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_CORE 0x4C8B824
+
+#define mmPDMA0_CORE_CTX_AXUSER_E2E_COORD 0x4C8B828
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_WR_OVRD_LO 0x4C8B830
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_WR_OVRD_HI 0x4C8B834
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_RD_OVRD_LO 0x4C8B838
+
+#define mmPDMA0_CORE_CTX_AXUSER_HB_RD_OVRD_HI 0x4C8B83C
+
+#define mmPDMA0_CORE_CTX_AXUSER_LB_COORD 0x4C8B840
+
+#define mmPDMA0_CORE_CTX_AXUSER_LB_LOCK 0x4C8B844
+
+#define mmPDMA0_CORE_CTX_AXUSER_LB_RSVD 0x4C8B848
+
+#define mmPDMA0_CORE_CTX_AXUSER_LB_OVRD 0x4C8B84C
+
+#endif /* ASIC_REG_PDMA0_CORE_CTX_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
new file mode 100644
index 000000000000..02b57f07cfaf
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_CORE_CTX_REGS_H_
+#define ASIC_REG_PDMA0_CORE_CTX_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_CORE_CTX
+ * (Prototype: DMA_CORE_CTX)
+ *****************************************
+ */
+
+#define mmPDMA0_CORE_CTX_RATE_LIM_TKN 0x4C8B860
+
+#define mmPDMA0_CORE_CTX_PWRLP 0x4C8B864
+
+#define mmPDMA0_CORE_CTX_TE_NUMROWS 0x4C8B868
+
+#define mmPDMA0_CORE_CTX_IDX 0x4C8B86C
+
+#define mmPDMA0_CORE_CTX_IDX_INC 0x4C8B870
+
+#define mmPDMA0_CORE_CTX_CTRL 0x4C8B874
+
+#define mmPDMA0_CORE_CTX_SRC_TSIZE_0 0x4C8B878
+
+#define mmPDMA0_CORE_CTX_SRC_TSIZE_1 0x4C8B87C
+
+#define mmPDMA0_CORE_CTX_SRC_STRIDE_1 0x4C8B880
+
+#define mmPDMA0_CORE_CTX_SRC_TSIZE_2 0x4C8B884
+
+#define mmPDMA0_CORE_CTX_SRC_STRIDE_2 0x4C8B888
+
+#define mmPDMA0_CORE_CTX_SRC_TSIZE_3 0x4C8B88C
+
+#define mmPDMA0_CORE_CTX_SRC_STRIDE_3 0x4C8B890
+
+#define mmPDMA0_CORE_CTX_SRC_TSIZE_4 0x4C8B894
+
+#define mmPDMA0_CORE_CTX_SRC_STRIDE_4 0x4C8B898
+
+#define mmPDMA0_CORE_CTX_DST_TSIZE_1 0x4C8B89C
+
+#define mmPDMA0_CORE_CTX_DST_STRIDE_1 0x4C8B8A0
+
+#define mmPDMA0_CORE_CTX_DST_TSIZE_2 0x4C8B8A4
+
+#define mmPDMA0_CORE_CTX_DST_STRIDE_2 0x4C8B8A8
+
+#define mmPDMA0_CORE_CTX_DST_TSIZE_3 0x4C8B8AC
+
+#define mmPDMA0_CORE_CTX_DST_STRIDE_3 0x4C8B8B0
+
+#define mmPDMA0_CORE_CTX_DST_TSIZE_4 0x4C8B8B4
+
+#define mmPDMA0_CORE_CTX_DST_STRIDE_4 0x4C8B8B8
+
+#define mmPDMA0_CORE_CTX_WR_COMP_ADDR_HI 0x4C8B8BC
+
+#define mmPDMA0_CORE_CTX_WR_COMP_ADDR_LO 0x4C8B8C0
+
+#define mmPDMA0_CORE_CTX_WR_COMP_WDATA 0x4C8B8C4
+
+#define mmPDMA0_CORE_CTX_SRC_OFFSET_LO 0x4C8B8C8
+
+#define mmPDMA0_CORE_CTX_SRC_OFFSET_HI 0x4C8B8CC
+
+#define mmPDMA0_CORE_CTX_DST_OFFSET_LO 0x4C8B8D0
+
+#define mmPDMA0_CORE_CTX_DST_OFFSET_HI 0x4C8B8D4
+
+#define mmPDMA0_CORE_CTX_SRC_BASE_LO 0x4C8B8D8
+
+#define mmPDMA0_CORE_CTX_SRC_BASE_HI 0x4C8B8DC
+
+#define mmPDMA0_CORE_CTX_DST_BASE_LO 0x4C8B8E0
+
+#define mmPDMA0_CORE_CTX_DST_BASE_HI 0x4C8B8E4
+
+#define mmPDMA0_CORE_CTX_DST_TSIZE_0 0x4C8B8E8
+
+#define mmPDMA0_CORE_CTX_COMMIT 0x4C8B8EC
+
+#endif /* ASIC_REG_PDMA0_CORE_CTX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
new file mode 100644
index 000000000000..909cda03c246
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_CORE_MASKS_H_
+#define ASIC_REG_PDMA0_CORE_MASKS_H_
+
+/*
+ *****************************************
+ * PDMA0_CORE
+ * (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+/* PDMA0_CORE_CFG_0 */
+#define PDMA0_CORE_CFG_0_EN_SHIFT 0
+#define PDMA0_CORE_CFG_0_EN_MASK 0x1
+
+/* PDMA0_CORE_CFG_1 */
+#define PDMA0_CORE_CFG_1_HALT_SHIFT 0
+#define PDMA0_CORE_CFG_1_HALT_MASK 0x1
+#define PDMA0_CORE_CFG_1_FLUSH_SHIFT 1
+#define PDMA0_CORE_CFG_1_FLUSH_MASK 0x2
+
+/* PDMA0_CORE_PROT */
+#define PDMA0_CORE_PROT_VAL_SHIFT 0
+#define PDMA0_CORE_PROT_VAL_MASK 0x1
+#define PDMA0_CORE_PROT_ERR_VAL_SHIFT 1
+#define PDMA0_CORE_PROT_ERR_VAL_MASK 0x2
+
+/* PDMA0_CORE_CKG */
+#define PDMA0_CORE_CKG_HBW_RBUF_SHIFT 0
+#define PDMA0_CORE_CKG_HBW_RBUF_MASK 0x1
+#define PDMA0_CORE_CKG_LBW_RBUF_KDMA_SHIFT 1
+#define PDMA0_CORE_CKG_LBW_RBUF_KDMA_MASK 0x2
+#define PDMA0_CORE_CKG_TE_SHIFT 2
+#define PDMA0_CORE_CKG_TE_MASK 0x4
+
+/* PDMA0_CORE_RD_GLBL */
+#define PDMA0_CORE_RD_GLBL_LBW_VIA_HBW_SHIFT 0
+#define PDMA0_CORE_RD_GLBL_LBW_VIA_HBW_MASK 0x1
+#define PDMA0_CORE_RD_GLBL_HBW_FORCE_MISS_SHIFT 4
+#define PDMA0_CORE_RD_GLBL_HBW_FORCE_MISS_MASK 0x10
+#define PDMA0_CORE_RD_GLBL_LBW_FORCE_MISS_SHIFT 5
+#define PDMA0_CORE_RD_GLBL_LBW_FORCE_MISS_MASK 0x20
+
+/* PDMA0_CORE_RD_HBW_MAX_OUTSTAND */
+#define PDMA0_CORE_RD_HBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define PDMA0_CORE_RD_HBW_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* PDMA0_CORE_RD_HBW_MAX_SIZE */
+#define PDMA0_CORE_RD_HBW_MAX_SIZE_DATA_SHIFT 0
+#define PDMA0_CORE_RD_HBW_MAX_SIZE_DATA_MASK 0xFFF
+#define PDMA0_CORE_RD_HBW_MAX_SIZE_MD_SHIFT 16
+#define PDMA0_CORE_RD_HBW_MAX_SIZE_MD_MASK 0xFFF0000
+
+/* PDMA0_CORE_RD_HBW_ARCACHE */
+#define PDMA0_CORE_RD_HBW_ARCACHE_VAL_SHIFT 0
+#define PDMA0_CORE_RD_HBW_ARCACHE_VAL_MASK 0xF
+
+/* PDMA0_CORE_RD_HBW_INFLIGHTS */
+#define PDMA0_CORE_RD_HBW_INFLIGHTS_VAL_SHIFT 0
+#define PDMA0_CORE_RD_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_RD_HBW_RATE_LIM_CFG */
+#define PDMA0_CORE_RD_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define PDMA0_CORE_RD_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define PDMA0_CORE_RD_HBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define PDMA0_CORE_RD_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define PDMA0_CORE_RD_HBW_RATE_LIM_CFG_EN_SHIFT 31
+#define PDMA0_CORE_RD_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* PDMA0_CORE_RD_LBW_MAX_OUTSTAND */
+#define PDMA0_CORE_RD_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define PDMA0_CORE_RD_LBW_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* PDMA0_CORE_RD_LBW_MAX_SIZE */
+#define PDMA0_CORE_RD_LBW_MAX_SIZE_DATA_SHIFT 0
+#define PDMA0_CORE_RD_LBW_MAX_SIZE_DATA_MASK 0xFFF
+#define PDMA0_CORE_RD_LBW_MAX_SIZE_MD_SHIFT 16
+#define PDMA0_CORE_RD_LBW_MAX_SIZE_MD_MASK 0xFFF0000
+
+/* PDMA0_CORE_RD_LBW_ARCACHE */
+#define PDMA0_CORE_RD_LBW_ARCACHE_VAL_SHIFT 0
+#define PDMA0_CORE_RD_LBW_ARCACHE_VAL_MASK 0xF
+
+/* PDMA0_CORE_RD_LBW_INFLIGHTS */
+#define PDMA0_CORE_RD_LBW_INFLIGHTS_VAL_SHIFT 0
+#define PDMA0_CORE_RD_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_RD_LBW_RATE_LIM_CFG */
+#define PDMA0_CORE_RD_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define PDMA0_CORE_RD_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define PDMA0_CORE_RD_LBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define PDMA0_CORE_RD_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define PDMA0_CORE_RD_LBW_RATE_LIM_CFG_EN_SHIFT 31
+#define PDMA0_CORE_RD_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* PDMA0_CORE_WR_HBW_MAX_OUTSTAND */
+#define PDMA0_CORE_WR_HBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define PDMA0_CORE_WR_HBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_WR_HBW_MAX_AWID */
+#define PDMA0_CORE_WR_HBW_MAX_AWID_VAL_SHIFT 0
+#define PDMA0_CORE_WR_HBW_MAX_AWID_VAL_MASK 0x3FFF
+
+/* PDMA0_CORE_WR_HBW_AWCACHE */
+#define PDMA0_CORE_WR_HBW_AWCACHE_VAL_SHIFT 0
+#define PDMA0_CORE_WR_HBW_AWCACHE_VAL_MASK 0xF
+
+/* PDMA0_CORE_WR_HBW_INFLIGHTS */
+#define PDMA0_CORE_WR_HBW_INFLIGHTS_VAL_SHIFT 0
+#define PDMA0_CORE_WR_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_WR_HBW_RATE_LIM_CFG */
+#define PDMA0_CORE_WR_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define PDMA0_CORE_WR_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define PDMA0_CORE_WR_HBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define PDMA0_CORE_WR_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define PDMA0_CORE_WR_HBW_RATE_LIM_CFG_EN_SHIFT 31
+#define PDMA0_CORE_WR_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* PDMA0_CORE_WR_LBW_MAX_OUTSTAND */
+#define PDMA0_CORE_WR_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define PDMA0_CORE_WR_LBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_WR_LBW_MAX_AWID */
+#define PDMA0_CORE_WR_LBW_MAX_AWID_VAL_SHIFT 0
+#define PDMA0_CORE_WR_LBW_MAX_AWID_VAL_MASK 0x7F
+
+/* PDMA0_CORE_WR_LBW_AWCACHE */
+#define PDMA0_CORE_WR_LBW_AWCACHE_VAL_SHIFT 0
+#define PDMA0_CORE_WR_LBW_AWCACHE_VAL_MASK 0xF
+
+/* PDMA0_CORE_WR_LBW_INFLIGHTS */
+#define PDMA0_CORE_WR_LBW_INFLIGHTS_VAL_SHIFT 0
+#define PDMA0_CORE_WR_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_WR_LBW_RATE_LIM_CFG */
+#define PDMA0_CORE_WR_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
+#define PDMA0_CORE_WR_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
+#define PDMA0_CORE_WR_LBW_RATE_LIM_CFG_SAT_SHIFT 16
+#define PDMA0_CORE_WR_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
+#define PDMA0_CORE_WR_LBW_RATE_LIM_CFG_EN_SHIFT 31
+#define PDMA0_CORE_WR_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
+
+/* PDMA0_CORE_WR_COMP_MAX_OUTSTAND */
+#define PDMA0_CORE_WR_COMP_MAX_OUTSTAND_VAL_SHIFT 0
+#define PDMA0_CORE_WR_COMP_MAX_OUTSTAND_VAL_MASK 0x1F
+
+/* PDMA0_CORE_WR_COMP_AWUSER */
+#define PDMA0_CORE_WR_COMP_AWUSER_VAL_SHIFT 0
+#define PDMA0_CORE_WR_COMP_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_ERR_CFG */
+#define PDMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT 0
+#define PDMA0_CORE_ERR_CFG_ERR_MSG_EN_MASK 0x1
+#define PDMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT 1
+#define PDMA0_CORE_ERR_CFG_STOP_ON_ERR_MASK 0x2
+
+/* PDMA0_CORE_ERR_CAUSE */
+#define PDMA0_CORE_ERR_CAUSE_HBW_RD_ERR_SHIFT 0
+#define PDMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK 0x1
+#define PDMA0_CORE_ERR_CAUSE_HBW_WR_ERR_SHIFT 1
+#define PDMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK 0x2
+#define PDMA0_CORE_ERR_CAUSE_LBW_MSG_WR_ERR_SHIFT 2
+#define PDMA0_CORE_ERR_CAUSE_LBW_MSG_WR_ERR_MASK 0x4
+#define PDMA0_CORE_ERR_CAUSE_DESC_OVF_SHIFT 3
+#define PDMA0_CORE_ERR_CAUSE_DESC_OVF_MASK 0x8
+#define PDMA0_CORE_ERR_CAUSE_LBW_RD_ERR_SHIFT 4
+#define PDMA0_CORE_ERR_CAUSE_LBW_RD_ERR_MASK 0x10
+#define PDMA0_CORE_ERR_CAUSE_LBW_WR_ERR_SHIFT 5
+#define PDMA0_CORE_ERR_CAUSE_LBW_WR_ERR_MASK 0x20
+#define PDMA0_CORE_ERR_CAUSE_TE_DESC_FIFO_OVFL_SHIFT 6
+#define PDMA0_CORE_ERR_CAUSE_TE_DESC_FIFO_OVFL_MASK 0x40
+#define PDMA0_CORE_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_SHIFT 7
+#define PDMA0_CORE_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_MASK 0x80
+
+/* PDMA0_CORE_ERRMSG_ADDR_LO */
+#define PDMA0_CORE_ERRMSG_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_CORE_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_ERRMSG_ADDR_HI */
+#define PDMA0_CORE_ERRMSG_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_CORE_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_ERRMSG_WDATA */
+#define PDMA0_CORE_ERRMSG_WDATA_VAL_SHIFT 0
+#define PDMA0_CORE_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS0 */
+#define PDMA0_CORE_STS0_RD_REQ_CNT_SHIFT 0
+#define PDMA0_CORE_STS0_RD_REQ_CNT_MASK 0x7FFF
+#define PDMA0_CORE_STS0_WR_REQ_CNT_SHIFT 16
+#define PDMA0_CORE_STS0_WR_REQ_CNT_MASK 0x7FFF0000
+#define PDMA0_CORE_STS0_BUSY_SHIFT 31
+#define PDMA0_CORE_STS0_BUSY_MASK 0x80000000
+
+/* PDMA0_CORE_STS1 */
+#define PDMA0_CORE_STS1_IS_HALT_SHIFT 0
+#define PDMA0_CORE_STS1_IS_HALT_MASK 0x1
+
+/* PDMA0_CORE_STS_RD_CTX_SEL */
+#define PDMA0_CORE_STS_RD_CTX_SEL_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_CTX_SEL_VAL_MASK 0x7
+#define PDMA0_CORE_STS_RD_CTX_SEL_STRIDE_SHIFT 8
+#define PDMA0_CORE_STS_RD_CTX_SEL_STRIDE_MASK 0x100
+
+/* PDMA0_CORE_STS_RD_CTX_SIZE */
+#define PDMA0_CORE_STS_RD_CTX_SIZE_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_CTX_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_RD_CTX_BASE_LO */
+#define PDMA0_CORE_STS_RD_CTX_BASE_LO_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_RD_CTX_BASE_HI */
+#define PDMA0_CORE_STS_RD_CTX_BASE_HI_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_RD_CTX_ID */
+#define PDMA0_CORE_STS_RD_CTX_ID_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_CTX_ID_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_STS_RD_HB_AXI_ADDR_LO */
+#define PDMA0_CORE_STS_RD_HB_AXI_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_HB_AXI_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_RD_HB_AXI_ADDR_HI */
+#define PDMA0_CORE_STS_RD_HB_AXI_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_HB_AXI_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_RD_LB_AXI_ADDR */
+#define PDMA0_CORE_STS_RD_LB_AXI_ADDR_VAL_SHIFT 0
+#define PDMA0_CORE_STS_RD_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
+#define PDMA0_CORE_STS_RD_LB_AXI_ADDR_RDY_SHIFT 30
+#define PDMA0_CORE_STS_RD_LB_AXI_ADDR_RDY_MASK 0x40000000
+#define PDMA0_CORE_STS_RD_LB_AXI_ADDR_VLD_SHIFT 31
+#define PDMA0_CORE_STS_RD_LB_AXI_ADDR_VLD_MASK 0x80000000
+
+/* PDMA0_CORE_STS_WR_CTX_SEL */
+#define PDMA0_CORE_STS_WR_CTX_SEL_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_CTX_SEL_VAL_MASK 0x7
+#define PDMA0_CORE_STS_WR_CTX_SEL_STRIDE_SHIFT 8
+#define PDMA0_CORE_STS_WR_CTX_SEL_STRIDE_MASK 0x100
+
+/* PDMA0_CORE_STS_WR_CTX_SIZE */
+#define PDMA0_CORE_STS_WR_CTX_SIZE_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_CTX_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_WR_CTX_BASE_LO */
+#define PDMA0_CORE_STS_WR_CTX_BASE_LO_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_WR_CTX_BASE_HI */
+#define PDMA0_CORE_STS_WR_CTX_BASE_HI_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_WR_CTX_ID */
+#define PDMA0_CORE_STS_WR_CTX_ID_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_CTX_ID_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO */
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VAL_MASK 0x3FFFF
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_RDY_SHIFT 30
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_RDY_MASK 0x40000000
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VLD_SHIFT 31
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_LO_VLD_MASK 0x80000000
+
+/* PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI */
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VAL_MASK 0x3FFFF
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_RDY_SHIFT 30
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_RDY_MASK 0x40000000
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VLD_SHIFT 31
+#define PDMA0_CORE_STS_WR_HB_AXI_ADDR_HI_VLD_MASK 0x80000000
+
+/* PDMA0_CORE_STS_WR_LB_AXI_ADDR */
+#define PDMA0_CORE_STS_WR_LB_AXI_ADDR_VAL_SHIFT 0
+#define PDMA0_CORE_STS_WR_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
+#define PDMA0_CORE_STS_WR_LB_AXI_ADDR_RDY_SHIFT 30
+#define PDMA0_CORE_STS_WR_LB_AXI_ADDR_RDY_MASK 0x40000000
+#define PDMA0_CORE_STS_WR_LB_AXI_ADDR_VLD_SHIFT 31
+#define PDMA0_CORE_STS_WR_LB_AXI_ADDR_VLD_MASK 0x80000000
+
+/* PDMA0_CORE_PWRLP_CFG */
+#define PDMA0_CORE_PWRLP_CFG_GLBL_EN_SHIFT 0
+#define PDMA0_CORE_PWRLP_CFG_GLBL_EN_MASK 0x1
+#define PDMA0_CORE_PWRLP_CFG_CLR_SHIFT 4
+#define PDMA0_CORE_PWRLP_CFG_CLR_MASK 0x10
+
+/* PDMA0_CORE_PWRLP_STS */
+#define PDMA0_CORE_PWRLP_STS_RLVL_SHIFT 0
+#define PDMA0_CORE_PWRLP_STS_RLVL_MASK 0x7F
+#define PDMA0_CORE_PWRLP_STS_WLVL_SHIFT 8
+#define PDMA0_CORE_PWRLP_STS_WLVL_MASK 0x7F00
+#define PDMA0_CORE_PWRLP_STS_RCNT_SHIFT 16
+#define PDMA0_CORE_PWRLP_STS_RCNT_MASK 0x7F0000
+#define PDMA0_CORE_PWRLP_STS_WCNT_SHIFT 23
+#define PDMA0_CORE_PWRLP_STS_WCNT_MASK 0x3F800000
+#define PDMA0_CORE_PWRLP_STS_RFULL_SHIFT 30
+#define PDMA0_CORE_PWRLP_STS_RFULL_MASK 0x40000000
+#define PDMA0_CORE_PWRLP_STS_WFULL_SHIFT 31
+#define PDMA0_CORE_PWRLP_STS_WFULL_MASK 0x80000000
+
+/* PDMA0_CORE_DBG_DESC_CNT */
+#define PDMA0_CORE_DBG_DESC_CNT_VAL_SHIFT 0
+#define PDMA0_CORE_DBG_DESC_CNT_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_DBG_STS */
+#define PDMA0_CORE_DBG_STS_RD_CTX_FULL_SHIFT 0
+#define PDMA0_CORE_DBG_STS_RD_CTX_FULL_MASK 0x1
+#define PDMA0_CORE_DBG_STS_WR_CTX_FULL_SHIFT 1
+#define PDMA0_CORE_DBG_STS_WR_CTX_FULL_MASK 0x2
+#define PDMA0_CORE_DBG_STS_WR_COMP_FULL_SHIFT 2
+#define PDMA0_CORE_DBG_STS_WR_COMP_FULL_MASK 0x4
+#define PDMA0_CORE_DBG_STS_RD_CTX_EMPTY_SHIFT 3
+#define PDMA0_CORE_DBG_STS_RD_CTX_EMPTY_MASK 0x8
+#define PDMA0_CORE_DBG_STS_WR_CTX_EMPTY_SHIFT 4
+#define PDMA0_CORE_DBG_STS_WR_CTX_EMPTY_MASK 0x10
+#define PDMA0_CORE_DBG_STS_WR_COMP_EMPTY_SHIFT 5
+#define PDMA0_CORE_DBG_STS_WR_COMP_EMPTY_MASK 0x20
+#define PDMA0_CORE_DBG_STS_TE_EMPTY_SHIFT 6
+#define PDMA0_CORE_DBG_STS_TE_EMPTY_MASK 0x40
+#define PDMA0_CORE_DBG_STS_TE_BUSY_SHIFT 7
+#define PDMA0_CORE_DBG_STS_TE_BUSY_MASK 0x80
+#define PDMA0_CORE_DBG_STS_GSKT_EMPTY_SHIFT 8
+#define PDMA0_CORE_DBG_STS_GSKT_EMPTY_MASK 0x100
+#define PDMA0_CORE_DBG_STS_GSKT_FULL_SHIFT 9
+#define PDMA0_CORE_DBG_STS_GSKT_FULL_MASK 0x200
+#define PDMA0_CORE_DBG_STS_RD_AGU_CS_SHIFT 10
+#define PDMA0_CORE_DBG_STS_RD_AGU_CS_MASK 0x400
+#define PDMA0_CORE_DBG_STS_WR_AGU_CS_SHIFT 11
+#define PDMA0_CORE_DBG_STS_WR_AGU_CS_MASK 0x800
+
+/* PDMA0_CORE_DBG_BUF_STS */
+#define PDMA0_CORE_DBG_BUF_STS_HBW_FULLNESS_SHIFT 0
+#define PDMA0_CORE_DBG_BUF_STS_HBW_FULLNESS_MASK 0xFFF
+#define PDMA0_CORE_DBG_BUF_STS_LBW_FULLNESS_SHIFT 16
+#define PDMA0_CORE_DBG_BUF_STS_LBW_FULLNESS_MASK 0xFFF0000
+
+/* PDMA0_CORE_DBG_RD_DESC_ID */
+#define PDMA0_CORE_DBG_RD_DESC_ID_VAL_SHIFT 0
+#define PDMA0_CORE_DBG_RD_DESC_ID_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_DBG_WR_DESC_ID */
+#define PDMA0_CORE_DBG_WR_DESC_ID_VAL_SHIFT 0
+#define PDMA0_CORE_DBG_WR_DESC_ID_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_APB_DMA_LBW_BASE */
+#define PDMA0_CORE_APB_DMA_LBW_BASE_VAL_SHIFT 0
+#define PDMA0_CORE_APB_DMA_LBW_BASE_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_APB_MSTR_IF_LBW_BASE */
+#define PDMA0_CORE_APB_MSTR_IF_LBW_BASE_VAL_SHIFT 0
+#define PDMA0_CORE_APB_MSTR_IF_LBW_BASE_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_E2E_CRED_ASYNC_CFG */
+#define PDMA0_CORE_E2E_CRED_ASYNC_CFG_Y_X_FORCE_SHIFT 0
+#define PDMA0_CORE_E2E_CRED_ASYNC_CFG_Y_X_FORCE_MASK 0x1FF
+#define PDMA0_CORE_E2E_CRED_ASYNC_CFG_FORCE_EN_SHIFT 9
+#define PDMA0_CORE_E2E_CRED_ASYNC_CFG_FORCE_EN_MASK 0x200
+
+/* PDMA0_CORE_DBG_APB_ENABLER */
+#define PDMA0_CORE_DBG_APB_ENABLER_DIS_SHIFT 0
+#define PDMA0_CORE_DBG_APB_ENABLER_DIS_MASK 0x1
+
+/* PDMA0_CORE_L2H_CMPR_LO */
+#define PDMA0_CORE_L2H_CMPR_LO_VAL_SHIFT 20
+#define PDMA0_CORE_L2H_CMPR_LO_VAL_MASK 0xFFF00000
+
+/* PDMA0_CORE_L2H_CMPR_HI */
+#define PDMA0_CORE_L2H_CMPR_HI_VAL_SHIFT 0
+#define PDMA0_CORE_L2H_CMPR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_L2H_MASK_LO */
+#define PDMA0_CORE_L2H_MASK_LO_VAL_SHIFT 20
+#define PDMA0_CORE_L2H_MASK_LO_VAL_MASK 0xFFF00000
+
+/* PDMA0_CORE_L2H_MASK_HI */
+#define PDMA0_CORE_L2H_MASK_HI_VAL_SHIFT 0
+#define PDMA0_CORE_L2H_MASK_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_IDLE_IND_MASK */
+#define PDMA0_CORE_IDLE_IND_MASK_DESC_SHIFT 0
+#define PDMA0_CORE_IDLE_IND_MASK_DESC_MASK 0x1
+#define PDMA0_CORE_IDLE_IND_MASK_COMP_SHIFT 1
+#define PDMA0_CORE_IDLE_IND_MASK_COMP_MASK 0x2
+#define PDMA0_CORE_IDLE_IND_MASK_INSTAGE_SHIFT 2
+#define PDMA0_CORE_IDLE_IND_MASK_INSTAGE_MASK 0x4
+#define PDMA0_CORE_IDLE_IND_MASK_CORE_SHIFT 3
+#define PDMA0_CORE_IDLE_IND_MASK_CORE_MASK 0x8
+#define PDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_SHIFT 8
+#define PDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_MASK 0x1F00
+#define PDMA0_CORE_IDLE_IND_MASK_COMP_CNT_STS_SHIFT 16
+#define PDMA0_CORE_IDLE_IND_MASK_COMP_CNT_STS_MASK 0x1F0000
+#define PDMA0_CORE_IDLE_IND_MASK_INSTAGE_EMPTY_SHIFT 24
+#define PDMA0_CORE_IDLE_IND_MASK_INSTAGE_EMPTY_MASK 0x1000000
+#define PDMA0_CORE_IDLE_IND_MASK_CORE_IDLE_STS_SHIFT 25
+#define PDMA0_CORE_IDLE_IND_MASK_CORE_IDLE_STS_MASK 0x2000000
+
+/* PDMA0_CORE_APB_ENABLER */
+#define PDMA0_CORE_APB_ENABLER_DIS_SHIFT 0
+#define PDMA0_CORE_APB_ENABLER_DIS_MASK 0x1
+
+#endif /* ASIC_REG_PDMA0_CORE_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
new file mode 100644
index 000000000000..84079b5077e2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_CORE_REGS_H_
+#define ASIC_REG_PDMA0_CORE_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_CORE
+ * (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmPDMA0_CORE_CFG_0 0x4C8B000
+
+#define mmPDMA0_CORE_CFG_1 0x4C8B004
+
+#define mmPDMA0_CORE_PROT 0x4C8B008
+
+#define mmPDMA0_CORE_CKG 0x4C8B00C
+
+#define mmPDMA0_CORE_RD_GLBL 0x4C8B07C
+
+#define mmPDMA0_CORE_RD_HBW_MAX_OUTSTAND 0x4C8B080
+
+#define mmPDMA0_CORE_RD_HBW_MAX_SIZE 0x4C8B084
+
+#define mmPDMA0_CORE_RD_HBW_ARCACHE 0x4C8B088
+
+#define mmPDMA0_CORE_RD_HBW_INFLIGHTS 0x4C8B090
+
+#define mmPDMA0_CORE_RD_HBW_RATE_LIM_CFG 0x4C8B094
+
+#define mmPDMA0_CORE_RD_LBW_MAX_OUTSTAND 0x4C8B0C0
+
+#define mmPDMA0_CORE_RD_LBW_MAX_SIZE 0x4C8B0C4
+
+#define mmPDMA0_CORE_RD_LBW_ARCACHE 0x4C8B0C8
+
+#define mmPDMA0_CORE_RD_LBW_INFLIGHTS 0x4C8B0D0
+
+#define mmPDMA0_CORE_RD_LBW_RATE_LIM_CFG 0x4C8B0D4
+
+#define mmPDMA0_CORE_WR_HBW_MAX_OUTSTAND 0x4C8B100
+
+#define mmPDMA0_CORE_WR_HBW_MAX_AWID 0x4C8B104
+
+#define mmPDMA0_CORE_WR_HBW_AWCACHE 0x4C8B108
+
+#define mmPDMA0_CORE_WR_HBW_INFLIGHTS 0x4C8B10C
+
+#define mmPDMA0_CORE_WR_HBW_RATE_LIM_CFG 0x4C8B110
+
+#define mmPDMA0_CORE_WR_LBW_MAX_OUTSTAND 0x4C8B140
+
+#define mmPDMA0_CORE_WR_LBW_MAX_AWID 0x4C8B144
+
+#define mmPDMA0_CORE_WR_LBW_AWCACHE 0x4C8B148
+
+#define mmPDMA0_CORE_WR_LBW_INFLIGHTS 0x4C8B14C
+
+#define mmPDMA0_CORE_WR_LBW_RATE_LIM_CFG 0x4C8B150
+
+#define mmPDMA0_CORE_WR_COMP_MAX_OUTSTAND 0x4C8B180
+
+#define mmPDMA0_CORE_WR_COMP_AWUSER 0x4C8B184
+
+#define mmPDMA0_CORE_ERR_CFG 0x4C8B300
+
+#define mmPDMA0_CORE_ERR_CAUSE 0x4C8B304
+
+#define mmPDMA0_CORE_ERRMSG_ADDR_LO 0x4C8B308
+
+#define mmPDMA0_CORE_ERRMSG_ADDR_HI 0x4C8B30C
+
+#define mmPDMA0_CORE_ERRMSG_WDATA 0x4C8B310
+
+#define mmPDMA0_CORE_STS0 0x4C8B380
+
+#define mmPDMA0_CORE_STS1 0x4C8B384
+
+#define mmPDMA0_CORE_STS_RD_CTX_SEL 0x4C8B400
+
+#define mmPDMA0_CORE_STS_RD_CTX_SIZE 0x4C8B404
+
+#define mmPDMA0_CORE_STS_RD_CTX_BASE_LO 0x4C8B408
+
+#define mmPDMA0_CORE_STS_RD_CTX_BASE_HI 0x4C8B40C
+
+#define mmPDMA0_CORE_STS_RD_CTX_ID 0x4C8B410
+
+#define mmPDMA0_CORE_STS_RD_HB_AXI_ADDR_LO 0x4C8B414
+
+#define mmPDMA0_CORE_STS_RD_HB_AXI_ADDR_HI 0x4C8B418
+
+#define mmPDMA0_CORE_STS_RD_LB_AXI_ADDR 0x4C8B41C
+
+#define mmPDMA0_CORE_STS_WR_CTX_SEL 0x4C8B420
+
+#define mmPDMA0_CORE_STS_WR_CTX_SIZE 0x4C8B424
+
+#define mmPDMA0_CORE_STS_WR_CTX_BASE_LO 0x4C8B428
+
+#define mmPDMA0_CORE_STS_WR_CTX_BASE_HI 0x4C8B42C
+
+#define mmPDMA0_CORE_STS_WR_CTX_ID 0x4C8B430
+
+#define mmPDMA0_CORE_STS_WR_HB_AXI_ADDR_LO 0x4C8B434
+
+#define mmPDMA0_CORE_STS_WR_HB_AXI_ADDR_HI 0x4C8B438
+
+#define mmPDMA0_CORE_STS_WR_LB_AXI_ADDR 0x4C8B43C
+
+#define mmPDMA0_CORE_PWRLP_CFG 0x4C8B700
+
+#define mmPDMA0_CORE_PWRLP_STS 0x4C8B704
+
+#define mmPDMA0_CORE_DBG_DESC_CNT 0x4C8B710
+
+#define mmPDMA0_CORE_DBG_STS 0x4C8B714
+
+#define mmPDMA0_CORE_DBG_BUF_STS 0x4C8B718
+
+#define mmPDMA0_CORE_DBG_RD_DESC_ID 0x4C8B720
+
+#define mmPDMA0_CORE_DBG_WR_DESC_ID 0x4C8B724
+
+#define mmPDMA0_CORE_APB_DMA_LBW_BASE 0x4C8B728
+
+#define mmPDMA0_CORE_APB_MSTR_IF_LBW_BASE 0x4C8B72C
+
+#define mmPDMA0_CORE_E2E_CRED_ASYNC_CFG 0x4C8B730
+
+#define mmPDMA0_CORE_DBG_APB_ENABLER 0x4C8BE1C
+
+#define mmPDMA0_CORE_L2H_CMPR_LO 0x4C8BE20
+
+#define mmPDMA0_CORE_L2H_CMPR_HI 0x4C8BE24
+
+#define mmPDMA0_CORE_L2H_MASK_LO 0x4C8BE28
+
+#define mmPDMA0_CORE_L2H_MASK_HI 0x4C8BE2C
+
+#define mmPDMA0_CORE_IDLE_IND_MASK 0x4C8BE30
+
+#define mmPDMA0_CORE_APB_ENABLER 0x4C8BE34
+
+#endif /* ASIC_REG_PDMA0_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
new file mode 100644
index 000000000000..15d257e3830e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_CORE_SPECIAL_MASKS_H_
+#define ASIC_REG_PDMA0_CORE_SPECIAL_MASKS_H_
+
+/*
+ *****************************************
+ * PDMA0_CORE_SPECIAL
+ * (Prototype: SPECIAL_REGS)
+ *****************************************
+ */
+
+/* PDMA0_CORE_SPECIAL_GLBL_PRIV */
+#define PDMA0_CORE_SPECIAL_GLBL_PRIV_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_GLBL_PRIV_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_SPECIAL_MEM_GW_DATA */
+#define PDMA0_CORE_SPECIAL_MEM_GW_DATA_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_GW_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_SPECIAL_MEM_GW_REQ */
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_ADDR_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_ADDR_MASK 0x3FFFFF
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_MID_SHIFT 22
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_MID_MASK 0x3FC00000
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_WNR_SHIFT 30
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_WNR_MASK 0x40000000
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_VLD_SHIFT 31
+#define PDMA0_CORE_SPECIAL_MEM_GW_REQ_VLD_MASK 0x80000000
+
+/* PDMA0_CORE_SPECIAL_MEM_NUMOF */
+#define PDMA0_CORE_SPECIAL_MEM_NUMOF_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_NUMOF_VAL_MASK 0xFF
+
+/* PDMA0_CORE_SPECIAL_MEM_ECC_SEL */
+#define PDMA0_CORE_SPECIAL_MEM_ECC_SEL_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_ECC_SEL_VAL_MASK 0xFF
+
+/* PDMA0_CORE_SPECIAL_MEM_ECC_CTL */
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_SERR_INJ_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_SERR_INJ_MASK 0x1
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_DERR_INJ_SHIFT 1
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_DERR_INJ_MASK 0x2
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_SERR_CLR_SHIFT 2
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_SERR_CLR_MASK 0x4
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_DERR_CLR_SHIFT 3
+#define PDMA0_CORE_SPECIAL_MEM_ECC_CTL_DERR_CLR_MASK 0x8
+
+/* PDMA0_CORE_SPECIAL_MEM_ECC_ERR_MASK */
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_MASK_SERR_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_MASK_SERR_MASK 0x1
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_MASK_DERR_SHIFT 1
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_MASK_DERR_MASK 0x2
+
+/* PDMA0_CORE_SPECIAL_MEM_ECC_GLBL_ERR_MASK */
+#define PDMA0_CORE_SPECIAL_MEM_ECC_GLBL_ERR_MASK_SERR_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_ECC_GLBL_ERR_MASK_SERR_MASK 0x1
+#define PDMA0_CORE_SPECIAL_MEM_ECC_GLBL_ERR_MASK_DERR_SHIFT 1
+#define PDMA0_CORE_SPECIAL_MEM_ECC_GLBL_ERR_MASK_DERR_MASK 0x2
+
+/* PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS */
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS_SYND_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS_SYND_MASK 0xFFFF
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS_SERR_SHIFT 16
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS_SERR_MASK 0x10000
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS_DERR_SHIFT 17
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_STS_DERR_MASK 0x20000
+
+/* PDMA0_CORE_SPECIAL_MEM_ECC_ERR_ADDR */
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_ADDR_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_ECC_ERR_ADDR_VAL_MASK 0xFFFF
+
+/* PDMA0_CORE_SPECIAL_MEM_RM */
+#define PDMA0_CORE_SPECIAL_MEM_RM_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_MEM_RM_VAL_MASK 0x3FFFFFFF
+
+/* PDMA0_CORE_SPECIAL_GLBL_ERR_MASK */
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_PRIV_RD_SHIFT 0
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_PRIV_RD_MASK 0x1
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_SEC_RD_SHIFT 1
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_SEC_RD_MASK 0x2
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_UNMAPPED_RD_SHIFT 2
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_UNMAPPED_RD_MASK 0x4
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_PRIV_WR_SHIFT 3
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_PRIV_WR_MASK 0x8
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_SEC_WR_SHIFT 4
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_SEC_WR_MASK 0x10
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_UNMAPPED_WR_SHIFT 5
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_APB_UNMAPPED_WR_MASK 0x20
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_EXT_SEC_WR_SHIFT 16
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_EXT_SEC_WR_MASK 0x10000
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_EXT_UNMAPPED_WR_SHIFT 17
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_MASK_EXT_UNMAPPED_WR_MASK 0x20000
+
+/* PDMA0_CORE_SPECIAL_GLBL_ERR_ADDR */
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_ADDR_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE */
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD_SHIFT 0
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD_MASK 0x1
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD_SHIFT 1
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD_MASK 0x2
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD_SHIFT 2
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD_MASK 0x4
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR_SHIFT 3
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR_MASK 0x8
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR_SHIFT 4
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR_MASK 0x10
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR_SHIFT 5
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR_MASK 0x20
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR_SHIFT 16
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR_MASK 0x10000
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR_SHIFT 17
+#define PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR_MASK 0x20000
+
+/* PDMA0_CORE_SPECIAL_GLBL_SPARE */
+#define PDMA0_CORE_SPECIAL_GLBL_SPARE_R_SHIFT 0
+#define PDMA0_CORE_SPECIAL_GLBL_SPARE_R_MASK 0xFFFFFFFF
+
+/* PDMA0_CORE_SPECIAL_GLBL_SEC */
+#define PDMA0_CORE_SPECIAL_GLBL_SEC_VAL_SHIFT 0
+#define PDMA0_CORE_SPECIAL_GLBL_SEC_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_PDMA0_CORE_SPECIAL_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
new file mode 100644
index 000000000000..9b1cb609d134
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_QM_ARC_AUX_REGS_H_
+#define ASIC_REG_PDMA0_QM_ARC_AUX_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_QM_ARC_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmPDMA0_QM_ARC_AUX_RUN_HALT_REQ 0x4C88100
+
+#define mmPDMA0_QM_ARC_AUX_RUN_HALT_ACK 0x4C88104
+
+#define mmPDMA0_QM_ARC_AUX_RST_VEC_ADDR 0x4C88108
+
+#define mmPDMA0_QM_ARC_AUX_DBG_MODE 0x4C8810C
+
+#define mmPDMA0_QM_ARC_AUX_CLUSTER_NUM 0x4C88110
+
+#define mmPDMA0_QM_ARC_AUX_ARC_NUM 0x4C88114
+
+#define mmPDMA0_QM_ARC_AUX_WAKE_UP_EVENT 0x4C88118
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_SYS_ADDR_BASE 0x4C8811C
+
+#define mmPDMA0_QM_ARC_AUX_CTI_AP_STS 0x4C88120
+
+#define mmPDMA0_QM_ARC_AUX_CTI_CFG_MUX_SEL 0x4C88124
+
+#define mmPDMA0_QM_ARC_AUX_ARC_RST 0x4C88128
+
+#define mmPDMA0_QM_ARC_AUX_ARC_RST_REQ 0x4C8812C
+
+#define mmPDMA0_QM_ARC_AUX_SRAM_LSB_ADDR 0x4C88130
+
+#define mmPDMA0_QM_ARC_AUX_SRAM_MSB_ADDR 0x4C88134
+
+#define mmPDMA0_QM_ARC_AUX_PCIE_LSB_ADDR 0x4C88138
+
+#define mmPDMA0_QM_ARC_AUX_PCIE_MSB_ADDR 0x4C8813C
+
+#define mmPDMA0_QM_ARC_AUX_CFG_LSB_ADDR 0x4C88140
+
+#define mmPDMA0_QM_ARC_AUX_CFG_MSB_ADDR 0x4C88144
+
+#define mmPDMA0_QM_ARC_AUX_HBM0_LSB_ADDR 0x4C88150
+
+#define mmPDMA0_QM_ARC_AUX_HBM0_MSB_ADDR 0x4C88154
+
+#define mmPDMA0_QM_ARC_AUX_HBM1_LSB_ADDR 0x4C88158
+
+#define mmPDMA0_QM_ARC_AUX_HBM1_MSB_ADDR 0x4C8815C
+
+#define mmPDMA0_QM_ARC_AUX_HBM2_LSB_ADDR 0x4C88160
+
+#define mmPDMA0_QM_ARC_AUX_HBM2_MSB_ADDR 0x4C88164
+
+#define mmPDMA0_QM_ARC_AUX_HBM3_LSB_ADDR 0x4C88168
+
+#define mmPDMA0_QM_ARC_AUX_HBM3_MSB_ADDR 0x4C8816C
+
+#define mmPDMA0_QM_ARC_AUX_HBM0_OFFSET 0x4C88170
+
+#define mmPDMA0_QM_ARC_AUX_HBM1_OFFSET 0x4C88174
+
+#define mmPDMA0_QM_ARC_AUX_HBM2_OFFSET 0x4C88178
+
+#define mmPDMA0_QM_ARC_AUX_HBM3_OFFSET 0x4C8817C
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x4C88180
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x4C88184
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x4C88188
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x4C8818C
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x4C88190
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x4C88194
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x4C88198
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x4C8819C
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x4C881A0
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x4C881A4
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x4C881A8
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x4C881AC
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x4C881B0
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x4C881B4
+
+#define mmPDMA0_QM_ARC_AUX_ARC_CBU_AWCACHE_OVR 0x4C881B8
+
+#define mmPDMA0_QM_ARC_AUX_ARC_LBU_AWCACHE_OVR 0x4C881BC
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_0 0x4C881C0
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_1 0x4C881C4
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_2 0x4C881C8
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_3 0x4C881CC
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_4 0x4C881D0
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_5 0x4C881D4
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_6 0x4C881D8
+
+#define mmPDMA0_QM_ARC_AUX_CONTEXT_ID_7 0x4C881DC
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_0 0x4C881E0
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_1 0x4C881E4
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_2 0x4C881E8
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_3 0x4C881EC
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_4 0x4C881F0
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_5 0x4C881F4
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_6 0x4C881F8
+
+#define mmPDMA0_QM_ARC_AUX_CID_OFFSET_7 0x4C881FC
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_0 0x4C88200
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_1 0x4C88204
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_2 0x4C88208
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_3 0x4C8820C
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_4 0x4C88210
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_5 0x4C88214
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_6 0x4C88218
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_7 0x4C8821C
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_8 0x4C88220
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_9 0x4C88224
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_10 0x4C88228
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_11 0x4C8822C
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_12 0x4C88230
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_13 0x4C88234
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_14 0x4C88238
+
+#define mmPDMA0_QM_ARC_AUX_SW_INTR_15 0x4C8823C
+
+#define mmPDMA0_QM_ARC_AUX_IRQ_INTR_MASK_0 0x4C88280
+
+#define mmPDMA0_QM_ARC_AUX_IRQ_INTR_MASK_1 0x4C88284
+
+#define mmPDMA0_QM_ARC_AUX_ARC_SEI_INTR_STS 0x4C88290
+
+#define mmPDMA0_QM_ARC_AUX_ARC_SEI_INTR_CLR 0x4C88294
+
+#define mmPDMA0_QM_ARC_AUX_ARC_SEI_INTR_MASK 0x4C88298
+
+#define mmPDMA0_QM_ARC_AUX_ARC_EXCPTN_CAUSE 0x4C8829C
+
+#define mmPDMA0_QM_ARC_AUX_SEI_INTR_HALT_EN 0x4C882A0
+
+#define mmPDMA0_QM_ARC_AUX_ARC_SEI_INTR_HALT_MASK 0x4C882A4
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_SEI_INTR_HALT_MASK 0x4C882A8
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REI_INTR_STS 0x4C882B0
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REI_INTR_CLR 0x4C882B4
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REI_INTR_MASK 0x4C882B8
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_ECC_ERR_ADDR 0x4C882BC
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_ECC_SYNDROME 0x4C882C0
+
+#define mmPDMA0_QM_ARC_AUX_I_CACHE_ECC_ERR_ADDR 0x4C882C4
+
+#define mmPDMA0_QM_ARC_AUX_I_CACHE_ECC_SYNDROME 0x4C882C8
+
+#define mmPDMA0_QM_ARC_AUX_D_CACHE_ECC_ERR_ADDR 0x4C882CC
+
+#define mmPDMA0_QM_ARC_AUX_D_CACHE_ECC_SYNDROME 0x4C882D0
+
+#define mmPDMA0_QM_ARC_AUX_LBW_TRMINATE_AWADDR_ERR 0x4C882E0
+
+#define mmPDMA0_QM_ARC_AUX_LBW_TRMINATE_ARADDR_ERR 0x4C882E4
+
+#define mmPDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_BRESP 0x4C882E8
+
+#define mmPDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_RRESP 0x4C882EC
+
+#define mmPDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXLEN 0x4C882F0
+
+#define mmPDMA0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXSIZE 0x4C882F4
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_0 0x4C88300
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_1 0x4C88304
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_2 0x4C88308
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_3 0x4C8830C
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_4 0x4C88310
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_5 0x4C88314
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_6 0x4C88318
+
+#define mmPDMA0_QM_ARC_AUX_SCRATCHPAD_7 0x4C8831C
+
+#define mmPDMA0_QM_ARC_AUX_TOTAL_CBU_WR_CNT 0x4C88320
+
+#define mmPDMA0_QM_ARC_AUX_INFLIGHT_CBU_WR_CNT 0x4C88324
+
+#define mmPDMA0_QM_ARC_AUX_TOTAL_CBU_RD_CNT 0x4C88328
+
+#define mmPDMA0_QM_ARC_AUX_INFLIGHT_CBU_RD_CNT 0x4C8832C
+
+#define mmPDMA0_QM_ARC_AUX_TOTAL_LBU_WR_CNT 0x4C88330
+
+#define mmPDMA0_QM_ARC_AUX_INFLIGHT_LBU_WR_CNT 0x4C88334
+
+#define mmPDMA0_QM_ARC_AUX_TOTAL_LBU_RD_CNT 0x4C88338
+
+#define mmPDMA0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT 0x4C8833C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_ARUSER_OVR 0x4C88350
+
+#define mmPDMA0_QM_ARC_AUX_CBU_ARUSER_OVR_EN 0x4C88354
+
+#define mmPDMA0_QM_ARC_AUX_CBU_AWUSER_OVR 0x4C88358
+
+#define mmPDMA0_QM_ARC_AUX_CBU_AWUSER_OVR_EN 0x4C8835C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR 0x4C88360
+
+#define mmPDMA0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR_EN 0x4C88364
+
+#define mmPDMA0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR 0x4C88368
+
+#define mmPDMA0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR_EN 0x4C8836C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_AXCACHE_OVR 0x4C88370
+
+#define mmPDMA0_QM_ARC_AUX_CBU_LOCK_OVR 0x4C88374
+
+#define mmPDMA0_QM_ARC_AUX_CBU_PROT_OVR 0x4C88378
+
+#define mmPDMA0_QM_ARC_AUX_CBU_MAX_OUTSTANDING 0x4C8837C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN 0x4C88380
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORCE_RSP_OK 0x4C88384
+
+#define mmPDMA0_QM_ARC_AUX_CBU_NO_WR_INFLIGHT 0x4C8838C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_SEI_INTR_ID 0x4C88390
+
+#define mmPDMA0_QM_ARC_AUX_LBU_ARUSER_OVR 0x4C88400
+
+#define mmPDMA0_QM_ARC_AUX_LBU_ARUSER_OVR_EN 0x4C88404
+
+#define mmPDMA0_QM_ARC_AUX_LBU_AWUSER_OVR 0x4C88408
+
+#define mmPDMA0_QM_ARC_AUX_LBU_AWUSER_OVR_EN 0x4C8840C
+
+#define mmPDMA0_QM_ARC_AUX_LBU_AXCACHE_OVR 0x4C88420
+
+#define mmPDMA0_QM_ARC_AUX_LBU_LOCK_OVR 0x4C88424
+
+#define mmPDMA0_QM_ARC_AUX_LBU_PROT_OVR 0x4C88428
+
+#define mmPDMA0_QM_ARC_AUX_LBU_MAX_OUTSTANDING 0x4C8842C
+
+#define mmPDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN 0x4C88430
+
+#define mmPDMA0_QM_ARC_AUX_LBU_FORCE_RSP_OK 0x4C88434
+
+#define mmPDMA0_QM_ARC_AUX_LBU_NO_WR_INFLIGHT 0x4C8843C
+
+#define mmPDMA0_QM_ARC_AUX_LBU_SEI_INTR_ID 0x4C88440
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0 0x4C88500
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_1 0x4C88504
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_2 0x4C88508
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_3 0x4C8850C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_4 0x4C88510
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_5 0x4C88514
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_6 0x4C88518
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_7 0x4C8851C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_0 0x4C88520
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_1 0x4C88524
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_2 0x4C88528
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_3 0x4C8852C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_4 0x4C88530
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_5 0x4C88534
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_6 0x4C88538
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_SIZE_7 0x4C8853C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_0 0x4C88540
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_1 0x4C88544
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_2 0x4C88548
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_3 0x4C8854C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_4 0x4C88550
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_5 0x4C88554
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_6 0x4C88558
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PI_7 0x4C8855C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_0 0x4C88560
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_1 0x4C88564
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_2 0x4C88568
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_3 0x4C8856C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_4 0x4C88570
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_5 0x4C88574
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_6 0x4C88578
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_CI_7 0x4C8857C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_0 0x4C88580
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_1 0x4C88584
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_2 0x4C88588
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_3 0x4C8858C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_4 0x4C88590
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_5 0x4C88594
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_6 0x4C88598
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_7 0x4C8859C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x4C885A0
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x4C885A4
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x4C885A8
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x4C885AC
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x4C885B0
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x4C885B4
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x4C885B8
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x4C885BC
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x4C885C0
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x4C885C4
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x4C885C8
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x4C885CC
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x4C885D0
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x4C885D4
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x4C885D8
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x4C885DC
+
+#define mmPDMA0_QM_ARC_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x4C885E0
+
+#define mmPDMA0_QM_ARC_AUX_NIC_Q_VLD_ENTRY_MASK 0x4C885E4
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_DROP_EN 0x4C88620
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_WARN_MSG 0x4C88624
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG 0x4C88628
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWPROT 0x4C88630
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWUSER 0x4C88634
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWBURST 0x4C88638
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWLOCK 0x4C8863C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_GEN_AXI_AWCACHE 0x4C88640
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_WRR_ARB_WEIGHT 0x4C88644
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x4C88648
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT 0x4C8864C
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x4C88650
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x4C88654
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_CQ_SHADOW_CI 0x4C88658
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI 0x4C8865C
+
+#define mmPDMA0_QM_ARC_AUX_AUX2APB_PROT 0x4C88700
+
+#define mmPDMA0_QM_ARC_AUX_LBW_FORK_WIN_EN 0x4C88704
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x4C88708
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x4C8870C
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x4C88710
+
+#define mmPDMA0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x4C88714
+
+#define mmPDMA0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR0 0x4C88718
+
+#define mmPDMA0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK0 0x4C8871C
+
+#define mmPDMA0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR1 0x4C88720
+
+#define mmPDMA0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK1 0x4C88724
+
+#define mmPDMA0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR0 0x4C88728
+
+#define mmPDMA0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR1 0x4C8872C
+
+#define mmPDMA0_QM_ARC_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x4C88730
+
+#define mmPDMA0_QM_ARC_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x4C88734
+
+#define mmPDMA0_QM_ARC_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x4C88738
+
+#define mmPDMA0_QM_ARC_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x4C8873C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_WIN_EN 0x4C88740
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_LSB 0x4C88750
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_MSB 0x4C88754
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_LSB 0x4C88758
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_MSB 0x4C8875C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_LSB 0x4C88760
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_MSB 0x4C88764
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_LSB 0x4C88768
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_MSB 0x4C8876C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_LSB 0x4C88770
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_MSB 0x4C88774
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_LSB 0x4C88778
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_MSB 0x4C8877C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_LSB 0x4C88780
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_MSB 0x4C88784
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_LSB 0x4C88788
+
+#define mmPDMA0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_MSB 0x4C8878C
+
+#define mmPDMA0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_LSB 0x4C88790
+
+#define mmPDMA0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_MSB 0x4C88794
+
+#define mmPDMA0_QM_ARC_AUX_CFG_CBU_TERMINATE_BRESP 0x4C88798
+
+#define mmPDMA0_QM_ARC_AUX_CFG_CBU_TERMINATE_RRESP 0x4C8879C
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_0 0x4C88800
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_1 0x4C88804
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_2 0x4C88808
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_3 0x4C8880C
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_4 0x4C88810
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_5 0x4C88814
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_6 0x4C88818
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_7 0x4C8881C
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_8 0x4C88820
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_9 0x4C88824
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_10 0x4C88828
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_11 0x4C8882C
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_12 0x4C88830
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_13 0x4C88834
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_14 0x4C88838
+
+#define mmPDMA0_QM_ARC_AUX_ARC_REGION_CFG_15 0x4C8883C
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_TRMINATE_AWADDR_ERR 0x4C88840
+
+#define mmPDMA0_QM_ARC_AUX_DCCM_TRMINATE_ARADDR_ERR 0x4C88844
+
+#define mmPDMA0_QM_ARC_AUX_CFG_DCCM_TERMINATE_BRESP 0x4C88848
+
+#define mmPDMA0_QM_ARC_AUX_CFG_DCCM_TERMINATE_RRESP 0x4C8884C
+
+#define mmPDMA0_QM_ARC_AUX_CFG_DCCM_TERMINATE_EN 0x4C88850
+
+#define mmPDMA0_QM_ARC_AUX_CFG_DCCM_SECURE_REGION 0x4C88854
+
+#define mmPDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x4C88900
+
+#define mmPDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_CTL 0x4C88904
+
+#define mmPDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x4C88908
+
+#define mmPDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR 0x4C8890C
+
+#define mmPDMA0_QM_ARC_AUX_ARC_ACC_ENGS_BUSER 0x4C88910
+
+#define mmPDMA0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN 0x4C88920
+
+#endif /* ASIC_REG_PDMA0_QM_ARC_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..d2e0756ec5f2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_PDMA0_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_ASID 0x4C8AB80
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP 0x4C8AB84
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x4C8AB88
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x4C8AB8C
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x4C8AB90
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x4C8AB94
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_QOS 0x4C8AB98
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_RSVD 0x4C8AB9C
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x4C8ABA0
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_CORE 0x4C8ABA4
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_E2E_COORD 0x4C8ABA8
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x4C8ABB0
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x4C8ABB4
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x4C8ABB8
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x4C8ABBC
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_LB_COORD 0x4C8ABC0
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_LB_LOCK 0x4C8ABC4
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_LB_RSVD 0x4C8ABC8
+
+#define mmPDMA0_QM_AXUSER_NONSECURED_LB_OVRD 0x4C8ABCC
+
+#endif /* ASIC_REG_PDMA0_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
new file mode 100644
index 000000000000..8bf0516b83f7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_QM_AXUSER_SECURED_REGS_H_
+#define ASIC_REG_PDMA0_QM_AXUSER_SECURED_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_QM_AXUSER_SECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_ASID 0x4C8AB00
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_MMU_BP 0x4C8AB04
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_STRONG_ORDER 0x4C8AB08
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_NO_SNOOP 0x4C8AB0C
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_WR_REDUCTION 0x4C8AB10
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_RD_ATOMIC 0x4C8AB14
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_QOS 0x4C8AB18
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_RSVD 0x4C8AB1C
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_EMEM_CPAGE 0x4C8AB20
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_CORE 0x4C8AB24
+
+#define mmPDMA0_QM_AXUSER_SECURED_E2E_COORD 0x4C8AB28
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_WR_OVRD_LO 0x4C8AB30
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_WR_OVRD_HI 0x4C8AB34
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_RD_OVRD_LO 0x4C8AB38
+
+#define mmPDMA0_QM_AXUSER_SECURED_HB_RD_OVRD_HI 0x4C8AB3C
+
+#define mmPDMA0_QM_AXUSER_SECURED_LB_COORD 0x4C8AB40
+
+#define mmPDMA0_QM_AXUSER_SECURED_LB_LOCK 0x4C8AB44
+
+#define mmPDMA0_QM_AXUSER_SECURED_LB_RSVD 0x4C8AB48
+
+#define mmPDMA0_QM_AXUSER_SECURED_LB_OVRD 0x4C8AB4C
+
+#endif /* ASIC_REG_PDMA0_QM_AXUSER_SECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
new file mode 100644
index 000000000000..96c0ce176e73
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_QM_CGM_REGS_H_
+#define ASIC_REG_PDMA0_QM_CGM_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_QM_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmPDMA0_QM_CGM_CFG 0x4C8AD80
+
+#define mmPDMA0_QM_CGM_STS 0x4C8AD84
+
+#define mmPDMA0_QM_CGM_CFG1 0x4C8AD88
+
+#endif /* ASIC_REG_PDMA0_QM_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
new file mode 100644
index 000000000000..b79cae8f5571
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
@@ -0,0 +1,1165 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_QM_MASKS_H_
+#define ASIC_REG_PDMA0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * PDMA0_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+/* PDMA0_QM_GLBL_CFG0 */
+#define PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define PDMA0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define PDMA0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define PDMA0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+#define PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT 14
+#define PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_MASK 0x4000
+
+/* PDMA0_QM_GLBL_CFG1 */
+#define PDMA0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define PDMA0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define PDMA0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define PDMA0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define PDMA0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define PDMA0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define PDMA0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define PDMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define PDMA0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define PDMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define PDMA0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define PDMA0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* PDMA0_QM_GLBL_CFG2 */
+#define PDMA0_QM_GLBL_CFG2_ARC_CQF_STOP_SHIFT 0
+#define PDMA0_QM_GLBL_CFG2_ARC_CQF_STOP_MASK 0x1
+#define PDMA0_QM_GLBL_CFG2_ARC_CQF_FLUSH_SHIFT 1
+#define PDMA0_QM_GLBL_CFG2_ARC_CQF_FLUSH_MASK 0x2
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_AWUSER_OVRD_SHIFT 4
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_AWUSER_OVRD_MASK 0x10
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_ARUSER_OVRD_SHIFT 5
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_ARUSER_OVRD_MASK 0x20
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_AWUSER_OVRD_SHIFT 6
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_AWUSER_OVRD_MASK 0x40
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_ARUSER_OVRD_SHIFT 7
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_ARUSER_OVRD_MASK 0x80
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_AWPROT_OVRD_SHIFT 8
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_AWPROT_OVRD_MASK 0x100
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_ARPROT_OVRD_SHIFT 9
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_ARPROT_OVRD_MASK 0x200
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_AWPROT_OVRD_SHIFT 10
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_AWPROT_OVRD_MASK 0x400
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_ARPROT_OVRD_SHIFT 11
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_ARPROT_OVRD_MASK 0x800
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_AWCACHE_OVRD_SHIFT 12
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_AWCACHE_OVRD_MASK 0x1000
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_ARCACHE_OVRD_SHIFT 13
+#define PDMA0_QM_GLBL_CFG2_ARC_HBW_ARCACHE_OVRD_MASK 0x2000
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_AWCACHE_OVRD_SHIFT 14
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_AWCACHE_OVRD_MASK 0x4000
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_ARCACHE_OVRD_SHIFT 15
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_ARCACHE_OVRD_MASK 0x8000
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_BUSER_OVRD_SHIFT 16
+#define PDMA0_QM_GLBL_CFG2_ARC_LBW_BUSER_OVRD_MASK 0x10000
+
+/* PDMA0_QM_GLBL_ERR_CFG */
+#define PDMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define PDMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define PDMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define PDMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define PDMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define PDMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define PDMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define PDMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define PDMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define PDMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define PDMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define PDMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define PDMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* PDMA0_QM_GLBL_ERR_CFG1 */
+#define PDMA0_QM_GLBL_ERR_CFG1_CQF_ERR_MSG_EN_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_CFG1_CQF_ERR_MSG_EN_MASK 0x1
+#define PDMA0_QM_GLBL_ERR_CFG1_CQF_STOP_ON_ERR_SHIFT 1
+#define PDMA0_QM_GLBL_ERR_CFG1_CQF_STOP_ON_ERR_MASK 0x2
+#define PDMA0_QM_GLBL_ERR_CFG1_ARC_STOP_ON_ERR_SHIFT 2
+#define PDMA0_QM_GLBL_ERR_CFG1_ARC_STOP_ON_ERR_MASK 0x4
+
+/* PDMA0_QM_GLBL_ERR_ARC_HALT_EN */
+#define PDMA0_QM_GLBL_ERR_ARC_HALT_EN_ERR_IND_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_ARC_HALT_EN_ERR_IND_MASK 0xFFFFFF
+
+/* PDMA0_QM_GLBL_AXCACHE */
+#define PDMA0_QM_GLBL_AXCACHE_HBW_AR_SHIFT 0
+#define PDMA0_QM_GLBL_AXCACHE_HBW_AR_MASK 0xF
+#define PDMA0_QM_GLBL_AXCACHE_HBW_AW_SHIFT 16
+#define PDMA0_QM_GLBL_AXCACHE_HBW_AW_MASK 0xF0000
+#define PDMA0_QM_GLBL_AXCACHE_LBW_AW_SHIFT 20
+#define PDMA0_QM_GLBL_AXCACHE_LBW_AW_MASK 0xF00000
+#define PDMA0_QM_GLBL_AXCACHE_LBW_AR_SHIFT 24
+#define PDMA0_QM_GLBL_AXCACHE_LBW_AR_MASK 0xF000000
+
+/* PDMA0_QM_GLBL_STS0 */
+#define PDMA0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define PDMA0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define PDMA0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define PDMA0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define PDMA0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define PDMA0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define PDMA0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define PDMA0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define PDMA0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define PDMA0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define PDMA0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define PDMA0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define PDMA0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define PDMA0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* PDMA0_QM_GLBL_STS1 */
+#define PDMA0_QM_GLBL_STS1_ARC_CQF_IDLE_SHIFT 0
+#define PDMA0_QM_GLBL_STS1_ARC_CQF_IDLE_MASK 0x1
+#define PDMA0_QM_GLBL_STS1_ARC_CQF_IS_STOP_SHIFT 1
+#define PDMA0_QM_GLBL_STS1_ARC_CQF_IS_STOP_MASK 0x2
+
+/* PDMA0_QM_GLBL_ERR_STS */
+#define PDMA0_QM_GLBL_ERR_STS_PQF_RD_ERR_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_STS_PQF_RD_ERR_MASK 0x1
+#define PDMA0_QM_GLBL_ERR_STS_CQF_RD_ERR_SHIFT 1
+#define PDMA0_QM_GLBL_ERR_STS_CQF_RD_ERR_MASK 0x2
+#define PDMA0_QM_GLBL_ERR_STS_CP_RD_ERR_SHIFT 2
+#define PDMA0_QM_GLBL_ERR_STS_CP_RD_ERR_MASK 0x4
+#define PDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_SHIFT 3
+#define PDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_MASK 0x8
+#define PDMA0_QM_GLBL_ERR_STS_CP_STOP_OP_SHIFT 4
+#define PDMA0_QM_GLBL_ERR_STS_CP_STOP_OP_MASK 0x10
+#define PDMA0_QM_GLBL_ERR_STS_CP_MSG_WR_ERR_SHIFT 5
+#define PDMA0_QM_GLBL_ERR_STS_CP_MSG_WR_ERR_MASK 0x20
+#define PDMA0_QM_GLBL_ERR_STS_CP_WREG_ERR_SHIFT 6
+#define PDMA0_QM_GLBL_ERR_STS_CP_WREG_ERR_MASK 0x40
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE0_OVF_ERR_SHIFT 8
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE0_OVF_ERR_MASK 0x100
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE1_OVF_ERR_SHIFT 9
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE1_OVF_ERR_MASK 0x200
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE2_OVF_ERR_SHIFT 10
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE2_OVF_ERR_MASK 0x400
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE3_OVF_ERR_SHIFT 11
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE3_OVF_ERR_MASK 0x800
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE0_UDF_ERR_SHIFT 12
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE1_UDF_ERR_SHIFT 13
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE2_UDF_ERR_SHIFT 14
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE3_UDF_ERR_SHIFT 15
+#define PDMA0_QM_GLBL_ERR_STS_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define PDMA0_QM_GLBL_ERR_STS_CPDMA_UP_OVF_ERR_SHIFT 16
+#define PDMA0_QM_GLBL_ERR_STS_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define PDMA0_QM_GLBL_ERR_STS_PQC_L2H_ERR_SHIFT 17
+#define PDMA0_QM_GLBL_ERR_STS_PQC_L2H_ERR_MASK 0x20000
+#define PDMA0_QM_GLBL_ERR_STS_RSVD_18_24_SHIFT 18
+#define PDMA0_QM_GLBL_ERR_STS_RSVD_18_24_MASK 0x1FC0000
+
+/* PDMA0_QM_GLBL_ERR_STS_4 */
+#define PDMA0_QM_GLBL_ERR_STS_4_RSVD0_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_STS_4_RSVD0_MASK 0x1
+#define PDMA0_QM_GLBL_ERR_STS_4_CQF_RD_ERR_SHIFT 1
+#define PDMA0_QM_GLBL_ERR_STS_4_CQF_RD_ERR_MASK 0x2
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_RD_ERR_SHIFT 2
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_RD_ERR_MASK 0x4
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_STOP_OP_SHIFT 4
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_STOP_OP_MASK 0x10
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_MSG_WR_ERR_SHIFT 5
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_MSG_WR_ERR_MASK 0x20
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_WREG_ERR_SHIFT 6
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_WREG_ERR_MASK 0x40
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define PDMA0_QM_GLBL_ERR_STS_4_CPDMA_UP_OVF_ERR_SHIFT 16
+#define PDMA0_QM_GLBL_ERR_STS_4_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define PDMA0_QM_GLBL_ERR_STS_4_RSVD17_SHIFT 17
+#define PDMA0_QM_GLBL_ERR_STS_4_RSVD17_MASK 0x20000
+#define PDMA0_QM_GLBL_ERR_STS_4_CQ_WR_IFIFO_CI_ERR_SHIFT 18
+#define PDMA0_QM_GLBL_ERR_STS_4_CQ_WR_IFIFO_CI_ERR_MASK 0x40000
+#define PDMA0_QM_GLBL_ERR_STS_4_CQ_WR_CTL_CI_ERR_SHIFT 19
+#define PDMA0_QM_GLBL_ERR_STS_4_CQ_WR_CTL_CI_ERR_MASK 0x80000
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_CQF_RD_ERR_SHIFT 20
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_CQF_RD_ERR_MASK 0x100000
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_IFIFO_CI_ERR_SHIFT 21
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_IFIFO_CI_ERR_MASK 0x200000
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_CTL_CI_ERR_SHIFT 22
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_CQ_WR_CTL_CI_ERR_MASK 0x400000
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_AXI_ERR_SHIFT 23
+#define PDMA0_QM_GLBL_ERR_STS_4_ARC_AXI_ERR_MASK 0x800000
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_SWITCH_WDT_ERR_SHIFT 24
+#define PDMA0_QM_GLBL_ERR_STS_4_CP_SWITCH_WDT_ERR_MASK 0x1000000
+
+/* PDMA0_QM_GLBL_ERR_MSG_EN */
+#define PDMA0_QM_GLBL_ERR_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_RD_ERR_SHIFT 2
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_RD_ERR_MASK 0x4
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_STOP_OP_SHIFT 4
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_STOP_OP_MASK 0x10
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CPDMA_UP_OVF_ERR_SHIFT 16
+#define PDMA0_QM_GLBL_ERR_MSG_EN_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_PQC_L2H_ERR_SHIFT 17
+#define PDMA0_QM_GLBL_ERR_MSG_EN_PQC_L2H_ERR_MASK 0x20000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_RSVD_18_24_SHIFT 18
+#define PDMA0_QM_GLBL_ERR_MSG_EN_RSVD_18_24_MASK 0x1FC0000
+
+/* PDMA0_QM_GLBL_ERR_MSG_EN_4 */
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD0_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD0_MASK 0x1
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CPDMA_UP_OVF_ERR_SHIFT 16
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CPDMA_UP_OVF_ERR_MASK 0x10000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD17_SHIFT 17
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_RSVD17_MASK 0x20000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_IFIFO_CI_ERR_SHIFT 18
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_IFIFO_CI_ERR_MASK 0x40000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_CTL_CI_ERR_SHIFT 19
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CQ_WR_CTL_CI_ERR_MASK 0x80000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQF_RD_ERR_SHIFT 20
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQF_RD_ERR_MASK 0x100000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_IFIFO_CI_ERR_SHIFT 21
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_IFIFO_CI_ERR_MASK 0x200000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_CTL_CI_ERR_SHIFT 22
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_CQ_WR_CTL_CI_ERR_MASK 0x400000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_AXI_ERR_SHIFT 23
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_ARC_AXI_ERR_MASK 0x800000
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_SWITCH_WDT_ERR_SHIFT 24
+#define PDMA0_QM_GLBL_ERR_MSG_EN_4_CP_SWITCH_WDT_ERR_MASK 0x1000000
+
+/* PDMA0_QM_GLBL_PROT */
+#define PDMA0_QM_GLBL_PROT_PQF_SHIFT 0
+#define PDMA0_QM_GLBL_PROT_PQF_MASK 0xF
+#define PDMA0_QM_GLBL_PROT_CQF_SHIFT 4
+#define PDMA0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define PDMA0_QM_GLBL_PROT_CP_SHIFT 9
+#define PDMA0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define PDMA0_QM_GLBL_PROT_ERR_SHIFT 14
+#define PDMA0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define PDMA0_QM_GLBL_PROT_ARB_SHIFT 15
+#define PDMA0_QM_GLBL_PROT_ARB_MASK 0x8000
+#define PDMA0_QM_GLBL_PROT_PQC_SHIFT 16
+#define PDMA0_QM_GLBL_PROT_PQC_MASK 0x10000
+#define PDMA0_QM_GLBL_PROT_CQ_IFIFO_MSG_SHIFT 17
+#define PDMA0_QM_GLBL_PROT_CQ_IFIFO_MSG_MASK 0x20000
+#define PDMA0_QM_GLBL_PROT_ARC_CQ_IFIFO_MSG_SHIFT 18
+#define PDMA0_QM_GLBL_PROT_ARC_CQ_IFIFO_MSG_MASK 0x40000
+#define PDMA0_QM_GLBL_PROT_CQ_CTL_MSG_SHIFT 19
+#define PDMA0_QM_GLBL_PROT_CQ_CTL_MSG_MASK 0x80000
+#define PDMA0_QM_GLBL_PROT_ARC_CQ_CTL_MSG_SHIFT 20
+#define PDMA0_QM_GLBL_PROT_ARC_CQ_CTL_MSG_MASK 0x100000
+#define PDMA0_QM_GLBL_PROT_CP_WR_ARC_SHIFT 21
+#define PDMA0_QM_GLBL_PROT_CP_WR_ARC_MASK 0x200000
+#define PDMA0_QM_GLBL_PROT_ARC_CQF_SHIFT 22
+#define PDMA0_QM_GLBL_PROT_ARC_CQF_MASK 0x400000
+#define PDMA0_QM_GLBL_PROT_ARC_CORE_SHIFT 23
+#define PDMA0_QM_GLBL_PROT_ARC_CORE_MASK 0x800000
+
+/* PDMA0_QM_PQ_BASE_LO */
+#define PDMA0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQ_BASE_HI */
+#define PDMA0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQ_SIZE */
+#define PDMA0_QM_PQ_SIZE_VAL_SHIFT 0
+#define PDMA0_QM_PQ_SIZE_VAL_MASK 0x1F
+
+/* PDMA0_QM_PQ_PI */
+#define PDMA0_QM_PQ_PI_VAL_SHIFT 0
+#define PDMA0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQ_CI */
+#define PDMA0_QM_PQ_CI_VAL_SHIFT 0
+#define PDMA0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQ_CFG0 */
+#define PDMA0_QM_PQ_CFG0_FORCE_STALL_SHIFT 0
+#define PDMA0_QM_PQ_CFG0_FORCE_STALL_MASK 0x1
+
+/* PDMA0_QM_PQ_CFG1 */
+#define PDMA0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define PDMA0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFF
+#define PDMA0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define PDMA0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFF0000
+
+/* PDMA0_QM_PQ_STS0 */
+#define PDMA0_QM_PQ_STS0_CREDIT_CNT_SHIFT 0
+#define PDMA0_QM_PQ_STS0_CREDIT_CNT_MASK 0xFF
+#define PDMA0_QM_PQ_STS0_FREE_CNT_SHIFT 8
+#define PDMA0_QM_PQ_STS0_FREE_CNT_MASK 0xFF00
+#define PDMA0_QM_PQ_STS0_INFLIGHT_CNT_SHIFT 16
+#define PDMA0_QM_PQ_STS0_INFLIGHT_CNT_MASK 0xFF0000
+
+/* PDMA0_QM_PQ_STS1 */
+#define PDMA0_QM_PQ_STS1_BUF_EMPTY_SHIFT 0
+#define PDMA0_QM_PQ_STS1_BUF_EMPTY_MASK 0x1
+#define PDMA0_QM_PQ_STS1_BUSY_SHIFT 1
+#define PDMA0_QM_PQ_STS1_BUSY_MASK 0x2
+
+/* PDMA0_QM_CQ_CFG0 */
+#define PDMA0_QM_CQ_CFG0_IF_B2B_EN_SHIFT 0
+#define PDMA0_QM_CQ_CFG0_IF_B2B_EN_MASK 0x1
+#define PDMA0_QM_CQ_CFG0_IF_MSG_EN_SHIFT 1
+#define PDMA0_QM_CQ_CFG0_IF_MSG_EN_MASK 0x2
+#define PDMA0_QM_CQ_CFG0_CTL_MSG_EN_SHIFT 2
+#define PDMA0_QM_CQ_CFG0_CTL_MSG_EN_MASK 0x4
+
+/* PDMA0_QM_CQ_STS0 */
+#define PDMA0_QM_CQ_STS0_CREDIT_CNT_SHIFT 0
+#define PDMA0_QM_CQ_STS0_CREDIT_CNT_MASK 0xFF
+#define PDMA0_QM_CQ_STS0_FREE_CNT_SHIFT 8
+#define PDMA0_QM_CQ_STS0_FREE_CNT_MASK 0xFF00
+#define PDMA0_QM_CQ_STS0_INFLIGHT_CNT_SHIFT 16
+#define PDMA0_QM_CQ_STS0_INFLIGHT_CNT_MASK 0xFF0000
+
+/* PDMA0_QM_CQ_CFG1 */
+#define PDMA0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define PDMA0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFF
+#define PDMA0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define PDMA0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFF0000
+
+/* PDMA0_QM_CQ_STS1 */
+#define PDMA0_QM_CQ_STS1_BUF_EMPTY_SHIFT 0
+#define PDMA0_QM_CQ_STS1_BUF_EMPTY_MASK 0x1
+#define PDMA0_QM_CQ_STS1_BUSY_SHIFT 1
+#define PDMA0_QM_CQ_STS1_BUSY_MASK 0x2
+
+/* PDMA0_QM_CQ_PTR_LO_0 */
+#define PDMA0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_HI_0 */
+#define PDMA0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_TSIZE_0 */
+#define PDMA0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define PDMA0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_0 */
+#define PDMA0_QM_CQ_CTL_0_UP_SHIFT 28
+#define PDMA0_QM_CQ_CTL_0_UP_MASK 0xF0000000
+
+/* PDMA0_QM_CQ_PTR_LO_1 */
+#define PDMA0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_HI_1 */
+#define PDMA0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_TSIZE_1 */
+#define PDMA0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define PDMA0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_1 */
+#define PDMA0_QM_CQ_CTL_1_UP_SHIFT 28
+#define PDMA0_QM_CQ_CTL_1_UP_MASK 0xF0000000
+
+/* PDMA0_QM_CQ_PTR_LO_2 */
+#define PDMA0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_HI_2 */
+#define PDMA0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_TSIZE_2 */
+#define PDMA0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define PDMA0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_2 */
+#define PDMA0_QM_CQ_CTL_2_UP_SHIFT 28
+#define PDMA0_QM_CQ_CTL_2_UP_MASK 0xF0000000
+
+/* PDMA0_QM_CQ_PTR_LO_3 */
+#define PDMA0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_HI_3 */
+#define PDMA0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_TSIZE_3 */
+#define PDMA0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define PDMA0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_3 */
+#define PDMA0_QM_CQ_CTL_3_UP_SHIFT 28
+#define PDMA0_QM_CQ_CTL_3_UP_MASK 0xF0000000
+
+/* PDMA0_QM_CQ_PTR_LO_4 */
+#define PDMA0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_HI_4 */
+#define PDMA0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_TSIZE_4 */
+#define PDMA0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define PDMA0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_4 */
+#define PDMA0_QM_CQ_CTL_4_UP_SHIFT 28
+#define PDMA0_QM_CQ_CTL_4_UP_MASK 0xF0000000
+
+/* PDMA0_QM_CQ_TSIZE_STS */
+#define PDMA0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define PDMA0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_LO_STS */
+#define PDMA0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_PTR_HI_STS */
+#define PDMA0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define PDMA0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_IFIFO_STS */
+#define PDMA0_QM_CQ_IFIFO_STS_CNT_SHIFT 0
+#define PDMA0_QM_CQ_IFIFO_STS_CNT_MASK 0x7
+#define PDMA0_QM_CQ_IFIFO_STS_RDY_SHIFT 4
+#define PDMA0_QM_CQ_IFIFO_STS_RDY_MASK 0x10
+#define PDMA0_QM_CQ_IFIFO_STS_CTL_STALL_SHIFT 8
+#define PDMA0_QM_CQ_IFIFO_STS_CTL_STALL_MASK 0x100
+
+/* PDMA0_QM_CP_MSG_BASE0_ADDR_LO */
+#define PDMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE0_ADDR_HI */
+#define PDMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE1_ADDR_LO */
+#define PDMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE1_ADDR_HI */
+#define PDMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE2_ADDR_LO */
+#define PDMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE2_ADDR_HI */
+#define PDMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE3_ADDR_LO */
+#define PDMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_MSG_BASE3_ADDR_HI */
+#define PDMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_FENCE0_RDATA */
+#define PDMA0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* PDMA0_QM_CP_FENCE1_RDATA */
+#define PDMA0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* PDMA0_QM_CP_FENCE2_RDATA */
+#define PDMA0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* PDMA0_QM_CP_FENCE3_RDATA */
+#define PDMA0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* PDMA0_QM_CP_FENCE0_CNT */
+#define PDMA0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* PDMA0_QM_CP_FENCE1_CNT */
+#define PDMA0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* PDMA0_QM_CP_FENCE2_CNT */
+#define PDMA0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* PDMA0_QM_CP_FENCE3_CNT */
+#define PDMA0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define PDMA0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* PDMA0_QM_CP_BARRIER_CFG */
+#define PDMA0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define PDMA0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define PDMA0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define PDMA0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* PDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define PDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define PDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define PDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define PDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_LDMA_TSIZE_OFFSET */
+#define PDMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define PDMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_CQ_PTR_LO_OFFSET_0 */
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_0_VAL_SHIFT 0
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_0_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_CQ_PTR_LO_OFFSET_1 */
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_1_VAL_SHIFT 0
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_1_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_CQ_PTR_LO_OFFSET_2 */
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_2_VAL_SHIFT 0
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_2_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_CQ_PTR_LO_OFFSET_3 */
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_3_VAL_SHIFT 0
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_3_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_CQ_PTR_LO_OFFSET_4 */
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_4_VAL_SHIFT 0
+#define PDMA0_QM_CP_CQ_PTR_LO_OFFSET_4_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_CP_STS */
+#define PDMA0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define PDMA0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFF
+#define PDMA0_QM_CP_STS_ERDY_SHIFT 8
+#define PDMA0_QM_CP_STS_ERDY_MASK 0x100
+#define PDMA0_QM_CP_STS_SWITCH_EN_SHIFT 9
+#define PDMA0_QM_CP_STS_SWITCH_EN_MASK 0x200
+#define PDMA0_QM_CP_STS_MRDY_SHIFT 10
+#define PDMA0_QM_CP_STS_MRDY_MASK 0x400
+#define PDMA0_QM_CP_STS_SW_STOP_SHIFT 11
+#define PDMA0_QM_CP_STS_SW_STOP_MASK 0x800
+#define PDMA0_QM_CP_STS_FENCE_ID_SHIFT 12
+#define PDMA0_QM_CP_STS_FENCE_ID_MASK 0x3000
+#define PDMA0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 14
+#define PDMA0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x4000
+#define PDMA0_QM_CP_STS_FENCE_TARGET_SHIFT 16
+#define PDMA0_QM_CP_STS_FENCE_TARGET_MASK 0x3FFF0000
+#define PDMA0_QM_CP_STS_CUR_CQ_SHIFT 30
+#define PDMA0_QM_CP_STS_CUR_CQ_MASK 0x40000000
+
+/* PDMA0_QM_CP_CURRENT_INST_LO */
+#define PDMA0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_CURRENT_INST_HI */
+#define PDMA0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_PRED */
+#define PDMA0_QM_CP_PRED_VAL_SHIFT 0
+#define PDMA0_QM_CP_PRED_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_PRED_UPEN */
+#define PDMA0_QM_CP_PRED_UPEN_VAL_SHIFT 0
+#define PDMA0_QM_CP_PRED_UPEN_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_DBG_0 */
+#define PDMA0_QM_CP_DBG_0_CS_SHIFT 0
+#define PDMA0_QM_CP_DBG_0_CS_MASK 0x1F
+#define PDMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 5
+#define PDMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x20
+#define PDMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 6
+#define PDMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x40
+#define PDMA0_QM_CP_DBG_0_MREB_STALL_SHIFT 7
+#define PDMA0_QM_CP_DBG_0_MREB_STALL_MASK 0x80
+#define PDMA0_QM_CP_DBG_0_STALL_SHIFT 8
+#define PDMA0_QM_CP_DBG_0_STALL_MASK 0x100
+
+/* PDMA0_QM_CP_CPDMA_UP_CRED */
+#define PDMA0_QM_CP_CPDMA_UP_CRED_TH_SHIFT 0
+#define PDMA0_QM_CP_CPDMA_UP_CRED_TH_MASK 0x3
+#define PDMA0_QM_CP_CPDMA_UP_CRED_VAL_SHIFT 8
+#define PDMA0_QM_CP_CPDMA_UP_CRED_VAL_MASK 0x300
+
+/* PDMA0_QM_CP_IN_DATA_LO */
+#define PDMA0_QM_CP_IN_DATA_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_IN_DATA_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_IN_DATA_HI */
+#define PDMA0_QM_CP_IN_DATA_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_IN_DATA_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_HBW_BASE_LO */
+#define PDMA0_QM_PQC_HBW_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_PQC_HBW_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_HBW_BASE_HI */
+#define PDMA0_QM_PQC_HBW_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_PQC_HBW_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_SIZE */
+#define PDMA0_QM_PQC_SIZE_VAL_SHIFT 0
+#define PDMA0_QM_PQC_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_PI */
+#define PDMA0_QM_PQC_PI_VAL_SHIFT 0
+#define PDMA0_QM_PQC_PI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_LBW_WDATA */
+#define PDMA0_QM_PQC_LBW_WDATA_VAL_SHIFT 0
+#define PDMA0_QM_PQC_LBW_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_LBW_BASE_LO */
+#define PDMA0_QM_PQC_LBW_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_PQC_LBW_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_LBW_BASE_HI */
+#define PDMA0_QM_PQC_LBW_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_PQC_LBW_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PQC_CFG */
+#define PDMA0_QM_PQC_CFG_EN_SHIFT 0
+#define PDMA0_QM_PQC_CFG_EN_MASK 0x1
+#define PDMA0_QM_PQC_CFG_DIRECT_SHIFT 4
+#define PDMA0_QM_PQC_CFG_DIRECT_MASK 0x10
+
+/* PDMA0_QM_PQC_SECURE_PUSH_IND */
+#define PDMA0_QM_PQC_SECURE_PUSH_IND_CP_NUM_SHIFT 0
+#define PDMA0_QM_PQC_SECURE_PUSH_IND_CP_NUM_MASK 0x3
+
+/* PDMA0_QM_ARB_MASK */
+#define PDMA0_QM_ARB_MASK_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MASK_VAL_MASK 0xF
+
+/* PDMA0_QM_ARB_CFG_0 */
+#define PDMA0_QM_ARB_CFG_0_PRIO_TYPE_SHIFT 0
+#define PDMA0_QM_ARB_CFG_0_PRIO_TYPE_MASK 0x1
+#define PDMA0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define PDMA0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define PDMA0_QM_ARB_CFG_0_EN_SHIFT 8
+#define PDMA0_QM_ARB_CFG_0_EN_MASK 0x100
+#define PDMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 9
+#define PDMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x200
+
+/* PDMA0_QM_ARB_CHOICE_Q_PUSH */
+#define PDMA0_QM_ARB_CHOICE_Q_PUSH_VAL_SHIFT 0
+#define PDMA0_QM_ARB_CHOICE_Q_PUSH_VAL_MASK 0x3
+
+/* PDMA0_QM_ARB_WRR_WEIGHT */
+#define PDMA0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define PDMA0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFF
+
+/* PDMA0_QM_ARB_CFG_1 */
+#define PDMA0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define PDMA0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* PDMA0_QM_ARB_MST_AVAIL_CRED */
+#define PDMA0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* PDMA0_QM_ARB_MST_CRED_INC */
+#define PDMA0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_MST_CHOICE_PUSH_OFST */
+#define PDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define PDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define PDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_MST_SLAVE_EN */
+#define PDMA0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_MST_SLAVE_EN_1 */
+#define PDMA0_QM_ARB_MST_SLAVE_EN_1_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_SLAVE_EN_1_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_SLV_CHOICE_WDT */
+#define PDMA0_QM_ARB_SLV_CHOICE_WDT_VAL_SHIFT 0
+#define PDMA0_QM_ARB_SLV_CHOICE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_SLV_ID */
+#define PDMA0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define PDMA0_QM_ARB_SLV_ID_VAL_MASK 0x7F
+
+/* PDMA0_QM_ARB_MST_QUIET_PER */
+#define PDMA0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_MSG_MAX_INFLIGHT */
+#define PDMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* PDMA0_QM_ARB_BASE_LO */
+#define PDMA0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_BASE_HI */
+#define PDMA0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_STATE_STS */
+#define PDMA0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define PDMA0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARB_CHOICE_FULLNESS_STS */
+#define PDMA0_QM_ARB_CHOICE_FULLNESS_STS_VAL_SHIFT 0
+#define PDMA0_QM_ARB_CHOICE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* PDMA0_QM_ARB_MSG_STS */
+#define PDMA0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define PDMA0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define PDMA0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define PDMA0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* PDMA0_QM_ARB_SLV_CHOICE_Q_HEAD */
+#define PDMA0_QM_ARB_SLV_CHOICE_Q_HEAD_VAL_SHIFT 0
+#define PDMA0_QM_ARB_SLV_CHOICE_Q_HEAD_VAL_MASK 0x3
+
+/* PDMA0_QM_ARB_ERR_CAUSE */
+#define PDMA0_QM_ARB_ERR_CAUSE_CHOICE_OVF_SHIFT 0
+#define PDMA0_QM_ARB_ERR_CAUSE_CHOICE_OVF_MASK 0x1
+#define PDMA0_QM_ARB_ERR_CAUSE_CHOICE_WDT_SHIFT 1
+#define PDMA0_QM_ARB_ERR_CAUSE_CHOICE_WDT_MASK 0x2
+#define PDMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define PDMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* PDMA0_QM_ARB_ERR_MSG_EN */
+#define PDMA0_QM_ARB_ERR_MSG_EN_CHOICE_OVF_SHIFT 0
+#define PDMA0_QM_ARB_ERR_MSG_EN_CHOICE_OVF_MASK 0x1
+#define PDMA0_QM_ARB_ERR_MSG_EN_CHOICE_WDT_SHIFT 1
+#define PDMA0_QM_ARB_ERR_MSG_EN_CHOICE_WDT_MASK 0x2
+#define PDMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define PDMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* PDMA0_QM_ARB_ERR_STS_DRP */
+#define PDMA0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define PDMA0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* PDMA0_QM_ARB_MST_CRED_STS */
+#define PDMA0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+#define PDMA0_QM_ARB_MST_CRED_STS_IDX_SHIFT 24
+#define PDMA0_QM_ARB_MST_CRED_STS_IDX_MASK 0x1F000000
+
+/* PDMA0_QM_ARB_MST_CRED_STS_1 */
+#define PDMA0_QM_ARB_MST_CRED_STS_1_VAL_SHIFT 0
+#define PDMA0_QM_ARB_MST_CRED_STS_1_VAL_MASK 0x7F
+#define PDMA0_QM_ARB_MST_CRED_STS_1_IDX_SHIFT 24
+#define PDMA0_QM_ARB_MST_CRED_STS_1_IDX_MASK 0x1F000000
+
+/* PDMA0_QM_CSMR_STRICT_PRIO_CFG */
+#define PDMA0_QM_CSMR_STRICT_PRIO_CFG_ARB_TYPE_SHIFT 0
+#define PDMA0_QM_CSMR_STRICT_PRIO_CFG_ARB_TYPE_MASK 0x1
+#define PDMA0_QM_CSMR_STRICT_PRIO_CFG_PER_ENTRY_SHIFT 4
+#define PDMA0_QM_CSMR_STRICT_PRIO_CFG_PER_ENTRY_MASK 0x10
+
+/* PDMA0_QM_ARC_CQ_CFG0 */
+#define PDMA0_QM_ARC_CQ_CFG0_IF_B2B_EN_SHIFT 0
+#define PDMA0_QM_ARC_CQ_CFG0_IF_B2B_EN_MASK 0x1
+#define PDMA0_QM_ARC_CQ_CFG0_IF_MSG_EN_SHIFT 1
+#define PDMA0_QM_ARC_CQ_CFG0_IF_MSG_EN_MASK 0x2
+#define PDMA0_QM_ARC_CQ_CFG0_CTL_MSG_EN_SHIFT 2
+#define PDMA0_QM_ARC_CQ_CFG0_CTL_MSG_EN_MASK 0x4
+
+/* PDMA0_QM_ARC_CQ_CFG1 */
+#define PDMA0_QM_ARC_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define PDMA0_QM_ARC_CQ_CFG1_CREDIT_LIM_MASK 0xFF
+#define PDMA0_QM_ARC_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define PDMA0_QM_ARC_CQ_CFG1_MAX_INFLIGHT_MASK 0xFF0000
+
+/* PDMA0_QM_ARC_CQ_PTR_LO */
+#define PDMA0_QM_ARC_CQ_PTR_LO_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_PTR_HI */
+#define PDMA0_QM_ARC_CQ_PTR_HI_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_TSIZE */
+#define PDMA0_QM_ARC_CQ_TSIZE_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_CTL */
+#define PDMA0_QM_ARC_CQ_CTL_UP_SHIFT 28
+#define PDMA0_QM_ARC_CQ_CTL_UP_MASK 0xF0000000
+
+/* PDMA0_QM_ARC_CQ_IFIFO_STS */
+#define PDMA0_QM_ARC_CQ_IFIFO_STS_CNT_SHIFT 0
+#define PDMA0_QM_ARC_CQ_IFIFO_STS_CNT_MASK 0x7
+#define PDMA0_QM_ARC_CQ_IFIFO_STS_RDY_SHIFT 4
+#define PDMA0_QM_ARC_CQ_IFIFO_STS_RDY_MASK 0x10
+#define PDMA0_QM_ARC_CQ_IFIFO_STS_CTL_STALL_SHIFT 8
+#define PDMA0_QM_ARC_CQ_IFIFO_STS_CTL_STALL_MASK 0x100
+
+/* PDMA0_QM_ARC_CQ_STS0 */
+#define PDMA0_QM_ARC_CQ_STS0_CREDIT_CNT_SHIFT 0
+#define PDMA0_QM_ARC_CQ_STS0_CREDIT_CNT_MASK 0xFF
+#define PDMA0_QM_ARC_CQ_STS0_FREE_CNT_SHIFT 8
+#define PDMA0_QM_ARC_CQ_STS0_FREE_CNT_MASK 0xFF00
+#define PDMA0_QM_ARC_CQ_STS0_INFLIGHT_CNT_SHIFT 16
+#define PDMA0_QM_ARC_CQ_STS0_INFLIGHT_CNT_MASK 0xFF0000
+
+/* PDMA0_QM_ARC_CQ_STS1 */
+#define PDMA0_QM_ARC_CQ_STS1_BUF_EMPTY_SHIFT 0
+#define PDMA0_QM_ARC_CQ_STS1_BUF_EMPTY_MASK 0x1
+#define PDMA0_QM_ARC_CQ_STS1_BUSY_SHIFT 1
+#define PDMA0_QM_ARC_CQ_STS1_BUSY_MASK 0x2
+
+/* PDMA0_QM_ARC_CQ_TSIZE_STS */
+#define PDMA0_QM_ARC_CQ_TSIZE_STS_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_PTR_LO_STS */
+#define PDMA0_QM_ARC_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_PTR_HI_STS */
+#define PDMA0_QM_ARC_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_WR_ARC_ADDR_HI */
+#define PDMA0_QM_CP_WR_ARC_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_CP_WR_ARC_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_WR_ARC_ADDR_LO */
+#define PDMA0_QM_CP_WR_ARC_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_CP_WR_ARC_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI */
+#define PDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO */
+#define PDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI */
+#define PDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO */
+#define PDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_IFIFO_MSG_BASE_HI */
+#define PDMA0_QM_CQ_IFIFO_MSG_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_CQ_IFIFO_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_IFIFO_MSG_BASE_LO */
+#define PDMA0_QM_CQ_IFIFO_MSG_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_CQ_IFIFO_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_MSG_BASE_HI */
+#define PDMA0_QM_CQ_CTL_MSG_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_CQ_CTL_MSG_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_MSG_BASE_LO */
+#define PDMA0_QM_CQ_CTL_MSG_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_CQ_CTL_MSG_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ADDR_OVRD */
+#define PDMA0_QM_ADDR_OVRD_IDX_SHIFT 0
+#define PDMA0_QM_ADDR_OVRD_IDX_MASK 0xFF
+
+/* PDMA0_QM_CQ_IFIFO_CI */
+#define PDMA0_QM_CQ_IFIFO_CI_VAL_SHIFT 0
+#define PDMA0_QM_CQ_IFIFO_CI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_IFIFO_CI */
+#define PDMA0_QM_ARC_CQ_IFIFO_CI_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_IFIFO_CI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CQ_CTL_CI */
+#define PDMA0_QM_CQ_CTL_CI_VAL_SHIFT 0
+#define PDMA0_QM_CQ_CTL_CI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_CQ_CTL_CI */
+#define PDMA0_QM_ARC_CQ_CTL_CI_VAL_SHIFT 0
+#define PDMA0_QM_ARC_CQ_CTL_CI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_CFG */
+#define PDMA0_QM_CP_CFG_SWITCH_EN_SHIFT 0
+#define PDMA0_QM_CP_CFG_SWITCH_EN_MASK 0x1
+#define PDMA0_QM_CP_CFG_SWITCH_WD_EN_SHIFT 1
+#define PDMA0_QM_CP_CFG_SWITCH_WD_EN_MASK 0x2
+
+/* PDMA0_QM_CP_EXT_SWITCH */
+#define PDMA0_QM_CP_EXT_SWITCH_VAL_SHIFT 0
+#define PDMA0_QM_CP_EXT_SWITCH_VAL_MASK 0x1
+
+/* PDMA0_QM_CP_SWITCH_WD_SET */
+#define PDMA0_QM_CP_SWITCH_WD_SET_VAL_SHIFT 0
+#define PDMA0_QM_CP_SWITCH_WD_SET_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_CP_SWITCH_WD */
+#define PDMA0_QM_CP_SWITCH_WD_VAL_SHIFT 0
+#define PDMA0_QM_CP_SWITCH_WD_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_LB_ADDR_BASE_LO */
+#define PDMA0_QM_ARC_LB_ADDR_BASE_LO_VAL_SHIFT 0
+#define PDMA0_QM_ARC_LB_ADDR_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_LB_ADDR_BASE_HI */
+#define PDMA0_QM_ARC_LB_ADDR_BASE_HI_VAL_SHIFT 0
+#define PDMA0_QM_ARC_LB_ADDR_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ENGINE_BASE_ADDR_HI */
+#define PDMA0_QM_ENGINE_BASE_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_ENGINE_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ENGINE_BASE_ADDR_LO */
+#define PDMA0_QM_ENGINE_BASE_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_ENGINE_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ENGINE_ADDR_RANGE_SIZE */
+#define PDMA0_QM_ENGINE_ADDR_RANGE_SIZE_VAL_SHIFT 0
+#define PDMA0_QM_ENGINE_ADDR_RANGE_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI */
+#define PDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO */
+#define PDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_QM_BASE_ADDR_HI */
+#define PDMA0_QM_QM_BASE_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_QM_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_QM_BASE_ADDR_LO */
+#define PDMA0_QM_QM_BASE_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_QM_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_ARC_PQC_SECURE_PUSH_IND */
+#define PDMA0_QM_ARC_PQC_SECURE_PUSH_IND_CP_NUM_SHIFT 0
+#define PDMA0_QM_ARC_PQC_SECURE_PUSH_IND_CP_NUM_MASK 0x3
+
+/* PDMA0_QM_PQC_STS_0 */
+#define PDMA0_QM_PQC_STS_0_COMP_DATA_SHIFT 0
+#define PDMA0_QM_PQC_STS_0_COMP_DATA_MASK 0xFFFF
+#define PDMA0_QM_PQC_STS_0_COMP_OFST_SHIFT 16
+#define PDMA0_QM_PQC_STS_0_COMP_OFST_MASK 0xFFFF0000
+
+/* PDMA0_QM_PQC_STS_1 */
+#define PDMA0_QM_PQC_STS_1_COMP_FIFO_CNTR_SHIFT 0
+#define PDMA0_QM_PQC_STS_1_COMP_FIFO_CNTR_MASK 0xF
+#define PDMA0_QM_PQC_STS_1_COMP_FIFO_EMPTY_SHIFT 4
+#define PDMA0_QM_PQC_STS_1_COMP_FIFO_EMPTY_MASK 0x10
+#define PDMA0_QM_PQC_STS_1_COMP_FIFO_FULL_SHIFT 5
+#define PDMA0_QM_PQC_STS_1_COMP_FIFO_FULL_MASK 0x20
+
+/* PDMA0_QM_SEI_STATUS */
+#define PDMA0_QM_SEI_STATUS_QM_INT_SHIFT 0
+#define PDMA0_QM_SEI_STATUS_QM_INT_MASK 0x1
+#define PDMA0_QM_SEI_STATUS_ARC_INT_SHIFT 1
+#define PDMA0_QM_SEI_STATUS_ARC_INT_MASK 0x2
+
+/* PDMA0_QM_SEI_MASK */
+#define PDMA0_QM_SEI_MASK_QM_INT_SHIFT 0
+#define PDMA0_QM_SEI_MASK_QM_INT_MASK 0x1
+#define PDMA0_QM_SEI_MASK_ARC_INT_SHIFT 1
+#define PDMA0_QM_SEI_MASK_ARC_INT_MASK 0x2
+
+/* PDMA0_QM_GLBL_ERR_ADDR_LO */
+#define PDMA0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_GLBL_ERR_ADDR_HI */
+#define PDMA0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_GLBL_ERR_WDATA */
+#define PDMA0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define PDMA0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_L2H_MASK_LO */
+#define PDMA0_QM_L2H_MASK_LO_VAL_SHIFT 20
+#define PDMA0_QM_L2H_MASK_LO_VAL_MASK 0xFFF00000
+
+/* PDMA0_QM_L2H_MASK_HI */
+#define PDMA0_QM_L2H_MASK_HI_VAL_SHIFT 0
+#define PDMA0_QM_L2H_MASK_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_L2H_CMPR_LO */
+#define PDMA0_QM_L2H_CMPR_LO_VAL_SHIFT 20
+#define PDMA0_QM_L2H_CMPR_LO_VAL_MASK 0xFFF00000
+
+/* PDMA0_QM_L2H_CMPR_HI */
+#define PDMA0_QM_L2H_CMPR_HI_VAL_SHIFT 0
+#define PDMA0_QM_L2H_CMPR_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_LOCAL_RANGE_BASE */
+#define PDMA0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define PDMA0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_LOCAL_RANGE_SIZE */
+#define PDMA0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define PDMA0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* PDMA0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* PDMA0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* PDMA0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define PDMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* PDMA0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define PDMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* PDMA0_QM_IND_GW_APB_CFG */
+#define PDMA0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define PDMA0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define PDMA0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define PDMA0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* PDMA0_QM_IND_GW_APB_WDATA */
+#define PDMA0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define PDMA0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_IND_GW_APB_RDATA */
+#define PDMA0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define PDMA0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_IND_GW_APB_STATUS */
+#define PDMA0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define PDMA0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define PDMA0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define PDMA0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* PDMA0_QM_PERF_CNT_FREE_LO */
+#define PDMA0_QM_PERF_CNT_FREE_LO_VAL_SHIFT 0
+#define PDMA0_QM_PERF_CNT_FREE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PERF_CNT_FREE_HI */
+#define PDMA0_QM_PERF_CNT_FREE_HI_VAL_SHIFT 0
+#define PDMA0_QM_PERF_CNT_FREE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PERF_CNT_IDLE_LO */
+#define PDMA0_QM_PERF_CNT_IDLE_LO_VAL_SHIFT 0
+#define PDMA0_QM_PERF_CNT_IDLE_LO_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PERF_CNT_IDLE_HI */
+#define PDMA0_QM_PERF_CNT_IDLE_HI_VAL_SHIFT 0
+#define PDMA0_QM_PERF_CNT_IDLE_HI_VAL_MASK 0xFFFFFFFF
+
+/* PDMA0_QM_PERF_CNT_CFG */
+#define PDMA0_QM_PERF_CNT_CFG_PQ_MASK_SHIFT 0
+#define PDMA0_QM_PERF_CNT_CFG_PQ_MASK_MASK 0xF
+#define PDMA0_QM_PERF_CNT_CFG_CQ_MASK_SHIFT 8
+#define PDMA0_QM_PERF_CNT_CFG_CQ_MASK_MASK 0x1F00
+#define PDMA0_QM_PERF_CNT_CFG_CP_MASK_SHIFT 16
+#define PDMA0_QM_PERF_CNT_CFG_CP_MASK_MASK 0x1F0000
+#define PDMA0_QM_PERF_CNT_CFG_AGENT_MASK_SHIFT 24
+#define PDMA0_QM_PERF_CNT_CFG_AGENT_MASK_MASK 0x1000000
+#define PDMA0_QM_PERF_CNT_CFG_EN_FREE_SHIFT 30
+#define PDMA0_QM_PERF_CNT_CFG_EN_FREE_MASK 0x40000000
+#define PDMA0_QM_PERF_CNT_CFG_EN_IDLE_SHIFT 31
+#define PDMA0_QM_PERF_CNT_CFG_EN_IDLE_MASK 0x80000000
+
+#endif /* ASIC_REG_PDMA0_QM_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
new file mode 100644
index 000000000000..77d803c938d4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA0_QM_REGS_H_
+#define ASIC_REG_PDMA0_QM_REGS_H_
+
+/*
+ *****************************************
+ * PDMA0_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmPDMA0_QM_GLBL_CFG0 0x4C8A000
+
+#define mmPDMA0_QM_GLBL_CFG1 0x4C8A004
+
+#define mmPDMA0_QM_GLBL_CFG2 0x4C8A008
+
+#define mmPDMA0_QM_GLBL_ERR_CFG 0x4C8A00C
+
+#define mmPDMA0_QM_GLBL_ERR_CFG1 0x4C8A010
+
+#define mmPDMA0_QM_GLBL_ERR_ARC_HALT_EN 0x4C8A014
+
+#define mmPDMA0_QM_GLBL_AXCACHE 0x4C8A018
+
+#define mmPDMA0_QM_GLBL_STS0 0x4C8A01C
+
+#define mmPDMA0_QM_GLBL_STS1 0x4C8A020
+
+#define mmPDMA0_QM_GLBL_ERR_STS_0 0x4C8A024
+
+#define mmPDMA0_QM_GLBL_ERR_STS_1 0x4C8A028
+
+#define mmPDMA0_QM_GLBL_ERR_STS_2 0x4C8A02C
+
+#define mmPDMA0_QM_GLBL_ERR_STS_3 0x4C8A030
+
+#define mmPDMA0_QM_GLBL_ERR_STS_4 0x4C8A034
+
+#define mmPDMA0_QM_GLBL_ERR_MSG_EN_0 0x4C8A038
+
+#define mmPDMA0_QM_GLBL_ERR_MSG_EN_1 0x4C8A03C
+
+#define mmPDMA0_QM_GLBL_ERR_MSG_EN_2 0x4C8A040
+
+#define mmPDMA0_QM_GLBL_ERR_MSG_EN_3 0x4C8A044
+
+#define mmPDMA0_QM_GLBL_ERR_MSG_EN_4 0x4C8A048
+
+#define mmPDMA0_QM_GLBL_PROT 0x4C8A04C
+
+#define mmPDMA0_QM_PQ_BASE_LO_0 0x4C8A050
+
+#define mmPDMA0_QM_PQ_BASE_LO_1 0x4C8A054
+
+#define mmPDMA0_QM_PQ_BASE_LO_2 0x4C8A058
+
+#define mmPDMA0_QM_PQ_BASE_LO_3 0x4C8A05C
+
+#define mmPDMA0_QM_PQ_BASE_HI_0 0x4C8A060
+
+#define mmPDMA0_QM_PQ_BASE_HI_1 0x4C8A064
+
+#define mmPDMA0_QM_PQ_BASE_HI_2 0x4C8A068
+
+#define mmPDMA0_QM_PQ_BASE_HI_3 0x4C8A06C
+
+#define mmPDMA0_QM_PQ_SIZE_0 0x4C8A070
+
+#define mmPDMA0_QM_PQ_SIZE_1 0x4C8A074
+
+#define mmPDMA0_QM_PQ_SIZE_2 0x4C8A078
+
+#define mmPDMA0_QM_PQ_SIZE_3 0x4C8A07C
+
+#define mmPDMA0_QM_PQ_PI_0 0x4C8A080
+
+#define mmPDMA0_QM_PQ_PI_1 0x4C8A084
+
+#define mmPDMA0_QM_PQ_PI_2 0x4C8A088
+
+#define mmPDMA0_QM_PQ_PI_3 0x4C8A08C
+
+#define mmPDMA0_QM_PQ_CI_0 0x4C8A090
+
+#define mmPDMA0_QM_PQ_CI_1 0x4C8A094
+
+#define mmPDMA0_QM_PQ_CI_2 0x4C8A098
+
+#define mmPDMA0_QM_PQ_CI_3 0x4C8A09C
+
+#define mmPDMA0_QM_PQ_CFG0_0 0x4C8A0A0
+
+#define mmPDMA0_QM_PQ_CFG0_1 0x4C8A0A4
+
+#define mmPDMA0_QM_PQ_CFG0_2 0x4C8A0A8
+
+#define mmPDMA0_QM_PQ_CFG0_3 0x4C8A0AC
+
+#define mmPDMA0_QM_PQ_CFG1_0 0x4C8A0B0
+
+#define mmPDMA0_QM_PQ_CFG1_1 0x4C8A0B4
+
+#define mmPDMA0_QM_PQ_CFG1_2 0x4C8A0B8
+
+#define mmPDMA0_QM_PQ_CFG1_3 0x4C8A0BC
+
+#define mmPDMA0_QM_PQ_STS0_0 0x4C8A0C0
+
+#define mmPDMA0_QM_PQ_STS0_1 0x4C8A0C4
+
+#define mmPDMA0_QM_PQ_STS0_2 0x4C8A0C8
+
+#define mmPDMA0_QM_PQ_STS0_3 0x4C8A0CC
+
+#define mmPDMA0_QM_PQ_STS1_0 0x4C8A0D0
+
+#define mmPDMA0_QM_PQ_STS1_1 0x4C8A0D4
+
+#define mmPDMA0_QM_PQ_STS1_2 0x4C8A0D8
+
+#define mmPDMA0_QM_PQ_STS1_3 0x4C8A0DC
+
+#define mmPDMA0_QM_CQ_CFG0_0 0x4C8A0E0
+
+#define mmPDMA0_QM_CQ_CFG0_1 0x4C8A0E4
+
+#define mmPDMA0_QM_CQ_CFG0_2 0x4C8A0E8
+
+#define mmPDMA0_QM_CQ_CFG0_3 0x4C8A0EC
+
+#define mmPDMA0_QM_CQ_CFG0_4 0x4C8A0F0
+
+#define mmPDMA0_QM_CQ_STS0_0 0x4C8A0F4
+
+#define mmPDMA0_QM_CQ_STS0_1 0x4C8A0F8
+
+#define mmPDMA0_QM_CQ_STS0_2 0x4C8A0FC
+
+#define mmPDMA0_QM_CQ_STS0_3 0x4C8A100
+
+#define mmPDMA0_QM_CQ_STS0_4 0x4C8A104
+
+#define mmPDMA0_QM_CQ_CFG1_0 0x4C8A108
+
+#define mmPDMA0_QM_CQ_CFG1_1 0x4C8A10C
+
+#define mmPDMA0_QM_CQ_CFG1_2 0x4C8A110
+
+#define mmPDMA0_QM_CQ_CFG1_3 0x4C8A114
+
+#define mmPDMA0_QM_CQ_CFG1_4 0x4C8A118
+
+#define mmPDMA0_QM_CQ_STS1_0 0x4C8A11C
+
+#define mmPDMA0_QM_CQ_STS1_1 0x4C8A120
+
+#define mmPDMA0_QM_CQ_STS1_2 0x4C8A124
+
+#define mmPDMA0_QM_CQ_STS1_3 0x4C8A128
+
+#define mmPDMA0_QM_CQ_STS1_4 0x4C8A12C
+
+#define mmPDMA0_QM_CQ_PTR_LO_0 0x4C8A150
+
+#define mmPDMA0_QM_CQ_PTR_HI_0 0x4C8A154
+
+#define mmPDMA0_QM_CQ_TSIZE_0 0x4C8A158
+
+#define mmPDMA0_QM_CQ_CTL_0 0x4C8A15C
+
+#define mmPDMA0_QM_CQ_PTR_LO_1 0x4C8A160
+
+#define mmPDMA0_QM_CQ_PTR_HI_1 0x4C8A164
+
+#define mmPDMA0_QM_CQ_TSIZE_1 0x4C8A168
+
+#define mmPDMA0_QM_CQ_CTL_1 0x4C8A16C
+
+#define mmPDMA0_QM_CQ_PTR_LO_2 0x4C8A170
+
+#define mmPDMA0_QM_CQ_PTR_HI_2 0x4C8A174
+
+#define mmPDMA0_QM_CQ_TSIZE_2 0x4C8A178
+
+#define mmPDMA0_QM_CQ_CTL_2 0x4C8A17C
+
+#define mmPDMA0_QM_CQ_PTR_LO_3 0x4C8A180
+
+#define mmPDMA0_QM_CQ_PTR_HI_3 0x4C8A184
+
+#define mmPDMA0_QM_CQ_TSIZE_3 0x4C8A188
+
+#define mmPDMA0_QM_CQ_CTL_3 0x4C8A18C
+
+#define mmPDMA0_QM_CQ_PTR_LO_4 0x4C8A190
+
+#define mmPDMA0_QM_CQ_PTR_HI_4 0x4C8A194
+
+#define mmPDMA0_QM_CQ_TSIZE_4 0x4C8A198
+
+#define mmPDMA0_QM_CQ_CTL_4 0x4C8A19C
+
+#define mmPDMA0_QM_CQ_TSIZE_STS_0 0x4C8A1A0
+
+#define mmPDMA0_QM_CQ_TSIZE_STS_1 0x4C8A1A4
+
+#define mmPDMA0_QM_CQ_TSIZE_STS_2 0x4C8A1A8
+
+#define mmPDMA0_QM_CQ_TSIZE_STS_3 0x4C8A1AC
+
+#define mmPDMA0_QM_CQ_TSIZE_STS_4 0x4C8A1B0
+
+#define mmPDMA0_QM_CQ_PTR_LO_STS_0 0x4C8A1B4
+
+#define mmPDMA0_QM_CQ_PTR_LO_STS_1 0x4C8A1B8
+
+#define mmPDMA0_QM_CQ_PTR_LO_STS_2 0x4C8A1BC
+
+#define mmPDMA0_QM_CQ_PTR_LO_STS_3 0x4C8A1C0
+
+#define mmPDMA0_QM_CQ_PTR_LO_STS_4 0x4C8A1C4
+
+#define mmPDMA0_QM_CQ_PTR_HI_STS_0 0x4C8A1C8
+
+#define mmPDMA0_QM_CQ_PTR_HI_STS_1 0x4C8A1CC
+
+#define mmPDMA0_QM_CQ_PTR_HI_STS_2 0x4C8A1D0
+
+#define mmPDMA0_QM_CQ_PTR_HI_STS_3 0x4C8A1D4
+
+#define mmPDMA0_QM_CQ_PTR_HI_STS_4 0x4C8A1D8
+
+#define mmPDMA0_QM_CQ_IFIFO_STS_0 0x4C8A1DC
+
+#define mmPDMA0_QM_CQ_IFIFO_STS_1 0x4C8A1E0
+
+#define mmPDMA0_QM_CQ_IFIFO_STS_2 0x4C8A1E4
+
+#define mmPDMA0_QM_CQ_IFIFO_STS_3 0x4C8A1E8
+
+#define mmPDMA0_QM_CQ_IFIFO_STS_4 0x4C8A1EC
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 0x4C8A1F0
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 0x4C8A1F4
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 0x4C8A1F8
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 0x4C8A1FC
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 0x4C8A200
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 0x4C8A204
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 0x4C8A208
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 0x4C8A20C
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 0x4C8A210
+
+#define mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 0x4C8A214
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 0x4C8A218
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 0x4C8A21C
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 0x4C8A220
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 0x4C8A224
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 0x4C8A228
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 0x4C8A22C
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 0x4C8A230
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 0x4C8A234
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 0x4C8A238
+
+#define mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 0x4C8A23C
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 0x4C8A240
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 0x4C8A244
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 0x4C8A248
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 0x4C8A24C
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 0x4C8A250
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 0x4C8A254
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 0x4C8A258
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 0x4C8A25C
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 0x4C8A260
+
+#define mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 0x4C8A264
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 0x4C8A268
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 0x4C8A26C
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 0x4C8A270
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 0x4C8A274
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 0x4C8A278
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 0x4C8A27C
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 0x4C8A280
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 0x4C8A284
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 0x4C8A288
+
+#define mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 0x4C8A28C
+
+#define mmPDMA0_QM_CP_FENCE0_RDATA_0 0x4C8A290
+
+#define mmPDMA0_QM_CP_FENCE0_RDATA_1 0x4C8A294
+
+#define mmPDMA0_QM_CP_FENCE0_RDATA_2 0x4C8A298
+
+#define mmPDMA0_QM_CP_FENCE0_RDATA_3 0x4C8A29C
+
+#define mmPDMA0_QM_CP_FENCE0_RDATA_4 0x4C8A2A0
+
+#define mmPDMA0_QM_CP_FENCE1_RDATA_0 0x4C8A2A4
+
+#define mmPDMA0_QM_CP_FENCE1_RDATA_1 0x4C8A2A8
+
+#define mmPDMA0_QM_CP_FENCE1_RDATA_2 0x4C8A2AC
+
+#define mmPDMA0_QM_CP_FENCE1_RDATA_3 0x4C8A2B0
+
+#define mmPDMA0_QM_CP_FENCE1_RDATA_4 0x4C8A2B4
+
+#define mmPDMA0_QM_CP_FENCE2_RDATA_0 0x4C8A2B8
+
+#define mmPDMA0_QM_CP_FENCE2_RDATA_1 0x4C8A2BC
+
+#define mmPDMA0_QM_CP_FENCE2_RDATA_2 0x4C8A2C0
+
+#define mmPDMA0_QM_CP_FENCE2_RDATA_3 0x4C8A2C4
+
+#define mmPDMA0_QM_CP_FENCE2_RDATA_4 0x4C8A2C8
+
+#define mmPDMA0_QM_CP_FENCE3_RDATA_0 0x4C8A2CC
+
+#define mmPDMA0_QM_CP_FENCE3_RDATA_1 0x4C8A2D0
+
+#define mmPDMA0_QM_CP_FENCE3_RDATA_2 0x4C8A2D4
+
+#define mmPDMA0_QM_CP_FENCE3_RDATA_3 0x4C8A2D8
+
+#define mmPDMA0_QM_CP_FENCE3_RDATA_4 0x4C8A2DC
+
+#define mmPDMA0_QM_CP_FENCE0_CNT_0 0x4C8A2E0
+
+#define mmPDMA0_QM_CP_FENCE0_CNT_1 0x4C8A2E4
+
+#define mmPDMA0_QM_CP_FENCE0_CNT_2 0x4C8A2E8
+
+#define mmPDMA0_QM_CP_FENCE0_CNT_3 0x4C8A2EC
+
+#define mmPDMA0_QM_CP_FENCE0_CNT_4 0x4C8A2F0
+
+#define mmPDMA0_QM_CP_FENCE1_CNT_0 0x4C8A2F4
+
+#define mmPDMA0_QM_CP_FENCE1_CNT_1 0x4C8A2F8
+
+#define mmPDMA0_QM_CP_FENCE1_CNT_2 0x4C8A2FC
+
+#define mmPDMA0_QM_CP_FENCE1_CNT_3 0x4C8A300
+
+#define mmPDMA0_QM_CP_FENCE1_CNT_4 0x4C8A304
+
+#define mmPDMA0_QM_CP_FENCE2_CNT_0 0x4C8A308
+
+#define mmPDMA0_QM_CP_FENCE2_CNT_1 0x4C8A30C
+
+#define mmPDMA0_QM_CP_FENCE2_CNT_2 0x4C8A310
+
+#define mmPDMA0_QM_CP_FENCE2_CNT_3 0x4C8A314
+
+#define mmPDMA0_QM_CP_FENCE2_CNT_4 0x4C8A318
+
+#define mmPDMA0_QM_CP_FENCE3_CNT_0 0x4C8A31C
+
+#define mmPDMA0_QM_CP_FENCE3_CNT_1 0x4C8A320
+
+#define mmPDMA0_QM_CP_FENCE3_CNT_2 0x4C8A324
+
+#define mmPDMA0_QM_CP_FENCE3_CNT_3 0x4C8A328
+
+#define mmPDMA0_QM_CP_FENCE3_CNT_4 0x4C8A32C
+
+#define mmPDMA0_QM_CP_BARRIER_CFG 0x4C8A330
+
+#define mmPDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0x4C8A334
+
+#define mmPDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0x4C8A338
+
+#define mmPDMA0_QM_CP_LDMA_TSIZE_OFFSET 0x4C8A33C
+
+#define mmPDMA0_QM_CP_CQ_PTR_LO_OFFSET_0 0x4C8A340
+
+#define mmPDMA0_QM_CP_CQ_PTR_LO_OFFSET_1 0x4C8A344
+
+#define mmPDMA0_QM_CP_CQ_PTR_LO_OFFSET_2 0x4C8A348
+
+#define mmPDMA0_QM_CP_CQ_PTR_LO_OFFSET_3 0x4C8A34C
+
+#define mmPDMA0_QM_CP_CQ_PTR_LO_OFFSET_4 0x4C8A350
+
+#define mmPDMA0_QM_CP_STS_0 0x4C8A368
+
+#define mmPDMA0_QM_CP_STS_1 0x4C8A36C
+
+#define mmPDMA0_QM_CP_STS_2 0x4C8A370
+
+#define mmPDMA0_QM_CP_STS_3 0x4C8A374
+
+#define mmPDMA0_QM_CP_STS_4 0x4C8A378
+
+#define mmPDMA0_QM_CP_CURRENT_INST_LO_0 0x4C8A37C
+
+#define mmPDMA0_QM_CP_CURRENT_INST_LO_1 0x4C8A380
+
+#define mmPDMA0_QM_CP_CURRENT_INST_LO_2 0x4C8A384
+
+#define mmPDMA0_QM_CP_CURRENT_INST_LO_3 0x4C8A388
+
+#define mmPDMA0_QM_CP_CURRENT_INST_LO_4 0x4C8A38C
+
+#define mmPDMA0_QM_CP_CURRENT_INST_HI_0 0x4C8A390
+
+#define mmPDMA0_QM_CP_CURRENT_INST_HI_1 0x4C8A394
+
+#define mmPDMA0_QM_CP_CURRENT_INST_HI_2 0x4C8A398
+
+#define mmPDMA0_QM_CP_CURRENT_INST_HI_3 0x4C8A39C
+
+#define mmPDMA0_QM_CP_CURRENT_INST_HI_4 0x4C8A3A0
+
+#define mmPDMA0_QM_CP_PRED_0 0x4C8A3A4
+
+#define mmPDMA0_QM_CP_PRED_1 0x4C8A3A8
+
+#define mmPDMA0_QM_CP_PRED_2 0x4C8A3AC
+
+#define mmPDMA0_QM_CP_PRED_3 0x4C8A3B0
+
+#define mmPDMA0_QM_CP_PRED_4 0x4C8A3B4
+
+#define mmPDMA0_QM_CP_PRED_UPEN_0 0x4C8A3B8
+
+#define mmPDMA0_QM_CP_PRED_UPEN_1 0x4C8A3BC
+
+#define mmPDMA0_QM_CP_PRED_UPEN_2 0x4C8A3C0
+
+#define mmPDMA0_QM_CP_PRED_UPEN_3 0x4C8A3C4
+
+#define mmPDMA0_QM_CP_PRED_UPEN_4 0x4C8A3C8
+
+#define mmPDMA0_QM_CP_DBG_0_0 0x4C8A3CC
+
+#define mmPDMA0_QM_CP_DBG_0_1 0x4C8A3D0
+
+#define mmPDMA0_QM_CP_DBG_0_2 0x4C8A3D4
+
+#define mmPDMA0_QM_CP_DBG_0_3 0x4C8A3D8
+
+#define mmPDMA0_QM_CP_DBG_0_4 0x4C8A3DC
+
+#define mmPDMA0_QM_CP_CPDMA_UP_CRED_0 0x4C8A3E0
+
+#define mmPDMA0_QM_CP_CPDMA_UP_CRED_1 0x4C8A3E4
+
+#define mmPDMA0_QM_CP_CPDMA_UP_CRED_2 0x4C8A3E8
+
+#define mmPDMA0_QM_CP_CPDMA_UP_CRED_3 0x4C8A3EC
+
+#define mmPDMA0_QM_CP_CPDMA_UP_CRED_4 0x4C8A3F0
+
+#define mmPDMA0_QM_CP_IN_DATA_LO_0 0x4C8A3F4
+
+#define mmPDMA0_QM_CP_IN_DATA_LO_1 0x4C8A3F8
+
+#define mmPDMA0_QM_CP_IN_DATA_LO_2 0x4C8A3FC
+
+#define mmPDMA0_QM_CP_IN_DATA_LO_3 0x4C8A400
+
+#define mmPDMA0_QM_CP_IN_DATA_LO_4 0x4C8A404
+
+#define mmPDMA0_QM_CP_IN_DATA_HI_0 0x4C8A408
+
+#define mmPDMA0_QM_CP_IN_DATA_HI_1 0x4C8A40C
+
+#define mmPDMA0_QM_CP_IN_DATA_HI_2 0x4C8A410
+
+#define mmPDMA0_QM_CP_IN_DATA_HI_3 0x4C8A414
+
+#define mmPDMA0_QM_CP_IN_DATA_HI_4 0x4C8A418
+
+#define mmPDMA0_QM_PQC_HBW_BASE_LO_0 0x4C8A41C
+
+#define mmPDMA0_QM_PQC_HBW_BASE_LO_1 0x4C8A420
+
+#define mmPDMA0_QM_PQC_HBW_BASE_LO_2 0x4C8A424
+
+#define mmPDMA0_QM_PQC_HBW_BASE_LO_3 0x4C8A428
+
+#define mmPDMA0_QM_PQC_HBW_BASE_HI_0 0x4C8A42C
+
+#define mmPDMA0_QM_PQC_HBW_BASE_HI_1 0x4C8A430
+
+#define mmPDMA0_QM_PQC_HBW_BASE_HI_2 0x4C8A434
+
+#define mmPDMA0_QM_PQC_HBW_BASE_HI_3 0x4C8A438
+
+#define mmPDMA0_QM_PQC_SIZE_0 0x4C8A43C
+
+#define mmPDMA0_QM_PQC_SIZE_1 0x4C8A440
+
+#define mmPDMA0_QM_PQC_SIZE_2 0x4C8A444
+
+#define mmPDMA0_QM_PQC_SIZE_3 0x4C8A448
+
+#define mmPDMA0_QM_PQC_PI_0 0x4C8A44C
+
+#define mmPDMA0_QM_PQC_PI_1 0x4C8A450
+
+#define mmPDMA0_QM_PQC_PI_2 0x4C8A454
+
+#define mmPDMA0_QM_PQC_PI_3 0x4C8A458
+
+#define mmPDMA0_QM_PQC_LBW_WDATA_0 0x4C8A45C
+
+#define mmPDMA0_QM_PQC_LBW_WDATA_1 0x4C8A460
+
+#define mmPDMA0_QM_PQC_LBW_WDATA_2 0x4C8A464
+
+#define mmPDMA0_QM_PQC_LBW_WDATA_3 0x4C8A468
+
+#define mmPDMA0_QM_PQC_LBW_BASE_LO_0 0x4C8A46C
+
+#define mmPDMA0_QM_PQC_LBW_BASE_LO_1 0x4C8A470
+
+#define mmPDMA0_QM_PQC_LBW_BASE_LO_2 0x4C8A474
+
+#define mmPDMA0_QM_PQC_LBW_BASE_LO_3 0x4C8A478
+
+#define mmPDMA0_QM_PQC_LBW_BASE_HI_0 0x4C8A47C
+
+#define mmPDMA0_QM_PQC_LBW_BASE_HI_1 0x4C8A480
+
+#define mmPDMA0_QM_PQC_LBW_BASE_HI_2 0x4C8A484
+
+#define mmPDMA0_QM_PQC_LBW_BASE_HI_3 0x4C8A488
+
+#define mmPDMA0_QM_PQC_CFG 0x4C8A48C
+
+#define mmPDMA0_QM_PQC_SECURE_PUSH_IND 0x4C8A490
+
+#define mmPDMA0_QM_ARB_MASK 0x4C8A4A0
+
+#define mmPDMA0_QM_ARB_CFG_0 0x4C8A4A4
+
+#define mmPDMA0_QM_ARB_CHOICE_Q_PUSH 0x4C8A4A8
+
+#define mmPDMA0_QM_ARB_WRR_WEIGHT_0 0x4C8A4AC
+
+#define mmPDMA0_QM_ARB_WRR_WEIGHT_1 0x4C8A4B0
+
+#define mmPDMA0_QM_ARB_WRR_WEIGHT_2 0x4C8A4B4
+
+#define mmPDMA0_QM_ARB_WRR_WEIGHT_3 0x4C8A4B8
+
+#define mmPDMA0_QM_ARB_CFG_1 0x4C8A4BC
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_0 0x4C8A4C0
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_1 0x4C8A4C4
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_2 0x4C8A4C8
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_3 0x4C8A4CC
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_4 0x4C8A4D0
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_5 0x4C8A4D4
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_6 0x4C8A4D8
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_7 0x4C8A4DC
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_8 0x4C8A4E0
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_9 0x4C8A4E4
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_10 0x4C8A4E8
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_11 0x4C8A4EC
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_12 0x4C8A4F0
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_13 0x4C8A4F4
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_14 0x4C8A4F8
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_15 0x4C8A4FC
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_16 0x4C8A500
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_17 0x4C8A504
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_18 0x4C8A508
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_19 0x4C8A50C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_20 0x4C8A510
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_21 0x4C8A514
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_22 0x4C8A518
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_23 0x4C8A51C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_24 0x4C8A520
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_25 0x4C8A524
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_26 0x4C8A528
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_27 0x4C8A52C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_28 0x4C8A530
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_29 0x4C8A534
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_30 0x4C8A538
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_31 0x4C8A53C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_32 0x4C8A540
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_33 0x4C8A544
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_34 0x4C8A548
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_35 0x4C8A54C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_36 0x4C8A550
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_37 0x4C8A554
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_38 0x4C8A558
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_39 0x4C8A55C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_40 0x4C8A560
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_41 0x4C8A564
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_42 0x4C8A568
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_43 0x4C8A56C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_44 0x4C8A570
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_45 0x4C8A574
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_46 0x4C8A578
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_47 0x4C8A57C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_48 0x4C8A580
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_49 0x4C8A584
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_50 0x4C8A588
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_51 0x4C8A58C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_52 0x4C8A590
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_53 0x4C8A594
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_54 0x4C8A598
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_55 0x4C8A59C
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_56 0x4C8A5A0
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_57 0x4C8A5A4
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_58 0x4C8A5A8
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_59 0x4C8A5AC
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_60 0x4C8A5B0
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_61 0x4C8A5B4
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_62 0x4C8A5B8
+
+#define mmPDMA0_QM_ARB_MST_AVAIL_CRED_63 0x4C8A5BC
+
+#define mmPDMA0_QM_ARB_MST_CRED_INC 0x4C8A5E0
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_0 0x4C8A5E4
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_1 0x4C8A5E8
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_2 0x4C8A5EC
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_3 0x4C8A5F0
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_4 0x4C8A5F4
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_5 0x4C8A5F8
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_6 0x4C8A5FC
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_7 0x4C8A600
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_8 0x4C8A604
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_9 0x4C8A608
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_10 0x4C8A60C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_11 0x4C8A610
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_12 0x4C8A614
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_13 0x4C8A618
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_14 0x4C8A61C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_15 0x4C8A620
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_16 0x4C8A624
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_17 0x4C8A628
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_18 0x4C8A62C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_19 0x4C8A630
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_20 0x4C8A634
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_21 0x4C8A638
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_22 0x4C8A63C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_23 0x4C8A640
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_24 0x4C8A644
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_25 0x4C8A648
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_26 0x4C8A64C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_27 0x4C8A650
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_28 0x4C8A654
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_29 0x4C8A658
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_30 0x4C8A65C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_31 0x4C8A660
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_32 0x4C8A664
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_33 0x4C8A668
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_34 0x4C8A66C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_35 0x4C8A670
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_36 0x4C8A674
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_37 0x4C8A678
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_38 0x4C8A67C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_39 0x4C8A680
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_40 0x4C8A684
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_41 0x4C8A688
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_42 0x4C8A68C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_43 0x4C8A690
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_44 0x4C8A694
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_45 0x4C8A698
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_46 0x4C8A69C
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_47 0x4C8A6A0
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_48 0x4C8A6A4
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_49 0x4C8A6A8
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_50 0x4C8A6AC
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_51 0x4C8A6B0
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_52 0x4C8A6B4
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_53 0x4C8A6B8
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_54 0x4C8A6BC
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_55 0x4C8A6C0
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_56 0x4C8A6C4
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_57 0x4C8A6C8
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_58 0x4C8A6CC
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_59 0x4C8A6D0
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_60 0x4C8A6D4
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_61 0x4C8A6D8
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_62 0x4C8A6DC
+
+#define mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_63 0x4C8A6E0
+
+#define mmPDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x4C8A704
+
+#define mmPDMA0_QM_ARB_MST_SLAVE_EN 0x4C8A708
+
+#define mmPDMA0_QM_ARB_MST_SLAVE_EN_1 0x4C8A70C
+
+#define mmPDMA0_QM_ARB_SLV_CHOICE_WDT 0x4C8A710
+
+#define mmPDMA0_QM_ARB_SLV_ID 0x4C8A714
+
+#define mmPDMA0_QM_ARB_MST_QUIET_PER 0x4C8A718
+
+#define mmPDMA0_QM_ARB_MSG_MAX_INFLIGHT 0x4C8A744
+
+#define mmPDMA0_QM_ARB_BASE_LO 0x4C8A754
+
+#define mmPDMA0_QM_ARB_BASE_HI 0x4C8A758
+
+#define mmPDMA0_QM_ARB_STATE_STS 0x4C8A780
+
+#define mmPDMA0_QM_ARB_CHOICE_FULLNESS_STS 0x4C8A784
+
+#define mmPDMA0_QM_ARB_MSG_STS 0x4C8A788
+
+#define mmPDMA0_QM_ARB_SLV_CHOICE_Q_HEAD 0x4C8A78C
+
+#define mmPDMA0_QM_ARB_ERR_CAUSE 0x4C8A79C
+
+#define mmPDMA0_QM_ARB_ERR_MSG_EN 0x4C8A7A0
+
+#define mmPDMA0_QM_ARB_ERR_STS_DRP 0x4C8A7A8
+
+#define mmPDMA0_QM_ARB_MST_CRED_STS 0x4C8A7B0
+
+#define mmPDMA0_QM_ARB_MST_CRED_STS_1 0x4C8A7B4
+
+#define mmPDMA0_QM_CSMR_STRICT_PRIO_CFG 0x4C8A7FC
+
+#define mmPDMA0_QM_ARC_CQ_CFG0 0x4C8A800
+
+#define mmPDMA0_QM_ARC_CQ_CFG1 0x4C8A804
+
+#define mmPDMA0_QM_ARC_CQ_PTR_LO 0x4C8A808
+
+#define mmPDMA0_QM_ARC_CQ_PTR_HI 0x4C8A80C
+
+#define mmPDMA0_QM_ARC_CQ_TSIZE 0x4C8A810
+
+#define mmPDMA0_QM_ARC_CQ_CTL 0x4C8A814
+
+#define mmPDMA0_QM_ARC_CQ_IFIFO_STS 0x4C8A81C
+
+#define mmPDMA0_QM_ARC_CQ_STS0 0x4C8A820
+
+#define mmPDMA0_QM_ARC_CQ_STS1 0x4C8A824
+
+#define mmPDMA0_QM_ARC_CQ_TSIZE_STS 0x4C8A828
+
+#define mmPDMA0_QM_ARC_CQ_PTR_LO_STS 0x4C8A82C
+
+#define mmPDMA0_QM_ARC_CQ_PTR_HI_STS 0x4C8A830
+
+#define mmPDMA0_QM_CP_WR_ARC_ADDR_HI 0x4C8A834
+
+#define mmPDMA0_QM_CP_WR_ARC_ADDR_LO 0x4C8A838
+
+#define mmPDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_HI 0x4C8A83C
+
+#define mmPDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO 0x4C8A840
+
+#define mmPDMA0_QM_ARC_CQ_CTL_MSG_BASE_HI 0x4C8A844
+
+#define mmPDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO 0x4C8A848
+
+#define mmPDMA0_QM_CQ_IFIFO_MSG_BASE_HI 0x4C8A84C
+
+#define mmPDMA0_QM_CQ_IFIFO_MSG_BASE_LO 0x4C8A850
+
+#define mmPDMA0_QM_CQ_CTL_MSG_BASE_HI 0x4C8A854
+
+#define mmPDMA0_QM_CQ_CTL_MSG_BASE_LO 0x4C8A858
+
+#define mmPDMA0_QM_ADDR_OVRD 0x4C8A85C
+
+#define mmPDMA0_QM_CQ_IFIFO_CI_0 0x4C8A860
+
+#define mmPDMA0_QM_CQ_IFIFO_CI_1 0x4C8A864
+
+#define mmPDMA0_QM_CQ_IFIFO_CI_2 0x4C8A868
+
+#define mmPDMA0_QM_CQ_IFIFO_CI_3 0x4C8A86C
+
+#define mmPDMA0_QM_CQ_IFIFO_CI_4 0x4C8A870
+
+#define mmPDMA0_QM_ARC_CQ_IFIFO_CI 0x4C8A874
+
+#define mmPDMA0_QM_CQ_CTL_CI_0 0x4C8A878
+
+#define mmPDMA0_QM_CQ_CTL_CI_1 0x4C8A87C
+
+#define mmPDMA0_QM_CQ_CTL_CI_2 0x4C8A880
+
+#define mmPDMA0_QM_CQ_CTL_CI_3 0x4C8A884
+
+#define mmPDMA0_QM_CQ_CTL_CI_4 0x4C8A888
+
+#define mmPDMA0_QM_ARC_CQ_CTL_CI 0x4C8A88C
+
+#define mmPDMA0_QM_CP_CFG 0x4C8A890
+
+#define mmPDMA0_QM_CP_EXT_SWITCH 0x4C8A894
+
+#define mmPDMA0_QM_CP_SWITCH_WD_SET 0x4C8A898
+
+#define mmPDMA0_QM_CP_SWITCH_WD 0x4C8A89C
+
+#define mmPDMA0_QM_ARC_LB_ADDR_BASE_LO 0x4C8A8A4
+
+#define mmPDMA0_QM_ARC_LB_ADDR_BASE_HI 0x4C8A8A8
+
+#define mmPDMA0_QM_ENGINE_BASE_ADDR_HI 0x4C8A8AC
+
+#define mmPDMA0_QM_ENGINE_BASE_ADDR_LO 0x4C8A8B0
+
+#define mmPDMA0_QM_ENGINE_ADDR_RANGE_SIZE 0x4C8A8B4
+
+#define mmPDMA0_QM_QM_ARC_AUX_BASE_ADDR_HI 0x4C8A8B8
+
+#define mmPDMA0_QM_QM_ARC_AUX_BASE_ADDR_LO 0x4C8A8BC
+
+#define mmPDMA0_QM_QM_BASE_ADDR_HI 0x4C8A8C0
+
+#define mmPDMA0_QM_QM_BASE_ADDR_LO 0x4C8A8C4
+
+#define mmPDMA0_QM_ARC_PQC_SECURE_PUSH_IND 0x4C8A8C8
+
+#define mmPDMA0_QM_PQC_STS_0_0 0x4C8A8D0
+
+#define mmPDMA0_QM_PQC_STS_0_1 0x4C8A8D4
+
+#define mmPDMA0_QM_PQC_STS_0_2 0x4C8A8D8
+
+#define mmPDMA0_QM_PQC_STS_0_3 0x4C8A8DC
+
+#define mmPDMA0_QM_PQC_STS_1_0 0x4C8A8E0
+
+#define mmPDMA0_QM_PQC_STS_1_1 0x4C8A8E4
+
+#define mmPDMA0_QM_PQC_STS_1_2 0x4C8A8E8
+
+#define mmPDMA0_QM_PQC_STS_1_3 0x4C8A8EC
+
+#define mmPDMA0_QM_SEI_STATUS 0x4C8A8F0
+
+#define mmPDMA0_QM_SEI_MASK 0x4C8A8F4
+
+#define mmPDMA0_QM_GLBL_ERR_ADDR_LO 0x4C8AD00
+
+#define mmPDMA0_QM_GLBL_ERR_ADDR_HI 0x4C8AD04
+
+#define mmPDMA0_QM_GLBL_ERR_WDATA 0x4C8AD08
+
+#define mmPDMA0_QM_L2H_MASK_LO 0x4C8AD14
+
+#define mmPDMA0_QM_L2H_MASK_HI 0x4C8AD18
+
+#define mmPDMA0_QM_L2H_CMPR_LO 0x4C8AD1C
+
+#define mmPDMA0_QM_L2H_CMPR_HI 0x4C8AD20
+
+#define mmPDMA0_QM_LOCAL_RANGE_BASE 0x4C8AD24
+
+#define mmPDMA0_QM_LOCAL_RANGE_SIZE 0x4C8AD28
+
+#define mmPDMA0_QM_HBW_RD_RATE_LIM_CFG_1 0x4C8AD30
+
+#define mmPDMA0_QM_LBW_WR_RATE_LIM_CFG_0 0x4C8AD34
+
+#define mmPDMA0_QM_LBW_WR_RATE_LIM_CFG_1 0x4C8AD38
+
+#define mmPDMA0_QM_HBW_RD_RATE_LIM_CFG_0 0x4C8AD3C
+
+#define mmPDMA0_QM_IND_GW_APB_CFG 0x4C8AD40
+
+#define mmPDMA0_QM_IND_GW_APB_WDATA 0x4C8AD44
+
+#define mmPDMA0_QM_IND_GW_APB_RDATA 0x4C8AD48
+
+#define mmPDMA0_QM_IND_GW_APB_STATUS 0x4C8AD4C
+
+#define mmPDMA0_QM_PERF_CNT_FREE_LO 0x4C8AD60
+
+#define mmPDMA0_QM_PERF_CNT_FREE_HI 0x4C8AD64
+
+#define mmPDMA0_QM_PERF_CNT_IDLE_LO 0x4C8AD68
+
+#define mmPDMA0_QM_PERF_CNT_IDLE_HI 0x4C8AD6C
+
+#define mmPDMA0_QM_PERF_CNT_CFG 0x4C8AD70
+
+#endif /* ASIC_REG_PDMA0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
new file mode 100644
index 000000000000..ccc6dfd22dd7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA1_CORE_CTX_AXUSER_REGS_H_
+#define ASIC_REG_PDMA1_CORE_CTX_AXUSER_REGS_H_
+
+/*
+ *****************************************
+ * PDMA1_CORE_CTX_AXUSER
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_ASID 0x4C9B800
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_MMU_BP 0x4C9B804
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_STRONG_ORDER 0x4C9B808
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_NO_SNOOP 0x4C9B80C
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_WR_REDUCTION 0x4C9B810
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_RD_ATOMIC 0x4C9B814
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_QOS 0x4C9B818
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_RSVD 0x4C9B81C
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_EMEM_CPAGE 0x4C9B820
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_CORE 0x4C9B824
+
+#define mmPDMA1_CORE_CTX_AXUSER_E2E_COORD 0x4C9B828
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_WR_OVRD_LO 0x4C9B830
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_WR_OVRD_HI 0x4C9B834
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_RD_OVRD_LO 0x4C9B838
+
+#define mmPDMA1_CORE_CTX_AXUSER_HB_RD_OVRD_HI 0x4C9B83C
+
+#define mmPDMA1_CORE_CTX_AXUSER_LB_COORD 0x4C9B840
+
+#define mmPDMA1_CORE_CTX_AXUSER_LB_LOCK 0x4C9B844
+
+#define mmPDMA1_CORE_CTX_AXUSER_LB_RSVD 0x4C9B848
+
+#define mmPDMA1_CORE_CTX_AXUSER_LB_OVRD 0x4C9B84C
+
+#endif /* ASIC_REG_PDMA1_CORE_CTX_AXUSER_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..5fd72d050fff
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PDMA1_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_PDMA1_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * PDMA1_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_ASID 0x4C9AB80
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP 0x4C9AB84
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x4C9AB88
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x4C9AB8C
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x4C9AB90
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x4C9AB94
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_QOS 0x4C9AB98
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_RSVD 0x4C9AB9C
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x4C9ABA0
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_CORE 0x4C9ABA4
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_E2E_COORD 0x4C9ABA8
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x4C9ABB0
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x4C9ABB4
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x4C9ABB8
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x4C9ABBC
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_LB_COORD 0x4C9ABC0
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_LB_LOCK 0x4C9ABC4
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_LB_RSVD 0x4C9ABC8
+
+#define mmPDMA1_QM_AXUSER_NONSECURED_LB_OVRD 0x4C9ABCC
+
+#endif /* ASIC_REG_PDMA1_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
new file mode 100644
index 000000000000..0276506ea523
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PMMU_HBW_STLB_MASKS_H_
+#define ASIC_REG_PMMU_HBW_STLB_MASKS_H_
+
+/*
+ *****************************************
+ * PMMU_HBW_STLB
+ * (Prototype: STLB)
+ *****************************************
+ */
+
+/* PMMU_HBW_STLB_BUSY */
+#define PMMU_HBW_STLB_BUSY_BUSY_SHIFT 0
+#define PMMU_HBW_STLB_BUSY_BUSY_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_ASID */
+#define PMMU_HBW_STLB_ASID_ASID_SHIFT 0
+#define PMMU_HBW_STLB_ASID_ASID_MASK 0x3FF
+
+/* PMMU_HBW_STLB_HOP0_PA43_12 */
+#define PMMU_HBW_STLB_HOP0_PA43_12_HOP0_PA43_12_SHIFT 0
+#define PMMU_HBW_STLB_HOP0_PA43_12_HOP0_PA43_12_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_HOP0_PA63_44 */
+#define PMMU_HBW_STLB_HOP0_PA63_44_HOP0_PA63_44_SHIFT 0
+#define PMMU_HBW_STLB_HOP0_PA63_44_HOP0_PA63_44_MASK 0xFFFFF
+
+/* PMMU_HBW_STLB_CACHE_INV */
+#define PMMU_HBW_STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
+#define PMMU_HBW_STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
+#define PMMU_HBW_STLB_CACHE_INV_INDEX_MASK_SHIFT 8
+#define PMMU_HBW_STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
+
+/* PMMU_HBW_STLB_CACHE_INV_BASE_39_8 */
+#define PMMU_HBW_STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0
+#define PMMU_HBW_STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_CACHE_INV_BASE_63_40 */
+#define PMMU_HBW_STLB_CACHE_INV_BASE_63_40_PA_SHIFT 0
+#define PMMU_HBW_STLB_CACHE_INV_BASE_63_40_PA_MASK 0xFFFFFF
+
+/* PMMU_HBW_STLB_STLB_FEATURE_EN */
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0x1F80
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_FOLLOWING_NUM_LIMIT_SHIFT 13
+#define PMMU_HBW_STLB_STLB_FEATURE_EN_FOLLOWING_NUM_LIMIT_MASK 0xE000
+
+/* PMMU_HBW_STLB_STLB_AXI_CACHE */
+#define PMMU_HBW_STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0
+#define PMMU_HBW_STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF
+#define PMMU_HBW_STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4
+#define PMMU_HBW_STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0
+#define PMMU_HBW_STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8
+#define PMMU_HBW_STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00
+
+/* PMMU_HBW_STLB_HOP_CONFIGURATION */
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_SHIFT 4
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK 0x70
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_SHIFT 8
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK 0x700
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 12
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x7000
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_SHIFT 16
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK 0x70000
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_SHIFT 20
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK 0x100000
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_SHIFT 21
+#define PMMU_HBW_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK 0x7E00000
+
+/* PMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_63_32 */
+#define PMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_SHIFT 0
+#define PMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_31_0 */
+#define PMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0
+#define PMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_INV_ALL_START */
+#define PMMU_HBW_STLB_INV_ALL_START_R_SHIFT 0
+#define PMMU_HBW_STLB_INV_ALL_START_R_MASK 0x1
+
+/* PMMU_HBW_STLB_INV_ALL_SET */
+#define PMMU_HBW_STLB_INV_ALL_SET_R_SHIFT 0
+#define PMMU_HBW_STLB_INV_ALL_SET_R_MASK 0xFF
+
+/* PMMU_HBW_STLB_INV_PS */
+#define PMMU_HBW_STLB_INV_PS_R_SHIFT 0
+#define PMMU_HBW_STLB_INV_PS_R_MASK 0x3
+
+/* PMMU_HBW_STLB_INV_CONSUMER_INDEX */
+#define PMMU_HBW_STLB_INV_CONSUMER_INDEX_R_SHIFT 0
+#define PMMU_HBW_STLB_INV_CONSUMER_INDEX_R_MASK 0xFF
+
+/* PMMU_HBW_STLB_INV_HIT_COUNT */
+#define PMMU_HBW_STLB_INV_HIT_COUNT_R_SHIFT 0
+#define PMMU_HBW_STLB_INV_HIT_COUNT_R_MASK 0x7FF
+
+/* PMMU_HBW_STLB_INV_SET */
+#define PMMU_HBW_STLB_INV_SET_R_SHIFT 0
+#define PMMU_HBW_STLB_INV_SET_R_MASK 0xFF
+
+/* PMMU_HBW_STLB_SRAM_INIT */
+#define PMMU_HBW_STLB_SRAM_INIT_BUSY_TAG_SHIFT 0
+#define PMMU_HBW_STLB_SRAM_INIT_BUSY_TAG_MASK 0x3
+#define PMMU_HBW_STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2
+#define PMMU_HBW_STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC
+#define PMMU_HBW_STLB_SRAM_INIT_BUSY_DATA_SHIFT 4
+#define PMMU_HBW_STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
+
+/* PMMU_HBW_STLB_MEM_CACHE_INVALIDATION */
+
+/* PMMU_HBW_STLB_MEM_CACHE_INV_STATUS */
+#define PMMU_HBW_STLB_MEM_CACHE_INV_STATUS_INVALIDATE_DONE_SHIFT 0
+#define PMMU_HBW_STLB_MEM_CACHE_INV_STATUS_INVALIDATE_DONE_MASK 0x1
+#define PMMU_HBW_STLB_MEM_CACHE_INV_STATUS_CACHE_IDLE_SHIFT 1
+#define PMMU_HBW_STLB_MEM_CACHE_INV_STATUS_CACHE_IDLE_MASK 0x2
+
+/* PMMU_HBW_STLB_MEM_CACHE_BASE_38_7 */
+#define PMMU_HBW_STLB_MEM_CACHE_BASE_38_7_R_SHIFT 0
+#define PMMU_HBW_STLB_MEM_CACHE_BASE_38_7_R_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_MEM_CACHE_BASE_63_39 */
+#define PMMU_HBW_STLB_MEM_CACHE_BASE_63_39_R_SHIFT 0
+#define PMMU_HBW_STLB_MEM_CACHE_BASE_63_39_R_MASK 0x1FFFFFF
+
+/* PMMU_HBW_STLB_MEM_CACHE_CONFIG */
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_CACHE_HOP_EN_SHIFT 0
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_CACHE_HOP_EN_MASK 0x3F
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_CACHE_HOP_PREFETCH_EN_SHIFT 6
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_CACHE_HOP_PREFETCH_EN_MASK 0xFC0
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_BYPASS_EN_SHIFT 12
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_BYPASS_EN_MASK 0x1000
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_RELEASE_INVALIDATE_SHIFT 13
+#define PMMU_HBW_STLB_MEM_CACHE_CONFIG_RELEASE_INVALIDATE_MASK 0x2000
+
+/* PMMU_HBW_STLB_SET_THRESHOLD_HOP5 */
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP5_MIN_SHIFT 0
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP5_MIN_MASK 0x1FF
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP5_MAX_SHIFT 9
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP5_MAX_MASK 0x3FE00
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP5_MASK_SHIFT 18
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP5_MASK_MASK 0x7FC0000
+
+/* PMMU_HBW_STLB_SET_THRESHOLD_HOP4 */
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP4_MIN_SHIFT 0
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP4_MIN_MASK 0x1FF
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP4_MAX_SHIFT 9
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP4_MAX_MASK 0x3FE00
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP4_MASK_SHIFT 18
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP4_MASK_MASK 0x7FC0000
+
+/* PMMU_HBW_STLB_SET_THRESHOLD_HOP3 */
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP3_MIN_SHIFT 0
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP3_MIN_MASK 0x1FF
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP3_MAX_SHIFT 9
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP3_MAX_MASK 0x3FE00
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP3_MASK_SHIFT 18
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP3_MASK_MASK 0x7FC0000
+
+/* PMMU_HBW_STLB_SET_THRESHOLD_HOP2 */
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP2_MIN_SHIFT 0
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP2_MIN_MASK 0x1FF
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP2_MAX_SHIFT 9
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP2_MAX_MASK 0x3FE00
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP2_MASK_SHIFT 18
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP2_MASK_MASK 0x7FC0000
+
+/* PMMU_HBW_STLB_SET_THRESHOLD_HOP1 */
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP1_MIN_SHIFT 0
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP1_MIN_MASK 0x1FF
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP1_MAX_SHIFT 9
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP1_MAX_MASK 0x3FE00
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP1_MASK_SHIFT 18
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP1_MASK_MASK 0x7FC0000
+
+/* PMMU_HBW_STLB_SET_THRESHOLD_HOP0 */
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP0_MIN_SHIFT 0
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP0_MIN_MASK 0x1FF
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP0_MAX_SHIFT 9
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP0_MAX_MASK 0x3FE00
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP0_MASK_SHIFT 18
+#define PMMU_HBW_STLB_SET_THRESHOLD_HOP0_MASK_MASK 0x7FC0000
+
+/* PMMU_HBW_STLB_MULTI_HIT_INTERRUPT_CLR */
+
+/* PMMU_HBW_STLB_MULTI_HIT_INTERRUPT_MASK */
+#define PMMU_HBW_STLB_MULTI_HIT_INTERRUPT_MASK_R_SHIFT 0
+#define PMMU_HBW_STLB_MULTI_HIT_INTERRUPT_MASK_R_MASK 0x1
+
+/* PMMU_HBW_STLB_MEM_L0_CACHE_CFG */
+#define PMMU_HBW_STLB_MEM_L0_CACHE_CFG_PLRU_EVICTION_SHIFT 0
+#define PMMU_HBW_STLB_MEM_L0_CACHE_CFG_PLRU_EVICTION_MASK 0x1
+#define PMMU_HBW_STLB_MEM_L0_CACHE_CFG_CACHE_STOP_SHIFT 1
+#define PMMU_HBW_STLB_MEM_L0_CACHE_CFG_CACHE_STOP_MASK 0x2
+#define PMMU_HBW_STLB_MEM_L0_CACHE_CFG_INV_WRITEBACK_SHIFT 2
+#define PMMU_HBW_STLB_MEM_L0_CACHE_CFG_INV_WRITEBACK_MASK 0x4
+
+/* PMMU_HBW_STLB_MEM_READ_ARPROT */
+#define PMMU_HBW_STLB_MEM_READ_ARPROT_R_SHIFT 0
+#define PMMU_HBW_STLB_MEM_READ_ARPROT_R_MASK 0x7
+
+/* PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION */
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
+0x1
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_MASK 0xFFC
+
+/* PMMU_HBW_STLB_RANGE_INV_START_LSB */
+#define PMMU_HBW_STLB_RANGE_INV_START_LSB_INV_START_LSB_SHIFT 0
+#define PMMU_HBW_STLB_RANGE_INV_START_LSB_INV_START_LSB_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_RANGE_INV_START_MSB */
+#define PMMU_HBW_STLB_RANGE_INV_START_MSB_INV_START_MSB_SHIFT 0
+#define PMMU_HBW_STLB_RANGE_INV_START_MSB_INV_START_MSB_MASK 0xFFFFF
+
+/* PMMU_HBW_STLB_RANGE_INV_END_LSB */
+#define PMMU_HBW_STLB_RANGE_INV_END_LSB_INV_END_LSB_SHIFT 0
+#define PMMU_HBW_STLB_RANGE_INV_END_LSB_INV_END_LSB_MASK 0xFFFFFFFF
+
+/* PMMU_HBW_STLB_RANGE_INV_END_MSB */
+#define PMMU_HBW_STLB_RANGE_INV_END_MSB_INV_END_MSB_SHIFT 0
+#define PMMU_HBW_STLB_RANGE_INV_END_MSB_INV_END_MSB_MASK 0xFFFFF
+
+/* PMMU_HBW_STLB_ASID_SCRAMBLER_CTRL */
+#define PMMU_HBW_STLB_ASID_SCRAMBLER_CTRL_SCRAMBLER_SCRAM_EN_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCRAMBLER_CTRL_SCRAMBLER_SCRAM_EN_MASK 0x1
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_0 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_1 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_2 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_3 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_4 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_5 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_6 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_7 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_8 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_9 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_10 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_11 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_11_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_11_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_12 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_12_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_12_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_13 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_13_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_13_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_14 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_14_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_14_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_15 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_15_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_15_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_16 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_16_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_16_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_17 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_17_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_17_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+/* PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_18 */
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_18_ASID_POLY_MATRIX_H3_SHIFT 0
+#define PMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_18_ASID_POLY_MATRIX_H3_MASK 0x1FF
+
+#endif /* ASIC_REG_PMMU_HBW_STLB_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
new file mode 100644
index 000000000000..87c66c08e24a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PMMU_HBW_STLB_REGS_H_
+#define ASIC_REG_PMMU_HBW_STLB_REGS_H_
+
+/*
+ *****************************************
+ * PMMU_HBW_STLB
+ * (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmPMMU_HBW_STLB_BUSY 0x4D01000
+
+#define mmPMMU_HBW_STLB_ASID 0x4D01004
+
+#define mmPMMU_HBW_STLB_HOP0_PA43_12 0x4D01008
+
+#define mmPMMU_HBW_STLB_HOP0_PA63_44 0x4D0100C
+
+#define mmPMMU_HBW_STLB_CACHE_INV 0x4D01010
+
+#define mmPMMU_HBW_STLB_CACHE_INV_BASE_39_8 0x4D01014
+
+#define mmPMMU_HBW_STLB_CACHE_INV_BASE_63_40 0x4D01018
+
+#define mmPMMU_HBW_STLB_STLB_FEATURE_EN 0x4D0101C
+
+#define mmPMMU_HBW_STLB_STLB_AXI_CACHE 0x4D01020
+
+#define mmPMMU_HBW_STLB_HOP_CONFIGURATION 0x4D01024
+
+#define mmPMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_63_32 0x4D01028
+
+#define mmPMMU_HBW_STLB_LINK_LIST_LOOKUP_MASK_31_0 0x4D0102C
+
+#define mmPMMU_HBW_STLB_INV_ALL_START 0x4D01034
+
+#define mmPMMU_HBW_STLB_INV_ALL_SET 0x4D01038
+
+#define mmPMMU_HBW_STLB_INV_PS 0x4D0103C
+
+#define mmPMMU_HBW_STLB_INV_CONSUMER_INDEX 0x4D01040
+
+#define mmPMMU_HBW_STLB_INV_HIT_COUNT 0x4D01044
+
+#define mmPMMU_HBW_STLB_INV_SET 0x4D01048
+
+#define mmPMMU_HBW_STLB_SRAM_INIT 0x4D0104C
+
+#define mmPMMU_HBW_STLB_MEM_CACHE_INVALIDATION 0x4D01050
+
+#define mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS 0x4D01054
+
+#define mmPMMU_HBW_STLB_MEM_CACHE_BASE_38_7 0x4D01058
+
+#define mmPMMU_HBW_STLB_MEM_CACHE_BASE_63_39 0x4D0105C
+
+#define mmPMMU_HBW_STLB_MEM_CACHE_CONFIG 0x4D01060
+
+#define mmPMMU_HBW_STLB_SET_THRESHOLD_HOP5 0x4D01064
+
+#define mmPMMU_HBW_STLB_SET_THRESHOLD_HOP4 0x4D01068
+
+#define mmPMMU_HBW_STLB_SET_THRESHOLD_HOP3 0x4D0106C
+
+#define mmPMMU_HBW_STLB_SET_THRESHOLD_HOP2 0x4D01070
+
+#define mmPMMU_HBW_STLB_SET_THRESHOLD_HOP1 0x4D01074
+
+#define mmPMMU_HBW_STLB_SET_THRESHOLD_HOP0 0x4D01078
+
+#define mmPMMU_HBW_STLB_MULTI_HIT_INTERRUPT_CLR 0x4D0107C
+
+#define mmPMMU_HBW_STLB_MULTI_HIT_INTERRUPT_MASK 0x4D01080
+
+#define mmPMMU_HBW_STLB_MEM_L0_CACHE_CFG 0x4D01084
+
+#define mmPMMU_HBW_STLB_MEM_READ_ARPROT 0x4D01088
+
+#define mmPMMU_HBW_STLB_RANGE_CACHE_INVALIDATION 0x4D0108C
+
+#define mmPMMU_HBW_STLB_RANGE_INV_START_LSB 0x4D01090
+
+#define mmPMMU_HBW_STLB_RANGE_INV_START_MSB 0x4D01094
+
+#define mmPMMU_HBW_STLB_RANGE_INV_END_LSB 0x4D01098
+
+#define mmPMMU_HBW_STLB_RANGE_INV_END_MSB 0x4D0109C
+
+#define mmPMMU_HBW_STLB_ASID_SCRAMBLER_CTRL 0x4D01100
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_0 0x4D01104
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_1 0x4D01108
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_2 0x4D0110C
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_3 0x4D01110
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_4 0x4D01114
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_5 0x4D01118
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_6 0x4D0111C
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_7 0x4D01120
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_8 0x4D01124
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MATRIX_H3_9 0x4D01128
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_10 0x4D0112C
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_11 0x4D01130
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_12 0x4D01134
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_13 0x4D01138
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_14 0x4D0113C
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_15 0x4D01140
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_16 0x4D01144
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_17 0x4D01148
+
+#define mmPMMU_HBW_STLB_ASID_SCR_POLY_MAT_H3_18 0x4D0114C
+
+#endif /* ASIC_REG_PMMU_HBW_STLB_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
new file mode 100644
index 000000000000..dd12793734b4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PMMU_PIF_REGS_H_
+#define ASIC_REG_PMMU_PIF_REGS_H_
+
+/*
+ *****************************************
+ * PMMU_PIF
+ * (Prototype: PIF)
+ *****************************************
+ */
+
+#define mmPMMU_PIF_WR_CORE_CREDITS_THRESHOLD 0x4D03000
+
+#define mmPMMU_PIF_RD_CORE_CREDITS_THRESHOLD 0x4D03004
+
+#define mmPMMU_PIF_CORE_CREDITS_THRESHOLD 0x4D03008
+
+#define mmPMMU_PIF_CORE_SEPARATION_DISABLE 0x4D0300C
+
+#define mmPMMU_PIF_DISABLE_E2E_CREDITS 0x4D03010
+
+#define mmPMMU_PIF_RATE_LIMITER_ENABLE 0x4D03014
+
+#define mmPMMU_PIF_RATE_LIMITER_TOKEN_RESET 0x4D03018
+
+#define mmPMMU_PIF_RATE_LIMITER_SATURATION 0x4D0301C
+
+#define mmPMMU_PIF_RATE_LIMITER_TIMEOUT_LSB 0x4D03020
+
+#define mmPMMU_PIF_RATE_LIMITER_TIMEOUT_MSB 0x4D03024
+
+#define mmPMMU_PIF_ARB_TYPE 0x4D03028
+
+#define mmPMMU_PIF_CLOCK_GATE_CONFIG 0x4D0302C
+
+#define mmPMMU_PIF_CLOCK_GATE_ACTIVE 0x4D03030
+
+#define mmPMMU_PIF_SPI_INTERRUPT_CAUSE 0x4D03034
+
+#define mmPMMU_PIF_SPI_INTERRUPT_CAUSE_MASK 0x4D03038
+
+#define mmPMMU_PIF_SPI_INTERRUPT_REG 0x4D0303C
+
+#define mmPMMU_PIF_SPI_INTERRUPT_MASK 0x4D03040
+
+#define mmPMMU_PIF_SEI_INTERRUPT_CAUSE 0x4D03044
+
+#define mmPMMU_PIF_SEI_INTERRUPT_CAUSE_MASK 0x4D03048
+
+#define mmPMMU_PIF_SEI_INTERRUPT_REG 0x4D0304C
+
+#define mmPMMU_PIF_SEI_INTERRUPT_MASK 0x4D03050
+
+#define mmPMMU_PIF_DEBUG_BUFFER_CNT_CTRL 0x4D03054
+
+#define mmPMMU_PIF_DEBUG_WR_BUF_CNT 0x4D03058
+
+#define mmPMMU_PIF_DEBUG_RD_BUF_CNT 0x4D0305C
+
+#define mmPMMU_PIF_DEBUG_WR_CORE_BUF_CNT 0x4D03060
+
+#define mmPMMU_PIF_DEBUG_RD_CORE_BUF_CNT 0x4D03070
+
+#define mmPMMU_PIF_DEBUG_WR_BUF_FULL 0x4D03080
+
+#define mmPMMU_PIF_DEBUG_RD_BUF_FULL 0x4D03084
+
+#define mmPMMU_PIF_E2E_ROUTING_CFG 0x4D03090
+
+#define mmPMMU_PIF_E2E_ROUTING_CFG2 0x4D03094
+
+#define mmPMMU_PIF_SPI_INTERRUPT_CLEAR 0x4D03100
+
+#define mmPMMU_PIF_SEI_INTERRUPT_CLEAR 0x4D03104
+
+#define mmPMMU_PIF_BASE_ADDR_PMMU 0x4D03200
+
+#define mmPMMU_PIF_ADDR_MASK_PMMU 0x4D03204
+
+#define mmPMMU_PIF_BASE_ADDR_PCI0 0x4D03208
+
+#define mmPMMU_PIF_ADDR_MASK_PCI0 0x4D0320C
+
+#define mmPMMU_PIF_BASE_ADDR_PCI2 0x4D03210
+
+#define mmPMMU_PIF_ADDR_MASK_PCI1 0x4D03214
+
+#define mmPMMU_PIF_BASE_ADDR_PCI1 0x4D03218
+
+#define mmPMMU_PIF_ADDR_MASK_PCI2 0x4D0321C
+
+#define mmPMMU_PIF_BASE_ADDR_TPC 0x4D03220
+
+#define mmPMMU_PIF_ADDR_MASK_TPC 0x4D03224
+
+#define mmPMMU_PIF_BASE_ADDR_DEC0 0x4D03228
+
+#define mmPMMU_PIF_ADDR_MASK_DEC0 0x4D0322C
+
+#define mmPMMU_PIF_BASE_ADDR_DEC1 0x4D03230
+
+#define mmPMMU_PIF_ADDR_MASK_DEC1 0x4D03234
+
+#define mmPMMU_PIF_PMMU_DBG_BASE_ADDR 0x4D03300
+
+#define mmPMMU_PIF_PMMU_DBG_ADDR_MASK 0x4D03304
+
+#define mmPMMU_PIF_PCI_DBG_BASE_ADDR 0x4D03308
+
+#define mmPMMU_PIF_PCI_DBG_ADDR_MASK 0x4D0330C
+
+#define mmPMMU_PIF_DEC0_DBG_BASE_ADDR 0x4D03310
+
+#define mmPMMU_PIF_DEC0_DBG_ADDR_MASK 0x4D03314
+
+#define mmPMMU_PIF_DEC1_DBG_BASE_ADDR 0x4D03318
+
+#define mmPMMU_PIF_DEC1_DBG_ADDR_MASK 0x4D0331C
+
+#define mmPMMU_PIF_TPC_DBG_BASE_ADDR 0x4D03320
+
+#define mmPMMU_PIF_TPC_DBG_ADDR_MASK 0x4D03324
+
+#endif /* ASIC_REG_PMMU_PIF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
new file mode 100644
index 000000000000..42e67c1059c4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_MASKS_H_
+#define ASIC_REG_PSOC_ETR_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR
+ * (Prototype: ETR)
+ *****************************************
+ */
+
+/* PSOC_ETR_RSZ */
+#define PSOC_ETR_RSZ_RSZ_ETR_SHIFT 0
+#define PSOC_ETR_RSZ_RSZ_ETR_MASK 0x7FFFFFFF
+
+/* PSOC_ETR_STS */
+#define PSOC_ETR_STS_FULL_SHIFT 0
+#define PSOC_ETR_STS_FULL_MASK 0x1
+#define PSOC_ETR_STS_TRIGGERED_SHIFT 1
+#define PSOC_ETR_STS_TRIGGERED_MASK 0x2
+#define PSOC_ETR_STS_TMCREADY_SHIFT 2
+#define PSOC_ETR_STS_TMCREADY_MASK 0x4
+#define PSOC_ETR_STS_FTEMPTY_SHIFT 3
+#define PSOC_ETR_STS_FTEMPTY_MASK 0x8
+#define PSOC_ETR_STS_EMPTY_SHIFT 4
+#define PSOC_ETR_STS_EMPTY_MASK 0x10
+#define PSOC_ETR_STS_MEMERR_SHIFT 5
+#define PSOC_ETR_STS_MEMERR_MASK 0x20
+
+/* PSOC_ETR_RRD */
+#define PSOC_ETR_RRD_RRD_SHIFT 0
+#define PSOC_ETR_RRD_RRD_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_RRP */
+#define PSOC_ETR_RRP_RRP_SHIFT 0
+#define PSOC_ETR_RRP_RRP_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_RWP */
+#define PSOC_ETR_RWP_RWP_SHIFT 0
+#define PSOC_ETR_RWP_RWP_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_TRG */
+#define PSOC_ETR_TRG_TRG_SHIFT 0
+#define PSOC_ETR_TRG_TRG_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_CTL */
+#define PSOC_ETR_CTL_TRACECAPTEN_SHIFT 0
+#define PSOC_ETR_CTL_TRACECAPTEN_MASK 0x1
+
+/* PSOC_ETR_RWD */
+#define PSOC_ETR_RWD_RWD_SHIFT 0
+#define PSOC_ETR_RWD_RWD_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_MODE */
+#define PSOC_ETR_MODE_MODE_SHIFT 0
+#define PSOC_ETR_MODE_MODE_MASK 0x3
+
+/* PSOC_ETR_LBUFLEVEL */
+#define PSOC_ETR_LBUFLEVEL_LBUFLEVEL_SHIFT 0
+#define PSOC_ETR_LBUFLEVEL_LBUFLEVEL_MASK 0x7FFFFFFF
+
+/* PSOC_ETR_CBUFLEVEL */
+#define PSOC_ETR_CBUFLEVEL_CBUFLEVEL_SHIFT 0
+#define PSOC_ETR_CBUFLEVEL_CBUFLEVEL_MASK 0x7FFFFFFF
+
+/* PSOC_ETR_BUFWM */
+#define PSOC_ETR_BUFWM_BUFWM_SHIFT 0
+#define PSOC_ETR_BUFWM_BUFWM_MASK 0x3FFFFFFF
+
+/* PSOC_ETR_RRPHI */
+#define PSOC_ETR_RRPHI_RRPHI_SHIFT 0
+#define PSOC_ETR_RRPHI_RRPHI_MASK 0xFF
+
+/* PSOC_ETR_RWPHI */
+#define PSOC_ETR_RWPHI_RWPHI_SHIFT 0
+#define PSOC_ETR_RWPHI_RWPHI_MASK 0xFF
+
+/* PSOC_ETR_AXICTL */
+#define PSOC_ETR_AXICTL_PROTCTRLBIT0_SHIFT 0
+#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2
+#define PSOC_ETR_AXICTL_CACHECTRLBIT0_SHIFT 2
+#define PSOC_ETR_AXICTL_CACHECTRLBIT0_MASK 0x4
+#define PSOC_ETR_AXICTL_CACHECTRLBIT1_SHIFT 3
+#define PSOC_ETR_AXICTL_CACHECTRLBIT1_MASK 0x8
+#define PSOC_ETR_AXICTL_CACHECTRLBIT2_SHIFT 4
+#define PSOC_ETR_AXICTL_CACHECTRLBIT2_MASK 0x10
+#define PSOC_ETR_AXICTL_CACHECTRLBIT3_SHIFT 5
+#define PSOC_ETR_AXICTL_CACHECTRLBIT3_MASK 0x20
+#define PSOC_ETR_AXICTL_SCATTERGATHERMODE_SHIFT 7
+#define PSOC_ETR_AXICTL_SCATTERGATHERMODE_MASK 0x80
+#define PSOC_ETR_AXICTL_WRBURSTLEN_SHIFT 8
+#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00
+
+/* PSOC_ETR_DBALO */
+#define PSOC_ETR_DBALO_BUFADDRLO_SHIFT 0
+#define PSOC_ETR_DBALO_BUFADDRLO_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_DBAHI */
+#define PSOC_ETR_DBAHI_BUFADDRHI_SHIFT 0
+#define PSOC_ETR_DBAHI_BUFADDRHI_MASK 0xFF
+
+/* PSOC_ETR_FFSR */
+#define PSOC_ETR_FFSR_FLINPROG_SHIFT 0
+#define PSOC_ETR_FFSR_FLINPROG_MASK 0x1
+#define PSOC_ETR_FFSR_FTSTOPPED_SHIFT 1
+#define PSOC_ETR_FFSR_FTSTOPPED_MASK 0x2
+
+/* PSOC_ETR_FFCR */
+#define PSOC_ETR_FFCR_ENFT_SHIFT 0
+#define PSOC_ETR_FFCR_ENFT_MASK 0x1
+#define PSOC_ETR_FFCR_ENTI_SHIFT 1
+#define PSOC_ETR_FFCR_ENTI_MASK 0x2
+#define PSOC_ETR_FFCR_FONFLIN_SHIFT 4
+#define PSOC_ETR_FFCR_FONFLIN_MASK 0x10
+#define PSOC_ETR_FFCR_FONTRIGEVT_SHIFT 5
+#define PSOC_ETR_FFCR_FONTRIGEVT_MASK 0x20
+#define PSOC_ETR_FFCR_FLUSHMAN_SHIFT 6
+#define PSOC_ETR_FFCR_FLUSHMAN_MASK 0x40
+#define PSOC_ETR_FFCR_TRIGONTRIGIN_SHIFT 8
+#define PSOC_ETR_FFCR_TRIGONTRIGIN_MASK 0x100
+#define PSOC_ETR_FFCR_TRIGONTRIGEVT_SHIFT 9
+#define PSOC_ETR_FFCR_TRIGONTRIGEVT_MASK 0x200
+#define PSOC_ETR_FFCR_TRIGONFL_SHIFT 10
+#define PSOC_ETR_FFCR_TRIGONFL_MASK 0x400
+#define PSOC_ETR_FFCR_STOPONFL_SHIFT 12
+#define PSOC_ETR_FFCR_STOPONFL_MASK 0x1000
+#define PSOC_ETR_FFCR_STOPONTRIGEVT_SHIFT 13
+#define PSOC_ETR_FFCR_STOPONTRIGEVT_MASK 0x2000
+
+/* PSOC_ETR_PSCR */
+#define PSOC_ETR_PSCR_PSCOUNT_SHIFT 0
+#define PSOC_ETR_PSCR_PSCOUNT_MASK 0x1F
+
+/* PSOC_ETR_ITMISCOP0 */
+#define PSOC_ETR_ITMISCOP0_ACQCOMP_SHIFT 0
+#define PSOC_ETR_ITMISCOP0_ACQCOMP_MASK 0x1
+#define PSOC_ETR_ITMISCOP0_FULL_SHIFT 1
+#define PSOC_ETR_ITMISCOP0_FULL_MASK 0x2
+
+/* PSOC_ETR_ITTRFLIN */
+#define PSOC_ETR_ITTRFLIN_TRIGIN_SHIFT 0
+#define PSOC_ETR_ITTRFLIN_TRIGIN_MASK 0x1
+#define PSOC_ETR_ITTRFLIN_FLUSHIN_SHIFT 1
+#define PSOC_ETR_ITTRFLIN_FLUSHIN_MASK 0x2
+
+/* PSOC_ETR_ITATBDATA0 */
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT0_SHIFT 0
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT0_MASK 0x1
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT7_SHIFT 1
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT7_MASK 0x2
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT15_SHIFT 2
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT15_MASK 0x4
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT23_SHIFT 3
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT23_MASK 0x8
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT31_SHIFT 4
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT31_MASK 0x10
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT39_SHIFT 5
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT39_MASK 0x20
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT47_SHIFT 6
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT47_MASK 0x40
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT55_SHIFT 7
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT55_MASK 0x80
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT63_SHIFT 8
+#define PSOC_ETR_ITATBDATA0_ATDATASBIT63_MASK 0x100
+
+/* PSOC_ETR_ITATBCTR2 */
+#define PSOC_ETR_ITATBCTR2_ATREADYS_SHIFT 0
+#define PSOC_ETR_ITATBCTR2_ATREADYS_MASK 0x1
+#define PSOC_ETR_ITATBCTR2_AFVALIDS_SHIFT 1
+#define PSOC_ETR_ITATBCTR2_AFVALIDS_MASK 0x2
+#define PSOC_ETR_ITATBCTR2_SYNCREQS_SHIFT 2
+#define PSOC_ETR_ITATBCTR2_SYNCREQS_MASK 0x4
+
+/* PSOC_ETR_ITATBCTR1 */
+#define PSOC_ETR_ITATBCTR1_ATIDS_SHIFT 0
+#define PSOC_ETR_ITATBCTR1_ATIDS_MASK 0x7F
+
+/* PSOC_ETR_ITATBCTR0 */
+#define PSOC_ETR_ITATBCTR0_ATVALIDS_SHIFT 0
+#define PSOC_ETR_ITATBCTR0_ATVALIDS_MASK 0x1
+#define PSOC_ETR_ITATBCTR0_AFREADYS_SHIFT 1
+#define PSOC_ETR_ITATBCTR0_AFREADYS_MASK 0x2
+#define PSOC_ETR_ITATBCTR0_ATBYTESS_SHIFT 8
+#define PSOC_ETR_ITATBCTR0_ATBYTESS_MASK 0x700
+
+/* PSOC_ETR_ITCTRL */
+#define PSOC_ETR_ITCTRL_INTEGRATION_MODE_SHIFT 0
+#define PSOC_ETR_ITCTRL_INTEGRATION_MODE_MASK 0x1
+
+/* PSOC_ETR_CLAIMSET */
+#define PSOC_ETR_CLAIMSET_CLAIMSET_SHIFT 0
+#define PSOC_ETR_CLAIMSET_CLAIMSET_MASK 0xF
+
+/* PSOC_ETR_CLAIMCLR */
+#define PSOC_ETR_CLAIMCLR_CLAIMCLR_SHIFT 0
+#define PSOC_ETR_CLAIMCLR_CLAIMCLR_MASK 0xF
+
+/* PSOC_ETR_LAR */
+#define PSOC_ETR_LAR_ACCESS_W_SHIFT 0
+#define PSOC_ETR_LAR_ACCESS_W_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_LSR */
+#define PSOC_ETR_LSR_LOCKEXIST_SHIFT 0
+#define PSOC_ETR_LSR_LOCKEXIST_MASK 0x1
+#define PSOC_ETR_LSR_LOCKGRANT_SHIFT 1
+#define PSOC_ETR_LSR_LOCKGRANT_MASK 0x2
+#define PSOC_ETR_LSR_LOCKTYPE_SHIFT 2
+#define PSOC_ETR_LSR_LOCKTYPE_MASK 0x4
+
+/* PSOC_ETR_AUTHSTATUS */
+#define PSOC_ETR_AUTHSTATUS_NSID_SHIFT 0
+#define PSOC_ETR_AUTHSTATUS_NSID_MASK 0x3
+#define PSOC_ETR_AUTHSTATUS_NSNID_SHIFT 2
+#define PSOC_ETR_AUTHSTATUS_NSNID_MASK 0xC
+#define PSOC_ETR_AUTHSTATUS_SID_SHIFT 4
+#define PSOC_ETR_AUTHSTATUS_SID_MASK 0x30
+#define PSOC_ETR_AUTHSTATUS_SNID_SHIFT 6
+#define PSOC_ETR_AUTHSTATUS_SNID_MASK 0xC0
+
+/* PSOC_ETR_DEVID */
+#define PSOC_ETR_DEVID_ATBINPORTCOUNT_SHIFT 0
+#define PSOC_ETR_DEVID_ATBINPORTCOUNT_MASK 0x1F
+#define PSOC_ETR_DEVID_CLKSCHEME_SHIFT 5
+#define PSOC_ETR_DEVID_CLKSCHEME_MASK 0x20
+#define PSOC_ETR_DEVID_CONFIGTYPE_SHIFT 6
+#define PSOC_ETR_DEVID_CONFIGTYPE_MASK 0xC0
+#define PSOC_ETR_DEVID_MEMWIDTH_SHIFT 8
+#define PSOC_ETR_DEVID_MEMWIDTH_MASK 0x700
+#define PSOC_ETR_DEVID_WBUF_DEPTH_SHIFT 11
+#define PSOC_ETR_DEVID_WBUF_DEPTH_MASK 0x3800
+
+/* PSOC_ETR_DEVTYPE */
+#define PSOC_ETR_DEVTYPE_MAJOR_TYPE_SHIFT 0
+#define PSOC_ETR_DEVTYPE_MAJOR_TYPE_MASK 0xF
+#define PSOC_ETR_DEVTYPE_SUB_TYPE_SHIFT 4
+#define PSOC_ETR_DEVTYPE_SUB_TYPE_MASK 0xF0
+
+/* PSOC_ETR_PERIPHID4 */
+#define PSOC_ETR_PERIPHID4_JEP106_CONT_SHIFT 0
+#define PSOC_ETR_PERIPHID4_JEP106_CONT_MASK 0xF
+#define PSOC_ETR_PERIPHID4_FOURKB_COUNT_SHIFT 4
+#define PSOC_ETR_PERIPHID4_FOURKB_COUNT_MASK 0xF0
+
+/* PSOC_ETR_PERIPHID5 */
+#define PSOC_ETR_PERIPHID5_PERIPHID5_SHIFT 0
+#define PSOC_ETR_PERIPHID5_PERIPHID5_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_PERIPHID6 */
+#define PSOC_ETR_PERIPHID6_PERIPHID6_SHIFT 0
+#define PSOC_ETR_PERIPHID6_PERIPHID6_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_PERIPHID7 */
+#define PSOC_ETR_PERIPHID7_PERIPHID7_SHIFT 0
+#define PSOC_ETR_PERIPHID7_PERIPHID7_MASK 0xFFFFFFFF
+
+/* PSOC_ETR_PERIPHID0 */
+#define PSOC_ETR_PERIPHID0_PART_NUMBER_BITS7TO0_SHIFT 0
+#define PSOC_ETR_PERIPHID0_PART_NUMBER_BITS7TO0_MASK 0xFF
+
+/* PSOC_ETR_PERIPHID1 */
+#define PSOC_ETR_PERIPHID1_PART_NUMBER_BITS11TO8_SHIFT 0
+#define PSOC_ETR_PERIPHID1_PART_NUMBER_BITS11TO8_MASK 0xF
+#define PSOC_ETR_PERIPHID1_JEP106_BITS3TO0_SHIFT 4
+#define PSOC_ETR_PERIPHID1_JEP106_BITS3TO0_MASK 0xF0
+
+/* PSOC_ETR_PERIPHID2 */
+#define PSOC_ETR_PERIPHID2_JEP106_BITS6TO4_SHIFT 0
+#define PSOC_ETR_PERIPHID2_JEP106_BITS6TO4_MASK 0x7
+#define PSOC_ETR_PERIPHID2_JEDEC_SHIFT 3
+#define PSOC_ETR_PERIPHID2_JEDEC_MASK 0x8
+#define PSOC_ETR_PERIPHID2_REVISION_SHIFT 4
+#define PSOC_ETR_PERIPHID2_REVISION_MASK 0xF0
+
+/* PSOC_ETR_PERIPHID3 */
+#define PSOC_ETR_PERIPHID3_CUSTOMER_MODIFIED_SHIFT 0
+#define PSOC_ETR_PERIPHID3_CUSTOMER_MODIFIED_MASK 0xF
+#define PSOC_ETR_PERIPHID3_REVAND_SHIFT 4
+#define PSOC_ETR_PERIPHID3_REVAND_MASK 0xF0
+
+/* PSOC_ETR_COMPID0 */
+#define PSOC_ETR_COMPID0_PREAMBLE_SHIFT 0
+#define PSOC_ETR_COMPID0_PREAMBLE_MASK 0xFF
+
+/* PSOC_ETR_COMPID1 */
+#define PSOC_ETR_COMPID1_PREAMBLE_SHIFT 0
+#define PSOC_ETR_COMPID1_PREAMBLE_MASK 0xF
+#define PSOC_ETR_COMPID1_F_CLASS_SHIFT 4
+#define PSOC_ETR_COMPID1_F_CLASS_MASK 0xF0
+
+/* PSOC_ETR_COMPID2 */
+#define PSOC_ETR_COMPID2_PREAMBLE_SHIFT 0
+#define PSOC_ETR_COMPID2_PREAMBLE_MASK 0xFF
+
+/* PSOC_ETR_COMPID3 */
+#define PSOC_ETR_COMPID3_PREAMBLE_SHIFT 0
+#define PSOC_ETR_COMPID3_PREAMBLE_MASK 0xFF
+
+#endif /* ASIC_REG_PSOC_ETR_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..980a3e0054c5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR
+ * (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x6C44004
+
+#define mmPSOC_ETR_STS 0x6C4400C
+
+#define mmPSOC_ETR_RRD 0x6C44010
+
+#define mmPSOC_ETR_RRP 0x6C44014
+
+#define mmPSOC_ETR_RWP 0x6C44018
+
+#define mmPSOC_ETR_TRG 0x6C4401C
+
+#define mmPSOC_ETR_CTL 0x6C44020
+
+#define mmPSOC_ETR_RWD 0x6C44024
+
+#define mmPSOC_ETR_MODE 0x6C44028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x6C4402C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x6C44030
+
+#define mmPSOC_ETR_BUFWM 0x6C44034
+
+#define mmPSOC_ETR_RRPHI 0x6C44038
+
+#define mmPSOC_ETR_RWPHI 0x6C4403C
+
+#define mmPSOC_ETR_AXICTL 0x6C44110
+
+#define mmPSOC_ETR_DBALO 0x6C44118
+
+#define mmPSOC_ETR_DBAHI 0x6C4411C
+
+#define mmPSOC_ETR_FFSR 0x6C44300
+
+#define mmPSOC_ETR_FFCR 0x6C44304
+
+#define mmPSOC_ETR_PSCR 0x6C44308
+
+#define mmPSOC_ETR_ITMISCOP0 0x6C44EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x6C44EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x6C44EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x6C44EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x6C44EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x6C44EF8
+
+#define mmPSOC_ETR_ITCTRL 0x6C44F00
+
+#define mmPSOC_ETR_CLAIMSET 0x6C44FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x6C44FA4
+
+#define mmPSOC_ETR_LAR 0x6C44FB0
+
+#define mmPSOC_ETR_LSR 0x6C44FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x6C44FB8
+
+#define mmPSOC_ETR_DEVID 0x6C44FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x6C44FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x6C44FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x6C44FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x6C44FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x6C44FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x6C44FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x6C44FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x6C44FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x6C44FEC
+
+#define mmPSOC_ETR_COMPID0 0x6C44FF0
+
+#define mmPSOC_ETR_COMPID1 0x6C44FF4
+
+#define mmPSOC_ETR_COMPID2 0x6C44FF8
+
+#define mmPSOC_ETR_COMPID3 0x6C44FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000000..9be3d656da3a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,1406 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF
+ * (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_MNL_RST_IND_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_MNL_RST_IND_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_WD_RST_IND_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_WD_RST_IND_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_SW_RST_IND_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_SW_RST_IND_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_SOFT_RST_IND_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_SOFT_RST_IND_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_PRST_RST_IND_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_PRST_RST_IND_MASK 0x100
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_FLR_RST_IND_SHIFT 9
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_FLR_RST_IND_MASK 0x200
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_FW_RST_IND_SHIFT 10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_FW_RST_IND_MASK 0x400
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_ECC_DERR_RST_IND_SHIFT 11
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_ECC_DERR_RST_IND_MASK 0x800
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0x1F
+
+/* PSOC_GLOBAL_CONF_BTL_ROM_DELAY */
+#define PSOC_GLOBAL_CONF_BTL_ROM_DELAY_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_ROM_DELAY_VAL_MASK 0xFFFF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0x1F
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0x1F
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_QSPI_SPI */
+#define PSOC_GLOBAL_CONF_QSPI_SPI_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_QSPI_SPI_SEL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SPI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SPI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_QSPI_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_QSPI_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR */
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SPI_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SPI_PRI_MASK 0x3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SPI_SEC_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SPI_SEC_MASK 0xC
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_PRI_SHIFT 4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_PRI_MASK 0x30
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SEC_SHIFT 6
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SEC_MASK 0xC0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCIE_PRI_SHIFT 8
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCIE_PRI_MASK 0x300
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCIE_SEC_SHIFT 10
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCIE_SEC_MASK 0xC00
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_SPI_PRI_SHIFT 12
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_SPI_PRI_MASK 0x3000
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_PRSTN_PRI_SHIFT 14
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_PRSTN_PRI_MASK 0xC000
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_SPI_SEC_SHIFT 16
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_SPI_SEC_MASK 0x30000
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_PRSTN_SEC_SHIFT 18
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRE_PRSTN_SEC_MASK 0xC0000
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_QSPI_SPI_BOOTSEQ_RST */
+#define PSOC_GLOBAL_CONF_QSPI_SPI_BOOTSEQ_RST_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_QSPI_SPI_BOOTSEQ_RST_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PHY_STABLE */
+#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_OVR */
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_SHIFT 4
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_ETR_FLUSH */
+#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ANY_RST */
+#define PSOC_GLOBAL_CONF_ANY_RST_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ANY_RST_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_COLD_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_DIS_RAZWI_ERR */
+#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PHY_RST_N */
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_CLK_DIS_SHIFT 16
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_CLK_DIS_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_RAZWI_INTERRUPT */
+#define PSOC_GLOBAL_CONF_RAZWI_INTERRUPT_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_RAZWI_INTERRUPT_INTR_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RAZWI_MASK_INFO */
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK 0x1
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_SHIFT 1
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK 0x2
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_SHIFT 2
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK 0x4
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_SHIFT 4
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK 0x3FF0
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_SHIFT 16
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK 0xFFFF0000
+
+/* PSOC_GLOBAL_CONF_BTL_PROT */
+#define PSOC_GLOBAL_CONF_BTL_PROT_AR_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_PROT_AR_MASK 0x7
+#define PSOC_GLOBAL_CONF_BTL_PROT_AW_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_PROT_AW_MASK 0x70
+
+/* PSOC_GLOBAL_CONF_BTL_ADDR_EXT */
+#define PSOC_GLOBAL_CONF_BTL_ADDR_EXT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_ADDR_EXT_VAL_MASK 0xFFFFF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TO */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RESET_DELAYS */
+#define PSOC_GLOBAL_CONF_RESET_DELAYS_PRE_RESET_SHIFT 0
+#define PSOC_GLOBAL_CONF_RESET_DELAYS_PRE_RESET_MASK 0xFFFF
+#define PSOC_GLOBAL_CONF_RESET_DELAYS_GRAD_RESET_SHIFT 16
+#define PSOC_GLOBAL_CONF_RESET_DELAYS_GRAD_RESET_MASK 0xFFFF0000
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SEMAPHORE */
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_CPU_BOOT_STATUS */
+#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU */
+#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPL_SOURCE */
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
+
+/* PSOC_GLOBAL_CONF_I2C_SLV */
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_INTR_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TRACE_ADDR */
+#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK 0xFFFFFF
+
+/* PSOC_GLOBAL_CONF_SMB_ALERT_CTRL */
+#define PSOC_GLOBAL_CONF_SMB_ALERT_CTRL_I2C_M0_ALERT_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_SMB_ALERT_CTRL_I2C_M0_ALERT_MASK_MASK 0xFF
+#define PSOC_GLOBAL_CONF_SMB_ALERT_CTRL_I2C_M1_ALERT_MASK_SHIFT 8
+#define PSOC_GLOBAL_CONF_SMB_ALERT_CTRL_I2C_M1_ALERT_MASK_MASK 0xFF00
+#define PSOC_GLOBAL_CONF_SMB_ALERT_CTRL_I2C_SLV_ALERT_MASK_SHIFT 16
+#define PSOC_GLOBAL_CONF_SMB_ALERT_CTRL_I2C_SLV_ALERT_MASK_MASK 0xFF0000
+
+/* PSOC_GLOBAL_CONF_SMB_ALERT_INTR_CAUSE */
+#define PSOC_GLOBAL_CONF_SMB_ALERT_INTR_CAUSE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SMB_ALERT_INTR_CAUSE_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CLEAR */
+#define PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CLEAR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CLEAR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CTRL */
+#define PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CTRL_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CTRL_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CTRL_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CTRL_MASK_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_TRACE_AXPROT */
+#define PSOC_GLOBAL_CONF_TRACE_AXPROT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_AXPROT_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_TRACE_AWUSER */
+#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TRACE_ARUSER */
+#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_STS */
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_SHIFT 8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_MASK 0x100
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_SHIFT 9
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_MASK 0x200
+
+/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ARC_WD_INTR */
+#define PSOC_GLOBAL_CONF_ARC_WD_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARC_WD_INTR_IND_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_ARC_WD_INTR_MASK */
+#define PSOC_GLOBAL_CONF_ARC_WD_INTR_MASK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARC_WD_INTR_MASK_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_DBG_APB_CTRL */
+#define PSOC_GLOBAL_CONF_DBG_APB_CTRL_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_DBG_APB_CTRL_SEL_MASK 0x1
+#define PSOC_GLOBAL_CONF_DBG_APB_CTRL_VAL_SHIFT 1
+#define PSOC_GLOBAL_CONF_DBG_APB_CTRL_VAL_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_BAUDR */
+#define PSOC_GLOBAL_CONF_SPI_DMA_BAUDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_BAUDR_VAL_MASK 0xFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_AWPROT */
+#define PSOC_GLOBAL_CONF_SPI_DMA_AWPROT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_AWPROT_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_AWUSER */
+#define PSOC_GLOBAL_CONF_SPI_DMA_AWUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_CTRL */
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_DST_SRAM_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_DST_SRAM_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_MEM_SIZE_SHIFT 4
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_MEM_SIZE_MASK 0x3FFF0
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_ADDR_SHIFT 18
+#define PSOC_GLOBAL_CONF_SPI_DMA_CTRL_ADDR_MASK 0xFFFC0000
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_STATUS */
+#define PSOC_GLOBAL_CONF_SPI_DMA_STATUS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_STATUS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_DMA_STATUS_ERROR_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_DMA_STATUS_ERROR_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_DMA_STATUS_COPIED_SHIFT 4
+#define PSOC_GLOBAL_CONF_SPI_DMA_STATUS_COPIED_MASK 0x3FFF0
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_L */
+#define PSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_H */
+#define PSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL */
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_BYTE_SWAP_SHIFT 4
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_BYTE_SWAP_MASK 0x10
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WRITE_CMD_SHIFT 8
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WRITE_CMD_MASK 0xFF00
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WREN_CMD_SHIFT 16
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WREN_CMD_MASK 0xFF0000
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WRDI_CMD_SHIFT 24
+#define PSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL_WRDI_CMD_MASK 0xFF000000
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL_RESP_ERR_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL_RESP_ERR_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL_SE_RANGE_SEL_SHIFT 4
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_CTRL_SE_RANGE_SEL_MASK 0xFF0
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_L */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_H */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_H_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_L */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_H */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_H_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_L */
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_H */
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_H_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_SE_STATUS */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_SE_STATUS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_SE_STATUS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_QSPI_WR_WO_SE_STATUS */
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_SE_STATUS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_SE_STATUS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_ERR_ADDR */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_ERR_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_ERR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_QSPI_WR_WO_ERR_ADDR */
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_ERR_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_QSPI_WR_WO_ERR_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_MASK */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_MASK_QSPI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_MASK_QSPI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_MASK_SPI_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_MASK_SPI_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CAUSE */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CAUSE_QSPI_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CAUSE_QSPI_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CAUSE_SPI_IND_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CAUSE_SPI_IND_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CLEAR */
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CLEAR_QSPI_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CLEAR_QSPI_VAL_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CLEAR_SPI_VAL_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CLEAR_SPI_VAL_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MSTR_IF */
+#define PSOC_GLOBAL_CONF_MSTR_IF_GRACEFULL_CLEAR_SHIFT 0
+#define PSOC_GLOBAL_CONF_MSTR_IF_GRACEFULL_CLEAR_MASK 0x1
+#define PSOC_GLOBAL_CONF_MSTR_IF_FORCE_BP_SHIFT 1
+#define PSOC_GLOBAL_CONF_MSTR_IF_FORCE_BP_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_TARGETID */
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 16
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFF0000
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
+
+/* PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL */
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_NUM_OS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_NUM_OS_RD_MASK 0xFF
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_NUM_OS_WR_SHIFT 8
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_NUM_OS_WR_MASK 0xFF00
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_FORCE_WR_BUF_SHIFT 16
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_FORCE_WR_BUF_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2 */
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_AWCACHE_OVRD_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_AWCACHE_OVRD_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_AWCACHE_OVRD_VAL_SHIFT 4
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_AWCACHE_OVRD_VAL_MASK 0xF0
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_ARCACHE_OVRD_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_ARCACHE_OVRD_EN_MASK 0x100
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_ARCACHE_OVRD_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_ARCACHE_OVRD_VAL_MASK 0xF000
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_NO_WR_INFLIGHT_SHIFT 16
+#define PSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_NO_WR_INFLIGHT_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_RSVD_0_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_RSVD_0_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_MEM_REPAIR_CFG_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_MEM_REPAIR_CFG_MASK 0xC
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_CPOL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_CPOL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_CPHA_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_CPHA_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_BTL_EN_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_BTL_EN_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_BTL_ROM_EN_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_BTL_ROM_EN_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_DUMP_SEL_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_DUMP_SEL_MASK 0x3FFF00
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_RSVD_1_SHIFT 22
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_RSVD_1_MASK 0x400000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_DUMP_DIS_SHIFT 23
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_DUMP_DIS_MASK 0x800000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_I2C_SHIFT 24
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_I2C_MASK 0x1F000000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_SPI_QSPI_SHIFT 29
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_SPI_QSPI_MASK 0x20000000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_CPU_PLL_CFG_SHIFT 30
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L_CPU_PLL_CFG_MASK 0xC0000000
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_SECURITY_BYPASS_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_SECURITY_BYPASS_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_SRIS_MODE_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_SRIS_MODE_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_I2C_SLV_ADDR_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_I2C_SLV_ADDR_MASK 0x7C
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_RERERVED_STRAP_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H_RERERVED_STRAP_MASK 0x380
+
+/* PSOC_GLOBAL_CONF_LEGACY_BOOT_STRAPS */
+#define PSOC_GLOBAL_CONF_LEGACY_BOOT_STRAPS_PCIE_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_LEGACY_BOOT_STRAPS_PCIE_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_DIV */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_SHIFT 8
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_MASK 0xFF00
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MASK_REQ */
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BSAC_CTRL */
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_ENABLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_ENABLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_HOLD_SHIFT 1
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_HOLD_MASK 0x2
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_DONE_SHIFT 4
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_DONE_MASK 0x10
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_STARTED_SHIFT 5
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_STARTED_MASK 0x20
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_APBERROR_SHIFT 6
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_APBERROR_MASK 0x40
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_FRF_SHIFT 8
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_FRF_MASK 0x300
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_TMOD_SHIFT 10
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_TMOD_MASK 0xC00
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_SPI_FRF_SHIFT 12
+#define PSOC_GLOBAL_CONF_BSAC_CTRL_SPI_FRF_MASK 0x3000
+
+/* PSOC_GLOBAL_CONF_BSAC_ADDR */
+#define PSOC_GLOBAL_CONF_BSAC_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BSAC_ADDR_VAL_MASK 0xFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BSAC_DATA */
+#define PSOC_GLOBAL_CONF_BSAC_DATA_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BSAC_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL */
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL_ADDR_SHIFT 0
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL_ADDR_MASK 0xFFFFFFF
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL_ENABLE_SHIFT 28
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL_ENABLE_MASK 0x10000000
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL_DONE_SHIFT 29
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_CTRL_DONE_MASK 0x20000000
+
+/* PSOC_GLOBAL_CONF_BSAC_POLLING_DATA */
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_DATA_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BSAC_POLLING_MASK */
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_MASK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BSAC_POLLING_MASK_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_IMG */
+#define PSOC_GLOBAL_CONF_BTL_IMG_SPI_IMAGE_FLIP_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_IMG_SPI_IMAGE_FLIP_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_IMG_PRST_IMAGE_FLIP_SHIFT 1
+#define PSOC_GLOBAL_CONF_BTL_IMG_PRST_IMAGE_FLIP_MASK 0x2
+#define PSOC_GLOBAL_CONF_BTL_IMG_PCIE_IMAGE_FLIP_SHIFT 2
+#define PSOC_GLOBAL_CONF_BTL_IMG_PCIE_IMAGE_FLIP_MASK 0x4
+#define PSOC_GLOBAL_CONF_BTL_IMG_SW_RST_RUN_PCIE_IMAGE_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_IMG_SW_RST_RUN_PCIE_IMAGE_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_IMG_SOFT_RST_RUN_PCIE_IMAGE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BTL_IMG_SOFT_RST_RUN_PCIE_IMAGE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BTL_IMG_WD_RST_RUN_PCIE_IMAGE_SHIFT 6
+#define PSOC_GLOBAL_CONF_BTL_IMG_WD_RST_RUN_PCIE_IMAGE_MASK 0x40
+#define PSOC_GLOBAL_CONF_BTL_IMG_MNL_RST_RUN_PCIE_IMAGE_SHIFT 7
+#define PSOC_GLOBAL_CONF_BTL_IMG_MNL_RST_RUN_PCIE_IMAGE_MASK 0x80
+#define PSOC_GLOBAL_CONF_BTL_IMG_PRST_RUN_PCIE_IMAGE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_IMG_PRST_RUN_PCIE_IMAGE_MASK 0x100
+#define PSOC_GLOBAL_CONF_BTL_IMG_FLR_RST_RUN_PCIE_IMAGE_SHIFT 9
+#define PSOC_GLOBAL_CONF_BTL_IMG_FLR_RST_RUN_PCIE_IMAGE_MASK 0x200
+#define PSOC_GLOBAL_CONF_BTL_IMG_FW_RST_RUN_PCIE_IMAGE_SHIFT 10
+#define PSOC_GLOBAL_CONF_BTL_IMG_FW_RST_RUN_PCIE_IMAGE_MASK 0x400
+
+/* PSOC_GLOBAL_CONF_PRSTN_MASK */
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_MASK */
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_SRC */
+#define PSOC_GLOBAL_CONF_RST_SRC_COLD_RST_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_SRC_COLD_RST_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_RST_SRC_MNL_RST_IND_SHIFT 1
+#define PSOC_GLOBAL_CONF_RST_SRC_MNL_RST_IND_MASK 0x2
+#define PSOC_GLOBAL_CONF_RST_SRC_PRSTN_RST_IND_SHIFT 2
+#define PSOC_GLOBAL_CONF_RST_SRC_PRSTN_RST_IND_MASK 0x4
+#define PSOC_GLOBAL_CONF_RST_SRC_SOFT_RST_IND_SHIFT 3
+#define PSOC_GLOBAL_CONF_RST_SRC_SOFT_RST_IND_MASK 0x8
+#define PSOC_GLOBAL_CONF_RST_SRC_WD_RST_IND_SHIFT 4
+#define PSOC_GLOBAL_CONF_RST_SRC_WD_RST_IND_MASK 0x10
+#define PSOC_GLOBAL_CONF_RST_SRC_FW_RST_IND_SHIFT 5
+#define PSOC_GLOBAL_CONF_RST_SRC_FW_RST_IND_MASK 0x20
+#define PSOC_GLOBAL_CONF_RST_SRC_SW_RST_IND_SHIFT 6
+#define PSOC_GLOBAL_CONF_RST_SRC_SW_RST_IND_MASK 0x40
+#define PSOC_GLOBAL_CONF_RST_SRC_FLR_RST_IND_SHIFT 7
+#define PSOC_GLOBAL_CONF_RST_SRC_FLR_RST_IND_MASK 0x80
+#define PSOC_GLOBAL_CONF_RST_SRC_ECC_DERR_RST_IND_SHIFT 8
+#define PSOC_GLOBAL_CONF_RST_SRC_ECC_DERR_RST_IND_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_BOOT_STATE */
+#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_FROM_PCIE_CTRL */
+#define PSOC_GLOBAL_CONF_RST_FROM_PCIE_CTRL_SOFT_RST_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_FROM_PCIE_CTRL_SOFT_RST_MASK_MASK 0x1
+#define PSOC_GLOBAL_CONF_RST_FROM_PCIE_CTRL_SW_RST_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_RST_FROM_PCIE_CTRL_SW_RST_MASK_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_BNK3V3_MS */
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_TPC_ISO */
+#define PSOC_GLOBAL_CONF_TPC_ISO_ISO_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_TPC_ISO_ISO_EN_MASK 0x1FFFFFF
+
+/* PSOC_GLOBAL_CONF_VDEC_ISO */
+#define PSOC_GLOBAL_CONF_VDEC_ISO_ISO_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_VDEC_ISO_ISO_EN_MASK 0x3FF
+
+/* PSOC_GLOBAL_CONF_NIC_ISO */
+#define PSOC_GLOBAL_CONF_NIC_ISO_ISO_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_NIC_ISO_ISO_EN_MASK 0xFFF
+
+/* PSOC_GLOBAL_CONF_MME_ISO */
+#define PSOC_GLOBAL_CONF_MME_ISO_MME0_EU_RO_ISO_SHIFT 0
+#define PSOC_GLOBAL_CONF_MME_ISO_MME0_EU_RO_ISO_MASK 0x3F
+#define PSOC_GLOBAL_CONF_MME_ISO_MME1_EU_RO_ISO_SHIFT 6
+#define PSOC_GLOBAL_CONF_MME_ISO_MME1_EU_RO_ISO_MASK 0xFC0
+#define PSOC_GLOBAL_CONF_MME_ISO_MME2_EU_RO_ISO_SHIFT 12
+#define PSOC_GLOBAL_CONF_MME_ISO_MME2_EU_RO_ISO_MASK 0x3F000
+#define PSOC_GLOBAL_CONF_MME_ISO_MME3_EU_RO_ISO_SHIFT 18
+#define PSOC_GLOBAL_CONF_MME_ISO_MME3_EU_RO_ISO_MASK 0xFC0000
+
+/* PSOC_GLOBAL_CONF_EDMA_ISO */
+#define PSOC_GLOBAL_CONF_EDMA_ISO_ISO_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_EDMA_ISO_ISO_EN_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_HBM_ISO */
+#define PSOC_GLOBAL_CONF_HBM_ISO_HBM_TO_XBAR_SHIFT 0
+#define PSOC_GLOBAL_CONF_HBM_ISO_HBM_TO_XBAR_MASK 0xFFF
+#define PSOC_GLOBAL_CONF_HBM_ISO_HBM_TO_HCH_SHIFT 16
+#define PSOC_GLOBAL_CONF_HBM_ISO_HBM_TO_HCH_MASK 0x3F0000
+
+/* PSOC_GLOBAL_CONF_XBAR_EDGE_ISO */
+#define PSOC_GLOBAL_CONF_XBAR_EDGE_ISO_ISO_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_XBAR_EDGE_ISO_ISO_EN_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_HIF_HMMU_ISO */
+#define PSOC_GLOBAL_CONF_HIF_HMMU_ISO_ISO_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_HIF_HMMU_ISO_ISO_EN_MASK 0xFFFF
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_STATUS */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_STATUS_FAILED_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_STATUS_FAILED_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_LSB_ADDR_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_LSB_ADDR_MASK 0xFFF
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_PPROT_SHIFT 12
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_PPROT_MASK 0x7000
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_PWRITE_SHIFT 16
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_PWRITE_MASK 0x10000
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_FENCE_SHIFT 17
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_FENCE_MASK 0x20000
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_DROP_SHIFT 18
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_DROP_MASK 0x40000
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_DST_ID_SHIFT 20
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_DST_ID_MASK 0x3F00000
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_WR */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_WR_PWDATA_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_REQ_WR_PWDATA_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS_RES_READY_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS_RES_READY_MASK 0x1
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS_PSLVERR_SHIFT 4
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS_PSLVERR_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_RES_POP */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_RES_POP_PRDATA_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_RES_POP_PRDATA_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_REQ_BUFF_FULL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_REQ_BUFF_FULL_MASK 0x1
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RES_BUFF_FULL_SHIFT 1
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RES_BUFF_FULL_MASK 0x2
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_REQ_ILLEGAL_SHIFT 2
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_REQ_ILLEGAL_MASK 0x4
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RES_DATA_OVRN_SHIFT 3
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RES_DATA_OVRN_MASK 0x8
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_SHIFT 4
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_MASK 0x10
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RES_QUAL_OVRN_SHIFT 5
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RES_QUAL_OVRN_MASK 0x20
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_POP_RES_WHILE_EMPTY_SHIFT 6
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_POP_RES_WHILE_EMPTY_MASK 0x40
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PUSH_REQ_WHILE_FULL_SHIFT 7
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PUSH_REQ_WHILE_FULL_MASK 0x80
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RX_TIMEOUT_SHIFT 8
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_RX_TIMEOUT_MASK 0x100
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_TX_TIMEOUT_SHIFT 9
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_TX_TIMEOUT_MASK 0x200
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_ADDR_SHIFT 12
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_ADDR_MASK 0xFFF000
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_DST_ID_SHIFT 24
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_DST_ID_MASK 0x3F000000
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_DROP_SHIFT 31
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_PSLVERR_DROP_MASK 0x80000000
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_REQ_BUFF_FULL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_REQ_BUFF_FULL_MASK 0x1
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_BUFF_FULL_SHIFT 1
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_BUFF_FULL_MASK 0x2
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_REQ_ILLEGAL_SHIFT 2
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_REQ_ILLEGAL_MASK 0x4
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_DATA_OVRN_SHIFT 3
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_DATA_OVRN_MASK 0x8
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_PSLVERR_SHIFT 4
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_PSLVERR_MASK 0x10
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_QUAL_OVRN_SHIFT 5
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_QUAL_OVRN_MASK 0x20
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_POP_RES_WHILE_EMPTY_SHIFT 6
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_POP_RES_WHILE_EMPTY_MASK 0x40
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_PUSH_REQ_WHILE_FULL_SHIFT 7
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_PUSH_REQ_WHILE_FULL_MASK 0x80
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RX_TIMEOUT_SHIFT 8
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RX_TIMEOUT_MASK 0x100
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_TX_TIMEOUT_SHIFT 9
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_TX_TIMEOUT_MASK 0x200
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_VALID_SHIFT 16
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_MASK_RES_VALID_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS */
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS_REQ_LL_USED_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS_REQ_LL_USED_MASK 0x3F
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS_RES_LL_USED_SHIFT 8
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS_RES_LL_USED_MASK 0x1F00
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS_OTF_FIFO_USED_SHIFT 16
+#define PSOC_GLOBAL_CONF_ASIF_MSTR_STATUS_OTF_FIFO_USED_MASK 0x3F0000
+
+/* PSOC_GLOBAL_CONF_ASIF_CORE_CFG */
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_RISE_DELAY_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_RISE_DELAY_MASK 0x1F
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_FALL_DELAY_SHIFT 8
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_FALL_DELAY_MASK 0x1F00
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_DETECT_DELAY_SHIFT 16
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_DETECT_DELAY_MASK 0xF0000
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_FLUSH_DESIGN_SHIFT 31
+#define PSOC_GLOBAL_CONF_ASIF_CORE_CFG_FLUSH_DESIGN_MASK 0x80000000
+
+/* PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT */
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_DATA_OVRN_CNT_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_DATA_OVRN_CNT_MASK 0xF
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_QUAL_OVRN_CNT_SHIFT 4
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_QUAL_OVRN_CNT_MASK 0xF0
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_DETECT_CYCLES_CNT_SHIFT 8
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_DETECT_CYCLES_CNT_MASK 0xF00
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_CNT_SHIFT 12
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_CNT_MASK 0xF000
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_TX_CNT_SHIFT 16
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_TX_CNT_MASK 0xF0000
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_FSM_SHIFT 20
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_RX_FSM_MASK 0xF00000
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_TX_FSM_SHIFT 24
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_TX_FSM_MASK 0xF000000
+
+/* PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR */
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_GLB_CLEAR_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_GLB_CLEAR_MASK 0x1
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_RX_DATA_OVRN_CLR_SHIFT 1
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_RX_DATA_OVRN_CLR_MASK 0x2
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_RX_QUAL_OVRN_CLR_SHIFT 2
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_RX_QUAL_OVRN_CLR_MASK 0x4
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_RX_CLR_SHIFT 3
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_RX_CLR_MASK 0x8
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_TX_CLR_SHIFT 4
+#define PSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR_TX_CLR_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG */
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_RX_TIMEOUT_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_RX_TIMEOUT_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_TX_TIMEOUT_EN_SHIFT 1
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_TX_TIMEOUT_EN_MASK 0x2
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_RX_TIMEOUT_RES_SHIFT 2
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_RX_TIMEOUT_RES_MASK 0x4
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_TX_TIMEOUT_RES_SHIFT 3
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_TX_TIMEOUT_RES_MASK 0x8
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_RX_TIMEOUT_VALUE_SHIFT 8
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_RX_TIMEOUT_VALUE_MASK 0x3FF00
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_TX_TIMEOUT_VALUE_SHIFT 20
+#define PSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG_TX_TIMEOUT_VALUE_MASK 0x3FF00000
+
+/* PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CAUSE */
+#define PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CAUSE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CAUSE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CLEAR */
+#define PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CLEAR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CLEAR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_MASK */
+#define PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_MASK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_FUNC_INTR_MASK_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ASIF_ERR_INTR_CAUSE */
+#define PSOC_GLOBAL_CONF_ASIF_ERR_INTR_CAUSE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_ERR_INTR_CAUSE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ASIF_ERR_INTR_CLEAR */
+#define PSOC_GLOBAL_CONF_ASIF_ERR_INTR_CLEAR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_ERR_INTR_CLEAR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ASIF_ERR_INTR_MASK */
+#define PSOC_GLOBAL_CONF_ASIF_ERR_INTR_MASK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ASIF_ERR_INTR_MASK_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_SEL */
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_SMI_ACCESS_EN */
+#define PSOC_GLOBAL_CONF_SMI_ACCESS_EN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SMI_ACCESS_EN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SCRAM_EXTMEM_EN */
+#define PSOC_GLOBAL_CONF_SCRAM_EXTMEM_EN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRAM_EXTMEM_EN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SCRAM_PERM_SEL */
+#define PSOC_GLOBAL_CONF_SCRAM_PERM_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRAM_PERM_SEL_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SCRAM_POLY_H3 */
+#define PSOC_GLOBAL_CONF_SCRAM_POLY_H3_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRAM_POLY_H3_VAL_MASK 0x1FFFFFFF
+
+/* PSOC_GLOBAL_CONF_CORE_MODE */
+#define PSOC_GLOBAL_CONF_CORE_MODE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_CORE_MODE_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_EXTMEM_ID_LOC */
+#define PSOC_GLOBAL_CONF_EXTMEM_ID_LOC_USER_SHRD_IND_LOC_SHIFT 24
+#define PSOC_GLOBAL_CONF_EXTMEM_ID_LOC_USER_SHRD_IND_LOC_MASK 0x3F000000
+
+/* PSOC_GLOBAL_CONF_LBW_USER_CTRL */
+#define PSOC_GLOBAL_CONF_LBW_USER_CTRL_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_LBW_USER_CTRL_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_STM_ID */
+#define PSOC_GLOBAL_CONF_ADC_STM_ID_STM_MSTR_ID_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_STM_ID_STM_MSTR_ID_MASK 0x3F
+
+/* PSOC_GLOBAL_CONF_ADC */
+#define PSOC_GLOBAL_CONF_ADC_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_INTR_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_ADC_INT_MASK */
+#define PSOC_GLOBAL_CONF_ADC_INT_MASK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_INT_MASK_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_ADC_CLK_FREQ */
+#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START */
+#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_SAMPLES */
+#define PSOC_GLOBAL_CONF_ADC_SAMPLES_DATA_SAMPLES_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_SAMPLES_DATA_SAMPLES_MASK 0x1F
+#define PSOC_GLOBAL_CONF_ADC_SAMPLES_CLK_SAMPLES_SHIFT 8
+#define PSOC_GLOBAL_CONF_ADC_SAMPLES_CLK_SAMPLES_MASK 0x1F00
+
+/* PSOC_GLOBAL_CONF_ADC_TPH_CS */
+#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_LSB_NMSB */
+#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES */
+#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE */
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_MASK 0x1
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_CH_SEL_SHIFT 4
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_CH_SEL_MASK 0x30
+
+/* PSOC_GLOBAL_CONF_ADC_TDV_CSDO */
+#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_PID_SEL */
+#define PSOC_GLOBAL_CONF_ADC_PID_SEL_ADC_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_PID_SEL_ADC_SEL_MASK 0x3
+#define PSOC_GLOBAL_CONF_ADC_PID_SEL_CHANNEL_SEL_SHIFT 4
+#define PSOC_GLOBAL_CONF_ADC_PID_SEL_CHANNEL_SEL_MASK 0x30
+
+/* PSOC_GLOBAL_CONF_ADC_TSU_CSCK */
+#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_CH_SEL */
+#define PSOC_GLOBAL_CONF_ADC_CH_SEL_SEL_DELAY_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CH_SEL_SEL_DELAY_MASK 0xFF
+#define PSOC_GLOBAL_CONF_ADC_CH_SEL_SEL_MAX_SHIFT 8
+#define PSOC_GLOBAL_CONF_ADC_CH_SEL_SEL_MAX_MASK 0x300
+
+/* PSOC_GLOBAL_CONF_ADC_WRITE_ADDR */
+#define PSOC_GLOBAL_CONF_ADC_WRITE_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_WRITE_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_ADC_CFG_DATA */
+#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL */
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_AUX_WR_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_AUX_WR_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_WR_EN_SHIFT 1
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_WR_EN_MASK 0x2
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_GRNT_SHIFT 12
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_GRNT_MASK 0x1000
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_IS_DATA_SHIFT 13
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_IS_DATA_MASK 0x2000
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_IS_TS_SHIFT 14
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_IS_TS_MASK 0x4000
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_IS_MARKED_SHIFT 15
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_IS_MARKED_MASK 0x8000
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_CAUSE_TRIG_SHIFT 16
+#define PSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_STM_EV_CAUSE_TRIG_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_CTRL */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_CTRL_RRESP_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_CTRL_RRESP_VAL_MASK 0x3
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_CTRL_WIN_EN_SHIFT 4
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_CTRL_WIN_EN_MASK 0xF0
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_L */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_L_VAL_SHIFT 12
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_L_VAL_MASK 0xFFFFF000
+
+/* PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_H */
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD_INIT_CTRL */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_INIT_CTRL_START_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_INIT_CTRL_START_MASK 0x1
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_INIT_CTRL_DONE_SHIFT 4
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_INIT_CTRL_DONE_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_RST_OUT_CTRL */
+#define PSOC_GLOBAL_CONF_RST_OUT_CTRL_CLR_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_OUT_CTRL_CLR_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_STATUS */
+#define PSOC_GLOBAL_CONF_MEM_CPY_STATUS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_STATUS_DONE_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_H */
+#define PSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_L */
+#define PSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_H */
+#define PSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_L */
+#define PSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_CTRL2 */
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_MEM_SIZE_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_MEM_SIZE_MASK 0xFFFF
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_WR_OS_SHIFT 16
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_WR_OS_MASK 0x3F0000
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_RD_OS_SHIFT 24
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_RD_OS_MASK 0x3F000000
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_USE_CONST_SHIFT 31
+#define PSOC_GLOBAL_CONF_MEM_CPY_CTRL2_USE_CONST_MASK 0x80000000
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_CONST */
+#define PSOC_GLOBAL_CONF_MEM_CPY_CONST_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_CONST_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_H */
+#define PSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_L */
+#define PSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_CFG */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_FORCE_RESP_OK_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_FORCE_RESP_OK_MASK 0x1
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_FORCE_WR_BUF_SHIFT 1
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_FORCE_WR_BUF_MASK 0x2
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_NUM_RD_OS_SHIFT 8
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_NUM_RD_OS_MASK 0xFF00
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_NUM_WR_OS_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_CFG_NUM_WR_OS_MASK 0xFF0000
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1 */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_RD_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_RD_EN_MASK 0x7
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_RD_VAL_SHIFT 8
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_RD_VAL_MASK 0x700
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_WR_EN_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_WR_EN_MASK 0x70000
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_WR_VAL_SHIFT 24
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1_OVRD_WR_VAL_MASK 0x7000000
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG0 */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG0_OVRD_RD_EN_31_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG0_OVRD_RD_EN_31_0_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG1 */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG1_OVRD_RD_31_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG1_OVRD_RD_31_0_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG2 */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG2_OVRD_WR_EN_31_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG2_OVRD_WR_EN_31_0_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG3 */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG3_OVRD_WR_31_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG3_OVRD_WR_31_0_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4 */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_RD_EN_39_32_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_RD_EN_39_32_MASK 0xFF
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_RD_39_32_SHIFT 8
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_RD_39_32_MASK 0xFF00
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_WR_EN_39_32_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_WR_EN_39_32_MASK 0xFF0000
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_WR_39_32_SHIFT 24
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4_OVRD_WR_39_32_MASK 0xFF000000
+
+/* PSOC_GLOBAL_CONF_LBW_ARUSER_OVRD */
+#define PSOC_GLOBAL_CONF_LBW_ARUSER_OVRD_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_LBW_ARUSER_OVRD_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_LBW_ARUSER_OVRD_EN */
+#define PSOC_GLOBAL_CONF_LBW_ARUSER_OVRD_EN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_LBW_ARUSER_OVRD_EN_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_LBW_AWUSER_OVRD */
+#define PSOC_GLOBAL_CONF_LBW_AWUSER_OVRD_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_LBW_AWUSER_OVRD_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_LBW_AWUSER_OVRD_EN */
+#define PSOC_GLOBAL_CONF_LBW_AWUSER_OVRD_EN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_LBW_AWUSER_OVRD_EN_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2 */
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_INTR_CAUSE_SHIFT 0
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_INTR_CAUSE_MASK 0x1
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_INTR_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_INTR_MASK_MASK 0x10
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_NO_WR_INFLIGHT_SHIFT 5
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_NO_WR_INFLIGHT_MASK 0x20
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_SEI_INTR_ID_SHIFT 8
+#define PSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2_SEI_INTR_ID_MASK 0x7FFFFF00
+
+/* PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2 */
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_INTR_CAUSE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_INTR_CAUSE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_INTR_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_INTR_MASK_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_NO_WR_INFLIGHT_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_NO_WR_INFLIGHT_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_SEI_INTR_ID_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2_SEI_INTR_ID_MASK 0xFFFFF00
+
+/* PSOC_GLOBAL_CONF_AXI_SPLIT_INTR_CLEAR */
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_INTR_CLEAR_MAIN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_INTR_CLEAR_MAIN_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_INTR_CLEAR_BOOTROM_IND_SHIFT 1
+#define PSOC_GLOBAL_CONF_AXI_SPLIT_INTR_CLEAR_BOOTROM_IND_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_PROT */
+#define PSOC_GLOBAL_CONF_MEM_CPY_PROT_AR_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_PROT_AR_MASK 0x7
+#define PSOC_GLOBAL_CONF_MEM_CPY_PROT_AW_SHIFT 4
+#define PSOC_GLOBAL_CONF_MEM_CPY_PROT_AW_MASK 0x70
+
+/* PSOC_GLOBAL_CONF_ISOLATE_INPUTS */
+#define PSOC_GLOBAL_CONF_ISOLATE_INPUTS_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_ISOLATE_INPUTS_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL */
+#define PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL_BLOCK_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL_BLOCK_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL_BLOCK_BRESP_SHIFT 1
+#define PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL_BLOCK_BRESP_MASK 0x6
+#define PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL_BLOCK_RRESP_SHIFT 5
+#define PSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL_BLOCK_RRESP_MASK 0x60
+
+/* PSOC_GLOBAL_CONF_ARC_JT_SEL */
+#define PSOC_GLOBAL_CONF_ARC_JT_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARC_JT_SEL_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PLL_DUMP_CRTL */
+#define PSOC_GLOBAL_CONF_PLL_DUMP_CRTL_PLL_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PLL_DUMP_CRTL_PLL_SEL_MASK 0x3F
+#define PSOC_GLOBAL_CONF_PLL_DUMP_CRTL_BIT_SEL_SHIFT 8
+#define PSOC_GLOBAL_CONF_PLL_DUMP_CRTL_BIT_SEL_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_MEM_CPY_AXUSER */
+#define PSOC_GLOBAL_CONF_MEM_CPY_AXUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_CPY_AXUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_AXUSER */
+#define PSOC_GLOBAL_CONF_BTL_AXUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_AXUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0 */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC0_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC0_MASK 0x3F
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC1_SHIFT 6
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC1_MASK 0xFC0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_SHIFT 12
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_MASK 0x3F000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_SHIFT 18
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK \
+0xFC0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_SHIFT 24
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK \
+0x3F000000
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1 */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC1_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC1_MASK 0x3F
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC2_SHIFT 6
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC2_MASK 0xFC0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_PC_EN_SHIFT 12
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_PC_EN_MASK 0x1000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_SHIFT 13
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_MASK 0x2000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_SHIFT 14
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK \
+0x4000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT \
+16
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK \
+0xFF0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_SHIFT 24
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_MASK 0x7000000
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2 */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT \
+0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK \
+0xFFFF
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT \
+16
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK \
+0xFFFF0000
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3 */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP0_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP0_MASK 0x7
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP1_SHIFT 3
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP1_MASK 0x38
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP2_SHIFT 6
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP2_MASK 0x1C0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP3_SHIFT 9
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP3_MASK 0xE00
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP4_SHIFT 12
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP4_MASK 0x7000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP5_SHIFT 15
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP5_MASK 0x38000
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_AXI_RESP_SHIFT 4
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_AXI_RESP_MASK 0x30
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_DRAIN_HBW_SHIFT 8
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_DRAIN_HBW_MASK 0x100
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_DRAIN_LBW_SHIFT 9
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_DRAIN_LBW_MASK 0x200
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_INTR_MASK_HBW_SHIFT 12
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_INTR_MASK_HBW_MASK 0x1000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_INTR_MASK_LBW_SHIFT 13
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_CTRL_INTR_MASK_LBW_MASK 0x2000
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_TIMEOUT */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AXI_DRAIN_INTR */
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_INTR_HBW_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_INTR_HBW_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_INTR_LBW_IND_SHIFT 1
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_INTR_LBW_IND_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_BTL_STOP_SPI_CLK */
+#define PSOC_GLOBAL_CONF_BTL_STOP_SPI_CLK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STOP_SPI_CLK_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ECO_INTR_CAUSE */
+#define PSOC_GLOBAL_CONF_ECO_INTR_CAUSE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ECO_INTR_CAUSE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ECO_INTR_CLEAR */
+#define PSOC_GLOBAL_CONF_ECO_INTR_CLEAR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_ECO_INTR_CLEAR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ECO_INTR_MASK */
+#define PSOC_GLOBAL_CONF_ECO_INTR_MASK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ECO_INTR_MASK_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_DFT_APB_CONTROL */
+#define PSOC_GLOBAL_CONF_DFT_APB_CONTROL_SPIF_MODE_SHIFT 0
+#define PSOC_GLOBAL_CONF_DFT_APB_CONTROL_SPIF_MODE_MASK 0x1
+#define PSOC_GLOBAL_CONF_DFT_APB_CONTROL_RESERVED_OUT_SHIFT 1
+#define PSOC_GLOBAL_CONF_DFT_APB_CONTROL_RESERVED_OUT_MASK 0xFFFE
+#define PSOC_GLOBAL_CONF_DFT_APB_CONTROL_RESERVED_IN_SHIFT 16
+#define PSOC_GLOBAL_CONF_DFT_APB_CONTROL_RESERVED_IN_MASK 0xFFFF0000
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
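
[Editor's note, not part of the patch: the header above only defines SHIFT/MASK pairs for fields packed into 32-bit registers. A minimal, self-contained sketch of how such pairs are typically used to read and update a field is shown below. The field_get()/field_set() helpers, the sample register values, and the printf output are illustrative assumptions for this note only; they are not habanalabs driver API, which has its own MMIO accessors.]

/* Minimal sketch, not driver code: using SHIFT/MASK pairs from the header
 * above to read one field and compose another in a plain 32-bit value.
 */
#include <stdint.h>
#include <stdio.h>

/* Copied from the header above so the example is self-contained. */
#define PSOC_GLOBAL_CONF_BTL_IMG_SPI_IMAGE_FLIP_SHIFT 0
#define PSOC_GLOBAL_CONF_BTL_IMG_SPI_IMAGE_FLIP_MASK 0x1
#define PSOC_GLOBAL_CONF_RST_SRC_WD_RST_IND_SHIFT 4
#define PSOC_GLOBAL_CONF_RST_SRC_WD_RST_IND_MASK 0x10

/* Extract a field: mask off the other bits, then shift it down to bit 0. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear its bits, then OR in the new value shifted up. */
static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* Hypothetical value as if read from the RST_SRC register. */
	uint32_t rst_src = 0x10;

	/* WD_RST_IND (bit 4) set means the watchdog reset indication is on. */
	printf("wd reset indication: %u\n",
	       (unsigned int)field_get(rst_src,
				       PSOC_GLOBAL_CONF_RST_SRC_WD_RST_IND_MASK,
				       PSOC_GLOBAL_CONF_RST_SRC_WD_RST_IND_SHIFT));

	/* Compose a BTL_IMG shadow value with SPI_IMAGE_FLIP set. */
	uint32_t btl_img = field_set(0,
				     PSOC_GLOBAL_CONF_BTL_IMG_SPI_IMAGE_FLIP_MASK,
				     PSOC_GLOBAL_CONF_BTL_IMG_SPI_IMAGE_FLIP_SHIFT,
				     1);
	printf("btl_img shadow: 0x%x\n", (unsigned int)btl_img);

	return 0;
}

[End of editor's note; the diff continues with the companion register-offset header.]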
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000000..48980fa8e37b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,1337 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF
+ * (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0x4C4B000
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0x4C4B004
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0x4C4B008
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0x4C4B00C
+
+#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0x4C4B020
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0x4C4B024
+
+#define mmPSOC_GLOBAL_CONF_BTM_FSM 0x4C4B028
+
+#define mmPSOC_GLOBAL_CONF_BTL_ROM_DELAY 0x4C4B02C
+
+#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0x4C4B030
+
+#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0x4C4B034
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0x4C4B038
+
+#define mmPSOC_GLOBAL_CONF_QSPI_SPI 0x4C4B03C
+
+#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0x4C4B040
+
+#define mmPSOC_GLOBAL_CONF_PRSTN 0x4C4B044
+
+#define mmPSOC_GLOBAL_CONF_PCIE_EN 0x4C4B048
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PRSTN_INTR 0x4C4B04C
+
+#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0x4C4B050
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0x4C4B054
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD 0x4C4B058
+
+#define mmPSOC_GLOBAL_CONF_QSPI_SPI_BOOTSEQ_RST 0x4C4B05C
+
+#define mmPSOC_GLOBAL_CONF_PHY_STABLE 0x4C4B060
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_OVR 0x4C4B064
+
+#define mmPSOC_GLOBAL_CONF_ETR_FLUSH 0x4C4B068
+
+#define mmPSOC_GLOBAL_CONF_ANY_RST 0x4C4B06C
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0 0x4C4B070
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1 0x4C4B074
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2 0x4C4B078
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3 0x4C4B07C
+
+#define mmPSOC_GLOBAL_CONF_DIS_RAZWI_ERR 0x4C4B080
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PHY_RST_N 0x4C4B084
+
+#define mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT 0x4C4B088
+
+#define mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO 0x4C4B08C
+
+#define mmPSOC_GLOBAL_CONF_BTL_PROT 0x4C4B090
+
+#define mmPSOC_GLOBAL_CONF_BTL_ADDR_EXT 0x4C4B094
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TO 0x4C4B098
+
+#define mmPSOC_GLOBAL_CONF_RESET_DELAYS 0x4C4B09C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0x4C4B100
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0x4C4B104
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0x4C4B108
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0x4C4B10C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0x4C4B110
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0x4C4B114
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0x4C4B118
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0x4C4B11C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0x4C4B120
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0x4C4B124
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0x4C4B128
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0x4C4B12C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0x4C4B130
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0x4C4B134
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0x4C4B138
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0x4C4B13C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0x4C4B140
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0x4C4B144
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0x4C4B148
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0x4C4B14C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0x4C4B150
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0x4C4B154
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0x4C4B158
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0x4C4B15C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0x4C4B160
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0x4C4B164
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0x4C4B168
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0x4C4B16C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0x4C4B170
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0x4C4B174
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0x4C4B178
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0x4C4B17C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0x4C4B200
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0x4C4B204
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0x4C4B208
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0x4C4B20C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0x4C4B210
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0x4C4B214
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0x4C4B218
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0x4C4B21C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0x4C4B220
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0x4C4B224
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0x4C4B228
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0x4C4B22C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0x4C4B230
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0x4C4B234
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0x4C4B238
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0x4C4B23C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0x4C4B240
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0x4C4B244
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0x4C4B248
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0x4C4B24C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0x4C4B250
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0x4C4B254
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0x4C4B258
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0x4C4B25C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0x4C4B260
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0x4C4B264
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0x4C4B268
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0x4C4B26C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0x4C4B270
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0x4C4B274
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0x4C4B278
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0x4C4B27C
+
+#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS 0x4C4B300
+
+#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU 0x4C4B304
+
+#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0x4C4B308
+
+#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0x4C4B30C
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV 0x4C4B310
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0x4C4B314
+
+#define mmPSOC_GLOBAL_CONF_TRACE_ADDR 0x4C4B320
+
+#define mmPSOC_GLOBAL_CONF_SMB_ALERT_CTRL 0x4C4B324
+
+#define mmPSOC_GLOBAL_CONF_SMB_ALERT_INTR_CAUSE 0x4C4B328
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CLEAR 0x4C4B32C
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PSOC_DERR_INTR_CTRL 0x4C4B330
+
+#define mmPSOC_GLOBAL_CONF_TRACE_AXPROT 0x4C4B334
+
+#define mmPSOC_GLOBAL_CONF_TRACE_AWUSER 0x4C4B338
+
+#define mmPSOC_GLOBAL_CONF_TRACE_ARUSER 0x4C4B33C
+
+#define mmPSOC_GLOBAL_CONF_BTL_STS 0x4C4B340
+
+#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0x4C4B350
+
+#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0x4C4B354
+
+#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0x4C4B358
+
+#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0x4C4B35C
+
+#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0x4C4B360
+
+#define mmPSOC_GLOBAL_CONF_ARC_WD_INTR 0x4C4B368
+
+#define mmPSOC_GLOBAL_CONF_ARC_WD_INTR_MASK 0x4C4B36C
+
+#define mmPSOC_GLOBAL_CONF_DBG_APB_CTRL 0x4C4B370
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_BAUDR 0x4C4B374
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_AWPROT 0x4C4B378
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_AWUSER 0x4C4B37C
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_CTRL 0x4C4B380
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_STATUS 0x4C4B384
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_L 0x4C4B388
+
+#define mmPSOC_GLOBAL_CONF_SPI_DMA_DST_ADDR_H 0x4C4B38C
+
+#define mmPSOC_GLOBAL_CONF_SPI_DIRECT_WR_RD_CTRL 0x4C4B3A0
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_CTRL 0x4C4B3B0
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_L 0x4C4B3B4
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_RST_VAL_H 0x4C4B3B8
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_L 0x4C4B3BC
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_TIMER_VAL_H 0x4C4B3C0
+
+#define mmPSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_L 0x4C4B3C4
+
+#define mmPSOC_GLOBAL_CONF_QSPI_WR_WO_TIMER_VAL_H 0x4C4B3CC
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_SE_STATUS 0x4C4B3D0
+
+#define mmPSOC_GLOBAL_CONF_QSPI_WR_WO_SE_STATUS 0x4C4B3D4
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_ERR_ADDR 0x4C4B3D8
+
+#define mmPSOC_GLOBAL_CONF_QSPI_WR_WO_ERR_ADDR 0x4C4B3DC
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_INTR_MASK 0x4C4B3E0
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CAUSE 0x4C4B3E4
+
+#define mmPSOC_GLOBAL_CONF_SPI_WR_WO_INTR_CLEAR 0x4C4B3E8
+
+#define mmPSOC_GLOBAL_CONF_MSTR_IF 0x4C4B3F0
+
+#define mmPSOC_GLOBAL_CONF_TARGETID 0x4C4B400
+
+#define mmPSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_0 0x4C4B404
+
+#define mmPSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL_1 0x4C4B408
+
+#define mmPSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_0 0x4C4B40C
+
+#define mmPSOC_GLOBAL_CONF_ARC_LBU_AXI_SPLIT_CTRL2_1 0x4C4B410
+
+#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0x4C4B420
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS_L 0x4C4B430
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS_H 0x4C4B434
+
+#define mmPSOC_GLOBAL_CONF_LEGACY_BOOT_STRAPS 0x4C4B438
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_DIV 0x4C4B44C
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0x4C4B450
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0x4C4B454
+
+#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0x4C4B458
+
+#define mmPSOC_GLOBAL_CONF_MASK_REQ 0x4C4B45C
+
+#define mmPSOC_GLOBAL_CONF_BSAC_CTRL 0x4C4B4C0
+
+#define mmPSOC_GLOBAL_CONF_BSAC_ADDR 0x4C4B4C4
+
+#define mmPSOC_GLOBAL_CONF_BSAC_DATA 0x4C4B4C8
+
+#define mmPSOC_GLOBAL_CONF_BSAC_POLLING_CTRL 0x4C4B4CC
+
+#define mmPSOC_GLOBAL_CONF_BSAC_POLLING_DATA 0x4C4B4D0
+
+#define mmPSOC_GLOBAL_CONF_BSAC_POLLING_MASK 0x4C4B4D4
+
+#define mmPSOC_GLOBAL_CONF_BTL_IMG 0x4C4B4E0
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0x4C4B4E4
+
+#define mmPSOC_GLOBAL_CONF_WD_MASK 0x4C4B4E8
+
+#define mmPSOC_GLOBAL_CONF_RST_SRC 0x4C4B4F0
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STATE 0x4C4B4F4
+
+#define mmPSOC_GLOBAL_CONF_RST_FROM_PCIE_CTRL 0x4C4B4F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0x4C4B500
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0x4C4B504
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0x4C4B508
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0x4C4B50C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0x4C4B510
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0x4C4B514
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0x4C4B518
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0x4C4B51C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0x4C4B520
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0x4C4B524
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0x4C4B528
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0x4C4B52C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0x4C4B530
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0x4C4B534
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0x4C4B538
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0x4C4B53C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0x4C4B540
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0x4C4B544
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0x4C4B548
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0x4C4B54C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0x4C4B550
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0x4C4B554
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0x4C4B558
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0x4C4B55C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0x4C4B560
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0x4C4B564
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0x4C4B568
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0x4C4B56C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0x4C4B570
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0x4C4B574
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0x4C4B578
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0x4C4B57C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0x4C4B580
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0x4C4B584
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0x4C4B588
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0x4C4B58C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0x4C4B590
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0x4C4B594
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0x4C4B598
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0x4C4B59C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0x4C4B5A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0x4C4B5A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0x4C4B5A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0x4C4B5AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0x4C4B5B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0x4C4B5B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0x4C4B5B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0x4C4B5BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0x4C4B5C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0x4C4B5C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0x4C4B5C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0x4C4B5CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0x4C4B5D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0x4C4B5D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0x4C4B5D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0x4C4B5DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0x4C4B5E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0x4C4B5E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0x4C4B5E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0x4C4B5EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0x4C4B5F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0x4C4B5F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0x4C4B5F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0x4C4B5FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0x4C4B600
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0x4C4B604
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0x4C4B608
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0x4C4B60C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0x4C4B610
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_69 0x4C4B614
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_70 0x4C4B618
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_71 0x4C4B61C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_72 0x4C4B620
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_73 0x4C4B624
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_74 0x4C4B628
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_75 0x4C4B62C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_76 0x4C4B630
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_77 0x4C4B634
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_78 0x4C4B638
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_79 0x4C4B63C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_80 0x4C4B640
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_81 0x4C4B644
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_82 0x4C4B648
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_83 0x4C4B64C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_84 0x4C4B650
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_85 0x4C4B654
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_86 0x4C4B658
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_87 0x4C4B65C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_88 0x4C4B660
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_89 0x4C4B664
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_90 0x4C4B668
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_91 0x4C4B66C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_92 0x4C4B670
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_93 0x4C4B674
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0x4C4B690
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0x4C4B694
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0x4C4B698
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0x4C4B69C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0x4C4B6A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0x4C4B6A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0x4C4B6A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0x4C4B6AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0x4C4B6B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0x4C4B6B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0x4C4B6B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0x4C4B6BC
+
+#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0x4C4B710
+
+#define mmPSOC_GLOBAL_CONF_TPC_ISO 0x4C4B760
+
+#define mmPSOC_GLOBAL_CONF_VDEC_ISO 0x4C4B764
+
+#define mmPSOC_GLOBAL_CONF_NIC_ISO 0x4C4B768
+
+#define mmPSOC_GLOBAL_CONF_MME_ISO 0x4C4B76C
+
+#define mmPSOC_GLOBAL_CONF_EDMA_ISO 0x4C4B770
+
+#define mmPSOC_GLOBAL_CONF_HBM_ISO 0x4C4B774
+
+#define mmPSOC_GLOBAL_CONF_XBAR_EDGE_ISO 0x4C4B778
+
+#define mmPSOC_GLOBAL_CONF_HIF_HMMU_ISO 0x4C4B77C
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_REQ_STATUS_0 0x4C4B780
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_REQ_STATUS_1 0x4C4B784
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_0 0x4C4B788
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_REQ_PUSH_1 0x4C4B78C
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_REQ_WR_0 0x4C4B790
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_REQ_WR_1 0x4C4B794
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS_0 0x4C4B798
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_RES_STATUS_1 0x4C4B79C
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_RES_POP_0 0x4C4B7A0
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_RES_POP_1 0x4C4B7A4
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_0 0x4C4B7A8
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_ERROR_1 0x4C4B7AC
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_0 0x4C4B7B0
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_INTR_MASK_1 0x4C4B7B4
+
+#define mmPSOC_GLOBAL_CONF_ASIF_MSTR_STATUS 0x4C4B7B8
+
+#define mmPSOC_GLOBAL_CONF_ASIF_CORE_CFG 0x4C4B7C0
+
+#define mmPSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT 0x4C4B7C4
+
+#define mmPSOC_GLOBAL_CONF_ASIF_CORE_DBG_CNT_CLR 0x4C4B7C8
+
+#define mmPSOC_GLOBAL_CONF_ASIF_CORE_TIMEOUT_CFG 0x4C4B7CC
+
+#define mmPSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CAUSE_0 0x4C4B7D0
+
+#define mmPSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CAUSE_1 0x4C4B7D4
+
+#define mmPSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CLEAR_0 0x4C4B7D8
+
+#define mmPSOC_GLOBAL_CONF_ASIF_FUNC_INTR_CLEAR_1 0x4C4B7DC
+
+#define mmPSOC_GLOBAL_CONF_ASIF_FUNC_INTR_MASK_0 0x4C4B7E0
+
+#define mmPSOC_GLOBAL_CONF_ASIF_FUNC_INTR_MASK_1 0x4C4B7E4
+
+#define mmPSOC_GLOBAL_CONF_ASIF_ERR_INTR_CAUSE_0 0x4C4B7E8
+
+#define mmPSOC_GLOBAL_CONF_ASIF_ERR_INTR_CAUSE_1 0x4C4B7EC
+
+#define mmPSOC_GLOBAL_CONF_ASIF_ERR_INTR_CLEAR_0 0x4C4B7F0
+
+#define mmPSOC_GLOBAL_CONF_ASIF_ERR_INTR_CLEAR_1 0x4C4B7F4
+
+#define mmPSOC_GLOBAL_CONF_ASIF_ERR_INTR_MASK_0 0x4C4B7F8
+
+#define mmPSOC_GLOBAL_CONF_ASIF_ERR_INTR_MASK_1 0x4C4B7FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0x4C4B800
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0x4C4B804
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0x4C4B808
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0x4C4B80C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0x4C4B810
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0x4C4B814
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0x4C4B818
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0x4C4B81C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0x4C4B820
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0x4C4B824
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0x4C4B828
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0x4C4B82C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0x4C4B830
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0x4C4B834
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0x4C4B838
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0x4C4B83C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0x4C4B840
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0x4C4B844
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0x4C4B848
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0x4C4B84C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0x4C4B850
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0x4C4B854
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0x4C4B858
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0x4C4B85C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0x4C4B860
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0x4C4B864
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0x4C4B868
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0x4C4B86C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0x4C4B870
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0x4C4B874
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0x4C4B878
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0x4C4B87C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0x4C4B880
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0x4C4B884
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0x4C4B888
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0x4C4B88C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0x4C4B890
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0x4C4B894
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0x4C4B898
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0x4C4B89C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0x4C4B8A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0x4C4B8A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0x4C4B8A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0x4C4B8AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0x4C4B8B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0x4C4B8B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0x4C4B8B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0x4C4B8BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0x4C4B8C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0x4C4B8C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0x4C4B8C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0x4C4B8CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0x4C4B8D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0x4C4B8D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0x4C4B8D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0x4C4B8DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0x4C4B8E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0x4C4B8E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0x4C4B8E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0x4C4B8EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0x4C4B8F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0x4C4B8F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0x4C4B8F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0x4C4B8FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0x4C4B900
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0x4C4B904
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0x4C4B908
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0x4C4B90C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0x4C4B910
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0x4C4B914
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0x4C4B918
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0x4C4B91C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0x4C4B920
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0x4C4B924
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0x4C4B928
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0x4C4B92C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0x4C4B930
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0x4C4B934
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0x4C4B938
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0x4C4B93C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0x4C4B940
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0x4C4B944
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_82 0x4C4B948
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_83 0x4C4B94C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_84 0x4C4B950
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_85 0x4C4B954
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_86 0x4C4B958
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_87 0x4C4B95C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_88 0x4C4B960
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_89 0x4C4B964
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_90 0x4C4B968
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_91 0x4C4B96C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_92 0x4C4B970
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_93 0x4C4B974
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_94 0x4C4B978
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_95 0x4C4B97C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0x4C4B980
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0x4C4B984
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0x4C4B988
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0x4C4B98C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0x4C4B990
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0x4C4B994
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0x4C4B998
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0x4C4B99C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0x4C4B9A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0x4C4B9A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0x4C4B9A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0x4C4B9AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0x4C4B9B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0x4C4B9B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0x4C4B9B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0x4C4B9BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0x4C4B9C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0x4C4B9C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0x4C4B9C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0x4C4B9CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0x4C4B9D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0x4C4B9D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0x4C4B9D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0x4C4B9DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0x4C4B9E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0x4C4B9E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0x4C4B9E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0x4C4B9EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0x4C4B9F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0x4C4B9F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0x4C4B9F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0x4C4B9FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0x4C4BA00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0x4C4BA04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0x4C4BA08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0x4C4BA0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0x4C4BA10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0x4C4BA14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0x4C4BA18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0x4C4BA1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0x4C4BA20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0x4C4BA24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0x4C4BA28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0x4C4BA2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0x4C4BA30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0x4C4BA34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0x4C4BA38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0x4C4BA3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0x4C4BA40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0x4C4BA44
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0x4C4BA48
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0x4C4BA4C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0x4C4BA50
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0x4C4BA54
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0x4C4BA58
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0x4C4BA5C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0x4C4BA60
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0x4C4BA64
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0x4C4BA68
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0x4C4BA6C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0x4C4BA70
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0x4C4BA74
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0x4C4BA78
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0x4C4BA7C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0x4C4BA80
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0x4C4BA84
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0x4C4BA88
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0x4C4BA8C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0x4C4BA90
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0x4C4BA94
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0x4C4BA98
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0x4C4BA9C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0x4C4BAA0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0x4C4BAA4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0x4C4BAA8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0x4C4BAAC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0x4C4BAB0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0x4C4BAB4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0x4C4BAB8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0x4C4BABC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0x4C4BAC0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0x4C4BAC4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_82 0x4C4BAC8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_83 0x4C4BACC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_84 0x4C4BAD0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_85 0x4C4BAD4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_86 0x4C4BAD8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_87 0x4C4BADC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_88 0x4C4BAE0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_89 0x4C4BAE4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_90 0x4C4BAE8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_91 0x4C4BAEC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_92 0x4C4BAF0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_93 0x4C4BAF4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_94 0x4C4BAF8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_95 0x4C4BAFC
+
+#define mmPSOC_GLOBAL_CONF_SMI_ACCESS_EN 0x4C4BB00
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_EXTMEM_EN 0x4C4BB38
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_PERM_SEL 0x4C4BB3C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_0 0x4C4BB40
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_1 0x4C4BB44
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_2 0x4C4BB48
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_3 0x4C4BB4C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_4 0x4C4BB50
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_5 0x4C4BB54
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_6 0x4C4BB58
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_7 0x4C4BB5C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_8 0x4C4BB60
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_9 0x4C4BB64
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_10 0x4C4BB68
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_11 0x4C4BB6C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_12 0x4C4BB70
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_13 0x4C4BB74
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_14 0x4C4BB78
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_15 0x4C4BB7C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_16 0x4C4BB80
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_17 0x4C4BB84
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_18 0x4C4BB88
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_19 0x4C4BB8C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_20 0x4C4BB90
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_21 0x4C4BB94
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_22 0x4C4BB98
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_23 0x4C4BB9C
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_24 0x4C4BBA0
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_25 0x4C4BBA4
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_26 0x4C4BBA8
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_27 0x4C4BBAC
+
+#define mmPSOC_GLOBAL_CONF_SCRAM_POLY_H3_28 0x4C4BBB0
+
+#define mmPSOC_GLOBAL_CONF_CORE_MODE 0x4C4BBB4
+
+#define mmPSOC_GLOBAL_CONF_EXTMEM_ID_LOC 0x4C4BBB8
+
+#define mmPSOC_GLOBAL_CONF_LBW_USER_CTRL 0x4C4BBC0
+
+#define mmPSOC_GLOBAL_CONF_ADC_STM_ID 0x4C4BBFC
+
+#define mmPSOC_GLOBAL_CONF_ADC_0 0x4C4BC00
+
+#define mmPSOC_GLOBAL_CONF_ADC_1 0x4C4BC04
+
+#define mmPSOC_GLOBAL_CONF_ADC_INT_MASK_0 0x4C4BC10
+
+#define mmPSOC_GLOBAL_CONF_ADC_INT_MASK_1 0x4C4BC14
+
+#define mmPSOC_GLOBAL_CONF_ADC_CLK_FREQ_0 0x4C4BC20
+
+#define mmPSOC_GLOBAL_CONF_ADC_CLK_FREQ_1 0x4C4BC24
+
+#define mmPSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_0 0x4C4BC30
+
+#define mmPSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_1 0x4C4BC34
+
+#define mmPSOC_GLOBAL_CONF_ADC_SAMPLES_0 0x4C4BC40
+
+#define mmPSOC_GLOBAL_CONF_ADC_SAMPLES_1 0x4C4BC44
+
+#define mmPSOC_GLOBAL_CONF_ADC_TPH_CS_0 0x4C4BC50
+
+#define mmPSOC_GLOBAL_CONF_ADC_TPH_CS_1 0x4C4BC54
+
+#define mmPSOC_GLOBAL_CONF_ADC_LSB_NMSB_0 0x4C4BC60
+
+#define mmPSOC_GLOBAL_CONF_ADC_LSB_NMSB_1 0x4C4BC64
+
+#define mmPSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_0 0x4C4BC70
+
+#define mmPSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_1 0x4C4BC74
+
+#define mmPSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_0 0x4C4BC80
+
+#define mmPSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_1 0x4C4BC84
+
+#define mmPSOC_GLOBAL_CONF_ADC_TDV_CSDO_0 0x4C4BC90
+
+#define mmPSOC_GLOBAL_CONF_ADC_TDV_CSDO_1 0x4C4BC94
+
+#define mmPSOC_GLOBAL_CONF_ADC_PID_SEL 0x4C4BC98
+
+#define mmPSOC_GLOBAL_CONF_ADC_TSU_CSCK_0 0x4C4BCA0
+
+#define mmPSOC_GLOBAL_CONF_ADC_TSU_CSCK_1 0x4C4BCA4
+
+#define mmPSOC_GLOBAL_CONF_ADC_CH_SEL_0 0x4C4BCA8
+
+#define mmPSOC_GLOBAL_CONF_ADC_CH_SEL_1 0x4C4BCAC
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_0 0x4C4BCC0
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_1 0x4C4BCC4
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_2 0x4C4BCC8
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_3 0x4C4BCCC
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_4 0x4C4BCD0
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_5 0x4C4BCD4
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_6 0x4C4BCD8
+
+#define mmPSOC_GLOBAL_CONF_ADC_WRITE_ADDR_7 0x4C4BCDC
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_0 0x4C4BCE0
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_1 0x4C4BCE4
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_2 0x4C4BCE8
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_3 0x4C4BCEC
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_4 0x4C4BCF0
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_5 0x4C4BCF4
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_6 0x4C4BCF8
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA_7 0x4C4BCFC
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_0 0x4C4BD00
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_1 0x4C4BD04
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_2 0x4C4BD08
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_3 0x4C4BD0C
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_4 0x4C4BD10
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_5 0x4C4BD14
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_6 0x4C4BD18
+
+#define mmPSOC_GLOBAL_CONF_ADC_AUX_STM_CTRL_7 0x4C4BD1C
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_CTRL 0x4C4BD24
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_L 0x4C4BD28
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD0_H 0x4C4BD2C
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_L 0x4C4BD30
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD0_H 0x4C4BD34
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_L 0x4C4BD38
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD1_H 0x4C4BD3C
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_L 0x4C4BD40
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD1_H 0x4C4BD44
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_L 0x4C4BD48
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD2_H 0x4C4BD4C
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_L 0x4C4BD50
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD2_H 0x4C4BD54
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_L 0x4C4BD58
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MIN_AD3_H 0x4C4BD5C
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_L 0x4C4BD60
+
+#define mmPSOC_GLOBAL_CONF_TERMINATE_READ_MAX_AD3_H 0x4C4BD64
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_INIT_CTRL 0x4C4BD80
+
+#define mmPSOC_GLOBAL_CONF_RST_OUT_CTRL 0x4C4BD84
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_CTRL 0x4C4BD90
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_STATUS 0x4C4BD94
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_H 0x4C4BD98
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_START_ADDR_L 0x4C4BD9C
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_H 0x4C4BDA0
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_DEST_ADDR_L 0x4C4BDA4
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_CTRL2 0x4C4BDA8
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_CONST 0x4C4BDAC
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_H 0x4C4BDB0
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_CURR_ADDR_L 0x4C4BDB4
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_CFG 0x4C4BDC0
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_PROT_CFG1 0x4C4BDC4
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG0 0x4C4BDC8
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG1 0x4C4BDCC
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG2 0x4C4BDD0
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG3 0x4C4BDD4
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_USER_CFG4 0x4C4BDD8
+
+#define mmPSOC_GLOBAL_CONF_LBW_ARUSER_OVRD 0x4C4BDE0
+
+#define mmPSOC_GLOBAL_CONF_LBW_ARUSER_OVRD_EN 0x4C4BDE4
+
+#define mmPSOC_GLOBAL_CONF_LBW_AWUSER_OVRD 0x4C4BDE8
+
+#define mmPSOC_GLOBAL_CONF_LBW_AWUSER_OVRD_EN 0x4C4BDEC
+
+#define mmPSOC_GLOBAL_CONF_MAIN_AXI_SPLIT_CFG2 0x4C4BDF0
+
+#define mmPSOC_GLOBAL_CONF_BOOTROM_AXI_SPLIT_CFG2 0x4C4BDF4
+
+#define mmPSOC_GLOBAL_CONF_AXI_SPLIT_INTR_CLEAR 0x4C4BDF8
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_PROT 0x4C4BE08
+
+#define mmPSOC_GLOBAL_CONF_ISOLATE_INPUTS 0x4C4BE10
+
+#define mmPSOC_GLOBAL_CONF_MESH_TO_BOOTROM_CTRL 0x4C4BE14
+
+#define mmPSOC_GLOBAL_CONF_ARC_JT_SEL 0x4C4BE28
+
+#define mmPSOC_GLOBAL_CONF_PLL_DUMP_CRTL 0x4C4BE2C
+
+#define mmPSOC_GLOBAL_CONF_MEM_CPY_AXUSER 0x4C4BE30
+
+#define mmPSOC_GLOBAL_CONF_BTL_AXUSER 0x4C4BE34
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0 0x4C4BE38
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1 0x4C4BE40
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2 0x4C4BE44
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3 0x4C4BE48
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_CTRL 0x4C4BE4C
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_TIMEOUT 0x4C4BE50
+
+#define mmPSOC_GLOBAL_CONF_AXI_DRAIN_INTR 0x4C4BE54
+
+#define mmPSOC_GLOBAL_CONF_BTL_STOP_SPI_CLK 0x4C4BE58
+
+#define mmPSOC_GLOBAL_CONF_ECO_INTR_CAUSE 0x4C4BE60
+
+#define mmPSOC_GLOBAL_CONF_ECO_INTR_CLEAR 0x4C4BE64
+
+#define mmPSOC_GLOBAL_CONF_ECO_INTR_MASK 0x4C4BE68
+
+#define mmPSOC_GLOBAL_CONF_DFT_APB_CONTROL 0x4C4BE70
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
new file mode 100644
index 000000000000..e0cf35226e7f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
@@ -0,0 +1,2321 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_RESET_CONF_MASKS_H_
+#define ASIC_REG_PSOC_RESET_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_RESET_CONF
+ * (Prototype: PSOC_RESET_CONF)
+ *****************************************
+ */
+
+/* PSOC_RESET_CONF_PSOC_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_FW_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_WD_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_SW_RST_CFG */
+#define PSOC_RESET_CONF_PSOC_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_CPU_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_CPU_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_FW_RST_CFG */
+#define PSOC_RESET_CONF_CPU_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_WD_RST_CFG */
+#define PSOC_RESET_CONF_CPU_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_MNL_RST_CFG */
+#define PSOC_RESET_CONF_CPU_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_FLR_RST_CFG */
+#define PSOC_RESET_CONF_CPU_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_CPU_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_SW_RST_CFG */
+#define PSOC_RESET_CONF_CPU_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_ARC_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_ARC_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_PRSTN_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SOFT_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_FW_RST_CFG */
+#define PSOC_RESET_CONF_ARC_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_FW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_WD_RST_CFG */
+#define PSOC_RESET_CONF_ARC_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_WD_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_MNL_RST_CFG */
+#define PSOC_RESET_CONF_ARC_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_MNL_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_FLR_RST_CFG */
+#define PSOC_RESET_CONF_ARC_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_FLR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_ARC_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_ECC_DERR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_SW_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_SIF_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_SIF_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_SIF_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_FW_RST_CFG */
+#define PSOC_RESET_CONF_SIF_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_WD_RST_CFG */
+#define PSOC_RESET_CONF_SIF_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_MNL_RST_CFG */
+#define PSOC_RESET_CONF_SIF_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_FLR_RST_CFG */
+#define PSOC_RESET_CONF_SIF_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_SIF_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SIF_SW_RST_CFG */
+#define PSOC_RESET_CONF_SIF_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_FW_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_WD_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_MNL_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_FLR_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_SW_RST_CFG */
+#define PSOC_RESET_CONF_SRAM_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PCIE_CTRL_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_FW_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_WD_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_CTRL_SW_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_CTRL_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_FW_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_WD_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_SW_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_FW_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_WD_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_SW_RST_CFG */
+#define PSOC_RESET_CONF_PCIE_IF_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_TPC_DIV_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_PRSTN_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_SOFT_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_FW_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_FW_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_WD_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_WD_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_MNL_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_MNL_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_FLR_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_FLR_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_ECC_DERR_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_TPC_DIV_SW_RST_CFG */
+#define PSOC_RESET_CONF_TPC_DIV_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_SW_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_HBM_DIV_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_PRSTN_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_SOFT_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_FW_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_FW_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_WD_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_WD_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_MNL_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_MNL_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_FLR_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_FLR_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_ECC_DERR_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_DIV_SW_RST_CFG */
+#define PSOC_RESET_CONF_HBM_DIV_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_SW_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_PMMU_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_FW_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_WD_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PMMU_SW_RST_CFG */
+#define PSOC_RESET_CONF_PMMU_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PM_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PM_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PM_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_FW_RST_CFG */
+#define PSOC_RESET_CONF_PM_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_WD_RST_CFG */
+#define PSOC_RESET_CONF_PM_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PM_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PM_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PM_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PM_SW_RST_CFG */
+#define PSOC_RESET_CONF_PM_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_TS_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_TS_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_FW_RST_CFG */
+#define PSOC_RESET_CONF_TS_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_WD_RST_CFG */
+#define PSOC_RESET_CONF_TS_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_MNL_RST_CFG */
+#define PSOC_RESET_CONF_TS_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_FLR_RST_CFG */
+#define PSOC_RESET_CONF_TS_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_TS_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_SW_RST_CFG */
+#define PSOC_RESET_CONF_TS_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_FW_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_WD_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_MNL_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_FLR_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_SW_RST_CFG */
+#define PSOC_RESET_CONF_TS_IF_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PLL_L_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_PRSTN_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_SOFT_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_FW_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_FW_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_WD_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_WD_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_MNL_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_FLR_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_ECC_DERR_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_L_SW_RST_CFG */
+#define PSOC_RESET_CONF_PLL_L_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_SW_RST_CFG_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_H_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_PRSTN_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_SOFT_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_FW_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_FW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_WD_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_WD_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_MNL_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_FLR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_ECC_DERR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PLL_H_SW_RST_CFG */
+#define PSOC_RESET_CONF_PLL_H_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_SW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_MME_EUS_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_FW_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_WD_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_MNL_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_FLR_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MME_EUS_SW_RST_CFG */
+#define PSOC_RESET_CONF_MME_EUS_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_FW_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_WD_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_MNL_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_FLR_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_SW_RST_CFG */
+#define PSOC_RESET_CONF_MSS_CLS_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TPC_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_TPC_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_PRSTN_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_TPC_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_SOFT_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_FW_RST_CFG */
+#define PSOC_RESET_CONF_TPC_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_FW_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_WD_RST_CFG */
+#define PSOC_RESET_CONF_TPC_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_WD_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_MNL_RST_CFG */
+#define PSOC_RESET_CONF_TPC_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_MNL_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_FLR_RST_CFG */
+#define PSOC_RESET_CONF_TPC_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_FLR_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_TPC_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_ECC_DERR_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_TPC_SW_RST_CFG */
+#define PSOC_RESET_CONF_TPC_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_SW_RST_CFG_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_HIF_HMMU_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_FW_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_WD_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_MNL_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_FLR_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_HIF_HMMU_SW_RST_CFG */
+#define PSOC_RESET_CONF_HIF_HMMU_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_FW_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_WD_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_MNL_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_FLR_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_SW_RST_CFG */
+#define PSOC_RESET_CONF_XBAR_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_FW_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_WD_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_MNL_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_FLR_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_SW_RST_CFG */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_DDMA_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_PRSTN_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_SOFT_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_FW_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_FW_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_WD_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_WD_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_MNL_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_MNL_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_FLR_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_FLR_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_ECC_DERR_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_DDMA_SW_RST_CFG */
+#define PSOC_RESET_CONF_DDMA_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_SW_RST_CFG_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_KDMA_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_PRSTN_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_SOFT_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_FW_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_FW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_WD_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_WD_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_MNL_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_MNL_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_FLR_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_FLR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_ECC_DERR_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_KDMA_SW_RST_CFG */
+#define PSOC_RESET_CONF_KDMA_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_SW_RST_CFG_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PDMA_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_PRSTN_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_SOFT_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_FW_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_FW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_WD_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_WD_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_MNL_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_MNL_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_FLR_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_FLR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_ECC_DERR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_PDMA_SW_RST_CFG */
+#define PSOC_RESET_CONF_PDMA_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_SW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_SS_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_PRSTN_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_SOFT_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_FW_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_FW_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_WD_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_WD_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_MNL_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_MNL_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_FLR_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_FLR_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_ECC_DERR_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ARC_SS_SW_RST_CFG */
+#define PSOC_RESET_CONF_ARC_SS_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_SW_RST_CFG_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ROTATOR_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_PRSTN_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_SOFT_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_FW_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_FW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_WD_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_WD_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_MNL_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_MNL_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_FLR_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_FLR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_ECC_DERR_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ROTATOR_SW_RST_CFG */
+#define PSOC_RESET_CONF_ROTATOR_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_SW_RST_CFG_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_SM_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_SM_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_PRSTN_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_SM_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_SOFT_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_FW_RST_CFG */
+#define PSOC_RESET_CONF_SM_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_FW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_WD_RST_CFG */
+#define PSOC_RESET_CONF_SM_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_WD_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_MNL_RST_CFG */
+#define PSOC_RESET_CONF_SM_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_MNL_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_FLR_RST_CFG */
+#define PSOC_RESET_CONF_SM_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_FLR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_SM_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_ECC_DERR_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SM_SW_RST_CFG */
+#define PSOC_RESET_CONF_SM_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_SW_RST_CFG_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_PRSTN_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_SOFT_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_FW_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_FW_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_WD_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_WD_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_MNL_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_MNL_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_FLR_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_FLR_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_ECC_DERR_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_SW_RST_CFG */
+#define PSOC_RESET_CONF_VIDEO_DEC_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_SW_RST_CFG_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_HBM_MC_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_PRSTN_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_SOFT_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_FW_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_FW_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_WD_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_WD_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_MNL_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_MNL_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_FLR_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_FLR_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_ECC_DERR_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_HBM_MC_SW_RST_CFG */
+#define PSOC_RESET_CONF_HBM_MC_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_SW_RST_CFG_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_NIC_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRSTN_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_NIC_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_SOFT_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_FW_RST_CFG */
+#define PSOC_RESET_CONF_NIC_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_FW_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_WD_RST_CFG */
+#define PSOC_RESET_CONF_NIC_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_WD_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_MNL_RST_CFG */
+#define PSOC_RESET_CONF_NIC_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_MNL_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_FLR_RST_CFG */
+#define PSOC_RESET_CONF_NIC_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_FLR_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_NIC_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_ECC_DERR_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_SW_RST_CFG */
+#define PSOC_RESET_CONF_NIC_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_SW_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_PRSTN_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_SOFT_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_FW_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_FW_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_WD_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_WD_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_MNL_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_MNL_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_FLR_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_FLR_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_ECC_DERR_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_SW_RST_CFG */
+#define PSOC_RESET_CONF_NIC_PRT_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_SW_RST_CFG_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_CH_PRSTN_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_PRSTN_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_PRSTN_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_SOFT_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_SOFT_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_SOFT_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_FW_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_FW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_FW_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_WD_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_WD_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_WD_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_MNL_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_MNL_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_MNL_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_FLR_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_FLR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_FLR_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_ECC_DERR_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_ECC_DERR_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_ECC_DERR_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_NIC_CH_SW_RST_CFG */
+#define PSOC_RESET_CONF_NIC_CH_SW_RST_CFG_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_SW_RST_CFG_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_SOFT_RST */
+#define PSOC_RESET_CONF_SOFT_RST_IND_SHIFT 0
+#define PSOC_RESET_CONF_SOFT_RST_IND_MASK 0x1
+
+/* PSOC_RESET_CONF_SW_ALL_RST */
+#define PSOC_RESET_CONF_SW_ALL_RST_IND_SHIFT 0
+#define PSOC_RESET_CONF_SW_ALL_RST_IND_MASK 0x1
+
+/* PSOC_RESET_CONF_UNIT_RST_N */
+#define PSOC_RESET_CONF_UNIT_RST_N_IND_SHIFT 0
+#define PSOC_RESET_CONF_UNIT_RST_N_IND_MASK 0x1
+
+/* PSOC_RESET_CONF_PSOC_UNIT_RST */
+#define PSOC_RESET_CONF_PSOC_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_CPU_UNIT_RST */
+#define PSOC_RESET_CONF_CPU_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_CPU_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_ARC_UNIT_RST */
+#define PSOC_RESET_CONF_ARC_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_UNIT_RST_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_SIF_UNIT_RST */
+#define PSOC_RESET_CONF_SIF_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_SIF_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SRAM_UNIT_RST */
+#define PSOC_RESET_CONF_SRAM_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PCIE_CTRL_UNIT_RST */
+#define PSOC_RESET_CONF_PCIE_CTRL_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_UNIT_RST */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PCIE_IF_UNIT_RST */
+#define PSOC_RESET_CONF_PCIE_IF_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_TPC_DIV_UNIT_RST */
+#define PSOC_RESET_CONF_TPC_DIV_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_UNIT_RST_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_HBM_DIV_UNIT_RST */
+#define PSOC_RESET_CONF_HBM_DIV_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_UNIT_RST_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_PMMU_UNIT_RST */
+#define PSOC_RESET_CONF_PMMU_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PM_UNIT_RST */
+#define PSOC_RESET_CONF_PM_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PM_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_UNIT_RST */
+#define PSOC_RESET_CONF_TS_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TS_IF_UNIT_RST */
+#define PSOC_RESET_CONF_TS_IF_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_PLL_L_UNIT_RST */
+#define PSOC_RESET_CONF_PLL_L_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_UNIT_RST_EN_MASK 0xFFFFFFFF
+
+/* PSOC_RESET_CONF_PLL_H_UNIT_RST */
+#define PSOC_RESET_CONF_PLL_H_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_UNIT_RST_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_MME_EUS_UNIT_RST */
+#define PSOC_RESET_CONF_MME_EUS_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_MSS_CLS_UNIT_RST */
+#define PSOC_RESET_CONF_MSS_CLS_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_TPC_UNIT_RST */
+#define PSOC_RESET_CONF_TPC_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_TPC_UNIT_RST_EN_MASK 0x1FFFFFF
+
+/* PSOC_RESET_CONF_HIF_HMMU_UNIT_RST */
+#define PSOC_RESET_CONF_HIF_HMMU_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_XBAR_UNIT_RST */
+#define PSOC_RESET_CONF_XBAR_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_UNIT_RST */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_DDMA_UNIT_RST */
+#define PSOC_RESET_CONF_DDMA_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_UNIT_RST_EN_MASK 0xFF
+
+/* PSOC_RESET_CONF_KDMA_UNIT_RST */
+#define PSOC_RESET_CONF_KDMA_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_UNIT_RST_EN_MASK 0x1
+
+/* PSOC_RESET_CONF_PDMA_UNIT_RST */
+#define PSOC_RESET_CONF_PDMA_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_UNIT_RST_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_ARC_SS_UNIT_RST */
+#define PSOC_RESET_CONF_ARC_SS_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_UNIT_RST_EN_MASK 0x1F
+
+/* PSOC_RESET_CONF_ROTATOR_UNIT_RST */
+#define PSOC_RESET_CONF_ROTATOR_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_UNIT_RST_EN_MASK 0x3
+
+/* PSOC_RESET_CONF_SM_UNIT_RST */
+#define PSOC_RESET_CONF_SM_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_SM_UNIT_RST_EN_MASK 0xF
+
+/* PSOC_RESET_CONF_VIDEO_DEC_UNIT_RST */
+#define PSOC_RESET_CONF_VIDEO_DEC_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_UNIT_RST_EN_MASK 0x3FF
+
+/* PSOC_RESET_CONF_HBM_MC_UNIT_RST */
+#define PSOC_RESET_CONF_HBM_MC_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_UNIT_RST_EN_MASK 0x3F
+
+/* PSOC_RESET_CONF_NIC_UNIT_RST */
+#define PSOC_RESET_CONF_NIC_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_UNIT_RST_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_PRT_UNIT_RST */
+#define PSOC_RESET_CONF_NIC_PRT_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_UNIT_RST_EN_MASK 0xFFF
+
+/* PSOC_RESET_CONF_NIC_CH_UNIT_RST */
+#define PSOC_RESET_CONF_NIC_CH_UNIT_RST_EN_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_UNIT_RST_EN_MASK 0x7
+
+/* PSOC_RESET_CONF_PSOC_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PSOC_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PSOC_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PSOC_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PSOC_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_CPU_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_CPU_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_CPU_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_CPU_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_CPU_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SIF_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SIF_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SIF_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SIF_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SIF_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SIF_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SIF_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SIF_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SIF_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SIF_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SIF_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SIF_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SIF_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SIF_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SIF_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SIF_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SIF_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SIF_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SIF_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SIF_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SRAM_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SRAM_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SRAM_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SRAM_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SRAM_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SRAM_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SRAM_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SRAM_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SRAM_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SRAM_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SRAM_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SRAM_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SRAM_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SRAM_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SRAM_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SRAM_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SRAM_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PCIE_CTRL_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PCIE_CTRL_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_CTRL_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PCIE_CTRL_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PCIE_CTRL_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PCIE_PHY_CFG_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PCIE_PHY_CFG_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PCIE_IF_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PCIE_IF_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PCIE_IF_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PCIE_IF_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PCIE_IF_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_DIV_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_DIV_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_DIV_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_DIV_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_DIV_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_DIV_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_DIV_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_DIV_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_DIV_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_DIV_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_DIV_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_DIV_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_DIV_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_DIV_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_DIV_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_DIV_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_DIV_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_DIV_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_DIV_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_DIV_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_DIV_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_DIV_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_DIV_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_DIV_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_DIV_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_DIV_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_DIV_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_DIV_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_DIV_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_DIV_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_DIV_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_DIV_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_DIV_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_DIV_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_DIV_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_DIV_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_DIV_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_DIV_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_DIV_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_DIV_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_DIV_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_DIV_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_DIV_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_DIV_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_DIV_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_DIV_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PMMU_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PMMU_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PMMU_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PMMU_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PMMU_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PM_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PM_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PM_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PM_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PM_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PM_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PM_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PM_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PM_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PM_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PM_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PM_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PM_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PM_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PM_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PM_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PM_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PM_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PM_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PM_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_IF_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_IF_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_IF_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_IF_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_IF_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_IF_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_IF_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_IF_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_IF_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_IF_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_IF_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_IF_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TS_IF_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TS_IF_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TS_IF_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TS_IF_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TS_IF_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_6_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_6_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_6_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_6_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_6_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_7_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_7_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_7_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_7_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_7_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_8_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_8_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_8_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_8_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_8_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_9_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_9_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_9_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_9_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_9_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_10_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_10_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_10_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_10_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_10_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_11_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_11_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_11_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_11_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_11_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_12_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_12_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_12_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_12_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_12_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_13_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_13_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_13_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_13_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_13_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_14_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_14_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_14_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_14_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_14_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_15_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_15_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_15_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_15_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_15_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_16_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_16_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_16_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_16_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_16_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_17_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_17_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_17_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_17_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_17_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_18_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_18_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_18_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_18_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_18_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_19_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_19_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_19_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_19_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_19_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_20_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_20_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_20_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_20_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_20_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_21_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_21_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_21_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_21_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_21_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_22_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_22_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_22_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_22_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_22_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_23_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_23_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_23_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_23_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_23_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_24_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_24_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_24_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_24_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_24_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_25_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_25_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_25_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_25_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_25_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_26_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_26_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_26_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_26_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_26_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_27_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_27_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_27_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_27_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_27_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_28_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_28_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_28_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_28_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_28_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_29_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_29_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_29_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_29_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_29_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_30_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_30_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_30_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_30_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_30_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_L_31_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_L_31_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_L_31_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_L_31_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_L_31_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_H_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_H_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_H_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_H_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PLL_H_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PLL_H_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PLL_H_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PLL_H_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PLL_H_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MME_EUS_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MME_EUS_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MME_EUS_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MME_EUS_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MME_EUS_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MME_EUS_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MME_EUS_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MME_EUS_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MME_EUS_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MME_EUS_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MME_EUS_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MME_EUS_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MME_EUS_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MME_EUS_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MME_EUS_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MME_EUS_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MME_EUS_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MSS_CLS_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MSS_CLS_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MSS_CLS_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MSS_CLS_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MSS_CLS_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MSS_CLS_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MSS_CLS_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MSS_CLS_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MSS_CLS_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MSS_CLS_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MSS_CLS_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MSS_CLS_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_MSS_CLS_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_MSS_CLS_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_MSS_CLS_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_MSS_CLS_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_MSS_CLS_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_6_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_6_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_6_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_6_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_6_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_7_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_7_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_7_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_7_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_7_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_8_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_8_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_8_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_8_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_8_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_9_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_9_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_9_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_9_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_9_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_10_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_10_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_10_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_10_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_10_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_11_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_11_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_11_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_11_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_11_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_12_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_12_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_12_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_12_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_12_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_13_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_13_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_13_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_13_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_13_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_14_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_14_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_14_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_14_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_14_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_15_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_15_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_15_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_15_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_15_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_16_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_16_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_16_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_16_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_16_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_17_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_17_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_17_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_17_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_17_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_18_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_18_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_18_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_18_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_18_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_19_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_19_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_19_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_19_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_19_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_20_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_20_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_20_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_20_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_20_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_21_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_21_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_21_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_21_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_21_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_22_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_22_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_22_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_22_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_22_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_23_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_23_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_23_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_23_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_23_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_TPC_24_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_TPC_24_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_TPC_24_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_TPC_24_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_TPC_24_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HIF_HMMU_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HIF_HMMU_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HIF_HMMU_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HIF_HMMU_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HIF_HMMU_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HIF_HMMU_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HIF_HMMU_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HIF_HMMU_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HIF_HMMU_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HIF_HMMU_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HIF_HMMU_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HIF_HMMU_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HIF_HMMU_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HIF_HMMU_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HIF_HMMU_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HIF_HMMU_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HIF_HMMU_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_XBAR_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_XBAR_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_XBAR_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_XBAR_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_XBAR_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_XBAR_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_XBAR_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_XBAR_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_XBAR_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_XBAR_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_XBAR_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_XBAR_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_XBAR_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_XBAR_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_XBAR_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_XBAR_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_XBAR_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SFT_XFT_TFT_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SFT_XFT_TFT_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SFT_XFT_TFT_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SFT_XFT_TFT_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SFT_XFT_TFT_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SFT_XFT_TFT_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SFT_XFT_TFT_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SFT_XFT_TFT_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SFT_XFT_TFT_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SFT_XFT_TFT_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SFT_XFT_TFT_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_6_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_6_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_6_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_6_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_6_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_DDMA_7_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_DDMA_7_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_DDMA_7_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_DDMA_7_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_DDMA_7_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_KDMA_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_KDMA_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_KDMA_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_KDMA_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_KDMA_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PDMA_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PDMA_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PDMA_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PDMA_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_PDMA_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_PDMA_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_PDMA_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_PDMA_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_PDMA_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_SS_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_SS_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_SS_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_SS_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_SS_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_SS_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_SS_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_SS_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_SS_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_SS_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_SS_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_SS_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_SS_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_SS_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_SS_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_SS_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ARC_SS_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ARC_SS_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ARC_SS_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ARC_SS_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ARC_SS_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ROTATOR_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ROTATOR_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ROTATOR_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ROTATOR_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_ROTATOR_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_ROTATOR_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_ROTATOR_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_ROTATOR_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_ROTATOR_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SM_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SM_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SM_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SM_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SM_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SM_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SM_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SM_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SM_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SM_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SM_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SM_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SM_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SM_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SM_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_SM_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_SM_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_SM_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_SM_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_SM_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_6_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_6_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_6_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_6_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_6_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_7_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_7_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_7_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_7_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_7_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_8_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_8_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_8_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_8_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_8_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_VIDEO_DEC_9_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_VIDEO_DEC_9_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_VIDEO_DEC_9_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_VIDEO_DEC_9_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_VIDEO_DEC_9_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_MC_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_MC_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_MC_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_MC_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_MC_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_MC_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_MC_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_MC_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_MC_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_MC_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_MC_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_MC_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_MC_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_MC_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_MC_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_MC_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_MC_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_MC_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_MC_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_MC_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_HBM_MC_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_HBM_MC_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_HBM_MC_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_HBM_MC_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_HBM_MC_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_6_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_6_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_6_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_6_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_6_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_7_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_7_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_7_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_7_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_7_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_8_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_8_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_8_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_8_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_8_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_9_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_9_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_9_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_9_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_9_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_10_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_10_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_10_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_10_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_10_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_11_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_11_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_11_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_11_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_11_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_3_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_3_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_3_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_3_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_3_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_4_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_4_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_4_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_4_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_4_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_5_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_5_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_5_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_5_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_5_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_6_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_6_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_6_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_6_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_6_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_7_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_7_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_7_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_7_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_7_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_8_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_8_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_8_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_8_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_8_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_9_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_9_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_9_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_9_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_9_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_10_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_10_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_10_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_10_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_10_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_PRT_11_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_PRT_11_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_PRT_11_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_PRT_11_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_PRT_11_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_CH_0_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_CH_0_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_0_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_CH_0_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_CH_0_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_CH_1_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_CH_1_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_1_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_CH_1_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_CH_1_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+/* PSOC_RESET_CONF_NIC_CH_2_CLK_RST_CTRL */
+#define PSOC_RESET_CONF_NIC_CH_2_CLK_RST_CTRL_RST_SEL_SHIFT 0
+#define PSOC_RESET_CONF_NIC_CH_2_CLK_RST_CTRL_RST_SEL_MASK 0xFF
+#define PSOC_RESET_CONF_NIC_CH_2_CLK_RST_CTRL_CLK_DIS_SHIFT 16
+#define PSOC_RESET_CONF_NIC_CH_2_CLK_RST_CTRL_CLK_DIS_MASK 0x10000
+
+#endif /* ASIC_REG_PSOC_RESET_CONF_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
new file mode 100644
index 000000000000..6a89624f01d1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
@@ -0,0 +1,989 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_RESET_CONF_REGS_H_
+#define ASIC_REG_PSOC_RESET_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_RESET_CONF
+ * (Prototype: PSOC_RESET_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_RESET_CONF_PSOC_PRSTN_RST_CFG 0x4C74000
+
+#define mmPSOC_RESET_CONF_PSOC_SOFT_RST_CFG 0x4C74004
+
+#define mmPSOC_RESET_CONF_PSOC_FW_RST_CFG 0x4C74008
+
+#define mmPSOC_RESET_CONF_PSOC_WD_RST_CFG 0x4C7400C
+
+#define mmPSOC_RESET_CONF_PSOC_MNL_RST_CFG 0x4C74010
+
+#define mmPSOC_RESET_CONF_PSOC_FLR_RST_CFG 0x4C74014
+
+#define mmPSOC_RESET_CONF_PSOC_ECC_DERR_RST_CFG 0x4C74018
+
+#define mmPSOC_RESET_CONF_PSOC_SW_RST_CFG 0x4C7401C
+
+#define mmPSOC_RESET_CONF_CPU_PRSTN_RST_CFG 0x4C74020
+
+#define mmPSOC_RESET_CONF_CPU_SOFT_RST_CFG 0x4C74024
+
+#define mmPSOC_RESET_CONF_CPU_FW_RST_CFG 0x4C74028
+
+#define mmPSOC_RESET_CONF_CPU_WD_RST_CFG 0x4C7402C
+
+#define mmPSOC_RESET_CONF_CPU_MNL_RST_CFG 0x4C74030
+
+#define mmPSOC_RESET_CONF_CPU_FLR_RST_CFG 0x4C74034
+
+#define mmPSOC_RESET_CONF_CPU_ECC_DERR_RST_CFG 0x4C74038
+
+#define mmPSOC_RESET_CONF_CPU_SW_RST_CFG 0x4C7403C
+
+#define mmPSOC_RESET_CONF_ARC_PRSTN_RST_CFG 0x4C74040
+
+#define mmPSOC_RESET_CONF_ARC_SOFT_RST_CFG 0x4C74044
+
+#define mmPSOC_RESET_CONF_ARC_FW_RST_CFG 0x4C74048
+
+#define mmPSOC_RESET_CONF_ARC_WD_RST_CFG 0x4C7404C
+
+#define mmPSOC_RESET_CONF_ARC_MNL_RST_CFG 0x4C74050
+
+#define mmPSOC_RESET_CONF_ARC_FLR_RST_CFG 0x4C74054
+
+#define mmPSOC_RESET_CONF_ARC_ECC_DERR_RST_CFG 0x4C74058
+
+#define mmPSOC_RESET_CONF_ARC_SW_RST_CFG 0x4C7405C
+
+#define mmPSOC_RESET_CONF_SIF_PRSTN_RST_CFG 0x4C74060
+
+#define mmPSOC_RESET_CONF_SIF_SOFT_RST_CFG 0x4C74064
+
+#define mmPSOC_RESET_CONF_SIF_FW_RST_CFG 0x4C74068
+
+#define mmPSOC_RESET_CONF_SIF_WD_RST_CFG 0x4C7406C
+
+#define mmPSOC_RESET_CONF_SIF_MNL_RST_CFG 0x4C74070
+
+#define mmPSOC_RESET_CONF_SIF_FLR_RST_CFG 0x4C74074
+
+#define mmPSOC_RESET_CONF_SIF_ECC_DERR_RST_CFG 0x4C74078
+
+#define mmPSOC_RESET_CONF_SIF_SW_RST_CFG 0x4C7407C
+
+#define mmPSOC_RESET_CONF_SRAM_PRSTN_RST_CFG 0x4C74080
+
+#define mmPSOC_RESET_CONF_SRAM_SOFT_RST_CFG 0x4C74084
+
+#define mmPSOC_RESET_CONF_SRAM_FW_RST_CFG 0x4C74088
+
+#define mmPSOC_RESET_CONF_SRAM_WD_RST_CFG 0x4C7408C
+
+#define mmPSOC_RESET_CONF_SRAM_MNL_RST_CFG 0x4C74090
+
+#define mmPSOC_RESET_CONF_SRAM_FLR_RST_CFG 0x4C74094
+
+#define mmPSOC_RESET_CONF_SRAM_ECC_DERR_RST_CFG 0x4C74098
+
+#define mmPSOC_RESET_CONF_SRAM_SW_RST_CFG 0x4C7409C
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_PRSTN_RST_CFG 0x4C740A0
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_SOFT_RST_CFG 0x4C740A4
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_FW_RST_CFG 0x4C740A8
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_WD_RST_CFG 0x4C740AC
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_MNL_RST_CFG 0x4C740B0
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_FLR_RST_CFG 0x4C740B4
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_ECC_DERR_RST_CFG 0x4C740B8
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_SW_RST_CFG 0x4C740BC
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_PRSTN_RST_CFG 0x4C740C0
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_SOFT_RST_CFG 0x4C740C4
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_FW_RST_CFG 0x4C740C8
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_WD_RST_CFG 0x4C740CC
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_MNL_RST_CFG 0x4C740D0
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_FLR_RST_CFG 0x4C740D4
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_ECC_DERR_RST_CFG 0x4C740D8
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_SW_RST_CFG 0x4C740DC
+
+#define mmPSOC_RESET_CONF_PCIE_IF_PRSTN_RST_CFG 0x4C740E0
+
+#define mmPSOC_RESET_CONF_PCIE_IF_SOFT_RST_CFG 0x4C740E4
+
+#define mmPSOC_RESET_CONF_PCIE_IF_FW_RST_CFG 0x4C740E8
+
+#define mmPSOC_RESET_CONF_PCIE_IF_WD_RST_CFG 0x4C740EC
+
+#define mmPSOC_RESET_CONF_PCIE_IF_MNL_RST_CFG 0x4C740F0
+
+#define mmPSOC_RESET_CONF_PCIE_IF_FLR_RST_CFG 0x4C740F4
+
+#define mmPSOC_RESET_CONF_PCIE_IF_ECC_DERR_RST_CFG 0x4C740F8
+
+#define mmPSOC_RESET_CONF_PCIE_IF_SW_RST_CFG 0x4C740FC
+
+#define mmPSOC_RESET_CONF_TPC_DIV_PRSTN_RST_CFG 0x4C74100
+
+#define mmPSOC_RESET_CONF_TPC_DIV_SOFT_RST_CFG 0x4C74104
+
+#define mmPSOC_RESET_CONF_TPC_DIV_FW_RST_CFG 0x4C74108
+
+#define mmPSOC_RESET_CONF_TPC_DIV_WD_RST_CFG 0x4C7410C
+
+#define mmPSOC_RESET_CONF_TPC_DIV_MNL_RST_CFG 0x4C74110
+
+#define mmPSOC_RESET_CONF_TPC_DIV_FLR_RST_CFG 0x4C74114
+
+#define mmPSOC_RESET_CONF_TPC_DIV_ECC_DERR_RST_CFG 0x4C74118
+
+#define mmPSOC_RESET_CONF_TPC_DIV_SW_RST_CFG 0x4C7411C
+
+#define mmPSOC_RESET_CONF_HBM_DIV_PRSTN_RST_CFG 0x4C74120
+
+#define mmPSOC_RESET_CONF_HBM_DIV_SOFT_RST_CFG 0x4C74124
+
+#define mmPSOC_RESET_CONF_HBM_DIV_FW_RST_CFG 0x4C74128
+
+#define mmPSOC_RESET_CONF_HBM_DIV_WD_RST_CFG 0x4C7412C
+
+#define mmPSOC_RESET_CONF_HBM_DIV_MNL_RST_CFG 0x4C74130
+
+#define mmPSOC_RESET_CONF_HBM_DIV_FLR_RST_CFG 0x4C74134
+
+#define mmPSOC_RESET_CONF_HBM_DIV_ECC_DERR_RST_CFG 0x4C74138
+
+#define mmPSOC_RESET_CONF_HBM_DIV_SW_RST_CFG 0x4C7413C
+
+#define mmPSOC_RESET_CONF_PMMU_PRSTN_RST_CFG 0x4C74140
+
+#define mmPSOC_RESET_CONF_PMMU_SOFT_RST_CFG 0x4C74144
+
+#define mmPSOC_RESET_CONF_PMMU_FW_RST_CFG 0x4C74148
+
+#define mmPSOC_RESET_CONF_PMMU_WD_RST_CFG 0x4C7414C
+
+#define mmPSOC_RESET_CONF_PMMU_MNL_RST_CFG 0x4C74150
+
+#define mmPSOC_RESET_CONF_PMMU_FLR_RST_CFG 0x4C74154
+
+#define mmPSOC_RESET_CONF_PMMU_ECC_DERR_RST_CFG 0x4C74158
+
+#define mmPSOC_RESET_CONF_PMMU_SW_RST_CFG 0x4C7415C
+
+#define mmPSOC_RESET_CONF_PM_PRSTN_RST_CFG 0x4C74160
+
+#define mmPSOC_RESET_CONF_PM_SOFT_RST_CFG 0x4C74164
+
+#define mmPSOC_RESET_CONF_PM_FW_RST_CFG 0x4C74168
+
+#define mmPSOC_RESET_CONF_PM_WD_RST_CFG 0x4C7416C
+
+#define mmPSOC_RESET_CONF_PM_MNL_RST_CFG 0x4C74170
+
+#define mmPSOC_RESET_CONF_PM_FLR_RST_CFG 0x4C74174
+
+#define mmPSOC_RESET_CONF_PM_ECC_DERR_RST_CFG 0x4C74178
+
+#define mmPSOC_RESET_CONF_PM_SW_RST_CFG 0x4C7417C
+
+#define mmPSOC_RESET_CONF_TS_PRSTN_RST_CFG 0x4C74180
+
+#define mmPSOC_RESET_CONF_TS_SOFT_RST_CFG 0x4C74184
+
+#define mmPSOC_RESET_CONF_TS_FW_RST_CFG 0x4C74188
+
+#define mmPSOC_RESET_CONF_TS_WD_RST_CFG 0x4C7418C
+
+#define mmPSOC_RESET_CONF_TS_MNL_RST_CFG 0x4C74190
+
+#define mmPSOC_RESET_CONF_TS_FLR_RST_CFG 0x4C74194
+
+#define mmPSOC_RESET_CONF_TS_ECC_DERR_RST_CFG 0x4C74198
+
+#define mmPSOC_RESET_CONF_TS_SW_RST_CFG 0x4C7419C
+
+#define mmPSOC_RESET_CONF_TS_IF_PRSTN_RST_CFG 0x4C741A0
+
+#define mmPSOC_RESET_CONF_TS_IF_SOFT_RST_CFG 0x4C741A4
+
+#define mmPSOC_RESET_CONF_TS_IF_FW_RST_CFG 0x4C741A8
+
+#define mmPSOC_RESET_CONF_TS_IF_WD_RST_CFG 0x4C741AC
+
+#define mmPSOC_RESET_CONF_TS_IF_MNL_RST_CFG 0x4C741B0
+
+#define mmPSOC_RESET_CONF_TS_IF_FLR_RST_CFG 0x4C741B4
+
+#define mmPSOC_RESET_CONF_TS_IF_ECC_DERR_RST_CFG 0x4C741B8
+
+#define mmPSOC_RESET_CONF_TS_IF_SW_RST_CFG 0x4C741BC
+
+#define mmPSOC_RESET_CONF_PLL_L_PRSTN_RST_CFG 0x4C741C0
+
+#define mmPSOC_RESET_CONF_PLL_L_SOFT_RST_CFG 0x4C741C4
+
+#define mmPSOC_RESET_CONF_PLL_L_FW_RST_CFG 0x4C741C8
+
+#define mmPSOC_RESET_CONF_PLL_L_WD_RST_CFG 0x4C741CC
+
+#define mmPSOC_RESET_CONF_PLL_L_MNL_RST_CFG 0x4C741D0
+
+#define mmPSOC_RESET_CONF_PLL_L_FLR_RST_CFG 0x4C741D4
+
+#define mmPSOC_RESET_CONF_PLL_L_ECC_DERR_RST_CFG 0x4C741D8
+
+#define mmPSOC_RESET_CONF_PLL_L_SW_RST_CFG 0x4C741DC
+
+#define mmPSOC_RESET_CONF_PLL_H_PRSTN_RST_CFG 0x4C741E0
+
+#define mmPSOC_RESET_CONF_PLL_H_SOFT_RST_CFG 0x4C741E4
+
+#define mmPSOC_RESET_CONF_PLL_H_FW_RST_CFG 0x4C741E8
+
+#define mmPSOC_RESET_CONF_PLL_H_WD_RST_CFG 0x4C741EC
+
+#define mmPSOC_RESET_CONF_PLL_H_MNL_RST_CFG 0x4C741F0
+
+#define mmPSOC_RESET_CONF_PLL_H_FLR_RST_CFG 0x4C741F4
+
+#define mmPSOC_RESET_CONF_PLL_H_ECC_DERR_RST_CFG 0x4C741F8
+
+#define mmPSOC_RESET_CONF_PLL_H_SW_RST_CFG 0x4C741FC
+
+#define mmPSOC_RESET_CONF_MME_EUS_PRSTN_RST_CFG 0x4C74200
+
+#define mmPSOC_RESET_CONF_MME_EUS_SOFT_RST_CFG 0x4C74204
+
+#define mmPSOC_RESET_CONF_MME_EUS_FW_RST_CFG 0x4C74208
+
+#define mmPSOC_RESET_CONF_MME_EUS_WD_RST_CFG 0x4C7420C
+
+#define mmPSOC_RESET_CONF_MME_EUS_MNL_RST_CFG 0x4C74210
+
+#define mmPSOC_RESET_CONF_MME_EUS_FLR_RST_CFG 0x4C74214
+
+#define mmPSOC_RESET_CONF_MME_EUS_ECC_DERR_RST_CFG 0x4C74218
+
+#define mmPSOC_RESET_CONF_MME_EUS_SW_RST_CFG 0x4C7421C
+
+#define mmPSOC_RESET_CONF_MSS_CLS_PRSTN_RST_CFG 0x4C74220
+
+#define mmPSOC_RESET_CONF_MSS_CLS_SOFT_RST_CFG 0x4C74224
+
+#define mmPSOC_RESET_CONF_MSS_CLS_FW_RST_CFG 0x4C74228
+
+#define mmPSOC_RESET_CONF_MSS_CLS_WD_RST_CFG 0x4C7422C
+
+#define mmPSOC_RESET_CONF_MSS_CLS_MNL_RST_CFG 0x4C74230
+
+#define mmPSOC_RESET_CONF_MSS_CLS_FLR_RST_CFG 0x4C74234
+
+#define mmPSOC_RESET_CONF_MSS_CLS_ECC_DERR_RST_CFG 0x4C74238
+
+#define mmPSOC_RESET_CONF_MSS_CLS_SW_RST_CFG 0x4C7423C
+
+#define mmPSOC_RESET_CONF_TPC_PRSTN_RST_CFG 0x4C74240
+
+#define mmPSOC_RESET_CONF_TPC_SOFT_RST_CFG 0x4C74244
+
+#define mmPSOC_RESET_CONF_TPC_FW_RST_CFG 0x4C74248
+
+#define mmPSOC_RESET_CONF_TPC_WD_RST_CFG 0x4C7424C
+
+#define mmPSOC_RESET_CONF_TPC_MNL_RST_CFG 0x4C74250
+
+#define mmPSOC_RESET_CONF_TPC_FLR_RST_CFG 0x4C74254
+
+#define mmPSOC_RESET_CONF_TPC_ECC_DERR_RST_CFG 0x4C74258
+
+#define mmPSOC_RESET_CONF_TPC_SW_RST_CFG 0x4C7425C
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_PRSTN_RST_CFG 0x4C74260
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_SOFT_RST_CFG 0x4C74264
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_FW_RST_CFG 0x4C74268
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_WD_RST_CFG 0x4C7426C
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_MNL_RST_CFG 0x4C74270
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_FLR_RST_CFG 0x4C74274
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_ECC_DERR_RST_CFG 0x4C74278
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_SW_RST_CFG 0x4C7427C
+
+#define mmPSOC_RESET_CONF_XBAR_PRSTN_RST_CFG 0x4C74280
+
+#define mmPSOC_RESET_CONF_XBAR_SOFT_RST_CFG 0x4C74284
+
+#define mmPSOC_RESET_CONF_XBAR_FW_RST_CFG 0x4C74288
+
+#define mmPSOC_RESET_CONF_XBAR_WD_RST_CFG 0x4C7428C
+
+#define mmPSOC_RESET_CONF_XBAR_MNL_RST_CFG 0x4C74290
+
+#define mmPSOC_RESET_CONF_XBAR_FLR_RST_CFG 0x4C74294
+
+#define mmPSOC_RESET_CONF_XBAR_ECC_DERR_RST_CFG 0x4C74298
+
+#define mmPSOC_RESET_CONF_XBAR_SW_RST_CFG 0x4C7429C
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_PRSTN_RST_CFG 0x4C742A0
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_SOFT_RST_CFG 0x4C742A4
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_FW_RST_CFG 0x4C742A8
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_WD_RST_CFG 0x4C742AC
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_MNL_RST_CFG 0x4C742B0
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_FLR_RST_CFG 0x4C742B4
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_ECC_DERR_RST_CFG 0x4C742B8
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_SW_RST_CFG 0x4C742BC
+
+#define mmPSOC_RESET_CONF_DDMA_PRSTN_RST_CFG 0x4C742C0
+
+#define mmPSOC_RESET_CONF_DDMA_SOFT_RST_CFG 0x4C742C4
+
+#define mmPSOC_RESET_CONF_DDMA_FW_RST_CFG 0x4C742C8
+
+#define mmPSOC_RESET_CONF_DDMA_WD_RST_CFG 0x4C742CC
+
+#define mmPSOC_RESET_CONF_DDMA_MNL_RST_CFG 0x4C742D0
+
+#define mmPSOC_RESET_CONF_DDMA_FLR_RST_CFG 0x4C742D4
+
+#define mmPSOC_RESET_CONF_DDMA_ECC_DERR_RST_CFG 0x4C742D8
+
+#define mmPSOC_RESET_CONF_DDMA_SW_RST_CFG 0x4C742DC
+
+#define mmPSOC_RESET_CONF_KDMA_PRSTN_RST_CFG 0x4C742E0
+
+#define mmPSOC_RESET_CONF_KDMA_SOFT_RST_CFG 0x4C742E4
+
+#define mmPSOC_RESET_CONF_KDMA_FW_RST_CFG 0x4C742E8
+
+#define mmPSOC_RESET_CONF_KDMA_WD_RST_CFG 0x4C742EC
+
+#define mmPSOC_RESET_CONF_KDMA_MNL_RST_CFG 0x4C742F0
+
+#define mmPSOC_RESET_CONF_KDMA_FLR_RST_CFG 0x4C742F4
+
+#define mmPSOC_RESET_CONF_KDMA_ECC_DERR_RST_CFG 0x4C742F8
+
+#define mmPSOC_RESET_CONF_KDMA_SW_RST_CFG 0x4C742FC
+
+#define mmPSOC_RESET_CONF_PDMA_PRSTN_RST_CFG 0x4C74300
+
+#define mmPSOC_RESET_CONF_PDMA_SOFT_RST_CFG 0x4C74304
+
+#define mmPSOC_RESET_CONF_PDMA_FW_RST_CFG 0x4C74308
+
+#define mmPSOC_RESET_CONF_PDMA_WD_RST_CFG 0x4C7430C
+
+#define mmPSOC_RESET_CONF_PDMA_MNL_RST_CFG 0x4C74310
+
+#define mmPSOC_RESET_CONF_PDMA_FLR_RST_CFG 0x4C74314
+
+#define mmPSOC_RESET_CONF_PDMA_ECC_DERR_RST_CFG 0x4C74318
+
+#define mmPSOC_RESET_CONF_PDMA_SW_RST_CFG 0x4C7431C
+
+#define mmPSOC_RESET_CONF_ARC_SS_PRSTN_RST_CFG 0x4C74320
+
+#define mmPSOC_RESET_CONF_ARC_SS_SOFT_RST_CFG 0x4C74324
+
+#define mmPSOC_RESET_CONF_ARC_SS_FW_RST_CFG 0x4C74328
+
+#define mmPSOC_RESET_CONF_ARC_SS_WD_RST_CFG 0x4C7432C
+
+#define mmPSOC_RESET_CONF_ARC_SS_MNL_RST_CFG 0x4C74330
+
+#define mmPSOC_RESET_CONF_ARC_SS_FLR_RST_CFG 0x4C74334
+
+#define mmPSOC_RESET_CONF_ARC_SS_ECC_DERR_RST_CFG 0x4C74338
+
+#define mmPSOC_RESET_CONF_ARC_SS_SW_RST_CFG 0x4C7433C
+
+#define mmPSOC_RESET_CONF_ROTATOR_PRSTN_RST_CFG 0x4C74340
+
+#define mmPSOC_RESET_CONF_ROTATOR_SOFT_RST_CFG 0x4C74344
+
+#define mmPSOC_RESET_CONF_ROTATOR_FW_RST_CFG 0x4C74348
+
+#define mmPSOC_RESET_CONF_ROTATOR_WD_RST_CFG 0x4C7434C
+
+#define mmPSOC_RESET_CONF_ROTATOR_MNL_RST_CFG 0x4C74350
+
+#define mmPSOC_RESET_CONF_ROTATOR_FLR_RST_CFG 0x4C74354
+
+#define mmPSOC_RESET_CONF_ROTATOR_ECC_DERR_RST_CFG 0x4C74358
+
+#define mmPSOC_RESET_CONF_ROTATOR_SW_RST_CFG 0x4C7435C
+
+#define mmPSOC_RESET_CONF_SM_PRSTN_RST_CFG 0x4C74360
+
+#define mmPSOC_RESET_CONF_SM_SOFT_RST_CFG 0x4C74364
+
+#define mmPSOC_RESET_CONF_SM_FW_RST_CFG 0x4C74368
+
+#define mmPSOC_RESET_CONF_SM_WD_RST_CFG 0x4C7436C
+
+#define mmPSOC_RESET_CONF_SM_MNL_RST_CFG 0x4C74370
+
+#define mmPSOC_RESET_CONF_SM_FLR_RST_CFG 0x4C74374
+
+#define mmPSOC_RESET_CONF_SM_ECC_DERR_RST_CFG 0x4C74378
+
+#define mmPSOC_RESET_CONF_SM_SW_RST_CFG 0x4C7437C
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_PRSTN_RST_CFG 0x4C74380
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_SOFT_RST_CFG 0x4C74384
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_FW_RST_CFG 0x4C74388
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_WD_RST_CFG 0x4C7438C
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_MNL_RST_CFG 0x4C74390
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_FLR_RST_CFG 0x4C74394
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_ECC_DERR_RST_CFG 0x4C74398
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_SW_RST_CFG 0x4C7439C
+
+#define mmPSOC_RESET_CONF_HBM_MC_PRSTN_RST_CFG 0x4C743A0
+
+#define mmPSOC_RESET_CONF_HBM_MC_SOFT_RST_CFG 0x4C743A4
+
+#define mmPSOC_RESET_CONF_HBM_MC_FW_RST_CFG 0x4C743A8
+
+#define mmPSOC_RESET_CONF_HBM_MC_WD_RST_CFG 0x4C743AC
+
+#define mmPSOC_RESET_CONF_HBM_MC_MNL_RST_CFG 0x4C743B0
+
+#define mmPSOC_RESET_CONF_HBM_MC_FLR_RST_CFG 0x4C743B4
+
+#define mmPSOC_RESET_CONF_HBM_MC_ECC_DERR_RST_CFG 0x4C743B8
+
+#define mmPSOC_RESET_CONF_HBM_MC_SW_RST_CFG 0x4C743BC
+
+#define mmPSOC_RESET_CONF_NIC_PRSTN_RST_CFG 0x4C743C0
+
+#define mmPSOC_RESET_CONF_NIC_SOFT_RST_CFG 0x4C743C4
+
+#define mmPSOC_RESET_CONF_NIC_FW_RST_CFG 0x4C743C8
+
+#define mmPSOC_RESET_CONF_NIC_WD_RST_CFG 0x4C743CC
+
+#define mmPSOC_RESET_CONF_NIC_MNL_RST_CFG 0x4C743D0
+
+#define mmPSOC_RESET_CONF_NIC_FLR_RST_CFG 0x4C743D4
+
+#define mmPSOC_RESET_CONF_NIC_ECC_DERR_RST_CFG 0x4C743D8
+
+#define mmPSOC_RESET_CONF_NIC_SW_RST_CFG 0x4C743DC
+
+#define mmPSOC_RESET_CONF_NIC_PRT_PRSTN_RST_CFG 0x4C743E0
+
+#define mmPSOC_RESET_CONF_NIC_PRT_SOFT_RST_CFG 0x4C743E4
+
+#define mmPSOC_RESET_CONF_NIC_PRT_FW_RST_CFG 0x4C743E8
+
+#define mmPSOC_RESET_CONF_NIC_PRT_WD_RST_CFG 0x4C743EC
+
+#define mmPSOC_RESET_CONF_NIC_PRT_MNL_RST_CFG 0x4C743F0
+
+#define mmPSOC_RESET_CONF_NIC_PRT_FLR_RST_CFG 0x4C743F4
+
+#define mmPSOC_RESET_CONF_NIC_PRT_ECC_DERR_RST_CFG 0x4C743F8
+
+#define mmPSOC_RESET_CONF_NIC_PRT_SW_RST_CFG 0x4C743FC
+
+#define mmPSOC_RESET_CONF_NIC_CH_PRSTN_RST_CFG 0x4C74400
+
+#define mmPSOC_RESET_CONF_NIC_CH_SOFT_RST_CFG 0x4C74404
+
+#define mmPSOC_RESET_CONF_NIC_CH_FW_RST_CFG 0x4C74408
+
+#define mmPSOC_RESET_CONF_NIC_CH_WD_RST_CFG 0x4C7440C
+
+#define mmPSOC_RESET_CONF_NIC_CH_MNL_RST_CFG 0x4C74410
+
+#define mmPSOC_RESET_CONF_NIC_CH_FLR_RST_CFG 0x4C74414
+
+#define mmPSOC_RESET_CONF_NIC_CH_ECC_DERR_RST_CFG 0x4C74418
+
+#define mmPSOC_RESET_CONF_NIC_CH_SW_RST_CFG 0x4C7441C
+
+#define mmPSOC_RESET_CONF_SOFT_RST 0x4C74800
+
+#define mmPSOC_RESET_CONF_SW_ALL_RST 0x4C74804
+
+#define mmPSOC_RESET_CONF_UNIT_RST_N 0x4C74808
+
+#define mmPSOC_RESET_CONF_PSOC_UNIT_RST 0x4C7480C
+
+#define mmPSOC_RESET_CONF_CPU_UNIT_RST 0x4C74810
+
+#define mmPSOC_RESET_CONF_ARC_UNIT_RST 0x4C74814
+
+#define mmPSOC_RESET_CONF_SIF_UNIT_RST 0x4C74818
+
+#define mmPSOC_RESET_CONF_SRAM_UNIT_RST 0x4C7481C
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_UNIT_RST 0x4C74820
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_UNIT_RST 0x4C74824
+
+#define mmPSOC_RESET_CONF_PCIE_IF_UNIT_RST 0x4C74828
+
+#define mmPSOC_RESET_CONF_TPC_DIV_UNIT_RST 0x4C7482C
+
+#define mmPSOC_RESET_CONF_HBM_DIV_UNIT_RST 0x4C74830
+
+#define mmPSOC_RESET_CONF_PMMU_UNIT_RST 0x4C74834
+
+#define mmPSOC_RESET_CONF_PM_UNIT_RST 0x4C74838
+
+#define mmPSOC_RESET_CONF_TS_UNIT_RST 0x4C7483C
+
+#define mmPSOC_RESET_CONF_TS_IF_UNIT_RST 0x4C74840
+
+#define mmPSOC_RESET_CONF_PLL_L_UNIT_RST 0x4C74844
+
+#define mmPSOC_RESET_CONF_PLL_H_UNIT_RST 0x4C74848
+
+#define mmPSOC_RESET_CONF_MME_EUS_UNIT_RST 0x4C7484C
+
+#define mmPSOC_RESET_CONF_MSS_CLS_UNIT_RST 0x4C74850
+
+#define mmPSOC_RESET_CONF_TPC_UNIT_RST 0x4C74854
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_UNIT_RST 0x4C74858
+
+#define mmPSOC_RESET_CONF_XBAR_UNIT_RST 0x4C7485C
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_UNIT_RST 0x4C74860
+
+#define mmPSOC_RESET_CONF_DDMA_UNIT_RST 0x4C74864
+
+#define mmPSOC_RESET_CONF_KDMA_UNIT_RST 0x4C74868
+
+#define mmPSOC_RESET_CONF_PDMA_UNIT_RST 0x4C7486C
+
+#define mmPSOC_RESET_CONF_ARC_SS_UNIT_RST 0x4C74870
+
+#define mmPSOC_RESET_CONF_ROTATOR_UNIT_RST 0x4C74874
+
+#define mmPSOC_RESET_CONF_SM_UNIT_RST 0x4C74878
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_UNIT_RST 0x4C7487C
+
+#define mmPSOC_RESET_CONF_HBM_MC_UNIT_RST 0x4C74880
+
+#define mmPSOC_RESET_CONF_NIC_UNIT_RST 0x4C74884
+
+#define mmPSOC_RESET_CONF_NIC_PRT_UNIT_RST 0x4C74888
+
+#define mmPSOC_RESET_CONF_NIC_CH_UNIT_RST 0x4C7488C
+
+#define mmPSOC_RESET_CONF_PSOC_0_CLK_RST_CTRL 0x4C74B00
+
+#define mmPSOC_RESET_CONF_CPU_0_CLK_RST_CTRL 0x4C74B04
+
+#define mmPSOC_RESET_CONF_ARC_0_CLK_RST_CTRL 0x4C74B08
+
+#define mmPSOC_RESET_CONF_ARC_1_CLK_RST_CTRL 0x4C74B0C
+
+#define mmPSOC_RESET_CONF_SIF_0_CLK_RST_CTRL 0x4C74B10
+
+#define mmPSOC_RESET_CONF_SIF_1_CLK_RST_CTRL 0x4C74B14
+
+#define mmPSOC_RESET_CONF_SIF_2_CLK_RST_CTRL 0x4C74B18
+
+#define mmPSOC_RESET_CONF_SIF_3_CLK_RST_CTRL 0x4C74B1C
+
+#define mmPSOC_RESET_CONF_SRAM_0_CLK_RST_CTRL 0x4C74B20
+
+#define mmPSOC_RESET_CONF_SRAM_1_CLK_RST_CTRL 0x4C74B24
+
+#define mmPSOC_RESET_CONF_SRAM_2_CLK_RST_CTRL 0x4C74B28
+
+#define mmPSOC_RESET_CONF_SRAM_3_CLK_RST_CTRL 0x4C74B2C
+
+#define mmPSOC_RESET_CONF_PCIE_CTRL_0_CLK_RST_CTRL 0x4C74B30
+
+#define mmPSOC_RESET_CONF_PCIE_PHY_CFG_0_CLK_RST_CTRL 0x4C74B34
+
+#define mmPSOC_RESET_CONF_PCIE_IF_0_CLK_RST_CTRL 0x4C74B38
+
+#define mmPSOC_RESET_CONF_TPC_DIV_0_CLK_RST_CTRL 0x4C74B3C
+
+#define mmPSOC_RESET_CONF_TPC_DIV_1_CLK_RST_CTRL 0x4C74B40
+
+#define mmPSOC_RESET_CONF_TPC_DIV_2_CLK_RST_CTRL 0x4C74B44
+
+#define mmPSOC_RESET_CONF_TPC_DIV_3_CLK_RST_CTRL 0x4C74B48
+
+#define mmPSOC_RESET_CONF_TPC_DIV_4_CLK_RST_CTRL 0x4C74B4C
+
+#define mmPSOC_RESET_CONF_HBM_DIV_0_CLK_RST_CTRL 0x4C74B50
+
+#define mmPSOC_RESET_CONF_HBM_DIV_1_CLK_RST_CTRL 0x4C74B54
+
+#define mmPSOC_RESET_CONF_HBM_DIV_2_CLK_RST_CTRL 0x4C74B58
+
+#define mmPSOC_RESET_CONF_HBM_DIV_3_CLK_RST_CTRL 0x4C74B5C
+
+#define mmPSOC_RESET_CONF_HBM_DIV_4_CLK_RST_CTRL 0x4C74B60
+
+#define mmPSOC_RESET_CONF_HBM_DIV_5_CLK_RST_CTRL 0x4C74B64
+
+#define mmPSOC_RESET_CONF_PMMU_0_CLK_RST_CTRL 0x4C74B68
+
+#define mmPSOC_RESET_CONF_PM_0_CLK_RST_CTRL 0x4C74B6C
+
+#define mmPSOC_RESET_CONF_PM_1_CLK_RST_CTRL 0x4C74B70
+
+#define mmPSOC_RESET_CONF_PM_2_CLK_RST_CTRL 0x4C74B74
+
+#define mmPSOC_RESET_CONF_PM_3_CLK_RST_CTRL 0x4C74B78
+
+#define mmPSOC_RESET_CONF_TS_0_CLK_RST_CTRL 0x4C74B7C
+
+#define mmPSOC_RESET_CONF_TS_1_CLK_RST_CTRL 0x4C74B80
+
+#define mmPSOC_RESET_CONF_TS_2_CLK_RST_CTRL 0x4C74B84
+
+#define mmPSOC_RESET_CONF_TS_3_CLK_RST_CTRL 0x4C74B88
+
+#define mmPSOC_RESET_CONF_TS_IF_0_CLK_RST_CTRL 0x4C74B8C
+
+#define mmPSOC_RESET_CONF_TS_IF_1_CLK_RST_CTRL 0x4C74B90
+
+#define mmPSOC_RESET_CONF_TS_IF_2_CLK_RST_CTRL 0x4C74B94
+
+#define mmPSOC_RESET_CONF_TS_IF_3_CLK_RST_CTRL 0x4C74B98
+
+#define mmPSOC_RESET_CONF_PLL_L_0_CLK_RST_CTRL 0x4C74B9C
+
+#define mmPSOC_RESET_CONF_PLL_L_1_CLK_RST_CTRL 0x4C74BA0
+
+#define mmPSOC_RESET_CONF_PLL_L_2_CLK_RST_CTRL 0x4C74BA4
+
+#define mmPSOC_RESET_CONF_PLL_L_3_CLK_RST_CTRL 0x4C74BA8
+
+#define mmPSOC_RESET_CONF_PLL_L_4_CLK_RST_CTRL 0x4C74BAC
+
+#define mmPSOC_RESET_CONF_PLL_L_5_CLK_RST_CTRL 0x4C74BB0
+
+#define mmPSOC_RESET_CONF_PLL_L_6_CLK_RST_CTRL 0x4C74BB4
+
+#define mmPSOC_RESET_CONF_PLL_L_7_CLK_RST_CTRL 0x4C74BB8
+
+#define mmPSOC_RESET_CONF_PLL_L_8_CLK_RST_CTRL 0x4C74BBC
+
+#define mmPSOC_RESET_CONF_PLL_L_9_CLK_RST_CTRL 0x4C74BC0
+
+#define mmPSOC_RESET_CONF_PLL_L_10_CLK_RST_CTRL 0x4C74BC4
+
+#define mmPSOC_RESET_CONF_PLL_L_11_CLK_RST_CTRL 0x4C74BC8
+
+#define mmPSOC_RESET_CONF_PLL_L_12_CLK_RST_CTRL 0x4C74BCC
+
+#define mmPSOC_RESET_CONF_PLL_L_13_CLK_RST_CTRL 0x4C74BD0
+
+#define mmPSOC_RESET_CONF_PLL_L_14_CLK_RST_CTRL 0x4C74BD4
+
+#define mmPSOC_RESET_CONF_PLL_L_15_CLK_RST_CTRL 0x4C74BD8
+
+#define mmPSOC_RESET_CONF_PLL_L_16_CLK_RST_CTRL 0x4C74BDC
+
+#define mmPSOC_RESET_CONF_PLL_L_17_CLK_RST_CTRL 0x4C74BE0
+
+#define mmPSOC_RESET_CONF_PLL_L_18_CLK_RST_CTRL 0x4C74BE4
+
+#define mmPSOC_RESET_CONF_PLL_L_19_CLK_RST_CTRL 0x4C74BE8
+
+#define mmPSOC_RESET_CONF_PLL_L_20_CLK_RST_CTRL 0x4C74BEC
+
+#define mmPSOC_RESET_CONF_PLL_L_21_CLK_RST_CTRL 0x4C74BF0
+
+#define mmPSOC_RESET_CONF_PLL_L_22_CLK_RST_CTRL 0x4C74BF4
+
+#define mmPSOC_RESET_CONF_PLL_L_23_CLK_RST_CTRL 0x4C74BF8
+
+#define mmPSOC_RESET_CONF_PLL_L_24_CLK_RST_CTRL 0x4C74BFC
+
+#define mmPSOC_RESET_CONF_PLL_L_25_CLK_RST_CTRL 0x4C74C00
+
+#define mmPSOC_RESET_CONF_PLL_L_26_CLK_RST_CTRL 0x4C74C04
+
+#define mmPSOC_RESET_CONF_PLL_L_27_CLK_RST_CTRL 0x4C74C08
+
+#define mmPSOC_RESET_CONF_PLL_L_28_CLK_RST_CTRL 0x4C74C0C
+
+#define mmPSOC_RESET_CONF_PLL_L_29_CLK_RST_CTRL 0x4C74C10
+
+#define mmPSOC_RESET_CONF_PLL_L_30_CLK_RST_CTRL 0x4C74C14
+
+#define mmPSOC_RESET_CONF_PLL_L_31_CLK_RST_CTRL 0x4C74C18
+
+#define mmPSOC_RESET_CONF_PLL_H_0_CLK_RST_CTRL 0x4C74C1C
+
+#define mmPSOC_RESET_CONF_PLL_H_1_CLK_RST_CTRL 0x4C74C20
+
+#define mmPSOC_RESET_CONF_MME_EUS_0_CLK_RST_CTRL 0x4C74C24
+
+#define mmPSOC_RESET_CONF_MME_EUS_1_CLK_RST_CTRL 0x4C74C28
+
+#define mmPSOC_RESET_CONF_MME_EUS_2_CLK_RST_CTRL 0x4C74C2C
+
+#define mmPSOC_RESET_CONF_MME_EUS_3_CLK_RST_CTRL 0x4C74C30
+
+#define mmPSOC_RESET_CONF_MSS_CLS_0_CLK_RST_CTRL 0x4C74C34
+
+#define mmPSOC_RESET_CONF_MSS_CLS_1_CLK_RST_CTRL 0x4C74C38
+
+#define mmPSOC_RESET_CONF_MSS_CLS_2_CLK_RST_CTRL 0x4C74C3C
+
+#define mmPSOC_RESET_CONF_MSS_CLS_3_CLK_RST_CTRL 0x4C74C40
+
+#define mmPSOC_RESET_CONF_TPC_0_CLK_RST_CTRL 0x4C74C44
+
+#define mmPSOC_RESET_CONF_TPC_1_CLK_RST_CTRL 0x4C74C48
+
+#define mmPSOC_RESET_CONF_TPC_2_CLK_RST_CTRL 0x4C74C4C
+
+#define mmPSOC_RESET_CONF_TPC_3_CLK_RST_CTRL 0x4C74C50
+
+#define mmPSOC_RESET_CONF_TPC_4_CLK_RST_CTRL 0x4C74C54
+
+#define mmPSOC_RESET_CONF_TPC_5_CLK_RST_CTRL 0x4C74C58
+
+#define mmPSOC_RESET_CONF_TPC_6_CLK_RST_CTRL 0x4C74C5C
+
+#define mmPSOC_RESET_CONF_TPC_7_CLK_RST_CTRL 0x4C74C60
+
+#define mmPSOC_RESET_CONF_TPC_8_CLK_RST_CTRL 0x4C74C64
+
+#define mmPSOC_RESET_CONF_TPC_9_CLK_RST_CTRL 0x4C74C68
+
+#define mmPSOC_RESET_CONF_TPC_10_CLK_RST_CTRL 0x4C74C6C
+
+#define mmPSOC_RESET_CONF_TPC_11_CLK_RST_CTRL 0x4C74C70
+
+#define mmPSOC_RESET_CONF_TPC_12_CLK_RST_CTRL 0x4C74C74
+
+#define mmPSOC_RESET_CONF_TPC_13_CLK_RST_CTRL 0x4C74C78
+
+#define mmPSOC_RESET_CONF_TPC_14_CLK_RST_CTRL 0x4C74C7C
+
+#define mmPSOC_RESET_CONF_TPC_15_CLK_RST_CTRL 0x4C74C80
+
+#define mmPSOC_RESET_CONF_TPC_16_CLK_RST_CTRL 0x4C74C84
+
+#define mmPSOC_RESET_CONF_TPC_17_CLK_RST_CTRL 0x4C74C88
+
+#define mmPSOC_RESET_CONF_TPC_18_CLK_RST_CTRL 0x4C74C8C
+
+#define mmPSOC_RESET_CONF_TPC_19_CLK_RST_CTRL 0x4C74C90
+
+#define mmPSOC_RESET_CONF_TPC_20_CLK_RST_CTRL 0x4C74C94
+
+#define mmPSOC_RESET_CONF_TPC_21_CLK_RST_CTRL 0x4C74C98
+
+#define mmPSOC_RESET_CONF_TPC_22_CLK_RST_CTRL 0x4C74C9C
+
+#define mmPSOC_RESET_CONF_TPC_23_CLK_RST_CTRL 0x4C74CA0
+
+#define mmPSOC_RESET_CONF_TPC_24_CLK_RST_CTRL 0x4C74CA4
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_0_CLK_RST_CTRL 0x4C74CA8
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_1_CLK_RST_CTRL 0x4C74CAC
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_2_CLK_RST_CTRL 0x4C74CB0
+
+#define mmPSOC_RESET_CONF_HIF_HMMU_3_CLK_RST_CTRL 0x4C74CB4
+
+#define mmPSOC_RESET_CONF_XBAR_0_CLK_RST_CTRL 0x4C74CB8
+
+#define mmPSOC_RESET_CONF_XBAR_1_CLK_RST_CTRL 0x4C74CBC
+
+#define mmPSOC_RESET_CONF_XBAR_2_CLK_RST_CTRL 0x4C74CC0
+
+#define mmPSOC_RESET_CONF_XBAR_3_CLK_RST_CTRL 0x4C74CC4
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_0_CLK_RST_CTRL 0x4C74CC8
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_1_CLK_RST_CTRL 0x4C74CCC
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_2_CLK_RST_CTRL 0x4C74CD0
+
+#define mmPSOC_RESET_CONF_SFT_XFT_TFT_3_CLK_RST_CTRL 0x4C74CD4
+
+#define mmPSOC_RESET_CONF_DDMA_0_CLK_RST_CTRL 0x4C74CD8
+
+#define mmPSOC_RESET_CONF_DDMA_1_CLK_RST_CTRL 0x4C74CDC
+
+#define mmPSOC_RESET_CONF_DDMA_2_CLK_RST_CTRL 0x4C74CE0
+
+#define mmPSOC_RESET_CONF_DDMA_3_CLK_RST_CTRL 0x4C74CE4
+
+#define mmPSOC_RESET_CONF_DDMA_4_CLK_RST_CTRL 0x4C74CE8
+
+#define mmPSOC_RESET_CONF_DDMA_5_CLK_RST_CTRL 0x4C74CEC
+
+#define mmPSOC_RESET_CONF_DDMA_6_CLK_RST_CTRL 0x4C74CF0
+
+#define mmPSOC_RESET_CONF_DDMA_7_CLK_RST_CTRL 0x4C74CF4
+
+#define mmPSOC_RESET_CONF_KDMA_0_CLK_RST_CTRL 0x4C74CF8
+
+#define mmPSOC_RESET_CONF_PDMA_0_CLK_RST_CTRL 0x4C74CFC
+
+#define mmPSOC_RESET_CONF_PDMA_1_CLK_RST_CTRL 0x4C74D00
+
+#define mmPSOC_RESET_CONF_ARC_SS_0_CLK_RST_CTRL 0x4C74D04
+
+#define mmPSOC_RESET_CONF_ARC_SS_1_CLK_RST_CTRL 0x4C74D08
+
+#define mmPSOC_RESET_CONF_ARC_SS_2_CLK_RST_CTRL 0x4C74D0C
+
+#define mmPSOC_RESET_CONF_ARC_SS_3_CLK_RST_CTRL 0x4C74D10
+
+#define mmPSOC_RESET_CONF_ARC_SS_4_CLK_RST_CTRL 0x4C74D14
+
+#define mmPSOC_RESET_CONF_ROTATOR_0_CLK_RST_CTRL 0x4C74D18
+
+#define mmPSOC_RESET_CONF_ROTATOR_1_CLK_RST_CTRL 0x4C74D1C
+
+#define mmPSOC_RESET_CONF_SM_0_CLK_RST_CTRL 0x4C74D20
+
+#define mmPSOC_RESET_CONF_SM_1_CLK_RST_CTRL 0x4C74D24
+
+#define mmPSOC_RESET_CONF_SM_2_CLK_RST_CTRL 0x4C74D28
+
+#define mmPSOC_RESET_CONF_SM_3_CLK_RST_CTRL 0x4C74D2C
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_0_CLK_RST_CTRL 0x4C74D30
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_1_CLK_RST_CTRL 0x4C74D34
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_2_CLK_RST_CTRL 0x4C74D38
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_3_CLK_RST_CTRL 0x4C74D3C
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_4_CLK_RST_CTRL 0x4C74D40
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_5_CLK_RST_CTRL 0x4C74D44
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_6_CLK_RST_CTRL 0x4C74D48
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_7_CLK_RST_CTRL 0x4C74D4C
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_8_CLK_RST_CTRL 0x4C74D50
+
+#define mmPSOC_RESET_CONF_VIDEO_DEC_9_CLK_RST_CTRL 0x4C74D54
+
+#define mmPSOC_RESET_CONF_HBM_MC_0_CLK_RST_CTRL 0x4C74D58
+
+#define mmPSOC_RESET_CONF_HBM_MC_1_CLK_RST_CTRL 0x4C74D5C
+
+#define mmPSOC_RESET_CONF_HBM_MC_2_CLK_RST_CTRL 0x4C74D60
+
+#define mmPSOC_RESET_CONF_HBM_MC_3_CLK_RST_CTRL 0x4C74D64
+
+#define mmPSOC_RESET_CONF_HBM_MC_4_CLK_RST_CTRL 0x4C74D68
+
+#define mmPSOC_RESET_CONF_HBM_MC_5_CLK_RST_CTRL 0x4C74D6C
+
+#define mmPSOC_RESET_CONF_NIC_0_CLK_RST_CTRL 0x4C74D70
+
+#define mmPSOC_RESET_CONF_NIC_1_CLK_RST_CTRL 0x4C74D74
+
+#define mmPSOC_RESET_CONF_NIC_2_CLK_RST_CTRL 0x4C74D78
+
+#define mmPSOC_RESET_CONF_NIC_3_CLK_RST_CTRL 0x4C74D7C
+
+#define mmPSOC_RESET_CONF_NIC_4_CLK_RST_CTRL 0x4C74D80
+
+#define mmPSOC_RESET_CONF_NIC_5_CLK_RST_CTRL 0x4C74D84
+
+#define mmPSOC_RESET_CONF_NIC_6_CLK_RST_CTRL 0x4C74D88
+
+#define mmPSOC_RESET_CONF_NIC_7_CLK_RST_CTRL 0x4C74D8C
+
+#define mmPSOC_RESET_CONF_NIC_8_CLK_RST_CTRL 0x4C74D90
+
+#define mmPSOC_RESET_CONF_NIC_9_CLK_RST_CTRL 0x4C74D94
+
+#define mmPSOC_RESET_CONF_NIC_10_CLK_RST_CTRL 0x4C74D98
+
+#define mmPSOC_RESET_CONF_NIC_11_CLK_RST_CTRL 0x4C74D9C
+
+#define mmPSOC_RESET_CONF_NIC_PRT_0_CLK_RST_CTRL 0x4C74DA0
+
+#define mmPSOC_RESET_CONF_NIC_PRT_1_CLK_RST_CTRL 0x4C74DA4
+
+#define mmPSOC_RESET_CONF_NIC_PRT_2_CLK_RST_CTRL 0x4C74DA8
+
+#define mmPSOC_RESET_CONF_NIC_PRT_3_CLK_RST_CTRL 0x4C74DAC
+
+#define mmPSOC_RESET_CONF_NIC_PRT_4_CLK_RST_CTRL 0x4C74DB0
+
+#define mmPSOC_RESET_CONF_NIC_PRT_5_CLK_RST_CTRL 0x4C74DB4
+
+#define mmPSOC_RESET_CONF_NIC_PRT_6_CLK_RST_CTRL 0x4C74DB8
+
+#define mmPSOC_RESET_CONF_NIC_PRT_7_CLK_RST_CTRL 0x4C74DBC
+
+#define mmPSOC_RESET_CONF_NIC_PRT_8_CLK_RST_CTRL 0x4C74DC0
+
+#define mmPSOC_RESET_CONF_NIC_PRT_9_CLK_RST_CTRL 0x4C74DC4
+
+#define mmPSOC_RESET_CONF_NIC_PRT_10_CLK_RST_CTRL 0x4C74DC8
+
+#define mmPSOC_RESET_CONF_NIC_PRT_11_CLK_RST_CTRL 0x4C74DCC
+
+#define mmPSOC_RESET_CONF_NIC_CH_0_CLK_RST_CTRL 0x4C74DD0
+
+#define mmPSOC_RESET_CONF_NIC_CH_1_CLK_RST_CTRL 0x4C74DD4
+
+#define mmPSOC_RESET_CONF_NIC_CH_2_CLK_RST_CTRL 0x4C74DD8
+
+#endif /* ASIC_REG_PSOC_RESET_CONF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
new file mode 100644
index 000000000000..699becc28887
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_TIMESTAMP
+ * (Prototype: TIMESTAMP)
+ *****************************************
+ */
+
+#define mmPSOC_TIMESTAMP_CNTCR 0x4C49000
+
+#define mmPSOC_TIMESTAMP_CNTSR 0x4C49004
+
+#define mmPSOC_TIMESTAMP_CNTCVL 0x4C49008
+
+#define mmPSOC_TIMESTAMP_CNTCVU 0x4C4900C
+
+#define mmPSOC_TIMESTAMP_CNTFID0 0x4C49020
+
+#define mmPSOC_TIMESTAMP_PIDR4 0x4C49FD0
+
+#define mmPSOC_TIMESTAMP_PIDR5 0x4C49FD4
+
+#define mmPSOC_TIMESTAMP_PIDR6 0x4C49FD8
+
+#define mmPSOC_TIMESTAMP_PIDR7 0x4C49FDC
+
+#define mmPSOC_TIMESTAMP_PIDR0 0x4C49FE0
+
+#define mmPSOC_TIMESTAMP_PIDR1 0x4C49FE4
+
+#define mmPSOC_TIMESTAMP_PIDR2 0x4C49FE8
+
+#define mmPSOC_TIMESTAMP_PIDR3 0x4C49FEC
+
+#define mmPSOC_TIMESTAMP_CIDR0 0x4C49FF0
+
+#define mmPSOC_TIMESTAMP_CIDR1 0x4C49FF4
+
+#define mmPSOC_TIMESTAMP_CIDR2 0x4C49FF8
+
+#define mmPSOC_TIMESTAMP_CIDR3 0x4C49FFC
+
+#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
new file mode 100644
index 000000000000..79320320ebcb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_DESC_REGS_H_
+#define ASIC_REG_ROT0_DESC_REGS_H_
+
+/*
+ *****************************************
+ * ROT0_DESC
+ * (Prototype: ROT_DESC)
+ *****************************************
+ */
+
+#define mmROT0_DESC_CONTEXT_ID 0x4E0B100
+
+#define mmROT0_DESC_IN_IMG_START_ADDR_L 0x4E0B104
+
+#define mmROT0_DESC_IN_IMG_START_ADDR_H 0x4E0B108
+
+#define mmROT0_DESC_OUT_IMG_START_ADDR_L 0x4E0B10C
+
+#define mmROT0_DESC_OUT_IMG_START_ADDR_H 0x4E0B110
+
+#define mmROT0_DESC_CFG 0x4E0B114
+
+#define mmROT0_DESC_IM_READ_SLOPE 0x4E0B118
+
+#define mmROT0_DESC_SIN_D 0x4E0B11C
+
+#define mmROT0_DESC_COS_D 0x4E0B120
+
+#define mmROT0_DESC_IN_IMG 0x4E0B124
+
+#define mmROT0_DESC_IN_STRIDE 0x4E0B128
+
+#define mmROT0_DESC_IN_STRIPE 0x4E0B12C
+
+#define mmROT0_DESC_IN_CENTER 0x4E0B130
+
+#define mmROT0_DESC_OUT_IMG 0x4E0B134
+
+#define mmROT0_DESC_OUT_STRIDE 0x4E0B138
+
+#define mmROT0_DESC_OUT_STRIPE 0x4E0B13C
+
+#define mmROT0_DESC_OUT_CENTER 0x4E0B140
+
+#define mmROT0_DESC_BACKGROUND 0x4E0B144
+
+#define mmROT0_DESC_CPL_MSG_EN 0x4E0B148
+
+#define mmROT0_DESC_IDLE_STATE 0x4E0B14C
+
+#define mmROT0_DESC_CPL_MSG_ADDR 0x4E0B150
+
+#define mmROT0_DESC_CPL_MSG_DATA 0x4E0B154
+
+#define mmROT0_DESC_CPL_MSG_AWUSER 0x4E0B158
+
+#define mmROT0_DESC_X_I_START_OFFSET 0x4E0B15C
+
+#define mmROT0_DESC_X_I_START_OFFSET_FLIP 0x4E0B160
+
+#define mmROT0_DESC_X_I_FIRST 0x4E0B164
+
+#define mmROT0_DESC_Y_I_FIRST 0x4E0B168
+
+#define mmROT0_DESC_Y_I 0x4E0B16C
+
+#define mmROT0_DESC_OUT_STRIPE_SIZE 0x4E0B170
+
+#define mmROT0_DESC_RSB_CFG_0 0x4E0B174
+
+#define mmROT0_DESC_RSB_PAD_VAL 0x4E0B178
+
+#define mmROT0_DESC_HBW_ARUSER_HI 0x4E0B17C
+
+#define mmROT0_DESC_HBW_ARUSER_LO 0x4E0B180
+
+#define mmROT0_DESC_HBW_AWUSER_HI 0x4E0B184
+
+#define mmROT0_DESC_HBW_AWUSER_LO 0x4E0B188
+
+#define mmROT0_DESC_OWM_CFG 0x4E0B18C
+
+#define mmROT0_DESC_CTRL_CFG 0x4E0B190
+
+#define mmROT0_DESC_PIXEL_PAD 0x4E0B194
+
+#define mmROT0_DESC_PREC_SHIFT 0x4E0B198
+
+#define mmROT0_DESC_MAX_VAL 0x4E0B19C
+
+#define mmROT0_DESC_A0_M11 0x4E0B1A0
+
+#define mmROT0_DESC_A1_M12 0x4E0B1A4
+
+#define mmROT0_DESC_A2 0x4E0B1A8
+
+#define mmROT0_DESC_B0_M21 0x4E0B1AC
+
+#define mmROT0_DESC_B1_M22 0x4E0B1B0
+
+#define mmROT0_DESC_B2 0x4E0B1B4
+
+#define mmROT0_DESC_C0 0x4E0B1B8
+
+#define mmROT0_DESC_C1 0x4E0B1BC
+
+#define mmROT0_DESC_C2 0x4E0B1C0
+
+#define mmROT0_DESC_D0 0x4E0B1C4
+
+#define mmROT0_DESC_D1 0x4E0B1C8
+
+#define mmROT0_DESC_D2 0x4E0B1CC
+
+#define mmROT0_DESC_INV_PROC_SIZE_M_1 0x4E0B1D0
+
+#define mmROT0_DESC_MESH_IMG_START_ADDR_L 0x4E0B1D4
+
+#define mmROT0_DESC_MESH_IMG_START_ADDR_H 0x4E0B1D8
+
+#define mmROT0_DESC_MESH_IMG 0x4E0B1DC
+
+#define mmROT0_DESC_MESH_STRIDE 0x4E0B1E0
+
+#define mmROT0_DESC_MESH_STRIPE 0x4E0B1E4
+
+#define mmROT0_DESC_MESH_CTRL 0x4E0B1E8
+
+#define mmROT0_DESC_MESH_GH 0x4E0B1EC
+
+#define mmROT0_DESC_MESH_GV 0x4E0B1F0
+
+#define mmROT0_DESC_MRSB_CFG_0 0x4E0B1F4
+
+#define mmROT0_DESC_MRSB_PAD_VAL 0x4E0B1F8
+
+#define mmROT0_DESC_BUF_CFG 0x4E0B1FC
+
+#define mmROT0_DESC_CID_OFFSET 0x4E0B200
+
+#define mmROT0_DESC_PUSH_DESC 0x4E0B204
+
+#endif /* ASIC_REG_ROT0_DESC_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
new file mode 100644
index 000000000000..f2e739ede3d9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_MASKS_H_
+#define ASIC_REG_ROT0_MASKS_H_
+
+/*
+ *****************************************
+ * ROT0
+ * (Prototype: ROTATOR)
+ *****************************************
+ */
+
+/* ROT0_KMD_MODE */
+#define ROT0_KMD_MODE_EN_SHIFT 0
+#define ROT0_KMD_MODE_EN_MASK 0x1
+
+/* ROT0_CPL_QUEUE_EN */
+#define ROT0_CPL_QUEUE_EN_Q_EN_SHIFT 0
+#define ROT0_CPL_QUEUE_EN_Q_EN_MASK 0x1
+
+/* ROT0_CPL_QUEUE_ADDR_L */
+#define ROT0_CPL_QUEUE_ADDR_L_VAL_SHIFT 0
+#define ROT0_CPL_QUEUE_ADDR_L_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_CPL_QUEUE_ADDR_H */
+#define ROT0_CPL_QUEUE_ADDR_H_VAL_SHIFT 0
+#define ROT0_CPL_QUEUE_ADDR_H_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_CPL_QUEUE_DATA */
+#define ROT0_CPL_QUEUE_DATA_VAL_SHIFT 0
+#define ROT0_CPL_QUEUE_DATA_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_CPL_QUEUE_AWUSER */
+#define ROT0_CPL_QUEUE_AWUSER_VAL_SHIFT 0
+#define ROT0_CPL_QUEUE_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_CPL_QUEUE_AXI */
+#define ROT0_CPL_QUEUE_AXI_CACHE_SHIFT 0
+#define ROT0_CPL_QUEUE_AXI_CACHE_MASK 0xF
+#define ROT0_CPL_QUEUE_AXI_PROT_SHIFT 4
+#define ROT0_CPL_QUEUE_AXI_PROT_MASK 0x70
+
+/* ROT0_CPL_MSG_THRESHOLD */
+#define ROT0_CPL_MSG_THRESHOLD_VAL_SHIFT 0
+#define ROT0_CPL_MSG_THRESHOLD_VAL_MASK 0x3F
+
+/* ROT0_CPL_MSG_AXI */
+#define ROT0_CPL_MSG_AXI_CACHE_SHIFT 0
+#define ROT0_CPL_MSG_AXI_CACHE_MASK 0xF
+#define ROT0_CPL_MSG_AXI_PROT_SHIFT 4
+#define ROT0_CPL_MSG_AXI_PROT_MASK 0x70
+
+/* ROT0_AXI_WB */
+#define ROT0_AXI_WB_CACHE_SHIFT 0
+#define ROT0_AXI_WB_CACHE_MASK 0xF
+#define ROT0_AXI_WB_PROT_SHIFT 4
+#define ROT0_AXI_WB_PROT_MASK 0x70
+
+/* ROT0_ERR_CFG */
+#define ROT0_ERR_CFG_STOP_ON_ERR_SHIFT 0
+#define ROT0_ERR_CFG_STOP_ON_ERR_MASK 0x1
+
+/* ROT0_ERR_STATUS */
+#define ROT0_ERR_STATUS_ROT_HBW_RD_SHIFT 0
+#define ROT0_ERR_STATUS_ROT_HBW_RD_MASK 0x1
+#define ROT0_ERR_STATUS_ROT_HBW_WR_SHIFT 1
+#define ROT0_ERR_STATUS_ROT_HBW_WR_MASK 0x2
+#define ROT0_ERR_STATUS_QMAN_HBW_RD_SHIFT 2
+#define ROT0_ERR_STATUS_QMAN_HBW_RD_MASK 0x4
+#define ROT0_ERR_STATUS_QMAN_HBW_WR_SHIFT 3
+#define ROT0_ERR_STATUS_QMAN_HBW_WR_MASK 0x8
+#define ROT0_ERR_STATUS_ROT_LBW_WR_SHIFT 4
+#define ROT0_ERR_STATUS_ROT_LBW_WR_MASK 0x10
+
+/* ROT0_WBC_MAX_OUTSTANDING */
+#define ROT0_WBC_MAX_OUTSTANDING_VAL_SHIFT 0
+#define ROT0_WBC_MAX_OUTSTANDING_VAL_MASK 0xFFFF
+
+/* ROT0_WBC_RL */
+#define ROT0_WBC_RL_SATURATION_SHIFT 0
+#define ROT0_WBC_RL_SATURATION_MASK 0xFF
+#define ROT0_WBC_RL_TIMEOUT_SHIFT 8
+#define ROT0_WBC_RL_TIMEOUT_MASK 0xFF00
+#define ROT0_WBC_RL_RST_TOKEN_SHIFT 16
+#define ROT0_WBC_RL_RST_TOKEN_MASK 0xFF0000
+#define ROT0_WBC_RL_RATE_LIMITER_EN_SHIFT 24
+#define ROT0_WBC_RL_RATE_LIMITER_EN_MASK 0x1000000
+
+/* ROT0_WBC_INFLIGHTS */
+#define ROT0_WBC_INFLIGHTS_VAL_SHIFT 0
+#define ROT0_WBC_INFLIGHTS_VAL_MASK 0xFFFF
+
+/* ROT0_WBC_INFO */
+#define ROT0_WBC_INFO_EMPTY_SHIFT 0
+#define ROT0_WBC_INFO_EMPTY_MASK 0x1
+#define ROT0_WBC_INFO_AXI_IDLE_SHIFT 1
+#define ROT0_WBC_INFO_AXI_IDLE_MASK 0x2
+
+/* ROT0_WBC_MON */
+#define ROT0_WBC_MON_CNT_SHIFT 0
+#define ROT0_WBC_MON_CNT_MASK 0x1
+#define ROT0_WBC_MON_TS_SHIFT 8
+#define ROT0_WBC_MON_TS_MASK 0x300
+#define ROT0_WBC_MON_CONTEXT_ID_SHIFT 16
+#define ROT0_WBC_MON_CONTEXT_ID_MASK 0xFFFF0000
+
+/* ROT0_RSB_CAM_MAX_SIZE */
+#define ROT0_RSB_CAM_MAX_SIZE_DATA_SHIFT 0
+#define ROT0_RSB_CAM_MAX_SIZE_DATA_MASK 0xFFFF
+#define ROT0_RSB_CAM_MAX_SIZE_MD_SHIFT 16
+#define ROT0_RSB_CAM_MAX_SIZE_MD_MASK 0xFFFF0000
+
+/* ROT0_RSB_CFG */
+#define ROT0_RSB_CFG_CACHE_DISABLE_SHIFT 0
+#define ROT0_RSB_CFG_CACHE_DISABLE_MASK 0x1
+#define ROT0_RSB_CFG_ENABLE_CGATE_SHIFT 1
+#define ROT0_RSB_CFG_ENABLE_CGATE_MASK 0x2
+
+/* ROT0_RSB_MAX_OS */
+#define ROT0_RSB_MAX_OS_VAL_SHIFT 0
+#define ROT0_RSB_MAX_OS_VAL_MASK 0xFFFF
+
+/* ROT0_RSB_RL */
+#define ROT0_RSB_RL_SATURATION_SHIFT 0
+#define ROT0_RSB_RL_SATURATION_MASK 0xFF
+#define ROT0_RSB_RL_TIMEOUT_SHIFT 8
+#define ROT0_RSB_RL_TIMEOUT_MASK 0xFF00
+#define ROT0_RSB_RL_RST_TOKEN_SHIFT 16
+#define ROT0_RSB_RL_RST_TOKEN_MASK 0xFF0000
+#define ROT0_RSB_RL_RATE_LIMITER_EN_SHIFT 24
+#define ROT0_RSB_RL_RATE_LIMITER_EN_MASK 0x1000000
+
+/* ROT0_RSB_INFLIGHTS */
+#define ROT0_RSB_INFLIGHTS_VAL_SHIFT 0
+#define ROT0_RSB_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_RSB_OCCUPANCY */
+#define ROT0_RSB_OCCUPANCY_VAL_SHIFT 0
+#define ROT0_RSB_OCCUPANCY_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_RSB_INFO */
+#define ROT0_RSB_INFO_EMPTY_SHIFT 0
+#define ROT0_RSB_INFO_EMPTY_MASK 0x1
+#define ROT0_RSB_INFO_AXI_IDLE_SHIFT 1
+#define ROT0_RSB_INFO_AXI_IDLE_MASK 0x2
+
+/* ROT0_RSB_MON */
+#define ROT0_RSB_MON_CNT_SHIFT 0
+#define ROT0_RSB_MON_CNT_MASK 0x1FFF
+#define ROT0_RSB_MON_TS_SHIFT 16
+#define ROT0_RSB_MON_TS_MASK 0x30000
+
+/* ROT0_RSB_MON_CONTEXT_ID */
+#define ROT0_RSB_MON_CONTEXT_ID_VAL_SHIFT 0
+#define ROT0_RSB_MON_CONTEXT_ID_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_MSS_HALT */
+#define ROT0_MSS_HALT_VAL_SHIFT 0
+#define ROT0_MSS_HALT_VAL_MASK 0x7
+
+/* ROT0_MSS_SEI_STATUS */
+#define ROT0_MSS_SEI_STATUS_I0_SHIFT 0
+#define ROT0_MSS_SEI_STATUS_I0_MASK 0x1
+#define ROT0_MSS_SEI_STATUS_I1_SHIFT 1
+#define ROT0_MSS_SEI_STATUS_I1_MASK 0x2
+#define ROT0_MSS_SEI_STATUS_I2_SHIFT 2
+#define ROT0_MSS_SEI_STATUS_I2_MASK 0x4
+#define ROT0_MSS_SEI_STATUS_I3_SHIFT 3
+#define ROT0_MSS_SEI_STATUS_I3_MASK 0x8
+#define ROT0_MSS_SEI_STATUS_I4_SHIFT 4
+#define ROT0_MSS_SEI_STATUS_I4_MASK 0x10
+#define ROT0_MSS_SEI_STATUS_I5_SHIFT 5
+#define ROT0_MSS_SEI_STATUS_I5_MASK 0x20
+#define ROT0_MSS_SEI_STATUS_I6_SHIFT 6
+#define ROT0_MSS_SEI_STATUS_I6_MASK 0x40
+#define ROT0_MSS_SEI_STATUS_I7_SHIFT 7
+#define ROT0_MSS_SEI_STATUS_I7_MASK 0x80
+#define ROT0_MSS_SEI_STATUS_I8_SHIFT 8
+#define ROT0_MSS_SEI_STATUS_I8_MASK 0x100
+#define ROT0_MSS_SEI_STATUS_I9_SHIFT 9
+#define ROT0_MSS_SEI_STATUS_I9_MASK 0x200
+#define ROT0_MSS_SEI_STATUS_I10_SHIFT 10
+#define ROT0_MSS_SEI_STATUS_I10_MASK 0x400
+#define ROT0_MSS_SEI_STATUS_I11_SHIFT 11
+#define ROT0_MSS_SEI_STATUS_I11_MASK 0x800
+#define ROT0_MSS_SEI_STATUS_I12_SHIFT 12
+#define ROT0_MSS_SEI_STATUS_I12_MASK 0x1000
+#define ROT0_MSS_SEI_STATUS_I13_SHIFT 13
+#define ROT0_MSS_SEI_STATUS_I13_MASK 0x2000
+#define ROT0_MSS_SEI_STATUS_I14_SHIFT 14
+#define ROT0_MSS_SEI_STATUS_I14_MASK 0x4000
+#define ROT0_MSS_SEI_STATUS_I15_SHIFT 15
+#define ROT0_MSS_SEI_STATUS_I15_MASK 0x8000
+#define ROT0_MSS_SEI_STATUS_I16_SHIFT 16
+#define ROT0_MSS_SEI_STATUS_I16_MASK 0x10000
+#define ROT0_MSS_SEI_STATUS_I17_SHIFT 17
+#define ROT0_MSS_SEI_STATUS_I17_MASK 0x20000
+#define ROT0_MSS_SEI_STATUS_I18_SHIFT 18
+#define ROT0_MSS_SEI_STATUS_I18_MASK 0x40000
+#define ROT0_MSS_SEI_STATUS_I19_SHIFT 19
+#define ROT0_MSS_SEI_STATUS_I19_MASK 0x80000
+#define ROT0_MSS_SEI_STATUS_I20_SHIFT 20
+#define ROT0_MSS_SEI_STATUS_I20_MASK 0x100000
+#define ROT0_MSS_SEI_STATUS_I21_SHIFT 21
+#define ROT0_MSS_SEI_STATUS_I21_MASK 0x200000
+
+/* ROT0_MSS_SEI_MASK */
+#define ROT0_MSS_SEI_MASK_VAL_SHIFT 0
+#define ROT0_MSS_SEI_MASK_VAL_MASK 0x3FFFFF
+
+/* ROT0_MSS_SPI_STATUS */
+#define ROT0_MSS_SPI_STATUS_I0_SHIFT 0
+#define ROT0_MSS_SPI_STATUS_I0_MASK 0x1
+#define ROT0_MSS_SPI_STATUS_I1_SHIFT 1
+#define ROT0_MSS_SPI_STATUS_I1_MASK 0x2
+#define ROT0_MSS_SPI_STATUS_I2_SHIFT 2
+#define ROT0_MSS_SPI_STATUS_I2_MASK 0x4
+#define ROT0_MSS_SPI_STATUS_I3_SHIFT 3
+#define ROT0_MSS_SPI_STATUS_I3_MASK 0x8
+#define ROT0_MSS_SPI_STATUS_I4_SHIFT 4
+#define ROT0_MSS_SPI_STATUS_I4_MASK 0x10
+#define ROT0_MSS_SPI_STATUS_I5_SHIFT 5
+#define ROT0_MSS_SPI_STATUS_I5_MASK 0x20
+#define ROT0_MSS_SPI_STATUS_I6_SHIFT 6
+#define ROT0_MSS_SPI_STATUS_I6_MASK 0x40
+#define ROT0_MSS_SPI_STATUS_I7_SHIFT 7
+#define ROT0_MSS_SPI_STATUS_I7_MASK 0x80
+
+/* ROT0_MSS_SPI_MASK */
+#define ROT0_MSS_SPI_MASK_VAL_SHIFT 0
+#define ROT0_MSS_SPI_MASK_VAL_MASK 0xFF
+
+/* ROT0_DISABLE_PAD_CALC */
+#define ROT0_DISABLE_PAD_CALC_VAL_SHIFT 0
+#define ROT0_DISABLE_PAD_CALC_VAL_MASK 0x3
+
+/* ROT0_QMAN_CFG */
+#define ROT0_QMAN_CFG_FORCE_STOP_SHIFT 0
+#define ROT0_QMAN_CFG_FORCE_STOP_MASK 0x1
+
+/* ROT0_CLK_EN */
+#define ROT0_CLK_EN_LBW_CFG_DIS_SHIFT 0
+#define ROT0_CLK_EN_LBW_CFG_DIS_MASK 0x1
+#define ROT0_CLK_EN_DBG_CFG_DIS_SHIFT 4
+#define ROT0_CLK_EN_DBG_CFG_DIS_MASK 0x10
+#define ROT0_CLK_EN_SB_EMPTY_MASK_SHIFT 5
+#define ROT0_CLK_EN_SB_EMPTY_MASK_MASK 0x20
+
+/* ROT0_MRSB_CAM_MAX_SIZE */
+#define ROT0_MRSB_CAM_MAX_SIZE_DATA_SHIFT 0
+#define ROT0_MRSB_CAM_MAX_SIZE_DATA_MASK 0xFFFF
+#define ROT0_MRSB_CAM_MAX_SIZE_MD_SHIFT 16
+#define ROT0_MRSB_CAM_MAX_SIZE_MD_MASK 0xFFFF0000
+
+/* ROT0_MRSB_CFG */
+#define ROT0_MRSB_CFG_CACHE_DISABLE_SHIFT 0
+#define ROT0_MRSB_CFG_CACHE_DISABLE_MASK 0x1
+#define ROT0_MRSB_CFG_ENABLE_CGATE_SHIFT 1
+#define ROT0_MRSB_CFG_ENABLE_CGATE_MASK 0x2
+
+/* ROT0_MRSB_MAX_OS */
+#define ROT0_MRSB_MAX_OS_VAL_SHIFT 0
+#define ROT0_MRSB_MAX_OS_VAL_MASK 0xFFFF
+
+/* ROT0_MRSB_RL */
+#define ROT0_MRSB_RL_SATURATION_SHIFT 0
+#define ROT0_MRSB_RL_SATURATION_MASK 0xFF
+#define ROT0_MRSB_RL_TIMEOUT_SHIFT 8
+#define ROT0_MRSB_RL_TIMEOUT_MASK 0xFF00
+#define ROT0_MRSB_RL_RST_TOKEN_SHIFT 16
+#define ROT0_MRSB_RL_RST_TOKEN_MASK 0xFF0000
+#define ROT0_MRSB_RL_RATE_LIMITER_EN_SHIFT 24
+#define ROT0_MRSB_RL_RATE_LIMITER_EN_MASK 0x1000000
+
+/* ROT0_MRSB_INFLIGHTS */
+#define ROT0_MRSB_INFLIGHTS_VAL_SHIFT 0
+#define ROT0_MRSB_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_MRSB_OCCUPANCY */
+#define ROT0_MRSB_OCCUPANCY_VAL_SHIFT 0
+#define ROT0_MRSB_OCCUPANCY_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_MRSB_INFO */
+#define ROT0_MRSB_INFO_EMPTY_SHIFT 0
+#define ROT0_MRSB_INFO_EMPTY_MASK 0x1
+#define ROT0_MRSB_INFO_AXI_IDLE_SHIFT 1
+#define ROT0_MRSB_INFO_AXI_IDLE_MASK 0x2
+
+/* ROT0_MRSB_MON */
+#define ROT0_MRSB_MON_CNT_SHIFT 0
+#define ROT0_MRSB_MON_CNT_MASK 0x1FFF
+#define ROT0_MRSB_MON_TS_SHIFT 16
+#define ROT0_MRSB_MON_TS_MASK 0x30000
+
+/* ROT0_MRSB_MON_CONTEXT_ID */
+#define ROT0_MRSB_MON_CONTEXT_ID_VAL_SHIFT 0
+#define ROT0_MRSB_MON_CONTEXT_ID_VAL_MASK 0xFFFFFFFF
+
+/* ROT0_MSS_STS */
+#define ROT0_MSS_STS_IS_HALT_SHIFT 0
+#define ROT0_MSS_STS_IS_HALT_MASK 0x1
+
+#endif /* ASIC_REG_ROT0_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
new file mode 100644
index 000000000000..e83daa33d737
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_QM_ARC_AUX_REGS_H_
+#define ASIC_REG_ROT0_QM_ARC_AUX_REGS_H_
+
+/*
+ *****************************************
+ * ROT0_QM_ARC_AUX
+ * (Prototype: QMAN_ARC_AUX)
+ *****************************************
+ */
+
+#define mmROT0_QM_ARC_AUX_RUN_HALT_REQ 0x4E08100
+
+#define mmROT0_QM_ARC_AUX_RUN_HALT_ACK 0x4E08104
+
+#define mmROT0_QM_ARC_AUX_RST_VEC_ADDR 0x4E08108
+
+#define mmROT0_QM_ARC_AUX_DBG_MODE 0x4E0810C
+
+#define mmROT0_QM_ARC_AUX_CLUSTER_NUM 0x4E08110
+
+#define mmROT0_QM_ARC_AUX_ARC_NUM 0x4E08114
+
+#define mmROT0_QM_ARC_AUX_WAKE_UP_EVENT 0x4E08118
+
+#define mmROT0_QM_ARC_AUX_DCCM_SYS_ADDR_BASE 0x4E0811C
+
+#define mmROT0_QM_ARC_AUX_CTI_AP_STS 0x4E08120
+
+#define mmROT0_QM_ARC_AUX_CTI_CFG_MUX_SEL 0x4E08124
+
+#define mmROT0_QM_ARC_AUX_ARC_RST 0x4E08128
+
+#define mmROT0_QM_ARC_AUX_ARC_RST_REQ 0x4E0812C
+
+#define mmROT0_QM_ARC_AUX_SRAM_LSB_ADDR 0x4E08130
+
+#define mmROT0_QM_ARC_AUX_SRAM_MSB_ADDR 0x4E08134
+
+#define mmROT0_QM_ARC_AUX_PCIE_LSB_ADDR 0x4E08138
+
+#define mmROT0_QM_ARC_AUX_PCIE_MSB_ADDR 0x4E0813C
+
+#define mmROT0_QM_ARC_AUX_CFG_LSB_ADDR 0x4E08140
+
+#define mmROT0_QM_ARC_AUX_CFG_MSB_ADDR 0x4E08144
+
+#define mmROT0_QM_ARC_AUX_HBM0_LSB_ADDR 0x4E08150
+
+#define mmROT0_QM_ARC_AUX_HBM0_MSB_ADDR 0x4E08154
+
+#define mmROT0_QM_ARC_AUX_HBM1_LSB_ADDR 0x4E08158
+
+#define mmROT0_QM_ARC_AUX_HBM1_MSB_ADDR 0x4E0815C
+
+#define mmROT0_QM_ARC_AUX_HBM2_LSB_ADDR 0x4E08160
+
+#define mmROT0_QM_ARC_AUX_HBM2_MSB_ADDR 0x4E08164
+
+#define mmROT0_QM_ARC_AUX_HBM3_LSB_ADDR 0x4E08168
+
+#define mmROT0_QM_ARC_AUX_HBM3_MSB_ADDR 0x4E0816C
+
+#define mmROT0_QM_ARC_AUX_HBM0_OFFSET 0x4E08170
+
+#define mmROT0_QM_ARC_AUX_HBM1_OFFSET 0x4E08174
+
+#define mmROT0_QM_ARC_AUX_HBM2_OFFSET 0x4E08178
+
+#define mmROT0_QM_ARC_AUX_HBM3_OFFSET 0x4E0817C
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x4E08180
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x4E08184
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x4E08188
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x4E0818C
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x4E08190
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x4E08194
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x4E08198
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x4E0819C
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x4E081A0
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x4E081A4
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x4E081A8
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x4E081AC
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x4E081B0
+
+#define mmROT0_QM_ARC_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x4E081B4
+
+#define mmROT0_QM_ARC_AUX_ARC_CBU_AWCACHE_OVR 0x4E081B8
+
+#define mmROT0_QM_ARC_AUX_ARC_LBU_AWCACHE_OVR 0x4E081BC
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_0 0x4E081C0
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_1 0x4E081C4
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_2 0x4E081C8
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_3 0x4E081CC
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_4 0x4E081D0
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_5 0x4E081D4
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_6 0x4E081D8
+
+#define mmROT0_QM_ARC_AUX_CONTEXT_ID_7 0x4E081DC
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_0 0x4E081E0
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_1 0x4E081E4
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_2 0x4E081E8
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_3 0x4E081EC
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_4 0x4E081F0
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_5 0x4E081F4
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_6 0x4E081F8
+
+#define mmROT0_QM_ARC_AUX_CID_OFFSET_7 0x4E081FC
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_0 0x4E08200
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_1 0x4E08204
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_2 0x4E08208
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_3 0x4E0820C
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_4 0x4E08210
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_5 0x4E08214
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_6 0x4E08218
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_7 0x4E0821C
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_8 0x4E08220
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_9 0x4E08224
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_10 0x4E08228
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_11 0x4E0822C
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_12 0x4E08230
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_13 0x4E08234
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_14 0x4E08238
+
+#define mmROT0_QM_ARC_AUX_SW_INTR_15 0x4E0823C
+
+#define mmROT0_QM_ARC_AUX_IRQ_INTR_MASK_0 0x4E08280
+
+#define mmROT0_QM_ARC_AUX_IRQ_INTR_MASK_1 0x4E08284
+
+#define mmROT0_QM_ARC_AUX_ARC_SEI_INTR_STS 0x4E08290
+
+#define mmROT0_QM_ARC_AUX_ARC_SEI_INTR_CLR 0x4E08294
+
+#define mmROT0_QM_ARC_AUX_ARC_SEI_INTR_MASK 0x4E08298
+
+#define mmROT0_QM_ARC_AUX_ARC_EXCPTN_CAUSE 0x4E0829C
+
+#define mmROT0_QM_ARC_AUX_SEI_INTR_HALT_EN 0x4E082A0
+
+#define mmROT0_QM_ARC_AUX_ARC_SEI_INTR_HALT_MASK 0x4E082A4
+
+#define mmROT0_QM_ARC_AUX_QMAN_SEI_INTR_HALT_MASK 0x4E082A8
+
+#define mmROT0_QM_ARC_AUX_ARC_REI_INTR_STS 0x4E082B0
+
+#define mmROT0_QM_ARC_AUX_ARC_REI_INTR_CLR 0x4E082B4
+
+#define mmROT0_QM_ARC_AUX_ARC_REI_INTR_MASK 0x4E082B8
+
+#define mmROT0_QM_ARC_AUX_DCCM_ECC_ERR_ADDR 0x4E082BC
+
+#define mmROT0_QM_ARC_AUX_DCCM_ECC_SYNDROME 0x4E082C0
+
+#define mmROT0_QM_ARC_AUX_I_CACHE_ECC_ERR_ADDR 0x4E082C4
+
+#define mmROT0_QM_ARC_AUX_I_CACHE_ECC_SYNDROME 0x4E082C8
+
+#define mmROT0_QM_ARC_AUX_D_CACHE_ECC_ERR_ADDR 0x4E082CC
+
+#define mmROT0_QM_ARC_AUX_D_CACHE_ECC_SYNDROME 0x4E082D0
+
+#define mmROT0_QM_ARC_AUX_LBW_TRMINATE_AWADDR_ERR 0x4E082E0
+
+#define mmROT0_QM_ARC_AUX_LBW_TRMINATE_ARADDR_ERR 0x4E082E4
+
+#define mmROT0_QM_ARC_AUX_CFG_LBW_TERMINATE_BRESP 0x4E082E8
+
+#define mmROT0_QM_ARC_AUX_CFG_LBW_TERMINATE_RRESP 0x4E082EC
+
+#define mmROT0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXLEN 0x4E082F0
+
+#define mmROT0_QM_ARC_AUX_CFG_LBW_TERMINATE_AXSIZE 0x4E082F4
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_0 0x4E08300
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_1 0x4E08304
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_2 0x4E08308
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_3 0x4E0830C
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_4 0x4E08310
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_5 0x4E08314
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_6 0x4E08318
+
+#define mmROT0_QM_ARC_AUX_SCRATCHPAD_7 0x4E0831C
+
+#define mmROT0_QM_ARC_AUX_TOTAL_CBU_WR_CNT 0x4E08320
+
+#define mmROT0_QM_ARC_AUX_INFLIGHT_CBU_WR_CNT 0x4E08324
+
+#define mmROT0_QM_ARC_AUX_TOTAL_CBU_RD_CNT 0x4E08328
+
+#define mmROT0_QM_ARC_AUX_INFLIGHT_CBU_RD_CNT 0x4E0832C
+
+#define mmROT0_QM_ARC_AUX_TOTAL_LBU_WR_CNT 0x4E08330
+
+#define mmROT0_QM_ARC_AUX_INFLIGHT_LBU_WR_CNT 0x4E08334
+
+#define mmROT0_QM_ARC_AUX_TOTAL_LBU_RD_CNT 0x4E08338
+
+#define mmROT0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT 0x4E0833C
+
+#define mmROT0_QM_ARC_AUX_CBU_ARUSER_OVR 0x4E08350
+
+#define mmROT0_QM_ARC_AUX_CBU_ARUSER_OVR_EN 0x4E08354
+
+#define mmROT0_QM_ARC_AUX_CBU_AWUSER_OVR 0x4E08358
+
+#define mmROT0_QM_ARC_AUX_CBU_AWUSER_OVR_EN 0x4E0835C
+
+#define mmROT0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR 0x4E08360
+
+#define mmROT0_QM_ARC_AUX_CBU_ARUSER_MSB_OVR_EN 0x4E08364
+
+#define mmROT0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR 0x4E08368
+
+#define mmROT0_QM_ARC_AUX_CBU_AWUSER_MSB_OVR_EN 0x4E0836C
+
+#define mmROT0_QM_ARC_AUX_CBU_AXCACHE_OVR 0x4E08370
+
+#define mmROT0_QM_ARC_AUX_CBU_LOCK_OVR 0x4E08374
+
+#define mmROT0_QM_ARC_AUX_CBU_PROT_OVR 0x4E08378
+
+#define mmROT0_QM_ARC_AUX_CBU_MAX_OUTSTANDING 0x4E0837C
+
+#define mmROT0_QM_ARC_AUX_CBU_EARLY_BRESP_EN 0x4E08380
+
+#define mmROT0_QM_ARC_AUX_CBU_FORCE_RSP_OK 0x4E08384
+
+#define mmROT0_QM_ARC_AUX_CBU_NO_WR_INFLIGHT 0x4E0838C
+
+#define mmROT0_QM_ARC_AUX_CBU_SEI_INTR_ID 0x4E08390
+
+#define mmROT0_QM_ARC_AUX_LBU_ARUSER_OVR 0x4E08400
+
+#define mmROT0_QM_ARC_AUX_LBU_ARUSER_OVR_EN 0x4E08404
+
+#define mmROT0_QM_ARC_AUX_LBU_AWUSER_OVR 0x4E08408
+
+#define mmROT0_QM_ARC_AUX_LBU_AWUSER_OVR_EN 0x4E0840C
+
+#define mmROT0_QM_ARC_AUX_LBU_AXCACHE_OVR 0x4E08420
+
+#define mmROT0_QM_ARC_AUX_LBU_LOCK_OVR 0x4E08424
+
+#define mmROT0_QM_ARC_AUX_LBU_PROT_OVR 0x4E08428
+
+#define mmROT0_QM_ARC_AUX_LBU_MAX_OUTSTANDING 0x4E0842C
+
+#define mmROT0_QM_ARC_AUX_LBU_EARLY_BRESP_EN 0x4E08430
+
+#define mmROT0_QM_ARC_AUX_LBU_FORCE_RSP_OK 0x4E08434
+
+#define mmROT0_QM_ARC_AUX_LBU_NO_WR_INFLIGHT 0x4E0843C
+
+#define mmROT0_QM_ARC_AUX_LBU_SEI_INTR_ID 0x4E08440
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0 0x4E08500
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_1 0x4E08504
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_2 0x4E08508
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_3 0x4E0850C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_4 0x4E08510
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_5 0x4E08514
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_6 0x4E08518
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_7 0x4E0851C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_0 0x4E08520
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_1 0x4E08524
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_2 0x4E08528
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_3 0x4E0852C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_4 0x4E08530
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_5 0x4E08534
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_6 0x4E08538
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_SIZE_7 0x4E0853C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_0 0x4E08540
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_1 0x4E08544
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_2 0x4E08548
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_3 0x4E0854C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_4 0x4E08550
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_5 0x4E08554
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_6 0x4E08558
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PI_7 0x4E0855C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_0 0x4E08560
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_1 0x4E08564
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_2 0x4E08568
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_3 0x4E0856C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_4 0x4E08570
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_5 0x4E08574
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_6 0x4E08578
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_CI_7 0x4E0857C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_0 0x4E08580
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_1 0x4E08584
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_2 0x4E08588
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_3 0x4E0858C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_4 0x4E08590
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_5 0x4E08594
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_6 0x4E08598
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_PUSH_REG_7 0x4E0859C
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x4E085A0
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x4E085A4
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x4E085A8
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x4E085AC
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x4E085B0
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x4E085B4
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x4E085B8
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x4E085BC
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x4E085C0
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x4E085C4
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x4E085C8
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x4E085CC
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x4E085D0
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x4E085D4
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x4E085D8
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x4E085DC
+
+#define mmROT0_QM_ARC_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x4E085E0
+
+#define mmROT0_QM_ARC_AUX_NIC_Q_VLD_ENTRY_MASK 0x4E085E4
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_DROP_EN 0x4E08620
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_WARN_MSG 0x4E08624
+
+#define mmROT0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG 0x4E08628
+
+#define mmROT0_QM_ARC_AUX_DCCM_GEN_AXI_AWPROT 0x4E08630
+
+#define mmROT0_QM_ARC_AUX_DCCM_GEN_AXI_AWUSER 0x4E08634
+
+#define mmROT0_QM_ARC_AUX_DCCM_GEN_AXI_AWBURST 0x4E08638
+
+#define mmROT0_QM_ARC_AUX_DCCM_GEN_AXI_AWLOCK 0x4E0863C
+
+#define mmROT0_QM_ARC_AUX_DCCM_GEN_AXI_AWCACHE 0x4E08640
+
+#define mmROT0_QM_ARC_AUX_DCCM_WRR_ARB_WEIGHT 0x4E08644
+
+#define mmROT0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x4E08648
+
+#define mmROT0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT 0x4E0864C
+
+#define mmROT0_QM_ARC_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x4E08650
+
+#define mmROT0_QM_ARC_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x4E08654
+
+#define mmROT0_QM_ARC_AUX_QMAN_CQ_SHADOW_CI 0x4E08658
+
+#define mmROT0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI 0x4E0865C
+
+#define mmROT0_QM_ARC_AUX_AUX2APB_PROT 0x4E08700
+
+#define mmROT0_QM_ARC_AUX_LBW_FORK_WIN_EN 0x4E08704
+
+#define mmROT0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x4E08708
+
+#define mmROT0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x4E0870C
+
+#define mmROT0_QM_ARC_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x4E08710
+
+#define mmROT0_QM_ARC_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x4E08714
+
+#define mmROT0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR0 0x4E08718
+
+#define mmROT0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK0 0x4E0871C
+
+#define mmROT0_QM_ARC_AUX_FARM_LBW_FORK_BASE_ADDR1 0x4E08720
+
+#define mmROT0_QM_ARC_AUX_FARM_LBW_FORK_ADDR_MASK1 0x4E08724
+
+#define mmROT0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR0 0x4E08728
+
+#define mmROT0_QM_ARC_AUX_LBW_APB_FORK_MAX_ADDR1 0x4E0872C
+
+#define mmROT0_QM_ARC_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x4E08730
+
+#define mmROT0_QM_ARC_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x4E08734
+
+#define mmROT0_QM_ARC_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x4E08738
+
+#define mmROT0_QM_ARC_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x4E0873C
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_WIN_EN 0x4E08740
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_LSB 0x4E08750
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR0_MSB 0x4E08754
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_LSB 0x4E08758
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK0_MSB 0x4E0875C
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_LSB 0x4E08760
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR1_MSB 0x4E08764
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_LSB 0x4E08768
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK1_MSB 0x4E0876C
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_LSB 0x4E08770
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR2_MSB 0x4E08774
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_LSB 0x4E08778
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK2_MSB 0x4E0877C
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_LSB 0x4E08780
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_BASE_ADDR3_MSB 0x4E08784
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_LSB 0x4E08788
+
+#define mmROT0_QM_ARC_AUX_CBU_FORK_ADDR_MASK3_MSB 0x4E0878C
+
+#define mmROT0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_LSB 0x4E08790
+
+#define mmROT0_QM_ARC_AUX_CBU_TRMINATE_ARADDR_MSB 0x4E08794
+
+#define mmROT0_QM_ARC_AUX_CFG_CBU_TERMINATE_BRESP 0x4E08798
+
+#define mmROT0_QM_ARC_AUX_CFG_CBU_TERMINATE_RRESP 0x4E0879C
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_0 0x4E08800
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_1 0x4E08804
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_2 0x4E08808
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_3 0x4E0880C
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_4 0x4E08810
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_5 0x4E08814
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_6 0x4E08818
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_7 0x4E0881C
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_8 0x4E08820
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_9 0x4E08824
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_10 0x4E08828
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_11 0x4E0882C
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_12 0x4E08830
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_13 0x4E08834
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_14 0x4E08838
+
+#define mmROT0_QM_ARC_AUX_ARC_REGION_CFG_15 0x4E0883C
+
+#define mmROT0_QM_ARC_AUX_DCCM_TRMINATE_AWADDR_ERR 0x4E08840
+
+#define mmROT0_QM_ARC_AUX_DCCM_TRMINATE_ARADDR_ERR 0x4E08844
+
+#define mmROT0_QM_ARC_AUX_CFG_DCCM_TERMINATE_BRESP 0x4E08848
+
+#define mmROT0_QM_ARC_AUX_CFG_DCCM_TERMINATE_RRESP 0x4E0884C
+
+#define mmROT0_QM_ARC_AUX_CFG_DCCM_TERMINATE_EN 0x4E08850
+
+#define mmROT0_QM_ARC_AUX_CFG_DCCM_SECURE_REGION 0x4E08854
+
+#define mmROT0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x4E08900
+
+#define mmROT0_QM_ARC_AUX_ARC_AXI_ORDERING_CTL 0x4E08904
+
+#define mmROT0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x4E08908
+
+#define mmROT0_QM_ARC_AUX_ARC_AXI_ORDERING_ADDR 0x4E0890C
+
+#define mmROT0_QM_ARC_AUX_ARC_ACC_ENGS_BUSER 0x4E08910
+
+#define mmROT0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN 0x4E08920
+
+#endif /* ASIC_REG_ROT0_QM_ARC_AUX_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
new file mode 100644
index 000000000000..8e040a2ef1c1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_QM_AXUSER_NONSECURED_REGS_H_
+#define ASIC_REG_ROT0_QM_AXUSER_NONSECURED_REGS_H_
+
+/*
+ *****************************************
+ * ROT0_QM_AXUSER_NONSECURED
+ * (Prototype: AXUSER)
+ *****************************************
+ */
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_ASID 0x4E0AB80
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_MMU_BP 0x4E0AB84
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_STRONG_ORDER 0x4E0AB88
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_NO_SNOOP 0x4E0AB8C
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_WR_REDUCTION 0x4E0AB90
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_RD_ATOMIC 0x4E0AB94
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_QOS 0x4E0AB98
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_RSVD 0x4E0AB9C
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_EMEM_CPAGE 0x4E0ABA0
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_CORE 0x4E0ABA4
+
+#define mmROT0_QM_AXUSER_NONSECURED_E2E_COORD 0x4E0ABA8
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_WR_OVRD_LO 0x4E0ABB0
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_WR_OVRD_HI 0x4E0ABB4
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_RD_OVRD_LO 0x4E0ABB8
+
+#define mmROT0_QM_AXUSER_NONSECURED_HB_RD_OVRD_HI 0x4E0ABBC
+
+#define mmROT0_QM_AXUSER_NONSECURED_LB_COORD 0x4E0ABC0
+
+#define mmROT0_QM_AXUSER_NONSECURED_LB_LOCK 0x4E0ABC4
+
+#define mmROT0_QM_AXUSER_NONSECURED_LB_RSVD 0x4E0ABC8
+
+#define mmROT0_QM_AXUSER_NONSECURED_LB_OVRD 0x4E0ABCC
+
+#endif /* ASIC_REG_ROT0_QM_AXUSER_NONSECURED_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
new file mode 100644
index 000000000000..077ae2347a3d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_QM_CGM_REGS_H_
+#define ASIC_REG_ROT0_QM_CGM_REGS_H_
+
+/*
+ *****************************************
+ * ROT0_QM_CGM
+ * (Prototype: QMAN_CGM)
+ *****************************************
+ */
+
+#define mmROT0_QM_CGM_CFG 0x4E0AD80
+
+#define mmROT0_QM_CGM_STS 0x4E0AD84
+
+#define mmROT0_QM_CGM_CFG1 0x4E0AD88
+
+#endif /* ASIC_REG_ROT0_QM_CGM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
new file mode 100644
index 000000000000..de3c85510af2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_QM_REGS_H_
+#define ASIC_REG_ROT0_QM_REGS_H_
+
+/*
+ *****************************************
+ * ROT0_QM
+ * (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmROT0_QM_GLBL_CFG0 0x4E0A000
+
+#define mmROT0_QM_GLBL_CFG1 0x4E0A004
+
+#define mmROT0_QM_GLBL_CFG2 0x4E0A008
+
+#define mmROT0_QM_GLBL_ERR_CFG 0x4E0A00C
+
+#define mmROT0_QM_GLBL_ERR_CFG1 0x4E0A010
+
+#define mmROT0_QM_GLBL_ERR_ARC_HALT_EN 0x4E0A014
+
+#define mmROT0_QM_GLBL_AXCACHE 0x4E0A018
+
+#define mmROT0_QM_GLBL_STS0 0x4E0A01C
+
+#define mmROT0_QM_GLBL_STS1 0x4E0A020
+
+#define mmROT0_QM_GLBL_ERR_STS_0 0x4E0A024
+
+#define mmROT0_QM_GLBL_ERR_STS_1 0x4E0A028
+
+#define mmROT0_QM_GLBL_ERR_STS_2 0x4E0A02C
+
+#define mmROT0_QM_GLBL_ERR_STS_3 0x4E0A030
+
+#define mmROT0_QM_GLBL_ERR_STS_4 0x4E0A034
+
+#define mmROT0_QM_GLBL_ERR_MSG_EN_0 0x4E0A038
+
+#define mmROT0_QM_GLBL_ERR_MSG_EN_1 0x4E0A03C
+
+#define mmROT0_QM_GLBL_ERR_MSG_EN_2 0x4E0A040
+
+#define mmROT0_QM_GLBL_ERR_MSG_EN_3 0x4E0A044
+
+#define mmROT0_QM_GLBL_ERR_MSG_EN_4 0x4E0A048
+
+#define mmROT0_QM_GLBL_PROT 0x4E0A04C
+
+#define mmROT0_QM_PQ_BASE_LO_0 0x4E0A050
+
+#define mmROT0_QM_PQ_BASE_LO_1 0x4E0A054
+
+#define mmROT0_QM_PQ_BASE_LO_2 0x4E0A058
+
+#define mmROT0_QM_PQ_BASE_LO_3 0x4E0A05C
+
+#define mmROT0_QM_PQ_BASE_HI_0 0x4E0A060
+
+#define mmROT0_QM_PQ_BASE_HI_1 0x4E0A064
+
+#define mmROT0_QM_PQ_BASE_HI_2 0x4E0A068
+
+#define mmROT0_QM_PQ_BASE_HI_3 0x4E0A06C
+
+#define mmROT0_QM_PQ_SIZE_0 0x4E0A070
+
+#define mmROT0_QM_PQ_SIZE_1 0x4E0A074
+
+#define mmROT0_QM_PQ_SIZE_2 0x4E0A078
+
+#define mmROT0_QM_PQ_SIZE_3 0x4E0A07C
+
+#define mmROT0_QM_PQ_PI_0 0x4E0A080
+
+#define mmROT0_QM_PQ_PI_1 0x4E0A084
+
+#define mmROT0_QM_PQ_PI_2 0x4E0A088
+
+#define mmROT0_QM_PQ_PI_3 0x4E0A08C
+
+#define mmROT0_QM_PQ_CI_0 0x4E0A090
+
+#define mmROT0_QM_PQ_CI_1 0x4E0A094
+
+#define mmROT0_QM_PQ_CI_2 0x4E0A098
+
+#define mmROT0_QM_PQ_CI_3 0x4E0A09C
+
+#define mmROT0_QM_PQ_CFG0_0 0x4E0A0A0
+
+#define mmROT0_QM_PQ_CFG0_1 0x4E0A0A4
+
+#define mmROT0_QM_PQ_CFG0_2 0x4E0A0A8
+
+#define mmROT0_QM_PQ_CFG0_3 0x4E0A0AC
+
+#define mmROT0_QM_PQ_CFG1_0 0x4E0A0B0
+
+#define mmROT0_QM_PQ_CFG1_1 0x4E0A0B4
+
+#define mmROT0_QM_PQ_CFG1_2 0x4E0A0B8
+
+#define mmROT0_QM_PQ_CFG1_3 0x4E0A0BC
+
+#define mmROT0_QM_PQ_STS0_0 0x4E0A0C0
+
+#define mmROT0_QM_PQ_STS0_1 0x4E0A0C4
+
+#define mmROT0_QM_PQ_STS0_2 0x4E0A0C8
+
+#define mmROT0_QM_PQ_STS0_3 0x4E0A0CC
+
+#define mmROT0_QM_PQ_STS1_0 0x4E0A0D0
+
+#define mmROT0_QM_PQ_STS1_1 0x4E0A0D4
+
+#define mmROT0_QM_PQ_STS1_2 0x4E0A0D8
+
+#define mmROT0_QM_PQ_STS1_3 0x4E0A0DC
+
+#define mmROT0_QM_CQ_CFG0_0 0x4E0A0E0
+
+#define mmROT0_QM_CQ_CFG0_1 0x4E0A0E4
+
+#define mmROT0_QM_CQ_CFG0_2 0x4E0A0E8
+
+#define mmROT0_QM_CQ_CFG0_3 0x4E0A0EC
+
+#define mmROT0_QM_CQ_CFG0_4 0x4E0A0F0
+
+#define mmROT0_QM_CQ_STS0_0 0x4E0A0F4
+
+#define mmROT0_QM_CQ_STS0_1 0x4E0A0F8
+
+#define mmROT0_QM_CQ_STS0_2 0x4E0A0FC
+
+#define mmROT0_QM_CQ_STS0_3 0x4E0A100
+
+#define mmROT0_QM_CQ_STS0_4 0x4E0A104
+
+#define mmROT0_QM_CQ_CFG1_0 0x4E0A108
+
+#define mmROT0_QM_CQ_CFG1_1 0x4E0A10C
+
+#define mmROT0_QM_CQ_CFG1_2 0x4E0A110
+
+#define mmROT0_QM_CQ_CFG1_3 0x4E0A114
+
+#define mmROT0_QM_CQ_CFG1_4 0x4E0A118
+
+#define mmROT0_QM_CQ_STS1_0 0x4E0A11C
+
+#define mmROT0_QM_CQ_STS1_1 0x4E0A120
+
+#define mmROT0_QM_CQ_STS1_2 0x4E0A124
+
+#define mmROT0_QM_CQ_STS1_3 0x4E0A128
+
+#define mmROT0_QM_CQ_STS1_4 0x4E0A12C
+
+#define mmROT0_QM_CQ_PTR_LO_0 0x4E0A150
+
+#define mmROT0_QM_CQ_PTR_HI_0 0x4E0A154
+
+#define mmROT0_QM_CQ_TSIZE_0 0x4E0A158
+
+#define mmROT0_QM_CQ_CTL_0 0x4E0A15C
+
+#define mmROT0_QM_CQ_PTR_LO_1 0x4E0A160
+
+#define mmROT0_QM_CQ_PTR_HI_1 0x4E0A164
+
+#define mmROT0_QM_CQ_TSIZE_1 0x4E0A168
+
+#define mmROT0_QM_CQ_CTL_1 0x4E0A16C
+
+#define mmROT0_QM_CQ_PTR_LO_2 0x4E0A170
+
+#define mmROT0_QM_CQ_PTR_HI_2 0x4E0A174
+
+#define mmROT0_QM_CQ_TSIZE_2 0x4E0A178
+
+#define mmROT0_QM_CQ_CTL_2 0x4E0A17C
+
+#define mmROT0_QM_CQ_PTR_LO_3 0x4E0A180
+
+#define mmROT0_QM_CQ_PTR_HI_3 0x4E0A184
+
+#define mmROT0_QM_CQ_TSIZE_3 0x4E0A188
+
+#define mmROT0_QM_CQ_CTL_3 0x4E0A18C
+
+#define mmROT0_QM_CQ_PTR_LO_4 0x4E0A190
+
+#define mmROT0_QM_CQ_PTR_HI_4 0x4E0A194
+
+#define mmROT0_QM_CQ_TSIZE_4 0x4E0A198
+
+#define mmROT0_QM_CQ_CTL_4 0x4E0A19C
+
+#define mmROT0_QM_CQ_TSIZE_STS_0 0x4E0A1A0
+
+#define mmROT0_QM_CQ_TSIZE_STS_1 0x4E0A1A4
+
+#define mmROT0_QM_CQ_TSIZE_STS_2 0x4E0A1A8
+
+#define mmROT0_QM_CQ_TSIZE_STS_3 0x4E0A1AC
+
+#define mmROT0_QM_CQ_TSIZE_STS_4 0x4E0A1B0
+
+#define mmROT0_QM_CQ_PTR_LO_STS_0 0x4E0A1B4
+
+#define mmROT0_QM_CQ_PTR_LO_STS_1 0x4E0A1B8
+
+#define mmROT0_QM_CQ_PTR_LO_STS_2 0x4E0A1BC
+
+#define mmROT0_QM_CQ_PTR_LO_STS_3 0x4E0A1C0
+
+#define mmROT0_QM_CQ_PTR_LO_STS_4 0x4E0A1C4
+
+#define mmROT0_QM_CQ_PTR_HI_STS_0 0x4E0A1C8
+
+#define mmROT0_QM_CQ_PTR_HI_STS_1 0x4E0A1CC
+
+#define mmROT0_QM_CQ_PTR_HI_STS_2 0x4E0A1D0
+
+#define mmROT0_QM_CQ_PTR_HI_STS_3 0x4E0A1D4
+
+#define mmROT0_QM_CQ_PTR_HI_STS_4 0x4E0A1D8
+
+#define mmROT0_QM_CQ_IFIFO_STS_0 0x4E0A1DC
+
+#define mmROT0_QM_CQ_IFIFO_STS_1 0x4E0A1E0
+
+#define mmROT0_QM_CQ_IFIFO_STS_2 0x4E0A1E4
+
+#define mmROT0_QM_CQ_IFIFO_STS_3 0x4E0A1E8
+
+#define mmROT0_QM_CQ_IFIFO_STS_4 0x4E0A1EC
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_LO_0 0x4E0A1F0
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_LO_1 0x4E0A1F4
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_LO_2 0x4E0A1F8
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_LO_3 0x4E0A1FC
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_LO_4 0x4E0A200
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_HI_0 0x4E0A204
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_HI_1 0x4E0A208
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_HI_2 0x4E0A20C
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_HI_3 0x4E0A210
+
+#define mmROT0_QM_CP_MSG_BASE0_ADDR_HI_4 0x4E0A214
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_LO_0 0x4E0A218
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_LO_1 0x4E0A21C
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_LO_2 0x4E0A220
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_LO_3 0x4E0A224
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_LO_4 0x4E0A228
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_HI_0 0x4E0A22C
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_HI_1 0x4E0A230
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_HI_2 0x4E0A234
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_HI_3 0x4E0A238
+
+#define mmROT0_QM_CP_MSG_BASE1_ADDR_HI_4 0x4E0A23C
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_LO_0 0x4E0A240
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_LO_1 0x4E0A244
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_LO_2 0x4E0A248
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_LO_3 0x4E0A24C
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_LO_4 0x4E0A250
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_HI_0 0x4E0A254
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_HI_1 0x4E0A258
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_HI_2 0x4E0A25C
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_HI_3 0x4E0A260
+
+#define mmROT0_QM_CP_MSG_BASE2_ADDR_HI_4 0x4E0A264
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_LO_0 0x4E0A268
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_LO_1 0x4E0A26C
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_LO_2 0x4E0A270
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_LO_3 0x4E0A274
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_LO_4 0x4E0A278
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_HI_0 0x4E0A27C
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_HI_1 0x4E0A280
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_HI_2 0x4E0A284
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_HI_3 0x4E0A288
+
+#define mmROT0_QM_CP_MSG_BASE3_ADDR_HI_4 0x4E0A28C
+
+#define mmROT0_QM_CP_FENCE0_RDATA_0 0x4E0A290
+
+#define mmROT0_QM_CP_FENCE0_RDATA_1 0x4E0A294
+
+#define mmROT0_QM_CP_FENCE0_RDATA_2 0x4E0A298
+
+#define mmROT0_QM_CP_FENCE0_RDATA_3 0x4E0A29C
+
+#define mmROT0_QM_CP_FENCE0_RDATA_4 0x4E0A2A0
+
+#define mmROT0_QM_CP_FENCE1_RDATA_0 0x4E0A2A4
+
+#define mmROT0_QM_CP_FENCE1_RDATA_1 0x4E0A2A8
+
+#define mmROT0_QM_CP_FENCE1_RDATA_2 0x4E0A2AC
+
+#define mmROT0_QM_CP_FENCE1_RDATA_3 0x4E0A2B0
+
+#define mmROT0_QM_CP_FENCE1_RDATA_4 0x4E0A2B4
+
+#define mmROT0_QM_CP_FENCE2_RDATA_0 0x4E0A2B8
+
+#define mmROT0_QM_CP_FENCE2_RDATA_1 0x4E0A2BC
+
+#define mmROT0_QM_CP_FENCE2_RDATA_2 0x4E0A2C0
+
+#define mmROT0_QM_CP_FENCE2_RDATA_3 0x4E0A2C4
+
+#define mmROT0_QM_CP_FENCE2_RDATA_4 0x4E0A2C8
+
+#define mmROT0_QM_CP_FENCE3_RDATA_0 0x4E0A2CC
+
+#define mmROT0_QM_CP_FENCE3_RDATA_1 0x4E0A2D0
+
+#define mmROT0_QM_CP_FENCE3_RDATA_2 0x4E0A2D4
+
+#define mmROT0_QM_CP_FENCE3_RDATA_3 0x4E0A2D8
+
+#define mmROT0_QM_CP_FENCE3_RDATA_4 0x4E0A2DC
+
+#define mmROT0_QM_CP_FENCE0_CNT_0 0x4E0A2E0
+
+#define mmROT0_QM_CP_FENCE0_CNT_1 0x4E0A2E4
+
+#define mmROT0_QM_CP_FENCE0_CNT_2 0x4E0A2E8
+
+#define mmROT0_QM_CP_FENCE0_CNT_3 0x4E0A2EC
+
+#define mmROT0_QM_CP_FENCE0_CNT_4 0x4E0A2F0
+
+#define mmROT0_QM_CP_FENCE1_CNT_0 0x4E0A2F4
+
+#define mmROT0_QM_CP_FENCE1_CNT_1 0x4E0A2F8
+
+#define mmROT0_QM_CP_FENCE1_CNT_2 0x4E0A2FC
+
+#define mmROT0_QM_CP_FENCE1_CNT_3 0x4E0A300
+
+#define mmROT0_QM_CP_FENCE1_CNT_4 0x4E0A304
+
+#define mmROT0_QM_CP_FENCE2_CNT_0 0x4E0A308
+
+#define mmROT0_QM_CP_FENCE2_CNT_1 0x4E0A30C
+
+#define mmROT0_QM_CP_FENCE2_CNT_2 0x4E0A310
+
+#define mmROT0_QM_CP_FENCE2_CNT_3 0x4E0A314
+
+#define mmROT0_QM_CP_FENCE2_CNT_4 0x4E0A318
+
+#define mmROT0_QM_CP_FENCE3_CNT_0 0x4E0A31C
+
+#define mmROT0_QM_CP_FENCE3_CNT_1 0x4E0A320
+
+#define mmROT0_QM_CP_FENCE3_CNT_2 0x4E0A324
+
+#define mmROT0_QM_CP_FENCE3_CNT_3 0x4E0A328
+
+#define mmROT0_QM_CP_FENCE3_CNT_4 0x4E0A32C
+
+#define mmROT0_QM_CP_BARRIER_CFG 0x4E0A330
+
+#define mmROT0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0x4E0A334
+
+#define mmROT0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0x4E0A338
+
+#define mmROT0_QM_CP_LDMA_TSIZE_OFFSET 0x4E0A33C
+
+#define mmROT0_QM_CP_CQ_PTR_LO_OFFSET_0 0x4E0A340
+
+#define mmROT0_QM_CP_CQ_PTR_LO_OFFSET_1 0x4E0A344
+
+#define mmROT0_QM_CP_CQ_PTR_LO_OFFSET_2 0x4E0A348
+
+#define mmROT0_QM_CP_CQ_PTR_LO_OFFSET_3 0x4E0A34C
+
+#define mmROT0_QM_CP_CQ_PTR_LO_OFFSET_4 0x4E0A350
+
+#define mmROT0_QM_CP_STS_0 0x4E0A368
+
+#define mmROT0_QM_CP_STS_1 0x4E0A36C
+
+#define mmROT0_QM_CP_STS_2 0x4E0A370
+
+#define mmROT0_QM_CP_STS_3 0x4E0A374
+
+#define mmROT0_QM_CP_STS_4 0x4E0A378
+
+#define mmROT0_QM_CP_CURRENT_INST_LO_0 0x4E0A37C
+
+#define mmROT0_QM_CP_CURRENT_INST_LO_1 0x4E0A380
+
+#define mmROT0_QM_CP_CURRENT_INST_LO_2 0x4E0A384
+
+#define mmROT0_QM_CP_CURRENT_INST_LO_3 0x4E0A388
+
+#define mmROT0_QM_CP_CURRENT_INST_LO_4 0x4E0A38C
+
+#define mmROT0_QM_CP_CURRENT_INST_HI_0 0x4E0A390
+
+#define mmROT0_QM_CP_CURRENT_INST_HI_1 0x4E0A394
+
+#define mmROT0_QM_CP_CURRENT_INST_HI_2 0x4E0A398
+
+#define mmROT0_QM_CP_CURRENT_INST_HI_3 0x4E0A39C
+
+#define mmROT0_QM_CP_CURRENT_INST_HI_4 0x4E0A3A0
+
+#define mmROT0_QM_CP_PRED_0 0x4E0A3A4
+
+#define mmROT0_QM_CP_PRED_1 0x4E0A3A8
+
+#define mmROT0_QM_CP_PRED_2 0x4E0A3AC
+
+#define mmROT0_QM_CP_PRED_3 0x4E0A3B0
+
+#define mmROT0_QM_CP_PRED_4 0x4E0A3B4
+
+#define mmROT0_QM_CP_PRED_UPEN_0 0x4E0A3B8
+
+#define mmROT0_QM_CP_PRED_UPEN_1 0x4E0A3BC
+
+#define mmROT0_QM_CP_PRED_UPEN_2 0x4E0A3C0
+
+#define mmROT0_QM_CP_PRED_UPEN_3 0x4E0A3C4
+
+#define mmROT0_QM_CP_PRED_UPEN_4 0x4E0A3C8
+
+#define mmROT0_QM_CP_DBG_0_0 0x4E0A3CC
+
+#define mmROT0_QM_CP_DBG_0_1 0x4E0A3D0
+
+#define mmROT0_QM_CP_DBG_0_2 0x4E0A3D4
+
+#define mmROT0_QM_CP_DBG_0_3 0x4E0A3D8
+
+#define mmROT0_QM_CP_DBG_0_4 0x4E0A3DC
+
+#define mmROT0_QM_CP_CPDMA_UP_CRED_0 0x4E0A3E0
+
+#define mmROT0_QM_CP_CPDMA_UP_CRED_1 0x4E0A3E4
+
+#define mmROT0_QM_CP_CPDMA_UP_CRED_2 0x4E0A3E8
+
+#define mmROT0_QM_CP_CPDMA_UP_CRED_3 0x4E0A3EC
+
+#define mmROT0_QM_CP_CPDMA_UP_CRED_4 0x4E0A3F0
+
+#define mmROT0_QM_CP_IN_DATA_LO_0 0x4E0A3F4
+
+#define mmROT0_QM_CP_IN_DATA_LO_1 0x4E0A3F8
+
+#define mmROT0_QM_CP_IN_DATA_LO_2 0x4E0A3FC
+
+#define mmROT0_QM_CP_IN_DATA_LO_3 0x4E0A400
+
+#define mmROT0_QM_CP_IN_DATA_LO_4 0x4E0A404
+
+#define mmROT0_QM_CP_IN_DATA_HI_0 0x4E0A408
+
+#define mmROT0_QM_CP_IN_DATA_HI_1 0x4E0A40C
+
+#define mmROT0_QM_CP_IN_DATA_HI_2 0x4E0A410
+
+#define mmROT0_QM_CP_IN_DATA_HI_3 0x4E0A414
+
+#define mmROT0_QM_CP_IN_DATA_HI_4 0x4E0A418
+
+#define mmROT0_QM_PQC_HBW_BASE_LO_0 0x4E0A41C
+
+#define mmROT0_QM_PQC_HBW_BASE_LO_1 0x4E0A420
+
+#define mmROT0_QM_PQC_HBW_BASE_LO_2 0x4E0A424
+
+#define mmROT0_QM_PQC_HBW_BASE_LO_3 0x4E0A428
+
+#define mmROT0_QM_PQC_HBW_BASE_HI_0 0x4E0A42C
+
+#define mmROT0_QM_PQC_HBW_BASE_HI_1 0x4E0A430
+
+#define mmROT0_QM_PQC_HBW_BASE_HI_2 0x4E0A434
+
+#define mmROT0_QM_PQC_HBW_BASE_HI_3 0x4E0A438
+
+#define mmROT0_QM_PQC_SIZE_0 0x4E0A43C
+
+#define mmROT0_QM_PQC_SIZE_1 0x4E0A440
+
+#define mmROT0_QM_PQC_SIZE_2 0x4E0A444
+
+#define mmROT0_QM_PQC_SIZE_3 0x4E0A448
+
+#define mmROT0_QM_PQC_PI_0 0x4E0A44C
+
+#define mmROT0_QM_PQC_PI_1 0x4E0A450
+
+#define mmROT0_QM_PQC_PI_2 0x4E0A454
+
+#define mmROT0_QM_PQC_PI_3 0x4E0A458
+
+#define mmROT0_QM_PQC_LBW_WDATA_0 0x4E0A45C
+
+#define mmROT0_QM_PQC_LBW_WDATA_1 0x4E0A460
+
+#define mmROT0_QM_PQC_LBW_WDATA_2 0x4E0A464
+
+#define mmROT0_QM_PQC_LBW_WDATA_3 0x4E0A468
+
+#define mmROT0_QM_PQC_LBW_BASE_LO_0 0x4E0A46C
+
+#define mmROT0_QM_PQC_LBW_BASE_LO_1 0x4E0A470
+
+#define mmROT0_QM_PQC_LBW_BASE_LO_2 0x4E0A474
+
+#define mmROT0_QM_PQC_LBW_BASE_LO_3 0x4E0A478
+
+#define mmROT0_QM_PQC_LBW_BASE_HI_0 0x4E0A47C
+
+#define mmROT0_QM_PQC_LBW_BASE_HI_1 0x4E0A480
+
+#define mmROT0_QM_PQC_LBW_BASE_HI_2 0x4E0A484
+
+#define mmROT0_QM_PQC_LBW_BASE_HI_3 0x4E0A488
+
+#define mmROT0_QM_PQC_CFG 0x4E0A48C
+
+#define mmROT0_QM_PQC_SECURE_PUSH_IND 0x4E0A490
+
+#define mmROT0_QM_ARB_MASK 0x4E0A4A0
+
+#define mmROT0_QM_ARB_CFG_0 0x4E0A4A4
+
+#define mmROT0_QM_ARB_CHOICE_Q_PUSH 0x4E0A4A8
+
+#define mmROT0_QM_ARB_WRR_WEIGHT_0 0x4E0A4AC
+
+#define mmROT0_QM_ARB_WRR_WEIGHT_1 0x4E0A4B0
+
+#define mmROT0_QM_ARB_WRR_WEIGHT_2 0x4E0A4B4
+
+#define mmROT0_QM_ARB_WRR_WEIGHT_3 0x4E0A4B8
+
+#define mmROT0_QM_ARB_CFG_1 0x4E0A4BC
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_0 0x4E0A4C0
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_1 0x4E0A4C4
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_2 0x4E0A4C8
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_3 0x4E0A4CC
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_4 0x4E0A4D0
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_5 0x4E0A4D4
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_6 0x4E0A4D8
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_7 0x4E0A4DC
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_8 0x4E0A4E0
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_9 0x4E0A4E4
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_10 0x4E0A4E8
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_11 0x4E0A4EC
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_12 0x4E0A4F0
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_13 0x4E0A4F4
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_14 0x4E0A4F8
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_15 0x4E0A4FC
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_16 0x4E0A500
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_17 0x4E0A504
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_18 0x4E0A508
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_19 0x4E0A50C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_20 0x4E0A510
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_21 0x4E0A514
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_22 0x4E0A518
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_23 0x4E0A51C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_24 0x4E0A520
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_25 0x4E0A524
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_26 0x4E0A528
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_27 0x4E0A52C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_28 0x4E0A530
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_29 0x4E0A534
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_30 0x4E0A538
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_31 0x4E0A53C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_32 0x4E0A540
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_33 0x4E0A544
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_34 0x4E0A548
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_35 0x4E0A54C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_36 0x4E0A550
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_37 0x4E0A554
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_38 0x4E0A558
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_39 0x4E0A55C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_40 0x4E0A560
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_41 0x4E0A564
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_42 0x4E0A568
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_43 0x4E0A56C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_44 0x4E0A570
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_45 0x4E0A574
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_46 0x4E0A578
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_47 0x4E0A57C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_48 0x4E0A580
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_49 0x4E0A584
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_50 0x4E0A588
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_51 0x4E0A58C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_52 0x4E0A590
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_53 0x4E0A594
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_54 0x4E0A598
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_55 0x4E0A59C
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_56 0x4E0A5A0
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_57 0x4E0A5A4
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_58 0x4E0A5A8
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_59 0x4E0A5AC
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_60 0x4E0A5B0
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_61 0x4E0A5B4
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_62 0x4E0A5B8
+
+#define mmROT0_QM_ARB_MST_AVAIL_CRED_63 0x4E0A5BC
+
+#define mmROT0_QM_ARB_MST_CRED_INC 0x4E0A5E0
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_0 0x4E0A5E4
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_1 0x4E0A5E8
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_2 0x4E0A5EC
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_3 0x4E0A5F0
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_4 0x4E0A5F4
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_5 0x4E0A5F8
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_6 0x4E0A5FC
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_7 0x4E0A600
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_8 0x4E0A604
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_9 0x4E0A608
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_10 0x4E0A60C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_11 0x4E0A610
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_12 0x4E0A614
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_13 0x4E0A618
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_14 0x4E0A61C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_15 0x4E0A620
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_16 0x4E0A624
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_17 0x4E0A628
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_18 0x4E0A62C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_19 0x4E0A630
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_20 0x4E0A634
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_21 0x4E0A638
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_22 0x4E0A63C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_23 0x4E0A640
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_24 0x4E0A644
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_25 0x4E0A648
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_26 0x4E0A64C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_27 0x4E0A650
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_28 0x4E0A654
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_29 0x4E0A658
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_30 0x4E0A65C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_31 0x4E0A660
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_32 0x4E0A664
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_33 0x4E0A668
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_34 0x4E0A66C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_35 0x4E0A670
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_36 0x4E0A674
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_37 0x4E0A678
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_38 0x4E0A67C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_39 0x4E0A680
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_40 0x4E0A684
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_41 0x4E0A688
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_42 0x4E0A68C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_43 0x4E0A690
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_44 0x4E0A694
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_45 0x4E0A698
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_46 0x4E0A69C
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_47 0x4E0A6A0
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_48 0x4E0A6A4
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_49 0x4E0A6A8
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_50 0x4E0A6AC
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_51 0x4E0A6B0
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_52 0x4E0A6B4
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_53 0x4E0A6B8
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_54 0x4E0A6BC
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_55 0x4E0A6C0
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_56 0x4E0A6C4
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_57 0x4E0A6C8
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_58 0x4E0A6CC
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_59 0x4E0A6D0
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_60 0x4E0A6D4
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_61 0x4E0A6D8
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_62 0x4E0A6DC
+
+#define mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_63 0x4E0A6E0
+
+#define mmROT0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x4E0A704
+
+#define mmROT0_QM_ARB_MST_SLAVE_EN 0x4E0A708
+
+#define mmROT0_QM_ARB_MST_SLAVE_EN_1 0x4E0A70C
+
+#define mmROT0_QM_ARB_SLV_CHOICE_WDT 0x4E0A710
+
+#define mmROT0_QM_ARB_SLV_ID 0x4E0A714
+
+#define mmROT0_QM_ARB_MST_QUIET_PER 0x4E0A718
+
+#define mmROT0_QM_ARB_MSG_MAX_INFLIGHT 0x4E0A744
+
+#define mmROT0_QM_ARB_BASE_LO 0x4E0A754
+
+#define mmROT0_QM_ARB_BASE_HI 0x4E0A758
+
+#define mmROT0_QM_ARB_STATE_STS 0x4E0A780
+
+#define mmROT0_QM_ARB_CHOICE_FULLNESS_STS 0x4E0A784
+
+#define mmROT0_QM_ARB_MSG_STS 0x4E0A788
+
+#define mmROT0_QM_ARB_SLV_CHOICE_Q_HEAD 0x4E0A78C
+
+#define mmROT0_QM_ARB_ERR_CAUSE 0x4E0A79C
+
+#define mmROT0_QM_ARB_ERR_MSG_EN 0x4E0A7A0
+
+#define mmROT0_QM_ARB_ERR_STS_DRP 0x4E0A7A8
+
+#define mmROT0_QM_ARB_MST_CRED_STS 0x4E0A7B0
+
+#define mmROT0_QM_ARB_MST_CRED_STS_1 0x4E0A7B4
+
+#define mmROT0_QM_CSMR_STRICT_PRIO_CFG 0x4E0A7FC
+
+#define mmROT0_QM_ARC_CQ_CFG0 0x4E0A800
+
+#define mmROT0_QM_ARC_CQ_CFG1 0x4E0A804
+
+#define mmROT0_QM_ARC_CQ_PTR_LO 0x4E0A808
+
+#define mmROT0_QM_ARC_CQ_PTR_HI 0x4E0A80C
+
+#define mmROT0_QM_ARC_CQ_TSIZE 0x4E0A810
+
+#define mmROT0_QM_ARC_CQ_CTL 0x4E0A814
+
+#define mmROT0_QM_ARC_CQ_IFIFO_STS 0x4E0A81C
+
+#define mmROT0_QM_ARC_CQ_STS0 0x4E0A820
+
+#define mmROT0_QM_ARC_CQ_STS1 0x4E0A824
+
+#define mmROT0_QM_ARC_CQ_TSIZE_STS 0x4E0A828
+
+#define mmROT0_QM_ARC_CQ_PTR_LO_STS 0x4E0A82C
+
+#define mmROT0_QM_ARC_CQ_PTR_HI_STS 0x4E0A830
+
+#define mmROT0_QM_CP_WR_ARC_ADDR_HI 0x4E0A834
+
+#define mmROT0_QM_CP_WR_ARC_ADDR_LO 0x4E0A838
+
+#define mmROT0_QM_ARC_CQ_IFIFO_MSG_BASE_HI 0x4E0A83C
+
+#define mmROT0_QM_ARC_CQ_IFIFO_MSG_BASE_LO 0x4E0A840
+
+#define mmROT0_QM_ARC_CQ_CTL_MSG_BASE_HI 0x4E0A844
+
+#define mmROT0_QM_ARC_CQ_CTL_MSG_BASE_LO 0x4E0A848
+
+#define mmROT0_QM_CQ_IFIFO_MSG_BASE_HI 0x4E0A84C
+
+#define mmROT0_QM_CQ_IFIFO_MSG_BASE_LO 0x4E0A850
+
+#define mmROT0_QM_CQ_CTL_MSG_BASE_HI 0x4E0A854
+
+#define mmROT0_QM_CQ_CTL_MSG_BASE_LO 0x4E0A858
+
+#define mmROT0_QM_ADDR_OVRD 0x4E0A85C
+
+#define mmROT0_QM_CQ_IFIFO_CI_0 0x4E0A860
+
+#define mmROT0_QM_CQ_IFIFO_CI_1 0x4E0A864
+
+#define mmROT0_QM_CQ_IFIFO_CI_2 0x4E0A868
+
+#define mmROT0_QM_CQ_IFIFO_CI_3 0x4E0A86C
+
+#define mmROT0_QM_CQ_IFIFO_CI_4 0x4E0A870
+
+#define mmROT0_QM_ARC_CQ_IFIFO_CI 0x4E0A874
+
+#define mmROT0_QM_CQ_CTL_CI_0 0x4E0A878
+
+#define mmROT0_QM_CQ_CTL_CI_1 0x4E0A87C
+
+#define mmROT0_QM_CQ_CTL_CI_2 0x4E0A880
+
+#define mmROT0_QM_CQ_CTL_CI_3 0x4E0A884
+
+#define mmROT0_QM_CQ_CTL_CI_4 0x4E0A888
+
+#define mmROT0_QM_ARC_CQ_CTL_CI 0x4E0A88C
+
+#define mmROT0_QM_CP_CFG 0x4E0A890
+
+#define mmROT0_QM_CP_EXT_SWITCH 0x4E0A894
+
+#define mmROT0_QM_CP_SWITCH_WD_SET 0x4E0A898
+
+#define mmROT0_QM_CP_SWITCH_WD 0x4E0A89C
+
+#define mmROT0_QM_ARC_LB_ADDR_BASE_LO 0x4E0A8A4
+
+#define mmROT0_QM_ARC_LB_ADDR_BASE_HI 0x4E0A8A8
+
+#define mmROT0_QM_ENGINE_BASE_ADDR_HI 0x4E0A8AC
+
+#define mmROT0_QM_ENGINE_BASE_ADDR_LO 0x4E0A8B0
+
+#define mmROT0_QM_ENGINE_ADDR_RANGE_SIZE 0x4E0A8B4
+
+#define mmROT0_QM_QM_ARC_AUX_BASE_ADDR_HI 0x4E0A8B8
+
+#define mmROT0_QM_QM_ARC_AUX_BASE_ADDR_LO 0x4E0A8BC
+
+#define mmROT0_QM_QM_BASE_ADDR_HI 0x4E0A8C0
+
+#define mmROT0_QM_QM_BASE_ADDR_LO 0x4E0A8C4
+
+#define mmROT0_QM_ARC_PQC_SECURE_PUSH_IND 0x4E0A8C8
+
+#define mmROT0_QM_PQC_STS_0_0 0x4E0A8D0
+
+#define mmROT0_QM_PQC_STS_0_1 0x4E0A8D4
+
+#define mmROT0_QM_PQC_STS_0_2 0x4E0A8D8
+
+#define mmROT0_QM_PQC_STS_0_3 0x4E0A8DC
+
+#define mmROT0_QM_PQC_STS_1_0 0x4E0A8E0
+
+#define mmROT0_QM_PQC_STS_1_1 0x4E0A8E4
+
+#define mmROT0_QM_PQC_STS_1_2 0x4E0A8E8
+
+#define mmROT0_QM_PQC_STS_1_3 0x4E0A8EC
+
+#define mmROT0_QM_SEI_STATUS 0x4E0A8F0
+
+#define mmROT0_QM_SEI_MASK 0x4E0A8F4
+
+#define mmROT0_QM_GLBL_ERR_ADDR_LO 0x4E0AD00
+
+#define mmROT0_QM_GLBL_ERR_ADDR_HI 0x4E0AD04
+
+#define mmROT0_QM_GLBL_ERR_WDATA 0x4E0AD08
+
+#define mmROT0_QM_L2H_MASK_LO 0x4E0AD14
+
+#define mmROT0_QM_L2H_MASK_HI 0x4E0AD18
+
+#define mmROT0_QM_L2H_CMPR_LO 0x4E0AD1C
+
+#define mmROT0_QM_L2H_CMPR_HI 0x4E0AD20
+
+#define mmROT0_QM_LOCAL_RANGE_BASE 0x4E0AD24
+
+#define mmROT0_QM_LOCAL_RANGE_SIZE 0x4E0AD28
+
+#define mmROT0_QM_HBW_RD_RATE_LIM_CFG_1 0x4E0AD30
+
+#define mmROT0_QM_LBW_WR_RATE_LIM_CFG_0 0x4E0AD34
+
+#define mmROT0_QM_LBW_WR_RATE_LIM_CFG_1 0x4E0AD38
+
+#define mmROT0_QM_HBW_RD_RATE_LIM_CFG_0 0x4E0AD3C
+
+#define mmROT0_QM_IND_GW_APB_CFG 0x4E0AD40
+
+#define mmROT0_QM_IND_GW_APB_WDATA 0x4E0AD44
+
+#define mmROT0_QM_IND_GW_APB_RDATA 0x4E0AD48
+
+#define mmROT0_QM_IND_GW_APB_STATUS 0x4E0AD4C
+
+#define mmROT0_QM_PERF_CNT_FREE_LO 0x4E0AD60
+
+#define mmROT0_QM_PERF_CNT_FREE_HI 0x4E0AD64
+
+#define mmROT0_QM_PERF_CNT_IDLE_LO 0x4E0AD68
+
+#define mmROT0_QM_PERF_CNT_IDLE_HI 0x4E0AD6C
+
+#define mmROT0_QM_PERF_CNT_CFG 0x4E0AD70
+
+#endif /* ASIC_REG_ROT0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
new file mode 100644
index 000000000000..7d85dc5559da
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_ROT0_REGS_H_
+#define ASIC_REG_ROT0_REGS_H_
+
+/*
+ *****************************************
+ * ROT0
+ * (Prototype: ROTATOR)
+ *****************************************
+ */
+
+#define mmROT0_KMD_MODE 0x4E0B000
+
+#define mmROT0_CPL_QUEUE_EN 0x4E0B004
+
+#define mmROT0_CPL_QUEUE_ADDR_L 0x4E0B008
+
+#define mmROT0_CPL_QUEUE_ADDR_H 0x4E0B00C
+
+#define mmROT0_CPL_QUEUE_DATA 0x4E0B010
+
+#define mmROT0_CPL_QUEUE_AWUSER 0x4E0B014
+
+#define mmROT0_CPL_QUEUE_AXI 0x4E0B018
+
+#define mmROT0_CPL_MSG_THRESHOLD 0x4E0B020
+
+#define mmROT0_CPL_MSG_AXI 0x4E0B024
+
+#define mmROT0_AXI_WB 0x4E0B028
+
+#define mmROT0_ERR_CFG 0x4E0B02C
+
+#define mmROT0_ERR_STATUS 0x4E0B030
+
+#define mmROT0_WBC_MAX_OUTSTANDING 0x4E0B038
+
+#define mmROT0_WBC_RL 0x4E0B03C
+
+#define mmROT0_WBC_INFLIGHTS 0x4E0B040
+
+#define mmROT0_WBC_INFO 0x4E0B044
+
+#define mmROT0_WBC_MON 0x4E0B048
+
+#define mmROT0_RSB_CAM_MAX_SIZE 0x4E0B04C
+
+#define mmROT0_RSB_CFG 0x4E0B050
+
+#define mmROT0_RSB_MAX_OS 0x4E0B054
+
+#define mmROT0_RSB_RL 0x4E0B058
+
+#define mmROT0_RSB_INFLIGHTS 0x4E0B05C
+
+#define mmROT0_RSB_OCCUPANCY 0x4E0B060
+
+#define mmROT0_RSB_INFO 0x4E0B064
+
+#define mmROT0_RSB_MON 0x4E0B068
+
+#define mmROT0_RSB_MON_CONTEXT_ID 0x4E0B06C
+
+#define mmROT0_MSS_HALT 0x4E0B070
+
+#define mmROT0_MSS_SEI_STATUS 0x4E0B074
+
+#define mmROT0_MSS_SEI_MASK 0x4E0B078
+
+#define mmROT0_MSS_SPI_STATUS 0x4E0B07C
+
+#define mmROT0_MSS_SPI_MASK 0x4E0B080
+
+#define mmROT0_DISABLE_PAD_CALC 0x4E0B084
+
+#define mmROT0_QMAN_CFG 0x4E0B088
+
+#define mmROT0_CLK_EN 0x4E0B08C
+
+#define mmROT0_MRSB_CAM_MAX_SIZE 0x4E0B090
+
+#define mmROT0_MRSB_CFG 0x4E0B094
+
+#define mmROT0_MRSB_MAX_OS 0x4E0B098
+
+#define mmROT0_MRSB_RL 0x4E0B09C
+
+#define mmROT0_MRSB_INFLIGHTS 0x4E0B0A0
+
+#define mmROT0_MRSB_OCCUPANCY 0x4E0B0A4
+
+#define mmROT0_MRSB_INFO 0x4E0B0A8
+
+#define mmROT0_MRSB_MON 0x4E0B0AC
+
+#define mmROT0_MRSB_MON_CONTEXT_ID 0x4E0B0B0
+
+#define mmROT0_MSS_STS 0x4E0B0B4
+
+#endif /* ASIC_REG_ROT0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
new file mode 100644
index 000000000000..e8aebd7f5f85
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_XBAR_EDGE_0_REGS_H_
+#define ASIC_REG_XBAR_EDGE_0_REGS_H_
+
+/*
+ *****************************************
+ * XBAR_EDGE_0
+ * (Prototype: XBAR)
+ *****************************************
+ */
+
+#define mmXBAR_EDGE_0_LBW_HIF0_BASE_ADDR 0x4D48000
+
+#define mmXBAR_EDGE_0_LBW_HIF0_ADDR_MASK 0x4D48004
+
+#define mmXBAR_EDGE_0_LBW_HIF1_BASE_ADDR 0x4D48008
+
+#define mmXBAR_EDGE_0_LBW_HIF1_ADDR_MASK 0x4D4800C
+
+#define mmXBAR_EDGE_0_LBW_HMMU0_BASE_ADDR 0x4D48010
+
+#define mmXBAR_EDGE_0_LBW_HMMU0_ADDR_MASK 0x4D48014
+
+#define mmXBAR_EDGE_0_LBW_HMMU1_BASE_ADDR 0x4D48018
+
+#define mmXBAR_EDGE_0_LBW_HMMU1_ADDR_MASK 0x4D4801C
+
+#define mmXBAR_EDGE_0_LBW_EDMA_BASE_ADDR0 0x4D48020
+
+#define mmXBAR_EDGE_0_LBW_EDMA_ADDR_MASK0 0x4D48024
+
+#define mmXBAR_EDGE_0_LBW_EDMA_BASE_ADDR1 0x4D48028
+
+#define mmXBAR_EDGE_0_LBW_EDMA_ADDR_MASK1 0x4D4802C
+
+#define mmXBAR_EDGE_0_LBW_HBM_BASE_ADDR0 0x4D48030
+
+#define mmXBAR_EDGE_0_LBW_HBM_ADDR_MASK0 0x4D48034
+
+#define mmXBAR_EDGE_0_LBW_HBM_BASE_ADDR1 0x4D48038
+
+#define mmXBAR_EDGE_0_LBW_HBM_ADDR_MASK1 0x4D4803C
+
+#define mmXBAR_EDGE_0_LBW_XBAR_BASE_ADDR0 0x4D48040
+
+#define mmXBAR_EDGE_0_LBW_XBAR_ADDR_MASK0 0x4D48044
+
+#define mmXBAR_EDGE_0_LBW_XBAR_BASE_ADDR1 0x4D48048
+
+#define mmXBAR_EDGE_0_LBW_XBAR_ADDR_MASK1 0x4D4804C
+
+#define mmXBAR_EDGE_0_DBG_HIF0_BASE_ADDR 0x4D48080
+
+#define mmXBAR_EDGE_0_DBG_HIF0_ADDR_MASK 0x4D48084
+
+#define mmXBAR_EDGE_0_DBG_HIF1_BASE_ADDR 0x4D48088
+
+#define mmXBAR_EDGE_0_DBG_HIF1_ADDR_MASK 0x4D4808C
+
+#define mmXBAR_EDGE_0_DBG_HMMU0_BASE_ADDR 0x4D48090
+
+#define mmXBAR_EDGE_0_DBG_HMMU0_ADDR_MASK 0x4D48094
+
+#define mmXBAR_EDGE_0_DBG_HMMU1_BASE_ADDR 0x4D48098
+
+#define mmXBAR_EDGE_0_DBG_HMMU1_ADDR_MASK 0x4D4809C
+
+#define mmXBAR_EDGE_0_DBG_EDMA_BASE_ADDR0 0x4D480A0
+
+#define mmXBAR_EDGE_0_DBG_EDMA_ADDR_MASK0 0x4D480A4
+
+#define mmXBAR_EDGE_0_DBG_EDMA_BASE_ADDR1 0x4D480A8
+
+#define mmXBAR_EDGE_0_DBG_EDMA_ADDR_MASK1 0x4D480AC
+
+#define mmXBAR_EDGE_0_DBG_HBM_BASE_ADDR0 0x4D480B0
+
+#define mmXBAR_EDGE_0_DBG_HBM_ADDR_MASK0 0x4D480B4
+
+#define mmXBAR_EDGE_0_DBG_HBM_BASE_ADDR1 0x4D480B8
+
+#define mmXBAR_EDGE_0_DBG_HBM_ADDR_MASK1 0x4D480BC
+
+#define mmXBAR_EDGE_0_DBG_XBAR_BASE_ADDR0 0x4D480C0
+
+#define mmXBAR_EDGE_0_DBG_XBAR_ADDR_MASK0 0x4D480C4
+
+#define mmXBAR_EDGE_0_DBG_XBAR_BASE_ADDR1 0x4D480C8
+
+#define mmXBAR_EDGE_0_DBG_XBAR_ADDR_MASK1 0x4D480CC
+
+#define mmXBAR_EDGE_0_LBW_INTERNAL_ADDR_RGF 0x4D480D0
+
+#define mmXBAR_EDGE_0_DBG_INTERNAL_ADDR_FUN 0x4D480D4
+
+#define mmXBAR_EDGE_0_EMEM_HBM_BIT_LOCATION 0x4D48100
+
+#define mmXBAR_EDGE_0_EMEM_PC_BIT_LOCATION 0x4D48104
+
+#define mmXBAR_EDGE_0_HIF_WR_RS_CH_LOCATION 0x4D48108
+
+#define mmXBAR_EDGE_0_HBW_MST_ARB_WEIGHT 0x4D4810C
+
+#define mmXBAR_EDGE_0_MMU_PC_IDX_MAP_0 0x4D48110
+
+#define mmXBAR_EDGE_0_MMU_PC_IDX_MAP_1 0x4D48114
+
+#define mmXBAR_EDGE_0_MMU_RD_LL_ARB_0 0x4D48120
+
+#define mmXBAR_EDGE_0_MMU_RD_LL_ARB_1 0x4D48124
+
+#define mmXBAR_EDGE_0_MMU_WR_LL_ARB_0 0x4D48128
+
+#define mmXBAR_EDGE_0_MMU_WR_LL_ARB_1 0x4D4812C
+
+#define mmXBAR_EDGE_0_HBM_USER_RESP_OVR_0 0x4D48130
+
+#define mmXBAR_EDGE_0_HBM_USER_RESP_OVR_1 0x4D48134
+
+#define mmXBAR_EDGE_0_RL_RD_0 0x4D48140
+
+#define mmXBAR_EDGE_0_RL_RD_1 0x4D48144
+
+#define mmXBAR_EDGE_0_RL_RD_2 0x4D48148
+
+#define mmXBAR_EDGE_0_RL_RD_3 0x4D4814C
+
+#define mmXBAR_EDGE_0_RL_RD_4 0x4D48150
+
+#define mmXBAR_EDGE_0_RL_RD_5 0x4D48154
+
+#define mmXBAR_EDGE_0_RL_RD_6 0x4D48158
+
+#define mmXBAR_EDGE_0_RL_RD_7 0x4D4815C
+
+#define mmXBAR_EDGE_0_RL_RD_8 0x4D48160
+
+#define mmXBAR_EDGE_0_RL_RD_9 0x4D48164
+
+#define mmXBAR_EDGE_0_RL_RD_10 0x4D48168
+
+#define mmXBAR_EDGE_0_RL_RD_11 0x4D4816C
+
+#define mmXBAR_EDGE_0_RL_WR_0 0x4D48180
+
+#define mmXBAR_EDGE_0_RL_WR_1 0x4D48184
+
+#define mmXBAR_EDGE_0_RL_WR_2 0x4D48188
+
+#define mmXBAR_EDGE_0_RL_WR_3 0x4D4818C
+
+#define mmXBAR_EDGE_0_RL_WR_4 0x4D48190
+
+#define mmXBAR_EDGE_0_RL_WR_5 0x4D48194
+
+#define mmXBAR_EDGE_0_RL_WR_6 0x4D48198
+
+#define mmXBAR_EDGE_0_RL_WR_7 0x4D4819C
+
+#define mmXBAR_EDGE_0_RL_WR_8 0x4D481A0
+
+#define mmXBAR_EDGE_0_RL_WR_9 0x4D481A4
+
+#define mmXBAR_EDGE_0_RL_WR_10 0x4D481A8
+
+#define mmXBAR_EDGE_0_RL_WR_11 0x4D481AC
+
+#define mmXBAR_EDGE_0_E2E_CRDT_SLV_0 0x4D481B0
+
+#define mmXBAR_EDGE_0_E2E_CRDT_SLV_1 0x4D481B4
+
+#define mmXBAR_EDGE_0_E2E_CRDT_SLV_2 0x4D481B8
+
+#define mmXBAR_EDGE_0_E2E_CRDT_DEBUG 0x4D481BC
+
+#define mmXBAR_EDGE_0_UPSCALE 0x4D481C0
+
+#define mmXBAR_EDGE_0_DOWN_CONV 0x4D481C4
+
+#define mmXBAR_EDGE_0_DOWN_CONV_LFSR_EN 0x4D481D0
+
+#define mmXBAR_EDGE_0_DOWN_CONV_LFSR_SET_VLD 0x4D481D4
+
+#define mmXBAR_EDGE_0_DOWN_CONV_LFSR_SET_VALUE 0x4D481D8
+
+#define mmXBAR_EDGE_0_DOWN_CONV_LFSR_CFG_POLY 0x4D481DC
+
+#endif /* ASIC_REG_XBAR_EDGE_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
new file mode 100644
index 000000000000..3d39d1a94851
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_XBAR_MID_0_REGS_H_
+#define ASIC_REG_XBAR_MID_0_REGS_H_
+
+/*
+ *****************************************
+ * XBAR_MID_0
+ * (Prototype: XBAR)
+ *****************************************
+ */
+
+#define mmXBAR_MID_0_LBW_HIF0_BASE_ADDR 0x4D40000
+
+#define mmXBAR_MID_0_LBW_HIF0_ADDR_MASK 0x4D40004
+
+#define mmXBAR_MID_0_LBW_HIF1_BASE_ADDR 0x4D40008
+
+#define mmXBAR_MID_0_LBW_HIF1_ADDR_MASK 0x4D4000C
+
+#define mmXBAR_MID_0_LBW_HMMU0_BASE_ADDR 0x4D40010
+
+#define mmXBAR_MID_0_LBW_HMMU0_ADDR_MASK 0x4D40014
+
+#define mmXBAR_MID_0_LBW_HMMU1_BASE_ADDR 0x4D40018
+
+#define mmXBAR_MID_0_LBW_HMMU1_ADDR_MASK 0x4D4001C
+
+#define mmXBAR_MID_0_LBW_EDMA_BASE_ADDR0 0x4D40020
+
+#define mmXBAR_MID_0_LBW_EDMA_ADDR_MASK0 0x4D40024
+
+#define mmXBAR_MID_0_LBW_EDMA_BASE_ADDR1 0x4D40028
+
+#define mmXBAR_MID_0_LBW_EDMA_ADDR_MASK1 0x4D4002C
+
+#define mmXBAR_MID_0_LBW_HBM_BASE_ADDR0 0x4D40030
+
+#define mmXBAR_MID_0_LBW_HBM_ADDR_MASK0 0x4D40034
+
+#define mmXBAR_MID_0_LBW_HBM_BASE_ADDR1 0x4D40038
+
+#define mmXBAR_MID_0_LBW_HBM_ADDR_MASK1 0x4D4003C
+
+#define mmXBAR_MID_0_LBW_XBAR_BASE_ADDR0 0x4D40040
+
+#define mmXBAR_MID_0_LBW_XBAR_ADDR_MASK0 0x4D40044
+
+#define mmXBAR_MID_0_LBW_XBAR_BASE_ADDR1 0x4D40048
+
+#define mmXBAR_MID_0_LBW_XBAR_ADDR_MASK1 0x4D4004C
+
+#define mmXBAR_MID_0_DBG_HIF0_BASE_ADDR 0x4D40080
+
+#define mmXBAR_MID_0_DBG_HIF0_ADDR_MASK 0x4D40084
+
+#define mmXBAR_MID_0_DBG_HIF1_BASE_ADDR 0x4D40088
+
+#define mmXBAR_MID_0_DBG_HIF1_ADDR_MASK 0x4D4008C
+
+#define mmXBAR_MID_0_DBG_HMMU0_BASE_ADDR 0x4D40090
+
+#define mmXBAR_MID_0_DBG_HMMU0_ADDR_MASK 0x4D40094
+
+#define mmXBAR_MID_0_DBG_HMMU1_BASE_ADDR 0x4D40098
+
+#define mmXBAR_MID_0_DBG_HMMU1_ADDR_MASK 0x4D4009C
+
+#define mmXBAR_MID_0_DBG_EDMA_BASE_ADDR0 0x4D400A0
+
+#define mmXBAR_MID_0_DBG_EDMA_ADDR_MASK0 0x4D400A4
+
+#define mmXBAR_MID_0_DBG_EDMA_BASE_ADDR1 0x4D400A8
+
+#define mmXBAR_MID_0_DBG_EDMA_ADDR_MASK1 0x4D400AC
+
+#define mmXBAR_MID_0_DBG_HBM_BASE_ADDR0 0x4D400B0
+
+#define mmXBAR_MID_0_DBG_HBM_ADDR_MASK0 0x4D400B4
+
+#define mmXBAR_MID_0_DBG_HBM_BASE_ADDR1 0x4D400B8
+
+#define mmXBAR_MID_0_DBG_HBM_ADDR_MASK1 0x4D400BC
+
+#define mmXBAR_MID_0_DBG_XBAR_BASE_ADDR0 0x4D400C0
+
+#define mmXBAR_MID_0_DBG_XBAR_ADDR_MASK0 0x4D400C4
+
+#define mmXBAR_MID_0_DBG_XBAR_BASE_ADDR1 0x4D400C8
+
+#define mmXBAR_MID_0_DBG_XBAR_ADDR_MASK1 0x4D400CC
+
+#define mmXBAR_MID_0_LBW_INTERNAL_ADDR_RGF 0x4D400D0
+
+#define mmXBAR_MID_0_DBG_INTERNAL_ADDR_FUN 0x4D400D4
+
+#define mmXBAR_MID_0_EMEM_HBM_BIT_LOCATION 0x4D40100
+
+#define mmXBAR_MID_0_EMEM_PC_BIT_LOCATION 0x4D40104
+
+#define mmXBAR_MID_0_HIF_WR_RS_CH_LOCATION 0x4D40108
+
+#define mmXBAR_MID_0_HBW_MST_ARB_WEIGHT 0x4D4010C
+
+#define mmXBAR_MID_0_MMU_PC_IDX_MAP_0 0x4D40110
+
+#define mmXBAR_MID_0_MMU_PC_IDX_MAP_1 0x4D40114
+
+#define mmXBAR_MID_0_MMU_RD_LL_ARB_0 0x4D40120
+
+#define mmXBAR_MID_0_MMU_RD_LL_ARB_1 0x4D40124
+
+#define mmXBAR_MID_0_MMU_WR_LL_ARB_0 0x4D40128
+
+#define mmXBAR_MID_0_MMU_WR_LL_ARB_1 0x4D4012C
+
+#define mmXBAR_MID_0_HBM_USER_RESP_OVR_0 0x4D40130
+
+#define mmXBAR_MID_0_HBM_USER_RESP_OVR_1 0x4D40134
+
+#define mmXBAR_MID_0_RL_RD_0 0x4D40140
+
+#define mmXBAR_MID_0_RL_RD_1 0x4D40144
+
+#define mmXBAR_MID_0_RL_RD_2 0x4D40148
+
+#define mmXBAR_MID_0_RL_RD_3 0x4D4014C
+
+#define mmXBAR_MID_0_RL_RD_4 0x4D40150
+
+#define mmXBAR_MID_0_RL_RD_5 0x4D40154
+
+#define mmXBAR_MID_0_RL_RD_6 0x4D40158
+
+#define mmXBAR_MID_0_RL_RD_7 0x4D4015C
+
+#define mmXBAR_MID_0_RL_RD_8 0x4D40160
+
+#define mmXBAR_MID_0_RL_RD_9 0x4D40164
+
+#define mmXBAR_MID_0_RL_RD_10 0x4D40168
+
+#define mmXBAR_MID_0_RL_RD_11 0x4D4016C
+
+#define mmXBAR_MID_0_RL_WR_0 0x4D40180
+
+#define mmXBAR_MID_0_RL_WR_1 0x4D40184
+
+#define mmXBAR_MID_0_RL_WR_2 0x4D40188
+
+#define mmXBAR_MID_0_RL_WR_3 0x4D4018C
+
+#define mmXBAR_MID_0_RL_WR_4 0x4D40190
+
+#define mmXBAR_MID_0_RL_WR_5 0x4D40194
+
+#define mmXBAR_MID_0_RL_WR_6 0x4D40198
+
+#define mmXBAR_MID_0_RL_WR_7 0x4D4019C
+
+#define mmXBAR_MID_0_RL_WR_8 0x4D401A0
+
+#define mmXBAR_MID_0_RL_WR_9 0x4D401A4
+
+#define mmXBAR_MID_0_RL_WR_10 0x4D401A8
+
+#define mmXBAR_MID_0_RL_WR_11 0x4D401AC
+
+#define mmXBAR_MID_0_E2E_CRDT_SLV_0 0x4D401B0
+
+#define mmXBAR_MID_0_E2E_CRDT_SLV_1 0x4D401B4
+
+#define mmXBAR_MID_0_E2E_CRDT_SLV_2 0x4D401B8
+
+#define mmXBAR_MID_0_E2E_CRDT_DEBUG 0x4D401BC
+
+#define mmXBAR_MID_0_UPSCALE 0x4D401C0
+
+#define mmXBAR_MID_0_DOWN_CONV 0x4D401C4
+
+#define mmXBAR_MID_0_DOWN_CONV_LFSR_EN 0x4D401D0
+
+#define mmXBAR_MID_0_DOWN_CONV_LFSR_SET_VLD 0x4D401D4
+
+#define mmXBAR_MID_0_DOWN_CONV_LFSR_SET_VALUE 0x4D401D8
+
+#define mmXBAR_MID_0_DOWN_CONV_LFSR_CFG_POLY 0x4D401DC
+
+#endif /* ASIC_REG_XBAR_MID_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2.h
new file mode 100644
index 000000000000..5b4f9e108798
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI2_H
+#define GAUDI2_H
+
+#define SRAM_CFG_BAR_ID 0
+#define MSIX_BAR_ID 2
+#define DRAM_BAR_ID 4
+
+/* Refers to CFG_REGION_SIZE, BAR0_RSRVD_SIZE and SRAM_SIZE */
+#define CFG_BAR_SIZE 0x10000000ull /* 256MB */
+
+#define MSIX_BAR_SIZE 0x4000ull /* 16KB */
+
+#define CFG_BASE 0x1000007FF8000000ull
+#define CFG_SIZE 0x8000000ull /* 96MB CFG + 32MB DBG */
+#define CFG_REGION_SIZE 0xC000000ull /* 192MB */
+
+#define STM_FLASH_BASE_ADDR 0x1000007FF4000000ull /* Not 256MB aligned */
+#define STM_FLASH_ALIGNED_OFF 0x4000000ull /* 256 MB alignment */
+#define STM_FLASH_SIZE 0x2000000ull /* 32MB */
+
+#define SPI_FLASH_BASE_ADDR 0x1000007FF6000000ull
+#define SPI_FLASH_SIZE 0x1000000ull /* 16MB */
+
+#define SCRATCHPAD_SRAM_ADDR 0x1000007FF7FE0000ull
+#define SCRATCHPAD_SRAM_SIZE 0x10000ull /* 64KB */
+
+#define PCIE_FW_SRAM_ADDR 0x1000007FF7FF0000ull
+#define PCIE_FW_SRAM_SIZE 0x8000 /* 32KB */
+
+#define BAR0_RSRVD_BASE_ADDR 0x1000FFFFFC000000ull
+#define BAR0_RSRVD_SIZE 0x1000000ull /* 16MB */
+
+#define SRAM_BASE_ADDR 0x1000FFFFFD000000ull
+#define SRAM_SIZE 0x3000000ull /* 48MB */
+
+#define DRAM_PHYS_BASE 0x1001000000000000ull
+
+/* every hint address is masked accordingly */
+#define DRAM_VA_HINT_MASK 0xFFFFFFFFFFFFull /* 48bit mask */
+
+#define HOST_PHYS_BASE_0 0x0000000000000000ull
+#define HOST_PHYS_SIZE_0 0x0100000000000000ull /* 64PB (56 bits) */
+
+#define HOST_PHYS_BASE_1 0xFF00000000000000ull
+#define HOST_PHYS_SIZE_1 0x0100000000000000ull /* 64PB (56 bits) */
+
+#define RESERVED_VA_RANGE_FOR_ARC_ON_HBM_START 0x1001500000000000ull
+#define RESERVED_VA_RANGE_FOR_ARC_ON_HBM_END 0x10016FFFFFFFFFFFull
+
+#define RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START 0xFFF077FFFFFF0000ull
+#define RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_END 0xFFF077FFFFFFFFFFull
+
+#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_START 0xFFF0780000000000ull
+#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END 0xFFF07FFFFFFFFFFFull
+
+#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull
+#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull
+
+#define GAUDI2_MSIX_ENTRIES 512
+
+#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
+
+#define MAX_ASID 2
+
+#define NUM_ARC_CPUS 69
+
+/* Every ARC cpu in the system contains a single DCCM block
+ * except MME and Scheduler ARCs which contain 2 DCCM blocks
+ */
+#define ARC_DCCM_BLOCK_SIZE 0x8000
+
+#define NUM_OF_DCORES 4
+#define NUM_OF_SFT 4
+#define NUM_OF_PSOC_ARC 2
+#define NUM_OF_SCHEDULER_ARC 6
+
+#define NUM_OF_PQ_PER_QMAN 4
+#define NUM_OF_CQ_PER_QMAN 5
+#define NUM_OF_CP_PER_QMAN 5
+#define NUM_OF_EDMA_PER_DCORE 2
+#define NUM_OF_HIF_PER_DCORE 4
+#define NUM_OF_PDMA 2
+#define NUM_OF_TPC_PER_DCORE 6
+#define NUM_DCORE0_TPC 7
+#define NUM_DCORE1_TPC NUM_OF_TPC_PER_DCORE
+#define NUM_DCORE2_TPC NUM_OF_TPC_PER_DCORE
+#define NUM_DCORE3_TPC NUM_OF_TPC_PER_DCORE
+#define NUM_OF_DEC_PER_DCORE 2
+#define NUM_OF_ROT 2
+#define NUM_OF_HMMU_PER_DCORE 4
+#define NUM_OF_MME_PER_DCORE 1
+#define NUM_OF_MME_SBTE_PER_DCORE 5
+#define NUM_OF_MME_WB_PER_DCORE 2
+#define NUM_OF_RTR_PER_DCORE 8
+#define NUM_OF_VDEC_PER_DCORE 2
+#define NUM_OF_IF_RTR_PER_SFT 3
+#define NUM_OF_PCIE_VDEC 2
+#define NUM_OF_ARC_FARMS_ARC 4
+#define NUM_OF_XBAR 4
+
+#define TPC_NUM_OF_KERNEL_TENSORS 16
+#define TPC_NUM_OF_QM_TENSORS 16
+
+#define MME_NUM_OF_LFSR_SEEDS 256
+
+#define NIC_NUMBER_OF_MACROS 12
+
+#define NIC_NUMBER_OF_QM_PER_MACRO 2
+
+#define NIC_NUMBER_OF_ENGINES (NIC_NUMBER_OF_MACROS * 2)
+
+#define NIC_MAX_NUMBER_OF_PORTS (NIC_NUMBER_OF_ENGINES * 2)
+
+#define DEVICE_CACHE_LINE_SIZE 128
+
+#endif /* GAUDI2_H */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h
new file mode 100644
index 000000000000..34406770a76a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h
@@ -0,0 +1,963 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI2_ASYNC_EVENTS_H_
+#define __GAUDI2_ASYNC_EVENTS_H_
+
+enum gaudi2_async_event_id {
+ GAUDI2_EVENT_PCIE_CORE_SERR = 32,
+ GAUDI2_EVENT_PCIE_CORE_DERR = 33,
+ GAUDI2_EVENT_PCIE_IF_SERR = 34,
+ GAUDI2_EVENT_PCIE_IF_DERR = 35,
+ GAUDI2_EVENT_PCIE_PHY_SERR = 36,
+ GAUDI2_EVENT_PCIE_PHY_DERR = 37,
+ GAUDI2_EVENT_TPC0_ECC_SERR = 38,
+ GAUDI2_EVENT_TPC1_ECC_SERR = 39,
+ GAUDI2_EVENT_TPC2_ECC_SERR = 40,
+ GAUDI2_EVENT_TPC3_ECC_SERR = 41,
+ GAUDI2_EVENT_TPC4_ECC_SERR = 42,
+ GAUDI2_EVENT_TPC5_ECC_SERR = 43,
+ GAUDI2_EVENT_TPC6_ECC_SERR = 44,
+ GAUDI2_EVENT_TPC7_ECC_SERR = 45,
+ GAUDI2_EVENT_TPC8_ECC_SERR = 46,
+ GAUDI2_EVENT_TPC9_ECC_SERR = 47,
+ GAUDI2_EVENT_TPC10_ECC_SERR = 48,
+ GAUDI2_EVENT_TPC11_ECC_SERR = 49,
+ GAUDI2_EVENT_TPC12_ECC_SERR = 50,
+ GAUDI2_EVENT_TPC13_ECC_SERR = 51,
+ GAUDI2_EVENT_TPC14_ECC_SERR = 52,
+ GAUDI2_EVENT_TPC15_ECC_SERR = 53,
+ GAUDI2_EVENT_TPC16_ECC_SERR = 54,
+ GAUDI2_EVENT_TPC17_ECC_SERR = 55,
+ GAUDI2_EVENT_TPC18_ECC_SERR = 56,
+ GAUDI2_EVENT_TPC19_ECC_SERR = 57,
+ GAUDI2_EVENT_TPC20_ECC_SERR = 58,
+ GAUDI2_EVENT_TPC21_ECC_SERR = 59,
+ GAUDI2_EVENT_TPC22_ECC_SERR = 60,
+ GAUDI2_EVENT_TPC23_ECC_SERR = 61,
+ GAUDI2_EVENT_TPC24_ECC_SERR = 62,
+ GAUDI2_EVENT_TPC0_ECC_DERR = 63,
+ GAUDI2_EVENT_TPC1_ECC_DERR = 64,
+ GAUDI2_EVENT_TPC2_ECC_DERR = 65,
+ GAUDI2_EVENT_TPC3_ECC_DERR = 66,
+ GAUDI2_EVENT_TPC4_ECC_DERR = 67,
+ GAUDI2_EVENT_TPC5_ECC_DERR = 68,
+ GAUDI2_EVENT_TPC6_ECC_DERR = 69,
+ GAUDI2_EVENT_TPC7_ECC_DERR = 70,
+ GAUDI2_EVENT_TPC8_ECC_DERR = 71,
+ GAUDI2_EVENT_TPC9_ECC_DERR = 72,
+ GAUDI2_EVENT_TPC10_ECC_DERR = 73,
+ GAUDI2_EVENT_TPC11_ECC_DERR = 74,
+ GAUDI2_EVENT_TPC12_ECC_DERR = 75,
+ GAUDI2_EVENT_TPC13_ECC_DERR = 76,
+ GAUDI2_EVENT_TPC14_ECC_DERR = 77,
+ GAUDI2_EVENT_TPC15_ECC_DERR = 78,
+ GAUDI2_EVENT_TPC16_ECC_DERR = 79,
+ GAUDI2_EVENT_TPC17_ECC_DERR = 80,
+ GAUDI2_EVENT_TPC18_ECC_DERR = 81,
+ GAUDI2_EVENT_TPC19_ECC_DERR = 82,
+ GAUDI2_EVENT_TPC20_ECC_DERR = 83,
+ GAUDI2_EVENT_TPC21_ECC_DERR = 84,
+ GAUDI2_EVENT_TPC22_ECC_DERR = 85,
+ GAUDI2_EVENT_TPC23_ECC_DERR = 86,
+ GAUDI2_EVENT_TPC24_ECC_DERR = 87,
+ GAUDI2_EVENT_MME0_SBTE0_ECC_SERR = 88,
+ GAUDI2_EVENT_MME0_SBTE1_ECC_SERR = 89,
+ GAUDI2_EVENT_MME0_SBTE2_ECC_SERR = 90,
+ GAUDI2_EVENT_MME0_SBTE3_ECC_SERR = 91,
+ GAUDI2_EVENT_MME0_SBTE4_ECC_SERR = 92,
+ GAUDI2_EVENT_MME0_CTRL_ECC_SERR = 93,
+ GAUDI2_EVENT_MME0_WAP_ECC_SERR = 94,
+ GAUDI2_EVENT_MME1_SBTE0_ECC_SERR = 95,
+ GAUDI2_EVENT_MME1_SBTE1_ECC_SERR = 96,
+ GAUDI2_EVENT_MME1_SBTE2_ECC_SERR = 97,
+ GAUDI2_EVENT_MME1_SBTE3_ECC_SERR = 98,
+ GAUDI2_EVENT_MME1_SBTE4_ECC_SERR = 99,
+ GAUDI2_EVENT_MME1_CTRL_ECC_SERR = 100,
+ GAUDI2_EVENT_MME1_WAP_ECC_SERR = 101,
+ GAUDI2_EVENT_MME2_SBTE0_ECC_SERR = 102,
+ GAUDI2_EVENT_MME2_SBTE1_ECC_SERR = 103,
+ GAUDI2_EVENT_MME2_SBTE2_ECC_SERR = 104,
+ GAUDI2_EVENT_MME2_SBTE3_ECC_SERR = 105,
+ GAUDI2_EVENT_MME2_SBTE4_ECC_SERR = 106,
+ GAUDI2_EVENT_MME2_CTRL_ECC_SERR = 107,
+ GAUDI2_EVENT_MME2_WAP_ECC_SERR = 108,
+ GAUDI2_EVENT_MME3_SBTE0_ECC_SERR = 109,
+ GAUDI2_EVENT_MME3_SBTE1_ECC_SERR = 110,
+ GAUDI2_EVENT_MME3_SBTE2_ECC_SERR = 111,
+ GAUDI2_EVENT_MME3_SBTE3_ECC_SERR = 112,
+ GAUDI2_EVENT_MME3_SBTE4_ECC_SERR = 113,
+ GAUDI2_EVENT_MME3_CTRL_ECC_SERR = 114,
+ GAUDI2_EVENT_MME3_WAP_ECC_SERR = 115,
+ GAUDI2_EVENT_MME0_SBTE0_ECC_DERR = 116,
+ GAUDI2_EVENT_MME0_SBTE1_ECC_DERR = 117,
+ GAUDI2_EVENT_MME0_SBTE2_ECC_DERR = 118,
+ GAUDI2_EVENT_MME0_SBTE3_ECC_DERR = 119,
+ GAUDI2_EVENT_MME0_SBTE4_ECC_DERR = 120,
+ GAUDI2_EVENT_MME0_CTRL_ECC_DERR = 121,
+ GAUDI2_EVENT_MME0_WAP_ECC_DERR = 122,
+ GAUDI2_EVENT_MME1_SBTE0_ECC_DERR = 123,
+ GAUDI2_EVENT_MME1_SBTE1_ECC_DERR = 124,
+ GAUDI2_EVENT_MME1_SBTE2_ECC_DERR = 125,
+ GAUDI2_EVENT_MME1_SBTE3_ECC_DERR = 126,
+ GAUDI2_EVENT_MME1_SBTE4_ECC_DERR = 127,
+ GAUDI2_EVENT_MME1_CTRL_ECC_DERR = 128,
+ GAUDI2_EVENT_MME1_WAP_ECC_DERR = 129,
+ GAUDI2_EVENT_MME2_SBTE0_ECC_DERR = 130,
+ GAUDI2_EVENT_MME2_SBTE1_ECC_DERR = 131,
+ GAUDI2_EVENT_MME2_SBTE2_ECC_DERR = 132,
+ GAUDI2_EVENT_MME2_SBTE3_ECC_DERR = 133,
+ GAUDI2_EVENT_MME2_SBTE4_ECC_DERR = 134,
+ GAUDI2_EVENT_MME2_CTRL_ECC_DERR = 135,
+ GAUDI2_EVENT_MME2_WAP_ECC_DERR = 136,
+ GAUDI2_EVENT_MME3_SBTE0_ECC_DERR = 137,
+ GAUDI2_EVENT_MME3_SBTE1_ECC_DERR = 138,
+ GAUDI2_EVENT_MME3_SBTE2_ECC_DERR = 139,
+ GAUDI2_EVENT_MME3_SBTE3_ECC_DERR = 140,
+ GAUDI2_EVENT_MME3_SBTE4_ECC_DERR = 141,
+ GAUDI2_EVENT_MME3_CTRL_ECC_DERR = 142,
+ GAUDI2_EVENT_MME3_WAP_ECC_DERR = 143,
+ GAUDI2_EVENT_HDMA2_ECC_SERR = 144,
+ GAUDI2_EVENT_HDMA3_ECC_SERR = 145,
+ GAUDI2_EVENT_HDMA0_ECC_SERR = 146,
+ GAUDI2_EVENT_HDMA1_ECC_SERR = 147,
+ GAUDI2_EVENT_HDMA6_ECC_SERR = 148,
+ GAUDI2_EVENT_HDMA7_ECC_SERR = 149,
+ GAUDI2_EVENT_HDMA4_ECC_SERR = 150,
+ GAUDI2_EVENT_HDMA5_ECC_SERR = 151,
+ GAUDI2_EVENT_HDMA2_ECC_DERR = 152,
+ GAUDI2_EVENT_HDMA3_ECC_DERR = 153,
+ GAUDI2_EVENT_HDMA0_ECC_DERR = 154,
+ GAUDI2_EVENT_HDMA1_ECC_DERR = 155,
+ GAUDI2_EVENT_HDMA6_ECC_DERR = 156,
+ GAUDI2_EVENT_HDMA7_ECC_DERR = 157,
+ GAUDI2_EVENT_HDMA4_ECC_DERR = 158,
+ GAUDI2_EVENT_HDMA5_ECC_DERR = 159,
+ GAUDI2_EVENT_KDMA0_ECC_SERR = 160,
+ GAUDI2_EVENT_PDMA0_ECC_SERR = 161,
+ GAUDI2_EVENT_PDMA1_ECC_SERR = 162,
+ GAUDI2_EVENT_KDMA0_ECC_DERR = 163,
+ GAUDI2_EVENT_PDMA0_ECC_DERR = 164,
+ GAUDI2_EVENT_PDMA1_ECC_DERR = 165,
+ GAUDI2_EVENT_CPU_IF_ECC_SERR = 166,
+ GAUDI2_EVENT_CPU_IF_ECC_DERR = 167,
+ GAUDI2_EVENT_PSOC_MEM_SERR = 168,
+ GAUDI2_EVENT_PSOC_MEM_DERR = 169,
+ GAUDI2_EVENT_SRAM0_ECC_SERR = 170,
+ GAUDI2_EVENT_SRAM1_ECC_SERR = 171,
+ GAUDI2_EVENT_SRAM2_ECC_SERR = 172,
+ GAUDI2_EVENT_SRAM3_ECC_SERR = 173,
+ GAUDI2_EVENT_SRAM4_ECC_SERR = 174,
+ GAUDI2_EVENT_SRAM5_ECC_SERR = 175,
+ GAUDI2_EVENT_SRAM6_ECC_SERR = 176,
+ GAUDI2_EVENT_SRAM7_ECC_SERR = 177,
+ GAUDI2_EVENT_SRAM8_ECC_SERR = 178,
+ GAUDI2_EVENT_SRAM9_ECC_SERR = 179,
+ GAUDI2_EVENT_SRAM10_ECC_SERR = 180,
+ GAUDI2_EVENT_SRAM11_ECC_SERR = 181,
+ GAUDI2_EVENT_SRAM12_ECC_SERR = 182,
+ GAUDI2_EVENT_SRAM13_ECC_SERR = 183,
+ GAUDI2_EVENT_SRAM14_ECC_SERR = 184,
+ GAUDI2_EVENT_SRAM15_ECC_SERR = 185,
+ GAUDI2_EVENT_SRAM16_ECC_SERR = 186,
+ GAUDI2_EVENT_SRAM17_ECC_SERR = 187,
+ GAUDI2_EVENT_SRAM18_ECC_SERR = 188,
+ GAUDI2_EVENT_SRAM19_ECC_SERR = 189,
+ GAUDI2_EVENT_SRAM20_ECC_SERR = 190,
+ GAUDI2_EVENT_SRAM21_ECC_SERR = 191,
+ GAUDI2_EVENT_SRAM22_ECC_SERR = 192,
+ GAUDI2_EVENT_SRAM23_ECC_SERR = 193,
+ GAUDI2_EVENT_SRAM24_ECC_SERR = 194,
+ GAUDI2_EVENT_SRAM25_ECC_SERR = 195,
+ GAUDI2_EVENT_SRAM26_ECC_SERR = 196,
+ GAUDI2_EVENT_SRAM27_ECC_SERR = 197,
+ GAUDI2_EVENT_SRAM28_ECC_SERR = 198,
+ GAUDI2_EVENT_SRAM29_ECC_SERR = 199,
+ GAUDI2_EVENT_SRAM30_ECC_SERR = 200,
+ GAUDI2_EVENT_SRAM31_ECC_SERR = 201,
+ GAUDI2_EVENT_SRAM0_ECC_DERR = 202,
+ GAUDI2_EVENT_SRAM1_ECC_DERR = 203,
+ GAUDI2_EVENT_SRAM2_ECC_DERR = 204,
+ GAUDI2_EVENT_SRAM3_ECC_DERR = 205,
+ GAUDI2_EVENT_SRAM4_ECC_DERR = 206,
+ GAUDI2_EVENT_SRAM5_ECC_DERR = 207,
+ GAUDI2_EVENT_SRAM6_ECC_DERR = 208,
+ GAUDI2_EVENT_SRAM7_ECC_DERR = 209,
+ GAUDI2_EVENT_SRAM8_ECC_DERR = 210,
+ GAUDI2_EVENT_SRAM9_ECC_DERR = 211,
+ GAUDI2_EVENT_SRAM10_ECC_DERR = 212,
+ GAUDI2_EVENT_SRAM11_ECC_DERR = 213,
+ GAUDI2_EVENT_SRAM12_ECC_DERR = 214,
+ GAUDI2_EVENT_SRAM13_ECC_DERR = 215,
+ GAUDI2_EVENT_SRAM14_ECC_DERR = 216,
+ GAUDI2_EVENT_SRAM15_ECC_DERR = 217,
+ GAUDI2_EVENT_SRAM16_ECC_DERR = 218,
+ GAUDI2_EVENT_SRAM17_ECC_DERR = 219,
+ GAUDI2_EVENT_SRAM18_ECC_DERR = 220,
+ GAUDI2_EVENT_SRAM19_ECC_DERR = 221,
+ GAUDI2_EVENT_SRAM20_ECC_DERR = 222,
+ GAUDI2_EVENT_SRAM21_ECC_DERR = 223,
+ GAUDI2_EVENT_SRAM22_ECC_DERR = 224,
+ GAUDI2_EVENT_SRAM23_ECC_DERR = 225,
+ GAUDI2_EVENT_SRAM24_ECC_DERR = 226,
+ GAUDI2_EVENT_SRAM25_ECC_DERR = 227,
+ GAUDI2_EVENT_SRAM26_ECC_DERR = 228,
+ GAUDI2_EVENT_SRAM27_ECC_DERR = 229,
+ GAUDI2_EVENT_SRAM28_ECC_DERR = 230,
+ GAUDI2_EVENT_SRAM29_ECC_DERR = 231,
+ GAUDI2_EVENT_SRAM30_ECC_DERR = 232,
+ GAUDI2_EVENT_SRAM31_ECC_DERR = 233,
+ GAUDI2_EVENT_CPU_GIC500 = 234,
+ GAUDI2_EVENT_HBM_0_MC0_ECC_SERR = 235,
+ GAUDI2_EVENT_HBM_1_MC0_ECC_SERR = 236,
+ GAUDI2_EVENT_HBM_2_MC0_ECC_SERR = 237,
+ GAUDI2_EVENT_HBM_3_MC0_ECC_SERR = 238,
+ GAUDI2_EVENT_HBM_4_MC0_ECC_SERR = 239,
+ GAUDI2_EVENT_HBM_5_MC0_ECC_SERR = 240,
+ GAUDI2_EVENT_HBM_0_MC1_ECC_SERR = 241,
+ GAUDI2_EVENT_HBM_1_MC1_ECC_SERR = 242,
+ GAUDI2_EVENT_HBM_2_MC1_ECC_SERR = 243,
+ GAUDI2_EVENT_HBM_3_MC1_ECC_SERR = 244,
+ GAUDI2_EVENT_HBM_4_MC1_ECC_SERR = 245,
+ GAUDI2_EVENT_HBM_5_MC1_ECC_SERR = 246,
+ GAUDI2_EVENT_HBM_0_MC0_ECC_DERR = 247,
+ GAUDI2_EVENT_HBM_1_MC0_ECC_DERR = 248,
+ GAUDI2_EVENT_HBM_2_MC0_ECC_DERR = 249,
+ GAUDI2_EVENT_HBM_3_MC0_ECC_DERR = 250,
+ GAUDI2_EVENT_HBM_4_MC0_ECC_DERR = 251,
+ GAUDI2_EVENT_HBM_5_MC0_ECC_DERR = 252,
+ GAUDI2_EVENT_HBM_0_MC1_ECC_DERR = 253,
+ GAUDI2_EVENT_HBM_1_MC1_ECC_DERR = 254,
+ GAUDI2_EVENT_HBM_2_MC1_ECC_DERR = 255,
+ GAUDI2_EVENT_HBM_3_MC1_ECC_DERR = 256,
+ GAUDI2_EVENT_HBM_4_MC1_ECC_DERR = 257,
+ GAUDI2_EVENT_HBM_5_MC1_ECC_DERR = 258,
+ GAUDI2_EVENT_HMMU_0_ECC_SERR = 259,
+ GAUDI2_EVENT_HMMU_1_ECC_SERR = 260,
+ GAUDI2_EVENT_HMMU_2_ECC_SERR = 261,
+ GAUDI2_EVENT_HMMU_3_ECC_SERR = 262,
+ GAUDI2_EVENT_HMMU_8_ECC_SERR = 263,
+ GAUDI2_EVENT_HMMU_9_ECC_SERR = 264,
+ GAUDI2_EVENT_HMMU_10_ECC_SERR = 265,
+ GAUDI2_EVENT_HMMU_11_ECC_SERR = 266,
+ GAUDI2_EVENT_HMMU_7_ECC_SERR = 267,
+ GAUDI2_EVENT_HMMU_6_ECC_SERR = 268,
+ GAUDI2_EVENT_HMMU_5_ECC_SERR = 269,
+ GAUDI2_EVENT_HMMU_4_ECC_SERR = 270,
+ GAUDI2_EVENT_HMMU_15_ECC_SERR = 271,
+ GAUDI2_EVENT_HMMU_14_ECC_SERR = 272,
+ GAUDI2_EVENT_HMMU_13_ECC_SERR = 273,
+ GAUDI2_EVENT_HMMU_12_ECC_SERR = 274,
+ GAUDI2_EVENT_HMMU_0_ECC_DERR = 275,
+ GAUDI2_EVENT_HMMU_1_ECC_DERR = 276,
+ GAUDI2_EVENT_HMMU_2_ECC_DERR = 277,
+ GAUDI2_EVENT_HMMU_3_ECC_DERR = 278,
+ GAUDI2_EVENT_HMMU_8_ECC_DERR = 279,
+ GAUDI2_EVENT_HMMU_9_ECC_DERR = 280,
+ GAUDI2_EVENT_HMMU_10_ECC_DERR = 281,
+ GAUDI2_EVENT_HMMU_11_ECC_DERR = 282,
+ GAUDI2_EVENT_HMMU_7_ECC_DERR = 283,
+ GAUDI2_EVENT_HMMU_6_ECC_DERR = 284,
+ GAUDI2_EVENT_HMMU_5_ECC_DERR = 285,
+ GAUDI2_EVENT_HMMU_4_ECC_DERR = 286,
+ GAUDI2_EVENT_HMMU_15_ECC_DERR = 287,
+ GAUDI2_EVENT_HMMU_14_ECC_DERR = 288,
+ GAUDI2_EVENT_HMMU_13_ECC_DERR = 289,
+ GAUDI2_EVENT_HMMU_12_ECC_DERR = 290,
+ GAUDI2_EVENT_PMMU_ECC_SERR_0 = 291,
+ GAUDI2_EVENT_PMMU_ECC_DERR_0 = 292,
+ GAUDI2_EVENT_DEC0_VCD_ECC_SERR = 295,
+ GAUDI2_EVENT_DEC1_VCD_ECC_SERR = 296,
+ GAUDI2_EVENT_DEC2_VCD_ECC_SERR = 297,
+ GAUDI2_EVENT_DEC3_VCD_ECC_SERR = 298,
+ GAUDI2_EVENT_DEC4_VCD_ECC_SERR = 299,
+ GAUDI2_EVENT_DEC5_VCD_ECC_SERR = 300,
+ GAUDI2_EVENT_DEC6_VCD_ECC_SERR = 301,
+ GAUDI2_EVENT_DEC7_VCD_ECC_SERR = 302,
+ GAUDI2_EVENT_DEC8_VCD_ECC_SERR = 303,
+ GAUDI2_EVENT_DEC9_VCD_ECC_SERR = 304,
+ GAUDI2_EVENT_DEC0_L2C_ECC_SERR = 305,
+ GAUDI2_EVENT_DEC1_L2C_ECC_SERR = 306,
+ GAUDI2_EVENT_DEC2_L2C_ECC_SERR = 307,
+ GAUDI2_EVENT_DEC3_L2C_ECC_SERR = 308,
+ GAUDI2_EVENT_DEC4_L2C_ECC_SERR = 309,
+ GAUDI2_EVENT_DEC5_L2C_ECC_SERR = 310,
+ GAUDI2_EVENT_DEC6_L2C_ECC_SERR = 311,
+ GAUDI2_EVENT_DEC7_L2C_ECC_SERR = 312,
+ GAUDI2_EVENT_DEC8_L2C_ECC_SERR = 313,
+ GAUDI2_EVENT_DEC9_L2C_ECC_SERR = 314,
+ GAUDI2_EVENT_DEC0_VCD_ECC_DERR = 315,
+ GAUDI2_EVENT_DEC1_VCD_ECC_DERR = 316,
+ GAUDI2_EVENT_DEC2_VCD_ECC_DERR = 317,
+ GAUDI2_EVENT_DEC3_VCD_ECC_DERR = 318,
+ GAUDI2_EVENT_DEC4_VCD_ECC_DERR = 319,
+ GAUDI2_EVENT_DEC5_VCD_ECC_DERR = 320,
+ GAUDI2_EVENT_DEC6_VCD_ECC_DERR = 321,
+ GAUDI2_EVENT_DEC7_VCD_ECC_DERR = 322,
+ GAUDI2_EVENT_DEC8_VCD_ECC_DERR = 323,
+ GAUDI2_EVENT_DEC9_VCD_ECC_DERR = 324,
+ GAUDI2_EVENT_DEC0_L2C_ECC_DERR = 325,
+ GAUDI2_EVENT_DEC1_L2C_ECC_DERR = 326,
+ GAUDI2_EVENT_DEC2_L2C_ECC_DERR = 327,
+ GAUDI2_EVENT_DEC3_L2C_ECC_DERR = 328,
+ GAUDI2_EVENT_DEC4_L2C_ECC_DERR = 329,
+ GAUDI2_EVENT_DEC5_L2C_ECC_DERR = 330,
+ GAUDI2_EVENT_DEC6_L2C_ECC_DERR = 331,
+ GAUDI2_EVENT_DEC7_L2C_ECC_DERR = 332,
+ GAUDI2_EVENT_DEC8_L2C_ECC_DERR = 333,
+ GAUDI2_EVENT_DEC9_L2C_ECC_DERR = 334,
+ GAUDI2_EVENT_HIF0_ECC_SERR = 337,
+ GAUDI2_EVENT_HIF1_ECC_SERR = 338,
+ GAUDI2_EVENT_HIF2_ECC_SERR = 339,
+ GAUDI2_EVENT_HIF3_ECC_SERR = 340,
+ GAUDI2_EVENT_HIF8_ECC_SERR = 341,
+ GAUDI2_EVENT_HIF9_ECC_SERR = 342,
+ GAUDI2_EVENT_HIF10_ECC_SERR = 343,
+ GAUDI2_EVENT_HIF11_ECC_SERR = 344,
+ GAUDI2_EVENT_HIF7_ECC_SERR = 345,
+ GAUDI2_EVENT_HIF6_ECC_SERR = 346,
+ GAUDI2_EVENT_HIF5_ECC_SERR = 347,
+ GAUDI2_EVENT_HIF4_ECC_SERR = 348,
+ GAUDI2_EVENT_HIF15_ECC_SERR = 349,
+ GAUDI2_EVENT_HIF14_ECC_SERR = 350,
+ GAUDI2_EVENT_HIF13_ECC_SERR = 351,
+ GAUDI2_EVENT_HIF12_ECC_SERR = 352,
+ GAUDI2_EVENT_HIF0_ECC_DERR = 353,
+ GAUDI2_EVENT_HIF1_ECC_DERR = 354,
+ GAUDI2_EVENT_HIF2_ECC_DERR = 355,
+ GAUDI2_EVENT_HIF3_ECC_DERR = 356,
+ GAUDI2_EVENT_HIF8_ECC_DERR = 357,
+ GAUDI2_EVENT_HIF9_ECC_DERR = 358,
+ GAUDI2_EVENT_HIF10_ECC_DERR = 359,
+ GAUDI2_EVENT_HIF11_ECC_DERR = 360,
+ GAUDI2_EVENT_HIF7_ECC_DERR = 361,
+ GAUDI2_EVENT_HIF6_ECC_DERR = 362,
+ GAUDI2_EVENT_HIF5_ECC_DERR = 363,
+ GAUDI2_EVENT_HIF4_ECC_DERR = 364,
+ GAUDI2_EVENT_HIF15_ECC_DERR = 365,
+ GAUDI2_EVENT_HIF14_ECC_DERR = 366,
+ GAUDI2_EVENT_HIF13_ECC_DERR = 367,
+ GAUDI2_EVENT_HIF12_ECC_DERR = 368,
+ GAUDI2_EVENT_NIC0_ECC_SERR = 369,
+ GAUDI2_EVENT_NIC1_ECC_SERR = 370,
+ GAUDI2_EVENT_NIC2_ECC_SERR = 371,
+ GAUDI2_EVENT_NIC3_ECC_SERR = 372,
+ GAUDI2_EVENT_NIC4_ECC_SERR = 373,
+ GAUDI2_EVENT_NIC5_ECC_SERR = 374,
+ GAUDI2_EVENT_NIC6_ECC_SERR = 375,
+ GAUDI2_EVENT_NIC7_ECC_SERR = 376,
+ GAUDI2_EVENT_NIC8_ECC_SERR = 377,
+ GAUDI2_EVENT_NIC9_ECC_SERR = 378,
+ GAUDI2_EVENT_NIC10_ECC_SERR = 379,
+ GAUDI2_EVENT_NIC11_ECC_SERR = 380,
+ GAUDI2_EVENT_NIC0_ECC_DERR = 381,
+ GAUDI2_EVENT_NIC1_ECC_DERR = 382,
+ GAUDI2_EVENT_NIC2_ECC_DERR = 383,
+ GAUDI2_EVENT_NIC3_ECC_DERR = 384,
+ GAUDI2_EVENT_NIC4_ECC_DERR = 385,
+ GAUDI2_EVENT_NIC5_ECC_DERR = 386,
+ GAUDI2_EVENT_NIC6_ECC_DERR = 387,
+ GAUDI2_EVENT_NIC7_ECC_DERR = 388,
+ GAUDI2_EVENT_NIC8_ECC_DERR = 389,
+ GAUDI2_EVENT_NIC9_ECC_DERR = 390,
+ GAUDI2_EVENT_NIC10_ECC_DERR = 391,
+ GAUDI2_EVENT_NIC11_ECC_DERR = 392,
+ GAUDI2_EVENT_SM0_ECC_DERR = 393,
+ GAUDI2_EVENT_SM1_ECC_DERR = 394,
+ GAUDI2_EVENT_SM2_ECC_DERR = 395,
+ GAUDI2_EVENT_SM3_ECC_DERR = 396,
+ GAUDI2_EVENT_SM0_ECC_SERR = 397,
+ GAUDI2_EVENT_SM1_ECC_SERR = 398,
+ GAUDI2_EVENT_SM2_ECC_SERR = 399,
+ GAUDI2_EVENT_SM3_ECC_SERR = 400,
+ GAUDI2_EVENT_XBAR0_ECC_SERR = 401,
+ GAUDI2_EVENT_XBAR1_ECC_SERR = 402,
+ GAUDI2_EVENT_XBAR2_ECC_SERR = 403,
+ GAUDI2_EVENT_XBAR3_ECC_SERR = 404,
+ GAUDI2_EVENT_XBAR0_ECC_DERR = 405,
+ GAUDI2_EVENT_XBAR1_ECC_DERR = 406,
+ GAUDI2_EVENT_XBAR2_ECC_DERR = 407,
+ GAUDI2_EVENT_XBAR3_ECC_DERR = 408,
+ GAUDI2_EVENT_ARC0_ECC_SERR = 409,
+ GAUDI2_EVENT_ARC0_ECC_DERR = 410,
+ GAUDI2_EVENT_PCIE_BME_CLEARD = 411,
+ GAUDI2_EVENT_PCIE_ADDR_DEC_ERR = 412,
+ GAUDI2_EVENT_TPC0_AXI_ERR_RSP = 413,
+ GAUDI2_EVENT_TPC1_AXI_ERR_RSP = 414,
+ GAUDI2_EVENT_TPC2_AXI_ERR_RSP = 415,
+ GAUDI2_EVENT_TPC3_AXI_ERR_RSP = 416,
+ GAUDI2_EVENT_TPC4_AXI_ERR_RSP = 417,
+ GAUDI2_EVENT_TPC5_AXI_ERR_RSP = 418,
+ GAUDI2_EVENT_TPC6_AXI_ERR_RSP = 419,
+ GAUDI2_EVENT_TPC7_AXI_ERR_RSP = 420,
+ GAUDI2_EVENT_TPC8_AXI_ERR_RSP = 421,
+ GAUDI2_EVENT_TPC9_AXI_ERR_RSP = 422,
+ GAUDI2_EVENT_TPC10_AXI_ERR_RSP = 423,
+ GAUDI2_EVENT_TPC11_AXI_ERR_RSP = 424,
+ GAUDI2_EVENT_TPC12_AXI_ERR_RSP = 425,
+ GAUDI2_EVENT_TPC13_AXI_ERR_RSP = 426,
+ GAUDI2_EVENT_TPC14_AXI_ERR_RSP = 427,
+ GAUDI2_EVENT_TPC15_AXI_ERR_RSP = 428,
+ GAUDI2_EVENT_TPC16_AXI_ERR_RSP = 429,
+ GAUDI2_EVENT_TPC17_AXI_ERR_RSP = 430,
+ GAUDI2_EVENT_TPC18_AXI_ERR_RSP = 431,
+ GAUDI2_EVENT_TPC19_AXI_ERR_RSP = 432,
+ GAUDI2_EVENT_TPC20_AXI_ERR_RSP = 433,
+ GAUDI2_EVENT_TPC21_AXI_ERR_RSP = 434,
+ GAUDI2_EVENT_TPC22_AXI_ERR_RSP = 435,
+ GAUDI2_EVENT_TPC23_AXI_ERR_RSP = 436,
+ GAUDI2_EVENT_TPC24_AXI_ERR_RSP = 437,
+ GAUDI2_EVENT_CPU_AXI_ECC = 438,
+ GAUDI2_EVENT_CPU_L2_RAM_ECC = 439,
+ GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP = 440,
+ GAUDI2_EVENT_MME0_SBTE1_AXI_ERR_RSP = 441,
+ GAUDI2_EVENT_MME0_SBTE2_AXI_ERR_RSP = 442,
+ GAUDI2_EVENT_MME0_SBTE3_AXI_ERR_RSP = 443,
+ GAUDI2_EVENT_MME0_SBTE4_AXI_ERR_RSP = 444,
+ GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE = 445,
+ GAUDI2_EVENT_MME0_QMAN_SW_ERROR = 446,
+ GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP = 447,
+ GAUDI2_EVENT_MME1_SBTE1_AXI_ERR_RSP = 448,
+ GAUDI2_EVENT_MME1_SBTE2_AXI_ERR_RSP = 449,
+ GAUDI2_EVENT_MME1_SBTE3_AXI_ERR_RSP = 450,
+ GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP = 451,
+ GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE = 452,
+ GAUDI2_EVENT_MME1_QMAN_SW_ERROR = 453,
+ GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP = 454,
+ GAUDI2_EVENT_MME2_SBTE1_AXI_ERR_RSP = 455,
+ GAUDI2_EVENT_MME2_SBTE2_AXI_ERR_RSP = 456,
+ GAUDI2_EVENT_MME2_SBTE3_AXI_ERR_RSP = 457,
+ GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP = 458,
+ GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE = 459,
+ GAUDI2_EVENT_MME2_QMAN_SW_ERROR = 460,
+ GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP = 461,
+ GAUDI2_EVENT_MME3_SBTE1_AXI_ERR_RSP = 462,
+ GAUDI2_EVENT_MME3_SBTE2_AXI_ERR_RSP = 463,
+ GAUDI2_EVENT_MME3_SBTE3_AXI_ERR_RSP = 464,
+ GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP = 465,
+ GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE = 466,
+ GAUDI2_EVENT_MME3_QMAN_SW_ERROR = 467,
+ GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR = 468,
+ GAUDI2_EVENT_PSOC_CPU_PLL_LOCK_ERR = 469,
+ GAUDI2_EVENT_DCORE3_TPC_PLL_LOCK_ERR = 470,
+ GAUDI2_EVENT_DCORE3_NIC_PLL_LOCK_ERR = 471,
+ GAUDI2_EVENT_DCORE3_XBAR_MMU_PLL_LOCK_ERR = 472,
+ GAUDI2_EVENT_DCORE3_XBAR_DMA_PLL_LOCK_ERR = 473,
+ GAUDI2_EVENT_DCORE3_XBAR_IF_PLL_LOCK_ERR = 474,
+ GAUDI2_EVENT_DCORE3_XBAR_BANK_PLL_LOCK_ERR = 475,
+ GAUDI2_EVENT_DCORE1_XBAR_MMU_PLL_LOCK_ERR = 476,
+ GAUDI2_EVENT_DCORE1_XBAR_DMA_PLL_LOCK_ERR = 477,
+ GAUDI2_EVENT_DCORE1_XBAR_IF_PLL_LOCK_ERR = 478,
+ GAUDI2_EVENT_DCORE1_XBAR_MESH_PLL_LOCK_ERR = 479,
+ GAUDI2_EVENT_DCORE1_TPC_PLL_LOCK_ERR = 480,
+ GAUDI2_EVENT_DCORE1_NIC_PLL_LOCK_ERR = 481,
+ GAUDI2_EVENT_PMMU_MME_PLL_LOCK_ERR = 482,
+ GAUDI2_EVENT_DCORE0_TPC_PLL_LOCK_ERR = 483,
+ GAUDI2_EVENT_DCORE0_PCI_PLL_LOCK_ERR = 484,
+ GAUDI2_EVENT_DCORE0_XBAR_MMU_PLL_LOCK_ERR = 485,
+ GAUDI2_EVENT_DCORE0_XBAR_DMA_PLL_LOCK_ERR = 486,
+ GAUDI2_EVENT_DCORE0_XBAR_IF_PLL_LOCK_ERR = 487,
+ GAUDI2_EVENT_DCORE0_XBAR_MESH_PLL_LOCK_ERR = 488,
+ GAUDI2_EVENT_DCORE2_XBAR_MMU_PLL_LOCK_ERR = 489,
+ GAUDI2_EVENT_DCORE2_XBAR_DMA_PLL_LOCK_ERR = 490,
+ GAUDI2_EVENT_DCORE2_XBAR_IF_PLL_LOCK_ERR = 491,
+ GAUDI2_EVENT_DCORE2_XBAR_BANK_PLL_LOCK_ERR = 492,
+ GAUDI2_EVENT_DCORE2_TPC_PLL_LOCK_ERR = 493,
+ GAUDI2_EVENT_PSOC_VID_PLL_LOCK_ERR = 494,
+ GAUDI2_EVENT_PMMU_VID_PLL_LOCK_ERR = 495,
+ GAUDI2_EVENT_DCORE3_HBM_PLL_LOCK_ERR = 496,
+ GAUDI2_EVENT_DCORE1_XBAR_HBM_PLL_LOCK_ERR = 497,
+ GAUDI2_EVENT_DCORE1_HBM_PLL_LOCK_ERR = 498,
+ GAUDI2_EVENT_DCORE0_HBM_PLL_LOCK_ERR = 499,
+ GAUDI2_EVENT_DCORE2_XBAR_HBM_PLL_LOCK_ERR = 500,
+ GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR = 501,
+ GAUDI2_EVENT_CPU_AXI_ERR_RSP = 502,
+ GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP = 503,
+ GAUDI2_EVENT_HMMU_1_AXI_ERR_RSP = 504,
+ GAUDI2_EVENT_HMMU_2_AXI_ERR_RSP = 505,
+ GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP = 506,
+ GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP = 507,
+ GAUDI2_EVENT_HMMU_9_AXI_ERR_RSP = 508,
+ GAUDI2_EVENT_HMMU_10_AXI_ERR_RSP = 509,
+ GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP = 510,
+ GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP = 511,
+ GAUDI2_EVENT_HMMU_6_AXI_ERR_RSP = 512,
+ GAUDI2_EVENT_HMMU_5_AXI_ERR_RSP = 513,
+ GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP = 514,
+ GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP = 515,
+ GAUDI2_EVENT_HMMU_14_AXI_ERR_RSP = 516,
+ GAUDI2_EVENT_HMMU_13_AXI_ERR_RSP = 517,
+ GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP = 518,
+ GAUDI2_EVENT_PMMU_FATAL_0 = 519,
+ GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0 = 520,
+ GAUDI2_EVENT_VM0_ALARM_A = 521,
+ GAUDI2_EVENT_VM0_ALARM_B = 522,
+ GAUDI2_EVENT_VM1_ALARM_A = 523,
+ GAUDI2_EVENT_VM1_ALARM_B = 524,
+ GAUDI2_EVENT_VM2_ALARM_A = 525,
+ GAUDI2_EVENT_VM2_ALARM_B = 526,
+ GAUDI2_EVENT_VM3_ALARM_A = 527,
+ GAUDI2_EVENT_VM3_ALARM_B = 528,
+ GAUDI2_EVENT_PSOC_AXI_ERR_RSP = 529,
+ GAUDI2_EVENT_PSOC_PRSTN_FALL = 530,
+ GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP = 539,
+ GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP = 540,
+ GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP = 541,
+ GAUDI2_EVENT_HBM_CATTRIP_0 = 542,
+ GAUDI2_EVENT_HBM_CATTRIP_1 = 543,
+ GAUDI2_EVENT_HBM_CATTRIP_2 = 544,
+ GAUDI2_EVENT_HBM_CATTRIP_3 = 545,
+ GAUDI2_EVENT_HBM_CATTRIP_4 = 546,
+ GAUDI2_EVENT_HBM_CATTRIP_5 = 547,
+ GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE = 548,
+ GAUDI2_EVENT_HBM0_MC0_SEI_NON_SEVERE = 549,
+ GAUDI2_EVENT_HBM0_MC1_SEI_SEVERE = 550,
+ GAUDI2_EVENT_HBM0_MC1_SEI_NON_SEVERE = 551,
+ GAUDI2_EVENT_HBM1_MC0_SEI_SEVERE = 552,
+ GAUDI2_EVENT_HBM1_MC0_SEI_NON_SEVERE = 553,
+ GAUDI2_EVENT_HBM1_MC1_SEI_SEVERE = 554,
+ GAUDI2_EVENT_HBM1_MC1_SEI_NON_SEVERE = 555,
+ GAUDI2_EVENT_HBM2_MC0_SEI_SEVERE = 556,
+ GAUDI2_EVENT_HBM2_MC0_SEI_NON_SEVERE = 557,
+ GAUDI2_EVENT_HBM2_MC1_SEI_SEVERE = 558,
+ GAUDI2_EVENT_HBM2_MC1_SEI_NON_SEVERE = 559,
+ GAUDI2_EVENT_HBM3_MC0_SEI_SEVERE = 560,
+ GAUDI2_EVENT_HBM3_MC0_SEI_NON_SEVERE = 561,
+ GAUDI2_EVENT_HBM3_MC1_SEI_SEVERE = 562,
+ GAUDI2_EVENT_HBM3_MC1_SEI_NON_SEVERE = 563,
+ GAUDI2_EVENT_HBM4_MC0_SEI_SEVERE = 564,
+ GAUDI2_EVENT_HBM4_MC0_SEI_NON_SEVERE = 565,
+ GAUDI2_EVENT_HBM4_MC1_SEI_SEVERE = 566,
+ GAUDI2_EVENT_HBM4_MC1_SEI_NON_SEVERE = 567,
+ GAUDI2_EVENT_HBM5_MC0_SEI_SEVERE = 568,
+ GAUDI2_EVENT_HBM5_MC0_SEI_NON_SEVERE = 569,
+ GAUDI2_EVENT_HBM5_MC1_SEI_SEVERE = 570,
+ GAUDI2_EVENT_HBM5_MC1_SEI_NON_SEVERE = 571,
+ GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE = 572,
+ GAUDI2_EVENT_DEC1_AXI_ERR_RSPONSE = 573,
+ GAUDI2_EVENT_DEC2_AXI_ERR_RSPONSE = 574,
+ GAUDI2_EVENT_DEC3_AXI_ERR_RSPONSE = 575,
+ GAUDI2_EVENT_DEC4_AXI_ERR_RSPONSE = 576,
+ GAUDI2_EVENT_DEC5_AXI_ERR_RSPONSE = 577,
+ GAUDI2_EVENT_DEC6_AXI_ERR_RSPONSE = 578,
+ GAUDI2_EVENT_DEC7_AXI_ERR_RSPONSE = 579,
+ GAUDI2_EVENT_DEC8_AXI_ERR_RSPONSE = 580,
+ GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE = 581,
+ GAUDI2_EVENT_HIF0_FATAL = 584,
+ GAUDI2_EVENT_HIF1_FATAL = 585,
+ GAUDI2_EVENT_HIF2_FATAL = 586,
+ GAUDI2_EVENT_HIF3_FATAL = 587,
+ GAUDI2_EVENT_HIF8_FATAL = 588,
+ GAUDI2_EVENT_HIF9_FATAL = 589,
+ GAUDI2_EVENT_HIF10_FATAL = 590,
+ GAUDI2_EVENT_HIF11_FATAL = 591,
+ GAUDI2_EVENT_HIF7_FATAL = 592,
+ GAUDI2_EVENT_HIF6_FATAL = 593,
+ GAUDI2_EVENT_HIF5_FATAL = 594,
+ GAUDI2_EVENT_HIF4_FATAL = 595,
+ GAUDI2_EVENT_HIF15_FATAL = 596,
+ GAUDI2_EVENT_HIF14_FATAL = 597,
+ GAUDI2_EVENT_HIF13_FATAL = 598,
+ GAUDI2_EVENT_HIF12_FATAL = 599,
+ GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE = 600,
+ GAUDI2_EVENT_NIC1_AXI_ERROR_RESPONSE = 601,
+ GAUDI2_EVENT_NIC2_AXI_ERROR_RESPONSE = 602,
+ GAUDI2_EVENT_NIC3_AXI_ERROR_RESPONSE = 603,
+ GAUDI2_EVENT_NIC4_AXI_ERROR_RESPONSE = 604,
+ GAUDI2_EVENT_NIC5_AXI_ERROR_RESPONSE = 605,
+ GAUDI2_EVENT_NIC6_AXI_ERROR_RESPONSE = 606,
+ GAUDI2_EVENT_NIC7_AXI_ERROR_RESPONSE = 607,
+ GAUDI2_EVENT_NIC8_AXI_ERROR_RESPONSE = 608,
+ GAUDI2_EVENT_NIC9_AXI_ERROR_RESPONSE = 609,
+ GAUDI2_EVENT_NIC10_AXI_ERROR_RESPONSE = 610,
+ GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE = 611,
+ GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE = 612,
+ GAUDI2_EVENT_SM1_AXI_ERROR_RESPONSE = 613,
+ GAUDI2_EVENT_SM2_AXI_ERROR_RESPONSE = 614,
+ GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE = 615,
+ GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0 = 616,
+ GAUDI2_EVENT_PCIE_PME_MSG_RECEIVED = 618,
+ GAUDI2_EVENT_PCIE_FLR_REQUESTED = 619,
+ GAUDI2_EVENT_PCIE_HOT_RESET_REQ = 620,
+ GAUDI2_EVENT_PCIE_PERST = 621,
+ GAUDI2_EVENT_PCIE_APB_TIMEOUT = 622,
+ GAUDI2_EVENT_PCIE_BM_D_P_WR = 623,
+ GAUDI2_EVENT_PCIE_BM_D_RD = 624,
+ GAUDI2_EVENT_PCIE_BM_U_P_WR = 625,
+ GAUDI2_EVENT_PCIE_BM_U_RD = 626,
+ GAUDI2_EVENT_PCIE_FATAL_ERR = 627,
+ GAUDI2_EVENT_PCIE_PERST_FAL = 628,
+ GAUDI2_EVENT_PCIE_VDM_READY = 629,
+ GAUDI2_EVENT_PCIE_VDM_ERROR = 630,
+ GAUDI2_EVENT_PCIE_P2P_MSIX = 631,
+ GAUDI2_EVENT_PCIE_DRAIN_COMPLETE = 632,
+ GAUDI2_EVENT_TPC0_BMON_SPMU = 633,
+ GAUDI2_EVENT_TPC0_KERNEL_ERR = 634,
+ GAUDI2_EVENT_TPC1_BMON_SPMU = 635,
+ GAUDI2_EVENT_TPC1_KERNEL_ERR = 636,
+ GAUDI2_EVENT_TPC2_BMON_SPMU = 637,
+ GAUDI2_EVENT_TPC2_KERNEL_ERR = 638,
+ GAUDI2_EVENT_TPC3_BMON_SPMU = 639,
+ GAUDI2_EVENT_TPC3_KERNEL_ERR = 640,
+ GAUDI2_EVENT_TPC4_BMON_SPMU = 641,
+ GAUDI2_EVENT_TPC4_KERNEL_ERR = 642,
+ GAUDI2_EVENT_TPC5_BMON_SPMU = 643,
+ GAUDI2_EVENT_TPC5_KERNEL_ERR = 644,
+ GAUDI2_EVENT_TPC6_BMON_SPMU = 645,
+ GAUDI2_EVENT_TPC6_KERNEL_ERR = 646,
+ GAUDI2_EVENT_TPC7_BMON_SPMU = 647,
+ GAUDI2_EVENT_TPC7_KERNEL_ERR = 648,
+ GAUDI2_EVENT_TPC8_BMON_SPMU = 649,
+ GAUDI2_EVENT_TPC8_KERNEL_ERR = 650,
+ GAUDI2_EVENT_TPC9_BMON_SPMU = 651,
+ GAUDI2_EVENT_TPC9_KERNEL_ERR = 652,
+ GAUDI2_EVENT_TPC10_BMON_SPMU = 653,
+ GAUDI2_EVENT_TPC10_KERNEL_ERR = 654,
+ GAUDI2_EVENT_TPC11_BMON_SPMU = 655,
+ GAUDI2_EVENT_TPC11_KERNEL_ERR = 656,
+ GAUDI2_EVENT_TPC12_BMON_SPMU = 657,
+ GAUDI2_EVENT_TPC12_KERNEL_ERR = 658,
+ GAUDI2_EVENT_TPC13_BMON_SPMU = 659,
+ GAUDI2_EVENT_TPC13_KERNEL_ERR = 660,
+ GAUDI2_EVENT_TPC14_BMON_SPMU = 661,
+ GAUDI2_EVENT_TPC14_KERNEL_ERR = 662,
+ GAUDI2_EVENT_TPC15_BMON_SPMU = 663,
+ GAUDI2_EVENT_TPC15_KERNEL_ERR = 664,
+ GAUDI2_EVENT_TPC16_BMON_SPMU = 665,
+ GAUDI2_EVENT_TPC16_KERNEL_ERR = 666,
+ GAUDI2_EVENT_TPC17_BMON_SPMU = 667,
+ GAUDI2_EVENT_TPC17_KERNEL_ERR = 668,
+ GAUDI2_EVENT_TPC18_BMON_SPMU = 669,
+ GAUDI2_EVENT_TPC18_KERNEL_ERR = 670,
+ GAUDI2_EVENT_TPC19_BMON_SPMU = 671,
+ GAUDI2_EVENT_TPC19_KERNEL_ERR = 672,
+ GAUDI2_EVENT_TPC20_BMON_SPMU = 673,
+ GAUDI2_EVENT_TPC20_KERNEL_ERR = 674,
+ GAUDI2_EVENT_TPC21_BMON_SPMU = 675,
+ GAUDI2_EVENT_TPC21_KERNEL_ERR = 676,
+ GAUDI2_EVENT_TPC22_BMON_SPMU = 677,
+ GAUDI2_EVENT_TPC22_KERNEL_ERR = 678,
+ GAUDI2_EVENT_TPC23_BMON_SPMU = 679,
+ GAUDI2_EVENT_TPC23_KERNEL_ERR = 680,
+ GAUDI2_EVENT_TPC24_BMON_SPMU = 681,
+ GAUDI2_EVENT_TPC24_KERNEL_ERR = 682,
+ GAUDI2_EVENT_MME0_SPI_BASE = 683,
+ GAUDI2_EVENT_MME0_CTRL_BMON_SPMU = 688,
+ GAUDI2_EVENT_MME0_SBTE_BMON_SPMU = 689,
+ GAUDI2_EVENT_MME0_WAP_BMON_SPMU = 690,
+ GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID = 691,
+ GAUDI2_EVENT_MME1_SPI_BASE = 692,
+ GAUDI2_EVENT_MME1_CTRL_BMON_SPMU = 697,
+ GAUDI2_EVENT_MME1_SBTE_BMON_SPMU = 698,
+ GAUDI2_EVENT_MME1_WAP_BMON_SPMU = 699,
+ GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID = 700,
+ GAUDI2_EVENT_MME2_SPI_BASE = 701,
+ GAUDI2_EVENT_MME2_CTRL_BMON_SPMU = 706,
+ GAUDI2_EVENT_MME2_SBTE_BMON_SPMU = 707,
+ GAUDI2_EVENT_MME2_WAP_BMON_SPMU = 708,
+ GAUDI2_EVENT_MME2_WAP_SOURCE_RESULT_INVALID = 709,
+ GAUDI2_EVENT_MME3_SPI_BASE = 710,
+ GAUDI2_EVENT_MME3_CTRL_BMON_SPMU = 715,
+ GAUDI2_EVENT_MME3_SBTE_BMON_SPMU = 716,
+ GAUDI2_EVENT_MME3_WAP_BMON_SPMU = 717,
+ GAUDI2_EVENT_MME3_WAP_SOURCE_RESULT_INVALID = 718,
+ GAUDI2_EVENT_HMMU0_SPI_BASE = 719,
+ GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM = 720,
+ GAUDI2_EVENT_HMMU0_SECURITY_ERROR = 721,
+ GAUDI2_EVENT_HMMU1_SPI_BASE = 722,
+ GAUDI2_EVENT_HMMU1_PAGE_FAULT_WR_PERM = 723,
+ GAUDI2_EVENT_HMMU1_SECURITY_ERROR = 724,
+ GAUDI2_EVENT_HMMU2_SPI_BASE = 725,
+ GAUDI2_EVENT_HMMU2_PAGE_FAULT_WR_PERM = 726,
+ GAUDI2_EVENT_HMMU2_SECURITY_ERROR = 727,
+ GAUDI2_EVENT_HMMU3_SPI_BASE = 728,
+ GAUDI2_EVENT_HMMU3_PAGE_FAULT_WR_PERM = 729,
+ GAUDI2_EVENT_HMMU3_SECURITY_ERROR = 730,
+ GAUDI2_EVENT_HMMU8_SPI_BASE = 731,
+ GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM = 732,
+ GAUDI2_EVENT_HMMU8_SECURITY_ERROR = 733,
+ GAUDI2_EVENT_HMMU9_SPI_BASE = 734,
+ GAUDI2_EVENT_HMMU9_PAGE_FAULT_WR_PERM = 735,
+ GAUDI2_EVENT_HMMU9_SECURITY_ERROR = 736,
+ GAUDI2_EVENT_HMMU10_SPI_BASE = 737,
+ GAUDI2_EVENT_HMMU10_PAGE_FAULT_WR_PERM = 738,
+ GAUDI2_EVENT_HMMU10_SECURITY_ERROR = 739,
+ GAUDI2_EVENT_HMMU11_SPI_BASE = 740,
+ GAUDI2_EVENT_HMMU11_PAGE_FAULT_WR_PERM = 741,
+ GAUDI2_EVENT_HMMU11_SECURITY_ERROR = 742,
+ GAUDI2_EVENT_HMMU7_SPI_BASE = 743,
+ GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM = 744,
+ GAUDI2_EVENT_HMMU7_SECURITY_ERROR = 745,
+ GAUDI2_EVENT_HMMU6_SPI_BASE = 746,
+ GAUDI2_EVENT_HMMU6_PAGE_FAULT_WR_PERM = 747,
+ GAUDI2_EVENT_HMMU6_SECURITY_ERROR = 748,
+ GAUDI2_EVENT_HMMU5_SPI_BASE = 749,
+ GAUDI2_EVENT_HMMU5_PAGE_FAULT_WR_PERM = 750,
+ GAUDI2_EVENT_HMMU5_SECURITY_ERROR = 751,
+ GAUDI2_EVENT_HMMU4_SPI_BASE = 752,
+ GAUDI2_EVENT_HMMU4_PAGE_FAULT_WR_PERM = 753,
+ GAUDI2_EVENT_HMMU4_SECURITY_ERROR = 754,
+ GAUDI2_EVENT_HMMU15_SPI_BASE = 755,
+ GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM = 756,
+ GAUDI2_EVENT_HMMU15_SECURITY_ERROR = 757,
+ GAUDI2_EVENT_HMMU14_SPI_BASE = 758,
+ GAUDI2_EVENT_HMMU14_PAGE_FAULT_WR_PERM = 759,
+ GAUDI2_EVENT_HMMU14_SECURITY_ERROR = 760,
+ GAUDI2_EVENT_HMMU13_SPI_BASE = 761,
+ GAUDI2_EVENT_HMMU13_PAGE_FAULT_WR_PERM = 762,
+ GAUDI2_EVENT_HMMU13_SECURITY_ERROR = 763,
+ GAUDI2_EVENT_HMMU12_SPI_BASE = 764,
+ GAUDI2_EVENT_HMMU12_PAGE_FAULT_WR_PERM = 765,
+ GAUDI2_EVENT_HMMU12_SECURITY_ERROR = 766,
+ GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM = 768,
+ GAUDI2_EVENT_PMMU0_SECURITY_ERROR = 769,
+ GAUDI2_EVENT_HDMA2_BM_SPMU = 770,
+ GAUDI2_EVENT_HDMA3_BM_SPMU = 772,
+ GAUDI2_EVENT_HDMA0_BM_SPMU = 774,
+ GAUDI2_EVENT_HDMA1_BM_SPMU = 776,
+ GAUDI2_EVENT_HDMA6_BM_SPMU = 778,
+ GAUDI2_EVENT_HDMA7_BM_SPMU = 780,
+ GAUDI2_EVENT_HDMA4_BM_SPMU = 782,
+ GAUDI2_EVENT_HDMA5_BM_SPMU = 784,
+ GAUDI2_EVENT_KDMA_BM_SPMU = 786,
+ GAUDI2_EVENT_PDMA0_BM_SPMU = 788,
+ GAUDI2_EVENT_PDMA1_BM_SPMU = 789,
+ GAUDI2_EVENT_HBM0_MC0_SPI = 790,
+ GAUDI2_EVENT_HBM0_MC1_SPI = 791,
+ GAUDI2_EVENT_HBM1_MC0_SPI = 792,
+ GAUDI2_EVENT_HBM1_MC1_SPI = 793,
+ GAUDI2_EVENT_HBM2_MC0_SPI = 794,
+ GAUDI2_EVENT_HBM2_MC1_SPI = 795,
+ GAUDI2_EVENT_HBM3_MC0_SPI = 796,
+ GAUDI2_EVENT_HBM3_MC1_SPI = 797,
+ GAUDI2_EVENT_HBM4_MC0_SPI = 798,
+ GAUDI2_EVENT_HBM4_MC1_SPI = 799,
+ GAUDI2_EVENT_HBM5_MC0_SPI = 800,
+ GAUDI2_EVENT_HBM5_MC1_SPI = 801,
+ GAUDI2_EVENT_CPU_BMON = 802,
+ GAUDI2_EVENT_CPU_BMON_1 = 803,
+ GAUDI2_EVENT_TS_A_SOUTH_0 = 804,
+ GAUDI2_EVENT_TS_A_NORTH_1 = 805,
+ GAUDI2_EVENT_TS_A_EAST_2 = 806,
+ GAUDI2_EVENT_TS_A_WEST_3 = 807,
+ GAUDI2_EVENT_PSOC0_GPIO_79_64 = 812,
+ GAUDI2_EVENT_PSOC1_GPIO_63_48 = 813,
+ GAUDI2_EVENT_PSOC2_GPIO_47_32 = 814,
+ GAUDI2_EVENT_PSOC3_GPIO_31_16 = 815,
+ GAUDI2_EVENT_PSOC4_GPIO_15_0 = 816,
+ GAUDI2_EVENT_PSOC58_RPM_READY = 870,
+ GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN = 871,
+ GAUDI2_EVENT_PSOC60_GPIO_95_80 = 872,
+ GAUDI2_EVENT_PSOC62_QSPI_INTERRUPT = 874,
+ GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT = 875,
+ GAUDI2_EVENT_PSOC64_ADC0_INTERRUPT = 876,
+ GAUDI2_EVENT_PSOC65_PID = 877,
+ GAUDI2_EVENT_PSOC66_IC_SLV_SMBALERT_DET_INTR = 878,
+ GAUDI2_EVENT_PSOC75_SVID0_PARITY_ERROR_INTERRUPT = 887,
+ GAUDI2_EVENT_PSOC76_SVID1_PARITY_ERROR_INTERRUPT = 888,
+ GAUDI2_EVENT_PSOC77_SVID_PARITY_ERROR_INTERRUPT = 889,
+ GAUDI2_EVENT_PSOC78_SVID0_READY_INTERRUPT = 890,
+ GAUDI2_EVENT_PSOC79_SVID1_READY_INTERRUPT = 891,
+ GAUDI2_EVENT_PSOC80_SVID2_READY_INTERRUPT = 892,
+ GAUDI2_EVENT_PSOC81_ADC1_INTERRUPT = 893,
+ GAUDI2_EVENT_PSOC82_SVID_COMBINED_ALERT_INTERRUPT = 894,
+ GAUDI2_EVENT_PSOC83_SPI_WARE_OUT_ATTACK_OR_ARP = 895,
+ GAUDI2_EVENT_DEC0_SPI = 896,
+ GAUDI2_EVENT_DEC0_BMON_SPMU = 897,
+ GAUDI2_EVENT_DEC1_SPI = 898,
+ GAUDI2_EVENT_DEC1_BMON_SPMU = 899,
+ GAUDI2_EVENT_DEC2_SPI = 900,
+ GAUDI2_EVENT_DEC2_BMON_SPMU = 901,
+ GAUDI2_EVENT_DEC3_SPI = 902,
+ GAUDI2_EVENT_DEC3_BMON_SPMU = 903,
+ GAUDI2_EVENT_DEC4_SPI = 904,
+ GAUDI2_EVENT_DEC4_BMON_SPMU = 905,
+ GAUDI2_EVENT_DEC5_SPI = 906,
+ GAUDI2_EVENT_DEC5_BMON_SPMU = 907,
+ GAUDI2_EVENT_DEC6_SPI = 908,
+ GAUDI2_EVENT_DEC6_BMON_SPMU = 909,
+ GAUDI2_EVENT_DEC7_SPI = 910,
+ GAUDI2_EVENT_DEC7_BMON_SPMU = 911,
+ GAUDI2_EVENT_DEC8_SPI = 912,
+ GAUDI2_EVENT_DEC8_BMON_SPMU = 913,
+ GAUDI2_EVENT_DEC9_SPI = 914,
+ GAUDI2_EVENT_DEC9_BMON_SPMU = 915,
+ GAUDI2_EVENT_HIF0_SPI_WARN = 918,
+ GAUDI2_EVENT_HIF1_SPI_WARN = 920,
+ GAUDI2_EVENT_HIF2_SPI_WARN = 922,
+ GAUDI2_EVENT_HIF3_SPI_WARN = 924,
+ GAUDI2_EVENT_HIF8_SPI_WARN = 926,
+ GAUDI2_EVENT_HIF9_SPI_WARN = 928,
+ GAUDI2_EVENT_HIF10_SPI_WARN = 930,
+ GAUDI2_EVENT_HIF11_SPI_WARN = 932,
+ GAUDI2_EVENT_HIF7_SPI_WARN = 934,
+ GAUDI2_EVENT_HIF6_SPI_WARN = 936,
+ GAUDI2_EVENT_HIF5_SPI_WARN = 938,
+ GAUDI2_EVENT_HIF4_SPI_WARN = 940,
+ GAUDI2_EVENT_HIF15_SPI_WARN = 942,
+ GAUDI2_EVENT_HIF14_SPI_WARN = 944,
+ GAUDI2_EVENT_HIF13_SPI_WARN = 946,
+ GAUDI2_EVENT_HIF12_SPI_WARN = 948,
+ GAUDI2_EVENT_NIC0_BMON_SPMU = 951,
+ GAUDI2_EVENT_NIC0_SW_ERROR = 952,
+ GAUDI2_EVENT_NIC1_BMON_SPMU = 955,
+ GAUDI2_EVENT_NIC1_SW_ERROR = 956,
+ GAUDI2_EVENT_NIC2_BMON_SPMU = 959,
+ GAUDI2_EVENT_NIC2_SW_ERROR = 960,
+ GAUDI2_EVENT_NIC3_BMON_SPMU = 963,
+ GAUDI2_EVENT_NIC3_SW_ERROR = 964,
+ GAUDI2_EVENT_NIC4_BMON_SPMU = 967,
+ GAUDI2_EVENT_NIC4_SW_ERROR = 968,
+ GAUDI2_EVENT_NIC5_BMON_SPMU = 971,
+ GAUDI2_EVENT_NIC5_SW_ERROR = 972,
+ GAUDI2_EVENT_NIC6_BMON_SPMU = 975,
+ GAUDI2_EVENT_NIC6_SW_ERROR = 976,
+ GAUDI2_EVENT_NIC7_BMON_SPMU = 979,
+ GAUDI2_EVENT_NIC7_SW_ERROR = 980,
+ GAUDI2_EVENT_NIC8_BMON_SPMU = 983,
+ GAUDI2_EVENT_NIC8_SW_ERROR = 984,
+ GAUDI2_EVENT_NIC9_BMON_SPMU = 987,
+ GAUDI2_EVENT_NIC9_SW_ERROR = 988,
+ GAUDI2_EVENT_NIC10_BMON_SPMU = 991,
+ GAUDI2_EVENT_NIC10_SW_ERROR = 992,
+ GAUDI2_EVENT_NIC11_BMON_SPMU = 995,
+ GAUDI2_EVENT_NIC11_SW_ERROR = 996,
+ GAUDI2_EVENT_ROTATOR0_SERR = 1118,
+ GAUDI2_EVENT_ROTATOR1_SERR = 1119,
+ GAUDI2_EVENT_ROTATOR0_DERR = 1120,
+ GAUDI2_EVENT_ROTATOR1_DERR = 1121,
+ GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE = 1122,
+ GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE = 1123,
+ GAUDI2_EVENT_ROTATOR0_BMON_SPMU = 1126,
+ GAUDI2_EVENT_ROTATOR1_BMON_SPMU = 1128,
+ GAUDI2_EVENT_SM0_BMON_SPMU = 1130,
+ GAUDI2_EVENT_SM1_BMON_SPMU = 1131,
+ GAUDI2_EVENT_SM2_BMON_SPMU = 1132,
+ GAUDI2_EVENT_SM3_BMON_SPMU = 1133,
+ GAUDI2_EVENT_PSOC_DMA_QM = 1174,
+ GAUDI2_EVENT_TPC0_QM = 1206,
+ GAUDI2_EVENT_TPC1_QM = 1207,
+ GAUDI2_EVENT_TPC2_QM = 1208,
+ GAUDI2_EVENT_TPC3_QM = 1209,
+ GAUDI2_EVENT_TPC4_QM = 1210,
+ GAUDI2_EVENT_TPC5_QM = 1211,
+ GAUDI2_EVENT_TPC6_QM = 1212,
+ GAUDI2_EVENT_TPC7_QM = 1213,
+ GAUDI2_EVENT_TPC8_QM = 1214,
+ GAUDI2_EVENT_TPC9_QM = 1215,
+ GAUDI2_EVENT_TPC10_QM = 1216,
+ GAUDI2_EVENT_TPC11_QM = 1217,
+ GAUDI2_EVENT_TPC12_QM = 1218,
+ GAUDI2_EVENT_TPC13_QM = 1219,
+ GAUDI2_EVENT_TPC14_QM = 1220,
+ GAUDI2_EVENT_TPC15_QM = 1221,
+ GAUDI2_EVENT_TPC16_QM = 1222,
+ GAUDI2_EVENT_TPC17_QM = 1223,
+ GAUDI2_EVENT_TPC18_QM = 1224,
+ GAUDI2_EVENT_TPC19_QM = 1225,
+ GAUDI2_EVENT_TPC20_QM = 1226,
+ GAUDI2_EVENT_TPC21_QM = 1227,
+ GAUDI2_EVENT_TPC22_QM = 1228,
+ GAUDI2_EVENT_TPC23_QM = 1229,
+ GAUDI2_EVENT_TPC24_QM = 1230,
+ GAUDI2_EVENT_MME0_QM = 1232,
+ GAUDI2_EVENT_MME1_QM = 1233,
+ GAUDI2_EVENT_MME2_QM = 1234,
+ GAUDI2_EVENT_MME3_QM = 1235,
+ GAUDI2_EVENT_HDMA2_QM = 1236,
+ GAUDI2_EVENT_HDMA3_QM = 1237,
+ GAUDI2_EVENT_HDMA0_QM = 1238,
+ GAUDI2_EVENT_HDMA1_QM = 1239,
+ GAUDI2_EVENT_HDMA6_QM = 1240,
+ GAUDI2_EVENT_HDMA7_QM = 1241,
+ GAUDI2_EVENT_HDMA4_QM = 1242,
+ GAUDI2_EVENT_HDMA5_QM = 1243,
+ GAUDI2_EVENT_PDMA0_QM = 1244,
+ GAUDI2_EVENT_PDMA1_QM = 1245,
+ GAUDI2_EVENT_CPU_PI_UPDATE = 1246,
+ GAUDI2_EVENT_CPU_HALT_MACHINE = 1247,
+ GAUDI2_EVENT_CPU_INTS_REGISTER = 1248,
+ GAUDI2_EVENT_ROTATOR0_ROT0_QM = 1249,
+ GAUDI2_EVENT_ROTATOR1_ROT1_QM = 1250,
+ GAUDI2_EVENT_CPU_SOFT_RESET = 1251,
+ GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE = 1252,
+ GAUDI2_EVENT_CPU_FIX_POWER_ENV_S = 1253,
+ GAUDI2_EVENT_CPU_FIX_POWER_ENV_E = 1254,
+ GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S = 1255,
+ GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E = 1256,
+ GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT = 1257,
+ GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC = 1258,
+ GAUDI2_EVENT_HDMA2_CORE = 1259,
+ GAUDI2_EVENT_HDMA3_CORE = 1260,
+ GAUDI2_EVENT_HDMA0_CORE = 1261,
+ GAUDI2_EVENT_HDMA1_CORE = 1262,
+ GAUDI2_EVENT_HDMA6_CORE = 1263,
+ GAUDI2_EVENT_HDMA7_CORE = 1264,
+ GAUDI2_EVENT_HDMA4_CORE = 1265,
+ GAUDI2_EVENT_HDMA5_CORE = 1266,
+ GAUDI2_EVENT_PDMA0_CORE = 1267,
+ GAUDI2_EVENT_PDMA1_CORE = 1268,
+ GAUDI2_EVENT_KDMA0_CORE = 1269,
+ GAUDI2_EVENT_NIC0_QM0 = 1270,
+ GAUDI2_EVENT_NIC0_QM1 = 1271,
+ GAUDI2_EVENT_NIC1_QM0 = 1272,
+ GAUDI2_EVENT_NIC1_QM1 = 1273,
+ GAUDI2_EVENT_NIC2_QM0 = 1274,
+ GAUDI2_EVENT_NIC2_QM1 = 1275,
+ GAUDI2_EVENT_NIC3_QM0 = 1276,
+ GAUDI2_EVENT_NIC3_QM1 = 1277,
+ GAUDI2_EVENT_NIC4_QM0 = 1278,
+ GAUDI2_EVENT_NIC4_QM1 = 1279,
+ GAUDI2_EVENT_NIC5_QM0 = 1280,
+ GAUDI2_EVENT_NIC5_QM1 = 1281,
+ GAUDI2_EVENT_NIC6_QM0 = 1282,
+ GAUDI2_EVENT_NIC6_QM1 = 1283,
+ GAUDI2_EVENT_NIC7_QM0 = 1284,
+ GAUDI2_EVENT_NIC7_QM1 = 1285,
+ GAUDI2_EVENT_NIC8_QM0 = 1286,
+ GAUDI2_EVENT_NIC8_QM1 = 1287,
+ GAUDI2_EVENT_NIC9_QM0 = 1288,
+ GAUDI2_EVENT_NIC9_QM1 = 1289,
+ GAUDI2_EVENT_NIC10_QM0 = 1290,
+ GAUDI2_EVENT_NIC10_QM1 = 1291,
+ GAUDI2_EVENT_NIC11_QM0 = 1292,
+ GAUDI2_EVENT_NIC11_QM1 = 1293,
+ GAUDI2_EVENT_CPU_PKT_SANITY_FAILED = 1294,
+ GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 = 1295,
+ GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG1 = 1296,
+ GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG0 = 1297,
+ GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG1 = 1298,
+ GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG0 = 1299,
+ GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG1 = 1300,
+ GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG0 = 1301,
+ GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG1 = 1302,
+ GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG0 = 1303,
+ GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG1 = 1304,
+ GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG0 = 1305,
+ GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG1 = 1306,
+ GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG0 = 1307,
+ GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG1 = 1308,
+ GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG0 = 1309,
+ GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG1 = 1310,
+ GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG0 = 1311,
+ GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG1 = 1312,
+ GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG0 = 1313,
+ GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG1 = 1314,
+ GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG0 = 1315,
+ GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG1 = 1316,
+ GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0 = 1317,
+ GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
+ GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
+ GAUDI2_EVENT_SIZE,
+};
+
+#endif /* __GAUDI2_ASYNC_EVENTS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
new file mode 100644
index 000000000000..5bd4383c9f2c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
@@ -0,0 +1,2668 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_
+#define __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_
+
+struct gaudi2_async_events_ids_map {
+ int fc_id;
+ int cpu_id;
+ int valid;
+ int msg;
+ int reset;
+ char name[64];
+};
+
+static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
+ { .fc_id = 0, .cpu_id = 0, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1, .cpu_id = 1, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 2, .cpu_id = 2, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 3, .cpu_id = 3, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 4, .cpu_id = 4, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 5, .cpu_id = 5, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 6, .cpu_id = 6, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 7, .cpu_id = 7, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 8, .cpu_id = 8, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 9, .cpu_id = 9, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 10, .cpu_id = 10, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 11, .cpu_id = 11, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 12, .cpu_id = 12, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 13, .cpu_id = 13, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 14, .cpu_id = 14, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 15, .cpu_id = 15, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 16, .cpu_id = 16, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 17, .cpu_id = 17, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 18, .cpu_id = 18, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 19, .cpu_id = 19, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 20, .cpu_id = 20, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 21, .cpu_id = 21, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 22, .cpu_id = 22, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 23, .cpu_id = 23, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 24, .cpu_id = 24, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 25, .cpu_id = 25, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 26, .cpu_id = 26, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 27, .cpu_id = 27, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 28, .cpu_id = 28, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 29, .cpu_id = 29, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 30, .cpu_id = 30, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 31, .cpu_id = 31, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 32, .cpu_id = 32, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_CORE_SERR" },
+ { .fc_id = 33, .cpu_id = 33, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PCIE_CORE_DERR" },
+ { .fc_id = 34, .cpu_id = 34, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_IF_SERR" },
+ { .fc_id = 35, .cpu_id = 35, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PCIE_IF_DERR" },
+ { .fc_id = 36, .cpu_id = 36, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_PHY_SERR" },
+ { .fc_id = 37, .cpu_id = 37, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PCIE_PHY_DERR" },
+ { .fc_id = 38, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC0_ECC_SERR" },
+ { .fc_id = 39, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC1_ECC_SERR" },
+ { .fc_id = 40, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC2_ECC_SERR" },
+ { .fc_id = 41, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC3_ECC_SERR" },
+ { .fc_id = 42, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC4_ECC_SERR" },
+ { .fc_id = 43, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC5_ECC_SERR" },
+ { .fc_id = 44, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC6_ECC_SERR" },
+ { .fc_id = 45, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC7_ECC_SERR" },
+ { .fc_id = 46, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC8_ECC_SERR" },
+ { .fc_id = 47, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC9_ECC_SERR" },
+ { .fc_id = 48, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC10_ECC_SERR" },
+ { .fc_id = 49, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC11_ECC_SERR" },
+ { .fc_id = 50, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC12_ECC_SERR" },
+ { .fc_id = 51, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC13_ECC_SERR" },
+ { .fc_id = 52, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC14_ECC_SERR" },
+ { .fc_id = 53, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC15_ECC_SERR" },
+ { .fc_id = 54, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC16_ECC_SERR" },
+ { .fc_id = 55, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC17_ECC_SERR" },
+ { .fc_id = 56, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC18_ECC_SERR" },
+ { .fc_id = 57, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC19_ECC_SERR" },
+ { .fc_id = 58, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC20_ECC_SERR" },
+ { .fc_id = 59, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC21_ECC_SERR" },
+ { .fc_id = 60, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC22_ECC_SERR" },
+ { .fc_id = 61, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC23_ECC_SERR" },
+ { .fc_id = 62, .cpu_id = 38, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC24_ECC_SERR" },
+ { .fc_id = 63, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC0_ECC_DERR" },
+ { .fc_id = 64, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC1_ECC_DERR" },
+ { .fc_id = 65, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC2_ECC_DERR" },
+ { .fc_id = 66, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC3_ECC_DERR" },
+ { .fc_id = 67, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC4_ECC_DERR" },
+ { .fc_id = 68, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC5_ECC_DERR" },
+ { .fc_id = 69, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC6_ECC_DERR" },
+ { .fc_id = 70, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC7_ECC_DERR" },
+ { .fc_id = 71, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC8_ECC_DERR" },
+ { .fc_id = 72, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC9_ECC_DERR" },
+ { .fc_id = 73, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC10_ECC_DERR" },
+ { .fc_id = 74, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC11_ECC_DERR" },
+ { .fc_id = 75, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC12_ECC_DERR" },
+ { .fc_id = 76, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC13_ECC_DERR" },
+ { .fc_id = 77, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC14_ECC_DERR" },
+ { .fc_id = 78, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC15_ECC_DERR" },
+ { .fc_id = 79, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC16_ECC_DERR" },
+ { .fc_id = 80, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC17_ECC_DERR" },
+ { .fc_id = 81, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC18_ECC_DERR" },
+ { .fc_id = 82, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC19_ECC_DERR" },
+ { .fc_id = 83, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC20_ECC_DERR" },
+ { .fc_id = 84, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC21_ECC_DERR" },
+ { .fc_id = 85, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC22_ECC_DERR" },
+ { .fc_id = 86, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC23_ECC_DERR" },
+ { .fc_id = 87, .cpu_id = 39, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC24_ECC_DERR" },
+ { .fc_id = 88, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_SBTE0_ECC_SERR" },
+ { .fc_id = 89, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_SBTE1_ECC_SERR" },
+ { .fc_id = 90, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_SBTE2_ECC_SERR" },
+ { .fc_id = 91, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_SBTE3_ECC_SERR" },
+ { .fc_id = 92, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_SBTE4_ECC_SERR" },
+ { .fc_id = 93, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_CTRL_ECC_SERR" },
+ { .fc_id = 94, .cpu_id = 40, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_WAP_ECC_SERR" },
+ { .fc_id = 95, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_SBTE0_ECC_SERR" },
+ { .fc_id = 96, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_SBTE1_ECC_SERR" },
+ { .fc_id = 97, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_SBTE2_ECC_SERR" },
+ { .fc_id = 98, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_SBTE3_ECC_SERR" },
+ { .fc_id = 99, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_SBTE4_ECC_SERR" },
+ { .fc_id = 100, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_CTRL_ECC_SERR" },
+ { .fc_id = 101, .cpu_id = 41, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_WAP_ECC_SERR" },
+ { .fc_id = 102, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_SBTE0_ECC_SERR" },
+ { .fc_id = 103, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_SBTE1_ECC_SERR" },
+ { .fc_id = 104, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_SBTE2_ECC_SERR" },
+ { .fc_id = 105, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_SBTE3_ECC_SERR" },
+ { .fc_id = 106, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_SBTE4_ECC_SERR" },
+ { .fc_id = 107, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_CTRL_ECC_SERR" },
+ { .fc_id = 108, .cpu_id = 42, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_WAP_ECC_SERR" },
+ { .fc_id = 109, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_SBTE0_ECC_SERR" },
+ { .fc_id = 110, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_SBTE1_ECC_SERR" },
+ { .fc_id = 111, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_SBTE2_ECC_SERR" },
+ { .fc_id = 112, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_SBTE3_ECC_SERR" },
+ { .fc_id = 113, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_SBTE4_ECC_SERR" },
+ { .fc_id = 114, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_CTRL_ECC_SERR" },
+ { .fc_id = 115, .cpu_id = 43, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_WAP_ECC_SERR" },
+ { .fc_id = 116, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE0_ECC_DERR" },
+ { .fc_id = 117, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE1_ECC_DERR" },
+ { .fc_id = 118, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE2_ECC_DERR" },
+ { .fc_id = 119, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE3_ECC_DERR" },
+ { .fc_id = 120, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE4_ECC_DERR" },
+ { .fc_id = 121, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_CTRL_ECC_DERR" },
+ { .fc_id = 122, .cpu_id = 44, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_WAP_ECC_DERR" },
+ { .fc_id = 123, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE0_ECC_DERR" },
+ { .fc_id = 124, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE1_ECC_DERR" },
+ { .fc_id = 125, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE2_ECC_DERR" },
+ { .fc_id = 126, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE3_ECC_DERR" },
+ { .fc_id = 127, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE4_ECC_DERR" },
+ { .fc_id = 128, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_CTRL_ECC_DERR" },
+ { .fc_id = 129, .cpu_id = 45, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_WAP_ECC_DERR" },
+ { .fc_id = 130, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE0_ECC_DERR" },
+ { .fc_id = 131, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE1_ECC_DERR" },
+ { .fc_id = 132, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE2_ECC_DERR" },
+ { .fc_id = 133, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE3_ECC_DERR" },
+ { .fc_id = 134, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE4_ECC_DERR" },
+ { .fc_id = 135, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_CTRL_ECC_DERR" },
+ { .fc_id = 136, .cpu_id = 46, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_WAP_ECC_DERR" },
+ { .fc_id = 137, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE0_ECC_DERR" },
+ { .fc_id = 138, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE1_ECC_DERR" },
+ { .fc_id = 139, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE2_ECC_DERR" },
+ { .fc_id = 140, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE3_ECC_DERR" },
+ { .fc_id = 141, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE4_ECC_DERR" },
+ { .fc_id = 142, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_CTRL_ECC_DERR" },
+ { .fc_id = 143, .cpu_id = 47, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_WAP_ECC_DERR" },
+ { .fc_id = 144, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA2_ECC_SERR" },
+ { .fc_id = 145, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA3_ECC_SERR" },
+ { .fc_id = 146, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA0_ECC_SERR" },
+ { .fc_id = 147, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA1_ECC_SERR" },
+ { .fc_id = 148, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA6_ECC_SERR" },
+ { .fc_id = 149, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA7_ECC_SERR" },
+ { .fc_id = 150, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA4_ECC_SERR" },
+ { .fc_id = 151, .cpu_id = 48, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA5_ECC_SERR" },
+ { .fc_id = 152, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA2_ECC_DERR" },
+ { .fc_id = 153, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA3_ECC_DERR" },
+ { .fc_id = 154, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA0_ECC_DERR" },
+ { .fc_id = 155, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA1_ECC_DERR" },
+ { .fc_id = 156, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA6_ECC_DERR" },
+ { .fc_id = 157, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA7_ECC_DERR" },
+ { .fc_id = 158, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA4_ECC_DERR" },
+ { .fc_id = 159, .cpu_id = 49, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HDMA5_ECC_DERR" },
+ { .fc_id = 160, .cpu_id = 50, .valid = 1,
+ .msg = 0, .reset = 0, .name = "KDMA0_ECC_SERR" },
+ { .fc_id = 161, .cpu_id = 51, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PDMA0_ECC_SERR" },
+ { .fc_id = 162, .cpu_id = 51, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PDMA1_ECC_SERR" },
+ { .fc_id = 163, .cpu_id = 52, .valid = 1,
+ .msg = 0, .reset = 1, .name = "KDMA0_ECC_DERR" },
+ { .fc_id = 164, .cpu_id = 53, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PDMA0_ECC_DERR" },
+ { .fc_id = 165, .cpu_id = 53, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PDMA1_ECC_DERR" },
+ { .fc_id = 166, .cpu_id = 54, .valid = 1,
+ .msg = 0, .reset = 0, .name = "CPU_IF_ECC_SERR" },
+ { .fc_id = 167, .cpu_id = 55, .valid = 1,
+ .msg = 0, .reset = 1, .name = "CPU_IF_ECC_DERR" },
+ { .fc_id = 168, .cpu_id = 56, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PSOC_MEM_SERR" },
+ { .fc_id = 169, .cpu_id = 57, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PSOC_MEM_DERR" },
+ { .fc_id = 170, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM0_ECC_SERR" },
+ { .fc_id = 171, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM1_ECC_SERR" },
+ { .fc_id = 172, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM2_ECC_SERR" },
+ { .fc_id = 173, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM3_ECC_SERR" },
+ { .fc_id = 174, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM4_ECC_SERR" },
+ { .fc_id = 175, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM5_ECC_SERR" },
+ { .fc_id = 176, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM6_ECC_SERR" },
+ { .fc_id = 177, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM7_ECC_SERR" },
+ { .fc_id = 178, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM8_ECC_SERR" },
+ { .fc_id = 179, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM9_ECC_SERR" },
+ { .fc_id = 180, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM10_ECC_SERR" },
+ { .fc_id = 181, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM11_ECC_SERR" },
+ { .fc_id = 182, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM12_ECC_SERR" },
+ { .fc_id = 183, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM13_ECC_SERR" },
+ { .fc_id = 184, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM14_ECC_SERR" },
+ { .fc_id = 185, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM15_ECC_SERR" },
+ { .fc_id = 186, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM16_ECC_SERR" },
+ { .fc_id = 187, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM17_ECC_SERR" },
+ { .fc_id = 188, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM18_ECC_SERR" },
+ { .fc_id = 189, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM19_ECC_SERR" },
+ { .fc_id = 190, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM20_ECC_SERR" },
+ { .fc_id = 191, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM21_ECC_SERR" },
+ { .fc_id = 192, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM22_ECC_SERR" },
+ { .fc_id = 193, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM23_ECC_SERR" },
+ { .fc_id = 194, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM24_ECC_SERR" },
+ { .fc_id = 195, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM25_ECC_SERR" },
+ { .fc_id = 196, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM26_ECC_SERR" },
+ { .fc_id = 197, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM27_ECC_SERR" },
+ { .fc_id = 198, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM28_ECC_SERR" },
+ { .fc_id = 199, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM29_ECC_SERR" },
+ { .fc_id = 200, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM30_ECC_SERR" },
+ { .fc_id = 201, .cpu_id = 58, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SRAM31_ECC_SERR" },
+ { .fc_id = 202, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM0_ECC_DERR" },
+ { .fc_id = 203, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM1_ECC_DERR" },
+ { .fc_id = 204, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM2_ECC_DERR" },
+ { .fc_id = 205, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM3_ECC_DERR" },
+ { .fc_id = 206, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM4_ECC_DERR" },
+ { .fc_id = 207, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM5_ECC_DERR" },
+ { .fc_id = 208, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM6_ECC_DERR" },
+ { .fc_id = 209, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM7_ECC_DERR" },
+ { .fc_id = 210, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM8_ECC_DERR" },
+ { .fc_id = 211, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM9_ECC_DERR" },
+ { .fc_id = 212, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM10_ECC_DERR" },
+ { .fc_id = 213, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM11_ECC_DERR" },
+ { .fc_id = 214, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM12_ECC_DERR" },
+ { .fc_id = 215, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM13_ECC_DERR" },
+ { .fc_id = 216, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM14_ECC_DERR" },
+ { .fc_id = 217, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM15_ECC_DERR" },
+ { .fc_id = 218, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM16_ECC_DERR" },
+ { .fc_id = 219, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM17_ECC_DERR" },
+ { .fc_id = 220, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM18_ECC_DERR" },
+ { .fc_id = 221, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM19_ECC_DERR" },
+ { .fc_id = 222, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM20_ECC_DERR" },
+ { .fc_id = 223, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM21_ECC_DERR" },
+ { .fc_id = 224, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM22_ECC_DERR" },
+ { .fc_id = 225, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM23_ECC_DERR" },
+ { .fc_id = 226, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM24_ECC_DERR" },
+ { .fc_id = 227, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM25_ECC_DERR" },
+ { .fc_id = 228, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM26_ECC_DERR" },
+ { .fc_id = 229, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM27_ECC_DERR" },
+ { .fc_id = 230, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM28_ECC_DERR" },
+ { .fc_id = 231, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM29_ECC_DERR" },
+ { .fc_id = 232, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM30_ECC_DERR" },
+ { .fc_id = 233, .cpu_id = 59, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SRAM31_ECC_DERR" },
+ { .fc_id = 234, .cpu_id = 60, .valid = 1,
+ .msg = 0, .reset = 1, .name = "GIC500" },
+ { .fc_id = 235, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_0_MC0_ECC_SERR" },
+ { .fc_id = 236, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_1_MC0_ECC_SERR" },
+ { .fc_id = 237, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_2_MC0_ECC_SERR" },
+ { .fc_id = 238, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_3_MC0_ECC_SERR" },
+ { .fc_id = 239, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_4_MC0_ECC_SERR" },
+ { .fc_id = 240, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_5_MC0_ECC_SERR" },
+ { .fc_id = 241, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_0_MC1_ECC_SERR" },
+ { .fc_id = 242, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_1_MC1_ECC_SERR" },
+ { .fc_id = 243, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_2_MC1_ECC_SERR" },
+ { .fc_id = 244, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_3_MC1_ECC_SERR" },
+ { .fc_id = 245, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_4_MC1_ECC_SERR" },
+ { .fc_id = 246, .cpu_id = 61, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM_5_MC1_ECC_SERR" },
+ { .fc_id = 247, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_0_MC0_ECC_DERR" },
+ { .fc_id = 248, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_1_MC0_ECC_DERR" },
+ { .fc_id = 249, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_2_MC0_ECC_DERR" },
+ { .fc_id = 250, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_3_MC0_ECC_DERR" },
+ { .fc_id = 251, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_4_MC0_ECC_DERR" },
+ { .fc_id = 252, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_5_MC0_ECC_DERR" },
+ { .fc_id = 253, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_0_MC1_ECC_DERR" },
+ { .fc_id = 254, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_1_MC1_ECC_DERR" },
+ { .fc_id = 255, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_2_MC1_ECC_DERR" },
+ { .fc_id = 256, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_3_MC1_ECC_DERR" },
+ { .fc_id = 257, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_4_MC1_ECC_DERR" },
+ { .fc_id = 258, .cpu_id = 62, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_5_MC1_ECC_DERR" },
+ { .fc_id = 259, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_0_ECC_SERR" },
+ { .fc_id = 260, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_1_ECC_SERR" },
+ { .fc_id = 261, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_2_ECC_SERR" },
+ { .fc_id = 262, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_3_ECC_SERR" },
+ { .fc_id = 263, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_8_ECC_SERR" },
+ { .fc_id = 264, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_9_ECC_SERR" },
+ { .fc_id = 265, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_10_ECC_SERR" },
+ { .fc_id = 266, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_11_ECC_SERR" },
+ { .fc_id = 267, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_7_ECC_SERR" },
+ { .fc_id = 268, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_6_ECC_SERR" },
+ { .fc_id = 269, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_5_ECC_SERR" },
+ { .fc_id = 270, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_4_ECC_SERR" },
+ { .fc_id = 271, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_15_ECC_SERR" },
+ { .fc_id = 272, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_14_ECC_SERR" },
+ { .fc_id = 273, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_13_ECC_SERR" },
+ { .fc_id = 274, .cpu_id = 63, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HMMU_12_ECC_SERR" },
+ { .fc_id = 275, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_0_ECC_DERR" },
+ { .fc_id = 276, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_1_ECC_DERR" },
+ { .fc_id = 277, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_2_ECC_DERR" },
+ { .fc_id = 278, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_3_ECC_DERR" },
+ { .fc_id = 279, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_8_ECC_DERR" },
+ { .fc_id = 280, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_9_ECC_DERR" },
+ { .fc_id = 281, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_10_ECC_DERR" },
+ { .fc_id = 282, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_11_ECC_DERR" },
+ { .fc_id = 283, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_7_ECC_DERR" },
+ { .fc_id = 284, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_6_ECC_DERR" },
+ { .fc_id = 285, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_5_ECC_DERR" },
+ { .fc_id = 286, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_4_ECC_DERR" },
+ { .fc_id = 287, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_15_ECC_DERR" },
+ { .fc_id = 288, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_14_ECC_DERR" },
+ { .fc_id = 289, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_13_ECC_DERR" },
+ { .fc_id = 290, .cpu_id = 64, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_12_ECC_DERR" },
+ { .fc_id = 291, .cpu_id = 65, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PMMU_ECC_SERR" },
+ { .fc_id = 292, .cpu_id = 66, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU_ECC_DERR" },
+ { .fc_id = 293, .cpu_id = 67, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 294, .cpu_id = 68, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 295, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC0_VCD_ECC_SERR" },
+ { .fc_id = 296, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC1_VCD_ECC_SERR" },
+ { .fc_id = 297, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC2_VCD_ECC_SERR" },
+ { .fc_id = 298, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC3_VCD_ECC_SERR" },
+ { .fc_id = 299, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC4_VCD_ECC_SERR" },
+ { .fc_id = 300, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC5_VCD_ECC_SERR" },
+ { .fc_id = 301, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC6_VCD_ECC_SERR" },
+ { .fc_id = 302, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC7_VCD_ECC_SERR" },
+ { .fc_id = 303, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC8_VCD_ECC_SERR" },
+ { .fc_id = 304, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC9_VCD_ECC_SERR" },
+ { .fc_id = 305, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC0_L2C_ECC_SERR" },
+ { .fc_id = 306, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC1_L2C_ECC_SERR" },
+ { .fc_id = 307, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC2_L2C_ECC_SERR" },
+ { .fc_id = 308, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC3_L2C_ECC_SERR" },
+ { .fc_id = 309, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC4_L2C_ECC_SERR" },
+ { .fc_id = 310, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC5_L2C_ECC_SERR" },
+ { .fc_id = 311, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC6_L2C_ECC_SERR" },
+ { .fc_id = 312, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC7_L2C_ECC_SERR" },
+ { .fc_id = 313, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC8_L2C_ECC_SERR" },
+ { .fc_id = 314, .cpu_id = 69, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC9_L2C_ECC_SERR" },
+ { .fc_id = 315, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC0_VCD_ECC_DERR" },
+ { .fc_id = 316, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC1_VCD_ECC_DERR" },
+ { .fc_id = 317, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC2_VCD_ECC_DERR" },
+ { .fc_id = 318, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC3_VCD_ECC_DERR" },
+ { .fc_id = 319, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC4_VCD_ECC_DERR" },
+ { .fc_id = 320, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC5_VCD_ECC_DERR" },
+ { .fc_id = 321, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC6_VCD_ECC_DERR" },
+ { .fc_id = 322, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC7_VCD_ECC_DERR" },
+ { .fc_id = 323, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC8_VCD_ECC_DERR" },
+ { .fc_id = 324, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC9_VCD_ECC_DERR" },
+ { .fc_id = 325, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC0_L2C_ECC_DERR" },
+ { .fc_id = 326, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC1_L2C_ECC_DERR" },
+ { .fc_id = 327, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC2_L2C_ECC_DERR" },
+ { .fc_id = 328, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC3_L2C_ECC_DERR" },
+ { .fc_id = 329, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC4_L2C_ECC_DERR" },
+ { .fc_id = 330, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC5_L2C_ECC_DERR" },
+ { .fc_id = 331, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC6_L2C_ECC_DERR" },
+ { .fc_id = 332, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC7_L2C_ECC_DERR" },
+ { .fc_id = 333, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC8_L2C_ECC_DERR" },
+ { .fc_id = 334, .cpu_id = 70, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC9_L2C_ECC_DERR" },
+ { .fc_id = 335, .cpu_id = 71, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 336, .cpu_id = 72, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 337, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF0_ECC_SERR" },
+ { .fc_id = 338, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF1_ECC_SERR" },
+ { .fc_id = 339, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF2_ECC_SERR" },
+ { .fc_id = 340, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF3_ECC_SERR" },
+ { .fc_id = 341, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF8_ECC_SERR" },
+ { .fc_id = 342, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF9_ECC_SERR" },
+ { .fc_id = 343, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF10_ECC_SERR" },
+ { .fc_id = 344, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF11_ECC_SERR" },
+ { .fc_id = 345, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF7_ECC_SERR" },
+ { .fc_id = 346, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF6_ECC_SERR" },
+ { .fc_id = 347, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF5_ECC_SERR" },
+ { .fc_id = 348, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF4_ECC_SERR" },
+ { .fc_id = 349, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF15_ECC_SERR" },
+ { .fc_id = 350, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF14_ECC_SERR" },
+ { .fc_id = 351, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF13_ECC_SERR" },
+ { .fc_id = 352, .cpu_id = 73, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HIF12_ECC_SERR" },
+ { .fc_id = 353, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF0_ECC_DERR" },
+ { .fc_id = 354, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF1_ECC_DERR" },
+ { .fc_id = 355, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF2_ECC_DERR" },
+ { .fc_id = 356, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF3_ECC_DERR" },
+ { .fc_id = 357, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF8_ECC_DERR" },
+ { .fc_id = 358, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF9_ECC_DERR" },
+ { .fc_id = 359, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF10_ECC_DERR" },
+ { .fc_id = 360, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF11_ECC_DERR" },
+ { .fc_id = 361, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF7_ECC_DERR" },
+ { .fc_id = 362, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF6_ECC_DERR" },
+ { .fc_id = 363, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF5_ECC_DERR" },
+ { .fc_id = 364, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF4_ECC_DERR" },
+ { .fc_id = 365, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF15_ECC_DERR" },
+ { .fc_id = 366, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF14_ECC_DERR" },
+ { .fc_id = 367, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF13_ECC_DERR" },
+ { .fc_id = 368, .cpu_id = 74, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF12_ECC_DERR" },
+ { .fc_id = 369, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC0_ECC_SERR" },
+ { .fc_id = 370, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC1_ECC_SERR" },
+ { .fc_id = 371, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC2_ECC_SERR" },
+ { .fc_id = 372, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC3_ECC_SERR" },
+ { .fc_id = 373, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC4_ECC_SERR" },
+ { .fc_id = 374, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC5_ECC_SERR" },
+ { .fc_id = 375, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC6_ECC_SERR" },
+ { .fc_id = 376, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC7_ECC_SERR" },
+ { .fc_id = 377, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC8_ECC_SERR" },
+ { .fc_id = 378, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC9_ECC_SERR" },
+ { .fc_id = 379, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC10_ECC_SERR" },
+ { .fc_id = 380, .cpu_id = 75, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC11_ECC_SERR" },
+ { .fc_id = 381, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC0_ECC_DERR" },
+ { .fc_id = 382, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC1_ECC_DERR" },
+ { .fc_id = 383, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC2_ECC_DERR" },
+ { .fc_id = 384, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC3_ECC_DERR" },
+ { .fc_id = 385, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC4_ECC_DERR" },
+ { .fc_id = 386, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC5_ECC_DERR" },
+ { .fc_id = 387, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC6_ECC_DERR" },
+ { .fc_id = 388, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC7_ECC_DERR" },
+ { .fc_id = 389, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC8_ECC_DERR" },
+ { .fc_id = 390, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC9_ECC_DERR" },
+ { .fc_id = 391, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC10_ECC_DERR" },
+ { .fc_id = 392, .cpu_id = 76, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC11_ECC_DERR" },
+ { .fc_id = 393, .cpu_id = 77, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM0_ECC_DERR" },
+ { .fc_id = 394, .cpu_id = 77, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM1_ECC_DERR" },
+ { .fc_id = 395, .cpu_id = 77, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM2_ECC_DERR" },
+ { .fc_id = 396, .cpu_id = 77, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM3_ECC_DERR" },
+ { .fc_id = 397, .cpu_id = 78, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM0_ECC_SERR" },
+ { .fc_id = 398, .cpu_id = 78, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM1_ECC_SERR" },
+ { .fc_id = 399, .cpu_id = 78, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM2_ECC_SERR" },
+ { .fc_id = 400, .cpu_id = 78, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM3_ECC_SERR" },
+ { .fc_id = 401, .cpu_id = 79, .valid = 1,
+ .msg = 0, .reset = 0, .name = "XBAR0_ECC_SERR" },
+ { .fc_id = 402, .cpu_id = 79, .valid = 1,
+ .msg = 0, .reset = 0, .name = "XBAR1_ECC_SERR" },
+ { .fc_id = 403, .cpu_id = 79, .valid = 1,
+ .msg = 0, .reset = 0, .name = "XBAR2_ECC_SERR" },
+ { .fc_id = 404, .cpu_id = 79, .valid = 1,
+ .msg = 0, .reset = 0, .name = "XBAR3_ECC_SERR" },
+ { .fc_id = 405, .cpu_id = 80, .valid = 1,
+ .msg = 0, .reset = 1, .name = "XBAR0_ECC_DERR" },
+ { .fc_id = 406, .cpu_id = 80, .valid = 1,
+ .msg = 0, .reset = 1, .name = "XBAR1_ECC_DERR" },
+ { .fc_id = 407, .cpu_id = 80, .valid = 1,
+ .msg = 0, .reset = 1, .name = "XBAR2_ECC_DERR" },
+ { .fc_id = 408, .cpu_id = 80, .valid = 1,
+ .msg = 0, .reset = 1, .name = "XBAR3_ECC_DERR" },
+ { .fc_id = 409, .cpu_id = 81, .valid = 1,
+ .msg = 0, .reset = 0, .name = "ARC0_ECC_SERR" },
+ { .fc_id = 410, .cpu_id = 82, .valid = 1,
+ .msg = 0, .reset = 1, .name = "ARC0_ECC_DERR" },
+ { .fc_id = 411, .cpu_id = 83, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 412, .cpu_id = 84, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PCIE_ADDR_DEC_ERR" },
+ { .fc_id = 413, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC0_AXI_ERR_RSP" },
+ { .fc_id = 414, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC1_AXI_ERR_RSP" },
+ { .fc_id = 415, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC2_AXI_ERR_RSP" },
+ { .fc_id = 416, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC3_AXI_ERR_RSP" },
+ { .fc_id = 417, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC4_AXI_ERR_RSP" },
+ { .fc_id = 418, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC5_AXI_ERR_RSP" },
+ { .fc_id = 419, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC6_AXI_ERR_RSP" },
+ { .fc_id = 420, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC7_AXI_ERR_RSP" },
+ { .fc_id = 421, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC8_AXI_ERR_RSP" },
+ { .fc_id = 422, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC9_AXI_ERR_RSP" },
+ { .fc_id = 423, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC10_AXI_ERR_RSP" },
+ { .fc_id = 424, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC11_AXI_ERR_RSP" },
+ { .fc_id = 425, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC12_AXI_ERR_RSP" },
+ { .fc_id = 426, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC13_AXI_ERR_RSP" },
+ { .fc_id = 427, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC14_AXI_ERR_RSP" },
+ { .fc_id = 428, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC15_AXI_ERR_RSP" },
+ { .fc_id = 429, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC16_AXI_ERR_RSP" },
+ { .fc_id = 430, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC17_AXI_ERR_RSP" },
+ { .fc_id = 431, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC18_AXI_ERR_RSP" },
+ { .fc_id = 432, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC19_AXI_ERR_RSP" },
+ { .fc_id = 433, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC20_AXI_ERR_RSP" },
+ { .fc_id = 434, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC21_AXI_ERR_RSP" },
+ { .fc_id = 435, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC22_AXI_ERR_RSP" },
+ { .fc_id = 436, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC23_AXI_ERR_RSP" },
+ { .fc_id = 437, .cpu_id = 85, .valid = 1,
+ .msg = 0, .reset = 1, .name = "TPC24_AXI_ERR_RSP" },
+ { .fc_id = 438, .cpu_id = 86, .valid = 1,
+ .msg = 0, .reset = 1, .name = "AXI_ECC" },
+ { .fc_id = 439, .cpu_id = 87, .valid = 1,
+ .msg = 0, .reset = 1, .name = "L2_RAM_ECC" },
+ { .fc_id = 440, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 441, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 442, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 443, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 444, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 445, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 446, .cpu_id = 88, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME0_QMAN_SW_ERROR" },
+ { .fc_id = 447, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 448, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 449, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 450, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 451, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 452, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 453, .cpu_id = 89, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME1_QMAN_SW_ERROR" },
+ { .fc_id = 454, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 455, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 456, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 457, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 458, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 459, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 460, .cpu_id = 90, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME2_QMAN_SW_ERROR" },
+ { .fc_id = 461, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 462, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 463, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 464, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 465, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 466, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 467, .cpu_id = 91, .valid = 1,
+ .msg = 0, .reset = 1, .name = "MME3_QMAN_SW_ERROR" },
+ { .fc_id = 468, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PSOC_MME_PLL_LOCK_ERR" },
+ { .fc_id = 469, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PSOC_CPU_PLL_LOCK_ERR" },
+ { .fc_id = 470, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 471, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_NIC_PLL_LOCK_ERR" },
+ { .fc_id = 472, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 473, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 474, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 475, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
+ { .fc_id = 476, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 477, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 478, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 479, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
+ { .fc_id = 480, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 481, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_NIC_PLL_LOCK_ERR" },
+ { .fc_id = 482, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU_MME_PLL_LOCK_ERR" },
+ { .fc_id = 483, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 484, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_PCI_PLL_LOCK_ERR" },
+ { .fc_id = 485, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 486, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 487, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 488, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
+ { .fc_id = 489, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 490, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 491, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 492, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
+ { .fc_id = 493, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 494, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PSOC_VID_PLL_LOCK_ERR" },
+ { .fc_id = 495, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU_VID_PLL_LOCK_ERR" },
+ { .fc_id = 496, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE3_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 497, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 498, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE1_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 499, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE0_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 500, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 501, .cpu_id = 92, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DCORE2_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 502, .cpu_id = 93, .valid = 1,
+ .msg = 0, .reset = 1, .name = "CPU_AXI_ERR_RSP" },
+ { .fc_id = 503, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_0_AXI_ERR_RSP" },
+ { .fc_id = 504, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_1_AXI_ERR_RSP" },
+ { .fc_id = 505, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_2_AXI_ERR_RSP" },
+ { .fc_id = 506, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_3_AXI_ERR_RSP" },
+ { .fc_id = 507, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_8_AXI_ERR_RSP" },
+ { .fc_id = 508, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_9_AXI_ERR_RSP" },
+ { .fc_id = 509, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_10_AXI_ERR_RSP" },
+ { .fc_id = 510, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_11_AXI_ERR_RSP" },
+ { .fc_id = 511, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_7_AXI_ERR_RSP" },
+ { .fc_id = 512, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_6_AXI_ERR_RSP" },
+ { .fc_id = 513, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_5_AXI_ERR_RSP" },
+ { .fc_id = 514, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_4_AXI_ERR_RSP" },
+ { .fc_id = 515, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_15_AXI_ERR_RSP" },
+ { .fc_id = 516, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_14_AXI_ERR_RSP" },
+ { .fc_id = 517, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_13_AXI_ERR_RSP" },
+ { .fc_id = 518, .cpu_id = 94, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU_12_AXI_ERR_RSP" },
+ { .fc_id = 519, .cpu_id = 95, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU_FATAL" },
+ { .fc_id = 520, .cpu_id = 96, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU_AXI_ERR_RSP" },
+ { .fc_id = 521, .cpu_id = 97, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM0_ALARM_A" },
+ { .fc_id = 522, .cpu_id = 98, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM0_ALARM_B" },
+ { .fc_id = 523, .cpu_id = 99, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM1_ALARM_A" },
+ { .fc_id = 524, .cpu_id = 100, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM1_ALARM_B" },
+ { .fc_id = 525, .cpu_id = 101, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM2_ALARM_A" },
+ { .fc_id = 526, .cpu_id = 102, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM2_ALARM_B" },
+ { .fc_id = 527, .cpu_id = 103, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM3_ALARM_A" },
+ { .fc_id = 528, .cpu_id = 104, .valid = 1,
+ .msg = 0, .reset = 0, .name = "VM3_ALARM_B" },
+ { .fc_id = 529, .cpu_id = 105, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PSOC_AXI_ERR_RSP" },
+ { .fc_id = 530, .cpu_id = 106, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PSOC_PRSTN_FALL" },
+ { .fc_id = 531, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 532, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 533, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 534, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 535, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 536, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 537, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 538, .cpu_id = 107, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 539, .cpu_id = 108, .valid = 1,
+ .msg = 0, .reset = 1, .name = "KDMA_CH0_AXI_ERR_RSP" },
+ { .fc_id = 540, .cpu_id = 109, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PDMA_CH0_AXI_ERR_RSP" },
+ { .fc_id = 541, .cpu_id = 109, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PDMA_CH1_AXI_ERR_RSP" },
+ { .fc_id = 542, .cpu_id = 110, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_CATTRIP_0" },
+ { .fc_id = 543, .cpu_id = 111, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_CATTRIP_1" },
+ { .fc_id = 544, .cpu_id = 112, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_CATTRIP_2" },
+ { .fc_id = 545, .cpu_id = 113, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_CATTRIP_3" },
+ { .fc_id = 546, .cpu_id = 114, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_CATTRIP_4" },
+ { .fc_id = 547, .cpu_id = 115, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM_CATTRIP_5" },
+ { .fc_id = 548, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM0_MC0_SEI_SEVERE" },
+ { .fc_id = 549, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM0_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 550, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM0_MC1_SEI_SEVERE" },
+ { .fc_id = 551, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM0_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 552, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM1_MC0_SEI_SEVERE" },
+ { .fc_id = 553, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM1_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 554, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM1_MC1_SEI_SEVERE" },
+ { .fc_id = 555, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM1_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 556, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM2_MC0_SEI_SEVERE" },
+ { .fc_id = 557, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM2_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 558, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM2_MC1_SEI_SEVERE" },
+ { .fc_id = 559, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM2_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 560, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM3_MC0_SEI_SEVERE" },
+ { .fc_id = 561, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM3_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 562, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM3_MC1_SEI_SEVERE" },
+ { .fc_id = 563, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM3_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 564, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM4_MC0_SEI_SEVERE" },
+ { .fc_id = 565, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM4_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 566, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM4_MC1_SEI_SEVERE" },
+ { .fc_id = 567, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM4_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 568, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM5_MC0_SEI_SEVERE" },
+ { .fc_id = 569, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM5_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 570, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HBM5_MC1_SEI_SEVERE" },
+ { .fc_id = 571, .cpu_id = 116, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM5_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 572, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC0_AXI_ERR_RSPONSE" },
+ { .fc_id = 573, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC1_AXI_ERR_RSPONSE" },
+ { .fc_id = 574, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC2_AXI_ERR_RSPONSE" },
+ { .fc_id = 575, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC3_AXI_ERR_RSPONSE" },
+ { .fc_id = 576, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC4_AXI_ERR_RSPONSE" },
+ { .fc_id = 577, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC5_AXI_ERR_RSPONSE" },
+ { .fc_id = 578, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC6_AXI_ERR_RSPONSE" },
+ { .fc_id = 579, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC7_AXI_ERR_RSPONSE" },
+ { .fc_id = 580, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC8_AXI_ERR_RSPONSE" },
+ { .fc_id = 581, .cpu_id = 117, .valid = 1,
+ .msg = 0, .reset = 1, .name = "DEC9_AXI_ERR_RSPONSE" },
+ { .fc_id = 582, .cpu_id = 118, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 583, .cpu_id = 119, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 584, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF0_FATAL" },
+ { .fc_id = 585, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF1_FATAL" },
+ { .fc_id = 586, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF2_FATAL" },
+ { .fc_id = 587, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF3_FATAL" },
+ { .fc_id = 588, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF8_FATAL" },
+ { .fc_id = 589, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF9_FATAL" },
+ { .fc_id = 590, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF10_FATAL" },
+ { .fc_id = 591, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF11_FATAL" },
+ { .fc_id = 592, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF7_FATAL" },
+ { .fc_id = 593, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF6_FATAL" },
+ { .fc_id = 594, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF5_FATAL" },
+ { .fc_id = 595, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF4_FATAL" },
+ { .fc_id = 596, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF15_FATAL" },
+ { .fc_id = 597, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF14_FATAL" },
+ { .fc_id = 598, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF13_FATAL" },
+ { .fc_id = 599, .cpu_id = 120, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HIF12_FATAL" },
+ { .fc_id = 600, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 601, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 602, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC2_AXI_ERROR_RESPONSE" },
+ { .fc_id = 603, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC3_AXI_ERROR_RESPONSE" },
+ { .fc_id = 604, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC4_AXI_ERROR_RESPONSE" },
+ { .fc_id = 605, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC5_AXI_ERROR_RESPONSE" },
+ { .fc_id = 606, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC6_AXI_ERROR_RESPONSE" },
+ { .fc_id = 607, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC7_AXI_ERROR_RESPONSE" },
+ { .fc_id = 608, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC8_AXI_ERROR_RESPONSE" },
+ { .fc_id = 609, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC9_AXI_ERROR_RESPONSE" },
+ { .fc_id = 610, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC10_AXI_ERROR_RESPONSE" },
+ { .fc_id = 611, .cpu_id = 121, .valid = 1,
+ .msg = 0, .reset = 1, .name = "NIC11_AXI_ERROR_RESPONSE" },
+ { .fc_id = 612, .cpu_id = 122, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 613, .cpu_id = 122, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 614, .cpu_id = 122, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM2_AXI_ERROR_RESPONSE" },
+ { .fc_id = 615, .cpu_id = 122, .valid = 1,
+ .msg = 0, .reset = 1, .name = "SM3_AXI_ERROR_RESPONSE" },
+ { .fc_id = 616, .cpu_id = 123, .valid = 1,
+ .msg = 0, .reset = 1, .name = "ARC_AXI_ERROR_RESPONSE" },
+ { .fc_id = 617, .cpu_id = 124, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 618, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 619, .cpu_id = 125, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_FLR_REQUESTED" },
+ { .fc_id = 620, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 621, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 622, .cpu_id = 125, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PCIE_APB_TIMEOUT" },
+ { .fc_id = 623, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 624, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 625, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 626, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 627, .cpu_id = 125, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_FATAL_ERR" },
+ { .fc_id = 628, .cpu_id = 125, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 629, .cpu_id = 126, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 630, .cpu_id = 127, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 631, .cpu_id = 128, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_P2P_MSIX" },
+ { .fc_id = 632, .cpu_id = 129, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PCIE_DRAIN_COMPLETE" },
+ { .fc_id = 633, .cpu_id = 130, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC0_BMON_SPMU" },
+ { .fc_id = 634, .cpu_id = 131, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC0_KERNEL_ERR" },
+ { .fc_id = 635, .cpu_id = 132, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC1_BMON_SPMU" },
+ { .fc_id = 636, .cpu_id = 133, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC1_KERNEL_ERR" },
+ { .fc_id = 637, .cpu_id = 134, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC2_BMON_SPMU" },
+ { .fc_id = 638, .cpu_id = 135, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC2_KERNEL_ERR" },
+ { .fc_id = 639, .cpu_id = 136, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC3_BMON_SPMU" },
+ { .fc_id = 640, .cpu_id = 137, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC3_KERNEL_ERR" },
+ { .fc_id = 641, .cpu_id = 138, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC4_BMON_SPMU" },
+ { .fc_id = 642, .cpu_id = 139, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC4_KERNEL_ERR" },
+ { .fc_id = 643, .cpu_id = 140, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC5_BMON_SPMU" },
+ { .fc_id = 644, .cpu_id = 141, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC5_KERNEL_ERR" },
+ { .fc_id = 645, .cpu_id = 150, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC6_BMON_SPMU" },
+ { .fc_id = 646, .cpu_id = 151, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC6_KERNEL_ERR" },
+ { .fc_id = 647, .cpu_id = 152, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC7_BMON_SPMU" },
+ { .fc_id = 648, .cpu_id = 153, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC7_KERNEL_ERR" },
+ { .fc_id = 649, .cpu_id = 146, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC8_BMON_SPMU" },
+ { .fc_id = 650, .cpu_id = 147, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC8_KERNEL_ERR" },
+ { .fc_id = 651, .cpu_id = 148, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC9_BMON_SPMU" },
+ { .fc_id = 652, .cpu_id = 149, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC9_KERNEL_ERR" },
+ { .fc_id = 653, .cpu_id = 142, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC10_BMON_SPMU" },
+ { .fc_id = 654, .cpu_id = 143, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC10_KERNEL_ERR" },
+ { .fc_id = 655, .cpu_id = 144, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC11_BMON_SPMU" },
+ { .fc_id = 656, .cpu_id = 145, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC11_KERNEL_ERR" },
+ { .fc_id = 657, .cpu_id = 162, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC12_BMON_SPMU" },
+ { .fc_id = 658, .cpu_id = 163, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC12_KERNEL_ERR" },
+ { .fc_id = 659, .cpu_id = 164, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC13_BMON_SPMU" },
+ { .fc_id = 660, .cpu_id = 165, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC13_KERNEL_ERR" },
+ { .fc_id = 661, .cpu_id = 158, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC14_BMON_SPMU" },
+ { .fc_id = 662, .cpu_id = 159, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC14_KERNEL_ERR" },
+ { .fc_id = 663, .cpu_id = 160, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC15_BMON_SPMU" },
+ { .fc_id = 664, .cpu_id = 161, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC15_KERNEL_ERR" },
+ { .fc_id = 665, .cpu_id = 154, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC16_BMON_SPMU" },
+ { .fc_id = 666, .cpu_id = 155, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC16_KERNEL_ERR" },
+ { .fc_id = 667, .cpu_id = 156, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC17_BMON_SPMU" },
+ { .fc_id = 668, .cpu_id = 157, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC17_KERNEL_ERR" },
+ { .fc_id = 669, .cpu_id = 166, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC18_BMON_SPMU" },
+ { .fc_id = 670, .cpu_id = 167, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC18_KERNEL_ERR" },
+ { .fc_id = 671, .cpu_id = 168, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC19_BMON_SPMU" },
+ { .fc_id = 672, .cpu_id = 169, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC19_KERNEL_ERR" },
+ { .fc_id = 673, .cpu_id = 170, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC20_BMON_SPMU" },
+ { .fc_id = 674, .cpu_id = 171, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC20_KERNEL_ERR" },
+ { .fc_id = 675, .cpu_id = 172, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC21_BMON_SPMU" },
+ { .fc_id = 676, .cpu_id = 173, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC21_KERNEL_ERR" },
+ { .fc_id = 677, .cpu_id = 174, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC22_BMON_SPMU" },
+ { .fc_id = 678, .cpu_id = 175, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC22_KERNEL_ERR" },
+ { .fc_id = 679, .cpu_id = 176, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC23_BMON_SPMU" },
+ { .fc_id = 680, .cpu_id = 177, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC23_KERNEL_ERR" },
+ { .fc_id = 681, .cpu_id = 178, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC24_BMON_SPMU" },
+ { .fc_id = 682, .cpu_id = 179, .valid = 1,
+ .msg = 0, .reset = 0, .name = "TPC24_KERNEL_ERR" },
+ { .fc_id = 683, .cpu_id = 180, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 684, .cpu_id = 180, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 685, .cpu_id = 180, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 686, .cpu_id = 180, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 687, .cpu_id = 180, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 688, .cpu_id = 180, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_CTRL_BMON_SPMU" },
+ { .fc_id = 689, .cpu_id = 180, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_SBTE_BMON_SPMU" },
+ { .fc_id = 690, .cpu_id = 180, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_WAP_BMON_SPMU" },
+ { .fc_id = 691, .cpu_id = 180, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME0_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 692, .cpu_id = 181, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 693, .cpu_id = 181, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 694, .cpu_id = 181, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 695, .cpu_id = 181, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 696, .cpu_id = 181, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 697, .cpu_id = 181, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_CTRL_BMON_SPMU" },
+ { .fc_id = 698, .cpu_id = 181, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_SBTE_BMON_SPMU" },
+ { .fc_id = 699, .cpu_id = 181, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_WAP_BMON_SPMU" },
+ { .fc_id = 700, .cpu_id = 181, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME1_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 701, .cpu_id = 182, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 702, .cpu_id = 182, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 703, .cpu_id = 182, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 704, .cpu_id = 182, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 705, .cpu_id = 182, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 706, .cpu_id = 182, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_CTRL_BMON_SPMU" },
+ { .fc_id = 707, .cpu_id = 182, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_SBTE_BMON_SPMU" },
+ { .fc_id = 708, .cpu_id = 182, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_WAP_BMON_SPMU" },
+ { .fc_id = 709, .cpu_id = 182, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME2_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 710, .cpu_id = 183, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 711, .cpu_id = 183, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 712, .cpu_id = 183, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 713, .cpu_id = 183, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 714, .cpu_id = 183, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 715, .cpu_id = 183, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_CTRL_BMON_SPMU" },
+ { .fc_id = 716, .cpu_id = 183, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_SBTE_BMON_SPMU" },
+ { .fc_id = 717, .cpu_id = 183, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_WAP_BMON_SPMU" },
+ { .fc_id = 718, .cpu_id = 183, .valid = 1,
+ .msg = 0, .reset = 0, .name = "MME3_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 719, .cpu_id = 184, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 720, .cpu_id = 184, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU0_PAGE_FAULT_OR_WR_PERM" },
+ { .fc_id = 721, .cpu_id = 184, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU0_SECURITY_ERROR" },
+ { .fc_id = 722, .cpu_id = 185, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 723, .cpu_id = 185, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU1_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 724, .cpu_id = 185, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU1_SECURITY_ERROR" },
+ { .fc_id = 725, .cpu_id = 186, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 726, .cpu_id = 186, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU2_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 727, .cpu_id = 186, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU2_SECURITY_ERROR" },
+ { .fc_id = 728, .cpu_id = 187, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 729, .cpu_id = 187, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU3_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 730, .cpu_id = 187, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU3_SECURITY_ERROR" },
+ { .fc_id = 731, .cpu_id = 188, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 732, .cpu_id = 188, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU8_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 733, .cpu_id = 188, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU8_SECURITY_ERROR" },
+ { .fc_id = 734, .cpu_id = 189, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 735, .cpu_id = 189, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU9_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 736, .cpu_id = 189, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU9_SECURITY_ERROR" },
+ { .fc_id = 737, .cpu_id = 190, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 738, .cpu_id = 190, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU10_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 739, .cpu_id = 190, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU10_SECURITY_ERROR" },
+ { .fc_id = 740, .cpu_id = 191, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 741, .cpu_id = 191, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU11_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 742, .cpu_id = 191, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU11_SECURITY_ERROR" },
+ { .fc_id = 743, .cpu_id = 192, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 744, .cpu_id = 192, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU7_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 745, .cpu_id = 192, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU7_SECURITY_ERROR" },
+ { .fc_id = 746, .cpu_id = 193, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 747, .cpu_id = 193, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU6_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 748, .cpu_id = 193, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU6_SECURITY_ERROR" },
+ { .fc_id = 749, .cpu_id = 194, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 750, .cpu_id = 194, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU5_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 751, .cpu_id = 194, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU5_SECURITY_ERROR" },
+ { .fc_id = 752, .cpu_id = 195, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 753, .cpu_id = 195, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU4_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 754, .cpu_id = 195, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU4_SECURITY_ERROR" },
+ { .fc_id = 755, .cpu_id = 196, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 756, .cpu_id = 196, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU15_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 757, .cpu_id = 196, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU15_SECURITY_ERROR" },
+ { .fc_id = 758, .cpu_id = 197, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 759, .cpu_id = 197, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU14_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 760, .cpu_id = 197, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU14_SECURITY_ERROR" },
+ { .fc_id = 761, .cpu_id = 198, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 762, .cpu_id = 198, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU13_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 763, .cpu_id = 198, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU13_SECURITY_ERROR" },
+ { .fc_id = 764, .cpu_id = 199, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 765, .cpu_id = 199, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU12_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 766, .cpu_id = 199, .valid = 1,
+ .msg = 0, .reset = 1, .name = "HMMU12_SECURITY_ERROR" },
+ { .fc_id = 767, .cpu_id = 200, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 768, .cpu_id = 201, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU0_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 769, .cpu_id = 202, .valid = 1,
+ .msg = 0, .reset = 1, .name = "PMMU0_SECURITY_ERROR" },
+ { .fc_id = 770, .cpu_id = 203, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA2_BM_SPMU" },
+ { .fc_id = 771, .cpu_id = 204, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 772, .cpu_id = 205, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA3_BM_SPMU" },
+ { .fc_id = 773, .cpu_id = 206, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 774, .cpu_id = 207, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA0_BM_SPMU" },
+ { .fc_id = 775, .cpu_id = 208, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 776, .cpu_id = 209, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA1_BM_SPMU" },
+ { .fc_id = 777, .cpu_id = 210, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 778, .cpu_id = 211, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA6_BM_SPMU" },
+ { .fc_id = 779, .cpu_id = 212, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 780, .cpu_id = 213, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA7_BM_SPMU" },
+ { .fc_id = 781, .cpu_id = 214, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 782, .cpu_id = 215, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA4_BM_SPMU" },
+ { .fc_id = 783, .cpu_id = 216, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 784, .cpu_id = 217, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HDMA5_BM_SPMU" },
+ { .fc_id = 785, .cpu_id = 218, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 786, .cpu_id = 219, .valid = 1,
+ .msg = 0, .reset = 0, .name = "KDMA_BM_SPMU" },
+ { .fc_id = 787, .cpu_id = 220, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 788, .cpu_id = 221, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PDMA0_BM_SPMU" },
+ { .fc_id = 789, .cpu_id = 222, .valid = 1,
+ .msg = 0, .reset = 0, .name = "PDMA1_BM_SPMU" },
+ { .fc_id = 790, .cpu_id = 223, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM0_MC0_SPI" },
+ { .fc_id = 791, .cpu_id = 224, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM0_MC1_SPI" },
+ { .fc_id = 792, .cpu_id = 225, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM1_MC0_SPI" },
+ { .fc_id = 793, .cpu_id = 226, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM1_MC1_SPI" },
+ { .fc_id = 794, .cpu_id = 227, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM2_MC0_SPI" },
+ { .fc_id = 795, .cpu_id = 228, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM2_MC1_SPI" },
+ { .fc_id = 796, .cpu_id = 229, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM3_MC0_SPI" },
+ { .fc_id = 797, .cpu_id = 230, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM3_MC1_SPI" },
+ { .fc_id = 798, .cpu_id = 231, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM4_MC0_SPI" },
+ { .fc_id = 799, .cpu_id = 232, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM4_MC1_SPI" },
+ { .fc_id = 800, .cpu_id = 233, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM5_MC0_SPI" },
+ { .fc_id = 801, .cpu_id = 234, .valid = 1,
+ .msg = 0, .reset = 0, .name = "HBM5_MC1_SPI" },
+ { .fc_id = 802, .cpu_id = 235, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 803, .cpu_id = 236, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 804, .cpu_id = 237, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 805, .cpu_id = 238, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 806, .cpu_id = 239, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 807, .cpu_id = 240, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 808, .cpu_id = 241, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 809, .cpu_id = 242, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 810, .cpu_id = 243, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 811, .cpu_id = 244, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 812, .cpu_id = 245, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 813, .cpu_id = 246, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 814, .cpu_id = 247, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 815, .cpu_id = 248, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 816, .cpu_id = 249, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 817, .cpu_id = 250, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 818, .cpu_id = 251, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 819, .cpu_id = 252, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 820, .cpu_id = 253, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 821, .cpu_id = 254, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 822, .cpu_id = 255, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 823, .cpu_id = 256, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 824, .cpu_id = 257, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 825, .cpu_id = 258, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 826, .cpu_id = 259, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 827, .cpu_id = 260, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 828, .cpu_id = 261, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 829, .cpu_id = 262, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 830, .cpu_id = 263, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 831, .cpu_id = 264, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 832, .cpu_id = 265, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 833, .cpu_id = 266, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 834, .cpu_id = 267, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 835, .cpu_id = 268, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 836, .cpu_id = 269, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 837, .cpu_id = 270, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 838, .cpu_id = 271, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 839, .cpu_id = 272, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 840, .cpu_id = 273, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 841, .cpu_id = 274, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 842, .cpu_id = 275, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 843, .cpu_id = 276, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 844, .cpu_id = 277, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 845, .cpu_id = 278, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 846, .cpu_id = 279, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 847, .cpu_id = 280, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 848, .cpu_id = 281, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 849, .cpu_id = 282, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 850, .cpu_id = 283, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 851, .cpu_id = 284, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 852, .cpu_id = 285, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 853, .cpu_id = 286, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 854, .cpu_id = 287, .valid = 0,
+ .msg = 0, .reset = 1, .name = "" },
+ { .fc_id = 855, .cpu_id = 288, .valid = 0,
+ .msg = 0, .reset = 1, .name = "" },
+ { .fc_id = 856, .cpu_id = 289, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 857, .cpu_id = 290, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 858, .cpu_id = 291, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 859, .cpu_id = 292, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 860, .cpu_id = 293, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 861, .cpu_id = 294, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 862, .cpu_id = 295, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 863, .cpu_id = 296, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 864, .cpu_id = 297, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 865, .cpu_id = 298, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 866, .cpu_id = 299, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 867, .cpu_id = 300, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 868, .cpu_id = 301, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 869, .cpu_id = 302, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 870, .cpu_id = 303, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 871, .cpu_id = 304, .valid = 1,
+ .msg = 0, .reset = 1, .name = "RPM_ERROR_OR_DRAIN" },
+ { .fc_id = 872, .cpu_id = 305, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 873, .cpu_id = 306, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 874, .cpu_id = 307, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 875, .cpu_id = 308, .valid = 1,
+ .msg = 0, .reset = 0, .name = "RAZWI_OR_PID_MIN_MAX_INTERRUPT" },
+ { .fc_id = 876, .cpu_id = 309, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 877, .cpu_id = 310, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 878, .cpu_id = 311, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 879, .cpu_id = 312, .valid = 0,
+ .msg = 0, .reset = 1, .name = "" },
+ { .fc_id = 880, .cpu_id = 313, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 881, .cpu_id = 314, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 882, .cpu_id = 315, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 883, .cpu_id = 316, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 884, .cpu_id = 317, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 885, .cpu_id = 318, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 886, .cpu_id = 319, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 887, .cpu_id = 320, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 888, .cpu_id = 321, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 889, .cpu_id = 322, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 890, .cpu_id = 323, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 891, .cpu_id = 324, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 892, .cpu_id = 325, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 893, .cpu_id = 326, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 894, .cpu_id = 327, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 895, .cpu_id = 328, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 896, .cpu_id = 329, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC0_SPI" },
+ { .fc_id = 897, .cpu_id = 329, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC0_BMON_SPMU" },
+ { .fc_id = 898, .cpu_id = 330, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC1_SPI" },
+ { .fc_id = 899, .cpu_id = 330, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC1_BMON_SPMU" },
+ { .fc_id = 900, .cpu_id = 331, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC2_SPI" },
+ { .fc_id = 901, .cpu_id = 331, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC2_BMON_SPMU" },
+ { .fc_id = 902, .cpu_id = 332, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC3_SPI" },
+ { .fc_id = 903, .cpu_id = 332, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC3_BMON_SPMU" },
+ { .fc_id = 904, .cpu_id = 333, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC4_SPI" },
+ { .fc_id = 905, .cpu_id = 333, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC4_BMON_SPMU" },
+ { .fc_id = 906, .cpu_id = 334, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC5_SPI" },
+ { .fc_id = 907, .cpu_id = 334, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC5_BMON_SPMU" },
+ { .fc_id = 908, .cpu_id = 335, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC6_SPI" },
+ { .fc_id = 909, .cpu_id = 335, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC6_BMON_SPMU" },
+ { .fc_id = 910, .cpu_id = 336, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC7_SPI" },
+ { .fc_id = 911, .cpu_id = 336, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC7_BMON_SPMU" },
+ { .fc_id = 912, .cpu_id = 337, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC8_SPI" },
+ { .fc_id = 913, .cpu_id = 337, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC8_BMON_SPMU" },
+ { .fc_id = 914, .cpu_id = 338, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC9_SPI" },
+ { .fc_id = 915, .cpu_id = 338, .valid = 1,
+ .msg = 0, .reset = 0, .name = "DEC9_BMON_SPMU" },
+ { .fc_id = 916, .cpu_id = 339, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 917, .cpu_id = 340, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 918, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 919, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 920, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 921, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 922, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 923, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 924, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 925, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 926, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 927, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 928, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 929, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 930, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 931, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 932, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 933, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 934, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 935, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 936, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 937, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 938, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 939, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 940, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 941, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 942, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 943, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 944, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 945, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 946, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 947, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 948, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 949, .cpu_id = 341, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 950, .cpu_id = 342, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 951, .cpu_id = 343, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC0_BMON_SPMU" },
+ { .fc_id = 952, .cpu_id = 343, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC0_SW_ERROR" },
+ { .fc_id = 953, .cpu_id = 343, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 954, .cpu_id = 343, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 955, .cpu_id = 344, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC1_BMON_SPMU" },
+ { .fc_id = 956, .cpu_id = 344, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC1_SW_ERROR" },
+ { .fc_id = 957, .cpu_id = 344, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 958, .cpu_id = 344, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 959, .cpu_id = 345, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC2_BMON_SPMU" },
+ { .fc_id = 960, .cpu_id = 345, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC2_SW_ERROR" },
+ { .fc_id = 961, .cpu_id = 345, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 962, .cpu_id = 345, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 963, .cpu_id = 346, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC3_BMON_SPMU" },
+ { .fc_id = 964, .cpu_id = 346, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC3_SW_ERROR" },
+ { .fc_id = 965, .cpu_id = 346, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 966, .cpu_id = 346, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 967, .cpu_id = 347, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC4_BMON_SPMU" },
+ { .fc_id = 968, .cpu_id = 347, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC4_SW_ERROR" },
+ { .fc_id = 969, .cpu_id = 347, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 970, .cpu_id = 347, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 971, .cpu_id = 348, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC5_BMON_SPMU" },
+ { .fc_id = 972, .cpu_id = 348, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC5_SW_ERROR" },
+ { .fc_id = 973, .cpu_id = 348, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 974, .cpu_id = 348, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 975, .cpu_id = 349, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC6_BMON_SPMU" },
+ { .fc_id = 976, .cpu_id = 349, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC6_SW_ERROR" },
+ { .fc_id = 977, .cpu_id = 349, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 978, .cpu_id = 349, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 979, .cpu_id = 350, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC7_BMON_SPMU" },
+ { .fc_id = 980, .cpu_id = 350, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC7_SW_ERROR" },
+ { .fc_id = 981, .cpu_id = 350, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 982, .cpu_id = 350, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 983, .cpu_id = 351, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC8_BMON_SPMU" },
+ { .fc_id = 984, .cpu_id = 351, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC8_SW_ERROR" },
+ { .fc_id = 985, .cpu_id = 351, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 986, .cpu_id = 351, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 987, .cpu_id = 352, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC9_BMON_SPMU" },
+ { .fc_id = 988, .cpu_id = 352, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC9_SW_ERROR" },
+ { .fc_id = 989, .cpu_id = 352, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 990, .cpu_id = 352, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 991, .cpu_id = 353, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC10_BMON_SPMU" },
+ { .fc_id = 992, .cpu_id = 353, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC10_SW_ERROR" },
+ { .fc_id = 993, .cpu_id = 353, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 994, .cpu_id = 353, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 995, .cpu_id = 354, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC11_BMON_SPMU" },
+ { .fc_id = 996, .cpu_id = 354, .valid = 1,
+ .msg = 0, .reset = 0, .name = "NIC11_SW_ERROR" },
+ { .fc_id = 997, .cpu_id = 354, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 998, .cpu_id = 354, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 999, .cpu_id = 355, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1000, .cpu_id = 356, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1001, .cpu_id = 357, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1002, .cpu_id = 358, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1003, .cpu_id = 359, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1004, .cpu_id = 360, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1005, .cpu_id = 361, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1006, .cpu_id = 362, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1007, .cpu_id = 363, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1008, .cpu_id = 368, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1009, .cpu_id = 369, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1010, .cpu_id = 366, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1011, .cpu_id = 367, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1012, .cpu_id = 364, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1013, .cpu_id = 365, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1014, .cpu_id = 374, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1015, .cpu_id = 375, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1016, .cpu_id = 372, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1017, .cpu_id = 373, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1018, .cpu_id = 370, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1019, .cpu_id = 371, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1020, .cpu_id = 376, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1021, .cpu_id = 377, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1022, .cpu_id = 378, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1023, .cpu_id = 379, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1024, .cpu_id = 380, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1025, .cpu_id = 381, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1026, .cpu_id = 382, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1027, .cpu_id = 383, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1028, .cpu_id = 384, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1029, .cpu_id = 385, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1030, .cpu_id = 386, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1031, .cpu_id = 387, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1032, .cpu_id = 388, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1033, .cpu_id = 389, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1034, .cpu_id = 390, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1035, .cpu_id = 391, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1036, .cpu_id = 392, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1037, .cpu_id = 393, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1038, .cpu_id = 394, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1039, .cpu_id = 395, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1040, .cpu_id = 396, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1041, .cpu_id = 397, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1042, .cpu_id = 398, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1043, .cpu_id = 399, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1044, .cpu_id = 400, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1045, .cpu_id = 401, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1046, .cpu_id = 402, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1047, .cpu_id = 403, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1048, .cpu_id = 404, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1049, .cpu_id = 405, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1050, .cpu_id = 406, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1051, .cpu_id = 407, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1052, .cpu_id = 408, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1053, .cpu_id = 409, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1054, .cpu_id = 410, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1055, .cpu_id = 411, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1056, .cpu_id = 412, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1057, .cpu_id = 413, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1058, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1059, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1060, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1061, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1062, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1063, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1064, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1065, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1066, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1067, .cpu_id = 414, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1068, .cpu_id = 415, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1069, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1070, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1071, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1072, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1073, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1074, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1075, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1076, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1077, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1078, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1079, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1080, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1081, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1082, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1083, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1084, .cpu_id = 416, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1085, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1086, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1087, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1088, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1089, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1090, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1091, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1092, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1093, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1094, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1095, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1096, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1097, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1098, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1099, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1100, .cpu_id = 417, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1101, .cpu_id = 418, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1102, .cpu_id = 419, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1103, .cpu_id = 420, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1104, .cpu_id = 421, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1105, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1106, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1107, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1108, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1109, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1110, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1111, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1112, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1113, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1114, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1115, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1116, .cpu_id = 422, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1117, .cpu_id = 423, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1118, .cpu_id = 424, .valid = 1,
+ .msg = 0, .reset = 0, .name = "ROTATOR0_SERR" },
+ { .fc_id = 1119, .cpu_id = 425, .valid = 1,
+ .msg = 0, .reset = 0, .name = "ROTATOR1_SERR" },
+ { .fc_id = 1120, .cpu_id = 426, .valid = 1,
+ .msg = 0, .reset = 1, .name = "ROTATOR0_DERR" },
+ { .fc_id = 1121, .cpu_id = 427, .valid = 1,
+ .msg = 0, .reset = 1, .name = "ROTATOR1_DERR" },
+ { .fc_id = 1122, .cpu_id = 428, .valid = 1,
+ .msg = 0, .reset = 1, .name = "ROTATOR0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 1123, .cpu_id = 429, .valid = 1,
+ .msg = 0, .reset = 1, .name = "ROTATOR1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 1124, .cpu_id = 430, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1125, .cpu_id = 431, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1126, .cpu_id = 432, .valid = 1,
+ .msg = 0, .reset = 0, .name = "ROTATOR0_BMON_SPMU" },
+ { .fc_id = 1127, .cpu_id = 433, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1128, .cpu_id = 434, .valid = 1,
+ .msg = 0, .reset = 0, .name = "ROTATOR1_BMON_SPMU" },
+ { .fc_id = 1129, .cpu_id = 435, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1130, .cpu_id = 436, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM0_BMON_SPMU" },
+ { .fc_id = 1131, .cpu_id = 437, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM1_BMON_SPMU" },
+ { .fc_id = 1132, .cpu_id = 438, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM2_BMON_SPMU" },
+ { .fc_id = 1133, .cpu_id = 439, .valid = 1,
+ .msg = 0, .reset = 0, .name = "SM3_BMON_SPMU" },
+ { .fc_id = 1134, .cpu_id = 440, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1135, .cpu_id = 441, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1136, .cpu_id = 442, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1137, .cpu_id = 443, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1138, .cpu_id = 444, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1139, .cpu_id = 445, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1140, .cpu_id = 446, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1141, .cpu_id = 447, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1142, .cpu_id = 448, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1143, .cpu_id = 449, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1144, .cpu_id = 450, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1145, .cpu_id = 451, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1146, .cpu_id = 452, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1147, .cpu_id = 453, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1148, .cpu_id = 454, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1149, .cpu_id = 455, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1150, .cpu_id = 456, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1151, .cpu_id = 457, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1152, .cpu_id = 458, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1153, .cpu_id = 459, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1154, .cpu_id = 460, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1155, .cpu_id = 461, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1156, .cpu_id = 462, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1157, .cpu_id = 463, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1158, .cpu_id = 464, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1159, .cpu_id = 465, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1160, .cpu_id = 466, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1161, .cpu_id = 467, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1162, .cpu_id = 468, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1163, .cpu_id = 469, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1164, .cpu_id = 470, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1165, .cpu_id = 471, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1166, .cpu_id = 472, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1167, .cpu_id = 473, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1168, .cpu_id = 474, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1169, .cpu_id = 475, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1170, .cpu_id = 476, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1171, .cpu_id = 477, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1172, .cpu_id = 478, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1173, .cpu_id = 479, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1174, .cpu_id = 480, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PSOC_DMA_QM" },
+ { .fc_id = 1175, .cpu_id = 481, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1176, .cpu_id = 482, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1177, .cpu_id = 483, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1178, .cpu_id = 484, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1179, .cpu_id = 485, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1180, .cpu_id = 486, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1181, .cpu_id = 487, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1182, .cpu_id = 488, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1183, .cpu_id = 489, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1184, .cpu_id = 490, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1185, .cpu_id = 491, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1186, .cpu_id = 492, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1187, .cpu_id = 493, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1188, .cpu_id = 494, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1189, .cpu_id = 495, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1190, .cpu_id = 496, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1191, .cpu_id = 497, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1192, .cpu_id = 498, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1193, .cpu_id = 499, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1194, .cpu_id = 500, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1195, .cpu_id = 501, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1196, .cpu_id = 502, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1197, .cpu_id = 503, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1198, .cpu_id = 504, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1199, .cpu_id = 505, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1200, .cpu_id = 506, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1201, .cpu_id = 507, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1202, .cpu_id = 508, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1203, .cpu_id = 509, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1204, .cpu_id = 510, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1205, .cpu_id = 511, .valid = 0,
+ .msg = 0, .reset = 0, .name = "" },
+ { .fc_id = 1206, .cpu_id = 512, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC0_QM" },
+ { .fc_id = 1207, .cpu_id = 513, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC1_QM" },
+ { .fc_id = 1208, .cpu_id = 514, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC2_QM" },
+ { .fc_id = 1209, .cpu_id = 515, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC3_QM" },
+ { .fc_id = 1210, .cpu_id = 516, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC4_QM" },
+ { .fc_id = 1211, .cpu_id = 517, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC5_QM" },
+ { .fc_id = 1212, .cpu_id = 518, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC6_QM" },
+ { .fc_id = 1213, .cpu_id = 519, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC7_QM" },
+ { .fc_id = 1214, .cpu_id = 520, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC8_QM" },
+ { .fc_id = 1215, .cpu_id = 521, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC9_QM" },
+ { .fc_id = 1216, .cpu_id = 522, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC10_QM" },
+ { .fc_id = 1217, .cpu_id = 523, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC11_QM" },
+ { .fc_id = 1218, .cpu_id = 524, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC12_QM" },
+ { .fc_id = 1219, .cpu_id = 525, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC13_QM" },
+ { .fc_id = 1220, .cpu_id = 526, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC14_QM" },
+ { .fc_id = 1221, .cpu_id = 527, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC15_QM" },
+ { .fc_id = 1222, .cpu_id = 528, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC16_QM" },
+ { .fc_id = 1223, .cpu_id = 529, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC17_QM" },
+ { .fc_id = 1224, .cpu_id = 530, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC18_QM" },
+ { .fc_id = 1225, .cpu_id = 531, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC19_QM" },
+ { .fc_id = 1226, .cpu_id = 532, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC20_QM" },
+ { .fc_id = 1227, .cpu_id = 533, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC21_QM" },
+ { .fc_id = 1228, .cpu_id = 534, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC22_QM" },
+ { .fc_id = 1229, .cpu_id = 535, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC23_QM" },
+ { .fc_id = 1230, .cpu_id = 536, .valid = 1,
+ .msg = 1, .reset = 0, .name = "TPC24_QM" },
+ { .fc_id = 1231, .cpu_id = 537, .valid = 0,
+ .msg = 1, .reset = 0, .name = "" },
+ { .fc_id = 1232, .cpu_id = 538, .valid = 1,
+ .msg = 1, .reset = 0, .name = "MME0_QM" },
+ { .fc_id = 1233, .cpu_id = 539, .valid = 1,
+ .msg = 1, .reset = 0, .name = "MME1_QM" },
+ { .fc_id = 1234, .cpu_id = 540, .valid = 1,
+ .msg = 1, .reset = 0, .name = "MME2_QM" },
+ { .fc_id = 1235, .cpu_id = 541, .valid = 1,
+ .msg = 1, .reset = 0, .name = "MME3_QM" },
+ { .fc_id = 1236, .cpu_id = 542, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA2_QM" },
+ { .fc_id = 1237, .cpu_id = 543, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA3_QM" },
+ { .fc_id = 1238, .cpu_id = 544, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA0_QM" },
+ { .fc_id = 1239, .cpu_id = 545, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA1_QM" },
+ { .fc_id = 1240, .cpu_id = 546, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA6_QM" },
+ { .fc_id = 1241, .cpu_id = 547, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA7_QM" },
+ { .fc_id = 1242, .cpu_id = 548, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA4_QM" },
+ { .fc_id = 1243, .cpu_id = 549, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA5_QM" },
+ { .fc_id = 1244, .cpu_id = 550, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PDMA0_QM" },
+ { .fc_id = 1245, .cpu_id = 551, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PDMA1_QM" },
+ { .fc_id = 1246, .cpu_id = 552, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PI_UPDATE" },
+ { .fc_id = 1247, .cpu_id = 553, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HALT_MACHINE" },
+ { .fc_id = 1248, .cpu_id = 554, .valid = 1,
+ .msg = 1, .reset = 0, .name = "INTS_REGISTER" },
+ { .fc_id = 1249, .cpu_id = 555, .valid = 1,
+ .msg = 1, .reset = 0, .name = "ROT0_QM" },
+ { .fc_id = 1250, .cpu_id = 556, .valid = 1,
+ .msg = 1, .reset = 0, .name = "ROT1_QM" },
+ { .fc_id = 1251, .cpu_id = 557, .valid = 1,
+ .msg = 1, .reset = 0, .name = "SOFT_RESET" },
+ { .fc_id = 1252, .cpu_id = 558, .valid = 1,
+ .msg = 1, .reset = 0, .name = "CPLD_SHUTDOWN_CAUSE" },
+ { .fc_id = 1253, .cpu_id = 559, .valid = 1,
+ .msg = 1, .reset = 0, .name = "FIX_POWER_ENV_S" },
+ { .fc_id = 1254, .cpu_id = 560, .valid = 1,
+ .msg = 1, .reset = 0, .name = "FIX_POWER_ENV_E" },
+ { .fc_id = 1255, .cpu_id = 561, .valid = 1,
+ .msg = 1, .reset = 0, .name = "FIX_THERMAL_ENV_S" },
+ { .fc_id = 1256, .cpu_id = 562, .valid = 1,
+ .msg = 1, .reset = 0, .name = "FIX_THERMAL_ENV_E" },
+ { .fc_id = 1257, .cpu_id = 563, .valid = 1,
+ .msg = 1, .reset = 0, .name = "CPLD_SHUTDOWN_EVENT" },
+ { .fc_id = 1258, .cpu_id = 564, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PKT_QUEUE_OUT_SYNC" },
+ { .fc_id = 1259, .cpu_id = 565, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA2_CORE" },
+ { .fc_id = 1260, .cpu_id = 566, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA3_CORE" },
+ { .fc_id = 1261, .cpu_id = 567, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA0_CORE" },
+ { .fc_id = 1262, .cpu_id = 568, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA1_CORE" },
+ { .fc_id = 1263, .cpu_id = 569, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA6_CORE" },
+ { .fc_id = 1264, .cpu_id = 570, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA7_CORE" },
+ { .fc_id = 1265, .cpu_id = 571, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA4_CORE" },
+ { .fc_id = 1266, .cpu_id = 572, .valid = 1,
+ .msg = 1, .reset = 0, .name = "HDMA5_CORE" },
+ { .fc_id = 1267, .cpu_id = 573, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PDMA0_CORE" },
+ { .fc_id = 1268, .cpu_id = 574, .valid = 1,
+ .msg = 1, .reset = 0, .name = "PDMA1_CORE" },
+ { .fc_id = 1269, .cpu_id = 575, .valid = 1,
+ .msg = 1, .reset = 0, .name = "KDMA0_CORE" },
+ { .fc_id = 1270, .cpu_id = 576, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC0_QM0" },
+ { .fc_id = 1271, .cpu_id = 577, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC0_QM1" },
+ { .fc_id = 1272, .cpu_id = 578, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC1_QM0" },
+ { .fc_id = 1273, .cpu_id = 579, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC1_QM1" },
+ { .fc_id = 1274, .cpu_id = 580, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC2_QM0" },
+ { .fc_id = 1275, .cpu_id = 581, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC2_QM1" },
+ { .fc_id = 1276, .cpu_id = 582, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC3_QM0" },
+ { .fc_id = 1277, .cpu_id = 583, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC3_QM1" },
+ { .fc_id = 1278, .cpu_id = 584, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC4_QM0" },
+ { .fc_id = 1279, .cpu_id = 585, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC4_QM1" },
+ { .fc_id = 1280, .cpu_id = 586, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC5_QM0" },
+ { .fc_id = 1281, .cpu_id = 587, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC5_QM1" },
+ { .fc_id = 1282, .cpu_id = 588, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC6_QM0" },
+ { .fc_id = 1283, .cpu_id = 589, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC6_QM1" },
+ { .fc_id = 1284, .cpu_id = 590, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC7_QM0" },
+ { .fc_id = 1285, .cpu_id = 591, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC7_QM1" },
+ { .fc_id = 1286, .cpu_id = 592, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC8_QM0" },
+ { .fc_id = 1287, .cpu_id = 593, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC8_QM1" },
+ { .fc_id = 1288, .cpu_id = 594, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC9_QM0" },
+ { .fc_id = 1289, .cpu_id = 595, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC9_QM1" },
+ { .fc_id = 1290, .cpu_id = 596, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC10_QM0" },
+ { .fc_id = 1291, .cpu_id = 597, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC10_QM1" },
+ { .fc_id = 1292, .cpu_id = 598, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC11_QM0" },
+ { .fc_id = 1293, .cpu_id = 599, .valid = 1,
+ .msg = 1, .reset = 0, .name = "NIC11_QM1" },
+ { .fc_id = 1294, .cpu_id = 600, .valid = 1,
+ .msg = 1, .reset = 0, .name = "CPU_PKT_SANITY_FAILED" },
+ { .fc_id = 1295, .cpu_id = 601, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC0_ENG0" },
+ { .fc_id = 1296, .cpu_id = 602, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC0_ENG1" },
+ { .fc_id = 1297, .cpu_id = 603, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC1_ENG0" },
+ { .fc_id = 1298, .cpu_id = 604, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC1_ENG1" },
+ { .fc_id = 1299, .cpu_id = 605, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC2_ENG0" },
+ { .fc_id = 1300, .cpu_id = 606, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC2_ENG1" },
+ { .fc_id = 1301, .cpu_id = 607, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC3_ENG0" },
+ { .fc_id = 1302, .cpu_id = 608, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC3_ENG1" },
+ { .fc_id = 1303, .cpu_id = 609, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC4_ENG0" },
+ { .fc_id = 1304, .cpu_id = 610, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC4_ENG1" },
+ { .fc_id = 1305, .cpu_id = 611, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC5_ENG0" },
+ { .fc_id = 1306, .cpu_id = 612, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC5_ENG1" },
+ { .fc_id = 1307, .cpu_id = 613, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC6_ENG0" },
+ { .fc_id = 1308, .cpu_id = 614, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC6_ENG1" },
+ { .fc_id = 1309, .cpu_id = 615, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC7_ENG0" },
+ { .fc_id = 1310, .cpu_id = 616, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC7_ENG1" },
+ { .fc_id = 1311, .cpu_id = 617, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC8_ENG0" },
+ { .fc_id = 1312, .cpu_id = 618, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC8_ENG1" },
+ { .fc_id = 1313, .cpu_id = 619, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC9_ENG0" },
+ { .fc_id = 1314, .cpu_id = 620, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC9_ENG1" },
+ { .fc_id = 1315, .cpu_id = 621, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC10_ENG0" },
+ { .fc_id = 1316, .cpu_id = 622, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC10_ENG1" },
+ { .fc_id = 1317, .cpu_id = 623, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG0" },
+ { .fc_id = 1318, .cpu_id = 624, .valid = 1,
+ .msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG1" },
+ { .fc_id = 1319, .cpu_id = 625, .valid = 1,
+ .msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" },
+};
+
+#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h
new file mode 100644
index 000000000000..6d6ed7838a64
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef __GAUDI2_ASYNC_VIRT_EVENTS_H_
+#define __GAUDI2_ASYNC_VIRT_EVENTS_H_
+
+enum gaudi2_async_virt_event_id {
+ GAUDI2_EVENT_NIC3_QM1_OLD = 1206,
+ GAUDI2_EVENT_NIC4_QM0_OLD = 1207,
+ GAUDI2_EVENT_NIC4_QM1_OLD = 1208,
+ GAUDI2_EVENT_NIC5_QM0_OLD = 1209,
+ GAUDI2_EVENT_NIC5_QM1_OLD = 1210,
+ GAUDI2_EVENT_NIC6_QM0_OLD = 1211,
+ GAUDI2_EVENT_NIC6_QM1_OLD = 1212,
+ GAUDI2_EVENT_NIC7_QM0_OLD = 1213,
+ GAUDI2_EVENT_NIC7_QM1_OLD = 1214,
+ GAUDI2_EVENT_NIC8_QM0_OLD = 1215,
+ GAUDI2_EVENT_NIC8_QM1_OLD = 1216,
+ GAUDI2_EVENT_NIC9_QM0_OLD = 1217,
+ GAUDI2_EVENT_NIC9_QM1_OLD = 1218,
+ GAUDI2_EVENT_NIC10_QM0_OLD = 1219,
+ GAUDI2_EVENT_NIC10_QM1_OLD = 1220,
+ GAUDI2_EVENT_NIC11_QM0_OLD = 1221,
+ GAUDI2_EVENT_NIC11_QM1_OLD = 1222,
+ GAUDI2_EVENT_CPU_PKT_SANITY_FAILED_OLD = 1223,
+ GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0_OLD = 1224,
+ GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG1_OLD = 1225,
+ GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG0_OLD = 1226,
+ GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG1_OLD = 1227,
+ GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG0_OLD = 1228,
+ GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG1_OLD = 1229,
+ GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG0_OLD = 1230,
+ GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG1_OLD = 1231,
+ GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG0_OLD = 1232,
+ GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG1_OLD = 1233,
+ GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG0_OLD = 1234,
+ GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG1_OLD = 1235,
+ GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG0_OLD = 1236,
+ GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG1_OLD = 1237,
+ GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG0_OLD = 1238,
+ GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG1_OLD = 1239,
+ GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG0_OLD = 1240,
+ GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG1_OLD = 1241,
+ GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG0_OLD = 1242,
+ GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG1_OLD = 1243,
+ GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG0_OLD = 1244,
+ GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG1_OLD = 1245,
+ GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0_OLD = 1246,
+ GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1_OLD = 1247,
+ GAUDI2_EVENT_ARC_DCCM_FULL_OLD = 1248,
+};
+
+#endif /* __GAUDI2_ASYNC_VIRT_EVENTS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h
new file mode 100644
index 000000000000..14f09d7758c7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h
@@ -0,0 +1,984 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef GAUDI2_CORESIGHT_H
+#define GAUDI2_CORESIGHT_H
+
+enum gaudi2_debug_stm_regs_index {
+ GAUDI2_STM_FIRST = 0,
+ GAUDI2_STM_DCORE0_TPC0_EML = GAUDI2_STM_FIRST,
+ GAUDI2_STM_DCORE0_TPC1_EML,
+ GAUDI2_STM_DCORE0_TPC2_EML,
+ GAUDI2_STM_DCORE0_TPC3_EML,
+ GAUDI2_STM_DCORE0_TPC4_EML,
+ GAUDI2_STM_DCORE0_TPC5_EML,
+ GAUDI2_STM_DCORE0_TPC6_EML,
+ GAUDI2_STM_DCORE1_TPC0_EML,
+ GAUDI2_STM_DCORE1_TPC1_EML,
+ GAUDI2_STM_DCORE1_TPC2_EML,
+ GAUDI2_STM_DCORE1_TPC3_EML,
+ GAUDI2_STM_DCORE1_TPC4_EML,
+ GAUDI2_STM_DCORE1_TPC5_EML,
+ GAUDI2_STM_DCORE2_TPC0_EML,
+ GAUDI2_STM_DCORE2_TPC1_EML,
+ GAUDI2_STM_DCORE2_TPC2_EML,
+ GAUDI2_STM_DCORE2_TPC3_EML,
+ GAUDI2_STM_DCORE2_TPC4_EML,
+ GAUDI2_STM_DCORE2_TPC5_EML,
+ GAUDI2_STM_DCORE3_TPC0_EML,
+ GAUDI2_STM_DCORE3_TPC1_EML,
+ GAUDI2_STM_DCORE3_TPC2_EML,
+ GAUDI2_STM_DCORE3_TPC3_EML,
+ GAUDI2_STM_DCORE3_TPC4_EML,
+ GAUDI2_STM_DCORE3_TPC5_EML,
+ GAUDI2_STM_DCORE0_HMMU0_CS,
+ GAUDI2_STM_DCORE0_HMMU1_CS,
+ GAUDI2_STM_DCORE0_HMMU2_CS,
+ GAUDI2_STM_DCORE0_HMMU3_CS,
+ GAUDI2_STM_DCORE0_MME_CTRL,
+ GAUDI2_STM_DCORE0_MME_SBTE0,
+ GAUDI2_STM_DCORE0_MME_SBTE1,
+ GAUDI2_STM_DCORE0_MME_SBTE2,
+ GAUDI2_STM_DCORE0_MME_SBTE3,
+ GAUDI2_STM_DCORE0_MME_SBTE4,
+ GAUDI2_STM_DCORE0_MME_ACC,
+ GAUDI2_STM_DCORE0_SM,
+ GAUDI2_STM_DCORE0_EDMA0_CS,
+ GAUDI2_STM_DCORE0_EDMA1_CS,
+ GAUDI2_STM_DCORE0_VDEC0_CS,
+ GAUDI2_STM_DCORE0_VDEC1_CS,
+ GAUDI2_STM_DCORE1_HMMU0_CS,
+ GAUDI2_STM_DCORE1_HMMU1_CS,
+ GAUDI2_STM_DCORE1_HMMU2_CS,
+ GAUDI2_STM_DCORE1_HMMU3_CS,
+ GAUDI2_STM_DCORE1_MME_CTRL,
+ GAUDI2_STM_DCORE1_MME_SBTE0,
+ GAUDI2_STM_DCORE1_MME_SBTE1,
+ GAUDI2_STM_DCORE1_MME_SBTE2,
+ GAUDI2_STM_DCORE1_MME_SBTE3,
+ GAUDI2_STM_DCORE1_MME_SBTE4,
+ GAUDI2_STM_DCORE1_MME_ACC,
+ GAUDI2_STM_DCORE1_SM,
+ GAUDI2_STM_DCORE1_EDMA0_CS,
+ GAUDI2_STM_DCORE1_EDMA1_CS,
+ GAUDI2_STM_DCORE1_VDEC0_CS,
+ GAUDI2_STM_DCORE1_VDEC1_CS,
+ GAUDI2_STM_DCORE2_HMMU0_CS,
+ GAUDI2_STM_DCORE2_HMMU1_CS,
+ GAUDI2_STM_DCORE2_HMMU2_CS,
+ GAUDI2_STM_DCORE2_HMMU3_CS,
+ GAUDI2_STM_DCORE2_MME_CTRL,
+ GAUDI2_STM_DCORE2_MME_SBTE0,
+ GAUDI2_STM_DCORE2_MME_SBTE1,
+ GAUDI2_STM_DCORE2_MME_SBTE2,
+ GAUDI2_STM_DCORE2_MME_SBTE3,
+ GAUDI2_STM_DCORE2_MME_SBTE4,
+ GAUDI2_STM_DCORE2_MME_ACC,
+ GAUDI2_STM_DCORE2_SM,
+ GAUDI2_STM_DCORE2_EDMA0_CS,
+ GAUDI2_STM_DCORE2_EDMA1_CS,
+ GAUDI2_STM_DCORE2_VDEC0_CS,
+ GAUDI2_STM_DCORE2_VDEC1_CS,
+ GAUDI2_STM_DCORE3_HMMU0_CS,
+ GAUDI2_STM_DCORE3_HMMU1_CS,
+ GAUDI2_STM_DCORE3_HMMU2_CS,
+ GAUDI2_STM_DCORE3_HMMU3_CS,
+ GAUDI2_STM_DCORE3_MME_CTRL,
+ GAUDI2_STM_DCORE3_MME_SBTE0,
+ GAUDI2_STM_DCORE3_MME_SBTE1,
+ GAUDI2_STM_DCORE3_MME_SBTE2,
+ GAUDI2_STM_DCORE3_MME_SBTE3,
+ GAUDI2_STM_DCORE3_MME_SBTE4,
+ GAUDI2_STM_DCORE3_MME_ACC,
+ GAUDI2_STM_DCORE3_SM,
+ GAUDI2_STM_DCORE3_EDMA0_CS,
+ GAUDI2_STM_DCORE3_EDMA1_CS,
+ GAUDI2_STM_DCORE3_VDEC0_CS,
+ GAUDI2_STM_DCORE3_VDEC1_CS,
+ GAUDI2_STM_PCIE,
+ GAUDI2_STM_PSOC,
+ GAUDI2_STM_PSOC_ARC0_CS,
+ GAUDI2_STM_PSOC_ARC1_CS,
+ GAUDI2_STM_PDMA0_CS,
+ GAUDI2_STM_PDMA1_CS,
+ GAUDI2_STM_CPU,
+ GAUDI2_STM_PMMU_CS,
+ GAUDI2_STM_ROT0_CS,
+ GAUDI2_STM_ROT1_CS,
+ GAUDI2_STM_ARC_FARM_CS,
+ GAUDI2_STM_KDMA_CS,
+ GAUDI2_STM_PCIE_VDEC0_CS,
+ GAUDI2_STM_PCIE_VDEC1_CS,
+ GAUDI2_STM_HBM0_MC0_CS,
+ GAUDI2_STM_HBM0_MC1_CS,
+ GAUDI2_STM_HBM1_MC0_CS,
+ GAUDI2_STM_HBM1_MC1_CS,
+ GAUDI2_STM_HBM2_MC0_CS,
+ GAUDI2_STM_HBM2_MC1_CS,
+ GAUDI2_STM_HBM3_MC0_CS,
+ GAUDI2_STM_HBM3_MC1_CS,
+ GAUDI2_STM_HBM4_MC0_CS,
+ GAUDI2_STM_HBM4_MC1_CS,
+ GAUDI2_STM_HBM5_MC0_CS,
+ GAUDI2_STM_HBM5_MC1_CS,
+ GAUDI2_STM_NIC0_DBG_0,
+ GAUDI2_STM_NIC0_DBG_1,
+ GAUDI2_STM_NIC1_DBG_0,
+ GAUDI2_STM_NIC1_DBG_1,
+ GAUDI2_STM_NIC2_DBG_0,
+ GAUDI2_STM_NIC2_DBG_1,
+ GAUDI2_STM_NIC3_DBG_0,
+ GAUDI2_STM_NIC3_DBG_1,
+ GAUDI2_STM_NIC4_DBG_0,
+ GAUDI2_STM_NIC4_DBG_1,
+ GAUDI2_STM_NIC5_DBG_0,
+ GAUDI2_STM_NIC5_DBG_1,
+ GAUDI2_STM_NIC6_DBG_0,
+ GAUDI2_STM_NIC6_DBG_1,
+ GAUDI2_STM_NIC7_DBG_0,
+ GAUDI2_STM_NIC7_DBG_1,
+ GAUDI2_STM_NIC8_DBG_0,
+ GAUDI2_STM_NIC8_DBG_1,
+ GAUDI2_STM_NIC9_DBG_0,
+ GAUDI2_STM_NIC9_DBG_1,
+ GAUDI2_STM_NIC10_DBG_0,
+ GAUDI2_STM_NIC10_DBG_1,
+ GAUDI2_STM_NIC11_DBG_0,
+ GAUDI2_STM_NIC11_DBG_1,
+ GAUDI2_STM_LAST = GAUDI2_STM_NIC11_DBG_1
+};
+
+enum gaudi2_debug_etf_regs_index {
+ GAUDI2_ETF_FIRST = 0,
+ GAUDI2_ETF_DCORE0_TPC0_EML = GAUDI2_ETF_FIRST,
+ GAUDI2_ETF_DCORE0_TPC1_EML,
+ GAUDI2_ETF_DCORE0_TPC2_EML,
+ GAUDI2_ETF_DCORE0_TPC3_EML,
+ GAUDI2_ETF_DCORE0_TPC4_EML,
+ GAUDI2_ETF_DCORE0_TPC5_EML,
+ GAUDI2_ETF_DCORE0_TPC6_EML,
+ GAUDI2_ETF_DCORE1_TPC0_EML,
+ GAUDI2_ETF_DCORE1_TPC1_EML,
+ GAUDI2_ETF_DCORE1_TPC2_EML,
+ GAUDI2_ETF_DCORE1_TPC3_EML,
+ GAUDI2_ETF_DCORE1_TPC4_EML,
+ GAUDI2_ETF_DCORE1_TPC5_EML,
+ GAUDI2_ETF_DCORE2_TPC0_EML,
+ GAUDI2_ETF_DCORE2_TPC1_EML,
+ GAUDI2_ETF_DCORE2_TPC2_EML,
+ GAUDI2_ETF_DCORE2_TPC3_EML,
+ GAUDI2_ETF_DCORE2_TPC4_EML,
+ GAUDI2_ETF_DCORE2_TPC5_EML,
+ GAUDI2_ETF_DCORE3_TPC0_EML,
+ GAUDI2_ETF_DCORE3_TPC1_EML,
+ GAUDI2_ETF_DCORE3_TPC2_EML,
+ GAUDI2_ETF_DCORE3_TPC3_EML,
+ GAUDI2_ETF_DCORE3_TPC4_EML,
+ GAUDI2_ETF_DCORE3_TPC5_EML,
+ GAUDI2_ETF_DCORE0_HMMU0_CS,
+ GAUDI2_ETF_DCORE0_HMMU1_CS,
+ GAUDI2_ETF_DCORE0_HMMU2_CS,
+ GAUDI2_ETF_DCORE0_HMMU3_CS,
+ GAUDI2_ETF_DCORE0_MME_CTRL,
+ GAUDI2_ETF_DCORE0_MME_SBTE0,
+ GAUDI2_ETF_DCORE0_MME_SBTE1,
+ GAUDI2_ETF_DCORE0_MME_SBTE2,
+ GAUDI2_ETF_DCORE0_MME_SBTE3,
+ GAUDI2_ETF_DCORE0_MME_SBTE4,
+ GAUDI2_ETF_DCORE0_MME_ACC,
+ GAUDI2_ETF_DCORE0_SM,
+ GAUDI2_ETF_DCORE0_EDMA0_CS,
+ GAUDI2_ETF_DCORE0_EDMA1_CS,
+ GAUDI2_ETF_DCORE0_VDEC0_CS,
+ GAUDI2_ETF_DCORE0_VDEC1_CS,
+ GAUDI2_ETF_DCORE1_HMMU0_CS,
+ GAUDI2_ETF_DCORE1_HMMU1_CS,
+ GAUDI2_ETF_DCORE1_HMMU2_CS,
+ GAUDI2_ETF_DCORE1_HMMU3_CS,
+ GAUDI2_ETF_DCORE1_MME_CTRL,
+ GAUDI2_ETF_DCORE1_MME_SBTE0,
+ GAUDI2_ETF_DCORE1_MME_SBTE1,
+ GAUDI2_ETF_DCORE1_MME_SBTE2,
+ GAUDI2_ETF_DCORE1_MME_SBTE3,
+ GAUDI2_ETF_DCORE1_MME_SBTE4,
+ GAUDI2_ETF_DCORE1_MME_ACC,
+ GAUDI2_ETF_DCORE1_SM,
+ GAUDI2_ETF_DCORE1_EDMA0_CS,
+ GAUDI2_ETF_DCORE1_EDMA1_CS,
+ GAUDI2_ETF_DCORE1_VDEC0_CS,
+ GAUDI2_ETF_DCORE1_VDEC1_CS,
+ GAUDI2_ETF_DCORE2_HMMU0_CS,
+ GAUDI2_ETF_DCORE2_HMMU1_CS,
+ GAUDI2_ETF_DCORE2_HMMU2_CS,
+ GAUDI2_ETF_DCORE2_HMMU3_CS,
+ GAUDI2_ETF_DCORE2_MME_CTRL,
+ GAUDI2_ETF_DCORE2_MME_SBTE0,
+ GAUDI2_ETF_DCORE2_MME_SBTE1,
+ GAUDI2_ETF_DCORE2_MME_SBTE2,
+ GAUDI2_ETF_DCORE2_MME_SBTE3,
+ GAUDI2_ETF_DCORE2_MME_SBTE4,
+ GAUDI2_ETF_DCORE2_MME_ACC,
+ GAUDI2_ETF_DCORE2_SM,
+ GAUDI2_ETF_DCORE2_EDMA0_CS,
+ GAUDI2_ETF_DCORE2_EDMA1_CS,
+ GAUDI2_ETF_DCORE2_VDEC0_CS,
+ GAUDI2_ETF_DCORE2_VDEC1_CS,
+ GAUDI2_ETF_DCORE3_HMMU0_CS,
+ GAUDI2_ETF_DCORE3_HMMU1_CS,
+ GAUDI2_ETF_DCORE3_HMMU2_CS,
+ GAUDI2_ETF_DCORE3_HMMU3_CS,
+ GAUDI2_ETF_DCORE3_MME_CTRL,
+ GAUDI2_ETF_DCORE3_MME_SBTE0,
+ GAUDI2_ETF_DCORE3_MME_SBTE1,
+ GAUDI2_ETF_DCORE3_MME_SBTE2,
+ GAUDI2_ETF_DCORE3_MME_SBTE3,
+ GAUDI2_ETF_DCORE3_MME_SBTE4,
+ GAUDI2_ETF_DCORE3_MME_ACC,
+ GAUDI2_ETF_DCORE3_SM,
+ GAUDI2_ETF_DCORE3_EDMA0_CS,
+ GAUDI2_ETF_DCORE3_EDMA1_CS,
+ GAUDI2_ETF_DCORE3_VDEC0_CS,
+ GAUDI2_ETF_DCORE3_VDEC1_CS,
+ GAUDI2_ETF_PCIE,
+ GAUDI2_ETF_PSOC,
+ GAUDI2_ETF_PSOC_ARC0_CS,
+ GAUDI2_ETF_PSOC_ARC1_CS,
+ GAUDI2_ETF_PDMA0_CS,
+ GAUDI2_ETF_PDMA1_CS,
+ GAUDI2_ETF_CPU_0,
+ GAUDI2_ETF_CPU_1,
+ GAUDI2_ETF_CPU_TRACE,
+ GAUDI2_ETF_PMMU_CS,
+ GAUDI2_ETF_ROT0_CS,
+ GAUDI2_ETF_ROT1_CS,
+ GAUDI2_ETF_ARC_FARM_CS,
+ GAUDI2_ETF_KDMA_CS,
+ GAUDI2_ETF_PCIE_VDEC0_CS,
+ GAUDI2_ETF_PCIE_VDEC1_CS,
+ GAUDI2_ETF_HBM0_MC0_CS,
+ GAUDI2_ETF_HBM0_MC1_CS,
+ GAUDI2_ETF_HBM1_MC0_CS,
+ GAUDI2_ETF_HBM1_MC1_CS,
+ GAUDI2_ETF_HBM2_MC0_CS,
+ GAUDI2_ETF_HBM2_MC1_CS,
+ GAUDI2_ETF_HBM3_MC0_CS,
+ GAUDI2_ETF_HBM3_MC1_CS,
+ GAUDI2_ETF_HBM4_MC0_CS,
+ GAUDI2_ETF_HBM4_MC1_CS,
+ GAUDI2_ETF_HBM5_MC0_CS,
+ GAUDI2_ETF_HBM5_MC1_CS,
+ GAUDI2_ETF_NIC0_DBG_0,
+ GAUDI2_ETF_NIC0_DBG_1,
+ GAUDI2_ETF_NIC1_DBG_0,
+ GAUDI2_ETF_NIC1_DBG_1,
+ GAUDI2_ETF_NIC2_DBG_0,
+ GAUDI2_ETF_NIC2_DBG_1,
+ GAUDI2_ETF_NIC3_DBG_0,
+ GAUDI2_ETF_NIC3_DBG_1,
+ GAUDI2_ETF_NIC4_DBG_0,
+ GAUDI2_ETF_NIC4_DBG_1,
+ GAUDI2_ETF_NIC5_DBG_0,
+ GAUDI2_ETF_NIC5_DBG_1,
+ GAUDI2_ETF_NIC6_DBG_0,
+ GAUDI2_ETF_NIC6_DBG_1,
+ GAUDI2_ETF_NIC7_DBG_0,
+ GAUDI2_ETF_NIC7_DBG_1,
+ GAUDI2_ETF_NIC8_DBG_0,
+ GAUDI2_ETF_NIC8_DBG_1,
+ GAUDI2_ETF_NIC9_DBG_0,
+ GAUDI2_ETF_NIC9_DBG_1,
+ GAUDI2_ETF_NIC10_DBG_0,
+ GAUDI2_ETF_NIC10_DBG_1,
+ GAUDI2_ETF_NIC11_DBG_0,
+ GAUDI2_ETF_NIC11_DBG_1,
+ GAUDI2_ETF_LAST = GAUDI2_ETF_NIC11_DBG_1
+};
+
+enum gaudi2_debug_funnel_regs_index {
+ GAUDI2_FUNNEL_FIRST = 0,
+ GAUDI2_FUNNEL_DCORE0_TPC0_EML = GAUDI2_FUNNEL_FIRST,
+ GAUDI2_FUNNEL_DCORE0_TPC1_EML,
+ GAUDI2_FUNNEL_DCORE0_TPC2_EML,
+ GAUDI2_FUNNEL_DCORE0_TPC3_EML,
+ GAUDI2_FUNNEL_DCORE0_TPC4_EML,
+ GAUDI2_FUNNEL_DCORE0_TPC5_EML,
+ GAUDI2_FUNNEL_DCORE0_TPC6_EML,
+ GAUDI2_FUNNEL_DCORE1_TPC0_EML,
+ GAUDI2_FUNNEL_DCORE1_TPC1_EML,
+ GAUDI2_FUNNEL_DCORE1_TPC2_EML,
+ GAUDI2_FUNNEL_DCORE1_TPC3_EML,
+ GAUDI2_FUNNEL_DCORE1_TPC4_EML,
+ GAUDI2_FUNNEL_DCORE1_TPC5_EML,
+ GAUDI2_FUNNEL_DCORE2_TPC0_EML,
+ GAUDI2_FUNNEL_DCORE2_TPC1_EML,
+ GAUDI2_FUNNEL_DCORE2_TPC2_EML,
+ GAUDI2_FUNNEL_DCORE2_TPC3_EML,
+ GAUDI2_FUNNEL_DCORE2_TPC4_EML,
+ GAUDI2_FUNNEL_DCORE2_TPC5_EML,
+ GAUDI2_FUNNEL_DCORE3_TPC0_EML,
+ GAUDI2_FUNNEL_DCORE3_TPC1_EML,
+ GAUDI2_FUNNEL_DCORE3_TPC2_EML,
+ GAUDI2_FUNNEL_DCORE3_TPC3_EML,
+ GAUDI2_FUNNEL_DCORE3_TPC4_EML,
+ GAUDI2_FUNNEL_DCORE3_TPC5_EML,
+ GAUDI2_FUNNEL_DCORE0_XFT,
+ GAUDI2_FUNNEL_DCORE0_TFT0,
+ GAUDI2_FUNNEL_DCORE0_TFT1,
+ GAUDI2_FUNNEL_DCORE0_TFT2,
+ GAUDI2_FUNNEL_DCORE0_RTR0,
+ GAUDI2_FUNNEL_DCORE0_RTR1,
+ GAUDI2_FUNNEL_DCORE0_RTR2,
+ GAUDI2_FUNNEL_DCORE0_RTR3,
+ GAUDI2_FUNNEL_DCORE0_RTR4,
+ GAUDI2_FUNNEL_DCORE0_MIF0,
+ GAUDI2_FUNNEL_DCORE0_RTR5,
+ GAUDI2_FUNNEL_DCORE0_MIF1,
+ GAUDI2_FUNNEL_DCORE0_RTR6,
+ GAUDI2_FUNNEL_DCORE0_MIF2,
+ GAUDI2_FUNNEL_DCORE0_RTR7,
+ GAUDI2_FUNNEL_DCORE0_MIF3,
+ GAUDI2_FUNNEL_DCORE1_XFT,
+ GAUDI2_FUNNEL_DCORE1_TFT0,
+ GAUDI2_FUNNEL_DCORE1_TFT1,
+ GAUDI2_FUNNEL_DCORE1_TFT2,
+ GAUDI2_FUNNEL_DCORE1_RTR0,
+ GAUDI2_FUNNEL_DCORE1_MIF0,
+ GAUDI2_FUNNEL_DCORE1_RTR1,
+ GAUDI2_FUNNEL_DCORE1_MIF1,
+ GAUDI2_FUNNEL_DCORE1_RTR2,
+ GAUDI2_FUNNEL_DCORE1_MIF2,
+ GAUDI2_FUNNEL_DCORE1_RTR3,
+ GAUDI2_FUNNEL_DCORE1_MIF3,
+ GAUDI2_FUNNEL_DCORE1_RTR4,
+ GAUDI2_FUNNEL_DCORE1_RTR5,
+ GAUDI2_FUNNEL_DCORE1_RTR6,
+ GAUDI2_FUNNEL_DCORE1_RTR7,
+ GAUDI2_FUNNEL_DCORE2_XFT,
+ GAUDI2_FUNNEL_DCORE2_TFT0,
+ GAUDI2_FUNNEL_DCORE2_TFT1,
+ GAUDI2_FUNNEL_DCORE2_TFT2,
+ GAUDI2_FUNNEL_DCORE2_RTR0,
+ GAUDI2_FUNNEL_DCORE2_RTR1,
+ GAUDI2_FUNNEL_DCORE2_RTR2,
+ GAUDI2_FUNNEL_DCORE2_RTR3,
+ GAUDI2_FUNNEL_DCORE2_RTR4,
+ GAUDI2_FUNNEL_DCORE2_MIF0,
+ GAUDI2_FUNNEL_DCORE2_RTR5,
+ GAUDI2_FUNNEL_DCORE2_MIF1,
+ GAUDI2_FUNNEL_DCORE2_RTR6,
+ GAUDI2_FUNNEL_DCORE2_MIF2,
+ GAUDI2_FUNNEL_DCORE2_RTR7,
+ GAUDI2_FUNNEL_DCORE2_MIF3,
+ GAUDI2_FUNNEL_DCORE3_XFT,
+ GAUDI2_FUNNEL_DCORE3_TFT0,
+ GAUDI2_FUNNEL_DCORE3_TFT1,
+ GAUDI2_FUNNEL_DCORE3_TFT2,
+ GAUDI2_FUNNEL_DCORE3_RTR0,
+ GAUDI2_FUNNEL_DCORE3_MIF0,
+ GAUDI2_FUNNEL_DCORE3_RTR1,
+ GAUDI2_FUNNEL_DCORE3_MIF1,
+ GAUDI2_FUNNEL_DCORE3_RTR2,
+ GAUDI2_FUNNEL_DCORE3_MIF2,
+ GAUDI2_FUNNEL_DCORE3_RTR3,
+ GAUDI2_FUNNEL_DCORE3_MIF3,
+ GAUDI2_FUNNEL_DCORE3_RTR4,
+ GAUDI2_FUNNEL_DCORE3_RTR5,
+ GAUDI2_FUNNEL_DCORE3_RTR6,
+ GAUDI2_FUNNEL_DCORE3_RTR7,
+ GAUDI2_FUNNEL_PSOC,
+ GAUDI2_FUNNEL_PSOC_ARC0,
+ GAUDI2_FUNNEL_PSOC_ARC1,
+ GAUDI2_FUNNEL_XDMA,
+ GAUDI2_FUNNEL_CPU,
+ GAUDI2_FUNNEL_PMMU,
+ GAUDI2_FUNNEL_PMMU_DEC,
+ GAUDI2_FUNNEL_DCORE0_XBAR_MID,
+ GAUDI2_FUNNEL_DCORE0_XBAR_EDGE,
+ GAUDI2_FUNNEL_DCORE1_XBAR_MID,
+ GAUDI2_FUNNEL_DCORE1_XBAR_EDGE,
+ GAUDI2_FUNNEL_DCORE2_XBAR_MID,
+ GAUDI2_FUNNEL_DCORE2_XBAR_EDGE,
+ GAUDI2_FUNNEL_DCORE3_XBAR_MID,
+ GAUDI2_FUNNEL_DCORE3_XBAR_EDGE,
+ GAUDI2_FUNNEL_ARC_FARM,
+ GAUDI2_FUNNEL_HBM0_MC0,
+ GAUDI2_FUNNEL_HBM0_MC1,
+ GAUDI2_FUNNEL_HBM1_MC0,
+ GAUDI2_FUNNEL_HBM1_MC1,
+ GAUDI2_FUNNEL_HBM2_MC0,
+ GAUDI2_FUNNEL_HBM2_MC1,
+ GAUDI2_FUNNEL_HBM3_MC0,
+ GAUDI2_FUNNEL_HBM3_MC1,
+ GAUDI2_FUNNEL_HBM4_MC0,
+ GAUDI2_FUNNEL_HBM4_MC1,
+ GAUDI2_FUNNEL_HBM5_MC0,
+ GAUDI2_FUNNEL_HBM5_MC1,
+ GAUDI2_FUNNEL_NIC0_DBG_TX,
+ GAUDI2_FUNNEL_NIC0_DBG_NCH,
+ GAUDI2_FUNNEL_NIC1_DBG_TX,
+ GAUDI2_FUNNEL_NIC1_DBG_NCH,
+ GAUDI2_FUNNEL_NIC2_DBG_TX,
+ GAUDI2_FUNNEL_NIC2_DBG_NCH,
+ GAUDI2_FUNNEL_NIC3_DBG_TX,
+ GAUDI2_FUNNEL_NIC3_DBG_NCH,
+ GAUDI2_FUNNEL_NIC4_DBG_TX,
+ GAUDI2_FUNNEL_NIC4_DBG_NCH,
+ GAUDI2_FUNNEL_NIC5_DBG_TX,
+ GAUDI2_FUNNEL_NIC5_DBG_NCH,
+ GAUDI2_FUNNEL_NIC6_DBG_TX,
+ GAUDI2_FUNNEL_NIC6_DBG_NCH,
+ GAUDI2_FUNNEL_NIC7_DBG_TX,
+ GAUDI2_FUNNEL_NIC7_DBG_NCH,
+ GAUDI2_FUNNEL_NIC8_DBG_TX,
+ GAUDI2_FUNNEL_NIC8_DBG_NCH,
+ GAUDI2_FUNNEL_NIC9_DBG_TX,
+ GAUDI2_FUNNEL_NIC9_DBG_NCH,
+ GAUDI2_FUNNEL_NIC10_DBG_TX,
+ GAUDI2_FUNNEL_NIC10_DBG_NCH,
+ GAUDI2_FUNNEL_NIC11_DBG_TX,
+ GAUDI2_FUNNEL_NIC11_DBG_NCH,
+ GAUDI2_FUNNEL_LAST = GAUDI2_FUNNEL_NIC11_DBG_NCH
+};
+
+enum gaudi2_debug_bmon_regs_index {
+ GAUDI2_BMON_FIRST = 0,
+ GAUDI2_BMON_DCORE0_TPC0_EML_0 = GAUDI2_BMON_FIRST,
+ GAUDI2_BMON_DCORE0_TPC0_EML_1,
+ GAUDI2_BMON_DCORE0_TPC0_EML_2,
+ GAUDI2_BMON_DCORE0_TPC0_EML_3,
+ GAUDI2_BMON_DCORE0_TPC1_EML_0,
+ GAUDI2_BMON_DCORE0_TPC1_EML_1,
+ GAUDI2_BMON_DCORE0_TPC1_EML_2,
+ GAUDI2_BMON_DCORE0_TPC1_EML_3,
+ GAUDI2_BMON_DCORE0_TPC2_EML_0,
+ GAUDI2_BMON_DCORE0_TPC2_EML_1,
+ GAUDI2_BMON_DCORE0_TPC2_EML_2,
+ GAUDI2_BMON_DCORE0_TPC2_EML_3,
+ GAUDI2_BMON_DCORE0_TPC3_EML_0,
+ GAUDI2_BMON_DCORE0_TPC3_EML_1,
+ GAUDI2_BMON_DCORE0_TPC3_EML_2,
+ GAUDI2_BMON_DCORE0_TPC3_EML_3,
+ GAUDI2_BMON_DCORE0_TPC4_EML_0,
+ GAUDI2_BMON_DCORE0_TPC4_EML_1,
+ GAUDI2_BMON_DCORE0_TPC4_EML_2,
+ GAUDI2_BMON_DCORE0_TPC4_EML_3,
+ GAUDI2_BMON_DCORE0_TPC5_EML_0,
+ GAUDI2_BMON_DCORE0_TPC5_EML_1,
+ GAUDI2_BMON_DCORE0_TPC5_EML_2,
+ GAUDI2_BMON_DCORE0_TPC5_EML_3,
+ GAUDI2_BMON_DCORE0_TPC6_EML_0,
+ GAUDI2_BMON_DCORE0_TPC6_EML_1,
+ GAUDI2_BMON_DCORE0_TPC6_EML_2,
+ GAUDI2_BMON_DCORE0_TPC6_EML_3,
+ GAUDI2_BMON_DCORE1_TPC0_EML_0,
+ GAUDI2_BMON_DCORE1_TPC0_EML_1,
+ GAUDI2_BMON_DCORE1_TPC0_EML_2,
+ GAUDI2_BMON_DCORE1_TPC0_EML_3,
+ GAUDI2_BMON_DCORE1_TPC1_EML_0,
+ GAUDI2_BMON_DCORE1_TPC1_EML_1,
+ GAUDI2_BMON_DCORE1_TPC1_EML_2,
+ GAUDI2_BMON_DCORE1_TPC1_EML_3,
+ GAUDI2_BMON_DCORE1_TPC2_EML_0,
+ GAUDI2_BMON_DCORE1_TPC2_EML_1,
+ GAUDI2_BMON_DCORE1_TPC2_EML_2,
+ GAUDI2_BMON_DCORE1_TPC2_EML_3,
+ GAUDI2_BMON_DCORE1_TPC3_EML_0,
+ GAUDI2_BMON_DCORE1_TPC3_EML_1,
+ GAUDI2_BMON_DCORE1_TPC3_EML_2,
+ GAUDI2_BMON_DCORE1_TPC3_EML_3,
+ GAUDI2_BMON_DCORE1_TPC4_EML_0,
+ GAUDI2_BMON_DCORE1_TPC4_EML_1,
+ GAUDI2_BMON_DCORE1_TPC4_EML_2,
+ GAUDI2_BMON_DCORE1_TPC4_EML_3,
+ GAUDI2_BMON_DCORE1_TPC5_EML_0,
+ GAUDI2_BMON_DCORE1_TPC5_EML_1,
+ GAUDI2_BMON_DCORE1_TPC5_EML_2,
+ GAUDI2_BMON_DCORE1_TPC5_EML_3,
+ GAUDI2_BMON_DCORE2_TPC0_EML_0,
+ GAUDI2_BMON_DCORE2_TPC0_EML_1,
+ GAUDI2_BMON_DCORE2_TPC0_EML_2,
+ GAUDI2_BMON_DCORE2_TPC0_EML_3,
+ GAUDI2_BMON_DCORE2_TPC1_EML_0,
+ GAUDI2_BMON_DCORE2_TPC1_EML_1,
+ GAUDI2_BMON_DCORE2_TPC1_EML_2,
+ GAUDI2_BMON_DCORE2_TPC1_EML_3,
+ GAUDI2_BMON_DCORE2_TPC2_EML_0,
+ GAUDI2_BMON_DCORE2_TPC2_EML_1,
+ GAUDI2_BMON_DCORE2_TPC2_EML_2,
+ GAUDI2_BMON_DCORE2_TPC2_EML_3,
+ GAUDI2_BMON_DCORE2_TPC3_EML_0,
+ GAUDI2_BMON_DCORE2_TPC3_EML_1,
+ GAUDI2_BMON_DCORE2_TPC3_EML_2,
+ GAUDI2_BMON_DCORE2_TPC3_EML_3,
+ GAUDI2_BMON_DCORE2_TPC4_EML_0,
+ GAUDI2_BMON_DCORE2_TPC4_EML_1,
+ GAUDI2_BMON_DCORE2_TPC4_EML_2,
+ GAUDI2_BMON_DCORE2_TPC4_EML_3,
+ GAUDI2_BMON_DCORE2_TPC5_EML_0,
+ GAUDI2_BMON_DCORE2_TPC5_EML_1,
+ GAUDI2_BMON_DCORE2_TPC5_EML_2,
+ GAUDI2_BMON_DCORE2_TPC5_EML_3,
+ GAUDI2_BMON_DCORE3_TPC0_EML_0,
+ GAUDI2_BMON_DCORE3_TPC0_EML_1,
+ GAUDI2_BMON_DCORE3_TPC0_EML_2,
+ GAUDI2_BMON_DCORE3_TPC0_EML_3,
+ GAUDI2_BMON_DCORE3_TPC1_EML_0,
+ GAUDI2_BMON_DCORE3_TPC1_EML_1,
+ GAUDI2_BMON_DCORE3_TPC1_EML_2,
+ GAUDI2_BMON_DCORE3_TPC1_EML_3,
+ GAUDI2_BMON_DCORE3_TPC2_EML_0,
+ GAUDI2_BMON_DCORE3_TPC2_EML_1,
+ GAUDI2_BMON_DCORE3_TPC2_EML_2,
+ GAUDI2_BMON_DCORE3_TPC2_EML_3,
+ GAUDI2_BMON_DCORE3_TPC3_EML_0,
+ GAUDI2_BMON_DCORE3_TPC3_EML_1,
+ GAUDI2_BMON_DCORE3_TPC3_EML_2,
+ GAUDI2_BMON_DCORE3_TPC3_EML_3,
+ GAUDI2_BMON_DCORE3_TPC4_EML_0,
+ GAUDI2_BMON_DCORE3_TPC4_EML_1,
+ GAUDI2_BMON_DCORE3_TPC4_EML_2,
+ GAUDI2_BMON_DCORE3_TPC4_EML_3,
+ GAUDI2_BMON_DCORE3_TPC5_EML_0,
+ GAUDI2_BMON_DCORE3_TPC5_EML_1,
+ GAUDI2_BMON_DCORE3_TPC5_EML_2,
+ GAUDI2_BMON_DCORE3_TPC5_EML_3,
+ GAUDI2_BMON_DCORE0_HMMU0_0,
+ GAUDI2_BMON_DCORE0_HMMU0_1,
+ GAUDI2_BMON_DCORE0_HMMU0_3,
+ GAUDI2_BMON_DCORE0_HMMU0_2,
+ GAUDI2_BMON_DCORE0_HMMU0_4,
+ GAUDI2_BMON_DCORE0_HMMU1_0,
+ GAUDI2_BMON_DCORE0_HMMU1_1,
+ GAUDI2_BMON_DCORE0_HMMU1_3,
+ GAUDI2_BMON_DCORE0_HMMU1_2,
+ GAUDI2_BMON_DCORE0_HMMU1_4,
+ GAUDI2_BMON_DCORE0_HMMU2_0,
+ GAUDI2_BMON_DCORE0_HMMU2_1,
+ GAUDI2_BMON_DCORE0_HMMU2_3,
+ GAUDI2_BMON_DCORE0_HMMU2_2,
+ GAUDI2_BMON_DCORE0_HMMU2_4,
+ GAUDI2_BMON_DCORE0_HMMU3_0,
+ GAUDI2_BMON_DCORE0_HMMU3_1,
+ GAUDI2_BMON_DCORE0_HMMU3_3,
+ GAUDI2_BMON_DCORE0_HMMU3_2,
+ GAUDI2_BMON_DCORE0_HMMU3_4,
+ GAUDI2_BMON_DCORE0_MME_CTRL_0,
+ GAUDI2_BMON_DCORE0_MME_CTRL_1,
+ GAUDI2_BMON_DCORE0_MME_CTRL_2,
+ GAUDI2_BMON_DCORE0_MME_CTRL_3,
+ GAUDI2_BMON_DCORE0_MME_SBTE0_0,
+ GAUDI2_BMON_DCORE0_MME_SBTE1_0,
+ GAUDI2_BMON_DCORE0_MME_SBTE2_0,
+ GAUDI2_BMON_DCORE0_MME_SBTE3_0,
+ GAUDI2_BMON_DCORE0_MME_SBTE4_0,
+ GAUDI2_BMON_DCORE0_MME_ACC_0,
+ GAUDI2_BMON_DCORE0_MME_ACC_1,
+ GAUDI2_BMON_DCORE0_SM,
+ GAUDI2_BMON_DCORE0_SM_1,
+ GAUDI2_BMON_DCORE0_EDMA0_0,
+ GAUDI2_BMON_DCORE0_EDMA0_1,
+ GAUDI2_BMON_DCORE0_EDMA1_0,
+ GAUDI2_BMON_DCORE0_EDMA1_1,
+ GAUDI2_BMON_DCORE0_VDEC0_0,
+ GAUDI2_BMON_DCORE0_VDEC0_1,
+ GAUDI2_BMON_DCORE0_VDEC0_2,
+ GAUDI2_BMON_DCORE0_VDEC1_0,
+ GAUDI2_BMON_DCORE0_VDEC1_1,
+ GAUDI2_BMON_DCORE0_VDEC1_2,
+ GAUDI2_BMON_DCORE1_HMMU0_0,
+ GAUDI2_BMON_DCORE1_HMMU0_1,
+ GAUDI2_BMON_DCORE1_HMMU0_3,
+ GAUDI2_BMON_DCORE1_HMMU0_2,
+ GAUDI2_BMON_DCORE1_HMMU0_4,
+ GAUDI2_BMON_DCORE1_HMMU1_0,
+ GAUDI2_BMON_DCORE1_HMMU1_1,
+ GAUDI2_BMON_DCORE1_HMMU1_3,
+ GAUDI2_BMON_DCORE1_HMMU1_2,
+ GAUDI2_BMON_DCORE1_HMMU1_4,
+ GAUDI2_BMON_DCORE1_HMMU2_0,
+ GAUDI2_BMON_DCORE1_HMMU2_1,
+ GAUDI2_BMON_DCORE1_HMMU2_3,
+ GAUDI2_BMON_DCORE1_HMMU2_2,
+ GAUDI2_BMON_DCORE1_HMMU2_4,
+ GAUDI2_BMON_DCORE1_HMMU3_0,
+ GAUDI2_BMON_DCORE1_HMMU3_1,
+ GAUDI2_BMON_DCORE1_HMMU3_3,
+ GAUDI2_BMON_DCORE1_HMMU3_2,
+ GAUDI2_BMON_DCORE1_HMMU3_4,
+ GAUDI2_BMON_DCORE1_MME_CTRL_0,
+ GAUDI2_BMON_DCORE1_MME_CTRL_1,
+ GAUDI2_BMON_DCORE1_MME_CTRL_2,
+ GAUDI2_BMON_DCORE1_MME_CTRL_3,
+ GAUDI2_BMON_DCORE1_MME_SBTE0_0,
+ GAUDI2_BMON_DCORE1_MME_SBTE1_0,
+ GAUDI2_BMON_DCORE1_MME_SBTE2_0,
+ GAUDI2_BMON_DCORE1_MME_SBTE3_0,
+ GAUDI2_BMON_DCORE1_MME_SBTE4_0,
+ GAUDI2_BMON_DCORE1_MME_ACC_0,
+ GAUDI2_BMON_DCORE1_MME_ACC_1,
+ GAUDI2_BMON_DCORE1_SM,
+ GAUDI2_BMON_DCORE1_SM_1,
+ GAUDI2_BMON_DCORE1_EDMA0_0,
+ GAUDI2_BMON_DCORE1_EDMA0_1,
+ GAUDI2_BMON_DCORE1_EDMA1_0,
+ GAUDI2_BMON_DCORE1_EDMA1_1,
+ GAUDI2_BMON_DCORE1_VDEC0_0,
+ GAUDI2_BMON_DCORE1_VDEC0_1,
+ GAUDI2_BMON_DCORE1_VDEC0_2,
+ GAUDI2_BMON_DCORE1_VDEC1_0,
+ GAUDI2_BMON_DCORE1_VDEC1_1,
+ GAUDI2_BMON_DCORE1_VDEC1_2,
+ GAUDI2_BMON_DCORE2_HMMU0_0,
+ GAUDI2_BMON_DCORE2_HMMU0_1,
+ GAUDI2_BMON_DCORE2_HMMU0_3,
+ GAUDI2_BMON_DCORE2_HMMU0_2,
+ GAUDI2_BMON_DCORE2_HMMU0_4,
+ GAUDI2_BMON_DCORE2_HMMU1_0,
+ GAUDI2_BMON_DCORE2_HMMU1_1,
+ GAUDI2_BMON_DCORE2_HMMU1_3,
+ GAUDI2_BMON_DCORE2_HMMU1_2,
+ GAUDI2_BMON_DCORE2_HMMU1_4,
+ GAUDI2_BMON_DCORE2_HMMU2_0,
+ GAUDI2_BMON_DCORE2_HMMU2_1,
+ GAUDI2_BMON_DCORE2_HMMU2_3,
+ GAUDI2_BMON_DCORE2_HMMU2_2,
+ GAUDI2_BMON_DCORE2_HMMU2_4,
+ GAUDI2_BMON_DCORE2_HMMU3_0,
+ GAUDI2_BMON_DCORE2_HMMU3_1,
+ GAUDI2_BMON_DCORE2_HMMU3_3,
+ GAUDI2_BMON_DCORE2_HMMU3_2,
+ GAUDI2_BMON_DCORE2_HMMU3_4,
+ GAUDI2_BMON_DCORE2_MME_CTRL_0,
+ GAUDI2_BMON_DCORE2_MME_CTRL_1,
+ GAUDI2_BMON_DCORE2_MME_CTRL_2,
+ GAUDI2_BMON_DCORE2_MME_CTRL_3,
+ GAUDI2_BMON_DCORE2_MME_SBTE0_0,
+ GAUDI2_BMON_DCORE2_MME_SBTE1_0,
+ GAUDI2_BMON_DCORE2_MME_SBTE2_0,
+ GAUDI2_BMON_DCORE2_MME_SBTE3_0,
+ GAUDI2_BMON_DCORE2_MME_SBTE4_0,
+ GAUDI2_BMON_DCORE2_MME_ACC_0,
+ GAUDI2_BMON_DCORE2_MME_ACC_1,
+ GAUDI2_BMON_DCORE2_SM,
+ GAUDI2_BMON_DCORE2_SM_1,
+ GAUDI2_BMON_DCORE2_EDMA0_0,
+ GAUDI2_BMON_DCORE2_EDMA0_1,
+ GAUDI2_BMON_DCORE2_EDMA1_0,
+ GAUDI2_BMON_DCORE2_EDMA1_1,
+ GAUDI2_BMON_DCORE2_VDEC0_0,
+ GAUDI2_BMON_DCORE2_VDEC0_1,
+ GAUDI2_BMON_DCORE2_VDEC0_2,
+ GAUDI2_BMON_DCORE2_VDEC1_0,
+ GAUDI2_BMON_DCORE2_VDEC1_1,
+ GAUDI2_BMON_DCORE2_VDEC1_2,
+ GAUDI2_BMON_DCORE3_HMMU0_0,
+ GAUDI2_BMON_DCORE3_HMMU0_1,
+ GAUDI2_BMON_DCORE3_HMMU0_3,
+ GAUDI2_BMON_DCORE3_HMMU0_2,
+ GAUDI2_BMON_DCORE3_HMMU0_4,
+ GAUDI2_BMON_DCORE3_HMMU1_0,
+ GAUDI2_BMON_DCORE3_HMMU1_1,
+ GAUDI2_BMON_DCORE3_HMMU1_3,
+ GAUDI2_BMON_DCORE3_HMMU1_2,
+ GAUDI2_BMON_DCORE3_HMMU1_4,
+ GAUDI2_BMON_DCORE3_HMMU2_0,
+ GAUDI2_BMON_DCORE3_HMMU2_1,
+ GAUDI2_BMON_DCORE3_HMMU2_3,
+ GAUDI2_BMON_DCORE3_HMMU2_2,
+ GAUDI2_BMON_DCORE3_HMMU2_4,
+ GAUDI2_BMON_DCORE3_HMMU3_0,
+ GAUDI2_BMON_DCORE3_HMMU3_1,
+ GAUDI2_BMON_DCORE3_HMMU3_3,
+ GAUDI2_BMON_DCORE3_HMMU3_2,
+ GAUDI2_BMON_DCORE3_HMMU3_4,
+ GAUDI2_BMON_DCORE3_MME_CTRL_0,
+ GAUDI2_BMON_DCORE3_MME_CTRL_1,
+ GAUDI2_BMON_DCORE3_MME_CTRL_2,
+ GAUDI2_BMON_DCORE3_MME_CTRL_3,
+ GAUDI2_BMON_DCORE3_MME_SBTE0_0,
+ GAUDI2_BMON_DCORE3_MME_SBTE1_0,
+ GAUDI2_BMON_DCORE3_MME_SBTE2_0,
+ GAUDI2_BMON_DCORE3_MME_SBTE3_0,
+ GAUDI2_BMON_DCORE3_MME_SBTE4_0,
+ GAUDI2_BMON_DCORE3_MME_ACC_0,
+ GAUDI2_BMON_DCORE3_MME_ACC_1,
+ GAUDI2_BMON_DCORE3_SM,
+ GAUDI2_BMON_DCORE3_SM_1,
+ GAUDI2_BMON_DCORE3_EDMA0_0,
+ GAUDI2_BMON_DCORE3_EDMA0_1,
+ GAUDI2_BMON_DCORE3_EDMA1_0,
+ GAUDI2_BMON_DCORE3_EDMA1_1,
+ GAUDI2_BMON_DCORE3_VDEC0_0,
+ GAUDI2_BMON_DCORE3_VDEC0_1,
+ GAUDI2_BMON_DCORE3_VDEC0_2,
+ GAUDI2_BMON_DCORE3_VDEC1_0,
+ GAUDI2_BMON_DCORE3_VDEC1_1,
+ GAUDI2_BMON_DCORE3_VDEC1_2,
+ GAUDI2_BMON_PCIE_MSTR_WR,
+ GAUDI2_BMON_PCIE_MSTR_RD,
+ GAUDI2_BMON_PCIE_SLV_WR,
+ GAUDI2_BMON_PCIE_SLV_RD,
+ GAUDI2_BMON_PSOC_ARC0_0,
+ GAUDI2_BMON_PSOC_ARC0_1,
+ GAUDI2_BMON_PSOC_ARC1_0,
+ GAUDI2_BMON_PSOC_ARC1_1,
+ GAUDI2_BMON_PDMA0_0,
+ GAUDI2_BMON_PDMA0_1,
+ GAUDI2_BMON_PDMA1_0,
+ GAUDI2_BMON_PDMA1_1,
+ GAUDI2_BMON_CPU_WR,
+ GAUDI2_BMON_CPU_RD,
+ GAUDI2_BMON_PMMU_0,
+ GAUDI2_BMON_PMMU_1,
+ GAUDI2_BMON_PMMU_2,
+ GAUDI2_BMON_PMMU_3,
+ GAUDI2_BMON_PMMU_4,
+ GAUDI2_BMON_ROT0_0,
+ GAUDI2_BMON_ROT0_1,
+ GAUDI2_BMON_ROT0_2,
+ GAUDI2_BMON_ROT0_3,
+ GAUDI2_BMON_ROT1_0,
+ GAUDI2_BMON_ROT1_1,
+ GAUDI2_BMON_ROT1_2,
+ GAUDI2_BMON_ROT1_3,
+ GAUDI2_BMON_ARC_FARM_0,
+ GAUDI2_BMON_ARC_FARM_1,
+ GAUDI2_BMON_ARC_FARM_2,
+ GAUDI2_BMON_ARC_FARM_3,
+ GAUDI2_BMON_KDMA_0,
+ GAUDI2_BMON_KDMA_1,
+ GAUDI2_BMON_KDMA_2,
+ GAUDI2_BMON_KDMA_3,
+ GAUDI2_BMON_PCIE_VDEC0_0,
+ GAUDI2_BMON_PCIE_VDEC0_1,
+ GAUDI2_BMON_PCIE_VDEC0_2,
+ GAUDI2_BMON_PCIE_VDEC1_0,
+ GAUDI2_BMON_PCIE_VDEC1_1,
+ GAUDI2_BMON_PCIE_VDEC1_2,
+ GAUDI2_BMON_NIC0_DBG_0_0,
+ GAUDI2_BMON_NIC0_DBG_1_0,
+ GAUDI2_BMON_NIC0_DBG_2_0,
+ GAUDI2_BMON_NIC0_DBG_0_1,
+ GAUDI2_BMON_NIC0_DBG_1_1,
+ GAUDI2_BMON_NIC0_DBG_2_1,
+ GAUDI2_BMON_NIC1_DBG_0_0,
+ GAUDI2_BMON_NIC1_DBG_1_0,
+ GAUDI2_BMON_NIC1_DBG_2_0,
+ GAUDI2_BMON_NIC1_DBG_0_1,
+ GAUDI2_BMON_NIC1_DBG_1_1,
+ GAUDI2_BMON_NIC1_DBG_2_1,
+ GAUDI2_BMON_NIC2_DBG_0_0,
+ GAUDI2_BMON_NIC2_DBG_1_0,
+ GAUDI2_BMON_NIC2_DBG_2_0,
+ GAUDI2_BMON_NIC2_DBG_0_1,
+ GAUDI2_BMON_NIC2_DBG_1_1,
+ GAUDI2_BMON_NIC2_DBG_2_1,
+ GAUDI2_BMON_NIC3_DBG_0_0,
+ GAUDI2_BMON_NIC3_DBG_1_0,
+ GAUDI2_BMON_NIC3_DBG_2_0,
+ GAUDI2_BMON_NIC3_DBG_0_1,
+ GAUDI2_BMON_NIC3_DBG_1_1,
+ GAUDI2_BMON_NIC3_DBG_2_1,
+ GAUDI2_BMON_NIC4_DBG_0_0,
+ GAUDI2_BMON_NIC4_DBG_1_0,
+ GAUDI2_BMON_NIC4_DBG_2_0,
+ GAUDI2_BMON_NIC4_DBG_0_1,
+ GAUDI2_BMON_NIC4_DBG_1_1,
+ GAUDI2_BMON_NIC4_DBG_2_1,
+ GAUDI2_BMON_NIC5_DBG_0_0,
+ GAUDI2_BMON_NIC5_DBG_1_0,
+ GAUDI2_BMON_NIC5_DBG_2_0,
+ GAUDI2_BMON_NIC5_DBG_0_1,
+ GAUDI2_BMON_NIC5_DBG_1_1,
+ GAUDI2_BMON_NIC5_DBG_2_1,
+ GAUDI2_BMON_NIC6_DBG_0_0,
+ GAUDI2_BMON_NIC6_DBG_1_0,
+ GAUDI2_BMON_NIC6_DBG_2_0,
+ GAUDI2_BMON_NIC6_DBG_0_1,
+ GAUDI2_BMON_NIC6_DBG_1_1,
+ GAUDI2_BMON_NIC6_DBG_2_1,
+ GAUDI2_BMON_NIC7_DBG_0_0,
+ GAUDI2_BMON_NIC7_DBG_1_0,
+ GAUDI2_BMON_NIC7_DBG_2_0,
+ GAUDI2_BMON_NIC7_DBG_0_1,
+ GAUDI2_BMON_NIC7_DBG_1_1,
+ GAUDI2_BMON_NIC7_DBG_2_1,
+ GAUDI2_BMON_NIC8_DBG_0_0,
+ GAUDI2_BMON_NIC8_DBG_1_0,
+ GAUDI2_BMON_NIC8_DBG_2_0,
+ GAUDI2_BMON_NIC8_DBG_0_1,
+ GAUDI2_BMON_NIC8_DBG_1_1,
+ GAUDI2_BMON_NIC8_DBG_2_1,
+ GAUDI2_BMON_NIC9_DBG_0_0,
+ GAUDI2_BMON_NIC9_DBG_1_0,
+ GAUDI2_BMON_NIC9_DBG_2_0,
+ GAUDI2_BMON_NIC9_DBG_0_1,
+ GAUDI2_BMON_NIC9_DBG_1_1,
+ GAUDI2_BMON_NIC9_DBG_2_1,
+ GAUDI2_BMON_NIC10_DBG_0_0,
+ GAUDI2_BMON_NIC10_DBG_1_0,
+ GAUDI2_BMON_NIC10_DBG_2_0,
+ GAUDI2_BMON_NIC10_DBG_0_1,
+ GAUDI2_BMON_NIC10_DBG_1_1,
+ GAUDI2_BMON_NIC10_DBG_2_1,
+ GAUDI2_BMON_NIC11_DBG_0_0,
+ GAUDI2_BMON_NIC11_DBG_1_0,
+ GAUDI2_BMON_NIC11_DBG_2_0,
+ GAUDI2_BMON_NIC11_DBG_0_1,
+ GAUDI2_BMON_NIC11_DBG_1_1,
+ GAUDI2_BMON_NIC11_DBG_2_1,
+ GAUDI2_BMON_LAST = GAUDI2_BMON_NIC11_DBG_2_1
+};
+
+enum gaudi2_debug_spmu_regs_index {
+ GAUDI2_SPMU_FIRST = 0,
+ GAUDI2_SPMU_DCORE0_TPC0_EML = GAUDI2_SPMU_FIRST,
+ GAUDI2_SPMU_DCORE0_TPC1_EML,
+ GAUDI2_SPMU_DCORE0_TPC2_EML,
+ GAUDI2_SPMU_DCORE0_TPC3_EML,
+ GAUDI2_SPMU_DCORE0_TPC4_EML,
+ GAUDI2_SPMU_DCORE0_TPC5_EML,
+ GAUDI2_SPMU_DCORE0_TPC6_EML,
+ GAUDI2_SPMU_DCORE1_TPC0_EML,
+ GAUDI2_SPMU_DCORE1_TPC1_EML,
+ GAUDI2_SPMU_DCORE1_TPC2_EML,
+ GAUDI2_SPMU_DCORE1_TPC3_EML,
+ GAUDI2_SPMU_DCORE1_TPC4_EML,
+ GAUDI2_SPMU_DCORE1_TPC5_EML,
+ GAUDI2_SPMU_DCORE2_TPC0_EML,
+ GAUDI2_SPMU_DCORE2_TPC1_EML,
+ GAUDI2_SPMU_DCORE2_TPC2_EML,
+ GAUDI2_SPMU_DCORE2_TPC3_EML,
+ GAUDI2_SPMU_DCORE2_TPC4_EML,
+ GAUDI2_SPMU_DCORE2_TPC5_EML,
+ GAUDI2_SPMU_DCORE3_TPC0_EML,
+ GAUDI2_SPMU_DCORE3_TPC1_EML,
+ GAUDI2_SPMU_DCORE3_TPC2_EML,
+ GAUDI2_SPMU_DCORE3_TPC3_EML,
+ GAUDI2_SPMU_DCORE3_TPC4_EML,
+ GAUDI2_SPMU_DCORE3_TPC5_EML,
+ GAUDI2_SPMU_DCORE0_HMMU0_CS,
+ GAUDI2_SPMU_DCORE0_HMMU1_CS,
+ GAUDI2_SPMU_DCORE0_HMMU2_CS,
+ GAUDI2_SPMU_DCORE0_HMMU3_CS,
+ GAUDI2_SPMU_DCORE0_MME_CTRL,
+ GAUDI2_SPMU_DCORE0_MME_SBTE0,
+ GAUDI2_SPMU_DCORE0_MME_SBTE1,
+ GAUDI2_SPMU_DCORE0_MME_SBTE2,
+ GAUDI2_SPMU_DCORE0_MME_SBTE3,
+ GAUDI2_SPMU_DCORE0_MME_SBTE4,
+ GAUDI2_SPMU_DCORE0_MME_ACC,
+ GAUDI2_SPMU_DCORE0_SM,
+ GAUDI2_SPMU_DCORE0_EDMA0_CS,
+ GAUDI2_SPMU_DCORE0_EDMA1_CS,
+ GAUDI2_SPMU_DCORE0_VDEC0_CS,
+ GAUDI2_SPMU_DCORE0_VDEC1_CS,
+ GAUDI2_SPMU_DCORE1_HMMU0_CS,
+ GAUDI2_SPMU_DCORE1_HMMU1_CS,
+ GAUDI2_SPMU_DCORE1_HMMU2_CS,
+ GAUDI2_SPMU_DCORE1_HMMU3_CS,
+ GAUDI2_SPMU_DCORE1_MME_CTRL,
+ GAUDI2_SPMU_DCORE1_MME_SBTE0,
+ GAUDI2_SPMU_DCORE1_MME_SBTE1,
+ GAUDI2_SPMU_DCORE1_MME_SBTE2,
+ GAUDI2_SPMU_DCORE1_MME_SBTE3,
+ GAUDI2_SPMU_DCORE1_MME_SBTE4,
+ GAUDI2_SPMU_DCORE1_MME_ACC,
+ GAUDI2_SPMU_DCORE1_SM,
+ GAUDI2_SPMU_DCORE1_EDMA0_CS,
+ GAUDI2_SPMU_DCORE1_EDMA1_CS,
+ GAUDI2_SPMU_DCORE1_VDEC0_CS,
+ GAUDI2_SPMU_DCORE1_VDEC1_CS,
+ GAUDI2_SPMU_DCORE2_HMMU0_CS,
+ GAUDI2_SPMU_DCORE2_HMMU1_CS,
+ GAUDI2_SPMU_DCORE2_HMMU2_CS,
+ GAUDI2_SPMU_DCORE2_HMMU3_CS,
+ GAUDI2_SPMU_DCORE2_MME_CTRL,
+ GAUDI2_SPMU_DCORE2_MME_SBTE0,
+ GAUDI2_SPMU_DCORE2_MME_SBTE1,
+ GAUDI2_SPMU_DCORE2_MME_SBTE2,
+ GAUDI2_SPMU_DCORE2_MME_SBTE3,
+ GAUDI2_SPMU_DCORE2_MME_SBTE4,
+ GAUDI2_SPMU_DCORE2_MME_ACC,
+ GAUDI2_SPMU_DCORE2_SM,
+ GAUDI2_SPMU_DCORE2_EDMA0_CS,
+ GAUDI2_SPMU_DCORE2_EDMA1_CS,
+ GAUDI2_SPMU_DCORE2_VDEC0_CS,
+ GAUDI2_SPMU_DCORE2_VDEC1_CS,
+ GAUDI2_SPMU_DCORE3_HMMU0_CS,
+ GAUDI2_SPMU_DCORE3_HMMU1_CS,
+ GAUDI2_SPMU_DCORE3_HMMU2_CS,
+ GAUDI2_SPMU_DCORE3_HMMU3_CS,
+ GAUDI2_SPMU_DCORE3_MME_CTRL,
+ GAUDI2_SPMU_DCORE3_MME_SBTE0,
+ GAUDI2_SPMU_DCORE3_MME_SBTE1,
+ GAUDI2_SPMU_DCORE3_MME_SBTE2,
+ GAUDI2_SPMU_DCORE3_MME_SBTE3,
+ GAUDI2_SPMU_DCORE3_MME_SBTE4,
+ GAUDI2_SPMU_DCORE3_MME_ACC,
+ GAUDI2_SPMU_DCORE3_SM,
+ GAUDI2_SPMU_DCORE3_EDMA0_CS,
+ GAUDI2_SPMU_DCORE3_EDMA1_CS,
+ GAUDI2_SPMU_DCORE3_VDEC0_CS,
+ GAUDI2_SPMU_DCORE3_VDEC1_CS,
+ GAUDI2_SPMU_PCIE,
+ GAUDI2_SPMU_PSOC_ARC0_CS,
+ GAUDI2_SPMU_PSOC_ARC1_CS,
+ GAUDI2_SPMU_PDMA0_CS,
+ GAUDI2_SPMU_PDMA1_CS,
+ GAUDI2_SPMU_PMMU_CS,
+ GAUDI2_SPMU_ROT0_CS,
+ GAUDI2_SPMU_ROT1_CS,
+ GAUDI2_SPMU_ARC_FARM_CS,
+ GAUDI2_SPMU_KDMA_CS,
+ GAUDI2_SPMU_PCIE_VDEC0_CS,
+ GAUDI2_SPMU_PCIE_VDEC1_CS,
+ GAUDI2_SPMU_HBM0_MC0_CS,
+ GAUDI2_SPMU_HBM0_MC1_CS,
+ GAUDI2_SPMU_HBM1_MC0_CS,
+ GAUDI2_SPMU_HBM1_MC1_CS,
+ GAUDI2_SPMU_HBM2_MC0_CS,
+ GAUDI2_SPMU_HBM2_MC1_CS,
+ GAUDI2_SPMU_HBM3_MC0_CS,
+ GAUDI2_SPMU_HBM3_MC1_CS,
+ GAUDI2_SPMU_HBM4_MC0_CS,
+ GAUDI2_SPMU_HBM4_MC1_CS,
+ GAUDI2_SPMU_HBM5_MC0_CS,
+ GAUDI2_SPMU_HBM5_MC1_CS,
+ GAUDI2_SPMU_NIC0_DBG_0,
+ GAUDI2_SPMU_NIC0_DBG_1,
+ GAUDI2_SPMU_NIC1_DBG_0,
+ GAUDI2_SPMU_NIC1_DBG_1,
+ GAUDI2_SPMU_NIC2_DBG_0,
+ GAUDI2_SPMU_NIC2_DBG_1,
+ GAUDI2_SPMU_NIC3_DBG_0,
+ GAUDI2_SPMU_NIC3_DBG_1,
+ GAUDI2_SPMU_NIC4_DBG_0,
+ GAUDI2_SPMU_NIC4_DBG_1,
+ GAUDI2_SPMU_NIC5_DBG_0,
+ GAUDI2_SPMU_NIC5_DBG_1,
+ GAUDI2_SPMU_NIC6_DBG_0,
+ GAUDI2_SPMU_NIC6_DBG_1,
+ GAUDI2_SPMU_NIC7_DBG_0,
+ GAUDI2_SPMU_NIC7_DBG_1,
+ GAUDI2_SPMU_NIC8_DBG_0,
+ GAUDI2_SPMU_NIC8_DBG_1,
+ GAUDI2_SPMU_NIC9_DBG_0,
+ GAUDI2_SPMU_NIC9_DBG_1,
+ GAUDI2_SPMU_NIC10_DBG_0,
+ GAUDI2_SPMU_NIC10_DBG_1,
+ GAUDI2_SPMU_NIC11_DBG_0,
+ GAUDI2_SPMU_NIC11_DBG_1,
+ GAUDI2_SPMU_LAST = GAUDI2_SPMU_NIC11_DBG_1
+};
+
+#endif /* GAUDI2_CORESIGHT_H */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h
new file mode 100644
index 000000000000..e4a7d5725096
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI2_FW_IF_H
+#define GAUDI2_FW_IF_H
+
+#define GAUDI2_EVENT_QUEUE_MSIX_IDX 0
+
+#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
+#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */
+
+#define GAUDI2_PLL_FREQ_LOW 200000000 /* 200 MHz */
+
+#define GAUDI2_SP_SRAM_BASE_ADDR 0x27FE0000
+#define GAUDI2_MAILBOX_BASE_ADDR 0x27FE1800
+
+#define GAUDI2_NUM_MME 4
+
+#define GAUDI2_ARCPID_TX_MB_SIZE 0x1000
+#define GAUDI2_ARCPID_RX_MB_SIZE 0x400
+#define GAUDI2_ARM_TX_MB_SIZE 0x400
+#define GAUDI2_ARM_RX_MB_SIZE 0x1800
+
+#define GAUDI2_DCCM_BASE_ADDR 0x27020000
+#define GAUDI2_ARCPID_TX_MB_ADDR GAUDI2_DCCM_BASE_ADDR
+
+#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + \
+ GAUDI2_ARCPID_TX_MB_SIZE)
+
+#define GAUDI2_ARM_TX_MB_ADDR GAUDI2_MAILBOX_BASE_ADDR
+
+#define GAUDI2_ARM_RX_MB_ADDR (GAUDI2_ARM_TX_MB_ADDR + \
+ GAUDI2_ARM_TX_MB_SIZE)
+
+#define GAUDI2_ARM_TX_MB_OFFSET (GAUDI2_ARM_TX_MB_ADDR - \
+ GAUDI2_SP_SRAM_BASE_ADDR)
+
+#define GAUDI2_ARM_RX_MB_OFFSET (GAUDI2_ARM_RX_MB_ADDR - \
+ GAUDI2_SP_SRAM_BASE_ADDR)
+
+enum gaudi2_fw_status {
+ GAUDI2_PID_STATUS_UP = 0x1, /* PID on ARC0 is up */
+ GAUDI2_ARM_STATUS_UP = 0x2, /* ARM Linux Boot complete */
+ GAUDI2_MGMT_STATUS_UP = 0x3, /* ARC1 Mgmt is up */
+ GAUDI2_STATUS_LAST = 0xFF
+};
+
+struct gaudi2_cold_rst_data {
+ union {
+ struct {
+ u32 recovery_flag: 1;
+ u32 validation_flag: 1;
+ u32 efuse_read_flag: 1;
+ u32 spsram_init_done : 1;
+ u32 fake_security_enable : 1;
+ u32 fake_sig_validation_en : 1;
+ u32 reserved : 26;
+ };
+ __le32 data;
+ };
+};
+
+enum gaudi2_rst_src {
+ HL_COLD_RST = 1,
+ HL_MANUAL_RST = 2,
+ HL_PRSTN_RST = 4,
+ HL_SOFT_RST = 8,
+ HL_WD_RST = 16,
+ HL_FW_ALL_RST = 32,
+ HL_SW_ALL_RST = 64,
+ HL_FLR_RST = 128,
+ HL_ECC_DERR_RST = 256
+};
+
+struct gaudi2_redundancy_ctx {
+ int redundant_hbm;
+ int redundant_edma;
+ int redundant_tpc;
+ int redundant_vdec;
+ __le64 hbm_mask;
+ __le64 edma_mask;
+ __le64 tpc_mask;
+ __le64 vdec_mask;
+ __le64 mme_mask;
+ __le64 nic_mask;
+ __le64 rtr_mask;
+ __le64 hmmu_hif_iso;
+ __le64 xbar_edge_iso;
+ __le64 hmmu_hif_mask;
+ __le64 xbar_edge_mask;
+ __u8 mme_pe_iso[GAUDI2_NUM_MME];
+ __le32 full_hbm_mode; /* true on full (non-binning HBM) */
+} __packed;
+
+#endif /* GAUDI2_FW_IF_H */
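
A brief aside on the idiom used by struct gaudi2_cold_rst_data above: the anonymous bitfield struct and the __le32 "data" member alias the same 32-bit word, so several boolean firmware flags travel in a single little-endian register. The standalone sketch below (illustration only, with a reduced set of flags and plain uint32_t instead of __le32) shows the two views of the word; bitfield ordering is compiler- and ABI-dependent, which is why code crossing the host/firmware boundary would normally read and write the aggregate word rather than the individual bitfields.

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of the cold-reset flag word (reduced, illustration only). */
union cold_rst_data_sketch {
	struct {
		uint32_t recovery_flag   : 1;
		uint32_t validation_flag : 1;
		uint32_t efuse_read_flag : 1;
		uint32_t reserved        : 29;
	};
	uint32_t data;	/* the driver's struct uses __le32 here */
};

int main(void)
{
	union cold_rst_data_sketch d = { .data = 0 };

	d.recovery_flag = 1;	/* set one flag ... */
	d.efuse_read_flag = 1;	/* ... and another   */

	/* Both views alias the same 32-bit word. */
	printf("raw flag word: 0x%08x\n", d.data);
	return 0;
}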
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h
new file mode 100644
index 000000000000..8bf90fc18bf5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI2_PACKETS_H
+#define GAUDI2_PACKETS_H
+
+#include <linux/types.h>
+
+#define PACKET_HEADER_PACKET_ID_SHIFT 56
+#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull
+
+enum packet_id {
+ PACKET_WREG_32 = 0x1,
+ PACKET_WREG_BULK = 0x2,
+ PACKET_MSG_LONG = 0x3,
+ PACKET_MSG_SHORT = 0x4,
+ PACKET_CP_DMA = 0x5,
+ PACKET_REPEAT = 0x6,
+ PACKET_MSG_PROT = 0x7,
+ PACKET_FENCE = 0x8,
+ PACKET_LIN_DMA = 0x9,
+ PACKET_NOP = 0xA,
+ PACKET_STOP = 0xB,
+ PACKET_ARB_POINT = 0xC,
+ PACKET_WAIT = 0xD,
+ PACKET_CB_LIST = 0xE,
+ PACKET_LOAD_AND_EXE = 0xF,
+ PACKET_WRITE_ARC_STREAM = 0x10,
+ PACKET_LAST_READ_FROM_ARC = 0x11,
+ PACKET_WREG_64_SHORT = 0x12,
+ PACKET_WREG_64_LONG = 0x13,
+ MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >>
+ PACKET_HEADER_PACKET_ID_SHIFT) + 1
+};
+
+#define GAUDI2_PKT_CTL_OPCODE_SHIFT 24
+#define GAUDI2_PKT_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI2_PKT_CTL_EB_SHIFT 29
+#define GAUDI2_PKT_CTL_EB_MASK 0x20000000
+
+#define GAUDI2_PKT_CTL_RB_SHIFT 30
+#define GAUDI2_PKT_CTL_RB_MASK 0x40000000
+
+#define GAUDI2_PKT_CTL_MB_SHIFT 31
+#define GAUDI2_PKT_CTL_MB_MASK 0x80000000
+
+/* All packets have, at least, an 8-byte header, which contains
+ * the packet type. The kernel driver uses the packet header for packet
+ * validation and to perform any required preparation before
+ * sending them off to the hardware.
+ */
+struct gaudi2_packet {
+ __le64 header;
+ /* The rest of the packet data follows. Use the corresponding
+ * packet_XXX struct to dereference the data, based on packet type
+ */
+ u8 contents[0];
+};
+
+struct packet_nop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_stop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_wreg32 {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_wreg_bulk {
+ __le32 size64;
+ __le32 ctl;
+ __le64 values[0]; /* data starts here */
+};
+
+struct packet_msg_long {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+#define GAUDI2_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0
+#define GAUDI2_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x00007FFF
+
+#define GAUDI2_PKT_SHORT_VAL_SOB_MOD_SHIFT 31
+#define GAUDI2_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000
+
+#define GAUDI2_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT 0
+#define GAUDI2_PKT_SHORT_VAL_MON_SYNC_GID_MASK 0x000000FF
+
+#define GAUDI2_PKT_SHORT_VAL_MON_MASK_SHIFT 8
+#define GAUDI2_PKT_SHORT_VAL_MON_MASK_MASK 0x0000FF00
+
+#define GAUDI2_PKT_SHORT_VAL_MON_MODE_SHIFT 16
+#define GAUDI2_PKT_SHORT_VAL_MON_MODE_MASK 0x00010000
+
+#define GAUDI2_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT 17
+#define GAUDI2_PKT_SHORT_VAL_MON_SYNC_VAL_MASK 0xFFFE0000
+
+#define GAUDI2_PKT_SHORT_CTL_ADDR_SHIFT 0
+#define GAUDI2_PKT_SHORT_CTL_ADDR_MASK 0x0000FFFF
+
+#define GAUDI2_PKT_SHORT_CTL_BASE_SHIFT 22
+#define GAUDI2_PKT_SHORT_CTL_BASE_MASK 0x00C00000
+
+struct packet_msg_short {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_msg_prot {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+#define GAUDI2_PKT_FENCE_CFG_DEC_VAL_SHIFT 0
+#define GAUDI2_PKT_FENCE_CFG_DEC_VAL_MASK 0x0000000F
+
+#define GAUDI2_PKT_FENCE_CFG_TARGET_VAL_SHIFT 16
+#define GAUDI2_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000
+
+#define GAUDI2_PKT_FENCE_CFG_ID_SHIFT 30
+#define GAUDI2_PKT_FENCE_CFG_ID_MASK 0xC0000000
+
+#define GAUDI2_PKT_FENCE_CTL_PRED_SHIFT 0
+#define GAUDI2_PKT_FENCE_CTL_PRED_MASK 0x0000001F
+
+struct packet_fence {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+#define GAUDI2_PKT_LIN_DMA_CTL_WRCOMP_SHIFT 0
+#define GAUDI2_PKT_LIN_DMA_CTL_WRCOMP_MASK 0x00000001
+
+#define GAUDI2_PKT_LIN_DMA_CTL_ENDIAN_SHIFT 1
+#define GAUDI2_PKT_LIN_DMA_CTL_ENDIAN_MASK 0x00000006
+
+#define GAUDI2_PKT_LIN_DMA_CTL_MEMSET_SHIFT 4
+#define GAUDI2_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000010
+
+#define GAUDI2_PKT_LIN_DMA_CTL_CONTEXT_ID_SHIFT 8
+#define GAUDI2_PKT_LIN_DMA_CTL_CONTEXT_ID_MASK 0x00FFFF00
+
+struct packet_lin_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct packet_arb_point {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_repeat {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_wait {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_cb_list {
+ __le32 reserved;
+ __le32 ctl;
+ __le64 index_addr;
+ __le64 table_addr;
+};
+
+struct packet_load_and_exe {
+ __le32 cfg;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+struct packet_cp_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+#endif /* GAUDI2_PACKETS_H */
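
To make the shift/mask pairs above concrete, here is a hedged userspace sketch of how a 64-bit packet header and a 32-bit ctl word could be decoded and composed with these constants. The constants are copied verbatim from the header; the helper functions (packet_id_from_header(), build_ctl()) are made-up names for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Constants copied from gaudi2_packets.h above. */
#define PACKET_HEADER_PACKET_ID_SHIFT	56
#define PACKET_HEADER_PACKET_ID_MASK	0x1F00000000000000ull

#define GAUDI2_PKT_CTL_OPCODE_SHIFT	24
#define GAUDI2_PKT_CTL_OPCODE_MASK	0x1F000000
#define GAUDI2_PKT_CTL_EB_SHIFT		29
#define GAUDI2_PKT_CTL_MB_SHIFT		31

/* Extract the packet id (enum packet_id) from a 64-bit header. */
static unsigned int packet_id_from_header(uint64_t header)
{
	return (header & PACKET_HEADER_PACKET_ID_MASK) >>
			PACKET_HEADER_PACKET_ID_SHIFT;
}

/* Compose a ctl word from an opcode and the EB/MB barrier bits. */
static uint32_t build_ctl(unsigned int opcode, int eb, int mb)
{
	uint32_t ctl = (opcode << GAUDI2_PKT_CTL_OPCODE_SHIFT) &
			GAUDI2_PKT_CTL_OPCODE_MASK;

	ctl |= (uint32_t)eb << GAUDI2_PKT_CTL_EB_SHIFT;
	ctl |= (uint32_t)mb << GAUDI2_PKT_CTL_MB_SHIFT;
	return ctl;
}

int main(void)
{
	uint64_t header = (uint64_t)0xA << PACKET_HEADER_PACKET_ID_SHIFT; /* PACKET_NOP */

	printf("packet id: 0x%x\n", packet_id_from_header(header));	/* 0xa */
	printf("ctl word : 0x%08x\n", build_ctl(0xA, 1, 1));		/* 0xaa000000 */
	return 0;
}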
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h
new file mode 100644
index 000000000000..ae7feb388f63
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI2_REG_MAP_H_
+#define GAUDI2_REG_MAP_H_
+
+/*
+ * PSOC scratch-pad registers
+ */
+#define mmHW_STATE mmCPU_IF_KMD_HW_DIRTY_STATUS
+#define mmPID_STATUS_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmARM_STATUS_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmGIC_TPC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmGIC_MME_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmGIC_DMA_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmGIC_ROT_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmGIC_NIC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmGIC_DMA_CR_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmGIC_HOST_PI_UPD_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmGIC_HOST_HALT_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmGIC_HOST_INTS_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_11
+#define mmGIC_HOST_SOFT_RST_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_12
+#define mmEEPROM_COPY_LOCATION_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_13
+#define mmCPU_RST_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_14
+#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
+#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
+/*
+ * TODO: mmGIC_RAZWI_STATUS_REG is a temporary
+ * macro and will be removed after GAUDI2 PO
+ */
+#define mmGIC_RAZWI_STATUS_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_19
+#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
+#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
+#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmRST_SRC mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0
+#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
+#define mmCOLD_RST_DATA mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2
+#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
+#define mmPID_CMD_REQ_REG mmPSOC_PID_PID_CMD_0
+#define mmPID_CMD_REQ_REG_HI mmPSOC_PID_PID_CMD_1
+#define mmPID_CMD_RSP_REG mmPSOC_PID_PID_CMD_2
+#define mmPID_CMD_RSP_REG_HI mmPSOC_PID_PID_CMD_3
+#define mmPID_CMD_TELEMETRY_REG_0 mmPSOC_PID_PID_CMD_4
+#define mmPID_CMD_TELEMETRY_REG_0_HI mmPSOC_PID_PID_CMD_5
+#define mmPID_CMD_TELEMETRY_REG_1 mmPSOC_PID_PID_CMD_6
+#define mmPID_CMD_TELEMETRY_REG_1_HI mmPSOC_PID_PID_CMD_7
+
+#endif /* GAUDI2_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index 50ce5175b63a..896799204fb0 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -28,18 +28,6 @@ enum packet_id {
PACKET_HEADER_PACKET_ID_SHIFT) + 1
};
-enum goya_dma_direction {
- DMA_HOST_TO_DRAM,
- DMA_HOST_TO_SRAM,
- DMA_DRAM_TO_SRAM,
- DMA_SRAM_TO_DRAM,
- DMA_SRAM_TO_HOST,
- DMA_DRAM_TO_HOST,
- DMA_DRAM_TO_DRAM,
- DMA_SRAM_TO_SRAM,
- DMA_ENUM_MAX
-};
-
#define GOYA_PKT_CTL_OPCODE_SHIFT 24
#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index cae8ac8bc5b1..d408feecd483 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -9,9 +9,17 @@
#define INCLUDE_MMU_GENERAL_H_
#define PAGE_SHIFT_4KB 12
+#define PAGE_SHIFT_64KB 16
#define PAGE_SHIFT_2MB 21
-#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
-#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
+#define PAGE_SHIFT_16MB 24
+#define PAGE_SHIFT_64MB 26
+#define PAGE_SHIFT_1GB 30
+#define PAGE_SIZE_4KB _BITUL(PAGE_SHIFT_4KB)
+#define PAGE_SIZE_64KB _BITUL(PAGE_SHIFT_64KB)
+#define PAGE_SIZE_2MB _BITUL(PAGE_SHIFT_2MB)
+#define PAGE_SIZE_16MB _BITUL(PAGE_SHIFT_16MB)
+#define PAGE_SIZE_64MB _BITUL(PAGE_SHIFT_64MB)
+#define PAGE_SIZE_1GB _BITUL(PAGE_SHIFT_1GB)
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
@@ -19,6 +27,7 @@
#define FLAGS_MASK 0x0000000000FFFull
#define MMU_ARCH_5_HOPS 5
+#define MMU_ARCH_6_HOPS 6
#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
@@ -31,6 +40,7 @@
#define MMU_HOP0_PA43_12_SHIFT 12
#define MMU_HOP0_PA49_44_SHIFT (12 + 32)
+#define MMU_HOP0_PA63_44_SHIFT (12 + 32)
#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
new file mode 100644
index 000000000000..cd7bf25d2da9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_V2_0_H_
+#define INCLUDE_MMU_V2_0_H_
+
+#define HOP0_MASK_4K 0xFE00000000000000ull
+#define HOP1_MASK_4K 0x01FF000000000000ull
+#define HOP2_MASK_4K 0x0000FF8000000000ull
+#define HOP3_MASK_4K 0x0000007FC0000000ull
+#define HOP4_MASK_4K 0x000000003FE00000ull
+#define HOP5_MASK_4K 0x00000000001FF000ull
+
+#define HOP0_MASK_64K 0xFF00000000000000ull
+#define HOP1_MASK_64K 0x00FF000000000000ull
+#define HOP2_MASK_64K 0x0000FF0000000000ull
+#define HOP3_MASK_64K 0x000000FF00000000ull
+#define HOP4_MASK_64K 0x00000000FF000000ull
+#define HOP5_MASK_64K 0x0000000000FF0000ull
+
+#define HOP0_SHIFT_4K 57
+#define HOP1_SHIFT_4K 48
+#define HOP2_SHIFT_4K 39
+#define HOP3_SHIFT_4K 30
+#define HOP4_SHIFT_4K 21
+#define HOP5_SHIFT_4K 12
+
+#define HOP0_SHIFT_64K 56
+#define HOP1_SHIFT_64K 48
+#define HOP2_SHIFT_64K 40
+#define HOP3_SHIFT_64K 32
+#define HOP4_SHIFT_64K 24
+#define HOP5_SHIFT_64K 16
+
+#define DHOP0_MASK HOP0_MASK_4K
+#define DHOP1_MASK HOP1_MASK_4K
+#define DHOP2_MASK HOP2_MASK_4K
+#define DHOP3_MASK HOP3_MASK_4K
+#define DHOP4_MASK 0x000003C000000ull
+
+#define DHOP0_SHIFT HOP0_SHIFT_4K
+#define DHOP1_SHIFT HOP1_SHIFT_4K
+#define DHOP2_SHIFT HOP2_SHIFT_4K
+#define DHOP3_SHIFT HOP3_SHIFT_4K
+#define DHOP4_SHIFT 26
+
+#endif /* INCLUDE_MMU_V2_0_H_ */
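
The HOPn_MASK/HOPn_SHIFT pairs above carve a device virtual address into per-hop table indices for the 6-hop walk that MMU_ARCH_6_HOPS (added earlier in this series) refers to. The sketch below is a minimal userspace illustration of that decomposition for the 4KB-page layout; the real page-table walker lives in the driver's MMU code and also handles hop pointers, flags and huge pages, none of which is shown here.

#include <stdint.h>
#include <stdio.h>

/* 4KB-page hop masks/shifts copied from the header above. */
static const struct { uint64_t mask; unsigned int shift; } hops_4k[] = {
	{ 0xFE00000000000000ull, 57 },	/* hop 0 */
	{ 0x01FF000000000000ull, 48 },	/* hop 1 */
	{ 0x0000FF8000000000ull, 39 },	/* hop 2 */
	{ 0x0000007FC0000000ull, 30 },	/* hop 3 */
	{ 0x000000003FE00000ull, 21 },	/* hop 4 */
	{ 0x00000000001FF000ull, 12 },	/* hop 5 */
};

int main(void)
{
	uint64_t va = 0x0123456789ABC000ull;	/* arbitrary example address */

	/* Each hop index selects an entry in that hop's translation table. */
	for (unsigned int i = 0; i < 6; i++)
		printf("hop%u index: 0x%llx\n", i,
		       (unsigned long long)((va & hops_4k[i].mask) >> hops_4k[i].shift));
	return 0;
}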
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 2e0aa74ac185..95ef971b5e1c 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
-KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
-KCOV_INSTRUMENT_rodata.o := n
-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o := n
+KCSAN_SANITIZE_rodata.o := n
+KCOV_INSTRUMENT_rodata.o := n
+OBJECT_FILES_NON_STANDARD_rodata.o := y
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 009239ad1d8a..48821f4c2b21 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -29,7 +29,7 @@ struct lkdtm_list {
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
-#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index 666a7f4bc137..71483cb1e422 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -97,7 +97,7 @@ static volatile int force_check;
static void lkdtm_CFI_BACKWARD(void)
{
/* Use calculated gotos to keep labels addressable. */
- void *labels[] = {0, &&normal, &&redirected, &&check_normal, &&check_redirected};
+ void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected };
pr_info("Attempting unchecked stack return address redirection ...\n");
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index befa491e3344..3a95fe7d4e33 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -561,7 +561,7 @@ static int mei_me_hbuf_write(struct mei_device *dev,
dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
empty_slots = mei_hbuf_empty_slots(dev);
- dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
+ dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
if (empty_slots < 0)
return -EOVERFLOW;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 19dbdad8ad8a..fa1f5a632e7f 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -36,7 +36,7 @@
* kernel/user requirements.
*
* Blade percpu resources reserved for kernel use. These resources are
- * reserved whenever the the kernel context for the blade is loaded. Note
+ * reserved whenever the kernel context for the blade is loaded. Note
* that the kernel context is not guaranteed to be always available. It is
* loaded on demand & can be stolen by a user if the user demand exceeds the
* kernel demand. The kernel can always reload the kernel context but
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index ba9ae0e2df0f..fff522d347e3 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1598,7 +1598,7 @@ out_2:
* by xpc_notify_senders_of_disconnect_uv(), and to also get an
* error returned here will confuse them. Additionally, since
* in this case the channel is being disconnected we don't need
- * to put the the msg_slot back on the free list.
+ * to put the msg_slot back on the free list.
*/
if (cmpxchg(&msg_slot->func, func, NULL) != func) {
ret = xpSuccess;
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 50644f83e78c..2396ba3b03bd 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -285,7 +285,7 @@ xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
__clear_bit(partid, xpnet_broadcast_partitions);
spin_unlock_bh(&xpnet_broadcast_lock);
- if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions,
+ if (bitmap_empty(xpnet_broadcast_partitions,
xp_max_npartitions)) {
netif_carrier_off(xpnet_device);
}
@@ -522,9 +522,8 @@ xpnet_init(void)
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
- xpnet_broadcast_partitions = kcalloc(BITS_TO_LONGS(xp_max_npartitions),
- sizeof(long),
- GFP_KERNEL);
+ xpnet_broadcast_partitions = bitmap_zalloc(xp_max_npartitions,
+ GFP_KERNEL);
if (xpnet_broadcast_partitions == NULL)
return -ENOMEM;
@@ -535,7 +534,7 @@ xpnet_init(void)
xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, NET_NAME_UNKNOWN,
ether_setup);
if (xpnet_device == NULL) {
- kfree(xpnet_broadcast_partitions);
+ bitmap_free(xpnet_broadcast_partitions);
return -ENOMEM;
}
@@ -574,7 +573,7 @@ xpnet_init(void)
result = register_netdev(xpnet_device);
if (result != 0) {
free_netdev(xpnet_device);
- kfree(xpnet_broadcast_partitions);
+ bitmap_free(xpnet_broadcast_partitions);
}
return result;
@@ -590,7 +589,7 @@ xpnet_exit(void)
unregister_netdev(xpnet_device);
free_netdev(xpnet_device);
- kfree(xpnet_broadcast_partitions);
+ bitmap_free(xpnet_broadcast_partitions);
}
module_exit(xpnet_exit);
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index 6cc31789b38d..a948e95d4375 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SRAM protect-exec region helper functions
*
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 281c54003edc..b70a013139c7 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -9,43 +9,38 @@
static struct class *uacce_class;
static dev_t uacce_devt;
-static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);
-static int uacce_start_queue(struct uacce_queue *q)
+/*
+ * If the parent driver or the device disappears, the queue state is invalid and
+ * ops are not usable anymore.
+ */
+static bool uacce_queue_is_valid(struct uacce_queue *q)
{
- int ret = 0;
+ return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
+}
- mutex_lock(&uacce_mutex);
+static int uacce_start_queue(struct uacce_queue *q)
+{
+ int ret;
- if (q->state != UACCE_Q_INIT) {
- ret = -EINVAL;
- goto out_with_lock;
- }
+ if (q->state != UACCE_Q_INIT)
+ return -EINVAL;
if (q->uacce->ops->start_queue) {
ret = q->uacce->ops->start_queue(q);
if (ret < 0)
- goto out_with_lock;
+ return ret;
}
q->state = UACCE_Q_STARTED;
-
-out_with_lock:
- mutex_unlock(&uacce_mutex);
-
- return ret;
+ return 0;
}
static int uacce_put_queue(struct uacce_queue *q)
{
struct uacce_device *uacce = q->uacce;
- mutex_lock(&uacce_mutex);
-
- if (q->state == UACCE_Q_ZOMBIE)
- goto out;
-
if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
uacce->ops->stop_queue(q);
@@ -54,8 +49,6 @@ static int uacce_put_queue(struct uacce_queue *q)
uacce->ops->put_queue(q);
q->state = UACCE_Q_ZOMBIE;
-out:
- mutex_unlock(&uacce_mutex);
return 0;
}
@@ -65,20 +58,36 @@ static long uacce_fops_unl_ioctl(struct file *filep,
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
+ long ret = -ENXIO;
+
+ /*
+ * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
+ * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
+ * gets called with mmap_lock held, by taking uacce->mutex instead of
+ * q->mutex. Doing this in uacce_fops_mmap() is not possible because
+ * uacce_fops_open() calls iommu_sva_bind_device(), which takes
+ * mmap_lock, while holding uacce->mutex.
+ */
+ mutex_lock(&uacce->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
switch (cmd) {
case UACCE_CMD_START_Q:
- return uacce_start_queue(q);
-
+ ret = uacce_start_queue(q);
+ break;
case UACCE_CMD_PUT_Q:
- return uacce_put_queue(q);
-
+ ret = uacce_put_queue(q);
+ break;
default:
- if (!uacce->ops->ioctl)
- return -EINVAL;
-
- return uacce->ops->ioctl(q, cmd, arg);
+ if (uacce->ops->ioctl)
+ ret = uacce->ops->ioctl(q, cmd, arg);
+ else
+ ret = -EINVAL;
}
+out_unlock:
+ mutex_unlock(&uacce->mutex);
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -136,6 +145,13 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
if (!q)
return -ENOMEM;
+ mutex_lock(&uacce->mutex);
+
+ if (!uacce->parent) {
+ ret = -EINVAL;
+ goto out_with_mem;
+ }
+
ret = uacce_bind_queue(uacce, q);
if (ret)
goto out_with_mem;
@@ -152,10 +168,9 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
filep->private_data = q;
uacce->inode = inode;
q->state = UACCE_Q_INIT;
-
- mutex_lock(&uacce->queues_lock);
+ mutex_init(&q->mutex);
list_add(&q->list, &uacce->queues);
- mutex_unlock(&uacce->queues_lock);
+ mutex_unlock(&uacce->mutex);
return 0;
@@ -163,18 +178,20 @@ out_with_bond:
uacce_unbind_queue(q);
out_with_mem:
kfree(q);
+ mutex_unlock(&uacce->mutex);
return ret;
}
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
- mutex_lock(&q->uacce->queues_lock);
- list_del(&q->list);
- mutex_unlock(&q->uacce->queues_lock);
+ mutex_lock(&uacce->mutex);
uacce_put_queue(q);
uacce_unbind_queue(q);
+ list_del(&q->list);
+ mutex_unlock(&uacce->mutex);
kfree(q);
return 0;
@@ -217,10 +234,9 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
vma->vm_private_data = q;
qfr->type = type;
- mutex_lock(&uacce_mutex);
-
- if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
- ret = -EINVAL;
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q)) {
+ ret = -ENXIO;
goto out_with_lock;
}
@@ -248,12 +264,12 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
}
q->qfrs[type] = qfr;
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
return ret;
out_with_lock:
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
kfree(qfr);
return ret;
}
@@ -262,12 +278,20 @@ static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
struct uacce_queue *q = file->private_data;
struct uacce_device *uacce = q->uacce;
+ __poll_t ret = 0;
+
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
poll_wait(file, &q->wait, wait);
+
if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
- return EPOLLIN | EPOLLRDNORM;
+ ret = EPOLLIN | EPOLLRDNORM;
- return 0;
+out_unlock:
+ mutex_unlock(&q->mutex);
+ return ret;
}
static const struct file_operations uacce_fops = {
@@ -450,7 +474,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
goto err_with_uacce;
INIT_LIST_HEAD(&uacce->queues);
- mutex_init(&uacce->queues_lock);
+ mutex_init(&uacce->mutex);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
@@ -507,13 +531,23 @@ void uacce_remove(struct uacce_device *uacce)
if (uacce->inode)
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+ /*
+ * uacce_fops_open() may be running concurrently, even after we remove
+ * the cdev. Holding uacce->mutex ensures that open() does not obtain a
+ * removed uacce device.
+ */
+ mutex_lock(&uacce->mutex);
/* ensure no open queue remains */
- mutex_lock(&uacce->queues_lock);
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+ /*
+ * Taking q->mutex ensures that fops do not use the defunct
+ * uacce->ops after the queue is disabled.
+ */
+ mutex_lock(&q->mutex);
uacce_put_queue(q);
+ mutex_unlock(&q->mutex);
uacce_unbind_queue(q);
}
- mutex_unlock(&uacce->queues_lock);
/* disable sva now since no opened queues */
uacce_disable_sva(uacce);
@@ -521,6 +555,13 @@ void uacce_remove(struct uacce_device *uacce)
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
+ /*
+ * uacce exists as long as there are open fds, but ops will be freed
+ * now. Ensure that bugs cause NULL deref rather than use-after-free.
+ */
+ uacce->ops = NULL;
+ uacce->parent = NULL;
+ mutex_unlock(&uacce->mutex);
put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
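
A simplified model of the invariant the uacce changes above establish: removal invalidates ops under the same lock that the file operations take before dereferencing them, so an open fd after uacce_remove() fails with an error instead of a use-after-free. The sketch below uses hypothetical names (struct dev, fops_like_call(), remove_like_call()) and a userspace pthread mutex purely to illustrate the pattern; it is not the driver's code.

#include <pthread.h>
#include <stdio.h>

struct ops {
	void (*work)(void);
};

/* Hypothetical device: ops is NULLed on removal, under the lock. */
struct dev {
	pthread_mutex_t lock;
	const struct ops *ops;
};

static void do_work(void) { puts("ops->work()"); }
static const struct ops real_ops = { .work = do_work };

static struct dev d = { .lock = PTHREAD_MUTEX_INITIALIZER, .ops = &real_ops };

/* fops-style path: only dereference ops while holding the lock and after
 * checking that the device has not been removed underneath us. */
static int fops_like_call(void)
{
	int ret = -1;	/* models -ENXIO after removal */

	pthread_mutex_lock(&d.lock);
	if (d.ops) {
		d.ops->work();
		ret = 0;
	}
	pthread_mutex_unlock(&d.lock);
	return ret;
}

/* remove-style path: invalidate under the same lock, so any still-open
 * user sees NULL ops and fails cleanly instead of touching freed memory. */
static void remove_like_call(void)
{
	pthread_mutex_lock(&d.lock);
	d.ops = NULL;
	pthread_mutex_unlock(&d.lock);
}

int main(void)
{
	printf("before remove: %d\n", fops_like_call());	/* 0 */
	remove_like_call();
	printf("after remove : %d\n", fops_like_call());	/* -1 */
	return 0;
}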
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
new file mode 100644
index 000000000000..53b5506080e1
--- /dev/null
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// VCPU stall detector.
+// Copyright (C) Google, 2022
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/param.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define VCPU_STALL_REG_STATUS (0x00)
+#define VCPU_STALL_REG_LOAD_CNT (0x04)
+#define VCPU_STALL_REG_CURRENT_CNT (0x08)
+#define VCPU_STALL_REG_CLOCK_FREQ_HZ (0x0C)
+#define VCPU_STALL_REG_LEN (0x10)
+
+#define VCPU_STALL_DEFAULT_CLOCK_HZ (10)
+#define VCPU_STALL_MAX_CLOCK_HZ (100)
+#define VCPU_STALL_DEFAULT_TIMEOUT_SEC (8)
+#define VCPU_STALL_MAX_TIMEOUT_SEC (600)
+
+struct vcpu_stall_detect_config {
+ u32 clock_freq_hz;
+ u32 stall_timeout_sec;
+
+ void __iomem *membase;
+ struct platform_device *dev;
+ enum cpuhp_state hp_online;
+};
+
+struct vcpu_stall_priv {
+ struct hrtimer vcpu_hrtimer;
+ bool is_initialized;
+};
+
+/* The vcpu stall configuration structure which applies to all the CPUs */
+static struct vcpu_stall_detect_config vcpu_stall_config;
+
+#define vcpu_stall_reg_write(vcpu, reg, value) \
+ writel_relaxed((value), \
+ (void __iomem *)(vcpu_stall_config.membase + \
+ (vcpu) * VCPU_STALL_REG_LEN + (reg)))
+
+
+static struct vcpu_stall_priv __percpu *vcpu_stall_detectors;
+
+static enum hrtimer_restart
+vcpu_stall_detect_timer_fn(struct hrtimer *hrtimer)
+{
+ u32 ticks, ping_timeout_ms;
+
+ /* Reload the stall detector counter register every
+ * `ping_timeout_ms` to prevent the virtual device
+ * from decrementing it to 0. The virtual device decrements this
+ * register at 'clock_freq_hz' frequency.
+ */
+ ticks = vcpu_stall_config.clock_freq_hz *
+ vcpu_stall_config.stall_timeout_sec;
+ vcpu_stall_reg_write(smp_processor_id(),
+ VCPU_STALL_REG_LOAD_CNT, ticks);
+
+ ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
+ MSEC_PER_SEC / 2;
+ hrtimer_forward_now(hrtimer,
+ ms_to_ktime(ping_timeout_ms));
+
+ return HRTIMER_RESTART;
+}
+
+static int start_stall_detector_cpu(unsigned int cpu)
+{
+ u32 ticks, ping_timeout_ms;
+ struct vcpu_stall_priv *vcpu_stall_detector =
+ this_cpu_ptr(vcpu_stall_detectors);
+ struct hrtimer *vcpu_hrtimer = &vcpu_stall_detector->vcpu_hrtimer;
+
+ vcpu_stall_reg_write(cpu, VCPU_STALL_REG_CLOCK_FREQ_HZ,
+ vcpu_stall_config.clock_freq_hz);
+
+ /* Compute the number of ticks required for the stall detector
+ * counter register based on the internal clock frequency and the
+ * timeout value given from the device tree.
+ */
+ ticks = vcpu_stall_config.clock_freq_hz *
+ vcpu_stall_config.stall_timeout_sec;
+ vcpu_stall_reg_write(cpu, VCPU_STALL_REG_LOAD_CNT, ticks);
+
+ /* Enable the internal clock and start the stall detector */
+ vcpu_stall_reg_write(cpu, VCPU_STALL_REG_STATUS, 1);
+
+ /* Pet the stall detector at half of its expiration timeout
+ * to prevent spurious resets.
+ */
+ ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
+ MSEC_PER_SEC / 2;
+
+ hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ vcpu_hrtimer->function = vcpu_stall_detect_timer_fn;
+ vcpu_stall_detector->is_initialized = true;
+
+ hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms),
+ HRTIMER_MODE_REL_PINNED);
+
+ return 0;
+}
+
+static int stop_stall_detector_cpu(unsigned int cpu)
+{
+ struct vcpu_stall_priv *vcpu_stall_detector =
+ per_cpu_ptr(vcpu_stall_detectors, cpu);
+
+ if (!vcpu_stall_detector->is_initialized)
+ return 0;
+
+ /* Disable the stall detector for the current CPU */
+ hrtimer_cancel(&vcpu_stall_detector->vcpu_hrtimer);
+ vcpu_stall_reg_write(cpu, VCPU_STALL_REG_STATUS, 0);
+ vcpu_stall_detector->is_initialized = false;
+
+ return 0;
+}
+
+static int vcpu_stall_detect_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ void __iomem *membase;
+ u32 clock_freq_hz = VCPU_STALL_DEFAULT_CLOCK_HZ;
+ u32 stall_timeout_sec = VCPU_STALL_DEFAULT_TIMEOUT_SEC;
+ struct device_node *np = pdev->dev.of_node;
+
+ vcpu_stall_detectors = devm_alloc_percpu(&pdev->dev,
+ typeof(struct vcpu_stall_priv));
+ if (!vcpu_stall_detectors)
+ return -ENOMEM;
+
+ membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ if (IS_ERR(membase)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(membase);
+ }
+
+ if (!of_property_read_u32(np, "clock-frequency", &clock_freq_hz)) {
+ if (!(clock_freq_hz > 0 &&
+ clock_freq_hz < VCPU_STALL_MAX_CLOCK_HZ)) {
+ dev_warn(&pdev->dev, "clk out of range\n");
+ clock_freq_hz = VCPU_STALL_DEFAULT_CLOCK_HZ;
+ }
+ }
+
+ if (!of_property_read_u32(np, "timeout-sec", &stall_timeout_sec)) {
+ if (!(stall_timeout_sec > 0 &&
+ stall_timeout_sec < VCPU_STALL_MAX_TIMEOUT_SEC)) {
+ dev_warn(&pdev->dev, "stall timeout out of range\n");
+ stall_timeout_sec = VCPU_STALL_DEFAULT_TIMEOUT_SEC;
+ }
+ }
+
+ vcpu_stall_config = (struct vcpu_stall_detect_config) {
+ .membase = membase,
+ .clock_freq_hz = clock_freq_hz,
+ .stall_timeout_sec = stall_timeout_sec
+ };
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "virt/vcpu_stall_detector:online",
+ start_stall_detector_cpu,
+ stop_stall_detector_cpu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to install cpu hotplug");
+ goto err;
+ }
+
+ vcpu_stall_config.hp_online = ret;
+ return 0;
+err:
+ return ret;
+}
+
+static int vcpu_stall_detect_remove(struct platform_device *pdev)
+{
+ int cpu;
+
+ cpuhp_remove_state(vcpu_stall_config.hp_online);
+
+ for_each_possible_cpu(cpu)
+ stop_stall_detector_cpu(cpu);
+
+ return 0;
+}
+
+static const struct of_device_id vcpu_stall_detect_of_match[] = {
+ { .compatible = "qemu,vcpu-stall-detector", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, vcpu_stall_detect_of_match);
+
+static struct platform_driver vcpu_stall_detect_driver = {
+ .probe = vcpu_stall_detect_probe,
+ .remove = vcpu_stall_detect_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = vcpu_stall_detect_of_match,
+ },
+};
+
+module_platform_driver(vcpu_stall_detect_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sebastian Ene <sebastianene@google.com>");
+MODULE_DESCRIPTION("VCPU stall detector");
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 086ce77d9074..61a2be712bf7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -29,8 +29,6 @@
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
@@ -1587,7 +1585,7 @@ static int vmballoon_register_shrinker(struct vmballoon *b)
b->shrinker.count_objects = vmballoon_shrinker_count;
b->shrinker.seeks = DEFAULT_SEEKS;
- r = register_shrinker(&b->shrinker);
+ r = register_shrinker(&b->shrinker, "vmw-balloon");
if (r == 0)
b->shrinker_registered = true;
@@ -1730,20 +1728,6 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
#ifdef CONFIG_BALLOON_COMPACTION
-
-static int vmballoon_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type vmballoon_fs = {
- .name = "balloon-vmware",
- .init_fs_context = vmballoon_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-static struct vfsmount *vmballoon_mnt;
-
/**
* vmballoon_migratepage() - migrates a balloon page.
* @b_dev_info: balloon device information descriptor.
@@ -1863,21 +1847,6 @@ out_unlock:
}
/**
- * vmballoon_compaction_deinit() - removes compaction related data.
- *
- * @b: pointer to the balloon.
- */
-static void vmballoon_compaction_deinit(struct vmballoon *b)
-{
- if (!IS_ERR(b->b_dev_info.inode))
- iput(b->b_dev_info.inode);
-
- b->b_dev_info.inode = NULL;
- kern_unmount(vmballoon_mnt);
- vmballoon_mnt = NULL;
-}
-
-/**
* vmballoon_compaction_init() - initialized compaction for the balloon.
*
* @b: pointer to the balloon.
@@ -1888,33 +1857,15 @@ static void vmballoon_compaction_deinit(struct vmballoon *b)
*
* Return: zero on success or error code on failure.
*/
-static __init int vmballoon_compaction_init(struct vmballoon *b)
+static __init void vmballoon_compaction_init(struct vmballoon *b)
{
- vmballoon_mnt = kern_mount(&vmballoon_fs);
- if (IS_ERR(vmballoon_mnt))
- return PTR_ERR(vmballoon_mnt);
-
b->b_dev_info.migratepage = vmballoon_migratepage;
- b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
-
- if (IS_ERR(b->b_dev_info.inode))
- return PTR_ERR(b->b_dev_info.inode);
-
- b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
- return 0;
}
#else /* CONFIG_BALLOON_COMPACTION */
-
-static void vmballoon_compaction_deinit(struct vmballoon *b)
-{
-}
-
-static int vmballoon_compaction_init(struct vmballoon *b)
+static inline void vmballoon_compaction_init(struct vmballoon *b)
{
- return 0;
}
-
#endif /* CONFIG_BALLOON_COMPACTION */
static int __init vmballoon_init(void)
@@ -1939,9 +1890,7 @@ static int __init vmballoon_init(void)
* balloon_devinfo_init() .
*/
balloon_devinfo_init(&balloon.b_dev_info);
- error = vmballoon_compaction_init(&balloon);
- if (error)
- goto fail;
+ vmballoon_compaction_init(&balloon);
INIT_LIST_HEAD(&balloon.huge_pages);
spin_lock_init(&balloon.comm_lock);
@@ -1958,7 +1907,6 @@ static int __init vmballoon_init(void)
return 0;
fail:
vmballoon_unregister_shrinker(&balloon);
- vmballoon_compaction_deinit(&balloon);
return error;
}
@@ -1985,8 +1933,5 @@ static void __exit vmballoon_exit(void)
*/
vmballoon_send_start(&balloon, 0);
vmballoon_pop(&balloon);
-
- /* Only once we popped the balloon, compaction can be deinit */
- vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f4a1281658db..ce89611a136e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
@@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr_p,
+ int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1368,12 +1368,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks--;
/*
- * After a read error, we redo the request one sector
+ * After a read error, we redo the request one (native) sector
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
- if (disable_multi)
- brq->data.blocks = 1;
+ if (recovery_mode)
+ brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/*
* Some controllers have HW issues while operating
@@ -1590,7 +1590,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
@@ -1599,7 +1599,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
- mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
@@ -1690,7 +1690,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
#define MMC_READ_SINGLE_RETRIES 2
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1698,6 +1698,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
+ size_t bytes_per_read = queue_physical_block_size(mq->queue);
do {
u32 status;
@@ -1732,13 +1733,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
else
error = BLK_STS_OK;
- } while (blk_update_request(req, error, 512));
+ } while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, 512);
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1879,10 +1880,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
}
- /* FIXME: Missing single sector read for large sector size */
- if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
- brq->data.blocks > 1) {
- /* Read one sector at a time */
+ if (rq_data_dir(req) == READ && brq->data.blocks >
+ queue_physical_block_size(mq->queue) >> 9) {
+ /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
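Editor's note on the recovery path above: the per-pass read size is now taken from queue_physical_block_size() instead of a hard-coded 512 bytes, so cards whose native sector is larger than 512 bytes can also be recovered one native sector at a time. A minimal sketch of the arithmetic, assuming a card with a 4 KiB native sector (a conventional 512-byte card behaves exactly as before):

static unsigned int example_recovery_blocks(struct mmc_queue *mq)
{
	size_t bytes_per_read = queue_physical_block_size(mq->queue);	/* 4096 on a 4 KiB-native card */

	return bytes_per_read >> 9;	/* 4096 >> 9 = 8 logical 512-byte blocks per pass */
}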
@@ -2505,11 +2505,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
dev_set_drvdata(&card->dev, md);
ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
if (ret)
- goto err_cleanup_queue;
+ goto err_put_disk;
return md;
- err_cleanup_queue:
- blk_cleanup_queue(md->disk->queue);
+ err_put_disk:
+ put_disk(md->disk);
blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
@@ -2988,7 +2988,7 @@ static int mmc_blk_probe(struct mmc_card *card)
* Don't enable runtime PM for SD-combo cards here. Leave that
* decision to be taken during the SDIO init sequence instead.
*/
- if (card->type != MMC_TYPE_SD_COMBO) {
+ if (!mmc_card_sd_combo(card)) {
pm_runtime_set_active(&card->dev);
pm_runtime_enable(&card->dev);
}
@@ -3015,7 +3015,7 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_blk_part_switch(card, md->part_type);
mmc_release_host(card->host);
}
- if (card->type != MMC_TYPE_SD_COMBO)
+ if (!mmc_card_sd_combo(card))
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 58a60afa650b..d8762fa3d5cd 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -85,7 +85,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sdio(card) || mmc_card_sd_combo(card)) {
retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
card->cis.vendor, card->cis.device);
if (retval)
@@ -107,7 +107,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* SDIO (non-combo) cards are not handled by mmc_block driver and do not
 * have an accessible CID register, which is used by the mmc_card_name() function.
*/
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
return 0;
retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 4b70cbfc6d5d..ef53a2578824 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -943,9 +943,11 @@ int mmc_execute_tuning(struct mmc_card *card)
}
/* Only print error when we don't check for card removal */
- if (!host->detect_change)
+ if (!host->detect_change) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
+ mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
+ }
return err;
}
@@ -2244,6 +2246,12 @@ void mmc_rescan(struct work_struct *work)
if (freqs[i] <= host->f_min)
break;
}
+
+ /*
+ * Ignore the command timeout errors observed during
+	 * the card init as those are expected.
+ */
+ host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
mmc_release_host(host);
out:
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 3fdbc801e64a..fe6808771bc7 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -223,6 +223,81 @@ static int mmc_clock_opt_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
+static int mmc_err_state_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+ int i;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = 0;
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (host->err_stats[i]) {
+ *val = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_err_state, mmc_err_state_get, NULL, "%llu\n");
+
+static int mmc_err_stats_show(struct seq_file *file, void *data)
+{
+ struct mmc_host *host = (struct mmc_host *)file->private;
+ const char *desc[MMC_ERR_MAX] = {
+ [MMC_ERR_CMD_TIMEOUT] = "Command Timeout Occurred",
+ [MMC_ERR_CMD_CRC] = "Command CRC Errors Occurred",
+ [MMC_ERR_DAT_TIMEOUT] = "Data Timeout Occurred",
+ [MMC_ERR_DAT_CRC] = "Data CRC Errors Occurred",
+ [MMC_ERR_AUTO_CMD] = "Auto-Cmd Error Occurred",
+ [MMC_ERR_ADMA] = "ADMA Error Occurred",
+ [MMC_ERR_TUNING] = "Tuning Error Occurred",
+ [MMC_ERR_CMDQ_RED] = "CMDQ RED Errors",
+ [MMC_ERR_CMDQ_GCE] = "CMDQ GCE Errors",
+ [MMC_ERR_CMDQ_ICCE] = "CMDQ ICCE Errors",
+ [MMC_ERR_REQ_TIMEOUT] = "Request Timedout",
+ [MMC_ERR_CMDQ_REQ_TIMEOUT] = "CMDQ Request Timedout",
+ [MMC_ERR_ICE_CFG] = "ICE Config Errors",
+ [MMC_ERR_CTRL_TIMEOUT] = "Controller Timedout errors",
+ [MMC_ERR_UNEXPECTED_IRQ] = "Unexpected IRQ errors",
+ };
+ int i;
+
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (desc[i])
+ seq_printf(file, "# %s:\t %d\n",
+ desc[i], host->err_stats[i]);
+ }
+
+ return 0;
+}
+
+static int mmc_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_err_stats_show, inode->i_private);
+}
+
+static ssize_t mmc_err_stats_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_host *host = filp->f_mapping->host->i_private;
+
+ pr_debug("%s: Resetting MMC error statistics\n", __func__);
+ memset(host->err_stats, 0, sizeof(host->err_stats));
+
+ return cnt;
+}
+
+static const struct file_operations mmc_err_stats_fops = {
+ .open = mmc_err_stats_open,
+ .read = seq_read,
+ .write = mmc_err_stats_write,
+ .release = single_release,
+};
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -236,6 +311,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
&mmc_clock_fops);
+ debugfs_create_file_unsafe("err_state", 0600, root, host,
+ &mmc_err_state);
+ debugfs_create_file("err_stats", 0600, root, host,
+ &mmc_err_stats_fops);
+
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
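Editor's note on the new debugfs entries above: err_stats prints one per-category counter per line, err_state reports whether any counter is non-zero, and any write to err_stats clears them all. Host drivers feed the counters through mmc_debugfs_err_stats_inc(), as the core and cqhci hunks in this series do. A minimal sketch, assuming a hypothetical helper in a host driver that maps request errors onto the counters:

static void example_record_errors(struct mmc_host *mmc, struct mmc_command *cmd)
{
	if (cmd->error == -ETIMEDOUT)
		mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMD_TIMEOUT);
	else if (cmd->error == -EILSEQ)
		mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMD_CRC);
}

The counters can then be inspected from <debugfs>/mmc*/err_stats and reset with a write to the same file.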
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2ed2b4d5e5a5..0fd91f749b3a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -599,7 +599,7 @@ static int mmc_validate_host_caps(struct mmc_host *host)
}
if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
- !(caps & MMC_CAP_8_BIT_DATA)) {
+ !(caps & MMC_CAP_8_BIT_DATA) && !(caps2 & MMC_CAP2_NO_MMC)) {
dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index fa5324ceeebe..fefaa901b50f 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -116,8 +116,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
}
}
-static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
@@ -494,7 +493,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
if (blk_queue_quiesced(q))
blk_mq_unquiesce_queue(q);
- blk_cleanup_queue(q);
blk_mq_free_tag_set(&mq->tag_set);
/*
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f879dc63d936..be4393988086 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -163,8 +163,10 @@ static inline bool mmc_fixup_of_compatible_match(struct mmc_card *card,
struct device_node *np;
for_each_child_of_node(mmc_dev(card->host)->of_node, np) {
- if (of_device_is_compatible(np, compatible))
+ if (of_device_is_compatible(np, compatible)) {
+ of_node_put(np);
return true;
+ }
}
return false;
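Editor's note on the of_node_put() fixes in this hunk and in the cavium-octeon/thunderx hunks below: for_each_child_of_node() holds a reference on the current child node, which is dropped automatically on the next iteration but not when the loop is left early, so an early return or goto must drop it explicitly. A minimal sketch of the pattern (illustrative only, not driver code):

static bool example_has_child(struct device_node *parent, const char *compatible)
{
	struct device_node *np;

	for_each_child_of_node(parent, np) {
		if (of_device_is_compatible(np, compatible)) {
			of_node_put(np);	/* drop the iterator's reference before leaving early */
			return true;
		}
	}

	return false;
}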
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c5f1df6ce4c0..cee4c0b59f43 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -793,7 +793,7 @@ static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
attr == &dev_attr_info2.attr ||
attr == &dev_attr_info3.attr ||
attr == &dev_attr_info4.attr
- ) && card->type != MMC_TYPE_SD_COMBO)
+	     ) && !mmc_card_sd_combo(card))
return 0;
return attr->mode;
@@ -870,7 +870,7 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
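Editor's note on the hunk above: OCR bit 24 is what the SD specification calls S18A ("switching to 1.8 V accepted"), so replacing the raw literal with the named constant does not change behaviour. A one-line, file-scope check of that assumption (illustrative only):

static_assert(SD_ROCR_S18A == 0x01000000);	/* BIT(24), 1.8 V switching accepted */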
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 25799accf8a0..0b682a31cd3e 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -226,6 +226,20 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTERRUPT_EXT, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_INTERRUPT_EXT_SAI) {
+ data |= SDIO_INTERRUPT_EXT_EAI;
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_INTERRUPT_EXT,
+ data, NULL);
+ if (ret)
+ goto out;
+
+ card->cccr.enable_async_irq = 1;
+ }
}
/* if no uhs mode ensure we check for high speed */
@@ -335,7 +349,7 @@ static int sdio_disable_4bit_bus(struct mmc_card *card)
{
int err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
@@ -360,7 +374,7 @@ static int sdio_enable_4bit_bus(struct mmc_card *card)
err = sdio_enable_wide(card);
if (err <= 0)
return err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) {
@@ -415,7 +429,7 @@ static int sdio_enable_hs(struct mmc_card *card)
int ret;
ret = mmc_sdio_switch_hs(card, true);
- if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ if (ret <= 0 || mmc_card_sdio(card))
return ret;
ret = mmc_sd_switch_hs(card);
@@ -441,7 +455,7 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
max_dtr = card->cis.max_dtr;
}
- if (card->type == MMC_TYPE_SD_COMBO)
+ if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
return max_dtr;
@@ -689,7 +703,7 @@ try_again:
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
- if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ if (oldcard && (!mmc_card_sd_combo(oldcard) ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
err = -ENOENT;
goto mismatch;
@@ -697,7 +711,7 @@ try_again:
} else {
card->type = MMC_TYPE_SDIO;
- if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ if (oldcard && !mmc_card_sdio(oldcard)) {
err = -ENOENT;
goto mismatch;
}
@@ -754,7 +768,7 @@ try_again:
/*
* Read CSD, before selecting the card
*/
- if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ if (!oldcard && mmc_card_sd_combo(card)) {
err = mmc_sd_get_csd(card);
if (err)
goto remove;
@@ -827,7 +841,7 @@ try_again:
mmc_fixup_device(card, sdio_fixup_methods);
- if (card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sd_combo(card)) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
/* handle as SDIO-only card if memory init failed */
if (err) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d6144978e32d..10c563999d3d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -169,8 +169,9 @@ config MMC_SDHCI_OF_ASPEED
If unsure, say N.
config MMC_SDHCI_OF_ASPEED_TEST
- bool "Tests for the ASPEED SDHCI driver"
- depends on MMC_SDHCI_OF_ASPEED && KUNIT=y
+ bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
+ depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
option only if you will boot the kernel for the purpose of running
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 2c4b2df52adb..12dca91a8ef6 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -277,6 +277,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
+ of_node_put(cn);
goto error;
}
i++;
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bbbcff3..202b1d6da678 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -142,8 +142,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
- if (ret)
+ if (ret) {
+ of_node_put(child_node);
goto error;
+ }
}
i++;
}
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index b0d30c35c390..b3d7d6d8d654 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -822,8 +822,15 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
- cmd_error || data_error)
+ cmd_error || data_error) {
+ if (status & CQHCI_IS_RED)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
+ if (status & CQHCI_IS_GCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
+ if (status & CQHCI_IS_ICCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
cqhci_error_irq(mmc, status, cmd_error, data_error);
+ }
if (status & CQHCI_IS_TCC) {
/* read TCN and complete the request */
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index ca5be4445ae0..9f20ac524c8b 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -670,7 +670,9 @@ static int dw_mci_exynos_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index e9437ef8ef19..6f22fe054087 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -179,7 +179,9 @@ static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->drive_clk);
clk_disable_unprepare(priv->sample_clk);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct of_device_id dw_mci_hi3798cv200_match[] = {
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index f825487aa739..2a99f15f527f 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -377,7 +377,9 @@ static int dw_mci_rockchip_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2f08d442e557..fc462995cf94 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 01159eaf8694..012aa85489d8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -762,7 +762,7 @@ int mmci_dmae_setup(struct mmci_host *host)
/*
* If only an RX channel is specified, the driver will
- * attempt to use it bidirectionally, however if it is
+ * attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
if (dmae->rx_channel && !dmae->tx_channel)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 9da4489dc345..69d78604d1fc 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2015 MediaTek Inc.
+ * Copyright (c) 2014-2015, 2022 MediaTek Inc.
* Author: Chaotian.Jing <chaotian.jing@mediatek.com>
*/
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -440,8 +441,10 @@ struct msdc_host {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
+ struct pinctrl_state *pins_eint;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ int eint_irq; /* interrupt from sdio device for waking up system */
struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
@@ -1521,17 +1524,46 @@ static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
- unsigned long flags;
struct msdc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret;
spin_lock_irqsave(&host->lock, flags);
__msdc_enable_sdio_irq(host, enb);
spin_unlock_irqrestore(&host->lock, flags);
- if (enb)
- pm_runtime_get_noresume(host->dev);
- else
- pm_runtime_put_noidle(host->dev);
+ if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
+ if (enb) {
+ /*
+ * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
+ * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
+			 * Since the current pinstate is pins_uhs, to ensure the pinctrl select takes
+			 * effect, we change the pinstate to pins_eint first.
+ */
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);
+
+ if (ret) {
+ dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
+ }
+
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ } else {
+ dev_pm_clear_wake_irq(host->dev);
+ }
+ } else {
+ if (enb) {
+ /* Ensure host->pins_eint is NULL */
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ pm_runtime_put_noidle(host->dev);
+ }
+ }
}
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
@@ -2319,7 +2351,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
else
val = readl(host->base + PAD_DS_TUNE);
- dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val);
+ dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);
return 0;
@@ -2414,6 +2446,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2635,6 +2670,20 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+	/* Support for SDIO eint irq? */
+ if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
+ host->eint_irq = platform_get_irq_byname(pdev, "sdio_wakeup");
+ if (host->eint_irq > 0) {
+ host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
+ if (IS_ERR(host->pins_eint)) {
+ dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
+ host->pins_eint = NULL;
+ } else {
+ device_init_wakeup(&pdev->dev, true);
+ }
+ }
+ }
+
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
@@ -2849,6 +2898,15 @@ static int __maybe_unused msdc_runtime_suspend(struct device *dev)
struct msdc_host *host = mmc_priv(mmc);
msdc_save_reg(host);
+
+ if (sdio_irq_claimed(mmc)) {
+ if (host->pins_eint) {
+ disable_irq(host->irq);
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ }
+
+ __msdc_enable_sdio_irq(host, 0);
+ }
msdc_gate_clock(host);
return 0;
}
@@ -2864,25 +2922,47 @@ static int __maybe_unused msdc_runtime_resume(struct device *dev)
return ret;
msdc_restore_reg(host);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint) {
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ enable_irq(host->irq);
+ }
return 0;
}
static int __maybe_unused msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
+ /*
+	 * Bump up the runtime PM usage counter, otherwise dev->power.needs_force_resume will
+	 * not be set and pm_runtime_force_resume() will return early.
+ */
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_get_noresume(dev);
+
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused msdc_resume(struct device *dev)
{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_put_noidle(dev);
+
return pm_runtime_force_resume(dev);
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index de04b5afef2e..2cf0413407ea 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -923,7 +923,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
* One way to prevent this is to only allow 1-bit transfers.
*/
- if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
+ if (is_imx31_mmc(mxcmci) && mmc_card_sdio(card))
host->caps &= ~MMC_CAP_4_BIT_DATA;
else
host->caps |= MMC_CAP_4_BIT_DATA;
@@ -1025,7 +1025,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- host->devtype = (enum mxcmci_type)of_device_get_match_data(&pdev->dev);
+ host->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
/* adjust max_segs after devtype detection */
if (!is_mpc512x_mmc(host))
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0db9490dc659..e4003f6058eb 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 1a1e3e020a8c..c4abfee1ebae 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -43,6 +43,7 @@ struct renesas_sdhi_quirks {
bool hs400_4taps;
bool fixed_addr_mode;
bool dma_one_rx_only;
+ bool manual_tap_correction;
u32 hs400_bad_taps;
const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
};
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 4404ca1f98d8..6edbf5c161ab 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -49,9 +49,6 @@
#define HOST_MODE_GEN3_32BIT (HOST_MODE_GEN3_WMODE | HOST_MODE_GEN3_BUSWIDTH)
#define HOST_MODE_GEN3_64BIT 0
-#define CTL_SDIF_MODE 0xe6
-#define SDIF_MODE_HS400 BIT(0)
-
#define SDHI_VER_GEN2_SDR50 0x490c
#define SDHI_VER_RZ_A1 0x820b
/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
@@ -383,8 +380,7 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
priv->scc_tappos_hs400);
- /* Gen3 can't do automatic tap correction with HS400, so disable it */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC)
+ if (priv->quirks && priv->quirks->manual_tap_correction)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
@@ -562,23 +558,25 @@ static void renesas_sdhi_scc_reset(struct tmio_mmc_host *host, struct renesas_sd
}
/* only populated for TMIO_MMC_MIN_RCAR2 */
-static void renesas_sdhi_reset(struct tmio_mmc_host *host)
+static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
{
struct renesas_sdhi *priv = host_to_priv(host);
int ret;
u16 val;
- if (priv->rstc) {
- reset_control_reset(priv->rstc);
- /* Unknown why but without polling reset status, it will hang */
- read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
- false, priv->rstc);
- /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- priv->needs_adjust_hs400 = false;
- renesas_sdhi_set_clock(host, host->clk_cache);
- } else if (priv->scc_ctl) {
- renesas_sdhi_scc_reset(host, priv);
+ if (!preserve) {
+ if (priv->rstc) {
+ reset_control_reset(priv->rstc);
+ /* Unknown why but without polling reset status, it will hang */
+ read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
+ false, priv->rstc);
+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ priv->needs_adjust_hs400 = false;
+ renesas_sdhi_set_clock(host, host->clk_cache);
+ } else if (priv->scc_ctl) {
+ renesas_sdhi_scc_reset(host, priv);
+ }
}
if (sd_ctrl_read16(host, CTL_VERSION) >= SDHI_VER_GEN3_SD) {
@@ -719,7 +717,7 @@ static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/* Change TAP position according to correction status */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
+ if (priv->quirks && priv->quirks->manual_tap_correction &&
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
/*
@@ -938,6 +936,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk_cd))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
+ priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return PTR_ERR(priv->rstc);
+
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
@@ -1030,10 +1032,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
- priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(priv->rstc))
- return PTR_ERR(priv->rstc);
-
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is first known SDHI to use 32bit block count */
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 3084b15ae2cb..42937596c4c4 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -170,6 +170,7 @@ static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
@@ -182,25 +183,30 @@ static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a7796_es13_calib_table,
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a77965_calib_table,
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.hs400_calib_table = r8a77990_calib_table,
+ .manual_tap_correction = true,
};
/*
@@ -268,6 +274,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
@@ -321,7 +328,7 @@ renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
}
/*
- * renesas_sdhi_internal_dmac_map() will be called with two difference
+ * renesas_sdhi_internal_dmac_map() will be called with two different
* sg pointers in two mmc_data by .pre_req(), but tmio host can have a single
* sg_ptr only. So, renesas_sdhi_internal_dmac_{un}map() should use a sg
* pointer in a mmc_data instead of host->sg_ptr.
@@ -355,7 +362,7 @@ renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
data->host_cookie = cookie;
- /* This DMAC cannot handle if buffer is not 128-bytes alignment */
+ /* This DMAC needs buffers to be 128-byte aligned */
if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
renesas_sdhi_internal_dmac_unmap(host, data, cookie);
return false;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c0350e9c03f3..4cca4c90769b 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -775,8 +775,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct sdhci_acpi_slot *slot;
- struct acpi_device *device, *child;
const struct dmi_system_id *id;
+ struct acpi_device *device;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
@@ -796,10 +796,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
slot = sdhci_acpi_get_slot(device);
/* Power on the SDHCI controller and its children */
- acpi_device_fix_up_power(device);
- list_for_each_entry(child, &device->children, node)
- if (child->status.present && child->status.enabled)
- acpi_device_fix_up_power(child);
+ acpi_device_fix_up_power_extended(device);
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 4d4aac85cc7a..61a12f2f7f03 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2013 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013 Broadcom Corporation
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 8eb57de48e0c..aff36a933ebe 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -31,6 +31,8 @@
struct sdhci_brcmstb_priv {
void __iomem *cfg_regs;
unsigned int flags;
+ struct clk *base_clk;
+ u32 base_freq_hz;
};
struct brcmstb_match_priv {
@@ -250,9 +252,11 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
const struct of_device_id *match;
struct sdhci_brcmstb_priv *priv;
+ u32 actual_clock_mhz;
struct sdhci_host *host;
struct resource *iomem;
struct clk *clk;
+ struct clk *base_clk = NULL;
int res;
match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
@@ -330,6 +334,35 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ /* Change the base clock frequency if the DT property exists */
+ if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->base_freq_hz) != 0)
+ goto add_host;
+
+ base_clk = devm_clk_get_optional(&pdev->dev, "sdio_freq");
+ if (IS_ERR(base_clk)) {
+ dev_warn(&pdev->dev, "Clock for \"sdio_freq\" not found\n");
+ goto add_host;
+ }
+
+ res = clk_prepare_enable(base_clk);
+ if (res)
+ goto err;
+
+ /* set improved clock rate */
+ clk_set_rate(base_clk, priv->base_freq_hz);
+ actual_clock_mhz = clk_get_rate(base_clk) / 1000000;
+
+ host->caps &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ host->caps |= (actual_clock_mhz << SDHCI_CLOCK_BASE_SHIFT);
+ /* Disable presets because they are now incorrect */
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ dev_dbg(&pdev->dev, "Base Clock Frequency changed to %dMHz\n",
+ actual_clock_mhz);
+ priv->base_clk = base_clk;
+
+add_host:
res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
@@ -340,6 +373,7 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
err:
sdhci_pltfm_free(pdev);
err_clk:
+ clk_disable_unprepare(base_clk);
clk_disable_unprepare(clk);
return res;
}
@@ -351,11 +385,51 @@ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_brcmstb_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ clk_disable_unprepare(priv->base_clk);
+ return sdhci_pltfm_suspend(dev);
+}
+
+static int sdhci_brcmstb_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = sdhci_pltfm_resume(dev);
+ if (!ret && priv->base_freq_hz) {
+ ret = clk_prepare_enable(priv->base_clk);
+ /*
+ * Note: using clk_get_rate() below as clk_get_rate()
+ * honors CLK_GET_RATE_NOCACHE attribute, but clk_set_rate()
+ * may do implicit get_rate() calls that do not honor
+ * CLK_GET_RATE_NOCACHE.
+ */
+ if (!ret &&
+ (clk_get_rate(priv->base_clk) != priv->base_freq_hz))
+ ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_brcmstb_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_brcmstb_suspend, sdhci_brcmstb_resume)
+};
+
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pltfm_pmops,
+ .pm = &sdhci_brcmstb_pmops,
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 032bf852397f..6db35b1b8557 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
/*
* iProc SDHCI platform driver
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e395411fb6fd..dc2991422a87 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2435,33 +2435,12 @@ static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
};
static const struct of_device_id sdhci_msm_dt_match[] = {
- /* Following two entries are deprecated (kept only for backward compatibility) */
- {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
- /* Add entries for sdcc versions less than 5.0 here */
- {.compatible = "qcom,apq8084-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8226-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8916-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8953-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8974-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8992-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8994-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8996-sdhci", .data = &sdhci_msm_mci_var},
/*
- * Add entries for sdcc version 5.0 here. For SDCC version 5.0.0,
- * MCI registers are removed from SDCC interface and some registers
- * are moved to HC.
+ * Do not add new variants to the driver which are compatible with
+ * generic ones, unless they need customization.
*/
- {.compatible = "qcom,qcs404-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdx55-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdx65-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdm630-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm6125-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm6350-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm8150-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm8250-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sc7280-sdhci", .data = &sdhci_msm_v5_var},
- /* Add entries where soc specific handling is required, here */
+ {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
+ {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 757801dfc308..3997cad1f793 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1733,7 +1733,6 @@ err_pltfm_free:
static int sdhci_arasan_remove(struct platform_device *pdev)
{
- int ret;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
@@ -1747,11 +1746,11 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
sdhci_arasan_unregister_sdclk(&pdev->dev);
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(clk_ahb);
- return ret;
+ return 0;
}
static struct platform_driver sdhci_arasan_driver = {
diff --git a/drivers/mmc/host/sdhci-of-aspeed-test.c b/drivers/mmc/host/sdhci-of-aspeed-test.c
index 1ed4f86291f2..ecb502606c53 100644
--- a/drivers/mmc/host/sdhci-of-aspeed-test.c
+++ b/drivers/mmc/host/sdhci-of-aspeed-test.c
@@ -96,10 +96,4 @@ static struct kunit_suite aspeed_sdhci_test_suite = {
.test_cases = aspeed_sdhci_test_cases,
};
-static struct kunit_suite *aspeed_sdc_test_suite_array[] = {
- &aspeed_sdhci_test_suite,
- NULL,
-};
-
-static struct kunit_suite **aspeed_sdc_test_suites
- __used __section(".kunit_test_suites") = aspeed_sdc_test_suite_array;
+kunit_test_suite(aspeed_sdhci_test_suite);
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 6e4e132903a6..ba6677bf7372 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -606,25 +606,6 @@ static struct platform_driver aspeed_sdc_driver = {
#if defined(CONFIG_MMC_SDHCI_OF_ASPEED_TEST)
#include "sdhci-of-aspeed-test.c"
-
-static inline int aspeed_sdc_tests_init(void)
-{
- return __kunit_test_suites_init(aspeed_sdc_test_suites);
-}
-
-static inline void aspeed_sdc_tests_exit(void)
-{
- __kunit_test_suites_exit(aspeed_sdc_test_suites);
-}
-#else
-static inline int aspeed_sdc_tests_init(void)
-{
- return 0;
-}
-
-static inline void aspeed_sdc_tests_exit(void)
-{
-}
#endif
static int __init aspeed_sdc_init(void)
@@ -637,18 +618,7 @@ static int __init aspeed_sdc_init(void)
rc = platform_driver_register(&aspeed_sdc_driver);
if (rc < 0)
- goto cleanup_sdhci;
-
- rc = aspeed_sdc_tests_init();
- if (rc < 0) {
- platform_driver_unregister(&aspeed_sdc_driver);
- goto cleanup_sdhci;
- }
-
- return 0;
-
-cleanup_sdhci:
- platform_driver_unregister(&aspeed_sdhci_driver);
+ platform_driver_unregister(&aspeed_sdhci_driver);
return rc;
}
@@ -656,8 +626,6 @@ module_init(aspeed_sdc_init);
static void __exit aspeed_sdc_exit(void)
{
- aspeed_sdc_tests_exit();
-
platform_driver_unregister(&aspeed_sdc_driver);
platform_driver_unregister(&aspeed_sdhci_driver);
}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 10fb4cb2c731..cd0134580a90 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -100,8 +100,13 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- if (timing == MMC_TIMING_MMC_DDR52)
- sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ u8 mc1r;
+
+ if (timing == MMC_TIMING_MMC_DDR52) {
+ mc1r = sdhci_readb(host, SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_DDR;
+ sdhci_writeb(host, mc1r, SDMMC_MC1R);
+ }
sdhci_set_uhs_signaling(host, timing);
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index bac874ab0b33..a7343d4bc50e 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include "sdhci-pltfm.h"
@@ -30,6 +31,7 @@
/* Offset inside the vendor area 1 */
#define DWCMSHC_HOST_CTRL3 0x8
#define DWCMSHC_EMMC_CONTROL 0x2c
+#define DWCMSHC_CARD_IS_EMMC BIT(0)
#define DWCMSHC_ENHANCED_STROBE BIT(8)
#define DWCMSHC_EMMC_ATCTRL 0x40
@@ -38,7 +40,7 @@
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
#define DWCMSHC_EMMC_DLL_TXCLK 0x808
#define DWCMSHC_EMMC_DLL_STRBIN 0x80c
-#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DECMSHC_EMMC_DLL_CMDOUT 0x810
#define DWCMSHC_EMMC_DLL_STATUS0 0x840
#define DWCMSHC_EMMC_DLL_START BIT(0)
#define DWCMSHC_EMMC_DLL_LOCKED BIT(8)
@@ -47,22 +49,39 @@
#define DWCMSHC_EMMC_DLL_START_POINT 16
#define DWCMSHC_EMMC_DLL_INC 8
#define DWCMSHC_EMMC_DLL_DLYENA BIT(27)
-#define DLL_TXCLK_TAPNUM_DEFAULT 0x8
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
+#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
+#define DLL_STRBIN_DELAY_NUM_OFFSET 16
+#define DLL_STRBIN_DELAY_NUM_DEFAULT 0x16
#define DLL_RXCLK_NO_INVERTER 1
#define DLL_RXCLK_INVERTER 0
+#define DLL_CMDOUT_TAPNUM_90_DEGREES 0x8
+#define DLL_CMDOUT_TAPNUM_FROM_SW BIT(24)
+#define DLL_CMDOUT_SRC_CLK_NEG BIT(28)
+#define DLL_CMDOUT_EN_SRC_CLK_NEG BIT(29)
+
#define DLL_LOCK_WO_TMOUT(x) \
((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
-#define RK3568_MAX_CLKS 3
+#define RK35xx_MAX_CLKS 3
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
-struct rk3568_priv {
+enum dwcmshc_rk_type {
+ DWCMSHC_RK3568,
+ DWCMSHC_RK3588,
+};
+
+struct rk35xx_priv {
/* Rockchip specified optional clocks */
- struct clk_bulk_data rockchip_clks[RK3568_MAX_CLKS];
+ struct clk_bulk_data rockchip_clks[RK35xx_MAX_CLKS];
+ struct reset_control *reset;
+ enum dwcmshc_rk_type devtype;
u8 txclk_tapnum;
};
@@ -131,7 +150,9 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- u16 ctrl_2;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl, ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -149,8 +170,15 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
else if ((timing == MMC_TIMING_UHS_DDR50) ||
(timing == MMC_TIMING_MMC_DDR52))
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
- else if (timing == MMC_TIMING_MMC_HS400)
+ else if (timing == MMC_TIMING_MMC_HS400) {
+ /* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
+ ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ ctrl |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+
ctrl_2 |= DWCMSHC_CTRL_HS400;
+ }
+
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
@@ -176,24 +204,18 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
u8 txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
u32 extra, reg;
int err;
host->mmc->actual_clock = 0;
- /*
- * DO NOT TOUCH THIS SETTING. RX clk inverter unit is enabled
- * by default, but it shouldn't be enabled. We should anyway
- * disable it before issuing any cmds.
- */
- extra = DWCMSHC_EMMC_DLL_DLYENA |
- DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
- sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
-
- if (clock == 0)
+ if (clock == 0) {
+ /* Disable interface clock at initial state. */
+ sdhci_set_clock(host, clock);
return;
+ }
/* Rockchip platform only support 375KHz for identify mode */
if (clock <= 400000)
@@ -211,9 +233,21 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
extra &= ~BIT(0);
sdhci_writel(host, extra, reg);
- if (clock <= 400000) {
- /* Disable DLL to reset sample clock */
+ if (clock <= 52000000) {
+ /* Disable DLL and reset both of sample and drive clock */
sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_RXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
+ /*
+		 * Before switching to hs400es mode, the driver enables enhanced
+		 * strobe first, so the PHY's enhanced strobe parameters need to
+		 * be configured here.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_STRBIN_DELAY_NUM_SEL |
+ DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
return;
}
@@ -222,6 +256,15 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
udelay(1);
sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
+ /*
+ * We shouldn't set DLL_RXCLK_NO_INVERTER for identify mode but
+ * we must set it in higher speed mode.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA;
+ if (priv->devtype == DWCMSHC_RK3568)
+ extra |= DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+
/* Init DLL settings */
extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
0x2 << DWCMSHC_EMMC_DLL_INC |
@@ -244,8 +287,20 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
host->mmc->ios.timing == MMC_TIMING_MMC_HS400)
txclk_tapnum = priv->txclk_tapnum;
+ if ((priv->devtype == DWCMSHC_RK3588) && host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
+
+ extra = DLL_CMDOUT_SRC_CLK_NEG |
+ DLL_CMDOUT_EN_SRC_CLK_NEG |
+ DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_CMDOUT_TAPNUM_90_DEGREES |
+ DLL_CMDOUT_TAPNUM_FROM_SW;
+ sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
+ }
+
extra = DWCMSHC_EMMC_DLL_DLYENA |
DLL_TXCLK_TAPNUM_FROM_SW |
+ DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL |
txclk_tapnum;
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
@@ -255,6 +310,21 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
}
+static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ struct rk35xx_priv *priv = dwc_priv->priv;
+
+ if (mask & SDHCI_RESET_ALL && priv->reset) {
+ reset_control_assert(priv->reset);
+ udelay(1);
+ reset_control_deassert(priv->reset);
+ }
+
+ sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -264,12 +334,12 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
.adma_write_desc = dwcmshc_adma_write_desc,
};
-static const struct sdhci_ops sdhci_dwcmshc_rk3568_ops = {
+static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.set_clock = dwcmshc_rk3568_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = dwcmshc_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .reset = sdhci_reset,
+ .reset = rk35xx_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
};
@@ -279,30 +349,46 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_rk3568_pdata = {
- .ops = &sdhci_dwcmshc_rk3568_ops,
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
+static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
-static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
+
+ priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
+ if (IS_ERR(priv->reset)) {
+ err = PTR_ERR(priv->reset);
+ dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
+ return err;
+ }
priv->rockchip_clks[0].id = "axi";
priv->rockchip_clks[1].id = "block";
priv->rockchip_clks[2].id = "timer";
- err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK3568_MAX_CLKS,
+ err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK35xx_MAX_CLKS,
priv->rockchip_clks);
if (err) {
dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err);
return err;
}
- err = clk_bulk_prepare_enable(RK3568_MAX_CLKS, priv->rockchip_clks);
+ err = clk_bulk_prepare_enable(RK35xx_MAX_CLKS, priv->rockchip_clks);
if (err) {
dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err);
return err;
@@ -321,10 +407,28 @@ static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc
return 0;
}
+static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ /*
+	 * Don't support high-speed bus modes with a low clock speed, as we
+	 * cannot use the DLL in that case.
+ */
+ if (host->mmc->f_max <= 52000000) {
+ dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
+ host->mmc->f_max);
+ host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
+ host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
+ }
+}
+
static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
{
+ .compatible = "rockchip,rk3588-dwcmshc",
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
+ },
+ {
.compatible = "rockchip,rk3568-dwcmshc",
- .data = &sdhci_dwcmshc_rk3568_pdata,
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
},
{
.compatible = "snps,dwcmshc-sdhci",
@@ -336,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
#endif
@@ -347,12 +454,12 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct dwcmshc_priv *priv;
- struct rk3568_priv *rk_priv = NULL;
+ struct rk35xx_priv *rk_priv = NULL;
const struct sdhci_pltfm_data *pltfm_data;
int err;
u32 extra;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
@@ -402,33 +509,47 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc_host_ops.request = dwcmshc_request;
host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
- if (pltfm_data == &sdhci_dwcmshc_rk3568_pdata) {
- rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk3568_priv), GFP_KERNEL);
+ if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) {
+ rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
if (!rk_priv) {
err = -ENOMEM;
goto err_clk;
}
+ if (of_device_is_compatible(pdev->dev.of_node, "rockchip,rk3588-dwcmshc"))
+ rk_priv->devtype = DWCMSHC_RK3588;
+ else
+ rk_priv->devtype = DWCMSHC_RK3568;
+
priv->priv = rk_priv;
- err = dwcmshc_rk3568_init(host, priv);
+ err = dwcmshc_rk35xx_init(host, priv);
if (err)
goto err_clk;
}
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_clk;
+ if (rk_priv)
+ dwcmshc_rk35xx_postinit(host, priv);
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_setup_host;
+
return 0;
+err_setup_host:
+ sdhci_cleanup_host(host);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
free_pltfm:
sdhci_pltfm_free(pdev);
@@ -440,14 +561,14 @@ static int dwcmshc_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
sdhci_remove_host(host, 0);
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
sdhci_pltfm_free(pdev);
@@ -460,7 +581,7 @@ static int dwcmshc_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = sdhci_suspend_host(host);
@@ -472,7 +593,7 @@ static int dwcmshc_suspend(struct device *dev)
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
return ret;
@@ -483,7 +604,7 @@ static int dwcmshc_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
@@ -497,7 +618,7 @@ static int dwcmshc_resume(struct device *dev)
}
if (rk_priv) {
- ret = clk_bulk_prepare_enable(RK3568_MAX_CLKS,
+ ret = clk_bulk_prepare_enable(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
if (ret)
return ret;
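For context, the reworked probe above relies on the split registration path exported by sdhci.c: sdhci_setup_host() reads the capability registers and allocates resources, __sdhci_add_host() then publishes the host to the MMC core, and sdhci_cleanup_host() undoes the setup step on failure. A minimal sketch of that pattern, with the capability fixup slotted between the two halves (names outside sdhci.h are illustrative):

/*
 * Sketch: adjust host caps after sdhci_setup_host() has read the
 * capability registers, but before the host becomes visible to the core.
 */
static int example_register_host(struct sdhci_host *host)
{
	int err;

	err = sdhci_setup_host(host);
	if (err)
		return err;

	if (host->mmc->f_max <= 52000000)
		host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);

	err = __sdhci_add_host(host);
	if (err)
		sdhci_cleanup_host(host);	/* undo sdhci_setup_host() */

	return err;
}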
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d9dc41143bb3..e0266638381d 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -904,6 +904,7 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
+ of_node_put(scfg_node);
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
@@ -1418,7 +1419,7 @@ static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct device_node *np;
+ struct device_node *np, *tp;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
int ret;
@@ -1463,7 +1464,9 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
+ if (tp) {
+ of_node_put(tp);
host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
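Both hunks above apply the usual OF reference-counting rule: of_find_matching_node() and of_find_compatible_node() return their result with an elevated refcount, so the caller must drop it with of_node_put() once the node (or the mere fact of its existence) has been used. A minimal sketch of the quirk-detection case, assuming only presence matters:

#include <linux/of.h>

static bool example_board_has_p2020_esdhc(void)
{
	/* of_find_compatible_node() takes a reference on the returned node */
	struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");

	if (!np)
		return false;

	of_node_put(np);	/* balance the implicit reference */
	return true;
}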
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 86e867ffbb10..033be559a730 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/*
* omap_device_pm_domain has callbacks to enable the main
* functional clock, interface clock and also configure the
- * SYSCONFIG register of omap devices. The callback will be invoked
- * as part of pm_runtime_get_sync.
+ * SYSCONFIG register to clear any boot loader set voltage
+ * capabilities before calling sdhci_setup_host(). The
+ * callback will be invoked as part of pm_runtime_get_sync.
*/
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
@@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_runtime_suspend_host(host);
+ if (omap_host->con != -EINVAL)
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
@@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- if (omap_host->con != -EINVAL)
+ if (omap_host->con != -EINVAL) {
sdhci_omap_context_restore(omap_host);
-
- sdhci_runtime_resume_host(host, 0);
+ sdhci_runtime_resume_host(host, 0);
+ }
return 0;
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index ed53276f6ad9..622b7de96c7f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1240,16 +1240,11 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
#ifdef CONFIG_ACPI
static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
{
- struct acpi_device *device, *child;
+ struct acpi_device *device;
device = ACPI_COMPANION(&slot->chip->pdev->dev);
- if (!device)
- return;
-
- acpi_device_fix_up_power(device);
- list_for_each_entry(child, &device->children, node)
- if (child->status.present && child->status.enabled)
- acpi_device_fix_up_power(child);
+ if (device)
+ acpi_device_fix_up_power_extended(device);
}
#else
static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index f13c08db3da5..4d509f656188 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -95,6 +95,9 @@
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9763E_CFG 0x8A0
+#define GLI_9763E_CFG_LPSN_DIS BIT(12)
+
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
#define GLI_9763E_CFG2_L1DLY_MID 0x54
@@ -963,12 +966,40 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+
+ if (enable)
+ value &= ~GLI_9763E_CFG_LPSN_DIS;
+ else
+ value |= GLI_9763E_CFG_LPSN_DIS;
+
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct sdhci_host *host = slot->host;
u16 clock;
+ /* Enable LPM negotiation to allow entering L1 state */
+ gl9763e_set_low_power_negotiation(slot, true);
+
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
@@ -1002,6 +1033,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
clock |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
return 0;
}
#endif
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index d41582c21aa3..6415916fbd91 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -440,15 +440,14 @@ static int sdhci_st_remove(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct reset_control *rstc = pdata->rstc;
- int ret;
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(pdata->icnclk);
reset_control_assert(rstc);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 22152029e14c..7689ffec5ad1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -224,6 +224,7 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1716,6 +1717,7 @@ static bool sdhci_send_command_retry(struct sdhci_host *host,
if (!timeout--) {
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
cmd->error = -EIO;
return false;
@@ -1965,6 +1967,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1987,6 +1990,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: PLL clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -3161,6 +3165,7 @@ static void sdhci_timeout_timer(struct timer_list *t)
if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
host->cmd->error = -ETIMEDOUT;
@@ -3183,6 +3188,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
(host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
if (host->data) {
@@ -3234,17 +3240,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
- if (intmask & SDHCI_INT_TIMEOUT)
+ if (intmask & SDHCI_INT_TIMEOUT) {
host->cmd->error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else {
host->cmd->error = -EILSEQ;
-
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ }
/* Treat data command CRC error the same as data CRC error */
if (host->cmd->data &&
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
@@ -3266,6 +3276,8 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
+ sdhci_err_stats_inc(host, AUTO_CMD);
+
if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
@@ -3342,6 +3354,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
__sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
@@ -3370,23 +3383,30 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
- if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data->error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_DATA_END_BIT)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_DATA_END_BIT) {
host->data->error = -EILSEQ;
- else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if ((intmask & SDHCI_INT_DATA_CRC) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ != MMC_BUS_TEST_R) {
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
intmask);
sdhci_adma_show_error(host);
+ sdhci_err_stats_inc(host, ADMA);
host->data->error = -EIO;
if (host->ops->adma_workaround)
host->ops->adma_workaround(host, intmask);
@@ -3584,6 +3604,7 @@ out:
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
@@ -3905,20 +3926,27 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (!host->cqe_on)
return false;
- if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
+ if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- else if (intmask & SDHCI_INT_TIMEOUT)
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ } else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- else if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
*data_error = -EIO;
- else
+ sdhci_err_stats_inc(host, ADMA);
+ } else
*data_error = 0;
/* Clear selected interrupts. */
@@ -3934,6 +3962,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
sdhci_writel(host, intmask, SDHCI_INT_STATUS);
pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d7929d725730..95a08f09df30 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -356,6 +356,9 @@ struct sdhci_adma2_64_desc {
*/
#define MMC_CMD_TRANSFER_TIME (10 * NSEC_PER_MSEC) /* max 10 ms */
+#define sdhci_err_stats_inc(host, err_name) \
+ mmc_debugfs_err_stats_inc((host)->mmc, MMC_ERR_##err_name)
+
enum sdhci_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
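To show what the new helper does: the macro pastes the error name onto the MMC_ERR_ prefix and bumps the per-host debugfs error counter, so the two statements below are equivalent after expansion (usage sketch only):

	/* usage sketch: equivalent after macro expansion */
	sdhci_err_stats_inc(host, CMD_TIMEOUT);
	mmc_debugfs_err_stats_inc(host->mmc, MMC_ERR_CMD_TIMEOUT);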
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b55a29c53d9c..53a2ad9a24b8 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -75,7 +75,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
tmio_mmc_clk_start(host);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
usleep_range(10000, 11000);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index e754bb3f5c32..501613c74406 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -42,6 +42,7 @@
#define CTL_DMA_ENABLE 0xd8
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
+#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -98,6 +99,9 @@
/* Definitions for values the CTL_DMA_ENABLE register can take */
#define DMA_ENABLE_DMASDRW BIT(1)
+/* Definitions for values the CTL_SDIF_MODE register can take */
+#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -181,7 +185,7 @@ struct tmio_mmc_host {
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- void (*reset)(struct tmio_mmc_host *host);
+ void (*reset)(struct tmio_mmc_host *host, bool preserve);
bool (*check_retune)(struct tmio_mmc_host *host, struct mmc_request *mrq);
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a5850d83908b..437048bb8027 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -179,8 +179,17 @@ static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
+ u16 card_opt, clk_ctrl, sdif_mode;
+
+ if (preserve) {
+ card_opt = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
+ clk_ctrl = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);
+ }
+
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
usleep_range(10000, 11000);
@@ -190,7 +199,7 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
if (host->reset)
- host->reset(host);
+ host->reset(host, preserve);
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
@@ -206,6 +215,13 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
}
+ if (preserve) {
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, card_opt);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk_ctrl);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
+ }
+
if (host->mmc->card)
mmc_retune_needed(host->mmc);
}
@@ -248,7 +264,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&host->lock, flags);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, true);
/* Ready for new calls */
host->mrq = NULL;
@@ -961,7 +977,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
host->set_clock(host, 0);
break;
@@ -1189,7 +1205,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->sdcard_irq_mask_all = TMIO_MASK_ALL;
_host->set_clock(_host, 0);
- tmio_mmc_reset(_host);
+ tmio_mmc_reset(_host, false);
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
@@ -1285,7 +1301,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_clk_enable(host);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
if (host->clk_cache)
host->set_clock(host, host->clk_cache);
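The new bool argument also reaches the optional host->reset() hook, so platform glue can decide what state survives a reset. A hypothetical callback honouring the flag might look like the sketch below; the saved register is only an example, not something this patch touches outside tmio_mmc_core.c:

/* Hypothetical SoC hook: preserve CTL_SDIF_MODE across the reset. */
static void example_soc_reset(struct tmio_mmc_host *host, bool preserve)
{
	u16 sdif_mode = 0;

	if (preserve)
		sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);

	/* ... SoC-specific reset sequence would go here ... */

	if (preserve)
		sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
}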
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 134e27328597..25bad4318305 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -112,6 +112,13 @@ static const struct of_device_id dataflash_dt_ids[] = {
MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
+static const struct spi_device_id dataflash_spi_ids[] = {
+ { .name = "at45", },
+ { .name = "dataflash", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, dataflash_spi_ids);
+
/* ......................................................................... */
/*
@@ -936,6 +943,7 @@ static struct spi_driver dataflash_driver = {
.probe = dataflash_probe,
.remove = dataflash_remove,
+ .id_table = dataflash_spi_ids,
/* FIXME: investigate suspend and resume... */
};
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 6950a8764815..36e060386e59 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -270,7 +270,9 @@ static int powernv_flash_release(struct platform_device *pdev)
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
- return mtd_device_unregister(&(data->mtd));
+ WARN_ON(mtd_device_unregister(&data->mtd));
+
+ return 0;
}
static const struct of_device_id powernv_flash_match[] = {
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 24073518587f..f58742486d3d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -1045,13 +1045,9 @@ static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
- int ret, i;
+ int i;
dev = platform_get_drvdata(pdev);
- if (!dev) {
- dev_err(&pdev->dev, "dev is null\n");
- return -ENODEV;
- }
/* clean up for all nor flash */
for (i = 0; i < dev->num_flashes; i++) {
@@ -1060,9 +1056,7 @@ static int spear_smi_remove(struct platform_device *pdev)
continue;
/* clean up mtd stuff */
- ret = mtd_device_unregister(&flash->mtd);
- if (ret)
- dev_err(&pdev->dev, "error removing mtd\n");
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
clk_disable_unprepare(dev->clk);
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index d3377b10fc0f..54861d889c30 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2084,15 +2084,12 @@ static int stfsm_probe(struct platform_device *pdev)
* Configure READ/WRITE/ERASE sequences according to platform and
* device flags.
*/
- if (info->config) {
+ if (info->config)
ret = info->config(fsm);
- if (ret)
- goto err_clk_unprepare;
- } else {
+ else
ret = stfsm_prepare_rwe_seqs_default(fsm);
- if (ret)
- goto err_clk_unprepare;
- }
+ if (ret)
+ goto err_clk_unprepare;
fsm->mtd.name = info->name;
fsm->mtd.dev.parent = &pdev->dev;
@@ -2115,10 +2112,12 @@ static int stfsm_probe(struct platform_device *pdev)
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
- return mtd_device_register(&fsm->mtd, NULL, 0);
-
+ ret = mtd_device_register(&fsm->mtd, NULL, 0);
+ if (ret) {
err_clk_unprepare:
- clk_disable_unprepare(fsm->clk);
+ clk_disable_unprepare(fsm->clk);
+ }
+
return ret;
}
@@ -2126,9 +2125,11 @@ static int stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
+ WARN_ON(mtd_device_unregister(&fsm->mtd));
+
clk_disable_unprepare(fsm->clk);
- return mtd_device_unregister(&fsm->mtd);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index a3439b791eeb..a6161ce340d4 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -233,16 +233,16 @@ static int am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
- int ret;
- ret = hyperbus_unregister_device(&priv->hbdev);
+ hyperbus_unregister_device(&priv->hbdev);
+
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
- return ret;
+ return 0;
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 2f9fc4e17d53..4d8047d43e48 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -126,16 +126,12 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
}
EXPORT_SYMBOL_GPL(hyperbus_register_device);
-int hyperbus_unregister_device(struct hyperbus_device *hbdev)
+void hyperbus_unregister_device(struct hyperbus_device *hbdev)
{
- int ret = 0;
-
if (hbdev && hbdev->mtd) {
- ret = mtd_device_unregister(hbdev->mtd);
+ WARN_ON(mtd_device_unregister(hbdev->mtd));
map_destroy(hbdev->mtd);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index 6e08ec1d4f09..d00d30243403 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -134,7 +134,7 @@ static int rpcif_hb_probe(struct platform_device *pdev)
error = rpcif_hw_init(&hyperbus->rpc, true);
if (error)
- return error;
+ goto out_disable_rpm;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
@@ -145,19 +145,24 @@ static int rpcif_hb_probe(struct platform_device *pdev)
hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
error = hyperbus_register_device(&hyperbus->hbdev);
if (error)
- rpcif_disable_rpm(&hyperbus->rpc);
+ goto out_disable_rpm;
+
+ return 0;
+out_disable_rpm:
+ rpcif_disable_rpm(&hyperbus->rpc);
return error;
}
static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
- int error = hyperbus_unregister_device(&hyperbus->hbdev);
+
+ hyperbus_unregister_device(&hyperbus->hbdev);
rpcif_disable_rpm(&hyperbus->rpc);
- return error;
+ return 0;
}
static struct platform_driver rpcif_platform_driver = {
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 72f5c7b30079..367e2d906de0 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -478,7 +478,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
*/
static int lpddr2_nvm_remove(struct platform_device *pdev)
{
- return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+ WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
+
+ return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 4f63b8430c71..85eca6a192e6 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -66,18 +66,12 @@ static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
- int i, err = 0;
+ int i;
info = platform_get_drvdata(dev);
- if (!info) {
- err = -EINVAL;
- goto out;
- }
if (info->cmtd) {
- err = mtd_device_unregister(info->cmtd);
- if (err)
- goto out;
+ WARN_ON(mtd_device_unregister(info->cmtd));
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
@@ -92,10 +86,9 @@ static int physmap_flash_remove(struct platform_device *dev)
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
-out:
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
- return err;
+ return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index ad7cd9cfaee0..a1b8b7b25f88 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -93,6 +93,7 @@ static int ap_flash_init(struct platform_device *pdev)
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
@@ -207,6 +208,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
versatile_flashprot = (enum versatile_flashprot)devid->data;
rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f73172111465..60b222799871 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -29,7 +29,7 @@ static void blktrans_dev_release(struct kref *kref)
struct mtd_blktrans_dev *dev =
container_of(kref, struct mtd_blktrans_dev, ref);
- blk_cleanup_disk(dev->disk);
+ put_disk(dev->disk);
blk_mq_free_tag_set(dev->tag_set);
kfree(dev->tag_set);
list_del(&dev->list);
@@ -398,7 +398,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(new->disk);
+ put_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index d0f9c4b0285c..05860288a7af 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -615,21 +615,24 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!usr_oob)
req.ooblen = 0;
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
if (req.start + req.len > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
- datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf)
return -ENOMEM;
}
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
- oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
- kfree(datbuf);
+ kvfree(datbuf);
return -ENOMEM;
}
}
@@ -679,8 +682,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_oob += ops.oobretlen;
}
- kfree(datbuf);
- kfree(oobbuf);
+ kvfree(datbuf);
+ kvfree(oobbuf);
return ret;
}
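Switching the ioctl buffers to kvmalloc() lets the allocation fall back to vmalloc() when a physically contiguous chunk of up to an erase block is not available; kvfree() releases either backing. A minimal sketch of the pairing, assuming the buffer is only accessed from process context and never handed to DMA:

#include <linux/slab.h>

static void *example_alloc_xfer_buf(size_t len)
{
	/* may be vmalloc-backed, so do not DMA to it directly */
	return kvmalloc(len, GFP_KERNEL);
}

static void example_free_xfer_buf(void *buf)
{
	kvfree(buf);	/* handles both kmalloc and vmalloc backing */
}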
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9eb0680db312..a9b8be9f40dc 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,68 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
return 0;
}
+static void mtd_check_of_node(struct mtd_info *mtd)
+{
+ struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
+ const char *pname, *prefix = "partition-";
+ int plen, mtd_name_len, offset, prefix_len;
+ struct mtd_info *parent;
+ bool found = false;
+
+ /* Check if MTD already has a device node */
+ if (dev_of_node(&mtd->dev))
+ return;
+
+ /* Check if a partitions node exist */
+ if (!mtd_is_partition(mtd))
+ return;
+ parent = mtd->parent;
+ parent_dn = dev_of_node(&parent->dev);
+ if (!parent_dn)
+ return;
+
+ partitions = of_get_child_by_name(parent_dn, "partitions");
+ if (!partitions)
+ goto exit_parent;
+
+ prefix_len = strlen(prefix);
+ mtd_name_len = strlen(mtd->name);
+
+ /* Search if a partition is defined with the same name */
+ for_each_child_of_node(partitions, mtd_dn) {
+ offset = 0;
+
+ /* Skip partition with no/wrong prefix */
+ if (!of_node_name_prefix(mtd_dn, "partition-"))
+ continue;
+
+ /* Label has priority. Check that first */
+ if (of_property_read_string(mtd_dn, "label", &pname)) {
+ of_property_read_string(mtd_dn, "name", &pname);
+ offset = prefix_len;
+ }
+
+ plen = strlen(pname) - offset;
+ if (plen == mtd_name_len &&
+ !strncmp(mtd->name, pname + offset, plen)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto exit_partitions;
+
+ /* Set of_node only for nvmem */
+ if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
+ mtd_set_of_node(mtd, mtd_dn);
+
+exit_partitions:
+ of_node_put(partitions);
+exit_parent:
+ of_node_put(parent_dn);
+}
+
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
@@ -658,6 +720,7 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
+ mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error)
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 53bd10738418..296fb16c8dc3 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -347,17 +347,17 @@ static int anfc_select_target(struct nand_chip *chip, int target)
/* Update clock frequency */
if (nfc->cur_clk != anand->clk) {
- clk_disable_unprepare(nfc->controller_clk);
- ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
if (ret) {
dev_err(nfc->dev, "Failed to change clock rate\n");
return ret;
}
- ret = clk_prepare_enable(nfc->controller_clk);
+ ret = clk_prepare_enable(nfc->bus_clk);
if (ret) {
dev_err(nfc->dev,
- "Failed to re-enable the controller clock\n");
+ "Failed to re-enable the bus clock\n");
return ret;
}
@@ -1043,7 +1043,13 @@ static int anfc_setup_interface(struct nand_chip *chip, int target,
DQS_BUFF_SEL_OUT(dqs_mode);
}
- anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
+ }
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 6ef14442c71a..c9ac3baf68c0 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2629,7 +2629,9 @@ static int atmel_nand_controller_remove(struct platform_device *pdev)
{
struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
- return nc->caps->ops->remove(nc);
+ WARN_ON(nc->caps->ops->remove(nc));
+
+ return 0;
}
static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 9dbf031716a6..af119e376352 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -679,8 +679,10 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
- if (!cafe)
- return -ENOMEM;
+ if (!cafe) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
mtd = nand_to_mtd(&cafe->nand);
mtd->dev.parent = &pdev->dev;
@@ -801,6 +803,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
kfree(cafe);
+ out_disable_device:
+ pci_disable_device(pdev);
out:
return err;
}
@@ -822,6 +826,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
+ pci_disable_device(pdev);
}
static const struct pci_device_id cafe_nand_tbl[] = {
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 889e40329956..93da23682d86 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
- u16 busy_timeout_cycles;
+ unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
unsigned long clk_rate, min_rate;
+ u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
@@ -885,7 +886,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
- busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ac3be92872d0..829b76b303aa 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1293,26 +1293,20 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
-static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
- int ret;
while (!list_empty(&nfc->chips)) {
meson_chip = list_first_entry(&nfc->chips,
struct meson_nfc_nand_chip, node);
mtd = nand_to_mtd(&meson_chip->nand);
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
- meson_nfc_free_buffer(&meson_chip->nand);
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
-
- return 0;
}
static int meson_nfc_nand_chips_init(struct device *dev,
@@ -1445,16 +1439,11 @@ err_clk:
static int meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
- int ret;
- ret = meson_nfc_nand_chip_cleanup(nfc);
- if (ret)
- return ret;
+ meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_disable_clk(nfc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 58c32a11792e..4a9f2b6c772d 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -2278,16 +2278,14 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct omap_nand_info *info = mtd_to_omap(mtd);
- int ret;
rawnand_sw_bch_cleanup(nand_chip);
if (info->dma)
dma_release_channel(info->dma);
- ret = mtd_device_unregister(mtd);
- WARN_ON(ret);
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(nand_chip);
- return ret;
+ return 0;
}
/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 048b255faa76..8f80019a9f01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -80,8 +80,10 @@
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31
@@ -102,6 +104,7 @@
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN 30
/* NAND_DEV_CMD1 bits */
@@ -238,6 +241,9 @@ nandc_set_reg(chip, reg, \
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start position ce
* for current sgl. It will be used for size calculation
@@ -250,14 +256,14 @@ nandc_set_reg(chip, reg, \
* @rx_sgl_start - start index in data sgl for rx.
* @wait_second_completion - wait for second DMA desc completion before making
* the NAND transfer completion.
- * @txn_done - completion for NAND transfer.
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
@@ -267,25 +273,23 @@ struct bam_transaction {
u32 rx_sgl_pos;
u32 rx_sgl_start;
bool wait_second_completion;
- struct completion txn_done;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
* This data type corresponds to the nand dma descriptor
+ * @dma_desc - low level DMA engine descriptor
* @list - list for desc_info
- * @dir - DMA transfer direction
+ *
* @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
* ADM
* @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
* @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dma_desc - low level DMA engine descriptor
+ * @dir - DMA transfer direction
*/
struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
struct list_head node;
- enum dma_data_direction dir;
union {
struct scatterlist adm_sgl;
struct {
@@ -293,7 +297,7 @@ struct desc_info {
int sgl_cnt;
};
};
- struct dma_async_tx_descriptor *dma_desc;
+ enum dma_data_direction dir;
};
/*
@@ -337,52 +341,64 @@ struct nandc_regs {
/*
* NAND controller data struct
*
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
* @dev: parent device
+ *
* @base: MMIO base
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
+ *
* @core_clk: controller clock
* @aon_clk: another controller clock
*
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
* @chan: dma channel
* @cmd_crci: ADM DMA CRCI for command flow control
* @data_crci: ADM DMA CRCI for data flow control
+ *
* @desc_list: DMA descriptor list (list of desc_infos)
*
* @data_buffer: our local DMA buffer for page read/writes,
* used when we can't use the buffer provided
* by upper layers directly
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
* @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @reg_read_dma: contains dma address for register read buffer
- * @reg_read_pos: marker for data read in reg_read_buf
*
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- * @cmd1/vld: some fixed controller register values
- * @props: properties of current NAND controller,
- * initialized via DT match data
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
* @max_cwperpage: maximum QPIC codewords required. calculated
* from all connected NAND devices pagesize
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
*/
struct qcom_nand_controller {
- struct nand_controller controller;
- struct list_head host_list;
-
struct device *dev;
void __iomem *base;
- phys_addr_t base_phys;
- dma_addr_t base_dma;
struct clk *core_clk;
struct clk *aon_clk;
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller controller;
+ struct list_head host_list;
+
union {
/* will be used only by QPIC for BAM DMA */
struct {
@@ -400,64 +416,89 @@ struct qcom_nand_controller {
};
struct list_head desc_list;
- struct bam_transaction *bam_txn;
u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
int buf_size;
int buf_count;
int buf_start;
unsigned int max_cwperpage;
- __le32 *reg_read_buf;
- dma_addr_t reg_read_dma;
int reg_read_pos;
- struct nandc_regs *regs;
-
u32 cmd1, vld;
- const struct qcom_nandc_props *props;
+};
+
+/*
+ * NAND special boot partitions
+ *
+ * @page_offset: offset of the partition where spare data is not protected
+ * by ECC (value in pages)
+ * @page_size: size of the partition where spare data is not protected
+ * by ECC (value in pages)
+ */
+struct qcom_nand_boot_partition {
+ u32 page_offset;
+ u32 page_size;
};
/*
* NAND chip structure
*
+ * @boot_partitions: array of boot partitions where offset and size of the
+ * boot partitions are stored
+ *
* @chip: base NAND chip structure
* @node: list node to add itself to host_list in
* qcom_nand_controller
*
+ * @nr_boot_partitions: count of the boot partitions where spare data is not
+ * protected by ECC
+ *
* @cs: chip select value for this chip
* @cw_size: the number of bytes in a single step/codeword
* of a page, consisting of all data, ecc, spare
* and reserved bytes
* @cw_data: the number of bytes within a codeword protected
* by ECC
- * @use_ecc: request the controller to use ECC for the
- * upcoming read/write
- * @bch_enabled: flag to tell whether BCH ECC mode is used
* @ecc_bytes_hw: ECC bytes used by controller hardware for this
* chip
- * @status: value to be returned if NAND_CMD_STATUS command
- * is executed
+ *
* @last_command: keeps track of last command on this chip. used
* for reading correct status
*
* @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
* ecc/non-ecc mode for the current nand flash
* device
+ *
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @codeword_fixup: keep track of the current layout used by
+ * the driver for read/write operation.
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
*/
struct qcom_nand_host {
+ struct qcom_nand_boot_partition *boot_partitions;
+
struct nand_chip chip;
struct list_head node;
+ int nr_boot_partitions;
+
int cs;
int cw_size;
int cw_data;
- bool use_ecc;
- bool bch_enabled;
int ecc_bytes_hw;
int spare_bytes;
int bbm_size;
- u8 status;
+
int last_command;
u32 cfg0, cfg1;
@@ -466,23 +507,30 @@ struct qcom_nand_host {
u32 ecc_bch_cfg;
u32 clrflashstatus;
u32 clrreadstatus;
+
+ u8 status;
+ bool codeword_fixup;
+ bool use_ecc;
+ bool bch_enabled;
};
/*
* This data type corresponds to the NAND controller properties which varies
* among different NAND controllers.
* @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
* @is_bam - whether NAND controller is using BAM
* @is_qpic - whether NAND CTRL is part of qpic IP
* @qpic_v2 - flag to indicate QPIC IP version 2
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
*/
struct qcom_nandc_props {
u32 ecc_modes;
+ u32 dev_cmd_reg_start;
bool is_bam;
bool is_qpic;
bool qpic_v2;
- u32 dev_cmd_reg_start;
+ bool use_codeword_fixup;
};
/* Frees the BAM transaction memory */
@@ -1701,7 +1749,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) * 4);
oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
@@ -1782,7 +1830,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) * 4);
oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
} else {
@@ -1940,7 +1988,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2037,6 +2085,69 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
return ret;
}
+static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
+{
+ struct qcom_nand_boot_partition *boot_partition;
+ u32 start, end;
+ int i;
+
+ /*
+ * Since the frequent access will be to the non-boot partitions like rootfs,
+ * optimize the page check by:
+ *
+ * 1. Checking if the page lies after the last boot partition.
+ * 2. Checking from the boot partition end.
+ */
+
+ /* First check the last boot partition */
+ boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ /* Page is after the last boot partition end. This is NOT a boot partition */
+ if (page > end)
+ return false;
+
+ /* Actually check if it's a boot partition */
+ if (page < end && page >= start)
+ return true;
+
+ /* Check the other boot partitions starting from the second-last partition */
+ for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
+ boot_partition = &host->boot_partitions[i];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ if (page < end && page >= start)
+ return true;
+ }
+
+ return false;
+}
+
+static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
+{
+ bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
+
+ /* Skip conf write if we are already in the correct mode */
+ if (codeword_fixup == host->codeword_fixup)
+ return;
+
+ host->codeword_fixup = codeword_fixup;
+
+ host->cw_data = codeword_fixup ? 512 : 516;
+ host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
+ host->bbm_size - host->cw_data;
+
+ host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
+ host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
+ host->cw_data << UD_SIZE_BYTES;
+
+ host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
+ host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
+ host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+}
+
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
@@ -2045,6 +2156,9 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *data_buf, *oob_buf = NULL;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -2064,6 +2178,9 @@ static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int cw, ret;
u8 *data_buf = buf, *oob_buf = chip->oob_poi;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
for (cw = 0; cw < ecc->steps; cw++) {
ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
page, cw);
@@ -2084,6 +2201,9 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2104,6 +2224,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
@@ -2119,7 +2242,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2176,6 +2299,9 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2194,7 +2320,7 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) << 2);
oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
@@ -2254,6 +2380,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
int data_size, oob_size;
int ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
host->use_ecc = true;
clear_bam_transaction(nandc);
@@ -2915,6 +3044,74 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
+static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_boot_partition *boot_partition;
+ struct device *dev = nandc->dev;
+ int partitions_count, i, j, ret;
+
+ if (!of_find_property(dn, "qcom,boot-partitions", NULL))
+ return 0;
+
+ partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
+ if (partitions_count <= 0) {
+ dev_err(dev, "Error parsing boot partition\n");
+ return partitions_count ? partitions_count : -EINVAL;
+ }
+
+ host->nr_boot_partitions = partitions_count / 2;
+ host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
+ sizeof(*host->boot_partitions), GFP_KERNEL);
+ if (!host->boot_partitions) {
+ host->nr_boot_partitions = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
+ boot_partition = &host->boot_partitions[i];
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
+ &boot_partition->page_offset);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_offset % mtd->writesize) {
+ dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert offset to nand pages */
+ boot_partition->page_offset /= mtd->writesize;
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
+ &boot_partition->page_size);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition size at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_size % mtd->writesize) {
+ dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert size to nand pages */
+ boot_partition->page_size /= mtd->writesize;
+ }
+
+ return 0;
+}
+
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
@@ -2972,6 +3169,14 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
nand_cleanup(chip);
+ if (nandc->props->use_codeword_fixup) {
+ ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+ }
+
return ret;
}
@@ -3137,6 +3342,7 @@ static int qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
.is_bam = false,
+ .use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
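The new *_MASK definitions exist so the codeword fixup can clear and refill the UD_SIZE_BYTES and SPARE_SIZE_BYTES fields of cfg0 (and ECC_NUM_DATA_BYTES in ecc_bch_cfg) without disturbing neighbouring bits. The same update expressed with FIELD_PREP(), shown purely as an equivalent illustration of the open-coded shifts above:

#include <linux/bitfield.h>

/* Illustrative equivalent of the cfg0 update in qcom_nandc_codeword_fixup(). */
static u32 example_update_cfg0(u32 cfg0, u32 cw_data, u32 spare_bytes)
{
	cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
	cfg0 |= FIELD_PREP(UD_SIZE_BYTES_MASK, cw_data) |
		FIELD_PREP(SPARE_SIZE_BYTES_MASK, spare_bytes);
	return cfg0;
}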
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index ba24cb36d0b9..b2b42dd1a2de 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -52,7 +52,7 @@ static const struct mtd_ooblayout_ops oob_sm_ops = {
.free = oob_sm_ooblayout_free,
};
-/* NOTE: This layout is is not compatabable with SmartMedia, */
+/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have page depenent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index b36e5260ae27..e12f9f580a15 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1223,11 +1223,8 @@ static int tegra_nand_remove(struct platform_device *pdev)
struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
struct nand_chip *chip = ctrl->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 80dabe6ff0f3..b520fe634041 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs := core.o ato.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
new file mode 100644
index 000000000000..82b377c06812
--- /dev/null
+++ b/drivers/mtd/nand/spi/ato.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Aidan MacDonald
+ *
+ * Author: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_ATO 0x9b
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+
+static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+ return 0;
+}
+
+static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = (16 * section);
+ region->length = 8;
+ } else {
+ /* first byte of section 0 is reserved for the BBM */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
+ .ecc = ato25d1ga_ooblayout_ecc,
+ .free = ato25d1ga_ooblayout_free,
+};
+
+
+static const struct spinand_info ato_spinand_table[] = {
+ SPINAND_INFO("ATO25D1GA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer ato_spinand_manufacturer = {
+ .id = SPINAND_MFR_ATO,
+ .name = "ATO",
+ .chips = ato_spinand_table,
+ .nchips = ARRAY_SIZE(ato_spinand_table),
+ .ops = &ato_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d5b685d1605e..9d73910a7ae8 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -927,6 +927,7 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &ato_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 23763d16e4f9..b43df73927a0 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -186,3 +186,12 @@ config MTD_QCOMSMEM_PARTS
help
This provides support for parsing partitions from Shared Memory (SMEM)
for NAND and SPI flash on Qualcomm platforms.
+
+config MTD_SERCOMM_PARTS
+ tristate "Sercomm partition table parser"
+ depends on MTD && RALINK
+ help
+	  This provides a partition table parser for devices with a Sercomm
+ partition map. This partition table contains real partition
+ offsets, which may differ from device to device depending on the
+ number and location of bad blocks on NAND.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 2e98aa048278..2fcf0ab9e7da 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SERCOMM_PARTS) += scpart.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o
diff --git a/drivers/mtd/parsers/ofpart_bcm4908.c b/drivers/mtd/parsers/ofpart_bcm4908.c
index 0eddef4c198e..bb072a0940e4 100644
--- a/drivers/mtd/parsers/ofpart_bcm4908.c
+++ b/drivers/mtd/parsers/ofpart_bcm4908.c
@@ -35,12 +35,15 @@ static long long bcm4908_partitions_fw_offset(void)
err = kstrtoul(s + len + 1, 0, &offset);
if (err) {
pr_err("failed to parse %s\n", s + len + 1);
+ of_node_put(root);
return err;
}
+ of_node_put(root);
return offset << 10;
}
+ of_node_put(root);
return -ENOENT;
}
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index feb44a573d44..a16b42a88581 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -58,6 +58,7 @@ static void parse_redboot_of(struct mtd_info *master)
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
if (ret)
return;
diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
new file mode 100644
index 000000000000..02601bb33de4
--- /dev/null
+++ b/drivers/mtd/parsers/scpart.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drivers/mtd/scpart.c: Sercomm Partition Parser
+ *
+ * Copyright (C) 2018 NOGUCHI Hiroshi
+ * Copyright (C) 2022 Mikhail Zhilkin
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+#define MOD_NAME "scpart"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) MOD_NAME ": " fmt
+
+#define ID_ALREADY_FOUND 0xffffffffUL
+
+#define MAP_OFFS_IN_BLK 0x800
+#define MAP_MIRROR_NUM 2
+
+static const char sc_part_magic[] = {
+ 'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0',
+};
+#define PART_MAGIC_LEN sizeof(sc_part_magic)
+
+/* assumes that all fields are set by CPU native endian */
+struct sc_part_desc {
+ uint32_t part_id;
+ uint32_t part_offs;
+ uint32_t part_bytes;
+};
+
+static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc)
+{
+ return ((pdesc->part_id != 0xffffffffUL) &&
+ (pdesc->part_offs != 0xffffffffUL) &&
+ (pdesc->part_bytes != 0xffffffffUL));
+}
+
+static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
+ struct sc_part_desc **ppdesc)
+{
+ int cnt = 0;
+ int res = 0;
+ int res2;
+ loff_t offs;
+ size_t retlen;
+ struct sc_part_desc *pdesc = NULL;
+ struct sc_part_desc *tmpdesc;
+ uint8_t *buf;
+
+ buf = kzalloc(master->erasesize, GFP_KERNEL);
+ if (!buf) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf);
+ if (res2 || retlen != master->erasesize) {
+ res = -EIO;
+ goto free;
+ }
+
+ for (offs = MAP_OFFS_IN_BLK;
+ offs < master->erasesize - sizeof(*tmpdesc);
+ offs += sizeof(*tmpdesc)) {
+ tmpdesc = (struct sc_part_desc *)&buf[offs];
+ if (!scpart_desc_is_valid(tmpdesc))
+ break;
+ cnt++;
+ }
+
+ if (cnt > 0) {
+ int bytes = cnt * sizeof(*pdesc);
+
+ pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc) {
+ res = -ENOMEM;
+ goto free;
+ }
+ memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes);
+
+ *ppdesc = pdesc;
+ res = cnt;
+ }
+
+free:
+ kfree(buf);
+
+out:
+ return res;
+}
+
+static int scpart_find_partmap(struct mtd_info *master,
+ struct sc_part_desc **ppdesc)
+{
+ int magic_found = 0;
+ int res = 0;
+ int res2;
+ loff_t offs = 0;
+ size_t retlen;
+ uint8_t rdbuf[PART_MAGIC_LEN];
+
+ while ((magic_found < MAP_MIRROR_NUM) &&
+ (offs < master->size) &&
+ !mtd_block_isbad(master, offs)) {
+ res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf);
+ if (res2 || retlen != PART_MAGIC_LEN) {
+ res = -EIO;
+ goto out;
+ }
+ if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) {
+ pr_debug("Signature found at 0x%llx\n", offs);
+ magic_found++;
+ res = scpart_scan_partmap(master, offs, ppdesc);
+ if (res > 0)
+ goto out;
+ }
+ offs += master->erasesize;
+ }
+
+out:
+ if (res > 0)
+ pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs);
+ else
+ pr_info("No valid 'SC PART MAP' was found\n");
+
+ return res;
+}
+
+static int scpart_parse(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ const char *partname;
+ int n;
+ int nr_scparts;
+ int nr_parts = 0;
+ int res = 0;
+ struct sc_part_desc *scpart_map = NULL;
+ struct mtd_partition *parts = NULL;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
+ struct device_node *pp;
+
+ mtd_node = mtd_get_of_node(master);
+ if (!mtd_node) {
+ res = -ENOENT;
+ goto out;
+ }
+
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ pr_info("%s: 'partitions' subnode not found on %pOF.\n",
+ master->name, mtd_node);
+ res = -ENOENT;
+ goto out;
+ }
+
+ nr_scparts = scpart_find_partmap(master, &scpart_map);
+ if (nr_scparts <= 0) {
+		pr_info("No partitions were found in 'SC PART MAP'.\n");
+ res = -ENOENT;
+ goto free;
+ }
+
+ parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts),
+ GFP_KERNEL);
+ if (!parts) {
+ res = -ENOMEM;
+ goto free;
+ }
+
+ for_each_child_of_node(ofpart_node, pp) {
+ u32 scpart_id;
+
+ if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id))
+ continue;
+
+ for (n = 0 ; n < nr_scparts ; n++)
+ if ((scpart_map[n].part_id != ID_ALREADY_FOUND) &&
+ (scpart_id == scpart_map[n].part_id))
+ break;
+ if (n >= nr_scparts)
+ /* not match */
+ continue;
+
+ /* add the partition found in OF into MTD partition array */
+ parts[nr_parts].offset = scpart_map[n].part_offs;
+ parts[nr_parts].size = scpart_map[n].part_bytes;
+ parts[nr_parts].of_node = pp;
+
+ if (!of_property_read_string(pp, "label", &partname))
+ parts[nr_parts].name = partname;
+ if (of_property_read_bool(pp, "read-only"))
+ parts[nr_parts].mask_flags |= MTD_WRITEABLE;
+ if (of_property_read_bool(pp, "lock"))
+ parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK;
+
+ /* mark as 'done' */
+ scpart_map[n].part_id = ID_ALREADY_FOUND;
+
+ nr_parts++;
+ }
+
+ if (nr_parts > 0) {
+ *pparts = parts;
+ res = nr_parts;
+ } else
+		pr_info("No partition in OF matches a partition ID in 'SC PART MAP'.\n");
+
+ of_node_put(pp);
+
+free:
+ of_node_put(ofpart_node);
+ kfree(scpart_map);
+ if (res <= 0)
+ kfree(parts);
+
+out:
+ return res;
+}
+
+static const struct of_device_id scpart_parser_of_match_table[] = {
+ { .compatible = "sercomm,sc-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table);
+
+static struct mtd_part_parser scpart_parser = {
+ .parse_fn = scpart_parse,
+ .name = "scpart",
+ .of_match_table = scpart_parser_of_match_table,
+};
+module_mtd_part_parser(scpart_parser);
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("scpart");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NOGUCHI Hiroshi <drvlabo@gmail.com>");
+MODULE_AUTHOR("Mikhail Zhilkin <csharper2005@gmail.com>");
+MODULE_DESCRIPTION("Sercomm partition parser");
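
To summarize the flow implemented in scpart.c: the on-flash 'SC PART MAP' provides <id, offset, size> records, the device tree only labels partitions and tags each child of the 'partitions' node with "sercomm,scpart-id", and the parser takes offset and size from whichever flash record has a matching part_id. Below is a minimal sketch of that matching step over plain arrays, with hypothetical IDs, offsets and sizes (the real parser walks OF child nodes and fills struct mtd_partition instead):

#include <stdint.h>
#include <stdio.h>

struct sc_rec { uint32_t id, offs, bytes; };        /* one 'SC PART MAP' entry */
struct dt_part { const char *label; uint32_t id; }; /* one DT child with scpart-id */

int main(void)
{
	/* Hypothetical map read from flash and hypothetical DT description. */
	static const struct sc_rec map[] = {
		{ 0, 0x000000, 0x100000 }, { 3, 0x100000, 0x400000 },
	};
	static const struct dt_part dt[] = {
		{ "u-boot", 0 }, { "firmware", 3 }, { "missing", 7 },
	};
	unsigned int i, n;

	for (i = 0; i < sizeof(dt) / sizeof(dt[0]); i++) {
		for (n = 0; n < sizeof(map) / sizeof(map[0]); n++)
			if (map[n].id == dt[i].id)
				break;
		if (n == sizeof(map) / sizeof(map[0])) {
			printf("%s: no matching scpart-id, skipped\n", dt[i].label);
			continue;
		}
		printf("%s: offset 0x%x size 0x%x\n", dt[i].label,
		       (unsigned int)map[n].offs, (unsigned int)map[n].bytes);
	}
	return 0;
}

OF children without a matching ID are skipped, and flash records never named in the DT are ignored, which mirrors how scpart_parse() only registers partitions present in both places.
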
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 0cff2cda1b5a..7f955fade838 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1111,9 +1111,9 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- mutex_lock(&ftl->mutex);
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
+ mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 94a969185ceb..5070d72835ec 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -237,7 +237,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
reg = readl(host->regbase + FMC_CFG);
reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
reg |= FMC_CFG_OP_MODE_NORMAL;
- reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES
: SPI_NOR_ADDR_MODE_3BYTES;
writel(reg, host->regbase + FMC_CFG);
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9032b9ab2eaf..ab3990e6ac25 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -203,7 +203,7 @@ static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->program_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
for (i = 0; i < len; i++)
@@ -230,7 +230,7 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->erase_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
return nxp_spifi_wait_for_cmd(spifi);
@@ -252,12 +252,12 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
}
/* Memory mode supports address length between 1 and 4 */
- if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4)
return -EINVAL;
spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
return 0;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 502967c76c5f..f2c64006f8d7 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -38,7 +38,7 @@
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-#define SPI_NOR_MAX_ADDR_WIDTH 4
+#define SPI_NOR_MAX_ADDR_NBYTES 4
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
@@ -177,7 +177,7 @@ int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
- if (spi_nor_protocol_is_dtr(nor->write_proto))
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->erase(nor, offs);
@@ -198,7 +198,7 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(len, buf, 0));
bool usebouncebuf;
@@ -262,7 +262,7 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
@@ -972,7 +972,7 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
@@ -1113,9 +1113,9 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
- nor->addr_width, addr);
+ nor->addr_nbytes, addr);
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
@@ -1126,13 +1126,13 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
* Default implementation, if driver doesn't have a specialized HW
* control
*/
- for (i = nor->addr_width - 1; i >= 0; i--) {
+ for (i = nor->addr_nbytes - 1; i >= 0; i--) {
nor->bouncebuf[i] = addr & 0xff;
addr >>= 8;
}
return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
+ nor->bouncebuf, nor->addr_nbytes);
}
/**
@@ -2249,43 +2249,43 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
-static int spi_nor_set_addr_width(struct spi_nor *nor)
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
- if (nor->addr_width) {
- /* already configured from SFDP */
+ if (nor->params->addr_nbytes) {
+ nor->addr_nbytes = nor->params->addr_nbytes;
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd address width cannot be used because
+ * in this protocol an odd addr_nbytes cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle and so too the data
* phase, and we will end the transaction with half a cycle left
* over.
*
- * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
* avoid this situation.
*/
- nor->addr_width = 4;
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
} else {
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
}
- if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+ nor->addr_nbytes = 4;
}
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
@@ -2304,7 +2304,7 @@ static int spi_nor_setup(struct spi_nor *nor,
if (ret)
return ret;
- return spi_nor_set_addr_width(nor);
+ return spi_nor_set_addr_nbytes(nor);
}
/**
@@ -2382,12 +2382,7 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
- if (no_sfdp_flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (no_sfdp_flags & SECT_4K) {
+ if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
@@ -2497,7 +2492,6 @@ static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
- nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
@@ -2718,7 +2712,7 @@ static int spi_nor_init(struct spi_nor *nor)
nor->flags & SNOR_F_SWP_IS_VOLATILE))
spi_nor_try_unlock_all(nor);
- if (nor->addr_width == 4 &&
+ if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
!(nor->flags & SNOR_F_4B_OPCODES)) {
/*
@@ -2730,7 +2724,7 @@ static int spi_nor_init(struct spi_nor *nor)
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
- nor->params->set_4byte_addr_mode(nor, true);
+ return nor->params->set_4byte_addr_mode(nor, true);
}
return 0;
@@ -2845,7 +2839,7 @@ static void spi_nor_put_device(struct mtd_info *mtd)
void spi_nor_restore(struct spi_nor *nor)
{
/* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
@@ -2989,7 +2983,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
- * - set the address width.
+ * - set the number of address bytes.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
@@ -3030,7 +3024,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
@@ -3061,7 +3055,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 3f841ec36e56..85b0cf254e97 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -84,9 +84,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_width, addr) \
+#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
- SPI_MEM_OP_ADDR(addr_width, addr, 0), \
+ SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -340,6 +340,11 @@ struct spi_nor_otp {
* @writesize Minimal writable flash unit size. Defaults to 1. Set to
* ECC unit size for ECC-ed flashes.
* @page_size: the page size of the SPI NOR flash memory.
+ * @addr_nbytes: number of address bytes to send.
+ * @addr_mode_nbytes: number of address bytes of current address mode. Useful
+ * when the flash operates with 4B opcodes but needs the
+ * internal address mode for opcodes that don't have a 4B
+ * opcode correspondent.
* @rdsr_dummy: dummy cycles needed for Read Status Register command
* in octal DTR mode.
* @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
@@ -372,6 +377,8 @@ struct spi_nor_flash_parameter {
u64 size;
u32 writesize;
u32 page_size;
+ u8 addr_nbytes;
+ u8 addr_mode_nbytes;
u8 rdsr_dummy;
u8 rdsr_addr_nbytes;
@@ -429,7 +436,7 @@ struct spi_nor_fixups {
* isn't necessarily called a "sector" by the vendor.
* @n_sectors: the number of sectors.
* @page_size: the flash's page size.
- * @addr_width: the flash's address width.
+ * @addr_nbytes: number of address bytes to send.
*
* @parse_sfdp: true when flash supports SFDP tables. The false value has no
* meaning. If one wants to skip the SFDP tables, one should
@@ -457,7 +464,6 @@ struct spi_nor_fixups {
* flags are used together with the SPI_NOR_SKIP_SFDP flag.
* SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
* SECT_4K: SPINOR_OP_BE_4K works uniformly.
- * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
* SPI_NOR_DUAL_READ: flash supports Dual Read.
* SPI_NOR_QUAD_READ: flash supports Quad Read.
* SPI_NOR_OCTAL_READ: flash supports Octal Read.
@@ -488,7 +494,7 @@ struct flash_info {
unsigned sector_size;
u16 n_sectors;
u16 page_size;
- u16 addr_width;
+ u8 addr_nbytes;
bool parse_sfdp;
u16 flags;
@@ -505,7 +511,6 @@ struct flash_info {
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
#define SECT_4K BIT(1)
-#define SECT_4K_PMC BIT(2)
#define SPI_NOR_DUAL_READ BIT(3)
#define SPI_NOR_QUAD_READ BIT(4)
#define SPI_NOR_OCTAL_READ BIT(5)
@@ -550,11 +555,11 @@ struct flash_info {
.n_sectors = (_n_sectors), \
.page_size = 256, \
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = (_addr_width), \
+ .addr_nbytes = (_addr_nbytes), \
.flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define OTP_INFO(_len, _n_regions, _base, _offset) \
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index eaf84f7a0676..df76cb5de3f9 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -86,7 +86,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
seq_printf(s, "size\t\t%s\n", buf);
seq_printf(s, "write size\t%u\n", params->writesize);
seq_printf(s, "page size\t%u\n", params->page_size);
- seq_printf(s, "address width\t%u\n", nor->addr_width);
+ seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
seq_puts(s, "flags\t\t");
spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names));
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index 79e2408f4998..fcc3b0e7cda9 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -13,7 +13,7 @@ static const struct flash_info esmt_nor_parts[] = {
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
+ { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index c012bc2486e1..89a66a19d754 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -14,13 +14,13 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_bfpt *bfpt)
{
/*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY.
+ * Overwrite the number of address bytes advertised by the BFPT.
*/
if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
+ nor->params->addr_nbytes = 4;
return 0;
}
@@ -29,6 +29,21 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
+static void pm25lv_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ int i;
+
+ /* The PM25LV series has a different 4k sector erase opcode */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (map->erase_type[i].size == 4096)
+ map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+}
+
+static const struct spi_nor_fixups pm25lv_nor_fixups = {
+ .late_init = pm25lv_nor_late_init,
+};
+
static const struct flash_info issi_nor_parts[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
@@ -62,9 +77,13 @@ static const struct flash_info issi_nor_parts[] = {
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index a96f74e0f568..3c9681a3f7a3 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -399,8 +399,16 @@ static int micron_st_nor_ready(struct spi_nor *nor)
return sr_ready;
ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * Some controllers, such as Intel SPI, do not support low
+ * level operations such as reading the flag status
+		 * register. They expose only a small set of high-level
+		 * operations to the software. In that case, fall back to
+		 * using only the status register value.
+ */
+ return ret == -EOPNOTSUPP ? sr_ready : ret;
+ }
if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
if (nor->bouncebuf[0] & FSR_E_ERR)
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index fa63d8571218..00ab0d2d6d2f 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -35,13 +35,13 @@
*/
int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
struct spi_mem_dirmap_desc *rdesc;
enum spi_nor_protocol read_proto;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_proto = nor->read_proto;
rdesc = nor->dirmap.rdesc;
@@ -54,7 +54,7 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
ret = spi_nor_read_data(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_proto = read_proto;
nor->dirmap.rdesc = rdesc;
@@ -85,11 +85,11 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
{
enum spi_nor_protocol write_proto;
struct spi_mem_dirmap_desc *wdesc;
- u8 addr_width, program_opcode;
+ u8 addr_nbytes, program_opcode;
int ret, written;
program_opcode = nor->program_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
write_proto = nor->write_proto;
wdesc = nor->dirmap.wdesc;
@@ -113,7 +113,7 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
out:
nor->program_opcode = program_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->write_proto = write_proto;
nor->dirmap.wdesc = wdesc;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index a5211543d30d..2257f1b4c2e2 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -134,7 +134,7 @@ struct sfdp_4bait {
/**
* spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
- * addr_width and read_dummy members of the struct spi_nor
+ * addr_nbytes and read_dummy members of the struct spi_nor
* should be previously
* set.
* @nor: pointer to a 'struct spi_nor'
@@ -178,21 +178,21 @@ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
size_t len, void *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
nor->read_dummy = 8;
ret = spi_nor_read_raw(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
return ret;
@@ -462,11 +462,13 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
- nor->addr_width = 3;
+ params->addr_nbytes = 3;
+ params->addr_mode_nbytes = 3;
break;
case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
+ params->addr_mode_nbytes = 4;
break;
default:
@@ -637,12 +639,12 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
/**
- * spi_nor_smpt_addr_width() - return the address width used in the
+ * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the
* configuration detection command.
* @nor: pointer to a 'struct spi_nor'
* @settings: configuration detection command descriptor, dword1
*/
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings)
{
switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
case SMPT_CMD_ADDRESS_LEN_0:
@@ -653,7 +655,7 @@ static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
return 4;
case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
default:
- return nor->addr_width;
+ return nor->params->addr_mode_nbytes;
}
}
@@ -690,7 +692,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u32 addr;
int err;
u8 i;
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
@@ -698,7 +700,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
if (!buf)
return ERR_PTR(-ENOMEM);
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
@@ -709,7 +711,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
@@ -756,7 +758,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
/* fall through */
out:
kfree(buf);
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
return ret;
@@ -1044,7 +1046,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
/*
* We need at least one 4-byte op code per read, program and erase
* operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
+ * nor->addr_nbytes value.
*/
if (!read_hwcaps || !pp_hwcaps || !erase_mask)
goto out;
@@ -1098,7 +1100,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
* Spansion memory. However this quirk is no longer needed with new
* SFDP compliant memories.
*/
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
/* fall through */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 43cd6cd92537..0150049007be 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -14,6 +14,8 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_CFR1V 0x00800002
+#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR3V 0x00800004
@@ -114,6 +116,150 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
}
/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ * register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Updating the volatile registers is recommended in field applications, since
+ * a power interruption could otherwise corrupt the non-volatile registers. This
+ * function sets the Quad Enable bit in the volatile CFR1. If the Quad Enable
+ * bit in the non-volatile CFR1 was already set in advance (typically by a Flash
+ * programmer before mounting the Flash on the PCB), the Quad Enable bit in the
+ * volatile CFR1 is also set during Flash power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ u8 cfr1v_written;
+ int ret;
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1,
+ nor->bouncebuf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ cfr1v_written = nor->bouncebuf[0];
+
+ /* Read back and check it. */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != cfr1v_written) {
+ dev_err(nor->dev, "CFR1: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * cypress_nor_set_page_size() - Set page size which corresponds to the flash
+ * configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_page_size(struct spi_nor *nor)
+{
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
+ nor->bouncebuf);
+ int ret;
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ nor->params->page_size = 512;
+ else
+ nor->params->page_size = 256;
+
+ return 0;
+}
+
+static int
+s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /* Replace Quad Enable with volatile version */
+ nor->params->quad_enable = cypress_nor_quad_enable_volatile;
+
+ return cypress_nor_set_page_size(nor);
+}
+
+static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_erase_type *erase_type =
+ nor->params->erase_map.erase_type;
+ unsigned int i;
+
+ /*
+ * In some parts, 3byte erase opcodes are advertised by 4BAIT.
+ * Convert them to 4byte erase opcodes.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ switch (erase_type[i].opcode) {
+ case SPINOR_OP_SE:
+ erase_type[i].opcode = SPINOR_OP_SE_4B;
+ break;
+ case SPINOR_OP_BE_4K:
+ erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void s25hx_t_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /* Fast Read 4B requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+
+ /* The writesize should be ECC data unit size */
+ params->writesize = 16;
+}
+
+static struct spi_nor_fixups s25hx_t_fixups = {
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+ .late_init = s25hx_t_late_init,
+};
+
+/**
* cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
@@ -167,28 +313,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- /*
- * The BFPT table advertises a 512B page size but the page size is
- * actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- */
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
- nor->bouncebuf);
- int ret;
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
+ return cypress_nor_set_page_size(nor);
}
static const struct spi_nor_fixups s28hs512t_fixups = {
@@ -310,6 +435,22 @@ static const struct flash_info spansion_nor_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
{ "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 1d2f5db047bd..5723157739fc 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -31,7 +31,7 @@
.sector_size = (8 * (_page_size)), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = 3, \
+ .addr_nbytes = 3, \
.flags = SPI_NOR_NO_FR
/* Xilinx S3AN share MFR with Atmel SPI NOR */
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index a78fdf3b30f7..4cf67a2a0d04 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -467,7 +467,7 @@ out_destroy_wq:
out_remove_minor:
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
@@ -486,7 +486,7 @@ static void ubiblock_cleanup(struct ubiblock *dev)
destroy_workqueue(dev->wq);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
blk_mq_free_tag_set(&dev->tag_set);
idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8c1eeb5a8db8..94c889802566 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -500,6 +500,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/can/Kconfig"
+
source "drivers/net/mctp/Kconfig"
source "drivers/net/mdio/Kconfig"
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index be2719a3ba70..9a247eb7679c 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -449,7 +449,7 @@ out:
dev_put(amt->dev);
}
-/* Non-existant group is created as INCLUDE {empty}:
+/* Non-existent group is created as INCLUDE {empty}:
*
* RFC 3376 - 5.1. Action on Change of Interface State
*
@@ -563,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
ihv3->nsrcs = 0;
ihv3->resv = 0;
ihv3->suppress = false;
- ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
ihv3->csum = 0;
csum = &ihv3->csum;
csum_start = (void *)ihv3;
@@ -577,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
return skb;
}
-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
{
if (validate && amt->status >= status)
return;
netdev_dbg(amt->dev, "Update GW status %s -> %s",
status_str[amt->status], status_str[status]);
- amt->status = status;
+ WRITE_ONCE(amt->status, status);
}
static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
@@ -600,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
tunnel->status = status;
}
-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
-{
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, status, validate);
- spin_unlock_bh(&amt->lock);
-}
-
static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
enum amt_status status, bool validate)
{
@@ -700,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt)
if (unlikely(net_xmit_eval(err)))
amt->dev->stats.tx_errors++;
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
- spin_unlock_bh(&amt->lock);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
out:
rcu_read_unlock();
}
@@ -900,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
}
#endif
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
+ struct sk_buff *skb)
+{
+ int index;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events >= AMT_MAX_EVENTS) {
+ spin_unlock_bh(&amt->lock);
+ return 1;
+ }
+
+ index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
+ amt->events[index].event = event;
+ amt->events[index].skb = skb;
+ amt->nr_events++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ queue_work(amt_wq, &amt->event_wq);
+ spin_unlock_bh(&amt->lock);
+
+ return 0;
+}
+
static void amt_secret_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
@@ -913,58 +925,72 @@ static void amt_secret_work(struct work_struct *work)
msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}
-static void amt_discovery_work(struct work_struct *work)
+static void amt_event_send_discovery(struct amt_dev *amt)
{
- struct amt_dev *amt = container_of(to_delayed_work(work),
- struct amt_dev,
- discovery_wq);
-
- spin_lock_bh(&amt->lock);
if (amt->status > AMT_STATUS_SENT_DISCOVERY)
goto out;
get_random_bytes(&amt->nonce, sizeof(__be32));
- spin_unlock_bh(&amt->lock);
amt_send_discovery(amt);
- spin_lock_bh(&amt->lock);
out:
mod_delayed_work(amt_wq, &amt->discovery_wq,
msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
- spin_unlock_bh(&amt->lock);
}
-static void amt_req_work(struct work_struct *work)
+static void amt_discovery_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
struct amt_dev,
- req_wq);
+ discovery_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_event_send_request(struct amt_dev *amt)
+{
u32 exp;
- spin_lock_bh(&amt->lock);
if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
goto out;
if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
netdev_dbg(amt->dev, "Gateway is not ready");
amt->qi = AMT_INIT_REQ_TIMEOUT;
- amt->ready4 = false;
- amt->ready6 = false;
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
amt->remote_ip = 0;
- __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt_update_gw_status(amt, AMT_STATUS_INIT, false);
amt->req_cnt = 0;
+ amt->nonce = 0;
goto out;
}
- spin_unlock_bh(&amt->lock);
+
+ if (!amt->req_cnt) {
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
+ get_random_bytes(&amt->nonce, sizeof(__be32));
+ }
amt_send_request(amt, false);
amt_send_request(amt, true);
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
- spin_unlock_bh(&amt->lock);
+}
+
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
+ mod_delayed_work(amt_wq, &amt->req_wq,
+ msecs_to_jiffies(100));
}
static bool amt_send_membership_update(struct amt_dev *amt,
@@ -1220,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
/* Gateway only passes IGMP/MLD packets */
if (!report)
goto free;
- if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+ if ((!v6 && !READ_ONCE(amt->ready4)) ||
+ (v6 && !READ_ONCE(amt->ready6)))
goto free;
if (amt_send_membership_update(amt, skb, v6))
goto free;
@@ -1373,11 +1400,11 @@ static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
int i;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -1458,11 +1485,11 @@ static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
int i, j;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -2236,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
ipv4_is_zeronet(amta->ip4))
return true;
+ if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
+ amt->nonce != amta->nonce)
+ return true;
+
amt->remote_ip = amta->ip4;
netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
mod_delayed_work(amt_wq, &amt->req_wq, 0);
@@ -2251,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
struct ethhdr *eth;
struct iphdr *iph;
+ if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
+ return true;
+
hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
@@ -2325,6 +2359,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
if (amtmq->reserved || amtmq->version)
return true;
+ if (amtmq->nonce != amt->nonce)
+ return true;
+
hdr_size -= sizeof(*eth);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
return true;
@@ -2339,6 +2376,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
iph = ip_hdr(skb);
if (iph->version == 4) {
+ if (READ_ONCE(amt->ready4))
+ return true;
+
if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
sizeof(*ihv3)))
return true;
@@ -2349,12 +2389,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready4 = true;
+ WRITE_ONCE(amt->ready4, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = ihv3->qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IP);
eth->h_proto = htons(ETH_P_IP);
ip_eth_mc_map(iph->daddr, eth->h_dest);
@@ -2363,6 +2401,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
struct mld2_query *mld2q;
struct ipv6hdr *ip6h;
+ if (READ_ONCE(amt->ready6))
+ return true;
+
if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
sizeof(*mld2q)))
return true;
@@ -2374,12 +2415,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready6 = true;
+ WRITE_ONCE(amt->ready6, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = mld2q->mld2q_qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IPV6);
eth->h_proto = htons(ETH_P_IPV6);
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
@@ -2392,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
+ local_bh_disable();
if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
amt->dev->stats.rx_dropped++;
}
+ local_bh_enable();
return false;
}
@@ -2638,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
if (tunnel->ip4 == iph->saddr)
goto send;
+ spin_lock_bh(&amt->lock);
if (amt->nr_tunnels >= amt->max_tunnels) {
+ spin_unlock_bh(&amt->lock);
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
return true;
}
@@ -2646,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
tunnel = kzalloc(sizeof(*tunnel) +
(sizeof(struct hlist_head) * amt->hash_buckets),
GFP_ATOMIC);
- if (!tunnel)
+ if (!tunnel) {
+ spin_unlock_bh(&amt->lock);
return true;
+ }
tunnel->source_port = udph->source;
tunnel->ip4 = iph->saddr;
@@ -2660,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
- spin_lock_bh(&amt->lock);
list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
tunnel->key = amt->key;
- amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
amt->nr_tunnels++;
mod_delayed_work(amt_wq, &tunnel->gc_wq,
msecs_to_jiffies(amt_gmi(amt)));
@@ -2688,6 +2732,38 @@ send:
return false;
}
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
+{
+ int type = amt_parse_type(skb);
+ int err = 1;
+
+ if (type == -1)
+ goto drop;
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ err = amt_advertisement_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ err = amt_membership_query_handler(amt, skb);
+ if (!err)
+ return;
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+}
+
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2719,8 +2795,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- err = amt_advertisement_handler(amt, skb);
- break;
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
+ goto drop;
+ }
+ goto out;
case AMT_MSG_MULTICAST_DATA:
if (iph->saddr != amt->remote_ip) {
netdev_dbg(amt->dev, "Invalid Relay IP\n");
@@ -2738,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- err = amt_membership_query_handler(amt, skb);
- if (err)
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
goto drop;
- else
- goto out;
+ }
+ goto out;
default:
err = true;
netdev_dbg(amt->dev, "Invalid type of Gateway\n");
@@ -2780,6 +2861,46 @@ out:
return 0;
}
+static void amt_event_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
+ struct sk_buff *skb;
+ u8 event;
+ int i;
+
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events == 0) {
+ spin_unlock_bh(&amt->lock);
+ return;
+ }
+ event = amt->events[amt->event_idx].event;
+ skb = amt->events[amt->event_idx].skb;
+ amt->events[amt->event_idx].event = AMT_EVENT_NONE;
+ amt->events[amt->event_idx].skb = NULL;
+ amt->nr_events--;
+ amt->event_idx++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ spin_unlock_bh(&amt->lock);
+
+ switch (event) {
+ case AMT_EVENT_RECEIVE:
+ amt_gw_rcv(amt, skb);
+ break;
+ case AMT_EVENT_SEND_DISCOVERY:
+ amt_event_send_discovery(amt);
+ break;
+ case AMT_EVENT_SEND_REQUEST:
+ amt_event_send_request(amt);
+ break;
+ default:
+ if (skb)
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2804,7 +2925,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
break;
case AMT_MSG_REQUEST:
case AMT_MSG_MEMBERSHIP_UPDATE:
- if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
mod_delayed_work(amt_wq, &amt->req_wq, 0);
break;
default:
@@ -2867,6 +2988,8 @@ static int amt_dev_open(struct net_device *dev)
amt->ready4 = false;
amt->ready6 = false;
+ amt->event_idx = 0;
+ amt->nr_events = 0;
err = amt_socket_create(amt);
if (err)
@@ -2874,6 +2997,7 @@ static int amt_dev_open(struct net_device *dev)
amt->req_cnt = 0;
amt->remote_ip = 0;
+ amt->nonce = 0;
get_random_bytes(&amt->key, sizeof(siphash_key_t));
amt->status = AMT_STATUS_INIT;
@@ -2892,6 +3016,8 @@ static int amt_dev_stop(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
struct amt_tunnel_list *tunnel, *tmp;
struct socket *sock;
+ struct sk_buff *skb;
+ int i;
cancel_delayed_work_sync(&amt->req_wq);
cancel_delayed_work_sync(&amt->discovery_wq);
@@ -2904,6 +3030,15 @@ static int amt_dev_stop(struct net_device *dev)
if (sock)
udp_tunnel_sock_release(sock);
+ cancel_work_sync(&amt->event_wq);
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ skb = amt->events[i].skb;
+ if (skb)
+ kfree_skb(skb);
+ amt->events[i].event = AMT_EVENT_NONE;
+ amt->events[i].skb = NULL;
+ }
+
amt->ready4 = false;
amt->ready6 = false;
amt->req_cnt = 0;
@@ -3095,7 +3230,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
goto err;
}
if (amt->mode == AMT_MODE_RELAY) {
- amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
amt->qri = 10;
dev->needed_headroom = amt->stream_dev->needed_headroom +
AMT_RELAY_HLEN;
@@ -3146,8 +3281,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_WORK(&amt->event_wq, amt_event_work);
INIT_LIST_HEAD(&amt->tunnel_list);
-
return 0;
err:
dev_put(amt->stream_dev);
@@ -3280,7 +3415,7 @@ static int __init amt_init(void)
if (err < 0)
goto unregister_notifier;
- amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+ amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
if (!amt_wq) {
err = -ENOMEM;
goto rtnl_unregister;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 007d43e46dcb..b9dbad3a8af8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -653,6 +653,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
@@ -665,6 +666,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6ba4c83fe5fc..50e60843020c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1026,12 +1026,38 @@ out:
}
+/**
+ * bond_choose_primary_or_current - select the primary or high priority slave
+ * @bond: our bonding struct
+ *
+ * - Check if there is a primary link. If the primary link is set and up,
+ *   go on and do link reselection.
+ *
+ * - If the primary link is not set or is down, find the highest priority link.
+ *   If the highest priority link is not the current slave, set it as the primary
+ *   link and do link reselection.
+ */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
+ struct slave *slave, *hprio = NULL;
+ struct list_head *iter;
if (!prim || prim->link != BOND_LINK_UP) {
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP) {
+ hprio = hprio ?: slave;
+ if (slave->prio > hprio->prio)
+ hprio = slave;
+ }
+ }
+
+ if (hprio && hprio != curr) {
+ prim = hprio;
+ goto link_reselect;
+ }
+
if (!curr || curr->link != BOND_LINK_UP)
return NULL;
return curr;
@@ -1042,6 +1068,7 @@ static struct slave *bond_choose_primary_or_current(struct bonding *bond)
return prim;
}
+link_reselect:
if (!curr || curr->link != BOND_LINK_UP)
return prim;
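The reselection rule added above boils down to: when no usable primary exists, the up slave with the largest prio wins, and the current active slave is kept only when nothing beats it. A self-contained sketch of that rule (plain C with a toy slave array, not the driver's data structures) is:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_slave {
    	const char *name;
    	bool up;
    	int prio;     /* mirrors the new IFLA_BOND_SLAVE_PRIO (s32) value */
    };

    /* Pick the up slave with the highest prio; fall back to the current one. */
    static const struct toy_slave *
    choose_slave(const struct toy_slave *slaves, int n, const struct toy_slave *curr)
    {
    	const struct toy_slave *hprio = NULL;
    	int i;

    	for (i = 0; i < n; i++) {
    		if (!slaves[i].up)
    			continue;
    		if (!hprio || slaves[i].prio > hprio->prio)
    			hprio = &slaves[i];
    	}

    	if (hprio && hprio != curr)
    		return hprio;           /* reselect the higher-priority link */
    	if (curr && curr->up)
    		return curr;            /* keep the current active slave */
    	return NULL;
    }

    int main(void)
    {
    	struct toy_slave s[] = {
    		{ "eth0", true,  0 },
    		{ "eth1", true,  10 },
    		{ "eth2", false, 50 },
    	};
    	const struct toy_slave *pick = choose_slave(s, 3, &s[0]);

    	printf("%s\n", pick ? pick->name : "(none)");
    	return 0;
    }

With eth2 down, eth1 wins on priority even though eth0 is the current active slave; the kernel code additionally defers to an explicitly configured primary, as the comment above describes.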
@@ -1974,6 +2001,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
+ new_slave->last_tx = new_slave->last_rx;
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2857,8 +2886,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
return;
}
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
arp_xmit(skb);
+ }
+
return;
}
@@ -3047,8 +3079,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
@@ -3076,8 +3107,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
}
addrconf_addr_solict_mult(daddr, &mcaddr);
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
ndisc_send_skb(skb, &mcaddr, saddr);
+ }
}
static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
@@ -3219,8 +3252,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_ns(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_ns(bond, slave, saddr, daddr);
out:
@@ -3308,12 +3340,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3338,7 +3370,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
@@ -3404,7 +3436,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3455,9 +3487,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
@@ -3474,8 +3506,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3484,10 +3516,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -5306,8 +5338,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
@@ -6220,45 +6258,33 @@ int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
struct bonding *bond;
- struct alb_bond_info *bond_info;
- int res;
+ int res = -ENOMEM;
rtnl_lock();
bond_dev = alloc_netdev_mq(sizeof(struct bonding),
name ? name : "bond%d", NET_NAME_UNKNOWN,
bond_setup, tx_queues);
- if (!bond_dev) {
- pr_err("%s: eek! can't alloc netdev!\n", name);
- rtnl_unlock();
- return -ENOMEM;
- }
+ if (!bond_dev)
+ goto out;
- /*
- * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
- * It is set to 0 by default which is wrong.
- */
bond = netdev_priv(bond_dev);
- bond_info = &(BOND_ALB_INFO(bond));
- bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
-
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
- rtnl_unlock();
-
- return res;
+ goto out;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
+out:
rtnl_unlock();
- return 0;
+ return res;
}
static int __net_init bond_net_init(struct net *net)
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 6f404f9c34e3..c2d080fc4fc4 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -27,6 +27,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
+ nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
0;
}
@@ -53,6 +54,9 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
+ if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
+ goto nla_put_failure;
+
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
const struct port *ad_port;
@@ -117,6 +121,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
+ [IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -151,7 +156,18 @@ static int bond_slave_changelink(struct net_device *bond_dev,
snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
slave_dev->name, queue_id);
bond_opt_initstr(&newval, queue_id_str);
- err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval,
+ data[IFLA_BOND_SLAVE_QUEUE_ID], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_SLAVE_PRIO]) {
+ int prio = nla_get_s32(data[IFLA_BOND_SLAVE_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, prio);
+ err = __bond_opt_set(bond, BOND_OPT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_PRIO], extack);
if (err)
return err;
}
@@ -175,7 +191,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
bond_opt_initval(&newval, mode);
- err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MODE, &newval,
+ data[IFLA_BOND_MODE], extack);
if (err)
return err;
}
@@ -192,7 +209,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
active_slave = slave_dev->name;
}
bond_opt_initstr(&newval, active_slave);
- err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval,
+ data[IFLA_BOND_ACTIVE_SLAVE], extack);
if (err)
return err;
}
@@ -200,7 +218,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
bond_opt_initval(&newval, miimon);
- err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval,
+ data[IFLA_BOND_MIIMON], extack);
if (err)
return err;
}
@@ -208,7 +227,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
bond_opt_initval(&newval, updelay);
- err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval,
+ data[IFLA_BOND_UPDELAY], extack);
if (err)
return err;
}
@@ -216,7 +236,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
bond_opt_initval(&newval, downdelay);
- err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval,
+ data[IFLA_BOND_DOWNDELAY], extack);
if (err)
return err;
}
@@ -224,7 +245,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]);
bond_opt_initval(&newval, delay);
- err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval,
+ data[IFLA_BOND_PEER_NOTIF_DELAY], extack);
if (err)
return err;
}
@@ -232,7 +254,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
+ data[IFLA_BOND_USE_CARRIER], extack);
if (err)
return err;
}
@@ -240,12 +263,14 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
+ "ARP monitoring cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_interval);
- err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval,
+ data[IFLA_BOND_ARP_INTERVAL], extack);
if (err)
return err;
}
@@ -264,7 +289,9 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
- &newval);
+ &newval,
+ data[IFLA_BOND_ARP_IP_TARGET],
+ extack);
if (err)
break;
i++;
@@ -292,7 +319,9 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initextra(&newval, &addr6, sizeof(addr6));
err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
- &newval);
+ &newval,
+ data[IFLA_BOND_NS_IP6_TARGET],
+ extack);
if (err)
break;
i++;
@@ -307,12 +336,14 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
+ "ARP validating cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_validate);
- err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval,
+ data[IFLA_BOND_ARP_VALIDATE], extack);
if (err)
return err;
}
@@ -321,7 +352,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
bond_opt_initval(&newval, arp_all_targets);
- err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval,
+ data[IFLA_BOND_ARP_ALL_TARGETS], extack);
if (err)
return err;
}
@@ -335,7 +367,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
primary = dev->name;
bond_opt_initstr(&newval, primary);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval,
+ data[IFLA_BOND_PRIMARY], extack);
if (err)
return err;
}
@@ -344,7 +377,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
bond_opt_initval(&newval, primary_reselect);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval,
+ data[IFLA_BOND_PRIMARY_RESELECT], extack);
if (err)
return err;
}
@@ -353,7 +387,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
bond_opt_initval(&newval, fail_over_mac);
- err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval,
+ data[IFLA_BOND_FAIL_OVER_MAC], extack);
if (err)
return err;
}
@@ -362,7 +397,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
bond_opt_initval(&newval, xmit_hash_policy);
- err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval,
+ data[IFLA_BOND_XMIT_HASH_POLICY], extack);
if (err)
return err;
}
@@ -371,7 +407,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
bond_opt_initval(&newval, resend_igmp);
- err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval,
+ data[IFLA_BOND_RESEND_IGMP], extack);
if (err)
return err;
}
@@ -380,7 +417,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
bond_opt_initval(&newval, num_peer_notif);
- err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval,
+ data[IFLA_BOND_NUM_PEER_NOTIF], extack);
if (err)
return err;
}
@@ -389,7 +427,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
bond_opt_initval(&newval, all_slaves_active);
- err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval,
+ data[IFLA_BOND_ALL_SLAVES_ACTIVE], extack);
if (err)
return err;
}
@@ -398,7 +437,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
bond_opt_initval(&newval, min_links);
- err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval,
+ data[IFLA_BOND_MIN_LINKS], extack);
if (err)
return err;
}
@@ -407,7 +447,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
bond_opt_initval(&newval, lp_interval);
- err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval,
+ data[IFLA_BOND_LP_INTERVAL], extack);
if (err)
return err;
}
@@ -416,7 +457,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
bond_opt_initval(&newval, packets_per_slave);
- err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval,
+ data[IFLA_BOND_PACKETS_PER_SLAVE], extack);
if (err)
return err;
}
@@ -425,7 +467,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]);
bond_opt_initval(&newval, lacp_active);
- err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval,
+ data[IFLA_BOND_AD_LACP_ACTIVE], extack);
if (err)
return err;
}
@@ -435,7 +478,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
bond_opt_initval(&newval, lacp_rate);
- err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval,
+ data[IFLA_BOND_AD_LACP_RATE], extack);
if (err)
return err;
}
@@ -444,7 +488,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_SELECT]);
bond_opt_initval(&newval, ad_select);
- err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval,
+ data[IFLA_BOND_AD_SELECT], extack);
if (err)
return err;
}
@@ -453,7 +498,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
bond_opt_initval(&newval, actor_sys_prio);
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYS_PRIO], extack);
if (err)
return err;
}
@@ -462,7 +508,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
bond_opt_initval(&newval, port_key);
- err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval,
+ data[IFLA_BOND_AD_USER_PORT_KEY], extack);
if (err)
return err;
}
@@ -472,7 +519,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval,
nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYSTEM], extack);
if (err)
return err;
}
@@ -480,7 +528,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
bond_opt_initval(&newval, dynamic_lb);
- err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval,
+ data[IFLA_BOND_TLB_DYNAMIC_LB], extack);
if (err)
return err;
}
@@ -489,7 +538,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
bond_opt_initval(&newval, missed_max);
- err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval,
+ data[IFLA_BOND_MISSED_MAX], extack);
if (err)
return err;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1f8323ad5282..3498db1c1b3c 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -40,6 +40,8 @@ static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_reselect_set(struct bonding *bond,
@@ -365,6 +367,16 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_miimon_set
},
+ [BOND_OPT_PRIO] = {
+ .id = BOND_OPT_PRIO,
+ .name = "prio",
+ .desc = "Link priority for failover re-selection",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_prio_set
+ },
[BOND_OPT_PRIMARY] = {
.id = BOND_OPT_PRIMARY,
.name = "primary",
@@ -632,27 +644,35 @@ static int bond_opt_check_deps(struct bonding *bond,
}
static void bond_opt_dep_print(struct bonding *bond,
- const struct bond_option *opt)
+ const struct bond_option *opt,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
- if (test_bit(params->mode, &opt->unsuppmodes))
+ if (test_bit(params->mode, &opt->unsuppmodes)) {
netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
opt->name, modeval->string, modeval->value);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "option not supported in mode");
+ }
}
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
- int error, const struct bond_opt_value *val)
+ int error, const struct bond_opt_value *val,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
case -EINVAL:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr, "invalid option value");
if (val) {
if (val->string) {
/* sometimes RAWVAL opts may have new lines */
@@ -674,13 +694,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
opt->name, minval ? minval->value : 0, maxval->value);
break;
case -EACCES:
- bond_opt_dep_print(bond, opt);
+ bond_opt_dep_print(bond, opt, bad_attr, extack);
break;
case -ENOTEMPTY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond device has slaves");
netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
opt->name);
break;
case -EBUSY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond is up");
netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
opt->name);
break;
@@ -691,6 +715,8 @@ static void bond_opt_error_interpret(struct bonding *bond,
*p = '\0';
netdev_err(bond->dev, "option %s: interface %s does not exist!\n",
opt->name, val->string);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "interface does not exist");
}
break;
default:
@@ -703,13 +729,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
* @bond: target bond device
* @option: option to set
* @val: value to set it to
+ * @bad_attr: netlink attribute that caused the error
+ * @extack: extended netlink error structure, used when an error message
+ * needs to be returned to the caller via netlink
*
* This function is used to change the bond's option value, it can be
* used for both enabling/changing an option and for disabling it. RTNL lock
* must be obtained before calling this function.
*/
int __bond_opt_set(struct bonding *bond,
- unsigned int option, struct bond_opt_value *val)
+ unsigned int option, struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack)
{
const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
@@ -731,7 +761,7 @@ int __bond_opt_set(struct bonding *bond,
ret = opt->set(bond, retval);
out:
if (ret)
- bond_opt_error_interpret(bond, opt, ret, val);
+ bond_opt_error_interpret(bond, opt, ret, val, bad_attr, extack);
return ret;
}
@@ -753,7 +783,7 @@ int __bond_opt_set_notify(struct bonding *bond,
ASSERT_RTNL();
- ret = __bond_opt_set(bond, option, val);
+ ret = __bond_opt_set(bond, option, val, NULL, NULL);
if (!ret && (bond->dev->reg_state == NETREG_REGISTERED))
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
@@ -1288,6 +1318,27 @@ static int bond_option_missed_max_set(struct bonding *bond,
return 0;
}
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(newval->slave_dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+ slave->prio = newval->value;
+
+ if (rtnl_dereference(bond->primary_slave))
+ slave_warn(bond->dev, slave->dev,
+ "prio updated, but will not affect failover re-selection as primary slave have been set\n");
+ else
+ bond_select_active_slave(bond);
+
+ return 0;
+}
+
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index b2dcc1e5a388..3048ad77edb3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,5 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "CAN Device Drivers"
+
+menuconfig CAN_DEV
+ tristate "CAN Device Drivers"
+ default y
+ depends on CAN
+ help
+ Controller Area Network (CAN) is a serial communications protocol that runs
+ at up to 1 Mbit/s in its original release (now known as Classical CAN) and
+ at up to 8 Mbit/s for the more recent CAN with Flexible Data-Rate
+ (CAN-FD). The CAN bus was originally designed mainly for automotive use, but is now
+ widely used in marine (NMEA2000), industrial, and medical
+ applications. More information on the CAN network protocol family
+ PF_CAN is contained in <Documentation/networking/can.rst>.
+
+ This section contains all the CAN(-FD) device drivers including the
+ virtual ones. If you own such devices or plan to use the virtual CAN
+ interfaces to develop applications, say Y here.
+
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
@@ -28,35 +49,22 @@ config CAN_VXCAN
This driver can also be built as a module. If so, the module
will be called vxcan.
-config CAN_SLCAN
- tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on TTY
+config CAN_NETLINK
+ bool "CAN device drivers with Netlink support"
+ default y
help
- CAN driver for several 'low cost' CAN interfaces that are attached
- via serial lines or via USB-to-serial adapters using the LAWICEL
- ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+ Enables the common framework for CAN device drivers. This is the
+ standard library and provides features for the Netlink interface such
+ as bittiming validation, support for CAN error states, device restart
+ and others.
- As only the sending and receiving of CAN frames is implemented, this
- driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
-
- Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the linux-can project, see
- https://github.com/linux-can/can-utils for details.
-
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
+ The additional features selected by this option will be added to the
+ can-dev module.
-config CAN_DEV
- tristate "Platform CAN drivers with Netlink support"
- default y
- help
- Enables the common framework for platform CAN drivers with Netlink
- support. This is the standard library for CAN drivers.
- If unsure, say Y.
+ This is required by all platform and hardware CAN drivers. If you
+ plan to use such devices or if unsure, say Y.
-if CAN_DEV
+if CAN_NETLINK
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
@@ -69,8 +77,15 @@ config CAN_CALC_BITTIMING
source clock frequencies. Disabling saves some space, but then the
bit-timing parameters must be specified directly using the Netlink
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
+
+ The additional features selected by this option will be added to the
+ can-dev module.
+
If unsure, say Y.
+config CAN_RX_OFFLOAD
+ bool
+
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM
@@ -78,10 +93,29 @@ config CAN_AT91
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
+config CAN_CAN327
+ tristate "Serial / USB serial ELM327 based OBD-II Interfaces (can327)"
+ depends on TTY
+ select CAN_RX_OFFLOAD
+ help
+ CAN driver for several 'low cost' OBD-II interfaces based on the
+ ELM327 OBD-II interpreter chip.
+
+ This is a best effort driver - the ELM327 interface was never
+ designed to be used as a standalone CAN interface. However, it can
+ still be used for simple request-response protocols (such as OBD II),
+ and to monitor broadcast messages on a bus (such as in a vehicle).
+
+ Please refer to the documentation for information on how to use it:
+ Documentation/networking/device_drivers/can/can327.rst
+
+ If this driver is built as a module, it will be called can327.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
help
Say Y here if you want to support for Freescale FlexCAN.
@@ -118,6 +152,26 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express HS v2
Kvaser Mini PCI Express 2xHS v2
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on TTY
+ help
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils at the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
+
+ The slcan driver supports up to 10 CAN netdevices by default which
+ can be changed by the 'maxdev=xx' module option. This driver can
+ also be built as a module. If so, the module will be called slcan.
+
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -131,6 +185,7 @@ config CAN_SUN4I
config CAN_TI_HECC
depends on ARM
tristate "TI High End CAN Controller"
+ select CAN_RX_OFFLOAD
help
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
@@ -164,7 +219,7 @@ source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
source "drivers/net/can/usb/Kconfig"
-endif
+endif #CAN_NETLINK
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
@@ -174,4 +229,4 @@ config CAN_DEBUG_DEVICES
a problem with CAN support and want to see more of what is going
on.
-endmenu
+endif #CAN_DEV
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0af85983634c..61c75ce9d500 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
-obj-$(CONFIG_CAN_SLCAN) += slcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += rcar/
@@ -14,6 +14,7 @@ obj-y += usb/
obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
+obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 29ed0d3cd171..3a2d109a3792 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -1152,6 +1153,10 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops at91_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static ssize_t mb0_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1293,6 +1298,7 @@ static int at91_can_probe(struct platform_device *pdev)
}
dev->netdev_ops = &at91_netdev_ops;
+ dev->ethtool_ops = &at91_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index bd2f6dc01194..f23a03300a81 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -223,7 +223,7 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
-void c_can_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops c_can_ethtool_ops;
static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
{
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 8a826a6813bd..e41167eda673 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -24,11 +24,7 @@ static void c_can_get_ringparam(struct net_device *netdev,
ring->tx_pending = priv->msg_obj_tx_num;
}
-static const struct ethtool_ops c_can_ethtool_ops = {
+const struct ethtool_ops c_can_ethtool_ops = {
.get_ringparam = c_can_get_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void c_can_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &c_can_ethtool_ops;
-}
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index a7362af0babb..dc8132862f33 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -952,15 +952,14 @@ static int c_can_handle_state_change(struct net_device *dev,
switch (error_type) {
case C_CAN_NO_ERROR:
- /* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -970,7 +969,7 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
case C_CAN_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (rx_err_passive)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
@@ -1365,7 +1364,7 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
- c_can_set_ethtool_ops(dev);
+ dev->ethtool_ops = &c_can_ethtool_ops;
return register_candev(dev);
}
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
new file mode 100644
index 000000000000..0aa1af31d0fe
--- /dev/null
+++ b/drivers/net/can/can327.c
@@ -0,0 +1,1144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ELM327 based CAN interface driver (tty line discipline)
+ *
+ * This driver started as a derivative of linux/drivers/net/can/slcan.c
+ * and my thanks go to the original authors for their inspiration.
+ *
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/workqueue.h>
+
+#include <uapi/linux/tty.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+
+#define CAN327_NAPI_WEIGHT 4
+
+#define CAN327_SIZE_TXBUF 32
+#define CAN327_SIZE_RXBUF 1024
+
+#define CAN327_CAN_CONFIG_SEND_SFF 0x8000
+#define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000
+#define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000
+#define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000
+
+#define CAN327_DUMMY_CHAR 'y'
+#define CAN327_DUMMY_STRING "y"
+#define CAN327_READY_CHAR '>'
+
+/* Bits in elm->cmds_todo */
+enum can327_tx_do {
+ CAN327_TX_DO_CAN_DATA = 0,
+ CAN327_TX_DO_CANID_11BIT,
+ CAN327_TX_DO_CANID_29BIT_LOW,
+ CAN327_TX_DO_CANID_29BIT_HIGH,
+ CAN327_TX_DO_CAN_CONFIG_PART2,
+ CAN327_TX_DO_CAN_CONFIG,
+ CAN327_TX_DO_RESPONSES,
+ CAN327_TX_DO_SILENT_MONITOR,
+ CAN327_TX_DO_INIT,
+};
+
+struct can327 {
+ /* This must be the first member when using alloc_candev() */
+ struct can_priv can;
+
+ struct can_rx_offload offload;
+
+ /* TTY buffers */
+ u8 txbuf[CAN327_SIZE_TXBUF];
+ u8 rxbuf[CAN327_SIZE_RXBUF];
+
+ /* Per-channel lock */
+ spinlock_t lock;
+
+ /* TTY and netdev devices that we're bridging */
+ struct tty_struct *tty;
+ struct net_device *dev;
+
+ /* TTY buffer accounting */
+ struct work_struct tx_work; /* Flushes TTY TX buffer */
+ u8 *txhead; /* Next TX byte */
+ size_t txleft; /* Bytes left to TX */
+ int rxfill; /* Bytes already RX'd in buffer */
+
+ /* State machine */
+ enum {
+ CAN327_STATE_NOTINIT = 0,
+ CAN327_STATE_GETDUMMYCHAR,
+ CAN327_STATE_GETPROMPT,
+ CAN327_STATE_RECEIVING,
+ } state;
+
+ /* Things we have yet to send */
+ char **next_init_cmd;
+ unsigned long cmds_todo;
+
+ /* The CAN frame and config the ELM327 is sending/using,
+ * or will send/use after finishing all cmds_todo
+ */
+ struct can_frame can_frame_to_send;
+ u16 can_config;
+ u8 can_bitrate_divisor;
+
+ /* Parser state */
+ bool drop_next_line;
+
+ /* Stop the channel on UART side hardware failure, e.g. stray
+ * characters or neverending lines. This may be caused by bad
+ * UART wiring, a bad ELM327, a bad UART bridge...
+ * Once this is true, nothing will be sent to the TTY.
+ */
+ bool uart_side_failure;
+};
+
+static inline void can327_uart_side_failure(struct can327 *elm);
+
+static void can327_send(struct can327 *elm, const void *buf, size_t len)
+{
+ int written;
+
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->uart_side_failure)
+ return;
+
+ memcpy(elm->txbuf, buf, len);
+
+ /* The order of the next two lines is *very* important.
+ * When we are sending a small amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it runs with interrupts enabled.
+ * In that case we would *never* get a WRITE_WAKEUP event
+ * if we did not request it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ written = elm->tty->ops->write(elm->tty, elm->txbuf, len);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+ return;
+ }
+
+ elm->txleft = len - written;
+ elm->txhead = elm->txbuf + written;
+}
+
+/* Take the ELM327 out of almost any state and back into command mode.
+ * We send CAN327_DUMMY_CHAR which will either abort any running
+ * operation, or be echoed back to us in case we're already in command
+ * mode.
+ */
+static void can327_kick_into_cmd_mode(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->state != CAN327_STATE_GETDUMMYCHAR &&
+ elm->state != CAN327_STATE_GETPROMPT) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+
+ elm->state = CAN327_STATE_GETDUMMYCHAR;
+ }
+}
+
+/* Schedule a CAN frame and necessary config changes to be sent to the TTY. */
+static void can327_send_frame(struct can327 *elm, struct can_frame *frame)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Schedule any necessary changes in ELM327's CAN configuration */
+ if (elm->can_frame_to_send.can_id != frame->can_id) {
+ /* Set the new CAN ID for transmission. */
+ if ((frame->can_id ^ elm->can_frame_to_send.can_id)
+ & CAN_EFF_FLAG) {
+ elm->can_config =
+ (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) |
+ CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF |
+ elm->can_bitrate_divisor;
+
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+ }
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo);
+ } else {
+ set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_LOW,
+ &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH,
+ &elm->cmds_todo);
+ }
+ }
+
+ /* Schedule the CAN frame itself. */
+ elm->can_frame_to_send = *frame;
+ set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+/* ELM327 initialisation sequence.
+ * The line length is limited by the buffer in can327_handle_prompt().
+ */
+static char *can327_init_script[] = {
+ "AT WS\r", /* v1.0: Warm Start */
+ "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */
+ "AT M0\r", /* v1.0: Memory Off */
+ "AT AL\r", /* v1.0: Allow Long messages */
+ "AT BI\r", /* v1.0: Bypass Initialisation */
+ "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */
+ "AT CFC0\r", /* v1.0: CAN Flow Control Off */
+ "AT CF 000\r", /* v1.0: Reset CAN ID Filter */
+ "AT CM 000\r", /* v1.0: Reset CAN ID Mask */
+ "AT E1\r", /* v1.0: Echo On */
+ "AT H1\r", /* v1.0: Headers On */
+ "AT L0\r", /* v1.0: Linefeeds Off */
+ "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */
+ "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */
+ "AT AT0\r", /* v1.2: Adaptive Timing Off */
+ "AT D1\r", /* v1.3: Print DLC On */
+ "AT S1\r", /* v1.3: Spaces On */
+ "AT TP B\r", /* v1.0: Try Protocol B */
+ NULL
+};
+
+static void can327_init_device(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ elm->state = CAN327_STATE_NOTINIT;
+ elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */
+ elm->rxfill = 0;
+ elm->drop_next_line = 0;
+
+ /* We can only set the bitrate as a fraction of 500000.
+ * The bitrates listed in can327_bitrate_const will
+ * limit the user to the right values.
+ */
+ elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate;
+ elm->can_config =
+ CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor;
+
+ /* Configure ELM327 and then start monitoring */
+ elm->next_init_cmd = &can327_init_script[0];
+ set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
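The config word assembled in can327_init_device() carries the CAN327_CAN_CONFIG_* flags in its upper bits and the bitrate divisor (500000 / bitrate) in its lower bits, and is later sent with "ATPB%04X\r". A tiny self-contained sketch of that computation (macro names shortened, values copied from the defines above) is:

    #include <stdio.h>

    #define CFG_SEND_SFF            0x8000
    #define CFG_VARIABLE_DLC        0x4000
    #define CFG_RECV_BOTH_SFF_EFF   0x2000

    /* Mirror of the ATPB config word: flags plus the 500 kbit/s divisor. */
    static unsigned int config_word(unsigned int bitrate)
    {
    	unsigned int divisor = 500000 / bitrate;

    	return CFG_SEND_SFF | CFG_VARIABLE_DLC | CFG_RECV_BOTH_SFF_EFF | divisor;
    }

    int main(void)
    {
    	/* 250 kbit/s -> divisor 2 -> word 0xE002, sent as "ATPB E002". */
    	printf("ATPB%04X\n", config_word(250000));
    	return 0;
    }

This is only an illustration of the arithmetic; the driver restricts the user to divisor-friendly bitrates via can327_bitrate_const, as the comment above notes.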
+
+static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (!netif_running(elm->dev))
+ return;
+
+ /* Queue for NAPI pickup.
+ * rx-offload will update stats and LEDs for us.
+ */
+ if (can_rx_offload_queue_tail(&elm->offload, skb))
+ elm->dev->stats.rx_fifo_errors++;
+
+ /* Wake NAPI */
+ can_rx_offload_irq_finish(&elm->offload);
+}
+
+/* Called when we're out of ideas and just want it all to end. */
+static inline void can327_uart_side_failure(struct can327 *elm)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ elm->uart_side_failure = true;
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ elm->can.can_stats.bus_off++;
+ netif_stop_queue(elm->dev);
+ elm->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(elm->dev);
+
+ netdev_err(elm->dev,
+ "ELM327 misbehaved. Blocking further communication.\n");
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ return;
+
+ frame->can_id |= CAN_ERR_BUSOFF;
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Compares a byte buffer (non-NUL terminated) to the payload part of
+ * a string, and returns true iff the buffer (content *and* length) is
+ * exactly that string, without the terminating NUL byte.
+ *
+ * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9
+ * and !memcmp(buf, "BUS ERROR", 9).
+ *
+ * The reason to use strings is so we can easily include them in the C
+ * code, and to avoid hardcoding lengths.
+ */
+static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes,
+ const char *reference)
+{
+ size_t ref_len = strlen(reference);
+
+ return (nbytes == ref_len) && !memcmp(buf, reference, ref_len);
+}
+
+static void can327_parse_error(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ /* It's okay to return here:
+ * The outer parsing loop will drop this UART buffer.
+ */
+ return;
+
+ /* Filter possible error messages based on length of RX'd line */
+ if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) {
+ netdev_err(elm->dev,
+ "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n");
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) {
+ /* This will only happen if the last data line was complete.
+ * Otherwise, can327_parse_frame() will heuristically
+ * emit this kind of error frame instead.
+ */
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) {
+ frame->can_id |= CAN_ERR_BUSERROR;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_OVERLOAD;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_TX;
+ } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) {
+ /* ERR is followed by two digits, hence line length 5 */
+ netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n",
+ elm->rxbuf[3], elm->rxbuf[4]);
+ frame->can_id |= CAN_ERR_CRTL;
+ } else {
+ /* Something else has happened.
+ * Maybe garbage on the UART line.
+ * Emit a generic error frame.
+ */
+ }
+
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Parse CAN frames coming as ASCII from ELM327.
+ * They can be of various formats:
+ *
+ * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL
+ * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL
+ *
+ * where D = DLC, PL = payload byte
+ *
+ * Instead of a payload, RTR indicates a remote request.
+ *
+ * We will use the spaces and line length to guess the format.
+ */
+static int can327_parse_frame(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+ int hexlen;
+ int datastart;
+ int i;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_skb(elm->dev, &frame);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Find first non-hex and non-space character:
+ * - In the simplest case, there is none.
+ * - For RTR frames, 'R' is the first non-hex character.
+ * - An error message may replace the end of the data line.
+ */
+ for (hexlen = 0; hexlen <= len; hexlen++) {
+ if (hex_to_bin(elm->rxbuf[hexlen]) < 0 &&
+ elm->rxbuf[hexlen] != ' ') {
+ break;
+ }
+ }
+
+ /* Sanity check whether the line is really a clean hexdump,
+ * or terminated by an error message, or contains garbage.
+ */
+ if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) &&
+ !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] &&
+ ' ' != elm->rxbuf[hexlen]) {
+ /* The line is likely garbled anyway, so bail.
+ * The main code will restart listening.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* Use spaces in CAN ID to distinguish 29 or 11 bit address length.
+ * No out-of-bounds access:
+ * We use the fact that we can always read from elm->rxbuf.
+ */
+ if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' &&
+ elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' &&
+ elm->rxbuf[13] == ' ') {
+ frame->can_id = CAN_EFF_FLAG;
+ datastart = 14;
+ } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') {
+ datastart = 6;
+ } else {
+ /* This is not a well-formatted data line.
+ * Assume it's an error message.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ if (hexlen < datastart) {
+ /* The line is too short to be a valid frame hex dump.
+ * Something interrupted the hex dump or it is invalid.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* From here on all chars up to buf[hexlen] are hex or spaces,
+ * at well-defined offsets.
+ */
+
+ /* Read CAN data length */
+ frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0);
+
+ /* Read CAN ID */
+ if (frame->can_id & CAN_EFF_FLAG) {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) |
+ (hex_to_bin(elm->rxbuf[1]) << 24) |
+ (hex_to_bin(elm->rxbuf[3]) << 20) |
+ (hex_to_bin(elm->rxbuf[4]) << 16) |
+ (hex_to_bin(elm->rxbuf[6]) << 12) |
+ (hex_to_bin(elm->rxbuf[7]) << 8) |
+ (hex_to_bin(elm->rxbuf[9]) << 4) |
+ (hex_to_bin(elm->rxbuf[10]) << 0);
+ } else {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) |
+ (hex_to_bin(elm->rxbuf[1]) << 4) |
+ (hex_to_bin(elm->rxbuf[2]) << 0);
+ }
+
+ /* Check for RTR frame */
+ if (elm->rxfill >= hexlen + 3 &&
+ !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) {
+ frame->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Is the line long enough to hold the advertised payload?
+ * Note: RTR frames have a DLC, but no actual payload.
+ */
+ if (!(frame->can_id & CAN_RTR_FLAG) &&
+ (hexlen < frame->len * 3 + datastart)) {
+ /* Incomplete frame.
+ * Probably the ELM327's RS232 TX buffer was full.
+ * Emit an error frame and exit.
+ */
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->len = CAN_ERR_DLC;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ can327_feed_frame_to_netdev(elm, skb);
+
+ /* Signal failure to parse.
+ * The line will be re-parsed as an error line, which will fail.
+ * However, this will correctly drop the state machine back into
+ * command mode.
+ */
+ return -ENODATA;
+ }
+
+ /* Parse the data nibbles. */
+ for (i = 0; i < frame->len; i++) {
+ frame->data[i] =
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) |
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1]));
+ }
+
+ /* Feed the frame to the network layer. */
+ can327_feed_frame_to_netdev(elm, skb);
+
+ return 0;
+}
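The hexdump format handled by can327_parse_frame() ("123 8 DE AD BE EF ..." for an 11-bit ID) can be exercised outside the driver. The following self-contained sketch parses such an 11-bit-ID line into an ID, DLC and payload bytes; it is deliberately simplified (no 29-bit IDs, RTR frames or error-line handling) and reimplements hex_to_bin() locally since the kernel helper is not available in userspace:

    #include <stdio.h>

    /* Minimal stand-in for the kernel's hex_to_bin(): -1 on non-hex input. */
    static int hex_to_bin(char c)
    {
    	if (c >= '0' && c <= '9')
    		return c - '0';
    	if (c >= 'A' && c <= 'F')
    		return c - 'A' + 10;
    	if (c >= 'a' && c <= 'f')
    		return c - 'a' + 10;
    	return -1;
    }

    /* Parse "III D PL PL ..." (11-bit ID, DLC, then DLC payload bytes). */
    static int parse_sff_line(const char *line)
    {
    	unsigned int id, dlc, i, byte;

    	if (sscanf(line, "%3x %1x", &id, &dlc) != 2 || dlc > 8)
    		return -1;

    	printf("id=0x%03x dlc=%u data=", id, dlc);
    	for (i = 0; i < dlc; i++) {
    		const char *p = line + 6 + 3 * i;   /* datastart = 6, 3 chars per byte */

    		if (hex_to_bin(p[0]) < 0 || hex_to_bin(p[1]) < 0)
    			return -1;
    		byte = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
    		printf("%02X ", byte);
    	}
    	printf("\n");
    	return 0;
    }

    int main(void)
    {
    	return parse_sff_line("123 8 DE AD BE EF 01 02 03 04");
    }

The driver's real parser additionally uses the space positions to distinguish 29-bit from 11-bit IDs and falls back to error-line parsing when the line is not a clean hexdump, as described in the comment preceding the function.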
+
+static void can327_parse_line(struct can327 *elm, size_t len)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Skip empty lines */
+ if (!len)
+ return;
+
+ /* Skip echo lines */
+ if (elm->drop_next_line) {
+ elm->drop_next_line = 0;
+ return;
+ } else if (!memcmp(elm->rxbuf, "AT", 2)) {
+ return;
+ }
+
+ /* Regular parsing */
+ if (elm->state == CAN327_STATE_RECEIVING &&
+ can327_parse_frame(elm, len)) {
+ /* Parse an error line. */
+ can327_parse_error(elm, len);
+
+ /* Start afresh. */
+ can327_kick_into_cmd_mode(elm);
+ }
+}
+
+static void can327_handle_prompt(struct can327 *elm)
+{
+ struct can_frame *frame = &elm->can_frame_to_send;
+ /* Size this buffer for the largest ELM327 line we may generate,
+ * which is currently an 8 byte CAN frame's payload hexdump.
+ * Items in can327_init_script must fit here, too!
+ */
+ char local_txbuf[sizeof("0102030405060708\r")];
+
+ lockdep_assert_held(&elm->lock);
+
+ if (!elm->cmds_todo) {
+ /* Enter CAN monitor mode */
+ can327_send(elm, "ATMA\r", 5);
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+
+ return;
+ }
+
+ /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */
+ if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf), "%s",
+ *elm->next_init_cmd);
+
+ elm->next_init_cmd++;
+ if (!(*elm->next_init_cmd)) {
+ clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ /* Init finished. */
+ }
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCSM%i\r",
+ !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATR%i\r",
+ !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPC\r");
+ set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPB%04X\r",
+ elm->can_config);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCP%02X\r",
+ (frame->can_id & CAN_EFF_MASK) >> 24);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%06X\r",
+ frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%03X\r",
+ frame->can_id & CAN_SFF_MASK);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) {
+ if (frame->can_id & CAN_RTR_FLAG) {
+ /* Send an RTR frame. Their DLC is fixed.
+ * Some chips don't send them at all.
+ */
+ snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r");
+ } else {
+ /* Send a regular CAN data frame */
+ int i;
+
+ for (i = 0; i < frame->len; i++) {
+ snprintf(&local_txbuf[2 * i],
+ sizeof(local_txbuf), "%02X",
+ frame->data[i]);
+ }
+
+ snprintf(&local_txbuf[2 * i], sizeof(local_txbuf),
+ "\r");
+ }
+
+ elm->drop_next_line = 1;
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+ }
+
+ can327_send(elm, local_txbuf, strlen(local_txbuf));
+}
+
+static bool can327_is_ready_char(char c)
+{
+ /* Bits 0xc0 are sometimes set (randomly), hence the mask.
+ * Probably bad hardware.
+ */
+ return (c & 0x3f) == CAN327_READY_CHAR;
+}
+
+static void can327_drop_bytes(struct can327 *elm, size_t i)
+{
+ lockdep_assert_held(&elm->lock);
+
+ memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i);
+ elm->rxfill -= i;
+}
+
+static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx)
+{
+ size_t len, pos;
+
+ lockdep_assert_held(&elm->lock);
+
+ switch (elm->state) {
+ case CAN327_STATE_NOTINIT:
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_GETDUMMYCHAR:
+ /* Wait for 'y' or '>' */
+ for (pos = 0; pos < elm->rxfill; pos++) {
+ if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) {
+ can327_send(elm, "\r", 1);
+ elm->state = CAN327_STATE_GETPROMPT;
+ pos++;
+ break;
+ } else if (can327_is_ready_char(elm->rxbuf[pos])) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ pos++;
+ break;
+ }
+ }
+
+ can327_drop_bytes(elm, pos);
+ break;
+
+ case CAN327_STATE_GETPROMPT:
+ /* Wait for '>' */
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1]))
+ can327_handle_prompt(elm);
+
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_RECEIVING:
+ /* Find <CR> delimiting feedback lines. */
+ len = first_new_char_idx;
+ while (len < elm->rxfill && elm->rxbuf[len] != '\r')
+ len++;
+
+ if (len == CAN327_SIZE_RXBUF) {
+ /* Assume the buffer ran full with garbage.
+ * Did we even connect at the right baud rate?
+ */
+ netdev_err(elm->dev,
+ "RX buffer overflow. Faulty ELM327 or UART?\n");
+ can327_uart_side_failure(elm);
+ } else if (len == elm->rxfill) {
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) {
+ /* The ELM327's AT ST response timeout ran out,
+ * so we got a prompt.
+ * Clear RX buffer and restart listening.
+ */
+ elm->rxfill = 0;
+
+ can327_handle_prompt(elm);
+ }
+
+ /* No <CR> found - we haven't received a full line yet.
+ * Wait for more data.
+ */
+ } else {
+ /* We have a full line to parse. */
+ can327_parse_line(elm, len);
+
+ /* Remove parsed data from RX buffer. */
+ can327_drop_bytes(elm, len + 1);
+
+ /* More data to parse? */
+ if (elm->rxfill)
+ can327_parse_rxbuf(elm, 0);
+ }
+ }
+}
+
+static int can327_netdev_open(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&elm->lock);
+
+ if (!elm->tty) {
+ spin_unlock_bh(&elm->lock);
+ return -ENODEV;
+ }
+
+ if (elm->uart_side_failure)
+ netdev_warn(elm->dev,
+ "Reopening netdev after a UART side fault has been detected.\n");
+
+ /* Clear TTY buffers */
+ elm->rxfill = 0;
+ elm->txleft = 0;
+
+ /* open_candev() checks for elm->can.bittiming.bitrate != 0 */
+ err = open_candev(dev);
+ if (err) {
+ spin_unlock_bh(&elm->lock);
+ return err;
+ }
+
+ can327_init_device(elm);
+ spin_unlock_bh(&elm->lock);
+
+ err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT);
+ if (err) {
+ close_candev(dev);
+ return err;
+ }
+
+ can_rx_offload_enable(&elm->offload);
+
+ elm->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int can327_netdev_close(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+
+ /* Interrupt whatever the ELM327 is doing right now */
+ spin_lock_bh(&elm->lock);
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ spin_unlock_bh(&elm->lock);
+
+ netif_stop_queue(dev);
+
+ /* Give UART one final chance to flush. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ flush_work(&elm->tx_work);
+
+ can_rx_offload_disable(&elm->offload);
+ elm->can.state = CAN_STATE_STOPPED;
+ can_rx_offload_del(&elm->offload);
+ close_candev(dev);
+
+ return 0;
+}
+
+/* Send a can_frame to a TTY. */
+static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* We shouldn't get here after a hardware fault:
+ * can_bus_off() calls netif_carrier_off()
+ */
+ if (elm->uart_side_failure) {
+ WARN_ON_ONCE(elm->uart_side_failure);
+ goto out;
+ }
+
+ netif_stop_queue(dev);
+
+ /* BHs are already disabled, so no spin_lock_bh().
+ * See Documentation/networking/netdevices.rst
+ */
+ spin_lock(&elm->lock);
+ can327_send_frame(elm, frame);
+ spin_unlock(&elm->lock);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 0 : frame->len;
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops can327_netdev_ops = {
+ .ndo_open = can327_netdev_open,
+ .ndo_stop = can327_netdev_close,
+ .ndo_start_xmit = can327_netdev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops can327_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static bool can327_is_valid_rx_char(u8 c)
+{
+ static const bool lut_char_is_valid['z'] = {
+ ['\r'] = true,
+ [' '] = true,
+ ['.'] = true,
+ ['0'] = true, true, true, true, true,
+ ['5'] = true, true, true, true, true,
+ ['<'] = true,
+ [CAN327_READY_CHAR] = true,
+ ['?'] = true,
+ ['A'] = true, true, true, true, true, true, true,
+ ['H'] = true, true, true, true, true, true, true,
+ ['O'] = true, true, true, true, true, true, true,
+ ['V'] = true, true, true, true, true,
+ ['a'] = true,
+ ['b'] = true,
+ ['v'] = true,
+ [CAN327_DUMMY_CHAR] = true,
+ };
+ BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z');
+
+ return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]);
+}
+
+/* Handle incoming ELM327 ASCII data.
+ * This will not be re-entered while running, but other ldisc
+ * functions may be called in parallel.
+ */
+static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ size_t first_new_char_idx;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ /* Store old rxfill, so can327_parse_rxbuf() will have
+ * the option of skipping already checked characters.
+ */
+ first_new_char_idx = elm->rxfill;
+
+ while (count-- && elm->rxfill < CAN327_SIZE_RXBUF) {
+ if (fp && *fp++) {
+ netdev_err(elm->dev,
+ "Error in received character stream. Check your wiring.");
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ /* Ignore NUL characters, which the PIC microcontroller may
+ * inadvertently insert due to a known hardware bug.
+ * See ELM327 documentation, which refers to a Microchip PIC
+ * bug description.
+ */
+ if (*cp) {
+ /* Check for stray characters on the UART line.
+ * Likely caused by bad hardware.
+ */
+ if (!can327_is_valid_rx_char(*cp)) {
+ netdev_err(elm->dev,
+ "Received illegal character %02x.\n",
+ *cp);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->rxbuf[elm->rxfill++] = *cp;
+ }
+
+ cp++;
+ }
+
+ if (count >= 0) {
+ netdev_err(elm->dev,
+ "Receive buffer overflowed. Bad chip or wiring? count = %i",
+ count);
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ can327_parse_rxbuf(elm, first_new_char_idx);
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Write out remaining transmit buffer.
+ * Scheduled when TTY is writable.
+ */
+static void can327_ldisc_tx_worker(struct work_struct *work)
+{
+ struct can327 *elm = container_of(work, struct can327, tx_work);
+ ssize_t written;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ if (elm->txleft) {
+ written = elm->tty->ops->write(elm->tty, elm->txhead,
+ elm->txleft);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->txleft -= written;
+ elm->txhead += written;
+ }
+
+ if (!elm->txleft)
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Called by the driver when there's room for more data. */
+static void can327_ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ schedule_work(&elm->tx_work);
+}
+
+/* The ELM327 can only handle bitrates of 500 kHz divided by an integer
+ * divisor between 1 and 64, or 7/8 of those rates.
+ * Currently we don't implement support for the 7/8 rates.
+ */
+static const u32 can327_bitrate_const[] = {
+ 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771,
+ 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204,
+ 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195,
+ 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151,
+ 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000,
+ 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411,
+ 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555,
+ 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000
+};
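The table is exactly 500 kHz divided by each divisor from 64 down to 1, rounded down. A
standalone userspace sketch (not part of the driver) that reproduces all 64 entries:

#include <stdio.h>

int main(void)
{
	/* Prints the entries of can327_bitrate_const in order. */
	for (int divisor = 64; divisor >= 1; divisor--)
		printf("%d\n", 500000 / divisor);
	return 0;
}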
+
+static int can327_ldisc_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct can327 *elm;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(struct can327), 0);
+ if (!dev)
+ return -ENFILE;
+ elm = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ spin_lock_init(&elm->lock);
+ INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker);
+
+ /* Configure CAN metadata */
+ elm->can.bitrate_const = can327_bitrate_const;
+ elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const);
+ elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ elm->dev = dev;
+ dev->netdev_ops = &can327_netdev_ops;
+ dev->ethtool_ops = &can327_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ elm->tty = tty;
+ tty->disc_data = elm;
+
+ /* Let 'er rip */
+ err = register_candev(elm->dev);
+ if (err) {
+ free_candev(elm->dev);
+ return err;
+ }
+
+ netdev_info(elm->dev, "can327 on %s.\n", tty->name);
+
+ return 0;
+}
+
+/* Close down a can327 channel.
+ * This means flushing out any pending queues, and then returning.
+ * This call is serialized against other ldisc functions:
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this function for a hangup event.
+ */
+static void can327_ldisc_close(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to.
+ * Our .ndo_stop() also flushes the TTY write wakeup handler,
+ * so we can safely set elm->tty = NULL after this.
+ */
+ unregister_candev(elm->dev);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&elm->lock);
+ tty->disc_data = NULL;
+ elm->tty = NULL;
+ spin_unlock_bh(&elm->lock);
+
+ netdev_info(elm->dev, "can327 off %s.\n", tty->name);
+
+ free_candev(elm->dev);
+}
+
+static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1;
+ if (copy_to_user((void __user *)arg, elm->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops can327_ldisc = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .num = N_CAN327,
+ .receive_buf = can327_ldisc_rx,
+ .write_wakeup = can327_ldisc_tx_wakeup,
+ .open = can327_ldisc_open,
+ .close = can327_ldisc_close,
+ .ioctl = can327_ldisc_ioctl,
+};
+
+static int __init can327_init(void)
+{
+ int status;
+
+ status = tty_register_ldisc(&can327_ldisc);
+ if (status)
+ pr_err("Can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit can327_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&can327_ldisc);
+}
+
+module_init(can327_init);
+module_exit(can327_exit);
+
+MODULE_ALIAS_LDISC(N_CAN327);
+MODULE_DESCRIPTION("ELM327 based CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Staudt <max@enpas.org>");
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index bb7224cfc6ab..0b9dfc76e769 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -512,6 +513,7 @@ static int cc770_err(struct net_device *dev, u8 status)
/* Use extended functions of the CC770 */
if (priv->control_normal_mode & CTRL_EAF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = cc770_read_reg(priv, tx_error_counter);
cf->data[7] = cc770_read_reg(priv, rx_error_counter);
}
@@ -835,6 +837,10 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops cc770_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_cc770dev(struct net_device *dev)
{
struct cc770_priv *priv = netdev_priv(dev);
@@ -845,6 +851,7 @@ int register_cc770dev(struct net_device *dev)
return err;
dev->netdev_ops = &cc770_netdev_ops;
+ dev->ethtool_ops = &cc770_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64990bf20fdc..3c18d028bd8c 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
@@ -847,7 +848,7 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
case CAN_STATE_ERROR_PASSIVE:
priv->can.can_stats.error_passive++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.rxerr > 127) ?
CAN_ERR_CRTL_RX_PASSIVE :
CAN_ERR_CRTL_TX_PASSIVE;
@@ -858,7 +859,7 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
case CAN_STATE_ERROR_WARNING:
priv->can.can_stats.error_warning++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] |= (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -867,6 +868,7 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
@@ -1087,7 +1089,7 @@ clear:
/**
* ctucan_interrupt() - CAN Isr
* @irq: irq number
- * @dev_id: device id poniter
+ * @dev_id: device id pointer
*
* This is the CTU CAN FD ISR. It checks for the type of interrupt
* and invokes the corresponding ISR.
@@ -1300,6 +1302,10 @@ static const struct net_device_ops ctucan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ctucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int ctucan_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1376,6 +1382,7 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
set_drvdata_fnc(dev, ndev);
SET_NETDEV_DEV(ndev, dev);
ndev->netdev_ops = &ctucan_netdev_ops;
+ ndev->ethtool_ops = &ctucan_ethtool_ops;
/* Getting the can_clk info */
if (!can_clk_rate) {
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
index edc1c1a24348..0c181ab51bf8 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_kregs.h
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -4,9 +4,9 @@
* CTU CAN FD IP Core
*
* Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
- * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2022 Ondrej Ille <ondrej.ille@gmail.com> self-funded
* Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
- * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
*
* Project advisors:
* Jiri Novak <jnovak@fel.cvut.cz>
@@ -64,9 +64,12 @@ enum ctu_can_fd_can_registers {
CTUCANFD_RX_DATA = 0x6c,
CTUCANFD_TX_STATUS = 0x70,
CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TXTB_INFO = 0x76,
CTUCANFD_TX_PRIORITY = 0x78,
CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_RETR_CTR = 0x7d,
CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TS_INFO = 0x7f,
CTUCANFD_TRV_DELAY = 0x80,
CTUCANFD_SSP_CFG = 0x82,
CTUCANFD_RX_FR_CTR = 0x84,
@@ -102,8 +105,12 @@ enum ctu_can_fd_can_registers {
#define REG_MODE_STM BIT(2)
#define REG_MODE_AFM BIT(3)
#define REG_MODE_FDE BIT(4)
+#define REG_MODE_TTTM BIT(5)
+#define REG_MODE_ROM BIT(6)
#define REG_MODE_ACF BIT(7)
#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RXBAM BIT(9)
+#define REG_MODE_SAM BIT(11)
#define REG_MODE_RTRLE BIT(16)
#define REG_MODE_RTRTH GENMASK(20, 17)
#define REG_MODE_ILBP BIT(21)
@@ -123,8 +130,10 @@ enum ctu_can_fd_can_registers {
#define REG_STATUS_EWL BIT(6)
#define REG_STATUS_IDLE BIT(7)
#define REG_STATUS_PEXS BIT(8)
+#define REG_STATUS_STCNT BIT(16)
/* COMMAND registers */
+#define REG_COMMAND_RXRPMV BIT(1)
#define REG_COMMAND_RRB BIT(2)
#define REG_COMMAND_CDO BIT(3)
#define REG_COMMAND_ERCRST BIT(4)
@@ -263,8 +272,12 @@ enum ctu_can_fd_can_registers {
#define REG_TX_STATUS_TX2S GENMASK(7, 4)
#define REG_TX_STATUS_TX3S GENMASK(11, 8)
#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+#define REG_TX_STATUS_TX5S GENMASK(19, 16)
+#define REG_TX_STATUS_TX6S GENMASK(23, 20)
+#define REG_TX_STATUS_TX7S GENMASK(27, 24)
+#define REG_TX_STATUS_TX8S GENMASK(31, 28)
-/* TX_COMMAND registers */
+/* TX_COMMAND TXTB_INFO registers */
#define REG_TX_COMMAND_TXCE BIT(0)
#define REG_TX_COMMAND_TXCR BIT(1)
#define REG_TX_COMMAND_TXCA BIT(2)
@@ -272,18 +285,29 @@ enum ctu_can_fd_can_registers {
#define REG_TX_COMMAND_TXB2 BIT(9)
#define REG_TX_COMMAND_TXB3 BIT(10)
#define REG_TX_COMMAND_TXB4 BIT(11)
+#define REG_TX_COMMAND_TXB5 BIT(12)
+#define REG_TX_COMMAND_TXB6 BIT(13)
+#define REG_TX_COMMAND_TXB7 BIT(14)
+#define REG_TX_COMMAND_TXB8 BIT(15)
+#define REG_TX_COMMAND_TXT_BUFFER_COUNT GENMASK(19, 16)
/* TX_PRIORITY registers */
#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+#define REG_TX_PRIORITY_TXT5P GENMASK(18, 16)
+#define REG_TX_PRIORITY_TXT6P GENMASK(22, 20)
+#define REG_TX_PRIORITY_TXT7P GENMASK(26, 24)
+#define REG_TX_PRIORITY_TXT8P GENMASK(30, 28)
-/* ERR_CAPT ALC registers */
+/* ERR_CAPT RETR_CTR ALC TS_INFO registers */
#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_RETR_CTR_VAL GENMASK(11, 8)
#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+#define REG_ERR_CAPT_TS_BITS GENMASK(29, 24)
/* TRV_DELAY SSP_CFG registers */
#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index af2901db473c..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += bittiming.o
-can-dev-y += dev.o
-can-dev-y += length.o
-can-dev-y += netlink.o
-can-dev-y += rx-offload.o
-can-dev-y += skb.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += dev.o
+can-dev-$(CONFIG_CAN_NETLINK) += length.o
+can-dev-$(CONFIG_CAN_NETLINK) += netlink.o
+can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index c1e76f0a5064..7ae80763c960 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -4,205 +4,8 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/units.h>
#include <linux/can/dev.h>
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- const unsigned int sample_point_nominal, const unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * KILO /* BPS */)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * KILO /* BPS */)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error >= best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-
-void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
- const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
-
-{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
- return;
-
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
-
- /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
- * delay compensation" (TDC) is only applicable if data BRP is
- * one or two.
- */
- if (dbt->brp == 1 || dbt->brp == 2) {
- /* Sample point in clock periods */
- u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- if (sample_point_in_tc < tdc_const->tdco_min)
- return;
- tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
- }
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
new file mode 100644
index 000000000000..d3caa040614d
--- /dev/null
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/units.h>
+#include <linux/can/dev.h>
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ const unsigned int sample_point_nominal, const unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500 * KILO /* BPS */)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error >= best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+
+{
+ if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ return;
+
+ *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC) is only applicable if data BRP is
+ * one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ }
+}
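To make the search above concrete, here is a worked example with assumed numbers (an 80 MHz
controller clock and a 500 kbit/s target, neither tied to any particular device), assuming the
tseg1/tseg2/brp limits in btc permit these values:

/* Worked example, illustrative numbers only:
 *
 *   sample_point_nominal = 875      (bitrate <= 500 kbit/s, CiA default 87.5%)
 *   brp      = 10
 *   tsegall  = 80 MHz / (10 * 500 kbit/s)       = 16 tq
 *   tseg     = tsegall - CAN_SYNC_SEG           = 15
 *   tseg2    = 16 - (875 * 16) / 1000           = 2
 *   tseg1    = 15 - 2                           = 13   (prop_seg 6, phase_seg1 7)
 *   sample   = 1000 * (16 - 2) / 16             = 875  (exactly 87.5%, zero error)
 *   bitrate  = 80 MHz / (10 * (1 + 13 + 2))     = 500 kbit/s (zero error)
 *   tq       = 10 * 10^9 / 80 MHz               = 125 ns
 *
 * With a data-phase brp of 1 or 2, can_calc_tdco() would additionally set
 * tdco = (CAN_SYNC_SEG + prop_seg + phase_seg1) * brp, capped at tdco_max.
 */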
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 96c9d9db00cf..c1956b1e9faf 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,7 +4,6 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
@@ -17,12 +16,6 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
{
@@ -329,6 +322,56 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
+/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
+ * supporting hardware timestamps
+ */
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_ON;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(can_eth_ioctl_hwts);
+
+/* generic implementation of ethtool_ops::get_ts_info for CAN devices
+ * supporting hardware timestamps
+ */
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
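From userspace these hooks are reached through the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls.
A minimal sketch follows; the interface name "can0" is an assumption, fd is any socket, and
can_eth_ioctl_hwts() only accepts the fixed HWTSTAMP_TX_ON / HWTSTAMP_FILTER_ALL combination:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable hardware TX/RX timestamping on "can0" (name is illustrative). */
int enable_can_hwtstamp(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}

The same pattern with SIOCGHWTSTAMP reads the configuration back through the "get" branch above.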
+
/* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
@@ -513,7 +556,7 @@ static __init int can_dev_init(void)
err = can_netlink_register();
if (!err)
- pr_info(MOD_DESC "\n");
+ pr_info("CAN device driver interface\n");
return err;
}
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 7633d98e3912..8efa22d9f214 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -176,7 +176,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->bittiming_const && !priv->do_set_bittiming)
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
return -EOPNOTSUPP;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
@@ -278,7 +279,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
+ !priv->data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
@@ -509,7 +511,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate &&
+ if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
+ priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming)) ||
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 61660248c69e..07e0feac8629 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -5,6 +5,14 @@
*/
#include <linux/can/dev.h>
+#include <linux/can/netlink.h>
+#include <linux/module.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
/* Local echo of CAN messages
*
@@ -64,6 +72,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* save frame_len to reuse it when transmission is completed */
can_skb_prv(skb)->frame_len = frame_len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
skb_tx_timestamp(skb);
/* save this skb for tx interrupt echo handling */
@@ -99,6 +110,9 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
+
/* get the real payload length for netdev statistics */
if (cf->can_id & CAN_RTR_FLAG)
*len_ptr = 0;
@@ -252,3 +266,67 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (skb->protocol == htons(ETH_P_CAN)) {
+ if (unlikely(skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->protocol == htons(ETH_P_CANFD)) {
+ if (unlikely(skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else {
+ goto inval_skb;
+ }
+
+ if (!can_skb_headroom_valid(dev, skb)) {
+ goto inval_skb;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ goto inval_skb;
+ }
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);
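For context on what passes these checks: a classic frame written to a CAN_RAW socket reaches the
driver as an skb of exactly CAN_MTU bytes with protocol ETH_P_CAN, i.e. the first accepted case.
A minimal userspace sketch (the interface name "can0" and the ID/payload are illustrative):

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>

/* Send one classic CAN frame on "can0". */
int send_one_frame(void)
{
	struct sockaddr_can addr;
	struct can_frame cf;
	struct ifreq ifr;
	int fd;

	fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name) - 1);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0)
		goto err;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	memset(&cf, 0, sizeof(cf));
	cf.can_id = 0x123;
	cf.len = 2;		/* named can_dlc on older UAPI headers */
	cf.data[0] = 0xde;
	cf.data[1] = 0xad;
	if (write(fd, &cf, sizeof(cf)) != sizeof(cf))	/* sizeof(cf) == CAN_MTU */
		goto err;

	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}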
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index d060088047f1..f857968efed7 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -2113,7 +2113,7 @@ static int flexcan_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &flexcan_netdev_ops;
- flexcan_set_ethtool_ops(dev);
+ dev->ethtool_ops = &flexcan_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c
index 3ae535577700..50e86b2da532 100644
--- a/drivers/net/can/flexcan/flexcan-ethtool.c
+++ b/drivers/net/can/flexcan/flexcan-ethtool.c
@@ -100,15 +100,11 @@ static int flexcan_get_sset_count(struct net_device *netdev, int sset)
}
}
-static const struct ethtool_ops flexcan_ethtool_ops = {
+const struct ethtool_ops flexcan_ethtool_ops = {
.get_ringparam = flexcan_get_ringparam,
.get_strings = flexcan_get_strings,
.get_priv_flags = flexcan_get_priv_flags,
.set_priv_flags = flexcan_set_priv_flags,
.get_sset_count = flexcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void flexcan_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &flexcan_ethtool_ops;
-}
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 23fc09a7e10f..8621a8ea1dea 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -114,7 +114,7 @@ struct flexcan_priv {
void (*write)(u32 val, void __iomem *addr);
};
-void flexcan_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops flexcan_ethtool_ops;
static inline bool
flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 4c47c1055eff..6c37aab93eb3 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
@@ -671,6 +672,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
/* There are no others at this point */
break;
}
+ cf.can_id |= CAN_ERR_CNT;
cf.data[6] = txerr;
cf.data[7] = rxerr;
priv->can.state = state;
@@ -1560,6 +1562,10 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops grcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int grcan_setup_netdev(struct platform_device *ofdev,
void __iomem *base,
int irq, u32 ambafreq, bool txbug)
@@ -1576,6 +1582,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
dev->irq = irq;
dev->flags |= IFF_ECHO;
dev->netdev_ops = &grcan_netdev_ops;
+ dev->ethtool_ops = &grcan_ethtool_ops;
dev->sysfs_groups[0] = &sysfs_grcan_group;
priv = netdev_priv(dev);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 968ed6d7316b..ad7a89b95da7 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -492,7 +493,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -501,7 +502,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
@@ -925,6 +926,10 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ifi_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int ifi_canfd_plat_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -962,6 +967,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
ndev->irq = irq;
ndev->flags |= IFF_ECHO; /* we support local echo */
ndev->netdev_ops = &ifi_canfd_netdev_ops;
+ ndev->ethtool_ops = &ifi_canfd_ethtool_ops;
priv = netdev_priv(ndev);
priv->ndev = ndev;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 35bfb82d6929..71a2caae0757 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
@@ -1127,7 +1128,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* bus error interrupt */
if (isrc == CEVTIND_BEI) {
mod->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
switch (ecc & ECC_MASK) {
case ECC_BIT:
@@ -1153,7 +1154,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
state == CAN_STATE_ERROR_PASSIVE)) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (state == CAN_STATE_ERROR_WARNING) {
mod->can.can_stats.error_warning++;
cf->data[1] = (txerr > rxerr) ?
@@ -1277,6 +1278,8 @@ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
if (!skb)
return;
+ skb_tx_timestamp(skb);
+
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
}
@@ -1752,6 +1755,10 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ican3_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/*
* Low-level CAN Device
*/
@@ -1923,6 +1930,7 @@ static int ican3_probe(struct platform_device *pdev)
mod->free_page = DPM_FREE_START;
ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->ethtool_ops = &ican3_ethtool_ops;
ndev->flags |= IFF_ECHO;
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 017f2d36ffc3..ed54c0b3c7d4 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
@@ -919,10 +920,15 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
int i;
@@ -939,6 +945,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can = netdev_priv(netdev);
netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
+ netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
@@ -1306,7 +1313,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
can->kv_pcie->freq_to_ticks_div));
- cf->can_id |= CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 45ad1b3f0cd0..fc2afab36279 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_M_CAN
tristate "Bosch M_CAN support"
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Bosch M_CAN controller framework.
This is common support for devices that embed the Bosch M_CAN IP.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 7931f9c71ef3..4709c012b1dc 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -9,6 +9,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -741,7 +742,7 @@ static int m_can_handle_state_change(struct net_device *dev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -750,7 +751,7 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -1348,8 +1349,8 @@ static void m_can_chip_config(struct net_device *dev)
/* set bittiming params */
m_can_set_bittiming(dev);
- /* enable internal timestamp generation, with a prescalar of 16. The
- * prescalar is applied to the nominal bit timing
+ /* enable internal timestamp generation, with a prescaler of 16. The
+ * prescaler is applied to the nominal bit timing
*/
m_can_write(cdev, M_CAN_TSCC,
FIELD_PREP(TSCC_TCP_MASK, 0xf) |
@@ -1829,10 +1830,15 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops m_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 65ba6697bd7d..c469b2f3e57d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -63,7 +63,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 78a21ab63601..2119fbb287ef 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -616,6 +616,10 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mscan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
@@ -676,6 +680,7 @@ struct net_device *alloc_mscandev(void)
priv = netdev_priv(dev);
dev->netdev_ops = &mscan_netdev_ops;
+ dev->ethtool_ops = &mscan_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index fde3ac516d26..0558ff67ec6a 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -489,6 +490,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
if (!skb)
return;
+ errc = ioread32(&priv->regs->errc);
if (status & PCH_BUS_OFF) {
pch_can_set_tx_all(priv, 0);
pch_can_set_rx_all(priv, 0);
@@ -496,9 +498,12 @@ static void pch_can_error(struct net_device *ndev, u32 status)
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
+ } else {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = errc & PCH_TEC;
+ cf->data[7] = (errc & PCH_REC) >> 8;
}
- errc = ioread32(&priv->regs->errc);
/* Warning interrupt. */
if (status & PCH_EWARN) {
state = CAN_STATE_ERROR_WARNING;
@@ -556,9 +561,6 @@ static void pch_can_error(struct net_device *ndev, u32 status)
break;
}
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
-
priv->can.state = state;
netif_receive_skb(skb);
}
@@ -937,6 +939,10 @@ static const struct net_device_ops pch_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops pch_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void pch_can_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
@@ -1187,6 +1193,7 @@ static int pch_can_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &pch_can_netdev_ops;
+ ndev->ethtool_ops = &pch_can_ethtool_ops;
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index b2dea360813d..f8420cc1d907 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -7,6 +7,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/ethtool.h>
#include "peak_canfd_user.h"
@@ -373,7 +374,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -386,7 +387,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -430,7 +431,7 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
return -ENOMEM;
}
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
cf->data[6] = priv->bec.txerr;
@@ -742,13 +743,59 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_canfd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static int peak_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops peak_canfd_ethtool_ops = {
+ .get_ts_info = peak_get_ts_info,
+};
+
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
int echo_skb_max)
{
@@ -789,6 +836,7 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
ndev->flags |= IFF_ECHO;
ndev->netdev_ops = &peak_canfd_netdev_ops;
+ ndev->ethtool_ops = &peak_canfd_ethtool_ops;
ndev->dev_id = index;
return ndev;
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index d45762f1cf6b..6ee968c59ac9 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/can/dev.h>
@@ -232,11 +233,8 @@ static void rcar_can_error(struct net_device *ndev)
if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
txerr = readb(&priv->regs->tecr);
rxerr = readb(&priv->regs->recr);
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_CRTL;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
}
if (eifr & RCAR_CAN_EIFR_BEIF) {
int rx_errors = 0, tx_errors = 0;
@@ -336,6 +334,10 @@ static void rcar_can_error(struct net_device *ndev)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
if (eifr & RCAR_CAN_EIFR_ORIF) {
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
@@ -629,6 +631,10 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
@@ -784,6 +790,7 @@ static int rcar_can_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->ethtool_ops = &rcar_can_ethtool_ops;
ndev->irq = irq;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
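
Annotation: with this change rcar_can, like the other drivers touched in this series, only fills the TX/RX error counters in data[6]/data[7] when it also sets the new CAN_ERR_CNT bit in the error frame ID, and no longer reports them once the controller has gone bus-off. A hedged sketch of how a userspace consumer of error frames would honour that bit follows; reading frames from a CAN_RAW socket with error frames enabled is assumed and not shown.

/* Illustrative sketch, not from the patch: only trust data[6]/data[7]
 * of an error frame when CAN_ERR_CNT is set in can_id.
 */
#include <stdio.h>
#include <linux/can.h>
#include <linux/can/error.h>

static void handle_error_frame(const struct can_frame *frame)
{
	if (!(frame->can_id & CAN_ERR_FLAG))
		return;		/* not an error frame */

	if (frame->can_id & CAN_ERR_CNT)
		printf("txerr=%d rxerr=%d\n", frame->data[6], frame->data[7]);
	else
		printf("error frame without counter information\n");
}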
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index ba42cef10a53..27085b796e75 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/can/dev.h>
@@ -1052,7 +1053,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = txerr;
@@ -1062,7 +1063,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error passive interrupt\n");
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
cf->data[6] = txerr;
@@ -1695,6 +1696,10 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
u32 fcan_freq)
{
@@ -1711,6 +1716,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv = netdev_priv(ndev);
ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->ethtool_ops = &rcar_canfd_ethtool_ops;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
priv->base = gpriv->base;
@@ -1843,6 +1849,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
of_child = of_get_child_by_name(pdev->dev.of_node, name);
if (of_child && of_device_is_available(of_child))
channels_mask |= BIT(i);
+ of_node_put(of_child);
}
if (chip_id != RENESAS_RZG2L) {
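
Annotation: the added of_node_put() above fixes a device-tree node reference leak. of_get_child_by_name() returns the child with its refcount raised, and that reference must be dropped even when the node was only used for a quick availability check. A small sketch of the general pattern, assuming a hypothetical child name and helper:

/* Illustrative only: "channel0" and this helper are assumptions. */
#include <linux/of.h>

static bool example_channel_available(struct device_node *parent)
{
	struct device_node *child;
	bool available = false;

	child = of_get_child_by_name(parent, "channel0");
	if (child) {
		available = of_device_is_available(child);
		of_node_put(child);	/* balance of_get_child_by_name() */
	}

	return available;
}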
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 2e7638f98cf1..98dfd5f295a7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -52,6 +52,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -183,8 +184,9 @@ static void chipset_init(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
- /* set clock divider and output control register */
- priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG))
+ /* set clock divider and output control register */
+ priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
/* set acceptance filter (accept all) */
priv->write_reg(priv, SJA1000_ACCC0, 0x00);
@@ -209,7 +211,8 @@ static void sja1000_start(struct net_device *dev)
set_reset_mode(dev);
/* Initialize chip if uninitialized at this stage */
- if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG ||
+ priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
chipset_init(dev);
/* Clear error counters and error code capture */
@@ -402,9 +405,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
-
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -426,6 +426,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
@@ -650,6 +655,10 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops sja1000_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_sja1000dev(struct net_device *dev)
{
int ret;
@@ -659,6 +668,7 @@ int register_sja1000dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &sja1000_netdev_ops;
+ dev->ethtool_ops = &sja1000_ethtool_ops;
set_reset_mode(dev);
chipset_init(dev);
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 9d46398f8154..7f736f1df547 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -145,7 +145,8 @@
/*
* Flags for sja1000priv.flags
*/
-#define SJA1000_CUSTOM_IRQ_HANDLER 0x1
+#define SJA1000_CUSTOM_IRQ_HANDLER BIT(0)
+#define SJA1000_QUIRK_NO_CDR_REG BIT(1)
/*
* SJA1000 private data structure
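
Annotation: the flag values are converted to BIT() and a new SJA1000_QUIRK_NO_CDR_REG quirk is introduced so the core skips reading and writing the clock divider register on controllers that do not expose it. A hedged sketch of how a board-support or glue driver could request the quirk before register_sja1000dev(); the helper and its call site are assumptions, only the flag itself comes from the patch.

/* Illustrative sketch: helper name and call site are assumptions. */
#include <linux/netdevice.h>
#include "sja1000.h"

static void example_request_no_cdr_quirk(struct net_device *dev)
{
	struct sja1000_priv *priv = netdev_priv(dev);

	/* tell sja1000_start()/chipset_init() not to touch SJA1000_CDR */
	priv->flags |= SJA1000_QUIRK_NO_CDR_REG;
}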
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index f9ec7bd8dfac..81bc741905fd 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL v2");
struct sja1000_of_data {
size_t priv_sz;
- int (*init)(struct sja1000_priv *priv, struct device_node *of);
+ void (*init)(struct sja1000_priv *priv, struct device_node *of);
};
struct technologic_priv {
@@ -94,15 +94,13 @@ static void sp_technologic_write_reg16(const struct sja1000_priv *priv,
spin_unlock_irqrestore(&tp->io_lock, flags);
}
-static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
+static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
{
struct technologic_priv *tp = priv->priv;
priv->read_reg = sp_technologic_read_reg16;
priv->write_reg = sp_technologic_write_reg16;
spin_lock_init(&tp->io_lock);
-
- return 0;
}
static void sp_populate(struct sja1000_priv *priv,
@@ -210,7 +208,6 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq = NULL;
struct sja1000_platform_data *pdata;
struct device_node *of = pdev->dev.of_node;
- const struct of_device_id *of_id;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
@@ -243,11 +240,9 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- of_id = of_match_device(sp_of_table, &pdev->dev);
- if (of_id && of_id->data) {
- of_data = of_id->data;
+ of_data = device_get_match_data(&pdev->dev);
+ if (of_data)
priv_sz = of_data->priv_sz;
- }
dev = alloc_sja1000dev(priv_sz);
if (!dev)
@@ -269,11 +264,8 @@ static int sp_probe(struct platform_device *pdev)
if (of) {
sp_populate_of(priv, of);
- if (of_data && of_data->init) {
- err = of_data->init(priv, of);
- if (err)
- goto exit_free;
- }
+ if (of_data && of_data->init)
+ of_data->init(priv, of);
} else {
sp_populate(priv, pdata, res_mem->flags);
}
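
Annotation: the probe path above drops the of_match_device()/of_device_id boilerplate in favour of device_get_match_data(), which returns the matching entry's .data pointer directly and also covers non-OF firmware matching. A minimal sketch of the resulting pattern, using the driver's file-local struct sja1000_of_data as an assumption:

/* Illustrative sketch; struct sja1000_of_data is local to this file. */
#include <linux/platform_device.h>
#include <linux/property.h>

static size_t example_priv_size(struct platform_device *pdev)
{
	const struct sja1000_of_data *of_data;

	of_data = device_get_match_data(&pdev->dev);

	return of_data ? of_data->priv_sz : 0;
}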
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
deleted file mode 100644
index 64a3aee8a7da..000000000000
--- a/drivers/net/can/slcan.c
+++ /dev/null
@@ -1,793 +0,0 @@
-/*
- * slcan.c - serial line CAN interface driver (using tty line discipline)
- *
- * This file is derived from linux/drivers/net/slip/slip.c
- *
- * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
- * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
- * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see http://www.gnu.org/licenses/gpl.html
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/skb.h>
-#include <linux/can/can-ml.h>
-
-MODULE_ALIAS_LDISC(N_SLCAN);
-MODULE_DESCRIPTION("serial line CAN interface");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
-
-#define SLCAN_MAGIC 0x53CA
-
-static int maxdev = 10; /* MAX number of SLCAN channels;
- This can be overridden with
- insmod slcan.ko maxdev=nnn */
-module_param(maxdev, int, 0);
-MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
-
-/* maximum rx buffer len: extended CAN frame with timestamp */
-#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
-
-#define SLC_CMD_LEN 1
-#define SLC_SFF_ID_LEN 3
-#define SLC_EFF_ID_LEN 8
-
-struct slcan {
- int magic;
-
- /* Various fields. */
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
- spinlock_t lock;
- struct work_struct tx_work; /* Flushes transmit buffer */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char rbuff[SLC_MTU]; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next XMIT byte */
- int xleft; /* bytes left in XMIT queue */
-
- unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ERROR 1 /* Parity, etc. error */
-};
-
-static struct net_device **slcan_devs;
-
- /************************************************************************
- * SLCAN ENCAPSULATION FORMAT *
- ************************************************************************/
-
-/*
- * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
- * frame format) a data length code (len) which can be from 0 to 8
- * and up to <len> data bytes as payload.
- * Additionally a CAN frame may become a remote transmission frame if the
- * RTR-bit is set. This causes another ECU to send a CAN frame with the
- * given can_id.
- *
- * The SLCAN ASCII representation of these different frame types is:
- * <type> <id> <dlc> <data>*
- *
- * Extended frames (29 bit) are defined by capital characters in the type.
- * RTR frames are defined as 'r' types - normal frames have 't' type:
- * t => 11 bit data frame
- * r => 11 bit RTR frame
- * T => 29 bit data frame
- * R => 29 bit RTR frame
- *
- * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64).
- * The <dlc> is a one byte ASCII number ('0' - '8')
- * The <data> section has at much ASCII Hex bytes as defined by the <dlc>
- *
- * Examples:
- *
- * t1230 : can_id 0x123, len 0, no data
- * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
- * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
- * r1230 : can_id 0x123, len 0, no data, remote transmission request
- *
- */
-
- /************************************************************************
- * STANDARD SLCAN DECAPSULATION *
- ************************************************************************/
-
-/* Send one completely decapsulated can_frame to the network layer */
-static void slc_bump(struct slcan *sl)
-{
- struct sk_buff *skb;
- struct can_frame cf;
- int i, tmp;
- u32 tmpid;
- char *cmd = sl->rbuff;
-
- memset(&cf, 0, sizeof(cf));
-
- switch (*cmd) {
- case 'r':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 't':
- /* store dlc ASCII value and terminate SFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
- break;
- case 'R':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 'T':
- cf.can_id |= CAN_EFF_FLAG;
- /* store dlc ASCII value and terminate EFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
- break;
- default:
- return;
- }
-
- if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
- return;
-
- cf.can_id |= tmpid;
-
- /* get len from sanitized ASCII value */
- if (cf.len >= '0' && cf.len < '9')
- cf.len -= '0';
- else
- return;
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf.can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf.len; i++) {
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] = (tmp << 4);
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] |= tmp;
- }
- }
-
- skb = dev_alloc_skb(sizeof(struct can_frame) +
- sizeof(struct can_skb_priv));
- if (!skb)
- return;
-
- skb->dev = sl->dev;
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = sl->dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb_put_data(skb, &cf, sizeof(struct can_frame));
-
- sl->dev->stats.rx_packets++;
- if (!(cf.can_id & CAN_RTR_FLAG))
- sl->dev->stats.rx_bytes += cf.len;
-
- netif_rx(skb);
-}
-
-/* parse tty input stream */
-static void slcan_unesc(struct slcan *sl, unsigned char s)
-{
- if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- (sl->rcount > 4)) {
- slc_bump(sl);
- }
- sl->rcount = 0;
- } else {
- if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < SLC_MTU) {
- sl->rbuff[sl->rcount++] = s;
- return;
- } else {
- sl->dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
- }
- }
-}
-
- /************************************************************************
- * STANDARD SLCAN ENCAPSULATION *
- ************************************************************************/
-
-/* Encapsulate one can_frame and stuff into a TTY queue. */
-static void slc_encaps(struct slcan *sl, struct can_frame *cf)
-{
- int actual, i;
- unsigned char *pos;
- unsigned char *endpos;
- canid_t id = cf->can_id;
-
- pos = sl->xbuff;
-
- if (cf->can_id & CAN_RTR_FLAG)
- *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
- else
- *pos = 'T'; /* becomes 't' in standard frame format (SSF) */
-
- /* determine number of chars for the CAN-identifier */
- if (cf->can_id & CAN_EFF_FLAG) {
- id &= CAN_EFF_MASK;
- endpos = pos + SLC_EFF_ID_LEN;
- } else {
- *pos |= 0x20; /* convert R/T to lower case for SFF */
- id &= CAN_SFF_MASK;
- endpos = pos + SLC_SFF_ID_LEN;
- }
-
- /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
- pos++;
- while (endpos >= pos) {
- *endpos-- = hex_asc_upper[id & 0xf];
- id >>= 4;
- }
-
- pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
-
- *pos++ = cf->len + '0';
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf->len; i++)
- pos = hex_byte_pack_upper(pos, cf->data[i]);
-
- sl->dev->stats.tx_bytes += cf->len;
- }
-
- *pos++ = '\r';
-
- /* Order of next two lines is *very* important.
- * When we are sending a little amount of data,
- * the transfer may be completed inside the ops->write()
- * routine, because it's running with interrupts enabled.
- * In this case we *never* got WRITE_WAKEUP event,
- * if we did not request it before write operation.
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
- sl->xleft = (pos - sl->xbuff) - actual;
- sl->xhead = sl->xbuff + actual;
-}
-
-/* Write out any remaining transmit buffer. Scheduled when tty is writable */
-static void slcan_transmit(struct work_struct *work)
-{
- struct slcan *sl = container_of(work, struct slcan, tx_work);
- int actual;
-
- spin_lock_bh(&sl->lock);
- /* First make sure we're connected. */
- if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
- spin_unlock_bh(&sl->lock);
- return;
- }
-
- if (sl->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet */
- sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- spin_unlock_bh(&sl->lock);
- netif_wake_queue(sl->dev);
- return;
- }
-
- actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
- sl->xleft -= actual;
- sl->xhead += actual;
- spin_unlock_bh(&sl->lock);
-}
-
-/*
- * Called by the driver when there's room for more data.
- * Schedule the transmit.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
-{
- struct slcan *sl;
-
- rcu_read_lock();
- sl = rcu_dereference(tty->disc_data);
- if (sl)
- schedule_work(&sl->tx_work);
- rcu_read_unlock();
-}
-
-/* Send a can_frame to a TTY queue. */
-static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
- spin_lock(&sl->lock);
- if (!netif_running(dev)) {
- spin_unlock(&sl->lock);
- printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
- goto out;
- }
- if (sl->tty == NULL) {
- spin_unlock(&sl->lock);
- goto out;
- }
-
- netif_stop_queue(sl->dev);
- slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
- spin_unlock(&sl->lock);
-
-out:
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-/******************************************
- * Routines looking at netdevice side.
- ******************************************/
-
-/* Netdevice UP -> DOWN routine */
-static int slc_close(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- /* TTY discipline is running. */
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- }
- netif_stop_queue(dev);
- sl->rcount = 0;
- sl->xleft = 0;
- spin_unlock_bh(&sl->lock);
-
- return 0;
-}
-
-/* Netdevice DOWN -> UP routine */
-static int slc_open(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (sl->tty == NULL)
- return -ENODEV;
-
- sl->flags &= (1 << SLF_INUSE);
- netif_start_queue(dev);
- return 0;
-}
-
-/* Hook the destructor so we can free slcan devs at the right point in time */
-static void slc_free_netdev(struct net_device *dev)
-{
- int i = dev->base_addr;
-
- slcan_devs[i] = NULL;
-}
-
-static int slcan_change_mtu(struct net_device *dev, int new_mtu)
-{
- return -EINVAL;
-}
-
-static const struct net_device_ops slc_netdev_ops = {
- .ndo_open = slc_open,
- .ndo_stop = slc_close,
- .ndo_start_xmit = slc_xmit,
- .ndo_change_mtu = slcan_change_mtu,
-};
-
-static void slc_setup(struct net_device *dev)
-{
- dev->netdev_ops = &slc_netdev_ops;
- dev->needs_free_netdev = true;
- dev->priv_destructor = slc_free_netdev;
-
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- dev->mtu = CAN_MTU;
- dev->type = ARPHRD_CAN;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-/******************************************
- Routines looking at TTY side.
- ******************************************/
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of SLCAN data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing. This will not
- * be re-entered while running but other ldisc functions may be called
- * in parallel
- */
-
-static void slcan_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp,
- int count)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return;
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp && *fp++) {
- if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->dev->stats.rx_errors++;
- cp++;
- continue;
- }
- slcan_unesc(sl, *cp++);
- }
-}
-
-/************************************
- * slcan_open helper routines.
- ************************************/
-
-/* Collect hanged up channels */
-static void slc_sync(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- sl = netdev_priv(dev);
- if (sl->tty)
- continue;
- if (dev->flags & IFF_UP)
- dev_close(dev);
- }
-}
-
-/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(void)
-{
- int i;
- char name[IFNAMSIZ];
- struct net_device *dev = NULL;
- struct can_ml_priv *can_ml;
- struct slcan *sl;
- int size;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- }
-
- /* Sorry, too many, all slots in use */
- if (i >= maxdev)
- return NULL;
-
- sprintf(name, "slcan%d", i);
- size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
- dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
- if (!dev)
- return NULL;
-
- dev->base_addr = i;
- sl = netdev_priv(dev);
- can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
- can_set_ml_priv(dev, can_ml);
-
- /* Initialize channel control data */
- sl->magic = SLCAN_MAGIC;
- sl->dev = dev;
- spin_lock_init(&sl->lock);
- INIT_WORK(&sl->tx_work, slcan_transmit);
- slcan_devs[i] = dev;
-
- return sl;
-}
-
-/*
- * Open the high-level part of the SLCAN channel.
- * This function is called by the TTY module when the
- * SLCAN line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free SLCAN channel...
- *
- * Called in process context serialized from other ldisc calls.
- */
-
-static int slcan_open(struct tty_struct *tty)
-{
- struct slcan *sl;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (tty->ops->write == NULL)
- return -EOPNOTSUPP;
-
- /* RTnetlink lock is misused here to serialize concurrent
- opens of slcan channels. There are better ways, but it is
- the simplest one.
- */
- rtnl_lock();
-
- /* Collect hanged up channels. */
- slc_sync();
-
- sl = tty->disc_data;
-
- err = -EEXIST;
- /* First make sure we're not already connected. */
- if (sl && sl->magic == SLCAN_MAGIC)
- goto err_exit;
-
- /* OK. Find a free SLCAN channel to use. */
- err = -ENFILE;
- sl = slc_alloc();
- if (sl == NULL)
- goto err_exit;
-
- sl->tty = tty;
- tty->disc_data = sl;
-
- if (!test_bit(SLF_INUSE, &sl->flags)) {
- /* Perform the low-level SLCAN initialization. */
- sl->rcount = 0;
- sl->xleft = 0;
-
- set_bit(SLF_INUSE, &sl->flags);
-
- err = register_netdevice(sl->dev);
- if (err)
- goto err_free_chan;
- }
-
- /* Done. We have linked the TTY line to a channel. */
- rtnl_unlock();
- tty->receive_room = 65536; /* We don't flow control */
-
- /* TTY layer expects 0 on success */
- return 0;
-
-err_free_chan:
- sl->tty = NULL;
- tty->disc_data = NULL;
- clear_bit(SLF_INUSE, &sl->flags);
- slc_free_netdev(sl->dev);
- /* do not call free_netdev before rtnl_unlock */
- rtnl_unlock();
- free_netdev(sl->dev);
- return err;
-
-err_exit:
- rtnl_unlock();
-
- /* Count references from TTY module */
- return err;
-}
-
-/*
- * Close down a SLCAN channel.
- * This means flushing out any pending queues, and then returning. This
- * call is serialized against other ldisc functions.
- *
- * We also use this method for a hangup event.
- */
-
-static void slcan_close(struct tty_struct *tty)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
- return;
-
- spin_lock_bh(&sl->lock);
- rcu_assign_pointer(tty->disc_data, NULL);
- sl->tty = NULL;
- spin_unlock_bh(&sl->lock);
-
- synchronize_rcu();
- flush_work(&sl->tx_work);
-
- /* Flush network side */
- unregister_netdev(sl->dev);
- /* This will complete via sl_free_netdev */
-}
-
-static void slcan_hangup(struct tty_struct *tty)
-{
- slcan_close(tty);
-}
-
-/* Perform I/O control on an active SLCAN channel. */
-static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
- unsigned int tmp;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- tmp = strlen(sl->dev->name) + 1;
- if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
- return -EFAULT;
- return 0;
-
- case SIOCSIFHWADDR:
- return -EINVAL;
-
- default:
- return tty_mode_ioctl(tty, cmd, arg);
- }
-}
-
-static struct tty_ldisc_ops slc_ldisc = {
- .owner = THIS_MODULE,
- .num = N_SLCAN,
- .name = "slcan",
- .open = slcan_open,
- .close = slcan_close,
- .hangup = slcan_hangup,
- .ioctl = slcan_ioctl,
- .receive_buf = slcan_receive_buf,
- .write_wakeup = slcan_write_wakeup,
-};
-
-static int __init slcan_init(void)
-{
- int status;
-
- if (maxdev < 4)
- maxdev = 4; /* Sanity */
-
- pr_info("slcan: serial line CAN interface driver\n");
- pr_info("slcan: %d dynamic interface channels.\n", maxdev);
-
- slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
- if (!slcan_devs)
- return -ENOMEM;
-
- /* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(&slc_ldisc);
- if (status) {
- printk(KERN_ERR "slcan: can't register line discipline\n");
- kfree(slcan_devs);
- }
- return status;
-}
-
-static void __exit slcan_exit(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
- unsigned long timeout = jiffies + HZ;
- int busy = 0;
-
- if (slcan_devs == NULL)
- return;
-
- /* First of all: check for active disciplines and hangup them.
- */
- do {
- if (busy)
- msleep_interruptible(100);
-
- busy = 0;
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- sl = netdev_priv(dev);
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- busy++;
- tty_hangup(sl->tty);
- }
- spin_unlock_bh(&sl->lock);
- }
- } while (busy && time_before(jiffies, timeout));
-
- /* FIXME: hangup is async so we should wait when doing this second
- phase */
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- slcan_devs[i] = NULL;
-
- sl = netdev_priv(dev);
- if (sl->tty) {
- printk(KERN_ERR "%s: tty discipline still running\n",
- dev->name);
- }
-
- unregister_netdev(dev);
- }
-
- kfree(slcan_devs);
- slcan_devs = NULL;
-
- tty_unregister_ldisc(&slc_ldisc);
-}
-
-module_init(slcan_init);
-module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/Makefile b/drivers/net/can/slcan/Makefile
new file mode 100644
index 000000000000..8a88e484ee21
--- /dev/null
+++ b/drivers/net/can/slcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
+
+slcan-objs :=
+slcan-objs += slcan-core.o
+slcan-objs += slcan-ethtool.o
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
new file mode 100644
index 000000000000..8d13fdf8c28a
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -0,0 +1,939 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip/slip.c and takes
+ * inspiration from linux/drivers/net/can/can327.c for the rework of
+ * the line discipline code.
+ *
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+#include "slcan.h"
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1)
+
+#define SLCAN_CMD_LEN 1
+#define SLCAN_SFF_ID_LEN 3
+#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_STATE_LEN 1
+#define SLCAN_STATE_BE_RXCNT_LEN 3
+#define SLCAN_STATE_BE_TXCNT_LEN 3
+#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+struct slcan {
+ struct can_priv can;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next XMIT byte */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_ERROR 0 /* Parity, etc. error */
+#define SLF_XCMD 1 /* Command transmission */
+ unsigned long cmd_flags; /* Command flags */
+#define CF_ERR_RST 0 /* Reset errors on open */
+ wait_queue_head_t xcmd_wait; /* Wait queue for commands */
+ /* transmission */
+};
+
+static const u32 slcan_bitrate_const[] = {
+ 10000, 20000, 50000, 100000, 125000,
+ 250000, 500000, 800000, 1000000
+};
+
+bool slcan_err_rst_on_open(struct net_device *ndev)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ return !!test_bit(CF_ERR_RST, &sl->cmd_flags);
+}
+
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (on)
+ set_bit(CF_ERR_RST, &sl->cmd_flags);
+ else
+ clear_bit(CF_ERR_RST, &sl->cmd_flags);
+
+ return 0;
+}
+
+/*************************************************************************
+ * SLCAN ENCAPSULATION FORMAT *
+ *************************************************************************/
+
+/* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format) a data length code (len) which can be from 0 to 8
+ * and up to <len> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8')
+ * The <data> section has as many ASCII Hex bytes as defined by the <dlc>
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, len 0, no data
+ * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, len 0, no data, remote transmission request
+ *
+ */
+
+/*************************************************************************
+ * STANDARD SLCAN DECAPSULATION *
+ *************************************************************************/
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slcan_bump_frame(struct slcan *sl)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int i, tmp;
+ u32 tmpid;
+ char *cmd = sl->rbuff;
+
+ skb = alloc_can_skb(sl->dev, &cf);
+ if (unlikely(!skb)) {
+ sl->dev->stats.rx_dropped++;
+ return;
+ }
+
+ switch (*cmd) {
+ case 'r':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 't':
+ /* store dlc ASCII value and terminate SFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1;
+ break;
+ case 'R':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 'T':
+ cf->can_id |= CAN_EFF_FLAG;
+ /* store dlc ASCII value and terminate EFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1;
+ break;
+ default:
+ goto decode_failed;
+ }
+
+ if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid))
+ goto decode_failed;
+
+ cf->can_id |= tmpid;
+
+ /* get len from sanitized ASCII value */
+ if (cf->len >= '0' && cf->len < '9')
+ cf->len -= '0';
+ else
+ goto decode_failed;
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++) {
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] = (tmp << 4);
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] |= tmp;
+ }
+ }
+
+ sl->dev->stats.rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ sl->dev->stats.rx_bytes += cf->len;
+
+ netif_rx(skb);
+ return;
+
+decode_failed:
+ sl->dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+}
+
+/* A change state frame must contain state info and receive and transmit
+ * error counters.
+ *
+ * Examples:
+ *
+ * sb256256 : state bus-off: rx counter 256, tx counter 256
+ * sa057033 : state active, rx counter 57, tx counter 33
+ */
+static void slcan_bump_state(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ u32 rxerr, txerr;
+ enum can_state state, rx_state, tx_state;
+
+ switch (cmd[1]) {
+ case 'a':
+ state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case 'w':
+ state = CAN_STATE_ERROR_WARNING;
+ break;
+ case 'p':
+ state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case 'b':
+ state = CAN_STATE_BUS_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ return;
+
+ cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
+ cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0;
+ if (kstrtou32(cmd, 10, &txerr))
+ return;
+
+ *cmd = 0;
+ cmd -= SLCAN_STATE_BE_RXCNT_LEN;
+ if (kstrtou32(cmd, 10, &rxerr))
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (state == CAN_STATE_BUS_OFF) {
+ can_bus_off(dev);
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (skb)
+ netif_rx(skb);
+}
+
+/* An error frame can contain more than one type of error.
+ *
+ * Examples:
+ *
+ * e1a : len 1, errors: ACK error
+ * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error
+ */
+static void slcan_bump_err(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ bool rx_errors = false, tx_errors = false, rx_over_errors = false;
+ int i, len;
+
+ /* get len from sanitized ASCII value */
+ len = cmd[1];
+ if (len >= '0' && len < '9')
+ len -= '0';
+ else
+ return;
+
+ if ((len + SLCAN_CMD_LEN + 1) > sl->rcount)
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ cmd += SLCAN_CMD_LEN + 1;
+ for (i = 0; i < len; i++, cmd++) {
+ switch (*cmd) {
+ case 'a':
+ netdev_dbg(dev, "ACK error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+
+ break;
+ case 'b':
+ netdev_dbg(dev, "Bit0 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ break;
+ case 'B':
+ netdev_dbg(dev, "Bit1 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ break;
+ case 'c':
+ netdev_dbg(dev, "CRC error\n");
+ rx_errors = true;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+
+ break;
+ case 'f':
+ netdev_dbg(dev, "Form Error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ break;
+ case 'o':
+ netdev_dbg(dev, "Rx overrun error\n");
+ rx_over_errors = true;
+ rx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ break;
+ case 'O':
+ netdev_dbg(dev, "Tx overrun error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW;
+ }
+
+ break;
+ case 's':
+ netdev_dbg(dev, "Stuff error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ break;
+ default:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return;
+ }
+ }
+
+ if (rx_errors)
+ dev->stats.rx_errors++;
+
+ if (rx_over_errors)
+ dev->stats.rx_over_errors++;
+
+ if (tx_errors)
+ dev->stats.tx_errors++;
+
+ if (skb)
+ netif_rx(skb);
+}
+
+static void slcan_bump(struct slcan *sl)
+{
+ switch (sl->rbuff[0]) {
+ case 'r':
+ fallthrough;
+ case 't':
+ fallthrough;
+ case 'R':
+ fallthrough;
+ case 'T':
+ return slcan_bump_frame(sl);
+ case 'e':
+ return slcan_bump_err(sl);
+ case 's':
+ return slcan_bump_state(sl);
+ default:
+ return;
+ }
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+ if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+ sl->rcount > 4)
+ slcan_bump(sl);
+
+ sl->rcount = 0;
+ } else {
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < SLCAN_MTU) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ }
+
+ sl->dev->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+}
+
+/*************************************************************************
+ * STANDARD SLCAN ENCAPSULATION *
+ *************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slcan_encaps(struct slcan *sl, struct can_frame *cf)
+{
+ int actual, i;
+ unsigned char *pos;
+ unsigned char *endpos;
+ canid_t id = cf->can_id;
+
+ pos = sl->xbuff;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
+ else
+ *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
+
+ /* determine number of chars for the CAN-identifier */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ endpos = pos + SLCAN_EFF_ID_LEN;
+ } else {
+ *pos |= 0x20; /* convert R/T to lower case for SFF */
+ id &= CAN_SFF_MASK;
+ endpos = pos + SLCAN_SFF_ID_LEN;
+ }
+
+ /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
+ pos++;
+ while (endpos >= pos) {
+ *endpos-- = hex_asc_upper[id & 0xf];
+ id >>= 4;
+ }
+
+ pos += (cf->can_id & CAN_EFF_FLAG) ?
+ SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN;
+
+ *pos++ = cf->len + '0';
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++)
+ pos = hex_byte_pack_upper(pos, cf->data[i]);
+
+ sl->dev->stats.tx_bytes += cf->len;
+ }
+
+ *pos++ = '\r';
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a little amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it's running with interrupts enabled.
+ * In this case we *never* got WRITE_WAKEUP event,
+ * if we did not request it before write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
+ sl->xleft = (pos - sl->xbuff) - actual;
+ sl->xhead = sl->xbuff + actual;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
+{
+ struct slcan *sl = container_of(work, struct slcan, tx_work);
+ int actual;
+
+ spin_lock_bh(&sl->lock);
+ /* First make sure we're connected. */
+ if (unlikely(!netif_running(sl->dev)) &&
+ likely(!test_bit(SLF_XCMD, &sl->flags))) {
+ spin_unlock_bh(&sl->lock);
+ return;
+ }
+
+ if (sl->xleft <= 0) {
+ if (unlikely(test_bit(SLF_XCMD, &sl->flags))) {
+ clear_bit(SLF_XCMD, &sl->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ wake_up(&sl->xcmd_wait);
+ return;
+ }
+
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet
+ */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+ spin_unlock_bh(&sl->lock);
+}
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ schedule_work(&sl->tx_work);
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ netdev_warn(dev, "xmit: iface is down\n");
+ goto out;
+ }
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ goto out;
+ }
+
+ netif_stop_queue(sl->dev);
+ slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */
+ spin_unlock(&sl->lock);
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
+static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd)
+{
+ int ret, actual, n;
+
+ spin_lock(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ return -ENODEV;
+ }
+
+ n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, n);
+ sl->xleft = n - actual;
+ sl->xhead = sl->xbuff + actual;
+ set_bit(SLF_XCMD, &sl->flags);
+ spin_unlock(&sl->lock);
+ ret = wait_event_interruptible_timeout(sl->xcmd_wait,
+ !test_bit(SLF_XCMD, &sl->flags),
+ HZ);
+ clear_bit(SLF_XCMD, &sl->flags);
+ if (ret == -ERESTARTSYS)
+ return ret;
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Netdevice UP -> DOWN routine */
+static int slcan_netdev_close(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ int err;
+
+ if (sl->can.bittiming.bitrate &&
+ sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ err = slcan_transmit_cmd(sl, "C\r");
+ if (err)
+ netdev_warn(dev,
+ "failed to send close command 'C\\r'\n");
+ }
+
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ flush_work(&sl->tx_work);
+
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ close_candev(dev);
+ sl->can.state = CAN_STATE_STOPPED;
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNSET;
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slcan_netdev_open(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ unsigned char cmd[SLCAN_MTU];
+ int err, s;
+
+ /* The baud rate is not set with the command
+ * `ip link set <iface> type can bitrate <baud>' and therefore
+ * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing
+ * open_candev() to fail. So let's set it to a fake value.
+ */
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN;
+
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) {
+ if (sl->can.bittiming.bitrate == slcan_bitrate_const[s])
+ break;
+ }
+
+ /* The CAN framework has already validated the bitrate value,
+ * so we can avoid checking whether `s' has been properly set.
+ */
+ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s);
+ err = slcan_transmit_cmd(sl, cmd);
+ if (err) {
+ netdev_err(dev,
+ "failed to send bitrate command 'C\\rS%d\\r'\n",
+ s);
+ goto cmd_transmit_failed;
+ }
+
+ if (test_bit(CF_ERR_RST, &sl->cmd_flags)) {
+ err = slcan_transmit_cmd(sl, "F\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send error command 'F\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+
+ if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ err = slcan_transmit_cmd(sl, "L\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send listen-only command 'L\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ } else {
+ err = slcan_transmit_cmd(sl, "O\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send open command 'O\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+ }
+
+ sl->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+ return 0;
+
+cmd_transmit_failed:
+ close_candev(dev);
+ return err;
+}
+
+static const struct net_device_ops slcan_netdev_ops = {
+ .ndo_open = slcan_netdev_open,
+ .ndo_stop = slcan_netdev_close,
+ .ndo_start_xmit = slcan_netdev_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+/******************************************
+ * Routines looking at TTY side.
+ ******************************************/
+
+/* Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel
+ */
+static void slcan_receive_buf(struct tty_struct *tty,
+ const unsigned char *cp, const char *fp,
+ int count)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ if (!netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+ sl->dev->stats.rx_errors++;
+ cp++;
+ continue;
+ }
+ slcan_unesc(sl, *cp++);
+ }
+}
+
+/* Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for.
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+static int slcan_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct slcan *sl;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(*sl), 1);
+ if (!dev)
+ return -ENFILE;
+
+ sl = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slcan_transmit);
+ init_waitqueue_head(&sl->xcmd_wait);
+
+ /* Configure CAN metadata */
+ sl->can.bitrate_const = slcan_bitrate_const;
+ sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
+ sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ sl->dev = dev;
+ dev->netdev_ops = &slcan_netdev_ops;
+ dev->ethtool_ops = &slcan_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ sl->tty = tty;
+ tty->disc_data = sl;
+
+ err = register_candev(dev);
+ if (err) {
+ free_candev(dev);
+ pr_err("can't register candev\n");
+ return err;
+ }
+
+ netdev_info(dev, "slcan on %s.\n", tty->name);
+ /* TTY layer expects 0 on success */
+ return 0;
+}
+
+/* Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this method for a hangup event.
+ */
+static void slcan_close(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to.
+ * Our .ndo_stop() also flushes the TTY write wakeup handler,
+ * so we can safely set sl->tty = NULL after this.
+ */
+ unregister_candev(sl->dev);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&sl->lock);
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ spin_unlock_bh(&sl->lock);
+
+ netdev_info(sl->dev, "slcan off %s.\n", tty->name);
+ free_candev(sl->dev);
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops slcan_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_SLCAN,
+ .name = KBUILD_MODNAME,
+ .open = slcan_open,
+ .close = slcan_close,
+ .ioctl = slcan_ioctl,
+ .receive_buf = slcan_receive_buf,
+ .write_wakeup = slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+ int status;
+
+ pr_info("serial line CAN interface driver\n");
+
+ /* Fill in our line protocol discipline, and register it */
+ status = tty_register_ldisc(&slcan_ldisc);
+ if (status)
+ pr_err("can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit slcan_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&slcan_ldisc);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
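
Annotation: with this rework the driver registers one CAN netdevice per TTY as soon as the slcan line discipline is attached, and the bitrate is configured through the regular CAN netlink interface instead of module parameters. Below is a hedged userspace sketch of attaching the discipline by hand; the serial device path and the resulting interface name are assumptions, and the slcand/slcan_attach tools from can-utils do the same job with more options. After attaching, something like "ip link set can0 type can bitrate 500000" followed by "ip link set can0 up" would pick one of the 'S%d' bitrates and send the open command.

/* Userspace sketch (assumption, not part of the patch): attach the
 * slcan line discipline to a serial port.  The device path is an
 * example only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_SLCAN
#define N_SLCAN 17	/* matches the ldisc number in <linux/tty.h> */
#endif

int main(void)
{
	int ldisc = N_SLCAN;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		return 1;
	}

	pause();	/* keep the fd open; closing it detaches the ldisc */
	return 0;
}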
diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c
new file mode 100644
index 000000000000..f598c653fbfa
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-ethtool.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "slcan.h"
+
+static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0)
+ "err-rst-on-open",
+};
+
+static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, slcan_priv_flags_strings,
+ sizeof(slcan_priv_flags_strings));
+ }
+}
+
+static u32 slcan_get_priv_flags(struct net_device *ndev)
+{
+ u32 flags = 0;
+
+ if (slcan_err_rst_on_open(ndev))
+ flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN;
+
+ return flags;
+}
+
+static int slcan_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ bool err_rst_op_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN);
+
+ return slcan_enable_err_rst_on_open(ndev, err_rst_op_open);
+}
+
+static int slcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(slcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+const struct ethtool_ops slcan_ethtool_ops = {
+ .get_strings = slcan_get_strings,
+ .get_priv_flags = slcan_get_priv_flags,
+ .set_priv_flags = slcan_set_priv_flags,
+ .get_sset_count = slcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
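
Annotation: the new slcan-ethtool.c exposes the reset-error-states behaviour as an ethtool private flag rather than a module parameter. Assuming the interface is named can0 (an example only), it can be inspected and toggled with the standard ethtool commands below; note that slcan_enable_err_rst_on_open() rejects the change with -EBUSY while the interface is up.

ethtool --show-priv-flags can0
ethtool --set-priv-flags can0 err-rst-on-open on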
diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h
new file mode 100644
index 000000000000..85cedf856db3
--- /dev/null
+++ b/drivers/net/can/slcan/slcan.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * slcan.h - serial line CAN interface driver
+ *
+ * Copyright (C) Laurence Culhane <loz@holmes.demon.co.uk>
+ * Copyright (C) Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * Copyright (C) Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#ifndef _SLCAN_H
+#define _SLCAN_H
+
+bool slcan_err_rst_on_open(struct net_device *ndev);
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on);
+
+extern const struct ethtool_ops slcan_ethtool_ops;
+
+#endif /* _SLCAN_H */
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 8d27ac66ca7f..a5ef57f415f7 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -5,6 +5,7 @@
* - Kurt Van Dijck, EIA Electronics
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/io.h>
@@ -611,8 +612,12 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops softing_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const softing_btr_const = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -649,6 +654,7 @@ static struct net_device *softing_netdev_create(struct softing *card,
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &softing_netdev_ops;
+ netdev->ethtool_ops = &softing_ethtool_ops;
priv->can.do_set_mode = softing_candev_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
@@ -846,7 +852,7 @@ platform_resource_failed:
static struct platform_driver softing_driver = {
.driver = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
.remove = softing_pdev_remove,
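
Two cleanups recur throughout this series and show up here in a legacy driver: a minimal ethtool_ops whose only member is .get_ts_info = ethtool_op_get_ts_info, and KBUILD_MODNAME replacing hand-written "softing" strings so the driver name can no longer drift from the module name. The stock helper only advertises software timestamping and no PHC; roughly, paraphrased from the ethtool core rather than taken from this patch:

/* needs <linux/ethtool.h>; paraphrase of the in-tree helper, not verbatim */
int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	/* software TX/RX timestamps only, no hardware clock behind them */
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	return 0;
}

With that in place, "ethtool -T" on the interface reports TX and RX software timestamping.
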
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index ebc4ebb44c98..b87dc420428d 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -667,8 +668,6 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
@@ -681,6 +680,10 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
hi3110_hw_sleep(spi);
break;
}
+ } else {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
}
@@ -800,6 +803,10 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_start_xmit = hi3110_hard_start_xmit,
};
+static const struct ethtool_ops hi3110_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id hi3110_of_match[] = {
{
.compatible = "holt,hi3110",
@@ -854,6 +861,7 @@ static int hi3110_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &hi3110_netdev_ops;
+ net->ethtool_ops = &hi3110_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
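
The error-counter handling above follows a series-wide convention: data[6]/data[7] are only filled in together with the new CAN_ERR_CNT bit in can_id, so receivers of error frames can tell "counters valid" apart from "field merely left at zero". A hedged user-space sketch of a CAN_RAW listener honouring that rule (assumes headers that already define CAN_ERR_CNT):

#include <stdio.h>
#include <linux/can.h>
#include <linux/can/error.h>

/* Print TX/RX error counters from a received error frame, but only when
 * the driver marked them valid via CAN_ERR_CNT. */
static void dump_error_counters(const struct can_frame *cf)
{
	if (!(cf->can_id & CAN_ERR_FLAG))
		return;				/* not an error frame */

	if (cf->can_id & CAN_ERR_CNT)
		printf("txerr=%u rxerr=%u\n", cf->data[6], cf->data[7]);
	else
		printf("error counters not reported\n");
}
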
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 666a4505a55a..c320de474f40 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -1069,9 +1070,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1081,6 +1079,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1091,6 +1101,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
@@ -1248,6 +1261,10 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mcp251x_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id mcp251x_of_match[] = {
{
.compatible = "microchip,mcp2510",
@@ -1313,6 +1330,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &mcp251x_netdev_ops;
+ net->ethtool_ops = &mcp251x_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index dd0fc0a54be1..877e4356010d 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -2,6 +2,7 @@
config CAN_MCP251XFD
tristate "Microchip MCP251xFD SPI CAN controllers"
+ select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
help
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 9b47b07162fe..68df6d4641b5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1099,6 +1099,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
if (err)
return err;
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
}
@@ -1670,6 +1671,7 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_change_mtu = can_change_mtu,
};
@@ -1690,8 +1692,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
u32 osc;
int err;
- /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
- * autodetect the model.
+ /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863,
+ * so use it to autodetect the model.
*/
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
MCP251XFD_REG_OSC_LPMEN,
@@ -1703,10 +1705,18 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
if (err)
return err;
- if (osc & MCP251XFD_REG_OSC_LPMEN)
- devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
- else
+ if (osc & MCP251XFD_REG_OSC_LPMEN) {
+ /* We cannot distinguish between MCP2518FD and
+ * MCP251863. If firmware specifies MCP251863, keep
+ * it, otherwise set to MCP2518FD.
+ */
+ if (mcp251xfd_is_251863(priv))
+ devtype_data = &mcp251xfd_devtype_data_mcp251863;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ } else {
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+ }
if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
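
The autodetection above boils down to "try to set OSC_LPMEN and see whether it sticks", since only the newer parts implement that bit. A stripped-down sketch of the idiom, assuming <linux/regmap.h> and the driver's register macros; error handling is dropped here although the real code checks both calls:

/* true on MCP2518FD/MCP251863-class silicon, false on MCP2517FD */
static bool example_osc_lpmen_sticks(struct regmap *map)
{
	unsigned int osc = 0;

	regmap_update_bits(map, MCP251XFD_REG_OSC,
			   MCP251XFD_REG_OSC_LPMEN, MCP251XFD_REG_OSC_LPMEN);
	regmap_read(map, MCP251XFD_REG_OSC, &osc);

	return osc & MCP251XFD_REG_OSC_LPMEN;
}
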
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index c991b30bc9f0..004eaf96262b 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -253,7 +253,7 @@ void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) *
sizeof(struct mcp251xfd_dump_object_reg);
- /* TEF ring, RX ring, TX rings */
+ /* TEF ring, RX rings, TX ring */
rings_num = 1 + priv->rx_ring_num + 1;
obj_num += rings_num;
file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX *
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
index 6c7a57f16cc6..3585f02575df 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -124,6 +124,7 @@ static const struct ethtool_ops mcp251xfd_ethtool_ops = {
.set_ringparam = mcp251xfd_ring_set_ringparam,
.get_coalesce = mcp251xfd_ring_get_coalesce,
.set_coalesce = mcp251xfd_ring_set_coalesce,
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
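
Controllers with free-running hardware RX timestamps get the hardware-capable pair of helpers instead of the plain software ones: can_eth_ioctl_hwts() behind .ndo_eth_ioctl and can_ethtool_op_get_ts_info_hwts() behind .get_ts_info. Their behaviour matches the driver-local peak_eth_ioctl()/pcan_get_ts_info() added further down in this same patch; as a rough, non-verbatim sketch of what the ts_info side reports:

/* hardware RX timestamps are always on, TX timestamps stay software only */
int example_get_ts_info_hwts(struct net_device *dev, struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = -1;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}
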
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 155b90f6c767..525309da1320 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -53,6 +53,7 @@
#include <linux/can/error.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -535,11 +536,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
rxerr = (errc >> 16) & 0xFF;
txerr = errc & 0xFF;
- if (skb) {
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
if (isrc & SUN4I_INT_DATA_OR) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -570,6 +566,11 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (skb && state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & SUN4I_INT_BUS_ERR) {
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
@@ -761,6 +762,10 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_start_xmit = sun4ican_start_xmit,
};
+static const struct ethtool_ops sun4ican_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct sun4ican_quirks sun4ican_quirks_a10 = {
.has_reset = false,
};
@@ -851,6 +856,7 @@ static int sun4ican_probe(struct platform_device *pdev)
}
dev->netdev_ops = &sun4ican_netdev_ops;
+ dev->ethtool_ops = &sun4ican_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index debe17bfd0f0..b218fb3c6b76 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI HECC (CAN) device driver
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
@@ -23,6 +14,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
@@ -662,6 +654,7 @@ static void ti_hecc_change_state(struct net_device *ndev,
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = hecc_read(priv, HECC_CANTEC);
cf->data[7] = hecc_read(priv, HECC_CANREC);
}
@@ -840,6 +833,10 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ti_hecc_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id ti_hecc_dt_ids[] = {
{
.compatible = "ti,am3517-hecc",
@@ -917,6 +914,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &ti_hecc_netdev_ops;
+ ndev->ethtool_ops = &ti_hecc_ethtool_ops;
priv->clk = clk_get(&pdev->dev, "hecc_ck");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index f959215c9d53..1218f9642f33 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -14,11 +14,18 @@ config CAN_EMS_USB
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_ESD_USB2
- tristate "ESD USB/2 CAN/USB interface"
+config CAN_ESD_USB
+ tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver supports the CAN-USB/2 interface
- from esd electronic system design gmbh (http://www.esd.eu).
+	  This driver adds support for several CAN/USB interfaces
+ from esd electronics gmbh (https://www.esd.eu).
+
+	  The driver supports the following devices:
+ - esd CAN-USB/2
+ - esd CAN-USB/Micro
+
+ To compile this driver as a module, choose M here: the module
+ will be called esd_usb.
config CAN_ETAS_ES58X
tristate "ETAS ES58X CAN/USB interfaces"
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 748cf31a0d53..1ea16be5743b 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
-obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index bbec3311d893..d31191686a54 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -194,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
@@ -879,8 +880,12 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ems_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const ems_usb_bittiming_const = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -990,6 +995,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
+ netdev->ethtool_ops = &ems_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -1074,7 +1080,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ems_usb_driver = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.probe = ems_usb_probe,
.disconnect = ems_usb_disconnect,
.id_table = ems_usb_table,
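
Replacing the open-coded "ems_usb" strings with KBUILD_MODNAME is the same naming cleanup applied to most drivers in this series: kbuild defines that macro as a string literal carrying the module name, so the usb_driver name and the bittiming-const name can no longer drift apart from the module itself if a file is ever renamed. A tiny illustrative sketch, not taken from the patch:

/* KBUILD_MODNAME is an ordinary string literal ("ems_usb" when built from
 * ems_usb.o), so it can be assigned or concatenated like any string constant */
static const char example_drv_name[] = KBUILD_MODNAME;
#define EXAMPLE_LOG_PREFIX	KBUILD_MODNAME ": "
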
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb.c
index 286daaaea0b8..1bcfad11b1e4 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
+ * CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro
*
- * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+ * Copyright (C) 2022 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -14,20 +16,24 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
+MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
+MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");
-/* Define these values to match your devices */
+/* USB vendor and product ID */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010
#define USB_CANUSBM_PRODUCT_ID 0x0011
+/* CAN controller clock frequencies */
#define ESD_USB2_CAN_CLOCK 60000000
#define ESD_USBM_CAN_CLOCK 36000000
-#define ESD_USB2_MAX_NETS 2
-/* USB2 commands */
+/* Maximum number of CAN nets */
+#define ESD_USB_MAX_NETS 2
+
+/* USB commands */
#define CMD_VERSION 1 /* also used for VERSION_REPLY */
#define CMD_CAN_RX 2 /* device to host only */
#define CMD_CAN_TX 3 /* also used for TX_DONE */
@@ -43,13 +49,15 @@ MODULE_LICENSE("GPL v2");
#define ESD_EVENT 0x40000000
#define ESD_IDMASK 0x1fffffff
-/* esd CAN event ids used by this driver */
-#define ESD_EV_CAN_ERROR_EXT 2
+/* esd CAN event ids */
+#define ESD_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
/* baudrate message flags */
-#define ESD_USB2_UBR 0x80000000
-#define ESD_USB2_LOM 0x40000000
-#define ESD_USB2_NO_BAUDRATE 0x7fffffff
+#define ESD_USB_UBR 0x80000000
+#define ESD_USB_LOM 0x40000000
+#define ESD_USB_NO_BAUDRATE 0x7fffffff
+
+/* bit timing CAN-USB/2 */
#define ESD_USB2_TSEG1_MIN 1
#define ESD_USB2_TSEG1_MAX 16
#define ESD_USB2_TSEG1_SHIFT 16
@@ -68,7 +76,7 @@ MODULE_LICENSE("GPL v2");
#define ESD_ID_ENABLE 0x80
#define ESD_MAX_ID_SEGMENT 64
-/* SJA1000 ECC register (emulated by usb2 firmware) */
+/* SJA1000 ECC register (emulated by usb firmware) */
#define SJA1000_ECC_SEG 0x1F
#define SJA1000_ECC_DIR 0x20
#define SJA1000_ECC_ERR 0x06
@@ -158,7 +166,7 @@ struct set_baudrate_msg {
};
/* Main message type used between library and application */
-struct __attribute__ ((packed)) esd_usb2_msg {
+struct __packed esd_usb_msg {
union {
struct header_msg hdr;
struct version_msg version;
@@ -171,23 +179,23 @@ struct __attribute__ ((packed)) esd_usb2_msg {
} msg;
};
-static struct usb_device_id esd_usb2_table[] = {
+static struct usb_device_id esd_usb_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
{}
};
-MODULE_DEVICE_TABLE(usb, esd_usb2_table);
+MODULE_DEVICE_TABLE(usb, esd_usb_table);
-struct esd_usb2_net_priv;
+struct esd_usb_net_priv;
struct esd_tx_urb_context {
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
u32 echo_index;
};
-struct esd_usb2 {
+struct esd_usb {
struct usb_device *udev;
- struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
+ struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS];
struct usb_anchor rx_submitted;
@@ -198,22 +206,22 @@ struct esd_usb2 {
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
-struct esd_usb2_net_priv {
+struct esd_usb_net_priv {
struct can_priv can; /* must be the first member */
atomic_t active_tx_jobs;
struct usb_anchor tx_submitted;
struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
- struct esd_usb2 *usb2;
+ struct esd_usb *usb;
struct net_device *netdev;
int index;
u8 old_state;
struct can_berr_counter bec;
};
-static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -258,7 +266,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
priv->can.can_stats.bus_error++;
stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR |
+ CAN_ERR_CNT;
switch (ecc & SJA1000_ECC_MASK) {
case SJA1000_ECC_BIT:
@@ -296,8 +305,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
}
}
-static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -311,7 +320,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
id = le32_to_cpu(msg->msg.rx.id);
if (id & ESD_EVENT) {
- esd_usb2_rx_event(priv, msg);
+ esd_usb_rx_event(priv, msg);
} else {
skb = alloc_can_skb(priv->netdev, &cf);
if (skb == NULL) {
@@ -338,12 +347,10 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
netif_rx(skb);
}
-
- return;
}
-static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct net_device *netdev = priv->netdev;
@@ -370,9 +377,9 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
netif_wake_queue(netdev);
}
-static void esd_usb2_read_bulk_callback(struct urb *urb)
+static void esd_usb_read_bulk_callback(struct urb *urb)
{
- struct esd_usb2 *dev = urb->context;
+ struct esd_usb *dev = urb->context;
int retval;
int pos = 0;
int i;
@@ -394,9 +401,9 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
}
while (pos < urb->actual_length) {
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
- msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
+ msg = (struct esd_usb_msg *)(urb->transfer_buffer + pos);
switch (msg->msg.hdr.cmd) {
case CMD_CAN_RX:
@@ -405,7 +412,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
}
- esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
+ esd_usb_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
break;
case CMD_CAN_TX:
@@ -414,8 +421,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
}
- esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
- msg);
+ esd_usb_tx_done_msg(dev->nets[msg->msg.txdone.net],
+ msg);
break;
}
@@ -430,7 +437,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
urb->transfer_buffer, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval == -ENODEV) {
@@ -442,19 +449,15 @@ resubmit_urb:
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
}
-
- return;
}
-/*
- * callback for bulk IN urb
- */
-static void esd_usb2_write_bulk_callback(struct urb *urb)
+/* callback for completed bulk OUT (TX) urbs */
+static void esd_usb_write_bulk_callback(struct urb *urb)
{
struct esd_tx_urb_context *context = urb->context;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
struct net_device *netdev;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(struct esd_usb_msg);
WARN_ON(!context);
@@ -478,7 +481,7 @@ static ssize_t firmware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 12) & 0xf,
@@ -491,7 +494,7 @@ static ssize_t hardware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 28) & 0xf,
@@ -504,13 +507,13 @@ static ssize_t nets_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d", dev->net_count);
}
static DEVICE_ATTR_RO(nets);
-static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
+static int esd_usb_send_msg(struct esd_usb *dev, struct esd_usb_msg *msg)
{
int actual_length;
@@ -522,8 +525,8 @@ static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
1000);
}
-static int esd_usb2_wait_msg(struct esd_usb2 *dev,
- struct esd_usb2_msg *msg)
+static int esd_usb_wait_msg(struct esd_usb *dev,
+ struct esd_usb_msg *msg)
{
int actual_length;
@@ -535,7 +538,7 @@ static int esd_usb2_wait_msg(struct esd_usb2 *dev,
1000);
}
-static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
{
int i, err = 0;
@@ -568,7 +571,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->rx_submitted);
@@ -606,14 +609,12 @@ freeurb:
return 0;
}
-/*
- * Start interface
- */
-static int esd_usb2_start(struct esd_usb2_net_priv *priv)
+/* Start interface */
+static int esd_usb_start(struct esd_usb_net_priv *priv)
{
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb *dev = priv->usb;
struct net_device *netdev = priv->netdev;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
int err, i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -622,8 +623,7 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
goto out;
}
- /*
- * Enable all IDs
+ /* Enable all IDs
* The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
* Each bit represents one 11 bit CAN identifier. A set bit
* enables reception of the corresponding CAN identifier. A cleared
@@ -644,11 +644,11 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
/* enable 29bit extended IDs */
msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err)
goto out;
- err = esd_usb2_setup_rx_urbs(dev);
+ err = esd_usb_setup_rx_urbs(dev);
if (err)
goto out;
@@ -664,9 +664,9 @@ out:
return err;
}
-static void unlink_all_urbs(struct esd_usb2 *dev)
+static void unlink_all_urbs(struct esd_usb *dev)
{
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
@@ -687,9 +687,9 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
}
}
-static int esd_usb2_open(struct net_device *netdev)
+static int esd_usb_open(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
int err;
/* common open */
@@ -698,7 +698,7 @@ static int esd_usb2_open(struct net_device *netdev)
return err;
/* finally start device */
- err = esd_usb2_start(priv);
+ err = esd_usb_start(priv);
if (err) {
netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
@@ -710,20 +710,20 @@ static int esd_usb2_open(struct net_device *netdev)
return 0;
}
-static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
+static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb *dev = priv->usb;
struct esd_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
struct urb *urb;
u8 *buf;
int i, err;
int ret = NETDEV_TX_OK;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(struct esd_usb_msg);
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -745,7 +745,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
goto nobufmem;
}
- msg = (struct esd_usb2_msg *)buf;
+ msg = (struct esd_usb_msg *)buf;
msg->msg.hdr.len = 3; /* minimal length */
msg->msg.hdr.cmd = CMD_CAN_TX;
@@ -771,9 +771,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
}
}
- /*
- * This may never happen.
- */
+ /* This may never happen */
if (!context) {
netdev_warn(netdev, "couldn't find free context\n");
ret = NETDEV_TX_BUSY;
@@ -788,7 +786,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
msg->msg.hdr.len << 2,
- esd_usb2_write_bulk_callback, context);
+ esd_usb_write_bulk_callback, context);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -821,8 +819,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
netif_trans_update(netdev);
- /*
- * Release our reference to this URB, the USB core will eventually free
+ /* Release our reference to this URB, the USB core will eventually free
* it entirely.
*/
usb_free_urb(urb);
@@ -839,24 +836,24 @@ nourbmem:
return ret;
}
-static int esd_usb2_close(struct net_device *netdev)
+static int esd_usb_close(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2_msg *msg;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_msg *msg;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- /* Disable all IDs (see esd_usb2_start()) */
+ /* Disable all IDs (see esd_usb_start()) */
msg->msg.hdr.cmd = CMD_IDADD;
msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
msg->msg.filter.net = priv->index;
msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
msg->msg.filter.mask[i] = 0;
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */
@@ -864,8 +861,8 @@ static int esd_usb2_close(struct net_device *netdev)
msg->msg.hdr.cmd = CMD_SETBAUD;
msg->msg.setbaud.net = priv->index;
msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ msg->msg.setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending setbaud message failed\n");
priv->can.state = CAN_STATE_STOPPED;
@@ -879,13 +876,17 @@ static int esd_usb2_close(struct net_device *netdev)
return 0;
}
-static const struct net_device_ops esd_usb2_netdev_ops = {
- .ndo_open = esd_usb2_open,
- .ndo_stop = esd_usb2_close,
- .ndo_start_xmit = esd_usb2_start_xmit,
+static const struct net_device_ops esd_usb_netdev_ops = {
+ .ndo_open = esd_usb_open,
+ .ndo_stop = esd_usb_close,
+ .ndo_start_xmit = esd_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops esd_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const esd_usb2_bittiming_const = {
.name = "esd_usb2",
.tseg1_min = ESD_USB2_TSEG1_MIN,
@@ -900,20 +901,20 @@ static const struct can_bittiming_const esd_usb2_bittiming_const = {
static int esd_usb2_set_bittiming(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
int err;
u32 canbtr;
int sjw_shift;
- canbtr = ESD_USB2_UBR;
+ canbtr = ESD_USB_UBR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- canbtr |= ESD_USB2_LOM;
+ canbtr |= ESD_USB_LOM;
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
- if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+ if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID)
sjw_shift = ESD_USBM_SJW_SHIFT;
else
@@ -941,16 +942,16 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
netdev_info(netdev, "setting BTR=%#x\n", canbtr);
- err = esd_usb2_send_msg(priv->usb2, msg);
+ err = esd_usb_send_msg(priv->usb, msg);
kfree(msg);
return err;
}
-static int esd_usb2_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
+static int esd_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
bec->txerr = priv->bec.txerr;
bec->rxerr = priv->bec.rxerr;
@@ -958,7 +959,7 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev,
return 0;
}
-static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
+static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
@@ -972,11 +973,11 @@ static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
return 0;
}
-static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
+static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int err = 0;
int i;
@@ -995,7 +996,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
- priv->usb2 = dev;
+ priv->usb = dev;
priv->netdev = netdev;
priv->index = index;
@@ -1013,12 +1014,13 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->can.bittiming_const = &esd_usb2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming;
- priv->can.do_set_mode = esd_usb2_set_mode;
- priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
+ priv->can.do_set_mode = esd_usb_set_mode;
+ priv->can.do_get_berr_counter = esd_usb_get_berr_counter;
netdev->flags |= IFF_ECHO; /* we support local echo */
- netdev->netdev_ops = &esd_usb2_netdev_ops;
+ netdev->netdev_ops = &esd_usb_netdev_ops;
+ netdev->ethtool_ops = &esd_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
netdev->dev_id = index;
@@ -1038,17 +1040,16 @@ done:
return err;
}
-/*
- * probe function for new USB2 devices
+/* probe function for new USB devices
*
* check version information and number of available
* CAN interfaces
*/
-static int esd_usb2_probe(struct usb_interface *intf,
+static int esd_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct esd_usb2 *dev;
- struct esd_usb2_msg *msg;
+ struct esd_usb *dev;
+ struct esd_usb_msg *msg;
int i, err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1076,13 +1077,13 @@ static int esd_usb2_probe(struct usb_interface *intf,
msg->msg.version.flags = 0;
msg->msg.version.drv_version = 0;
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "sending version message failed\n");
goto free_msg;
}
- err = esd_usb2_wait_msg(dev, msg);
+ err = esd_usb_wait_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "no version message answer\n");
goto free_msg;
@@ -1105,7 +1106,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
/* do per device probing */
for (i = 0; i < dev->net_count; i++)
- esd_usb2_probe_one_net(intf, i);
+ esd_usb_probe_one_net(intf, i);
free_msg:
kfree(msg);
@@ -1115,12 +1116,10 @@ done:
return err;
}
-/*
- * called by the usb core when the device is removed from the system
- */
-static void esd_usb2_disconnect(struct usb_interface *intf)
+/* called by the usb core when the device is removed from the system */
+static void esd_usb_disconnect(struct usb_interface *intf)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
int i;
@@ -1144,11 +1143,11 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
}
/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver esd_usb2_driver = {
- .name = "esd_usb2",
- .probe = esd_usb2_probe,
- .disconnect = esd_usb2_disconnect,
- .id_table = esd_usb2_table,
+static struct usb_driver esd_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = esd_usb_probe,
+ .disconnect = esd_usb_disconnect,
+ .id_table = esd_usb_table,
};
-module_usb_driver(esd_usb2_driver);
+module_usb_driver(esd_usb_driver);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 2d73ebbf3836..51294b717040 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -10,6 +10,7 @@
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -18,14 +19,11 @@
#include "es58x_core.h"
-#define DRV_VERSION "1.00"
MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL v2");
-#define ES58X_MODULE_NAME "etas_es58x"
#define ES58X_VENDOR_ID 0x108C
#define ES581_4_PRODUCT_ID 0x0159
#define ES582_1_PRODUCT_ID 0x0168
@@ -59,11 +57,11 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
#define es58x_print_hex_dump(buf, len) \
print_hex_dump(KERN_DEBUG, \
- ES58X_MODULE_NAME " " __stringify(buf) ": ", \
+ KBUILD_MODNAME " " __stringify(buf) ": ", \
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
#define es58x_print_hex_dump_debug(buf, len) \
- print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\
+ print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
/* The last two bytes of an ES58X command is a CRC16. The first two
@@ -1462,10 +1460,6 @@ static void es58x_read_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_read_bulk_callback, es58x_dev);
-
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret == -ENODEV) {
for (i = 0; i < es58x_dev->num_can_ch; i++)
@@ -1599,7 +1593,8 @@ static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev)
return NULL;
usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- buf, tx_buf_len, NULL, NULL);
+ buf, tx_buf_len, es58x_write_bulk_callback,
+ NULL);
return urb;
}
@@ -1632,9 +1627,7 @@ static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb,
int ret;
es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length);
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_write_bulk_callback, netdev);
+ urb->context = netdev;
usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -1707,7 +1700,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
{
const struct device *dev = es58x_dev->dev;
const struct es58x_parameters *param = es58x_dev->param;
- size_t rx_buf_len = es58x_dev->rx_max_packet_size;
+ u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe);
struct urb *urb;
u8 *buf;
int i;
@@ -1739,7 +1732,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_err(dev, "%s: Could not setup any rx URBs\n", __func__);
return ret;
}
- dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n",
+ dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
return ret;
@@ -1981,7 +1974,12 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
- .ndo_start_xmit = es58x_start_xmit
+ .ndo_start_xmit = es58x_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops es58x_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
/**
@@ -2088,6 +2086,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
netdev->netdev_ops = &es58x_netdev_ops;
+ netdev->ethtool_ops = &es58x_ethtool_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
netdev->dev_port = channel_idx;
@@ -2181,9 +2180,8 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep_in, *ep_out;
int ret;
- dev_info(dev,
- "Starting %s %s (Serial Number %s) driver version %s\n",
- udev->manufacturer, udev->product, udev->serial, DRV_VERSION);
+ dev_info(dev, "Starting %s %s (Serial Number %s)\n",
+ udev->manufacturer, udev->product, udev->serial);
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
@@ -2223,7 +2221,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
ep_in->bEndpointAddress);
es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev,
ep_out->bEndpointAddress);
- es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
return es58x_dev;
}
@@ -2281,7 +2278,7 @@ static void es58x_disconnect(struct usb_interface *intf)
}
static struct usb_driver es58x_driver = {
- .name = ES58X_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = es58x_probe,
.disconnect = es58x_disconnect,
.id_table = es58x_id_table
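
The es58x URB rework leans on the fact that a URB keeps its pipe, buffer and completion pointers once usb_fill_bulk_urb() has set them: the RX completion handler now resubmits the same URB untouched, and TX URBs get their callback at allocation time with only urb->context updated per transmission. A condensed sketch of the RX half of that pattern, with illustrative names and the error paths trimmed:

static void example_read_bulk_callback(struct urb *urb)
{
	if (urb->status)
		return;			/* unlinked or failed: do not resubmit */

	/* ... consume urb->transfer_buffer / urb->actual_length ... */

	/* the fields filled once at setup time are still valid, so the URB
	 * can go straight back to the host controller */
	usb_submit_urb(urb, GFP_ATOMIC);
}
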
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index e5033cb5e695..d769bdf740b7 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -380,7 +380,6 @@ struct es58x_operators {
* @timestamps: a temporary buffer to store the time stamps before
* feeding them to es58x_can_get_echo_skb(). Can only be used
* in RX branches.
- * @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
* @opened_channel_cnt: number of channels opened. Free of race
* conditions because its two users (net_device_ops:ndo_open()
@@ -401,8 +400,8 @@ struct es58x_device {
const struct es58x_parameters *param;
const struct es58x_operators *ops;
- int rx_pipe;
- int tx_pipe;
+ unsigned int rx_pipe;
+ unsigned int tx_pipe;
struct usb_anchor rx_urbs;
struct usb_anchor tx_urbs_busy;
@@ -414,7 +413,6 @@ struct es58x_device {
u64 timestamps[ES58X_ECHO_BULK_MAX];
- u16 rx_max_packet_size;
u8 num_can_ch;
u8 opened_channel_cnt;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index d3a658b444b5..baf749c8cda3 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -946,6 +946,7 @@ static int gs_usb_set_phys_id(struct net_device *dev,
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static struct gs_can *gs_make_candev(unsigned int channel,
@@ -989,11 +990,12 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev = netdev_priv(netdev);
netdev->netdev_ops = &gs_usb_netdev_ops;
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
/* dev setup */
- strcpy(dev->bt_const.name, "gs_usb");
+ strcpy(dev->bt_const.name, KBUILD_MODNAME);
dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
@@ -1100,7 +1102,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
return ERR_PTR(rc);
}
- strcpy(dev->data_bt_const.name, "gs_usb");
+ strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
@@ -1270,7 +1272,7 @@ static const struct usb_device_id gs_usb_table[] = {
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
+ .name = KBUILD_MODNAME,
.probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
.id_table = gs_usb_table,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index eefcbe3aadce..841da29cef93 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -39,6 +39,7 @@
#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
+#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index f211bfcb1d97..824cab80aa02 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/kernel.h>
@@ -89,7 +90,7 @@
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
- .quirks = 0,
+ .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
.ops = &kvaser_usb_hydra_dev_ops,
};
@@ -665,6 +666,22 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
int i;
@@ -742,7 +759,13 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
+ } else {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
+ }
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
@@ -869,7 +892,7 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
}
static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
+ .name = KBUILD_MODNAME,
.probe = kvaser_usb_probe,
.disconnect = kvaser_usb_disconnect,
.id_table = kvaser_usb_table,
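
With the hydra-based devices now flagged KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP, probe picks the hwts flavour of both ops structures while the other devices keep the software-only pair. On such an interface an application can request raw hardware RX timestamps the usual way; a hedged user-space sketch, socket creation and binding omitted:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Ask for raw hardware RX timestamps (plus software fallback) on an
 * already bound CAN_RAW socket. */
static int enable_rx_hw_timestamps(int can_sock)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE |
		    SOF_TIMESTAMPING_RX_SOFTWARE |
		    SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(can_sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
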
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 5d70844ac030..dd65c101bfb8 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -917,8 +917,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
netif_rx(skb);
}
@@ -1069,8 +1072,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
shhwtstamps->hwtstamp = hwtstamp;
cf->can_id |= CAN_ERR_BUSERROR;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
netif_rx(skb);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index cc809ecd1e62..07f687f29b34 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -853,8 +853,11 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
break;
}
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+ }
netif_rx(skb);
}
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 792ab9da317d..69346c63021f 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -10,6 +10,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -758,6 +759,10 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_start_xmit = mcba_usb_start_xmit,
};
+static const struct ethtool_ops mcba_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Microchip CANBUS has hardcoded bittiming values by default.
* This function sends request via USB to change the speed and align bittiming
* values for presentation purposes only
@@ -836,6 +841,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
priv->can.do_set_bittiming = mcba_net_set_bittiming;
netdev->netdev_ops = &mcba_netdev_ops;
+ netdev->ethtool_ops = &mcba_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 091c631ebe23..687dd542f7f6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -506,6 +506,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
/* Supply TX/RX error counters in case of
* controller error.
*/
+ cf->can_id = CAN_ERR_CNT;
cf->data[6] = mc->pdev->bec.txerr;
cf->data[7] = mc->pdev->bec.rxerr;
}
@@ -964,6 +965,7 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index b850ff8fe4bd..8c9d53f6e24c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -57,7 +57,7 @@ MODULE_DEVICE_TABLE(usb, peak_usb_table);
* dump memory
*/
#define DUMP_WIDTH 16
-void pcan_dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(const char *prompt, const void *p, int l)
{
pr_info("%s dumping %s (%d bytes):\n",
PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -775,13 +775,54 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
/*
* create one device which is attached to CAN controller #ctrl_idx of the
* usb adapter.
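
peak_eth_ioctl() above implements the minimal SIOCSHWTSTAMP/SIOCGHWTSTAMP contract for hardware whose RX timestamps cannot be switched off: the only accepted (and reported) configuration is HWTSTAMP_TX_OFF plus HWTSTAMP_FILTER_ALL, and pcan_get_ts_info() advertises the matching capability set. The GET side can be exercised from user space like this (hedged sketch; the interface name is assumed):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Query the fixed timestamping setup of "can0"; the ioctl handler above
 * answers with tx_type == HWTSTAMP_TX_OFF and rx_filter == HWTSTAMP_FILTER_ALL. */
static int query_hwtstamp(int sock, struct hwtstamp_config *cfg)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	memset(cfg, 0, sizeof(*cfg));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)cfg;

	return ioctl(sock, SIOCGHWTSTAMP, &ifr);
}
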
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f60af573a2e0..f6bdd8b3f290 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -132,7 +132,7 @@ struct peak_usb_device {
struct peak_usb_device *next_siblings;
};
-void pcan_dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(const char *prompt, const void *p, int l);
/* common timestamp management */
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
@@ -145,5 +145,6 @@ int peak_usb_netif_rx(struct sk_buff *skb,
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 65487ec33566..2ea1500df393 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -33,6 +33,10 @@
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
+/* struct pcan_ufd_fw_info::type */
+#define PCAN_USBFD_TYPE_STD 1
+#define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */
+
/* read some versions info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
@@ -44,6 +48,13 @@ struct __packed pcan_ufd_fw_info {
__le32 dev_id[2]; /* "device id" per CAN */
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
+
+ /* extended data when type == PCAN_USBFD_TYPE_EXT */
+ u8 cmd_out_ep; /* ep for cmd */
+ u8 cmd_in_ep; /* ep for replies */
+ u8 data_out_ep[2]; /* ep for CANx TX */
+ u8 data_in_ep; /* ep for CAN RX */
+ u8 dummy[3];
};
/* handle device specific info used by the netdevices */
@@ -171,6 +182,9 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
/* send PCAN-USB Pro FD commands synchronously */
static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
@@ -200,7 +214,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
- PCAN_USBPRO_EP_CMDOUT),
+ fw_info->cmd_out_ep),
packet_ptr, packet_len,
NULL, PCAN_UFD_CMD_TIMEOUT_MS);
if (err) {
@@ -426,6 +440,9 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
struct urb *urb, u8 *buf)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
u8 *pc = buf;
/* build the entire cmds list in the provided buffer, to go back into
@@ -439,7 +456,7 @@ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
/* complete the URB */
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep),
buf, pc - buf,
pcan_usb_pro_restart_complete, dev);
@@ -839,6 +856,15 @@ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
return 0;
}
+/* probe function for all PCAN-USB FD family usb interfaces */
+static int pcan_usb_fd_probe(struct usb_interface *intf)
+{
+ struct usb_host_interface *iface_desc = &intf->altsetting[0];
+
+ /* CAN interface is always interface #0 */
+ return iface_desc->desc.bInterfaceNumber;
+}
+
/* stop interface (last chance before set bus off) */
static int pcan_usb_fd_stop(struct peak_usb_device *dev)
{
@@ -860,6 +886,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
{
struct pcan_usb_fd_device *pdev =
container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info;
int i, err = -ENOMEM;
/* do this for 1st channel only */
@@ -878,10 +905,12 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* number of ts msgs to ignore before taking one into account */
pdev->usb_if->cm_ignore_count = 5;
+ fw_info = &pdev->usb_if->fw_info;
+
err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
PCAN_USBPRO_INFO_FW,
- &pdev->usb_if->fw_info,
- sizeof(pdev->usb_if->fw_info));
+ fw_info,
+ sizeof(*fw_info));
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -895,14 +924,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
*/
dev_info(dev->netdev->dev.parent,
"PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
- dev->adapter->name, pdev->usb_if->fw_info.hw_version,
- pdev->usb_if->fw_info.fw_version[0],
- pdev->usb_if->fw_info.fw_version[1],
- pdev->usb_if->fw_info.fw_version[2],
+ dev->adapter->name, fw_info->hw_version,
+ fw_info->fw_version[0],
+ fw_info->fw_version[1],
+ fw_info->fw_version[2],
dev->adapter->ctrl_count);
/* check for ability to switch between ISO/non-ISO modes */
- if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+ if (fw_info->fw_version[0] >= 2) {
/* firmware >= 2.x supports ISO/non-ISO switching */
dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
} else {
@@ -910,6 +939,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
+ /* if vendor rsp is of type 2, then it contains EP numbers to
+	 * use for the command pipes. If not, the default EPs are used.
+ */
+ if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
+ fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
+ }
+
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
if (err) {
@@ -930,12 +967,23 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* do a copy of the ctrlmode[_supported] too */
dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
+
+ fw_info = &pdev->usb_if->fw_info;
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
dev->device_number =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ /* if vendor rsp is of type 2, then it contains EP numbers to
+ * use for data pipes. If not, then statically defined EP are used
+ * (see peak_usb_create_dev()).
+ */
+ if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ dev->ep_msg_in = fw_info->data_in_ep;
+ dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
+ }
+
/* set clock domain */
for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
@@ -1032,6 +1080,7 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/* describes the PCAN-USB FD adapter */
@@ -1091,7 +1140,7 @@ const struct peak_usb_adapter pcan_usb_fd = {
.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
/* device callbacks */
- .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .intf_probe = pcan_usb_fd_probe,
.dev_init = pcan_usb_fd_init,
.dev_exit = pcan_usb_fd_exit,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index ebe087f258e3..5d8f6a40bb2c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -439,7 +439,7 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->serial_num);
+ *device_id = le32_to_cpu(pdn->dev_num);
return err;
}
@@ -1022,6 +1022,7 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 5d4cf14eb9d9..a34e0fc021c9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -112,7 +112,7 @@ struct __packed pcan_usb_pro_devid {
u8 data_type;
u8 channel;
__le16 dummy;
- __le32 serial_num;
+ __le32 dev_num;
};
#define PCAN_USBPRO_LED_DEVICE 0x00
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 5ae0d7c017cc..7c35f50fda4e 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -28,6 +28,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -1233,6 +1234,10 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Request to set bittiming
*
* This function generates an USB set bittiming message and transmits
@@ -1512,6 +1517,7 @@ static int ucan_probe(struct usb_interface *intf,
spin_lock_init(&up->context_lock);
spin_lock_init(&up->echo_skb_lock);
netdev->netdev_ops = &ucan_netdev_ops;
+ netdev->ethtool_ops = &ucan_ethtool_ops;
usb_set_intfdata(intf, up);
SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index f3363575bf32..64c00abe91cf 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -12,6 +12,7 @@
* who were very cooperative and answered my questions.
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -438,9 +439,11 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
if (rx_errors)
stats->rx_errors++;
-
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (priv->can.state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
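
The hunk above reports the TX/RX error counters only while the controller is not bus-off, and flags their presence with CAN_ERR_CNT. A small self-contained userspace sketch of how such an error frame is laid out; CAN_ERR_CNT is guarded with a fallback define in case the local UAPI headers predate it:

	#include <stdio.h>
	#include <string.h>
	#include <linux/can.h>
	#include <linux/can/error.h>

	#ifndef CAN_ERR_CNT
	#define CAN_ERR_CNT 0x00000200U	/* fallback for older uapi headers */
	#endif

	int main(void)
	{
		struct can_frame cf;

		memset(&cf, 0, sizeof(cf));
		cf.can_id = CAN_ERR_FLAG | CAN_ERR_CNT;	/* error frame carrying counters */
		cf.can_dlc = CAN_ERR_DLC;		/* .len on newer headers */
		cf.data[6] = 96;			/* TX error counter */
		cf.data[7] = 32;			/* RX error counter */

		printf("txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);
		return 0;
	}
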
@@ -868,8 +871,12 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops usb_8dev_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const usb_8dev_bittiming_const = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -925,6 +932,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
CAN_CTRLMODE_CC_LEN8_DLC;
netdev->netdev_ops = &usb_8dev_netdev_ops;
+ netdev->ethtool_ops = &usb_8dev_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -995,7 +1003,7 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
}
static struct usb_driver usb_8dev_driver = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.probe = usb_8dev_probe,
.disconnect = usb_8dev_disconnect,
.id_table = usb_8dev_table,
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a15619d883ec..36b6310a2e5b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -40,6 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -99,6 +100,8 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
+ skb_tx_timestamp(skb);
+
if (!echo) {
/* no echo handling available inside this driver */
if (loop) {
@@ -146,6 +149,10 @@ static const struct net_device_ops vcan_netdev_ops = {
.ndo_change_mtu = vcan_change_mtu,
};
+static const struct ethtool_ops vcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
@@ -161,6 +168,7 @@ static void vcan_setup(struct net_device *dev)
dev->flags |= IFF_ECHO;
dev->netdev_ops = &vcan_netdev_ops;
+ dev->ethtool_ops = &vcan_ethtool_ops;
dev->needs_free_netdev = true;
}
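
The ethtool_ops added throughout these CAN driver hunks report timestamping capabilities through the standard ethtool interface (ethtool_op_get_ts_info for plain software timestamps, pcan_get_ts_info for the PEAK devices), and skb_tx_timestamp() feeds the software TX timestamp path. Once applied, the reported capabilities can be inspected with `ethtool -T <iface>` or queried directly; a small userspace sketch using the ETHTOOL_GET_TS_INFO ioctl (the interface name is only an example):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(int argc, char **argv)
	{
		const char *ifname = argc > 1 ? argv[1] : "vcan0";
		struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&info;

		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("SIOCETHTOOL");
			return 1;
		}
		/* ethtool_op_get_ts_info reports software-only stamping, phc_index -1 */
		printf("%s: so_timestamping=0x%x phc_index=%d\n",
		       ifname, info.so_timestamping, info.phc_index);
		close(fd);
		return 0;
	}
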
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 577a80300514..cffd107d8b28 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -9,6 +9,7 @@
* Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -53,6 +54,8 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
goto out_unlock;
}
+ skb_tx_timestamp(oskb);
+
skb = skb_clone(oskb, GFP_ATOMIC);
if (skb) {
consume_skb(oskb);
@@ -144,6 +147,10 @@ static const struct net_device_ops vxcan_netdev_ops = {
.ndo_change_mtu = vxcan_change_mtu,
};
+static const struct ethtool_ops vxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vxcan_setup(struct net_device *dev)
{
struct can_ml_priv *can_ml;
@@ -155,6 +162,7 @@ static void vxcan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
+ dev->ethtool_ops = &vxcan_ethtool_ops;
dev->needs_free_netdev = true;
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e179d311aa28..5d3172795ad0 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
*
- * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2012 - 2022 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
@@ -9,8 +9,10 @@
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -50,7 +52,7 @@ enum xcan_reg {
/* only on CAN FD cores */
XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
- * Prescalar
+ * Prescaler
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
@@ -86,6 +88,8 @@ enum xcan_reg {
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
+#define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
@@ -99,6 +103,7 @@ enum xcan_reg {
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
@@ -132,6 +137,7 @@ enum xcan_reg {
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
@@ -276,6 +282,26 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.brp_inc = 1,
};
+/* Transmission Delay Compensation constants for CANFD 1.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 32,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+/* Transmission Delay Compensation constants for CANFD 2.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd2 = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 64,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -405,7 +431,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
return -EPERM;
}
- /* Setting Baud Rate prescalar value in BRPR Register */
+ /* Setting Baud Rate prescaler value in BRPR Register */
btr0 = (bt->brp - 1);
/* Setting Time Segment 1 in BTR Register */
@@ -422,8 +448,16 @@ static int xcan_set_bittiming(struct net_device *ndev)
if (priv->devtype.cantype == XAXI_CANFD ||
priv->devtype.cantype == XAXI_CANFD_2_0) {
- /* Setting Baud Rate prescalar value in F_BRPR Register */
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
+ if (can_tdc_is_enabled(&priv->can)) {
+ if (priv->devtype.cantype == XAXI_CANFD)
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ else
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ }
/* Setting Time Segment 1 in BTR Register */
btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
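
In the hunk above, the TDCO value is packed into the F_BRPR register with FIELD_PREP() against GENMASK(12, 8) (GENMASK(13, 8) on CANFD 2.0 cores), and xcan_get_auto_tdcv() later extracts the measured TDCV from the status register with FIELD_GET(). A self-contained userspace sketch of that bit-field arithmetic, using simplified stand-ins for the kernel macros (the kernel versions add type and constant checking):

	#include <stdio.h>

	#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
	#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

	#define XCAN_BRPR_TDCO_MASK	GENMASK(12, 8)	/* TDCO, CANFD 1.0 */
	#define XCAN_BRPR_TDC_ENABLE	(1u << 16)	/* TDC enable */
	#define XCAN_SR_TDCV_MASK	GENMASK(22, 16)	/* measured TDCV */

	int main(void)
	{
		unsigned int brp = 2, tdco = 15;
		unsigned int btr0 = (brp - 1) |
				    FIELD_PREP(XCAN_BRPR_TDCO_MASK, tdco) |
				    XCAN_BRPR_TDC_ENABLE;

		printf("F_BRPR = 0x%05x\n", btr0);	/* 0x10f01 */

		unsigned int sr = 0x002a0000;		/* pretend status register read */
		printf("TDCV   = %u\n", FIELD_GET(XCAN_SR_TDCV_MASK, sr));	/* 42 */
		return 0;
	}
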
@@ -932,6 +966,7 @@ static void xcan_set_error_state(struct net_device *ndev,
can_change_state(ndev, cf, tx_state, rx_state);
if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
@@ -1483,6 +1518,22 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
+/**
+ * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
+ * @ndev: Pointer to net_device structure
+ * @tdcv: Pointer to TDCV value
+ *
+ * Return: 0 on success
+ */
+static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ return 0;
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1490,6 +1541,10 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops xcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/**
* xcan_suspend - Suspend method for the driver
* @dev: Address of the device structure
@@ -1735,17 +1790,24 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
- if (devtype->cantype == XAXI_CANFD)
+ if (devtype->cantype == XAXI_CANFD) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
+ priv->can.tdc_const = &xcan_tdc_const_canfd;
+ }
- if (devtype->cantype == XAXI_CANFD_2_0)
+ if (devtype->cantype == XAXI_CANFD_2_0) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
+ priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ }
if (devtype->cantype == XAXI_CANFD ||
- devtype->cantype == XAXI_CANFD_2_0)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ devtype->cantype == XAXI_CANFD_2_0) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_TDC_AUTO;
+ priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ }
priv->reg_base = addr;
priv->tx_max = tx_max;
@@ -1764,6 +1826,7 @@ static int xcan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &xcan_netdev_ops;
+ ndev->ethtool_ops = &xcan_ethtool_ops;
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 6d1fcb08bba1..d8ae0e8af2a0 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -60,16 +60,17 @@ source "drivers/net/dsa/sja1105/Kconfig"
source "drivers/net/dsa/xrs700x/Kconfig"
-config NET_DSA_QCA8K
- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- select NET_DSA_TAG_QCA
- select REGMAP
- help
- This enables support for the Qualcomm Atheros QCA8K Ethernet
- switch chips.
-
source "drivers/net/dsa/realtek/Kconfig"
+config NET_DSA_RZN1_A5PSW
+ tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
+ depends on OF && ARCH_RZN1
+ select NET_DSA_TAG_RZN1_A5PSW
+ select PCS_RZN1_MIIC
+ help
+ This driver supports the A5PSW switch, which is embedded in Renesas
+ RZ/N1 SoC.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index e73838c12256..16eb879e0cb4 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -8,7 +8,7 @@ endif
obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+obj-$(CONFIG_NET_DSA_RZN1_A5PSW) += rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 0e54b2a0c211..308f15d3832e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -320,8 +320,6 @@ static void b53_spi_remove(struct spi_device *spi)
if (dev)
b53_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
}
static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index ac1f3b3a7040..01f90994dedd 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1894,11 +1894,8 @@ static int hellcreek_probe(struct platform_device *pdev)
if (!port->counter_values)
return -ENOMEM;
- port->vlan_dev_bitmap =
- devm_kcalloc(dev,
- BITS_TO_LONGS(VLAN_N_VID),
- sizeof(unsigned long),
- GFP_KERNEL);
+ port->vlan_dev_bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!port->vlan_dev_bitmap)
return -ENOMEM;
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c9e2a8989556..06b1efdb5e7d 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,49 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NET_DSA_MICROCHIP_KSZ_COMMON
- select NET_DSA_TAG_KSZ
- tristate
-
-menuconfig NET_DSA_MICROCHIP_KSZ9477
- tristate "Microchip KSZ9477 series switch support"
+menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
help
- This driver adds support for Microchip KSZ9477 switch chips.
+ This driver adds support for Microchip KSZ9477 series switch and
+ KSZ8795/KSZ88x3 switch chips.
config NET_DSA_MICROCHIP_KSZ9477_I2C
- tristate "KSZ9477 series I2C connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && I2C
+ tristate "KSZ series I2C connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
select REGMAP_I2C
help
Select to enable support for registering switches configured through I2C.
-config NET_DSA_MICROCHIP_KSZ9477_SPI
- tristate "KSZ9477 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
+config NET_DSA_MICROCHIP_KSZ_SPI
+ tristate "KSZ series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
-menuconfig NET_DSA_MICROCHIP_KSZ8795
- tristate "Microchip KSZ8795 series switch support"
- depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
- help
- This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips.
-
-config NET_DSA_MICROCHIP_KSZ8795_SPI
- tristate "KSZ8795 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
- select REGMAP_SPI
- help
- This driver accesses KSZ8795 chip through SPI.
-
- It is required to use the KSZ8795 switch driver as the only access
- is through SPI.
-
config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON
select MDIO_BITBANG
help
Select to enable support for registering switches configured through
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 2a03b21a3386..28873559efc2 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
+ksz_switch-objs := ksz_common.o
+ksz_switch-objs += ksz9477.o
+ksz_switch-objs += ksz8795.o
+ksz_switch-objs += lan937x_main.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 03da369675c6..42c50cc4d853 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -7,64 +7,55 @@
#ifndef __KSZ8XXX_H
#define __KSZ8XXX_H
-#include <linux/kernel.h>
-enum ksz_regs {
- REG_IND_CTRL_0,
- REG_IND_DATA_8,
- REG_IND_DATA_CHECK,
- REG_IND_DATA_HI,
- REG_IND_DATA_LO,
- REG_IND_MIB_CHECK,
- REG_IND_BYTE,
- P_FORCE_CTRL,
- P_LINK_STATUS,
- P_LOCAL_CTRL,
- P_NEG_RESTART_CTRL,
- P_REMOTE_STATUS,
- P_SPEED_STATUS,
- S_TAIL_TAG_CTRL,
-};
+#include <linux/types.h>
+#include <net/dsa.h>
+#include "ksz_common.h"
-enum ksz_masks {
- PORT_802_1P_REMAPPING,
- SW_TAIL_TAG_ENABLE,
- MIB_COUNTER_OVERFLOW,
- MIB_COUNTER_VALID,
- VLAN_TABLE_FID,
- VLAN_TABLE_MEMBERSHIP,
- VLAN_TABLE_VALID,
- STATIC_MAC_TABLE_VALID,
- STATIC_MAC_TABLE_USE_FID,
- STATIC_MAC_TABLE_FID,
- STATIC_MAC_TABLE_OVERRIDE,
- STATIC_MAC_TABLE_FWD_PORTS,
- DYNAMIC_MAC_TABLE_ENTRIES_H,
- DYNAMIC_MAC_TABLE_MAC_EMPTY,
- DYNAMIC_MAC_TABLE_NOT_READY,
- DYNAMIC_MAC_TABLE_ENTRIES,
- DYNAMIC_MAC_TABLE_FID,
- DYNAMIC_MAC_TABLE_SRC_PORT,
- DYNAMIC_MAC_TABLE_TIMESTAMP,
-};
-
-enum ksz_shifts {
- VLAN_TABLE_MEMBERSHIP_S,
- VLAN_TABLE,
- STATIC_MAC_FWD_PORTS,
- STATIC_MAC_FID,
- DYNAMIC_MAC_ENTRIES_H,
- DYNAMIC_MAC_ENTRIES,
- DYNAMIC_MAC_FID,
- DYNAMIC_MAC_TIMESTAMP,
- DYNAMIC_MAC_SRC_PORT,
-};
-
-struct ksz8 {
- const u8 *regs;
- const u32 *masks;
- const u8 *shifts;
- void *priv;
-};
+int ksz8_setup(struct dsa_switch *ds);
+u32 ksz8_get_port_addr(int port, int offset);
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
+int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz8_port_init_cnt(struct ksz_device *dev, int port);
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz8_get_stp_reg(void);
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void ksz8_config_cpu_port(struct dsa_switch *ds);
+int ksz8_enable_stp_addr(struct ksz_device *dev);
+int ksz8_reset_switch(struct ksz_device *dev);
+int ksz8_switch_detect(struct ksz_device *dev);
+int ksz8_switch_init(struct ksz_device *dev);
+void ksz8_switch_exit(struct ksz_device *dev);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 12a599d5e61a..c79a5128235f 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -26,111 +26,6 @@
#include "ksz8795_reg.h"
#include "ksz8.h"
-static const u8 ksz8795_regs[] = {
- [REG_IND_CTRL_0] = 0x6E,
- [REG_IND_DATA_8] = 0x70,
- [REG_IND_DATA_CHECK] = 0x72,
- [REG_IND_DATA_HI] = 0x71,
- [REG_IND_DATA_LO] = 0x75,
- [REG_IND_MIB_CHECK] = 0x74,
- [REG_IND_BYTE] = 0xA0,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x07,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x08,
- [P_SPEED_STATUS] = 0x09,
- [S_TAIL_TAG_CTRL] = 0x0C,
-};
-
-static const u32 ksz8795_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(7),
- [SW_TAIL_TAG_ENABLE] = BIT(1),
- [MIB_COUNTER_OVERFLOW] = BIT(6),
- [MIB_COUNTER_VALID] = BIT(5),
- [VLAN_TABLE_FID] = GENMASK(6, 0),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
- [VLAN_TABLE_VALID] = BIT(12),
- [STATIC_MAC_TABLE_VALID] = BIT(21),
- [STATIC_MAC_TABLE_USE_FID] = BIT(23),
- [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
-};
-
-static const u8 ksz8795_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 7,
- [VLAN_TABLE] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 24,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 29,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 27,
- [DYNAMIC_MAC_SRC_PORT] = 24,
-};
-
-static const u8 ksz8863_regs[] = {
- [REG_IND_CTRL_0] = 0x79,
- [REG_IND_DATA_8] = 0x7B,
- [REG_IND_DATA_CHECK] = 0x7B,
- [REG_IND_DATA_HI] = 0x7C,
- [REG_IND_DATA_LO] = 0x80,
- [REG_IND_MIB_CHECK] = 0x80,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x0C,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x0E,
- [P_SPEED_STATUS] = 0x0F,
- [S_TAIL_TAG_CTRL] = 0x03,
-};
-
-static const u32 ksz8863_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(3),
- [SW_TAIL_TAG_ENABLE] = BIT(6),
- [MIB_COUNTER_OVERFLOW] = BIT(7),
- [MIB_COUNTER_VALID] = BIT(6),
- [VLAN_TABLE_FID] = GENMASK(15, 12),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
- [VLAN_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_USE_FID] = BIT(21),
- [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
-};
-
-static u8 ksz8863_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 22,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 24,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 24,
- [DYNAMIC_MAC_SRC_PORT] = 20,
-};
-
-static bool ksz_is_ksz88x3(struct ksz_device *dev)
-{
- return dev->chip_id == 0x8830;
-}
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -145,11 +40,12 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
int ret = 0;
+ regs = dev->info->regs;
+
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
@@ -162,7 +58,7 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
return ret;
}
-static int ksz8_reset_switch(struct ksz_device *dev)
+int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
/* reset switch */
@@ -213,18 +109,17 @@ static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
true);
}
-static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
ctrl_addr = addr + dev->info->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -252,16 +147,15 @@ static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
addr -= dev->info->reg_mib_cnt;
ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
@@ -305,13 +199,14 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
u32 *last = (u32 *)dropped;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u32 cur;
+ regs = dev->info->regs;
+
addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
@@ -334,8 +229,8 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
}
}
-static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
if (ksz_is_ksz88x3(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -343,7 +238,7 @@ static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
}
-static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
if (ksz_is_ksz88x3(dev))
return;
@@ -358,7 +253,7 @@ static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
-static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
+void ksz8_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
@@ -392,10 +287,11 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ regs = dev->info->regs;
+
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
@@ -406,10 +302,11 @@ static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ regs = dev->info->regs;
+
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
@@ -420,13 +317,12 @@ static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
- struct ksz8 *ksz8 = dev->priv;
int timeout = 100;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
do {
ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
@@ -447,22 +343,20 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
return 0;
}
-static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
- u8 *mac_addr, u8 *fid, u8 *src_port,
- u8 *timestamp, u16 *entries)
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u8 data;
int rc;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
@@ -512,17 +406,16 @@ static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
return rc;
}
-static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
data_hi = data >> 32;
@@ -551,17 +444,16 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
return -ENXIO;
}
-static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
data_lo = ((u32)alu->mac[2] << 24) |
((u32)alu->mac[3] << 16) |
@@ -587,12 +479,11 @@ static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
u8 *member, u8 *valid)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*fid = vlan & masks[VLAN_TABLE_FID];
*member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
@@ -603,12 +494,11 @@ static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
u16 *vlan)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*vlan = fid;
*vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
@@ -618,12 +508,11 @@ static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
u64 data;
int i;
- shifts = ksz8->shifts;
+ shifts = dev->info->shifts;
ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= 4;
@@ -663,16 +552,17 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, link;
- const u8 *regs = ksz8->regs;
int processed = true;
+ const u16 *regs;
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ regs = dev->info->regs;
+
switch (reg) {
case MII_BMCR:
ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
@@ -786,13 +676,14 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
*val = data;
}
-static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, data;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u8 p = phy;
+ regs = dev->info->regs;
+
switch (reg) {
case MII_BMCR:
@@ -898,30 +789,7 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
}
}
-static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- struct ksz_device *dev = ds->priv;
-
- /* ksz88x3 uses the same tag schema as KSZ9893 */
- return ksz_is_ksz88x3(dev) ?
- DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
-}
-
-static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
-{
- /* Silicon Errata Sheet (DS80000830A):
- * Port 1 does not work with LinkMD Cable-Testing.
- * Port 1 does not respond to received PAUSE control frames.
- */
- if (!port)
- return MICREL_KSZ8_P1_ERRATA;
-
- return 0;
-}
-
-static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
@@ -931,16 +799,14 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
}
-static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
-}
-
-static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
struct ksz_port *p;
+ const u16 *regs;
+
+ regs = dev->info->regs;
if ((uint)port < dev->info->port_cnt) {
first = port;
@@ -954,9 +820,9 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
p = &dev->ports[index];
if (!p->on)
continue;
- ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
+ ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL,
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
@@ -965,15 +831,113 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
if (!p->on)
continue;
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
}
}
-static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
- struct netlink_ext_ack *extack)
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 member;
+ struct alu_struct alu;
+
+ do {
+ alu.is_static = false;
+ ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
+ &timestamp, &entries);
+ if (!ret && (member & BIT(port))) {
+ ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (ret)
+ break;
+ }
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
+ return ret;
+}
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ struct alu_struct alu;
+ int index;
+ int empty = 0;
+
+ alu.port_forward = 0;
+ for (index = 0; index < dev->info->num_statics; index++) {
+ if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
+ break;
+ /* Remember the first empty entry. */
+ } else if (!empty) {
+ empty = index + 1;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics && !empty)
+ return -ENOSPC;
+
+ /* add entry */
+ if (index == dev->info->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, mdb->addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (mdb->vid) {
+ alu.is_use_fid = true;
+
+ /* Need a way to map VID to FID. */
+ alu.fid = mdb->vid;
+ }
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+ return 0;
+}
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ struct alu_struct alu;
+ int index;
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
+ break;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics)
+ goto exit;
+
+ /* clear port */
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+exit:
+ return 0;
+}
+
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+{
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
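
ksz8_mdb_add() above walks the static MAC table looking for an existing entry for the group address while remembering the first empty slot, stored as index + 1 so that 0 can mean "none seen yet". A small self-contained sketch of that find-or-allocate loop shape, with hypothetical entry/table names:

	#include <stdio.h>
	#include <string.h>

	struct entry {		/* hypothetical stand-in for a static MAC entry */
		int valid;
		unsigned char mac[6];
	};

	/* Return the slot holding @mac, else the first free slot, else -1 if full. */
	static int find_or_alloc(struct entry *tab, int n, const unsigned char *mac)
	{
		int empty = 0;	/* first empty slot + 1; 0 means "none seen yet" */
		int i;

		for (i = 0; i < n; i++) {
			if (tab[i].valid) {
				if (!memcmp(tab[i].mac, mac, 6))
					return i;	/* already present */
			} else if (!empty) {
				empty = i + 1;
			}
		}
		return empty ? empty - 1 : -1;
	}

	int main(void)
	{
		struct entry tab[4] = { { .valid = 1, .mac = { 1, 2, 3, 4, 5, 6 } } };
		unsigned char mac[6] = { 1, 2, 3, 4, 5, 9 };

		printf("slot %d\n", find_or_alloc(tab, 4, mac));	/* 1: first free */
		return 0;
	}
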
@@ -998,12 +962,11 @@ static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
}
}
-static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;
@@ -1071,10 +1034,9 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
u16 data, pvid;
u8 fid, member, valid;
@@ -1104,12 +1066,10 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress, struct netlink_ext_ack *extack)
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (ingress) {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
@@ -1128,10 +1088,9 @@ static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
u8 data;
if (mirror->ingress) {
@@ -1152,7 +1111,6 @@ static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
- u8 data8;
if (!p->interface && dev->compat_interface) {
dev_warn(dev->dev,
@@ -1161,50 +1119,15 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
port);
p->interface = dev->compat_interface;
}
-
- /* Configure MII interface for proper network communication. */
- ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
- data8 &= ~PORT_INTERFACE_TYPE;
- data8 &= ~PORT_GMII_1GPS_MODE;
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- data8 |= PORT_INTERFACE_RMII;
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_GMII;
- p->phydev.speed = SPEED_1000;
- break;
- default:
- data8 &= ~PORT_RGMII_ID_IN_ENABLE;
- data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IN_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_OUT_ENABLE;
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_RGMII;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
- p->phydev.duplex = 1;
}
-static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
u8 member;
- masks = ksz8->masks;
+ masks = dev->info->masks;
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
@@ -1234,17 +1157,17 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz8_cfg_port_member(dev, port, member);
}
-static void ksz8_config_cpu_port(struct dsa_switch *ds)
+void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
struct ksz_port *p;
const u32 *masks;
+ const u16 *regs;
u8 remote;
int i;
- masks = ksz8->masks;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
/* Switch marks the maximum frame with extra byte as oversize. */
ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
@@ -1258,7 +1181,7 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
if (i == dev->phy_port_cnt)
@@ -1272,15 +1195,15 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
continue;
if (!ksz_is_ksz88x3(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
- if (remote & PORT_FIBER_MODE)
+ if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
}
if (p->fiber)
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- true);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, true);
else
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- false);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, false);
}
}
@@ -1301,22 +1224,26 @@ static int ksz8_handle_global_errata(struct dsa_switch *ds)
return ret;
}
-static int ksz8_setup(struct dsa_switch *ds)
+int ksz8_enable_stp_addr(struct ksz_device *dev)
{
- struct ksz_device *dev = ds->priv;
struct alu_struct alu;
- int i, ret = 0;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->info->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->info->cpu_ports;
+
+ ksz8_w_sta_mac_table(dev, 0, &alu);
+
+ return 0;
+}
- ret = ksz8_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
+int ksz8_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
@@ -1335,10 +1262,6 @@ static int ksz8_setup(struct dsa_switch *ds)
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
- ksz8_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
-
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
@@ -1346,38 +1269,15 @@ static int ksz8_setup(struct dsa_switch *ds)
if (!ksz_is_ksz88x3(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- /* Setup STP address for STP operation. */
- memset(&alu, 0, sizeof(alu));
- ether_addr_copy(alu.mac, eth_stp_addr);
- alu.is_static = true;
- alu.is_override = true;
- alu.port_forward = dev->info->cpu_ports;
-
- ksz8_w_sta_mac_table(dev, 0, &alu);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
return ksz8_handle_global_errata(ds);
}
-static void ksz8_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- struct ksz_device *dev = ds->priv;
-
- ksz_phylink_get_caps(ds, port, config);
-
config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
@@ -1393,102 +1293,17 @@ static void ksz8_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities |= MAC_ASYM_PAUSE;
}
-static const struct dsa_switch_ops ksz8_switch_ops = {
- .get_tag_protocol = ksz8_get_tag_protocol,
- .get_phy_flags = ksz8_sw_get_phy_flags,
- .setup = ksz8_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .phylink_get_caps = ksz8_get_caps,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz8_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz8_port_vlan_filtering,
- .port_vlan_add = ksz8_port_vlan_add,
- .port_vlan_del = ksz8_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz8_port_mirror_add,
- .port_mirror_del = ksz8_port_mirror_del,
-};
-
-static u32 ksz8_get_port_addr(int port, int offset)
+u32 ksz8_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz8_switch_detect(struct ksz_device *dev)
+int ksz8_switch_init(struct ksz_device *dev)
{
- u8 id1, id2;
- u16 id16;
- int ret;
-
- /* read chip id */
- ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
- if (ret)
- return ret;
-
- id1 = id16 >> 8;
- id2 = id16 & SW_CHIP_ID_M;
-
- switch (id1) {
- case KSZ87_FAMILY_ID:
- if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
- return -ENODEV;
-
- if (id2 == CHIP_ID_95) {
- u8 val;
-
- id2 = 0x95;
- ksz_read8(dev, REG_PORT_STATUS_0, &val);
- if (val & PORT_FIBER_MODE)
- id2 = 0x65;
- } else if (id2 == CHIP_ID_94) {
- id2 = 0x94;
- }
- break;
- case KSZ88_FAMILY_ID:
- if (id2 != CHIP_ID_63)
- return -ENODEV;
- break;
- default:
- dev_err(dev->dev, "invalid family id: %d\n", id1);
- return -ENODEV;
- }
- id16 &= ~0xff;
- id16 |= id2;
- dev->chip_id = id16;
-
- return 0;
-}
-
-static int ksz8_switch_init(struct ksz_device *dev)
-{
- struct ksz8 *ksz8 = dev->priv;
-
- dev->ds->ops = &ksz8_switch_ops;
-
dev->cpu_port = fls(dev->info->cpu_ports) - 1;
dev->phy_port_cnt = dev->info->port_cnt - 1;
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
- if (ksz_is_ksz88x3(dev)) {
- ksz8->regs = ksz8863_regs;
- ksz8->masks = ksz8863_masks;
- ksz8->shifts = ksz8863_shifts;
- } else {
- ksz8->regs = ksz8795_regs;
- ksz8->masks = ksz8795_masks;
- ksz8->shifts = ksz8795_shifts;
- }
-
/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
*/
@@ -1502,37 +1317,11 @@ static int ksz8_switch_init(struct ksz_device *dev)
return 0;
}
-static void ksz8_switch_exit(struct ksz_device *dev)
+void ksz8_switch_exit(struct ksz_device *dev)
{
ksz8_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz8_dev_ops = {
- .get_port_addr = ksz8_get_port_addr,
- .cfg_port_member = ksz8_cfg_port_member,
- .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
- .port_setup = ksz8_port_setup,
- .r_phy = ksz8_r_phy,
- .w_phy = ksz8_w_phy,
- .r_dyn_mac_table = ksz8_r_dyn_mac_table,
- .r_sta_mac_table = ksz8_r_sta_mac_table,
- .w_sta_mac_table = ksz8_w_sta_mac_table,
- .r_mib_cnt = ksz8_r_mib_cnt,
- .r_mib_pkt = ksz8_r_mib_pkt,
- .freeze_mib = ksz8_freeze_mib,
- .port_init_cnt = ksz8_port_init_cnt,
- .shutdown = ksz8_reset_switch,
- .detect = ksz8_switch_detect,
- .init = ksz8_switch_init,
- .exit = ksz8_switch_exit,
-};
-
-int ksz8_switch_register(struct ksz_device *dev)
-{
- return ksz_switch_register(dev, &ksz8_dev_ops);
-}
-EXPORT_SYMBOL(ksz8_switch_register);
-
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 4109433b6b6c..77487d611824 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -14,22 +14,8 @@
#define KS_PRIO_M 0x3
#define KS_PRIO_S 2
-#define REG_CHIP_ID0 0x00
-
-#define KSZ87_FAMILY_ID 0x87
-#define KSZ88_FAMILY_ID 0x88
-
-#define REG_CHIP_ID1 0x01
-
-#define SW_CHIP_ID_M 0xF0
-#define SW_CHIP_ID_S 4
#define SW_REVISION_M 0x0E
#define SW_REVISION_S 1
-#define SW_START 0x01
-
-#define CHIP_ID_94 0x60
-#define CHIP_ID_95 0x90
-#define CHIP_ID_63 0x30
#define KSZ8863_REG_SW_RESET 0x43
@@ -57,7 +43,6 @@
#define REG_SW_CTRL_2 0x04
#define UNICAST_VLAN_BOUNDARY BIT(7)
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -77,13 +62,9 @@
#define SW_FLOW_CTRL BIT(5)
#define SW_10_MBIT BIT(4)
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_CTRL_5 0x07
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_CTRL_6 0x08
#define SW_MIB_COUNTER_FLUSH BIT(7)
@@ -189,15 +170,7 @@
#define REG_PORT_5_CTRL_6 0x56
#define PORT_MII_INTERNAL_CLOCK BIT(7)
-#define PORT_GMII_1GPS_MODE BIT(6)
-#define PORT_RGMII_ID_IN_ENABLE BIT(4)
-#define PORT_RGMII_ID_OUT_ENABLE BIT(3)
#define PORT_GMII_MAC_MODE BIT(2)
-#define PORT_INTERFACE_TYPE 0x3
-#define PORT_INTERFACE_MII 0
-#define PORT_INTERFACE_RMII 1
-#define PORT_INTERFACE_GMII 2
-#define PORT_INTERFACE_RGMII 3
#define REG_PORT_1_CTRL_7 0x17
#define REG_PORT_2_CTRL_7 0x27
@@ -217,8 +190,6 @@
#define REG_PORT_4_STATUS_0 0x48
/* For KSZ8765. */
-#define PORT_FIBER_MODE BIT(7)
-
#define PORT_REMOTE_ASYM_PAUSE BIT(5)
#define PORT_REMOTE_SYM_PAUSE BIT(4)
#define PORT_REMOTE_100BTX_FD BIT(3)
@@ -322,7 +293,6 @@
#define REG_PORT_CTRL_5 0x05
-#define REG_PORT_STATUS_0 0x08
#define REG_PORT_STATUS_1 0x09
#define REG_PORT_LINK_MD_CTRL 0x0A
#define REG_PORT_LINK_MD_RESULT 0x0B
@@ -788,7 +758,6 @@
#define P_TAG_CTRL REG_PORT_CTRL_0
#define P_MIRROR_CTRL REG_PORT_CTRL_1
#define P_802_1P_CTRL REG_PORT_CTRL_2
-#define P_STP_CTRL REG_PORT_CTRL_2
#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
@@ -813,12 +782,6 @@
#define REG_IND_EEE_GLOB2_LO 0x34
#define REG_IND_EEE_GLOB2_HI 0x35
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
-
/**
* MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
* MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index b6f99e641dca..5247fdfb964d 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -26,11 +26,9 @@ static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
struct mdio_device *mdev;
u8 reg = *(u8 *)reg_buf;
u8 *val = val_buf;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < val_len; i++) {
@@ -55,13 +53,11 @@ static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
- struct ksz8 *ksz8;
int i, ret = 0;
u32 reg;
u8 *val;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
val = (u8 *)(data + 4);
reg = *(u32 *)data;
@@ -142,17 +138,10 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
{
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int ret;
int i;
- ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = mdiodev;
-
- dev = ksz_switch_alloc(&mdiodev->dev, ksz8);
+ dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
if (!dev)
return -ENOMEM;
@@ -174,7 +163,7 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
if (mdiodev->dev.platform_data)
dev->pdata = mdiodev->dev.platform_data;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index ab40b700cf1a..e4f446db0ca1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -17,11 +17,7 @@
#include "ksz9477_reg.h"
#include "ksz_common.h"
-
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define NEW_XMII BIT(1)
-#define IS_9893 BIT(2)
+#include "ksz9477.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
@@ -47,9 +43,8 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
-static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
- struct ksz_device *dev = ds->priv;
u16 frame_size, max_frame = 0;
int i;
@@ -65,7 +60,7 @@ static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
REG_SW_MTU_MASK, max_frame);
}
-static int ksz9477_max_mtu(struct dsa_switch *ds, int port)
+int ksz9477_max_mtu(struct ksz_device *dev, int port)
{
return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
@@ -175,7 +170,7 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
-static int ksz9477_reset_switch(struct ksz_device *dev)
+int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
u32 data32;
@@ -198,12 +193,6 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], REG_SW_MAC_CTRL_2,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
data8 = SW_ENABLE_REFCLKO;
if (dev->synclko_disable)
data8 = 0;
@@ -214,8 +203,7 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
return 0;
}
-static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt)
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
struct ksz_port *p = &dev->ports[port];
unsigned int val;
@@ -242,14 +230,14 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
*cnt += data;
}
-static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
-static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
struct ksz_port *p = &dev->ports[port];
@@ -263,7 +251,7 @@ static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -276,21 +264,8 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
mutex_unlock(&mib->cnt_mutex);
}
-static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
+void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
- struct ksz_device *dev = ds->priv;
-
- if (dev->features & IS_9893)
- proto = DSA_TAG_PROTO_KSZ9893;
- return proto;
-}
-
-static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
-{
- struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
/* No real PHY after this. Simulate the PHY.
@@ -335,40 +310,30 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
}
- return val;
+ *data = val;
}
-static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
- u16 val)
+void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- struct ksz_device *dev = ds->priv;
-
/* No real PHY after this. */
if (addr >= dev->phy_port_cnt)
- return 0;
+ return;
/* No gigabit support. Do not write to this register. */
if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return 0;
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ return;
- return 0;
+ ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
-static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
- u8 member)
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}
-static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state)
-{
- ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
-}
-
-static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
+ const u16 *regs = dev->info->regs;
u8 data;
regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
@@ -377,24 +342,21 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
if (port < dev->info->port_cnt) {
/* flush individual port */
- ksz_pread8(dev, port, P_STP_CTRL, &data);
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
if (!(data & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, port, P_STP_CTRL,
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL],
data | PORT_LEARN_DISABLE);
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
} else {
/* flush all */
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
}
}
-static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -408,11 +370,10 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
int err;
@@ -445,10 +406,9 @@ static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
u16 pvid;
@@ -479,11 +439,9 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -537,11 +495,9 @@ exit:
return ret;
}
-static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -628,10 +584,9 @@ static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
alu->mac[5] = alu_table[3] & 0xFF;
}
-static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
int ret = 0;
u32 ksz_data;
u32 alu_table[4];
@@ -658,6 +613,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
@@ -680,17 +638,20 @@ exit:
return ret;
}
-static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
u32 mac_hi, mac_lo;
int err = 0;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
@@ -699,8 +660,8 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -744,7 +705,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -756,17 +717,20 @@ exit:
return err;
}
-static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
int ret = 0;
u32 mac_hi, mac_lo;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
@@ -775,8 +739,8 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -818,7 +782,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -832,11 +796,10 @@ exit:
return ret;
}
-static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress, struct netlink_ext_ack *extack)
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u8 data;
int p;
@@ -872,10 +835,9 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
bool in_use = false;
u8 data;
int p;
@@ -902,142 +864,18 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}
-static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
-{
- bool gbit;
-
- if (dev->features & NEW_XMII)
- gbit = !(data & PORT_MII_NOT_1GBIT);
- else
- gbit = !!(data & PORT_MII_1000MBIT_S1);
- return gbit;
-}
-
-static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data)
-{
- if (dev->features & NEW_XMII) {
- if (gbit)
- *data &= ~PORT_MII_NOT_1GBIT;
- else
- *data |= PORT_MII_NOT_1GBIT;
- } else {
- if (gbit)
- *data |= PORT_MII_1000MBIT_S1;
- else
- *data &= ~PORT_MII_1000MBIT_S1;
- }
-}
-
-static int ksz9477_get_xmii(struct ksz_device *dev, u8 data)
-{
- int mode;
-
- if (dev->features & NEW_XMII) {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL:
- mode = 0;
- break;
- case PORT_RMII_SEL:
- mode = 1;
- break;
- case PORT_GMII_SEL:
- mode = 2;
- break;
- default:
- mode = 3;
- }
- } else {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL_S1:
- mode = 0;
- break;
- case PORT_RMII_SEL_S1:
- mode = 1;
- break;
- case PORT_GMII_SEL_S1:
- mode = 2;
- break;
- default:
- mode = 3;
- }
- }
- return mode;
-}
-
-static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data)
-{
- u8 xmii;
-
- if (dev->features & NEW_XMII) {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL;
- break;
- case 1:
- xmii = PORT_RMII_SEL;
- break;
- case 2:
- xmii = PORT_GMII_SEL;
- break;
- default:
- xmii = PORT_RGMII_SEL;
- break;
- }
- } else {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL_S1;
- break;
- case 1:
- xmii = PORT_RMII_SEL_S1;
- break;
- case 2:
- xmii = PORT_GMII_SEL_S1;
- break;
- default:
- xmii = PORT_RGMII_SEL_S1;
- break;
- }
- }
- *data &= ~PORT_MII_SEL_M;
- *data |= xmii;
-}
-
static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
phy_interface_t interface;
bool gbit;
- int mode;
- u8 data8;
if (port < dev->phy_port_cnt)
return PHY_INTERFACE_MODE_NA;
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- gbit = ksz9477_get_gbit(dev, data8);
- mode = ksz9477_get_xmii(dev, data8);
- switch (mode) {
- case 2:
- interface = PHY_INTERFACE_MODE_GMII;
- if (gbit)
- break;
- fallthrough;
- case 0:
- interface = PHY_INTERFACE_MODE_MII;
- break;
- case 1:
- interface = PHY_INTERFACE_MODE_RMII;
- break;
- default:
- interface = PHY_INTERFACE_MODE_RGMII;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_TXID;
- if (data8 & PORT_RGMII_ID_IG_ENABLE) {
- interface = PHY_INTERFACE_MODE_RGMII_RXID;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_ID;
- }
- break;
- }
+
+ gbit = ksz_get_gbit(dev, port);
+
+ interface = ksz_get_xmii(dev, port, gbit);
+
return interface;
}
@@ -1097,21 +935,21 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}
-static void ksz9477_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- ksz_phylink_get_caps(ds, port, config);
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
- config->mac_capabilities = MAC_10 | MAC_100 | MAC_1000FD |
- MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+ if (dev->features & GBIT_SUPPORT)
+ config->mac_capabilities |= MAC_1000FD;
}
-static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
- u8 data8, member;
u16 data16;
+ u8 member;
/* enable tag tail for host port */
if (cpu_port)
@@ -1151,44 +989,6 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
true);
-
- /* configure MAC to 1G & RGMII mode */
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- ksz9477_set_xmii(dev, 0, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- ksz9477_set_xmii(dev, 1, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- ksz9477_set_xmii(dev, 2, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- p->phydev.speed = SPEED_1000;
- break;
- default:
- ksz9477_set_xmii(dev, 3, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- data8 &= ~PORT_RGMII_ID_IG_ENABLE;
- data8 &= ~PORT_RGMII_ID_EG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_EG_ENABLE;
- /* On KSZ9893, disable RGMII in-band status support */
- if (dev->features & IS_9893)
- data8 &= ~PORT_MII_MAC_MODE;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
- p->phydev.duplex = 1;
}
if (cpu_port)
@@ -1203,7 +1003,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
-static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
@@ -1260,7 +1060,7 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
continue;
p = &dev->ports[i];
- ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
p->on = 1;
if (i < dev->phy_port_cnt)
p->phy = 1;
@@ -1273,22 +1073,44 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
}
}
-static int ksz9477_setup(struct dsa_switch *ds)
+int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
- struct ksz_device *dev = ds->priv;
- int ret = 0;
+ const u32 *masks;
+ u32 data;
+ int ret;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->info->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ masks = dev->info->masks;
- ret = ksz9477_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
+ /* Enable Reserved multicast table */
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
+
+ /* Set the Override bit for forwarding BPDU packet to CPU */
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
+ ALU_V_OVERRIDE | BIT(dev->cpu_port));
+ if (ret < 0)
+ return ret;
+
+ data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
return ret;
}
+ return 0;
+}
+
+int ksz9477_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
true);
@@ -1305,69 +1127,27 @@ static int ksz9477_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ksz9477_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
-
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
-static const struct dsa_switch_ops ksz9477_switch_ops = {
- .get_tag_protocol = ksz9477_get_tag_protocol,
- .setup = ksz9477_setup,
- .phy_read = ksz9477_phy_read16,
- .phy_write = ksz9477_phy_write16,
- .phylink_mac_link_down = ksz_mac_link_down,
- .phylink_get_caps = ksz9477_get_caps,
- .port_enable = ksz_enable_port,
- .get_strings = ksz_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz9477_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_add = ksz9477_port_vlan_add,
- .port_vlan_del = ksz9477_port_vlan_del,
- .port_fdb_dump = ksz9477_port_fdb_dump,
- .port_fdb_add = ksz9477_port_fdb_add,
- .port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_add = ksz9477_port_mdb_add,
- .port_mdb_del = ksz9477_port_mdb_del,
- .port_mirror_add = ksz9477_port_mirror_add,
- .port_mirror_del = ksz9477_port_mirror_del,
- .get_stats64 = ksz_get_stats64,
- .port_change_mtu = ksz9477_change_mtu,
- .port_max_mtu = ksz9477_max_mtu,
-};
-
-static u32 ksz9477_get_port_addr(int port, int offset)
+u32 ksz9477_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz9477_switch_detect(struct ksz_device *dev)
+int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
- u8 id_hi;
- u8 id_lo;
- u32 id32;
int ret;
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
/* turn off SPI DO Edge select */
ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
if (ret)
@@ -1378,10 +1158,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
if (ret)
return ret;
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
if (ret)
return ret;
@@ -1392,12 +1168,7 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
/* Default capability is gigabit capable. */
dev->features = GBIT_SUPPORT;
- dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
- id_hi = (u8)(id32 >> 16);
- id_lo = (u8)(id32 >> 8);
- if ((id_lo & 0xf) == 3) {
- /* Chip is from KSZ9893 design. */
- dev_info(dev->dev, "Found KSZ9893\n");
+ if (dev->chip_id == KSZ9893_CHIP_ID) {
dev->features |= IS_9893;
/* Chip does not support gigabit. */
@@ -1405,81 +1176,19 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
dev->features &= ~GBIT_SUPPORT;
dev->phy_port_cnt = 2;
} else {
- dev_info(dev->dev, "Found KSZ9477 or compatible\n");
- /* Chip uses new XMII register definitions. */
- dev->features |= NEW_XMII;
-
/* Chip does not support gigabit. */
if (!(data8 & SW_GIGABIT_ABLE))
dev->features &= ~GBIT_SUPPORT;
}
- /* Change chip id to known ones so it can be matched against them. */
- id32 = (id_hi << 16) | (id_lo << 8);
-
- dev->chip_id = id32;
-
return 0;
}
-static int ksz9477_switch_init(struct ksz_device *dev)
-{
- dev->ds->ops = &ksz9477_switch_ops;
-
- dev->port_mask = (1 << dev->info->port_cnt) - 1;
-
- return 0;
-}
-
-static void ksz9477_switch_exit(struct ksz_device *dev)
+void ksz9477_switch_exit(struct ksz_device *dev)
{
ksz9477_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz9477_dev_ops = {
- .get_port_addr = ksz9477_get_port_addr,
- .cfg_port_member = ksz9477_cfg_port_member,
- .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
- .port_setup = ksz9477_port_setup,
- .r_mib_cnt = ksz9477_r_mib_cnt,
- .r_mib_pkt = ksz9477_r_mib_pkt,
- .r_mib_stat64 = ksz_r_mib_stats64,
- .freeze_mib = ksz9477_freeze_mib,
- .port_init_cnt = ksz9477_port_init_cnt,
- .shutdown = ksz9477_reset_switch,
- .detect = ksz9477_switch_detect,
- .init = ksz9477_switch_init,
- .exit = ksz9477_switch_exit,
-};
-
-int ksz9477_switch_register(struct ksz_device *dev)
-{
- int ret, i;
- struct phy_device *phydev;
-
- ret = ksz_switch_register(dev, &ksz9477_dev_ops);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->phy_port_cnt; ++i) {
- if (!dsa_is_user_port(dev->ds, i))
- continue;
-
- phydev = dsa_to_port(dev->ds, i)->slave->phydev;
-
- /* The MAC actually cannot run in 1000 half-duplex mode. */
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* PHY does not support gigabit. */
- if (!(dev->features & GBIT_SUPPORT))
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
- }
- return ret;
-}
-EXPORT_SYMBOL(ksz9477_switch_register);
-
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
new file mode 100644
index 000000000000..cd278b307b3c
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 series Header file
+ *
+ * Copyright (C) 2017-2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_H
+#define __KSZ9477_H
+
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz9477_setup(struct dsa_switch *ds);
+u32 ksz9477_get_port_addr(int port, int offset);
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port);
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_get_stp_reg(void);
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz9477_max_mtu(struct ksz_device *dev, int port);
+void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_enable_stp_addr(struct ksz_device *dev);
+int ksz9477_reset_switch(struct ksz_device *dev);
+int ksz9477_dsa_init(struct ksz_device *dev);
+int ksz9477_switch_init(struct ksz_device *dev);
+void ksz9477_switch_exit(struct ksz_device *dev);
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index faa3163c86b0..99966514d444 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -41,7 +41,7 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
- ret = ksz9477_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -71,8 +71,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 7a2c8d4767af..ddf99d1e4bbd 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -25,7 +25,6 @@
#define REG_CHIP_ID2__1 0x0002
-#define CHIP_ID_63 0x63
#define CHIP_ID_66 0x66
#define CHIP_ID_67 0x67
#define CHIP_ID_77 0x77
@@ -166,7 +165,6 @@
#define SW_DOUBLE_TAG BIT(7)
#define SW_RESET BIT(1)
-#define SW_START BIT(0)
#define REG_SW_MAC_ADDR_0 0x0302
#define REG_SW_MAC_ADDR_1 0x0303
@@ -266,7 +264,6 @@
#define REG_SW_MAC_CTRL_1 0x0331
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -277,13 +274,9 @@
#define REG_SW_MAC_CTRL_2 0x0332
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_MAC_CTRL_3 0x0333
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_MAC_CTRL_4 0x0334
#define SW_PASS_PAUSE BIT(3)
@@ -426,12 +419,9 @@
#define REG_SW_ALU_STAT_CTRL__4 0x041C
-#define ALU_STAT_INDEX_M (BIT(4) - 1)
-#define ALU_STAT_INDEX_S 16
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
#define ALU_RESV_MCAST_ADDR BIT(1)
-#define ALU_STAT_READ BIT(0)
#define REG_SW_ALU_VAL_A 0x0420
@@ -1185,35 +1175,11 @@
#define PORT_LINK_STATUS_FAIL BIT(0)
/* 3 - xMII */
-#define REG_PORT_XMII_CTRL_0 0x0300
-
#define PORT_SGMII_SEL BIT(7)
-#define PORT_MII_FULL_DUPLEX BIT(6)
-#define PORT_MII_100MBIT BIT(4)
#define PORT_GRXC_ENABLE BIT(0)
-#define REG_PORT_XMII_CTRL_1 0x0301
-
#define PORT_RMII_CLK_SEL BIT(7)
-/* S1 */
-#define PORT_MII_1000MBIT_S1 BIT(6)
-/* S2 */
-#define PORT_MII_NOT_1GBIT BIT(6)
#define PORT_MII_SEL_EDGE BIT(5)
-#define PORT_RGMII_ID_IG_ENABLE BIT(4)
-#define PORT_RGMII_ID_EG_ENABLE BIT(3)
-#define PORT_MII_MAC_MODE BIT(2)
-#define PORT_MII_SEL_M 0x3
-/* S1 */
-#define PORT_MII_SEL_S1 0x0
-#define PORT_RMII_SEL_S1 0x1
-#define PORT_GMII_SEL_S1 0x2
-#define PORT_RGMII_SEL_S1 0x3
-/* S2 */
-#define PORT_RGMII_SEL 0x0
-#define PORT_RMII_SEL 0x1
-#define PORT_GMII_SEL 0x2
-#define PORT_MII_SEL 0x3
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1269,8 +1235,6 @@
/* 5 - MIB Counters */
#define REG_PORT_MIB_CTRL_STAT__4 0x0500
-#define MIB_COUNTER_OVERFLOW BIT(31)
-#define MIB_COUNTER_VALID BIT(30)
#define MIB_COUNTER_READ BIT(25)
#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
@@ -1629,11 +1593,7 @@
#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
-#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE
#define P_PHY_CTRL REG_PORT_PHY_CTRL
-#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL
-#define P_LINK_STATUS REG_PORT_PHY_STATUS
-#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL
#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
@@ -1653,12 +1613,6 @@
#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
-
#define KSZ9477_MAX_FRAME_SIZE 9000
#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
deleted file mode 100644
index 1bc8b0cbe458..000000000000
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip KSZ9477 series register access through SPI
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_common.h"
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_ALIGN 3
-#define SPI_TURNAROUND_SHIFT 5
-
-KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
- SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
-
-static int ksz9477_spi_probe(struct spi_device *spi)
-{
- struct regmap_config rc;
- struct ksz_device *dev;
- int i, ret;
-
- dev = ksz_switch_alloc(&spi->dev, spi);
- if (!dev)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- rc = ksz9477_regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz9477_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static void ksz9477_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static void ksz9477_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz9477_dt_ids[] = {
- {
- .compatible = "microchip,ksz9477",
- .data = &ksz_switch_chips[KSZ9477]
- },
- {
- .compatible = "microchip,ksz9897",
- .data = &ksz_switch_chips[KSZ9897]
- },
- {
- .compatible = "microchip,ksz9893",
- .data = &ksz_switch_chips[KSZ9893]
- },
- {
- .compatible = "microchip,ksz9563",
- .data = &ksz_switch_chips[KSZ9893]
- },
- {
- .compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
- },
- {
- .compatible = "microchip,ksz9567",
- .data = &ksz_switch_chips[KSZ9567]
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
-
-static const struct spi_device_id ksz9477_spi_ids[] = {
- { "ksz9477" },
- { "ksz9897" },
- { "ksz9893" },
- { "ksz9563" },
- { "ksz8563" },
- { "ksz9567" },
- { },
-};
-MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
-
-static struct spi_driver ksz9477_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
- },
- .id_table = ksz9477_spi_ids,
- .probe = ksz9477_spi_probe,
- .remove = ksz9477_spi_remove,
- .shutdown = ksz9477_spi_shutdown,
-};
-
-module_spi_driver(ksz9477_spi_driver);
-
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 9ca8c8d7740f..ed7d137cba99 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -16,10 +16,14 @@
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz8.h"
+#include "ksz9477.h"
+#include "lan937x.h"
#define MIB_COUNTER_NUM 0x20
@@ -138,6 +142,276 @@ static const struct ksz_mib_names ksz9477_mib_names[] = {
{ 0x83, "tx_discards" },
};
+static const struct ksz_dev_ops ksz8_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+};
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .setup = ksz9477_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .r_phy = ksz9477_r_phy,
+ .w_phy = ksz9477_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = ksz9477_get_caps,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = ksz9477_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .config_cpu_port = ksz9477_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+static const struct ksz_dev_ops lan937x_dev_ops = {
+ .setup = lan937x_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = lan937x_port_setup,
+ .r_phy = lan937x_r_phy,
+ .w_phy = lan937x_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = lan937x_phylink_get_caps,
+ .setup_rgmii_delay = lan937x_setup_rgmii_delay,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = lan937x_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .config_cpu_port = lan937x_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = lan937x_reset_switch,
+ .init = lan937x_switch_init,
+ .exit = lan937x_switch_exit,
+};
+
+static const u16 ksz8795_regs[] = {
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+ [P_XMII_CTRL_0] = 0x06,
+ [P_XMII_CTRL_1] = 0x56,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(5),
+};
+
+static const u8 ksz8795_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 0,
+ [P_MII_10MBIT] = 1,
+ [P_MII_FULL_DUPLEX] = 0,
+ [P_MII_HALF_DUPLEX] = 1,
+};
+
+static const u8 ksz8795_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 3,
+ [P_GMII_SEL] = 2,
+ [P_RMII_SEL] = 1,
+ [P_MII_SEL] = 0,
+ [P_GMII_1GBIT] = 1,
+ [P_GMII_NOT_1GBIT] = 0,
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
+static const u16 ksz8863_regs[] = {
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 24,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz9477_regs[] = {
+ [P_STP_CTRL] = 0x0B04,
+ [S_START_CTRL] = 0x0300,
+ [S_BROADCAST_CTRL] = 0x0332,
+ [S_MULTICAST_CTRL] = 0x0331,
+ [P_XMII_CTRL_0] = 0x0300,
+ [P_XMII_CTRL_1] = 0x0301,
+};
+
+static const u32 ksz9477_masks[] = {
+ [ALU_STAT_WRITE] = 0,
+ [ALU_STAT_READ] = 1,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 ksz9477_shifts[] = {
+ [ALU_STAT_INDEX] = 16,
+};
+
+static const u8 ksz9477_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 1,
+ [P_MII_10MBIT] = 0,
+ [P_MII_FULL_DUPLEX] = 1,
+ [P_MII_HALF_DUPLEX] = 0,
+};
+
+static const u8 ksz9477_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 0,
+ [P_RMII_SEL] = 1,
+ [P_GMII_SEL] = 2,
+ [P_MII_SEL] = 3,
+ [P_GMII_1GBIT] = 0,
+ [P_GMII_NOT_1GBIT] = 1,
+};
+
+static const u32 lan937x_masks[] = {
+ [ALU_STAT_WRITE] = 1,
+ [ALU_STAT_READ] = 2,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 lan937x_shifts[] = {
+ [ALU_STAT_INDEX] = 8,
+};
+
const struct ksz_chip_data ksz_switch_chips[] = {
[KSZ8795] = {
.chip_id = KSZ8795_CHIP_ID,
@@ -147,10 +421,16 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -179,10 +459,16 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -197,10 +483,16 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -215,9 +507,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
+ .ops = &ksz8_dev_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8863_regs,
+ .masks = ksz8863_masks,
+ .shifts = ksz8863_shifts,
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.internal_phy = {true, true, false},
@@ -231,10 +527,16 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true, false},
.supports_rmii = {false, false, false, false,
@@ -253,10 +555,16 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true, true},
.supports_rmii = {false, false, false, false,
@@ -275,9 +583,15 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
@@ -292,10 +606,16 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true, true},
.supports_rmii = {false, false, false, false,
@@ -314,9 +634,15 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
@@ -331,9 +657,15 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false, true, true},
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
@@ -348,9 +680,15 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
@@ -369,9 +707,15 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
@@ -390,9 +734,15 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
@@ -436,8 +786,8 @@ static int ksz_check_device_id(struct ksz_device *dev)
return 0;
}
-void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
+static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct ksz_device *dev = ds->priv;
@@ -456,23 +806,29 @@ void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->internal_phy[port])
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
}
-EXPORT_SYMBOL_GPL(ksz_phylink_get_caps);
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
{
+ struct ethtool_pause_stats *pstats;
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
+ pstats = &mib->pause_stats;
raw = (struct ksz_stats_raw *)mib->counters;
spin_lock(&mib->stats64_lock);
- stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
- stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
/* HW counters are counting bytes + FCS which is not acceptable
* for rtnl_link_stats64 interface
@@ -498,12 +854,14 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
stats->multicast = raw->rx_mcast;
stats->collisions = raw->tx_total_col;
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
spin_unlock(&mib->stats64_lock);
}
-EXPORT_SYMBOL_GPL(ksz_r_mib_stats64);
-void ksz_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s)
+static void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
{
struct ksz_device *dev = ds->priv;
struct ksz_port_mib *mib;
@@ -514,10 +872,22 @@ void ksz_get_stats64(struct dsa_switch *ds, int port,
memcpy(s, &mib->stats64, sizeof(*s));
spin_unlock(&mib->stats64_lock);
}
-EXPORT_SYMBOL_GPL(ksz_get_stats64);
-void ksz_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
+static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
{
struct ksz_device *dev = ds->priv;
int i;
@@ -530,9 +900,8 @@ void ksz_get_strings(struct dsa_switch *ds, int port,
dev->info->mib_names[i].string, ETH_GSTRING_LEN);
}
}
-EXPORT_SYMBOL_GPL(ksz_get_strings);
-void ksz_update_port_member(struct ksz_device *dev, int port)
+static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
@@ -589,7 +958,55 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
-EXPORT_SYMBOL_GPL(ksz_update_port_member);
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs;
+ int ret;
+
+ regs = dev->info->regs;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+ dev->info->num_vlans, GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = dev->dev_ops->reset(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ /* set broadcast storm protection 10% rate */
+ regmap_update_bits(dev->regmap[1], regs[S_BROADCAST_CTRL],
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ dev->dev_ops->config_cpu_port(ds);
+
+ dev->dev_ops->enable_stp_addr(dev);
+
+ regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
+ MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
+
+ ksz_init_mib_timer(dev);
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ if (dev->dev_ops->setup) {
+ ret = dev->dev_ops->setup(ds);
+ if (ret)
+ return ret;
+ }
+
+ /* start switch */
+ regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
+ SW_START, SW_START);
+
+ return 0;
+}
static void port_r_cnt(struct ksz_device *dev, int port)
{
@@ -667,9 +1084,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
}
}
-EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
@@ -678,9 +1094,8 @@ int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
return val;
}
-EXPORT_SYMBOL_GPL(ksz_phy_read16);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
@@ -688,10 +1103,25 @@ int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_phy_write16);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
+static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID) {
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+ }
+
+ return 0;
+}
+
+static void ksz_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
@@ -702,9 +1132,8 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
-EXPORT_SYMBOL_GPL(ksz_mac_link_down);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
@@ -713,9 +1142,9 @@ int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
return dev->info->mib_cnt;
}
-EXPORT_SYMBOL_GPL(ksz_sset_count);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
struct ksz_device *dev = ds->priv;
@@ -731,12 +1160,11 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
-EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload,
- struct netlink_ext_ack *extack)
+static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
@@ -744,135 +1172,83 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
*/
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-void ksz_port_fast_age(struct dsa_switch *ds, int port)
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data)
+static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 member;
- struct alu_struct alu;
-
- do {
- alu.is_static = false;
- ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
- &member, &timestamp,
- &entries);
- if (!ret && (member & BIT(port))) {
- ret = cb(alu.mac, alu.fid, alu.is_static, data);
- if (ret)
- break;
- }
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
- return ret;
+ if (!dev->dev_ops->fdb_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr,
+ u16 vid, struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int empty = 0;
-
- alu.port_forward = 0;
- for (index = 0; index < dev->info->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- /* Remember the first empty entry. */
- } else if (!empty) {
- empty = index + 1;
- }
- }
- /* no available entry */
- if (index == dev->info->num_statics && !empty)
- return -ENOSPC;
+ if (!dev->dev_ops->fdb_del)
+ return -EOPNOTSUPP;
- /* add entry */
- if (index == dev->info->num_statics) {
- index = empty - 1;
- memset(&alu, 0, sizeof(alu));
- memcpy(alu.mac, mdb->addr, ETH_ALEN);
- alu.is_static = true;
- }
- alu.port_forward |= BIT(port);
- if (mdb->vid) {
- alu.is_use_fid = true;
+ return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
+}
- /* Need a way to map VID to FID. */
- alu.fid = mdb->vid;
- }
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ksz_device *dev = ds->priv;
- return 0;
+ if (!dev->dev_ops->fdb_dump)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_dump(dev, port, cb, data);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
+static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
-
- for (index = 0; index < dev->info->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- }
- }
- /* no available entry */
- if (index == dev->info->num_statics)
- goto exit;
+ if (!dev->dev_ops->mdb_add)
+ return -EOPNOTSUPP;
- /* clear port */
- alu.port_forward &= ~BIT(port);
- if (!alu.port_forward)
- alu.is_static = false;
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ return dev->dev_ops->mdb_add(dev, port, mdb, db);
+}
-exit:
- return 0;
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+static int ksz_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
@@ -888,16 +1264,17 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_enable_port);
-void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state, int reg)
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
+ const u16 *regs;
u8 data;
- ksz_pread8(dev, port, reg, &data);
+ regs = dev->info->regs;
+
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
switch (state) {
@@ -921,14 +1298,443 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
return;
}
- ksz_pwrite8(dev, port, reg, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ksz_device *dev = ds->priv;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ8795;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ9893_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9893;
+
+ if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9897_CHIP_ID ||
+ dev->chip_id == KSZ9567_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9477;
+
+ if (is_lan937x(dev))
+ proto = DSA_TAG_PROTO_LAN937X_VALUE;
+
+ return proto;
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_filtering)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
+}
+
+static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_del(dev, port, vlan);
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mirror_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->mirror_del)
+ dev->dev_ops->mirror_del(dev, port, mirror);
+}
+
+static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->change_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->change_mtu(dev, port, mtu);
+}
+
+static int ksz_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->max_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->max_mtu(dev, port);
+}
+
+static void ksz_set_xmii(struct ksz_device *dev, int port,
+ phy_interface_t interface)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ struct ksz_port *p = &dev->ports[port];
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~(P_MII_SEL_M | P_RGMII_ID_IG_ENABLE |
+ P_RGMII_ID_EG_ENABLE);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= bitval[P_MII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= bitval[P_RMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= bitval[P_GMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ data8 |= bitval[P_RGMII_SEL];
+ /* On KSZ9893, disable RGMII in-band status support */
+ if (dev->features & IS_9893)
+ data8 &= ~P_MII_MAC_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(interface), port);
+ return;
+ }
+
+ if (p->rgmii_tx_val)
+ data8 |= P_RGMII_ID_EG_ENABLE;
+
+ if (p->rgmii_rx_val)
+ data8 |= P_RGMII_ID_IG_ENABLE;
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ phy_interface_t interface;
+ u8 data8;
+ u8 val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_MII_SEL_M, data8);
+
+ if (val == bitval[P_MII_SEL]) {
+ if (gbit)
+ interface = PHY_INTERFACE_MODE_GMII;
+ else
+ interface = PHY_INTERFACE_MODE_MII;
+ } else if (val == bitval[P_RMII_SEL]) {
+ interface = PHY_INTERFACE_MODE_RMII;
+ } else {
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ if (data8 & P_RGMII_ID_IG_ENABLE) {
+ interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_ID;
+ }
+ }
+
+ return interface;
+}
+
+static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ksz_is_ksz88x3(dev))
+ return;
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(dev->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ ksz_set_xmii(dev, port, state->interface);
+
+ if (dev->dev_ops->phylink_mac_config)
+ dev->dev_ops->phylink_mac_config(dev, port, mode, state);
+
+ if (dev->dev_ops->setup_rgmii_delay)
+ dev->dev_ops->setup_rgmii_delay(dev, port);
+}
+
+bool ksz_get_gbit(struct ksz_device *dev, int port)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ bool gbit = false;
+ u8 data8;
+ bool val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_GMII_1GBIT_M, data8);
+
+ if (val == bitval[P_GMII_1GBIT])
+ gbit = true;
+
+ return gbit;
+}
+
+static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~P_GMII_1GBIT_M;
+
+ if (gbit)
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_1GBIT]);
+ else
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_NOT_1GBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8);
+
+ data8 &= ~P_MII_100MBIT_M;
+
+ if (speed == SPEED_100)
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_100MBIT]);
+ else
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_10MBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8);
+}
+
+static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed)
+{
+ if (speed == SPEED_1000)
+ ksz_set_gbit(dev, port, true);
+ else
+ ksz_set_gbit(dev, port, false);
+
+ if (speed == SPEED_100 || speed == SPEED_10)
+ ksz_set_100_10mbit(dev, port, speed);
+}
+
+static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ u8 mask;
+ u8 val;
+
+ mask = P_MII_DUPLEX_M | masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL];
+
+ if (duplex == DUPLEX_FULL)
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_FULL_DUPLEX]);
+ else
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_HALF_DUPLEX]);
+
+ if (tx_pause)
+ val |= masks[P_MII_TX_FLOW_CTRL];
+
+ if (rx_pause)
+ val |= masks[P_MII_RX_FLOW_CTRL];
+
+ ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+
+ p = &dev->ports[port];
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ p->phydev.speed = speed;
+
+ ksz_port_set_xmii_speed(dev, port, speed);
+
+ ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+
+ if (dev->dev_ops->phylink_mac_link_up)
+ dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
+ phydev, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2;
+ u16 id16;
+ u32 id32;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
+ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
+
+ switch (id1) {
+ case KSZ87_FAMILY_ID:
+ if (id2 == KSZ87_CHIP_ID_95) {
+ u8 val;
+
+ dev->chip_id = KSZ8795_CHIP_ID;
+
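+ /* KSZ8795 and KSZ8765 report the same ID; the fiber-mode bit
+ * in the port status register identifies the KSZ8765.
+ */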
+ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
+ if (val & KSZ8_PORT_FIBER_MODE)
+ dev->chip_id = KSZ8765_CHIP_ID;
+ } else if (id2 == KSZ87_CHIP_ID_94) {
+ dev->chip_id = KSZ8794_CHIP_ID;
+ } else {
+ return -ENODEV;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 == KSZ88_CHIP_ID_63)
+ dev->chip_id = KSZ8830_CHIP_ID;
+ else
+ return -ENODEV;
+ break;
+ default:
+ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
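+ /* Mask off the low byte (revision and other flags) so id32 can be
+ * matched against the chip ID constants below.
+ */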
+ id32 &= ~0xFF;
+
+ switch (id32) {
+ case KSZ9477_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ dev->chip_id = id32;
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected %x)\n", id32);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .get_phy_flags = ksz_get_phy_flags,
+ .setup = ksz_setup,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .phylink_get_caps = ksz_phylink_get_caps,
+ .phylink_mac_config = ksz_phylink_mac_config,
+ .phylink_mac_link_up = ksz_phylink_mac_link_up,
+ .phylink_mac_link_down = ksz_mac_link_down,
+ .port_enable = ksz_enable_port,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .get_pause_stats = ksz_get_pause_stats,
+ .port_change_mtu = ksz_change_mtu,
+ .port_max_mtu = ksz_max_mtu,
+};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
@@ -941,6 +1747,7 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
+ ds->ops = &ksz_switch_ops;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
@@ -956,8 +1763,44 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops)
+static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
+ struct device_node *port_dn)
+{
+ phy_interface_t phy_mode = dev->ports[port_num].interface;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1) {
+ dev_warn(dev->dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port_num);
+
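+ /* Fall back to the 2 ns (2000 ps) internal delay implied by the
+ * rgmii-*id phy-mode variants.
+ */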
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ dev->ports[port_num].rgmii_rx_val = rx_delay;
+ dev->ports[port_num].rgmii_tx_val = tx_delay;
+}
+
+int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
struct device_node *port, *ports;
@@ -986,10 +1829,9 @@ int ksz_switch_register(struct ksz_device *dev,
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- dev->dev_ops = ops;
-
- if (dev->dev_ops->detect(dev))
- return -EINVAL;
+ ret = ksz_switch_detect(dev);
+ if (ret)
+ return ret;
info = ksz_lookup_info(dev->chip_id);
if (!info)
@@ -998,10 +1840,15 @@ int ksz_switch_register(struct ksz_device *dev,
/* Update the compatible info with the probed one */
dev->info = info;
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->info->dev_name, dev->chip_rev);
+
ret = ksz_check_device_id(dev);
if (ret)
return ret;
+ dev->dev_ops = dev->info->ops;
+
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
@@ -1038,18 +1885,23 @@ int ksz_switch_register(struct ksz_device *dev,
ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
- if (ports)
+ if (ports) {
for_each_available_child_of_node(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
of_node_put(port);
+ of_node_put(ports);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
+
+ ksz_parse_rgmii_delay(dev, port_num, port);
}
+ of_node_put(ports);
+ }
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
@@ -1072,7 +1924,7 @@ int ksz_switch_register(struct ksz_device *dev,
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ksz_switch_register);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 8500eaedad67..764ada3a0f42 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -25,6 +25,7 @@ struct ksz_port_mib {
u8 cnt_ptr;
u64 *counters;
struct rtnl_link_stats64 stats64;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats64_lock;
};
@@ -41,11 +42,21 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
u8 reg_mib_cnt;
+ const u16 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ const u8 *xmii_ctrl0;
+ const u8 *xmii_ctrl1;
+ int stp_ctrl_reg;
+ int broadcast_ctrl_reg;
+ int multicast_ctrl_reg;
+ int start_ctrl_reg;
bool supports_mii[KSZ_MAX_NUM_PORTS];
bool supports_rmii[KSZ_MAX_NUM_PORTS];
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
@@ -68,6 +79,8 @@ struct ksz_port {
struct ksz_port_mib mib;
phy_interface_t interface;
u16 max_frame;
+ u32 rgmii_tx_val;
+ u32 rgmii_rx_val;
};
struct ksz_device {
@@ -90,6 +103,7 @@ struct ksz_device {
/* chip specific data */
u32 chip_id;
+ u8 chip_rev;
int cpu_port; /* port connected to CPU */
int phy_port_cnt;
phy_interface_t compat_interface;
@@ -140,6 +154,84 @@ enum ksz_chip_id {
LAN9374_CHIP_ID = 0x00937400,
};
+enum ksz_regs {
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+ P_STP_CTRL,
+ S_START_CTRL,
+ S_BROADCAST_CTRL,
+ S_MULTICAST_CTRL,
+ P_XMII_CTRL_0,
+ P_XMII_CTRL_1,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+ ALU_STAT_WRITE,
+ ALU_STAT_READ,
+ P_MII_TX_FLOW_CTRL,
+ P_MII_RX_FLOW_CTRL,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+ ALU_STAT_INDEX,
+};
+
+enum ksz_xmii_ctrl0 {
+ P_MII_100MBIT,
+ P_MII_10MBIT,
+ P_MII_FULL_DUPLEX,
+ P_MII_HALF_DUPLEX,
+};
+
+enum ksz_xmii_ctrl1 {
+ P_RGMII_SEL,
+ P_RMII_SEL,
+ P_GMII_SEL,
+ P_MII_SEL,
+ P_GMII_1GBIT,
+ P_GMII_NOT_1GBIT,
+};
+
struct alu_struct {
/* entry 1 */
u8 is_static:1;
@@ -160,6 +252,7 @@ struct alu_struct {
};
struct ksz_dev_ops {
+ int (*setup)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
@@ -167,71 +260,68 @@ struct ksz_dev_ops {
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
void (*r_mib_stat64)(struct ksz_device *dev, int port);
+ int (*vlan_filtering)(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+ int (*vlan_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*mirror_add)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*fdb_add)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_del)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_dump)(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*mdb_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*mdb_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ void (*get_caps)(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+ int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*max_mtu)(struct ksz_device *dev, int port);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
+ void (*phylink_mac_config)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+ void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ void (*config_cpu_port)(struct dsa_switch *ds);
+ int (*enable_stp_addr)(struct ksz_device *dev);
+ int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
+int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
-int ksz8_switch_register(struct ksz_device *dev);
-int ksz9477_switch_register(struct ksz_device *dev);
-
-void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
void ksz_r_mib_stats64(struct ksz_device *dev, int port);
-void ksz_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s);
-void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+bool ksz_get_gbit(struct ksz_device *dev, int port);
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit);
extern const struct ksz_chip_data ksz_switch_chips[];
-/* Common DSA access functions */
-
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload,
- struct netlink_ext_ack *extack);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge);
-void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state, int reg);
-void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
-void ksz_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf);
-
/* Common register access functions */
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
@@ -336,6 +426,14 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
+static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
+ u8 mask, u8 val)
+{
+ regmap_update_bits(dev->regmap[0],
+ dev->dev_ops->get_port_addr(port, offset),
+ mask, val);
+}
+
static inline void ksz_regmap_lock(void *__mtx)
{
struct mutex *mtx = __mtx;
@@ -348,11 +446,70 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8830_CHIP_ID;
+}
+
+static inline int is_lan937x(struct ksz_device *dev)
+{
+ return dev->chip_id == LAN9370_CHIP_ID ||
+ dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID ||
+ dev->chip_id == LAN9373_CHIP_ID ||
+ dev->chip_id == LAN9374_CHIP_ID;
+}
+
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
#define PORT_LEARN_DISABLE BIT(0)
+/* Switch ID Defines */
+#define REG_CHIP_ID0 0x00
+
+#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
+
+#define KSZ8_PORT_STATUS_0 0x08
+#define KSZ8_PORT_FIBER_MODE BIT(7)
+
+#define SW_CHIP_ID_M GENMASK(7, 4)
+#define KSZ87_CHIP_ID_94 0x6
+#define KSZ87_CHIP_ID_95 0x9
+#define KSZ88_CHIP_ID_63 0x3
+
+#define SW_REV_ID_M GENMASK(7, 4)
+
+/* The driver sets switch broadcast storm protection to a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames/s * 67 ms interval = ~9969 frames */
+#define BROADCAST_STORM_VALUE 9969
+
+#define BROADCAST_STORM_RATE_HI 0x07
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+
+#define SW_START 0x01
+
+/* Used with variable features to indicate capabilities. */
+#define GBIT_SUPPORT BIT(0)
+#define IS_9893 BIT(2)
+
+/* xMII configuration */
+#define P_MII_DUPLEX_M BIT(6)
+#define P_MII_100MBIT_M BIT(4)
+
+#define P_GMII_1GBIT_M BIT(6)
+#define P_RGMII_ID_IG_ENABLE BIT(4)
+#define P_RGMII_ID_EG_ENABLE BIT(3)
+#define P_MII_MAC_MODE BIT(2)
+#define P_MII_SEL_M 0x3
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 961a74c359a8..05bd089795f8 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Microchip KSZ8795 series register access through SPI
+ * Microchip KSZ series register access through SPI
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
@@ -14,7 +14,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include "ksz8.h"
#include "ksz_common.h"
#define KSZ8795_SPI_ADDR_SHIFT 12
@@ -25,29 +24,29 @@
#define KSZ8863_SPI_ADDR_ALIGN 8
#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+#define KSZ9477_SPI_ADDR_SHIFT 24
+#define KSZ9477_SPI_ADDR_ALIGN 3
+#define KSZ9477_SPI_TURNAROUND_SHIFT 5
+
KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
-static int ksz8795_spi_probe(struct spi_device *spi)
+KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+
+static int ksz_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
const struct ksz_chip_data *chip;
struct device *ddev = &spi->dev;
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = spi;
-
- dev = ksz_switch_alloc(&spi->dev, ksz8);
+ dev = ksz_switch_alloc(&spi->dev, spi);
if (!dev)
return -ENOMEM;
@@ -57,8 +56,12 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (chip->chip_id == KSZ8830_CHIP_ID)
regmap_config = ksz8863_regmap_config;
- else
+ else if (chip->chip_id == KSZ8795_CHIP_ID ||
+ chip->chip_id == KSZ8794_CHIP_ID ||
+ chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
+ else
+ regmap_config = ksz9477_regmap_config;
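+ /* Every KSZ_REGMAP_TABLE() instance has the same number of entries,
+ * so ksz8795's size is valid for whichever table was chosen above.
+ */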
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
rc = regmap_config[i];
@@ -82,7 +85,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -93,7 +96,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
return 0;
}
-static void ksz8795_spi_remove(struct spi_device *spi)
+static void ksz_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
@@ -103,22 +106,22 @@ static void ksz8795_spi_remove(struct spi_device *spi)
spi_set_drvdata(spi, NULL);
}
-static void ksz8795_spi_shutdown(struct spi_device *spi)
+static void ksz_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
spi_set_drvdata(spi, NULL);
}
-static const struct of_device_id ksz8795_dt_ids[] = {
+static const struct of_device_id ksz_dt_ids[] = {
{
.compatible = "microchip,ksz8765",
.data = &ksz_switch_chips[KSZ8765]
@@ -139,34 +142,96 @@ static const struct of_device_id ksz8795_dt_ids[] = {
.compatible = "microchip,ksz8873",
.data = &ksz_switch_chips[KSZ8830]
},
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9370",
+ .data = &ksz_switch_chips[LAN9370]
+ },
+ {
+ .compatible = "microchip,lan9371",
+ .data = &ksz_switch_chips[LAN9371]
+ },
+ {
+ .compatible = "microchip,lan9372",
+ .data = &ksz_switch_chips[LAN9372]
+ },
+ {
+ .compatible = "microchip,lan9373",
+ .data = &ksz_switch_chips[LAN9373]
+ },
+ {
+ .compatible = "microchip,lan9374",
+ .data = &ksz_switch_chips[LAN9374]
+ },
{},
};
-MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
-static const struct spi_device_id ksz8795_spi_ids[] = {
+static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8765" },
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
{ "ksz8873" },
+ { "ksz9477" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { "lan9370" },
+ { "lan9371" },
+ { "lan9372" },
+ { "lan9373" },
+ { "lan9374" },
{ },
};
-MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
-static struct spi_driver ksz8795_spi_driver = {
+static struct spi_driver ksz_spi_driver = {
.driver = {
- .name = "ksz8795-switch",
+ .name = "ksz-switch",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz8795_dt_ids),
+ .of_match_table = ksz_dt_ids,
},
- .id_table = ksz8795_spi_ids,
- .probe = ksz8795_spi_probe,
- .remove = ksz8795_spi_remove,
- .shutdown = ksz8795_spi_shutdown,
+ .id_table = ksz_spi_ids,
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+ .shutdown = ksz_spi_shutdown,
};
-module_spi_driver(ksz8795_spi_driver);
+module_spi_driver(ksz_spi_driver);
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
+MODULE_ALIAS("spi:lan937x");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
+MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
new file mode 100644
index 000000000000..4e0b1dccec27
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip lan937x dev ops headers
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+
+#ifndef __LAN937X_CFG_H
+#define __LAN937X_CFG_H
+
+int lan937x_reset_switch(struct ksz_device *dev);
+int lan937x_setup(struct dsa_switch *ds);
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void lan937x_config_cpu_port(struct dsa_switch *ds);
+int lan937x_switch_init(struct ksz_device *dev);
+void lan937x_switch_exit(struct ksz_device *dev);
+void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
new file mode 100644
index 000000000000..daedd2bf20c1
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip LAN937X switch driver main logic
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/math.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "lan937x_reg.h"
+#include "ksz_common.h"
+#include "lan937x.h"
+
+static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
+ u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+{
+ u16 data16;
+ int ret;
+
+ /* Enable Phy access through SPI */
+ ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
+ if (ret < 0)
+ return ret;
+
+ /* Allow SPI access */
+ data16 |= VPHY_SPI_INDIRECT_ENABLE;
+
+ return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+}
+
+static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
+{
+ u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
+ u16 temp;
+
+ /* get register address based on the logical port */
+ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
+
+ return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
+}
+
+static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
+ u16 val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port */
+ if (!dev->info->internal_phy[addr])
+ return -EOPNOTSUPP;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write the data to be written to the VPHY reg */
+ ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
+ if (ret < 0)
+ return ret;
+
+ /* Write the Write En and Busy bit */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
+ (VPHY_IND_WRITE | VPHY_IND_BUSY));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to write phy register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
+ u16 *val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for an internal PHY port; return 0xffff for a non-existent PHY */
+ if (!dev->info->internal_phy[addr])
+ return 0xffff;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write Read and Busy bit to start the transaction */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to read phy register\n");
+ return ret;
+ }
+
+ /* Read the VPHY register which has the PHY data */
+ return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
+}
+
+void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ lan937x_internal_phy_read(dev, addr, reg, data);
+}
+
+void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ lan937x_internal_phy_write(dev, addr, reg, val);
+}
+
+static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = lan937x_internal_phy_read(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int lan937x_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return lan937x_internal_phy_write(dev, addr, regnum, val);
+}
+
+static int lan937x_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(ds->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = lan937x_sw_mdio_read;
+ bus->write = lan937x_sw_mdio_write;
+ bus->name = "lan937x slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
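+ /* Only probe the PHY addresses the switch actually exposes */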
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+int lan937x_reset_switch(struct ksz_device *dev)
+{
+ u32 data32;
+ int ret;
+
+ /* reset switch */
+ ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto Aging */
+ ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable interrupts */
+ ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
+ if (ret < 0)
+ return ret;
+
+ return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+}
+
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
+ u8 member;
+
+ /* enable tag tail for host port */
+ if (cpu_port)
+ lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_TAIL_TAG_ENABLE, true);
+
+ /* disable frame check length field */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
+ false);
+
+ /* set back pressure for half duplex */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
+ true);
+
+ /* enable 802.1p priority */
+ lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (!dev->info->internal_phy[port])
+ lan937x_port_cfg(dev, port, regs[P_XMII_CTRL_0],
+ masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL],
+ true);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ dev->dev_ops->cfg_port_member(dev, port, member);
+}
+
+void lan937x_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dev->info->cpu_ports & (1 << dp->index)) {
+ dev->cpu_port = dp->index;
+
+ /* enable cpu port */
+ lan937x_port_setup(dev, dp->index, true);
+ }
+ }
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
+ }
+}
+
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+{
+ struct dsa_switch *ds = dev->ds;
+ int ret;
+
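+ /* Convert the MTU to a max frame size: add the VLAN Ethernet header,
+ * the FCS and, on the CPU port, the tail tag length.
+ */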
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += LAN937X_TAG_LEN;
+
+ if (new_mtu >= FR_MIN_SIZE)
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, true);
+ else
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, false);
+ if (ret < 0) {
+ dev_err(ds->dev, "failed to enable jumbo\n");
+ return ret;
+ }
+
+ /* Write the frame size in PORT_MAX_FR_SIZE register */
+ ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+
+ return 0;
+}
+
+static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
+ u16 reg, u8 val)
+{
+ u16 data16;
+
+ ksz_pread16(dev, port, reg, &data16);
+
+ /* Update tune Adjust */
+ data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
+ ksz_pwrite16(dev, port, reg, data16);
+
+ /* write DLL reset to take effect */
+ data16 |= PORT_DLL_RESET;
+ ksz_pwrite16(dev, port, reg, data16);
+}
+
+static void lan937x_set_rgmii_tx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ /* Apply different codes based on the ports as per characterization
+ * results
+ */
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_TX_DELAY_2NS :
+ RGMII_2_TX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_5, val);
+}
+
+static void lan937x_set_rgmii_rx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_RX_DELAY_2NS :
+ RGMII_2_RX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_4, val);
+}
+
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_100FD;
+
+ if (dev->info->supports_rgmii[port]) {
+ /* MII/RMII/RGMII ports */
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10 | MAC_1000FD;
+ }
+}
+
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (p->rgmii_tx_val) {
+ lan937x_set_rgmii_tx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii tx delay for the port %d\n",
+ port);
+ }
+
+ if (p->rgmii_rx_val) {
+ lan937x_set_rgmii_rx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii rx delay for the port %d\n",
+ port);
+ }
+}
+
+int lan937x_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ /* enable Indirect Access from SPI to the VPHY registers */
+ ret = lan937x_enable_spi_indirect_access(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable spi indirect access");
+ return ret;
+ }
+
+ ret = lan937x_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ return ret;
+ }
+
+ /* VLAN awareness is a global setting; mixed per-port VLAN
+ * filtering is not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable aggressive back off for half duplex & UNH mode */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
+ (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
+ true);
+
+ /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
+ * packets when 16 or more collisions occur
+ */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+
+ /* enable global MIB counter freeze function */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
+ /* disable CLK125 & CLK25, 1: disable, 0: enable */
+ lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+
+ return 0;
+}
+
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
+void lan937x_switch_exit(struct ksz_device *dev)
+{
+ lan937x_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
new file mode 100644
index 000000000000..ba4adaddb3ec
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X switch register definitions
+ * Copyright (C) 2019-2021 Microchip Technology Inc.
+ */
+#ifndef __LAN937X_REG_H
+#define __LAN937X_REG_H
+
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
+/* 0 - Operation */
+#define REG_GLOBAL_CTRL_0 0x0007
+
+#define SW_PHY_REG_BLOCK BIT(7)
+#define SW_FAST_MODE BIT(3)
+#define SW_FAST_MODE_OVERRIDE BIT(2)
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+#define OVER_TEMP_INT BIT(28)
+#define HSR_INT BIT(27)
+#define PIO_INT BIT(26)
+#define POR_READY_INT BIT(25)
+
+#define SWITCH_INT_MASK \
+ (LUE_INT | TRIG_TS_INT | APB_TIMEOUT_INT | OVER_TEMP_INT | HSR_INT | \
+ PIO_INT | POR_READY_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_CLK125_ENB BIT(1)
+#define SW_CLK25_ENB BIT(0)
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_OVER_TEMP_ENABLE BIT(2)
+#define SW_RESET BIT(1)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_S 3
+#define SW_RESV_MCAST_ENABLE BIT(2)
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_SRC_ADDR_FILTER BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+#define SW_SHORT_IFG BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+/* 4 - LUE */
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define REG_SW_ALU_VAL_B 0x0424
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP 0xFF
+
+/* 7 - VPhy */
+#define REG_VPHY_IND_ADDR__2 0x075C
+#define REG_VPHY_IND_DATA__2 0x0760
+
+#define REG_VPHY_IND_CTRL__2 0x0768
+
+#define VPHY_IND_WRITE BIT(1)
+#define VPHY_IND_BUSY BIT(0)
+
+#define REG_VPHY_SPECIAL_CTRL__2 0x077C
+#define VPHY_SMI_INDIRECT_ENABLE BIT(15)
+#define VPHY_SW_LOOPBACK BIT(14)
+#define VPHY_MDIO_INTERNAL_ENABLE BIT(13)
+#define VPHY_SPI_INDIRECT_ENABLE BIT(12)
+#define VPHY_PORT_MODE_M 0x3
+#define VPHY_PORT_MODE_S 8
+#define VPHY_MODE_RGMII 0
+#define VPHY_MODE_MII_PHY 1
+#define VPHY_MODE_SGMII 2
+#define VPHY_MODE_RMII_PHY 3
+#define VPHY_SW_COLLISION_TEST BIT(7)
+#define VPHY_SPEED_DUPLEX_STAT_M 0x7
+#define VPHY_SPEED_DUPLEX_STAT_S 2
+#define VPHY_SPEED_1000 BIT(4)
+#define VPHY_SPEED_100 BIT(3)
+#define VPHY_FULL_DUPLEX BIT(2)
+
+/* Port Registers */
+
+/* 0 - Operation */
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_MAC_REMOTE_LOOPBACK BIT(6)
+#define PORT_K2L_INSERT_ENABLE BIT(5)
+#define PORT_K2L_DEBUG_ENABLE BIT(4)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+/* 1 - Phy */
+#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+
+/* 3 - xMII */
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define PORT_MII_SEL_EDGE BIT(5)
+
+#define REG_PORT_XMII_CTRL_4 0x0304
+#define REG_PORT_XMII_CTRL_5 0x0306
+
+#define PORT_DLL_RESET BIT(15)
+#define PORT_TUNE_ADJ GENMASK(13, 7)
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+#define PORT_CHECK_LENGTH BIT(2)
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_PACKET BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define PORT_MAX_FR_SIZE 0x404
+#define FR_MIN_SIZE 1522
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+
+/* The port number as per the datasheet */
+#define RGMII_2_PORT_NUM 5
+#define RGMII_1_PORT_NUM 6
+
+#define LAN937X_RGMII_2_PORT (RGMII_2_PORT_NUM - 1)
+#define LAN937X_RGMII_1_PORT (RGMII_1_PORT_NUM - 1)
+
+#define RGMII_1_TX_DELAY_2NS 2
+#define RGMII_2_TX_DELAY_2NS 0
+#define RGMII_1_RX_DELAY_2NS 0x1B
+#define RGMII_2_RX_DELAY_2NS 0x14
+
+#define LAN937X_TAG_LEN 2
+
+#endif
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2b02d823d497..835807911be0 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1038,6 +1038,7 @@ static int
mt7530_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1046,7 +1047,11 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
* restore the port matrix if the port is the member of a certain
* bridge.
*/
- priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ if (dsa_port_is_user(dp)) {
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
+ }
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
@@ -1195,7 +1200,8 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- u32 port_bitmap = BIT(MT7530_CPU_PORT);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1272,9 +1278,12 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
* the CPU port get out of VLAN filtering mode.
*/
if (all_user_ports_removed) {
- mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
PCR_MATRIX(dsa_user_ports(priv->ds)));
- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+ mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
| PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
@@ -1312,6 +1321,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1340,8 +1350,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
*/
if (priv->ports[port].enable)
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(MT7530_CPU_PORT)));
- priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ PCR_MATRIX(BIT(cpu_dp->index)));
+ priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
@@ -1508,6 +1518,9 @@ static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1515,7 +1528,7 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
* for becoming a VLAN-aware port.
*/
mt7530_port_set_vlan_aware(ds, port);
- mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ mt7530_port_set_vlan_aware(ds, cpu_dp->index);
} else {
mt7530_port_set_vlan_unaware(ds, port);
}
@@ -1527,11 +1540,11 @@ static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
{
+ struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
u8 new_members;
u32 val;
- new_members = entry->old_members | BIT(entry->port) |
- BIT(MT7530_CPU_PORT);
+ new_members = entry->old_members | BIT(entry->port);
/* Validate the entry with independent learning, create egress tag per
* VLAN and joining the port as one of the port members.
@@ -1542,22 +1555,20 @@ mt7530_hw_vlan_add(struct mt7530_priv *priv,
/* Decide whether adding tag or not for those outgoing packets from the
* port inside the VLAN.
- */
- val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
- MT7530_VLAN_EGRESS_TAG;
- mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(entry->port),
- ETAG_CTRL_P(entry->port, val));
-
- /* CPU port is always taken as a tagged port for serving more than one
+ * CPU port is always taken as a tagged port for serving more than one
* VLANs across and also being applied with egress type stack mode for
* that VLAN tags would be appended after hardware special tag used as
* DSA tag.
*/
+ if (dsa_port_is_cpu(dp))
+ val = MT7530_VLAN_EGRESS_STACK;
+ else if (entry->untagged)
+ val = MT7530_VLAN_EGRESS_UNTAG;
+ else
+ val = MT7530_VLAN_EGRESS_TAG;
mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
- ETAG_CTRL_P(MT7530_CPU_PORT,
- MT7530_VLAN_EGRESS_STACK));
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
}
static void
@@ -1576,11 +1587,7 @@ mt7530_hw_vlan_del(struct mt7530_priv *priv,
return;
}
- /* If certain member apart from CPU port is still alive in the VLAN,
- * the entry would be kept valid. Otherwise, the entry is got to be
- * disabled.
- */
- if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+ if (new_members) {
val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
@@ -2098,11 +2105,12 @@ static int
mt7530_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
+ struct device_node *dn = NULL;
struct device_node *phy_node;
struct device_node *mac_np;
struct mt7530_dummy_poll p;
phy_interface_t interface;
- struct device_node *dn;
+ struct dsa_port *cpu_dp;
u32 id, val;
int ret, i;
@@ -2110,7 +2118,19 @@ mt7530_setup(struct dsa_switch *ds)
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
- dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ dn = cpu_dp->master->dev.of_node->parent;
+ /* It doesn't matter which CPU port is found first,
+ * their masters should share the same parent OF node
+ */
+ break;
+ }
+
+ if (!dn) {
+ dev_err(ds->dev, "parent OF node of DSA master not found");
+ return -EINVAL;
+ }
+
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
@@ -2272,6 +2292,7 @@ mt7531_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
struct mt7530_dummy_poll p;
+ struct dsa_port *cpu_dp;
u32 val, id;
int ret, i;
@@ -2344,8 +2365,11 @@ mt7531_setup(struct dsa_switch *ds)
CORE_PLL_GROUP4, val);
/* BPDU to CPU port */
- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
- BIT(MT7530_CPU_PORT));
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(cpu_dp->index));
+ break;
+ }
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
MT753X_BPDU_CPU_ONLY);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 71e36b69b96d..e509af95c354 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -8,7 +8,6 @@
#define MT7530_NUM_PORTS 7
#define MT7530_NUM_PHYS 5
-#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
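The mt7530 hunks above drop the hardcoded MT7530_CPU_PORT constant in favour of the CPU port recorded on each dsa_port. A minimal sketch of that lookup pattern, outside the patch and using only helpers already visible in the diff (dsa_to_port(), dsa_port_is_user(), dp->cpu_dp, PCR_MATRIX()); the function name is illustrative:

static u32 example_mt7530_port_matrix(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	/* User ports carry a pointer to their CPU port; its index now
	 * replaces the former MT7530_CPU_PORT constant in the port matrix.
	 */
	if (dsa_port_is_user(dp))
		return PCR_MATRIX(BIT(dp->cpu_dp->index));

	return 0;
}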
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..83dca9179aa0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 0b49d243e00b..07e9a4da924c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -449,9 +449,6 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
- if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
- mode = chip->info->ops->port_max_speed_mode(port);
-
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -3280,28 +3277,56 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ phy_interface_t mode;
struct dsa_port *dp;
- int tx_amp;
+ int tx_amp, speed;
int err;
u16 reg;
chip->ports[port].chip = chip;
chip->ports[port].port = port;
+ dp = dsa_to_port(ds, port);
+
/* MAC Forcing register: don't force link, speed, duplex or flow control
* state to any particular values on physical ports, but force the CPU
* port and all DSA ports to their maximum bandwidth and full duplex.
*/
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ struct phylink_config pl_config = {};
+ unsigned long caps;
+
+ mv88e6xxx_get_caps(ds, port, &pl_config);
+
+ caps = pl_config.mac_capabilities;
+
+ if (chip->info->ops->port_max_speed_mode)
+ mode = chip->info->ops->port_max_speed_mode(port);
+ else
+ mode = PHY_INTERFACE_MODE_NA;
+
+ if (caps & MAC_10000FD)
+ speed = SPEED_10000;
+ else if (caps & MAC_5000FD)
+ speed = SPEED_5000;
+ else if (caps & MAC_2500FD)
+ speed = SPEED_2500;
+ else if (caps & MAC_1000)
+ speed = SPEED_1000;
+ else if (caps & MAC_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
- SPEED_MAX, DUPLEX_FULL,
- PAUSE_OFF,
- PHY_INTERFACE_MODE_NA);
- else
+ speed, DUPLEX_FULL,
+ PAUSE_OFF, mode);
+ } else {
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
PAUSE_ON,
PHY_INTERFACE_MODE_NA);
+ }
if (err)
return err;
@@ -3473,7 +3498,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
}
if (chip->info->ops->serdes_set_tx_amplitude) {
- dp = dsa_to_port(ds, port);
if (dp)
phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
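The mv88e6xxx_setup_port() hunk above replaces the removed SPEED_MAX handling with a speed derived from the phylink MAC capability mask. A condensed sketch of that selection, assuming the MAC_* capability bits and SPEED_* constants shown in the diff; the helper name is illustrative:

static int example_max_forced_speed(unsigned long caps)
{
	/* Pick the highest speed advertised by the port's MAC capabilities,
	 * mirroring the if/else ladder added to mv88e6xxx_setup_port().
	 */
	if (caps & MAC_10000FD)
		return SPEED_10000;
	if (caps & MAC_5000FD)
		return SPEED_5000;
	if (caps & MAC_2500FD)
		return SPEED_2500;
	if (caps & MAC_1000)
		return SPEED_1000;
	if (caps & MAC_100)
		return SPEED_100;
	return SPEED_10;
}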
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 5e03cfe50156..e693154cf803 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -488,14 +488,13 @@ struct mv88e6xxx_ops {
int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
int pause);
-#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
#define DUPLEX_UNFORCED -2
/* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
- * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ * Use SPEED_UNFORCED for normal detection.
*
* Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
* or DUPLEX_UNFORCED for normal duplex detection.
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 795b3128768f..90c55f23b7c9 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -294,28 +294,10 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
return 0;
}
-/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex)
-{
- if (speed == SPEED_MAX)
- speed = 200;
-
- if (speed > 200)
- return -EOPNOTSUPP;
-
- /* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
- duplex);
-}
-
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
@@ -327,9 +309,6 @@ int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 100;
-
if (speed > 100)
return -EOPNOTSUPP;
@@ -341,9 +320,6 @@ int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 5 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -369,9 +345,6 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed > 1000)
return -EOPNOTSUPP;
@@ -386,9 +359,6 @@ int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -414,9 +384,6 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -445,9 +412,6 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
u16 reg, ctrl;
int err;
- if (speed == SPEED_MAX)
- speed = (port > 0 && port < 9) ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index e0a705d82019..cb04243f37c1 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -342,8 +342,6 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex);
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 220b0b027b55..08db9cf76818 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -6,6 +6,7 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3e07dc39007a..aadb0bd7c24f 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -610,6 +610,9 @@ static int felix_change_tag_protocol(struct dsa_switch *ds,
old_proto_ops = felix->tag_proto_ops;
+ if (proto_ops == old_proto_ops)
+ return 0;
+
err = proto_ops->setup(ds);
if (err)
goto setup_failed;
@@ -1553,9 +1556,18 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
ocelot_port_set_maxlen(ocelot, port, new_mtu);
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
+ felix->info->tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+
return 0;
}
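As the felix_change_mtu() hunk above shows, any path that alters the maximum frame length now refreshes the time-aware-shaper guard bands under ocelot->tas_lock. A minimal sketch of that pattern, assuming the fields visible in the diff (ocelot_port->taprio, felix->info->tas_guard_bands_update); the helper name is illustrative:

static void example_refresh_guard_bands(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);

	mutex_lock(&ocelot->tas_lock);

	/* Only recompute when a taprio schedule is installed and the
	 * switch family provides a guard-band hook.
	 */
	if (ocelot->ports[port]->taprio && felix->info->tas_guard_bands_update)
		felix->info->tas_guard_bands_update(ocelot, port);

	mutex_unlock(&ocelot->tas_lock);
}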
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9e07eb7ee28d..deb8dde1fc19 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -53,6 +53,7 @@ struct felix_info {
struct phylink_link_state *state);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
struct regmap *(*init_regmap)(struct ocelot *ocelot,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 9c27b9b0128d..1cdce8a98d1d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -16,6 +16,7 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
+#include <linux/time.h>
#include "felix.h"
#define VSC9959_NUM_PORTS 6
@@ -273,27 +274,98 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -546,100 +618,379 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -1127,9 +1478,212 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
+/* Extract shortest continuous gate open intervals in ns for each traffic class
+ * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+ * considered U64_MAX. If the gate is always closed, it is considered 0.
+ */
+static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
+ u64 min_gate_len[OCELOT_NUM_TC])
+{
+ struct tc_taprio_sched_entry *entry;
+ u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
+ int tc, i, n;
+
+ /* Initialize arrays */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ min_gate_len[tc] = U64_MAX;
+ gate_len[tc] = 0;
+ }
+
+ /* If we don't have taprio, consider all gates as permanently open */
+ if (!taprio)
+ return;
+
+ n = taprio->num_entries;
+
+ /* Walk through the gate list twice to determine the length
+ * of consecutively open gates for a traffic class, including
+ * open gates that wrap around. We are just interested in the
+ * minimum window size, and this doesn't change what the
+ * minimum is (if the gate never closes, min_gate_len will
+ * remain U64_MAX).
+ */
+ for (i = 0; i < 2 * n; i++) {
+ entry = &taprio->entries[i % n];
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ if (entry->gate_mask & BIT(tc)) {
+ gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
+ } else {
+ /* Gate closes now, record a potential new
+ * minimum and reinitialize length
+ */
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
+ min_gate_len[tc] = gate_len[tc];
+ gate_len[tc] = 0;
+ }
+ }
+ }
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
+}
+
+/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
+ * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
+ * values (the default value is 1518). Also, for traffic class windows smaller
+ * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame
+ * dropping, such that these won't hang the port, as they will never be sent.
+ */
+static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u64 min_gate_len[OCELOT_NUM_TC];
+ int speed, picos_per_byte;
+ u64 needed_bit_time_ps;
+ u32 val, maxlen;
+ u8 tas_speed;
+ int tc;
+
+ lockdep_assert_held(&ocelot->tas_lock);
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
+
+ switch (tas_speed) {
+ case OCELOT_SPEED_10:
+ speed = SPEED_10;
+ break;
+ case OCELOT_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case OCELOT_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case OCELOT_SPEED_2500:
+ speed = SPEED_2500;
+ break;
+ default:
+ return;
+ }
+
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
+ /* MAXLEN_CFG accounts automatically for VLAN. We need to include it
+ * manually in the bit time calculation, plus the preamble and SFD.
+ */
+ maxlen = val + 2 * VLAN_HLEN;
+ /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ * 4 octets FCS, 12 octets IFG.
+ */
+ needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
+
+ dev_dbg(ocelot->dev,
+ "port %d: max frame size %d needs %llu ps at speed %d\n",
+ port, maxlen, needed_bit_time_ps, speed);
+
+ vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 max_sdu;
+
+ if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
+ min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+ /* Setting QMAXSDU_CFG to 0 disables oversized frame
+ * dropping.
+ */
+ max_sdu = 0;
+ dev_dbg(ocelot->dev,
+ "port %d tc %d min gate len %llu"
+ ", sending all frames\n",
+ port, tc, min_gate_len[tc]);
+ } else {
+ /* If traffic class doesn't support a full MTU sized
+ * frame, make sure to enable oversize frame dropping
+ * for frames larger than the smallest that would fit.
+ */
+ max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
+ picos_per_byte);
+ /* A TC gate may be completely closed, which is a
+ * special case where all packets are oversized.
+ * Any limit smaller than 64 octets accomplishes this
+ */
+ if (!max_sdu)
+ max_sdu = 1;
+ /* Take L1 overhead into account, but just don't allow
+ * max_sdu to go negative or to 0. Here we use 20
+ * because QSYS_MAXSDU_CFG_* already counts the 4 FCS
+ * octets as part of packet size.
+ */
+ if (max_sdu > 20)
+ max_sdu -= 20;
+ dev_info(ocelot->dev,
+ "port %d tc %d min gate length %llu"
+ " ns not enough for max frame size %d at %d"
+ " Mbps, dropping frames over %d"
+ " octets including FCS\n",
+ port, tc, min_gate_len[tc], maxlen, speed,
+ max_sdu);
+ }
+
+ /* ocelot_write_rix is a macro that concatenates
+ * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
+ * the writes to each traffic class
+ */
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+ }
+
+ ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+}
+
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
u32 speed)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 tas_speed;
switch (speed) {
@@ -1154,6 +1708,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
+
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio)
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
}
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1196,26 +1757,36 @@ static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
struct tc_taprio_qopt_offload *taprio)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct timespec64 base_ts;
int ret, i;
u32 val;
+ mutex_lock(&ocelot->tas_lock);
+
if (!taprio->enable) {
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
+ taprio_offload_free(ocelot_port->taprio);
+ ocelot_port->taprio = NULL;
+
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
return 0;
}
if (taprio->cycle_time > NSEC_PER_SEC ||
- taprio->cycle_time_extension >= NSEC_PER_SEC)
- return -EINVAL;
+ taprio->cycle_time_extension >= NSEC_PER_SEC) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
- return -ERANGE;
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
+ ret = -ERANGE;
+ goto err;
+ }
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
@@ -1236,8 +1807,10 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
* config is pending, need reset the TAS module
*/
val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
- return -EBUSY;
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err;
+ }
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_ENABLE |
@@ -1270,10 +1843,67 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
+ if (ret)
+ goto err;
+
+ ocelot_port->taprio = taprio_offload_get(taprio);
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+err:
+ mutex_unlock(&ocelot->tas_lock);
return ret;
}
+static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+{
+ struct tc_taprio_qopt_offload *taprio;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+ int port;
+ u32 val;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ taprio = ocelot_port->taprio;
+ if (!taprio)
+ continue;
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Disable time-aware shaper */
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_rmw(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Re-enable time-aware shaper */
+ ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+ }
+ mutex_unlock(&ocelot->tas_lock);
+}
+
static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *cbs_qopt)
{
@@ -1886,7 +2516,7 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
@@ -1903,7 +2533,7 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
@@ -2214,6 +2844,7 @@ static const struct ocelot_ops vsc9959_ops = {
.psfp_filter_del = vsc9959_psfp_filter_del,
.psfp_stats_get = vsc9959_psfp_stats_get,
.cut_through_fwd = vsc9959_cut_through_fwd,
+ .tas_clock_adjust = vsc9959_tas_clock_adjust,
};
static const struct felix_info felix_info_vsc9959 = {
@@ -2240,6 +2871,7 @@ static const struct felix_info felix_info_vsc9959 = {
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
.init_regmap = ocelot_regmap_init,
};
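The vsc9959_tas_guard_bands_update() hunk above derives a per-traffic-class oversize-drop threshold from the link speed and the shortest open-gate interval. A worked sketch of that arithmetic, assuming the kernel time constants used in the diff (USEC_PER_SEC, PSEC_PER_NSEC) and div_u64(); the helper name is illustrative:

static u32 example_tc_max_sdu(u64 min_gate_len_ns, int speed_mbps)
{
	/* e.g. 8000 ps per byte at 1000 Mbps */
	int picos_per_byte = (USEC_PER_SEC * 8) / speed_mbps;
	u64 max_sdu = div_u64(min_gate_len_ns * PSEC_PER_NSEC,
			      picos_per_byte);

	if (!max_sdu)
		return 1;	/* gate never opens: drop every frame */

	/* QSYS_QMAXSDU_CFG_* already counts the 4 FCS octets, so only the
	 * remaining 20 octets of L1 overhead (preamble, SFD, IFG) are
	 * subtracted when possible.
	 */
	return max_sdu > 20 ? max_sdu - 20 : max_sdu;
}

For instance, at 1000 Mbps a gate open for 10000 ns fits div_u64(10000 * 1000, 8000) = 1250 octets, so frames over 1230 octets (including FCS) would be dropped on that traffic class.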
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ea0649211356..b34f4cdfe814 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -543,101 +614,379 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
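
The vsc9953 counters above are now a fixed-size array indexed by the OCELOT_STAT_* enum, using designated initializers, instead of the old flat list of {offset, name} pairs terminated by OCELOT_STAT_END; each entry names the counter and points at a symbolic SYS_COUNT_* register rather than a raw counter offset. A minimal standalone sketch of that pattern, with hypothetical demo_* names (not part of the driver), is:

/* Minimal standalone sketch of the enum-indexed, designated-initializer
 * layout pattern used above. Names (demo_stat, DEMO_*) are hypothetical. */
#include <stdio.h>

enum demo_stat_id {
	DEMO_STAT_RX_OCTETS,
	DEMO_STAT_RX_UNICAST,
	DEMO_STAT_TX_OCTETS,
	DEMO_NUM_STATS,		/* array size, like OCELOT_NUM_STATS */
};

struct demo_stat_layout {
	const char *name;	/* ethtool string; NULL means "not supported" */
	unsigned int reg;	/* symbolic register id, like SYS_COUNT_* */
};

static const struct demo_stat_layout demo_layout[DEMO_NUM_STATS] = {
	[DEMO_STAT_RX_OCTETS] = { .name = "rx_octets", .reg = 0x00 },
	[DEMO_STAT_TX_OCTETS] = { .name = "tx_octets", .reg = 0x40 },
	/* DEMO_STAT_RX_UNICAST intentionally left out: it stays zeroed */
};

int main(void)
{
	/* iteration skips holes instead of relying on an end sentinel */
	for (int i = 0; i < DEMO_NUM_STATS; i++) {
		if (!demo_layout[i].name)
			continue;
		printf("%-12s -> reg 0x%02x\n",
		       demo_layout[i].name, demo_layout[i].reg);
	}
	return 0;
}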
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index 13b7e679b8b5..ba339747362c 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -7,3 +7,11 @@ config NET_DSA_AR9331
help
This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
switch.
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ help
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
index 274022319066..701f1d199e93 100644
--- a/drivers/net/dsa/qca/Makefile
+++ b/drivers/net/dsa/qca/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+qca8k-y += qca8k-common.o qca8k-8xxx.o
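
The new Makefile lines build a single qca8k module from two objects, qca8k-common.o plus the bus-specific qca8k-8xxx.o, so helpers shared between them stop being static and are presumably declared in a shared header (qca8k.h, which is not shown in this hunk). A hedged sketch of that split, with illustrative demo_* names rather than the driver's:

/* shared header (stand-in for qca8k.h): non-static declaration */
struct demo_priv;
int demo_read(struct demo_priv *priv, unsigned int reg, unsigned int *val);

/* common object (stand-in for qca8k-common.c): the definition */
int demo_read(struct demo_priv *priv, unsigned int reg, unsigned int *val)
{
	/* the real helper forwards to regmap_read(priv->regmap, ...) */
	*val = 0;
	return 0;
}

/* bus-specific object (stand-in for qca8k-8xxx.c): just a caller */
int demo_probe_one(struct demo_priv *priv)
{
	unsigned int id;

	return demo_read(priv, 0x0, &id);
}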
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index e5098cfe44bc..0796b7cf8cae 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -231,6 +231,7 @@ struct ar9331_sw_port {
int idx;
struct delayed_work mib_read;
struct rtnl_link_stats64 stats;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats_lock;
};
@@ -604,6 +605,7 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void ar9331_read_stats(struct ar9331_sw_port *port)
{
struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct ethtool_pause_stats *pstats = &port->pause_stats;
struct rtnl_link_stats64 *stats = &port->stats;
struct ar9331_sw_stats_raw raw;
int ret;
@@ -644,6 +646,9 @@ static void ar9331_read_stats(struct ar9331_sw_port *port)
stats->multicast += raw.rxmulti;
stats->collisions += raw.txcollision;
+ pstats->tx_pause_frames += raw.txpause;
+ pstats->rx_pause_frames += raw.rxpause;
+
spin_unlock(&port->stats_lock);
}
@@ -668,6 +673,17 @@ static void ar9331_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
@@ -677,6 +693,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
+ .get_pause_stats = ar9331_get_pause_stats,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -818,7 +835,7 @@ static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_write(sbus, p, r, val);
+ return __mdiobus_write(sbus, p, r, val);
}
static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
@@ -829,7 +846,7 @@ static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_read(sbus, p, r);
+ return __mdiobus_read(sbus, p, r);
}
static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
@@ -849,6 +866,8 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
return 0;
}
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
+
ret = __ar9331_mdio_read(sbus, reg);
if (ret < 0)
goto error;
@@ -860,9 +879,13 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
*(u32 *)val_buf |= ret << 16;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+
return ret;
}
@@ -872,12 +895,15 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
struct mii_bus *sbus = priv->sbus;
int ret;
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
if (reg == AR9331_SW_REG_PAGE) {
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
0, val);
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
}
@@ -897,10 +923,14 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+
return ret;
}
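
Both ar9331 regmap bus ops now take the MDIO bus mutex themselves with mutex_lock_nested(..., MDIO_MUTEX_NESTED) and switch to the unlocked __mdiobus_read()/__mdiobus_write() accessors, so the multi-step switch register access happens under a single bus lock without tripping lockdep on the nested acquisition. A minimal kernel-style sketch of that pattern (not the driver's exact code):

/* Hedged sketch: take the bus lock once, then use the unlocked accessors
 * so the two halves of a 32-bit switch register are read back to back. */
#include <linux/mdio.h>
#include <linux/phy.h>

static int demo_read_u32(struct mii_bus *bus, int addr, int reg_lo, u32 *val)
{
	int lo, hi;

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	lo = __mdiobus_read(bus, addr, reg_lo);
	hi = __mdiobus_read(bus, addr, reg_lo + 1);

	mutex_unlock(&bus->mdio_lock);

	if (lo < 0 || hi < 0)
		return -EIO;

	*val = (u32)lo | ((u32)hi << 16);
	return 0;
}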
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 1cbb05b0323f..1d3e7782a71f 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -15,7 +15,6 @@
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
-#include <linux/if_bridge.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
@@ -24,57 +23,6 @@
#include "qca8k.h"
-#define MIB_DESC(_s, _o, _n) \
- { \
- .size = (_s), \
- .offset = (_o), \
- .name = (_n), \
- }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
- MIB_DESC(1, 0x00, "RxBroad"),
- MIB_DESC(1, 0x04, "RxPause"),
- MIB_DESC(1, 0x08, "RxMulti"),
- MIB_DESC(1, 0x0c, "RxFcsErr"),
- MIB_DESC(1, 0x10, "RxAlignErr"),
- MIB_DESC(1, 0x14, "RxRunt"),
- MIB_DESC(1, 0x18, "RxFragment"),
- MIB_DESC(1, 0x1c, "Rx64Byte"),
- MIB_DESC(1, 0x20, "Rx128Byte"),
- MIB_DESC(1, 0x24, "Rx256Byte"),
- MIB_DESC(1, 0x28, "Rx512Byte"),
- MIB_DESC(1, 0x2c, "Rx1024Byte"),
- MIB_DESC(1, 0x30, "Rx1518Byte"),
- MIB_DESC(1, 0x34, "RxMaxByte"),
- MIB_DESC(1, 0x38, "RxTooLong"),
- MIB_DESC(2, 0x3c, "RxGoodByte"),
- MIB_DESC(2, 0x44, "RxBadByte"),
- MIB_DESC(1, 0x4c, "RxOverFlow"),
- MIB_DESC(1, 0x50, "Filtered"),
- MIB_DESC(1, 0x54, "TxBroad"),
- MIB_DESC(1, 0x58, "TxPause"),
- MIB_DESC(1, 0x5c, "TxMulti"),
- MIB_DESC(1, 0x60, "TxUnderRun"),
- MIB_DESC(1, 0x64, "Tx64Byte"),
- MIB_DESC(1, 0x68, "Tx128Byte"),
- MIB_DESC(1, 0x6c, "Tx256Byte"),
- MIB_DESC(1, 0x70, "Tx512Byte"),
- MIB_DESC(1, 0x74, "Tx1024Byte"),
- MIB_DESC(1, 0x78, "Tx1518Byte"),
- MIB_DESC(1, 0x7c, "TxMaxByte"),
- MIB_DESC(1, 0x80, "TxOverSize"),
- MIB_DESC(2, 0x84, "TxByte"),
- MIB_DESC(1, 0x8c, "TxCollision"),
- MIB_DESC(1, 0x90, "TxAbortCol"),
- MIB_DESC(1, 0x94, "TxMultiCol"),
- MIB_DESC(1, 0x98, "TxSingleCol"),
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
- MIB_DESC(1, 0xa8, "RXUnicast"),
- MIB_DESC(1, 0xac, "TXUnicast"),
-};
-
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
@@ -184,24 +132,6 @@ qca8k_set_page(struct qca8k_priv *priv, u16 page)
return 0;
}
-static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
-{
- return regmap_read(priv->regmap, reg, val);
-}
-
-static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return regmap_write(priv->regmap, reg, val);
-}
-
-static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
- return regmap_update_bits(priv->regmap, reg, mask, write_val);
-}
-
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data;
@@ -412,43 +342,6 @@ qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 wri
}
static int
-qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
-
- if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int
-qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
- u32 tmp;
-
- if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- tmp = val[i];
-
- ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
@@ -534,30 +427,6 @@ exit:
return ret;
}
-static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
- regmap_reg_range(0x0200, 0x0270), /* Parser control */
- regmap_reg_range(0x0400, 0x0454), /* ACL */
- regmap_reg_range(0x0600, 0x0718), /* Lookup */
- regmap_reg_range(0x0800, 0x0b70), /* QM */
- regmap_reg_range(0x0c00, 0x0c80), /* PKT */
- regmap_reg_range(0x0e00, 0x0e98), /* L3 */
- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
-
-};
-
-static const struct regmap_access_table qca8k_readable_table = {
- .yes_ranges = qca8k_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
-
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
@@ -572,385 +441,6 @@ static struct regmap_config qca8k_regmap_config = {
};
static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
-{
- u32 val;
-
- return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
-}
-
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
- u32 reg[3];
- int ret;
-
- /* load the ARL table into an array */
- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
- if (ret)
- return ret;
-
- /* vid - 83:72 */
- fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
- /* aging - 67:64 */
- fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
- /* portmask - 54:48 */
- fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
- /* mac - 47:0 */
- fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
- fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
- fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
- fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
- fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
- fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
-
- return 0;
-}
-
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
- u8 aging)
-{
- u32 reg[3] = { 0 };
-
- /* vid - 83:72 */
- reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
- /* aging - 67:64 */
- reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
- /* portmask - 54:48 */
- reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
- /* mac - 47:0 */
- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
-
- /* load the array into the ARL table */
- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
-}
-
-static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
-{
- u32 reg;
- int ret;
-
- /* Set the command and FDB index */
- reg = QCA8K_ATU_FUNC_BUSY;
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
- }
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_FDB_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_ATU_FUNC_FULL)
- return -1;
- }
-
- return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
- int ret;
-
- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret < 0)
- return ret;
-
- return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
- u16 vid, u8 aging)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
-{
- struct qca8k_fdb fdb = { 0 };
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-
- qca8k_fdb_write(priv, vid, 0, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_fdb_read(priv, &fdb);
- if (ret < 0)
- goto exit;
-
- /* Rule exist. Delete first */
- if (!fdb.aging) {
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- if (ret)
- goto exit;
- }
-
- /* Add port to fdb portmask */
- fdb.port_mask |= port_mask;
-
- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
-{
- struct qca8k_fdb fdb = { 0 };
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-
- qca8k_fdb_write(priv, vid, 0, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
- if (ret < 0)
- goto exit;
-
- /* Rule doesn't exist. Why delete? */
- if (!fdb.aging) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- if (ret)
- goto exit;
-
- /* Only port in the rule is this port. Don't re insert */
- if (fdb.port_mask == port_mask)
- goto exit;
-
- /* Remove port from port mask */
- fdb.port_mask &= ~port_mask;
-
- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
- u32 reg;
- int ret;
-
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
- reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_VLAN_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_VTU_FUNC1_FULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
- u32 reg;
- int ret;
-
- /*
- We do the right thing with VLAN 0 and treat it as untagged while
- preserving the tag on egress.
- */
- if (vid == 0)
- return 0;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
- else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
-
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
- u32 reg, mask;
- int ret, i;
- bool del;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
-
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
-
- if ((reg & mask) != mask) {
- del = false;
- break;
- }
- }
-
- if (del) {
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
- } else {
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
- }
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
- QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
-
- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- /* Port 0 and 6 have no internal PHY */
- if (port > 0 && port < 6)
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
- regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
- else
- regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
-static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
struct sk_buff *read_skb, u32 *val)
{
@@ -1462,8 +952,8 @@ static int qca8k_find_cpu_port(struct dsa_switch *ds)
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
+ const struct qca8k_match_data *data = priv->info;
struct device_node *node = priv->dev->of_node;
- const struct qca8k_match_data *data;
u32 val = 0;
int ret;
@@ -1472,8 +962,6 @@ qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
* Should be applied by default but we set this just to make sure.
*/
if (priv->switch_id == QCA8K_ID_QCA8327) {
- data = of_device_get_match_data(priv->dev);
-
/* Set the correct package of 148 pin for QCA8327 */
if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
@@ -1993,26 +1481,8 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
qpcs->port = port;
}
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
-{
- const struct qca8k_match_data *match_data;
- struct qca8k_priv *priv = ds->priv;
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- match_data = of_device_get_match_data(priv->dev);
-
- for (i = 0; i < match_data->mib_count; i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
-}
-
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
- const struct qca8k_match_data *match_data;
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
@@ -2031,10 +1501,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
if (port != mib_eth_data->req_port)
goto exit;
- match_data = device_get_match_data(priv->dev);
data = mib_eth_data->data;
- for (i = 0; i < match_data->mib_count; i++) {
+ for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
/* First 3 mib are present in the skb head */
@@ -2101,522 +1570,6 @@ exit:
return ret;
}
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- const struct qca8k_match_data *match_data;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
- int ret;
-
- if (priv->mgmt_master &&
- qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
- return;
-
- match_data = of_device_get_match_data(priv->dev);
-
- for (i = 0; i < match_data->mib_count; i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
- ret = qca8k_read(priv, reg, &val);
- if (ret < 0)
- continue;
-
- if (mib->size == 2) {
- ret = qca8k_read(priv, reg + 4, &hi);
- if (ret < 0)
- continue;
- }
-
- data[i] = val;
- if (mib->size == 2)
- data[i] |= (u64)hi << 32;
- }
-}
-
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- const struct qca8k_match_data *match_data;
- struct qca8k_priv *priv = ds->priv;
-
- if (sset != ETH_SS_STATS)
- return 0;
-
- match_data = of_device_get_match_data(priv->dev);
-
- return match_data->mib_count;
-}
-
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
- u32 reg;
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
- if (ret < 0)
- goto exit;
-
- if (eee->eee_enabled)
- reg |= lpi_en;
- else
- reg &= ~lpi_en;
- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
- break;
- case BR_STATE_LISTENING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
- break;
- case BR_STATE_LEARNING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
- break;
- }
-
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
-}
-
-static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = regmap_set_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
-
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
-}
-
-static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- regmap_clear_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
-}
-
-static void
-qca8k_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
-{
- struct qca8k_priv *priv = ds->priv;
- unsigned int secs = msecs / 1000;
- u32 val;
-
- /* AGE_TIME reg is set in 7s step */
- val = secs / 7;
-
- /* Handle case with 0 as val to NOT disable
- * learning
- */
- if (!val)
- val = 1;
-
- return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
- QCA8K_ATU_AGE_TIME(val));
-}
-
-static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 1);
- priv->port_enabled_map |= BIT(port);
-
- if (dsa_is_user_port(ds, port))
- phy_support_asym_pause(phy);
-
- return 0;
-}
-
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
- priv->port_enabled_map &= ~BIT(port);
-}
-
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- /* We have only have a general MTU setting.
- * DSA always set the CPU port's MTU to the largest MTU of the slave
- * ports.
- * Setting MTU just for the CPU port is sufficient to correctly set a
- * value for every port.
- */
- if (!dsa_is_cpu_port(ds, port))
- return 0;
-
- /* To change the MAX_FRAME_SIZE the cpu ports must be off or
- * the switch panics.
- * Turn off both cpu ports before applying the new value to prevent
- * this.
- */
- if (priv->port_enabled_map & BIT(0))
- qca8k_port_set_status(priv, 0, 0);
-
- if (priv->port_enabled_map & BIT(6))
- qca8k_port_set_status(priv, 6, 0);
-
- /* Include L2 header / FCS length */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
-
- if (priv->port_enabled_map & BIT(0))
- qca8k_port_set_status(priv, 0, 1);
-
- if (priv->port_enabled_map & BIT(6))
- qca8k_port_set_status(priv, 6, 1);
-
- return ret;
-}
-
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
- return QCA8K_MAX_MTU;
-}
-
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
- u16 port_mask, u16 vid)
-{
- /* Set the vid to the port vlan id if no vid is set */
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_add(priv, addr, port_mask, vid,
- QCA8K_ATU_STATUS_STATIC);
-}
-
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_del(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- struct qca8k_fdb _fdb = { 0 };
- int cnt = QCA8K_NUM_FDB_RECORDS;
- bool is_static;
- int ret = 0;
-
- mutex_lock(&priv->reg_mutex);
- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
- if (!_fdb.aging)
- break;
- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
- ret = cb(_fdb.mac, _fdb.vid, is_static, data);
- if (ret)
- break;
- }
- mutex_unlock(&priv->reg_mutex);
-
- return 0;
-}
-
-static int
-qca8k_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = ds->priv;
- const u8 *addr = mdb->addr;
- u16 vid = mdb->vid;
-
- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = ds->priv;
- const u8 *addr = mdb->addr;
- u16 vid = mdb->vid;
-
- return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress, struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
- int monitor_port, ret;
- u32 reg, val;
-
- /* Check for existent entry */
- if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
- return -EEXIST;
-
- ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
- if (ret)
- return ret;
-
- /* QCA83xx can have only one port set to mirror mode.
- * Check that the correct port is requested and return error otherwise.
- * When no mirror port is set, the values is set to 0xF
- */
- monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
- return -EEXIST;
-
- /* Set the monitor port */
- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
- mirror->to_local_port);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (ret)
- return ret;
-
- if (ingress) {
- reg = QCA8K_PORT_LOOKUP_CTRL(port);
- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
- } else {
- reg = QCA8K_REG_PORT_HOL_CTRL1(port);
- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
- }
-
- ret = regmap_update_bits(priv->regmap, reg, val, val);
- if (ret)
- return ret;
-
- /* Track mirror port for tx and rx to decide when the
- * mirror port has to be disabled.
- */
- if (ingress)
- priv->mirror_rx |= BIT(port);
- else
- priv->mirror_tx |= BIT(port);
-
- return 0;
-}
-
-static void
-qca8k_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg, val;
- int ret;
-
- if (mirror->ingress) {
- reg = QCA8K_PORT_LOOKUP_CTRL(port);
- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
- } else {
- reg = QCA8K_REG_PORT_HOL_CTRL1(port);
- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
- }
-
- ret = regmap_clear_bits(priv->regmap, reg, val);
- if (ret)
- goto err;
-
- if (mirror->ingress)
- priv->mirror_rx &= ~BIT(port);
- else
- priv->mirror_tx &= ~BIT(port);
-
- /* No port set to send packet to mirror port. Disable mirror port */
- if (!priv->mirror_rx && !priv->mirror_tx) {
- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (ret)
- goto err;
- }
-err:
- dev_err(priv->dev, "Failed to del mirror port from %d", port);
-}
-
-static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- if (vlan_filtering) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
- if (ret) {
- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
- return ret;
- }
-
- if (pvid) {
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- QCA8K_EGREES_VLAN_PORT_MASK(port),
- QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_del(priv, port, vlan->vid);
- if (ret)
- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
-
- return ret;
-}
-
static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = ds->priv;
@@ -2640,174 +1593,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
return DSA_TAG_PROTO_QCA;
}
-static bool
-qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
-{
- struct dsa_port *dp;
- int members = 0;
-
- if (!lag.id)
- return false;
-
- dsa_lag_foreach_port(dp, ds->dst, &lag)
- /* Includes the port joining the LAG */
- members++;
-
- if (members > QCA8K_NUM_PORTS_FOR_LAG)
- return false;
-
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
- return false;
-
- if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
- return false;
-
- return true;
-}
-
-static int
-qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
-{
- struct net_device *lag_dev = lag.dev;
- struct qca8k_priv *priv = ds->priv;
- bool unique_lag = true;
- unsigned int i;
- u32 hash = 0;
-
- switch (info->hash_type) {
- case NETDEV_LAG_HASH_L23:
- hash |= QCA8K_TRUNK_HASH_SIP_EN;
- hash |= QCA8K_TRUNK_HASH_DIP_EN;
- fallthrough;
- case NETDEV_LAG_HASH_L2:
- hash |= QCA8K_TRUNK_HASH_SA_EN;
- hash |= QCA8K_TRUNK_HASH_DA_EN;
- break;
- default: /* We should NEVER reach this */
- return -EOPNOTSUPP;
- }
-
- /* Check if we are the unique configured LAG */
- dsa_lags_foreach_id(i, ds->dst)
- if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
- unique_lag = false;
- break;
- }
-
- /* Hash Mode is global. Make sure the same Hash Mode
- * is set to all the 4 possible lag.
- * If we are the unique LAG we can set whatever hash
- * mode we want.
- * To change hash mode it's needed to remove all LAG
- * and change the mode with the latest.
- */
- if (unique_lag) {
- priv->lag_hash_mode = hash;
- } else if (priv->lag_hash_mode != hash) {
- netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
- return -EOPNOTSUPP;
- }
-
- return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
- QCA8K_TRUNK_HASH_MASK, hash);
-}
-
-static int
-qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
- struct dsa_lag lag, bool delete)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret, id, i;
- u32 val;
-
- /* DSA LAG IDs are one-based, hardware is zero-based */
- id = lag.id - 1;
-
- /* Read current port member */
- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
- if (ret)
- return ret;
-
- /* Shift val to the correct trunk */
- val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
- val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
- if (delete)
- val &= ~BIT(port);
- else
- val |= BIT(port);
-
- /* Update port member. With empty portmap disable trunk */
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
- QCA8K_REG_GOL_TRUNK_MEMBER(id) |
- QCA8K_REG_GOL_TRUNK_EN(id),
- !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
- val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
-
- /* Search empty member if adding or port on deleting */
- for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
- if (ret)
- return ret;
-
- val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
-
- if (delete) {
- /* If port flagged to be disabled assume this member is
- * empty
- */
- if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
- continue;
-
- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
- if (val != port)
- continue;
- } else {
- /* If port flagged to be enabled assume this member is
- * already set
- */
- if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
- continue;
- }
-
- /* We have found the member to add/remove */
- break;
- }
-
- /* Set port in the correct port mask or disable port if in delete mode */
- return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
- !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
- port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
-}
-
-static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
-{
- int ret;
-
- if (!qca8k_lag_can_offload(ds, lag, info))
- return -EOPNOTSUPP;
-
- ret = qca8k_lag_setup_hash(ds, lag, info);
- if (ret)
- return ret;
-
- return qca8k_lag_refresh_portmap(ds, port, lag, false);
-}
-
-static int
-qca8k_port_lag_leave(struct dsa_switch *ds, int port,
- struct dsa_lag lag)
-{
- return qca8k_lag_refresh_portmap(ds, port, lag, true);
-}
-
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
bool operational)
@@ -3091,36 +1876,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.connect_tag_protocol = qca8k_connect_tag_protocol,
};
-static int qca8k_read_switch_id(struct qca8k_priv *priv)
-{
- const struct qca8k_match_data *data;
- u32 val;
- u8 id;
- int ret;
-
- /* get the switches ID from the compatible */
- data = of_device_get_match_data(priv->dev);
- if (!data)
- return -ENODEV;
-
- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
- if (ret < 0)
- return -ENODEV;
-
- id = QCA8K_MASK_CTRL_DEVICE_ID(val);
- if (id != data->id) {
- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
- return -ENODEV;
- }
-
- priv->switch_id = id;
-
- /* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
-
- return 0;
-}
-
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
@@ -3134,6 +1889,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
+ priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
@@ -3256,20 +2012,29 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
+static const struct qca8k_info_ops qca8xxx_ops = {
+ .autocast_mib = qca8k_get_ethtool_stats_eth,
+ .read_eth = qca8k_read_eth,
+ .write_eth = qca8k_write_eth,
+};
+
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
.mib_count = QCA8K_QCA833X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
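
Each compatible's qca8k_match_data now carries an ops pointer (qca8xxx_ops), and the probe path caches of_device_get_match_data() in priv->info, so the shared code can reach bus-specific helpers such as read_eth/write_eth/autocast_mib without knowing which front end it was linked with. A hedged sketch of that indirection, using illustrative demo_* names:

/* Hedged sketch of the per-compatible match-data + ops indirection;
 * demo_* names are illustrative, not the driver's. */
#include <errno.h>

struct demo_priv;

struct demo_info_ops {
	int (*read_eth)(struct demo_priv *priv, unsigned int reg,
			unsigned int *val, int len);
};

struct demo_match_data {
	unsigned int id;
	unsigned int mib_count;
	const struct demo_info_ops *ops;
};

struct demo_priv {
	const struct demo_match_data *info;	/* cached at probe time */
};

/* common code only ever goes through priv->info */
static int demo_try_fast_read(struct demo_priv *priv, unsigned int reg,
			      unsigned int *val, int len)
{
	if (priv->info->ops && priv->info->ops->read_eth)
		return priv->info->ops->read_eth(priv, reg, val, len);
	return -EOPNOTSUPP;	/* caller falls back to plain MDIO access */
}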
static const struct of_device_id qca8k_of_match[] = {
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644
index 000000000000..bba95613e218
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
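
The readable-ranges table above moves into the common file and loses its static qualifier; it is presumably still referenced from the regmap_config set up by the bus front end (via .rd_table), so only the documented register windows are readable through the regmap. A hedged sketch of that wiring, with illustrative values:

/* Hedged sketch of wiring an access table into a regmap_config; the real
 * qca8k config lives in qca8k-8xxx.c and differs in detail. */
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range demo_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4),	/* global control */
	regmap_reg_range(0x1000, 0x16ac),	/* per-port MIB blocks */
};

static const struct regmap_access_table demo_readable_table = {
	.yes_ranges = demo_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(demo_readable_ranges),
};

static const struct regmap_config demo_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac,
	.rd_table = &demo_readable_table,	/* reads outside the ranges fail */
};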
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+
+ if (priv->mgmt_master && priv->info->ops->read_eth &&
+ !priv->info->ops->read_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+ u32 tmp;
+
+ if (priv->mgmt_master && priv->info->ops->write_eth &&
+ !priv->info->ops->write_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ tmp = val[i];
+
+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
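
qca8k_busy_wait() above is a thin wrapper around regmap_read_poll_timeout(), which keeps re-reading the register until the busy bit clears or QCA8K_BUSY_WAIT_TIMEOUT (converted to microseconds) expires. A rough, hedged expansion of what such a poll loop does, ignoring the sleep tuning and the final re-read the real macro provides:

/* Hedged expansion of the busy-wait helper above; demo_* is illustrative. */
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>

static int demo_busy_wait(struct regmap *map, unsigned int reg,
			  unsigned int busy_mask, unsigned long timeout_us)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us);
	unsigned int val;
	int ret;

	for (;;) {
		ret = regmap_read(map, reg, &val);
		if (ret)
			return ret;
		if (!(val & busy_mask))
			return 0;		/* hardware finished */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);		/* don't hammer the bus */
	}
}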
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[3];
+ int ret;
+
+ /* load the ARL table into an array */
+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ if (ret)
+ return ret;
+
+ /* vid - 83:72 */
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+ const u8 *mac, u8 aging)
+{
+ u32 reg[3] = { 0 };
+
+ /* vid - 83:72 */
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+}
+
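
qca8k_fdb_read()/qca8k_fdb_write() above pack and unpack an ARL entry (vid 83:72, aging 67:64, port mask 54:48, MAC 47:0) across three u32 registers using FIELD_GET()/FIELD_PREP() with the QCA8K_ATU_*_MASK definitions from qca8k.h. A standalone sketch of the same pack/unpack idea, with illustrative mask values rather than the real ones:

/* Standalone sketch of the pack/unpack done by qca8k_fdb_write()/_read();
 * the mask values here are illustrative, the real ones come from qca8k.h. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ADDR0_MASK 0x0000ff00u	/* mac[0], bits 15:8 of reg[1] */
#define DEMO_ADDR1_MASK 0x000000ffu	/* mac[1], bits 7:0 of reg[1] */

/* simplified FIELD_PREP/FIELD_GET: shift by the mask's lowest set bit */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint8_t mac0 = 0xde, mac1 = 0xad;
	uint32_t reg1;

	reg1 = field_prep(DEMO_ADDR0_MASK, mac0) |
	       field_prep(DEMO_ADDR1_MASK, mac1);

	printf("packed reg[1] = 0x%08x\n", reg1);
	printf("mac[0] = %02x, mac[1] = %02x\n",
	       (unsigned)field_get(DEMO_ADDR0_MASK, reg1),
	       (unsigned)field_get(DEMO_ADDR1_MASK, reg1));
	return 0;
}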
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+ int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+ int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exist. Delete first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist. Why delete? */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* Only port in the rule is this port. Don't re insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+ enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+ bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+	/* Ports 0 and 6 have no internal PHY */
+ if (port > 0 && port < 6)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+ if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+ priv->info->ops->autocast_mib(ds, port, data) > 0)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
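A minimal sketch of how the two 32-bit reads above combine into one 64-bit counter; qca8k_mib_counter() is a hypothetical helper for illustration, not part of this patch.

/* Illustrative sketch only: assembles a MIB counter from the low word
 * read at the counter offset and, for 2-word counters, the high word
 * read from the following register (offset + 4).
 */
static u64 qca8k_mib_counter(u32 lo, u32 hi, unsigned int size)
{
	u64 counter = lo;

	if (size == 2)
		counter |= (u64)hi << 32;

	return counter;
}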
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *eee)
+{
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int port_mask, cpu_port;
+ int i, ret;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+
+	/* Add all other ports to this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
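A worked sketch of the member mask this ends up programming; the helper below and the port layout (CPU port 0, user ports 1-3 in one bridge) are assumptions for illustration, not part of this patch.

/* Illustrative sketch only: LOOKUP_CTRL member mask for @port, given the
 * CPU port and the bitmask of ports in the same bridge. E.g. with CPU
 * port 0 and bridged ports 1-3, port 1 ends up with
 * BIT(0) | BIT(2) | BIT(3): it may forward to the CPU port and the other
 * bridged ports, but never back to itself.
 */
static u16 qca8k_bridge_member_mask(int port, int cpu_port, u16 bridged_ports)
{
	return (BIT(cpu_port) | bridged_ports) & ~BIT(port);
}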
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port, i;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+		/* Remove this port from the portvlan mask of the other ports
+ * in the bridge
+ */
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* AGE_TIME reg is set in 7s step */
+ val = secs / 7;
+
+	/* A value of 0 would disable address learning entirely, so clamp
+	 * the rounded-down result to 1
+	 */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
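A quick sketch of the conversion above; qca8k_msecs_to_age_time() is a hypothetical helper for illustration, not part of this patch. The hardware counts in 7-second steps and the result is clamped to 1 so learning never gets disabled.

/* Illustrative sketch only: AGE_TIME register value for a given ageing
 * time in milliseconds, using 7-second hardware steps and clamping the
 * result to 1.
 */
static unsigned int qca8k_msecs_to_age_time(unsigned int msecs)
{
	unsigned int val = (msecs / 1000) / 7;

	return val ? val : 1;
}

/* e.g. 5000 ms -> 1, 70000 ms -> 10, 300000 ms (bridge default) -> 42 */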
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_enabled_map |= BIT(port);
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+	/* We only have a global MTU setting.
+	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
+	 * ports.
+	 * Setting the MTU just for the CPU port is therefore sufficient to
+	 * apply a correct value to every port.
+	 */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or
+ * the switch panics.
+ * Turn off both cpu ports before applying the new value to prevent
+ * this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
+ /* Include L2 header / FCS length */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
+}
+
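For reference, MAX_FRAME_SIZE holds the full on-wire frame length rather than the MTU; a small sketch of the value written above, with a hypothetical helper name.

/* Illustrative sketch only: MAX_FRAME_SIZE programmed for a given MTU.
 * ETH_HLEN (14) and ETH_FCS_LEN (4) are added, so an MTU of 1500 writes
 * 1518 to the register.
 */
static u32 qca8k_mtu_to_frame_size(int new_mtu)
{
	return new_mtu + ETH_HLEN + ETH_FCS_LEN;
}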
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+	/* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return error otherwise.
+	 * When no mirror port is set, the value reads back as 0xF
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+	/* No port left sending packets to the mirror port. Disable it */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+	}
+
+	return;
+err:
+	dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ return false;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct net_device *lag_dev = lag.dev;
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ unsigned int i;
+ u32 hash = 0;
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the unique configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+	/* The hash mode is global. Make sure the same hash mode
+	 * is used by all four possible LAGs.
+	 * If we are the only configured LAG we can set whatever
+	 * hash mode we want.
+	 * To change the hash mode, every LAG must first be removed
+	 * and then re-created with the new mode.
+	 */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+		netdev_err(lag_dev, "Error: Mismatched hash mode across LAGs is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
+
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct dsa_lag lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update port member. With empty portmap disable trunk */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+	/* Search for an empty member when adding, or for the port when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+			/* If the member is flagged as disabled, assume it is
+			 * empty and skip it
+			 */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+			/* If the member is flagged as enabled, assume it is
+			 * already in use and skip it
+			 */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ u32 val;
+ u8 id;
+ int ret;
+
+ if (!priv->info)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != priv->info->id) {
+ dev_err(priv->dev,
+ "Switch id detected %x but expected %x",
+ id, priv->info->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index ec58d0e80a70..e36ecc9777f4 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -324,10 +324,20 @@ enum qca8k_mid_cmd {
QCA8K_MIB_CAST = 3,
};
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+ /* TODO: remove these extra ops when we can support regmap bulk read/write */
+ int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+ int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+};
+
struct qca8k_match_data {
u8 id;
bool reduced_package;
u8 mib_count;
+ const struct qca8k_info_ops *ops;
};
enum {
@@ -401,6 +411,7 @@ struct qca8k_priv {
struct qca8k_mdio_cache mdio_cache;
struct qca8k_pcs pcs_port_0;
struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
};
struct qca8k_mib_desc {
@@ -416,4 +427,93 @@ struct qca8k_fdb {
u8 mac[6];
};
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
#endif /* __QCA8K_H */
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 769f672e9128..da31d8b839ac 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -101,27 +101,14 @@
#include "realtek.h"
-/* Chip-specific data and limits */
-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
-#define RTL8365MB_CHIP_VER_8365MB_VC 0x0040
-
-#define RTL8365MB_CHIP_ID_8367S 0x6367
-#define RTL8365MB_CHIP_VER_8367S 0x00A0
-
-#define RTL8365MB_CHIP_ID_8367RB 0x6367
-#define RTL8365MB_CHIP_VER_8367RB 0x0020
-
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
#define RTL8365MB_NUM_PHYREGS 32
#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
-/* RTL8370MB and RTL8310SR, possibly suportable by this driver, have 10 ports */
-#define RTL8365MB_MAX_NUM_PORTS 10
+#define RTL8365MB_MAX_NUM_PORTS 11
+#define RTL8365MB_MAX_NUM_EXTINTS 3
#define RTL8365MB_LEARN_LIMIT_MAX 2112
-/* valid for all 6-port or less variants */
-static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2, -1, -1};
-
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
@@ -201,7 +188,7 @@ static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2,
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
-/* EXT interface port mode values - used in DIGITAL_INTERFACE_SELECT */
+/* External interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
@@ -217,19 +204,7 @@ static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2,
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
-/* Realtek docs and driver uses logic number as EXT_PORT0=16, EXT_PORT1=17,
- * EXT_PORT2=18, to interact with switch ports. That logic number is internally
- * converted to either a physical port number (0..9) or an external interface id (0..2),
- * depending on which function was called. The external interface id is calculated as
- * (ext_id=logic_port-15), while the logical to physical map depends on the chip id/version.
- *
- * EXT_PORT0 mentioned in datasheets and rtl8367c driver is used in this driver
- * as extid==1, EXT_PORT2, mentioned in Realtek rtl8367c driver for 10-port switches,
- * would have an ext_id of 3 (out of range for most extint macros) and ext_id 0 does
- * not seem to be used as well for this family.
- */
-
-/* EXT interface mode configuration registers 0~1 */
+/* External interface mode configuration registers 0~1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
@@ -241,7 +216,7 @@ static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2,
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
(((_extint) % 2) * 4)
-/* EXT interface RGMII TX/RX delay configuration registers 0~2 */
+/* External interface RGMII TX/RX delay configuration registers 0~2 */
#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
@@ -258,7 +233,7 @@ static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2,
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
-/* EXT interface force configuration registers 0~2 */
+/* External interface force configuration registers 0~2 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
@@ -490,6 +465,95 @@ static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
{ 0x1D32, 0x0002 },
};
+enum rtl8365mb_phy_interface_mode {
+ RTL8365MB_PHY_INTERFACE_MODE_INVAL = 0,
+ RTL8365MB_PHY_INTERFACE_MODE_INTERNAL = BIT(0),
+ RTL8365MB_PHY_INTERFACE_MODE_MII = BIT(1),
+ RTL8365MB_PHY_INTERFACE_MODE_TMII = BIT(2),
+ RTL8365MB_PHY_INTERFACE_MODE_RMII = BIT(3),
+ RTL8365MB_PHY_INTERFACE_MODE_RGMII = BIT(4),
+ RTL8365MB_PHY_INTERFACE_MODE_SGMII = BIT(5),
+ RTL8365MB_PHY_INTERFACE_MODE_HSGMII = BIT(6),
+};
+
+/**
+ * struct rtl8365mb_extint - external interface info
+ * @port: the port with an external interface
+ * @id: the external interface ID, which is either 0, 1, or 2
+ * @supported_interfaces: a bitmask of supported PHY interface modes
+ *
+ * Represents a mapping: port -> { id, supported_interfaces }. To be embedded
+ * in &struct rtl8365mb_chip_info for every port with an external interface.
+ */
+struct rtl8365mb_extint {
+ int port;
+ int id;
+ unsigned int supported_interfaces;
+};
+
+/**
+ * struct rtl8365mb_chip_info - static chip-specific info
+ * @name: human-readable chip name
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @extints: available external interfaces
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * These data are specific to a given chip in the family of switches supported
+ * by this driver. When adding support for another chip in the family, a new
+ * chip info should be added to the rtl8365mb_chip_infos array.
+ */
+struct rtl8365mb_chip_info {
+ const char *name;
+ u32 chip_id;
+ u32 chip_ver;
+ const struct rtl8365mb_extint extints[RTL8365MB_MAX_NUM_EXTINTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+/* Chip info for each supported switch in the family */
+#define PHY_INTF(_mode) (RTL8365MB_PHY_INTERFACE_MODE_ ## _mode)
+static const struct rtl8365mb_chip_info rtl8365mb_chip_infos[] = {
+ {
+ .name = "RTL8365MB-VC",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0040,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367S",
+ .chip_id = 0x6367,
+ .chip_ver = 0x00A0,
+ .extints = {
+ { 6, 1, PHY_INTF(SGMII) | PHY_INTF(HSGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367RB-VB",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0020,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+};
+
enum rtl8365mb_stp_state {
RTL8365MB_STP_STATE_DISABLED = 0,
RTL8365MB_STP_STATE_BLOCKING = 1,
@@ -559,33 +623,23 @@ struct rtl8365mb_port {
};
/**
- * struct rtl8365mb - private chip-specific driver data
+ * struct rtl8365mb - driver private data
* @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
- * @chip_id: chip identifier
- * @chip_ver: chip silicon revision
- * @port_mask: mask of all ports
- * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @chip_info: chip-specific info about the attached switch
* @cpu: CPU tagging and CPU port configuration for this chip
* @mib_lock: prevent concurrent reads of MIB counters
* @ports: per-port data
- * @jam_table: chip-specific initialization jam table
- * @jam_size: size of the chip's jam table
*
* Private data for this driver.
*/
struct rtl8365mb {
struct realtek_priv *priv;
int irq;
- u32 chip_id;
- u32 chip_ver;
- u32 port_mask;
- u32 learn_limit_max;
+ const struct rtl8365mb_chip_info *chip_info;
struct rtl8365mb_cpu cpu;
struct mutex mib_lock;
struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
- const struct rtl8365mb_jam_tbl_entry *jam_table;
- size_t jam_size;
};
static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
@@ -780,6 +834,26 @@ static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
}
+static const struct rtl8365mb_extint *
+rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ for (i = 0; i < RTL8365MB_MAX_NUM_EXTINTS; i++) {
+ const struct rtl8365mb_extint *extint =
+ &mb->chip_info->extints[i];
+
+ if (!extint->supported_interfaces)
+ continue;
+
+ if (extint->port == port)
+ return extint;
+ }
+
+ return NULL;
+}
+
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -800,20 +874,17 @@ rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
- int ext_int;
u32 val;
int ret;
- ext_int = rtl8365mb_extint_port_map[port];
-
- if (ext_int <= 0) {
- dev_err(priv->dev, "Port %d is not an external interface port\n", port);
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
dp = dsa_to_port(priv->ds, port);
dn = dp->dn;
@@ -847,7 +918,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
tx_delay = val / 2;
else
dev_warn(priv->dev,
- "EXT interface TX delay must be 0 or 2 ns\n");
+ "RGMII TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
@@ -857,11 +928,11 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
rx_delay = val;
else
dev_warn(priv->dev,
- "EXT interface RX delay must be 0 to 2.1 ns\n");
+ "RGMII RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
- priv->map, RTL8365MB_EXT_RGMXF_REG(ext_int),
+ priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
@@ -870,11 +941,11 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
return ret;
ret = regmap_update_bits(
- priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_int),
- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_int),
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(extint->id),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
- ext_int));
+ extint->id));
if (ret)
return ret;
@@ -885,21 +956,18 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
u32 r_tx_pause;
u32 r_rx_pause;
u32 r_duplex;
u32 r_speed;
u32 r_link;
- int ext_int;
int val;
int ret;
- ext_int = rtl8365mb_extint_port_map[port];
-
- if (ext_int <= 0) {
- dev_err(priv->dev, "Port %d is not an external interface port\n", port);
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
if (link) {
/* Force the link up with the desired configuration */
@@ -947,7 +1015,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
ret = regmap_write(priv->map,
- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_int),
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extint->id),
val);
if (ret)
return ret;
@@ -958,7 +1026,13 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- if (dsa_is_user_port(ds, port)) {
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(ds->priv, port);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if (!extint) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -967,12 +1041,16 @@ static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
*/
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
- } else if (dsa_is_cpu_port(ds, port)) {
- phy_interface_set_rgmii(config->supported_interfaces);
+ return;
}
- config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000FD;
+ /* Populate according to the modes supported by _this driver_,
+ * not necessarily the modes supported by the hardware, some of
+ * which remain unimplemented.
+ */
+
+ if (extint->supported_interfaces & RTL8365MB_PHY_INTERFACE_MODE_RGMII)
+ phy_interface_set_rgmii(config->supported_interfaces);
}
static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
@@ -1091,15 +1169,13 @@ static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
- struct rtl8365mb *mb = priv->chip_data;
-
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
- enable ? mb->learn_limit_max : 0);
+ enable ? RTL8365MB_LEARN_LIMIT_MAX : 0);
}
static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
@@ -1489,13 +1565,10 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
struct realtek_priv *priv = data;
unsigned long line_changes = 0;
- struct rtl8365mb *mb;
u32 stat;
int line;
int ret;
- mb = priv->chip_data;
-
ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
@@ -1520,7 +1593,7 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
- line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ line_changes = linkup_ind | linkdown_ind;
}
if (!line_changes)
@@ -1792,14 +1865,17 @@ static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ const struct rtl8365mb_chip_info *ci;
int ret;
int i;
+ ci = mb->chip_info;
+
/* Do any chip-specific init jam before getting to the common stuff */
- if (mb->jam_table) {
- for (i = 0; i < mb->jam_size; i++) {
- ret = regmap_write(priv->map, mb->jam_table[i].reg,
- mb->jam_table[i].val);
+ if (ci->jam_table) {
+ for (i = 0; i < ci->jam_size; i++) {
+ ret = regmap_write(priv->map, ci->jam_table[i].reg,
+ ci->jam_table[i].val);
if (ret)
return ret;
}
@@ -1972,6 +2048,7 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
u32 chip_id;
u32 chip_ver;
int ret;
+ int i;
ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
@@ -1980,54 +2057,32 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return ret;
}
- switch (chip_id) {
- case RTL8365MB_CHIP_ID_8365MB_VC:
- switch (chip_ver) {
- case RTL8365MB_CHIP_VER_8365MB_VC:
- dev_info(priv->dev,
- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
- chip_ver);
- break;
- case RTL8365MB_CHIP_VER_8367RB:
- dev_info(priv->dev,
- "found an RTL8367RB-VB switch (ver=0x%04x)\n",
- chip_ver);
- break;
- case RTL8365MB_CHIP_VER_8367S:
- dev_info(priv->dev,
- "found an RTL8367S switch (ver=0x%04x)\n",
- chip_ver);
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_chip_infos); i++) {
+ const struct rtl8365mb_chip_info *ci = &rtl8365mb_chip_infos[i];
+
+ if (ci->chip_id == chip_id && ci->chip_ver == chip_ver) {
+ mb->chip_info = ci;
break;
- default:
- dev_err(priv->dev, "unrecognized switch version (ver=0x%04x)",
- chip_ver);
- return -ENODEV;
}
+ }
- priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
-
- mb->priv = priv;
- mb->chip_id = chip_id;
- mb->chip_ver = chip_ver;
- mb->port_mask = GENMASK(priv->num_ports - 1, 0);
- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX;
- mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
- mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
-
- mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
- mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
- mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
- mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
- mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
-
- break;
- default:
+ if (!mb->chip_info) {
dev_err(priv->dev,
- "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
- chip_id, chip_ver);
+ "unrecognized switch (id=0x%04x, ver=0x%04x)", chip_id,
+ chip_ver);
return -ENODEV;
}
+ dev_info(priv->dev, "found an %s switch\n", mb->chip_info->name);
+
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
+ mb->priv = priv;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
return 0;
}
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
new file mode 100644
index 000000000000..0744e8162e1d
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -0,0 +1,1064 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+
+#include "rzn1_a5psw.h"
+
+struct a5psw_stats {
+ u16 offset;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define STAT_DESC(_offset) { \
+ .offset = A5PSW_##_offset, \
+ .name = __stringify(_offset), \
+}
+
+static const struct a5psw_stats a5psw_stats[] = {
+ STAT_DESC(aFramesTransmittedOK),
+ STAT_DESC(aFramesReceivedOK),
+ STAT_DESC(aFrameCheckSequenceErrors),
+ STAT_DESC(aAlignmentErrors),
+ STAT_DESC(aOctetsTransmittedOK),
+ STAT_DESC(aOctetsReceivedOK),
+ STAT_DESC(aTxPAUSEMACCtrlFrames),
+ STAT_DESC(aRxPAUSEMACCtrlFrames),
+ STAT_DESC(ifInErrors),
+ STAT_DESC(ifOutErrors),
+ STAT_DESC(ifInUcastPkts),
+ STAT_DESC(ifInMulticastPkts),
+ STAT_DESC(ifInBroadcastPkts),
+ STAT_DESC(ifOutDiscards),
+ STAT_DESC(ifOutUcastPkts),
+ STAT_DESC(ifOutMulticastPkts),
+ STAT_DESC(ifOutBroadcastPkts),
+ STAT_DESC(etherStatsDropEvents),
+ STAT_DESC(etherStatsOctets),
+ STAT_DESC(etherStatsPkts),
+ STAT_DESC(etherStatsUndersizePkts),
+ STAT_DESC(etherStatsOversizePkts),
+ STAT_DESC(etherStatsPkts64Octets),
+ STAT_DESC(etherStatsPkts65to127Octets),
+ STAT_DESC(etherStatsPkts128to255Octets),
+ STAT_DESC(etherStatsPkts256to511Octets),
+ STAT_DESC(etherStatsPkts1024to1518Octets),
+ STAT_DESC(etherStatsPkts1519toXOctets),
+ STAT_DESC(etherStatsJabbers),
+ STAT_DESC(etherStatsFragments),
+ STAT_DESC(VLANReceived),
+ STAT_DESC(VLANTransmitted),
+ STAT_DESC(aDeferred),
+ STAT_DESC(aMultipleCollisions),
+ STAT_DESC(aSingleCollisions),
+ STAT_DESC(aLateCollisions),
+ STAT_DESC(aExcessiveCollisions),
+ STAT_DESC(aCarrierSenseErrors),
+};
+
+static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
+{
+ writel(value, a5psw->base + offset);
+}
+
+static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
+{
+ return readl(a5psw->base + offset);
+}
+
+static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&a5psw->reg_lock);
+
+ reg = a5psw_reg_readl(a5psw, offset);
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, offset, reg);
+
+ spin_unlock(&a5psw->reg_lock);
+}
+
+static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RZN1_A5PSW;
+}
+
+static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
+ bool enable)
+{
+ u32 rx_match = 0;
+
+ if (enable)
+ rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
+
+ a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
+ A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
+}
+
+static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+{
+	/* Enable "management forward" pattern matching; this forwards
+	 * packets from this port only towards the management port and thus
+	 * isolates the port.
+	 */
+ a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+}
+
+static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 port_ena = 0;
+
+ if (enable)
+ port_ena |= A5PSW_PORT_ENA_TX_RX(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
+ port_ena);
+}
+
+static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
+{
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
+ !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret)
+ dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
+
+ return ret;
+}
+
+static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
+{
+ u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+ a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ mutex_unlock(&a5psw->lk_lock);
+}
+
+static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
+ bool authorize)
+{
+ u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
+
+ if (authorize)
+ reg |= A5PSW_AUTH_PORT_AUTHORIZED;
+ else
+ reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
+
+ a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
+}
+
+static void a5psw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, false);
+ a5psw_port_enable_set(a5psw, port, false);
+}
+
+static int a5psw_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, true);
+ a5psw_port_enable_set(a5psw, port, true);
+
+ return 0;
+}
+
+static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
+ a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
+
+ return 0;
+}
+
+static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return A5PSW_MAX_MTU;
+}
+
+static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *intf = config->supported_interfaces;
+
+ config->mac_capabilities = MAC_1000FD;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* GMII is used internally and GMAC2 is connected to the switch
+ * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, intf);
+ } else {
+ config->mac_capabilities |= MAC_100 | MAC_10;
+ phy_interface_set_rgmii(intf);
+ __set_bit(PHY_INTERFACE_MODE_RMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_MII, intf);
+ }
+}
+
+static struct phylink_pcs *
+a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct a5psw *a5psw = ds->priv;
+
+ if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
+ return a5psw->pcs[port];
+
+ return NULL;
+}
+
+static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 cmd_cfg;
+
+ cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
+ cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
+ A5PSW_CMD_CFG_TX_CRC_APPEND;
+ struct a5psw *a5psw = ds->priv;
+
+ if (speed == SPEED_1000)
+ cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
+
+ if (duplex == DUPLEX_HALF)
+ cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
+
+ cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
+
+ if (!rx_pause)
+ cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
+
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned long rate;
+ u64 max, tmp;
+ u32 agetime;
+
+ rate = clk_get_rate(a5psw->clk);
+ max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
+ rate) * 1000;
+ if (msecs > max)
+ return -EINVAL;
+
+ tmp = div_u64(rate, MSEC_PER_SEC);
+ agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
+
+ return 0;
+}
+
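A sketch of the conversion above under an assumed clock rate; the helper name, the 125 MHz rate and the 1024-entry table are assumptions for the example, not values taken from this patch. One LK_AGETIME unit corresponds to 1024 * A5PSW_TABLE_ENTRIES switch-clock cycles.

/* Illustrative sketch only: LK_AGETIME value for a given ageing time,
 * where one register unit is 1024 * entries clock cycles.
 */
static u32 a5psw_msecs_to_agetime(unsigned int msecs, unsigned long rate,
				  unsigned int entries)
{
	u64 ticks_per_ms = div_u64(rate, MSEC_PER_SEC);

	return div_u64(msecs * ticks_per_ms, 1024 * entries);
}

/* e.g. 300000 ms at an assumed 125 MHz with 1024 entries -> ~35762 */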
+static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ bool set)
+{
+ u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
+ A5PSW_MCAST_DEF_MASK};
+ int i;
+
+ if (set)
+ a5psw->bridged_ports |= BIT(port);
+ else
+ a5psw->bridged_ports &= ~BIT(port);
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++)
+ a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+}
+
+static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ /* We only support 1 bridge device */
+ if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding offload supported for a single bridge");
+ return -EOPNOTSUPP;
+ }
+
+ a5psw->br_dev = bridge.dev;
+ a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_mgmtfwd_set(a5psw, port, false);
+
+ return 0;
+}
+
+static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_flooding_set_resolution(a5psw, port, false);
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+
+ /* No more ports bridged */
+ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+ a5psw->br_dev = NULL;
+}
+
+static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+ struct a5psw *a5psw = ds->priv;
+ u32 reg = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_LISTENING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ break;
+ case BR_STATE_LEARNING:
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ break;
+ }
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_fdb_flush(a5psw, port);
+}
+
+static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
+ u16 *entry)
+{
+ u32 ctrl;
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
+
+ ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
+ ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ if (ret)
+ return ret;
+
+ *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
+
+ return 0;
+}
+
+static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool inc_learncount = false;
+ int ret = 0;
+ u16 entry;
+ u32 reg;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+ lk_data.entry.port_mask = BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ /* Set the value to be written in the lookup table */
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ if (!lk_data.entry.valid) {
+ inc_learncount = true;
+ /* port_mask set to 0x1f when entry is not valid, clear it */
+ lk_data.entry.port_mask = 0;
+ lk_data.entry.prio = 0;
+ }
+
+ lk_data.entry.port_mask |= BIT(port);
+ lk_data.entry.is_static = 1;
+ lk_data.entry.valid = 1;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ if (inc_learncount) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool clear = false;
+ u16 entry;
+ u32 reg;
+ int ret;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+
+	/* Our hardware does not associate any VID with FDB entries, so if
+	 * two entries were added for the same MAC with different VIDs,
+	 * deleting the first one also deletes the second one. There is
+	 * unfortunately nothing we can do about that, so do not return an
+	 * error...
+	 */
+ if (!lk_data.entry.valid)
+ goto lk_unlock;
+
+ lk_data.entry.port_mask &= ~BIT(port);
+ /* If there is no more port in the mask, clear the entry */
+ if (lk_data.entry.port_mask == 0)
+ clear = true;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = entry;
+ if (clear)
+ reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
+ else
+ reg |= A5PSW_LK_ADDR_CTRL_WRITE;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ /* Decrement LEARNCOUNT */
+ if (clear) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data;
+ int i = 0, ret = 0;
+ u32 reg;
+
+ mutex_lock(&a5psw->lk_lock);
+
+ for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
+ reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto out_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ /* If entry is not valid or does not contain the port, skip */
+ if (!lk_data.entry.valid ||
+ !(lk_data.entry.port_mask & BIT(port)))
+ continue;
+
+ lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
+
+ ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
+{
+ u32 reg_lo, reg_hi;
+
+ reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
+ /* A5PSW_STATS_HIWORD is latched on stat read */
+ reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
+
+ return ((u64)reg_hi << 32) | reg_lo;
+}
+
+static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
+ memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
+}
+
+static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(a5psw_stats);
+}
+
+static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
+ mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
+ mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
+ mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
+ mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
+ mac_stats->AlignmentErrors = RD(aAlignmentErrors);
+ mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
+ mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
+ mac_stats->LateCollisions = RD(aLateCollisions);
+ mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
+ mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
+ mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
+ mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
+ mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
+ mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
+ mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
+ mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
+ mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
+ mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
+#undef RD
+}
+
+static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, A5PSW_MAX_MTU },
+ {}
+};
+
+static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
+ rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
+ rmon_stats->fragments = RD(etherStatsFragments);
+ rmon_stats->jabbers = RD(etherStatsJabbers);
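+ /* Histogram buckets follow the order of a5psw_rmon_ranges above */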
+ rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
+ rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
+ rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
+ rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
+ rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
+ rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
+ rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
+#undef RD
+
+ *ranges = a5psw_rmon_ranges;
+}
+
+static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+ u64 stat;
+
+ stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesTransmitted = stat;
+ stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesReceived = stat;
+}
+
+static int a5psw_setup(struct dsa_switch *ds)
+{
+ struct a5psw *a5psw = ds->priv;
+ int port, vlan, ret;
+ struct dsa_port *dp;
+ u32 reg;
+
+ /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dp->index != A5PSW_CPU_PORT) {
+ dev_err(a5psw->dev, "Invalid CPU port\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Configure management port */
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+
+ /* Set pattern 0 to forward all frames to the mgmt port */
+ a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
+ A5PSW_PATTERN_CTRL_MGMTFWD);
+
+ /* Enable port tagging */
+ reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
+ reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
+
+ /* Enable normal switch operation */
+ reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
+ A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
+ A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
+ a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
+ !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret) {
+ dev_err(a5psw->dev, "Failed to clear lookup table\n");
+ return ret;
+ }
+
+ /* Reset learn count to 0 */
+ reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+
+ /* Clear VLAN resource table */
+ reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
+ for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
+
+ /* Reset all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ port = dp->index;
+
+ /* Reset the port */
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
+ A5PSW_CMD_CFG_SW_RESET);
+
+ /* Enable only CPU port */
+ a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
+
+ if (dsa_port_is_unused(dp))
+ continue;
+
+ /* Enable egress flooding for CPU port */
+ if (dsa_port_is_cpu(dp))
+ a5psw_flooding_set_resolution(a5psw, port, true);
+
+ /* Enable management forward only for user ports */
+ if (dsa_port_is_user(dp))
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops a5psw_switch_ops = {
+ .get_tag_protocol = a5psw_get_tag_protocol,
+ .setup = a5psw_setup,
+ .port_disable = a5psw_port_disable,
+ .port_enable = a5psw_port_enable,
+ .phylink_get_caps = a5psw_phylink_get_caps,
+ .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .phylink_mac_link_down = a5psw_phylink_mac_link_down,
+ .phylink_mac_link_up = a5psw_phylink_mac_link_up,
+ .port_change_mtu = a5psw_port_change_mtu,
+ .port_max_mtu = a5psw_port_max_mtu,
+ .get_sset_count = a5psw_get_sset_count,
+ .get_strings = a5psw_get_strings,
+ .get_ethtool_stats = a5psw_get_ethtool_stats,
+ .get_eth_mac_stats = a5psw_get_eth_mac_stats,
+ .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
+ .get_rmon_stats = a5psw_get_rmon_stats,
+ .set_ageing_time = a5psw_set_ageing_time,
+ .port_bridge_join = a5psw_port_bridge_join,
+ .port_bridge_leave = a5psw_port_bridge_leave,
+ .port_stp_state_set = a5psw_port_stp_state_set,
+ .port_fast_age = a5psw_port_fast_age,
+ .port_fdb_add = a5psw_port_fdb_add,
+ .port_fdb_del = a5psw_port_fdb_del,
+ .port_fdb_dump = a5psw_port_fdb_dump,
+};
+
+static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
+{
+ u32 status;
+ int err;
+
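+ /* Poll the BUSY bit every 10 us, give up after one second */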
+ err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
+ !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
+ 1000 * USEC_PER_MSEC);
+ if (err)
+ dev_err(a5psw->dev, "MDIO command timeout\n");
+
+ return err;
+}
+
+static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd, status;
+ int ret;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = A5PSW_MDIO_COMMAND_READ;
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+
+ ret = a5psw_mdio_wait_busy(a5psw);
+ if (ret)
+ return ret;
+
+ ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
+
+ status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
+ if (status & A5PSW_MDIO_CFG_STATUS_READERR)
+ return -EIO;
+
+ return ret;
+}
+
+static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
+ u16 phy_data)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
+
+ return a5psw_mdio_wait_busy(a5psw);
+}
+
+static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
+{
+ unsigned long rate;
+ unsigned long div;
+ u32 cfgstatus;
+
+ rate = clk_get_rate(a5psw->hclk);
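+ /* Resulting MDC frequency is roughly hclk rate / (2 * div) */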
+ div = ((rate / mdio_freq) / 2);
+ if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
+ div < A5PSW_MDIO_CLK_DIV_MIN) {
+ dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
+ return -ERANGE;
+ }
+
+ cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
+
+ return 0;
+}
+
+static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
+{
+ struct device *dev = a5psw->dev;
+ struct mii_bus *bus;
+ u32 mdio_freq;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
+ mdio_freq = A5PSW_MDIO_DEF_FREQ;
+
+ ret = a5psw_mdio_config(a5psw, mdio_freq);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "a5psw_mdio";
+ bus->read = a5psw_mdio_read;
+ bus->write = a5psw_mdio_write;
+ bus->priv = a5psw;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ a5psw->mii_bus = bus;
+
+ return devm_of_mdiobus_register(dev, bus, node);
+}
+
+static void a5psw_pcs_free(struct a5psw *a5psw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
+ if (a5psw->pcs[i])
+ miic_destroy(a5psw->pcs[i]);
+ }
+}
+
+static int a5psw_pcs_get(struct a5psw *a5psw)
+{
+ struct device_node *ports, *port, *pcs_node;
+ struct phylink_pcs *pcs;
+ int ret;
+ u32 reg;
+
+ ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ pcs_node = of_parse_phandle(port, "pcs-handle", 0);
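+ /* A port without a pcs-handle simply has no PCS to manage */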
+ if (!pcs_node)
+ continue;
+
+ if (of_property_read_u32(port, "reg", &reg)) {
+ ret = -EINVAL;
+ goto free_pcs;
+ }
+
+ if (reg >= ARRAY_SIZE(a5psw->pcs)) {
+ ret = -ENODEV;
+ goto free_pcs;
+ }
+
+ pcs = miic_create(a5psw->dev, pcs_node);
+ if (IS_ERR(pcs)) {
+ dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
+ reg);
+ ret = PTR_ERR(pcs);
+ goto free_pcs;
+ }
+
+ a5psw->pcs[reg] = pcs;
+ of_node_put(pcs_node);
+ }
+ of_node_put(ports);
+
+ return 0;
+
+free_pcs:
+ of_node_put(pcs_node);
+ of_node_put(port);
+ of_node_put(ports);
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio;
+ struct dsa_switch *ds;
+ struct a5psw *a5psw;
+ int ret;
+
+ a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
+ if (!a5psw)
+ return -ENOMEM;
+
+ a5psw->dev = dev;
+ mutex_init(&a5psw->lk_lock);
+ spin_lock_init(&a5psw->reg_lock);
+ a5psw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(a5psw->base))
+ return PTR_ERR(a5psw->base);
+
+ ret = a5psw_pcs_get(a5psw);
+ if (ret)
+ return ret;
+
+ a5psw->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(a5psw->hclk)) {
+ dev_err(dev, "failed to get hclk clock\n");
+ ret = PTR_ERR(a5psw->hclk);
+ goto free_pcs;
+ }
+
+ a5psw->clk = devm_clk_get(dev, "clk");
+ if (IS_ERR(a5psw->clk)) {
+ dev_err(dev, "failed to get clk_switch clock\n");
+ ret = PTR_ERR(a5psw->clk);
+ goto free_pcs;
+ }
+
+ ret = clk_prepare_enable(a5psw->clk);
+ if (ret)
+ goto free_pcs;
+
+ ret = clk_prepare_enable(a5psw->hclk);
+ if (ret)
+ goto clk_disable;
+
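+ /* MDIO is optional: register the bus only if an enabled "mdio" node exists */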
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ ret = a5psw_probe_mdio(a5psw, mdio);
+ if (ret) {
+ of_node_put(mdio);
+ dev_err(dev, "Failed to register MDIO: %d\n", ret);
+ goto hclk_disable;
+ }
+ }
+
+ of_node_put(mdio);
+
+ ds = &a5psw->ds;
+ ds->dev = dev;
+ ds->num_ports = A5PSW_PORTS_NUM;
+ ds->ops = &a5psw_switch_ops;
+ ds->priv = a5psw;
+
+ platform_set_drvdata(pdev, a5psw);
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err(dev, "Failed to register DSA switch: %d\n", ret);
+ goto hclk_disable;
+ }
+
+ return 0;
+
+hclk_disable:
+ clk_disable_unprepare(a5psw->hclk);
+clk_disable:
+ clk_disable_unprepare(a5psw->clk);
+free_pcs:
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_remove(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return 0;
+
+ dsa_unregister_switch(&a5psw->ds);
+ a5psw_pcs_free(a5psw);
+ clk_disable_unprepare(a5psw->hclk);
+ clk_disable_unprepare(a5psw->clk);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void a5psw_shutdown(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_switch_shutdown(&a5psw->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id a5psw_of_mtable[] = {
+ { .compatible = "renesas,rzn1-a5psw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
+
+static struct platform_driver a5psw_driver = {
+ .driver = {
+ .name = "rzn1_a5psw",
+ .of_match_table = of_match_ptr(a5psw_of_mtable),
+ },
+ .probe = a5psw_probe,
+ .remove = a5psw_remove,
+ .shutdown = a5psw_shutdown,
+};
+module_platform_driver(a5psw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
new file mode 100644
index 000000000000..c67abd49c013
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <net/dsa.h>
+
+#define A5PSW_REVISION 0x0
+#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
+
+#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_RX_SHIFT 16
+#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ BIT(port))
+#define A5PSW_UCAST_DEF_MASK 0xC
+
+#define A5PSW_VLAN_VERIFY 0x10
+#define A5PSW_VLAN_VERI_SHIFT 0
+#define A5PSW_VLAN_DISC_SHIFT 16
+
+#define A5PSW_BCAST_DEF_MASK 0x14
+#define A5PSW_MCAST_DEF_MASK 0x18
+
+#define A5PSW_INPUT_LEARN 0x1C
+#define A5PSW_INPUT_LEARN_DIS(p) BIT((p) + 16)
+#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
+
+#define A5PSW_MGMT_CFG 0x20
+#define A5PSW_MGMT_CFG_DISCARD BIT(7)
+
+#define A5PSW_MODE_CFG 0x24
+#define A5PSW_MODE_STATS_RESET BIT(31)
+
+#define A5PSW_VLAN_IN_MODE 0x28
+#define A5PSW_VLAN_IN_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_IN_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_IN_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_IN_MODE_SINGLE_PASSTHROUGH 0x0
+#define A5PSW_VLAN_IN_MODE_SINGLE_REPLACE 0x1
+#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
+
+#define A5PSW_VLAN_OUT_MODE 0x2C
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2))
+#define A5PSW_VLAN_OUT_MODE_DIS 0x0
+#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
+#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
+#define A5PSW_VLAN_OUT_MODE_TRANSPARENT 0x3
+
+#define A5PSW_VLAN_IN_MODE_ENA 0x30
+#define A5PSW_VLAN_TAG_ID 0x34
+
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
+#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
+
+#define A5PSW_VLAN_RES(entry) (0x280 + 4 * (entry))
+#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
+#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
+#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
+#define A5PSW_VLAN_RES_ID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
+
+#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
+#define A5PSW_RXMATCH_CONFIG_PATTERN(p) BIT(p)
+
+#define A5PSW_PATTERN_CTRL(p) (0x3eb0 + 4 * (p))
+#define A5PSW_PATTERN_CTRL_MGMTFWD BIT(1)
+
+#define A5PSW_LK_CTRL 0x400
+#define A5PSW_LK_ADDR_CTRL_BLOCKING BIT(0)
+#define A5PSW_LK_ADDR_CTRL_LEARNING BIT(1)
+#define A5PSW_LK_ADDR_CTRL_AGEING BIT(2)
+#define A5PSW_LK_ADDR_CTRL_ALLOW_MIGR BIT(3)
+#define A5PSW_LK_ADDR_CTRL_CLEAR_TABLE BIT(6)
+
+#define A5PSW_LK_ADDR_CTRL 0x408
+#define A5PSW_LK_ADDR_CTRL_BUSY BIT(31)
+#define A5PSW_LK_ADDR_CTRL_DELETE_PORT BIT(30)
+#define A5PSW_LK_ADDR_CTRL_CLEAR BIT(29)
+#define A5PSW_LK_ADDR_CTRL_LOOKUP BIT(28)
+#define A5PSW_LK_ADDR_CTRL_WAIT BIT(27)
+#define A5PSW_LK_ADDR_CTRL_READ BIT(26)
+#define A5PSW_LK_ADDR_CTRL_WRITE BIT(25)
+#define A5PSW_LK_ADDR_CTRL_ADDRESS GENMASK(12, 0)
+
+#define A5PSW_LK_DATA_LO 0x40C
+#define A5PSW_LK_DATA_HI 0x410
+#define A5PSW_LK_DATA_HI_VALID BIT(16)
+#define A5PSW_LK_DATA_HI_PORT BIT(16)
+
+#define A5PSW_LK_LEARNCOUNT 0x418
+#define A5PSW_LK_LEARNCOUNT_COUNT GENMASK(13, 0)
+#define A5PSW_LK_LEARNCOUNT_MODE GENMASK(31, 30)
+#define A5PSW_LK_LEARNCOUNT_MODE_SET 0x0
+#define A5PSW_LK_LEARNCOUNT_MODE_INC 0x1
+#define A5PSW_LK_LEARNCOUNT_MODE_DEC 0x2
+
+#define A5PSW_MGMT_TAG_CFG 0x480
+#define A5PSW_MGMT_TAG_CFG_TAGFIELD GENMASK(31, 16)
+#define A5PSW_MGMT_TAG_CFG_ALL_FRAMES BIT(1)
+#define A5PSW_MGMT_TAG_CFG_ENABLE BIT(0)
+
+#define A5PSW_LK_AGETIME 0x41C
+#define A5PSW_LK_AGETIME_MASK GENMASK(23, 0)
+
+#define A5PSW_MDIO_CFG_STATUS 0x700
+#define A5PSW_MDIO_CFG_STATUS_CLKDIV GENMASK(15, 7)
+#define A5PSW_MDIO_CFG_STATUS_READERR BIT(1)
+#define A5PSW_MDIO_CFG_STATUS_BUSY BIT(0)
+
+#define A5PSW_MDIO_COMMAND 0x704
+/* This bit is named TRAININIT in the datasheet and should be set when reading */
+#define A5PSW_MDIO_COMMAND_READ BIT(15)
+#define A5PSW_MDIO_COMMAND_PHY_ADDR GENMASK(9, 5)
+#define A5PSW_MDIO_COMMAND_REG_ADDR GENMASK(4, 0)
+
+#define A5PSW_MDIO_DATA 0x708
+#define A5PSW_MDIO_DATA_MASK GENMASK(15, 0)
+
+#define A5PSW_CMD_CFG(port) (0x808 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_CMD_CFG_CNTL_FRM_ENA BIT(23)
+#define A5PSW_CMD_CFG_SW_RESET BIT(13)
+#define A5PSW_CMD_CFG_TX_CRC_APPEND BIT(11)
+#define A5PSW_CMD_CFG_HD_ENA BIT(10)
+#define A5PSW_CMD_CFG_PAUSE_IGNORE BIT(8)
+#define A5PSW_CMD_CFG_CRC_FWD BIT(6)
+#define A5PSW_CMD_CFG_ETH_SPEED BIT(3)
+#define A5PSW_CMD_CFG_RX_ENA BIT(1)
+#define A5PSW_CMD_CFG_TX_ENA BIT(0)
+
+#define A5PSW_FRM_LENGTH(port) (0x814 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_FRM_LENGTH_MASK GENMASK(13, 0)
+
+#define A5PSW_STATUS(port) (0x840 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_STATS_HIWORD 0x900
+
+/* Stats */
+#define A5PSW_aFramesTransmittedOK 0x868
+#define A5PSW_aFramesReceivedOK 0x86C
+#define A5PSW_aFrameCheckSequenceErrors 0x870
+#define A5PSW_aAlignmentErrors 0x874
+#define A5PSW_aOctetsTransmittedOK 0x878
+#define A5PSW_aOctetsReceivedOK 0x87C
+#define A5PSW_aTxPAUSEMACCtrlFrames 0x880
+#define A5PSW_aRxPAUSEMACCtrlFrames 0x884
+/* If */
+#define A5PSW_ifInErrors 0x888
+#define A5PSW_ifOutErrors 0x88C
+#define A5PSW_ifInUcastPkts 0x890
+#define A5PSW_ifInMulticastPkts 0x894
+#define A5PSW_ifInBroadcastPkts 0x898
+#define A5PSW_ifOutDiscards 0x89C
+#define A5PSW_ifOutUcastPkts 0x8A0
+#define A5PSW_ifOutMulticastPkts 0x8A4
+#define A5PSW_ifOutBroadcastPkts 0x8A8
+/* Ether */
+#define A5PSW_etherStatsDropEvents 0x8AC
+#define A5PSW_etherStatsOctets 0x8B0
+#define A5PSW_etherStatsPkts 0x8B4
+#define A5PSW_etherStatsUndersizePkts 0x8B8
+#define A5PSW_etherStatsOversizePkts 0x8BC
+#define A5PSW_etherStatsPkts64Octets 0x8C0
+#define A5PSW_etherStatsPkts65to127Octets 0x8C4
+#define A5PSW_etherStatsPkts128to255Octets 0x8C8
+#define A5PSW_etherStatsPkts256to511Octets 0x8CC
+#define A5PSW_etherStatsPkts512to1023Octets 0x8D0
+#define A5PSW_etherStatsPkts1024to1518Octets 0x8D4
+#define A5PSW_etherStatsPkts1519toXOctets 0x8D8
+#define A5PSW_etherStatsJabbers 0x8DC
+#define A5PSW_etherStatsFragments 0x8E0
+
+#define A5PSW_VLANReceived 0x8E8
+#define A5PSW_VLANTransmitted 0x8EC
+
+#define A5PSW_aDeferred 0x910
+#define A5PSW_aMultipleCollisions 0x914
+#define A5PSW_aSingleCollisions 0x918
+#define A5PSW_aLateCollisions 0x91C
+#define A5PSW_aExcessiveCollisions 0x920
+#define A5PSW_aCarrierSenseErrors 0x924
+
+#define A5PSW_VLAN_TAG(prio, id) (((prio) << 12) | (id))
+#define A5PSW_PORTS_NUM 5
+#define A5PSW_CPU_PORT (A5PSW_PORTS_NUM - 1)
+#define A5PSW_MDIO_DEF_FREQ 2500000
+#define A5PSW_MDIO_TIMEOUT 100
+#define A5PSW_JUMBO_LEN (10 * SZ_1K)
+#define A5PSW_MDIO_CLK_DIV_MIN 5
+#define A5PSW_TAG_LEN 8
+#define A5PSW_VLAN_COUNT 32
+
+/* Ensure enough space for 2 VLAN tags */
+#define A5PSW_EXTRA_MTU_LEN (A5PSW_TAG_LEN + 8)
+#define A5PSW_MAX_MTU (A5PSW_JUMBO_LEN - A5PSW_EXTRA_MTU_LEN)
+
+#define A5PSW_PATTERN_MGMTFWD 0
+
+#define A5PSW_LK_BUSY_USEC_POLL 10
+#define A5PSW_CTRL_TIMEOUT 1000
+#define A5PSW_TABLE_ENTRIES 8192
+
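+/* Layout of a single entry in the hardware lookup (FDB) table */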
+struct fdb_entry {
+ u8 mac[ETH_ALEN];
+ u16 valid:1;
+ u16 is_static:1;
+ u16 prio:3;
+ u16 port_mask:5;
+ u16 reserved:6;
+} __packed;
+
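+/* Mirrors the A5PSW_LK_DATA_LO/HI register pair: the same 64 bits can be
+ * read either as two raw words or as a decoded FDB entry.
+ */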
+union lk_data {
+ struct {
+ u32 lo;
+ u32 hi;
+ };
+ struct fdb_entry entry;
+};
+
+/**
+ * struct a5psw - switch struct
+ * @base: Base address of the switch
+ * @hclk: hclk_switch clock
+ * @clk: clk_switch clock
+ * @dev: Device associated with the switch
+ * @mii_bus: MDIO bus struct
+ * @pcs: Array of PCS connected to the switch ports (none for the CPU port)
+ * @ds: DSA switch struct
+ * @lk_lock: Lock for the lookup table
+ * @reg_lock: Lock for register read-modify-write operation
+ * @bridged_ports: Mask of ports that are bridged and should be flooded
+ * @br_dev: Bridge net device
+ */
+struct a5psw {
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ struct phylink_pcs *pcs[A5PSW_PORTS_NUM - 1];
+ struct dsa_switch ds;
+ struct mutex lk_lock;
+ spinlock_t reg_lock;
+ u32 bridged_ports;
+ struct net_device *br_dev;
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 72b6fc1932b5..b03d0d0c3dbf 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2330,7 +2330,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
else
mode = MLO_AN_PHY;
- rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode, NULL);
if (rc < 0)
goto out;
@@ -3382,12 +3382,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
+ .id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
.shutdown = sja1105_shutdown,
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 3110895358d8..97a92e6da60d 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -205,10 +205,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+ { "vsc7385" },
+ { "vsc7388" },
+ { "vsc7395" },
+ { "vsc7398" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
.shutdown = vsc73xx_spi_shutdown,
+ .id_table = vsc73xx_spi_ids,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 557ca8ff9dec..ca3e4700a813 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
list_del(&slave->list);
queue->num_slaves--;
slave->dev->flags &= ~IFF_SLAVE;
- dev_put_track(slave->dev, &slave->dev_tracker);
+ netdev_put(slave->dev, &slave->dev_tracker);
kfree(slave);
}
@@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
- dev_hold_track(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
+ netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 955abbc5490e..9a55c1d5a0a1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -84,6 +84,7 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9eb01169957f..c06e75ed4231 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index fbf4588994ac..d19d1579c415 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -1106,7 +1106,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mif_ctrl);
writel(0, &rxmac->space_avail);
- /* Initialize the the mif_ctrl register
+ /* Initialize the mif_ctrl register
* bit 3: Receive code error. One or more nibbles were signaled as
* errors during the reception of the packet. Clear this
* bit in Gigabit, set it in 100Mbit. This was derived
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index b7d772f2dcbb..3c2e32fb7389 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -3,11 +3,12 @@
* Copyright (C) 2014 Altera Corporation. All rights reserved
*/
-#include <linux/kernel.h>
-
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 05ac8d9ccb2f..5d1baa01360f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1830,9 +1830,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->max_mtu = AMD8111E_MAX_MTU;
netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
-#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-#endif
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 895d35639129..c68ace804e37 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -230,7 +230,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
netif_dbg(pdata, drv, netdev,
- "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ "cap=%d, en=%#x, mbc=%d, delay=%d\n",
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
/* Check PFC for supported number of traffic classes */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4d46780fad13..f342bb853189 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1673,12 +1673,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d9547552ceef..b875c430222e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -417,7 +417,7 @@ struct xgbe_rx_ring_data {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e11cc29d3264..06508eebb585 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -1014,7 +1012,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -1064,11 +1061,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1382,7 +1379,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1400,9 +1396,8 @@ int aq_nic_stop(struct aq_nic_s *self)
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 831833911a52..8647125d60ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_suspend_common(struct device *dev, bool deep)
+static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
if (netif_running(nic->ndev))
aq_nic_stop(nic);
- if (deep) {
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- aq_nic_set_power(nic);
- }
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
rtnl_unlock();
return 0;
}
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, bool deep)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (deep) {
- /* Reinitialize Nic/Vecs objects */
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- }
-
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -444,22 +437,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
index b6119dcc3bb9..c2fda80fe1cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -158,7 +158,7 @@ struct aq_mss_egress_class_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! 0: don't care and no LLC header exist.
@@ -422,7 +422,7 @@ struct aq_mss_ingress_preclass_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! Mask is per-byte.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index cac509708e9d..e461f4764066 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -786,7 +786,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
struct ag71xx_ring *ring = &ag->tx_ring;
int sent = 0, bytes_compl = 0, n = 0;
@@ -825,7 +825,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
if (!skb)
continue;
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
ring->buf[i].tx.skb = NULL;
bytes_compl += ring->buf[i].tx.len;
@@ -946,7 +946,7 @@ static unsigned int ag71xx_max_frame_len(unsigned int mtu)
return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
-static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
u32 t;
@@ -970,7 +970,7 @@ static void ag71xx_fast_reset(struct ag71xx *ag)
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- ag71xx_tx_packets(ag, true);
+ ag71xx_tx_packets(ag, true, 0);
reset_control_assert(ag->mac_reset);
usleep_range(10, 20);
@@ -1657,7 +1657,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
@@ -1703,7 +1703,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
int tx_done, rx_done;
u32 status;
- tx_done = ag71xx_tx_packets(ag, false);
+ tx_done = ag71xx_tx_packets(ag, false, limit);
netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
rx_done = ag71xx_rx_packets(ag, limit);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 49459397993e..be4b1f8eef29 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2072,7 +2072,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -2107,7 +2107,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2132,7 +2132,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2219,7 +2219,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2733,8 +2734,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx, 64);
for (i = 0; i < adapter->tx_queue_count; ++i)
- netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
- atl1c_clean_tx, 64);
+ netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2849,7 +2850,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 20681860a599..57a51fb7746c 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1609,8 +1609,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
@@ -1645,7 +1644,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
@@ -1713,7 +1712,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
@@ -2482,7 +2482,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 6a969969d221..ff1fe09abf9f 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2115,7 +2115,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
ntohs(iph->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
@@ -2206,7 +2206,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
@@ -2367,8 +2367,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 698438a2ee0f..1c6aea12db72 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -388,7 +388,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_buf_size, DMA_FROM_DEVICE);
priv->rx_buf[desc_idx] = NULL;
- skb = build_skb(buf, priv->rx_frag_size);
+ skb = napi_build_skb(buf, priv->rx_frag_size);
if (unlikely(!skb)) {
skb_free_frag(buf);
dev->stats.rx_dropped++;
@@ -423,7 +423,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/*
* try to or force reclaim of transmitted buffers
*/
-static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
struct bcm_enet_priv *priv;
unsigned int bytes;
@@ -468,7 +468,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
dev->stats.tx_errors++;
bytes += skb->len;
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, budget);
released++;
}
@@ -499,7 +499,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0, budget);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
@@ -1211,7 +1211,7 @@ static int bcm_enet_stop(struct net_device *dev)
bcm_enet_disable_mac(priv);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -1935,7 +1935,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enet_driver = {
+static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
.remove = bcm_enet_remove,
.driver = {
@@ -2362,7 +2362,7 @@ static int bcm_enetsw_stop(struct net_device *dev)
bcm_enet_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -2756,7 +2756,7 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enetsw_driver = {
+static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
.remove = bcm_enetsw_remove,
.driver = {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 2dfc1e32bbb3..93580484a3f4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5729a5ab059d..712b5595bc39 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3421,12 +3421,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
@@ -3534,15 +3531,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
@@ -3568,12 +3563,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
/* set FW indication according to inner or outer protocols if tunneled */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7071604f9984..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 56b46b8206a7..ba0f1ffac507 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -535,12 +535,9 @@ normal_tx:
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -4480,7 +4477,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
}
}
if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
+ bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
@@ -4499,9 +4496,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -7790,7 +7785,7 @@ hwrm_dbg_qcaps_exit:
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
@@ -10065,7 +10060,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
- if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+ test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
else
bnxt_remap_fw_health_regs(bp);
@@ -10658,7 +10654,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a1dca8c58f54..075c6206325c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2314,6 +2314,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 3528ce9849e6..059f96f7a96f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -20,6 +20,7 @@
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -610,6 +611,63 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
return rc;
}
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
+
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
+
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto err;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto err;
+ }
+
+ kfree(buf);
+ return true;
+
+err:
+ kfree(buf);
+ return false;
+}
+
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
+}
+
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -622,6 +680,8 @@ static const struct devlink_ops bnxt_dl_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = bnxt_dl_reload_down,
.reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -979,9 +1039,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
- if (rc)
- return rc;
+ if (BNXT_CHIP_P5(bp)) {
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ }
return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7191e5d74208..87eb5362ad70 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2176,14 +2176,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
- u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
- u32 dir_item_len, const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
@@ -2836,8 +2836,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
@@ -2871,9 +2871,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a59284215e78..a8ecef8ab82c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -58,5 +58,17 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 562f8f68a47d..7f3c0875b6f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -76,14 +76,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 high_before, high_now, low;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return -EIO;
+ high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
ptp_read_system_prets(sts);
- *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
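+ /* The high word changed while reading: re-read low so both halves match */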
+ if (high_now != high_before) {
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ }
+ *ns = ((u64)high_now << 32) | low;
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ddf2f3963abe..730febd19330 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -307,7 +307,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ if (min_tx_rate > pf_link_speed) {
netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
min_tx_rate, vf_id);
return -EINVAL;
@@ -823,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc)
+ if (rc) {
+ bnxt_ulp_sriov_cfg(bp, 0);
goto err_out2;
+ }
return 0;
@@ -832,6 +834,9 @@ err_out2:
/* Free the resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+ /* Restore the max resources */
+ bnxt_hwrm_func_qcaps(bp);
+
err_out1:
bnxt_free_vf_resources(bp);
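The two bnxt_sriov.c hunks tighten the error unwinding in bnxt_sriov_enable(): a failing pci_enable_sriov() now also undoes the earlier ULP notification via bnxt_ulp_sriov_cfg(bp, 0), and the err_out2 path restores the PF resource maximums with bnxt_hwrm_func_qcaps() after releasing the VF reservations. The shape is the usual staged goto unwind; a generic sketch where the stub functions stand in for the real HWRM/ULP/PCI calls:

/* Placeholder steps; each undo_*() reverses the matching step_*(). */
static int step_reserve_resources(void) { return 0; }
static int step_notify_consumers(void) { return 0; }
static int step_enable_hw(void) { return 0; }
static void undo_notify_consumers(void) { }
static void undo_reserve_resources(void) { }

static int example_staged_setup(void)
{
	int rc;

	rc = step_reserve_resources();
	if (rc)
		return rc;

	rc = step_notify_consumers();
	if (rc)
		goto err_free_resources;

	rc = step_enable_hw();
	if (rc)
		goto err_unnotify;

	return 0;

err_unnotify:
	undo_notify_consumers();
err_free_resources:
	undo_reserve_resources();
	return rc;
}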
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f02fe906dedb..f53387ed0167 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -28,7 +28,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo;
- struct bnxt_sw_tx_bd *tx_buf, *first_buf;
+ struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
int num_frags = 0;
u32 flags;
@@ -43,13 +43,14 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- first_buf = tx_buf;
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ flags = (len << TX_BD_LEN_SHIFT) |
+ ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -82,7 +83,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
- txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
len = frag_len;
@@ -96,7 +96,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- return first_buf;
+ return tx_buf;
}
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f7f10cfb3476..e86503d97f32 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
+ id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
@@ -669,7 +669,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
- kfree(id_tbl->table);
+ bitmap_free(id_tbl->table);
id_tbl->table = NULL;
}
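cnic_init_id_tbl() above switches from an open-coded kcalloc(BITS_TO_LONGS(size), sizeof(long), ...) allocation to the dedicated bitmap helpers, paired with bitmap_free() in cnic_free_id_tbl(); the cxgb4 hunks further down make the same conversion. The two allocations below are equivalent ways of getting a zeroed, nbits-wide bitmap (nbits is a caller-supplied size in this sketch):

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Open-coded form the patch replaces; freed with kfree(). */
static unsigned long *alloc_map_old(unsigned int nbits)
{
	return kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
}

/* Helper-based form; freed with bitmap_free(). */
static unsigned long *alloc_map_new(unsigned int nbits)
{
	return bitmap_zalloc(nbits, GFP_KERNEL);
}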
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c888ddee1fc4..7ded559842e8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -393,6 +393,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c28f8cc00d1c..db1e9d810b41 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7944,7 +7944,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f6fe08df568b..29dd0f93d6c0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2823,8 +2823,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
@@ -2872,8 +2871,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
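The tg3 and bna hunks above (and the cadence, cavium, chelsio and cisco hunks below) replace the open-coded header-length expression with skb_tcp_all_headers(); as the substitutions show, the helper stands for skb_transport_offset(skb) + tcp_hdrlen(skb), i.e. everything up to and including the TCP header with options. A small sketch of how a TSO path typically picks the header length; the encapsulation handling mirrors the enic hunk near the end of this section:

#include <net/tcp.h>

static int example_tso_header_len(const struct sk_buff *skb)
{
	/* For tunnelled frames, count the inner headers instead. */
	if (skb->encapsulation)
		return skb_inner_tcp_all_headers(skb);

	return skb_tcp_all_headers(skb);
}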
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 7ca077b65eaa..9c410f93a103 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -717,14 +717,15 @@
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_MIIONRGMII 0x00000200
+#define MACB_CAPS_NEED_TSUCLK 0x00000400
+#define MACB_CAPS_PCS 0x01000000
+#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d89098f4ede8..66c7d08d376a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2267,7 +2267,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
@@ -3482,7 +3482,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
spin_lock_irqsave(&bp->rx_fs_lock, flags);
@@ -3535,8 +3536,8 @@ static int gem_del_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc),
- htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
gem_writel_n(bp, SCRT2, fs->location, 0);
@@ -4600,6 +4601,40 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->sgmii_phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->sgmii_phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
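init_reset_optional() above folds the former ZynqMP-specific init into a generic helper reused by the mpfs and versal configs added below, and leans on dev_err_probe(), which logs the failure (quietly for -EPROBE_DEFER) and returns the error it was given, so lookup, log and return collapse into one statement. A minimal sketch of that idiom using the same optional-PHY lookup; the function name is illustrative:

#include <linux/dev_printk.h>
#include <linux/phy/phy.h>

static int example_get_optional_phy(struct device *dev, struct phy **out)
{
	struct phy *p = devm_phy_optional_get(dev, NULL);

	if (IS_ERR(p))
		return dev_err_probe(dev, PTR_ERR(p), "failed to get PHY\n");

	*out = p;	/* NULL when the firmware describes no PHY */
	return 0;
}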
static const struct macb_usrio_config sama7g5_usrio = {
.mii = 0,
.rmii = 1,
@@ -4626,8 +4661,8 @@ static const struct macb_config at91sam9260_config = {
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &macb_default_usrio,
@@ -4658,8 +4693,8 @@ static const struct macb_config sama5d29_config = {
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4689,55 +4724,13 @@ static const struct macb_config np4_config = {
.usrio = &macb_default_usrio,
};
-static int zynqmp_init(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct macb *bp = netdev_priv(dev);
- int ret;
-
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- /* Ensure PS-GTR PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
-
- if (IS_ERR(bp->sgmii_phy)) {
- ret = PTR_ERR(bp->sgmii_phy);
- dev_err_probe(&pdev->dev, ret,
- "failed to get PS-GTR PHY\n");
- return ret;
- }
-
- ret = phy_init(bp->sgmii_phy);
- if (ret) {
- dev_err(&pdev->dev, "failed to init PS-GTR PHY: %d\n",
- ret);
- return ret;
- }
- }
-
- /* Fully reset GEM controller at hardware level using zynqmp-reset driver,
- * if mapped in device tree.
- */
- ret = device_reset_optional(&pdev->dev);
- if (ret) {
- dev_err_probe(&pdev->dev, ret, "failed to reset controller");
- phy_exit(bp->sgmii_phy);
- return ret;
- }
-
- ret = macb_init(pdev);
- if (ret)
- phy_exit(bp->sgmii_phy);
-
- return ret;
-}
-
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = zynqmp_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4751,6 +4744,17 @@ static const struct macb_config zynq_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
MACB_CAPS_MIIONRGMII,
@@ -4769,8 +4773,17 @@ static const struct macb_config sama7g5_emac_config = {
.usrio = &sama7g5_usrio,
};
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
static const struct of_device_id macb_dt_ids[] = {
- { .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
{ .compatible = "cdns,np4-macb", .data = &np4_config },
@@ -4784,11 +4797,15 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
- { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
- { .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
+ { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
+ { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
+ { .compatible = "xlnx,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
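macb_dt_ids[] above keeps the deprecated cdns,zynq*-gem strings, adds their xlnx,* replacements plus microchip,mpfs-macb and xlnx,versal-gem, and drops the unused cdns,at32ap7000-macb entry. Each .data pointer carries the per-SoC macb_config; a sketch of how such a table is consumed at probe time, assuming the driver's struct macb_config and default_gem_config are in scope (entries without .data, such as plain "cdns,macb", yield NULL):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct macb_config *
example_config_lookup(struct platform_device *pdev)
{
	const struct macb_config *cfg = of_device_get_match_data(&pdev->dev);

	return cfg ? cfg : &default_gem_config;
}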
@@ -4796,8 +4813,8 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4913,8 +4930,8 @@ static int macb_probe(struct platform_device *pdev)
/* MTU range: 68 - 1500 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
- if (bp->caps & MACB_CAPS_JUMBO)
- dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
dev->max_mtu = ETH_DATA_LEN;
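For the jumbo-capable configs above (.jumbo_max_len = 10240), this works out to max_mtu = 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222 bytes, computed from the platform data instead of reading the JML register back; devices without MACB_CAPS_JUMBO, or with jumbo_max_len left at zero, stay at ETH_DATA_LEN (1500).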
@@ -5198,7 +5215,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
if (!(device_may_wakeup(dev)))
macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
- else
+ else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
@@ -5214,8 +5231,10 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
+ clk_prepare_enable(bp->tsu_clk);
+ } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
+ clk_prepare_enable(bp->tsu_clk);
}
- clk_prepare_enable(bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 9559c16078f9..e6cb20aaa76a 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -434,7 +434,7 @@ int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
return 0;
}
-static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
u32 reg_val;
@@ -444,8 +444,6 @@ static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
else
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
-
- return 0;
}
int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -468,8 +466,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_OFF:
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (gem_ptp_set_one_step_sync(bp, 1) != 0)
- return -ERANGE;
+ gem_ptp_set_one_step_sync(bp, 1);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 4367edbdd579..06397cc8bb36 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1261,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
@@ -1382,7 +1382,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 0321be77366c..e56eff701395 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: common.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index bf43da6c6a63..12639b688ddc 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cphy.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index 5249686afe71..a30fb407115d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cpl5_cmd.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -635,4 +626,3 @@ struct cpl_mss_change {
};
#endif /* _CXGB_CPL5_CMD_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index 81526ad36339..0427e894c277 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: elmer0.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -154,4 +145,3 @@ enum {
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 3e182eee799e..ef70569435be 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: espi.c *
@@ -7,16 +8,6 @@
* Ethernet SPI functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 162de5259df9..f588e9f3b37a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: espi.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index 5913eaf442b5..96077da1ed5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: gmac.h *
@@ -7,16 +8,6 @@
* Generic MAC functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index 7ddb301bcba0..556c8ad68fa8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: mv88x201x.c *
@@ -7,16 +8,6 @@
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 0bb37e4680c7..cbfa03d5663a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: pm3393.c *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index 964ce59ee169..f751e680cf7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: regs.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 12e76fd0ae91..861edff5ed89 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: sge.c *
@@ -7,16 +8,6 @@
* DMA engine. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index 716705b96f26..f7e6f64040ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: sge.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 007c591b8bf5..367a9e4581d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: subr.c *
@@ -7,16 +8,6 @@
* Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index 7f79cc7ceb75..4c883170683b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: suni1x10gexp_regs.h *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -1639,4 +1630,3 @@
#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002
#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4a872f328fea..7d5204834ee2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -85,7 +85,7 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
if (err) {
dev_err(adap->pdev_dev,
- "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
dcb_ver_array[dcb->dcb_version], app.selector,
app.protocol, -err);
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 7d49fd4edc9e..14e0d989c3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3429,18 +3429,18 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
unsigned long *t;
struct adapter *adap = filp->private_data;
- t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ t = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!t)
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kfree(t);
+ bitmap_free(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kfree(t);
+ bitmap_free(t);
return count;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 6c790af92170..77897edd2bc0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -2227,7 +2227,7 @@ void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
if (eth_filter_info) {
for (i = 0; i < adap->params.nports; i++) {
kvfree(eth_filter_info[i].loc_array);
- kfree(eth_filter_info[i].bmap);
+ bitmap_free(eth_filter_info[i].bmap);
}
kfree(eth_filter_info);
}
@@ -2270,9 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
goto free_eth_finfo;
}
- eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
- sizeof(unsigned long),
- GFP_KERNEL);
+ eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
goto free_eth_finfo;
@@ -2284,7 +2282,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
free_eth_finfo:
while (i-- > 0) {
- kfree(eth_filter->port[i].bmap);
+ bitmap_free(eth_filter->port[i].bmap);
kvfree(eth_filter->port[i].loc_array);
}
kfree(eth_filter_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0c78c0db8937..d0061921529f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5047,28 +5047,24 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the various egress queue bitmaps
* ie starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
}
- bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -5417,10 +5413,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5854,8 +5850,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5870,7 +5865,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -6189,10 +6184,10 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f889f404305c..ee52e3b1d74f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1531,7 +1531,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 26433a62d7f0..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd[3];
+ __be32 rsvd;
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 7de3800437c9..c2822e635f89 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2859,7 +2859,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* address stored on the adapter
* @adapter: The adapter
*
- * Find the the port mask for the VF based on the index of mac
+ * Find the port mask for the VF based on the index of mac
* address stored in the adapter. If no mac address is stored on
* the adapter for the VF, use the port mask received from the
* firmware.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d546993bda09..1c52592d3b65 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -877,7 +877,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
- * separately with the Padding Boundary in SGE_CONTROL and and Packing
+ * separately with the Padding Boundary in SGE_CONTROL and Packing
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
* SGE_CONTROL in order to determine how ingress packet data will be
* laid out in Packed Buffer Mode. Unfortunately, older versions of
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 60b648b46f75..da9973b711f4 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1012,7 +1012,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1907,7 +1907,7 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
skb_tx_timestamp(nskb);
@@ -1932,20 +1932,26 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
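The chcr_ktls_xmit() hunk above reads tls_ctx->netdev through rcu_dereference_bh() (the transmit path runs with BH disabled) and deliberately keeps going when the pointer is NULL, since tls_device_down() may be clearing it concurrently. A sketch of the reader side of that pattern; the struct and field names are illustrative, not the driver's:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct example_offload_ctx {
	struct net_device __rcu *netdev;
};

static bool example_ctx_usable(struct example_offload_ctx *ctx,
			       struct net_device *dev)
{
	struct net_device *nd = rcu_dereference_bh(ctx->netdev);

	/* NULL means a concurrent teardown; keep processing the packet
	 * rather than bailing out, mirroring the hunk above.
	 */
	return !nd || nd == dev;
}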
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 4af5561cbfc5..ddfe9208529a 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1384,7 +1384,7 @@ static void chtls_pass_accept_request(struct sock *sk,
#endif
}
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
@@ -1392,7 +1392,7 @@ static void chtls_pass_accept_request(struct sock *sk,
th_ecn = tcph->ece && tcph->cwr;
if (th_ecn) {
ect = !INET_ECN_is_not_ect(ip_dsfield);
- ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
inet_rsk(oreq)->ecn_ok = 1;
}
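The chtls hunks wrap the sysctl_tcp_window_scaling and sysctl_tcp_ecn reads in READ_ONCE(): these knobs can be rewritten from /proc at any time, and the annotation (paired with WRITE_ONCE() on the writer side) keeps the compiler from tearing, fusing or re-loading the access. A minimal sketch with a hypothetical knob:

#include <linux/compiler.h>
#include <linux/types.h>

static int example_knob;	/* updated elsewhere with WRITE_ONCE() */

static bool example_feature_enabled(void)
{
	return READ_ONCE(example_knob) != 0;
}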
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index d6dd1b4edf6e..462c5435a206 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index ac37cacc6136..d25426470a29 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 52aaf1bb5205..a0964b629ffc 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index 3bdc74fba1e3..e3b700c28bc4 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
index 6b9f9255af28..e01790fb0415 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.h
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -1,20 +1,5 @@
-/**
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef __ENIC_API_H__
#define __ENIC_API_H__
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index f8d2a6a34282..2cbae7c6cc3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/pci.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index f5bb058b3f96..698d0cb02064 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_DEV_H_
#define _ENIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6c11f9d62526..60d8c0fbc037 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 1c81b161de52..372fb7b3a282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -680,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 80f46dbd5117..4720a952725d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h
index a09ff392c1c6..20a2687713ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.h
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_PP_H_
#define _ENIC_PP_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 40b20817ddd5..1c48aebdbab0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 81f98a8b60e9..b8ee42d297aa 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_RES_H_
diff --git a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
index e6dd30988d6f..0ab5fd6b8d46 100644
--- a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _RQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 519323460f26..27c885e91552 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 4e6aa65857f7..eed5bf59e5d2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_CQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 45015931b335..12a83fa1302d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 714fc1ed79e3..6273794b923b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index fcc4a3ccdd94..db56d778877a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEVCMD_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 7d6fbb5635a4..5acc236069de 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_ENIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 23604e3d4455..25319f072a04 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h
index 2b1636392294..33a72aa10b26 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_INTR_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 84ff8ca17fcb..04fee45b5d39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_NIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index 4e45f88ac1d4..b4776e334d63 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RESOURCE_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index a3e7b003ada1..5ae80551f17c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
@@ -216,4 +203,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0413103ebe94..0bc595abc03b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index 881fa18542b3..4dcf0e61cb13 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RSS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 74c81ed6fdab..2dd04322d760 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_STATS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 24ef8cd40545..20fcb20b42ed 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2010 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 057776908828..b51c1c52f8bf 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2010 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_VIC_H_
#define _VNIC_VIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index eb75891974df..29c7900349b2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 01209613d57d..75c526911074 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_WQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
index c7021e3a631f..425e46a804ee 100644
--- a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _WQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 528eb0f223b1..b4f5e57d0285 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2287,7 +2287,7 @@ err:
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data)
+ u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
- if (!status) {
+ if (!status && len > 0) {
struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ memcpy(data, resp->page_data + off, len);
}
err:
mutex_unlock(&adapter->mcc_lock);
@@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
strlcpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db1f3b908582..e2085c68c0ee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data);
+ u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index dfa784339781..bd0df189d871 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev,
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
+ u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- data);
- if (status)
- goto err;
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
+
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
- status = be_cmd_read_port_transceiver_data(adapter,
- TR_PAGE_A2,
- data +
- PAGE_DATA_LEN);
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
if (status)
goto err;
}
- if (eeprom->offset)
- memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}
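
The reworked be_get_module_eeprom() above reads only the requested window instead of always fetching a full A0 page and shifting it afterwards. A standalone sketch of the page-split arithmetic, assuming PAGE_DATA_LEN is 256 (the usual SFF-8472 lower-page size) and with printf standing in for the firmware reads:

#include <stdio.h>

#define PAGE_DATA_LEN 256u	/* assumed size of EEPROM page A0 */

/* Split a request for [off, off + len) across pages A0 and A2. */
static void split_eeprom_read(unsigned int off, unsigned int len)
{
	unsigned int begin = off, end = off + len;

	if (begin < PAGE_DATA_LEN) {
		unsigned int n = (end < PAGE_DATA_LEN ? end : PAGE_DATA_LEN) - begin;

		printf("A0: offset %u, length %u\n", begin, n);
		begin = PAGE_DATA_LEN;		/* remainder comes from A2 */
	}
	if (end > PAGE_DATA_LEN)
		printf("A2: offset %u, length %u\n", begin - PAGE_DATA_LEN, end - begin);
}

int main(void)
{
	split_eeprom_read(100, 200);	/* spans both pages */
	split_eeprom_read(300, 64);	/* page A2 only */
	return 0;
}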
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cd4e243da5fa..414362febbb9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -737,9 +737,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -3178,7 +3178,7 @@ static irqreturn_t be_intx(int irq, void *dev)
}
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
- /* Return IRQ_HANDLED only for the the first spurious intr
+ /* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
* this irq as a bad one!
*/
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index cb069a0af7b9..a5f7152a1716 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -340,14 +340,14 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
return 0;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
int i;
for (i = 0; i < count; i++) {
- entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+ entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
if (i == 0)
@@ -395,7 +395,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
retval = tsnep_tx_map(skb, tx, count);
if (retval != 0) {
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -464,7 +464,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -1282,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, tsnep_of_match);
static struct platform_driver tsnep_driver = {
.driver = {
.name = TSNEP,
- .of_match_table = of_match_ptr(tsnep_of_match),
+ .of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
.remove = tsnep_remove,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 5231818943c6..c03663785a8d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1764,6 +1764,19 @@ cleanup_clk:
return rc;
}
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+ struct device_node *child_np = of_get_child_by_name(np, name);
+ bool ret = false;
+
+ if (child_np) {
+ ret = true;
+ of_node_put(child_np);
+ }
+
+ return ret;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1883,7 +1896,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* Display what we found */
phy_attached_info(phy);
- } else if (np && !of_get_child_by_name(np, "mdio")) {
+ } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that describe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cd9ec80522e7..75d51572693d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1660,8 +1660,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a90275143d87..e8e2aa1e7f01 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -691,7 +691,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 5ddb769bdfb4..a7f4c3c29f3e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -924,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index f85b5e81dfc1..95f778cce98c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -100,8 +100,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28215f3..3dc3c0b626c2 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
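
The hunk above drops the open-coded FEC_T_CTRL_CAPTURE/FEC_ATIME sequence in favour of the cyclecounter's read callback, so the PPS path and the timecounter share one capture routine. A sketch of the pattern, assuming fep->cc is the cyclecounter backing the timecounter fep->tc:

u64 cycles = fep->cc.read(&fep->cc);		 /* raw hardware counter */
u64 ns = timecounter_cyc2time(&fep->tc, cycles); /* cycles -> nanoseconds */
struct timespec64 ts = ns_to_timespec64(ns);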
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 5ff2634bee2f..cb419aef8d1b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -201,7 +201,7 @@ void fs_enet_platform_cleanup(void);
/* access macros */
#if defined(CONFIG_CPM1)
-/* for a a CPM1 __raw_xxx's are sufficient */
+/* for a CPM1 __raw_xxx's are sufficient */
#define __cbd_out32(addr, x) __raw_writel(x, addr)
#define __cbd_out16(addr, x) __raw_writew(x, addr)
#define __cbd_in32(addr) __raw_readl(addr)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 152f4d83765a..d37d7a19a759 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *);
+ int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
@@ -136,7 +136,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
}
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->dev.of_node);
+ clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3dc9369a33f7..e7bf1524b68e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1944,6 +1944,7 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
+ skb_tx_timestamp(skb);
netdev_tx_sent_queue(txq, bytes_sent);
gfar_wmb();
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 9a2c16d69e2c..81fb68730138 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1457,6 +1457,7 @@ static int gfar_get_ts_info(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
@@ -1474,7 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
index 257203e94b68..f21819670106 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_hci.h
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -442,6 +442,7 @@ enum fun_port_lane_attr {
};
enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_XCVR_READ = 0x23,
FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
};
@@ -595,6 +596,19 @@ struct fun_admin_port_req {
struct fun_admin_read48_req read48[];
} read;
+ struct fun_admin_port_xcvr_read_req {
+ u8 subop;
+ u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+ } xcvr_read;
struct fun_admin_port_inetaddr_event_req {
__u8 subop;
__u8 rsvd0;
@@ -625,6 +639,15 @@ struct fun_admin_port_req {
.id = cpu_to_be32(_id), \
}
+#define FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(_flags, _id, _bank, _page, \
+ _offset, _length, _dev_addr) \
+ ((struct fun_admin_port_xcvr_read_req) { \
+ .subop = FUN_ADMIN_PORT_SUBOP_XCVR_READ, \
+ .flags = cpu_to_be16(_flags), .id = cpu_to_be32(_id), \
+ .bank = (_bank), .page = (_page), .offset = (_offset), \
+ .length = (_length), .dev_addr = (_dev_addr), \
+ })
+
struct fun_admin_port_rsp {
struct fun_admin_rsp_common common;
@@ -659,6 +682,23 @@ struct fun_admin_port_rsp {
} u;
};
+struct fun_admin_port_xcvr_read_rsp {
+ struct fun_admin_rsp_common common;
+
+ u8 subop;
+ u8 rsvd0[3];
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+
+ u8 data[128];
+};
+
enum fun_xcvr_type {
FUN_XCVR_BASET = 0x0,
FUN_XCVR_CU = 0x1,
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index d081168c95fa..31aa185f4d17 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -78,6 +78,7 @@ static const char * const txq_stat_names[] = {
"tx_cso",
"tx_tso",
"tx_encapsulated_tso",
+ "tx_uso",
"tx_more",
"tx_queue_stops",
"tx_queue_restarts",
@@ -778,6 +779,7 @@ static void fun_get_ethtool_stats(struct net_device *netdev,
ADD_STAT(txs.tx_cso);
ADD_STAT(txs.tx_tso);
ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_uso);
ADD_STAT(txs.tx_more);
ADD_STAT(txs.tx_nstops);
ADD_STAT(txs.tx_nrestarts);
@@ -1116,6 +1118,39 @@ static int fun_set_fecparam(struct net_device *netdev,
return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
}
+static int fun_get_port_module_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *req,
+ struct netlink_ext_ack *extack)
+{
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_xcvr_read_rsp rsp;
+ } cmd;
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified port is virtual, only physical ports have modules");
+ return -EOPNOTSUPP;
+ }
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.xcvr_read =
+ FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
+ req->bank, req->page,
+ req->offset, req->length,
+ req->i2c_address);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ memcpy(req->data, cmd.rsp.data, req->length);
+ return req->length;
+}
+
static const struct ethtool_ops fun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1154,6 +1189,7 @@ static const struct ethtool_ops fun_ethtool_ops = {
.get_eth_mac_stats = fun_get_802_3_stats,
.get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
.get_rmon_stats = fun_get_rmon_stats,
+ .get_module_eeprom_by_page = fun_get_port_module_page,
};
void fun_set_ethtool_ops(struct net_device *netdev)
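
The new .get_module_eeprom_by_page op above forwards an ethtool page request to the device's XCVR_READ admin command and returns the number of bytes read. The request fields it consumes are sketched below (illustrative layout of struct ethtool_module_eeprom as used here; the authoritative definition is in include/linux/ethtool.h):

struct ethtool_module_eeprom_sketch {
	u32 offset;		/* byte offset within the page */
	u32 length;		/* number of bytes to read */
	u8  page;		/* EEPROM page number */
	u8  bank;		/* EEPROM bank number */
	u8  i2c_address;	/* module I2C address */
	u8  *data;		/* buffer the op fills */
};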
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index 9485cf699c5d..f247b7ad3a88 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1357,7 +1357,8 @@ static const struct net_device_ops fun_netdev_ops = {
#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
index 0f6a549b9f67..29a6c2ede43a 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_rx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
int ref_ok, struct funeth_txq *xdp_q)
{
struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
struct xdp_buff xdp;
u32 act;
@@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
case XDP_TX:
if (unlikely(!ref_ok))
goto pass;
- if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
goto xdp_error;
FUN_QSTAT_INC(q, xdp_tx);
q->xdp_flush |= FUN_XDP_FLUSH_TX;
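
The XDP_TX change above converts the xdp_buff into an xdp_frame before handing it to the Tx queue, so the completion path can free it with xdp_return_frame() and multi-buffer frames become possible. A sketch of the shape of that path, with driver_tx_frame() standing in for fun_xdp_tx():

struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp);

if (!xdpf || !driver_tx_frame(xdp_q, xdpf))
	goto xdp_error;	/* buffer is recycled by the error path */
/* on Tx completion the frame is released with xdp_return_frame(xdpf) */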
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
index ff6e29237253..706d81e39a54 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -16,23 +16,24 @@
#define FUN_XDP_CLEAN_BATCH 16
/* DMA-map a packet and return the (length, DMA_address) pairs for its
- * segments. If a mapping error occurs -ENOMEM is returned.
+ * segments. If a mapping error occurs -ENOMEM is returned. The packet
+ * consists of an skb_shared_info and one additional address/length pair.
*/
-static int map_skb(const struct sk_buff *skb, struct device *dev,
- dma_addr_t *addr, unsigned int *len)
+static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
+ void *data, unsigned int data_len,
+ dma_addr_t *addr, unsigned int *len)
{
- const struct skb_shared_info *si;
const skb_frag_t *fp, *end;
- *len = skb_headlen(skb);
- *addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
+ *len = data_len;
+ *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
return -ENOMEM;
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
+ if (!si)
+ return 0;
- for (fp = si->frags; fp < end; fp++) {
+ for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
*++len = skb_frag_size(fp);
*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
@@ -44,7 +45,7 @@ unwind:
while (fp-- > si->frags)
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
- dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
return -ENOMEM;
}
@@ -71,6 +72,33 @@ static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}
+/* Write a gather list to the Tx descriptor at @req from @ngle address/length
+ * pairs.
+ */
+static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
+ struct fun_eth_tx_req *req,
+ const dma_addr_t *addrs,
+ const unsigned int *lens,
+ unsigned int ngle)
+{
+ struct fun_dataop_gl *gle;
+ unsigned int i;
+
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ return gle;
+}
+
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
return *(__be16 *)&tcp_flag_word(th);
@@ -83,7 +111,7 @@ static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
const struct fun_ktls_tx_ctx *tls_ctx;
u32 datalen, seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return skb;
@@ -129,10 +157,13 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
struct fun_eth_tx_req *req;
struct fun_dataop_gl *gle;
const struct tcphdr *th;
- unsigned int ngle, i;
+ unsigned int l4_hlen;
+ unsigned int ngle;
u16 flags;
- if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
+ shinfo = skb_shinfo(skb);
+ if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
+ skb_headlen(skb), addrs, lens))) {
FUN_QSTAT_INC(q, tx_map_err);
return 0;
}
@@ -145,7 +176,6 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
req->repr_idn = 0;
req->encap_proto = 0;
- shinfo = skb_shinfo(skb);
if (likely(shinfo->gso_size)) {
if (skb->encapsulation) {
u16 ol4_ofst;
@@ -178,6 +208,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
FUN_ETH_UPDATE_INNER_L3_LEN;
}
th = inner_tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
fun_eth_offload_init(&req->offload, flags,
shinfo->gso_size,
tcp_hdr_doff_flags(th), 0,
@@ -185,6 +216,24 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
skb_inner_transport_offset(skb),
skb_network_offset(skb), ol4_ofst);
FUN_QSTAT_INC(q, tx_encap_tso);
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+ flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L4_LEN |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_INNER_IPV6;
+
+ l4_hlen = sizeof(struct udphdr);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ cpu_to_be16(l4_hlen << 10), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_uso);
} else {
/* HW considers one set of headers as inner */
flags = FUN_ETH_INNER_LSO |
@@ -195,6 +244,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
else
flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
th = tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
fun_eth_offload_init(&req->offload, flags,
shinfo->gso_size,
tcp_hdr_doff_flags(th), 0,
@@ -209,7 +259,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
extra_pkts = shinfo->gso_segs - 1;
extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
- __tcp_hdrlen(th)) * extra_pkts;
+ l4_hlen) * extra_pkts;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
if (skb->csum_offset == offsetof(struct udphdr, check))
@@ -222,18 +272,9 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
}
ngle = shinfo->nr_frags + 1;
- req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
- for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
- i < ngle && txq_to_end(q, gle); i++, gle++)
- fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
-
- if (txq_to_end(q, gle) == 0) {
- gle = (struct fun_dataop_gl *)q->desc;
- for ( ; i < ngle; i++, gle++)
- fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
- }
+ gle = fun_write_gl(q, req, addrs, lens, ngle);
if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
@@ -350,7 +391,7 @@ static u16 txq_hw_head(const struct funeth_txq *q)
/* Unmap the Tx packet starting at the given descriptor index and
* return the number of Tx descriptors it occupied.
*/
-static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
+static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
unsigned int ngle = req->dataop.ngather;
@@ -398,7 +439,7 @@ static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
rmb();
do {
- unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
struct sk_buff *skb = q->info[reclaim_idx].skb;
trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
@@ -440,20 +481,10 @@ int fun_txq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
-{
- const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
- const struct fun_dataop_gl *gle;
-
- gle = (const struct fun_dataop_gl *)req->dataop.imm;
- dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
- be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
-}
-
-/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
+/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
- unsigned int npkts = 0, head, reclaim_idx;
+ unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
@@ -465,37 +496,49 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
rmb();
do {
- fun_xdp_unmap(q, reclaim_idx);
- page_frag_free(q->info[reclaim_idx].vaddr);
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
- trace_funeth_tx_free(q, reclaim_idx, 1, head);
+ xdp_return_frame(q->info[reclaim_idx].xdpf);
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
- reclaim_idx = (reclaim_idx + 1) & q->mask;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ ndesc += pkt_desc;
npkts++;
} while (reclaim_idx != head && npkts < budget);
}
- q->cons_cnt += npkts;
+ q->cons_cnt += ndesc;
return npkts;
}
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
+ unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
+ const struct skb_shared_info *si = NULL;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma[MAX_SKB_FRAGS + 1];
struct fun_eth_tx_req *req;
- struct fun_dataop_gl *gle;
- unsigned int idx;
- dma_addr_t dma;
if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
- if (!unlikely(fun_txq_avail(q))) {
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ si = xdp_get_shared_info_from_frame(xdpf);
+ tot_len = xdp_get_frame_len(xdpf);
+ nfrags += si->nr_frags;
+ ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
+ sizeof(struct fun_dataop_gl)),
+ FUNETH_SQE_SIZE);
+ }
+
+ if (unlikely(fun_txq_avail(q) < ndesc)) {
FUN_QSTAT_INC(q, tx_xdp_full);
return false;
}
- dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
+ if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
+ lens))) {
FUN_QSTAT_INC(q, tx_map_err);
return false;
}
@@ -503,26 +546,25 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
idx = q->prod_cnt & q->mask;
req = fun_tx_desc_addr(q, idx);
req->op = FUN_ETH_OP_TX;
- req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
+ req->len8 = 0;
req->flags = 0;
req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
req->repr_idn = 0;
req->encap_proto = 0;
fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
- req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);
- gle = (struct fun_dataop_gl *)req->dataop.imm;
- fun_dataop_gl_init(gle, 0, 0, len, dma);
+ fun_write_gl(q, req, dma, lens, nfrags);
- q->info[idx].vaddr = data;
+ q->info[idx].xdpf = xdpf;
u64_stats_update_begin(&q->syncp);
- q->stats.tx_bytes += len;
+ q->stats.tx_bytes += tot_len;
q->stats.tx_pkts++;
u64_stats_update_end(&q->syncp);
- trace_funeth_tx(q, len, idx, 1);
- q->prod_cnt++;
+ trace_funeth_tx(q, tot_len, idx, nfrags);
+ q->prod_cnt += ndesc;
return true;
}
@@ -545,12 +587,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
if (unlikely(q_idx >= fp->num_xdpqs))
return -ENXIO;
- for (q = xdpqs[q_idx], i = 0; i < n; i++) {
- const struct xdp_frame *xdpf = frames[i];
-
- if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+ for (q = xdpqs[q_idx], i = 0; i < n; i++)
+ if (!fun_xdp_tx(q, frames[i]))
break;
- }
if (unlikely(flags & XDP_XMIT_FLUSH))
fun_txq_wr_db(q);
@@ -565,7 +604,7 @@ static void fun_txq_purge(struct funeth_txq *q)
while (q->cons_cnt != q->prod_cnt) {
unsigned int idx = q->cons_cnt & q->mask;
- q->cons_cnt += unmap_skb(q, idx);
+ q->cons_cnt += fun_unmap_pkt(q, idx);
dev_kfree_skb_any(q->info[idx].skb);
}
netdev_tx_reset_queue(q->ndq);
@@ -576,9 +615,8 @@ static void fun_xdpq_purge(struct funeth_txq *q)
while (q->cons_cnt != q->prod_cnt) {
unsigned int idx = q->cons_cnt & q->mask;
- fun_xdp_unmap(q, idx);
- page_frag_free(q->info[idx].vaddr);
- q->cons_cnt++;
+ q->cons_cnt += fun_unmap_pkt(q, idx);
+ xdp_return_frame(q->info[idx].xdpf);
}
}
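
The funeth Tx rework above factors the gather-list write into fun_write_gl(), which fills entries up to the end of the descriptor ring and then wraps to the ring base for the remainder. A generic sketch of that wrap handling, with ring_space_to_end(), ring_base() and write_entry() standing in for txq_to_end(), q->desc and fun_dataop_gl_init():

for (i = 0, gle = first_entry; i < ngle && ring_space_to_end(q, gle); i++, gle++)
	write_entry(gle, lens[i], addrs[i]);

if (ring_space_to_end(q, gle) == 0) {
	gle = ring_base(q);			/* wrap to the start of the ring */
	for ( ; i < ngle; i++, gle++)
		write_entry(gle, lens[i], addrs[i]);
}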
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 04c9f91b7489..53b7e95213a8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -82,6 +82,7 @@ struct funeth_txq_stats { /* per Tx queue SW counters */
u64 tx_cso; /* # of packets with checksum offload */
u64 tx_tso; /* # of non-encapsulated TSO super-packets */
u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_uso; /* # of non-encapsulated UDP LSO super-packets */
u64 tx_more; /* # of DBs elided due to xmit_more */
u64 tx_nstops; /* # of times the queue has stopped */
u64 tx_nrestarts; /* # of times the queue has restarted */
@@ -95,8 +96,8 @@ struct funeth_txq_stats { /* per Tx queue SW counters */
struct funeth_tx_info { /* per Tx descriptor state */
union {
- struct sk_buff *skb; /* associated packet */
- void *vaddr; /* start address for XDP */
+ struct sk_buff *skb; /* associated packet (sk_buff path) */
+ struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
};
};
@@ -245,7 +246,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
int fun_txq_napi_poll(struct napi_struct *napi, int budget);
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
int fun_xdp_xmit_frames(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index ec394d991668..588d64819ed5 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -386,7 +386,7 @@ static int gve_prep_tso(struct sk_buff *skb)
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
- header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
@@ -598,9 +598,9 @@ static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
*/
static bool gve_can_send_tso(const struct sk_buff *skb)
{
- const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
int cur_seg_size;
@@ -795,7 +795,7 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
/* No outstanding miss completion but packet allocated
* implies packet receives a re-injection completion
- * without a a prior miss completion. Return without
+ * without a prior miss completion. Return without
* completing the packet.
*/
net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2f0bd21a9082..d94cc8c6681f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -31,8 +31,6 @@
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
- (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
@@ -94,7 +92,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -108,7 +106,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
}
desc->tx.ip_offset = ip_offset;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ae56306400b8..35d70041b9e8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1838,9 +1838,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
if (!skb->encapsulation)
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_tcp_all_headers(skb);
- return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
}
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
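Several hunks in this series (gve, hns, hns3, ehea, e1000, e1000e) replace open-coded header-length arithmetic with skb_tcp_all_headers() and skb_inner_tcp_all_headers(). A minimal sketch of what these helpers are taken to compute, mirroring the expressions they replace; the sketch_ prefix marks these as illustrations, not the kernel's own definitions in include/linux/tcp.h:

/* Assumes <linux/tcp.h> and <linux/skbuff.h> context. */
static inline int sketch_skb_tcp_all_headers(const struct sk_buff *skb)
{
	/* L2 + L3 headers up to the transport header, plus the TCP header */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int sketch_skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	/* the same, for the inner headers of an encapsulated frame */
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}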
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 5153e5d41bbd..b8a1ecb4b8fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -37,8 +37,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
__entry->hdr_len = skb->encapsulation ?
- skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
- skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
__entry->ip_summed = skb->ip_summed;
__entry->fraglist = skb_has_frag_list(skb);
hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5eaf09ea4009..26f87330173e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -979,7 +979,7 @@ static int hclgevf_update_mac_list(struct hnae3_handle *handle,
/* if the mac addr is already in the mac list, no need to add a new
* one into it, just check the mac addr state, convert it to a new
- * new state, or just remove it, or do nothing.
+ * state, or just remove it, or do nothing.
*/
mac_node = hclgevf_find_mac_node(list, addr);
if (mac_node) {
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 07fdab58001d..c2ae1b4f9a5f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -174,7 +174,7 @@ static int hns_mdio_wait_ready(struct mii_bus *bus)
u32 cmd_reg_value;
int i;
- /* waitting for MDIO_COMMAND_REG 's mdio_start==0 */
+ /* waiting for MDIO_COMMAND_REG's mdio_start==0 */
/* after that can do read or write*/
for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
@@ -319,7 +319,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
MDIO_C45_READ, phy_id, devad);
}
- /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+ /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
/* check for read or write opt is finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index fb3e89141a0d..a4fbf44f944c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -95,9 +95,6 @@ struct hinic_dev {
u16 sq_depth;
u16 rq_depth;
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-
u8 rss_tmpl_idx;
u8 rss_hash_engine;
u16 num_rss;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 05329292d940..c23ee2ddbce3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,8 +62,6 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_LRO_RX_TIMER_DEFAULT 16
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
-
#define work_to_rx_mode_work(work) \
container_of(work, struct hinic_rx_mode_work, work)
@@ -82,56 +80,44 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features, bool force_change);
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
struct hinic_rxq_stats rx_stats;
- u64_stats_init(&rx_stats.syncp);
-
hinic_rxq_get_stats(rxq, &rx_stats);
- u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
nic_rx_stats->errors += rx_stats.errors;
nic_rx_stats->csum_errors += rx_stats.csum_errors;
nic_rx_stats->other_errors += rx_stats.other_errors;
- u64_stats_update_end(&nic_rx_stats->syncp);
-
- hinic_rxq_clean_stats(rxq);
}
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
struct hinic_txq_stats tx_stats;
- u64_stats_init(&tx_stats.syncp);
-
hinic_txq_get_stats(txq, &tx_stats);
- u64_stats_update_begin(&nic_tx_stats->syncp);
nic_tx_stats->bytes += tx_stats.bytes;
nic_tx_stats->pkts += tx_stats.pkts;
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
- u64_stats_update_end(&nic_tx_stats->syncp);
-
- hinic_txq_clean_stats(txq);
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
{
int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
}
/**
@@ -560,8 +546,6 @@ int hinic_close(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- update_nic_stats(nic_dev);
-
up(&nic_dev->mgmt_lock);
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
@@ -855,26 +839,19 @@ static void hinic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
-
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
-
- down(&nic_dev->mgmt_lock);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
-
- up(&nic_dev->mgmt_lock);
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
- stats->rx_errors = nic_rx_stats->errors;
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
}
static int hinic_set_features(struct net_device *netdev,
@@ -1173,8 +1150,6 @@ static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
static int nic_dev_init(struct pci_dev *pdev)
{
struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -1236,15 +1211,8 @@ static int nic_dev_init(struct pci_dev *pdev)
sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
-
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
-
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
goto err_vlan_bitmap;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 24b7b819dbfb..a866bea65110 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -73,7 +73,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
@@ -83,7 +82,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index 01e7d3c0b68e..df555847afb5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -852,12 +852,6 @@ int hinic_ndo_set_vf_bw(struct net_device *netdev,
return -EINVAL;
}
- if (max_tx_rate < min_tx_rate) {
- netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
- max_tx_rate, min_tx_rate);
- return -EINVAL;
- }
-
err = hinic_port_link_state(nic_dev, &link_state);
if (err) {
netif_err(nic_dev, drv, netdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 87408e7bb809..5051cdff2384 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -98,7 +98,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
@@ -108,7 +107,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
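The hinic changes above drop the device-wide counters and their writer-side u64_stats protection; hinic_get_stats64() now gathers per-queue snapshots into structs on its own stack. A minimal sketch of the reader pattern that remains, with example_ placeholder names rather than hinic symbols:

/* The per-queue seqcount still guards the snapshot; the aggregate lives
 * on the caller's stack and needs no synchronization of its own.
 */
struct example_queue_stats {
	struct u64_stats_sync syncp;
	u64 pkts;
	u64 bytes;
};

static void example_gather(const struct example_queue_stats *src,
			   u64 *pkts, u64 *bytes)
{
	unsigned int start;
	u64 p, b;

	do {
		start = u64_stats_fetch_begin(&src->syncp);
		p = src->pkts;
		b = src->bytes;
	} while (u64_stats_fetch_retry(&src->syncp, start));

	*pkts += p;	/* accumulate into the caller's on-stack totals */
	*bytes += b;
}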
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 8ce3348edf08..5dc302880f5f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1617,7 +1617,7 @@ static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
* For TSO packets we only copy the headers into the
* immediate area.
*/
- immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ immediate_len = skb_tcp_all_headers(skb);
}
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 36418b510dde..11a884aa5082 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1430,7 +1430,6 @@ static int e100_phy_check_without_mii(struct nic *nic)
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
#define NSC_CONG_TXREADY 0x0400
-#define ADVERTISE_FC_SUPPORTED 0x0400
static int e100_phy_init(struct nic *nic)
{
struct net_device *netdev = nic->netdev;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 1042e79a1397..4542e2bc28e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -2000,7 +2000,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
@@ -4376,7 +4376,7 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
/**
* e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
* @hw: Struct containing variables accessed by shared code
- * @offset: Offset in VLAN filer table to write
+ * @offset: Offset in VLAN filter table to write
* @value: Value to write into VLAN filter table
*/
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
@@ -4396,7 +4396,7 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
- * e1000_clear_vfta - Clears the VLAN filer table
+ * e1000_clear_vfta - Clears the VLAN filter table
* @hw: Struct containing variables accessed by shared code
*/
static void e1000_clear_vfta(struct e1000_hw *hw)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f5feb55cfba..23299fc56199 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2708,7 +2708,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -3139,7 +3139,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
case e1000_82544: {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 4d4f5bf1e516..f4154ca7fcb4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -82,7 +82,6 @@ E1000_PARAM(Duplex, "Duplex setting");
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -95,7 +94,6 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 8d06c9d8ff8b..e8a9a9610ac6 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -329,7 +329,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
- s32 ptp_delta;
+ long ptp_delta;
u16 eee_advert;
};
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 13382df2f2ef..bcf680e83811 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -630,7 +630,6 @@ struct e1000_phy_info {
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
- bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e6c8e6d5234f..9466f65a6da7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
bool blocked = false;
int i = 0;
- /* Check the PHY (LCD) reset flag */
- if (hw->phy.reset_disable)
- return true;
-
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
usleep_range(10000, 11000);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 638a3ddd7ada..2504b11c3169 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -271,7 +271,6 @@
#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-#define I217_MEMPWR_MOEM 0x1000
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 51512a73fdd0..5df7ad93f3d7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -957,7 +957,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fa06f68c8c80..321f2a95ae3a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3922,9 +3922,9 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return;
- if (info->adjfreq) {
+ if (info->adjfine) {
/* restore the previous ptp frequency delta */
- ret_val = info->adjfreq(info, adapter->ptp_delta);
+ ret_val = info->adjfine(info, adapter->ptp_delta);
} else {
/* set the default base frequency if no adjustment possible */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
@@ -5474,7 +5474,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -5846,7 +5846,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
/* we do this workaround for ES2LAN, but it is un-necessary,
* avoiding it could save a lot of cycles
*/
@@ -6494,6 +6494,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
hw->mac.type >= e1000_pch_adp) {
+ /* Keep the GPT clock enabled for CSME */
+ mac_data = er32(FEXTNVM);
+ mac_data |= BIT(3);
+ ew32(FEXTNVM, mac_data);
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@@ -6987,21 +6991,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
- u16 phy_data;
int rc;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
- hw->mac.type >= e1000_pch_adp) {
- /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
- e1e_rphy(hw, I217_MEMPWR, &phy_data);
- phy_data |= I217_MEMPWR_MOEM;
- e1e_wphy(hw, I217_MEMPWR, phy_data);
-
- /* Disable LCD reset */
- hw->phy.reset_disable = true;
- }
-
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
@@ -7023,8 +7014,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
- u16 phy_data;
int rc;
/* Introduce S0ix implementation */
@@ -7035,17 +7024,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
if (rc)
return rc;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
- hw->mac.type >= e1000_pch_adp) {
- /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
- e1e_rphy(hw, I217_MEMPWR, &phy_data);
- phy_data &= ~I217_MEMPWR_MOEM;
- e1e_wphy(hw, I217_MEMPWR, phy_data);
-
- /* Enable LCD reset */
- hw->phy.reset_disable = false;
- }
-
return e1000e_pm_thaw(dev);
}
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ebe121db4307..3132d8f2f207 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -101,8 +101,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
* demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
-#define MAX_INTMODE 2
-#define MIN_INTMODE 0
/* Enable Smart Power Down of the PHY
*
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eb5c014c02fb..0e488e4fa5c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -15,14 +15,16 @@
#endif
/**
- * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * e1000e_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
- * @delta: Desired frequency change in parts per billion
+ * @delta: Desired frequency change in scaled parts per million
*
* Adjust the frequency of the PHC cycle counter by the indicated delta from
* the base frequency.
+ *
+ * Scaled parts per million is ppm but with a 16 bit binary fractional field.
**/
-static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
@@ -33,9 +35,6 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
u32 timinca, incvalue;
s32 ret_val;
- if ((delta > ptp->max_adj) || (delta <= -1000000000))
- return -EINVAL;
-
if (delta < 0) {
neg_adj = true;
delta = -delta;
@@ -50,9 +49,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
- adjustment = incvalue;
- adjustment *= delta;
- adjustment = div_u64(adjustment, 1000000000);
+ adjustment = mul_u64_u64_div_u64(incvalue, (u64)delta,
+ 1000000ULL << 16);
incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
@@ -260,7 +258,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = e1000e_phc_adjfreq,
+ .adjfine = e1000e_phc_adjfine,
.adjtime = e1000e_phc_adjtime,
.gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
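The hunks above move e1000e from .adjfreq (parts per billion) to .adjfine (scaled parts per million, that is ppm with a 16-bit binary fractional field), which is why the divisor becomes 1000000 << 16. A small standalone sketch of the unit relationship, using an arbitrary example adjustment:

/* Illustration only; 1500 ppb is an arbitrary example value. */
#include <stdio.h>

int main(void)
{
	long ppb = 1500;			/* old adjfreq argument */
	long scaled_ppm = ppb * 65536 / 1000;	/* equivalent adjfine argument */

	printf("%ld ppb == %ld scaled_ppm (%.3f ppm)\n",
	       ppb, scaled_ppm, scaled_ppm / 65536.0);
	return 0;
}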
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 30ca9ee1900b..87fa5874f16e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -809,7 +809,7 @@ static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function copies the message from the the message array to mbmem
+ * This function copies the message from the message array to mbmem
**/
static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
@@ -1825,7 +1825,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
fm10k_sm_mbx_connect_reset(mbx);
break;
case FM10K_STATE_CONNECT:
- /* try connnecting at lower version */
+ /* try connecting at lower version */
if (mbx->remote) {
while (mbx->local > 1)
mbx->local--;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index f6d56867f857..75cbdf2dbbe3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -78,7 +78,7 @@ static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
* @string: Pointer to location of destination string
*
* This function pulls the string back out of the attribute and will place
- * it in the array pointed by by string. It will return success if provided
+ * it in the array pointed to by string. It will return success if provided
* with a valid pointers.
**/
static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
@@ -584,7 +584,7 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* @mbx: Unused mailbox pointer
*
* This function is a default handler for unrecognized messages. At a
- * a minimum it just indicates that the message requested was
+ * minimum it just indicates that the message requested was
* unimplemented.
**/
s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 407fe8f340a0..d86b6d349ea9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -566,6 +566,7 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -1291,4 +1292,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add);
+
+/**
+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
+ * @pf: pointer to a pf.
+ *
+ * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ *
+ * Return: I40E_FLAG_TC_MQPRIO set state.
+ **/
+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+{
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
+}
+
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 19704f5c8291..156e92c43780 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -236,8 +236,6 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
-#define I40E_QUEUE_STAT(_name, _stat) \
- I40E_STAT(struct i40e_ring, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -457,6 +455,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
+ I40E_PRIV_FLAG("vf-vlan-pruning",
+ I40E_FLAG_VF_VLAN_PRUNING, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1141,6 +1141,71 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#define I40E_LBIT_SIZE 8
+/**
+ * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
+ * @speed: speed in decimal
+ * @ks: ethtool ksettings
+ *
+ * Return i40e_aq_link_speed based on speed
+ **/
+static enum i40e_aq_link_speed
+i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks)
+{
+ enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN;
+ bool speed_changed = false;
+ int i, j;
+
+ static const struct {
+ __u32 speed;
+ enum i40e_aq_link_speed link_speed;
+ __u8 bit[I40E_LBIT_SIZE];
+ } i40e_speed_lut[] = {
+#define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT
+ {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} },
+ {SPEED_1000, I40E_LINK_SPEED_1GB,
+ {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX),
+ I40E_LBIT(1000baseKX)} },
+ {SPEED_10000, I40E_LINK_SPEED_10GB,
+ {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR),
+ I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR),
+ I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} },
+
+ {SPEED_25000, I40E_LINK_SPEED_25GB,
+ {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR),
+ I40E_LBIT(25000baseSR)} },
+ {SPEED_40000, I40E_LINK_SPEED_40GB,
+ {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4),
+ I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} },
+ {SPEED_20000, I40E_LINK_SPEED_20GB,
+ {I40E_LBIT(20000baseKR2)} },
+ {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} },
+ {SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(5000baseT)} }
+#undef I40E_LBIT
+};
+
+ for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) {
+ if (i40e_speed_lut[i].speed == speed) {
+ for (j = 0; j < I40E_LBIT_SIZE; j++) {
+ if (test_bit(i40e_speed_lut[i].bit[j],
+ ks->link_modes.supported)) {
+ speed_changed = true;
+ break;
+ }
+ if (!i40e_speed_lut[i].bit[j])
+ break;
+ }
+ if (speed_changed) {
+ link_speed = i40e_speed_lut[i].link_speed;
+ break;
+ }
+ }
+ }
+ return link_speed;
+}
+
+#undef I40E_LBIT_SIZE
+
/**
* i40e_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
@@ -1157,12 +1222,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings copy_ks;
struct i40e_aq_set_phy_config config;
struct i40e_pf *pf = np->vsi->back;
+ enum i40e_aq_link_speed link_speed;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
i40e_status status = 0;
int timeout = 50;
int err = 0;
+ __u32 speed;
u8 autoneg;
/* Changing port settings is not supported if this isn't the
@@ -1195,6 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
+ speed = copy_ks.base.speed;
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
@@ -1213,6 +1281,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* set autoneg back to what it currently is */
copy_ks.base.autoneg = safe_ks.base.autoneg;
+ copy_ks.base.speed = safe_ks.base.speed;
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
@@ -1329,6 +1398,27 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
40000baseLR4_Full))
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* Autonegotiation must be disabled to change speed */
+ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) &&
+ (autoneg == AUTONEG_DISABLE ||
+ (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) {
+ link_speed = i40e_speed_to_link_speed(speed, ks);
+ if (link_speed == I40E_LINK_SPEED_UNKNOWN) {
+ netdev_info(netdev, "Given speed is not supported\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ } else {
+ config.link_speed = link_speed;
+ }
+ } else {
+ if (safe_ks.base.speed != speed) {
+ netdev_info(netdev,
+ "Unable to set speed, disable autoneg\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ }
+
/* If speed didn't get set, set it to what it currently is.
* This is needed because if advertise is 0 (as it is when autoneg
* is disabled) then speed won't get set.
@@ -4931,7 +5021,7 @@ static int i40e_set_channels(struct net_device *dev,
/* We do not support setting channels via ethtool when TCs are
* configured through mqprio
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return -EINVAL;
/* verify they are not requesting separate vectors */
@@ -5294,6 +5384,13 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ pf->num_alloc_vfs) {
+ dev_warn(&pf->pdev->dev,
+ "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(new_flags & I40E_FLAG_MFP_ENABLED))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index aa786fd55951..9f1d5de7bf16 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -1442,6 +1444,114 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
}
/**
+ * i40e_get_vf_new_vlan - Get new vlan id on a vf
+ * @vsi: the vsi to configure
+ * @new_mac: new mac filter to be added
+ * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Get new VLAN id based on current VLAN filters, trust, PVID
+ * and the vf-vlan-pruning flag.
+ *
+ * Returns the value of the new vlan filter or
+ * the old value if no new filter is needed.
+ */
+static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
+ struct i40e_new_mac_filter *new_mac,
+ struct i40e_mac_filter *f,
+ int vlan_filters,
+ bool trusted)
+{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
+ struct i40e_pf *pf = vsi->back;
+ bool is_any;
+
+ if (new_mac)
+ f = new_mac->f;
+
+ if (pvid && f->vlan != pvid)
+ return pvid;
+
+ is_any = (trusted ||
+ !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+
+ if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (is_any && !vlan_filters && f->vlan == 0)) {
+ if (is_any)
+ return I40E_VLAN_ANY;
+ else
+ return 0;
+ }
+
+ return f->vlan;
+}
+
+/**
+ * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Correct VF VLAN filters based on current VLAN filters, trust, PVID
+ * and the vf-vlan-pruning flag.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters,
+ bool trusted)
+{
+ struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new_mac;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
+ new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
+ vlan_filters, trusted);
+ }
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
+ trusted);
+ if (new_vlan != f->vlan) {
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+ /* Create a temporary i40e_new_mac_filter */
+ new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
+ if (!new_mac)
+ return -ENOMEM;
+ new_mac->f = add_head;
+ new_mac->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new_mac->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+ return 0;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -1925,11 +2035,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes
+ * We need at least one queue pair for the interface
+ * to be usable, as handled by the else branch below.
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
+ else
+ vsi->num_queue_pairs = 1;
}
/* Number of queues per enabled TC */
@@ -2496,10 +2610,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vlan_filters++;
}
- retval = i40e_correct_mac_vlan_filters(vsi,
- &tmp_add_list,
- &tmp_del_list,
- vlan_filters);
+ if (vsi->type != I40E_VSI_SRIOV)
+ retval = i40e_correct_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters);
+ else
+ retval = i40e_correct_vf_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters, pf->vf[vsi->vf_id].trusted);
hlist_for_each_entry(new, &tmp_add_list, hlist)
netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
@@ -2928,8 +3046,21 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
int bkt;
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
- if (f->state == I40E_FILTER_REMOVE)
+ /* If we're asked to add a filter that has been marked for
+ * removal, it is safe to simply restore it to active state.
+ * __i40e_del_filter will have simply deleted any filters which
+ * were previously marked NEW or FAILED, so if it is currently
+ * marked REMOVE it must have previously been ACTIVE. Since we
+ * haven't yet run the sync filters task, just restore this
+ * filter to the ACTIVE state so that the sync task leaves it
+ * in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
+ f->state = I40E_FILTER_ACTIVE;
continue;
+ } else if (f->state == I40E_FILTER_REMOVE) {
+ continue;
+ }
add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
@@ -4110,7 +4241,6 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
i40e_flush(&pf->hw);
if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4849,7 +4979,6 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
irq_update_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
@@ -5333,7 +5462,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
@@ -5365,7 +5494,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return i40e_mqprio_get_enabled_tc(pf);
/* If neither MQPRIO nor DCB is enabled for this PF then just return
@@ -5462,7 +5591,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
int i;
/* There is no need to reset BW when mqprio mode is on. */
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return 0;
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
@@ -5534,7 +5663,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
vsi->tc_config.tc_info[i].qoffset);
}
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return;
/* Assign UP2TC map for the VSI */
@@ -5695,7 +5824,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.info = vsi->info;
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
if (ret)
goto out;
@@ -6419,7 +6548,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
if (vsi->type == I40E_VSI_MAIN) {
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
else
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
@@ -7813,7 +7942,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
return ERR_PTR(-EINVAL);
}
@@ -8066,7 +8195,7 @@ config_tc:
/* Quiesce VSI queues */
i40e_quiesce_vsi(vsi);
- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
+ if (!hw && !i40e_is_tc_mqprio_enabled(pf))
i40e_remove_queue_channels(vsi);
/* Configure VSI for enabled TCs */
@@ -8090,7 +8219,7 @@ config_tc:
"Setup channel (id:%u) utilizing num_queues %d\n",
vsi->seid, vsi->tc_config.tc_info[0].qcount);
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
@@ -10650,7 +10779,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
- int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
@@ -10658,13 +10787,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- i40e_check_recovery_mode(pf)) {
+ is_recovery_mode_reported)
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
- }
if (test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !old_recovery_mode_bit)
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -10691,13 +10818,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* accordingly with regard to resources initialization
* and deinitialization
*/
- if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
- old_recovery_mode_bit) {
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
if (i40e_get_capabilities(pf,
i40e_aqc_opc_list_func_capabilities))
goto end_unlock;
- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ if (is_recovery_mode_reported) {
/* we're staying in recovery mode so we'll reinitialize
* misc vector here
*/
@@ -10747,7 +10873,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
* is not supported with new link speed
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
i40e_aq_set_dcb_parameters(hw, false, NULL);
} else {
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
@@ -13106,7 +13232,7 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 61e5789d78db..2d3533f38d7b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -27,7 +27,6 @@
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
-#define to_dev(obj) container_of(obj, struct device, kobj)
enum i40e_ptp_pin {
SDP3_2 = 0,
@@ -335,44 +334,37 @@ static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
}
/**
- * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * i40e_ptp_adjfine - Adjust the PHC frequency
* @ptp: The PTP clock structure
- * @ppb: Parts per billion adjustment from the base
+ * @scaled_ppm: Scaled parts per million adjustment from base
*
- * Adjust the frequency of the PHC by the indicated parts per billion from the
- * base frequency.
+ * Adjust the frequency of the PHC by the indicated delta from the base
+ * frequency.
+ *
+ * Scaled parts per million is ppm with a 16 bit binary fractional field.
**/
-static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct i40e_hw *hw = &pf->hw;
u64 adj, freq, diff;
int neg_adj = 0;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- freq = I40E_PTP_40GB_INCVAL;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ smp_mb(); /* Force any pending update before accessing. */
+ freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
+ diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
adj = I40E_PTP_40GB_INCVAL - diff;
else
adj = I40E_PTP_40GB_INCVAL + diff;
- /* At some link speeds, the base incval is so large that directly
- * multiplying by ppb would result in arithmetic overflow even when
- * using a u64. Avoid this by instead calculating the new incval
- * always in terms of the 40GbE clock rate and then multiplying by the
- * link speed factor afterwards. This does result in slightly lower
- * precision at lower link speeds, but it is fairly minor.
- */
- smp_mb(); /* Force any pending update before accessing. */
- adj *= READ_ONCE(pf->ptp_adj_mult);
-
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
@@ -1402,7 +1394,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
- pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjfine = i40e_ptp_adjfine;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
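The comment removed above noted that multiplying the link-speed-scaled increment by a ppb value could overflow a u64; the rewrite instead scales the full increment first and relies on mul_u64_u64_div_u64() keeping the intermediate product in 128 bits. A rough standalone sketch of that arithmetic; the increment below is an assumed figure, not the real I40E_PTP_40GB_INCVAL:

/* Sketch only; needs a 64-bit GCC/Clang for unsigned __int128. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t incval = 1ULL << 40;	/* assumed link-speed-scaled increment */
	uint64_t scaled_ppm = 98304;	/* 1.5 ppm adjustment */

	/* mirrors mul_u64_u64_div_u64(incval, scaled_ppm, 1000000 << 16);
	 * the 128-bit product cannot overflow for any u64 inputs.
	 */
	unsigned __int128 prod = (unsigned __int128)incval * scaled_ppm;
	uint64_t diff = (uint64_t)(prod / (1000000ULL << 16));

	printf("increment delta = %llu\n", (unsigned long long)diff);
	return 0;
}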
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 7bc1174edf6b..d4226161a3ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -372,7 +372,6 @@ static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
}
}
-#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
@@ -1483,10 +1482,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
@@ -2291,16 +2288,14 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* i40e_run_xdp - run an XDP program
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
**/
-static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
if (!xdp_prog)
goto xdp_out;
@@ -2445,6 +2440,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int offset = rx_ring->rx_offset;
struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
struct xdp_buff xdp;
int xdp_res = 0;
@@ -2454,6 +2450,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
@@ -2509,11 +2507,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
}
if (xdp_res) {
@@ -3204,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* set the tx_flags to indicate the IP protocol type. this is
* required so that checksum header computation below is accurate.
@@ -3713,35 +3714,55 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring)
{
- u16 i = xdp_ring->next_to_use;
- struct i40e_tx_buffer *tx_bi;
- struct i40e_tx_desc *tx_desc;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
+ struct i40e_tx_buffer *tx_bi = tx_head;
+ struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
void *data = xdpf->data;
u32 size = xdpf->len;
- dma_addr_t dma;
- if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
+ if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return I40E_XDP_CONSUMED;
- tx_bi = &xdp_ring->tx_bi[i];
- tx_bi->bytecount = size;
- tx_bi->gso_segs = 1;
- tx_bi->xdpf = xdpf;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record length, and DMA address */
- dma_unmap_len_set(tx_bi, len, size);
- dma_unmap_addr_set(tx_bi, dma, dma);
+ for (;;) {
+ dma_addr_t dma;
- tx_desc = I40E_TX_DESC(xdp_ring, i);
- tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
- | I40E_TXD_CMD,
- 0, size, 0);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ goto unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
+
+ if (++index == xdp_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_bi = &xdp_ring->tx_bi[index];
+ tx_desc = I40E_TX_DESC(xdp_ring, index);
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ size = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -3749,14 +3770,30 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
smp_wmb();
xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count)
- i = 0;
- tx_bi->next_to_watch = tx_desc;
- xdp_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = index;
return I40E_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_bi = &xdp_ring->tx_bi[index];
+ if (dma_unmap_len(tx_bi, len))
+ dma_unmap_page(xdp_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+ if (tx_bi == tx_head)
+ break;
+
+ if (!index)
+ index += xdp_ring->count;
+ index--;
+ }
+
+ return I40E_XDP_CONSUMED;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 86b0f21287dc..4f184c50f6e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4353,6 +4353,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
+ i40e_vlan_stripping_enable(vsi);
i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -4368,7 +4369,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* MAC addresses deleted.
*/
if ((!(vlan_id || qos) ||
- vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
vsi->info.pvid) {
ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
if (ret) {
@@ -4731,6 +4732,11 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
vf->trusted = setting;
+
+ /* request PF to sync mac/vlan filters for the VF */
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index af3e7e6afc85..6d4009e0cbd6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -143,20 +143,17 @@ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
* i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
*
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
-static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- /* NB! xdp_prog will always be !NULL, due to the fact that
- * this path is enabled by setting an XDP program.
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -339,9 +336,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
u16 cleaned_count;
+ /* NB! xdp_prog will always be !NULL, due to the fact that
+ * this path is enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
unsigned int rx_packets;
@@ -378,7 +381,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
&rx_bytes, size, xdp_res, &failure);
if (failure)
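
The zero-copy change above hoists the READ_ONCE() of rx_ring->xdp_prog out of the per-packet helper and into the poll loop, so the program pointer is loaded once per budget instead of once per descriptor. A rough user-space analogue of that hoisting, using a C11 atomic load in place of READ_ONCE(); count_even() and the budget are invented:

#include <stdatomic.h>
#include <stdio.h>

typedef int (*prog_fn)(int pkt);

static int count_even(int pkt) { return !(pkt & 1); }

/* shared pointer that another thread may swap at runtime */
static _Atomic(prog_fn) active_prog = count_even;

static int poll_budget(int budget)
{
        /* load the program pointer once per poll, not once per packet */
        prog_fn prog = atomic_load_explicit(&active_prog, memory_order_relaxed);
        int hits = 0;

        for (int pkt = 0; pkt < budget; pkt++)
                hits += prog(pkt);

        return hits;
}

int main(void)
{
        printf("hits: %d\n", poll_budget(64));
        return 0;
}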
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 49aed3e506a6..3f6187c16424 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -64,7 +64,6 @@ struct iavf_vsi {
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
int base_vector;
- u16 work_limit;
u16 qs_handle;
void *priv; /* client driver data reference. */
};
@@ -93,6 +92,7 @@ struct iavf_vsi {
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA 50
#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
(IAVF_MAX_VF_VSI * \
@@ -146,7 +146,8 @@ struct iavf_mac_filter {
u8 remove:1; /* filter needs to be removed */
u8 add:1; /* filter needs to be added */
u8 is_primary:1; /* filter is a default VF MAC */
- u8 padding:4;
+ u8 add_handled:1; /* received response for filter add */
+ u8 padding:3;
};
};
@@ -159,8 +160,12 @@ struct iavf_vlan {
struct iavf_vlan_filter {
struct list_head list;
struct iavf_vlan vlan;
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_vlan:1; /* filter is new, wait for PF answer */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 padding:5;
+ };
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@@ -248,6 +253,7 @@ struct iavf_adapter {
struct work_struct adminq_task;
struct delayed_work client_task;
wait_queue_head_t down_waitqueue;
+ wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
struct list_head mac_filter_list;
@@ -292,6 +298,7 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_INITIAL_MAC_SET BIT(23)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
@@ -427,6 +434,11 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+ /* snapshot of "num_active_queues" before setup_tc for qdisc add
+ * is invoked. This information is useful during qdisc del flow,
+ * to restore correct number of queues
+ */
+ int orig_num_active_queues;
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
@@ -461,6 +473,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state)
return "__IAVF_INIT_VERSION_CHECK";
case __IAVF_INIT_GET_RESOURCES:
return "__IAVF_INIT_GET_RESOURCES";
+ case __IAVF_INIT_EXTENDED_CAPS:
+ return "__IAVF_INIT_EXTENDED_CAPS";
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ return "__IAVF_INIT_CONFIG_ADAPTER";
case __IAVF_INIT_SW:
return "__IAVF_INIT_SW";
case __IAVF_INIT_FAILED:
@@ -520,6 +536,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -559,6 +576,8 @@ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac);
void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
netdev_features_t prev_features,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
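
The admin-queue fix above adds the init_free_asq_bufs/init_free_arq_bufs labels so that, when register configuration fails, the per-entry DMA buffers allocated earlier are released before the ring itself. A compact user-space sketch of that layered goto unwind, with plain malloc()/free() standing in for the DMA helpers and a fake register-setup result:

#include <stdlib.h>
#include <stdio.h>

#define NUM_ENTRIES 4

static int config_regs_ok = 0;   /* flip to 1 to exercise the success path */

static int init_queue(void **bufs)
{
        int i;

        for (i = 0; i < NUM_ENTRIES; i++) {
                bufs[i] = malloc(512);          /* per-entry buffer */
                if (!bufs[i])
                        goto free_bufs;
        }

        if (!config_regs_ok)                    /* "register setup" failed */
                goto free_bufs;

        return 0;

free_bufs:
        while (i--)                             /* free only what was allocated */
                free(bufs[i]);
        return -1;
}

int main(void)
{
        void *bufs[NUM_ENTRIES];

        printf("init_queue: %d\n", init_queue(bufs));
        return 0;
}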
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 3bb56714beb0..e535d4c3da49 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
struct iavf_ring *rx_ring, *tx_ring;
- ec->tx_max_coalesced_frames = vsi->work_limit;
- ec->rx_max_coalesced_frames = vsi->work_limit;
-
/* Rx and Tx usecs per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
*/
@@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
- vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
@@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index f3ecb3bca33d..f39440ad5c50 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -843,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
* iavf_get_num_vlans_added - get number of VLANs added
* @adapter: board private structure
*/
-static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
@@ -906,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
return -ENOMEM;
- if (proto == cpu_to_be16(ETH_P_8021Q))
- set_bit(vid, adapter->vsi.active_cvlans);
- else
- set_bit(vid, adapter->vsi.active_svlans);
-
return 0;
}
@@ -983,6 +978,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->add_handled = false;
f->is_new_mac = true;
f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
@@ -994,47 +990,132 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
}
/**
- * iavf_set_mac - NDO callback to set port mac address
- * @netdev: network interface device structure
- * @p: pointer to an address structure
+ * iavf_replace_primary_mac - Replace current primary address
+ * @adapter: board private structure
+ * @new_mac: new MAC address to be applied
*
- * Returns 0 on success, negative on failure
+ * Replace current dev_addr and send request to PF for removal of previous
+ * primary MAC address filter and addition of new primary MAC filter.
+ * Return 0 for success, -ENOMEM for failure.
+ *
+ * Do not call this with mac_vlan_list_lock!
**/
-static int iavf_set_mac(struct net_device *netdev, void *p)
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac)
{
- struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
- return 0;
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->is_primary = false;
+ }
+
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
- f->is_primary = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
- f = iavf_add_filter(adapter, addr->sa_data);
+ f = iavf_add_filter(adapter, new_mac);
+
if (f) {
+ /* Always send the request to add if changing primary MAC
+ * even if filter is already present on the list
+ */
f->is_primary = true;
- ether_addr_copy(hw->mac.addr, addr->sa_data);
+ f->add = true;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
+ ether_addr_copy(hw->mac.addr, new_mac);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
- if (f)
+ if (f) {
queue_work(iavf_wq, &adapter->watchdog_task.work);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * iavf_is_mac_set_handled - wait for a response to set MAC from PF
+ * @netdev: network interface device structure
+ * @macaddr: MAC address to set
+ *
+ * Returns true on success, false on failure
+ */
+static bool iavf_is_mac_set_handled(struct net_device *netdev,
+ const u8 *macaddr)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_mac_filter *f;
+ bool ret = false;
- return (f == NULL) ? -ENOMEM : 0;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ f = iavf_find_filter(adapter, macaddr);
+
+ if (!f || (!f->add && f->add_handled))
+ ret = true;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ return ret;
+}
+
+/**
+ * iavf_set_mac - NDO callback to set port MAC address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int iavf_set_mac(struct net_device *netdev, void *p)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
+ int ret;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = iavf_replace_primary_mac(adapter, addr->sa_data);
+
+ if (ret)
+ return ret;
+
+ /* If this is an initial set MAC during VF spawn do not wait */
+ if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
+ adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
+ return 0;
+ }
+
+ if (handle_mac)
+ goto done;
+
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+
+ /* If ret < 0 then it means wait was interrupted.
+ * If ret == 0 then it means we got a timeout.
+ * else it means we got response for set MAC from PF,
+ * check if netdev MAC was updated to requested MAC,
+ * if yes then set MAC succeeded otherwise it failed return -EACCES
+ */
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return -EAGAIN;
+
+done:
+ if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return -EACCES;
+
+ return 0;
}
/**
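
The new iavf_set_mac() path sleeps on vc_waitqueue and decodes the wait_event_interruptible_timeout() result three ways, as the comment above spells out: negative means the sleep was interrupted, zero means the PF never answered in time, positive means the wakeup arrived and the applied MAC can be checked. A tiny user-space sketch of that decode; wait_reply() and the address strings are stand-ins, not driver code:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for wait_event_interruptible_timeout(): <0 interrupted,
 * 0 timed out, >0 condition became true before the timeout expired
 */
static int wait_reply(void) { return 1; }

static int set_addr(const char *requested, const char *applied)
{
        int ret = wait_reply();

        if (ret < 0)
                return ret;          /* interrupted by a signal */
        if (ret == 0)
                return -EAGAIN;      /* no answer from the peer in time */

        /* reply arrived: success only if the peer applied our address */
        return strcmp(requested, applied) ? -EACCES : 0;
}

int main(void)
{
        printf("set_addr: %d\n", set_addr("aa:bb:cc:dd:ee:ff",
                                          "aa:bb:cc:dd:ee:ff"));
        return 0;
}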
@@ -2245,7 +2326,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2287,7 +2367,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
+ goto err;
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -2451,6 +2531,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
+ adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
+
adapter->tx_desc_count = IAVF_DEFAULT_TXD;
adapter->rx_desc_count = IAVF_DEFAULT_RXD;
err = iavf_init_interrupt_scheme(adapter);
@@ -2956,6 +3038,9 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
+ bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+ bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
@@ -3001,12 +3086,15 @@ continue_reset:
return;
reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
- iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
}
/**
@@ -3325,6 +3413,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
+ u32 tx_rate_rem = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
@@ -3339,12 +3428,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
return -EINVAL;
if (mqprio_qopt->min_rate[i]) {
dev_err(&adapter->pdev->dev,
- "Invalid min tx rate (greater than 0) specified\n");
+ "Invalid min tx rate (greater than 0) specified for TC%d\n",
+ i);
return -EINVAL;
}
- /*convert to Mbps */
+
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
IAVF_MBPS_DIVISOR);
+
+ if (mqprio_qopt->max_rate[i] &&
+ tx_rate < IAVF_MBPS_QUANTA) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, minimum %dMbps\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
+ (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+ if (tx_rate_rem != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, not divisible by %d\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
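
The validation added above converts each per-TC max rate to Mbps and rejects it when it is below IAVF_MBPS_QUANTA (50 Mbps) or not a whole multiple of it, using div_u64_rem() for the remainder. The same check in a small user-space form, with plain 64-bit division instead of div_u64_rem():

#include <stdint.h>
#include <stdio.h>

#define MBPS_DIVISOR 125000ULL   /* bytes per second -> Mbps */
#define MBPS_QUANTA  50ULL       /* minimum and granularity, in Mbps */

/* returns 0 if the requested max rate (bytes per second) is acceptable */
static int validate_max_rate(uint64_t max_rate_bps)
{
        uint64_t mbps = max_rate_bps / MBPS_DIVISOR;

        if (max_rate_bps && mbps < MBPS_QUANTA)
                return -1;                    /* below the 50 Mbps minimum */
        if (mbps % MBPS_QUANTA)
                return -1;                    /* not a multiple of the quanta */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               validate_max_rate(0),                 /* unlimited: ok */
               validate_max_rate(40 * MBPS_DIVISOR), /* 40 Mbps: too low */
               validate_max_rate(150 * MBPS_DIVISOR) /* 150 Mbps: ok */);
        return 0;
}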
@@ -3411,6 +3520,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
netif_tx_disable(netdev);
iavf_del_all_cloud_filters(adapter);
adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
+ total_qps = adapter->orig_num_active_queues;
goto exit;
} else {
return -EINVAL;
@@ -3454,7 +3564,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
adapter->ch_config.ch_info[i].offset = 0;
}
}
+
+ /* Take snapshot of original config such as "num_active_queues"
+ * It is used later when delete ADQ flow is exercised, so that
+ * once delete ADQ flow completes, VF shall go back to its
+ * original queue configuration
+ */
+
+ adapter->orig_num_active_queues = adapter->num_active_queues;
+
+ /* Store queue info based on TC so that VF gets configured
+ * with correct number of queues when VF completes ADQ config
+ * flow
+ */
adapter->ch_config.total_qps = total_qps;
+
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
@@ -3471,6 +3595,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
}
}
exit:
+ if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return 0;
+
+ netif_set_real_num_rx_queues(netdev, total_qps);
+ netif_set_real_num_tx_queues(netdev, total_qps);
+
return ret;
}
@@ -3747,6 +3877,29 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
}
/**
+ * iavf_find_cf - Find the cloud filter in the list
+ * @adapter: Board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
+ unsigned long *cookie)
+{
+ struct iavf_cloud_filter *filter = NULL;
+
+ if (!cookie)
+ return NULL;
+
+ list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ }
+ return NULL;
+}
+
+/**
* iavf_configure_clsflower - Add tc flower filters
* @adapter: board private structure
* @cls_flower: Pointer to struct flow_cls_offload
@@ -3777,6 +3930,15 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter->cookie = cls_flower->cookie;
+ /* bail out here if filter already exists */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ if (iavf_find_cf(adapter, &cls_flower->cookie)) {
+ dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
+ err = -EEXIST;
+ goto spin_unlock;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
/* set the mask to all zeroes to begin with */
memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
/* start out with flow type and eth type IPv4 to begin with */
@@ -3795,6 +3957,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
adapter->num_cloud_filters++;
filter->add = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+spin_unlock:
spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
if (err)
@@ -3804,28 +3967,6 @@ err:
return err;
}
-/* iavf_find_cf - Find the cloud filter in the list
- * @adapter: Board private structure
- * @cookie: filter specific cookie
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * cloud_filter_list_lock.
- */
-static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
- unsigned long *cookie)
-{
- struct iavf_cloud_filter *filter = NULL;
-
- if (!cookie)
- return NULL;
-
- list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
- if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
- return filter;
- }
- return NULL;
-}
-
/**
* iavf_delete_clsflower - Remove tc flower filters
* @adapter: board private structure
@@ -3947,8 +4088,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting on NETDEV_REGISTER event.
+ * We have to leave here to avoid dead lock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
@@ -4162,7 +4312,7 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
@@ -4681,6 +4831,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
+ /* Setup the wait queue for indicating virtchannel events */
+ init_waitqueue_head(&adapter->vc_waitqueue);
+
return 0;
err_ioremap:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 978f651c6b09..06d18797d25a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -194,7 +194,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct iavf_tx_buffer *tx_buf;
struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int budget = vsi->work_limit;
+ unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = IAVF_TX_DESC(tx_ring, i);
@@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
- if (!size)
- return NULL;
-
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
+ if (!size)
+ return rx_buffer;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 782450d5c12f..15ee85dc33bd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -5,10 +5,6 @@
#include "iavf_prototype.h"
#include "iavf_client.h"
-/* busy wait delay in msec */
-#define IAVF_BUSY_WAIT_DELAY 10
-#define IAVF_BUSY_WAIT_COUNT 50
-
/**
* iavf_send_pf_msg
* @adapter: adapter structure
@@ -598,6 +594,8 @@ static void iavf_mac_add_ok(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -618,6 +616,9 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
+
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
@@ -627,6 +628,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
}
/**
+ * iavf_vlan_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove VLAN filters from list based on PF response.
+ **/
+static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+{
+ struct iavf_vlan_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->is_new_vlan) {
+ if (f->vlan.tpid == ETH_P_8021Q)
+ clear_bit(f->vlan.vid,
+ adapter->vsi.active_cvlans);
+ else
+ clear_bit(f->vlan.vid,
+ adapter->vsi.active_svlans);
+
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -683,6 +711,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vlan_id[i] = f->vlan.vid;
i++;
f->add = false;
+ f->is_new_vlan = true;
if (i == count)
break;
}
@@ -695,10 +724,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
} else {
+ u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
+ u16 current_vlans = iavf_get_num_vlans_added(adapter);
struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+ if ((count + current_vlans) > max_vlans &&
+ current_vlans < max_vlans) {
+ count = max_vlans - iavf_get_num_vlans_added(adapter);
+ more = true;
+ }
+
len = sizeof(*vvfl_v2) + ((count - 1) *
sizeof(struct virtchnl_vlan_filter));
if (len > IAVF_MAX_AQ_BUF_SIZE) {
@@ -725,6 +762,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
+ if (i == count)
+ break;
+
/* give priority over outer if it's enabled */
if (filtering_support->outer)
vlan = &vvfl_v2->filters[i].outer;
@@ -736,8 +776,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
i++;
f->add = false;
- if (i == count)
- break;
+ f->is_new_vlan = true;
}
}
@@ -1932,6 +1971,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
@@ -2080,6 +2120,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
*/
iavf_netdev_features_vlan_strip_set(netdev, true);
break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ iavf_vlan_add_reject(adapter);
+ dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -2091,7 +2136,13 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ if (!ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr)) {
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+ }
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -2121,10 +2172,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
/* restore current mac address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
+ netif_addr_lock_bh(netdev);
/* refresh current mac address if changed */
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
@@ -2160,6 +2212,10 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
fallthrough;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
+ struct iavf_mac_filter *f;
+ bool was_mac_changed;
+ u64 aq_required = 0;
+
if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
memcpy(&adapter->vlan_v2_caps, msg,
min_t(u16, msglen,
@@ -2167,6 +2223,46 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ was_mac_changed = !ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr);
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ /* re-add all MAC filters */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (was_mac_changed &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ ether_addr_copy(f->macaddr,
+ adapter->hw.mac.addr);
+
+ f->is_new_mac = true;
+ f->add = true;
+ f->add_handled = false;
+ f->remove = false;
+ }
+
+ /* re-add all VLAN filters */
+ if (VLAN_FILTERING_ALLOWED(adapter)) {
+ struct iavf_vlan_filter *vlf;
+
+ if (!list_empty(&adapter->vlan_filter_list)) {
+ list_for_each_entry(vlf,
+ &adapter->vlan_filter_list,
+ list)
+ vlf->add = true;
+
+ aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ aq_required;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
@@ -2332,6 +2428,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
+ case VIRTCHNL_OP_ADD_VLAN_V2: {
+ struct iavf_vlan_filter *f;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->is_new_vlan) {
+ f->is_new_vlan = false;
+ if (f->vlan.tpid == ETH_P_8021Q)
+ set_bit(f->vlan.vid,
+ adapter->vsi.active_cvlans);
+ else
+ set_bit(f->vlan.vid,
+ adapter->vsi.active_svlans);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ }
+ break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
/* PF enabled vlan strip on this VF.
* Update netdev->features if needed to be in sync with ethtool.
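
In the VLAN_V2 branch above, iavf_add_vlans() caps how many new filters go into a single virtchnl message so the running total never exceeds the PF-advertised max_filters, and sets `more` so the remainder is sent in a later pass. A minimal sketch of that capped-batch decision; cap_batch() and the numbers are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* decide how many of `pending` new filters may go into this message so
 * the total never exceeds `max_filters`; *more tells the caller to
 * schedule another pass for whatever did not fit
 */
static unsigned int cap_batch(unsigned int pending, unsigned int added,
                              unsigned int max_filters, bool *more)
{
        unsigned int count = pending;

        *more = false;
        if (added + count > max_filters && added < max_filters) {
                count = max_filters - added;
                *more = true;
        }
        return count;
}

int main(void)
{
        bool more;
        unsigned int n = cap_batch(20, 45, 50, &more);

        printf("send %u now, more=%d\n", n, more);   /* send 5 now, more=1 */
        return 0;
}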
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 60453b3b8d23..cc5b85afd437 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,6 +52,7 @@
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
+#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -181,6 +182,7 @@
enum ice_feature {
ICE_F_DSCP,
+ ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
ICE_F_GNSS,
ICE_F_MAX
@@ -246,8 +248,6 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
- struct ice_vsi *dflt_vsi; /* default VSI for this switch */
- u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_pf_state {
@@ -544,8 +544,8 @@ struct ice_pf {
u32 msg_enable;
struct ice_ptp ptp;
struct tty_driver *ice_gnss_tty_driver;
- struct tty_port gnss_tty_port;
- struct gnss_serial *gnss_serial;
+ struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES];
+ struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES];
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 05cb9dd7035a..9939238573a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1395,7 +1395,7 @@ struct ice_aqc_get_link_topo {
u8 rsvd[9];
};
-/* Read I2C (direct, 0x06E2) */
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
struct ice_aqc_i2c {
struct ice_aqc_link_topo_addr topo_addr;
__le16 i2c_addr;
@@ -1405,7 +1405,7 @@ struct ice_aqc_i2c {
u8 rsvd;
__le16 i2c_bus_addr;
- u8 rsvd2[4];
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
};
/* Read I2C Response (direct, 0x06E2) */
@@ -2124,7 +2124,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
- struct ice_aqc_i2c read_i2c;
+ struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
} params;
};
@@ -2241,6 +2241,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
+ ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 9619bdb9e49a..27d0cbbd29da 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4823,7 +4823,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
- cmd = &desc.params.read_i2c;
+ cmd = &desc.params.read_write_i2c;
if (!data)
return -EINVAL;
@@ -4851,6 +4851,51 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_aq_write_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Write I2C (0x06E3)
+ *
+ * * Return:
+ * * 0 - Successful write to the i2c device
+ * * -EINVAL - Data size greater than 4 bytes
+ * * -EIO - FW error
+ */
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return -EINVAL;
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ memcpy(cmd->i2c_data, data, data_size);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
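
In ice_aq_write_i2c() above, the low nibble of `params` carries the number of bytes to write, extracted with FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params) and limited to the 4 data bytes the descriptor payload can hold. A stand-alone sketch of that mask-and-copy step using generic macros rather than the kernel's FIELD_GET(); the struct and names are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define I2C_DATA_SIZE_MASK  0x0Fu        /* bits [3:0]: bytes to write */
#define I2C_ADDR_TYPE_BIT   0x10u        /* bit  [4]  : address type   */

struct i2c_write_cmd {
        uint8_t params;
        uint8_t data[4];                 /* descriptor payload is 4 bytes */
};

static int build_write(struct i2c_write_cmd *cmd, uint8_t params,
                       const uint8_t *data)
{
        uint8_t size = params & I2C_DATA_SIZE_MASK;

        if (size > sizeof(cmd->data))
                return -1;               /* more than the payload can hold */

        cmd->params = params;
        memcpy(cmd->data, data, size);
        return 0;
}

int main(void)
{
        uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
        struct i2c_write_cmd cmd = { 0 };

        printf("build_write: %d (size %u)\n",
               build_write(&cmd, I2C_ADDR_TYPE_BIT | 3, payload),
               cmd.params & I2C_DATA_SIZE_MASK);
        return 0;
}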
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 872ea7d2332d..61b7c60db689 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -214,5 +214,9 @@ int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 61dd2f18dee8..b41bc3dc1745 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,6 +5,7 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 3991d62473bf..3337314a7b35 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -814,6 +814,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devlink_port_unregister(devlink_port);
}
+#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
@@ -840,8 +842,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- void *nvm_data;
- u32 nvm_size;
+ u8 *nvm_data, *tmp, i;
+ u32 nvm_size, left;
+ s8 num_blks;
int status;
nvm_size = hw->flash.flash_size;
@@ -849,26 +852,44 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
if (!nvm_data)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- vfree(nvm_data);
- return status;
- }
- status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
- if (status) {
- dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
- nvm_size, status, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
+ tmp = nvm_data;
+ left = nvm_size;
+
+ /* Some systems take longer to read the NVM than others which causes the
+ * FW to reclaim the NVM lock before the entire NVM has been read. Fix
+ * this by breaking the reads of the NVM into smaller chunks that will
+ * probably not take as long. This has some overhead since we are
+ * increasing the number of AQ commands, but it should always work
+ */
+ for (i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, tmp, false);
+ if (status) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ read_sz, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ice_release_nvm(hw);
+ vfree(nvm_data);
+ return -EIO;
+ }
ice_release_nvm(hw);
- vfree(nvm_data);
- return status;
- }
- ice_release_nvm(hw);
+ tmp += read_sz;
+ left -= read_sz;
+ }
*data = nvm_data;
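
The snapshot path above now reads the flash in 1 MiB blocks and re-acquires the NVM semaphore for every block, so firmware cannot reclaim the lock in the middle of one long read on a slow system. A simplified user-space analogue of that chunked-read loop; take_lock(), drop_lock() and read_block() are hypothetical stand-ins for the NVM helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK_SIZE (1024u * 1024u)

static int take_lock(void)  { return 0; }
static void drop_lock(void) { }
static int read_block(uint32_t off, uint32_t len, uint8_t *dst)
{
        (void)off;
        memset(dst, 0xa5, len);          /* pretend we read from flash */
        return 0;
}

static int snapshot(uint8_t *buf, uint32_t total)
{
        uint32_t left = total, off = 0;

        while (left) {
                uint32_t len = left < BLK_SIZE ? left : BLK_SIZE;

                if (take_lock())
                        return -1;
                if (read_block(off, len, buf + off)) {
                        drop_lock();     /* release before bailing out */
                        return -1;
                }
                drop_lock();             /* give firmware a chance in between */

                off  += len;
                left -= len;
        }
        return 0;
}

int main(void)
{
        static uint8_t buf[3 * BLK_SIZE + 123];

        printf("snapshot: %d\n", snapshot(buf, sizeof(buf)));
        return 0;
}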
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 6a463b242c7d..e35371e61e07 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -133,8 +133,8 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
- if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
rule_added = true;
}
@@ -151,7 +151,7 @@ err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -411,7 +411,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 70335f6e8524..a6fff8ebaf9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
rx_desc = ICE_RX_DESC(rx_ring, i);
if (!(rx_desc->wb.status_error0 &
- cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+ cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
rx_buf = &rx_ring->rx_buf[i];
@@ -1292,7 +1293,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* promiscuous mode because it's not supported
*/
if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
- ice_is_any_vf_in_promisc(pf)) {
+ ice_is_any_vf_in_unicast_promisc(pf)) {
dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
/* toggle bit back to previous state */
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 5d10c4f84a36..ead6d50fc0ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -852,7 +852,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1214,7 +1214,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index c73cdab44f70..4b3bb19e1d06 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1964,8 +1964,11 @@ ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
}
}
} while (fv);
- if (list_empty(fv_list))
+ if (list_empty(fv_list)) {
+ dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package");
return -EIO;
+ }
+
return 0;
err:
@@ -2639,7 +2642,7 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
*
* This function will either add or move a ptype to a particular PTG depending
* on if the ptype is already part of another group. Note that using a
- * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
static int
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 665a344fb9c0..3dc5662d62a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -736,7 +736,87 @@ static int ice_finalize_update(struct pldmfw *context)
return 0;
}
-static const struct pldmfw_ops ice_fwu_ops = {
+struct ice_pldm_pci_record_id {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+};
+
+/**
+ * ice_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: list of records extracted from the PLDM image
+ *
+ * Determine if the PCI device associated with this device matches the record
+ * data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+static bool
+ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct ice_pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ int *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+ /* A value of zero for one of the descriptors is sometimes
+ * used when the record should ignore this field when matching
+ * device. For example if the record applies to any subsystem
+ * device or vendor.
+ */
+ if (value)
+ *ptr = value;
+ else
+ *ptr = PCI_ANY_ID;
+ }
+
+ /* the E822 device can have a generic device ID so check for that */
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device ||
+ id.device == ICE_DEV_ID_E822_SI_DFLT) &&
+ (id.subsystem_vendor == PCI_ANY_ID ||
+ id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID ||
+ id.subsystem_device == pdev->subsystem_device))
+ return true;
+
+ return false;
+}
+
+static const struct pldmfw_ops ice_fwu_ops_e810 = {
.match_record = &pldmfw_op_pci_match_record,
.send_package_data = &ice_send_package_data,
.send_component_table = &ice_send_component_table,
@@ -744,6 +824,14 @@ static const struct pldmfw_ops ice_fwu_ops = {
.finalize_update = &ice_finalize_update,
};
+static const struct pldmfw_ops ice_fwu_ops_e822 = {
+ .match_record = &ice_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
/**
* ice_get_pending_updates - Check if the component has a pending update
* @pf: the PF driver structure
@@ -921,7 +1009,11 @@ int ice_devlink_flash_update(struct devlink *devlink,
memset(&priv, 0, sizeof(priv));
- priv.context.ops = &ice_fwu_ops;
+ /* the E822 device needs a slightly different ops */
+ if (hw->mac_type == ICE_MAC_GENERIC)
+ priv.context.ops = &ice_fwu_ops_e822;
+ else
+ priv.context.ops = &ice_fwu_ops_e810;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
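
ice_op_pci_match_record() above collects the PCI vendor/device/subsystem descriptors from the PLDM record, treats a zero descriptor value as a wildcard (PCI_ANY_ID), and additionally accepts the generic ICE_DEV_ID_E822_SI_DFLT device ID. A condensed sketch of that wildcard match; the ID values in main() are examples, not taken from a real record:

#include <stdbool.h>
#include <stdio.h>

#define ANY_ID         0xffffffffu
#define GENERIC_DEV_ID 0x1888u           /* E822 default/generic device ID */

struct pci_id { unsigned int vendor, device, sub_vendor, sub_device; };

/* a zero descriptor value in the record means "don't care" */
static unsigned int norm(unsigned int v) { return v ? v : ANY_ID; }

static bool match(const struct pci_id *rec, const struct pci_id *dev)
{
        unsigned int vendor = norm(rec->vendor), device = norm(rec->device);
        unsigned int sv = norm(rec->sub_vendor), sd = norm(rec->sub_device);

        return (vendor == ANY_ID || vendor == dev->vendor) &&
               (device == ANY_ID || device == dev->device ||
                device == GENERIC_DEV_ID) &&
               (sv == ANY_ID || sv == dev->sub_vendor) &&
               (sd == ANY_ID || sd == dev->sub_device);
}

int main(void)
{
        struct pci_id rec = { 0x8086, GENERIC_DEV_ID, 0, 0 };
        struct pci_id dev = { 0x8086, 0x124c, 0x8086, 0x0001 };

        printf("match: %d\n", match(&rec, &dev));
        return 0;
}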
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 57586a2e6dec..b5a7f246d230 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -1,11 +1,104 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018-2021, Intel Corporation. */
+/* Copyright (C) 2021-2022, Intel Corporation. */
#include "ice.h"
#include "ice_lib.h"
#include <linux/tty_driver.h>
/**
+ * ice_gnss_do_write - Write data to internal GNSS
+ * @pf: board private structure
+ * @buf: command buffer
+ * @size: command buffer size
+ *
+ * Write UBX command data to the GNSS receiver
+ */
+static unsigned int
+ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ struct ice_hw *hw = &pf->hw;
+ unsigned int offset = 0;
+ int err = 0;
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ /* It's not possible to write a single byte to u-blox.
+ * Write all bytes in a loop until there are 6 or less bytes left. If
+ * there are exactly 6 bytes left, the last write would be only a byte.
+ * In this case, do 4+2 bytes writes instead of 5+1. Otherwise, do the
+ * last 2 to 5 bytes write.
+ */
+ while (size - offset > ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES;
+ }
+
+ /* Single byte would be written. Write 4 bytes instead of 5. */
+ if (size - offset == ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES - 1;
+ }
+
+ /* Do the last write, 2 to 5 bytes. */
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]), size - offset - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ return size;
+
+err_out:
+ dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
+ offset, size, err);
+
+ return offset;
+}
+
+/**
+ * ice_gnss_write_pending - Write all pending data to internal GNSS
+ * @work: GNSS write work structure
+ */
+static void ice_gnss_write_pending(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ write_work);
+ struct ice_pf *pf = gnss->back;
+
+ if (!list_empty(&gnss->queue)) {
+ struct gnss_write_buf *write_buf = NULL;
+ unsigned int bytes;
+
+ write_buf = list_first_entry(&gnss->queue,
+ struct gnss_write_buf, queue);
+
+ bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
+ dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
+
+ list_del(&write_buf->queue);
+ kfree(write_buf->buf);
+ kfree(write_buf);
+ }
+}
+
+/**
* ice_gnss_read - Read data from internal GNSS module
* @work: GNSS read work structure
*
@@ -17,13 +110,13 @@ static void ice_gnss_read(struct kthread_work *work)
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
read_work.work);
struct ice_aqc_link_topo_addr link_topo;
- u8 i2c_params, bytes_read;
+ unsigned int i, bytes_read, data_len;
struct tty_port *port;
struct ice_pf *pf;
struct ice_hw *hw;
__be16 data_len_b;
char *buf = NULL;
- u16 i, data_len;
+ u8 i2c_params;
int err = 0;
pf = gnss->back;
@@ -65,7 +158,7 @@ static void ice_gnss_read(struct kthread_work *work)
mdelay(10);
}
- data_len = min(data_len, (u16)PAGE_SIZE);
+ data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
data_len = tty_buffer_request_room(port, data_len);
if (!data_len) {
err = -ENOMEM;
@@ -74,9 +167,10 @@ static void ice_gnss_read(struct kthread_work *work)
/* Read received data */
for (i = 0; i < data_len; i += bytes_read) {
- u16 bytes_left = data_len - i;
+ unsigned int bytes_left = data_len - i;
- bytes_read = min_t(typeof(bytes_left), bytes_left, ICE_MAX_I2C_DATA_SIZE);
+ bytes_read = min_t(typeof(bytes_left), bytes_left,
+ ICE_MAX_I2C_DATA_SIZE);
err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
@@ -103,8 +197,9 @@ exit:
/**
* ice_gnss_struct_init - Initialize GNSS structure for the TTY
* @pf: Board private structure
+ * @index: TTY device index
*/
-static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
{
struct device *dev = ice_pf_to_dev(pf);
struct kthread_worker *kworker;
@@ -117,9 +212,11 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
mutex_init(&gnss->gnss_mutex);
gnss->open_count = 0;
gnss->back = pf;
- pf->gnss_serial = gnss;
+ pf->gnss_serial[index] = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
+ INIT_LIST_HEAD(&gnss->queue);
+ kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
/* Allocate a kworker for handling work required for the GNSS TTY
* writes.
*/
@@ -155,10 +252,10 @@ static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = NULL;
/* Get the serial object associated with this tty pointer */
- gnss = pf->gnss_serial;
+ gnss = pf->gnss_serial[tty->index];
if (!gnss) {
/* Initialize GNSS struct on the first device open */
- gnss = ice_gnss_struct_init(pf);
+ gnss = ice_gnss_struct_init(pf, tty->index);
if (!gnss)
return -ENOMEM;
}
@@ -211,25 +308,100 @@ exit:
}
/**
- * ice_gnss_tty_write - Dummy TTY write function to avoid kernel panic
+ * ice_gnss_tty_write - Write GNSS data
* @tty: pointer to the tty_struct
* @buf: pointer to the user data
- * @cnt: the number of characters that was able to be sent to the hardware (or
- * queued to be sent at a later time)
+ * @count: the number of characters queued to be sent to the HW
+ *
+ * The write function call is called by the user when there is data to be sent
+ * to the hardware. First the tty core receives the call, and then it passes the
+ * data on to the tty driver's write function. The tty core also tells the tty
+ * driver the size of the data being sent.
+ * If any errors happen during the write call, a negative error value should be
+ * returned instead of the number of characters queued to be written.
*/
static int
-ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int cnt)
+ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
- return 0;
+ struct gnss_write_buf *write_buf;
+ struct gnss_serial *gnss;
+ unsigned char *cmd_buf;
+ struct ice_pf *pf;
+ int err = count;
+
+ /* We cannot write a single byte using our I2C implementation. */
+ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
+ return -EINVAL;
+
+ gnss = tty->driver_data;
+ if (!gnss)
+ return -EFAULT;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return -EFAULT;
+
+ /* Only allow to write on TTY 0 */
+ if (gnss != pf->gnss_serial[0])
+ return -EIO;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
+ if (!cmd_buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(cmd_buf, buf, count);
+
+ /* Send the data out to a hardware port */
+ write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
+ if (!write_buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ write_buf->buf = cmd_buf;
+ write_buf->size = count;
+ INIT_LIST_HEAD(&write_buf->queue);
+ list_add_tail(&write_buf->queue, &gnss->queue);
+ kthread_queue_work(gnss->kworker, &gnss->write_work);
+exit:
+ mutex_unlock(&gnss->gnss_mutex);
+ return err;
}
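For illustration only, not part of the patch: given the constraints above (2..250 bytes, writes accepted only on the first minor), a minimal user-space sketch that pushes a u-blox command through this write path could look as follows. The device node name is hypothetical; the driver derives the real one from the PCI address.

#include <fcntl.h>
#include <unistd.h>

/* Hedged sketch: write one UBX command (2..250 bytes) to the RW GNSS TTY */
static int send_ubx_cmd(const unsigned char *cmd, size_t len)
{
	int fd = open("/dev/ttyGNSS_XXXX_0", O_RDWR | O_NOCTTY);	/* hypothetical node */
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, len);	/* the driver returns a negative errno on failure */
	close(fd);
	return n == (ssize_t)len ? 0 : -1;
}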
/**
- * ice_gnss_tty_write_room - Dummy TTY write_room function to avoid kernel panic
+ * ice_gnss_tty_write_room - Returns the number of characters to be written.
* @tty: pointer to the tty_struct
+ *
+ * This routine returns the number of characters the tty driver will accept
+ * for queuing to be written, or 0 if either the TTY is not open or the user
+ * tries to write to a TTY other than the first one.
*/
static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
{
- return 0;
+ struct gnss_serial *gnss = tty->driver_data;
+
+ /* Only allow writing on TTY 0 */
+ if (!gnss || gnss != gnss->back->gnss_serial[0])
+ return 0;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ mutex_unlock(&gnss->gnss_mutex);
+ return 0;
+ }
+
+ mutex_unlock(&gnss->gnss_mutex);
+ return ICE_GNSS_TTY_WRITE_BUF;
}
static const struct tty_operations tty_gps_ops = {
@@ -249,11 +421,13 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
const int ICE_TTYDRV_NAME_MAX = 14;
struct tty_driver *tty_driver;
char *ttydrv_name;
+ unsigned int i;
int err;
- tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
+ tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES,
+ TTY_DRIVER_REAL_RAW);
if (IS_ERR(tty_driver)) {
- dev_err(ice_pf_to_dev(pf), "Failed to allocate memory for GNSS TTY\n");
+ dev_err(dev, "Failed to allocate memory for GNSS TTY\n");
return NULL;
}
@@ -283,23 +457,32 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
tty_driver->driver_state = pf;
tty_set_operations(tty_driver, &tty_gps_ops);
- pf->gnss_serial = NULL;
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
+ GFP_KERNEL);
+ pf->gnss_serial[i] = NULL;
- tty_port_init(&pf->gnss_tty_port);
- tty_port_link_device(&pf->gnss_tty_port, tty_driver, 0);
+ tty_port_init(pf->gnss_tty_port[i]);
+ tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i);
+ }
err = tty_register_driver(tty_driver);
if (err) {
- dev_err(ice_pf_to_dev(pf), "Failed to register TTY driver err=%d\n",
- err);
+ dev_err(dev, "Failed to register TTY driver err=%d\n", err);
- tty_port_destroy(&pf->gnss_tty_port);
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
kfree(ttydrv_name);
tty_driver_kref_put(pf->ice_gnss_tty_driver);
return NULL;
}
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
+ dev_info(dev, "%s%d registered\n", ttydrv_name, i);
+
return tty_driver;
}
@@ -327,17 +510,25 @@ void ice_gnss_init(struct ice_pf *pf)
*/
void ice_gnss_exit(struct ice_pf *pf)
{
+ unsigned int i;
+
if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
return;
- tty_port_destroy(&pf->gnss_tty_port);
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ if (pf->gnss_tty_port[i]) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
- if (pf->gnss_serial) {
- struct gnss_serial *gnss = pf->gnss_serial;
+ if (pf->gnss_serial[i]) {
+ struct gnss_serial *gnss = pf->gnss_serial[i];
- kthread_cancel_delayed_work_sync(&gnss->read_work);
- kfree(gnss);
- pf->gnss_serial = NULL;
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kfree(gnss);
+ pf->gnss_serial[i] = NULL;
+ }
}
tty_unregister_driver(pf->ice_gnss_tty_driver);
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 9211adb2372c..f454dd1d9285 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
+/* Copyright (C) 2021-2022, Intel Corporation. */
#ifndef _ICE_GNSS_H_
#define _ICE_GNSS_H_
@@ -8,14 +8,34 @@
#include <linux/tty_flip.h>
#define ICE_E810T_GNSS_I2C_BUS 0x2
+#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
+/* Create 2 minor devices, both using the same GNSS module. First one is RW,
+ * second one RO.
+ */
+#define ICE_GNSS_TTY_MINOR_DEVICES 2
+#define ICE_GNSS_TTY_WRITE_BUF 250
+#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+#define ICE_MAX_I2C_WRITE_BYTES 4
+
+/* u-blox ZED-F9T specific definitions */
#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
/* Data length register is big endian */
#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
-#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
-#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+/* For u-blox, writes are performed without a register address, so the first
+ * byte to write is passed as the I2C addr parameter.
+ */
+#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
#define ICE_MAX_UBX_READ_TRIES 255
+#define ICE_MAX_UBX_ACK_READ_TRIES 4095
+
+struct gnss_write_buf {
+ struct list_head queue;
+ unsigned int size;
+ unsigned char *buf;
+};
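A rough, hypothetical reading of the comment above, not the driver's actual implementation: each queued buffer would go out in small transfers, with the first byte of every chunk carried in the I2C address field and up to ICE_MAX_I2C_WRITE_BYTES bytes in the data field (ICE_GNSS_UBX_WRITE_BYTES per transfer in total). The AQ send call is only hinted at in a comment.

/* Hypothetical sketch of the u-blox write framing described above */
static void frame_ubx_write(const struct gnss_write_buf *w)
{
	unsigned int offset = 0;

	while (offset < w->size) {
		/* first payload byte rides in the I2C addr field, no register address */
		unsigned int data_len = min_t(unsigned int, w->size - offset - 1,
					      ICE_MAX_I2C_WRITE_BYTES);

		/* hypothetical: aq_i2c_write(w->buf[offset], &w->buf[offset + 1], data_len); */
		offset += data_len + 1;
	}
}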
+
/**
* struct gnss_serial - data used to initialize GNSS TTY port
@@ -25,6 +45,8 @@
* @gnss_mutex: gnss_mutex used to protect GNSS serial operations
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
+ * @write_work: write_work function for handling GNSS writes
+ * @queue: write buffers queue
*/
struct gnss_serial {
struct ice_pf *back;
@@ -33,6 +55,8 @@ struct gnss_serial {
struct mutex gnss_mutex; /* protects GNSS serial structure */
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
+ struct kthread_work write_work;
+ struct list_head queue;
};
#if IS_ENABLED(CONFIG_TTY)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4f954db01b92..c9f7393b783d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -447,11 +447,9 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- if (lag->upper_netdev)
- dev_put(lag->upper_netdev);
+ dev_put(lag->upper_netdev);
- if (lag->peer_netdev)
- dev_put(lag->peer_netdev);
+ dev_put(lag->peer_netdev);
kfree(lag);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index f7f9c973ec54..733c455f6574 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -887,6 +887,9 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ ctxt->info.outer_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
}
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
@@ -2419,7 +2422,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
agg_id);
return;
}
- /* aggregator node is created, store the neeeded info */
+ /* aggregator node is created, store the needed info */
agg_node->valid = true;
agg_node->agg_id = agg_id;
}
@@ -3003,8 +3006,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi))
+ ice_clear_dflt_vsi(vsi);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -3178,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
ice_vsi_init_vlan_ops(vsi);
@@ -3687,116 +3690,97 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
- * @sw: switch to check if its default forwarding VSI is free
+ * @pi: port info of the switch with default VSI
*
- * Return true if the default forwarding VSI is already being used, else returns
- * false signalling that it's available to use.
+ * Return true if there is any VSI in the default forwarding VSI list
*/
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
- return (sw->dflt_vsi && sw->dflt_vsi_ena);
+ bool exists = false;
+
+ ice_check_if_dflt_vsi(pi, 0, &exists);
+ return exists;
}
/**
* ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
- * @sw: switch for the default forwarding VSI to compare against
* @vsi: VSI to compare against default forwarding VSI
*
* If this VSI passed in is the default forwarding VSI then return true, else
* return false
*/
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
- return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+ return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}
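Taken together, a hedged sketch of how the reworked helpers compose, mirroring the promiscuous-mode handling updated later in ice_main.c (the wrapper name is made up for illustration):

/* Sketch only: toggle the default-VSI Rx rule through the new VSI-based API */
static int sketch_toggle_dflt_rx(struct ice_vsi *vsi, bool want_promisc)
{
	if (want_promisc && !ice_is_dflt_vsi_in_use(vsi->port_info))
		return ice_set_dflt_vsi(vsi);
	if (!want_promisc && ice_is_vsi_dflt_vsi(vsi))
		return ice_clear_dflt_vsi(vsi);
	return 0;
}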
/**
* ice_set_dflt_vsi - set the default forwarding VSI
- * @sw: switch used to assign the default forwarding VSI
* @vsi: VSI getting set as the default forwarding VSI on the switch
*
* If the VSI passed in is already the default VSI and it's enabled just return
* success.
*
- * If there is already a default VSI on the switch and it's enabled then return
- * -EEXIST since there can only be one default VSI per switch.
- *
- * Otherwise try to set the VSI passed in as the switch's default VSI and
- * return the result.
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
*/
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
struct device *dev;
int status;
- if (!sw || !vsi)
+ if (!vsi)
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
/* the VSI passed in is already the default VSI */
- if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
vsi->vsi_num);
return 0;
}
- /* another VSI is already the default VSI for this switch */
- if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
- sw->dflt_vsi->vsi_num);
- return -EEXIST;
- }
-
- status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return status;
}
- sw->dflt_vsi = vsi;
- sw->dflt_vsi_ena = true;
-
return 0;
}
/**
* ice_clear_dflt_vsi - clear the default forwarding VSI
- * @sw: switch used to clear the default VSI
+ * @vsi: VSI to remove from filter list
*
* If the switch has no default VSI or it's not enabled then return error.
*
* Otherwise try to clear the default VSI and return the result.
*/
-int ice_clear_dflt_vsi(struct ice_sw *sw)
+int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
- struct ice_vsi *dflt_vsi;
struct device *dev;
int status;
- if (!sw)
+ if (!vsi)
return -EINVAL;
- dev = ice_pf_to_dev(sw->pf);
-
- dflt_vsi = sw->dflt_vsi;
+ dev = ice_pf_to_dev(vsi->back);
/* there is no default VSI configured */
- if (!ice_is_dflt_vsi_in_use(sw))
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info))
return -ENODEV;
- status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ vsi->vsi_num, status);
return -EIO;
}
- sw->dflt_vsi = NULL;
- sw->dflt_vsi_ena = false;
-
return 0;
}
@@ -4078,7 +4062,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
if (err && err != -EEXIST)
return err;
- return 0;
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
@@ -4179,6 +4167,7 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
+ ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
if (ice_is_e810t(&pf->hw)) {
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
if (ice_gnss_is_gps_present(&pf->hw))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 0095329949d4..8712b1d2ceec 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -102,13 +102,10 @@ int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
-
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_clear_dflt_vsi(struct ice_sw *sw);
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
+int ice_set_dflt_vsi(struct ice_vsi *vsi);
+int ice_clear_dflt_vsi(struct ice_vsi *vsi);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c1ac2f746714..4ecaf40cf946 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- return status;
+ return 0;
}
/**
@@ -410,8 +412,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
- err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
+ err = ice_set_dflt_vsi(vsi);
if (err && err != -EEXIST) {
netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -424,8 +426,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* Clear Rx filter to remove traffic from wire */
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
- err = ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi)) {
+ err = ice_clear_dflt_vsi(vsi);
if (err) {
netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -433,7 +435,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->current_netdev_flags &
+ if (vsi->netdev->features &
NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_ops->ena_rx_filtering(vsi);
}
@@ -3358,6 +3360,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
@@ -3572,6 +3575,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
@@ -4656,6 +4667,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw);
}
+ hw->ucast_shared = true;
+
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5313,12 +5326,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_aer_clear_nonfatal_status(pdev);
- if (err)
- dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
- err);
- /* non-fatal, continue */
-
return result;
}
@@ -5413,6 +5420,7 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
/* required last entry */
{ 0, }
};
@@ -5916,6 +5924,32 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev);
+ int ret;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
+ return ret;
+ }
+ }
+ ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ ret = ice_up(vsi);
+
+ return ret;
+}
+
+/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -5923,44 +5957,41 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
- if (ice_is_safe_mode(vsi->back)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
+ if (ice_is_safe_mode(pf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(pf),
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
- if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
- ice_vsi_manage_rss_lut(vsi, true);
- else if (!(features & NETIF_F_RXHASH) &&
- netdev->features & NETIF_F_RXHASH)
- ice_vsi_manage_rss_lut(vsi, false);
+ if (changed & NETIF_F_RXHASH)
+ ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
ret = ice_set_vlan_features(netdev, features);
if (ret)
return ret;
- if ((features & NETIF_F_NTUPLE) &&
- !(netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, true);
- ice_init_arfs(vsi);
- } else if (!(features & NETIF_F_NTUPLE) &&
- (netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, false);
- ice_clear_arfs(vsi);
+ if (changed & NETIF_F_NTUPLE) {
+ bool ena = !!(features & NETIF_F_NTUPLE);
+
+ ice_vsi_manage_fdir(vsi, ena);
+ ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
}
/* don't turn off hw_tc_offload when ADQ is already enabled */
@@ -5969,13 +6000,17 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return -EACCES;
}
- if ((features & NETIF_F_HW_TC) &&
- !(netdev->features & NETIF_F_HW_TC))
- set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- else
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ if (changed & NETIF_F_HW_TC) {
+ bool ena = !!(features & NETIF_F_HW_TC);
- return 0;
+ ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+
+ return ret;
}
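The rework above keys every branch off the XOR of the old and new feature sets, so one ndo_set_features() call can flip several independent bits; a minimal standalone sketch of the same pattern (the helper and the printed messages are illustrative only):

#include <linux/netdev_features.h>
#include <linux/printk.h>

/* Sketch: act only on feature bits that actually changed */
static void sketch_apply_features(netdev_features_t old, netdev_features_t new)
{
	netdev_features_t changed = old ^ new;

	if (changed & NETIF_F_RXHASH)
		pr_info("RXHASH is now %s\n", (new & NETIF_F_RXHASH) ? "on" : "off");
	if (changed & NETIF_F_LOOPBACK)
		pr_info("LOOPBACK is now %s\n", (new & NETIF_F_LOOPBACK) ? "on" : "off");
}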
/**
@@ -6010,10 +6045,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_vsi_vlan_setup(vsi);
+ if (vsi->type != ICE_VSI_LB) {
+ err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -6995,12 +7032,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
- if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
- /* clear the default VSI configuration if it exists */
- pf->first_sw->dflt_vsi = NULL;
- pf->first_sw->dflt_vsi_ena = false;
-
ice_clear_pxe_mode(hw);
err = ice_init_nvm(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 3f64300b0e14..560efc7654c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -43,6 +43,9 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_PPPOE,
+ ICE_VLAN_EX,
+ ICE_VLAN_IN,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -107,15 +110,21 @@ enum ice_prot_id {
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
+#define ICE_PPPOE_HW 103
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
-#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
+
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_VLAN_FLAG_MDID 20
+#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+
#define ICE_TUN_FLAG_FV_IND 2
/* Mapping of software defined protocol ID to hardware defined protocol ID */
@@ -200,6 +209,14 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
+struct ice_pppoe_hdr {
+ u8 rsrvd_ver_type;
+ u8 rsrvd_code;
+ __be16 session_id;
+ __be16 length;
+ __be16 ppp_prot_id; /* control and data only */
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -217,6 +234,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pppoe_hdr pppoe_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ef9344ef0d8e..72b663108a4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1102,9 +1102,8 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- u64 freq, divisor = 1000000ULL;
struct ice_hw *hw = &pf->hw;
- s64 incval, diff;
+ u64 incval, diff;
int neg_adj = 0;
int err;
@@ -1115,17 +1114,8 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
- /* handle overflow by scaling down the scaled_ppm and
- * the divisor, losing some precision
- */
- scaled_ppm >>= 2;
- divisor >>= 2;
- }
-
- freq = (incval * (u64)scaled_ppm) >> 16;
- diff = div_u64(freq, divisor);
-
+ diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
incval -= diff;
else
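For reference, the new expression computes diff = incval * scaled_ppm / (1,000,000 << 16) with a 128-bit intermediate product, which is why the old scale-down-on-overflow loop can go away; a hedged standalone sketch of just that step:

#include <linux/math64.h>

/* Sketch: PTP frequency-adjustment delta without manual overflow handling */
static u64 sketch_adjfine_diff(u64 incval, u64 scaled_ppm)
{
	return mul_u64_u64_div_u64(incval, scaled_ppm, 1000000ULL << 16);
}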
@@ -1900,9 +1890,12 @@ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
info->n_per_out = N_PER_OUT_E810T;
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
+
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+ }
/* Complete setup of the SMA pins */
ice_ptp_setup_sma_pins_e810t(pf, info);
@@ -1910,11 +1903,16 @@ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
-static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
+static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->n_per_out = N_PER_OUT_E810;
+
+ if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ return;
+
info->n_ext_ts = N_EXT_TS_E810;
}
@@ -1956,7 +1954,7 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
if (ice_is_e810t(&pf->hw))
ice_ptp_setup_pins_e810t(pf, info);
else
- ice_ptp_setup_pins_e810(info);
+ ice_ptp_setup_pins_e810(pf, info);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index bb1721f1321d..3ba1408c56a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1310,39 +1310,6 @@ out_put_vf:
}
/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
- struct ice_sw_recipe *mac_recipe_list =
- &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *rule_head;
- struct mutex *rule_lock; /* protect MAC filter list access */
-
- rule_head = &mac_recipe_list->filt_rules;
- rule_lock = &mac_recipe_list->filt_rule_lock;
-
- mutex_lock(rule_lock);
- list_for_each_entry(list_itr, rule_head, list_entry) {
- u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(existing_mac, umac)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
-
- mutex_unlock(rule_lock);
-
- return false;
-}
-
-/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ret)
goto out_put_vf;
- if (ice_unicast_mac_exists(pf, mac)) {
- netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
- mac, vf_id, mac);
- ret = -EINVAL;
- goto out_put_vf;
- }
-
mutex_lock(&vf->cfg_lock);
/* VF is notified of its new MAC via the PF's response to the
@@ -1593,16 +1553,6 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
goto out_put_vf;
}
- /* when max_tx_rate is zero that means no max Tx rate limiting, so only
- * check if max_tx_rate is non-zero
- */
- if (max_tx_rate && min_tx_rate > max_tx_rate) {
- dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
- min_tx_rate, max_tx_rate);
- ret = -EINVAL;
- goto out_put_vf;
- }
-
if (min_tx_rate && ice_is_dcb_active(pf)) {
dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
ret = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 8d8f3eec79ee..3808034f7e7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -31,16 +31,17 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
enum {
- ICE_PKT_VLAN = BIT(0),
- ICE_PKT_OUTER_IPV6 = BIT(1),
- ICE_PKT_TUN_GTPC = BIT(2),
- ICE_PKT_TUN_GTPU = BIT(3),
- ICE_PKT_TUN_NVGRE = BIT(4),
- ICE_PKT_TUN_UDP = BIT(5),
- ICE_PKT_INNER_IPV6 = BIT(6),
- ICE_PKT_INNER_TCP = BIT(7),
- ICE_PKT_INNER_UDP = BIT(8),
- ICE_PKT_GTP_NOPAY = BIT(9),
+ ICE_PKT_OUTER_IPV6 = BIT(0),
+ ICE_PKT_TUN_GTPC = BIT(1),
+ ICE_PKT_TUN_GTPU = BIT(2),
+ ICE_PKT_TUN_NVGRE = BIT(3),
+ ICE_PKT_TUN_UDP = BIT(4),
+ ICE_PKT_INNER_IPV6 = BIT(5),
+ ICE_PKT_INNER_TCP = BIT(6),
+ ICE_PKT_INNER_UDP = BIT(7),
+ ICE_PKT_GTP_NOPAY = BIT(8),
+ ICE_PKT_KMALLOC = BIT(9),
+ ICE_PKT_PPPOE = BIT(10),
};
struct ice_dummy_pkt_offsets {
@@ -53,22 +54,42 @@ struct ice_dummy_pkt_profile {
const u8 *pkt;
u32 match;
u16 pkt_len;
+ u16 offsets_len;
};
-#define ICE_DECLARE_PKT_OFFSETS(type) \
- static const struct ice_dummy_pkt_offsets \
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
ice_dummy_##type##_packet_offsets[]
-#define ICE_DECLARE_PKT_TEMPLATE(type) \
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
static const u8 ice_dummy_##type##_packet[]
-#define ICE_PKT_PROFILE(type, m) { \
- .match = (m), \
- .pkt = ice_dummy_##type##_packet, \
- .pkt_len = sizeof(ice_dummy_##type##_packet), \
- .offsets = ice_dummy_##type##_packet_offsets, \
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+ .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
}
+ICE_DECLARE_PKT_OFFSETS(vlan) = {
+ { ICE_VLAN_OFOS, 12 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(vlan) = {
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+};
+
+ICE_DECLARE_PKT_OFFSETS(qinq) = {
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(qinq) = {
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+};
+
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -506,38 +527,6 @@ ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_UDP_ILOS, 38 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (801.1Q), IPv4:UDP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
-
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x11, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
@@ -570,41 +559,6 @@ ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_TCP_IL, 38 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (801.1Q), IPv4:TCP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
-
- 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x06, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -640,46 +594,6 @@ ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + TCP */
-ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_TCP_IL, 58 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
@@ -717,43 +631,6 @@ ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + UDP */
-ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_UDP_ILOS, 58 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
-
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
@@ -1233,6 +1110,154 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_TCP_IL, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_UDP_ILOS, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_TCP_IL, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_UDP_ILOS, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
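Note the bytes at the "PPP Link Layer" offset in the templates above: 0x00,0x21 and 0x00,0x57 are the standard PPP protocol IDs for IPv4 and IPv6 (PPP_IP and PPP_IPV6 from <linux/ppp_defs.h>), the same values the PPPoE handling in ice_find_dummy_packet() keys on; a tiny illustrative check (helper name made up):

#include <linux/ppp_defs.h>
#include <linux/types.h>

/* Illustrative only: the PPP protocol IDs embedded in the PPPoE templates */
static inline bool sketch_ppp_ids_as_expected(void)
{
	return PPP_IP == 0x21 && PPP_IPV6 == 0x57;
}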
+
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
ICE_PKT_GTP_NOPAY),
@@ -1259,6 +1284,11 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
ICE_PKT_INNER_TCP),
ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
@@ -1271,14 +1301,9 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6),
ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
- ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
- ICE_PKT_VLAN),
ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
- ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
- ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
- ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
ICE_PKT_PROFILE(tcp, 0),
};
@@ -1737,7 +1762,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
sw_buf->res_type =
@@ -2230,8 +2256,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@@ -2666,7 +2690,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -3848,7 +3873,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
- * @hw: pointer to the hardware structure
+ * @pi: pointer to the port_info structure
* @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
@@ -3856,25 +3881,20 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info f_info;
- enum ice_adminq_opc opcode;
- u16 s_rule_size;
+ struct ice_hw *hw = pi->hw;
u16 hw_vsi_id;
int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&f_info, 0, sizeof(f_info));
@@ -3882,54 +3902,80 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = le16_to_cpu(s_rule->index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
+
+ return status;
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: VSI handle to check for in the filter list
+ * @rule_exists: set to true if there are any VSIs in the rule list
+ *
+ * Checks if the VSI is in the default VSI rule list and also indicates,
+ * via @rule_exists, whether that list is non-empty.
+ */
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_sw_recipe *recp_list;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ bool ret = false;
+
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ mutex_lock(rule_lock);
+
+ if (rule_exists && !list_empty(rule_head))
+ *rule_exists = true;
+
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
}
}
-out:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
+ mutex_unlock(rule_lock);
+
+ return ret;
}
/**
@@ -4049,21 +4095,6 @@ int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
- * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
- * @fm_entry: filter entry to inspect
- * @vsi_handle: VSI handle to compare with filter info
- */
-static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
-{
- return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- fm_entry->fltr_info.vsi_handle == vsi_handle) ||
- (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
- fm_entry->vsi_list_info &&
- (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
-}
-
-/**
* ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -4414,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4421,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -4609,6 +4647,9 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, { 0, 2, 4, 6 } },
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
+ { ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_VLAN_EX, { 2, 0 } },
+ { ICE_VLAN_IN, { 2, 0 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
@@ -4629,6 +4670,9 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_VLAN_EX, ICE_VLAN_OF_HW },
+ { ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
/**
@@ -4934,7 +4978,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+ bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
@@ -5313,10 +5357,11 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* ice_add_special_words - Add words that are not protocols, such as metadata
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
+ * @dvm_ena: is double VLAN mode enabled
*/
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts)
+ struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
{
u16 mask;
@@ -5335,6 +5380,19 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
}
}
+ if (rinfo->vlan_type != 0 && dvm_ena) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] =
+ ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
+ } else {
+ return -ENOSPC;
+ }
+ }
+
return 0;
}
@@ -5454,7 +5512,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, lkup_exts);
+ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
goto err_free_lkup_exts;
@@ -5555,6 +5613,79 @@ err_free_lkup_exts:
}
/**
+ * ice_dummy_packet_add_vlan - insert VLAN header(s) into a dummy packet
+ *
+ * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
+ * @num_vlan: number of VLAN tags
+ */
+static struct ice_dummy_pkt_profile *
+ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
+ u32 num_vlan)
+{
+ struct ice_dummy_pkt_profile *profile;
+ struct ice_dummy_pkt_offsets *offsets;
+ u32 buf_len, off, etype_off, i;
+ u8 *pkt;
+
+ if (num_vlan < 1 || num_vlan > 2)
+ return ERR_PTR(-EINVAL);
+
+ off = num_vlan * VLAN_HLEN;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
+ dummy_pkt->offsets_len;
+ offsets = kzalloc(buf_len, GFP_KERNEL);
+ if (!offsets)
+ return ERR_PTR(-ENOMEM);
+
+ offsets[0] = dummy_pkt->offsets[0];
+ if (num_vlan == 2) {
+ offsets[1] = ice_dummy_qinq_packet_offsets[0];
+ offsets[2] = ice_dummy_qinq_packet_offsets[1];
+ } else if (num_vlan == 1) {
+ offsets[1] = ice_dummy_vlan_packet_offsets[0];
+ }
+
+ for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
+ offsets[i + num_vlan].offset =
+ dummy_pkt->offsets[i].offset + off;
+ }
+ offsets[i + num_vlan] = dummy_pkt->offsets[i];
+
+ etype_off = dummy_pkt->offsets[1].offset;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
+ dummy_pkt->pkt_len;
+ pkt = kzalloc(buf_len, GFP_KERNEL);
+ if (!pkt) {
+ kfree(offsets);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(pkt, dummy_pkt->pkt, etype_off);
+ memcpy(pkt + etype_off,
+ num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
+ off);
+ memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
+ dummy_pkt->pkt_len - etype_off);
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ kfree(offsets);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ profile->offsets = offsets;
+ profile->pkt = pkt;
+ profile->pkt_len = buf_len;
+ profile->match |= ICE_PKT_KMALLOC;
+
+ return profile;
+}
+
+/**
* ice_find_dummy_packet - find dummy packet
*
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5569,7 +5700,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
enum ice_sw_tunnel_type tun_type)
{
const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
- u32 match = 0;
+ u32 match = 0, vlan_count = 0;
u16 i;
switch (tun_type) {
@@ -5597,8 +5728,11 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
match |= ICE_PKT_OUTER_IPV6;
- else if (lkups[i].type == ICE_VLAN_OFOS)
- match |= ICE_PKT_VLAN;
+ else if (lkups[i].type == ICE_VLAN_OFOS ||
+ lkups[i].type == ICE_VLAN_EX)
+ vlan_count++;
+ else if (lkups[i].type == ICE_VLAN_IN)
+ vlan_count++;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
@@ -5615,11 +5749,20 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_GTP_NO_PAY)
match |= ICE_PKT_GTP_NOPAY;
+ else if (lkups[i].type == ICE_PPPOE) {
+ match |= ICE_PKT_PPPOE;
+ if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
+ htons(PPP_IPV6))
+ match |= ICE_PKT_OUTER_IPV6;
+ }
}
while (ret->match && (match & ret->match) != ret->match)
ret++;
+ if (vlan_count != 0)
+ ret = ice_dummy_packet_add_vlan(ret, vlan_count);
+
return ret;
}
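The profile table is ordered from most to least specific, and the loop above keeps advancing until every bit a candidate requires is present in the accumulated match; for instance, a PPPoE lookup with an IPv6 PPP protocol ID plus an inner UDP element accumulates ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP and therefore lands on the pppoe_ipv6_udp entry. A hedged sketch of the subset test itself (helper name made up):

#include <linux/types.h>

/* Sketch: a profile is selected only when all of its required bits are set */
static bool sketch_profile_matches(u32 match, u32 required)
{
	return (match & required) == required;
}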
@@ -5678,6 +5821,8 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
+ case ICE_VLAN_EX:
+ case ICE_VLAN_IN:
len = sizeof(struct ice_vlan_hdr);
break;
case ICE_IPV4_OFOS:
@@ -5707,6 +5852,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
+ case ICE_PPPOE:
+ len = sizeof(struct ice_pppoe_hdr);
+ break;
default:
return -EINVAL;
}
@@ -5783,6 +5931,36 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
/**
+ * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @vlan_type: VLAN tag type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static int
+ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 i;
+
+ /* Find VLAN header and insert VLAN TPID */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_VLAN_OFOS ||
+ offsets[i].type == ICE_VLAN_EX) {
+ struct ice_vlan_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_vlan_hdr *)&pkt[offset];
+ hdr->type = cpu_to_be16(vlan_type);
+
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
* ice_find_adv_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5817,6 +5995,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
rinfo->tun_type == list_itr->rule_info.tun_type &&
+ rinfo->vlan_type == list_itr->rule_info.vlan_type &&
lkups_matched)
return list_itr;
}
@@ -5993,16 +6172,22 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* locate a dummy packet */
profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return -EIO;
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ status = -EIO;
+ goto free_pkt_profile;
+ }
vsi_handle = rinfo->sw_act.vsi_handle;
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return -EINVAL;
+ if (!ice_is_vsi_valid(hw, vsi_handle)) {
+ status = -EINVAL;
+ goto free_pkt_profile;
+ }
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -6012,7 +6197,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
if (status)
- return status;
+ goto free_pkt_profile;
m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
if (m_entry) {
/* we have to add VSI to VSI_LIST and increment vsi_count.
@@ -6031,12 +6216,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
}
- return status;
+ goto free_pkt_profile;
}
rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ if (!s_rule) {
+ status = -ENOMEM;
+ goto free_pkt_profile;
+ }
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -6105,6 +6292,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto err_ice_add_adv_rule;
}
+ if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
+ status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
NULL);
@@ -6150,6 +6345,13 @@ err_ice_add_adv_rule:
kfree(s_rule);
+free_pkt_profile:
+ if (profile->match & ICE_PKT_KMALLOC) {
+ kfree(profile->offsets);
+ kfree(profile->pkt);
+ kfree(profile);
+ }
+
return status;
}
@@ -6342,7 +6544,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, &lkup_exts);
+ status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
if (status)
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index eb641e5512d2..68d8e8a6a189 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -192,6 +192,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
+ u16 vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
@@ -358,7 +359,13 @@ int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
/* Promisc/defport setup for VSIs */
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction);
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
+
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index b803f2ab3cc7..a298862857a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -50,6 +50,15 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_VLAN)
lkups_cnt++;
+ /* is CVLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ lkups_cnt++;
+
+ /* are PPPoE options specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO))
+ lkups_cnt++;
+
/* are IPv[4|6] fields specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
@@ -134,6 +143,18 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
}
}
+static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
+{
+ switch (vlan_tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ case ETH_P_QINQ1:
+ return vlan_tpid;
+ default:
+ return 0;
+ }
+}
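Editor's aside: ice_check_supported_vlan_tpid() above simply whitelists the TPIDs the rule can be programmed with; returning 0 means "no TPID override". A minimal userspace equivalent using the standard EtherType values (0x8100 for 802.1Q, 0x88A8 for 802.1ad, 0x9100 for the legacy QinQ TPID) might look like this sketch:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100   /* 802.1Q C-tag */
#define ETH_P_8021AD 0x88A8   /* 802.1ad S-tag */
#define ETH_P_QINQ1  0x9100   /* legacy QinQ TPID */

/* Return the TPID unchanged if it is one we can match on, else 0. */
static uint16_t check_supported_vlan_tpid(uint16_t vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}

int main(void)
{
	printf("0x%x 0x%x\n",
	       check_supported_vlan_tpid(0x88A8),   /* supported   -> 0x88a8 */
	       check_supported_vlan_tpid(0x1234));  /* unsupported -> 0 */
	return 0;
}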
+
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
struct ice_adv_lkup_elem *list)
@@ -269,8 +290,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
+ u16 vlan_tpid = 0;
int i = 0;
+ rule_info->vlan_type = vlan_tpid;
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
@@ -311,12 +335,48 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* copy VLAN info */
if (flags & ICE_TC_FLWR_FIELD_VLAN) {
- list[i].type = ICE_VLAN_OFOS;
+ vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+ rule_info->vlan_type =
+ ice_check_supported_vlan_tpid(vlan_tpid);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ list[i].type = ICE_VLAN_EX;
+ else
+ list[i].type = ICE_VLAN_OFOS;
list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].type = ICE_VLAN_IN;
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO)) {
+ struct ice_pppoe_hdr *vals, *masks;
+
+ vals = &list[i].h_u.pppoe_hdr;
+ masks = &list[i].m_u.pppoe_hdr;
+
+ list[i].type = ICE_PPPOE;
+
+ if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
+ vals->session_id = headers->pppoe_hdr.session_id;
+ masks->session_id = cpu_to_be16(0xFFFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
+ vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
+ masks->ppp_prot_id = cpu_to_be16(0xFFFF);
+ }
+
+ i++;
+ }
+
/* copy L3 (IPv[4|6]: src, dest) address */
if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
ICE_TC_FLWR_FIELD_SRC_IPV4)) {
@@ -661,6 +721,31 @@ exit:
}
/**
+ * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: Pointer to outer header fields
+ * @returns PPP protocol used in filter (ppp_ses or ppp_disc)
+ */
+static u16
+ice_tc_set_pppoe(struct flow_match_pppoe *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match->mask->session_id) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
+ headers->pppoe_hdr.session_id = match->key->session_id;
+ }
+
+ if (match->mask->ppp_proto) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
+ headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
+ }
+
+ return be16_to_cpu(match->key->type);
+}
+
+/**
* ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
* @match: Pointer to flow match structure
* @fltr: Pointer to filter structure
@@ -945,6 +1030,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
@@ -954,7 +1040,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
@@ -1060,6 +1147,50 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
if (match.mask->vlan_priority)
headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_tpid)
+ headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
+ return -EINVAL;
+ }
+
+ flow_rule_match_cvlan(rule, &match);
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Bad CVLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
+ struct flow_match_pppoe match;
+
+ flow_rule_match_pppoe(rule, &match);
+ n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
+
+ /* If ethertype equals ETH_P_PPP_SES, n_proto might be
+ * overwritten by encapsulated protocol (ppp_proto field) or set
+ * to 0. To correct this, flow_match_pppoe provides the type
+ * field, which contains the actual ethertype (ETH_P_PPP_SES).
+ */
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -1194,7 +1325,7 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
vsi->netdev->dev_addr);
- memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
}
/* validate specified dest MAC address, make sure either it belongs to
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index e25e958f4396..91cd3d3778c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -23,6 +23,9 @@
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
+#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
+#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -40,6 +43,12 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_tpid;
+};
+
+struct ice_tc_pppoe_hdr {
+ __be16 session_id;
+ __be16 ppp_proto;
};
struct ice_tc_l2_hdr {
@@ -81,6 +90,8 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_l2_hdr l2_key;
struct ice_tc_l2_hdr l2_mask;
struct ice_tc_vlan_hdr vlan_hdr;
+ struct ice_tc_vlan_hdr cvlan_hdr;
+ struct ice_tc_pppoe_hdr pppoe_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3f8b7274ed2f..836dce840712 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f2a518a1fd94..861b64322959 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -693,10 +693,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 7adf9ddf129e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -271,13 +271,14 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
+ * are in unicast promiscuous mode
* @pf: PF structure for accessing VF(s)
*
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * Return false if no VF(s) are in unicast promiscuous mode,
* else return true
*/
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
bool is_vf_promisc = false;
struct ice_vf *vf;
@@ -286,8 +287,7 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
rcu_read_lock();
ice_for_each_vf_rcu(pf, bkt, vf) {
/* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
is_vf_promisc = true;
break;
}
@@ -298,6 +298,73 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
}
/**
+ * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ * @ucast_m: promiscuous mask to apply to unicast
+ * @mcast_m: promiscuous mask to apply to multicast
+ *
+ * Decide which mask should be used for the unicast and multicast filters,
+ * based on the presence of VLANs
+ */
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m)
+{
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
+ *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ } else {
+ *mcast_m = ICE_MCAST_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_PROMISC_BITS;
+ }
+}
+
+/**
+ * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ *
+ * Clear all promiscuous/allmulticast filters for a VF
+ */
+static int
+ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 ucast_m, mcast_m;
+ int ret = 0;
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ } else {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+ }
+
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
+ }
+ }
+
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
+ }
+ }
+ return ret;
+}
+
+/**
* ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
* @vf: the VF to configure
* @vsi: the VF's VSI
@@ -487,7 +554,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
struct ice_vsi *vsi;
struct device *dev;
struct ice_hw *hw;
- u8 promisc_m;
int err = 0;
bool rsd;
@@ -505,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
- if (WARN_ON(!vsi))
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
+ }
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -554,16 +622,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
+ ice_vf_clear_all_promisc_modes(vf, vsi);
ice_eswitch_del_vf_mac_rule(vf);
@@ -705,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops;
- int err;
+ int err = 0;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_tx_filtering(vsi);
- if (err)
- return err;
+ /* Allow VF with VLAN 0 only to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
return ice_cfg_mac_antispoof(vsi, true);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 1b4380d6d949..52bd9a3816bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -214,7 +214,10 @@ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m);
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int
@@ -260,7 +263,7 @@ static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
}
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 4547bc1f7cee..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -360,6 +360,54 @@ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
}
/**
+ * ice_vc_get_vlan_caps
+ * @hw: pointer to the hw
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VSI
+ * @driver_caps: current driver caps
+ *
+ * Return 0 if no VLAN caps are supported, otherwise the negotiated VLAN caps value
+ */
+static u32
+ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
+ u32 driver_caps)
+{
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ /* In switchdev setting VLAN from VF isn't supported */
+ return 0;
+
+ if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ /* VLAN offloads based on current device configuration */
+ return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+ } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
+ * these two conditions, which amounts to guest VLAN filtering
+ * and offloads being based on the inner VLAN or the
+ * inner/single VLAN respectively and don't allow VF to
+ * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
+ */
+ if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (!ice_is_dvm_ena(hw) &&
+ !ice_vf_is_port_vlan_ena(vf)) {
+ /* configure backward compatible support for VFs that
+ * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+ * configured in SVM, and no port VLAN is configured
+ */
+ ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (ice_is_dvm_ena(hw)) {
+ /* configure software offloaded VLAN support when DVM
+ * is enabled, but no port VLAN is enabled
+ */
+ ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+ }
+ }
+
+ return 0;
+}
+
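Editor's aside: the refactored ice_vc_get_vlan_caps() above encodes a small decision matrix: the V2 capability wins whenever the VF offers it, the legacy VLAN offload is granted only for DVM+port-VLAN or SVM+no-port-VLAN, and switchdev mode disallows VF VLAN configuration entirely. The userspace sketch below mirrors that decision; the CAP_* flag values are invented and are not the real VIRTCHNL constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch-only stand-ins; the real VIRTCHNL_VF_OFFLOAD_* values differ. */
#define CAP_VLAN    0x1
#define CAP_VLAN_V2 0x2

/* Which VLAN capability does the PF grant, given the device mode and
 * what the VF asked for? */
static uint32_t negotiate_vlan_caps(bool switchdev, bool dvm, bool port_vlan,
				    uint32_t driver_caps)
{
	if (switchdev)
		return 0;                     /* VF may not touch VLANs */

	if (driver_caps & CAP_VLAN_V2)
		return CAP_VLAN_V2;           /* preferred, mode-aware API */

	if (driver_caps & CAP_VLAN) {
		if (dvm && port_vlan)
			return CAP_VLAN;      /* offloads act on inner VLAN */
		if (!dvm && !port_vlan)
			return CAP_VLAN;      /* single-VLAN legacy mode */
		/* dvm && !port_vlan: no cap, software offload only */
	}
	return 0;
}

int main(void)
{
	printf("%u %u %u\n",
	       negotiate_vlan_caps(false, true,  true,  CAP_VLAN),     /* 1 */
	       negotiate_vlan_caps(false, true,  false, CAP_VLAN),     /* 0 */
	       negotiate_vlan_caps(false, false, false, CAP_VLAN_V2)); /* 2 */
	return 0;
}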
+/**
* ice_vc_get_vf_res_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -402,33 +450,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
goto err;
}
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
- /* VLAN offloads based on current device configuration */
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
- } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
- /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
- * these two conditions, which amounts to guest VLAN filtering
- * and offloads being based on the inner VLAN or the
- * inner/single VLAN respectively and don't allow VF to
- * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
- */
- if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
- } else if (!ice_is_dvm_ena(hw) &&
- !ice_vf_is_port_vlan_ena(vf)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
- /* configure backward compatible support for VFs that
- * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
- * configured in SVM, and no port VLAN is configured
- */
- ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
- } else if (ice_is_dvm_ena(hw)) {
- /* configure software offloaded VLAN support when DVM
- * is enabled, but no port VLAN is enabled
- */
- ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
- }
- }
+ vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
+ vf->driver_caps);
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
@@ -976,6 +999,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ u8 mcast_m, ucast_m;
struct device *dev;
int ret = 0;
@@ -1022,39 +1046,33 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = alluni || allmulti;
+ if (alluni) {
+ /* in this case we're turning on promiscuous mode */
+ ret = ice_set_dflt_vsi(vsi);
+ } else {
+ /* in this case we're turning off promiscuous mode */
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ }
- if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
- /* only attempt to set the default forwarding VSI if
- * it's not currently set
- */
- ret = ice_set_dflt_vsi(pf->first_sw, vsi);
- else if (!set_dflt_vsi &&
- ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- /* only attempt to free the default forwarding VSI if we
- * are the owner
- */
- ret = ice_clear_dflt_vsi(pf->first_sw);
+ /* in this case we're turning on/off only
+ * allmulticast
+ */
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
if (ret) {
- dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
- set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+ dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
+ vf->vf_id, ret);
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto error_param;
}
} else {
- u8 mcast_m, ucast_m;
-
- if (ice_vf_is_port_vlan_ena(vf) ||
- ice_vsi_has_non_zero_vlans(vsi)) {
- mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
- ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
- } else {
- mcast_m = ICE_MCAST_PROMISC_BITS;
- ucast_m = ICE_UCAST_PROMISC_BITS;
- }
-
if (alluni)
ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
else
@@ -1079,6 +1097,9 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_states))
dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, mcast_err);
}
if (!ucast_err) {
@@ -1091,6 +1112,9 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_states))
dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, ucast_err);
}
error_param:
@@ -2264,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN filtering on first non-zero VLAN */
if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2309,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
/* Disable VLAN filtering when only VLAN 0 is left */
- if (!ice_vsi_has_non_zero_vlans(vsi))
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2814,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2829,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
- ice_vf_dis_vlan_promisc(vsi, &vlan);
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
}
}
@@ -2907,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (err)
return err;
}
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2922,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
- if (err)
- return err;
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
}
}
@@ -2948,7 +3015,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
struct virtchnl_vlan_filtering_caps *vfc,
struct virtchnl_vlan_filter_list_v2 *vfl)
{
- u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+ u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+ vfl->num_elements;
if (num_requested_filters > vfc->max_filters)
return false;
@@ -3528,42 +3596,6 @@ ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
}
-static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't enable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't disable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
static int
ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
{
@@ -3590,10 +3622,10 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.config_rss_lut = ice_vc_config_rss_lut,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
- .add_vlan_msg = ice_vc_repr_add_vlan,
- .remove_vlan_msg = ice_vc_repr_del_vlan,
- .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
- .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1b618de592b7..bcda2e004807 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -199,7 +199,6 @@ static bool ice_is_dvm_supported(struct ice_hw *hw)
#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
-#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
{
/* Update recipe ICE_SW_LKUP_VLAN to filter based on the
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index cbe92fd23a70..8d6e44ee1895 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2207,7 +2207,7 @@ out:
* igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ca5429774994..fa028928482f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1033,9 +1033,6 @@
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
-
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1277c5c7d099..205d577bdbba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -854,7 +854,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 9cb49980ec2d..eb9f6da9208a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -116,7 +116,6 @@
#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c5f04c40284b..2796e81d2726 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1945,7 +1945,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
* However, when we do so, no frame from queue 2 and 3 are
* transmitted. It seems the MAX_TPKT_SIZE should not be great
* or _equal_ to the buffer size programmed in TXPBS. For this
+ * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
+ * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64.
*/
val = (4096 - 1) / 64;
wr32(E1000_I210_DTXMXPKTSZ, val);
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -6260,74 +6267,108 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *tx_ring,
struct xdp_frame *xdpf)
{
- union e1000_adv_tx_desc *tx_desc;
- u32 len, cmd_type, olinfo_status;
- struct igb_tx_buffer *tx_buffer;
- dma_addr_t dma;
- u16 i;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, i, index = tx_ring->next_to_use;
+ struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
+ struct igb_tx_buffer *tx_buffer = tx_head;
+ union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
+ u32 len = xdpf->len, cmd_type, olinfo_status;
+ void *data = xdpf->data;
- len = xdpf->len;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- if (unlikely(!igb_desc_unused(tx_ring)))
- return IGB_XDP_CONSUMED;
-
- dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ if (igb_maybe_stop_tx(tx_ring, count + 3))
return IGB_XDP_CONSUMED;
+ i = 0;
/* record the location of the first descriptor for this packet */
- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->type = IGB_TYPE_XDP;
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- i = tx_ring->next_to_use;
- tx_desc = IGB_TX_DESC(tx_ring, i);
+ olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->type = IGB_TYPE_XDP;
- tx_buffer->xdpf = xdpf;
+ for (;;) {
+ dma_addr_t dma;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto unmap;
- /* put descriptor type bits */
- cmd_type = E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_DEXT |
- E1000_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IGB_TXD_DCMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
- olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring */
- if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
- olinfo_status |= tx_ring->reg_idx << 4;
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS | len;
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ tx_buffer->protocol = 0;
- netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+ if (++index == tx_ring->count)
+ index = 0;
+ if (i == nr_frags)
+ break;
+
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ tx_desc = IGB_TX_DESC(tx_ring, index);
+ tx_desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
/* set the timestamp */
- tx_buffer->time_stamp = jiffies;
+ tx_head->time_stamp = jiffies;
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
/* set next_to_watch value indicating a packet is present */
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- tx_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ tx_ring->next_to_use = index;
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
- writel(i, tx_ring->tail);
+ writel(index, tx_ring->tail);
return IGB_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == tx_head)
+ break;
+
+ if (!index)
+ index += tx_ring->count;
+ index--;
+ }
+
+ return IGB_XDP_CONSUMED;
}
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
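Editor's aside: the reworked igb_xmit_xdp_ring() above maps one descriptor per fragment and, if any DMA mapping fails, walks the ring backwards releasing everything it already claimed. The minimal sketch below shows the same unwind pattern outside the kernel; map_buf()/unmap_buf() are invented helpers standing in for dma_map_single()/dma_unmap_page().

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

static bool map_buf(int idx)   { return idx != 5; }        /* pretend slot 5 fails */
static void unmap_buf(int idx) { printf("unmap %d\n", idx); }

/* Map nr_bufs ring slots starting at head; on failure, unmap in reverse. */
static bool xmit_frags(int head, int nr_bufs)
{
	int idx = head;
	int mapped = 0;

	for (int i = 0; i < nr_bufs; i++) {
		if (!map_buf(idx))
			goto unwind;
		mapped++;
		idx = (idx + 1) % RING_SIZE;
	}
	return true;

unwind:
	/* release already-mapped slots in reverse, ending with head */
	while (mapped--) {
		idx = (idx + RING_SIZE - 1) % RING_SIZE;
		unmap_buf(idx);
	}
	return false;
}

int main(void)
{
	/* slots 3 and 4 map, slot 5 fails: prints "unmap 4", "unmap 3", "dropped" */
	printf("%s\n", xmit_frags(3, 4) ? "sent" : "dropped");
	return 0;
}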
@@ -7924,8 +7965,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7939,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
@@ -8818,6 +8862,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
@@ -9522,7 +9567,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
igb_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 02fec948ce64..15e57460e19e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -190,7 +190,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
}
/* PTP clock operations */
-static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
@@ -199,15 +199,14 @@ static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
u64 rate;
u32 incvalue;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate = ppb;
- rate <<= 14;
- rate = div_u64(rate, 1953125);
- incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+ incvalue = INCVALUE_82576;
+ rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
incvalue -= rate;
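Editor's aside: the adjfine conversion above replaces the old parts-per-billion math with scaled_ppm, whose lower 16 bits are a binary fraction of one ppm, so the increment delta is base * scaled_ppm / (1000000 * 2^16). A hedged userspace sketch of that arithmetic follows; BASE_INCVALUE is a made-up example (not the real INCVALUE_82576), and gcc/clang's __int128 stands in for the kernel's mul_u64_u64_div_u64().

#include <stdint.h>
#include <stdio.h>

/* Made-up example base increment; the real INCVALUE_82576 differs. */
#define BASE_INCVALUE 0x800000ULL

static uint64_t adjfine_delta(uint64_t base, long scaled_ppm)
{
	if (scaled_ppm < 0)
		scaled_ppm = -scaled_ppm;
	/* same quotient mul_u64_u64_div_u64() computes in the driver */
	return (uint64_t)((unsigned __int128)base * (uint64_t)scaled_ppm /
			  (1000000ULL << 16));
}

int main(void)
{
	long scaled_ppm = 10L << 16;          /* a +10 ppm request */

	/* 0x800000 * 10 / 1000000 = 83 (truncated) */
	printf("delta = %llu\n",
	       (unsigned long long)adjfine_delta(BASE_INCVALUE, scaled_ppm));
	return 0;
}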
@@ -1347,7 +1346,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.max_adj = 999999881;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjfine = igb_ptp_adjfine_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 975eb47ee04d..57d39ee00b58 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -227,7 +227,7 @@ struct igbvf_adapter {
/* The VF counters don't clear on read so we have to get a base
* count on driver start up and always subtract that base on
- * on the first update, thus the flag..
+ * the first update, thus the flag..
*/
struct e1000_vf_stats stats;
u64 zero_base;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 43ced78c3a2e..f4e91db89fe5 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2537,7 +2537,7 @@ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
igbvf_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 360644f33d5f..88680e3d613d 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -89,8 +89,6 @@ struct igc_mac_info {
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
- u8 forced_speed_duplex;
-
bool asf_firmware_present;
bool arc_subsystem_valid;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 67b8ffd21d8a..a5c4b19d71a2 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -193,7 +193,7 @@ s32 igc_force_mac_fc(struct igc_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ae17af44fe02..ebff0e04045d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5813,9 +5813,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
return false;
for (n = 0; n < qopt->num_entries; n++) {
- const struct tc_taprio_sched_entry *e;
+ const struct tc_taprio_sched_entry *e, *prev;
int i;
+ prev = n ? &qopt->entries[n - 1] : NULL;
e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
@@ -5828,7 +5829,12 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->gate_mask & BIT(i))
queue_uses[i]++;
- if (queue_uses[i] > 1)
+ /* There are limitations: A single queue cannot be
+ * opened and closed multiple times per cycle unless the
+ * gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
return false;
}
}
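Editor's aside: the validate_schedule() change above relaxes the old "each queue at most once per cycle" rule: a queue may now span consecutive entries as long as its gate never closes in between. The standalone sketch below applies the same rule to a small schedule; the sched_entry type and the eight-queue cap are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sched_entry {
	uint32_t gate_mask;   /* bit i set => queue i open in this entry */
};

static bool schedule_is_valid(const struct sched_entry *e, int n_entries,
			      int n_queues)
{
	int uses[8] = { 0 };

	for (int n = 0; n < n_entries; n++) {
		/* prev is only consulted once a queue was already seen,
		 * which can only happen for n > 0 */
		const struct sched_entry *prev = n ? &e[n - 1] : NULL;

		for (int i = 0; i < n_queues; i++) {
			if (!(e[n].gate_mask & (1u << i)))
				continue;
			uses[i]++;
			/* reopened after being closed => reject */
			if (uses[i] > 1 && !(prev->gate_mask & (1u << i)))
				return false;
		}
	}
	return true;
}

int main(void)
{
	struct sched_entry ok[]  = { { 0x1 }, { 0x1 }, { 0x2 } }; /* queue 0 stays open */
	struct sched_entry bad[] = { { 0x1 }, { 0x2 }, { 0x1 } }; /* queue 0 reopened */

	/* prints: 1 0 */
	printf("%d %d\n", schedule_is_valid(ok, 3, 2),
	       schedule_is_valid(bad, 3, 2));
	return 0;
}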
@@ -5872,6 +5878,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
u32 start_time = 0, end_time = 0;
size_t n;
@@ -5887,9 +5894,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
- /* FIXME: be a little smarter about cases when the gate for a
- * queue stays open for more than one entry.
- */
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
int i;
@@ -5902,8 +5906,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!(e->gate_mask & BIT(i)))
continue;
- ring->start_time = start_time;
+ /* Check whether a queue stays open for more than one
+ * entry. If so, keep the start and advance the end
+ * time.
+ */
+ if (!queue_configured[i])
+ ring->start_time = start_time;
ring->end_time = end_time;
+
+ queue_configured[i] = true;
}
start_time += e->interval;
@@ -6171,6 +6182,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 653e9f1e35b5..8dbb9f903ca7 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -15,7 +15,6 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define IGC_PTM_STAT_SLEEP 2
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e197a33d93a0..c0d8214148d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -59,9 +59,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* MSI-X Table Register Descriptions */
-#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
-
/* RSS registers */
#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
@@ -306,7 +303,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- writel((val), &hw_addr[(reg)]); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@@ -318,4 +316,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h))
+
#endif
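Editor's aside: the IGC_REMOVED() guards added above implement the usual surprise-removal pattern: once the mapped BAR pointer has been cleared, register reads return all-Fs and writes are dropped instead of faulting. A conceptual userspace sketch, with readl()/writel() modelled as plain memory accesses and the removal test simplified to the same NULL check IGC_REMOVED() performs:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static volatile uint32_t fake_bar[64];          /* stands in for the BAR */
static volatile uint32_t *hw_addr = fake_bar;   /* NULL once device is gone */

static uint32_t rd32(size_t reg)
{
	if (!hw_addr)               /* IGC_REMOVED(hw_addr) */
		return ~0u;         /* reads of a gone device float high */
	return hw_addr[reg / 4];
}

static void wr32(size_t reg, uint32_t val)
{
	if (!hw_addr)               /* skip the write instead of faulting */
		return;
	hw_addr[reg / 4] = val;
}

int main(void)
{
	wr32(0x10, 0xabcd);
	printf("0x%x\n", rd32(0x10));   /* 0xabcd */
	hw_addr = NULL;                 /* simulate hot-unplug */
	wr32(0x10, 0x1234);             /* silently dropped */
	printf("0x%x\n", rd32(0x10));   /* 0xffffffff */
	return 0;
}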
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index c8d1e815ec6b..98bd3267b99b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -576,7 +576,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
* Writes a value to the specified offset in the VLAN filter table.
*
* hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filer table to write
+ * offset - Offset in VLAN filter table to write
* value - Value to write into VLAN filter table
*****************************************************************************/
void
@@ -588,7 +588,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
}
/******************************************************************************
- * Clears the VLAN filer table
+ * Clears the VLAN filter table
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index affdefcca7e3..45be9a1ab6af 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1187,7 +1187,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
iph = ip_hdr(skb);
iph->tot_len = 0;
@@ -1704,7 +1704,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
netdev->stats.tx_window_errors = 0;
}
-#define IXGB_MAX_INTR 10
/**
* ixgb_intr - Interrupt Handler
* @irq: interrupt number
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index f0cadd532c53..d40f96250691 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -141,8 +141,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define MAX_RDTR 0xFFFF
#define MIN_RDTR 0
-#define XSUMRX_DEFAULT OPTION_ENABLED
-
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 921a4d977d65..5369a97ff5ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -167,12 +167,46 @@ enum ixgbe_tx_flags {
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+
+struct vf_stats {
+ u64 gprc;
+ u64 gorc;
+ u64 gptc;
+ u64 gotc;
+ u64 mprc;
+};
+
struct vf_data_storage {
struct pci_dev *vfdev;
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
bool clear_to_send;
+ struct vf_stats vfstats;
+ struct vf_stats last_vfstats;
+ struct vf_stats saved_rst_vfstats;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
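Editor's aside: the per-VF statistics registers are not clear-on-read and the 32-bit ones wrap, so the UPDATE_VF_COUNTER_32bit macro added above folds each raw reading into a 64-bit running total, carrying one full 2^32 period whenever the hardware value goes backwards. A standalone sketch of the same folding:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit, wrapping hardware counter into a 64-bit software total,
 * mirroring UPDATE_VF_COUNTER_32bit above. */
static void update_counter_32(uint32_t current, uint32_t *last, uint64_t *total)
{
	if (current < *last)              /* hardware counter wrapped */
		*total += 0x100000000ULL; /* carry one full 32-bit period */
	*last = current;
	*total &= 0xFFFFFFFF00000000ULL;  /* keep the accumulated high part */
	*total |= current;                /* splice in the live low 32 bits */
}

int main(void)
{
	uint32_t last = 0;
	uint64_t total = 0;

	update_counter_32(0xFFFFFFF0u, &last, &total); /* near the wrap point */
	update_counter_32(0x00000010u, &last, &total); /* wrapped around */

	/* prints: total = 0x100000010 */
	printf("total = 0x%llx\n", (unsigned long long)total);
	return 0;
}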
@@ -779,6 +813,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
+ spinlock_t vfs_lock;
};
static inline int ixgbe_determine_xdp_q_idx(int cpu)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 95c92fe890a1..100388968e4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -879,7 +879,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* ixgbe_clear_vfta_82598 - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4c26c4b92f07..38c4609bd429 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3237,7 +3237,7 @@ vfta_update:
* ixgbe_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 72e6ebffea33..e85f7d2e8810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -8,12 +8,10 @@
#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
-#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 628d0eb0599f..04f453eabef6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -18,8 +18,6 @@
#include "ixgbe_phy.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBE_STATS};
struct ixgbe_stats {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 77c2e70b0860..d1e430b8c8aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5161,7 +5161,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
}
/**
- * ixgbe_lpbthresh - calculate low water mark for for flow control
+ * ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
@@ -5549,6 +5549,47 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
return ret;
}
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ adapter->vfinfo[i].last_vfstats.gprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+ adapter->vfinfo[i].vfstats.gprc;
+ adapter->vfinfo[i].vfstats.gprc = 0;
+ adapter->vfinfo[i].last_vfstats.gptc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+ adapter->vfinfo[i].vfstats.gptc;
+ adapter->vfinfo[i].vfstats.gptc = 0;
+ adapter->vfinfo[i].last_vfstats.gorc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+ adapter->vfinfo[i].vfstats.gorc;
+ adapter->vfinfo[i].vfstats.gorc = 0;
+ adapter->vfinfo[i].last_vfstats.gotc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+ adapter->vfinfo[i].vfstats.gotc;
+ adapter->vfinfo[i].vfstats.gotc = 0;
+ adapter->vfinfo[i].last_vfstats.mprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+ adapter->vfinfo[i].vfstats.mprc;
+ adapter->vfinfo[i].vfstats.mprc = 0;
+ }
+}
+
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5684,6 +5725,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->service_timer, jiffies);
+ ixgbe_clear_vf_stats_counters(adapter);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -6403,6 +6445,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
+
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif
@@ -7271,6 +7316,32 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
netdev->stats.rx_length_errors = hwstats->rlec;
netdev->stats.rx_crc_errors = hwstats->crcerrs;
netdev->stats.rx_missed_errors = total_mpc;
+
+ /* VF Stats Collection - skip while resetting because these
+ * are not clear on read and otherwise you'll sometimes get
+ * crazy values.
+ */
+ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ for (i = 0; i < adapter->num_vfs; i++) {
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),
+ adapter->vfinfo[i].last_vfstats.gprc,
+ adapter->vfinfo[i].vfstats.gprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),
+ adapter->vfinfo[i].last_vfstats.gptc,
+ adapter->vfinfo[i].vfstats.gptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),
+ IXGBE_PVFGORC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gorc,
+ adapter->vfinfo[i].vfstats.gorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),
+ IXGBE_PVFGOTC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gotc,
+ adapter->vfinfo[i].vfstats.gotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),
+ adapter->vfinfo[i].last_vfstats.mprc,
+ adapter->vfinfo[i].vfstats.mprc);
+ }
+ }
}
/**
@@ -9022,6 +9093,23 @@ static void ixgbe_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}
+static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf < 0 || vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc;
+ vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc;
+ vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc;
+ vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc;
+ vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc;
+
+ return 0;
+}
+
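(Once .ndo_get_vf_stats is hooked up in the netdev ops table below, these counters are what a reasonably recent iproute2 reports per VF, e.g. under the "vf N" lines of ip -s link show dev <pf-interface>; the interface name here is only a placeholder.)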
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
@@ -10338,6 +10426,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 336426a67ac1..9f06896a049b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -113,12 +113,16 @@
* the sign bit. This register enables software to calculate frequency
* adjustments and apply them directly to the clock rate.
*
- * The math for converting ppb into TIMINCA values is fairly straightforward.
- * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL
+ * The math for converting scaled_ppm into TIMINCA values is fairly
+ * straightforward.
*
- * This assumes that ppb is never high enough to create a value bigger than
- * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this
- * value is also simple.
+ * TIMINCA value = ( Base_Frequency * scaled_ppm ) / ( 1000000ULL << 16 )
+ *
+ * To avoid overflow, we simply use mul_u64_u64_div_u64.
+ *
+ * This assumes that scaled_ppm is never high enough to create a value bigger
+ * than TIMINCA's 31 bits can store. This is ensured by the stack, and is
+ * measured in parts per billion. Calculating this value is also simple.
* Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL
*
* For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is
@@ -138,7 +142,6 @@
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
-#define MAX_TIMADJ 0x7FFFFFFF
/**
* ixgbe_ptp_setup_sdp_X540
@@ -434,45 +437,45 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_adjfreq_82599
+ * ixgbe_ptp_adjfine_82599
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated scaled_ppm from the base frequency.
*
- * adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- u64 freq, incval;
- u32 diff;
+ u64 incval, diff;
int neg_adj = 0;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
smp_mb();
incval = READ_ONCE(adapter->base_incval);
- freq = incval;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ diff = mul_u64_u64_div_u64(incval, scaled_ppm,
+ 1000000ULL << 16);
incval = neg_adj ? (incval - diff) : (incval + diff);
switch (hw->mac.type) {
case ixgbe_mac_X540:
if (incval > 0xFFFFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval);
break;
case ixgbe_mac_82599EB:
if (incval > 0x00FFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
@@ -485,32 +488,35 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
/**
- * ixgbe_ptp_adjfreq_X550
+ * ixgbe_ptp_adjfine_X550
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
- * adjust the frequency of the SYSTIME registers by the indicated ppb from base
- * frequency
+ * Adjust the frequency of the SYSTIME registers by the indicated scaled_ppm
+ * from base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
int neg_adj = 0;
- u64 rate = IXGBE_X550_BASE_PERIOD;
+ u64 rate;
u32 inca;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate *= ppb;
- rate = div_u64(rate, 1000000000ULL);
+
+ rate = mul_u64_u64_div_u64(IXGBE_X550_BASE_PERIOD, scaled_ppm,
+ 1000000ULL << 16);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
inca = rate & INCVALUE_MASK;
if (neg_adj)
@@ -1356,7 +1362,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1373,7 +1379,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1389,7 +1395,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
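(For readers following the scaled_ppm math above: a minimal user-space sketch of what the two adjfine callbacks compute. The kernel code reaches the same result with mul_u64_u64_div_u64(); the 128-bit intermediate below is only there to keep the sketch self-contained and assumes a GCC/Clang toolchain.)

#include <stdint.h>

/* scaled_ppm is ppm with a 16-bit binary fraction, so the relative
 * frequency adjustment is scaled_ppm / (1000000 << 16).
 */
static uint64_t adjust_incval(uint64_t base_incval, long scaled_ppm)
{
	int neg = scaled_ppm < 0;
	uint64_t abs_ppm = neg ? (uint64_t)-scaled_ppm : (uint64_t)scaled_ppm;
	uint64_t diff = (uint64_t)(((unsigned __int128)base_incval * abs_ppm) /
				   (1000000ULL << 16));

	return neg ? base_incval - diff : base_incval + diff;
}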
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d4e63f0644c3..29cc60988071 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -77,7 +77,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* limit trafffic classes based on VFs enabled */
+ /* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
@@ -205,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ unsigned long flags;
int rss;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -1355,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
@@ -1370,6 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
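(The new vfs_lock pairs the two hunks above: ixgbe_disable_sriov() zeroes num_vfs under the lock before tearing the VF data down, and ixgbe_msg_task() holds it across the whole VF loop, so the mailbox service path can no longer observe a stale num_vfs and dereference vfinfo entries that are being freed.)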
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6da9880d766a..7f7ea468ffa9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2533,6 +2533,13 @@ enum {
#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e4b50c7781ff..35c2b9b8bd19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1737,7 +1737,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for native SFP support.
+ * Configure the integrated PHY for native SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
@@ -1786,7 +1786,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for SFP support.
+ * Configure the integrated PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 3b41f83c8dff..fed46872af2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -17,8 +17,6 @@
#include "ixgbevf.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBEVF_STATS};
struct ixgbe_stats {
@@ -130,8 +128,6 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 55b87bc3a938..2f12fbe229c1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4787,7 +4787,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
rtnl_unlock();
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 68fc32e36e88..1641d00d8ed3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -964,7 +964,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if we we didn't get an ACK there must have been
+ /* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 57eff4e9e6de..b6be0552a6c1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -775,7 +775,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
int tx_index;
struct tx_desc *desc;
int ret;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 384f5a16753d..0caa2df87c04 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2664,8 +2664,8 @@ err_drop_frame:
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2727,7 +2727,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
if ((txq->count + tso_count_descs(skb)) >= txq->size)
return 0;
- if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
pr_info("*** Is this even possible?\n");
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 7f4a4ca9af78..40203560b291 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o
+ rvu_sdp.o rvu_npc_hash.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 25491edc35ce..c8724bfa86b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -498,6 +498,32 @@ static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = cgx->mac_ops->fifo_len;
+ num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC0 gets half of the FIFO, the rest get 1/4th each */
+ if (lmac_id == 0)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
@@ -847,6 +873,11 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
@@ -899,6 +930,7 @@ int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
return 0;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
if (rx_pause) {
cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
@@ -910,12 +942,13 @@ int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
CGXX_SMUX_CBFC_CTL_DRP_EN);
}
- if (tx_pause)
+ if (tx_pause) {
cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
- else
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
-
- cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ }
cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
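(The hunk above ORs the newly requested pfc_en bits with whatever classes are already programmed in CGXX_SMUX_CBFC_CTL, so per-priority requests accumulate rather than clobber one another; the RPM variant later in this diff does the same. A stripped-down sketch in plain C, with a made-up 8-bit mask standing in for CGX_PFC_CLASS_MASK:)

#include <stdint.h>

#define PFC_CLASS_MASK	0xffULL		/* placeholder: one bit per priority class */

static uint64_t pfc_ctl_update(uint64_t ctl, uint16_t pfc_en, int tx_pause)
{
	/* keep the classes earlier callers already enabled */
	pfc_en |= ctl & PFC_CLASS_MASK;

	ctl &= ~PFC_CLASS_MASK;
	if (tx_pause)
		ctl |= pfc_en & PFC_CLASS_MASK;	/* otherwise leave all classes cleared */

	return ctl;
}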
@@ -1005,9 +1038,9 @@ int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
dev = &cgx->pdev->dev;
- dev_err(dev, "cgx port %d:%d cmd timeout\n",
- cgx->cgx_id, lmac->lmac_id);
- err = -EIO;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
goto unlock;
}
@@ -1433,11 +1466,19 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
u64 req = 0;
u64 resp;
- if (enable)
+ if (enable) {
req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
- else
- req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ /* On CN10K, firmware offloads link bring up/down operations to ECP.
+ * On OcteonTX2, link operations are handled by the firmware itself,
+ * which can cause mbox errors, so configure the maximum time the
+ * firmware polls for the link as 1000 ms.
+ */
+ if (!is_dev_rpm(cgx))
+ req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+ } else {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ }
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
@@ -1689,6 +1730,7 @@ static struct mac_ops cgx_mac_ops = {
.tx_stats_cnt = 18,
.get_nr_lmacs = cgx_get_nr_lmacs,
.get_lmac_type = cgx_get_lmac_type,
+ .lmac_fifo_len = cgx_get_lmac_fifo_len,
.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
.mac_get_rx_stats = cgx_get_rx_stats,
.mac_get_tx_stats = cgx_get_tx_stats,
@@ -1743,6 +1785,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index bd2f33a26eee..0b06788b8d80 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -92,7 +92,7 @@
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
-#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define CGX_CMD_TIMEOUT 5000 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_LMAC_FWI 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index f72ec0e2506f..d4a27c882a5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -261,4 +261,6 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+/* LINK_BRING_UP command timeout */
+#define LINKCFG_TIMEOUT GENMASK_ULL(21, 8)
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index f30581bf0688..52b6016789fa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -80,6 +80,7 @@ struct mac_ops {
*/
int (*get_nr_lmacs)(void *cgx);
u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ u32 (*lmac_fifo_len)(void *cgx, int lmac_id);
int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
bool enable);
/* Register Stats related functions */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 550cb11197bf..d7762577e285 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -33,7 +33,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 6000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -169,9 +169,10 @@ M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \
+ msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
- msg_rsp) \
+ cgx_mac_addr_update_rsp) \
M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@@ -241,6 +242,9 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
npc_mcam_get_stats_req, \
npc_mcam_get_stats_rsp) \
+M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \
+ npc_get_secret_key_req, \
+ npc_get_secret_key_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -428,6 +432,7 @@ struct get_hw_cap_rsp {
struct mbox_msghdr hdr;
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
+ u8 npc_hash_extract; /* Is hash extract supported */
};
/* CGX mbox message formats */
@@ -451,6 +456,7 @@ struct cgx_fec_stats_rsp {
struct cgx_mac_addr_set_or_get {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
+ u32 index;
};
/* Structure for requesting the operation to
@@ -466,7 +472,7 @@ struct cgx_mac_addr_add_req {
*/
struct cgx_mac_addr_add_rsp {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for requesting the operation to
@@ -474,7 +480,7 @@ struct cgx_mac_addr_add_rsp {
*/
struct cgx_mac_addr_del_req {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for response against the operation to
@@ -482,7 +488,7 @@ struct cgx_mac_addr_del_req {
*/
struct cgx_max_dmac_entries_get_rsp {
struct mbox_msghdr hdr;
- u8 max_dmac_filters;
+ u32 max_dmac_filters;
};
struct cgx_link_user_info {
@@ -583,10 +589,20 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_reset_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
struct cgx_mac_addr_update_req {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
- u8 index;
+ u32 index;
+};
+
+struct cgx_mac_addr_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
@@ -1440,6 +1456,16 @@ struct npc_mcam_get_stats_rsp {
u8 stat_ena; /* enabled */
};
+struct npc_get_secret_key_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+};
+
+struct npc_get_secret_key_rsp {
+ struct mbox_msghdr hdr;
+ u64 secret_key[3];
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
@@ -1622,6 +1648,11 @@ enum cgx_af_status {
LMAC_AF_ERR_PERM_DENIED = -1103,
LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
};
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 9b6e587e78b4..f187293e3e08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -10,6 +10,14 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -200,6 +208,7 @@ enum key_fields {
NPC_ERRLEV,
NPC_ERRCODE,
NPC_LXMB,
+ NPC_EXACT_RESULT,
NPC_LA,
NPC_LB,
NPC_LC,
@@ -381,6 +390,22 @@ struct nix_rx_action {
};
/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_EXACT_NIBBLE_START 40
+#define NPC_EXACT_NIBBLE_END 43
+#define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40)
+
+/* NPC_EXACT_KEX_S nibble definitions for each field */
+#define NPC_EXACT_NIBBLE_HIT BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_OPC BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_WAY BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41)
+
+#define NPC_EXACT_RESULT_HIT BIT_ULL(0)
+#define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1)
+#define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3)
+#define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5)
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
/* NPC_PARSE_KEX_S nibble definitions for each field */
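(Taken together, the NPC_EXACT_RESULT_* masks describe a 16-bit lookup result laid out as hit (1 bit), opcode (2), way (2) and index (11). With the usual <linux/bitfield.h> helpers a consumer could pull the index out with, e.g., FIELD_GET(NPC_EXACT_RESULT_IDX, result) — an illustrative use, not a call lifted from this series.)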
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 4180376fa676..a820bad3abb2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -155,7 +155,7 @@
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
- NPC_PARSE_NIBBLE_ERRCODE | \
+ NPC_PARSE_NIBBLE_L2L3_BCAST | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -15123,7 +15123,8 @@ static struct npc_mcam_kex npc_mkex_default = {
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
/* nibble: LA..LE (ltype only) + Error code + Channel */
- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX |
+ (u64)NPC_EXACT_NIBBLE_HIT,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 47e83d7a5804..ef59de43b11e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -22,6 +22,7 @@ static struct mac_ops rpm_mac_ops = {
.tx_stats_cnt = 34,
.get_nr_lmacs = rpm_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
@@ -276,6 +277,14 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable all PFC classes */
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ /* Enable channel mask for all LMACs */
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -342,6 +351,35 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
return err;
}
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 hi_perf_lmac;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = rpm->mac_ops->fifo_len;
+ num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC marked as hi_perf gets half of the FIFO, the rest get 1/4th each */
+ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS);
+ hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL;
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -387,15 +425,14 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
{
rpm_t *rpm = rpmd;
- u64 cfg;
+ u64 cfg, class_en;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
- /* reset PFC class quanta and threshold */
- rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
-
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
@@ -410,9 +447,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (tx_pause) {
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
} else {
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
}
if (!rx_pause && !tx_pause)
@@ -422,9 +461,7 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
- cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
- rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 9ab8d49dd180..c2bd6e54ea51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -48,10 +48,10 @@
#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
@@ -70,11 +70,12 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 54e1b27a7dfe..7282a826d81e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,7 @@
#include "ptp.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -68,6 +69,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
hw->cap.nix_shaper_toggle_wait = false;
+ hw->cap.npc_hash_extract = false;
+ hw->cap.npc_exact_match_enabled = false;
hw->rvu = rvu;
if (is_rvu_pre_96xx_C0(rvu)) {
@@ -85,6 +88,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
+
+ if (is_rvu_npc_hash_extract_en(rvu))
+ hw->cap.npc_hash_extract = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -1122,6 +1128,12 @@ cpt:
goto cgx_err;
}
+ err = rvu_npc_exact_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "failed to initialize exact match table\n");
+ return err;
+ }
+
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
@@ -1991,6 +2003,7 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
rsp->nix_shaping = hw->cap.nix_shaping;
+ rsp->npc_hash_extract = hw->cap.npc_hash_extract;
return 0;
}
@@ -2548,6 +2561,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_reset(rvu, pcifunc);
+
mutex_lock(&rvu->flr_lock);
/* Reset order should reflect inter-block dependencies:
* 1. Reset any packet/work sources (NIX, CPT, TIM)
@@ -2564,6 +2580,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid leak.
+ * Since the LF is detached, pass -1 as the LF number.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 513b43ecd5be..d15bc443335d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -338,6 +338,8 @@ struct hw_cap {
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
+ bool npc_hash_extract; /* Hash extract enabled ? */
+ bool npc_exact_match_enabled; /* Exact match supported ? */
};
struct rvu_hwinfo {
@@ -369,6 +371,7 @@ struct rvu_hwinfo {
struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct npc_exact_table *table;
};
struct mbox_wq_info {
@@ -419,6 +422,7 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex_hash *mkex_hash;
bool custom;
size_t pkinds;
size_t kpus;
@@ -575,6 +579,17 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
+{
+ u64 npc_const3;
+
+ npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return false;
+
+ return true;
+}
+
static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
u8 lmacid, u8 chan)
{
@@ -754,7 +769,6 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
-int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
@@ -773,14 +787,17 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
+
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
@@ -810,11 +827,16 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
-
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
+int rvu_npc_init(struct rvu *rvu);
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask);
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 9ffe99830e34..addc69f4b65c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -474,6 +475,11 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
if (!is_cgx_config_permitted(rvu, pcifunc))
return;
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rvu_npc_exact_reset(rvu, pcifunc);
+ return;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
@@ -584,6 +590,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -602,6 +611,9 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
if (rc >= 0) {
@@ -622,6 +634,9 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}
@@ -643,6 +658,11 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
return 0;
}
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+ return 0;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
return 0;
@@ -680,6 +700,10 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, true);
@@ -695,6 +719,10 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, false);
@@ -833,6 +861,22 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu)
return fifo_len;
}
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
+ struct mac_ops *mac_ops;
+ void *cgxd;
+
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ return 0;
+
+ mac_ops = get_mac_ops(cgxd);
+ if (!mac_ops->lmac_fifo_len)
+ return 0;
+
+ return mac_ops->lmac_fifo_len(cgxd, lmac);
+}
+
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
@@ -1059,7 +1103,7 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
- return -ENXIO;
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
if (!is_pf_cgxmapped(rvu, pf))
return -EPERM;
@@ -1088,7 +1132,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -1098,12 +1142,16 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
return cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
- struct msg_rsp *rsp)
+ struct cgx_mac_addr_update_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
@@ -1111,6 +1159,9 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a9da85e418a4..38bbae5d9ae0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2
+#define CPT_CTX_ILEN 2ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -480,7 +480,7 @@ static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
*/
if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
- val |= rvu->hw->cpt_chan_base;
+ val |= (u64)rvu->hw->cpt_chan_base;
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2ad73b180276..f42a09f04b25 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -18,6 +18,7 @@
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
+#include "rvu_npc_hash.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -2600,6 +2601,170 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
+ struct npc_exact_table_entry *cam_entry;
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i, j;
+
+ u8 bitmap = 0;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Check if there is at least one entry in mem table */
+ if (!table->mem_tbl_entry_cnt)
+ goto dump_cam_table;
+
+ /* Print table headers */
+ seq_puts(s, "\n\tExact Match MEM Table\n");
+ seq_puts(s, "Index\t");
+
+ for (i = 0; i < table->mem_table.ways; i++) {
+ mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
+ struct npc_exact_table_entry, list);
+
+ seq_printf(s, "Way-%d\t\t\t\t\t", i);
+ }
+
+ seq_puts(s, "\n");
+ for (i = 0; i < table->mem_table.ways; i++)
+ seq_puts(s, "\tChan MAC \t");
+
+ seq_puts(s, "\n\n");
+
+ /* Print mem table entries */
+ for (i = 0; i < table->mem_table.depth; i++) {
+ bitmap = 0;
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!mem_entry[j])
+ continue;
+
+ if (mem_entry[j]->index != i)
+ continue;
+
+ bitmap |= BIT(j);
+ }
+
+ /* No valid entries */
+ if (!bitmap)
+ continue;
+
+ seq_printf(s, "%d\t", i);
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!(bitmap & BIT(j))) {
+ seq_puts(s, "nil\t\t\t\t\t");
+ continue;
+ }
+
+ seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
+ mem_entry[j]->mac);
+ mem_entry[j] = list_next_entry(mem_entry[j], list);
+ }
+ seq_puts(s, "\n");
+ }
+
+dump_cam_table:
+
+ if (!table->cam_tbl_entry_cnt)
+ goto done;
+
+ seq_puts(s, "\n\tExact Match CAM Table\n");
+ seq_puts(s, "index\tchan\tMAC\n");
+
+ /* Traverse cam table entries */
+ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
+ seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
+ cam_entry->mac);
+ }
+
+done:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
+
+static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i;
+
+ table = rvu->hw->table;
+
+ seq_puts(s, "\n\tExact Table Info\n");
+ seq_printf(s, "Exact Match Feature : %s\n",
+ rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return 0;
+
+ seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
+
+ seq_puts(s, "\nMCAM Index\tPromisc Mode Status\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
+
+ seq_puts(s, "\n\tMEM Table Info\n");
+ seq_printf(s, "Ways : %d\n", table->mem_table.ways);
+ seq_printf(s, "Depth : %d\n", table->mem_table.depth);
+ seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
+ seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
+ seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
+
+ seq_puts(s, "\n\tCAM Table Info\n");
+ seq_printf(s, "Depth : %d\n", table->cam_table.depth);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
+
+static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ struct npc_key_field *field;
+ u16 chan, pcifunc;
+ int blkaddr, i;
+ u64 cfg, cam1;
+ char *str;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ table = rvu->hw->table;
+
+ field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
+
+ seq_puts(s, "\n\t Exact Hit on drop status\n");
+ seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
+
+ for (i = 0; i < table->num_drop_rules; i++) {
+ pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
+
+ /* channel will always be in keyword 0 */
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
+ chan = field->kw_mask[0] & cam1;
+
+ str = (cfg & 1) ? "enabled" : "disabled";
+
+ seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(table->counter_idx[i])),
+ chan, str);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
+
static void rvu_dbg_npc_init(struct rvu *rvu)
{
rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
@@ -2608,8 +2773,22 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
&rvu_dbg_npc_mcam_info_fops);
debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_mcam_rules_fops);
+
debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_rx_miss_act_fops);
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return;
+
+ debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_entries_fops);
+
+ debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_info_fops);
+
+ debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_drop_cnt_fops);
+
}
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index d0ab8f233a02..88dee589cb21 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -10,6 +10,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "rvu_struct.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "octeontx2-af"
@@ -1436,14 +1437,75 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
};
+static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ bool enabled;
+
+ enabled = rvu_npc_exact_has_match_table(rvu);
+
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
+ enabled ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_npc_exact_disable_feature(rvu);
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 enable;
+
+ if (kstrtoull(val.vstr, 10, &enable)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 value is supported");
+ return -EINVAL;
+ }
+
+ if (enable != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only disabling exact match feature is supported");
+ return -EINVAL;
+ }
+
+ if (rvu_npc_exact_can_disable_feature(rvu))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't disable exact match feature; Please try before any configuration");
+ return -EFAULT;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
};
/* Devlink switch mode */
@@ -1501,6 +1563,7 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
+ size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1522,8 +1585,12 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- err = devlink_params_register(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+ /* Register exact match devlink only for CN10K-B */
+ size = ARRAY_SIZE(rvu_af_dl_params);
+ if (!rvu_npc_exact_has_match_table(rvu))
+ size -= 1;
+
+ err = devlink_params_register(dl, rvu_af_dl_params, size);
if (err) {
dev_err(rvu->dev,
"devlink params register failed with error %d", err);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0fa625e2528e..0879a48411f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -14,6 +14,7 @@
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
+#include "rvu_npc_hash.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
@@ -3792,9 +3793,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_enable(rvu, pcifunc);
} else {
if (!nix_rx_multicast)
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_disable(rvu, pcifunc);
}
return 0;
@@ -4003,9 +4010,13 @@ linkcfg:
return 0;
/* Update transmit credits for CGX links */
- lmac_fifo_len =
- rvu_cgx_get_fifolen(rvu) /
- cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, lmac);
+ return 0;
+ }
return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
(lmac_fifo_len - req->maxlen) / 16);
}
@@ -4057,7 +4068,10 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
u16 lbk_max_frs, lmac_max_frs;
+ unsigned long lmac_bmap;
u64 tx_credits, cfg;
+ u64 lmac_fifo_len;
+ int iter;
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
@@ -4091,12 +4105,23 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Skip when cgx is not available or lmac cnt is zero */
if (lmac_cnt <= 0)
continue;
- tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
- lmac_max_frs) / 16;
- /* Enable credits and set credit pkt count to max allowed */
- cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
- for (link = slink; link < (slink + lmac_cnt); link++) {
+
+ /* Get LMAC id's from bitmap */
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, iter);
+ continue;
+ }
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+
+ link = iter + slink;
nix_hw->tx_credits[link] = tx_credits;
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
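(The credit update above is straightforward arithmetic: tx_credits = (lmac_fifo_len - lmac_max_frs) / 16, i.e. whatever FIFO space remains after one maximum-sized frame, expressed in 16-byte units. With hypothetical numbers — a 16384-byte per-LMAC FIFO share and a 9216-byte max frame — that is (16384 - 9216) / 16 = 448 credits.)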
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 3a31fb8cc155..1e348fd0d930 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -15,6 +15,7 @@
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"
+#include "rvu_npc_hash.h"
#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
@@ -1096,6 +1097,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
/* Delete multicast and promisc MCAM entries */
@@ -1105,8 +1109,39 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
NIXLF_PROMISC_ENTRY, false);
}
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+
+ mutex_lock(&mcam->lock);
+
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->intf != intf)
+ continue;
+
+ if (rule->entry != entry)
+ continue;
+
+ rule->enable = enable;
+ mutex_unlock(&mcam->lock);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ entry, enable);
+
+ return true;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return false;
+}
+
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
* in set_rx_mode mbox handler.
*/
@@ -1181,14 +1216,6 @@ void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-
-#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-
static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
struct npc_mcam_kex *mkex, u8 intf)
{
@@ -1262,6 +1289,9 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
}
+
+ /* Program mkex hash profile */
+ npc_program_mkex_hash(rvu, blkaddr);
}
static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
@@ -1463,6 +1493,7 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
profile->lt_def = &npc_lt_defaults;
profile->mkex = &npc_mkex_default;
+ profile->mkex_hash = &npc_mkex_hash_default;
return 0;
}
@@ -1650,7 +1681,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
* Firmware database method.
* Default KPU profile.
*/
- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
@@ -1819,7 +1850,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
-
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -1915,6 +1945,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
u64 nibble_ena, rx_kex, tx_kex;
@@ -1927,15 +1958,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
if (nibble_ena) {
tx_kex &= ~NPC_PARSE_NIBBLE;
tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
}
/* Configure RX interfaces */
@@ -2047,6 +2078,7 @@ int rvu_npc_init(struct rvu *rvu)
rvu_npc_setup_interfaces(rvu, blkaddr);
+ npc_config_secret_key(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
@@ -2534,7 +2566,7 @@ alloc:
/* Copy MCAM entry indices into mbox response entry_list.
* Requester always expects indices in ascending order, so
- * so reverse the list if reverse bitmap is used for allocation.
+ * reverse the list if reverse bitmap is used for allocation.
*/
if (!req->contig && rsp->count) {
index = 0;
@@ -2562,6 +2594,14 @@ alloc:
return 0;
}
+/* Marks bitmap to reserve the mcam slot */
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ npc_mcam_set_bit(mcam, entry_idx);
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 19c53e591d0d..7c4e1acd0f77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -10,6 +10,8 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
#define NPC_BYTESM GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
@@ -227,6 +229,25 @@ static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
return true;
}
+static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 40 ... 43:
+ type = NPC_EXACT_RESULT;
+ break;
+
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
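
The nibble-to-key-word arithmetic above can be checked with a small stand-alone sketch; the 32-nibble loop bound below is chosen only for illustration and is not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the key_nibble -> (key word, bit offset) mapping
 * used by npc_scan_exact_result()/npc_scan_parse_result(); the 32-nibble
 * bound is hypothetical.
 */
int main(void)
{
	for (uint8_t key_nibble = 0; key_nibble < 32; key_nibble++) {
		uint8_t offset = (key_nibble * 4) % 64; /* offset within key word */
		uint8_t kwi = (key_nibble * 4) / 64;    /* which 64-bit key word */

		printf("nibble %2u -> kw%u, bit %2u\n", key_nibble, kwi, offset);
	}
	return 0;
}
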
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
u8 key_nibble, u8 intf)
{
@@ -276,6 +297,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
default:
return;
}
+
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -445,7 +467,8 @@ do { \
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
@@ -509,8 +532,8 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u8 lid, lt, ld, bitnr;
+ u64 cfg, masked_cfg;
u8 key_nibble = 0;
- u64 cfg;
/* Scan and note how parse result is going to be in key.
* A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
@@ -518,12 +541,24 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
* will be concatenated in key.
*/
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
- cfg &= NPC_PARSE_NIBBLE;
- for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ masked_cfg = cfg & NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
key_nibble++;
}
+ /* Ignore exact match bits for mcam entries except the first rule
+ * which is drop on hit. This first rule is configured explicitly by
+ * exact match code.
+ */
+ masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ bitnr = NPC_EXACT_NIBBLE_START;
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
+ NPC_EXACT_NIBBLE_START) {
+ npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
/* Scan and note how layer data is going to be in key */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
@@ -624,9 +659,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
* If any bits in mask are 0 then corresponding bits in value are
* dont care.
*/
-static void npc_update_entry(struct rvu *rvu, enum key_fields type,
- struct mcam_entry *entry, u64 val_lo,
- u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry dummy = { {0} };
@@ -705,8 +740,6 @@ static void npc_update_entry(struct rvu *rvu, enum key_fields type,
}
}
-#define IPV6_WORDS 4
-
static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
@@ -779,7 +812,8 @@ static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
- struct rvu_npc_mcam_rule *output, u8 intf)
+ struct rvu_npc_mcam_rule *output, u8 intf,
+ int blkaddr)
{
u64 dmac_mask = ether_addr_to_u64(mask->dmac);
u64 smac_mask = ether_addr_to_u64(mask->smac);
@@ -828,6 +862,7 @@ do { \
} while (0)
NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+
NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0);
@@ -854,10 +889,12 @@ do { \
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
npc_update_vlan_features(rvu, entry, features, intf);
+
+ npc_update_field_hash(rvu, intf, entry, blkaddr, features,
+ pkt, mask, opkt, omask);
}
-static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
- u16 entry)
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
{
struct rvu_npc_mcam_rule *iter;
@@ -1023,8 +1060,9 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
u16 owner = req->hdr.pcifunc;
struct msg_rsp write_rsp;
struct mcam_entry *entry;
- int entry_index, err;
bool new = false;
+ u16 entry_index;
+ int err;
installed_features = req->features;
features = req->features;
@@ -1032,7 +1070,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
entry_index = req->entry;
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
- req->intf);
+ req->intf, blkaddr);
if (is_npc_intf_rx(req->intf))
npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
@@ -1057,7 +1095,8 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, missing_features,
&def_ucast_rule->packet,
&def_ucast_rule->mask,
- &dummy, req->intf);
+ &dummy, req->intf,
+ blkaddr);
installed_features = req->features | missing_features;
}
@@ -1424,3 +1463,98 @@ void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
}
mutex_unlock(&mcam->lock);
}
+
+/* Single drop on non hit rule starting from 0th index. This is an extension
+ * to the RPM MAC filter to support more rules.
+ */
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ struct npc_mcam_write_entry_req req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp rsp;
+ bool enabled;
+ int blkaddr;
+ int err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Bail out if no exact match support */
+ if (!rvu_npc_exact_has_match_table(rvu)) {
+ dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If 0th entry is already used, return err */
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
+ if (enabled) {
+ dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
+ __func__, mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Add this entry to mcam rules list */
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ /* Disable rule by default. Enable rule when first dmac filter is
+ * installed
+ */
+ rule->enable = false;
+ rule->chan = chan_val;
+ rule->chan_mask = chan_mask;
+ rule->entry = mcam_idx;
+ rvu_mcam_add_rule(mcam, rule);
+
+ /* Reserve slot 0 */
+ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);
+
+ /* Allocate counter for this single drop on non hit rule */
+ cntr_req.hdr.pcifunc = 0; /* AF request */
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n",
+ __func__, err);
+ return -EFAULT;
+ }
+ *counter_idx = cntr_rsp.cntr;
+
+ /* Fill in fields for this mcam entry */
+ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
+ exact_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
+ chan_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
+ bcast_mcast_mask, 0, NIX_INTF_RX);
+
+ req.intf = NIX_INTF_RX;
+ req.set_cntr = true;
+ req.cntr = cntr_rsp.cntr;
+ req.entry = mcam_idx;
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
+ __func__, mcam_idx);
+ return err;
+ }
+
+ dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
+ __func__, mcam_idx, req.cntr);
+
+ /* disable entry at Bank 0, index 0 */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
new file mode 100644
index 000000000000..bdd65ce56a32
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_FS_H
+#define __RVU_NPC_FS_H
+
+#define IPV6_WORDS 4
+
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf);
+
+#endif /* __RVU_NPC_FS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
new file mode 100644
index 000000000000..594029007f85
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
+ size_t width_bits)
+{
+ const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
+ const size_t msb = start_bit + width_bits - 1;
+ const size_t lword = start_bit >> 6;
+ const size_t uword = msb >> 6;
+ size_t lbits;
+ u64 hi, lo;
+
+ if (lword == uword)
+ return (input[lword] >> (start_bit & 63)) & mask;
+
+ lbits = 64 - (start_bit & 63);
+ hi = input[uword];
+ lo = (input[lword] >> (start_bit & 63));
+ return ((hi << lbits) | lo) & mask;
+}
+
+static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
+{
+ u64 prev_orig_word = 0;
+ u64 cur_orig_word = 0;
+ size_t extra = key_bit_len % 64;
+ size_t max_idx = key_bit_len / 64;
+ size_t i;
+
+ if (extra)
+ max_idx++;
+
+ for (i = 0; i < max_idx; i++) {
+ cur_orig_word = key[i];
+ key[i] = key[i] << 1;
+ key[i] |= ((prev_orig_word >> 63) & 0x1);
+ prev_orig_word = cur_orig_word;
+ }
+}
+
+static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ size_t key_bit_len)
+{
+ u32 hash_out = 0;
+ u64 temp_data = 0;
+ int i;
+
+ for (i = data_bit_len - 1; i >= 0; i--) {
+ temp_data = (data[i / 64]);
+ temp_data = temp_data >> (i % 64);
+ temp_data &= 0x1;
+ if (temp_data)
+ hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
+
+ rvu_npc_lshift_key(key, key_bit_len);
+ }
+
+ return hash_out;
+}
+
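
For reference, the bit-serial Toeplitz construction implemented above amounts to: for each data bit from MSB to LSB, XOR in the current top 32 bits of the key when that bit is set, then slide the key left by one. A minimal user-space sketch, assuming a single 64-bit data word and a 96-bit key (widths chosen for brevity, not taken from the hardware):

#include <stdint.h>

/* Sketch of the bit-serial Toeplitz hash; key is 12 bytes (96 bits). */
static uint32_t toeplitz64(uint64_t data, const uint8_t key[12])
{
	/* Sliding 32-bit window over the key, starting at its MSB. */
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8) | key[3];
	uint32_t hash = 0;
	int byte = 4, shift = 7;

	for (int bit = 63; bit >= 0; bit--) {
		if (data & (1ULL << bit))
			hash ^= window;
		/* Shift the window left by one, pulling in the next key bit. */
		window = (window << 1) | ((key[byte] >> shift) & 1);
		if (--shift < 0) {
			shift = 7;
			byte++;
		}
	}
	return hash;
}
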
+u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+ u64 *secret_key, u8 intf, u8 hash_idx)
+{
+ u64 hash_key[3];
+ u64 data_padded[2];
+ u32 field_hash;
+
+ hash_key[0] = secret_key[1] << 31;
+ hash_key[0] |= secret_key[2];
+ hash_key[1] = secret_key[1] >> 33;
+ hash_key[1] |= secret_key[0] << 31;
+ hash_key[2] = secret_key[0] >> 33;
+
+ data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
+ field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+
+ field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
+ field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
+ return field_hash;
+}
+
+static u64 npc_update_use_hash(int lt, int ld)
+{
+ u64 cfg = 0;
+
+ switch (lt) {
+ case NPC_LT_LC_IP6:
+ /* Update use_hash (bit 20) and bytesm1 (bits 16:19)
+ * in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ ld ? 0x8 : 0x18,
+ 0x1, 0x0, 0x10);
+ break;
+ }
+
+ return cfg;
+}
+
+static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg = npc_update_use_hash(lt, ld);
+
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ }
+ }
+ }
+ }
+}
+
+static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg = npc_update_use_hash(lt, ld);
+
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+ }
+ }
+ }
+}
+
+void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract) {
+ dev_info(rvu->dev, "HW does not support secret key configuration\n");
+ return;
+ }
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+ RVU_NPC_HASH_SECRET_KEY0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
+ RVU_NPC_HASH_SECRET_KEY1);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
+ RVU_NPC_HASH_SECRET_KEY2);
+ }
+}
+
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract) {
+ dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
+ return;
+ }
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+ }
+}
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_get_secret_key_req req;
+ struct npc_get_secret_key_rsp rsp;
+ u64 ldata[2], cfg;
+ u32 field_hash;
+ u8 hash_idx;
+
+ if (!rvu->hw->cap.npc_hash_extract) {
+ dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
+ return;
+ }
+
+ req.intf = intf;
+ rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
+
+ for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+ if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
+ u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
+ u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
+ u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
+ switch (ltype & ltype_mask) {
+ /* If hash extract is enabled for IPv6, then the 128-bit IPv6 source
+ * and destination addresses are each hashed to a 32-bit value.
+ */
+ case NPC_LT_LC_IP6:
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ u32 src_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+ ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ mkex_hash,
+ rsp.secret_key,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+ field_hash, 0, 32, 0, intf);
+ memcpy(&opkt->ip6src, &pkt->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&omask->ip6src, &mask->ip6src,
+ sizeof(mask->ip6src));
+ break;
+ }
+
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ u32 dst_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+ ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ mkex_hash,
+ rsp.secret_key,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+ field_hash, 0, 32, 0, intf);
+ memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&omask->ip6dst, &mask->ip6dst,
+ sizeof(mask->ip6dst));
+ }
+ break;
+ }
+ }
+ }
+ }
+}
+
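
The IPv6 handling above folds the 128-bit address into the two 64-bit ldata words consumed by npc_field_hash_calc(). A stand-alone sketch of that packing, where ntohl() stands in for the kernel's be32_to_cpu_array():

#include <stdint.h>
#include <arpa/inet.h>

/* Fold a big-endian 128-bit IPv6 address (four 32-bit words) into two
 * 64-bit ldata words, mirroring the packing in npc_update_field_hash().
 */
static void ip6_to_ldata(const uint32_t ip6_be[4], uint64_t ldata[2])
{
	uint32_t w[4];

	for (int i = 0; i < 4; i++)
		w[i] = ntohl(ip6_be[i]);        /* IPV6_WORDS == 4 */

	ldata[0] = (uint64_t)w[0] << 32 | w[1]; /* upper 64 bits of the address */
	ldata[1] = (uint64_t)w[2] << 32 | w[3]; /* lower 64 bits of the address */
}
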
+int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+ struct npc_get_secret_key_req *req,
+ struct npc_get_secret_key_rsp *rsp)
+{
+ u64 *secret_key = rsp->secret_key;
+ u8 intf = req->intf;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
+ secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
+ * @mac_addr: MAC address.
+ * Return: MAC address packed into a u64.
+ */
+static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ return mac;
+}
+
+/**
+ * rvu_exact_prepare_mdata - Make mdata for mcam entry
+ * @mac: MAC address
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @mask: LDATA mask.
+ * Return: Meta data
+ */
+static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac);
+
+ /* Please note that mask is 48bit which excludes chan and ctype.
+ * Increase mask bits if we need to include them as well.
+ */
+ ldata |= ((u64)chan << 48);
+ ldata |= ((u64)ctype << 60);
+ ldata &= mask;
+ ldata = ldata << 2;
+
+ return ldata;
+}
+
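
Putting the two helpers above together, the metadata word carries the MAC in bits 47:0, the channel in bits 59:48 and the ctype in bits 61:60, masked and then shifted left by two. A worked sketch with made-up sample values (mask application omitted for brevity):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }; /* sample */
	uint64_t ldata = 0;

	for (int i = 5; i >= 0; i--)            /* mac[0] ends up in bits 47:40 */
		ldata |= (uint64_t)mac[5 - i] << (8 * i);

	ldata |= (uint64_t)0x800 << 48;         /* channel in bits 59:48 (sample) */
	ldata |= (uint64_t)0 << 60;             /* ctype in bits 61:60 */
	ldata <<= 2;                            /* final 2-bit shift, as in the code */

	printf("mdata = 0x%016llx\n", (unsigned long long)ldata);
	return 0;
}
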
+/**
+ * rvu_exact_calculate_hash - calculate hash index to mem table.
+ * @rvu: resource virtualization unit.
+ * @chan: Channel number
+ * @ctype: Channel type.
+ * @mac: MAC address
+ * @mask: HASH mask.
+ * @table_depth: Depth of table.
+ * Return: Hash value
+ */
+static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
+ u64 mask, u32 table_depth)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ u64 hash_key[2];
+ u64 key_in[2];
+ u64 ldata;
+ u32 hash;
+
+ key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
+ key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
+
+ hash_key[0] = key_in[0] << 31;
+ hash_key[0] |= key_in[1];
+ hash_key[1] = key_in[0] >> 33;
+
+ ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
+
+ dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
+ ldata, hash_key[1], hash_key[0]);
+ hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
+
+ hash &= table->mem_table.hash_mask;
+ hash += table->mem_table.hash_offset;
+ dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
+
+ return hash;
+}
+
+/**
+ * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
+ * @rvu: resource virtualization unit.
+ * @way: Indicate way to table.
+ * @index: Hash index to 4 way table.
+ * @hash: Hash value.
+ *
+ * Searches the 4 way table using the hash index.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
+ u32 *index, unsigned int hash)
+{
+ struct npc_exact_table *table;
+ int depth, i;
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ /* Check all the 4 ways for a free slot. */
+ mutex_lock(&table->lock);
+ for (i = 0; i < table->mem_table.ways; i++) {
+ if (test_bit(hash + i * depth, table->mem_table.bmap))
+ continue;
+
+ set_bit(hash + i * depth, table->mem_table.bmap);
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
+ __func__, i, hash);
+
+ *way = i;
+ *index = hash;
+ return 0;
+ }
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
+ bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
+ return -ENOSPC;
+}
+
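
The allocator above treats the single mem_table bitmap as a 4-way set-associative array: entry (way, hash) lives at bit hash + way * depth. A minimal sketch of that indexing, with a hypothetical depth:

#include <stdbool.h>
#include <stdint.h>

#define WAYS  4
#define DEPTH 2048			/* hypothetical table depth */

static unsigned long bmap[WAYS * DEPTH / (8 * sizeof(unsigned long))];

/* Test whether the slot at (way, hash) is already occupied. */
static bool slot_used(unsigned int way, unsigned int hash)
{
	unsigned int bit = hash + way * DEPTH;

	return bmap[bit / (8 * sizeof(unsigned long))] &
	       (1UL << (bit % (8 * sizeof(unsigned long))));
}
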
+/**
+ * rvu_npc_exact_free_id - Free seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier to be freed.
+ */
+static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ mutex_lock(&table->lock);
+ clear_bit(seq_id, table->id_bmap);
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
+}
+
+/**
+ * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ * Return: True or false.
+ */
+static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
+ if (idx == table->tot_ids) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
+ __func__, bitmap_weight(table->id_bmap, table->tot_ids));
+
+ return false;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->id_bmap);
+ mutex_unlock(&table->lock);
+
+ *seq_id = idx;
+ dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
+ * @rvu: resource virtualization unit.
+ * @index: Index to exact CAM table.
+ * Return: 0 upon success; else error number.
+ */
+static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
+ if (idx == table->cam_table.depth) {
+ mutex_unlock(&table->lock);
+ dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
+ bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
+ return -ENOSPC;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->cam_table.bmap);
+ mutex_unlock(&table->lock);
+
+ *index = idx;
+ dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
+ __func__, idx);
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_table_entry - Data for exact match table entry.
+ * @rvu: Resource virtualization unit.
+ * @enable: Enable/Disable entry
+ * @ctype: Software defined channel type. Currently set as 0.
+ * @chan: Channel number.
+ * @mac_addr: Destination mac address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
+ u8 ctype, u16 chan, u8 *mac_addr)
+
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+
+ /* Enable or disable */
+ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
+
+ /* Set Ctype */
+ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
+
+ /* Set chan */
+ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
+
+ /* MAC address */
+ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
+
+ return mdata;
+}
+
+/**
+ * rvu_exact_config_secret_key - Configure secret key.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_secret_key(struct rvu *rvu)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY1);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY2);
+}
+
+/**
+ * rvu_exact_config_search_key - Configure search key
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_search_key(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg_val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* HDR offset */
+ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
+
+ /* BYTESM1, number of bytes - 1 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
+
+ /* Enable LID and set LID to NPC_LID_LA */
+ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
+ reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);
+
+ /* Clear layer type based extraction */
+
+ /* Disable LT_EN */
+ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
+
+ /* Set LTYPE_MATCH to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
+
+ /* Set LTYPE_MASK to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
+}
+
+/**
+ * rvu_exact_config_result_ctrl - Set exact table hash control
+ * @rvu: Resource virtualization unit.
+ * @depth: Depth of Exact match table.
+ *
+ * Sets mask and offset for hash for mem table.
+ */
+static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
+{
+ int blkaddr;
+ u64 reg = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Set mask. Note that depth is a power of 2 */
+ rvu->hw->table->mem_table.hash_mask = (depth - 1);
+ reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
+
+ /* Set offset as 0 */
+ rvu->hw->table->mem_table.hash_offset = 0;
+ reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
+
+ /* Set reg for RX */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
+ /* Store hash mask and offset for s/w algorithm */
+}
+
+/**
+ * rvu_exact_config_table_mask - Set exact table mask.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_table_mask(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 mask = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Don't use Ctype */
+ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
+
+ /* Set chan */
+ mask |= GENMASK_ULL(59, 48);
+
+ /* Full ldata */
+ mask |= GENMASK_ULL(47, 0);
+
+ /* Store mask for s/w hash calculation */
+ rvu->hw->table->mem_table.mask = mask;
+
+ /* Set mask for RX.*/
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
+}
+
+/**
+ * rvu_npc_exact_get_max_entries - Get total number of entries in table.
+ * @rvu: resource virtualization unit.
+ * Return: Maximum table entries possible.
+ */
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ return table->tot_ids;
+}
+
+/**
+ * rvu_npc_exact_has_match_table - Checks support for exact match.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match table is supported/enabled.
+ */
+bool rvu_npc_exact_has_match_table(struct rvu *rvu)
+{
+ return rvu->hw->cap.npc_exact_match_enabled;
+}
+
+/**
+ * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ *
+ * Caller should acquire the lock.
+ * Return: Pointer to table entry.
+ */
+static struct npc_exact_table_entry *
+__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *entry = NULL;
+ struct list_head *lhead;
+
+ lhead = &table->lhead_gbl;
+
+ /* traverse to find the matching entry */
+ list_for_each_entry(entry, lhead, glist) {
+ if (entry->seq_id != seq_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * rvu_npc_exact_add_to_list - Add entry to list
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE to select MEM/CAM table.
+ * @ways: MEM table ways.
+ * @index: Index in MEM/CAM table.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @mac_addr: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence identifier
+ * @cmd: True if function is called by ethtool cmd
+ * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
+ * @pcifunc: pci function
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
+ u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
+ u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
+{
+ struct npc_exact_table_entry *entry, *tmp, *iter;
+ struct npc_exact_table *table = rvu->hw->table;
+ struct list_head *lhead, *pprev;
+
+ WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
+
+ if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
+ dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
+ return -EFAULT;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rvu_npc_exact_free_id(rvu, *seq_id);
+ dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&table->lock);
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+ lhead = &table->lhead_cam_tbl_entry;
+ table->cam_tbl_entry_cnt++;
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+ lhead = &table->lhead_mem_tbl_entry[ways];
+ table->mem_tbl_entry_cnt++;
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ kfree(entry);
+ rvu_npc_exact_free_id(rvu, *seq_id);
+
+ dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
+ return -EINVAL;
+ }
+
+ /* Add to global list */
+ INIT_LIST_HEAD(&entry->glist);
+ list_add_tail(&entry->glist, &table->lhead_gbl);
+ INIT_LIST_HEAD(&entry->list);
+ entry->index = index;
+ entry->ways = ways;
+ entry->opc_type = opc_type;
+
+ entry->pcifunc = pcifunc;
+
+ ether_addr_copy(entry->mac, mac_addr);
+ entry->chan = chan;
+ entry->ctype = ctype;
+ entry->cgx_id = cgx_id;
+ entry->lmac_id = lmac_id;
+
+ entry->seq_id = *seq_id;
+
+ entry->mcam_idx = mcam_idx;
+ entry->cmd = cmd;
+
+ pprev = lhead;
+
+ /* Insert entry in ascending order of index */
+ list_for_each_entry_safe(iter, tmp, lhead, list) {
+ if (index < iter->index)
+ break;
+
+ pprev = &iter->list;
+ }
+
+ /* Add to each table list */
+ list_add(&entry->list, pprev);
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mem_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @ways: ways for MEM table.
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
+}
+
+/**
+ * rvu_npc_exact_cam_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
+}
+
+/**
+ * rvu_npc_exact_dealloc_table_entry - dealloc table entry
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE for selection of table(MEM or CAM)
+ * @ways: ways if opc_type is MEM table.
+ * @index: Index of MEM or CAM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
+ u8 ways, u32 index)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table *table;
+ u8 null_dmac[6] = { 0 };
+ int depth;
+
+ /* Prepare entry with all fields set to zero */
+ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ mutex_lock(&table->lock);
+
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index, table->cam_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
+ __func__, ways, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
+ clear_bit(index, table->cam_table.bmap);
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
+ clear_bit(index + ways * depth, table->mem_table.bmap);
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
+ __func__, index, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_alloc_table_entry - Allocate an entry
+ * @rvu: resource virtualization unit.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @index: Index of MEM table or CAM table.
+ * @ways: Ways. Only valid for MEM table.
+ * @opc_type: OPCODE to select table (MEM or CAM)
+ *
+ * Try allocating a slot from the MEM table. If all 4 way
+ * slots are full for a hash index, check availability in
+ * 32-entry CAM table for allocation.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
+ u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
+{
+ struct npc_exact_table *table;
+ unsigned int hash;
+ int err;
+
+ table = rvu->hw->table;
+
+ /* Check the 4-way mem table for a free slot */
+ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
+ table->mem_table.depth);
+ err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_MEM;
+ dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
+ __func__, *ways, *index);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
+
+ /* ways is 0 for cam table */
+ *ways = 0;
+ err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_CAM;
+ dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
+ __func__, *index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rule info in the database.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: Drop rule index in NPC mcam.
+ * @chan_val: Channel value.
+ * @chan_mask: Channel Mask.
+ * @pcifunc: pcifunc of interface.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
+ u64 chan_val, u64 chan_mask, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
+ continue;
+
+ return false;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX)
+ return false;
+
+ table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
+ table->drop_rule_map[i].chan_val = (u16)chan_val;
+ table->drop_rule_map[i].chan_mask = (u16)chan_mask;
+ table->drop_rule_map[i].pcifunc = pcifunc;
+ table->drop_rule_map[i].valid = true;
+ return true;
+}
+
+/**
+ * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (SDK, LBK or CGX)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @val: Channel number.
+ * @mask: Channel mask.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
+ u8 cgx_id, u8 lmac_id,
+ u64 *val, u64 *mask)
+{
+ u16 chan_val, chan_mask;
+
+ /* No support for SDP and LBK */
+ if (intf_type != NIX_INTF_TYPE_CGX)
+ return false;
+
+ chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ chan_mask = 0xfff;
+
+ if (val)
+ *val = chan_val;
+
+ if (mask)
+ *mask = chan_mask;
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
+ * @rvu: resource virtualization unit.
+ * @drop_rule_idx: Drop rule index in NPC mcam.
+ *
+ * Debugfs (exact_drop_cnt) entry displays the pcifunc for an interface
+ * by retrieving the pcifunc value from the database.
+ * Return: pcifunc associated with the drop rule.
+ */
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
+ continue;
+
+ return table->drop_rule_map[i].pcifunc;
+ }
+
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, drop_rule_idx);
+ return -1;
+}
+
+/**
+ * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (CGX, SDP or LBK)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: Channel value.
+ * @mask: Channel mask.
+ * @pcifunc: pcifunc of interface corresponding to the drop rule.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
+ u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
+ u64 *mask, u16 *pcifunc)
+{
+ struct npc_exact_table *table;
+ u64 chan_val, chan_mask;
+ bool rc;
+ int i;
+
+ table = rvu->hw->table;
+
+ if (intf_type != NIX_INTF_TYPE_CGX) {
+ dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
+ return false;
+ }
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc)
+ return false;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (val)
+ *val = table->drop_rule_map[i].chan_val;
+ if (mask)
+ *mask = table->drop_rule_map[i].chan_mask;
+ if (pcifunc)
+ *pcifunc = table->drop_rule_map[i].pcifunc;
+
+ *drop_mcam_idx = i;
+ return true;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX) {
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, *drop_mcam_idx);
+ return false;
+ }
+
+ dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return false;
+}
+
+/**
+ * __rvu_npc_exact_cmd_rules_cnt_update - Update the number of dmac rules against a drop rule.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: +1 or -1.
+ * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
+ *
+ * When the first exact match entry against a drop rule is added, enable_or_disable_cam
+ * is set to true. When the last exact match entry against a drop rule is deleted,
+ * enable_or_disable_cam is also set to true.
+ * Return: Number of rules
+ */
+static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
+ int val, bool *enable_or_disable_cam)
+{
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
+ bool promisc;
+
+ table = rvu->hw->table;
+ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+
+ *cnt += val;
+
+ if (!enable_or_disable_cam)
+ goto done;
+
+ *enable_or_disable_cam = false;
+
+ if (promisc)
+ goto done;
+
+ /* If all rules are deleted and not already in promisc mode; disable cam */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+ /* If rule got added and not already in promisc mode; enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+done:
+ return *cnt;
+}
+
+/**
+ * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Deletes entry from linked lists and free up slot in HW MEM or CAM
+ * table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table_entry *entry = NULL;
+ struct npc_exact_table *table;
+ bool disable_cam = false;
+ u32 drop_mcam_idx = -1;
+ int *cnt;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
+ if (!entry) {
+ dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
+ &table->mem_tbl_entry_cnt;
+
+ /* delete from lists */
+ list_del_init(&entry->list);
+ list_del_init(&entry->glist);
+
+ (*cnt)--;
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
+ entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
+ __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ if (entry->cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
+
+ /* No dmac filter rules; disable drop on hit rule */
+ if (disable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ mutex_unlock(&table->lock);
+
+ rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
+
+ rvu_npc_exact_free_id(rvu, seq_id);
+
+ dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
+ __func__, seq_id, entry->mac);
+ kfree(entry);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_add_table_entry - Adds a table entry
+ * @rvu: resource virtualization unit.
+ * @cgx_id: cgx identifier.
+ * @lmac_id: lmac identifier.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence number.
+ * @cmd: Whether it is invoked by ethtool cmd.
+ * @mcam_idx: NPC mcam index corresponding to MAC
+ * @pcifunc: PCI func.
+ *
+ * Creates a new exact match table entry in either CAM or
+ * MEM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
+ u16 chan, u8 ctype, u32 *seq_id, bool cmd,
+ u32 mcam_idx, u16 pcifunc)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ enum npc_exact_opc_type opc_type;
+ bool enable_cam = false;
+ u32 drop_mcam_idx;
+ u32 index;
+ u64 mdata;
+ bool rc;
+ int err;
+ u8 ways;
+
+ ctype = 0;
+
+ err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
+ if (err) {
+ dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
+ return err;
+ }
+
+ /* Write mdata to table */
+ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
+
+ if (opc_type == NPC_EXACT_OPC_CAM)
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
+ else
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
+
+ /* Insert entry to linked list */
+ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
+ mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
+ if (err) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
+ return err;
+ }
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ if (cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
+
+ /* First command rule; enable drop on hit rule */
+ if (enable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
+ dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, index, mac, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_update_table_entry - Update exact match table.
+ * @rvu: resource virtualization unit.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @old_mac: Existing MAC address entry.
+ * @new_mac: New MAC address entry.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Updates MAC address of an entry. If entry is in MEM table, new
+ * hash value may not match with old one.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
+ u8 *old_mac, u8 *new_mac, u32 *seq_id)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ u32 hash_index;
+ u64 mdata;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
+ if (!entry) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev,
+ "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
+ __func__, cgx_id, lmac_id, old_mac);
+ return -ENODATA;
+ }
+
+ /* If entry is in mem table and new hash index is different than old
+ * hash index, we cannot update the entry. Fail in these scenarios.
+ */
+ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
+ hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
+ new_mac, table->mem_table.mask,
+ table->mem_table.depth);
+ if (hash_index != entry->index) {
+ dev_dbg(rvu->dev,
+ "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
+ __func__, hash_index, entry->index);
+ mutex_unlock(&table->lock);
+ return -EINVAL;
+ }
+ }
+
+ mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
+
+ if (entry->opc_type == NPC_EXACT_OPC_MEM)
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
+ else
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
+
+ /* Update entry fields */
+ ether_addr_copy(entry->mac, new_mac);
+ *seq_id = entry->seq_id;
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
+
+ dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
+ __func__, old_mac, new_mac);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc
+ *
+ * Drop rule is against each PF. We don't support DMAC filter for
+ * VF.
+ * Return: 0 upon success
+ */
+
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (!*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = false;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = true;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
+ * @rvu: resource virtualization unit.
+ * @req: Reset request
+ * @rsp: Reset response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ if (rc) {
+ /* TODO: how to handle this error case ? */
+ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
+ __func__, pfvf->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
+ * @rvu: resource virtualization unit.
+ * @req: Update request.
+ * @rsp: Update response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ struct rvu_pfvf *pfvf;
+ u32 seq_id, mcam_idx;
+ u8 old_mac[ETH_ALEN];
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
+ __func__, req->index, req->mac_addr);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Look up the entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
+ if (!entry) {
+ dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
+ mutex_unlock(&table->lock);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
+ }
+ ether_addr_copy(old_mac, entry->mac);
+ seq_id = entry->seq_id;
+ mcam_idx = entry->mcam_idx;
+ mutex_unlock(&table->lock);
+
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ /* This could be a new entry */
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
+ pfvf->mac_addr, pf);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id, true,
+ mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev,
+ "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
+ * @rvu: resource virtualization unit.
+ * @req: Add request.
+ * @rsp: Add response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u32 seq_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, -1, req->hdr.pcifunc);
+
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_del - Delete DMAC filter
+ * @rvu: resource virtualization unit.
+ * @req: Delete request.
+ * @rsp: Delete response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int rc;
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
+ * @rvu: resource virtualization unit.
+ * @req: Set request.
+ * @rsp: Set response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u32 mcam_idx = -1;
+ int rc, nixlf;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = &rvu->pf[pf];
+
+ /* If the table does not have an entry, both the update entry and delete
+ * table entry APIs below fail. Those are not failure conditions.
+ */
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pf);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
+ __func__, pfvf->mac_addr, pf);
+ }
+
+ /* Find mcam entry if it exists */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
+ if (!rc) {
+ mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
+ __func__, req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev,
+ "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match feature is supported.
+ */
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ bool empty;
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return false;
+
+ mutex_lock(&table->lock);
+ empty = list_empty(&table->lhead_gbl);
+ mutex_unlock(&table->lock);
+
+ return empty;
+}
+
+/**
+ * rvu_npc_exact_disable_feature - Disable feature.
+ * @rvu: resource virtualization unit.
+ */
+void rvu_npc_exact_disable_feature(struct rvu *rvu)
+{
+ rvu->hw->cap.npc_exact_match_enabled = false;
+}
+
+/**
+ * rvu_npc_exact_reset - Delete and free all entries which match pcifunc.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: PCI func to match.
+ */
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *tmp, *iter;
+ u32 seq_id;
+
+ mutex_lock(&table->lock);
+ list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
+ if (pcifunc != iter->pcifunc)
+ continue;
+
+ seq_id = iter->seq_id;
+ dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
+ pcifunc, seq_id);
+
+ mutex_unlock(&table->lock);
+ rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ mutex_lock(&table->lock);
+ }
+ mutex_unlock(&table->lock);
+}
+
+/**
+ * rvu_npc_exact_init - initialize exact match table
+ * @rvu: resource virtualization unit.
+ *
+ * Initialize HW and SW resources to manage 4way-2K table and fully
+ * associative 32-entry mcam table.
+ * Return: 0 upon success.
+ */
+int rvu_npc_exact_init(struct rvu *rvu)
+{
+ u64 bcast_mcast_val, bcast_mcast_mask;
+ struct npc_exact_table *table;
+ u64 exact_val, exact_mask;
+ u64 chan_val, chan_mask;
+ u8 cgx_id, lmac_id;
+ u32 *drop_mcam_idx;
+ u16 max_lmac_cnt;
+ u64 npc_const3;
+ int table_size;
+ int blkaddr;
+ u16 pcifunc;
+ int err, i;
+ u64 cfg;
+ bool rc;
+
+ /* Read NPC_AF_CONST3 and check whether exact
+ * match functionality is present
+ */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check whether the exact match feature is supported */
+ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62))) {
+ dev_info(rvu->dev, "%s: No support for exact match support\n",
+ __func__);
+ return 0;
+ }
+
+ /* Check if kex profile has enabled EXACT match nibble */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
+ dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
+ __func__);
+ return 0;
+ }
+
+ /* Set capability to true */
+ rvu->hw->cap.npc_exact_match_enabled = true;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated memory for exact match table\n", __func__);
+ rvu->hw->table = table;
+
+ /* Read table size, ways and depth */
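+ /* NPC_AF_CONST3 layout as used here: bits [31:24] hold the 4-way table
+ * depth, bits [19:16] the number of ways and bits [15:0] the fully
+ * associative CAM depth.
+ */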
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+
+ dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+ __func__, table->mem_table.ways, table->cam_table.depth);
+
+ /* Check that the table depth is a power of 2.
+ * TODO: why is __builtin_popcount() not working?
+ */
+ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
+ dev_err(rvu->dev,
+ "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
+ __func__, table->mem_table.depth);
+ return -EINVAL;
+ }
+
+ table_size = table->mem_table.depth * table->mem_table.ways;
+
+ /* Allocate bitmap for 4way 2K table */
+ table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
+ sizeof(long), GFP_KERNEL);
+ if (!table->mem_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
+
+ /* Allocate bitmap for 32 entry mcam */
+ table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
+
+ if (!table->cam_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
+
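+ /* Every entry, whether it lands in the 4-way table or in the CAM,
+ * gets a unique sequence id; size the id bitmap accordingly.
+ */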
+ table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
+ table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
+ sizeof(long), GFP_KERNEL);
+
+ if (!table->id_bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
+ __func__, table->tot_ids);
+
+ /* Initialize list heads for npc_exact_table entries.
+ * These lists are used by debugfs to show entries in
+ * the exact match table.
+ */
+ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
+ INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
+
+ INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
+ INIT_LIST_HEAD(&table->lhead_gbl);
+
+ mutex_init(&table->lock);
+
+ rvu_exact_config_secret_key(rvu);
+ rvu_exact_config_search_key(rvu);
+
+ rvu_exact_config_table_mask(rvu);
+ rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
+
+ /* - No drop rule for LBK
+ * - Drop rules for SDP and each LMAC.
+ */
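+ /* Drop rules must match only packets whose DMAC missed the exact
+ * match lookup, i.e. the EXACT_RESULT hit bit is clear.
+ */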
+ exact_val = !NPC_EXACT_RESULT_HIT;
+ exact_mask = NPC_EXACT_RESULT_HIT;
+
+ /* nibble - 3 2 1 0
+ * L3B L3M L2B L2M
+ */
+ bcast_mcast_val = 0b0000;
+ bcast_mcast_mask = 0b0011;
+
+ /* Install SDP drop rule */
+ drop_mcam_idx = &table->num_drop_rules;
+
+ max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
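+ /* Walk the PF to CGX/LMAC map and install one drop rule per LMAC
+ * mapped to a PF; unmapped slots (0xFF) are skipped.
+ */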
+ for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ continue;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
+ __func__, chan_val, chan_mask, *drop_mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Filter rules are only for PF */
+ pcifunc = RVU_PFFUNC(i, 0);
+
+ dev_dbg(rvu->dev,
+ "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
+ __func__, cgx_id, lmac_id, chan_val, chan_mask);
+
+ rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
+ chan_val, chan_mask, pcifunc);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
+ __func__, cgx_id, lmac_id, chan_val);
+ return -EINVAL;
+ }
+
+ err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
+ &table->counter_idx[*drop_mcam_idx],
+ chan_val, chan_mask,
+ exact_val, exact_mask,
+ bcast_mcast_val, bcast_mcast_mask);
+ if (err) {
+ dev_err(rvu->dev,
+ "failed to configure drop rule (cgx=%d lmac=%d)\n",
+ cgx_id, lmac_id);
+ return err;
+ }
+
+ (*drop_mcam_idx)++;
+ }
+
+ dev_info(rvu->dev, "initialized exact match table successfully\n");
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
new file mode 100644
index 000000000000..3efeb09c58de
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_HASH_H
+#define __RVU_NPC_HASH_H
+
+#define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1
+#define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4
+#define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7
+
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+
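+/* Field layout produced by KEX_LD_CFG_USE_HASH(): use_hash at bit 20,
+ * bytesm1 at bit 16, hdr_ofs at bit 8, ena at bit 7, flags_ena at bit 6,
+ * key_ofs in bits [5:0].
+ */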
+#define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \
+ ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+#define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \
+ (((hdr_ofs) << 32) | ((bytesm1) << 16) | \
+ ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \
+ ((ltype_match) << 4) | ((ltype_mask) & 0xF))
+
+#define SET_KEX_LD_HASH(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg)
+
+#define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+
+#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+
+struct npc_mcam_kex_hash {
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */
+ u64 hash[NPC_MAX_INTF][NPC_MAX_HASH];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+} __packed;
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask);
+void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+ u64 *secret_key, u8 intf, u8 hash_idx);
+
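+/* Default hash profile: for LC/LT_LC_IP6 on both RX and TX, hash the
+ * 16-byte IPv6 source (header offset 0x8) and destination (offset 0x18)
+ * addresses with full 64-bit masks; RESULT_CTRL keeps a full 32-bit
+ * result mask with a zero offset.
+ */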
+static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ .lid_lt_ld_hash_en = {
+ [NIX_INTF_RX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+ },
+
+ .hash = {
+ [NIX_INTF_RX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+
+ [NIX_INTF_TX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+ },
+
+ .hash_mask = {
+ [NIX_INTF_RX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+ },
+
+ .hash_ctrl = {
+ [NIX_INTF_RX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+ },
+};
+
+/* If exact match table support is enabled, enable drop rules */
+#define NPC_MCAM_DROP_RULE_MAX 30
+#define NPC_MCAM_SDP_DROP_RULE_IDX 0
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+enum npc_exact_opc_type {
+ NPC_EXACT_OPC_MEM,
+ NPC_EXACT_OPC_CAM,
+};
+
+struct npc_exact_table_entry {
+ struct list_head list;
+ struct list_head glist;
+ u32 seq_id; /* Sequence number of entry */
+ u32 index; /* Mem table or cam table index */
+ u32 mcam_idx;
+ /* Mcam index. This is valid only if "cmd" field is false */
+ enum npc_exact_opc_type opc_type;
+ u16 chan;
+ u16 pcifunc;
+ u8 ways;
+ u8 mac[ETH_ALEN];
+ u8 ctype;
+ u8 cgx_id;
+ u8 lmac_id;
+ bool cmd; /* Was this entry added via an ethtool command? */
+};
+
+struct npc_exact_table {
+ struct mutex lock; /* entries update lock */
+ unsigned long *id_bmap;
+ int num_drop_rules;
+ u32 tot_ids;
+ u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX];
+ u16 counter_idx[NPC_MCAM_DROP_RULE_MAX];
+ bool promisc_mode[NPC_MCAM_DROP_RULE_MAX];
+ struct {
+ int ways;
+ int depth;
+ unsigned long *bmap;
+ u64 mask; /* Masks before hash calculation */
+ u16 hash_mask; /* 11 bits for hash mask */
+ u16 hash_offset; /* 11 bits offset */
+ } mem_table;
+
+ struct {
+ int depth;
+ unsigned long *bmap;
+ } cam_table;
+
+ struct {
+ bool valid;
+ u16 chan_val;
+ u16 chan_mask;
+ u16 pcifunc;
+ u8 drop_rule_idx;
+ } drop_rule_map[NPC_MCAM_DROP_RULE_MAX];
+
+#define NPC_EXACT_TBL_MAX_WAYS 4
+
+ struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS];
+ int mem_tbl_entry_cnt;
+
+ struct list_head lhead_cam_tbl_entry;
+ int cam_tbl_entry_cnt;
+
+ struct list_head lhead_gbl;
+};
+
+bool rvu_npc_exact_has_match_table(struct rvu *rvu);
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
+int rvu_npc_exact_init(struct rvu *rvu);
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_disable_feature(struct rvu *rvu);
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx);
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc);
+#endif /* RVU_NPC_HASH_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 22cd751613cd..77a9ade91f3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -565,7 +565,13 @@
#define NPC_AF_PCK_DEF_OIP4 (0x00620)
#define NPC_AF_PCK_DEF_OIP6 (0x00630)
#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3)
+#define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3)
#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4)
+#define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3)
#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
@@ -599,6 +605,15 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3)
+#define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_RESULT_CTL(a)(0x680 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3)
+
#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
u64 offset; \
\
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index fb8db5888d2f..d686c7b6252f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -632,6 +632,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
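+ /* Program the TL3_TL2 link config only at the scheduler level
+ * that the AF reported via link_cfg_lvl.
+ */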
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -641,11 +647,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
-
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
@@ -1591,6 +1598,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+
+ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ce2766317c0b..b28029cc4316 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -195,6 +195,7 @@ struct otx2_hw {
u16 sqb_size;
/* NIX */
+ u8 txschq_link_cfg_lvl;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -314,8 +315,8 @@ struct otx2_flow_config {
#define OTX2_VF_VLAN_TX_INDEX 1
u16 max_flows;
u8 dmacflt_max_flows;
- u8 *bmap_to_dmacindex;
- unsigned long dmacflt_bmap;
+ u32 *bmap_to_dmacindex;
+ unsigned long *dmacflt_bmap;
struct list_head flow_list;
};
@@ -895,9 +896,9 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
index 2ec800f741d8..80d853b343f9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -8,7 +8,7 @@
#include "otx2_common.h"
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
- u8 *dmac_index)
+ u32 *dmac_index)
{
struct cgx_mac_addr_add_req *req;
struct cgx_mac_addr_add_rsp *rsp;
@@ -35,9 +35,10 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
struct cgx_mac_addr_set_or_get *req;
+ struct cgx_mac_addr_set_or_get *rsp;
int err;
mutex_lock(&pf->mbox.lock);
@@ -48,16 +49,31 @@ static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
return -ENOMEM;
}
+ req->index = *dmac_index;
+
ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_set_or_get *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *dmac_index = rsp->index;
+out:
mutex_unlock(&pf->mbox.lock);
return err;
}
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
- u8 *dmacindex;
+ u32 *dmacindex;
/* Store dmacindex returned by CGX/RPM driver which will
* be used for macaddr update/remove
@@ -65,13 +81,13 @@ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_add_pfmac(pf);
+ return otx2_dmacflt_add_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_add(pf, mac, dmacindex);
}
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
- u8 dmac_index)
+ u32 dmac_index)
{
struct cgx_mac_addr_del_req *req;
int err;
@@ -91,9 +107,9 @@ static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
- struct msg_req *req;
+ struct cgx_mac_addr_reset_req *req;
int err;
mutex_lock(&pf->mbox.lock);
@@ -102,6 +118,7 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
+ req->index = dmac_index;
err = otx2_sync_mbox_msg(&pf->mbox);
@@ -110,12 +127,12 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
}
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
- u8 bit_pos)
+ u32 bit_pos)
{
- u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_remove_pfmac(pf);
+ return otx2_dmacflt_remove_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_remove(pf, mac, dmacindex);
}
@@ -144,6 +161,12 @@ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
rsp = (struct cgx_max_dmac_entries_get_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
out:
@@ -151,9 +174,10 @@ out:
return err;
}
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
struct cgx_mac_addr_update_req *req;
+ struct cgx_mac_addr_update_rsp *rsp;
int rc;
mutex_lock(&pf->mbox.lock);
@@ -167,8 +191,19 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
ether_addr_copy(req->mac_addr, mac);
req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ /* check the response and change index */
+
rc = otx2_sync_mbox_msg(&pf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_update_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
+
+out:
mutex_unlock(&pf->mbox.lock);
return rc;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 2dd192b5e4e0..709fc0114fbd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,7 +18,7 @@ struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
u32 location;
- u16 entry;
+ u32 entry;
bool is_vf;
u8 rss_ctx_id;
#define DMAC_FILTER_RULE BIT(0)
@@ -232,6 +232,9 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
return 0;
}
+/* TODO: revisit this size */
+#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
+
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg;
@@ -242,6 +245,12 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
if (!pfvf->flow_cfg)
return -ENOMEM;
+ pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pfvf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
flow_cfg->max_flows = 0;
@@ -259,6 +268,12 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->flow_cfg)
return -ENOMEM;
+ pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
/* Allocate bare minimum number of MCAM entries needed for
@@ -284,7 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->flow_cfg->bmap_to_dmacindex =
- devm_kzalloc(pf->dev, sizeof(u8) *
+ devm_kzalloc(pf->dev, sizeof(u32) *
pf->flow_cfg->dmacflt_max_flows,
GFP_KERNEL);
@@ -355,7 +370,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
- if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
+ if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -438,7 +453,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
- !bitmap_empty(&flow_cfg->dmacflt_bmap,
+ !bitmap_empty(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
@@ -1010,7 +1025,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
otx2_add_flow_to_list(pfvf, pf_mac);
pfvf->flow_cfg->nr_flows++;
- set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ set_bit(0, pfvf->flow_cfg->dmacflt_bmap);
return 0;
}
@@ -1064,7 +1079,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
- if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ if (bitmap_full(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows)) {
netdev_warn(pfvf->netdev,
"Can't insert the rule %d as max allowed dmac filters are %d\n",
@@ -1078,17 +1093,17 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
}
/* Install PF mac address to DMAC filter list */
- if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ if (!test_bit(0, flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
flow->rule_type |= DMAC_FILTER_RULE;
- flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
- set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ set_bit(flow->entry, flow_cfg->dmacflt_bmap);
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
@@ -1154,11 +1169,12 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
0);
- clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
found = true;
} else {
ether_addr_copy(eth_hdr->h_dest,
pfvf->netdev->dev_addr);
+
otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
}
break;
@@ -1194,12 +1210,12 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
flow->entry);
- clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
/* If all dmac filters are removed delete macfilter with
* interface mac address and configure CGX/RPM block in
* promiscuous mode
*/
- if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ if (bitmap_weight(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9106c359e64c..9376d0e62914 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1120,7 +1120,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
+ if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 28b19945d716..e64318c110fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -28,6 +28,9 @@
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL
+#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
+#define CN10K_MAX_BURST_SIZE 8453888ULL
+
/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
- u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ u32 *burst_exp, u32 *burst_mantissa)
{
+ int max_burst, max_mantissa;
unsigned int tmp;
+ if (is_dev_otx2(nic->pdev)) {
+ max_burst = MAX_BURST_SIZE;
+ max_mantissa = MAX_BURST_MANTISSA;
+ } else {
+ max_burst = CN10K_MAX_BURST_SIZE;
+ max_mantissa = CN10K_MAX_BURST_MANTISSA;
+ }
+
/* Burst is calculated as
* ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
* Max supported burst size is 130,816 bytes.
*/
- burst = min_t(u32, burst, MAX_BURST_SIZE);
+ burst = min_t(u32, burst, max_burst);
if (burst) {
*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
tmp = burst - rounddown_pow_of_two(burst);
- if (burst < MAX_BURST_MANTISSA)
+ if (burst < max_mantissa)
*burst_mantissa = tmp * 2;
else
*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
} else {
*burst_exp = MAX_BURST_EXPONENT;
- *burst_mantissa = MAX_BURST_MANTISSA;
+ *burst_mantissa = max_mantissa;
}
}
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
u32 *mantissa, u32 *div_exp)
{
- unsigned int tmp;
+ u64 tmp;
/* Rate calculation by hardware
*
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
}
}
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+ u64 maxrate, u32 burst)
{
- struct otx2_hw *hw = &nic->hw;
- struct nix_txschq_config *req;
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
+ u64 regval = 0;
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
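+ /* CN10K silicon has wider burst mantissa/exponent bit-fields (see the
+ * CN10K_TLX_* masks), so build the shaper register value per silicon type.
+ */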
+ if (is_dev_otx2(nic->pdev)) {
+ regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ } else {
+ regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ }
+
+ return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+ u32 burst, u64 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
int txschq, err;
/* All SQs share the same TL4, so pick the first scheduler */
txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
- /* Get exponent and mantissa values from the desired rate */
- otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
- otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
req->lvl = NIX_TXSCH_LVL_TL4;
req->num_regs = 1;
req->reg[0] = NIX_AF_TL4X_PIR(txschq);
- req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
- FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
- FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
- FIELD_PREP(TLX_RATE_EXPONENT, exp) |
- FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
err = otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock);
@@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry;
- u32 rate;
+ u64 rate;
int err;
err = otx2_tc_validate_flow(nic, actions, extack);
@@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
}
/* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8;
- rate = max_t(u32, rate / 1000000, 1);
+ rate = max_t(u64, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err)
return err;
@@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_DPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_DPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ if (flow_mask->dport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_SPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_SPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+ if (flow_mask->sport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
}
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 3baeafc40807..a18e8efd0f1e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -624,7 +624,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->subdc = NIX_SUBDC_EXT;
if (skb_shinfo(skb)->gso_size) {
ext->lso = 1;
- ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_sb = skb_tcp_all_headers(skb);
ext->lso_mps = skb_shinfo(skb)->gso_size;
/* Only TSOv4 and TSOv6 GSO offloads are supported */
@@ -931,7 +931,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
* be correctly modified, hence don't offload such TSO segments.
*/
- payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ payload_len = skb->len - skb_tcp_all_headers(skb);
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index c88e8a436029..fbe62bbfb789 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -21,7 +21,7 @@
#define OTX2_HEAD_ROOM OTX2_ALIGN
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
-#define OTX2_MIN_MTU 64
+#define OTX2_MIN_MTU 60
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b6f20e2034c6..f2f7663c3d10 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -8,6 +8,7 @@ config PRESTERA
depends on NET_SWITCHDEV && VLAN_8021Q
depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
+ select PHYLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 6f754ae2a584..2f84d0fb4094 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -7,6 +7,7 @@
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/phylink.h>
#include <net/devlink.h>
#include <uapi/linux/if_ether.h>
@@ -20,6 +21,26 @@ struct prestera_fw_rev {
u16 sub;
};
+struct prestera_flood_domain {
+ struct prestera_switch *sw;
+ struct list_head flood_domain_port_list;
+ u32 idx;
+};
+
+struct prestera_mdb_entry {
+ struct prestera_switch *sw;
+ struct prestera_flood_domain *flood_domain;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct prestera_flood_domain_port {
+ struct prestera_flood_domain *flood_domain;
+ struct net_device *dev;
+ struct list_head flood_domain_port_node;
+ u16 vid;
+};
+
struct prestera_port_stats {
u64 good_octets_received;
u64 bad_octets_received;
@@ -72,6 +93,7 @@ struct prestera_lag {
struct prestera_flow_block;
struct prestera_port_mac_state {
+ bool valid;
u32 mode;
u32 speed;
bool oper;
@@ -107,7 +129,8 @@ struct prestera_port_phy_config {
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
- struct prestera_flow_block *flow_block;
+ struct prestera_flow_block *ingress_flow_block;
+ struct prestera_flow_block *egress_flow_block;
struct devlink_port dl_port;
struct list_head lag_member;
struct prestera_lag *lag;
@@ -130,6 +153,13 @@ struct prestera_port {
struct prestera_port_phy_config cfg_phy;
struct prestera_port_mac_state state_mac;
struct prestera_port_phy_state state_phy;
+
+ struct phylink_config phy_config;
+ struct phylink *phy_link;
+ struct phylink_pcs phylink_pcs;
+
+ /* protects state_mac */
+ spinlock_t state_mac_lock;
};
struct prestera_device {
@@ -270,6 +300,7 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct device_node *np;
struct prestera_router *router;
struct prestera_lag *lags;
struct prestera_counter *counter;
@@ -320,6 +351,8 @@ void prestera_router_fini(struct prestera_switch *sw);
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+struct prestera_switch *prestera_switch_get(struct net_device *dev);
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg);
@@ -330,6 +363,10 @@ struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
void prestera_queue_work(struct work_struct *work);
+int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
@@ -337,9 +374,30 @@ bool prestera_netdev_check(const struct net_device *dev);
int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
bool prestera_port_is_lag_member(const struct prestera_port *port);
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id);
struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
u16 prestera_port_lag_id(const struct prestera_port *port);
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid);
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry);
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw);
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain);
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid);
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port);
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid);
+
#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 3a141f2db812..3d4b85f2d541 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -61,6 +61,7 @@ struct prestera_acl_ruleset {
u32 index;
u16 pcl_id;
bool offload;
+ bool ingress;
};
struct prestera_acl_vtcam {
@@ -70,6 +71,7 @@ struct prestera_acl_vtcam {
u32 id;
bool is_keymask_set;
u8 lookup;
+ u8 direction;
};
static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
@@ -93,23 +95,36 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
.automatic_shrinking = true,
};
-int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
{
- static const u32 client_map[] = {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
+ static const u32 ingress_client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2
};
- if (chain_index >= ARRAY_SIZE(client_map))
+ if (!ingress) {
+ /* prestera supports only one chain on egress */
+ if (chain_index > 0)
+ return -EINVAL;
+
+ *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
+ return 0;
+ }
+
+ if (chain_index >= ARRAY_SIZE(ingress_client_map))
return -EINVAL;
- *client = client_map[chain_index];
+ *client = ingress_client_map[chain_index];
return 0;
}
-static bool prestera_acl_chain_is_supported(u32 chain_index)
+static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress)
{
+ if (!ingress)
+ /* prestera supports only one chain on egress */
+ return chain_index == 0;
+
return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
}
@@ -122,7 +137,7 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
u32 uid = 0;
int err;
- if (!prestera_acl_chain_is_supported(chain_index))
+ if (!prestera_acl_chain_is_supported(chain_index, block->ingress))
return ERR_PTR(-EINVAL);
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
@@ -130,6 +145,7 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
+ ruleset->ingress = block->ingress;
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
@@ -172,13 +188,18 @@ int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl_iface iface;
u32 vtcam_id;
+ int dir;
int err;
+ dir = ruleset->ingress ?
+ PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS;
+
if (ruleset->offload)
return -EEXIST;
err = prestera_acl_vtcam_id_get(ruleset->acl,
ruleset->ht_key.chain_index,
+ dir,
ruleset->keymask, &vtcam_id);
if (err)
goto err_vtcam_create;
@@ -719,7 +740,7 @@ vtcam_found:
return 0;
}
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id)
{
struct prestera_acl_vtcam *vtcam;
@@ -731,7 +752,8 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
* fine for now
*/
list_for_each_entry(vtcam, &acl->vtcam_list, list) {
- if (lookup != vtcam->lookup)
+ if (lookup != vtcam->lookup ||
+ dir != vtcam->direction)
continue;
if (!keymask && !vtcam->is_keymask_set) {
@@ -752,7 +774,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return -ENOMEM;
err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
- PRESTERA_HW_VTCAM_DIR_INGRESS);
+ dir);
if (err) {
kfree(vtcam);
@@ -765,6 +787,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return 0;
}
+ vtcam->direction = dir;
vtcam->id = new_vtcam_id;
vtcam->lookup = lookup;
if (keymask) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index f963e1e0c0f0..03fc5b9dc925 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -199,9 +199,9 @@ void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
-int prestera_acl_chain_to_client(u32 chain_index, u32 *client);
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 40d5b89573bb..1da7ff889417 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -521,6 +521,9 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
ecmd->base.speed = SPEED_UNKNOWN;
ecmd->base.duplex = DUPLEX_UNKNOWN;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_get(port->phy_link, ecmd);
+
ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
@@ -648,6 +651,9 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev,
u8 adver_fec;
int err;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_set(port->phy_link, ecmd);
+
err = prestera_port_type_set(ecmd, port);
if (err)
return err;
@@ -782,28 +788,6 @@ static int prestera_ethtool_nway_reset(struct net_device *dev)
return -EINVAL;
}
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt)
-{
- struct prestera_port_mac_state *smac = &port->state_mac;
-
- smac->oper = evt->data.mac.oper;
-
- if (smac->oper) {
- smac->mode = evt->data.mac.mode;
- smac->speed = evt->data.mac.speed;
- smac->duplex = evt->data.mac.duplex;
- smac->fc = evt->data.mac.fc;
- smac->fec = evt->data.mac.fec;
- } else {
- smac->mode = PRESTERA_MAC_MODE_MAX;
- smac->speed = SPEED_UNKNOWN;
- smac->duplex = DUPLEX_UNKNOWN;
- smac->fc = 0;
- smac->fec = 0;
- }
-}
-
const struct ethtool_ops prestera_ethtool_ops = {
.get_drvinfo = prestera_ethtool_get_drvinfo,
.get_link_ksettings = prestera_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
index 9eb18e99dea6..bd5600886bc6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -11,7 +11,4 @@ struct prestera_port;
extern const struct ethtool_ops prestera_ethtool_ops;
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt);
-
#endif /* _PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index 05c3ad98eba9..2262693bd5cf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -75,7 +75,9 @@ static void prestera_flow_block_destroy(void *cb_priv)
}
static struct prestera_flow_block *
-prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
+prestera_flow_block_create(struct prestera_switch *sw,
+ struct net *net,
+ bool ingress)
{
struct prestera_flow_block *block;
@@ -87,6 +89,7 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
+ block->ingress = ingress;
return block;
}
@@ -165,7 +168,8 @@ static int prestera_flow_block_unbind(struct prestera_flow_block *block,
static struct prestera_flow_block *
prestera_flow_block_get(struct prestera_switch *sw,
struct flow_block_offload *f,
- bool *register_block)
+ bool *register_block,
+ bool ingress)
{
struct prestera_flow_block *block;
struct flow_block_cb *block_cb;
@@ -173,7 +177,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_flow_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net, ingress);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -209,7 +213,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -217,7 +221,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
bool register_block;
int err;
- block = prestera_flow_block_get(sw, f, &register_block);
+ block = prestera_flow_block_get(sw, f, &register_block, ingress);
if (IS_ERR(block))
return PTR_ERR(block);
@@ -232,7 +236,11 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
}
- port->flow_block = block;
+ if (ingress)
+ port->ingress_flow_block = block;
+ else
+ port->egress_flow_block = block;
+
return 0;
err_block_bind:
@@ -242,7 +250,7 @@ err_block_bind:
}
static void prestera_setup_flow_block_unbind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -266,24 +274,38 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
list_del(&block_cb->driver_list);
}
error:
- port->flow_block = NULL;
+ if (ingress)
+ port->ingress_flow_block = NULL;
+ else
+ port->egress_flow_block = NULL;
}
-int prestera_flow_block_setup(struct prestera_port *port,
- struct flow_block_offload *f)
+static int prestera_setup_flow_block_clsact(struct prestera_port *port,
+ struct flow_block_offload *f,
+ bool ingress)
{
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
f->driver_block_list = &prestera_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return prestera_setup_flow_block_bind(port, f);
+ return prestera_setup_flow_block_bind(port, f, ingress);
case FLOW_BLOCK_UNBIND:
- prestera_setup_flow_block_unbind(port, f);
+ prestera_setup_flow_block_unbind(port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
}
}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return prestera_setup_flow_block_clsact(port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return prestera_setup_flow_block_clsact(port, f, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
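
The new block->ingress flag set above is what later steers rule installation in prestera_flower.c: it selects the counter client for delayed HW stats and the policer direction. As a purely illustrative, hedged sketch (not the driver's real helper), the chain-to-client selection implied by the PRESTERA_HW_COUNTER_CLIENT_* values introduced in prestera_hw.h could look like this:

/* Hypothetical sketch only; the actual prestera_acl_chain_to_client()
 * implementation may differ.
 */
static int sketch_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
{
	static const u32 ingress_clients[] = {
		PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
		PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
		PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2,
	};

	if (!ingress) {
		/* All egress rules share a single counter lookup. */
		*client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
		return 0;
	}

	if (chain_index >= ARRAY_SIZE(ingress_clients))
		return -EINVAL;

	*client = ingress_clients[chain_index];
	return 0;
}
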
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 6550278b166a..0c9e13263261 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -23,6 +23,7 @@ struct prestera_flow_block {
struct flow_block_cb *block_cb;
struct list_head template_list;
unsigned int rule_count;
+ bool ingress;
};
int prestera_flow_block_setup(struct prestera_port *port,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index d43e503c644f..19d3b55c578e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -79,7 +79,7 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
/* setup counter first */
rule->re_arg.count.valid = true;
- err = prestera_acl_chain_to_client(chain_index,
+ err = prestera_acl_chain_to_client(chain_index, block->ingress,
&rule->re_arg.count.client);
if (err)
return err;
@@ -116,7 +116,7 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
rule->re_arg.police.rate =
act->police.rate_bytes_ps;
rule->re_arg.police.burst = act->police.burst;
- rule->re_arg.police.ingress = true;
+ rule->re_arg.police.ingress = block->ingress;
break;
case FLOW_ACTION_GOTO:
err = prestera_flower_parse_goto_action(block, rule,
@@ -138,7 +138,8 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
struct flow_cls_offload *f,
struct prestera_flow_block *block)
-{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct prestera_acl_match *r_match = &rule->re_key.match;
struct prestera_port *port;
struct net_device *ingress_dev;
@@ -167,24 +168,24 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
}
port = netdev_priv(ingress_dev);
- mask = htons(0x1FFF);
- key = htons(port->hw_id);
+ mask = htons(0x1FFF << 3);
+ key = htons(port->hw_id << 3);
rule_match_set(r_match->key, SYS_PORT, key);
rule_match_set(r_match->mask, SYS_PORT, mask);
- mask = htons(0x1FF);
+ mask = htons(0x3FF);
key = htons(port->dev_id);
rule_match_set(r_match->key, SYS_DEV, key);
rule_match_set(r_match->mask, SYS_DEV, mask);
return 0;
-
}
static int prestera_flower_parse(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_cls_offload *f)
-{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = f_rule->match.dissector;
struct prestera_acl_match *r_match = &rule->re_key.match;
__be16 n_proto_mask = 0;
@@ -202,6 +203,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ICMP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
@@ -301,6 +303,29 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
}
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
+ struct flow_match_ports_range match;
+ __be32 tp_key, tp_mask;
+
+ flow_rule_match_ports_range(f_rule, &match);
+
+ /* src port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.src) |
+ (ntohs(match.key->tp_max.src) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.src) |
+ (ntohs(match.mask->tp_max.src) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);
+
+ /* dst port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.dst) |
+ (ntohs(match.key->tp_max.dst) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
+ (ntohs(match.mask->tp_max.dst) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
+ }
+
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
@@ -397,7 +422,6 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
prestera_acl_rule_destroy(rule);
}
prestera_acl_ruleset_put(ruleset);
-
}
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
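
The FLOW_DISSECTOR_KEY_PORTS_RANGE handling above packs each 16-bit min/max port pair into one 32-bit match word: the minimum lands in the low 16 bits, the maximum in the high 16 bits, and the result is stored big-endian. A small stand-alone illustration of that packing (the helper name is made up for this sketch):

#include <arpa/inet.h>	/* htonl(), ntohs() */
#include <stdint.h>

/* Mirrors the tp_key/tp_mask construction in prestera_flower_parse();
 * min_be/max_be are the port bounds in network byte order, as
 * flow_rule_match_ports_range() provides them.
 */
static uint32_t pack_port_range(uint16_t min_be, uint16_t max_be)
{
	return htonl(ntohs(min_be) | ((uint32_t)ntohs(max_be) << 16));
}

For example, a source range of 1000-2000 gives 0x07d003e8 in host order before the final byte swap.
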
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 79fd3cac539d..962d7e0c0cb5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -60,6 +60,14 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703,
+
+ PRESTERA_CMD_TYPE_MDB_CREATE = 0x704,
+ PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705,
+
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
@@ -185,6 +193,12 @@ struct prestera_fw_event_handler {
void *arg;
};
+enum {
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2,
+};
+
struct prestera_msg_cmd {
__le32 type;
};
@@ -627,6 +641,57 @@ struct prestera_msg_event_fdb {
u8 dest_type;
};
+struct prestera_msg_flood_domain_create_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_flood_domain_create_resp {
+ struct prestera_msg_ret ret;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+};
+
+struct prestera_msg_flood_domain_ports_reset_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_port {
+ union {
+ struct {
+ __le32 port_num;
+ __le32 dev_num;
+ };
+ __le16 lag_id;
+ };
+ __le16 vid;
+ __le16 port_type;
+};
+
+struct prestera_msg_mdb_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_mdb_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
static void prestera_hw_build_tests(void)
{
/* check requests */
@@ -654,10 +719,17 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -1531,7 +1603,7 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1549,7 +1621,7 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1567,56 +1639,6 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .param = {
- .flood = flood,
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val)
-{
- int err;
-
- if (port->sw->dev->fw_rev.maj <= 2) {
- if (!(mask & BR_FLOOD))
- return 0;
-
- return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD);
- }
-
- if (mask & BR_FLOOD) {
- err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD);
- if (err)
- goto err_uc_flood;
- }
-
- if (mask & BR_MCAST_FLOOD) {
- err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD);
- if (err)
- goto err_mc_flood;
- }
-
- return 0;
-
-err_mc_flood:
- prestera_hw_port_mc_flood_set(port, 0);
-err_uc_flood:
- if (mask & BR_FLOOD)
- prestera_hw_port_uc_flood_set(port, 0);
-
- return err;
-}
-
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -2244,3 +2266,133 @@ int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
&req.cmd, sizeof(req));
}
+
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_create_resp resp;
+ struct prestera_msg_flood_domain_create_req req;
+ int err;
+
+ err = prestera_cmd_ret(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd,
+ sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ domain->idx = __le32_to_cpu(resp.flood_domain_idx);
+
+ return 0;
+}
+
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ struct prestera_msg_flood_domain_ports_set_req *req;
+ struct prestera_msg_flood_domain_port *ports;
+ struct prestera_switch *sw = domain->sw;
+ struct prestera_port *port;
+ u32 ports_num = 0;
+ int buf_size;
+ void *buff;
+ u16 lag_id;
+ int err;
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node)
+ ports_num++;
+
+ if (!ports_num)
+ return -EINVAL;
+
+ buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
+
+ buff = kmalloc(buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ ports = buff + sizeof(*req);
+
+ req->flood_domain_idx = __cpu_to_le32(domain->idx);
+ req->ports_num = __cpu_to_le32(ports_num);
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node) {
+ if (netif_is_lag_master(flood_domain_port->dev)) {
+ if (prestera_lag_id(sw, flood_domain_port->dev,
+ &lag_id)) {
+ kfree(buff);
+ return -EINVAL;
+ }
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
+ ports->lag_id = __cpu_to_le16(lag_id);
+ } else {
+ port = prestera_port_dev_lower_find(flood_domain_port->dev);
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
+ ports->dev_num = __cpu_to_le32(port->dev_id);
+ ports->port_num = __cpu_to_le32(port->hw_id);
+ }
+
+ ports->vid = __cpu_to_le16(flood_domain_port->vid);
+
+ ports++;
+ }
+
+ err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
+ &req->cmd, buf_size);
+
+ kfree(buff);
+
+ return err;
+}
+
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_ports_reset_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_create_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd,
+ sizeof(req));
+}
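
The ports-set request built in prestera_hw_flood_domain_ports_set() above is a variable-length message: one fixed header immediately followed by ports_num port descriptors in the same allocation, which is why the BUILD_BUG_ON() checks pin the header to 12 bytes and each port entry to 12 bytes. A hedged, simplified sketch of the same sizing logic (the struct names here are stand-ins, not the driver's):

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the real message structs; both are 12 bytes, matching the
 * BUILD_BUG_ON() checks in prestera_hw_build_tests().
 */
struct ports_set_req_hdr { uint32_t cmd; uint32_t flood_domain_idx; uint32_t ports_num; };
struct flood_domain_port_desc { uint32_t port_num; uint32_t dev_num; uint16_t vid; uint16_t port_type; };

/* Header plus ports_num back-to-back descriptors, exactly as the driver
 * computes buf_size before kmalloc().
 */
static size_t flood_domain_req_size(size_t ports_num)
{
	return sizeof(struct ports_set_req_hdr) +
	       ports_num * sizeof(struct flood_domain_port_desc);
}
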
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 579d9ba23ffc..56e043146dd2 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -123,9 +123,10 @@ enum prestera_hw_vtcam_direction_t {
};
enum {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
};
struct prestera_switch;
@@ -143,6 +144,8 @@ struct prestera_acl_hw_action_info;
struct prestera_acl_iface;
struct prestera_counter_stats;
struct prestera_iface;
+struct prestera_flood_domain;
+struct prestera_mdb_entry;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -178,8 +181,8 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *stats);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val);
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -301,4 +304,13 @@ int prestera_hw_policer_release(struct prestera_switch *sw,
int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
u32 policer_id, u64 cir, u32 cbs);
+/* Flood domain / MDB API */
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain);
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb);
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb);
+
#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 3952fdcc9240..ede3e53b9790 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
#include "prestera.h"
#include "prestera_hw.h"
@@ -35,6 +36,21 @@ void prestera_queue_work(struct work_struct *work)
queue_work(prestera_owq, work);
}
+int prestera_port_learning_set(struct prestera_port *port, bool learn)
+{
+ return prestera_hw_port_learning_set(port, learn);
+}
+
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_uc_flood_set(port, flood);
+}
+
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_mc_flood_set(port, flood);
+}
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
enum prestera_accept_frm_type frm_type;
@@ -91,6 +107,14 @@ struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
return port;
}
+struct prestera_switch *prestera_switch_get(struct net_device *dev)
+{
+ struct prestera_port *port;
+
+ port = prestera_port_dev_lower_find(dev);
+ return port ? port->sw : NULL;
+}
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg)
{
@@ -119,18 +143,24 @@ static int prestera_port_open(struct net_device *dev)
struct prestera_port_mac_config cfg_mac;
int err = 0;
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
- err = prestera_port_cfg_mac_read(port, &cfg_mac);
- if (!err) {
- cfg_mac.admin = true;
- err = prestera_port_cfg_mac_write(port, &cfg_mac);
- }
+ if (port->phy_link) {
+ phylink_start(port->phy_link);
} else {
- port->cfg_phy.admin = true;
- err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port,
+ &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
}
netif_start_queue(dev);
@@ -146,23 +176,259 @@ static int prestera_port_close(struct net_device *dev)
netif_stop_queue(dev);
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ if (port->phy_link) {
+ phylink_stop(port->phy_link);
+ phylink_disconnect_phy(port->phy_link);
err = prestera_port_cfg_mac_read(port, &cfg_mac);
if (!err) {
cfg_mac.admin = false;
prestera_port_cfg_mac_write(port, &cfg_mac);
}
} else {
- port->cfg_phy.admin = false;
- err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
}
return err;
}
+static void
+prestera_port_mac_state_cache_read(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ *state = port->state_mac;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static void
+prestera_port_mac_state_cache_write(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ port->state_mac = *state;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct prestera_port, phylink_pcs);
+}
+
+static void prestera_mac_config(struct phylink_config *config,
+ unsigned int an_mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void prestera_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(ndev);
+ struct prestera_port_mac_state state_mac;
+
+ /* Invalidate. Parameters will update on next link event. */
+ memset(&state_mac, 0, sizeof(state_mac));
+ state_mac.valid = false;
+ prestera_port_mac_state_cache_write(port, &state_mac);
+}
+
+static void prestera_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+}
+
+static struct phylink_pcs *
+prestera_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *dev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->phylink_pcs;
+}
+
+static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct prestera_port *port = container_of(pcs, struct prestera_port,
+ phylink_pcs);
+ struct prestera_port_mac_state smac;
+
+ prestera_port_mac_state_cache_read(port, &smac);
+
+ if (smac.valid) {
+ state->link = smac.oper ? 1 : 0;
+ /* AN is completed when the port is up */
+ state->an_complete = (smac.oper && port->autoneg) ? 1 : 0;
+ state->speed = smac.speed;
+ state->duplex = smac.duplex;
+ } else {
+ state->link = 0;
+ state->an_complete = 0;
+ }
+}
+
+static int prestera_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct prestera_port *port = prestera_pcs_to_port(pcs);
+ struct prestera_port_mac_config cfg_mac;
+ int err;
+
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (err)
+ return err;
+
+ cfg_mac.admin = true;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ cfg_mac.speed = SPEED_10000;
+ cfg_mac.inband = 0;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cfg_mac.speed = SPEED_2500;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cfg_mac.inband = 1;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ default:
+ cfg_mac.speed = SPEED_1000;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X;
+ break;
+ }
+
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void prestera_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ /* TODO: add 1000baseX AN restart support
+ * (Currently the FW has no support for 1000baseX AN restart, but it will
+ * in the future, so for now this function stays empty.)
+ */
+}
+
+static const struct phylink_mac_ops prestera_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = prestera_mac_select_pcs,
+ .mac_config = prestera_mac_config,
+ .mac_link_down = prestera_mac_link_down,
+ .mac_link_up = prestera_mac_link_up,
+};
+
+static const struct phylink_pcs_ops prestera_pcs_ops = {
+ .pcs_get_state = prestera_pcs_get_state,
+ .pcs_config = prestera_pcs_config,
+ .pcs_an_restart = prestera_pcs_an_restart,
+};
+
+static int prestera_port_sfp_bind(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct device_node *ports, *node;
+ struct fwnode_handle *fwnode;
+ struct phylink *phy_link;
+ int err;
+
+ if (!sw->np)
+ return 0;
+
+ ports = of_find_node_by_name(sw->np, "ports");
+
+ for_each_child_of_node(ports, node) {
+ int num;
+
+ err = of_property_read_u32(node, "prestera,port-num", &num);
+ if (err) {
+ dev_err(sw->dev->dev,
+ "device node %pOF has no valid prestera,port-num property: %d\n",
+ node, err);
+ goto out;
+ }
+
+ if (port->fp_id != num)
+ continue;
+
+ port->phylink_pcs.ops = &prestera_pcs_ops;
+
+ port->phy_config.dev = &port->dev->dev;
+ port->phy_config.type = PHYLINK_NETDEV;
+
+ fwnode = of_fwnode_handle(node);
+
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phy_config.supported_interfaces);
+
+ port->phy_config.mac_capabilities =
+ MAC_1000 | MAC_2500FD | MAC_10000FD;
+
+ phy_link = phylink_create(&port->phy_config, fwnode,
+ PHY_INTERFACE_MODE_INTERNAL,
+ &prestera_mac_ops);
+ if (IS_ERR(phy_link)) {
+ netdev_err(port->dev, "failed to create phylink\n");
+ err = PTR_ERR(phy_link);
+ goto out;
+ }
+
+ port->phy_link = phy_link;
+ break;
+ }
+
+out:
+ of_node_put(ports);
+ return err;
+}
+
+static int prestera_port_sfp_unbind(struct prestera_port *port)
+{
+ if (port->phy_link)
+ phylink_destroy(port->phy_link);
+
+ return 0;
+}
+
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -343,6 +609,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
port->id = id;
port->sw = sw;
+ spin_lock_init(&port->state_mac_lock);
+
err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
&port->fp_id);
if (err) {
@@ -357,8 +625,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
+ SET_NETDEV_DEV(dev, sw->dev->dev);
- netif_carrier_off(dev);
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP)
+ netif_carrier_off(dev);
dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
dev->min_mtu = sw->mtu_min;
@@ -409,7 +679,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
cfg_mac.admin = false;
cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
}
- cfg_mac.inband = false;
+ cfg_mac.inband = 0;
cfg_mac.speed = 0;
cfg_mac.duplex = DUPLEX_UNKNOWN;
cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
@@ -451,8 +721,13 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
prestera_devlink_port_set(port);
+ err = prestera_port_sfp_bind(port);
+ if (err)
+ goto err_sfp_bind;
+
return 0;
+err_sfp_bind:
err_register_netdev:
prestera_port_list_del(port);
err_port_init:
@@ -498,8 +773,10 @@ static int prestera_create_ports(struct prestera_switch *sw)
return 0;
err_port_create:
- list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list) {
+ prestera_port_sfp_unbind(port);
prestera_port_destroy(port);
+ }
return err;
}
@@ -507,25 +784,47 @@ err_port_create:
static void prestera_port_handle_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
+ struct prestera_port_mac_state smac;
+ struct prestera_port_event *pevt;
struct delayed_work *caching_dw;
struct prestera_port *port;
- port = prestera_find_port(sw, evt->port_evt.port_id);
- if (!port || !port->dev)
- return;
-
- caching_dw = &port->cached_hw_stats.caching_dw;
-
- prestera_ethtool_port_state_changed(port, &evt->port_evt);
-
if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ pevt = &evt->port_evt;
+ port = prestera_find_port(sw, pevt->port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ if (port->phy_link) {
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
+ phylink_mac_change(port->phy_link, true);
+ } else {
+ phylink_mac_change(port->phy_link, false);
+ }
+ prestera_port_mac_state_cache_write(port, &smac);
+ }
+
if (port->state_mac.oper) {
- netif_carrier_on(port->dev);
+ if (!port->phy_link)
+ netif_carrier_on(port->dev);
+
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
} else if (netif_running(port->dev) &&
netif_carrier_ok(port->dev)) {
- netif_carrier_off(port->dev);
+ if (!port->phy_link)
+ netif_carrier_off(port->dev);
+
if (delayed_work_pending(caching_dw))
cancel_delayed_work(caching_dw);
}
@@ -548,19 +847,20 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
struct device_node *base_mac_np;
- struct device_node *np;
- int ret;
+ int ret = 0;
- np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
- base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+ if (sw->np) {
+ base_mac_np = of_parse_phandle(sw->np, "base-mac-provider", 0);
+ if (base_mac_np) {
+ ret = of_get_mac_address(base_mac_np, sw->base_mac);
+ of_node_put(base_mac_np);
+ }
+ }
- ret = of_get_mac_address(base_mac_np, sw->base_mac);
- if (ret) {
+ if (!is_valid_ether_addr(sw->base_mac) || ret) {
eth_random_addr(sw->base_mac);
dev_info(prestera_dev(sw), "using random base mac address\n");
}
- of_node_put(base_mac_np);
- of_node_put(np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
@@ -585,6 +885,30 @@ static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
return NULL;
}
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id)
+{
+ struct prestera_lag *lag;
+ int free_id = -1;
+ int id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = prestera_lag_by_id(sw, id);
+ if (lag->member_count) {
+ if (lag->dev == lag_dev) {
+ *lag_id = id;
+ return 0;
+ }
+ } else if (free_id < 0) {
+ free_id = id;
+ }
+ }
+ if (free_id < 0)
+ return -ENOSPC;
+ *lag_id = free_id;
+ return 0;
+}
+
static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
struct net_device *lag_dev)
{
@@ -876,6 +1200,150 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
return notifier_from_errno(err);
}
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_flood_domain *flood_domain;
+ struct prestera_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ flood_domain = prestera_flood_domain_create(sw);
+ if (!flood_domain)
+ goto err_flood_domain_create;
+
+ mdb_entry->sw = sw;
+ mdb_entry->vid = vid;
+ mdb_entry->flood_domain = flood_domain;
+ ether_addr_copy(mdb_entry->addr, addr);
+
+ if (prestera_hw_mdb_create(mdb_entry))
+ goto err_mdb_hw_create;
+
+ return mdb_entry;
+
+err_mdb_hw_create:
+ prestera_flood_domain_destroy(flood_domain);
+err_flood_domain_create:
+ kfree(mdb_entry);
+err_mdb_alloc:
+ return NULL;
+}
+
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry)
+{
+ prestera_hw_mdb_destroy(mdb_entry);
+ prestera_flood_domain_destroy(mdb_entry->flood_domain);
+ kfree(mdb_entry);
+}
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw)
+{
+ struct prestera_flood_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->sw = sw;
+
+ if (prestera_hw_flood_domain_create(domain)) {
+ kfree(domain);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&domain->flood_domain_port_list);
+
+ return domain;
+}
+
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain)
+{
+ WARN_ON(!list_empty(&flood_domain->flood_domain_port_list));
+ WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain));
+ kfree(flood_domain);
+}
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ bool is_first_port_in_list = false;
+ int err;
+
+ flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL);
+ if (!flood_domain_port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ flood_domain_port->vid = vid;
+
+ if (list_empty(&flood_domain->flood_domain_port_list))
+ is_first_port_in_list = true;
+
+ list_add(&flood_domain_port->flood_domain_port_node,
+ &flood_domain->flood_domain_port_list);
+
+ flood_domain_port->flood_domain = flood_domain;
+ flood_domain_port->dev = dev;
+
+ if (!is_first_port_in_list) {
+ err = prestera_hw_flood_domain_ports_reset(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+ }
+
+ err = prestera_hw_flood_domain_ports_set(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+
+ return 0;
+
+err_prestera_mdb_port_create_hw:
+ list_del(&flood_domain_port->flood_domain_port_node);
+ kfree(flood_domain_port);
+err_port_alloc:
+ return err;
+}
+
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port)
+{
+ struct prestera_flood_domain *flood_domain = port->flood_domain;
+
+ list_del(&port->flood_domain_port_node);
+
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain));
+
+ if (!list_empty(&flood_domain->flood_domain_port_list))
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain));
+
+ kfree(port);
+}
+
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ list_for_each_entry(flood_domain_port,
+ &flood_domain->flood_domain_port_list,
+ flood_domain_port_node)
+ if (flood_domain_port->dev == dev &&
+ vid == flood_domain_port->vid)
+ return flood_domain_port;
+
+ return NULL;
+}
+
static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
{
sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
@@ -892,6 +1360,8 @@ static int prestera_switch_init(struct prestera_switch *sw)
{
int err;
+ sw->np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+
err = prestera_hw_switch_init(sw);
if (err) {
dev_err(prestera_dev(sw), "Failed to init Switch device\n");
@@ -992,6 +1462,7 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
+ of_node_put(sw->np);
}
int prestera_device_register(struct prestera_device *dev)
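
The MDB helpers added above tie each prestera_mdb_entry to its own flood domain; the switchdev code then attaches (netdev, vid) members to that domain, and every membership change re-pushes the full port list to the firmware (a reset followed by a set when the list was already populated). A hedged sketch of how these helpers compose (error handling trimmed; the real callers in prestera_switchdev.c do more):

/* Sketch only: combines the helpers introduced above. */
static int sketch_mdb_add_member(struct prestera_switch *sw,
				 const unsigned char *mc_addr, u16 vid,
				 struct net_device *member_dev)
{
	struct prestera_mdb_entry *mdb;

	/* Creates the HW MDB entry together with its backing flood domain. */
	mdb = prestera_mdb_entry_create(sw, mc_addr, vid);
	if (!mdb)
		return -ENOMEM;

	/* Adds (member_dev, vid) to the domain and re-syncs the whole port
	 * list to the firmware.
	 */
	return prestera_flood_domain_port_create(mdb->flood_domain,
						 member_dev, vid);
}
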
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 3754d8aec76d..58f4e44d5ad7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -389,8 +389,8 @@ static int __prestera_inetaddr_event(struct prestera_switch *sw,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
- netif_is_lag_port(dev) || netif_is_ovs_port(dev))
+ if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
+ netif_is_lag_port(dev))
return 0;
return __prestera_inetaddr_port_event(dev, event, extack);
@@ -588,6 +588,7 @@ err_router_lib_init:
void prestera_router_fini(struct prestera_switch *sw)
{
+ unregister_fib_notifier(&init_net, &sw->router->fib_nb);
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
rhashtable_destroy(&sw->router->kern_fib_cache_ht);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index b4599fe4ca8d..71cde97d85c8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -39,7 +39,10 @@ struct prestera_bridge {
struct net_device *dev;
struct prestera_switchdev *swdev;
struct list_head port_list;
+ struct list_head br_mdb_entry_list;
+ bool mrouter_exist;
bool vlan_enabled;
+ bool multicast_enabled;
u16 bridge_id;
};
@@ -48,8 +51,10 @@ struct prestera_bridge_port {
struct net_device *dev;
struct prestera_bridge *bridge;
struct list_head vlan_list;
+ struct list_head br_mdb_port_list;
refcount_t ref_count;
unsigned long flags;
+ bool mrouter;
u8 stp_state;
};
@@ -67,6 +72,20 @@ struct prestera_port_vlan {
u16 vid;
};
+struct prestera_br_mdb_port {
+ struct prestera_bridge_port *br_port;
+ struct list_head br_mdb_port_node;
+};
+
+/* Software representation of MDB table. */
+struct prestera_br_mdb_entry {
+ struct prestera_bridge *bridge;
+ struct prestera_mdb_entry *mdb;
+ struct list_head br_mdb_port_list;
+ struct list_head br_mdb_entry_node;
+ bool enabled;
+};
+
static struct workqueue_struct *swdev_wq;
static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
@@ -74,6 +93,82 @@ static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
u8 state);
+static struct prestera_bridge *
+prestera_bridge_find(const struct prestera_switch *sw,
+ const struct net_device *br_dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &sw->swdev->bridge_list, head)
+ if (bridge->dev == br_dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_find(const struct prestera_bridge *bridge,
+ const struct net_device *brport_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head)
+ if (br_port->dev == brport_dev)
+ return br_port;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_find(struct prestera_switch *sw,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_find(sw, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_find(bridge, brport_dev);
+}
+
+static void
+prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ prestera_port_uc_flood_set(port, false);
+ prestera_port_mc_flood_set(port, false);
+ prestera_port_learning_set(port, false);
+}
+
+static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ prestera_br_port_flags_reset(br_port, port);
+ return err;
+}
+
static struct prestera_bridge_vlan *
prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
{
@@ -220,6 +315,70 @@ static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
}
static void
+prestera_mdb_port_del(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_flood_domain *fl_domain = mdb->flood_domain;
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ flood_domain_port = prestera_flood_domain_port_find(fl_domain,
+ orig_dev,
+ mdb->vid);
+ if (flood_domain_port)
+ prestera_flood_domain_port_destroy(flood_domain_port);
+}
+
+static void
+prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb)
+{
+ struct prestera_bridge_port *br_port;
+
+ if (list_empty(&br_mdb->br_mdb_port_list)) {
+ list_for_each_entry(br_port, &br_mdb->bridge->port_list, head)
+ prestera_mdb_port_del(br_mdb->mdb, br_port->dev);
+
+ prestera_mdb_entry_destroy(br_mdb->mdb);
+ list_del(&br_mdb->br_mdb_entry_node);
+ kfree(br_mdb);
+ }
+}
+
+static void
+prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp;
+
+ list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ if (br_mdb_port->br_port == br_port) {
+ list_del(&br_mdb_port->br_mdb_port_node);
+ kfree(br_mdb_port);
+ }
+ }
+}
+
+static void
+prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp_port;
+ struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp;
+ struct prestera_bridge *br_dev = br_port->bridge;
+
+ list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ list_for_each_entry_safe(br_mdb_port, tmp_port,
+ &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ prestera_mdb_port_del(br_mdb->mdb,
+ br_mdb_port->br_port->dev);
+ prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port);
+ }
+ prestera_br_mdb_entry_put(br_mdb);
+ }
+}
+
+static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
@@ -244,6 +403,8 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
else
prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+ prestera_mdb_flush_bridge_port(br_port);
+
list_del(&port_vlan->br_vlan_head);
prestera_bridge_vlan_put(br_vlan);
prestera_bridge_port_put(br_port);
@@ -295,8 +456,10 @@ prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
bridge->vlan_enabled = vlan_enabled;
bridge->swdev = swdev;
bridge->dev = dev;
+ bridge->multicast_enabled = br_multicast_enabled(dev);
INIT_LIST_HEAD(&bridge->port_list);
+ INIT_LIST_HEAD(&bridge->br_mdb_entry_list);
list_add(&bridge->head, &swdev->bridge_list);
@@ -314,6 +477,7 @@ static void prestera_bridge_destroy(struct prestera_bridge *bridge)
else
prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+ WARN_ON(!list_empty(&bridge->br_mdb_entry_list));
WARN_ON(!list_empty(&bridge->port_list));
kfree(bridge);
}
@@ -405,6 +569,7 @@ prestera_bridge_port_create(struct prestera_bridge *bridge,
INIT_LIST_HEAD(&br_port->vlan_list);
list_add(&br_port->head, &bridge->port_list);
+ INIT_LIST_HEAD(&br_port->br_mdb_port_list);
return br_port;
}
@@ -414,6 +579,7 @@ prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
{
list_del(&br_port->head);
WARN_ON(!list_empty(&br_port->vlan_list));
+ WARN_ON(!list_empty(&br_port->br_mdb_port_list));
kfree(br_port);
}
@@ -461,19 +627,13 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
if (err)
return err;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- goto err_port_flood_set;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
- if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
return 0;
-err_port_learning_set:
-err_port_flood_set:
+err_flags2port_set:
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
return err;
@@ -592,8 +752,9 @@ void prestera_bridge_port_leave(struct net_device *br_dev,
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
- prestera_hw_port_learning_set(port, false);
- prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
+ prestera_mdb_flush_bridge_port(br_port);
+
+ prestera_br_port_flags_reset(br_port, port);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
prestera_bridge_port_put(br_port);
}
@@ -603,26 +764,14 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
- int err;
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags.mask, flags.val);
- if (err)
- return err;
-
- if (flags.mask & BR_LEARNING) {
- err = prestera_hw_port_learning_set(port,
- flags.val & BR_LEARNING);
- if (err)
- return err;
- }
-
- memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
-
- return 0;
+ br_port->flags &= ~flags.mask;
+ br_port->flags |= flags.val & flags.mask;
+ return prestera_br_port_flags_set(br_port, port);
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
@@ -716,6 +865,290 @@ err_port_stp_set:
return err;
}
+static int
+prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port,
+ bool enabled)
+{
+ struct prestera_port *pr_port;
+ struct prestera_switch *sw;
+ u16 lag_id;
+ int err;
+
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+ if (!pr_port)
+ return 0;
+
+ sw = pr_port->sw;
+ err = prestera_lag_id(sw, br_port->dev, &lag_id);
+ if (err)
+ return err;
+
+ list_for_each_entry(pr_port, &sw->port_list, list) {
+ if (pr_port->lag->lag_id == lag_id) {
+ err = prestera_port_mc_flood_set(pr_port, enabled);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_port *port;
+ bool enabled;
+ int err;
+
+ /* if mrouter exists:
+ * - make sure every mrouter receives unreg mcast traffic;
+ * if mrouter doesn't exist:
+ * - make sure every port receives unreg mcast traffic;
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ if (br_dev->multicast_enabled && br_dev->mrouter_exist)
+ enabled = br_port->mrouter;
+ else
+ enabled = br_port->flags & BR_MCAST_FLOOD;
+
+ if (netif_is_lag_master(br_port->dev)) {
+ err = prestera_br_port_lag_mdb_mc_enable_sync(br_port,
+ enabled);
+ if (err)
+ return err;
+ continue;
+ }
+
+ port = prestera_port_dev_lower_find(br_port->dev);
+ if (!port)
+ continue;
+
+ err = prestera_port_mc_flood_set(port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool
+prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_br_mdb_port *tmp_port;
+
+ list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (tmp_port->br_port->dev == orig_dev)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_mdb_port_add(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev,
+ const unsigned char addr[ETH_ALEN], u16 vid)
+{
+ struct prestera_flood_domain *flood_domain = mdb->flood_domain;
+ int err;
+
+ if (!prestera_flood_domain_port_find(flood_domain,
+ orig_dev, vid)) {
+ err = prestera_flood_domain_port_create(flood_domain, orig_dev,
+ vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Sync bridge mdb (software table) with HW table (if MC is enabled). */
+static int prestera_br_mdb_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+ struct prestera_bridge_port *br_port;
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_mdb_entry *mdb;
+ struct prestera_port *pr_port;
+ int err = 0;
+
+ if (!br_dev->multicast_enabled)
+ return 0;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ mdb = br_mdb->mdb;
+ /* Make sure every port that has explicitly been added to the mdb
+ * joins the specified group.
+ */
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ br_port = br_mdb_port->br_port;
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Match only mdb and br_mdb ports that belong to the
+ * same broadcast domain.
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ /* If the port is not in the MDB or there's no mrouter,
+ * clear the HW MDB.
+ */
+ if (prestera_br_mdb_port_is_member(br_mdb,
+ br_mdb_port->br_port->dev) &&
+ br_dev->mrouter_exist)
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ else
+ prestera_mdb_port_del(mdb, br_port->dev);
+
+ if (err)
+ return err;
+ }
+
+ /* Make sure that every mrouter port joins every MC group in the
+ * broadcast domain. If it's not an mrouter, it should leave the
+ * group.
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Make sure the mrouter wouldn't receive traffic from
+ * another broadcast domain (e.g. from a VLAN the
+ * mrouter port is not a member of).
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ if (br_port->mrouter) {
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ if (err)
+ return err;
+ } else if (!br_port->mrouter &&
+ !prestera_br_mdb_port_is_member
+ (br_mdb, br_port->dev)) {
+ prestera_mdb_port_del(mdb, br_port->dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable)
+{
+ int err;
+
+ if (enable != br_mdb->enabled) {
+ if (enable)
+ err = prestera_hw_mdb_create(br_mdb->mdb);
+ else
+ err = prestera_hw_mdb_destroy(br_mdb->mdb);
+
+ if (err)
+ return err;
+
+ br_mdb->enabled = enable;
+ }
+
+ return 0;
+}
+
+static int
+prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ int err;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ err = prestera_mdb_enable_set(br_mdb, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *br_dev;
+
+ br_dev = prestera_bridge_find(sw, orig_dev);
+ if (!br_dev)
+ return 0;
+
+ br_dev->multicast_enabled = !mc_disabled;
+
+ /* There's no point in re-enabling the MDB if the mrouter is missing. */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
+static bool
+prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &br_dev->port_list, head)
+ if (br_port->mrouter)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_port_attr_mrouter_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+
+ br_port = prestera_bridge_port_find(port->sw, orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+ br_port->mrouter = is_port_mrouter;
+
+ br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev);
+
+ /* Enable MDB processing only if both an mrouter exists and MC is
+ * enabled. If MC is enabled but there is no mrouter, the device floods
+ * all multicast traffic (even if the MDB table is not empty) using the
+ * bridge's flood capabilities (without using the flood_domain).
+ */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -745,6 +1178,14 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = prestera_port_attr_mrouter_set(port, attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev,
+ attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -918,14 +1359,9 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
if (port_vlan->br_port)
return 0;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- return err;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
- if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
if (err)
@@ -950,8 +1386,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
err_bridge_vlan_get:
prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
err_port_vid_stp_set:
- prestera_hw_port_learning_set(port, false);
-err_port_learning_set:
+ prestera_br_port_flags_reset(br_port, port);
+err_flags2port_set:
return err;
}
@@ -1048,20 +1484,162 @@ static int prestera_port_vlans_add(struct prestera_port *port,
flag_pvid, extack);
}
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_create(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb_entry;
+ struct prestera_mdb_entry *mdb_entry;
+
+ br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL);
+ if (!br_mdb_entry)
+ return NULL;
+
+ mdb_entry = prestera_mdb_entry_create(sw, addr, vid);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ br_mdb_entry->mdb = mdb_entry;
+ br_mdb_entry->bridge = br_dev;
+ br_mdb_entry->enabled = true;
+ INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list);
+
+ list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list);
+
+ return br_mdb_entry;
+
+err_mdb_alloc:
+ kfree(br_mdb_entry);
+ return NULL;
+}
+
+static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (br_mdb_port->br_port == br_port)
+ return 0;
+
+ br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL);
+ if (!br_mdb_port)
+ return -ENOMEM;
+
+ br_mdb_port->br_port = br_port;
+ list_add(&br_mdb_port->br_mdb_port_node,
+ &br_mdb->br_mdb_port_list);
+
+ return 0;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_find(struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node)
+ if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) &&
+ vid == br_mdb->mdb->vid)
+ return br_mdb;
+
+ return NULL;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_get(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid);
+ if (br_mdb)
+ return br_mdb;
+
+ return prestera_br_mdb_entry_create(sw, br_dev, addr, vid);
+}
+
+static int
+prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ struct prestera_switch *sw;
+ struct prestera_port *port;
+ int err;
+
+ sw = prestera_switch_get(mdb->obj.orig_dev);
+ port = prestera_port_dev_lower_find(mdb->obj.orig_dev);
+
+ br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ if (mdb->vid)
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ br_dev->bridge_id);
+
+ if (!br_mdb)
+ return -ENOMEM;
+
+ /* Make sure the newly allocated MDB entry gets disabled if either MC
+ * is disabled or the mrouter does not exist.
+ */
+ WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ err = prestera_br_mdb_port_add(br_mdb, br_port);
+ if (err) {
+ prestera_br_mdb_entry_put(br_mdb);
+ return err;
+ }
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
return prestera_port_vlans_add(port, vlan, extack);
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_add(mdb);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ fallthrough;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_port_vlans_del(struct prestera_port *port,
@@ -1086,17 +1664,71 @@ static int prestera_port_vlans_del(struct prestera_port *port,
return 0;
}
+static int
+prestera_mdb_port_addr_obj_del(struct prestera_port *port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ int err;
+
+	/* Bridge port no longer exists - and neither does this MDB entry */
+ br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+	/* Removing an MDB entry for a non-existing VLAN is not supported */
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (br_port->bridge->vlan_enabled)
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ br_port->bridge->bridge_id);
+
+ if (!br_mdb)
+ return 0;
+
+	/* This port may have been the last one in the MDB group, so remove the
+	 * port from both the software and the HW MDB, sync the MDB table, and
+	 * then destroy the software MDB entry if it is no longer needed.
+	 */
+ prestera_br_mdb_port_del(br_mdb, br_port);
+
+ prestera_br_mdb_entry_put(br_mdb);
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_del(port, mdb);
+ break;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_switchdev_blk_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a1e907c85217..bbea5458000b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1863,7 +1863,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (mss != 0) {
if (!(hw->flags & SKY2_HW_NEW_LE))
- mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss += skb_tcp_all_headers(skb);
if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
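Illustrative aside (not part of the patch): skb_tcp_all_headers() and its inner variant, also used in the mlx4 hunk further down, fold the "headers up to and including TCP" length into a single helper. A sketch of what they amount to, assuming the transport offset already covers the Ethernet and IP headers (the helpers live in include/linux/tcp.h):

static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	/* MAC + IP headers + TCP header length */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	/* Same, but against the inner (encapsulated) headers */
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}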
@@ -4711,7 +4711,7 @@ static irqreturn_t sky2_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int sky2_test_msi(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index da4ec235d146..97374fb3ee79 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,8 @@ config NET_MEDIATEK_SOC
select PINCTRL
select PHYLINK
select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 59c9a10f83ba..8aff4c0c28bd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
static const struct mtk_reg_map mtk_reg_map = {
.tx_irq_mask = 0x1a1c,
.tx_irq_status = 0x1a18,
@@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
static const char * const mtk_clks_source_name[] = {
@@ -990,7 +1001,7 @@ static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
- bool napi)
+ struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -1020,15 +1031,27 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
- else
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -1045,7 +1068,7 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -1221,7 +1244,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
@@ -1235,7 +1258,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
}
/* store skb to cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
@@ -1274,7 +1298,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
@@ -1432,11 +1456,320 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
}
}
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return !eth->hwlro;
+}
+
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+ id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = !xdp_frame_has_frags(xdpf),
+ };
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
+ }
+ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
+ /* store xdpf for cleanup */
+ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ txd_pdma = qdma_to_pdma(ring, htxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
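Illustrative aside (hypothetical example, not part of the patch): mtk_xdp_run() hands every received page_pool buffer to the attached BPF program and routes its verdict into the per-verdict counters above. A minimal program exercising those verdicts, compiled with clang for the bpf target, could look like this:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical XDP program: drops IPv6 frames, passes everything else. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_drop_ipv6(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	if (eth->h_proto == bpf_htons(ETH_P_IPV6))
		return XDP_DROP;	/* counted in rx_xdp_drop */

	return XDP_PASS;		/* counted in rx_xdp_pass */
}

char _license[] SEC("license") = "GPL";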
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
@@ -1444,8 +1777,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
int done = 0, bytes = 0;
while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -1477,47 +1810,97 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
- if (ring->frag_size <= PAGE_SIZE)
- new_data = napi_alloc_frag(ring->frag_size);
- else
- new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dma_dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
- dma_unmap_single(eth->dma_dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- skb_free_frag(data);
- netdev->stats.rx_dropped++;
- goto skip_rx;
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
+ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rxdcsum = &trxd.rxd3;
+ else
+ rxdcsum = &trxd.rxd4;
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -1555,7 +1938,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1563,7 +1945,6 @@ release_desc:
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1582,6 +1963,9 @@ rx_done:
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (xdp_flush)
+ xdp_do_flush_map();
+
return done;
}
@@ -1590,15 +1974,16 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
@@ -1613,22 +1998,26 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[mac] += skb->len;
+ done[mac]++;
+ }
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
+ xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
@@ -1640,27 +2029,30 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[0] += skb->len;
- done[0]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[0] += skb->len;
+ done[0]++;
+ }
budget--;
}
-
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
@@ -1668,6 +2060,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
+ xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
@@ -1877,7 +2270,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i], false);
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -1927,13 +2320,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- if (ring->frag_size <= PAGE_SIZE)
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- else
- ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
- if (!ring->data[i])
- return -ENOMEM;
+ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1944,16 +2339,33 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
for (i = 0; i < rx_dma_size; i++) {
struct mtk_rx_dma_v2 *rxd;
-
- dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ dma_addr_t dma_addr;
+ void *data;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr)))
+ return -ENOMEM;
+ }
rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1969,6 +2381,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd8 = 0;
}
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
@@ -2020,7 +2433,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
dma_unmap_single(eth->dma_dev, rxd->rxd1,
ring->buf_size, DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
@@ -2032,6 +2445,13 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
@@ -2639,6 +3059,48 @@ static int mtk_stop(struct net_device *dev)
return 0;
}
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
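Illustrative aside (hypothetical userspace sketch, assuming libbpf 1.x conventions; not part of the patch): attaching a program in native mode reaches mtk_xdp_setup() through the XDP_SETUP_PROG command handled above; iproute2 can do the same with "ip link set dev eth0 xdp obj prog.o".

#include <bpf/libbpf.h>
#include <linux/if_link.h>	/* XDP_FLAGS_DRV_MODE */
#include <net/if.h>

static int attach_xdp(const char *ifname, const char *obj_path)
{
	struct bpf_object *obj = bpf_object__open_file(obj_path, NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!obj || !ifindex || bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return -1;

	/* Native/driver mode: the request is delivered via ndo_bpf. */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}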
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -2934,6 +3396,12 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -3123,11 +3591,18 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
int i;
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
break;
}
}
@@ -3135,13 +3610,35 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_ethtool_stats);
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
default:
return -EOPNOTSUPP;
}
}
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -3169,6 +3666,8 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
@@ -3261,6 +3760,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -3761,6 +4262,7 @@ static const struct mtk_soc_data mt7986_data = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0a632896451a..7405c97cda66 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,6 +18,8 @@
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
@@ -49,6 +51,11 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
#define MTK_QRX_OFFSET 0x10
#define MTK_MAX_RX_RING_NUM 4
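Illustrative aside (not part of the patch): assuming 4 KiB pages, XDP_PACKET_HEADROOM of 256 bytes and an skb_shared_info that aligns to roughly 320 bytes on 64-bit builds, MTK_PP_PAD comes to about 576 bytes and MTK_PP_MAX_BUF_SIZE to about 4096 - 576 = 3520 bytes. This is the frame-size ceiling that mtk_xdp_setup() and mtk_change_mtu() enforce while an XDP program is attached.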
@@ -563,6 +570,16 @@ struct mtk_tx_dma_v2 {
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -586,6 +603,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
@@ -677,6 +696,12 @@ enum mtk_dev_state {
MTK_RESETTING
};
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 * by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -686,7 +711,9 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
+ enum mtk_tx_buf_type type;
+ void *data;
+
u32 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
@@ -745,6 +772,9 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
@@ -1078,6 +1108,8 @@ struct mtk_eth {
struct mtk_ppe *ppe;
struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 90e7dfd011c9..25dc3c3aa31d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -88,29 +88,28 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
- struct net_device_path_ctx ctx = {
- .dev = dev,
- };
- struct net_device_path path = {};
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
- memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+ if (!dev)
+ return -ENODEV;
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
- if (!dev->netdev_ops->ndo_fill_forward_path)
- return -1;
-
- if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
- return -1;
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
- if (path.type != DEV_PATH_MTK_WDMA)
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
return -1;
- info->wdma_idx = path.mtk_wdma.wdma_idx;
- info->queue = path.mtk_wdma.queue;
- info->bss = path.mtk_wdma.bss;
- info->wcid = path.mtk_wdma.wcid;
+ info->wdma_idx = path->mtk_wdma.wdma_idx;
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 95839fd84dab..3f0e5e64de50 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
@@ -32,6 +33,7 @@
#define MTK_STAR_SKB_ALIGNMENT 16
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
* work for this controller.
@@ -129,6 +131,11 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_INT_MASK 0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+/* Delay-Macro Register */
+#define MTK_STAR_REG_TEST0 0x0058
+#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
+#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
+
/* Misc. Config Register */
#define MTK_STAR_REG_TEST1 0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
@@ -149,6 +156,7 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+#define MTK_STAR_BIT_CLK_DIV_50 0x32
/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT 0x0100
@@ -181,9 +189,14 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_C_RX_TWIST 0x0218
/* Ethernet CFG Control */
-#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
-#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
-#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
+#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
+#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
+#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
/* Represents the actual structure of descriptors used by the MAC. We can
* reuse the same structure for both TX and RX - the layout is the same, only
@@ -216,7 +229,8 @@ struct mtk_star_ring_desc_data {
struct sk_buff *skb;
};
-#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_RING_NUM_DESCS 512
+#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
@@ -231,6 +245,11 @@ struct mtk_star_ring {
unsigned int tail;
};
+struct mtk_star_compat {
+ int (*set_interface_mode)(struct net_device *ndev);
+ unsigned char bit_clk_div;
+};
+
struct mtk_star_priv {
struct net_device *ndev;
@@ -246,7 +265,8 @@ struct mtk_star_priv {
struct mtk_star_ring rx_ring;
struct mii_bus *mii;
- struct napi_struct napi;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
struct device_node *phy_node;
phy_interface_t phy_intf;
@@ -255,6 +275,11 @@ struct mtk_star_priv {
int speed;
int duplex;
int pause;
+ bool rmii_rxc;
+ bool rx_inv;
+ bool tx_inv;
+
+ const struct mtk_star_compat *compat_data;
/* Protects against concurrent descriptor access. */
spinlock_t lock;
@@ -357,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
mtk_star_ring_push_head(ring, desc_data, flags);
}
-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
- return abs(ring->head - ring->tail);
-}
+ u32 avail;
-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
-}
+ if (ring->tail > ring->head)
+ avail = ring->tail - ring->head - 1;
+ else
+ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) > 0;
+ return avail;
}
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
@@ -414,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value |= MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
@@ -429,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
-{
- unsigned int val;
-
- regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
-
- return val;
-}
-
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
unsigned int val;
- val = mtk_star_intr_read(priv);
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
return val;
@@ -714,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
-/* All processing for TX and RX happens in the napi poll callback.
- *
- * FIXME: The interrupt handling should be more fine-grained with each
- * interrupt enabled/disabled independently when needed. Unfortunatly this
- * turned out to impact the driver's stability and until we have something
- * working properly, we're disabling all interrupts during TX & RX processing
- * or when resetting the counter registers.
- */
+/**
+ * mtk_star_handle_irq - Interrupt Handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ * Description: this is the driver's interrupt service routine.
+ * It mainly handles:
+ * 1. the TX complete interrupt for frame transmission;
+ * 2. the RX complete interrupt for frame reception;
+ * 3. the MAC Management Counter interrupt to avoid counter overflow.
+ **/
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
- struct mtk_star_priv *priv;
- struct net_device *ndev;
-
- ndev = data;
- priv = netdev_priv(ndev);
+ struct net_device *ndev = data;
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int intr_status = mtk_star_intr_ack_all(priv);
+ bool rx, tx;
+
+ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+ napi_schedule_prep(&priv->rx_napi);
+ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+ napi_schedule_prep(&priv->tx_napi);
+
+ if (rx || tx) {
+ spin_lock(&priv->lock);
+		/* mask the RX and TX complete interrupts */
+ mtk_star_disable_dma_irq(priv, rx, tx);
+ spin_unlock(&priv->lock);
+
+ if (rx)
+ __napi_schedule(&priv->rx_napi);
+ if (tx)
+ __napi_schedule(&priv->tx_napi);
+ }
- if (netif_running(ndev)) {
- mtk_star_intr_disable(priv);
- napi_schedule(&priv->napi);
+	/* the interrupt is triggered once any counter reaches 0x8000000 */
+ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
}
return IRQ_HANDLED;
@@ -821,32 +883,26 @@ static void mtk_star_phy_config(struct mtk_star_priv *priv)
val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
- /* Only full-duplex supported for now. */
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
-
- regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
-
if (priv->pause) {
- val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
- val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
- val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
} else {
- val = 0;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
}
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
- if (priv->pause) {
- val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
- val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
- } else {
- val = 0;
- }
-
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
@@ -898,14 +954,7 @@ static void mtk_star_init_config(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
MTK_STAR_MSK_MAC_CLK_CONF,
- MTK_STAR_BIT_CLK_DIV_10);
-}
-
-static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
-{
- regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
- MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
- MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+ priv->compat_data->bit_clk_div);
}
static int mtk_star_enable(struct net_device *ndev)
@@ -951,11 +1000,12 @@ static int mtk_star_enable(struct net_device *ndev)
/* Request the interrupt */
ret = request_irq(ndev->irq, mtk_star_handle_irq,
- IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ IRQF_TRIGGER_NONE, ndev->name, ndev);
if (ret)
goto err_free_skbs;
- napi_enable(&priv->napi);
+ napi_enable(&priv->tx_napi);
+ napi_enable(&priv->rx_napi);
mtk_star_intr_ack_all(priv);
mtk_star_intr_enable(priv);
@@ -988,7 +1038,8 @@ static void mtk_star_disable(struct net_device *ndev)
struct mtk_star_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ napi_disable(&priv->tx_napi);
+ napi_disable(&priv->rx_napi);
mtk_star_intr_disable(priv);
mtk_star_dma_disable(priv);
mtk_star_intr_ack_all(priv);
@@ -1020,13 +1071,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
return phy_mii_ioctl(ndev->phydev, req, cmd);
}
-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ netif_stop_queue(priv->ndev);
+
+ /* Might race with mtk_star_tx_poll, check again */
+ smp_mb();
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+ return -EBUSY;
+
+ netif_start_queue(priv->ndev);
+
+ return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+ return 0;
+
+ return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->tx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ /* This is a hard error, log it. */
+ pr_err_ratelimited("Tx ring full when queue awake\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1034,17 +1117,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
desc_data.skb = skb;
desc_data.len = skb->len;
-
- spin_lock_bh(&priv->lock);
-
mtk_star_ring_push_head_tx(ring, &desc_data);
netdev_sent_queue(ndev, skb->len);
- if (mtk_star_ring_full(ring))
- netif_stop_queue(ndev);
-
- spin_unlock_bh(&priv->lock);
+ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
mtk_star_dma_resume_tx(priv);
@@ -1076,31 +1153,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
return ret;
}
-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
+ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+ tx_napi);
+ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
struct mtk_star_ring *ring = &priv->tx_ring;
struct net_device *ndev = priv->ndev;
- int ret, pkts_compl, bytes_compl;
- bool wake = false;
-
- spin_lock(&priv->lock);
-
- for (pkts_compl = 0, bytes_compl = 0;;
- pkts_compl++, bytes_compl += ret, wake = true) {
- if (!mtk_star_ring_descs_available(ring))
- break;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
if (ret < 0)
break;
+
+ count++;
+ pkts_compl++;
+ bytes_compl += ret;
+ entry = ring->tail;
}
netdev_completed_queue(ndev, pkts_compl, bytes_compl);
- if (wake && netif_queue_stopped(ndev))
+ if (unlikely(netif_queue_stopped(ndev)) &&
+ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
netif_wake_queue(ndev);
- spin_unlock(&priv->lock);
+ if (napi_complete(napi)) {
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, false, true);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
}
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
@@ -1180,7 +1266,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
@@ -1188,107 +1274,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
struct net_device *ndev = priv->ndev;
struct sk_buff *curr_skb, *new_skb;
dma_addr_t new_dma_addr;
- int ret;
+ int ret, count = 0;
- spin_lock(&priv->lock);
- ret = mtk_star_ring_pop_tail(ring, &desc_data);
- spin_unlock(&priv->lock);
- if (ret)
- return -1;
+ while (count < budget) {
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return -1;
- curr_skb = desc_data.skb;
+ curr_skb = desc_data.skb;
- if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
- (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
- /* Error packet -> drop and reuse skb. */
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- /* Prepare new skb before receiving the current one. Reuse the current
- * skb if we fail at any point.
- */
- new_skb = mtk_star_alloc_skb(ndev);
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ /* Prepare new skb before receiving the current one.
+ * Reuse the current skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
- if (dma_mapping_error(dev, new_dma_addr)) {
- ndev->stats.rx_dropped++;
- dev_kfree_skb(new_skb);
- new_skb = curr_skb;
- netdev_err(ndev, "DMA mapping error of RX descriptor\n");
- goto push_new_skb;
- }
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
- /* We can't fail anymore at this point: it's safe to unmap the skb. */
- mtk_star_dma_unmap_rx(priv, &desc_data);
+ /* We can't fail anymore at this point:
+ * it's safe to unmap the skb.
+ */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
- skb_put(desc_data.skb, desc_data.len);
- desc_data.skb->ip_summed = CHECKSUM_NONE;
- desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
- desc_data.skb->dev = ndev;
- netif_receive_skb(desc_data.skb);
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
- /* update dma_addr for new skb */
- desc_data.dma_addr = new_dma_addr;
+ /* update dma_addr for new skb */
+ desc_data.dma_addr = new_dma_addr;
push_new_skb:
- desc_data.len = skb_tailroom(new_skb);
- desc_data.skb = new_skb;
- spin_lock(&priv->lock);
- mtk_star_ring_push_head_rx(ring, &desc_data);
- spin_unlock(&priv->lock);
+ count++;
- return 0;
-}
-
-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
-{
- int received, ret;
-
- for (received = 0, ret = 0; received < budget && ret == 0; received++)
- ret = mtk_star_receive_packet(priv);
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ }
mtk_star_dma_resume_rx(priv);
- return received;
+ return count;
}
-static int mtk_star_poll(struct napi_struct *napi, int budget)
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
- unsigned int status;
- int received = 0;
-
- priv = container_of(napi, struct mtk_star_priv, napi);
-
- status = mtk_star_intr_read(priv);
- mtk_star_intr_ack_all(priv);
+ int work_done = 0;
- if (status & MTK_STAR_BIT_INT_STS_TNTC)
- /* Clean-up all TX descriptors. */
- mtk_star_tx_complete_all(priv);
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
- if (status & MTK_STAR_BIT_INT_STS_FNRC)
- /* Receive up to $budget packets. */
- received = mtk_star_process_rx(priv, budget);
-
- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
- mtk_star_update_stats(priv);
- mtk_star_reset_counters(priv);
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock(&priv->lock);
}
- if (received < budget)
- napi_complete_done(napi, received);
-
- mtk_star_intr_enable(priv);
-
- return received;
+ return work_done;
}
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
@@ -1442,6 +1506,25 @@ static void mtk_star_clk_disable_unprepare(void *data)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+static int mtk_star_set_timing(struct mtk_star_priv *priv)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int delay_val = 0;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
+}
+
static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
@@ -1460,6 +1543,7 @@ static int mtk_star_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
+ priv->compat_data = of_device_get_match_data(&pdev->dev);
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, ndev);
@@ -1510,7 +1594,8 @@ static int mtk_star_probe(struct platform_device *pdev)
ret = of_get_phy_mode(of_node, &priv->phy_intf);
if (ret) {
return ret;
- } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
+ priv->phy_intf != PHY_INTERFACE_MODE_MII) {
dev_err(dev, "unsupported phy mode: %s\n",
phy_modes(priv->phy_intf));
return -EINVAL;
@@ -1522,7 +1607,23 @@ static int mtk_star_probe(struct platform_device *pdev)
return -ENODEV;
}
- mtk_star_set_mode_rmii(priv);
+ priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
+ priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
+ priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
+
+ if (priv->compat_data->set_interface_mode) {
+ ret = priv->compat_data->set_interface_mode(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = mtk_star_set_timing(priv);
+ if (ret) {
+ dev_err(dev, "Failed to set timing, err = %d\n", ret);
+ return -EINVAL;
+ }
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1550,16 +1651,92 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
+ NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
}
#ifdef CONFIG_OF
+static int mt8516_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val, ret, rmii_rxc;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ rmii_rxc = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG1_CON,
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
+ rmii_rxc);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG0_CON,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
+ intf_val);
+}
+
+static int mt8365_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG_CON_V2,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
+ intf_val);
+}
+
+static const struct mtk_star_compat mtk_star_mt8516_compat = {
+ .set_interface_mode = mt8516_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
+};
+
+static const struct mtk_star_compat mtk_star_mt8365_compat = {
+ .set_interface_mode = mt8365_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
+};
+
static const struct of_device_id mtk_star_of_match[] = {
- { .compatible = "mediatek,mt8516-eth", },
- { .compatible = "mediatek,mt8518-eth", },
- { .compatible = "mediatek,mt8175-eth", },
+ { .compatible = "mediatek,mt8516-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8518-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8175-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8365-eth",
+ .data = &mtk_star_mt8365_compat },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 8f0cd3196aac..29be2fcafea3 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -651,7 +651,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
* WDMA RX.
*/
- BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ out:
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index ac5468b77488..82a07a31cde7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
- devlink_region_create(devlink,
- &region_cr_space_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- pci_resource_len(pdev, 0));
+ devl_region_create(devlink,
+ &region_cr_space_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
- devlink_region_create(devlink,
- &region_fw_health_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- HEALTH_BUFFER_SIZE);
+ devl_region_create(devlink,
+ &region_fw_health_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
- devlink_region_destroy(crdump->region_fw_health);
- devlink_region_destroy(crdump->region_crspace);
+ devl_region_destroy(crdump->region_fw_health);
+ devl_region_destroy(crdump->region_crspace);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index af3b2b59a2a6..43a4102e9c09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -645,7 +645,7 @@ static int get_real_size(const struct sk_buff *skb,
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
@@ -653,7 +653,7 @@ static int get_real_size(const struct sk_buff *skb,
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb1..dcb9eb1899ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
- dev->caps.num_ports);
+ (unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b187c210d4d6..78c5f40382c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devlink_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register(devlink, &info->devlink_port, port);
if (err)
return err;
@@ -3071,7 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3093,7 +3093,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3109,7 +3109,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3333,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
+ struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
@@ -3341,6 +3342,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3999,6 +4001,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4026,6 +4029,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -4035,6 +4039,7 @@ err_params_unregister:
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4056,8 +4061,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
+ struct devlink *devlink;
int p, i;
+ devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4137,6 +4145,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4172,6 +4181,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4292,15 +4302,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4333,6 +4348,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4340,6 +4356,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4376,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4385,12 +4407,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4402,6 +4428,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4409,6 +4436,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4451,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea867a45764..a3773a8177ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o lib/tout.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o
#
# Netdev basic
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
+ en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+ lib/crypto.o
#
# Netdev extra
@@ -45,7 +46,8 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o en/tc/int_port.o
+ en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+ en/tc/post_meter.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -53,7 +55,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
- en/tc/act/redirect_ingress.o
+ en/tc/act/redirect_ingress.o en/tc/act/police.o
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
@@ -67,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/debugfs.o esw/devlink_port.o esw/vporttbl.o esw/qos.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 9caa1b52321b..3e232a65a0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -166,6 +166,28 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
+static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cmd *cmd;
+ char tbuf[6];
+ int weight;
+ int field;
+ int ret;
+
+ cmd = filp->private_data;
+ weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+ field = cmd->max_reg_cmds - weight;
+ ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations slots_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = slots_read,
+};
+
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
@@ -176,6 +198,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
cmd = &dev->priv.dbg.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
+ debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
+
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 50422b56a64d..0571e40c6ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -340,8 +340,10 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
struct auxiliary_driver *adrv;
int ret = 0, i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -389,6 +391,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
return ret;
}
@@ -401,7 +404,9 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
pm_message_t pm = {};
int i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -430,6 +435,7 @@ skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -438,6 +444,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
{
int ret;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
@@ -450,6 +457,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
@@ -526,16 +534,22 @@ del_adev:
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- return 0;
+ goto out;
+
+ err = add_drivers(dev);
- return add_drivers(dev);
+out:
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
+ return err;
}
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index f85166e587f2..66c6a7017695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -104,7 +104,16 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- return mlx5_fw_reset_wait_reset_done(dev);
+ err = mlx5_fw_reset_wait_reset_done(dev);
+ if (err)
+ return err;
+
+ mlx5_unload_one_devl_locked(dev);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
+ return err;
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
@@ -134,6 +143,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
+ int ret = 0;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
@@ -156,17 +166,21 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev);
- return 0;
+ mlx5_unload_one_devl_locked(dev);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
- return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
- return mlx5_devlink_reload_fw_activate(devlink, extack);
+ ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ else
+ ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
+
+ return ret;
}
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -174,24 +188,27 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int ret = 0;
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, the driver is also reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- return 0;
+ return ret;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -828,28 +845,28 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
- &core_dev->priv);
+ err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
}
int mlx5_devlink_register(struct devlink *devlink)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b6c15efe92ad..a560df446bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -109,7 +109,7 @@ struct page_pool;
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
@@ -174,8 +174,8 @@ struct page_pool;
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
- << MLX5_MKEY_BSF_OCTO_SIZE)
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
+ mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}
-static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
* Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
* than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
* cache-aligned.
*/
-#if L1_CACHE_BYTES < 128
- return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
-#else
- return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+ u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+
+#if L1_CACHE_BYTES >= 128
+ wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
+ return wqebbs;
}
struct mlx5e_tx_wqe {
@@ -321,7 +322,8 @@ struct mlx5e_params {
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
struct {
- struct mlx5e_mqprio_rl *rl;
+ u64 max_rate[TC_MAX_QUEUE];
+ u32 hw_id[TC_MAX_QUEUE];
} channel;
} mqprio;
bool rx_cqe_compress_def;
@@ -455,7 +457,7 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
- u16 max_sq_mpw_wqebbs;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
@@ -570,7 +572,7 @@ struct mlx5e_xdpsq {
struct device *pdev;
__be32 mkey_be;
u16 stop_room;
- u16 max_sq_mpw_wqebbs;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -898,16 +900,8 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
-struct mlx5e_htb {
- DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
- DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
- struct mlx5e_sq_stats **qos_sq_stats;
- u16 max_qos_sqs;
- u16 maj_id;
- u16 defcls;
-};
-
struct mlx5e_trap;
+struct mlx5e_htb;
struct mlx5e_priv {
/* priv data path fields - start */
@@ -928,7 +922,7 @@ struct mlx5e_priv {
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
- struct mlx5e_flow_steering fs;
+ struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -945,6 +939,8 @@ struct mlx5e_priv {
struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ struct mlx5e_sq_stats **htb_qos_sq_stats;
+ u16 htb_max_qos_sqs;
u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
@@ -976,7 +972,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
- struct mlx5e_htb htb;
+ struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
};
@@ -992,6 +988,8 @@ enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
+ MLX5E_PROFILE_FEATURE_FS_VLAN,
+ MLX5E_PROFILE_FEATURE_FS_TC,
};
struct mlx5e_profile {
@@ -1027,7 +1025,6 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
@@ -1181,7 +1178,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index ae52e7f38306..b69f9d10ccbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -21,6 +21,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
struct netdev_phys_item_id ppid = {};
struct devlink_port *dl_port;
unsigned int dl_port_index;
+ int ret;
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
@@ -41,7 +42,13 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
memset(dl_port, 0, sizeof(*dl_port));
devlink_port_attrs_set(dl_port, &attrs);
- return devlink_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ ret = devl_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
+
+ return ret;
}
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
@@ -54,8 +61,13 @@ void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
- devlink_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ devl_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6e3a90a959e9..9b8cdf2e68ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -15,29 +15,6 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
-struct mlx5e_tc_table {
- /* Protects the dynamic assignment of the t parameter
- * which is the nic tc root table.
- */
- struct mutex t_lock;
- struct mlx5_flow_table *t;
- struct mlx5_flow_table *miss_t;
- struct mlx5_fs_chains *chains;
- struct mlx5e_post_act *post_act;
-
- struct rhashtable ht;
-
- struct mod_hdr_tbl mod_hdr;
- struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
-
- struct mlx5_tc_ct_priv *ct;
- struct mapping_ctx *mapping;
-};
-
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
@@ -160,16 +137,20 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
- struct mlx5e_tc_table tc;
+ struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
@@ -200,13 +181,22 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
-
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 7aa25a5e29d7..e153d6119e02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -208,7 +208,7 @@ out:
static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs.udp->tables[i].t;
+ dest.ft = priv->fs->udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
int i;
if (!fs_udp)
@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs.udp = NULL;
+ priv->fs->udp = NULL;
}
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
{
int i, err;
- if (priv->fs.udp) {
- priv->fs.udp->ref_cnt++;
+ if (priv->fs->udp) {
+ priv->fs->udp->ref_cnt++;
return 0;
}
- priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
- if (!priv->fs.udp)
+ priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
+ if (!priv->fs->udp)
return -ENOMEM;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_tables;
- priv->fs.udp->ref_cnt = 1;
+ priv->fs->udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs.udp, i);
+ fs_udp_destroy_table(priv->fs->udp, i);
- kfree(priv->fs.udp);
- priv->fs.udp = NULL;
+ kfree(priv->fs->udp);
+ priv->fs->udp = NULL;
return err;
}
@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -474,7 +474,7 @@ err:
static int fs_any_create_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5e_flow_table *ft = &priv->fs->any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv)
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.any->table.t;
+ dest.ft = priv->fs->any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_any *fs_any = priv->fs.any;
+ struct mlx5e_fs_any *fs_any = priv->fs->any;
if (!fs_any)
return;
@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs.any = NULL;
+ priv->fs->any = NULL;
}
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
{
int err;
- if (priv->fs.any) {
- priv->fs.any->ref_cnt++;
+ if (priv->fs->any) {
+ priv->fs->any->ref_cnt++;
return 0;
}
- priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
- if (!priv->fs.any)
+ priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
+ if (!priv->fs->any)
return -ENOMEM;
err = fs_any_create_table(priv);
@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_table;
- priv->fs.any->ref_cnt = 1;
+ priv->fs->any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs.any);
+ fs_any_destroy_table(priv->fs->any);
- kfree(priv->fs.any);
- priv->fs.any = NULL;
+ kfree(priv->fs->any);
+ priv->fs->any = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
new file mode 100644
index 000000000000..6dac76fa58a3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/pkt_cls.h>
+#include "htb.h"
+#include "en.h"
+#include "../qos.h"
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ struct mlx5e_selq *selq;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+/* Software representation of the QoS tree */
+
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = callback(data, node->qid, node->hw_id);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb)
+{
+ int last;
+
+ last = find_last_bit(htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(htb->mdev));
+ return last == mlx5e_qos_max_leaf_nodes(htb->mdev) ? 0 : last + 1;
+}
+
+static int mlx5e_htb_find_unused_qos_qid(struct mlx5e_htb *htb)
+{
+ int size = mlx5e_qos_max_leaf_nodes(htb->mdev);
+ struct mlx5e_priv *priv = htb->priv;
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(htb->qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+static struct mlx5e_qos_node *
+mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, htb->qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(htb->priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_create_root(struct mlx5e_htb *htb)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_rcu(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, htb->qos_used_qids);
+ mlx5e_update_tx_netdev_queues(htb->priv);
+ }
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
+}
+
+/* TX datapath API */
+
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_htb_node_find_rcu(htb, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+/* HTB TC handlers */
+
+static int
+mlx5e_htb_root_add(struct mlx5e_htb *htb, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ mlx5e_selq_prepare_htb(htb->selq, htb_maj_id, htb_defcls);
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ goto err_cancel_selq;
+ }
+
+ root = mlx5e_htb_node_create_root(htb);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(htb->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ mlx5e_selq_apply(htb->selq);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_htb_node_delete(htb, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(htb->selq);
+ return err;
+}
+
+static int mlx5e_htb_root_del(struct mlx5e_htb *htb)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_DESTROY\n");
+
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare_htb(htb->selq, 0, 0);
+ mlx5e_selq_apply(htb->selq);
+
+ root = mlx5e_htb_node_find(htb, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(htb->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(htb->mdev, root->hw_id);
+ if (err)
+ qos_err(htb->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_htb_node_delete(htb, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_htb *htb, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)
+{
+ /* Hardware treats 0 as "unlimited", set at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
+
+ qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ int qid;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_htb_find_unused_qos_qid(htb);
+ if (qid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
+ return qid;
+ }
+
+ parent = mlx5e_htb_node_find(htb, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_htb_node_delete(htb, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ struct mlx5e_priv *priv = htb->priv;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(htb->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+ /* No fail point. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_htb_node_delete(htb, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(htb->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
+
+ node = mlx5e_htb_node_find(htb, *classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, *classid, err);
+
+ mlx5e_htb_node_delete(htb, node);
+
+ moved_qid = mlx5e_htb_cur_leaf_nodes(htb);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+ WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb->qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, moved_qid);
+
+ __set_bit(qid, htb->qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *classid = node->classid;
+ return 0;
+}
+
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, qid);
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_htb_node_delete(htb, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int
+mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(htb->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ node->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(htb->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_htb_update_children(htb, node, extack);
+
+ return err;
+}
+
+struct mlx5e_htb *mlx5e_htb_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
+}
+
+void mlx5e_htb_free(struct mlx5e_htb *htb)
+{
+ kvfree(htb);
+}
+
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv)
+{
+ htb->mdev = mdev;
+ htb->netdev = netdev;
+ htb->selq = selq;
+ htb->priv = priv;
+ hash_init(htb->qos_tc2node);
+ return mlx5e_htb_root_add(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->extack);
+}
+
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb)
+{
+ mlx5e_htb_root_del(htb);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
new file mode 100644
index 000000000000..8386f1ea4559
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_EN_HTB_H_
+#define __MLX5E_EN_HTB_H_
+
+#include "qos.h"
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_selq;
+struct mlx5e_htb;
+
+typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data);
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb);
+
+/* TX datapath API */
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid);
+
+/* HTB TC handlers */
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+struct mlx5e_htb *mlx5e_htb_alloc(void);
+void mlx5e_htb_free(struct mlx5e_htb *htb);
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv);
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb);
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3c1edfa33aa7..e025040350ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+
+ /* If XDP program is attached, XSK may be turned on at any time without
+ * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
+ * both regular RQ and XSK RQ.
+ * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, the XSK
+ * parameters don't affect its return value when params->xdp_prog != NULL,
+ * so it is enough to simply multiply by 2.
+ */
+ if (params->xdp_prog)
+ wqebbs *= 2;
+
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 047f88f09203..903de88bab53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
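+/* Reduce a WQE/FIFO counter to the range covered by the TS CQE counter mask. */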
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
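+/* A mismatch between the SKB FIFO consumer index and the CQE's WQE counter
+ * means some port-timestamp CQEs were missed. Flush the stale SKBs, delivering
+ * the CQE timestamp recorded for them earlier, until the FIFO catches up.
+ */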
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ struct skb_shared_hwtstamps hwts = {};
+ struct sk_buff *skb;
+
+ ptpsq->cq_stats->resync_event++;
+
+ while (skb_cc != skb_id) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+ skb_tstamp_tx(skb, &hwts);
+ ptpsq->cq_stats->resync_cqe++;
+ skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ }
+}
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct sk_buff *skb;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
ptpsq->cq_stats->err_cqe++;
goto out;
}
+ if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+ mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+ struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
ptpsq->skb_fifo.mask = wq_sz - 1;
-
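+ /* Build the mask from the ts_cqe_metadata_size2wqe_counter capability; it is
+ * used to match SKBs in the FIFO against TS CQEs by WQE counter.
+ */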
+ if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+ ptpsq->ts_cqe_ctr_mask =
+ (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
}
@@ -591,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!ptp_fs->valid)
return;
@@ -608,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5_flow_handle *rule;
int err;
@@ -775,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
if (!ptp_fs)
return -ENOMEM;
- priv->fs.ptp_fs = ptp_fs;
+ priv->fs->ptp_fs = ptp_fs;
return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index a71a32e00ebb..92dbbec472ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
u16 skb_fifo_pc;
struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_ptp_cq_stats *cq_stats;
+ u16 ts_cqe_ctr_mask;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 9db677e9ca9c..2842195ee548 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -2,11 +2,16 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
+#include "en/htb.h"
-#define BYTES_IN_MBIT 125000
+struct qos_sq_callback_params {
+ struct mlx5e_priv *priv;
+ struct mlx5e_channels *chs;
+};
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
@@ -28,124 +33,14 @@ int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
-{
- int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
-
- return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
-}
-
-/* Software representation of the QoS tree (internal to this file) */
-
-static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
-{
- int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
- int res;
-
- WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
- res = find_first_zero_bit(priv->htb.qos_used_qids, size);
-
- return res == size ? -ENOSPC : res;
-}
-
-struct mlx5e_qos_node {
- struct hlist_node hnode;
- struct mlx5e_qos_node *parent;
- u64 rate;
- u32 bw_share;
- u32 max_average_bw;
- u32 hw_id;
- u32 classid; /* 16-bit, except root. */
- u16 qid;
-};
-
-#define MLX5E_QOS_QID_INNER 0xffff
-#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
-
-static struct mlx5e_qos_node *
-mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
- struct mlx5e_qos_node *parent)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->parent = parent;
-
- node->qid = qid;
- __set_bit(qid, priv->htb.qos_used_qids);
-
- node->classid = classid;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
-
- mlx5e_update_tx_netdev_queues(priv);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->qid = MLX5E_QOS_QID_INNER;
- node->classid = MLX5E_HTB_CLASSID_ROOT;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
-{
- hash_del_rcu(&node->hnode);
- if (node->qid != MLX5E_QOS_QID_INNER) {
- __clear_bit(node->qid, priv->htb.qos_used_qids);
- mlx5e_update_tx_netdev_queues(priv);
- }
- /* Make sure this qid is no longer selected by mlx5e_select_queue, so
- * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
- */
- synchronize_net();
- kfree(node);
-}
-
/* TX datapath API */
-static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
/* These channel params are safe to access from the datapath, because:
- * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * 1. This function is called only after checking selq->htb_maj_id != 0,
* and the number of queues can't change while HTB offload is active.
- * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
* mlx5e_select_queue to finish while holding priv->state_lock,
* preventing other code from changing the number of queues.
*/
@@ -154,30 +49,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
-{
- struct mlx5e_qos_node *node;
- u16 qid;
- int res;
-
- rcu_read_lock();
-
- node = mlx5e_sw_node_find_rcu(priv, classid);
- if (!node) {
- res = -ENOENT;
- goto out;
- }
- qid = READ_ONCE(node->qid);
- if (qid == MLX5E_QOS_QID_INNER) {
- res = -EINVAL;
- goto out;
- }
- res = mlx5e_qid_from_qos(&priv->channels, qid);
-
-out:
- rcu_read_unlock();
- return res;
-}
+/* SQ lifecycle */
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
@@ -194,10 +66,8 @@ static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
return mlx5e_state_dereference(priv, qos_sqs[qid]);
}
-/* SQ lifecycle */
-
-static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
- struct mlx5e_qos_node *node)
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id)
{
struct mlx5e_create_cq_param ccp = {};
struct mlx5e_txqsq __rcu **qos_sqs;
@@ -210,13 +80,13 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
params = &chs->params;
- txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+ txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node->qid > priv->htb.max_qos_sqs);
- if (node->qid == priv->htb.max_qos_sqs) {
+ WARN_ON(node_qid > priv->htb_max_qos_sqs);
+ if (node_qid == priv->htb_max_qos_sqs) {
struct mlx5e_sq_stats *stats, **stats_list = NULL;
- if (priv->htb.max_qos_sqs == 0) {
+ if (priv->htb_max_qos_sqs == 0) {
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
sizeof(*stats_list),
GFP_KERNEL);
@@ -229,16 +99,16 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
return -ENOMEM;
}
if (stats_list)
- WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
- WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
- /* Order max_qos_sqs increment after writing the array pointer.
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ /* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
*/
- smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
}
- ix = node->qid % params->num_channels;
- qid = node->qid / params->num_channels;
+ ix = node_qid % params->num_channels;
+ qid = node_qid / params->num_channels;
c = chs->c[ix];
qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
@@ -257,8 +127,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id,
- priv->htb.qos_sq_stats[node->qid]);
+ &param_sq, sq, 0, hw_id,
+ priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
@@ -273,14 +143,22 @@ err_free_sq:
return err;
}
-static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
+{
+ struct qos_sq_callback_params *cb_params = data;
+
+ return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
+}
+
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
{
+ struct mlx5e_priv *priv = data;
struct mlx5e_txqsq *sq;
u16 qid;
- sq = mlx5e_get_qos_sq(priv, node->qid);
+ sq = mlx5e_get_qos_sq(priv, node_qid);
- qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+ qid = mlx5e_qid_from_qos(&priv->channels, node_qid);
/* If it's a new queue, it will be marked as started at this point.
* Stop it before updating txq2sq.
@@ -295,11 +173,13 @@ static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
+
+ return 0;
}
-static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
@@ -319,7 +199,7 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
smp_wmb();
}
-static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_params *params;
@@ -369,7 +249,7 @@ void mlx5e_qos_close_queues(struct mlx5e_channel *c)
kvfree(qos_sqs);
}
-static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -377,7 +257,7 @@ static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_close_queues(chs->c[i]);
}
-static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
u16 qos_sqs_size;
int i;
@@ -413,24 +293,20 @@ err_free:
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt, err;
-
- if (!priv->htb.maj_id)
- return 0;
+ struct qos_sq_callback_params callback_params;
+ int err;
err = mlx5e_qos_alloc_queues(priv, chs);
if (err)
return err;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- err = mlx5e_open_qos_sq(priv, chs, node);
- if (err) {
- mlx5e_qos_close_all_queues(chs);
- return err;
- }
+ callback_params.priv = priv;
+ callback_params.chs = chs;
+
+ err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
}
return 0;
@@ -438,14 +314,7 @@ int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- mlx5e_activate_qos_sq(priv, node);
- }
+ mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
@@ -474,7 +343,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
}
}
-static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -482,293 +351,14 @@ static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_deactivate_queues(chs->c[i]);
}
-/* HTB API */
-
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *root;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
-
- if (!mlx5_qos_is_supported(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
- return -EOPNOTSUPP;
- }
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
-
- err = mlx5e_qos_alloc_queues(priv, &priv->channels);
- if (err)
- goto err_cancel_selq;
- }
-
- root = mlx5e_sw_node_create_root(priv);
- if (IS_ERR(root)) {
- err = PTR_ERR(root);
- goto err_free_queues;
- }
-
- err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
- goto err_sw_node_delete;
- }
-
- WRITE_ONCE(priv->htb.defcls, htb_defcls);
- /* Order maj_id after defcls - pairs with
- * mlx5e_select_queue/mlx5e_select_htb_queues.
- */
- smp_store_release(&priv->htb.maj_id, htb_maj_id);
-
- if (opened)
- mlx5e_selq_apply(&priv->selq);
-
- return 0;
-
-err_sw_node_delete:
- mlx5e_sw_node_delete(priv, root);
-
-err_free_queues:
- if (opened)
- mlx5e_qos_close_all_queues(&priv->channels);
-err_cancel_selq:
- mlx5e_selq_cancel(&priv->selq);
- return err;
-}
-
-int mlx5e_htb_root_del(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *root;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
-
- /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
- * so that we can safely switch to its non-HTB non-PTP fastpath.
- */
- synchronize_net();
-
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
- mlx5e_selq_apply(&priv->selq);
-
- WRITE_ONCE(priv->htb.maj_id, 0);
-
- root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
- if (!root) {
- qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
- return -ENOENT;
- }
- err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
- if (err)
- qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
- root->hw_id, err);
- mlx5e_sw_node_delete(priv, root);
-
- mlx5e_qos_deactivate_all_queues(&priv->channels);
- mlx5e_qos_close_all_queues(&priv->channels);
-
- return err;
-}
-
-static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
- struct mlx5e_qos_node *parent, u32 *bw_share)
-{
- u64 share = 0;
-
- while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
- parent = parent->parent;
-
- if (parent->max_average_bw)
- share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
- parent->max_average_bw);
- else
- share = 101;
-
- *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
-
- qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
- rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
-
- return 0;
-}
-
-static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
-{
- /* Hardware treats 0 as "unlimited", set at least 1. */
- *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
-
- qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
- ceil, *max_average_bw);
-}
-
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- int qid;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
- classid, parent_classid, rate, ceil);
-
- qid = mlx5e_find_unused_qos_qid(priv);
- if (qid < 0) {
- NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
- return qid;
- }
-
- parent = mlx5e_sw_node_find(priv, parent_classid);
- if (!parent)
- return -EINVAL;
-
- node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- mlx5e_sw_node_delete(priv, node);
- return err;
- }
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- return mlx5e_qid_from_qos(&priv->channels, node->qid);
-}
-
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *child;
- int err, tmp_err;
- u32 new_hw_id;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
- classid, child_classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
- qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- /* Intentionally reuse the qid for the upcoming first child. */
- child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
- if (IS_ERR(child)) {
- err = PTR_ERR(child);
- goto err_destroy_hw_node;
- }
-
- child->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
- child->max_average_bw, &child->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- goto err_delete_sw_node;
- }
-
- /* No fail point. */
-
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, child);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, child);
- }
- }
-
- return 0;
-
-err_delete_sw_node:
- child->qid = MLX5E_QOS_QID_INNER;
- mlx5e_sw_node_delete(priv, child);
-
-err_destroy_hw_node:
- tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
- if (tmp_err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
- new_hw_id, classid, tmp_err);
- return err;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
-{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
- if (node->qid == qid)
- break;
-
- return node;
-}
-
-static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
netdev_tx_reset_queue(txq);
netif_tx_start_queue(txq);
}
-static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
@@ -781,251 +371,65 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
spin_unlock_bh(qdisc_lock(qdisc));
}
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack)
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
- struct mlx5e_qos_node *node;
- struct netdev_queue *txq;
- u16 qid, moved_qid;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
-
- node = mlx5e_sw_node_find(priv, *classid);
- if (!node)
- return -ENOENT;
-
- /* Store qid for reuse. */
- qid = node->qid;
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, qid));
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, *classid, err);
-
- mlx5e_sw_node_delete(priv, node);
-
- moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
-
- if (moved_qid == 0) {
- /* The last QoS SQ was just destroyed. */
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
- moved_qid--;
-
- if (moved_qid < qid) {
- /* The highest QoS SQ was just destroyed. */
- WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
- qid, moved_qid);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
-
- WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
- qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
-
- node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
- WARN(!node, "Could not find a node with qid %u to move to queue %u",
- moved_qid, qid);
-
- /* Stop traffic to the old queue. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
- __clear_bit(moved_qid, priv->htb.qos_used_qids);
-
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, moved_qid));
- mlx5e_deactivate_qos_sq(priv, moved_qid);
- mlx5e_close_qos_sq(priv, moved_qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, moved_qid);
-
- __set_bit(qid, priv->htb.qos_used_qids);
- WRITE_ONCE(node->qid, qid);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
- node->classid, moved_qid, qid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- mlx5e_update_tx_netdev_queues(priv);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
-
- *classid = node->classid;
- return 0;
-}
-
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- u32 old_hw_id, new_hw_id;
- int err, saved_err = 0;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
- force ? "_FORCE" : "", classid);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
- node->parent->bw_share,
- node->parent->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- if (!force)
- return err;
- saved_err = err;
- }
-
- /* Store qid for reuse and prevent clearing the bit. */
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, qid);
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- parent = node->parent;
- mlx5e_sw_node_delete(priv, node);
+ struct mlx5e_htb *htb = priv->htb;
+ int res;
- node = parent;
- WRITE_ONCE(node->qid, qid);
+ if (!htb && htb_qopt->command != TC_HTB_CREATE)
+ return -EINVAL;
- /* Early return on error in force mode. Parent will still be an inner
- * node to be deleted by a following delete operation.
- */
- if (saved_err)
- return saved_err;
-
- old_hw_id = node->hw_id;
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
+ switch (htb_qopt->command) {
+ case TC_HTB_CREATE:
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(htb_qopt->extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
}
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- return 0;
-}
-
-static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *child;
- int err = 0;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
- u32 old_bw_share = child->bw_share;
- int err_one;
-
- if (child->parent != node)
- continue;
-
- mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
- if (child->bw_share == old_bw_share)
- continue;
-
- err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
- child->max_average_bw, child->hw_id);
- if (!err && err_one) {
- err = err_one;
-
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
- qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
- node->classid, err);
+ priv->htb = mlx5e_htb_alloc();
+ htb = priv->htb;
+ if (!htb)
+ return -ENOMEM;
+ res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
+ if (res) {
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
}
+ return res;
+ case TC_HTB_DESTROY:
+ mlx5e_htb_cleanup(htb);
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
+ return 0;
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
+ htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb_qopt->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
+ htb_qopt->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-
- return err;
-}
-
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- u32 bw_share, max_average_bw;
- struct mlx5e_qos_node *node;
- bool ceil_changed = false;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
- classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
-
- err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
- max_average_bw, node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
- qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- if (max_average_bw != node->max_average_bw)
- ceil_changed = true;
-
- node->bw_share = bw_share;
- node->max_average_bw = max_average_bw;
-
- if (ceil_changed)
- err = mlx5e_qos_update_children(priv, node, extack);
-
- return err;
}
struct mlx5e_mqprio_rl {
@@ -1111,3 +515,4 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_i
*hw_id = rl->leaves_id[tc];
return 0;
}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 5d9bd91d86c2..4947afa23b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -6,40 +6,39 @@
#include <linux/mlx5/driver.h>
-#define MLX5E_QOS_MAX_LEAF_NODES 256
+#define BYTES_IN_MBIT 125000
struct mlx5e_priv;
+struct mlx5e_htb;
struct mlx5e_channels;
struct mlx5e_channel;
+struct tc_htb_qopt_offload;
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
-
-/* TX datapath API */
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
/* SQ lifecycle */
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id);
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id);
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
+
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs);
void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs);
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+/* TX datapath API */
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
/* HTB API */
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_root_del(struct mlx5e_priv *priv);
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb);
/* MQPRIO TX rate limit */
struct mlx5e_mqprio_rl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 48dc121b2cb4..39ef2a2561a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -269,6 +269,12 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
attr->u.vlan_filtering, br_offloads);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlx5_esw_bridge_vlan_proto_set(vport_num,
+ esw_owner_vhca_id,
+ attr->u.vlan_protocol,
+ br_offloads);
+ break;
default:
err = -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 86fa0bdbee36..fac7e3ff2674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -21,6 +21,7 @@
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
+#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
}
+static int
+mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct flow_action_entry *action;
+ struct mlx5e_tc_act *act;
+ bool add = false;
+ int i;
+
+ /* There is currently no use case for more than one action (e.g. pedit).
+ * When there is, cleanup of multiple actions on error will need to be handled.
+ */
+ if (!flow_offload_has_one_action(&fl_act->action))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ flow_action_for_each(i, action, &fl_act->action) {
+ act = mlx5e_tc_act_get(action->id, ns_type);
+ if (!act)
+ continue;
+
+ if (!act->offload_action)
+ continue;
+
+ if (!act->offload_action(priv, fl_act, action))
+ add = true;
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->destroy_action)
+ return -EOPNOTSUPP;
+
+ return act->destroy_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->stats_action)
+ return -EOPNOTSUPP;
+
+ return act->stats_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return mlx5e_rep_indr_replace_act(rpriv, fl_act);
+ case FLOW_ACT_DESTROY:
+ return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
+ case FLOW_ACT_STATS:
+ return mlx5e_rep_indr_stats_act(rpriv, fl_act);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
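+/* Indirect callbacks invoked without a netdev can only handle standalone
+ * action offload (TC_SETUP_ACT) requests.
+ */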
+static int
+mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
+ enum tc_setup_type type,
+ void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return mlx5e_rep_indr_setup_act(rpriv, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);
switch (type) {
case TC_SETUP_BLOCK:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index d98a277eb7f8..f675b1926340 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -7,6 +7,7 @@
#include <linux/rcupdate.h>
#include "en.h"
#include "en/ptp.h"
+#include "en/htb.h"
struct mlx5e_selq_params {
unsigned int num_regular_queues;
@@ -19,6 +20,8 @@ struct mlx5e_selq_params {
bool is_ptp : 1;
};
};
+ u16 htb_maj_id;
+ u16 htb_defcls;
};
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
@@ -44,6 +47,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
.num_tcs = 1,
.is_htb = false,
.is_ptp = false,
+ .htb_maj_id = 0,
+ .htb_defcls = 0,
};
rcu_assign_pointer(selq->active, init_params);
@@ -64,21 +69,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
selq->standby = NULL;
}
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
{
+ struct mlx5e_selq_params *selq_active;
+
lockdep_assert_held(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
selq->is_prepared = true;
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
selq->standby->num_channels = params->num_channels;
selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
selq->standby->num_regular_queues =
selq->standby->num_channels * selq->standby->num_tcs;
- selq->standby->is_htb = htb;
selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
}
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *selq_active =
+ rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+ return selq_active->htb_maj_id;
+}
+
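+/* Stage HTB parameters in the standby selq instance; they take effect only
+ * once mlx5e_selq_apply() swaps it in.
+ */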
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+ struct mlx5e_selq_params *selq_active;
+
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
+ selq->standby->is_htb = htb_maj_id;
+ selq->standby->htb_maj_id = htb_maj_id;
+ selq->standby->htb_defcls = htb_defcls;
+}
+
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
struct mlx5e_selq_params *old_params;
@@ -137,20 +171,21 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
return selq->num_regular_queues + up;
}
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
{
u16 classid;
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+ if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
classid = TC_H_MIN(skb->priority);
else
- classid = READ_ONCE(priv->htb.defcls);
+ classid = selq->htb_defcls;
if (!classid)
return 0;
- return mlx5e_get_txq_by_classid(priv, classid);
+ return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
}
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -187,10 +222,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
up * selq->num_channels;
}
- if (unlikely(selq->is_htb)) {
+ if (unlikely(selq->htb_maj_id)) {
/* num_tcs == 1, shortcut for PTP */
- txq_ix = mlx5e_select_htb_queue(priv, skb);
+ txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
if (txq_ix > 0)
return txq_ix;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
index 6c070141d8f1..fd590f80e4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -21,7 +21,9 @@ struct sk_buff;
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
void mlx5e_selq_apply(struct mlx5e_selq *selq);
void mlx5e_selq_cancel(struct mlx5e_selq *selq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 2755c25ba324..305fde62a78d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -30,7 +30,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
&mlx5e_tc_act_sample,
- NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_police,
&mlx5e_tc_act_ct,
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
@@ -106,8 +106,8 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
- parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
+ parse_state->flow_action = flow_action;
}
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index f34714c5ddd4..e1570ff056ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -13,7 +13,7 @@
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
- unsigned int num_actions;
+ struct flow_action *flow_action;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
@@ -50,6 +50,16 @@ struct mlx5e_tc_act {
bool (*is_multi_table_act)(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
+
+ int (*offload_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act);
+
+ int (*destroy_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
+
+ int (*stats_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
};
struct mlx5e_tc_flow_action {
@@ -76,6 +86,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+extern struct mlx5e_tc_act mlx5e_tc_act_police;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 4726bcb46eec..69949ab830b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
new file mode 100644
index 000000000000..37522352e4b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return false;
+ }
+ if (mlx5e_policer_validate(parse_state->flow_action, act,
+ parse_state->extack))
+ return false;
+
+ return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
+}
+
+static int
+fill_meter_params_from_act(const struct flow_action_entry *act,
+ struct mlx5e_flow_meter_params *params)
+{
+ params->index = act->hw_index;
+ if (act->police.rate_bytes_ps) {
+ params->mode = MLX5_RATE_LIMIT_BPS;
+ /* change rate to bits per second */
+ params->rate = act->police.rate_bytes_ps << 3;
+ params->burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps) {
+ params->mode = MLX5_RATE_LIMIT_PPS;
+ params->rate = act->police.rate_pkt_ps;
+ params->burst = act->police.burst_pkt;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+
+ return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return true;
+}
+
+static int
+tc_act_police_offload(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ int err = 0;
+
+ err = fill_meter_params_from_act(act, &params);
+ if (err)
+ return err;
+
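+ /* Look up the meter by index: create it if it doesn't exist yet, otherwise
+ * update the existing instance and drop the reference taken by the lookup.
+ */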
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) {
+ meter = mlx5e_tc_meter_replace(priv->mdev, &params);
+ } else if (!IS_ERR(meter)) {
+ err = mlx5e_tc_meter_update(meter, &params);
+ mlx5e_tc_meter_put(meter);
+ }
+
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ err = PTR_ERR(meter);
+ }
+
+ return err;
+}
+
+static int
+tc_act_police_destroy(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+ /* first put for the get and second for cleanup */
+ mlx5e_tc_meter_put(meter);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+static int
+tc_act_police_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ u64 bytes, packets, drops, lastuse;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+
+ mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse);
+ flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_police = {
+ .can_offload = tc_act_can_offload_police,
+ .parse_action = tc_act_parse_police,
+ .is_multi_table_act = tc_act_is_multi_table_act_police,
+ .offload_action = tc_act_police_offload,
+ .destroy_action = tc_act_police_destroy,
+ .stats_action = tc_act_police_stats,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index a7d9eab19e4a..53b270f652b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -12,7 +12,7 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
{
struct netlink_ext_ack *extack = parse_state->extack;
- if (parse_state->num_actions != 1) {
+ if (parse_state->flow_action->num_entries != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
new file mode 100644
index 000000000000..a53e205f4a89
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/math64.h>
+#include "lib/aso.h"
+#include "en/tc/post_act.h"
+#include "meter.h"
+#include "en/tc_priv.h"
+
+#define MLX5_START_COLOR_SHIFT 28
+#define MLX5_METER_MODE_SHIFT 24
+#define MLX5_CBS_EXP_SHIFT 24
+#define MLX5_CBS_MAN_SHIFT 16
+#define MLX5_CIR_EXP_SHIFT 8
+
+/* cir = 8*(10^9)*cir_mantissa/(2^cir_exponent) bits/s */
+#define MLX5_CONST_CIR 8000000000ULL
+#define MLX5_CALC_CIR(m, e) ((MLX5_CONST_CIR * (m)) >> (e))
+#define MLX5_MAX_CIR ((MLX5_CONST_CIR * 0x100) - 1)
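+/* Example: a CIR of 10^9 bit/s is encoded exactly as mantissa 1, exponent 3,
+ * since (8 * 10^9 * 1) >> 3 = 10^9.
+ */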
+
+/* cbs = cbs_mantissa*2^cbs_exponent */
+#define MLX5_CALC_CBS(m, e) ((m) << (e))
+#define MLX5_MAX_CBS ((0x100ULL << 0x1F) - 1)
+#define MLX5_MAX_HW_CBS 0x7FFFFFFF
+
+struct mlx5e_flow_meter_aso_obj {
+ struct list_head entry;
+ int base_id;
+ int total_meters;
+
+ unsigned long meters_map[0]; /* must be at the end of this struct */
+};
+
+struct mlx5e_flow_meters {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_aso *aso;
+ struct mutex aso_lock; /* Protects aso operations */
+ int log_granularity;
+ u32 pdn;
+
+ DECLARE_HASHTABLE(hashtbl, 8);
+
+ struct mutex sync_lock; /* Protects flow meter operations */
+ struct list_head partial_list;
+ struct list_head full_list;
+
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_post_act *post_act;
+};
+
+static void
+mlx5e_flow_meter_cir_calc(u64 cir, u8 *man, u8 *exp)
+{
+ s64 _cir, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cir << e;
+ if ((s64)m < 0) /* overflow */
+ break;
+ m = div64_u64(m, MLX5_CONST_CIR);
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cir = MLX5_CALC_CIR(m, e);
+ _delta = cir - _cir;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
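+
+/* Worked example for the search above (illustrative numbers, not taken from
+ * the patch): a requested rate of 10^9 bits/s is matched exactly by
+ * cir_mantissa = 1 and cir_exponent = 3, since 8 * 10^9 * 1 / 2^3 = 10^9.
+ */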
+
+static void
+mlx5e_flow_meter_cbs_calc(u64 cbs, u8 *man, u8 *exp)
+{
+ s64 _cbs, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cbs >> e;
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cbs = MLX5_CALC_CBS(m, e);
+ _delta = cbs - _cbs;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
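+
+/* Worked example for the search above (illustrative numbers): a burst of
+ * 65536 bytes is matched exactly by cbs_mantissa = 128 and cbs_exponent = 9,
+ * since 128 * 2^9 = 65536.
+ */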
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params)
+{
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl;
+ struct mlx5_wqe_aso_data_seg *aso_data;
+ struct mlx5e_flow_meters *flow_meters;
+ u8 cir_man, cir_exp, cbs_man, cbs_exp;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *aso;
+ u64 rate, burst;
+ u8 ds_cnt;
+ int err;
+
+ rate = meter_params->rate;
+ burst = meter_params->burst;
+
+ /* HW treats each packet as 128 bytes (1024 bits) in PPS mode */
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS) {
+ rate <<= 10;
+ burst <<= 7;
+ }
+
+ if (!rate || rate > MLX5_MAX_CIR || !burst || burst > MLX5_MAX_CBS)
+ return -EINVAL;
+
+ /* HW has limitation of total 31 bits for cbs */
+ if (burst > MLX5_MAX_HW_CBS) {
+ mlx5_core_warn(mdev,
+ "burst(%lld) is too large, use HW allowed value(%d)\n",
+ burst, MLX5_MAX_HW_CBS);
+ burst = MLX5_MAX_HW_CBS;
+ }
+
+ mlx5_core_dbg(mdev, "meter mode=%d\n", meter_params->mode);
+ mlx5e_flow_meter_cir_calc(rate, &cir_man, &cir_exp);
+ mlx5_core_dbg(mdev, "rate=%lld, cir=%lld, exp=%d, man=%d\n",
+ rate, MLX5_CALC_CIR(cir_man, cir_exp), cir_exp, cir_man);
+ mlx5e_flow_meter_cbs_calc(burst, &cbs_man, &cbs_exp);
+ mlx5_core_dbg(mdev, "burst=%lld, cbs=%lld, exp=%d, man=%d\n",
+ burst, MLX5_CALC_CBS((u64)cbs_man, cbs_exp), cbs_exp, cbs_man);
+
+ if (!cir_man || !cbs_man)
+ return -EINVAL;
+
+ flow_meters = meter->flow_meters;
+ aso = flow_meters->aso;
+
+ mutex_lock(&flow_meters->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso, ds_cnt, aso_wqe, meter->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
+
+ aso_ctrl = &aso_wqe->aso_ctrl;
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
+ aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+ MLX5_ASO_ALWAYS_TRUE << 4;
+ aso_ctrl->data_offset_condition_operand = MLX5_ASO_LOGICAL_OR << 6;
+ aso_ctrl->data_mask = cpu_to_be64(0x80FFFFFFULL << (meter->idx ? 0 : 32));
+
+ aso_data = (struct mlx5_wqe_aso_data_seg *)(aso_wqe + 1);
+ memset(aso_data, 0, sizeof(*aso_data));
+ aso_data->bytewise_data[meter->idx * 8] = cpu_to_be32((0x1 << 31) | /* valid */
+ (MLX5_FLOW_METER_COLOR_GREEN << MLX5_START_COLOR_SHIFT));
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS)
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_NUM_PACKETS << MLX5_METER_MODE_SHIFT);
+ else
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH << MLX5_METER_MODE_SHIFT);
+
+ aso_data->bytewise_data[meter->idx * 8 + 2] = cpu_to_be32((cbs_exp << MLX5_CBS_EXP_SHIFT) |
+ (cbs_man << MLX5_CBS_MAN_SHIFT) |
+ (cir_exp << MLX5_CIR_EXP_SHIFT) |
+ cir_man);
+
+ mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
+
+ /* With newer FW, the wait for the first ASO WQE can exceed 2us, so use a 10ms timeout. */
+ err = mlx5_aso_poll_cq(aso, true, 10);
+ mutex_unlock(&flow_meters->aso_lock);
+
+ return err;
+}
+
+static int
+mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ void *obj;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+
+ obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
+ MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err) {
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) created\n", *obj_id);
+ }
+
+ return err;
+}
+
+static void
+mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) destroyed\n", obj_id);
+}
+
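+/* Each ASO flow meter object holds two meters: bitmap position pos maps to
+ * obj_id = base_id + pos / 2 and idx = pos % 2. For example (illustrative),
+ * with log_granularity = 6 one allocation covers 64 objects, i.e. 128 meters,
+ * and pos = 5 maps to obj_id = base_id + 2, idx = 1.
+ */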
+static struct mlx5e_flow_meter_handle *
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+{
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5_fc *counter;
+ int err, pos, total;
+ u32 id;
+
+ meter = kzalloc(sizeof(*meter), GFP_KERNEL);
+ if (!meter)
+ return ERR_PTR(-ENOMEM);
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_red_counter;
+ }
+ meter->red_counter = counter;
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_green_counter;
+ }
+ meter->green_counter = counter;
+
+ meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
+ struct mlx5e_flow_meter_aso_obj,
+ entry);
+ /* 2 meters in one object */
+ total = 1 << (flow_meters->log_granularity + 1);
+ if (!meters_obj) {
+ err = mlx5e_flow_meter_create_aso_obj(flow_meters, &id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create flow meter ASO object\n");
+ goto err_create;
+ }
+
+ meters_obj = kzalloc(sizeof(*meters_obj) + BITS_TO_BYTES(total),
+ GFP_KERNEL);
+ if (!meters_obj) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ meters_obj->base_id = id;
+ meters_obj->total_meters = total;
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ pos = 0;
+ } else {
+ pos = find_first_zero_bit(meters_obj->meters_map, total);
+ if (bitmap_weight(meters_obj->meters_map, total) == total - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->full_list);
+ }
+ }
+
+ bitmap_set(meters_obj->meters_map, pos, 1);
+ meter->flow_meters = flow_meters;
+ meter->meters_obj = meters_obj;
+ meter->obj_id = meters_obj->base_id + pos / 2;
+ meter->idx = pos % 2;
+
+ mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+
+ return meter;
+
+err_mem:
+ mlx5e_flow_meter_destroy_aso_obj(mdev, id);
+err_create:
+ mlx5_fc_destroy(mdev, meter->green_counter);
+err_green_counter:
+ mlx5_fc_destroy(mdev, meter->red_counter);
+err_red_counter:
+ kfree(meter);
+ return ERR_PTR(err);
+}
+
+static void
+__mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ int n, pos;
+
+ mlx5_fc_destroy(mdev, meter->green_counter);
+ mlx5_fc_destroy(mdev, meter->red_counter);
+
+ meters_obj = meter->meters_obj;
+ pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
+ bitmap_clear(meters_obj->meters_map, pos, 1);
+ n = bitmap_weight(meters_obj->meters_map, meters_obj->total_meters);
+ if (n == 0) {
+ list_del(&meters_obj->entry);
+ mlx5e_flow_meter_destroy_aso_obj(mdev, meters_obj->base_id);
+ kfree(meters_obj);
+ } else if (n == meters_obj->total_meters - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ }
+
+ mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+ kfree(meter);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index)
+ if (meter->params.index == index)
+ goto add_ref;
+
+ return ERR_PTR(-ENOENT);
+
+add_ref:
+ meter->refcnt++;
+
+ return meter;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ mutex_unlock(&flow_meters->sync_lock);
+
+ return meter;
+}
+
+static void
+__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
+ }
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+ mutex_lock(&flow_meters->sync_lock);
+ __mlx5e_tc_meter_put(meter);
+ mutex_unlock(&flow_meters->sync_lock);
+}
+
+static struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = __mlx5e_flow_meter_alloc(flow_meters);
+ if (IS_ERR(meter))
+ return meter;
+
+ hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
+ meter->params.index = params->index;
+ meter->refcnt++;
+
+ return meter;
+}
+
+static int
+__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ int err = 0;
+
+ if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
+ meter->params.burst != params->burst) {
+ err = mlx5e_tc_meter_modify(mdev, meter, params);
+ if (err)
+ goto out;
+
+ meter->params.mode = params->mode;
+ meter->params.rate = params->rate;
+ meter->params.burst = params->burst;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_meters->sync_lock);
+ err = __mlx5e_tc_meter_update(meter, params);
+ mutex_unlock(&flow_meters->sync_lock);
+ return err;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ if (IS_ERR(meter)) {
+ meter = mlx5e_tc_meter_alloc(flow_meters, params);
+ if (IS_ERR(meter)) {
+ err = PTR_ERR(meter);
+ goto err_get;
+ }
+ }
+
+ err = __mlx5e_tc_meter_update(meter, params);
+ if (err)
+ goto err_update;
+
+ mutex_unlock(&flow_meters->sync_lock);
+ return meter;
+
+err_update:
+ __mlx5e_tc_meter_put(meter);
+err_get:
+ mutex_unlock(&flow_meters->sync_lock);
+ return ERR_PTR(err);
+}
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters)
+{
+ return flow_meters->ns_type;
+}
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (IS_ERR_OR_NULL(post_act)) {
+ netdev_dbg(priv->netdev,
+ "flow meter offload is not supported, post action is missing\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ flow_meters = kzalloc(sizeof(*flow_meters), GFP_KERNEL);
+ if (!flow_meters)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_alloc_pd(mdev, &flow_meters->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc pd for flow meter aso, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_meters->aso = mlx5_aso_create(mdev, flow_meters->pdn);
+ if (IS_ERR(flow_meters->aso)) {
+ mlx5_core_warn(mdev, "Failed to create aso wqe for flow meter\n");
+ err = PTR_ERR(flow_meters->aso);
+ goto err_sq;
+ }
+
+ mutex_init(&flow_meters->sync_lock);
+ INIT_LIST_HEAD(&flow_meters->partial_list);
+ INIT_LIST_HEAD(&flow_meters->full_list);
+
+ flow_meters->ns_type = ns_type;
+ flow_meters->mdev = mdev;
+ flow_meters->post_act = post_act;
+ mutex_init(&flow_meters->aso_lock);
+ flow_meters->log_granularity = min_t(int, 6,
+ MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));
+
+ return flow_meters;
+
+err_sq:
+ mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
+err_out:
+ kfree(flow_meters);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
+{
+ if (IS_ERR_OR_NULL(flow_meters))
+ return;
+
+ mlx5_aso_destroy(flow_meters->aso);
+ mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
+ kfree(flow_meters);
+}
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse)
+{
+ u64 bytes1, packets1, lastuse1;
+ u64 bytes2, packets2, lastuse2;
+
+ mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+
+ *bytes = bytes1 + bytes2;
+ *packets = packets1 + packets2;
+ *drops = packets2;
+ *lastuse = max_t(u64, lastuse1, lastuse2);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
new file mode 100644
index 000000000000..6de6e8a16327
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_FLOW_METER_H__
+#define __MLX5_EN_FLOW_METER_H__
+
+struct mlx5e_post_meter_priv;
+struct mlx5e_flow_meter_aso_obj;
+struct mlx5e_flow_meters;
+struct mlx5_flow_attr;
+
+enum mlx5e_flow_meter_mode {
+ MLX5_RATE_LIMIT_BPS,
+ MLX5_RATE_LIMIT_PPS,
+};
+
+struct mlx5e_flow_meter_params {
+ enum mlx5e_flow_meter_mode mode;
+ /* police action index */
+ u32 index;
+ u64 rate;
+ u64 burst;
+};
+
+struct mlx5e_flow_meter_handle {
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ u32 obj_id;
+ u8 idx;
+
+ int refcnt;
+ struct hlist_node hlist;
+ struct mlx5e_flow_meter_params params;
+
+ struct mlx5_fc *green_counter;
+ struct mlx5_fc *red_counter;
+};
+
+struct mlx5e_meter_attr {
+ struct mlx5e_flow_meter_params params;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5e_post_meter_priv *post_meter;
+};
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params);
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params);
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters);
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_action);
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+
+#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index dea137dd744b..4e48946c4c2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -22,9 +22,9 @@ struct mlx5e_post_act_handle {
u32 id;
};
-#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
-#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0)
-#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX
+#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
@@ -36,7 +36,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
int err;
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
@@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
new file mode 100644
index 000000000000..8b77e822810e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "en/tc_priv.h"
+#include "post_meter.h"
+#include "en/tc/post_act.h"
+
+#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
+#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
+
+struct mlx5e_post_meter_priv {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *fwd_green_rule;
+ struct mlx5_flow_handle *drop_red_rule;
+};
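+
+/* The post_meter table below holds exactly two rules (max_fte = 2): one that
+ * counts and drops packets the meter marked RED, and one that counts packets
+ * marked GREEN and forwards them to the post-action table.
+ */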
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->ft;
+}
+
+static int
+mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+
+ root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
+ if (!root_ns) {
+ mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(post_meter->ft)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ return PTR_ERR(post_meter->ft);
+ }
+
+ return 0;
+}
+
+static int
+mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *misc2, *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc2 = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_5, MLX5_PACKET_COLOR_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
+ if (IS_ERR(post_meter->fg)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
+ err = PTR_ERR(post_meter->fg);
+ }
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter_id = mlx5_fc_id(red_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ err = PTR_ERR(rule);
+ goto err_red;
+ }
+ post_meter->drop_red_rule = rule;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(green_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ err = PTR_ERR(rule);
+ goto err_green;
+ }
+ post_meter->fwd_green_rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_green:
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+err_red:
+ kvfree(spec);
+ return err;
+}
+
+static void
+mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(post_meter->fwd_green_rule);
+}
+
+static void
+mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_group(post_meter->fg);
+}
+
+static void
+mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_table(post_meter->ft);
+}
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5e_post_meter_priv *post_meter;
+ int err;
+
+ post_meter = kzalloc(sizeof(*post_meter), GFP_KERNEL);
+ if (!post_meter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
+ if (err)
+ goto err_ft;
+
+ err = mlx5e_post_meter_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
+ red_counter);
+ if (err)
+ goto err_rules;
+
+ return post_meter;
+
+err_rules:
+ mlx5e_post_meter_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_table_destroy(post_meter);
+err_ft:
+ kfree(post_meter);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rules_destroy(post_meter);
+ mlx5e_post_meter_fg_destroy(post_meter);
+ mlx5e_post_meter_table_destroy(post_meter);
+ kfree(post_meter);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
new file mode 100644
index 000000000000..34d0e4b9fc7a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_POST_METER_H__
+#define __MLX5_EN_POST_METER_H__
+
+#define packet_color_to_reg { \
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5, \
+ .moffset = 0, \
+ .mlen = 8, \
+ .soffset = MLX5_BYTE_OFF(fte_match_param, \
+ misc_parameters_2.metadata_reg_c_5), \
+}
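+
+/* Register C5 is shared: its 8 LSBs carry the packet color written by the
+ * flow meter (this mapping), while the remaining 24 bits carry the FTE id
+ * (see the updated fteid_to_reg_ct mapping).
+ */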
+
+struct mlx5e_post_meter_priv;
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter);
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+
+#endif /* __MLX5_EN_POST_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 25f51f80a9b4..864ce0c393e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -36,8 +36,8 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
-#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
+#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
/* Statically allocate modify actions for
* ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
@@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv {
struct mlx5_ct_fs *fs;
struct mlx5_ct_fs_ops *fs_ops;
spinlock_t ht_lock; /* protects ft entries */
+ struct workqueue_struct *wq;
struct mlx5_tc_ct_debugfs debugfs;
};
@@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
static void
__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
{
- struct mlx5e_priv *priv;
-
if (!refcount_dec_and_test(&entry->refcnt))
return;
- priv = netdev_priv(entry->ct_priv->netdev);
INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
- queue_work(priv->wq, &entry->work);
+ queue_work(entry->ct_priv->wq, &entry->work);
}
static struct mlx5_ct_counter *
@@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
static void
mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
{
- struct mlx5e_priv *priv;
-
if (!refcount_dec_and_test(&ft->refcount))
return;
+ flush_workqueue(ct_priv->wq);
nf_flow_table_offload_del_cb(ft->nf_ft,
mlx5_tc_ct_block_flow_offload, ft);
rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
- priv = netdev_priv(ct_priv->netdev);
- flush_workqueue(priv->wq);
mlx5_tc_ct_free_pre_ct_tables(ft);
mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
kfree(ft);
@@ -2067,7 +2062,7 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
/* Ignore_flow_level support isn't supported by default for VFs and so post_act
* won't be supported. Skip showing error msg.
*/
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
err_msg = "post action is missing";
err = -EOPNOTSUPP;
goto out_err;
@@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
goto err_ct_tuples_nat_ht;
+ ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
+ if (!ct_priv->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
err = mlx5_tc_ct_fs_init(ct_priv);
if (err)
goto err_init_fs;
@@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
return ct_priv;
err_init_fs:
+ destroy_workqueue(ct_priv->wq);
+err_wq:
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
err_ct_tuples_nat_ht:
rhashtable_destroy(&ct_priv->ct_tuples_ht);
@@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
if (!ct_priv)
return;
+ destroy_workqueue(ct_priv->wq);
mlx5_ct_tc_remove_dbgfs(ct_priv);
chains = ct_priv->chains;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 00a3ba862afb..5bbd6b92840f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -62,10 +62,11 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_4),\
}
+/* The 8 LSBs of metadata register C5 are reserved for the packet color */
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
- .moffset = 0,\
- .mlen = 32,\
+ .moffset = 8,\
+ .mlen = 24,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -84,10 +85,8 @@ struct mlx5_ct_attr {
.mlen = ESW_ZONE_ID_BITS,\
}
-#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
-#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_ZONE_BITS MLX5_REG_MAPPING_MBITS(ZONE_TO_REG)
+#define MLX5_CT_ZONE_MASK MLX5_REG_MAPPING_MASK(ZONE_TO_REG)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 3b74a6fd5c43..10c9a8a79d00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -11,7 +11,6 @@
#define MLX5E_TC_MAX_SPLITS 1
-#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
@@ -44,6 +43,8 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_act_parse_state parse_state;
};
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
+
/* Helper struct for accessing a struct containing list_head array.
* Containing struct
* |- Helper array
@@ -203,7 +204,13 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
+
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index d87bbb0be7c8..e6f64d890fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -506,7 +506,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int err;
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
@@ -620,7 +620,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index a8cfab4a393c..cc18d97d8ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -7,6 +7,8 @@
#include "en.h"
#include <net/xdp_sock_drv.h>
+#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
+
/* RX data path */
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
@@ -21,6 +23,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
+retry:
dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
@@ -32,6 +35,17 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
*/
dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
+ /* MTT page mapping has alignment requirements. If they are not
+ * satisfied, leak the descriptor so that it is never returned again,
+ * and try to allocate a new one.
+ */
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
+ xsk_buff_discard(dma_info->xsk);
+ goto retry;
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 04c0a5e1c89a..1839f1ab1ddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -194,4 +194,14 @@ static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
}
+
+static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
+{
+ return mlx5e_ktls_init_tx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mlx5e_ktls_cleanup_tx(priv);
+}
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 3ae6067c7e6b..20a4f1e585af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -267,7 +267,7 @@ out:
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs.accel_tcp->tables[i].t;
+ dest.ft = priv->fs->accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
{
struct mlx5e_accel_fs_tcp *fs_tcp;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
{
int i;
- if (!priv->fs.accel_tcp)
+ if (!priv->fs->accel_tcp)
return;
accel_fs_tcp_disable(priv);
@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
}
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
@@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
- if (!priv->fs.accel_tcp)
+ priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
+ if (!priv->fs->accel_tcp)
return -ENOMEM;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
@@ -396,7 +396,7 @@ err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 8315e8f603d7..f8113fd23265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
deleted file mode 100644
index e4eeb2ba21c7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_STEERING_H__
-#define __MLX5_IPSEC_STEERING_H__
-
-#include "en.h"
-#include "ipsec.h"
-#include "ipsec_offload.h"
-#include "en/fs.h"
-
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule);
-#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 814f2a56f633..30a70d139046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+ if (!mlx5e_ktls_type_check(mdev, crypto_info))
return -EOPNOTSUPP;
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index d016624fbc9d..948400dee525 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -42,6 +42,8 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
@@ -62,6 +64,8 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
atomic64_t tx_tls_del;
+ atomic64_t tx_tls_pool_alloc;
+ atomic64_t tx_tls_pool_free;
atomic64_t rx_tls_ctx;
atomic64_t rx_tls_del;
};
@@ -69,6 +73,7 @@ struct mlx5e_tls_sw_stats {
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
+ struct mlx5e_tls_tx_pool *tx_pool;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
@@ -83,6 +88,15 @@ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+}
+
static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 0bb0633b7542..27483aa7be8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_rx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
*ctx = priv_rx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 2ab46c4247ff..7c1c0eb16787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -41,6 +41,8 @@
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_free) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 4b6f0d1ea59a..0aef69527226 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -35,30 +35,70 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += 1; /* fence nop */
return stop_room;
}
+static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
+{
+ MLX5_SET(tisc, tisc, tls_en, 1);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
- MLX5_SET(tisc, tisc, tls_en, 1);
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
+}
+
+static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
- return mlx5e_create_tis(mdev, in, tisn);
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
}
struct mlx5e_ktls_offload_context_tx {
- struct tls_offload_context_tx *tx_ctx;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
- struct mlx5e_tls_sw_stats *sw_stats;
+ /* fast path */
u32 expected_seq;
u32 tisn;
- u32 key_id;
bool ctx_post_pending;
+ /* control / resync */
+ struct list_head list_node; /* member of the pool */
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ struct tls_offload_context_tx *tx_ctx;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ u32 key_id;
+ u8 create_err : 1;
};
static void
@@ -68,8 +108,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_tx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
*ctx = priv_tx;
}
@@ -83,28 +122,368 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
return *ctx;
}
+/* struct for callback API management */
+struct mlx5e_async_ctx {
+ struct mlx5_async_work context;
+ struct mlx5_async_ctx async_ctx;
+ struct work_struct work;
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct completion complete;
+ int err;
+ union {
+ u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
+ };
+};
+
+static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+{
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ if (!bulk_async)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
+ init_completion(&async->complete);
+ }
+
+ return bulk_async;
+}
+
+static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
+ }
+ kvfree(bulk_async);
+}
+
+static void create_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ if (status) {
+ async->err = status;
+ priv_tx->create_err = 1;
+ goto out;
+ }
+
+ priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
+out:
+ complete(&async->complete);
+}
+
+static void destroy_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ complete(&async->complete);
+ kfree(priv_tx);
+}
+
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
+ struct mlx5e_async_ctx *async)
+{
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ int err;
+
+ priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+ if (!priv_tx)
+ return ERR_PTR(-ENOMEM);
+
+ priv_tx->mdev = mdev;
+ priv_tx->sw_stats = sw_stats;
+
+ if (!async) {
+ err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+ if (err)
+ goto err_out;
+ } else {
+ async->priv_tx = priv_tx;
+ err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ async->out_create, sizeof(async->out_create),
+ create_tis_callback, &async->context);
+ if (err)
+ goto err_out;
+ }
+
+ return priv_tx;
+
+err_out:
+ kfree(priv_tx);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_async_ctx *async)
+{
+ if (priv_tx->create_err) {
+ complete(&async->complete);
+ kfree(priv_tx);
+ return;
+ }
+ async->priv_tx = priv_tx;
+ mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
+ &async->async_ctx,
+ async->out_destroy, sizeof(async->out_destroy),
+ destroy_tis_callback, &async->context);
+}
+
+static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
+ struct list_head *list, int size)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = mlx5e_bulk_async_init(mdev, size);
+ if (!bulk_async)
+ return;
+
+ i = 0;
+ list_for_each_entry(obj, list, list_node) {
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ i++;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ wait_for_completion(&async->complete);
+ }
+ mlx5e_bulk_async_cleanup(bulk_async, size);
+}
+
+/* Recycling pool API */
+
+#define MLX5E_TLS_TX_POOL_BULK (16)
+#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
+#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
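+
+/* Rough sketch of the watermark scheme below: the pool is refilled in bulks
+ * of 16 TIS contexts once it drops to MLX5E_TLS_TX_POOL_LOW (1024) entries
+ * or runs empty, and trimmed in bulks of 16 once it grows to
+ * MLX5E_TLS_TX_POOL_HIGH (4096) entries.
+ */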
+
+struct mlx5e_tls_tx_pool {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ struct mutex lock; /* Protects access to the pool */
+ struct list_head list;
+ size_t size;
+
+ struct workqueue_struct *wq;
+ struct work_struct create_work;
+ struct work_struct destroy_work;
+};
+
+static void create_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, create_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ LIST_HEAD(local_list);
+ int i, j, err = 0;
+
+ bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
+ if (!bulk_async)
+ return;
+
+ for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+ list_add(&obj->list_node, &local_list);
+ }
+
+ for (j = 0; j < i; j++) {
+ struct mlx5e_async_ctx *async = &bulk_async[j];
+
+ wait_for_completion(&async->complete);
+ if (!err && async->err)
+ err = async->err;
+ }
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
+ mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ if (err)
+ goto err_out;
+
+ mutex_lock(&pool->lock);
+ if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ goto err_out;
+ }
+ list_splice(&local_list, &pool->list);
+ pool->size += MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+ return;
+
+err_out:
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static void destroy_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
+ struct mlx5e_tls_sw_stats *sw_stats)
+{
+ struct mlx5e_tls_tx_pool *pool;
+
+ BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
+ if (!pool->wq)
+ goto err_free;
+
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->lock);
+
+ INIT_WORK(&pool->create_work, create_work);
+ INIT_WORK(&pool->destroy_work, destroy_work);
+
+ pool->mdev = mdev;
+ pool->sw_stats = sw_stats;
+
+ return pool;
+
+err_free:
+ kvfree(pool);
+ return NULL;
+}
+
+static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ }
+ if (pool->size) {
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
+ atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
+ }
+}
+
+static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ mlx5e_tls_tx_pool_list_cleanup(pool);
+ destroy_workqueue(pool->wq);
+ kvfree(pool);
+}
+
+static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
+{
+ mutex_lock(&pool->lock);
+ list_add(&obj->list_node, &pool->list);
+ if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, &pool->destroy_work);
+ mutex_unlock(&pool->lock);
+}
+
+static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+
+ mutex_lock(&pool->lock);
+ if (unlikely(pool->size == 0)) {
+ /* pool is empty:
+ * - trigger the populating work, and
+ * - serve the current context via the regular blocking api.
+ */
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
+ if (!IS_ERR(obj))
+ atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
+ return obj;
+ }
+
+ obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
+ list_node);
+ list_del(&obj->list_node);
+ if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ return obj;
+}
+
+/* End of pool API */
+
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
- priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
- if (!priv_tx)
- return -ENOMEM;
+ priv_tx = pool_pop(pool);
+ if (IS_ERR(priv_tx))
+ return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+ err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
if (err)
goto err_create_key;
- priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -112,36 +491,29 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
- err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
- if (err)
- goto err_create_tis;
-
priv_tx->ctx_post_pending = true;
atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
-err_create_tis:
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
- kfree(priv_tx);
+ pool_push(pool, priv_tx);
return err;
}
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_tx_pool *pool;
struct mlx5e_priv *priv;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5e_destroy_tis(mdev, priv_tx->tisn);
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
- kfree(priv_tx);
+ mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ pool_push(pool, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -202,6 +574,16 @@ post_progress_params(struct mlx5e_txqsq *sq,
sq->pc += num_wqebbs;
}
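+/* Post a NOP WQE with the fence bit set, used as a barrier after the static
+ * and progress parameter WQEs.
+ */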
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ tx_fill_wi(sq, pi, 1, 0, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
@@ -213,6 +595,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
post_static_params(sq, priv_tx, fence_first_post);
post_progress_params(sq, priv_tx, progress_fence);
+ tx_post_fence_nop(sq);
}
struct tx_sync_info {
@@ -305,7 +688,7 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
}
static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_data_seg *dseg;
@@ -327,7 +710,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->tis_tir_num = cpu_to_be32(tisn << 8);
- cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -362,67 +744,39 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats->tls_dump_bytes += wi->num_bytes;
}
-static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
- tx_fill_wi(sq, pi, 1, 0, NULL);
-
- mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
-}
-
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
int datalen,
u32 seq)
{
- struct mlx5e_sq_stats *stats = sq->stats;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- int i = 0;
+ int i;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
- if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
- if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
- stats->tls_skip_no_sync_data++;
- return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
- }
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
+ /* We might get here with ret == FAIL if a retransmission
+ * reaches the driver after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
- stats->tls_drop_no_sync_data++;
- goto err_out;
- }
-
- stats->tls_ooo++;
+ return ret;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags) {
- tx_post_fence_nop(sq);
- return MLX5E_KTLS_SYNC_DONE;
- }
-
- for (; i < info.nr_frags; i++) {
+ for (i = 0; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
orig_fsz = skb_frag_size(f);
do {
- bool fence = !(i || frag_offset);
unsigned int fsz;
n++;
fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
skb_frag_size_set(f, fsz);
- if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
page_ref_add(skb_frag_page(f), n - 1);
goto err_out;
}
@@ -454,40 +808,49 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
int datalen;
u32 seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return true;
mlx5e_tx_mpwqe_ensure_complete(sq);
tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't WARN on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+ if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- }
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+ stats->tls_ooo++;
+
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ stats->tls_skip_no_sync_data++;
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
- fallthrough;
+ goto err_out;
case MLX5E_KTLS_SYNC_FAIL:
+ stats->tls_drop_no_sync_data++;
goto err_out;
}
}
@@ -506,3 +869,24 @@ err_out:
dev_kfree_skb_any(skb);
return false;
}
+
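+/* Create the TX contexts pool only when the device supports kTLS TX offload. */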
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return 0;
+
+ priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
+ if (!priv->tls->tx_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return;
+
+ mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
+ priv->tls->tx_pool = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 49cca6bd49a1..cd7f245dcf14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+ dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
int i;
arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs->wq);
+ destroy_workqueue(priv->fs->arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
}
}
@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
return;
_mlx5e_cleanup_tables(priv);
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+ struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -321,7 +321,7 @@ out:
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
- priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
- if (!priv->fs.arfs)
+ priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
+ if (!priv->fs->arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs.arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs->rules);
- priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs->wq)
+ spin_lock_init(&priv->fs->arfs->arfs_lock);
+ INIT_LIST_HEAD(&priv->fs->arfs->rules);
+ priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs->arfs->wq)
goto err;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
err_des:
_mlx5e_cleanup_tables(priv);
err:
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
return err;
}
@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+ queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6e80585d731f..b811207fe5ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#include <linux/ethtool_netlink.h>
+
#include "en.h"
#include "en/port.h"
#include "en/params.h"
@@ -305,12 +307,18 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
+
+ kernel_param->tcp_data_split =
+ (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -320,7 +328,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
@@ -451,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
__func__);
@@ -2067,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
* the numeration of the QoS SQs will change, while per-queue qdiscs are
* attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
__func__);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d2f0773f95c6..e2a9b9be5c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,13 +37,13 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
-#include "en_rep.h"
+#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai);
enum {
@@ -132,9 +132,8 @@ struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
return vlan->ft.t;
}
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
- struct net_device *ndev = priv->netdev;
int max_list_size;
int list_size;
u16 *vlans;
@@ -143,15 +142,15 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
list_size++;
- max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+ max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- netdev_warn(ndev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ mlx5_core_warn(fs->mdev,
+ "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
@@ -160,16 +159,16 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
}
- err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+ err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
+ err);
kvfree(vlans);
return err;
@@ -183,18 +182,18 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
-static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.l2.ft.t;
+ dest.ft = fs->l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -204,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged of both)
*/
- rule_p = &priv->fs.vlan->untagged_rule;
+ rule_p = &fs->vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan->any_cvlan_rule;
+ rule_p = &fs->vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan->any_svlan_rule;
+ rule_p = &fs->vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+ rule_p = &fs->vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -231,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+ rule_p = &fs->vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -250,13 +249,13 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
}
return err;
}
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
struct mlx5_flow_spec *spec;
@@ -267,68 +266,68 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
return -ENOMEM;
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
+ err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);
kvfree(spec);
return err;
}
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
- enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan->untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
- priv->fs.vlan->untagged_rule = NULL;
+ if (fs->vlan->untagged_rule) {
+ mlx5_del_flow_rules(fs->vlan->untagged_rule);
+ fs->vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan->any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
- priv->fs.vlan->any_cvlan_rule = NULL;
+ if (fs->vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
+ fs->vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan->any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
- priv->fs.vlan->any_svlan_rule = NULL;
+ if (fs->vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
+ fs->vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan->active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
- priv->fs.vlan->active_svlans_rule[vid] = NULL;
+ if (fs->vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
+ fs->vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan->active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
- priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+ if (fs->vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
+ fs->vlan->active_cvlans_rule[vid] = NULL;
}
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
break;
}
}
-static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
int err;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
if (err)
return err;
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static struct mlx5_flow_handle *
@@ -354,101 +353,101 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan->trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->vlan->trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.vlan->trap_rule = rule;
+ priv->fs->vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
- priv->fs.vlan->trap_rule = NULL;
+ if (priv->fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
+ priv->fs->vlan->trap_rule = NULL;
}
}
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.l2.trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->l2.trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.l2.trap_rule = rule;
+ priv->fs->l2.trap_rule = rule;
return 0;
}
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs.l2.trap_rule);
- priv->fs.l2.trap_rule = NULL;
+ if (priv->fs->l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs->l2.trap_rule);
+ priv->fs->l2.trap_rule = NULL;
}
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan->cvlan_filter_disabled)
+ if (!priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = false;
+ priv->fs->vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->cvlan_filter_disabled)
+ if (priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = true;
+ priv->fs->vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan->active_cvlans);
+ set_bit(vid, fs->vlan->active_cvlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan->active_cvlans);
+ clear_bit(vid, fs->vlan->active_cvlans);
return err;
}
-static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev, u16 vid)
{
- struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan->active_svlans);
+ set_bit(vid, fs->vlan->active_svlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
+ clear_bit(vid, fs->vlan->active_svlans);
return err;
}
@@ -457,86 +456,91 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
return err;
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q)
- return mlx5e_vlan_rx_add_cvid(priv, vid);
+ return mlx5e_vlan_rx_add_cvid(fs, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
- return mlx5e_vlan_rx_add_svid(priv, vid);
+ return mlx5e_vlan_rx_add_svid(fs, netdev, vid);
return -EOPNOTSUPP;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan->active_cvlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ clear_bit(vid, fs->vlan->active_cvlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
- netdev_update_features(dev);
+ clear_bit(vid, fs->vlan->active_svlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(netdev);
}
return 0;
}
-static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_add_any_vid_rules(fs);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
int i;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ WARN_ON_ONCE(priv->fs->state_destroy);
mlx5e_remove_vlan_trap(priv);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
+ if (priv->fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(priv->fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
@@ -547,9 +551,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
switch (action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
if (!is_multicast_ether_addr(mac_addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
+ l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
@@ -557,52 +561,50 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
case MLX5E_ACTION_DEL:
if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
- mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
+ mlx5e_del_l2_flow_rule(fs, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
}
if (l2_err)
- netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(netdev);
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
- priv->netdev->dev_addr);
-
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
+ struct net_device *ndev,
u8 addr_array[][ETH_ALEN], int size)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct net_device *ndev = priv->netdev;
struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->fs.l2.broadcast_enabled)
+ else if (fs->l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -614,7 +616,8 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
}
}
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
@@ -627,19 +630,19 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
if (size > max_size) {
- netdev_warn(priv->netdev,
- "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ mlx5_core_warn(fs->mdev,
+ "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -649,65 +652,67 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
err = -ENOMEM;
goto out;
}
- mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+ mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
}
- err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+ err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- netdev_err(priv->netdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ mlx5_core_err(fs->mdev,
+ "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_l2_table *ea = &priv->fs.l2;
+ struct mlx5e_l2_table *ea = &fs->l2;
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
- mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
+ mlx5_modify_nic_vport_promisc(fs->mdev, 0,
ea->allmulti_enabled,
ea->promisc_enabled);
}
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
+ mlx5e_execute_l2_action(fs, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
+ mlx5e_execute_l2_action(fs, hn);
}
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_sync_netdev_addr(priv);
+ if (fs->state_destroy)
+ mlx5e_sync_netdev_addr(fs, netdev);
- mlx5e_apply_netdev_addr(priv);
+ mlx5e_apply_netdev_addr(fs);
}
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
-static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
- struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_table *ft = fs->promisc.ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -718,22 +723,22 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
- rule_p = &priv->fs.promisc.rule;
+ rule_p = &fs->promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
}
-static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5e_flow_table *ft = &fs->promisc.ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -742,14 +747,14 @@ static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
return err;
}
- err = mlx5e_add_promisc_rule(priv);
+ err = mlx5e_add_promisc_rule(fs);
if (err)
goto err_destroy_promisc_table;
@@ -762,34 +767,31 @@ err_destroy_promisc_table:
return err;
}
-static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+ if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
return;
- mlx5_del_flow_rules(priv->fs.promisc.rule);
- priv->fs.promisc.rule = NULL;
+ mlx5_del_flow_rules(fs->promisc.rule);
+ fs->promisc.rule = NULL;
}
-static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
return;
- mlx5e_del_promisc_rule(priv);
- mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
- priv->fs.promisc.ft.t = NULL;
+ mlx5e_del_promisc_rule(fs);
+ mlx5_destroy_flow_table(fs->promisc.ft.t);
+ fs->promisc.ft.t = NULL;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
+ struct mlx5e_l2_table *ea = &fs->l2;
- struct mlx5e_l2_table *ea = &priv->fs.l2;
- struct net_device *ndev = priv->netdev;
-
- bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
- bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
- bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool rx_mode_enable = fs->state_destroy;
+ bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
@@ -801,32 +803,32 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
int err;
if (enable_promisc) {
- err = mlx5e_create_promisc_table(priv);
+ err = mlx5e_create_promisc_table(fs);
if (err)
enable_promisc = false;
- if (!priv->channels.params.vlan_strip_disable && !err)
- netdev_warn_once(ndev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ if (!fs->vlan_strip_disable && !err)
+ mlx5_core_warn_once(fs->mdev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
- mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);
- mlx5e_handle_netdev_addr(priv);
+ mlx5e_handle_netdev_addr(fs, netdev);
if (disable_broadcast)
- mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
if (disable_promisc)
- mlx5e_destroy_promisc_table(priv);
+ mlx5e_destroy_promisc_table(fs);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
- mlx5e_vport_context_update(priv);
+ mlx5e_vport_context_update(fs, netdev);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
@@ -841,9 +843,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
- ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
@@ -861,7 +863,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -884,7 +886,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -898,18 +900,18 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
+ mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
}
}
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
@@ -918,10 +920,10 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
}
}
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
@@ -939,7 +941,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
switch (type) {
case MLX5E_FULLMATCH:
@@ -957,8 +959,8 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1044,12 +1046,12 @@ err_destroy_groups:
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+ mlx5e_destroy_flow_table(&priv->fs->l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_l2_table *l2_table = &priv->fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1060,7 +1062,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1180,20 +1182,20 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
return err;
}
-static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
+static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft;
int err;
- ft = &priv->fs.vlan->ft;
+ ft = &fs->vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t))
return PTR_ERR(ft->t);
@@ -1207,7 +1209,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- mlx5e_add_vlan_rules(priv);
+ mlx5e_fs_add_vlan_rules(fs);
return 0;
@@ -1222,33 +1224,33 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+ mlx5_destroy_ttc_table(priv->fs->inner_ttc);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return 0;
mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs.inner_ttc))
- return PTR_ERR(priv->fs.inner_ttc);
+ priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
+ &ttc_params);
+ if (IS_ERR(priv->fs->inner_ttc))
+ return PTR_ERR(priv->fs->inner_ttc);
return 0;
}
@@ -1257,9 +1259,9 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc))
- return PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc))
+ return PTR_ERR(priv->fs->ttc);
return 0;
}
@@ -1267,45 +1269,44 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
+ err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_inner_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev,
- "Failed to create inner ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev,
+ "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
err = mlx5e_create_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
+ err);
goto err_destroy_inner_ttc_table;
}
err = mlx5e_create_l2_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
+ err);
goto err_destroy_ttc_table;
}
- err = mlx5e_create_vlan_table(priv);
+ err = mlx5e_fs_create_vlan_table(priv->fs);
if (err) {
- netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
+ err);
goto err_destroy_l2_table;
}
@@ -1342,16 +1343,69 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
}
-int mlx5e_fs_init(struct mlx5e_priv *priv)
+static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
+ if (!fs->vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
+ kvfree(fs->vlan);
+}
+
+static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->tc = mlx5e_tc_table_alloc();
+ if (IS_ERR(fs->tc))
return -ENOMEM;
return 0;
}
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_tc_table_free(fs->tc);
+}
+
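+/* Allocate the flow steering context. VLAN and TC tables are allocated only
+ * for profiles that expose the corresponding feature capability.
+ */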
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy)
+{
+ struct mlx5e_flow_steering *fs;
+ int err;
+
+ fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ goto err;
+
+ fs->mdev = mdev;
+ fs->state_destroy = state_destroy;
+ if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+ err = mlx5e_fs_vlan_alloc(fs);
+ if (err)
+ goto err_free_fs;
+ }
+
+ if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+ err = mlx5e_fs_tc_alloc(fs);
+ if (err)
+ goto err_free_vlan;
+ }
+
+ return fs;
+err_free_vlan:
+	mlx5e_fs_vlan_free(fs);
+err_free_fs:
+	kvfree(fs);
+err:
+ return NULL;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
+ mlx5e_fs_tc_free(fs);
+ mlx5e_fs_vlan_free(fs);
+ kvfree(fs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index ad0d234632a3..3e4bc7836ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ eth_ft = &priv->fs->ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs.ethtool.rules;
+ struct list_head *head = &priv->fs->ethtool.rules;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs.ethtool.tot_num_rules++;
+ priv->fs->ethtool.tot_num_rules++;
list_add(&rule->list, head);
}
@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs.ethtool.tot_num_rules--;
+ priv->fs->ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -742,10 +742,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft;
- if (!eth_ft->ft) {
- err = -EINVAL;
- goto del_ethtool_rule;
- }
+
rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -791,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -834,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
- INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+ INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -966,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ info->rule_cnt = priv->fs->ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 087952b84ccb..d858667736a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,7 +31,6 @@
*/
#include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
@@ -64,6 +63,7 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
@@ -1912,8 +1912,7 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
{
int tc;
- if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
- !params->mqprio.channel.rl) {
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
*hw_id = 0;
return 0;
}
@@ -1922,7 +1921,14 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
if (tc < 0)
return tc;
- return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+ if (tc >= params->mqprio.num_tc) {
+ WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
+ tc, params->mqprio.num_tc);
+ return -EINVAL;
+ }
+
+ *hw_id = params->mqprio.channel.hw_id[tc];
+ return 0;
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
@@ -2383,9 +2389,11 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- err = mlx5e_qos_open_queues(priv, chs);
- if (err)
- goto err_close_ptp;
+ if (priv->htb) {
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+ }
mlx5e_health_channels_update(priv);
kvfree(cparam);
@@ -2567,9 +2575,11 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
- int qos_queues, nch, ntc, num_txqs, err;
+ int nch, ntc, num_txqs, err;
+ int qos_queues = 0;
- qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+ if (priv->htb)
+ qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
nch = priv->channels.params.num_channels;
ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
@@ -2615,13 +2625,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
- if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
- priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
- }
return 0;
@@ -2724,7 +2727,8 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
- mlx5e_qos_activate_queues(priv);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
@@ -2841,7 +2845,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
new_chs.params = *params;
- mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
err = mlx5e_open_channels(priv, &new_chs);
if (err)
@@ -2897,7 +2901,7 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+ mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
set_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -3135,6 +3139,12 @@ err_close_tises:
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+ mlx5e_accel_cleanup_tx(priv);
mlx5e_destroy_tises(priv);
}
@@ -3203,19 +3213,38 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
- params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
+static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
+ struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ u32 hw_id = 0;
+
+ if (rl)
+ mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
+ params->mqprio.channel.hw_id[tc] = hw_id;
+ }
+}
+
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt,
+ struct tc_mqprio_qopt_offload *mqprio,
struct mlx5e_mqprio_rl *rl)
{
+ int tc;
+
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
- params->mqprio.num_tc = qopt->num_tc;
- params->mqprio.channel.rl = rl;
- mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+ params->mqprio.num_tc = mqprio->qopt.num_tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
+
+ mlx5e_mqprio_rl_update_params(params, rl);
+ mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
}
static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
@@ -3241,6 +3270,12 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
+ if (!err && priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
mlx5e_get_dcb_num_tc(&priv->channels.params));
return err;
@@ -3299,16 +3334,38 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
-static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
{
int tc;
- for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
- if (mqprio->max_rate[tc])
+ for (tc = 0; tc < num_tc; tc++)
+ if (max_rate[tc])
return true;
return false;
}
+static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
+ u8 num_tc, u64 max_rate[])
+{
+ struct mlx5e_mqprio_rl *rl;
+ int err;
+
+ if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
+ return NULL;
+
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return ERR_PTR(err);
+ }
+
+ return rl;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -3322,32 +3379,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
- rl = NULL;
- if (mlx5e_mqprio_rate_limit(mqprio)) {
- rl = mlx5e_mqprio_rl_alloc();
- if (!rl)
- return -ENOMEM;
- err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
- mqprio->max_rate);
- if (err) {
- mlx5e_mqprio_rl_free(rl);
- return err;
- }
- }
+ rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
+ if (IS_ERR(rl))
+ return PTR_ERR(rl);
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+ mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
- if (err && rl) {
- mlx5e_mqprio_rl_cleanup(rl);
- mlx5e_mqprio_rl_free(rl);
+ if (err) {
+ if (rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+ return err;
}
- return err;
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = rl;
+
+ return 0;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3356,7 +3413,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(priv->htb.maj_id))
+ if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
return -EINVAL;
switch (mqprio->mode) {
@@ -3369,47 +3426,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
}
}
-static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
-{
- int res;
-
- switch (htb->command) {
- case TC_HTB_CREATE:
- return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
- htb->extack);
- case TC_HTB_DESTROY:
- return mlx5e_htb_root_del(priv);
- case TC_HTB_LEAF_ALLOC_QUEUE:
- res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
- htb->rate, htb->ceil, htb->extack);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- case TC_HTB_LEAF_TO_INNER:
- return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
- htb->rate, htb->ceil, htb->extack);
- case TC_HTB_LEAF_DEL:
- return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
- case TC_HTB_LEAF_DEL_LAST:
- case TC_HTB_LEAF_DEL_LAST_FORCE:
- return mlx5e_htb_leaf_del_last(priv, htb->classid,
- htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
- htb->extack);
- case TC_HTB_NODE_MODIFY:
- return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
- htb->extack);
- case TC_HTB_LEAF_QUERY_QUEUE:
- res = mlx5e_get_txq_by_classid(priv, htb->classid);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3443,7 +3459,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
return err;
case TC_SETUP_QDISC_HTB:
mutex_lock(&priv->state_lock);
- err = mlx5e_setup_tc_htb(priv, type_data);
+ err = mlx5e_htb_setup_tc(priv, type_data);
mutex_unlock(&priv->state_lock);
return err;
default:
@@ -3594,20 +3610,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
- if (enable && priv->xsk.refcnt) {
- netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
- priv->xsk.refcnt);
- err = -EINVAL;
- goto out;
- }
-
cur_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- netdev_warn(netdev, "can't set LRO with legacy RQ\n");
- err = -EINVAL;
- goto out;
- }
-
new_params = *cur_params;
if (enable)
@@ -3676,6 +3679,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
@@ -3685,12 +3689,14 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
}
#endif
- if (!enable && priv->htb.maj_id) {
+ mutex_lock(&priv->state_lock);
+ if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
- return -EINVAL;
+ err = -EINVAL;
}
+ mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
static int set_feature_rx_all(struct net_device *netdev, bool enable)
@@ -3772,20 +3778,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
+ priv->fs->vlan_strip_disable = !enable;
priv->channels.params.vlan_strip_disable = !enable;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
- if (err)
+ if (err) {
+ priv->fs->vlan_strip_disable = enable;
priv->channels.params.vlan_strip_disable = enable;
-
+ }
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
+}
+
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -3883,8 +3914,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs.vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+ if (!priv->fs->vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -3916,6 +3947,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
if (priv->xsk.refcnt) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_LRO;
+ }
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
@@ -5002,6 +5038,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5009,11 +5046,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
- err = mlx5e_fs_init(priv);
- if (err) {
+ fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!fs) {
+ err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
+ priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
@@ -5032,7 +5072,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_fs_cleanup(priv);
+ mlx5e_fs_cleanup(priv->fs);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -5110,6 +5150,23 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
}
+static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
+{
+ struct mlx5e_params *params;
+ struct mlx5e_mqprio_rl *rl;
+
+ params = &priv->channels.params;
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+ return;
+
+ rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
+ params->mqprio.channel.max_rate);
+ if (IS_ERR(rl))
+ rl = NULL;
+ priv->mqprio_rl = rl;
+ mlx5e_mqprio_rl_update_params(params, rl);
+}
+
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
@@ -5120,8 +5177,17 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_accel_init_tx(priv);
+ if (err)
+ goto err_destroy_tises;
+
+ mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+ return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5129,7 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, netdev);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
@@ -5214,7 +5280,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
- BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
@@ -5264,6 +5332,14 @@ int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
@@ -5293,7 +5369,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (err)
goto err_free_cpumask;
- hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5350,14 +5425,9 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
- for (i = 0; i < priv->htb.max_qos_sqs; i++)
- kfree(priv->htb.qos_sq_stats[i]);
- kvfree(priv->htb.qos_sq_stats);
-
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
+ for (i = 0; i < priv->htb_max_qos_sqs; i++)
+ kfree(priv->htb_qos_sq_stats[i]);
+ kvfree(priv->htb_qos_sq_stats);
memset(priv, 0, sizeof(*priv));
}
@@ -5447,6 +5517,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5506,6 +5578,8 @@ err_cleanup_tx:
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5515,6 +5589,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (profile->disable)
profile->disable(priv);
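
mlx5e_mqprio_rl_create() above replaces the open-coded alloc/init/free sequence with a factory that has a three-way contract: NULL when no per-TC max_rate is configured (nothing to create), ERR_PTR(err) on failure, and a valid handle otherwise. A small user-space sketch of that contract, with the kernel's ERR_PTR helpers stubbed out (rl_create() and struct rate_limiter are hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal user-space stand-ins for the kernel's ERR_PTR helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct rate_limiter { uint64_t max_rate; };

/* NULL: no limiter needed; ERR_PTR: failure; otherwise: valid limiter. */
static struct rate_limiter *rl_create(uint64_t max_rate)
{
	struct rate_limiter *rl;

	if (!max_rate)
		return NULL;		/* nothing to rate limit */

	rl = malloc(sizeof(*rl));
	if (!rl)
		return ERR_PTR(-ENOMEM);

	rl->max_rate = max_rate;
	return rl;
}

int main(void)
{
	struct rate_limiter *rl = rl_create(1000000);

	if (IS_ERR(rl))
		return (int)-PTR_ERR(rl);	/* hard failure */
	if (rl)
		printf("limiting to %llu bps\n",
		       (unsigned long long)rl->max_rate);
	free(rl);
	return 0;
}
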
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f797fd97d305..0c66774a1720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -229,7 +229,7 @@ mlx5e_rep_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int
@@ -696,6 +696,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -708,16 +715,26 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+ mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
@@ -728,8 +745,8 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
mlx5e_set_ttc_params(priv, &ttc_params, false);
@@ -738,9 +755,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
 /* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc)) {
- err = PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc)) {
+ err = PTR_ERR(priv->fs->ttc);
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -760,7 +777,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
 /* non uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
return 0;
}
@@ -836,10 +853,12 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
int err;
priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
- return -ENOMEM;
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
@@ -873,13 +892,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -888,7 +909,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index adf5cc6a7b8c..dec183ccd4ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -62,6 +62,7 @@ struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
+struct mlx5e_flow_meters;
struct mlx5_rep_uplink_priv {
/* indirect block callbacks are invoked on bind/unbind events
@@ -97,6 +98,8 @@ struct mlx5_rep_uplink_priv {
/* OVS internal port support */
struct mlx5e_tc_int_port_priv *int_port_priv;
+
+ struct mlx5e_flow_meters *flow_meters;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 57fa0489eeb8..7409829d1201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
int i;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (i = 0; i < max_qos_sqs; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -688,7 +688,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
return;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2184,13 +2186,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
int i, qid;
for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2210,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
int i, qid;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (qid = 0; qid < max_qos_sqs; qid++) {
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e48b15b55b6f..ed4fc940e4ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
+ u64 resync_cqe;
+ u64 resync_event;
};
struct mlx5e_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3a39a50146dd..f154bda668ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -59,6 +59,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
+#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,6 +71,30 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_tc_table {
+ /* Protects the dynamic assignment of the t parameter
+ * which is the nic tc root table.
+ */
+ struct mutex t_lock;
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_post_act *post_act;
+
+ struct rhashtable ht;
+
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
+};
+
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -104,8 +129,27 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
+ [PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
+{
+ struct mlx5e_tc_table *tc;
+
+ tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
+ return tc ? tc : ERR_PTR(-ENOMEM);
+}
+
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
+{
+ kvfree(tc);
+}
+
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+{
+ return tc->chains;
+}
+
/* To avoid false lock dependency warning set the tc_ht lock
* class different than the lock class of the ht being used when deleting
* last flow from a group and then deleting a group, we get into del_sw_flow_group()
@@ -240,6 +284,30 @@ mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
return NULL;
}
+struct mlx5e_flow_meters *
+mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_priv *priv;
+
+ if (is_mdev_switchdev_mode(dev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ priv = netdev_priv(uplink_rpriv->netdev);
+ if (!uplink_priv->flow_meters)
+ uplink_priv->flow_meters =
+ mlx5e_flow_meters_init(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ uplink_priv->post_act);
+ if (!IS_ERR(uplink_priv->flow_meters))
+ return uplink_priv->flow_meters;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -254,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs.tc.ct;
+ return priv->fs->tc->ct;
}
static struct mlx5e_tc_psample *
@@ -288,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
- return priv->fs.tc.post_act;
+ return priv->fs->tc->post_act;
}
struct mlx5_flow_handle *
@@ -319,12 +387,62 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+static bool
+is_flow_meter_action(struct mlx5_flow_attr *attr)
+{
+ return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+}
+
+static int
+mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+ struct mlx5e_post_meter_priv *post_meter;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
+ if (IS_ERR(meter)) {
+ mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
+ return PTR_ERR(meter);
+ }
+
+ ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
+ meter->red_counter);
+ if (IS_ERR(post_meter)) {
+ mlx5_core_err(priv->mdev, "Failed to init post meter\n");
+ goto err_meter_init;
+ }
+
+ attr->meter_attr.meter = meter;
+ attr->meter_attr.post_meter = post_meter;
+ attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+
+err_meter_init:
+ mlx5e_tc_meter_put(meter);
+ return PTR_ERR(post_meter);
+}
+
+static void
+mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+{
+ mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_tc_meter_put(attr->meter_attr.meter);
+}
+
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
if (attr->flags & MLX5_ATTR_FLAG_CT) {
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
@@ -341,6 +459,12 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+ if (is_flow_meter_action(attr)) {
+ err = mlx5e_tc_add_flow_meter(priv, attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -367,6 +491,9 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
}
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ if (attr->meter_attr.meter)
+ mlx5e_tc_del_flow_meter(attr);
}
int
@@ -484,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs.tc.mod_hdr;
+ &priv->fs->tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -702,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+ mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
return 0;
@@ -792,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -807,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -894,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -909,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -921,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -999,10 +1126,10 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
@@ -1011,6 +1138,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int dest_ix = 0;
+ nic_chains = mlx5e_nic_chains(tc);
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
@@ -1035,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
}
dest_ix++;
}
@@ -1063,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ rule = ERR_CAST(priv->fs->tc->t);
goto err_ft_get;
}
}
@@ -1165,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
mlx5_del_flow_rules(rule);
@@ -1182,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
@@ -1194,13 +1322,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs.tc.t_lock);
+ mutex_lock(&priv->fs->tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs.tc.t = NULL;
+ mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
+ priv->fs->tc->t = NULL;
}
- mutex_unlock(&priv->fs.tc.t_lock);
+ mutex_unlock(&priv->fs->tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -3793,7 +3921,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
- if (mlx5e_eswitch_uplink_rep(out_dev) &&
+ if (same_hw_reps(priv, out_dev) &&
MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
return true;
@@ -3936,7 +4064,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs.tc.ht;
+ return &priv->fs->tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4519,9 +4647,9 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
-static int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4655,11 +4783,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+ hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4674,7 +4802,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5e_flow_steering *fs;
struct mlx5e_priv *peer_priv;
struct mlx5e_tc_table *tc;
struct mlx5e_priv *priv;
@@ -4685,8 +4812,7 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
- fs = container_of(tc, struct mlx5e_flow_steering, tc);
- priv = container_of(fs, struct mlx5e_priv, fs);
+ priv = tc->priv;
peer_priv = netdev_priv(ndev);
if (priv == peer_priv ||
!(priv->netdev->features & NETIF_F_HW_TC))
@@ -4715,7 +4841,7 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
- struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+ struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
@@ -4737,12 +4863,12 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+ mlx5_destroy_flow_table(priv->fs->tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4753,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
mutex_init(&tc->t_lock);
mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
+ tc->priv = priv;
err = rhashtable_init(&tc->ht, &tc_ht_params);
if (err)
@@ -4782,7 +4909,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs.tc.miss_t;
+ attr.default_ft = priv->fs->tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
@@ -4792,7 +4919,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
- tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
@@ -4831,7 +4958,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -4955,6 +5082,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
@@ -5035,7 +5163,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
@@ -5060,7 +5188,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index e2a1250aeca1..6ce1ab6b86b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -39,6 +39,7 @@
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
+#include "en/tc/meter.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -71,6 +72,7 @@ struct mlx5_flow_attr {
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
struct mlx5e_sample_attr sample_attr;
+ struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -83,6 +85,7 @@ struct mlx5_flow_attr {
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
+ u32 exe_aso_type;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
struct {
@@ -229,6 +232,7 @@ enum mlx5e_tc_attr_to_reg {
FTEID_TO_REG,
NIC_CHAIN_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
+ PACKET_COLOR_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -241,6 +245,10 @@ struct mlx5e_tc_attr_to_reg_mapping {
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
+#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
+#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
@@ -348,6 +356,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
#endif
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -368,6 +378,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
+static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
+static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
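
The MLX5_REG_MAPPING_MOFFSET/MBITS/MASK macros added in en_tc.h derive a field's shift and a GENMASK(mlen - 1, 0) mask from the attr-to-reg mapping table, so extracting a mapped value from CQE metadata is a plain shift-and-mask. A quick stand-alone check of that arithmetic (GENMASK32 is a user-space stand-in; the offsets and sample value are made up):

#include <assert.h>
#include <stdio.h>

/* User-space equivalent of the kernel's GENMASK(h, l) for 32-bit values. */
#define GENMASK32(h, l) \
	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int moffset = 8;		/* example field offset in reg_b */
	unsigned int mlen = 8;			/* example field width in bits */
	unsigned int mask = GENMASK32(mlen - 1, 0);
	unsigned int reg_b = 0x00a5ff00;	/* sample CQE metadata word */

	assert(mask == 0xff);
	/* Mirrors: (reg_b >> MLX5_REG_MAPPING_MOFFSET(id)) & MLX5_REG_MAPPING_MASK(id) */
	printf("field = 0x%x\n", (reg_b >> moffset) & mask);
	return 0;
}
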
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 50d14cec4894..27f791feb517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,14 +152,14 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ihs = skb_tcp_all_headers(skb);
if (ipv6_has_hopopt_jumbo(skb)) {
*hopbyhop = sizeof(struct hop_jumbo_hdr);
ihs -= sizeof(struct hop_jumbo_hdr);
@@ -341,6 +341,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
}
}
+static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ /* Must not be called when a MPWQE session is active but empty. */
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wi = &sq->db.wqe_info[pi];
+
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
+
+ wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
const struct mlx5e_tx_attr *attr,
@@ -459,6 +479,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
@@ -560,6 +581,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_xmit_data txd;
+ txd.data = skb->data;
+ txd.len = skb->len;
+
+ txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+ goto err_unmap;
+
if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
mlx5e_tx_mpwqe_session_start(sq, eseg);
} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
@@ -569,18 +597,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
- txd.data = skb->data;
- txd.len = skb->len;
-
- txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
- goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
-
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
-
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
-
mlx5e_tx_skb_update_hwts_flags(skb);
if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
@@ -602,6 +621,7 @@ err_unmap:
mlx5e_dma_unmap_wqe_err(sq, 1);
sq->stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
@@ -611,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+ ptpsq->ts_cqe_ctr_mask);
+}
+
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+ if (unlikely(sq->ptpsq))
+ mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1006,5 +1036,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 05e08cec5a8c..4fbff7bcc155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
+#include <linux/build_bug.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <net/netevent.h>
@@ -12,26 +13,57 @@
#define CREATE_TRACE_POINTS
#include "diag/bridge_tracepoint.h"
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
-
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
@@ -63,12 +95,14 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_qinq_fg;
struct mlx5_flow_group *egress_mac_fg;
struct mlx5_flow_group *egress_miss_fg;
struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
+ u16 vlan_proto;
};
static void
@@ -138,7 +172,9 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -154,30 +190,53 @@ mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flo
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
+ vlan_proto, PTR_ERR(fg));
return fg;
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
+ u16 vlan_proto, struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -193,27 +252,48 @@ mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
-
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
"Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
PTR_ERR(fg));
-
kvfree(in);
return fg;
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -250,7 +330,9 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -265,13 +347,14 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
@@ -283,6 +366,25 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -346,7 +448,7 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
+ struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
struct mlx5_eswitch *esw = br_offloads->esw;
int err;
@@ -374,10 +476,22 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
- if (IS_ERR(filter_fg)) {
- err = PTR_ERR(filter_fg);
- goto err_filter_fg;
+ vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(vlan_filter_fg)) {
+ err = PTR_ERR(vlan_filter_fg);
+ goto err_vlan_filter_fg;
+ }
+
+ qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
+ qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_filter_fg)) {
+ err = PTR_ERR(qinq_filter_fg);
+ goto err_qinq_filter_fg;
}
mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
@@ -389,13 +503,19 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
br_offloads->ingress_ft = ingress_ft;
br_offloads->skip_ft = skip_ft;
br_offloads->ingress_vlan_fg = vlan_fg;
- br_offloads->ingress_filter_fg = filter_fg;
+ br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
+ br_offloads->ingress_qinq_fg = qinq_fg;
+ br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
br_offloads->ingress_mac_fg = mac_fg;
return 0;
err_mac_fg:
- mlx5_destroy_flow_group(filter_fg);
-err_filter_fg:
+ mlx5_destroy_flow_group(qinq_filter_fg);
+err_qinq_filter_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
+ mlx5_destroy_flow_group(vlan_filter_fg);
+err_vlan_filter_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(skip_ft);
@@ -409,8 +529,12 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
{
mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
br_offloads->ingress_mac_fg = NULL;
- mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
- br_offloads->ingress_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
+ br_offloads->ingress_qinq_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
+ br_offloads->ingress_qinq_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
+ br_offloads->ingress_vlan_filter_fg = NULL;
mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
br_offloads->ingress_vlan_fg = NULL;
mlx5_destroy_flow_table(br_offloads->skip_ft);
@@ -428,7 +552,7 @@ static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
struct mlx5_flow_handle *miss_handle = NULL;
struct mlx5_eswitch *esw = br_offloads->esw;
@@ -447,6 +571,12 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
goto err_vlan_fg;
}
+ qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
@@ -491,6 +621,7 @@ skip_miss_flow:
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
+ bridge->egress_qinq_fg = qinq_fg;
bridge->egress_mac_fg = mac_fg;
bridge->egress_miss_fg = miss_fg;
bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
@@ -498,6 +629,8 @@ skip_miss_flow:
return 0;
err_mac_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(egress_ft);
@@ -515,6 +648,7 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
if (bridge->egress_miss_fg)
mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
+ mlx5_destroy_flow_group(bridge->egress_qinq_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
}
@@ -559,10 +693,17 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
flow_act.pkt_reformat = vlan->pkt_reformat_push;
flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -645,10 +786,17 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a
MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
@@ -696,10 +844,17 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
flow_act.pkt_reformat = vlan->pkt_reformat_pop;
}
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -774,6 +929,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
bridge->ifindex = ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
+ bridge->vlan_proto = ETH_P_8021Q;
list_add(&bridge->list, &br_offloads->bridges);
return bridge;
@@ -911,12 +1067,13 @@ mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
}
static int
-mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
- } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
+ } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
@@ -1008,36 +1165,58 @@ mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct
vlan->pkt_mod_hdr_push_mark = NULL;
}
-static struct mlx5_esw_bridge_vlan *
-mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
- struct mlx5_eswitch *esw)
+static int
+mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
- struct mlx5_esw_bridge_vlan *vlan;
int err;
- vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return ERR_PTR(-ENOMEM);
-
- vlan->vid = vid;
- vlan->flags = flags;
- INIT_LIST_HEAD(&vlan->fdb_list);
-
if (flags & BRIDGE_VLAN_INFO_PVID) {
- err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
+ err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
if (err)
- goto err_vlan_push;
+ return err;
err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
if (err)
goto err_vlan_push_mark;
}
+
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
if (err)
goto err_vlan_pop;
}
+ return 0;
+
+err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
+ return err;
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ int err;
+
+ vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ INIT_LIST_HEAD(&vlan->fdb_list);
+
+ err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
+ if (err)
+ goto err_vlan_push_pop;
+
err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
if (err)
goto err_xa_insert;
@@ -1048,13 +1227,11 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
-err_vlan_pop:
if (vlan->pkt_mod_hdr_push_mark)
mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
-err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
-err_vlan_push:
+err_vlan_push_pop:
kvfree(vlan);
return ERR_PTR(err);
}
@@ -1102,6 +1279,50 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
+static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&port->vlans, i, vlan) {
+ mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
+ br_offloads->esw);
+ if (err) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
+ vlan->vid, bridge->vlan_proto, port->vport_num,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_port *port;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port->bridge != bridge)
+ continue;
+
+ err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
@@ -1287,6 +1508,32 @@ int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, boo
return 0;
}
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
+ br_offloads);
+ if (!port)
+ return -EINVAL;
+
+ bridge = port->bridge;
+ if (bridge->vlan_proto == proto)
+ return 0;
+ if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
+ return -EOPNOTSUPP;
+ }
+
+ mlx5_esw_bridge_fdb_flush(bridge);
+ bridge->vlan_proto = proto;
+ mlx5_esw_bridge_vlans_recreate(bridge);
+
+ return 0;
+}
+
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
@@ -1434,7 +1681,8 @@ int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
- vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
+ vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
+ br_offloads->esw);
if (IS_ERR(vlan)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
return PTR_ERR(vlan);
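With QinQ support the choice between the C-VLAN and S-VLAN tag bits is repeated in the flow-group helpers above and again in the ingress, ingress-filter and egress rule paths. A small helper is one way that repetition could be factored; the name below is hypothetical and not part of this patch, but it uses only macros already present in this file:

/* Hypothetical helper (not in this patch): select the C-VLAN or S-VLAN
 * tag bit in both halves of a rule spec, mirroring the if/else blocks
 * added above.
 */
static void esw_bridge_spec_set_vlan_tag(struct mlx5_flow_spec *rule_spec, u16 vlan_proto)
{
	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
}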
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index efc39975226e..10851a515bca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -26,7 +26,9 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_flow_table *ingress_ft;
struct mlx5_flow_group *ingress_vlan_fg;
- struct mlx5_flow_group *ingress_filter_fg;
+ struct mlx5_flow_group *ingress_vlan_filter_fg;
+ struct mlx5_flow_group *ingress_qinq_fg;
+ struct mlx5_flow_group *ingress_qinq_filter_fg;
struct mlx5_flow_group *ingress_mac_fg;
struct mlx5_flow_table *skip_ft;
@@ -60,6 +62,8 @@ int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsign
struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
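mlx5_esw_bridge_vlan_proto_set() is exported for the rep bridge code, which is expected to drive it from the switchdev attribute path when the bridge's vlan_protocol option changes. That caller is not part of this hunk; the sketch below only illustrates the intended shape, with the br_offloads/vport resolution assumed:

/* Sketch of a switchdev attribute handler using the new setter; the
 * port lookup plumbing is assumed and not shown here.
 */
static int example_bridge_attr_set(struct mlx5_esw_bridge_offloads *br_offloads,
				   u16 vport_num, u16 esw_owner_vhca_id,
				   const struct switchdev_attr *attr)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		return mlx5_esw_bridge_vlan_proto_set(vport_num, esw_owner_vhca_id,
						      attr->u.vlan_protocol,
						      br_offloads);
	default:
		return -EOPNOTSUPP;
	}
}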
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
new file mode 100644
index 000000000000..2db13c71e88c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include "eswitch.h"
+
+enum vnic_diag_counter {
+ MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
+ MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_CQ_OVERRUN,
+ MLX5_VNIC_DIAG_INVALID_COMMAND,
+ MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND,
+};
+
+static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
+ u32 *val)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
+ struct mlx5_core_dev *dev = vport->dev;
+ u16 vport_num = vport->vport;
+ void *vnic_diag_out;
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
+ if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
+ MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
+ switch (counter) {
+ case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
+ break;
+ case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
+ send_queue_priority_update_flow);
+ break;
+ case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_CQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_INVALID_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
+ break;
+ case MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
+ break;
+ }
+
+ return 0;
+}
+
+static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
+ enum vnic_diag_counter type)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = mlx5_esw_query_vnic_diag(vport, type, &val);
+ if (ret)
+ return ret;
+
+ seq_printf(file, "%d\n", val);
+ return 0;
+}
+
+static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
+}
+
+static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
+}
+
+static int comp_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
+}
+
+static int async_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
+}
+
+static int cq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
+}
+
+static int invalid_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
+}
+
+static int quota_exceeded_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND);
+}
+
+DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
+DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
+DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(cq_overrun);
+DEFINE_SHOW_ATTRIBUTE(invalid_command);
+DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
+
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ debugfs_remove_recursive(vport->dbgfs);
+ vport->dbgfs = NULL;
+}
+
+/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" */
+#define VNIC_DIAG_DIR_NAME_MAX_LEN 8
+
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ struct dentry *vnic_diag;
+ char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return;
+
+ if (vport_num == MLX5_VPORT_PF) {
+ strcpy(dir_name, "pf");
+ } else if (vport_num == MLX5_VPORT_ECPF) {
+ strcpy(dir_name, "ecpf");
+ } else {
+ err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
+ is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
+ if (WARN_ON(err < 0))
+ return;
+ }
+
+ vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
+ vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
+ debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
+ &total_q_under_processor_handle_fops);
+ debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
+ &send_queue_priority_update_flow_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
+ debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
+ &comp_eq_overrun_fops);
+ debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
+ &async_eq_overrun_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
+ debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
+ debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
+ &invalid_command_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
+ debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
+ &quota_exceeded_command_fops);
+}
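Each counter gets its own read-only file under the device's debugfs root, in esw/<pf|ecpf|vf_N|sf_N>/vnic_diag/, and DEFINE_SHOW_ATTRIBUTE() wires the matching *_show() routine into a file_operations. Roughly, the macro (see include/linux/seq_file.h for the real definition) expands to:

/* Simplified expansion of DEFINE_SHOW_ATTRIBUTE(cq_overrun) */
static int cq_overrun_open(struct inode *inode, struct file *file)
{
	return single_open(file, cq_overrun_show, inode->i_private);
}

static const struct file_operations cq_overrun_fops = {
	.owner		= THIS_MODULE,
	.open		= cq_overrun_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};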
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 7f9b96d9537e..9bc7be95db54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -87,11 +87,11 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
goto reg_err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -99,7 +99,7 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
reg_err:
mlx5_esw_dl_port_free(dl_port);
return err;
@@ -118,10 +118,10 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
mlx5_esw_dl_port_free(vport->dl_port);
vport->dl_port = NULL;
}
@@ -156,11 +156,11 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
return err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -168,7 +168,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
return err;
}
@@ -182,9 +182,9 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
vport->dl_port = NULL;
}
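The devlink_port_register()/devlink_rate_leaf_*() calls are switched to their devl_* counterparts, which do not take the devlink instance lock themselves but require the caller to already hold it; the devl_assert_locked() and devl_lock()/devl_unlock() additions in eswitch.c and eswitch_offloads.c below supply that. A path that is not already under the instance lock would look roughly like this (sketch, not from this patch):

	devl_lock(devlink);
	err = devl_port_register(devlink, dl_port, dl_port_index);
	if (err)
		goto out;
	err = devl_rate_leaf_create(dl_port, vport);
	if (err)
		devl_port_unregister(dl_port);
out:
	devl_unlock(devlink);
	return err;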
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 9d17206d1625..fabe49a35a5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -11,6 +11,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "fs_ft_pool.h"
#include "esw/qos.h"
enum {
@@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- ft_attr.max_fte = table_size;
+ ft_attr.max_fte = POOL_NEXT_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
@@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
goto out;
}
esw->fdb_table.legacy.fdb = fdb;
+ table_size = fdb->max_fte;
/* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
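Rather than sizing the legacy FDB from log_max_ft_size up front, the table is now requested with POOL_NEXT_SIZE so the flow-table pool hands back the largest size it can currently provide, and the address groups are carved from the capacity actually granted. The pattern, condensed from the hunk above:

	ft_attr.max_fte = POOL_NEXT_SIZE;	/* "give me the biggest available table" */
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb))
		return PTR_ERR(fdb);
	table_size = fdb->max_fte;		/* size the flow groups from what we actually got */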
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 719ef26d23c0..6aa58044b949 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
+#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
@@ -1002,6 +1003,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
if (err)
return err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
err = esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
@@ -1009,6 +1011,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
return err;
}
@@ -1016,6 +1019,7 @@ err_rep:
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
@@ -1152,8 +1156,6 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
const u32 *out;
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
if (num_vfs < 0)
return;
@@ -1186,6 +1188,9 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
int total_vports;
int err;
+ if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
+ return 0;
+
total_vports = mlx5_eswitch_get_total_vports(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
@@ -1203,6 +1208,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
} else {
esw_warn(dev, "ingress ACL is not supported by FW\n");
}
+ esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
return 0;
err:
@@ -1215,6 +1221,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
+ esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
mlx5_fs_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
@@ -1224,7 +1231,6 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
- * @mode: Eswitch mode to enable
* @num_vfs: Enable eswitch for given number of VFs. This is optional.
* Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
* Caller should pass num_vfs > 0 when enabling eswitch for
@@ -1238,7 +1244,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
* mode. If num_vfs >=0 is provided, it setup VF related eswitch vports.
* It returns 0 on success or error code on failure.
*/
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
@@ -1257,9 +1263,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- esw->mode = mode;
-
- if (mode == MLX5_ESWITCH_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_rescan_drivers(esw->dev);
@@ -1269,22 +1273,19 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (err)
goto abort;
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
- mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mlx5_esw_mode_change_notify(esw, mode);
+ mlx5_esw_mode_change_notify(esw, esw->mode);
return 0;
abort:
- esw->mode = MLX5_ESWITCH_NONE;
-
- if (mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
-
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1305,14 +1306,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!mlx5_esw_allowed(esw))
return 0;
- toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+ devl_assert_locked(priv_to_devlink(esw->dev));
+
+ toggle_lag = !mlx5_esw_is_fdb_created(esw);
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- if (esw->mode == MLX5_ESWITCH_NONE) {
- ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ if (!mlx5_esw_is_fdb_created(esw)) {
+ ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
@@ -1330,55 +1333,81 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return ret;
}
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+/* When disabling sriov, free driver level resources. */
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
- int old_mode;
-
- lockdep_assert_held_write(&esw->mode_lock);
-
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_allowed(esw))
return;
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ devl_assert_locked(priv_to_devlink(esw->dev));
+ down_write(&esw->mode_lock);
+ /* If driver is unloaded, this function is called twice by remove_one()
+ * and mlx5_unload(). Prevent the second call.
+ */
+ if (!esw->esw_funcs.num_vfs && !clear_vf)
+ goto unlock;
+
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- /* Notify eswitch users that it is exiting from current mode.
- * So that it can do necessary cleanup before the eswitch is disabled.
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ /* If disabling sriov in switchdev mode, free meta rules here
+ * because it depends on num_vfs.
*/
- mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ struct devlink *devlink = priv_to_devlink(esw->dev);
- mlx5_eswitch_event_handlers_unregister(esw);
+ esw_offloads_del_send_to_vport_meta_rules(esw);
+ devl_rate_nodes_destroy(devlink);
+ }
- if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
+ esw->esw_funcs.num_vfs = 0;
- old_mode = esw->mode;
- esw->mode = MLX5_ESWITCH_NONE;
+unlock:
+ up_write(&esw->mode_lock);
+}
- if (old_mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
+/* Free resources for corresponding eswitch mode. It is called by devlink
+ * when changing eswitch mode or modprobe when unloading driver.
+ */
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+
+ /* Notify eswitch users that it is exiting from current mode.
+ * So that it can do necessary cleanup before the eswitch is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- devlink_rate_nodes_destroy(devlink);
+ mlx5_eswitch_event_handlers_unregister(esw);
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
+
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
mlx5_esw_acls_ns_cleanup(esw);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ devl_rate_nodes_destroy(devlink);
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_allowed(esw))
return;
+ devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- mlx5_eswitch_disable_locked(esw, clear_vf);
- esw->esw_funcs.num_vfs = 0;
+ mlx5_eswitch_disable_locked(esw);
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -1573,7 +1602,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_NONE;
+ esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1587,6 +1616,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+ esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
@@ -1610,6 +1640,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ debugfs_remove_recursive(esw->dbgfs);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
@@ -1875,7 +1906,7 @@ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -1995,8 +2026,6 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
*/
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
- if (!mlx5_esw_allowed(esw))
- return;
up_write(&esw->mode_lock);
}
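MLX5_ESWITCH_NONE is gone as a mode: esw->mode now starts as MLX5_ESWITCH_LEGACY, whether the FDB namespace actually exists is tracked with the MLX5_ESW_FDB_CREATED flag, and teardown is split between a SR-IOV-only path and a full one. A condensed view of how callers are expected to use the split (sketch; the real callers live in the sriov and driver unload paths, not in this hunk):

	/* SR-IOV disable: unload VF vports, keep the eswitch/FDB alive */
	devl_lock(devlink);
	mlx5_eswitch_disable_sriov(esw, clear_vf);
	devl_unlock(devlink);

	/* devlink mode change or driver unload: full teardown */
	devl_lock(devlink);
	mlx5_eswitch_disable(esw);
	devl_unlock(devlink);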
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2754a732914d..87ce5a208cb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -191,6 +191,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
+ struct dentry *dbgfs;
};
struct mlx5_esw_indir_table;
@@ -282,10 +283,15 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
+ MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};
struct mlx5_esw_bridge_offloads;
+enum {
+ MLX5_ESW_FDB_CREATED = BIT(0),
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -331,12 +337,14 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
+ struct dentry *dbgfs;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
@@ -350,10 +358,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -575,6 +584,11 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
return dl_port_index & 0xffff;
}
+static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
+}
+
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
@@ -672,6 +686,9 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num);
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
@@ -719,7 +736,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
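With the NONE mode removed, code that used to ask "is the eswitch enabled?" by comparing the mode now checks whether the FDB has been created. The conversion pattern, as applied in eswitch_offloads.c below:

	/* before */
	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	/* after */
	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;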
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2ce3728576d1..ed73132129aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
}
static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
- struct mlx5_flow_act *flow_act,
- struct mlx5_fs_chains *chains,
- int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains, int i)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, int i)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
- esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+ } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, esw, *i);
+ (*i)++;
+ } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+ esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
@@ -512,6 +523,20 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
}
}
+static void
+esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = attr->meter_attr.meter;
+ flow_act->exe_aso.type = attr->exe_aso_type;
+ flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
+ flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
+ /* use metadata reg 5 for packet color */
+ flow_act->exe_aso.return_reg_id = 5;
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -579,6 +604,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
+ esw_setup_meter(attr, &flow_act);
+
if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
@@ -1040,6 +1069,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
+ /* If changing eswitch mode from switchdev to legacy, but num_vfs is not 0,
+ * meta rules could be freed again. So set it to NULL.
+ */
+ esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
+}
+
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
}
static int
@@ -2034,7 +2072,7 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2170,18 +2208,18 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- esw->dev->priv.sriov.num_vfs);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
}
+ mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -2894,7 +2932,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
int err = 0;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_NONE) {
+ if (mlx5_esw_is_fdb_created(esw)) {
err = -EBUSY;
goto done;
}
@@ -3055,6 +3093,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
+ struct devlink *devlink;
bool host_pf_disabled;
u16 new_num_vfs;
@@ -3066,6 +3105,8 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
return;
+ devlink = priv_to_devlink(esw->dev);
+ devl_lock(devlink);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -3078,6 +3119,7 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
return;
}
esw->esw_funcs.num_vfs = new_num_vfs;
+ devl_unlock(devlink);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
@@ -3229,13 +3271,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -3334,36 +3375,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
-{
- /* devlink commands in NONE eswitch mode are currently supported only
- * on ECPF.
- */
- return (esw->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
-}
-
-/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
- * is never correct and prone to races. It's a transitional workaround,
- * never repeat this pattern.
- *
- * This code MUST be fixed before removing devlink_mutex as it is safe
- * to do only because of that mutex.
- */
-static void mlx5_eswtich_mode_callback_enter(struct devlink *devlink,
- struct mlx5_eswitch *esw)
-{
- devl_unlock(devlink);
- down_write(&esw->mode_lock);
-}
-
-static void mlx5_eswtich_mode_callback_exit(struct devlink *devlink,
- struct mlx5_eswitch *esw)
-{
- up_write(&esw->mode_lock);
- devl_lock(devlink);
-}
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3378,15 +3389,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- /* FIXME: devl_unlock() followed by devl_lock() inside driver callback
- * is never correct and prone to races. It's a transitional workaround,
- * never repeat this pattern.
- *
- * This code MUST be fixed before removing devlink_mutex as it is safe
- * to do only because of that mutex.
- */
- devl_unlock(devlink);
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3399,6 +3401,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -3409,6 +3412,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
+ mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
@@ -3417,7 +3421,6 @@ unlock:
mlx5_esw_unlock(esw);
enable_lag:
mlx5_lag_enable_change(esw->dev);
- devl_lock(devlink);
return err;
}
@@ -3430,14 +3433,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3484,10 +3482,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto out;
+ down_write(&esw->mode_lock);
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -3521,11 +3516,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
goto out;
esw->offloads.inline_mode = mlx5_mode;
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return 0;
out:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3538,14 +3533,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3555,16 +3545,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
- int err;
+ int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
+ down_write(&esw->mode_lock);
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
@@ -3607,7 +3594,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3615,21 +3602,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
*encap = esw->offloads.encap;
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
- return err;
+ up_write(&esw->mode_lock);
+ return 0;
}
static bool
@@ -3752,12 +3733,14 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err)
goto devlink_err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
goto rep_err;
return 0;
rep_err:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
mlx5_esw_vport_disable(esw, vport_num);
@@ -3767,6 +3750,7 @@ devlink_err:
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 2ccf7bef9b05..e735e19461ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,10 +50,12 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
- ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+ int max_fte = ft_attr->max_fte;
+
+ ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
return 0;
}
@@ -258,7 +260,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -267,17 +269,19 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ unsigned int size;
int err;
- if (size != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(size);
- size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (ft_attr->max_fte != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(ft_attr->max_fte);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
@@ -479,6 +483,30 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
return 0;
}
+
+static void
+mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
+{
+ void *exe_aso_ctrl;
+ void *execute_aso;
+
+ execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
+ execute_aso[0]);
+ MLX5_SET(execute_aso, execute_aso, valid, 1);
+ MLX5_SET(execute_aso, execute_aso, aso_object_id,
+ fte->action.exe_aso.object_id);
+
+ exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
+ fte->action.exe_aso.return_reg_id);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
+ fte->action.exe_aso.type);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
+ fte->action.exe_aso.flow_meter.init_color);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
+ fte->action.exe_aso.flow_meter.meter_idx);
+}
+
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
@@ -663,6 +691,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 274004e80f03..8ef4254b9ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 21e5c709b2d3..e3960cdf5131 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1155,7 +1155,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
if (err)
goto free_ft;
@@ -1195,6 +1195,12 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_flow_table);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
+{
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_flow_table_id);
+
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr, u16 vport)
@@ -2895,6 +2901,14 @@ static int create_fdb_bypass(struct mlx5_flow_steering *steering)
return 0;
}
+static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *maj_prio;
@@ -2945,10 +2959,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
return 0;
out_err:
- cleanup_root_ns(steering->fdb_root_ns);
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- steering->fdb_root_ns = NULL;
+ cleanup_fdb_root_ns(steering);
return err;
}
@@ -3108,10 +3119,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->fdb_root_ns);
- steering->fdb_root_ns = NULL;
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
+ cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index cfb8bedba512..079fa44ada71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
+ dev->priv.sw_vhca_id > 0)
+ MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
+
return mlx5_cmd_exec_in(dev, init_hca, in);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 052af4901c0b..e8896f368362 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -149,6 +149,9 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
+ mlx5_unload_one(dev);
+ if (mlx5_health_wait_pci_up(dev))
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -183,15 +186,9 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
reset_reload_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- int err;
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
- err = mlx5_health_wait_pci_up(dev);
- if (err)
- mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
}
@@ -395,7 +392,6 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 659021c31cbd..2cf2c9948446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -666,16 +666,20 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
struct mlx5_fw_reporter_ctx fw_reporter_ctx;
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
health = container_of(work, struct mlx5_core_health, fatal_report_work);
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
+ devlink = priv_to_devlink(dev);
enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ devl_lock(devlink);
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
+ devl_unlock(devlink);
return;
}
fw_reporter_ctx.err_synd = health->synd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 8da73ef5680f..ac3757beaea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -83,7 +83,7 @@ static void mlx5i_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int mlx5i_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0a99a020a3b2..c02b7b08fb4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EINVAL;
err = mlx5e_arfs_create_tables(priv);
@@ -364,9 +364,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
mlx5e_create_q_counters(priv);
@@ -397,6 +406,8 @@ err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -408,6 +419,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+ mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index 15e41dc84d53..b8feaf0f5c4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -44,7 +44,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
- mode = mlx5_get_str_port_sel_mode(ldev);
+ mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
else
ret = -EINVAL;
mutex_unlock(&ldev->lock);
@@ -72,6 +72,7 @@ static int state_show(struct seq_file *file, void *priv)
static int flags_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
+ bool fdb_sel_mode_native;
struct mlx5_lag *ldev;
bool shared_fdb;
bool lag_active;
@@ -79,14 +80,21 @@ static int flags_show(struct seq_file *file, void *priv)
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
- if (lag_active)
- shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ if (!lag_active)
+ goto unlock;
+
+ shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+ &ldev->mode_flags);
+unlock:
mutex_unlock(&ldev->lock);
if (!lag_active)
return -EINVAL;
seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off");
+ seq_printf(file, "%s:%s\n", "fdb_selection_mode",
+ fdb_sel_mode_native ? "native" : "affinity");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 2a8fc547eb37..0f34e3c80d1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -68,14 +68,15 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
unsigned long flags)
{
- bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
+ bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+ &flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
- MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+ MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
@@ -471,8 +472,13 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
*flags = 0;
- if (shared_fdb)
+ if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
+ set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
+ }
+
+ if (mode == MLX5_LAG_MODE_MPESW)
+ set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
if (roce_lag)
return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
@@ -481,9 +487,9 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
return 0;
}
-char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev)
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
- int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags);
+ int port_sel_mode = get_port_sel_mode(mode, flags);
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
@@ -507,7 +513,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
- shared_fdb, mlx5_get_str_port_sel_mode(ldev));
+ shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
if (err) {
@@ -632,6 +638,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
+ struct mlx5_core_dev *dev;
u8 mode;
#endif
int i;
@@ -641,11 +648,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
-
- if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS)
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
return false;
+ mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
@@ -760,8 +767,7 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
#ifdef CONFIG_MLX5_ESWITCH
for (i = 0; i < ldev->ports; i++)
- roce_lag = roce_lag &&
- ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
return roce_lag;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index c81b173156d2..ce2ce8ccbd70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -24,6 +24,7 @@ enum {
enum {
MLX5_LAG_MODE_FLAG_HASH_BASED,
MLX5_LAG_MODE_FLAG_SHARED_FDB,
+ MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
};
enum mlx5_lag_mode {
@@ -114,7 +115,7 @@ bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
-char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev);
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
u8 *ports, int *num_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index ee4b25a50315..f643202b29c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -41,7 +41,6 @@ void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev = dev->priv.lag;
- bool shared_fdb;
int err = 0;
if (!ldev)
@@ -55,8 +54,8 @@ int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
err = -EINVAL;
goto out;
}
- shared_fdb = mlx5_shared_fdb_supported(ldev);
- err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb);
+
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
if (err)
mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
new file mode 100644
index 000000000000..21e14507ff5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/transobj.h>
+#include "aso.h"
+#include "wq.h"
+
+struct mlx5_aso_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5_aso_cq cq;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+ void *cqc_data, struct mlx5_aso_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_vector2eqn(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+ struct mlx5_aso_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = create_aso_cq(cq, cqc_data);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+ goto err_free_cq;
+ }
+
+ kvfree(cqc_data);
+ return 0;
+
+err_free_cq:
+ mlx5_aso_free_cq(cq);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ int err;
+
+ err = create_aso_sq(mdev, pdn, sqc_data, sq);
+ if (err)
+ return err;
+
+ err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+
+ return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+ u32 pdn, struct mlx5_aso *sq)
+{
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, pdn);
+ MLX5_SET(wq, wq, log_wq_sz, 1);
+
+ err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+ goto err_free_asosq;
+ }
+
+ mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_free_asosq:
+ mlx5_aso_free_sq(sq);
+err_out:
+ kvfree(sqc_data);
+ return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ struct mlx5_aso *aso;
+ int err;
+
+ aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+ if (!aso)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+ if (err)
+ goto err_cq;
+
+ err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+ if (err)
+ goto err_sq;
+
+ return aso;
+
+err_sq:
+ mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+ kfree(aso);
+ return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+ if (IS_ERR_OR_NULL(aso))
+ return;
+
+ mlx5_aso_destroy_sq(aso);
+ mlx5_aso_destroy_cq(&aso->cq);
+ kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+ (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_ACCESS_ASO);
+ cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->general_id = cpu_to_be32(obj_id);
+}
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+ return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+ doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ if (with_data)
+ aso->pc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->pc += MLX5_ASO_WQEBBS;
+ *aso->wq.db = cpu_to_be32(aso->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+{
+ struct mlx5_aso_cq *cq = &aso->cq;
+ struct mlx5_cqe64 *cqe;
+ unsigned long expires;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+
+ expires = jiffies + msecs_to_jiffies(interval_ms);
+ while (!cqe && time_is_after_jiffies(expires)) {
+ usleep_range(2, 10);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ }
+
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe;
+
+ mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
+ err_cqe = (struct mlx5_err_cqe *)cqe;
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+ err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n",
+ err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (with_data)
+ aso->cc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->cc += MLX5_ASO_WQEBBS;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
new file mode 100644
index 000000000000..b3bbf284fe71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <linux/mlx5/qp.h>
+#include "mlx5_core.h"
+
+#define MLX5_ASO_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+
+struct mlx5_wqe_aso_ctrl_seg {
+ __be32 va_h;
+ __be32 va_l; /* include read_enable */
+ __be32 l_key;
+ u8 data_mask_mode;
+ u8 condition_1_0_operand;
+ u8 condition_1_0_offset;
+ u8 data_offset_condition_operand;
+ __be32 condition_0_data;
+ __be32 condition_0_mask;
+ __be32 condition_1_data;
+ __be32 condition_1_mask;
+ __be64 bitwise_data;
+ __be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+ __be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+ struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+ MLX5_ASO_LOGICAL_AND,
+ MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+ MLX5_ASO_ALWAYS_FALSE,
+ MLX5_ASO_ALWAYS_TRUE,
+ MLX5_ASO_EQUAL,
+ MLX5_ASO_NOT_EQUAL,
+ MLX5_ASO_GREATER_OR_EQUAL,
+ MLX5_ASO_LESSER_OR_EQUAL,
+ MLX5_ASO_LESSER,
+ MLX5_ASO_GREATER,
+ MLX5_ASO_CYCLIC_GREATER,
+ MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+ MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+ MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+ MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+};
+
+struct mlx5_aso;
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 3d5e57ff558c..9482e51ac82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -12,13 +12,16 @@ struct mlx5_dm {
spinlock_t lock;
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
+ unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
+ u64 header_modify_pattern_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
+ bool support_v2;
if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
return NULL;
@@ -35,8 +38,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->steering_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(steering_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(steering_icm_blocks, GFP_KERNEL);
if (!dm->steering_sw_icm_alloc_blocks)
goto err_steering;
}
@@ -47,16 +49,33 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->header_modify_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(header_modify_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_sw_icm_alloc_blocks)
goto err_modify_hdr;
}
+ support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
+ MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
+
+ if (support_v2) {
+ header_modify_pattern_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_modify_pattern_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
+ if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
return dm;
+err_pattern:
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+
err_modify_hdr:
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
err_steering:
kfree(dm);
@@ -75,7 +94,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
}
if (dm->header_modify_sw_icm_alloc_blocks) {
@@ -83,7 +102,15 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
BIT(MLX5_CAP_DEV_MEM(dev,
log_header_modify_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->header_modify_sw_icm_alloc_blocks);
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+ }
+
+ if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_modify_pattern_sw_icm_alloc_blocks);
}
kfree(dm);
@@ -130,6 +157,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_sw_icm_size);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -203,6 +237,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index d758848d34d0..696e45e2bd06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
+int mlx5_tout_init(struct mlx5_core_dev *dev)
{
int i;
- for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
- tout_set(dev, tout_def_sw_val[i], i);
-}
-
-int mlx5_tout_init(struct mlx5_core_dev *dev)
-{
dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
if (!dev->timeouts)
return -ENOMEM;
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 257c03eeab36..bc9e9aeda847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c9b4e50a593e..bec8d6d0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static u32 sw_owner_id[4];
+#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
+static DEFINE_IDA(sw_vhca_ida);
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
@@ -314,13 +316,6 @@ struct mlx5_reg_host_endianness {
u8 rsvd[15];
};
-#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
-
-enum {
- MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
- MLX5_DEV_CAP_FLAG_DCT,
-};
-
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
@@ -499,6 +494,31 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ if (err)
+ return err;
+
+ if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
+ !(dev->priv.sw_vhca_id > 0))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
+ MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
+
+ return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
@@ -524,7 +544,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
+ prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -669,6 +689,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_2(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
+ goto out;
+ }
+
out:
kfree(set_ctx);
return err;
@@ -1023,8 +1050,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- mlx5_tout_set_def_val(dev);
-
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
@@ -1257,6 +1282,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
@@ -1276,8 +1302,10 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
int mlx5_init_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
int err = 0;
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
@@ -1306,6 +1334,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto err_register;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return 0;
err_register:
@@ -1320,11 +1349,15 @@ function_teardown:
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return err;
}
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
@@ -1343,13 +1376,15 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
int err = 0;
u64 timeout;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1391,8 +1426,20 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int ret;
+
+ devl_lock(devlink);
+ ret = mlx5_load_one_devl_locked(dev, recovery);
+ devl_unlock(devlink);
+ return ret;
+}
+
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
mlx5_detach_device(dev);
@@ -1410,6 +1457,15 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
+ mlx5_unload_one_devl_locked(dev);
+ devl_unlock(devlink);
+}
+
static const int types[] = {
MLX5_CAP_GENERAL,
MLX5_CAP_GENERAL_2,
@@ -1512,6 +1568,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ /* The conjunction of sw_vhca_id with sw_owner_id will be a global
+ * unique id per function which uses mlx5_core.
+ * Those values are supplied to FW as part of the init HCA command to
+ * be used by both driver and FW when it's applicable.
+ */
+ dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
+ MAX_SW_VHCA_ID,
+ GFP_KERNEL);
+ if (dev->priv.sw_vhca_id < 0)
+ mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
+ dev->priv.sw_vhca_id);
+
return 0;
err_hca_caps:
@@ -1536,6 +1604,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ if (priv->sw_vhca_id > 0)
+ ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
@@ -1859,7 +1930,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one_devl_locked(dev);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1870,7 +1941,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
return -EIO;
}
- return mlx5_load_one(dev, true);
+ return mlx5_load_one_devl_locked(dev, true);
}
static struct pci_driver mlx5_core_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9cc7afea2758..ad61b86d5769 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -290,7 +290,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3be659cd91f1..7d955a4d9f14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -501,7 +501,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
case MLX5_ESWITCH_OFFLOADS:
mlx5_sf_table_enable(table);
break;
- case MLX5_ESWITCH_NONE:
+ case MLX5_ESWITCH_LEGACY:
mlx5_sf_table_disable(table);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2935614f6fa9..ee2e1b7c1310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -145,8 +145,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
sriov->vfs_ctx[vf].enabled = 0;
}
- if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -155,13 +154,16 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
+ devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
@@ -174,10 +176,13 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
+ devl_lock(devlink);
mlx5_device_disable_sriov(dev, num_vfs, true);
+ devl_unlock(devlink);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 1383550f44c1..b1dfad274a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -22,6 +22,7 @@ enum dr_action_valid_state {
DR_ACTION_STATE_PUSH_VLAN,
DR_ACTION_STATE_NON_TERM,
DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_ASO,
DR_ACTION_STATE_MAX,
};
@@ -42,6 +43,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+ [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -71,6 +73,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -85,6 +88,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -93,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -105,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -118,6 +124,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
@@ -128,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -145,6 +153,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -163,18 +178,21 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -185,6 +203,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -196,6 +215,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -206,6 +226,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -219,6 +240,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -240,6 +271,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -253,6 +285,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -261,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -272,6 +306,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -284,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -296,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -312,6 +349,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -331,6 +375,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -338,6 +383,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -345,6 +391,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -356,6 +403,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -368,6 +416,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -379,6 +428,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -393,6 +443,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -738,6 +799,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ attr.aso_flow_meter.obj_id = action->aso->obj_id;
+ attr.aso_flow_meter.offset = action->aso->offset;
+ attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
+ attr.aso_flow_meter.init_color = action->aso->init_color;
+ break;
default:
mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
return -EINVAL;
@@ -798,6 +865,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
+ [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
};
static struct mlx5dr_action *
@@ -1830,6 +1898,34 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return action;
}
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
+ u8 dest_reg_id, u8 aso_type,
+ u8 init_color, u8 meter_id)
+{
+ struct mlx5dr_action *action;
+
+ if (aso_type != MLX5_EXE_ASO_FLOW_METER)
+ return NULL;
+
+ if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
+ return NULL;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
+ if (!action)
+ return NULL;
+
+ action->aso->obj_id = obj_id;
+ action->aso->offset = meter_id;
+ action->aso->dest_reg_id = dest_reg_id;
+ action->aso->init_color = init_color;
+ action->aso->dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
@@ -1881,6 +1977,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_SAMPLER:
refcount_dec(&action->sampler->dmn->refcount);
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ refcount_dec(&action->aso->dmn->refcount);
+ break;
default:
break;
}
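
The hunks above extend the next_action_state[] tables so that DR_ACTION_TYP_ASO_FLOW_METER moves the builder into a new DR_ACTION_STATE_ASO state, from which only the listed action types may follow. A minimal standalone sketch of the same table-driven ordering check, with illustrative names only (not driver code):

#include <stdio.h>

enum state { ST_INVALID, ST_NO_ACTION, ST_ASO, ST_TERM, ST_MAX };
enum action { ACT_CTR, ACT_ASO_FLOW_METER, ACT_DROP, ACT_MAX };

/* Unlisted (state, action) pairs default to ST_INVALID, i.e. not allowed. */
static const enum state next_state[ST_MAX][ACT_MAX] = {
	[ST_NO_ACTION] = {
		[ACT_CTR]            = ST_NO_ACTION,
		[ACT_ASO_FLOW_METER] = ST_ASO,
		[ACT_DROP]           = ST_TERM,
	},
	[ST_ASO] = {
		[ACT_CTR]  = ST_ASO,
		[ACT_DROP] = ST_TERM,
	},
};

static int sequence_is_valid(const enum action *acts, int n)
{
	enum state s = ST_NO_ACTION;
	int i;

	for (i = 0; i < n; i++) {
		s = next_state[s][acts[i]];
		if (s == ST_INVALID)
			return 0;	/* this action cannot follow the previous one */
	}
	return 1;
}

int main(void)
{
	enum action ok[]  = { ACT_ASO_FLOW_METER, ACT_CTR, ACT_DROP };
	enum action bad[] = { ACT_DROP, ACT_ASO_FLOW_METER };

	printf("%d %d\n", sequence_is_valid(ok, 3), sequence_is_valid(bad, 2));
	return 0;
}
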
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 223c8741b7ae..16d65fe4f654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -439,6 +439,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
+ MLX5_SET(create_flow_table_in, in, uid, attr->uid);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index d5998ef59be4..7adcf0eec13b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -21,10 +21,11 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_TABLE_TX = 3102,
DR_DUMP_REC_TYPE_MATCHER = 3200,
- DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+ DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+ DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
DR_DUMP_REC_TYPE_RULE = 3300,
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id);
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id);
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
break;
case DR_ACTION_TYP_CTR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index fcb962c6db2e..ee677a5c76be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -89,6 +89,7 @@ enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
@@ -129,6 +130,10 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -494,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
+ u32 object_id,
+ u32 offset,
+ u8 dest_reg_id,
+ u8 init_color)
+{
+ MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_ASO);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
+ object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
+ /* Convert reg_c index to HW 64bit index */
+ MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
+ (dest_reg_id - 1) / 2);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
+ offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
+ init_color);
+}
+
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -629,6 +655,21 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -802,6 +843,21 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
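
A standalone sketch of the index math used by dr_ste_v1_set_aso_flow_meter() above: each ASO object carries MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ (2) meters, and a reg_c destination register index is converted to its 64-bit hardware register pair. The values in main() are made up for illustration:

#include <stdio.h>

#define ASO_FLOW_METER_NUM_PER_OBJ 2

int main(void)
{
	unsigned int obj_id = 0x100, offset = 5, reg_c = 3;

	unsigned int ctx_num = obj_id + offset / ASO_FLOW_METER_NUM_PER_OBJ; /* 0x102 */
	unsigned int line_id = offset % ASO_FLOW_METER_NUM_PER_OBJ;          /* meter 1 within the object */
	unsigned int hw_reg  = (reg_c - 1) / 2;                              /* reg_c_3 -> HW 64-bit reg 1 */

	printf("ctx=%#x line=%u hw_reg=%u\n", ctx_num, line_id, hw_reg);
	return 0;
}
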
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e5f6412baea9..31d443dd8386 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -214,7 +214,7 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
tbl->table_type);
}
-static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
{
bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
@@ -236,6 +236,7 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
ft_attr.sw_owner = true;
ft_attr.decap_en = en_decap;
ft_attr.reformat_en = en_encap;
+ ft_attr.uid = uid;
ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
NULL, &tbl->table_id);
@@ -243,7 +244,8 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
+ u32 flags, u16 uid)
{
struct mlx5dr_table *tbl;
int ret;
@@ -263,7 +265,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto free_tbl;
- ret = dr_table_create_sw_owned_tbl(tbl);
+ ret = dr_table_create_sw_owned_tbl(tbl, uid);
if (ret)
goto uninit_tbl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 98320e3945ad..062c7c74a1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -127,6 +127,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
+ DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_MAX,
};
@@ -271,6 +272,13 @@ struct mlx5dr_ste_actions_attr {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
+
+ struct {
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+ } aso_flow_meter;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1035,6 +1043,14 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
+struct mlx5dr_action_aso_flow_meter {
+ struct mlx5dr_domain *dmn;
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1049,6 +1065,7 @@ struct mlx5dr_action {
struct mlx5dr_action_vport *vport;
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
+ struct mlx5dr_action_aso_flow_meter *aso;
};
};
@@ -1200,6 +1217,7 @@ struct mlx5dr_cmd_query_flow_table_details {
struct mlx5dr_cmd_create_flow_table_attr {
u32 table_type;
+ u16 uid;
u64 icm_addr_rx;
u64 icm_addr_tx;
u8 level;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 6a9abba92df6..13b6d4721e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- size,
+ ft_attr,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -79,7 +79,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
+ ft_attr->uid);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
@@ -500,6 +501,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action =
+ mlx5dr_action_create_aso(domain,
+ fte->action.exe_aso.object_id,
+ fte->action.exe_aso.return_reg_id,
+ fte->action.exe_aso.type,
+ fte->action.exe_aso.flow_meter.init_color,
+ fte->action.exe_aso.flow_meter.meter_idx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
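
The EXECUTE_ASO handling above follows the existing convention in mlx5_cmd_dr_create_fte(): every software-steering action created so far is recorded in fs_dr_actions[], so any later failure can unwind all of them via the free_actions label. A generic standalone sketch of that build-then-unwind pattern (not driver code):

#include <stdlib.h>

struct res { int dummy; };

static struct res *res_create(void) { return calloc(1, sizeof(struct res)); }
static void res_destroy(struct res *r) { free(r); }

/* Build up to 'want' resources into 'out'; on any failure, destroy what was
 * already built and report an error.
 */
static int build_all(struct res **out, int want)
{
	int num = 0, i;

	for (i = 0; i < want; i++) {
		struct res *r = res_create();

		if (!r)
			goto free_all;
		out[num++] = r;
	}
	return num;		/* ownership passes to the caller on success */

free_all:
	while (num--)
		res_destroy(out[num]);
	return -1;
}

int main(void)
{
	struct res *actions[4];
	int n = build_all(actions, 4);

	while (n > 0)
		res_destroy(actions[--n]);
	return 0;
}
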
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 9604b2091358..fb078fa0f0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits {
u8 reserved_at_38[0x8];
};
+enum {
+ MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
+};
+
+struct mlx5_ifc_ste_aso_flow_meter_action_bits {
+ u8 reserved_at_0[0xc];
+ u8 action[0x1];
+ u8 initial_color[0x2];
+ u8 line_id[0x1];
+};
+
+struct mlx5_ifc_ste_double_action_aso_v1_bits {
+ u8 action_id[0x8];
+ u8 aso_context_number[0x18];
+
+ u8 dest_reg_id[0x2];
+ u8 change_ordering_tag[0x1];
+ u8 aso_check_ordering[0x1];
+ u8 aso_context_type[0x4];
+ u8 reserved_at_28[0x8];
+ union {
+ u8 aso_fields[0x10];
+ struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
+ };
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7626c85643b1..226a0d7bb06d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -51,7 +51,8 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
+ u16 uid);
struct mlx5dr_table *
mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
@@ -131,6 +132,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
+ u32 obj_id,
+ u8 return_reg_id,
+ u8 aso_type,
+ u8 init_color,
+ u8 meter_id);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ac020cb78072..d5c317325030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
goto free;
MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
- nic_vport_context.affiliated_vhca_id,
- MLX5_CAP_GEN(master_mdev, vhca_id));
+ if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
+ } else {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ }
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
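
The vport change above prefers a software-assigned vhca_id when the sw_vhca_id_valid capability is set and otherwise keeps the previous behaviour. A standalone sketch of the fallback, using an illustrative struct rather than the real mlx5 capability layout:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct caps {
	bool sw_vhca_id_valid;
	uint16_t sw_vhca_id;
	uint16_t vhca_id;
};

static uint16_t affiliated_vhca_id(const struct caps *c)
{
	/* Prefer the SW-assigned id when the device marks it valid. */
	return c->sw_vhca_id_valid ? c->sw_vhca_id : c->vhca_id;
}

int main(void)
{
	struct caps a = { .sw_vhca_id_valid = true, .sw_vhca_id = 42, .vhca_id = 7 };
	struct caps b = { .sw_vhca_id_valid = false, .vhca_id = 7 };

	printf("%u %u\n", affiliated_vhca_id(&a), affiliated_vhca_id(&b));
	return 0;
}
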
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 84621b4cb15b..b03e1c66bac0 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -19,8 +19,6 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-#define DRV_NAME "mlxbf_gige"
-
/* Allocate SKB whose payload pointer aligns with the Bluefield
* hardware DMA limitation, i.e. DMA operation can't cross
* a 4KB boundary. A maximum packet size of 2KB is assumed in the
@@ -427,7 +425,7 @@ static struct platform_driver mlxbf_gige_driver = {
.remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 4683312861ac..a510bf2cff2f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -7,6 +7,7 @@ config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
select MLXFW
+ select AUXILIARY_BUS
help
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 1a465fd5d8b3..3ca9fce759ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
core_acl_flex_actions.o core_env.o \
- core_linecards.o
+ core_linecards.o core_linecard_dev.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
@@ -12,7 +12,6 @@ mlxsw_i2c-objs := i2c.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_router_xm.o \
spectrum1_kvdl.o spectrum2_kvdl.o \
spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
@@ -29,7 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
- spectrum_ethtool.o spectrum_policer.o
+ spectrum_ethtool.o spectrum_policer.o \
+ spectrum_pgt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 51b260d54237..60232fb8ccd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -329,6 +329,32 @@ MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);
+/* cmd_mbox_query_fw_utc_sec_offset
+ * The offset of the UTC_Sec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_sec_offset, 0x70, 0, 64);
+
+/* cmd_mbox_query_fw_utc_sec_bar
+ * PCI base address register (BAR) of the UTC_Sec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_sec_bar, 0x78, 30, 2);
+
+/* cmd_mbox_query_fw_utc_nsec_offset
+ * The offset of the UTC_nSec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_nsec_offset, 0x80, 0, 64);
+
+/* cmd_mbox_query_fw_utc_nsec_bar
+ * PCI base address register (BAR) of the UTC_nSec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_nsec_bar, 0x88, 30, 2);
+
/* QUERY_BOARDINFO - Query Board Information
* -----------------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -343,23 +369,6 @@ static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}
-/* cmd_mbox_xm_num_local_ports
- * Number of local_ports connected to the xm.
- * Each local port is a 4x
- * Spectrum-2/3: 25G
- * Spectrum-4: 50G
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_num_local_ports, 0x00, 4, 3);
-
-/* cmd_mbox_xm_exists
- * An XM (eXtanded Mezanine, e.g. used for the XLT) is connected on the board.
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_exists, 0x00, 0, 1);
-
-/* cmd_mbox_xm_local_port_entry
- */
-MLXSW_ITEM_BIT_ARRAY(cmd_mbox, boardinfo, xm_local_port_entry, 0x04, 4, 8);
-
/* cmd_mbox_boardinfo_intapin
* When PCIe interrupt messages are being used, this value is used for clearing
* an interrupt. When using MSI-X, this register is not used.
@@ -650,6 +659,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_ubridge
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
+
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
@@ -674,11 +689,11 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
-/* cmd_mbox_config_set_kvh_xlt_cache_mode
+/* cmd_mbox_config_set_cqe_time_stamp_type
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, set_kvh_xlt_cache_mode, 0x08, 3, 1);
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
@@ -736,16 +751,25 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+enum mlxsw_cmd_mbox_config_profile_flood_mode {
+ /* Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset
+ * tables. max_fid_flood_tables indicates the number of per-FID tables.
+ * Reserved when unified bridge model is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
+ /* Controlled flood tables. Reserved when legacy bridge model is
+ * used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+};
+
/* cmd_mbox_config_profile_flood_mode
* Flooding mode to use.
- * 0-2 - Backward compatible modes for SwitchX devices.
- * 3 - Mixed mode, where:
- * max_flood_tables indicates the number of single-entry tables.
- * max_vid_flood_tables indicates the number of per-VID tables.
- * max_fid_offset_flood_tables indicates the number of FID-offset tables.
- * max_fid_flood_tables indicates the number of per-FID tables.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);
/* cmd_mbox_config_profile_max_fid_offset_flood_tables
* Maximum number of FID-offset flooding tables.
@@ -806,12 +830,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
-/* cmd_mbox_config_profile_kvh_xlt_cache_mode
- * KVH XLT cache mode:
- * 0 - XLT can use all KVH as best-effort
- * 1 - XLT cache uses 1/2 KVH
+/* cmd_mbox_config_profile_ubridge
+ * Unified Bridge
+ * 0 - non unified bridge
+ * 1 - unified bridge
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, kvh_xlt_cache_mode, 0x50, 8, 4);
+MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
@@ -866,6 +890,26 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
0x60, 0, 8, 0x08, 0x00, false);
+enum mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type {
+ /* uSec - 1.024uSec (default). Only bits 15:0 are valid. */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_USEC,
+ /* FRC - Free Running Clock, units of 1nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_FRC,
+ /* UTC. time_stamp[37:30] = Sec, time_stamp[29:0] = nSec.
+ * Reserved when SwitchX/2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* cmd_mbox_config_profile_cqe_time_stamp_type
+ * CQE time_stamp_type for non-mirror-packets.
+ * Configured if set_cqe_time_stamp_type is set.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_time_stamp_type, 0xB0, 8, 2);
+
/* cmd_mbox_config_profile_cqe_version
* CQE version:
* 0: CQE version is 0
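
A standalone sketch decoding the UTC CQE time stamp layout documented above (time_stamp[37:30] = seconds, time_stamp[29:0] = nanoseconds); the example value is arbitrary:

#include <stdio.h>
#include <stdint.h>

static void decode_utc_ts(uint64_t ts, uint8_t *sec, uint32_t *nsec)
{
	*sec  = (ts >> 30) & 0xff;	/* time_stamp[37:30], wraps every 256 s */
	*nsec = ts & 0x3fffffff;	/* time_stamp[29:0] */
}

int main(void)
{
	uint8_t sec;
	uint32_t nsec;

	decode_utc_ts(((uint64_t)5 << 30) | 123456789, &sec, &nsec);
	printf("%u.%09u\n", sec, nsec);
	return 0;
}
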
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index fc52832241b3..75553eb2c7f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -127,11 +127,11 @@ static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
max_ports, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
- max_ports, MLXSW_CORE_RESOURCE_PORTS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &ports_num_params);
+ return devl_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
}
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
@@ -157,8 +157,8 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
goto err_resources_ports_register;
}
atomic_set(&mlxsw_core->active_ports_count, 0);
- devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
- mlxsw_ports_occ_get, mlxsw_core);
+ devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
return 0;
@@ -171,9 +171,9 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
if (!reload)
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
kfree(mlxsw_core->ports);
}
@@ -951,6 +951,20 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
struct mlxsw_core_fw_info {
struct mlxfw_dev mlxfw_dev;
struct mlxsw_core *mlxsw_core;
@@ -1105,8 +1119,9 @@ static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
.fsm_release = mlxsw_core_fw_fsm_release,
};
-static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
- struct netlink_ext_ack *extack)
+static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core_fw_info mlxsw_core_fw_info = {
.mlxfw_dev = {
@@ -1117,13 +1132,9 @@ static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmw
},
.mlxsw_core = mlxsw_core
};
- int err;
- mlxsw_core->fw_flash_in_progress = true;
- err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
- mlxsw_core->fw_flash_in_progress = false;
-
- return err;
+ return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
+ firmware, extack);
}
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
@@ -1169,7 +1180,7 @@ static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
release_firmware(firmware);
if (err)
dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
@@ -1187,7 +1198,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
+ return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
}
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
@@ -1498,13 +1509,15 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_re
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ int err;
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
- return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
- mlxsw_core->bus,
- mlxsw_core->bus_priv, true,
- devlink, extack);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink, extack);
+ return err;
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -2102,6 +2115,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err = -ENOMEM;
goto err_devlink_alloc;
}
+ devl_lock(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2187,6 +2201,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!reload) {
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
}
return 0;
@@ -2214,12 +2229,14 @@ err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- if (!reload)
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
err_devlink_alloc:
return err;
}
@@ -2255,8 +2272,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload)
+ if (!reload) {
devlink_unregister(devlink);
+ devl_lock(devlink);
+ }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2281,16 +2300,19 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- if (!reload)
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
return;
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
@@ -3151,18 +3173,6 @@ mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
return mlxsw_core_port->linecard;
}
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
-{
- const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
- int i;
-
- for (i = 0; i < bus_info->xm_local_ports_count; i++)
- if (bus_info->xm_local_ports[i] == local_port)
- return true;
- return false;
-}
-EXPORT_SYMBOL(mlxsw_core_port_is_xm);
-
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
bool (*selector)(void *priv, u16 local_port),
void *priv)
@@ -3321,6 +3331,24 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->emad.enable_string_tlv = true;
@@ -3331,9 +3359,15 @@ static int __init mlxsw_core_module_init(void)
{
int err;
+ err = mlxsw_linecard_driver_register();
+ if (err)
+ return err;
+
mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
- if (!mlxsw_wq)
- return -ENOMEM;
+ if (!mlxsw_wq) {
+ err = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
@@ -3344,6 +3378,8 @@ static int __init mlxsw_core_module_init(void)
err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
+err_alloc_workqueue:
+ mlxsw_linecard_driver_unregister();
return err;
}
@@ -3351,6 +3387,7 @@ static void __exit mlxsw_core_module_exit(void)
{
destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
+ mlxsw_linecard_driver_unregister();
}
module_init(mlxsw_core_module_init);
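
mlxsw_core_module_init() above now registers the line card auxiliary driver before allocating the workqueues, so the new err_alloc_workqueue label unregisters it again on failure, and module exit tears everything down in reverse order. A generic standalone sketch of that goto-based unwinding (not driver code):

#include <stdio.h>

static int driver_registered, wq_allocated;

static int register_driver(void) { driver_registered = 1; return 0; }
static void unregister_driver(void) { driver_registered = 0; }
static int alloc_wq(int fail) { wq_allocated = !fail; return fail ? -1 : 0; }

static int module_init_sketch(int fail_wq)
{
	int err;

	err = register_driver();
	if (err)
		return err;

	err = alloc_wq(fail_wq);
	if (err)
		goto err_alloc_wq;	/* unwind everything set up before this step */

	return 0;

err_alloc_wq:
	unregister_driver();
	return err;
}

int main(void)
{
	printf("%d %d\n", module_init_sketch(0), module_init_sketch(1));
	return 0;
}
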
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c2a891287047..02d9cc2ef0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -12,12 +12,14 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/net_namespace.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
#include "cmd.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
enum mlxsw_core_resource_id {
MLXSW_CORE_RESOURCE_PORTS = 1,
@@ -47,6 +49,11 @@ mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
@@ -261,7 +268,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
struct mlxsw_linecard *
mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
bool (*selector)(void *priv,
u16 local_port),
@@ -296,8 +302,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
+ used_ubridge:1,
used_kvd_sizes:1,
- used_kvh_xlt_cache_mode:1;
+ used_cqe_time_stamp_type:1;
u8 max_vepa_channels;
u16 max_mid;
u16 max_pgt;
@@ -316,10 +323,11 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
- u8 kvh_xlt_cache_mode;
+ u8 cqe_time_stamp_type;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -419,6 +427,7 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
+ bool sdq_supports_cqe_v2;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -429,6 +438,11 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
@@ -468,6 +482,8 @@ struct mlxsw_bus {
u8 *p_status);
u32 (*read_frc_h)(void *bus_priv);
u32 (*read_frc_l)(void *bus_priv);
+ u32 (*read_utc_sec)(void *bus_priv);
+ u32 (*read_utc_nsec)(void *bus_priv);
u8 features;
};
@@ -478,8 +494,6 @@ struct mlxsw_fw_rev {
u16 can_reset_minor;
};
-#define MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX 4
-
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
@@ -488,10 +502,7 @@ struct mlxsw_bus_info {
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
u8 low_frequency:1,
- read_frc_capable:1,
- xm_exists:1;
- u8 xm_local_ports_count;
- u8 xm_local_ports[MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX];
+ read_clock_capable:1;
};
struct mlxsw_hwmon;
@@ -547,11 +558,17 @@ enum mlxsw_devlink_param_id {
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
};
+struct mlxsw_cqe_ts {
+ u8 sec;
+ u32 nsec;
+};
+
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
struct mlxsw_rx_md_info rx_md_info;
};
+ struct mlxsw_cqe_ts cqe_ts;
};
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
@@ -567,6 +584,15 @@ enum mlxsw_linecard_status_event_type {
MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
};
+struct mlxsw_linecard_bdev;
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+ char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+};
+
struct mlxsw_linecard {
u8 slot_index;
struct mlxsw_linecards *linecards;
@@ -581,6 +607,11 @@ struct mlxsw_linecard {
active:1;
u16 hw_revision;
u16 ini_version;
+ struct mlxsw_linecard_bdev *bdev;
+ struct {
+ struct mlxsw_linecard_device_info info;
+ u8 index;
+ } device;
};
struct mlxsw_linecard_types_info;
@@ -601,6 +632,14 @@ mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
return &linecards->linecards[slot_index - 1];
}
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *bus_info);
void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
@@ -620,4 +659,10 @@ void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
struct mlxsw_linecards_event_ops *ops,
void *priv);
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard);
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard);
+
+int mlxsw_linecard_driver_register(void);
+void mlxsw_linecard_driver_unregister(void);
+
#endif
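
The bus now exposes separate read_utc_sec() and read_utc_nsec() callbacks. As a purely illustrative note (not necessarily how the driver consumes them): reading a split seconds/nanoseconds clock consistently typically means re-reading the seconds value and retrying if it changed mid-read. A runnable sketch with a fake clock backing:

#include <stdio.h>
#include <stdint.h>

static uint32_t read_utc_sec(void);
static uint32_t read_utc_nsec(void);

static void read_utc(uint32_t *sec, uint32_t *nsec)
{
	uint32_t s1, s2;

	do {
		s1 = read_utc_sec();
		*nsec = read_utc_nsec();
		s2 = read_utc_sec();
	} while (s1 != s2);	/* seconds rolled over between reads; retry */
	*sec = s1;
}

/* Fake register backing so the sketch runs standalone. */
static uint64_t fake_ns = 999999990ULL;
static uint32_t read_utc_sec(void)  { return fake_ns / 1000000000ULL; }
static uint32_t read_utc_nsec(void) { fake_ns += 5; return fake_ns % 1000000000ULL; }

int main(void)
{
	uint32_t sec, nsec;

	read_utc(&sec, &nsec);
	printf("%u.%09u\n", sec, nsec);
	return 0;
}
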
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index fa33caecc91d..636db9a87457 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1164,7 +1164,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
*
- * The Trap with userdef action action has the same functionality as
+ * The Trap with userdef action has the same functionality as
* the Trap action with addition of user defined value that can be set
* and used by higher layer applications.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 34bec9cd572c..0107cbc32fc7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -180,7 +180,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
} else {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 0 and I2C high address is used. Please refer
- * refer to "Memory Organization" figure in SFF-8472
+ * to "Memory Organization" figure in SFF-8472
* specification for graphical depiction.
*/
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
new file mode 100644
index 000000000000..af37e650a8ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/idr.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include "core.h"
+
+#define MLXSW_LINECARD_DEV_ID_NAME "lc"
+
+struct mlxsw_linecard_dev {
+ struct mlxsw_linecard *linecard;
+};
+
+struct mlxsw_linecard_bdev {
+ struct auxiliary_device adev;
+ struct mlxsw_linecard *linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+};
+
+static DEFINE_IDA(mlxsw_linecard_bdev_ida);
+
+static int mlxsw_linecard_bdev_id_alloc(void)
+{
+ return ida_alloc(&mlxsw_linecard_bdev_ida, GFP_KERNEL);
+}
+
+static void mlxsw_linecard_bdev_id_free(int id)
+{
+ ida_free(&mlxsw_linecard_bdev_ida, id);
+}
+
+static void mlxsw_linecard_bdev_release(struct device *device)
+{
+ struct auxiliary_device *adev =
+ container_of(device, struct auxiliary_device, dev);
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+
+ mlxsw_linecard_bdev_id_free(adev->id);
+ kfree(linecard_bdev);
+}
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev;
+ int err;
+ int id;
+
+ id = mlxsw_linecard_bdev_id_alloc();
+ if (id < 0)
+ return id;
+
+ linecard_bdev = kzalloc(sizeof(*linecard_bdev), GFP_KERNEL);
+ if (!linecard_bdev) {
+ mlxsw_linecard_bdev_id_free(id);
+ return -ENOMEM;
+ }
+ linecard_bdev->adev.id = id;
+ linecard_bdev->adev.name = MLXSW_LINECARD_DEV_ID_NAME;
+ linecard_bdev->adev.dev.release = mlxsw_linecard_bdev_release;
+ linecard_bdev->adev.dev.parent = linecard->linecards->bus_info->dev;
+ linecard_bdev->linecard = linecard;
+
+ err = auxiliary_device_init(&linecard_bdev->adev);
+ if (err) {
+ mlxsw_linecard_bdev_id_free(id);
+ kfree(linecard_bdev);
+ return err;
+ }
+
+ err = auxiliary_device_add(&linecard_bdev->adev);
+ if (err) {
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ return err;
+ }
+
+ linecard->bdev = linecard_bdev;
+ return 0;
+}
+
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev = linecard->bdev;
+
+ if (!linecard_bdev)
+ /* Unprovisioned line cards do not have an auxiliary device. */
+ return;
+ auxiliary_device_delete(&linecard_bdev->adev);
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ linecard->bdev = NULL;
+}
+
+static int mlxsw_linecard_dev_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_devlink_info_get(linecard, req, extack);
+}
+
+static int
+mlxsw_linecard_dev_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_flash_update(devlink, linecard,
+ params->fw, extack);
+}
+
+static const struct devlink_ops mlxsw_linecard_dev_devlink_ops = {
+ .info_get = mlxsw_linecard_dev_devlink_info_get,
+ .flash_update = mlxsw_linecard_dev_devlink_flash_update,
+};
+
+static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
+ sizeof(*linecard_dev), &adev->dev);
+ if (!devlink)
+ return -ENOMEM;
+ linecard_dev = devlink_priv(devlink);
+ linecard_dev->linecard = linecard_bdev->linecard;
+ linecard_bdev->linecard_dev = linecard_dev;
+
+ devlink_register(devlink);
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ return 0;
+}
+
+static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
+static const struct auxiliary_device_id mlxsw_linecard_bdev_id_table[] = {
+ { .name = KBUILD_MODNAME "." MLXSW_LINECARD_DEV_ID_NAME },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlxsw_linecard_bdev_id_table);
+
+static struct auxiliary_driver mlxsw_linecard_driver = {
+ .name = MLXSW_LINECARD_DEV_ID_NAME,
+ .probe = mlxsw_linecard_bdev_probe,
+ .remove = mlxsw_linecard_bdev_remove,
+ .id_table = mlxsw_linecard_bdev_id_table,
+};
+
+int mlxsw_linecard_driver_register(void)
+{
+ return auxiliary_driver_register(&mlxsw_linecard_driver);
+}
+
+void mlxsw_linecard_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlxsw_linecard_driver);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 5c9869dcf674..ca59f0b946da 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include "core.h"
+#include "../mlxfw/mlxfw.h"
struct mlxsw_linecard_ini_file {
__le16 size;
@@ -87,6 +88,351 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
return linecard->name;
}
+struct mlxsw_linecard_device_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_linecard *linecard;
+};
+
+static int mlxsw_linecard_device_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index,
+ u32 *p_max_size,
+ u8 *p_align_bits,
+ u16 *p_max_write_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcqi_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcqi), &mcqi_pl);
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_linecard_device_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev,
+ u32 *fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data,
+ u16 size, u32 offset)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcda_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcda), &mcda_pl);
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int mlxsw_linecard_device_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE,
+ 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ u8 error_code;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_linecard_device_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static void mlxsw_linecard_device_fw_fsm_release(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_linecard_device_dev_ops = {
+ .component_query = mlxsw_linecard_device_fw_component_query,
+ .fsm_lock = mlxsw_linecard_device_fw_fsm_lock,
+ .fsm_component_update = mlxsw_linecard_device_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_linecard_device_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_linecard_device_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_linecard_device_fw_fsm_activate,
+ .fsm_query_state = mlxsw_linecard_device_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_linecard_device_fw_fsm_cancel,
+ .fsm_release = mlxsw_linecard_device_fw_fsm_release,
+};
+
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device_fw_info info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_linecard_device_dev_ops,
+ .psid = linecard->device.info.psid,
+ .psid_size = strlen(linecard->device.info.psid),
+ .devlink = linecard_devlink,
+ },
+ .mlxsw_core = mlxsw_core,
+ .linecard = linecard,
+ };
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ NL_SET_ERR_MSG_MOD(extack, "Only active line cards can be flashed");
+ err = -EINVAL;
+ goto unlock;
+ }
+ err = mlxsw_core_fw_flash(mlxsw_core, &info.mlxfw_dev,
+ firmware, extack);
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
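mlxsw_core_fw_flash() above hands info.mlxfw_dev to the common mlxfw core, which drives the fsm_* callbacks registered in mlxsw_linecard_device_dev_ops; a minimal sketch of the expected call order (editorial, not part of this patch; argument names follow the callbacks defined above):

	/* fsm_lock(dev, &fwhandle);
	 * for each component in the firmware file:
	 *	component_query(dev, name, &max_size, &align_bits, &max_write_size);
	 *	fsm_component_update(dev, fwhandle, component_index, component_size);
	 *	fsm_block_download(dev, fwhandle, data, size, offset);  repeated per block
	 *	fsm_component_verify(dev, fwhandle, component_index);
	 * fsm_activate(dev, fwhandle);
	 * fsm_query_state(dev, fwhandle, &state, &state_err) is polled between steps;
	 * fsm_release(dev, fwhandle) on completion, fsm_cancel(dev, fwhandle) on error.
	 */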
+static int mlxsw_linecard_device_psid_get(struct mlxsw_linecard *linecard,
+ u8 device_index, char *psid)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mgir_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index, device_index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mgir), &mgir_pl);
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(mgir_pl, psid);
+ return 0;
+}
+
+static int mlxsw_linecard_device_info_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ bool flashable_found = false;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ if (!flash_owner) /* We care only about flashable ones. */
+ continue;
+ if (flashable_found) {
+ dev_warn_once(linecard->linecards->bus_info->dev, "linecard %u: More flashable devices present, exposing only the first one\n",
+ linecard->slot_index);
+ return 0;
+ }
+
+ err = mlxsw_linecard_device_psid_get(linecard, device_index,
+ info.psid);
+ if (err)
+ return err;
+
+ linecard->device.info = info;
+ linecard->device.index = device_index;
+ flashable_found = true;
+ } while (msg_seq);
+
+ return 0;
+}
+
static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
{
linecard->provisioned = false;
@@ -226,12 +572,57 @@ void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (WARN_ON(!linecard->provisioned)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+ if (linecard->active) {
+ struct mlxsw_linecard_device_info *info = &linecard->device.info;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ info->psid);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
static int
mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
u16 hw_revision, u16 ini_version)
{
struct mlxsw_linecards *linecards = linecard->linecards;
const char *type;
+ int err;
type = mlxsw_linecard_types_lookup(linecards, card_type);
mlxsw_linecard_status_event_done(linecard,
@@ -252,6 +643,14 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
linecard->provisioned = true;
linecard->hw_revision = hw_revision;
linecard->ini_version = ini_version;
+
+ err = mlxsw_linecard_bdev_add(linecard);
+ if (err) {
+ linecard->provisioned = false;
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+
devlink_linecard_provision_set(linecard->devlink_linecard, type);
return 0;
}
@@ -260,6 +659,7 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
{
mlxsw_linecard_status_event_done(linecard,
MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ mlxsw_linecard_bdev_del(linecard);
linecard->provisioned = false;
devlink_linecard_provision_clear(linecard->devlink_linecard);
}
@@ -270,6 +670,10 @@ static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
char mddc_pl[MLXSW_REG_MDDC_LEN];
int err;
+ err = mlxsw_linecard_device_info_update(linecard);
+ if (err)
+ return err;
+
mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
if (err)
@@ -885,6 +1289,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
mlxsw_core_flush_owq();
if (linecard->active)
mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
devlink_linecard_destroy(linecard->devlink_linecard);
mutex_destroy(&linecard->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9660d4cce96..bb1cd4bae82e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -328,7 +328,6 @@ static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -357,10 +356,8 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0 &&
- !mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
+ if (mlxsw_m->module_to_port[i] > 0) {
err = mlxsw_m_port_create(mlxsw_m,
mlxsw_m->module_to_port[i],
i);
@@ -368,7 +365,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
- devl_unlock(devlink);
return 0;
@@ -378,7 +374,6 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
- devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -391,10 +386,8 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -402,7 +395,6 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
- devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index f91dde4df152..50527adc5b5a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -103,6 +103,8 @@ struct mlxsw_pci {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
u64 free_running_clock_offset;
+ u64 utc_sec_offset;
+ u64 utc_nsec_offset;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -456,9 +458,9 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
{
q->u.cq.v = mlxsw_pci->max_cqe_ver;
- /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs)
+ q->num < mlxsw_pci->num_sdq_cqs &&
+ !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
@@ -505,9 +507,32 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
+static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
+ ptrdiff_t off)
+{
+ return ioread32be(mlxsw_pci->hw_addr + off);
+}
+
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ if (cqe_v != MLXSW_PCI_CQE_V2)
+ return;
+
+ if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
+ return;
+
+ mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+ mlxsw_skb_cb(skb)->cqe_ts.nsec =
+ mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v,
char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -527,6 +552,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
if (unlikely(!tx_info.is_emad &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
tx_info.local_port);
skb = NULL;
@@ -647,6 +673,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
@@ -698,7 +726,7 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, ncqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
@@ -1235,6 +1263,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_ubridge) {
+ mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
+ profile->ubridge);
+ }
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
@@ -1252,12 +1285,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
}
- if (profile->used_kvh_xlt_cache_mode) {
- mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set(
- mbox, profile->kvh_xlt_cache_mode);
- }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
@@ -1268,31 +1295,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
}
- return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
-}
-
-static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_bus_info *bus_info,
- char *mbox)
-{
- int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox);
- int i;
-
- if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox))
- return 0;
-
- bus_info->xm_exists = true;
-
- if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) {
- dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n");
- return -EINVAL;
+ if (profile->used_cqe_time_stamp_type) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
+ profile->cqe_time_stamp_type);
}
- bus_info->xm_local_ports_count = count;
- for (i = 0; i < count; i++)
- bus_info->xm_local_ports[i] =
- mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox,
- i);
- return 0;
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
@@ -1306,8 +1316,7 @@ static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
return err;
mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
-
- return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox);
+ return 0;
}
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1550,6 +1559,24 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->free_running_clock_offset =
mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+ if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_sec_bar;
+ }
+
+ mlxsw_pci->utc_sec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_nsec_bar;
+ }
+
+ mlxsw_pci->utc_nsec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1582,6 +1609,14 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_config_profile;
+ /* Some resources depend on unified bridge model, which is configured
+ * as part of config_profile. Query the resources again to get correct
+ * values.
+ */
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_requery_resources;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1599,12 +1634,15 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
@@ -1819,19 +1857,33 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_h;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
+ frc_offset_h = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}
static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_l;
+
+ frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
+}
+
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}
static const struct mlxsw_bus mlxsw_pci_bus = {
@@ -1843,6 +1895,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
.read_frc_h = mlxsw_pci_read_frc_h,
.read_frc_l = mlxsw_pci_read_frc_l,
+ .read_utc_sec = mlxsw_pci_read_utc_sec,
+ .read_utc_nsec = mlxsw_pci_read_utc_nsec,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
@@ -1933,7 +1987,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
- mlxsw_pci->bus_info.read_frc_capable = true;
+ mlxsw_pci->bus_info.read_clock_capable = true;
mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7b531228d6c0..48dbfea0a2a1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -41,9 +41,6 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_H(offset) (offset)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_L(offset) ((offset) + 4)
-
#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
@@ -217,6 +214,25 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+/* pci_cqe_time_stamp_low
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_low, 0x0C, 16, 16);
+
#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
/* pci_cqe_mirror_tclass
@@ -280,8 +296,67 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
*/
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+enum mlxsw_pci_cqe_time_stamp_type {
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_USEC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_FRC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC,
+};
+
+/* pci_cqe_time_stamp_type
+ * Time stamp type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type)
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_type, 0x18, 22, 2);
+
#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+/* pci_cqe_time_stamp_high
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_high, 0x18, 0, 22);
+
+static inline u64 mlxsw_pci_cqe2_time_stamp_get(const char *cqe)
+{
+ u64 ts_high = mlxsw_pci_cqe2_time_stamp_high_get(cqe);
+ u64 ts_low = mlxsw_pci_cqe2_time_stamp_low_get(cqe);
+
+ return ts_high << 16 | ts_low;
+}
+
+static inline u8 mlxsw_pci_cqe2_time_stamp_sec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts >> 30 & 0xFF;
+}
+
+static inline u32 mlxsw_pci_cqe2_time_stamp_nsec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts & 0x3FFFFFFF;
+}
+
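A worked example of the split implemented by the helpers above (editorial sketch, illustrative values):

	/* sec = 18, nsec = 500000000 (0x1DCD6500)
	 * full_ts         = (18 << 30) | 0x1DCD6500 = 0x49DCD6500   (38 bits)
	 * time_stamp_high = full_ts >> 16            = 0x49DCD      (22 bits)
	 * time_stamp_low  = full_ts & 0xFFFF         = 0x6500       (16 bits)
	 * mlxsw_pci_cqe2_time_stamp_sec_get()  -> (full_ts >> 30) & 0xFF = 18
	 * mlxsw_pci_cqe2_time_stamp_nsec_get() -> full_ts & 0x3FFFFFFF   = 500000000
	 */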
/* pci_cqe_mirror_latency
* End-to-end latency of the original packet that does mirroring to the CPU.
* Value of 0xFFFFFF means that the latency is invalid. Units are according to
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 741fd2989d12..ac4d4ea51597 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -15,8 +15,6 @@
#define MLXSW_PORT_SWID_TYPE_IB 1
#define MLXSW_PORT_SWID_TYPE_ETH 2
-#define MLXSW_PORT_MID 0xd000
-
#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 93af6c974ece..f27bdecdf952 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -322,6 +322,18 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -335,6 +347,15 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_vid
+ * New VID when set_vid=1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when set_vid=0.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
/* reg_sfd_uc_system_port
* Unique port identifier for the final destination of the packet.
* Access: RW
@@ -359,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 fid_vid,
+ const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
@@ -368,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
+ mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
@@ -379,6 +402,18 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_lag_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_lag_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -393,8 +428,10 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
/* reg_sfd_uc_lag_lag_vid
- * Indicates VID in case of vFIDs. Reserved for FIDs.
+ * New VLAN ID.
* Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and set_vid=0.
*/
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
MLXSW_REG_SFD_REC_LEN, 0x0C, false);
@@ -419,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
@@ -997,7 +1035,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
* to packet types used for flooding.
*/
#define MLXSW_REG_SFGC_ID 0x2011
-#define MLXSW_REG_SFGC_LEN 0x10
+#define MLXSW_REG_SFGC_LEN 0x14
MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN);
@@ -1019,9 +1057,10 @@ enum mlxsw_reg_sfgc_type {
*/
MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
-enum mlxsw_reg_sfgc_bridge_type {
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+/* bridge_type is used in SFGC and SFMR. */
+enum mlxsw_reg_bridge_type {
+ MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
+ MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
};
/* reg_sfgc_bridge_type
@@ -1054,12 +1093,6 @@ MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
*/
MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
-/* reg_sfgc_mid
- * The multicast ID for the swid. Not supported for Spectrum
- * Access: RW
- */
-MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
-
/* reg_sfgc_counter_set_type
* Counter Set Type for flow counters.
* Access: RW
@@ -1072,18 +1105,26 @@ MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
*/
MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+/* reg_sfgc_mid_base
+ * MID Base.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
+
static inline void
mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_reg_bridge_type bridge_type,
enum mlxsw_flood_table_type table_type,
- unsigned int flood_table)
+ unsigned int flood_table, u16 mid_base)
{
MLXSW_REG_ZERO(sfgc, payload);
mlxsw_reg_sfgc_type_set(payload, type);
mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfgc_table_type_set(payload, table_type);
mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
- mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+ mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
}
/* SFDF - Switch Filtering DB Flush
@@ -1516,7 +1557,7 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
* virtualized ports.
*/
#define MLXSW_REG_SVFA_ID 0x201C
-#define MLXSW_REG_SVFA_LEN 0x10
+#define MLXSW_REG_SVFA_LEN 0x18
MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN);
@@ -1537,6 +1578,7 @@ MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_VNI_TO_FID,
};
/* reg_svfa_mapping_table
@@ -1586,20 +1628,76 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
- enum mlxsw_reg_svfa_mt mt, bool valid,
- u16 fid, u16 vid)
+/* reg_svfa_vni
+ * Virtual Network Identifier.
+ * Access: Index
+ *
+ * Note: Reserved when mapping_table is not 2 (VNI mapping table).
+ */
+MLXSW_ITEM32(reg, svfa, vni, 0x10, 0, 24);
+
+/* reg_svfa_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non enabled RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, svfa, irif_v, 0x14, 24, 1);
+
+/* reg_svfa_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
+
+static inline void __mlxsw_reg_svfa_pack(char *payload,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
- local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
mlxsw_reg_svfa_swid_set(payload, 0);
- mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_irif_v_set(payload, irif_v);
+ mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
+}
+
+static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
+ bool valid, u16 fid, u16 vid,
+ bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
+ u16 vid, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
+static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
+ u32 vni, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_vni_set(payload, vni);
+}
+
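As an illustration of the new VNI mapping table, a caller in the unified bridge model could map a VNI to a FID roughly as follows (a minimal sketch; the FID/VNI values are made up and not taken from this patch):

	char svfa_pl[MLXSW_REG_SVFA_LEN];
	int err;

	/* Map VNI 100 to FID 5000, with no ingress RIF assigned. */
	mlxsw_reg_svfa_vni_pack(svfa_pl, true, 5000, 100, false, 0);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);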
/* SPVTR - Switch Port VLAN Stacking Register
* ------------------------------------------
* The Switch Port VLAN Stacking register configures the VLAN mode of the port
@@ -1741,7 +1839,7 @@ static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
* Creates and configures FIDs.
*/
#define MLXSW_REG_SFMR_ID 0x201F
-#define MLXSW_REG_SFMR_LEN 0x18
+#define MLXSW_REG_SFMR_LEN 0x30
MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN);
@@ -1764,6 +1862,28 @@ MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+/* reg_sfmr_flood_rsp
+ * Router sub-port flooding table.
+ * 0 - Regular flooding table.
+ * 1 - Router sub-port flooding table. For this FID the flooding is per
+ * router-sub-port local_port. Must not be set for a FID which is not a
+ * router-sub-port and must be set prior to enabling the relevant RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
+
+/* reg_sfmr_flood_bridge_type
+ * Flood bridge type (see SFGC.bridge_type).
+ * 0 - type_0.
+ * 1 - type_1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
+
/* reg_sfmr_fid_offset
* FID offset.
* Used to point into the flooding table selected by SFGC register if
@@ -1800,15 +1920,57 @@ MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
/* reg_sfmr_vni
* Virtual Network Identifier.
+ * When legacy bridge model is used, a given VNI can only be assigned to one
+ * FID. When unified bridge model is used, it configures only the FID->VNI,
+ * the VNI->FID is done by SVFA.
* Access: RW
- *
- * Note: A given VNI can only be assigned to one FID.
*/
MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+/* reg_sfmr_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a RIF that is not valid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
+
+/* reg_sfmr_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+
+/* reg_sfmr_smpe_valid
+ * SMPE is valid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe_valid, 0x28, 20, 1);
+
+/* reg_sfmr_smpe
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
+
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset)
+ u16 fid_offset, bool flood_rsp,
+ enum mlxsw_reg_bridge_type bridge_type,
+ bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1816,6 +1978,10 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
mlxsw_reg_sfmr_vtfp_set(payload, false);
mlxsw_reg_sfmr_vv_set(payload, false);
+ mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
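For reference, a sketch of how the extended helper might be called for a .1q FID under the unified bridge model (editorial; the op enum value and the fid/smpe variables are assumed placeholders, not taken from this hunk):

	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
			    fid_offset, false, MLXSW_REG_BRIDGE_TYPE_0,
			    smpe_valid, smpe_index);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);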
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -2013,6 +2179,45 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
+/* SMPE - Switch Multicast Port to Egress VID
+ * ------------------------------------------
+ * The switch multicast port to egress VID maps
+ * {egress_port, SMPE index} -> {VID}.
+ */
+#define MLXSW_REG_SMPE_ID 0x202B
+#define MLXSW_REG_SMPE_LEN 0x0C
+
+MLXSW_REG_DEFINE(smpe, MLXSW_REG_SMPE_ID, MLXSW_REG_SMPE_LEN);
+
+/* reg_smpe_local_port
+ * Local port number.
+ * CPU port is not supported.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, smpe, 0x00, 16, 0x00, 12);
+
+/* reg_smpe_smpe_index
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smpe, smpe_index, 0x04, 0, 16);
+
+/* reg_smpe_evid
+ * Egress VID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, smpe, evid, 0x08, 0, 12);
+
+static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
+ u16 smpe_index, u16 evid)
+{
+ MLXSW_REG_ZERO(smpe, payload);
+ mlxsw_reg_smpe_local_port_set(payload, local_port);
+ mlxsw_reg_smpe_smpe_index_set(payload, smpe_index);
+ mlxsw_reg_smpe_evid_set(payload, evid);
+}
+
/* SFTR-V2 - Switch Flooding Table Version 2 Register
* --------------------------------------------------
* The switch flooding table is used for flooding packet replication. The table
@@ -2107,6 +2312,23 @@ MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+/* reg_smid2_smpe_valid
+ * SMPE is valid.
+ * When not valid, the egress VID will not be modified by the SMPE table.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe_valid, 0x08, 20, 1);
+
+/* reg_smid2_smpe
+ * Switch multicast port to egress VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe, 0x08, 0, 16);
+
/* reg_smid2_port
* Local port membership (1 bit per port).
* Access: RW
@@ -2120,13 +2342,15 @@ MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
- bool set)
+ bool set, bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(smid2, payload);
mlxsw_reg_smid2_swid_set(payload, 0);
mlxsw_reg_smid2_mid_set(payload, mid);
mlxsw_reg_smid2_port_set(payload, port, set);
mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+ mlxsw_reg_smid2_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_smid2_smpe_set(payload, smpe_valid ? smpe : 0);
}
/* CWTP - Congestion WRED ECN TClass Profile
@@ -6701,31 +6925,32 @@ MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
/* VLAN Interface */
-/* reg_ritr_vlan_if_vid
+/* reg_ritr_vlan_if_vlan_id
* VLAN ID.
* Access: RW
*/
-MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+MLXSW_ITEM32(reg, ritr, vlan_if_vlan_id, 0x08, 0, 12);
+
+/* reg_ritr_vlan_if_efid
+ * Egress FID.
+ * Used to connect the RIF to a bridge.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-1.
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_efid, 0x0C, 0, 16);
/* FID Interface */
/* reg_ritr_fid_if_fid
- * Filtering ID. Used to connect a bridge to the router. Only FIDs from
- * the vFID range are supported.
+ * Filtering ID. Used to connect a bridge to the router.
+ * When legacy bridge model is used, only FIDs from the vFID range are
+ * supported. When unified bridge model is used, this is the egress FID for
+ * router to bridge.
* Access: RW
*/
MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
-static inline void mlxsw_reg_ritr_fid_set(char *payload,
- enum mlxsw_reg_ritr_if_type rif_type,
- u16 fid)
-{
- if (rif_type == MLXSW_REG_RITR_FID_IF)
- mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
- else
- mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
-}
-
/* Sub-port Interface */
/* reg_ritr_sp_if_lag
@@ -6742,6 +6967,16 @@ MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
*/
MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+/* reg_ritr_sp_if_efid
+ * Egress filtering ID.
+ * Used to connect the eRIF to a bridge if eRIF-ACL has modified the DMAC or
+ * the VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_efid, 0x0C, 0, 16);
+
/* reg_ritr_sp_if_vid
* VLAN ID.
* Access: RW
@@ -6881,10 +7116,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
- u16 system_port, u16 vid)
+ u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
@@ -6918,6 +7154,20 @@ static inline void mlxsw_reg_ritr_mac_pack(char *payload, const char *mac)
}
static inline void
+mlxsw_reg_ritr_vlan_if_pack(char *payload, bool enable, u16 rif, u16 vr_id,
+ u16 mtu, const char *mac, u8 mac_profile_id,
+ u16 vlan_id, u16 efid)
+{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_VLAN_IF;
+
+ mlxsw_reg_ritr_pack(payload, enable, type, rif, vr_id, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(payload, mac_profile_id);
+ mlxsw_reg_ritr_vlan_if_vlan_id_set(payload, vlan_id);
+ mlxsw_reg_ritr_vlan_if_efid_set(payload, efid);
+}
+
+static inline void
mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
@@ -7848,11 +8098,10 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 *dip)
+ u32 dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip4_set(payload, *dip);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7862,8 +8111,7 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
static inline void
@@ -8926,656 +9174,62 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
-/* RXLTE - Router XLT Enable Register
- * ----------------------------------
- * The RXLTE enables XLT (eXtended Lookup Table) LPM lookups if a capable
- * XM is present on the system.
- */
-
-#define MLXSW_REG_RXLTE_ID 0x8050
-#define MLXSW_REG_RXLTE_LEN 0x0C
-
-MLXSW_REG_DEFINE(rxlte, MLXSW_REG_RXLTE_ID, MLXSW_REG_RXLTE_LEN);
-
-/* reg_rxlte_virtual_router
- * Virtual router ID associated with the router interface.
- * Range is 0..cap_max_virtual_routers-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, virtual_router, 0x00, 0, 16);
-
-enum mlxsw_reg_rxlte_protocol {
- MLXSW_REG_RXLTE_PROTOCOL_IPV4,
- MLXSW_REG_RXLTE_PROTOCOL_IPV6,
-};
-
-/* reg_rxlte_protocol
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, protocol, 0x04, 0, 4);
-
-/* reg_rxlte_lpm_xlt_en
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxlte, lpm_xlt_en, 0x08, 0, 1);
-
-static inline void mlxsw_reg_rxlte_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_rxlte_protocol protocol,
- bool lpm_xlt_en)
-{
- MLXSW_REG_ZERO(rxlte, payload);
- mlxsw_reg_rxlte_virtual_router_set(payload, virtual_router);
- mlxsw_reg_rxlte_protocol_set(payload, protocol);
- mlxsw_reg_rxlte_lpm_xlt_en_set(payload, lpm_xlt_en);
-}
-
-/* RXLTM - Router XLT M select Register
- * ------------------------------------
- * The RXLTM configures and selects the M for the XM lookups.
- */
-
-#define MLXSW_REG_RXLTM_ID 0x8051
-#define MLXSW_REG_RXLTM_LEN 0x14
-
-MLXSW_REG_DEFINE(rxltm, MLXSW_REG_RXLTM_ID, MLXSW_REG_RXLTM_LEN);
-
-/* reg_rxltm_m0_val_v6
- * Global M0 value For IPv6.
- * Range 0..128
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v6, 0x10, 16, 8);
-
-/* reg_rxltm_m0_val_v4
- * Global M0 value For IPv4.
- * Range 0..32
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v4, 0x10, 0, 6);
-
-static inline void mlxsw_reg_rxltm_pack(char *payload, u8 m0_val_v4, u8 m0_val_v6)
-{
- MLXSW_REG_ZERO(rxltm, payload);
- mlxsw_reg_rxltm_m0_val_v6_set(payload, m0_val_v6);
- mlxsw_reg_rxltm_m0_val_v4_set(payload, m0_val_v4);
-}
-
-/* RLCMLD - Router LPM Cache ML Delete Register
- * --------------------------------------------
- * The RLCMLD register is used to bulk delete the XLT-LPM cache ML entries.
- * This can be used by SW when L is increased or decreased, thus need to
- * remove entries with old ML values.
- */
-
-#define MLXSW_REG_RLCMLD_ID 0x8055
-#define MLXSW_REG_RLCMLD_LEN 0x30
-
-MLXSW_REG_DEFINE(rlcmld, MLXSW_REG_RLCMLD_ID, MLXSW_REG_RLCMLD_LEN);
-
-enum mlxsw_reg_rlcmld_select {
- MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES,
-};
-
-/* reg_rlcmld_select
- * Which entries to delete.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, select, 0x00, 16, 2);
-
-enum mlxsw_reg_rlcmld_filter_fields {
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL = 0x04,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER = 0x08,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP = 0x10,
-};
-
-/* reg_rlcmld_filter_fields
- * If a bit is '0' then the relevant field is ignored.
- * Access: Index
+/* REIV - Router Egress Interface to VID Register
+ * ----------------------------------------------
+ * The REIV register maps {eRIF, egress_port} -> VID.
+ * This mapping is done at the egress, after the ACLs.
+ * This mapping always takes effect after router, regardless of cast
+ * (for unicast/multicast/port-based multicast), regardless of eRIF type and
+ * regardless of bridge decisions (e.g. SFD for unicast or SMPE).
+ * Reserved when the RIF is a loopback RIF.
+ *
+ * Note: Reserved when legacy bridge model is used.
*/
-MLXSW_ITEM32(reg, rlcmld, filter_fields, 0x00, 0, 8);
+#define MLXSW_REG_REIV_ID 0x8034
+#define MLXSW_REG_REIV_BASE_LEN 0x20 /* base length, without records */
+#define MLXSW_REG_REIV_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_REIV_REC_MAX_COUNT 256 /* firmware limitation */
+#define MLXSW_REG_REIV_LEN (MLXSW_REG_REIV_BASE_LEN + \
+ MLXSW_REG_REIV_REC_LEN * \
+ MLXSW_REG_REIV_REC_MAX_COUNT)
-enum mlxsw_reg_rlcmld_protocol {
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
-};
+MLXSW_REG_DEFINE(reiv, MLXSW_REG_REIV_ID, MLXSW_REG_REIV_LEN);
-/* reg_rlcmld_protocol
+/* reg_reiv_port_page
+ * Port page - elport_record[0] is 256*port_page.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, protocol, 0x08, 0, 4);
+MLXSW_ITEM32(reg, reiv, port_page, 0x00, 0, 4);
-/* reg_rlcmld_virtual_router
- * Virtual router ID.
- * Range is 0..cap_max_virtual_routers-1
+/* reg_reiv_erif
+ * Egress RIF.
+ * Range is 0..cap_max_router_interfaces-1.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, virtual_router, 0x0C, 0, 16);
-
-/* reg_rlcmld_dip
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip6, 0x10, 16);
-
-/* reg_rlcmld_dip_mask
- * per bit:
- * 0: no match
- * 1: match
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip_mask4, 0x2C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip_mask6, 0x20, 16);
-
-static inline void __mlxsw_reg_rlcmld_pack(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- enum mlxsw_reg_rlcmld_protocol protocol,
- u16 virtual_router)
-{
- u8 filter_fields = MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP;
-
- MLXSW_REG_ZERO(rlcmld, payload);
- mlxsw_reg_rlcmld_select_set(payload, select);
- mlxsw_reg_rlcmld_filter_fields_set(payload, filter_fields);
- mlxsw_reg_rlcmld_protocol_set(payload, protocol);
- mlxsw_reg_rlcmld_virtual_router_set(payload, virtual_router);
-}
-
-static inline void mlxsw_reg_rlcmld_pack4(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- u32 dip, u32 dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- virtual_router);
- mlxsw_reg_rlcmld_dip4_set(payload, dip);
- mlxsw_reg_rlcmld_dip_mask4_set(payload, dip_mask);
-}
-
-static inline void mlxsw_reg_rlcmld_pack6(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- const void *dip, const void *dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
- virtual_router);
- mlxsw_reg_rlcmld_dip6_memcpy_to(payload, dip);
- mlxsw_reg_rlcmld_dip_mask6_memcpy_to(payload, dip_mask);
-}
-
-/* RLPMCE - Router LPM Cache Enable Register
- * -----------------------------------------
- * Allows disabling the LPM cache. Can be changed on the fly.
- */
-
-#define MLXSW_REG_RLPMCE_ID 0x8056
-#define MLXSW_REG_RLPMCE_LEN 0x4
-
-MLXSW_REG_DEFINE(rlpmce, MLXSW_REG_RLPMCE_ID, MLXSW_REG_RLPMCE_LEN);
-
-/* reg_rlpmce_flush
- * Flush:
- * 0: do not flush the cache (default)
- * 1: flush (clear) the cache
- * Access: WO
- */
-MLXSW_ITEM32(reg, rlpmce, flush, 0x00, 4, 1);
-
-/* reg_rlpmce_disable
- * LPM cache:
- * 0: enabled (default)
- * 1: disabled
- * Access: RW
- */
-MLXSW_ITEM32(reg, rlpmce, disable, 0x00, 0, 1);
-
-static inline void mlxsw_reg_rlpmce_pack(char *payload, bool flush,
- bool disable)
-{
- MLXSW_REG_ZERO(rlpmce, payload);
- mlxsw_reg_rlpmce_flush_set(payload, flush);
- mlxsw_reg_rlpmce_disable_set(payload, disable);
-}
-
-/* Note that XLTQ, XMDR, XRMT and XRALXX register positions violate the rule
- * of ordering register definitions by the ID. However, XRALXX pack helpers are
- * using RALXX pack helpers, RALXX registers have higher IDs.
- * Also XMDR is using RALUE enums. XLRQ and XRMT are just put alongside with the
- * related registers.
- */
-
-/* XLTQ - XM Lookup Table Query Register
- * -------------------------------------
- */
-#define MLXSW_REG_XLTQ_ID 0x7802
-#define MLXSW_REG_XLTQ_LEN 0x2C
-
-MLXSW_REG_DEFINE(xltq, MLXSW_REG_XLTQ_ID, MLXSW_REG_XLTQ_LEN);
-
-enum mlxsw_reg_xltq_xm_device_id {
- MLXSW_REG_XLTQ_XM_DEVICE_ID_UNKNOWN,
- MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT = 0xCF71,
-};
-
-/* reg_xltq_xm_device_id
- * XM device ID.
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xm_device_id, 0x04, 0, 16);
-
-/* reg_xltq_xlt_cap_ipv4_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv4_lpm, 0x10, 0, 1);
-
-/* reg_xltq_xlt_cap_ipv6_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv6_lpm, 0x10, 1, 1);
-
-/* reg_xltq_cap_xlt_entries
- * Number of XLT entries
- * Note: SW must not fill more than 80% in order to avoid overflow
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_entries, 0x20, 0, 32);
-
-/* reg_xltq_cap_xlt_mtable
- * XLT M-Table max size
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_mtable, 0x24, 0, 32);
+MLXSW_ITEM32(reg, reiv, erif, 0x04, 0, 16);
-static inline void mlxsw_reg_xltq_pack(char *payload)
-{
- MLXSW_REG_ZERO(xltq, payload);
-}
-
-static inline void mlxsw_reg_xltq_unpack(char *payload, u16 *xm_device_id, bool *xlt_cap_ipv4_lpm,
- bool *xlt_cap_ipv6_lpm, u32 *cap_xlt_entries,
- u32 *cap_xlt_mtable)
-{
- *xm_device_id = mlxsw_reg_xltq_xm_device_id_get(payload);
- *xlt_cap_ipv4_lpm = mlxsw_reg_xltq_xlt_cap_ipv4_lpm_get(payload);
- *xlt_cap_ipv6_lpm = mlxsw_reg_xltq_xlt_cap_ipv6_lpm_get(payload);
- *cap_xlt_entries = mlxsw_reg_xltq_cap_xlt_entries_get(payload);
- *cap_xlt_mtable = mlxsw_reg_xltq_cap_xlt_mtable_get(payload);
-}
-
-/* XMDR - XM Direct Register
- * -------------------------
- * The XMDR allows direct access to the XM device via the switch.
- * Working in synchronous mode. FW waits for response from the XLT
- * for each command. FW acks the XMDR accordingly.
- */
-#define MLXSW_REG_XMDR_ID 0x7803
-#define MLXSW_REG_XMDR_BASE_LEN 0x20
-#define MLXSW_REG_XMDR_TRANS_LEN 0x80
-#define MLXSW_REG_XMDR_LEN (MLXSW_REG_XMDR_BASE_LEN + \
- MLXSW_REG_XMDR_TRANS_LEN)
-
-MLXSW_REG_DEFINE(xmdr, MLXSW_REG_XMDR_ID, MLXSW_REG_XMDR_LEN);
-
-/* reg_xmdr_bulk_entry
- * Bulk_entry
- * 0: Last entry - immediate flush of XRT-cache
- * 1: Bulk entry - do not flush the XRT-cache
+/* reg_reiv_rec_update
+ * Update enable (when write):
+ * 0 - Do not update the entry.
+ * 1 - Update the entry.
* Access: OP
*/
-MLXSW_ITEM32(reg, xmdr, bulk_entry, 0x04, 8, 1);
-
-/* reg_xmdr_num_rec
- * Number of records for Direct access to XM
- * Supported: 0..4 commands (except NOP which is a filler)
- * 0 commands is reserved when bulk_entry = 1.
- * 0 commands is allowed when bulk_entry = 0 for immediate XRT-cache flush.
- * Access: OP
- */
-MLXSW_ITEM32(reg, xmdr, num_rec, 0x04, 0, 4);
-
-/* reg_xmdr_reply_vect
- * Reply Vector
- * Bit i for command index i+1
- * values per bit:
- * 0: failed
- * 1: succeeded
- * e.g. if commands 1, 2, 4 succeeded and command 3 failed then binary
- * value will be 0b1011
- * Access: RO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, xmdr, reply_vect, 0x08, 4, 1);
-
-static inline void mlxsw_reg_xmdr_pack(char *payload, bool bulk_entry)
-{
- MLXSW_REG_ZERO(xmdr, payload);
- mlxsw_reg_xmdr_bulk_entry_set(payload, bulk_entry);
-}
-
-enum mlxsw_reg_xmdr_c_cmd_id {
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4 = 0x30,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6 = 0x31,
-};
-
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN 32
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN 48
-
-/* reg_xmdr_c_cmd_id
- */
-MLXSW_ITEM32(reg, xmdr_c, cmd_id, 0x00, 24, 8);
-
-/* reg_xmdr_c_seq_number
- */
-MLXSW_ITEM32(reg, xmdr_c, seq_number, 0x00, 12, 12);
-
-enum mlxsw_reg_xmdr_c_ltr_op {
- /* Activity is set */
- MLXSW_REG_XMDR_C_LTR_OP_WRITE = 0,
- /* There is no update mask. All fields are updated. */
- MLXSW_REG_XMDR_C_LTR_OP_UPDATE = 1,
- MLXSW_REG_XMDR_C_LTR_OP_DELETE = 2,
-};
-
-/* reg_xmdr_c_ltr_op
- * Operation.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_op, 0x04, 24, 8);
-
-/* reg_xmdr_c_ltr_trap_action
- * Trap action.
- * Values are defined in enum mlxsw_reg_ralue_trap_action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_action, 0x04, 20, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_trap_id_num {
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS0,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS1,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS2,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS3,
-};
-
-/* reg_xmdr_c_ltr_trap_id_num
- * Trap-ID number.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_id_num, 0x04, 16, 4);
-
-/* reg_xmdr_c_ltr_virtual_router
- * Virtual Router ID.
- * Range is 0..cap_max_virtual_routers-1
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_virtual_router, 0x04, 0, 16);
-
-/* reg_xmdr_c_ltr_prefix_len
- * Number of bits in the prefix of the LPM route.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_prefix_len, 0x08, 24, 8);
-
-/* reg_xmdr_c_ltr_bmp_len
- * The best match prefix length in the case that there is no match for
- * longer prefixes.
- * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_bmp_len, 0x08, 16, 8);
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_update, MLXSW_REG_REIV_BASE_LEN, 31, 1,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-/* reg_xmdr_c_ltr_entry_type
- * Entry type.
- * Values are defined in enum mlxsw_reg_ralue_entry_type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_entry_type, 0x08, 4, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_action_type {
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME,
-};
-
-/* reg_xmdr_c_ltr_action_type
- * Action Type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_action_type, 0x08, 0, 4);
-
-/* reg_xmdr_c_ltr_erif
- * Egress Router Interface.
- * Only relevant in case of LOCAL action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_erif, 0x10, 0, 16);
-
-/* reg_xmdr_c_ltr_adjacency_index
- * Points to the first entry of the group-based ECMP.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_adjacency_index, 0x10, 0, 24);
-
-#define MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC 0xFFFFFF
-
-/* reg_xmdr_c_ltr_pointer_to_tunnel
- * Only relevant in case of IP2ME action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_pointer_to_tunnel, 0x10, 0, 24);
-
-/* reg_xmdr_c_ltr_ecmp_size
- * Amount of sequential entries starting
- * from the adjacency_index (the number of ECMPs).
- * The valid range is 1-64, 512, 1024, 2048 and 4096.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_ecmp_size, 0x14, 0, 32);
-
-/* reg_xmdr_c_ltr_dip*
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * The least significant bits must be '0' if the prefix_len is smaller
- * than 128 for IPv6 or smaller than 32 for IPv4.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, xmdr_c, ltr_dip6, 0x1C, 16);
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_xmdr_c_cmd_id cmd_id, u16 seq_number,
- enum mlxsw_reg_xmdr_c_ltr_op op, u16 virtual_router,
- u8 prefix_len)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
- u8 num_rec = mlxsw_reg_xmdr_num_rec_get(xmdr_payload);
-
- mlxsw_reg_xmdr_num_rec_set(xmdr_payload, num_rec + 1);
-
- mlxsw_reg_xmdr_c_cmd_id_set(payload, cmd_id);
- mlxsw_reg_xmdr_c_seq_number_set(payload, seq_number);
- mlxsw_reg_xmdr_c_ltr_op_set(payload, op);
- mlxsw_reg_xmdr_c_ltr_virtual_router_set(payload, virtual_router);
- mlxsw_reg_xmdr_c_ltr_prefix_len_set(payload, prefix_len);
- mlxsw_reg_xmdr_c_ltr_entry_type_set(payload,
- MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
- mlxsw_reg_xmdr_c_ltr_bmp_len_set(payload, prefix_len);
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack4(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, u32 *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip4_set(payload, *dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN;
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack6(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, const void *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip6_memcpy_to(payload, dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN;
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_remote_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num,
- u32 adjacency_index, u16 ecmp_size)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_adjacency_index_set(payload, adjacency_index);
- mlxsw_reg_xmdr_c_ltr_ecmp_size_set(payload, ecmp_size);
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_local_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num, u16 erif)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_erif_set(payload, erif);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(char *xmdr_payload,
- unsigned int trans_offset)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload,
- MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(char *xmdr_payload,
- unsigned int trans_offset,
- u32 pointer_to_tunnel)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload, pointer_to_tunnel);
-}
-
-/* XRMT - XM Router M Table Register
- * ---------------------------------
- * The XRMT configures the M-Table for the XLT-LPM.
- */
-#define MLXSW_REG_XRMT_ID 0x7810
-#define MLXSW_REG_XRMT_LEN 0x14
-
-MLXSW_REG_DEFINE(xrmt, MLXSW_REG_XRMT_ID, MLXSW_REG_XRMT_LEN);
-
-/* reg_xrmt_index
- * Index in M-Table.
- * Range 0..cap_xlt_mtable-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, xrmt, index, 0x04, 0, 20);
-
-/* reg_xrmt_l0_val
+/* reg_reiv_rec_evid
+ * Egress VID.
+ * Range is 0..4095.
* Access: RW
*/
-MLXSW_ITEM32(reg, xrmt, l0_val, 0x10, 24, 8);
-
-static inline void mlxsw_reg_xrmt_pack(char *payload, u32 index, u8 l0_val)
-{
- MLXSW_REG_ZERO(xrmt, payload);
- mlxsw_reg_xrmt_index_set(payload, index);
- mlxsw_reg_xrmt_l0_val_set(payload, l0_val);
-}
-
-/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
- * -----------------------------------------------------------
- * The XRALTA is used to allocate the XLT LPM trees.
- *
- * This register embeds original RALTA register.
- */
-#define MLXSW_REG_XRALTA_ID 0x7811
-#define MLXSW_REG_XRALTA_LEN 0x08
-#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_evid, MLXSW_REG_REIV_BASE_LEN, 0, 12,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
-
-static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
+static inline void mlxsw_reg_reiv_pack(char *payload, u8 port_page, u16 erif)
{
- char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
-
- MLXSW_REG_ZERO(xralta, payload);
- mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
-}
-
-/* XRALST - XM Router Algorithmic LPM Structure Tree Register
- * ----------------------------------------------------------
- * The XRALST is used to set and query the structure of an XLT LPM tree.
- *
- * This register embeds original RALST register.
- */
-#define MLXSW_REG_XRALST_ID 0x7812
-#define MLXSW_REG_XRALST_LEN 0x108
-#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
-
-static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- MLXSW_REG_ZERO(xralst, payload);
- mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
-}
-
-static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
- u8 left_child_bin,
- u8 right_child_bin)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
- right_child_bin);
-}
-
-/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
- * --------------------------------------------------------
- * The XRALTB register is used to bind virtual router and protocol
- * to an allocated LPM tree.
- *
- * This register embeds original RALTB register.
- */
-#define MLXSW_REG_XRALTB_ID 0x7813
-#define MLXSW_REG_XRALTB_LEN 0x08
-#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
-
-static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
-{
- char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
-
- MLXSW_REG_ZERO(xraltb, payload);
- mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+ MLXSW_REG_ZERO(reiv, payload);
+ mlxsw_reg_reiv_port_page_set(payload, port_page);
+ mlxsw_reg_reiv_erif_set(payload, erif);
}
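For illustration only (not part of the patch): a minimal sketch of how the new REIV register might be packed and filled. It assumes the register length constant MLXSW_REG_REIV_LEN and the mlxsw_reg_write()/MLXSW_REG() helpers from the mlxsw core, uses the accessor generated by the MLXSW_ITEM32_INDEXED() macro above, and glosses over any per-record update bit a real caller would also set.

/* Illustrative sketch only: map the egress VID of the first few records of
 * port page 0 for a given eRIF. Names outside this hunk (the LEN constant,
 * mlxsw_reg_write()) are assumed from the surrounding driver.
 */
static int example_reiv_write(struct mlxsw_core *mlxsw_core, u16 rif_index)
{
	char reiv_pl[MLXSW_REG_REIV_LEN];
	int i;

	mlxsw_reg_reiv_pack(reiv_pl, 0, rif_index);
	for (i = 0; i < 4; i++)
		/* Egress VID per record, within the documented 0..4095 range. */
		mlxsw_reg_reiv_rec_evid_set(reiv_pl, i, 100 + i);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(reiv), reiv_pl);
}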
/* MFCR - Management Fan Control Register
@@ -10693,6 +10347,8 @@ MLXSW_REG_DEFINE(mtutc, MLXSW_REG_MTUTC_ID, MLXSW_REG_MTUTC_LEN);
enum mlxsw_reg_mtutc_operation {
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC = 0,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 1,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME = 2,
MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ = 3,
};
@@ -10705,25 +10361,50 @@ MLXSW_ITEM32(reg, mtutc, operation, 0x00, 0, 4);
/* reg_mtutc_freq_adjustment
* Frequency adjustment: Every PPS the HW frequency will be
* adjusted by this value. Units of HW clock, where HW counts
- * 10^9 HW clocks for 1 HW second.
+ * 10^9 HW clocks for 1 HW second. Range is from -50,000,000 to +50,000,000.
+ * In Spectrum-2, the field is reversed: positive values decrease the
+ * frequency.
* Access: RW
*/
MLXSW_ITEM32(reg, mtutc, freq_adjustment, 0x04, 0, 32);
+#define MLXSW_REG_MTUTC_MAX_FREQ_ADJ (50 * 1000 * 1000)
+
/* reg_mtutc_utc_sec
* UTC seconds.
* Access: WO
*/
MLXSW_ITEM32(reg, mtutc, utc_sec, 0x10, 0, 32);
+/* reg_mtutc_utc_nsec
+ * UTC nSecs.
+ * Range 0..(10^9-1)
+ * Updated when operation is SET_TIME_IMMEDIATE.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_nsec, 0x14, 0, 30);
+
+/* reg_mtutc_time_adjustment
+ * Time adjustment.
+ * Units of nSec.
+ * Range is from -32768 to +32767.
+ * Updated when operation is ADJUST_TIME.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, time_adjustment, 0x18, 0, 32);
+
static inline void
mlxsw_reg_mtutc_pack(char *payload, enum mlxsw_reg_mtutc_operation oper,
- u32 freq_adj, u32 utc_sec)
+ u32 freq_adj, u32 utc_sec, u32 utc_nsec, u32 time_adj)
{
MLXSW_REG_ZERO(mtutc, payload);
mlxsw_reg_mtutc_operation_set(payload, oper);
mlxsw_reg_mtutc_freq_adjustment_set(payload, freq_adj);
mlxsw_reg_mtutc_utc_sec_set(payload, utc_sec);
+ mlxsw_reg_mtutc_utc_nsec_set(payload, utc_nsec);
+ mlxsw_reg_mtutc_time_adjustment_set(payload, time_adj);
}
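For illustration only (not part of the patch): a minimal sketch of the extended pack helper with the new ADJUST_TIME operation. mlxsw_reg_write() and the MLXSW_REG() macro are assumed from the mlxsw core.

/* Illustrative sketch only: nudge the UTC clock by a bounded number of
 * nanoseconds. ADJUST_TIME and time_adjustment are reserved on Spectrum-1,
 * so this path applies to Spectrum-2 and newer.
 */
static int example_mtutc_adjust_time(struct mlxsw_core *mlxsw_core, s16 delta_nsec)
{
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	/* delta_nsec must stay within the documented -32768..32767 range. */
	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
			     0, 0, 0, delta_nsec);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}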
/* MCQI - Management Component Query Information
@@ -11391,15 +11072,76 @@ MLXSW_ITEM32(reg, mtptpt, trap_id, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, mtptpt, message_type, 0x04, 0, 16);
-static inline void mlxsw_reg_mtptptp_pack(char *payload,
- enum mlxsw_reg_mtptpt_trap_id trap_id,
- u16 message_type)
+static inline void mlxsw_reg_mtptpt_pack(char *payload,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
{
MLXSW_REG_ZERO(mtptpt, payload);
mlxsw_reg_mtptpt_trap_id_set(payload, trap_id);
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MTPCPC - Monitoring Time Precision Correction Port Configuration Register
+ * -------------------------------------------------------------------------
+ */
+#define MLXSW_REG_MTPCPC_ID 0x9093
+#define MLXSW_REG_MTPCPC_LEN 0x2C
+
+MLXSW_REG_DEFINE(mtpcpc, MLXSW_REG_MTPCPC_ID, MLXSW_REG_MTPCPC_LEN);
+
+/* reg_mtpcpc_pport
+ * Per port:
+ * 0: config is global. When reading, the local_port is 1.
+ * 1: config is per port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpcpc, pport, 0x00, 31, 1);
+
+/* reg_mtpcpc_local_port
+ * Local port number.
+ * Supported to/from CPU port.
+ * Reserved when pport = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mtpcpc, 0x00, 16, 0x00, 12);
+
+/* reg_mtpcpc_ptp_trap_en
+ * Enable PTP traps.
+ * The trap_id is configured by MTPTPT.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ptp_trap_en, 0x04, 0, 1);
+
+/* reg_mtpcpc_ing_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at ingress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Also supported from the CPU port.
+ * Default: all 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ing_correction_message_type, 0x10, 0, 16);
+
+/* reg_mtpcpc_egr_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at egress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Also supported from the CPU port.
+ * Default: all 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, egr_correction_message_type, 0x14, 0, 16);
+
+static inline void mlxsw_reg_mtpcpc_pack(char *payload, bool pport,
+ u16 local_port, bool ptp_trap_en,
+ u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpcpc, payload);
+ mlxsw_reg_mtpcpc_pport_set(payload, pport);
+ mlxsw_reg_mtpcpc_local_port_set(payload, pport ? local_port : 0);
+ mlxsw_reg_mtpcpc_ptp_trap_en_set(payload, ptp_trap_en);
+ mlxsw_reg_mtpcpc_ing_correction_message_type_set(payload, ing);
+ mlxsw_reg_mtpcpc_egr_correction_message_type_set(payload, egr);
+}
+
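For illustration only (not part of the patch): a sketch of configuring the new MTPCPC register globally. mlxsw_reg_write() and MLXSW_REG() are assumed from the mlxsw core.

/* Illustrative sketch only: enable PTP traps and correction-field updates
 * for Sync (bit 0) and Delay_Req (bit 1) in both directions, using the
 * global configuration (pport = false, local_port ignored).
 */
static int example_mtpcpc_global_config(struct mlxsw_core *mlxsw_core)
{
	char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
	u16 msg_types = BIT(0) | BIT(1);	/* Sync, Delay_Req */

	mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, true, msg_types, msg_types);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpcpc), mtpcpc_pl);
}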
/* MFGD - Monitoring FW General Debug Register
* -------------------------------------------
*/
@@ -11622,6 +11364,95 @@ mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
*p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
}
+/* MDDT - Management DownStream Device Tunneling Register
+ * ------------------------------------------------------
+ * This register allows delivering query and request messages (PRM registers,
+ * commands) to a DownStream device.
+ */
+#define MLXSW_REG_MDDT_ID 0x9160
+#define MLXSW_REG_MDDT_LEN 0x110
+
+MLXSW_REG_DEFINE(mddt, MLXSW_REG_MDDT_ID, MLXSW_REG_MDDT_LEN);
+
+/* reg_mddt_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, slot_index, 0x00, 8, 4);
+
+/* reg_mddt_device_index
+ * Device index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, device_index, 0x00, 0, 8);
+
+/* reg_mddt_read_size
+ * Read size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, read_size, 0x04, 24, 8);
+
+/* reg_mddt_write_size
+ * Write size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, write_size, 0x04, 16, 8);
+
+enum mlxsw_reg_mddt_status {
+ MLXSW_REG_MDDT_STATUS_OK,
+};
+
+/* reg_mddt_status
+ * Return code from the Downstream Device for the register that was sent.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddt, status, 0x0C, 24, 8);
+
+enum mlxsw_reg_mddt_method {
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+};
+
+/* reg_mddt_method
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, method, 0x0C, 22, 2);
+
+/* reg_mddt_register_id
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, register_id, 0x0C, 0, 16);
+
+#define MLXSW_REG_MDDT_PAYLOAD_OFFSET 0x0C
+#define MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN 4
+
+static inline char *mlxsw_reg_mddt_inner_payload(char *payload)
+{
+ return payload + MLXSW_REG_MDDT_PAYLOAD_OFFSET +
+ MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+}
+
+static inline void mlxsw_reg_mddt_pack(char *payload, u8 slot_index,
+ u8 device_index,
+ enum mlxsw_reg_mddt_method method,
+ const struct mlxsw_reg_info *reg,
+ char **inner_payload)
+{
+ int len = reg->len + MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+
+ if (WARN_ON(len + MLXSW_REG_MDDT_PAYLOAD_OFFSET > MLXSW_REG_MDDT_LEN))
+ len = MLXSW_REG_MDDT_LEN - MLXSW_REG_MDDT_PAYLOAD_OFFSET;
+
+ MLXSW_REG_ZERO(mddt, payload);
+ mlxsw_reg_mddt_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddt_device_index_set(payload, device_index);
+ mlxsw_reg_mddt_method_set(payload, method);
+ mlxsw_reg_mddt_register_id_set(payload, reg->id);
+ mlxsw_reg_mddt_read_size_set(payload, len / 4);
+ mlxsw_reg_mddt_write_size_set(payload, len / 4);
+ *inner_payload = mlxsw_reg_mddt_inner_payload(payload);
+}
+
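For illustration only (not part of the patch): a sketch of tunneling a register write to a downstream device with MDDT. mlxsw_reg_write() and MLXSW_REG() are assumed from the mlxsw core, and MTUTC is only an arbitrary choice of tunneled register.

/* Illustrative sketch only: pack MDDT for a write, then pack the tunneled
 * register into the inner payload returned by mlxsw_reg_mddt_pack().
 */
static int example_mddt_tunnel_mtutc(struct mlxsw_core *mlxsw_core,
				     u8 slot_index, u8 device_index)
{
	char mddt_pl[MLXSW_REG_MDDT_LEN];
	char *inner_pl;

	mlxsw_reg_mddt_pack(mddt_pl, slot_index, device_index,
			    MLXSW_REG_MDDT_METHOD_WRITE, MLXSW_REG(mtutc),
			    &inner_pl);
	mlxsw_reg_mtutc_pack(inner_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
			     0, 0, 0, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
}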
/* MDDQ - Management DownStream Device Query Register
* --------------------------------------------------
* This register allows querying the DownStream device properties. The desired
@@ -11643,7 +11474,11 @@ MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
enum mlxsw_reg_mddq_query_type {
MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
- MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
};
/* reg_mddq_query_type
@@ -11657,6 +11492,28 @@ MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a specific request, the response
+ * message sequential number is the following one. The last message in the
+ * sequence should be 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
/* reg_mddq_slot_info_provisioned
* If set, the INI file is applied and the card is provisioned.
* Access: RO
@@ -11743,6 +11600,61 @@ mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
*p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
}
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device should be numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
+
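For illustration only (not part of the patch): a sketch of walking the device-info records of a slot with the new MDDQ fields. mlxsw_reg_query(), MLXSW_REG() and MLXSW_REG_MDDQ_LEN are assumed from the surrounding driver.

/* Illustrative sketch only: each response carries the sequence number of the
 * next request, a sequence of 0 marks the last message, and data_valid is
 * clear when the slot holds no devices.
 */
static int example_mddq_walk_devices(struct mlxsw_core *mlxsw_core, u8 slot_index)
{
	u8 msg_seq = 0;

	do {
		u16 fw_major, fw_minor, fw_sub_minor;
		char mddq_pl[MLXSW_REG_MDDQ_LEN];
		bool data_valid, flash_owner;
		u8 device_index;
		int err;

		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, msg_seq);
		err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
		if (err)
			return err;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, &data_valid,
						  &flash_owner, &device_index,
						  &fw_major, &fw_minor,
						  &fw_sub_minor);
		if (!data_valid)
			break;
		/* ... consume device_index and the FW version here ... */
	} while (msg_seq);

	return 0;
}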
#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
/* reg_mddq_slot_ascii_name
@@ -13011,6 +12923,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
+ MLXSW_REG(smpe),
MLXSW_REG(sftr2),
MLXSW_REG(smid2),
MLXSW_REG(cwtp),
@@ -13084,16 +12997,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
- MLXSW_REG(rxlte),
- MLXSW_REG(rxltm),
- MLXSW_REG(rlcmld),
- MLXSW_REG(rlpmce),
- MLXSW_REG(xltq),
- MLXSW_REG(xmdr),
- MLXSW_REG(xrmt),
- MLXSW_REG(xralta),
- MLXSW_REG(xralst),
- MLXSW_REG(xraltb),
+ MLXSW_REG(reiv),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
@@ -13124,9 +13028,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mtpcpc),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
MLXSW_REG(mbct),
+ MLXSW_REG(mddt),
MLXSW_REG(mddq),
MLXSW_REG(mddc),
MLXSW_REG(mfde),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index daacf6291253..19ae0d1c74a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -11,6 +11,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_PGT_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
@@ -23,6 +24,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
+ MLXSW_RES_ID_FID,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
@@ -69,6 +71,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_PGT_SIZE] = 0x1004,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
@@ -81,6 +84,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
+ [MLXSW_RES_ID_FID] = 0x2512,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index cafd206e8d7e..30c7b0e15721 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <linux/ptp_classify.h>
#include "spectrum.h"
#include "pci.h"
@@ -166,7 +167,7 @@ MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
* set, otherwise calculated based on the packet's VID using VID to FID mapping.
* Valid for data packets only.
*/
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
/* tx_hdr_type
* 0 - Data packets
@@ -230,8 +231,8 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
@@ -246,6 +247,82 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr;
+ u16 max_fid;
+ int err;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ err = -ENOMEM;
+ goto err_skb_cow_head;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+ err = -EIO;
+ goto err_res_valid;
+ }
+ max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ return 0;
+
+err_res_valid:
+err_skb_cow_head:
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+ unsigned int type;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ type = ptp_classify_raw(skb);
+ return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+ * need special handling and cannot be transmitted as regular control
+ * packets.
+ */
+ if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+ return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+ mlxsw_sp_port, skb,
+ tx_info);
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
switch (state) {
@@ -648,12 +725,6 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
@@ -664,7 +735,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- mlxsw_sp_txhdr_construct(skb, &tx_info);
+ err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+ &tx_info);
+ if (err)
+ return NETDEV_TX_OK;
+
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
@@ -1822,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
@@ -1999,7 +2074,6 @@ __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
for (i = 1; i < max_ports; i++)
@@ -2007,12 +2081,10 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
/* Make sure all scheduled events are processed */
__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
- devl_lock(devlink);
for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
- devl_unlock(devlink);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
}
@@ -2034,7 +2106,6 @@ mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
@@ -2057,7 +2128,6 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
goto err_event_enable;
}
- devl_lock(devlink);
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
@@ -2070,7 +2140,6 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_port_create;
}
- devl_unlock(devlink);
return 0;
err_port_create:
@@ -2080,7 +2149,6 @@ err_port_create:
i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
- devl_unlock(devlink);
err_event_enable:
for (i--; i >= 1; i--)
mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
@@ -2105,9 +2173,6 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
for (i = 1; i < max_ports; i++) {
- if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
- continue;
-
port_mapping = &mlxsw_sp->port_mapping[i];
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
@@ -2676,6 +2741,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2692,6 +2758,24 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3013,6 +3097,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_pgt_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
+ goto err_pgt_init;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3100,7 +3190,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_router_init;
}
- if (mlxsw_sp->bus_info->read_frc_capable) {
+ if (mlxsw_sp->bus_info->read_clock_capable) {
/* NULL is a valid return value from clock_init */
mlxsw_sp->clock =
mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
@@ -3204,6 +3294,8 @@ err_traps_init:
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
return err;
@@ -3235,7 +3327,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
+ mlxsw_sp->pgt_smpe_index_valid = true;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3267,7 +3361,9 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3299,7 +3395,9 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3323,7 +3421,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
- mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
@@ -3331,7 +3429,9 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3364,28 +3464,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
}
-/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
- * 802.1Q FIDs
- */
-#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
- VLAN_VID_MASK - 1)
-
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
@@ -3399,25 +3491,22 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvh_xlt_cache_mode = 1,
- .kvh_xlt_cache_mode = 1,
+ .used_ubridge = 1,
+ .ubridge = 1,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
static void
@@ -3477,19 +3566,19 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&hash_single_size_params);
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
if (err)
return err;
linear_size = profile->kvd_linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
- linear_size,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_RESOURCE_KVD,
- &linear_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &linear_size_params);
if (err)
return err;
@@ -3502,20 +3591,20 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
- double_size,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_double_size_params);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
- single_size,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_single_size_params);
if (err)
return err;
@@ -3536,10 +3625,10 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVD_GRANULARITY,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
}
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
@@ -3555,10 +3644,10 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&span_size_params, max_span, max_span,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
- max_span, MLXSW_SP_RESOURCE_SPAN,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &span_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
}
static int
@@ -3577,12 +3666,31 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
max_rif_mac_profiles, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- "rif_mac_profiles",
- max_rif_mac_profiles,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ return devl_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
+static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max_rifs;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
+ devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "rifs", max_rifs,
+ MLXSW_SP_RESOURCE_RIFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
}
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
@@ -3609,13 +3717,18 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3643,13 +3756,18 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3673,15 +3791,15 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
* granularity from the profile. In case the user
* provided the sizes, they are obtained via devlink.
*/
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- p_linear_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
if (err)
*p_linear_size = profile->kvd_linear_size;
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- p_double_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
if (err) {
double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_linear_size;
@@ -3692,9 +3810,9 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
MLXSW_SP_KVD_GRANULARITY);
}
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- p_single_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
if (err)
*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_double_size - *p_linear_size;
@@ -3807,6 +3925,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
+ .sdq_supports_cqe_v2 = false,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3845,6 +3964,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3883,6 +4003,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3919,6 +4040,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a60d2bbd3aa6..c8ff2a6d7e90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -68,6 +68,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ MLXSW_SP_RESOURCE_RIFS,
};
struct mlxsw_sp_port;
@@ -111,15 +112,6 @@ enum mlxsw_sp_nve_type {
MLXSW_SP_NVE_TYPE_VXLAN,
};
-struct mlxsw_sp_mid {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 fid;
- u16 mid;
- bool in_hw;
- unsigned long *ports_in_mid; /* bits array */
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -142,6 +134,7 @@ struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
+struct mlxsw_sp_pgt;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -210,10 +203,13 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
+ const struct mlxsw_sp_fid_family **fid_family_arr;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
+ struct mlxsw_sp_pgt *pgt;
+ bool pgt_smpe_index_valid;
};
struct mlxsw_sp_ptp_ops {
@@ -247,6 +243,10 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+ int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
};
static inline struct mlxsw_sp_upper *
@@ -389,6 +389,31 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+struct mlxsw_sp_ports_bitmap {
+ unsigned long *bitmap;
+ unsigned int nbits;
+};
+
+static inline int
+mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ ports_bm->nbits = nbits;
+ ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!ports_bm->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ bitmap_free(ports_bm->bitmap);
+}
+
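For illustration only (not part of the patch): a sketch of using the new ports bitmap helpers. set_bit() and for_each_set_bit() come from the kernel's bitmap API.

/* Illustrative sketch only: allocate a bitmap sized to the maximum number of
 * ports, mark a port, iterate the marked ports and release the bitmap.
 */
static inline void example_ports_bitmap_walk(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_ports_bitmap ports_bm;
	unsigned int port;

	if (mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm))
		return;
	set_bit(1, ports_bm.bitmap);	/* mark local port 1, for example */
	for_each_set_bit(port, ports_bm.bitmap, ports_bm.nbits) {
		/* act on each marked local port */
	}
	mlxsw_sp_port_bitmap_fini(&ports_bm);
}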
static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
bool *trap_en)
{
@@ -679,6 +704,12 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -715,6 +746,7 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1236,7 +1268,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
@@ -1264,7 +1295,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1286,6 +1318,9 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
+extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
@@ -1443,4 +1478,16 @@ int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+/* spectrum_pgt.c */
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member);
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index d20e794e01ca..1e3fc989393c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -216,8 +216,8 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
u64 resource_size;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
+ err = devl_resource_size_get(devlink, info->resource_id,
+ &resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
@@ -338,22 +338,22 @@ static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
if (err)
return err;
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp1_kvdl_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp1_kvdl_single_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp1_kvdl_chunks_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp1_kvdl_large_chunks_occ_get,
- kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
return 0;
}
@@ -362,14 +362,14 @@ static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl *kvdl = priv;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp1_kvdl_parts_fini(kvdl);
}
@@ -396,32 +396,32 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP1_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP1_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 10ae1115de6c..24ff305a2995 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -15,7 +15,7 @@ struct mlxsw_sp2_kvdl_part_info {
* usage bits we need and how many indexes there are
* represented by a single bit. This could be got from FW
* querying appropriate resources. So have the resource
- * ids for for this purpose in partition definition.
+ * ids for this purpose in partition definition.
*/
enum mlxsw_res_id usage_bit_count_res_id;
enum mlxsw_res_id index_range_res_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c68fc8f7ca99..c9f1c79f3f9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1290,12 +1290,12 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_mms_init;
mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
- err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
- mlxsw_sp->sb->sb_size,
- ing_pool_count,
- eg_pool_count,
- MLXSW_SP_SB_ING_TC_COUNT,
- MLXSW_SP_SB_EG_TC_COUNT);
+ err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ mlxsw_sp->sb->sb_size,
+ ing_pool_count,
+ eg_pool_count,
+ MLXSW_SP_SB_ING_TC_COUNT,
+ MLXSW_SP_SB_EG_TC_COUNT);
if (err)
goto err_devlink_sb_register;
@@ -1314,7 +1314,7 @@ err_sb_ports_init:
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
- devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
kfree(mlxsw_sp->sb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index fc2257753b9b..ee59c79156e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -67,16 +67,16 @@ static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
res_id);
- err = devlink_resource_size_get(devlink,
- sub_pool->resource_id,
- &sub_pool->size);
+ err = devl_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
if (err)
goto err_resource_size_get;
- devlink_resource_occ_get_register(devlink,
- sub_pool->resource_id,
- mlxsw_sp_counter_sub_pool_occ_get,
- sub_pool);
+ devl_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
sub_pool->base_index = base_index;
base_index += sub_pool->size;
@@ -88,8 +88,8 @@ err_resource_size_get:
for (i--; i >= 0; i--) {
sub_pool = &pool->sub_pools[i];
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
return err;
}
@@ -105,8 +105,8 @@ static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
sub_pool = &pool->sub_pools[i];
WARN_ON(atomic_read(&sub_pool->active_entries_count));
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
}
@@ -135,12 +135,12 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
spin_lock_init(&pool->counter_pool_lock);
atomic_set(&pool->active_entries_count, 0);
- err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- &pool->pool_size);
+ err = devl_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
if (err)
goto err_pool_resource_size_get;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- mlxsw_sp_counter_pool_occ_get, pool);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
if (!pool->usage) {
@@ -157,8 +157,8 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
err_sub_pools_init:
bitmap_free(pool->usage);
err_usage_alloc:
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
kfree(pool);
return err;
@@ -174,8 +174,8 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
pool->pool_size);
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
}
@@ -262,12 +262,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, pool_size,
pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- MLXSW_SP_RESOURCE_NAME_COUNTERS,
- pool_size,
- MLXSW_SP_RESOURCE_COUNTERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
@@ -287,12 +287,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, sub_pool_size,
sub_pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- sub_pool->resource_name,
- sub_pool_size,
- sub_pool->resource_id,
- MLXSW_SP_RESOURCE_COUNTERS,
- &size_params);
+ err = devl_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
if (err)
return err;
total_bank_config += sub_pool->bank_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5d494fabf93d..5416093c0e35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -295,17 +295,17 @@ static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
- &mlxsw_sp_erif_ops,
- mlxsw_sp, false);
+ return devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, false);
}
static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+ devl_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
}
static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type)
@@ -749,25 +749,25 @@ static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
return err;
}
@@ -775,8 +775,8 @@ static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
}
static int
@@ -826,25 +826,25 @@ static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
return err;
}
@@ -852,8 +852,8 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
}
static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
@@ -1231,25 +1231,25 @@ static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
return err;
}
@@ -1257,8 +1257,8 @@ static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
}
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
@@ -1266,10 +1266,8 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_headers_register(devlink,
- &mlxsw_sp_dpipe_headers);
- if (err)
- return err;
+ devl_dpipe_headers_register(devlink, &mlxsw_sp_dpipe_headers);
+
err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
if (err)
goto err_erif_table_init;
@@ -1294,7 +1292,7 @@ err_host6_table_init:
err_host4_table_init:
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
err_erif_table_init:
- devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ devl_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
return err;
}
@@ -1306,5 +1304,5 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
- devlink_dpipe_headers_unregister(devlink);
+ devl_dpipe_headers_unregister(devlink);
}
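
Note (illustration only, not part of the patch): the counter-resource and spectrum_dpipe.c hunks above, and the spectrum_policer.c hunks further down, convert driver init/fini paths from the devlink_* helpers to their devl_* counterparts. The devl_* flavor does not take the devlink instance lock internally; it assumes the caller already holds it. A minimal sketch of that calling convention for a caller outside a devlink callback follows; "example_counters" and EXAMPLE_RESOURCE_ID are invented for the example.

#include <net/devlink.h>

#define EXAMPLE_RESOURCE_ID	1	/* hypothetical driver-private resource id */

static int example_register_resource(struct devlink *devlink, u64 size)
{
	struct devlink_resource_size_params size_params;
	int err;

	devlink_resource_size_params_init(&size_params, size, size, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	/* devl_* helpers expect the instance lock to be held. */
	devl_lock(devlink);
	err = devl_resource_register(devlink, "example_counters", size,
				     EXAMPLE_RESOURCE_ID,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &size_params);
	devl_unlock(devlink);

	return err;
}
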
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index ce80931f0402..045a24cacfa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -22,11 +22,18 @@ struct mlxsw_sp_fid_core {
unsigned int *port_fid_mappings;
};
+struct mlxsw_sp_fid_port_vid {
+ struct list_head list;
+ u16 local_port;
+ u16 vid;
+};
+
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
refcount_t ref_count;
u16 fid_index;
+ u16 fid_offset;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -37,6 +44,7 @@ struct mlxsw_sp_fid {
int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
+ struct list_head port_vid_list; /* Ordered by local port. */
};
struct mlxsw_sp_fid_8021q {
@@ -63,7 +71,6 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_reg_sfgc_bridge_type bridge_type;
enum mlxsw_flood_table_type table_type;
int table_index;
};
@@ -76,18 +83,18 @@ struct mlxsw_sp_fid_ops {
u16 *p_fid_index);
bool (*compare)(const struct mlxsw_sp_fid *fid,
const void *arg);
- u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
int (*port_vid_map)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
- int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ int (*vni_set)(struct mlxsw_sp_fid *fid);
void (*vni_clear)(struct mlxsw_sp_fid *fid);
- int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
+ int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif);
};
struct mlxsw_sp_fid_family {
@@ -102,7 +109,10 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- u8 lag_vid_valid:1;
+ bool flood_rsp;
+ enum mlxsw_reg_bridge_type bridge_type;
+ u16 pgt_base;
+ bool smpe_index_valid;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -137,11 +147,6 @@ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
return fid_family->start_index == fid_index;
}
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->lag_vid_valid;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
@@ -206,17 +211,20 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
int err;
- if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ if (WARN_ON(fid->nve_flood_index_valid))
return -EINVAL;
- err = ops->nve_flood_index_set(fid, nve_flood_index);
- if (err)
- return err;
-
fid->nve_flood_index = nve_flood_index;
fid->nve_flood_index_valid = true;
+ err = ops->nve_flood_index_set(fid);
+ if (err)
+ goto err_nve_flood_index_set;
return 0;
+
+err_nve_flood_index_set:
+ fid->nve_flood_index_valid = false;
+ return err;
}
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
@@ -224,7 +232,7 @@ void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
- if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ if (WARN_ON(!fid->nve_flood_index_valid))
return;
fid->nve_flood_index_valid = false;
@@ -244,7 +252,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
int err;
- if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ if (WARN_ON(fid->vni_valid))
return -EINVAL;
fid->nve_type = type;
@@ -256,15 +264,15 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
if (err)
return err;
- err = ops->vni_set(fid, vni);
+ fid->vni_valid = true;
+ err = ops->vni_set(fid);
if (err)
goto err_vni_set;
- fid->vni_valid = true;
-
return 0;
err_vni_set:
+ fid->vni_valid = false;
rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
mlxsw_sp_fid_vni_ht_params);
return err;
@@ -276,7 +284,7 @@ void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ if (WARN_ON(!fid->vni_valid))
return;
fid->vni_valid = false;
@@ -316,34 +324,43 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
return NULL;
}
+static u16
+mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
+{
+ return fid_family->end_index - fid_family->start_index + 1;
+}
+
+static u16
+mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 fid_offset)
+{
+ u16 num_fids;
+
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ return fid_family->pgt_base + num_fids * flood_table->table_index +
+ fid_offset;
+}
+
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr2_pl;
- int err;
+ u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ if (WARN_ON(!fid_family->flood_tables))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
- if (!sftr2_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
- sftr2_pl);
- kfree(sftr2_pl);
- return err;
+ mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
+ fid->fid_offset);
+ return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
+ fid->fid_index, local_port, member);
}
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
@@ -370,11 +387,6 @@ enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
return fid->fid_family->type;
}
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
-{
- fid->rif = rif;
-}
-
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
{
return fid->rif;
@@ -405,6 +417,7 @@ static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -413,38 +426,341 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 fid_offset, bool valid)
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
- fid_offset);
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
+ fid->fid_offset, fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- __be32 vni, bool vni_valid, u32 nve_flood_index,
- bool nve_flood_index_valid)
+static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ fid->fid_index, fid->fid_offset,
+ fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
+
+ if (rif) {
+ mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
+ mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
+ }
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
- 0);
- mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
- mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
- mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
- mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif,
+ bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
+ be32_to_cpu(fid->vni), irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_edit_op(fid, rif);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ if (!fid->vni_valid)
+ return 0;
+
+ return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
+}
+
+static int
+mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
+ irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int
+mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ /* Update the global VID => FID mapping we created when the FID was
+ * configured.
+ */
+ return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
+}
+
+static int
+mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_fid_port_vid *pv,
+ bool irif_valid, u16 irif_index)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
+ fid->fid_index, pv->vid, irif_valid,
+ irif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+ u16 irif_index;
+ int err;
+
+ err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ irif_index = mlxsw_sp_rif_index(rif);
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated with the
+ * ingress RIF.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
+ true,
+ irif_index);
+ if (err)
+ goto err_port_vid_to_fid_rif_update_one;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_rif_update_one:
+ list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
+ bool valid, u8 port_page)
+{
+ u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
+ u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *port_vid;
+ u8 rec_num, entries_num = 0;
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+
+ list_for_each_entry(port_vid, &fid->port_vid_list, list) {
+ /* port_vid_list is sorted by local_port. */
+ if (port_vid->local_port < local_port_start)
+ continue;
+
+ if (port_vid->local_port > local_port_end)
+ break;
+
+ rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
+ valid ? port_vid->vid : 0);
+ entries_num++;
+ }
+
+ if (!entries_num) {
+ kfree(reiv_pl);
+ return 0;
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ if (err)
+ goto err_reg_write;
+
+ kfree(reiv_pl);
+ return 0;
+
+err_reg_write:
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
+ u16 rif_index, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u8 num_port_pages;
+ int err, i;
+
+ num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
+ MLXSW_REG_REIV_REC_MAX_COUNT + 1;
+
+ for (i = 0; i < num_port_pages; i++) {
+ err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
+ if (err)
+ goto err_reiv_handle;
+ }
+
+ return 0;
+
+err_reiv_handle:
+ for (; i >= 0; i--)
+ mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
+ return err;
+}
+
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ u16 rif_index = mlxsw_sp_rif_index(rif);
+ int err;
+
+ err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
+ if (err)
+ goto err_vni_to_fid_rif_update;
+
+ err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
+ if (err)
+ goto err_vid_to_fid_rif_set;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
+ if (err)
+ goto err_erif_eport_to_vid_map;
+
+ fid->rif = rif;
+ return 0;
+
+err_erif_eport_to_vid_map:
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+err_vid_to_fid_rif_set:
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+err_vni_to_fid_rif_update:
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
+{
+ u16 rif_index;
+
+ if (!fid->rif)
+ return;
+
+ rif_index = mlxsw_sp_rif_index(fid->rif);
+ fid->rif = NULL;
+
+ mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_edit_op(fid, fid->rif);
+ if (err)
+ goto err_fid_edit_op;
+
+ return 0;
+
+err_fid_edit_op:
+ mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
+ return err;
+}
+
+static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid = false;
+ u16 irif_index = 0;
+
+ if (fid->rif) {
+ irif_valid = true;
+ irif_index = mlxsw_sp_rif_index(fid->rif);
+ }
- mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
+ vid, irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
@@ -459,20 +775,19 @@ static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
if (fid->vni_valid)
mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
@@ -498,14 +813,8 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
}
-static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index - VLAN_N_VID;
-}
-
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
int err;
@@ -517,7 +826,7 @@ static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ err = __mlxsw_sp_fid_port_vid_map(fid,
mlxsw_sp_port->local_port,
vid, true);
if (err)
@@ -540,8 +849,7 @@ err_fid_port_vid_map:
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
return err;
@@ -549,7 +857,6 @@ err_fid_port_vid_map:
static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -562,12 +869,108 @@ static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
}
+static int
+mlxsw_sp_fid_port_vid_list_add(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp_port_vid;
+
+ port_vid = kzalloc(sizeof(*port_vid), GFP_KERNEL);
+ if (!port_vid)
+ return -ENOMEM;
+
+ port_vid->local_port = local_port;
+ port_vid->vid = vid;
+
+ list_for_each_entry(tmp_port_vid, &fid->port_vid_list, list) {
+ if (tmp_port_vid->local_port > local_port)
+ break;
+ }
+
+ list_add_tail(&port_vid->list, &tmp_port_vid->list);
+ return 0;
+}
+
+static void
+mlxsw_sp_fid_port_vid_list_del(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
+
+ list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
+ if (port_vid->local_port != local_port || port_vid->vid != vid)
+ continue;
+
+ list_del(&port_vid->list);
+ kfree(port_vid);
+ return;
+ }
+}
+
+static int
+mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char smpe_pl[MLXSW_REG_SMPE_LEN];
+
+ mlxsw_reg_smpe_pack(smpe_pl, local_port, fid->fid_index,
+ valid ? vid : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
+}
+
+static int
+mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
+ u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = mlxsw_sp_rif_index(fid->rif);
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
+ if (err)
+ return err;
+
+ if (!fid->rif)
+ return 0;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ valid);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+
+ return 0;
+
+err_erif_eport_to_vid_map_one:
+ mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
+ return err;
+}
+
static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
@@ -576,11 +979,20 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, true);
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
if (err)
return err;
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -591,8 +1003,11 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
return err;
}
@@ -606,43 +1021,29 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
}
-static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
- true, fid->nve_flood_index,
- fid->nve_flood_index_valid);
+ return mlxsw_sp_fid_vni_op(fid);
}
static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
- fid->nve_flood_index, fid->nve_flood_index_valid);
+ mlxsw_sp_fid_vni_op(fid);
}
-static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index)
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
- fid->vni, fid->vni_valid, nve_flood_index,
- true);
+ return mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
- fid->vni_valid, 0, false);
+ mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void
@@ -652,13 +1053,19 @@ mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, 0);
}
+static int
+mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021d_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
@@ -666,42 +1073,32 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
};
+#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
+#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
+#define MLXSW_SP_FID_8021Q_PGT_BASE 0
+#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
+
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
.packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 0,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 1,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 2,
},
};
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
- .type = MLXSW_SP_FID_TYPE_8021D,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
- .start_index = VLAN_N_VID,
- .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
- .lag_vid_valid = 1,
-};
-
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -717,48 +1114,19 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021d_configure,
- .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
- .vni_set = mlxsw_sp_fid_8021d_vni_set,
- .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
- .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
- .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
- .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
-};
-
-/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
-#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
-#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
- VLAN_VID_MASK - 2)
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = MLXSW_SP_FID_8021Q_EMU_START,
- .end_index = MLXSW_SP_FID_8021Q_EMU_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_emu_ops,
- .lag_vid_valid = 1,
-};
+static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
{
- /* rFIDs are allocated by the device during init */
- return 0;
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
{
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
@@ -787,9 +1155,28 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- /* We only need to transition the port to virtual mode since
- * {Port, VID} => FID is done by the firmware upon RIF creation.
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ return err;
+
+ /* Using legacy bridge model, we only need to transition the port to
+ * virtual mode since {Port, VID} => FID is done by the firmware upon
+ * RIF creation. Using unified bridge model, we need to map
+ * {Port, VID} => FID and map egress VID.
*/
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ goto err_port_vid_map;
+
+ if (fid->rif) {
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
+ vid, true);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+ }
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -800,6 +1187,13 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+err_erif_eport_to_vid_map_one:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+err_port_vid_map:
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
return err;
}
@@ -813,39 +1207,69 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+}
+
+static int mlxsw_sp_fid_rfid_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_rfid_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int
+mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .setup = mlxsw_sp_fid_rfid_setup,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
.compare = mlxsw_sp_fid_rfid_compare,
.port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
};
-#define MLXSW_SP_RFID_BASE (15 * 1024)
-#define MLXSW_SP_RFID_MAX 1024
-
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
- .type = MLXSW_SP_FID_TYPE_RFID,
- .fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE,
- .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
- .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
-};
+static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
-
- return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
{
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
@@ -862,26 +1286,252 @@ static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
return true;
}
+static int mlxsw_sp_fid_dummy_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_dummy_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .setup = mlxsw_sp_fid_dummy_setup,
.configure = mlxsw_sp_fid_dummy_configure,
.deconfigure = mlxsw_sp_fid_dummy_deconfigure,
.index_alloc = mlxsw_sp_fid_dummy_index_alloc,
.compare = mlxsw_sp_fid_dummy_compare,
+ .vni_set = mlxsw_sp_fid_dummy_vni_set,
+ .vni_clear = mlxsw_sp_fid_dummy_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+};
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ int err;
+
+ err = mlxsw_sp_fid_op(fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
+ if (err)
+ goto err_vid_to_fid_map;
+
+ return 0;
+
+err_vid_to_fid_map:
+ mlxsw_sp_fid_op(fid, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+
+ mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured, otherwise, configure new mapping.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
+ err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ return 0;
+
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+};
+
+/* There are 4K-2 802.1Q FIDs */
+#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
+#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
+ MLXSW_SP_FID_8021Q_MAX - 1)
+
+/* There are 1K 802.1D FIDs */
+#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
+#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
+ MLXSW_SP_FID_8021D_MAX - 1)
+
+/* There is one dummy FID */
+#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
+
+/* There are 11K rFIDs */
+#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
+#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
+ MLXSW_SP_FID_RFID_MAX - 1)
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+ .flood_rsp = true,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = VLAN_N_VID - 1,
- .end_index = VLAN_N_VID - 1,
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
- [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -919,6 +1569,8 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fid->port_vid_list);
fid->fid_family = fid_family;
err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
@@ -927,8 +1579,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- if (fid->fid_family->ops->setup)
- fid->fid_family->ops->setup(fid, arg);
+ fid->fid_family->ops->setup(fid, arg);
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -967,6 +1618,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
fid->fid_family->ops->deconfigure(fid);
__clear_bit(fid->fid_index - fid_family->start_index,
fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid->port_vid_list));
kfree(fid);
}
@@ -1010,26 +1662,49 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
const struct mlxsw_sp_flood_table *flood_table)
{
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
const int *sfgc_packet_types;
- int i;
+ u16 num_fids, mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
+ if (err)
+ return err;
sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
char sfgc_pl[MLXSW_REG_SFGC_LEN];
- int err;
if (!sfgc_packet_types[i])
continue;
- mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
- flood_table->table_type,
- flood_table->table_index);
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
- return err;
+ goto err_reg_write;
}
return 0;
+
+err_reg_write:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 num_fids, mid_base;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
}
static int
@@ -1050,6 +1725,19 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
return 0;
}
+static void
+mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_tables[i];
+ mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
+ }
+}
+
static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fid_family *tmpl)
{
@@ -1091,6 +1779,10 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid_family *fid_family)
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+
+ if (fid_family->flood_tables)
+ mlxsw_sp_fid_flood_tables_fini(fid_family);
+
bitmap_free(fid_family->fids_bitmap);
WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
kfree(fid_family);
@@ -1144,7 +1836,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp_fid_family_arr[i]);
+ mlxsw_sp->fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
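
With the PGT-based flood tables introduced above, the MID of a FID's flood entry is computed arithmetically rather than looked up: mid = pgt_base + num_fids * table_index + fid_offset (see mlxsw_sp_fid_flood_table_mid()). Since MLXSW_SP_FID_8021Q_MAX is VLAN_N_VID - 2 = 4094, the 802.1Q family's three flood tables (UC/MC/BC) cover PGT indexes 0..12281, and MLXSW_SP_FID_8021D_PGT_BASE starts the 802.1D range right after, at 3 * 4094 = 12282. A small user-space sketch (illustration only, not driver code) reproducing that arithmetic:

#include <stdio.h>

#define VLAN_N_VID			4096
#define MLXSW_SP_FID_8021Q_MAX		(VLAN_N_VID - 2)	/* 4094 */
#define MLXSW_SP_FID_8021Q_PGT_BASE	0
#define MLXSW_SP_FID_8021D_PGT_BASE	(3 * MLXSW_SP_FID_8021Q_MAX)	/* 12282 */

/* Same formula as mlxsw_sp_fid_flood_table_mid(). */
static unsigned int flood_table_mid(unsigned int pgt_base,
				    unsigned int num_fids,
				    unsigned int table_index,
				    unsigned int fid_offset)
{
	return pgt_base + num_fids * table_index + fid_offset;
}

int main(void)
{
	/* 802.1Q FID at offset 5, MC flood table (table_index 1). */
	printf("%u\n", flood_table_mid(MLXSW_SP_FID_8021Q_PGT_BASE,
				       MLXSW_SP_FID_8021Q_MAX, 1, 5));
	/* Prints 4099; the 802.1D family's range starts at 12282. */
	return 0;
}
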
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
new file mode 100644
index 000000000000..7dd3dba0fa83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/refcount.h>
+#include <linux/idr.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_pgt {
+ struct idr pgt_idr;
+ u16 end_index; /* Exclusive. */
+ struct mutex lock; /* Protects PGT. */
+ bool smpe_index_valid;
+};
+
+struct mlxsw_sp_pgt_entry {
+ struct list_head ports_list;
+ u16 index;
+ u16 smpe_index;
+};
+
+struct mlxsw_sp_pgt_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+};
+
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
+{
+ int index, err = 0;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
+ mlxsw_sp->pgt->end_index, GFP_KERNEL);
+
+ if (index < 0) {
+ err = index;
+ goto err_idr_alloc;
+ }
+
+ *p_mid = index;
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
+{
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int
+mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ unsigned int idr_cursor;
+ int i, err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ /* This function is supposed to be called several times as part of
+ * driver init, in specific order. Verify that the mid_index is the
+ * first free index in the idr, to be able to free the indexes in case
+ * of error.
+ */
+ idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
+ if (WARN_ON(idr_cursor != mid_base)) {
+ err = -EINVAL;
+ goto err_idr_cursor;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
+ mid_base, mid_base + count, GFP_KERNEL);
+ if (err < 0)
+ goto err_idr_alloc_cyclic;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc_cyclic:
+ for (i--; i >= 0; i--)
+ idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
+err_idr_cursor:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void
+mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
+ int i;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ for (i = 0; i < count; i++)
+ WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+
+ list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
+ if (pgt_entry_port->local_port == local_port)
+ return pgt_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ void *ret;
+ int err;
+
+ pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
+ if (!pgt_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto err_idr_replace;
+ }
+
+ INIT_LIST_HEAD(&pgt_entry->ports_list);
+ pgt_entry->index = mid;
+ pgt_entry->smpe_index = smpe;
+ return pgt_entry;
+
+err_idr_replace:
+ kfree(pgt_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
+ struct mlxsw_sp_pgt_entry *pgt_entry)
+{
+ WARN_ON(!list_empty(&pgt_entry->ports_list));
+
+ pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
+ if (WARN_ON(IS_ERR(pgt_entry)))
+ return;
+
+ kfree(pgt_entry);
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (pgt_entry)
+ return pgt_entry;
+
+ return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
+}
+
+static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (WARN_ON(!pgt_entry))
+ return;
+
+ if (list_empty(&pgt_entry->ports_list))
+ mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
+}
+
+static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
+ bool member)
+{
+ mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
+}
+
+static int
+mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port, bool member)
+{
+ char *smid2_pl;
+ int err;
+
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
+ mlxsw_sp->pgt->smpe_index_valid,
+ pgt_entry->smpe_index);
+
+ mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+
+ kfree(smid2_pl);
+
+ return err;
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ int err;
+
+ pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
+ if (!pgt_entry_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
+ true);
+ if (err)
+ goto err_pgt_entry_port_write;
+
+ pgt_entry_port->local_port = local_port;
+ list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
+
+ return pgt_entry_port;
+
+err_pgt_entry_port_write:
+ kfree(pgt_entry_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
+
+{
+ list_del(&pgt_entry_port->list);
+ mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
+ pgt_entry_port->local_port, false);
+ kfree(pgt_entry_port);
+}
+
+static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ int err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
+ if (IS_ERR(pgt_entry)) {
+ err = PTR_ERR(pgt_entry);
+ goto err_pgt_entry_get;
+ }
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
+ local_port);
+ if (IS_ERR(pgt_entry_port)) {
+ err = PTR_ERR(pgt_entry_port);
+ goto err_pgt_entry_port_get;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_pgt_entry_port_get:
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+err_pgt_entry_get:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
+ u16 mid, u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
+ if (!pgt_entry)
+ goto out;
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
+ if (!pgt_entry_port)
+ goto out;
+
+ mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+
+out:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member)
+{
+ if (member)
+ return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
+ local_port);
+
+ mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
+ return 0;
+}
+
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_pgt *pgt;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
+ return -EIO;
+
+ pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
+ if (!pgt)
+ return -ENOMEM;
+
+ idr_init(&pgt->pgt_idr);
+ pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
+ mutex_init(&pgt->lock);
+ pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
+ mlxsw_sp->pgt = pgt;
+ return 0;
+}
+
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->pgt->lock);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
+ idr_destroy(&mlxsw_sp->pgt->pgt_idr);
+ kfree(mlxsw_sp->pgt);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
index 39052e5c12fd..22ebb207ce4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
@@ -94,10 +94,10 @@ mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family)
atomic_set(&family->policers_count, 0);
devlink = priv_to_devlink(core);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- mlxsw_sp_policer_single_rate_occ_get,
- family);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ mlxsw_sp_policer_single_rate_occ_get,
+ family);
return 0;
}
@@ -107,8 +107,8 @@ mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family)
{
struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
WARN_ON(atomic_read(&family->policers_count) != 0);
}
@@ -419,22 +419,22 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, global_policers,
global_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "global_policers",
- global_policers,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink, "global_policers",
+ global_policers,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, single_rate_policers,
single_rate_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "single_rate_policers",
- single_rate_policers,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- &size_params);
+ err = devl_resource_register(devlink, "single_rate_policers",
+ single_rate_policers,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ &size_params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 35422e64d89f..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -11,6 +11,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "spectrum_ptp.h"
@@ -29,12 +30,25 @@
struct mlxsw_sp_ptp_state {
struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp1_ptp_state {
+ struct mlxsw_sp_ptp_state common;
struct rhltable unmatched_ht;
spinlock_t unmatched_lock; /* protects the HT */
struct delayed_work ht_gc_dw;
u32 gc_cycle;
};
+struct mlxsw_sp2_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+ * enabled.
+ */
+ struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
+};
+
struct mlxsw_sp1_ptp_key {
u16 local_port;
u8 message_type;
@@ -60,20 +74,44 @@ static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
struct mlxsw_sp_ptp_clock {
struct mlxsw_core *core;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+};
+
+struct mlxsw_sp1_ptp_clock {
+ struct mlxsw_sp_ptp_clock common;
spinlock_t lock; /* protect this structure */
struct cyclecounter cycles;
struct timecounter tc;
u32 nominal_c_mult;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
unsigned long overflow_period;
struct delayed_work overflow_work;
};
-static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
+static struct mlxsw_sp1_ptp_state *
+mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp1_ptp_clock *
+mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
+}
+
+static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u32 frc_h1, frc_h2, frc_l;
frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
@@ -94,20 +132,20 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(cc, struct mlxsw_sp_ptp_clock, cycles);
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}
static int
-mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
+mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
struct mlxsw_core *mlxsw_core = clock->core;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
- freq_adj, 0);
+ freq_adj, 0, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
@@ -122,9 +160,9 @@ static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
}
static int
-mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u64 next_sec, next_sec_in_nsec, cycles;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
char mtpps_pl[MLXSW_REG_MTPPS_LEN];
@@ -144,14 +182,13 @@ mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
mlxsw_reg_mtutc_pack(mtutc_pl,
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
- 0, next_sec);
+ 0, next_sec, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
int neg_adj = 0;
u32 diff;
u64 adj;
@@ -174,13 +211,12 @@ static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
clock->nominal_c_mult + diff;
spin_unlock_bh(&clock->lock);
- return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
+ return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
}
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec;
spin_lock_bh(&clock->lock);
@@ -195,8 +231,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 cycles, nsec;
spin_lock_bh(&clock->lock);
@@ -212,8 +247,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec = timespec64_to_ns(ts);
spin_lock_bh(&clock->lock);
@@ -237,9 +271,9 @@ static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
- clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);
+ clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
spin_lock_bh(&clock->lock);
timecounter_read(&clock->tc);
@@ -251,7 +285,7 @@ struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
u64 overflow_cycles, nsec, frac = 0;
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
int err;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
@@ -265,10 +299,9 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
- clock->core = mlxsw_sp->core;
+ clock->common.core = mlxsw_sp->core;
- timecounter_init(&clock->tc, &clock->cycles,
- ktime_to_ns(ktime_get_real()));
+ timecounter_init(&clock->tc, &clock->cycles, 0);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -286,7 +319,158 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
mlxsw_core_schedule_dw(&clock->overflow_work, 0);
- clock->ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
+ if (IS_ERR(clock->common.ptp)) {
+ err = PTR_ERR(clock->common.ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return &clock->common;
+
+err_ptp_clock_register:
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
+{
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
+ ptp_clock_unregister(clock_common->ptp);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+}
+
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 utc_sec1, utc_sec2, utc_nsec;
+
+ utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (utc_sec1 != utc_sec2) {
+ /* Wrap around. */
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ u32 sec, nsec_rem;
+
+ sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+ 0, sec, nsec_rem, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ /* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+ * reversed; positive values decrease the frequency. Adjust the sign of
+ * PPB to match this behavior.
+ */
+ return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ /* HW time adjustment range is s16. If out of range, set time instead. */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+ nsec += delta;
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+ }
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+ 0, 0, 0, delta);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+ .adjfine = mlxsw_sp2_ptp_adjfine,
+ .adjtime = mlxsw_sp2_ptp_adjtime,
+ .gettimex64 = mlxsw_sp2_ptp_gettimex,
+ .settime64 = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->core = mlxsw_sp->core;
+
+ clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+ err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+ if (err) {
+ dev_err(dev, "setting UTC time failed %d\n", err);
+ goto err_ptp_phc_settime;
+ }
+
clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
if (IS_ERR(clock->ptp)) {
err = PTR_ERR(clock->ptp);
@@ -297,15 +481,14 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
return clock;
err_ptp_clock_register:
- cancel_delayed_work_sync(&clock->overflow_work);
+err_ptp_phc_settime:
kfree(clock);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
ptp_clock_unregister(clock->ptp);
- cancel_delayed_work_sync(&clock->overflow_work);
kfree(clock);
}
@@ -348,7 +531,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
u64 timestamp)
{
int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
- struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int err;
@@ -359,7 +542,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
unmatched->key = key;
unmatched->skb = skb;
unmatched->timestamp = timestamp;
- unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
+ unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
@@ -373,11 +556,12 @@ static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key, int *p_length)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
struct rhlist_head *tmp, *list;
int length = 0;
- list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
mlxsw_sp1_ptp_unmatched_ht_params);
rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
last = unmatched;
@@ -392,7 +576,9 @@ static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
- return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+
+ return rhltable_remove(&ptp_state->unmatched_ht,
&unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
}
@@ -438,12 +624,16 @@ static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb,
u64 timestamp)
{
+ struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
struct skb_shared_hwtstamps hwtstamps;
u64 nsec;
- spin_lock_bh(&mlxsw_sp->clock->lock);
- nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp);
- spin_unlock_bh(&mlxsw_sp->clock->lock);
+ spin_lock_bh(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ spin_unlock_bh(&clock->lock);
hwtstamps.hwtstamp = ns_to_ktime(nsec);
mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
@@ -481,13 +671,14 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb, u64 timestamp)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int length;
int err;
rcu_read_lock();
- spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_lock(&ptp_state->unmatched_lock);
unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
if (skb && unmatched && unmatched->timestamp) {
@@ -515,7 +706,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(err);
}
- spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_unlock(&ptp_state->unmatched_lock);
if (unmatched)
mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
@@ -611,9 +802,10 @@ void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
+mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
+ struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
struct mlxsw_sp_ptp_port_dir_stats *stats;
struct mlxsw_sp_port *mlxsw_sp_port;
int err;
@@ -636,7 +828,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
/* The packet was matched with timestamp during the walk. */
goto out;
- mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
+ mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
if (mlxsw_sp_port) {
stats = unmatched->key.ingress ?
&mlxsw_sp_port->ptp.stats.rx_gcd :
@@ -653,7 +845,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
* netif_receive_skb(), in process context, is seen elsewhere in the
* kernel, notably in pktgen.
*/
- mlxsw_sp1_ptp_unmatched_finish(ptp_state->mlxsw_sp, unmatched);
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
out:
local_bh_enable();
@@ -663,12 +855,12 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp1_ptp_unmatched *unmatched;
- struct mlxsw_sp_ptp_state *ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state;
struct rhashtable_iter iter;
u32 gc_cycle;
void *obj;
- ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
+ ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
gc_cycle = ptp_state->gc_cycle++;
rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
@@ -694,7 +886,7 @@ static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
{
char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
- mlxsw_reg_mtptptp_pack(mtptpt_pl, trap_id, message_type);
+ mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
}
@@ -807,10 +999,44 @@ static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 event_message_type;
+ int err;
+
+ /* Deliver these message types as PTP0. */
+ event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ event_message_type);
+ if (err)
+ return err;
+
+ /* Everything else is PTP1. */
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ ~event_message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ return 0;
+
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
+
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_ptp_state *ptp_state;
- u16 message_type;
+ struct mlxsw_sp1_ptp_state *ptp_state;
int err;
err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
@@ -820,7 +1046,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
- ptp_state->mlxsw_sp = mlxsw_sp;
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
spin_lock_init(&ptp_state->unmatched_lock);
@@ -829,22 +1055,9 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_hashtable_init;
- /* Delive these message types as PTP0. */
- message_type = BIT(PTP_MSGTYPE_SYNC) |
- BIT(PTP_MSGTYPE_DELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_RESP);
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
- message_type);
- if (err)
- goto err_mtptpt_set;
-
- /* Everything else is PTP1. */
- message_type = ~message_type;
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
- message_type);
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
if (err)
- goto err_mtptpt1_set;
+ goto err_ptp_traps_set;
err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
if (err)
@@ -853,28 +1066,28 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
MLXSW_SP1_PTP_HT_GC_INTERVAL);
- return ptp_state;
+ return &ptp_state->common;
err_fifo_clr:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-err_mtptpt1_set:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-err_mtptpt_set:
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
{
- struct mlxsw_sp *mlxsw_sp = ptp_state->mlxsw_sp;
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp1_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
rhltable_free_and_destroy(&ptp_state->unmatched_ht,
&mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
@@ -887,9 +1100,10 @@ int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
- u16 *p_ing_types, u16 *p_egr_types,
- enum hwtstamp_rx_filters *p_rx_filter)
+static int
+mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
{
enum hwtstamp_rx_filters rx_filter = config->rx_filter;
enum hwtstamp_tx_types tx_type = config->tx_type;
@@ -1050,8 +1264,8 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 egr_types;
int err;
- err = mlxsw_sp_ptp_get_message_types(config, &ing_types, &egr_types,
- &rx_filter);
+ err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
+ &rx_filter);
if (err)
return err;
@@ -1144,3 +1358,369 @@ void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
*data++ = *(u64 *)(stats + offset);
}
}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
+ return &ptp_state->common;
+
+err_ptp_traps_set:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+ mutex_destroy(&ptp_state->lock);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+ u8 cqe_ts_sec)
+{
+ u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (cqe_ts_sec > (utc_sec & 0xff))
+ /* A time stamp above the low byte of UTC (UTC & 0xff) means the
+ * latter has wrapped after the time stamp was collected.
+ */
+ utc_sec -= 256;
+
+ utc_sec &= ~0xff;
+ utc_sec |= cqe_ts_sec;
+
+ return utc_sec;
+}
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_skb_cb *cb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ u64 ts_sec, ts_nsec, nsec;
+
+ WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+ /* The time stamp in the CQE is represented by 38 bits, which is a short
+ * representation of UTC time. Software should create the full time
+ * stamp using the global UTC clock. The seconds have only 8 bits in the
+ * CQE; to create the full time stamp, use the current UTC time and fix
+ * the seconds according to the relation between the UTC seconds and the
+ * CQE seconds.
+ */
+ ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+ ts_nsec = cb->cqe_ts.nsec;
+
+ nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ *skb_hwtstamps(skb) = hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ skb_tstamp_tx(skb, &hwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ mutex_lock(&ptp_state->lock);
+ *config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ *p_rx_filter = rx_filter;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* In Spectrum-2 and above, all packets are time stamped by
+ * default and the driver fills in the time stamp only for event
+ * packets. Return all event types even if only specific types
+ * were requested.
+ */
+ ing_types = 0x0f;
+ *p_rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0x0f;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+ mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+ egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+ u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+ egr_types, new_config);
+ if (err)
+ return err;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+ if (err)
+ goto err_ptp_disable;
+
+ return 0;
+
+err_ptp_disable:
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+ return err;
+}
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ enum hwtstamp_rx_filters rx_filter;
+ struct hwtstamp_config new_config;
+ u16 new_ing_types, new_egr_types;
+ bool ptp_enabled;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
+ err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+ &new_egr_types, &rx_filter);
+ if (err)
+ goto err_get_message_types;
+
+ new_config.flags = config->flags;
+ new_config.tx_type = config->tx_type;
+ new_config.rx_filter = rx_filter;
+
+ ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+ mlxsw_sp_port->ptp.egr_types;
+
+ if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+ err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+ new_egr_types, new_config);
+ if (err)
+ goto err_configure_port;
+ } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+ err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+ if (err)
+ goto err_deconfigure_port;
+ }
+
+ mlxsw_sp_port->ptp.ing_types = new_ing_types;
+ mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
+
+ return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
+}
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+ * their correction field correctly set on the egress port, they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return -ENOMEM;
+ }
+ }
+
+ return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+ tx_info);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c06cd1384bca..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -57,6 +57,40 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -136,7 +170,15 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index)
{
}
-#endif
+
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
@@ -184,16 +226,26 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
-{
-}
-
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
static inline int mlxsw_sp2_get_stats_count(void)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 0d8a0068e4ca..2c4443c6b964 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -443,65 +443,12 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
-static struct mlxsw_sp_fib_entry_priv *
-mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- struct mlxsw_sp_fib_entry_priv *priv;
-
- if (!ll_ops->fib_entry_priv_size)
- /* No need to have priv */
- return NULL;
-
- priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- refcount_set(&priv->refcnt, 1);
- return priv;
-}
-
-static void
-mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
-{
- kfree(priv);
-}
-
-static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
-{
- refcount_inc(&priv->refcnt);
-}
-
-static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv || !refcount_dec_and_test(&priv->refcnt))
- return;
- mlxsw_sp_fib_entry_priv_destroy(priv);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv)
- return;
- mlxsw_sp_fib_entry_priv_hold(priv);
- list_add(&priv->list, &op_ctx->fib_entry_priv_list);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_priv *priv, *tmp;
-
- list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
- mlxsw_sp_fib_entry_priv_put(priv);
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
- struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
@@ -537,7 +484,6 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
- const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -551,45 +497,16 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
-static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- return 0;
-}
-
-static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
- xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
- xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
-}
-
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
- err = ll_ops->init(mlxsw_sp, vr->id, proto);
- if (err)
- return ERR_PTR(err);
-
lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
@@ -601,7 +518,6 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
- fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -640,36 +556,33 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralst_pl[MLXSW_REG_XRALST_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -677,20 +590,19 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -701,11 +613,12 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -716,15 +629,14 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -732,7 +644,6 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -746,7 +657,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -757,11 +668,8 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops =
- mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
-
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -851,23 +759,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -5384,7 +5292,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
{
const struct fib_nh *nh = fib_info_nh(fi, 0);
- return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ return nh->fib_nh_gw_family ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
@@ -5780,14 +5688,13 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -5795,140 +5702,39 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
-struct mlxsw_sp_fib_entry_op_ctx_basic {
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-};
-
static void
-mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
+mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
+ const struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
- enum mlxsw_reg_ralxx_protocol ralxx_proto;
- char *ralue_pl = op_ctx_basic->ralue_pl;
- enum mlxsw_reg_ralue_op ralue_op;
-
- ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+ enum mlxsw_reg_ralxx_protocol proto;
+ u32 *p_dip;
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
+ proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
- switch (proto) {
+ switch (fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, (u32 *) addr);
+ p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ *p_dip);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr);
break;
}
}
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
-}
-
-static int
-mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- op_ctx_basic->ralue_pl);
-}
-
-static bool
-mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- return true;
-}
-
-static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
-{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
-
- mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
- fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr,
- fib_entry->priv);
-}
-
-static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- bool postponed_for_bulk = false;
- int err;
-
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
- if (!postponed_for_bulk)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -5951,20 +5757,19 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -5976,64 +5781,61 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
int err;
if (WARN_ON(!ipip_entry))
@@ -6045,55 +5847,54 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
+ fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
if (err)
return err;
@@ -6103,35 +5904,18 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- bool is_new)
-{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
- MLXSW_SP_FIB_ENTRY_OP_UPDATE);
-}
-
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
-
- if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
- return 0;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- MLXSW_SP_FIB_ENTRY_OP_DELETE);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
@@ -6226,12 +6010,6 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
@@ -6260,8 +6038,6 @@ err_fib4_entry_type_set:
err_nexthop_group_vr_link:
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -6276,7 +6052,6 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -6514,16 +6289,14 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_update;
@@ -6534,25 +6307,14 @@ err_fib_entry_update:
return err;
}
-static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- int err;
- err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
fib_node->fib_entry = NULL;
- return err;
-}
-
-static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -6574,7 +6336,6 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -6609,7 +6370,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -6634,23 +6395,20 @@ err_fib4_entry_create:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
- int err;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return 0;
+ return;
fib_node = fib4_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -6958,9 +6716,9 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
@@ -6983,8 +6741,7 @@ static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
- &fib6_entry->common, false);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_entry_update;
@@ -7008,7 +6765,6 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -7026,7 +6782,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
if (err)
goto err_rt6_unwind;
@@ -7045,7 +6801,6 @@ err_rt6_unwind:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -7063,7 +6818,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}
static int
@@ -7149,12 +6904,6 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
@@ -7196,8 +6945,6 @@ err_rt6_unwind:
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -7220,7 +6967,6 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
- mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -7278,8 +7024,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -7318,7 +7064,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -7342,8 +7088,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -7371,7 +7117,8 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -7382,17 +7129,16 @@ err_fib6_entry_nexthop_add:
return err;
}
-static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
- int err;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return 0;
+ return;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -7401,22 +7147,22 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return 0;
+ return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
- return 0;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ return;
}
fib_node = fib6_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static struct mlxsw_sp_mr_table *
@@ -7569,15 +7315,15 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
}
}
-struct mlxsw_sp_fib6_event {
+struct mlxsw_sp_fib6_event_work {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event {
- struct list_head list; /* node in fib queue */
+struct mlxsw_sp_fib_event_work {
+ struct work_struct work;
union {
- struct mlxsw_sp_fib6_event fib6_event;
+ struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -7586,12 +7332,11 @@ struct mlxsw_sp_fib_event {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
- int family;
};
static int
-mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -7605,8 +7350,8 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
if (!rt_arr)
return -ENOMEM;
- fib6_event->rt_arr = rt_arr;
- fib6_event->nrt6 = nrt6;
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -7628,242 +7373,182 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
}
static void
-mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
int i;
- for (i = 0; i < fib6_event->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
- kfree(fib6_event->rt_arr);
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
- &fib_event->fen_info);
+ &fib_work->fen_info);
}
- fib_info_put(fib_event->fen_info.fi);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- fib_info_put(fib_event->fen_info.fi);
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
- fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
- struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
- mr_cache_put(fib_event->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
- mr_cache_put(fib_event->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_event->ven_info);
+ &fib_work->ven_info);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
- dev_put(fib_event->ven_info.dev);
+ dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
- dev_put(fib_event->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
- struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
- struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
- struct mlxsw_sp_fib_event *next_fib_event;
- struct mlxsw_sp_fib_event *fib_event;
- int last_family = AF_UNSPEC;
- LIST_HEAD(fib_event_queue);
-
- spin_lock_bh(&router->fib_event_queue_lock);
- list_splice_init(&router->fib_event_queue, &fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
-
- /* Router lock is held here to make sure per-instance
- * operation context is not used in between FIB4/6 events
- * processing.
- */
- mutex_lock(&router->lock);
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- list_for_each_entry_safe(fib_event, next_fib_event,
- &fib_event_queue, list) {
- /* Check if the next entry in the queue exists and it is
- * of the same type (family and event) as the current one.
- * In that case it is permitted to do the bulking
- * of multiple FIB entries to a single register write.
- */
- op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
- fib_event->family == next_fib_event->family &&
- fib_event->event == next_fib_event->event;
- op_ctx->event = fib_event->event;
-
- /* In case family of this and the previous entry are different, context
- * reinitialization is going to be needed now, indicate that.
- * Note that since last_family is initialized to AF_UNSPEC, this is always
- * going to happen for the first entry processed in the work.
- */
- if (fib_event->family != last_family)
- op_ctx->initialized = false;
-
- switch (fib_event->family) {
- case AF_INET:
- mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case AF_INET6:
- mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case RTNL_FAMILY_IP6MR:
- case RTNL_FAMILY_IPMR:
- /* Unlock here as inside FIBMR the lock is taken again
- * under RTNL. The per-instance operation context
- * is not used by FIBMR.
- */
- mutex_unlock(&router->lock);
- mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
- fib_event);
- mutex_lock(&router->lock);
- break;
- default:
- WARN_ON_ONCE(1);
- }
- last_family = fib_event->family;
- kfree(fib_event);
- cond_resched();
- }
- WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- mutex_unlock(&router->lock);
-}
-
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_event->fen_info = *fen_info;
+ fib_work->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while event is queued. Release it afterwards.
+ * freed while work is queued. Release it afterwards.
*/
- fib_info_hold(fib_event->fen_info.fi);
+ fib_info_hold(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_event->fnh_info = *fnh_info;
- fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
- fen6_info);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
if (err)
return err;
break;
@@ -7873,20 +7558,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
- mr_cache_hold(fib_event->men_info.mfc);
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
- dev_hold(fib_event->ven_info.dev);
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
@@ -7940,7 +7625,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event *fib_event;
+ struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -7972,39 +7657,37 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
}
- fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
- if (!fib_event)
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
return NOTIFY_BAD;
- fib_event->mlxsw_sp = router->mlxsw_sp;
- fib_event->event = event;
- fib_event->family = info->family;
+ fib_work->mlxsw_sp = router->mlxsw_sp;
+ fib_work->event = event;
switch (info->family) {
case AF_INET:
- mlxsw_sp_router_fib4_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
+ mlxsw_sp_router_fib4_event(fib_work, info);
break;
case AF_INET6:
- err = mlxsw_sp_router_fib6_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- mlxsw_sp_router_fibmr_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
break;
}
- /* Enqueue the event and trigger the work */
- spin_lock_bh(&router->fib_event_queue_lock);
- list_add_tail(&fib_event->list, &router->fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
- mlxsw_core_schedule_work(&router->fib_event_work);
+ mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_event);
+ kfree(fib_work);
return NOTIFY_BAD;
}
@@ -8463,6 +8146,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counters_alloc(rif);
}
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return rif;
err_stats_enable:
@@ -8492,6 +8176,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *vr;
int i;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
@@ -8650,6 +8335,13 @@ static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
+static u64 mlxsw_sp_rifs_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rifs_count);
+}
+
static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
struct netlink_ext_ack *extack)
@@ -8898,9 +8590,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(port_dev) ||
- netif_is_lag_port(port_dev) ||
- netif_is_ovs_port(port_dev))
+ if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -9624,17 +9314,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
- rif_subport->vid);
-
+ efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9659,9 +9350,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9673,7 +9370,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9697,10 +9394,9 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
.fid_get = mlxsw_sp_rif_subport_fid_get,
};
-static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
- enum mlxsw_reg_ritr_if_type type,
- u16 vid_fid, bool enable)
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9708,7 +9404,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
- mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9732,10 +9428,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return err;
rif->mac_profile_id = mac_profile;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
if (err)
- goto err_rif_vlan_fid_op;
+ goto err_rif_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9752,9 +9447,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9762,8 +9463,8 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-err_rif_vlan_fid_op:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9774,7 +9475,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9782,7 +9483,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
@@ -9859,11 +9560,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9938,7 +9747,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9981,6 +9790,7 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
if (err)
goto ul_rif_op_err;
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return ul_rif;
ul_rif_op_err:
@@ -9993,6 +9803,7 @@ static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
kfree(ul_rif);
@@ -10124,7 +9935,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
@@ -10148,10 +9959,15 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- mlxsw_sp_rif_mac_profiles_occ_get,
- mlxsw_sp);
+ atomic_set(&mlxsw_sp->router->rifs_count, 0);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIFS,
+ mlxsw_sp_rifs_occ_get,
+ mlxsw_sp);
return 0;
}
@@ -10161,11 +9977,13 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
@@ -10324,7 +10142,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
unsigned long *fields = config->fields;
u32 hash_fields;
- switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
case 0:
mlxsw_sp_mp4_hash_outer_addr(config);
break;
@@ -10342,7 +10160,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_mp_hash_inner_l3(config);
break;
case 3:
- hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
/* Outer */
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
@@ -10523,13 +10341,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
- bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
+ bool usp;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
@@ -10545,46 +10364,6 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
- .init = mlxsw_sp_router_ll_basic_init,
- .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
- .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
-};
-
-static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
-{
- size_t max_size = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
- size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
-
- if (size > max_size)
- max_size = size;
- }
- router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
- GFP_KERNEL);
- if (!router->ll_op_ctx)
- return -ENOMEM;
- INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
- return 0;
-}
-
-static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
-{
- WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- kfree(router->ll_op_ctx);
-}
-
static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
u16 lb_rif_index;
@@ -10658,23 +10437,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_router_ops_init;
- err = mlxsw_sp_router_xm_init(mlxsw_sp);
- if (err)
- goto err_xm_init;
-
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
- &mlxsw_sp_router_ll_xm_ops :
- &mlxsw_sp_router_ll_basic_ops;
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
-
- err = mlxsw_sp_router_ll_op_ctx_init(router);
- if (err)
- goto err_ll_op_ctx_init;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
mlxsw_sp_nh_grp_activity_work);
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -10727,10 +10492,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
- INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
- INIT_LIST_HEAD(&router->fib_event_queue);
- spin_lock_init(&router->fib_event_queue_lock);
-
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -10785,7 +10546,6 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -10809,10 +10569,6 @@ err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(router);
-err_ll_op_ctx_init:
- mlxsw_sp_router_xm_fini(mlxsw_sp);
-err_xm_init:
err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
@@ -10831,7 +10587,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -10843,8 +10598,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
- mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
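
The spectrum_router.c hunks above replace the removed ll_ops/op_ctx indirection with direct RALUE register access: each FIB entry operation packs a payload on the stack and issues one synchronous register write. A minimal sketch of that pattern, assembled only from helpers visible in the diff (the wrapper name below is illustrative, not part of the driver):

static int mlxsw_sp_fib_entry_write_example(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_fib_entry *fib_entry,
					    enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];	/* payload lives on the stack */

	/* Encode the entry key (virtual router, prefix) and the requested op. */
	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	/* Encode the action; here "trap to CPU" (ip2me), as in the TRAP case. */
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	/* One synchronous register write per entry; no bulking, no op_ctx. */
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
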
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 37411b74c3e6..c5dfb972b433 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,32 +15,12 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
-struct mlxsw_sp_fib_entry_op_ctx {
- u8 bulk_ok:1, /* Indicate to the low-level op it is ok to bulk
- * the actual entry with the one that is the next
- * in queue.
- */
- initialized:1; /* Bit that the low-level op sets in case
- * the context priv is initialized.
- */
- struct list_head fib_entry_priv_list;
- unsigned long event;
- unsigned long ll_priv[];
-};
-
-static inline void
-mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
- memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
struct idr rif_mac_profiles_idr;
atomic_t rif_mac_profiles_count;
+ atomic_t rifs_count;
u8 max_rif_mac_profile;
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
@@ -72,14 +52,8 @@ struct mlxsw_sp_router {
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
- struct work_struct fib_event_work;
- struct list_head fib_event_queue;
- spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
- /* One set of ops for each protocol: IPv4 and IPv6 */
- const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
u16 lb_rif_index;
- struct mlxsw_sp_router_xm *xm;
const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
size_t adj_grp_size_ranges_count;
struct delayed_work nh_grp_activity_dw;
@@ -89,48 +63,6 @@ struct mlxsw_sp_router {
u32 adj_trap_index;
};
-struct mlxsw_sp_fib_entry_priv {
- refcount_t refcnt;
- struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
- unsigned long priv[];
-};
-
-enum mlxsw_sp_fib_entry_op {
- MLXSW_SP_FIB_ENTRY_OP_WRITE,
- MLXSW_SP_FIB_ENTRY_OP_UPDATE,
- MLXSW_SP_FIB_ENTRY_OP_DELETE,
-};
-
-/* Low-level router ops. Basically this is to handle the different
- * register sets to work with ordinary and XM trees and FIB entries.
- */
-struct mlxsw_sp_router_ll_ops {
- int (*init)(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto);
- int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
- int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
- int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
- size_t fib_entry_op_ctx_size;
- size_t fib_entry_priv_size;
- void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len, unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv);
- void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size);
- void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif);
- void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
- void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr);
- int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk);
- bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -150,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
-u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
@@ -232,10 +163,4 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
-extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp);
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp);
-
#endif /* _MLXSW_ROUTER_H_*/
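
With the operation context, low-level ops and XM hooks gone from struct mlxsw_sp_router, FIB notifications are again handled by one self-contained work item per event, as in the notifier hunks above. A hedged sketch of that flow for the AF_INET case, using only the structure and helpers shown in the diff (the function name below is illustrative):

static int mlxsw_sp_router_fib_event_example(struct mlxsw_sp_router *router,
					     unsigned long event,
					     struct fib_notifier_info *info)
{
	struct mlxsw_sp_fib_event_work *fib_work;

	/* FIB notifiers may run in atomic context, hence GFP_ATOMIC. */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (!fib_work)
		return NOTIFY_BAD;

	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	/* AF_INET case: copy the notifier info and take references
	 * (fib_info_hold()) so it stays valid until the work runs.
	 */
	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
	mlxsw_sp_router_fib4_event(fib_work, info);

	/* The work handler processes the event under router->lock and
	 * frees fib_work when done.
	 */
	mlxsw_core_schedule_work(&fib_work->work);
	return NOTIFY_DONE;
}
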
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
deleted file mode 100644
index d213af723a2a..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
+++ /dev/null
@@ -1,812 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/rhashtable.h>
-
-#include "spectrum.h"
-#include "core.h"
-#include "reg.h"
-#include "spectrum_router.h"
-
-#define MLXSW_SP_ROUTER_XM_M_VAL 16
-
-static const u8 mlxsw_sp_router_xm_m_val[] = {
- [MLXSW_SP_L3_PROTO_IPV4] = MLXSW_SP_ROUTER_XM_M_VAL,
- [MLXSW_SP_L3_PROTO_IPV6] = 0, /* Currently unused. */
-};
-
-#define MLXSW_SP_ROUTER_XM_L_VAL_MAX 16
-
-struct mlxsw_sp_router_xm {
- bool ipv4_supported;
- bool ipv6_supported;
- unsigned int entries_size;
- struct rhashtable ltable_ht;
- struct rhashtable flush_ht; /* Stores items about to be flushed from cache */
- unsigned int flush_count;
- bool flush_all_mode;
-};
-
-struct mlxsw_sp_router_xm_ltable_node {
- struct rhash_head ht_node; /* Member of router_xm->ltable_ht */
- u16 mindex;
- u8 current_lvalue;
- refcount_t refcnt;
- unsigned int lvalue_ref[MLXSW_SP_ROUTER_XM_L_VAL_MAX + 1];
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_ltable_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, mindex),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, ht_node),
- .key_len = sizeof(u16),
- .automatic_shrinking = true,
-};
-
-struct mlxsw_sp_router_xm_flush_info {
- bool all;
- enum mlxsw_sp_l3proto proto;
- u16 virtual_router;
- u8 prefix_len;
- unsigned char addr[sizeof(struct in6_addr)];
-};
-
-struct mlxsw_sp_router_xm_fib_entry {
- bool committed;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node; /* Parent node */
- u16 mindex; /* Store for processing from commit op */
- u8 lvalue;
- struct mlxsw_sp_router_xm_flush_info flush_info;
-};
-
-#define MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX \
- (MLXSW_REG_XMDR_TRANS_LEN / MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN)
-
-struct mlxsw_sp_fib_entry_op_ctx_xm {
- bool initialized;
- char xmdr_pl[MLXSW_REG_XMDR_LEN];
- unsigned int trans_offset; /* Offset of the current command within one
- * transaction of XMDR register.
- */
- unsigned int trans_item_len; /* The current command length. This is used
- * to advance 'trans_offset' when the next
- * command is appended.
- */
- unsigned int entries_count;
- struct mlxsw_sp_router_xm_fib_entry *entries[MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX];
-};
-
-static int mlxsw_sp_router_ll_xm_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- char rxlte_pl[MLXSW_REG_RXLTE_LEN];
-
- mlxsw_reg_rxlte_pack(rxlte_pl, vr_id,
- (enum mlxsw_reg_rxlte_protocol) proto, true);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta), xralta_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralst), xralst_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xraltb), xraltb_pl);
-}
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get4(const u32 addr)
-{
- /* Currently the M-index is set to linear mode. That means it is defined
- * as 16 MSB of IP address.
- */
- return addr >> MLXSW_SP_ROUTER_XM_L_VAL_MAX;
-}
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get6(const unsigned char *addr)
-{
- WARN_ON_ONCE(1);
- return 0; /* currently unused */
-}
-
-static void mlxsw_sp_router_ll_xm_op_ctx_check_init(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- if (op_ctx->initialized)
- return;
- op_ctx->initialized = true;
-
- mlxsw_reg_xmdr_pack(op_ctx_xm->xmdr_pl, true);
- op_ctx_xm->trans_offset = 0;
- op_ctx_xm->entries_count = 0;
-}
-
-static void mlxsw_sp_router_ll_xm_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- enum mlxsw_reg_xmdr_c_ltr_op xmdr_c_ltr_op;
- unsigned int len;
-
- mlxsw_sp_router_ll_xm_op_ctx_check_init(op_ctx, op_ctx_xm);
-
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_UPDATE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
-
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- len = mlxsw_reg_xmdr_c_ltr_pack4(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, (u32 *) addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get4(*((u32 *) addr));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- len = mlxsw_reg_xmdr_c_ltr_pack6(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get6(addr);
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
- if (!op_ctx_xm->trans_offset)
- op_ctx_xm->trans_item_len = len;
- else
- WARN_ON_ONCE(op_ctx_xm->trans_item_len != len);
-
- op_ctx_xm->entries[op_ctx_xm->entries_count] = fib_entry;
-
- fib_entry->lvalue = prefix_len > mlxsw_sp_router_xm_m_val[proto] ?
- prefix_len - mlxsw_sp_router_xm_m_val[proto] : 0;
-
- flush_info = &fib_entry->flush_info;
- flush_info->proto = proto;
- flush_info->virtual_router = virtual_router;
- flush_info->prefix_len = prefix_len;
- if (addr)
- memcpy(flush_info->addr, addr, sizeof(flush_info->addr));
- else
- memset(flush_info->addr, 0, sizeof(flush_info->addr));
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_remote_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_local_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- tunnel_ptr);
-}
-
-static struct mlxsw_sp_router_xm_ltable_node *
-mlxsw_sp_router_xm_ltable_node_get(struct mlxsw_sp_router_xm *router_xm, u16 mindex)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- int err;
-
- ltable_node = rhashtable_lookup_fast(&router_xm->ltable_ht, &mindex,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (ltable_node) {
- refcount_inc(&ltable_node->refcnt);
- return ltable_node;
- }
- ltable_node = kzalloc(sizeof(*ltable_node), GFP_KERNEL);
- if (!ltable_node)
- return ERR_PTR(-ENOMEM);
- ltable_node->mindex = mindex;
- refcount_set(&ltable_node->refcnt, 1);
-
- err = rhashtable_insert_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_insert;
-
- return ltable_node;
-
-err_insert:
- kfree(ltable_node);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_router_xm_ltable_node_put(struct mlxsw_sp_router_xm *router_xm,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- if (!refcount_dec_and_test(&ltable_node->refcnt))
- return;
- rhashtable_remove_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- kfree(ltable_node);
-}
-
-static int mlxsw_sp_router_xm_ltable_lvalue_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- char xrmt_pl[MLXSW_REG_XRMT_LEN];
-
- mlxsw_reg_xrmt_pack(xrmt_pl, ltable_node->mindex, ltable_node->current_lvalue);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xrmt), xrmt_pl);
-}
-
-struct mlxsw_sp_router_xm_flush_node {
- struct rhash_head ht_node; /* Member of router_xm->flush_ht */
- struct list_head list;
- struct mlxsw_sp_router_xm_flush_info flush_info;
- struct delayed_work dw;
- struct mlxsw_sp *mlxsw_sp;
- unsigned long start_jiffies;
- unsigned int reuses; /* By how many flush calls this was reused. */
- refcount_t refcnt;
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_flush_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, flush_info),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, ht_node),
- .key_len = sizeof(struct mlxsw_sp_router_xm_flush_info),
- .automatic_shrinking = true,
-};
-
-static struct mlxsw_sp_router_xm_flush_node *
-mlxsw_sp_router_xm_cache_flush_node_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- int err;
-
- flush_node = kzalloc(sizeof(*flush_node), GFP_KERNEL);
- if (!flush_node)
- return ERR_PTR(-ENOMEM);
-
- flush_node->flush_info = *flush_info;
- err = rhashtable_insert_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- if (err) {
- kfree(flush_node);
- return ERR_PTR(err);
- }
- router_xm->flush_count++;
- flush_node->mlxsw_sp = mlxsw_sp;
- flush_node->start_jiffies = jiffies;
- refcount_set(&flush_node->refcnt, 1);
- return flush_node;
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_hold(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node)
- return;
- refcount_inc(&flush_node->refcnt);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_put(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node || !refcount_dec_and_test(&flush_node->refcnt))
- return;
- kfree(flush_node);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- router_xm->flush_count--;
- rhashtable_remove_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
-}
-
-static u32 mlxsw_sp_router_xm_flush_mask4(u8 prefix_len)
-{
- return GENMASK(31, 32 - prefix_len);
-}
-
-static unsigned char *mlxsw_sp_router_xm_flush_mask6(u8 prefix_len)
-{
- static unsigned char mask[sizeof(struct in6_addr)];
-
- memset(mask, 0, sizeof(mask));
- memset(mask, 0xff, prefix_len / 8);
- mask[prefix_len / 8] = GENMASK(8, 8 - prefix_len % 8);
- return mask;
-}
-
-#define MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT 15
-#define MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES 15
-#define MLXSW_SP_ROUTER_XM_CACHE_DELAY 50 /* usecs */
-#define MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT (MLXSW_SP_ROUTER_XM_CACHE_DELAY * 10)
-
-static void mlxsw_sp_router_xm_cache_flush_work(struct work_struct *work)
-{
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- char rlcmld_pl[MLXSW_REG_RLCMLD_LEN];
- enum mlxsw_reg_rlcmld_select select;
- struct mlxsw_sp *mlxsw_sp;
- u32 addr4;
- int err;
-
- flush_node = container_of(work, struct mlxsw_sp_router_xm_flush_node,
- dw.work);
- mlxsw_sp = flush_node->mlxsw_sp;
- flush_info = &flush_node->flush_info;
-
- if (flush_info->all) {
- char rlpmce_pl[MLXSW_REG_RLPMCE_LEN];
-
- mlxsw_reg_rlpmce_pack(rlpmce_pl, true, false);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlpmce),
- rlpmce_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
- if (flush_node->reuses <
- MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES)
- /* Leaving flush-all mode. */
- mlxsw_sp->router->xm->flush_all_mode = false;
- goto out;
- }
-
- select = MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES;
-
- switch (flush_info->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- addr4 = *((u32 *) flush_info->addr);
- addr4 &= mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len);
-
- /* In case the flush prefix length is bigger than M-value,
- * it makes no sense to flush M entries. So just flush
- * the ML entries.
- */
- if (flush_info->prefix_len > MLXSW_SP_ROUTER_XM_M_VAL)
- select = MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES;
-
- mlxsw_reg_rlcmld_pack4(rlcmld_pl, select,
- flush_info->virtual_router, addr4,
- mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rlcmld_pack6(rlcmld_pl, select,
- flush_info->virtual_router, flush_info->addr,
- mlxsw_sp_router_xm_flush_mask6(flush_info->prefix_len));
- break;
- default:
- WARN_ON(true);
- goto out;
- }
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
-out:
- mlxsw_sp_router_xm_cache_flush_node_destroy(mlxsw_sp, flush_node);
-}
-
-static bool
-mlxsw_sp_router_xm_cache_flush_may_cancel(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- unsigned long max_wait = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT);
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
-
- /* In case there is the same flushing work pending, check
- * if we can consolidate with it. We can do it up to MAX_WAIT.
- * Cancel the delayed work if it was still pending.
- */
- if (time_is_before_jiffies(flush_node->start_jiffies + max_wait - delay) &&
- cancel_delayed_work_sync(&flush_node->dw))
- return true;
- return false;
-}
-
-static int
-mlxsw_sp_router_xm_cache_flush_schedule(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
- struct mlxsw_sp_router_xm_flush_info flush_all_info = {.all = true};
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
-
- /* Check if the number of queued flushes reached a critical amount after
- * which it is better to just flush the whole cache.
- */
- if (router_xm->flush_count == MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT)
- /* Entering flush-all mode. */
- router_xm->flush_all_mode = true;
-
- if (router_xm->flush_all_mode)
- flush_info = &flush_all_info;
-
- rcu_read_lock();
- flush_node = rhashtable_lookup_fast(&router_xm->flush_ht, flush_info,
- mlxsw_sp_router_xm_flush_ht_params);
- /* Take a reference so the object is not freed before possible
- * delayed work cancel could be done.
- */
- mlxsw_sp_router_xm_cache_flush_node_hold(flush_node);
- rcu_read_unlock();
-
- if (flush_node && mlxsw_sp_router_xm_cache_flush_may_cancel(flush_node)) {
- flush_node->reuses++;
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- /* Original work was within wait period and was canceled.
- * That means that the reference is still held and the
- * flush_node_put() call above did not free the flush_node.
- * Reschedule it with fresh delay.
- */
- goto schedule_work;
- } else {
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- }
-
- flush_node = mlxsw_sp_router_xm_cache_flush_node_create(mlxsw_sp, flush_info);
- if (IS_ERR(flush_node))
- return PTR_ERR(flush_node);
- INIT_DELAYED_WORK(&flush_node->dw, mlxsw_sp_router_xm_cache_flush_work);
-
-schedule_work:
- mlxsw_core_schedule_dw(&flush_node->dw, delay);
- return 0;
-}
-
-static int
-mlxsw_sp_router_xm_ml_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- u8 lvalue = fib_entry->lvalue;
- int err;
-
- ltable_node = mlxsw_sp_router_xm_ltable_node_get(router_xm,
- fib_entry->mindex);
- if (IS_ERR(ltable_node))
- return PTR_ERR(ltable_node);
- if (lvalue > ltable_node->current_lvalue) {
- /* The L-value is bigger than the one currently set, update. */
- ltable_node->current_lvalue = lvalue;
- err = mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp,
- ltable_node);
- if (err)
- goto err_lvalue_set;
-
- /* The L value for prefix/M is increased.
- * Therefore, all entries in M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
-
- ltable_node->lvalue_ref[lvalue]++;
- fib_entry->ltable_node = ltable_node;
-
- return 0;
-
-err_lvalue_set:
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node =
- fib_entry->ltable_node;
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- u8 lvalue = fib_entry->lvalue;
-
- ltable_node->lvalue_ref[lvalue]--;
- if (lvalue == ltable_node->current_lvalue && lvalue &&
- !ltable_node->lvalue_ref[lvalue]) {
- u8 new_lvalue = lvalue - 1;
-
- /* Find the biggest L-value left out there. */
- while (new_lvalue > 0 && !ltable_node->lvalue_ref[lvalue])
- new_lvalue--;
-
- ltable_node->current_lvalue = new_lvalue;
- mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, ltable_node);
-
- /* The L value for prefix/M is decreased.
- * Therefore, all entries in M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
-}
-
-static int
-mlxsw_sp_router_xm_ml_entries_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_ml_entry_add(mlxsw_sp, fib_entry);
- if (err)
- goto rollback;
- }
- return 0;
-
-rollback:
- for (i--; i >= 0; i--) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_cache_flush(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_cache_flush_schedule(mlxsw_sp,
- &fib_entry->flush_info);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
- }
-}
-
-static int mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- u8 num_rec;
- int err;
- int i;
-
- op_ctx_xm->trans_offset += op_ctx_xm->trans_item_len;
- op_ctx_xm->entries_count++;
-
- /* Check if bulking is possible and there is still room for another
- * FIB entry record. The size of 'trans_item_len' is either size of IPv4
- * command or size of IPv6 command. Not possible to mix those in a
- * single XMDR write.
- */
- if (op_ctx->bulk_ok &&
- op_ctx_xm->trans_offset + op_ctx_xm->trans_item_len <= MLXSW_REG_XMDR_TRANS_LEN) {
- if (postponed_for_bulk)
- *postponed_for_bulk = true;
- return 0;
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_REPLACE) {
- /* The L-table is updated inside. It has to be done before
- * the prefix is inserted.
- */
- err = mlxsw_sp_router_xm_ml_entries_add(mlxsw_sp, op_ctx_xm);
- if (err)
- goto out;
- }
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr), op_ctx_xm->xmdr_pl);
- if (err)
- goto out;
- num_rec = mlxsw_reg_xmdr_num_rec_get(op_ctx_xm->xmdr_pl);
- if (num_rec > op_ctx_xm->entries_count) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XMDR number of records\n");
- err = -EIO;
- goto out;
- }
- for (i = 0; i < num_rec; i++) {
- if (!mlxsw_reg_xmdr_reply_vect_get(op_ctx_xm->xmdr_pl, i)) {
- dev_err(mlxsw_sp->bus_info->dev, "Command send over XMDR failed\n");
- err = -EIO;
- goto out;
- } else {
- fib_entry = op_ctx_xm->entries[i];
- fib_entry->committed = true;
- }
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_DEL)
- /* The L-table is updated inside. It has to be done after
- * the prefix was removed.
- */
- mlxsw_sp_router_xm_ml_entries_del(mlxsw_sp, op_ctx_xm);
-
- /* At the very end, do the XLT cache flushing to evict stale
- * M and ML cache entries after prefixes were inserted/removed.
- */
- mlxsw_sp_router_xm_ml_entries_cache_flush(mlxsw_sp, op_ctx_xm);
-
-out:
- /* Next pack call is going to do reinitialization */
- op_ctx->initialized = false;
- return err;
-}
-
-static bool mlxsw_sp_router_ll_xm_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
-
- return fib_entry->committed;
-}
-
-const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = {
- .init = mlxsw_sp_router_ll_xm_init,
- .ralta_write = mlxsw_sp_router_ll_xm_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_xm_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_xm_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_xm),
- .fib_entry_priv_size = sizeof(struct mlxsw_sp_router_xm_fib_entry),
- .fib_entry_pack = mlxsw_sp_router_ll_xm_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_xm_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_xm_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_xm_fib_entry_is_committed,
-};
-
-#define MLXSW_SP_ROUTER_XM_MINDEX_SIZE (64 * 1024)
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm;
- char rxltm_pl[MLXSW_REG_RXLTM_LEN];
- char xltq_pl[MLXSW_REG_XLTQ_LEN];
- u32 mindex_size;
- u16 device_id;
- int err;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return 0;
-
- router_xm = kzalloc(sizeof(*router_xm), GFP_KERNEL);
- if (!router_xm)
- return -ENOMEM;
-
- mlxsw_reg_xltq_pack(xltq_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(xltq), xltq_pl);
- if (err)
- goto err_xltq_query;
- mlxsw_reg_xltq_unpack(xltq_pl, &device_id, &router_xm->ipv4_supported,
- &router_xm->ipv6_supported, &router_xm->entries_size, &mindex_size);
-
- if (device_id != MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XM device id\n");
- err = -EINVAL;
- goto err_device_id_check;
- }
-
- if (mindex_size != MLXSW_SP_ROUTER_XM_MINDEX_SIZE) {
- dev_err(mlxsw_sp->bus_info->dev, "Unexpected M-index size\n");
- err = -EINVAL;
- goto err_mindex_size_check;
- }
-
- mlxsw_reg_rxltm_pack(rxltm_pl, mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV4],
- mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV6]);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl);
- if (err)
- goto err_rxltm_write;
-
- err = rhashtable_init(&router_xm->ltable_ht, &mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_ltable_ht_init;
-
- err = rhashtable_init(&router_xm->flush_ht, &mlxsw_sp_router_xm_flush_ht_params);
- if (err)
- goto err_flush_ht_init;
-
- mlxsw_sp->router->xm = router_xm;
- return 0;
-
-err_flush_ht_init:
- rhashtable_destroy(&router_xm->ltable_ht);
-err_ltable_ht_init:
-err_rxltm_write:
-err_mindex_size_check:
-err_device_id_check:
-err_xltq_query:
- kfree(router_xm);
- return err;
-}
-
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return;
-
- rhashtable_destroy(&router_xm->flush_ht);
- rhashtable_destroy(&router_xm->ltable_ht);
- kfree(router_xm);
-}
-
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- return router_xm && router_xm->ipv4_supported;
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index fe663b0ab708..39904dacf4f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -106,8 +106,8 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_init;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
- mlxsw_sp_span_occ_get, mlxsw_sp);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
@@ -123,7 +123,7 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
cancel_work_sync(&mlxsw_sp->span->work);
- devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a6d2e806cba9..4efccd942fb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -48,7 +48,8 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
- struct list_head mids_list;
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
u8 vlan_enabled:1,
multicast_enabled:1,
mrouter:1;
@@ -102,6 +103,33 @@ struct mlxsw_sp_switchdev_ops {
void (*init)(struct mlxsw_sp *mlxsw_sp);
};
+struct mlxsw_sp_mdb_entry_key {
+ unsigned char addr[ETH_ALEN];
+ u16 fid;
+};
+
+struct mlxsw_sp_mdb_entry {
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mdb_entry_key key;
+ u16 mid;
+ struct list_head ports_list;
+ u16 ports_count;
+};
+
+struct mlxsw_sp_mdb_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+ refcount_t refcount;
+ bool mrouter;
+};
+
+static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
+};
+
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -109,12 +137,13 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port);
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device
- *bridge_device);
+ *bridge_device, bool mc_enabled);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -237,6 +266,10 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
if (!bridge_device)
return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_mdb_rhashtable_init;
+
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
@@ -254,7 +287,8 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
- INIT_LIST_HEAD(&bridge_device->mids_list);
+ INIT_LIST_HEAD(&bridge_device->mdb_list);
+
if (list_empty(&bridge->bridges_list))
mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
list_add(&bridge_device->list, &bridge->bridges_list);
@@ -273,6 +307,8 @@ err_vxlan_init:
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
+ rhashtable_destroy(&bridge_device->mdb_ht);
+err_mdb_rhashtable_init:
kfree(bridge_device);
return ERR_PTR(err);
}
@@ -290,7 +326,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
- WARN_ON(!list_empty(&bridge_device->mids_list));
+ WARN_ON(!list_empty(&bridge_device->mdb_list));
+ rhashtable_destroy(&bridge_device->mdb_ht);
kfree(bridge_device);
}
@@ -643,6 +680,64 @@ err_port_bridge_vlan_flood_set:
}
static int
+mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type, local_port, member);
+ if (err)
+ goto err_fid_flood_set;
+ }
+
+ return 0;
+
+err_fid_flood_set:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &bridge_vlan->port_vlan_list,
+ list) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
+ local_port, !member);
+ }
+
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ member);
+ if (err)
+ goto err_bridge_vlans_flood_set;
+ }
+
+ return 0;
+
+err_bridge_vlans_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ !member);
+ return err;
+}
+
+static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
bool set)
@@ -813,6 +908,9 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
+
if (!bridge_port->bridge_device->multicast_enabled)
goto out;
@@ -822,8 +920,6 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
- is_port_mrouter);
out:
bridge_port->mrouter = is_port_mrouter;
return 0;
@@ -842,6 +938,7 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
bool mc_disabled)
{
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -854,43 +951,184 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
- if (bridge_device->multicast_enabled != !mc_disabled) {
- bridge_device->multicast_enabled = !mc_disabled;
- mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
- bridge_device);
- }
+ if (bridge_device->multicast_enabled == !mc_disabled)
+ return 0;
+
+ bridge_device->multicast_enabled = !mc_disabled;
+ err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ !mc_disabled);
+ if (err)
+ goto err_mc_enable_sync;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
- enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
bool member = mlxsw_sp_mc_flood(bridge_port);
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
- bridge_port,
- packet_type, member);
+ err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
+ packet_type,
+ member);
if (err)
- return err;
+ goto err_flood_table_set;
}
- bridge_device->multicast_enabled = !mc_disabled;
-
return 0;
+
+err_flood_table_set:
+ list_for_each_entry_continue_reverse(bridge_port,
+ &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
+ !member);
+ }
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ mc_disabled);
+err_mc_enable_sync:
+ bridge_device->multicast_enabled = mc_disabled;
+ return err;
+}
+
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
+ if (mdb_entry_port->local_port == local_port)
+ return mdb_entry_port;
+ }
+
+ return NULL;
}
-static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
- u16 mid_idx, bool add)
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
{
- char *smid2_pl;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
int err;
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count++;
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+ mdb_entry->ports_count++;
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port, bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count--;
+ return;
+ }
+
+ mdb_entry->ports_count--;
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
+
+static __always_unused struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (!mdb_entry_port->mrouter)
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ mdb_entry_port->mrouter = true;
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static __always_unused void
+mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!mdb_entry_port->mrouter)
+ return;
+
+ mdb_entry_port->mrouter = false;
+ if (!refcount_dec_and_test(&mdb_entry_port->refcount))
+ return;
+
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
}
static void
@@ -898,10 +1136,17 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device,
bool add)
{
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- list_for_each_entry(mid, &bridge_device->mids_list, list)
- mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
}
static int
@@ -1127,14 +1372,13 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last_port, last_vlan;
+ bool last_port;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
- last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
last_port = list_is_singular(&bridge_vlan->port_vlan_list);
@@ -1146,8 +1390,9 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
- if (last_vlan)
- mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
+ mlxsw_sp_fid_index(fid));
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
@@ -1436,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
+ const char *mac, u16 fid, u16 vid,
+ bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
@@ -1449,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
+ local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1464,18 +1711,18 @@ out:
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+ const char *mac, u16 fid, u16 vid,
+ bool adding, bool dynamic)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
+ adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
@@ -1537,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
- fdb_info->addr, fid_index,
+ fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
@@ -1546,8 +1793,9 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
vid, adding, false);
}
-static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid_idx, bool adding)
+static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mdb_entry *mdb_entry,
+ bool adding)
{
char *sfd_pl;
u8 num_rec;
@@ -1558,8 +1806,9 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
+ mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1573,79 +1822,17 @@ out:
return err;
}
-static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
- long *ports_bitmap,
- bool set_router_port)
-{
- char *smid2_pl;
- int err, i;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
- }
-
- mlxsw_reg_smid2_port_mask_set(smid2_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
-
- for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
-
- mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 mid_idx, bool add)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid2_pl;
- int err;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
-{
- struct mlxsw_sp_mid *mid;
-
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
- return mid;
- }
- return NULL;
-}
-
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
- unsigned long *ports_bitmap)
+ struct mlxsw_sp_ports_bitmap *ports_bm)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u64 max_lag_members, i;
int lag_id;
if (!bridge_port->lagged) {
- set_bit(bridge_port->system_port, ports_bitmap);
+ set_bit(bridge_port->system_port, ports_bm->bitmap);
} else {
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
@@ -1655,13 +1842,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
lag_id, i);
if (mlxsw_sp_port)
set_bit(mlxsw_sp_port->local_port,
- ports_bitmap);
+ ports_bm->bitmap);
}
}
}
static void
-mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp *mlxsw_sp)
{
@@ -1671,116 +1858,226 @@ mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
if (bridge_port->mrouter) {
mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
bridge_port,
- flood_bitmap);
+ flood_bm);
}
}
}
-static bool
-mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid,
- struct mlxsw_sp_bridge_device *bridge_device)
+static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ unsigned int nbits = ports_bm->nbits;
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, nbits) {
+ mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
+ mdb_entry,
+ i);
+ if (IS_ERR(mdb_entry_port)) {
+ nbits = i;
+ goto err_mrouter_port_get;
+ }
+ }
+
+ return 0;
+
+err_mrouter_port_get:
+ for_each_set_bit(i, ports_bm->bitmap, nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+ return PTR_ERR(mdb_entry_port);
+}
+
+static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+}
+
+static int
+mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
- long *flood_bitmap;
- int num_of_ports;
- u16 mid_idx;
+ struct mlxsw_sp_ports_bitmap ports_bm;
int err;
- mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
- MLXSW_SP_MID_MAX);
- if (mid_idx == MLXSW_SP_MID_MAX)
- return false;
+ err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
+ if (err)
+ return err;
- num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
- if (!flood_bitmap)
- return false;
+ mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
+
+ if (add)
+ err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
+ mdb_entry);
+ else
+ mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
+
+ mlxsw_sp_port_bitmap_fini(&ports_bm);
+ return err;
+}
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
- mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
- mid->mid = mid_idx;
- err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
- bridge_device->mrouter);
- bitmap_free(flood_bitmap);
+ ether_addr_copy(mdb_entry->key.addr, addr);
+ mdb_entry->key.fid = fid;
+ err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
if (err)
- return false;
+ goto err_pgt_mid_alloc;
+
+ INIT_LIST_HEAD(&mdb_entry->ports_list);
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
- true);
+ err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
+ true);
if (err)
- return false;
+ goto err_mdb_mrouters_set;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = true;
- return true;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port)) {
+ err = PTR_ERR(mdb_entry_port);
+ goto err_mdb_entry_port_get;
+ }
+
+ if (bridge_device->multicast_enabled) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+
+ err = rhashtable_insert_fast(&bridge_device->mdb_ht,
+ &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
+
+ return mdb_entry;
+
+err_rhashtable_insert:
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+err_mdb_entry_write:
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
+err_mdb_entry_port_get:
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+err_mdb_mrouters_set:
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+err_pgt_mid_alloc:
+ kfree(mdb_entry);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ u16 local_port, bool force)
{
- if (!mid->in_hw)
- return 0;
-
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = false;
- return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
- false);
+ list_del(&mdb_entry->list);
+ rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+ WARN_ON(!list_empty(&mdb_entry->ports_list));
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+ kfree(mdb_entry);
}
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- mid = kzalloc(sizeof(*mid), GFP_KERNEL);
- if (!mid)
- return NULL;
+ ether_addr_copy(key.addr, addr);
+ key.fid = fid;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (mdb_entry) {
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
- GFP_KERNEL);
- if (!mid->ports_in_mid)
- goto err_ports_in_mid_alloc;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
+ mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port))
+ return ERR_CAST(mdb_entry_port);
- ether_addr_copy(mid->addr, addr);
- mid->fid = fid;
- mid->in_hw = false;
+ return mdb_entry;
+ }
- if (!bridge_device->multicast_enabled)
- goto out;
+ return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
+ local_port);
+}
- if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
- goto err_write_mdb_entry;
+static bool
+mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_mdb_entry_port *removed_entry_port,
+ bool force)
+{
+ if (mdb_entry->ports_count > 1)
+ return false;
-out:
- list_add_tail(&mid->list, &bridge_device->mids_list);
- return mid;
+ if (force)
+ return true;
-err_write_mdb_entry:
- bitmap_free(mid->ports_in_mid);
-err_ports_in_mid_alloc:
- kfree(mid);
- return NULL;
+ if (!removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 1)
+ return false;
+
+ if (removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 2)
+ return false;
+
+ return true;
}
-static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
+ bool force)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err = 0;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
- if (bitmap_empty(mid->ports_in_mid,
- mlxsw_core_max_ports(mlxsw_sp->core))) {
- err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
- list_del(&mid->list);
- bitmap_free(mid->ports_in_mid);
- kfree(mid);
- }
- return err;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ /* Avoid a temporary situation in which the MDB entry points to an empty
+ * PGT entry, as otherwise packets will be temporarily dropped instead
+ * of being flooded. Instead, in this situation, call
+ * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
+ * then releases the PGT entry.
+ */
+ if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
+ mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
+ local_port, force);
+ else
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
+ force);
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1789,12 +2086,10 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1809,54 +2104,35 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
- fid_index);
- if (!mid) {
- netdev_err(dev, "Unable to allocate MC group\n");
- return -ENOMEM;
- }
- }
- set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
-
- if (!bridge_device->multicast_enabled)
- return 0;
-
- if (bridge_port->mrouter)
- return 0;
-
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set SMID\n");
- goto err_out;
- }
+ mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
+ mdb->addr, fid_index,
+ mlxsw_sp_port->local_port);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
return 0;
-
-err_out:
- mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- return err;
}
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_device
- *bridge_device)
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool mc_enabled)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_mid *mid;
- bool mc_enabled;
-
- mc_enabled = bridge_device->multicast_enabled;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (mc_enabled)
- mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
- bridge_device);
- else
- mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
+ if (err)
+ goto err_mdb_entry_write;
}
+ return 0;
+
+err_mdb_entry_write:
+ list_for_each_entry_continue_reverse(mdb_entry,
+ &bridge_device->mdb_list, list)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
+ return err;
}
static void
@@ -1864,14 +2140,20 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
bool add)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
}
}
@@ -1949,28 +2231,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int
-__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_mid *mid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (bridge_port->bridge_device->multicast_enabled &&
- !bridge_port->mrouter) {
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
- }
-
- err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
-
- return err;
-}
-
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1980,7 +2240,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
@@ -1996,32 +2257,44 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
+ ether_addr_copy(key.addr, mdb->addr);
+ key.fid = fid_index;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (!mdb_entry) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ mlxsw_sp_port->local_port, false);
+ return 0;
}
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid, *tmp;
+ struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
+ u16 local_port = mlxsw_sp_port->local_port;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
- if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
- __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
- mid);
- } else if (bridge_device->multicast_enabled &&
- bridge_port->mrouter) {
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- }
+ list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
+ list) {
+ if (mdb_entry->key.fid != fid_index)
+ continue;
+
+ if (bridge_port->mrouter)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
+ mdb_entry,
+ local_port);
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ local_port, true);
}
}
@@ -2633,10 +2906,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u16 local_port;
- u16 vid, fid;
bool do_notification = true;
int err;
@@ -2667,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
- err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
@@ -2729,8 +3002,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
- mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
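The switchdev rework above replaces the driver's private MID list scan with a rhashtable keyed by { MAC, FID }. A minimal, self-contained sketch of that pattern (names below are illustrative, not the driver's actual symbols):

#include <linux/etherdevice.h>
#include <linux/rhashtable.h>

struct mdb_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};

struct mdb_entry {
	struct rhash_head ht_node;
	struct mdb_key key;
};

static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mdb_entry, key),
	.head_offset = offsetof(struct mdb_entry, ht_node),
	.key_len = sizeof(struct mdb_key),
};

/* Build the key on the stack and look it up, as mlxsw_sp_port_mdb_del()
 * does above; returns the entry or NULL.
 */
static struct mdb_entry *mdb_lookup(struct rhashtable *ht,
				    const unsigned char *mac, u16 fid)
{
	struct mdb_key key = {};

	ether_addr_copy(key.addr, mac);
	key.fid = fid;
	return rhashtable_lookup_fast(ht, &key, mdb_ht_params);
}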
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index ed4d0d3448f3..f4bfdb6dab9c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -953,16 +953,16 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPBC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPUC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
@@ -1298,8 +1298,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->policers_count; i++) {
policer_item = &trap->policer_items_arr[i];
- err = devlink_trap_policers_register(devlink,
- &policer_item->policer, 1);
+ err = devl_trap_policers_register(devlink,
+ &policer_item->policer, 1);
if (err)
goto err_trap_policer_register;
}
@@ -1309,8 +1309,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
err_trap_policer_register:
for (i--; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
return err;
@@ -1325,8 +1325,8 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
for (i = trap->policers_count - 1; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
@@ -1381,8 +1381,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->groups_count; i++) {
group_item = &trap->group_items_arr[i];
- err = devlink_trap_groups_register(devlink, &group_item->group,
- 1);
+ err = devl_trap_groups_register(devlink, &group_item->group, 1);
if (err)
goto err_trap_group_register;
}
@@ -1392,7 +1391,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
err_trap_group_register:
for (i--; i >= 0; i--) {
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
return err;
@@ -1408,7 +1407,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_group_item *group_item;
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
}
@@ -1469,8 +1468,8 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->traps_count; i++) {
trap_item = &trap->trap_items_arr[i];
- err = devlink_traps_register(devlink, &trap_item->trap, 1,
- mlxsw_sp);
+ err = devl_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
if (err)
goto err_trap_register;
}
@@ -1480,7 +1479,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_trap_register:
for (i--; i >= 0; i--) {
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
return err;
@@ -1496,7 +1495,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_item *trap_item;
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index d888498aed33..8da169663bda 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -27,8 +27,6 @@ enum {
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
- MLXSW_TRAP_ID_ARPBC = 0x50,
- MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -71,6 +69,8 @@ enum {
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
+ MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index c8fe8b31f07b..b1c74e6cb012 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -155,8 +155,8 @@ static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
return 0;
}
-static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
- u16 timeout)
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+ u16 timeout)
{
u16 timeout_cnt = 0;
u32 val;
@@ -192,7 +192,7 @@ static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
return 0;
}
-static void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
{
u32 val;
@@ -1149,7 +1149,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (adapter->is_pci11x1x)
+ wol->supported |= WAKE_MAGICSECURE;
+
wol->wolopts |= adapter->wolopts;
+ if (adapter->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, adapter->sopass, sizeof(wol->sopass));
}
static int lan743x_ethtool_set_wol(struct net_device *netdev,
@@ -1170,6 +1175,13 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
adapter->wolopts |= WAKE_PHY;
if (wol->wolopts & WAKE_ARP)
adapter->wolopts |= WAKE_ARP;
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ wol->wolopts & WAKE_MAGIC) {
+ memcpy(adapter->sopass, wol->sopass, sizeof(wol->sopass));
+ adapter->wolopts |= WAKE_MAGICSECURE;
+ } else {
+ memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ }
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
@@ -1178,6 +1190,49 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
}
#endif /* CONFIG_PM */
+static void lan743x_common_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ u32 *rb = p;
+
+ memset(p, 0, (MAX_LAN743X_ETH_REGS * sizeof(u32)));
+
+ rb[ETH_PRIV_FLAGS] = adapter->flags;
+ rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV);
+ rb[ETH_FPGA_REV] = lan743x_csr_read(adapter, FPGA_REV);
+ rb[ETH_STRAP_READ] = lan743x_csr_read(adapter, STRAP_READ);
+ rb[ETH_INT_STS] = lan743x_csr_read(adapter, INT_STS);
+ rb[ETH_HW_CFG] = lan743x_csr_read(adapter, HW_CFG);
+ rb[ETH_PMT_CTL] = lan743x_csr_read(adapter, PMT_CTL);
+ rb[ETH_E2P_CMD] = lan743x_csr_read(adapter, E2P_CMD);
+ rb[ETH_E2P_DATA] = lan743x_csr_read(adapter, E2P_DATA);
+ rb[ETH_MAC_CR] = lan743x_csr_read(adapter, MAC_CR);
+ rb[ETH_MAC_RX] = lan743x_csr_read(adapter, MAC_RX);
+ rb[ETH_MAC_TX] = lan743x_csr_read(adapter, MAC_TX);
+ rb[ETH_FLOW] = lan743x_csr_read(adapter, MAC_FLOW);
+ rb[ETH_MII_ACC] = lan743x_csr_read(adapter, MAC_MII_ACC);
+ rb[ETH_MII_DATA] = lan743x_csr_read(adapter, MAC_MII_DATA);
+ rb[ETH_EEE_TX_LPI_REQ_DLY] = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ rb[ETH_WUCSR] = lan743x_csr_read(adapter, MAC_WUCSR);
+ rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC);
+}
+
+static int lan743x_get_regs_len(struct net_device *dev)
+{
+ return MAX_LAN743X_ETH_REGS * sizeof(u32);
+}
+
+static void lan743x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ regs->version = LAN743X_ETH_REG_VERSION;
+
+ lan743x_common_regs(dev, regs, p);
+}
+
const struct ethtool_ops lan743x_ethtool_ops = {
.get_drvinfo = lan743x_ethtool_get_drvinfo,
.get_msglevel = lan743x_ethtool_get_msglevel,
@@ -1202,6 +1257,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.set_eee = lan743x_ethtool_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = lan743x_get_regs_len,
+ .get_regs = lan743x_get_regs,
#ifdef CONFIG_PM
.get_wol = lan743x_ethtool_get_wol,
.set_wol = lan743x_ethtool_set_wol,
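Usage note (not part of the patch): once WAKE_MAGICSECURE is wired up as above, the SecureOn password can be set from userspace with something like "ethtool -s eth0 wol gs sopass 00:11:22:33:44:55" (the interface name here is illustrative), and the new get_regs_len/get_regs pair makes "ethtool -d eth0" dump the register snapshot assembled by lan743x_common_regs().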
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
index d0d11a777a58..7f5996a52488 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.h
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -6,6 +6,32 @@
#include "linux/ethtool.h"
+#define LAN743X_ETH_REG_VERSION 1
+
+enum {
+ ETH_PRIV_FLAGS,
+ ETH_ID_REV,
+ ETH_FPGA_REV,
+ ETH_STRAP_READ,
+ ETH_INT_STS,
+ ETH_HW_CFG,
+ ETH_PMT_CTL,
+ ETH_E2P_CMD,
+ ETH_E2P_DATA,
+ ETH_MAC_CR,
+ ETH_MAC_RX,
+ ETH_MAC_TX,
+ ETH_FLOW,
+ ETH_MII_ACC,
+ ETH_MII_DATA,
+ ETH_EEE_TX_LPI_REQ_DLY,
+ ETH_WUCSR,
+ ETH_WK_SRC,
+
+ /* Add new registers above */
+ MAX_LAN743X_ETH_REGS
+};
+
extern const struct ethtool_ops lan743x_ethtool_ops;
#endif /* _LAN743X_ETHTOOL_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index af81236b4b4e..a9a1dea6d731 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -22,20 +22,36 @@
#define MMD_ACCESS_WRITE 1
#define MMD_ACCESS_READ 2
#define MMD_ACCESS_READ_INC 3
+#define PCS_POWER_STATE_DOWN 0x6
+#define PCS_POWER_STATE_UP 0x4
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
u32 chip_rev;
+ u32 cfg_load;
+ u32 hw_cfg;
u32 strap;
+ int ret;
+
+ /* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
+ ret = lan743x_hs_syslock_acquire(adapter, 100);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Sys Lock acquire failed ret:%d\n", ret);
+ return;
+ }
- strap = lan743x_csr_read(adapter, STRAP_READ);
- if (strap & STRAP_READ_USE_SGMII_EN_) {
+ cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
+ lan743x_hs_syslock_release(adapter);
+ hw_cfg = lan743x_csr_read(adapter, HW_CFG);
+
+ if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
+ hw_cfg & HW_CFG_RST_PROTECT_) {
+ strap = lan743x_csr_read(adapter, STRAP_READ);
if (strap & STRAP_READ_SGMII_EN_)
adapter->is_sgmii_en = true;
else
adapter->is_sgmii_en = false;
- netif_dbg(adapter, drv, adapter->netdev,
- "STRAP_READ: 0x%08X\n", strap);
} else {
chip_rev = lan743x_csr_read(adapter, FPGA_REV);
if (chip_rev) {
@@ -43,12 +59,12 @@ static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
adapter->is_sgmii_en = true;
else
adapter->is_sgmii_en = false;
- netif_dbg(adapter, drv, adapter->netdev,
- "FPGA_REV: 0x%08X\n", chip_rev);
} else {
adapter->is_sgmii_en = false;
}
}
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
}
static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
@@ -909,6 +925,318 @@ static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
return ret;
}
+static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+ u32 data;
+ int ret;
+
+ ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
+ !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "%s: error %d sgmii wait timeout\n", __func__, ret);
+
+ return ret;
+}
+
+static int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
+{
+ u32 mmd_access;
+ int ret;
+ u32 val;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should be <= 31\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ goto sgmii_unlock;
+
+ val = lan743x_csr_read(adapter, SGMII_DATA);
+ ret = (int)(val & SGMII_DATA_MASK_);
+
+sgmii_unlock:
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
+ u8 mmd, u16 addr, u16 val)
+{
+ u32 mmd_access;
+ int ret;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should be <= 31\n", __func__);
+ return -EINVAL;
+ }
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Data */
+ lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
+ u16 baud)
+{
+ int mpllctrl0;
+ int mpllctrl1;
+ int miscctrl1;
+ int ret;
+
+ mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0);
+ if (mpllctrl0 < 0)
+ return mpllctrl0;
+
+ mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
+ if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
+ /* mpll_baud_clk/4 */
+ miscctrl1 = 0xA;
+ } else {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
+ /* mpll_baud_clk/2 */
+ miscctrl1 = 0x5;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
+}
+
+static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
+ bool enable)
+{
+ if (enable)
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_3P125GBPS);
+ else
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_1P25GBPS);
+}
+
+static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
+ bool *status)
+{
+ int ret;
+
+ ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
+ ret == VR_MII_MPLL_MULTIPLIER_50)
+ *status = true;
+ else
+ *status = false;
+
+ return 0;
+}
+
+static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+{
+ enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
+ int mii_ctrl;
+ int dgt_ctrl;
+ int an_ctrl;
+ int ret;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
+ /* Switch to 2.5 Gbps */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
+ else
+ /* Switch to 10/100/1000 Mbps clock */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
+ if (ret < 0)
+ return ret;
+
+ /* Enable SGMII Auto NEG */
+ mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctrl < 0)
+ return mii_ctrl;
+
+ an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
+ if (an_ctrl < 0)
+ return an_ctrl;
+
+ dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1);
+ if (dgt_ctrl < 0)
+ return dgt_ctrl;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
+ mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
+ mii_ctrl |= BMCR_SPEED1000;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ /* For auto-negotiation to operate properly at 2.5 Gbps, the
+ * 1.6 ms link timer value must be adjusted: the
+ * VR_MII_LINK_TIMER_CTRL register must be set to 16'h7A1 and
+ * the CL37_TMR_OVR_RIDE bit of the VR_MII_DIG_CTRL1 register
+ * set to 1.
+ */
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_LINK_TIMER_CTRL, 0x7A1);
+ if (ret < 0)
+ return ret;
+ } else {
+ mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
+ mii_ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1, dgt_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_AN_CTRL, an_ctrl);
+}
+
+static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
+{
+ u8 wait_cnt = 0;
+ u32 dig_sts;
+
+ do {
+ dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_STS);
+ if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
+ VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
+ break;
+ usleep_range(1000, 2000);
+ } while (wait_cnt++ < 10);
+
+ if (wait_cnt >= 10)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phy_device *phydev = netdev->phydev;
+ enum lan743x_sgmii_lsd lsd = POWER_DOWN;
+ int mii_ctl;
+ bool status;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_2500_MASTER;
+ else
+ lsd = LINK_2500_SLAVE;
+ break;
+ case SPEED_1000:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_1000_MASTER;
+ else
+ lsd = LINK_1000_SLAVE;
+ break;
+ case SPEED_100:
+ if (phydev->duplex)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (phydev->duplex)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "Invalid speed %d\n", phydev->speed);
+ return -EINVAL;
+ }
+
+ adapter->sgmii_lsd = lsd;
+ ret = lan743x_sgmii_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d SGMII cfg failed\n", ret);
+ return ret;
+ }
+
+ ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "erro %d SGMII get mode failed\n", ret);
+ return ret;
+ }
+
+ if (status)
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 2.5G mode enable\n");
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 1G mode enable\n");
+
+ /* SGMII/1000/2500BASE-X PCS power down */
+ mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctl < 0)
+ return mii_ctl;
+
+ mii_ctl |= BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
+ if (ret < 0)
+ return ret;
+
+ /* SGMII/1000/2500BASE-X PCS power up */
+ mii_ctl &= ~BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
u8 *addr)
{
@@ -1124,6 +1452,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data |= MAC_CR_CFG_H_;
data &= ~MAC_CR_CFG_L_;
break;
+ case SPEED_2500:
+ data |= MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
}
lan743x_csr_write(adapter, MAC_CR, data);
@@ -1135,6 +1467,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
lan743x_phy_update_flowcontrol(adapter, local_advertisement,
remote_advertisement);
lan743x_ptp_update_latency(adapter, phydev->speed);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
+ lan743x_sgmii_config(adapter);
}
}
@@ -2875,6 +3211,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
pci11x1x_strap_get_status(adapter);
spin_lock_init(&adapter->eth_syslock_spinlock);
+ mutex_init(&adapter->sgmii_rw_lock);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3124,6 +3461,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
int mask_index;
+ u32 sopass;
u32 pmtctl;
u32 wucsr;
u32 macrx;
@@ -3218,6 +3556,14 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
}
+ if (adapter->wolopts & WAKE_MAGICSECURE) {
+ sopass = *(u32 *)adapter->sopass;
+ lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
+ sopass = *(u16 *)&adapter->sopass[4];
+ lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
+ wucsr |= MAC_MP_SO_EN_;
+ }
+
lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
lan743x_csr_write(adapter, PMT_CTL, pmtctl);
lan743x_csr_write(adapter, MAC_RX, macrx);
@@ -3228,6 +3574,7 @@ static int lan743x_pm_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
lan743x_pcidev_shutdown(pdev);
@@ -3239,6 +3586,18 @@ static int lan743x_pm_suspend(struct device *dev)
if (adapter->wolopts)
lan743x_pm_set_wol(adapter);
+ if (adapter->is_pci11x1x) {
+ /* Save HW_CFG to config again in PM resume */
+ data = lan743x_csr_read(adapter, HW_CFG);
+ adapter->hw_cfg = data;
+ data |= (HW_CFG_RST_PROTECT_PCIE_ |
+ HW_CFG_D3_RESET_DIS_ |
+ HW_CFG_D3_VAUX_OVR_ |
+ HW_CFG_HOT_RESET_DIS_ |
+ HW_CFG_RST_PROTECT_);
+ lan743x_csr_write(adapter, HW_CFG, data);
+ }
+
/* Host sets PME_En, put D3hot */
return pci_prepare_to_sleep(pdev);
}
@@ -3254,6 +3613,10 @@ static int lan743x_pm_resume(struct device *dev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ /* Restore HW_CFG that was saved during pm suspend */
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret) {
netif_err(adapter, probe, adapter->netdev,
@@ -3270,6 +3633,9 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_info(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
return 0;
}
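The SGMII busy-wait added above is built on readx_poll_timeout() from <linux/iopoll.h>: the macro repeatedly evaluates val = op(addr) until the condition holds or the timeout (in microseconds) expires, returning 0 on success or -ETIMEDOUT. A minimal sketch with a hypothetical accessor standing in for LAN743X_CSR_READ_OP:

#include <linux/io.h>
#include <linux/iopoll.h>

/* Hypothetical MMIO accessor; the driver uses LAN743X_CSR_READ_OP. */
static u32 my_csr_read(void __iomem *reg)
{
	return readl(reg);
}

static int my_wait_not_busy(void __iomem *reg, u32 busy_bit)
{
	u32 val;

	/* Poll every 100 us, give up after 1 s. */
	return readx_poll_timeout(my_csr_read, reg, val,
				  !(val & busy_bit), 100, 1000000);
}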
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 1ca5f3216403..72adae4f2aa0 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -43,6 +43,11 @@
#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
#define HW_CFG (0x010)
+#define HW_CFG_RST_PROTECT_PCIE_ BIT(19)
+#define HW_CFG_HOT_RESET_DIS_ BIT(15)
+#define HW_CFG_D3_VAUX_OVR_ BIT(14)
+#define HW_CFG_D3_RESET_DIS_ BIT(13)
+#define HW_CFG_RST_PROTECT_ BIT(12)
#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
#define HW_CFG_EE_OTP_RELOAD_ BIT(4)
#define HW_CFG_LRST_ BIT(1)
@@ -92,6 +97,11 @@
#define CONFIG_REG_ADDR_BASE (0x0000)
#define ETH_EEPROM_REG_ADDR_BASE (0x0E00)
#define ETH_OTP_REG_ADDR_BASE (0x1000)
+#define GEN_SYS_CONFIG_LOAD_STARTED_REG (0x0078)
+#define ETH_SYS_CONFIG_LOAD_STARTED_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ GEN_SYS_CONFIG_LOAD_STARTED_REG)
+#define GEN_SYS_LOAD_STARTED_REG_ETH_ BIT(4)
#define SYS_LOCK_REG (0x00A0)
#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7)
#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5)
@@ -214,6 +224,7 @@
#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
#define MAC_WUCSR (0x140)
+#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
@@ -221,6 +232,8 @@
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_MP_SO_HI (0x148)
+#define MAC_MP_SO_LO (0x14C)
#define MAC_WUF_CFG0 (0x150)
#define MAC_NUM_OF_WUF_CFG (32)
@@ -280,11 +293,82 @@
#define MAC_WUCSR2 (0x600)
+#define SGMII_ACC (0x720)
+#define SGMII_ACC_SGMII_BZY_ BIT(31)
+#define SGMII_ACC_SGMII_WR_ BIT(30)
+#define SGMII_ACC_SGMII_MMD_SHIFT_ (16)
+#define SGMII_ACC_SGMII_MMD_MASK_ GENMASK(20, 16)
+#define SGMII_ACC_SGMII_MMD_VSR_ BIT(15)
+#define SGMII_ACC_SGMII_ADDR_SHIFT_ (0)
+#define SGMII_ACC_SGMII_ADDR_MASK_ GENMASK(15, 0)
+#define SGMII_DATA (0x724)
+#define SGMII_DATA_SHIFT_ (0)
+#define SGMII_DATA_MASK_ GENMASK(15, 0)
#define SGMII_CTL (0x728)
#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+/* Vendor Specific SGMII MMD details */
+#define SR_VSMMD_PCS_ID1 0x0004
+#define SR_VSMMD_PCS_ID2 0x0005
+#define SR_VSMMD_STS 0x0008
+#define SR_VSMMD_CTRL 0x0009
+
+#define VR_MII_DIG_CTRL1 0x8000
+#define VR_MII_DIG_CTRL1_VR_RST_ BIT(15)
+#define VR_MII_DIG_CTRL1_R2TLBE_ BIT(14)
+#define VR_MII_DIG_CTRL1_EN_VSMMD1_ BIT(13)
+#define VR_MII_DIG_CTRL1_CS_EN_ BIT(10)
+#define VR_MII_DIG_CTRL1_MAC_AUTO_SW_ BIT(9)
+#define VR_MII_DIG_CTRL1_INIT_ BIT(8)
+#define VR_MII_DIG_CTRL1_DTXLANED_0_ BIT(4)
+#define VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_ BIT(3)
+#define VR_MII_DIG_CTRL1_EN_2_5G_MODE_ BIT(2)
+#define VR_MII_DIG_CTRL1_BYP_PWRUP_ BIT(1)
+#define VR_MII_DIG_CTRL1_PHY_MODE_CTRL_ BIT(0)
+#define VR_MII_AN_CTRL 0x8001
+#define VR_MII_AN_CTRL_MII_CTRL_ BIT(8)
+#define VR_MII_AN_CTRL_SGMII_LINK_STS_ BIT(4)
+#define VR_MII_AN_CTRL_TX_CONFIG_ BIT(3)
+#define VR_MII_AN_CTRL_1000BASE_X_ (0)
+#define VR_MII_AN_CTRL_SGMII_MODE_ (2)
+#define VR_MII_AN_CTRL_QSGMII_MODE_ (3)
+#define VR_MII_AN_CTRL_PCS_MODE_SHIFT_ (1)
+#define VR_MII_AN_CTRL_PCS_MODE_MASK_ GENMASK(2, 1)
+#define VR_MII_AN_CTRL_MII_AN_INTR_EN_ BIT(0)
+#define VR_MII_AN_INTR_STS 0x8002
+#define VR_MII_AN_INTR_STS_LINK_UP_ BIT(4)
+#define VR_MII_AN_INTR_STS_SPEED_MASK_ GENMASK(3, 2)
+#define VR_MII_AN_INTR_STS_1000_MBPS_ BIT(3)
+#define VR_MII_AN_INTR_STS_100_MBPS_ BIT(2)
+#define VR_MII_AN_INTR_STS_10_MBPS_ (0)
+#define VR_MII_AN_INTR_STS_FDX_ BIT(1)
+#define VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR_ BIT(0)
+
+#define VR_MII_LINK_TIMER_CTRL 0x800A
+#define VR_MII_DIG_STS 0x8010
+#define VR_MII_DIG_STS_PSEQ_STATE_MASK_ GENMASK(4, 2)
+#define VR_MII_DIG_STS_PSEQ_STATE_POS_ (2)
+#define VR_MII_GEN2_4_MPLL_CTRL0 0x8078
+#define VR_MII_MPLL_CTRL0_REF_CLK_DIV2_ BIT(12)
+#define VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_ BIT(4)
+#define VR_MII_GEN2_4_MPLL_CTRL1 0x8079
+#define VR_MII_MPLL_CTRL1_MPLL_MULTIPLIER_ GENMASK(6, 0)
+#define VR_MII_BAUD_RATE_3P125GBPS (3125)
+#define VR_MII_BAUD_RATE_1P25GBPS (1250)
+#define VR_MII_MPLL_MULTIPLIER_125 (125)
+#define VR_MII_MPLL_MULTIPLIER_100 (100)
+#define VR_MII_MPLL_MULTIPLIER_50 (50)
+#define VR_MII_MPLL_MULTIPLIER_40 (40)
+#define VR_MII_GEN2_4_MISC_CTRL1 0x809A
+#define VR_MII_CTRL1_RX_RATE_0_MASK_ GENMASK(3, 2)
+#define VR_MII_CTRL1_RX_RATE_0_SHIFT_ (2)
+#define VR_MII_CTRL1_TX_RATE_0_MASK_ GENMASK(1, 0)
+#define VR_MII_MPLL_BAUD_CLK (0)
+#define VR_MII_MPLL_BAUD_CLK_DIV_2 (1)
+#define VR_MII_MPLL_BAUD_CLK_DIV_4 (2)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
@@ -906,12 +990,28 @@ struct lan743x_rx {
struct sk_buff *skb_head, *skb_tail;
};
+/* SGMII Link Speed Duplex status */
+enum lan743x_sgmii_lsd {
+ POWER_DOWN = 0,
+ LINK_DOWN,
+ ANEG_BUSY,
+ LINK_10HD,
+ LINK_10FD,
+ LINK_100HD,
+ LINK_100FD,
+ LINK_1000_MASTER,
+ LINK_1000_SLAVE,
+ LINK_2500_MASTER,
+ LINK_2500_SLAVE
+};
+
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
@@ -931,12 +1031,16 @@ struct lan743x_adapter {
spinlock_t eth_syslock_spinlock;
bool eth_syslock_en;
u32 eth_syslock_acquire_cnt;
+ struct mutex sgmii_rw_lock;
+ /* SGMII Link Speed & Duplex status */
+ enum lan743x_sgmii_lsd sgmii_lsd;
u8 max_tx_channels;
u8 used_tx_channels;
u8 max_vector_count;
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
+ u32 hw_cfg;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1049,5 +1153,7 @@ struct lan743x_rx_buffer_info {
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, u16 timeout);
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index 005e56ea5da1..5893770bfd94 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
unsigned int vid,
enum macaccess_entry_type type)
{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
@@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
lan966x, ANA_MACACCESS);
- return lan966x_mac_wait_for_completion(lan966x);
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
}
/* The mask of the front ports is encoded inside the mac parameter via a call
@@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}
-int lan966x_mac_forget(struct lan966x *lan966x,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid,
- enum macaccess_entry_type type)
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
{
+ lockdep_assert_held(&lan966x->mac_lock);
+
lan966x_mac_select(lan966x, mac, vid);
/* Issue a forget command */
@@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x,
return lan966x_mac_wait_for_completion(lan966x);
}
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
{
return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
@@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
{
struct lan966x_mac_entry *mac_entry;
- mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
if (!mac_entry)
return NULL;
@@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
struct lan966x_mac_entry *res = NULL;
struct lan966x_mac_entry *mac_entry;
- spin_lock(&lan966x->mac_lock);
list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
if (mac_entry->vid == vid &&
ether_addr_equal(mac, mac_entry->mac) &&
@@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
break;
}
}
- spin_unlock(&lan966x->mac_lock);
return res;
}
@@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
{
struct lan966x_mac_entry *mac_entry;
- if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ spin_lock(&lan966x->mac_lock);
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+ spin_unlock(&lan966x->mac_lock);
return 0;
+ }
/* In case the entry already exists, don't add it again to SW,
* just update HW, but we need to look in the actual HW because
@@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
* add the entry but without the extern_learn flag.
*/
mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
- if (mac_entry)
- return lan966x_mac_learn(lan966x, port->chip_port,
- addr, vid, ENTRYTYPE_LOCKED);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ goto mac_learn;
+ }
mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
- if (!mac_entry)
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
return -ENOMEM;
+ }
- spin_lock(&lan966x->mac_lock);
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
- lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+mac_learn:
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+
return 0;
}
@@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
list) {
if (mac_entry->vid == vid &&
ether_addr_equal(addr, mac_entry->mac)) {
- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
- ENTRYTYPE_LOCKED);
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
@@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x)
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
list) {
- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
- ENTRYTYPE_LOCKED);
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid, ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
@@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
{
struct lan966x_mac_entry *mac_entry, *tmp;
unsigned char mac[ETH_ALEN] __aligned(2);
+ struct list_head mac_deleted_entries;
u32 dest_idx;
u32 column;
u16 vid;
+ INIT_LIST_HEAD(&mac_deleted_entries);
+
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
bool found = false;
@@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
}
if (!found) {
- /* Notify the bridge that the entry doesn't exist
- * anymore in the HW and remove the entry from the SW
- * list
- */
- lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- mac_entry->mac, mac_entry->vid,
- lan966x->ports[mac_entry->port_index]->dev);
-
list_del(&mac_entry->list);
- kfree(mac_entry);
+ /* Move the entry from the SW list to a temporary list so
+ * that it can be deleted later, outside the mac_lock.
+ */
+ list_add_tail(&mac_entry->list, &mac_deleted_entries);
}
}
spin_unlock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+ /* Notify the bridge that the entry no longer exists
+ * in the HW.
+ */
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ lan966x->ports[mac_entry->port_index]->dev);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
/* Now go to the list of columns and see if any entry was not in the SW
* list, then that means that the entry is new so it needs to notify the
* bridge.
@@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
continue;
+ spin_lock(&lan966x->mac_lock);
+ mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ continue;
+ }
+
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
- if (!mac_entry)
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
return;
+ }
mac_entry->row = row;
-
- spin_lock(&lan966x->mac_lock);
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
@@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
lan966x, ANA_MACTINDX);
while (1) {
+ spin_lock(&lan966x->mac_lock);
lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
ANA_MACACCESS_MAC_TABLE_CMD,
lan966x, ANA_MACACCESS);
@@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
stop = false;
if (column == LAN966X_MAC_COLUMNS - 1 &&
- index == 0 && stop)
+ index == 0 && stop) {
+ spin_unlock(&lan966x->mac_lock);
break;
+ }
entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+ spin_unlock(&lan966x->mac_lock);
/* Once all the columns are read process them */
if (column == LAN966X_MAC_COLUMNS - 1) {
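The lan966x_mac_irq_process() change above follows a common pattern: entries are unlinked from the shared list under the spinlock and parked on a local list, and the notifier calls and kfree() happen only after the lock is dropped. A generic sketch with illustrative names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head list;
};

bool should_remove(struct entry *e);	/* hypothetical predicate */
void notify_removed(struct entry *e);	/* hypothetical notifier, may sleep */

static void reap_entries(spinlock_t *lock, struct list_head *entries)
{
	struct entry *e, *tmp;
	LIST_HEAD(deferred);

	spin_lock(lock);
	list_for_each_entry_safe(e, tmp, entries, list)
		if (should_remove(e))
			list_move_tail(&e->list, &deferred);
	spin_unlock(lock);

	/* Safe here to call code that must not run under the spinlock. */
	list_for_each_entry_safe(e, tmp, &deferred, list) {
		notify_removed(e);
		list_del(&e->list);
		kfree(e);
	}
}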
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1d6e3b641b2e..d928b75f3780 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -710,7 +710,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
@@ -718,10 +718,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (lan966x->fdma)
devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
- if (lan966x->ptp_irq)
+ if (lan966x->ptp_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
- if (lan966x->ptp_ext_irq)
+ if (lan966x->ptp_ext_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
@@ -1049,7 +1049,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 5edc8b7176c8..ec07f7d0528c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -394,15 +394,13 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
struct sparx5 *spx5 = port->sparx5;
u16 pgid_idx, vid;
u32 mact_entry;
+ bool is_host;
int res, err;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
- if (netif_is_bridge_master(v->obj.orig_dev)) {
- sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
- return 0;
- }
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
/* When VLAN unaware the vlan value is not parsed and we receive vid 0.
* Fall back to bridge vid 1.
@@ -419,17 +417,33 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
/* MC_IDX starts after the port masks in the PGID table */
pgid_idx += SPX5_PORTS;
- sparx5_pgid_update_mask(port, pgid_idx, true);
+
+ if (is_host)
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid_idx));
+ else
+ sparx5_pgid_update_mask(port, pgid_idx, true);
+
} else {
err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
if (err) {
netdev_warn(dev, "multicast pgid table full\n");
return err;
}
- sparx5_pgid_update_mask(port, pgid_idx, true);
+
+ if (is_host)
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid_idx));
+ else
+ sparx5_pgid_update_mask(port, pgid_idx, true);
+
err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
+
if (err) {
netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
+ sparx5_pgid_free(spx5, pgid_idx);
sparx5_pgid_update_mask(port, pgid_idx, false);
return err;
}
@@ -466,17 +480,12 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
u16 pgid_idx, vid;
- u32 mact_entry, res, pgid_entry[3];
- int err;
+ u32 mact_entry, res, pgid_entry[3], misc_cfg;
+ bool host_ena;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
- if (netif_is_bridge_master(v->obj.orig_dev)) {
- sparx5_mact_forget(spx5, v->addr, v->vid);
- return 0;
- }
-
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
@@ -489,15 +498,21 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
/* MC_IDX starts after the port masks in the PGID table */
pgid_idx += SPX5_PORTS;
- sparx5_pgid_update_mask(port, pgid_idx, false);
+
+ if (netif_is_bridge_master(v->obj.orig_dev))
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid_idx));
+ else
+ sparx5_pgid_update_mask(port, pgid_idx, false);
+
+ misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
+ host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);
sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
- if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) {
- /* No ports are in MC group. Remove entry */
- err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
- if (err)
- return err;
- }
+ if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
+ /* No ports or CPU are in MC group. Remove entry */
+ return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
}
return 0;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 41ecd156e95f..4a6efe6ada08 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -348,6 +348,7 @@ struct gdma_context {
struct completion eq_test_event;
u32 test_event_eq_id;
+ bool is_pf;
void __iomem *bar0_va;
void __iomem *shm_base;
void __iomem *db_page_base;
@@ -469,6 +470,15 @@ struct gdma_eqe {
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18
+#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
+#define GDMA_PF_REG_SHM_OFF 0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108
+
+#define MANA_PF_DEVICE_ID 0x00B9
+#define MANA_VF_DEVICE_ID 0x00BA
+
struct gdma_posted_wqe_info {
u32 wqe_size_in_bu;
};
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 49b85ca578b0..5f9240182351 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -18,7 +18,24 @@ static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
return readq(g->bar0_va + offset);
}
-static void mana_gd_init_registers(struct pci_dev *pdev)
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ void __iomem *sriov_base_va;
+ u64 sriov_base_off;
+
+ gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+ gc->db_page_base = gc->bar0_va +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
+ sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+ sriov_base_va = gc->bar0_va + sriov_base_off;
+ gc->shm_base = sriov_base_va +
+ mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -30,6 +47,16 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ if (gc->is_pf)
+ mana_gd_init_pf_regs(pdev);
+ else
+ mana_gd_init_vf_regs(pdev);
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1304,6 +1331,11 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_gd_remove_irqs(pdev);
}
+static bool mana_is_pf(unsigned short dev_id)
+{
+ return dev_id == MANA_PF_DEVICE_ID;
+}
+
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
@@ -1340,10 +1372,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bar0_va)
goto free_gc;
+ gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
-
err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
@@ -1438,7 +1470,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
#endif
static const struct pci_device_id mana_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
{ }
};
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 078d6a5a0768..543a5d5c304f 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -158,6 +158,14 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
hwc->rxq->msg_buf->gpa_mkey = val;
hwc->txq->msg_buf->gpa_mkey = val;
break;
+
+ case HWC_INIT_DATA_PF_DEST_RQ_ID:
+ hwc->pf_dest_vrq_id = val;
+ break;
+
+ case HWC_INIT_DATA_PF_DEST_CQ_ID:
+ hwc->pf_dest_vrcq_id = val;
+ break;
}
break;
@@ -773,10 +781,13 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
const void *req, u32 resp_len, void *resp)
{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
struct hwc_work_request *tx_wr;
struct hwc_wq *txq = hwc->txq;
struct gdma_req_hdr *req_msg;
struct hwc_caller_ctx *ctx;
+ u32 dest_vrcq = 0;
+ u32 dest_vrq = 0;
u16 msg_id;
int err;
@@ -803,7 +814,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
tx_wr->msg_size = req_len;
- err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
+ if (gc->is_pf) {
+ dest_vrq = hwc->pf_dest_vrq_id;
+ dest_vrcq = hwc->pf_dest_vrcq_id;
+ }
+
+ err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
if (err) {
dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
index 31c6e83c454a..6a757a6e2732 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.h
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h
@@ -20,6 +20,8 @@
#define HWC_INIT_DATA_MAX_NUM_CQS 7
#define HWC_INIT_DATA_PDID 8
#define HWC_INIT_DATA_GPA_MKEY 9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -178,6 +180,9 @@ struct hw_channel_context {
struct semaphore sema;
struct gdma_resource inflight_msg_res;
+ u32 pf_dest_vrq_id;
+ u32 pf_dest_vrcq_id;
+
struct hwc_caller_ctx *caller_ctx;
};
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index d36405af9432..d58be64374c8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -53,12 +53,14 @@ struct mana_stats_rx {
u64 bytes;
u64 xdp_drop;
u64 xdp_tx;
+ u64 xdp_redirect;
struct u64_stats_sync syncp;
};
struct mana_stats_tx {
u64 packets;
u64 bytes;
+ u64 xdp_xmit;
struct u64_stats_sync syncp;
};
@@ -311,6 +313,8 @@ struct mana_rxq {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
struct page *xdp_save_page;
+ bool xdp_flush;
+ int xdp_rc; /* XDP redirect return code */
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
@@ -374,6 +378,7 @@ struct mana_port_context {
unsigned int num_queues;
mana_handle_t port_handle;
+ mana_handle_t pf_filter_handle;
u16 port_idx;
@@ -395,6 +400,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
@@ -420,6 +427,12 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+
+ /* Privileged commands for the PF mode */
+ MANA_REGISTER_FILTER = 0x28000,
+ MANA_DEREGISTER_FILTER = 0x28001,
+ MANA_REGISTER_HW_PORT = 0x28003,
+ MANA_DEREGISTER_HW_PORT = 0x28004,
};
/* Query Device Configuration */
@@ -547,6 +560,63 @@ struct mana_cfg_rx_steer_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ u16 attached_gfid;
+ u8 is_pf_default_vport;
+ u8 reserved1;
+ u8 allow_all_ether_types;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u8 mac_addr[6];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+ u16 reserved5;
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
#define MANA_MAX_NUM_QUEUES 64
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 1d2f948b5c00..421fd39ff3a8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -32,9 +32,55 @@ void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
}
+static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
+ u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_set_queue_mapping(skb, q_idx);
+
+ mana_xdp_tx(skb, ndev);
+
+ return 0;
+}
+
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct mana_stats_tx *tx_stats;
+ int i, count = 0;
+ u16 q_idx;
+
+ if (unlikely(!apc->port_is_up))
+ return 0;
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &apc->tx_qp[q_idx].txq.stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
+
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
+ struct mana_stats_rx *rx_stats;
struct bpf_prog *prog;
u32 act = XDP_PASS;
@@ -49,12 +95,30 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
act = bpf_prog_run_xdp(prog, xdp);
+ rx_stats = &rxq->stats;
+
switch (act) {
case XDP_PASS:
case XDP_TX:
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
+ if (!rxq->xdp_rc) {
+ rxq->xdp_flush = true;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+ rx_stats->xdp_redirect++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
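The new xdp_redirect and xdp_xmit counters above are protected with a u64_stats_sync, which makes 64-bit counter updates tear-free on 32-bit machines at negligible cost on 64-bit ones. A minimal sketch (illustrative names; the syncp would be initialised with u64_stats_init() at setup time):

#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void my_stats_add(struct my_stats *s, unsigned int pkts,
			 unsigned int bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}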
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index b1d773823232..9259a74eca40 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -6,6 +6,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/filter.h>
#include <linux/mm.h>
#include <net/checksum.h>
@@ -382,6 +383,7 @@ static const struct net_device_ops mana_devops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
.ndo_bpf = mana_bpf,
+ .ndo_xdp_xmit = mana_xdp_xmit,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -446,6 +448,119 @@ static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
return 0;
}
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_register_hw_vport_resp resp = {};
+ struct mana_register_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.attached_gfid = 1;
+ req.is_pf_default_vport = 1;
+ req.allow_all_ether_types = 1;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->port_handle = resp.hw_vport_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_deregister_hw_vport_resp resp = {};
+ struct mana_deregister_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.hw_vport_handle = apc->port_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+ struct mana_register_filter_resp resp = {};
+ struct mana_register_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->pf_filter_handle = resp.filter_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+ struct mana_deregister_filter_resp resp = {};
+ struct mana_deregister_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.filter_handle = apc->pf_filter_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
u16 *max_num_vports)
@@ -1007,6 +1122,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
+ if (act == XDP_REDIRECT && !rxq->xdp_rc)
+ return;
+
if (act != XDP_PASS && act != XDP_TX)
goto drop_xdp;
@@ -1162,11 +1280,14 @@ drop:
static void mana_poll_rx_cq(struct mana_cq *cq)
{
struct gdma_comp *comp = cq->gdma_comp_buf;
+ struct mana_rxq *rxq = cq->rxq;
int comp_read, i;
comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+ rxq->xdp_flush = false;
+
for (i = 0; i < comp_read; i++) {
if (WARN_ON_ONCE(comp[i].is_sq))
return;
@@ -1175,8 +1296,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
return;
- mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
+ mana_process_rx_cqe(rxq, cq, &comp[i]);
}
+
+ if (rxq->xdp_flush)
+ xdp_do_flush();
}
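The redirect plumbing spans two places: mana_rx_skb() returns early when act == XDP_REDIRECT and rxq->xdp_rc is zero (the frame was successfully handed off), and mana_poll_rx_cq() issues a single xdp_do_flush() per poll rather than one per packet. The producer side is not visible in this hunk; a plausible shape, assuming rxq->xdp_rc stores xdp_do_redirect()'s return value:

	static void mana_xdp_redirect_sketch(struct net_device *ndev,
					     struct mana_rxq *rxq,
					     struct xdp_buff *xdp,
					     struct bpf_prog *prog)
	{
		rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
		if (!rxq->xdp_rc)
			rxq->xdp_flush = true;	/* flushed once at the end of the poll */
	}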
static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
@@ -1653,6 +1777,7 @@ out:
static void mana_destroy_vport(struct mana_port_context *apc)
{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_rxq *rxq;
u32 rxq_idx;
@@ -1666,6 +1791,9 @@ static void mana_destroy_vport(struct mana_port_context *apc)
}
mana_destroy_txq(apc);
+
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_hw_vport(apc);
}
static int mana_create_vport(struct mana_port_context *apc,
@@ -1676,6 +1804,12 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_hw_vport(apc);
+ if (err)
+ return err;
+ }
+
err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
if (err)
return err;
@@ -1755,6 +1889,7 @@ reset_apc:
int mana_alloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
int err;
err = mana_create_vport(apc, ndev);
@@ -1781,6 +1916,12 @@ int mana_alloc_queues(struct net_device *ndev)
if (err)
goto destroy_vport;
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_filter(apc);
+ if (err)
+ goto destroy_vport;
+ }
+
mana_chn_setxdp(apc, mana_xdp_get(apc));
return 0;
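In PF mode the new calls are kept symmetric: the hardware vPort is registered before mana_cfg_vport() and deregistered only after the TX queues are destroyed, while the MAC filter is registered once the queues exist and is removed first on teardown, before traffic is quiesced.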
@@ -1825,6 +1966,7 @@ int mana_attach(struct net_device *ndev)
static int mana_dealloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_txq *txq;
int i, err;
@@ -1833,6 +1975,9 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_filter(apc);
+
/* No packet can be transmitted now since apc->port_is_up is false.
* There is still a tiny chance that mana_poll_tx_cq() can re-enable
* a txq because it may not timely see apc->port_is_up being cleared
@@ -1915,6 +2060,7 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
apc->port_handle = INVALID_MANA_HANDLE;
+ apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
ndev->netdev_ops = &mana_devops;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index e13f2453eabb..c530db76880f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -23,7 +23,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues * 6;
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -50,6 +50,8 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_%d_xdp_tx", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_redirect", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
@@ -57,6 +59,8 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "tx_%d_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_xdp_xmit", i);
+ p += ETH_GSTRING_LEN;
}
}
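The string table must stay in exactly the order mana_get_ethtool_stats() writes values further down: per RX queue that is now packets, bytes, xdp_drop, xdp_tx and xdp_redirect (5 entries) and per TX queue packets, bytes and xdp_xmit (3 entries), which is why mana_get_sset_count() moves from num_queues * 6 to num_queues * 8.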
@@ -70,6 +74,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_redirect;
+ u64 xdp_xmit;
u64 xdp_drop;
u64 xdp_tx;
int q, i = 0;
@@ -89,12 +95,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
bytes = rx_stats->bytes;
xdp_drop = rx_stats->xdp_drop;
xdp_tx = rx_stats->xdp_tx;
+ xdp_redirect = rx_stats->xdp_redirect;
} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
data[i++] = xdp_tx;
+ data[i++] = xdp_redirect;
}
for (q = 0; q < num_queues; q++) {
@@ -104,10 +112,12 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
}
}
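The reader above takes no lock; it relies on the datapath publishing counters under the u64_stats seqcount. The writer side is not part of this hunk, but it would look roughly like the sketch below (the xdp_xmit field name follows the new counter; the helper itself is illustrative):

	static void mana_count_tx_sketch(struct mana_stats_tx *tx_stats,
					 unsigned int bytes, unsigned int n_xdp)
	{
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += bytes;
		tx_stats->xdp_xmit += n_xdp;
		u64_stats_update_end(&tx_stats->syncp);
	}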
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a762e4b..19009a6bd33a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -77,7 +74,7 @@ static void moxart_mac_free_memory(struct net_device *ndev)
int i;
for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
@@ -147,11 +144,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +169,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -240,7 +234,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +288,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +352,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +373,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +482,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
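The common thread in the moxart changes is that the DMA API is now called with the device that actually performs DMA (the platform device), rather than &ndev->dev, which carries no DMA configuration; the MAC address is read with platform_get_ethdev_address() and falls back to a random address, which is why the -EADDRNOTAVAIL checks could be dropped. For reference, the mapping pattern after the patch (illustrative, not an additional change):

	dma_addr_t map;

	map = dma_map_single(&priv->pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, map))
		netdev_err(ndev, "DMA mapping error\n");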
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 8da7e25a47c9..306026e6aa11 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1860,16 +1860,20 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ocelot->num_stats; i++)
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
+ }
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * ocelot->num_stats;
+ unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int err, j;
@@ -1877,9 +1881,8 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- region->offset, region->buf,
- region->count);
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
if (err)
return err;
@@ -1906,13 +1909,13 @@ static void ocelot_check_stats_work(struct work_struct *work)
stats_work);
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
err = ocelot_port_update_stats(ocelot, i);
if (err)
break;
}
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1925,16 +1928,22 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
/* check and update now */
err = ocelot_port_update_stats(ocelot, port);
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1943,10 +1952,16 @@ EXPORT_SYMBOL(ocelot_get_ethtool_stats);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ int i, num_stats = 0;
+
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return ocelot->num_stats;
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
}
EXPORT_SYMBOL(ocelot_get_sset_count);
@@ -1958,8 +1973,11 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->stats_regions);
- for (i = 0; i < ocelot->num_stats; i++) {
- if (region && ocelot->stats_layout[i].offset == last + 1) {
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -1967,12 +1985,12 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- region->offset = ocelot->stats_layout[i].offset;
+ region->base = ocelot->stats_layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot->stats_layout[i].offset;
+ last = ocelot->stats_layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
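With counters identified by absolute register address (.reg) instead of a word offset, two counters are adjacent when their addresses differ by 4, so each run of adjacent, implemented counters is coalesced into one region that ocelot_port_update_stats() fetches with a single ocelot_bulk_read() starting at region->base.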
@@ -3340,7 +3358,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@ -3353,20 +3370,17 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->num_stats = 0;
- for_each_stat(ocelot, stat)
- ocelot->num_stats++;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
sizeof(u64), GFP_KERNEL);
if (!ocelot->stats)
return -ENOMEM;
- mutex_init(&ocelot->stats_lock);
+ spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
+ mutex_init(&ocelot->tas_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
@@ -3510,7 +3524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index 083fddd263ec..8e3894cf5f7c 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -94,19 +94,18 @@ static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}
+static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
+{
+ return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+}
+
static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
- unsigned long timeout;
u32 safe;
- timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
- do {
- safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
- if (safe & BIT(chan))
- return 0;
- } while (time_after(jiffies, timeout));
-
- return -ETIMEDOUT;
+ return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
+ safe & BIT(chan), 0,
+ OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
}
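The removed loop's while (time_after(jiffies, timeout)) condition is false until the deadline has already passed, so it effectively polled the CH_SAFE register only once; readx_poll_timeout_atomic() from <linux/iopoll.h> does the re-read, delay and timeout bookkeeping itself. Its general form, for reference:

	err = readx_poll_timeout_atomic(read_op, read_arg, val,
					cond_on_val, delay_us, timeout_us);

returning 0 once the condition holds and -ETIMEDOUT otherwise.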
static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 5e6136e80282..330d30841cdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -725,37 +725,42 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
+ u64 *s;
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
+ spin_lock(&ocelot->stats_lock);
+
+ s = &ocelot->stats[port * OCELOT_NUM_STATS];
/* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
stats->rx_dropped = dev->stats.rx_dropped;
/* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
}
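ndo_get_stats64 can be called from atomic context, so the stats lock becomes a spinlock and the handler now reports the counters the periodic stats worker has already cached in ocelot->stats instead of issuing register reads of its own. Reading one cached counter then amounts to (names from the patch):

	spin_lock(&ocelot->stats_lock);
	rx_bytes = ocelot->stats[port * OCELOT_NUM_STATS + OCELOT_STAT_RX_OCTETS];
	spin_unlock(&ocelot->stats_lock);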
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 87ad2137ba06..09c703efe946 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -72,6 +72,10 @@ int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ if (ocelot->ops->tas_clock_adjust)
+ ocelot->ops->tas_clock_adjust(ocelot);
+
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_settime64);
@@ -105,6 +109,9 @@ int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ if (ocelot->ops->tas_clock_adjust)
+ ocelot->ops->tas_clock_adjust(ocelot);
} else {
/* Fall back using ocelot_ptp_settime64 which is not exact. */
struct timespec64 ts;
@@ -117,6 +124,7 @@ int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ocelot_ptp_settime64(ptp, &ts);
}
+
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_adjtime);
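The new hook lets the time-aware shaper recompute its gate base times whenever the PTP clock is stepped (a settime, or an adjtime large enough to fall back to settime). Its declaration is not part of this diff; presumably it is an optional member of struct ocelot_ops along the lines of:

	void (*tas_clock_adjust)(struct ocelot *ocelot);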
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 961f803aca19..9c488953f541 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -96,101 +96,379 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
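The layout is now a fixed-size array indexed by the OCELOT_STAT_* enum rather than a variable-length list terminated by OCELOT_STAT_END, so a switch that lacks a given counter simply leaves that slot zero-initialized. Consumers skip the holes by checking for an empty name, as the core hunks above do:

	for (i = 0; i < OCELOT_NUM_STATS; i++) {
		if (ocelot->stats_layout[i].name[0] == '\0')
			continue;	/* counter not implemented on this switch */
		/* use ocelot->stats_layout[i].reg / .name */
	}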
static void ocelot_pll5_init(struct ocelot *ocelot)
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9cf82ecf191c 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000214),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000218),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00021c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000220),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000234),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 61497c3e4cfb..971dde8c3286 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2692,7 +2692,7 @@ again:
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
- cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cum_len = -skb_tcp_all_headers(skb);
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
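skb_tcp_all_headers() (from <linux/tcp.h>) is simply a helper for the removed expression, i.e. skb_transport_offset(skb) + tcp_hdrlen(skb), so the negative cum_len initialization is unchanged in value.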
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 50bca486a244..9aae7f1eb5d2 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
I. Board Compatibility
This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
-It also works with other chips in in the DP83810 series.
+It also works with other chips in the DP83810 series.
II. Board-specific settings
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 0c0d127906dd..09a89e72f904 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,28 +32,4 @@ config S2IO
To compile this driver as a module, choose M here. The module
will be called s2io.
-config VXGE
- tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
- depends on PCI
- help
- This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter. These were originally released from
- Neterion, which was later acquired by Exar. So, the adapters might be
- labeled as either one, depending on its age.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/neterion/vxge.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called vxge.
-
-config VXGE_DEBUG_TRACE_ALL
- bool "Enabling All Debug trace statements in driver"
- default n
- depends on VXGE
- help
- Say Y here if you want to enabling all the debug trace statements in
- the vxge driver. By default only few debug trace statements are
- enabled.
-
endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
index 87ede8a47bb8..de98b4e6eff9 100644
--- a/drivers/net/ethernet/neterion/Makefile
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_S2IO) += s2io.o
-obj-$(CONFIG_VXGE) += vxge/
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6dd451adc331..30f955efa830 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2156,7 +2156,7 @@ static int verify_xena_quiescence(struct s2io_nic *sp)
/*
* In PCI 33 mode, the P_PLL is not used, and therefore,
- * the the P_PLL_LOCK bit in the adapter_status register will
+ * the P_PLL_LOCK bit in the adapter_status register will
* not be asserted.
*/
if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
@@ -3817,7 +3817,7 @@ static irqreturn_t s2io_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
struct pci_dev *pdev = sp->pdev;
@@ -5492,7 +5492,7 @@ s2io_ethtool_gringparam(struct net_device *dev,
}
/**
- * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
+ * s2io_ethtool_getpause_data -Pause frame generation and reception.
* @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
@@ -7449,7 +7449,7 @@ aggregate:
* @link : inidicates whether link is UP/DOWN.
* Description:
* This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is is down or up. This is called by the Alarm
+ * status of the NIC is down or up. This is called by the Alarm
* interrupt handler whenever a link change interrupt comes up.
* Return value:
* void.
@@ -7732,7 +7732,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
* Setting the device configuration parameters.
* Most of these parameters can be specified by the user during
* module insertion as they are module loadable parameters. If
- * these parameters are not not specified during load time, they
+ * these parameters are not specified during load time, they
* are initialized with default values.
*/
config = &sp->config;
diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
deleted file mode 100644
index 0820e81ca7fb..000000000000
--- a/drivers/net/ethernet/neterion/vxge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
-# Virtualized Server Adapter linux driver
-
-obj-$(CONFIG_VXGE) += vxge.o
-
-vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
deleted file mode 100644
index a3204a7ef750..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ /dev/null
@@ -1,5099 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/vmalloc.h>
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
- status = __vxge_hw_vpath_stats_access(vpath, \
- VXGE_HW_STATS_OP_READ, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-static void
-vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
-{
- u64 val64;
-
- val64 = readq(&vp_reg->rxmac_vcfg0);
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- writeq(val64, &vp_reg->rxmac_vcfg0);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-}
-
-/*
- * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
- */
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_virtualpath *vpath;
- u64 val64, rxd_count, rxd_spat;
- int count = 0, total_count = 0;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
-
- vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-
- /* Check that the ring controller for this vpath has enough free RxDs
- * to send frames to the host. This is done by reading the
- * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
- * RXD_SPAT value for the vpath.
- */
- val64 = readq(&vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
- /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
- * leg room.
- */
- rxd_spat *= 2;
-
- do {
- mdelay(1);
-
- rxd_count = readq(&vp_reg->prc_rxd_doorbell);
-
- /* Check that the ring controller for this vpath does
- * not have any frame in its pipeline.
- */
- val64 = readq(&vp_reg->frm_in_progress_cnt);
- if ((rxd_count <= rxd_spat) || (val64 > 0))
- count = 0;
- else
- count++;
- total_count++;
- } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
- (total_count < VXGE_HW_MAX_POLLING_COUNT));
-
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
- __func__);
-
- return total_count;
-}
-
-/* vxge_hw_device_wait_receive_idle - This function waits until all frames
- * stored in the frame buffer for each vpath assigned to the given
- * function (hldev) have been sent to the host.
- */
-void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
-{
- int i, total_count = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- break;
- }
-}
-
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
- u64 val64;
- u32 i = 0;
-
- udelay(10);
-
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- udelay(100);
- } while (++i <= 9);
-
- i = 0;
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- mdelay(1);
- } while (++i <= max_millis);
-
- return VXGE_HW_FAIL;
-}
-
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- return __vxge_hw_device_register_poll(addr, mask, max_millis);
-}
-
-static enum vxge_hw_status
-vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
- u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
- u64 *steer_ctrl)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- enum vxge_hw_status status;
- u64 val64;
- u32 retry = 0, max_retry = 3;
-
- spin_lock(&vpath->lock);
- if (!vpath->vp_open) {
- spin_unlock(&vpath->lock);
- max_retry = 100;
- }
-
- writeq(*data0, &vp_reg->rts_access_steer_data0);
- writeq(*data1, &vp_reg->rts_access_steer_data1);
- wmb();
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- *steer_ctrl;
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- /* The __vxge_hw_device_register_poll can udelay for a significant
- * amount of time, blocking other process from the CPU. If it delays
- * for ~5secs, a NMI error can occur. A way around this is to give up
- * the processor via msleep, but this is not allowed is under lock.
- * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
- * 1sec and sleep for 10ms until the firmware operation has completed
- * or timed-out.
- */
- while ((status != VXGE_HW_OK) && retry++ < max_retry) {
- if (!vpath->vp_open)
- msleep(20);
- status = __vxge_hw_device_register_poll(
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- }
-
- if (status != VXGE_HW_OK)
- goto out;
-
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- *data0 = readq(&vp_reg->rts_access_steer_data0);
- *data1 = readq(&vp_reg->rts_access_steer_data1);
- *steer_ctrl = val64;
- } else
- status = VXGE_HW_FAIL;
-
-out:
- if (vpath->vp_open)
- spin_unlock(&vpath->lock);
- return status;
-}
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_READ,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- return status;
-}
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- u32 ret;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
- goto exit;
- }
-
- ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
- if (ret != 1) {
- vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
- __func__, ret);
- status = VXGE_HW_FAIL;
- }
-
-exit:
- return status;
-}
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int ret_code, sec_code;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- /* send upgrade start command */
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_START,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
- __func__);
- return status;
- }
-
- /* Transfer fw image to adapter 16 bytes at a time */
- for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
- steer_ctrl = 0;
-
- /* The next 128bits of fwdata to be loaded onto the adapter */
- data0 = *((u64 *)fwdata);
- data1 = *((u64 *)fwdata + 1);
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_SEND,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
- __func__);
- goto out;
- }
-
- ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
- switch (ret_code) {
- case VXGE_HW_FW_UPGRADE_OK:
- /* All OK, send next 16 bytes. */
- break;
- case VXGE_FW_UPGRADE_BYTES2SKIP:
- /* skip bytes in the stream */
- fwdata += (data0 >> 8) & 0xFFFFFFFF;
- break;
- case VXGE_HW_FW_UPGRADE_DONE:
- goto out;
- case VXGE_HW_FW_UPGRADE_ERR:
- sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
- switch (sec_code) {
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
- printk(KERN_ERR
- "corrupted data from .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
- printk(KERN_ERR "invalid .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
- printk(KERN_ERR "buffer overflow\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
- printk(KERN_ERR "failed to flash the image\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
- printk(KERN_ERR
- "generic error. Unknown error type\n");
- break;
- default:
- printk(KERN_ERR "Unknown error of type %d\n",
- sec_code);
- break;
- }
- status = VXGE_HW_FAIL;
- goto out;
- default:
- printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
- status = VXGE_HW_FAIL;
- goto out;
- }
- /* point to next 16 bytes */
- fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
- }
-out:
- return status;
-}
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *img)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int i;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_EPROM_REV,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- break;
-
- img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
- img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
- img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
- img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_channel_free - Free memory allocated for channel
- * This function deallocates memory from the channel and various arrays
- * in the channel
- */
-static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
-{
- kfree(channel->work_arr);
- kfree(channel->free_arr);
- kfree(channel->reserve_arr);
- kfree(channel->orig_arr);
- kfree(channel);
-}
-
-/*
- * __vxge_hw_channel_initialize - Initialize a channel
- * This function initializes a channel by properly setting the
- * various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
-{
- u32 i;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = channel->vph->vpath;
-
- if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
- for (i = 0; i < channel->length; i++)
- channel->orig_arr[i] = channel->reserve_arr[i];
- }
-
- switch (channel->type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- vpath->fifoh = (struct __vxge_hw_fifo *)channel;
- channel->stats = &((struct __vxge_hw_fifo *)
- channel)->stats->common_stats;
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- vpath->ringh = (struct __vxge_hw_ring *)channel;
- channel->stats = &((struct __vxge_hw_ring *)
- channel)->stats->common_stats;
- break;
- default:
- break;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_channel_reset - Resets a channel
- * This function resets a channel by properly setting the various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
-{
- u32 i;
-
- for (i = 0; i < channel->length; i++) {
- if (channel->reserve_arr != NULL)
- channel->reserve_arr[i] = channel->orig_arr[i];
- if (channel->free_arr != NULL)
- channel->free_arr[i] = NULL;
- if (channel->work_arr != NULL)
- channel->work_arr[i] = NULL;
- }
- channel->free_ptr = channel->length;
- channel->reserve_ptr = channel->length;
- channel->reserve_top = 0;
- channel->post_index = 0;
- channel->compl_index = 0;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_pci_e_init
- * Initialize certain PCI/PCI-X configuration registers
- * with recommended values. Save config space for future hw resets.
- */
-static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
-{
- u16 cmd = 0;
-
-	/* Set Parity Error Response and SERR in the PCI command register. */
- pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
- cmd |= 0x140;
- pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
-
- pci_save_state(hldev->pdev);
-}
-
-/* __vxge_hw_device_vpath_reset_in_prog_check - Check if a vpath reset is
- * in progress
- * This routine waits for the vpath reset in progress register to clear to zero
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
-{
- enum vxge_hw_status status;
- status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- return status;
-}
-
-/*
- * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
- * Set the swapper bits appropriately for the legacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- wmb();
-
- switch (val64) {
- case VXGE_HW_SWAPPER_INITIAL_VALUE:
- return status;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- break;
-
- case VXGE_HW_SWAPPER_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
- }
-
- wmb();
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
- status = VXGE_HW_ERR_SWAPPER_CTRL;
-
- return status;
-}
-
-/*
- * __vxge_hw_device_toc_get
- * This routine sets the swapper, reads the TOC pointer and returns the
- * memory mapped address of the TOC
- */
-static struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0)
-{
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc = NULL;
- enum vxge_hw_status status;
-
- struct vxge_hw_legacy_reg __iomem *legacy_reg =
- (struct vxge_hw_legacy_reg __iomem *)bar0;
-
- status = __vxge_hw_legacy_swapper_set(legacy_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&legacy_reg->toc_first_pointer);
- toc = bar0 + val64;
-exit:
- return toc;
-}
-
-/*
- * __vxge_hw_device_reg_addr_get
- * This routine sets the swapper, reads the TOC pointer and initializes the
- * register location pointers in the device object. It then waits for any
- * pending vpath reset to complete.
- */
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- hldev->legacy_reg = hldev->bar0;
-
- hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
- if (hldev->toc_reg == NULL) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- val64 = readq(&hldev->toc_reg->toc_common_pointer);
- hldev->common_reg = hldev->bar0 + val64;
-
- val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
- hldev->mrpcim_reg = hldev->bar0 + val64;
-
- for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
- hldev->srpcim_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
- hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
- hldev->vpath_reg[i] = hldev->bar0 + val64;
- }
-
- val64 = readq(&hldev->toc_reg->toc_kdfc);
-
- switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
- case 0:
-		hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
- break;
- default:
- break;
- }
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
- * This routine returns the Access Rights of the driver
- */
-static u32
-__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
-{
- u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
-
- switch (host_type) {
- case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- if (func_id == 0) {
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- }
- break;
- case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
- case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
- case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
- break;
- case VXGE_HW_SR_VH_FUNCTION0:
- case VXGE_HW_VH_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- }
-
- return access_rights;
-}
-/*
- * __vxge_hw_device_is_privilaged
- * This routine checks if the device function is privileged or not
- */
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
-{
- if (__vxge_hw_device_access_rights_get(host_type,
- func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
- return VXGE_HW_OK;
- else
- return VXGE_HW_ERR_PRIVILEGED_OPERATION;
-}
-
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_device_host_info_get
- * This routine returns the host type assignments
- */
-static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
-
- val64 = readq(&hldev->common_reg->host_type_assignments);
-
- hldev->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- hldev->func_id =
- __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
-
- hldev->access_rights = __vxge_hw_device_access_rights_get(
- hldev->host_type, hldev->func_id);
-
- hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
- hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
-
- hldev->first_vp_id = i;
- break;
- }
-}
-
-/*
- * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
- * link width and signalling rate.
- */
-static enum vxge_hw_status
-__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- /* Get the negotiated link width and speed from PCI config space */
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
-
- if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
- return VXGE_HW_ERR_INVALID_PCI_INFO;
-
- switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
- case PCIE_LNK_WIDTH_RESRV:
- case PCIE_LNK_X1:
- case PCIE_LNK_X2:
- case PCIE_LNK_X4:
- case PCIE_LNK_X8:
- break;
- default:
- return VXGE_HW_ERR_INVALID_PCI_INFO;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_initialize
- * Initialize Titan-V hardware.
- */
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id)) {
- /* Validate the pci-e link width and speed */
- status = __vxge_hw_verify_pci_e_info(hldev);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- fw_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
- fw_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
- fw_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- fw_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- fw_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
- flash_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
- flash_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
- flash_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
- flash_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- __be64 *serial_number = (void *)hw_info->serial_number;
- __be64 *product_desc = (void *)hw_info->product_desc;
- __be64 *part_number = (void *)hw_info->part_number;
- enum vxge_hw_status status;
- u64 data0, data1 = 0, steer_ctrl = 0;
- u32 i, j = 0;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- serial_number[0] = cpu_to_be64(data0);
- serial_number[1] = cpu_to_be64(data1);
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- part_number[0] = cpu_to_be64(data0);
- part_number[1] = cpu_to_be64(data1);
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
- data0 = i;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- product_desc[j++] = cpu_to_be64(data0);
- product_desc[j++] = cpu_to_be64(data1);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- data0 = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_FUNC_MODE,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
- u8 *macaddr, u8 *macaddr_mask)
-{
- u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
- int i;
-
- do {
- status = vxge_hw_vpath_fw_api(vpath, action,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data1);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i - 1] = (u8) (data0 & 0xFF);
- data0 >>= 8;
-
- macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
- data1 >>= 8;
- }
-
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
- data0 = 0, data1 = 0, steer_ctrl = 0;
-
- } while (!is_valid_ether_addr(macaddr));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_device_hw_info_get - Get the hw information
- * @bar0: memory mapped BAR0 of the device
- * @hw_info: buffer to hold the returned hardware information
- *
- * Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver, FW version information, and the first mac address for
- * each vpath
- */
-enum vxge_hw_status
-vxge_hw_device_hw_info_get(void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i;
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status;
- struct __vxge_hw_virtualpath vpath;
-
- memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
-
- toc = __vxge_hw_device_toc_get(bar0);
- if (toc == NULL) {
- status = VXGE_HW_ERR_CRITICAL;
- goto exit;
- }
-
- val64 = readq(&toc->toc_common_pointer);
- common_reg = bar0 + val64;
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&common_reg->vpath_rst_in_prog);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
-
- val64 = readq(&common_reg->host_type_assignments);
-
- hw_info->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpmgmt_pointer[i]);
-
- vpmgmt_reg = bar0 + val64;
-
- hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
- if (__vxge_hw_device_access_rights_get(hw_info->host_type,
- hw_info->func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
-
- val64 = readq(&toc->toc_mrpcim_pointer);
-
- mrpcim_reg = bar0 + val64;
-
- writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
- wmb();
- }
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
-
- spin_lock_init(&vpath.lock);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- break;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_addr_get(&vpath,
- hw_info->mac_addrs[i],
- hw_info->mac_addr_masks[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
- struct __vxge_hw_device *hldev;
- struct list_head *p, *n;
-
- if (!blockpool)
- return;
-
- hldev = blockpool->hldev;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
- dma_unmap_single(&hldev->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- blockpool->pool_size--;
- }
-
- list_for_each_safe(p, n, &blockpool->free_entry_list) {
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- }
-
- return;
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max)
-{
- u32 i;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (blockpool == NULL) {
- status = VXGE_HW_FAIL;
- goto blockpool_create_exit;
- }
-
- blockpool->hldev = hldev;
- blockpool->block_size = VXGE_HW_BLOCK_SIZE;
- blockpool->pool_size = 0;
- blockpool->pool_max = pool_max;
- blockpool->req_out = 0;
-
- INIT_LIST_HEAD(&blockpool->free_block_list);
- INIT_LIST_HEAD(&blockpool->free_entry_list);
-
- for (i = 0; i < pool_size + pool_max; i++) {
- entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- list_add(&entry->item, &blockpool->free_entry_list);
- }
-
- for (i = 0; i < pool_size; i++) {
- memblock = vxge_os_dma_malloc(
- hldev->pdev,
- VXGE_HW_BLOCK_SIZE,
- &dma_handle,
- &acc_handle);
- if (memblock == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
- VXGE_HW_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
- vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry =
- kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry != NULL) {
- list_del(&entry->item);
- entry->length = VXGE_HW_BLOCK_SIZE;
- entry->memblock = memblock;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- } else {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- }
-
-blockpool_create_exit:
- return status;
-}
-
-/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
-{
- if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
- (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
-{
- enum vxge_hw_status status;
-
- if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
- (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
- return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
-
- status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
- if (status != VXGE_HW_OK)
- return status;
-
- if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
- ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
- (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
- return VXGE_HW_BADCFG_VPATH_MTU;
-
- if ((vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
- return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
- u32 i;
- enum vxge_hw_status status;
-
- if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
- return VXGE_HW_BADCFG_INTR_MODE;
-
- if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
- (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
- return VXGE_HW_BADCFG_RTS_MAC_EN;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- status = __vxge_hw_device_vpath_config_check(
- &new_config->vp_config[i]);
- if (status != VXGE_HW_OK)
- return status;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_device_initialize - Initialize Titan device.
- * Initialize Titan device. The driver cooperates with the OS to find a new
- * Titan device and to locate its PCI and memory spaces.
- *
- * When done, this function allocates sizeof(struct __vxge_hw_device) bytes,
- * performs the Titan hardware initialization and returns the device handle
- * through @devh.
- */
-enum vxge_hw_status
-vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config)
-{
- u32 i;
- u32 nblocks = 0;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_config_check(device_config);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hldev = vzalloc(sizeof(struct __vxge_hw_device));
- if (hldev == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- hldev->magic = VXGE_HW_DEVICE_MAGIC;
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
-
- /* apply config */
- memcpy(&hldev->config, device_config,
- sizeof(struct vxge_hw_device_config));
-
- hldev->bar0 = attr->bar0;
- hldev->pdev = attr->pdev;
-
- hldev->uld_callbacks = attr->uld_callbacks;
-
- __vxge_hw_device_pci_e_init(hldev);
-
- status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK) {
- vfree(hldev);
- goto exit;
- }
-
- __vxge_hw_device_host_info_get(hldev);
-
- /* Incrementing for stats blocks */
- nblocks++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- if (device_config->vp_config[i].ring.enable ==
- VXGE_HW_RING_ENABLE)
- nblocks += device_config->vp_config[i].ring.ring_blocks;
-
- if (device_config->vp_config[i].fifo.enable ==
- VXGE_HW_FIFO_ENABLE)
- nblocks += device_config->vp_config[i].fifo.fifo_blocks;
- nblocks++;
- }
-
- if (__vxge_hw_blockpool_create(hldev,
- &hldev->block_pool,
- device_config->dma_blockpool_initial + nblocks,
- device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
-
- vxge_hw_device_terminate(hldev);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_device_initialize(hldev);
- if (status != VXGE_HW_OK) {
- vxge_hw_device_terminate(hldev);
- goto exit;
- }
-
- *devh = hldev;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_terminate - Terminate Titan device.
- * Terminate HW device.
- */
-void
-vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
-{
- vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
-
- hldev->magic = VXGE_HW_DEVICE_DEAD;
- __vxge_hw_blockpool_destroy(&hldev->block_pool);
- vfree(hldev);
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- * and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_stats_access_exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->xmac_stats_access_cmd,
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
- vpath->hldev->config.device_poll_millis);
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&vp_reg->xmac_stats_access_data);
- else
- *stat = 0;
-
-vpath_stats_access_exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)vpath_tx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset, val64);
- if (status != VXGE_HW_OK)
- goto exit;
- offset++;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
- val64 = (u64 *) vpath_rx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset >> 3, val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- val64 = readq(&vp_reg->vpath_debug_stats0);
- hw_stats->ini_num_mwr_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats1);
- hw_stats->ini_num_mrd_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats2);
- hw_stats->ini_num_cpl_rcvd =
- (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats3);
- hw_stats->ini_num_mwr_byte_sent =
- VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats4);
- hw_stats->ini_num_cpl_byte_rcvd =
- VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats5);
- hw_stats->wrcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats6);
- hw_stats->rdcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count0 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count1 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count2 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count3 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count4);
- hw_stats->vpath_genstats_count4 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count5);
- hw_stats->vpath_genstats_count5 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
- val64);
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
- hw_stats->prog_event_vnum0 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
- hw_stats->prog_event_vnum1 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
- hw_stats->prog_event_vnum2 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
- hw_stats->prog_event_vnum3 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
- val64 = readq(&vp_reg->rx_multi_cast_stats);
- hw_stats->rx_multi_cast_frame_discard =
- (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
- val64 = readq(&vp_reg->rx_frm_transferred);
- hw_stats->rx_frm_transferred =
- (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
- val64 = readq(&vp_reg->rxd_returned);
- hw_stats->rxd_returned =
- (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_mpa);
- hw_stats->rx_mpa_len_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
- hw_stats->rx_mpa_mrk_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
- hw_stats->rx_mpa_crc_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_fau);
- hw_stats->rx_permitted_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
- hw_stats->rx_vp_reset_discarded_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
- hw_stats->rx_wol_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
- val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
- hw_stats->tx_vp_reset_discarded_frms =
- (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
- val64);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_stats_get - Get the device hw statistics.
- * Returns the vpath h/w stats for the device.
- */
-enum vxge_hw_status
-vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_hw_info *hw_stats)
-{
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
- (hldev->virtual_paths[i].vp_open ==
- VXGE_HW_VP_NOT_OPEN))
- continue;
-
- memcpy(hldev->virtual_paths[i].hw_stats_sav,
- hldev->virtual_paths[i].hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(
- &hldev->virtual_paths[i],
- hldev->virtual_paths[i].hw_stats);
- }
-
- memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_hw_info));
-
- return status;
-}
-
-/*
- * vxge_hw_driver_stats_get - Get the device sw statistics.
- * Returns the vpath s/w stats for the device.
- */
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_sw_info *sw_stats)
-{
- memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_sw_info));
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
- * and offset and perform an operation
- * Get the statistics from the given location and offset.
- */
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
- u32 operation, u32 location, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
- VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &hldev->mrpcim_reg->xmac_stats_sys_cmd,
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
- hldev->config.device_poll_millis);
-
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
- else
- *stat = 0;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
- * Get the Statistics on aggregate port
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)aggr_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (104 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
- * Get the Statistics on port
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = 0x0;
- val64 = (u64 *) port_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (608 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
- * Get the XMAC Statistics
- */
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_xmac_stats *xmac_stats)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 i;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 0, &xmac_stats->aggr_stats[0]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 1, &xmac_stats->aggr_stats[1]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- status = vxge_hw_device_xmac_port_stats_get(hldev,
- i, &xmac_stats->port_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_tx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_rx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_debug_set - Set the debug module mask and level
- * This routine is used to dynamically change the debug output
- */
-void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
- enum vxge_debug_level level, u32 mask)
-{
- if (hldev == NULL)
- return;
-
-#if defined(VXGE_DEBUG_TRACE_MASK) || \
- defined(VXGE_DEBUG_ERR_MASK)
- hldev->debug_module_mask = mask;
- hldev->debug_level = level;
-#endif
-
-#if defined(VXGE_DEBUG_ERR_MASK)
- hldev->level_err = level & VXGE_ERR;
-#endif
-
-#if defined(VXGE_DEBUG_TRACE_MASK)
- hldev->level_trace = level & VXGE_TRACE;
-#endif
-}
-
-/*
- * vxge_hw_device_error_level_get - Get the error level
- * This routine returns the current error level set
- */
-u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return VXGE_ERR;
- else
- return hldev->level_err;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_trace_level_get - Get the trace level
- * This routine returns the current trace level set
- */
-u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK)
- if (hldev == NULL)
- return VXGE_TRACE;
- else
- return hldev->level_trace;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_getpause_data - Pause frame generation and reception.
- * Returns the Pause frame generation and reception capability of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 *tx, u32 *rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- goto exit;
- }
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
- *tx = 1;
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
- *rx = 1;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_setpause_data - set/reset pause frame generation.
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 tx, u32 rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (tx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- if (rx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
-
- writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
-exit:
- return status;
-}
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
- return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
-}
-
-/*
- * __vxge_hw_ring_block_memblock_idx - Return the memblock index
- * This function returns the index of memory block
- */
-static inline u32
-__vxge_hw_ring_block_memblock_idx(u8 *block)
-{
- return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
-}
-
-/*
- * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
- * This function sets index to a memory block
- */
-static inline void
-__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
-{
- *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
-}
-
-/*
- * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
- * in RxD block
- * Sets the next block pointer in RxD block
- */
-static inline void
-__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
-{
- *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
-}
-
-/*
- * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
- * first block
- * Returns the dma address of the first RxD block
- */
-static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
-{
- struct vxge_hw_mempool_dma *dma_object;
-
- dma_object = ring->mempool->memblocks_dma_arr;
- vxge_assert(dma_object != NULL);
-
- return dma_object->addr;
-}
-
-/*
- * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
- * This function returns the dma address of a given item
- */
-static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
- void *item)
-{
- u32 memblock_idx;
- void *memblock;
- struct vxge_hw_mempool_dma *memblock_dma_object;
- ptrdiff_t dma_item_offset;
-
- /* get owner memblock index */
- memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
-
- /* get owner memblock by memblock index */
- memblock = mempoolh->memblocks_arr[memblock_idx];
-
- /* get memblock DMA object by memblock index */
- memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
-
- /* calculate offset in the memblock of this item */
- dma_item_offset = (u8 *)item - (u8 *)memblock;
-
- return memblock_dma_object->addr + dma_item_offset;
-}
-
-/*
- * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
- * This function links one RxD block to another via the next block pointer
- */
-static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
- struct __vxge_hw_ring *ring, u32 from,
- u32 to)
-{
-	u8 *to_item, *from_item;
- dma_addr_t to_dma;
-
- /* get "from" RxD block */
- from_item = mempoolh->items_arr[from];
- vxge_assert(from_item);
-
- /* get "to" RxD block */
- to_item = mempoolh->items_arr[to];
- vxge_assert(to_item);
-
- /* return address of the beginning of previous RxD block */
- to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
-
- /* set next pointer for this RxD block to point on
- * previous item's DMA start address */
- __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
-}
-
-/*
- * __vxge_hw_ring_mempool_item_alloc - RxD block allocation callback
- * This function is the callback passed to __vxge_hw_mempool_create to
- * format each RxD block of the memory pool
- */
-static void
-__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 i;
- void *item = mempoolh->items_arr[index];
- struct __vxge_hw_ring *ring =
- (struct __vxge_hw_ring *)mempoolh->userdata;
-
- /* format rxds array */
- for (i = 0; i < ring->rxds_per_block; i++) {
- void *rxdblock_priv;
- void *uld_priv;
- struct vxge_hw_ring_rxd_1 *rxdp;
-
- u32 reserve_index = ring->channel.reserve_ptr -
- (index * ring->rxds_per_block + i + 1);
- u32 memblock_item_idx;
-
- ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
- i * ring->rxd_size;
-
- /* Note: memblock_item_idx is index of the item within
- * the memblock. For instance, in case of three RxD-blocks
- * per memblock this value can be 0, 1 or 2. */
- rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
- memblock_index, item,
- &memblock_item_idx);
-
- rxdp = ring->channel.reserve_arr[reserve_index];
-
- uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
-
- /* pre-format Host_Control */
- rxdp->host_control = (u64)(size_t)uld_priv;
- }
-
- __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
-
- if (is_last) {
- /* link last one with first one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
- }
-
- if (index > 0) {
- /* link this RxD block with previous one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
- }
-}
-
-/*
- * vxge_hw_ring_replenish - Initial replenish of RxDs
- * This function replenishes the RxDs from reserve array to work array
- */
-static enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
-{
- void *rxd;
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &ring->channel;
-
- while (vxge_hw_channel_dtr_count(channel) > 0) {
-
- status = vxge_hw_ring_rxd_reserve(ring, &rxd);
-
- vxge_assert(status == VXGE_HW_OK);
-
- if (ring->rxd_init) {
- status = ring->rxd_init(rxd, channel->userdata);
- if (status != VXGE_HW_OK) {
- vxge_hw_ring_rxd_free(ring, rxd);
- goto exit;
- }
- }
-
- vxge_hw_ring_rxd_post(ring, rxd);
- }
- status = VXGE_HW_OK;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-static struct __vxge_hw_channel *
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type,
- u32 length, u32 per_dtr_space,
- void *userdata)
-{
- struct __vxge_hw_channel *channel;
- struct __vxge_hw_device *hldev;
- int size = 0;
- u32 vp_id;
-
- hldev = vph->vpath->hldev;
- vp_id = vph->vpath->vp_id;
-
- switch (type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- size = sizeof(struct __vxge_hw_fifo);
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- size = sizeof(struct __vxge_hw_ring);
- break;
- default:
- break;
- }
-
- channel = kzalloc(size, GFP_KERNEL);
- if (channel == NULL)
- goto exit0;
- INIT_LIST_HEAD(&channel->item);
-
- channel->common_reg = hldev->common_reg;
- channel->first_vp_id = hldev->first_vp_id;
- channel->type = type;
- channel->devh = hldev;
- channel->vph = vph;
- channel->userdata = userdata;
- channel->per_dtr_space = per_dtr_space;
- channel->length = length;
- channel->vp_id = vp_id;
-
- channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->work_arr == NULL)
- goto exit1;
-
- channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->free_arr == NULL)
- goto exit1;
- channel->free_ptr = length;
-
- channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->reserve_arr == NULL)
- goto exit1;
- channel->reserve_ptr = length;
- channel->reserve_top = 0;
-
- channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->orig_arr == NULL)
- goto exit1;
-
- return channel;
-exit1:
- __vxge_hw_channel_free(channel);
-
-exit0:
- return NULL;
-}
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
-{
- struct __vxge_hw_blockpool *blockpool;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- dma_addr_t dma_addr;
-
- blockpool = &devh->block_pool;
-
- if (block_addr == NULL) {
- blockpool->req_out--;
- goto exit;
- }
-
- dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
- DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
- vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
- blockpool->req_out--;
- goto exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry) {
- entry->length = length;
- entry->memblock = block_addr;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_h;
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- blockpool->req_out--;
-
-exit:
- return;
-}
-
-static inline void
-vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
-{
- void *vaddr;
-
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
- u32 nreq = 0, i;
-
- if ((blockpool->pool_size + blockpool->req_out) <
- VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
- nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
- blockpool->req_out += nreq;
- }
-
- for (i = 0; i < nreq; i++)
- vxge_os_dma_malloc_async(
- (blockpool->hldev)->pdev,
- blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
- */
-static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- void *memblock = NULL;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
-
- memblock = vxge_os_dma_malloc(devh->pdev, size,
- &dma_object->handle,
- &dma_object->acc_handle);
-
- if (!memblock)
- goto exit;
-
- dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
- size, DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
- vxge_os_dma_free(devh->pdev, memblock,
- &dma_object->acc_handle);
- memblock = NULL;
- goto exit;
- }
-
- } else {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- dma_object->addr = entry->dma_addr;
- dma_object->handle = entry->dma_handle;
- dma_object->acc_handle = entry->acc_handle;
- memblock = entry->memblock;
-
- list_add(&entry->item,
- &blockpool->free_entry_list);
- blockpool->pool_size--;
- }
-
- if (memblock != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
- }
-exit:
- return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static void
-__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- if (blockpool->pool_size < blockpool->pool_max)
- break;
-
- dma_unmap_single(&(blockpool->hldev)->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(
- (blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
- list_add(p, &blockpool->free_entry_list);
-
- blockpool->pool_size--;
-
- }
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allocated with
- * __vxge_hw_blockpool_malloc
- */
-static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
- void *memblock, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
- dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
- DMA_BIDIRECTIONAL);
- vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
- } else {
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(
- struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = size;
- entry->memblock = memblock;
- entry->dma_addr = dma_object->addr;
- entry->acc_handle = dma_object->acc_handle;
- entry->dma_handle = dma_object->handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- if (status == VXGE_HW_OK)
- __vxge_hw_blockpool_blocks_remove(blockpool);
- }
-}
-
-/*
- * __vxge_hw_mempool_destroy
- */
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
-{
- u32 i, j;
- struct __vxge_hw_device *devh = mempool->devh;
-
- for (i = 0; i < mempool->memblocks_allocated; i++) {
- struct vxge_hw_mempool_dma *dma_object;
-
- vxge_assert(mempool->memblocks_arr[i]);
- vxge_assert(mempool->memblocks_dma_arr + i);
-
- dma_object = mempool->memblocks_dma_arr + i;
-
- for (j = 0; j < mempool->items_per_memblock; j++) {
- u32 index = i * mempool->items_per_memblock + j;
-
- /* to skip last partially filled(if any) memblock */
- if (index >= mempool->items_current)
- break;
- }
-
- vfree(mempool->memblocks_priv_arr[i]);
-
- __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
- mempool->memblock_size, dma_object);
- }
-
- vfree(mempool->items_arr);
- vfree(mempool->memblocks_dma_arr);
- vfree(mempool->memblocks_priv_arr);
- vfree(mempool->memblocks_arr);
- vfree(mempool);
-}
-
-/*
- * __vxge_hw_mempool_grow
- * Grows the mempool by allocating up to %num_allocate additional memblocks.
- */
-static enum vxge_hw_status
-__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
- u32 *num_allocated)
-{
- u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
- u32 n_items = mempool->items_per_memblock;
- u32 start_block_idx = mempool->memblocks_allocated;
- u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- *num_allocated = 0;
-
- if (end_block_idx > mempool->memblocks_max) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- for (i = start_block_idx; i < end_block_idx; i++) {
- u32 j;
- u32 is_last = ((end_block_idx - 1) == i);
- struct vxge_hw_mempool_dma *dma_object =
- mempool->memblocks_dma_arr + i;
- void *the_memblock;
-
- /* allocate memblock's private part. Each DMA memblock
- * has a space allocated for item's private usage upon
- * mempool's user request. Each time mempool grows, it will
- * allocate new memblock and its private part at once.
- * This helps to minimize memory usage a lot. */
- mempool->memblocks_priv_arr[i] =
- vzalloc(array_size(mempool->items_priv_size, n_items));
- if (mempool->memblocks_priv_arr[i] == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- /* allocate DMA-capable memblock */
- mempool->memblocks_arr[i] =
- __vxge_hw_blockpool_malloc(mempool->devh,
- mempool->memblock_size, dma_object);
- if (mempool->memblocks_arr[i] == NULL) {
- vfree(mempool->memblocks_priv_arr[i]);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- (*num_allocated)++;
- mempool->memblocks_allocated++;
-
- memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
-
- the_memblock = mempool->memblocks_arr[i];
-
- /* fill the items hash array */
- for (j = 0; j < n_items; j++) {
- u32 index = i * n_items + j;
-
- if (first_time && index >= mempool->items_initial)
- break;
-
- mempool->items_arr[index] =
- ((char *)the_memblock + j*mempool->item_size);
-
- /* let the caller do additional work on each item */
- if (mempool->item_func_alloc != NULL)
- mempool->item_func_alloc(mempool, i,
- dma_object, index, is_last);
-
- mempool->items_current = index + 1;
- }
-
- if (first_time && mempool->items_current ==
- mempool->items_initial)
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_mempool_create
- * This function creates a memory pool object. The pool may grow but will
- * never shrink. It consists of a number of dynamically allocated blocks
- * large enough to hold %items_initial items. The memory is DMA-able, but
- * the client must map/unmap it before interoperating with the device.
- */
-static struct vxge_hw_mempool *
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 items_priv_size,
- u32 items_initial,
- u32 items_max,
- const struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 memblocks_to_allocate;
- struct vxge_hw_mempool *mempool = NULL;
- u32 allocated;
-
- if (memblock_size < item_size) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- mempool = vzalloc(sizeof(struct vxge_hw_mempool));
- if (mempool == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- mempool->devh = devh;
- mempool->memblock_size = memblock_size;
- mempool->items_max = items_max;
- mempool->items_initial = items_initial;
- mempool->item_size = item_size;
- mempool->items_priv_size = items_priv_size;
- mempool->item_func_alloc = mp_callback->item_func_alloc;
- mempool->userdata = userdata;
-
- mempool->memblocks_allocated = 0;
-
- mempool->items_per_memblock = memblock_size / item_size;
-
- mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* allocate array of memblocks */
- mempool->memblocks_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of private parts of items per memblocks */
- mempool->memblocks_priv_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_priv_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr =
- vzalloc(array_size(sizeof(struct vxge_hw_mempool_dma),
- mempool->memblocks_max));
- if (mempool->memblocks_dma_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate hash array of items */
- mempool->items_arr = vzalloc(array_size(sizeof(void *),
- mempool->items_max));
- if (mempool->items_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* calculate initial number of memblocks */
- memblocks_to_allocate = (mempool->items_initial +
- mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* pre-allocate the mempool */
- status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
- &allocated);
- if (status != VXGE_HW_OK) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
-exit:
- return mempool;
-}
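The memblock bookkeeping above implies a fixed mapping from a flat item index to its backing memblock. A minimal stand-alone sketch of that arithmetic, as built by __vxge_hw_mempool_grow() above (the helper name and plain C types are assumptions for illustration, not driver code):

	/* Item @index was stored by __vxge_hw_mempool_grow() as
	 * memblocks_arr[index / items_per_memblock] +
	 * (index % items_per_memblock) * item_size; this helper just
	 * reproduces that lookup. */
	static void *mempool_item_addr(void *const *memblocks_arr,
				       unsigned int items_per_memblock,
				       unsigned int item_size,
				       unsigned int index)
	{
		unsigned int block = index / items_per_memblock;
		unsigned int offset = (index % items_per_memblock) * item_size;

		return (char *)memblocks_arr[block] + offset;
	}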
-
-/*
- * __vxge_hw_ring_abort - Returns the RxD
- * This function terminates the RxDs of the ring
- */
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
-{
- void *rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(channel, &rxdh);
-
- if (rxdh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(channel);
-
- if (ring->rxd_term)
- ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
- channel->userdata);
-
- vxge_hw_channel_dtr_free(channel, rxdh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- __vxge_hw_ring_abort(ring);
-
- status = __vxge_hw_channel_reset(channel);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_ring_delete - Removes the ring
- * This function frees up the memory pool and removes the ring
- */
-static enum vxge_hw_status
-__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_ring *ring = vp->vpath->ringh;
-
- __vxge_hw_ring_abort(ring);
-
- if (ring->mempool)
- __vxge_hw_mempool_destroy(ring->mempool);
-
- vp->vpath->ringh = NULL;
- __vxge_hw_channel_free(&ring->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates a ring and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_ring_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_ring *ring;
- u32 ring_length;
- struct vxge_hw_ring_config *config;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
- static const struct vxge_hw_mempool_cbs ring_mp_callback = {
- .item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
- };
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- hldev = vp->vpath->hldev;
- vp_id = vp->vpath->vp_id;
-
- config = &hldev->config.vp_config[vp_id].ring;
-
- ring_length = config->ring_blocks *
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_RING,
- ring_length,
- attr->per_rxd_space,
- attr->userdata);
- if (ring == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vp->vpath->ringh = ring;
- ring->vp_id = vp_id;
- ring->vp_reg = vp->vpath->vp_reg;
- ring->common_reg = hldev->common_reg;
- ring->stats = &vp->vpath->sw_stats->ring_stats;
- ring->config = config;
- ring->callback = attr->callback;
- ring->rxd_init = attr->rxd_init;
- ring->rxd_term = attr->rxd_term;
- ring->buffer_mode = config->buffer_mode;
- ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
- ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
- ring->rxds_limit = config->rxds_limit;
-
- ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
- ring->rxd_priv_size =
- sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
- ring->per_rxd_space = attr->per_rxd_space;
-
- ring->rxd_priv_size =
- ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- /* how many RxDs can fit into one block. Depends on configured
- * buffer_mode. */
- ring->rxds_per_block =
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- /* calculate actual RxD block private size */
- ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring->mempool = __vxge_hw_mempool_create(hldev,
- VXGE_HW_BLOCK_SIZE,
- VXGE_HW_BLOCK_SIZE,
- ring->rxdblock_priv_size,
- ring->config->ring_blocks,
- ring->config->ring_blocks,
- &ring_mp_callback,
- ring);
- if (ring->mempool == NULL) {
- __vxge_hw_ring_delete(vp);
- return VXGE_HW_ERR_OUT_OF_MEMORY;
- }
-
- status = __vxge_hw_channel_initialize(&ring->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
-
- /* Note:
- * Specifying an rxd_init callback means two things:
- * 1) RxDs need to be initialized by the driver at channel-open time;
- * 2) RxDs need to be posted at channel-open time
- *    (that's what the vxge_hw_ring_replenish() call below does).
- * Currently we don't have a case where 1) is done without 2).
- */
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
- }
-
- /* the initial replenish increments the counter in its post() routine,
- * so we have to reset it */
- ring->stats->common_stats.usage_cnt = 0;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_config_default_get - Initialize device config with defaults.
- * Initialize Titan device config with default values.
- */
-enum vxge_hw_status
-vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
-{
- u32 i;
-
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
- device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
- device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
- device_config->rth_en = VXGE_HW_RTH_DEFAULT;
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
- device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
- device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
-
- device_config->vp_config[i].min_bandwidth =
- VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
-
- device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- VXGE_HW_MIN_FIFO_BLOCKS;
-
- device_config->vp_config[i].fifo.max_frags =
- VXGE_HW_MAX_FIFO_FRAGS;
-
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
-
- device_config->vp_config[i].fifo.alignment_size =
- VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
-
- device_config->vp_config[i].fifo.no_snoop_bits =
- VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].tti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].rti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].mtu =
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
- * Set the swapper bits appropriately for the vpath.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
-#ifndef __BIG_ENDIAN
- u64 val64;
-
- val64 = readq(&vpath_reg->vpath_general_cfg1);
- wmb();
- val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
- writeq(val64, &vpath_reg->vpath_general_cfg1);
- wmb();
-#endif
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
- * Set the swapper bits appropriately for the kdfc.
- */
-static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
-
- val64 = readq(&legacy_reg->pifm_wr_swap_en);
-
- if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
- val64 = readq(&vpath_reg->kdfcctl_cfg0);
- wmb();
-
- val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
-
- writeq(val64, &vpath_reg->kdfcctl_cfg0);
- wmb();
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_read - Read Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 *value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->srpcim_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
- */
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
-{
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- int i = 0, j = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- vpmgmt_reg = hldev->vpmgmt_reg[i];
- for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
- if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
- & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
- return VXGE_HW_FAIL;
- }
- }
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_write - Write Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
- offset);
-
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of the fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
- void *txdlh;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
- if (txdlh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(&fifo->channel);
-
- if (fifo->txdl_term) {
- fifo->txdl_term(txdlh,
- VXGE_HW_TXDL_STATE_POSTED,
- fifo->channel.userdata);
- }
-
- vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_fifo_abort(fifo);
- status = __vxge_hw_channel_reset(&fifo->channel);
-
- return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function frees up the memory pool and removes the FIFO
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
- __vxge_hw_fifo_abort(fifo);
-
- if (fifo->mempool)
- __vxge_hw_mempool_destroy(fifo->mempool);
-
- vp->vpath->fifoh = NULL;
-
- __vxge_hw_channel_free(&fifo->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD
- * list callback
- * This function is the callback passed to __vxge_hw_mempool_create to create
- * the memory pool for the TxD list
- */
-static void
-__vxge_hw_fifo_mempool_item_alloc(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 memblock_item_idx;
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
- struct __vxge_hw_fifo *fifo =
- (struct __vxge_hw_fifo *)mempoolh->userdata;
- void *memblock = mempoolh->memblocks_arr[memblock_index];
-
- vxge_assert(txdp);
-
- txdp->host_control = (u64) (size_t)
- __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
- &memblock_item_idx);
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- vxge_assert(txdl_priv);
-
- fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
-
- /* pre-format HW's TxDL's private */
- txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
- txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
- txdl_priv->dma_handle = dma_object->handle;
- txdl_priv->memblock = memblock;
- txdl_priv->first_txdp = txdp;
- txdl_priv->next_txdl_priv = NULL;
- txdl_priv->alloc_frags = 0;
-}
-
-/*
- * __vxge_hw_fifo_create - Create a FIFO
- * This function creates a FIFO and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_fifo_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_fifo *fifo;
- struct vxge_hw_fifo_config *config;
- u32 txdl_size, txdl_per_memblock;
- struct vxge_hw_mempool_cbs fifo_mp_callback;
- struct __vxge_hw_virtualpath *vpath;
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
- config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
-
- txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
-
- txdl_per_memblock = config->memblock_size / txdl_size;
-
- fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_FIFO,
- config->fifo_blocks * txdl_per_memblock,
- attr->per_txdl_space, attr->userdata);
-
- if (fifo == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vpath->fifoh = fifo;
- fifo->nofl_db = vpath->nofl_db;
-
- fifo->vp_id = vpath->vp_id;
- fifo->vp_reg = vpath->vp_reg;
- fifo->stats = &vpath->sw_stats->fifo_stats;
-
- fifo->config = config;
-
- /* apply "interrupts per txdl" attribute */
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
- fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
- fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
-
- if (fifo->config->intr)
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
-
- fifo->no_snoop_bits = config->no_snoop_bits;
-
- /*
- * FIFO memory management strategy:
- *
- * TxDL split into three independent parts:
- * - set of TxD's
- * - TxD HW private part
- * - driver private part
- *
- * Adaptive memory allocation is used, i.e. memory is allocated on
- * demand, with a size that fits into one memory block.
- * One memory block may contain more than one TxDL.
- *
- * During "reserve" operations more memory can be allocated on demand
- * for example due to FIFO full condition.
- *
- * The pool of memblocks never shrinks, except in the __vxge_hw_fifo_close
- * routine, which essentially stops the channel and frees its resources.
- */
-
- /* TxDL common private size == TxDL private + driver private */
- fifo->priv_size =
- sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
- fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- fifo->per_txdl_space = attr->per_txdl_space;
-
- /* store the txdl size and the number of txdls per memblock */
- fifo->txdl_size = txdl_size;
- fifo->txdl_per_memblock = txdl_per_memblock;
-
- fifo->txdl_term = attr->txdl_term;
- fifo->callback = attr->callback;
-
- if (fifo->txdl_per_memblock == 0) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
- goto exit;
- }
-
- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
- fifo->mempool =
- __vxge_hw_mempool_create(vpath->hldev,
- fifo->config->memblock_size,
- fifo->txdl_size,
- fifo->priv_size,
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- &fifo_mp_callback,
- fifo);
-
- if (fifo->mempool == NULL) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_channel_initialize(&fifo->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_fifo_delete(vp);
- goto exit;
- }
-
- vxge_assert(fifo->channel.reserve_ptr);
-exit:
- return status;
-}
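Both __vxge_hw_ring_create() and __vxge_hw_fifo_create() round the per-descriptor private size up to a multiple of VXGE_CACHE_LINE_SIZE with the same integer expression. A minimal sketch of that round-up in isolation (the helper name is hypothetical; assuming the line size is a power of two, the kernel's ALIGN() macro computes the same value):

	/* Round @x up to the next multiple of @line, mirroring the
	 * priv_size computation in the create routines above. */
	static unsigned int round_up_to_line(unsigned int x, unsigned int line)
	{
		return ((x + line - 1) / line) * line;
	}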
-
-/*
- * __vxge_hw_vpath_pci_read - Read the content of given address
- * in pci config space.
- * Read from the vpath pci config space.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0, u32 offset, u32 *val)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
-
- if (phy_func_0)
- val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
-
- writeq(val64, &vp_reg->pci_config_access_cfg1);
- wmb();
- writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
- &vp_reg->pci_config_access_cfg2);
- wmb();
-
- status = __vxge_hw_device_register_poll(
- &vp_reg->pci_config_access_cfg2,
- VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->pci_config_access_status);
-
- if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
- status = VXGE_HW_FAIL;
- *val = 0;
- } else
- *val = (u32)vxge_bVALn(val64, 32, 32);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_device_flick_link_led - Flick (blink) link LED.
- * @hldev: HW device.
- * @on_off: TRUE to turn flickering on, FALSE to turn it off
- *
- * Flicker the link LED.
- */
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
-{
- struct __vxge_hw_virtualpath *vpath;
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (hldev == NULL) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- data0 = on_off;
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset,
- u64 *data0, u64 *data1)
-{
- enum vxge_hw_status status;
- u64 steer_ctrl = 0;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
- }
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- data0, data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
- (rts_table !=
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- *data1 = 0;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
- u32 rts_table, u32 offset, u64 steer_data0,
- u64 steer_data1)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- data0 = steer_data0;
-
- if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- data1 = steer_data1;
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vp,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size)
-{
- u64 data0, data1;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, &data0, &data1);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
-
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
-
- if (hash_type->hash_type_tcpipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
-
- if (hash_type->hash_type_ipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
-
- if (hash_type->hash_type_tcpipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
-
- if (hash_type->hash_type_ipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
-
- if (hash_type->hash_type_tcpipv6ex_en)
- data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
-
- if (hash_type->hash_type_ipv6ex_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
-
- if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
- data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
- else
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, data0, 0);
-exit:
- return status;
-}
-
-static void
-vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
- u16 flag, u8 *itable)
-{
- switch (flag) {
- case 1:
- *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 2:
- *data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 3:
- *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 4:
- *data1 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- return;
- default:
- return;
- }
-}
-
-/*
- * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size)
-{
- u32 i, j, action, rts_table;
- u64 data0;
- u64 data1;
- u32 max_entries;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- max_entries = (((u32)1) << itable_size);
-
- if (vp->vpath->hldev->config.rth_it_type
- == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
- action, rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[mtable[itable[j]]], action,
- rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- } else {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
- for (i = 0; i < vpath_count; i++) {
-
- for (j = 0; j < max_entries;) {
-
- data0 = 0;
- data1 = 0;
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 1, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 2, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 3, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 4, itable);
- j++;
- break;
- }
-
- if (data0 != 0) {
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[i],
- action, rts_table,
- 0, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- }
- }
- }
-exit:
- return status;
-}
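For the multi-IT case above, up to four indirection-table buckets owned by the same vpath are gathered per RTS write: flags 1 and 2 fill the two items of data0, flags 3 and 4 the two items of data1. A simplified stand-alone sketch of that gathering step (the helper name and plain C types are assumptions, not driver code):

	/* Collect up to four bucket numbers owned by vpath @vp, starting the
	 * scan at @start, so that they can be programmed in a single write. */
	static unsigned int gather_buckets_for_vpath(const unsigned char *mtable,
						     const unsigned char *itable,
						     unsigned int max_entries,
						     unsigned int vp,
						     unsigned int start,
						     unsigned int out[4])
	{
		unsigned int n = 0;

		while (start < max_entries && n < 4) {
			if (mtable[itable[start]] == vp)
				out[n++] = start;
			start++;
		}
		return n;
	}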
-
-/**
- * vxge_hw_vpath_check_leak - Check for memory leak
- * @ring: Handle to the ring object used for receive
- *
- * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
- * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
- * Returns: VXGE_HW_FAIL, if a leak has occurred.
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 rxd_new_count, rxd_spat;
-
- if (ring == NULL)
- return status;
-
- rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
- rxd_spat = readq(&ring->vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
-
- if (rxd_new_count >= rxd_spat)
- status = VXGE_HW_FAIL;
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mgmt_read
- * This routine reads the vpath_mgmt registers
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mgmt_read(
- struct __vxge_hw_device *hldev,
- struct __vxge_hw_virtualpath *vpath)
-{
- u32 i, mtu = 0, max_pyld = 0;
- u64 val64;
-
- for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- val64 = readq(&vpath->vpmgmt_reg->
- rxmac_cfg0_port_vpmgmt_clone[i]);
- max_pyld =
- (u32)
- VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
- (val64);
- if (mtu < max_pyld)
- mtu = max_pyld;
- }
-
- vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (val64 & vxge_mBIT(i))
- vpath->vsport_number = i;
- }
-
- val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
-
- if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
- else
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
- * This routine checks the vpath_rst_in_prog register to see if
- * the adapter has completed the reset process for the vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
-{
- enum vxge_hw_status status;
-
- status = __vxge_hw_device_register_poll(
- &vpath->hldev->common_reg->vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
- 1 << (16 - vpath->vp_id)),
- vpath->hldev->config.device_poll_millis);
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_reset
- * This routine resets the vpath on the device
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg0);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_sw_reset
- * This routine resets the vpath structures
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->ringh) {
- status = __vxge_hw_ring_reset(vpath->ringh);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- if (vpath->fifoh)
- status = __vxge_hw_fifo_reset(vpath->fifoh);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_prc_configure
- * This routine configures the prc registers of virtual path using the config
- * passed
- */
-static void
-__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
- return;
-
- val64 = readq(&vp_reg->prc_cfg1);
- val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
- writeq(val64, &vp_reg->prc_cfg1);
-
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
- writeq(val64, &vpath->vp_reg->prc_cfg6);
-
- val64 = readq(&vp_reg->prc_cfg7);
-
- if (vpath->vp_config->ring.scatter_mode !=
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
-
- val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
-
- switch (vpath->vp_config->ring.scatter_mode) {
- case VXGE_HW_RING_SCATTER_MODE_A:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
- break;
- case VXGE_HW_RING_SCATTER_MODE_B:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
- break;
- case VXGE_HW_RING_SCATTER_MODE_C:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
- break;
- }
- }
-
- writeq(val64, &vp_reg->prc_cfg7);
-
- writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
- __vxge_hw_ring_first_block_address_get(
- vpath->ringh) >> 3), &vp_reg->prc_cfg5);
-
- val64 = readq(&vp_reg->prc_cfg4);
- val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
- val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
-
- val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
- VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
-
- if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
- val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
- else
- val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
-
- writeq(val64, &vp_reg->prc_cfg4);
-}
-
-/*
- * __vxge_hw_vpath_kdfc_configure
- * This routine configures the kdfc registers of virtual path using the
- * config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u64 vpath_stride;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
-
- vpath->max_kdfc_db =
- (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
- val64+1)/2;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- vpath->max_nofl_db = vpath->max_kdfc_db;
-
- if (vpath->max_nofl_db <
- ((vpath->vp_config->fifo.memblock_size /
- (vpath->vp_config->fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd))) *
- vpath->vp_config->fifo.fifo_blocks)) {
-
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
- }
- val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
- (vpath->max_nofl_db*2)-1);
- }
-
- writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
-
- writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
- &vp_reg->kdfc_fifo_trpl_ctrl);
-
- val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
-
- val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
-
- val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
-#ifndef __BIG_ENDIAN
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
-#endif
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
-
- writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
- writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
- wmb();
- vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
-
- vpath->nofl_db =
- (struct __vxge_hw_non_offload_db_wrapper __iomem *)
- (hldev->kdfc + (vp_id *
- VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
- vpath_stride)));
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mac_configure
- * This routine configures the mac of virtual path using the config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
- vpath->vsport_number), &vp_reg->xmac_vsport_choice);
-
- if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->xmac_rpa_vcfg);
-
- if (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
- if (vp_config->rpa_strip_vlan_tag)
- val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- else
- val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- }
-
- writeq(val64, &vp_reg->xmac_rpa_vcfg);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-
- if (vp_config->mtu !=
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- if ((vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE);
- else
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vpath->max_mtu);
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg0);
-
- val64 = readq(&vp_reg->rxmac_vcfg1);
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
-
- if (hldev->config.rth_it_type ==
- VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
- val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
- 0x2) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg1);
- }
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_tim_configure
- * This routine configures the tim registers of virtual path using the config
- * passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- writeq(0, &vp_reg->tim_dest_addr);
- writeq(0, &vp_reg->tim_vpath_map);
- writeq(0, &vp_reg->tim_bitmap);
- writeq(0, &vp_reg->tim_remap);
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE)
- writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
- (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
-
- val64 = readq(&vp_reg->tim_pci_cfg);
- val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
- writeq(val64, &vp_reg->tim_pci_cfg);
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->tti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->tti.urange_a);
- }
-
- if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->tti.urange_b);
- }
-
- if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->tti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->tti.uec_a);
- }
-
- if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->tti.uec_b);
- }
-
- if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->tti.uec_c);
- }
-
- if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->tti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->tti.rtimer_val);
- }
-
- if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->tti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg3_saved = val64;
- }
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->rti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->rti.urange_a);
- }
-
- if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->rti.urange_b);
- }
-
- if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->rti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->rti.uec_a);
- }
-
- if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->rti.uec_b);
- }
-
- if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->rti.uec_c);
- }
-
- if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->rti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->rti.rtimer_val);
- }
-
- if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->rti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg3_saved = val64;
- }
-
- val64 = 0;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-
- val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
- writeq(val64, &vp_reg->tim_wrkld_clc);
-
- return VXGE_HW_OK;
-}
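Every timer field above follows the same pattern: the register field is rewritten only when the configured value differs from VXGE_HW_USE_FLASH_DEFAULT; otherwise the flash-programmed setting is left untouched. A hedged sketch of that read-modify-write step in isolation (the helper name and the explicit mask/value split are assumptions for illustration only):

	/* Apply @value under @mask only when the configuration requested an
	 * override; the @use_flash_default sentinel leaves @reg untouched. */
	static unsigned long long
	apply_unless_flash_default(unsigned long long reg,
				   unsigned long long mask,
				   unsigned long long value,
				   unsigned int cfg,
				   unsigned int use_flash_default)
	{
		if (cfg == use_flash_default)
			return reg;
		return (reg & ~mask) | (value & mask);
	}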
-
-/*
- * __vxge_hw_vpath_initialize
- * This routine is the final phase of init which initializes the
- * registers of the vpath using the configuration passed.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u32 val32;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
-
- /* Get MRRS value from device control */
- status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
- if (status == VXGE_HW_OK) {
- val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
- val64 &=
- ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
- }
-
- val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
- VXGE_HW_MAX_PAYLOAD_SIZE_512);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
- writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and frees up memory
- */
-static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
- goto exit;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
- vpath->hldev->tim_int_mask1, vpath->vp_id);
- hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
- /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
- * work after the interface is brought down.
- */
- spin_lock(&vpath->lock);
- vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
- spin_unlock(&vpath->lock);
-
- vpath->vpmgmt_reg = NULL;
- vpath->nofl_db = NULL;
- vpath->max_mtu = 0;
- vpath->vsport_number = 0;
- vpath->max_kdfc_db = 0;
- vpath->max_nofl_db = 0;
- vpath->ringh = NULL;
- vpath->fifoh = NULL;
- memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
- vpath->stats_block = NULL;
- vpath->hw_stats = NULL;
- vpath->hw_stats_sav = NULL;
- vpath->sw_stats = NULL;
-
-exit:
- return;
-}
-
-/*
- * __vxge_hw_vp_initialize - Initialize Virtual Path structure
- * This routine is the initial phase of init which resets the vpath and
- * initializes the software support structures.
- */
-static enum vxge_hw_status
-__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
- struct vxge_hw_vp_config *config)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[vp_id];
-
- spin_lock_init(&vpath->lock);
- vpath->vp_id = vp_id;
- vpath->vp_open = VXGE_HW_VP_OPEN;
- vpath->hldev = hldev;
- vpath->vp_config = config;
- vpath->vp_reg = hldev->vpath_reg[vp_id];
- vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
-
- __vxge_hw_vpath_reset(hldev, vp_id);
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- INIT_LIST_HEAD(&vpath->vpath_handles);
-
- vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
- hldev->tim_int_mask1, vp_id);
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- __vxge_hw_vp_terminate(hldev, vp_id);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_mtu_set - Set MTU.
- * Set a new MTU value. For example, to use jumbo frames:
- * vxge_hw_vpath_mtu_set(my_vpath_handle, 9600);
- */
-enum vxge_hw_status
-vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
-
- new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
- status = VXGE_HW_ERR_INVALID_MTU_SIZE;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-
- vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
- * Enable the DMA vpath statistics. The function is to be called to re-enable
- * the adapter to update stats into the host memory
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- memcpy(vpath->hw_stats_sav, vpath->hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from block pool or from the system
- */
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (size == blockpool->block_size) {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- blockpool->pool_size--;
- }
- }
-
- if (entry != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
-
- return entry;
-}
-
-/*
- * vxge_hw_vpath_open - Open a virtual path on a given adapter
- * This function is used to open access to a virtual path of an
- * adapter for offload, GRO operations. This function returns
- * synchronously.
- */
-enum vxge_hw_status
-vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct __vxge_hw_vpath_handle *vp;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[attr->vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_OPEN) {
- status = VXGE_HW_ERR_INVALID_STATE;
- goto vpath_open_exit1;
- }
-
- status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
- &hldev->config.vp_config[attr->vp_id]);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit1;
-
- vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
- if (vp == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit2;
- }
-
- vp->vpath = vpath;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
- status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit6;
- }
-
- if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
- status = __vxge_hw_ring_create(vp, &attr->ring_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit7;
-
- __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
- }
-
- vpath->fifoh->tx_intr_num =
- (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_TX;
-
- vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
- VXGE_HW_BLOCK_SIZE);
- if (vpath->stats_block == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit8;
- }
-
- vpath->hw_stats = vpath->stats_block->memblock;
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
- vpath->hw_stats;
-
- vpath->hw_stats_sav =
- &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit8;
-
- list_add(&vp->item, &vpath->vpath_handles);
-
- hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
-
- *vpath_handle = vp;
-
- attr->fifo_attr.userdata = vpath->fifoh;
- attr->ring_attr.userdata = vpath->ringh;
-
- return VXGE_HW_OK;
-
-vpath_open_exit8:
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-vpath_open_exit7:
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-vpath_open_exit6:
- vfree(vp);
-vpath_open_exit2:
- __vxge_hw_vp_terminate(hldev, attr->vp_id);
-vpath_open_exit1:
-
- return status;
-}
-
-/**
- * vxge_hw_vpath_rx_doorbell_init - Initialize the Rx doorbell of a vpath
- * @vp: Handle got from previous vpath open
- *
- * This function posts the initial receive descriptor count to the PRC
- * doorbell of the virtual path and adjusts the ring's RxD limit
- * accordingly.
- */
-void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct __vxge_hw_ring *ring = vpath->ringh;
- struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
- u64 new_count, val64, val164;
-
- if (vdev->titan1) {
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- } else
- new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
-
- val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
-
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
- &vpath->vp_reg->prc_rxd_doorbell);
- readl(&vpath->vp_reg->prc_rxd_doorbell);
-
- val164 /= 2;
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
- val64 &= 0x1ff;
-
- /*
- * Each RxD occupies 4 qwords
- */
- new_count -= (val64 + 1);
- val64 = min(val164, new_count) / 4;
-
- ring->rxds_limit = min(ring->rxds_limit, val64);
- if (ring->rxds_limit < 4)
- ring->rxds_limit = 4;
-}
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: Hal device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
- */
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
- struct __vxge_hw_blockpool_entry *entry)
-{
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (entry->length == blockpool->block_size) {
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- __vxge_hw_blockpool_blocks_remove(blockpool);
-}
-
-/*
- * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
- * This function is used to close access to virtual path opened
- * earlier.
- */
-enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- struct __vxge_hw_device *devh = NULL;
- u32 vp_id = vp->vpath->vp_id;
- u32 is_empty = TRUE;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- vpath = vp->vpath;
- devh = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_close_exit;
- }
-
- list_del(&vp->item);
-
- if (!list_empty(&vpath->vpath_handles)) {
- list_add(&vp->item, &vpath->vpath_handles);
- is_empty = FALSE;
- }
-
- if (!is_empty) {
- status = VXGE_HW_FAIL;
- goto vpath_close_exit;
- }
-
- devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
-
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-
- if (vpath->stats_block != NULL)
- __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
-
- vfree(vp);
-
- __vxge_hw_vp_terminate(devh, vp_id);
-
-vpath_close_exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_reset - Resets vpath
- * This function is used to request a reset of vpath
- */
-enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status;
- u32 vp_id;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
-
- vp_id = vpath->vp_id;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
- if (status == VXGE_HW_OK)
- vpath->sw_stats->soft_reset_cnt++;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
- * This function polls for the vpath reset completion and re-initializes
- * the vpath.
- */
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- enum vxge_hw_status status;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
-
- vp_id = vp->vpath->vp_id;
- vpath = vp->vpath;
- hldev = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (vpath->ringh != NULL)
- __vxge_hw_vpath_prc_configure(hldev, vp_id);
-
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr,
- &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_enable - Enable vpath.
- * This routine clears the vpath reset thereby enabling a vpath
- * to start forwarding frames and generating interrupts.
- */
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_device *hldev;
- u64 val64;
-
- hldev = vp->vpath->hldev;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
- 1 << (16 - vp->vpath->vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg1);
-}
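Taken together, the entry points removed above form a small lifecycle: open a vpath, optionally adjust its MTU, enable it, and close it again on teardown. The following is a minimal sketch of that sequence, not the driver's actual code; the function name example_bring_up_vpath, the 9000-byte MTU and the reduced error handling are illustrative assumptions.

static enum vxge_hw_status
example_bring_up_vpath(struct __vxge_hw_device *hldev,
		       struct vxge_hw_vpath_attr *attr)
{
	struct __vxge_hw_vpath_handle *vp = NULL;
	enum vxge_hw_status status;

	/* Open the virtual path; on success *vp is the new handle and
	 * attr->fifo_attr.userdata / attr->ring_attr.userdata point at
	 * the created queues. */
	status = vxge_hw_vpath_open(hldev, attr, &vp);
	if (status != VXGE_HW_OK)
		return status;

	/* Optionally switch to jumbo frames before traffic starts
	 * (9000 is an arbitrary example within VXGE_HW_MAX_MTU). */
	status = vxge_hw_vpath_mtu_set(vp, 9000);
	if (status != VXGE_HW_OK) {
		vxge_hw_vpath_close(vp);
		return status;
	}

	/* Clear the vpath reset so the path starts forwarding frames
	 * and generating interrupts. */
	vxge_hw_vpath_enable(vp);

	/* ... traffic runs; on teardown release the handle again. */
	vxge_hw_vpath_close(vp);
	return VXGE_HW_OK;
}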
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
deleted file mode 100644
index 0cd0750484ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ /dev/null
@@ -1,2086 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_CONFIG_H
-#define VXGE_CONFIG_H
-#include <linux/hardirq.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-
-#ifndef VXGE_CACHE_LINE_SIZE
-#define VXGE_CACHE_LINE_SIZE 128
-#endif
-
-#ifndef VXGE_ALIGN
-#define VXGE_ALIGN(adrs, size) \
- (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
-#endif
-
-#define VXGE_HW_MIN_MTU ETH_MIN_MTU
-#define VXGE_HW_MAX_MTU 9600
-#define VXGE_HW_DEFAULT_MTU 1500
-
-#define VXGE_HW_MAX_ROM_IMAGES 8
-
-struct eprom_image {
- u8 is_valid:1;
- u8 index;
- u8 type;
- u16 version;
-};
-
-#ifdef VXGE_DEBUG_ASSERT
-/**
- * vxge_assert
- * @test: C-condition to check
- *
- * This macro implements a traditional assert. By default assertions
- * are enabled. They can be disabled by undefining the VXGE_DEBUG_ASSERT
- * macro at compile time.
- */
-#define vxge_assert(test) BUG_ON(!(test))
-#else
-#define vxge_assert(test)
-#endif /* end of VXGE_DEBUG_ASSERT */
-
-/**
- * enum vxge_debug_level
- * @VXGE_NONE: debug disabled
- * @VXGE_ERR: all errors will be logged
- * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
- * will be logged. Very noisy.
- *
- * This enumeration is used to switch between different debug levels at
- * runtime if the DEBUG macro is defined during compilation. If the DEBUG
- * macro is not defined, the code is compiled out.
- */
-enum vxge_debug_level {
- VXGE_NONE = 0,
- VXGE_TRACE = 1,
- VXGE_ERR = 2
-};
-
-#define NULL_VPID 0xFFFFFFFF
-#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
-#define VXGE_DEBUG_MODULE_MASK 0xffffffff
-#define VXGE_DEBUG_TRACE_MASK 0xffffffff
-#define VXGE_DEBUG_ERR_MASK 0xffffffff
-#define VXGE_DEBUG_MASK 0x000001ff
-#else
-#define VXGE_DEBUG_MODULE_MASK 0x20000000
-#define VXGE_DEBUG_TRACE_MASK 0x20000000
-#define VXGE_DEBUG_ERR_MASK 0x20000000
-#define VXGE_DEBUG_MASK 0x00000001
-#endif
-
-/*
- * @VXGE_COMPONENT_LL: do debug for vxge link layer module
- * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
- *
- * These masks are used to distinguish modules or libraries during
- * compilation and at runtime. The Makefile must declare the
- * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
- */
-#define VXGE_COMPONENT_LL 0x20000000
-#define VXGE_COMPONENT_ALL 0xffffffff
-
-#define VXGE_HW_BASE_INF 100
-#define VXGE_HW_BASE_ERR 200
-#define VXGE_HW_BASE_BADCFG 300
-
-enum vxge_hw_status {
- VXGE_HW_OK = 0,
- VXGE_HW_FAIL = 1,
- VXGE_HW_PENDING = 2,
- VXGE_HW_COMPLETIONS_REMAIN = 3,
-
- VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
- VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
-
- VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
- VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
- VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
- VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
- VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
- VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
- VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
- VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
- VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
- VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
- VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
- VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
- VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
- VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
- VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
- VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
- VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17,
- VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
- VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
- VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
- VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
- VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
-
- VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
- VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
- VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
- VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
- VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
- VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
- VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
-
- VXGE_HW_EOF_TRACE_BUF = -1
-};
-
-/**
- * enum vxge_hw_device_link_state - Link state enumeration.
- * @VXGE_HW_LINK_NONE: Invalid link state.
- * @VXGE_HW_LINK_DOWN: Link is down.
- * @VXGE_HW_LINK_UP: Link is up.
- *
- */
-enum vxge_hw_device_link_state {
- VXGE_HW_LINK_NONE,
- VXGE_HW_LINK_DOWN,
- VXGE_HW_LINK_UP
-};
-
-/**
- * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
- * @VXGE_HW_FW_UPGRADE_OK: All OK send next 16 bytes
- * @VXGE_HW_FW_UPGRADE_DONE: upload completed
- * @VXGE_HW_FW_UPGRADE_ERR: upload error
- * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
- *
- */
-enum vxge_hw_fw_upgrade_code {
- VXGE_HW_FW_UPGRADE_OK = 0,
- VXGE_HW_FW_UPGRADE_DONE = 1,
- VXGE_HW_FW_UPGRADE_ERR = 2,
- VXGE_FW_UPGRADE_BYTES2SKIP = 3
-};
-
-/**
- * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
- * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
- */
-enum vxge_hw_fw_upgrade_err_code {
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
- VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
- VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
- VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
-};
-
-/**
- * struct vxge_hw_device_date - Date Format
- * @day: Day
- * @month: Month
- * @year: Year
- * @date: Date in string format
- *
- * Structure for returning date
- */
-
-#define VXGE_HW_FW_STRLEN 32
-struct vxge_hw_device_date {
- u32 day;
- u32 month;
- u32 year;
- char date[VXGE_HW_FW_STRLEN];
-};
-
-struct vxge_hw_device_version {
- u32 major;
- u32 minor;
- u32 build;
- char version[VXGE_HW_FW_STRLEN];
-};
-
-/**
- * struct vxge_hw_fifo_config - Configuration of fifo.
- * @enable: Is this fifo to be commissioned
- * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
- * blocks per queue.
- * @max_frags: Max number of Tx buffers per TxDL (that is, per single
- * transmit operation).
- * No more than 256 transmit buffers can be specified.
- * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
- * bytes. Setting @memblock_size to page size ensures
- * by-page allocation of descriptors. 128K bytes is the
- * maximum supported block size.
- * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
- * (e.g., to align on a cache line).
- * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
- * Use 0 otherwise.
- * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
- * which generally improves latency of the host bridge operation
- * (see PCI specification). For valid values please refer
- * to struct vxge_hw_fifo_config{} in the driver sources.
- * Configuration of all Titan fifos.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_fifo_config{} structure.
- */
-struct vxge_hw_fifo_config {
- u32 enable;
-#define VXGE_HW_FIFO_ENABLE 1
-#define VXGE_HW_FIFO_DISABLE 0
-
- u32 fifo_blocks;
-#define VXGE_HW_MIN_FIFO_BLOCKS 2
-#define VXGE_HW_MAX_FIFO_BLOCKS 128
-
- u32 max_frags;
-#define VXGE_HW_MIN_FIFO_FRAGS 1
-#define VXGE_HW_MAX_FIFO_FRAGS 256
-
- u32 memblock_size;
-#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
-#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
-#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
-
- u32 alignment_size;
-#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
-#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
-#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
-
- u32 intr;
-#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
-#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
-#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
-
- u32 no_snoop_bits;
-#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
-#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
-#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
-#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
-#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
-
-};
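As a rough illustration of the (min, max) ranges documented above, a caller could populate a fifo configuration entirely from the limits and defaults defined in this structure; fifo_cfg and the particular fifo_blocks/max_frags choices below are hypothetical, not values taken from the driver.

	struct vxge_hw_fifo_config fifo_cfg = {
		.enable		= VXGE_HW_FIFO_ENABLE,
		.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,	  /* 2..128 TxDL blocks */
		.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,	  /* up to 256 Tx buffers per TxDL */
		.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE, /* by-block descriptor allocation */
		.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,/* align fragments on a cache line */
		.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
		.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
	};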
-/**
- * struct vxge_hw_ring_config - Ring configurations.
- * @enable: Is this ring to be commissioned
- * @ring_blocks: Numbers of RxD blocks in the ring
- * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
- * to Titan User Guide.
- * @scatter_mode: Titan supports two receive scatter modes: A and B.
- * For details please refer to Titan User Guide.
- * @rx_timer_val: The number of 32ns periods that would be counted between two
- * timer interrupts.
- * @greedy_return: If set, forces the device to return absolutely all RxDs
- * that are consumed and still on board when a timer interrupt
- * triggers. If clear, then if the device has already returned
- * RxDs after the previous timer interrupt triggered and before the
- * current one triggered, the device is not forced to return the
- * rest of the consumed RxDs that it has on board, which account
- * for a byte count less than the one programmed into the
- * PRC_CFG6.RXD_CRXDT field
- * @rx_timer_ci: TBD
- * @backoff_interval_us: Time (in microseconds), after which Titan
- * tries to download RxDs posted by the host.
- * Note that the "backoff" does not happen if the host posts receive
- * descriptors in a timely fashion.
- * Ring configuration.
- */
-struct vxge_hw_ring_config {
- u32 enable;
-#define VXGE_HW_RING_ENABLE 1
-#define VXGE_HW_RING_DISABLE 0
-#define VXGE_HW_RING_DEFAULT 1
-
- u32 ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS 1
-#define VXGE_HW_MAX_RING_BLOCKS 128
-#define VXGE_HW_DEF_RING_BLOCKS 2
-
- u32 buffer_mode;
-#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
-#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
-#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
-#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
-
- u32 scatter_mode;
-#define VXGE_HW_RING_SCATTER_MODE_A 0
-#define VXGE_HW_RING_SCATTER_MODE_B 1
-#define VXGE_HW_RING_SCATTER_MODE_C 2
-#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
-
- u64 rxds_limit;
-#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
-};
-
-/**
- * struct vxge_hw_vp_config - Configuration of virtual path
- * @vp_id: Virtual Path Id
- * @min_bandwidth: Minimum Guaranteed bandwidth
- * @ring: See struct vxge_hw_ring_config{}.
- * @fifo: See struct vxge_hw_fifo_config{}.
- * @tti: Configuration of interrupt associated with Transmit.
- * see struct vxge_hw_tim_intr_config();
- * @rti: Configuration of interrupt associated with Receive.
- * see struct vxge_hw_tim_intr_config();
- * @mtu: mtu size used on this port.
- * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
- * remove the VLAN tag from all received tagged frames that are not
- * replicated at the internal L2 switch.
- * 0 - Do not strip the VLAN tag.
- * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
- * always placed into the RxDMA descriptor.
- *
- * This structure is used by the driver to pass the configuration parameters to
- * configure Virtual Path.
- */
-struct vxge_hw_vp_config {
- u32 vp_id;
-
-#define VXGE_HW_VPATH_PRIORITY_MIN 0
-#define VXGE_HW_VPATH_PRIORITY_MAX 16
-#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
-
- u32 min_bandwidth;
-#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
-#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
-#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
-
- struct vxge_hw_ring_config ring;
- struct vxge_hw_fifo_config fifo;
- struct vxge_hw_tim_intr_config tti;
- struct vxge_hw_tim_intr_config rti;
-
- u32 mtu;
-#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
-#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
-#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
-
- u32 rpa_strip_vlan_tag;
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
-
-};
-/**
- * struct vxge_hw_device_config - Device configuration.
- * @dma_blockpool_initial: Initial size of DMA Pool
- * @dma_blockpool_max: Maximum blocks in DMA pool
- * @intr_mode: Line, or MSI-X interrupt.
- *
- * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
- * @rth_it_type: RTH IT table programming type
- * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
- * @vp_config: Configuration for virtual paths
- * @device_poll_millis: Specify the interval (in milliseconds)
- * to wait for register reads
- *
- * Titan configuration.
- * Contains per-device configuration parameters, including:
- * - stats sampling interval, etc.
- *
- * In addition, struct vxge_hw_device_config{} includes "subordinate"
- * configurations, including:
- * - fifos and rings;
- * - MAC (done at firmware level).
- *
- * See Titan User Guide for more details.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_device_config{} structure. Please refer to the
- * corresponding include file.
- * See also: struct vxge_hw_tim_intr_config{}.
- */
-struct vxge_hw_device_config {
- u32 device_poll_millis;
-#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
-#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
-#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
-
- u32 dma_blockpool_initial;
- u32 dma_blockpool_max;
-#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
-#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
-
-#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
-
- u32 intr_mode:2,
-#define VXGE_HW_INTR_MODE_IRQLINE 0
-#define VXGE_HW_INTR_MODE_MSIX 1
-#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
-
-#define VXGE_HW_INTR_MODE_DEF 0
-
- rth_en:1,
-#define VXGE_HW_RTH_DISABLE 0
-#define VXGE_HW_RTH_ENABLE 1
-#define VXGE_HW_RTH_DEFAULT 0
-
- rth_it_type:1,
-#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
-#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
-#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
-
- rts_mac_en:1,
-#define VXGE_HW_RTS_MAC_DISABLE 0
-#define VXGE_HW_RTS_MAC_ENABLE 1
-#define VXGE_HW_RTS_MAC_DEFAULT 0
-
- hwts_en:1;
-#define VXGE_HW_HWTS_DISABLE 0
-#define VXGE_HW_HWTS_ENABLE 1
-#define VXGE_HW_HWTS_DEFAULT 1
-
- struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
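A sketch of how the bit-field knobs and the per-vpath sub-configurations described above might be filled in; hldev_cfg and the choice of MSI-X with RTH enabled on vpath 0 are illustrative assumptions, and a real driver would typically derive these values from module parameters.

	struct vxge_hw_device_config hldev_cfg = {
		.device_poll_millis	= VXGE_HW_DEF_DEVICE_POLL_MILLIS,
		.dma_blockpool_initial	= VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE,
		.dma_blockpool_max	= VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE,
		.intr_mode		= VXGE_HW_INTR_MODE_MSIX,
		.rth_en			= VXGE_HW_RTH_ENABLE,
		.rth_it_type		= VXGE_HW_RTH_IT_TYPE_MULTI_IT,
		.rts_mac_en		= VXGE_HW_RTS_MAC_DEFAULT,
		.hwts_en		= VXGE_HW_HWTS_DEFAULT,
	};

	/* Commission a ring and a fifo on the first virtual path. */
	hldev_cfg.vp_config[0].vp_id	   = 0;
	hldev_cfg.vp_config[0].mtu	   = VXGE_HW_DEFAULT_MTU;
	hldev_cfg.vp_config[0].ring.enable = VXGE_HW_RING_ENABLE;
	hldev_cfg.vp_config[0].fifo.enable = VXGE_HW_FIFO_ENABLE;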
-
-/**
- * function vxge_uld_link_up_f - Link-Up callback provided by driver.
- * @devh: HW device handle.
- * Link-up notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_link_down_f - Link-Down callback provided by
- * driver.
- * @devh: HW device handle.
- *
- * Link-Down notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_crit_err_f - Critical Error notification callback.
- * @devh: HW device handle.
- * (typically - at HW device initialization time).
- * @type: Enumerated hw error, e.g.: double ECC.
- * @serr_data: Titan status.
- * @ext_data: Extended data. The contents depends on the @type.
- *
- * Critical error notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
- * @link_up: See vxge_uld_link_up_f{}.
- * @link_down: See vxge_uld_link_down_f{}.
- * @crit_err: See vxge_uld_crit_err_f{}.
- *
- * Driver slow-path (per-driver) callbacks.
- * Implemented by driver and provided to HW via
- * vxge_hw_driver_initialize().
- * Note that these callbacks are not mandatory: HW will not invoke
- * a callback if NULL is specified.
- *
- * See also: vxge_hw_driver_initialize().
- */
-struct vxge_hw_uld_cbs {
- void (*link_up)(struct __vxge_hw_device *devh);
- void (*link_down)(struct __vxge_hw_device *devh);
- void (*crit_err)(struct __vxge_hw_device *devh,
- enum vxge_hw_event type, u64 ext_data);
-};
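A minimal sketch of providing the optional slow-path callbacks described above; the my_* names and the callback bodies are hypothetical, and the table is ultimately handed to HW via vxge_hw_driver_initialize() / the uld_callbacks pointer of struct vxge_hw_device_attr (defined further below).

static void my_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. mark the carrier up on the associated net_device */
}

static void my_link_down(struct __vxge_hw_device *devh)
{
	/* e.g. mark the carrier down and stop the Tx queues */
}

static void my_crit_err(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data)
{
	/* schedule a device reset or log the fault, depending on @type */
}

/* Any member left NULL is simply never invoked by HW. */
static const struct vxge_hw_uld_cbs my_uld_callbacks = {
	.link_up	= my_link_up,
	.link_down	= my_link_down,
	.crit_err	= my_crit_err,
};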
-
-/*
- * struct __vxge_hw_blockpool_entry - Block private data structure
- * @item: List header used to link.
- * @length: Length of the block
- * @memblock: Virtual address block
- * @dma_addr: DMA Address of the block.
- * @dma_handle: DMA handle of the block.
- * @acc_handle: DMA acc handle
- *
- * Block is allocated with a header to put the blocks into list.
- *
- */
-struct __vxge_hw_blockpool_entry {
- struct list_head item;
- u32 length;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * struct __vxge_hw_blockpool - Block Pool
- * @hldev: HW device
- * @block_size: size of each block.
- * @pool_size: Number of blocks in the pool
- * @pool_max: Maximum number of blocks above which to free additional blocks
- * @req_out: Number of block requests outstanding with the OS
- * @free_block_list: List of free blocks
- *
- * Block pool contains the DMA blocks preallocated.
- *
- */
-struct __vxge_hw_blockpool {
- struct __vxge_hw_device *hldev;
- u32 block_size;
- u32 pool_size;
- u32 pool_max;
- u32 req_out;
- struct list_head free_block_list;
- struct list_head free_entry_list;
-};
-
-/*
- * enum __vxge_hw_channel_type - Enumerated channel types.
- * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
- * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
- * @VXGE_HW_CHANNEL_TYPE_RING: ring.
- * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
- * (and recognized) channel types. Currently: 2.
- *
- * Enumerated channel types. Currently there are only two link-layer
- * channels - Titan fifo and Titan ring. In the future the list will grow.
- */
-enum __vxge_hw_channel_type {
- VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
- VXGE_HW_CHANNEL_TYPE_FIFO = 1,
- VXGE_HW_CHANNEL_TYPE_RING = 2,
- VXGE_HW_CHANNEL_TYPE_MAX = 3
-};
-
-/*
- * struct __vxge_hw_channel
- * @item: List item; used to maintain a list of open channels.
- * @type: Channel type. See enum vxge_hw_channel_type{}.
- * @devh: Device handle. HW device object that contains _this_ channel.
- * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
- * @length: Channel length. Currently allocated number of descriptors.
- * The channel length "grows" when more descriptors get allocated.
- * See _hw_mempool_grow.
- * @reserve_arr: Reserve array. Contains descriptors that can be reserved
- * by driver for the subsequent send or receive operation.
- * See vxge_hw_fifo_txdl_reserve(),
- * vxge_hw_ring_rxd_reserve().
- * @reserve_ptr: Current pointer in the reserve array
- * @reserve_top: Reserve top gives the maximum number of dtrs available in
- * reserve array.
- * @work_arr: Work array. Contains descriptors posted to the channel.
- * Note that at any point in time @work_arr contains 3 types of
- * descriptors:
- * 1) posted but not yet consumed by Titan device;
- * 2) consumed but not yet completed;
- * 3) completed but not yet freed
- * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
- * @post_index: Post index. At any point in time points to the
- * position in the channel which will contain the next
- * to-be-posted descriptor.
- * @compl_index: Completion index. At any point in time points to the
- * position in the channel which will contain the next
- * to-be-completed descriptor.
- * @free_arr: Free array. Contains completed descriptors that were freed
- * (i.e., handed over back to HW) by driver.
- * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
- * @free_ptr: current pointer in free array
- * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
- * to store per-operation control information.
- * @stats: Pointer to common statistics
- * @userdata: Per-channel opaque (void*) user-defined context, which may be
- * driver object, ULP connection, etc.
- * Once channel is open, @userdata is passed back to user via
- * vxge_hw_channel_callback_f.
- *
- * HW channel object.
- *
- * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
- */
-struct __vxge_hw_channel {
- struct list_head item;
- enum __vxge_hw_channel_type type;
- struct __vxge_hw_device *devh;
- struct __vxge_hw_vpath_handle *vph;
- u32 length;
- u32 vp_id;
- void **reserve_arr;
- u32 reserve_ptr;
- u32 reserve_top;
- void **work_arr;
- u32 post_index ____cacheline_aligned;
- u32 compl_index ____cacheline_aligned;
- void **free_arr;
- u32 free_ptr;
- void **orig_arr;
- u32 per_dtr_space;
- void *userdata;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 first_vp_id;
- struct vxge_hw_vpath_stats_sw_common_info *stats;
-
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_virtualpath - Virtual Path
- *
- * @vp_id: Virtual path id
- * @vp_open: This flag specifies if vxge_hw_vpath_open was called from the LL driver
- * @hldev: Hal device
- * @vp_config: Virtual Path Config
- * @vp_reg: VPATH Register map address in BAR0
- * @vpmgmt_reg: VPATH_MGMT register map address
- * @max_mtu: Max mtu that can be supported
- * @vsport_number: vsport attached to this vpath
- * @max_kdfc_db: Maximum kernel mode doorbells
- * @max_nofl_db: Maximum non offload doorbells
- * @tx_intr_num: Interrupt Number associated with the TX
- *
- * @ringh: Ring Queue
- * @fifoh: FIFO Queue
- * @vpath_handles: Virtual Path handles list
- * @stats_block: Memory for DMAing stats
- * @stats: Vpath statistics
- *
- * Virtual path structure to encapsulate the data related to a virtual path.
- * Virtual paths are allocated by the HW upon getting configuration from the
- * driver and inserted into the list of virtual paths.
- */
-struct __vxge_hw_virtualpath {
- u32 vp_id;
-
- u32 vp_open;
-#define VXGE_HW_VP_NOT_OPEN 0
-#define VXGE_HW_VP_OPEN 1
-
- struct __vxge_hw_device *hldev;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
-
- u32 max_mtu;
- u32 vsport_number;
- u32 max_kdfc_db;
- u32 max_nofl_db;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- struct __vxge_hw_ring *____cacheline_aligned ringh;
- struct __vxge_hw_fifo *____cacheline_aligned fifoh;
- struct list_head vpath_handles;
- struct __vxge_hw_blockpool_entry *stats_block;
- struct vxge_hw_vpath_stats_hw_info *hw_stats;
- struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- spinlock_t lock;
-};
-
-/*
- * struct __vxge_hw_vpath_handle - List item to store callback information
- * @item: List head to keep the item in linked list
- * @vpath: Virtual path to which this item belongs
- *
- * This structure is used to store the callback information.
- */
-struct __vxge_hw_vpath_handle {
- struct list_head item;
- struct __vxge_hw_virtualpath *vpath;
-};
-
-/*
- * struct __vxge_hw_device
- *
- * HW device object.
- */
-/**
- * struct __vxge_hw_device - Hal device object
- * @magic: Magic Number
- * @bar0: BAR0 virtual address.
- * @pdev: Physical device handle
- * @config: Configuration passed by the LL driver at initialization
- * @link_state: Link state
- *
- * HW device object. Represents Titan adapter
- */
-struct __vxge_hw_device {
- u32 magic;
-#define VXGE_HW_DEVICE_MAGIC 0x12345678
-#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- void __iomem *bar0;
- struct pci_dev *pdev;
- struct net_device *ndev;
- struct vxge_hw_device_config config;
- enum vxge_hw_device_link_state link_state;
-
- const struct vxge_hw_uld_cbs *uld_callbacks;
-
- u32 host_type;
- u32 func_id;
- u32 access_rights;
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
- struct vxge_hw_legacy_reg __iomem *legacy_reg;
- struct vxge_hw_toc_reg __iomem *toc_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
- [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
- [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
- struct vxge_hw_vpath_reg __iomem *vpath_reg \
- [VXGE_HW_TITAN_VPATH_REG_SPACES];
- u8 __iomem *kdfc;
- u8 __iomem *usdc;
- struct __vxge_hw_virtualpath virtual_paths \
- [VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpath_assignments;
- u64 vpaths_deployed;
- u32 first_vp_id;
- u64 tim_int_mask0[4];
- u32 tim_int_mask1[4];
-
- struct __vxge_hw_blockpool block_pool;
- struct vxge_hw_device_stats stats;
- u32 debug_module_mask;
- u32 debug_level;
- u32 level_err;
- u32 level_trace;
- u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
-};
-
-#define VXGE_HW_INFO_LEN 64
-/**
- * struct vxge_hw_device_hw_info - Device information
- * @host_type: Host Type
- * @func_id: Function Id
- * @vpath_mask: vpath bit mask
- * @fw_version: Firmware version
- * @fw_date: Firmware Date
- * @flash_version: Flash version
- * @flash_date: Flash date
- * @mac_addrs: Mac addresses for each vpath
- * @mac_addr_masks: Mac address masks for each vpath
- *
- * Contains the vpath mask with a bit set for each vpath allocated to
- * the driver, and the first MAC address for each vpath.
- */
-struct vxge_hw_device_hw_info {
- u32 host_type;
-#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
-#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
-#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
-#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
-#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
-#define VXGE_HW_SR_VH_FUNCTION0 5
-#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
-#define VXGE_HW_VH_NORMAL_FUNCTION 7
- u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
-#define VXGE_HW_FUNCTION_MODE_SRIOV 2
-#define VXGE_HW_FUNCTION_MODE_MRIOV 3
-#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
-#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
-#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
-#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
-
- u32 func_id;
- u64 vpath_mask;
- struct vxge_hw_device_version fw_version;
- struct vxge_hw_device_date fw_date;
- struct vxge_hw_device_version flash_version;
- struct vxge_hw_device_date flash_date;
- u8 serial_number[VXGE_HW_INFO_LEN];
- u8 part_number[VXGE_HW_INFO_LEN];
- u8 product_desc[VXGE_HW_INFO_LEN];
- u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
- u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
-};
-
-/**
- * struct vxge_hw_device_attr - Device memory spaces.
- * @bar0: BAR0 virtual address.
- * @pdev: PCI device object.
- *
- * Device memory spaces. Includes configuration, BAR0 etc. per device
- * mapped memories. Also, includes a pointer to OS-specific PCI device object.
- */
-struct vxge_hw_device_attr {
- void __iomem *bar0;
- struct pci_dev *pdev;
- const struct vxge_hw_uld_cbs *uld_callbacks;
-};
-
-#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
- m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0x80000000; \
- m1[1] = 0x40000000; \
- } \
-}
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
- m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0; \
- m1[1] = 0; \
- } \
-}
-
-#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
- status = vxge_hw_mrpcim_stats_access(hldev, \
- VXGE_HW_STATS_OP_READ, \
- loc, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-/*
- * struct __vxge_hw_ring - Ring channel.
- * @channel: Channel "base" of this ring, the common part of all HW
- * channels.
- * @mempool: Memory pool, the pool from which descriptors get allocated.
- * (See vxge_hw_mm.h).
- * @config: Ring configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @ring_length: Length of the ring
- * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
- * as per Titan User Guide.
- * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
- * 1-buffer mode descriptor is 32 byte long, etc.
- * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
- * per-descriptor data (e.g., DMA handle for Solaris)
- * @per_rxd_space: Per rxd space requested by driver
- * @rxds_per_block: Number of descriptors per hardware-defined RxD
- * block. Depends on the (1-, 3-, 5-) buffer mode.
- * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
- * usage. Not to be confused with @rxd_priv_size.
- * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
- * @callback: Channel completion callback. HW invokes the callback when there
- * are new completions on that channel. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Channel's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding channel.
- * See also vxge_hw_channel_rxd_term_f{}.
- * @stats: Statistics for ring
- * Ring channel.
- *
- * Note: The structure is cache line aligned to better utilize
- * CPU cache performance.
- */
-struct __vxge_hw_ring {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 ring_length;
- u32 buffer_mode;
- u32 rxd_size;
- u32 rxd_priv_size;
- u32 per_rxd_space;
- u32 rxds_per_block;
- u32 rxdblock_priv_size;
- u32 cmpl_cnt;
- u32 vp_id;
- u32 doorbell_cnt;
- u32 total_db_cnt;
- u64 rxds_limit;
- u32 rtimer;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
- struct vxge_hw_ring_config *config;
-} ____cacheline_aligned;
-
-/**
- * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
- * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
- * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_txdl_state {
- VXGE_HW_TXDL_STATE_NONE = 0,
- VXGE_HW_TXDL_STATE_AVAIL = 1,
- VXGE_HW_TXDL_STATE_POSTED = 2,
- VXGE_HW_TXDL_STATE_FREED = 3
-};
-/*
- * struct __vxge_hw_fifo - Fifo.
- * @channel: Channel "base" of this fifo, the common part of all HW
- * channels.
- * @mempool: Memory pool, from which descriptors get allocated.
- * @config: Fifo configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @interrupt_type: Interrupt type to be used
- * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
- * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. For more
- * details on TxDLs please refer to the Titan UG.
- * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
- * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
- * @priv_size: Per-Tx descriptor space reserved for driver
- * usage.
- * @per_txdl_space: Per txdl private space for the driver
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @stats: Statistics of this fifo
- *
- * Fifo channel.
- * Note: The structure is cache line aligned.
- */
-struct __vxge_hw_fifo {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_fifo_config *config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
- u64 interrupt_type;
- u32 no_snoop_bits;
- u32 txdl_per_memblock;
- u32 txdl_size;
- u32 priv_size;
- u32 per_txdl_space;
- u32 vp_id;
- u32 tx_intr_num;
- u32 rtimer;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb,
- int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
- * Each memblock is a contiguous block of DMA-able memory.
- * @frags: Total number of fragments (that is, contiguous data buffers)
- * carried by this TxDL.
- * @align_vaddr_start: Aligned virtual address start
- * @align_vaddr: Virtual address of the per-TxDL area in memory used for
- * alignment. Used to place one or more mis-aligned fragments
- * @align_dma_addr: DMA address translated from the @align_vaddr.
- * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
- * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
- * @align_dma_offset: The current offset into the @align_vaddr area.
- * Grows while filling the descriptor, gets reset.
- * @align_used_frags: Number of fragments used.
- * @alloc_frags: Total number of fragments allocated.
- * @unused: TODO
- * @next_txdl_priv: (TODO).
- * @first_txdp: (TODO).
- * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
- * TxDL list.
- * @txdlh: Corresponding txdlh to this TxDL.
- * @memblock: Pointer to the TxDL memory block or memory page.
- * on the next send operation.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- *
- * See also: struct vxge_hw_ring_rxd_priv{}.
- */
-struct __vxge_hw_fifo_txdl_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
- u32 frags;
- u8 *align_vaddr_start;
- u8 *align_vaddr;
- dma_addr_t align_dma_addr;
- struct pci_dev *align_dma_handle;
- struct pci_dev *align_dma_acch;
- ptrdiff_t align_dma_offset;
- u32 align_used_frags;
- u32 alloc_frags;
- u32 unused;
- struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
- struct vxge_hw_fifo_txd *first_txdp;
- void *memblock;
-};
-
-/*
- * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
- * @control_0: Bits 0 to 7 - Doorbell type.
- * Bits 8 to 31 - Reserved.
- * Bits 32 to 39 - The highest TxD in this TxDL.
- * Bits 40 to 47 - Reserved.
- * Bits 48 to 55 - Reserved.
- * Bits 56 to 63 - No snoop flags.
- * @txdl_ptr: The starting location of the TxDL in host memory.
- *
- * Created by the host and written to the adapter via PIO to a Kernel Doorbell
- * FIFO. All non-offload doorbell wrapper fields must be written by the host as
- * part of a doorbell write. Consumed by the adapter but is not written by the
- * adapter.
- */
-struct __vxge_hw_non_offload_db_wrapper {
- u64 control_0;
-#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
-#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_NODBW_TYPE_NODBW 0
-
-#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
-#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
-
-#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
-
- u64 txdl_ptr;
-};
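To make the bit layout above concrete, the sketch below composes a non-offload doorbell purely from the accessor macros defined for this structure; last_txd, the no-snoop choice and txdl_dma_addr are hypothetical inputs, and the PIO write that would push the wrapper into the kernel doorbell FIFO is not shown.

	struct __vxge_hw_non_offload_db_wrapper db;
	u64 no_snoop = VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
		       VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ;
	u32 last_txd = 3;		/* highest TxD index in this TxDL */

	db.control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		       VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
		       VXGE_HW_NODBW_LIST_NO_SNOOP(no_snoop);
	db.txdl_ptr  = txdl_dma_addr;	/* DMA address of the TxDL (hypothetical) */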
-
-/*
- * TX Descriptor
- */
-
-/**
- * struct vxge_hw_fifo_txd - Transmit Descriptor
- * @control_0: Bits 0 to 6 - Reserved.
- * Bit 7 - List Ownership. This field should be initialized
- * to '1' by the driver before the transmit list pointer is
- * written to the adapter. This field will be set to '0' by the
- * adapter once it has completed transmitting the frame or frames in
- * the list. Note - This field is only valid in TxD0. Additionally,
- * for multi-list sequences, the driver should not release any
- * buffers until the ownership of the last list in the multi-list
- * sequence has been returned to the host.
- * Bits 8 to 11 - Reserved
- * Bits 12 to 15 - Transfer_Code. This field is only valid in
- * TxD0. It is used to describe the status of the transmit data
- * buffer transfer. This field is always overwritten by the
- * adapter, so this field may be initialized to any value.
- * Bits 16 to 17 - Host steering. This field allows the host to
- * override the selection of the physical transmit port.
- * Attention:
- * "Normal" sounds as if it is learned from the switch rather than
- * from the aggregation algorithms.
- * 00: Normal. Use Destination/MAC Address
- * lookup to determine the transmit port.
- * 01: Send on physical Port1.
- * 10: Send on physical Port0.
- * 11: Send on both ports.
- * Bits 18 to 21 - Reserved
- * Bits 22 to 23 - Gather_Code. This field is set by the host and
- * is used to describe how individual buffers comprise a frame.
- * 10: First descriptor of a frame.
- * 00: Middle of a multi-descriptor frame.
- * 01: Last descriptor of a frame.
- * 11: First and last descriptor of a frame (the entire frame
- * resides in a single buffer).
- * For multi-descriptor frames, the only valid gather code sequence
- * is {10, [00], 01}. In other words, the descriptors must be placed
- * in the list in the correct order.
- * Bits 24 to 27 - Reserved
- * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
- * definition. Only valid in TxD0. This field allows the host to
- * indicate the Ethernet encapsulation of an outbound LSO packet.
- * 00 - classic mode (best guess)
- * 01 - LLC
- * 10 - SNAP
- * 11 - DIX
- * If "classic mode" is selected, the adapter will attempt to
- * decode the frame's Ethernet encapsulation by examining the L/T
- * field as follows:
- * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
- * if packet is IPv4 or IPv6.
- * 0x8870 Jumbo-SNAP encoding.
- * 0x0800 IPv4 DIX encoding
- * 0x86DD IPv6 DIX encoding
- * others illegal encapsulation
- * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
- * Set to 1 to perform segmentation offload for TCP/UDP.
- * This field is valid only in TxD0.
- * Bits 31 to 33 - Reserved.
- * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
- * This field is meaningful only when LSO_Control is non-zero.
- * When LSO_Control is set to TCP_LSO, the single (possibly large)
- * TCP segment described by this TxDL will be sent as a series of
- * TCP segments each of which contains no more than LSO_MSS
- * payload bytes.
- * When LSO_Control is set to UDP_LSO, the single (possibly large)
- * UDP datagram described by this TxDL will be sent as a series of
- * UDP datagrams each of which contains no more than LSO_MSS
- * payload bytes.
- * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
- * or TCP payload, with the exception of the last, which will have
- * <= LSO_MSS bytes of payload.
- * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
- * buffer to be read by the adapter. This field is written by the
- * host. A value of 0 is illegal.
- * Bits 32 to 63 - This value is written by the adapter upon
- * completion of a UDP or TCP LSO operation and indicates the number
- * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
- * returned for any non-LSO operation.
- * @control_1: Bits 0 to 4 - Reserved.
- * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
- * offload. This field is only valid in the first TxD of a frame.
- * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that TCP is present.
- * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that UDP is present.
- * Bits 8 to 14 - Reserved.
- * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
- * instruct the adapter to insert the VLAN tag specified by the
- * Tx_VLAN_Tag field. This field is only valid in the first TxD of
- * a frame.
- * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
- * to be inserted into the frame by the adapter (the first two bytes
- * of a VLAN tag are always 0x8100). This field is only valid if the
- * Tx_VLAN_Enable field is set to '1'.
- * Bits 32 to 33 - Reserved.
- * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
- * number the frame associated with. This field is written by the
- * host. It is only valid in the first TxD of a frame.
- * Bits 40 to 42 - Reserved.
- * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
- * functions. This field is valid only in the first TxD
- * of a frame.
- * Bits 44 to 45 - Reserved.
- * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
- * generate an interrupt as soon as all of the frames in the list
- * have been transmitted. In order to have per-frame interrupts,
- * the driver should place a maximum of one frame per list. This
- * field is only valid in the first TxD of a frame.
- * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
- * to count the frame toward the utilization interrupt specified in
- * the Tx_Int_Number field. This field is only valid in the first
- * TxD of a frame.
- * Bits 48 to 63 - Reserved.
- * @buffer_pointer: Buffer start address.
- * @host_control: Host_Control.Opaque 64bit data stored by driver inside the
- * Titan descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post().The %host_control is returned as is
- * to the driver with each completed descriptor.
- *
- * Transmit descriptor (TxD). A fifo descriptor contains a configured number
- * (list) of TxDs. For more details please refer to Titan User Guide,
- * Section 5.4.2 "Transmit Descriptor (TxD) Format".
- */
-struct vxge_hw_fifo_txd {
- u64 control_0;
-#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
-
-
-#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
-
-
-#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
-
-#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
-
-#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
-
- u64 control_1;
-#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
-#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
-#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
-#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
-
-#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
-
-#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
-#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
-
- u64 buffer_pointer;
-
- u64 host_control;
-};
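Editor's note: the short sketch below is not part of the deleted header; it only illustrates, under stated assumptions, how the control-word macros above could be combined when filling a one-buffer TxD. The function name and the dma_addr/len parameters are hypothetical, and gather-code selection, doorbell posting and ownership hand-off are deliberately omitted.

/* Hedged illustration only -- not from the original vxge header. */
static inline void example_fill_one_buffer_txd(struct vxge_hw_fifo_txd *txdp,
                                               u64 dma_addr, u32 len)
{
        txdp->buffer_pointer = dma_addr;
        /* buffer length goes into bits 48..63 of control_0 */
        txdp->control_0 = VXGE_HW_FIFO_TXD_BUFFER_SIZE(len);
        /* request IPv4 + TCP checksum offload (control_1 bits 5 and 6) */
        txdp->control_1 = VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
                          VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN;
        txdp->host_control = 0;
}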
-
-/**
- * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
- * @host_control: This field is exclusively for host use and is "readonly"
- * from the adapter's perspective.
- * @control_0: Bits 0 to 6 - RTH_Bucket
- * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
- * by the host, and is set to 0 by the adapter.
- * 0 - Host owns RxD and buffer.
- * 1 - The adapter owns RxD and buffer.
- * Bit 8 - Fast_Path_Eligible When set, indicates that the
- * received frame meets all of the criteria for fast path processing.
- * The required criteria are as follows:
- * !SYN &
- * (Transfer_Code == "Transfer OK") &
- * (!Is_IP_Fragment) &
- * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
- * (Is_IPv6)) &
- * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
- * (Is_UDP & (computed_L4_checksum == 0xFFFF |
- * computed _L4_checksum == 0x0000)))
- * (same meaning for all RxD buffer modes)
- * Bit 9 - L3 Checksum Correct
- * Bit 10 - L4 Checksum Correct
- * Bit 11 - Reserved
- * Bit 12 to 15 - This field is written by the adapter. It is
- * used to report the status of the frame transfer to the host.
- * 0x0 - Transfer OK
- * 0x4 - RDA Failure During Transfer
- * 0x5 - Unparseable Packet, such as unknown IPv6 header.
- * 0x6 - Frame integrity error (FCS or ECC).
- * 0x7 - Buffer Size Error. The provided buffer(s) were not
- * appropriately sized and data loss occurred.
- * 0x8 - Internal ECC Error. RxD corrupted.
- * 0x9 - IPv4 Checksum error
- * 0xA - TCP/UDP Checksum error
- * 0xF - Unknown Error or Multiple Error. Indicates an
- * unknown problem or that more than one transfer code is set.
- * Bit 16 - SYN The adapter sets this field to indicate that
- * the incoming frame contained a TCP segment with its SYN bit
- * set and its ACK bit NOT set. (same meaning for all RxD buffer
- * modes)
- * Bit 17 - Is ICMP
- * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
- * Socket Pair Direct Match Table and the frame was steered based
- * on SPDM.
- * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
- * Indirection Table and the frame was steered based on hash
- * indirection.
- * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
- * type) that was used to calculate the hash.
- * Bit 24 - IS_VLAN Set to '1' if the frame is VLAN
- * tagged.
- * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
- * of the received frame.
- * 0x0 - Ethernet DIX
- * 0x1 - LLC
- * 0x2 - SNAP (includes Jumbo-SNAP)
- * 0x3 - IPX
- * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
- * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
- * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
- * IP packet.
- * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
- * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
- * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
- * arrived with the frame. If the resulting computed IPv4 header
- * checksum for the frame did not produce the expected 0xFFFF value,
- * then the transfer code would be set to 0x9.
- * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
- * arrived with the frame. If the resulting computed TCP/UDP checksum
- * for the frame did not produce the expected 0xFFFF value, then the
- * transfer code would be set to 0xA.
- * @control_1: Bits 0 to 1 - Reserved
- * Bits 2 to 15 - Buffer0_Size. This field is set by the host and
- * eventually overwritten by the adapter. The host writes the
- * available buffer size in bytes when it passes the descriptor to
- * the adapter. When a frame is delivered to the host, the adapter
- * populates this field with the number of bytes written into the
- * buffer. The largest supported buffer is 16,383 bytes.
- * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
- * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
- * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
- * of the VLAN tag, if one was detected by the adapter. This field is
- * populated even if VLAN-tag stripping is enabled.
- * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
- *
- * One buffer mode RxD for ring structure
- */
-struct vxge_hw_ring_rxd_1 {
- u64 host_control;
- u64 control_0;
-#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
-
-#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
-
-#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-
-#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
-
-#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
-
-#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
-
-#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
-
-#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
-
-#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
-
-#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
-
-#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
-
-#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
-
- u64 control_1;
-
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
-
-#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
-
-#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
-
- u64 buffer0_ptr;
-};
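Editor's note: another hedged sketch, not present in the deleted file, showing how the GET accessors above can decode a completed one-buffer RxD; the function name and local variables are hypothetical.

/* Hedged illustration only -- not from the original vxge header. */
static inline void example_decode_completed_rxd(struct vxge_hw_ring_rxd_1 *rxdp)
{
        u32 t_code  = (u32)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
        u32 pkt_len = (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
        u32 is_vlan = (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
        u32 vlan    = (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);

        /* t_code 0x0 means "Transfer OK" per the field description above;
         * a real driver would branch on it before touching the payload. */
        (void)t_code; (void)pkt_len; (void)is_vlan; (void)vlan;
}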
-
-enum vxge_hw_rth_algoritms {
- RTH_ALG_JENKINS = 0,
- RTH_ALG_MS_RSS = 1,
- RTH_ALG_CRC32C = 2
-};
-
-/**
- * struct vxge_hw_rth_hash_types - RTH hash types.
- * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
- * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
- * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
- * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
- * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
- * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
- *
- * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set().
- *
- * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
- */
-struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en:1,
- hash_type_ipv4_en:1,
- hash_type_tcpipv6_en:1,
- hash_type_ipv6_en:1,
- hash_type_tcpipv6ex_en:1,
- hash_type_ipv6ex_en:1;
-};
-
-void vxge_hw_device_debug_set(
- struct __vxge_hw_device *devh,
- enum vxge_debug_level level,
- u32 mask);
-
-u32
-vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
-
-u32
-vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-
-/**
- * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
- * @buf_mode: Buffer mode (1 buffer mode only)
- *
- * This function returns the size of an RxD for the given buffer mode.
- */
-static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
-{
- return sizeof(struct vxge_hw_ring_rxd_1);
-}
-
-/**
- * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
- * @buf_mode: Buffer mode (1 buffer mode only)
- *
- * This function returns the number of RxDs per RxD block for the given buffer mode.
- */
-static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
-{
- return (u32)((VXGE_HW_BLOCK_SIZE-16) /
- sizeof(struct vxge_hw_ring_rxd_1));
-}
-
-/**
- * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
- * @rxdh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer this descriptor
- * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
- * the receive buffer should already be mapped to the device.
- * @size: Size of the receive @dma_pointer buffer.
- *
- * Prepare 1-buffer-mode Rx descriptor for posting
- * (via vxge_hw_ring_rxd_post()).
- *
- * This inline helper function does not return a value and always
- * succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_set(
- void *rxdh,
- dma_addr_t dma_pointer,
- u32 size)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxdp->buffer0_ptr = dma_pointer;
- rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
- rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
-}
-
-/**
- * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
- * descriptor.
- * @ring_handle: Ring handle.
- * @rxdh: Descriptor handle.
- * @pkt_length: Length (in bytes) of the data placed into the receive
- * buffer. Returned by HW.
- *
- * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
- * This inline helper-function uses completed descriptor to populate receive
- * buffer pointer and other "out" parameters. The function always succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u32 *pkt_length)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- *pkt_length =
- (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
- * a completed receive descriptor for 1b mode.
- * @ring_handle: Ring handle.
- * @rxdh: Descriptor handle.
- * @rxd_info: Descriptor information
- *
- * Retrieve extended information associated with a completed receive descriptor.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_info_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- struct vxge_hw_ring_rxd_info *rxd_info)
-{
-
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxd_info->syn_flag =
- (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
- rxd_info->is_icmp =
- (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
- rxd_info->fast_path_eligible =
- (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
- rxd_info->l3_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l3_cksum =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
- rxd_info->l4_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l4_cksum =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
- rxd_info->frame =
- (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
- rxd_info->proto =
- (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
- rxd_info->is_vlan =
- (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
- rxd_info->vlan =
- (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
- rxd_info->rth_bucket =
- (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
- rxd_info->rth_it_hit =
- (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
- rxd_info->rth_spdm_hit =
- (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
- rxd_info->rth_hash_type =
- (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
- rxd_info->rth_value =
- (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
- * of a 1b or 3b mode ring.
- * @rxdh: Descriptor handle.
- *
- * Returns: private driver info associated with the descriptor.
- * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
- *
- */
-static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- return (void *)(size_t)rxdp->host_control;
-}
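Editor's note: a hedged sketch (not part of the deleted header) of the receive completion path tying together the three helpers above; ringh and rxdh stand in for the arguments handed to the ring completion callback.

/* Hedged illustration only -- not from the original vxge header. */
static inline void example_rxd_completion(struct __vxge_hw_ring *ringh, void *rxdh)
{
        u32 pkt_length;
        struct vxge_hw_ring_rxd_info rxd_info;
        void *priv;

        vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
        vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &rxd_info);
        priv = vxge_hw_ring_rxd_private_get(rxdh);

        /* pkt_length, rxd_info.l3_cksum_valid and friends would now drive
         * the driver's receive processing; details are omitted here. */
        (void)priv;
}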
-
-/**
- * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
- * @txdlh: Descriptor handle.
- * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
- * and/or TCP and/or UDP.
- *
- * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
- * descriptor.
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_buffer_set().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
- txdp->control_1 |= cksum_bits;
-}
-
-/**
- * vxge_hw_fifo_txdl_mss_set - Set MSS.
- * @txdlh: Descriptor handle.
- * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
- * driver, which in turn inserts the MSS into the @txdlh.
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_cksum_set_bits().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
-}
-
-/**
- * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
- * @txdlh: Descriptor handle.
- * @vlan_tag: 16bit VLAN tag.
- *
- * Insert VLAN tag into specified transmit descriptor.
- * The actual insertion of the tag into the outgoing frame is done by the hardware.
- */
-static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
-}
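Editor's note: a hedged sketch, not from the deleted file, combining the descriptor "preparation" helpers above on one TxDL handle; buffer attachment and vxge_hw_fifo_txdl_post() are left out because their signatures do not appear in this excerpt.

/* Hedged illustration only -- not from the original vxge header. */
static inline void example_prepare_txdl(void *txdlh, int mss, u16 vlan_tag)
{
        vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
                                         VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
                                         VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
        vxge_hw_fifo_txdl_mss_set(txdlh, mss);
        vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
}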
-
-/**
- * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
- * @txdlh: Descriptor handle.
- *
- * Retrieve per-descriptor private data.
- * Note that the driver requests per-descriptor space via
- * struct vxge_hw_fifo_attr passed to
- * vxge_hw_vpath_open().
- *
- * Returns: private driver data associated with the descriptor.
- */
-static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- return (void *)(size_t)txdp->host_control;
-}
-
-/**
- * struct vxge_hw_ring_attr - Ring open "template".
- * @callback: Ring completion callback. HW invokes the callback when there
- * are new completions on that ring. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Ring's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding ring.
- * See also vxge_hw_ring_rxd_term_f{}.
- * @userdata: User-defined "context" of _that_ ring. Passed back to the
- * user as one of the @callback, @rxd_init, and @rxd_term arguments.
- * @per_rxd_space: If specified (i.e., greater than zero): extra space
- * reserved by HW for each receive descriptor.
- * Can be used to store, and retrieve on completion,
- * information specific to the driver.
- *
- * Ring open "template". User fills the structure with ring
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_ring_attr {
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- void *userdata;
- u32 per_rxd_space;
-};
-
-/**
- * function vxge_hw_fifo_callback_f - FIFO callback.
- * @vpath_handle: Virtual path whose fifo contains one or more completed
- * descriptors.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @t_code: Transfer code, as per Titan User Guide.
- * Returned by HW.
- * @host_control: Opaque 64bit data stored by driver inside the Titan
- * descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The @host_control is returned
- * as is to the driver with each completed descriptor.
- * @userdata: Opaque per-fifo data specified at fifo open
- * time, via vxge_hw_vpath_open().
- *
- * Fifo completion callback (type declaration). A single per-fifo
- * callback is specified at fifo open time, via
- * vxge_hw_vpath_open(). Typically gets called as part of the processing
- * of the Interrupt Service Routine.
- *
- * Fifo callback gets called by HW if, and only if, there is at least
- * one new completion on a given fifo. Upon processing the first @txdlh, the
- * driver is _supposed_ to continue consuming completions using:
- * - vxge_hw_fifo_txdl_next_completed()
- *
- * Note that failure to process new completions in a timely fashion
- * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
- *
- * Non-zero @t_code means failure to process the transmit descriptor.
- *
- * In the "transmit" case the failure could happen, for instance, when the
- * link is down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
- */
-/**
- * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
- * @userdata: Per-fifo user data (a.k.a. context) specified at
- * fifo open time, via vxge_hw_vpath_open().
- *
- * Terminate descriptor callback. Unless NULL is specified in the
- * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
- * HW invokes the callback as part of closing the fifo, prior to
- * de-allocating the fifo and associated data structures
- * (including descriptors).
- * The driver should use the callback to (for instance) unmap
- * and free DMA data buffers associated with the posted (state =
- * VXGE_HW_TXDL_STATE_POSTED) descriptors,
- * as well as other relevant cleanup functions.
- *
- * See also: struct vxge_hw_fifo_attr{}
- */
-/**
- * struct vxge_hw_fifo_attr - Fifo open "template".
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @userdata: User-defined "context" of _that_ fifo. Passed back to the
- * user as one of the @callback, and @txdl_term arguments.
- * @per_txdl_space: If specified (i.e., greater than zero): extra space
- * reserved by HW for each transmit descriptor. Can be used to
- * store, and retrieve on completion, information specific
- * to the driver.
- *
- * Fifo open "template". User fills the structure with fifo
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_fifo_attr {
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb, int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- void *userdata;
- u32 per_txdl_space;
-};
-
-/**
- * struct vxge_hw_vpath_attr - Attributes of virtual path
- * @vp_id: Identifier of Virtual Path
- * @ring_attr: Attributes of ring for non-offload receive
- * @fifo_attr: Attributes of fifo for non-offload transmit
- *
- * Attributes of virtual path. This structure is passed as parameter
- * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
- */
-struct vxge_hw_vpath_attr {
- u32 vp_id;
- struct vxge_hw_ring_attr ring_attr;
- struct vxge_hw_fifo_attr fifo_attr;
-};
-
-enum vxge_hw_status vxge_hw_device_hw_info_get(
- void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status vxge_hw_device_config_default_get(
- struct vxge_hw_device_config *device_config);
-
-/**
- * vxge_hw_device_link_state_get - Get link state.
- * @devh: HW device handle.
- *
- * Get link state.
- * Returns: link state.
- */
-static inline
-enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
- struct __vxge_hw_device *devh)
-{
- return devh->link_state;
-}
-
-void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config);
-
-enum vxge_hw_status vxge_hw_device_getpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 *tx,
- u32 *rx);
-
-enum vxge_hw_status vxge_hw_device_setpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 tx,
- u32 rx);
-
-static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
- unsigned long size,
- struct pci_dev **p_dmah,
- struct pci_dev **p_dma_acch)
-{
- void *vaddr;
- unsigned long misaligned = 0;
- int realloc_flag = 0;
- *p_dma_acch = *p_dmah = NULL;
-
-realloc:
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- if (vaddr == NULL)
- return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
- VXGE_CACHE_LINE_SIZE);
- if (realloc_flag)
- goto out;
-
- if (misaligned) {
- /* misaligned, free current one and try allocating
- * size + VXGE_CACHE_LINE_SIZE memory
- */
- kfree(vaddr);
- size += VXGE_CACHE_LINE_SIZE;
- realloc_flag = 1;
- goto realloc;
- }
-out:
- *(unsigned long *)p_dma_acch = misaligned;
- vaddr = (void *)((u8 *)vaddr + misaligned);
- return vaddr;
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
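Editor's note: a hedged sketch (not in the deleted file) showing that vxge_os_dma_malloc() and vxge_os_dma_free() above must be used as a pair, because the misalignment offset is stashed through p_dma_acch at allocation time and read back on free; pdev and the block size are hypothetical.

/* Hedged illustration only -- not from the original vxge header. */
static inline void example_aligned_block(struct pci_dev *pdev)
{
        struct pci_dev *dmah, *dma_acch;
        void *blk;

        blk = vxge_os_dma_malloc(pdev, 4096, &dmah, &dma_acch);
        if (!blk)
                return;
        /* blk is now aligned to VXGE_CACHE_LINE_SIZE ... use it ... */
        vxge_os_dma_free(pdev, blk, &dma_acch);
}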
-
-/*
- * __vxge_hw_mempool_item_priv - Return a pointer to the per-item private space
- */
-static inline void*
-__vxge_hw_mempool_item_priv(
- struct vxge_hw_mempool *mempool,
- u32 memblock_idx,
- void *item,
- u32 *memblock_item_idx)
-{
- ptrdiff_t offset;
- void *memblock = mempool->memblocks_arr[memblock_idx];
-
-
- offset = (u32)((u8 *)item - (u8 *)memblock);
- vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
-
- (*memblock_item_idx) = (u32) offset / mempool->item_size;
- vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
-
- return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
- (*memblock_item_idx) * mempool->items_priv_size;
-}
-
-/*
- * __vxge_hw_fifo_txdl_priv - Return the per-TxDL private structure
- * associated with a TxD of the fifo.
- * @fifo: Fifo
- * @txdp: Pointer to a TxD
- */
-static inline struct __vxge_hw_fifo_txdl_priv *
-__vxge_hw_fifo_txdl_priv(
- struct __vxge_hw_fifo *fifo,
- struct vxge_hw_fifo_txd *txdp)
-{
- return (struct __vxge_hw_fifo_txdl_priv *)
- (((char *)((ulong)txdp->host_control)) +
- fifo->per_txdl_space);
-}
-
-enum vxge_hw_status vxge_hw_vpath_open(
- struct __vxge_hw_device *devh,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle);
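Editor's note: a hedged sketch, not part of the deleted header, of how the vxge_hw_ring_attr / vxge_hw_fifo_attr / vxge_hw_vpath_attr "templates" described earlier feed into vxge_hw_vpath_open(); the vp_id value is hypothetical and the completion callbacks are left NULL purely for brevity (a real driver installs its ring and fifo callbacks here).

/* Hedged illustration only -- not from the original vxge header. */
static inline enum vxge_hw_status
example_open_vpath(struct __vxge_hw_device *devh,
                   struct __vxge_hw_vpath_handle **vp_out)
{
        struct vxge_hw_vpath_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.vp_id = 0;                         /* hypothetical vpath id */

        /* a real driver would set attr.ring_attr.callback and
         * attr.fifo_attr.callback to its completion handlers and may
         * request per-descriptor space via per_rxd_space/per_txdl_space */
        attr.ring_attr.per_rxd_space  = 0;
        attr.fifo_attr.per_txdl_space = 0;

        return vxge_hw_vpath_open(devh, &attr, vp_out);
}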
-
-enum vxge_hw_status vxge_hw_vpath_close(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
-
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_mtu_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 new_mtu);
-
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-
-static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
-{
- writel(val, addr + 4);
-}
-
-static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
-
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
-
-/**
- * vxge_debug_ll
- * @level: level of debug verbosity.
- * @mask: mask for the debug
- * @fmt: printf-like format string
- *
- * Provides logging facilities. Can be customized on a per-module
- * basis and/or with debug levels. Input parameters, except
- * module and level, are the same as for POSIX printf. This macro
- * may be compiled out if the DEBUG macro was never defined.
- * See also: enum vxge_debug_level{}.
- */
-#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) do { \
- if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
- (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
- if ((mask & VXGE_DEBUG_MASK) == mask) \
- printk(fmt "\n", ##__VA_ARGS__); \
-} while (0)
-#else
-#define vxge_debug_ll(level, mask, fmt, ...)
-#endif
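Editor's note: a hedged usage sketch for the logging macro documented above; it is not part of the deleted file, and passing VXGE_DEBUG_MASK as the mask argument is only a convenient illustration (the driver normally goes through per-module wrappers such as the vxge_debug_init() calls seen later in this diff).

/* Hedged illustration only -- not from the original vxge source. */
static inline void example_log_error(int t_code)
{
        vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK,
                      "example: completion failed, t_code %d", t_code);
}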
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size);
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size);
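Editor's note: a hedged sketch, not from the deleted header, of enabling receive-side hashing on an open vpath with the vxge_hw_rth_hash_types structure defined earlier; the chosen hash types and the bucket size of 8 are illustrative assumptions.

/* Hedged illustration only -- not from the original vxge header. */
static inline enum vxge_hw_status
example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
        struct vxge_hw_rth_hash_types hash_types = {
                .hash_type_tcpipv4_en = 1,
                .hash_type_ipv4_en    = 1,
        };

        return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash_types, 8);
}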
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
-
-#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
-#define VXGE_HW_MAX_POLLING_COUNT 100
-
-void
-vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build);
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
- int size);
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *eprom_image_data);
-
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
deleted file mode 100644
index 4d91026485ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ /dev/null
@@ -1,1154 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/ethtool.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-
-#include "vxge-ethtool.h"
-
-static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"vpaths_opened"},
- {"vpath_open_fail_cnt"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"tx_frms"},
- {"tx_errors"},
- {"tx_bytes"},
- {"txd_not_free"},
- {"txd_out_of_desc"},
- {"rx_frms"},
- {"rx_errors"},
- {"rx_bytes"},
- {"rx_mcast"},
- {"pci_map_fail_cnt"},
- {"skb_alloc_fail_cnt"}
-};
-
-/**
- * vxge_ethtool_set_link_ksettings - Sets different link parameters.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool to set
- * link information.
- *
- * The function sets different link parameters provided by the user onto
- * the NIC.
- * Return value:
- * 0 on success.
- */
-static int
-vxge_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- /* We currently only support 10Gb/FULL */
- if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
- (cmd->base.speed != SPEED_10000) ||
- (cmd->base.duplex != DUPLEX_FULL))
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * vxge_ethtool_get_link_ksettings - Return link specific information.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool
- * to return link information.
- *
- * Returns link-specific information such as speed and duplex to ethtool.
- * Return value:
- * 0 on success.
- */
-static int vxge_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(dev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-/**
- * vxge_ethtool_gdrvinfo - Returns driver specific information.
- * @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool to
- * return driver information.
- *
- * Returns driver-specific information such as name and version to ethtool.
- */
-static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
-}
-
-/**
- * vxge_ethtool_gregs - Dump the Titan vpath register space into the buffer.
- * @dev: device pointer.
- * @regs: pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @space: The buffer into which the registers are dumped.
- *
- * Dumps the vpath register space of the Titan NIC into the user-provided
- * buffer area.
- */
-static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int index, offset;
- enum vxge_hw_status status;
- u64 reg;
- u64 *reg_space = (u64 *)space;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
- regs->version = vdev->pdev->subsystem_device;
- for (index = 0; index < vdev->no_of_vpath; index++) {
- for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
- offset += 8) {
- status = vxge_hw_mgmt_reg_read(hldev,
- vxge_hw_mgmt_reg_type_vpath,
- vdev->vpaths[index].device_id,
- offset, &reg);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Getting reg dump Failed",
- __func__, __LINE__);
- return;
- }
- *reg_space++ = reg;
- }
- }
-}
-
-/**
- * vxge_ethtool_idnic - To physically identify the nic on the system.
- * @dev : device pointer.
- * @state : requested LED state
- *
- * Used to physically identify the NIC on the system.
- * 0 on success
- */
-static int vxge_ethtool_idnic(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vxge_ethtool_getpause_data - Pause frame generation and reception.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * Returns the Pause frame generation and reception capability of the NIC.
- * Return value:
- * void
- */
-static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
-}
-
-/**
- * vxge_ethtool_setpause_data - set/reset pause frame generation.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- * Return value:
- * 0 on success.
- */
-static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
-
- vdev->config.tx_pause_enable = ep->tx_pause;
- vdev->config.rx_pause_enable = ep->rx_pause;
-
- return 0;
-}
-
-static void vxge_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *tmp_stats)
-{
- int j, k;
- enum vxge_hw_status status;
- enum vxge_hw_status swstatus;
- struct vxge_vpath *vpath = NULL;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
- struct vxge_hw_xmac_stats *xmac_stats;
- struct vxge_hw_device_stats_sw_info *sw_stats;
- struct vxge_hw_device_stats_hw_info *hw_stats;
-
- u64 *ptr = tmp_stats;
-
- memset(tmp_stats, 0,
- vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
-
- xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
- if (xmac_stats == NULL) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for xmac_stats",
- __func__, __LINE__);
- return;
- }
-
- sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
- GFP_KERNEL);
- if (sw_stats == NULL) {
- kfree(xmac_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for sw_stats",
- __func__, __LINE__);
- return;
- }
-
- hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
- GFP_KERNEL);
- if (hw_stats == NULL) {
- kfree(xmac_stats);
- kfree(sw_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for hw_stats",
- __func__, __LINE__);
- return;
- }
-
- *ptr++ = 0;
- status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
- if (status != VXGE_HW_OK) {
- if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting xmac stats",
- __func__, __LINE__);
- }
- }
- swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
- if (swstatus != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting sw stats",
- __func__, __LINE__);
- }
-
- status = vxge_hw_device_stats_get(hldev, hw_stats);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d hw_stats_get error", __func__, __LINE__);
- }
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
- ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN);
- continue;
- }
-
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
- *ptr++ = vpath_info->tx_stats.tx_data_octets;
- *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
- *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
- *ptr++ = vpath_info->tx_stats.tx_icmp;
- *ptr++ = vpath_info->tx_stats.tx_tcp;
- *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
- *ptr++ = vpath_info->tx_stats.tx_udp;
- *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip;
- *ptr++ = vpath_info->tx_stats.tx_parse_error;
- *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_frms;
- *ptr++ = vpath_info->rx_stats.rx_offload_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
- *ptr++ = vpath_info->rx_stats.rx_data_octets;
- *ptr++ = vpath_info->rx_stats.rx_offload_octets;
- *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
- *ptr++ = vpath_info->rx_stats.rx_long_frms;
- *ptr++ = vpath_info->rx_stats.rx_usized_frms;
- *ptr++ = vpath_info->rx_stats.rx_osized_frms;
- *ptr++ = vpath_info->rx_stats.rx_frag_frms;
- *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ip;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
- *ptr++ = vpath_info->rx_stats.rx_ip_octets;
- *ptr++ = vpath_info->rx_stats.rx_err_ip;
- *ptr++ = vpath_info->rx_stats.rx_icmp;
- *ptr++ = vpath_info->rx_stats.rx_tcp;
- *ptr++ = vpath_info->rx_stats.rx_udp;
- *ptr++ = vpath_info->rx_stats.rx_err_tcp;
- *ptr++ = vpath_info->rx_stats.rx_lost_frms;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_various_discard;
- *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
- *ptr++ = vpath_info->rx_stats.rx_red_discard;
- *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
- *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_icmp;
- *ptr++ = xmac_stats->port_stats[k].tx_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_udp;
- *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
- *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
- *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_icmp;
- *ptr++ = xmac_stats->port_stats[k].rx_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_jettison;
- *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_sw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
- &sw_stats->vpath_info[j];
- *ptr++ = vpath_info->soft_reset_cnt;
- *ptr++ = vpath_info->error_stats.unknown_alarms;
- *ptr++ = vpath_info->error_stats.network_sustained_fault;
- *ptr++ = vpath_info->error_stats.network_sustained_ok;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
- *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
- *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
- *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
- *ptr++ = vpath_info->error_stats.target_illegal_access;
- *ptr++ = vpath_info->error_stats.ini_serr_det;
- *ptr++ = vpath_info->error_stats.prc_ring_bumps;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
- *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
- *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
- *ptr++ = vpath_info->ring_stats.common_stats.
- reserve_free_swaps_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
- *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
- *ptr++ = vpath_info->fifo_stats.common_stats.
- reserve_free_swaps_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
- *ptr++ = vpath_info->fifo_stats.total_posts;
- *ptr++ = vpath_info->fifo_stats.total_buffers;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
- ptr += VXGE_HW_VPATH_STATS_LEN;
- continue;
- }
- *ptr++ = vpath_info->ini_num_mwr_sent;
- *ptr++ = vpath_info->ini_num_mrd_sent;
- *ptr++ = vpath_info->ini_num_cpl_rcvd;
- *ptr++ = vpath_info->ini_num_mwr_byte_sent;
- *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
- *ptr++ = vpath_info->wrcrdtarb_xoff;
- *ptr++ = vpath_info->rdcrdtarb_xoff;
- *ptr++ = vpath_info->vpath_genstats_count0;
- *ptr++ = vpath_info->vpath_genstats_count1;
- *ptr++ = vpath_info->vpath_genstats_count2;
- *ptr++ = vpath_info->vpath_genstats_count3;
- *ptr++ = vpath_info->vpath_genstats_count4;
- *ptr++ = vpath_info->vpath_genstats_count5;
- *ptr++ = vpath_info->prog_event_vnum0;
- *ptr++ = vpath_info->prog_event_vnum1;
- *ptr++ = vpath_info->prog_event_vnum2;
- *ptr++ = vpath_info->prog_event_vnum3;
- *ptr++ = vpath_info->rx_multi_cast_frame_discard;
- *ptr++ = vpath_info->rx_frm_transferred;
- *ptr++ = vpath_info->rxd_returned;
- *ptr++ = vpath_info->rx_mpa_len_fail_frms;
- *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
- *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
- *ptr++ = vpath_info->rx_permitted_frms;
- *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
- *ptr++ = vpath_info->rx_wol_frms;
- *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
- }
-
- *ptr++ = 0;
- *ptr++ = vdev->stats.vpaths_open;
- *ptr++ = vdev->stats.vpath_open_fail;
- *ptr++ = vdev->stats.link_up;
- *ptr++ = vdev->stats.link_down;
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
- *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
- *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
- *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
- *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
- *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
- *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
- *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
- *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
- *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
- vdev->vpaths[k].ring.stats.pci_map_fail;
- *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
- }
-
- ptr += 12;
-
- kfree(xmac_stats);
- kfree(sw_stats);
- kfree(hw_stats);
-}
-
-static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
- u8 *data)
-{
- int stat_size = 0;
- int i, j;
- struct vxgedev *vdev = netdev_priv(dev);
- switch (stringset) {
- case ETH_SS_STATS:
- vxge_add_string("VPATH STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_proto_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_offload_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_retx_tcp_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_various_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_sleep_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_queue_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_protocol_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_any_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octets_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_count_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discard_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_udp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_switch_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_len_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rpa_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rts_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_trash_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_buff_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_local_fault_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jettison_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_remote_fault_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("soft_reset_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("unknown_alarms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_fault_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_ok_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_pif_chain_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_drop_timeout_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("target_illegal_access_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_serr_det_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_ring_bumps_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_quanta_size_err_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("ring_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- vxge_add_string("fifo_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_posts_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_buffers_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- }
-
- vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count0_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count1_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count2_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count3_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count4_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count5_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum0_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum1_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum2_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum3_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_multi_cast_frame_discard_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_frm_transferred_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rxd_returned_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_permitted_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_wol_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- }
-
- memcpy(data + stat_size, &ethtool_driver_stats_keys,
- sizeof(ethtool_driver_stats_keys));
- }
-}
-
-static int vxge_ethtool_get_regs_len(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
-}
-
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (sset) {
- case ETH_SS_STATS:
- return VXGE_TITLE_LEN +
- (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
- DRIVER_STAT_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
- printk(KERN_INFO "Single Function Mode is required to flash the"
- " firmware\n");
- return -EINVAL;
- }
-
- if (netif_running(dev)) {
- printk(KERN_INFO "Interface %s must be down to flash the "
- "firmware\n", dev->name);
- return -EBUSY;
- }
-
- return vxge_fw_upgrade(vdev, parms->data, 1);
-}
-
-static const struct ethtool_ops vxge_ethtool_ops = {
- .get_drvinfo = vxge_ethtool_gdrvinfo,
- .get_regs_len = vxge_ethtool_get_regs_len,
- .get_regs = vxge_ethtool_gregs,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = vxge_ethtool_getpause_data,
- .set_pauseparam = vxge_ethtool_setpause_data,
- .get_strings = vxge_ethtool_get_strings,
- .set_phys_id = vxge_ethtool_idnic,
- .get_sset_count = vxge_ethtool_get_sset_count,
- .get_ethtool_stats = vxge_get_ethtool_stats,
- .flash_device = vxge_fw_flash,
- .get_link_ksettings = vxge_ethtool_get_link_ksettings,
- .set_link_ksettings = vxge_ethtool_set_link_ksettings,
-};
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev)
-{
- ndev->ethtool_ops = &vxge_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
deleted file mode 100644
index 065a2c0429a4..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef _VXGE_ETHTOOL_H
-#define _VXGE_ETHTOOL_H
-
-#include "vxge-main.h"
-
-/* Ethtool related variables and Macros. */
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
-
-#define VXGE_TITLE_LEN 5
-#define VXGE_HW_VPATH_STATS_LEN 27
-#define VXGE_HW_AGGR_STATS_LEN 13
-#define VXGE_HW_PORT_STATS_LEN 94
-#define VXGE_HW_VPATH_TX_STATS_LEN 19
-#define VXGE_HW_VPATH_RX_STATS_LEN 42
-#define VXGE_SW_STATS_LEN 60
-#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
- VXGE_HW_AGGR_STATS_LEN +\
- VXGE_HW_PORT_STATS_LEN +\
- VXGE_HW_VPATH_TX_STATS_LEN +\
- VXGE_HW_VPATH_RX_STATS_LEN)
-
-#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
-#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
-
-/* Maximum flicker time of adapter LED */
-#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
-#define VXGE_FLICKER_ON 1
-#define VXGE_FLICKER_OFF 0
-
-#define vxge_add_string(fmt, size, buf, ...) {\
- snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
- *size += ETH_GSTRING_LEN; \
-}
-
-#endif /*_VXGE_ETHTOOL_H*/
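The vxge_add_string() macro removed above lays the ethtool strings table out as a fixed-stride array: each counter name is formatted into its own ETH_GSTRING_LEN slot and the running size advances by a full slot regardless of how long the formatted name actually is. A minimal user-space sketch of that pattern (illustrative only; add_string(), the 4-entry buffer and main() are stand-ins, not driver code):

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN	32	/* the kernel's ethtool string stride */

#define add_string(fmt, size, buf, ...) do {				\
	snprintf((buf) + *(size), ETH_GSTRING_LEN, fmt, __VA_ARGS__);	\
	*(size) += ETH_GSTRING_LEN;					\
} while (0)

int main(void)
{
	char data[4 * ETH_GSTRING_LEN] = { 0 };
	int stat_size = 0;
	int i;

	for (i = 0; i < 4; i++)
		add_string("tx_frms_%d", &stat_size, data, i);

	/* every entry starts at a multiple of ETH_GSTRING_LEN */
	for (i = 0; i < 4; i++)
		printf("%2d: %s\n", i, data + i * ETH_GSTRING_LEN);
	return 0;
}
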
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
deleted file mode 100644
index fa5d4ddf429b..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ /dev/null
@@ -1,4808 +0,0 @@
-/******************************************************************************
-* This software may be used and distributed according to the terms of
-* the GNU General Public License (GPL), incorporated herein by reference.
-* Drivers based on or derived from this code fall under the GPL and must
-* retain the authorship, copyright and license notice. This file is not
-* a complete program and may only be used when the entire operating
-* system is licensed under the GPL.
-* See the file COPYING in this distribution for more information.
-*
-* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
-* Virtualized Server Adapter.
-* Copyright(c) 2002-2010 Exar Corp.
-*
-* The module loadable parameters that are supported by the driver and a brief
-* explanation of all the variables:
-* vlan_tag_strip:
-* Strip VLAN Tag enable/disable. Instructs the device to remove
-* the VLAN tag from all received tagged frames that are not
-* replicated at the internal L2 switch.
-* 0 - Do not strip the VLAN tag.
-* 1 - Strip the VLAN tag.
-*
-* addr_learn_en:
-* Enable learning the mac address of the guest OS interface in
-* a virtualization environment.
-* 0 - DISABLE
-* 1 - ENABLE
-*
-* max_config_port:
-* Maximum number of ports to be supported.
-* MIN - 1 and MAX - 2
-*
-* max_config_vpath:
-* This configures the maximum number of VPATHs configured for each
-* device function.
-* MIN - 1 and MAX - 17
-*
-* max_config_dev:
-* This configures the maximum number of device functions to be enabled.
-* MIN - 1 and MAX - 17
-*
-******************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <net/ip.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/net_tstamp.h>
-#include <linux/prefetch.h>
-#include <linux/module.h>
-#include "vxge-main.h"
-#include "vxge-reg.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
- "Virtualized Server Adapter");
-
-static const struct pci_device_id vxge_id_table[] = {
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
- PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
- PCI_ANY_ID},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, vxge_id_table);
-
-VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
-VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
-VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
-VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
-
-static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
-static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
-module_param_array(bw_percentage, uint, NULL, 0);
-
-static struct vxge_drv_config *driver_config;
-static void vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-static inline int is_vxge_card_up(struct vxgedev *vdev)
-{
- return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-}
-
-static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
-{
- struct sk_buff **skb_ptr = NULL;
- struct sk_buff **temp;
-#define NR_SKB_COMPLETED 16
- struct sk_buff *completed[NR_SKB_COMPLETED];
- int more;
-
- do {
- more = 0;
- skb_ptr = completed;
-
- if (__netif_tx_trylock(fifo->txq)) {
- vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
- NR_SKB_COMPLETED, &more);
- __netif_tx_unlock(fifo->txq);
- }
-
- /* free SKBs */
- for (temp = completed; temp != skb_ptr; temp++)
- dev_consume_skb_irq(*temp);
- } while (more);
-}
-
-static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
-{
- int i;
-
- /* Complete all transmits */
- for (i = 0; i < vdev->no_of_vpath; i++)
- VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
-}
-
-static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
-{
- int i;
- struct vxge_ring *ring;
-
- /* Complete all receives*/
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- vxge_hw_vpath_poll_rx(ring->handle);
- }
-}
-
-/*
- * vxge_callback_link_up
- *
- * This function is called during interrupt context to notify link up state
- * change.
- */
-static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
-
- netif_carrier_on(vdev->ndev);
- netif_tx_wake_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_callback_link_down
- *
- * This function is called during interrupt context to notify link down state
- * change.
- */
-static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Down\n");
-
- vdev->stats.link_down++;
- netif_carrier_off(vdev->ndev);
- netif_tx_stop_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_rx_alloc
- *
- * Allocate SKB.
- */
-static struct sk_buff *
-vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
-{
- struct net_device *dev;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
-
- dev = ring->ndev;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- /* try to allocate skb first. this one may fail */
- skb = netdev_alloc_skb(dev, skb_size +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb == NULL) {
- vxge_debug_mem(VXGE_ERR,
- "%s: out of memory to allocate SKB", dev->name);
- ring->stats.skb_alloc_fail++;
- return NULL;
- }
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d Skb : 0x%p", ring->ndev->name,
- __func__, __LINE__, skb);
-
- skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- rx_priv->skb = skb;
- rx_priv->skb_data = NULL;
- rx_priv->data_size = skb_size;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return skb;
-}
-
-/*
- * vxge_rx_map
- */
-static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
-{
- struct vxge_rx_priv *rx_priv;
- dma_addr_t dma_addr;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- rx_priv->skb_data = rx_priv->skb->data;
- dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
- ring->stats.pci_map_fail++;
- return -EIO;
- }
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
- ring->ndev->name, __func__, __LINE__,
- (unsigned long long)dma_addr);
- vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
-
- rx_priv->data_dma = dma_addr;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return 0;
-}
-
-/*
- * vxge_rx_initial_replenish
- * Allocation of RxD as an initial replenish procedure.
- */
-static enum vxge_hw_status
-vxge_rx_initial_replenish(void *dtrh, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (vxge_rx_alloc(dtrh, ring,
- VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
- return VXGE_HW_FAIL;
-
- if (vxge_rx_map(dtrh, ring)) {
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
- dev_kfree_skb(rx_priv->skb);
-
- return VXGE_HW_FAIL;
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return VXGE_HW_OK;
-}
-
-static inline void
-vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
- int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
-{
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- skb_record_rx_queue(skb, ring->driver_id);
- skb->protocol = eth_type_trans(skb, ring->ndev);
-
- u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.rx_frms++;
- ring->stats.rx_bytes += pkt_length;
-
- if (skb->pkt_type == PACKET_MULTICAST)
- ring->stats.rx_mcast++;
- u64_stats_update_end(&ring->stats.syncp);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb protocol = %d",
- ring->ndev->name, __func__, __LINE__, skb->protocol);
-
- if (ext_info->vlan &&
- ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
- napi_gro_receive(ring->napi_p, skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-}
-
-static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
- struct vxge_rx_priv *rx_priv)
-{
- dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
- vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
-}
-
-static inline void vxge_post(int *dtr_cnt, void **first_dtr,
- void *post_dtr, struct __vxge_hw_ring *ringh)
-{
- int dtr_count = *dtr_cnt;
- if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
- if (*first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
- *first_dtr = post_dtr;
- } else
- vxge_hw_ring_rxd_post_post(ringh, post_dtr);
- dtr_count++;
- *dtr_cnt = dtr_count;
-}
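vxge_post() above batches RxD posting: most descriptors are posted without a write barrier, and once every VXGE_HW_RXSYNC_FREQ_CNT descriptors one "anchor" descriptor is held back and flushed with the barrier (the completion loop flushes the final anchor when it exits). A stand-alone sketch of that batching, where post_plain()/post_with_wmb() are hypothetical stand-ins for the vxge_hw_ring_rxd_post_post*() calls:

#include <stdio.h>

#define RXSYNC_FREQ	4	/* stand-in for VXGE_HW_RXSYNC_FREQ_CNT */

static void post_plain(int d)    { printf("post      descriptor %d\n", d); }
static void post_with_wmb(int d) { printf("post+wmb  descriptor %d\n", d); }

int main(void)
{
	int first = 0;		/* anchor descriptor held back for the barrier */
	int count = 0;
	int d;

	for (d = 1; d <= 10; d++) {
		if ((count % RXSYNC_FREQ) == 0) {
			if (first)
				post_with_wmb(first);
			first = d;		/* hold this one back */
		} else {
			post_plain(d);
		}
		count++;
	}
	if (first)				/* flush the last anchor */
		post_with_wmb(first);
	return 0;
}
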
-
-/*
- * vxge_rx_1b_compl
- *
- * If the interrupt is because of a received frame or if the receive ring
- * contains fresh as yet un-processed frames, this function is called.
- */
-static enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
- unsigned int dma_sizes;
- void *first_dtr = NULL;
- int dtr_cnt = 0;
- int data_size;
- dma_addr_t data_dma;
- int pkt_length;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
- struct vxge_hw_ring_rxd_info ext_info;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- if (ring->budget <= 0)
- goto out;
-
- do {
- prefetch((char *)dtr + L1_CACHE_BYTES);
- rx_priv = vxge_hw_ring_rxd_private_get(dtr);
- skb = rx_priv->skb;
- data_size = rx_priv->data_size;
- data_dma = rx_priv->data_dma;
- prefetch(rx_priv->skb_data);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb = 0x%p",
- ring->ndev->name, __func__, __LINE__, skb);
-
- vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
- pkt_length = dma_sizes;
-
- pkt_length -= ETH_FCS_LEN;
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d Packet Length = %d",
- ring->ndev->name, __func__, __LINE__, pkt_length);
-
- vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
-
- /* check skb validity */
- vxge_assert(skb);
-
- prefetch((char *)skb + L1_CACHE_BYTES);
- if (unlikely(t_code)) {
- if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
- VXGE_HW_OK) {
-
- ring->stats.rx_errors++;
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s :%d Rx T_code is %d",
- ring->ndev->name, __func__,
- __LINE__, t_code);
-
- /* If the t_code is not supported, and it is other
- * than 0x5 (an unparseable packet such as an unknown
- * IPv6 header), drop the frame.
- */
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- continue;
- }
- }
-
- if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
- if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
- if (!vxge_rx_map(dtr, ring)) {
- skb_put(skb, pkt_length);
-
- dma_unmap_single(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_pre_post(ringh, dtr);
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- } else {
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb = skb;
- rx_priv->data_size = data_size;
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- struct sk_buff *skb_up;
-
- skb_up = netdev_alloc_skb(dev, pkt_length +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb_up != NULL) {
- skb_reserve(skb_up,
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- dma_sync_single_for_cpu(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d skb_up = %p",
- ring->ndev->name, __func__,
- __LINE__, skb);
- memcpy(skb_up->data, skb->data, pkt_length);
-
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- /* will netif_rx small SKB instead */
- skb = skb_up;
- skb_put(skb, pkt_length);
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- vxge_debug_rx(VXGE_ERR,
- "%s: vxge_rx_1b_compl: out of "
- "memory", dev->name);
- ring->stats.skb_alloc_fail++;
- break;
- }
- }
-
- if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
- !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
- (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
- ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
- ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
-
-
- if (ring->rx_hwts) {
- struct skb_shared_hwtstamps *skb_hwts;
- u32 ns = *(u32 *)(skb->head + pkt_length);
-
- skb_hwts = skb_hwtstamps(skb);
- skb_hwts->hwtstamp = ns_to_ktime(ns);
- }
-
- /* rth_hash_type and rth_it_hit are non-zero regardless of
- * whether rss is enabled. Only the rth_value is zero/non-zero
- * if rss is disabled/enabled, so key off of that.
- */
- if (ext_info.rth_value)
- skb_set_hash(skb, ext_info.rth_value,
- PKT_HASH_TYPE_L3);
-
- vxge_rx_complete(ring, skb, ext_info.vlan,
- pkt_length, &ext_info);
-
- ring->budget--;
- ring->pkts_processed++;
- if (!ring->budget)
- break;
-
- } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
- &t_code) == VXGE_HW_OK);
-
- if (first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...",
- __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_xmit_compl
- *
- * If an interrupt was raised to indicate DMA complete of the Tx packet,
- * this function is called. It identifies the last TxD whose buffer was
- * freed and frees all skbs whose data have already DMA'ed into the NICs
- * internal memory.
- */
-static enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skb, int *more)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- struct sk_buff *skb, **done_skb = *skb_ptr;
- int pkt_cnt = 0;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Entered....", __func__, __LINE__);
-
- do {
- int frg_cnt;
- skb_frag_t *frag;
- int i = 0, j;
- struct vxge_tx_priv *txd_priv =
- vxge_hw_fifo_txdl_private_get(dtr);
-
- skb = txd_priv->skb;
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p "
- "tcode = 0x%x", fifo->ndev->name, __func__,
- __LINE__, fifo_hw, dtr, t_code);
- /* check skb validity */
- vxge_assert(skb);
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
- fifo->ndev->name, __func__, __LINE__,
- skb, txd_priv, frg_cnt);
- if (unlikely(t_code)) {
- fifo->stats.tx_errors++;
- vxge_debug_tx(VXGE_ERR,
- "%s: tx: dtr %p completed due to "
- "error t_code %01x", fifo->ndev->name,
- dtr, t_code);
- vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
- }
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev,
- txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-
- /* Updating the statistics block */
- u64_stats_update_begin(&fifo->stats.syncp);
- fifo->stats.tx_frms++;
- fifo->stats.tx_bytes += skb->len;
- u64_stats_update_end(&fifo->stats.syncp);
-
- *done_skb++ = skb;
-
- if (--nr_skb <= 0) {
- *more = 1;
- break;
- }
-
- pkt_cnt++;
- if (pkt_cnt > fifo->indicate_max_pkts)
- break;
-
- } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
- &dtr, &t_code) == VXGE_HW_OK);
-
- *skb_ptr = done_skb;
- if (netif_tx_queue_stopped(fifo->txq))
- netif_tx_wake_queue(fifo->txq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- fifo->ndev->name, __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
-{
- u16 queue_len, counter = 0;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip;
- struct tcphdr *th;
-
- ip = ip_hdr(skb);
-
- if (!ip_is_fragment(ip)) {
- th = (struct tcphdr *)(((unsigned char *)ip) +
- ip->ihl*4);
-
- queue_len = vdev->no_of_vpath;
- counter = (ntohs(th->source) +
- ntohs(th->dest)) &
- vdev->vpath_selector[queue_len - 1];
- if (counter >= queue_len)
- counter = queue_len - 1;
- }
- }
- return counter;
-}
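The Tx steering above reduces to: sum the TCP source and destination ports, mask the sum with a power-of-two-minus-one selector taken from the static vpath_selector[] table, and clamp the result to the number of open vpaths. A stand-alone sketch of that selection (select_queue() is a hypothetical helper written for illustration, not part of the driver):

#include <stdio.h>

static unsigned int select_queue(unsigned int sport, unsigned int dport,
				 unsigned int nqueues)
{
	unsigned int mask = 1;
	unsigned int q;

	/* smallest (2^k - 1) covering nqueues - 1, mirroring the static
	 * vpath_selector[] table ({0, 1, 3, 3, 7, 7, 7, 7, 15, ...}) */
	while (mask < nqueues - 1)
		mask = (mask << 1) | 1;

	q = (sport + dport) & mask;
	return (q >= nqueues) ? nqueues - 1 : q;
}

int main(void)
{
	printf("%u\n", select_queue(34567, 443, 4));	/* lands in 0..3 */
	printf("%u\n", select_queue(50000, 80, 3));	/* clamped to 0..2 */
	return 0;
}
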
-
-static enum vxge_hw_status vxge_search_mac_addr_in_list(
- struct vxge_vpath *vpath, u64 del_mac)
-{
- struct list_head *entry, *next;
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
- return TRUE;
- }
- return FALSE;
-}
-
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct vxge_mac_addrs *new_mac_entry;
- u8 *mac_address = NULL;
-
- if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
- return TRUE;
-
- new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
- if (!new_mac_entry) {
- vxge_debug_mem(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- return FALSE;
- }
-
- list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
- /* Copy the new mac address to the list */
- mac_address = (u8 *)&new_mac_entry->macaddr;
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- new_mac_entry->state = mac->state;
- vpath->mac_addr_cnt++;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt++;
-
- return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status
-vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
- if (is_multicast_ether_addr(mac->macaddr))
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
- else
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
- mac->macmask, duplicate_mode);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config add entry failed for vpath:%d",
- vpath->device_id);
- } else
- if (FALSE == vxge_mac_list_add(vpath, mac))
- status = -EPERM;
-
- return status;
-}
-
-static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
-{
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- u64 mac_addr = 0, vpath_vector = 0;
- int vpath_idx = 0;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = NULL;
-
- mac_address = (u8 *)&mac_addr;
- memcpy(mac_address, mac_header, ETH_ALEN);
-
- /* Is this mac address already in the list? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vxge_search_mac_addr_in_list(vpath, mac_addr))
- return vpath_idx;
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
-
- /* Any vpath has room to add mac address to its da table? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK)
- return -EPERM;
- return vpath_idx;
- }
- }
-
- mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
- vpath_idx = 0;
- mac_info.vpath_no = vpath_idx;
- /* Is the first vpath already selected as catch-basin ? */
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
- return vpath_idx;
- }
-
- /* Select first vpath as catch-basin */
- vpath_vector = vxge_mBIT(vpath->device_id);
- status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- vpath_vector);
- if (status != VXGE_HW_OK) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Unable to set the vpath-%d in catch-basin mode",
- VXGE_DRIVER_NAME, vpath->device_id);
- return -EPERM;
- }
-
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
-
- return vpath_idx;
-}
-
-/**
- * vxge_xmit
- * @skb : the socket buffer containing the Tx data.
- * @dev : device pointer.
- *
- * This function is the Tx entry point of the driver. Neterion NIC supports
- * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
-*/
-static netdev_tx_t
-vxge_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct vxge_fifo *fifo = NULL;
- void *dtr_priv;
- void *dtr = NULL;
- struct vxgedev *vdev = NULL;
- enum vxge_hw_status status;
- int frg_cnt, first_frg_len;
- skb_frag_t *frag;
- int i = 0, j = 0, avail;
- u64 dma_pointer;
- struct vxge_tx_priv *txdl_priv = NULL;
- struct __vxge_hw_fifo *fifo_hw;
- int offload_type;
- int vpath_no = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- /* A buffer with no data will be dropped */
- if (unlikely(skb->len <= 0)) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Buffer has no data..", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- vxge_debug_tx(VXGE_ERR,
- "%s: vdev not initialized", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (vdev->config.addr_learn_en) {
- vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
- if (vpath_no == -EPERM) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Failed to store the mac address",
- dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-
- if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
- vpath_no = skb_get_queue_mapping(skb);
- else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
- vpath_no = vxge_get_vpath_no(vdev, skb);
-
- vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
-
- if (vpath_no >= vdev->no_of_vpath)
- vpath_no = 0;
-
- fifo = &vdev->vpaths[vpath_no].fifo;
- fifo_hw = fifo->handle;
-
- if (netif_tx_queue_stopped(fifo->txq))
- return NETDEV_TX_BUSY;
-
- avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
- if (avail == 0) {
- vxge_debug_tx(VXGE_ERR,
- "%s: No free TXDs available", dev->name);
- fifo->stats.txd_not_free++;
- goto _exit0;
- }
-
- /* Last TXD? Stop tx queue to avoid dropping packets. TX
- * completion will resume the queue.
- */
- if (avail == 1)
- netif_tx_stop_queue(fifo->txq);
-
- status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
- if (unlikely(status != VXGE_HW_OK)) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Out of descriptors .", dev->name);
- fifo->stats.txd_out_of_desc++;
- goto _exit0;
- }
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
- dev->name, __func__, __LINE__,
- fifo_hw, dtr, dtr_priv);
-
- if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag = skb_vlan_tag_get(skb);
- vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
- }
-
- first_frg_len = skb_headlen(skb);
-
- dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
- first_frg_len, DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
- fifo->stats.pci_map_fail++;
- goto _exit0;
- }
-
- txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
- txdl_priv->skb = skb;
- txdl_priv->dma_buffers[j] = dma_pointer;
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p txdl_priv = %p "
- "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
- __func__, __LINE__, skb, txdl_priv,
- frg_cnt, (unsigned long long)dma_pointer);
-
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- first_frg_len);
-
- frag = &skb_shinfo(skb)->frags[0];
- for (i = 0; i < frg_cnt; i++) {
- /* ignore 0 length fragment */
- if (!skb_frag_size(frag))
- continue;
-
- dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
- goto _exit2;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d frag = %d dma_pointer = 0x%llx",
- dev->name, __func__, __LINE__, i,
- (unsigned long long)dma_pointer);
-
- txdl_priv->dma_buffers[j] = dma_pointer;
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- skb_frag_size(frag));
- frag += 1;
- }
-
- offload_type = vxge_offload_type(skb);
-
- if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- int mss = vxge_tcp_mss(skb);
- if (mss) {
- vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
- dev->name, __func__, __LINE__, mss);
- vxge_hw_fifo_txdl_mss_set(dtr, mss);
- } else {
- vxge_assert(skb->len <=
- dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
- vxge_assert(0);
- goto _exit1;
- }
- }
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vxge_hw_fifo_txdl_cksum_set_bits(dtr,
- VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
-
- vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return NETDEV_TX_OK;
-
-_exit2:
- vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-_exit1:
- j = 0;
- frag = &skb_shinfo(skb)->frags[0];
-
- dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (; j < i; j++) {
- dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit0:
- netif_tx_stop_queue(fifo->txq);
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
-/*
- * vxge_rx_term
- *
- * Function will be called by hw function to abort all outstanding receive
- * descriptors.
- */
-static void
-vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv =
- vxge_hw_ring_rxd_private_get(dtrh);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (state != VXGE_HW_RXD_STATE_POSTED)
- return;
-
- dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb_data = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- ring->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_tx_term
- *
- * Function will be called to abort all outstanding tx descriptors
- */
-static void
-vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- skb_frag_t *frag;
- int i = 0, j, frg_cnt;
- struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
- struct sk_buff *skb = txd_priv->skb;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (state != VXGE_HW_TXDL_STATE_POSTED)
- return;
-
- /* check skb validity */
- vxge_assert(skb);
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- dev_kfree_skb(skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct list_head *entry, *next;
- u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
-
- /* Copy the mac address to delete from the list */
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
- list_del(entry);
- kfree(entry);
- vpath->mac_addr_cnt--;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt--;
- return TRUE;
- }
- }
-
- return FALSE;
-}
-
-/* delete a mac address from DA table */
-static enum vxge_hw_status
-vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
- mac->macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config delete entry failed for vpath:%d",
- vpath->device_id);
- } else
- vxge_mac_list_del(vpath, mac);
- return status;
-}
-
-/**
- * vxge_set_multicast
- * @dev: pointer to the device structure
- *
- * Entry point for multicast address enable/disable
- * This function is a driver entry point which gets called by the kernel
- * whenever multicast addresses must be enabled/disabled. This also gets
- * called to set/reset promiscuous mode. Depending on the device flags, we
- * determine whether multicast addresses must be enabled or promiscuous mode
- * must be disabled, etc.
- */
-static void vxge_set_multicast(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- struct vxgedev *vdev;
- int i, mcast_cnt = 0;
- struct vxge_vpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- int vpath_idx = 0;
- struct vxge_mac_addrs *mac_entry;
- struct list_head *list_head;
- struct list_head *entry, *next;
- u8 *mac_address = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return;
-
- if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to enable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 1;
- }
- } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_disable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to disable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 0;
- }
- }
-
-
- if (!vdev->config.addr_learn_en) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- if (dev->flags & IFF_PROMISC)
- status = vxge_hw_vpath_promisc_enable(
- vpath->handle);
- else
- status = vxge_hw_vpath_promisc_disable(
- vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to %s promisc"
- ", status %d", dev->flags&IFF_PROMISC ?
- "enable" : "disable", status);
- }
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- /* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- list_head = &vdev->vpaths[0].mac_addr_list;
- if ((netdev_mc_count(dev) +
- (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
- vdev->vpaths[0].max_mac_addr_cnt)
- goto _set_all_mcast;
-
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr)) {
- for (vpath_idx = 0; vpath_idx <
- vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(
- vdev,
- &mac_info);
- }
- }
- }
- }
-
- /* Add new ones */
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Setting individual"
- "multicast address failed",
- __func__, __LINE__);
- goto _set_all_mcast;
- }
- }
- }
-
- return;
-_set_all_mcast:
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr))
- break;
- }
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info);
- }
- }
-
- /* Enable all multicast */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling all multicasts failed",
- __func__, __LINE__);
- }
- vdev->all_multi_flg = 1;
- }
- dev->flags |= IFF_ALLMULTI;
- }
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_set_mac_addr
- * @dev: pointer to the device structure
- * @p: socket info
- *
- * Update entry "0" (default MAC addr)
- */
-static int vxge_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct vxgedev *vdev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info_new, mac_info_old;
- int vpath_idx = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memset(&mac_info_new, 0, sizeof(struct macInfo));
- memset(&mac_info_old, 0, sizeof(struct macInfo));
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
- __func__, __LINE__);
-
- /* Get the old address */
- memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
-
- /* Copy the new address */
- memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
-
- /* First delete the old mac address from all the vpaths,
- as we can't specify the index while adding a new mac address */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
- if (!vpath->is_open) {
- /* This can happen when this interface is added to or removed
- from the bonding interface. Delete this station address
- from the linked list */
- vxge_mac_list_del(vpath, &mac_info_old);
-
- /* Add this new address to the linked list
- for later restoring */
- vxge_mac_list_add(vpath, &mac_info_new);
-
- continue;
- }
- /* Delete the station address */
- mac_info_old.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info_old);
- }
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- eth_hw_addr_set(dev, addr->sa_data);
- return VXGE_HW_OK;
- }
-
- /* Set this mac address to all the vpaths */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- mac_info_new.vpath_no = vpath_idx;
- mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info_new);
- if (status != VXGE_HW_OK)
- return -EINVAL;
- }
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- return status;
-}
-
-/*
- * vxge_vpath_intr_enable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to enable the interrupts
- *
- * Enables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id = 0;
- int tim_msix_id[4] = {0, 1, 0, 0};
- int alarm_msix_id = VXGE_ALARM_MSIX_ID;
-
- vxge_hw_vpath_intr_enable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
- else {
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- alarm_msix_id);
-
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
-
- /* enable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- }
-}
-
-/*
- * vxge_vpath_intr_disable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to disable the interrupts
- *
- * Disables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- struct __vxge_hw_device *hldev;
- int msix_id;
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
-
- vxge_hw_vpath_intr_disable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
- else {
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
-
- /* disable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- }
-}
-
-/* list all mac addresses from DA table */
-static enum vxge_hw_status
-vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- unsigned char macmask[ETH_ALEN];
- unsigned char macaddr[ETH_ALEN];
-
- status = vxge_hw_vpath_mac_addr_get(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config list entry failed for vpath:%d",
- vpath->device_id);
- return status;
- }
-
- while (!ether_addr_equal(mac->macaddr, macaddr)) {
- status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK)
- break;
- }
-
- return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- struct list_head *entry, *next;
-
- memset(&mac_info, 0, sizeof(struct macInfo));
-
- if (vpath->is_open) {
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- mac_address =
- (u8 *)&
- ((struct vxge_mac_addrs *)entry)->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- ((struct vxge_mac_addrs *)entry)->state =
- VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- /* does this mac address already exist in da table? */
- status = vxge_search_mac_addr_in_da_table(vpath,
- &mac_info);
- if (status != VXGE_HW_OK) {
- /* Add this mac address to the DA table */
- status = vxge_hw_vpath_mac_addr_add(
- vpath->handle, mac_info.macaddr,
- mac_info.macmask,
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA add entry failed for vpath:%d",
- vpath->device_id);
- ((struct vxge_mac_addrs *)entry)->state
- = VXGE_LL_MAC_ADDR_IN_LIST;
- }
- }
- }
- }
-
- return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status
-vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev = vpath->vdev;
- u16 vid;
-
- if (!vpath->is_open)
- return status;
-
- for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-
- return status;
-}
-
-/*
- * vxge_reset_vpath
- * @vdev: pointer to vdev
- * @vp_id: vpath to reset
- *
- * Resets the vpath
-*/
-static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int ret = 0;
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is device reset already scheduled */
- if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
-
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(vpath->handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_from_reset"
- "failed for vpath:%d", vp_id);
- return status;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for"
- "vpath:%d", vp_id);
- return status;
- }
- } else
- return VXGE_HW_FAIL;
-
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- /* Enable all broadcast */
- vxge_hw_vpath_bcast_enable(vpath->handle);
-
- /* Enable all multicast */
- if (vdev->all_multi_flg) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
-
- /* Enable the interrupts */
- vxge_vpath_intr_enable(vdev, vp_id);
-
- smp_wmb();
-
- /* Enable the flow of traffic through the vpath */
- vxge_hw_vpath_enable(vpath->handle);
-
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- vpath->ring.last_status = VXGE_HW_OK;
-
- /* Vpath reset done */
- clear_bit(vp_id, &vdev->vp_reset);
-
- /* Start the vpath queue */
- if (netif_tx_queue_stopped(vpath->fifo.txq))
- netif_tx_wake_queue(vpath->fifo.txq);
-
- return ret;
-}
-
-/* Configure CI */
-static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
-{
- int i = 0;
-
- /* Enable CI for RTI */
- if (vdev->config.intr_type == MSI_X) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_ring *hw_ring;
-
- hw_ring = vdev->vpaths[i].ring.handle;
- vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
- }
- }
-
- /* Enable CI for TTI */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
- vxge_hw_vpath_tti_ci_set(hw_fifo);
-		/*
-		 * For INTA (with or without NAPI), set CI ON for only one
-		 * vpath (there is only one free-running timer).
-		 */
- if ((vdev->config.intr_type == INTA) && (i == 0))
- break;
- }
-
- return;
-}
-
-static int do_vxge_reset(struct vxgedev *vdev, int event)
-{
- int ret = 0, vp_id, i;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is reset already scheduled */
- if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- netif_carrier_off(vdev->ndev);
-
- /* wait for all the vpath reset to complete */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- while (test_bit(vp_id, &vdev->vp_reset))
- msleep(50);
- }
-
- netif_carrier_on(vdev->ndev);
-
- /* if execution mode is set to debug, don't reset the adapter */
- if (unlikely(vdev->exec_mode)) {
- vxge_debug_init(VXGE_ERR,
- "%s: execution mode is debug, returning..",
- vdev->ndev->name);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- netif_tx_stop_all_queues(vdev->ndev);
- return 0;
- }
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- vxge_hw_device_wait_receive_idle(vdev->devh);
- vxge_hw_device_intr_disable(vdev->devh);
-
- switch (vdev->cric_err_event) {
- case VXGE_HW_EVENT_UNKNOWN:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
-				"unknown error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_RESET_START:
- break;
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- case VXGE_HW_EVENT_ALARM_CLEARED:
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- break;
- case VXGE_HW_EVENT_CRITICAL_ERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
-				"serious error",
- vdev->ndev->name);
- /* SOP or device reset required */
- /* This event is not currently used */
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
-				"serious error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
-				"slot freeze",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- default:
- break;
-
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
- netif_tx_stop_all_queues(vdev->ndev);
-
- if (event == VXGE_LL_FULL_RESET) {
- vxge_reset_all_vpaths(vdev);
- }
-
- if (event == VXGE_LL_COMPL_RESET) {
- for (i = 0; i < vdev->no_of_vpath; i++)
- if (vdev->vpaths[i].handle) {
- if (vxge_hw_vpath_recover_from_reset(
- vdev->vpaths[i].handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- ret = -EPERM;
- goto out;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- ret = -EPERM;
- goto out;
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
- /* Reprogram the DA table with populated mac addresses */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
- vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
- }
-
- /* enable vpath interrupts */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_vpath_intr_enable(vdev, i);
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- /* Indicate card up */
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Get the traffic to flow through the vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_enable(vdev->vpaths[i].handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
- }
-
- netif_tx_wake_all_queues(vdev->ndev);
- }
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-
- /* Indicate reset done */
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
- return ret;
-}
-
-/*
- * vxge_reset
- * @work: pointer to the reset_task work item embedded in struct vxgedev
- *
- * The driver may reset the chip on events such as serr, eccerr, etc.
- */
-static void vxge_reset(struct work_struct *work)
-{
- struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
-
- if (!netif_running(vdev->ndev))
- return;
-
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-}
-
-/**
- * vxge_poll_msix - Receive handler when Receive Polling is used.
- * @napi: pointer to the napi structure.
- * @budget: Number of packets budgeted to be processed in this iteration.
- *
- * This function is used only when the receive side is handled through
- * polling (NAPI in Linux). It performs the same descriptor and packet
- * processing as the normal Rx interrupt handler, but outside interrupt
- * context, and it processes at most a specified number of packets in one
- * iteration. That limit is passed down by the kernel as the function
- * argument 'budget'.
- */
-static int vxge_poll_msix(struct napi_struct *napi, int budget)
-{
- struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
- int pkts_processed;
- int budget_org = budget;
-
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed = ring->pkts_processed;
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
-
- /* Re enable the Rx interrupts for the vpath */
- vxge_hw_channel_msix_unmask(
- (struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
- }
-
-	/* Return the local copy: once the MSI-X vector is unmasked above,
-	 * the interrupt may fire immediately and preempt this NAPI thread,
-	 * updating ring->pkts_processed. */
- return pkts_processed;
-}
-
-static int vxge_poll_inta(struct napi_struct *napi, int budget)
-{
- struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
- int pkts_processed = 0;
- int i;
- int budget_org = budget;
- struct vxge_ring *ring;
-
- struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
-
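-	/* Poll each vpath's ring in turn until the overall NAPI budget
-	 * is exhausted. */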
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed += ring->pkts_processed;
- budget -= ring->pkts_processed;
- if (budget <= 0)
- break;
- }
-
- VXGE_COMPLETE_ALL_TX(vdev);
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re enable the Rx interrupts for the ring */
- vxge_hw_device_unmask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- }
-
- return pkts_processed;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * vxge_netpoll - netpoll event handler entry point
- * @dev : pointer to the device structure.
- * Description:
- * This function is called by the upper layer to check for events on the
- * interface in situations where interrupts are disabled. It is used for
- * specific in-kernel networking tasks, such as remote consoles and kernel
- * debugging over the network (for example, netdump in Red Hat).
- */
-static void vxge_netpoll(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct pci_dev *pdev = vdev->pdev;
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- const int irq = pdev->irq;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (pci_channel_offline(pdev))
- return;
-
- disable_irq(irq);
- vxge_hw_device_clear_tx_rx(hldev);
- VXGE_COMPLETE_ALL_RX(vdev);
- VXGE_COMPLETE_ALL_TX(vdev);
-
- enable_irq(irq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-#endif
-
-/* RTH configuration */
-static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_rth_hash_types hash_types;
- u8 itable[256] = {0}; /* indirection table */
- u8 mtable[256] = {0}; /* CPU to vpath mapping */
- int index;
-
- /*
- * Filling
- * - itable with bucket numbers
- * - mtable with bucket-to-vpath mapping
- */
- for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
- itable[index] = index;
- mtable[index] = index % vdev->no_of_vpath;
- }
-
- /* set indirection table, bucket-to-vpath mapping */
- status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
- vdev->no_of_vpath,
- mtable, itable,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH indirection table configuration failed "
- "for vpath:%d", vdev->vpaths[0].device_id);
- return status;
- }
-
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
- /*
- * Because the itable_set() method uses the active_table field
-	 * for the target virtual path, the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
- for (index = 0; index < vdev->no_of_vpath; index++) {
- status = vxge_hw_vpath_rts_rth_set(
- vdev->vpaths[index].handle,
- vdev->config.rth_algorithm,
- &hash_types,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH configuration failed for vpath:%d",
- vdev->vpaths[index].device_id);
- return status;
- }
- }
-
- return status;
-}
-
-/* reset vpaths */
-static void vxge_reset_all_vpaths(struct vxgedev *vdev)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(
- vpath->handle) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- return;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- return;
- }
- }
- }
-}
-
-/* close vpaths */
-static void vxge_close_vpaths(struct vxgedev *vdev, int index)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = index; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- if (vpath->handle && vpath->is_open) {
- vxge_hw_vpath_close(vpath->handle);
- vdev->stats.vpaths_open--;
- }
- vpath->is_open = 0;
- vpath->handle = NULL;
- }
-}
-
-/* open vpaths */
-static int vxge_open_vpaths(struct vxgedev *vdev)
-{
- struct vxge_hw_vpath_attr attr;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath;
- u32 vp_id = 0;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_configured);
-
- if (!vdev->titan1) {
- struct vxge_hw_vp_config *vcfg;
- vcfg = &vdev->devh->config.vp_config[vpath->device_id];
-
- vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
- vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
- vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
- vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
- vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
- vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
- vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
- vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
- vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
- }
-
- attr.vp_id = vpath->device_id;
- attr.fifo_attr.callback = vxge_xmit_compl;
- attr.fifo_attr.txdl_term = vxge_tx_term;
- attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
- attr.fifo_attr.userdata = &vpath->fifo;
-
- attr.ring_attr.callback = vxge_rx_1b_compl;
- attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
- attr.ring_attr.rxd_term = vxge_rx_term;
- attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
- attr.ring_attr.userdata = &vpath->ring;
-
- vpath->ring.ndev = vdev->ndev;
- vpath->ring.pdev = vdev->pdev;
-
- status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
- if (status == VXGE_HW_OK) {
- vpath->fifo.handle =
- (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
- vpath->ring.handle =
- (struct __vxge_hw_ring *)attr.ring_attr.userdata;
- vpath->fifo.tx_steering_type =
- vdev->config.tx_steering_type;
- vpath->fifo.ndev = vdev->ndev;
- vpath->fifo.pdev = vdev->pdev;
-
- u64_stats_init(&vpath->fifo.stats.syncp);
- u64_stats_init(&vpath->ring.stats.syncp);
-
- if (vdev->config.tx_steering_type)
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, i);
- else
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, 0);
- vpath->fifo.indicate_max_pkts =
- vdev->config.fifo_indicate_max_pkts;
- vpath->fifo.tx_vector_no = 0;
- vpath->ring.rx_vector_no = 0;
- vpath->ring.rx_hwts = vdev->rx_hwts;
- vpath->is_open = 1;
- vdev->vp_handles[i] = vpath->handle;
- vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
- vdev->stats.vpaths_open++;
- } else {
- vdev->stats.vpath_open_fail++;
- vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
- "open with status: %d",
- vdev->ndev->name, vpath->device_id,
- status);
- vxge_close_vpaths(vdev, 0);
- return -EPERM;
- }
-
- vp_id = vpath->handle->vpath->vp_id;
- vdev->vpaths_deployed |= vxge_mBIT(vp_id);
- }
-
- return VXGE_HW_OK;
-}
-
-/**
- * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @fifo: pointer to transmit fifo structure
- * Description: The function changes the boundary timer and restriction timer
- * values depending on the traffic
- * Return Value: None
- */
-static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
-{
- fifo->interrupt_count++;
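-	/* Re-evaluate the coalescing roughly every 10 ms (HZ / 100 jiffies) */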
- if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_fifo *hw_fifo = fifo->handle;
-
- fifo->jiffies = jiffies;
- if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
- hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
- hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- } else if (hw_fifo->rtimer != 0) {
- hw_fifo->rtimer = 0;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- }
- fifo->interrupt_count = 0;
- }
-}
-
-/**
- * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @ring: pointer to receive ring structure
- * Description: The function increases or decreases the packet counts within
- * the ranges of traffic utilization if the interrupts due to this ring are
- * not within a fixed range.
- * Return Value: Nothing
- */
-static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
-{
- ring->interrupt_count++;
- if (time_before(ring->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_ring *hw_ring = ring->handle;
-
- ring->jiffies = jiffies;
- if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
- hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
- hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- } else if (hw_ring->rtimer != 0) {
- hw_ring->rtimer = 0;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- }
- ring->interrupt_count = 0;
- }
-}
-
-/*
- * vxge_isr_napi
- * @irq: the irq of the device.
- * @dev_id: a void pointer to the vxgedev structure of the Titan device.
- *
- * This function is the ISR handler of the device when napi is enabled. It
- * identifies the reason for the interrupt and calls the relevant service
- * routines.
- */
-static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
-{
- struct __vxge_hw_device *hldev;
- u64 reason;
- enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *)dev_id;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (pci_channel_offline(vdev->pdev))
- return IRQ_NONE;
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_HANDLED;
-
- status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
- if (status == VXGE_HW_OK) {
- vxge_hw_device_mask_all(hldev);
-
- if (reason &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
- vdev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
-
- vxge_hw_device_clear_tx_rx(hldev);
- napi_schedule(&vdev->napi);
- vxge_debug_intr(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_HANDLED;
- } else
- vxge_hw_device_unmask_all(hldev);
- } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
- (status == VXGE_HW_ERR_CRITICAL) ||
- (status == VXGE_HW_ERR_FIFO))) {
- vxge_hw_device_mask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- return IRQ_HANDLED;
- } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
- return IRQ_HANDLED;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_NONE;
-}
-
-static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
-
- adaptive_coalesce_tx_interrupts(fifo);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- VXGE_COMPLETE_VPATH_TX(fifo);
-
- vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
-{
- struct vxge_ring *ring = (struct vxge_ring *)dev_id;
-
- adaptive_coalesce_rx_interrupts(ring);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- napi_schedule(&ring->napi);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-vxge_alarm_msix_handle(int irq, void *dev_id)
-{
- int i;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
- struct vxgedev *vdev = vpath->vdev;
- int msix_id = (vpath->handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
-		/* Reduce the chance of losing alarm interrupts by masking
-		 * the vector. A pending bit will be set if an alarm is
-		 * generated, and the interrupt will fire on unmask.
-		 */
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
- vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
-
- status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
- vdev->exec_mode);
- if (status == VXGE_HW_OK) {
- vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
- continue;
- }
- vxge_debug_intr(VXGE_ERR,
- "%s: vxge_hw_vpath_alarm_process failed %x ",
- VXGE_DRIVER_NAME, status);
- }
- return IRQ_HANDLED;
-}
-
-static int vxge_alloc_msix(struct vxgedev *vdev)
-{
- int j, i, ret = 0;
- int msix_intr_vect = 0, temp;
- vdev->intr_cnt = 0;
-
-start:
- /* Tx/Rx MSIX Vectors count */
- vdev->intr_cnt = vdev->no_of_vpath * 2;
-
- /* Alarm MSIX Vectors count */
- vdev->intr_cnt++;
-
- vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!vdev->entries) {
- vxge_debug_init(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_entries_failed;
- }
-
- vdev->vxge_entries = kcalloc(vdev->intr_cnt,
- sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
- if (!vdev->vxge_entries) {
- vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_vxge_entries_failed;
- }
-
- for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
-
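-		/* Each vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE MSI-X
-		 * slots: slot 0 drives the Tx fifo, slot 1 the Rx ring. */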
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-
- /* Initialize the fifo vector */
- vdev->entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].in_use = 0;
- j++;
-
- /* Initialize the ring vector */
- vdev->entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].in_use = 0;
- j++;
- }
-
- /* Initialize the alarm vector */
- vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].in_use = 0;
-
- ret = pci_enable_msix_range(vdev->pdev,
- vdev->entries, 3, vdev->intr_cnt);
- if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
- } else if (ret < vdev->intr_cnt) {
- pci_disable_msix(vdev->pdev);
-
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if (max_config_vpath != VXGE_USE_DEFAULT) {
- ret = -ENODEV;
- goto enable_msix_failed;
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-		/* Retry with fewer vectors by reducing the number of vpaths */
- temp = (ret - 1)/2;
- vxge_close_vpaths(vdev, temp);
- vdev->no_of_vpath = temp;
- goto start;
- }
- return 0;
-
-enable_msix_failed:
- kfree(vdev->vxge_entries);
-alloc_vxge_entries_failed:
- kfree(vdev->entries);
-alloc_entries_failed:
- return ret;
-}
-
-static int vxge_enable_msix(struct vxgedev *vdev)
-{
-
- int i, ret = 0;
- /* 0 - Tx, 1 - Rx */
- int tim_msix_id[4] = {0, 1, 0, 0};
-
- vdev->intr_cnt = 0;
-
- /* allocate msix vectors */
- ret = vxge_alloc_msix(vdev);
- if (!ret) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct vxge_vpath *vpath = &vdev->vpaths[i];
-
-			/* If the fifo or ring is not enabled, the MSI-X vector
-			 * for it should be set to 0.
-			 */
- vpath->ring.rx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
-
- vpath->fifo.tx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE);
-
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- VXGE_ALARM_MSIX_ID);
- }
- }
-
- return ret;
-}
-
-static void vxge_rem_msix_isr(struct vxgedev *vdev)
-{
- int intr_cnt;
-
- for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
- intr_cnt++) {
- if (vdev->vxge_entries[intr_cnt].in_use) {
- free_irq(vdev->entries[intr_cnt].vector,
- vdev->vxge_entries[intr_cnt].arg);
- vdev->vxge_entries[intr_cnt].in_use = 0;
- }
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-
- if (vdev->config.intr_type == MSI_X)
- pci_disable_msix(vdev->pdev);
-}
-
-static void vxge_rem_isr(struct vxgedev *vdev)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI) &&
- vdev->config.intr_type == MSI_X) {
- vxge_rem_msix_isr(vdev);
- } else if (vdev->config.intr_type == INTA) {
- free_irq(vdev->pdev->irq, vdev);
- }
-}
-
-static int vxge_add_isr(struct vxgedev *vdev)
-{
- int ret = 0;
- int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
- int pci_fun = PCI_FUNC(vdev->pdev->devfn);
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
- ret = vxge_enable_msix(vdev);
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
- vdev->config.intr_type = INTA;
- }
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
- for (intr_idx = 0;
- intr_idx < (vdev->no_of_vpath *
- VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
-
- msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
- irq_req = 0;
-
- switch (msix_idx) {
- case 0:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_tx_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].fifo;
- irq_req = 1;
- break;
- case 1:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].ring;
- irq_req = 1;
- break;
- }
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- if (irq_req) {
-				/* We requested this MSI-X interrupt */
- vdev->vxge_entries[intr_cnt].in_use = 1;
- msix_idx += vdev->vpaths[vp_idx].device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(
- vdev->vpaths[vp_idx].handle,
- msix_idx);
- intr_cnt++;
- }
-
- /* Point to next vpath handler */
- if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
- (vp_idx < (vdev->no_of_vpath - 1)))
- vp_idx++;
- }
-
- intr_cnt = vdev->no_of_vpath * 2;
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Alarm - fn:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun);
- /* For Alarm interrupts */
- ret = request_irq(vdev->entries[intr_cnt].vector,
- vxge_alarm_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[0]);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- msix_idx);
- vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
- }
-
-INTA_MODE:
- if (vdev->config.intr_type == INTA) {
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
- "%s:vxge:INTA", vdev->ndev->name);
- vxge_hw_device_set_intr_type(vdev->devh,
- VXGE_HW_INTR_MODE_IRQLINE);
-
- vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
-
- ret = request_irq((int) vdev->pdev->irq,
- vxge_isr_napi,
- IRQF_SHARED, vdev->desc[0], vdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s %s-%d: ISR registration failed",
- VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
- return -ENODEV;
- }
- vxge_debug_init(VXGE_TRACE,
- "new %s-%d line allocated",
- "IRQ", vdev->pdev->irq);
- }
-
- return VXGE_HW_OK;
-}
-
-static void vxge_poll_vp_reset(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
- int i, j = 0;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- if (test_bit(i, &vdev->vp_reset)) {
- vxge_reset_vpath(vdev, i);
- j++;
- }
- }
- if (j && (vdev->config.intr_type != MSI_X)) {
- vxge_hw_device_unmask_all(vdev->devh);
- vxge_hw_device_flush_io(vdev->devh);
- }
-
- mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
-}
-
-static void vxge_poll_vp_lockup(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- struct vxge_ring *ring;
- int i;
- unsigned long rx_frms;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
-
- /* Truncated to machine word size number of frames */
- rx_frms = READ_ONCE(ring->stats.rx_frms);
-
-		/* Did this vpath receive any packets? */
- if (ring->stats.prev_rx_frms == rx_frms) {
- status = vxge_hw_vpath_check_leak(ring->handle);
-
-			/* Did it receive any packets last time? */
- if ((VXGE_HW_FAIL == status) &&
- (VXGE_HW_FAIL == ring->last_status)) {
-
- /* schedule vpath reset */
- if (!test_and_set_bit(i, &vdev->vp_reset)) {
- vpath = &vdev->vpaths[i];
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, i);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- continue;
- }
- }
- }
- ring->stats.prev_rx_frms = rx_frms;
- ring->last_status = status;
- }
-
-	/* Check every millisecond */
- mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
-}
-
-static netdev_features_t vxge_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t changed = dev->features ^ features;
-
- /* Enabling RTH requires some of the logic in vxge_device_register and a
- * vpath reset. Due to these restrictions, only allow modification
- * while the interface is down.
- */
- if ((changed & NETIF_F_RXHASH) && netif_running(dev))
- features ^= NETIF_F_RXHASH;
-
- return features;
-}
-
-static int vxge_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- netdev_features_t changed = dev->features ^ features;
-
- if (!(changed & NETIF_F_RXHASH))
- return 0;
-
- /* !netif_running() ensured by vxge_fix_features() */
-
- vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
- vxge_reset_all_vpaths(vdev);
-
- return 0;
-}
-
-/**
- * vxge_open
- * @dev: pointer to the device structure.
- *
- * This function is the open entry point of the driver. It mainly calls a
- * function to allocate Rx buffers and inserts them into the buffer
- * descriptors and then enables the Rx part of the NIC.
- * Return value: '0' on success and an appropriate (-)ve integer as
- * defined in errno.h file on failure.
- */
-static int vxge_open(struct net_device *dev)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- struct vxge_vpath *vpath;
- int ret = 0;
- int i;
- u64 val64;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
-	/* make sure the link is off by default every time the NIC is
-	 * initialized */
- netif_carrier_off(dev);
-
- /* Open VPATHs */
- status = vxge_open_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: Vpath open failed", vdev->ndev->name);
- ret = -EPERM;
- goto out0;
- }
-
- vdev->mtu = dev->mtu;
-
- status = vxge_add_isr(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: ISR add failed", dev->name);
- ret = -EPERM;
- goto out1;
- }
-
- if (vdev->config.intr_type != MSI_X) {
- netif_napi_add_weight(dev, &vdev->napi, vxge_poll_inta,
- vdev->config.napi_weight);
- napi_enable(&vdev->napi);
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vpath->ring.napi_p = &vdev->napi;
- }
- } else {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- netif_napi_add_weight(dev, &vpath->ring.napi,
- vxge_poll_msix,
- vdev->config.napi_weight);
- napi_enable(&vpath->ring.napi);
- vpath->ring.napi_p = &vpath->ring.napi;
- }
- }
-
- /* configure RTH */
- if (vdev->config.rth_steering) {
- status = vxge_rth_configure(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: RTH configuration failed",
- dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
- printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
- hldev->config.rth_en ? "enabled" : "disabled");
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- /* set initial mtu before enabling the device */
- status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: can not set new MTU", dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
-
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
- vxge_debug_init(vdev->level_trace,
- "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
-
-	/* Restore the DA table, VID table, and also the multicast and
-	 * promiscuous mode states
-	 */
- if (vdev->all_multi_flg) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
- }
-
-	/* Enable vpaths to sniff all unicast/multicast traffic that is not
-	 * addressed to them. We allow promiscuous mode for the PF only.
-	 */
-
- val64 = 0;
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- val64);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- val64);
-
- vxge_set_multicast(dev);
-
-	/* Enable broadcast and multicast for all vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- status = vxge_hw_vpath_bcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable bcast for vpath "
- "id %d", dev->name, i);
- if (vdev->config.addr_learn_en) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable mcast for vpath "
- "id %d", dev->name, i);
- }
- }
-
- vxge_hw_device_setpause_data(vdev->devh, 0,
- vdev->config.tx_pause_enable,
- vdev->config.rx_pause_enable);
-
- if (vdev->vp_reset_timer.function == NULL)
- vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
- HZ / 2);
-
- /* There is no need to check for RxD leak and RxD lookup on Titan1A */
- if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
- HZ / 2);
-
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- smp_wmb();
-
- if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
- netif_carrier_on(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
- }
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- vxge_hw_vpath_enable(vpath->handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- }
-
- netif_tx_start_all_queues(vdev->ndev);
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
- goto out0;
-
-out2:
- vxge_rem_isr(vdev);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
-out1:
- vxge_close_vpaths(vdev, 0);
-out0:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return ret;
-}
-
-/* Loop through the mac address list and delete all the entries */
-static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
-{
-
- struct list_head *entry, *next;
- if (list_empty(&vpath->mac_addr_list))
- return;
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- list_del(entry);
- kfree(entry);
- }
-}
-
-static void vxge_napi_del_all(struct vxgedev *vdev)
-{
- int i;
- if (vdev->config.intr_type != MSI_X)
- netif_napi_del(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- netif_napi_del(&vdev->vpaths[i].ring.napi);
- }
-}
-
-static int do_vxge_close(struct net_device *dev, int do_io)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- int i;
- u64 val64, vpath_vector;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* If vxge_handle_crit_err task is executing,
- * wait till it completes. */
- while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- msleep(50);
-
- if (do_io) {
- /* Put the vpath back in normal mode */
- vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
- status = vxge_hw_mgmt_reg_read(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- &val64);
- if (status == VXGE_HW_OK) {
- val64 &= ~vpath_vector;
- status = vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- val64);
- }
-
- /* Remove the function 0 from promiscuous mode */
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- 0);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- 0);
-
- smp_wmb();
- }
-
- if (vdev->titan1)
- del_timer_sync(&vdev->vp_lockup_timer);
-
- del_timer_sync(&vdev->vp_reset_timer);
-
- if (do_io)
- vxge_hw_device_wait_receive_idle(hldev);
-
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
- netif_carrier_off(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Down\n");
- netif_tx_stop_all_queues(vdev->ndev);
-
- /* Note that at this point xmit() is stopped by upper layer */
- if (do_io)
- vxge_hw_device_intr_disable(vdev->devh);
-
- vxge_rem_isr(vdev);
-
- vxge_napi_del_all(vdev);
-
- if (do_io)
- vxge_reset_all_vpaths(vdev);
-
- vxge_close_vpaths(vdev, 0);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
-
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
-
- return 0;
-}
-
-/**
- * vxge_close
- * @dev: device pointer.
- *
- * This is the stop entry point of the driver. It needs to undo exactly
- * whatever was done by the open entry point, thus it's usually referred to
- * as the close function. Among other things, this function mainly stops the
- * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
- * Return value: '0' on success and an appropriate (-)ve integer as
- * defined in errno.h file on failure.
- */
-static int vxge_close(struct net_device *dev)
-{
- do_vxge_close(dev, 1);
- return 0;
-}
-
-/**
- * vxge_change_mtu
- * @dev: net device pointer.
- * @new_mtu: the new MTU size for the device.
- *
- * A driver entry point to change MTU size for the device. Before changing
- * the MTU the device must be stopped.
- */
-static int vxge_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d", __func__, __LINE__);
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev))) {
- /* just store new value, will use later on open() */
- dev->mtu = new_mtu;
- vxge_debug_init(vdev->level_err,
- "%s", "device is down on MTU change");
- return 0;
- }
-
- vxge_debug_init(vdev->level_trace,
- "trying to apply new MTU %d", new_mtu);
-
- if (vxge_close(dev))
- return -EIO;
-
- dev->mtu = new_mtu;
- vdev->mtu = new_mtu;
-
- if (vxge_open(dev))
- return -EIO;
-
- vxge_debug_init(vdev->level_trace,
- "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
-
- return 0;
-}
-
-/**
- * vxge_get_stats64
- * @dev: pointer to the device structure
- * @net_stats: pointer to struct rtnl_link_stats64
- *
- */
-static void
-vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- int k;
-
- /* net_stats already zeroed by caller */
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
- struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
- unsigned int start;
- u64 packets, bytes, multicast;
-
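-		/* Snapshot the Rx counters consistently; retry if the writer
-		 * updated the u64_stats seqcount in the meantime. */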
- do {
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
-
- packets = rxstats->rx_frms;
- multicast = rxstats->rx_mcast;
- bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
-
- net_stats->rx_packets += packets;
- net_stats->rx_bytes += bytes;
- net_stats->multicast += multicast;
-
- net_stats->rx_errors += rxstats->rx_errors;
- net_stats->rx_dropped += rxstats->rx_dropped;
-
- do {
- start = u64_stats_fetch_begin_irq(&txstats->syncp);
-
- packets = txstats->tx_frms;
- bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
-
- net_stats->tx_packets += packets;
- net_stats->tx_bytes += bytes;
- net_stats->tx_errors += txstats->tx_errors;
- }
-}
-
-static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
-{
- enum vxge_hw_status status;
- u64 val64;
-
- /* Timestamp is passed to the driver via the FCS, therefore we
- * must disable the FCS stripping by the adapter. Since this is
- * required for the driver to load (due to a hardware bug),
- * there is no need to do anything special here.
- */
- val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
- VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
- VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
-
- status = vxge_hw_mgmt_reg_write(devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- offsetof(struct vxge_hw_mrpcim_reg,
- xmac_timestamp),
- val64);
- vxge_hw_device_flush_io(devh);
- devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
- return status;
-}
-
-static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
- int i;
-
- if (copy_from_user(&config, data, sizeof(config)))
- return -EFAULT;
-
- /* Transmit HW Timestamp not supported */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- vdev->rx_hwts = 0;
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- break;
-
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
- return -EFAULT;
-
- vdev->rx_hwts = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
-
- default:
- return -ERANGE;
- }
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (vdev->rx_hwts ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * vxge_ioctl
- * @dev: Device pointer.
- * @rq: An IOCTL-specific structure that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
- * @cmd: This is used to distinguish between the different commands that
- * can be passed to the IOCTL functions.
- *
- * Entry point for the Ioctl.
- */
-static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return vxge_hwtstamp_set(vdev, rq->ifr_data);
- case SIOCGHWTSTAMP:
- return vxge_hwtstamp_get(vdev, rq->ifr_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * vxge_tx_watchdog
- * @dev: pointer to net device structure
- * @txqueue: index of the hanging queue
- *
- * Watchdog for transmit side.
- * This function is triggered if the Tx Queue is stopped
- * for a pre-defined amount of time when the Interface is still up.
- */
-static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
-{
- struct vxgedev *vdev;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
-
- schedule_work(&vdev->reset_task);
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_vlan_rx_add_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Add the vlan id to the device's vlan id table
- */
-static int
-vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
-	/* Add this vlan id to the vid table */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- set_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-/**
- * vxge_vlan_rx_kill_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Remove the vlan id from the device's vlan id table
- */
-static int
-vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- /* Delete this vlan from the vid table */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_delete(vpath->handle, vid);
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- clear_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-static const struct net_device_ops vxge_netdev_ops = {
- .ndo_open = vxge_open,
- .ndo_stop = vxge_close,
- .ndo_get_stats64 = vxge_get_stats64,
- .ndo_start_xmit = vxge_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = vxge_set_multicast,
- .ndo_eth_ioctl = vxge_ioctl,
- .ndo_set_mac_address = vxge_set_mac_addr,
- .ndo_change_mtu = vxge_change_mtu,
- .ndo_fix_features = vxge_fix_features,
- .ndo_set_features = vxge_set_features,
- .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
- .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
- .ndo_tx_timeout = vxge_tx_watchdog,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = vxge_netpoll,
-#endif
-};
-
-static int vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int no_of_vpath, struct vxgedev **vdev_out)
-{
- struct net_device *ndev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev;
- int ret = 0, no_of_queue = 1;
- u64 stat;
-
- *vdev_out = NULL;
- if (config->tx_steering_type)
- no_of_queue = no_of_vpath;
-
- ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
- no_of_queue);
- if (ndev == NULL) {
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
- "%s : device allocation failed", __func__);
- ret = -ENODEV;
- goto _out0;
- }
-
- vxge_debug_entryexit(
- vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Entering...",
- ndev->name, __func__, __LINE__);
-
- vdev = netdev_priv(ndev);
- memset(vdev, 0, sizeof(struct vxgedev));
-
- vdev->ndev = ndev;
- vdev->devh = hldev;
- vdev->pdev = hldev->pdev;
- memcpy(&vdev->config, config, sizeof(struct vxge_config));
- vdev->rx_hwts = 0;
- vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
-
- SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
-
- ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_CTAG_TX;
- if (vdev->config.rth_steering != NO_STEERING)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->features |= ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
-
-
- ndev->netdev_ops = &vxge_netdev_ops;
-
- ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- INIT_WORK(&vdev->reset_task, vxge_reset);
-
- vxge_initialize_ethtool_ops(ndev);
-
- /* Allocate memory for vpath */
- vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
- GFP_KERNEL);
- if (!vdev->vpaths) {
- vxge_debug_init(VXGE_ERR,
- "%s: vpath memory allocation failed",
- vdev->ndev->name);
- ret = -ENOMEM;
- goto _out1;
- }
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s : checksumming enabled", __func__);
-
- ndev->features |= NETIF_F_HIGHDMA;
-
- /* MTU range: 68 - 9600 */
- ndev->min_mtu = VXGE_HW_MIN_MTU;
- ndev->max_mtu = VXGE_HW_MAX_MTU;
-
- ret = register_netdev(ndev);
- if (ret) {
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: %s : device registration failed!",
- ndev->name, __func__);
- goto _out2;
- }
-
- /* Set the factory defined MAC address initially */
- ndev->addr_len = ETH_ALEN;
-
-	/* Make the link state off at this point; when the link change
-	 * interrupt comes, the state will be changed automatically to
-	 * the right state.
-	 */
- netif_carrier_off(ndev);
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: Ethernet device registered",
- ndev->name);
-
- hldev->ndev = ndev;
- *vdev_out = vdev;
-
- /* Resetting the Device stats */
- status = vxge_hw_mrpcim_stats_access(
- hldev,
- VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
- 0,
- 0,
- &stat);
-
- if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
-			"%s: device stats clear returns "
-			"VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
-
- vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Exiting...",
- ndev->name, __func__, __LINE__);
-
- return ret;
-_out2:
- kfree(vdev->vpaths);
-_out1:
- free_netdev(ndev);
-_out0:
- return ret;
-}
-
-/*
- * vxge_device_unregister
- *
- * This function will unregister and free network device
- */
-static void vxge_device_unregister(struct __vxge_hw_device *hldev)
-{
- struct vxgedev *vdev;
- struct net_device *dev;
- char buf[IFNAMSIZ];
-
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
- __func__, __LINE__);
-
- strlcpy(buf, dev->name, IFNAMSIZ);
-
- flush_work(&vdev->reset_task);
-
-	/* unregister_netdev() will call stop() if the device is up */
- unregister_netdev(dev);
-
- kfree(vdev->vpaths);
-
- vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
- buf);
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
- __func__, __LINE__);
-
- /* we are safe to free it now */
- free_netdev(dev);
-}
-
-/*
- * vxge_callback_crit_err
- *
- * This function is called by the alarm handler in interrupt context.
- * Driver must analyze it based on the event type.
- */
-static void
-vxge_callback_crit_err(struct __vxge_hw_device *hldev,
- enum vxge_hw_event type, u64 vp_id)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath = NULL;
- int vpath_idx;
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-
- /* Note: This event type should be used for device wide
- * indications only - Serious errors, Slot freeze and critical errors
- */
- vdev->cric_err_event = type;
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->device_id == vp_id)
- break;
- }
-
- if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
- if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
- vxge_debug_init(VXGE_ERR,
- "%s: Slot is frozen", vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_SERR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Serious Error",
- vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Critical Error",
- vdev->ndev->name);
- }
-
- if ((type == VXGE_HW_EVENT_SERR) ||
- (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
- vxge_hw_device_mask_all(hldev);
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
- (type == VXGE_HW_EVENT_VPATH_ERR)) {
-
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- else {
- /* check if this vpath is already set for reset */
- if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, vpath_idx);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- }
- }
- }
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-}
-
-static void verify_bandwidth(void)
-{
- int i, band_width, total = 0, equal_priority = 0;
-
- /* 1. If user enters 0 for some fifo, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0) {
- equal_priority = 1;
- break;
- }
- }
-
- if (!equal_priority) {
- /* 2. If sum exceeds 100, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0xFF)
- break;
-
- total += bw_percentage[i];
- if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
- equal_priority = 1;
- break;
- }
- }
- }
-
- if (!equal_priority) {
- /* Is all the bandwidth consumed? */
- if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
- if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
- /* Split rest of bw equally among next VPs*/
- band_width =
- (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
- (VXGE_HW_MAX_VIRTUAL_PATHS - i);
- if (band_width < 2) /* min of 2% */
- equal_priority = 1;
- else {
- for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
- i++)
- bw_percentage[i] =
- band_width;
- }
- }
- } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
- equal_priority = 1;
- }
-
- if (equal_priority) {
- vxge_debug_init(VXGE_ERR,
- "%s: Assigning equal bandwidth to all the vpaths",
- VXGE_DRIVER_NAME);
- bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
- VXGE_HW_MAX_VIRTUAL_PATHS;
- for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- bw_percentage[i] = bw_percentage[0];
- }
-}
-
-/*
- * Vpath configuration
- */
-static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
- u64 vpath_mask, struct vxge_config *config_param)
-{
- int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
- u32 txdl_size, txdl_per_memblock;
-
- temp = driver_config->vpath_per_dev;
- if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
- (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
-		/* No more CPUs. Return a vpath count of zero. */
- if (driver_config->g_no_cpus == -1)
- return 0;
-
- if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus =
- netif_get_num_default_rss_queues();
-
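-		/* Default to roughly one vpath for every two available CPUs */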
- driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
- if (!driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = 1;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- if (vxge_bVALn(vpath_mask, i, 1))
- default_no_vpath++;
-
- if (default_no_vpath < driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = default_no_vpath;
-
- driver_config->g_no_cpus = driver_config->g_no_cpus -
- (driver_config->vpath_per_dev * 2);
- if (driver_config->g_no_cpus <= 0)
- driver_config->g_no_cpus = -1;
- }
-
- if (driver_config->vpath_per_dev == 1) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Disable tx and rx steering, "
- "as single vpath is configured", VXGE_DRIVER_NAME);
- config_param->rth_steering = NO_STEERING;
- config_param->tx_steering_type = NO_STEERING;
- device_config->rth_en = 0;
- }
-
- /* configure bandwidth */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- device_config->vp_config[i].min_bandwidth = bw_percentage[i];
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
- device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
- if (no_of_vpaths < driver_config->vpath_per_dev) {
- if (!vxge_bVALn(vpath_mask, i, 1)) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not available",
- VXGE_DRIVER_NAME, i);
- continue;
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d available",
- VXGE_DRIVER_NAME, i);
- no_of_vpaths++;
- }
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not configured, "
- "max_config_vpath exceeded",
- VXGE_DRIVER_NAME, i);
- break;
- }
-
- /* Configure Tx fifo's */
- device_config->vp_config[i].fifo.enable =
- VXGE_HW_FIFO_ENABLE;
- device_config->vp_config[i].fifo.max_frags =
- MAX_SKB_FRAGS + 1;
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
-
- txdl_size = device_config->vp_config[i].fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd);
- txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
-
- /* Configure tti properties */
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].tti.btimer_val =
- (VXGE_TTI_BTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- /* For msi-x with napi (each vector has a handler of its own) -
- * Set CI to OFF for all vpaths
- */
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
-
- device_config->vp_config[i].tti.ltimer_val =
- (VXGE_TTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.rtimer_val =
- (VXGE_TTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
- device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
- device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
- device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
- device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
- device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
- device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
-
- /* Configure Rx rings */
- device_config->vp_config[i].ring.enable =
- VXGE_HW_RING_ENABLE;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_1;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_A;
-
- /* Configure rti properties */
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].rti.btimer_val =
- (VXGE_RTI_BTIMER_VAL * 1000)/272;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
-
- device_config->vp_config[i].rti.urange_a =
- RTI_RX_URANGE_A;
- device_config->vp_config[i].rti.urange_b =
- RTI_RX_URANGE_B;
- device_config->vp_config[i].rti.urange_c =
- RTI_RX_URANGE_C;
- device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
- device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
- device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
- device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
-
- device_config->vp_config[i].rti.rtimer_val =
- (VXGE_RTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rti.ltimer_val =
- (VXGE_RTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- vlan_tag_strip;
- }
-
- driver_config->vpath_per_dev = temp;
- return no_of_vpaths;
-}
-
-/* initialize device configurations */
-static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
- int *intr_type)
-{
- /* Used for CQRQ/SRQ. */
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
-
- device_config->dma_blockpool_max =
- VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
-
- if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
- max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
-
- if (!IS_ENABLED(CONFIG_PCI_MSI)) {
- vxge_debug_init(VXGE_ERR,
- "%s: This Kernel does not support "
- "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
- *intr_type = INTA;
- }
-
- /* Configure the interrupt mode: MSI-X or INTA (IRQ line). */
- switch (*intr_type) {
- case INTA:
- device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
- break;
-
- case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
- break;
- }
-
- /* Timer period between device poll */
- device_config->device_poll_millis = VXGE_TIMER_DELAY;
-
- /* Configure mac based steering. */
- device_config->rts_mac_en = addr_learn_en;
-
- /* Configure Vpaths */
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
-
- vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
- __func__);
- vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
- device_config->intr_mode);
- vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
- device_config->device_poll_millis);
- vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
- device_config->rth_en);
- vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
- device_config->rth_it_type);
-}
-
-static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
-{
- int i;
-
- vxge_debug_init(VXGE_TRACE,
- "%s: %d Vpath(s) opened",
- vdev->ndev->name, vdev->no_of_vpath);
-
- switch (vdev->config.intr_type) {
- case INTA:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type INTA", vdev->ndev->name);
- break;
-
- case MSI_X:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type MSI-X", vdev->ndev->name);
- break;
- }
-
- if (vdev->config.rth_steering) {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering enabled for TCP_IPV4",
- vdev->ndev->name);
- } else {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering disabled", vdev->ndev->name);
- }
-
- switch (vdev->config.tx_steering_type) {
- case NO_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- break;
- case TX_PRIORITY_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_VLAN_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_MULTIQ_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx multiqueue steering enabled",
- vdev->ndev->name);
- break;
- case TX_PORT_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx port steering enabled",
- vdev->ndev->name);
- break;
- default:
- vxge_debug_init(VXGE_ERR,
- "%s: Unsupported tx steering type",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- }
-
- if (vdev->config.addr_learn_en)
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC Address learning enabled", vdev->ndev->name);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: MTU size - %d", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].mtu);
- vxge_debug_init(VXGE_TRACE,
- "%s: VLAN tag stripping %s", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].rpa_strip_vlan_tag
- ? "Enabled" : "Disabled");
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Max frags : %d", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].fifo.max_frags);
- break;
- }
-}
-
-/**
- * vxge_pm_suspend - vxge power management suspend entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
-{
- return -ENOSYS;
-}
-/**
- * vxge_pm_resume - vxge power management resume entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_resume(struct device *dev_d)
-{
- return -ENOSYS;
-}
-
-/**
- * vxge_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev)) {
- /* Bring down the card, while avoiding PCI I/O */
- do_vxge_close(netdev, 0);
- }
-
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * vxge_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- struct vxgedev *vdev = netdev_priv(netdev);
-
- if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pci_set_master(pdev);
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * vxge_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it's OK to resume normal operation.
- */
-static void vxge_io_resume(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- if (netif_running(netdev)) {
- if (vxge_open(netdev)) {
- netdev_err(netdev,
- "Can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-}
-
-static inline u32 vxge_get_num_vfs(u64 function_mode)
-{
- u32 num_functions = 0;
-
- switch (function_mode) {
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- case VXGE_HW_FUNCTION_MODE_SRIOV_8:
- num_functions = 8;
- break;
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- num_functions = 1;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
- num_functions = 17;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV_4:
- num_functions = 4;
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
- num_functions = 2;
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV_8:
- num_functions = 8; /* TODO */
- break;
- }
- return num_functions;
-}
-
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
-{
- struct __vxge_hw_device *hldev = vdev->devh;
- u32 maj, min, bld, cmaj, cmin, cbld;
- enum vxge_hw_status status;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
- if (ret) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
- VXGE_DRIVER_NAME, fw_name);
- goto out;
- }
-
- /* Load the new firmware onto the adapter */
- status = vxge_update_fw_image(hldev, fw->data, fw->size);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FW image download to adapter failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- /* Read the version of the new firmware */
- status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: Upgrade read version failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- cmaj = vdev->config.device_hw_info.fw_version.major;
- cmin = vdev->config.device_hw_info.fw_version.minor;
- cbld = vdev->config.device_hw_info.fw_version.build;
- /* It's possible the version in /lib/firmware is not the latest version.
- * If so, we could get into a loop of trying to upgrade to the latest
- * and flashing the older version.
- */
- if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
- !override) {
- ret = -EINVAL;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
- maj, min, bld);
-
- /* Flash the adapter with the new firmware */
- status = vxge_hw_flash_fw(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
- "hard reset before using, thus requiring a system reboot or a "
- "hotplug event.\n");
-
-out:
- release_firmware(fw);
- return ret;
-}
-
-static int vxge_probe_fw_update(struct vxgedev *vdev)
-{
- u32 maj, min, bld;
- int ret, gpxe = 0;
- char *fw_name;
-
- maj = vdev->config.device_hw_info.fw_version.major;
- min = vdev->config.device_hw_info.fw_version.minor;
- bld = vdev->config.device_hw_info.fw_version.build;
-
- if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
- return 0;
-
- /* Ignore the build number when determining if the current firmware is
- * "too new" to load the driver
- */
- if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
- "version, unable to load driver\n",
- VXGE_DRIVER_NAME);
- return -EINVAL;
- }
-
- /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
- * work with this driver.
- */
- if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
- "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
- /* If file not specified, determine gPXE or not */
- if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
- int i;
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
- if (vdev->devh->eprom_versions[i]) {
- gpxe = 1;
- break;
- }
- }
- if (gpxe)
- fw_name = "vxge/X3fw-pxe.ncf";
- else
- fw_name = "vxge/X3fw.ncf";
-
- ret = vxge_fw_upgrade(vdev, fw_name, 0);
- /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
- * probe, so ignore them
- */
- if (ret != -EINVAL && ret != -ENOENT)
- return -EIO;
- else
- ret = 0;
-
- if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
- VXGE_FW_VER(maj, min, 0)) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.",
- VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
- return ret;
-}
-
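The version checks in vxge_fw_upgrade() and vxge_probe_fw_update() compare firmware versions with plain <, ==, > on VXGE_FW_VER(); that macro is defined elsewhere in the driver (not in this hunk), but such comparisons only work if it packs the fields from most to least significant. A hypothetical packing with that property, for illustration only:

	/* Hypothetical packing; 8 bits per field is enough for the checks above. */
	#define FW_VER(maj, min, bld)	(((maj) << 16) | ((min) << 8) | (bld))

	/* e.g. FW_VER(1, 8, 1) > FW_VER(1, 6, 4), because major/minor/build
	 * occupy progressively less significant bit positions.
	 */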
-static int is_sriov_initialized(struct pci_dev *pdev)
-{
- int pos;
- u16 ctrl;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
- if (ctrl & PCI_SRIOV_CTRL_VFE)
- return 1;
- }
- return 0;
-}
-
-static const struct vxge_hw_uld_cbs vxge_callbacks = {
- .link_up = vxge_callback_link_up,
- .link_down = vxge_callback_link_down,
- .crit_err = vxge_callback_crit_err,
-};
-
-/**
- * vxge_probe
- * @pdev : structure containing the PCI related information of the device.
- * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
- * Description:
- * This function is called when a new PCI device gets detected and initializes
- * it.
- * Return value:
- * returns 0 on success and negative on failure.
- *
- */
-static int
-vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
-{
- struct __vxge_hw_device *hldev;
- enum vxge_hw_status status;
- int ret;
- u64 vpath_mask = 0;
- struct vxgedev *vdev;
- struct vxge_config *ll_config = NULL;
- struct vxge_hw_device_config *device_config = NULL;
- struct vxge_hw_device_attr attr;
- int i, j, no_of_vpath = 0, max_vpath_supported = 0;
- u8 *macaddr;
- struct vxge_mac_addrs *entry;
- static int bus = -1, device = -1;
- u32 host_type;
- u8 new_device = 0;
- enum vxge_hw_status is_privileged;
- u32 function_mode;
- u32 num_vfs = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- attr.pdev = pdev;
-
- /* In SRIOV-17 mode, functions of the same adapter
- * can be deployed on different buses
- */
- if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
- !pdev->is_virtfn)
- new_device = 1;
-
- bus = pdev->bus->number;
- device = PCI_SLOT(pdev->devfn);
-
- if (new_device) {
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt !=
- driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME,
- driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
- driver_config->config_dev_cnt = 0;
- driver_config->total_dev_cnt = 0;
- }
-
- /* Make the CPU-based vpath-count calculation applicable
- * to individual functions as well.
- */
- driver_config->g_no_cpus = 0;
- driver_config->vpath_per_dev = max_config_vpath;
-
- driver_config->total_dev_cnt++;
- if (++driver_config->config_dev_cnt > max_config_dev) {
- ret = 0;
- goto _exit0;
- }
-
- device_config = kzalloc(sizeof(struct vxge_hw_device_config),
- GFP_KERNEL);
- if (!device_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
-
- ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
- if (!ll_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
- ll_config->tx_steering_type = TX_MULTIQ_STEERING;
- ll_config->intr_type = MSI_X;
- ll_config->napi_weight = NAPI_POLL_WEIGHT;
- ll_config->rth_steering = RTH_STEERING;
-
- /* get the default configuration parameters */
- vxge_hw_device_config_default_get(device_config);
-
- /* initialize configuration parameters */
- vxge_device_config_init(device_config, &ll_config->intr_type);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : can not enable PCI device", __func__);
- goto _exit0;
- }
-
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s : using 64bit DMA", __func__);
- } else {
- ret = -ENOMEM;
- goto _exit1;
- }
-
- ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : request regions failed", __func__);
- goto _exit1;
- }
-
- pci_set_master(pdev);
-
- attr.bar0 = pci_ioremap_bar(pdev, 0);
- if (!attr.bar0) {
- vxge_debug_init(VXGE_ERR,
- "%s : cannot remap io memory bar0", __func__);
- ret = -ENODEV;
- goto _exit2;
- }
- vxge_debug_ll_config(VXGE_TRACE,
- "pci ioremap bar0: %p:0x%llx",
- attr.bar0,
- (unsigned long long)pci_resource_start(pdev, 0));
-
- status = vxge_hw_device_hw_info_get(attr.bar0,
- &ll_config->device_hw_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: Reading of hardware info failed."
- "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vpath_mask = ll_config->device_hw_info.vpath_mask;
- if (vpath_mask == 0) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: No vpaths available in device", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vxge_debug_ll_config(VXGE_TRACE,
- "%s:%d Vpath mask = %llx", __func__, __LINE__,
- (unsigned long long)vpath_mask);
-
- function_mode = ll_config->device_hw_info.function_mode;
- host_type = ll_config->device_hw_info.host_type;
- is_privileged = __vxge_hw_device_is_privilaged(host_type,
- ll_config->device_hw_info.func_id);
-
- /* Check how many vpaths are available */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- max_vpath_supported++;
- }
-
- if (new_device)
- num_vfs = vxge_get_num_vfs(function_mode) - 1;
-
- /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
- (ll_config->intr_type != INTA)) {
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed in enabling SRIOV mode: %d\n", ret);
- /* No need to fail out, as an error here is non-fatal */
- }
-
- /*
- * Configure vpaths and get driver configured number of vpaths
- * which is less than or equal to the maximum vpaths per function.
- */
- no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
- if (!no_of_vpath) {
- vxge_debug_ll_config(VXGE_ERR,
- "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
- ret = 0;
- goto _exit3;
- }
-
- /* Setting driver callbacks */
- attr.uld_callbacks = &vxge_callbacks;
-
- status = vxge_hw_device_initialize(&hldev, &attr, device_config);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
- }
-
- if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
- ll_config->device_hw_info.fw_version.minor,
- ll_config->device_hw_info.fw_version.build) >=
- VXGE_EPROM_FW_VER) {
- struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
-
- status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
- VXGE_DRIVER_NAME);
- /* This is a non-fatal error, continue */
- }
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- hldev->eprom_versions[i] = img[i].version;
- if (!img[i].is_valid)
- break;
- vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
- VXGE_EPROM_IMG_MAJOR(img[i].version),
- VXGE_EPROM_IMG_MINOR(img[i].version),
- VXGE_EPROM_IMG_FIX(img[i].version),
- VXGE_EPROM_IMG_BUILD(img[i].version));
- }
- }
-
- /* If FCS stripping is not disabled in the MAC, fail the driver load */
- status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
- " failing driver load", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit4;
- }
-
- /* Always enable HWTS. This always makes the FCS invalid, because HWTS
- * uses the FCS location to store the timestamp. The HW FCS check still
- * correctly determines whether the checksum is valid, and the driver
- * strips the FCS anyway, so no functionality is lost. Since HWTS is
- * always enabled, the ioctl call simply selects whether the driver
- * pays attention to it.
- */
- if (is_privileged == VXGE_HW_OK) {
- status = vxge_timestamp_config(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
- VXGE_DRIVER_NAME);
- ret = -EFAULT;
- goto _exit4;
- }
- }
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
-
- /* set private device info */
- pci_set_drvdata(pdev, hldev);
-
- ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
- ll_config->addr_learn_en = addr_learn_en;
- ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = 1;
- ll_config->rth_hash_type_ipv4 = 0;
- ll_config->rth_hash_type_tcpipv6 = 0;
- ll_config->rth_hash_type_ipv6 = 0;
- ll_config->rth_hash_type_tcpipv6ex = 0;
- ll_config->rth_hash_type_ipv6ex = 0;
- ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
- ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-
- ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev);
- if (ret) {
- ret = -EINVAL;
- goto _exit4;
- }
-
- ret = vxge_probe_fw_update(vdev);
- if (ret)
- goto _exit5;
-
- vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- /* set private HW device info */
- vdev->mtu = VXGE_HW_DEFAULT_MTU;
- vdev->bar0 = attr.bar0;
- vdev->max_vpath_supported = max_vpath_supported;
- vdev->no_of_vpath = no_of_vpath;
-
- /* Virtual Path count */
- for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- if (j >= vdev->no_of_vpath)
- break;
-
- vdev->vpaths[j].is_configured = 1;
- vdev->vpaths[j].device_id = i;
- vdev->vpaths[j].ring.driver_id = j;
- vdev->vpaths[j].vdev = vdev;
- vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
- memcpy((u8 *)vdev->vpaths[j].macaddr,
- ll_config->device_hw_info.mac_addrs[i],
- ETH_ALEN);
-
- /* Initialize the mac address list header */
- INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
-
- vdev->vpaths[j].mac_addr_cnt = 0;
- vdev->vpaths[j].mcast_addr_cnt = 0;
- j++;
- }
- vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
- vdev->max_config_port = max_config_port;
-
- vdev->vlan_tag_strip = vlan_tag_strip;
-
- /* map the hashing selector table to the configured vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpath_selector[i] = vpath_selector[i];
-
- macaddr = (u8 *)vdev->vpaths[0].macaddr;
-
- ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
-
- vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.serial_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.part_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
- vdev->ndev->name, ll_config->device_hw_info.product_desc);
-
- vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
- vdev->ndev->name, macaddr);
-
- vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
- vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
-
- vxge_debug_init(VXGE_TRACE,
- "%s: Firmware version : %s Date : %s", vdev->ndev->name,
- ll_config->device_hw_info.fw_version.version,
- ll_config->device_hw_info.fw_date.date);
-
- if (new_device) {
- switch (ll_config->device_hw_info.function_mode) {
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
- break;
- }
- }
-
- vxge_print_parm(vdev, vpath_mask);
-
- /* Store the fw version for ethtool option */
- strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
-
- /* Copy the station mac address to the list */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
- if (NULL == entry) {
- vxge_debug_init(VXGE_ERR,
- "%s: mac_addr_list : memory allocation failed",
- vdev->ndev->name);
- ret = -EPERM;
- goto _exit6;
- }
- macaddr = (u8 *)&entry->macaddr;
- memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
- list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
- vdev->vpaths[i].mac_addr_cnt = 1;
- }
-
- kfree(device_config);
-
- /*
- * INTA is shared in multi-function mode. This is unlike the INTA
- * implementation in MR mode, where each VH has its own INTA message.
- * - INTA is masked (disabled) as long as at least one function sets
- * its TITAN_MASK_ALL_INT.ALARM bit.
- * - INTA is unmasked (enabled) when all enabled functions have cleared
- * their own TITAN_MASK_ALL_INT.ALARM bit.
- * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
- * Though this driver leaves the top level interrupts unmasked while
- * leaving the required module interrupt bits masked on exit, there
- * could be a rogue driver around that does not follow this procedure
- * resulting in a failure to generate interrupts. The following code is
- * present to prevent such a failure.
- */
-
- if (ll_config->device_hw_info.function_mode ==
- VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
- if (vdev->config.intr_type == INTA)
- vxge_hw_device_unmask_all(hldev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- kfree(ll_config);
- return 0;
-
-_exit6:
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-_exit5:
- vxge_device_unregister(hldev);
-_exit4:
- vxge_hw_device_terminate(hldev);
- pci_disable_sriov(pdev);
-_exit3:
- iounmap(attr.bar0);
-_exit2:
- pci_release_region(pdev, 0);
-_exit1:
- pci_disable_device(pdev);
-_exit0:
- kfree(ll_config);
- kfree(device_config);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
- return ret;
-}
-
-/**
- * vxge_remove - Free the PCI device
- * @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the PCI subsystem to release a
- * PCI device and free up all resources held by the device.
- */
-static void vxge_remove(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev;
- struct vxgedev *vdev;
- int i;
-
- hldev = pci_get_drvdata(pdev);
- if (hldev == NULL)
- return;
-
- vdev = netdev_priv(hldev->ndev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
- vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
- __func__);
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-
- vxge_device_unregister(hldev);
- /* Do not call pci_disable_sriov here, as it will break child devices */
- vxge_hw_device_terminate(hldev);
- iounmap(vdev->bar0);
- pci_release_region(pdev, 0);
- pci_disable_device(pdev);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
-
- vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
- __func__, __LINE__);
- vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
- __LINE__);
-}
-
-static const struct pci_error_handlers vxge_err_handler = {
- .error_detected = vxge_io_error_detected,
- .slot_reset = vxge_io_slot_reset,
- .resume = vxge_io_resume,
-};
-
-static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
-
-static struct pci_driver vxge_driver = {
- .name = VXGE_DRIVER_NAME,
- .id_table = vxge_id_table,
- .probe = vxge_probe,
- .remove = vxge_remove,
- .driver.pm = &vxge_pm_ops,
- .err_handler = &vxge_err_handler,
-};
-
-static int __init
-vxge_starter(void)
-{
- int ret = 0;
-
- pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
- pr_info("Driver version: %s\n", DRV_VERSION);
-
- verify_bandwidth();
-
- driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
- if (!driver_config)
- return -ENOMEM;
-
- ret = pci_register_driver(&vxge_driver);
- if (ret) {
- kfree(driver_config);
- goto err;
- }
-
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
-err:
- return ret;
-}
-
-static void __exit
-vxge_closer(void)
-{
- pci_unregister_driver(&vxge_driver);
- kfree(driver_config);
-}
-module_init(vxge_starter);
-module_exit(vxge_closer);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
deleted file mode 100644
index da9d2c191828..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ /dev/null
@@ -1,516 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_MAIN_H
-#define VXGE_MAIN_H
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-version.h"
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-
-#define VXGE_DRIVER_NAME "vxge"
-#define VXGE_DRIVER_VENDOR "Neterion, Inc"
-#define VXGE_DRIVER_FW_VERSION_MAJOR 1
-
-#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
- VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
- VXGE_VERSION_FOR
-
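DRV_VERSION above is built by preprocessor string pasting of the per-field version-string macros from vxge-version.h. With hypothetical values (for illustration only; the real ones live in vxge-version.h, not in this hunk):

	#define VXGE_VERSION_MAJOR	"2"
	#define VXGE_VERSION_MINOR	"5"
	#define VXGE_VERSION_FIX	"3"
	#define VXGE_VERSION_BUILD	"100"
	#define VXGE_VERSION_FOR	"k"
	/* DRV_VERSION then expands to the single string literal "2.5.3.100-k" */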
-#define PCI_DEVICE_ID_TITAN_WIN 0x5733
-#define PCI_DEVICE_ID_TITAN_UNI 0x5833
-#define VXGE_HW_TITAN1_PCI_REVISION 1
-#define VXGE_HW_TITAN1A_PCI_REVISION 2
-
-#define VXGE_USE_DEFAULT 0xffffffff
-#define VXGE_HW_VPATH_MSIX_ACTIVE 4
-#define VXGE_ALARM_MSIX_ID 2
-#define VXGE_HW_RXSYNC_FREQ_CNT 4
-#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
-#define VXGE_LL_RX_COPY_THRESHOLD 256
-#define VXGE_DEF_FIFO_LENGTH 84
-
-#define NO_STEERING 0
-#define PORT_STEERING 0x1
-#define RTH_STEERING 0x2
-#define RX_TOS_STEERING 0x3
-#define RX_VLAN_STEERING 0x4
-#define RTH_BUCKET_SIZE 4
-
-#define TX_PRIORITY_STEERING 1
-#define TX_VLAN_STEERING 2
-#define TX_PORT_STEERING 3
-#define TX_MULTIQ_STEERING 4
-
-#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
-
-#define VXGE_TTI_BTIMER_VAL 250000
-
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_T1A_TTI_LTIMER_VAL 80
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_TTI_RTIMER_ADAPT_VAL 10
-#define VXGE_T1A_TTI_RTIMER_VAL 400
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_RTI_RTIMER_ADAPT_VAL 15
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
-#define VXGE_ISR_POLLING_CNT 8
-#define VXGE_MAX_CONFIG_DEV 0xFF
-#define VXGE_EXEC_MODE_DISABLE 0
-#define VXGE_EXEC_MODE_ENABLE 1
-#define VXGE_MAX_CONFIG_PORT 1
-#define VXGE_ALL_VID_DISABLE 0
-#define VXGE_ALL_VID_ENABLE 1
-#define VXGE_PAUSE_CTRL_DISABLE 0
-#define VXGE_PAUSE_CTRL_ENABLE 1
-
-#define TTI_TX_URANGE_A 5
-#define TTI_TX_URANGE_B 15
-#define TTI_TX_URANGE_C 40
-#define TTI_TX_UFC_A 5
-#define TTI_TX_UFC_B 40
-#define TTI_TX_UFC_C 60
-#define TTI_TX_UFC_D 100
-#define TTI_T1A_TX_UFC_A 30
-#define TTI_T1A_TX_UFC_B 80
-/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
-/* Slope - 93 */
-/* 60 - 9k Mtu, 140 - 1.5k mtu */
-#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
-
-/* Slope - 37 */
-/* 100 - 9k Mtu, 300 - 1.5k mtu */
-#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
-
-
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_T1A_RX_URANGE_A 1
-#define RTI_T1A_RX_URANGE_B 20
-#define RTI_T1A_RX_URANGE_C 50
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
-#define RTI_T1A_RX_UFC_B 20
-#define RTI_T1A_RX_UFC_C 50
-#define RTI_T1A_RX_UFC_D 60
-
-/*
- * With these moderation parameters the interrupt rate is held at about
- * 3k per second for most, but not all, traffic. This is the maximum
- * interrupt count allowed per function with INTA, or per vector with
- * MSI-X, in a 10 millisecond period. Enabled only for Titan 1A.
- */
-#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
-#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
-
-/* Timer period in milliseconds */
-#define VXGE_TIMER_DELAY 10000
-
-#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
-
-#define is_sriov(function_mode) \
- ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
-
-enum vxge_reset_event {
- /* reset events */
- VXGE_LL_VPATH_RESET = 0,
- VXGE_LL_DEVICE_RESET = 1,
- VXGE_LL_FULL_RESET = 2,
- VXGE_LL_START_RESET = 3,
- VXGE_LL_COMPL_RESET = 4
-};
-/* These flags represent the device's temporary state */
-enum vxge_device_state_t {
-__VXGE_STATE_RESET_CARD = 0,
-__VXGE_STATE_CARD_UP
-};
-
-enum vxge_mac_addr_state {
- /* mac address states */
- VXGE_LL_MAC_ADDR_IN_LIST = 0,
- VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
-};
-
-struct vxge_drv_config {
- int config_dev_cnt;
- int total_dev_cnt;
- int g_no_cpus;
- unsigned int vpath_per_dev;
-};
-
-struct macInfo {
- unsigned char macaddr[ETH_ALEN];
- unsigned char macmask[ETH_ALEN];
- unsigned int vpath_no;
- enum vxge_mac_addr_state state;
-};
-
-struct vxge_config {
- int tx_pause_enable;
- int rx_pause_enable;
- int napi_weight;
- int intr_type;
-#define INTA 0
-#define MSI 1
-#define MSI_X 2
-
- int addr_learn_en;
-
- u32 rth_steering:2,
- rth_algorithm:2,
- rth_hash_type_tcpipv4:1,
- rth_hash_type_ipv4:1,
- rth_hash_type_tcpipv6:1,
- rth_hash_type_ipv6:1,
- rth_hash_type_tcpipv6ex:1,
- rth_hash_type_ipv6ex:1,
- rth_bkt_sz:8;
- int rth_jhash_golden_ratio;
- int tx_steering_type;
- int fifo_indicate_max_pkts;
- struct vxge_hw_device_hw_info device_hw_info;
-};
-
-struct vxge_msix_entry {
- /* Mimicking the kernel's msix_entry struct. */
- u16 vector;
- u16 entry;
- u16 in_use;
- void *arg;
-};
-
-/* Software Statistics */
-
-struct vxge_sw_stats {
-
- /* Virtual Path */
- unsigned long vpaths_open;
- unsigned long vpath_open_fail;
-
- /* Misc. */
- unsigned long link_up;
- unsigned long link_down;
-};
-
-struct vxge_mac_addrs {
- struct list_head item;
- u64 macaddr;
- u64 macmask;
- enum vxge_mac_addr_state state;
-};
-
-struct vxgedev;
-
-struct vxge_fifo_stats {
- struct u64_stats_sync syncp;
- u64 tx_frms;
- u64 tx_bytes;
-
- unsigned long tx_errors;
- unsigned long txd_not_free;
- unsigned long txd_out_of_desc;
- unsigned long pci_map_fail;
-};
-
-struct vxge_fifo {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_fifo *handle;
- struct netdev_queue *txq;
-
- int tx_steering_type;
- int indicate_max_pkts;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- u32 tx_vector_no;
- /* Tx stats */
- struct vxge_fifo_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_ring_stats {
- struct u64_stats_sync syncp;
- u64 rx_frms;
- u64 rx_mcast;
- u64 rx_bytes;
-
- unsigned long rx_errors;
- unsigned long rx_dropped;
- unsigned long prev_rx_frms;
- unsigned long pci_map_fail;
- unsigned long skb_alloc_fail;
-};
-
-struct vxge_ring {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_ring *handle;
- /* The vpath id maintained in the driver -
- * 0 to 'maximum_vpaths_in_function - 1'
- */
- int driver_id;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- /* copy of the flag indicating whether rx_hwts is to be used */
- u32 rx_hwts:1;
-
- int pkts_processed;
- int budget;
-
- struct napi_struct napi;
- struct napi_struct *napi_p;
-
-#define VXGE_MAX_MAC_ADDR_COUNT 30
-
- int vlan_tag_strip;
- u32 rx_vector_no;
- enum vxge_hw_status last_status;
-
- /* Rx stats */
- struct vxge_ring_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_vpath {
- struct vxge_fifo fifo;
- struct vxge_ring ring;
-
- struct __vxge_hw_vpath_handle *handle;
-
- /* Actual vpath id for this vpath in the device - 0 to 16 */
- int device_id;
- int max_mac_addr_cnt;
- int is_configured;
- int is_open;
- struct vxgedev *vdev;
- u8 macaddr[ETH_ALEN];
- u8 macmask[ETH_ALEN];
-
-#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
- /* mac addresses currently programmed into NIC */
- u16 mac_addr_cnt;
- u16 mcast_addr_cnt;
- struct list_head mac_addr_list;
-
- u32 level_err;
- u32 level_trace;
-};
-#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
- for (i = 0; i < vdev->no_of_vpath; i++) { \
- vdev->vpaths[i].level_err = err; \
- vdev->vpaths[i].level_trace = trace; \
- } \
- vdev->level_err = err; \
- vdev->level_trace = trace; \
-}
-
-struct vxgedev {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_device *devh;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- int vlan_tag_strip;
- struct vxge_config config;
- unsigned long state;
-
- /* Indicates which vpath to reset */
- unsigned long vp_reset;
-
- /* Timer used for polling vpath resets */
- struct timer_list vp_reset_timer;
-
- /* Timer used for polling vpath lockup */
- struct timer_list vp_lockup_timer;
-
- /*
-	 * Flags to track whether the device is in all-multicast
-	 * or promiscuous mode.
- */
- u16 all_multi_flg;
-
- /* A flag indicating whether rx_hwts is to be used or not. */
- u32 rx_hwts:1,
- titan1:1;
-
- struct vxge_msix_entry *vxge_entries;
- struct msix_entry *entries;
- /*
- * 4 for each vpath * 17;
- * total is 68
- */
-#define VXGE_MAX_REQUESTED_MSIX 68
-#define VXGE_INTR_STRLEN 80
- char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
-
- enum vxge_hw_event cric_err_event;
-
- int max_vpath_supported;
- int no_of_vpath;
-
- struct napi_struct napi;
- /* A debug option: when enabled and an error condition occurs,
- * the driver will
- * - mask all interrupts,
- * - not clear the source of the alarm, and
- * - gracefully stop all I/O.
- * A diagnostic dump of registers and stats at this point
- * reveals very useful information.
- */
- int exec_mode;
- int max_config_port;
- struct vxge_vpath *vpaths;
-
- struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
- void __iomem *bar0;
- struct vxge_sw_stats stats;
- int mtu;
- /* Below variables are used for vpath selection to transmit a packet */
- u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpaths_deployed;
-
- u32 intr_cnt;
- u32 level_err;
- u32 level_trace;
- char fw_version[VXGE_HW_FW_STRLEN];
- struct work_struct reset_task;
-};
-
-struct vxge_rx_priv {
- struct sk_buff *skb;
- unsigned char *skb_data;
- dma_addr_t data_dma;
- dma_addr_t data_size;
-};
-
-struct vxge_tx_priv {
- struct sk_buff *skb;
- dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
-};
-
-#define VXGE_MODULE_PARAM_INT(p, val) \
- static int p = val; \
- module_param(p, int, 0)
-
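VXGE_MODULE_PARAM_INT() turns a name/default pair into a load-time module parameter. A hypothetical use (the real call sites are in vxge-main.c and are not shown in this hunk):

	VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
	/* expands to:
	 *	static int max_config_dev = VXGE_MAX_CONFIG_DEV;
	 *	module_param(max_config_dev, int, 0);
	 */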
-static inline
-void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *),
- unsigned long timeout)
-{
- timer_setup(timer, func, 0);
- mod_timer(timer, jiffies + timeout);
-}
-
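vxge_os_timer() pairs timer_setup() with an immediate mod_timer(), so the callback fires once after 'timeout' jiffies unless it re-arms itself. A usage sketch with a hypothetical callback (not the driver's actual call sites):

	static void my_poll_cb(struct timer_list *t)
	{
		/* periodic work would go here; re-arm with mod_timer() to repeat */
	}

	/* arm a poll roughly half a second from now */
	vxge_os_timer(&vdev->vp_reset_timer, my_poll_cb, HZ / 2);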
-void vxge_initialize_ethtool_ops(struct net_device *ndev);
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-
-/* #define VXGE_DEBUG_INIT: debug for initialization functions
- * #define VXGE_DEBUG_TX : debug transmit related functions
- * #define VXGE_DEBUG_RX : debug receive related functions
- * #define VXGE_DEBUG_MEM : debug memory module
- * #define VXGE_DEBUG_LOCK: debug locks
- * #define VXGE_DEBUG_SEM : debug semaphore
- * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
-*/
-#define VXGE_DEBUG_INIT 0x00000001
-#define VXGE_DEBUG_TX 0x00000002
-#define VXGE_DEBUG_RX 0x00000004
-#define VXGE_DEBUG_MEM 0x00000008
-#define VXGE_DEBUG_LOCK 0x00000010
-#define VXGE_DEBUG_SEM 0x00000020
-#define VXGE_DEBUG_ENTRYEXIT 0x00000040
-#define VXGE_DEBUG_INTR 0x00000080
-#define VXGE_DEBUG_LL_CONFIG 0x00000100
-
-/* Debug tracing for VXGE driver */
-#ifndef VXGE_DEBUG_MASK
-#define VXGE_DEBUG_MASK 0x0
-#endif
-
-#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
-#define vxge_debug_ll_config(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_ll_config(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
-#define vxge_debug_init(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_init(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
-#define vxge_debug_tx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_tx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
-#define vxge_debug_rx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_rx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
-#define vxge_debug_mem(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_mem(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
-#define vxge_debug_entryexit(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_entryexit(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
-#define vxge_debug_intr(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_intr(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
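As the guards above show, each vxge_debug_*() family only compiles to real vxge_debug_ll() calls when its bit is set in VXGE_DEBUG_MASK at build time; otherwise it collapses to no_printk(). For example (a build-time setting, not a default of this header):

	/* enable init and low-level-config tracing only:
	 * VXGE_DEBUG_INIT (0x00000001) | VXGE_DEBUG_LL_CONFIG (0x00000100)
	 */
	#define VXGE_DEBUG_MASK	0x00000101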
-#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
- vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
- level, mask);\
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
- vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
- vdev->devh), \
- vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
- vdev->devh));\
-}
-
-#ifdef NETIF_F_GSO
-#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
-#endif
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
deleted file mode 100644
index 3e658b175947..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-reg.h
+++ /dev/null
@@ -1,4636 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
- * Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_REG_H
-#define VXGE_REG_H
-
-/*
- * vxge_mBIT(loc) - set bit at offset
- */
-#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
-
-/*
- * vxge_vBIT(val, loc, sz) - set bits at offset
- */
-#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
-#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
-
-/*
- * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
- */
-#define vxge_bVALn(bits, loc, n) \
- ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))
-
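Worked examples, derived only from the three definitions above (bit 0 is the most significant bit of the 64-bit word):

	/* vxge_mBIT(3)                              == 0x1000000000000000ULL */
	/* vxge_vBIT(0x5, 61, 3)                     == 0x0000000000000005ULL */
	/* vxge_bVALn(0x1234000000000000ULL, 0, 16)  == 0x1234                */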
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
- vxge_bVALn(bits, 56, 8)
-
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
- vxge_bVALn(bits, 3, 5)
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
- vxge_bVALn(bits, 5, 3)
-#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
-
-#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
-#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
-
-#define VXGE_HW_FW_API_GET_EPROM_REV 31
-
-#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
-#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
-#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
-#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
-
-#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
-#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
-
-#define VXGE_HW_FW_API_GET_FUNC_MODE 29
-#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
-
-#define VXGE_HW_FW_UPGRADE_MEMO 13
-#define VXGE_HW_FW_UPGRADE_ACTION 16
-#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
-#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
-#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
-#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
-
-#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
-#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
-#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
-
-#define VXGE_HW_ASIC_MODE_RESERVED 0
-#define VXGE_HW_ASIC_MODE_NO_IOV 1
-#define VXGE_HW_ASIC_MODE_SR_IOV 2
-#define VXGE_HW_ASIC_MODE_MR_IOV 3
-
-#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
-#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
-
-#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
-
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
- vxge_bVALn(bits, 50, 14)
-
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
- vxge_bVALn(bits, 0, 17)
-
-#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
- vxge_bVALn(bits, 3, 5)
-
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
- vxge_bVALn(bits, 17, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
-
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
-
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
- vxge_bVALn(bits, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
- vxge_bVALn(bits, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
- vxge_bVALn(bits, 33, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
- vxge_vBIT(val, 49, 15)
-
-#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
-#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
-#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
-
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
-
-#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
- vxge_mBIT(54)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
- vxge_bVALn(bits, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
- vxge_vBIT(val, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
- vxge_bVALn(bits, 62, 2)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
- vxge_bVALn(bits, 7, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
- vxge_bVALn(bits, 8, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
- vxge_bVALn(bits, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
- vxge_bVALn(bits, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
- vxge_bVALn(bits, 15, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
- vxge_bVALn(bits, 19, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
- vxge_bVALn(bits, 23, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
- vxge_bVALn(bits, 27, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 31, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 35, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
- vxge_bVALn(bits, 39, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
- vxge_bVALn(bits, 43, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
- vxge_vBIT(val, 32, 32)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
- vxge_bVALn(bits, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
- vxge_bVALn(bits, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
- vxge_vBIT(val, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
- vxge_bVALn(bits, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
- vxge_bVALn(bits, 42, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
- vxge_vBIT(val, 42, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
- vxge_bVALn(bits, 0, 64)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
- vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
- vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
- vxge_bVALn(bits, 40, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
- vxge_vBIT(val, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
- vxge_bVALn(bits, 56, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 57, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
- vxge_vBIT(val, 57, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
- vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
- vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) \
- vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) \
- vxge_vBIT(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
-
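The FW_MEMO selector and the version getters above are roughly what a driver decodes after a firmware-memo read: data0 carries the firmware date/version and data1 the flash date/version. A small sketch of that unpacking step, assuming data0/data1 already hold the 64-bit words returned by the RTS steering read (the read sequence itself is outside this hunk):

/* Sketch only: unpack firmware/flash version words from a FW memo read.
 * Only the GET_* accessors are taken from this header; everything else
 * (function name, printout) is illustrative.
 */
static void vxge_sketch_fw_ver(u64 data0, u64 data1)
{
	u32 fw_major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	u32 fw_minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	u32 fw_build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
	u32 fl_major = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
	u32 fl_minor = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
	u32 fl_build = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

	pr_info("fw %u.%u.%u, flash %u.%u.%u\n",
		fw_major, fw_minor, fw_build, fl_major, fl_minor, fl_build);
}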
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
- vxge_bVALn(bits, 0, 18)
-
-#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
-) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define \
-VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_CONFIG_PRIV_H
-
-#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
-#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
-
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-/*
- * The registers are memory mapped and are in native big-endian byte order.
- * Little-endian hosts are handled by enabling hardware byte-swapping for
- * register and DMA operations.
- */
-struct vxge_hw_legacy_reg {
-
- u8 unused00010[0x00010];
-
-/*0x00010*/ u64 toc_swapper_fb;
-#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00018*/ u64 pifm_rd_swap_en;
-#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00020*/ u64 pifm_rd_flip_en;
-#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00028*/ u64 pifm_wr_swap_en;
-#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00030*/ u64 pifm_wr_flip_en;
-#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00038*/ u64 toc_first_pointer;
-#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00040*/ u64 host_access_en;
-#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
-
-} __packed;
-
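A condensed sketch of how a host might use the legacy block above to bring the byte-swapper into a known state: probe toc_swapper_fb and, if it does not already read back as VXGE_HW_SWAPPER_INITIAL_VALUE, enable read and write byte swapping. The driver's real routine also handles the bit-flipped cases; this version is simplified, and the function name and error handling are illustrative only. readq/writeq assume a 64-bit host.

/* Sketch: bring the PIFM byte-swapper up for a little-endian host.
 * legacy_reg is an ioremap()ed struct vxge_hw_legacy_reg.
 */
static int vxge_sketch_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64 = readq(&legacy_reg->toc_swapper_fb);

	if (val64 == VXGE_HW_SWAPPER_INITIAL_VALUE)
		return 0;	/* accesses already decode in host order */

	writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
	       &legacy_reg->pifm_rd_swap_en);
	writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
	       &legacy_reg->pifm_wr_swap_en);

	/* Re-read to confirm the swapper is now in effect. */
	return readq(&legacy_reg->toc_swapper_fb) ==
	       VXGE_HW_SWAPPER_INITIAL_VALUE ? 0 : -EIO;
}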
-struct vxge_hw_toc_reg {
-
- u8 unused00050[0x00050];
-
-/*0x00050*/ u64 toc_common_pointer;
-#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 toc_memrepair_pointer;
-#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
-#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused001e0[0x001e0-0x000e8];
-
-/*0x001e0*/ u64 toc_mrpcim_pointer;
-#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x001e8*/ u64 toc_srpcim_pointer[17];
-#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00278[0x00278-0x00270];
-
-/*0x00278*/ u64 toc_vpmgmt_pointer[17];
-#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00390[0x00390-0x00300];
-
-/*0x00390*/ u64 toc_vpath_pointer[17];
-#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused004a0[0x004a0-0x00418];
-
-/*0x004a0*/ u64 toc_kdfc;
-#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004a8*/ u64 toc_usdc;
-#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004b0*/ u64 toc_kdfc_vpath_stride;
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-/*0x004b8*/ u64 toc_kdfc_fifo_stride;
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-
-} __packed;
-
-struct vxge_hw_common_reg {
-
- u8 unused00a00[0x00a00];
-
-/*0x00a00*/ u64 prc_status1;
-#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
-/*0x00a08*/ u64 rxdcm_reset_in_progress;
-#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a10*/ u64 replicq_flush_in_progress;
-#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
-#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
-#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a28*/ u64 noffload_reset_in_progress;
-#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rd_req_in_progress;
-#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
-/*0x00a38*/ u64 rd_req_outstanding;
-#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
-/*0x00a40*/ u64 kdfc_reset_in_progress;
-#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
- u8 unused00b00[0x00b00-0x00a48];
-
-/*0x00b00*/ u64 one_cfg_vp;
-#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
-/*0x00b08*/ u64 one_common;
-#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
- u8 unused00b80[0x00b80-0x00b10];
-
-/*0x00b80*/ u64 tim_int_en;
-#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
-/*0x00b88*/ u64 tim_set_int_en;
-#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b90*/ u64 tim_clr_int_en;
-#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b98*/ u64 tim_mask_int_during_reset;
-#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
-/*0x00ba0*/ u64 tim_reset_in_progress;
-#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
-/*0x00ba8*/ u64 tim_outstanding_bmap;
-#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
- u8 unused00c00[0x00c00-0x00bb0];
-
-/*0x00c00*/ u64 msg_reset_in_progress;
-#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
-/*0x00c08*/ u64 msg_mxp_mr_ready;
-#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
-/*0x00c10*/ u64 msg_uxp_mr_ready;
-#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
-/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
-#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
-/*0x00c20*/ u64 msg_umq_rtl_bwr;
-#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
- u8 unused00d00[0x00d00-0x00c28];
-
-/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
-#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
-/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
-#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
-/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
-#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
-/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
-#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
-/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
-#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
- u8 unused00d40[0x00d40-0x00d28];
-
-/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
-#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
-/*0x00d48*/ u64 stats_cfg0;
-#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
- u8 unused00da8[0x00da8-0x00d50];
-
-/*0x00da8*/ u64 clear_msix_mask_vect[4];
-#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00dc8*/ u64 set_msix_mask_vect[4];
-#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
-/*0x00de8*/ u64 clear_msix_mask_all_vect;
-#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df0*/ u64 set_msix_mask_all_vect;
-#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df8*/ u64 mask_vector[4];
-#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x00e18*/ u64 msix_pending_vector[4];
-#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
-#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e58*/ u64 titan_asic_id;
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
-/*0x00e60*/ u64 titan_general_int_status;
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
- vxge_vBIT(val, 3, 17)
- u8 unused00e70[0x00e70-0x00e68];
-
-/*0x00e70*/ u64 titan_mask_all_int;
-#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
-#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
- u8 unused00e80[0x00e80-0x00e78];
-
-/*0x00e80*/ u64 tim_int_status0;
-#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
-/*0x00e88*/ u64 tim_int_mask0;
-#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
-/*0x00e90*/ u64 tim_int_status1;
-#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
-/*0x00e98*/ u64 tim_int_mask1;
-#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
-/*0x00ea0*/ u64 rti_int_status;
-#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
-/*0x00ea8*/ u64 rti_int_mask;
-#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
-/*0x00eb0*/ u64 adapter_status;
-#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
-#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
-#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
-#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
-#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
-#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
-#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
-#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
-#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
-/*0x00eb8*/ u64 gen_ctrl;
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
-#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
-#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
-#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
- u8 unused00ed0[0x00ed0-0x00ec0];
-
-/*0x00ed0*/ u64 adapter_ready;
-#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
-/*0x00ed8*/ u64 outstanding_read;
-#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
-/*0x00ee0*/ u64 vpath_rst_in_prog;
-#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
-/*0x00ee8*/ u64 vpath_reg_modified;
-#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
- u8 unused00fc0[0x00fc0-0x00ef0];
-
-/*0x00fc0*/ u64 cp_reset_in_progress;
-#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
- u8 unused01080[0x01080-0x00fc8];
-
-/*0x01080*/ u64 xgmac_ready;
-#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused010c0[0x010c0-0x01088];
-
-/*0x010c0*/ u64 fbif_ready;
-#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused01100[0x01100-0x010c8];
-
-/*0x01100*/ u64 vplane_assignments;
-#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
-/*0x01108*/ u64 vpath_assignments;
-#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
-/*0x01110*/ u64 resource_assignments;
-#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01118*/ u64 host_type_assignments;
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 5, 3)
- u8 unused01128[0x01128-0x01120];
-
-/*0x01128*/ u64 max_resource_assignments;
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
- vxge_vBIT(val, 11, 5)
-/*0x01130*/ u64 pf_vpath_assignments;
-#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01200[0x01200-0x01138];
-
-/*0x01200*/ u64 rts_access_icmp;
-#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01208*/ u64 rts_access_tcpsyn;
-#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01210*/ u64 rts_access_zl4pyld;
-#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01218*/ u64 rts_access_l4prtcl_tcp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01220*/ u64 rts_access_l4prtcl_udp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01228*/ u64 rts_access_l4prtcl_flex;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01230*/ u64 rts_access_ipfrag;
-#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
-
-} __packed;
-
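Once the common register block is mapped, readiness can be checked by polling adapter_ready for the ADAPTER_READY bit defined above. A minimal sketch, with an illustrative retry count and delay that are not taken from the driver:

/* Sketch: wait for the adapter to report ready.
 * common_reg is an ioremap()ed struct vxge_hw_common_reg.
 */
static int vxge_sketch_wait_adapter_ready(struct vxge_hw_common_reg __iomem *common_reg)
{
	int retries = 100;	/* illustrative: ~1 s total */

	while (retries--) {
		if (readq(&common_reg->adapter_ready) &
		    VXGE_HW_ADAPTER_READY_ADAPTER_READY)
			return 0;
		msleep(10);
	}
	return -ETIMEDOUT;
}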
-struct vxge_hw_memrepair_reg {
- u64 unused1;
- u64 unused2;
-} __packed;
-
-struct vxge_hw_pcicfgmgmt_reg {
-
-/*0x00000*/ u64 resource_no;
-#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
-/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00020*/ u64 msixgrp_no;
-#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
-
-} __packed;
-
-struct vxge_hw_mrpcim_reg {
-/*0x00000*/ u64 g3fbct_int_status;
-#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x00008*/ u64 g3fbct_int_mask;
-/*0x00010*/ u64 g3fbct_err_reg;
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x00018*/ u64 g3fbct_err_mask;
-/*0x00020*/ u64 g3fbct_err_alarm;
-
- u8 unused00a00[0x00a00-0x00028];
-
-/*0x00a00*/ u64 wrdma_int_status;
-#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
-#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
-#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
-/*0x00a08*/ u64 wrdma_int_mask;
-/*0x00a10*/ u64 rc_alarm_reg;
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
-#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
-#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
-#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
-#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
-/*0x00a18*/ u64 rc_alarm_mask;
-/*0x00a20*/ u64 rc_alarm_alarm;
-/*0x00a28*/ u64 rxdrm_sm_err_reg;
-#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rxdrm_sm_err_mask;
-/*0x00a38*/ u64 rxdrm_sm_err_alarm;
-/*0x00a40*/ u64 rxdcm_sm_err_reg;
-#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a48*/ u64 rxdcm_sm_err_mask;
-/*0x00a50*/ u64 rxdcm_sm_err_alarm;
-/*0x00a58*/ u64 rxdwm_sm_err_reg;
-#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a60*/ u64 rxdwm_sm_err_mask;
-/*0x00a68*/ u64 rxdwm_sm_err_alarm;
-/*0x00a70*/ u64 rda_err_reg;
-#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
-#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
-#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
-#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
-#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
-#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
-/*0x00a78*/ u64 rda_err_mask;
-/*0x00a80*/ u64 rda_err_alarm;
-/*0x00a88*/ u64 rda_ecc_db_reg;
-#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00a90*/ u64 rda_ecc_db_mask;
-/*0x00a98*/ u64 rda_ecc_db_alarm;
-/*0x00aa0*/ u64 rda_ecc_sg_reg;
-#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00aa8*/ u64 rda_ecc_sg_mask;
-/*0x00ab0*/ u64 rda_ecc_sg_alarm;
-/*0x00ab8*/ u64 rqa_err_reg;
-#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
-/*0x00ac0*/ u64 rqa_err_mask;
-/*0x00ac8*/ u64 rqa_err_alarm;
-/*0x00ad0*/ u64 frf_alarm_reg;
-#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
-/*0x00ad8*/ u64 frf_alarm_mask;
-/*0x00ae0*/ u64 frf_alarm_alarm;
-/*0x00ae8*/ u64 rocrc_alarm_reg;
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
-/*0x00af0*/ u64 rocrc_alarm_mask;
-/*0x00af8*/ u64 rocrc_alarm_alarm;
-/*0x00b00*/ u64 wde0_alarm_reg;
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b08*/ u64 wde0_alarm_mask;
-/*0x00b10*/ u64 wde0_alarm_alarm;
-/*0x00b18*/ u64 wde1_alarm_reg;
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b20*/ u64 wde1_alarm_mask;
-/*0x00b28*/ u64 wde1_alarm_alarm;
-/*0x00b30*/ u64 wde2_alarm_reg;
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b38*/ u64 wde2_alarm_mask;
-/*0x00b40*/ u64 wde2_alarm_alarm;
-/*0x00b48*/ u64 wde3_alarm_reg;
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b50*/ u64 wde3_alarm_mask;
-/*0x00b58*/ u64 wde3_alarm_alarm;
-
- u8 unused00be8[0x00be8-0x00b60];
-
-/*0x00be8*/ u64 rx_w_round_robin_0;
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
-/*0x00bf0*/ u64 rx_w_round_robin_1;
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00bf8*/ u64 rx_w_round_robin_2;
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c00*/ u64 rx_w_round_robin_3;
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c08*/ u64 rx_w_round_robin_4;
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c10*/ u64 rx_w_round_robin_5;
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c18*/ u64 rx_w_round_robin_6;
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c20*/ u64 rx_w_round_robin_7;
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c28*/ u64 rx_w_round_robin_8;
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c30*/ u64 rx_w_round_robin_9;
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c38*/ u64 rx_w_round_robin_10;
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c40*/ u64 rx_w_round_robin_11;
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c48*/ u64 rx_w_round_robin_12;
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c50*/ u64 rx_w_round_robin_13;
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c58*/ u64 rx_w_round_robin_14;
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c60*/ u64 rx_w_round_robin_15;
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c68*/ u64 rx_w_round_robin_16;
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c70*/ u64 rx_w_round_robin_17;
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c78*/ u64 rx_w_round_robin_18;
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c80*/ u64 rx_w_round_robin_19;
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c88*/ u64 rx_w_round_robin_20;
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c90*/ u64 rx_w_round_robin_21;
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
- vxge_vBIT(val, 19, 5)
-
-#define VXGE_HW_WRR_RING_SERVICE_STATES 171
-#define VXGE_HW_WRR_RING_COUNT 22
-
-/*0x00c98*/ u64 rx_queue_priority_0;
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-/*0x00ca0*/ u64 rx_queue_priority_1;
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
-/*0x00ca8*/ u64 rx_queue_priority_2;
-#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
- u8 unused00cc8[0x00cc8-0x00cb0];
-
-/*0x00cc8*/ u64 replication_queue_priority;
-#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00cd0*/ u64 rx_queue_select;
-#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
-/*0x00cd8*/ u64 rqa_vpbp_ctrl;
-#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
-#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
-#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
-/*0x00ce0*/ u64 rx_multi_cast_ctrl;
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
- vxge_vBIT(val, 2, 30)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
-/*0x00ce8*/ u64 wde_prm_ctrl;
-#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
-#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
-/*0x00cf0*/ u64 noa_ctrl;
-#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
-/*0x00cf8*/ u64 phase_cfg;
-#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
-#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
-#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
-#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
-#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
-#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
-#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
-#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
-#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
-/*0x00d00*/ u64 rcq_bypq_cfg;
-#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
- u8 unused00e00[0x00e00-0x00d08];
-
-/*0x00e00*/ u64 doorbell_int_status;
-#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
-#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
-/*0x00e08*/ u64 doorbell_int_mask;
-/*0x00e10*/ u64 kdfc_err_reg;
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
-/*0x00e18*/ u64 kdfc_err_mask;
-/*0x00e20*/ u64 kdfc_err_reg_alarm;
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
- u8 unused00e40[0x00e40-0x00e28];
-/*0x00e40*/ u64 kdfc_vp_partition_0;
-#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
-/*0x00e48*/ u64 kdfc_vp_partition_1;
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
-/*0x00e50*/ u64 kdfc_vp_partition_2;
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
-/*0x00e58*/ u64 kdfc_vp_partition_3;
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
-/*0x00e60*/ u64 kdfc_vp_partition_4;
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
-/*0x00e68*/ u64 kdfc_vp_partition_5;
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
-/*0x00e70*/ u64 kdfc_vp_partition_6;
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
-/*0x00e78*/ u64 kdfc_vp_partition_7;
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
-/*0x00e80*/ u64 kdfc_vp_partition_8;
-#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
-/*0x00e88*/ u64 kdfc_w_round_robin_0;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused0f28[0x0f28-0x0e90];
-
-/*0x00f28*/ u64 kdfc_w_round_robin_20;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
-#define VXGE_HW_WRR_FIFO_COUNT 20
-
- u8 unused0fc8[0x0fc8-0x0f30];
-
-/*0x00fc8*/ u64 kdfc_w_round_robin_40;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused1068[0x01068-0x0fd0];
-
-/*0x01068*/ u64 kdfc_entry_type_sel_0;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
-/*0x01070*/ u64 kdfc_entry_type_sel_1;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 kdfc_fifo_0_ctrl;
-#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
-#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
-
- u8 unused1100[0x01100-0x1080];
-
-/*0x01100*/ u64 kdfc_fifo_17_ctrl;
-#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-
- u8 unused1600[0x01600-0x1108];
-
-/*0x01600*/ u64 rxmac_int_status;
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
- vxge_mBIT(11)
-/*0x01608*/ u64 rxmac_int_mask;
- u8 unused01618[0x01618-0x01610];
-
-/*0x01618*/ u64 rxmac_gen_err_reg;
-/*0x01620*/ u64 rxmac_gen_err_mask;
-/*0x01628*/ u64 rxmac_gen_err_alarm;
-/*0x01630*/ u64 rxmac_ecc_err_reg;
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
- vxge_vBIT(val, 24, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
- vxge_vBIT(val, 26, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
- vxge_vBIT(val, 28, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
- vxge_vBIT(val, 30, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
- vxge_vBIT(val, 40, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
- vxge_vBIT(val, 47, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
- vxge_vBIT(val, 54, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
- vxge_vBIT(val, 57, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
- vxge_mBIT(60)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
- vxge_mBIT(61)
-/*0x01638*/ u64 rxmac_ecc_err_mask;
-/*0x01640*/ u64 rxmac_ecc_err_alarm;
-/*0x01648*/ u64 rxmac_various_err_reg;
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
-/*0x01650*/ u64 rxmac_various_err_mask;
-/*0x01658*/ u64 rxmac_various_err_alarm;
-/*0x01660*/ u64 rxmac_gen_cfg;
-#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
-/*0x01668*/ u64 rxmac_authorize_all_addr;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
-/*0x01670*/ u64 rxmac_authorize_all_vid;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
- u8 unused016c0[0x016c0-0x01678];
-
-/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
- u8 unused016e0[0x016e0-0x016c8];
-
-/*0x016e0*/ u64 rxmac_cfg0_port[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
- u8 unused01710[0x01710-0x016f8];
-
-/*0x01710*/ u64 rxmac_cfg2_port[3];
-#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
-/*0x01728*/ u64 rxmac_pause_cfg_port[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
- u8 unused01758[0x01758-0x01740];
-
-/*0x01758*/ u64 rxmac_red_cfg0_port[3];
-#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
-/*0x01770*/ u64 rxmac_red_cfg1_port[3];
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
-/*0x01788*/ u64 rxmac_red_cfg2_port[3];
-#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
-/*0x017a0*/ u64 rxmac_link_util_port[3];
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
- u8 unused017d0[0x017d0-0x017b8];
-
-/*0x017d0*/ u64 rxmac_status_port[3];
-#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
- u8 unused01800[0x01800-0x017e8];
-
-/*0x01800*/ u64 rxmac_rx_pa_cfg0;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x01808*/ u64 rxmac_rx_pa_cfg1;
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
- u8 unused01828[0x01828-0x01810];
-
-/*0x01828*/ u64 rts_mgr_cfg0;
-#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
-/*0x01830*/ u64 rts_mgr_cfg1;
-#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
-/*0x01838*/ u64 rts_mgr_criteria_priority;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
-/*0x01840*/ u64 rts_mgr_da_pause_cfg;
-#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
-#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01890[0x01890-0x01850];
-/*0x01890*/ u64 rts_mgr_cbasin_cfg;
- u8 unused01968[0x01968-0x01898];
-
-/*0x01968*/ u64 dbg_stat_rx_any_frms;
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused01a00[0x01a00-0x01970];
-
-/*0x01a00*/ u64 rxmac_red_rate_vp[17];
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
- u8 unused01e00[0x01e00-0x01a88];
-
-/*0x01e00*/ u64 xgmac_int_status;
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
- vxge_mBIT(7)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
-/*0x01e08*/ u64 xgmac_int_mask;
-/*0x01e10*/ u64 xmac_gen_err_reg;
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
- vxge_mBIT(11)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
- vxge_mBIT(23)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
- vxge_vBIT(val, 42, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
- vxge_vBIT(val, 44, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
- vxge_vBIT(val, 46, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
- vxge_vBIT(val, 48, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
- vxge_vBIT(val, 50, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
- vxge_vBIT(val, 52, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
- vxge_vBIT(val, 54, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
- vxge_vBIT(val, 56, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
- vxge_vBIT(val, 58, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
-/*0x01e18*/ u64 xmac_gen_err_mask;
-/*0x01e20*/ u64 xmac_gen_err_alarm;
-/*0x01e28*/ u64 xmac_link_err_port0_reg;
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
- vxge_mBIT(47)
-/*0x01e30*/ u64 xmac_link_err_port0_mask;
-/*0x01e38*/ u64 xmac_link_err_port0_alarm;
-/*0x01e40*/ u64 xmac_link_err_port1_reg;
-/*0x01e48*/ u64 xmac_link_err_port1_mask;
-/*0x01e50*/ u64 xmac_link_err_port1_alarm;
-/*0x01e58*/ u64 xgxs_gen_err_reg;
-#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
-/*0x01e60*/ u64 xgxs_gen_err_mask;
-/*0x01e68*/ u64 xgxs_gen_err_alarm;
-/*0x01e70*/ u64 asic_ntwk_err_reg;
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x01e78*/ u64 asic_ntwk_err_mask;
-/*0x01e80*/ u64 asic_ntwk_err_alarm;
-/*0x01e88*/ u64 asic_gpio_err_reg;
-#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
-/*0x01e90*/ u64 asic_gpio_err_mask;
-/*0x01e98*/ u64 asic_gpio_err_alarm;
-/*0x01ea0*/ u64 xgmac_gen_status;
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
-/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
-#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
-#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
- u8 unused01f40[0x01f40-0x01ed0];
-
-/*0x01f40*/ u64 xmac_gen_cfg;
-#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
-/*0x01f48*/ u64 xmac_timestamp;
-#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
-/*0x01f50*/ u64 xmac_stats_gen_cfg;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
-/*0x01f58*/ u64 xmac_stats_sys_cmd;
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x01f60*/ u64 xmac_stats_sys_data;
-#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused01f80[0x01f80-0x01f68];
-
-/*0x01f80*/ u64 asic_ntwk_ctrl;
-#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
-/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
-#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
-/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
-#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
-/*0x01f98*/ u64 xmac_cfg_port[3];
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
-/*0x01fb0*/ u64 xmac_station_addr_port[2];
-#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
- u8 unused02020[0x02020-0x01fc0];
-
-/*0x02020*/ u64 lag_cfg;
-#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
-#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
-#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
-/*0x02028*/ u64 lag_status;
-#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
-#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
- vxge_vBIT(val, 8, 8)
-/*0x02030*/ u64 lag_active_passive_cfg;
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
- vxge_vBIT(val, 32, 16)
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 lag_lacp_cfg;
-#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
-#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
-#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
-/*0x02048*/ u64 lag_timer_cfg_1;
-#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
-/*0x02050*/ u64 lag_timer_cfg_2;
-#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
-/*0x02058*/ u64 lag_sys_id;
-#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
-/*0x02060*/ u64 lag_sys_cfg;
-#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
- u8 unused02070[0x02070-0x02068];
-
-/*0x02070*/ u64 lag_aggr_addr_cfg[2];
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
-/*0x02080*/ u64 lag_aggr_id_cfg[2];
-#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
-/*0x02090*/ u64 lag_aggr_admin_key[2];
-#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020a0*/ u64 lag_aggr_alt_admin_key;
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
-/*0x020a8*/ u64 lag_aggr_oper_key[2];
-#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x020c8*/ u64 lag_aggr_partner_info[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
- vxge_vBIT(val, 16, 16)
-/*0x020d8*/ u64 lag_aggr_state[2];
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
- u8 unused020f0[0x020f0-0x020e8];
-
-/*0x020f0*/ u64 lag_port_cfg[2];
-#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
-/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
-/*0x02110*/ u64 lag_port_actor_admin_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x02140*/ u64 lag_port_partner_admin_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02150*/ u64 lag_port_to_aggr[2];
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
-/*0x02160*/ u64 lag_port_actor_oper_key[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x02170*/ u64 lag_port_actor_oper_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
- vxge_vBIT(val, 0, 48)
-/*0x02190*/ u64 lag_port_partner_oper_info[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x021a0*/ u64 lag_port_partner_oper_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
- vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x021b0*/ u64 lag_port_state_vars[2];
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
- vxge_mBIT(32)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
- vxge_mBIT(33)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
- vxge_vBIT(val, 41, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
- vxge_vBIT(val, 60, 4)
-/*0x021c0*/ u64 lag_port_timer_cntr[2];
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 40, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
- vxge_vBIT(val, 56, 8)
- u8 unused02208[0x02700-0x021d0];
-
-/*0x02700*/ u64 rtdma_int_status;
-#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
-#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
-#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
-#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
-/*0x02708*/ u64 rtdma_int_mask;
-/*0x02710*/ u64 pda_alarm_reg;
-#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
-/*0x02718*/ u64 pda_alarm_mask;
-/*0x02720*/ u64 pda_alarm_alarm;
-/*0x02728*/ u64 pcc_error_reg;
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
-/*0x02730*/ u64 pcc_error_mask;
-/*0x02738*/ u64 pcc_error_alarm;
-/*0x02740*/ u64 lso_error_reg;
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
-/*0x02748*/ u64 lso_error_mask;
-/*0x02750*/ u64 lso_error_alarm;
-/*0x02758*/ u64 sm_error_reg;
-#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
-/*0x02760*/ u64 sm_error_mask;
-/*0x02768*/ u64 sm_error_alarm;
-
- u8 unused027a8[0x027a8-0x02770];
-
-/*0x027a8*/ u64 txd_ownership_ctrl;
-#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
-/*0x027b0*/ u64 pcc_cfg;
-#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
-/*0x027b8*/ u64 pcc_control;
-#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
-#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
-/*0x027c0*/ u64 pda_status1;
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
-/*0x027c8*/ u64 rtdma_bw_timer;
-#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
-
- u8 unused02900[0x02900-0x027d0];
-/*0x02900*/ u64 g3cmct_int_status;
-#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x02908*/ u64 g3cmct_int_mask;
-/*0x02910*/ u64 g3cmct_err_reg;
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x02918*/ u64 g3cmct_err_mask;
-/*0x02920*/ u64 g3cmct_err_alarm;
- u8 unused03000[0x03000-0x02928];
-
-/*0x03000*/ u64 mc_int_status;
-#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
-#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
-#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
-#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
-/*0x03008*/ u64 mc_int_mask;
-/*0x03010*/ u64 mc_err_reg;
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
-#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
-/*0x03018*/ u64 mc_err_mask;
-/*0x03020*/ u64 mc_err_alarm;
-/*0x03028*/ u64 grocrc_alarm_reg;
-#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
-/*0x03030*/ u64 grocrc_alarm_mask;
-/*0x03038*/ u64 grocrc_alarm_alarm;
- u8 unused03100[0x03100-0x03040];
-
-/*0x03100*/ u64 rx_thresh_cfg_repl;
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
-#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
- u8 unused033b8[0x033b8-0x03108];
-
-/*0x033b8*/ u64 fbmc_ecc_cfg;
-#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
- u8 unused03400[0x03400-0x033c0];
-
-/*0x03400*/ u64 pcipif_int_status;
-#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
-#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
-#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
-#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
-#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
- vxge_mBIT(19)
-/*0x03408*/ u64 pcipif_int_mask;
-/*0x03410*/ u64 dbecc_err_reg;
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
-/*0x03418*/ u64 dbecc_err_mask;
-/*0x03420*/ u64 dbecc_err_alarm;
-/*0x03428*/ u64 sbecc_err_reg;
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
-/*0x03430*/ u64 sbecc_err_mask;
-/*0x03438*/ u64 sbecc_err_alarm;
-/*0x03440*/ u64 general_err_reg;
-#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
-/*0x03448*/ u64 general_err_mask;
-/*0x03450*/ u64 general_err_alarm;
-/*0x03458*/ u64 srpcim_msg_reg;
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
- vxge_mBIT(0)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
- vxge_mBIT(1)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
- vxge_mBIT(2)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
- vxge_mBIT(3)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
- vxge_mBIT(4)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
- vxge_mBIT(5)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
- vxge_mBIT(6)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
- vxge_mBIT(7)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
- vxge_mBIT(8)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
- vxge_mBIT(9)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
- vxge_mBIT(10)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
- vxge_mBIT(11)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
- vxge_mBIT(12)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
- vxge_mBIT(13)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
- vxge_mBIT(14)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
- vxge_mBIT(15)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
- vxge_mBIT(16)
-/*0x03460*/ u64 srpcim_msg_mask;
-/*0x03468*/ u64 srpcim_msg_alarm;
- u8 unused03600[0x03600-0x03470];
-
-/*0x03600*/ u64 gcmg1_int_status;
-#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
-#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
-/*0x03608*/ u64 gcmg1_int_mask;
- u8 unused03a00[0x03a00-0x03610];
-
-/*0x03a00*/ u64 pcmg1_int_status;
-#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
-/*0x03a08*/ u64 pcmg1_int_mask;
- u8 unused04000[0x04000-0x03a10];
-
-/*0x04000*/ u64 one_int_status;
-#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
- vxge_mBIT(13)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
- vxge_mBIT(14)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
-#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
-#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
-#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
-#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
-#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
-/*0x04008*/ u64 one_int_mask;
- u8 unused04818[0x04818-0x04010];
-
-/*0x04818*/ u64 noa_wct_ctrl;
-#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
-/*0x04820*/ u64 rc_cfg2;
-#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
-/*0x04828*/ u64 rc_cfg3;
-#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
-/*0x04830*/ u64 rx_multi_cast_ctrl1;
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
-/*0x04838*/ u64 rxdm_dbg_rd;
-#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
-/*0x04840*/ u64 rxdm_dbg_rd_data;
-#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x04848*/ u64 rqa_top_prty_for_vh[17];
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused04900[0x04900-0x048d0];
-
-/*0x04900*/ u64 tim_status;
-#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
-/*0x04908*/ u64 tim_ecc_enable;
-#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
-/*0x04910*/ u64 tim_bp_ctrl;
-#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
-#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
-#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
-/*0x04918*/ u64 tim_resource_assignment_vh[17];
-#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
-#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
- u8 unused04b00[0x04b00-0x04a28];
-
-/*0x04b00*/ u64 gcmg2_int_status;
-#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
-#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
-/*0x04b08*/ u64 gcmg2_int_mask;
-/*0x04b10*/ u64 gxtmc_err_reg;
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
- vxge_mBIT(21)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
- vxge_mBIT(22)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
- vxge_mBIT(24)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
- vxge_mBIT(25)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
-/*0x04b18*/ u64 gxtmc_err_mask;
-/*0x04b20*/ u64 gxtmc_err_alarm;
-/*0x04b28*/ u64 cmc_err_reg;
-#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
-/*0x04b30*/ u64 cmc_err_mask;
-/*0x04b38*/ u64 cmc_err_alarm;
-/*0x04b40*/ u64 gcp_err_reg;
-#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
-#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
-/*0x04b48*/ u64 gcp_err_mask;
-/*0x04b50*/ u64 gcp_err_alarm;
- u8 unused04f00[0x04f00-0x04b58];
-
-/*0x04f00*/ u64 pcmg2_int_status;
-#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
-/*0x04f08*/ u64 pcmg2_int_mask;
-/*0x04f10*/ u64 pxtmc_err_reg;
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
-/*0x04f18*/ u64 pxtmc_err_mask;
-/*0x04f20*/ u64 pxtmc_err_alarm;
-/*0x04f28*/ u64 cp_err_reg;
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
-#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
-#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
-#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
-#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
-#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
-#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
-#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
-#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
-#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
-/*0x04f30*/ u64 cp_err_mask;
-/*0x04f38*/ u64 cp_err_alarm;
- u8 unused04fe8[0x04f50-0x04f40];
-
-/*0x04f50*/ u64 cp_exc_reg;
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
-/*0x04f58*/ u64 cp_exc_mask;
-/*0x04f60*/ u64 cp_exc_alarm;
-/*0x04f68*/ u64 cp_exc_cause;
-#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
- u8 unused05200[0x05200-0x04f70];
-
-/*0x05200*/ u64 msg_int_status;
-#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
-#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
-/*0x05208*/ u64 msg_int_mask;
-/*0x05210*/ u64 tim_err_reg;
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
-#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
-/*0x05218*/ u64 tim_err_mask;
-/*0x05220*/ u64 tim_err_alarm;
-/*0x05228*/ u64 msg_err_reg;
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
- vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
-/*0x05230*/ u64 msg_err_mask;
-/*0x05238*/ u64 msg_err_alarm;
- u8 unused05340[0x05340-0x05240];
-
-/*0x05340*/ u64 msg_exc_reg;
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
-/*0x05348*/ u64 msg_exc_mask;
-/*0x05350*/ u64 msg_exc_alarm;
-/*0x05358*/ u64 msg_exc_cause;
-#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
- u8 unused05368[0x05380-0x05360];
-
-/*0x05380*/ u64 msg_err2_reg;
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
- vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
- vxge_mBIT(13)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
- vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
- vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
- vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
- vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
- vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
- vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
- vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
- vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
- vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
- vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
- vxge_mBIT(28)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(30)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
-/*0x05388*/ u64 msg_err2_mask;
-/*0x05390*/ u64 msg_err2_alarm;
-/*0x05398*/ u64 msg_err3_reg;
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
-/*0x053a0*/ u64 msg_err3_mask;
-/*0x053a8*/ u64 msg_err3_alarm;
- u8 unused05600[0x05600-0x053b0];
-
-/*0x05600*/ u64 fau_gen_err_reg;
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
-#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
-/*0x05608*/ u64 fau_gen_err_mask;
-/*0x05610*/ u64 fau_gen_err_alarm;
-/*0x05618*/ u64 fau_ecc_err_reg;
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 4, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 8, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 14, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 16, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
- vxge_vBIT(val, 18, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
- vxge_vBIT(val, 20, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
-/*0x05620*/ u64 fau_ecc_err_mask;
-/*0x05628*/ u64 fau_ecc_err_alarm;
- u8 unused05658[0x05658-0x05630];
-/*0x05658*/ u64 fau_pa_cfg;
-#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
- u8 unused05668[0x05668-0x05660];
-
-/*0x05668*/ u64 dbg_stats_fau_rx_path;
-#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused056c0[0x056c0-0x05670];
-
-/*0x056c0*/ u64 fau_lag_cfg;
-#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
- u8 unused05800[0x05800-0x056c8];
-
-/*0x05800*/ u64 tpa_int_status;
-#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
-#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
-#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
-/*0x05808*/ u64 tpa_int_mask;
-/*0x05810*/ u64 orp_err_reg;
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
-#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
-#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
-/*0x05818*/ u64 orp_err_mask;
-/*0x05820*/ u64 orp_err_alarm;
-/*0x05828*/ u64 ptm_alarm_reg;
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
-/*0x05830*/ u64 ptm_alarm_mask;
-/*0x05838*/ u64 ptm_alarm_alarm;
-/*0x05840*/ u64 tpa_error_reg;
-#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
-/*0x05848*/ u64 tpa_error_mask;
-/*0x05850*/ u64 tpa_error_alarm;
-/*0x05858*/ u64 tpa_global_cfg;
-#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
- u8 unused05868[0x05870-0x05860];
-
-/*0x05870*/ u64 ptm_ecc_cfg;
-#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
-/*0x05878*/ u64 ptm_phase_cfg;
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
- u8 unused05898[0x05898-0x05880];
-
-/*0x05898*/ u64 dbg_stats_tpa_tx_path;
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused05900[0x05900-0x058a0];
-
-/*0x05900*/ u64 tmac_int_status;
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
-/*0x05908*/ u64 tmac_int_mask;
-/*0x05910*/ u64 txmac_gen_err_reg;
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
-/*0x05918*/ u64 txmac_gen_err_mask;
-/*0x05920*/ u64 txmac_gen_err_alarm;
-/*0x05928*/ u64 txmac_ecc_err_reg;
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
-/*0x05930*/ u64 txmac_ecc_err_mask;
-/*0x05938*/ u64 txmac_ecc_err_alarm;
- u8 unused05978[0x05978-0x05940];
-
-/*0x05978*/ u64 dbg_stat_tx_any_frms;
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused059a0[0x059a0-0x05980];
-
-/*0x059a0*/ u64 txmac_link_util_port[3];
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
-/*0x059b8*/ u64 txmac_cfg0_port[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
-/*0x059d0*/ u64 txmac_cfg1_port[3];
-#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
-/*0x059e8*/ u64 txmac_status_port[3];
-#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
- u8 unused05a20[0x05a20-0x05a00];
-
-/*0x05a20*/ u64 lag_distrib_dest;
-#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
-/*0x05a28*/ u64 lag_marker_cfg;
-#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
-/*0x05a30*/ u64 lag_tx_cfg;
-#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
-#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
-/*0x05a38*/ u64 lag_tx_status;
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused05d48[0x05d48-0x05a40];
-
-/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
-#define \
-VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
- vxge_vBIT(val, 0, 64)
- u8 unused06420[0x06420-0x05dd0];
-
-/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
-#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];
-
-/*0x06530*/ u64 debug_stats0;
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
-/*0x06538*/ u64 debug_stats1;
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
-/*0x06540*/ u64 debug_stats2;
-#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
-/*0x06548*/ u64 debug_stats3_vplane[17];
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
-/*0x065d0*/ u64 debug_stats4_vplane[17];
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
-
- u8 unused07000[0x07000-0x06658];
-
-/*0x07000*/ u64 mrpcim_general_int_status;
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
-/*0x07008*/ u64 mrpcim_general_int_mask;
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
-/*0x07010*/ u64 mrpcim_ppif_int_status;
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
- vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
- vxge_mBIT(32)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
- vxge_mBIT(33)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
- vxge_mBIT(34)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
- vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
- vxge_mBIT(36)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
- vxge_mBIT(37)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
- vxge_mBIT(38)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
- vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
- vxge_mBIT(40)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
- vxge_mBIT(41)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
- vxge_mBIT(42)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
- vxge_mBIT(43)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
- vxge_mBIT(44)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
- vxge_mBIT(45)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
- vxge_mBIT(46)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
- vxge_mBIT(47)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
- vxge_mBIT(55)
-/*0x07018*/ u64 mrpcim_ppif_int_mask;
- u8 unused07028[0x07028-0x07020];
-
-/*0x07028*/ u64 ini_errors_reg;
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
-#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
-#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
-#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
-#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
-/*0x07030*/ u64 ini_errors_mask;
-/*0x07038*/ u64 ini_errors_alarm;
-/*0x07040*/ u64 dma_errors_reg;
-#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
-/*0x07048*/ u64 dma_errors_mask;
-/*0x07050*/ u64 dma_errors_alarm;
-/*0x07058*/ u64 tgt_errors_reg;
-#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
-/*0x07060*/ u64 tgt_errors_mask;
-/*0x07068*/ u64 tgt_errors_alarm;
-/*0x07070*/ u64 config_errors_reg;
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
-/*0x07078*/ u64 config_errors_mask;
-/*0x07080*/ u64 config_errors_alarm;
- u8 unused07090[0x07090-0x07088];
-
-/*0x07090*/ u64 crdt_errors_reg;
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
- vxge_mBIT(47)
-/*0x07098*/ u64 crdt_errors_mask;
-/*0x070a0*/ u64 crdt_errors_alarm;
- u8 unused070b0[0x070b0-0x070a8];
-
-/*0x070b0*/ u64 mrpcim_general_errors_reg;
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
- vxge_mBIT(47)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
-/*0x070b8*/ u64 mrpcim_general_errors_mask;
-/*0x070c0*/ u64 mrpcim_general_errors_alarm;
- u8 unused070d0[0x070d0-0x070c8];
-
-/*0x070d0*/ u64 pll_errors_reg;
-#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
-/*0x070d8*/ u64 pll_errors_mask;
-/*0x070e0*/ u64 pll_errors_alarm;
-/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
-/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
-/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
-#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
-/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
- u8 unused07128[0x07128-0x07118];
-
-/*0x07128*/ u64 crdt_errors_vplane_reg[17];
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
- vxge_mBIT(7)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
- vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
- vxge_mBIT(31)
-/*0x07130*/ u64 crdt_errors_vplane_mask[17];
-/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
- u8 unused072f0[0x072f0-0x072c0];
-
-/*0x072f0*/ u64 mrpcim_rst_in_prog;
-#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
-/*0x072f8*/ u64 mrpcim_reg_modified;
-#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
-
- u8 unused07378[0x07378-0x07300];
-
-/*0x07378*/ u64 write_arb_pending;
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
-/*0x07380*/ u64 read_arb_pending;
-#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
-/*0x07388*/ u64 dmaif_dmadbl_pending;
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
- vxge_vBIT(val, 13, 51)
-/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
- vxge_vBIT(val, 0, 8)
-/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
- vxge_vBIT(val, 4, 12)
- u8 unused07500[0x07500-0x074a0];
-
-/*0x07500*/ u64 mrpcim_general_cfg1;
-#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
-/*0x07508*/ u64 mrpcim_general_cfg2;
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
- vxge_vBIT(val, 47, 5)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
-/*0x07510*/ u64 mrpcim_general_cfg3;
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
- vxge_vBIT(val, 36, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
-/*0x07518*/ u64 mrpcim_stats_start_host_addr;
-#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
- vxge_vBIT(val, 0, 57)
-
- u8 unused07950[0x07950-0x07520];
-
-/*0x07950*/ u64 rdcrdtarb_cfg0;
-#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 26, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 34, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
-#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
- u8 unused07be8[0x07be8-0x07958];
-
-/*0x07be8*/ u64 bf_sw_reset;
-#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x07bf0*/ u64 sw_reset_status;
-#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
-#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
- u8 unused07d30[0x07d30-0x07bf8];
-
-/*0x07d30*/ u64 mrpcim_debug_stats0;
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
-/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed0*/ u64 mrpcim_debug_stats4;
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed8*/ u64 genstats_count01;
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
-/*0x07ee0*/ u64 genstats_count23;
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
-/*0x07ee8*/ u64 genstats_count4;
-#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
-/*0x07ef0*/ u64 genstats_count5;
-#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
-
- u8 unused07f08[0x07f08-0x07ef8];
-
-/*0x07f08*/ u64 genstats_cfg[6];
-#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
-/*0x07f38*/ u64 genstat_64bit_cfg;
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
- u8 unused08000[0x08000-0x07f40];
-/*0x08000*/ u64 gcmg3_int_status;
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
-/*0x08008*/ u64 gcmg3_int_mask;
- u8 unused09000[0x09000-0x8010];
-
-/*0x09000*/ u64 g3ifcmd_fb_int_status;
-#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09008*/ u64 g3ifcmd_fb_int_mask;
-/*0x09010*/ u64 g3ifcmd_fb_err_reg;
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09018*/ u64 g3ifcmd_fb_err_mask;
-/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
-
- u8 unused09400[0x09400-0x09028];
-
-/*0x09400*/ u64 g3ifcmd_cmu_int_status;
-#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
-/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
-/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
-
- u8 unused09800[0x09800-0x09428];
-
-/*0x09800*/ u64 g3ifcmd_cml_int_status;
-#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09808*/ u64 g3ifcmd_cml_int_mask;
-/*0x09810*/ u64 g3ifcmd_cml_err_reg;
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09818*/ u64 g3ifcmd_cml_err_mask;
-/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
- u8 unused09b00[0x09b00-0x09828];
-
-/*0x09b00*/ u64 vpath_to_vplane_map[17];
-#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
- vxge_vBIT(val, 3, 5)
- u8 unused09c30[0x09c30-0x09b88];
-
-/*0x09c30*/ u64 xgxs_cfg_port[2];
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
-/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
- vxge_vBIT(val, 16, 48)
-/*0x09c50*/ u64 xgxs_rxber_status_port[2];
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
- vxge_vBIT(val, 48, 16)
-/*0x09c60*/ u64 xgxs_status_port[2];
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR BIT(11)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
- vxge_vBIT(val, 36, 4)
-/*0x09c70*/ u64 xgxs_pma_reset_port[2];
-#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
- u8 unused09c90[0x09c90-0x09c80];
-
-/*0x09c90*/ u64 xgxs_static_cfg_port[2];
-#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
- u8 unused09d40[0x09d40-0x09ca0];
-
-/*0x09d40*/ u64 xgxs_info_port[2];
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
-/*0x09d50*/ u64 ratemgmt_cfg_port[2];
-#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
-/*0x09d60*/ u64 ratemgmt_status_port[2];
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
- u8 unused09d80[0x09d80-0x09d70];
-
-/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
-#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
-/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
- vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
-/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
- vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
- vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
-/*0x09db0*/ u64 anbe_cfg_port[2];
-#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
-#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
-/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused09de0[0x09de0-0x09dd0];
-
-/*0x09de0*/ u64 anbe_fw_mstr_port[2];
-#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
-#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
-/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
- vxge_mBIT(3)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
- vxge_mBIT(7)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
- vxge_mBIT(11)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
- vxge_mBIT(15)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
- vxge_mBIT(27)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
- vxge_mBIT(35)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
- vxge_mBIT(39)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
- vxge_mBIT(43)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
- vxge_mBIT(47)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
-vxge_mBIT(51)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
- vxge_mBIT(55)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
- vxge_vBIT(val, 60, 4)
-/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
- vxge_mBIT(32)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
- vxge_mBIT(33)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
- vxge_mBIT(40)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
- vxge_mBIT(41)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
- vxge_mBIT(42)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
- vxge_mBIT(50)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
- vxge_vBIT(val, 54, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 59, 5)
-/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused09e30[0x09e30-0x09e20];
-
-/*0x09e30*/ u64 antp_gen_cfg_port[2];
-/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
- vxge_vBIT(val, 10, 6)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
- vxge_mBIT(23)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
- vxge_mBIT(27)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
- vxge_mBIT(35)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
- vxge_mBIT(43)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
- vxge_mBIT(51)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
- vxge_mBIT(59)
-/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
- vxge_vBIT(val, 4, 7)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 11, 5)
-/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
- vxge_vBIT(val, 5, 11)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
- vxge_vBIT(val, 32, 16)
-/*0x09e70*/ u64 mdio_mgr_access_port[2];
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
- u8 unused0a200[0x0a200-0x09e80];
-/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused0a400[0x0a400-0x0a288];
-
-/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
- u8 unused0ac90[0x0ac90-0x0a488];
-} __packed;
-
-/*VXGE_HW_SRPCIM_REGS_H*/
-struct vxge_hw_srpcim_reg {
-
-/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
-#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
- vxge_vBIT(val, 0, 32)
- u8 unused00100[0x00100-0x00008];
-
-/*0x00100*/ u64 srpcim_pcipif_int_status;
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
- BIT(11)
-/*0x00108*/ u64 srpcim_pcipif_int_mask;
-/*0x00110*/ u64 mrpcim_msg_reg;
-#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
-/*0x00118*/ u64 mrpcim_msg_mask;
-/*0x00120*/ u64 mrpcim_msg_alarm;
-/*0x00128*/ u64 vpath_msg_reg;
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
-/*0x00130*/ u64 vpath_msg_mask;
-/*0x00138*/ u64 vpath_msg_alarm;
- u8 unused00160[0x00160-0x00140];
-
-/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
-/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
- vxge_vBIT(val, 0, 5)
-/*0x00180*/ u64 vpath_to_srpcim_rmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
- u8 unused00200[0x00200-0x00188];
-
-/*0x00200*/ u64 srpcim_general_int_status;
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
- u8 unused00210[0x00210-0x00208];
-
-/*0x00210*/ u64 srpcim_general_int_mask;
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
- u8 unused00220[0x00220-0x00218];
-
-/*0x00220*/ u64 srpcim_ppif_int_status;
-
-/*0x00228*/ u64 srpcim_ppif_int_mask;
-/*0x00230*/ u64 srpcim_gen_errors_reg;
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
-/*0x00238*/ u64 srpcim_gen_errors_mask;
-/*0x00240*/ u64 srpcim_gen_errors_alarm;
-/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
-/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
-/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
-/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;
-
-/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
-/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
- u8 unused00280[0x00280-0x00278];
-
-/*0x00280*/ u64 pf_sw_reset;
-#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x00288*/ u64 srpcim_general_cfg1;
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
-/*0x00290*/ u64 srpcim_interrupt_cfg1;
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
- u8 unused002a8[0x002a8-0x00298];
-
-/*0x002a8*/ u64 srpcim_clear_msix_mask;
-#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
-/*0x002b0*/ u64 srpcim_set_msix_mask;
-#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
-/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
-#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
-/*0x002c0*/ u64 srpcim_rst_in_prog;
-#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
-/*0x002c8*/ u64 srpcim_reg_modified;
-#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
-/*0x002d0*/ u64 tgt_pf_illegal_access;
-#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
-/*0x002d8*/ u64 srpcim_msix_status;
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
- u8 unused00880[0x00880-0x002e0];
-
-/*0x00880*/ u64 xgmac_sr_int_status;
-#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
-/*0x00888*/ u64 xgmac_sr_int_mask;
-/*0x00890*/ u64 asic_ntwk_sr_err_reg;
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
- BIT(11)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
-/*0x00898*/ u64 asic_ntwk_sr_err_mask;
-/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
- u8 unused008c0[0x008c0-0x008a8];
-
-/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused00900[0x00900-0x008c8];
-
-/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
-#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00908*/ u64 umq_vh_data_list_empty;
-#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
- BIT(0)
-/*0x00910*/ u64 wde_cfg;
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
-#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
-#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
-#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
-#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
-#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
-#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
-#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
-#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)
-
-} __packed;
-
-/*VXGE_HW_VPMGMT_REGS_H*/
-struct vxge_hw_vpmgmt_reg {
-
- u8 unused00040[0x00040-0x00000];
-
-/*0x00040*/ u64 vpath_to_func_map_cfg1;
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
- vxge_vBIT(val, 3, 5)
-/*0x00048*/ u64 vpath_is_first;
-#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
-/*0x00050*/ u64 srpcim_to_vpath_wmsg;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused00100[0x00100-0x00060];
-
-/*0x00100*/ u64 tim_vpath_assignment;
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
- u8 unused00140[0x00140-0x00108];
-
-/*0x00140*/ u64 rqa_top_prty_for_vp;
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused001c0[0x001c0-0x00148];
-
-/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
- vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
- vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
- vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
- vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
-/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
- vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
- vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
- vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
- vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
- vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
- vxge_vBIT(val, 37, 3)
-/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
- vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
- vxge_vBIT(val, 50, 14)
-/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
- vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
- vxge_mBIT(59)
- u8 unused00240[0x00240-0x00208];
-
-/*0x00240*/ u64 xmac_vsport_choices_vp;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused00260[0x00260-0x00248];
-
-/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
- vxge_mBIT(11)
-/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
- vxge_mBIT(3)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
-/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
- vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
- vxge_vBIT(val, 32, 4)
-/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
- vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
- vxge_vBIT(val, 32, 16)
-/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
-/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
- u8 unused002c0[0x002c0-0x002a8];
-
-/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
-#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
-/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
- u8 unused00300[0x00300-0x002e0];
-
-/*0x00300*/ u64 wol_mp_crc;
-#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
-/*0x00308*/ u64 wol_mp_mask_a;
-#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x00310*/ u64 wol_mp_mask_b;
-#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
- u8 unused00360[0x00360-0x00318];
-
-/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
-/*0x00368*/ u64 rx_datapath_util_vp_clone;
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
- u8 unused00380[0x00380-0x00370];
-
-/*0x00380*/ u64 tx_datapath_util_vp_clone;
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
-
-} __packed;
-
-struct vxge_hw_vpath_reg {
-
- u8 unused00300[0x00300];
-
-/*0x00300*/ u64 usdc_vpath;
-#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
- u8 unused00a00[0x00a00-0x00308];
-
-/*0x00a00*/ u64 wrdma_alarm_status;
-#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
-/*0x00a08*/ u64 wrdma_alarm_mask;
- u8 unused00a30[0x00a30-0x00a10];
-
-/*0x00a30*/ u64 prc_alarm_reg;
-#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
-#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
-/*0x00a38*/ u64 prc_alarm_mask;
-/*0x00a40*/ u64 prc_alarm_alarm;
-/*0x00a48*/ u64 prc_cfg1;
-#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
-#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
-#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
-#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
- u8 unused00a60[0x00a60-0x00a50];
-
-/*0x00a60*/ u64 prc_cfg4;
-#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
-#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
-#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
-#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
-#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
-#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
-/*0x00a68*/ u64 prc_cfg5;
-#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
-/*0x00a70*/ u64 prc_cfg6;
-#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
-#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
-#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
-#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
-#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
-#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
-#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
-#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
-/*0x00a78*/ u64 prc_cfg7;
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
-#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
-#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
-#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
-/*0x00a80*/ u64 tim_dest_addr;
-#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
-/*0x00a88*/ u64 prc_rxd_doorbell;
-#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
-/*0x00a90*/ u64 rqa_prty_for_vp;
-#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
-/*0x00a98*/ u64 rxdmem_size;
-#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
-/*0x00aa0*/ u64 frm_in_progress_cnt;
-#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00aa8*/ u64 rx_multi_cast_stats;
-#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
-/*0x00ab0*/ u64 rx_frm_transferred;
-#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x00ab8*/ u64 rxd_returned;
-#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
- u8 unused00c00[0x00c00-0x00ac0];
-
-/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
-/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
-#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
-/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
-/*0x00c48*/ u64 kdfc_drbl_triplet_total;
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
- vxge_vBIT(val, 17, 15)
- u8 unused00c60[0x00c60-0x00c50];
-
-/*0x00c60*/ u64 usdc_drbl_ctrl;
-#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
-/*0x00c68*/ u64 usdc_vp_ready;
-#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
-#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
-#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
-/*0x00c70*/ u64 kdfc_status;
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
- u8 unused00c80[0x00c80-0x00c78];
-
-/*0x00c80*/ u64 xmac_rpa_vcfg;
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
-/*0x00c88*/ u64 rxmac_vcfg0;
-#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
-#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
-#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
-#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
-#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
-/*0x00c90*/ u64 rxmac_vcfg1;
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
-/*0x00c98*/ u64 rts_access_steer_ctrl;
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
-/*0x00ca0*/ u64 rts_access_steer_data0;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00ca8*/ u64 rts_access_steer_data1;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused00d00[0x00d00-0x00cb0];
-
-/*0x00d00*/ u64 xmac_vsport_choice;
-#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
-/*0x00d08*/ u64 xmac_stats_cfg;
-/*0x00d10*/ u64 xmac_stats_access_cmd;
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x00d18*/ u64 xmac_stats_access_data;
-#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
- u8 unused00d30[0x00d30-0x00d28];
-
-/*0x00d30*/ u64 xgmac_vp_int_status;
-#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
- vxge_mBIT(3)
-/*0x00d38*/ u64 xgmac_vp_int_mask;
-/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
- vxge_mBIT(11)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
- vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
-/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
- u8 unused00d80[0x00d80-0x00d58];
-
-/*0x00d80*/ u64 rtdma_bw_ctrl;
-#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
-#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
-/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
-#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
- vxge_vBIT(val, 37, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
- vxge_vBIT(val, 61, 3)
-/*0x00d90*/ u64 pda_pcc_job_monitor;
-#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
-/*0x00d98*/ u64 tx_protocol_assist_cfg;
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
- u8 unused01000[0x01000-0x00da0];
-
-/*0x01000*/ u64 tim_cfg1_int_num[4];
-#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
-/*0x01020*/ u64 tim_cfg2_int_num[4];
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
-/*0x01040*/ u64 tim_cfg3_int_num[4];
-#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
-#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
-/*0x01060*/ u64 tim_wrkld_clc;
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
-#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
-/*0x01068*/ u64 tim_bitmap;
-#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
-/*0x01070*/ u64 tim_ring_assn;
-#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 tim_remap;
-#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
-#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
-#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
-#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
-/*0x01080*/ u64 tim_vpath_map;
-#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x01088*/ u64 tim_pci_cfg;
-#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
-#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
-#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
-#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
- u8 unused01100[0x01100-0x01090];
-
-/*0x01100*/ u64 sgrp_assign;
-#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
-/*0x01108*/ u64 sgrp_aoa_and_result;
-#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01110*/ u64 rpe_pci_cfg;
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
-#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
-#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
-/*0x01118*/ u64 rpe_lro_cfg;
-#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
-#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
-/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
-#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
-/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 32, 32)
-/*0x01130*/ u64 txpe_pci_nce_cfg;
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
- u8 unused01180[0x01180-0x01138];
-
-/*0x01180*/ u64 msg_qpad_en_cfg;
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
-#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
-/*0x01188*/ u64 msg_pci_cfg;
-#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
-#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
-#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
-#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
-/*0x01190*/ u64 umqdmq_ir_init;
-#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x01198*/ u64 dmq_ir_int;
-#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011a0*/ u64 dmq_bwr_init_add;
-#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011a8*/ u64 dmq_bwr_init_byte;
-#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011b0*/ u64 dmq_ir;
-#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
-/*0x011b8*/ u64 umq_int;
-#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
-#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
-/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
-#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
-/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
-#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
-/*0x011d8*/ u64 umq_bwr_init_add;
-#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011e0*/ u64 umq_bwr_init_byte;
-#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011e8*/ u64 gendma_int;
-/*0x011f0*/ u64 umqdmq_ir_init_notify;
-#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x011f8*/ u64 dmq_init_notify;
-#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x01200*/ u64 umq_init_notify;
-#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
- u8 unused01380[0x01380-0x01208];
-
-/*0x01380*/ u64 tpa_cfg;
-#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
-#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
-#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
- u8 unused01400[0x01400-0x01388];
-
-/*0x01400*/ u64 tx_vp_reset_discarded_frms;
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 48, 16)
- u8 unused01480[0x01480-0x01408];
-
-/*0x01480*/ u64 fau_rpa_vcfg;
-#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
-#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
-#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
- u8 unused014d0[0x014d0-0x01488];
-
-/*0x014d0*/ u64 dbg_stats_rx_mpa;
-#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
-/*0x014d8*/ u64 dbg_stats_rx_fau;
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused014f0[0x014f0-0x014e0];
-
-/*0x014f0*/ u64 fbmc_vp_rdy;
-#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
- u8 unused01e00[0x01e00-0x014f8];
-
-/*0x01e00*/ u64 vpath_pcipif_int_status;
-#define \
-VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
- vxge_mBIT(7)
-/*0x01e08*/ u64 vpath_pcipif_int_mask;
- u8 unused01e20[0x01e20-0x01e10];
-
-/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
-#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
- vxge_mBIT(3)
-/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
-/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
- u8 unused01ea0[0x01ea0-0x01e38];
-
-/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused02000[0x02000-0x01eb0];
-
-/*0x02000*/ u64 vpath_general_int_status;
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
-/*0x02008*/ u64 vpath_general_int_mask;
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
-/*0x02010*/ u64 vpath_ppif_int_status;
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
- vxge_mBIT(7)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
- vxge_mBIT(11)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(15)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(19)
-/*0x02018*/ u64 vpath_ppif_int_mask;
-/*0x02020*/ u64 kdfcctl_errors_reg;
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
-/*0x02028*/ u64 kdfcctl_errors_mask;
-/*0x02030*/ u64 kdfcctl_errors_alarm;
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 general_errors_reg;
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
-#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
-/*0x02048*/ u64 general_errors_mask;
-/*0x02050*/ u64 general_errors_alarm;
-/*0x02058*/ u64 pci_config_errors_reg;
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
-/*0x02060*/ u64 pci_config_errors_mask;
-/*0x02068*/ u64 pci_config_errors_alarm;
-/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
- vxge_mBIT(3)
-/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
-/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
-/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
-/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
- u8 unused02108[0x02108-0x020a0];
-
-/*0x02108*/ u64 kdfcctl_status;
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
-/*0x02110*/ u64 rsthdlr_status;
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
-/*0x02118*/ u64 fifo0_status;
-#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02120*/ u64 fifo1_status;
-#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02128*/ u64 fifo2_status;
-#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
- u8 unused02158[0x02158-0x02130];
-
-/*0x02158*/ u64 tgt_illegal_access;
-#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
- u8 unused02200[0x02200-0x02160];
-
-/*0x02200*/ u64 vpath_general_cfg1;
-#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
-/*0x02208*/ u64 vpath_general_cfg2;
-#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
-/*0x02210*/ u64 vpath_general_cfg3;
-#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
- u8 unused02220[0x02220-0x02218];
-
-/*0x02220*/ u64 kdfcctl_cfg0;
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
-
- u8 unused02268[0x02268-0x02228];
-
-/*0x02268*/ u64 stats_cfg;
-#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
-/*0x02270*/ u64 interrupt_cfg0;
-#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
- u8 unused02280[0x02280-0x02278];
-
-/*0x02280*/ u64 interrupt_cfg2;
-#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-/*0x02288*/ u64 one_shot_vect0_en;
-#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
-/*0x02290*/ u64 one_shot_vect1_en;
-#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
-/*0x02298*/ u64 one_shot_vect2_en;
-#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
-/*0x022a0*/ u64 one_shot_vect3_en;
-#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
- u8 unused022b0[0x022b0-0x022a8];
-
-/*0x022b0*/ u64 pci_config_access_cfg1;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
-/*0x022b8*/ u64 pci_config_access_cfg2;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
-/*0x022c0*/ u64 pci_config_access_status;
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused02300[0x02300-0x022c8];
-
-/*0x02300*/ u64 vpath_debug_stats0;
-#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02308*/ u64 vpath_debug_stats1;
-#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02310*/ u64 vpath_debug_stats2;
-#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
-/*0x02318*/ u64 vpath_debug_stats3;
-#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02320*/ u64 vpath_debug_stats4;
-#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02328*/ u64 vpath_debug_stats5;
-#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02330*/ u64 vpath_debug_stats6;
-#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02338*/ u64 vpath_genstats_count01;
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02340*/ u64 vpath_genstats_count23;
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02348*/ u64 vpath_genstats_count4;
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02350*/ u64 vpath_genstats_count5;
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused02648[0x02648-0x02358];
-} __packed;
-
-#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
-
-/* Capability lists */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link speeds. */
-#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
deleted file mode 100644
index ee164970b267..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ /dev/null
@@ -1,2428 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/prefetch.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-/*
- * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Enable vpath interrupts. The function is to be executed the last in
- * vpath initialization sequence.
- *
- * See also: vxge_hw_vpath_intr_disable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_status);
-
- readq(&vp_reg->vpath_general_int_status);
-
- /* Mask unwanted interrupts */
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- /* Unmask the individual interrupts */
-
- writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
- &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
-
- if (vpath->hldev->first_vp_id != vpath->vp_id)
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
- else
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_pio_mem_write32_upper(0,
- &vp_reg->vpath_general_int_mask);
-exit:
- return status;
-
-}
-
-/*
- * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Disable vpath interrupts. The function is to be executed last in the
- * vpath termination sequence.
- *
- * See also: vxge_hw_vpath_intr_enable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_general_int_mask);
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
-
-exit:
- return status;
-}
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
- return;
-
- vp_reg = fifo->vp_reg;
- config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
-
- if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- fifo->tim_tti_cfg1_saved = val64;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg1_saved;
-
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- ring->tim_rti_cfg1_saved = val64;
- writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-}
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
-{
- u64 val64 = fifo->tim_tti_cfg3_saved;
- u64 timer = (fifo->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
-
- writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- /* tti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg3_saved;
- u64 timer = (ring->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
-
- writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- /* rti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-/**
- * vxge_hw_channel_msix_mask - Mask MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function masks the MSIX interrupt for the given msix_id.
- */
-void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->set_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function unmasks the MSIX interrupt for the given msix_id.
- */
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function clears the MSIX interrupt for the given msix_id
- * when configured in MSIX one-shot mode.
- */
-void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
-{
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
-}
-
-/**
- * vxge_hw_device_set_intr_type - Updates the configuration
- * with new interrupt type.
- * @hldev: HW device handle.
- * @intr_mode: New interrupt type
- */
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
-{
-
- if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (intr_mode != VXGE_HW_INTR_MODE_DEF))
- intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
-
- hldev->config.intr_mode = intr_mode;
- return intr_mode;
-}
-
-/**
- * vxge_hw_device_intr_enable - Enable interrupts.
- * @hldev: HW device handle.
- *
- * Enable Titan interrupts. The function is to be executed last in the
- * Titan initialization sequence.
- *
- * See also: vxge_hw_device_intr_disable()
- */
-void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
-{
- u32 i;
- u64 val64;
- u32 val32;
-
- vxge_hw_device_mask_all(hldev);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_enable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
- val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
-
- if (val64 != 0) {
- writeq(val64, &hldev->common_reg->tim_int_status0);
-
- writeq(~val64, &hldev->common_reg->tim_int_mask0);
- }
-
- val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
-
- if (val32 != 0) {
- __vxge_hw_pio_mem_write32_upper(val32,
- &hldev->common_reg->tim_int_status1);
-
- __vxge_hw_pio_mem_write32_upper(~val32,
- &hldev->common_reg->tim_int_mask1);
- }
- }
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- vxge_hw_device_unmask_all(hldev);
-}
-
-/**
- * vxge_hw_device_intr_disable - Disable Titan interrupts.
- * @hldev: HW device handle.
- *
- * Disable Titan interrupts.
- *
- * See also: vxge_hw_device_intr_enable()
- */
-void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
-{
- u32 i;
-
- vxge_hw_device_mask_all(hldev);
-
- /* mask all the tim interrupts */
- writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
- __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
- &hldev->common_reg->tim_int_mask1);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_disable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-}
-
-/**
- * vxge_hw_device_mask_all - Mask all device interrupts.
- * @hldev: HW device handle.
- *
- * Mask all device interrupts.
- *
- * See also: vxge_hw_device_unmask_all()
- */
-void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
- VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_unmask_all - Unmask all device interrupts.
- * @hldev: HW device handle.
- *
- * Unmask all device interrupts.
- *
- * See also: vxge_hw_device_mask_all()
- */
-void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64 = 0;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_flush_io - Flush io writes.
- * @hldev: HW device handle.
- *
- * The function performs a read operation to flush io writes.
- *
- * Returns: void
- */
-void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
-{
- readl(&hldev->common_reg->titan_general_int_status);
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
- enum vxge_hw_event type)
-{
- switch (type) {
- case VXGE_HW_EVENT_UNKNOWN:
- break;
- case VXGE_HW_EVENT_RESET_START:
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- goto out;
- case VXGE_HW_EVENT_ALARM_CLEARED:
- goto out;
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- case VXGE_HW_EVENT_CRITICAL_ERR:
- case VXGE_HW_EVENT_SERR:
- break;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- break;
- default:
- vxge_assert(0);
- goto out;
- }
-
- /* notify driver */
- if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(hldev,
- type, vp_id);
-out:
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
- /*
-	 * If the link state is already down, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_DOWN)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_DOWN;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_down)
- hldev->uld_callbacks->link_down(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for a programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
- /*
-	 * If the link state is already up, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_UP)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_UP;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_up)
- hldev->uld_callbacks->link_up(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
-{
- u64 val64;
- u64 alarm_status;
- u64 pic_status;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
- u64 mask64;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath == NULL) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out2;
- }
-
- hldev = vpath->hldev;
- vp_reg = vpath->vp_reg;
- alarm_status = readq(&vp_reg->vpath_general_int_status);
-
- if (alarm_status == VXGE_HW_ALL_FOXES) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
- alarm_event);
- goto out;
- }
-
- sw_stats = vpath->sw_stats;
-
- if (alarm_status & ~(
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
- sw_stats->error_stats.unknown_alarms++;
-
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out;
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
- val64 = readq(&vp_reg->xgmac_vp_int_status);
-
- if (val64 &
- VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
- val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
- ))) {
- sw_stats->error_stats.network_sustained_fault++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_down_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_DOWN, alarm_event);
- }
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
- ))) {
-
- sw_stats->error_stats.network_sustained_ok++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_up_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_UP, alarm_event);
- }
-
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
- if (skip_alarms)
- return VXGE_HW_OK;
- }
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
- pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
- val64 = readq(&vp_reg->general_errors_reg);
- mask64 = readq(&vp_reg->general_errors_mask);
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
- ~mask64) {
- sw_stats->error_stats.ini_serr_det++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_SERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
- ~mask64) {
- sw_stats->error_stats.dblgen_fifo0_overflow++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
- ~mask64)
- sw_stats->error_stats.statsb_pif_chain_error++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
- ~mask64)
- sw_stats->error_stats.statsb_drop_timeout++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
- ~mask64)
- sw_stats->error_stats.target_illegal_access++;
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
- val64 = readq(&vp_reg->kdfcctl_errors_reg);
- mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->kdfcctl_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
- val64 = readq(&vp_reg->wrdma_alarm_status);
-
- if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
- val64 = readq(&vp_reg->prc_alarm_reg);
- mask64 = readq(&vp_reg->prc_alarm_mask);
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
- ~mask64)
- sw_stats->error_stats.prc_ring_bumps++;
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
- ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
- & ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
- & ~mask64) {
- sw_stats->error_stats.prc_quanta_size_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
- }
-out:
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
- if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
- (alarm_event == VXGE_HW_EVENT_UNKNOWN))
- return VXGE_HW_OK;
-
- __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
- if (alarm_event == VXGE_HW_EVENT_SERR)
- return VXGE_HW_ERR_CRITICAL;
-
- return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
- VXGE_HW_ERR_SLOT_FREEZE :
- (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
- VXGE_HW_ERR_VPATH;
-}
-
-/**
- * vxge_hw_device_begin_irq - Begin IRQ processing.
- * @hldev: HW device handle.
- * @skip_alarms: Do not clear the alarms
- * @reason: "Reason" for the interrupt, the value of Titan's
- * general_int_status register.
- *
- * The function performs two actions. It first checks whether the (possibly
- * shared) interrupt was raised by the device. Next, it masks the device
- * interrupts.
- *
- * Note:
- * vxge_hw_device_begin_irq() does not flush MMIO writes through the
- * bridge. Therefore, two back-to-back interrupts are potentially possible.
- *
- * Returns: 0, if the interrupt is not "ours" (note that in this case the
- * device remains enabled).
- * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
- * status.
- */
-enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
- u32 skip_alarms, u64 *reason)
-{
- u32 i;
- u64 val64;
- u64 adapter_status;
- u64 vpath_mask;
- enum vxge_hw_status ret = VXGE_HW_OK;
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- if (unlikely(!val64)) {
- /* not Titan interrupt */
- *reason = 0;
- ret = VXGE_HW_ERR_WRONG_IRQ;
- goto exit;
- }
-
- if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
-
- adapter_status = readq(&hldev->common_reg->adapter_status);
-
- if (adapter_status == VXGE_HW_ALL_FOXES) {
-
- __vxge_hw_device_handle_error(hldev,
- NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
- *reason = 0;
- ret = VXGE_HW_ERR_SLOT_FREEZE;
- goto exit;
- }
- }
-
- hldev->stats.sw_dev_info_stats.total_intr_cnt++;
-
- *reason = val64;
-
- vpath_mask = hldev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
-
- if (val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
- hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
-
- return VXGE_HW_OK;
- }
-
- hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
-
- if (unlikely(val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
-
- enum vxge_hw_status error_level = VXGE_HW_OK;
-
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- ret = __vxge_hw_vpath_alarm_process(
- &hldev->virtual_paths[i], skip_alarms);
-
- error_level = VXGE_HW_SET_LEVEL(ret, error_level);
-
- if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
- (ret == VXGE_HW_ERR_SLOT_FREEZE)))
- break;
- }
-
- ret = error_level;
- }
-exit:
- return ret;
-}
-
-/**
- * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
- * condition that has caused the Tx and RX interrupt.
- * @hldev: HW device.
- *
- * Acknowledge (that is, clear) the condition that has caused
- * the Tx and Rx interrupt.
- * See also: vxge_hw_device_begin_irq(),
- * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
- */
-void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
-{
-
- if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status0);
- }
-
- if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status1);
- }
-}
-
-/*
- * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
- * @channel: Channel
- * @dtrh: Buffer to return the DTR pointer
- *
- * Allocates a dtr from the reserve array. If the reserve array is empty,
- * it swaps the reserve and free arrays.
- *
- */
-static enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
-{
- if (channel->reserve_ptr - channel->reserve_top > 0) {
-_alloc_after_swap:
- *dtrh = channel->reserve_arr[--channel->reserve_ptr];
-
- return VXGE_HW_OK;
- }
-
- /* switch between empty and full arrays */
-
-	/* The idea behind this design is that by keeping the free and reserve
-	 * arrays separate we separate the irq and non-irq parts, i.e. no
-	 * additional locking is needed when we free a resource. */
-
- if (channel->length - channel->free_ptr > 0) {
- swap(channel->reserve_arr, channel->free_arr);
- channel->reserve_ptr = channel->length;
- channel->reserve_top = channel->free_ptr;
- channel->free_ptr = channel->length;
-
- channel->stats->reserve_free_swaps_cnt++;
-
- goto _alloc_after_swap;
- }
-
- channel->stats->full_cnt++;
-
- *dtrh = NULL;
- return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
-}
-
-/*
- * vxge_hw_channel_dtr_post - Post a dtr to the channel
- * @channelh: Channel
- * @dtrh: DTR pointer
- *
- * Posts a dtr to work array.
- *
- */
-static void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
-{
- vxge_assert(channel->work_arr[channel->post_index] == NULL);
-
- channel->work_arr[channel->post_index++] = dtrh;
-
- /* wrap-around */
- if (channel->post_index == channel->length)
- channel->post_index = 0;
-}
-
-/*
- * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
- * @channel: Channel
- * @dtr: Buffer to return the next completed DTR pointer
- *
- * Returns the next completed dtr without removing it from the work array.
- *
- */
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
-{
- vxge_assert(channel->compl_index < channel->length);
-
- *dtrh = channel->work_arr[channel->compl_index];
- prefetch(*dtrh);
-}
-
-/*
- * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
- * @channel: Channel handle
- *
- * Removes the next completed dtr from work array
- *
- */
-void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
-{
- channel->work_arr[channel->compl_index] = NULL;
-
- /* wrap-around */
- if (++channel->compl_index == channel->length)
- channel->compl_index = 0;
-
- channel->stats->total_compl_cnt++;
-}
-
-/*
- * vxge_hw_channel_dtr_free - Frees a dtr
- * @channel: Channel handle
- * @dtr: DTR pointer
- *
- * Returns the dtr to free array
- *
- */
-void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
-{
- channel->free_arr[--channel->free_ptr] = dtrh;
-}
-
-/*
- * vxge_hw_channel_dtr_count
- * @channel: Channel handle. Obtained via vxge_hw_channel_open().
- *
- * Retrieve the number of DTRs available. This function cannot be called
- * from the data path. ring_initial_replenishi() is the only user.
- */
-int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
-{
- return (channel->reserve_ptr - channel->reserve_top) +
- (channel->length - channel->free_ptr);
-}
-
-/**
- * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- *
- * Reserve an Rx descriptor for subsequent filling-in by the driver
- * and posting on the corresponding channel (@channelh)
- * via vxge_hw_ring_rxd_post().
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
- *
- */
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
- void **rxdh)
-{
- enum vxge_hw_status status;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, rxdh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_ring_rxd_1 *rxdp =
- (struct vxge_hw_ring_rxd_1 *)*rxdh;
-
- rxdp->control_0 = rxdp->control_1 = 0;
- }
-
- return status;
-}
-
-/**
- * vxge_hw_ring_rxd_free - Free descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
- * be:
- *
- * - reserved (vxge_hw_ring_rxd_reserve);
- *
- * - posted (vxge_hw_ring_rxd_post);
- *
- * - completed (vxge_hw_ring_rxd_next_completed);
- *
- * - and recycled again (vxge_hw_ring_rxd_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_free(channel, rxdh);
-
-}
-
-/**
- * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * This routine prepares an rxd and posts it.
- */
-void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_post_post - Process rxd after post.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post
- */
-void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post - Post descriptor on the ring.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
- *
- * Post descriptor on the ring.
- * Prior to posting the descriptor should be filled in accordance with
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- wmb();
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post with memory barrier.
- */
-void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
-{
- wmb();
- vxge_hw_ring_rxd_post_post(ring, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Receive Descriptor Format. Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
- * of the completions (the very first completion is passed by HW via
- * vxge_hw_ring_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_ring_rxd_next_completed either immediately from inside the
- * ring callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * Non-zero @t_code means failure to fill-in receive buffer(s)
- * of the descriptor.
- * For instance, parity error detected during the data transfer.
- * In this case Titan will complete the descriptor and indicate
- * for the host that the received data is not to be used.
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- * See also: vxge_hw_ring_callback_f{},
- * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
- */
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_ring_rxd_1 *rxdp;
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 control_0, own;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, rxdh);
-
- rxdp = *rxdh;
- if (rxdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- control_0 = rxdp->control_0;
- own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
-
- /* check whether it is not the end */
- if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
-
- vxge_assert((rxdp)->host_control !=
- 0);
-
- ++ring->cmpl_cnt;
- vxge_hw_channel_dtr_complete(channel);
-
- vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
-
- ring->stats->common_stats.usage_cnt++;
- if (ring->stats->common_stats.usage_max <
- ring->stats->common_stats.usage_cnt)
- ring->stats->common_stats.usage_max =
- ring->stats->common_stats.usage_cnt;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* reset it. since we don't want to return
- * garbage to the driver */
- *rxdh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_ring_handle_tcode - Handle transfer code.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
- */
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
-	/* If the t_code is not supported and the
-	 * t_code is other than 0x5 (unparseable packet,
-	 * such as an unknown IPv6 header), drop it.
-	 */
-
- if (t_code == VXGE_HW_RING_T_CODE_OK ||
- t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
- status = VXGE_HW_OK;
- goto exit;
- }
-
- if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- ring->stats->rxd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * __vxge_hw_non_offload_db_post - Post non offload doorbell
- *
- * @fifo: fifohandle
- * @txdl_ptr: The starting location of the TxDL in host memory
- * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
- * @no_snoop: No snoop flags
- *
- * This function posts a non-offload doorbell to doorbell FIFO
- *
- */
-static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
- u64 txdl_ptr, u32 num_txds, u32 no_snoop)
-{
- writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
- VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
- VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
- &fifo->nofl_db->control_0);
-
- writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-}
-
-/**
- * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
- * the fifo
- * @fifoh: Handle to the fifo object used for non offload send
- */
-u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
-{
- return vxge_hw_channel_dtr_count(&fifoh->channel);
-}
-
-/**
- * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- * @txdl_priv: Buffer to return the pointer to per txdl space
- *
- * Reserve a single TxDL (that is, fifo descriptor)
- * for subsequent filling-in by the driver
- * and posting on the corresponding channel (@channelh)
- * via vxge_hw_fifo_txdl_post().
- *
- * Note: it is the responsibility of the driver to reserve multiple descriptors
- * for lengthy (e.g., LSO) transmit operations. A single fifo descriptor
- * carries up to the configured number (fifo.max_frags) of contiguous buffers.
- *
- * Returns: VXGE_HW_OK - success;
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifo,
- void **txdlh, void **txdl_priv)
-{
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status;
- int i;
-
- channel = &fifo->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, txdlh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)*txdlh;
- struct __vxge_hw_fifo_txdl_priv *priv;
-
- priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- /* reset the TxDL's private */
- priv->align_dma_offset = 0;
- priv->align_vaddr_start = priv->align_vaddr;
- priv->align_used_frags = 0;
- priv->frags = 0;
- priv->alloc_frags = fifo->config->max_frags;
- priv->next_txdl_priv = NULL;
-
- *txdl_priv = (void *)(size_t)txdp->host_control;
-
- for (i = 0; i < fifo->config->max_frags; i++) {
- txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
- txdp->control_0 = txdp->control_1 = 0;
- }
- }
-
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
- * descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @frag_idx: Index of the data buffer in the caller's scatter-gather list
- * (of buffers).
- * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
- * @size: Size of the data buffer (in bytes).
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
- * All three APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
- void *txdlh, u32 frag_idx,
- dma_addr_t dma_pointer, u32 size)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp, *txdp_last;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
-
- if (frag_idx != 0)
- txdp->control_0 = txdp->control_1 = 0;
- else {
- txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
- txdp->control_1 |= fifo->interrupt_type;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
- fifo->tx_intr_num);
- if (txdl_priv->frags) {
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
- (txdl_priv->frags - 1);
- txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- }
- }
-
- vxge_assert(frag_idx < txdl_priv->alloc_frags);
-
- txdp->buffer_pointer = (u64)dma_pointer;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
- fifo->stats->total_buffers++;
- txdl_priv->frags++;
-}
-
-/**
- * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
- *
- * Post descriptor on the 'fifo' type channel for transmission.
- * Prior to posting the descriptor should be filled in accordance with
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp_last;
- struct vxge_hw_fifo_txd *txdp_first;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp_first = txdlh;
-
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
- txdp_last->control_0 |=
- VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
-
- __vxge_hw_non_offload_db_post(fifo,
- (u64)txdl_priv->dma_addr,
- txdl_priv->frags - 1,
- fifo->no_snoop_bits);
-
- fifo->stats->total_posts++;
- fifo->stats->common_stats.usage_cnt++;
- if (fifo->stats->common_stats.usage_max <
- fifo->stats->common_stats.usage_cnt)
- fifo->stats->common_stats.usage_max =
- fifo->stats->common_stats.usage_cnt;
-}
-
-/**
- * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Transmit Descriptor Format.
- * Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
- * of the completions (the very first completion is passed by HW via
- * vxge_hw_channel_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_fifo_txdl_next_completed either immediately from inside the
- * channel callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * Non-zero @t_code means failure to process the descriptor.
- * The failure could happen, for instance, when the link is
- * down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifo, void **txdlh,
- enum vxge_hw_fifo_tcode *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_fifo_txd *txdp;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, txdlh);
-
- txdp = *txdlh;
- if (txdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- /* check whether host owns it */
- if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
-
- vxge_assert(txdp->host_control != 0);
-
- vxge_hw_channel_dtr_complete(channel);
-
- *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
-
- if (fifo->stats->common_stats.usage_cnt > 0)
- fifo->stats->common_stats.usage_cnt--;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* no more completions */
- *txdlh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_fifo_handle_tcode - Handle transfer code.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
- */
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- fifo->stats->txd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_free - Free descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
- * be:
- *
- * - reserved (vxge_hw_fifo_txdl_reserve);
- *
- * - posted (vxge_hw_fifo_txdl_post);
- *
- * - completed (vxge_hw_fifo_txdl_next_completed);
- *
- * - and recycled again (vxge_hw_fifo_txdl_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_free(channel, txdlh);
-}
-
-/**
- * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be added for this vpath into the list
- * @macaddr_mask: MAC address mask for macaddr
- * @duplicate_mode: Duplicate MAC address add mode. Please see
- * enum vxge_hw_vpath_mac_addr_add_mode{}
- *
- * Adds the given mac address and mac address mask into the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- switch (duplicate_mode) {
- case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
- i = 0;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
- i = 1;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
- i = 2;
- break;
- default:
- i = 0;
- break;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
- * @vp: Vpath handle.
- * @macaddr: First MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the first mac address entry for this vpath from MAC address table.
- * Return: the first mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
- * @vp: Vpath handle.
- * @macaddr: Next MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the next mac address entry for this vpath from MAC address table.
- * Return: the next mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath from the MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be deleted from the list for this vpath
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Deletes the given mac address and mac address mask from the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
- * from the vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be deleted from the list for this vpath
- *
- * Deletes the given vlan id from the list for this vpath.
- * see also: vxge_hw_vpath_vid_add
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Enable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_disable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- /* Enable promiscuous mode for function 0 only */
- if (!(vpath->hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
- return VXGE_HW_OK;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
-
- val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_BCAST_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Disable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_enable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_bcast_enable - Enable broadcast
- * @vp: Vpath handle.
- *
- * Enable receiving broadcasts.
- */
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
- * @vp: Vpath handle.
- *
- * Enable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK on success.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
- * @vp: Vpath handle.
- *
- * Disable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vp,
- u32 skip_alarms)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
- * alarms
- * @vp: Virtual Path handle.
- * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
- * interrupts (can be repeated). If the fifo or ring is not enabled,
- * the MSIX vector for it should be set to 0.
- * @alarm_msix_id: MSIX vector for alarm.
- *
- * This API associates the given MSIX vector numbers with the four TIM
- * interrupts and the alarm interrupt.
- */
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
- int alarm_msix_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 vp_id = vp->vpath->vp_id;
-
- val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[0]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[1]);
-
- writeq(val64, &vp_reg->interrupt_cfg0);
-
- writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
- &vp_reg->interrupt_cfg2);
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
- 0, 32), &vp_reg->one_shot_vect0_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
- 0, 32), &vp_reg->one_shot_vect1_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
- 0, 32), &vp_reg->one_shot_vect2_en);
- }
-}
-
-/**
- * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function masks the MSIX interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: none; this function does not return a status.
- */
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
- else
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function unmasks the msix interrupt for the given msix_id
- *
- * Returns: none; this function does not return a status.
- */
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
-
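A hedged sketch of the mask/clear pattern these three helpers support in an MSI-X handler; the vector bookkeeping (msix_id) and the completion processing step are placeholders:

	#include <linux/interrupt.h>

	/* Sketch: mask the vector while completions are processed, then clear it
	 * so a one-shot vector can fire again. */
	static irqreturn_t vpath_rx_isr(int irq, void *dev_id)
	{
		struct __vxge_hw_vpath_handle *vp = dev_id;
		int msix_id = 1;	/* hypothetical: rx vector index for this vpath */

		vxge_hw_vpath_msix_mask(vp, msix_id);
		/* ... schedule NAPI / run vxge_hw_vpath_poll_rx() here ... */
		vxge_hw_vpath_msix_clear(vp, msix_id);

		return IRQ_HANDLED;
	}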
-/**
- * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Mask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_unmask_tx_rx()
- */
-void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask0);
- }
-
- val64 = readl(&hldev->common_reg->tim_int_mask1);
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Unmask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_mask_tx_rx()
- */
-void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask0);
- }
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
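A small sketch of bracketing a vpath reconfiguration with the INTA mask/unmask pair above (the reset step itself is elided):

	/* Sketch: keep Tx/Rx INTA sources quiet while the vpath is touched. */
	static void vpath_quiesce_and_resume(struct __vxge_hw_vpath_handle *vp)
	{
		vxge_hw_vpath_inta_mask_tx_rx(vp);
		/* ... reset or reprogram the vpath here ... */
		vxge_hw_vpath_inta_unmask_tx_rx(vp);
	}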
-/**
- * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
- * descriptors and process the same.
- * @ring: Handle to the ring object used for receive
- *
- * The function polls the Rx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling is completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- *
- * See also: vxge_hw_vpath_poll_tx()
- */
-enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
-{
- u8 t_code;
- enum vxge_hw_status status = VXGE_HW_OK;
- void *first_rxdh;
- int new_count = 0;
-
- ring->cmpl_cnt = 0;
-
- status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
- if (status == VXGE_HW_OK)
- ring->callback(ring, first_rxdh,
- t_code, ring->channel.userdata);
-
- if (ring->cmpl_cnt != 0) {
- ring->doorbell_cnt += ring->cmpl_cnt;
- if (ring->doorbell_cnt >= ring->rxds_limit) {
- /*
- * Each RxD is of 4 qwords, update the number of
- * qwords replenished
- */
- new_count = (ring->doorbell_cnt * 4);
-
- /* For each block add 4 more qwords */
- ring->total_db_cnt += ring->doorbell_cnt;
- if (ring->total_db_cnt >= ring->rxds_per_block) {
- new_count += 4;
- /* Reset total count */
- ring->total_db_cnt %= ring->rxds_per_block;
- }
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
- &ring->vp_reg->prc_rxd_doorbell);
- readl(&ring->common_reg->titan_general_int_status);
- ring->doorbell_cnt = 0;
- }
- }
-
- return status;
-}
-
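Per the kernel-doc above, a caller can loop until the function stops reporting VXGE_HW_COMPLETIONS_REMAIN; a minimal sketch, with the per-descriptor work left to the ring callback:

	/* Sketch: drain all completed RxDs currently visible on the ring. */
	static enum vxge_hw_status vpath_drain_rx(struct __vxge_hw_ring *ring)
	{
		enum vxge_hw_status status;

		do {
			status = vxge_hw_vpath_poll_rx(ring);
		} while (status == VXGE_HW_COMPLETIONS_REMAIN);

		return status;
	}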
-/**
- * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
- * @fifo: Handle to the fifo object used for non offload send
- * @skb_ptr: pointer to skb
- * @nr_skb: number of skbs
- * @more: more is coming
- *
- * The function polls the Tx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling is completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- */
-enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
- struct sk_buff ***skb_ptr, int nr_skb,
- int *more)
-{
- enum vxge_hw_fifo_tcode t_code;
- void *first_txdlh;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- status = vxge_hw_fifo_txdl_next_completed(fifo,
- &first_txdlh, &t_code);
- if (status == VXGE_HW_OK)
- if (fifo->callback(fifo, first_txdlh, t_code,
- channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
- status = VXGE_HW_COMPLETIONS_REMAIN;
-
- return status;
-}
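A sketch of the calling convention suggested by the parameters above: the caller passes a scratch array of skb pointers through @skb_ptr and frees whatever the fifo callback collected. The array size, the helper name, and the assumption that the callback advances *skb_ptr are illustrative:

	#include <linux/skbuff.h>

	/* Sketch: reap completed TxDs; assume the fifo callback advances *skb_ptr
	 * as it stores the skbs it wants the caller to free. */
	static void vpath_reap_tx(struct __vxge_hw_fifo *fifo)
	{
		struct sk_buff *completed[16];
		struct sk_buff **skb_ptr = completed;
		int more = 0;

		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed), &more);

		while (skb_ptr > completed)
			dev_kfree_skb_irq(*--skb_ptr);
	}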
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
deleted file mode 100644
index ba6f833bb059..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ /dev/null
@@ -1,2290 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_TRAFFIC_H
-#define VXGE_TRAFFIC_H
-
-#include "vxge-reg.h"
-#include "vxge-version.h"
-
-#define VXGE_HW_DTR_MAX_T_CODE 16
-#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_MAX_VIRTUAL_PATHS 17
-
-#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
-
-#define VXGE_HW_DEFAULT_32 0xffffffff
-/* frames sizes */
-#define VXGE_HW_HEADER_802_2_SIZE 3
-#define VXGE_HW_HEADER_SNAP_SIZE 5
-#define VXGE_HW_HEADER_VLAN_SIZE 4
-#define VXGE_HW_MAC_HEADER_MAX_SIZE \
- (ETH_HLEN + \
- VXGE_HW_HEADER_802_2_SIZE + \
- VXGE_HW_HEADER_VLAN_SIZE + \
- VXGE_HW_HEADER_SNAP_SIZE)
-
-/* 32bit alignments */
-#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
-#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
-#define VXGE_HW_HEADER_802_2_ALIGN 3
-#define VXGE_HW_HEADER_SNAP_ALIGN 1
-
-#define VXGE_HW_L3_CKSUM_OK 0xFFFF
-#define VXGE_HW_L4_CKSUM_OK 0xFFFF
-
-/* Forward declarations */
-struct __vxge_hw_device;
-struct __vxge_hw_vpath_handle;
-struct vxge_hw_vp_config;
-struct __vxge_hw_virtualpath;
-struct __vxge_hw_channel;
-struct __vxge_hw_fifo;
-struct __vxge_hw_ring;
-struct vxge_hw_ring_attr;
-struct vxge_hw_mempool;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/*VXGE_HW_STATUS_H*/
-
-#define VXGE_HW_EVENT_BASE 0
-#define VXGE_LL_EVENT_BASE 100
-
-/**
- * enum vxge_hw_event- Enumerates slow-path HW events.
- * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
- * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
- * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
- * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
- * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
- * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
- * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
- * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
- * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
- * slot-freeze from the rest of the critical events (e.g. ECC) when it is
- * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
- *
- * enum vxge_hw_event enumerates slow-path HW events.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_uld_link_down_f{}.
- */
-enum vxge_hw_event {
- VXGE_HW_EVENT_UNKNOWN = 0,
- /* HW events */
- VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
- VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
- VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
- VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
- VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
- VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
- VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
- VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
- VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
- VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
- VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
- VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
- VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
- VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
-};
-
-#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
-
-/*
- * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
- caller.
- */
-struct vxge_hw_mempool_dma {
- dma_addr_t addr;
- struct pci_dev *handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * vxge_hw_mempool_item_f - Mempool item alloc/free callback
- * @mempoolh: Memory pool handle.
- * @memblock: Address of memory block
- * @memblock_index: Index of memory block
- * @item: Item that gets allocated or freed.
- * @index: Item's index in the memory pool.
- * @is_last: True, if this item is the last one in the pool; false - otherwise.
- * @userdata: Per-pool user context.
- *
- * Memory pool allocation/deallocation callback.
- */
-
-/*
- * struct vxge_hw_mempool - Memory pool.
- */
-struct vxge_hw_mempool {
-
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-
- void *userdata;
- void **memblocks_arr;
- void **memblocks_priv_arr;
- struct vxge_hw_mempool_dma *memblocks_dma_arr;
- struct __vxge_hw_device *devh;
- u32 memblock_size;
- u32 memblocks_max;
- u32 memblocks_allocated;
- u32 item_size;
- u32 items_max;
- u32 items_initial;
- u32 items_current;
- u32 items_per_memblock;
- void **items_arr;
- u32 items_priv_size;
-};
-
-#define VXGE_HW_MAX_INTR_PER_VP 4
-#define VXGE_HW_VPATH_INTR_TX 0
-#define VXGE_HW_VPATH_INTR_RX 1
-#define VXGE_HW_VPATH_INTR_EINTA 2
-#define VXGE_HW_VPATH_INTR_BMAP 3
-
-#define VXGE_HW_BLOCK_SIZE 4096
-
-/**
- * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
- * @intr_enable: Set to 1, if interrupt is enabled.
- * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
- * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
- * asserted, other interrupt-generating entities will cancel the
- * scheduled timer interrupt.
- * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
- * When asserted, an interrupt will be generated every time the
- * boundary timer expires, even if no traffic has been transmitted
- * on this interrupt.
- * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
- * (Re-) Interrupt Enable: When asserted, an interrupt will be
- * generated the next time the timer expires, even if no traffic has
- * been transmitted on this interrupt. (This will only happen once
- * each time that this value is written to the TIM.) This bit is
- * cleared by H/W at the end of the current-timer-interval when
- * the interrupt is triggered.
- * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
- * @util_sel: Utilization Selector. Selects which of the workload approximations
- * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
- * specified utilization etc.), selects one of
- * the 17 host configured values.
- * 0-Virtual Path 0
- * 1-Virtual Path 1
- * ...
- * 16-Virtual Path 16
- * 17-Legacy Tx network utilization, provided by TPA
- * 18-Legacy Rx network utilization, provided by FAU
- * 19-Average of legacy Rx and Tx utilization calculated from link
- * utilization values.
- * 20-31-Invalid configurations
- * 32-Host utilization for Virtual Path 0
- * 33-Host utilization for Virtual Path 1
- * ...
- * 48-Host utilization for Virtual Path 16
- * 49-Legacy Tx network utilization, provided by TPA
- * 50-Legacy Rx network utilization, provided by FAU
- * 51-Average of legacy Rx and Tx utilization calculated from
- * link utilization values.
- * 52-63-Invalid configurations
- * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
- * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
- * to 1 enables counting of TxD0 returns (signalled by PCC's),
- * towards utilization event count values.
- * @urange_a: Defines the upper limit (in percent) for this utilization range
- * to be active. This range is considered active
- * if 0 <= UTIL <= URNG_A
- * and the UEC_A field (below) is non-zero.
- * @uec_a: Utilization Event Count A. If this range is active, the adapter will
- * wait until UEC_A events have occurred on the interrupt before
- * generating an interrupt.
- * @urange_b: Link utilization range B.
- * @uec_b: Utilization Event Count B.
- * @urange_c: Link utilization range C.
- * @uec_c: Utilization Event Count C.
- * @urange_d: Link utilization range D.
- * @uec_d: Utilization Event Count D.
- * Traffic Interrupt Controller Module interrupt configuration.
- */
-struct vxge_hw_tim_intr_config {
-
- u32 intr_enable;
-#define VXGE_HW_TIM_INTR_ENABLE 1
-#define VXGE_HW_TIM_INTR_DISABLE 0
-#define VXGE_HW_TIM_INTR_DEFAULT 0
-
- u32 btimer_val;
-#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
-#define VXGE_HW_USE_FLASH_DEFAULT (~0)
-
- u32 timer_ac_en;
-#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
-#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
-
- u32 timer_ci_en;
-#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
-
- u32 timer_ri_en;
-#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
-
- u32 rtimer_val;
-#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
-
- u32 util_sel;
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
-#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
-
- u32 ltimer_val;
-#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
-
- /* Line utilization interrupts */
- u32 urange_a;
-#define VXGE_HW_MIN_TIM_URANGE_A 0
-#define VXGE_HW_MAX_TIM_URANGE_A 100
-
- u32 uec_a;
-#define VXGE_HW_MIN_TIM_UEC_A 0
-#define VXGE_HW_MAX_TIM_UEC_A 65535
-
- u32 urange_b;
-#define VXGE_HW_MIN_TIM_URANGE_B 0
-#define VXGE_HW_MAX_TIM_URANGE_B 100
-
- u32 uec_b;
-#define VXGE_HW_MIN_TIM_UEC_B 0
-#define VXGE_HW_MAX_TIM_UEC_B 65535
-
- u32 urange_c;
-#define VXGE_HW_MIN_TIM_URANGE_C 0
-#define VXGE_HW_MAX_TIM_URANGE_C 100
-
- u32 uec_c;
-#define VXGE_HW_MIN_TIM_UEC_C 0
-#define VXGE_HW_MAX_TIM_UEC_C 65535
-
- u32 uec_d;
-#define VXGE_HW_MIN_TIM_UEC_D 0
-#define VXGE_HW_MAX_TIM_UEC_D 65535
-};
-
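To make the knobs above concrete, a hypothetical Rx-side TIM configuration using only the macros defined in this structure; the numeric values are illustrative, chosen within the stated MIN/MAX bounds:

	/* Sketch: moderate Rx interrupt coalescing driven by the boundary timer
	 * and the legacy Rx utilization selector. */
	static const struct vxge_hw_tim_intr_config example_rx_tim_cfg = {
		.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
		.btimer_val	= 250,		/* 250 * 272 ns boundary timer */
		.timer_ac_en	= VXGE_HW_TIM_TIMER_AC_ENABLE,
		.timer_ci_en	= VXGE_HW_TIM_TIMER_CI_DISABLE,
		.timer_ri_en	= VXGE_HW_TIM_TIMER_RI_DISABLE,
		.rtimer_val	= VXGE_HW_MIN_TIM_RTIMER_VAL,
		.util_sel	= VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL,
		.ltimer_val	= 100,
		.urange_a	= 5,  .uec_a = 1,
		.urange_b	= 15, .uec_b = 2,
		.urange_c	= 30, .uec_c = 4,
		.uec_d		= 8,
	};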
-#define VXGE_HW_STATS_OP_READ 0
-#define VXGE_HW_STATS_OP_CLEAR_STAT 1
-#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
-
-#define VXGE_HW_STATS_LOC_AGGR 17
-#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
-
-#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
-#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
- vxge_bVALn(bits, 32, 32)
-
-/**
- * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
- *
- * @tx_frms: Count of data frames transmitted on this Aggregator on all
- * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * However, does include frames discarded by the Distribution
- * function.
- * @tx_data_octets: Count of data and padding octets of frames transmitted
- * on this Aggregator on all its Aggregation ports. Does not include
- * octets of LACPDUs or Marker PDUs. However, does include octets of
- * frames discarded by the Distribution function.
- * @tx_mcast_frms: Count of data frames transmitted (to a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
- * on all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
- * that are discarded by the Distribution function. This occurs when
- * conversations are allocated to different ports and have to be
- * flushed on the old ports.
- * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
- * experience transmission errors on its Aggregation ports.
- * @rx_frms: Count of data frames received on this Aggregator on all its
- * Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * Also, does not include frames discarded by the Collection
- * function.
- * @rx_data_octets: Count of data and padding octets of frames received on this
- * Aggregator on all its Aggregation ports. Does not include octets
- * of LACPDUs or Marker PDUs. Also, does not include
- * octets of frames
- * discarded by the Collection function.
- * @rx_mcast_frms: Count of data frames received (from a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_discarded_frms: Count of data frames received on this Aggregator that are
- * discarded by the Collection function because the Collection
- * function was disabled on the port on which the frames were received.
- * @rx_errored_frms: Count of data frames received on this Aggregator that are
- * discarded by its Aggregation ports, or are discarded by the
- * Collection function of the Aggregator, or that are discarded by
- * the Aggregator due to detection of an illegal Slow Protocols PDU.
- * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
- * that are discarded by its Aggregation ports due to detection of
- * an unknown Slow Protocols PDU.
- *
- * Per aggregator XMAC RX statistics.
- */
-struct vxge_hw_xmac_aggr_stats {
-/*0x000*/ u64 tx_frms;
-/*0x008*/ u64 tx_data_octets;
-/*0x010*/ u64 tx_mcast_frms;
-/*0x018*/ u64 tx_bcast_frms;
-/*0x020*/ u64 tx_discarded_frms;
-/*0x028*/ u64 tx_errored_frms;
-/*0x030*/ u64 rx_frms;
-/*0x038*/ u64 rx_data_octets;
-/*0x040*/ u64 rx_mcast_frms;
-/*0x048*/ u64 rx_bcast_frms;
-/*0x050*/ u64 rx_discarded_frms;
-/*0x058*/ u64 rx_errored_frms;
-/*0x060*/ u64 rx_unknown_slow_proto_frms;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
- *
- * @tx_ttl_frms: Count of successfully transmitted MAC frames
- * @tx_ttl_octets: Count of total octets of transmitted frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of transmitted frames, including framing characters,
- * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have
- * 8 bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
- * including the preamble octets.
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
- * due to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown
- * protocol, such as a new IPv6 extension header, or an unsupported
- * Routing Type. The packet still has a checksum calculated but it
- * may be incorrect.
- * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
- * Since, the only control frames supported by this device are
- * PAUSE frames, this register is a count of all transmitted MAC
- * control frames.
- * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
- * on this Aggregation port.
- * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
- * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
- * the network. Increments because of:
- * 1) An internal processing error
- * (such as an uncorrectable ECC error). 2) A frame parsing error
- * during IP checksum calculation.
- * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
- * Aggregation port.
- * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /T/ (i.e. the terminate character), thus the statistic
- * tracks the number of transmitted Terminate characters.
- * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /S/ (i.e. the start character),
- * thus the statistic tracks
- * the number of transmitted Start characters.
- * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns transmitted at
- * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
- * set to 1, then this stat increments when COLUMN2 is found within
- * 'n' clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
- * to 0, then it means to search anywhere for COLUMN2).
- * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /I/ (i.e. a column containing all idle characters),
- * thus the statistic tracks the number of transmitted Idle columns.
- * @tx_any_err_frms: Count of transmitted frames containing any error that
- * prevents them from being passed to the network. Increments if
- * there is an ECC while reading the frame out of the transmit
- * buffer. Also increments if the transmit protocol assist (TPA)
- * block determines that the frame should not be sent.
- * @tx_drop_frms: Count of frames that could not be sent for no other reason
- * than internal MAC processing. Increments once whenever the
- * transmit buffer is flushed (due to an ECC error on a memory
- * descriptor).
- * @rx_ttl_frms: Count of total received MAC frames, including frames received
- * with frame-too-long, FCS, or length errors. This stat can be
- * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
- * everything, even "frames" as small as one byte of preamble.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of received frames, including framing characters,
- * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have 8
- * bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
- * even the preamble octets of "frames" as small as one byte of preamble.
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing
- * characters, of offloaded received frames that are passed
- * to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
- * the broadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to
- * the system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
- * is set to 1, then "more than 1518 octets" becomes "more than 1518
- * (1522 if VLAN-tagged) octets".
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS. In other
- * words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers. Note: If register
- * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
- * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
- * octets".
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 65 and 127
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 128 and 255
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 256 and 511
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding
- * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
- * Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
- * errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_pause_count: Count of number of pause quanta that the MAC has been in
- * the paused state. Recall, one pause quantum equates to 512
- * bit times.
- * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
- * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
- * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
- * this register is a count of all received MAC control frames.
- * Note: This stat may be configured to count all layer 2 errors
- * (i.e. length errors and FCS errors).
- * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
- * not include frames received with frame-too-long or
- * frame-too-short error.
- * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
- * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
- * for VLAN-tagged frames), inclusive, that does not match the
- * number of data octets (including pad) received. Also contains
- * a count of received frames with a length/type field less than
- * 46 (42 for VLAN-tagged frames) and the number of data octets
- * (including pad) received is greater than 46 (42 for VLAN-tagged
- * frames).
- * @rx_out_rng_len_err_frms: Count of received frames with length/type field
- * between 1501 and 1535 decimal, inclusive.
- * @rx_drop_frms: Count of received frames that could not be passed to the host.
- * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
- * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
- * for a list of reasons. Because the RMAC drops one frame at a time,
- * this stat also indicates the number of drop events.
- * @rx_discarded_frms: Count of received frames containing
- * any error that prevents
- * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
- * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
- * reasons.
- * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
- * port.
- * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
- * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain an unknown
- * PDU. Or frames that contain the Slow Protocols group MAC address,
- * but do not carry the Slow Protocols EtherType.
- * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
- * this Aggregation port.
- * @rx_fcs_discard: Count of received frames that are discarded because the
- * FCS check failed.
- * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain a badly
- * formed PDU. Or frames that carry the Slow Protocols EtherType,
- * but contain an illegal value of Protocol Subtype.
- * @rx_switch_discard: Count of received frames that are discarded by the
- * internal switch because they did not have an entry in the
- * Filtering Database. This includes frames that had an invalid
- * destination MAC address or VLAN ID. It also includes frames that are
- * discarded because they did not satisfy the length requirements
- * of the target VPATH.
- * @rx_len_discard: Count of received frames that are discarded because of an
- * invalid frame length (includes fragments, oversized frames and
- * mismatch between frame length and length/type field). This stat
- * can be configured
- * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
- * @rx_rpa_discard: Count of received frames that were discarded because the
- * receive protocol assist (RPA) discovered an error in the frame
- * or was unable to parse the frame.
- * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames,
- * Link Aggregation Control Protocol (LACP) frames, etc.) that are
- * discarded.
- * @rx_rts_discard: Count of received frames that are discarded by the receive
- * traffic steering (RTS) logic. Includes those frames discarded
- * because the SSC response contradicted the switch table, because
- * the SSC timed out, or because the target queue could not fit the
- * frame.
- * @rx_trash_discard: Count of received frames that are discarded because
- * receive traffic steering (RTS) steered the frame to the trash
- * queue.
- * @rx_buff_full_discard: Count of received frames that are discarded because
- * internal buffers are full. Includes frames discarded because the
- * RTS logic is waiting for an SSC lookup that has no timeout bound.
- * Also, includes frames that are dropped because the MAC2FAU buffer
- * is nearly full -- this can happen if the external receive buffer
- * is full and the receive path is backing up.
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
- * characters occurring between times of normal data transmission
- * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
- * incremented when either -
- * 1) The Reconciliation Sublayer (RS) is expecting one control
- * character and gets another (i.e. is expecting a Start
- * character, but gets another control character).
- * 2) Start control character is not in lane 0
- * Only increments the count by one for each XGMII column.
- * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
- * during normal data transmission. If the Reconciliation Sublayer
- * (RS) receives a control character, other than a terminate control
- * character, during receipt of data octets then this register is
- * incremented. Also increments if the start frame delimiter is not
- * found in the correct location. Only increments the count by one
- * for each XGMII column.
- * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
- * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
- * Only includes symbol errors that are observed between the XGMII
- * Start Frame Delimiter and End Frame Delimiter, inclusive. And
- * only increments the count by one for each frame.
- * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time.
- * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_local_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a local fault.
- * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
- * to 1, then this stat increments when COLUMN2 is found within 'n'
- * clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
- * 0, then it means to search anywhere for COLUMN2).
- * @rx_jettison: Count of received frames that are jettisoned because internal
- * buffers are full.
- * @rx_remote_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a remote fault.
- *
- * XMAC Port Statistics.
- */
-struct vxge_hw_xmac_port_stats {
-/*0x000*/ u64 tx_ttl_frms;
-/*0x008*/ u64 tx_ttl_octets;
-/*0x010*/ u64 tx_data_octets;
-/*0x018*/ u64 tx_mcast_frms;
-/*0x020*/ u64 tx_bcast_frms;
-/*0x028*/ u64 tx_ucast_frms;
-/*0x030*/ u64 tx_tagged_frms;
-/*0x038*/ u64 tx_vld_ip;
-/*0x040*/ u64 tx_vld_ip_octets;
-/*0x048*/ u64 tx_icmp;
-/*0x050*/ u64 tx_tcp;
-/*0x058*/ u64 tx_rst_tcp;
-/*0x060*/ u64 tx_udp;
-/*0x068*/ u32 tx_parse_error;
-/*0x06c*/ u32 tx_unknown_protocol;
-/*0x070*/ u64 tx_pause_ctrl_frms;
-/*0x078*/ u32 tx_marker_pdu_frms;
-/*0x07c*/ u32 tx_lacpdu_frms;
-/*0x080*/ u32 tx_drop_ip;
-/*0x084*/ u32 tx_marker_resp_pdu_frms;
-/*0x088*/ u32 tx_xgmii_char2_match;
-/*0x08c*/ u32 tx_xgmii_char1_match;
-/*0x090*/ u32 tx_xgmii_column2_match;
-/*0x094*/ u32 tx_xgmii_column1_match;
-/*0x098*/ u32 unused1;
-/*0x09c*/ u16 tx_any_err_frms;
-/*0x09e*/ u16 tx_drop_frms;
-/*0x0a0*/ u64 rx_ttl_frms;
-/*0x0a8*/ u64 rx_vld_frms;
-/*0x0b0*/ u64 rx_offload_frms;
-/*0x0b8*/ u64 rx_ttl_octets;
-/*0x0c0*/ u64 rx_data_octets;
-/*0x0c8*/ u64 rx_offload_octets;
-/*0x0d0*/ u64 rx_vld_mcast_frms;
-/*0x0d8*/ u64 rx_vld_bcast_frms;
-/*0x0e0*/ u64 rx_accepted_ucast_frms;
-/*0x0e8*/ u64 rx_accepted_nucast_frms;
-/*0x0f0*/ u64 rx_tagged_frms;
-/*0x0f8*/ u64 rx_long_frms;
-/*0x100*/ u64 rx_usized_frms;
-/*0x108*/ u64 rx_osized_frms;
-/*0x110*/ u64 rx_frag_frms;
-/*0x118*/ u64 rx_jabber_frms;
-/*0x120*/ u64 rx_ttl_64_frms;
-/*0x128*/ u64 rx_ttl_65_127_frms;
-/*0x130*/ u64 rx_ttl_128_255_frms;
-/*0x138*/ u64 rx_ttl_256_511_frms;
-/*0x140*/ u64 rx_ttl_512_1023_frms;
-/*0x148*/ u64 rx_ttl_1024_1518_frms;
-/*0x150*/ u64 rx_ttl_1519_4095_frms;
-/*0x158*/ u64 rx_ttl_4096_8191_frms;
-/*0x160*/ u64 rx_ttl_8192_max_frms;
-/*0x168*/ u64 rx_ttl_gt_max_frms;
-/*0x170*/ u64 rx_ip;
-/*0x178*/ u64 rx_accepted_ip;
-/*0x180*/ u64 rx_ip_octets;
-/*0x188*/ u64 rx_err_ip;
-/*0x190*/ u64 rx_icmp;
-/*0x198*/ u64 rx_tcp;
-/*0x1a0*/ u64 rx_udp;
-/*0x1a8*/ u64 rx_err_tcp;
-/*0x1b0*/ u64 rx_pause_count;
-/*0x1b8*/ u64 rx_pause_ctrl_frms;
-/*0x1c0*/ u64 rx_unsup_ctrl_frms;
-/*0x1c8*/ u64 rx_fcs_err_frms;
-/*0x1d0*/ u64 rx_in_rng_len_err_frms;
-/*0x1d8*/ u64 rx_out_rng_len_err_frms;
-/*0x1e0*/ u64 rx_drop_frms;
-/*0x1e8*/ u64 rx_discarded_frms;
-/*0x1f0*/ u64 rx_drop_ip;
-/*0x1f8*/ u64 rx_drop_udp;
-/*0x200*/ u32 rx_marker_pdu_frms;
-/*0x204*/ u32 rx_lacpdu_frms;
-/*0x208*/ u32 rx_unknown_pdu_frms;
-/*0x20c*/ u32 rx_marker_resp_pdu_frms;
-/*0x210*/ u32 rx_fcs_discard;
-/*0x214*/ u32 rx_illegal_pdu_frms;
-/*0x218*/ u32 rx_switch_discard;
-/*0x21c*/ u32 rx_len_discard;
-/*0x220*/ u32 rx_rpa_discard;
-/*0x224*/ u32 rx_l2_mgmt_discard;
-/*0x228*/ u32 rx_rts_discard;
-/*0x22c*/ u32 rx_trash_discard;
-/*0x230*/ u32 rx_buff_full_discard;
-/*0x234*/ u32 rx_red_discard;
-/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
-/*0x23c*/ u32 rx_xgmii_data_err_cnt;
-/*0x240*/ u32 rx_xgmii_char1_match;
-/*0x244*/ u32 rx_xgmii_err_sym;
-/*0x248*/ u32 rx_xgmii_column1_match;
-/*0x24c*/ u32 rx_xgmii_char2_match;
-/*0x250*/ u32 rx_local_fault;
-/*0x254*/ u32 rx_xgmii_column2_match;
-/*0x258*/ u32 rx_jettison;
-/*0x25c*/ u32 rx_remote_fault;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
- *
- * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
- * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
- * not including framing characters (i.e. less framing bits).
- * To determine the total octets of transmitted frames, including
- * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
- * this stat (the device always prepends 8 bytes of preamble for
- * each frame)
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
- * to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
- * such as a new IPv6 extension header, or an unsupported Routing
- * Type. The packet still has a checksum calculated but it may be
- * incorrect.
- * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
- * to the network. Increments because of: 1) An internal processing
- * error (such as an uncorrectable ECC error). 2) A frame parsing
- * error during IP checksum calculation.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted TCP segments. Does not include segments containing
- * retransmitted octets.
- * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
- * total number of segments retransmitted. Retransmitted segments
- * that are sourced by the host are counted by the host.
- * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted IP datagrams that could not be passed to the
- * network.
- *
- * XMAC Vpath TX Statistics.
- */
-struct vxge_hw_xmac_vpath_tx_stats {
- u64 tx_ttl_eth_frms;
- u64 tx_ttl_eth_octets;
- u64 tx_data_octets;
- u64 tx_mcast_frms;
- u64 tx_bcast_frms;
- u64 tx_ucast_frms;
- u64 tx_tagged_frms;
- u64 tx_vld_ip;
- u64 tx_vld_ip_octets;
- u64 tx_icmp;
- u64 tx_tcp;
- u64 tx_rst_tcp;
- u64 tx_udp;
- u32 tx_unknown_protocol;
- u32 tx_lost_ip;
- u32 unused1;
- u32 tx_parse_error;
- u64 tx_tcp_offload;
- u64 tx_retx_tcp_offload;
- u64 tx_lost_ip_offload;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
- *
- * @rx_ttl_eth_frms: Count of successfully received MAC frames.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_eth_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). Only counts octets
- * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
- * before FCS. To determine the total octets of received frames,
- * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
- * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
- * that have the required 8 bytes of preamble).
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing characters,
- * of offloaded received frames that are passed to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
- * broadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to the
- * system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed.
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS.
- * In other words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers.
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames
- * with length (including
- * FCS, but not framing bits) of between 65 and 127 octets inclusive.
- * Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 128 and 255 octets
- * inclusive. Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 256 and 511 octets
- * inclusive. Includes frames received with frame-too-long, FCS, or
- * length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
- * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of number of octets in received IP datagrams.
- * Includes errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_lost_frms: Count of received frames that could not be passed to the host.
- * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
- * for a list of reasons.
- * @rx_lost_ip: Count of received IP datagrams that could not be passed to
- * the host. See RX_LOST_FRMS for a list of reasons.
- * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of received IP datagrams that could not be passed to the host.
- * See RX_LOST_FRMS for a list of reasons.
- * @rx_various_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_sleep_discard: Count of received frames that are discarded because the
- * target VPATH is asleep (a Wake-on-LAN magic packet can be used
- * to awaken the VPATH).
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_queue_full_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
- *
- * XMAC Vpath RX Statistics.
- */
-struct vxge_hw_xmac_vpath_rx_stats {
- u64 rx_ttl_eth_frms;
- u64 rx_vld_frms;
- u64 rx_offload_frms;
- u64 rx_ttl_eth_octets;
- u64 rx_data_octets;
- u64 rx_offload_octets;
- u64 rx_vld_mcast_frms;
- u64 rx_vld_bcast_frms;
- u64 rx_accepted_ucast_frms;
- u64 rx_accepted_nucast_frms;
- u64 rx_tagged_frms;
- u64 rx_long_frms;
- u64 rx_usized_frms;
- u64 rx_osized_frms;
- u64 rx_frag_frms;
- u64 rx_jabber_frms;
- u64 rx_ttl_64_frms;
- u64 rx_ttl_65_127_frms;
- u64 rx_ttl_128_255_frms;
- u64 rx_ttl_256_511_frms;
- u64 rx_ttl_512_1023_frms;
- u64 rx_ttl_1024_1518_frms;
- u64 rx_ttl_1519_4095_frms;
- u64 rx_ttl_4096_8191_frms;
- u64 rx_ttl_8192_max_frms;
- u64 rx_ttl_gt_max_frms;
- u64 rx_ip;
- u64 rx_accepted_ip;
- u64 rx_ip_octets;
- u64 rx_err_ip;
- u64 rx_icmp;
- u64 rx_tcp;
- u64 rx_udp;
- u64 rx_err_tcp;
- u64 rx_lost_frms;
- u64 rx_lost_ip;
- u64 rx_lost_ip_offload;
- u16 rx_various_discard;
- u16 rx_sleep_discard;
- u16 rx_red_discard;
- u16 rx_queue_full_discard;
- u64 rx_mpa_ok_frms;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_stats - XMAC Statistics
- *
- * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
- * @port_stats: Statistics on ports (wire 0, wire 1, lag)
- * @vpath_tx_stats: Per vpath XMAC TX stats
- * @vpath_rx_stats: Per vpath XMAC RX stats
- *
- * XMAC Statistics.
- */
-struct vxge_hw_xmac_stats {
- struct vxge_hw_xmac_aggr_stats
- aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
- struct vxge_hw_xmac_port_stats
- port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
- struct vxge_hw_xmac_vpath_tx_stats
- vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_xmac_vpath_rx_stats
- vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
- * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
- * for the given VPATH
- * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
- * @ini_num_cpl_rcvd: The number of PCI read completions received by the
- * PIC block
- * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
- * block to the host
- * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
- * the PIC block
- * @wrcrdtarb_xoff: TBD
- * @rdcrdtarb_xoff: TBD
- * @vpath_genstats_count0: TBD
- * @vpath_genstats_count1: TBD
- * @vpath_genstats_count2: TBD
- * @vpath_genstats_count3: TBD
- * @vpath_genstats_count4: TBD
- * @vpath_genstats_count5: TBD
- * @tx_stats: Transmit stats
- * @rx_stats: Receive stats
- * @prog_event_vnum1: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
- * @prog_event_vnum0: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
- * @prog_event_vnum3: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
- * @prog_event_vnum2: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
- * @rx_multi_cast_frame_discard: TBD
- * @rx_frm_transferred: TBD
- * @rxd_returned: TBD
- * @rx_mpa_len_fail_frms: Count of received frames
- * that fail the MPA length check
- * @rx_mpa_mrk_fail_frms: Count of received frames
- * that fail the MPA marker check
- * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
- * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
- * frame buffer (and subsequently to the host).
- * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
- * because the VPATH is in reset
- * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
- * whenever the received frame matches the VPATH's Wake-on-LAN
- * signature(s) CRC.
- * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
- * because the VPATH is in reset. Includes frames that are discarded
- * because the current VPIN does not match the VPIN of the frame
- *
- * Titan vpath hardware statistics.
- */
-struct vxge_hw_vpath_stats_hw_info {
-/*0x000*/ u32 ini_num_mwr_sent;
-/*0x004*/ u32 unused1;
-/*0x008*/ u32 ini_num_mrd_sent;
-/*0x00c*/ u32 unused2;
-/*0x010*/ u32 ini_num_cpl_rcvd;
-/*0x014*/ u32 unused3;
-/*0x018*/ u64 ini_num_mwr_byte_sent;
-/*0x020*/ u64 ini_num_cpl_byte_rcvd;
-/*0x028*/ u32 wrcrdtarb_xoff;
-/*0x02c*/ u32 unused4;
-/*0x030*/ u32 rdcrdtarb_xoff;
-/*0x034*/ u32 unused5;
-/*0x038*/ u32 vpath_genstats_count0;
-/*0x03c*/ u32 vpath_genstats_count1;
-/*0x040*/ u32 vpath_genstats_count2;
-/*0x044*/ u32 vpath_genstats_count3;
-/*0x048*/ u32 vpath_genstats_count4;
-/*0x04c*/ u32 unused6;
-/*0x050*/ u32 vpath_genstats_count5;
-/*0x054*/ u32 unused7;
-/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
-/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
-/*0x220*/ u64 unused9;
-/*0x228*/ u32 prog_event_vnum1;
-/*0x22c*/ u32 prog_event_vnum0;
-/*0x230*/ u32 prog_event_vnum3;
-/*0x234*/ u32 prog_event_vnum2;
-/*0x238*/ u16 rx_multi_cast_frame_discard;
-/*0x23a*/ u8 unused10[6];
-/*0x240*/ u32 rx_frm_transferred;
-/*0x244*/ u32 unused11;
-/*0x248*/ u16 rxd_returned;
-/*0x24a*/ u8 unused12[6];
-/*0x252*/ u16 rx_mpa_len_fail_frms;
-/*0x254*/ u16 rx_mpa_mrk_fail_frms;
-/*0x256*/ u16 rx_mpa_crc_fail_frms;
-/*0x258*/ u16 rx_permitted_frms;
-/*0x25c*/ u64 rx_vp_reset_discarded_frms;
-/*0x25e*/ u64 rx_wol_frms;
-/*0x260*/ u64 tx_vp_reset_discarded_frms;
-} __packed;
-
-
-/**
- * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
- * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
- * by the adapter that were discarded because the VPATH is out of service
- * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
- * adapter that were discarded because the VPATH is out of service
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
- * the adapter that were discarded because the VPATH instance number does
- * not match
- * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
- * by the adapter that were discarded because the VPATH instance number
- * does not match
- * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
- * to the GENSTATS0_CFG for information on configuring this statistic
- * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
- * to the GENSTATS1_CFG for information on configuring this statistic
- * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
- * to the GENSTATS2_CFG for information on configuring this statistic
- * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
- * to the GENSTATS3_CFG for information on configuring this statistic
- * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
- * to the GENSTATS4_CFG for information on configuring this statistic
- * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
- * to the GENSTATS5_CFG for information on configuring this statistic
- * @pci.rstdrop_cpl 0x01c8 4
- * @pci.rstdrop_msg 0x01cc 4
- * @pci.rstdrop_client1 0x01d0 4
- * @pci.rstdrop_client0 0x01d4 4
- * @pci.rstdrop_client2 0x01d8 4
- * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
- * header credits were depleted
- * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
- * header credits were depleted
- * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
- * header credits were depleted
- * @pci.depl_cplh[vplane1] 0x01ea 2
- * @pci.depl_nph[vplane1] 0x01ec 2
- * @pci.depl_ph[vplane1] 0x01ee 2
- * @pci.depl_cplh[vplane2] 0x01f2 2
- * @pci.depl_nph[vplane2] 0x01f4 2
- * @pci.depl_ph[vplane2] 0x01f6 2
- * @pci.depl_cplh[vplane3] 0x01fa 2
- * @pci.depl_nph[vplane3] 0x01fc 2
- * @pci.depl_ph[vplane3] 0x01fe 2
- * @pci.depl_cplh[vplane4] 0x0202 2
- * @pci.depl_nph[vplane4] 0x0204 2
- * @pci.depl_ph[vplane4] 0x0206 2
- * @pci.depl_cplh[vplane5] 0x020a 2
- * @pci.depl_nph[vplane5] 0x020c 2
- * @pci.depl_ph[vplane5] 0x020e 2
- * @pci.depl_cplh[vplane6] 0x0212 2
- * @pci.depl_nph[vplane6] 0x0214 2
- * @pci.depl_ph[vplane6] 0x0216 2
- * @pci.depl_cplh[vplane7] 0x021a 2
- * @pci.depl_nph[vplane7] 0x021c 2
- * @pci.depl_ph[vplane7] 0x021e 2
- * @pci.depl_cplh[vplane8] 0x0222 2
- * @pci.depl_nph[vplane8] 0x0224 2
- * @pci.depl_ph[vplane8] 0x0226 2
- * @pci.depl_cplh[vplane9] 0x022a 2
- * @pci.depl_nph[vplane9] 0x022c 2
- * @pci.depl_ph[vplane9] 0x022e 2
- * @pci.depl_cplh[vplane10] 0x0232 2
- * @pci.depl_nph[vplane10] 0x0234 2
- * @pci.depl_ph[vplane10] 0x0236 2
- * @pci.depl_cplh[vplane11] 0x023a 2
- * @pci.depl_nph[vplane11] 0x023c 2
- * @pci.depl_ph[vplane11] 0x023e 2
- * @pci.depl_cplh[vplane12] 0x0242 2
- * @pci.depl_nph[vplane12] 0x0244 2
- * @pci.depl_ph[vplane12] 0x0246 2
- * @pci.depl_cplh[vplane13] 0x024a 2
- * @pci.depl_nph[vplane13] 0x024c 2
- * @pci.depl_ph[vplane13] 0x024e 2
- * @pci.depl_cplh[vplane14] 0x0252 2
- * @pci.depl_nph[vplane14] 0x0254 2
- * @pci.depl_ph[vplane14] 0x0256 2
- * @pci.depl_cplh[vplane15] 0x025a 2
- * @pci.depl_nph[vplane15] 0x025c 2
- * @pci.depl_ph[vplane15] 0x025e 2
- * @pci.depl_cplh[vplane16] 0x0262 2
- * @pci.depl_nph[vplane16] 0x0264 2
- * @pci.depl_ph[vplane16] 0x0266 2
- * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
- * credits were depleted
- * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
- * credits were depleted
- * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
- * credits were depleted
- * @pci.depl_cpld[vplane1] 0x0272 2
- * @pci.depl_npd[vplane1] 0x0274 2
- * @pci.depl_pd[vplane1] 0x0276 2
- * @pci.depl_cpld[vplane2] 0x027a 2
- * @pci.depl_npd[vplane2] 0x027c 2
- * @pci.depl_pd[vplane2] 0x027e 2
- * @pci.depl_cpld[vplane3] 0x0282 2
- * @pci.depl_npd[vplane3] 0x0284 2
- * @pci.depl_pd[vplane3] 0x0286 2
- * @pci.depl_cpld[vplane4] 0x028a 2
- * @pci.depl_npd[vplane4] 0x028c 2
- * @pci.depl_pd[vplane4] 0x028e 2
- * @pci.depl_cpld[vplane5] 0x0292 2
- * @pci.depl_npd[vplane5] 0x0294 2
- * @pci.depl_pd[vplane5] 0x0296 2
- * @pci.depl_cpld[vplane6] 0x029a 2
- * @pci.depl_npd[vplane6] 0x029c 2
- * @pci.depl_pd[vplane6] 0x029e 2
- * @pci.depl_cpld[vplane7] 0x02a2 2
- * @pci.depl_npd[vplane7] 0x02a4 2
- * @pci.depl_pd[vplane7] 0x02a6 2
- * @pci.depl_cpld[vplane8] 0x02aa 2
- * @pci.depl_npd[vplane8] 0x02ac 2
- * @pci.depl_pd[vplane8] 0x02ae 2
- * @pci.depl_cpld[vplane9] 0x02b2 2
- * @pci.depl_npd[vplane9] 0x02b4 2
- * @pci.depl_pd[vplane9] 0x02b6 2
- * @pci.depl_cpld[vplane10] 0x02ba 2
- * @pci.depl_npd[vplane10] 0x02bc 2
- * @pci.depl_pd[vplane10] 0x02be 2
- * @pci.depl_cpld[vplane11] 0x02c2 2
- * @pci.depl_npd[vplane11] 0x02c4 2
- * @pci.depl_pd[vplane11] 0x02c6 2
- * @pci.depl_cpld[vplane12] 0x02ca 2
- * @pci.depl_npd[vplane12] 0x02cc 2
- * @pci.depl_pd[vplane12] 0x02ce 2
- * @pci.depl_cpld[vplane13] 0x02d2 2
- * @pci.depl_npd[vplane13] 0x02d4 2
- * @pci.depl_pd[vplane13] 0x02d6 2
- * @pci.depl_cpld[vplane14] 0x02da 2
- * @pci.depl_npd[vplane14] 0x02dc 2
- * @pci.depl_pd[vplane14] 0x02de 2
- * @pci.depl_cpld[vplane15] 0x02e2 2
- * @pci.depl_npd[vplane15] 0x02e4 2
- * @pci.depl_pd[vplane15] 0x02e6 2
- * @pci.depl_cpld[vplane16] 0x02ea 2
- * @pci.depl_npd[vplane16] 0x02ec 2
- * @pci.depl_pd[vplane16] 0x02ee 2
- * @xgmac_port[3];
- * @xgmac_aggr[2];
- * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
- * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
- * @xgmac.orp_lro_events 0x0af8 8
- * @xgmac.orp_bs_events 0x0b00 8
- * @xgmac.orp_iwarp_events 0x0b08 8
- * @xgmac.tx_permitted_frms 0x0b14 4
- * @xgmac.port2_tx_any_frms 0x0b1d 1
- * @xgmac.port1_tx_any_frms 0x0b1e 1
- * @xgmac.port0_tx_any_frms 0x0b1f 1
- * @xgmac.port2_rx_any_frms 0x0b25 1
- * @xgmac.port1_rx_any_frms 0x0b26 1
- * @xgmac.port0_rx_any_frms 0x0b27 1
- *
- * Titan mrpcim hardware statistics.
- */
-struct vxge_hw_device_stats_mrpcim_info {
-/*0x0000*/ u32 pic_ini_rd_drop;
-/*0x0004*/ u32 pic_ini_wr_drop;
-/*0x0008*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
- /*0x0004*/ u32 unused1;
- } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
-/*0x0090*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
- /*0x0004*/ u32 unused2;
- } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
-/*0x0118*/ struct {
- /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
- /*0x0004*/ u32 unused3;
- } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
-/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
-/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
-/*0x01a8*/ u32 pic_genstats_count0;
-/*0x01ac*/ u32 pic_genstats_count1;
-/*0x01b0*/ u32 pic_genstats_count2;
-/*0x01b4*/ u32 pic_genstats_count3;
-/*0x01b8*/ u32 pic_genstats_count4;
-/*0x01bc*/ u32 unused4;
-/*0x01c0*/ u32 pic_genstats_count5;
-/*0x01c4*/ u32 unused5;
-/*0x01c8*/ u32 pci_rstdrop_cpl;
-/*0x01cc*/ u32 pci_rstdrop_msg;
-/*0x01d0*/ u32 pci_rstdrop_client1;
-/*0x01d4*/ u32 pci_rstdrop_client0;
-/*0x01d8*/ u32 pci_rstdrop_client2;
-/*0x01dc*/ u32 unused6;
-/*0x01e0*/ struct {
- /*0x0000*/ u16 unused7;
- /*0x0002*/ u16 pci_depl_cplh;
- /*0x0004*/ u16 pci_depl_nph;
- /*0x0006*/ u16 pci_depl_ph;
- } pci_depl_h_vplane[17];
-/*0x0268*/ struct {
- /*0x0000*/ u16 unused8;
- /*0x0002*/ u16 pci_depl_cpld;
- /*0x0004*/ u16 pci_depl_npd;
- /*0x0006*/ u16 pci_depl_pd;
- } pci_depl_d_vplane[17];
-/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
-/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
-/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
-/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
-/*0x0af0*/ u64 unused7;
-/*0x0af8*/ u64 unused8;
-/*0x0b00*/ u64 unused9;
-/*0x0b08*/ u64 unused10;
-/*0x0b10*/ u32 unused11;
-/*0x0b14*/ u32 xgmac_tx_permitted_frms;
-/*0x0b18*/ u32 unused12;
-/*0x0b1c*/ u8 unused13;
-/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
-/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
-/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
-/*0x0b20*/ u32 unused14;
-/*0x0b24*/ u8 unused15;
-/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
-/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
-/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
-} __packed;
-
-/**
- * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
- * @vpath_info: VPath statistics
- * @vpath_info_sav: Vpath statistics saved
- *
- * Titan hardware statistics.
- */
-struct vxge_hw_device_stats_hw_info {
- struct vxge_hw_vpath_stats_hw_info
- *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_vpath_stats_hw_info
- vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_common_info - HW common
- * statistics for queues.
- * @full_cnt: Number of times the queue was full
- * @usage_cnt: usage count.
- * @usage_max: Maximum usage
- * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
- * @total_compl_cnt: Total descriptor completion count.
- *
- * Hw queue counters
- * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_common_info {
- u32 full_cnt;
- u32 usage_cnt;
- u32 usage_max;
- u32 reserve_free_swaps_cnt;
- u32 total_compl_cnt;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
- * @common_stats: Common counters for all queues
- * @total_posts: Total number of postings on the queue.
- * @total_buffers: Total number of buffers posted.
- * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
- * (index) in this array reflects the transfer code type, for instance
- * 0xA - "loss of link".
- * Value txd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW fifo counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_fifo_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 total_posts;
- u32 total_buffers;
- u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
- * @common_stats: Common counters for all queues
- * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
- * (index) in this array reflects the transfer code type,
- * for instance
- * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
- * Value rxd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW ring counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_fifo_info{},
- */
-struct vxge_hw_vpath_stats_sw_ring_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-
-};
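Both per-tcode arrays above are indexed directly by the transfer code reported at completion time. A minimal sketch of the counting side (the helper name is hypothetical, not part of this header):

static inline void vxge_sw_ring_count_tcode(
		struct vxge_hw_vpath_stats_sw_ring_info *ring_stats, u8 t_code)
{
	/* Guard against codes outside the array; both arrays are sized
	 * by VXGE_HW_DTR_MAX_T_CODE.
	 */
	if (t_code < VXGE_HW_DTR_MAX_T_CODE)
		ring_stats->rxd_t_code_err_cnt[t_code]++;
}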
-
-/**
- * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
- * @unknown_alarms:
- * @network_sustained_fault:
- * @network_sustained_ok:
- * @kdfcctl_fifo0_overwrite:
- * @kdfcctl_fifo0_poison:
- * @kdfcctl_fifo0_dma_error:
- * @dblgen_fifo0_overflow:
- * @statsb_pif_chain_error:
- * @statsb_drop_timeout:
- * @target_illegal_access:
- * @ini_serr_det:
- * @prc_ring_bumps:
- * @prc_rxdcm_sc_err:
- * @prc_rxdcm_sc_abort:
- * @prc_quanta_size_err:
- *
- * HW vpath error statistics
- */
-struct vxge_hw_vpath_stats_sw_err {
- u32 unknown_alarms;
- u32 network_sustained_fault;
- u32 network_sustained_ok;
- u32 kdfcctl_fifo0_overwrite;
- u32 kdfcctl_fifo0_poison;
- u32 kdfcctl_fifo0_dma_error;
- u32 dblgen_fifo0_overflow;
- u32 statsb_pif_chain_error;
- u32 statsb_drop_timeout;
- u32 target_illegal_access;
- u32 ini_serr_det;
- u32 prc_ring_bumps;
- u32 prc_rxdcm_sc_err;
- u32 prc_rxdcm_sc_abort;
- u32 prc_quanta_size_err;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
- * @soft_reset_cnt: Number of times soft reset is done on this vpath.
- * @error_stats: error counters for the vpath
- * @ring_stats: counters for ring belonging to the vpath
- * @fifo_stats: counters for fifo belonging to the vpath
- *
- * HW vpath sw statistics
- * See also: struct vxge_hw_device_info{}.
- */
-struct vxge_hw_vpath_stats_sw_info {
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_err error_stats;
- struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
- struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
-};
-
-/**
- * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
- *
- * @not_traffic_intr_cnt: Number of times the host was interrupted
- * without new completions.
- * "Non-traffic interrupt counter".
- * @traffic_intr_cnt: Number of traffic interrupts for the device.
- * @total_intr_cnt: Total number of interrupts for the device.
- * @total_intr_cnt == @traffic_intr_cnt +
- * @not_traffic_intr_cnt
- * @soft_reset_cnt: Number of times soft reset is done on this device.
- * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
- * HW per-device statistics.
- */
-struct vxge_hw_device_stats_sw_info {
- u32 not_traffic_intr_cnt;
- u32 traffic_intr_cnt;
- u32 total_intr_cnt;
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_info
- vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_device_stats_sw_err - HW device error statistics.
- * @vpath_alarms: Number of vpath alarms
- *
- * HW Device error stats
- */
-struct vxge_hw_device_stats_sw_err {
- u32 vpath_alarms;
-};
-
-/**
- * struct vxge_hw_device_stats - Contains HW per-device statistics,
- * including hw.
- * @devh: HW device handle.
- * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
- * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
- * space.
- * @hw_info_dma_acch: One more DMA handle used subsequently to free the
- * DMA object. Note that this and the previous handle have
- * physical meaning for Solaris; on Windows and Linux the
- * corresponding value will be simply pointer to PCI device.
- *
- * @hw_dev_info_stats: Titan statistics maintained by the hardware.
- * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
- * of completions per interrupt.
- * @sw_dev_err_stats: HW's "soft" device error statistics.
- *
- * Structure-container of HW per-device statistics. Note that per-channel
- * statistics are kept in separate structures under HW's fifo and ring
- * channels.
- */
-struct vxge_hw_device_stats {
- /* handles */
- struct __vxge_hw_device *devh;
-
- /* HW device hardware statistics */
- struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
-
- /* HW device "soft" stats */
- struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
- struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
-
-};
-
-enum vxge_hw_status vxge_hw_device_hw_stats_enable(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_hw_info *hw_stats);
-
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_sw_info *sw_stats);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(
- struct __vxge_hw_device *devh,
- u32 operation,
- u32 location,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
- struct vxge_hw_xmac_stats *xmac_stats);
-
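Taken together, the getters declared above let a driver snapshot both the hardware-maintained and the driver-maintained counters. An illustrative sketch follows; VXGE_HW_OK is assumed to be the success value of enum vxge_hw_status (defined elsewhere in this header), and error handling is reduced to bare status checks:

static void vxge_dump_device_stats(struct __vxge_hw_device *devh)
{
	struct vxge_hw_device_stats_hw_info hw_stats;
	struct vxge_hw_device_stats_sw_info sw_stats;

	if (vxge_hw_device_hw_stats_enable(devh) != VXGE_HW_OK)
		return;

	if (vxge_hw_device_stats_get(devh, &hw_stats) == VXGE_HW_OK &&
	    vxge_hw_driver_stats_get(devh, &sw_stats) == VXGE_HW_OK)
		pr_info("vxge: %u soft resets, %u total interrupts\n",
			sw_stats.soft_reset_cnt, sw_stats.total_intr_cnt);
}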
-/**
- * enum vxge_hw_mgmt_reg_type - Register types.
- *
- * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
- * @vxge_hw_mgmt_reg_type_toc: TOC Registers
- * @vxge_hw_mgmt_reg_type_common: Common Registers
- * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
- * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
- * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
- * @vxge_hw_mgmt_reg_type_vpath: vpath registers
- *
- * Register type enumeration
- */
-enum vxge_hw_mgmt_reg_type {
- vxge_hw_mgmt_reg_type_legacy = 0,
- vxge_hw_mgmt_reg_type_toc = 1,
- vxge_hw_mgmt_reg_type_common = 2,
- vxge_hw_mgmt_reg_type_mrpcim = 3,
- vxge_hw_mgmt_reg_type_srpcim = 4,
- vxge_hw_mgmt_reg_type_vpmgmt = 5,
- vxge_hw_mgmt_reg_type_vpath = 6
-};
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 *value);
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 value);
-
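A small usage sketch for the accessors above, reading one 64-bit word from a vpath register space. The index selects the vpath and the offset shown is purely illustrative; real offsets come from the register map, which this header does not define. VXGE_HW_OK is again assumed to be the success status:

static void vxge_peek_vpath0_reg(struct __vxge_hw_device *devh)
{
	u64 val = 0;

	if (vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_vpath,
				  0, 0x0, &val) == VXGE_HW_OK)
		pr_debug("vxge: vpath0 reg[0x0] = 0x%llx\n",
			 (unsigned long long)val);
}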
-/**
- * enum vxge_hw_rxd_state - Descriptor (RXD) state.
- * @VXGE_HW_RXD_STATE_NONE: Invalid state.
- * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_rxd_state {
- VXGE_HW_RXD_STATE_NONE = 0,
- VXGE_HW_RXD_STATE_AVAIL = 1,
- VXGE_HW_RXD_STATE_POSTED = 2,
- VXGE_HW_RXD_STATE_FREED = 3
-};
-
-/**
- * struct vxge_hw_ring_rxd_info - Extended information associated with a
- * completed ring descriptor.
- * @syn_flag: SYN flag
- * @is_icmp: Is ICMP
- * @fast_path_eligible: Fast Path Eligible flag
- * @l3_cksum_valid: Set if the L3 checksum is valid
- * @l3_cksum: Result of IP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L3_CKSUM_OK would mean that
- * the checksum is correct, otherwise - the datagram is
- * corrupted.
- * @l4_cksum_valid: Set if the L4 checksum is valid
- * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L4_CKSUM_OK would mean that
- * the checksum is correct. Otherwise - the packet is
- * corrupted.
- * @frame: Zero or more of enum vxge_hw_frame_type flags.
- * See enum vxge_hw_frame_type{}.
- * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
- * various higher-layer protocols, including (but not restricted to)
- * TCP and UDP. See enum vxge_hw_frame_proto{}.
- * @is_vlan: Set if the VLAN tag is valid
- * @vlan: VLAN tag extracted from the received frame.
- * @rth_bucket: RTH bucket
- * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
- *		has a matching entry in the Indirection table.
- * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
- *		has a matching entry in the Socket Pair Direct Match table.
- * @rth_hash_type: RTH hash code of the function used to calculate the hash.
- * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Titan
- * hardware if RTH is enabled.
- */
-struct vxge_hw_ring_rxd_info {
- u32 syn_flag;
- u32 is_icmp;
- u32 fast_path_eligible;
- u32 l3_cksum_valid;
- u32 l3_cksum;
- u32 l4_cksum_valid;
- u32 l4_cksum;
- u32 frame;
- u32 proto;
- u32 is_vlan;
- u32 vlan;
- u32 rth_bucket;
- u32 rth_it_hit;
- u32 rth_spdm_hit;
- u32 rth_hash_type;
- u32 rth_value;
-};
-/**
- * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
- * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
- * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
- * presentation configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error: unparseable packet,
- *		such as an unknown IPv6 header.
- * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error: frame integrity
- *		error, such as FCS or ECC.
- * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error: the RxD
- *		buffer(s) were not appropriately sized and data loss occurred.
- * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error: RxD corrupted.
- * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow: the contents of
- *		Segment1 exceeded the capacity of Buffer1 and the remainder
- *		was placed in Buffer2. Segment2 now starts in Buffer3.
- *		No data loss or errors occurred.
- * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0: one of the RxD's
- *		assigned buffers has a size of 0 bytes.
- * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped, either due to
- *		VPath Reset or because of a VPIN mismatch.
- * @VXGE_HW_RING_T_CODE_UNUSED: Unused.
- * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors: more than one
- *		transfer code condition occurred.
- *
- * Transfer codes returned by adapter.
- */
-enum vxge_hw_ring_tcode {
- VXGE_HW_RING_T_CODE_OK = 0x0,
- VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
- VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
- VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
- VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
- VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
- VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
- VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
- VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
- VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
- VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
- VXGE_HW_RING_T_CODE_UNUSED = 0xE,
- VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
-};
-
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh);
-
-void
-vxge_hw_ring_rxd_pre_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post_wmb(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void vxge_hw_ring_rxd_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh,
- u8 *t_code);
-
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u8 t_code);
-
-void vxge_hw_ring_rxd_free(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
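The ring descriptor helpers above are used as a reserve/post/complete cycle. A reduced sketch of the completion side is shown below; the skb handling and descriptor re-posting a real driver performs are omitted, and VXGE_HW_OK is assumed to be the success status:

static void vxge_poll_ring_once(struct __vxge_hw_ring *ringh)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ringh, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_RING_T_CODE_OK)
			vxge_hw_ring_handle_tcode(ringh, rxdh, t_code);

		/* A real driver would pass the frame to the stack and
		 * re-post the descriptor; here it is simply released.
		 */
		vxge_hw_ring_rxd_free(ringh, rxdh);
	}
}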
-/**
- * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
- * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
- * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
- * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
- * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
- * @VXGE_HW_FRAME_PROTO_TCP: TCP.
- * @VXGE_HW_FRAME_PROTO_UDP: UDP.
- * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
- *
- * Higher layer ethernet protocols and options.
- */
-enum vxge_hw_frame_proto {
- VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
- VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
- VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
- VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
- VXGE_HW_FRAME_PROTO_TCP = 0x02,
- VXGE_HW_FRAME_PROTO_UDP = 0x01,
- VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
- VXGE_HW_FRAME_PROTO_UDP)
-};
-
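The @proto flags pair naturally with the checksum fields of struct vxge_hw_ring_rxd_info defined earlier. A hedged sketch; VXGE_HW_L4_CKSUM_OK is the success value referenced in that structure's kernel-doc and is defined elsewhere in the header:

static bool vxge_rx_l4_csum_ok(const struct vxge_hw_ring_rxd_info *ext_info)
{
	/* Only TCP/UDP frames carry a meaningful L4 checksum result. */
	if (!(ext_info->proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP))
		return false;

	return ext_info->l4_cksum_valid &&
	       ext_info->l4_cksum == VXGE_HW_L4_CKSUM_OK;
}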
-/**
- * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
- *
- * These gather codes are used to indicate the position of a TxD in a TxD list
- */
-enum vxge_hw_fifo_gather_code {
- VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
- VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
- VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
- VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
-};
-
-/**
- * enum vxge_hw_fifo_tcode - tcodes used in fifo
- * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
- * frame data) returned with corrupt data.
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
- * with no data.
- * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
- * frame or LSO MSS that was too long (>9800B).
- * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
- * Offload operation, due to improper header template,
- * unsupported protocol, etc.
- * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
- * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
- * data buffer transfer errors are encountered (see below).
- * Otherwise it is set to 0.
- *
- * These tcodes are returned by various APIs for TxD status
- */
-enum vxge_hw_fifo_tcode {
- VXGE_HW_FIFO_T_CODE_OK = 0x0,
- VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
- VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
- VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
- VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
- VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
- VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
-};
-
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- void **txdl_priv);
-
-void vxge_hw_fifo_txdl_buffer_set(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- u32 frag_idx,
- dma_addr_t dma_pointer,
- u32 size);
-
-void vxge_hw_fifo_txdl_post(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh);
-
-u32 vxge_hw_fifo_free_txdl_count_get(
- struct __vxge_hw_fifo *fifo_handle);
-
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- enum vxge_hw_fifo_tcode *t_code);
-
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code);
-
-void vxge_hw_fifo_txdl_free(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh);
-
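On the transmit side the fifo helpers follow the same reserve/fill/post pattern. A reduced, single-fragment sketch (DMA mapping and completion handling omitted; VXGE_HW_OK assumed to be the success status):

static enum vxge_hw_status vxge_post_one_frag(struct __vxge_hw_fifo *fifoh,
					      dma_addr_t dma, u32 len)
{
	void *txdlh;
	void *txdl_priv;
	enum vxge_hw_status status;

	status = vxge_hw_fifo_txdl_reserve(fifoh, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;

	/* Fragment index 0: the only buffer of this TxD list. */
	vxge_hw_fifo_txdl_buffer_set(fifoh, txdlh, 0, dma, len);
	vxge_hw_fifo_txdl_post(fifoh, txdlh);

	return VXGE_HW_OK;
}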
-/*
- * Device
- */
-
-#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
-#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-
-/*
- * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
- * bytes. Each memblock is contiguous DMA-able memory. Each
- * memblock contains 1 or more 4KB RxD blocks visible to the
- * Titan hardware.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-receive descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that the driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- */
-struct __vxge_hw_ring_rxd_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
-#ifdef VXGE_DEBUG_ASSERT
- struct vxge_hw_mempool_dma *dma_object;
-#endif
-};
-
-struct vxge_hw_mempool_cbs {
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-};
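The callback table above is how a channel hands its per-item initialisation hook to the memory pool code. A sketch of wiring one up; the callback body is hypothetical, a real implementation would record the item's DMA offsets:

static void my_rxd_item_alloc(struct vxge_hw_mempool *mempoolh,
			      u32 memblock_index,
			      struct vxge_hw_mempool_dma *dma_object,
			      u32 index, u32 is_last)
{
	/* Hypothetical: initialise the descriptor at this pool index. */
}

static const struct vxge_hw_mempool_cbs my_ring_mempool_cbs = {
	.item_func_alloc = my_rxd_item_alloc,
};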
-
-#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
- ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 *data1,
- u64 *data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 data1,
- u64 data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_enable(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-void vxge_hw_device_intr_enable(
- struct __vxge_hw_device *devh);
-
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
-
-void vxge_hw_device_intr_disable(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_mask_all(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_unmask_all(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_begin_irq(
- struct __vxge_hw_device *devh,
- u32 skip_alarms,
- u64 *reason);
-
-void vxge_hw_device_clear_tx_rx(
- struct __vxge_hw_device *devh);
-
-/*
- * Virtual Paths
- */
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
-
-u32 vxge_hw_vpath_id(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_vpath_mac_addr_add_mode {
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
- VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
- VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
-};
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
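An illustrative sketch of programming a unicast address through the helpers above. The all-ones mask is only an assumption for an exact match; the real mask semantics are defined by the RTS access tables, not by this header:

static enum vxge_hw_status vxge_add_ucast(struct __vxge_hw_vpath_handle *vp,
					  const u8 *addr)
{
	u8 macaddr[ETH_ALEN];
	u8 macaddr_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	memcpy(macaddr, addr, ETH_ALEN);

	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
			VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE);
}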
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_poll_rx(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_poll_tx(
- struct __vxge_hw_fifo *fifoh,
- struct sk_buff ***skb_ptr, int nr_skb, int *more);
-
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 skip_alarms);
-
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
- int *tim_msix_id, int alarm_msix_id);
-
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
-
-void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
-
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-enum vxge_hw_status vxge_hw_vpath_intr_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_mask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_unmask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
- void **dtrh);
-
-void
-vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
-
-void
-vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
-
-int
-vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
deleted file mode 100644
index b9efa28bab3e..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-version.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_VERSION_H
-#define VXGE_VERSION_H
-
-#define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "3"
-#define VXGE_VERSION_BUILD "22640"
-#define VXGE_VERSION_FOR "k"
-
-#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
-
-#define VXGE_DEAD_FW_VER_MAJOR 1
-#define VXGE_DEAD_FW_VER_MINOR 4
-#define VXGE_DEAD_FW_VER_BUILD 4
-
-#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
- VXGE_DEAD_FW_VER_MINOR, \
- VXGE_DEAD_FW_VER_BUILD)
-
-#define VXGE_EPROM_FW_VER_MAJOR 1
-#define VXGE_EPROM_FW_VER_MINOR 6
-#define VXGE_EPROM_FW_VER_BUILD 1
-
-#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
- VXGE_EPROM_FW_VER_MINOR, \
- VXGE_EPROM_FW_VER_BUILD)
-
-#define VXGE_CERT_FW_VER_MAJOR 1
-#define VXGE_CERT_FW_VER_MINOR 8
-#define VXGE_CERT_FW_VER_BUILD 1
-
-#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
- VXGE_CERT_FW_VER_MINOR, \
- VXGE_CERT_FW_VER_BUILD)
-
-#endif
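The VXGE_FW_VER() macro above packs major/minor/build into one comparable integer, so firmware checks reduce to plain comparisons. For instance (illustrative helper only):

/* True when the running firmware is at least the certified 1.8.1 image. */
static bool vxge_fw_at_least_certified(u32 major, u32 minor, u32 build)
{
	return VXGE_FW_VER(major, minor, build) >= VXGE_CERT_FW_VER;
}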
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e31f8fbbc696..df2ab5cbd49b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}
/* If the chain is ended by an load/store pair then this
- * could serve as the new head of the the next chain.
+ * could serve as the new head of the next chain.
*/
if (curr_pair_is_memcpy(meta1, meta2)) {
head_ld_meta = meta1;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0147de405365..2b383d92d7f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -149,7 +149,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
}
/* Pre_lag action must be first on action list.
- * If other actions already exist they need pushed forward.
+ * If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(nfp_flow->action_data + act_size,
@@ -427,6 +427,12 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
+ if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: loaded firmware does not support tunnel flag offload");
+ return -EOPNOTSUPP;
+ }
+
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -436,7 +442,8 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
- set_tun->tun_id = ip_tun->key.tun_id;
+ if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
+ set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
@@ -474,17 +481,11 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
ip_rt_put(rt);
} else {
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
}
}
set_tun->tos = ip_tun->key.tos;
-
- if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
- return -EOPNOTSUPP;
- }
set_tun->tun_flags = ip_tun->key.tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -674,9 +675,9 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
fl_hl_mask->hop_limit;
break;
case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
- if (mask & ~IPV6_FLOW_LABEL_MASK ||
- exact & ~IPV6_FLOW_LABEL_MASK) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
+ if (mask & ~IPV6_FLOWINFO_MASK ||
+ exact & ~IPV6_FLOWINFO_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow info action");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 68e8a2fb1a29..2df2af1da716 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -96,8 +96,6 @@
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
-#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
-
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
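For context on the mask swap in the action.c and cmsg.h hunks above: the removed driver-local IPV6_FLOW_LABEL_MASK covered only the 20-bit flow label, whereas the generic IPV6_FLOWINFO_MASK also covers the traffic-class bits of the word the pedit offset addresses. The generic value below is quoted from include/net/ipv6.h as an assumption, not from this patch:

/* Removed driver-local mask (flow label only): */
#define IPV6_FLOW_LABEL_MASK	cpu_to_be32(0x000fffff)
/* Generic mask used instead (traffic class + flow label), per net/ipv6.h: */
#define IPV6_FLOWINFO_MASK	cpu_to_be32(0x0fffffff)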
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 7c31a46195b2..b3b2a23b8d89 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -182,7 +182,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
u8 ip_proto = 0;
/* Temporary buffer for mangling keys, 64 is enough to cover max
* struct size of key in various fields that may be mangled.
- * Supported fileds to mangle:
+ * Supported fields to mangle:
* mac_src/mac_dst(struct flow_match_eth_addrs, 12B)
* nw_tos/nw_ttl(struct flow_match_ip, 2B)
* nw_src/nw_dst(struct flow_match_ipv4/6_addrs, 32B)
@@ -194,7 +194,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
entry1->netdev != entry2->netdev)
return -EINVAL;
- /* check the overlapped fields one by one, the unmasked part
+ /* Check the overlapped fields one by one, the unmasked part
* should not conflict with each other.
*/
if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -563,7 +563,7 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
- /* ip_proto is the only field that needed in later compile_action,
+ /* ip_proto is the only field that is needed in later compile_action,
* needed to set the correct checksum flags. It doesn't really matter
* which input rule's ip_proto field we take as the earlier merge checks
* would have made sure that they don't conflict. We do not know which
@@ -1013,7 +1013,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
nft_m_entry->tc_m_parent = tc_m_entry;
nft_m_entry->nft_parent = nft_entry;
nft_m_entry->tc_flower_cookie = 0;
- /* Copy the netdev from one the pre_ct entry. When the tc_m_entry was created
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
* it only combined them if the netdevs were the same, so can use any of them.
*/
nft_m_entry->netdev = pre_ct_entry->netdev;
@@ -1143,7 +1143,7 @@ nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
zt->priv = priv;
zt->nft = NULL;
- /* init the various hash tables and lists*/
+ /* init the various hash tables and lists */
INIT_LIST_HEAD(&zt->pre_ct_list);
INIT_LIST_HEAD(&zt->post_ct_list);
INIT_LIST_HEAD(&zt->nft_flows_list);
@@ -1346,7 +1346,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
*/
if (is_nft_flow) {
- /* Need to iterate through list of nft_flow entries*/
+ /* Need to iterate through list of nft_flow entries */
struct nfp_fl_ct_flow_entry *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
@@ -1354,7 +1354,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
cleanup_nft_merge_entry(m_entry);
}
} else {
- /* Need to iterate through list of tc_merged_flow entries*/
+ /* Need to iterate through list of tc_merged_flow entries */
struct nfp_fl_ct_tc_merge *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index ede90e086b28..e92860e20a24 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -234,7 +234,7 @@ nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
}
/* To signal the end of a batch, both the switch and last flags are set
- * and the the reserved SYNC group ID is used.
+ * and the reserved SYNC group ID is used.
*/
if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
@@ -576,7 +576,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
group->dirty = true;
group->slave_cnt = slave_count;
- /* Group may have been on queue for removal but is now offloable. */
+ /* Group may have been on queue for removal but is now offloadable. */
group->to_remove = false;
mutex_unlock(&lag->lock);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 74e1b279c13b..0f06ef6e24bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -339,7 +339,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
goto err_free_ctx_entry;
}
- /* Do net allocate a mask-id for pre_tun_rules. These flows are used to
+ /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
* configure the pre_tun table and are never actually sent to the
* firmware as an add-flow message. This causes the mask-id allocation
* on the firmware to get out of sync if allocated here.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 9d65459bdba5..83c97154c0c7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -359,7 +359,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- /* check if GRE, which has no enc_ports */
+ /* Check if GRE, which has no enc_ports */
if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
@@ -1016,7 +1016,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
- /* check if the two flows are already merged */
+ /* Check if the two flows are already merged */
parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
if (rhashtable_lookup_fast(&priv->merge_table,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 3206ba83b1aa..4e5df9f2c372 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -534,7 +534,7 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
}
}
-/* offload tc action, currently only for tc police */
+/* Offload tc action, currently only for tc police */
static const struct rhashtable_params stats_meter_table_params = {
.key_offset = offsetof(struct nfp_meter_entry, meter_id),
@@ -690,7 +690,7 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
for (i = 0 ; i < action_num; i++) {
- /*set qos associate data for this interface */
+ /* Set qos associate data for this interface */
action = paction + i;
if (action->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
@@ -736,7 +736,7 @@ nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
u32 meter_id;
bool pps;
- /*delete qos associate data for this interface */
+ /* Delete qos associate data for this interface */
if (fl_act->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: qos rate limit offload requires police action");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 6bf3ec448e7e..52f67157bd0f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -447,7 +447,8 @@ void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
- void *flow, struct neighbour *neigh, bool is_ipv6)
+ void *flow, struct neighbour *neigh, bool is_ipv6,
+ bool override)
{
bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
@@ -546,6 +547,13 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (nn_entry->flow)
list_del(&nn_entry->list_head);
kfree(nn_entry);
+ } else if (nn_entry && !neigh_invalid && override) {
+ mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ nfp_tun_link_predt_entries(app, nn_entry);
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
}
spin_unlock_bh(&priv->predt_lock);
@@ -610,7 +618,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
dst_release(dst);
}
- nfp_tun_write_neigh(n->dev, app, &flow6, n, true);
+ nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#else
return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
@@ -633,7 +641,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
ip_rt_put(rt);
}
- nfp_tun_write_neigh(n->dev, app, &flow4, n, false);
+ nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
}
#else
return NOTIFY_DONE;
@@ -676,7 +684,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, false);
+ nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
neigh_release(n);
rcu_read_unlock();
return;
@@ -718,7 +726,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, true);
+ nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
neigh_release(n);
rcu_read_unlock();
return;
@@ -1064,7 +1072,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
entry->ref_count--;
- /* If del is part of a mod then mac_list is still in use elsewheree. */
+ /* If del is part of a mod then mac_list is still in use elsewhere. */
if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 7db56abaa582..448c1c1afaee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -3,6 +3,7 @@
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -81,12 +82,11 @@ nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
@@ -167,30 +167,35 @@ nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}
-static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
+static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
- if (likely(!md_dst && !tls_handle))
- return 0;
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
- if (!tls_handle)
- return 0;
- md_dst = NULL;
+ if (unlikely(md_dst || tls_handle)) {
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
}
- md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
+
+ if (!(md_dst || tls_handle || vlan_insert))
+ return 0;
+
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- meta_id = 0;
data = skb_push(skb, md_bytes) + md_bytes;
if (md_dst) {
- data -= 4;
+ data -= NFP_NET_META_PORTID_SIZE;
put_unaligned_be32(md_dst->u.port_info.port_id, data);
meta_id = NFP_NET_META_PORTID;
}
@@ -198,13 +203,23 @@ static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
/* conn handle is opaque, we just use u64 to be able to quickly
* compare it to zero
*/
- data -= 8;
+ data -= NFP_NET_META_CONN_HANDLE_SIZE;
memcpy(data, &tls_handle, sizeof(tls_handle));
meta_id <<= NFP_NET_META_FIELD_SIZE;
meta_id |= NFP_NET_META_CONN_HANDLE;
}
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+ /* skb->vlan_proto is already __be16, so it can be copied
+ * into the metadata without calling put_unaligned_be16
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
- data -= 4;
+ data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
return md_bytes;
@@ -258,7 +273,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
+ md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle);
if (unlikely(md_bytes < 0))
goto err_flush;
@@ -282,7 +297,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
txd->flags = 0;
@@ -320,7 +335,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
txd = &tx_ring->txds[wr_idx];
txd->dma_len = cpu_to_le16(fsize);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
txd->offset_eop = md_bytes |
((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
txd->vals8[1] = second_half;
@@ -562,8 +577,12 @@ nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
+ /* The DMA address is expanded to 48-bit width in the freelist for
+ * NFP3800, so the *_48b macro is used accordingly. It is also OK to
+ * fill in a 40-bit address since the top 8 bits get set to 0.
+ */
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
@@ -700,7 +719,7 @@ bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
- u32 meta_info;
+ u32 meta_info, vlan_info;
meta_info = get_unaligned_be32(data);
data += 4;
@@ -718,6 +737,17 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
@@ -817,7 +847,7 @@ nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
+ nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -1046,9 +1076,11 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
#endif
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
@@ -1193,7 +1225,7 @@ nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
txd->flags = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
index 47604d5e25eb..a03190c9313c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
@@ -247,10 +247,13 @@ nfp_nfd3_print_tx_descs(struct seq_file *file,
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VEPA | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
@@ -260,6 +263,7 @@ const struct nfp_dp_ops nfp_nfd3_ops = {
.version = NFP_NFD_VER_NFD3,
.tx_min_desc_per_pkt = 1,
.cap_mask = NFP_NFD3_CFG_CTRL_SUPPORTED,
+ .dma_mask = DMA_BIT_MASK(40),
.poll = nfp_nfd3_poll,
.xsk_poll = nfp_nfd3_xsk_poll,
.ctrl_poll = nfp_nfd3_ctrl_poll,
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index c16c4b42ecfd..65e243168765 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -40,7 +40,7 @@ nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off);
+ nfp_desc_set_dma_addr_40b(txd, xrxbuf->dma_addr + pkt_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -94,9 +94,12 @@ static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, meta))) {
+ dev_kfree_skb_any(skb);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+
if (meta_xdp)
skb_metadata_set(skb,
xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
@@ -361,10 +364,8 @@ static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
/* Build TX descriptor. */
txd = &tx_ring->txds[wr_idx];
- nfp_desc_set_dma_addr(txd,
- xsk_buff_raw_get_dma(xsk_pool,
- desc[i].addr
- ));
+ nfp_desc_set_dma_addr_40b(txd,
+ xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));
txd->offset_eop = NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(desc[i].len);
txd->data_len = cpu_to_le16(desc[i].len);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index e509d6dcba5c..2b427d8ccb2f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -46,28 +46,16 @@ nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
segs = skb_shinfo(skb)->gso_segs;
mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
- /* Note: TSO of the packet with metadata prepended to skb is not
- * supported yet, in which case l3/l4_offset and lso_hdrlen need
- * be correctly handled here.
- * Concern:
- * The driver doesn't have md_bytes easily available at this point.
- * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
- * so it needs the full length there. The app MEs might prefer
- * l3_offset and l4_offset relative to the start of packet data,
- * but could probably cope with it being relative to the CTM buf
- * data offset.
- */
txd.l3_offset = l3_offset;
txd.l4_offset = l4_offset;
txd.lso_meta_res = 0;
@@ -125,17 +113,18 @@ nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
static int
nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
- unsigned int nr_frags, struct sk_buff *skb)
+ struct sk_buff *skb)
{
unsigned int n_descs, wr_p, nop_slots;
const skb_frag_t *frag, *fend;
struct nfp_nfdk_tx_desc *txd;
+ unsigned int nr_frags;
unsigned int wr_idx;
int err;
recount_descs:
n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
-
+ nr_frags = skb_shinfo(skb)->nr_frags;
frag = skb_shinfo(skb)->frags;
fend = frag + nr_frags;
for (; frag < fend; frag++)
@@ -181,55 +170,52 @@ close_block:
return 0;
}
-static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
+static int
+nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
+ struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ bool vlan_insert;
+ u32 meta_id = 0;
+ int md_bytes;
- if (likely(!md_dst))
- return 0;
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
- return 0;
-
- /* Note: Unsupported case when TSO a skb with metedata prepended.
- * See the comments in `nfp_nfdk_tx_tso` for details.
- */
- if (unlikely(md_dst && skb_is_gso(skb)))
- return -EOPNOTSUPP;
-
- if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
- return -ENOMEM;
-
- data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
- put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
- return sizeof(md_dst->u.port_info.port_id);
-}
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
-static int
-nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
- struct nfp_net_r_vector *r_vec)
-{
- unsigned char *data;
- int res, md_bytes;
- u32 meta_id = 0;
-
- res = nfp_nfdk_prep_port_id(skb);
- if (unlikely(res <= 0))
- return res;
+ if (!(md_dst || vlan_insert))
+ return 0;
- md_bytes = res;
- meta_id = NFP_NET_META_PORTID;
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
- if (unlikely(skb_cow_head(skb, sizeof(meta_id))))
+ if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- md_bytes += sizeof(meta_id);
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= NFP_NET_META_PORTID_SIZE;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+ /* skb->vlan_proto is already __be16, so it can be copied
+ * into the metadata without calling put_unaligned_be16
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
FIELD_PREP(NFDK_META_FIELDS, meta_id);
- data = skb_push(skb, sizeof(meta_id));
+ data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
return NFDK_DESC_TX_CHAIN_META;
@@ -277,14 +263,17 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
if (unlikely((int)metadata < 0))
goto err_flush;
- nr_frags = skb_shinfo(skb)->nr_frags;
- if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
goto err_flush;
+ /* nr_frags will change after skb_linearize, so read it only after
+ * the nfp_nfdk_tx_maybe_close_block() call
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
/* DMA map all */
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
txd = &tx_ring->ktxds[wr_idx];
@@ -310,11 +299,20 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
/* FIELD_PREP() implicitly truncates to chunk */
dma_len -= 1;
- dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+
+ /* We will do our best to pass as much data as we can in the descriptor
+ * and we need to make sure the first descriptor includes the whole head
+ * since there is a limitation on the firmware side. Sometimes the value
+ * of dma_len ANDed with NFDK_DESC_TX_DMA_LEN_HEAD will be less than
+ * headlen.
+ */
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
/* starts at bit 0 */
BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
@@ -339,7 +337,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
dma_len -= dlen_type;
dma_addr += dlen_type + 1;
@@ -595,8 +593,8 @@ nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
@@ -717,7 +715,7 @@ static bool
nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
- u32 meta_info;
+ u32 meta_info, vlan_info;
meta_info = get_unaligned_be32(data);
data += 4;
@@ -735,6 +733,17 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
@@ -925,11 +934,13 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
/* FIELD_PREP() implicitly truncates to chunk */
dma_len -= 1;
- dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
dma_len -= tmp_dlen;
@@ -940,7 +951,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
dma_len -= 1;
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
dlen_type &= NFDK_DESC_TX_DMA_LEN;
dma_len -= dlen_type;
@@ -1170,9 +1181,11 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
@@ -1303,7 +1316,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
skb_push(skb, 4));
}
- if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
goto err_free;
/* DMA map all */
@@ -1328,11 +1341,13 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txbuf++;
dma_len -= 1;
- dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
dma_len -= tmp_dlen;
@@ -1343,7 +1358,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
dma_len -= 1;
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
dlen_type &= NFDK_DESC_TX_DMA_LEN;
dma_len -= dlen_type;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
index 301f11108826..fdb8144a63e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
@@ -168,10 +168,12 @@ nfp_nfdk_print_tx_descs(struct seq_file *file,
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
- NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_TXRWB | NFP_NET_CFG_CTRL_VEPA | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
@@ -181,6 +183,7 @@ const struct nfp_dp_ops nfp_nfdk_ops = {
.version = NFP_NFD_VER_NFDK,
.tx_min_desc_per_pkt = NFDK_TX_DESC_PER_SIMPLE_PKT,
.cap_mask = NFP_NFDK_CFG_CTRL_SUPPORTED,
+ .dma_mask = DMA_BIT_MASK(48),
.poll = nfp_nfdk_poll,
.ctrl_poll = nfp_nfdk_ctrl_poll,
.xmit = nfp_nfdk_tx,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 09f250e74dfa..bb3f46c74f77 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -230,7 +230,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02x, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4f88d17536c3..873429f7a6da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -392,7 +392,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
/* First try to find a firmware image specific for this device */
interface = nfp_cpp_interface(pf->cpp);
nfp_cpp_serial(pf->cpp, &serial);
- sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw",
+ sprintf(fw_name, "netronome/serial-%pMF-%02x-%02x.nffw",
serial, interface >> 8, interface & 0xff);
fw = nfp_net_fw_request(pdev, pf, fw_name);
if (fw)
@@ -410,7 +410,9 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
return NULL;
}
- fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "nffw.partno");
+ if (!fw_model)
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
if (!fw_model) {
dev_err(&pdev->dev, "Error: can't read part number\n");
return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 3dd3a92d2e7f..a101ff30a1ae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -115,7 +115,7 @@ struct nfp_nfdk_tx_buf;
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
/* Convenience macro for writing dma address into RX/TX descriptors */
-#define nfp_desc_set_dma_addr(desc, dma_addr) \
+#define nfp_desc_set_dma_addr_40b(desc, dma_addr) \
do { \
__typeof__(desc) __d = (desc); \
dma_addr_t __addr = (dma_addr); \
@@ -124,13 +124,13 @@ struct nfp_nfdk_tx_buf;
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
-#define nfp_nfdk_tx_desc_set_dma_addr(desc, dma_addr) \
- do { \
- __typeof__(desc) __d = (desc); \
- dma_addr_t __addr = (dma_addr); \
- \
- __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr) & 0xff); \
- __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
+#define nfp_desc_set_dma_addr_48b(desc, dma_addr) \
+ do { \
+ __typeof__(desc) __d = (desc); \
+ dma_addr_t __addr = (dma_addr); \
+ \
+ __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr)); \
+ __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
} while (0)
/**
@@ -225,8 +225,8 @@ struct nfp_net_tx_ring {
struct nfp_net_rx_desc {
union {
struct {
- u8 dma_addr_hi; /* High bits of the buf address */
- __le16 reserved; /* Must be zero */
+ __le16 dma_addr_hi; /* High bits of the buf address */
+ u8 reserved; /* Must be zero */
u8 meta_len_dd; /* Must be zero */
__le32 dma_addr_lo; /* Low bits of the buffer address */
@@ -248,6 +248,8 @@ struct nfp_net_rx_desc {
};
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+#define NFP_NET_VLAN_CTAG 0
+#define NFP_NET_VLAN_STAG 1
struct nfp_meta_parsed {
u8 hash_type;
@@ -256,6 +258,11 @@ struct nfp_meta_parsed {
u32 mark;
u32 portid;
__wsum csum;
+ struct {
+ bool stripped;
+ u8 tpid;
+ u16 tci;
+ } vlan;
};
struct nfp_net_rx_hash {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 4e56a99087fa..cf4d6f1129fa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -31,6 +31,7 @@
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>
@@ -597,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return skb;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
resync_pending = tls_offload_tx_resync_pending(skb->sk);
@@ -665,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
return;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
@@ -1694,16 +1695,18 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
if (features & NETIF_F_HW_VLAN_CTAG_TX)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
@@ -1713,6 +1716,13 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (changed & NETIF_F_HW_VLAN_STAG_RX) {
+ if (features & NETIF_F_HW_VLAN_STAG_RX)
+ new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -1742,6 +1752,27 @@ static int nfp_net_set_features(struct net_device *netdev,
}
static netdev_features_t
+nfp_net_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (features & NETIF_F_HW_VLAN_STAG_RX)) {
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
+ } else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
+ }
+ }
+ return features;
+}
+
+static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
@@ -1757,8 +1788,7 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
@@ -1892,6 +1922,69 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
+ nlflags, filter_mask, NULL);
+}
+
+static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem, err;
+ u32 new_ctrl;
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
+ new_ctrl = nn->dp.ctrl;
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA)
+ new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
+ else if (mode == BRIDGE_MODE_VEB)
+ new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
+ else
+ return -EOPNOTSUPP;
+
+ if (new_ctrl == nn->dp.ctrl)
+ return 0;
+
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (!err)
+ nn->dp.ctrl = new_ctrl;
+
+ return err;
+ }
+
+ return -EINVAL;
+}
+
const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
@@ -1914,11 +2007,14 @@ const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_xsk_wakeup = nfp_net_xsk_wakeup,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
const struct net_device_ops nfp_nfdk_netdev_ops = {
@@ -1932,6 +2028,7 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_rate = nfp_app_set_vf_rate,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
@@ -1942,10 +2039,13 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
@@ -1993,7 +2093,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2002,6 +2102,9 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXQINQ ? "RXQINQ " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
@@ -2012,6 +2115,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
+ nn->cap & NFP_NET_CFG_CTRL_VEPA ? "VEPA " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -2040,6 +2144,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings)
{
+ u64 dma_mask = dma_get_mask(&pdev->dev);
struct nfp_net *nn;
int err;
@@ -2085,6 +2190,14 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
goto err_free_nn;
}
+ if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
+ dev_err(&pdev->dev,
+ "DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
+ nn->dp.ops->dma_mask, dma_mask);
+ err = -EINVAL;
+ goto err_free_nn;
+ }
+
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
@@ -2279,31 +2392,39 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->vlan_features = netdev->hw_features;
- if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
}
- if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
} else {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
}
}
if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ }
netdev->features = netdev->hw_features;
if (nfp_app_has_tc(nn->app) && nn->port)
netdev->hw_features |= NETIF_F_HW_TC;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8892a94f00c3..ac05ec34d69e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -31,10 +31,16 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
#define NFP_NET_LSO_MAX_SEGS 64
+/* Working with metadata vlan api (NFD version >= 2.0) */
+#define NFP_NET_META_VLAN_STRIP BIT(31)
+#define NFP_NET_META_VLAN_TPID_MASK GENMASK(19, 16)
+#define NFP_NET_META_VLAN_TCI_MASK GENMASK(15, 0)
+
/* Prepend field types */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_VLAN 4 /* ctag or stag type */
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
@@ -42,6 +48,10 @@
#define NFP_META_PORT_ID_CTRL ~0U
+/* Prepend field sizes */
+#define NFP_NET_META_VLAN_SIZE 4
+#define NFP_NET_META_PORTID_SIZE 4
+#define NFP_NET_META_CONN_HANDLE_SIZE 8
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
@@ -89,11 +99,15 @@
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
+#define NFP_NET_CFG_CTRL_RXQINQ (0x1 << 13) /* Enable S-tag strip */
+#define NFP_NET_CFG_CTRL_RXVLAN_V2 (0x1 << 15) /* Enable C-tag strip */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_VEPA (0x1 << 22) /* Enable VEPA mode */
+#define NFP_NET_CFG_CTRL_TXVLAN_V2 (0x1 << 23) /* Enable VLAN C-tag insert */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -110,6 +124,10 @@
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_RXVLAN_ANY (NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2)
+#define NFP_NET_CFG_CTRL_TXVLAN_ANY (NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2)
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
index 34dd94811df3..550df83b798c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
@@ -440,3 +440,27 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
return ret;
}
+
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta)
+{
+ u16 tpid = 0, tci = 0;
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
+ tpid = ETH_P_8021Q;
+ tci = le16_to_cpu(rxd->rxd.vlan);
+ } else if (meta->vlan.stripped) {
+ if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
+ tpid = ETH_P_8021Q;
+ else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
+ tpid = ETH_P_8021AD;
+ else
+ return false;
+
+ tci = meta->vlan.tci;
+ }
+ if (tpid)
+ __vlan_hwaccel_put_tag(skb, htons(tpid), tci);
+
+ return true;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
index c934cc2d3208..831c83ce0d3d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
@@ -106,6 +106,8 @@ int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta);
enum nfp_nfd_version {
NFP_NFD_VER_NFD3,
@@ -117,6 +119,7 @@ enum nfp_nfd_version {
* @version: Indicate dp type
* @tx_min_desc_per_pkt: Minimal TX descs needed for each packet
* @cap_mask: Mask of supported features
+ * @dma_mask: DMA addressing capability
* @poll: Napi poll for normal rx/tx
* @xsk_poll: Napi poll when xsk is enabled
* @ctrl_poll: Tasklet poll for ctrl rx/tx
@@ -134,6 +137,7 @@ struct nfp_dp_ops {
enum nfp_nfd_version version;
unsigned int tx_min_desc_per_pkt;
u32 cap_mask;
+ u64 dma_mask;
int (*poll)(struct napi_struct *napi, int budget);
int (*xsk_poll)(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index df0afd271a21..eeb1455a4e5d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -29,6 +29,7 @@
#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
+#include "nfpcore/nfp_cpp.h"
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
@@ -442,6 +443,160 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
+static int nfp_test_link(struct net_device *netdev)
+{
+ if (!netif_carrier_ok(netdev) || !(netdev->flags & IFF_UP))
+ return 1;
+
+ return 0;
+}
+
+static int nfp_test_nsp(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_nsp_identify *nspi;
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_info(netdev, "NSP Test: failed to access the NSP: %d\n", err);
+ goto exit;
+ }
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15) {
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
+ if (!nspi) {
+ err = -ENOMEM;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_identify(nsp, nspi, sizeof(*nspi));
+ if (err < 0)
+ netdev_info(netdev, "NSP Test: reading bsp version failed %d\n", err);
+
+ kfree(nspi);
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+exit:
+ return err;
+}
+
+static int nfp_test_fw(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ netdev_info(netdev, "FW Test: update failed %d\n", err);
+
+ return err;
+}
+
+static int nfp_test_reg(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_cpp *cpp = app->cpp;
+ u32 model = nfp_cpp_model(cpp);
+ u32 value;
+ int err;
+
+ err = nfp_cpp_model_autodetect(cpp, &value);
+ if (err < 0) {
+ netdev_info(netdev, "REG Test: NFP model detection failed %d\n", err);
+ return err;
+ }
+
+ return (value == model) ? 0 : 1;
+}
+
+static bool link_test_supported(struct net_device *netdev)
+{
+ return true;
+}
+
+static bool nsp_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static bool fw_test_supported(struct net_device *netdev)
+{
+ if (nfp_netdev_is_nfp_net(netdev))
+ return true;
+
+ return false;
+}
+
+static bool reg_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static struct nfp_self_test_item {
+ char name[ETH_GSTRING_LEN];
+ bool (*is_supported)(struct net_device *dev);
+ int (*func)(struct net_device *dev);
+} nfp_self_test[] = {
+ {"Link Test", link_test_supported, nfp_test_link},
+ {"NSP Test", nsp_test_supported, nfp_test_nsp},
+ {"Firmware Test", fw_test_supported, nfp_test_fw},
+ {"Register Test", reg_test_supported, nfp_test_reg}
+};
+
+#define NFP_TEST_TOTAL_NUM ARRAY_SIZE(nfp_self_test)
+
+static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ ethtool_sprintf(&data, nfp_self_test[i].name);
+}
+
+static int nfp_get_self_test_count(struct net_device *netdev)
+{
+ int i, count = 0;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ count++;
+
+ return count;
+}
+
+static void nfp_net_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ int i, ret, count = 0;
+
+ netdev_info(netdev, "Start self test\n");
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++) {
+ if (nfp_self_test[i].is_supported(netdev)) {
+ ret = nfp_self_test[i].func(netdev);
+ if (ret)
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[count++] = ret;
+ }
+ }
+
+ netdev_info(netdev, "Test end\n");
+}
+
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -705,6 +860,9 @@ static void nfp_net_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -739,6 +897,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
cnt += nfp_mac_get_stats_count(netdev);
cnt += nfp_app_port_get_stats_count(nn->port);
return cnt;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -757,6 +917,9 @@ static void nfp_port_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -786,6 +949,8 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
return count;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -1230,6 +1395,8 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ /* Update port state to get the latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
@@ -1460,6 +1627,55 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static void nfp_port_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return;
+
+ /* Currently pause frame support is fixed */
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+}
+
+static int nfp_net_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Control LED to blink */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 1);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Control LED to normal mode */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -1468,6 +1684,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
+ .self_test = nfp_net_self_test,
.get_strings = nfp_net_get_strings,
.get_ethtool_stats = nfp_net_get_stats,
.get_sset_count = nfp_net_get_sset_count,
@@ -1492,6 +1709,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -1499,6 +1718,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
+ .self_test = nfp_net_self_test,
.get_sset_count = nfp_port_get_sset_count,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
@@ -1509,6 +1729,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 75b5018f2e1b..8b77582bdfa0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -365,9 +365,9 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->vlan_features = netdev->hw_features;
- if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
+ if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN_ANY)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
else
@@ -375,11 +375,15 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
}
if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (repr_cap & NFP_NET_CFG_CTRL_RXQINQ)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
netdev->features = netdev->hw_features;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
index 86829446c637..aea507aed49d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
@@ -70,8 +70,12 @@ void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- rx_ring->xsk_rxbufs[wr_idx].dma_addr);
+ /* The DMA address is expanded to 48-bit width in the freelist for
+ * NFP3800, so the *_48b macro is used accordingly. It is also OK to
+ * fill in a 40-bit address since the top 8 bits get set to 0.
+ */
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ rx_ring->xsk_rxbufs[wr_idx].dma_addr);
rx_ring->wr_p++;
wr_ptr_add++;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
index afab6f0fc564..6ad43c7cefe6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -4,7 +4,6 @@
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
-#include <linux/kernel.h>
#include <linux/crc32.h>
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index ddb34bfb9bef..3d379e937184 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -13,36 +13,22 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/sizes.h>
-#include <linux/stringify.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
#endif
-#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x
-
-#define __nfp_err(cpp, fmt, args...) \
- dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_warn(cpp, fmt, args...) \
- dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_info(cpp, fmt, args...) \
- dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_dbg(cpp, fmt, args...) \
- dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_printk(level, cpp, fmt, args...) \
- dev_printk(level, nfp_cpp_device(cpp)->parent, \
- NFP_SUBSYS ": " fmt, ## args)
-
#define nfp_err(cpp, fmt, args...) \
- __nfp_err(cpp, string_format(fmt), ## args)
+ dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_warn(cpp, fmt, args...) \
- __nfp_warn(cpp, string_format(fmt), ## args)
+ dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_info(cpp, fmt, args...) \
- __nfp_info(cpp, string_format(fmt), ## args)
+ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_dbg(cpp, fmt, args...) \
- __nfp_dbg(cpp, string_format(fmt), ## args)
+ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_printk(level, cpp, fmt, args...) \
- __nfp_printk(level, cpp, string_format(fmt), ## args)
+ dev_printk(level, nfp_cpp_device(cpp)->parent, \
+ NFP_SUBSYS ": " fmt, ## args)
#define PCI_64BIT_BAR_COUNT 3
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 34c0d2ddf9ef..a8286d0032d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
}
/* Adjust the start address to be cache size aligned */
- cache->id = id;
cache->addr = addr & ~(u64)(cache->size - 1);
/* Re-init to the new ID and address */
@@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
return NULL;
}
+ cache->id = id;
+
exit:
/* Adjust offset */
*offset = addr - cache->addr;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
index 28384d6d1c6f..0725b51c2a95 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
@@ -9,7 +9,7 @@
const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
[NFP_DEV_NFP3800] = {
- .dma_mask = DMA_BIT_MASK(40),
+ .dma_mask = DMA_BIT_MASK(48),
.qc_idx_mask = GENMASK(8, 0),
.qc_addr_offset = 0x400000,
.min_qc_size = 512,
@@ -21,7 +21,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
.qc_area_sz = 0x100000,
},
[NFP_DEV_NFP3800_VF] = {
- .dma_mask = DMA_BIT_MASK(40),
+ .dma_mask = DMA_BIT_MASK(48),
.qc_idx_mask = GENMASK(8, 0),
.qc_addr_offset = 0,
.min_qc_size = 512,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index f5360bae6f75..77d66855be42 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -196,6 +196,8 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
return !!eth_port->fec_modes_supported;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 311a5be25acb..edd300033735 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -49,6 +49,7 @@
#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -492,6 +493,35 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
return 0;
}
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ /* The set ID mode feature was added in ABI 0.32 */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set id mode operation not supported, please update flash\n");
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, state);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
({ \
__BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f3568901eb91..1443f788ee37 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1437,7 +1437,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
- if ((vlan_flags & features) &&
+ if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index f54035455ad6..c03986bf2628 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -947,10 +947,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
}
if (encap)
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 07dd3c3b1771..4e6f00af17d9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1877,7 +1877,7 @@ netxen_tso_check(struct net_device *netdev,
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 672480c9d195..d61cd32ec3b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -412,7 +412,7 @@ static int qed_llh_alloc(struct qed_dev *cdev)
continue;
p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
- DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n",
p_llh_info->num_ppfid, i);
p_llh_info->num_ppfid++;
}
@@ -626,7 +626,7 @@ static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
if (ppfid >= p_llh_info->num_ppfid) {
DP_NOTICE(cdev,
- "ppfid %d is not valid, available indices are 0..%hhd\n",
+ "ppfid %d is not valid, available indices are 0..%d\n",
ppfid, p_llh_info->num_ppfid - 1);
*p_abs_ppfid = 0;
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 82e74f62b677..d701ecd3ba00 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1110,7 +1110,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
bit_len);
/* Some bits represent more than a
- * a single interrupt. Correctly print
+ * single interrupt. Correctly print
* their name.
*/
if (ATTENTION_LENGTH(flags) > 2 ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 69b0ede75cae..5a5dbbb8d8aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -42,8 +42,7 @@ int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
bmap->max_count = max_count;
- bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
- GFP_KERNEL);
+ bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL);
if (!bmap->bitmap)
return -ENOMEM;
@@ -107,7 +106,7 @@ int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
- return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+ return bitmap_empty(bmap->bitmap, bmap->max_count);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -343,7 +342,7 @@ void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
}
end:
- kfree(bmap->bitmap);
+ bitmap_free(bmap->bitmap);
bmap->bitmap = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index b7cc36589f59..7c2af482192d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -260,11 +260,9 @@ static int map_frag_to_bd(struct qede_tx_queue *txq,
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 8d43ca282956..9da5e97f8a0a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -497,7 +497,7 @@ set_flags:
}
opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->hdr_length = hdr_len;
opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e90fa97c0ae6..8dd7aa08ecfb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1869,8 +1869,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
if (!min_tx_rate)
min_tx_rate = QLC_VF_MIN_TX_RATE;
- if (max_tx_rate &&
- (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
+ if (max_tx_rate && max_tx_rate >= 10000) {
netdev_err(netdev,
"Invalid max Tx rate, allowed range is [%d - %d]",
min_tx_rate, QLC_VF_MAX_TX_RATE);
@@ -1880,8 +1879,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
if (!max_tx_rate)
max_tx_rate = 10000;
- if (min_tx_rate &&
- (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ if (min_tx_rate && min_tx_rate < QLC_VF_MIN_TX_RATE) {
netdev_err(netdev,
"Invalid min Tx rate, allowed range is [%d - %d]",
QLC_VF_MIN_TX_RATE, max_tx_rate);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 06104d2ff5b3..0d80447d4d3b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1264,7 +1264,7 @@ static int emac_tso_csum(struct emac_adapter *adpt,
pskb_trim(skb, pkt_len);
}
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* we only need to do csum */
netif_warn(adpt, tx_err, adpt->netdev,
@@ -1339,7 +1339,7 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
/* if Large Segment Offload is (in TCP Segmentation Offload struct) */
if (TPD_LSO(tpd)) {
- mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mapped_len = skb_tcp_all_headers(skb);
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = mapped_len;
@@ -1465,7 +1465,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
/* Make sure the are enough free descriptors to hold one
* maximum-sized SKB. We need one desc for each fragment,
* one for the checksum (emac_tso_csum), one for TSO, and
- * and one for the SKB header.
+ * one for the SKB header.
*/
if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
netif_stop_queue(adpt->netdev);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 407a1f8e3059..a1c10b61269b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -89,7 +89,7 @@ static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
- /* Exit and disable EEE in case of we are are in LPI state. */
+ /* Exit and disable EEE in case we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index b9298031ea51..bb06fa228367 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -8,7 +8,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
+ mae.o tc.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 186cb28c03bd..ee734b69150f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1932,7 +1932,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
efx_update_sw_stats(efx, stats);
out:
+ /* releasing a DMA coherent buffer with BH disabled can panic */
+ spin_unlock_bh(&efx->stats_lock);
efx_nic_free_buffer(efx, &stats_buf);
+ spin_lock_bh(&efx->stats_lock);
return rc;
}
@@ -2535,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
if (rc)
return rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
if (rc)
- return rc;
+ goto out_unlock;
list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
if (rc)
goto fail_add_vlan;
}
- return 0;
+ goto out_unlock;
fail_add_vlan:
efx_mcdi_filter_table_remove(efx);
+out_unlock:
+ up_write(&efx->filter_sem);
return rc;
}
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3208,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
@@ -3240,9 +3251,7 @@ restore_vadaptor:
if (rc2)
goto reset_nic;
restore_filters:
- down_write(&efx->filter_sem);
rc2 = efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3272,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_stop(efx->net_dev);
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
+ efx_ef10_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -3283,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (was_enabled)
@@ -3874,7 +3881,7 @@ static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int efx_tunnel_type, rc;
@@ -3934,7 +3941,7 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int rc;
@@ -4089,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index 173f0ecebc70..71aab3d0480f 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -423,65 +423,61 @@ static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
*/
static void ef100_pci_remove(struct pci_dev *pci_dev)
{
- struct efx_nic *efx;
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ struct efx_probe_data *probe_data;
- efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
- rtnl_lock();
- dev_close(efx->net_dev);
- rtnl_unlock();
-
- /* Unregistering our netdev notifier triggers unbinding of TC indirect
- * blocks, so we have to do it before PCI removal.
- */
- unregister_netdevice_notifier(&efx->netdev_notifier);
-#if defined(CONFIG_SFC_SRIOV)
- if (!efx->type->is_vf)
- efx_ef100_pci_sriov_disable(efx);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ ef100_remove_netdev(probe_data);
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_struct_tc(efx);
#endif
+
ef100_remove(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
- pci_set_drvdata(pci_dev, NULL);
- efx_fini_struct(efx);
- free_netdev(efx->net_dev);
+ pci_dbg(pci_dev, "shutdown successful\n");
pci_disable_pcie_error_reporting(pci_dev);
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ kfree(probe_data);
};
static int ef100_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
struct ef100_func_ctl_window fcw = { 0 };
- struct net_device *net_dev;
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
- if (!net_dev)
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
efx->type = (const struct efx_nic_type *)entry->driver_data;
+ efx->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, efx);
- SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail;
efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
- netif_info(efx, probe, efx->net_dev,
- "Solarflare EF100 NIC detected\n");
+ pci_info(pci_dev, "Solarflare EF100 NIC detected\n");
rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Error looking for ef100 function control window, rc=%d\n",
- rc);
+ pci_err(pci_dev,
+ "Error looking for ef100 function control window, rc=%d\n",
+ rc);
goto fail;
}
@@ -493,8 +489,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
}
if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
- netif_err(efx, probe, efx->net_dev,
- "Func control window overruns BAR\n");
+ pci_err(pci_dev, "Func control window overruns BAR\n");
rc = -EIO;
goto fail;
}
@@ -508,19 +503,16 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
efx->reg_base = fcw.offset;
- efx->netdev_notifier.notifier_call = ef100_netdev_event;
- rc = register_netdevice_notifier(&efx->netdev_notifier);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Failed to register netdevice notifier, rc=%d\n", rc);
+ rc = efx->type->probe(efx);
+ if (rc)
goto fail;
- }
- rc = efx->type->probe(efx);
+ efx->state = STATE_PROBED;
+ rc = ef100_probe_netdev(probe_data);
if (rc)
goto fail;
- netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+ pci_dbg(pci_dev, "initialisation successful\n");
return 0;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 5dba4125d953..702abbe59b76 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -26,7 +26,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 67fe44db6b61..17b9d37218cb 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -22,6 +22,7 @@
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
+#include "ef100_sriov.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -79,11 +80,12 @@ static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
*/
static int ef100_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
+ efx_detach_reps(efx);
netif_stop_queue(net_dev);
efx_stop_all(efx);
efx_mcdi_mac_fini_stats(efx);
@@ -96,13 +98,15 @@ static int ef100_net_stop(struct net_device *net_dev)
efx_mcdi_free_vis(efx);
efx_remove_interrupts(efx);
+ efx->state = STATE_NET_DOWN;
+
return 0;
}
/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int allocated_vis;
int rc;
@@ -172,6 +176,10 @@ static int ef100_net_open(struct net_device *net_dev)
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
+ efx->state = STATE_NET_UP;
+ if (netif_running(efx->net_dev))
+ efx_attach_reps(efx);
+
return 0;
fail:
@@ -189,7 +197,16 @@ fail:
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
+}
+
+netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
+ struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct efx_rep *efv)
+{
struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
int rc;
@@ -204,7 +221,7 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
}
tx_queue = &channel->tx_queue[0];
- rc = ef100_enqueue_skb(tx_queue, skb);
+ rc = __ef100_enqueue_skb(tx_queue, skb, efv);
if (rc == 0)
return NETDEV_TX_OK;
@@ -239,13 +256,14 @@ int ef100_netdev_event(struct notifier_block *this,
struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
+ if (efx->net_dev == net_dev &&
+ (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
ef100_update_name(efx);
return NOTIFY_DONE;
}
-int ef100_register_netdev(struct efx_nic *efx)
+static int ef100_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
int rc;
@@ -271,7 +289,7 @@ int ef100_register_netdev(struct efx_nic *efx)
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
- efx->state = STATE_READY;
+ efx->state = STATE_NET_DOWN;
rtnl_unlock();
efx_init_mcdi_logging(efx);
@@ -283,11 +301,123 @@ fail_locked:
return rc;
}
-void ef100_unregister_netdev(struct efx_nic *efx)
+static void ef100_unregister_netdev(struct efx_nic *efx)
{
if (efx_dev_registered(efx)) {
efx_fini_mcdi_logging(efx);
- efx->state = STATE_UNINIT;
+ efx->state = STATE_PROBED;
unregister_netdev(efx->net_dev);
}
}
+
+void ef100_remove_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+
+ if (!efx->net_dev)
+ return;
+
+ rtnl_lock();
+ dev_close(efx->net_dev);
+ rtnl_unlock();
+
+ unregister_netdevice_notifier(&efx->netdev_notifier);
+#if defined(CONFIG_SFC_SRIOV)
+ if (!efx->type->is_vf)
+ efx_ef100_pci_sriov_disable(efx, true);
+#endif
+
+ ef100_unregister_netdev(efx);
+
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_tc(efx);
+#endif
+
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+ efx_fini_channels(efx);
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+
+ free_netdev(efx->net_dev);
+ efx->net_dev = NULL;
+ efx->state = STATE_PROBED;
+}
+
+int ef100_probe_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+ struct efx_probe_data **probe_ptr;
+ struct net_device *net_dev;
+ int rc;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+ pci_info(efx->pci_dev, "No network port on this PCI function");
+ return 0;
+ }
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
+ SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
+
+ net_dev->features |= efx->type->offload_features;
+ net_dev->hw_features |= efx->type->offload_features;
+ net_dev->hw_enc_features |= efx->type->offload_features;
+ net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ netif_set_tso_max_segs(net_dev,
+ ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+ efx->mdio.dev = net_dev;
+
+ rc = efx_ef100_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail;
+
+ rc = ef100_phy_probe(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ down_write(&efx->filter_sem);
+ rc = ef100_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ goto fail;
+
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+
+ /* Don't fail init if RSS setup doesn't work. */
+ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+
+ rc = ef100_register_netdev(efx);
+ if (rc)
+ goto fail;
+
+ if (!efx->type->is_vf) {
+ rc = ef100_probe_netdev_pf(efx);
+ if (rc)
+ goto fail;
+ }
+
+ efx->netdev_notifier.notifier_call = ef100_netdev_event;
+ rc = register_netdevice_notifier(&efx->netdev_notifier);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to register netdevice notifier, rc=%d\n", rc);
+ goto fail;
+ }
+
+fail:
+ return rc;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h
index d40abb7cc086..86bf985e0951 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.h
+++ b/drivers/net/ethernet/sfc/ef100_netdev.h
@@ -10,8 +10,13 @@
*/
#include <linux/netdevice.h>
+#include "ef100_rep.h"
+netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
+ struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct efx_rep *efv);
int ef100_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
-int ef100_register_netdev(struct efx_nic *efx);
-void ef100_unregister_netdev(struct efx_nic *efx);
+int ef100_probe_netdev(struct efx_probe_data *probe_data);
+void ef100_remove_netdev(struct efx_probe_data *probe_data);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index b2536d2c218a..8061efdaf82c 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -24,6 +24,8 @@
#include "ef100_tx.h"
#include "ef100_sriov.h"
#include "ef100_netdev.h"
+#include "tc.h"
+#include "mae.h"
#include "rx_common.h"
#define EF100_MAX_VIS 4096
@@ -148,7 +150,7 @@ static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
-static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
+int efx_ef100_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -327,7 +329,7 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ef100_phy_probe(struct efx_nic *efx)
+int ef100_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
int rc;
@@ -365,7 +367,7 @@ static int ef100_phy_probe(struct efx_nic *efx)
return 0;
}
-static int ef100_filter_table_probe(struct efx_nic *efx)
+int ef100_filter_table_probe(struct efx_nic *efx)
{
return efx_mcdi_filter_table_probe(efx, true);
}
@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
{
int rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
- if (rc) {
- efx_mcdi_filter_table_down(efx);
- return rc;
- }
+ if (rc)
+ goto fail_unspec;
rc = efx_mcdi_filter_add_vlan(efx, 0);
- if (rc) {
- efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
- efx_mcdi_filter_table_down(efx);
- }
+ if (rc)
+ goto fail_vlan0;
+ /* Drop the lock: we've finished altering table existence, and
+ * filter insertion will need to take the lock for read.
+ */
+ up_write(&efx->filter_sem);
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_tc_insert_rep_filters(efx);
+ /* Rep filter failure is nonfatal */
+ if (rc)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to insert representor filters, rc %d\n",
+ rc);
+#endif
+ return 0;
+fail_vlan0:
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+fail_unspec:
+ efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
return rc;
}
static void ef100_filter_table_down(struct efx_nic *efx)
{
+#ifdef CONFIG_SFC_SRIOV
+ efx_tc_remove_rep_filters(efx);
+#endif
+ down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
}
/* Other
@@ -704,178 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
-/* NIC level access functions
- */
-#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
- NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
- NETIF_F_HW_VLAN_CTAG_TX)
-
-const struct efx_nic_type ef100_pf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = false,
- .probe = ef100_probe_pf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
-
- .check_caps = ef100_check_caps,
-
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
-#endif
-
- .get_phys_port_id = efx_ef100_get_phys_port_id,
-
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
- .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .reconfigure_port = efx_mcdi_port_reconfigure,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
#ifdef CONFIG_SFC_SRIOV
- .sriov_configure = efx_ef100_sriov_configure,
-#endif
+static int efx_ef100_get_base_mport(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ u32 selector, id;
+ int rc;
- /* Per-type bar/size configuration not used on ef100. Location of
- * registers is defined by extended capabilities.
+ /* Construct mport selector for "physical network port" */
+ efx_mae_mport_wire(efx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &id);
+ if (rc)
+ return rc;
+ /* The ID should always fit in 16 bits, because that's how wide the
+ * corresponding fields in the RX prefix & TX override descriptor are
*/
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
-const struct efx_nic_type ef100_vf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = true,
- .probe = ef100_probe_vf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
- .check_caps = ef100_check_caps,
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+ if (id >> 16)
+ netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
+ id);
+ nic_data->base_mport = id;
+ nic_data->have_mport = true;
+ return 0;
+}
#endif
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
- .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
-
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
static int compare_versions(const char *a, const char *b)
{
int a_major, a_minor, a_point, a_patch;
@@ -1077,8 +952,7 @@ static int ef100_check_design_params(struct efx_nic *efx)
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
- netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
- total_len);
+ pci_dbg(efx->pci_dev, "%u bytes of design parameters\n", total_len);
while (offset < total_len) {
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
@@ -1117,9 +991,9 @@ out:
static int ef100_probe_main(struct efx_nic *efx)
{
unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
- struct net_device *net_dev = efx->net_dev;
struct ef100_nic_data *nic_data;
char fw_version[32];
+ u32 priv_mask = 0;
int i, rc;
if (WARN_ON(bar_size == 0))
@@ -1130,24 +1004,18 @@ static int ef100_probe_main(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
nic_data->efx = efx;
- net_dev->features |= efx->type->offload_features;
- net_dev->hw_features |= efx->type->offload_features;
- net_dev->hw_enc_features |= efx->type->offload_features;
- net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ efx->max_vis = EF100_MAX_VIS;
/* Populate design-parameter defaults */
nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- netif_set_tso_max_segs(net_dev,
- ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unsupported design parameters\n");
+ pci_err(efx->pci_dev, "Unsupported design parameters\n");
goto fail;
}
@@ -1184,12 +1052,6 @@ static int ef100_probe_main(struct efx_nic *efx)
/* Post-IO section. */
rc = efx_mcdi_init(efx);
- if (!rc && efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
- netif_info(efx, probe, efx->net_dev,
- "No network port on this PCI function");
- rc = -ENODEV;
- }
if (rc)
goto fail;
/* Reset (most) configuration for this function */
@@ -1205,67 +1067,43 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_ef100_init_datapath_caps(efx);
- if (rc < 0)
- goto fail;
-
- efx->max_vis = EF100_MAX_VIS;
-
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail;
efx->port_num = rc;
efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
- netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);
+ pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version);
+
+ rc = efx_mcdi_get_privilege_mask(efx, &priv_mask);
+ if (rc) /* non-fatal, and priv_mask will still be 0 */
+ pci_info(efx->pci_dev,
+ "Failed to get privilege mask from FW, rc %d\n", rc);
+ nic_data->grp_mae = !!(priv_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE);
if (compare_versions(fw_version, "1.1.0.1000") < 0) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
+ pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
rc = -EINVAL;
goto fail;
}
if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
+ pci_info(efx->pci_dev, "Firmware uses unsolicited-event credits\n");
rc = -EINVAL;
goto fail;
}
- rc = ef100_phy_probe(efx);
- if (rc)
- goto fail;
-
- down_write(&efx->filter_sem);
- rc = ef100_filter_table_probe(efx);
- up_write(&efx->filter_sem);
- if (rc)
- goto fail;
-
- netdev_rss_key_fill(efx->rss_context.rx_hash_key,
- sizeof(efx->rss_context.rx_hash_key));
-
- /* Don't fail init if RSS setup doesn't work. */
- efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
-
- rc = ef100_register_netdev(efx);
- if (rc)
- goto fail;
-
return 0;
fail:
return rc;
}
-int ef100_probe_pf(struct efx_nic *efx)
+int ef100_probe_netdev_pf(struct efx_nic *efx)
{
+ struct ef100_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
- struct ef100_nic_data *nic_data;
- int rc = ef100_probe_main(efx);
-
- if (rc)
- goto fail;
+ int rc;
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr);
if (rc)
goto fail;
@@ -1273,6 +1111,34 @@ int ef100_probe_pf(struct efx_nic *efx)
eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
+ if (!nic_data->grp_mae)
+ return 0;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_init_struct_tc(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef100_get_base_mport(efx);
+ if (rc) {
+ netif_warn(efx, probe, net_dev,
+ "Failed to probe base mport rc %d; representors will not function\n",
+ rc);
+ }
+
+ rc = efx_init_tc(efx);
+ if (rc) {
+ /* Either we don't have an MAE at all (i.e. legacy v-switching),
+ * or we do but we failed to probe it. In the latter case, we
+ * may not have set up default rules, in which case we won't be
+ * able to pass any traffic. However, we don't fail the probe,
+ * because the user might need to use the netdevice to apply
+ * configuration changes to fix whatever's wrong with the MAE.
+ */
+ netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
+ rc);
+ }
+#endif
return 0;
fail:
@@ -1288,14 +1154,6 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
- ef100_unregister_netdev(efx);
-
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
- efx_fini_channels(efx);
- kfree(efx->phy_data);
- efx->phy_data = NULL;
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1303,3 +1161,175 @@ void ef100_remove(struct efx_nic *efx)
kfree(nic_data);
efx->nic_data = NULL;
}
+
+/* NIC level access functions
+ */
+#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
+ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
+ NETIF_F_HW_VLAN_CTAG_TX)
+
+const struct efx_nic_type ef100_pf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = false,
+ .probe = ef100_probe_main,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+
+ .check_caps = ef100_check_caps,
+
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .get_phys_port_id = efx_ef100_get_phys_port_id,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef100_sriov_configure,
+#endif
+
+ /* Per-type bar/size configuration not used on ef100. Location of
+ * registers is defined by extended capabilities.
+ */
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+const struct efx_nic_type ef100_vf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = true,
+ .probe = ef100_probe_vf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+ .check_caps = ef100_check_caps,
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index e799688d5264..0295933145fa 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -8,6 +8,8 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
+#ifndef EFX_EF100_NIC_H
+#define EFX_EF100_NIC_H
#include "net_driver.h"
#include "nic_common.h"
@@ -15,7 +17,7 @@
extern const struct efx_nic_type ef100_pf_nic_type;
extern const struct efx_nic_type ef100_vf_nic_type;
-int ef100_probe_pf(struct efx_nic *efx);
+int ef100_probe_netdev_pf(struct efx_nic *efx);
int ef100_probe_vf(struct efx_nic *efx);
void ef100_remove(struct efx_nic *efx);
@@ -70,6 +72,9 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT];
+ u32 base_mport;
+ bool have_mport; /* base_mport was populated successfully */
+ bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
u16 tso_max_frames;
@@ -78,3 +83,9 @@ struct ef100_nic_data {
#define efx_ef100_has_cap(caps, flag) \
(!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
+
+int efx_ef100_init_datapath_caps(struct efx_nic *efx);
+int ef100_phy_probe(struct efx_nic *efx);
+int ef100_filter_table_probe(struct efx_nic *efx);
+
+#endif /* EFX_EF100_NIC_H */
diff --git a/drivers/net/ethernet/sfc/ef100_regs.h b/drivers/net/ethernet/sfc/ef100_regs.h
index 710bbdb19885..982b6ab1eb62 100644
--- a/drivers/net/ethernet/sfc/ef100_regs.h
+++ b/drivers/net/ethernet/sfc/ef100_regs.h
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -181,12 +181,6 @@
/* RHEAD_BASE_EVENT */
#define ESF_GZ_E_TYPE_LBN 60
#define ESF_GZ_E_TYPE_WIDTH 4
-#define ESE_GZ_EF100_EV_DRIVER 5
-#define ESE_GZ_EF100_EV_MCDI 4
-#define ESE_GZ_EF100_EV_CONTROL 3
-#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2
-#define ESE_GZ_EF100_EV_TX_COMPLETION 1
-#define ESE_GZ_EF100_EV_RX_PKTS 0
#define ESF_GZ_EV_EVQ_PHASE_LBN 59
#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1
#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64
@@ -369,14 +363,18 @@
#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16
-#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128
-#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16
+#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN 128
+#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96
#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32
#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64
#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32
-#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32
-#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 34
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 30
+#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_LBN 33
+#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_WIDTH 1
+#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN 32
+#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_WIDTH 1
#define ESF_GZ_RX_PREFIX_CLASS_LBN 16
#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15
@@ -454,12 +452,8 @@
#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_M2M_RSVD_LBN 120
#define ESF_GZ_M2M_RSVD_WIDTH 2
-#define ESF_GZ_M2M_ADDR_SPC_LBN 108
-#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12
-#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86
-#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22
-#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84
-#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_M2M_ADDR_SPC_ID_LBN 84
+#define ESF_GZ_M2M_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64
#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20
#define ESF_GZ_M2M_ADDR_LBN 0
@@ -492,12 +486,8 @@
#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_TX_SEG_RSVD2_LBN 120
#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2
-#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108
-#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12
-#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86
-#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22
-#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84
-#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_TX_SEG_ADDR_SPC_ID_LBN 84
+#define ESF_GZ_TX_SEG_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_TX_SEG_RSVD_LBN 80
#define ESF_GZ_TX_SEG_RSVD_WIDTH 4
#define ESF_GZ_TX_SEG_LEN_LBN 64
@@ -583,6 +573,12 @@
#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124
+/* Enum D2VIO_MSG_OP */
+#define ESE_GZ_QUE_JBDNE 3
+#define ESE_GZ_QUE_EVICT 2
+#define ESE_GZ_QUE_EMPTY 1
+#define ESE_GZ_NOP 0
+
/* Enum DESIGN_PARAMS */
#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17
#define ESE_EF100_DP_GZ_VI_STRIDES 16
@@ -630,6 +626,19 @@
#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256
#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4
+/* Enum RH_DSC_TYPE */
+#define ESE_GZ_TX_TOMB 0xF
+#define ESE_GZ_TX_VIO 0xE
+#define ESE_GZ_TX_TSO_OVRRD 0x8
+#define ESE_GZ_TX_D2CMP 0x7
+#define ESE_GZ_TX_DATA 0x6
+#define ESE_GZ_TX_D2M 0x5
+#define ESE_GZ_TX_M2M 0x4
+#define ESE_GZ_TX_SEG 0x3
+#define ESE_GZ_TX_TSO 0x2
+#define ESE_GZ_TX_OVRRD 0x1
+#define ESE_GZ_TX_SEND 0x0
+
/* Enum RH_HCLASS_L2_CLASS */
#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1
#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0
@@ -666,6 +675,25 @@
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0
+/* Enum SF_CTL_EVENT_SUBTYPE */
+#define ESE_GZ_EF100_CTL_EV_EVQ_TIMEOUT 0x3
+#define ESE_GZ_EF100_CTL_EV_FLUSH 0x2
+#define ESE_GZ_EF100_CTL_EV_TIME_SYNC 0x1
+#define ESE_GZ_EF100_CTL_EV_UNSOL_OVERFLOW 0x0
+
+/* Enum SF_EVENT_TYPE */
+#define ESE_GZ_EF100_EV_DRIVER 0x5
+#define ESE_GZ_EF100_EV_MCDI 0x4
+#define ESE_GZ_EF100_EV_CONTROL 0x3
+#define ESE_GZ_EF100_EV_TX_TIMESTAMP 0x2
+#define ESE_GZ_EF100_EV_TX_COMPLETION 0x1
+#define ESE_GZ_EF100_EV_RX_PKTS 0x0
+
+/* Enum SF_EW_EVENT_TYPE */
+#define ESE_GZ_EF100_EWEV_VIRTQ_DESC 0x2
+#define ESE_GZ_EF100_EWEV_TXQ_DESC 0x1
+#define ESE_GZ_EF100_EWEV_64BIT 0x0
+
/* Enum TX_DESC_CSO_PARTIAL_EN */
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1
@@ -681,6 +709,15 @@
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1
#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0
+
+/* Enum VIRTIO_NET_HDR_F */
+#define ESE_GZ_NEEDS_CSUM 0x1
+
+/* Enum VIRTIO_NET_HDR_GSO */
+#define ESE_GZ_TCPV6 0x4
+#define ESE_GZ_UDP 0x3
+#define ESE_GZ_TCPV4 0x1
+#define ESE_GZ_NONE 0x0
/**************************************************************************/
#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
new file mode 100644
index 000000000000..73ae4656a6e7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_rep.h"
+#include "ef100_netdev.h"
+#include "ef100_nic.h"
+#include "mae.h"
+#include "rx_common.h"
+
+#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
+
+#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
+static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
+ unsigned int i)
+{
+ efv->parent = efx;
+ efv->idx = i;
+ INIT_LIST_HEAD(&efv->list);
+ efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efv->dflt.acts.list);
+ INIT_LIST_HEAD(&efv->rx_list);
+ spin_lock_init(&efv->rx_lock);
+ efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW;
+ return 0;
+}
+
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&efv->napi);
+ return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ napi_disable(&efv->napi);
+ netif_napi_del(&efv->napi);
+ return 0;
+}
+
+static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ netdev_tx_t rc;
+
+ /* __ef100_hard_start_xmit() will always return success even in the
+ * case of TX drops, where it will increment efx's tx_dropped. The
+ * efv stats really only count attempted TX, not success/failure.
+ */
+ atomic64_inc(&efv->stats.tx_packets);
+ atomic64_add(skb->len, &efv->stats.tx_bytes);
+ netif_tx_lock(efx->net_dev);
+ rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
+ netif_tx_unlock(efx->net_dev);
+ return rc;
+}
+
+static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ struct ef100_nic_data *nic_data;
+
+ nic_data = efx->nic_data;
+ /* nic_data->port_id is a u8[] */
+ ppid->id_len = sizeof(nic_data->port_id);
+ memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
+ return 0;
+}
+
+static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ struct ef100_nic_data *nic_data;
+ int ret;
+
+ nic_data = efx->nic_data;
+ ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
+ nic_data->pf_index, efv->idx);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
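As an aside on the naming scheme above: efx_ef100_rep_get_phys_port_name() reports each representor as "p<port>pf<pf>vf<vf>" and treats truncation as unsupported. The minimal userspace sketch below (not part of the patch; values and helper name are made up for illustration) shows the same snprintf pattern and truncation check.

/* Standalone sketch of the "p<port>pf<pf>vf<vf>" naming used above.
 * Values are invented; only the formatting and the truncation check
 * mirror the driver code.
 */
#include <stdio.h>

static int format_phys_port_name(char *buf, size_t len,
				 unsigned int port, unsigned int pf,
				 unsigned int vf)
{
	int ret = snprintf(buf, len, "p%upf%uvf%u", port, pf, vf);

	if (ret < 0 || (size_t)ret >= len)
		return -1;	/* driver returns -EOPNOTSUPP on truncation */
	return 0;
}

int main(void)
{
	char name[16];

	if (!format_phys_port_name(name, sizeof(name), 0, 0, 3))
		printf("%s\n", name);	/* prints "p0pf0vf3" */
	return 0;
}
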
+static void efx_ef100_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+
+ stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
+ stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
+ stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
+ stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
+ stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
+ stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
+}
+
+static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+ .ndo_open = efx_ef100_rep_open,
+ .ndo_stop = efx_ef100_rep_close,
+ .ndo_start_xmit = efx_ef100_rep_xmit,
+ .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
+ .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
+ .ndo_get_stats64 = efx_ef100_rep_get_stats64,
+};
+
+static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
+}
+
+static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ return efv->msg_enable;
+}
+
+static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
+ u32 msg_enable)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ efv->msg_enable = msg_enable;
+}
+
+static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ ring->rx_max_pending = U32_MAX;
+ ring->rx_pending = efv->rx_pring_size;
+}
+
+static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
+ return -EINVAL;
+
+ efv->rx_pring_size = ring->rx_pending;
+ return 0;
+}
+
+static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
+ .get_drvinfo = efx_ef100_rep_get_drvinfo,
+ .get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
+ .set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
+ .get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
+ .set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
+};
+
+static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
+ unsigned int i)
+{
+ struct net_device *net_dev;
+ struct efx_rep *efv;
+ int rc;
+
+ net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
+ if (!net_dev)
+ return ERR_PTR(-ENOMEM);
+
+ efv = netdev_priv(net_dev);
+ rc = efx_ef100_rep_init_struct(efx, efv, i);
+ if (rc)
+ goto fail1;
+ efv->net_dev = net_dev;
+ rtnl_lock();
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_add_tail(&efv->list, &efx->vf_reps);
+ spin_unlock_bh(&efx->vf_reps_lock);
+ if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
+ netif_device_attach(net_dev);
+ netif_carrier_on(net_dev);
+ } else {
+ netif_carrier_off(net_dev);
+ netif_tx_stop_all_queues(net_dev);
+ }
+ rtnl_unlock();
+
+ net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
+ net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
+ net_dev->features |= NETIF_F_LLTX;
+ net_dev->hw_features |= NETIF_F_LLTX;
+ return efv;
+fail1:
+ free_netdev(net_dev);
+ return ERR_PTR(rc);
+}
+
+static int efx_ef100_configure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+ u32 selector;
+ int rc;
+
+ efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
+ /* Construct mport selector for corresponding VF */
+ efx_mae_mport_vf(efx, efv->idx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
+ /* mport label should fit in 16 bits */
+ WARN_ON(efv->mport >> 16);
+
+ return efx_tc_configure_default_rule_rep(efv);
+}
+
+static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ efx_tc_deconfigure_default_rule(efx, &efv->dflt);
+}
+
+static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ rtnl_lock();
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_del(&efv->list);
+ spin_unlock_bh(&efx->vf_reps_lock);
+ rtnl_unlock();
+ synchronize_rcu();
+ free_netdev(efv->net_dev);
+}
+
+int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
+{
+ struct efx_rep *efv;
+ int rc;
+
+ efv = efx_ef100_rep_create_netdev(efx, i);
+ if (IS_ERR(efv)) {
+ rc = PTR_ERR(efv);
+ pci_err(efx->pci_dev,
+ "Failed to create representor for VF %d, rc %d\n", i,
+ rc);
+ return rc;
+ }
+ rc = efx_ef100_configure_rep(efv);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "Failed to configure representor for VF %d, rc %d\n",
+ i, rc);
+ goto fail1;
+ }
+ rc = register_netdev(efv->net_dev);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "Failed to register representor for VF %d, rc %d\n",
+ i, rc);
+ goto fail2;
+ }
+ pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
+ efv->net_dev->name);
+ return 0;
+fail2:
+ efx_ef100_deconfigure_rep(efv);
+fail1:
+ efx_ef100_rep_destroy_netdev(efv);
+ return rc;
+}
+
+void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
+{
+ struct net_device *rep_dev;
+
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ return;
+ netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
+ unregister_netdev(rep_dev);
+ efx_ef100_deconfigure_rep(efv);
+ efx_ef100_rep_destroy_netdev(efv);
+}
+
+void efx_ef100_fini_vfreps(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_rep *efv, *next;
+
+ if (!nic_data->grp_mae)
+ return;
+
+ list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
+ efx_ef100_vfrep_destroy(efx, efv);
+}
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+ struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+ unsigned int read_index;
+ struct list_head head;
+ struct sk_buff *skb;
+ bool need_resched;
+ int spent = 0;
+
+ INIT_LIST_HEAD(&head);
+ /* Grab up to 'weight' pending SKBs */
+ spin_lock_bh(&efv->rx_lock);
+ read_index = efv->write_index;
+ while (spent < weight && !list_empty(&efv->rx_list)) {
+ skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+ list_del(&skb->list);
+ list_add_tail(&skb->list, &head);
+ spent++;
+ }
+ spin_unlock_bh(&efv->rx_lock);
+ /* Receive them */
+ netif_receive_skb_list(&head);
+ if (spent < weight)
+ if (napi_complete_done(napi, spent)) {
+ spin_lock_bh(&efv->rx_lock);
+ efv->read_index = read_index;
+ /* If write_index advanced while we were doing the
+ * RX, then storing our read_index won't re-prime the
+ * fake-interrupt. In that case, we need to schedule
+ * NAPI again to consume the additional packet(s).
+ */
+ need_resched = efv->write_index != read_index;
+ spin_unlock_bh(&efv->rx_lock);
+ if (need_resched)
+ napi_schedule(&efv->napi);
+ }
+ return spent;
+}
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
+{
+ u8 *eh = efx_rx_buf_va(rx_buf);
+ struct sk_buff *skb;
+ bool primed;
+
+ /* Don't allow too many queued SKBs to build up, as they consume
+ * GFP_ATOMIC memory. If we overrun, just start dropping.
+ */
+ if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "nodesc-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+
+ skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
+ if (!skb) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "noskb-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+ memcpy(skb->data, eh, rx_buf->len);
+ __skb_put(skb, rx_buf->len);
+
+ skb_record_rx_queue(skb, 0); /* rep is single-queue */
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efv->net_dev);
+
+ skb_checksum_none_assert(skb);
+
+ atomic64_inc(&efv->stats.rx_packets);
+ atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
+
+ /* Add it to the rx list */
+ spin_lock_bh(&efv->rx_lock);
+ primed = efv->read_index == efv->write_index;
+ list_add_tail(&skb->list, &efv->rx_list);
+ efv->write_index++;
+ spin_unlock_bh(&efv->rx_lock);
+ /* Trigger rx work */
+ if (primed)
+ napi_schedule(&efv->napi);
+}
+
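The representor RX path above is a pseudo-ring: efx_ef100_rep_rx_packet() bumps write_index and only schedules NAPI when the list was previously drained ("primed"), while efx_ef100_rep_poll() re-primes by storing back its snapshot of write_index and re-schedules itself if the producer moved on in the meantime. The following standalone, single-threaded C sketch models just that index bookkeeping; the names, the queued counter and the absence of locking are illustrative simplifications, not driver code.

/* Toy model of the write_index/read_index "fake interrupt" used by the
 * representor pseudo-ring above.  Locking and real skb handling are
 * omitted; only the priming/re-priming and drop logic are shown.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rep {
	unsigned int write_index;	/* total packets ever enqueued */
	unsigned int read_index;	/* value stored at the last "napi_complete" */
	unsigned int queued;		/* stand-in for the rx_list length */
	unsigned int ring_size;		/* drop threshold (rx_pring_size) */
	bool napi_scheduled;
};

static void toy_rx_packet(struct toy_rep *r)
{
	/* unsigned subtraction handles index wrap, as in the driver */
	if (r->write_index - r->read_index > r->ring_size) {
		printf("drop: pseudo-ring full\n");
		return;
	}
	/* schedule "NAPI" only if the ring was previously un-primed */
	if (r->read_index == r->write_index)
		r->napi_scheduled = true;
	r->queued++;
	r->write_index++;
}

static unsigned int toy_poll(struct toy_rep *r, unsigned int weight)
{
	unsigned int snapshot = r->write_index;
	unsigned int spent = r->queued < weight ? r->queued : weight;

	r->queued -= spent;			/* "netif_receive_skb_list()" */
	if (spent < weight) {
		r->napi_scheduled = false;	/* napi_complete_done() */
		r->read_index = snapshot;	/* re-prime the fake interrupt */
		/* if the producer advanced after the snapshot was taken,
		 * storing it would not re-prime, so re-schedule instead */
		if (r->write_index != snapshot)
			r->napi_scheduled = true;
	}
	return spent;
}

int main(void)
{
	struct toy_rep r = { .ring_size = 64 };
	unsigned int i, spent;

	for (i = 0; i < 3; i++)
		toy_rx_packet(&r);
	spent = toy_poll(&r, 32);
	printf("spent=%u scheduled=%d\n", spent, r.napi_scheduled);
	return 0;
}
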
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
+{
+ struct efx_rep *efv, *out = NULL;
+
+ /* spinlock guards against list mutation while we're walking it;
+ * but caller must also hold rcu_read_lock() to ensure the netdev
+ * isn't freed after we drop the spinlock.
+ */
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_for_each_entry(efv, &efx->vf_reps, list)
+ if (efv->mport == mport) {
+ out = efv;
+ break;
+ }
+ spin_unlock_bh(&efx->vf_reps_lock);
+ return out;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
new file mode 100644
index 000000000000..070f700893c1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Handling for ef100 representor netdevs */
+#ifndef EF100_REP_H
+#define EF100_REP_H
+
+#include "net_driver.h"
+#include "tc.h"
+
+struct efx_rep_sw_stats {
+ atomic64_t rx_packets, tx_packets;
+ atomic64_t rx_bytes, tx_bytes;
+ atomic64_t rx_dropped, tx_errors;
+};
+
+/**
+ * struct efx_rep - Private data for an Efx representor
+ *
+ * @parent: the efx PF which manages this representor
+ * @net_dev: representor netdevice
+ * @msg_enable: log message enable flags
+ * @mport: m-port ID of corresponding VF
+ * @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
+ * @dflt: default-rule for MAE switching
+ * @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
+ * @stats: software traffic counters for netdev stats
+ */
+struct efx_rep {
+ struct efx_nic *parent;
+ struct net_device *net_dev;
+ u32 msg_enable;
+ u32 mport;
+ unsigned int idx;
+ unsigned int write_index, read_index;
+ unsigned int rx_pring_size;
+ struct efx_tc_flow_rule dflt;
+ struct list_head list;
+ struct list_head rx_list;
+ spinlock_t rx_lock;
+ struct napi_struct napi;
+ struct efx_rep_sw_stats stats;
+};
+
+int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
+void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
+void efx_ef100_fini_vfreps(struct efx_nic *efx);
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
+/* Returns the representor corresponding to a VF m-port, or NULL if none.
+ * @mport is an m-port label, *not* an m-port ID!
+ * Caller must hold rcu_read_lock().
+ */
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
+#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 85207acf7dee..65bbe37753e6 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
void __ef100_rx_packet(struct efx_channel *channel)
{
- struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
+ channel->rx_pkt_index);
struct efx_nic *efx = channel->efx;
+ struct ef100_nic_data *nic_data;
u8 *eh = efx_rx_buf_va(rx_buf);
__wsum csum = 0;
+ u16 ing_port;
u32 *prefix;
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto out;
}
+ ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));
+
+ nic_data = efx->nic_data;
+
+ if (nic_data->have_mport && ing_port != nic_data->base_mport) {
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_rep *efv;
+
+ rcu_read_lock();
+ efv = efx_ef100_find_rep_by_mport(efx, ing_port);
+ if (efv) {
+ if (efv->net_dev->flags & IFF_UP)
+ efx_ef100_rep_rx_packet(efv, rx_buf);
+ rcu_read_unlock();
+ /* Representor Rx doesn't care about PF Rx buffer
+ * ownership, it just makes a copy. So, we are done
+ * with the Rx buffer from the PF's point of view and
+ * should free it.
+ */
+ goto free_rx_buffer;
+ }
+ rcu_read_unlock();
+#endif
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Unrecognised ing_port %04x (base %04x), dropping\n",
+ ing_port, nic_data->base_mport);
+ channel->n_rx_mport_bad++;
+ goto free_rx_buffer;
+ }
+
if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
++channel->n_rx_ip_hdr_chksum_err;
@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
}
if (channel->type->receive_skb) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
/* no support for special channels yet, so just discard */
WARN_ON_ONCE(1);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- goto out;
+ goto free_rx_buffer;
}
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+ goto out;
+free_rx_buffer:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
channel->rx_pkt_n_frags = 0;
}
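
For reference, the new ingress-m-port handling in __ef100_rx_packet() above boils down to a three-way decision: packets whose INGRESS_MPORT matches the PF's base m-port take the normal GRO path, packets for a known representor are copied to it and the PF buffer freed, and anything else is counted in n_rx_mport_bad and dropped. The sketch below restates that branch structure in standalone C; the types and the lookup stub are stand-ins, not the driver's.

/* Rough sketch of the ingress-mport demux added above.  Only the
 * branch structure mirrors the driver; everything else is invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum rx_disposition {
	RX_TO_PF,	/* normal GRO path on the PF netdev */
	RX_TO_REP,	/* copied to a representor, PF buffer freed */
	RX_DROP,	/* unknown m-port: count n_rx_mport_bad, free */
};

/* stand-in for efx_ef100_find_rep_by_mport(): true if a representor
 * exists for this m-port label */
static bool toy_find_rep(uint16_t ing_port)
{
	return ing_port == 0x0102;	/* pretend one VF rep exists */
}

static enum rx_disposition toy_demux(uint16_t ing_port, uint16_t base_mport,
				     bool have_mport)
{
	if (!have_mport || ing_port == base_mport)
		return RX_TO_PF;
	if (toy_find_rep(ing_port))
		return RX_TO_REP;
	return RX_DROP;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_demux(0x0001, 0x0001, true),	/* RX_TO_PF */
	       toy_demux(0x0102, 0x0001, true),	/* RX_TO_REP */
	       toy_demux(0x0999, 0x0001, true));	/* RX_DROP */
	return 0;
}
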
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c
index 664578176bfe..94bdbfcb47e8 100644
--- a/drivers/net/ethernet/sfc/ef100_sriov.c
+++ b/drivers/net/ethernet/sfc/ef100_sriov.c
@@ -11,46 +11,62 @@
#include "ef100_sriov.h"
#include "ef100_nic.h"
+#include "ef100_rep.h"
static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
{
+ struct ef100_nic_data *nic_data = efx->nic_data;
struct pci_dev *dev = efx->pci_dev;
- int rc;
+ struct efx_rep *efv, *next;
+ int rc, i;
efx->vf_count = num_vfs;
rc = pci_enable_sriov(dev, num_vfs);
if (rc)
- goto fail;
+ goto fail1;
+ if (!nic_data->grp_mae)
+ return 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ rc = efx_ef100_vfrep_create(efx, i);
+ if (rc)
+ goto fail2;
+ }
return 0;
-fail:
+fail2:
+ list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
+ efx_ef100_vfrep_destroy(efx, efv);
+ pci_disable_sriov(dev);
+fail1:
netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
efx->vf_count = 0;
return rc;
}
-int efx_ef100_pci_sriov_disable(struct efx_nic *efx)
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
unsigned int vfs_assigned;
vfs_assigned = pci_vfs_assigned(dev);
- if (vfs_assigned) {
+ if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
"please detach them before disabling SR-IOV\n");
return -EBUSY;
}
- pci_disable_sriov(dev);
-
+ efx_ef100_fini_vfreps(efx);
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
return 0;
}
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs)
{
if (num_vfs == 0)
- return efx_ef100_pci_sriov_disable(efx);
+ return efx_ef100_pci_sriov_disable(efx, false);
else
return efx_ef100_pci_sriov_enable(efx, num_vfs);
}
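
The SR-IOV enable path above now creates one representor per VF after pci_enable_sriov() and, if any creation fails, destroys the representors created so far and disables SR-IOV again; the disable path gained a force flag so representors are still torn down when VFs remain assigned. A compact sketch of that create-all-or-unwind pattern, with purely illustrative helpers:

/* Sketch of the "create N resources, unwind on first failure" pattern
 * used by the SR-IOV enable path above.  create_one()/destroy_all()
 * are stand-ins for the vfrep create/destroy calls.
 */
#include <stdio.h>

static int create_one(int i)
{
	return i == 3 ? -1 : 0;		/* pretend the 4th rep fails */
}

static void destroy_all(int created)
{
	for (int i = created - 1; i >= 0; i--)
		printf("destroy rep %d\n", i);
}

static int enable_vfs(int num_vfs)
{
	int i;

	/* pci_enable_sriov() would go here */
	for (i = 0; i < num_vfs; i++) {
		if (create_one(i)) {
			destroy_all(i);	/* unwind partial state */
			/* pci_disable_sriov() would go here */
			return -1;
		}
		printf("created rep %d\n", i);
	}
	return 0;
}

int main(void)
{
	if (enable_vfs(8))
		printf("enable failed, state unwound\n");
	return 0;
}
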
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h
index c48fccd46c57..8ffdf464dd1d 100644
--- a/drivers/net/ethernet/sfc/ef100_sriov.h
+++ b/drivers/net/ethernet/sfc/ef100_sriov.h
@@ -11,4 +11,4 @@
#include "net_driver.h"
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs);
-int efx_ef100_pci_sriov_disable(struct efx_nic *efx);
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 26ef51d6b542..102ddc7e206a 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -254,7 +254,8 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb,
- unsigned int segment_count)
+ unsigned int segment_count,
+ struct efx_rep *efv)
{
unsigned int old_write_count = tx_queue->write_count;
unsigned int new_write_count = old_write_count;
@@ -272,6 +273,20 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
else
next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
+ if (unlikely(efv)) {
+ /* Create TX override descriptor */
+ write_ptr = new_write_count & tx_queue->ptr_mask;
+ txd = ef100_tx_desc(tx_queue, write_ptr);
+ ++new_write_count;
+
+ tx_queue->packet_write_count = new_write_count;
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1);
+ nr_descs--;
+ }
+
/* if it's a raw write (such as XDP) then always SEND single frames */
if (!skb)
nr_descs = 1;
@@ -306,6 +321,9 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
/* if it's a raw write (such as XDP) then always SEND */
next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
ESE_GZ_TX_DESC_TYPE_SEND;
+ /* mark as an EFV buffer if applicable */
+ if (unlikely(efv))
+ buffer->flags |= EFX_TX_BUF_EFV;
} while (new_write_count != tx_queue->insert_count);
@@ -324,7 +342,7 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
- ef100_tx_make_descriptors(tx_queue, NULL, 0);
+ ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
ef100_tx_push_buffers(tx_queue);
}
@@ -351,6 +369,12 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
*/
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
+ return __ef100_enqueue_skb(tx_queue, skb, NULL);
+}
+
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv)
+{
unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;
bool xmit_more = netdev_xmit_more();
@@ -376,16 +400,64 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return 0;
}
+ if (unlikely(efv)) {
+ struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ /* Drop representor packets if the queue is stopped.
+ * We currently don't assert backoff to representors, so this is
+ * to make sure representor traffic can't starve the main
+ * net device.
+ * We also drop if there are no TX descriptors left.
+ */
+ if (netif_tx_queue_stopped(tx_queue->core_txq) ||
+ unlikely(efx_tx_buffer_in_use(buffer))) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ /* Also drop representor traffic if it could cause us to
+ * stop the queue. If we assert backoff and we haven't
+ * received traffic on the main net device recently then the
+ * TX watchdog can go off erroneously.
+ */
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ struct efx_tx_queue *txq2;
+
+ /* Refresh cached fill level and re-check */
+ efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV;
+ tx_queue->insert_count++;
+ }
+
/* Map for DMA and create descriptors */
rc = efx_tx_map_data(tx_queue, skb, segments);
if (rc)
goto err;
- ef100_tx_make_descriptors(tx_queue, skb, segments);
+ ef100_tx_make_descriptors(tx_queue, skb, segments, efv);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
struct efx_tx_queue *txq2;
+ /* Because of checks above, representor traffic should
+ * not be able to stop the queue.
+ */
+ WARN_ON(efv);
+
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
@@ -404,8 +476,12 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* If xmit_more then we don't need to push the doorbell, unless there
* are 256 descriptors already queued in which case we have to push to
* ensure we never push more than 256 at once.
+ *
+ * Always push for representor traffic, and don't account it to parent
+ * PF netdevice's BQL.
*/
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
+ if (unlikely(efv) ||
+ __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
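
Representor TX above deliberately never stops the shared PF queue: before accepting a packet it checks the cached fill level, refreshes the cached read counts, and checks again, rejecting the packet (-ENOSPC) rather than stopping the queue and risking a spurious TX watchdog. A standalone sketch of that check/refresh/re-check pattern follows; the queue fields and numbers are illustrative.

/* Sketch of the fill-level "check, refresh cache, re-check" pattern the
 * representor TX path uses above before rejecting a packet.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_txq {
	unsigned int insert_count;	/* descriptors queued by us */
	unsigned int old_read_count;	/* cached completions */
	unsigned int read_count;	/* "hardware" completions */
	unsigned int stop_thresh;
	unsigned int max_skb_descs;
};

static unsigned int old_fill_level(const struct toy_txq *q)
{
	return q->insert_count - q->old_read_count;
}

/* returns true if a representor packet must be rejected (-ENOSPC) */
static bool rep_would_stop_queue(struct toy_txq *q)
{
	unsigned int fill = old_fill_level(q) + q->max_skb_descs;

	if (fill <= q->stop_thresh)
		return false;
	/* cached view looked full: refresh it and check once more */
	q->old_read_count = q->read_count;
	fill = old_fill_level(q) + q->max_skb_descs;
	return fill > q->stop_thresh;
}

int main(void)
{
	struct toy_txq q = {
		.insert_count = 900, .old_read_count = 100,
		.read_count = 850, .stop_thresh = 512, .max_skb_descs = 16,
	};

	/* stale cache says "full", refreshed cache says "room left" */
	printf("reject=%d\n", rep_would_stop_queue(&q));
	return 0;
}
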
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index ddc4b98fa6db..e9e11540fcde 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -13,6 +13,7 @@
#define EFX_EF100_TX_H
#include "net_driver.h"
+#include "ef100_rep.h"
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
@@ -22,4 +23,6 @@ unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv);
#endif
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 7f5aa4a8c451..9aae0d8b713f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -408,8 +408,9 @@ fail1:
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int vfs_assigned = pci_vfs_assigned(dev);
- int rc = 0;
+ int i, rc = 0;
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
return -EBUSY;
}
- if (!vfs_assigned)
+ if (!vfs_assigned) {
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].pci_dev = NULL;
pci_disable_sriov(dev);
- else
+ } else {
rc = -EBUSY;
+ }
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
@@ -497,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
}
rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
@@ -535,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
if (vf->efx) {
/* VF cannot use the vport_id that the PF created */
rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
vf->efx->type->filter_table_probe(vf->efx);
- up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev);
efx_device_attach_if_not_resetting(vf->efx);
}
@@ -576,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_net_stop(vf->efx->net_dev);
mutex_lock(&vf->efx->mac_lock);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
@@ -650,7 +647,6 @@ restore_filters:
if (rc2)
goto reset_nic_up_write;
- up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock);
rc2 = efx_net_open(vf->efx->net_dev);
@@ -662,10 +658,8 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx) {
- up_write(&vf->efx->filter_sem);
+ if (vf->efx)
mutex_unlock(&vf->efx->mac_lock);
- }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 5a772354da83..153d68e29b8b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,14 +106,6 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
- } while (0)
-
/**************************************************************************
*
* Port handling
@@ -378,6 +370,8 @@ static int efx_probe_all(struct efx_nic *efx)
if (rc)
goto fail5;
+ efx->state = STATE_NET_DOWN;
+
return 0;
fail5:
@@ -498,7 +492,7 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
@@ -523,7 +517,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
@@ -544,6 +538,9 @@ int efx_net_open(struct net_device *net_dev)
efx_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev);
+ else
+ efx->state = STATE_NET_UP;
+
efx_selftest_async_start(efx);
return 0;
}
@@ -554,7 +551,7 @@ int efx_net_open(struct net_device *net_dev)
*/
int efx_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
@@ -567,7 +564,7 @@ int efx_net_stop(struct net_device *net_dev)
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_add_vid)
return efx->type->vlan_rx_add_vid(efx, proto, vid);
@@ -577,7 +574,7 @@ static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid
static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_kill_vid)
return efx->type->vlan_rx_kill_vid(efx, proto, vid);
@@ -646,7 +643,7 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -659,7 +656,7 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -681,7 +678,7 @@ static int efx_netdev_event(struct notifier_block *this,
if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
- efx_update_name(netdev_priv(net_dev));
+ efx_update_name(efx_netdev_priv(net_dev));
return NOTIFY_DONE;
}
@@ -720,8 +717,6 @@ static int efx_register_netdev(struct efx_nic *efx)
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
- efx->state = STATE_READY;
- smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
rc = -EIO;
@@ -748,6 +743,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_associate(efx);
+ efx->state = STATE_NET_DOWN;
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -777,7 +774,8 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (!efx->net_dev)
return;
- BUG_ON(netdev_priv(efx->net_dev) != efx);
+ if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
+ return;
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -845,7 +843,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
- BUG_ON(efx->state == STATE_READY);
+ WARN_ON(efx_net_active(efx->state));
efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
@@ -863,6 +861,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
@@ -887,10 +886,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+ pci_dbg(efx->pci_dev, "shutdown successful\n");
efx_fini_struct(efx);
free_netdev(efx->net_dev);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ kfree(probe_data);
pci_disable_pcie_error_reporting(pci_dev);
};
@@ -1044,24 +1045,34 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
static int efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
+ struct efx_probe_data *probe_data, **probe_ptr;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
- EFX_MAX_RX_QUEUES);
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
+ return -ENOMEM;
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
+ efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
@@ -1150,13 +1161,13 @@ static int efx_pm_freeze(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
- efx->state = STATE_UNINIT;
-
+ if (efx_net_active(efx->state)) {
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_disable_interrupts(efx);
+
+ efx->state = efx_freeze(efx->state);
}
rtnl_unlock();
@@ -1171,7 +1182,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
+ if (efx_frozen(efx->state)) {
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
@@ -1184,7 +1195,7 @@ static int efx_pm_thaw(struct device *dev)
efx_device_attach_if_not_resetting(efx);
- efx->state = STATE_READY;
+ efx->state = efx_thaw(efx->state);
efx->type->resume_wol(efx);
}
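
The probe rework above stops embedding struct efx_nic in the netdev private area: efx_pci_probe() kzallocs a struct efx_probe_data that contains the efx_nic and stores only a pointer to it in netdev_priv(), which is why the callbacks now use efx_netdev_priv() instead of netdev_priv(). A small userspace model of that extra level of indirection, with illustrative names:

/* Model of the probe-data indirection introduced above: the netdev's
 * private area holds a *pointer* to separately allocated probe data,
 * and an accessor dereferences it.  All names are invented.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_nic { int port_num; };

struct toy_probe_data {
	void *pci_dev;		/* stand-in */
	struct toy_nic nic;	/* embedded, like efx in efx_probe_data */
};

struct toy_netdev {
	struct toy_probe_data *priv;	/* what "netdev_priv()" holds */
};

static struct toy_probe_data **toy_netdev_priv(struct toy_netdev *nd)
{
	return &nd->priv;
}

/* analogous to efx_netdev_priv(): one extra dereference */
static struct toy_nic *toy_nic_from_netdev(struct toy_netdev *nd)
{
	return &(*toy_netdev_priv(nd))->nic;
}

int main(void)
{
	struct toy_probe_data *pd = calloc(1, sizeof(*pd));
	struct toy_netdev *nd = calloc(1, sizeof(*nd));
	struct toy_probe_data **slot;

	if (!pd || !nd)
		return 1;
	pd->nic.port_num = 2;
	slot = toy_netdev_priv(nd);	/* "*probe_ptr = probe_data;" */
	*slot = pd;

	printf("port %d\n", toy_nic_from_netdev(nd)->port_num);
	free(nd);
	free(pd);
	return 0;
}
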
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index c05a83da9e44..4239c7ece123 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -12,6 +12,7 @@
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
+#include "efx_common.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -206,6 +207,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
{
struct net_device *dev = efx->net_dev;
+ /* We must stop reps (which use our TX) before we stop ourselves. */
+ efx_detach_reps(efx);
+
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
@@ -217,8 +221,11 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
- if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+ if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
netif_device_attach(efx->net_dev);
+ if (efx->state == STATE_NET_UP)
+ efx_attach_reps(efx);
+ }
}
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index f6577e74d6e6..a929a1aaba92 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -24,6 +24,7 @@
#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"
+#include "ef100_rep.h"
static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
@@ -167,7 +168,7 @@ static void efx_mac_work(struct work_struct *data)
int efx_set_mac_address(struct net_device *net_dev, void *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
u8 old_addr[6];
@@ -202,7 +203,7 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -211,7 +212,7 @@ void efx_set_rx_mode(struct net_device *net_dev)
int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
@@ -285,7 +286,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx_check_disabled(efx);
@@ -600,7 +601,7 @@ void efx_stop_all(struct efx_nic *efx)
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx_nic_update_stats_atomic(efx, NULL, stats);
@@ -723,7 +724,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
@@ -898,7 +899,7 @@ static void efx_reset_work(struct work_struct *data)
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
- if (efx->state == STATE_READY)
+ if (efx_net_active(efx->state))
(void)efx_reset(efx, method);
rtnl_unlock();
@@ -908,7 +909,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
- if (efx->state == STATE_RECOVERY) {
+ if (efx_recovering(efx->state)) {
netif_dbg(efx, drv, efx->net_dev,
"recovering: skip scheduling %s reset\n",
RESET_TYPE(type));
@@ -943,7 +944,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (READ_ONCE(efx->state) != STATE_READY)
+ if (!efx_net_active(READ_ONCE(efx->state)))
return;
/* efx_process_channel() will no longer read events once a
@@ -978,8 +979,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {}
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
{
int rc = -ENOMEM;
@@ -998,7 +998,6 @@ int efx_init_struct(struct efx_nic *efx,
efx->state = STATE_UNINIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
- efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_ip_align =
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
@@ -1023,7 +1022,8 @@ int efx_init_struct(struct efx_nic *efx,
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
- efx->mdio.dev = net_dev;
+ spin_lock_init(&efx->vf_reps_lock);
+ INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@@ -1077,13 +1077,11 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
int rc;
efx->mem_bar = UINT_MAX;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
+ pci_dbg(pci_dev, "initialising I/O bar=%d\n", bar);
rc = pci_enable_device(pci_dev);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
+ pci_err(pci_dev, "failed to enable PCI device\n");
goto fail1;
}
@@ -1091,42 +1089,40 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
+ pci_err(efx->pci_dev, "could not find a suitable DMA mask\n");
goto fail2;
}
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long)dma_mask);
+ pci_dbg(efx->pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask);
efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
if (!efx->membase_phys) {
- netif_err(efx, probe, efx->net_dev,
- "ERROR: No BAR%d mapping from the BIOS. "
- "Try pci=realloc on the kernel command line\n", bar);
+ pci_err(efx->pci_dev,
+ "ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n",
+ bar);
rc = -ENODEV;
goto fail3;
}
rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR[%d] failed\n", bar);
+ pci_err(efx->pci_dev,
+ "request for memory BAR[%d] failed\n", bar);
rc = -EIO;
goto fail3;
}
efx->mem_bar = bar;
efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR[%d] at %llx+%x\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size);
+ pci_err(efx->pci_dev,
+ "could not map memory BAR[%d] at %llx+%x\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
+ pci_dbg(efx->pci_dev,
+ "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1142,7 +1138,7 @@ fail1:
void efx_fini_io(struct efx_nic *efx)
{
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+ pci_dbg(efx->pci_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
@@ -1217,13 +1213,15 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx->state = STATE_RECOVERY;
+ efx->state = efx_recover(efx->state);
efx->reset_pending = 0;
efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ if (efx_net_active(efx->state)) {
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+ }
status = PCI_ERS_RESULT_NEED_RESET;
} else {
@@ -1271,7 +1269,7 @@ static void efx_io_resume(struct pci_dev *pdev)
netif_err(efx, hw, efx->net_dev,
"efx_reset failed after PCI error (%d)\n", rc);
} else {
- efx->state = STATE_READY;
+ efx->state = efx_recovered(efx->state);
netif_dbg(efx, hw, efx->net_dev,
"Done resetting and resuming IO after PCI error.\n");
}
@@ -1357,7 +1355,7 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (skb->encapsulation) {
if (features & NETIF_F_GSO_MASK)
@@ -1378,7 +1376,7 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_phys_port_id)
return efx->type->get_phys_port_id(efx, ppid);
@@ -1388,9 +1386,44 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (snprintf(name, len, "p%u", efx->port_num) >= len)
return -EINVAL;
return 0;
}
+
+void efx_detach_reps(struct efx_nic *efx)
+{
+ struct net_device *rep_dev;
+ struct efx_rep *efv;
+
+ ASSERT_RTNL();
+ netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n");
+ list_for_each_entry(efv, &efx->vf_reps, list) {
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ continue;
+ netif_carrier_off(rep_dev);
+ /* See efx_device_detach_sync() */
+ netif_tx_lock_bh(rep_dev);
+ netif_tx_stop_all_queues(rep_dev);
+ netif_tx_unlock_bh(rep_dev);
+ }
+}
+
+void efx_attach_reps(struct efx_nic *efx)
+{
+ struct net_device *rep_dev;
+ struct efx_rep *efv;
+
+ ASSERT_RTNL();
+ netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n");
+ list_for_each_entry(efv, &efx->vf_reps, list) {
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ continue;
+ netif_tx_wake_all_queues(rep_dev);
+ netif_carrier_on(rep_dev);
+ }
+}
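
efx_detach_reps()/efx_attach_reps() above encode an ordering rule: representors transmit through the parent PF's queues, so they are stopped before the parent is detached and woken only after the parent is attached and in STATE_NET_UP. A minimal sketch of that ordering, with illustrative names:

/* Ordering sketch for the representor attach/detach added above:
 * representors borrow the parent's TX path, so they go down first and
 * come back up last.  Illustrative only.
 */
#include <stdio.h>

static void detach_reps(void)	{ printf("stop rep TX queues\n"); }
static void attach_reps(void)	{ printf("wake rep TX queues\n"); }

static void parent_detach(void)
{
	detach_reps();			/* reps first: they use our TX */
	printf("detach parent netdev\n");
}

static void parent_attach(int net_up)
{
	printf("attach parent netdev\n");
	if (net_up)			/* only when the PF is NET_UP */
		attach_reps();
}

int main(void)
{
	parent_detach();
	parent_attach(1);
	return 0;
}
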
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 65513fd0cf6c..2c54dac3e662 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -14,8 +14,7 @@
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size);
void efx_fini_io(struct efx_nic *efx);
-int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
- struct net_device *net_dev);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev);
void efx_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -43,12 +42,11 @@ void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
int efx_reconfigure_port(struct efx_nic *efx);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx)->state != STATE_UNINIT && \
+ (efx)->state != STATE_PROBED) \
+ ASSERT_RTNL(); \
} while (0)
int efx_try_recovery(struct efx_nic *efx);
@@ -64,7 +62,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
@@ -113,4 +111,7 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len);
+
+void efx_detach_reps(struct efx_nic *efx);
+void efx_attach_reps(struct efx_nic *efx);
#endif
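
This patch set switches state checks from comparisons against STATE_READY to helpers such as efx_net_active(), efx_freeze()/efx_thaw() and efx_recovering(). Their definitions live in net_driver.h and are not part of the hunks shown here; one plausible encoding consistent with how they are used, treating FROZEN and RECOVERY as flag bits over the NET_UP/NET_DOWN base states, is sketched below purely as an illustration.

/* Hypothetical encoding behind efx_freeze()/efx_thaw()/efx_recovering()
 * style helpers: flag bits OR'd onto a base NET_UP/NET_DOWN state.  The
 * real definitions are in net_driver.h, which this diff does not show.
 */
#include <stdio.h>

enum toy_state {
	TOY_UNINIT   = 0,
	TOY_NET_DOWN = 1,
	TOY_NET_UP   = 2,
	TOY_DISABLED = 3,
	TOY_FROZEN   = 0x100,	/* flag: suspended for power management */
	TOY_RECOVERY = 0x200,	/* flag: recovering from a PCI error */
};

#define toy_net_active(s)	((s) == TOY_NET_UP || (s) == TOY_NET_DOWN)
#define toy_freeze(s)		((s) | TOY_FROZEN)
#define toy_thaw(s)		((s) & ~TOY_FROZEN)
#define toy_frozen(s)		((s) & TOY_FROZEN)
#define toy_recover(s)		((s) | TOY_RECOVERY)
#define toy_recovered(s)	((s) & ~TOY_RECOVERY)
#define toy_recovering(s)	((s) & TOY_RECOVERY)

int main(void)
{
	int state = TOY_NET_UP;

	state = toy_freeze(state);
	printf("frozen=%d active=%d\n", !!toy_frozen(state),
	       toy_net_active(state));
	state = toy_thaw(state);
	printf("after thaw: active=%d\n", toy_net_active(state));
	return 0;
}
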
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 48506373721a..364323599f7b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -33,7 +33,7 @@
static int efx_ethtool_phys_id(struct net_device *net_dev,
enum ethtool_phys_id_state state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
enum efx_led_mode mode = EFX_LED_DEFAULT;
switch (state) {
@@ -55,13 +55,13 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
- return efx_nic_get_regs_len(netdev_priv(net_dev));
+ return efx_nic_get_regs_len(efx_netdev_priv(net_dev));
}
static void efx_ethtool_get_regs(struct net_device *net_dev,
struct ethtool_regs *regs, void *buf)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
regs->version = efx->type->revision;
efx_nic_get_regs(efx, buf);
@@ -101,7 +101,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
@@ -121,7 +121,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_channel *channel;
unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx;
@@ -163,7 +163,7 @@ efx_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
@@ -177,7 +177,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
@@ -204,7 +204,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->get_wol(efx, wol);
}
@@ -212,14 +212,14 @@ static void efx_ethtool_get_wol(struct net_device *net_dev,
static int efx_ethtool_set_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->set_wol(efx, wol->wolopts);
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
struct ethtool_fec_stats *fec_stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_fec_stats)
efx->type->get_fec_stats(efx, fec_stats);
@@ -228,7 +228,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
struct ethtool_ts_info *ts_info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Software capabilities */
ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bd552c7dffcb..bc840ede3053 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
@@ -103,7 +104,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_mcdi_print_fwver(efx, info->fw_version,
@@ -113,14 +114,14 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->msg_enable;
}
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
efx->msg_enable = msg_enable;
}
@@ -128,7 +129,7 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
bool already_up;
int rc = -ENOMEM;
@@ -137,7 +138,7 @@ void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests)
goto fail;
- if (efx->state != STATE_READY) {
+ if (!efx_net_active(efx->state)) {
rc = -EBUSY;
goto out;
}
@@ -176,7 +177,7 @@ fail:
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
@@ -186,7 +187,7 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev,
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
int rc = 0;
@@ -441,7 +442,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
switch (string_set) {
case ETH_SS_STATS:
@@ -459,7 +460,7 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int i;
switch (string_set) {
@@ -487,7 +488,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
@@ -561,7 +562,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
mutex_lock(&efx->mac_lock);
@@ -584,7 +585,7 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
@@ -604,7 +605,7 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
int efx_ethtool_get_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -617,7 +618,7 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev,
int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -809,7 +810,7 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 rss_context = 0;
s32 rc = 0;
@@ -1127,7 +1128,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
@@ -1148,7 +1149,7 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->n_rx_channels == 1)
return 0;
@@ -1157,7 +1158,7 @@ u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->rx_hash_key_size;
}
@@ -1165,7 +1166,7 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->rx_pull_rss_config(efx);
@@ -1186,7 +1187,7 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
@@ -1205,7 +1206,7 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
int rc = 0;
@@ -1238,7 +1239,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
const u8 hfunc, u32 *rss_context,
bool delete)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
bool allocated = false;
int rc;
@@ -1300,7 +1301,7 @@ out_unlock:
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->map_reset_flags(flags);
@@ -1314,7 +1315,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
@@ -1327,7 +1328,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
int efx_ethtool_get_module_info(struct net_device *net_dev,
struct ethtool_modinfo *modinfo)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/falcon/bitfield.h b/drivers/net/ethernet/sfc/falcon/bitfield.h
index 5eb178d0c149..78537a53009e 100644
--- a/drivers/net/ethernet/sfc/falcon/bitfield.h
+++ b/drivers/net/ethernet/sfc/falcon/bitfield.h
@@ -117,7 +117,7 @@ typedef union ef4_oword {
*
* ( element ) << 4
*
- * The result will contain the relevant bits filled in in the range
+ * The result will contain the relevant bits filled in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 2c91792cec01..c64623c2e80c 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -2711,7 +2711,7 @@ void ef4_farch_filter_table_remove(struct ef4_nic *efx)
enum ef4_farch_filter_table_id table_id;
for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2740,9 +2740,7 @@ int ef4_farch_filter_table_probe(struct ef4_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
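For reference, a minimal sketch (not part of the patch) of the allocation change above: bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(...))/kfree() pairing while keeping the same zeroed-bitmap semantics.

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Sketch only: old vs. new way of getting a zeroed bitmap of nbits bits.
 * bitmap_zalloc() must be paired with bitmap_free(), not kfree().
 */
static unsigned long *example_alloc_used_bitmap(unsigned int nbits)
{
	/* Previously: kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
	 *                     GFP_KERNEL);
	 */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void example_free_used_bitmap(unsigned long *bitmap)
{
	bitmap_free(bitmap);	/* previously: kfree(bitmap) */
}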
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a381cf9ec4f3..a2c7139f2b32 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -679,7 +679,7 @@ union ef4_multicast_hash {
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
* @rx_ip_align: RX DMA address offset to have IP header aligned in
- * in accordance with NET_IP_ALIGN
+ * accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 40b2af8bfb81..4d928839d292 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -88,6 +88,7 @@ enum efx_filter_priority {
* the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
+ * @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
@@ -95,6 +96,7 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
+ EFX_FILTER_FLAG_VPORT_ID = 0x20,
};
/** enum efx_encap_type - types of encapsulation
@@ -127,6 +129,9 @@ enum efx_encap_type {
* MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
+ * @vport_id: Virtual port ID associated with RX queue, for adapter switching,
+ * if %EFX_FILTER_FLAG_VPORT_ID is set. This is an MCFW vport_id, or on
+ * EF100 an mport selector.
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
@@ -156,6 +161,7 @@ struct efx_filter_spec {
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
+ u32 vport_id;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}
+/**
+ * efx_filter_set_vport_id - override virtual port ID used by the filter
+ * @spec: Specification to initialise
+ * @vport_id: firmware ID of the virtual port
+ */
+static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
+ u32 vport_id)
+{
+ spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
+ spec->vport_id = vport_id;
+}
+
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type)
{
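As a quick illustration (not part of the patch) of how the new flag and helper fit together: a caller builds an RX filter spec as usual and then overrides the destination vport. efx_filter_init_rx() is the existing spec initialiser; example_vport_id is a hypothetical MCFW vport ID obtained elsewhere (e.g. from an MAE mport lookup).

#include "filter.h"

/* Sketch only: steer traffic matching this filter to a specific vport. */
static void example_build_vport_filter(struct efx_filter_spec *spec,
				       u32 example_vport_id)
{
	/* RX filter, manual priority, no extra flags, placeholder queue 0 */
	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	/* Sets EFX_FILTER_FLAG_VPORT_ID and records the vport, so the MCDI
	 * filter code (see the mcdi_filters.c hunk below) uses
	 * spec->vport_id instead of efx->vport_id.
	 */
	efx_filter_set_vport_id(spec, example_vport_id);
}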
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
new file mode 100644
index 000000000000..97627f5e3674
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mae.h"
+#include "mcdi.h"
+#include "mcdi_pcol_mae.h"
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ if (WARN_ON_ONCE(!id))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!label))
+ return -EINVAL;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+ MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+ MAE_MPORT_SELECTOR_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+ *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+ return 0;
+}
+
+int efx_mae_free_mport(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
+ return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
+ MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+/* Constructs an mport selector from an mport ID, because they're not the same */
+void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
+ MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+/* id is really only 24 bits wide */
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
+ return 0;
+}
+
+static bool efx_mae_asl_id(u32 id)
+{
+ return !!(id & BIT(31));
+}
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+ MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
+ MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
+ MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
+ if (act->deliver)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
+ act->dest_mport);
+ BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
+ /* We rely on the high bit of AS IDs always being clear.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ return -EIO;
+ }
+ return 0;
+}
+
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-sets exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
+ return -EIO;
+ return 0;
+}
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+ struct efx_tc_action_set *act;
+ size_t inlen, outlen, i = 0;
+ efx_dword_t *inbuf;
+ int rc;
+
+ list_for_each_entry(act, &acts->list, list)
+ i++;
+ if (i == 0)
+ return -EINVAL;
+ if (i == 1) {
+ /* Don't wrap an ASL around a single AS, just use the AS_ID
+ * directly. ASLs are a more limited resource.
+ */
+ act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
+ acts->fw_id = act->fw_id;
+ return 0;
+ }
+ if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
+ return -EOPNOTSUPP; /* Too many actions */
+ inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+ i = 0;
+ list_for_each_entry(act, &acts->list, list) {
+ MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
+ i, act->fw_id);
+ i++;
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out_free;
+ if (outlen < sizeof(outbuf)) {
+ rc = -EIO;
+ goto out_free;
+ }
+ acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+ /* We rely on the high bit of ASL IDs always being set.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
+ efx_mae_free_action_set_list(efx, acts);
+ rc = -EIO;
+ }
+out_free:
+ kfree(inbuf);
+ return rc;
+}
+
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ /* If this is just an AS_ID with no ASL wrapper, then there is
+ * nothing for us to free. (The AS will be freed later.)
+ */
+ if (efx_mae_asl_id(acts->fw_id)) {
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
+ acts->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-set-lists exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
+ return -EIO;
+ }
+ /* We're probably about to free @acts, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
+ return 0;
+}
+
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match)
+{
+ if (match->mask.ingress_port) {
+ if (~match->mask.ingress_port)
+ return -EOPNOTSUPP;
+ MCDI_STRUCT_SET_DWORD(match_crit,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
+ match->value.ingress_port);
+ }
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
+ match->mask.ingress_port);
+ return 0;
+}
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ MCDI_DECLARE_STRUCT_PTR(response);
+ size_t outlen;
+ int rc;
+
+ if (!id)
+ return -EINVAL;
+
+ match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+ response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+ if (efx_mae_asl_id(acts_id)) {
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+ MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+ } else {
+ /* We only had one AS, so we didn't wrap it in an ASL */
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+ rc = efx_mae_populate_match_criteria(match_crit, match);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+ return 0;
+}
+
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what rules exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
+ return -EIO;
+ return 0;
+}
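One detail worth calling out from the file above: action-set IDs and action-set-list IDs share a single u32, distinguished only by bit 31 (ASL IDs have it set, AS IDs have it clear, as the firmware guarantees). A standalone sketch of that convention, using illustrative code rather than firmware-defined constants:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: mirrors efx_mae_asl_id(). A rule's acts_id can therefore carry
 * either kind of ID, and efx_mae_insert_rule() picks the right response field.
 */
static bool example_is_asl_id(uint32_t id)
{
	return (id & (UINT32_C(1) << 31)) != 0;
}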
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
new file mode 100644
index 000000000000..0369be4d8983
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF100_MAE_H
+#define EF100_MAE_H
+/* MCDI interface for the ef100 Match-Action Engine */
+
+#include "net_driver.h"
+#include "tc.h"
+#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
+int efx_mae_free_mport(struct efx_nic *efx, u32 id);
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
+void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
+
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id);
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+
+#endif /* EF100_MAE_H */
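Taken together, the declarations above imply a call sequence along the following lines. This is a hedged sketch, not code from the patch: example_mae_offload() is a hypothetical caller, the priority value is a placeholder, and efx/act/acts/match are assumed to have been populated elsewhere (with acts->list containing act).

#include "mae.h"

/* Sketch only: allocate the action side, insert a rule referencing it, and
 * unwind in reverse order on failure.
 */
static int example_mae_offload(struct efx_nic *efx,
			       struct efx_tc_action_set *act,
			       struct efx_tc_action_set_list *acts,
			       const struct efx_tc_match *match,
			       u32 *rule_id)
{
	int rc;

	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		return rc;
	/* Only wraps the action set(s) in an ASL when there is more than one. */
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail_as;
	/* acts->fw_id may be either an AS or an ASL ID (see bit-31 convention). */
	rc = efx_mae_insert_rule(efx, match, 0 /* placeholder prio */,
				 acts->fw_id, rule_id);
	if (rc)
		goto fail_asl;
	return 0;

fail_asl:
	efx_mae_free_action_set_list(efx, acts);
fail_as:
	efx_mae_free_action_set(efx, act->fw_id);
	return rc;
}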
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 50baf62b2cbc..af338208eae9 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -99,14 +99,12 @@ int efx_mcdi_init(struct efx_nic *efx)
*/
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
+ pci_err(efx->pci_dev, "Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
+ pci_err(efx->pci_dev, "Host already registered with MCPU\n");
if (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
@@ -1261,7 +1259,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
}
/* The MC is going down in to BIST mode. set the BIST flag to block
- * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
* (which doesn't actually execute a reset, it waits for the controlling
* function to reset it).
*/
@@ -1447,7 +1445,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
return;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
buf[0] = 0;
}
@@ -1471,8 +1469,9 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* care what firmware we get.
*/
if (rc == -EPERM) {
- netif_dbg(efx, probe, efx->net_dev,
- "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ pci_dbg(efx->pci_dev,
+ "%s with fw-variant setting failed EPERM, trying without it\n",
+ __func__);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
MC_CMD_FW_DONT_CARE);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
@@ -1514,7 +1513,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
return 0;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -2130,6 +2129,52 @@ fail:
return rc;
}
+/* Failure to read a privilege mask is never fatal, because we can always
+ * carry on as though we didn't have the privilege we were interested in.
+ * So use efx_mcdi_rpc_quiet().
+ */
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
+{
+ MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+ MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+ size_t outlen;
+ u16 pf, vf;
+ int rc;
+
+ if (!efx || !mask)
+ return -EINVAL;
+
+ /* Get our function number */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
+ fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
+ &outlen);
+ if (rc != 0)
+ return rc;
+ if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
+ return -EIO;
+
+ pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
+ vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);
+
+ MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ *mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return 0;
+}
+
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
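A hedged sketch of how a caller might consume the new helper above; example_privilege_bit stands in for whichever MC_CMD_PRIVILEGE_MASK_IN_GRP_* bit the caller cares about (not named by this hunk), and the fallback mirrors the "never fatal" comment.

#include "mcdi.h"

/* Sketch only: treat any failure to read the mask as "privilege absent". */
static bool example_has_privilege(struct efx_nic *efx, u32 example_privilege_bit)
{
	u32 mask;

	if (efx_mcdi_get_privilege_mask(efx, &mask))
		return false;
	return mask & example_privilege_bit;
}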
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 69c2924a147c..26bc69f76801 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define _MCDI_STRUCT_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
@@ -366,6 +370,7 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 1523be77b9db..4ff6586116ee 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
+ if (flags & EFX_FILTER_FLAG_VPORT_ID)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
+ else
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
+ saved_spec->vport_id = spec->vport_id;
}
} else if (!replacing) {
kfree(saved_spec);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 06426aa9f2f3..c0d6558b9fd2 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
*/
bool mc_chaining;
bool vlan_filter;
+ /* Entries on the vlan_list are added/removed under filter_sem */
struct list_head vlan_list;
};
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index ff617b1b38d3..cd297e19cddc 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -165,138 +165,8 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
-/* Operation not permitted. */
-#define MC_CMD_ERR_EPERM 1
-/* Non-existent command target */
-#define MC_CMD_ERR_ENOENT 2
-/* assert() has killed the MC */
-#define MC_CMD_ERR_EINTR 4
-/* I/O failure */
-#define MC_CMD_ERR_EIO 5
-/* Already exists */
-#define MC_CMD_ERR_EEXIST 6
-/* Try again */
-#define MC_CMD_ERR_EAGAIN 11
-/* Out of memory */
-#define MC_CMD_ERR_ENOMEM 12
-/* Caller does not hold required locks */
-#define MC_CMD_ERR_EACCES 13
-/* Resource is currently unavailable (e.g. lock contention) */
-#define MC_CMD_ERR_EBUSY 16
-/* No such device */
-#define MC_CMD_ERR_ENODEV 19
-/* Invalid argument to target */
-#define MC_CMD_ERR_EINVAL 22
-/* Broken pipe */
-#define MC_CMD_ERR_EPIPE 32
-/* Read-only */
-#define MC_CMD_ERR_EROFS 30
-/* Out of range */
-#define MC_CMD_ERR_ERANGE 34
-/* Non-recursive resource is already acquired */
-#define MC_CMD_ERR_EDEADLK 35
-/* Operation not implemented */
-#define MC_CMD_ERR_ENOSYS 38
-/* Operation timed out */
-#define MC_CMD_ERR_ETIME 62
-/* Link has been severed */
-#define MC_CMD_ERR_ENOLINK 67
-/* Protocol error */
-#define MC_CMD_ERR_EPROTO 71
-/* Operation not supported */
-#define MC_CMD_ERR_ENOTSUP 95
-/* Address not available */
-#define MC_CMD_ERR_EADDRNOTAVAIL 99
-/* Not connected */
-#define MC_CMD_ERR_ENOTCONN 107
-/* Operation already in progress */
-#define MC_CMD_ERR_EALREADY 114
-
-/* Resource allocation failed. */
-#define MC_CMD_ERR_ALLOC_FAIL 0x1000
-/* V-adaptor not found. */
-#define MC_CMD_ERR_NO_VADAPTOR 0x1001
-/* EVB port not found. */
-#define MC_CMD_ERR_NO_EVB_PORT 0x1002
-/* V-switch not found. */
-#define MC_CMD_ERR_NO_VSWITCH 0x1003
-/* Too many VLAN tags. */
-#define MC_CMD_ERR_VLAN_LIMIT 0x1004
-/* Bad PCI function number. */
-#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
-/* Invalid VLAN mode. */
-#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
-/* Invalid v-switch type. */
-#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
-/* Invalid v-port type. */
-#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
-/* MAC address exists. */
-#define MC_CMD_ERR_MAC_EXIST 0x1009
-/* Slave core not present */
-#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
-/* The datapath is disabled. */
-#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
-/* The requesting client is not a function */
-#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
-/* The requested operation might require the
- command to be passed between MCs, and the
- transport doesn't support that. Should
- only ever been seen over the UART. */
-#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
-/* VLAN tag(s) exists */
-#define MC_CMD_ERR_VLAN_EXIST 0x100e
-/* No MAC address assigned to an EVB port */
-#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
-/* Notifies the driver that the request has been relayed
- * to an admin function for authorization. The driver should
- * wait for a PROXY_RESPONSE event and then resend its request.
- * This error code is followed by a 32-bit handle that
- * helps matching it with the respective PROXY_RESPONSE event. */
-#define MC_CMD_ERR_PROXY_PENDING 0x1010
-#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
-/* The request cannot be passed for authorization because
- * another request from the same function is currently being
- * authorized. The drvier should try again later. */
-#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
-/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
- * that has enabled proxying or BLOCK_INDEX points to a function that
- * doesn't await an authorization. */
-#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
-/* This code is currently only used internally in FW. Its meaning is that
- * an operation failed due to lack of SR-IOV privilege.
- * Normally it is translated to EPERM by send_cmd_err(),
- * but it may also be used to trigger some special mechanism
- * for handling such case, e.g. to relay the failed request
- * to a designated admin function for authorization. */
-#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
-/* Workaround 26807 could not be turned on/off because some functions
- * have already installed filters. See the comment at
- * MC_CMD_WORKAROUND_BUG26807.
- * May also returned for other operations such as sub-variant switching. */
-#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
- * doesn't exist on this NIC */
-#define MC_CMD_ERR_NO_CLOCK 0x1015
-/* Returned by MC_CMD_TESTASSERT if the action that should
- * have caused an assertion failed to do so. */
-#define MC_CMD_ERR_UNREACHABLE 0x1016
-/* This command needs to be processed in the background but there were no
- * resources to do so. Send it again after a command has completed. */
-#define MC_CMD_ERR_QUEUE_FULL 0x1017
-/* The operation could not be completed because the PCIe link has gone
- * away. This error code is never expected to be returned over the TLP
- * transport. */
-#define MC_CMD_ERR_NO_PCIE 0x1018
-/* The operation could not be completed because the datapath has gone
- * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
- * datapath absence may be temporary*/
-#define MC_CMD_ERR_NO_DATAPATH 0x1019
-/* The operation could not complete because some VIs are allocated */
-#define MC_CMD_ERR_VIS_PRESENT 0x101a
-/* The operation could not complete because some PIO buffers are allocated */
-#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-
#define MC_CMD_ERR_CODE_OFST 0
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
/* We define 8 "escape" commands to allow
for command number space extension */
@@ -365,10 +235,857 @@
*/
#define MC_CMD_ERR_ARG_OFST 4
-/* No space */
-#define MC_CMD_ERR_ENOSPC 28
-
-/* MCDI_EVENT structuredef */
+/* MC_CMD_ERR enum: Public MCDI error codes. Error codes that correspond to
+ * POSIX errnos should use the same numeric values that linux does. Error codes
+ * specific to Solarflare firmware should use values in the range 0x1000 -
+ * 0x10ff. The range 0x2000 - 0x20ff is reserved for private error codes (see
+ * MC_CMD_ERR_PRIV below).
+ */
+/* enum: Operation not permitted. */
+#define MC_CMD_ERR_EPERM 0x1
+/* enum: Non-existent command target */
+#define MC_CMD_ERR_ENOENT 0x2
+/* enum: assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 0x4
+/* enum: I/O failure */
+#define MC_CMD_ERR_EIO 0x5
+/* enum: Already exists */
+#define MC_CMD_ERR_EEXIST 0x6
+/* enum: Try again */
+#define MC_CMD_ERR_EAGAIN 0xb
+/* enum: Out of memory */
+#define MC_CMD_ERR_ENOMEM 0xc
+/* enum: Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 0xd
+/* enum: Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 0x10
+/* enum: No such device */
+#define MC_CMD_ERR_ENODEV 0x13
+/* enum: Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 0x16
+/* enum: No space */
+#define MC_CMD_ERR_ENOSPC 0x1c
+/* enum: Read-only */
+#define MC_CMD_ERR_EROFS 0x1e
+/* enum: Broken pipe */
+#define MC_CMD_ERR_EPIPE 0x20
+/* enum: Out of range */
+#define MC_CMD_ERR_ERANGE 0x22
+/* enum: Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 0x23
+/* enum: Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 0x26
+/* enum: Operation timed out */
+#define MC_CMD_ERR_ETIME 0x3e
+/* enum: Link has been severed */
+#define MC_CMD_ERR_ENOLINK 0x43
+/* enum: Protocol error */
+#define MC_CMD_ERR_EPROTO 0x47
+/* enum: Bad message */
+#define MC_CMD_ERR_EBADMSG 0x4a
+/* enum: Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 0x5f
+/* enum: Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 0x63
+/* enum: Not connected */
+#define MC_CMD_ERR_ENOTCONN 0x6b
+/* enum: Operation already in progress */
+#define MC_CMD_ERR_EALREADY 0x72
+/* enum: Stale handle. The handle references a resource that no longer exists.
+ */
+#define MC_CMD_ERR_ESTALE 0x74
+/* enum: Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* enum: V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* enum: EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* enum: V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* enum: Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* enum: Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* enum: Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* enum: Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* enum: Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* enum: MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* enum: Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* enum: The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* enum: The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* enum: The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. Should only ever be seen over
+ * the UART.
+ */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* enum: VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* enum: No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* enum: Notifies the driver that the request has been relayed to an admin
+ * function for authorization. The driver should wait for a PROXY_RESPONSE
+ * event and then resend its request. This error code is followed by a 32-bit
+ * handle that helps matching it with the respective PROXY_RESPONSE event.
+ */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+/* enum: The request cannot be passed for authorization because another request
+ * from the same function is currently being authorized. The driver should try
+ * again later.
+ */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* enum: Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that doesn't
+ * await an authorization.
+ */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* enum: This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege. Normally it is
+ * translated to EPERM by send_cmd_err(), but it may also be used to trigger
+ * some special mechanism for handling such case, e.g. to relay the failed
+ * request to a designated admin function for authorization.
+ */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* enum: Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. May also be returned for other operations such as
+ * sub-variant switching.
+ */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
+ * this NIC
+ */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* enum: Returned by MC_CMD_TESTASSERT if the action that should have caused an
+ * assertion failed to do so.
+ */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* enum: This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed.
+ */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* enum: The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport.
+ */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* enum: The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary
+ */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* enum: The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* enum: The operation could not complete because some PIO buffers are
+ * allocated
+ */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+
+/* MC_CMD_FPGA_FLASH_INDEX enum */
+#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
+#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
+
+/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
+/* enum: Legacy mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
+/* enum: Switchdev mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
+/* enum: Bootstrap mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
+/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
+
+/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
+ * interfaces. There is a need to refer to interfaces explicitly from drivers
+ * (for example, a management driver on one interface administering a function
+ * on another interface). This enumeration provides stable identifiers to all
+ * interfaces present on a product. Product documentation will specify which
+ * interfaces exist and their associated identifier. In general, drivers
+ * should not assign special meanings to specific values. Instead, behaviour
+ * should be determined by NIC configuration, which will identify interfaces
+ * where appropriate.
+ */
+/* enum: Primary host interfaces. Typically (i.e. for all known SFC products)
+ * the interface exposed on the edge connector (or form factor equivalent).
+ */
+#define PCIE_INTERFACE_HOST_PRIMARY 0x0
+/* enum: Riverhead and keystone products have a second PCIe interface to which
+ * an on-NIC ARM module is expected to be connected.
+ */
+#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: For MCDI commands issued over a PCIe interface, this value is
+ * translated into the interface over which the command was issued. Not
+ * meaningful for other MCDI transports.
+ */
+#define PCIE_INTERFACE_CALLER 0xffffffff
+
+/* MC_CLIENT_ID_SPECIFIER enum */
+/* enum: Equivalent to the caller's client ID */
+#define MC_CMD_CLIENT_ID_SELF 0xffffffff
+
+/* MAE_FIELD_SUPPORT_STATUS enum */
+/* enum: The NIC does not support this field. The driver must ensure that any
+ * mask associated with this field in a match rule is zeroed. The NIC may
+ * either reject requests with an invalid mask for such a field, or may assume
+ * that the mask is zero. (This category only exists to describe behaviour for
+ * fields that a newer driver might know about but that older firmware does
+ * not. It is recommended that firmware report MAE_FIELD_FIELD_MATCH_NEVER for
+ * all match fields defined at the time of its compilation. If a driver sees a
+ * field support status value that it does not recognise, it must treat that
+ * field as though the field was reported as MAE_FIELD_SUPPORTED_MATCH_NEVER,
+ * and must never set a non-zero mask value for this field.)
+ */
+#define MAE_FIELD_UNSUPPORTED 0x0
+/* enum: The NIC supports this field, but cannot use it in a match rule. The
+ * driver must ensure that any mask for such a field in a match rule is zeroed.
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_NEVER 0x1
+/* enum: The NIC supports this field, and must use it in all match rules. The
+ * driver must ensure that any mask for such a field is all ones. The NIC will
+ * reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_ALWAYS 0x2
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or all ones. The NIC will reject requests with an invalid mask for such a
+ * field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_OPTIONAL 0x3
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or a consecutive set of ones followed by all zeroes (starting from MSB).
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_PREFIX 0x4
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver may provide an arbitrary mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_MASK 0x5
+
+/* MAE_CT_VNI_MODE enum: Controls the layout of the VNI input to the conntrack
+ * lookup. (Values are not arbitrary - constrained by table access ABI.)
+ */
+/* enum: The VNI input to the conntrack lookup will be zero. */
+#define MAE_CT_VNI_MODE_ZERO 0x0
+/* enum: The VNI input to the conntrack lookup will be the VNI (VXLAN/Geneve)
+ * or VSID (NVGRE) field from the packet.
+ */
+#define MAE_CT_VNI_MODE_VNI 0x1
+/* enum: The VNI input to the conntrack lookup will be the VLAN ID from the
+ * outermost VLAN tag (in bottom 12 bits; top 12 bits zero).
+ */
+#define MAE_CT_VNI_MODE_1VLAN 0x2
+/* enum: The VNI input to the conntrack lookup will be the VLAN IDs from both
+ * VLAN tags (outermost in bottom 12 bits, innermost in top 12 bits).
+ */
+#define MAE_CT_VNI_MODE_2VLAN 0x3
+
+/* MAE_FIELD enum: NB: this enum shares namespace with the support status enum.
+ */
+/* enum: Source mport upon entering the MAE. */
+#define MAE_FIELD_INGRESS_PORT 0x0
+#define MAE_FIELD_MARK 0x1 /* enum */
+/* enum: Table ID used in action rule. Initially zero, can be changed in action
+ * rule response.
+ */
+#define MAE_FIELD_RECIRC_ID 0x2
+#define MAE_FIELD_IS_IP_FRAG 0x3 /* enum */
+#define MAE_FIELD_DO_CT 0x4 /* enum */
+#define MAE_FIELD_CT_HIT 0x5 /* enum */
+/* enum: Undefined unless CT_HIT=1. */
+#define MAE_FIELD_CT_MARK 0x6
+/* enum: Undefined unless DO_CT=1. */
+#define MAE_FIELD_CT_DOMAIN 0x7
+/* enum: Undefined unless CT_HIT=1. */
+#define MAE_FIELD_CT_PRIVATE_FLAGS 0x8
+/* enum: 1 if the packet ingressed the NIC from one of the MACs, else 0. */
+#define MAE_FIELD_IS_FROM_NETWORK 0x9
+/* enum: 1 if the packet has 1 or more VLAN tags, else 0. */
+#define MAE_FIELD_HAS_OVLAN 0xa
+/* enum: 1 if the packet has 2 or more VLAN tags, else 0. */
+#define MAE_FIELD_HAS_IVLAN 0xb
+/* enum: 1 if the outer packet has 1 or more VLAN tags, else 0; only present
+ * when encap
+ */
+#define MAE_FIELD_ENC_HAS_OVLAN 0xc
+/* enum: 1 if the outer packet has 2 or more VLAN tags, else 0; only present
+ * when encap
+ */
+#define MAE_FIELD_ENC_HAS_IVLAN 0xd
+/* enum: Packet is IP fragment */
+#define MAE_FIELD_ENC_IP_FRAG 0xe
+#define MAE_FIELD_ETHER_TYPE 0x21 /* enum */
+#define MAE_FIELD_VLAN0_TCI 0x22 /* enum */
+#define MAE_FIELD_VLAN0_PROTO 0x23 /* enum */
+#define MAE_FIELD_VLAN1_TCI 0x24 /* enum */
+#define MAE_FIELD_VLAN1_PROTO 0x25 /* enum */
+/* enum: Inner when encap */
+#define MAE_FIELD_ETH_SADDR 0x28
+/* enum: Inner when encap */
+#define MAE_FIELD_ETH_DADDR 0x29
+/* enum: Inner when encap. NB: IPv4 and IPv6 fields are mutually exclusive. */
+#define MAE_FIELD_SRC_IP4 0x2a
+/* enum: Inner when encap */
+#define MAE_FIELD_SRC_IP6 0x2b
+/* enum: Inner when encap */
+#define MAE_FIELD_DST_IP4 0x2c
+/* enum: Inner when encap */
+#define MAE_FIELD_DST_IP6 0x2d
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_PROTO 0x2e
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_TOS 0x2f
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_TTL 0x30
+/* enum: Inner when encap TODO: how is this defined? The raw flags +
+ * frag_offset from the packet, or some derived value more amenable to ternary
+ * matching? TODO: there was a proposal for driver-allocation fields. The
+ * driver would provide some instruction for how to extract given field values,
+ * and would be given a field id in return. It could then use that field id in
+ * its matches. This feels like it would be extremely hard to implement in
+ * hardware, but I mention it for completeness.
+ */
+#define MAE_FIELD_IP_FLAGS 0x31
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_SPORT 0x32
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_DPORT 0x33
+/* enum: Inner when encap */
+#define MAE_FIELD_TCP_FLAGS 0x34
+/* enum: TCP packet with any of SYN, FIN or RST flag set */
+#define MAE_FIELD_TCP_SYN_FIN_RST 0x35
+/* enum: Packet is IP fragment with fragment offset 0 */
+#define MAE_FIELD_IP_FIRST_FRAG 0x36
+/* enum: The type of encapsulation used for this packet. Value as per
+ * ENCAP_TYPE_*.
+ */
+#define MAE_FIELD_ENCAP_TYPE 0x3f
+/* enum: The ID of the outer rule that marked this packet as encapsulated.
+ * Useful for implicitly matching on outer fields.
+ */
+#define MAE_FIELD_OUTER_RULE_ID 0x40
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETHER_TYPE 0x41
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_TCI 0x42
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_PROTO 0x43
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_TCI 0x44
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_PROTO 0x45
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_SADDR 0x48
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_DADDR 0x49
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP4 0x4a
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP6 0x4b
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP4 0x4c
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP6 0x4d
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_PROTO 0x4e
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TOS 0x4f
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TTL 0x50
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_FLAGS 0x51
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_SPORT 0x52
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_DPORT 0x53
+/* enum: VNI (when VXLAN or GENEVE) VSID (when NVGRE) Bottom 24 bits of Key
+ * (when L2GRE) Outer; only present when encap
+ */
+#define MAE_FIELD_ENC_VNET_ID 0x54
+
+/* MAE_MCDI_ENCAP_TYPE enum: Encapsulation type. Defines how the payload will
+ * be parsed to an inner frame. Other values are reserved. Unknown values
+ * should be treated the same as NONE. (Values are not arbitrary - constrained by
+ * table access ABI.)
+ */
+#define MAE_MCDI_ENCAP_TYPE_NONE 0x0 /* enum */
+/* enum: Don't assume enum aligns with support bitmask... */
+#define MAE_MCDI_ENCAP_TYPE_VXLAN 0x1
+#define MAE_MCDI_ENCAP_TYPE_NVGRE 0x2 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_GENEVE 0x3 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_L2GRE 0x4 /* enum */
+
+/* MAE_MPORT_END enum: Selects which end of the logical link identified by an
+ * MPORT_SELECTOR is targeted by an operation.
+ */
+/* enum: Selects the port on the MAE virtual switch */
+#define MAE_MPORT_END_MAE 0x1
+/* enum: Selects the virtual NIC plugged into the MAE switch */
+#define MAE_MPORT_END_VNIC 0x2
+
+/* MAE_COUNTER_TYPE enum: The datapath maintains several sets of counters, each
+ * being associated with a different table. Note that the same counter ID may
+ * be allocated by different counter blocks, so e.g. AR counter 42 is different
+ * from CT counter 42. Generation counts are also type-specific. This value is
+ * also present in the header of streaming counter packets, in the IDENTIFIER
+ * field (see packetiser packet format definitions).
+ */
+/* enum: Action Rule counters - can be referenced in AR response. */
+#define MAE_COUNTER_TYPE_AR 0x0
+/* enum: Conntrack counters - can be referenced in CT response. */
+#define MAE_COUNTER_TYPE_CT 0x1
+/* enum: Outer Rule counters - can be referenced in OR response. */
+#define MAE_COUNTER_TYPE_OR 0x2
+
+/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
+ * structured with bits [31:24] reserved (0), [23:16] indicating which major
+ * block the tables belongs to (0=VNIC TX, none currently; 1=MAE; 2=VNIC RX),
+ * [15:8] a unique ID within the block, and [7:0] reserved for future
+ * variations of the same table. (All of the tables currently defined within
+ * the streaming engines are listed here, but this does not imply that they are
+ * all supported - MC_CMD_TABLE_LIST returns the list of actually supported
+ * tables.)
+ */
+/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_TABLE 0x10000
+/* enum: Outer_Rule_No_CT_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_NO_CT_TABLE 0x10100
+/* enum: Mgmt_Filter_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGMT_FILTER_TABLE 0x10200
+/* enum: Conntrack_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_CONNTRACK_TABLE 0x10300
+/* enum: Action_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ACTION_RULE_TABLE 0x10400
+/* enum: Mgroup_Default_Action_Set_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGROUP_DEFAULT_ACTION_SET_TABLE 0x10500
+/* enum: Encap_Hdr_Part1_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART1_TABLE 0x10600
+/* enum: Encap_Hdr_Part2_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART2_TABLE 0x10700
+/* enum: Replace_Src_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_SRC_MAC_TABLE 0x10800
+/* enum: Replace_Dst_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_DST_MAC_TABLE 0x10900
+/* enum: Dst_Mport_VC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_VC_TABLE 0x10a00
+/* enum: LACP_LAG_Config_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_LAG_CONFIG_TABLE 0x10b00
+/* enum: LACP_Balance_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_BALANCE_TABLE 0x10c00
+/* enum: Dst_Mport_Host_Chan_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_HOST_CHAN_TABLE 0x10d00
+/* enum: VNIC_Rx_Encap_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_VNIC_RX_ENCAP_TABLE 0x20000
+/* enum: Steering_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_STEERING_TABLE 0x20100
+/* enum: RSS_Context_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
+/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_INDIRECTION_TABLE 0x20300
+
+/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
+ * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
+ * (ether_type_msb & 0x3);
+ */
+#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
+
+/* TABLE_NAT_DIR enum: NAT direction. */
+#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
+#define TABLE_NAT_DIR_DEST 0x1 /* enum */
+
+/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
+ * is constructed as a concatenation (indicated here by "++") of packet header
+ * fields.
+ */
+/* enum: IP src addr ++ IP dst addr */
+#define TABLE_RSS_KEY_MODE_SA_DA 0x0
+/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
+#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
+/* enum: IP src addr */
+#define TABLE_RSS_KEY_MODE_SA 0x2
+/* enum: IP dst addr */
+#define TABLE_RSS_KEY_MODE_DA 0x3
+/* enum: IP src addr ++ TCP/UDP src port */
+#define TABLE_RSS_KEY_MODE_SA_SP 0x4
+/* enum: IP dest addr ++ TCP dest port */
+#define TABLE_RSS_KEY_MODE_DA_DP 0x5
+/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
+#define TABLE_RSS_KEY_MODE_NONE 0x7
+
+/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
+/* enum: RSS uses Indirection_Table lookup. */
+#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
+/* enum: RSS uses even spreading calculation. */
+#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+
+/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
+ * loosely grouped together into blocks with gaps for expansion, but the values
+ * are arbitrary. Field IDs are not specific to particular tables, and in some
+ * cases this sharing means that they are not used with the exact names of the
+ * corresponding table definitions in SF-123102-TC; however, the mapping should
+ * still be clear. The intent is that a list of fields, with their associated
+ * bit widths and semantics version code, unambiguously defines the semantics
+ * of the fields in a key or response. (Again, this list includes all of the
+ * fields currently defined within the streaming engines, but only a subset may
+ * actually be used by the supported list of tables.)
+ */
+/* enum: May appear multiple times within a key or response, and indicates that
+ * the field is unused and should be set to 0 (or masked out if permitted by
+ * the MASK_VALUE for this field).
+ */
+#define TABLE_FIELD_ID_UNUSED 0x0
+/* enum: Source m-port (a full m-port label). */
+#define TABLE_FIELD_ID_SRC_MPORT 0x1
+/* enum: Destination m-port (a full m-port label). */
+#define TABLE_FIELD_ID_DST_MPORT 0x2
+/* enum: Source m-group ID. */
+#define TABLE_FIELD_ID_SRC_MGROUP_ID 0x3
+/* enum: Physical network port ID (or m-port ID; same thing, for physical
+ * network ports).
+ */
+#define TABLE_FIELD_ID_NETWORK_PORT_ID 0x4
+/* enum: True if packet arrived via network port, false if it arrived via host.
+ */
+#define TABLE_FIELD_ID_IS_FROM_NETWORK 0x5
+/* enum: Full virtual channel from capsule header. */
+#define TABLE_FIELD_ID_CH_VC 0x6
+/* enum: Low bits of virtual channel from capsule header. */
+#define TABLE_FIELD_ID_CH_VC_LOW 0x7
+/* enum: User mark value in metadata and packet prefix. */
+#define TABLE_FIELD_ID_USER_MARK 0x8
+/* enum: User flag value in metadata and packet prefix. */
+#define TABLE_FIELD_ID_USER_FLAG 0x9
+/* enum: Counter ID associated with a response. All-bits-1 is a null value to
+ * suppress counting.
+ */
+#define TABLE_FIELD_ID_COUNTER_ID 0xa
+/* enum: Discriminator which may be set by plugins in some lookup keys; this
+ * allows plugins to make a reinterpretation of packet fields in these keys
+ * without clashing with the normal interpretation.
+ */
+#define TABLE_FIELD_ID_DISCRIM 0xb
+/* enum: Destination MAC address. The mapping from bytes in a frame to the
+ * 48-bit value for this field is in network order, i.e. a MAC address of
+ * AA:BB:CC:DD:EE:FF becomes a 48-bit value of 0xAABBCCDDEEFF.
+ */
+#define TABLE_FIELD_ID_DST_MAC 0x14
+/* enum: Source MAC address (see notes for DST_MAC). */
+#define TABLE_FIELD_ID_SRC_MAC 0x15
+/* enum: Outer VLAN tag TPID, compressed to an enumeration. */
+#define TABLE_FIELD_ID_OVLAN_TPID_COMPRESSED 0x16
+/* enum: Full outer VLAN tag TCI (16 bits). */
+#define TABLE_FIELD_ID_OVLAN 0x17
+/* enum: Outer VLAN ID (least significant 12 bits of full 16-bit TCI) only. */
+#define TABLE_FIELD_ID_OVLAN_VID 0x18
+/* enum: Inner VLAN tag TPID, compressed to an enumeration. */
+#define TABLE_FIELD_ID_IVLAN_TPID_COMPRESSED 0x19
+/* enum: Full inner VLAN tag TCI (16 bits). */
+#define TABLE_FIELD_ID_IVLAN 0x1a
+/* enum: Inner VLAN ID (least significant 12 bits of full 16-bit TCI) only. */
+#define TABLE_FIELD_ID_IVLAN_VID 0x1b
+/* enum: Ethertype. */
+#define TABLE_FIELD_ID_ETHER_TYPE 0x1c
+/* enum: Source IP address, either IPv4 or IPv6. The mapping from bytes in a
+ * frame to the 128-bit value for this field is in network order, with IPv4
+ * addresses assumed to have 12 bytes of trailing zeroes. i.e. the IPv6 address
+ * [2345::6789:ABCD] is 0x2345000000000000000000006789ABCD; the IPv4 address
+ * 192.168.1.2 is 0xC0A80102000000000000000000000000.
+ */
+#define TABLE_FIELD_ID_SRC_IP 0x1d
+/* enum: Destination IP address (see notes for SRC_IP). */
+#define TABLE_FIELD_ID_DST_IP 0x1e
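+/* Editorial example (not part of the generated MCDI definitions): one way to
+ * lay out the 128-bit SRC_IP/DST_IP key bytes described above. The helper
+ * name is hypothetical; u8/bool and memcpy/memset are assumed to come from
+ * the usual kernel headers in the including source file.
+ */
+static inline void table_field_ip_bytes(u8 *dst16, const u8 *addr, bool ipv6)
+{
+	if (ipv6) {
+		memcpy(dst16, addr, 16);	/* already in network order */
+	} else {
+		memcpy(dst16, addr, 4);		/* 192.168.1.2 => C0 A8 01 02 */
+		memset(dst16 + 4, 0, 12);	/* 12 trailing zero bytes */
+	}
+}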
+/* enum: IPv4 Type-of-Service or IPv6 Traffic Class field. */
+#define TABLE_FIELD_ID_IP_TOS 0x1f
+/* enum: IP Protocol. */
+#define TABLE_FIELD_ID_IP_PROTO 0x20
+/* enum: Layer 4 source port. */
+#define TABLE_FIELD_ID_SRC_PORT 0x21
+/* enum: Layer 4 destination port. */
+#define TABLE_FIELD_ID_DST_PORT 0x22
+/* enum: TCP flags. */
+#define TABLE_FIELD_ID_TCP_FLAGS 0x23
+/* enum: Virtual Network Identifier (VXLAN) or Virtual Session ID (NVGRE). */
+#define TABLE_FIELD_ID_VNI 0x24
+/* enum: True if packet has any tunnel encapsulation header. */
+#define TABLE_FIELD_ID_HAS_ENCAP 0x32
+/* enum: True if encap header has an outer VLAN tag. */
+#define TABLE_FIELD_ID_HAS_ENC_OVLAN 0x33
+/* enum: True if encap header has an inner VLAN tag. */
+#define TABLE_FIELD_ID_HAS_ENC_IVLAN 0x34
+/* enum: True if encap header is some sort of IP. */
+#define TABLE_FIELD_ID_HAS_ENC_IP 0x35
+/* enum: True if encap header is specifically IPv4. */
+#define TABLE_FIELD_ID_HAS_ENC_IP4 0x36
+/* enum: True if encap header is UDP. */
+#define TABLE_FIELD_ID_HAS_ENC_UDP 0x37
+/* enum: True if only/inner frame has an outer VLAN tag. */
+#define TABLE_FIELD_ID_HAS_OVLAN 0x38
+/* enum: True if only/inner frame has an inner VLAN tag. */
+#define TABLE_FIELD_ID_HAS_IVLAN 0x39
+/* enum: True if only/inner frame is some sort of IP. */
+#define TABLE_FIELD_ID_HAS_IP 0x3a
+/* enum: True if only/inner frame has a recognised L4 IP protocol (TCP or UDP).
+ */
+#define TABLE_FIELD_ID_HAS_L4 0x3b
+/* enum: True if only/inner frame is an IP fragment. */
+#define TABLE_FIELD_ID_IP_FRAG 0x3c
+/* enum: True if only/inner frame is the first IP fragment (fragment offset 0).
+ */
+#define TABLE_FIELD_ID_IP_FIRST_FRAG 0x3d
+/* enum: True if only/inner frame has an IP Time-To-Live of <= 1. (Note: the
+ * implementation calls this "ip_ttl_is_one" but does in fact match packets
+ * with TTL=0 - which we shouldn't be seeing! - as well.)
+ */
+#define TABLE_FIELD_ID_IP_TTL_LE_ONE 0x3e
+/* enum: True if only/inner frame has any of TCP SYN, FIN or RST flags set. */
+#define TABLE_FIELD_ID_TCP_INTERESTING_FLAGS 0x3f
+/* enum: Plugin channel selection. */
+#define TABLE_FIELD_ID_RDP_PL_CHAN 0x50
+/* enum: Enable update of CH_ROUTE_RDP_C_PL route bit. */
+#define TABLE_FIELD_ID_RDP_C_PL_EN 0x51
+/* enum: New value of CH_ROUTE_RDP_C_PL route bit. */
+#define TABLE_FIELD_ID_RDP_C_PL 0x52
+/* enum: Enable update of CH_ROUTE_RDP_D_PL route bit. */
+#define TABLE_FIELD_ID_RDP_D_PL_EN 0x53
+/* enum: New value of CH_ROUTE_RDP_D_PL route bit. */
+#define TABLE_FIELD_ID_RDP_D_PL 0x54
+/* enum: Enable update of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */
+#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN_EN 0x55
+/* enum: New value of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */
+#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN 0x56
+/* enum: Recirculation ID for lookup sequences with two action rule lookups. */
+#define TABLE_FIELD_ID_RECIRC_ID 0x64
+/* enum: Domain ID passed to conntrack and action rule lookups. */
+#define TABLE_FIELD_ID_DOMAIN 0x65
+/* enum: Construction mode for encap_tunnel_id - see MAE_CT_VNI_MODE enum. */
+#define TABLE_FIELD_ID_CT_VNI_MODE 0x66
+/* enum: True to inhibit conntrack lookup if TCP SYN, FIN or RST flag is set.
+ */
+#define TABLE_FIELD_ID_CT_TCP_FLAGS_INHIBIT 0x67
+/* enum: True to do conntrack lookups for IPv4 TCP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP4_TCP 0x68
+/* enum: True to do conntrack lookups for IPv4 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP4_UDP 0x69
+/* enum: True to do conntrack lookups for IPv6 TCP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_TCP 0x6a
+/* enum: True to do conntrack lookups for IPv6 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_UDP 0x6b
+/* enum: Outer rule identifier. */
+#define TABLE_FIELD_ID_OUTER_RULE_ID 0x6c
+/* enum: Encapsulation type - see MAE_MCDI_ENCAP_TYPE enum. */
+#define TABLE_FIELD_ID_ENCAP_TYPE 0x6d
+/* enum: Encap tunnel ID for conntrack lookups from VNI, VLAN tag(s), or 0,
+ * depending on CT_VNI_MODE.
+ */
+#define TABLE_FIELD_ID_ENCAP_TUNNEL_ID 0x78
+/* enum: A conntrack entry identifier, passed to plugins. */
+#define TABLE_FIELD_ID_CT_ENTRY_ID 0x79
+/* enum: Either source or destination NAT replacement port. */
+#define TABLE_FIELD_ID_NAT_PORT 0x7a
+/* enum: Either source or destination NAT replacement IPv4 address. Note that
+ * this is specifically an IPv4 address (IPv6 is not supported for NAT), with
+ * bytes mapped to a 32-bit value in network order, i.e. the IPv4 address
+ * 192.168.1.2 is the value 0xC0A80102.
+ */
+#define TABLE_FIELD_ID_NAT_IP 0x7b
+/* enum: NAT direction: 0=>source, 1=>destination. */
+#define TABLE_FIELD_ID_NAT_DIR 0x7c
+/* enum: Conntrack mark value, passed to action rule lookup. Note that this is
+ * not related to the "user mark" in the metadata / packet prefix.
+ */
+#define TABLE_FIELD_ID_CT_MARK 0x7d
+/* enum: Private flags for conntrack, passed to action rule lookup. */
+#define TABLE_FIELD_ID_CT_PRIV_FLAGS 0x7e
+/* enum: True if the conntrack lookup resulted in a hit. */
+#define TABLE_FIELD_ID_CT_HIT 0x7f
+/* enum: True to suppress delivery when source and destination m-ports match.
+ */
+#define TABLE_FIELD_ID_SUPPRESS_SELF_DELIVERY 0x8c
+/* enum: True to perform tunnel decapsulation. */
+#define TABLE_FIELD_ID_DO_DECAP 0x8d
+/* enum: True to copy outer frame DSCP to inner on decap. */
+#define TABLE_FIELD_ID_DECAP_DSCP_COPY 0x8e
+/* enum: True to map outer frame ECN to inner on decap, by RFC 6040 rules. */
+#define TABLE_FIELD_ID_DECAP_ECN_RFC6040 0x8f
+/* enum: True to replace DSCP field. */
+#define TABLE_FIELD_ID_DO_REPLACE_DSCP 0x90
+/* enum: True to replace ECN field. */
+#define TABLE_FIELD_ID_DO_REPLACE_ECN 0x91
+/* enum: True to decrement IP Time-To-Live. */
+#define TABLE_FIELD_ID_DO_DECR_IP_TTL 0x92
+/* enum: True to replace source MAC address. */
+#define TABLE_FIELD_ID_DO_SRC_MAC 0x93
+/* enum: True to replace destination MAC address. */
+#define TABLE_FIELD_ID_DO_DST_MAC 0x94
+/* enum: Number of VLAN tags to pop. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_POP 0x95
+/* enum: Number of VLAN tags to push. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_PUSH 0x96
+/* enum: True to count this packet. */
+#define TABLE_FIELD_ID_DO_COUNT 0x97
+/* enum: True to perform tunnel encapsulation. */
+#define TABLE_FIELD_ID_DO_ENCAP 0x98
+/* enum: True to copy inner frame DSCP to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_DSCP_COPY 0x99
+/* enum: True to copy inner frame ECN to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_ECN_COPY 0x9a
+/* enum: True to deliver the packet (otherwise it is dropped). */
+#define TABLE_FIELD_ID_DO_DELIVER 0x9b
+/* enum: True to set the user flag in the metadata. */
+#define TABLE_FIELD_ID_DO_FLAG 0x9c
+/* enum: True to update the user mark in the metadata. */
+#define TABLE_FIELD_ID_DO_MARK 0x9d
+/* enum: True to override the capsule virtual channel for network deliveries.
+ */
+#define TABLE_FIELD_ID_DO_SET_NET_CHAN 0x9e
+/* enum: True to override the reported source m-port for host deliveries. */
+#define TABLE_FIELD_ID_DO_SET_SRC_MPORT 0x9f
+/* enum: Encap header ID for DO_ENCAP, indexing Encap_Hdr_Part1/2_Table. */
+#define TABLE_FIELD_ID_ENCAP_HDR_ID 0xaa
+/* enum: New DSCP value for DO_REPLACE_DSCP. */
+#define TABLE_FIELD_ID_DSCP_VALUE 0xab
+/* enum: If DO_REPLACE_ECN is set, the new value for the ECN field. If
+ * DO_REPLACE_ECN is not set, ECN_CONTROL[0] and ECN_CONTROL[1] are set to
+ * request remapping of ECT0 and ECT1 ECN codepoints respectively to CE.
+ */
+#define TABLE_FIELD_ID_ECN_CONTROL 0xac
+/* enum: Source MAC ID for DO_SRC_MAC, indexing Replace_Src_MAC_Table. */
+#define TABLE_FIELD_ID_SRC_MAC_ID 0xad
+/* enum: Destination MAC ID for DO_DST_MAC, indexing Replace_Dst_MAC_Table. */
+#define TABLE_FIELD_ID_DST_MAC_ID 0xae
+/* enum: Parameter for either DO_SET_NET_CHAN (only bottom 6 bits used in this
+ * case) or DO_SET_SRC_MPORT.
+ */
+#define TABLE_FIELD_ID_REPORTED_SRC_MPORT_OR_NET_CHAN 0xaf
+/* enum: 64-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK64 0xb4
+/* enum: 32-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK32 0xb5
+/* enum: 16-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK16 0xb6
+/* enum: 8-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK8 0xb7
+/* enum: 4-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK4 0xb8
+/* enum: 2-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK2 0xb9
+/* enum: Added encapsulation header length in words. */
+#define TABLE_FIELD_ID_HDR_LEN_W 0xba
+/* enum: Static value for layer 2/3 LACP hash of the encapsulation header. */
+#define TABLE_FIELD_ID_ENC_LACP_HASH_L23 0xbb
+/* enum: Static value for layer 4 LACP hash of the encapsulation header. */
+#define TABLE_FIELD_ID_ENC_LACP_HASH_L4 0xbc
+/* enum: True to use the static ENC_LACP_HASH values for the encap header
+ * instead of the calculated values for the inner frame when delivering a newly
+ * encapsulated packet to a LAG m-port.
+ */
+#define TABLE_FIELD_ID_USE_ENC_LACP_HASHES 0xbd
+/* enum: True to trigger conntrack from first action rule lookup (AR=>CT=>AR
+ * sequence).
+ */
+#define TABLE_FIELD_ID_DO_CT 0xc8
+/* enum: True to perform NAT using parameters from conntrack lookup response.
+ */
+#define TABLE_FIELD_ID_DO_NAT 0xc9
+/* enum: True to trigger recirculated action rule lookup (AR=>AR sequence). */
+#define TABLE_FIELD_ID_DO_RECIRC 0xca
+/* enum: Next action set payload ID for replay. The null value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_NEXT_ACTION_SET_PAYLOAD 0xcb
+/* enum: Next action set row ID for replay. The null value is all-1-bits. */
+#define TABLE_FIELD_ID_NEXT_ACTION_SET_ROW 0xcc
+/* enum: Action set payload ID for additional delivery to management CPU. The
+ * null value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_MC_ACTION_SET_PAYLOAD 0xcd
+/* enum: Action set row ID for additional delivery to management CPU. The null
+ * value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_MC_ACTION_SET_ROW 0xce
+/* enum: True to include layer 4 in LACP hash on delivery to a LAG m-port. */
+#define TABLE_FIELD_ID_LACP_INC_L4 0xdc
+/* enum: True to request that LACP is performed by a plugin. */
+#define TABLE_FIELD_ID_LACP_PLUGIN 0xdd
+/* enum: LACP_Balance_Table base address divided by 64. */
+#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
+/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
+#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
+ * other encapsulation types.
+ */
+#define TABLE_FIELD_ID_UDP_PORT 0xe6
+/* enum: True to perform RSS based on outer fields rather than inner fields. */
+#define TABLE_FIELD_ID_RSS_ON_OUTER 0xe7
+/* enum: True to perform steering table lookup on outer fields rather than
+ * inner fields.
+ */
+#define TABLE_FIELD_ID_STEER_ON_OUTER 0xe8
+/* enum: Destination queue ID for host delivery. */
+#define TABLE_FIELD_ID_DST_QID 0xf0
+/* enum: True to drop this packet. */
+#define TABLE_FIELD_ID_DROP 0xf1
+/* enum: True to strip outer VLAN tag from this packet. */
+#define TABLE_FIELD_ID_VLAN_STRIP 0xf2
+/* enum: True to override the user mark field with the supplied USER_MARK, or
+ * false to bitwise-OR the USER_MARK into it.
+ */
+#define TABLE_FIELD_ID_MARK_OVERRIDE 0xf3
+/* enum: True to override the user flag field with the supplied USER_FLAG, or
+ * false to bitwise-OR the USER_FLAG into it.
+ */
+#define TABLE_FIELD_ID_FLAG_OVERRIDE 0xf4
+/* enum: RSS context ID, indexing the RSS_Context_Table. */
+#define TABLE_FIELD_ID_RSS_CTX_ID 0xfa
+/* enum: True to enable RSS. */
+#define TABLE_FIELD_ID_RSS_EN 0xfb
+/* enum: Toeplitz hash key. */
+#define TABLE_FIELD_ID_KEY 0xfc
+/* enum: Key mode for IPv4 TCP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_TCP_V4_KEY_MODE 0xfd
+/* enum: Key mode for IPv6 TCP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_TCP_V6_KEY_MODE 0xfe
+/* enum: Key mode for IPv4 UDP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_UDP_V4_KEY_MODE 0xff
+/* enum: Key mode for IPv6 UDP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_UDP_V6_KEY_MODE 0x100
+/* enum: Key mode for other IPv4 packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_OTHER_V4_KEY_MODE 0x101
+/* enum: Key mode for other IPv6 packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_OTHER_V6_KEY_MODE 0x102
+/* enum: Spreading mode - 0=>indirection; 1=>even. */
+#define TABLE_FIELD_ID_SPREAD_MODE 0x103
+/* enum: For indirection spreading mode, the base address of a region within
+ * the Indirection_Table. For even spreading mode, the number of queues to
+ * spread across (only values 1-255 are valid for this mode).
+ */
+#define TABLE_FIELD_ID_INDIR_TBL_BASE 0x104
+/* enum: For indirection spreading mode, identifies the length of a region
+ * within the Indirection_Table, where length = 32 << len_id. Must be set to 0
+ * for even spreading mode.
+ */
+#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
+/* enum: An offset to be applied to the base destination queue ID. */
+#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
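+/* Editorial example (not part of the generated MCDI definitions): the region
+ * size implied by INDIR_TBL_LEN_ID is 32 << len_id, so len_id 0/1/2 give
+ * regions of 32/64/128 entries. The sketch below shows one plausible way a
+ * lookup index could be formed; the exact hardware spreading calculation
+ * (including how INDIR_OFFSET is applied) is not specified here, so treat
+ * this purely as an illustration.
+ */
+static inline u32 table_rss_indir_entry(u32 base, u32 len_id, u32 hash)
+{
+	u32 region_len = 32U << len_id;		/* entries in the region */
+
+	return base + (hash & (region_len - 1));	/* Indirection_Table index */
+}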
+
+/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
+ * platforms
+ */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
@@ -447,17 +1164,21 @@
#define MCDI_EVENT_TX_ERR_TYPE_OFST 0
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-/* enum: Descriptor loader reported failure */
+/* enum: Descriptor loader reported failure. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
-/* enum: Descriptor ring empty and no EOP seen for packet */
+/* enum: Descriptor ring empty and no EOP seen for packet. Specific to
+ * EF10-family NICs.
+ */
#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
-/* enum: Overlength packet */
+/* enum: Overlength packet. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_2BIG 0x3
-/* enum: Malformed option descriptor */
+/* enum: Malformed option descriptor. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
-/* enum: Option descriptor part way through a packet */
+/* enum: Option descriptor part way through a packet. Specific to EF10-family
+ * NICs.
+ */
#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
-/* enum: DMA or PIO data access error */
+/* enum: DMA or PIO data access error. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_OFST 0
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
@@ -773,6 +1494,12 @@
* SF-122927-TC for details.
*/
#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
+/* enum: Notification that the mport journal has changed since it was last read
+ * and updates can be read using the MC_CMD_MAE_MPORT_READ_JOURNAL command. The
+ * firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1070,7 +1797,13 @@
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
@@ -1482,12 +2215,24 @@
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LBN 2080
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_WIDTH 32
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LBN 2112
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_WIDTH 32
/* MC firmware version number */
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LBN 2144
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_WIDTH 32
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LBN 2176
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_WIDTH 32
/* MC firmware security level */
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
@@ -1571,7 +2316,13 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_WIDTH 32
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
@@ -1587,7 +2338,13 @@
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_WIDTH 32
/* extra info */
#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
@@ -1611,7 +2368,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_WIDTH 32
/* extra info */
#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
@@ -1633,6 +2396,33 @@
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
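+/* Editorial example (not part of the generated MCDI definitions): each
+ * *_PRESENT flag above is a bit within the 32-bit FLAGS dword at byte offset
+ * 48 of the response, so it can be tested from its OFST/LBN/WIDTH triplet. A
+ * minimal sketch, assuming an aligned little-endian response buffer and the
+ * usual kernel headers; the sfc driver itself normally goes through its own
+ * MCDI accessor macros instead.
+ */
+static inline bool mc_cmd_get_version_v2_bundle_present(const u8 *outbuf)
+{
+	u32 flags = le32_to_cpu(*(const __le32 *)(outbuf +
+			MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_OFST));
+
+	return (flags >> MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_LBN) & 1;
+}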
/* MC firmware unique build ID (as binary SHA-1 value) */
#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
@@ -1650,7 +2440,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
/* The ID of the SUC chip. This is specific to the platform but typically
* indicates family, memory sizes etc. See SF-116728-SW for further details.
*/
@@ -1664,7 +2460,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
/* FPGA version as three numbers. On Riverhead based systems this field uses
* the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
* FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
@@ -1686,6 +2488,489 @@
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
+/* MC_CMD_GET_VERSION_V3_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter).
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V3_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V3_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_WIDTH 32
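+/* Editorial example (not part of the generated MCDI definitions): 64-bit MCDI
+ * fields such as VERSION are carried as two little-endian 32-bit dwords, and
+ * the _LO/_HI definitions above describe them (LBN is the bit offset from the
+ * start of the response, so 192 = 24 * 8). A minimal sketch of reassembling
+ * the value, assuming an aligned little-endian response buffer:
+ */
+static inline u64 mc_cmd_get_version_v3_version(const u8 *outbuf)
+{
+	u32 lo = le32_to_cpu(*(const __le32 *)(outbuf +
+			MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_OFST));
+	u32 hi = le32_to_cpu(*(const __le32 *)(outbuf +
+			MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_OFST));
+
+	return ((u64)hi << 32) | lo;
+}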
+/* extra info */
+#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_NUM 4
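+/* Editorial note with example (not part of the generated MCDI definitions):
+ * for array fields such as SUCFW_VERSION, _LEN is the size of one element and
+ * _NUM is the element count, so the four dwords occupy offsets 140..155. A
+ * sketch of formatting them as "a.b.c.d", assuming a little-endian buffer and
+ * a hypothetical helper name:
+ */
+static inline void mc_suc_version_str(const u8 *outbuf, char *buf, size_t len)
+{
+	const __le32 *v = (const __le32 *)(outbuf +
+			MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_OFST);
+
+	snprintf(buf, len, "%u.%u.%u.%u", le32_to_cpu(v[0]), le32_to_cpu(v[1]),
+		 le32_to_cpu(v[2]), le32_to_cpu(v[3]));
+}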
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_NUM 3
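+/* Editorial example (not part of the generated MCDI definitions): formatting
+ * the three FPGA_VERSION dwords using the encoding described above, e.g.
+ * {2, 1, 3} => "H2 rev B.3". The helper name and output format are
+ * illustrative only.
+ */
+static inline void mc_fpga_version_str(const u32 *v, char *buf, size_t len)
+{
+	snprintf(buf, len, "H%u rev %c.%u", v[0], 'A' + v[1], v[2]);
+}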
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_NUM 3
+
+/* MC_CMD_GET_VERSION_V4_OUT msgresponse: Extended response providing SoC
+ * version information
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V4_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V4_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_NUM 3
+/* The SOC boot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_OFST 328
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_NUM 4
+/* The SOC uboot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_OFST 344
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_NUM 4
+/* The SOC main rootfs version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4
+/* The SOC recovery buildroot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4
+
+/* MC_CMD_GET_VERSION_V5_OUT msgresponse: Extended response providing bundle
+ * and board version information
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V5_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V5_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_NUM 3
+/* The SOC boot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_OFST 328
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_NUM 4
+/* The SOC uboot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_OFST 344
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_NUM 4
+/* The SOC main rootfs version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4
+/* The SOC recovery buildroot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4
+/* Board version as four numbers - a.b.c.d. BOARD_VERSION[0] duplicates the
+ * BOARD_REVISION field
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_OFST 392
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_NUM 4
+/* Bundle version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_OFST 408
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_NUM 4
+
/***********************************/
/* MC_CMD_PTP
@@ -1789,7 +3074,9 @@
#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
-/* Not used. Events are always sent to function relative queue 0. */
+/* Not used, initialize to 0. Events are always sent to function relative queue
+ * 0.
+ */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
/* PTP timestamping mode. Not used from Huntington onwards. */
@@ -1866,7 +3153,13 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_WIDTH 32
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
@@ -1897,7 +3190,13 @@
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_WIDTH 32
/* enum: Number of fractional bits in frequency adjustment */
/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
@@ -1936,7 +3235,13 @@
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LEN 4
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LBN 96
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_WIDTH 32
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LEN 4
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LBN 128
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_WIDTH 32
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
@@ -2052,7 +3357,13 @@
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
@@ -2083,7 +3394,13 @@
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LBN 96
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_WIDTH 32
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LBN 128
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_WIDTH 32
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
@@ -2130,7 +3447,9 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Not used. Events are always sent to function relative queue 0. */
+/* Not used, initialize to 0. Events are always sent to function relative queue
+ * 0.
+ */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
@@ -2492,6 +3811,87 @@
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_LEN 40
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_LEN 4
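
The corrected sync window described above is plain arithmetic over one synchronization sample; a short sketch, assuming the four values have already been extracted from the timeset (the parameter names are illustrative, not fields defined in this header):

#include <stdint.h>
#include <stdbool.h>

/* Corrected sync window = (host_end - host_start) - mc_wait.  A sample is
 * only usable if this window is at least the SYNC_WINDOW_MIN value reported
 * by MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2.
 */
static bool sync_sample_ok(uint64_t host_start_ns, uint64_t host_end_ns,
			   uint64_t mc_wait_ns, uint32_t sync_window_min_ns)
{
	uint64_t window_ns = (host_end_ns - host_start_ns) - mc_wait_ns;

	return window_ns >= sync_window_min_ns;
}
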
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_LEN 4
+/* Minimum supported value for the FREQ field in
+ * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and
+ * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message
+ * response is not supported a value of -0.1 ns should be assumed, which is
+ * equivalent to a -10% adjustment.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_OFST 24
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_OFST 24
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LBN 192
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_WIDTH 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_OFST 28
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LBN 224
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_WIDTH 32
+/* Maximum supported value for the FREQ field in
+ * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and
+ * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message
+ * response is not supported a value of 0.1 ns should be assumed, which is
+ * equivalent to a +10% adjustment.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_OFST 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_OFST 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LBN 256
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_WIDTH 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_OFST 36
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LBN 288
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_WIDTH 32
+
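The FREQ_ADJ_MIN/FREQ_ADJ_MAX limits above bound the FREQ field used by MC_CMD_PTP_IN_ADJUST and MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST, which is a signed fixed-point fraction whose fractional-bit count is reported by the ADJUST_BITS attributes. A hedged sketch of converting a parts-per-billion adjustment into that representation; the scaling shown is an assumption derived from the "number of fractional bits" description rather than text in this header:

#include <stdint.h>

/* Convert a signed parts-per-billion adjustment into the fixed-point FREQ
 * representation: (ppb / 1e9) scaled by 2^frac_bits, where frac_bits is
 * typically 40, or 44 when the FP44_FREQ_ADJ capability bit is set.
 * Kept in 64 bits, which is sufficient for modest |ppb| values.
 */
static int64_t ppb_to_mcdi_freq(int64_t ppb, unsigned int frac_bits)
{
	return ppb * ((int64_t)1 << frac_bits) / 1000000000LL;
}
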
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
@@ -2634,7 +4034,13 @@
#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
/* The requested update interval, in seconds. (Or the sub-command if ADDR is
* NULL.)
*/
@@ -3039,7 +4445,13 @@
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
@@ -3643,6 +5055,8 @@
#define MC_CMD_MEDIA_BASE_T 0x6
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+/* enum: DSFP. */
+#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
@@ -3912,7 +5326,13 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -3995,28 +5415,52 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
@@ -4028,7 +5472,13 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -4111,49 +5561,91 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 50G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
@@ -4524,7 +6016,13 @@
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_IN_ADDR_LO_WIDTH 32
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_IN_ADDR_HI_WIDTH 32
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
@@ -4565,7 +6063,13 @@
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_WIDTH 32
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_WIDTH 32
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
@@ -4616,6 +6120,129 @@
#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+/* MC_CMD_SET_MAC_V3_IN msgrequest */
+#define MC_CMD_SET_MAC_V3_IN_LEN 40
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_V3_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_V3_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_V3_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_V3_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_V3_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_V3_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
+/* Identifies the MAC to update by specifying the end of a logical MAE
+ * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
+ * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
+ * circumstances. 1. Some will always work (e.g. a VF can always address its
+ * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
+ * meaningful and will always fail with EINVAL (e.g. attempting to address the
+ * VNIC end of a link to a physical port), 3. Some are meaningful but require
+ * the MCDI client to have the required permission and fail with EPERM
+ * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
+ * and 4. Some could be implementation-specific and fail with ENOTSUP if not
+ * available (no examples exist right now). See SF-123581-TC section 4.3 for
+ * more details.
+ */
+#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 35
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 276
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 272
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 34
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LBN 288
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_WIDTH 32
+
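As the CONTROL comment above explains, SET_MAC_V3 (like SET_MAC_EXT) only applies the parameters whose CFG_* bits are set when the SET_MAC_ENHANCED capability is present. A small sketch of building that dword so only the MTU and flow-control settings are updated; the function is illustrative, not part of any driver API:

#include <stdint.h>

/* Select which parameters MC_CMD_SET_MAC_V3 should apply: one CFG_* bit per
 * field in the CONTROL dword at offset 28.  Here only MTU and flow control
 * are updated; the MAC address, DRAIN and FCS settings are left untouched.
 */
static uint32_t set_mac_v3_control_example(void)
{
	return (1u << MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN) |
	       (1u << MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_LBN);
}
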
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -4649,7 +6276,13 @@
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_WIDTH 32
/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
@@ -4731,7 +6364,13 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_OFST 8
@@ -4774,7 +6413,13 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
@@ -4930,7 +6575,13 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
@@ -4963,7 +6614,13 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
@@ -5037,7 +6694,13 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
@@ -5097,7 +6760,13 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
@@ -5108,7 +6777,13 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
@@ -5201,7 +6876,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LBN 64
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_WIDTH 32
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LBN 96
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_WIDTH 32
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
@@ -5706,6 +7387,9 @@
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_LBN 3
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_WIDTH 1
/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
* response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
@@ -6180,7 +7864,13 @@
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LO_LEN 4
+#define MC_CMD_SENSOR_ENTRY_LO_LBN 32
+#define MC_CMD_SENSOR_ENTRY_LO_WIDTH 32
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_HI_LEN 4
+#define MC_CMD_SENSOR_ENTRY_HI_LBN 64
+#define MC_CMD_SENSOR_ENTRY_HI_WIDTH 32
#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
@@ -6202,7 +7892,13 @@
/* MC_CMD_SENSOR_ENTRY_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LO_LEN 4 */
+/* MC_CMD_SENSOR_ENTRY_LO_LBN 32 */
+/* MC_CMD_SENSOR_ENTRY_LO_WIDTH 32 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_HI_LEN 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_LBN 64 */
+/* MC_CMD_SENSOR_ENTRY_HI_WIDTH 32 */
/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
@@ -6259,7 +7955,13 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_WIDTH 32
/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
@@ -6271,7 +7973,13 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_WIDTH 32
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
@@ -6286,7 +7994,13 @@
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_WIDTH 32
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
@@ -6583,11 +8297,16 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
* Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
- * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
- * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
- * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * SFP+ PHYs). The "media type" can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid "page number" input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+, PAGE=0 or 1
* returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
- * Anything else: currently undefined. Locks required: None. Return code: 0.
+ * For QSFP, PAGE=-1 is the lower (unbanked) page. PAGE=2 is the EEPROM and
+ * PAGE=3 is the module limits. For DSFP, module addressing requires a
+ * "BANK:PAGE". Not every bank has the same number of pages. See the Common
+ * Management Interface Specification (CMIS) for further details. A BANK:PAGE
+ * of "0xffff:0xffff" retrieves the lower (unbanked) page. Locks required -
+ * None. Return code - 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
#undef MC_CMD_0x4b_PRIVILEGE_CTG
@@ -6598,6 +8317,12 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
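
For DSFP media the 32-bit PAGE input above is interpreted as a BANK:PAGE pair (PAGE in bits 0-15, BANK in bits 16-31), with 0xffff:0xffff selecting the lower unbanked page as described in the command comment. A small sketch of packing that word, assuming the caller supplies plain CMIS bank and page numbers:

#include <stdint.h>

/* Pack a CMIS bank and page number into the MC_CMD_GET_PHY_MEDIA_INFO_IN
 * PAGE word for DSFP media.  Pass 0xffff for both to read the lower
 * (unbanked) page.
 */
static uint32_t dsfp_media_info_page(uint16_t bank, uint16_t page)
{
	return ((uint32_t)bank << MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN) |
	       ((uint32_t)page << MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN);
}
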
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -7404,7 +9129,13 @@
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
@@ -7589,7 +9320,13 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
@@ -7782,7 +9519,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +9564,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +9613,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -7997,7 +9734,13 @@
#define BUFTBL_ENTRY_RAWADDR_OFST 4
#define BUFTBL_ENTRY_RAWADDR_LEN 8
#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
+#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
#define BUFTBL_ENTRY_RAWADDR_LBN 32
#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
@@ -8007,14 +9750,25 @@
#define NVRAM_PARTITION_TYPE_ID_LEN 2
/* enum: Primary MC firmware partition */
#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: NMC firmware partition (this is intentionally an alias of MC_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_FIRMWARE 0x100
/* enum: Secondary MC firmware partition */
#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
/* enum: Expansion ROM partition */
#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
/* enum: Static configuration TLV partition */
#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Factory configuration TLV partition (this is intentionally an alias of
+ * STATIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_FACTORY_CONFIG 0x400
/* enum: Dynamic configuration TLV partition */
#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: User configuration TLV partition (this is intentionally an alias of
+ * DYNAMIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_USER_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
@@ -8027,10 +9781,16 @@
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output partition for NMC firmware (this is
+ * intentionally an alias of LOG)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Crash log partition for NMC firmware */
+#define NVRAM_PARTITION_TYPE_NMC_CRASH_LOG 0x801
/* enum: Application license key storage partition */
#define NVRAM_PARTITION_TYPE_LICENSE 0x900
/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
@@ -8047,6 +9807,22 @@
#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
/* enum: Non-volatile log output partition for FC */
#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: FPGA Stage 1 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_STAGE1 0xb05
+/* enum: FPGA Stage 2 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_STAGE2 0xb06
+/* enum: FPGA User XCLBIN / Programmable Region 0 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_REGION0 0xb07
+/* enum: FPGA User XCLBIN (this is intentionally an alias of FPGA_REGION0) */
+#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_USER 0xb07
+/* enum: FPGA jump instruction (a.k.a. boot) partition to select Stage1
+ * bitstream
+ */
+#define NVRAM_PARTITION_TYPE_FPGA_JUMP 0xb08
+/* enum: FPGA Validate XCLBIN */
+#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_VALIDATE 0xb09
+/* enum: FPGA XOCL Configuration information */
+#define NVRAM_PARTITION_TYPE_FPGA_XOCL_CONFIG 0xb0a
/* enum: MUM firmware partition */
#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
/* enum: SUC firmware partition (this is intentionally an alias of
@@ -8055,6 +9831,10 @@
#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: SUC Non-volatile log output partition (this is intentionally an alias
+ * of MUM_LOG).
+ */
+#define NVRAM_PARTITION_TYPE_SUC_LOG 0xc01
/* enum: MUM Application table partition. */
#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
/* enum: MUM boot rom partition. */
@@ -8069,6 +9849,10 @@
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
/* enum: Used by the expansion ROM for logging */
#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Non-volatile log output partition for Expansion ROM (this is
+ * intentionally an alias of PXE_LOG).
+ */
+#define NVRAM_PARTITION_TYPE_EXPROM_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
@@ -8077,6 +9861,10 @@
* between XJTAG and Manftest.
*/
#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Deployment configuration TLV partition (this is intentionally an alias
+ * of MANUFACTURING)
+ */
+#define NVRAM_PARTITION_TYPE_DEPLOYMENT_CONFIG 0x1300
/* enum: Spare partition 4 */
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
@@ -8112,14 +9900,45 @@
#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
+/* enum: Partition to store ASN.1 format Bundle Signature for checking. */
+#define NVRAM_PARTITION_TYPE_BUNDLE_SIGNATURE 0x1e04
+/* enum: Test partition on SmartNIC system microcontroller (SUC) */
+#define NVRAM_PARTITION_TYPE_SUC_TEST 0x1f00
+/* enum: System microcontroller access to primary FPGA flash. */
+#define NVRAM_PARTITION_TYPE_SUC_FPGA_PRIMARY 0x1f01
+/* enum: System microcontroller access to secondary FPGA flash (if present) */
+#define NVRAM_PARTITION_TYPE_SUC_FPGA_SECONDARY 0x1f02
+/* enum: System microcontroller access to primary System-on-Chip flash */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_PRIMARY 0x1f03
+/* enum: System microcontroller access to secondary System-on-Chip flash (if
+ * present)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_SECONDARY 0x1f04
+/* enum: System microcontroller critical failure logs. Contains structured
+ * details of sensors leading up to a critical failure (where the board is shut
+ * down).
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FAILURE_LOG 0x1f05
+/* enum: System-on-Chip configuration information (see XN-200467-PS). */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
+/* enum: System-on-Chip update information. */
+#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
/* enum: Recovery partition map (provided if real map is missing or corrupt) */
#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Recovery Flash Partition Table, see SF-122606-TC. (this is
+ * intentionally an alias of RECOVERY_MAP)
+ */
+#define NVRAM_PARTITION_TYPE_RECOVERY_FPT 0xfffe
/* enum: Partition map (real map as stored in flash) */
#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+/* enum: Flash Partition Table, see SF-122606-TC. (this is intentionally an
+ * alias of PARTITION_MAP)
+ */
+#define NVRAM_PARTITION_TYPE_FPT 0xffff
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
@@ -8168,7 +9987,13 @@
#define LICENSED_FEATURES_MASK_OFST 0
#define LICENSED_FEATURES_MASK_LEN 8
#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_LO_LEN 4
+#define LICENSED_FEATURES_MASK_LO_LBN 0
+#define LICENSED_FEATURES_MASK_LO_WIDTH 32
#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_MASK_HI_LEN 4
+#define LICENSED_FEATURES_MASK_HI_LBN 32
+#define LICENSED_FEATURES_MASK_HI_WIDTH 32
#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
@@ -8208,7 +10033,13 @@
#define LICENSED_V3_APPS_MASK_OFST 0
#define LICENSED_V3_APPS_MASK_LEN 8
#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_LO_LEN 4
+#define LICENSED_V3_APPS_MASK_LO_LBN 0
+#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_MASK_HI_LEN 4
+#define LICENSED_V3_APPS_MASK_HI_LBN 32
+#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
#define LICENSED_V3_APPS_ONLOAD_OFST 0
#define LICENSED_V3_APPS_ONLOAD_LBN 0
#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
@@ -8266,7 +10097,13 @@
#define LICENSED_V3_FEATURES_MASK_OFST 0
#define LICENSED_V3_FEATURES_MASK_LEN 8
#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LO_LEN 4
+#define LICENSED_V3_FEATURES_MASK_LO_LBN 0
+#define LICENSED_V3_FEATURES_MASK_LO_WIDTH 32
#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_MASK_HI_LEN 4
+#define LICENSED_V3_FEATURES_MASK_HI_LBN 32
+#define LICENSED_V3_FEATURES_MASK_HI_WIDTH 32
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
@@ -8421,7 +10258,8 @@
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
@@ -8493,7 +10331,13 @@
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
@@ -8514,7 +10358,8 @@
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
@@ -8611,7 +10456,13 @@
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
@@ -8637,6 +10488,158 @@
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+/* MC_CMD_INIT_EVQ_V3_IN msgrequest: Extended request to specify per-queue
+ * event merge timeouts.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_LEN 556
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V3_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Receive event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_RX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_OFST 548
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_LEN 4
+/* Transmit event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_TX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_OFST 552
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_LEN 4
+
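MC_CMD_INIT_EVQ_V3 differs from V2 only by the two per-queue merge timeouts at the end of the request; each is honoured only when the matching RX_MERGE/TX_MERGE flag is set, and 0 selects the firmware default. A hedged sketch of carrying those choices, using an illustrative parameter struct rather than a real driver API:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative request parameters for MC_CMD_INIT_EVQ_V3: the timeout
 * fields are ignored unless the matching merge flag is set in FLAGS.
 */
struct evq_v3_params {
	uint32_t flags;
	uint32_t rx_merge_timeout_ns;	/* 0 = firmware default */
	uint32_t tx_merge_timeout_ns;	/* 0 = firmware default */
};

static void evq_v3_set_rx_merge(struct evq_v3_params *p, bool enable,
				uint32_t timeout_ns)
{
	if (enable) {
		p->flags |= 1u << MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_LBN;
		p->rx_merge_timeout_ns = timeout_ns;
	} else {
		p->flags &= ~(1u << MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_LBN);
		p->rx_merge_timeout_ns = 0;
	}
}
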
+/* MC_CMD_INIT_EVQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V3_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
/* QUEUE_CRC_MODE structuredef */
#define QUEUE_CRC_MODE_LEN 1
#define QUEUE_CRC_MODE_MODE_LBN 0
@@ -8687,7 +10690,8 @@
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
@@ -8728,7 +10732,13 @@
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
@@ -8752,7 +10762,8 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
@@ -8826,8 +10837,16 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
@@ -8849,7 +10868,8 @@
#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
@@ -8923,8 +10943,16 @@
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
@@ -8975,7 +11003,8 @@
#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
@@ -9049,8 +11078,16 @@
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
@@ -9114,7 +11151,8 @@
#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
@@ -9188,8 +11226,16 @@
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
@@ -9285,7 +11331,8 @@
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
@@ -9329,7 +11376,13 @@
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
@@ -9350,7 +11403,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
@@ -9399,6 +11453,9 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_LBN 17
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
@@ -9409,8 +11466,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 0
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Flags related to Qbb flow control mode. */
@@ -9507,7 +11570,13 @@
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LBN 32
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_WIDTH 32
#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LBN 64
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_WIDTH 32
/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
@@ -9606,7 +11675,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
@@ -9616,7 +11691,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
@@ -9627,7 +11708,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
@@ -9651,7 +11738,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
@@ -9661,7 +11754,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
@@ -9672,7 +11771,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
@@ -9788,7 +11893,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
@@ -9844,7 +11955,13 @@
#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
@@ -9888,6 +12005,9 @@
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10000,7 +12120,13 @@
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
@@ -10086,6 +12212,9 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10263,9 +12392,10 @@
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
- * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
- * its rte_flow API. This extension is only useful with the sfc_efx driver
- * included as part of DPDK, used in conjunction with the dpdk datapath
+ * filter actions for EF100. Some of these actions are also supported on EF10,
+ * for Intel's DPDK (Data Plane Development Kit, dpdk.org) via its rte_flow
+ * API. In the latter case, this extension is only useful with the sfc_efx
+ * driver included as part of DPDK, used in conjunction with the dpdk datapath
* firmware variant.
*/
#define MC_CMD_FILTER_OP_V3_IN_LEN 180
@@ -10278,7 +12408,13 @@
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
@@ -10364,6 +12500,9 @@
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10539,11 +12678,42 @@
*/
#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
-/* Set an action for all packets matching this filter. The DPDK driver and dpdk
- * f/w variant use their own specific delivery structures, which are documented
- * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
- * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
- * will cause the filter insertion to fail with ENOTSUP.
+/* Flags controlling mutations of the packet and/or metadata when the filter is
+ * matched. The user_mark and user_flag fields' logic is as follows:
+ *
+ *   if (req.MATCH_BITOR_FLAG == 1)
+ *     user_flag = req.MATCH_SET_FLAG bit_or user_flag;
+ *   else
+ *     user_flag = req.MATCH_SET_FLAG;
+ *
+ *   if (req.MATCH_SET_MARK == 0)
+ *     user_mark = 0;
+ *   else if (req.MATCH_BITOR_MARK == 1)
+ *     user_mark = req.MATCH_SET_MARK bit_or user_mark;
+ *   else
+ *     user_mark = req.MATCH_SET_MARK;
+ *
+ * N.B. These flags overlap with the MATCH_ACTION field, which is deprecated
+ * in favour of this field. For the cases where these flags induce a valid
+ * encoding of the MATCH_ACTION field, the semantics agree.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_WIDTH 1
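/* Editor's sketch (illustrative, not part of this patch): composing the
 * MATCH_ACTION_FLAGS word above in a raw MC_CMD_FILTER_OP_V3 request, using
 * the _LBN bit positions defined here. Parameter names are hypothetical;
 * BIT() and put_unaligned_le32() are the usual kernel helpers
 * (<linux/bits.h>, <asm/unaligned.h>).
 */
static inline void example_set_match_action_flags(u8 *buf, bool set_flag,
						   bool set_mark, bool strip_vlan)
{
	u32 flags = 0;

	if (set_flag)
		flags |= BIT(MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_LBN);
	if (set_mark)
		flags |= BIT(MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_LBN);
	if (strip_vlan)
		flags |= BIT(MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_LBN);

	put_unaligned_le32(flags,
			   buf + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_OFST);
}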
+/* Deprecated: the overlapping MATCH_ACTION_FLAGS field exposes all of the
+ * functionality of this field in an ABI-backwards-compatible manner, and
+ * should be used instead. Any future extensions should be made to the
+ * MATCH_ACTION_FLAGS field, and not to this field. Set an action for all
+ * packets matching this filter. The DPDK driver and (on EF10) dpdk f/w variant
+ * use their own specific delivery structures, which are documented in the DPDK
+ * Firmware Driver Interface (SF-119419-TC). Requesting anything other than
+ * MATCH_ACTION_NONE on an EF10 NIC running another f/w variant will cause the
+ * filter insertion to fail with ENOTSUP.
*/
#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
@@ -10580,7 +12750,13 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_WIDTH 32
/* enum: guaranteed invalid filter handle (low 32 bits) */
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
/* enum: guaranteed invalid filter handle (high 32 bits) */
@@ -10600,7 +12776,13 @@
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_OUT/HANDLE */
@@ -10638,6 +12820,8 @@
* rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+/* enum: read the supported encapsulation types for the VNIC */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -10704,6 +12888,30 @@
#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+/* MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT msgresponse: Returns
+ * the supported encapsulation types for the VNIC
+ */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_LEN 8
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_TYPES is returned */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
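/* Editor's sketch (illustrative, not part of this patch): testing one of the
 * encapsulation-type bits above in the OP_GET_SUPPORTED_VNIC_ENCAP_TYPES
 * response. "resp" is a hypothetical pointer to the little-endian response
 * buffer; BIT() and get_unaligned_le32() are the usual kernel helpers.
 */
static inline bool example_vnic_encap_vxlan_supported(const u8 *resp)
{
	u32 types = get_unaligned_le32(resp +
		MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_OFST);

	return types &
	       BIT(MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN);
}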
+
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -10849,9 +13057,15 @@
/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
-/* Identifies the port assignment for this function. */
+/* Identifies the port assignment for this function. On EF100, it is possible
+ * for the function to have no network port assigned (either because it is not
+ * yet configured, or because assigning a port to a given function personality
+ * makes no sense - e.g. virtio-blk), in which case the return value is
+ * NULL_PORT.
+ */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+/* enum: Special value to indicate no port is assigned to a function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT 0xffffffff
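/* Editor's sketch (illustrative, not part of this patch): checking whether a
 * port is assigned from an MC_CMD_GET_PORT_ASSIGNMENT response, per the EF100
 * NULL_PORT convention described above. "resp" is a hypothetical pointer to
 * the little-endian response buffer; get_unaligned_le32() is the usual kernel
 * helper.
 */
static inline bool example_port_is_assigned(const u8 *resp)
{
	u32 port = get_unaligned_le32(resp +
				      MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST);

	return port != MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT;
}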
/***********************************/
@@ -11009,7 +13223,8 @@
/***********************************/
/* MC_CMD_GET_VI_ALLOC_INFO
 * Get information about number of VIs and base VI number allocated to this
- * function.
+ * function. This message is not available to dynamic clients created by
+ * MC_CMD_CLIENT_ALLOC.
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
#undef MC_CMD_0x8d_PRIVILEGE_CTG
@@ -11036,7 +13251,9 @@
/***********************************/
/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ * For CmdClient use. Dump pertinent information on a specific absolute VI. The
+ * VI must be owned by the calling client or one of its ancestors; usership of
+ * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
#undef MC_CMD_0x8e_PRIVILEGE_CTG
@@ -11050,7 +13267,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
/* The PF part of the function owning this VI. */
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
@@ -11073,12 +13290,24 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
/* Raw evq timer table data. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
@@ -11095,22 +13324,46 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
@@ -11130,22 +13383,46 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
/* RXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
/* Reserved, currently 0. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
@@ -11158,6 +13435,9 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+/* Current user, as assigned by MC_CMD_SET_VI_USER. */
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
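/* Editor's sketch (illustrative, not part of this patch): the response grew
 * from 96 to 100 bytes in this change, so the new USER_CLIENT_ID field is only
 * present when the firmware returns the full length. A hypothetical caller
 * could guard the read like this ("resp"/"resp_len" are illustrative names
 * for the little-endian response buffer and its actual length).
 */
static inline bool example_dump_vi_user_client_id(const u8 *resp,
						  size_t resp_len, u32 *id)
{
	if (resp_len < MC_CMD_DUMP_VI_STATE_OUT_LEN)
		return false;	/* older firmware: field not present */

	*id = get_unaligned_le32(resp +
				 MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST);
	return true;
}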
/***********************************/
@@ -11200,7 +13480,9 @@
/***********************************/
/* MC_CMD_GET_VI_TLP_PROCESSING
- * Get TLP steering and ordering information for a VI.
+ * Get TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
#undef MC_CMD_0xb0_PRIVILEGE_CTG
@@ -11239,7 +13521,9 @@
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
- * Set TLP steering and ordering information for a VI.
+ * Set TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
@@ -14497,6 +16781,24 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -14983,6 +17285,24 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -14990,7 +17310,13 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_WIDTH 32
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_WIDTH 32
/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
@@ -15477,6 +17803,24 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -15484,7 +17828,13 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_WIDTH 32
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_WIDTH 32
/* The minimum size (in table entries) of indirection table to be allocated
* from the pool for an RSS context. Note that the table size used must be a
* power of 2.
@@ -15521,6 +17871,573 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V10_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LEN 192
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
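A minimal sketch of how a single-bit flag in this word can be tested from a raw
MCDI response, using the _OFST/_LBN pairs above (the helper below is
illustrative, not an sfc driver function):

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch: test a single-bit capability flag given its _OFST and
 * _LBN values.  The word is assembled byte-by-byte because MCDI payloads are
 * little-endian; the length check covers older firmware where the flags word
 * is absent entirely.
 */
static unsigned int mcdi_flag_set(const uint8_t *resp, size_t resp_len,
				  unsigned int ofst, unsigned int lbn)
{
	uint32_t word;

	if (ofst + 4 > resp_len)
		return 0;
	word = resp[ofst] |
	       ((uint32_t)resp[ofst + 1] << 8) |
	       ((uint32_t)resp[ofst + 2] << 16) |
	       ((uint32_t)resp[ofst + 3] << 24);
	return (word >> lbn) & 1;
}

/* e.g. mcdi_flag_set(resp, resp_len,
 *                    MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST,
 *                    MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN)
 */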
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
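For illustration, the special values above can be folded into a single check
(the helper below is hypothetical, not sfc driver code):

#include <stdint.h>

/* Illustrative sketch: decode one PFS_TO_PORTS_ASSIGNMENT byte.  Returns the
 * external port number, or -1 if this PF has no usable assignment.  Per the
 * comment above, INCOMPATIBLE_ASSIGNMENT is treated like PF_NOT_ASSIGNED by
 * current drivers.
 */
static int pf_external_port(uint8_t assignment)
{
	switch (assignment) {
	case MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED:	/* 0xff */
	case MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT:		/* 0xfe */
	case MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED:		/* 0xfd */
	case MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT:	/* 0xfc */
		return -1;
	default:
		return assignment;
	}
}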
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_LEN 1
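A one-line reminder of the logarithmic encoding used by both cache-size fields
(the variable names are illustrative):

/* Illustrative only: the fields hold base-2 logarithms of the entry counts. */
unsigned int rx_desc_cache_entries = 1u << rx_desc_cache_size;	/* byte at offset 66 */
unsigned int tx_desc_cache_entries = 1u << tx_desc_cache_size;	/* byte at offset 67 */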
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K 0x2
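A sketch of mapping this enum to the per-VI stride in bytes (hypothetical
helper, shown only to make the three window sizes concrete):

/* Illustrative sketch: convert VI_WINDOW_MODE to the per-VI address-space
 * stride in bytes.  Returns 0 for an unrecognised mode.
 */
static unsigned int vi_window_stride(unsigned int mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K:
		return 8192;
	case MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K:
		return 16384;
	case MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K:
		return 65536;
	default:
		return 0;
	}
}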
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_LEN 2
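A sketch of sizing the MAC-stats DMA buffer from this field (hypothetical
helper; each stat, including GENERATION_END, is a 64-bit value):

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: a buffer shorter than this gets a truncated stats array. */
static size_t mac_stats_buf_bytes(unsigned int mac_stats_num_stats)
{
	return (size_t)mac_stats_num_stats * sizeof(uint64_t);
}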
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
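A sketch of consulting the two bitmaps above for a candidate queue size
(hypothetical helper; bit N set means 2^N entries):

#include <stdint.h>

/* Illustrative sketch: returns non-zero if 'entries' (which must be a power
 * of two) has its bit set in the given SUPPORTED_QUEUE_SIZES or
 * GUARANTEED_QUEUE_SIZES bitmap.
 */
static int queue_size_in_bitmap(uint32_t bitmap, uint32_t entries)
{
	unsigned int log2 = 0;

	if (entries == 0 || (entries & (entries - 1)))
		return 0;	/* not a power of two */
	while ((1u << log2) != entries)
		log2++;
	return (bitmap >> log2) & 1;
}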
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -15729,7 +18646,7 @@
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
-/* Function Local Instance (VI) number. */
+/* Function Local Instance (VI) number which has a TxQ allocated to it. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
@@ -17303,7 +20220,13 @@
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
@@ -17503,6 +20426,18 @@
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+/* MC_CMD_GET_FUNCTION_INFO_OUT_V2 msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN 12
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_LEN 4
+/* Values from PCIE_INTERFACE enumeration. For NICs with a single interface, or
+ * in the case of a V1 response, this should be HOST_PRIMARY.
+ */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_OFST 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_LEN 4
+
/***********************************/
/* MC_CMD_ENABLE_OFFLINE_BIST
@@ -18570,7 +21505,13 @@
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LBN 192
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LBN 224
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_WIDTH 32
/* reserved for future use */
#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
@@ -18578,7 +21519,13 @@
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LBN 448
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LBN 480
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_WIDTH 32
/* reserved for future use */
#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
@@ -18681,7 +21628,13 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
@@ -18713,7 +21666,13 @@
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
@@ -18721,7 +21680,13 @@
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
/***********************************/
@@ -18826,7 +21791,13 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
@@ -18876,7 +21847,13 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
@@ -18956,7 +21933,13 @@
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
/***********************************/
@@ -19322,7 +22305,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
@@ -19490,6 +22473,22 @@
* SF-117064-DG for background).
*/
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Control the Match-Action Engine if present. See mcdi_mae.yml. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE 0x10000
+/* enum: This Function/client may call MC_CMD_CLIENT_ALLOC to create new
+ * dynamic client children of itself.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALLOC_CLIENT 0x20000
+/* enum: A dynamic client with this privilege may perform all the same DMA
+ * operations as the function client from which it is descended.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_FUNC_DMA 0x40000
+/* enum: A client with this privilege may perform DMA as any PCIe function on
+ * the device and to on-device DDR. It allows clients to use TX-DESC2CMPT-DESC
+ * descriptors, and to use TX-SEG-DESC and TX-MEM2MEM-DESC with an address
+ * space override (i.e. with the ADDR_SPC_EN bit set).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ARBITRARY_DMA 0x80000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -20277,7 +23276,8 @@
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
@@ -20499,7 +23499,13 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
@@ -20521,6 +23527,9 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
@@ -20530,6 +23539,12 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
@@ -20575,9 +23590,12 @@
#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
+#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
@@ -20814,6 +23832,21 @@
#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
+ * requests of the device and that can own resources managed by the device.
+ * Examples of clients include PCIe functions and dynamic clients. A client
+ * handle is a 32b opaque value used to refer to a client. Further details can
+ * be found within XN-200418-TC.
+ */
+#define CLIENT_HANDLE_LEN 4
+#define CLIENT_HANDLE_OPAQUE_OFST 0
+#define CLIENT_HANDLE_OPAQUE_LEN 4
+/* enum: A client handle guaranteed never to refer to a real client. */
+#define CLIENT_HANDLE_NULL 0xffffffff
+/* enum: Used to refer to the calling client. */
+#define CLIENT_HANDLE_SELF 0xfffffffe
+#define CLIENT_HANDLE_OPAQUE_LBN 0
+#define CLIENT_HANDLE_OPAQUE_WIDTH 32
/* CLOCK_INFO structuredef: Information about a single hardware clock */
#define CLOCK_INFO_LEN 28
@@ -20848,7 +23881,13 @@
#define CLOCK_INFO_FREQUENCY_OFST 4
#define CLOCK_INFO_FREQUENCY_LEN 8
#define CLOCK_INFO_FREQUENCY_LO_OFST 4
+#define CLOCK_INFO_FREQUENCY_LO_LEN 4
+#define CLOCK_INFO_FREQUENCY_LO_LBN 32
+#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
#define CLOCK_INFO_FREQUENCY_HI_OFST 8
+#define CLOCK_INFO_FREQUENCY_HI_LEN 4
+#define CLOCK_INFO_FREQUENCY_HI_LBN 64
+#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
#define CLOCK_INFO_FREQUENCY_LBN 32
#define CLOCK_INFO_FREQUENCY_WIDTH 64
/* Human-readable ASCII name for clock, with NUL termination */
@@ -20858,6 +23897,57 @@
#define CLOCK_INFO_NAME_LBN 96
#define CLOCK_INFO_NAME_WIDTH 8
+/* SCHED_CREDIT_CHECK_RESULT structuredef */
+#define SCHED_CREDIT_CHECK_RESULT_LEN 16
+/* The instance of the scheduler. Refer to XN-200389-AW for the location of
+ * these schedulers in the hardware.
+ */
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_A 0x0 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_A 0x1 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_B 0x2 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_C 0x3 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_TX 0x4 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_D 0x5 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_REPLAY 0x6 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
+/* The type of node that this result refers to. */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_OFST 1
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LEN 1
+/* enum: Destination node */
+#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
+/* enum: Source node */
+#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
+/* Level of node in scheduler hierarchy (level 0 is the bottom of the
+ * hierarchy, increasing towards the root node).
+ */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_OFST 2
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LEN 2
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LBN 16
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_WIDTH 16
+/* Node index */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_OFST 4
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LBN 32
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_WIDTH 32
+/* The number of credits the node is expected to have. */
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_OFST 8
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LBN 64
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_WIDTH 32
+/* The number of credits the node actually had. */
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_OFST 12
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LBN 96
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_WIDTH 32
+
/***********************************/
/* MC_CMD_GET_CLOCKS_INFO
@@ -20887,7 +23977,19 @@
/***********************************/
/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full.
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
*/
#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
#undef MC_CMD_0x16d_PRIVILEGE_CTG
@@ -20951,6 +24053,12 @@
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
@@ -20967,7 +24075,9 @@
/***********************************/
/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ * Remove a VNIC encapsulation rule. Packets which would have previously
+ * matched the rule will then be considered as unencapsulated. Returns EALREADY
+ * if the input HANDLE doesn't correspond to an existing rule.
*/
#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
#undef MC_CMD_0x16e_PRIVILEGE_CTG
@@ -20983,6 +24093,964 @@
/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
+ * the endianness specified by the RFC; users should ignore the broken-out
+ * fields and instead do straight memory copies to ensure correct ordering.
+ */
+#define UUID_LEN 16
+#define UUID_TIME_LOW_OFST 0
+#define UUID_TIME_LOW_LEN 4
+#define UUID_TIME_LOW_LBN 0
+#define UUID_TIME_LOW_WIDTH 32
+#define UUID_TIME_MID_OFST 4
+#define UUID_TIME_MID_LEN 2
+#define UUID_TIME_MID_LBN 32
+#define UUID_TIME_MID_WIDTH 16
+#define UUID_TIME_HI_LBN 52
+#define UUID_TIME_HI_WIDTH 12
+#define UUID_VERSION_LBN 48
+#define UUID_VERSION_WIDTH 4
+#define UUID_RESERVED_LBN 64
+#define UUID_RESERVED_WIDTH 2
+#define UUID_CLK_SEQ_LBN 66
+#define UUID_CLK_SEQ_WIDTH 14
+#define UUID_NODE_OFST 10
+#define UUID_NODE_LEN 6
+#define UUID_NODE_LBN 80
+#define UUID_NODE_WIDTH 48
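A minimal sketch of the straight-copy handling the comment above recommends
(assumes <stdint.h>/<string.h>; the source pointer is illustrative):

/* Illustrative only: treat the UUID as 16 opaque bytes; a plain memcpy
 * preserves the RFC4122 byte order without touching the broken-out fields.
 */
uint8_t uuid[UUID_LEN];
memcpy(uuid, payload, UUID_LEN);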
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_ALLOC
+ * Create a handle to a datapath plugin's extension. This involves finding a
+ * currently-loaded plugin offering the given functionality (as identified by
+ * the UUID) and allocating a handle to track the usage of it. Plugin
+ * functionality is identified by 'extension' rather than any other identifier
+ * so that a single plugin bitfile may offer more than one piece of independent
+ * functionality. If two bitfiles are loaded which both offer the same
+ * extension, then the metadata is interrogated further to determine which is
+ * the newest and that is the one opened. See SF-123625-SW for architectural
+ * detail on datapath plugins.
+ */
+#define MC_CMD_PLUGIN_ALLOC 0x1ad
+#undef MC_CMD_0x1ad_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
+#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
+/* The functionality requested of the plugin, as a UUID structure */
+#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
+#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
+/* Additional options for opening the handle */
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
+/* Load the extension only if it is in the specified administrative group.
+ * Specify ANY to load the extension wherever it is found (if there are
+ * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
+ * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
+ * administrative groups.
+ */
+#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
+#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
+/* enum: Load the extension from any ADMIN_GROUP. */
+#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
+/* Reserved */
+#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
+#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
+
+/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
+#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
+/* Unique identifier of this usage */
+#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_FREE
+ * Delete a handle to a plugin's extension.
+ */
+#define MC_CMD_PLUGIN_FREE 0x1ae
+#undef MC_CMD_0x1ae_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_FREE_IN msgrequest */
+#define MC_CMD_PLUGIN_FREE_IN_LEN 4
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
+
+/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
+#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_GLOBAL
+ * Returns the global metadata applying to the whole plugin extension. See the
+ * other metadata calls for subtypes of data.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
+#undef MC_CMD_0x1af_PRIVILEGE_CTG
+
+#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
+/* Unique identifier of this plugin extension. This is identical to the value
+ * which was requested when the handle was allocated.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
+/* semver sub-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
+/* semver micro-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
+/* Number of different messages which can be sent to this extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
+/* Byte offset within the VI window of the plugin's mapped CSR window. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
+/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
+ * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
+ * MAPPED_CSR_FLAGS are ignored).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
+/* Flags indicating how to perform the CSR window mapping. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
+/* Identifier of the set of extensions which all change state together.
+ * Extensions having the same ADMIN_GROUP will always load and unload at the
+ * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
+ * generation number as an implementation detail to ensure that they're not
+ * reused rapidly).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
+/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
+ * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
+ * access to this extension.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
+/* Reserved */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
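A sketch of the mask construction described for PRIVILEGE_BIT above (the
variable names are illustrative):

/* Illustrative only: PRIVILEGE_BIT is a bit position, so the mask bit that
 * grants access to this extension is a single shift.
 */
uint32_t extension_privilege_mask = 1u << privilege_bit;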
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER
+ * Returns metadata supplied by the plugin author which describes this
+ * extension in a human-readable way. Contrast with
+ * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
+ * to operate.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
+#undef MC_CMD_0x1b0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
+/* Category of data to return */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
+/* enum: Top-level information about the extension. The returned data is an
+ * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
+ * the extension. The data is a back-to-back list of zero-terminated strings;
+ * the even-numbered fields (0,2,4,...) are keys and their following odd-
+ * numbered fields are the corresponding values. Both keys and values are
+ * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
+ * times. Note that all information (including the key/value structure itself
+ * and the UTF-8 encoding) may have been provided by the plugin author, so
+ * callers must be cautious about parsing it. Callers should parse only the
+ * top-level structure to separate out the keys and values; the contents of the
+ * values is not expected to be machine-readable.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
+/* Byte position of the data to be returned within the full data block of the
+ * given SUBTYPE.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Full length of the data block of the requested SUBTYPE, in bytes. */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
+/* The information requested by SUBTYPE. */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
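A sketch of walking the EXTENSION_KVS blob described above, assuming the full
data block has already been reassembled from the OFFSET-based reads and
treating the author-supplied bytes as untrusted (hypothetical helper, not
driver code):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sketch: the blob is a back-to-back list of NUL-terminated
 * strings; even-numbered entries are keys, each followed by its value.  Only
 * the top-level structure is parsed; values are printed verbatim.
 */
static void walk_extension_kvs(const char *data, size_t len)
{
	size_t pos = 0;

	while (pos < len) {
		const char *key = data + pos;
		size_t key_len = strnlen(key, len - pos);
		const char *value;
		size_t value_len;

		if (key_len == len - pos)	/* unterminated key: stop */
			break;
		pos += key_len + 1;
		if (pos >= len)			/* key without a value: stop */
			break;
		value = data + pos;
		value_len = strnlen(value, len - pos);
		if (value_len == len - pos)	/* unterminated value: stop */
			break;
		pos += value_len + 1;
		printf("%s: %s\n", key, value);
	}
}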
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_MSG
+ * Returns the simple metadata for a specific plugin request message. This
+ * supplies information necessary for the host to know how to build an
+ * MC_CMD_PLUGIN_REQ request.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
+#undef MC_CMD_0x1b1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
+/* Unique message ID to obtain */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
+/* Unique message ID. This is the same value as the input parameter; it exists
+ * to allow future MCDI extensions which enumerate all messages.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
+/* Packed index number of this message, assigned by the MC to give each message
+ * a unique ID in an array to allow for more efficient storage/management.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
+/* Short human-readable codename for this message. This is conventionally
+ * formatted as a C identifier in the basic ASCII character set with any spare
+ * bytes at the end set to 0; however, this convention is not enforced by the MC
+ * so consumers must check for all potential malformations before using it for
+ * a trusted purpose.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
+/* Number of bytes of data which must be passed from the host kernel to the MC
+ * for this message's payload, and which are passed back again in the response.
+ * The MC's plugin metadata loader will have validated that the number of bytes
+ * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
+ * message.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
+
+/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
+ * an individual extension.
+ */
+#define PLUGIN_EXTENSION_LEN 20
+#define PLUGIN_EXTENSION_UUID_OFST 0
+#define PLUGIN_EXTENSION_UUID_LEN 16
+#define PLUGIN_EXTENSION_UUID_LBN 0
+#define PLUGIN_EXTENSION_UUID_WIDTH 128
+#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
+#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
+#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
+#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
+#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
+#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
+#define PLUGIN_EXTENSION_RESERVED_LBN 137
+#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_ALL
+ * Returns a list of all plugin extensions currently loaded and available. The
+ * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
+ * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
+ * ADMIN_GROUP field collects how extensions are grouped in to units which are
+ * loaded/unloaded together; extensions with the same value are in the same
+ * group.
+ */
+#define MC_CMD_PLUGIN_GET_ALL 0x1b2
+#undef MC_CMD_0x1b2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
+/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
+ * nor FLAG_INCLUDE_DISABLED are specified then the result set will be empty.
+ */
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
+
+/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
+/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
+ * structs.
+ */
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_REQ
+ * Send a command to a plugin. A plugin may define an arbitrary number of
+ * 'messages' which it allows applications on the host system to send, each
+ * identified by a 32-bit ID.
+ */
+#define MC_CMD_PLUGIN_REQ 0x1b3
+#undef MC_CMD_0x1b3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_REQ_IN msgrequest */
+#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
+#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
+#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
+#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
+/* Message ID defined by the plugin author */
+#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
+#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
+/* Data blob being the parameter to the message. This must be of the length
+ * specified by MC_CMD_PLUGIN_GET_META_MSG_IN_MCDI_PARAM_SIZE.
+ */
+#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
+#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
+#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
+#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
+#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
+/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
+ * the same size as the input DATA parameter.
+ */
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
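+
+/* Usage sketch (editorial illustration, not part of the generated MCDI
+ * definitions): building an MC_CMD_PLUGIN_REQ request from the offsets above.
+ * mcdi_rpc() is a hypothetical transport helper taking (cmd, inbuf, inlen,
+ * outbuf, outlen); a little-endian host is assumed when copying the 32-bit
+ * fields, and param_len must match the size advertised by the plugin's
+ * metadata.
+ *
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *
+ *	int plugin_req(uint32_t handle, uint32_t msg_id,
+ *		       const void *param, size_t param_len)
+ *	{
+ *		uint8_t inbuf[MC_CMD_PLUGIN_REQ_IN_LENMAX];
+ *
+ *		if (param_len > MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM)
+ *			return -1;
+ *		memcpy(inbuf + MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST, &handle, 4);
+ *		memcpy(inbuf + MC_CMD_PLUGIN_REQ_IN_ID_OFST, &msg_id, 4);
+ *		memcpy(inbuf + MC_CMD_PLUGIN_REQ_IN_DATA_OFST, param, param_len);
+ *		return mcdi_rpc(MC_CMD_PLUGIN_REQ, inbuf,
+ *				MC_CMD_PLUGIN_REQ_IN_LEN(param_len), NULL, 0);
+ *	}
+ */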
+
+/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
+ * space that maps to a contiguous region of TRGT_ADDR space. Addresses
+ * DESC_ADDR in the range [DESC_ADDR_BASE, DESC_ADDR_BASE + (1 <<
+ * WINDOW_SIZE_LOG2)) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
+ * TRGT_ADDR_BASE.
+ */
+#define DESC_ADDR_REGION_LEN 32
+/* The start of the region in DESC_ADDR space. */
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
+/* The start of the region in TRGT_ADDR space. Drivers can set this via
+ * MC_CMD_SET_DESC_ADDR_REGIONS.
+ */
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
+/* The size of the region, expressed as log2 (the region spans
+ * 1 << WINDOW_SIZE_LOG2 addresses of DESC_ADDR space).
+ */
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
+/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
+ * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
+ */
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_OFST 24
+#define DESC_ADDR_REGION_RSVD_LEN 8
+#define DESC_ADDR_REGION_RSVD_LO_OFST 24
+#define DESC_ADDR_REGION_RSVD_LO_LEN 4
+#define DESC_ADDR_REGION_RSVD_LO_LBN 192
+#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_HI_OFST 28
+#define DESC_ADDR_REGION_RSVD_HI_LEN 4
+#define DESC_ADDR_REGION_RSVD_HI_LBN 224
+#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_LBN 192
+#define DESC_ADDR_REGION_RSVD_WIDTH 64
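+
+/* Illustrative helper (editorial sketch, not part of the generated
+ * definitions): applies the DESC_ADDR -> TRGT_ADDR mapping described above to
+ * a single address, succeeding only when the address falls inside the
+ * region's window. The struct and function names are local to this sketch.
+ *
+ *	#include <stdbool.h>
+ *	#include <stdint.h>
+ *
+ *	struct desc_addr_region {
+ *		uint64_t desc_addr_base;
+ *		uint64_t trgt_addr_base;
+ *		uint32_t window_size_log2;
+ *	};
+ *
+ *	static bool desc_to_trgt(const struct desc_addr_region *r,
+ *				 uint64_t desc, uint64_t *trgt)
+ *	{
+ *		uint64_t window = (uint64_t)1 << r->window_size_log2;
+ *
+ *		if (desc < r->desc_addr_base ||
+ *		    desc >= r->desc_addr_base + window)
+ *			return false;
+ *		*trgt = desc - r->desc_addr_base + r->trgt_addr_base;
+ *		return true;
+ *	}
+ */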
+
+
+/***********************************/
+/* MC_CMD_GET_DESC_ADDR_INFO
+ * Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO 0x1b7
+#undef MC_CMD_0x1b7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_DESC_ADDR_INFO_IN msgrequest */
+#define MC_CMD_GET_DESC_ADDR_INFO_IN_LEN 0
+
+/* MC_CMD_GET_DESC_ADDR_INFO_OUT msgresponse */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN 4
+/* The type of mapping; see SF-nnnnnn-xx (EF100 driver writer's guide, once
+ * written) for details of each type.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE_OFST 0
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE_LEN 4
+/* enum: TRGT_ADDR = DESC_ADDR */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT 0x0
+/* enum: DESC_ADDR has one or more regions that map into TRGT_ADDR. The base
+ * TRGT_ADDR for each region is programmable via MCDI.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_REGIONED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_DESC_ADDR_REGIONS
+ * Returns a list of the DESC_ADDR regions for the calling function's address space. Only valid if that function's address space has the REGIONED mapping from DESC_ADDR to TRGT_ADDR.
+ */
+#define MC_CMD_GET_DESC_ADDR_REGIONS 0x1b8
+#undef MC_CMD_0x1b8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_DESC_ADDR_REGIONS_IN msgrequest */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN 0
+
+/* MC_CMD_GET_DESC_ADDR_REGIONS_OUT msgresponse */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMIN 32
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX 224
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2 992
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LEN(num) (0+32*(num))
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM(len) (((len)-0)/32)
+/* An array of DESC_ADDR_REGION strutures. The number of entries in the array
+ * indicates the number of available regions.
+ */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_OFST 0
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_LEN 32
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MINNUM 1
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM 7
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM_MCDI2 31
+
+
+/***********************************/
+/* MC_CMD_SET_DESC_ADDR_REGIONS
+ * Set the base TRGT_ADDR for a set of DESC_ADDR regions for the calling function's address space. Only valid if that function's address space had the REGIONED mapping from DESC_ADDR to TRGT_ADDR.
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS 0x1b9
+#undef MC_CMD_0x1b9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_DESC_ADDR_REGIONS_IN msgrequest */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMIN 16
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX 248
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX_MCDI2 1016
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(num) (8+8*(num))
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_NUM(len) (((len)-8)/8)
+/* A bitmask indicating which regions should have their base TRGT_ADDR updated.
+ * To update the base TRGT_ADDR for a DESC_ADDR region, the corresponding bit
+ * should be set to 1.
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_OFST 0
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_LEN 4
+/* Reserved field; must be set to zero. */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_RSVD_OFST 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_RSVD_LEN 4
+/* An array of values used to update the base TRGT_ADDR for DESC_ADDR regions.
+ * Array indices correspond to region numbers (i.e. the array is sparse, and
+ * includes entries for regions even if the corresponding SET_REGION_MASK bit
+ * is zero).
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_OFST 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_OFST 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_LEN 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_LBN 64
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_WIDTH 32
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_OFST 12
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_LEN 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_LBN 96
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_WIDTH 32
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MINNUM 1
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM 30
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM_MCDI2 126
+
+/* MC_CMD_SET_DESC_ADDR_REGIONS_OUT msgresponse */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN 0
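+
+/* Usage sketch (editorial illustration only): updating the TRGT_ADDR base of
+ * one region. The TRGT_ADDR_BASE array is indexed by region number, so the
+ * request still carries (ignored) slots for regions whose SET_REGION_MASK bit
+ * is clear. num_regions would typically come from
+ * MC_CMD_GET_DESC_ADDR_REGIONS; mcdi_rpc() and the little-endian host are
+ * assumptions of this sketch.
+ *
+ *	int set_region_base(unsigned int num_regions, unsigned int region,
+ *			    uint64_t new_base)
+ *	{
+ *		uint8_t inbuf[MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX] = { 0 };
+ *		uint32_t mask = 1u << region;
+ *
+ *		if (region >= num_regions)
+ *			return -1;
+ *		memcpy(inbuf + MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_OFST,
+ *		       &mask, 4);
+ *		memcpy(inbuf + MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_OFST +
+ *		       region * MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN,
+ *		       &new_base, 8);
+ *		return mcdi_rpc(MC_CMD_SET_DESC_ADDR_REGIONS, inbuf,
+ *				MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(num_regions),
+ *				NULL, 0);
+ *	}
+ */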
+
+
+/***********************************/
+/* MC_CMD_CLIENT_CMD
+ * Execute an arbitrary MCDI command on behalf of a different client. The
+ * consequences of the command (e.g. ownership of any resources created) apply
+ * to the indicated client rather than the function client which actually sent
+ * this command. All inherent permission checks are also performed on the
+ * indicated client. The given client must be a descendant of the requestor.
+ * The command to be proxied follows immediately afterward in the host buffer
+ * (or on the UART). Chaining multiple MC_CMD_CLIENT_CMD is unnecessary and not
+ * supported. New dynamic clients may be created with MC_CMD_CLIENT_ALLOC.
+ */
+#define MC_CMD_CLIENT_CMD 0x1ba
+#undef MC_CMD_0x1ba_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_CLIENT_CMD_IN msgrequest */
+#define MC_CMD_CLIENT_CMD_IN_LEN 4
+/* The client as which to execute the following command. */
+#define MC_CMD_CLIENT_CMD_IN_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_CMD_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_CLIENT_CMD_OUT msgresponse */
+#define MC_CMD_CLIENT_CMD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CLIENT_ALLOC
+ * Create a new client object. Clients are a system for delineating NIC
+ * resource ownership, such that groups of resources may be torn down as a
+ * unit. See also MC_CMD_CLIENT_CMD. See XN-200265-TC for background, concepts
+ * and a glossary. Clients created by this command are known as "dynamic
+ * clients". The newly-created client is a child of the client which sent this
+ * command. The caller must have the GRP_ALLOC_CLIENT privilege. The new client
+ * initially has no permission to do anything; see
+ * MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY.
+ */
+#define MC_CMD_CLIENT_ALLOC 0x1bb
+#undef MC_CMD_0x1bb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bb_PRIVILEGE_CTG SRIOV_CTG_ALLOC_CLIENT
+
+/* MC_CMD_CLIENT_ALLOC_IN msgrequest */
+#define MC_CMD_CLIENT_ALLOC_IN_LEN 0
+
+/* MC_CMD_CLIENT_ALLOC_OUT msgresponse */
+#define MC_CMD_CLIENT_ALLOC_OUT_LEN 4
+/* The ID of the new client object which has been created. */
+#define MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CLIENT_FREE
+ * Destroy and release an existing client object. All resources owned by that
+ * client (including its child clients, and thus all resources owned by the
+ * entire family tree) are freed.
+ */
+#define MC_CMD_CLIENT_FREE 0x1bc
+#undef MC_CMD_0x1bc_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_CLIENT_FREE_IN msgrequest */
+#define MC_CMD_CLIENT_FREE_IN_LEN 4
+/* The ID of the client to be freed. This client must be a descendant of the
+ * requestor. A client cannot free itself.
+ */
+#define MC_CMD_CLIENT_FREE_IN_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_FREE_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_CLIENT_FREE_OUT msgresponse */
+#define MC_CMD_CLIENT_FREE_OUT_LEN 0
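+
+/* Lifecycle sketch (editorial illustration only): allocate a dynamic client,
+ * use its ID (for example as the target of MC_CMD_CLIENT_CMD or
+ * MC_CMD_SET_VI_USER), then free it, which also tears down every resource the
+ * client and its descendants own. mcdi_rpc() is a hypothetical transport
+ * helper and a little-endian host is assumed.
+ *
+ *	uint8_t allocbuf[MC_CMD_CLIENT_ALLOC_OUT_LEN];
+ *	uint8_t freebuf[MC_CMD_CLIENT_FREE_IN_LEN];
+ *	uint32_t client_id;
+ *
+ *	mcdi_rpc(MC_CMD_CLIENT_ALLOC, NULL, MC_CMD_CLIENT_ALLOC_IN_LEN,
+ *		 allocbuf, sizeof(allocbuf));
+ *	memcpy(&client_id, allocbuf + MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_OFST, 4);
+ *	... grant privileges and issue commands on behalf of client_id ...
+ *	memcpy(freebuf + MC_CMD_CLIENT_FREE_IN_CLIENT_ID_OFST, &client_id, 4);
+ *	mcdi_rpc(MC_CMD_CLIENT_FREE, freebuf, sizeof(freebuf), NULL, 0);
+ */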
+
+
+/***********************************/
+/* MC_CMD_SET_VI_USER
+ * Assign partial rights over this VI to another client. VIs have an 'owner'
+ * and a 'user'. The owner is the client which allocated the VI
+ * (MC_CMD_ALLOC_VIS) and cannot be changed. The user is the client which has
+ * permission to create queues and other resources on that VI. Initially
+ * user==owner, but the user can be changed by this command; the resources thus
+ * created are then owned by the user-client. Only the VI owner can call this
+ * command, and the request will fail if there are any outstanding child
+ * resources (e.g. queues) currently allocated from this VI.
+ */
+#define MC_CMD_SET_VI_USER 0x1be
+#undef MC_CMD_0x1be_PRIVILEGE_CTG
+
+#define MC_CMD_0x1be_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_USER_IN msgrequest */
+#define MC_CMD_SET_VI_USER_IN_LEN 8
+/* Function-relative VI number to modify. */
+#define MC_CMD_SET_VI_USER_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_USER_IN_INSTANCE_LEN 4
+/* Client ID to become the new user. This must be a descendant of the owning
+ * client, the owning client itself, or the special value MC_CMD_CLIENT_ID_SELF
+ * which is synonymous with the owning client.
+ */
+#define MC_CMD_SET_VI_USER_IN_CLIENT_ID_OFST 4
+#define MC_CMD_SET_VI_USER_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_SET_VI_USER_OUT msgresponse */
+#define MC_CMD_SET_VI_USER_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES
+ * A device reports a set of MAC addresses for each client to use, known as the
+ * "permanent MAC addresses". Those MAC addresses are provided by the client's
+ * administrator, e.g. via MC_CMD_SET_CLIENT_MAC_ADDRESSES, and are intended as
+ * a hint to that client as to which MAC address its administrator would like
+ * it to use to identify itself. This API exists solely to allow communication
+ * of MAC addresses from administrator to administered client, and has no
+ * inherent interaction with switching within the device. There is no
+ * guarantee that a client will be able to send traffic with a source MAC
+ * address taken from the list of MAC addresses reported, nor is there a
+ * guarantee that a client will be able to receive traffic with a destination
+ * MAC address taken from the list of MAC addresses. Likewise, there is no
+ * guarantee that a client will not be able to
+ * use a MAC address not present in the list. Restrictions on switching are
+ * controlled either through the EVB API if operating in EVB mode, or via MAE
+ * rules if host software is directly managing the MAE. In order to allow
+ * tenants to use this API whilst a provider is using the EVB API, the MAC
+ * addresses reported by MC_CMD_GET_CLIENT_MAC_ADDRESSES will be augmented with
+ * any MAC addresses associated with the vPort assigned to the caller. In order
+ * to allow tenants to use the EVB API whilst a provider is using this API, if
+ * a client queries the MAC addresses for a vPort using the host_evb_port_id
+ * EVB_PORT_ASSIGNED, that list of MAC addresses will be augmented with the MAC
+ * addresses assigned to the calling client. This query can either be explicit
+ * (i.e. MC_CMD_VPORT_GET_MAC_ADDRESSES) or implicit (e.g. creation of a
+ * vAdaptor with a NULL/automatic MAC address). Changing the MAC address on a
+ * vAdaptor only affects VNIC steering filters; it has no effect on the MAC
+ * addresses assigned to the vAdaptor's owner. VirtIO clients behave as EVB
+ * clients. On VirtIO device reset, a vAdaptor is created with an automatic MAC
+ * address. Querying the VirtIO device's MAC address queries the underlying
+ * vAdaptor's MAC address. Setting the VirtIO device's MAC address sets the
+ * underlying vAdaptor's MAC addresses.
+ */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES 0x1c4
+#undef MC_CMD_0x1c4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN 4
+/* A handle for the client for whom MAC address should be obtained. Use
+ * CLIENT_HANDLE_SELF to obtain the MAC addresses assigned to the calling
+ * client.
+ */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_LEN 4
+
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMIN 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX 252
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(num) (0+6*(num))
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_NUM(len) (((len)-0)/6)
+/* An array of MAC addresses assigned to the client. */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_OFST 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_LEN 6
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MINNUM 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MAXNUM 42
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MAXNUM_MCDI2 170
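+
+/* Usage sketch (editorial illustration only): walking the response for the
+ * calling client. The address count is implied by the response length via the
+ * _NUM() helper above. CLIENT_HANDLE_SELF is the value referred to in the
+ * field comment above and is defined elsewhere in this header; mcdi_rpc() is
+ * a hypothetical transport returning the actual response length, and
+ * use_mac() stands in for whatever the caller does with each address.
+ *
+ *	uint8_t inbuf[MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN];
+ *	uint8_t outbuf[MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX];
+ *	uint32_t self = CLIENT_HANDLE_SELF;
+ *	size_t outlen, i;
+ *
+ *	memcpy(inbuf + MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST,
+ *	       &self, 4);
+ *	outlen = mcdi_rpc(MC_CMD_GET_CLIENT_MAC_ADDRESSES, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf));
+ *	for (i = 0; i < MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_NUM(outlen); i++)
+ *		use_mac(outbuf + MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_OFST +
+ *			i * MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_LEN);
+ */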
+
+
+/***********************************/
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES
+ * Set the permanent MAC addresses for a client. The caller must be an
+ * administrator of the target client. See MC_CMD_GET_CLIENT_MAC_ADDRESSES for
+ * additional detail.
+ */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES 0x1c5
+#undef MC_CMD_0x1c5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMIN 4
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMAX 250
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMAX_MCDI2 1018
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(num) (4+6*(num))
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_NUM(len) (((len)-4)/6)
+/* A handle for the client for whom MAC addresses should be set */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST 0
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_LEN 4
+/* An array of MAC addresses to assign to the client. */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_OFST 4
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_LEN 6
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MINNUM 0
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MAXNUM 41
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MAXNUM_MCDI2 169
+
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_ATTR
+ * Retrieve physical build-level board attributes as configured at
+ * manufacturing stage. Fields originate from EEPROM and per-platform constants
+ * in firmware. Fields are used in development to identify/differentiate
+ * boards based on build levels/parameters, and also in manufacturing to
+ * cross-check that "what was programmed in manufacturing" is the same as
+ * "what firmware thinks has been programmed", as there are two layers of
+ * translation within firmware before the attributes reach this MCDI handler.
+ * Some parameters are
+ * retrieved as part of other commands and therefore not replicated here. See
+ * GET_VERSION_OUT.
+ */
+#define MC_CMD_GET_BOARD_ATTR 0x1c6
+#undef MC_CMD_0x1c6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
+#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
+#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
+/* Defines board capabilities and validity of attributes returned in this
+ * response-message.
+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
+/* enum: The FPGA voltage on the adapter can be set to low */
+#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
+/* enum: The FPGA voltage on the adapter can be set to regular */
+#define MC_CMD_FPGA_VOLTAGE_REG 0x1
+/* enum: The FPGA voltage on the adapter can be set to high */
+#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
+/* An array of cage types on the board */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
+/* enum: The cages are not known */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
+/* enum: The cages are SFP/SFP+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
+/* enum: The cages are QSFP/QSFP+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
+
+
+/***********************************/
+/* MC_CMD_GET_SOC_STATE
+ * Retrieve current state of the System-on-Chip. This command is valid when
+ * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
+ */
+#define MC_CMD_GET_SOC_STATE 0x1c7
+#undef MC_CMD_0x1c7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SOC_STATE_IN msgrequest */
+#define MC_CMD_GET_SOC_STATE_IN_LEN 0
+
+/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
+#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
+/* Status flags for the SoC */
+#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
+/* Status fields for the SoC */
+#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
+#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
+/* enum: Power on (set by SUC on power up) */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
+/* enum: Running bootloader */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
+/* enum: Bootloader has started OS. OS is booting */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
+/* enum: OS is running */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
+/* enum: Maintenance OS is running */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
+/* Number of SoC resets since power on */
+#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
+#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CHECK_SCHEDULER_CREDITS
+ * For debugging purposes. For each source and destination node in the hardware
+ * schedulers, check whether the number of credits is as it should be. This
+ * should only be used when the NIC is idle, because collection is not atomic
+ * and because the expected credit counts are only meaningful when no traffic
+ * is flowing.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS 0x1c8
+#undef MC_CMD_0x1c8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_IN msgrequest */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN 8
+/* Flags for the request */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_FLAGS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_FLAGS_LEN 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_LBN 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_WIDTH 1
+/* If there are too many results to fit into an MCDI response, they're split
+ * into pages. This field specifies which (0-indexed) page to request. A
+ * request with PAGE=0 will snapshot the results, and subsequent requests with
+ * PAGE>0 will return data from the most recent snapshot. The GENERATION field
+ * in the response allows callers to verify that all responses correspond to
+ * the same snapshot.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_LEN 4
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_OUT msgresponse */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMIN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX 240
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX_MCDI2 1008
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LEN(num) (16+16*(num))
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_NUM(len) (((len)-16)/16)
+/* The total number of results (across all pages). */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_LEN 4
+/* The number of pages that the response is split across. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_LEN 4
+/* The number of results in this response. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_OFST 8
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_LEN 4
+/* Result generation count. Incremented any time a request is made with PAGE=0.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST 12
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_LEN 4
+/* The results, as an array of SCHED_CREDIT_CHECK_RESULT structures. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_OFST 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_LEN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MINNUM 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM 14
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM_MCDI2 62
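+
+/* Usage sketch (editorial illustration only): paging through the results as
+ * described for the PAGE field above. A request with PAGE=0 takes a snapshot;
+ * later pages are read back and cross-checked against the GENERATION value of
+ * that snapshot, since another PAGE=0 request would invalidate it. mcdi_rpc()
+ * is a hypothetical transport helper, consume_results() stands in for
+ * whatever processes the SCHED_CREDIT_CHECK_RESULT entries on each page, and
+ * a little-endian host is assumed.
+ *
+ *	uint8_t inbuf[MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN] = { 0 };
+ *	uint8_t outbuf[MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX];
+ *	uint32_t page = 0, npages = 1, gen = 0, this_gen;
+ *
+ *	do {
+ *		memcpy(inbuf + MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_OFST,
+ *		       &page, 4);
+ *		mcdi_rpc(MC_CMD_CHECK_SCHEDULER_CREDITS, inbuf, sizeof(inbuf),
+ *			 outbuf, sizeof(outbuf));
+ *		memcpy(&npages, outbuf +
+ *		       MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST, 4);
+ *		memcpy(&this_gen, outbuf +
+ *		       MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST, 4);
+ *		if (page == 0)
+ *			gen = this_gen;
+ *		else if (this_gen != gen)
+ *			break;
+ *		consume_results(outbuf);
+ *	} while (++page < npages);
+ */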
+
+
+/***********************************/
+/* MC_CMD_TXQ_STATS
+ * Query per-TXQ statistics.
+ */
+#define MC_CMD_TXQ_STATS 0x1d5
+#undef MC_CMD_0x1d5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TXQ_STATS_IN msgrequest */
+#define MC_CMD_TXQ_STATS_IN_LEN 8
+/* Instance of TXQ to retrieve statistics for */
+#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
+#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
+/* Flags for the request */
+#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
+#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
+#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
+#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
+#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
+
+/* MC_CMD_TXQ_STATS_OUT msgresponse */
+#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
+#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
+#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
+#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
+
/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
* defined in SF-120734-TC with more information in SF-122717-TC.
*/
@@ -21044,7 +25112,13 @@
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LBN 0
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LBN 32
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_WIDTH 32
/***********************************/
@@ -21075,13 +25149,50 @@
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LBN 64
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LBN 96
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_WIDTH 32
/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */
#define MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0
/***********************************/
+/* MC_CMD_VIRTIO_GET_CAPABILITIES
+ * Get virtio capabilities supported by the device. Returns general virtio
+ * capabilities and limitations of the hardware / firmware implementation
+ * (the hardware device as a whole), rather than those of individual
+ * configured virtio devices. At present, only the absolute maximum number of
+ * queues allowed on multi-queue devices is returned. The response is expected
+ * to be
+ * extended as necessary in the future.
+ */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
+#undef MC_CMD_0x1d3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
+/* Type of device to get capabilities for. Matches the device id as defined by
+ * the virtio spec.
+ */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+
+/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
+/* Maximum number of queues supported for a single device instance */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
+
+
+/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -21133,17 +25244,35 @@
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LBN 128
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LBN 160
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_WIDTH 32
/* Address of the available ring in the virtqueue. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LBN 192
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LBN 224
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_WIDTH 32
/* Address of the used ring in the virtqueue. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LBN 256
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LBN 288
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_WIDTH 32
/* PASID to use on PCIe transactions involving this queue. Ignored if the
* USE_PASID flag is not set.
*/
@@ -21167,21 +25296,35 @@
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LBN 384
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LBN 416
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */
-/* The inital producer index for this queue's used ring. If this queue is being
- * created to be migrated into, this should be the FINAL_PIDX value returned by
- * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. Otherwise, it
+/* The initial available index for this virtqueue. If this queue is being
+ * created to be migrated into, this should be the FINAL_AVAIL_IDX value
+ * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or
+ * equivalent if the original queue was on a third-party device). Otherwise, it
* should be zero.
*/
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST 56
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_LEN 4
+/* Alias of INITIAL_AVAIL_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4
-/* The inital consumer index for this queue's available ring. If this queue is
- * being created to be migrated into, this should be the FINAL_CIDX value
- * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from.
- * Otherwise, it should be zero.
- */
+/* The initial used index for this virtqueue. If this queue is being created to
+ * be migrated into, this should be the FINAL_USED_IDX value returned by
+ * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or equivalent if
+ * the original queue was on a third-party device). Otherwise, it should be
+ * zero.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST 60
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_LEN 4
+/* Alias of INITIAL_USED_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4
/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated
@@ -21226,10 +25369,16 @@
/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8
-/* The producer index of the used ring when the queue was stopped. */
+/* The available index of the virtqueue when the queue was stopped. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST 0
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_LEN 4
+/* Alias of FINAL_AVAIL_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4
-/* The consumer index of the available ring when the queue was stopped. */
+/* The used index of the virtqueue when the queue was stopped. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST 4
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_LEN 4
+/* Alias of FINAL_USED_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4
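+
+/* Migration sketch (editorial illustration only): carrying the virtqueue
+ * indices from a queue being torn down into its replacement, per the field
+ * descriptions above. finibuf is the MC_CMD_VIRTIO_FINI_QUEUE response of the
+ * old queue and initbuf is the MC_CMD_VIRTIO_INIT_QUEUE request being
+ * assembled for the new one; both are plain byte buffers in this sketch.
+ *
+ *	memcpy(initbuf + MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST,
+ *	       finibuf + MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST, 4);
+ *	memcpy(initbuf + MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST,
+ *	       finibuf + MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST, 4);
+ */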
@@ -21309,16 +25458,40 @@
#define PCIE_FUNCTION_VF_NULL 0xffff
#define PCIE_FUNCTION_VF_LBN 16
#define PCIE_FUNCTION_VF_WIDTH 16
-/* PCIe interface of the function */
+/* PCIe interface of the function. Values should be taken from the
+ * PCIE_INTERFACE enum
+ */
#define PCIE_FUNCTION_INTF_OFST 4
#define PCIE_FUNCTION_INTF_LEN 4
-/* enum: Host PCIe interface */
+/* enum: Host PCIe interface. (Alias for HOST_PRIMARY, provided for backwards
+ * compatibility)
+ */
#define PCIE_FUNCTION_INTF_HOST 0x0
-/* enum: Application Processor interface */
+/* enum: Application Processor interface (alias for NIC_EMBEDDED, provided for
+ * backwards compatibility)
+ */
#define PCIE_FUNCTION_INTF_AP 0x1
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
+/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
+ * (absolute VI number + VI relative queue number). On Keystone, a VI can
+ * contain multiple queues (at present, up to 2), each with separate controls
+ * for direction. This structure is required to uniquely identify the absolute
+ * source queue for descriptor proxy functions.
+ */
+#define QUEUE_ID_LEN 4
+/* Absolute VI number */
+#define QUEUE_ID_ABS_VI_OFST 0
+#define QUEUE_ID_ABS_VI_LEN 2
+#define QUEUE_ID_ABS_VI_LBN 0
+#define QUEUE_ID_ABS_VI_WIDTH 16
+/* Relative queue number within the VI */
+#define QUEUE_ID_REL_QUEUE_LBN 16
+#define QUEUE_ID_REL_QUEUE_WIDTH 1
+#define QUEUE_ID_RESERVED_LBN 17
+#define QUEUE_ID_RESERVED_WIDTH 15
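+
+/* Illustrative pack/unpack of a QUEUE_ID word (editorial sketch only), using
+ * the LBN/WIDTH values above: the absolute VI occupies the low 16 bits and
+ * the VI-relative queue number the single bit above it.
+ *
+ *	static uint32_t queue_id_pack(uint16_t abs_vi, unsigned int rel_queue)
+ *	{
+ *		return (uint32_t)abs_vi |
+ *		       ((uint32_t)(rel_queue & 1) << QUEUE_ID_REL_QUEUE_LBN);
+ *	}
+ *
+ *	static uint16_t queue_id_abs_vi(uint32_t id)
+ *	{
+ *		return (uint16_t)(id & 0xffff);
+ *	}
+ */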
+
/***********************************/
/* MC_CMD_DESC_PROXY_FUNC_CREATE
@@ -21347,7 +25520,19 @@
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
/* The personality to set. The meanings of the personalities are defined in
* SF-120734-TC with more information in SF-122717-TC. At present, we only
* support proxying for VIRTIO_BLK
@@ -21371,7 +25556,19 @@
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
/***********************************/
@@ -21412,7 +25609,13 @@
#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
@@ -21485,7 +25688,13 @@
#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
@@ -21720,7 +25929,19 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
/* Function personality */
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
@@ -21733,6 +25954,10 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
/* enum: Function configuration is pending reset */
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
+/* enum: Function configuration is missing (created, but no configuration
+ * committed)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
/* Generation count to be delivered in an event once the configuration becomes
* live (if status is "pending")
*/
@@ -21742,7 +25967,7 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG
+ * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
*/
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
@@ -21780,9 +26005,27 @@
#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
/* Function personality */
#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
@@ -21840,7 +26083,11 @@
* Enable descriptor proxying for function into target event queue. Returns VI
* allocation info for the proxy source function, so that the caller can map
* absolute VI IDs from descriptor proxy events back to the originating
- * function.
+ * function. This is a legacy function that only supports single queue proxy
+ * devices. It is also limited in that it can only be called after host driver
+ * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
+ * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
+ * supports multi-queue devices and has no dependency on host driver attach.
*/
#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
#undef MC_CMD_0x178_PRIVILEGE_CTG
@@ -21871,8 +26118,44 @@
/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
+ * Enable descriptor proxying for a source queue on a host function into target
+ * event queue. Source queue number is a relative virtqueue number on the
+ * source function (0 to max_virtqueues-1). For a multi-queue device, the
+ * caller must enable all source queues individually. To retrieve absolute VI
+ * information for the source function (so that VI IDs from descriptor proxy
+ * events can be mapped back to source function / queue) see
+ * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
+#undef MC_CMD_0x1d0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
+/* Source relative queue number to enable proxying on */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
+/* Descriptor proxy sink queue (caller function relative). Must be extended
+ * width event queue
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
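+
+/* Usage sketch (editorial illustration only): enabling proxying on every
+ * source queue of a multi-queue function, as the command description above
+ * requires. max_virtqueues would come from the function's configuration;
+ * mcdi_rpc() is a hypothetical transport helper and a little-endian host is
+ * assumed.
+ *
+ *	int enable_all_queues(uint32_t handle, uint32_t max_virtqueues,
+ *			      uint32_t target_evq)
+ *	{
+ *		uint8_t inbuf[MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN];
+ *		uint32_t q;
+ *		int rc;
+ *
+ *		for (q = 0; q < max_virtqueues; q++) {
+ *			memcpy(inbuf + MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST,
+ *			       &handle, 4);
+ *			memcpy(inbuf + MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST,
+ *			       &q, 4);
+ *			memcpy(inbuf + MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST,
+ *			       &target_evq, 4);
+ *			rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE,
+ *				      inbuf, sizeof(inbuf), NULL, 0);
+ *			if (rc)
+ *				return rc;
+ *		}
+ *		return 0;
+ *	}
+ */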
+
+
+/***********************************/
/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function
+ * Disable descriptor proxying for function. For multi-queue functions,
+ * disables all queues.
*/
#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
#undef MC_CMD_0x179_PRIVILEGE_CTG
@@ -21892,6 +26175,75 @@
/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
+ * Disable descriptor proxying for a specific source queue on a function.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
+#undef MC_CMD_0x1d1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
+/* Source relative queue number to disable proxying on */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_GET_VI_INFO
+ * Returns absolute VI allocation information for the descriptor proxy source
+ * function referenced by HANDLE, so that the caller can map absolute VI IDs
+ * from descriptor proxy events back to the originating function and queue. The
+ * call is only valid after the host driver for the source function has
+ * attached (after receiving a driver attach event for the descriptor proxy
+ * function) and will fail with ENOTCONN otherwise.
+ */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
+#undef MC_CMD_0x1d2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
+/* VI information (VI ID + VI relative queue number) for each of the source
+ * queues (in order from 0 to max_virtqueues-1), as an array of QUEUE_ID
+ * structures.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
+
+
+/***********************************/
/* MC_CMD_GET_ADDR_SPC_ID
* Get Address space identifier for use in mem2mem descriptors for a given
* target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
@@ -21942,7 +26294,19 @@
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
@@ -21962,7 +26326,3381 @@
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_CLIENT_HANDLE
+ * Obtain a handle for a client given a description of that client. N.B. this
+ * command is subject to change given the open discussion about how PCIe
+ * functions should be referenced on an iEP (integrated endpoint: functions
+ * span multiple buses) and multihost (multiple PCIe interfaces) systems.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE 0x1c3
+#undef MC_CMD_0x1c3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLIENT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_LEN 12
+/* Type of client to get a client handle for */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_LEN 4
+/* enum: Obtain a client handle for a PCIe function-type client. */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC 0x0
+/* PCIe Function ID (as struct PCIE_FUNCTION). Valid when TYPE==FUNC. Use: -
+ * INTF=CALLER, PF=PF_NULL, VF=VF_NULL to refer to the calling function -
+ * INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
+ * a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
+ * to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
+ * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
+ * interface where ... refers to a small integer for the VF/PF fields, and to
+ * values from the PCIE_INTERFACE enum for for the INTF field. It's only
+ * meaningful to use INTF=CALLER within a structure that's an argument to
+ * MC_CMD_DEVEL_GET_CLIENT_HANDLE.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LEN 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LBN 32
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_WIDTH 32
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_OFST 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LBN 64
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_WIDTH 32
+/* enum: NULL value for the INTF field of struct PCIE_FUNCTION. Provided for
+ * backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_LEN 2
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_OFST 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_LEN 4
+
+/* MC_CMD_GET_CLIENT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_OFST 0
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_LEN 4
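+
+/* Usage sketch (editorial illustration only): requesting the handle of VF
+ * number vf_idx under the calling PF, following the FUNC field description
+ * above. PCIE_INTERFACE_CALLER is the value the description refers to;
+ * PCIE_FUNCTION_PF_NULL is assumed here to be the PF_NULL constant defined
+ * elsewhere in this header. mcdi_rpc() and the little-endian host are also
+ * assumptions of this sketch.
+ *
+ *	uint8_t inbuf[MC_CMD_GET_CLIENT_HANDLE_IN_LEN] = { 0 };
+ *	uint8_t outbuf[MC_CMD_GET_CLIENT_HANDLE_OUT_LEN];
+ *	uint32_t type = MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC;
+ *	uint32_t intf = PCIE_INTERFACE_CALLER;
+ *	uint16_t pf = PCIE_FUNCTION_PF_NULL, vf = vf_idx;
+ *	uint32_t handle;
+ *
+ *	memcpy(inbuf + MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_OFST, &type, 4);
+ *	memcpy(inbuf + MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST, &pf, 2);
+ *	memcpy(inbuf + MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST, &vf, 2);
+ *	memcpy(inbuf + MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_OFST, &intf, 4);
+ *	mcdi_rpc(MC_CMD_GET_CLIENT_HANDLE, inbuf, sizeof(inbuf),
+ *		 outbuf, sizeof(outbuf));
+ *	memcpy(&handle, outbuf + MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_OFST, 4);
+ */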
+
+/* MAE_FIELD_FLAGS structuredef */
+#define MAE_FIELD_FLAGS_LEN 4
+#define MAE_FIELD_FLAGS_FLAT_OFST 0
+#define MAE_FIELD_FLAGS_FLAT_LEN 4
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_OFST 0
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_LBN 0
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_WIDTH 6
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_OFST 0
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_LBN 6
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_WIDTH 1
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_OFST 0
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_LBN 7
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_WIDTH 1
+#define MAE_FIELD_FLAGS_FLAT_LBN 0
+#define MAE_FIELD_FLAGS_FLAT_WIDTH 32
+
+/* MAE_ENC_FIELD_PAIRS structuredef: Mask and value pairs for all fields that
+ * it makes sense to use to determine the encapsulation type of a packet. Its
+ * intended use is to keep a common packing of fields across multiple MCDI
+ * commands, keeping things inherently synchronised and allowing code to be
+ * shared. To use in an MCDI command, the command should end with a variable
+ * length byte
+ * array populated with this structure. Do not extend this structure. Instead,
+ * create _Vx versions with the necessary fields appended. That way, the
+ * existing semantics for extending MCDI commands are preserved.
+ */
+#define MAE_ENC_FIELD_PAIRS_LEN 156
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST 8
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LBN 64
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 10
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 80
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_OFST 12
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LBN 96
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 14
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 112
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_OFST 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LBN 128
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 18
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 144
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_OFST 20
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LBN 160
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 22
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 176
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_OFST 24
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LBN 192
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 26
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 208
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_OFST 28
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LBN 224
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 34
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 272
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_OFST 40
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LBN 320
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 46
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 368
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_OFST 52
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LBN 416
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 56
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 448
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_OFST 60
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LBN 480
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 76
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 608
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_OFST 92
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LBN 736
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_OFST 96
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LBN 768
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_OFST 100
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LBN 800
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_OFST 116
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LBN 928
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_OFST 132
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LBN 1056
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_OFST 133
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LBN 1064
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_OFST 134
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LBN 1072
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_OFST 135
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LBN 1080
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_OFST 136
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LBN 1088
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_OFST 137
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LBN 1096
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_WIDTH 8
+/* Deprecated in favour of ENC_FLAGS alias. */
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_LBN 0
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_LBN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_LBN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LBN 1104
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_WIDTH 8
+/* More generic alias for ENC_VLAN_FLAGS. */
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LBN 1104
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_WIDTH 8
+/* Deprecated in favour of ENC_FLAGS_MASK alias. */
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_LBN 0
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_LBN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_LBN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LBN 1112
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_WIDTH 8
+/* More generic alias for ENC_FLAGS_MASK. */
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LBN 1112
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_OFST 140
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LBN 1120
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 144
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 1152
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_OFST 148
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LBN 1184
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 150
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 1200
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_OFST 152
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LBN 1216
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 154
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 1232
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16
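+/* Illustrative example (not part of the generated MCDI definitions): to
+ * match only on an encapsulated ether type of IPv4 (0x0800), a caller would
+ * typically write htons(0x0800) at ENC_ETHER_TYPE_BE_OFST (8) and 0xffff at
+ * ENC_ETHER_TYPE_BE_MASK_OFST (10), leaving every other mask field zeroed so
+ * that those fields are wildcarded.
+ */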
+
+/* MAE_FIELD_MASK_VALUE_PAIRS structuredef: Mask and value pairs for all fields
+ * currently defined. Same semantics as MAE_ENC_FIELD_PAIRS.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_LEN 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_OFST 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LBN 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_OFST 12
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LBN 96
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_OFST 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LBN 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_OFST 18
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LBN 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_OFST 20
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LBN 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_OFST 22
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LBN 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_OFST 24
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LBN 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_OFST 26
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LBN 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_OFST 28
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LBN 224
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_OFST 30
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LBN 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_OFST 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LBN 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_OFST 34
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LBN 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_OFST 36
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LBN 288
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_OFST 42
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LBN 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_OFST 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LBN 384
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_OFST 54
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LBN 432
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_OFST 60
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LBN 480
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_OFST 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LBN 512
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_OFST 68
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LBN 544
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_OFST 84
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LBN 672
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_OFST 100
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LBN 800
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_OFST 104
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LBN 832
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_OFST 108
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LBN 864
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_OFST 124
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LBN 992
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_OFST 140
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LBN 1120
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_OFST 141
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LBN 1128
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_OFST 142
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LBN 1136
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_OFST 143
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LBN 1144
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_WIDTH 8
+/* Due to hardware limitations, firmware may return
+ * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value
+ * other than 1.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_OFST 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LBN 1152
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_OFST 145
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LBN 1160
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_OFST 148
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LBN 1184
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_OFST 152
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LBN 1216
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_OFST 156
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LBN 1248
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_OFST 158
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LBN 1264
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_OFST 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LBN 1280
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_OFST 162
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LBN 1296
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_OFST 164
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LBN 1312
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_OFST 166
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LBN 1328
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_OFST 168
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LBN 1344
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_OFST 172
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LBN 1376
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_OFST 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LBN 1408
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_OFST 180
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LBN 1440
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_OFST 184
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LBN 1472
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 188
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 1504
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_OFST 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LBN 1536
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 194
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 1552
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_OFST 196
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LBN 1568
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 198
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 1584
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_OFST 200
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LBN 1600
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 202
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 1616
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_OFST 204
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LBN 1632
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 206
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 1648
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_OFST 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LBN 1664
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 214
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 1712
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_OFST 220
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LBN 1760
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 226
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 1808
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_OFST 232
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LBN 1856
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 236
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 1888
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_OFST 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LBN 1920
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 2048
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_OFST 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LBN 2176
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_OFST 276
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LBN 2208
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_OFST 280
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LBN 2240
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_OFST 296
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LBN 2368
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_OFST 312
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LBN 2496
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_OFST 313
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LBN 2504
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_OFST 314
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LBN 2512
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_OFST 315
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LBN 2520
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_OFST 316
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LBN 2528
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_OFST 317
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LBN 2536
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_OFST 320
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LBN 2560
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 324
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 2592
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_OFST 328
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LBN 2624
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 330
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 2640
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_OFST 332
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LBN 2656
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 334
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 2672
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_OFST 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LBN 2688
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_OFST 340
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LBN 2720
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_WIDTH 32
+
+/* MAE_FIELD_MASK_VALUE_PAIRS_V2 structuredef */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN 372
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_OFST 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LBN 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_OFST 12
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LBN 96
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_OFST 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LBN 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_OFST 18
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LBN 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_OFST 20
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LBN 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_OFST 22
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LBN 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_OFST 24
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LBN 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_OFST 26
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LBN 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_OFST 28
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LBN 224
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_OFST 30
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LBN 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_OFST 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LBN 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_OFST 34
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LBN 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_OFST 36
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LBN 288
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_OFST 42
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LBN 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_OFST 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LBN 384
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_OFST 54
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LBN 432
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_OFST 60
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LBN 480
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_OFST 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LBN 512
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_OFST 68
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LBN 544
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_OFST 84
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LBN 672
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_OFST 100
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LBN 800
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_OFST 104
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LBN 832
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_OFST 108
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LBN 864
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_OFST 124
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LBN 992
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_OFST 140
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LBN 1120
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_OFST 141
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LBN 1128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_OFST 142
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LBN 1136
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_OFST 143
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LBN 1144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_WIDTH 8
+/* Due to hardware limitations, firmware may return
+ * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value
+ * other than 1.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_OFST 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LBN 1152
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_OFST 145
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LBN 1160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_OFST 148
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LBN 1184
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_OFST 152
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LBN 1216
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_OFST 156
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LBN 1248
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_OFST 158
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LBN 1264
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_OFST 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LBN 1280
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_OFST 162
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LBN 1296
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_OFST 164
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LBN 1312
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_OFST 166
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LBN 1328
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_OFST 168
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LBN 1344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_OFST 172
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LBN 1376
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_OFST 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LBN 1408
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_OFST 180
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LBN 1440
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_OFST 184
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LBN 1472
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_OFST 188
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LBN 1504
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_OFST 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LBN 1536
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_OFST 194
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LBN 1552
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_OFST 196
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LBN 1568
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_OFST 198
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LBN 1584
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_OFST 200
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LBN 1600
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_OFST 202
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LBN 1616
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_OFST 204
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LBN 1632
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_OFST 206
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LBN 1648
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_OFST 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LBN 1664
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_OFST 214
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LBN 1712
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_OFST 220
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LBN 1760
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_OFST 226
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LBN 1808
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_OFST 232
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LBN 1856
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_OFST 236
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LBN 1888
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_OFST 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LBN 1920
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_OFST 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LBN 2048
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_OFST 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LBN 2176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_OFST 276
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LBN 2208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_OFST 280
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LBN 2240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_OFST 296
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LBN 2368
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_OFST 312
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LBN 2496
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_OFST 313
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LBN 2504
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_OFST 314
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LBN 2512
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_OFST 315
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LBN 2520
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_OFST 316
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LBN 2528
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_OFST 317
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LBN 2536
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_OFST 320
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LBN 2560
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_OFST 324
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LBN 2592
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_OFST 328
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LBN 2624
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_OFST 330
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LBN 2640
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_OFST 332
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LBN 2656
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_OFST 334
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LBN 2672
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_OFST 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LBN 2688
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_OFST 340
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LBN 2720
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_LBN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_LBN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_LBN 3
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_LBN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_LBN 5
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_LBN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_LBN 7
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_LBN 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_LBN 9
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LBN 2752
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_OFST 348
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LBN 2784
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_OFST 352
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LBN 2816
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_OFST 354
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LBN 2832
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_OFST 356
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LBN 2848
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_OFST 360
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LBN 2880
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_OFST 364
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LBN 2912
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_OFST 365
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LBN 2920
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_OFST 366
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LBN 2928
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_OFST 367
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LBN 2936
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_OFST 368
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LBN 2944
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_WIDTH 8
+/* Set to zero */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_OFST 369
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LBN 2952
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_OFST 370
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LBN 2960
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_WIDTH 8
+/* Set to zero */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_OFST 371
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LBN 2968
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_WIDTH 8
+
+/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
+ * integer value (mport_id) that is guaranteed to be representable within
+ * 32 bits or within any NIC interface field that needs to store the value
+ * (whichever is narrower). This selector structure provides a stable way to
+ * refer to m-ports.
+ */
+#define MAE_MPORT_SELECTOR_LEN 4
+/* Used to force the tools to output bitfield-style defines for this structure.
+ */
+#define MAE_MPORT_SELECTOR_FLAT_OFST 0
+#define MAE_MPORT_SELECTOR_FLAT_LEN 4
+/* enum: An m-port selector value that is guaranteed never to represent a real
+ * mport
+ */
+#define MAE_MPORT_SELECTOR_NULL 0x0
+/* enum: The m-port assigned to the calling client. */
+#define MAE_MPORT_SELECTOR_ASSIGNED 0x1000000
+#define MAE_MPORT_SELECTOR_TYPE_OFST 0
+#define MAE_MPORT_SELECTOR_TYPE_LBN 24
+#define MAE_MPORT_SELECTOR_TYPE_WIDTH 8
+/* enum: The MPORT connected to a given physical port */
+#define MAE_MPORT_SELECTOR_TYPE_PPORT 0x2
+/* enum: The MPORT assigned to a given PCIe function. Deprecated in favour of
+ * MH_FUNC.
+ */
+#define MAE_MPORT_SELECTOR_TYPE_FUNC 0x3
+/* enum: An mport_id */
+#define MAE_MPORT_SELECTOR_TYPE_MPORT_ID 0x4
+/* enum: The MPORT assigned to a given PCIe function (see also FWRIVERHD-1108)
+ */
+#define MAE_MPORT_SELECTOR_TYPE_MH_FUNC 0x5
+/* enum: This is guaranteed never to be a valid selector type */
+#define MAE_MPORT_SELECTOR_TYPE_INVALID 0xff
+#define MAE_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MAE_MPORT_SELECTOR_MPORT_ID_LBN 0
+#define MAE_MPORT_SELECTOR_MPORT_ID_WIDTH 24
+#define MAE_MPORT_SELECTOR_PPORT_ID_OFST 0
+#define MAE_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MAE_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_HOST_PRIMARY 0x1 /* enum */
+#define MAE_MPORT_SELECTOR_NIC_EMBEDDED 0x2 /* enum */
+/* enum: Deprecated; use CALLER_INTF instead. */
+#define MAE_MPORT_SELECTOR_CALLER 0xf
+#define MAE_MPORT_SELECTOR_CALLER_INTF 0xf /* enum */
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_LBN 16
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_WIDTH 8
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_LBN 0
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_WIDTH 16
+/* enum: Used for VF_ID to indicate a physical function. */
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL 0xffff
+/* enum: Used for PF_ID to indicate the physical function of the calling
+ * client.
+ * - When used by a PF with VF_ID == VF_ID_NULL, the mport selector relates
+ *   to the calling function. (For clarity, it is recommended that clients
+ *   use ASSIGNED to achieve this behaviour.)
+ * - When used by a PF with VF_ID != VF_ID_NULL, the mport selector relates
+ *   to a VF child of the calling function.
+ * - When used by a VF with VF_ID == VF_ID_NULL, the mport selector relates
+ *   to the PF owning the calling function.
+ * - When used by a VF with VF_ID != VF_ID_NULL, the mport selector relates
+ *   to a sibling VF of the calling function.
+ * - Not meaningful when used by a client that is not a PCIe function.
+ */
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER 0xff
+/* enum: Same as PF_ID_CALLER, but for use in the smaller MH_PF_ID field. Only
+ * valid if FUNC_INTF_ID is CALLER.
+ */
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_CALLER 0xf
+#define MAE_MPORT_SELECTOR_FLAT_LBN 0
+#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
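
The selector is a single 32-bit word: TYPE occupies bits 31:24 and the type-specific payload sits in the low bits (PPORT_ID in bits 3:0, MPORT_ID in bits 23:0, and so on). Below is a minimal sketch of packing a physical-port selector from the LBN/WIDTH values above; the helper names and the use of plain shifts (rather than the driver's dword-population macros) are illustrative assumptions, not code from this patch.

#include <stdint.h>

/* Hypothetical helper: build a MAE_MPORT_SELECTOR word referring to a
 * physical port. TYPE lives at LBN 24 (width 8) and PPORT_ID at LBN 0
 * (width 4), per the defines above.
 */
static inline uint32_t mae_mport_selector_pport(uint32_t pport_id)
{
	return ((uint32_t)MAE_MPORT_SELECTOR_TYPE_PPORT <<
		MAE_MPORT_SELECTOR_TYPE_LBN) |
	       (pport_id & ((1u << MAE_MPORT_SELECTOR_PPORT_ID_WIDTH) - 1));
}

/* The m-port assigned to the calling client needs no packing at all. */
static inline uint32_t mae_mport_selector_assigned(void)
{
	return MAE_MPORT_SELECTOR_ASSIGNED;
}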
+
+/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
+ * virtual network port by MAE port and link end
+ */
+#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
+/* The MAE MPORT of interest */
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_WIDTH 32
+/* Which end of the link identified by MPORT to consider */
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_OFST 4
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MPORT_END */
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LBN 32
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_WIDTH 32
+/* A field for accessing the endpoint selector as a collection of bits */
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LEN 8
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_WIDTH 32
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_OFST 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LBN 32
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_WIDTH 32
+/* enum: Set FLAT to this value to obtain backward-compatible behaviour in
+ * commands that have been extended to take a MAE_LINK_ENDPOINT_SELECTOR
+ * argument. New commands that are designed to take such an argument from the
+ * start will not support this.
+ */
+#define MAE_LINK_ENDPOINT_SELECTOR_MAE_LINK_ENDPOINT_COMPAT 0x0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_WIDTH 64
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_CAPS
+ * Describes capabilities of the MAE (Match-Action Engine)
+ */
+#define MC_CMD_MAE_GET_CAPS 0x140
+#undef MC_CMD_0x140_PRIVILEGE_CTG
+
+#define MC_CMD_0x140_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_GET_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_OUT_LEN 52
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response shorter than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_LEN 4
+
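As the API_VER comment notes, older firmware may return a response shorter than 52 bytes, in which case the field is absent and must be read as zero. A hedged sketch of that length check follows; the outbuf/outlen parameters stand in for whatever MCDI response buffer the caller already holds.

#include <stddef.h>
#include <stdint.h>

/* Read the MAE API major version from a MC_CMD_MAE_GET_CAPS response,
 * treating a short (pre-API_VER) response as version 0, as required by
 * the comment above. MCDI dwords are little-endian on the wire.
 */
static uint32_t mae_caps_api_ver(const uint8_t *outbuf, size_t outlen)
{
	const unsigned int ofst = MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST;

	if (outlen < ofst + MC_CMD_MAE_GET_CAPS_OUT_API_VER_LEN)
		return 0;
	return (uint32_t)outbuf[ofst] | ((uint32_t)outbuf[ofst + 1] << 8) |
	       ((uint32_t)outbuf[ofst + 2] << 16) |
	       ((uint32_t)outbuf[ofst + 3] << 24);
}
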
+/* MC_CMD_MAE_GET_CAPS_V2_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_LEN 60
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response shorter than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_LEN 4
+/* Mask of supported counter types. Each bit position corresponds to a value of
+ * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response),
+ * clients must assume that only AR counters are supported (i.e.
+ * COUNTER_TYPES_SUPPORTED==0x1). See also
+ * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_OFST 52
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_LEN 4
+/* The total number of conntrack counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_OFST 56
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_LEN 4
+
+/* MC_CMD_MAE_GET_CAPS_V3_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_LEN 64
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response shorter than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_LEN 4
+/* Mask of supported counter types. Each bit position corresponds to a value of
+ * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response),
+ * clients must assume that only AR counters are supported (i.e.
+ * COUNTER_TYPES_SUPPORTED==0x1). See also
+ * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_OFST 52
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_LEN 4
+/* The total number of conntrack counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_OFST 56
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_LEN 4
+/* The total number of Outer Rule counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_OFST 60
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_LEN 4
+
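A V1-length response also omits COUNTER_TYPES_SUPPORTED, and the comments above require clients to then assume AR-only support. A small sketch of that fallback, again against an assumed caller-provided response buffer:

#include <stddef.h>
#include <stdint.h>

/* Return the supported counter-type mask, defaulting to AR only (bit 0)
 * when the response predates the V2 field.
 */
static uint32_t mae_caps_counter_types(const uint8_t *outbuf, size_t outlen)
{
	const unsigned int ofst =
		MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_OFST;

	if (outlen < ofst + MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_LEN)
		return 0x1;	/* AR counters only */
	return (uint32_t)outbuf[ofst] | ((uint32_t)outbuf[ofst + 1] << 8) |
	       ((uint32_t)outbuf[ofst + 2] << 16) |
	       ((uint32_t)outbuf[ofst + 3] << 24);
}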
+
+/***********************************/
+/* MC_CMD_MAE_GET_AR_CAPS
+ * Get a level of support for match fields when used in match-action rules
+ */
+#define MC_CMD_MAE_GET_AR_CAPS 0x141
+#undef MC_CMD_0x141_PRIVILEGE_CTG
+
+#define MC_CMD_0x141_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_GET_AR_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_AR_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_AR_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMIN 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX 252
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4)
+/* Number of fields actually returned in FIELD_FLAGS. */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_OFST 0
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_LEN 4
+/* Array of values indicating the NIC's support for a given field, indexed by
+ * field id. The driver must ensure space for
+ * MC_CMD_MAE_GET_CAPS.MATCH_FIELD_COUNT entries in the array.
+ */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_LEN 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MINNUM 0
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_OR_CAPS
+ * Get a level of support for fields used in outer rule keys.
+ */
+#define MC_CMD_MAE_GET_OR_CAPS 0x142
+#undef MC_CMD_0x142_PRIVILEGE_CTG
+
+#define MC_CMD_0x142_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_GET_OR_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_OR_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_OR_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMIN 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX 252
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4)
+/* Number of fields actually returned in FIELD_FLAGS. */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_OFST 0
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_LEN 4
+/* Same semantics as MC_CMD_MAE_GET_AR_CAPS.MAE_FIELD_FLAGS */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_OFST 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_LEN 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MINNUM 0
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTER_ALLOC
+ * Allocate match-action-engine counters, which can be referenced in various
+ * tables.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC 0x143
+#undef MC_CMD_0x143_PRIVILEGE_CTG
+
+#define MC_CMD_0x143_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTER_ALLOC_IN msgrequest: Using this is equivalent to using V2
+ * with COUNTER_TYPE=AR.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_LEN 4
+/* The number of counters that the driver would like allocated */
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_LEN 4
+
+/* MC_CMD_MAE_COUNTER_ALLOC_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN 8
+/* The number of counters that the driver would like allocated */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_LEN 4
+/* Which type of counter to allocate. */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_OFST 4
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_TYPE */
+
+/* MC_CMD_MAE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMIN 12
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(len) (((len)-8)/4)
+/* Generation count. Packets with generation count >= GENERATION_COUNT will
+ * contain valid counter values for counter IDs allocated in this call, unless
+ * the counter values are zero and zero squash is enabled. Note that there is
+ * an independent GENERATION_COUNT object per counter type, and that generation
+ * counts wrap from 0xffffffff to 1.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_LEN 4
+/* enum: Generation counter 0 is reserved and unused. */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_INVALID 0x0
+/* The number of counter IDs that the NIC allocated. It is never less than 1;
+ * failure to allocate a single counter will cause an error to be returned. It
+ * is never greater than REQUESTED_COUNT, but may be less.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_OFST 4
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_LEN 4
+/* An array containing the IDs for the counters allocated. */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 8
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 61
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+
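The response is variable length and the NIC may grant fewer IDs than requested, so the number of valid entries comes from the response length via the _NUM() helper. A sketch of walking the granted IDs; read_dword() is a hypothetical little-endian accessor, not part of this header.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical little-endian dword reader for an MCDI response buffer. */
static uint32_t read_dword(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Walk the counter IDs granted by MC_CMD_MAE_COUNTER_ALLOC; there may be
 * fewer than REQUESTED_COUNT, but never zero (a total failure returns an
 * MCDI error instead).
 */
static void mae_counter_alloc_parse(const uint8_t *outbuf, size_t outlen)
{
	unsigned int n = MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(outlen);
	unsigned int i;

	for (i = 0; i < n; i++) {
		uint32_t id = read_dword(outbuf,
			MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST + 4 * i);
		/* Record id for later use in action sets or counter lists. */
		(void)id;
	}
}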
+
+/***********************************/
+/* MC_CMD_MAE_COUNTER_FREE
+ * Free match-action-engine counters
+ */
+#define MC_CMD_MAE_COUNTER_FREE 0x144
+#undef MC_CMD_0x144_PRIVILEGE_CTG
+
+#define MC_CMD_0x144_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTER_FREE_IN msgrequest: Using this is equivalent to using V2
+ * with COUNTER_TYPE=AR.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMIN 8
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX 132
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX_MCDI2 132
+#define MC_CMD_MAE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* The number of counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_LEN 4
+/* An array containing the counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_COUNTER_FREE_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN 136
+/* The number of counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_LEN 4
+/* An array containing the counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32
+/* Which type of counter to free. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_OFST 132
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_TYPE */
+
+/* MC_CMD_MAE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMIN 12
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX 136
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX_MCDI2 136
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_NUM(len) (((len)-8)/4)
+/* Generation count. A packet with generation count == GENERATION_COUNT will
+ * contain the final values for these counter IDs, unless the counter values
+ * are zero and zero squash is enabled. Note that the GENERATION_COUNT value is
+ * specific to the COUNTER_TYPE (IDENTIFIER field in packet header). Receiving
+ * a packet with generation count > GENERATION_COUNT guarantees that no more
+ * values will be written for these counters. If values for these counter IDs
+ * are present, the counter ID has been reallocated. A counter ID will not be
+ * reallocated within a single read cycle as this would merge increments from
+ * the 'old' and 'new' counters. GENERATION_COUNT_INVALID is reserved and
+ * unused.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_LEN 4
+/* The number of counter IDs actually freed. It is never less than 1; failure
+ * to free a single counter will cause an error to be returned. It is never
+ * greater than the number that were requested to be freed, but may be less if
+ * counters could not be freed.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_LEN 4
+/* An array containing the IDs for the counters that were freed. Note that
+ * failure to free a counter can only occur due to incorrect driver behaviour,
+ * so asserting that the expected counters were freed is reasonable. When
+ * debugging, attempting to free a single counter at a time will provide a
+ * reason for the failure to free said counter.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_OFST 8
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_START
+ * Start streaming counter values, specifying an RxQ to deliver packets to.
+ * Counters allocated to the calling function will be written in a round robin
+ * at a fixed cycle rate, assuming sufficient credits are available. The driver
+ * may cause the counter values to be written at a slower rate by constraining
+ * the availability of credits. Note that if the driver wishes to deliver
+ * packets to a different queue, it must call MAE_COUNTERS_STREAM_STOP to stop
+ * delivering packets to the current queue first.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START 0x151
+#undef MC_CMD_0x151_PRIVILEGE_CTG
+
+#define MC_CMD_0x151_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_IN msgrequest: Using V1 is equivalent to V2
+ * with COUNTER_TYPES_MASK=0x1 (i.e. AR counters only).
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_LEN 8
+/* The RxQ to write packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_LEN 2
+/* Maximum size in bytes of packets that may be written to the RxQ. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_OFST 2
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_LEN 2
+/* Optional flags. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_WIDTH 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_LBN 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_WIDTH 1
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN 12
+/* The RxQ to write packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_LEN 2
+/* Maximum size in bytes of packets that may be written to the RxQ. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST 2
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_LEN 2
+/* Optional flags. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_WIDTH 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_LBN 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_WIDTH 1
+/* Mask of which counter types should be reported. Each bit position
+ * corresponds to a value of the MAE_COUNTER_TYPE enum. For example a value of
+ * 0x3 requests both AR and CT counters. A value of zero is invalid. Counter
+ * types not selected by the mask value won't be included in the stream. If a
+ * client wishes to change which counter types are reported, it must first call
+ * MAE_COUNTERS_STREAM_STOP, then restart it with the new mask value.
+ * Requesting a counter type which isn't supported by firmware (reported in
+ * MC_CMD_MAE_GET_CAPS/COUNTER_TYPES_SUPPORTED) will result in ENOTSUP.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST 8
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_WIDTH 1
+
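A sketch of populating a V2 stream-start request that asks for both AR and CT counters (mask 0x3, as described above) and leaves FLAGS at zero, i.e. zero-squash enabled and no counter stall. The rxq/pkt_size arguments and the little-endian write helpers are illustrative assumptions.

#include <stdint.h>
#include <string.h>

/* Hypothetical little-endian writers for an MCDI request buffer. */
static void write_word(uint8_t *buf, unsigned int ofst, uint16_t v)
{
	buf[ofst] = v & 0xff;
	buf[ofst + 1] = v >> 8;
}

static void write_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	write_word(buf, ofst, v & 0xffff);
	write_word(buf, ofst + 2, v >> 16);
}

static void mae_stream_start_v2_build(uint8_t *inbuf, uint16_t rxq,
				      uint16_t pkt_size)
{
	memset(inbuf, 0, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN);
	write_word(inbuf, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST, rxq);
	write_word(inbuf,
		   MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST,
		   pkt_size);
	/* Bit 0 = AR, bit 1 = CT: request both counter types. */
	write_dword(inbuf,
		    MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST,
		    0x3);
}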
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP
+ * Stop streaming counter values to the specified RxQ.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP 0x152
+#undef MC_CMD_0x152_PRIVILEGE_CTG
+
+#define MC_CMD_0x152_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN 2
+/* The RxQ to stop writing packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_LEN 2
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_LEN 4
+/* Generation count for AR counters. The final set of AR counter values will be
+ * written out in packets with count == GENERATION_COUNT. An empty packet with
+ * count > GENERATION_COUNT indicates that no more counter values of this type
+ * will be written to this stream. GENERATION_COUNT_INVALID is reserved and
+ * unused.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMIN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX 32
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX_MCDI2 32
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_NUM(len) (((len)-0)/4)
+/* Array of generation counts, indexed by MAE_COUNTER_TYPE. Note that since
+ * MAE_COUNTER_TYPE_AR==0, this response is backwards-compatible with V1. The
+ * final set of counter values will be written out in packets with count ==
+ * GENERATION_COUNT. An empty packet with count > GENERATION_COUNT indicates
+ * that no more counter values of this type will be written to this stream.
+ * GENERATION_COUNT_INVALID is reserved and unused.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MINNUM 1
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM 8
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM_MCDI2 8
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS
+ * Give a number of credits to the packetiser. Each credit received allows the
+ * MC to write one packet to the RxQ, therefore for each credit the driver must
+ * have written sufficient descriptors for a packet of length
+ * MAE_COUNTERS_PACKETISER_STREAM_START/PACKET_SIZE and rung the doorbell.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS 0x153
+#undef MC_CMD_0x153_PRIVILEGE_CTG
+
+#define MC_CMD_0x153_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN 4
+/* Number of credits to give to the packetiser. */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT_LEN 0
+
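Since each credit permits the MC to write exactly one counter packet, a natural accounting scheme is to return one credit per RX buffer (of at least PACKET_SIZE bytes) that the driver re-posts after consuming a counter packet, once the descriptors are pushed and the doorbell rung. A minimal sketch, reusing the assumed write_dword helper from the stream-start sketch above:

/* Build a GIVE_CREDITS request returning n_buffers credits, one per
 * freshly posted RX descriptor able to hold a PACKET_SIZE counter packet.
 */
static void mae_stream_give_credits_build(uint8_t *inbuf, uint32_t n_buffers)
{
	write_dword(inbuf,
		    MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_OFST,
		    n_buffers);
}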
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC
+ * Allocate an encapsulation header to be used in an Action Rule response. The
+ * header must be constructed as a valid packet with 0-length payload.
+ * Specifically, the L3/L4 lengths & checksums will only be incrementally fixed
+ * by the NIC, rather than recomputed entirely. Currently only IPv4, IPv6 and
+ * UDP are supported. If the maximum number of headers have already been
+ * allocated then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC 0x148
+#undef MC_CMD_0x148_PRIVILEGE_CTG
+
+#define MC_CMD_0x148_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX 252
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(num) (4+1*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_NUM(len) (((len)-4)/1)
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_LEN 1
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MINNUM 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM 248
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_LEN 4
+/* enum: An encap metadata ID that is guaranteed never to represent real encap
+ * metadata
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL 0xffffffff
+
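The request is just ENCAP_TYPE followed by the raw header bytes, so the MCDI length is derived with the _LEN(num) macro. A sketch under the assumption that the caller has already built a well-formed outer header with a zero-length payload (e.g. Ethernet/IPv4/UDP/VXLAN) in hdr[]; write_dword is the assumed helper from the stream-start sketch.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Fill an MC_CMD_MAE_ENCAP_HEADER_ALLOC request and return the request
 * length to hand to the MCDI transport. encap_type is one of the
 * MAE_MCDI_ENCAP_TYPE enum values.
 */
static size_t mae_encap_header_alloc_build(uint8_t *inbuf, uint32_t encap_type,
					   const uint8_t *hdr, size_t hdr_len)
{
	write_dword(inbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_OFST,
		    encap_type);
	memcpy(inbuf + MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST,
	       hdr, hdr_len);
	return MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(hdr_len);
}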
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE
+ * Update encap action metadata. See comments for MAE_ENCAP_HEADER_ALLOC.
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE 0x149
+#undef MC_CMD_0x149_PRIVILEGE_CTG
+
+#define MC_CMD_0x149_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMIN 8
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX 252
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_OFST 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_OFST 8
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_LEN 1
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MINNUM 0
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM 244
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_FREE
+ * Free encap action metadata
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE 0x14a
+#undef MC_CMD_0x14a_PRIVILEGE_CTG
+
+#define MC_CMD_0x14a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_FREE_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MINNUM 1
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM 32
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ENCAP_HEADER_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MINNUM 1
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM 32
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MAC_ADDR_ALLOC
+ * Allocate MAC address. Hardware implementations have MAC addresses programmed
+ * into an indirection table, and clients should take care not to allocate the
+ * same MAC address twice (but instead reuse its ID). If the maximum number of
+ * MAC addresses have already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC 0x15e
+#undef MC_CMD_0x15e_PRIVILEGE_CTG
+
+#define MC_CMD_0x15e_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MAC_ADDR_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN 6
+/* MAC address as bytes in network order. */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN 6
+
+/* MC_CMD_MAE_MAC_ADDR_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_LEN 4
+/* enum: A MAC address ID that is guaranteed never to represent a real MAC
+ * address.
+ */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL 0xffffffff
+
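Because the hardware stores these addresses in an indirection table, a client would typically keep a small reference-counted cache keyed by MAC and only issue ALLOC for addresses it has not seen before. A purely illustrative sketch of that bookkeeping; the table, its size and the linear lookup are assumptions, not structures from this driver.

#include <stdint.h>
#include <string.h>

#define MAC_CACHE_SIZE 16	/* illustrative only */

struct mac_cache_entry {
	uint8_t addr[6];
	uint32_t mac_id;	/* ID from MC_CMD_MAE_MAC_ADDR_ALLOC_OUT */
	unsigned int refcnt;	/* free the ID only when this drops to zero */
};

static struct mac_cache_entry mac_cache[MAC_CACHE_SIZE];

/* Return an existing ID for this MAC if one was already allocated, else
 * MAC_ID_NULL so the caller knows it must issue MAE_MAC_ADDR_ALLOC.
 */
static uint32_t mac_cache_lookup(const uint8_t addr[6])
{
	unsigned int i;

	for (i = 0; i < MAC_CACHE_SIZE; i++) {
		if (mac_cache[i].refcnt &&
		    !memcmp(mac_cache[i].addr, addr, 6)) {
			mac_cache[i].refcnt++;
			return mac_cache[i].mac_id;
		}
	}
	return MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL;
}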
+
+/***********************************/
+/* MC_CMD_MAE_MAC_ADDR_FREE
+ * Free MAC address.
+ */
+#define MC_CMD_MAE_MAC_ADDR_FREE 0x15f
+#undef MC_CMD_0x15f_PRIVILEGE_CTG
+
+#define MC_CMD_0x15f_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MAC_ADDR_FREE_IN msgrequest */
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MINNUM 1
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM 32
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_MAC_ADDR_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MINNUM 1
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM 32
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_ALLOC
+ * Allocate an action set, which can be referenced either in response to an
+ * Action Rule, or as part of an Action Set List. If the maximum number of
+ * action sets have already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC 0x14d
+#undef MC_CMD_0x14d_PRIVILEGE_CTG
+
+#define MC_CMD_0x14d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * COUNTER_LIST_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
+ * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_LEN 4
+
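A minimal "deliver only" action set leaves every optional action at its NULL sentinel and sets just DELIVER to an m-port selector. A sketch using the assumed write_dword helper from earlier; COUNTER_LIST_ID_NULL is referenced in the comments above but not defined in this excerpt, so its all-ones sentinel value (matching the other *_NULL enums here) is written directly as an assumption.

#include <stdint.h>
#include <string.h>

/* Build a deliver-only MC_CMD_MAE_ACTION_SET_ALLOC request: no VLAN ops,
 * no decap/encap, no counters, no MAC rewrite; just deliver the packet to
 * deliver_mport (a MAE_MPORT_SELECTOR word).
 */
static void mae_action_set_deliver_only(uint8_t *inbuf, uint32_t deliver_mport)
{
	memset(inbuf, 0, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
	write_dword(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_OFST,
		    MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
	write_dword(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST,
		    deliver_mport);
	write_dword(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST,
		    0xffffffff);	/* assumed COUNTER_LIST_ID_NULL */
	write_dword(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST,
		    MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
	write_dword(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_OFST,
		    MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
	write_dword(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_OFST,
		    MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
}
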
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V2_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V7_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LEN 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * COUNTER_LIST_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
+ * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
+/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
+ * ACTION_SET_ID_NULL. This allows an AS_ID to be distinguished from an ASL_ID
+ * returned from MC_CMD_MAE_ACTION_SET_LIST_ALLOC.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_LEN 4
+/* enum: An action set ID that is guaranteed never to represent an action set
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_FREE
+ */
+#define MC_CMD_MAE_ACTION_SET_FREE 0x14e
+#undef MC_CMD_0x14e_PRIVILEGE_CTG
+
+#define MC_CMD_0x14e_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_FREE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_SET_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC
+ * Allocate an action set list (ASL) that can be referenced by an ID. The ASL
+ * ID can be used when inserting an action rule, so that for each packet
+ * matching the rule every action set in the list is applied. If the maximum
+ * number of ASLs have already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC 0x14f
+#undef MC_CMD_0x14f_PRIVILEGE_CTG
+
+#define MC_CMD_0x14f_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMIN 8
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_NUM(len) (((len)-4)/4)
+/* Number of elements in the AS_IDS field. */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_LEN 4
+/* The IDs of the action sets in this list. The last element of this list may
+ * be the ID of an already allocated ASL. In this case the action sets from the
+ * already allocated ASL will be applied after the action sets supplied by this
+ * request. This mechanism can be used to reduce resource usage in the case
+ * where one ASL is a sublist of another ASL. The sublist should be allocated
+ * first, then the superlist should be allocated by supplying all required
+ * action set IDs that are not in the sublist followed by the ID of the
+ * sublist. One sublist can be referenced by multiple superlists.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_OFST 4
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM 62
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2 254
+
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN 4
+/* The MSB of the ASL_ID is guaranteed to be set. This allows an ASL_ID to be
+ * distinguished from an AS_ID returned from MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN 4
+/* enum: An action set list ID that is guaranteed never to represent an action
+ * set list
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL 0xffffffff
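Illustrative usage sketch (not part of the generated definitions above): a
driver could build an MC_CMD_MAE_ACTION_SET_LIST_ALLOC request along these
lines, assuming the MCDI helper macros already used elsewhere in this driver
(MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_SET_ARRAY_DWORD, MCDI_DWORD,
efx_mcdi_rpc). The function name and error handling are hypothetical; the
last element of as_ids[] may be an existing ASL ID to chain a sublist, as
described in the comment above.

	static int efx_mae_alloc_asl(struct efx_nic *efx, const u32 *as_ids,
				     unsigned int n, u32 *asl_id)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
		MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(
					MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM));
		size_t outlen;
		unsigned int i;
		int rc;

		if (n < MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MINNUM ||
		    n > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM)
			return -EINVAL;
		/* COUNT is the number of elements in AS_IDS */
		MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, n);
		for (i = 0; i < n; i++)
			MCDI_SET_ARRAY_DWORD(inbuf,
					     MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
					     i, as_ids[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC,
				  inbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < sizeof(outbuf))
			return -EIO;
		*asl_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
		return 0;
	}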
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE
+ * Free match-action-engine redirect_lists
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE 0x150
+#undef MC_CMD_0x150_PRIVILEGE_CTG
+
+#define MC_CMD_0x150_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_INSERT
+ * Inserts an Outer Rule, which controls encapsulation parsing, and may
+ * influence the Lookup Sequence. If the maximum number of rules has already
+ * been inserted then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT 0x15a
+#undef MC_CMD_0x15a_PRIVILEGE_CTG
+
+#define MC_CMD_0x15a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMIN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(num) (16+1*(num))
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_NUM(len) (((len)-16)/1)
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* Match priority. Lower values have higher priority. Must be less than
+ * MC_CMD_MAE_GET_CAPS_OUT.ENCAP_PRIOS. If a packet matches two filters with
+ * equal priority then it is unspecified which takes priority.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_LEN 4
+/* Deprecated alias for ACTION_CONTROL. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_WIDTH 16
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_LEN 4
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_LEN 4
+/* Structure of the format MAE_ENC_FIELD_PAIRS. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_OFST 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM 236
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_LEN 4
+/* enum: An outer match ID that is guaranteed never to represent an outer match
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL 0xffffffff
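The DO_CT, CT_VNI_MODE, DO_COUNT, CT_TCP_FLAGS_INHIBIT, RECIRC_ID and
CT_DOMAIN definitions above are LBN/WIDTH sub-fields of the 32-bit
ACTION_CONTROL word at offset 8, so they are normally written with the MCDI
bitfield helpers rather than one define at a time. A minimal, hypothetical
fragment, assuming the MCDI_SET_DWORD and MCDI_POPULATE_DWORD_* helpers from
this driver's mcdi.h; the variables are caller-supplied and the
MAE_ENC_FIELD_PAIRS match criteria at offset 16 onwards are omitted:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(0));

	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, encap_type);
	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio);
	MCDI_POPULATE_DWORD_3(inbuf, MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL,
			      MAE_OUTER_RULE_INSERT_IN_DO_COUNT, 1,
			      MAE_OUTER_RULE_INSERT_IN_RECIRC_ID, recirc_id,
			      MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN, ct_domain);
	/* Only used because DO_COUNT is set; must be a COUNTER_TYPE=OR counter */
	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_COUNTER_ID, counter_id);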
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_REMOVE
+ */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE 0x15b
+#undef MC_CMD_0x15b_PRIVILEGE_CTG
+
+#define MC_CMD_0x15b_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_REMOVE_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMIN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MINNUM 1
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM 32
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_OUTER_RULE_REMOVE_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMIN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MINNUM 1
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_UPDATE
+ * Atomically change the response of an Outer Rule.
+ */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
+#undef MC_CMD_0x17d_PRIVILEGE_CTG
+
+#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
+/* ID of outer rule to update */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
+
+/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
+
+/* MAE_ACTION_RULE_RESPONSE structuredef */
+#define MAE_ACTION_RULE_RESPONSE_LEN 16
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LBN 0
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_WIDTH 32
+/* Only one of ASL_ID or AS_ID may have a non-NULL value. */
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_OFST 4
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_LBN 32
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_WIDTH 32
+/* Controls lookup flow when this rule is hit. See sub-fields for details. More
+ * info on the lookup sequence can be found in SF-122976-TC. It is an error to
+ * set both DO_CT and DO_RECIRC.
+ */
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_LBN 0
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_WIDTH 1
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_LBN 1
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_WIDTH 1
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_LBN 2
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_LBN 8
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_WIDTH 8
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_LBN 16
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_WIDTH 16
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LBN 64
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_WIDTH 32
+/* Counter ID to increment if DO_CT or DO_RECIRC is set. Must be set to
+ * COUNTER_ID_NULL otherwise. Counter ID must have been allocated with
+ * COUNTER_TYPE=AR.
+ */
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_OFST 12
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LBN 96
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_INSERT
+ * Insert a rule specifying that packets matching a filter be processed
+ * according to a previously allocated action. Masks can be set as indicated
+ * by MC_CMD_MAE_GET_MATCH_FIELD_CAPABILITIES. If the maximum number of rules
+ * has already been inserted then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT 0x15c
+#undef MC_CMD_0x15c_PRIVILEGE_CTG
+
+#define MC_CMD_0x15c_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMIN 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(num) (28+1*(num))
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_NUM(len) (((len)-28)/1)
+/* See MC_CMD_MAE_OUTER_RULE_INSERT_IN/PRIO. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_LEN 20
+/* Reserved for future use. Must be set to zero. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_OFST 24
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_LEN 4
+/* Structure of the format MAE_FIELD_MASK_VALUE_PAIRS */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_OFST 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM 224
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM_MCDI2 992
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_LEN 4
+/* enum: An action rule ID that is guaranteed never to represent an action rule
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_UPDATE
+ * Atomically change the response of an action rule. Firmware may return
+ * ENOTSUP, in which case the driver should DELETE/INSERT.
+ */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE 0x15d
+#undef MC_CMD_0x15d_PRIVILEGE_CTG
+
+#define MC_CMD_0x15d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_LEN 24
+/* ID of action rule to update */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_LEN 20
+
+/* MC_CMD_MAE_ACTION_RULE_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_DELETE
+ */
+#define MC_CMD_MAE_ACTION_RULE_DELETE 0x155
+#undef MC_CMD_0x155_PRIVILEGE_CTG
+
+#define MC_CMD_0x155_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_DELETE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_RULE_DELETE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_LOOKUP
+ * Return the m-port corresponding to a selector.
+ */
+#define MC_CMD_MAE_MPORT_LOOKUP 0x160
+#undef MC_CMD_0x160_PRIVILEGE_CTG
+
+#define MC_CMD_0x160_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_LOOKUP_IN msgrequest */
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_LEN 4
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_LEN 4
+
+/* MC_CMD_MAE_MPORT_LOOKUP_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN 4
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ALLOC
+ * Allocates an m-port, which can subsequently be used in action rules as a
+ * match or delivery argument.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC 0x163
+#undef MC_CMD_0x163_PRIVILEGE_CTG
+
+#define MC_CMD_0x163_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN 24
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_LEN 16
+/* An m-port selector identifying the VNIC to which traffic should be
+ * delivered. This must currently be set to MAE_MPORT_SELECTOR_ASSIGNED (i.e.
+ * the m-port assigned to the calling client).
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN 24
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_LEN 4
+/* A value that will appear in the packet metadata for any packets delivered
+ * using an alias type m-port. This value is guaranteed unique on the VNIC
+ * being delivered to, and is guaranteed not to exceed the range of values
+ * representable in the relevant metadata field.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_FREE
+ * Free an m-port which was previously allocated by the driver.
+ */
+#define MC_CMD_MAE_MPORT_FREE 0x164
+#undef MC_CMD_0x164_PRIVILEGE_CTG
+
+#define MC_CMD_0x164_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_FREE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_FREE_IN_LEN 4
+/* MPORT_ID as returned by MC_CMD_MAE_MPORT_ALLOC. */
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_FREE_OUT_LEN 0
+
+/* MAE_MPORT_DESC structuredef */
+#define MAE_MPORT_DESC_LEN 52
+#define MAE_MPORT_DESC_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_FLAGS_OFST 4
+#define MAE_MPORT_DESC_FLAGS_LEN 4
+#define MAE_MPORT_DESC_FLAGS_LBN 32
+#define MAE_MPORT_DESC_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification. */
+#define MAE_MPORT_DESC_UUID_OFST 16
+#define MAE_MPORT_DESC_UUID_LEN 16
+#define MAE_MPORT_DESC_UUID_LBN 128
+#define MAE_MPORT_DESC_UUID_WIDTH 128
+/* Big wadge of space reserved for other common properties */
+#define MAE_MPORT_DESC_RESERVED_OFST 32
+#define MAE_MPORT_DESC_RESERVED_LEN 8
+#define MAE_MPORT_DESC_RESERVED_LO_OFST 32
+#define MAE_MPORT_DESC_RESERVED_LO_LEN 4
+#define MAE_MPORT_DESC_RESERVED_LO_LBN 256
+#define MAE_MPORT_DESC_RESERVED_LO_WIDTH 32
+#define MAE_MPORT_DESC_RESERVED_HI_OFST 36
+#define MAE_MPORT_DESC_RESERVED_HI_LEN 4
+#define MAE_MPORT_DESC_RESERVED_HI_LBN 288
+#define MAE_MPORT_DESC_RESERVED_HI_WIDTH 32
+#define MAE_MPORT_DESC_RESERVED_LBN 256
+#define MAE_MPORT_DESC_RESERVED_WIDTH 64
+/* Logical port index. Only valid when type NET Port. */
+#define MAE_MPORT_DESC_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
+
+/* MAE_MPORT_DESC_V2 structuredef */
+#define MAE_MPORT_DESC_V2_LEN 56
+#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
+#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
+#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
+#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification. */
+#define MAE_MPORT_DESC_V2_UUID_OFST 16
+#define MAE_MPORT_DESC_V2_UUID_LEN 16
+#define MAE_MPORT_DESC_V2_UUID_LBN 128
+#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
+/* Big wadge of space reserved for other common properties */
+#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
+#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
+#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
+#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
+#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
+#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
+#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
+#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
+#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
+#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
+#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
+#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
+/* Logical port index. Only valid when type NET Port. */
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
+/* A client handle for the VNIC's owner. Only valid for type VNIC. */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ENUMERATE
+ * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
+ * will be removed at some future point.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
+#undef MC_CMD_0x17c_PRIVILEGE_CTG
+
+#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_READ_JOURNAL
+ * Firmware maintains a per-client journal of mport creations and deletions.
+ * This journal is clear-on-read, i.e. repeated calls of this command will
+ * drain the buffer. Whenever the caller resets its function via FLR or
+ * MC_CMD_ENTITY_RESET, the journal is regenerated from a blank start.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL 0x147
+#undef MC_CMD_0x147_PRIVILEGE_CTG
+
+#define MC_CMD_0x147_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_IN msgrequest */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN 4
+/* Any unused flags are reserved and must be set to zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_LEN 4
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMIN 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_WIDTH 1
+/* The number of MAE_MPORT_DESC structures in MPORT_DESC_DATA. May be zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_OFST 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_OFST 8
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM 240
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1008
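Illustrative sketch of draining the journal (not part of the generated
definitions): the command is repeated while the MORE flag is set, and the
returned descriptors are walked with the SIZEOF_MPORT_DESC stride so the code
keeps working if MAE_MPORT_DESC grows. The MCDI accessors (MCDI_DECLARE_BUF,
MCDI_SET_DWORD, MCDI_DWORD, MCDI_PTR, efx_mcdi_rpc) are the driver's existing
helpers; the function name and callback are hypothetical.

	static int efx_mae_read_mport_journal(struct efx_nic *efx,
					      void (*cb)(struct efx_nic *efx,
							 u32 mport_id))
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX);
		MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN);
		size_t outlen, stride;
		unsigned int count, i;
		const u8 *data;
		u32 flags;
		int rc;

		MCDI_SET_DWORD(inbuf, MAE_MPORT_READ_JOURNAL_IN_FLAGS, 0);
		do {
			rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_READ_JOURNAL,
					  inbuf, sizeof(inbuf),
					  outbuf, sizeof(outbuf), &outlen);
			if (rc)
				return rc;
			if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMIN)
				return -EIO;
			flags = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_FLAGS);
			count = MCDI_DWORD(outbuf,
					   MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT);
			stride = MCDI_DWORD(outbuf,
					    MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC);
			data = MCDI_PTR(outbuf,
					MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA);
			/* Only fields that lie below 'stride' bytes are present */
			for (i = 0; i < count; i++)
				cb(efx, le32_to_cpu(*(const __le32 *)
						    (data + i * stride +
						     MAE_MPORT_DESC_MPORT_ID_OFST)));
		} while (flags & BIT(MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN));
		return 0;
	}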
+
+/* TABLE_FIELD_DESCR structuredef: An individual table field descriptor. This
+ * describes the location and properties of one N-bit field within a wider
+ * M-bit key/mask/response value.
+ */
+#define TABLE_FIELD_DESCR_LEN 8
+/* Identifier for this field. */
+#define TABLE_FIELD_DESCR_FIELD_ID_OFST 0
+#define TABLE_FIELD_DESCR_FIELD_ID_LEN 2
+/* Enum values, see field(s): */
+/* TABLE_FIELD_ID */
+#define TABLE_FIELD_DESCR_FIELD_ID_LBN 0
+#define TABLE_FIELD_DESCR_FIELD_ID_WIDTH 16
+/* Lowest (least significant) bit number of the bits of this field. */
+#define TABLE_FIELD_DESCR_LBN_OFST 2
+#define TABLE_FIELD_DESCR_LBN_LEN 2
+#define TABLE_FIELD_DESCR_LBN_LBN 16
+#define TABLE_FIELD_DESCR_LBN_WIDTH 16
+/* Width of this field in bits. */
+#define TABLE_FIELD_DESCR_WIDTH_OFST 4
+#define TABLE_FIELD_DESCR_WIDTH_LEN 2
+#define TABLE_FIELD_DESCR_WIDTH_LBN 32
+#define TABLE_FIELD_DESCR_WIDTH_WIDTH 16
+/* The mask type for this field. (Note that masking is relevant to keys; fields
+ * of responses are always reported with the EXACT type.)
+ */
+#define TABLE_FIELD_DESCR_MASK_TYPE_OFST 6
+#define TABLE_FIELD_DESCR_MASK_TYPE_LEN 1
+/* enum: Field must never be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_NEVER 0x0
+/* enum: Exact match: field must always be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_EXACT 0x1
+/* enum: Ternary match: arbitrary mask bits are allowed. */
+#define TABLE_FIELD_DESCR_MASK_TERNARY 0x2
+/* enum: Whole field match: mask must be all 1 bits, or all 0 bits. */
+#define TABLE_FIELD_DESCR_MASK_WHOLE_FIELD 0x3
+/* enum: Longest prefix match: mask must be 1 bit(s) followed by 0 bit(s). */
+#define TABLE_FIELD_DESCR_MASK_LPM 0x4
+#define TABLE_FIELD_DESCR_MASK_TYPE_LBN 48
+#define TABLE_FIELD_DESCR_MASK_TYPE_WIDTH 8
+/* A version code that allows field semantics to be extended. All fields
+ * currently use version 0.
+ */
+#define TABLE_FIELD_DESCR_SCHEME_OFST 7
+#define TABLE_FIELD_DESCR_SCHEME_LEN 1
+#define TABLE_FIELD_DESCR_SCHEME_LBN 56
+#define TABLE_FIELD_DESCR_SCHEME_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_TABLE_LIST
+ * Return the list of tables which may be accessed via this table API.
+ */
+#define MC_CMD_TABLE_LIST 0x1c9
+#undef MC_CMD_0x1c9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_LIST_IN msgrequest */
+#define MC_CMD_TABLE_LIST_IN_LEN 4
+/* Index of the first item to be returned in the TABLE_ID sequence. (Set to 0
+ * for the first call; further calls are only required if the whole sequence
+ * does not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_OFST 0
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_LEN 4
+
+/* MC_CMD_TABLE_LIST_OUT msgresponse */
+#define MC_CMD_TABLE_LIST_OUT_LENMIN 4
+#define MC_CMD_TABLE_LIST_OUT_LENMAX 252
+#define MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_LIST_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(len) (((len)-4)/4)
+/* The total number of tables. */
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_OFST 0
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_LEN 4
+/* A sequence of table identifiers. If all N_TABLES items do not fit, further
+ * items can be obtained by repeating the call with a non-zero
+ * FIRST_TABLE_ID_INDEX.
+ */
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_OFST 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_LEN 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MINNUM 0
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM 62
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM_MCDI2 254
+/* Enum values, see field(s): */
+/* TABLE_ID */
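Since the full table list may not fit in one MCDI response, a driver repeats
the call, advancing FIRST_TABLE_ID_INDEX by the number of IDs already
received, until N_TABLES IDs have been collected. A hypothetical sketch,
assuming the driver's MCDI helpers including the MCDI_ARRAY_DWORD accessor;
the function name and caller-provided ids[] array are illustrative:

	static int efx_table_list(struct efx_nic *efx, u32 *ids,
				  unsigned int max_ids, unsigned int *n_tables)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LENMAX);
		MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN);
		unsigned int got = 0, i, n;
		size_t outlen;
		int rc;

		do {
			MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, got);
			rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf),
					  outbuf, sizeof(outbuf), &outlen);
			if (rc)
				return rc;
			if (outlen < MC_CMD_TABLE_LIST_OUT_LENMIN)
				return -EIO;
			*n_tables = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES);
			/* Number of IDs actually present in this response */
			n = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(outlen);
			for (i = 0; i < n && got < max_ids && got < *n_tables; i++, got++)
				ids[got] = MCDI_ARRAY_DWORD(outbuf,
							    TABLE_LIST_OUT_TABLE_ID, i);
		} while (got < *n_tables && got < max_ids && n);
		return 0;
	}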
+
+
+/***********************************/
+/* MC_CMD_TABLE_DESCRIPTOR
+ * Request the table descriptor for a particular table. This describes
+ * properties of the table and the format of the key and response. May return
+ * EINVAL for unknown table ID.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR 0x1ca
+#undef MC_CMD_0x1ca_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ca_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_LEN 8
+/* Identifier for this field. */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Index of the first item to be returned in the FIELDS sequence. (Set to 0 for
+ * the first call; further calls are only required if the whole sequence does
+ * not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_LEN 4
+
+/* MC_CMD_TABLE_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_NUM(len) (((len)-20)/8)
+/* Maximum number of entries in this table. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_LEN 4
+/* The type of table. (This is really just informational; the important
+ * properties of a table that affect programming can be deduced from other
+ * items in the table or field descriptor.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_LEN 2
+/* enum: Direct table (essentially just an array). Behaves like a BCAM for
+ * programming purposes, where the fact that the key is actually used as an
+ * array index is really just an implementation detail.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_DIRECT 0x1
+/* enum: BCAM (binary CAM) table: exact match on all key fields." */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_BCAM 0x2
+/* enum: TCAM (ternary CAM) table: matches fields with a mask. Each entry may
+ * have its own different mask.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_TCAM 0x3
+/* enum: STCAM (semi-TCAM) table: like a TCAM but entries share a limited
+ * number of unique masks.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_STCAM 0x4
+/* Width of key (and corresponding mask, for TCAM or STCAM) in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_OFST 6
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_LEN 2
+/* Width of response in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_LEN 2
+/* The total number of fields in the key. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_OFST 10
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_LEN 2
+/* The total number of fields in the response. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_OFST 12
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_LEN 2
+/* Number of priorities for STCAM or TCAM; otherwise 0. The priority of a table
+ * entry (relevant when more than one masked entry matches) ranges from
+ * 0=highest to N_PRIORITIES-1=lowest.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_OFST 14
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_LEN 2
+/* Maximum number of masks for STCAM; otherwise 0. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_OFST 16
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_LEN 2
+/* Flags. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_OFST 18
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_LEN 1
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_OFST 18
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_LBN 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_WIDTH 1
+/* Access scheme version code, allowing the method of accessing table entries
+ * to change semantics in future. A client which does not understand the value
+ * of this field should assume that it cannot program this table. Currently
+ * always set to 0 indicating the original MC_CMD_TABLE_INSERT/UPDATE/DELETE
+ * semantics.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_OFST 19
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_LEN 1
+/* A sequence of TABLE_FIELD_DESCR structures: N_KEY_FIELDS items describing
+ * the key, followed by N_RESP_FIELDS items describing the response. If all
+ * N_KEY_FIELDS+N_RESP_FIELDS items do not fit, further items can be obtained
+ * by repeating the call with a non-zero FIRST_FIELDS_INDEX.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_OFST 20
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LEN 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_OFST 20
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LEN 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LBN 160
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_WIDTH 32
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_OFST 24
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LEN 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LBN 192
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_WIDTH 32
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MINNUM 1
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM 29
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM_MCDI2 125
+
+
+/***********************************/
+/* MC_CMD_TABLE_INSERT
+ * Insert a new entry into a table. The entry must not currently exist. May
+ * return EINVAL for unknown table ID or other bad request parameters, EEXIST
+ * if the entry already exists, ENOSPC if there is no space or EPERM if the
+ * operation is not permitted. In case of an error, the additional MCDI error
+ * argument field returns the raw error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_INSERT 0x1cd
+#undef MC_CMD_0x1cd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_INSERT_IN msgrequest */
+#define MC_CMD_TABLE_INSERT_IN_LENMIN 16
+#define MC_CMD_TABLE_INSERT_IN_LENMAX 252
+#define MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_INSERT_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_INSERT_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_INSERT_IN_DATA_OFST 12
+#define MC_CMD_TABLE_INSERT_IN_DATA_LEN 4
+#define MC_CMD_TABLE_INSERT_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_INSERT_OUT msgresponse */
+#define MC_CMD_TABLE_INSERT_OUT_LEN 0
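The packing rule described above places each field at its TABLE_FIELD_DESCR
LBN/WIDTH position within a wide bit value, pads that value to a multiple of
32 bits, and emits it least-significant word first. A small generic helper is
sketched below for illustration only (it is not an existing driver function);
a caller would pack the key this way, then the mask and response each into
their own zeroed word arrays, and write the resulting words into DATA as
consecutive 32-bit MCDI words (for example via MCDI_SET_ARRAY_DWORD).

	/* Place an up-to-32-bit field at bit position 'lbn' with width 'width'
	 * inside a zero-initialised array of 32-bit words: bits [31:0] of the
	 * wide value live in words[0], bits [63:32] in words[1], and so on.
	 */
	static void table_field_put(u32 *words, unsigned int lbn,
				    unsigned int width, u32 value)
	{
		unsigned int word = lbn / 32, bit = lbn % 32;

		if (width < 32)
			value &= (1U << width) - 1;
		words[word] |= value << bit;
		if (bit && bit + width > 32)	/* field straddles a word boundary */
			words[word + 1] |= value >> (32 - bit);
	}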
+
+
+/***********************************/
+/* MC_CMD_TABLE_UPDATE
+ * Update an existing entry in a table with a new response value. May return
+ * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
+ * entry does not already exist, or EPERM if the operation is not permitted. In
+ * case of an error, the additional MCDI error argument field returns the raw
+ * error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_UPDATE 0x1ce
+#undef MC_CMD_0x1ce_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_UPDATE_IN msgrequest */
+#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
+#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TABLE_DELETE
+ * Delete an existing entry in a table. May return EINVAL for unknown table ID
+ * or other bad request parameters, ENOENT if the entry does not exist, or
+ * EPERM if the operation is not permitted. In case of an error, the additional
+ * MCDI error argument field returns the raw error code from the underlying CAM
+ * driver.
+ */
+#define MC_CMD_TABLE_DELETE 0x1cf
+#undef MC_CMD_0x1cf_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DELETE_IN msgrequest */
+#define MC_CMD_TABLE_DELETE_IN_LENMIN 16
+#define MC_CMD_TABLE_DELETE_IN_LENMAX 252
+#define MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DELETE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_DELETE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_DELETE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_DELETE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_DELETE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_DELETE_OUT msgresponse */
+#define MC_CMD_TABLE_DELETE_OUT_LEN 0
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
new file mode 100644
index 000000000000..ff6d80c8e486
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2022 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef MCDI_PCOL_MAE_H
+#define MCDI_PCOL_MAE_H
+/* MCDI definitions for Match-Action Engine functionality that are
+ * missing from the main mcdi_pcol.h
+ */
+
+/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
+ * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
+
+#endif /* MCDI_PCOL_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 94c6a345c0b1..ad4694fa3dda 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -20,7 +20,7 @@
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
@@ -46,7 +46,7 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
static int efx_mcdi_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 723bbeea5d0c..7ef823d7a89a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -178,6 +178,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
+#define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -477,6 +478,8 @@ enum efx_sync_events_state {
* @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
+ * not recognised
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -539,6 +542,7 @@ struct efx_channel {
unsigned int n_rx_xdp_bad_drops;
unsigned int n_rx_xdp_tx;
unsigned int n_rx_xdp_redirect;
+ unsigned int n_rx_mport_bad;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -622,12 +626,55 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state {
- STATE_UNINIT = 0, /* device being probed/removed or is frozen */
- STATE_READY = 1, /* hardware ready and netdev registered */
- STATE_DISABLED = 2, /* device disabled due to hardware errors */
- STATE_RECOVERY = 3, /* device recovering from PCI error */
+ STATE_UNINIT = 0, /* device being probed/removed */
+ STATE_PROBED, /* hardware probed */
+ STATE_NET_DOWN, /* netdev registered */
+ STATE_NET_UP, /* ready for traffic */
+ STATE_DISABLED, /* device disabled due to hardware errors */
+
+ STATE_RECOVERY = 0x100,/* recovering from PCI error */
+ STATE_FROZEN = 0x200, /* frozen by power management */
};
+static inline bool efx_net_active(enum nic_state state)
+{
+ return state == STATE_NET_DOWN || state == STATE_NET_UP;
+}
+
+static inline bool efx_frozen(enum nic_state state)
+{
+ return state & STATE_FROZEN;
+}
+
+static inline bool efx_recovering(enum nic_state state)
+{
+ return state & STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_freeze(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_FROZEN;
+}
+
+static inline enum nic_state efx_thaw(enum nic_state state)
+{
+ WARN_ON(!efx_frozen(state));
+ return state & ~STATE_FROZEN;
+}
+
+static inline enum nic_state efx_recover(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_recovered(enum nic_state state)
+{
+ WARN_ON(!efx_recovering(state));
+ return state & ~STATE_RECOVERY;
+}
+
/* Forward declaration */
struct efx_nic;
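The reworked nic_state above separates a small set of base states from two flag bits (STATE_FROZEN, STATE_RECOVERY) that are OR'd onto a NET_DOWN/NET_UP base and stripped again by the helpers. A minimal usage sketch of a freeze/thaw cycle (illustrative only, not part of this patch):

/* Illustrative sketch -- not part of the patch. Demonstrates the flag
 * semantics of the helpers above around a power-management cycle.
 */
static void example_pm_cycle(void)
{
	enum nic_state state = STATE_NET_UP;

	state = efx_freeze(state);	/* STATE_NET_UP | STATE_FROZEN */
	WARN_ON(!efx_frozen(state));
	WARN_ON(efx_net_active(state));	/* a frozen device is not net-active */

	state = efx_thaw(state);	/* back to STATE_NET_UP */
	WARN_ON(!efx_net_active(state));
}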
@@ -923,12 +970,15 @@ enum efx_xdp_tx_queues_mode {
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
+ * @vf_reps_lock: Protects vf_reps list
+ * @vf_reps: local VF reps
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
+ * @tc: state for TC offload (EF100).
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
@@ -1102,6 +1152,8 @@ struct efx_nic {
unsigned vf_init_count;
unsigned vi_scale;
#endif
+ spinlock_t vf_reps_lock;
+ struct list_head vf_reps;
struct efx_ptp_data *ptp_data;
bool ptp_warned;
@@ -1110,6 +1162,7 @@ struct efx_nic {
bool xdp_rxq_info_failed;
struct notifier_block netdev_notifier;
+ struct efx_tc_state *tc;
unsigned int mem_bar;
u32 reg_base;
@@ -1123,6 +1176,24 @@ struct efx_nic {
atomic_t n_rx_noskb_drops;
};
+/**
+ * struct efx_probe_data - State after hardware probe
+ * @pci_dev: The PCI device
+ * @efx: Efx NIC details
+ */
+struct efx_probe_data {
+ struct pci_dev *pci_dev;
+ struct efx_nic efx;
+};
+
+static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
+{
+ struct efx_probe_data **probe_ptr = netdev_priv(dev);
+ struct efx_probe_data *probe_data = *probe_ptr;
+
+ return &probe_data->efx;
+}
+
static inline int efx_dev_registered(struct efx_nic *efx)
{
return efx->net_dev->reg_state == NETREG_REGISTERED;
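The new efx_netdev_priv() helper implies that the netdev private area now holds a single pointer to the separately allocated struct efx_probe_data rather than embedding struct efx_nic directly. A minimal sketch of how that pairing would be set up (illustrative only; the allocation code here is an assumption, not taken from this patch):

/* Illustrative sketch -- not part of the patch. Only the pointer-in-priv
 * layout is implied by efx_netdev_priv() above; the probe-path details
 * here are assumed for the example.
 */
#include <linux/etherdevice.h>

static struct net_device *example_pair_netdev(struct efx_probe_data *probe_data)
{
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;

	/* The private area only needs to hold one pointer */
	net_dev = alloc_etherdev(sizeof(probe_data));
	if (!net_dev)
		return NULL;

	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;	/* efx_netdev_priv() dereferences this */
	probe_data->efx.net_dev = net_dev;
	return net_dev;
}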
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 4625f85acab2..10ad0b93d283 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
+ /* This code invokes normal driver TX code which is always
+ * protected from softirqs when called from generic TX code,
+ * which in turn disables preemption. Look at __dev_queue_xmit
+ * which uses rcu_read_lock_bh disabling preemption for RCU
+ * plus disabling softirqs. We do not need RCU reader
+ * protection here.
+ *
+ * Although it is theoretically safe for the current PTP TX/RX
+ * code to run without disabling softirqs, there are three good
+ * reasons for doing so:
+ *
+ * 1) The code invoked is mainly implemented for non-PTP
+ * packets and it is always executed with softirqs
+ * disabled.
+ * 2) This being a single PTP packet, better to not
+ * interrupt its processing by softirqs which can lead
+ * to high latencies.
+ * 3) netdev_xmit_more checks preemption is disabled and
+ * triggers a BUG_ON if not.
+ */
+ local_bh_disable();
efx_enqueue_skb(tx_queue, skb);
+ local_bh_enable();
} else {
WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fa8b9aacca11..4826e6a7e4ce 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
int rc;
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
goto out_unlock;
@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
}
#endif
out_unlock:
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
channel->rps_flow_id = NULL;
}
#endif
- down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
}
#ifdef CONFIG_RFS_ACCEL
@@ -857,7 +853,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
- struct efx_nic *efx = netdev_priv(req->net_dev);
+ struct efx_nic *efx = efx_netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
int slot_idx = req - efx->rps_slot;
struct efx_arfs_rule *rule;
@@ -942,7 +938,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
struct efx_arfs_rule *rule;
struct flow_keys fk;
diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index cce23803c652..89ccd65c978b 100644
--- a/drivers/net/ethernet/sfc/siena/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -2778,7 +2778,7 @@ void efx_farch_filter_table_remove(struct efx_nic *efx)
enum efx_farch_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2822,9 +2822,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
index 3df0f0eca3b7..3f7899daa86a 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -1264,7 +1264,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
}
/* The MC is going down in to BIST mode. set the BIST flag to block
- * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
* (which doesn't actually execute a reset, it waits for the controlling
* function to reset it).
*/
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
index 89a7fd47b057..a3cc8b7ec732 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -274,7 +274,7 @@
* MC_CMD_WORKAROUND_BUG26807.
* May also returned for other operations such as sub-variant switching. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
+/* The clock whose frequency you've attempted to set
* doesn't exist on this NIC */
#define MC_CMD_ERR_NO_CLOCK 0x1015
/* Returned by MC_CMD_TESTASSERT if the action that should
@@ -7782,7 +7782,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +7827,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +7876,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -16682,7 +16682,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index c4a97fbf4672..ff7bbc325952 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -838,7 +838,7 @@ enum efx_xdp_tx_queues_mode {
* @xdp_channel_offset: Offset of zeroth channel used for XPD TX.
* @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
* @rx_ip_align: RX DMA address offset to have IP header aligned in
- * in accordance with NET_IP_ALIGN
+ * accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 3f241e6c881a..fc9f0189f285 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -10,7 +10,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_mac)
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
@@ -21,7 +21,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos, __be16 vlan_proto)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_vlan) {
if ((vlan & ~VLAN_VID_MASK) ||
@@ -40,7 +40,7 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_spoofchk)
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
@@ -51,7 +51,7 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_get_vf_config)
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
@@ -62,7 +62,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_link_state)
return efx->type->sriov_set_vf_link_state(efx, vf_i,
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
new file mode 100644
index 000000000000..0c0aeb91f500
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc.h"
+#include "mae.h"
+#include "ef100_rep.h"
+#include "efx.h"
+
+static void efx_tc_free_action_set(struct efx_nic *efx,
+ struct efx_tc_action_set *act, bool in_hw)
+{
+ /* Failure paths calling this on the 'running action' set in_hw=false,
+ * because if the alloc had succeeded we'd've put it in acts.list and
+ * not still have it in act.
+ */
+ if (in_hw) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ /* in_hw is true iff we are on an acts.list; make sure to
+ * remove ourselves from that list before we are freed.
+ */
+ list_del(&act->list);
+ }
+ kfree(act);
+}
+
+static void efx_tc_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts,
+ bool in_hw)
+{
+ struct efx_tc_action_set *act, *next;
+
+ /* Failure paths set in_hw=false, because usually the acts didn't get
+ * to efx_mae_alloc_action_set_list(); if they did, the failure tree
+ * has a separate efx_mae_free_action_set_list() before calling us.
+ */
+ if (in_hw)
+ efx_mae_free_action_set_list(efx, acts);
+ /* Any act that's on the list will be in_hw even if the list isn't */
+ list_for_each_entry_safe(act, next, &acts->list, list)
+ efx_tc_free_action_set(efx, act, true);
+ /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
+}
+
+static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
+{
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
+ u32 eg_port, struct efx_tc_flow_rule *rule)
+{
+ struct efx_tc_action_set_list *acts = &rule->acts;
+ struct efx_tc_match *match = &rule->match;
+ struct efx_tc_action_set *act;
+ int rc;
+
+ match->value.ingress_port = ing_port;
+ match->mask.ingress_port = ~0;
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (!act)
+ return -ENOMEM;
+ act->deliver = 1;
+ act->dest_mport = eg_port;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc)
+ goto fail1;
+ EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
+ list_add_tail(&act->list, &acts->list);
+ rc = efx_mae_alloc_action_set_list(efx, acts);
+ if (rc)
+ goto fail2;
+ rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
+ acts->fw_id, &rule->fw_id);
+ if (rc)
+ goto fail3;
+ return 0;
+fail3:
+ efx_mae_free_action_set_list(efx, acts);
+fail2:
+ list_del(&act->list);
+ efx_mae_free_action_set(efx, act->fw_id);
+fail1:
+ kfree(act);
+ return rc;
+}
+
+static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_uplink(efx, &ing_port);
+ efx_mae_mport_wire(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_wire(efx, &ing_port);
+ efx_mae_mport_uplink(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
+{
+ struct efx_tc_flow_rule *rule = &efv->dflt;
+ struct efx_nic *efx = efv->parent;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_mport(efx, efv->mport, &ing_port);
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule)
+{
+ if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
+ efx_tc_delete_rule(efx, rule);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+{
+ u32 rep_mport_label;
+ int rc;
+
+ rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
+ efx->tc->reps_mport_id, rep_mport_label);
+ /* Use mport *selector* as vport ID */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &efx->tc->reps_mport_vport_id);
+ return 0;
+}
+
+static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
+{
+ efx_mae_free_mport(efx, efx->tc->reps_mport_id);
+ efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
+}
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx)
+{
+ struct efx_filter_spec promisc, allmulti;
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+ if (!efx->tc)
+ return 0;
+ efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_uc_def(&promisc);
+ efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &promisc, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_uc = rc;
+ efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_mc_def(&allmulti);
+ efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &allmulti, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_mc = rc;
+ return 0;
+}
+
+void efx_tc_remove_rep_filters(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return;
+ if (!efx->tc)
+ return;
+ if (efx->tc->reps_filter_mc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
+ efx->tc->reps_filter_mc = -1;
+ if (efx->tc->reps_filter_uc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
+ efx->tc->reps_filter_uc = -1;
+}
+
+int efx_init_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_tc_configure_default_rule_pf(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_default_rule_wire(efx);
+ if (rc)
+ return rc;
+ return efx_tc_configure_rep_mport(efx);
+}
+
+void efx_fini_tc(struct efx_nic *efx)
+{
+ /* We can get called even if efx_init_struct_tc() failed */
+ if (!efx->tc)
+ return;
+ efx_tc_deconfigure_rep_mport(efx);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+}
+
+int efx_init_struct_tc(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
+ if (!efx->tc)
+ return -ENOMEM;
+
+ efx->tc->reps_filter_uc = -1;
+ efx->tc->reps_filter_mc = -1;
+ INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
+ efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+ efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ return 0;
+}
+
+void efx_fini_struct_tc(struct efx_nic *efx)
+{
+ if (!efx->tc)
+ return;
+
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ kfree(efx->tc);
+ efx->tc = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
new file mode 100644
index 000000000000..309123c6b386
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_H
+#define EFX_TC_H
+#include "net_driver.h"
+
+struct efx_tc_action_set {
+ u16 deliver:1;
+ u32 dest_mport;
+ u32 fw_id; /* index of this entry in firmware actions table */
+ struct list_head list;
+};
+
+struct efx_tc_match_fields {
+ /* L1 */
+ u32 ingress_port;
+};
+
+struct efx_tc_match {
+ struct efx_tc_match_fields value;
+ struct efx_tc_match_fields mask;
+};
+
+struct efx_tc_action_set_list {
+ struct list_head list;
+ u32 fw_id;
+};
+
+struct efx_tc_flow_rule {
+ struct efx_tc_match match;
+ struct efx_tc_action_set_list acts;
+ u32 fw_id;
+};
+
+enum efx_tc_rule_prios {
+ EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
+ EFX_TC_PRIO__NUM
+};
+
+/**
+ * struct efx_tc_state - control plane data for TC offload
+ *
+ * @reps_mport_id: MAE port allocated for representor RX
+ * @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
+ * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
+ * @reps_mport_vport_id: vport_id for representor RX filters
+ * @dflt: Match-action rules for default switching; at priority
+ * %EFX_TC_PRIO_DFLT. Named by *ingress* port
+ * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
+ * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ */
+struct efx_tc_state {
+ u32 reps_mport_id, reps_mport_vport_id;
+ s32 reps_filter_uc, reps_filter_mc;
+ struct {
+ struct efx_tc_flow_rule pf;
+ struct efx_tc_flow_rule wire;
+ } dflt;
+};
+
+struct efx_rep;
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule);
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx);
+void efx_tc_remove_rep_filters(struct efx_nic *efx);
+
+int efx_init_tc(struct efx_nic *efx);
+void efx_fini_tc(struct efx_nic *efx);
+
+int efx_init_struct_tc(struct efx_nic *efx);
+void efx_fini_struct_tc(struct efx_nic *efx);
+
+#endif /* EFX_TC_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 138bca611341..d12474042c84 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -512,7 +512,7 @@ unlock:
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
@@ -559,6 +559,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
bool finished = false;
@@ -580,7 +581,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
/* Need to check the flag before dequeueing. */
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -589,7 +591,7 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
- EFX_WARN_ON_PARANOID(pkts_compl != 1);
+ EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
efx_xmit_done_check_empty(tx_queue);
}
@@ -609,7 +611,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data;
unsigned tc, num_tc;
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 658ea2d34070..67e789b96c43 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -109,9 +109,11 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
++tx_queue->read_count;
}
@@ -146,7 +148,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -164,9 +167,15 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->flags & EFX_TX_BUF_SKB) {
struct sk_buff *skb = (struct sk_buff *)buffer->skb;
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
+ if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
+ EFX_WARN_ON_PARANOID(!efv_pkts_compl);
+ (*efv_pkts_compl)++;
+ } else {
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
+
if (tx_queue->timestamping &&
(tx_queue->completed_timestamp_major ||
tx_queue->completed_timestamp_minor)) {
@@ -199,7 +208,8 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -218,7 +228,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
+ efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -241,15 +252,17 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
- if (pkts_compl > 1)
+ if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
@@ -274,6 +287,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
+ unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
@@ -282,7 +296,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
}
}
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index bbab7f248250..d87aecbc7bf1 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -19,7 +19,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
- unsigned int *bytes_compl);
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 929cfc22cd0c..31ff35174034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -91,6 +91,9 @@ config DWMAC_IPQ806X
acceleration features available on this SoC. Network devices
will behave like standard non-accelerated ethernet interfaces.
+ Select the QCOM_SOCINFO config flag to enable a dwmac fixup
+ based on the ipq806x SoC revision.
+
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d2cdc02d9f94..2e8744ac6b91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- priv->dma_rx_size) *
+ priv->dma_conf.dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
((tx_q->dirty_tx + 1) %
- priv->dma_tx_size))
+ priv->dma_conf.dma_tx_size))
* sizeof(struct dma_desc)));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bc91fd867dcd..358fc26f8d1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -361,6 +361,7 @@ bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
+ data->sph_disable = 1;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 9a6d819b84ae..378b4dd826bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->tx_delay = tx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
}
@@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->rx_delay = rx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 38fe77d1035e..4f2b82a884b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
priv->plat->mdio_bus_data->xpcs_an_inband = false;
} else {
priv->plat->max_speed = 1000;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
priv->plat->mdio_bus_data->xpcs_an_inband = true;
}
}
@@ -298,6 +297,11 @@ static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
*art_time = ns;
}
+static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
+{
+ return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
+}
+
static int intel_crosststamp(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
@@ -313,8 +317,6 @@ static int intel_crosststamp(ktime_t *device,
u32 num_snapshot;
u32 gpio_value;
u32 acr_value;
- int ret;
- u32 v;
int i;
if (!boot_cpu_has(X86_FEATURE_ART))
@@ -328,6 +330,8 @@ static int intel_crosststamp(ktime_t *device,
if (priv->plat->ext_snapshot_en)
return -EBUSY;
+ priv->plat->int_snapshot_en = 1;
+
mutex_lock(&priv->aux_ts_lock);
/* Enable Internal snapshot trigger */
acr_value = readl(ptpaddr + PTP_ACR);
@@ -347,6 +351,7 @@ static int intel_crosststamp(ktime_t *device,
break;
default:
mutex_unlock(&priv->aux_ts_lock);
+ priv->plat->int_snapshot_en = 0;
return -EINVAL;
}
writel(acr_value, ptpaddr + PTP_ACR);
@@ -368,13 +373,12 @@ static int intel_crosststamp(ktime_t *device,
gpio_value |= GMAC_GPO1;
writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
- /* Poll for time sync operation done */
- ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
- (v & GMAC_INT_TSIE), 100, 10000);
-
- if (ret == -ETIMEDOUT) {
- pr_err("%s: Wait for time sync operation timeout\n", __func__);
- return ret;
+ /* Time sync done Indication - Interrupt method */
+ if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
+ stmmac_cross_ts_isr(priv),
+ HZ / 100)) {
+ priv->plat->int_snapshot_en = 0;
+ return -ETIMEDOUT;
}
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
@@ -392,6 +396,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
+ priv->plat->int_snapshot_en = 0;
return 0;
}
@@ -443,6 +448,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct fwnode_handle *fwnode;
char clk_name[20];
int ret;
int i;
@@ -561,12 +567,42 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Use the last Rx queue */
plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+ /* For fixed-link setup, we allow phy-mode setting */
+ fwnode = dev_fwnode(&pdev->dev);
+ if (fwnode) {
+ int phy_mode;
+
+ /* "phy-mode" setting is optional. If it is set,
+ * we allow either sgmii or 1000base-x for now.
+ */
+ phy_mode = fwnode_get_phy_mode(fwnode);
+ if (phy_mode >= 0) {
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+ plat->phy_interface = phy_mode;
+ else
+ dev_warn(&pdev->dev, "Invalid phy-mode\n");
+ }
+ }
+
/* Intel mgbe SGMII interface uses pcs-xcps */
- if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
plat->mdio_bus_data->has_xpcs = true;
plat->mdio_bus_data->xpcs_an_inband = true;
}
+ /* For fixed-link setup, we clear xpcs_an_inband */
+ if (fwnode) {
+ struct fwnode_handle *fixed_node;
+
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node)
+ plat->mdio_bus_data->xpcs_an_inband = false;
+
+ fwnode_handle_put(fixed_node);
+ }
+
/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
@@ -576,6 +612,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_crossts = true;
plat->crosststamp = intel_crosststamp;
+ plat->int_snapshot_en = 0;
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
@@ -1097,6 +1134,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
pcim_iounmap_regions(pdev, BIT(0));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index f7dc8458cde8..e888c8a9c830 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -27,6 +27,8 @@
#include <linux/stmmac.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
#include "stmmac_platform.h"
@@ -64,6 +66,17 @@
#define NSS_COMMON_CLK_DIV_SGMII_100 4
#define NSS_COMMON_CLK_DIV_SGMII_10 49
+#define QSGMII_PCS_ALL_CH_CTL 0x80
+#define QSGMII_PCS_CH_SPEED_FORCE BIT(1)
+#define QSGMII_PCS_CH_SPEED_10 0x0
+#define QSGMII_PCS_CH_SPEED_100 BIT(2)
+#define QSGMII_PCS_CH_SPEED_1000 BIT(3)
+#define QSGMII_PCS_CH_SPEED_MASK (QSGMII_PCS_CH_SPEED_FORCE | \
+ QSGMII_PCS_CH_SPEED_10 | \
+ QSGMII_PCS_CH_SPEED_100 | \
+ QSGMII_PCS_CH_SPEED_1000)
+#define QSGMII_PCS_CH_SPEED_SHIFT(x) ((x) * 4)
+
#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
@@ -75,11 +88,20 @@
#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
#define QSGMII_PHY_QSGMII_EN BIT(7)
-#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
-#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
-#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
-#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
-#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_DEEMPHASIS_LVL_MASK GENMASK(11, 10)
+#define QSGMII_PHY_DEEMPHASIS_LVL(x) FIELD_PREP(QSGMII_PHY_DEEMPHASIS_LVL_MASK, (x))
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK GENMASK(14, 12)
+#define QSGMII_PHY_PHASE_LOOP_GAIN(x) FIELD_PREP(QSGMII_PHY_PHASE_LOOP_GAIN_MASK, (x))
+#define QSGMII_PHY_RX_DC_BIAS_MASK GENMASK(19, 18)
+#define QSGMII_PHY_RX_DC_BIAS(x) FIELD_PREP(QSGMII_PHY_RX_DC_BIAS_MASK, (x))
+#define QSGMII_PHY_RX_INPUT_EQU_MASK GENMASK(21, 20)
+#define QSGMII_PHY_RX_INPUT_EQU(x) FIELD_PREP(QSGMII_PHY_RX_INPUT_EQU_MASK, (x))
+#define QSGMII_PHY_CDR_PI_SLEW_MASK GENMASK(23, 22)
+#define QSGMII_PHY_CDR_PI_SLEW(x) FIELD_PREP(QSGMII_PHY_CDR_PI_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_SLEW_MASK GENMASK(27, 26)
+#define QSGMII_PHY_TX_SLEW(x) FIELD_PREP(QSGMII_PHY_TX_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_DRV_AMP_MASK GENMASK(31, 28)
+#define QSGMII_PHY_TX_DRV_AMP(x) FIELD_PREP(QSGMII_PHY_TX_DRV_AMP_MASK, (x))
struct ipq806x_gmac {
struct platform_device *pdev;
@@ -242,6 +264,113 @@ static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
ipq806x_gmac_set_speed(gmac, speed);
}
+static int
+ipq806x_gmac_configure_qsgmii_pcs_speed(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn;
+ int link_speed;
+ int val = 0;
+ int ret;
+
+ /* Some bootloaders may apply a wrong configuration and leave the
+ * port non-functional. If a fixed link is not set, reset the
+ * force speed bit.
+ */
+ if (!of_phy_is_fixed_link(pdev->dev.of_node))
+ goto write;
+
+ dn = of_get_child_by_name(pdev->dev.of_node, "fixed-link");
+ ret = of_property_read_u32(dn, "speed", &link_speed);
+ of_node_put(dn);
+ if (ret) {
+ dev_err(dev, "found fixed-link node with no speed");
+ return ret;
+ }
+
+ val = QSGMII_PCS_CH_SPEED_FORCE;
+
+ switch (link_speed) {
+ case SPEED_1000:
+ val |= QSGMII_PCS_CH_SPEED_1000;
+ break;
+ case SPEED_100:
+ val |= QSGMII_PCS_CH_SPEED_100;
+ break;
+ case SPEED_10:
+ val |= QSGMII_PCS_CH_SPEED_10;
+ break;
+ }
+
+write:
+ regmap_update_bits(gmac->qsgmii_csr, QSGMII_PCS_ALL_CH_CTL,
+ QSGMII_PCS_CH_SPEED_MASK <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id),
+ val <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id));
+
+ return 0;
+}
+
+static const struct soc_device_attribute ipq806x_gmac_soc_v1[] = {
+ {
+ .revision = "1.*",
+ },
+ {
+ /* sentinel */
+ }
+};
+
+static int
+ipq806x_gmac_configure_qsgmii_params(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ u32 qsgmii_param;
+
+ switch (gmac->id) {
+ case 1:
+ soc = soc_device_match(ipq806x_gmac_soc_v1);
+
+ if (soc)
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xc) |
+ QSGMII_PHY_TX_SLEW(0x2) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x2);
+ else
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xd) |
+ QSGMII_PHY_TX_SLEW(0x0) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x0);
+
+ qsgmii_param |= QSGMII_PHY_RX_DC_BIAS(0x2);
+ break;
+ case 2:
+ case 3:
+ qsgmii_param = QSGMII_PHY_RX_DC_BIAS(0x3) |
+ QSGMII_PHY_TX_DRV_AMP(0xc);
+ break;
+ default: /* gmac 0 can't be set in SGMII mode */
+ dev_err(dev, "gmac id %d can't be in SGMII mode", gmac->id);
+ return -EINVAL;
+ }
+
+ /* Common params across all gmac id */
+ qsgmii_param |= QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ QSGMII_PHY_PHASE_LOOP_GAIN(0x4) |
+ QSGMII_PHY_RX_INPUT_EQU(0x1) |
+ QSGMII_PHY_CDR_PI_SLEW(0x2);
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ qsgmii_param);
+
+ return 0;
+}
+
static int ipq806x_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -328,17 +457,13 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
- regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
- QSGMII_PHY_CDR_EN |
- QSGMII_PHY_RX_FRONT_EN |
- QSGMII_PHY_RX_SIGNAL_DETECT_EN |
- QSGMII_PHY_TX_DRIVER_EN |
- QSGMII_PHY_QSGMII_EN |
- 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
- 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
- 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
- 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
- 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ err = ipq806x_gmac_configure_qsgmii_params(gmac);
+ if (err)
+ goto err_remove_config_dt;
+
+ err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac);
+ if (err)
+ goto err_remove_config_dt;
}
plat_dat->has_gmac = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 6ff88df58767..d42e1afb6521 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -576,32 +576,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
}
}
- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
- if (ret) {
- dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(plat->rmii_internal_clk);
- if (ret) {
- dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
- goto err_clk;
- }
-
return 0;
-
-err_clk:
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
- return ret;
-}
-
-static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct mediatek_dwmac_plat_data *plat = priv;
- const struct mediatek_dwmac_variant *variant = plat->variant;
-
- clk_disable_unprepare(plat->rmii_internal_clk);
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
}
static int mediatek_dwmac_clks_config(void *priv, bool enabled)
@@ -643,7 +618,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->addr64 = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
- plat->exit = mediatek_dwmac_exit;
plat->clks_config = mediatek_dwmac_clks_config;
if (priv_plat->variant->dwmac_fix_mac_speed)
plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
@@ -712,13 +686,33 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
mediatek_dwmac_init(pdev, priv_plat);
+ ret = mediatek_dwmac_clks_config(priv_plat, true);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- stmmac_remove_config_dt(pdev, plat_dat);
- return ret;
- }
+ if (ret)
+ goto err_drv_probe;
return 0;
+
+err_drv_probe:
+ mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int mediatek_dwmac_remove(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ mediatek_dwmac_clks_config(priv_plat, false);
+
+ return ret;
}
static const struct of_device_id mediatek_dwmac_match[] = {
@@ -733,7 +727,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
static struct platform_driver mediatek_dwmac_driver = {
.probe = mediatek_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = mediatek_dwmac_remove,
.driver = {
.name = "dwmac-mediatek",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 462ca7ed095a..71dad409f78b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -150,7 +150,8 @@
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
-#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
+#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
+ GMAC_INT_TSIE)
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index fd41db65fe1d..d8f1fbc25bdd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -23,6 +23,7 @@
static void dwmac4_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -58,6 +59,9 @@ static void dwmac4_core_init(struct mac_device_info *hw,
value |= GMAC_INT_FPE_EN;
writel(value, ioaddr + GMAC_INT_EN);
+
+ if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
+ init_waitqueue_head(&priv->tstamp_busy_wait);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -219,6 +223,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
if (queue == 0 || queue == 4) {
value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else if (queue > 4) {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
} else {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a57b0fa815ab..ea4910ae0921 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -197,7 +197,7 @@ static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
MMC_CNTRL, value);
}
-/* To mask all all interrupts.*/
+/* To mask all interrupts.*/
static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 8ad900949dc8..2b5b17d8b8a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 57970ae2178d..bdbf86cb102a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -188,6 +188,18 @@ struct stmmac_rfs_entry {
int tc;
};
+struct stmmac_dma_conf {
+ unsigned int dma_buf_sz;
+
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -201,7 +213,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int dma_buf_sz;
unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -213,13 +224,7 @@ struct stmmac_priv {
int (*hwif_quirks)(struct stmmac_priv *priv);
struct mutex lock;
- /* RX Queue */
- struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
- unsigned int dma_rx_size;
-
- /* TX Queue */
- struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- unsigned int dma_tx_size;
+ struct stmmac_dma_conf dma_conf;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
@@ -266,6 +271,7 @@ struct stmmac_priv {
rwlock_t ptp_lock;
/* Protects auxiliary snapshot registers from concurrent access. */
struct mutex aux_ts_lock;
+ wait_queue_head_t tstamp_busy_wait;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index abfb3cd5958d..d6a44d53fe08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -485,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = DMA_MAX_RX_SIZE;
ring->tx_max_pending = DMA_MAX_TX_SIZE;
- ring->rx_pending = priv->dma_rx_size;
- ring->tx_pending = priv->dma_tx_size;
+ ring->rx_pending = priv->dma_conf.dma_rx_size;
+ ring->tx_pending = priv->dma_conf.dma_tx_size;
}
static int stmmac_set_ringparam(struct net_device *netdev,
@@ -803,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
netdev_warn(priv->dev,
"Setting EEE tx-lpi is not supported\n");
- if (priv->hw->xpcs) {
- ret = xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- edata->eee_enabled);
- if (ret)
- return ret;
- }
-
if (!edata->eee_enabled)
stmmac_disable_eee_mode(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 92d32940aff0..764832f4dae1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -179,6 +179,11 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
u64 ptp_time;
int i;
+ if (priv->plat->int_snapshot_en) {
+ wake_up(&priv->tstamp_busy_wait);
+ return;
+ }
+
tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
if (!tsync_int)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d1a7cf4567bc..070b5ef165eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,8 +74,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -130,6 +130,9 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
@@ -231,7 +234,7 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
/* synchronize_rcu() needed for pending XDP buffers to drain */
for (queue = 0; queue < rx_queues_cnt; queue++) {
- rx_q = &priv->rx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
if (rx_q->xsk_pool) {
synchronize_rcu();
break;
@@ -357,13 +360,13 @@ static void print_pkt(unsigned char *buf, int len)
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 avail;
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -375,13 +378,13 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
*/
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 dirty;
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -409,7 +412,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* check if all TX queues have the work finished */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
return -EBUSY; /* still unfinished work */
@@ -834,19 +837,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0) {
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -1128,18 +1122,20 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct device_node *node;
+ struct fwnode_handle *fwnode;
int ret;
- node = priv->plat->phylink_node;
+ fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
- if (node)
- ret = phylink_of_phy_connect(priv->phylink, node, 0);
+ if (fwnode)
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
- if (!node || ret) {
+ if (!fwnode || ret) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1227,7 +1223,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
return 0;
}
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int desc_size;
@@ -1236,7 +1233,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue);
@@ -1249,12 +1246,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
}
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
}
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
unsigned int desc_size;
@@ -1263,7 +1261,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue);
@@ -1278,18 +1276,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
}
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Display RX ring */
- stmmac_display_rx_rings(priv);
+ stmmac_display_rx_rings(priv, dma_conf);
/* Display TX ring */
- stmmac_display_tx_rings(priv);
+ stmmac_display_tx_rings(priv, dma_conf);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1313,44 +1312,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
/**
* stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: this function is called to clear the RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
/* Clear the RX descriptors */
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
}
/**
* stmmac_clear_tx_descriptors - clear tx descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index.
* Description: this function is called to clear the TX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
/* Clear the TX descriptors */
- for (i = 0; i < priv->dma_tx_size; i++) {
- int last = (i == (priv->dma_tx_size - 1));
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1367,10 +1372,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/**
* stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* Description: this function is called to clear the TX and RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1378,16 +1385,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
/* Clear the RX descriptors */
for (queue = 0; queue < rx_queue_cnt; queue++)
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
/* Clear the TX descriptors */
for (queue = 0; queue < tx_queue_cnt; queue++)
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @p: descriptor pointer
* @i: descriptor index
* @flags: gfp flag
@@ -1395,10 +1403,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -1427,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
return 0;
@@ -1436,12 +1446,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_free_rx_buffer - free RX dma buffers
* @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
* @i: buffer index.
*/
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+ struct stmmac_rx_queue *rx_q,
+ int i)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
@@ -1456,12 +1467,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* stmmac_free_tx_buffer - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @i: buffer index.
*/
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, int i)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1500,23 +1514,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, rx_q, i);
}
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct dma_desc *p;
int ret;
@@ -1525,7 +1544,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
else
p = rx_q->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
queue);
if (ret)
return ret;
@@ -1539,14 +1558,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
/**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
@@ -1557,12 +1579,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
}
}
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
@@ -1597,22 +1621,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
/**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
@@ -1639,32 +1666,31 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
/* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only.
*/
- stmmac_alloc_rx_buffers_zc(priv, queue);
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
} else {
- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
if (ret < 0)
return -ENOMEM;
}
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
-
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
+ dma_conf->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
+ dma_conf->dma_rx_size, 0);
}
return 0;
}
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
@@ -1676,7 +1702,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"SKB addresses:\nskb\t\tskb data\tdma data\n");
for (queue = 0; queue < rx_count; queue++) {
- ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
if (ret)
goto err_init_rx_buffers;
}
@@ -1685,12 +1711,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
err_init_rx_buffers:
while (queue >= 0) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
@@ -1704,14 +1730,17 @@ err_init_rx_buffers:
/**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
netif_dbg(priv, probe, priv->dev,
@@ -1723,16 +1752,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
+ dma_conf->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
+ dma_conf->dma_tx_size, 0);
}
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1751,16 +1780,11 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff[i] = NULL;
}
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
-
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
-
return 0;
}
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt;
@@ -1769,7 +1793,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++)
- __init_dma_tx_desc_rings(priv, queue);
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
return 0;
}
@@ -1777,26 +1801,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
+ * @dma_conf: structure to take the dma data
* @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_rx_desc_rings(dev, flags);
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
if (ret)
return ret;
- ret = init_dma_tx_desc_rings(dev);
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, dma_conf);
if (netif_msg_hw(priv))
- stmmac_display_rings(priv);
+ stmmac_display_rings(priv, dma_conf);
return ret;
}
@@ -1804,17 +1831,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
tx_q->xsk_frames_done = 0;
- for (i = 0; i < priv->dma_tx_size; i++)
- stmmac_free_tx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1833,34 +1863,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++)
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
}
/**
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1872,29 +1905,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
page_pool_destroy(rx_q->page_pool);
}
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++)
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
}
/**
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, dma_conf, queue);
if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc);
@@ -1907,7 +1944,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
addr = tx_q->dma_tx;
}
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1915,28 +1952,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
kfree(tx_q->tx_skbuff);
}
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++)
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
}
/**
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
@@ -1948,8 +1989,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = priv->dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
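The pool sizing above is where the buffer size feeds into the page allocator. A quick worked example of the order computation, assuming 4 KiB pages:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int buf_sz = 16384;	/* dma_conf.dma_buf_sz for a 16 KiB buffer */
		unsigned int page_size = 4096;
		unsigned int num_pages = DIV_ROUND_UP(buf_sz, page_size);	/* 4 */
		unsigned int order = 0;

		while ((1u << order) < num_pages)	/* ilog2() for this power-of-two case */
			order++;

		printf("order-%u allocations (%u pages each)\n", order, num_pages);
		return 0;
	}

So a standard 1500-byte MTU stays at order 0, while a jumbo configuration that bumps dma_buf_sz to BUF_SIZE_16KiB pays for order-2 pages per RX buffer.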
@@ -1964,7 +2005,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return ret;
}
- rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
@@ -1972,7 +2013,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -1981,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2006,7 +2047,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
@@ -2014,7 +2056,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2022,7 +2064,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2030,28 +2072,31 @@ err_dma:
/**
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -2064,7 +2109,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
else
size = sizeof(struct dma_desc);
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2081,7 +2126,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2089,7 +2135,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2097,27 +2143,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
/**
* alloc_dma_desc_resources - alloc TX/RX resources.
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* RX Allocation */
- int ret = alloc_dma_rx_desc_resources(priv);
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
if (ret)
return ret;
- ret = alloc_dma_tx_desc_resources(priv);
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2125,16 +2173,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
/**
* free_dma_desc_resources - free dma desc resources
* @priv: private structure
+ * @dma_conf: structure to take the dma data
*/
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Release the DMA TX socket buffers */
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
/* Release the DMA RX socket buffers later
* to ensure all pending XDP_TX buffers are returned.
*/
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
}
/**
@@ -2308,7 +2358,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
@@ -2323,7 +2373,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
chan);
}
}
@@ -2339,7 +2389,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2414,7 +2464,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
@@ -2455,7 +2505,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
*/
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
@@ -2468,7 +2518,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
entry = tx_q->dirty_tx;
/* Try to clean all TX complete frame in 1 shot */
- while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
struct xdp_frame *xdpf;
struct sk_buff *skb;
struct dma_desc *p;
@@ -2570,7 +2620,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2635,17 +2685,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
*/
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
- dma_free_tx_skbufs(priv, chan);
- stmmac_clear_tx_descriptors(priv, chan);
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
+ stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
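stmmac_tx_err() now delegates the software ring reset to stmmac_reset_tx_queue(), one of the three helpers forward-declared near the top of this diff. Their definitions are not part of this excerpt, but the inline code they replace (here and in __init_dma_tx_desc_rings() above) implies roughly the following; treat it as a sketch, not the committed implementation:

	static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
	{
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
	{
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

stmmac_reset_queues_param() presumably just loops both helpers over every RX and TX queue, which is what __stmmac_open() relies on further down.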
@@ -2705,8 +2752,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir);
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
@@ -2872,7 +2919,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -2886,7 +2933,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -2901,7 +2948,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
@@ -2951,7 +2998,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
@@ -2973,12 +3020,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (priv->dma_tx_size - 1), chan);
+ (priv->dma_conf.dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (priv->dma_rx_size - 1), chan);
+ (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
@@ -3270,6 +3317,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
+ if (ptp_register) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ }
+
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_info(priv->dev, "PTP not supported by HW\n");
@@ -3305,7 +3360,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
/* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3327,7 +3382,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
@@ -3371,7 +3426,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL);
- free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
}
}
irq_idx = priv->plat->rx_queues_to_use;
@@ -3380,7 +3435,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL);
- free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
}
}
@@ -3515,7 +3570,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
- 0, int_name, &priv->rx_queue[i]);
+ 0, int_name, &priv->dma_conf.rx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
@@ -3540,7 +3595,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
- 0, int_name, &priv->tx_queue[i]);
+ 0, int_name, &priv->dma_conf.tx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
@@ -3627,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev)
}
/**
- * stmmac_open - open entry point of the driver
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
+ * @priv: driver private structure
+ * @mtu: MTU to setup the dma queue and buf with
+ * Description: Allocate and generate a dma_conf based on the provided MTU.
+ * Allocate the Tx/Rx DMA queue and init them.
+ * Return value:
+ * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+ struct stmmac_dma_conf *dma_conf;
+ int chan, bfsize, ret;
+
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+ if (!dma_conf) {
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(mtu, 0);
+
+ dma_conf->dma_buf_sz = bfsize;
+ /* Chose the tx/rx size from the already defined one in the
+ * priv struct. (if defined)
+ */
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+ if (!dma_conf->dma_tx_size)
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!dma_conf->dma_rx_size)
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv, dma_conf);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto alloc_error;
+ }
+
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ return dma_conf;
+
+init_error:
+ free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+ kfree(dma_conf);
+ return ERR_PTR(ret);
+}
+
+/**
+ * __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
+ * @dma_conf : structure to take the dma data
* Description:
* This function is the open entry point of the driver.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_open(struct net_device *dev)
+static int __stmmac_open(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
- int bfsize = 0;
u32 chan;
int ret;
@@ -3664,45 +3793,12 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- if (bfsize < 0)
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
- priv->dma_buf_sz = bfsize;
- buf_sz = bfsize;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- if (!priv->dma_tx_size)
- priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- if (!priv->dma_rx_size)
- priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
-
- /* Earlier check for TBS */
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
-
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- }
+ buf_sz = dma_conf->dma_buf_sz;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
- ret = alloc_dma_desc_resources(priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- __func__);
- goto dma_desc_error;
- }
-
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- __func__);
- goto init_error;
- }
+ stmmac_reset_queues_param(priv);
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
@@ -3730,18 +3826,32 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
-dma_desc_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
return ret;
}
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_dma_conf *dma_conf;
+ int ret;
+
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+ if (IS_ERR(dma_conf))
+ return PTR_ERR(dma_conf);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ return ret;
+}
+
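The ownership rules here are easy to misread: stmmac_setup_dma_desc() returns a heap-allocated stmmac_dma_conf, __stmmac_open() copies it by value into priv->dma_conf, and the caller frees the temporary. The DMA rings themselves are not duplicated, only the structure holding their pointers and sizes, so the kfree() is safe. A stand-alone illustration of the pattern (all names made up):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct conf { unsigned int rx_size; void *ring; };
	struct priv { struct conf conf; };

	static struct conf *setup_conf(unsigned int entries)
	{
		struct conf *c = calloc(1, sizeof(*c));

		if (!c)
			return NULL;
		c->rx_size = entries;
		c->ring = malloc(entries * sizeof(int));	/* stands in for the DMA rings */
		return c;
	}

	int main(void)
	{
		struct priv p = { 0 };
		struct conf *c = setup_conf(512);

		if (!c)
			return 1;
		memcpy(&p.conf, c, sizeof(p.conf));	/* priv now holds its own copy ... */
		free(c);				/* ... so the temporary container can go */

		printf("rx=%u ring=%p\n", p.conf.rx_size, p.conf.ring);	/* ring still valid */
		free(p.conf.ring);
		return 0;
	}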
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
@@ -3763,8 +3873,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- netif_tx_disable(dev);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -3774,7 +3882,9 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ netif_tx_disable(dev);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -3788,7 +3898,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -3832,7 +3942,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true;
}
@@ -3850,7 +3960,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
@@ -3861,7 +3971,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3889,7 +3999,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int desc_size;
if (likely(priv->extend_desc))
@@ -3951,7 +4061,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des;
int i;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -3959,7 +4069,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
hdr = sizeof(struct udphdr);
} else {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
hdr = tcp_hdrlen(skb);
}
@@ -3991,7 +4101,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -4103,7 +4213,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4191,7 +4301,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, first_tx;
dma_addr_t des;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4254,7 +4364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -4325,7 +4435,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -4440,7 +4550,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -4494,7 +4604,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -4522,12 +4632,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
/* First descriptor and last descriptor and not split header */
- return min_t(unsigned int, priv->dma_buf_sz, plen);
+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
@@ -4543,7 +4653,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
/* Not last descriptor */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
@@ -4554,7 +4664,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4617,7 +4727,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
@@ -4791,7 +4901,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
@@ -4834,7 +4944,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
if (rx_desc) {
@@ -4849,7 +4959,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
@@ -4871,7 +4981,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -4918,7 +5028,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5039,7 +5149,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
@@ -5052,7 +5162,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5066,7 +5176,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -5110,7 +5220,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5245,7 +5355,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
@@ -5257,7 +5367,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
@@ -5441,18 +5551,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ struct stmmac_dma_conf *dma_conf;
const int mtu = new_mtu;
+ int ret;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use;
- if (netif_running(dev)) {
- netdev_err(priv->dev, "must be stopped to change its MTU\n");
- return -EBUSY;
- }
-
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
return -EINVAL;
@@ -5464,8 +5571,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = mtu;
+ if (netif_running(dev)) {
+ netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
+ /* Try to allocate the new DMA conf with the new mtu */
+ dma_conf = stmmac_setup_dma_desc(priv, mtu);
+ if (IS_ERR(dma_conf)) {
+ netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
+ mtu);
+ return PTR_ERR(dma_conf);
+ }
+
+ stmmac_release(dev);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ if (ret) {
+ netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
+ return ret;
+ }
+ stmmac_set_rx_mode(dev);
+ }
+
+ dev->mtu = mtu;
netdev_update_features(dev);
return 0;
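Behaviourally this is the headline change of the hunk: an MTU update no longer requires the interface to be down. The replacement DMA configuration is allocated before the old one is torn down, so a failed allocation leaves the running setup untouched. From user space the difference is simply that SIOCSIFMTU now succeeds on a live interface; a minimal illustration, where the interface name and MTU value are examples and the XDP/FIFO-size checks above still apply:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>

	int main(void)
	{
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_mtu = 9000;

		if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)	/* used to fail with EBUSY while up */
			perror("SIOCSIFMTU");

		close(fd);
		return 0;
	}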
@@ -5699,11 +5827,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = tx_q->queue_index;
struct stmmac_priv *priv;
int status;
- priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5729,10 +5859,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = rx_q->queue_index;
struct stmmac_priv *priv;
- priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
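Because the per-queue structures no longer sit directly inside struct stmmac_priv, the MSI handlers recover the private data with two container_of() hops: queue -> dma_conf -> priv. A stand-alone illustration of the idiom with made-up types (the kernel's container_of() additionally type-checks the member, and in the driver the array index is the runtime channel number rather than a constant):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct queue { int index; };
	struct dma_conf { struct queue tx_queue[4]; };
	struct priv { int id; struct dma_conf dma_conf; };

	int main(void)
	{
		struct priv p = { .id = 42 };
		struct queue *q = &p.dma_conf.tx_queue[2];

		struct dma_conf *conf = container_of(q, struct dma_conf, tx_queue[2]);
		struct priv *owner = container_of(conf, struct priv, dma_conf);

		printf("recovered priv with id=%d\n", owner->id);	/* prints 42 */
		return 0;
	}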
@@ -5763,10 +5895,10 @@ static void stmmac_poll_controller(struct net_device *dev)
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
@@ -5945,34 +6077,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
return 0;
for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
seq_printf(seq, "RX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
}
}
for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
seq_printf(seq, "TX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
}
}
@@ -6306,31 +6438,32 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
int ret;
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}
- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) {
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
return;
}
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_reset_rx_queue(priv, queue);
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
@@ -6347,7 +6480,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6368,30 +6501,31 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}
- ret = __init_dma_tx_desc_rings(priv, queue);
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) {
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_reset_tx_queue(priv, queue);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
@@ -6419,7 +6553,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -6428,7 +6562,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6453,14 +6587,14 @@ int stmmac_xdp_open(struct net_device *dev)
u32 chan;
int ret;
- ret = alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__);
@@ -6478,7 +6612,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -6496,7 +6630,7 @@ int stmmac_xdp_open(struct net_device *dev)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6505,7 +6639,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -6538,11 +6672,11 @@ int stmmac_xdp_open(struct net_device *dev)
irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
return ret;
}
@@ -6565,8 +6699,8 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- rx_q = &priv->rx_queue[queue];
- tx_q = &priv->tx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
@@ -6821,8 +6955,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
if (netif_running(dev))
stmmac_release(dev);
- priv->dma_rx_size = rx_size;
- priv->dma_tx_size = tx_size;
+ priv->dma_conf.dma_rx_size = rx_size;
+ priv->dma_conf.dma_tx_size = tx_size;
if (netif_running(dev))
ret = stmmac_open(dev);
@@ -7213,8 +7347,6 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
pm_runtime_get_sync(dev);
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -7241,6 +7373,9 @@ int stmmac_dvr_remove(struct device *dev)
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
@@ -7268,7 +7403,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -7317,6 +7452,25 @@ int stmmac_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+}
+
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
/**
* stmmac_reset_queues_param - reset queue parameters
* @priv: device pointer
@@ -7327,22 +7481,11 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
- for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
- }
+ for (queue = 0; queue < rx_cnt; queue++)
+ stmmac_reset_rx_queue(priv, queue);
- for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-
- tx_q->cur_tx = 0;
- tx_q->dirty_tx = 0;
- tx_q->mss = 0;
-
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
- }
+ for (queue = 0; queue < tx_cnt; queue++)
+ stmmac_reset_tx_queue(priv, queue);
}
/**
@@ -7402,7 +7545,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 03d3d1f7aa4b..5f177ea80725 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -434,9 +434,11 @@ int stmmac_mdio_register(struct net_device *ndev)
int err = 0;
struct mii_bus *new_bus;
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
+ struct fwnode_handle *fixed_node;
int addr, found, max_addr;
if (!mdio_bus_data)
@@ -490,6 +492,18 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->plat->has_xgmac)
stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+ /* If fixed-link is set, skip PHY scanning */
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ if (fwnode) {
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node) {
+ fwnode_handle_put(fixed_node);
+ goto bus_register_done;
+ }
+ }
+
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
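
A side note on the fixed-link check added above: fwnode_get_named_child_node() returns a counted reference that must be dropped once the presence test is done. A hedged sketch of the same pattern in isolation (only the "fixed-link" node name is taken from the driver; the helper itself is illustrative):

	#include <linux/property.h>

	static bool demo_has_fixed_link(struct fwnode_handle *fwnode)
	{
		struct fwnode_handle *child;

		child = fwnode_get_named_child_node(fwnode, "fixed-link");
		if (!child)
			return false;

		/* Drop the reference taken by the lookup; only presence matters. */
		fwnode_handle_put(child);
		return true;
	}
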
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 11e1055e8260..9f5cac4000da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (ret)
return ret;
- stmmac_init_tstamp_counter(priv, priv->systime_flags);
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e45fb191d8e6..4d11980dcd64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -175,11 +175,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
void __iomem *ptpaddr = priv->ptpaddr;
- void __iomem *ioaddr = priv->hw->pcsr;
struct stmmac_pps_cfg *cfg;
- u32 intr_value, acr_value;
int ret = -EOPNOTSUPP;
unsigned long flags;
+ u32 acr_value;
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
@@ -213,19 +212,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
priv->plat->ext_snapshot_num >>
PTP_ACR_ATSEN_SHIFT);
- /* Enable Timestamp Interrupt */
- intr_value = readl(ioaddr + GMAC_INT_EN);
- intr_value |= GMAC_INT_TSIE;
- writel(intr_value, ioaddr + GMAC_INT_EN);
-
} else {
netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
priv->plat->ext_snapshot_num >>
PTP_ACR_ATSEN_SHIFT);
- /* Disable Timestamp Interrupt */
- intr_value = readl(ioaddr + GMAC_INT_EN);
- intr_value &= ~GMAC_INT_TSIE;
- writel(intr_value, ioaddr + GMAC_INT_EN);
}
writel(acr_value, ptpaddr + PTP_ACR);
mutex_unlock(&priv->aux_ts_lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 2fc51dc5eb0b..49af7e78b7f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
struct stmmac_channel *ch = &priv->channel[i];
u32 tail;
- tail = priv->rx_queue[i].dma_rx_phy +
- (priv->dma_rx_size * sizeof(struct dma_desc));
+ tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
+ (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
@@ -1680,7 +1680,7 @@ cleanup:
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
struct stmmac_packet_attrs attr = { };
- int size = priv->dma_buf_sz;
+ int size = priv->dma_conf.dma_buf_sz;
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
/* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break;
if (i >= priv->plat->tx_queues_to_use)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index d61766eeac6d..773e415cc2de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL;
if (qopt->enable)
- priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else
- priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 435dc00d04e5..0b08b0e085e8 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -29,7 +29,7 @@
* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles the
- * the page.
+ * page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
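
The reclamation comment above describes a swap-or-recycle policy; a rough sketch of that idea with generic page helpers (illustrative only, not cassini's actual bookkeeping, which keeps its own per-page reference counts):

	#include <linux/mm.h>
	#include <linux/gfp.h>

	static struct page *demo_reclaim_rx_page(struct page *used)
	{
		if (page_ref_count(used) > 1) {
			/* Still referenced elsewhere: drop our reference and
			 * hand the hardware a freshly allocated replacement.
			 */
			put_page(used);
			return alloc_page(GFP_ATOMIC);
		}

		/* Nobody else holds it, so just recycle the same page. */
		return used;
	}
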
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index ae5f05f03f88..2d91f4936d52 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -764,7 +764,7 @@
* PAUSE thresholds defined in terms of FIFO occupancy and may be translated
* into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
* when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
- * value is is 0x6F.
+ * value is 0x6F.
* DEFAULT: 0x00078
*/
#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 6b59b14e74b1..0cd8493b810f 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -335,7 +335,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->tsolen = 0;
/* Mark the port as belonging to ldmvsw which directs the
- * the common code to use the net_device in the vnet_port
+ * common code to use the net_device in the vnet_port
* rather than the net_device in the vnet (which is used
* by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE
* macro.
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 45bd89153de2..a14591b41acb 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1088,7 +1088,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
/* netif_stop_queue() must be done before checking
- * checking tx index in TX_BUFFS_AVAIL() below, because
+ * tx index in TX_BUFFS_AVAIL() below, because
* in gem_tx(), we update tx_old before checking for
* netif_queue_stopped().
*/
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 77e5dffb558f..8594ee839628 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -545,43 +545,24 @@ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
- printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
- if (hp->tcvr_type == external)
- printk("external ");
- else
- printk("internal ");
- printk("transceiver at ");
hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
- if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
- if (hp->sw_lpa & LPA_100FULL)
- printk("100Mb/s, Full Duplex.\n");
- else
- printk("100Mb/s, Half Duplex.\n");
- } else {
- if (hp->sw_lpa & LPA_10FULL)
- printk("10Mb/s, Full Duplex.\n");
- else
- printk("10Mb/s, Half Duplex.\n");
- }
+
+ netdev_info(hp->dev,
+ "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
+ hp->tcvr_type == external ? "external" : "internal",
+ hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
+ hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
}
static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
- printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
- if (hp->tcvr_type == external)
- printk("external ");
- else
- printk("internal ");
- printk("transceiver at ");
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
- if (hp->sw_bmcr & BMCR_SPEED100)
- printk("100Mb/s, ");
- else
- printk("10Mb/s, ");
- if (hp->sw_bmcr & BMCR_FULLDPLX)
- printk("Full Duplex.\n");
- else
- printk("Half Duplex.\n");
+
+ netdev_info(hp->dev,
+ "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
+ hp->tcvr_type == external ? "external" : "internal",
+ hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
+ hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
}
static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 3773ce5e12cc..546206640492 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -494,7 +494,7 @@ static int spl2sw_probe(struct platform_device *pdev)
/* Add and enable napi. */
netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT);
napi_enable(&comm->rx_napi);
- netif_napi_add(ndev, &comm->tx_napi, spl2sw_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
return 0;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index d435519236e4..e54ce73396ee 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -81,7 +81,7 @@ static int xlgmac_prep_tso(struct sk_buff *skb,
if (ret)
return ret;
- pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->header_len = skb_tcp_all_headers(skb);
pkt_info->tcp_header_len = tcp_hdrlen(skb);
pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
pkt_info->mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index fb92d4c1547d..f4a6b590a1e3 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2467,7 +2467,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port->port_id, ret);
goto dl_port_unreg;
}
- devlink_port_type_eth_set(dl_port, port->ndev);
}
devlink_register(common->devlink);
return ret;
@@ -2511,6 +2510,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2527,6 +2527,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
}
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ return ret;
+
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -2539,25 +2543,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
i, ret);
goto err_cleanup_ndev;
}
+
+ dl_port = &port->devlink_port;
+ devlink_port_type_eth_set(dl_port, port->ndev);
}
ret = am65_cpsw_register_notifiers(common);
if (ret)
goto err_cleanup_ndev;
- ret = am65_cpsw_nuss_register_devlink(common);
- if (ret)
- goto clean_unregister_notifiers;
-
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
return 0;
-clean_unregister_notifiers:
- am65_cpsw_unregister_notifiers(common);
+
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
return ret;
}
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index c02a9654dce6..ffdac6fac054 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -938,7 +938,7 @@ enum velocity_owner {
#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR
ignore MIBFI,RACEI to
reduce intr. frequency
- NOTE.... do not enable NoBuf int mask at driver driver
+ NOTE.... do not enable NoBuf int mask at driver
when (1) NoBuf -> RxThreshold = SF
(2) OK -> RxThreshold = original value
*/
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
new file mode 100644
index 000000000000..b4a4fa0a58f8
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wangxun network device configuration
+#
+
+config NET_VENDOR_WANGXUN
+ bool "Wangxun devices"
+ default y
+ help
+ If you have a network (Ethernet) card from Wangxun(R), say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Wangxun(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_WANGXUN
+
+config TXGBE
+ tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) 10GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called txgbe.
+
+endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
new file mode 100644
index 000000000000..c34db1bead25
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Wangxun network device drivers.
+#
+
+obj-$(CONFIG_TXGBE) += txgbe/
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
new file mode 100644
index 000000000000..431303ca75b4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_TXGBE) += txgbe.o
+
+txgbe-objs := txgbe_main.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
new file mode 100644
index 000000000000..38ddbde0ed0f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_H_
+#define _TXGBE_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_MAX_FDIR_INDICES 63
+
+#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct txgbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char txgbe_driver_name[];
+
+#endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
new file mode 100644
index 000000000000..d3b9f73ecba4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "txgbe.h"
+
+char txgbe_driver_name[] = "txgbe";
+
+/* txgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void txgbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ txgbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * txgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuration of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct txgbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ txgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct txgbe_adapter),
+ TXGBE_MAX_TX_QUEUES,
+ TXGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver txgbe_driver = {
+ .name = txgbe_driver_name,
+ .id_table = txgbe_pci_tbl,
+ .probe = txgbe_probe,
+ .remove = txgbe_remove,
+ .shutdown = txgbe_shutdown,
+};
+
+module_pci_driver(txgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
new file mode 100644
index 000000000000..b2e329f50bae
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_TYPE_H_
+#define _TXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ txgbe_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define TXGBE_DEV_ID_SP1000 0x1001
+#define TXGBE_DEV_ID_WX1820 0x2001
+
+/* Subsystem IDs */
+/* SFP */
+#define TXGBE_ID_SP1000_SFP 0x0000
+#define TXGBE_ID_WX1820_SFP 0x2000
+#define TXGBE_ID_SFP 0x00
+
+/* copper */
+#define TXGBE_ID_SP1000_XAUI 0x1010
+#define TXGBE_ID_WX1820_XAUI 0x2010
+#define TXGBE_ID_XAUI 0x10
+#define TXGBE_ID_SP1000_SGMII 0x1020
+#define TXGBE_ID_WX1820_SGMII 0x2020
+#define TXGBE_ID_SGMII 0x20
+/* backplane */
+#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030
+#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030
+#define TXGBE_ID_KR_KX_KX4 0x30
+/* MAC Interface */
+#define TXGBE_ID_SP1000_MAC_XAUI 0x1040
+#define TXGBE_ID_WX1820_MAC_XAUI 0x2040
+#define TXGBE_ID_MAC_XAUI 0x40
+#define TXGBE_ID_SP1000_MAC_SGMII 0x1060
+#define TXGBE_ID_WX1820_MAC_SGMII 0x2060
+#define TXGBE_ID_MAC_SGMII 0x60
+
+#define TXGBE_NCSI_SUP 0x8000
+#define TXGBE_NCSI_MASK 0x8000
+#define TXGBE_WOL_SUP 0x4000
+#define TXGBE_WOL_MASK 0x4000
+#define TXGBE_DEV_MASK 0xf0
+
+/* Combined interface */
+#define TXGBE_ID_SFI_XAUI 0x50
+
+/* Revision ID */
+#define TXGBE_SP_MPW 1
+
+#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 48f544f6c999..2772a79cd3ed 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -106,7 +106,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
- * into MWD register. The the MCR register is then appropriately setup
+ * into MWD register. The MCR register is then appropriately setup
* to finish the write operation.
*/
static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 89770c2e0ffb..3591b9edc9a1 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,6 +29,7 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
@@ -156,7 +157,7 @@ struct eth_plat_info {
u8 phy; /* MII PHY ID, 0 - 31 */
u8 rxq; /* configurable, currently 0 - 31 only */
u8 txreadyq;
- u8 hwaddr[6];
+ u8 hwaddr[ETH_ALEN];
u8 npe; /* NPE instance used by this interface */
bool has_mdio; /* If this instance has an MDIO bus */
};
@@ -1387,6 +1388,7 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
struct of_phandle_args npe_spec;
struct device_node *mdio_np;
struct eth_plat_info *plat;
+ u8 mac[ETH_ALEN];
int ret;
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
@@ -1428,6 +1430,12 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
}
plat->txreadyq = queue_spec.args[0];
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(plat->hwaddr, mac, ETH_ALEN);
+ }
+
return plat;
}
@@ -1487,7 +1495,10 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- eth_hw_addr_set(ndev, plat->hwaddr);
+ if (is_valid_ether_addr(plat->hwaddr))
+ eth_hw_addr_set(ndev, plat->hwaddr);
+ else
+ eth_hw_addr_random(ndev);
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index 4cbb145c74ab..036062376c06 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -1314,7 +1314,7 @@ void mac_set_rx_mode(struct s_smc *smc, int mode)
o Connect a UPPS ISA or EISA station to the network.
o Give the FORMAC of UPPS station the command to send
restricted tokens until the ring becomes instable.
- o Now connect your test test client.
+ o Now connect your test client.
o The restricted token monitor should detect the restricted token,
and your break point will be reached.
o You can ovserve how the station will clean the ring.
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific before it left the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2495a5719e1c..7962c37b3f14 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -797,7 +797,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ __u8 *full_tos)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -815,6 +816,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
fl4->saddr = info->key.u.ipv4.src;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_flags = info->key.flow_flags;
tos = info->key.tos;
if ((tos == 1) && !geneve->cfg.collect_md) {
@@ -822,6 +824,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
+ if (full_tos)
+ *full_tos = tos;
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
@@ -875,8 +879,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
@@ -910,6 +913,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
+ __u8 full_tos;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
@@ -920,7 +924,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, &full_tos);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -964,7 +968,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -1148,7 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index bdfb2430ab2c..8b2220eb6b92 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Qualcomm IPA driver.
+
+IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.9 4.11
+
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
@@ -7,6 +13,4 @@ ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
-ipa-y += ipa_data-v3.1.o ipa_data-v3.5.1.o \
- ipa_data-v4.2.o ipa_data-v4.5.o \
- ipa_data-v4.9.o ipa_data-v4.11.o
+ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 00f4e506e6e5..1c1895aea811 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index b7e32e87733e..58b708d2fc75 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index 1be823e5c5c2..a204e439c23d 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 683f1f91042f..04f574fe006f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 79398f286a9c..684239e71f46 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 4b96efd05cf2..2333e15f9533 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 9cfe84319ee4..9e307eebd33f 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -665,7 +665,8 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ struct gsi_ring *ring = &evt_ring->ring;
+ size_t size;
u32 val;
/* We program all event rings as GPI type/protocol */
@@ -674,6 +675,7 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ size = ring->count * GSI_RING_ELEMENT_SIZE;
val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
@@ -681,9 +683,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = lower_32_bits(evt_ring->ring.addr);
+ val = lower_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
- val = upper_32_bits(evt_ring->ring.addr);
+ val = upper_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
@@ -700,8 +702,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
- /* Finally, tell the hardware we've completed event 0 (arbitrary) */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+ /* Finally, tell the hardware our "last processed" event (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}
/* Find the transaction whose completion indicates a channel is quiesced */
@@ -720,6 +722,9 @@ static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
list = &trans_info->alloc;
if (!list_empty(list))
goto done;
+ list = &trans_info->committed;
+ if (!list_empty(list))
+ goto done;
list = &trans_info->pending;
if (!list_empty(list))
goto done;
@@ -770,9 +775,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
u32 wrr_weight = 0;
u32 val;
- /* Arbitrarily pick TRE 0 as the first channel element to use */
- channel->tre_ring.index = 0;
-
/* We program all channels as GPI type/protocol */
val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
@@ -823,7 +825,7 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
- gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ gpi->max_outstanding_tre = channel->trans_tre_max *
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
@@ -949,6 +951,8 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
+ /* Hardware assumes this is 0 following reset */
+ channel->tre_ring.index = 0;
gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
@@ -991,75 +995,66 @@ void gsi_resume(struct gsi *gsi)
enable_irq(gsi->irq);
}
-/**
- * gsi_channel_tx_queued() - Report queued TX transfers for a channel
- * @channel: Channel for which to report
- *
- * Report to the network stack the number of bytes and transactions that
- * have been queued to hardware since last call. This and the next function
- * supply information used by the network stack for throttling.
- *
- * For each channel we track the number of transactions used and bytes of
- * data those transactions represent. We also track what those values are
- * each time this function is called. Subtracting the two tells us
- * the number of bytes and transactions that have been added between
- * successive calls.
- *
- * Calling this each time we ring the channel doorbell allows us to
- * provide accurate information to the network stack about how much
- * work we've given the hardware at any point in time.
- */
-void gsi_channel_tx_queued(struct gsi_channel *channel)
+void gsi_trans_tx_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
+ channel->trans_count++;
+ channel->byte_count += trans->len;
+
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+}
+
+void gsi_trans_tx_queued(struct gsi_trans *trans)
{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
+ channel = &gsi->channel[channel_id];
+
byte_count = channel->byte_count - channel->queued_byte_count;
trans_count = channel->trans_count - channel->queued_trans_count;
channel->queued_byte_count = channel->byte_count;
channel->queued_trans_count = channel->trans_count;
- ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
/**
- * gsi_channel_tx_update() - Report completed TX transfers
- * @channel: Channel that has completed transmitting packets
- * @trans: Last transation known to be complete
- *
- * Compute the number of transactions and bytes that have been transferred
- * over a TX channel since the given transaction was committed. Report this
- * information to the network stack.
+ * gsi_trans_tx_completed() - Report completed TX transactions
+ * @trans: TX channel transaction that has completed
*
- * At the time a transaction is committed, we record its channel's
- * committed transaction and byte counts *in the transaction*.
- * Completions are signaled by the hardware with an interrupt, and
- * we can determine the latest completed transaction at that time.
+ * Report that a transaction on a TX channel has completed. At the time a
+ * transaction is committed, we record *in the transaction* its channel's
+ * committed transaction and byte counts. Transactions are completed in
+ * order, and the difference between the channel's byte/transaction count
+ * when the transaction was committed and when it completes tells us
+ * exactly how much data has been transferred while the transaction was
+ * pending.
*
- * The difference between the byte/transaction count recorded in
- * the transaction and the count last time we recorded a completion
- * tells us exactly how much data has been transferred between
- * completions.
- *
- * Calling this each time we learn of a newly-completed transaction
- * allows us to provide accurate information to the network stack
- * about how much work has been completed by the hardware at a given
- * point in time.
+ * We report this information to the network stack, which uses it to manage
+ * the rate at which data is sent to hardware.
*/
-static void
-gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
- u64 byte_count = trans->byte_count + trans->len;
- u64 trans_count = trans->trans_count + 1;
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+ trans_count = trans->trans_count - channel->compl_trans_count;
+ byte_count = trans->byte_count - channel->compl_byte_count;
- byte_count -= channel->compl_byte_count;
- channel->compl_byte_count += byte_count;
- trans_count -= channel->compl_trans_count;
channel->compl_trans_count += trans_count;
+ channel->compl_byte_count += byte_count;
- ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}
/* Channel control interrupt handler */
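
To make the counter bookkeeping described above concrete, here is a hedged sketch of the delta arithmetic (field names simplified; the real counters live in struct gsi_channel and are snapshotted into struct gsi_trans at commit time):

	#include <linux/types.h>

	struct demo_counts {
		u64 byte_count;		/* cumulative bytes committed on the channel */
		u64 trans_count;	/* cumulative transactions committed */
		u64 reported_bytes;	/* byte_count at the last report */
		u64 reported_trans;	/* trans_count at the last report */
	};

	static void demo_report_queued(struct demo_counts *c)
	{
		u64 bytes = c->byte_count - c->reported_bytes;	/* new since last report */
		u64 trans = c->trans_count - c->reported_trans;

		c->reported_bytes = c->byte_count;
		c->reported_trans = c->trans_count;

		/* These two deltas are what gets reported to the network stack. */
		(void)bytes;
		(void)trans;
	}
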
@@ -1327,61 +1322,73 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
}
/* Return the transaction associated with a transfer completion event */
-static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
- struct gsi_event *event)
+static struct gsi_trans *
+gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
+ u32 channel_id = event->chid;
+ struct gsi_channel *channel;
+ struct gsi_trans *trans;
u32 tre_offset;
u32 tre_index;
+ channel = &gsi->channel[channel_id];
+ if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
+ return NULL;
+
/* Event xfer_ptr records the TRE it's associated with */
tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
- return gsi_channel_trans_mapped(channel, tre_index);
+ trans = gsi_channel_trans_mapped(channel, tre_index);
+
+ if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
+ return NULL;
+
+ return trans;
}
/**
- * gsi_evt_ring_rx_update() - Record lengths of received data
- * @evt_ring: Event ring associated with channel that received packets
- * @index: Event index in ring reported by hardware
+ * gsi_evt_ring_update() - Update transaction state from hardware
+ * @gsi: GSI pointer
+ * @evt_ring_id: Event ring ID
+ * @index: Event index in ring reported by hardware
*
* Events for RX channels contain the actual number of bytes received into
* the buffer. Every event has a transaction associated with it, and here
* we update transactions to record their actual received lengths.
*
+ * When an event for a TX channel arrives, we use information in the
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
+ *
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
* the first entry in need of processing. The index provided is the
* first *unfilled* event in the ring (following the last filled one).
*
* Events are sequential within the event ring, and transactions are
- * sequential within the transaction pool.
+ * sequential within the transaction array.
*
* Note that @index always refers to an element *within* the event ring.
*/
-static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
- struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- struct gsi_trans_info *trans_info;
struct gsi_event *event_done;
struct gsi_event *event;
- struct gsi_trans *trans;
- u32 trans_count = 0;
- u32 byte_count = 0;
u32 event_avail;
u32 old_index;
- trans_info = &channel->trans_info;
-
- /* We'll start with the oldest un-processed event. RX channels
- * replenish receive buffers in single-TRE transactions, so we
- * can just map that event to its transaction. Transactions
- * associated with completion events are consecutive.
+ /* Starting with the oldest un-processed event, determine which
+ * transaction (and which channel) is associated with the event.
+ * For RX channels, update each completed transaction with the
+ * number of bytes that were actually received. For TX channels
+ * associated with a network device, report to the network stack
+ * the number of transfers and bytes this completion represents.
*/
old_index = ring->index;
event = gsi_ring_virt(ring, old_index);
- trans = gsi_event_trans(channel, event);
/* Compute the number of events to process before we wrap,
* and determine when we'll be done processing events.
@@ -1389,21 +1396,28 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
event_avail = ring->count - old_index % ring->count;
event_done = gsi_ring_virt(ring, index);
do {
- trans->len = __le16_to_cpu(event->len);
- byte_count += trans->len;
- trans_count++;
+ struct gsi_trans *trans;
+
+ trans = gsi_event_trans(gsi, event);
+ if (!trans)
+ return;
+
+ if (trans->direction == DMA_FROM_DEVICE)
+ trans->len = __le16_to_cpu(event->len);
+ else
+ gsi_trans_tx_completed(trans);
+
+ gsi_trans_move_complete(trans);
/* Move on to the next event and transaction */
if (--event_avail)
event++;
else
event = gsi_ring_virt(ring, 0);
- trans = gsi_trans_pool_next(&trans_info->pool, trans);
} while (event != event_done);
- /* We record RX bytes when they are received */
- channel->byte_count += byte_count;
- channel->trans_count += trans_count;
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
/* Initialize a ring, including allocating DMA memory for its entries */
@@ -1423,6 +1437,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
ring->addr = addr;
ring->count = count;
+ ring->index = 0;
return 0;
}
@@ -1493,22 +1508,16 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
return NULL;
/* Get the transaction for the latest completed event. */
- trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
+ trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
+ if (!trans)
+ return NULL;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
* the number of transactions and bytes this completion represents
* up the network stack.
*/
- if (channel->toward_ipa)
- gsi_channel_tx_update(channel, trans);
- else
- gsi_evt_ring_rx_update(evt_ring, index);
-
- gsi_trans_move_complete(trans);
-
- /* Tell the hardware we've handled these events */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
+ gsi_evt_ring_update(gsi, evt_ring_id, index);
return gsi_channel_trans_complete(channel);
}
@@ -2001,9 +2010,10 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-static bool gsi_channel_data_valid(struct gsi *gsi,
+static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
const struct ipa_gsi_endpoint_data *data)
{
+ const struct gsi_channel_data *channel_data;
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2019,10 +2029,24 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
return false;
}
- if (!data->channel.tlv_count ||
- data->channel.tlv_count > GSI_TLV_MAX) {
+ if (command && !data->toward_ipa) {
+ dev_err(dev, "command channel %u is not TX\n", channel_id);
+ return false;
+ }
+
+ channel_data = &data->channel;
+
+ if (!channel_data->tlv_count ||
+ channel_data->tlv_count > GSI_TLV_MAX) {
dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
- channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ channel_id, channel_data->tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
+ dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
+ channel_id, IPA_COMMAND_TRANS_TRE_MAX,
+ channel_data->tlv_count);
return false;
}
@@ -2031,22 +2055,22 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
* gsi_channel_tre_max() is computed, tre_count has to be almost
* twice the TLV FIFO size to satisfy this requirement.
*/
- if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
- channel_id, data->channel.tlv_count,
- data->channel.tre_count);
+ channel_id, channel_data->tlv_count,
+ channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.tre_count)) {
+ if (!is_power_of_2(channel_data->tre_count)) {
dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
- channel_id, data->channel.tre_count);
+ channel_id, channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.event_count)) {
+ if (!is_power_of_2(channel_data->event_count)) {
dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
- channel_id, data->channel.event_count);
+ channel_id, channel_data->event_count);
return false;
}
@@ -2062,7 +2086,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
u32 tre_count;
int ret;
- if (!gsi_channel_data_valid(gsi, data))
+ if (!gsi_channel_data_valid(gsi, command, data))
return -EINVAL;
/* Worst case we need an event for every outstanding TRE */
@@ -2080,7 +2104,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->tlv_count = data->channel.tlv_count;
+ channel->trans_tre_max = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -2295,13 +2319,5 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
/* Hardware limit is channel->tre_count - 1 */
- return channel->tre_count - (channel->tlv_count - 1);
-}
-
-/* Returns the maximum number of TREs in a single transaction for a channel */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
-{
- struct gsi_channel *channel = &gsi->channel[channel_id];
-
- return channel->tlv_count;
+ return channel->tre_count - (channel->trans_tre_max - 1);
}
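
The gsi.c hunks above fold the old gsi_channel_trans_tre_max() helper into a per-channel trans_tre_max field, so gsi_channel_tre_max() now computes the outstanding-TRE limit as tre_count - (trans_tre_max - 1). A minimal userspace sketch of that arithmetic (the example_* names are invented for illustration and are not driver code):

/* Why the outstanding-TRE limit is tre_count - (trans_tre_max - 1):
 * the hardware can use at most tre_count - 1 ring entries at once, and
 * reserving another trans_tre_max - 1 entries guarantees a maximal
 * transaction always fits when it is committed.
 */
#include <stdio.h>

static unsigned int example_channel_tre_max(unsigned int tre_count,
                                            unsigned int trans_tre_max)
{
        return tre_count - (trans_tre_max - 1);
}

int main(void)
{
        /* e.g. a 4096-entry ring with up to 8 TREs per transaction */
        printf("tre_max = %u\n", example_channel_tre_max(4096, 8));  /* 4089 */
        return 0;
}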
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 5d66116b46b0..23de5f67374c 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -48,12 +48,13 @@ struct gsi_ring {
*
* A channel ring consists of TRE entries filled by the AP and passed
* to the hardware for processing. For a channel ring, the ring index
- * identifies the next unused entry to be filled by the AP.
+ * identifies the next unused entry to be filled by the AP. In this
+ * case the initial value is assumed by hardware to be 0.
*
* An event ring consists of event structures filled by the hardware
* and passed to the AP. For event rings, the ring index identifies
* the next ring entry that is not known to have been filled by the
- * hardware.
+ * hardware. The initial value used is arbitrary (so we use 0).
*/
u32 index;
};
@@ -82,13 +83,15 @@ struct gsi_trans_pool {
struct gsi_trans_info {
atomic_t tre_avail; /* TREs available for allocation */
struct gsi_trans_pool pool; /* transaction pool */
+ struct gsi_trans **map; /* TRE -> transaction map */
+
struct gsi_trans_pool sg_pool; /* scatterlist pool */
struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
- struct gsi_trans **map; /* TRE -> transaction map */
spinlock_t spinlock; /* protects updates to the lists */
struct list_head alloc; /* allocated, not committed */
- struct list_head pending; /* committed, awaiting completion */
+ struct list_head committed; /* committed, awaiting doorbell */
+ struct list_head pending; /* pending, awaiting completion */
struct list_head complete; /* completed, awaiting poll */
struct list_head polled; /* returned by gsi_channel_poll_one() */
};
@@ -110,16 +113,16 @@ struct gsi_channel {
bool toward_ipa;
bool command; /* AP command TX channel or not */
- u8 tlv_count; /* # entries in TLV FIFO */
+ u8 trans_tre_max; /* max TREs in a transaction */
u16 tre_count;
u16 event_count;
struct gsi_ring tre_ring;
u32 evt_ring_id;
+ /* The following counts are used only for TX endpoints */
u64 byte_count; /* total # bytes transferred */
u64 trans_count; /* total # transactions */
- /* The following counts are used only for TX endpoints */
u64 queued_byte_count; /* last reported queued byte count */
u64 queued_trans_count; /* ...and queued trans count */
u64 compl_byte_count; /* last reported completed byte count */
@@ -184,20 +187,11 @@ void gsi_teardown(struct gsi *gsi);
* @gsi: GSI pointer
* @channel_id: Channel whose limit is to be returned
*
- * Return: The maximum number of TREs oustanding on the channel
+ * Return: The maximum number of TREs outstanding on the channel
*/
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
/**
- * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
- * @gsi: GSI pointer
- * @channel_id: Channel whose limit is to be returned
- *
- * Return: The maximum TRE count per transaction on the channel
- */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
-
-/**
* gsi_channel_start() - Start an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to start
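
The gsi.h changes above add a "committed" list between allocation and pending, so a transaction now moves alloc -> committed -> pending -> complete -> polled, with the doorbell marking the committed-to-pending step. A rough sketch of that lifecycle, using an invented enum rather than anything that exists in the driver:

/* Illustrative state progression mirroring the gsi_trans_info lists;
 * the enum and helper below are invented for this sketch only.
 */
#include <stdio.h>

enum example_trans_state {
        TRANS_ALLOCATED,        /* allocated, not yet committed */
        TRANS_COMMITTED,        /* committed, awaiting doorbell */
        TRANS_PENDING,          /* doorbell rung, awaiting completion */
        TRANS_COMPLETE,         /* completed, awaiting poll */
        TRANS_POLLED,           /* returned by polling */
};

static enum example_trans_state example_next(enum example_trans_state s)
{
        return s == TRANS_POLLED ? TRANS_POLLED : s + 1;
}

int main(void)
{
        static const char * const names[] = {
                "allocated", "committed", "pending", "complete", "polled",
        };
        enum example_trans_state s = TRANS_ALLOCATED;

        while (s != TRANS_POLLED) {
                s = example_next(s);
                printf("-> %s\n", names[s]);
        }
        return 0;
}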
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index ea333a244cf5..0b2516fa21b5 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -16,9 +16,6 @@ struct gsi_channel;
#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
-/* Return the entry that follows one provided in a transaction pool */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
-
/**
* gsi_trans_move_complete() - Mark a GSI transaction completed
* @trans: Transaction to commit
@@ -105,14 +102,21 @@ void gsi_channel_doorbell(struct gsi_channel *channel);
void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
/**
- * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
- * @channel: Channel whose bytes have been queued
+ * gsi_trans_tx_committed() - Record bytes committed for transmit
+ * @trans: TX endpoint transaction being committed
+ *
+ * Report that a TX transaction has been committed. It updates some
+ * statistics used to manage transmit rates.
+ */
+void gsi_trans_tx_committed(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_tx_queued() - Report a queued TX channel transaction
+ * @trans: Transaction being passed to hardware
*
- * This arranges for the the number of transactions and bytes for
- * transfer that have been queued to hardware to be reported. It
- * passes this information up the network stack so it can be used to
- * throttle transmissions.
+ * Report to the network stack that a TX transaction is being supplied
+ * to the hardware.
*/
-void gsi_channel_tx_queued(struct gsi_channel *channel);
+void gsi_trans_tx_queued(struct gsi_trans *trans);
#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 55f8fe7d2668..18e7e8c405be 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -214,26 +214,14 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
return pool->base + offset;
}
-/* Return the pool element that immediately follows the one given.
- * This only works done if elements are allocated one at a time.
- */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+/* Map a TRE ring entry index to the transaction it is associated with */
+static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
- void *end = pool->base + pool->count * pool->size;
-
- WARN_ON(element < pool->base);
- WARN_ON(element >= end);
- WARN_ON(pool->max_alloc != 1);
-
- element += pool->size;
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- return element < end ? element : pool->base;
-}
+ /* The completion event will indicate the last TRE used */
+ index += trans->used_count - 1;
-/* Map a given ring entry index to the transaction associated with it */
-static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
- struct gsi_trans *trans)
-{
/* Note: index *must* be used modulo the ring count here */
channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
@@ -253,15 +241,31 @@ struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
struct gsi_trans, links);
}
-/* Move a transaction from the allocated list to the pending list */
+/* Move a transaction from the allocated list to the committed list */
+static void gsi_trans_move_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->committed);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move transactions from the committed list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct list_head list;
spin_lock_bh(&trans_info->spinlock);
- list_move_tail(&trans->links, &trans_info->pending);
+ /* Move this transaction and all predecessors to the pending list */
+ list_cut_position(&list, &trans_info->committed, &trans->links);
+ list_splice_tail(&list, &trans_info->pending);
spin_unlock_bh(&trans_info->spinlock);
}
@@ -340,7 +344,7 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
- if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
+ if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
@@ -351,14 +355,14 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the the transaction */
+ /* Allocate and initialize non-zero fields in the transaction */
trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
trans->gsi = gsi;
trans->channel_id = channel_id;
- trans->tre_count = tre_count;
+ trans->rsvd_count = tre_count;
init_completion(&trans->completion);
- /* Allocate the scatterlist and (if requested) info entries. */
+ /* Allocate the scatterlist */
trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
sg_init_marker(trans->sgl, tre_count);
@@ -400,22 +404,23 @@ void gsi_trans_free(struct gsi_trans *trans)
if (!last)
return;
- ipa_gsi_trans_release(trans);
+ if (trans->used_count)
+ ipa_gsi_trans_release(trans);
/* Releasing the reserved TREs implicitly frees the sgl[] and
* (if present) info[] arrays, plus the transaction itself.
*/
- gsi_trans_tre_release(trans_info, trans->tre_count);
+ gsi_trans_tre_release(trans_info, trans->rsvd_count);
}
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
- u32 which = trans->used++;
+ u32 which = trans->used_count++;
struct scatterlist *sg;
- WARN_ON(which >= trans->tre_count);
+ WARN_ON(which >= trans->rsvd_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -446,9 +451,9 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
sg_set_page(sg, page, size, offset);
@@ -456,7 +461,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
if (!ret)
return -ENOMEM;
- trans->used++; /* Transaction now owns the (DMA mapped) page */
+ trans->used_count++; /* Transaction now owns the (DMA mapped) page */
return 0;
}
@@ -465,25 +470,26 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
struct scatterlist *sg = &trans->sgl[0];
- u32 used;
+ u32 used_count;
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (ret < 0)
return ret;
- used = ret;
+ used_count = ret;
- ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
if (!ret)
return -ENOMEM;
- trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+ /* Transaction now owns the (DMA mapped) skb */
+ trans->used_count += used_count;
return 0;
}
@@ -549,7 +555,7 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
struct gsi_tre *dest_tre;
@@ -559,7 +565,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
u32 avail;
u32 i;
- WARN_ON(!trans->used);
+ WARN_ON(!trans->used_count);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
@@ -567,43 +573,39 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
* transfer request, whose opcode is IPA_CMD_NONE.
*/
cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
- avail = ring->count - ring->index % ring->count;
- dest_tre = gsi_ring_virt(ring, ring->index);
- for_each_sg(trans->sgl, sg, trans->used, i) {
- bool last_tre = i == trans->used - 1;
+ avail = tre_ring->count - tre_ring->index % tre_ring->count;
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
+ for_each_sg(trans->sgl, sg, trans->used_count, i) {
+ bool last_tre = i == trans->used_count - 1;
dma_addr_t addr = sg_dma_address(sg);
u32 len = sg_dma_len(sg);
byte_count += len;
if (!avail--)
- dest_tre = gsi_ring_virt(ring, 0);
+ dest_tre = gsi_ring_virt(tre_ring, 0);
if (cmd_opcode)
opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
}
- ring->index += trans->used;
-
- if (channel->toward_ipa) {
- /* We record TX bytes when they are sent */
- trans->len = byte_count;
- trans->trans_count = channel->trans_count;
- trans->byte_count = channel->byte_count;
- channel->trans_count++;
- channel->byte_count += byte_count;
- }
+ /* Associate the TRE with the transaction */
+ gsi_trans_map(trans, tre_ring->index);
- /* Associate the last TRE with the transaction */
- gsi_channel_trans_map(channel, ring->index - 1, trans);
+ tre_ring->index += trans->used_count;
- gsi_trans_move_pending(trans);
+ trans->len = byte_count;
+ if (channel->toward_ipa)
+ gsi_trans_tx_committed(trans);
+
+ gsi_trans_move_committed(trans);
/* Ring doorbell if requested, or if all TREs are allocated */
if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
/* Report what we're handing off to hardware for TX channels */
if (channel->toward_ipa)
- gsi_channel_tx_queued(channel);
+ gsi_trans_tx_queued(trans);
+ gsi_trans_move_pending(trans);
gsi_channel_doorbell(channel);
}
}
@@ -611,7 +613,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
- if (trans->used)
+ if (trans->used_count)
__gsi_trans_commit(trans, ring_db);
else
gsi_trans_free(trans);
@@ -620,7 +622,7 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
- if (!trans->used)
+ if (!trans->used_count)
goto out_trans_free;
refcount_inc(&trans->refcount);
@@ -638,7 +640,7 @@ void gsi_trans_complete(struct gsi_trans *trans)
{
/* If the entire SGL was mapped when added, unmap it now */
if (trans->direction != DMA_NONE)
- dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
trans->direction);
ipa_gsi_trans_complete(trans);
@@ -675,7 +677,7 @@ void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
struct gsi_trans_info *trans_info;
struct gsi_tre *dest_tre;
@@ -685,12 +687,12 @@ int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
if (!gsi_trans_tre_reserve(trans_info, 1))
return -EBUSY;
- /* Now fill the the reserved TRE and tell the hardware */
+ /* Now fill the reserved TRE and tell the hardware */
- dest_tre = gsi_ring_virt(ring, ring->index);
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
- ring->index++;
+ tre_ring->index++;
gsi_channel_doorbell(channel);
return 0;
@@ -708,6 +710,7 @@ void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 tre_count = channel->tre_count;
struct gsi_trans_info *trans_info;
u32 tre_max;
int ret;
@@ -715,68 +718,66 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
/* Ensure the size of a channel element is what's expected */
BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
- /* The map array is used to determine what transaction is associated
- * with a TRE that the hardware reports has completed. We need one
- * map entry per TRE.
- */
trans_info = &channel->trans_info;
- trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
- GFP_KERNEL);
- if (!trans_info->map)
- return -ENOMEM;
- /* We can't use more TREs than there are available in the ring.
- * This limits the number of transactions that can be oustanding.
- * Worst case is one TRE per transaction (but we actually limit
- * it to something a little less than that). We allocate resources
- * for transactions (including transaction structures) based on
- * this maximum number.
+ /* The tre_avail field is what ultimately limits the number of
+ * outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient
+ * for what the transaction might need.
*/
tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+ atomic_set(&trans_info->tre_avail, tre_max);
- /* Transactions are allocated one at a time. */
+ /* We can't use more TREs than the number available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). By allocating a
+ * power-of-two number of transactions we can use an index
+ * modulo that number to determine the next one that's free.
+ * Transactions are allocated one at a time.
+ */
ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
tre_max, 1);
if (ret)
- goto err_kfree;
+ return -ENOMEM;
+
+ /* A completion event contains a pointer to the TRE that caused
+ * the event (which will be the last one used by the transaction).
+ * Each entry in this map records the transaction associated
+ * with a corresponding completed TRE.
+ */
+ trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map) {
+ ret = -ENOMEM;
+ goto err_trans_free;
+ }
/* A transaction uses a scatterlist array to represent the data
* transfers implemented by the transaction. Each scatterlist
* element is used to fill a single TRE when the transaction is
* committed. So we need as many scatterlist elements as the
* maximum number of TREs that can be outstanding.
- *
- * All TREs in a transaction must fit within the channel's TLV FIFO.
- * A transaction on a channel can allocate as many TREs as that but
- * no more.
*/
ret = gsi_trans_pool_init(&trans_info->sg_pool,
sizeof(struct scatterlist),
- tre_max, channel->tlv_count);
+ tre_max, channel->trans_tre_max);
if (ret)
- goto err_trans_pool_exit;
-
- /* Finally, the tre_avail field is what ultimately limits the number
- * of outstanding transactions and their resources. A transaction
- * allocation succeeds only if the TREs available are sufficient for
- * what the transaction might need. Transaction resource pools are
- * sized based on the maximum number of outstanding TREs, so there
- * will always be resources available if there are TREs available.
- */
- atomic_set(&trans_info->tre_avail, tre_max);
+ goto err_map_free;
spin_lock_init(&trans_info->spinlock);
INIT_LIST_HEAD(&trans_info->alloc);
+ INIT_LIST_HEAD(&trans_info->committed);
INIT_LIST_HEAD(&trans_info->pending);
INIT_LIST_HEAD(&trans_info->complete);
INIT_LIST_HEAD(&trans_info->polled);
return 0;
-err_trans_pool_exit:
- gsi_trans_pool_exit(&trans_info->pool);
-err_kfree:
+err_map_free:
kfree(trans_info->map);
+err_trans_free:
+ gsi_trans_pool_exit(&trans_info->pool);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
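
In the gsi_trans.c hunks above, gsi_trans_map() records a transaction under the index of the last TRE it uses, since that is the TRE a completion event will point back to. A standalone sketch of the index arithmetic, with invented example_* types in place of the driver structures:

/* Map the last TRE of a transaction to the transaction itself:
 * slot = (first_index + used - 1) modulo the ring size.
 */
#include <stdio.h>

#define EXAMPLE_RING_COUNT 8u

struct example_trans {
        int id;
};

static struct example_trans *example_map[EXAMPLE_RING_COUNT];

static void example_trans_map(struct example_trans *trans, unsigned int index,
                              unsigned int used)
{
        /* The completion event will indicate the last TRE used */
        index += used - 1;

        /* The index must be used modulo the ring count */
        example_map[index % EXAMPLE_RING_COUNT] = trans;
}

int main(void)
{
        struct example_trans t = { .id = 42 };

        /* A 3-TRE transaction starting at ring index 6 wraps to slot 0 */
        example_trans_map(&t, 6, 3);
        printf("slot 0 -> transaction %d\n", example_map[0]->id);
        return 0;
}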
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 020c3b32de1d..7084507830c2 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -33,9 +33,9 @@ struct gsi_trans_pool;
* @gsi: GSI pointer
* @channel_id: Channel number transaction is associated with
* @cancelled: If set by the core code, transaction was cancelled
- * @tre_count: Number of TREs reserved for this transaction
- * @used: Number of TREs *used* (could be less than tre_count)
- * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @rsvd_count: Number of TREs reserved for this transaction
+ * @used_count: Number of TREs *used* (could be less than rsvd_count)
+ * @len: Number of bytes sent or received by the transaction
* @data: Preserved but not touched by the core transaction code
* @cmd_opcode: Array of command opcodes (command channel only)
* @sgl: An array of scatter/gather entries managed by core code
@@ -45,8 +45,9 @@ struct gsi_trans_pool;
* @byte_count: TX channel byte count recorded when transaction committed
* @trans_count: Channel transaction count when committed (for BQL accounting)
*
- * The size used for some fields in this structure were chosen to ensure
- * the full structure size is no larger than 128 bytes.
+ * The @len field is set when the transaction is committed. For RX
+ * transactions it is updated later to reflect the actual number of bytes
+ * received.
*/
struct gsi_trans {
struct list_head links; /* gsi_channel lists */
@@ -56,8 +57,8 @@ struct gsi_trans {
bool cancelled; /* true if transaction was cancelled */
- u8 tre_count; /* # TREs requested */
- u8 used; /* # entries used in sgl[] */
+ u8 rsvd_count; /* # TREs requested */
+ u8 used_count; /* # entries used in sgl[] */
u32 len; /* total # bytes across sgl[] */
union {
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index e58cd4478fd3..6dea40259b60 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -353,13 +353,13 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
- /* Even though command payloads are allocated one at a time,
- * a single transaction can require up to tlv_count of them,
- * so we treat them as if that many can be allocated at once.
+ /* Command payloads are allocated one at a time, but a single
+ * transaction can require up to the maximum supported by the
+ * channel; treat them as if they were allocated all at once.
*/
return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
sizeof(union ipa_cmd_payload),
- tre_max, channel->tlv_count);
+ tre_max, channel->trans_tre_max);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index d3b3255ac3d1..66d2bfdf9e42 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1020,7 +1020,7 @@ int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
* If not, see if we can linearize it before giving up.
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (nr_frags > endpoint->skb_frag_max) {
if (skb_linearize(skb))
return -E2BIG;
nr_frags = 0;
@@ -1368,18 +1368,14 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
}
}
-/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
-static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
-}
-
-/* Complete transaction initiated in ipa_endpoint_replenish_one() */
-static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
struct page *page;
+ if (endpoint->toward_ipa)
+ return;
+
if (trans->cancelled)
goto done;
@@ -1393,15 +1389,6 @@ done:
ipa_endpoint_replenish(endpoint);
}
-void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
- if (endpoint->toward_ipa)
- ipa_endpoint_tx_complete(endpoint, trans);
- else
- ipa_endpoint_rx_complete(endpoint, trans);
-}
-
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct gsi_trans *trans)
{
@@ -1721,7 +1708,7 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
if (endpoint->ee_id != GSI_EE_AP)
return;
- endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
if (!endpoint->toward_ipa) {
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
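
The ipa_endpoint.c change above replaces the endpoint's copy of the per-transaction TRE limit with skb_frag_max, one less than trans_tre_max because a TRE is needed for the skb's linear data, and linearizes any skb with more fragments than that. A small sketch of just the decision, not the skb handling (names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool example_needs_linearize(unsigned int nr_frags,
                                    unsigned int trans_tre_max)
{
        unsigned int skb_frag_max = trans_tre_max - 1;

        return nr_frags > skb_frag_max;
}

int main(void)
{
        printf("%d\n", example_needs_linearize(7, 8));  /* 0: fits as-is */
        printf("%d\n", example_needs_linearize(8, 8));  /* 1: must linearize */
        return 0;
}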
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 01790c60bee8..28e0a7386fd7 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -142,7 +142,7 @@ enum ipa_replenish_flag {
* @endpoint_id: IPA endpoint number
* @toward_ipa: Endpoint direction (true = TX, false = RX)
* @config: Default endpoint configuration
- * @trans_tre_max: Maximum number of TRE descriptors per transaction
+ * @skb_frag_max: Maximum allowed number of TX SKB fragments
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
* @replenish_flags: Replenishing state flags
@@ -157,7 +157,7 @@ struct ipa_endpoint {
bool toward_ipa;
struct ipa_endpoint_config config;
- u32 trans_tre_max;
+ u32 skb_frag_max; /* Used for netdev TX only */
u32 evt_ring_id;
/* Net device this endpoint is associated with, if any */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 3757ce3de2c5..32962d885acd 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -836,6 +836,8 @@ out_power_put:
kfree(ipa);
ipa_power_exit(power);
+ dev_info(dev, "IPA driver removed");
+
return 0;
}
@@ -851,6 +853,7 @@ static void ipa_shutdown(struct platform_device *pdev)
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,
+ &ipa_endpoint_id_attribute_group,
&ipa_modem_attribute_group,
NULL,
};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 3233d145fd87..495e85abe50b 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {
/* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
* QMI response, but contains other information as well. Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
* ignore any other data that might be returned.
*/
struct ipa_init_modem_driver_rsp {
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..6f35438cda89 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -48,7 +48,7 @@ struct ipa;
*
* The offset of registers related to resource types is computed by a macro
* that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
+ * which is a member of the ipa_resource_type_src enumerated type for
* source endpoint resources or the ipa_resource_type_dst enumerated type
* for destination endpoint resources.
*
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index ff61dbdd70d8..c0c8641cdd14 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -96,38 +96,71 @@ const struct attribute_group ipa_feature_attribute_group = {
.attrs = ipa_feature_attrs,
};
-static ssize_t
-ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+static umode_t ipa_endpoint_id_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+ struct ipa *ipa = dev_get_drvdata(kobj_to_dev(kobj));
+ struct device_attribute *dev_attr;
+ struct dev_ext_attribute *ea;
+ bool visible;
+
+ /* An endpoint id attribute is only visible if it's defined */
+ dev_attr = container_of(attr, struct device_attribute, attr);
+ ea = container_of(dev_attr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+ visible = !!ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
+
+ return visible ? attr->mode : 0;
}
-static ssize_t rx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t endpoint_id_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ struct ipa_endpoint *endpoint;
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(attr, struct dev_ext_attribute, attr);
+ endpoint = ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+ return sysfs_emit(buf, "%u\n", endpoint->endpoint_id);
}
-static DEVICE_ATTR_RO(rx_endpoint_id);
+#define ENDPOINT_ID_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_endpoint_id_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
-static ssize_t tx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ipa *ipa = dev_get_drvdata(dev);
+ENDPOINT_ID_ATTR(modem_rx, IPA_ENDPOINT_AP_MODEM_RX);
+ENDPOINT_ID_ATTR(modem_tx, IPA_ENDPOINT_AP_MODEM_TX);
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
-}
+static struct attribute *ipa_endpoint_id_attrs[] = {
+ &dev_attr_endpoint_id_modem_rx.attr.attr,
+ &dev_attr_endpoint_id_modem_tx.attr.attr,
+ NULL
+};
-static DEVICE_ATTR_RO(tx_endpoint_id);
+const struct attribute_group ipa_endpoint_id_attribute_group = {
+ .name = "endpoint_id",
+ .is_visible = ipa_endpoint_id_is_visible,
+ .attrs = ipa_endpoint_id_attrs,
+};
+
+/* Reuse endpoint ID attributes for the legacy modem endpoint IDs */
+#define MODEM_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_modem_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
+
+MODEM_ATTR(rx_endpoint_id, IPA_ENDPOINT_AP_MODEM_RX);
+MODEM_ATTR(tx_endpoint_id, IPA_ENDPOINT_AP_MODEM_TX);
static struct attribute *ipa_modem_attrs[] = {
- &dev_attr_rx_endpoint_id.attr,
- &dev_attr_tx_endpoint_id.attr,
- NULL
+ &dev_attr_modem_rx_endpoint_id.attr.attr,
+ &dev_attr_modem_tx_endpoint_id.attr.attr,
+ NULL,
};
const struct attribute_group ipa_modem_attribute_group = {
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index b34e5650bf8c..4a3ffd1e4e3f 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -10,6 +10,7 @@ struct attribute_group;
extern const struct attribute_group ipa_attribute_group;
extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_endpoint_id_attribute_group;
extern const struct attribute_group ipa_modem_attribute_group;
#endif /* _IPA_SYSFS_H_ */
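
The sysfs rework above keys every endpoint-id attribute off a shared show routine, stashing the endpoint name in a dev_ext_attribute's ->var and recovering it with container_of(); ipa_endpoint_id_is_visible() then hides attributes whose endpoint is not defined. A userspace sketch of that container_of() pattern, with simplified stand-in structures rather than the kernel definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct example_attribute {
        const char *name;
};

struct example_ext_attribute {
        struct example_attribute attr;
        void *var;                      /* per-attribute private data */
};

/* Shared show routine: recover the outer structure from the inner pointer */
static void example_show(struct example_attribute *attr)
{
        struct example_ext_attribute *ea;

        ea = container_of(attr, struct example_ext_attribute, attr);
        printf("%s -> endpoint name %ld\n", attr->name,
               (long)(intptr_t)ea->var);
}

int main(void)
{
        struct example_ext_attribute rx = {
                .attr = { .name = "modem_rx" },
                .var  = (void *)(intptr_t)1,    /* stands in for an enum value */
        };

        example_show(&rx.attr);
        return 0;
}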
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 3837c897832e..de94921cbef9 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -47,11 +47,11 @@ typedef enum {
} ipvl_hdr_type;
struct ipvl_pcpu_stats {
- u64 rx_pkts;
- u64 rx_bytes;
- u64 rx_mcast;
- u64 tx_pkts;
- u64 tx_bytes;
+ u64_stats_t rx_pkts;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_mcast;
+ u64_stats_t tx_pkts;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errs;
u32 tx_drps;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 6ffb27419e64..dfeb5b392e64 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -19,10 +19,10 @@ void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
u64_stats_update_begin(&pcptr->syncp);
- pcptr->rx_pkts++;
- pcptr->rx_bytes += len;
+ u64_stats_inc(&pcptr->rx_pkts);
+ u64_stats_add(&pcptr->rx_bytes, len);
if (mcast)
- pcptr->rx_mcast++;
+ u64_stats_inc(&pcptr->rx_mcast);
u64_stats_update_end(&pcptr->syncp);
} else {
this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index aa28a29e228c..49ba8a50dfb1 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -224,8 +224,8 @@ static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
u64_stats_update_begin(&pcptr->syncp);
- pcptr->tx_pkts++;
- pcptr->tx_bytes += skblen;
+ u64_stats_inc(&pcptr->tx_pkts);
+ u64_stats_add(&pcptr->tx_bytes, skblen);
u64_stats_update_end(&pcptr->syncp);
} else {
this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
@@ -300,11 +300,11 @@ static void ipvlan_get_stats64(struct net_device *dev,
pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
do {
strt= u64_stats_fetch_begin_irq(&pcptr->syncp);
- rx_pkts = pcptr->rx_pkts;
- rx_bytes = pcptr->rx_bytes;
- rx_mcast = pcptr->rx_mcast;
- tx_pkts = pcptr->tx_pkts;
- tx_bytes = pcptr->tx_bytes;
+ rx_pkts = u64_stats_read(&pcptr->rx_pkts);
+ rx_bytes = u64_stats_read(&pcptr->rx_bytes);
+ rx_mcast = u64_stats_read(&pcptr->rx_mcast);
+ tx_pkts = u64_stats_read(&pcptr->tx_pkts);
+ tx_bytes = u64_stats_read(&pcptr->tx_bytes);
} while (u64_stats_fetch_retry_irq(&pcptr->syncp,
strt));
@@ -315,8 +315,8 @@ static void ipvlan_get_stats64(struct net_device *dev,
s->tx_bytes += tx_bytes;
/* u32 values are updated without syncp protection. */
- rx_errs += pcptr->rx_errs;
- tx_drps += pcptr->tx_drps;
+ rx_errs += READ_ONCE(pcptr->rx_errs);
+ tx_drps += READ_ONCE(pcptr->tx_drps);
}
s->rx_errors = rx_errs;
s->rx_dropped = rx_errs;
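
The ipvlan hunks above convert the per-CPU counters to u64_stats_t, read them with u64_stats_read() inside the existing fetch_begin/fetch_retry loop, and use READ_ONCE() for the unsynchronized u32 error counters. A simplified, single-threaded sketch of the retry pattern behind that loop (the real helpers add memory barriers and, on 32-bit, a seqcount; none of that is modelled here):

#include <stdio.h>

struct example_stats {
        unsigned int seq;       /* even: stable, odd: write in progress */
        unsigned long long tx_bytes;
};

static unsigned int example_fetch_begin(const struct example_stats *s)
{
        return s->seq;
}

static int example_fetch_retry(const struct example_stats *s, unsigned int start)
{
        return (start & 1) || s->seq != start;
}

int main(void)
{
        struct example_stats s = { .seq = 2, .tx_bytes = 1500 };
        unsigned long long snapshot;
        unsigned int start;

        do {
                start = example_fetch_begin(&s);
                snapshot = s.tx_bytes;
        } while (example_fetch_retry(&s, start));

        printf("tx_bytes = %llu\n", snapshot);
        return 0;
}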
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 817577e713d7..ee6087e7b2bf 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -162,6 +162,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+ struct macsec_rx_sa *sa = NULL;
+ int an;
+
+ for (an = 0; an < MACSEC_NUM_AN; an++) {
+ sa = macsec_rxsa_get(rx_sc->sa[an]);
+ if (sa)
+ break;
+ }
+ return sa;
+}
+
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -243,6 +256,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
static bool send_sci(const struct macsec_secy *secy)
{
@@ -499,18 +513,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+ struct macsec_dev *macsec = macsec_priv(skb->dev);
+ struct macsec_secy *secy = &macsec->secy;
+ bool sci_present = macsec_skb_cb(skb)->has_sci;
+
+ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
+
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+ unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
- txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
- txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -523,8 +547,8 @@ static void count_tx(struct net_device *dev, int ret, int len)
struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, len);
u64_stats_update_end(&stats->syncp);
}
}
@@ -540,9 +564,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
- macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
- len = skb->len;
+ /* packet is encrypted/protected so tx_bytes must be calculated */
+ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+ macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -701,6 +726,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+ macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -742,15 +768,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_dropped++;
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
- rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
- rxsc_stats->stats.InOctetsValidated += skb->len;
+ rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -763,6 +791,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+ this_cpu_inc(rx_sa->stats->InPktsNotValid);
+ secy->netdev->stats.rx_errors++;
return false;
}
@@ -825,8 +855,8 @@ static void count_rx(struct net_device *dev, int len)
struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
}
@@ -855,9 +885,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
- len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1048,6 +1078,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
continue;
}
@@ -1157,6 +1188,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+ secy->netdev->stats.rx_errors++;
goto drop_nosa;
}
@@ -1167,11 +1199,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_errors++;
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1181,6 +1217,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1201,6 +1239,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
goto drop;
}
}
@@ -1229,6 +1268,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1236,7 +1276,6 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
- len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1278,6 +1317,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_errors++;
continue;
}
@@ -1697,7 +1737,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
return false;
if (attrs[MACSEC_SA_ATTR_PN] &&
- *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1753,7 +1793,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}
pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
- if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ if (tb_sa[MACSEC_SA_ATTR_PN] &&
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
rtnl_unlock();
@@ -1769,7 +1810,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -1842,7 +1883,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return 0;
cleanup:
- kfree(rx_sa);
+ macsec_rxsa_put(rx_sa);
rtnl_unlock();
return err;
}
@@ -1939,7 +1980,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2011,7 +2052,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -2085,7 +2126,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
secy->operational = was_operational;
- kfree(tx_sa);
+ macsec_txsa_put(tx_sa);
rtnl_unlock();
return err;
}
@@ -2293,7 +2334,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -3402,6 +3443,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
@@ -3412,7 +3454,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
- len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -3462,7 +3503,7 @@ static int macsec_dev_init(struct net_device *dev)
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
/* Get macsec's reference to real_dev */
- dev_hold_track(real_dev, &macsec->dev_tracker, GFP_KERNEL);
+ netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
return 0;
}
@@ -3660,6 +3701,7 @@ static void macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
+ s->rx_errors = dev->stats.rx_errors;
}
static int macsec_get_iflink(const struct net_device *dev)
@@ -3710,7 +3752,7 @@ static void macsec_free_netdev(struct net_device *dev)
free_percpu(macsec->secy.tx_sc.stats);
/* Get rid of the macsec's reference to real_dev */
- dev_put_track(macsec->real_dev, &macsec->dev_tracker);
+ netdev_put(macsec->real_dev, &macsec->dev_tracker);
}
static void macsec_setup(struct net_device *dev)
@@ -3745,9 +3787,6 @@ static int macsec_changelink_common(struct net_device *dev,
secy->operational = tx_sa && tx_sa->active;
}
- if (data[IFLA_MACSEC_WINDOW])
- secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
if (data[IFLA_MACSEC_ENCRYPT])
tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
@@ -3793,6 +3832,16 @@ static int macsec_changelink_common(struct net_device *dev,
}
}
+ if (data[IFLA_MACSEC_WINDOW]) {
+ secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+ /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+ * for XPN cipher suites */
+ if (secy->xpn &&
+ secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3822,7 +3871,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
ret = macsec_changelink_common(dev, data);
if (ret)
- return ret;
+ goto cleanup;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
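
Among the macsec.c fixes above, macsec_changelink_common() now validates the replay window against the IEEE 802.1AEbw-2013 10.7.8 limit of 2^30 - 1 when XPN cipher suites are in use, instead of accepting any u32 value. A plain-C sketch of that bound check (the EXAMPLE_* names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_XPN_MAX_REPLAY_WINDOW ((1u << 30) - 1)

static int example_set_replay_window(bool xpn, unsigned int window)
{
        if (xpn && window > EXAMPLE_XPN_MAX_REPLAY_WINDOW)
                return -1;      /* stands in for -EINVAL */
        return 0;
}

int main(void)
{
        printf("%d\n", example_set_replay_window(true, EXAMPLE_XPN_MAX_REPLAY_WINDOW));      /* 0 */
        printf("%d\n", example_set_replay_window(true, EXAMPLE_XPN_MAX_REPLAY_WINDOW + 1));  /* -1 */
        return 0;
}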
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index eff75beb1395..1080d6ebff63 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -575,8 +575,8 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->tx_packets++;
- pcpu_stats->tx_bytes += len;
+ u64_stats_inc(&pcpu_stats->tx_packets);
+ u64_stats_add(&pcpu_stats->tx_bytes, len);
u64_stats_update_end(&pcpu_stats->syncp);
} else {
this_cpu_inc(vlan->pcpu_stats->tx_dropped);
@@ -915,7 +915,7 @@ static int macvlan_init(struct net_device *dev)
port->count += 1;
/* Get macvlan's reference to lowerdev */
- dev_hold_track(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
+ netdev_hold(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
return 0;
}
@@ -949,11 +949,11 @@ static void macvlan_dev_get_stats64(struct net_device *dev,
p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
- rx_packets = p->rx_packets;
- rx_bytes = p->rx_bytes;
- rx_multicast = p->rx_multicast;
- tx_packets = p->tx_packets;
- tx_bytes = p->tx_bytes;
+ rx_packets = u64_stats_read(&p->rx_packets);
+ rx_bytes = u64_stats_read(&p->rx_bytes);
+ rx_multicast = u64_stats_read(&p->rx_multicast);
+ tx_packets = u64_stats_read(&p->tx_packets);
+ tx_bytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
@@ -964,8 +964,8 @@ static void macvlan_dev_get_stats64(struct net_device *dev,
/* rx_errors & tx_dropped are u32, updated
* without syncp protection.
*/
- rx_errors += p->rx_errors;
- tx_dropped += p->tx_dropped;
+ rx_errors += READ_ONCE(p->rx_errors);
+ tx_dropped += READ_ONCE(p->tx_dropped);
}
stats->rx_errors = rx_errors;
stats->rx_dropped = rx_errors;
@@ -1185,7 +1185,7 @@ static void macvlan_dev_free(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
/* Get rid of the macvlan's reference to lowerdev */
- dev_put_track(vlan->lowerdev, &vlan->dev_tracker);
+ netdev_put(vlan->lowerdev, &vlan->dev_tracker);
}
void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 1c1584fca632..3e79c2c51929 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -47,9 +47,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
* just fall back to poll mode
*/
if (rc == -EPROBE_DEFER)
- rc = driver_deferred_probe_check_state(&phy->mdio.dev);
- if (rc == -EPROBE_DEFER)
- return rc;
+ rc = -ENODEV;
if (rc > 0) {
phy->irq = rc;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ab8cd5551020..ddac61d79145 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -721,7 +721,7 @@ restart:
__netpoll_cleanup(&nt->np);
spin_lock_irqsave(&target_list_lock, flags);
- dev_put_track(nt->np.dev, &nt->np.dev_tracker);
+ netdev_put(nt->np.dev, &nt->np.dev_tracker);
nt->np.dev = NULL;
nt->enabled = false;
stopped = true;
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index a43820212932..50854265864d 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -351,10 +351,12 @@ nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
- nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].key)
return -ENOMEM;
- nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
@@ -496,7 +498,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
if (offmap->map.map_flags)
return -EINVAL;
- nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
if (!nmap)
return -ENOMEM;
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 25cb2e600d53..b5f4df1a07a3 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -72,16 +72,7 @@ new_port_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
- return -EBUSY;
-
- if (nsim_bus_dev->in_reload) {
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
- return -EBUSY;
- }
-
ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -102,16 +93,7 @@ del_port_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
- return -EBUSY;
-
- if (nsim_bus_dev->in_reload) {
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
- return -EBUSY;
- }
-
ret = nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -298,7 +280,6 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu
nsim_bus_dev->num_queues = num_queues;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
- mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 57a3ac893792..e88f783c297e 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -436,62 +436,62 @@ static int nsim_dev_resources_register(struct devlink *devlink)
int err;
/* Resources for IPv4 */
- err = devlink_resource_register(devlink, "IPv4", (u64)-1,
- NSIM_RESOURCE_IPV4,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
+ err = devl_resource_register(devlink, "IPv4", (u64)-1,
+ NSIM_RESOURCE_IPV4,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
if (err) {
pr_err("Failed to register IPv4 top resource\n");
goto out;
}
- err = devlink_resource_register(devlink, "fib", (u64)-1,
- NSIM_RESOURCE_IPV4_FIB,
- NSIM_RESOURCE_IPV4, &params);
+ err = devl_resource_register(devlink, "fib", (u64)-1,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB resource\n");
return err;
}
- err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV4, &params);
+ err = devl_resource_register(devlink, "fib-rules", (u64)-1,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB rules resource\n");
return err;
}
/* Resources for IPv6 */
- err = devlink_resource_register(devlink, "IPv6", (u64)-1,
- NSIM_RESOURCE_IPV6,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
+ err = devl_resource_register(devlink, "IPv6", (u64)-1,
+ NSIM_RESOURCE_IPV6,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
if (err) {
pr_err("Failed to register IPv6 top resource\n");
goto out;
}
- err = devlink_resource_register(devlink, "fib", (u64)-1,
- NSIM_RESOURCE_IPV6_FIB,
- NSIM_RESOURCE_IPV6, &params);
+ err = devl_resource_register(devlink, "fib", (u64)-1,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB resource\n");
return err;
}
- err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- NSIM_RESOURCE_IPV6, &params);
+ err = devl_resource_register(devlink, "fib-rules", (u64)-1,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB rules resource\n");
return err;
}
/* Resources for nexthops */
- err = devlink_resource_register(devlink, "nexthops", (u64)-1,
- NSIM_RESOURCE_NEXTHOPS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
+ err = devl_resource_register(devlink, "nexthops", (u64)-1,
+ NSIM_RESOURCE_NEXTHOPS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
out:
return err;
@@ -557,15 +557,15 @@ static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
struct devlink *devlink)
{
nsim_dev->dummy_region =
- devlink_region_create(devlink, &dummy_region_ops,
- NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
- NSIM_DEV_DUMMY_REGION_SIZE);
+ devl_region_create(devlink, &dummy_region_ops,
+ NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
+ NSIM_DEV_DUMMY_REGION_SIZE);
return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
}
static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
{
- devlink_region_destroy(nsim_dev->dummy_region);
+ devl_region_destroy(nsim_dev->dummy_region);
}
static int
@@ -832,7 +832,11 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
/* For each running port and enabled packet trap, generate a UDP
* packet with a random 5-tuple and report it.
*/
- devl_lock(priv_to_devlink(nsim_dev));
+ if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
+ return;
+ }
+
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
@@ -880,18 +884,18 @@ static int nsim_dev_traps_init(struct devlink *devlink)
nsim_trap_data->nsim_dev = nsim_dev;
nsim_dev->trap_data = nsim_trap_data;
- err = devlink_trap_policers_register(devlink, nsim_trap_policers_arr,
- policers_count);
+ err = devl_trap_policers_register(devlink, nsim_trap_policers_arr,
+ policers_count);
if (err)
goto err_trap_policers_cnt_free;
- err = devlink_trap_groups_register(devlink, nsim_trap_groups_arr,
- ARRAY_SIZE(nsim_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
if (err)
goto err_trap_policers_unregister;
- err = devlink_traps_register(devlink, nsim_traps_arr,
- ARRAY_SIZE(nsim_traps_arr), NULL);
+ err = devl_traps_register(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr), NULL);
if (err)
goto err_trap_groups_unregister;
@@ -903,11 +907,11 @@ static int nsim_dev_traps_init(struct devlink *devlink)
return 0;
err_trap_groups_unregister:
- devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
- ARRAY_SIZE(nsim_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
err_trap_policers_unregister:
- devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
- ARRAY_SIZE(nsim_trap_policers_arr));
+ devl_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
err_trap_policers_cnt_free:
kfree(nsim_trap_data->trap_policers_cnt_arr);
err_trap_items_free:
@@ -923,12 +927,12 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
/* caution, trap work takes devlink lock */
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
- devlink_traps_unregister(devlink, nsim_traps_arr,
- ARRAY_SIZE(nsim_traps_arr));
- devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
- ARRAY_SIZE(nsim_trap_groups_arr));
- devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
- ARRAY_SIZE(nsim_trap_policers_arr));
+ devl_traps_unregister(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr));
+ devl_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ devl_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
kfree(nsim_dev->trap_data->trap_policers_cnt_arr);
kfree(nsim_dev->trap_data->trap_items_arr);
kfree(nsim_dev->trap_data);
@@ -943,24 +947,16 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- struct nsim_bus_dev *nsim_bus_dev;
-
- nsim_bus_dev = nsim_dev->nsim_bus_dev;
- if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
- return -EOPNOTSUPP;
if (nsim_dev->dont_allow_reload) {
/* For testing purposes, user set debugfs dont_allow_reload
* value to true. So forbid it.
*/
NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return -EOPNOTSUPP;
}
- nsim_bus_dev->in_reload = true;
nsim_dev_reload_destroy(nsim_dev);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return 0;
}
@@ -969,25 +965,18 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- struct nsim_bus_dev *nsim_bus_dev;
int ret;
- nsim_bus_dev = nsim_dev->nsim_bus_dev;
- mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
- nsim_bus_dev->in_reload = false;
-
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
* value to true. Fail right away.
*/
NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return -EINVAL;
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
ret = nsim_dev_reload_create(nsim_dev, extack);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret;
}
@@ -1434,11 +1423,9 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
{
struct nsim_dev_port *nsim_dev_port, *tmp;
- devl_lock(priv_to_devlink(nsim_dev));
list_for_each_entry_safe(nsim_dev_port, tmp,
&nsim_dev->port_list, list)
__nsim_dev_port_del(nsim_dev_port);
- devl_unlock(priv_to_devlink(nsim_dev));
}
static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
@@ -1447,9 +1434,7 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < port_count; i++) {
- devl_lock(priv_to_devlink(nsim_dev));
err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
- devl_unlock(priv_to_devlink(nsim_dev));
if (err)
goto err_port_del_all;
}
@@ -1537,6 +1522,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_bus_dev->initial_net, &nsim_bus_dev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
@@ -1555,7 +1541,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
GFP_KERNEL | __GFP_NOWARN);
if (!nsim_dev->vfconfigs) {
err = -ENOMEM;
- goto err_devlink_free;
+ goto err_devlink_unlock;
}
err = nsim_dev_resources_register(devlink);
@@ -1608,6 +1594,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -1631,10 +1618,11 @@ err_params_unregister:
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
err_vfc_free:
kfree(nsim_dev->vfconfigs);
-err_devlink_free:
+err_devlink_unlock:
+ devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
return err;
@@ -1648,13 +1636,11 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
return;
debugfs_remove(nsim_dev->take_snapshot);
- devl_lock(devlink);
if (nsim_dev_get_vfs(nsim_dev)) {
nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
if (nsim_esw_mode_is_switchdev(nsim_dev))
nsim_esw_legacy_enable(nsim_dev, NULL);
}
- devl_unlock(devlink);
nsim_dev_port_del_all(nsim_dev);
nsim_dev_hwstats_exit(nsim_dev);
@@ -1671,14 +1657,16 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
struct devlink *devlink = priv_to_devlink(nsim_dev);
devlink_unregister(devlink);
+ devl_lock(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
kfree(nsim_dev->vfconfigs);
+ devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index c8f398f5bc5b..a1f91ff8ec56 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -54,6 +54,7 @@ struct nsim_fib_data {
struct rhashtable nexthop_ht;
struct devlink *devlink;
struct work_struct fib_event_work;
+ struct work_struct fib_flush_work;
struct list_head fib_event_queue;
spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
struct mutex nh_lock; /* Protects NH HT */
@@ -61,6 +62,7 @@ struct nsim_fib_data {
bool fail_route_offload;
bool fail_res_nexthop_group_replace;
bool fail_nexthop_bucket_replace;
+ bool fail_route_delete;
};
struct nsim_fib_rt_key {
@@ -914,6 +916,10 @@ static int nsim_fib4_prepare_event(struct fib_notifier_info *info,
}
break;
case FIB_EVENT_ENTRY_DEL:
+ if (data->fail_route_delete) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
+ return -EINVAL;
+ }
nsim_fib_account(&data->ipv4.fib, false);
break;
}
@@ -952,6 +958,11 @@ static int nsim_fib6_prepare_event(struct fib_notifier_info *info,
}
break;
case FIB_EVENT_ENTRY_DEL:
+ if (data->fail_route_delete) {
+ err = -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
+ goto err_fib6_event_fini;
+ }
nsim_fib_account(&data->ipv6.fib, false);
break;
}
@@ -978,7 +989,7 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
if (!fib_event)
- return NOTIFY_BAD;
+ goto err_fib_event_alloc;
fib_event->data = data;
fib_event->event = event;
@@ -1006,6 +1017,9 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
err_fib_prepare_event:
kfree(fib_event);
+err_fib_event_alloc:
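+ /* The deletion event could not be queued, so the FIB mirror would go
+ * stale; a full flush keeps it consistent with the kernel FIB.
+ */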
+ if (event == FIB_EVENT_ENTRY_DEL)
+ schedule_work(&data->fib_flush_work);
return NOTIFY_BAD;
}
@@ -1453,7 +1467,7 @@ static void nsim_fib_set_max_all(struct nsim_fib_data *data,
int err;
u64 val;
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ err = devl_resource_size_get(devlink, res_ids[i], &val);
if (err)
val = (u64) -1;
nsim_fib_set_max(data, res_ids[i], val);
@@ -1483,6 +1497,24 @@ static void nsim_fib_event_work(struct work_struct *work)
mutex_unlock(&data->fib_lock);
}
+static void nsim_fib_flush_work(struct work_struct *work)
+{
+ struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
+ fib_flush_work);
+ struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+ /* Process pending work. */
+ flush_work(&data->fib_event_work);
+
+ mutex_lock(&data->fib_lock);
+ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_rt_free(fib_rt, data);
+ }
+ mutex_unlock(&data->fib_lock);
+}
+
static int
nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
{
@@ -1504,6 +1536,10 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir,
data, &nsim_nexthop_bucket_activity_fops);
+
+ data->fail_route_delete = false;
+ debugfs_create_bool("fail_route_delete", 0600, data->ddir,
+ &data->fail_route_delete);
return 0;
}
@@ -1541,6 +1577,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
goto err_rhashtable_nexthop_destroy;
INIT_WORK(&data->fib_event_work, nsim_fib_event_work);
+ INIT_WORK(&data->fib_flush_work, nsim_fib_flush_work);
INIT_LIST_HEAD(&data->fib_event_queue);
spin_lock_init(&data->fib_event_queue_lock);
@@ -1562,31 +1599,32 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
goto err_nexthop_nb_unregister;
}
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_fib_ipv4_resource_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_fib_ipv4_rules_res_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_fib_ipv6_resource_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_fib_ipv6_rules_res_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_NEXTHOPS,
- nsim_fib_nexthops_res_occ_get,
- data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_fib_ipv4_resource_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_fib_ipv4_rules_res_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_fib_ipv6_resource_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_fib_ipv6_rules_res_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_NEXTHOPS,
+ nsim_fib_nexthops_res_occ_get,
+ data);
return data;
err_nexthop_nb_unregister:
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
err_rhashtable_fib_destroy:
+ cancel_work_sync(&data->fib_flush_work);
flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
@@ -1604,18 +1642,19 @@ err_data_free:
void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
{
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_NEXTHOPS);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV6_FIB);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV4_FIB);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_NEXTHOPS);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
+ cancel_work_sync(&data->fib_flush_work);
flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 0b122872b2c9..7d8ed8d8df5c 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -376,9 +376,6 @@ struct nsim_bus_dev {
*/
unsigned int max_vfs;
unsigned int num_vfs;
- /* Lock for devlink->reload_enabled in netdevsim module */
- struct mutex nsim_bus_reload_lock;
- bool in_reload;
bool init;
};
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 22ba7b0b476d..6289b7c765f1 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -6,8 +6,8 @@
menu "PCS device drivers"
config PCS_XPCS
- tristate "Synopsys DesignWare XPCS controller"
- depends on MDIO_DEVICE && MDIO_BUS
+ tristate
+ select PHYLINK
help
This module provides helper functions for Synopsys DesignWare XPCS
controllers.
@@ -18,4 +18,12 @@ config PCS_LYNX
This module provides helpers to phylink for managing the Lynx PCS
which is part of the Layerscape and QorIQ Ethernet SERDES.
+config PCS_RZN1_MIIC
+ tristate "Renesas RZ/N1 MII converter"
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ help
+ This module provides a driver for the MII converter that is available
+ on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
+ pass-through mode for MII.
+
endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index 0603d469bd57..0ff5388fcdea 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -5,3 +5,4 @@ pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
+obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index fd3445374955..7d5fc7f54b2f 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -71,12 +71,10 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
struct phylink_link_state *state)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
int bmsr, lpa;
- bmsr = mdiobus_read(bus, addr, MII_BMSR);
- lpa = mdiobus_read(bus, addr, MII_LPA);
+ bmsr = mdiodev_read(pcs, MII_BMSR);
+ lpa = mdiodev_read(pcs, MII_LPA);
if (bmsr < 0 || lpa < 0) {
state->link = false;
return;
@@ -124,57 +122,39 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
state->link, state->an_enabled, state->an_complete);
}
-static int lynx_pcs_config_1000basex(struct mdio_device *pcs,
- unsigned int mode,
- const unsigned long *advertising)
+static int lynx_pcs_config_giga(struct mdio_device *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u32 link_timer;
- int err;
-
- link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
- mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
- mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
-
- err = mdiobus_modify(bus, addr, IF_MODE,
- IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
- 0);
- if (err)
- return err;
-
- return phylink_mii_c22_pcs_config(pcs, mode,
- PHY_INTERFACE_MODE_1000BASEX,
- advertising);
-}
-
-static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
- const unsigned long *advertising)
-{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u16 if_mode;
int err;
- if_mode = IF_MODE_SGMII_EN;
- if (mode == MLO_AN_INBAND) {
- u32 link_timer;
-
- if_mode |= IF_MODE_USE_SGMII_AN;
-
- /* Adjust link timer for SGMII */
- link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
- mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
- mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+ if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
+ mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+
+ if_mode = 0;
+ } else {
+ if_mode = IF_MODE_SGMII_EN;
+ if (mode == MLO_AN_INBAND) {
+ if_mode |= IF_MODE_USE_SGMII_AN;
+
+ /* Adjust link timer for SGMII */
+ link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
+ mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+ }
}
- err = mdiobus_modify(bus, addr, IF_MODE,
+
+ err = mdiodev_modify(pcs, IF_MODE,
IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
if_mode);
if (err)
return err;
- return phylink_mii_c22_pcs_config(pcs, mode, PHY_INTERFACE_MODE_SGMII,
- advertising);
+ return phylink_mii_c22_pcs_config(pcs, mode, interface, advertising);
}
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, unsigned int mode,
@@ -204,10 +184,10 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
switch (ifmode) {
case PHY_INTERFACE_MODE_1000BASEX:
- return lynx_pcs_config_1000basex(lynx->mdio, mode, advertising);
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
+ return lynx_pcs_config_giga(lynx->mdio, mode, ifmode,
+ advertising);
case PHY_INTERFACE_MODE_2500BASEX:
if (phylink_autoneg_inband(mode)) {
dev_err(&lynx->mdio->dev,
@@ -237,9 +217,7 @@ static void lynx_pcs_an_restart(struct phylink_pcs *pcs)
static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
int speed, int duplex)
{
- struct mii_bus *bus = pcs->bus;
u16 if_mode = 0, sgmii_speed;
- int addr = pcs->addr;
/* The PCS needs to be configured manually only
* when not operating on in-band mode
@@ -269,7 +247,7 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
}
if_mode |= IF_MODE_SPEED(sgmii_speed);
- mdiobus_modify(bus, addr, IF_MODE,
+ mdiodev_modify(pcs, IF_MODE,
IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
if_mode);
}
@@ -294,8 +272,6 @@ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
unsigned int mode,
int speed, int duplex)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u16 if_mode = 0;
if (mode == MLO_AN_INBAND) {
@@ -307,7 +283,7 @@ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
if_mode |= IF_MODE_HALF_DUPLEX;
if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
- mdiobus_modify(bus, addr, IF_MODE,
+ mdiodev_modify(pcs, IF_MODE,
IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
if_mode);
}
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
new file mode 100644
index 000000000000..c1424119e821
--- /dev/null
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mdio.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/pm_runtime.h>
+#include <dt-bindings/net/pcs-rzn1-miic.h>
+
+#define MIIC_PRCMD 0x0
+#define MIIC_ESID_CODE 0x4
+
+#define MIIC_MODCTRL 0x20
+#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
+
+#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
+
+#define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0)
+#define CONV_MODE_10MBPS 0
+#define CONV_MODE_100MBPS 1
+#define CONV_MODE_1000MBPS 2
+
+#define MIIC_CONVCTRL_CONV_MODE GENMASK(3, 2)
+#define CONV_MODE_MII 0
+#define CONV_MODE_RMII 1
+#define CONV_MODE_RGMII 2
+
+#define MIIC_CONVCTRL_FULLD BIT(8)
+#define MIIC_CONVCTRL_RGMII_LINK BIT(12)
+#define MIIC_CONVCTRL_RGMII_DUPLEX BIT(13)
+#define MIIC_CONVCTRL_RGMII_SPEED GENMASK(15, 14)
+
+#define MIIC_CONVRST 0x114
+#define MIIC_CONVRST_PHYIF_RST(port) BIT(port)
+#define MIIC_CONVRST_PHYIF_RST_MASK GENMASK(4, 0)
+
+#define MIIC_SWCTRL 0x304
+#define MIIC_SWDUPC 0x308
+
+#define MIIC_MAX_NR_PORTS 5
+
+#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_NONE -1
+
+/**
+ * struct modctrl_match - Matching table entry for convctrl configuration
+ * See section 8.2.1 of manual.
+ * @mode_cfg: Configuration value for convctrl
+ * @conv: Configuration of ethernet port muxes. First index is SWITCH_PORTIN,
+ * then index 1 - 5 are CONV1 - CONV5.
+ */
+struct modctrl_match {
+ u32 mode_cfg;
+ u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+};
+
+static struct modctrl_match modctrl_match_table[] = {
+ {0x0, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x1, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x2, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x3, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+ {0x8, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x9, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0xA, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0xB, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+ {0x10, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x11, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x12, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x13, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}
+};
+
+static const char * const conf_to_string[] = {
+ [MIIC_GMAC1_PORT] = "GMAC1_PORT",
+ [MIIC_GMAC2_PORT] = "GMAC2_PORT",
+ [MIIC_RTOS_PORT] = "RTOS_PORT",
+ [MIIC_SERCOS_PORTA] = "SERCOS_PORTA",
+ [MIIC_SERCOS_PORTB] = "SERCOS_PORTB",
+ [MIIC_ETHERCAT_PORTA] = "ETHERCAT_PORTA",
+ [MIIC_ETHERCAT_PORTB] = "ETHERCAT_PORTB",
+ [MIIC_ETHERCAT_PORTC] = "ETHERCAT_PORTC",
+ [MIIC_SWITCH_PORTA] = "SWITCH_PORTA",
+ [MIIC_SWITCH_PORTB] = "SWITCH_PORTB",
+ [MIIC_SWITCH_PORTC] = "SWITCH_PORTC",
+ [MIIC_SWITCH_PORTD] = "SWITCH_PORTD",
+ [MIIC_HSR_PORTA] = "HSR_PORTA",
+ [MIIC_HSR_PORTB] = "HSR_PORTB",
+};
+
+static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+ "SWITCH_PORTIN",
+ "CONV1",
+ "CONV2",
+ "CONV3",
+ "CONV4",
+ "CONV5",
+};
+
+/**
+ * struct miic - MII converter structure
+ * @base: base address of the MII converter
+ * @dev: Device associated to the MII converter
+ * @clks: Clocks used for this device
+ * @nclk: Number of clocks
+ * @lock: Lock used for read-modify-write access
+ */
+struct miic {
+ void __iomem *base;
+ struct device *dev;
+ struct clk_bulk_data *clks;
+ int nclk;
+ spinlock_t lock;
+};
+
+/**
+ * struct miic_port - Per port MII converter struct
+ * @miic: back pointer to the MII converter structure
+ * @pcs: PCS structure associated to the port
+ * @port: port number
+ * @interface: interface mode of the port
+ */
+struct miic_port {
+ struct miic *miic;
+ struct phylink_pcs pcs;
+ int port;
+ phy_interface_t interface;
+};
+
+static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct miic_port, pcs);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+ writel(value, miic->base + offset);
+}
+
+static u32 miic_reg_readl(struct miic *miic, int offset)
+{
+ return readl(miic->base + offset);
+}
+
+static void miic_reg_rmw(struct miic *miic, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&miic->lock);
+
+ reg = miic_reg_readl(miic, offset);
+ reg &= ~mask;
+ reg |= val;
+ miic_reg_writel(miic, offset, reg);
+
+ spin_unlock(&miic->lock);
+}
+
+static void miic_converter_enable(struct miic *miic, int port, int enable)
+{
+ u32 val = 0;
+
+ if (enable)
+ val = MIIC_CONVRST_PHYIF_RST(port);
+
+ miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
+}
+
+static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising, bool permit)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 speed, conv_mode, val, mask;
+ int port = miic_port->port;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ conv_mode = CONV_MODE_RMII;
+ speed = CONV_MODE_100MBPS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ conv_mode = CONV_MODE_RGMII;
+ speed = CONV_MODE_1000MBPS;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ conv_mode = CONV_MODE_MII;
+ /* When in MII mode, speed should be set to 0 (which is actually
+ * CONV_MODE_10MBPS)
+ */
+ speed = CONV_MODE_10MBPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, conv_mode);
+ mask = MIIC_CONVCTRL_CONV_MODE;
+
+ /* Update speed only if we are going to change the interface because
+ * the link might already be up and it would break it if the speed is
+ * changed.
+ */
+ if (interface != miic_port->interface) {
+ val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, speed);
+ mask |= MIIC_CONVCTRL_CONV_SPEED;
+ miic_port->interface = interface;
+ }
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
+ miic_converter_enable(miic_port->miic, miic_port->port, 1);
+
+ return 0;
+}
+
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 conv_speed = 0, val = 0;
+ int port = miic_port->port;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MIIC_CONVCTRL_FULLD;
+
+ /* No speed in MII through-mode */
+ if (interface != PHY_INTERFACE_MODE_MII) {
+ switch (speed) {
+ case SPEED_1000:
+ conv_speed = CONV_MODE_1000MBPS;
+ break;
+ case SPEED_100:
+ conv_speed = CONV_MODE_100MBPS;
+ break;
+ case SPEED_10:
+ conv_speed = CONV_MODE_10MBPS;
+ break;
+ default:
+ return;
+ }
+ }
+
+ val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, conv_speed);
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(port),
+ (MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
+}
+
+static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ if (phy_interface_mode_is_rgmii(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_RMII ||
+ state->interface == PHY_INTERFACE_MODE_MII)
+ return 1;
+
+ return -EINVAL;
+}
+
+static const struct phylink_pcs_ops miic_phylink_ops = {
+ .pcs_validate = miic_validate,
+ .pcs_config = miic_config,
+ .pcs_link_up = miic_link_up,
+};
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct miic_port *miic_port;
+ struct device_node *pcs_np;
+ struct miic *miic;
+ u32 port;
+
+ if (!of_device_is_available(np))
+ return ERR_PTR(-ENODEV);
+
+ if (of_property_read_u32(np, "reg", &port))
+ return ERR_PTR(-EINVAL);
+
+ if (port > MIIC_MAX_NR_PORTS || port < 1)
+ return ERR_PTR(-EINVAL);
+
+ /* The PCS pdev is attached to the parent node */
+ pcs_np = of_get_parent(np);
+ if (!pcs_np)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_device_is_available(pcs_np)) {
+ of_node_put(pcs_np);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(pcs_np);
+ of_node_put(pcs_np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
+ if (!miic_port)
+ return ERR_PTR(-ENOMEM);
+
+ miic = platform_get_drvdata(pdev);
+ device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ miic_port->miic = miic;
+ miic_port->port = port - 1;
+ miic_port->pcs.ops = &miic_phylink_ops;
+
+ return &miic_port->pcs;
+}
+EXPORT_SYMBOL(miic_create);
+
+void miic_destroy(struct phylink_pcs *pcs)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+
+ miic_converter_enable(miic_port->miic, miic_port->port, 0);
+ kfree(miic_port);
+}
+EXPORT_SYMBOL(miic_destroy);
+
+static int miic_init_hw(struct miic *miic, u32 cfg_mode)
+{
+ int port;
+
+ /* Unlock write access to accessory registers (cf datasheet). If this
+ * is going to be used in conjunction with the Cortex-M3, this sequence
+ * will have to be moved into the register write path.
+ */
+ miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
+ miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+ miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
+ miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+
+ miic_reg_writel(miic, MIIC_MODCTRL,
+ FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
+
+ for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
+ miic_converter_enable(miic, port, 0);
+ /* Disable speed/duplex control from these registers, datasheet
+ * says switch registers should be used to set up switch port
+ * speed and duplex.
+ */
+ miic_reg_writel(miic, MIIC_SWCTRL, 0x0);
+ miic_reg_writel(miic, MIIC_SWDUPC, 0x0);
+ }
+
+ return 0;
+}
+
+static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
+{
+ int i;
+
+ for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
+ continue;
+
+ if (dt_val[i] != table_val[i])
+ return false;
+ }
+
+ return true;
+}
+
+static void miic_dump_conf(struct device *dev,
+ s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
+{
+ const char *conf_name;
+ int i;
+
+ for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ if (conf[i] != MIIC_MODCTRL_CONF_NONE)
+ conf_name = conf_to_string[conf[i]];
+ else
+ conf_name = "NONE";
+
+ dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
+ }
+}
+
+static int miic_match_dt_conf(struct device *dev,
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
+ u32 *mode_cfg)
+{
+ struct modctrl_match *table_entry;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
+ table_entry = &modctrl_match_table[i];
+
+ if (miic_modctrl_match(table_entry->conv, dt_val)) {
+ *mode_cfg = table_entry->mode_cfg;
+ return 0;
+ }
+ }
+
+ dev_err(dev, "Failed to apply requested configuration\n");
+ miic_dump_conf(dev, dt_val);
+
+ return -EINVAL;
+}
+
+static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
+{
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
+ struct device_node *np = dev->of_node;
+ struct device_node *conv;
+ u32 conf;
+ int port;
+
+ memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
+
+ if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
+ dt_val[0] = conf;
+
+ for_each_child_of_node(np, conv) {
+ if (of_property_read_u32(conv, "reg", &port))
+ continue;
+
+ if (!of_device_is_available(conv))
+ continue;
+
+ if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
+ dt_val[port] = conf;
+ }
+
+ return miic_match_dt_conf(dev, dt_val, mode_cfg);
+}
+
+static int miic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct miic *miic;
+ u32 mode_cfg;
+ int ret;
+
+ ret = miic_parse_dt(dev, &mode_cfg);
+ if (ret < 0)
+ return ret;
+
+ miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
+ if (!miic)
+ return -ENOMEM;
+
+ spin_lock_init(&miic->lock);
+ miic->dev = dev;
+ miic->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(miic->base))
+ return PTR_ERR(miic->base);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = miic_init_hw(miic, mode_cfg);
+ if (ret)
+ goto disable_runtime_pm;
+
+ /* miic_create() relies on the fact that data are attached to the
+ * platform device to determine if the driver is ready so this needs to
+ * be the last thing to be done after everything is initialized
+ * properly.
+ */
+ platform_set_drvdata(pdev, miic);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int miic_remove(struct platform_device *pdev)
+{
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id miic_of_mtable[] = {
+ { .compatible = "renesas,rzn1-miic" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, miic_of_mtable);
+
+static struct platform_driver miic_driver = {
+ .driver = {
+ .name = "rzn1_miic",
+ .suppress_bind_attrs = true,
+ .of_match_table = miic_of_mtable,
+ },
+ .probe = miic_probe,
+ .remove = miic_remove,
+};
+module_platform_driver(miic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas MII converter PCS driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 4cfd05c15aee..70f88eae2a9e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -77,6 +77,14 @@ static const int xpcs_sgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_1000basex_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_2500basex_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -102,6 +110,10 @@ static const phy_interface_t xpcs_sgmii_interfaces[] = {
PHY_INTERFACE_MODE_SGMII,
};
+static const phy_interface_t xpcs_1000basex_interfaces[] = {
+ PHY_INTERFACE_MODE_1000BASEX,
+};
+
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_MAX,
@@ -112,6 +124,7 @@ enum {
DW_XPCS_10GKR,
DW_XPCS_XLGMII,
DW_XPCS_SGMII,
+ DW_XPCS_1000BASEX,
DW_XPCS_2500BASEX,
DW_XPCS_INTERFACE_MAX,
};
@@ -189,6 +202,14 @@ int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
return mdiobus_c45_write(bus, addr, dev, reg, val);
}
+static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg,
+ u16 mask, u16 set)
+{
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+
+ return mdiodev_modify_changed(xpcs->mdiodev, reg_addr, mask, set);
+}
+
static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
{
return xpcs_read(xpcs, dev, DW_VENDOR | reg);
@@ -237,6 +258,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
break;
case DW_AN_C37_SGMII:
case DW_2500BASEX:
+ case DW_AN_C37_1000BASEX:
dev = MDIO_MMD_VEND2;
break;
default:
@@ -772,6 +794,68 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
return ret;
}
+static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ phy_interface_t interface = PHY_INTERFACE_MODE_1000BASEX;
+ int ret, mdio_ctrl, adv;
+ bool changed = false;
+
+ /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must
+ * be disabled first:-
+ * 1) VR_MII_MMD_CTRL Bit(12)[AN_ENABLE] = 0b
+ * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 00b (1000BASE-X C37)
+ */
+ mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+ if (mdio_ctrl < 0)
+ return mdio_ctrl;
+
+ if (mdio_ctrl & AN_CL37_EN) {
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+ mdio_ctrl & ~AN_CL37_EN);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_VR_MII_PCS_MODE_MASK;
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Check for advertising changes and update the C45 MII ADV
+ * register accordingly.
+ */
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface,
+ advertising);
+ if (adv >= 0) {
+ ret = xpcs_modify_changed(xpcs, MDIO_MMD_VEND2,
+ MII_ADVERTISE, 0xffff, adv);
+ if (ret < 0)
+ return ret;
+
+ changed = ret;
+ }
+
+ /* Clear CL37 AN complete status */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0);
+ if (ret < 0)
+ return ret;
+
+ if (phylink_autoneg_inband(mode) &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+ mdio_ctrl | AN_CL37_EN);
+ if (ret < 0)
+ return ret;
+ }
+
+ return changed;
+}
+
static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
{
int ret;
@@ -795,7 +879,7 @@ static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
}
int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
- unsigned int mode)
+ unsigned int mode, const unsigned long *advertising)
{
const struct xpcs_compat *compat;
int ret;
@@ -817,6 +901,12 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
if (ret)
return ret;
break;
+ case DW_AN_C37_1000BASEX:
+ ret = xpcs_config_aneg_c37_1000basex(xpcs, mode,
+ advertising);
+ if (ret)
+ return ret;
+ break;
case DW_2500BASEX:
ret = xpcs_config_2500basex(xpcs);
if (ret)
@@ -843,7 +933,7 @@ static int xpcs_config(struct phylink_pcs *pcs, unsigned int mode,
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
- return xpcs_do_config(xpcs, interface, mode);
+ return xpcs_do_config(xpcs, interface, mode, advertising);
}
static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
@@ -864,7 +954,7 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
state->link = 0;
- return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND);
+ return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND, NULL);
}
if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state, compat)) {
@@ -896,7 +986,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
*/
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
if (ret < 0)
- return false;
+ return ret;
if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
int speed_value;
@@ -921,6 +1011,29 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
return 0;
}
+static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state)
+{
+ int lpa, bmsr;
+
+ if (state->an_enabled) {
+ /* Reset link state */
+ state->link = false;
+
+ lpa = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0 || lpa & LPA_RFAULT)
+ return lpa;
+
+ bmsr = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ }
+
+ return 0;
+}
+
static void xpcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
@@ -948,6 +1061,13 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
ERR_PTR(ret));
}
break;
+ case DW_AN_C37_1000BASEX:
+ ret = xpcs_get_state_c37_1000basex(xpcs, state);
+ if (ret) {
+ pr_err("xpcs_get_state_c37_1000basex returned %pe\n",
+ ERR_PTR(ret));
+ }
+ break;
default:
return;
}
@@ -961,22 +1081,35 @@ static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int mode,
if (phylink_autoneg_inband(mode))
return;
+ val = mii_bmcr_encode_fixed(speed, duplex);
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
+ if (ret)
+ pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));
+}
+
+static void xpcs_link_up_1000basex(struct dw_xpcs *xpcs, unsigned int mode,
+ int speed, int duplex)
+{
+ int val, ret;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
switch (speed) {
case SPEED_1000:
val = BMCR_SPEED1000;
break;
case SPEED_100:
- val = BMCR_SPEED100;
- break;
case SPEED_10:
- val = BMCR_SPEED10;
- break;
default:
+ pr_err("%s: speed = %d\n", __func__, speed);
return;
}
if (duplex == DUPLEX_FULL)
val |= BMCR_FULLDPLX;
+ else
+ pr_err("%s: half duplex not supported\n", __func__);
ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
if (ret)
@@ -992,9 +1125,23 @@ void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
return xpcs_config_usxgmii(xpcs, speed);
if (interface == PHY_INTERFACE_MODE_SGMII)
return xpcs_link_up_sgmii(xpcs, mode, speed, duplex);
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
+ return xpcs_link_up_1000basex(xpcs, mode, speed, duplex);
}
EXPORT_SYMBOL_GPL(xpcs_link_up);
+static void xpcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
+ if (ret >= 0) {
+ ret |= BMCR_ANRESTART;
+ xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
+ }
+}
+
static u32 xpcs_get_id(struct dw_xpcs *xpcs)
{
int ret;
@@ -1060,6 +1207,12 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
.num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
.an_mode = DW_AN_C37_SGMII,
},
+ [DW_XPCS_1000BASEX] = {
+ .supported = xpcs_1000basex_features,
+ .interface = xpcs_1000basex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_1000basex_interfaces),
+ .an_mode = DW_AN_C37_1000BASEX,
+ },
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
@@ -1115,6 +1268,7 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_validate = xpcs_validate,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
+ .pcs_an_restart = xpcs_an_restart,
.pcs_link_up = xpcs_link_up,
};
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index 35651d32a224..770df50323a0 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -109,7 +109,6 @@
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
-
int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9fee639ee5c8..c57a0262fb64 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -104,6 +104,8 @@ config AX88796B_PHY
config BROADCOM_PHY
tristate "Broadcom 54XX PHYs"
select BCM_NET_PHYLIB
+ select BCM_NET_PHYPTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
@@ -160,6 +162,9 @@ config BCM_CYGNUS_PHY
config BCM_NET_PHYLIB
tristate
+config BCM_NET_PHYPTP
+ tristate
+
config CICADA_PHY
tristate "Cicada PHYs"
help
@@ -216,6 +221,8 @@ config MARVELL_88X2222_PHY
config MAXLINEAR_GPHY
tristate "Maxlinear Ethernet PHYs"
+ select POLYNOMIAL if HWMON
+ depends on HWMON || HWMON=n
help
Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
GPY241, GPY245 PHYs.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b12b1d86fc99..f7138d3c896b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
+obj-$(CONFIG_BCM_NET_PHYPTP) += bcm-phy-ptp.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index c7047f5d7a9b..8b7a46db30e0 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -22,6 +22,7 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR113C 0x31c31c12
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
@@ -697,6 +698,24 @@ static struct phy_driver aqr_driver[] = {
.handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
+ .name = "Aquantia AQR113C",
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -709,6 +728,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ }
};
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index c3842f87c33b..9902fb182099 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -87,4 +87,23 @@ int bcm_phy_cable_test_start_rdb(struct phy_device *phydev);
int bcm_phy_cable_test_start(struct phy_device *phydev);
int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished);
+#if IS_ENABLED(CONFIG_BCM_NET_PHYPTP)
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev);
+void bcm_ptp_config_init(struct phy_device *phydev);
+void bcm_ptp_stop(struct bcm_ptp_private *priv);
+#else
+static inline struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+ return NULL;
+}
+
+static inline void bcm_ptp_config_init(struct phy_device *phydev)
+{
+}
+
+static inline void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+}
+#endif
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
new file mode 100644
index 000000000000..ef00d6163061
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Meta Platforms Inc.
+ * Copyright (C) 2022 Jonathan Lemon <jonathan.lemon@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+
+#include "bcm-phy-lib.h"
+
+/* IEEE 1588 Expansion registers */
+#define SLICE_CTRL 0x0810
+#define SLICE_TX_EN BIT(0)
+#define SLICE_RX_EN BIT(8)
+#define TX_EVENT_MODE 0x0811
+#define MODE_TX_UPDATE_CF BIT(0)
+#define MODE_TX_REPLACE_TS_CF BIT(1)
+#define MODE_TX_REPLACE_TS GENMASK(1, 0)
+#define RX_EVENT_MODE 0x0819
+#define MODE_RX_UPDATE_CF BIT(0)
+#define MODE_RX_INSERT_TS_48 BIT(1)
+#define MODE_RX_INSERT_TS_64 GENMASK(1, 0)
+
+#define MODE_EVT_SHIFT_SYNC 0
+#define MODE_EVT_SHIFT_DELAY_REQ 2
+#define MODE_EVT_SHIFT_PDELAY_REQ 4
+#define MODE_EVT_SHIFT_PDELAY_RESP 6
+
+#define MODE_SEL_SHIFT_PORT 0
+#define MODE_SEL_SHIFT_CPU 8
+
+#define RX_MODE_SEL(sel, evt, act) \
+ (((MODE_RX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
+
+#define TX_MODE_SEL(sel, evt, act) \
+ (((MODE_TX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
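+
+/* For illustration, RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) expands to
+ * ((MODE_RX_INSERT_TS_64) << (MODE_EVT_SHIFT_SYNC)) << (MODE_SEL_SHIFT_PORT).
+ */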
+
+/* needs global TS capture first */
+#define TX_TS_CAPTURE 0x0821
+#define TX_TS_CAP_EN BIT(0)
+#define RX_TS_CAPTURE 0x0822
+#define RX_TS_CAP_EN BIT(0)
+
+#define TIME_CODE_0 0x0854
+#define TIME_CODE_1 0x0855
+#define TIME_CODE_2 0x0856
+#define TIME_CODE_3 0x0857
+#define TIME_CODE_4 0x0858
+
+#define DPLL_SELECT 0x085b
+#define DPLL_HB_MODE2 BIT(6)
+
+#define SHADOW_CTRL 0x085c
+#define SHADOW_LOAD 0x085d
+#define TIME_CODE_LOAD BIT(10)
+#define SYNC_OUT_LOAD BIT(9)
+#define NCO_TIME_LOAD BIT(7)
+#define FREQ_LOAD BIT(6)
+#define INTR_MASK 0x085e
+#define INTR_STATUS 0x085f
+#define INTC_FSYNC BIT(0)
+#define INTC_SOP BIT(1)
+
+#define NCO_FREQ_LSB 0x0873
+#define NCO_FREQ_MSB 0x0874
+
+#define NCO_TIME_0 0x0875
+#define NCO_TIME_1 0x0876
+#define NCO_TIME_2_CTRL 0x0877
+#define FREQ_MDIO_SEL BIT(14)
+
+#define SYNC_OUT_0 0x0878
+#define SYNC_OUT_1 0x0879
+#define SYNC_OUT_2 0x087a
+
+#define SYNC_IN_DIVIDER 0x087b
+
+#define SYNOUT_TS_0 0x087c
+#define SYNOUT_TS_1 0x087d
+#define SYNOUT_TS_2 0x087e
+
+#define NSE_CTRL 0x087f
+#define NSE_GMODE_EN GENMASK(15, 14)
+#define NSE_CAPTURE_EN BIT(13)
+#define NSE_INIT BIT(12)
+#define NSE_CPU_FRAMESYNC BIT(5)
+#define NSE_SYNC1_FRAMESYNC BIT(3)
+#define NSE_FRAMESYNC_MASK GENMASK(5, 2)
+#define NSE_PEROUT_EN BIT(1)
+#define NSE_ONESHOT_EN BIT(0)
+#define NSE_SYNC_OUT_MASK GENMASK(1, 0)
+
+#define TS_READ_CTRL 0x0885
+#define TS_READ_START BIT(0)
+#define TS_READ_END BIT(1)
+
+#define HB_REG_0 0x0886
+#define HB_REG_1 0x0887
+#define HB_REG_2 0x0888
+#define HB_REG_3 0x08ec
+#define HB_REG_4 0x08ed
+#define HB_STAT_CTRL 0x088e
+#define HB_READ_START BIT(10)
+#define HB_READ_END BIT(11)
+#define HB_READ_MASK GENMASK(11, 10)
+
+#define TS_REG_0 0x0889
+#define TS_REG_1 0x088a
+#define TS_REG_2 0x088b
+#define TS_REG_3 0x08c4
+
+#define TS_INFO_0 0x088c
+#define TS_INFO_1 0x088d
+
+#define TIMECODE_CTRL 0x08c3
+#define TX_TIMECODE_SEL GENMASK(7, 0)
+#define RX_TIMECODE_SEL GENMASK(15, 8)
+
+#define TIME_SYNC 0x0ff5
+#define TIME_SYNC_EN BIT(0)
+
+struct bcm_ptp_private {
+ struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct ptp_pin_desc pin;
+ struct mutex mutex;
+ struct sk_buff_head tx_queue;
+ int tx_type;
+ bool hwts_rx;
+ u16 nse_ctrl;
+ bool pin_active;
+ struct delayed_work pin_work;
+};
+
+struct bcm_ptp_skb_cb {
+ unsigned long timeout;
+ u16 seq_id;
+ u8 msgtype;
+ bool discard;
+};
+
+struct bcm_ptp_capture {
+ ktime_t hwtstamp;
+ u16 seq_id;
+ u8 msgtype;
+ bool tx_dir;
+};
+
+#define BCM_SKB_CB(skb) ((struct bcm_ptp_skb_cb *)(skb)->cb)
+#define SKB_TS_TIMEOUT 10 /* jiffies */
+
+#define BCM_MAX_PULSE_8NS ((1U << 9) - 1)
+#define BCM_MAX_PERIOD_8NS ((1U << 30) - 1)
+
+#define BRCM_PHY_MODEL(phydev) \
+ ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
+
+static struct bcm_ptp_private *mii2priv(struct mii_timestamper *mii_ts)
+{
+ return container_of(mii_ts, struct bcm_ptp_private, mii_ts);
+}
+
+static struct bcm_ptp_private *ptp2priv(struct ptp_clock_info *info)
+{
+ return container_of(info, struct bcm_ptp_private, ptp_info);
+}
+
+static void bcm_ptp_get_framesync_ts(struct phy_device *phydev,
+ struct timespec64 *ts)
+{
+ u16 hb[4];
+
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_START);
+
+ hb[0] = bcm_phy_read_exp(phydev, HB_REG_0);
+ hb[1] = bcm_phy_read_exp(phydev, HB_REG_1);
+ hb[2] = bcm_phy_read_exp(phydev, HB_REG_2);
+ hb[3] = bcm_phy_read_exp(phydev, HB_REG_3);
+
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_END);
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, 0);
+
+ ts->tv_sec = (hb[3] << 16) | hb[2];
+ ts->tv_nsec = (hb[1] << 16) | hb[0];
+}
+
+static u16 bcm_ptp_framesync_disable(struct phy_device *phydev, u16 orig_ctrl)
+{
+ u16 ctrl = orig_ctrl & ~(NSE_FRAMESYNC_MASK | NSE_CAPTURE_EN);
+
+ bcm_phy_write_exp(phydev, NSE_CTRL, ctrl);
+
+ return ctrl;
+}
+
+static void bcm_ptp_framesync_restore(struct phy_device *phydev, u16 orig_ctrl)
+{
+ if (orig_ctrl & NSE_FRAMESYNC_MASK)
+ bcm_phy_write_exp(phydev, NSE_CTRL, orig_ctrl);
+}
+
+static void bcm_ptp_framesync(struct phy_device *phydev, u16 ctrl)
+{
+ /* trigger framesync - must have 0->1 transition. */
+ bcm_phy_write_exp(phydev, NSE_CTRL, ctrl | NSE_CPU_FRAMESYNC);
+}
+
+static int bcm_ptp_framesync_ts(struct phy_device *phydev,
+ struct ptp_system_timestamp *sts,
+ struct timespec64 *ts,
+ u16 orig_ctrl)
+{
+ u16 ctrl, reg;
+ int i;
+
+ ctrl = bcm_ptp_framesync_disable(phydev, orig_ctrl);
+
+ ptp_read_system_prets(sts);
+
+ /* trigger framesync + capture */
+ bcm_ptp_framesync(phydev, ctrl | NSE_CAPTURE_EN);
+
+ ptp_read_system_postts(sts);
+
+ /* poll for FSYNC interrupt from TS capture */
+ for (i = 0; i < 10; i++) {
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if (reg & INTC_FSYNC) {
+ bcm_ptp_get_framesync_ts(phydev, ts);
+ break;
+ }
+ }
+
+ bcm_ptp_framesync_restore(phydev, orig_ctrl);
+
+ return reg & INTC_FSYNC ? 0 : -ETIMEDOUT;
+}
+
+static int bcm_ptp_gettimex(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_framesync_ts(priv->phydev, sts, ts, priv->nse_ctrl);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_settime_locked(struct bcm_ptp_private *priv,
+ const struct timespec64 *ts)
+{
+ struct phy_device *phydev = priv->phydev;
+ u16 ctrl;
+ u64 ns;
+
+ ctrl = bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
+
+ /* set up time code */
+ bcm_phy_write_exp(phydev, TIME_CODE_0, ts->tv_nsec);
+ bcm_phy_write_exp(phydev, TIME_CODE_1, ts->tv_nsec >> 16);
+ bcm_phy_write_exp(phydev, TIME_CODE_2, ts->tv_sec);
+ bcm_phy_write_exp(phydev, TIME_CODE_3, ts->tv_sec >> 16);
+ bcm_phy_write_exp(phydev, TIME_CODE_4, ts->tv_sec >> 32);
+
+ /* set NCO counter to match */
+ ns = timespec64_to_ns(ts);
+ bcm_phy_write_exp(phydev, NCO_TIME_0, ns >> 4);
+ bcm_phy_write_exp(phydev, NCO_TIME_1, ns >> 20);
+ bcm_phy_write_exp(phydev, NCO_TIME_2_CTRL, (ns >> 36) & 0xfff);
+
+ /* set up load on next frame sync (auto-clears due to NSE_INIT) */
+ bcm_phy_write_exp(phydev, SHADOW_LOAD, TIME_CODE_LOAD | NCO_TIME_LOAD);
+
+ /* must have NSE_INIT in order to write time code */
+ bcm_ptp_framesync(phydev, ctrl | NSE_INIT);
+
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ return 0;
+}
+
+static int bcm_ptp_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_settime_locked(priv, ts);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_adjtime_locked(struct bcm_ptp_private *priv,
+ s64 delta_ns)
+{
+ struct timespec64 ts;
+ int err;
+ s64 ns;
+
+ err = bcm_ptp_framesync_ts(priv->phydev, NULL, &ts, priv->nse_ctrl);
+ if (!err) {
+ ns = timespec64_to_ns(&ts) + delta_ns;
+ ts = ns_to_timespec64(ns);
+ err = bcm_ptp_settime_locked(priv, &ts);
+ }
+ return err;
+}
+
+static int bcm_ptp_adjtime(struct ptp_clock_info *info, s64 delta_ns)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_adjtime_locked(priv, delta_ns);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+/* A 125MHz clock should adjust 8ns per pulse.
+ * The frequency adjustment base is 0x8000 0000, or 8*2^28.
+ *
+ * Frequency adjustment is
+ * adj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * which simplifies to:
+ * adj = scaled_ppm * 2^9 / 5^6
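+ *
+ * As an illustration, scaled_ppm = 65536 (i.e. +1 ppm) gives
+ * adj = 65536 * 2^9 = 33554432 and diff = 33554432 / 15625 = 2147,
+ * so the value programmed into NCO_FREQ_LSB/MSB becomes 0x8000 0000 + 2147.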
+ */
+static int bcm_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int neg_adj = 0;
+ u32 diff, freq;
+ u16 ctrl;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = scaled_ppm << 9;
+ diff = div_u64(adj, 15625);
+ freq = (8 << 28) + (neg_adj ? -diff : diff);
+
+ mutex_lock(&priv->mutex);
+
+ ctrl = bcm_ptp_framesync_disable(priv->phydev, priv->nse_ctrl);
+
+ bcm_phy_write_exp(priv->phydev, NCO_FREQ_LSB, freq);
+ bcm_phy_write_exp(priv->phydev, NCO_FREQ_MSB, freq >> 16);
+
+ bcm_phy_write_exp(priv->phydev, NCO_TIME_2_CTRL, FREQ_MDIO_SEL);
+
+ /* load on next framesync */
+ bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, FREQ_LOAD);
+
+ bcm_ptp_framesync(priv->phydev, ctrl);
+
+ /* clear load */
+ bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, 0);
+
+ bcm_ptp_framesync_restore(priv->phydev, priv->nse_ctrl);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+static bool bcm_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct skb_shared_hwtstamps *hwts;
+ struct ptp_header *header;
+ u32 sec, nsec;
+ u8 *data;
+ int off;
+
+ if (!priv->hwts_rx)
+ return false;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return false;
+
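+ /* The PHY is assumed to have inserted a 32-bit seconds + 32-bit
+ * nanoseconds timestamp right behind the PTP header (the
+ * MODE_RX_INSERT_TS_64 receive mode); read it, then strip those
+ * 8 bytes from the packet again.
+ */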
+ data = (u8 *)(header + 1);
+ sec = get_unaligned_be32(data);
+ nsec = get_unaligned_be32(data + 4);
+
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = ktime_set(sec, nsec);
+
+ off = data - skb->data + 8;
+ if (off < skb->len) {
+ memmove(data, data + 8, skb->len - off);
+ __pskb_trim(skb, skb->len - 8);
+ }
+
+ return false;
+}
+
+static bool bcm_ptp_get_tstamp(struct bcm_ptp_private *priv,
+ struct bcm_ptp_capture *capts)
+{
+ struct phy_device *phydev = priv->phydev;
+ u16 ts[4], reg;
+ u32 sec, nsec;
+
+ mutex_lock(&priv->mutex);
+
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if ((reg & INTC_SOP) == 0) {
+ mutex_unlock(&priv->mutex);
+ return false;
+ }
+
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_START);
+
+ ts[0] = bcm_phy_read_exp(phydev, TS_REG_0);
+ ts[1] = bcm_phy_read_exp(phydev, TS_REG_1);
+ ts[2] = bcm_phy_read_exp(phydev, TS_REG_2);
+ ts[3] = bcm_phy_read_exp(phydev, TS_REG_3);
+
+ /* not in be32 format for some reason */
+ capts->seq_id = bcm_phy_read_exp(priv->phydev, TS_INFO_0);
+
+ reg = bcm_phy_read_exp(phydev, TS_INFO_1);
+ capts->msgtype = reg >> 12;
+ capts->tx_dir = !!(reg & BIT(11));
+
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_END);
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, 0);
+
+ mutex_unlock(&priv->mutex);
+
+ sec = (ts[3] << 16) | ts[2];
+ nsec = (ts[1] << 16) | ts[0];
+ capts->hwtstamp = ktime_set(sec, nsec);
+
+ return true;
+}
+
+static void bcm_ptp_match_tstamp(struct bcm_ptp_private *priv,
+ struct bcm_ptp_capture *capts)
+{
+ struct skb_shared_hwtstamps hwts;
+ struct sk_buff *skb, *ts_skb;
+ unsigned long flags;
+ bool first = false;
+
+ ts_skb = NULL;
+ spin_lock_irqsave(&priv->tx_queue.lock, flags);
+ skb_queue_walk(&priv->tx_queue, skb) {
+ if (BCM_SKB_CB(skb)->seq_id == capts->seq_id &&
+ BCM_SKB_CB(skb)->msgtype == capts->msgtype) {
+ first = skb_queue_is_first(&priv->tx_queue, skb);
+ __skb_unlink(skb, &priv->tx_queue);
+ ts_skb = skb;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
+
+ /* TX captures one-step packets, discard them if needed. */
+ if (ts_skb) {
+ if (BCM_SKB_CB(ts_skb)->discard) {
+ kfree_skb(ts_skb);
+ } else {
+ memset(&hwts, 0, sizeof(hwts));
+ hwts.hwtstamp = capts->hwtstamp;
+ skb_complete_tx_timestamp(ts_skb, &hwts);
+ }
+ }
+
+ /* not first match, try and expire entries */
+ if (!first) {
+ while ((skb = skb_dequeue(&priv->tx_queue))) {
+ if (!time_after(jiffies, BCM_SKB_CB(skb)->timeout)) {
+ skb_queue_head(&priv->tx_queue, skb);
+ break;
+ }
+ kfree_skb(skb);
+ }
+ }
+}
+
+static long bcm_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ struct bcm_ptp_capture capts;
+ bool reschedule = false;
+
+ while (!skb_queue_empty_lockless(&priv->tx_queue)) {
+ if (!bcm_ptp_get_tstamp(priv, &capts)) {
+ reschedule = true;
+ break;
+ }
+ bcm_ptp_match_tstamp(priv, &capts);
+ }
+
+ return reschedule ? 1 : -1;
+}
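The return value follows the ptp_clock_info do_aux_work() convention: a non-negative value is the delay in jiffies before the PTP aux worker runs again, and a negative value stops rescheduling. In short:

    /* return 1  -> capture not ready yet, poll again in one jiffy
     * return -1 -> queue drained (or nothing pending); the worker is
     *              kicked again from bcm_ptp_txtstamp() via
     *              ptp_schedule_worker() when new skbs are queued
     */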
+
+static int bcm_ptp_cancel_func(struct bcm_ptp_private *priv)
+{
+ if (!priv->pin_active)
+ return 0;
+
+ priv->pin_active = false;
+
+ priv->nse_ctrl &= ~(NSE_SYNC_OUT_MASK | NSE_SYNC1_FRAMESYNC |
+ NSE_CAPTURE_EN);
+ bcm_phy_write_exp(priv->phydev, NSE_CTRL, priv->nse_ctrl);
+
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ return 0;
+}
+
+static void bcm_ptp_perout_work(struct work_struct *pin_work)
+{
+ struct bcm_ptp_private *priv =
+ container_of(pin_work, struct bcm_ptp_private, pin_work.work);
+ struct phy_device *phydev = priv->phydev;
+ struct timespec64 ts;
+ u64 ns, next;
+ u16 ctrl;
+
+ mutex_lock(&priv->mutex);
+
+ /* no longer running */
+ if (!priv->pin_active) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ bcm_ptp_framesync_ts(phydev, NULL, &ts, priv->nse_ctrl);
+
+ /* this is 1PPS only */
+ next = NSEC_PER_SEC - ts.tv_nsec;
+ ts.tv_sec += next < NSEC_PER_MSEC ? 2 : 1;
+ ts.tv_nsec = 0;
+
+ ns = timespec64_to_ns(&ts);
+
+ /* force 0->1 transition for ONESHOT */
+ ctrl = bcm_ptp_framesync_disable(phydev,
+ priv->nse_ctrl & ~NSE_ONESHOT_EN);
+
+ bcm_phy_write_exp(phydev, SYNOUT_TS_0, ns & 0xfff0);
+ bcm_phy_write_exp(phydev, SYNOUT_TS_1, ns >> 16);
+ bcm_phy_write_exp(phydev, SYNOUT_TS_2, ns >> 32);
+
+ /* load values on next framesync */
+ bcm_phy_write_exp(phydev, SHADOW_LOAD, SYNC_OUT_LOAD);
+
+ bcm_ptp_framesync(phydev, ctrl | NSE_ONESHOT_EN | NSE_INIT);
+
+ priv->nse_ctrl |= NSE_ONESHOT_EN;
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ mutex_unlock(&priv->mutex);
+
+ next = next + NSEC_PER_MSEC;
+ schedule_delayed_work(&priv->pin_work, nsecs_to_jiffies(next));
+}
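The arithmetic above aims the SYNC_OUT edge at the next full second, skips one further second when that boundary is less than a millisecond away (too close to program safely), and re-arms the work for roughly one millisecond past the next boundary. Illustrative values, not taken from the patch:

    /* ts = 41.300000000 s -> next = 700000000 ns, edge programmed at 42 s,
     *                        work re-runs at ~42.001 s
     * ts = 41.999200000 s -> next = 800000 ns (< NSEC_PER_MSEC), edge is
     *                        pushed out to 43 s; the work still re-runs at
     *                        ~42.001 s and simply reprograms the same edge
     */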
+
+static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
+ struct ptp_perout_request *req, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+ u64 period, pulse;
+ u16 val;
+
+ if (!on)
+ return bcm_ptp_cancel_func(priv);
+
+ /* 1PPS */
+ if (req->period.sec != 1 || req->period.nsec != 0)
+ return -EINVAL;
+
+ period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
+
+ if (req->flags & PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
+ if (req->flags & PTP_PEROUT_DUTY_CYCLE)
+ pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
+ else
+ pulse = (u64)BCM_MAX_PULSE_8NS << 3;
+
+ /* convert to 8ns units */
+ pulse >>= 3;
+
+ if (!pulse || pulse > period || pulse > BCM_MAX_PULSE_8NS)
+ return -EINVAL;
+
+ bcm_phy_write_exp(phydev, SYNC_OUT_0, period);
+
+ val = ((pulse & 0x3) << 14) | ((period >> 16) & 0x3fff);
+ bcm_phy_write_exp(phydev, SYNC_OUT_1, val);
+
+ val = ((pulse >> 2) & 0x7f) | (pulse << 7);
+ bcm_phy_write_exp(phydev, SYNC_OUT_2, val);
+
+ if (priv->pin_active)
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ priv->pin_active = true;
+ INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_perout_work);
+ schedule_delayed_work(&priv->pin_work, 0);
+
+ return 0;
+}
+
+static void bcm_ptp_extts_work(struct work_struct *pin_work)
+{
+ struct bcm_ptp_private *priv =
+ container_of(pin_work, struct bcm_ptp_private, pin_work.work);
+ struct phy_device *phydev = priv->phydev;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ u16 reg;
+
+ mutex_lock(&priv->mutex);
+
+ /* no longer running */
+ if (!priv->pin_active) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if ((reg & INTC_FSYNC) == 0)
+ goto out;
+
+ bcm_ptp_get_framesync_ts(phydev, &ts);
+
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = timespec64_to_ns(&ts);
+ ptp_clock_event(priv->ptp_clock, &event);
+
+out:
+ mutex_unlock(&priv->mutex);
+ schedule_delayed_work(&priv->pin_work, HZ / 4);
+}
+
+static int bcm_ptp_extts_locked(struct bcm_ptp_private *priv, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!on)
+ return bcm_ptp_cancel_func(priv);
+
+ if (priv->pin_active)
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
+
+ priv->nse_ctrl |= NSE_SYNC1_FRAMESYNC | NSE_CAPTURE_EN;
+
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ priv->pin_active = true;
+ INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_extts_work);
+ schedule_delayed_work(&priv->pin_work, 0);
+
+ return 0;
+}
+
+static int bcm_ptp_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err = -EBUSY;
+
+ mutex_lock(&priv->mutex);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (priv->pin.func == PTP_PF_PEROUT)
+ err = bcm_ptp_perout_locked(priv, &rq->perout, on);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (priv->pin.func == PTP_PF_EXTTS)
+ err = bcm_ptp_extts_locked(priv, on);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_verify(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static const struct ptp_clock_info bcm_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .max_adj = 100000000,
+ .gettimex64 = bcm_ptp_gettimex,
+ .settime64 = bcm_ptp_settime,
+ .adjtime = bcm_ptp_adjtime,
+ .adjfine = bcm_ptp_adjfine,
+ .enable = bcm_ptp_enable,
+ .verify = bcm_ptp_verify,
+ .do_aux_work = bcm_ptp_do_aux_work,
+ .n_pins = 1,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+};
+
+static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct ptp_header *hdr;
+ bool discard = false;
+ int msgtype;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ goto out;
+ msgtype = ptp_get_msgtype(hdr, type);
+
+ switch (priv->tx_type) {
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ if (msgtype == PTP_MSGTYPE_PDELAY_RESP)
+ discard = true;
+ fallthrough;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ discard = true;
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ BCM_SKB_CB(skb)->timeout = jiffies + SKB_TS_TIMEOUT;
+ BCM_SKB_CB(skb)->seq_id = be16_to_cpu(hdr->sequence_id);
+ BCM_SKB_CB(skb)->msgtype = msgtype;
+ BCM_SKB_CB(skb)->discard = discard;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&priv->tx_queue, skb);
+ ptp_schedule_worker(priv->ptp_clock, 0);
+ return;
+ default:
+ break;
+ }
+
+out:
+ kfree_skb(skb);
+}
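The fallthrough chain above decides which queued packets are later dropped instead of reported: in the one-step modes the PHY rewrites the timestamp in the frame itself, so no follow-up software timestamp is delivered for those message types. Spelled out (derived from the switch, not an addition):

    /* HWTSTAMP_TX_ONESTEP_P2P  -> queue all PTP messages, discard captures
     *                             for PDELAY_RESP and SYNC
     * HWTSTAMP_TX_ONESTEP_SYNC -> queue all PTP messages, discard captures
     *                             for SYNC
     * HWTSTAMP_TX_ON           -> queue all PTP messages, discard none
     * anything else            -> skb freed, no timestamp
     */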
+
+static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
+ struct ifreq *ifr)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct hwtstamp_config cfg;
+ u16 mode, ctrl;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ priv->hwts_rx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_type = cfg.tx_type;
+
+ ctrl = priv->hwts_rx ? SLICE_RX_EN : 0;
+ ctrl |= priv->tx_type != HWTSTAMP_TX_OFF ? SLICE_TX_EN : 0;
+
+ mode = TX_MODE_SEL(PORT, SYNC, REPLACE_TS) |
+ TX_MODE_SEL(PORT, DELAY_REQ, REPLACE_TS) |
+ TX_MODE_SEL(PORT, PDELAY_REQ, REPLACE_TS) |
+ TX_MODE_SEL(PORT, PDELAY_RESP, REPLACE_TS);
+
+ bcm_phy_write_exp(priv->phydev, TX_EVENT_MODE, mode);
+
+ mode = RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, DELAY_REQ, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, PDELAY_REQ, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, PDELAY_RESP, INSERT_TS_64);
+
+ bcm_phy_write_exp(priv->phydev, RX_EVENT_MODE, mode);
+
+ bcm_phy_write_exp(priv->phydev, SLICE_CTRL, ctrl);
+
+ if (ctrl & SLICE_TX_EN)
+ bcm_phy_write_exp(priv->phydev, TX_TS_CAPTURE, TX_TS_CAP_EN);
+ else
+ ptp_cancel_worker_sync(priv->ptp_clock);
+
+ /* purge existing data */
+ skb_queue_purge(&priv->tx_queue);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
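bcm_ptp_hwtstamp() is reached through the generic SIOCSHWTSTAMP path, so the tx_type/rx_filter handling above corresponds one-to-one with what userspace requests. A minimal sketch of such a request using only standard UAPI (the interface name "eth0" and the helper name are illustrative, not part of this driver):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    /* Enable two-step TX timestamps and PTPv2 event RX filtering. */
    static int enable_phy_timestamping(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            /* The driver may widen rx_filter; cfg holds the result. */
            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }

Called as enable_phy_timestamping(sock, "eth0") on any open socket; the ifreq names the interface.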
+
+static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+
+ ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
+ ts_info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ ts_info->tx_types =
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC) |
+ BIT(HWTSTAMP_TX_ONESTEP_P2P);
+ ts_info->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+ ptp_cancel_worker_sync(priv->ptp_clock);
+ bcm_ptp_cancel_func(priv);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_stop);
+
+void bcm_ptp_config_init(struct phy_device *phydev)
+{
+ /* init network sync engine */
+ bcm_phy_write_exp(phydev, NSE_CTRL, NSE_GMODE_EN | NSE_INIT);
+
+ /* enable time sync (TX/RX SOP capture) */
+ bcm_phy_write_exp(phydev, TIME_SYNC, TIME_SYNC_EN);
+
+ /* use sec.nsec heartbeat capture */
+ bcm_phy_write_exp(phydev, DPLL_SELECT, DPLL_HB_MODE2);
+
+ /* use 64 bit timecode for TX */
+ bcm_phy_write_exp(phydev, TIMECODE_CTRL, TX_TIMECODE_SEL);
+
+ /* always allow FREQ_LOAD on framesync */
+ bcm_phy_write_exp(phydev, SHADOW_CTRL, FREQ_LOAD);
+
+ bcm_phy_write_exp(phydev, SYNC_IN_DIVIDER, 1);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_config_init);
+
+static void bcm_ptp_init(struct bcm_ptp_private *priv)
+{
+ priv->nse_ctrl = NSE_GMODE_EN;
+
+ mutex_init(&priv->mutex);
+ skb_queue_head_init(&priv->tx_queue);
+
+ priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
+ priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
+ priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
+ priv->mii_ts.ts_info = bcm_ptp_ts_info;
+
+ priv->phydev->mii_ts = &priv->mii_ts;
+}
+
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+ struct bcm_ptp_private *priv;
+ struct ptp_clock *clock;
+
+ switch (BRCM_PHY_MODEL(phydev)) {
+ case PHY_ID_BCM54210E:
+ break;
+ default:
+ return NULL;
+ }
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ptp_info = bcm_ptp_clock_info;
+
+ snprintf(priv->pin.name, sizeof(priv->pin.name), "SYNC_OUT");
+ priv->ptp_info.pin_config = &priv->pin;
+
+ clock = ptp_clock_register(&priv->ptp_info, &phydev->mdio.dev);
+ if (IS_ERR(clock))
+ return ERR_CAST(clock);
+ priv->ptp_clock = clock;
+
+ priv->phydev = phydev;
+ bcm_ptp_init(priv);
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e36809aa6d30..31fbcdddc9ad 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -27,6 +27,11 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+struct bcm54xx_phy_priv {
+ u64 *stats;
+ struct bcm_ptp_private *ptp;
+};
+
static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@@ -313,6 +318,22 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
}
+static void bcm54xx_ptp_stop(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->ptp)
+ bcm_ptp_stop(priv->ptp);
+}
+
+static void bcm54xx_ptp_config_init(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->ptp)
+ bcm_ptp_config_init(phydev);
+}
+
static int bcm54xx_config_init(struct phy_device *phydev)
{
int reg, err, val;
@@ -390,6 +411,8 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
}
+ bcm54xx_ptp_config_init(phydev);
+
return 0;
}
@@ -418,6 +441,8 @@ static int bcm54xx_suspend(struct phy_device *phydev)
{
int ret;
+ bcm54xx_ptp_stop(phydev);
+
/* We cannot use a read/modify/write here otherwise the PHY gets into
* a bad state where its LEDs keep flashing, thus defeating the purpose
* of low power mode.
@@ -741,10 +766,6 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-struct bcm54xx_phy_priv {
- u64 *stats;
-};
-
static int bcm54xx_phy_probe(struct phy_device *phydev)
{
struct bcm54xx_phy_priv *priv;
@@ -761,6 +782,10 @@ static int bcm54xx_phy_probe(struct phy_device *phydev)
if (!priv->stats)
return -ENOMEM;
+ priv->ptp = bcm_ptp_probe(phydev);
+ if (IS_ERR(priv->ptp))
+ return PTR_ERR(priv->ptp);
+
return 0;
}
@@ -1042,6 +1067,20 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
+ .phy_id = PHY_ID_BCM53128,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM53128",
+ .flags = PHY_IS_INTERNAL,
+ /* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
+ .config_init = bcm54xx_config_init,
+ .config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
@@ -1077,6 +1116,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5241, 0xfffffff0 },
{ PHY_ID_BCM5395, 0xfffffff0 },
{ PHY_ID_BCM53125, 0xfffffff0 },
+ { PHY_ID_BCM53128, 0xfffffff0 },
{ PHY_ID_BCM89610, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 13dafe7a29bd..6939563d3b7c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
+#include <linux/nvmem-consumer.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -522,6 +523,51 @@ static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
}
#if IS_ENABLED(CONFIG_OF_MDIO)
+static int dp83867_of_init_io_impedance(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ struct nvmem_cell *cell;
+ u8 *buf, val;
+ int ret;
+
+ cell = of_nvmem_cell_get(of_node, "io_impedance_ctrl");
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ if (ret != -ENOENT && ret != -EOPNOTSUPP)
+ return phydev_err_probe(phydev, ret,
+ "failed to get nvmem cell io_impedance_ctrl\n");
+
+ /* If no nvmem cell, check for the boolean properties. */
+ if (of_property_read_bool(of_node, "ti,max-output-impedance"))
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
+ else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+ else
+ dp83867->io_impedance = -1; /* leave at default */
+
+ return 0;
+ }
+
+ buf = nvmem_cell_read(cell, NULL);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ val = *buf;
+ kfree(buf);
+
+ if ((val & DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK) != val) {
+ phydev_err(phydev, "nvmem cell 'io_impedance_ctrl' contents out of range\n");
+ return -ERANGE;
+ }
+ dp83867->io_impedance = val;
+
+ return 0;
+}
+
static int dp83867_of_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
@@ -549,12 +595,9 @@ static int dp83867_of_init(struct phy_device *phydev)
}
}
- if (of_property_read_bool(of_node, "ti,max-output-impedance"))
- dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
- else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
- dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
- else
- dp83867->io_impedance = -1; /* leave at default */
+ ret = dp83867_of_init_io_impedance(phydev);
+ if (ret)
+ return ret;
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
"ti,dp83867-rxctrl-strap-quirk");
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 1ae792b0daaa..3cd9a77f9532 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -27,6 +27,27 @@
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
+#define DP83TD510E_MSE_DETECT 0xa85
+
+#define DP83TD510_SQI_MAX 7
+
+/* Register values are converted to SNR(dB) as suggested by
+ * "Application Report - DP83TD510E Cable Diagnostics Toolkit":
+ * SNR(dB) = -10 * log10 (VAL/2^17) - 1.76 dB.
+ * SQI ranges are implemented according to "OPEN ALLIANCE - Advanced diagnostic
+ * features for 100BASE-T1 automotive Ethernet PHYs"
+ */
+static const u16 dp83td510_mse_sqi_map[] = {
+ 0x0569, /* < 18dB */
+ 0x044c, /* 18dB =< SNR < 19dB */
+ 0x0369, /* 19dB =< SNR < 20dB */
+ 0x02b6, /* 20dB =< SNR < 21dB */
+ 0x0227, /* 21dB =< SNR < 22dB */
+ 0x01b6, /* 22dB =< SNR < 23dB */
+ 0x015b, /* 23dB =< SNR < 24dB */
+ 0x0000 /* 24dB =< SNR */
+};
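The thresholds can be checked against the formula quoted above: solving SNR = -10 * log10(VAL/2^17) - 1.76 for the boundary SNR values gives numbers matching the table entries (a quick check, not part of the patch):

    /* 18 dB -> VAL = 2^17 * 10^(-1.976) ~= 1385 = 0x0569
     * 19 dB -> VAL = 2^17 * 10^(-2.076) ~= 1100 = 0x044c
     *
     * dp83td510_get_sqi() returns the first index whose threshold the MSE
     * reading meets, so readings at or above 0x0569 (SNR <= 18 dB) report
     * SQI 0 and readings below 0x015b report SQI 7.
     */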
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -164,6 +185,32 @@ static int dp83td510_config_aneg(struct phy_device *phydev)
return genphy_c45_check_and_restart_aneg(phydev, changed);
}
+static int dp83td510_get_sqi(struct phy_device *phydev)
+{
+ int sqi, ret;
+ u16 mse_val;
+
+ if (!phydev->link)
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT);
+ if (ret < 0)
+ return ret;
+
+ mse_val = 0xFFFF & ret;
+ for (sqi = 0; sqi < ARRAY_SIZE(dp83td510_mse_sqi_map); sqi++) {
+ if (mse_val >= dp83td510_mse_sqi_map[sqi])
+ return sqi;
+ }
+
+ return -EINVAL;
+}
+
+static int dp83td510_get_sqi_max(struct phy_device *phydev)
+{
+ return DP83TD510_SQI_MAX;
+}
+
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
@@ -192,6 +239,8 @@ static struct phy_driver dp83td510_driver[] = {
.get_features = dp83td510_get_features,
.config_intr = dp83td510_config_intr,
.handle_interrupt = dp83td510_handle_interrupt,
+ .get_sqi = dp83td510_get_sqi,
+ .get_sqi_max = dp83td510_get_sqi_max,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 03abe6233bbb..aef739c20ac4 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -353,6 +353,7 @@ static int __init fixed_mdio_bus_init(void)
fmb->mii_bus->parent = &pdev->dev;
fmb->mii_bus->read = &fixed_mdio_read;
fmb->mii_bus->write = &fixed_mdio_write;
+ fmb->mii_bus->phy_mask = ~0;
ret = mdiobus_register(fmb->mii_bus);
if (ret)
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index d8b31d4d2a73..f070776ca904 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -490,6 +490,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
dev = &phydev->mdio.dev;
sfp_parse_support(phydev->sfp_bus, id, sfp_supported);
+ phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
@@ -526,6 +527,7 @@ static void mv2222_sfp_remove(void *upstream)
priv->line_interface = PHY_INTERFACE_MODE_NA;
linkmode_zero(priv->supported);
+ phydev->port = PORT_NONE;
}
static void mv2222_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index d777c8851ed6..a714150f5e8c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1991,15 +1991,9 @@ static int m88e1510_loopback(struct phy_device *phydev, bool enable)
int err;
if (enable) {
- u16 bmcr_ctl = 0, mscr2_ctl = 0;
+ u16 bmcr_ctl, mscr2_ctl = 0;
- if (phydev->speed == SPEED_1000)
- bmcr_ctl = BMCR_SPEED1000;
- else if (phydev->speed == SPEED_100)
- bmcr_ctl = BMCR_SPEED100;
-
- if (phydev->duplex == DUPLEX_FULL)
- bmcr_ctl |= BMCR_FULLDPLX;
+ bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
err = phy_write(phydev, MII_BMCR, bmcr_ctl);
if (err < 0)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 22139901f01c..e78d0bf69bc3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -209,6 +209,9 @@
#define PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_ BIT(1)
#define PTP_TSU_INT_STS_PTP_RX_TS_EN_ BIT(0)
+#define LAN8814_LED_CTRL_1 0x0
+#define LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_ BIT(6)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -308,6 +311,10 @@ struct kszphy_priv {
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
+static const struct kszphy_type lan8814_type = {
+ .led_mode_reg = ~LAN8814_LED_CTRL_1,
+};
+
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
.has_broadcast_disable = true,
@@ -1688,6 +1695,30 @@ static int kszphy_suspend(struct phy_device *phydev)
return genphy_suspend(phydev);
}
+static void kszphy_parse_led_mode(struct phy_device *phydev)
+{
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ if (type && type->led_mode_reg) {
+ ret = of_property_read_u32(np, "micrel,led-mode",
+ &priv->led_mode);
+
+ if (ret)
+ priv->led_mode = -1;
+
+ if (priv->led_mode > 3) {
+ phydev_err(phydev, "invalid led mode: 0x%02x\n",
+ priv->led_mode);
+ priv->led_mode = -1;
+ }
+ } else {
+ priv->led_mode = -1;
+ }
+}
+
static int kszphy_resume(struct phy_device *phydev)
{
int ret;
@@ -1720,7 +1751,6 @@ static int kszphy_probe(struct phy_device *phydev)
const struct device_node *np = phydev->mdio.dev.of_node;
struct kszphy_priv *priv;
struct clk *clk;
- int ret;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1730,20 +1760,7 @@ static int kszphy_probe(struct phy_device *phydev)
priv->type = type;
- if (type && type->led_mode_reg) {
- ret = of_property_read_u32(np, "micrel,led-mode",
- &priv->led_mode);
- if (ret)
- priv->led_mode = -1;
-
- if (priv->led_mode > 3) {
- phydev_err(phydev, "invalid led mode: 0x%02x\n",
- priv->led_mode);
- priv->led_mode = -1;
- }
- } else {
- priv->led_mode = -1;
- }
+ kszphy_parse_led_mode(phydev);
clk = devm_clk_get(&phydev->mdio.dev, "rmii-ref");
/* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
@@ -2815,8 +2832,23 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
return 0;
}
+static void lan8814_setup_led(struct phy_device *phydev, int val)
+{
+ int temp;
+
+ temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
+
+ if (val)
+ temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
+ else
+ temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
+
+ lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
+}
+
static int lan8814_config_init(struct phy_device *phydev)
{
+ struct kszphy_priv *lan8814 = phydev->priv;
int val;
/* Reset the PHY */
@@ -2835,6 +2867,9 @@ static int lan8814_config_init(struct phy_device *phydev)
val |= LAN8814_ALIGN_TX_A_B_SWAP;
lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+ if (lan8814->led_mode >= 0)
+ lan8814_setup_led(phydev, lan8814->led_mode);
+
return 0;
}
@@ -2855,6 +2890,7 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
static int lan8814_probe(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
struct kszphy_priv *priv;
u16 addr;
int err;
@@ -2863,10 +2899,12 @@ static int lan8814_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
- priv->led_mode = -1;
-
phydev->priv = priv;
+ priv->type = type;
+
+ kszphy_parse_led_mode(phydev);
+
/* Strap-in value for PHY address, below register read gives starting
* phy address value
*/
@@ -3068,6 +3106,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
.config_init = lan8814_config_init,
+ .driver_data = &lan8814_type,
.probe = lan8814_probe,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 5ce1bf03bbd7..24bae27eedef 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -8,7 +8,9 @@
#include <linux/module.h>
#include <linux/bitfield.h>
+#include <linux/hwmon.h>
#include <linux/phy.h>
+#include <linux/polynomial.h>
#include <linux/netdevice.h>
/* PHY ID */
@@ -54,7 +56,7 @@
PHY_IMASK_ANC)
#define PHY_FWV_REL_MASK BIT(15)
-#define PHY_FWV_TYPE_MASK GENMASK(11, 8)
+#define PHY_FWV_MAJOR_MASK GENMASK(11, 8)
#define PHY_FWV_MINOR_MASK GENMASK(7, 0)
/* SGMII */
@@ -64,6 +66,10 @@
#define VSPEC1_SGMII_ANEN_ANRS (VSPEC1_SGMII_CTRL_ANEN | \
VSPEC1_SGMII_CTRL_ANRS)
+/* Temperature sensor */
+#define VPSPEC1_TEMP_STA 0x0E
+#define VPSPEC1_TEMP_STA_DATA GENMASK(9, 0)
+
/* WoL */
#define VPSPEC2_WOL_CTL 0x0E06
#define VPSPEC2_WOL_AD01 0x0E08
@@ -71,8 +77,13 @@
#define VPSPEC2_WOL_AD45 0x0E0A
#define WOL_EN BIT(0)
+struct gpy_priv {
+ u8 fw_major;
+ u8 fw_minor;
+};
+
static const struct {
- int type;
+ int major;
int minor;
} ver_need_sgmii_reaneg[] = {
{7, 0x6D},
@@ -80,6 +91,102 @@ static const struct {
{9, 0x73},
};
+#if IS_ENABLED(CONFIG_HWMON)
+/* The original translation formulae of the temperature (in degrees of Celsius)
+ * are as follows:
+ *
+ * T = -2.5761e-11*(N^4) + 9.7332e-8*(N^3) + -1.9165e-4*(N^2) +
+ * 3.0762e-1*(N^1) + -5.2156e1
+ *
+ * where T = [-52.156, 137.961]C and N = [0, 1023].
+ *
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are arranged so that the result of each
+ * operation stays within the integer limits. In addition we need to translate
+ * the formulae to accept millidegrees of Celsius. Here is what it looks like
+ * after the alterations:
+ *
+ * T = -25761e-12*(N^4) + 97332e-9*(N^3) + -191650e-6*(N^2) +
+ * 307620e-3*(N^1) + -52156
+ *
+ * where T = [-52156, 137961]mC and N = [0, 1023].
+ */
+static const struct polynomial poly_N_to_temp = {
+ .terms = {
+ {4, -25761, 1000, 1},
+ {3, 97332, 1000, 1},
+ {2, -191650, 1000, 1},
+ {1, 307620, 1000, 1},
+ {0, -52156, 1, 1}
+ }
+};
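A quick consistency check of the redistributed coefficients above (the comment's -2.5761e-11 becomes -25761 with a divider of 1000 applied once per degree, and so on): evaluating the formula at the endpoints reproduces the range stated in the comment.

    /* N = 0    -> T = -52156 mC = -52.156 C
     * N = 1023 -> T ~= -28214 + 104204 - 200567 + 314695 - 52156
     *               ~= 137962 mC ~= 137.96 C
     */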
+
+static int gpy_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VPSPEC1_TEMP_STA);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENODATA;
+
+ *value = polynomial_calc(&poly_N_to_temp,
+ FIELD_GET(VPSPEC1_TEMP_STA_DATA, ret));
+
+ return 0;
+}
+
+static umode_t gpy_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static const struct hwmon_channel_info *gpy_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops gpy_hwmon_hwmon_ops = {
+ .is_visible = gpy_hwmon_is_visible,
+ .read = gpy_hwmon_read,
+};
+
+static const struct hwmon_chip_info gpy_hwmon_chip_info = {
+ .ops = &gpy_hwmon_hwmon_ops,
+ .info = gpy_hwmon_info,
+};
+
+static int gpy_hwmon_register(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon_dev;
+ char *hwmon_name;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
+ phydev,
+ &gpy_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+#else
+static int gpy_hwmon_register(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int gpy_config_init(struct phy_device *phydev)
{
int ret;
@@ -96,6 +203,9 @@ static int gpy_config_init(struct phy_device *phydev)
static int gpy_probe(struct phy_device *phydev)
{
+ struct device *dev = &phydev->mdio.dev;
+ struct gpy_priv *priv;
+ int fw_version;
int ret;
if (!phydev->is_c45) {
@@ -104,33 +214,38 @@ static int gpy_probe(struct phy_device *phydev)
return ret;
}
- /* Show GPY PHY FW version in dmesg */
- ret = phy_read(phydev, PHY_FWV);
- if (ret < 0)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ fw_version = phy_read(phydev, PHY_FWV);
+ if (fw_version < 0)
+ return fw_version;
+ priv->fw_major = FIELD_GET(PHY_FWV_MAJOR_MASK, fw_version);
+ priv->fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_version);
+
+ ret = gpy_hwmon_register(phydev);
+ if (ret)
return ret;
- phydev_info(phydev, "Firmware Version: 0x%04X (%s)\n", ret,
- (ret & PHY_FWV_REL_MASK) ? "release" : "test");
+ /* Show GPY PHY FW version in dmesg */
+ phydev_info(phydev, "Firmware Version: %d.%d (0x%04X%s)\n",
+ priv->fw_major, priv->fw_minor, fw_version,
+ fw_version & PHY_FWV_REL_MASK ? "" : " test version");
return 0;
}
static bool gpy_sgmii_need_reaneg(struct phy_device *phydev)
{
- int fw_ver, fw_type, fw_minor;
+ struct gpy_priv *priv = phydev->priv;
size_t i;
- fw_ver = phy_read(phydev, PHY_FWV);
- if (fw_ver < 0)
- return true;
-
- fw_type = FIELD_GET(PHY_FWV_TYPE_MASK, fw_ver);
- fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_ver);
-
for (i = 0; i < ARRAY_SIZE(ver_need_sgmii_reaneg); i++) {
- if (fw_type != ver_need_sgmii_reaneg[i].type)
+ if (priv->fw_major != ver_need_sgmii_reaneg[i].major)
continue;
- if (fw_minor < ver_need_sgmii_reaneg[i].minor)
+ if (priv->fw_minor < ver_need_sgmii_reaneg[i].minor)
return true;
break;
}
@@ -295,6 +410,9 @@ static void gpy_update_interface(struct phy_device *phydev)
ret);
break;
}
+
+ if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000)
+ genphy_read_master_slave(phydev);
}
static int gpy_read_status(struct phy_device *phydev)
@@ -495,18 +613,12 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
static int gpy115_loopback(struct phy_device *phydev, bool enable)
{
- int ret;
- int fw_minor;
+ struct gpy_priv *priv = phydev->priv;
if (enable)
return gpy_loopback(phydev, enable);
- ret = phy_read(phydev, PHY_FWV);
- if (ret < 0)
- return ret;
-
- fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret);
- if (fw_minor > 0x0076)
+ if (priv->fw_minor > 0x76)
return gpy_loopback(phydev, 0);
return genphy_soft_reset(phydev);
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 9944cc501806..2a8195c50d14 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -444,15 +444,10 @@ static int tja11xx_hwmon_register(struct phy_device *phydev,
struct tja11xx_priv *priv)
{
struct device *dev = &phydev->mdio.dev;
- int i;
-
- priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!priv->hwmon_name)
- return -ENOMEM;
- for (i = 0; priv->hwmon_name[i]; i++)
- if (hwmon_is_bad_char(priv->hwmon_name[i]))
- priv->hwmon_name[i] = '_';
+ priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(priv->hwmon_name))
+ return PTR_ERR(priv->hwmon_name);
priv->hwmon_dev =
devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 29b1df03f3e8..a87a4b3ffce4 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -190,44 +190,42 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
*/
static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
{
+ u16 adv_l_mask, adv_l = 0;
+ u16 adv_m_mask, adv_m = 0;
int changed = 0;
- u16 adv_l = 0;
- u16 adv_m = 0;
int ret;
+ adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
+ MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+ adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_FORCE:
adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS;
break;
case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
break;
case MASTER_SLAVE_CFG_UNKNOWN:
case MASTER_SLAVE_CFG_UNSUPPORTED:
- return 0;
+ /* if master/slave role is not specified, do not overwrite it */
+ adv_l_mask &= ~MDIO_AN_T1_ADV_L_FORCE_MS;
+ adv_m_mask &= ~MDIO_AN_T1_ADV_M_MST;
+ break;
default:
phydev_warn(phydev, "Unsupported Master/Slave mode\n");
return -EOPNOTSUPP;
}
- switch (phydev->master_slave_set) {
- case MASTER_SLAVE_CFG_MASTER_FORCE:
- case MASTER_SLAVE_CFG_MASTER_PREFERRED:
- adv_m |= MDIO_AN_T1_ADV_M_MST;
- break;
- case MASTER_SLAVE_CFG_SLAVE_FORCE:
- case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
- break;
- default:
- break;
- }
-
adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
- (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP
- | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l);
+ adv_l_mask, adv_l);
if (ret < 0)
return ret;
if (ret > 0)
@@ -236,7 +234,7 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M,
- MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m);
+ adv_m_mask, adv_m);
if (ret < 0)
return ret;
if (ret > 0)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 46acddd865a7..0c6efd792690 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,6 +316,12 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
+ /* If we managed to get here with the PHY state machine in a state other
+ * than PHY_HALTED this is an indication that something went wrong and
+ * we should most likely be using MAC managed PM and we are not.
+ */
+ WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
+
ret = phy_init_hw(phydev);
if (ret < 0)
return ret;
@@ -2024,18 +2030,12 @@ EXPORT_SYMBOL(genphy_config_eee_advert);
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- u16 ctl = 0;
+ u16 ctl;
phydev->pause = 0;
phydev->asym_pause = 0;
- if (SPEED_1000 == phydev->speed)
- ctl |= BMCR_SPEED1000;
- else if (SPEED_100 == phydev->speed)
- ctl |= BMCR_SPEED100;
-
- if (DUPLEX_FULL == phydev->duplex)
- ctl |= BMCR_FULLDPLX;
+ ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
return phy_modify(phydev, MII_BMCR,
~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
@@ -2637,13 +2637,7 @@ int genphy_loopback(struct phy_device *phydev, bool enable)
u16 val, ctl = BMCR_LOOPBACK;
int ret;
- if (phydev->speed == SPEED_1000)
- ctl |= BMCR_SPEED1000;
- else if (phydev->speed == SPEED_100)
- ctl |= BMCR_SPEED100;
-
- if (phydev->duplex == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
+ ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
phy_modify(phydev, MII_BMCR, ~0, ctl);
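Three hunks above (m88e1510_loopback(), genphy_setup_forced() and genphy_loopback()) replace the same open-coded BMCR speed/duplex encoding with the new mii_bmcr_encode_fixed() helper from <linux/mii.h>. Judging from the code it replaces, the helper behaves roughly as in this sketch (the real body may also handle higher speeds):

    /* Sketch only; inferred from the removed lines, not copied from mii.h. */
    static inline u16 bmcr_encode_fixed_sketch(int speed, int duplex)
    {
            u16 bmcr = 0;

            if (speed == SPEED_1000)
                    bmcr = BMCR_SPEED1000;
            else if (speed == SPEED_100)
                    bmcr = BMCR_SPEED100;

            if (duplex == DUPLEX_FULL)
                    bmcr |= BMCR_FULLDPLX;

            return bmcr;
    }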
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 066684b80919..9bd69328dc4d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -43,7 +43,6 @@ struct phylink {
/* private: */
struct net_device *netdev;
const struct phylink_mac_ops *mac_ops;
- const struct phylink_pcs_ops *pcs_ops;
struct phylink_config *config;
struct phylink_pcs *pcs;
struct device *dev;
@@ -759,6 +758,18 @@ static void phylink_resolve_flow(struct phylink_link_state *state)
}
}
+static void phylink_pcs_poll_stop(struct phylink *pl)
+{
+ if (pl->cfg_link_an_mode == MLO_AN_INBAND)
+ del_timer(&pl->link_poll);
+}
+
+static void phylink_pcs_poll_start(struct phylink *pl)
+{
+ if (pl->pcs && pl->pcs->poll && pl->cfg_link_an_mode == MLO_AN_INBAND)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+}
+
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
@@ -779,8 +790,8 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl)
if (pl->link_config.an_enabled &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
phylink_autoneg_inband(pl->cur_link_an_mode)) {
- if (pl->pcs_ops)
- pl->pcs_ops->pcs_an_restart(pl->pcs);
+ if (pl->pcs)
+ pl->pcs->ops->pcs_an_restart(pl->pcs);
else if (pl->config->legacy_pre_march2020)
pl->mac_ops->mac_an_restart(pl->config);
}
@@ -790,6 +801,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
const struct phylink_link_state *state)
{
struct phylink_pcs *pcs = NULL;
+ bool pcs_changed = false;
int err;
phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
@@ -802,8 +814,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
pcs);
return;
}
+
+ pcs_changed = pcs && pl->pcs != pcs;
}
+ phylink_pcs_poll_stop(pl);
+
if (pl->mac_ops->mac_prepare) {
err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
state->interface);
@@ -817,27 +833,17 @@ static void phylink_major_config(struct phylink *pl, bool restart,
/* If we have a new PCS, switch to the new PCS after preparing the MAC
* for the change.
*/
- if (pcs) {
+ if (pcs_changed)
pl->pcs = pcs;
- pl->pcs_ops = pcs->ops;
-
- if (!pl->phylink_disable_state &&
- pl->cfg_link_an_mode == MLO_AN_INBAND) {
- if (pcs->poll)
- mod_timer(&pl->link_poll, jiffies + HZ);
- else
- del_timer(&pl->link_poll);
- }
- }
phylink_mac_config(pl, state);
- if (pl->pcs_ops) {
- err = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
- state->interface,
- state->advertising,
- !!(pl->link_config.pause &
- MLO_PAUSE_AN));
+ if (pl->pcs) {
+ err = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+ state->interface,
+ state->advertising,
+ !!(pl->link_config.pause &
+ MLO_PAUSE_AN));
if (err < 0)
phylink_err(pl, "pcs_config failed: %pe\n",
ERR_PTR(err));
@@ -854,6 +860,8 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
}
+
+ phylink_pcs_poll_start(pl);
}
/*
@@ -869,7 +877,7 @@ static int phylink_change_inband_advert(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return 0;
- if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
+ if (!pl->pcs && pl->config->legacy_pre_march2020) {
/* Legacy method */
phylink_mac_config(pl, &pl->link_config);
phylink_mac_pcs_an_restart(pl);
@@ -886,10 +894,11 @@ static int phylink_change_inband_advert(struct phylink *pl)
* restart negotiation if the pcs_config() helper indicates that
* the programmed advertisement has changed.
*/
- ret = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising,
- !!(pl->link_config.pause & MLO_PAUSE_AN));
+ ret = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+ pl->link_config.interface,
+ pl->link_config.advertising,
+ !!(pl->link_config.pause &
+ MLO_PAUSE_AN));
if (ret < 0)
return ret;
@@ -918,8 +927,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- if (pl->pcs_ops)
- pl->pcs_ops->pcs_get_state(pl->pcs, state);
+ if (pl->pcs)
+ pl->pcs->ops->pcs_get_state(pl->pcs, state);
else if (pl->mac_ops->mac_pcs_get_state &&
pl->config->legacy_pre_march2020)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
@@ -992,8 +1001,8 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- if (pl->pcs_ops && pl->pcs_ops->pcs_link_up)
- pl->pcs_ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
+ if (pl->pcs && pl->pcs->ops->pcs_link_up)
+ pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
pl->cur_interface,
link_state.speed, link_state.duplex);
@@ -1115,7 +1124,7 @@ static void phylink_resolve(struct work_struct *w)
}
phylink_major_config(pl, false, &link_state);
pl->link_config.interface = link_state.interface;
- } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
+ } else if (!pl->pcs && pl->config->legacy_pre_march2020) {
/* The interface remains unchanged, only the speed,
* duplex or pause settings have changed. Call the
* old mac_config() method to configure the MAC/PCS
@@ -2991,6 +3000,7 @@ int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
adv |= ADVERTISE_1000XPSE_ASYM;
return adv;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
return 0x0001;
default:
/* Nothing to do for other modes */
@@ -3030,7 +3040,9 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
/* Ensure ISOLATE bit is disabled */
if (mode == MLO_AN_INBAND &&
- linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)))
bmcr = BMCR_ANENABLE;
else
bmcr = 0;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index e7b0e12cc75b..63f90fe9a4d2 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1290,7 +1290,7 @@ static const struct hwmon_chip_info sfp_hwmon_chip_info = {
static void sfp_hwmon_probe(struct work_struct *work)
{
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
- int err, i;
+ int err;
/* hwmon interface needs to access 16bit registers in atomic way to
* guarantee coherency of the diagnostic monitoring data. If it is not
@@ -1318,16 +1318,12 @@ static void sfp_hwmon_probe(struct work_struct *work)
return;
}
- sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
- if (!sfp->hwmon_name) {
+ sfp->hwmon_name = hwmon_sanitize_name(dev_name(sfp->dev));
+ if (IS_ERR(sfp->hwmon_name)) {
dev_err(sfp->dev, "out of memory for hwmon name\n");
return;
}
- for (i = 0; sfp->hwmon_name[i]; i++)
- if (hwmon_is_bad_char(sfp->hwmon_name[i]))
- sfp->hwmon_name[i] = '_';
-
sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev,
sfp->hwmon_name, sfp,
&sfp_hwmon_chip_info,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 96d3c40932d8..69423b8965b3 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -121,10 +121,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
-
- return smsc_phy_ack_interrupt(phydev);
+ return rc;
}
static int smsc_phy_reset(struct phy_device *phydev)
@@ -146,11 +143,6 @@ static int smsc_phy_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static int lan911x_config_init(struct phy_device *phydev)
-{
- return smsc_phy_ack_interrupt(phydev);
-}
-
static int lan87xx_config_aneg(struct phy_device *phydev)
{
int rc;
@@ -420,9 +412,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
- /* basic functions */
- .config_init = lan911x_config_init,
-
/* IRQ related */
.config_intr = smsc_phy_config_intr,
.handle_interrupt = smsc_phy_handle_interrupt,
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index dafd3e9ebbf8..c8791e9b451d 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1111,7 +1111,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4a365f15533e..9206c660a72e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2968,7 +2968,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
chan->ppp = NULL;
/*
- * This ensures that we have returned from any calls into the
+ * This ensures that we have returned from any calls into
* the channel's start_xmit or ioctl routine before we proceed.
*/
down_write(&pch->chan_sem);
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index ff22b6b1c686..36803d932dff 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0;
+ of_node_put(np);
if (can_low_power) {
/* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index c3d42062559d..9e75ed3f08ce 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -716,10 +716,20 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (!tap) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return total_len;
+ }
+ skb->dev = tap->dev;
+
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
if (err) {
+ rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
}
@@ -732,8 +742,6 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
- rcu_read_lock();
- tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_zcopy_init(skb, msg_control);
@@ -742,14 +750,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
uarg->callback(NULL, uarg, false);
}
- if (tap) {
- skb->dev = tap->dev;
- dev_queue_xmit(skb);
- } else {
- kfree_skb(skb);
- }
+ dev_queue_xmit(skb);
rcu_read_unlock();
-
return total_len;
err_kfree:
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b07dde6f0abf..aac133a1e27a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -749,10 +749,10 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
pcpu_stats = this_cpu_ptr(team->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += skb->len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, skb->len);
if (skb->pkt_type == PACKET_MULTICAST)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
skb->dev = team->dev;
@@ -1720,8 +1720,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
pcpu_stats = this_cpu_ptr(team->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->tx_packets++;
- pcpu_stats->tx_bytes += len;
+ u64_stats_inc(&pcpu_stats->tx_packets);
+ u64_stats_add(&pcpu_stats->tx_bytes, len);
u64_stats_update_end(&pcpu_stats->syncp);
} else {
this_cpu_inc(team->pcpu_stats->tx_dropped);
@@ -1854,11 +1854,11 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
p = per_cpu_ptr(team->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
- rx_packets = p->rx_packets;
- rx_bytes = p->rx_bytes;
- rx_multicast = p->rx_multicast;
- tx_packets = p->tx_packets;
- tx_bytes = p->tx_bytes;
+ rx_packets = u64_stats_read(&p->rx_packets);
+ rx_bytes = u64_stats_read(&p->rx_bytes);
+ rx_multicast = u64_stats_read(&p->rx_multicast);
+ tx_packets = u64_stats_read(&p->tx_packets);
+ tx_bytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
@@ -1870,9 +1870,9 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
* rx_dropped, tx_dropped & rx_nohandler are u32,
* updated without syncp protection.
*/
- rx_dropped += p->rx_dropped;
- tx_dropped += p->tx_dropped;
- rx_nohandler += p->rx_nohandler;
+ rx_dropped += READ_ONCE(p->rx_dropped);
+ tx_dropped += READ_ONCE(p->tx_dropped);
+ rx_nohandler += READ_ONCE(p->rx_nohandler);
}
stats->rx_dropped = rx_dropped;
stats->tx_dropped = tx_dropped;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index e62fc4f2aee0..76659c1c525a 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -637,8 +637,9 @@ config USB_NET_AQC111
* Aquantia AQtion USB to 5GbE
config USB_RTL8153_ECM
- tristate "RTL8153 ECM support"
+ tristate
depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
+ default y
help
This option supports ECM mode for RTL8153 ethernet adapter, when
CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 45d3cc5cc355..21c1ca275cc4 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -212,9 +212,6 @@ void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
-int asix_set_sw_mii(struct usbnet *dev, int in_pm);
-int asix_set_hw_mii(struct usbnet *dev, int in_pm);
-
int asix_read_phy_addr(struct usbnet *dev, bool internal);
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index b4a1b7abcfc9..9ea91c3ff045 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -68,6 +68,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static int asix_set_sw_mii(struct usbnet *dev, int in_pm)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
+
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static int asix_set_hw_mii(struct usbnet *dev, int in_pm)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
static int asix_check_host_enable(struct usbnet *dev, int in_pm)
{
int i, ret;
@@ -297,25 +318,6 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return skb;
}
-int asix_set_sw_mii(struct usbnet *dev, int in_pm)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
-
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable software MII access\n");
- return ret;
-}
-
-int asix_set_hw_mii(struct usbnet *dev, int in_pm)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable hardware MII access\n");
- return ret;
-}
-
int asix_read_phy_addr(struct usbnet *dev, bool internal)
{
int ret, offset;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index ac2d400d1d6c..aff39bf3161d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -164,11 +164,15 @@
#define GMII_PHY_PGSEL_PAGE3 0x0003
#define GMII_PHY_PGSEL_PAGE5 0x0005
+static int ax88179_reset(struct usbnet *dev);
+
struct ax88179_data {
u8 eee_enabled;
u8 eee_active;
u16 rxctl;
- u16 reserved;
+ u8 in_pm;
+ u32 wol_supported;
+ u32 wolopts;
};
struct ax88179_int_data {
@@ -185,15 +189,29 @@ static const struct {
{7, 0xcc, 0x4c, 0x18, 8},
};
+static void ax88179_set_pm_mode(struct usbnet *dev, bool pm_mode)
+{
+ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ ax179_data->in_pm = pm_mode;
+}
+
+static int ax88179_in_pm(struct usbnet *dev)
+{
+ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ return ax179_data->in_pm;
+}
+
static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+ u16 size, void *data)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!in_pm)
+ if (!ax88179_in_pm(dev))
fn = usbnet_read_cmd;
else
fn = usbnet_read_cmd_nopm;
@@ -209,14 +227,14 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, const void *data, int in_pm)
+ u16 size, const void *data)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!in_pm)
+ if (!ax88179_in_pm(dev))
fn = usbnet_write_cmd;
else
fn = usbnet_write_cmd_nopm;
@@ -249,47 +267,6 @@ static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
}
}
-static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
-{
- int ret;
-
- if (2 == size) {
- u16 buf;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
- le16_to_cpus(&buf);
- *((u16 *)data) = buf;
- } else if (4 == size) {
- u32 buf;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
- le32_to_cpus(&buf);
- *((u32 *)data) = buf;
- } else {
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
- }
-
- return ret;
-}
-
-static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, const void *data)
-{
- int ret;
-
- if (2 == size) {
- u16 buf;
- buf = *((u16 *)data);
- cpu_to_le16s(&buf);
- ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, &buf, 1);
- } else {
- ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, data, 1);
- }
-
- return ret;
-}
-
static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
@@ -297,16 +274,16 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
if (2 == size) {
u16 buf = 0;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf);
le16_to_cpus(&buf);
*((u16 *)data) = buf;
} else if (4 == size) {
u32 buf = 0;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf);
le32_to_cpus(&buf);
*((u32 *)data) = buf;
} else {
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, data);
}
return ret;
@@ -322,10 +299,10 @@ static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
buf = *((u16 *)data);
cpu_to_le16s(&buf);
ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, &buf, 0);
+ size, &buf);
} else {
ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, data, 0);
+ size, data);
}
return ret;
@@ -425,55 +402,63 @@ ax88179_phy_write_mmd_indirect(struct usbnet *dev, u16 prtad, u16 devad,
static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
+ struct ax88179_data *priv = dev->driver_priv;
u16 tmp16;
u8 tmp8;
+ ax88179_set_pm_mode(dev, true);
+
usbnet_suspend(intf, message);
+ /* Enable WoL */
+ if (priv->wolopts) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &tmp8);
+ if (priv->wolopts & WAKE_PHY)
+ tmp8 |= AX_MONITOR_MODE_RWLC;
+ if (priv->wolopts & WAKE_MAGIC)
+ tmp8 |= AX_MONITOR_MODE_RWMP;
+
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &tmp8);
+ }
+
/* Disable RX path */
- ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
- 2, 2, &tmp16);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
- 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
/* Force bulk-in zero length */
- ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
/* change clock */
tmp8 = 0;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
/* Configure RX control register => stop operation */
tmp16 = AX_RX_CTL_STOP;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+ ax88179_set_pm_mode(dev, false);
return 0;
}
/* This function is used to enable the autodetach function. */
/* This function is determined by offset 0x43 of EEPROM */
-static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
+static int ax88179_auto_detach(struct usbnet *dev)
{
u16 tmp16;
u8 tmp8;
- int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
- int (*fnw)(struct usbnet *, u8, u16, u16, u16, const void *);
-
- if (!in_pm) {
- fnr = ax88179_read_cmd;
- fnw = ax88179_write_cmd;
- } else {
- fnr = ax88179_read_cmd_nopm;
- fnw = ax88179_write_cmd_nopm;
- }
- if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
+ if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
return 0;
if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
@@ -481,13 +466,13 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
/* Enable Auto Detach bit */
tmp8 = 0;
- fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
tmp8 |= AX_CLK_SELECT_ULR;
- fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
- fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
tmp16 |= AX_PHYPWR_RSTCTL_AT;
- fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
return 0;
}
@@ -495,35 +480,14 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
static int ax88179_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- u16 tmp16;
- u8 tmp8;
-
- usbnet_link_change(dev, 0, 0);
- /* Power up ethernet PHY */
- tmp16 = 0;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
- udelay(1000);
-
- tmp16 = AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
- msleep(200);
+ ax88179_set_pm_mode(dev, true);
- /* Ethernet PHY Auto Detach*/
- ax88179_auto_detach(dev, 1);
+ usbnet_link_change(dev, 0, 0);
- /* Enable clock */
- ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
- tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
- msleep(100);
+ ax88179_reset(dev);
- /* Configure RX control register => start operation */
- tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
- AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+ ax88179_set_pm_mode(dev, false);
return usbnet_resume(intf);
}
@@ -532,40 +496,22 @@ static void
ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- u8 opt;
+ struct ax88179_data *priv = dev->driver_priv;
- if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
- 1, 1, &opt) < 0) {
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
- return;
- }
-
- wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
- wolinfo->wolopts = 0;
- if (opt & AX_MONITOR_MODE_RWLC)
- wolinfo->wolopts |= WAKE_PHY;
- if (opt & AX_MONITOR_MODE_RWMP)
- wolinfo->wolopts |= WAKE_MAGIC;
+ wolinfo->supported = priv->wol_supported;
+ wolinfo->wolopts = priv->wolopts;
}
static int
ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- u8 opt = 0;
+ struct ax88179_data *priv = dev->driver_priv;
- if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ if (wolinfo->wolopts & ~(priv->wol_supported))
return -EINVAL;
- if (wolinfo->wolopts & WAKE_PHY)
- opt |= AX_MONITOR_MODE_RWLC;
- if (wolinfo->wolopts & WAKE_MAGIC)
- opt |= AX_MONITOR_MODE_RWMP;
-
- if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
- 1, 1, &opt) < 0)
- return -EINVAL;
+ priv->wolopts = wolinfo->wolopts;
return 0;
}
@@ -599,8 +545,7 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
/* ax88179/178A returns 2 bytes from eeprom on read */
for (i = first_word; i <= last_word; i++) {
ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
- &eeprom_buff[i - first_word],
- 0);
+ &eeprom_buff[i - first_word]);
if (ret < 0) {
kfree(eeprom_buff);
return -EIO;
@@ -745,7 +690,7 @@ ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
static int ax88179_chk_eee(struct usbnet *dev)
{
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
- struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+ struct ax88179_data *priv = dev->driver_priv;
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -848,7 +793,7 @@ static void ax88179_enable_eee(struct usbnet *dev)
static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
struct usbnet *dev = netdev_priv(net);
- struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+ struct ax88179_data *priv = dev->driver_priv;
edata->eee_enabled = priv->eee_enabled;
edata->eee_active = priv->eee_active;
@@ -859,7 +804,7 @@ static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
struct usbnet *dev = netdev_priv(net);
- struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+ struct ax88179_data *priv = dev->driver_priv;
int ret;
priv->eee_enabled = edata->eee_enabled;
@@ -910,8 +855,8 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
static void ax88179_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct ax88179_data *data = (struct ax88179_data *)dev->data;
- u8 *m_filter = ((u8 *)dev->data) + 12;
+ struct ax88179_data *data = dev->driver_priv;
+ u8 *m_filter = ((u8 *)dev->data);
data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
@@ -923,7 +868,7 @@ static void ax88179_set_multicast(struct net_device *net)
} else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
- /* We use the 20 byte dev->data for our 8 byte filter buffer
+ /* We use dev->data for our 8 byte filter buffer
* to avoid allocating memory that is tricky to free later
*/
u32 crc_bits;
@@ -1069,7 +1014,7 @@ static int ax88179_check_eeprom(struct usbnet *dev)
} while (buf & EEP_BUSY);
__ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
- 2, 2, &eeprom[i * 2], 0);
+ 2, 2, &eeprom[i * 2]);
if ((i == 0) && (eeprom[0] == 0xFF))
return -EINVAL;
@@ -1322,46 +1267,15 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
- u8 buf[5];
- u16 *tmp16;
- u8 *tmp;
- struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
- struct ethtool_eee eee_data;
+ struct ax88179_data *ax179_data;
usbnet_get_endpoints(dev, intf);
- tmp16 = (u16 *)buf;
- tmp = (u8 *)buf;
-
- memset(ax179_data, 0, sizeof(*ax179_data));
-
- /* Power up ethernet PHY */
- *tmp16 = 0;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- msleep(200);
-
- *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
- msleep(100);
-
- /* Read MAC address from DTB or asix chip */
- ax88179_get_mac_addr(dev);
- memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
-
- /* RX bulk configuration */
- memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
-
- dev->rx_urb_size = 1024 * 20;
+ ax179_data = kzalloc(sizeof(*ax179_data), GFP_KERNEL);
+ if (!ax179_data)
+ return -ENOMEM;
- *tmp = 0x34;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
-
- *tmp = 0x52;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
- 1, 1, tmp);
+ dev->driver_priv = ax179_data;
dev->net->netdev_ops = &ax88179_netdev_ops;
dev->net->ethtool_ops = &ax88179_ethtool_ops;
@@ -1384,52 +1298,14 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
- /* Enable checksum offload */
- *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
- AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
-
- *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
- AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
-
- /* Configure RX control register => start operation */
- *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
- AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
-
- *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
- AX_MONITOR_MODE_RWMP;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
-
- /* Configure default medium type => giga */
- *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
- AX_MEDIUM_GIGAMODE;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
- 2, 2, tmp16);
-
- ax88179_led_setting(dev);
-
- ax179_data->eee_enabled = 0;
- ax179_data->eee_active = 0;
-
- ax88179_disable_eee(dev);
-
- ax88179_ethtool_get_eee(dev, &eee_data);
- eee_data.advertised = 0;
- ax88179_ethtool_set_eee(dev, &eee_data);
-
- /* Restart autoneg */
- mii_nway_restart(&dev->mii);
-
- usbnet_link_change(dev, 0, 0);
+ ax88179_reset(dev);
return 0;
}
static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ struct ax88179_data *ax179_data = dev->driver_priv;
u16 tmp16;
/* Configure RX control register => stop operation */
@@ -1442,6 +1318,8 @@ static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
/* Power down ethernet PHY */
tmp16 = 0;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+
+ kfree(ax179_data);
}
static void
@@ -1618,7 +1496,7 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
static int ax88179_link_reset(struct usbnet *dev)
{
- struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+ struct ax88179_data *ax179_data = dev->driver_priv;
u8 tmp[5], link_sts;
u16 mode, tmp16, delay = HZ / 10;
u32 tmp32 = 0x40000000;
@@ -1693,7 +1571,7 @@ static int ax88179_reset(struct usbnet *dev)
u8 buf[5];
u16 *tmp16;
u8 *tmp;
- struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+ struct ax88179_data *ax179_data = dev->driver_priv;
struct ethtool_eee eee_data;
tmp16 = (u16 *)buf;
@@ -1712,10 +1590,11 @@ static int ax88179_reset(struct usbnet *dev)
msleep(100);
/* Ethernet PHY Auto Detach*/
- ax88179_auto_detach(dev, 0);
+ ax88179_auto_detach(dev);
/* Read MAC address from DTB or asix chip */
ax88179_get_mac_addr(dev);
+ memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
/* RX bulk configuration */
memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
@@ -1730,12 +1609,6 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
1, 1, tmp);
- dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
-
- dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
-
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
@@ -1761,6 +1634,12 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
2, 2, tmp16);
+ /* Check if WoL is supported */
+ ax179_data->wol_supported = 0;
+ if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &tmp) > 0)
+ ax179_data->wol_supported = WAKE_MAGIC | WAKE_PHY;
+
ax88179_led_setting(dev);
ax179_data->eee_enabled = 0;
@@ -1801,7 +1680,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1814,7 +1693,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1827,7 +1706,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1840,7 +1719,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1853,7 +1732,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1866,7 +1745,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1879,7 +1758,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1892,7 +1771,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1905,7 +1784,7 @@ static const struct driver_info toshiba_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1918,7 +1797,7 @@ static const struct driver_info mct_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1931,7 +1810,7 @@ static const struct driver_info at_umc2000_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1944,7 +1823,7 @@ static const struct driver_info at_umc200_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1957,7 +1836,7 @@ static const struct driver_info at_umc2000sp_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1965,55 +1844,55 @@ static const struct driver_info at_umc2000sp_info = {
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
- USB_DEVICE(0x0b95, 0x1790),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x1790, 0xff, 0xff, 0),
.driver_info = (unsigned long)&ax88179_info,
}, {
/* ASIX AX88178A 10/100/1000 */
- USB_DEVICE(0x0b95, 0x178a),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x178a, 0xff, 0xff, 0),
.driver_info = (unsigned long)&ax88178a_info,
}, {
/* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
- USB_DEVICE(0x04b4, 0x3610),
+ USB_DEVICE_AND_INTERFACE_INFO(0x04b4, 0x3610, 0xff, 0xff, 0),
.driver_info = (unsigned long)&cypress_GX3_info,
}, {
/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
- USB_DEVICE(0x2001, 0x4a00),
+ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x4a00, 0xff, 0xff, 0),
.driver_info = (unsigned long)&dlink_dub1312_info,
}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
- USB_DEVICE(0x0df6, 0x0072),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0072, 0xff, 0xff, 0),
.driver_info = (unsigned long)&sitecom_info,
}, {
/* Samsung USB Ethernet Adapter */
- USB_DEVICE(0x04e8, 0xa100),
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e8, 0xa100, 0xff, 0xff, 0),
.driver_info = (unsigned long)&samsung_info,
}, {
/* Lenovo OneLinkDock Gigabit LAN */
- USB_DEVICE(0x17ef, 0x304b),
+ USB_DEVICE_AND_INTERFACE_INFO(0x17ef, 0x304b, 0xff, 0xff, 0),
.driver_info = (unsigned long)&lenovo_info,
}, {
/* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
- USB_DEVICE(0x050d, 0x0128),
+ USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x0128, 0xff, 0xff, 0),
.driver_info = (unsigned long)&belkin_info,
}, {
/* Toshiba USB 3.0 GBit Ethernet Adapter */
- USB_DEVICE(0x0930, 0x0a13),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x0a13, 0xff, 0xff, 0),
.driver_info = (unsigned long)&toshiba_info,
}, {
/* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */
- USB_DEVICE(0x0711, 0x0179),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0711, 0x0179, 0xff, 0xff, 0),
.driver_info = (unsigned long)&mct_info,
}, {
/* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
- USB_DEVICE(0x07c9, 0x000e),
+ USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000e, 0xff, 0xff, 0),
.driver_info = (unsigned long)&at_umc2000_info,
}, {
/* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */
- USB_DEVICE(0x07c9, 0x000f),
+ USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000f, 0xff, 0xff, 0),
.driver_info = (unsigned long)&at_umc200_info,
}, {
/* Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
- USB_DEVICE(0x07c9, 0x0010),
+ USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x0010, 0xff, 0xff, 0),
.driver_info = (unsigned long)&at_umc2000sp_info,
},
{ },
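The ax88179 hunks above replace the duplicated *_nopm register accessors with a single read/write path that is bracketed by ax88179_set_pm_mode(dev, true/false) in suspend() and resume(). A minimal sketch of the selection this presumably enables inside the low-level helpers follows; the in_pm field name is an assumption, since the helper bodies are not part of this excerpt:

	/* Sketch only: route register access through the PM-safe usbnet
	 * helpers while a suspend/resume transition is in flight.
	 */
	static void ax88179_set_pm_mode(struct usbnet *dev, bool pm_mode)
	{
		struct ax88179_data *priv = dev->driver_priv;

		priv->in_pm = pm_mode;		/* assumed field name */
	}

	static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
				      u16 index, u16 size, void *data)
	{
		struct ax88179_data *priv = dev->driver_priv;
		int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);

		fn = priv->in_pm ? usbnet_read_cmd_nopm : usbnet_read_cmd;

		return fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
			  USB_RECIP_DEVICE, value, index, data, size);
	}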
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 1a376ed45d7a..843893482abd 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -8,13 +8,13 @@
*
* Based on the work of
* Donald Becker
- *
+ *
* Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
* - adds support for Belkin F5U011
*/
/*
- *
+ *
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@@ -54,7 +54,7 @@ static const char driver_name[] = "catc";
/*
* Some defines.
- */
+ */
#define STATS_UPDATE (HZ) /* Time between stats updates */
#define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */
@@ -280,7 +280,7 @@ static void catc_irq_done(struct urb *urb)
struct catc *catc = urb->context;
u8 *data = urb->transfer_buffer;
int status = urb->status;
- unsigned int hasdata = 0, linksts = LinkNoChange;
+ unsigned int hasdata, linksts = LinkNoChange;
int res;
if (!catc->is_f5u011) {
@@ -332,7 +332,7 @@ static void catc_irq_done(struct urb *urb)
dev_err(&catc->usbdev->dev,
"submit(rx_urb) status %d\n", res);
}
- }
+ }
}
resubmit:
res = usb_submit_urb (urb, GFP_ATOMIC);
@@ -538,7 +538,7 @@ static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
unsigned long flags;
spin_lock_irqsave(&catc->ctrl_lock, flags);
-
+
q = catc->ctrl_queue + catc->ctrl_head;
q->dir = dir;
@@ -639,7 +639,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
memset(catc->multicast, 0xff, 64);
rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
- }
+ }
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
@@ -806,7 +806,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
- if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
+ if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
ret = -ENOMEM;
@@ -814,17 +814,17 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
- if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
+ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
dev_dbg(dev, "Testing for f5u011\n");
- catc->is_f5u011 = 1;
+ catc->is_f5u011 = 1;
atomic_set(&catc->recq_sz, 0);
pktsz = RX_PKT_SZ;
} else {
pktsz = RX_MAX_BURST * (PKT_SZ + 2);
}
-
+
usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
NULL, NULL, 0, catc_ctrl_done, catc);
@@ -854,7 +854,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
*buf = 0x87654321;
catc_write_mem(catc, 0xfa80, buf, 4);
catc_read_mem(catc, 0x7a80, buf, 4);
-
+
switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
@@ -873,32 +873,32 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
}
kfree(buf);
-
+
dev_dbg(dev, "Getting MAC from SEEROM.\n");
-
+
catc_get_mac(catc, macbuf);
eth_hw_addr_set(netdev, macbuf);
-
+
dev_dbg(dev, "Setting MAC into registers.\n");
-
+
for (i = 0; i < 6; i++)
catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
-
+
dev_dbg(dev, "Filling the multicast list.\n");
-
+
eth_broadcast_addr(broadcast);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
-
+
dev_dbg(dev, "Clearing error counters.\n");
-
+
for (i = 0; i < 8; i++)
catc_set_reg(catc, EthStats + i, 0);
catc->last_stats = jiffies;
-
+
dev_dbg(dev, "Enabling.\n");
-
+
catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
catc_set_reg(catc, LEDCtrl, LEDLink);
@@ -908,7 +908,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc_reset(catc);
catc_get_mac(catc, macbuf);
eth_hw_addr_set(netdev, macbuf);
-
+
dev_dbg(dev, "Setting RX Mode\n");
catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
catc->rxmode[1] = 0;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 359ea0d10e59..baa9b14b1644 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -218,7 +218,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(!skb2))
goto next;
skb_trim(skb2, len);
- put_unaligned_le16(BIT(15) | (1 << 11) | len,
+ put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
eem_linkcmd(dev, skb2);
break;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d55f59ce4a31..8d5cbda33f66 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1892,7 +1892,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
}
static const struct driver_info cdc_ncm_info = {
- .description = "CDC NCM",
+ .description = "CDC NCM (NO ZLP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_ETHER,
.bind = cdc_ncm_bind,
@@ -1904,6 +1904,19 @@ static const struct driver_info cdc_ncm_info = {
.set_rx_mode = usbnet_cdc_update_filter,
};
+/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
+static const struct driver_info cdc_ncm_zlp_info = {
+ .description = "CDC NCM (SEND ZLP)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
/* Same as cdc_ncm_info, but with FLAG_WWAN */
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
@@ -2010,6 +2023,16 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
+ /* DisplayLink docking stations */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x17e9,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long)&cdc_ncm_zlp_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
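For reference, the open-coded DisplayLink match entry above is the expanded form of the USB_VENDOR_AND_INTERFACE_INFO() helper from <linux/usb.h>; an equivalent, purely illustrative entry would be:

	/* Illustrative only: match any DisplayLink (0x17e9) interface that
	 * exposes the CDC NCM class/subclass/protocol triplet and bind it
	 * to the ZLP-enabled driver_info.
	 */
	{ USB_VENDOR_AND_INTERFACE_INFO(0x17e9, USB_CLASS_COMM,
					USB_CDC_SUBCLASS_NCM,
					USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_ncm_zlp_info,
	},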
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 32637df0f4cc..f4a44f05c6ab 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -120,7 +120,7 @@ static const struct driver_info an2720_info = {
#endif /* CONFIG_USB_AN2720 */
-
+
#ifdef CONFIG_USB_BELKIN
#define HAVE_HARDWARE
@@ -140,7 +140,7 @@ static const struct driver_info belkin_info = {
#endif /* CONFIG_USB_BELKIN */
-
+
#ifdef CONFIG_USB_EPSON2888
#define HAVE_HARDWARE
@@ -167,7 +167,7 @@ static const struct driver_info epson2888_info = {
#endif /* CONFIG_USB_EPSON2888 */
-
+
/*-------------------------------------------------------------------------
*
* info from Jonathan McDowell <noodles@earth.li>
@@ -181,7 +181,7 @@ static const struct driver_info kc2190_info = {
};
#endif /* CONFIG_USB_KC2190 */
-
+
#ifdef CONFIG_USB_ARMLINUX
#define HAVE_HARDWARE
@@ -222,7 +222,7 @@ static const struct driver_info blob_info = {
#endif /* CONFIG_USB_ARMLINUX */
-
+
/*-------------------------------------------------------------------------*/
#ifndef HAVE_HARDWARE
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 9b2bc1993ece..c9efb7df892e 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -221,7 +221,7 @@ struct kaweth_device
dma_addr_t rxbufferhandle;
__u8 *rx_buf;
-
+
struct sk_buff *tx_skb;
__u8 *firmware_buf;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 17c9c63b8eeb..2c82fbcaab22 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -18,7 +18,7 @@
/*
- * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
+ * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
*
* The protocol and handshaking used here should be bug-compatible
* with the Linux 2.2 "plusb" driver, by Deti Fliegl.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 571a399c195d..709e3c59e340 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1390,6 +1390,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)}, /* Cinterion MV31 RmNet based on new baseline */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f3, 0)}, /* Cinterion MV32-W-A RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f4, 0)}, /* Cinterion MV32-W-B RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7389d6ef8569..0f6efaabaa32 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "12"
/* Information for net */
-#define NET_VERSION "12"
+#define NET_VERSION "13"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2156,7 +2156,7 @@ static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)
}
static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
- struct sk_buff *skb, u32 len, u32 transport_offset)
+ struct sk_buff *skb, u32 len)
{
u32 mss = skb_shinfo(skb)->gso_size;
u32 opts1, opts2 = 0;
@@ -2167,6 +2167,8 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
opts1 = len | TX_FS | TX_LS;
if (mss) {
+ u32 transport_offset = (u32)skb_transport_offset(skb);
+
if (transport_offset > GTTCPHO_MAX) {
netif_warn(tp, tx_err, tp->netdev,
"Invalid transport offset 0x%x for TSO\n",
@@ -2197,6 +2199,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
opts1 |= transport_offset << GTTCPHO_SHIFT;
opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u32 transport_offset = (u32)skb_transport_offset(skb);
u8 ip_protocol;
if (transport_offset > TCPHO_MAX) {
@@ -2260,7 +2263,6 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int len;
- u32 offset;
skb = __skb_dequeue(&skb_head);
if (!skb)
@@ -2276,9 +2278,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
tx_data = tx_agg_align(tx_data);
tx_desc = (struct tx_desc *)tx_data;
- offset = (u32)skb_transport_offset(skb);
-
- if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+ if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) {
r8152_csum_workaround(tp, skb, &skb_head);
continue;
}
@@ -2759,9 +2759,9 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
{
u32 mss = skb_shinfo(skb)->gso_size;
int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
- int offset = skb_transport_offset(skb);
- if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) &&
+ skb_transport_offset(skb) > max_offset)
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
features &= ~NETIF_F_GSO_MASK;
@@ -5917,7 +5917,8 @@ static void r8153_enter_oob(struct r8152 *tp)
wait_oob_link_list_ready(tp);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
switch (tp->version) {
case RTL_VER_03:
@@ -5953,6 +5954,10 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -6555,6 +6560,9 @@ static void rtl8156_down(struct r8152 *tp)
rtl_disable(tp);
rtl_reset_bmu(tp);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
+
/* Clear teredo wake event. bit[15:8] is the teredo wakeup
* type. Set it to zero. bits[7:0] are the W1C bits about
* the events. Set them to all 1 to clear them.
@@ -6565,6 +6573,10 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data |= NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
rtl_rx_vlan_en(tp, true);
rxdy_gated_en(tp, false);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bd03e16f98a1..bfb58c91db04 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -71,22 +71,22 @@ struct smsc95xx_priv {
struct fwnode_handle *irqfwnode;
struct mii_bus *mdiobus;
struct phy_device *phydev;
+ struct task_struct *pm_task;
};
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data, int in_pm)
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
- BUG_ON(!dev);
-
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_read_cmd;
else
fn = usbnet_read_cmd_nopm;
@@ -107,16 +107,15 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
return ret;
}
-static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data, int in_pm)
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
- BUG_ON(!dev);
-
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_write_cmd;
else
fn = usbnet_write_cmd_nopm;
@@ -134,41 +133,16 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
return ret;
}
-static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
- u32 *data)
-{
- return __smsc95xx_read_reg(dev, index, data, 1);
-}
-
-static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
- u32 data)
-{
- return __smsc95xx_write_reg(dev, index, data, 1);
-}
-
-static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
-{
- return __smsc95xx_read_reg(dev, index, data, 0);
-}
-
-static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
-{
- return __smsc95xx_write_reg(dev, index, data, 0);
-}
-
/* Loop until the read is completed with timeout
* called with phy_mutex held */
-static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
- int in_pm)
+static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
{
unsigned long start_time = jiffies;
u32 val;
int ret;
do {
- ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
+ ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
if (ret < 0) {
/* Ignore -ENODEV error during disconnect() */
if (ret == -ENODEV)
@@ -189,8 +163,7 @@ static u32 mii_address_cmd(int phy_id, int idx, u16 op)
return (phy_id & 0x1f) << 11 | (idx & 0x1f) << 6 | op;
}
-static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
- int in_pm)
+static int smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx)
{
u32 val, addr;
int ret;
@@ -198,7 +171,7 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
@@ -206,20 +179,20 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
/* set the address, index & direction (read from PHY) */
addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
- ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
goto done;
}
- ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
+ ret = smsc95xx_read_reg(dev, MII_DATA, &val);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error reading MII_DATA\n");
@@ -237,8 +210,8 @@ done:
return ret;
}
-static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
- int idx, int regval, int in_pm)
+static void smsc95xx_mdio_write(struct usbnet *dev, int phy_id, int idx,
+ int regval)
{
u32 val, addr;
int ret;
@@ -246,14 +219,14 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
val = regval;
- ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_DATA, val);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error writing MII_DATA\n");
@@ -262,14 +235,14 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
/* set the address, index & direction (write to PHY) */
addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
- ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
if (ret < 0) {
if (ret != -ENODEV)
netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
goto done;
@@ -279,25 +252,11 @@ done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_mdio_read_nopm(struct usbnet *dev, int idx)
-{
- struct smsc95xx_priv *pdata = dev->driver_priv;
-
- return __smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, idx, 1);
-}
-
-static void smsc95xx_mdio_write_nopm(struct usbnet *dev, int idx, int regval)
-{
- struct smsc95xx_priv *pdata = dev->driver_priv;
-
- __smsc95xx_mdio_write(dev, pdata->phydev->mdio.addr, idx, regval, 1);
-}
-
static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
struct usbnet *dev = bus->priv;
- return __smsc95xx_mdio_read(dev, phy_id, idx, 0);
+ return smsc95xx_mdio_read(dev, phy_id, idx);
}
static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
@@ -305,7 +264,7 @@ static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
{
struct usbnet *dev = bus->priv;
- __smsc95xx_mdio_write(dev, phy_id, idx, regval, 0);
+ smsc95xx_mdio_write(dev, phy_id, idx, regval);
return 0;
}
@@ -865,7 +824,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
}
/* Starts the Receive path */
-static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
+static int smsc95xx_start_rx_path(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
@@ -874,7 +833,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
+ return smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
}
static int smsc95xx_reset(struct usbnet *dev)
@@ -1057,7 +1016,7 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
}
- ret = smsc95xx_start_rx_path(dev, 0);
+ ret = smsc95xx_start_rx_path(dev);
if (ret < 0) {
netdev_warn(dev->net, "Failed to start RX path\n");
return ret;
@@ -1291,16 +1250,17 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
return crc << ((filter % 2) * 16);
}
-static int smsc95xx_link_ok_nopm(struct usbnet *dev)
+static int smsc95xx_link_ok(struct usbnet *dev)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
/* first, a dummy read, needed to latch some MII phys */
- ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
+ ret = smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, MII_BMSR);
if (ret < 0)
return ret;
- ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
+ ret = smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, MII_BMSR);
if (ret < 0)
return ret;
@@ -1313,14 +1273,14 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
val |= PM_CTL_SUS_MODE_0;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1332,12 +1292,12 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
if (pdata->wolopts & WAKE_PHY)
val |= PM_CTL_WUPS_ED_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
/* read back PM_CTRL */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
@@ -1349,34 +1309,34 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
static int smsc95xx_enter_suspend1(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
+ int ret, phy_id = pdata->phydev->mdio.addr;
u32 val;
- int ret;
/* reconfigure link pulse detection timing for
* compatibility with non-standard link partners
*/
if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
- smsc95xx_mdio_write_nopm(dev, PHY_EDPD_CONFIG,
- PHY_EDPD_CONFIG_DEFAULT);
+ smsc95xx_mdio_write(dev, phy_id, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
/* enable energy detect power-down mode */
- ret = smsc95xx_mdio_read_nopm(dev, PHY_MODE_CTRL_STS);
+ ret = smsc95xx_mdio_read(dev, phy_id, PHY_MODE_CTRL_STS);
if (ret < 0)
return ret;
ret |= MODE_CTRL_STS_EDPWRDOWN_;
- smsc95xx_mdio_write_nopm(dev, PHY_MODE_CTRL_STS, ret);
+ smsc95xx_mdio_write(dev, phy_id, PHY_MODE_CTRL_STS, ret);
/* enter SUSPEND1 mode */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_1;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1384,7 +1344,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
val &= ~PM_CTL_WUPS_;
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1399,14 +1359,14 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_2;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1421,7 +1381,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, RX_FIFO_INF, &val);
+ ret = smsc95xx_read_reg(dev, RX_FIFO_INF, &val);
if (ret < 0)
return ret;
@@ -1430,14 +1390,14 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
return -EBUSY;
}
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_3 | PM_CTL_RES_CLR_WKP_STS;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1445,7 +1405,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
val &= ~PM_CTL_WUPS_;
val |= PM_CTL_WUPS_WOL_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1490,9 +1450,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
+ pdata->pm_task = current;
+
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
+ pdata->pm_task = NULL;
return ret;
}
@@ -1501,8 +1464,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
pdata->suspend_flags = 0;
}
- /* determine if link is up using only _nopm functions */
- link_up = smsc95xx_link_ok_nopm(dev);
+ link_up = smsc95xx_link_ok(dev);
if (message.event == PM_EVENT_AUTO_SUSPEND &&
(pdata->features & FEATURE_REMOTE_WAKEUP)) {
@@ -1519,23 +1481,23 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
netdev_info(dev->net, "entering SUSPEND2 mode\n");
/* disable energy detect (link up) & wake up events */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
goto done;
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
goto done;
@@ -1626,7 +1588,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
}
for (i = 0; i < (wuff_filter_count * 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, filter_mask[i]);
if (ret < 0) {
kfree(filter_mask);
goto done;
@@ -1635,50 +1597,50 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
kfree(filter_mask);
for (i = 0; i < (wuff_filter_count / 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, command[i]);
if (ret < 0)
goto done;
}
for (i = 0; i < (wuff_filter_count / 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, offset[i]);
if (ret < 0)
goto done;
}
for (i = 0; i < (wuff_filter_count / 2); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, crc[i]);
if (ret < 0)
goto done;
}
/* clear any pending pattern match packet status */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val |= WUCSR_WUFR_;
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
}
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val |= WUCSR_MPR_;
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
}
/* enable/disable wakeup sources */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
@@ -1698,12 +1660,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
val &= ~WUCSR_MPEN_;
}
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
/* enable wol wakeup source */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
goto done;
@@ -1713,12 +1675,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
if (pdata->wolopts & WAKE_PHY)
val |= PM_CTL_ED_EN_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
goto done;
/* enable receiver to enable frame reception */
- smsc95xx_start_rx_path(dev, 1);
+ smsc95xx_start_rx_path(dev);
/* some wol options are enabled, so enter SUSPEND0 */
netdev_info(dev->net, "entering SUSPEND0 mode\n");
@@ -1732,6 +1694,7 @@ done:
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+ pdata->pm_task = NULL;
return ret;
}
@@ -1752,29 +1715,31 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ pdata->pm_task = current;
+
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
- return ret;
+ goto done;
/* clear wake-up status */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
- return ret;
+ goto done;
}
phy_init_hw(pdata->phydev);
@@ -1783,15 +1748,20 @@ static int smsc95xx_resume(struct usb_interface *intf)
if (ret < 0)
netdev_warn(dev->net, "usbnet_resume error\n");
+done:
+ pdata->pm_task = NULL;
return ret;
}
static int smsc95xx_reset_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
+ pdata->pm_task = current;
ret = smsc95xx_reset(dev);
+ pdata->pm_task = NULL;
if (ret < 0)
return ret;
@@ -2088,6 +2058,11 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9E08),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
+ USB_DEVICE(0x184F, 0x0051),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
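The smsc95xx hunks above drop the in_pm arguments and the *_nopm wrappers in favour of recording which task is executing the PM callbacks (pdata->pm_task = current in suspend, resume and reset_resume) and comparing against current inside the register helpers. A minimal sketch of that predicate, using a hypothetical helper name for illustration:

	/* Illustrative only: helpers invoked from the task that is currently
	 * running suspend()/resume() must use the usbnet *_nopm accessors;
	 * every other caller uses the normal ones.
	 */
	static bool smsc95xx_in_pm(struct usbnet *dev)
	{
		struct smsc95xx_priv *pdata = dev->driver_priv;

		return current == pdata->pm_task;
	}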
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 78a92751ce4c..aaa89b4cfd50 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -17,9 +17,6 @@
* issues can usefully be addressed by this framework.
*/
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -337,8 +334,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
skb->protocol = eth_type_trans (skb, dev->net);
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += skb->len;
+ u64_stats_inc(&stats64->rx_packets);
+ u64_stats_add(&stats64->rx_bytes, skb->len);
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
@@ -384,7 +381,7 @@ insanity:
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
-
+
/*-------------------------------------------------------------------------
*
* Network Device Driver (peer link to "Host Device", from USB host)
@@ -849,13 +846,11 @@ int usbnet_stop (struct net_device *net)
mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
- /* deferred work (task, timer, softirq) must also stop.
- * can't flush_scheduled_work() until we drop rtnl (later),
- * else workers could deadlock; so make workers a NOP.
- */
+ /* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->kevent);
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1258,8 +1253,8 @@ static void tx_complete (struct urb *urb)
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
- stats64->tx_packets += entry->packets;
- stats64->tx_bytes += entry->length;
+ u64_stats_add(&stats64->tx_packets, entry->packets);
+ u64_stats_add(&stats64->tx_bytes, entry->length);
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
} else {
dev->net->stats.tx_errors++;
@@ -1619,8 +1614,6 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- cancel_work_sync(&dev->kevent);
-
usb_scuttle_anchored_urbs(&dev->deferred);
if (dev->driver_info->unbind)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2cb833b3006a..466da01ba2e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,7 +312,6 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
- struct netdev_queue *queue = NULL;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -330,7 +329,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
- queue = netdev_get_tx_queue(dev, rxq);
/* The napi pointer is available when an XDP program is
* attached or when GRO is enabled
@@ -342,8 +340,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
- if (queue)
- txq_trans_cond_update(queue);
if (!use_napi)
dev_lstats_add(dev, length);
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 356cf8dd4164..9cce7dec7366 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -242,9 +245,15 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for refilling if we run low on memory. */
+ /* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* Is delayed refill enabled? */
+ bool refill_enabled;
+
+ /* The lock to synchronize the access to refill_enabled */
+ spinlock_t refill_lock;
+
/* Work struct for config space updates */
struct work_struct config_work;
@@ -261,6 +270,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -278,6 +293,9 @@ struct padded_vnet_hdr {
char padding[12];
};
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -348,6 +366,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = true;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = false;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -1037,8 +1069,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
@@ -1176,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
if (!hdr_hash || !skb)
return;
- switch ((int)hdr_hash->hash_report) {
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1194,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
@@ -1527,8 +1562,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+ spin_lock(&vi->refill_lock);
+ if (vi->refill_enabled)
+ schedule_delayed_work(&vi->refill, 0);
+ spin_unlock(&vi->refill_lock);
+ }
}
u64_stats_update_begin(&rq->stats.syncp);
@@ -1601,6 +1640,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1651,6 +1695,8 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
+ enable_delayed_refill(vi);
+
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
@@ -1846,6 +1892,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+ /* 1. wait for all xmit to complete
+ * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -2033,6 +2143,8 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
+ /* Make sure NAPI doesn't schedule refill work */
+ disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
@@ -2254,10 +2366,57 @@ static void virtnet_get_ringparam(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+}
+
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
@@ -2587,27 +2746,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight if the link is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2615,16 +2836,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2743,10 +2967,12 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -2792,6 +3018,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ enable_delayed_refill(vi);
+
if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
if (err)
@@ -3138,6 +3366,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -3145,26 +3394,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -3411,6 +3648,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3535,6 +3774,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ spin_lock_init(&vi->refill_lock);
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -3546,6 +3786,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -3780,7 +4027,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
- VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 7a38925f4165..a666a88ac1ff 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2021, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index f9f3a23d1698..41c0660a0c54 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 74d4e8bc4abc..41d6767283a6 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -40,7 +40,13 @@ enum {
VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
VMXNET3_REG_MACH = 0x30, /* MAC Address High */
VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
- VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
+ VMXNET3_REG_ECR = 0x40, /* Event Cause Register */
+ VMXNET3_REG_DCR = 0x48, /* Device capability register,
+ * from 0x48 to 0x80
+ */
+ VMXNET3_REG_PTCR = 0x88, /* Passthru capability register
+ * from 0x88 to 0xb0
+ */
};
/* BAR 0 */
@@ -51,8 +57,18 @@ enum {
VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
};
-#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
-#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+/* For the large PT BAR, the following are the offsets to the DB registers */
+enum {
+ VMXNET3_REG_LB_TXPROD = 0x1000, /* Tx Producer Index */
+ VMXNET3_REG_LB_RXPROD = 0x1400, /* Rx Producer Index for ring 1 */
+ VMXNET3_REG_LB_RXPROD2 = 0x1800, /* Rx Producer Index for ring 2 */
+};
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_LARGE_PT_REG_SIZE 8192 /* large PT pages */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+#define VMXNET3_LARGE_BAR0_REG_SIZE (4096 * 4096) /* LARGE BAR 0 */
+#define VMXNET3_OOB_REG_SIZE (4094 * 4096) /* OOB pages */
#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
#define VMXNET3_REG_ALIGN_MASK 0x7
@@ -83,6 +99,9 @@ enum {
VMXNET3_CMD_SET_COALESCE,
VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_SET_RSS_FIELDS,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_RESERVED5,
+ VMXNET3_CMD_SET_RING_BUFFER_SIZE,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -101,6 +120,9 @@ enum {
VMXNET3_CMD_GET_RESERVED2,
VMXNET3_CMD_GET_RESERVED3,
VMXNET3_CMD_GET_MAX_QUEUES_CONF,
+ VMXNET3_CMD_GET_RESERVED4,
+ VMXNET3_CMD_GET_MAX_CAPABILITIES,
+ VMXNET3_CMD_GET_DCR0_REG,
};
/*
@@ -126,17 +148,17 @@ struct Vmxnet3_TxDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 msscof:14; /* MSS, checksum offset, flags */
- u32 ext1:1;
+ u32 ext1:1; /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
u32 dtype:1; /* descriptor type */
- u32 oco:1;
+ u32 oco:1; /* Outer csum offload */
u32 gen:1; /* generation bit */
u32 len:14;
#else
u32 len:14;
u32 gen:1; /* generation bit */
- u32 oco:1;
+ u32 oco:1; /* Outer csum offload */
u32 dtype:1; /* descriptor type */
- u32 ext1:1;
+ u32 ext1:1; /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
u32 msscof:14; /* MSS, checksum offset, flags */
#endif /* __BIG_ENDIAN_BITFIELD */
@@ -240,11 +262,13 @@ struct Vmxnet3_RxCompDesc {
u32 rqID:10; /* rx queue/ring ID */
u32 sop:1; /* Start of Packet */
u32 eop:1; /* End of Packet */
- u32 ext1:2;
+ u32 ext1:2; /* bit 0: indicating v4/v6/.. is for inner header */
+ /* bit 1: indicating rssType is based on inner header */
u32 rxdIdx:12; /* Index of the RxDesc */
#else
u32 rxdIdx:12; /* Index of the RxDesc */
- u32 ext1:2;
+ u32 ext1:2; /* bit 0: indicating v4/v6/.. is for inner header */
+ /* bit 1: indicating rssType is based on inner header */
u32 eop:1; /* End of Packet */
u32 sop:1; /* Start of Packet */
u32 rqID:10; /* rx queue/ring ID */
@@ -378,6 +402,8 @@ union Vmxnet3_GenericDesc {
/* max # of tx descs for a non-tso pkt */
#define VMXNET3_MAX_TXD_PER_PKT 16
+/* max # of tx descs for a tso pkt */
+#define VMXNET3_MAX_TSO_TXD_PER_PKT 24
/* Max size of a single rx buffer */
#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
@@ -724,6 +750,13 @@ enum Vmxnet3_RSSField {
VMXNET3_RSS_FIELDS_ESPIP6 = 0x0020,
};
+struct Vmxnet3_RingBufferSize {
+ __le16 ring1BufSizeType0;
+ __le16 ring1BufSizeType1;
+ __le16 ring2BufSizeType1;
+ __le16 pad;
+};
+
/* If the command data <= 16 bytes, use the shared memory directly.
* otherwise, use variable length configuration descriptor.
*/
@@ -731,6 +764,7 @@ union Vmxnet3_CmdInfo {
struct Vmxnet3_VariableLenConfDesc varConf;
struct Vmxnet3_SetPolling setPolling;
enum Vmxnet3_RSSField setRssFields;
+ struct Vmxnet3_RingBufferSize ringBufSize;
__le64 data[2];
};
@@ -801,4 +835,30 @@ struct Vmxnet3_DriverShared {
#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
#define VMXNET3_LINK_DOWN 0
+#define VMXNET3_DCR_ERROR 31 /* error when bit 31 of DCR is set */
+#define VMXNET3_CAP_UDP_RSS 0 /* bit 0 of DCR 0 */
+#define VMXNET3_CAP_ESP_RSS_IPV4 1 /* bit 1 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD 2 /* bit 2 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_TSO 3 /* bit 3 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD 4 /* bit 4 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_TSO 5 /* bit 5 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD 6 /* bit 6 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD 7 /* bit 7 of DCR 0 */
+#define VMXNET3_CAP_PKT_STEERING_IPV4 8 /* bit 8 of DCR 0 */
+#define VMXNET3_CAP_VERSION_4_MAX VMXNET3_CAP_PKT_STEERING_IPV4
+#define VMXNET3_CAP_ESP_RSS_IPV6 9 /* bit 9 of DCR 0 */
+#define VMXNET3_CAP_VERSION_5_MAX VMXNET3_CAP_ESP_RSS_IPV6
+#define VMXNET3_CAP_ESP_OVER_UDP_RSS 10 /* bit 10 of DCR 0 */
+#define VMXNET3_CAP_INNER_RSS 11 /* bit 11 of DCR 0 */
+#define VMXNET3_CAP_INNER_ESP_RSS 12 /* bit 12 of DCR 0 */
+#define VMXNET3_CAP_CRC32_HASH_FUNC 13 /* bit 13 of DCR 0 */
+#define VMXNET3_CAP_VERSION_6_MAX VMXNET3_CAP_CRC32_HASH_FUNC
+#define VMXNET3_CAP_OAM_FILTER 14 /* bit 14 of DCR 0 */
+#define VMXNET3_CAP_ESP_QS 15 /* bit 15 of DCR 0 */
+#define VMXNET3_CAP_LARGE_BAR 16 /* bit 16 of DCR 0 */
+#define VMXNET3_CAP_OOORX_COMP 17 /* bit 17 of DCR 0 */
+#define VMXNET3_CAP_VERSION_7_MAX 18
+/* when new capability is introduced, update VMXNET3_CAP_MAX */
+#define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX
+
#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 93e8d119d45f..53b3b241e027 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -130,6 +130,20 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
+/* Check whether the capability is supported by the UPT device,
+ * or whether UPT is even requested
+ */
+bool
+vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
+{
+ if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
+ cap_supported & (1UL << cap)) {
+ return true;
+ }
+
+ return false;
+}
+
/*
* Check the link state. This may start or stop the tx queue.
@@ -571,6 +585,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
rbi = rbi_base + ring->next2fill;
gd = ring->base + ring->next2fill;
+ rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
@@ -630,8 +645,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
/* Fill the last buffer but don't mark it ready, or else the
* device will think that the queue is full */
- if (num_allocated == num_to_alloc)
+ if (num_allocated == num_to_alloc) {
+ rbi->comp_state = VMXNET3_RXD_COMP_DONE;
break;
+ }
gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
@@ -1044,6 +1061,23 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
tq->stats.copy_skb_header++;
}
+ if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
+ /* tso pkts must not use more than
+ * VMXNET3_MAX_TSO_TXD_PER_PKT entries
+ */
+ if (skb_linearize(skb) != 0) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ tq->stats.linearized++;
+
+ /* recalculate the # of descriptors to use */
+ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ }
if (skb->encapsulation) {
vmxnet3_prepare_inner_tso(skb, &ctx);
} else {
@@ -1127,7 +1161,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (ctx.mss) {
if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
- gdesc->txd.om = VMXNET3_OM_ENCAP;
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.ext1 = 1;
+ } else {
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ }
gdesc->txd.msscof = ctx.mss;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
@@ -1144,8 +1183,15 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb->encapsulation) {
gdesc->txd.hlen = ctx.l4_offset +
ctx.l4_hdr_size;
- gdesc->txd.om = VMXNET3_OM_ENCAP;
- gdesc->txd.msscof = 0; /* Reserved */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.msscof = ctx.l4_offset +
+ skb->csum_offset;
+ gdesc->txd.ext1 = 1;
+ } else {
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ gdesc->txd.msscof = 0; /* Reserved */
+ }
} else {
gdesc->txd.hlen = ctx.l4_offset;
gdesc->txd.om = VMXNET3_OM_CSUM;
@@ -1193,7 +1239,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter,
- VMXNET3_REG_TXPROD + tq->qid * 8,
+ adapter->tx_prod_offset + tq->qid * 8,
tq->tx_ring.next2fill);
}
@@ -1345,14 +1391,15 @@ static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
- static const u32 rxprod_reg[2] = {
- VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
+ u32 rxprod_reg[2] = {
+ adapter->rx_prod_offset, adapter->rx_prod2_offset
};
u32 num_pkts = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
u16 segCnt = 0, mss = 0;
+ int comp_offset, fill_offset;
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxCmdDesc;
struct Vmxnet3_RxCompDesc rxComp;
@@ -1503,6 +1550,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
hash_type);
}
#endif
+ skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
if (VMXNET3_VERSION_GE_2(adapter) &&
@@ -1625,9 +1673,15 @@ not_lro:
rcd_done:
/* device may have skipped some rx descs */
- ring->next2comp = idx;
- num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring = rq->rx_ring + ring_idx;
+ rbi->comp_state = VMXNET3_RXD_COMP_DONE;
+
+ comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
+ fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
+ idx - ring->next2fill - 1;
+ if (!ring->isOutOfOrder || fill_offset >= comp_offset)
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
/* Ensure that the writes to rxd->gen bits will be observed
* after all other writes to rxd objects.
@@ -1635,18 +1689,38 @@ rcd_done:
dma_wmb();
while (num_to_alloc) {
- vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
- &rxCmdDesc);
- BUG_ON(!rxd->addr);
+ rbi = rq->buf_info[ring_idx] + ring->next2fill;
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
+ goto refill_buf;
+ if (ring_idx == 0) {
+ /* ring0 Type1 buffers can get skipped; re-fill them */
+ if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
+ goto refill_buf;
+ }
+ if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
+refill_buf:
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+ WARN_ON(!rxd->addr);
+
+ /* Recv desc is ready to be used by the device */
+ rxd->gen = ring->gen;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
+ num_to_alloc--;
+ } else {
+ /* rx completion hasn't occurred */
+ ring->isOutOfOrder = 1;
+ break;
+ }
+ }
- /* Recv desc is ready to be used by the device */
- rxd->gen = ring->gen;
- vmxnet3_cmd_ring_adv_next2fill(ring);
- num_to_alloc--;
+ if (num_to_alloc == 0) {
+ ring->isOutOfOrder = 0;
}
/* if needed, update the register */
- if (unlikely(rq->shared->updateRxProd)) {
+ if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
@@ -1810,6 +1884,7 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
+ rq->rx_ring[i].isOutOfOrder = 0;
}
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
adapter) == 0) {
@@ -2627,6 +2702,23 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
}
static void
+vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_7(adapter))
+ return;
+
+ cmdInfo->ringBufSize = adapter->ringBufSize;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RING_BUFFER_SIZE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
+static void
vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
@@ -2671,6 +2763,36 @@ vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
adapter->rss_fields =
VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
} else {
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+ adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_UDP_RSS)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+ }
+
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV4)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+ }
+
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV6)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ }
cmdInfo->setRssFields = adapter->rss_fields;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_RSS_FIELDS);
@@ -2734,14 +2856,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
goto activate_err;
}
+ vmxnet3_init_bufsize(adapter);
vmxnet3_init_coalesce(adapter);
vmxnet3_init_rssfields(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
- VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
+ adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
adapter->rx_queue[i].rx_ring[0].next2fill);
- VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
+ VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
(i * VMXNET3_REG_ALIGN)),
adapter->rx_queue[i].rx_ring[1].next2fill);
}
@@ -2907,19 +3030,29 @@ static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
size_t sz, i, ring0_size, ring1_size, comp_size;
- if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
- VMXNET3_MAX_ETH_HDR_SIZE) {
- adapter->skb_buf_size = adapter->netdev->mtu +
- VMXNET3_MAX_ETH_HDR_SIZE;
- if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
- adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
-
- adapter->rx_buf_per_pkt = 1;
+ /* With version 7, ring1 will have only T0 buffers */
+ if (!VMXNET3_VERSION_GE_7(adapter)) {
+ if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
+ VMXNET3_MAX_ETH_HDR_SIZE) {
+ adapter->skb_buf_size = adapter->netdev->mtu +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
+ adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
+
+ adapter->rx_buf_per_pkt = 1;
+ } else {
+ adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
+ sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
+ }
} else {
- adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
- sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
- VMXNET3_MAX_ETH_HDR_SIZE;
- adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
+ adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
+ VMXNET3_MAX_SKB_BUF_SIZE);
+ adapter->rx_buf_per_pkt = 1;
+ adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
+ adapter->ringBufSize.ring1BufSizeType1 = 0;
+ adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
}
/*
@@ -2935,6 +3068,11 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
ring1_size = (ring1_size + sz - 1) / sz * sz;
ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
sz * sz);
+ /* For v7 and later, keep ring size power of 2 for UPT */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ ring0_size = rounddown_pow_of_two(ring0_size);
+ ring1_size = rounddown_pow_of_two(ring1_size);
+ }
comp_size = ring0_size + ring1_size;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -3185,6 +3323,54 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ }
+
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
@@ -3472,7 +3658,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_6)) {
+ if (ver & (1 << VMXNET3_REV_7)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_7);
+ adapter->version = VMXNET3_REV_7 + 1;
+ } else if (ver & (1 << VMXNET3_REV_6)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_6);
@@ -3520,6 +3711,39 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_ver;
}
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
+ adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
+ if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
+ adapter->dev_caps[0] = adapter->devcap_supported[0] &
+ (1UL << VMXNET3_CAP_LARGE_BAR);
+ }
+ if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
+ adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
+ adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
+ adapter->dev_caps[0] |= adapter->devcap_supported[0] &
+ (1UL << VMXNET3_CAP_OOORX_COMP);
+ }
+ if (adapter->dev_caps[0])
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ if (VMXNET3_VERSION_GE_7(adapter) &&
+ adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
+ adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
+ adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
+ adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
+ } else {
+ adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
+ adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
+ adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
+ }
+
if (VMXNET3_VERSION_GE_6(adapter)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 3172d46c0335..e2034adc3a1a 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -298,7 +298,7 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
return features;
}
-static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
+static void vmxnet3_enable_encap_offloads(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -306,8 +306,56 @@ static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ NETIF_F_LRO;
+ if (features & NETIF_F_GSO_UDP_TUNNEL)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
}
}
@@ -322,6 +370,22 @@ static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM);
}
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_GENEVE_TSO |
+ 1UL << VMXNET3_CAP_VXLAN_TSO |
+ 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
}
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
@@ -357,8 +421,8 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXVLAN;
- if ((features & tun_offload_mask) != 0 && !udp_tun_enabled) {
- vmxnet3_enable_encap_offloads(netdev);
+ if ((features & tun_offload_mask) != 0) {
+ vmxnet3_enable_encap_offloads(netdev, features);
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXINNEROFLD;
} else if ((features & tun_offload_mask) == 0 &&
@@ -462,7 +526,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
- buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_TXPROD +
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
@@ -490,9 +554,9 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
- buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD +
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset +
i * VMXNET3_REG_ALIGN);
- buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD2 +
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
@@ -660,6 +724,13 @@ vmxnet3_set_ringparam(struct net_device *netdev,
new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
VMXNET3_RX_RING2_MAX_SIZE);
+ /* For v7 and later, keep ring size power of 2 for UPT */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
+ new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
+ new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
+ }
+
/* rx data ring buffer size has to be a multiple of
* VMXNET3_RXDATA_DESC_SIZE_ALIGN
*/
@@ -913,6 +984,39 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev,
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+ rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_UDP_RSS)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+ }
+ if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV4)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+ }
+ if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV6)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
+ adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter,
+ VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
spin_lock_irqsave(&adapter->cmd_lock, flags);
cmdInfo->setRssFields = rss_fields;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -1188,6 +1292,34 @@ done:
return 0;
}
+static void vmxnet3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI) && adapter->intr.type == VMXNET3_IT_MSIX) {
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ ec->combined_count = adapter->num_tx_queues;
+ } else {
+ ec->rx_count = adapter->num_rx_queues;
+ ec->tx_count =
+ adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ }
+ } else {
+ ec->combined_count = 1;
+ }
+
+ ec->other_count = 1;
+
+ /* Number of interrupts cannot be changed on the fly */
+ /* Just set maximums to actual values */
+ ec->max_rx = ec->rx_count;
+ ec->max_tx = ec->tx_count;
+ ec->max_combined = ec->combined_count;
+ ec->max_other = ec->other_count;
+}
+
static const struct ethtool_ops vmxnet3_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -1213,6 +1345,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.set_rxfh = vmxnet3_set_rss,
#endif
.get_link_ksettings = vmxnet3_get_link_ksettings,
+ .get_channels = vmxnet3_get_channels,
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7027ff483fa5..3367db23aa13 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -69,18 +69,19 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.6.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01060000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01070000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
@@ -135,6 +136,7 @@ struct vmxnet3_cmd_ring {
u32 next2fill;
u32 next2comp;
u8 gen;
+ u8 isOutOfOrder;
dma_addr_t basePA;
};
@@ -259,9 +261,13 @@ enum vmxnet3_rx_buf_type {
VMXNET3_RX_BUF_PAGE = 2
};
+#define VMXNET3_RXD_COMP_PENDING 0
+#define VMXNET3_RXD_COMP_DONE 1
+
struct vmxnet3_rx_buf_info {
enum vmxnet3_rx_buf_type buf_type;
u16 len;
+ u8 comp_state;
union {
struct sk_buff *skb;
struct page *page;
@@ -402,6 +408,13 @@ struct vmxnet3_adapter {
dma_addr_t pm_conf_pa;
dma_addr_t rss_conf_pa;
bool queuesExtEnabled;
+ struct Vmxnet3_RingBufferSize ringBufSize;
+ u32 devcap_supported[8];
+ u32 ptcap_supported[8];
+ u32 dev_caps[8];
+ u16 tx_prod_offset;
+ u16 rx_prod_offset;
+ u16 rx_prod2_offset;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -431,11 +444,13 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_5 + 1)
#define VMXNET3_VERSION_GE_6(adapter) \
(adapter->version >= VMXNET3_REV_6 + 1)
+#define VMXNET3_VERSION_GE_7(adapter) \
+ (adapter->version >= VMXNET3_REV_7 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 1024
-#define VMXNET3_DEF_RX_RING2_SIZE 256
+#define VMXNET3_DEF_RX_RING2_SIZE 512
#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
@@ -494,6 +509,7 @@ void vmxnet3_set_ethtool_ops(struct net_device *netdev);
void vmxnet3_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
+bool vmxnet3_check_ptcapability(u32 cap_supported, u32 cap);
extern char vmxnet3_driver_name[];
#endif
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index cfc30ce4c6e1..5df7a0abc39d 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -814,8 +814,8 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
*/
if (rt6) {
dst = &rt6->dst;
- dev_replace_track(dst->dev, net->loopback_dev,
- &dst->dev_tracker, GFP_KERNEL);
+ netdev_ref_replace(dst->dev, net->loopback_dev,
+ &dst->dev_tracker, GFP_KERNEL);
dst->dev = net->loopback_dev;
dst_release(dst);
}
@@ -1061,8 +1061,8 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
*/
if (rth) {
dst = &rth->dst;
- dev_replace_track(dst->dev, net->loopback_dev,
- &dst->dev_tracker, GFP_KERNEL);
+ netdev_ref_replace(dst->dev, net->loopback_dev,
+ &dst->dev_tracker, GFP_KERNEL);
dst->dev = net->loopback_dev;
dst_release(dst);
}
@@ -1077,7 +1077,7 @@ static int vrf_rtable_create(struct net_device *dev)
return -ENOMEM;
/* create a dst for routing packets out through a VRF device */
- rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1);
if (!rth)
return -ENOMEM;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 265d4a0245e7..c3285242f74f 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2243,7 +2243,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
- struct dst_cache *dst_cache,
+ __u8 flow_flags, struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
@@ -2270,6 +2270,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.saddr = *saddr;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
+ fl4.flowi4_flags = flow_flags;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
@@ -2320,7 +2321,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = *saddr;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
+ fl6.flowlabel = ip6_make_flowinfo(tos, label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
@@ -2385,15 +2386,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tx_packets++;
- tx_stats->tx_bytes += len;
+ u64_stats_inc(&tx_stats->tx_packets);
+ u64_stats_add(&tx_stats->tx_bytes, len);
u64_stats_update_end(&tx_stats->syncp);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += len;
+ u64_stats_inc(&rx_stats->rx_packets);
+ u64_stats_add(&rx_stats->rx_bytes, len);
u64_stats_update_end(&rx_stats->syncp);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
@@ -2459,7 +2460,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
- __u8 tos, ttl;
+ __u8 tos, ttl, flow_flags = 0;
int ifindex;
int err;
u32 flags = vxlan->cfg.flags;
@@ -2525,6 +2526,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
dst = &remote_ip;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+ flow_flags = info->key.flow_flags;
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
@@ -2555,7 +2557,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
- dst_port, src_port,
+ dst_port, src_port, flow_flags,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -3061,7 +3063,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, dport, sport,
- &info->dst_cache, info);
+ info->key.flow_flags, &info->dst_cache,
+ info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index 5f43568a9715..63908dbbb02d 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -43,7 +43,7 @@
* This version number is incremented with each official release of the
* package and is a simplified number for normal user reference.
* Individual files are tracked by the version control system and may
- * have individual versions (or IDs) that move much faster than the
+ * have individual versions (or IDs) that move much faster than
* the release version as individual updates are tracked.
*/
#define FST_USER_VERSION "1.04"
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 9a4c8ff32d9d..5bf7822c53f1 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,6 +6,8 @@
#include "allowedips.h"
#include "peer.h"
+enum { MAX_ALLOWEDIPS_BITS = 128 };
+
static struct kmem_cache *node_cache;
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
@@ -40,7 +42,8 @@ static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
{
if (rcu_access_pointer(p)) {
- WARN_ON(IS_ENABLED(DEBUG) && *len >= 128);
+ if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
+ return;
stack[(*len)++] = rcu_dereference_raw(p);
}
}
@@ -52,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu)
static void root_free_rcu(struct rcu_head *rcu)
{
- struct allowedips_node *node, *stack[128] = {
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
container_of(rcu, struct allowedips_node, rcu) };
unsigned int len = 1;
@@ -65,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu)
static void root_remove_peer_lists(struct allowedips_node *root)
{
- struct allowedips_node *node, *stack[128] = { root };
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
unsigned int len = 1;
while (len > 0 && (node = stack[--len])) {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index aa9a7a5970fd..d58e9f818d3b 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -69,7 +69,8 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action, v
* its normal operation rather than as a somewhat rare event, then we
* don't actually want to clear keys.
*/
- if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) ||
+ IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP))
return 0;
if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7b8df406c773..7135d51d2d87 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -19,15 +19,8 @@
/* Must be called with bh disabled. */
static void update_rx_stats(struct wg_peer *peer, size_t len)
{
- struct pcpu_sw_netstats *tstats =
- get_cpu_ptr(peer->device->dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- ++tstats->rx_packets;
- tstats->rx_bytes += len;
+ dev_sw_netstats_rx_add(peer->device->dev, len);
peer->rx_bytes += len;
- u64_stats_update_end(&tstats->syncp);
- put_cpu_ptr(tstats);
}
#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index e173204ae7d7..41db10f9be49 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -593,10 +593,10 @@ bool __init wg_allowedips_selftest(void)
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);
- /* These will hit the WARN_ON(len >= 128) in free_node if something
- * goes wrong.
+ /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
+ * if something goes wrong.
*/
- for (i = 0; i < 128; ++i) {
+ for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
part = cpu_to_be64(~(1LLU << (i % 64)));
memset(&ip, 0xff, 16);
memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index 007cd4457c5f..ba87d294604f 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -6,28 +6,29 @@
#ifdef DEBUG
#include <linux/jiffies.h>
+#include <linux/hrtimer.h>
static const struct {
bool result;
- unsigned int msec_to_sleep_before;
+ u64 nsec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
- [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
- [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
static __init unsigned int maximum_jiffies_at_index(int index)
{
- unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
- total_msecs += expected_results[i].msec_to_sleep_before;
- return msecs_to_jiffies(total_msecs);
+ total_nsecs += expected_results[i].nsec_to_sleep_before;
+ return nsecs_to_jiffies(total_nsecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -42,8 +43,12 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
- if (expected_results[i].msec_to_sleep_before)
- msleep(expected_results[i].msec_to_sleep_before);
+ if (expected_results[i].nsec_to_sleep_before) {
+ ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
+ ns_to_ktime(expected_results[i].nsec_to_sleep_before));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
+ }
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
@@ -127,7 +132,7 @@ bool __init wg_ratelimiter_selftest(void)
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
- BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+ BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
@@ -176,7 +181,6 @@ bool __init wg_ratelimiter_selftest(void)
test += test_count;
goto err;
}
- msleep(500);
continue;
} else if (ret < 0) {
test += test_count;
@@ -195,7 +199,6 @@ bool __init wg_ratelimiter_selftest(void)
test += test_count;
goto err;
}
- msleep(50);
continue;
}
test += test_count;
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2db9c948c0fc..6bee16b207d1 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1311,7 +1311,7 @@ static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changes)
+ u64 changes)
{
struct adm8211_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 9f84a6fde0c2..6f937d2cc126 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1256,14 +1256,14 @@ static int ar5523_create_connection(struct ar5523 *ar,
sizeof(create), 0);
}
-static int ar5523_write_associd(struct ar5523 *ar,
- struct ieee80211_bss_conf *bss)
+static int ar5523_write_associd(struct ar5523 *ar, struct ieee80211_vif *vif)
{
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
struct ar5523_cmd_set_associd associd;
memset(&associd, 0, sizeof(associd));
associd.defaultrateix = cpu_to_be32(0); /* XXX */
- associd.associd = cpu_to_be32(bss->aid);
+ associd.associd = cpu_to_be32(vif->cfg.aid);
associd.timoffset = cpu_to_be32(0x3b); /* XXX */
memcpy(associd.bssid, bss->bssid, ETH_ALEN);
return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
@@ -1273,7 +1273,7 @@ static int ar5523_write_associd(struct ar5523 *ar,
static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss,
- u32 changed)
+ u64 changed)
{
struct ar5523 *ar = hw->priv;
int error;
@@ -1284,7 +1284,7 @@ static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
if (!(changed & BSS_CHANGED_ASSOC))
goto out_unlock;
- if (bss->assoc) {
+ if (vif->cfg.assoc) {
error = ar5523_create_connection(ar, vif, bss);
if (error) {
ar5523_err(ar, "could not create connection\n");
@@ -1297,7 +1297,7 @@ static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
goto out_unlock;
}
- error = ar5523_write_associd(ar, bss);
+ error = ar5523_write_associd(ar, vif);
if (error) {
ar5523_err(ar, "could not set association\n");
goto out_unlock;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 688177453b07..276954b70d63 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -33,9 +33,11 @@ EXPORT_SYMBOL(ath10k_debug_mask);
static unsigned int ath10k_cryptmode_param;
static bool uart_print;
static bool skip_otp;
-static bool rawmode;
static bool fw_diag_log;
+/* frame mode values are mapped as per enum ath10k_hw_txrx_mode */
+unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+
unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
@@ -44,15 +46,16 @@ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
-module_param(rawmode, bool, 0644);
module_param(fw_diag_log, bool, 0644);
+module_param_named(frame_mode, ath10k_frame_mode, uint, 0644);
module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
-MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging");
@@ -2599,7 +2602,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
- if (rawmode) {
+ if (ath10k_frame_mode == ATH10K_HW_TXRX_RAW) {
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
fw_file->fw_features)) {
ath10k_err(ar, "rawmode = 1 requires support from firmware");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 8bfabbcfdb14..d70d7d088a2b 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1314,6 +1314,7 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
return false;
}
+extern unsigned int ath10k_frame_mode;
extern unsigned long ath10k_coredump_mask;
void ath10k_core_napi_sync_disable(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 771252dd6d4e..8a075a711b71 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -3563,7 +3563,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx tx mode switch ind info0 0x%04hx info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
+ "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
info0, info1, enable, num_records, mode, threshold);
len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
@@ -3840,7 +3840,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
switch (txrate.flags) {
case WMI_RATE_PREAMBLE_OFDM:
if (arsta->arvif && arsta->arvif->vif)
- conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
+ conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
arsta->tx_info.status.rates[0].idx = rate_idx - 4;
break;
@@ -3884,6 +3884,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_80_MHZ_WIDTH;
break;
+ case RATE_INFO_BW_160:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
}
if (peer_stats->succ_pkts) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 9842a4b2f78f..a19b0795c86d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1275,7 +1275,6 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
struct ath10k *ar = htt->ar;
int res, data_len;
struct htt_cmd_hdr *cmd_hdr;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct htt_data_tx_desc *tx_desc;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *tmp_skb;
@@ -1286,11 +1285,15 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
u16 flags1 = 0;
u16 msdu_id = 0;
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
data_len = msdu->len;
@@ -1387,7 +1390,6 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
@@ -1419,15 +1421,19 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
txbuf_paddr = htt->txbuf.paddr +
(sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
- } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
- txmode == ATH10K_HW_TXRX_RAW &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
@@ -1589,7 +1595,6 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
@@ -1621,15 +1626,19 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
txbuf_paddr = htt->txbuf.paddr +
(sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
- } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
- txmode == ATH10K_HW_TXRX_RAW &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3570a5895ea8..9dd3b8fba4b0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -659,7 +659,7 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
- conf = rcu_dereference(vif->chanctx_conf);
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
@@ -1509,8 +1509,8 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- arg.ssid = arvif->vif->bss_conf.ssid;
- arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ arg.ssid = arvif->vif->cfg.ssid;
+ arg.ssid_len = arvif->vif->cfg.ssid_len;
}
ath10k_dbg(ar, ATH10K_DBG_MAC,
@@ -1630,7 +1630,7 @@ static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!bcn) {
ath10k_warn(ar, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1823,8 +1823,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
- struct ieee80211_bss_conf *info,
- const u8 self_peer[ETH_ALEN])
+ struct ieee80211_vif *vif)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
@@ -1832,7 +1831,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (!info->ibss_joined) {
+ if (!vif->cfg.ibss_joined) {
if (is_zero_ether_addr(arvif->bssid))
return;
@@ -2028,7 +2027,7 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return;
- if (!vif->csa_active)
+ if (!vif->bss_conf.csa_active)
return;
if (!arvif->is_up)
@@ -2163,7 +2162,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
lockdep_assert_held(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_STATION)
- aid = vif->bss_conf.aid;
+ aid = vif->cfg.aid;
else
aid = sta->aid;
@@ -2193,7 +2192,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
return;
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
- info->ssid_len ? info->ssid : NULL, info->ssid_len,
+ vif->cfg.ssid_len ? vif->cfg.ssid : NULL,
+ vif->cfg.ssid_len,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@@ -3118,11 +3118,11 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
WARN_ON(arvif->is_up);
- arvif->aid = bss_conf->aid;
+ arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath10k_wmi_pdev_set_param(ar,
@@ -3713,6 +3713,9 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
__le16 fc = hdr->frame_control;
+ if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return ATH10K_HW_TXRX_ETHERNET;
+
if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
return ATH10K_HW_TXRX_RAW;
@@ -3873,6 +3876,12 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
bool noack = false;
cb->flags = 0;
+
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */
+ goto finish_cb_fill;
+ }
+
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
@@ -3911,6 +3920,7 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
cb->flags |= ATH10K_SKB_F_RAW_TX;
}
+finish_cb_fill:
cb->vif = vif;
cb->txq = txq;
cb->airtime_est = airtime;
@@ -4034,7 +4044,11 @@ static int ath10k_mac_tx(struct ath10k *ar,
ath10k_tx_h_seq_no(vif, skb);
break;
case ATH10K_HW_TXRX_ETHERNET:
- ath10k_tx_h_8023(skb);
+ /* Convert 802.11->802.3 header only if the frame was earlier
+ * encapsulated to 802.11 by mac80211. Otherwise pass it as is.
+ */
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ ath10k_tx_h_8023(skb);
break;
case ATH10K_HW_TXRX_RAW:
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
@@ -4645,12 +4659,10 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
struct ieee80211_txq *txq = NULL;
- struct ieee80211_hdr *hdr = (void *)skb->data;
enum ath10k_hw_txrx_mode txmode;
enum ath10k_mac_tx_path txpath;
bool is_htt;
bool is_mgmt;
- bool is_presp;
int ret;
u16 airtime;
@@ -4664,8 +4676,14 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
if (is_htt) {
+ bool is_presp = false;
+
spin_lock_bh(&ar->htt.tx_lock);
- is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+ }
ret = ath10k_htt_tx_inc_pending(htt);
if (ret) {
@@ -5465,6 +5483,30 @@ static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
ar->wmi.vdev_param->txbf, value);
}
+static void ath10k_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k *ar = hw->priv;
+ u32 vdev_param;
+ int ret;
+
+ if (ath10k_frame_mode != ATH10K_HW_TXRX_ETHERNET ||
+ ar->wmi.vdev_param->tx_encap_type == WMI_VDEV_PARAM_UNSUPPORTED ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+ arvif->vdev_id, ret);
+ }
+}
+
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -5674,15 +5716,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->def_wep_key_idx = -1;
- vdev_param = ar->wmi.vdev_param->tx_encap_type;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- ATH10K_HW_TXRX_NATIVE_WIFI);
- /* 10.X firmware does not support this VDEV parameter. Do not warn */
- if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
- arvif->vdev_id, ret);
- goto err_vdev_delete;
- }
+ ath10k_update_vif_offload(hw, vif);
/* Configuring number of spatial stream for monitor interface is causing
* target assert in qca9888 and qca6174.
@@ -6034,7 +6068,7 @@ static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
@@ -6048,7 +6082,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (changed & BSS_CHANGED_IBSS)
- ath10k_control_ibss(arvif, info, vif->addr);
+ ath10k_control_ibss(arvif, vif);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -6113,9 +6147,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
- arvif->u.ap.ssid_len = info->ssid_len;
- if (info->ssid_len)
- memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
@@ -6192,7 +6227,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc) {
+ if (vif->cfg.assoc) {
/* Workaround: Make sure monitor vdev is not running
* when associating to prevent some firmware revisions
* (e.g. 10.1 and 10.2) from crashing.
@@ -6217,7 +6252,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_PS) {
- arvif->ps = vif->bss_conf.ps;
+ arvif->ps = vif->cfg.ps;
ret = ath10k_config_ps(ar);
if (ret)
@@ -7779,7 +7814,8 @@ exit:
}
static int ath10k_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath10k *ar = hw->priv;
@@ -8798,7 +8834,7 @@ ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath10k_mac_change_chanctx_arg *arg = data;
- if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
@@ -8811,7 +8847,7 @@ ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ath10k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
- ctx = rcu_access_pointer(vif->chanctx_conf);
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
@@ -8884,6 +8920,7 @@ unlock:
static int
ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath10k *ar = hw->priv;
@@ -8963,6 +9000,7 @@ err:
static void
ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath10k *ar = hw->priv;
@@ -9375,6 +9413,7 @@ static const struct ieee80211_ops ath10k_ops = {
.stop = ath10k_stop,
.config = ath10k_config,
.add_interface = ath10k_add_interface,
+ .update_vif_offload = ath10k_update_vif_offload,
.remove_interface = ath10k_remove_interface,
.configure_filter = ath10k_configure_filter,
.bss_info_changed = ath10k_bss_info_changed,
@@ -10044,6 +10083,12 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);
+ if (ath10k_frame_mode == ATH10K_HW_TXRX_ETHERNET) {
+ if (ar->wmi.vdev_param->tx_encap_type !=
+ WMI_VDEV_PARAM_UNSUPPORTED)
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ }
+
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -10229,7 +10274,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
}
- if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+ if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
+ !ath_is_world_regd(&ar->ath_common.regulatory)) {
ret = regulatory_hint(ar->hw->wiphy,
ar->ath_common.regulatory.alpha2);
if (ret)
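
A hedged aside on the encap-offload checks added throughout mac.c and htt_tx.c above: a tx path can tell an encap-offloaded (802.3) frame from a normal 802.11 frame by the flag mac80211 sets on the skb, which is the test inserted before every 802.11 header dereference. A minimal sketch with a hypothetical helper:

#include <net/mac80211.h>

/* Sketch only: true when mac80211 handed the driver an 802.3 frame for
 * hardware encapsulation offload.
 */
static bool demo_skb_is_8023(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return !!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP);
}
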
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 80fcb917fe4e..d7e406916bc8 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -590,12 +590,12 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
if (resp->fw_version_info_valid) {
qmi->fw_version = resp->fw_version_info.fw_version;
- strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
+ strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
sizeof(qmi->fw_build_timestamp));
}
if (resp->fw_build_id_valid)
- strlcpy(qmi->fw_build_id, resp->fw_build_id,
+ strscpy(qmi->fw_build_id, resp->fw_build_id,
MAX_BUILD_ID_LEN + 1);
if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
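
The strlcpy() to strscpy() conversion above is mechanical; the difference is the return convention. A hedged illustration with a hypothetical helper:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Sketch only: strscpy() bounds the copy like strlcpy() but never reads
 * past a short source and reports truncation with -E2BIG instead of
 * returning the untruncated source length.
 */
static void demo_copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_size);

	if (ret == -E2BIG)
		pr_warn("demo: '%s' truncated to %zu bytes\n",
			src, dst_size - 1);
}
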
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 607e8164bf98..5576ad9fd116 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1249,13 +1249,12 @@ static void ath10k_snoc_init_napi(struct ath10k *ar)
static int ath10k_snoc_request_irq(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- int irqflags = IRQF_TRIGGER_RISING;
int ret, id;
for (id = 0; id < CE_COUNT_MAX; id++) {
ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
- ath10k_snoc_per_engine_handler,
- irqflags, ce_name[id], ar);
+ ath10k_snoc_per_engine_handler, 0,
+ ce_name[id], ar);
if (ret) {
ath10k_err(ar,
"failed to register IRQ handler for CE %d: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 4714c86bb501..64e7a767d963 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -52,15 +52,12 @@ DECLARE_EVENT_CLASS(ath10k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@@ -92,16 +89,13 @@ TRACE_EVENT(ath10k_log_dbg,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 10123974c3da..da3bc35e41aa 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -43,6 +43,7 @@ out:
int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
+ struct ieee80211_tx_status status;
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
@@ -128,7 +129,19 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
- ieee80211_tx_status(htt->ar->hw, msdu);
+ memset(&status, 0, sizeof(status));
+ status.skb = msdu;
+ status.info = info;
+
+ rcu_read_lock();
+
+ if (txq)
+ status.sta = txq->sta;
+
+ ieee80211_tx_status_ext(htt->ar->hw, &status);
+
+ rcu_read_unlock();
+
/* we do not own the msdu anymore */
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7efbe03fbca8..876410a47d1d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -205,7 +205,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
}
arvif = ath10k_get_arvif(ar, vdev_id);
- if (arvif && arvif->is_up && arvif->vif->csa_active)
+ if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
kfree(tb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cd438f76f284..074d8ba5072a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3882,13 +3882,13 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* Once CSA counter is completed stop sending beacons until
* actual channel switch is done
*/
- if (arvif->vif->csa_active &&
+ if (arvif->vif->bss_conf.csa_active &&
ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
continue;
}
- bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
if (!bcn) {
ath10k_warn(ar, "could not get mac80211 beacon\n");
continue;
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index fa11807f48a9..c47414710138 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -140,8 +140,53 @@ ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
return ab->pci.msi.irqs[vector];
}
+static inline u32
+ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start = 0;
+
+ /* If offset lies within DP register range, use 1st window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ATH11K_PCI_WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = 2 * ATH11K_PCI_WINDOW_START;
+
+ return window_start;
+}
+
+static void
+ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ u32 window_start;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+}
+
+static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start;
+ u32 val;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ return val;
+}
+
static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+ .wakeup = NULL,
+ .release = NULL,
.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+ .window_write32 = ath11k_ahb_window_write32_wcn6750,
+ .window_read32 = ath11k_ahb_window_read32_wcn6750,
};
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
@@ -971,19 +1016,24 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
}
ab->hif.ops = hif_ops;
- ab->pci.ops = pci_ops;
ab->pdev = pdev;
ab->hw_rev = hw_rev;
platform_set_drvdata(pdev, ab);
- ret = ath11k_ahb_setup_resources(ab);
- if (ret)
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_core_free;
+ }
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_core_free;
+ ret = ath11k_ahb_setup_resources(ab);
+ if (ret)
+ goto err_core_free;
+
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
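
The WCN6750 helpers added above pick a static window with an XOR range test: because each register block base is aligned to the window size, offset ^ base clears the base bits and leaves only the offset within the block. A small hedged example with made-up constants:

#include <linux/types.h>

/* Hypothetical, window-aligned block base and window mask (size - 1). */
#define DEMO_BLOCK_BASE		0x00a00000
#define DEMO_WINDOW_MASK	0x0007ffff

/* Sketch only: true when @offset falls inside the block that starts at
 * DEMO_BLOCK_BASE, mirroring the (offset ^ base) < mask test above.
 */
static bool demo_offset_in_block(u32 offset)
{
	return (offset ^ DEMO_BLOCK_BASE) < DEMO_WINDOW_MASK;
}
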
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 1e98ff9ff288..c3e9e4f7bc24 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -54,9 +54,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -107,8 +104,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
},
@@ -133,9 +128,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -183,8 +175,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
},
@@ -209,9 +199,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 48,
- .rfkill_cfg = 0,
- .rfkill_on_level = 1,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -258,8 +245,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
},
@@ -284,9 +269,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
@@ -333,8 +315,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = true,
.hybrid_bus_type = false,
- .dp_window_idx = 3,
- .ce_window_idx = 2,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
},
@@ -359,9 +339,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -408,8 +385,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
},
@@ -434,9 +409,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -482,8 +454,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
},
@@ -508,9 +478,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
@@ -556,8 +523,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = true,
.hybrid_bus_type = true,
- .dp_window_idx = 1,
- .ce_window_idx = 2,
.fixed_fw_mem = true,
.support_off_channel_tx = false,
},
@@ -1225,23 +1190,23 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_mac_register(ab);
+ ret = ath11k_dp_pdev_alloc(ab);
if (ret) {
- ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
goto err_pdev_debug;
}
- ret = ath11k_dp_pdev_alloc(ab);
+ ret = ath11k_mac_register(ab);
if (ret) {
- ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ goto err_dp_pdev_free;
}
ret = ath11k_thermal_register(ab);
if (ret) {
ath11k_err(ab, "could not register thermal device: %d\n",
ret);
- goto err_dp_pdev_free;
+ goto err_mac_unregister;
}
ret = ath11k_spectral_init(ab);
@@ -1254,10 +1219,10 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
err_thermal_unregister:
ath11k_thermal_unregister(ab);
-err_dp_pdev_free:
- ath11k_dp_pdev_free(ab);
err_mac_unregister:
ath11k_mac_unregister(ab);
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
err_pdev_debug:
ath11k_debugfs_pdev_destroy(ab);
@@ -1402,27 +1367,6 @@ static int ath11k_core_start_firmware(struct ath11k_base *ab,
return ret;
}
-static int ath11k_core_rfkill_config(struct ath11k_base *ab)
-{
- struct ath11k *ar;
- int ret = 0, i;
-
- if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
- return 0;
-
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
-
- ret = ath11k_mac_rfkill_config(ar);
- if (ret && ret != -EOPNOTSUPP) {
- ath11k_warn(ab, "failed to configure rfkill: %d", ret);
- return ret;
- }
- }
-
- return ret;
-}
-
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
@@ -1475,13 +1419,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_core_stop;
}
ath11k_hif_irq_enable(ab);
-
- ret = ath11k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath11k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_stop;
- }
-
mutex_unlock(&ab->core_lock);
return 0;
@@ -1550,7 +1487,6 @@ void ath11k_core_halt(struct ath11k *ar)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
- cancel_work_sync(&ab->rfkill_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
@@ -1558,28 +1494,6 @@ void ath11k_core_halt(struct ath11k *ar)
idr_init(&ar->txmgmt_idr);
}
-static void ath11k_rfkill_work(struct work_struct *work)
-{
- struct ath11k_base *ab = container_of(work, struct ath11k_base, rfkill_work);
- struct ath11k *ar;
- bool rfkill_radio_on;
- int i;
-
- spin_lock_bh(&ab->base_lock);
- rfkill_radio_on = ab->rfkill_radio_on;
- spin_unlock_bh(&ab->base_lock);
-
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
- continue;
-
- /* notify cfg80211 radio state change */
- ath11k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
- }
-}
-
static void ath11k_update_11d(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
@@ -1891,7 +1805,6 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
- INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 95bca0b078b1..afad8f55e433 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/rhashtable.h>
+#include <linux/average.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -464,6 +465,8 @@ struct ath11k_per_ppdu_tx_stats {
u32 retry_bytes;
};
+DECLARE_EWMA(avg_rssi, 10, 8)
+
struct ath11k_sta {
struct ath11k_vif *arvif;
@@ -482,6 +485,7 @@ struct ath11k_sta {
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
s8 rssi_beacon;
s8 chain_signal[IEEE80211_MAX_CHAINS];
struct ath11k_htt_tx_stats *tx_stats;
@@ -578,8 +582,6 @@ struct ath11k {
struct ath11k_pdev_wmi *wmi;
struct ath11k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
- u32 ht_cap_info;
- u32 vht_cap_info;
struct ath11k_he ar_he;
enum ath11k_state state;
bool supports_6ghz;
@@ -927,10 +929,6 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
- struct work_struct rfkill_work;
-
- /* true means radio is on */
- bool rfkill_radio_on;
/* To synchronize 11d scan vdev id */
struct mutex vdev_id_11d_lock;
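
core.h now pulls in <linux/average.h> and declares an EWMA type for per-station RSSI. DECLARE_EWMA(avg_rssi, 10, 8) generates ewma_avg_rssi_{init,add,read}(), keeping a running average with 2^10 internal precision and a 1/8 weight for each new sample. A minimal usage sketch with a hypothetical struct:

#include <linux/average.h>
#include <linux/types.h>

/* Sketch only: same declaration as the one added to core.h above. */
DECLARE_EWMA(avg_rssi, 10, 8)

struct demo_sta {
	struct ewma_avg_rssi avg_rssi;
};

static void demo_sta_init(struct demo_sta *sta)
{
	ewma_avg_rssi_init(&sta->avg_rssi);		/* zero the accumulator */
}

static void demo_sta_rx(struct demo_sta *sta, u8 rssi_comb)
{
	ewma_avg_rssi_add(&sta->avg_rssi, rssi_comb);	/* fold in one sample */
}

static unsigned long demo_sta_avg(struct demo_sta *sta)
{
	return ewma_avg_rssi_read(&sta->avg_rssi);	/* smoothed value */
}
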
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index fbbd5fe02aa8..91545640c47b 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -23,8 +23,8 @@ enum ath11k_debug_mask {
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
ATH11K_DBG_PCI = 0x00001000,
- ATH11K_DBG_DP_TX = 0x00001000,
- ATH11K_DBG_DP_RX = 0x00002000,
+ ATH11K_DBG_DP_TX = 0x00002000,
+ ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 4484235bcda4..b3efca6bd7dd 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -1403,6 +1404,8 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
htt_stats_buf->ax_mu_mimo_brpoll_7);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
htt_stats_buf->ax_bsr_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
@@ -1485,6 +1488,8 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
htt_stats_buf->ax_mu_mimo_brp7_err);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
htt_stats_buf->ax_bsr_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
@@ -1519,6 +1524,16 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
htt_stats_buf->mu_mimo_ppdu_posted);
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
+
len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
@@ -1535,10 +1550,34 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
- for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_sch_bar_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
+ }
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2933,6 +2972,21 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
htt_stats_buf->txbf);
+ len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u",
+ htt_stats_buf->rx_su_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs,
+ "rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u",
+ htt_stats_buf->rx_mu_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs,
+ "rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u",
+ htt_stats_buf->rx_br_poll);
+
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
"rx_legacy_cck_rate",
HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
@@ -2995,6 +3049,38 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "\n");
}
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
+ "rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
+ "rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+ "rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
+ "rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+ "rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu,
+ "rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok,
+ "rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail,
+ "rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
htt_stats_buf->per_chain_rssi_pkt_type);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index dc210c54d131..5d722b51b125 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
@@ -682,6 +683,7 @@ struct htt_tx_selfgen_ax_stats_tlv {
u32 ax_bsr_trigger;
u32 ax_mu_bar_trigger;
u32 ax_mu_rts_trigger;
+ u32 ax_ulmumimo_trigger;
};
struct htt_tx_selfgen_ac_err_stats_tlv {
@@ -712,12 +714,14 @@ struct htt_tx_selfgen_ax_err_stats_tlv {
u32 ax_bsr_trigger_err;
u32 ax_mu_bar_trigger_err;
u32 ax_mu_rts_trigger_err;
+ u32 ax_ulmumimo_trigger_err;
};
/* == TX MU STATS == */
#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS 8
struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
/* mu-mimo sw sched cmd stats */
@@ -734,6 +738,24 @@ struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+
+ /* UL MU-MIMO */
+ /* ax_ul_mumimo_basic_sch_nusers[i] is the number of basic triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ /* ax_ul_mumimo_brp_sch_nusers[i] is the number of brp triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
};
struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
@@ -1297,6 +1319,8 @@ struct htt_tx_pdev_rate_stats_tlv {
#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+#define HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
struct htt_rx_pdev_rate_stats_tlv {
u32 mac_id__word;
@@ -1375,6 +1399,21 @@ struct htt_rx_pdev_rate_stats_tlv {
u32 per_chain_rssi_pkt_type;
s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 rx_su_ndpa;
+ u32 rx_11ax_su_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_mu_ndpa;
+ u32 rx_11ax_mu_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_br_poll;
+ u32 rx_11ax_dl_ofdma_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_11ax_dl_ofdma_ru[HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+
+ u32 rx_ulmumimo_non_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_ok[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_fail[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulofdma_non_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ u32 rx_ulofdma_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
};
/* == RX PDEV/SOC STATS == */
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 049774cc158c..2148acf37071 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -835,8 +835,9 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath11k_dp_rx_tid_del_func);
if (ret) {
- ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
- tid, ret);
+ if (ret != -ESHUTDOWN)
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
@@ -2765,6 +2766,9 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
if (!rx_stats)
return;
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 1dba7b9e0bda..bda71ab5a1f2 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1165,7 +1165,7 @@ void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
lockdep_assert_held(&srng->lock);
/* check whether the ring is emptry. Update the shadow
- * HP only when then ring isn't' empty.
+ * HP only when the ring isn't empty.
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
*srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 4bb1fbaed0c9..7f39c6fb7408 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -757,7 +757,7 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
/* TODO: HW queue descriptors are currently allocated for max BA
* window size for all QOS TIDs so that same descriptor can be used
- * later when ADDBA request is recevied. This should be changed to
+ * later when ADDBA request is received. This should be changed to
* allocate HW queue descriptors based on BA window size being
* negotiated (0 for non BA cases), and reallocate when BA window
* size changes and also send WMI message to FW to change the REO
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 069c29a4fac7..ca3aedc0252d 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -258,8 +258,10 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
- if (eid >= ATH11K_HTC_EP_COUNT)
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ dev_kfree_skb_any(skb);
return;
+ }
ep = &htc->endpoint[eid];
spin_lock_bh(&htc->tx_lock);
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 77dc5c851c9b..bb5ac940e470 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -153,9 +153,6 @@ struct ath11k_hw_params {
u32 svc_to_ce_map_len;
bool single_pdev_only;
- u32 rfkill_pin;
- u32 rfkill_cfg;
- u32 rfkill_on_level;
bool rxdma1_enable;
int num_rxmda_per_pdev;
@@ -201,8 +198,6 @@ struct ath11k_hw_params {
bool fixed_mem_region;
bool static_window_map;
bool hybrid_bus_type;
- u8 dp_window_idx;
- u8 ce_window_idx;
bool fixed_fw_mem;
bool support_off_channel_tx;
};
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index ee1590b16eff..7e91e347c9ff 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -505,7 +505,7 @@ static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
- conf = rcu_dereference(vif->chanctx_conf);
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
@@ -1362,7 +1362,7 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!bcn) {
ath11k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1398,10 +1398,11 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
- if (!vif->color_change_active && !arvif->bcca_zero_sent)
+ if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent)
return;
- if (vif->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif)) {
+ if (vif->bss_conf.color_change_active &&
+ ieee80211_beacon_cntdwn_is_complete(vif)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
@@ -1409,7 +1410,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
arvif->bcca_zero_sent = false;
- if (vif->color_change_active)
+ if (vif->bss_conf.color_change_active)
ieee80211_beacon_update_cntdwn(vif);
ath11k_mac_setup_bcn_tmpl(arvif);
}
@@ -1539,7 +1540,7 @@ static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
lockdep_assert_held(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_STATION)
- aid = vif->bss_conf.aid;
+ aid = vif->cfg.aid;
else
aid = sta->aid;
@@ -2749,7 +2750,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
WARN_ON(arvif->is_up);
- arvif->aid = bss_conf->aid;
+ arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
@@ -2764,7 +2765,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
spin_lock_bh(&ar->ab->base_lock);
@@ -3091,7 +3092,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
@@ -3185,9 +3186,10 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
- arvif->u.ap.ssid_len = info->ssid_len;
- if (info->ssid_len)
- memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
@@ -3275,7 +3277,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc)
+ if (vif->cfg.assoc)
ath11k_bss_assoc(hw, vif, info);
else
ath11k_bss_disassoc(hw, vif);
@@ -3291,7 +3293,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params.supports_sta_ps) {
- arvif->ps = vif->bss_conf.ps;
+ arvif->ps = vif->cfg.ps;
ret = ath11k_mac_config_ps(ar);
if (ret)
@@ -3406,14 +3408,15 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_mac_fils_discovery(arvif, info);
if (changed & BSS_CHANGED_ARP_FILTER) {
- ipv4_cnt = min(info->arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
- memcpy(arvif->arp_ns_offload.ipv4_addr, info->arp_addr_list,
+ ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
+ memcpy(arvif->arp_ns_offload.ipv4_addr,
+ vif->cfg.arp_addr_list,
ipv4_cnt * sizeof(u32));
memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
- info->arp_addr_cnt,
+ vif->cfg.arp_addr_cnt,
vif->addr, arvif->arp_ns_offload.ipv4_addr);
}
@@ -4479,6 +4482,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
}
+ ewma_avg_rssi_init(&arsta->avg_rssi);
return 0;
free_tx_stats:
@@ -4819,7 +4823,8 @@ exit:
}
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath11k *ar = hw->priv;
@@ -5606,63 +5611,6 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
return 0;
}
-int ath11k_mac_rfkill_config(struct ath11k *ar)
-{
- struct ath11k_base *ab = ar->ab;
- u32 param;
- int ret;
-
- if (ab->hw_params.rfkill_pin == 0)
- return -EOPNOTSUPP;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
- ab->hw_params.rfkill_pin, ab->hw_params.rfkill_cfg,
- ab->hw_params.rfkill_on_level);
-
- param = FIELD_PREP(WMI_RFKILL_CFG_RADIO_LEVEL,
- ab->hw_params.rfkill_on_level) |
- FIELD_PREP(WMI_RFKILL_CFG_GPIO_PIN_NUM,
- ab->hw_params.rfkill_pin) |
- FIELD_PREP(WMI_RFKILL_CFG_PIN_AS_GPIO,
- ab->hw_params.rfkill_cfg);
-
- ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
- param, ar->pdev->pdev_id);
- if (ret) {
- ath11k_warn(ab,
- "failed to set rfkill config 0x%x: %d\n",
- param, ret);
- return ret;
- }
-
- return 0;
-}
-
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable)
-{
- enum wmi_rfkill_enable_radio param;
- int ret;
-
- if (enable)
- param = WMI_RFKILL_ENABLE_RADIO_ON;
- else
- param = WMI_RFKILL_ENABLE_RADIO_OFF;
-
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac %d rfkill enable %d",
- ar->pdev_idx, param);
-
- ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE,
- param, ar->pdev->pdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n",
- param, ret);
- return ret;
- }
-
- return 0;
-}
-
static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -5917,7 +5865,6 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
- cancel_work_sync(&ar->ab->rfkill_work);
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
@@ -6848,7 +6795,7 @@ ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath11k_mac_change_chanctx_arg *arg = data;
- if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
@@ -6861,7 +6808,7 @@ ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ath11k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
- ctx = rcu_access_pointer(vif->chanctx_conf);
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
@@ -7069,6 +7016,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
@@ -7158,6 +7106,7 @@ out:
static void
ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
@@ -7799,6 +7748,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
{
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
+ struct ath11k_pdev_cap *cap;
struct ath11k *ar = arvif->ar;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -7819,10 +7769,11 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
return -EPERM;
band = def.chan->band;
+ cap = &ar->pdev->cap;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
he_mcs_mask = mask->control[band].he_mcs;
- ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+ ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
@@ -7832,7 +7783,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
he_ltf = mask->control[band].he_ltf;
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
- * requires passing atleast one of used basic rates along with them.
+ * requires passing at least one of used basic rates along with them.
* Fixed rate setting across different preambles(legacy, HT, VHT) is
* not supported by the FW. Hence use of FIXED_RATE vdev param is not
* suitable for setting single HT/VHT rates.
@@ -8161,6 +8112,10 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
+
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
+ ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
#if IS_ENABLED(CONFIG_IPV6)
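
The signal_avg reported in the hunk above comes from an exponentially weighted moving average that the earlier ath11k_mac_station_add() change seeds with ewma_avg_rssi_init(). A minimal standalone sketch of how such an average behaves; the weight and the floating-point math here are illustrative only, the kernel's DECLARE_EWMA() machinery uses fixed-point internally.

/* Minimal sketch of an exponentially weighted moving average, in the spirit
 * of what DECLARE_EWMA() generates; the weight and use of doubles are
 * illustrative, not the kernel's fixed-point implementation.
 */
#include <stdio.h>

struct avg_rssi {
	double value;
	int initialized;
};

static void avg_rssi_init(struct avg_rssi *e)
{
	e->value = 0.0;
	e->initialized = 0;
}

/* new_avg = old_avg + (sample - old_avg) / weight */
static void avg_rssi_add(struct avg_rssi *e, double sample, double weight)
{
	if (!e->initialized) {
		e->value = sample;
		e->initialized = 1;
		return;
	}
	e->value += (sample - e->value) / weight;
}

int main(void)
{
	struct avg_rssi rssi;
	double samples[] = { -62, -60, -75, -58, -61 };
	unsigned int i;

	avg_rssi_init(&rssi);
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		avg_rssi_add(&rssi, samples[i], 8.0);

	/* the smoothed value tracks the samples while damping outliers */
	printf("avg rssi: %.1f dBm\n", rssi.value);
	return 0;
}
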
@@ -8297,11 +8252,15 @@ static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
struct ath11k *ar = hw->priv;
- const struct cfg80211_sar_sub_specs *sspec = sar->sub_specs;
+ const struct cfg80211_sar_sub_specs *sspec;
int ret, index;
u8 *sar_tbl;
u32 i;
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0)
+ return -EINVAL;
+
mutex_lock(&ar->conf_mutex);
if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) ||
@@ -8310,12 +8269,6 @@ static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
goto exit;
}
- if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
- sar->num_sub_specs == 0) {
- ret = -EINVAL;
- goto exit;
- }
-
ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret);
@@ -8328,6 +8281,7 @@ static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
goto exit;
}
+ sspec = sar->sub_specs;
for (i = 0; i < sar->num_sub_specs; i++) {
if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) {
ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n",
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 57ebfc592b00..2a0d3afb0c99 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -148,8 +148,6 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath11k_mac_scan_finish(struct ath11k *ar);
void ath11k_mac_scan_finish(struct ath11k *ar);
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable);
-int ath11k_mac_rfkill_config(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index dedf1b88ddf6..5bd34a6273d9 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -50,6 +50,22 @@ static void ath11k_pci_bus_release(struct ath11k_base *ab)
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}
+static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
+{
+ if (!ab->hw_params.static_window_map)
+ return ATH11K_PCI_WINDOW_START;
+
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within DP register range, use 3rd window */
+ return 3 * ATH11K_PCI_WINDOW_START;
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within CE register range, use 2nd window */
+ return 2 * ATH11K_PCI_WINDOW_START;
+ else
+ return ATH11K_PCI_WINDOW_START;
+}
+
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
struct ath11k_base *ab = ab_pci->ab;
@@ -70,26 +86,39 @@ static void
ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 window_start = ATH11K_PCI_WINDOW_START;
+ u32 window_start;
+
+ window_start = ath11k_pci_get_window_start(ab, offset);
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
}
static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 window_start = ATH11K_PCI_WINDOW_START;
- u32 val;
+ u32 window_start, val;
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
return val;
}
@@ -110,6 +139,8 @@ static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
};
static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+ .wakeup = NULL,
+ .release = NULL,
.get_msi_irq = ath11k_pci_get_msi_irq,
.window_write32 = ath11k_pci_window_write32,
.window_read32 = ath11k_pci_window_read32,
@@ -697,6 +728,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ const struct ath11k_pci_ops *pci_ops;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -754,10 +786,10 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- ab->pci.ops = &ath11k_pci_ops_qca6390;
+ pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- ab->pci.ops = &ath11k_pci_ops_qcn9074;
+ pci_ops = &ath11k_pci_ops_qcn9074;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
@@ -787,7 +819,7 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- ab->pci.ops = &ath11k_pci_ops_qca6390;
+ pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -796,6 +828,12 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
@@ -920,7 +958,9 @@ qmi_fail:
static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pci_power_down(ab);
}
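
The new ath11k_pci_get_window_start() above picks a static register window with the test "(offset ^ base) < mask". When the base address is aligned to the window size, the XOR clears the shared high bits, so the comparison works as a cheap range check and the locked dynamic-window path can be skipped. A standalone sketch of that test with made-up constants; the real base addresses and window size come from the ath11k headers.

/* Standalone illustration of the "(offset ^ base) < mask" range test used to
 * select a static register window. All constants here are made up for the
 * example.
 */
#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE	0x80000u		/* 512 KiB windows */
#define WINDOW_MASK	(WINDOW_SIZE - 1)
#define DP_REG_BASE	0x01200000u		/* hypothetical DP register base */
#define CE_REG_BASE	0x01300000u		/* hypothetical CE register base */

static uint32_t window_start(uint32_t offset)
{
	/* With base aligned to the window size, offset ^ base equals
	 * offset - base for offsets inside base's window and has a higher
	 * bit set otherwise, so a plain less-than test acts as a range check.
	 */
	if ((offset ^ DP_REG_BASE) < WINDOW_MASK)
		return 3 * WINDOW_SIZE;	/* third window maps the DP range */
	else if ((offset ^ CE_REG_BASE) < WINDOW_MASK)
		return 2 * WINDOW_SIZE;	/* second window maps the CE range */
	return WINDOW_SIZE;		/* fall back to the dynamic window */
}

int main(void)
{
	printf("0x%08x -> window 0x%x\n", DP_REG_BASE + 0x40, window_start(DP_REG_BASE + 0x40));
	printf("0x%08x -> window 0x%x\n", CE_REG_BASE + 0x10, window_start(CE_REG_BASE + 0x10));
	printf("0x%08x -> window 0x%x\n", 0x00f00000u, window_start(0x00f00000u));
	return 0;
}
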
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index cf12b98c480d..1adf20ebef27 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -140,23 +140,8 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
-static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab,
- u32 offset)
-{
- u32 window_start = 0;
-
- if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
- window_start = ab->hw_params.dp_window_idx * ATH11K_PCI_WINDOW_START;
- else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
- ATH11K_PCI_WINDOW_RANGE_MASK)
- window_start = ab->hw_params.ce_window_idx * ATH11K_PCI_WINDOW_START;
-
- return window_start;
-}
-
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
- u32 window_start;
int ret = 0;
/* for offset beyond BAR + 4K - 32, may
@@ -166,15 +151,10 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START) {
+ if (offset < ATH11K_PCI_WINDOW_START)
iowrite32(value, ab->mem + offset);
- } else if (ab->hw_params.static_window_map) {
- window_start = ath11k_pcic_get_window_start(ab, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- } else if (ab->pci.ops->window_write32) {
+ else
ab->pci.ops->window_write32(ab, offset, value);
- }
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
@@ -185,9 +165,8 @@ EXPORT_SYMBOL(ath11k_pcic_write32);
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
- u32 val = 0;
- u32 window_start;
int ret = 0;
+ u32 val;
/* for offset beyond BAR + 4K - 32, may
* need to wakeup the device to access.
@@ -196,15 +175,10 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START) {
+ if (offset < ATH11K_PCI_WINDOW_START)
val = ioread32(ab->mem + offset);
- } else if (ab->hw_params.static_window_map) {
- window_start = ath11k_pcic_get_window_start(ab, offset);
- val = ioread32(ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- } else if (ab->pci.ops->window_read32) {
+ else
val = ab->pci.ops->window_read32(ab, offset);
- }
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
@@ -516,11 +490,6 @@ static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
- if (!ab->pci.ops->get_msi_irq) {
- WARN_ONCE(1, "get_msi_irq pci op not defined");
- return -EOPNOTSUPP;
- }
-
return ab->pci.ops->get_msi_irq(ab, vector);
}
@@ -746,3 +715,19 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
+
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops)
+{
+ if (!pci_ops)
+ return 0;
+
+ /* Return error if mandatory pci_ops callbacks are missing */
+ if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
+ !pci_ops->window_read32)
+ return -EINVAL;
+
+ ab->pci.ops = pci_ops;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
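
ath11k_pcic_register_pci_ops() rejects an ops table that lacks any callback the core now invokes unconditionally, which is why the per-call NULL checks in the read/write and get_msi_irq paths could be removed. A standalone sketch of the same register-and-validate pattern; the structure and function names are illustrative, not the driver's.

/* Sketch of validating mandatory callbacks once at registration time instead
 * of checking for NULL on every call. Names are illustrative.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

struct bus_ops {
	/* optional */
	int (*wakeup)(void *priv);
	void (*release)(void *priv);
	/* mandatory: the fast paths call these without NULL checks */
	uint32_t (*read32)(void *priv, uint32_t offset);
	void (*write32)(void *priv, uint32_t offset, uint32_t value);
};

struct bus_dev {
	const struct bus_ops *ops;
	void *priv;
};

static int bus_register_ops(struct bus_dev *dev, const struct bus_ops *ops)
{
	if (!ops)
		return 0;	/* nothing to install */

	if (!ops->read32 || !ops->write32)
		return -EINVAL;	/* refuse a table the fast path cannot use */

	dev->ops = ops;
	return 0;
}

static uint32_t dummy_read32(void *priv, uint32_t offset) { (void)priv; return offset; }
static void dummy_write32(void *priv, uint32_t offset, uint32_t value) { (void)priv; (void)offset; (void)value; }

int main(void)
{
	struct bus_dev dev = { 0 };
	struct bus_ops good = { .read32 = dummy_read32, .write32 = dummy_write32 };
	struct bus_ops bad = { .read32 = dummy_read32 };	/* missing write32 */

	printf("good: %d\n", bus_register_ops(&dev, &good));
	printf("bad:  %d\n", bus_register_ops(&dev, &bad));
	return 0;
}
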
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
index c53d86289a8e..0afbb34510db 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.h
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -43,4 +43,6 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 61ead37a944a..00136601cb7d 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2229,13 +2229,13 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
if (resp.fw_version_info_valid) {
ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
- strlcpy(ab->qmi.target.fw_build_timestamp,
+ strscpy(ab->qmi.target.fw_build_timestamp,
resp.fw_version_info.fw_build_timestamp,
sizeof(ab->qmi.target.fw_build_timestamp));
}
if (resp.fw_build_id_valid)
- strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
if (resp.eeprom_read_timeout_valid) {
@@ -2659,7 +2659,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
memset(&resp, 0, sizeof(resp));
req->host_version_valid = 1;
- strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ strscpy(req->host_version, ATH11K_HOST_VERSION_STRING,
sizeof(req->host_version));
req->tgt_cfg_valid = 1;
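
The qmi.c hunks replace strlcpy() with strscpy(), which always NUL-terminates the destination and returns the number of characters copied, or -E2BIG when the source had to be truncated, instead of the source length. A userspace sketch of that contract follows; bounded_copy() is a hypothetical stand-in used to show the behaviour, not the kernel implementation.

/* Userspace sketch of the strscpy() contract: always NUL-terminate, return
 * the copied length, or -E2BIG on truncation. Illustration only.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

static ssize_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return -E2BIG;

	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;		/* truncated */
	}

	memcpy(dst, src, len + 1);
	return (ssize_t)len;		/* characters copied, excluding NUL */
}

int main(void)
{
	char build_id[8];

	printf("%zd\n", bounded_copy(build_id, "short", sizeof(build_id)));
	printf("%zd\n", bounded_copy(build_id, "a-very-long-build-id", sizeof(build_id)));
	printf("%s\n", build_id);	/* always NUL-terminated */
	return 0;
}
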
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index a02e54735e88..76560587bea0 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -126,15 +126,12 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
- __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH11K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH11K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 84d1c7054013..88ee4f9d19da 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -129,8 +129,6 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
- [WMI_TAG_RFKILL_EVENT] = {
- .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -533,8 +531,6 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
- ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
-
return 0;
}
@@ -1700,7 +1696,7 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- if (vif->csa_active) {
+ if (vif->bss_conf.csa_active) {
cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
}
@@ -3822,7 +3818,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_KERNEL);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -6563,7 +6560,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
fallback:
/* Fallback to older reg (by sending previous country setting
- * again if fw has succeded and we failed to process here.
+ * again if fw has succeeded and we failed to process here.
* The Regdomain should be uniform across driver and fw. Since the
* FW has processed the command and sent a success status, we expect
* this function to succeed as well. If it doesn't, CTRY needs to be
@@ -7475,7 +7472,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
continue;
}
- if (arvif->is_up && arvif->vif->csa_active)
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(arvif->vif);
}
rcu_read_unlock();
@@ -7565,40 +7562,6 @@ exit:
kfree(tb);
}
-static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
- const struct wmi_rfkill_state_change_ev *ev;
- const void **tb;
- int ret;
-
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
-
- ev = tb[WMI_TAG_RFKILL_EVENT];
- if (!ev) {
- kfree(tb);
- return;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
- ev->gpio_pin_num,
- ev->int_type,
- ev->radio_state);
-
- spin_lock_bh(&ab->base_lock);
- ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
- spin_unlock_bh(&ab->base_lock);
-
- queue_work(ab->workqueue, &ab->rfkill_work);
- kfree(tb);
-}
-
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
@@ -7994,9 +7957,6 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_11D_NEW_COUNTRY_EVENTID:
ath11k_reg_11d_new_cc_event(ab, skb);
break;
- case WMI_RFKILL_STATE_CHANGE_EVENTID:
- ath11k_rfkill_state_change_event(ab, skb);
- break;
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index b1fad4707dc6..4da248ffa318 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -5328,31 +5328,6 @@ struct target_resource_config {
u32 twt_ap_sta_count;
};
-enum wmi_sys_cap_info_flags {
- WMI_SYS_CAP_INFO_RXTX_LED = BIT(0),
- WMI_SYS_CAP_INFO_RFKILL = BIT(1),
-};
-
-#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
-#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6)
-#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
-
-enum wmi_rfkill_enable_radio {
- WMI_RFKILL_ENABLE_RADIO_ON = 0,
- WMI_RFKILL_ENABLE_RADIO_OFF = 1,
-};
-
-enum wmi_rfkill_radio_state {
- WMI_RFKILL_RADIO_STATE_OFF = 1,
- WMI_RFKILL_RADIO_STATE_ON = 2,
-};
-
-struct wmi_rfkill_state_change_ev {
- u32 gpio_pin_num;
- u32 int_type;
- u32 radio_state;
-} __packed;
-
enum wmi_debug_log_param {
WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 66d123f48085..c59c14483177 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1946,7 +1946,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, 0);
if (!skb) {
ret = -ENOMEM;
@@ -1982,7 +1982,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
/*
* Check if the previous beacon has gone out. If
- * not, don't don't try to post another: skip this
+ * not, don't try to post another: skip this
* period and wait for the next. Missed beacons
* indicate a problem and should not occur. If we
* miss too many consecutive beacons reset the device.
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 532eeac9e83e..ed5d2160a72a 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -250,7 +250,7 @@ unlock:
static void
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changes)
+ struct ieee80211_bss_conf *bss_conf, u64 changes)
{
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct ath5k_hw *ah = hw->priv;
@@ -278,9 +278,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_ASSOC) {
- avf->assoc = bss_conf->assoc;
- if (bss_conf->assoc)
- ah->assoc = bss_conf->assoc;
+ avf->assoc = vif->cfg.assoc;
+ if (vif->cfg.assoc)
+ ah->assoc = vif->cfg.assoc;
else
ah->assoc = ath5k_any_vif_assoc(ah);
@@ -288,11 +288,11 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath5k_set_beacon_filter(hw, ah->assoc);
ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
- common->curaid = bss_conf->aid;
+ vif->cfg.aid, common->curbssid);
+ common->curaid = vif->cfg.aid;
ath5k_hw_set_bssid(ah);
/* Once ANI is available you would start it here */
}
@@ -410,7 +410,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
/* FIF_CONTROL doc says we should only pass on control frames for this
* station. This needs testing. I believe right now this
* enables *all* control frames, which is OK.. but
- * but we should see if we can improve on granularity */
+ * we should see if we can improve on granularity */
if (*new_flags & FIF_CONTROL)
rfilt |= AR5K_RX_FILTER_CONTROL;
@@ -572,7 +572,8 @@ ath5k_get_stats(struct ieee80211_hw *hw,
static int
-ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath5k_hw *ah = hw->priv;
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 00f9e347d414..5797ef9c73d7 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3136,7 +3136,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
/* Limit it to be inside pwr range */
table_size = pwr_max[pdg] - pwr_min[pdg];
- max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
+ max_idx = min(pdadc_n, table_size);
/* Fill pdadc_out table */
while (pdadc_0 < max_idx && pdadc_i < 128)
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index bd1183830e91..e11c7e9accc0 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -807,7 +807,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
cfg80211_put_bss(ar->wiphy, bss);
} else if (vif->sme_state == SME_CONNECTED) {
struct cfg80211_roam_info roam_info = {
- .bss = bss,
+ .links[0].bss = bss,
.req_ie = assoc_req_ie,
.req_ie_len = assoc_req_len,
.resp_ie = assoc_resp_ie,
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->ndev, &chandef);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
mutex_unlock(&vif->wdev.mtx);
}
@@ -2967,7 +2967,8 @@ static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev,
return ath6kl_set_ies(vif, beacon);
}
-static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -3368,6 +3369,7 @@ static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
struct net_device *dev,
+ unsigned int link_id,
const u8 *addr,
const struct cfg80211_bitrate_mask *mask)
{
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index f9d3f3a5edfe..ba16b98c872d 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -92,7 +92,7 @@ struct bus_request {
* emode - This indicates the whether the command is to be executed in a
* blocking or non-blocking fashion (HIF_SYNCHRONOUS/
* HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
- * implemented using the asynchronous mode allowing the the bus
+ * implemented using the asynchronous mode allowing the bus
* driver to indicate the completion of operation through the
* registered callback routine. The requirement primarily comes
* from the contexts these operations get called from (a driver's
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 6b51a2dceadc..8a43c48ec1cf 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1185,7 +1185,7 @@ static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
* Wait for first 4 bytes to be in FIFO
* If CONSERVATIVE_BMI_READ is enabled, also wait for
* a BMI command credit, which indicates that the ENTIRE
- * response is available in the the FIFO
+ * response is available in the FIFO
*
* CASE 3: length > 128
* Wait for the first 4 bytes to be in FIFO
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index a3d3740419eb..231a94769ddb 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -253,13 +253,10 @@ DECLARE_EVENT_CLASS(ath6kl_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -284,14 +281,11 @@ TRACE_EVENT(ath6kl_log_dbg,
TP_ARGS(level, vaf),
TP_STRUCT__entry(
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 65e683effdcb..5220809841a6 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -71,6 +71,7 @@ struct ath6kl_usb {
u8 *diag_cmd_buffer;
u8 *diag_resp_buffer;
struct ath6kl *ar;
+ struct workqueue_struct *wq;
};
/* usb urb object */
@@ -478,7 +479,7 @@ static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
* Flushing any pending I/O may schedule work this call will block
* until all scheduled work runs to completion.
*/
- flush_scheduled_work();
+ flush_workqueue(ar_usb->wq);
}
static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
@@ -544,7 +545,7 @@ static void ath6kl_usb_recv_complete(struct urb *urb)
/* note: queue implements a lock */
skb_queue_tail(&pipe->io_comp_queue, skb);
- schedule_work(&pipe->io_complete_work);
+ queue_work(pipe->ar_usb->wq, &pipe->io_complete_work);
cleanup_recv_urb:
ath6kl_usb_cleanup_recv_urb(urb_context);
@@ -579,7 +580,7 @@ static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
/* note: queue implements a lock */
skb_queue_tail(&pipe->io_comp_queue, skb);
- schedule_work(&pipe->io_complete_work);
+ queue_work(pipe->ar_usb->wq, &pipe->io_complete_work);
}
static void ath6kl_usb_io_comp_work(struct work_struct *work)
@@ -619,6 +620,7 @@ static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
kfree(ar_usb->diag_cmd_buffer);
kfree(ar_usb->diag_resp_buffer);
+ destroy_workqueue(ar_usb->wq);
kfree(ar_usb);
}
@@ -631,9 +633,15 @@ static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
int status = 0;
int i;
+ /* ath6kl_usb_destroy() needs ar_usb != NULL && ar_usb->wq != NULL. */
ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
if (ar_usb == NULL)
- goto fail_ath6kl_usb_create;
+ return NULL;
+ ar_usb->wq = alloc_workqueue("ath6kl_wq", 0, 0);
+ if (!ar_usb->wq) {
+ kfree(ar_usb);
+ return NULL;
+ }
usb_set_intfdata(interface, ar_usb);
spin_lock_init(&(ar_usb->cs_lock));
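
The ath6kl USB changes above stop relying on the system workqueue plus flush_scheduled_work() and instead allocate a driver-private workqueue, so teardown only waits for this driver's own work items. A minimal, hedged module sketch of that lifecycle; it mirrors the pattern only and is not ath6kl's actual structure.

// SPDX-License-Identifier: GPL-2.0-only
/* Minimal sketch of a driver-private workqueue lifecycle: allocate on init,
 * queue work from hot paths, flush and destroy on teardown. Illustrative only.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example: work ran\n");
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);	/* instead of schedule_work() */
	return 0;
}

static void __exit example_exit(void)
{
	/* flush only our own work, then tear the queue down */
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
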
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 672014973cee..b4fcfb72991c 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -698,7 +698,7 @@ enum auth_mode {
/*
* NB: these values are ordered carefully; there are lots of
- * of implications in any reordering. In particular beware
+ * implications in any reordering. In particular beware
* that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
*/
#define ATH6KL_CIPHER_WEP 0
@@ -1278,7 +1278,7 @@ struct wmi_snr_threshold_params_cmd {
/* "alpha" */
u8 weight;
- /* lowest of uppper */
+ /* lowest of upper */
u8 thresh_above1_val;
u8 thresh_above2_val;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index fcfed8e59d29..ebdb97999335 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -498,7 +498,7 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
else
REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, repeat_bit);
- /* on AR92xx, the highest bit of count will make the the chip send
+ /* on AR92xx, the highest bit of count will make the chip send
* spectral samples endlessly. Check if this really was intended,
* and fix otherwise.
*/
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 72e2e71aac0e..ee72faac2f1d 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -135,7 +135,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
bf->bf_mpdu = NULL;
}
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, 0);
if (skb == NULL)
return NULL;
@@ -362,7 +362,7 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc,
bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- if (!vif || !vif->csa_active)
+ if (!vif || !vif->bss_conf.csa_active)
return false;
if (!ieee80211_beacon_cntdwn_is_complete(vif))
@@ -585,8 +585,9 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
static void ath9k_cache_beacon_config(struct ath_softc *sc,
struct ath_chanctx *ctx,
- struct ieee80211_bss_conf *bss_conf)
+ struct ieee80211_vif *vif)
{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &ctx->beacon;
@@ -596,7 +597,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->dtim_count = 1;
- cur_conf->ibss_creator = bss_conf->ibss_creator;
+ cur_conf->ibss_creator = vif->cfg.ibss_creator;
/*
* It looks like mac80211 may end up using beacon interval of zero in
@@ -649,7 +650,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
cur_conf->enable_beacon = beacons;
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
- ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf);
+ ath9k_cache_beacon_config(sc, ctx, main_vif);
ath9k_set_beacon(sc);
set_bit(ATH_OP_BEACONS, &common->op_flags);
@@ -657,7 +658,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
}
/* Update the beacon configuration. */
- ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf);
+ ath9k_cache_beacon_config(sc, ctx, main_vif);
/*
* Configure the HW beacon registers only when we have a valid
@@ -670,7 +671,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
* IBSS interface.
*/
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
- !enabled && beacons && !main_vif->bss_conf.ibss_creator) {
+ !enabled && beacons && !main_vif->cfg.ibss_creator) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index acb9602aa464..11349218bc21 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -246,7 +246,7 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
DFS_STAT_INC(sc, dc_phy_errors);
/* when both are present use stronger one */
- rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
+ rssi = max(ard->rssi, ard->ext_rssi);
break;
default:
/*
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 518deb5098a2..4d9002a9d082 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -244,11 +244,11 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, txok);
if (txok) {
- TX_STAT_INC(skb_success);
- TX_STAT_ADD(skb_success_bytes, ln);
+ TX_STAT_INC(hif_dev, skb_success);
+ TX_STAT_ADD(hif_dev, skb_success_bytes, ln);
}
else
- TX_STAT_INC(skb_failed);
+ TX_STAT_INC(hif_dev, skb_failed);
}
}
@@ -302,7 +302,7 @@ static void hif_usb_tx_cb(struct urb *urb)
hif_dev->tx.tx_buf_cnt++;
if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
__hif_usb_tx(hif_dev); /* Check for pending SKBs */
- TX_STAT_INC(buf_completed);
+ TX_STAT_INC(hif_dev, buf_completed);
spin_unlock(&hif_dev->tx.tx_lock);
}
@@ -353,7 +353,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
tx_buf->len += tx_buf->offset;
__skb_queue_tail(&tx_buf->skb_queue, nskb);
- TX_STAT_INC(skb_queued);
+ TX_STAT_INC(hif_dev, skb_queued);
}
usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
@@ -369,7 +369,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
} else {
- TX_STAT_INC(buf_queued);
+ TX_STAT_INC(hif_dev, buf_queued);
}
return ret;
@@ -514,7 +514,7 @@ static void hif_usb_sta_drain(void *hif_handle, u8 idx)
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, false);
hif_dev->tx.tx_skb_cnt--;
- TX_STAT_INC(skb_failed);
+ TX_STAT_INC(hif_dev, skb_failed);
}
}
@@ -585,14 +585,14 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
pkt_tag = get_unaligned_le16(ptr + index + 2);
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
- RX_STAT_INC(skb_dropped);
+ RX_STAT_INC(hif_dev, skb_dropped);
return;
}
if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
- RX_STAT_INC(skb_dropped);
+ RX_STAT_INC(hif_dev, skb_dropped);
return;
}
@@ -618,7 +618,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
goto err;
}
skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
+ RX_STAT_INC(hif_dev, skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]),
hif_dev->rx_transfer_len);
@@ -639,7 +639,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
goto err;
}
skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
+ RX_STAT_INC(hif_dev, skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
skb_put(nskb, pkt_len);
@@ -649,10 +649,10 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
err:
for (i = 0; i < pool_index; i++) {
- RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
+ RX_STAT_ADD(hif_dev, skb_completed_bytes, skb_pool[i]->len);
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
skb_pool[i]->len, USB_WLAN_RX_PIPE);
- RX_STAT_INC(skb_completed);
+ RX_STAT_INC(hif_dev, skb_completed);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 6b45e63fae4b..30f0765fb9fd 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -327,14 +327,18 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
}
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
-
-#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
-#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
-#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
-
-#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
+#define __STAT_SAFE(hif_dev, expr) ((hif_dev)->htc_handle->drv_priv ? (expr) : 0)
+#define CAB_STAT_INC(priv) ((priv)->debug.tx_stats.cab_queued++)
+#define TX_QSTAT_INC(priv, q) ((priv)->debug.tx_stats.queue_stats[q]++)
+
+#define TX_STAT_INC(hif_dev, c) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(hif_dev, c, a) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c += a)
+#define RX_STAT_INC(hif_dev, c) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(hif_dev, c, a) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c += a)
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
struct ath_rx_status *rs);
@@ -374,13 +378,13 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data);
#else
-#define TX_STAT_INC(c) do { } while (0)
-#define TX_STAT_ADD(c, a) do { } while (0)
-#define RX_STAT_INC(c) do { } while (0)
-#define RX_STAT_ADD(c, a) do { } while (0)
-#define CAB_STAT_INC do { } while (0)
+#define TX_STAT_INC(hif_dev, c)
+#define TX_STAT_ADD(hif_dev, c, a)
+#define RX_STAT_INC(hif_dev, c)
+#define RX_STAT_ADD(hif_dev, c, a)
-#define TX_QSTAT_INC(c) do { } while (0)
+#define CAB_STAT_INC(priv)
+#define TX_QSTAT_INC(priv, c)
static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
struct ath_rx_status *rs)
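
The reworked TX_STAT_INC()/RX_STAT_INC() macros above take hif_dev explicitly and route through __STAT_SAFE(), which skips the update while htc_handle->drv_priv is still NULL during early probe (drv_priv is now only assigned once probing has succeeded). A standalone sketch of the same guard-before-dereference macro pattern; STAT_SAFE() and TX_OK_INC() are hypothetical names mirroring the shape of the real macros.

/* Standalone sketch of a stats macro that is safe to call before the private
 * data pointer is populated.
 */
#include <stdio.h>
#include <stddef.h>

struct stats {
	unsigned long tx_ok;
};

struct dev {
	struct stats *priv;	/* NULL until probe finishes */
};

#define STAT_SAFE(d, expr)	((d)->priv ? (expr) : 0)
#define TX_OK_INC(d)		STAT_SAFE((d), (d)->priv->tx_ok++)

int main(void)
{
	struct stats s = { 0 };
	struct dev d = { .priv = NULL };

	TX_OK_INC(&d);		/* early call: no crash, silently skipped */
	d.priv = &s;		/* probe finished */
	TX_OK_INC(&d);
	printf("tx_ok = %lu\n", s.tx_ok);
	return 0;
}
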
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index c745897aa3d6..533471e69400 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -215,7 +215,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
}
/* Get a new beacon */
- beacon = ieee80211_beacon_get(priv->hw, vif);
+ beacon = ieee80211_beacon_get(priv->hw, vif, 0);
if (!beacon) {
spin_unlock_bh(&priv->beacon_lock);
return;
@@ -511,7 +511,7 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
struct ieee80211_vif *vif;
vif = priv->csa_vif;
- if (!vif || !vif->csa_active)
+ if (!vif || !vif->bss_conf.csa_active)
return false;
if (!ieee80211_beacon_cntdwn_is_complete(vif))
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index ff61ae34ecdf..07ac88fb1c57 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -944,7 +944,6 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
priv->hw = hw;
priv->htc = htc_handle;
priv->dev = dev;
- htc_handle->drv_priv = priv;
SET_IEEE80211_DEV(hw, priv->dev);
ret = ath9k_htc_wait_for_target(priv);
@@ -965,6 +964,8 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
if (ret)
goto err_init;
+ htc_handle->drv_priv = priv;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index cfee732a89b1..61875c45366b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -100,7 +100,7 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
priv->rearm_ani = true;
}
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
priv->rearm_ani = true;
priv->reconfig_beacon = true;
}
@@ -1369,7 +1369,8 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath9k_htc_priv *priv = hw->priv;
@@ -1488,8 +1489,8 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
- common->curaid = bss_conf->aid;
+ if ((vif->type == NL80211_IFTYPE_STATION) && vif->cfg.assoc) {
+ common->curaid = vif->cfg.aid;
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
@@ -1509,7 +1510,7 @@ static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
@@ -1521,17 +1522,17 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
- bss_conf->assoc);
+ vif->cfg.assoc);
- bss_conf->assoc ?
+ vif->cfg.assoc ?
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
- if (!bss_conf->assoc)
+ if (!vif->cfg.assoc)
clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_htc_choose_set_bssid(priv);
- if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
+ if (vif->cfg.assoc && (priv->num_sta_assoc_vif == 1))
ath9k_htc_start_ani(priv);
else if (priv->num_sta_assoc_vif == 0)
ath9k_htc_stop_ani(priv);
@@ -1540,7 +1541,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_IBSS) {
if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
- common->curaid = bss_conf->aid;
+ common->curaid = vif->cfg.aid;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
ath9k_htc_set_bssid(priv);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index a23eaca0326d..672789e3c55d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
switch (qnum) {
case 0:
- TX_QSTAT_INC(IEEE80211_AC_VO);
+ TX_QSTAT_INC(priv, IEEE80211_AC_VO);
epid = priv->data_vo_ep;
break;
case 1:
- TX_QSTAT_INC(IEEE80211_AC_VI);
+ TX_QSTAT_INC(priv, IEEE80211_AC_VI);
epid = priv->data_vi_ep;
break;
case 2:
- TX_QSTAT_INC(IEEE80211_AC_BE);
+ TX_QSTAT_INC(priv, IEEE80211_AC_BE);
epid = priv->data_be_ep;
break;
case 3:
default:
- TX_QSTAT_INC(IEEE80211_AC_BK);
+ TX_QSTAT_INC(priv, IEEE80211_AC_BK);
epid = priv->data_bk_ep;
break;
}
@@ -328,7 +328,7 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
if (is_cab) {
- CAB_STAT_INC;
+ CAB_STAT_INC(priv);
tx_ctl->epid = priv->cab_ep;
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 77144647f4fc..a4197c14f0a9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1712,7 +1712,8 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
}
static int ath9k_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath_softc *sc = hw->priv;
@@ -1863,7 +1864,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
#define CHECK_ANI \
(BSS_CHANGED_ASSOC | \
@@ -1881,11 +1882,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
- bss_conf->bssid, bss_conf->assoc);
+ bss_conf->bssid, vif->cfg.assoc);
memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
- avp->aid = bss_conf->aid;
- avp->assoc = bss_conf->assoc;
+ avp->aid = vif->cfg.aid;
+ avp->assoc = vif->cfg.assoc;
ath9k_calculate_summary_state(sc, avp->chanctx);
}
@@ -1893,7 +1894,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_IBSS) ||
(changed & BSS_CHANGED_OCB)) {
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- common->curaid = bss_conf->aid;
+ common->curaid = vif->cfg.aid;
ath9k_hw_write_associd(sc->sc_ah);
}
@@ -2596,6 +2597,7 @@ static void ath9k_change_chanctx(struct ieee80211_hw *hw,
static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct ath_softc *sc = hw->priv;
@@ -2627,6 +2629,7 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct ath_softc *sc = hw->priv;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 101295162967..1540e9827f48 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1032,7 +1032,7 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
@@ -1115,7 +1115,7 @@ static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- ar->common.curaid = bss_conf->aid;
+ ar->common.curaid = vif->cfg.aid;
err = carl9170_set_beacon_timers(ar);
if (err)
goto out;
@@ -1365,7 +1365,8 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
}
static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct ar9170 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 514f568d9d07..6bb9aa2bfe65 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1628,7 +1628,7 @@ int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
goto out_unlock;
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
- NULL, NULL);
+ NULL, NULL, 0);
if (!skb) {
err = -ENOMEM;
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index b53ebb3ac9a2..85955572a705 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -48,7 +48,7 @@
* the MAC address to obtain the relevant bits and compare the result with
* (frame's BSSID & mask) to see if they match.
*
- * Simple example: on your card you have have two BSSes you have created with
+ * Simple example: on your card you have two BSSes you have created with
* BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
* There is another BSSID-03 but you are not part of it. For simplicity's sake,
* assuming only 4 bits for a mac address and for BSSIDs you can then have:
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
index ba711644d27e..9935cf475b6d 100644
--- a/drivers/net/wireless/ath/trace.h
+++ b/drivers/net/wireless/ath/trace.h
@@ -40,16 +40,13 @@ TRACE_EVENT(ath_log,
TP_STRUCT__entry(
__string(device, wiphy_name(wiphy))
__string(driver, KBUILD_MODNAME)
- __dynamic_array(char, msg, ATH_DBG_MAX_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, wiphy_name(wiphy));
__assign_str(driver, KBUILD_MODNAME);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH_DBG_MAX_LEN,
- vaf->fmt,
- *vaf->va) >= ATH_DBG_MAX_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 27413703ad69..26bec795b372 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -5,6 +5,7 @@ wcn36xx-y += main.o \
txrx.o \
smd.o \
pmc.o \
- debug.o
+ debug.o \
+ firmware.o
wcn36xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 6af306ae41ad..58b3c0501bfd 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -21,6 +21,7 @@
#include "wcn36xx.h"
#include "debug.h"
#include "pmc.h"
+#include "firmware.h"
#ifdef CONFIG_WCN36XX_DEBUGFS
@@ -136,6 +137,42 @@ static const struct file_operations fops_wcn36xx_dump = {
.write = write_file_dump,
};
+static ssize_t read_file_firmware_feature_caps(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ size_t len = 0, buf_len = 2048;
+ char *buf;
+ int i;
+ int ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wcn->hal_mutex);
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+ len += scnprintf(buf + len, buf_len - len, "%s\n",
+ wcn36xx_firmware_get_cap_name(i));
+ }
+ if (len >= buf_len)
+ break;
+ }
+ mutex_unlock(&wcn->hal_mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_wcn36xx_firmware_feat_caps = {
+ .open = simple_open,
+ .read = read_file_firmware_feature_caps,
+};
+
#define ADD_FILE(name, mode, fop, priv_data) \
do { \
struct dentry *d; \
@@ -163,6 +200,8 @@ void wcn36xx_debugfs_init(struct wcn36xx *wcn)
ADD_FILE(bmps_switcher, 0600, &fops_wcn36xx_bmps, wcn);
ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn);
+ ADD_FILE(firmware_feat_caps, 0200,
+ &fops_wcn36xx_firmware_feat_caps, wcn);
}
void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
index 46307aa562d3..7116d96e0543 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.h
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -31,6 +31,7 @@ struct wcn36xx_dfs_entry {
struct dentry *rootdir;
struct wcn36xx_dfs_file file_bmps_switcher;
struct wcn36xx_dfs_file file_dump;
+ struct wcn36xx_dfs_file file_firmware_feat_caps;
};
void wcn36xx_debugfs_init(struct wcn36xx *wcn);
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.c b/drivers/net/wireless/ath/wcn36xx/firmware.c
new file mode 100644
index 000000000000..4b7f439e4db5
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/firmware.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "wcn36xx.h"
+#include "firmware.h"
+
+#define DEFINE(s)[s] = #s
+
+static const char * const wcn36xx_firmware_caps_names[] = {
+ DEFINE(MCC),
+ DEFINE(P2P),
+ DEFINE(DOT11AC),
+ DEFINE(SLM_SESSIONIZATION),
+ DEFINE(DOT11AC_OPMODE),
+ DEFINE(SAP32STA),
+ DEFINE(TDLS),
+ DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
+ DEFINE(WLANACTIVE_OFFLOAD),
+ DEFINE(BEACON_OFFLOAD),
+ DEFINE(SCAN_OFFLOAD),
+ DEFINE(ROAM_OFFLOAD),
+ DEFINE(BCN_MISS_OFFLOAD),
+ DEFINE(STA_POWERSAVE),
+ DEFINE(STA_ADVANCED_PWRSAVE),
+ DEFINE(AP_UAPSD),
+ DEFINE(AP_DFS),
+ DEFINE(BLOCKACK),
+ DEFINE(PHY_ERR),
+ DEFINE(BCN_FILTER),
+ DEFINE(RTT),
+ DEFINE(RATECTRL),
+ DEFINE(WOW),
+ DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
+ DEFINE(SPECULATIVE_PS_POLL),
+ DEFINE(SCAN_SCH),
+ DEFINE(IBSS_HEARTBEAT_OFFLOAD),
+ DEFINE(WLAN_SCAN_OFFLOAD),
+ DEFINE(WLAN_PERIODIC_TX_PTRN),
+ DEFINE(ADVANCE_TDLS),
+ DEFINE(BATCH_SCAN),
+ DEFINE(FW_IN_TX_PATH),
+ DEFINE(EXTENDED_NSOFFLOAD_SLOT),
+ DEFINE(CH_SWITCH_V1),
+ DEFINE(HT40_OBSS_SCAN),
+ DEFINE(UPDATE_CHANNEL_LIST),
+ DEFINE(WLAN_MCADDR_FLT),
+ DEFINE(WLAN_CH144),
+ DEFINE(NAN),
+ DEFINE(TDLS_SCAN_COEXISTENCE),
+ DEFINE(LINK_LAYER_STATS_MEAS),
+ DEFINE(MU_MIMO),
+ DEFINE(EXTENDED_SCAN),
+ DEFINE(DYNAMIC_WMM_PS),
+ DEFINE(MAC_SPOOFED_SCAN),
+ DEFINE(BMU_ERROR_GENERIC_RECOVERY),
+ DEFINE(DISA),
+ DEFINE(FW_STATS),
+ DEFINE(WPS_PRBRSP_TMPL),
+ DEFINE(BCN_IE_FLT_DELTA),
+ DEFINE(TDLS_OFF_CHANNEL),
+ DEFINE(RTT3),
+ DEFINE(MGMT_FRAME_LOGGING),
+ DEFINE(ENHANCED_TXBD_COMPLETION),
+ DEFINE(LOGGING_ENHANCEMENT),
+ DEFINE(EXT_SCAN_ENHANCED),
+ DEFINE(MEMORY_DUMP_SUPPORTED),
+ DEFINE(PER_PKT_STATS_SUPPORTED),
+ DEFINE(EXT_LL_STAT),
+ DEFINE(WIFI_CONFIG),
+ DEFINE(ANTENNA_DIVERSITY_SELECTION),
+};
+
+#undef DEFINE
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_firmware_caps_names[x];
+}
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+
+ return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+}
+
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
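
The new firmware.c builds its capability-name table with "#define DEFINE(s)[s] = #s", combining a designated array initializer with the preprocessor stringize operator so each enum value indexes its own name and gaps simply stay NULL. A standalone sketch of the same trick with a hypothetical enum:

/* Standalone sketch of the "#define DEFINE(s)[s] = #s" table-building trick:
 * a designated initializer indexed by the enum value, with the stringized
 * enum name as the element. The enum here is hypothetical.
 */
#include <stdio.h>

enum example_caps {
	CAP_FOO = 0,
	CAP_BAR = 1,
	CAP_BAZ = 5,		/* gaps are fine: unnamed slots stay NULL */
	CAP_MAX,
};

#define DEFINE(s) [s] = #s

static const char * const example_cap_names[] = {
	DEFINE(CAP_FOO),
	DEFINE(CAP_BAR),
	DEFINE(CAP_BAZ),
};

#undef DEFINE

static const char *example_cap_name(unsigned int c)
{
	if (c >= sizeof(example_cap_names) / sizeof(example_cap_names[0]) ||
	    !example_cap_names[c])
		return "UNKNOWN";
	return example_cap_names[c];
}

int main(void)
{
	printf("%d -> %s\n", CAP_BAZ, example_cap_name(CAP_BAZ));
	printf("%d -> %s\n", 3, example_cap_name(3));
	return 0;
}
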
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.h b/drivers/net/wireless/ath/wcn36xx/firmware.h
new file mode 100644
index 000000000000..f991cf959f82
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/firmware.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _FIRMWARE_H_
+#define _FIRMWARE_H_
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum wcn36xx_firmware_feat_caps {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+ WLAN_MCADDR_FLT = 36,
+ WLAN_CH144 = 37,
+ NAN = 38,
+ TDLS_SCAN_COEXISTENCE = 39,
+ LINK_LAYER_STATS_MEAS = 40,
+ MU_MIMO = 41,
+ EXTENDED_SCAN = 42,
+ DYNAMIC_WMM_PS = 43,
+ MAC_SPOOFED_SCAN = 44,
+ BMU_ERROR_GENERIC_RECOVERY = 45,
+ DISA = 46,
+ FW_STATS = 47,
+ WPS_PRBRSP_TMPL = 48,
+ BCN_IE_FLT_DELTA = 49,
+ TDLS_OFF_CHANNEL = 51,
+ RTT3 = 52,
+ MGMT_FRAME_LOGGING = 53,
+ ENHANCED_TXBD_COMPLETION = 54,
+ LOGGING_ENHANCEMENT = 55,
+ EXT_SCAN_ENHANCED = 56,
+ MEMORY_DUMP_SUPPORTED = 57,
+ PER_PKT_STATS_SUPPORTED = 58,
+ EXT_LL_STAT = 60,
+ WIFI_CONFIG = 61,
+ ANTENNA_DIVERSITY_SELECTION = 62,
+
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x);
+
+#endif /* _FIRMWARE_H_ */
+
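Note: the DEFINE(s) [s] = #s macro used for the name table in firmware.c combines a C99 designated array initializer with preprocessor stringification, so each enumerator both indexes and labels its own entry; gaps in the enum (for example 50 and 59) simply stay NULL-initialized. A standalone sketch of the same idiom with a hypothetical enum, for illustration:

	enum color { RED = 0, GREEN = 1, BLUE = 2 };

	#define DEFINE(s) [s] = #s
	static const char * const color_names[] = {
		DEFINE(RED),
		DEFINE(GREEN),
		DEFINE(BLUE),
	};
	#undef DEFINE

	/* color_names[GREEN] is the string "GREEN" */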
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 46a49f0a51b3..f1a43fd1d957 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -1961,7 +1961,7 @@ struct wcn36xx_hal_config_bss_params {
/* HAL should update the existing BSS entry, if this flag is set.
* UMAC will set this flag in case of reassoc, where we want to
- * resue the the old BSSID and still return success 0 = Add, 1 =
+ * resue the old BSSID and still return success 0 = Add, 1 =
* Update */
u8 action;
@@ -2098,7 +2098,7 @@ struct wcn36xx_hal_config_bss_params_v1 {
/* HAL should update the existing BSS entry, if this flag is set.
* UMAC will set this flag in case of reassoc, where we want to
- * resue the the old BSSID and still return success 0 = Add, 1 =
+ * resue the old BSSID and still return success 0 = Add, 1 =
* Update */
u8 action;
@@ -4142,7 +4142,7 @@ struct wcn36xx_hal_dump_cmd_rsp_msg {
/* Length of the responce message */
u32 rsp_length;
- /* FIXME: Currently considering the the responce will be less than
+ /* FIXME: Currently considering the responce will be less than
* 100bytes */
u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
} __packed;
@@ -4758,74 +4758,6 @@ struct wcn36xx_hal_set_power_params_resp {
u32 status;
} __packed;
-/* Capability bitmap exchange definitions and macros starts */
-
-enum place_holder_in_cap_bitmap {
- MCC = 0,
- P2P = 1,
- DOT11AC = 2,
- SLM_SESSIONIZATION = 3,
- DOT11AC_OPMODE = 4,
- SAP32STA = 5,
- TDLS = 6,
- P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
- WLANACTIVE_OFFLOAD = 8,
- BEACON_OFFLOAD = 9,
- SCAN_OFFLOAD = 10,
- ROAM_OFFLOAD = 11,
- BCN_MISS_OFFLOAD = 12,
- STA_POWERSAVE = 13,
- STA_ADVANCED_PWRSAVE = 14,
- AP_UAPSD = 15,
- AP_DFS = 16,
- BLOCKACK = 17,
- PHY_ERR = 18,
- BCN_FILTER = 19,
- RTT = 20,
- RATECTRL = 21,
- WOW = 22,
- WLAN_ROAM_SCAN_OFFLOAD = 23,
- SPECULATIVE_PS_POLL = 24,
- SCAN_SCH = 25,
- IBSS_HEARTBEAT_OFFLOAD = 26,
- WLAN_SCAN_OFFLOAD = 27,
- WLAN_PERIODIC_TX_PTRN = 28,
- ADVANCE_TDLS = 29,
- BATCH_SCAN = 30,
- FW_IN_TX_PATH = 31,
- EXTENDED_NSOFFLOAD_SLOT = 32,
- CH_SWITCH_V1 = 33,
- HT40_OBSS_SCAN = 34,
- UPDATE_CHANNEL_LIST = 35,
- WLAN_MCADDR_FLT = 36,
- WLAN_CH144 = 37,
- NAN = 38,
- TDLS_SCAN_COEXISTENCE = 39,
- LINK_LAYER_STATS_MEAS = 40,
- MU_MIMO = 41,
- EXTENDED_SCAN = 42,
- DYNAMIC_WMM_PS = 43,
- MAC_SPOOFED_SCAN = 44,
- BMU_ERROR_GENERIC_RECOVERY = 45,
- DISA = 46,
- FW_STATS = 47,
- WPS_PRBRSP_TMPL = 48,
- BCN_IE_FLT_DELTA = 49,
- TDLS_OFF_CHANNEL = 51,
- RTT3 = 52,
- MGMT_FRAME_LOGGING = 53,
- ENHANCED_TXBD_COMPLETION = 54,
- LOGGING_ENHANCEMENT = 55,
- EXT_SCAN_ENHANCED = 56,
- MEMORY_DUMP_SUPPORTED = 57,
- PER_PKT_STATS_SUPPORTED = 58,
- EXT_LL_STAT = 60,
- WIFI_CONFIG = 61,
- ANTENNA_DIVERSITY_SELECTION = 62,
-
- MAX_FEATURE_SUPPORTED = 128,
-};
-
#define WCN36XX_HAL_CAPS_SIZE 4
struct wcn36xx_hal_feat_caps_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e34d3d0b7082..6b8d2889d73f 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -28,6 +28,7 @@
#include <net/ipv6.h>
#include "wcn36xx.h"
#include "testmode.h"
+#include "firmware.h"
unsigned int wcn36xx_dbg_mask;
module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
@@ -192,88 +193,15 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
-#define DEFINE(s) [s] = #s
-
-static const char * const wcn36xx_caps_names[] = {
- DEFINE(MCC),
- DEFINE(P2P),
- DEFINE(DOT11AC),
- DEFINE(SLM_SESSIONIZATION),
- DEFINE(DOT11AC_OPMODE),
- DEFINE(SAP32STA),
- DEFINE(TDLS),
- DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
- DEFINE(WLANACTIVE_OFFLOAD),
- DEFINE(BEACON_OFFLOAD),
- DEFINE(SCAN_OFFLOAD),
- DEFINE(ROAM_OFFLOAD),
- DEFINE(BCN_MISS_OFFLOAD),
- DEFINE(STA_POWERSAVE),
- DEFINE(STA_ADVANCED_PWRSAVE),
- DEFINE(AP_UAPSD),
- DEFINE(AP_DFS),
- DEFINE(BLOCKACK),
- DEFINE(PHY_ERR),
- DEFINE(BCN_FILTER),
- DEFINE(RTT),
- DEFINE(RATECTRL),
- DEFINE(WOW),
- DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
- DEFINE(SPECULATIVE_PS_POLL),
- DEFINE(SCAN_SCH),
- DEFINE(IBSS_HEARTBEAT_OFFLOAD),
- DEFINE(WLAN_SCAN_OFFLOAD),
- DEFINE(WLAN_PERIODIC_TX_PTRN),
- DEFINE(ADVANCE_TDLS),
- DEFINE(BATCH_SCAN),
- DEFINE(FW_IN_TX_PATH),
- DEFINE(EXTENDED_NSOFFLOAD_SLOT),
- DEFINE(CH_SWITCH_V1),
- DEFINE(HT40_OBSS_SCAN),
- DEFINE(UPDATE_CHANNEL_LIST),
- DEFINE(WLAN_MCADDR_FLT),
- DEFINE(WLAN_CH144),
- DEFINE(NAN),
- DEFINE(TDLS_SCAN_COEXISTENCE),
- DEFINE(LINK_LAYER_STATS_MEAS),
- DEFINE(MU_MIMO),
- DEFINE(EXTENDED_SCAN),
- DEFINE(DYNAMIC_WMM_PS),
- DEFINE(MAC_SPOOFED_SCAN),
- DEFINE(BMU_ERROR_GENERIC_RECOVERY),
- DEFINE(DISA),
- DEFINE(FW_STATS),
- DEFINE(WPS_PRBRSP_TMPL),
- DEFINE(BCN_IE_FLT_DELTA),
- DEFINE(TDLS_OFF_CHANNEL),
- DEFINE(RTT3),
- DEFINE(MGMT_FRAME_LOGGING),
- DEFINE(ENHANCED_TXBD_COMPLETION),
- DEFINE(LOGGING_ENHANCEMENT),
- DEFINE(EXT_SCAN_ENHANCED),
- DEFINE(MEMORY_DUMP_SUPPORTED),
- DEFINE(PER_PKT_STATS_SUPPORTED),
- DEFINE(EXT_LL_STAT),
- DEFINE(WIFI_CONFIG),
- DEFINE(ANTENNA_DIVERSITY_SELECTION),
-};
-
-#undef DEFINE
-
-static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
-{
- if (x >= ARRAY_SIZE(wcn36xx_caps_names))
- return "UNKNOWN";
- return wcn36xx_caps_names[x];
-}
-
static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
{
int i;
for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
- if (get_feat_caps(wcn->fw_feat_caps, i))
- wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n", wcn36xx_get_cap_name(i));
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n",
+ wcn36xx_firmware_get_cap_name(i));
+ }
}
}
@@ -385,7 +313,7 @@ static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
if (enable && !wcn->sw_scan) {
- if (vif->bss_conf.ps) /* ps allowed ? */
+ if (vif->cfg.ps) /* ps allowed ? */
wcn36xx_pmc_enter_bmps_state(wcn, vif);
} else {
wcn36xx_pmc_exit_bmps_state(wcn, vif);
@@ -705,7 +633,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
- if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* fallback to mac80211 software scan */
return 1;
}
@@ -743,7 +671,7 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
- if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* ieee80211_scan_completed will be called on FW scan
* indication */
wcn36xx_smd_stop_hw_scan(wcn);
@@ -872,7 +800,7 @@ void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates)
static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct wcn36xx *wcn = hw->priv;
struct sk_buff *skb = NULL;
@@ -880,7 +808,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
enum wcn36xx_hal_link_state link_state;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%llx\n",
vif, changed);
mutex_lock(&wcn->conf_mutex);
@@ -919,17 +847,17 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed ssid\n");
wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
- bss_conf->ssid, bss_conf->ssid_len);
+ vif->cfg.ssid, vif->cfg.ssid_len);
- vif_priv->ssid.length = bss_conf->ssid_len;
+ vif_priv->ssid.length = vif->cfg.ssid_len;
memcpy(&vif_priv->ssid.ssid,
- bss_conf->ssid,
- bss_conf->ssid_len);
+ vif->cfg.ssid,
+ vif->cfg.ssid_len);
}
if (changed & BSS_CHANGED_ASSOC) {
vif_priv->is_joining = false;
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
struct ieee80211_sta *sta;
struct wcn36xx_sta *sta_priv;
@@ -937,7 +865,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
"mac assoc bss %pM vif %pM AID=%d\n",
bss_conf->bssid,
vif->addr,
- bss_conf->aid);
+ vif->cfg.aid);
vif_priv->sta_assoc = true;
@@ -963,7 +891,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_smd_config_bss(wcn, vif, sta,
bss_conf->bssid,
true);
- sta_priv->aid = bss_conf->aid;
+ sta_priv->aid = vif->cfg.aid;
/*
* config_sta must be called from because this is the
* place where AID is available.
@@ -977,7 +905,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
"disassociated bss %pM vif %pM AID=%d\n",
bss_conf->bssid,
vif->addr,
- bss_conf->aid);
+ vif->cfg.aid);
vif_priv->sta_assoc = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
@@ -1010,7 +938,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
- &tim_len);
+ &tim_len, 0);
if (!skb) {
wcn36xx_err("failed to alloc beacon skb\n");
goto out;
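Note: several hunks in this file follow the same mac80211 API migration: the changed mask widens from u32 to u64 (hence the 0x%llx format), interface-wide state such as assoc, aid, ssid, ps and arp_addr_list is read from vif->cfg instead of bss_conf, and ieee80211_beacon_get_tim() gains a link_id argument. A minimal sketch of a callback written against the new convention (hypothetical driver, not wcn36xx code):

	static void foo_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u64 changed)
	{
		if (changed & BSS_CHANGED_ASSOC)
			pr_debug("assoc %d aid %d\n", vif->cfg.assoc, vif->cfg.aid);

		if (changed & BSS_CHANGED_BEACON) {
			struct sk_buff *beacon;
			u16 tim_off;

			/* link_id 0 is the default (non-MLO) link */
			beacon = ieee80211_beacon_get_tim(hw, vif, &tim_off, NULL, 0);
			if (beacon)
				dev_kfree_skb(beacon);
		}
	}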
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 7ac9a1e6f768..566f0b9c1584 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/rpmsg.h>
#include "smd.h"
+#include "firmware.h"
struct wcn36xx_cfg_val {
u32 cfg_id;
@@ -295,7 +296,7 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
sta_params->vht_capable = sta->deflink.vht_cap.vht_supported;
sta_params->vht_ldpc_enabled =
is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
- if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
sta_params->vht_tx_mu_beamformee_capable =
is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
if (sta_params->vht_tx_mu_beamformee_capable)
@@ -2431,49 +2432,6 @@ out:
return ret;
}
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
- bitmap[arr_idx] |= (1 << bit_idx);
-}
-
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return -EINVAL;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
-
- return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
-}
-
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
- bitmap[arr_idx] &= ~(1 << bit_idx);
-}
-
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
@@ -2482,11 +2440,12 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
- set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
if (wcn->rf_id == RF_IRIS_WCN3680) {
- set_feat_caps(msg_body.feat_caps, DOT11AC);
- set_feat_caps(msg_body.feat_caps, WLAN_CH144);
- set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, DOT11AC);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, WLAN_CH144);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps,
+ ANTENNA_DIVERSITY_SELECTION);
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -3005,7 +2964,7 @@ int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.host_offload_params.enable =
WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE;
memcpy(&msg_body.host_offload_params.u,
- &vif->bss_conf.arp_addr_list[0], sizeof(__be32));
+ &vif->cfg.arp_addr_list[0], sizeof(__be32));
}
msg_body.ns_offload_params.bss_index = vif_priv->bss_index;
@@ -3300,7 +3259,7 @@ int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
size_t payload_size;
int ret;
- if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
+ if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
return -EOPNOTSUPP;
mutex_lock(&wcn->hal_mutex);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 3fd598ac2a27..cf15cde2a364 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -125,9 +125,6 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 8f2638f5b87b..f93bdffa4d1d 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2098,8 +2098,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
bcon->tail_len))
privacy = 1;
- memcpy(vif->ssid, wdev->ssid, wdev->ssid_len);
- vif->ssid_len = wdev->ssid_len;
+ memcpy(vif->ssid, wdev->u.ap.ssid, wdev->u.ap.ssid_len);
+ vif->ssid_len = wdev->u.ap.ssid_len;
/* in case privacy has changed, need to restart the AP */
if (vif->privacy != privacy) {
@@ -2108,7 +2108,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
vif->ssid_len, privacy,
- wdev->beacon_interval,
+ wdev->links[0].ap.beacon_interval,
vif->channel,
vif->wmi_edmg_channel, bcon,
vif->hidden_ssid,
@@ -2186,7 +2186,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
}
static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
- struct net_device *ndev)
+ struct net_device *ndev,
+ unsigned int link_id)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
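Note: these wil6210 hunks track the cfg80211 MLO rework: interface-type-specific state moves into the wdev->u union (here wdev->u.ap.ssid and ssid_len), per-link AP state into wdev->links[], and ops such as stop_ap and get_channel grow a link_id parameter. A rough sketch of an op written against the new layout (hypothetical driver, trimmed to the fields touched above):

	static int foo_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
			       unsigned int link_id)
	{
		struct wireless_dev *wdev = ndev->ieee80211_ptr;

		/* AP SSID now lives in the per-interface-type union */
		pr_debug("stopping AP %.*s on link %u\n",
			 wdev->u.ap.ssid_len, (const char *)wdev->u.ap.ssid,
			 link_id);

		/* per-link AP state (chandef, beacon interval, ...) sits in
		 * wdev->links[link_id].ap
		 */
		return 0;
	}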
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 64d6c98174c8..04d1aa0e2d35 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1010,20 +1010,14 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
void *cmd;
int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
- int rc, rc1;
+ int rc1;
- if (cmdlen < 0)
+ if (cmdlen < 0 || *ppos != 0)
return -EINVAL;
- wmi = kmalloc(len, GFP_KERNEL);
- if (!wmi)
- return -ENOMEM;
-
- rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
- if (rc < 0) {
- kfree(wmi);
- return rc;
- }
+ wmi = memdup_user(buf, len);
+ if (IS_ERR(wmi))
+ return PTR_ERR(wmi);
cmd = (cmdlen > 0) ? &wmi[1] : NULL;
cmdid = le16_to_cpu(wmi->command_id);
@@ -1033,7 +1027,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
- return rc;
+ return len;
}
static const struct file_operations fops_wmi = {
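Note: the rewrite above replaces a kmalloc()/simple_write_to_buffer() pair with memdup_user(), which allocates and copies the user buffer in one step and returns an ERR_PTR on failure; the handler now also rejects a non-zero *ppos and reports the full length as consumed. A generic sketch of the pattern (hypothetical debugfs write handler, not the wil6210 one):

	static ssize_t foo_write(struct file *file, const char __user *buf,
				 size_t len, loff_t *ppos)
	{
		void *data;

		if (*ppos != 0)
			return -EINVAL;

		data = memdup_user(buf, len);
		if (IS_ERR(data))
			return PTR_ERR(data);

		/* ... parse and act on data ... */

		kfree(data);
		return len;	/* whole buffer consumed */
	}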
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 11c989e95880..201f44612c31 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -70,13 +70,10 @@ DECLARE_EVENT_CLASS(wil6210_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- WIL6210_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= WIL6210_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 5704defd7be1..237cbd5c5060 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1782,9 +1782,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
}
/* Header Length = MAC header len + IP header len + TCP header len*/
- hdrlen = ETH_HLEN +
- (int)skb_network_header_len(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
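Note: skb_tcp_all_headers() folds the open-coded MAC + IP + TCP header length into one helper based on the skb's transport offset. Assuming the MAC header starts at skb->data, as it does on this TSO path, the replacement is equivalent to:

	/* illustration only */
	hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);	/* == skb_tcp_all_headers(skb) */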
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 1f4c8ec75be8..1ae1bec1b97f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -356,7 +356,7 @@ struct vring_rx_mac {
* bit 10 : cmd_dma_it:1 immediate interrupt
* bit 11..15 : reserved:5
* bit 16..29 : phy_info_length:14 It is valid when the PII is set.
- * When the FFM bit is set bits 29-27 are used for for
+ * When the FFM bit is set bits 29-27 are used for
* Flex Filter Match. Matching Index to one of the L2
* EtherType Flex Filter
* bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 98b4c189eecc..ea7bd403e706 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1822,8 +1822,8 @@ wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
memset(&info, 0, sizeof(info));
- info.channel = ieee80211_get_channel(wiphy, freq);
- info.bss = vif->bss;
+ info.links[0].channel = ieee80211_get_channel(wiphy, freq);
+ info.links[0].bss = vif->bss;
info.req_ie = assoc_req_ie;
info.req_ie_len = assoc_req_ie_len;
info.resp_ie = assoc_resp_ie;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 7582761c61e2..24e609c1f523 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2033,7 +2033,7 @@ static int at76_config(struct ieee80211_hw *hw, u32 changed)
static void at76_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 35c2e798d98b..0361c8eb2008 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -3353,7 +3353,7 @@ static void atmel_management_frame(struct atmel_private *priv,
priv->beacons_this_sec++;
atmel_smooth_qual(priv);
if (priv->last_beacon_timestamp) {
- /* Note truncate this to 32 bits - kernel can't divide a long long */
+ /* Note truncate this to 32 bits - kernel can't divide a long */
u32 beacon_delay = timestamp - priv->last_beacon_timestamp;
int beacons = beacon_delay / (beacon_interval * 1000);
if (beacons > 1)
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 17bcec5f3ff7..b2539a916fd0 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -105,7 +105,7 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
-static int b43_modparam_pio = 0;
+static int b43_modparam_pio;
module_param_named(pio, b43_modparam_pio, int, 0644);
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
@@ -366,7 +366,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev);
static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed);
+ u64 changed);
static int b43_ratelimit(struct b43_wl *wl)
{
@@ -1832,7 +1832,7 @@ static void b43_update_templates(struct b43_wl *wl)
* the TIM field, but that would probably require resizing and
* moving of data within the beacon template.
* Simply request a new beacon and let mac80211 do the hard work. */
- beacon = ieee80211_beacon_get(wl->hw, wl->vif);
+ beacon = ieee80211_beacon_get(wl->hw, wl->vif, 0);
if (unlikely(!beacon))
return;
@@ -3783,7 +3783,8 @@ static void b43_qos_init(struct b43_wldev *dev)
}
static int b43_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 _queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 _queue,
const struct ieee80211_tx_queue_params *params)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -4097,7 +4098,7 @@ static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates)
static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index 4213caca9117..5ec5233acf40 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -88,7 +88,7 @@ enum b43_txpwr_result {
* initialized here.
* Must not be NULL.
* @prepare_hardware: Prepare the PHY. This is called before b43_chip_init to
- * do some early early PHY hardware init.
+ * do some early PHY hardware init.
* Can be NULL, if not required.
* @init: Initialize the PHY.
* Must not be NULL.
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index eec3af9c3745..4022c544aefe 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1241,7 +1241,7 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
* field, but that would probably require resizing and moving of data
* within the beacon template. Simply request a new beacon and let
* mac80211 do the hard work. */
- beacon = ieee80211_beacon_get(wl->hw, wl->vif);
+ beacon = ieee80211_beacon_get(wl->hw, wl->vif, 0);
if (unlikely(!beacon))
return;
@@ -2505,7 +2505,8 @@ static void b43legacy_op_tx(struct ieee80211_hw *hw,
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
return 0;
@@ -2806,7 +2807,7 @@ static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates
static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
@@ -2943,7 +2944,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
}
-b43legacy_mac_suspend(dev);
+ b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 9c598ea97499..d639bb8b51ae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -784,9 +784,11 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
-#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
+ if (!IS_ENABLED(CONFIG_PM_SLEEP))
+ return 0;
+
sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
if (!sdiodev->freezer)
return -ENOMEM;
@@ -802,6 +804,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
if (sdiodev->freezer) {
WARN_ON(atomic_read(&sdiodev->freezer->freezing));
kfree(sdiodev->freezer);
+ sdiodev->freezer = NULL;
}
}
@@ -833,7 +836,8 @@ static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
- return atomic_read(&sdiodev->freezer->freezing);
+ return IS_ENABLED(CONFIG_PM_SLEEP) &&
+ atomic_read(&sdiodev->freezer->freezing);
}
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
@@ -847,23 +851,15 @@ void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
- atomic_inc(&sdiodev->freezer->thread_count);
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ atomic_inc(&sdiodev->freezer->thread_count);
}
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
- atomic_dec(&sdiodev->freezer->thread_count);
-}
-#else
-static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
-{
- return 0;
-}
-
-static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
-{
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ atomic_dec(&sdiodev->freezer->thread_count);
}
-#endif /* CONFIG_PM_SLEEP */
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
@@ -875,13 +871,9 @@ int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
brcmf_sdiod_freezer_detach(sdiodev);
- /* Disable Function 2 */
- sdio_claim_host(sdiodev->func2);
- sdio_disable_func(sdiodev->func2);
- sdio_release_host(sdiodev->func2);
-
- /* Disable Function 1 */
+ /* Disable functions 2 then 1. */
sdio_claim_host(sdiodev->func1);
+ sdio_disable_func(sdiodev->func2);
sdio_disable_func(sdiodev->func1);
sdio_release_host(sdiodev->func1);
@@ -911,7 +903,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
if (ret) {
brcmf_err("Failed to set F1 blocksize\n");
sdio_release_host(sdiodev->func1);
- goto out;
+ return ret;
}
switch (sdiodev->func2->device) {
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
@@ -933,7 +925,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
- goto out;
+ return ret;
} else {
brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
}
@@ -1136,7 +1128,6 @@ notsup:
brcmf_dbg(SDIO, "WOWL not supported\n");
}
-#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
struct sdio_func *func;
@@ -1204,11 +1195,9 @@ static int brcmf_ops_sdio_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_ops_sdio_suspend,
- .resume = brcmf_ops_sdio_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmf_sdio_pm_ops,
+ brcmf_ops_sdio_suspend,
+ brcmf_ops_sdio_resume);
static struct sdio_driver brcmf_sdmmc_driver = {
.probe = brcmf_ops_sdio_probe,
@@ -1217,9 +1206,7 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.id_table = brcmf_sdmmc_ids,
.drv = {
.owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
- .pm = &brcmf_sdio_pm_ops,
-#endif /* CONFIG_PM_SLEEP */
+ .pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
.coredump = brcmf_dev_coredump,
},
};
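Note: two related cleanups run through this file: IS_ENABLED(CONFIG_PM_SLEEP) checks replace #ifdef blocks so both branches stay compile-tested, and the hand-rolled dev_pm_ops is generated with DEFINE_SIMPLE_DEV_PM_OPS() and wired up via pm_sleep_ptr(), which evaluates to NULL when PM sleep support is compiled out. A condensed sketch of the registration side (hypothetical driver name):

	static int foo_suspend(struct device *dev) { /* ... */ return 0; }
	static int foo_resume(struct device *dev)  { /* ... */ return 0; }

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct sdio_driver foo_driver = {
		/* ... probe/remove/id_table ... */
		.drv = {
			.owner = THIS_MODULE,
			.pm = pm_sleep_ptr(&foo_pm_ops),  /* NULL if !CONFIG_PM_SLEEP */
		},
	};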
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 3f5da3bb6aa5..ae5af76e2568 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -89,7 +89,7 @@ struct brcmf_bus_ops {
*
* @commonrings: commonrings which are always there.
* @flowrings: commonrings which are dynamically created and destroyed for data.
- * @rx_dataoffset: if set then all rx data has this this offset.
+ * @rx_dataoffset: if set then all rx data has this offset.
* @max_rxbufpost: maximum number of buffers to post for rx.
* @max_flowrings: maximum number of tx flow rings supported.
* @max_submissionrings: maximum number of submission rings(h2d) supported.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 605206abe424..db45da33adfd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4965,7 +4965,8 @@ exit:
return err;
}
-static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
+ unsigned int link_id)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -5302,6 +5303,7 @@ exit:
static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
@@ -6015,8 +6017,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
done:
kfree(buf);
- roam_info.channel = notify_channel;
- roam_info.bssid = profile->bssid;
+ roam_info.links[0].channel = notify_channel;
+ roam_info.links[0].bssid = profile->bssid;
roam_info.req_ie = conn_info->req_ie;
roam_info.req_ie_len = conn_info->req_ie_len;
roam_info.resp_ie = conn_info->resp_ie;
@@ -6059,7 +6061,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
} else {
conn_params.status = WLAN_STATUS_AUTH_TIMEOUT;
}
- conn_params.bssid = profile->bssid;
+ conn_params.links[0].bssid = profile->bssid;
conn_params.req_ie = conn_info->req_ie;
conn_params.req_ie_len = conn_info->req_ie_len;
conn_params.resp_ie = conn_info->resp_ie;
@@ -7479,6 +7481,9 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
{
+ if (drvr->settings->trivial_ccode_map)
+ return true;
+
switch (drvr->bus_if->chip) {
case BRCM_CC_4345_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index fe01da9e620d..7485e784be2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -190,6 +190,31 @@ done:
return err;
}
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr)
+{
+ s32 err;
+
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", addr, ETH_ALEN);
+ if (err < 0)
+ bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+
+ return err;
+}
+
+/* On some boards there is no eeprom to hold the nvram, in this case instead
+ * a board specific nvram is loaded from /lib/firmware. On most boards the
+ * macaddr setting in the /lib/firmware nvram file is ignored because the
+ * wifibt chip has a unique MAC programmed into the chip itself.
+ * But in some cases the actual MAC from the /lib/firmware nvram file gets
+ * used, leading to MAC conflicts.
+ * The MAC addresses in the troublesome nvram files seem to all come from
+ * the same nvram file template, so we only need to check for 1 known
+ * address to detect this.
+ */
+static const u8 brcmf_default_mac_address[ETH_ALEN] = {
+ 0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38
+};
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -204,12 +229,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
if (is_valid_ether_addr(ifp->mac_addr)) {
/* set mac address */
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
- ETH_ALEN);
- if (err < 0) {
- bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+ err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+ if (err < 0)
goto done;
- }
} else {
/* retrieve mac address */
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
@@ -218,6 +240,15 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
goto done;
}
+
+ if (ether_addr_equal_unaligned(ifp->mac_addr, brcmf_default_mac_address)) {
+ bphy_err(drvr, "Default MAC is used, replacing with random MAC to avoid conflicts\n");
+ eth_random_addr(ifp->mac_addr);
+ ifp->ndev->addr_assign_type = NET_ADDR_RANDOM;
+ err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+ if (err < 0)
+ goto done;
+ }
}
memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
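Note: the added check compares the firmware-reported address against one known nvram-template MAC and, on a match, falls back to a random address marked NET_ADDR_RANDOM so user space can tell it was not hardware-assigned. eth_random_addr() guarantees the fallback is a valid unicast, locally administered address; roughly, the helper does the following (sketch from memory, for illustration only):

	get_random_bytes(addr, ETH_ALEN);
	addr[0] &= 0xfe;	/* clear the multicast bit */
	addr[0] |= 0x02;	/* set the locally-administered bit */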
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 15accc88d5c0..6c5a22a32a96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -38,6 +38,7 @@ extern struct brcmf_mp_global_t brcmf_mp_global;
* @fcmode: FWS flow control.
* @roamoff: Firmware roaming off?
* @ignore_probe_fail: Ignore probe failure.
+ * @trivial_ccode_map: Assume firmware uses ISO3166 country codes with rev 0
* @country_codes: If available, pointer to struct for translating country codes
* @bus: Bus specific platform data. Only SDIO at the mmoment.
*/
@@ -48,6 +49,7 @@ struct brcmf_mp_device {
bool roamoff;
bool iapp;
bool ignore_probe_fail;
+ bool trivial_ccode_map;
struct brcmfmac_pd_cc *country_codes;
const char *board_type;
unsigned char mac[ETH_ALEN];
@@ -65,6 +67,7 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr);
#ifdef CONFIG_DMI
void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 87aef211b35f..bd164a0821f9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -233,16 +233,12 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
- struct brcmf_pub *drvr = ifp->drvr;
int err;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
- ETH_ALEN);
- if (err < 0) {
- bphy_err(drvr, "Setting cur_etheraddr failed, %d\n", err);
- } else {
+ err = brcmf_c_set_cur_etheraddr(ifp, sa->sa_data);
+ if (err >= 0) {
brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
eth_hw_addr_set(ifp->ndev, ifp->mac_addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index dcbe55b56e43..b8379e4034a4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -459,43 +459,34 @@ static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len)
static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret)
{
- const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 };
- struct efivar_entry *nvram_efivar;
+ efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f,
+ 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13);
unsigned long data_len = 0;
+ efi_status_t status;
u8 *data = NULL;
- int err;
- nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL);
- if (!nvram_efivar)
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
return NULL;
- memcpy(&nvram_efivar->var.VariableName, name, sizeof(name));
- nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61,
- 0xb5, 0x1f, 0x43, 0x26,
- 0x81, 0x23, 0xd1, 0x13);
-
- err = efivar_entry_size(nvram_efivar, &data_len);
- if (err)
+ status = efi.get_variable(L"nvram", &guid, NULL, &data_len, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL)
goto fail;
data = kmalloc(data_len, GFP_KERNEL);
if (!data)
goto fail;
- err = efivar_entry_get(nvram_efivar, NULL, &data_len, data);
- if (err)
+ status = efi.get_variable(L"nvram", &guid, NULL, &data_len, data);
+ if (status != EFI_SUCCESS)
goto fail;
brcmf_fw_fix_efi_nvram_ccode(data, data_len);
brcmf_info("Using nvram EFI variable\n");
- kfree(nvram_efivar);
*data_len_ret = data_len;
return data;
-
fail:
kfree(data);
- kfree(nvram_efivar);
return NULL;
}
#else
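Note: the EFI path now follows the standard two-call GetVariable protocol: a first call with a NULL buffer is expected to fail with EFI_BUFFER_TOO_SMALL while writing the required size into data_len, and only then is the buffer allocated and the real read issued; any other status on the probe call means the variable is absent. Condensed to the essential calls (error handling trimmed, guid as defined above):

	unsigned long size = 0;
	u8 *data;

	if (efi.get_variable(L"nvram", &guid, NULL, &size, NULL) != EFI_BUFFER_TOO_SMALL)
		return NULL;

	data = kmalloc(size, GFP_KERNEL);
	if (!data)
		return NULL;

	if (efi.get_variable(L"nvram", &guid, NULL, &size, data) != EFI_SUCCESS) {
		kfree(data);
		return NULL;
	}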
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 083ac58f466d..79388d49c256 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -24,6 +24,12 @@ static int brcmf_of_get_country_codes(struct device *dev,
count = of_property_count_strings(np, "brcm,ccode-map");
if (count < 0) {
+ /* If no explicit country code map is specified, check whether
+ * the trivial map should be used.
+ */
+ settings->trivial_ccode_map =
+ of_property_read_bool(np, "brcm,ccode-map-trivial");
+
/* The property is optional, so return success if it doesn't
* exist. Otherwise propagate the error code.
*/
@@ -72,7 +78,6 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root) {
- int i;
char *board_type;
const char *tmp;
@@ -84,10 +89,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
of_node_put(root);
return;
}
- for (i = 0; i < board_type[i]; i++) {
- if (board_type[i] == '/')
- board_type[i] = '-';
- }
+ strreplace(board_type, '/', '-');
settings->board_type = board_type;
of_node_put(root);
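Note: strreplace() walks a NUL-terminated string once, substitutes every occurrence of one character with another, and returns a pointer to the terminator, so it replaces the open-coded loop above (whose termination condition compared the loop index against the character value rather than the string length). Minimal usage, for illustration:

	char board[] = "vendor,soc/board/rev";
	strreplace(board, '/', '-');	/* -> "vendor,soc-board-rev" */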
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 212fbbe1cd7e..8968809399c7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1617,7 +1617,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
- * packet and and copy into the chain.
+ * packet and copy into the chain.
*/
sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
@@ -4020,15 +4020,14 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_sdiod_sgtable_alloc(sdiodev);
-#ifdef CONFIG_PM_SLEEP
/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
* is true or when platform data OOB irq is true).
*/
- if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+ if (IS_ENABLED(CONFIG_PM_SLEEP) &&
+ (sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
(sdiodev->settings->bus.sdio.oob_irq_supported)))
sdiodev->bus_if->wowl_supported = true;
-#endif
if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
@@ -4152,7 +4151,6 @@ int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
static int brcmf_sdio_bus_reset(struct device *dev)
{
- int ret = 0;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -4169,14 +4167,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
sdio_release_host(sdiodev->func1);
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-
- ret = brcmf_sdiod_probe(sdiodev);
- if (ret) {
- brcmf_err("Failed to probe after sdio device reset: ret %d\n",
- ret);
- }
-
- return ret;
+ return 0;
}
static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 15d2c02fa3ec..47351ff458ca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -346,26 +346,10 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
-#ifdef CONFIG_PM_SLEEP
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
-#else
-static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
-{
- return false;
-}
-static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
-{
-}
-#endif /* CONFIG_PM_SLEEP */
int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev);
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 338c66d0c5f8..5a139d7ed47a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -33,13 +33,11 @@ TRACE_EVENT(brcmf_err,
TP_ARGS(func, vaf),
TP_STRUCT__entry(
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
@@ -50,14 +48,12 @@ TRACE_EVENT(brcmf_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
index 0e8a69ab909f..488456420353 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -28,12 +28,10 @@ DECLARE_EVENT_CLASS(brcms_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -64,14 +62,12 @@ TRACE_EVENT(brcms_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 8c741b98d8e5..a4034d44609b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -507,7 +507,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
brcms_c_start_station(wl->wlc, vif->addr);
else if (vif->type == NL80211_IFTYPE_AP)
brcms_c_start_ap(wl->wlc, vif->addr, vif->bss_conf.bssid,
- vif->bss_conf.ssid, vif->bss_conf.ssid_len);
+ vif->cfg.ssid, vif->cfg.ssid_len);
else if (vif->type == NL80211_IFTYPE_ADHOC)
brcms_c_start_adhoc(wl->wlc, vif->addr);
spin_unlock_bh(&wl->lock);
@@ -582,7 +582,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
static void
brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct brcms_info *wl = hw->priv;
struct bcma_device *core = wl->wlc->hw->d11core;
@@ -592,9 +592,9 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
* also implies a change in the AID.
*/
brcms_err(core, "%s: %s: %sassociated\n", KBUILD_MODNAME,
- __func__, info->assoc ? "" : "dis");
+ __func__, vif->cfg.assoc ? "" : "dis");
spin_lock_bh(&wl->lock);
- brcms_c_associate_upd(wl->wlc, info->assoc);
+ brcms_c_associate_upd(wl->wlc, vif->cfg.assoc);
spin_unlock_bh(&wl->lock);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -669,7 +669,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_SSID) {
/* BSSID changed, for whatever reason (IBSS and managed mode) */
spin_lock_bh(&wl->lock);
- brcms_c_set_ssid(wl->wlc, info->ssid, info->ssid_len);
+ brcms_c_set_ssid(wl->wlc, vif->cfg.ssid, vif->cfg.ssid_len);
spin_unlock_bh(&wl->lock);
}
if (changed & BSS_CHANGED_BEACON) {
@@ -678,7 +678,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
u16 tim_offset = 0;
spin_lock_bh(&wl->lock);
- beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
+ beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
info->dtim_period);
spin_unlock_bh(&wl->lock);
@@ -715,13 +715,13 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_IBSS) {
/* IBSS join status changed */
brcms_err(core, "%s: IBSS joined: %s (implement)\n",
- __func__, info->ibss_joined ? "true" : "false");
+ __func__, vif->cfg.ibss_joined ? "true" : "false");
}
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
brcms_err(core, "%s: arp filtering: %d addresses"
- " (implement)\n", __func__, info->arp_addr_cnt);
+ " (implement)\n", __func__, vif->cfg.arp_addr_cnt);
}
if (changed & BSS_CHANGED_QOS) {
@@ -787,7 +787,8 @@ static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw,
}
static int
-brcms_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+brcms_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct brcms_info *wl = hw->priv;
@@ -950,7 +951,7 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
spin_lock_bh(&wl->lock);
if (wl->wlc->vif)
beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
- &tim_offset, NULL);
+ &tim_offset, NULL, 0);
if (beacon)
brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
wl->wlc->vif->bss_conf.dtim_period);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 8ddfc3d06687..11b33e78127c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -3800,7 +3800,7 @@ static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
}
/*
- * Suspend the the MAC and update the slot timing
+ * Suspend the MAC and update the slot timing
* for standard 11b/g (20us slots) or shortslot 11g (9us slots).
*/
static void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ed343d4fb9d5..029dacebe751 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -2584,7 +2584,7 @@ static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
* through a couple of memory mapped registers.
*
* The following is a simplified implementation for pulling data out of the
- * the eeprom, along with some helper functions to find information in
+ * eeprom, along with some helper functions to find information in
* the per device private data's copy of the eeprom.
*
* NOTE: To better understand how these functions work (i.e what is a chip
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index bd4e7d752958..846138d6e33d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2701,7 +2701,7 @@ il3945_post_associate(struct il_priv *il)
if (!il->vif || !il->is_open)
return;
- D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
+ D_ASSOC("Associated as %d to: %pM\n", il->vif->cfg.aid,
il->active.bssid_addr);
if (test_bit(S_EXIT_PENDING, &il->status))
@@ -2718,9 +2718,9 @@ il3945_post_associate(struct il_priv *il)
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
+ il->staging.assoc_id = cpu_to_le16(il->vif->cfg.aid);
- D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
+ D_ASSOC("assoc id %d beacon interval %d\n", il->vif->cfg.aid,
il->vif->bss_conf.beacon_int);
if (il->vif->bss_conf.use_short_preamble)
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index d93900e62e3d..943de47170c7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6690,7 +6690,7 @@ il4965_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
/* ieee80211_unregister_hw call wil cause il_mac_stop to
- * to be called and il4965_down since we are removing the device
+ * be called and il4965_down since we are removing the device
* we need to set S_EXIT_PENDING bit.
*/
set_bit(S_EXIT_PENDING, &il->status);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 9dd2d890e35f..c62f299b9e0a 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ while (repeat_rate > 0) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
@@ -2422,6 +2422,8 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
cpu_to_le32(new_rate);
repeat_rate--;
idx++;
+ if (idx >= LINK_QUAL_MAX_RETRY_NUM)
+ goto out;
}
il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2466,6 +2468,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
repeat_rate--;
}
+out:
lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 9fa556486511..c34729f576cd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1756,9 +1756,9 @@ il4965_post_associate(struct il_priv *il)
if (il->ops->set_rxon_chain)
il->ops->set_rxon_chain(il);
- il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ il->staging.assoc_id = cpu_to_le16(vif->cfg.aid);
- D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+ D_ASSOC("assoc id %d beacon interval %d\n", vif->cfg.aid,
vif->bss_conf.beacon_int);
if (vif->bss_conf.use_short_preamble)
@@ -1775,7 +1775,7 @@ il4965_post_associate(struct il_priv *il)
il_commit_rxon(il);
- D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+ D_ASSOC("Associated as %d to: %pM\n", vif->cfg.aid,
il->active.bssid_addr);
switch (vif->type) {
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 8299d89e7505..04d27a26260b 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4480,7 +4480,8 @@ il_clear_isr_stats(struct il_priv *il)
}
int
-il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct il_priv *il = hw->priv;
@@ -4816,7 +4817,7 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
#define IL_WD_TICK(timeout) ((timeout) / 4)
/*
- * Watchdog timer callback, we check each tx queue for stuck, if if hung
+ * Watchdog timer callback, we check each tx queue for stuck, if hung
* we reset the firmware. If everything is fine just rearm the timer.
*/
void
@@ -5276,7 +5277,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct il_priv *il = hw->priv;
unsigned long flags;
__le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
if (!skb)
return;
@@ -5311,13 +5312,13 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changes)
+ struct ieee80211_bss_conf *bss_conf, u64 changes)
{
struct il_priv *il = hw->priv;
int ret;
mutex_lock(&il->mutex);
- D_MAC80211("enter: changes 0x%x\n", changes);
+ D_MAC80211("enter: changes 0x%llx\n", changes);
if (!il_is_alive(il)) {
D_MAC80211("leave - not alive\n");
@@ -5427,8 +5428,8 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_ASSOC) {
- D_MAC80211("ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
+ D_MAC80211("ASSOC %d\n", vif->cfg.assoc);
+ if (vif->cfg.assoc) {
il->timestamp = bss_conf->sync_tsf;
if (!il_is_rfkill(il))
@@ -5437,8 +5438,8 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
il_set_no_assoc(il, vif);
}
- if (changes && il_is_associated(il) && bss_conf->aid) {
- D_MAC80211("Changes (%#x) while associated\n", changes);
+ if (changes && il_is_associated(il) && vif->cfg.aid) {
+ D_MAC80211("Changes (%#llx) while associated\n", changes);
ret = il_send_rxon_assoc(il);
if (!ret) {
/* Sync active_rxon with latest change. */
@@ -5459,10 +5460,10 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_IBSS) {
ret = il->ops->manage_ibss_station(il, vif,
- bss_conf->ibss_joined);
+ vif->cfg.ibss_joined);
if (ret)
IL_ERR("failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
+ vif->cfg.ibss_joined ? "add" : "remove",
bss_conf->bssid);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 40877ef1fbf2..69687fcf963f 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1683,7 +1683,8 @@ struct il_cfg {
***************************/
int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
@@ -1947,7 +1948,7 @@ il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
int il_mac_config(struct ieee80211_hw *hw, u32 changed);
void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changes);
+ struct ieee80211_bss_conf *bss_conf, u64 changes);
void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index abb8696ba294..411a6f6638b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -92,7 +92,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes);
+ u64 changes);
void iwlagn_config_ht40(struct ieee80211_conf *conf,
struct iwl_rxon_context *ctx);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 40d790b36d85..1dc974e2c511 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014, 2022 Intel Corporation. All rights reserved.
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
@@ -441,7 +441,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
priv->current_ht_config.smps = smps_request;
for_each_context(priv, ctx) {
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
- ieee80211_request_smps(ctx->vif, smps_request);
+ ieee80211_request_smps(ctx->vif, 0, smps_request);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index e8bd4f0e3d2d..f4070fddc8c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2022 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -1153,7 +1153,8 @@ static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
}
static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index caf452922dbd..a873be109f43 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014, 2018 - 2021 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
@@ -284,7 +284,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
}
/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
- beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
+ beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif, 0);
if (!beacon) {
IWL_ERR(priv, "update beacon failed -- keeping old\n");
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 5dd2d43a01d8..f80cce37e2c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -183,7 +183,7 @@ static int iwlagn_update_beacon(struct iwl_priv *priv,
lockdep_assert_held(&priv->mutex);
dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
+ priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif, 0);
if (!priv->beacon_skb)
return -ENOMEM;
return iwlagn_send_beacon_cmd(priv);
@@ -562,12 +562,12 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
slot1 = bcnint - slot0;
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
- (!ctx_bss->vif->bss_conf.idle &&
- !ctx_bss->vif->bss_conf.assoc)) {
+ (!ctx_bss->vif->cfg.idle &&
+ !ctx_bss->vif->cfg.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
- } else if (!ctx_pan->vif->bss_conf.idle &&
- !ctx_pan->vif->bss_conf.assoc) {
+ } else if (!ctx_pan->vif->cfg.idle &&
+ !ctx_pan->vif->cfg.assoc) {
slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot0 = IWL_MIN_SLOT_TIME;
}
@@ -1383,7 +1383,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
@@ -1392,7 +1392,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
+ if (changes & BSS_CHANGED_IDLE && vif->cfg.idle) {
/*
* If we go idle, then clearly no "passive-no-rx"
* workaround is needed any more, this is a reset.
@@ -1420,14 +1420,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwlagn_update_qos(priv, ctx);
}
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ ctx->staging.assoc_id = cpu_to_le16(vif->cfg.aid);
if (vif->bss_conf.use_short_preamble)
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (changes & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
@@ -1483,7 +1483,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
*/
if (vif->type == NL80211_IFTYPE_STATION) {
- if (!bss_conf->assoc)
+ if (!vif->cfg.assoc)
ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
else
ctx->staging.filter_flags &=
@@ -1493,7 +1493,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
iwlagn_commit_rxon(priv, ctx);
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
/*
* The chain noise calibration will enable PM upon
* completion. If calibration has already been run
@@ -1509,10 +1509,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_IBSS) {
ret = iwlagn_manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
+ vif->cfg.ibss_joined);
if (ret)
IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
+ vif->cfg.ibss_joined ? "add" : "remove",
bss_conf->bssid);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 23b1d689ba7b..6d408cd0f517 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -19,20 +19,14 @@
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
- struct efivar_entry *pnvm_efivar;
void *data;
unsigned long package_size;
- int err;
+ efi_status_t status;
*len = 0;
- pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
- if (!pnvm_efivar)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
- sizeof(IWL_UEFI_OEM_PNVM_NAME));
- pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return ERR_PTR(-ENODEV);
/*
* TODO: we hardcode a maximum length here, because reading
@@ -42,27 +36,22 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
package_size = IWL_HARDCODED_PNVM_SIZE;
data = kmalloc(package_size, GFP_KERNEL);
- if (!data) {
- data = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!data)
+ return ERR_PTR(-ENOMEM);
- err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
- if (err) {
+ status = efi.get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
IWL_DEBUG_FW(trans,
- "PNVM UEFI variable not found %d (len %lu)\n",
- err, package_size);
+ "PNVM UEFI variable not found 0x%lx (len %lu)\n",
+ status, package_size);
kfree(data);
- data = ERR_PTR(err);
- goto out;
+ return ERR_PTR(-ENOENT);
}
IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size);
*len = package_size;
-out:
- kfree(pnvm_efivar);
-
return data;
}
@@ -211,21 +200,15 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
- struct efivar_entry *reduce_power_efivar;
struct pnvm_sku_package *package;
void *data = NULL;
unsigned long package_size;
- int err;
+ efi_status_t status;
*len = 0;
- reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
- if (!reduce_power_efivar)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
- sizeof(IWL_UEFI_REDUCED_POWER_NAME));
- reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return ERR_PTR(-ENODEV);
/*
* TODO: we hardcode a maximum length here, because reading
@@ -235,19 +218,17 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
package = kmalloc(package_size, GFP_KERNEL);
- if (!package) {
- package = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!package)
+ return ERR_PTR(-ENOMEM);
- err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
- if (err) {
+ status = efi.get_variable(IWL_UEFI_REDUCED_POWER_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, package);
+ if (status != EFI_SUCCESS) {
IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found %d (len %lu)\n",
- err, package_size);
+ "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
+ status, package_size);
kfree(package);
- data = ERR_PTR(err);
- goto out;
+ return ERR_PTR(-ENOENT);
}
IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
@@ -262,9 +243,6 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
kfree(package);
-out:
- kfree(reduce_power_efivar);
-
return data;
}
@@ -304,22 +282,15 @@ static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
- struct efivar_entry *sgom_efivar;
struct uefi_cnv_wlan_sgom_data *data;
unsigned long package_size;
- int err, ret;
-
- if (!fwrt->geo_enabled)
- return;
+ efi_status_t status;
+ int ret;
- sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL);
- if (!sgom_efivar)
+ if (!fwrt->geo_enabled ||
+ !efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
return;
- memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME,
- sizeof(IWL_UEFI_SGOM_NAME));
- sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
-
/* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
@@ -327,15 +298,14 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
package_size = IWL_HARDCODED_SGOM_SIZE;
data = kmalloc(package_size, GFP_KERNEL);
- if (!data) {
- data = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!data)
+ return;
- err = efivar_entry_get(sgom_efivar, NULL, &package_size, data);
- if (err) {
+ status = efi.get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found %d\n", err);
+ "SGOM UEFI variable not found 0x%lx\n", status);
goto out_free;
}
@@ -349,8 +319,6 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
out_free:
kfree(data);
-out:
- kfree(sgom_efivar);
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
#endif /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 7dd70011fd1e..1d6c292cf545 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -18,12 +18,10 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -55,14 +53,12 @@ TRACE_EVENT(iwlwifi_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 9b194cb8d65e..ee3c8a786199 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -106,7 +106,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!chanctx_conf ||
chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
@@ -283,7 +283,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
@@ -311,7 +311,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = IEEE80211_SMPS_DYNAMIC;
/* relax SMPS constraints for next association */
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
if (mvmvif->phy_ctxt &&
@@ -382,7 +382,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
* we are not associated
*/
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+ mvm->cfg->bt_shared_single_ant || !vif->cfg.assoc ||
le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 61f9136a333d..aeb0015b73d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -731,7 +731,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
rcu_read_lock();
- ctx = rcu_dereference(vif->chanctx_conf);
+ ctx = rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON(!ctx)) {
rcu_read_unlock();
return -EINVAL;
@@ -749,7 +749,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* add back the MAC */
mvmvif->uploaded = false;
- if (WARN_ON(!vif->bss_conf.assoc))
+ if (WARN_ON(!vif->cfg.assoc))
return -EINVAL;
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 7d9faeffd154..78d8b37eb71a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -234,7 +234,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
}
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (chanctx_conf)
pos += scnprintf(buf+pos, bufsz-pos,
"idle rx chains %d, active rx chains: %d\n",
@@ -597,7 +597,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
/* make sure the channel context is assigned */
if (!chanctx_conf) {
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 49898fd99594..c0bd697b080a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1233,7 +1233,7 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
mvm->hw->extra_beacon_tailroom = len;
- beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0);
if (!beacon)
goto out_err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 430044bc4755..8c5b97fb1941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -67,7 +67,7 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* the TK is already configured for this station, so it
* shouldn't be set again here.
*/
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
!memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -222,7 +222,7 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
else
eth_broadcast_addr(cmd->range_req_bssid);
@@ -254,7 +254,7 @@ static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
/* AP's TSF is only relevant if associated */
@@ -503,7 +503,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_ftm_put_target_common(mvm, peer, target);
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
!memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -693,7 +693,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
target->cipher = entry->cipher;
memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
!memcmp(vif->bss_conf.bssid, target->bssid,
sizeof(target->bssid)))
ieee80211_iter_keys(mvm->hw, vif, iter, target);
@@ -1105,10 +1105,10 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
- IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
- IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 9729680476fd..e862d1b43f21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <net/cfg80211.h>
#include <linux/etherdevice.h>
@@ -398,7 +398,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
rcu_read_lock();
- pctx = rcu_dereference(vif->chanctx_conf);
+ pctx = rcu_dereference(vif->bss_conf.chanctx_conf);
/* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care
* about changes in the ctx after releasing the lock because the driver
* is still protected by the mutex. */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 56fa20596f16..ed586e6d7d64 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -481,7 +481,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
eth_broadcast_addr(cmd->bssid_addr);
rcu_read_lock();
- chanctx = rcu_dereference(vif->chanctx_conf);
+ chanctx = rcu_dereference(vif->bss_conf.chanctx_conf);
iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
: NL80211_BAND_2GHZ,
&cck_ack_rates, &ofdm_ack_rates);
@@ -570,7 +570,7 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
}
/* We need the dtim_period to set the MAC as associated */
- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+ if (vif->cfg.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 dtim_offs;
@@ -628,9 +628,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
vif->bss_conf.dtim_period);
ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
- ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+ ctxt_sta->assoc_id = cpu_to_le32(vif->cfg.aid);
- if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
+ if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
@@ -934,7 +934,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
/* Enable FILS on PSC channels only */
rcu_read_lock();
- ctx = rcu_dereference(vif->chanctx_conf);
+ ctx = rcu_dereference(vif->bss_conf.chanctx_conf);
channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq);
WARN_ON(channel == 0);
if (cfg80211_channel_is_psc(ctx->def.chan) &&
@@ -944,8 +944,8 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
IWL_MAC_BEACON_FILS :
IWL_MAC_BEACON_FILS_V1;
beacon_cmd.short_ssid =
- cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid,
- vif->bss_conf.ssid_len));
+ cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
+ vif->cfg.ssid_len));
}
rcu_read_unlock();
@@ -1002,7 +1002,7 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
WARN_ON(vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC);
- beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0);
if (!beacon)
return -ENOMEM;
@@ -1031,7 +1031,7 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
{
struct iwl_mvm_mac_ap_iterator_data *data = _data;
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return;
/* Station client has higher priority over P2P client*/
@@ -1335,7 +1335,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
csa_vif = rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
- if (unlikely(csa_vif && csa_vif->csa_active))
+ if (unlikely(csa_vif && csa_vif->bss_conf.csa_active))
iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
(status == TX_STATUS_SUCCESS));
@@ -1558,7 +1558,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
switch (vif->type) {
case NL80211_IFTYPE_AP:
csa_vif = rcu_dereference(mvm->csa_vif);
- if (WARN_ON(!csa_vif || !csa_vif->csa_active ||
+ if (WARN_ON(!csa_vif || !csa_vif->bss_conf.csa_active ||
csa_vif != vif))
goto out_unlock;
@@ -1587,7 +1587,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF,
- 0) && !vif->csa_active) {
+ 0) && !vif->bss_conf.csa_active) {
IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index bb9bd2165355..5eb28f8ee87e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1614,7 +1614,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
return;
if (vif->type != NL80211_IFTYPE_STATION ||
- !vif->bss_conf.assoc)
+ !vif->cfg.assoc)
return;
cmd->port_id = data->port_id++;
@@ -1740,7 +1740,7 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
return;
/* Supported only for p2p client interfaces */
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc ||
!vif->p2p)
return;
@@ -1768,7 +1768,7 @@ static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
- if (vif->mu_mimo_owner) {
+ if (vif->bss_conf.mu_mimo_owner) {
struct iwl_mu_group_mgmt_notif *notif = _data;
/*
@@ -1776,7 +1776,7 @@ static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
* the data received from firmware as if it came from the
* action frame, so no conversion is needed.
*/
- ieee80211_update_mu_groups(vif,
+ ieee80211_update_mu_groups(vif, 0,
(u8 *)&notif->membership_status,
(u8 *)&notif->user_position);
}
@@ -1965,7 +1965,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return;
@@ -2181,7 +2181,7 @@ static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm,
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -2191,7 +2191,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* on the beacon interval, which was not known when the station
* interface was added.
*/
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
@@ -2201,7 +2201,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
- bss_conf->assoc && vif->bss_conf.he_support &&
+ vif->cfg.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
@@ -2220,10 +2220,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* after sending it once, adopt mac80211 data */
memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
- mvmvif->associated = bss_conf->assoc;
+ mvmvif->associated = vif->cfg.assoc;
if (changes & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
memset(&mvmvif->beacon_stats, 0,
@@ -2337,7 +2337,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* However, on HW restart we should restore this data.
*/
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+ (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) {
ret = iwl_mvm_update_mu_groups(mvm, vif);
if (ret)
IWL_ERR(mvm,
@@ -2396,7 +2396,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -2522,8 +2523,22 @@ out_unlock:
return ret;
}
+static int iwl_mvm_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return iwl_mvm_start_ap_ibss(hw, vif, link_conf);
+}
+
+static int iwl_mvm_start_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -2586,11 +2601,24 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_stop_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ iwl_mvm_stop_ap_ibss(hw, vif, link_conf);
+}
+
+static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
static void
iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -2621,13 +2649,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
mutex_lock(&mvm->mutex);
- if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+ if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
switch (vif->type) {
@@ -3020,7 +3048,7 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
#if IS_ENABLED(CONFIG_IWLMEI)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mei_conn_info conn_info = {
- .ssid_len = vif->bss_conf.ssid_len,
+ .ssid_len = vif->cfg.ssid_len,
.channel = vif->bss_conf.chandef.chan->hw_value,
};
@@ -3068,7 +3096,7 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
}
- memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len);
+ memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
/* TODO: add support for collocated AP data */
@@ -3321,7 +3349,8 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
}
static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -3381,7 +3410,7 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (!vif->bss_conf.idle) {
+ if (!vif->cfg.idle) {
ret = -EBUSY;
goto out;
}
@@ -3747,7 +3776,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
* like the delay to be for 2-3 dtim intervals, in case there are
* other time events with higher priority.
*/
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
/* We cannot remain off-channel longer than the DTIM interval */
if (dtim_interval <= req_dur) {
@@ -4004,7 +4033,7 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
{
struct iwl_mvm_ftm_responder_iter_data *data = _data;
- if (rcu_access_pointer(vif->chanctx_conf) == data->ctx &&
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx &&
vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
data->responder = true;
}
@@ -4235,6 +4264,7 @@ out:
}
static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -4308,6 +4338,7 @@ out:
static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -4502,7 +4533,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||
- !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+ !vif->cfg.assoc || !vif->bss_conf.dtim_period ||
!tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
return -EINVAL;
@@ -4631,7 +4662,7 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
csa_vif =
rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
- if (WARN_ONCE(csa_vif && csa_vif->csa_active,
+ if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active,
"Another CSA is already in progress")) {
ret = -EBUSY;
goto out_unlock;
@@ -4670,7 +4701,7 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
*/
- if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) {
+ if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) {
ret = -EBUSY;
goto out_unlock;
}
@@ -5069,7 +5100,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return;
mutex_lock(&mvm->mutex);
@@ -5406,10 +5437,10 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
- .start_ap = iwl_mvm_start_ap_ibss,
- .stop_ap = iwl_mvm_stop_ap_ibss,
- .join_ibss = iwl_mvm_start_ap_ibss,
- .leave_ibss = iwl_mvm_stop_ap_ibss,
+ .start_ap = iwl_mvm_start_ap,
+ .stop_ap = iwl_mvm_stop_ap,
+ .join_ibss = iwl_mvm_start_ibss,
+ .leave_ibss = iwl_mvm_stop_ibss,
.tx_last_beacon = iwl_mvm_tx_last_beacon,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index c7dabc6b3765..a8bd0f5f795c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2021-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -192,9 +192,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
size = sizeof(cmd.v1);
}
- if (vif->bss_conf.arp_addr_cnt) {
+ if (vif->cfg.arp_addr_cnt) {
enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
- common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ common->host_ipv4_addr = vif->cfg.arp_addr_list[0];
memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index b2f33ebdf485..db43c8a83a31 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -162,7 +162,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
return;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return;
/* this shouldn't happen *again*, ignore it */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index b9bd81242b21..f5744162d0d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -223,7 +223,7 @@ static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
*is_p2p_standalone = false;
break;
case NL80211_IFTYPE_STATION:
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
*is_p2p_standalone = false;
break;
@@ -283,7 +283,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
bool radar_detect = false;
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
WARN_ON(!chanctx_conf);
if (chanctx_conf) {
chan = chanctx_conf->def.chan;
@@ -359,7 +359,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
+ if (!vif->cfg.ps || !mvmvif->pm_enabled)
return;
if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
@@ -890,7 +890,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
mvm->ps_disabled ||
- !vif->bss_conf.ps ||
+ !vif->cfg.ps ||
iwl_mvm_vif_low_latency(mvmvif));
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index c862bd243b55..cea1a34f9130 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -47,7 +47,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
break;
return;
case NL80211_IFTYPE_AP:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 974eeecc9153..a79043f30775 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1861,7 +1861,7 @@ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int index = rate->index;
bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
- !vif->bss_conf.ps);
+ !vif->cfg.ps);
IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
cam, sta_ps_disabled);
@@ -1980,7 +1980,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
#endif
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf))
band = NUM_NL80211_BANDS;
else
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a4077053e374..582a95ffc7ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1948,14 +1948,14 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
* reset or resume flow, or while not associated and a large interval
* has passed since the last 6GHz passive scan.
*/
- if ((vif->bss_conf.assoc ||
+ if ((vif->cfg.assoc ||
time_after(mvm->last_6ghz_passive_scan_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
(time_before(mvm->last_reset_or_resume_time_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
jiffies))) {
IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
- vif->bss_conf.assoc ? "associated" :
+ vif->cfg.assoc ? "associated" :
"timeout did not expire");
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 693752d8f65b..1f4ac1e93cee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -31,7 +31,7 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION) {
data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
data->sta_vif_state = SF_FULL_ON;
else
data->sta_vif_state = SF_INIT_OFF;
@@ -261,7 +261,7 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
return -EINVAL;
if (changed_vif->type != NL80211_IFTYPE_STATION) {
new_state = SF_UNINIT;
- } else if (changed_vif->bss_conf.assoc &&
+ } else if (changed_vif->cfg.assoc &&
changed_vif->bss_conf.dtim_period) {
mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
sta_id = mvmvif->ap_sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index bbb1522e7280..ff0d3b3df140 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1861,6 +1861,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
}
@@ -1948,7 +1949,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
return ret;
/* unassoc - go ahead - remove the AP STA now */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index bf04326e35ff..674dd137fb9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2020, 2022 Intel Corporation
*/
#include <linux/etherdevice.h>
#include "mvm.h"
@@ -380,7 +380,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
type == TDLS_MOVE_CH) {
/* we need to return to base channel */
struct ieee80211_chanctx_conf *chanctx =
- rcu_dereference(vif->chanctx_conf);
+ rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx)) {
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 6edf2b79db43..ed8ba81a6043 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -123,7 +123,7 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
rcu_read_lock();
csa_vif = rcu_dereference(mvm->csa_vif);
- if (!csa_vif || !csa_vif->csa_active)
+ if (!csa_vif || !csa_vif->bss_conf.csa_active)
goto out_unlock;
IWL_DEBUG_TE(mvm, "CSA NOA started\n");
@@ -160,7 +160,7 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION)
return false;
- if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
+ if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
vif->bss_conf.dtim_period)
return false;
if (errmsg)
@@ -176,7 +176,7 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
rcu_read_unlock();
}
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
/*
* When not associated, this will be called from
* iwl_mvm_event_mlme_callback_ini()
@@ -346,7 +346,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
- !te_data->vif->bss_conf.assoc ?
+ !te_data->vif->cfg.assoc ?
"Not associated and the time event is over already..." :
"No beacon heard and the time event is over already...");
break;
@@ -859,7 +859,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, vif,
- !vif->bss_conf.assoc ?
+ !vif->cfg.assoc ?
"Not associated and the session protection is over already..." :
"No beacon heard and the session protection is over already...");
spin_lock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8125bb76f59e..f9e08b339e0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1959,7 +1959,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (mvmsta->vif)
chanctx_conf =
- rcu_dereference(mvmsta->vif->chanctx_conf);
+ rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index bc947733d982..14b2de65bd84 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -304,7 +304,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
smps_mode = IEEE80211_SMPS_DYNAMIC;
}
- ieee80211_request_smps(vif, smps_mode);
+ ieee80211_request_smps(vif, 0, smps_mode);
}
static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
@@ -604,7 +604,7 @@ static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
if (vif->type != NL80211_IFTYPE_STATION)
return;
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
data->assoc = true;
}
@@ -816,7 +816,7 @@ static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return;
if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index bece14e4ff0d..b52cce38115d 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -173,10 +173,8 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
* keeping a extra list for uploaded keys.
*/
- priv->used_rxkeys = kcalloc(BITS_TO_LONGS(priv->rx_keycache_size),
- sizeof(long),
- GFP_KERNEL);
-
+ priv->used_rxkeys = bitmap_zalloc(priv->rx_keycache_size,
+ GFP_KERNEL);
if (!priv->used_rxkeys)
return -ENOMEM;
}
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index a3ca6620dc0c..b925e327e091 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -139,7 +139,7 @@ static int p54_beacon_update(struct p54_common *priv,
struct sk_buff *beacon;
int ret;
- beacon = ieee80211_beacon_get(priv->hw, vif);
+ beacon = ieee80211_beacon_get(priv->hw, vif, 0);
if (!beacon)
return -ENOMEM;
ret = p54_beacon_format_ie_tim(beacon);
@@ -404,7 +404,8 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
}
static int p54_conf_tx(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct p54_common *priv = dev->priv;
@@ -449,7 +450,7 @@ static int p54_get_stats(struct ieee80211_hw *dev,
static void p54_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct p54_common *priv = dev->priv;
@@ -480,8 +481,8 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
p54_scan(priv, P54_SCAN_EXIT, 0);
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc) {
- priv->aid = info->aid;
+ if (vif->cfg.assoc) {
+ priv->aid = vif->cfg.aid;
priv->wakeup_timer = info->beacon_int *
info->dtim_period * 5;
p54_setup_mac(priv);
@@ -682,7 +683,7 @@ static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
* queues have already been stopped and no new frames can sneak
* up from behind.
*/
- while ((total = p54_flush_count(priv) && i--)) {
+ while ((total = p54_flush_count(priv)) && i--) {
/* waste time */
msleep(20);
}
@@ -830,7 +831,7 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->output_limit);
kfree(priv->curve_data);
kfree(priv->rssi_db);
- kfree(priv->used_rxkeys);
+ bitmap_free(priv->used_rxkeys);
kfree(priv->survey);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index f99b7ba69fc3..19152fd449ba 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -164,7 +164,7 @@ static int p54spi_request_firmware(struct ieee80211_hw *dev)
ret = p54_parse_firmware(dev, priv->firmware);
if (ret) {
- release_firmware(priv->firmware);
+ /* the firmware is released by the caller */
return ret;
}
@@ -659,6 +659,7 @@ static int p54spi_probe(struct spi_device *spi)
return 0;
err_free_common:
+ release_firmware(priv->firmware);
free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
err_free_gpio_irq:
gpio_free(p54spi_gpio_irq);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2f746eb64507..6e55f153ff26 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -65,6 +65,10 @@ static bool support_p2p_device = true;
module_param(support_p2p_device, bool, 0444);
MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+static bool mlo;
+module_param(mlo, bool, 0444);
+MODULE_PARM_DESC(mlo, "Support MLO");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -224,6 +228,7 @@ static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
struct hwsim_sta_priv {
u32 magic;
+ unsigned int last_link;
};
#define HWSIM_STA_MAGIC 0x6d537749
@@ -290,8 +295,7 @@ static inline int hwsim_net_set_netgroup(struct net *net)
{
struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
- hwsim_net->netgroup = ida_simple_get(&hwsim_netgroup_ida,
- 0, 0, GFP_KERNEL);
+ hwsim_net->netgroup = ida_alloc(&hwsim_netgroup_ida, GFP_KERNEL);
return hwsim_net->netgroup >= 0 ? 0 : -ENOMEM;
}
@@ -624,6 +628,12 @@ static struct platform_driver mac80211_hwsim_driver = {
},
};
+struct mac80211_hwsim_link_data {
+ u32 link_id;
+ u64 beacon_int /* beacon interval in us */;
+ struct hrtimer beacon_timer;
+};
+
struct mac80211_hwsim_data {
struct list_head list;
struct rhash_head rht;
@@ -669,18 +679,16 @@ struct mac80211_hwsim_data {
struct ieee80211_channel *channel;
enum nl80211_chan_width bw;
- u64 beacon_int /* beacon interval in us */;
unsigned int rx_filter;
bool started, idle, scanning;
struct mutex mutex;
- struct hrtimer beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
} ps;
bool ps_poll_pending;
struct dentry *debugfs;
- uintptr_t pending_cookie;
+ atomic_t pending_cookie;
struct sk_buff_head pending; /* packets pending */
/*
* Only radios in the same group can communicate together (the
@@ -710,6 +718,8 @@ struct mac80211_hwsim_data {
/* RSSI in rx status of the receiver */
int rx_rssi;
+
+ struct mac80211_hwsim_link_data link_data[IEEE80211_MLD_MAX_NUM_LINKS];
};
static const struct rhashtable_params hwsim_rht_params = {
@@ -777,6 +787,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_PERM_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT,
[HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 },
[HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY },
+ [HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
};
#if IS_REACHABLE(CONFIG_VIRTIO)
@@ -889,7 +900,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan);
rcu_read_unlock();
}
@@ -922,7 +933,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan);
rcu_read_unlock();
}
@@ -1072,7 +1083,8 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
{
struct mac80211_hwsim_data *data = hw->priv;
u64 now = mac80211_hwsim_get_tsf(hw, vif);
- u32 bcn_int = data->beacon_int;
+ /* MLD not supported here */
+ u32 bcn_int = data->link_data[0].beacon_int;
u64 delta = abs(tsf - now);
/* adjust after beaconing with new timestamp at old TBTT */
@@ -1187,10 +1199,27 @@ struct mac80211_hwsim_addr_match_data {
static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ int i;
struct mac80211_hwsim_addr_match_data *md = data;
- if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+ if (memcmp(mac, md->addr, ETH_ALEN) == 0) {
md->ret = true;
+ return;
+ }
+
+ /* Match the link address */
+ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+ struct ieee80211_bss_conf *conf;
+
+ conf = rcu_dereference(vif->link_conf[i]);
+ if (!conf)
+ continue;
+
+ if (memcmp(conf->addr, md->addr, ETH_ALEN) == 0) {
+ md->ret = true;
+ return;
+ }
+ }
}
static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
@@ -1416,8 +1445,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
/* We create a cookie to identify this skb */
- data->pending_cookie++;
- cookie = data->pending_cookie;
+ cookie = atomic_inc_return(&data->pending_cookie);
info->rate_driver_data[0] = (void *)cookie;
if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
goto nla_put_failure;
@@ -1464,15 +1492,26 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
struct ieee80211_vif *vif)
{
struct tx_iter_data *data = _data;
+ int i;
- if (!vif->chanctx_conf)
- return;
+ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+ struct ieee80211_bss_conf *conf;
+ struct ieee80211_chanctx_conf *chanctx;
- if (!hwsim_chans_compat(data->channel,
- rcu_dereference(vif->chanctx_conf)->def.chan))
- return;
+ conf = rcu_dereference(vif->link_conf[i]);
+ if (!conf)
+ continue;
+
+ chanctx = rcu_dereference(conf->chanctx_conf);
+ if (!chanctx)
+ continue;
- data->receive = true;
+ if (!hwsim_chans_compat(data->channel, chanctx->def.chan))
+ continue;
+
+ data->receive = true;
+ return;
+ }
}
static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
@@ -1662,6 +1701,51 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
return ack;
}
+static struct ieee80211_bss_conf *
+mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_link_sta **link_sta)
+{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+ int i;
+
+ if (!vif->valid_links)
+ return &vif->bss_conf;
+
+ /* FIXME: handle multicast TX properly */
+ if (is_multicast_ether_addr(hdr->addr1) || WARN_ON_ONCE(!sta)) {
+ unsigned int first_link = ffs(vif->valid_links) - 1;
+
+ return rcu_dereference(vif->link_conf[first_link]);
+ }
+
+ if (WARN_ON_ONCE(!sta->valid_links))
+ return &vif->bss_conf;
+
+ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+ struct ieee80211_bss_conf *bss_conf;
+ unsigned int link_id;
+
+ /* round-robin the available link IDs */
+ link_id = (sp->last_link + i + 1) % ARRAY_SIZE(vif->link_conf);
+
+ *link_sta = rcu_dereference(sta->link[link_id]);
+ if (!*link_sta)
+ continue;
+
+ bss_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!bss_conf))
+ continue;
+
+ sp->last_link = link_id;
+ return bss_conf;
+ }
+
+ return NULL;
+}
+
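The link selection above is plain round-robin: start one past the link used last time, wrap modulo the array size, and skip links that have no station or BSS configuration, so traffic spreads over all usable links. A minimal user-space model of that rotation (hypothetical data, not the driver's structures):

#include <stdio.h>

#define MAX_LINKS 15

/* returns the chosen link id, or -1 if no link is usable;
 * *last is updated so the next call starts one past this choice */
static int pick_link(const int usable[MAX_LINKS], int *last)
{
    for (int i = 0; i < MAX_LINKS; i++) {
        int id = (*last + i + 1) % MAX_LINKS;

        if (!usable[id])
            continue;
        *last = id;
        return id;
    }
    return -1;
}

int main(void)
{
    int usable[MAX_LINKS] = { [0] = 1, [2] = 1 };
    int last = 0;

    /* alternates between the two usable links: prints "2 0 2 0" */
    for (int n = 0; n < 4; n++)
        printf("%d ", pick_link(usable, &last));
    printf("\n");
    return 0;
}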
static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -1687,7 +1771,47 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
} else {
- chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
+ u8 link = u32_get_bits(IEEE80211_SKB_CB(skb)->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ struct ieee80211_vif *vif = txi->control.vif;
+ struct ieee80211_link_sta *link_sta = NULL;
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_bss_conf *bss_conf;
+
+ if (link != IEEE80211_LINK_UNSPECIFIED) {
+ bss_conf = rcu_dereference(txi->control.vif->link_conf[link]);
+ if (sta)
+ link_sta = rcu_dereference(sta->link[link]);
+ } else {
+ bss_conf = mac80211_hwsim_select_tx_link(data, vif, sta,
+ hdr, &link_sta);
+ }
+
+ if (WARN_ON(!bss_conf)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ if (sta && sta->mlo) {
+ if (WARN_ON(!link_sta)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ /* address translation to link addresses on TX */
+ ether_addr_copy(hdr->addr1, link_sta->addr);
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+ /* translate A3 only if it's the BSSID */
+ if (!ieee80211_has_tods(hdr->frame_control) &&
+ !ieee80211_has_fromds(hdr->frame_control)) {
+ if (ether_addr_equal(hdr->addr3, sta->addr))
+ ether_addr_copy(hdr->addr3, link_sta->addr);
+ else if (ether_addr_equal(hdr->addr3, vif->addr))
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+ /* no need to look at A4, if present it's SA */
+ }
+
+ chanctx_conf = rcu_dereference(bss_conf->chanctx_conf);
if (chanctx_conf) {
channel = chanctx_conf->def.chan;
confbw = chanctx_conf->def.width;
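A note on the MLO address handling a few lines above: for an MLO station the frame goes out on one specific link, so the MLD-level addresses in the 802.11 header are rewritten to that link's addresses; A1 becomes the peer's link address, A2 the local link address, and A3 is only touched when it actually carries the BSSID (ToDS and FromDS both clear). A compact model of that rule with 6-byte arrays (illustrative only, not the driver's structures):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct hdr3 { unsigned char a1[ETH_ALEN], a2[ETH_ALEN], a3[ETH_ALEN]; };

static void translate_to_link(struct hdr3 *h,
                              const unsigned char sta_mld[ETH_ALEN],
                              const unsigned char sta_link[ETH_ALEN],
                              const unsigned char vif_mld[ETH_ALEN],
                              const unsigned char vif_link[ETH_ALEN],
                              int to_ds, int from_ds)
{
    memcpy(h->a1, sta_link, ETH_ALEN);          /* RA: peer link address */
    memcpy(h->a2, vif_link, ETH_ALEN);          /* TA: own link address  */

    /* A3 is the BSSID only when neither ToDS nor FromDS is set */
    if (!to_ds && !from_ds) {
        if (!memcmp(h->a3, sta_mld, ETH_ALEN))
            memcpy(h->a3, sta_link, ETH_ALEN);
        else if (!memcmp(h->a3, vif_mld, ETH_ALEN))
            memcpy(h->a3, vif_link, ETH_ALEN);
    }
}

int main(void)
{
    unsigned char sta_mld[6] = {2,0,0,0,0,1}, sta_link[6] = {2,0,0,0,1,1};
    unsigned char vif_mld[6] = {2,0,0,0,0,2}, vif_link[6] = {2,0,0,0,1,2};
    struct hdr3 h;

    memcpy(h.a3, vif_mld, 6);           /* A3 == BSSID (== own MLD address) */
    translate_to_link(&h, sta_mld, sta_link, vif_mld, vif_link, 0, 0);
    printf("a3 now link addr: %d\n", !memcmp(h.a3, vif_link, 6)); /* 1 */
    return 0;
}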
@@ -1795,9 +1919,12 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
+ int i;
data->started = false;
- hrtimer_cancel(&data->beacon_timer);
+
+ for (i = 0; i < ARRAY_SIZE(data->link_data); i++)
+ hrtimer_cancel(&data->link_data[i].beacon_timer);
while (!skb_queue_empty(&data->pending))
ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
@@ -1888,7 +2015,12 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mac80211_hwsim_data *data = arg;
+ struct mac80211_hwsim_link_data *link_data = arg;
+ u32 link_id = link_data->link_id;
+ struct ieee80211_bss_conf *link_conf;
+ struct mac80211_hwsim_data *data =
+ container_of(link_data, struct mac80211_hwsim_data,
+ link_data[link_id]);
struct ieee80211_hw *hw = data->hw;
struct ieee80211_tx_info *info;
struct ieee80211_rate *txrate;
@@ -1899,13 +2031,17 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
hwsim_check_magic(vif);
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (!link_conf)
+ return;
+
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
vif->type != NL80211_IFTYPE_ADHOC &&
vif->type != NL80211_IFTYPE_OCB)
return;
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, link_data->link_id);
if (skb == NULL)
return;
info = IEEE80211_SKB_CB(skb);
@@ -1936,38 +2072,41 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
}
mac80211_hwsim_tx_frame(hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(link_conf->chanctx_conf)->def.chan);
while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) {
mac80211_hwsim_tx_frame(hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(link_conf->chanctx_conf)->def.chan);
}
- if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
+ if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
static enum hrtimer_restart
mac80211_hwsim_beacon(struct hrtimer *timer)
{
+ struct mac80211_hwsim_link_data *link_data =
+ container_of(timer, struct mac80211_hwsim_link_data, beacon_timer);
struct mac80211_hwsim_data *data =
- container_of(timer, struct mac80211_hwsim_data, beacon_timer);
+ container_of(link_data, struct mac80211_hwsim_data,
+ link_data[link_data->link_id]);
struct ieee80211_hw *hw = data->hw;
- u64 bcn_int = data->beacon_int;
+ u64 bcn_int = link_data->beacon_int;
if (!data->started)
return HRTIMER_NORESTART;
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
- mac80211_hwsim_beacon_tx, data);
+ mac80211_hwsim_beacon_tx, link_data);
/* beacon at new TBTT + beacon interval */
if (data->bcn_delta) {
bcn_int -= data->bcn_delta;
data->bcn_delta = 0;
}
- hrtimer_forward_now(&data->beacon_timer,
+ hrtimer_forward_now(&link_data->beacon_timer,
ns_to_ktime(bcn_int * NSEC_PER_USEC));
return HRTIMER_RESTART;
}
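Each link now owns its own hrtimer, and the expiry callback recovers first the per-link state (container_of on the embedded timer) and then the radio (a second container_of, using the stored link_id to step back to the enclosing structure). The same back-pointer technique in portable C with a hand-rolled container_of; the kernel macro does essentially this, and here the second hop walks back to array element 0 instead of indexing the member designator directly:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int armed; };

struct link_data {
    struct timer beacon_timer;
    unsigned int link_id;
};

struct radio {
    struct link_data link_data[4];
};

static struct radio *radio_from_timer(struct timer *t)
{
    /* first hop: timer field -> enclosing per-link state */
    struct link_data *ld = container_of(t, struct link_data, beacon_timer);

    /* second hop: step back to element 0, then to the enclosing radio */
    return container_of(ld - ld->link_id, struct radio, link_data);
}

int main(void)
{
    struct radio r = { .link_data = { [2] = { .link_id = 2 } } };

    printf("%d\n", radio_from_timer(&r.link_data[2].beacon_timer) == &r); /* 1 */
    return 0;
}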
@@ -2051,16 +2190,21 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
mutex_unlock(&data->mutex);
- if (!data->started || !data->beacon_int)
- hrtimer_cancel(&data->beacon_timer);
- else if (!hrtimer_is_queued(&data->beacon_timer)) {
- u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
- u32 bcn_int = data->beacon_int;
- u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
+ for (idx = 0; idx < ARRAY_SIZE(data->link_data); idx++) {
+ struct mac80211_hwsim_link_data *link_data =
+ &data->link_data[idx];
- hrtimer_start(&data->beacon_timer,
- ns_to_ktime(until_tbtt * NSEC_PER_USEC),
- HRTIMER_MODE_REL_SOFT);
+ if (!data->started || !link_data->beacon_int) {
+ hrtimer_cancel(&link_data->beacon_timer);
+ } else if (!hrtimer_is_queued(&link_data->beacon_timer)) {
+ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
+ u32 bcn_int = link_data->beacon_int;
+ u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
+
+ hrtimer_start(&link_data->beacon_timer,
+ ns_to_ktime(until_tbtt * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
+ }
}
return 0;
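The re-arm logic itself is unchanged, just repeated per link: reduce the current TSF modulo the beacon interval, and the complement of the remainder is the time until the next TBTT. The kernel uses do_div() because the TSF is 64-bit; do_div divides in place and returns the remainder. The same arithmetic in plain C, with invented example values:

#include <stdint.h>
#include <stdio.h>

/* microseconds until the next target beacon transmission time */
static uint64_t until_tbtt(uint64_t tsf, uint32_t bcn_int_us)
{
    uint64_t into_interval = tsf % bcn_int_us;   /* do_div(tsf, bcn_int) in the kernel */

    return bcn_int_us - into_interval;
}

int main(void)
{
    /* beacon interval 100 TU = 102400 us; TSF 1 s past a TBTT-aligned origin */
    printf("%llu\n", (unsigned long long)until_tbtt(1000000, 102400)); /* prints 24000 */
    return 0;
}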
@@ -2094,47 +2238,61 @@ static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
(*count)++;
}
-static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
+static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- struct mac80211_hwsim_data *data = hw->priv;
hwsim_check_magic(vif);
- wiphy_dbg(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
+ wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM)\n",
__func__, changed, vif->addr);
+ if (changed & BSS_CHANGED_ASSOC) {
+ wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
+ vif->cfg.assoc, vif->cfg.aid);
+ vp->assoc = vif->cfg.assoc;
+ vp->aid = vif->cfg.aid;
+ }
+}
+
+static void mac80211_hwsim_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct mac80211_hwsim_data *data = hw->priv;
+ unsigned int link_id = info->link_id;
+ struct mac80211_hwsim_link_data *link_data = &data->link_data[link_id];
+
+ hwsim_check_magic(vif);
+
+ wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM, link id %u)\n",
+ __func__, (unsigned long long)changed, vif->addr, link_id);
+
if (changed & BSS_CHANGED_BSSID) {
wiphy_dbg(hw->wiphy, "%s: BSSID changed: %pM\n",
__func__, info->bssid);
memcpy(vp->bssid, info->bssid, ETH_ALEN);
}
- if (changed & BSS_CHANGED_ASSOC) {
- wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
- info->assoc, info->aid);
- vp->assoc = info->assoc;
- vp->aid = info->aid;
- }
-
if (changed & BSS_CHANGED_BEACON_ENABLED) {
wiphy_dbg(hw->wiphy, " BCN EN: %d (BI=%u)\n",
info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
- !hrtimer_is_queued(&data->beacon_timer) &&
+ !hrtimer_is_queued(&link_data->beacon_timer) &&
info->enable_beacon) {
u64 tsf, until_tbtt;
u32 bcn_int;
- data->beacon_int = info->beacon_int * 1024;
+ link_data->beacon_int = info->beacon_int * 1024;
tsf = mac80211_hwsim_get_tsf(hw, vif);
- bcn_int = data->beacon_int;
+ bcn_int = link_data->beacon_int;
until_tbtt = bcn_int - do_div(tsf, bcn_int);
- hrtimer_start(&data->beacon_timer,
+ hrtimer_start(&link_data->beacon_timer,
ns_to_ktime(until_tbtt * NSEC_PER_USEC),
HRTIMER_MODE_REL_SOFT);
} else if (!info->enable_beacon) {
@@ -2145,8 +2303,8 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u",
count);
if (count == 0) {
- hrtimer_cancel(&data->beacon_timer);
- data->beacon_int = 0;
+ hrtimer_cancel(&link_data->beacon_timer);
+ link_data->beacon_int = 0;
}
}
}
@@ -2187,35 +2345,56 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
{
struct mac80211_hwsim_data *data = hw->priv;
u32 bw = U32_MAX;
- enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+ int link_id;
+
+ rcu_read_lock();
+ for (link_id = 0;
+ link_id < ARRAY_SIZE(vif->link_conf);
+ link_id++) {
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+ struct ieee80211_bss_conf *vif_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
- switch (sta->deflink.bandwidth) {
+ if (!link_sta)
+ continue;
+
+ switch (link_sta->bandwidth) {
#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
- C(20);
- C(40);
- C(80);
- C(160);
- C(320);
+ C(20);
+ C(40);
+ C(80);
+ C(160);
+ C(320);
#undef C
- }
+ }
- if (!data->use_chanctx) {
- confbw = data->bw;
- } else {
- struct ieee80211_chanctx_conf *chanctx_conf;
+ if (!data->use_chanctx) {
+ confbw = data->bw;
+ } else {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ vif_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON(!vif_conf))
+ continue;
+
+ chanctx_conf = rcu_dereference(vif_conf->chanctx_conf);
+
+ if (!WARN_ON(!chanctx_conf))
+ confbw = chanctx_conf->def.width;
+ }
+
+ WARN(bw > hwsim_get_chanwidth(confbw),
+ "intf %pM [link=%d]: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
+ vif->addr, link_id, sta->addr, bw, sta->deflink.bandwidth,
+ hwsim_get_chanwidth(data->bw), data->bw);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (!WARN_ON(!chanctx_conf))
- confbw = chanctx_conf->def.width;
- rcu_read_unlock();
}
+ rcu_read_unlock();
+
- WARN(bw > hwsim_get_chanwidth(confbw),
- "intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
- vif->addr, sta->addr, bw, sta->deflink.bandwidth,
- hwsim_get_chanwidth(data->bw), data->bw);
}
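The C(_bw) helper above is only a way to keep the enum-to-MHz switch table terse: each expansion produces one "case IEEE80211_STA_RX_BW_x: bw = x; break;" line. The same pattern in isolation, with enum names invented for the example:

#include <stdio.h>

enum rx_bw { RX_BW_20, RX_BW_40, RX_BW_80, RX_BW_160, RX_BW_320 };

static unsigned int bw_to_mhz(enum rx_bw rxbw)
{
    unsigned int bw = 0;

    switch (rxbw) {
    /* each C(x) expands to: case RX_BW_x: bw = x; break; */
#define C(_bw) case RX_BW_##_bw: bw = _bw; break
    C(20);
    C(40);
    C(80);
    C(160);
    C(320);
#undef C
    }
    return bw;
}

int main(void)
{
    printf("%u %u\n", bw_to_mhz(RX_BW_40), bw_to_mhz(RX_BW_320)); /* 40 320 */
    return 0;
}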
static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
@@ -2239,6 +2418,21 @@ static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ if (new_state == IEEE80211_STA_NOTEXIST)
+ return mac80211_hwsim_sta_remove(hw, vif, sta);
+
+ if (old_state == IEEE80211_STA_NOTEXIST)
+ return mac80211_hwsim_sta_add(hw, vif, sta);
+
+ return 0;
+}
+
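The MLO ops table registers this sta_state() callback instead of sta_add/sta_remove; mac80211 accepts either scheme, and the wrapper simply maps the two interesting transitions (NOTEXIST to anything = add, anything to NOTEXIST = remove) onto the existing helpers. A tiny stand-alone model of that dispatch (state names abbreviated, helpers are stubs):

#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

static int sta_add(void)    { puts("add");    return 0; }
static int sta_remove(void) { puts("remove"); return 0; }

static int sta_state_cb(enum sta_state old_state, enum sta_state new_state)
{
    if (new_state == STA_NOTEXIST)
        return sta_remove();
    if (old_state == STA_NOTEXIST)
        return sta_add();
    return 0;               /* intermediate transitions need no work here */
}

int main(void)
{
    sta_state_cb(STA_NOTEXIST, STA_NONE);       /* prints "add"    */
    sta_state_cb(STA_ASSOC, STA_AUTHORIZED);    /* prints nothing  */
    sta_state_cb(STA_NONE, STA_NOTEXIST);       /* prints "remove" */
    return 0;
}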
static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -2265,10 +2459,10 @@ static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
return 0;
}
-static int mac80211_hwsim_conf_tx(
- struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+static int mac80211_hwsim_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
wiphy_dbg(hw->wiphy,
"%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
@@ -2719,6 +2913,7 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
hwsim_check_magic(vif);
@@ -2729,6 +2924,7 @@ static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
hwsim_check_magic(vif);
@@ -2789,6 +2985,45 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
+static int mac80211_hwsim_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mac80211_hwsim_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ unsigned long rem = old_links & ~new_links ?: BIT(0);
+ unsigned long add = new_links & ~old_links;
+ int i;
+
+ for_each_set_bit(i, &rem, IEEE80211_MLD_MAX_NUM_LINKS)
+ mac80211_hwsim_config_mac_nl(hw, old[i]->addr, false);
+
+ for_each_set_bit(i, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+
+ /* FIXME: figure out how to get the locking here */
+ link_conf = rcu_dereference_protected(vif->link_conf[i], 1);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ mac80211_hwsim_config_mac_nl(hw, link_conf->addr, true);
+ }
+
+ return 0;
+}
+
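change_vif_links() receives the old and new bitmaps of active link IDs: removed links are old & ~new, added links are new & ~old, and the "?: BIT(0)" fallback makes an empty removal set still tear down link 0, which covers leaving the implicit non-MLO link behind. The bit arithmetic on its own, with an explicit loop standing in for for_each_set_bit (values invented):

#include <stdio.h>

static void walk_links(unsigned int mask, const char *what)
{
    for (unsigned int i = 0; i < 16; i++)
        if (mask & (1u << i))
            printf("%s link %u\n", what, i);
}

int main(void)
{
    unsigned int old_links = 0x5;                /* links 0 and 2 active */
    unsigned int new_links = 0x6;                /* links 1 and 2 active */

    unsigned int rem = old_links & ~new_links;   /* 0x1: link 0 goes away */
    unsigned int add = new_links & ~old_links;   /* 0x2: link 1 appears   */

    /* mirror the driver's "?: BIT(0)" fallback: nothing removed still
     * means deconfiguring the implicit link 0 */
    if (!rem)
        rem = 1u << 0;

    walk_links(rem, "tear down");
    walk_links(add, "bring up");
    return 0;
}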
+static int mac80211_hwsim_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ return 0;
+}
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.start = mac80211_hwsim_start, \
@@ -2798,43 +3033,58 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
.remove_interface = mac80211_hwsim_remove_interface, \
.config = mac80211_hwsim_config, \
.configure_filter = mac80211_hwsim_configure_filter, \
- .bss_info_changed = mac80211_hwsim_bss_info_changed, \
+ .vif_cfg_changed = mac80211_hwsim_vif_info_changed, \
+ .link_info_changed = mac80211_hwsim_link_info_changed, \
.tx_last_beacon = mac80211_hwsim_tx_last_beacon, \
- .sta_add = mac80211_hwsim_sta_add, \
- .sta_remove = mac80211_hwsim_sta_remove, \
.sta_notify = mac80211_hwsim_sta_notify, \
.sta_rc_update = mac80211_hwsim_sta_rc_update, \
- .set_tim = mac80211_hwsim_set_tim, \
.conf_tx = mac80211_hwsim_conf_tx, \
.get_survey = mac80211_hwsim_get_survey, \
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \
.ampdu_action = mac80211_hwsim_ampdu_action, \
.flush = mac80211_hwsim_flush, \
- .get_tsf = mac80211_hwsim_get_tsf, \
- .set_tsf = mac80211_hwsim_set_tsf, \
.get_et_sset_count = mac80211_hwsim_get_et_sset_count, \
.get_et_stats = mac80211_hwsim_get_et_stats, \
.get_et_strings = mac80211_hwsim_get_et_strings,
+#define HWSIM_NON_MLO_OPS \
+ .sta_add = mac80211_hwsim_sta_add, \
+ .sta_remove = mac80211_hwsim_sta_remove, \
+ .set_tim = mac80211_hwsim_set_tim, \
+ .get_tsf = mac80211_hwsim_get_tsf, \
+ .set_tsf = mac80211_hwsim_set_tsf,
+
static const struct ieee80211_ops mac80211_hwsim_ops = {
HWSIM_COMMON_OPS
+ HWSIM_NON_MLO_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
};
+#define HWSIM_CHANCTX_OPS \
+ .hw_scan = mac80211_hwsim_hw_scan, \
+ .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, \
+ .remain_on_channel = mac80211_hwsim_roc, \
+ .cancel_remain_on_channel = mac80211_hwsim_croc, \
+ .add_chanctx = mac80211_hwsim_add_chanctx, \
+ .remove_chanctx = mac80211_hwsim_remove_chanctx, \
+ .change_chanctx = mac80211_hwsim_change_chanctx, \
+ .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+
static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
HWSIM_COMMON_OPS
- .hw_scan = mac80211_hwsim_hw_scan,
- .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan,
- .sw_scan_start = NULL,
- .sw_scan_complete = NULL,
- .remain_on_channel = mac80211_hwsim_roc,
- .cancel_remain_on_channel = mac80211_hwsim_croc,
- .add_chanctx = mac80211_hwsim_add_chanctx,
- .remove_chanctx = mac80211_hwsim_remove_chanctx,
- .change_chanctx = mac80211_hwsim_change_chanctx,
- .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,
- .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+ HWSIM_NON_MLO_OPS
+ HWSIM_CHANCTX_OPS
+};
+
+static const struct ieee80211_ops mac80211_hwsim_mlo_ops = {
+ HWSIM_COMMON_OPS
+ HWSIM_CHANCTX_OPS
+ .set_rts_threshold = mac80211_hwsim_set_rts_threshold,
+ .change_vif_links = mac80211_hwsim_change_vif_links,
+ .change_sta_links = mac80211_hwsim_change_sta_links,
+ .sta_state = mac80211_hwsim_sta_state,
};
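The three ops tables above are assembled from shared macro fragments so the common callbacks are written once: HWSIM_COMMON_OPS plus HWSIM_CHANCTX_OPS plus the MLO-only entries yield the MLO table, while the legacy tables add HWSIM_NON_MLO_OPS instead. The same composition technique on a toy ops struct (names invented):

#include <stdio.h>

struct ops {
    void (*start)(void);
    void (*stop)(void);
    void (*scan)(void);
    void (*set_tim)(void);    /* only meaningful for the non-MLO flavour here */
};

static void do_start(void)   { puts("start"); }
static void do_stop(void)    { puts("stop"); }
static void do_scan(void)    { puts("scan"); }
static void do_set_tim(void) { puts("set_tim"); }

#define COMMON_OPS \
    .start = do_start, \
    .stop = do_stop,

#define NON_MLO_OPS \
    .set_tim = do_set_tim,

static const struct ops legacy_ops = {
    COMMON_OPS
    NON_MLO_OPS
    .scan = do_scan,
};

static const struct ops mlo_ops = {
    COMMON_OPS
    .scan = do_scan,
};

int main(void)
{
    legacy_ops.set_tim();                                      /* prints "set_tim" */
    printf("mlo set_tim set: %d\n", mlo_ops.set_tim != NULL);  /* prints 0 */
    return 0;
}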
struct hwsim_new_radio_params {
@@ -2851,6 +3101,7 @@ struct hwsim_new_radio_params {
u32 iftypes;
u32 *ciphers;
u8 n_ciphers;
+ bool mlo;
};
static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
@@ -3005,7 +3256,7 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
.phy_cap_info[0] =
@@ -3158,7 +3409,7 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
.phy_cap_info[0] =
@@ -3340,7 +3591,7 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
.phy_cap_info[0] =
@@ -3544,7 +3795,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- if (param->use_chanctx)
+ if (param->mlo)
+ ops = &mac80211_hwsim_mlo_ops;
+ else if (param->use_chanctx)
ops = &mac80211_hwsim_mchan_ops;
hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname);
if (!hw) {
@@ -3705,13 +3958,22 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
- if (rctbl)
- ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ if (param->mlo) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ } else {
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ if (rctbl)
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ }
+
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -3856,9 +4118,13 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_SOFT);
- data->beacon_timer.function = mac80211_hwsim_beacon;
+ for (i = 0; i < ARRAY_SIZE(data->link_data); i++) {
+ hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_SOFT);
+ data->link_data[i].beacon_timer.function =
+ mac80211_hwsim_beacon;
+ data->link_data[i].link_id = i;
+ }
err = ieee80211_register_hw(hw);
if (err < 0) {
@@ -4080,6 +4346,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
const u8 *src;
unsigned int hwsim_flags;
int i;
+ unsigned long flags;
bool found = false;
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
@@ -4107,18 +4374,20 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
/* look for the skb matching the cookie passed back from user */
+ spin_lock_irqsave(&data2->pending.lock, flags);
skb_queue_walk_safe(&data2->pending, skb, tmp) {
- u64 skb_cookie;
+ uintptr_t skb_cookie;
txi = IEEE80211_SKB_CB(skb);
- skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0];
+ skb_cookie = (uintptr_t)txi->rate_driver_data[0];
if (skb_cookie == ret_skb_cookie) {
- skb_unlink(skb, &data2->pending);
+ __skb_unlink(skb, &data2->pending);
found = true;
break;
}
}
+ spin_unlock_irqrestore(&data2->pending.lock, flags);
/* not found */
if (!found)
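skb_unlink() takes the queue lock internally, but the walk itself does not, so the lookup and the removal are now done under one spin_lock_irqsave() with the lock-free __skb_unlink() inside, presumably to close the window in which another context could modify the queue between finding the skb and removing it. A user-space model of the "one critical section around search plus unlink" pattern, using pthreads and a simple singly linked list:

#include <pthread.h>
#include <stdio.h>

struct node { unsigned long cookie; struct node *next; };

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* search and unlink under one critical section, so the pair appears
 * atomic to any other thread touching the list */
static struct node *take_by_cookie(unsigned long cookie)
{
    struct node **pp, *found = NULL;

    pthread_mutex_lock(&qlock);
    for (pp = &head; *pp; pp = &(*pp)->next) {
        if ((*pp)->cookie == cookie) {
            found = *pp;
            *pp = found->next;      /* the __skb_unlink() analogue */
            break;
        }
    }
    pthread_mutex_unlock(&qlock);
    return found;
}

int main(void)
{
    struct node b = { 2, NULL }, a = { 1, &b };

    head = &a;
    printf("%lu\n", take_by_cookie(2)->cookie);  /* prints 2            */
    printf("%p\n", (void *)take_by_cookie(2));   /* already gone: (nil) */
    return 0;
}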
@@ -4227,16 +4496,28 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* A frame is received from user space */
memset(&rx_status, 0, sizeof(rx_status));
if (info->attrs[HWSIM_ATTR_FREQ]) {
+ struct tx_iter_data iter_data = {};
+
/* throw away off-channel packets, but allow both the temporary
- * ("hw" scan/remain-on-channel) and regular channel, since the
- * internal datapath also allows this
+	 * ("hw" scan/remain-on-channel) channel and regular channels/links,
+ * since the internal datapath also allows this
*/
- mutex_lock(&data2->mutex);
rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
- if (rx_status.freq != channel->center_freq) {
- mutex_unlock(&data2->mutex);
+ iter_data.channel = ieee80211_get_channel(data2->hw->wiphy,
+ rx_status.freq);
+ if (!iter_data.channel)
goto out;
+
+ mutex_lock(&data2->mutex);
+ if (!hwsim_chans_compat(iter_data.channel, channel)) {
+ ieee80211_iterate_active_interfaces_atomic(
+ data2->hw, IEEE80211_IFACE_ITER_NORMAL,
+ mac80211_hwsim_tx_iter, &iter_data);
+ if (!iter_data.receive) {
+ mutex_unlock(&data2->mutex);
+ goto out;
+ }
}
mutex_unlock(&data2->mutex);
} else {
@@ -4426,6 +4707,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
}
}
+ param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT];
+
+ if (param.mlo)
+ param.use_chanctx = true;
+
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
@@ -4733,7 +5019,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
NULL);
}
- ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
+ ida_free(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
static struct pernet_operations hwsim_net_ops = {
@@ -4912,6 +5198,8 @@ static int hwsim_virtio_probe(struct virtio_device *vdev)
if (err)
return err;
+ virtio_device_ready(vdev);
+
err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);
if (err)
goto out_remove;
@@ -5082,7 +5370,8 @@ static int __init init_mac80211_hwsim(void)
}
param.p2p_device = support_p2p_device;
- param.use_chanctx = channels > 1;
+ param.mlo = mlo;
+ param.use_chanctx = channels > 1 || mlo;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 9dceed77c5d6..527799b2de0f 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020, 2022 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -140,6 +140,8 @@ enum {
* @HWSIM_ATTR_PERM_ADDR: permanent mac address of new radio
* @HWSIM_ATTR_IFTYPE_SUPPORT: u32 attribute of supported interface types bits
* @HWSIM_ATTR_CIPHER_SUPPORT: u32 array of supported cipher types
+ * @HWSIM_ATTR_MLO_SUPPORT: claim MLO support (exact parameters TBD) for
+ * the new radio
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -170,6 +172,7 @@ enum {
HWSIM_ATTR_PERM_ADDR,
HWSIM_ATTR_IFTYPE_SUPPORT,
HWSIM_ATTR_CIPHER_SUPPORT,
+ HWSIM_ATTR_MLO_SUPPORT,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 5d6dc1dd050d..32fdc4150b60 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -287,6 +287,7 @@ static int if_usb_probe(struct usb_interface *intf,
return 0;
err_get_fw:
+ usb_put_dev(udev);
lbs_remove_card(priv);
err_add_card:
if_usb_reset_device(cardp);
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index a58c1e141f2c..90ffe8d1e0e8 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -109,9 +109,9 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
if (priv->mesh_dev) {
mesh_wdev = priv->mesh_dev->ieee80211_ptr;
- ie->val.mesh_id_len = mesh_wdev->mesh_id_up_len;
- memcpy(ie->val.mesh_id, mesh_wdev->ssid,
- mesh_wdev->mesh_id_up_len);
+ ie->val.mesh_id_len = mesh_wdev->u.mesh.id_up_len;
+ memcpy(ie->val.mesh_id, mesh_wdev->u.mesh.id,
+ mesh_wdev->u.mesh.id_up_len);
}
ie->len = sizeof(struct mrvl_meshie_val) -
@@ -986,8 +986,8 @@ static int lbs_add_mesh(struct lbs_private *priv)
mesh_wdev->wiphy = priv->wdev->wiphy;
if (priv->mesh_tlv) {
- sprintf(mesh_wdev->ssid, "mesh");
- mesh_wdev->mesh_id_up_len = 4;
+ sprintf(mesh_wdev->u.mesh.id, "mesh");
+ mesh_wdev->u.mesh.id_up_len = 4;
}
mesh_wdev->netdev = mesh_dev;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 02a1e1f547d8..74c4942b9a5a 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -417,7 +417,7 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct lbtf_private *priv = hw->priv;
struct sk_buff *beacon;
@@ -427,7 +427,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
switch (priv->vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- beacon = ieee80211_beacon_get(hw, vif);
+ beacon = ieee80211_beacon_get(hw, vif, 0);
if (beacon) {
lbtf_beacon_set(priv, beacon);
kfree_skb(beacon);
@@ -691,7 +691,7 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
}
}
- skb = ieee80211_beacon_get(priv->hw, priv->vif);
+ skb = ieee80211_beacon_get(priv->hw, priv->vif, 0);
if (skb) {
lbtf_beacon_set(priv, skb);
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.c b/drivers/net/wireless/marvell/mwifiex/11ac.c
index 756f019ef28a..b9278d996c56 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.c
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11ac
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.h b/drivers/net/wireless/marvell/mwifiex/11ac.h
index 29e83468cf3f..65a88d6d8b88 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.h
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11ac
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11AC_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 3fa25cd64cda..6a9d7bc1f41e 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11h
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
@@ -304,6 +292,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
mutex_lock(&priv->wdev.mtx);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
mutex_unlock(&priv->wdev.mtx);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 9ff2058bcd7e..4af57e6d4393 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11n
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 83a88eecbda6..94b5e3e4ba08 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11n
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11N_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 46f41dbcf30d..34b4b34276d6 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11n Aggregation
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
index 382c1265c441..69b6888812f1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11n Aggregation
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11N_AGGR_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 1046b59647f5..bd835288ce57 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 465f244b3636..c205a3bbc8b3 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11N_RXREORDER_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile
index 2bd00f40958e..12d8affced18 100644
--- a/drivers/net/wireless/marvell/mwifiex/Makefile
+++ b/drivers/net/wireless/marvell/mwifiex/Makefile
@@ -1,18 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright 2011-2020 NXP
#
-# This software file (the "File") is distributed by NXP
-# under the terms of the GNU General Public License Version 2, June 1991
-# (the "License"). You may use, redistribute and/or modify this File in
-# accordance with the terms and conditions of the License, a copy of which
-# is available by writing to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
-# worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
-#
-# THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
-# ARE EXPRESSLY DISCLAIMED. The License provides additional details about
-# this warranty disclaimer.
mwifiex-y += main.o
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 6f23ec34e2e2..134114ac1ac0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: CFG80211
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "cfg80211.h"
@@ -1753,10 +1741,12 @@ mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
* Function configures data rates to firmware using bitrate mask
* provided by cfg80211.
*/
-static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
- struct net_device *dev,
- const u8 *peer,
- const struct cfg80211_bitrate_mask *mask)
+static int
+mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
+ struct net_device *dev,
+ unsigned int link_id,
+ const u8 *peer,
+ const struct cfg80211_bitrate_mask *mask)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
@@ -1998,7 +1988,8 @@ mwifiex_cfg80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
/* cfg80211 operation handler for stop ap.
* Function stops BSS running at uAP interface.
*/
-static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2421,7 +2412,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
}
- if (priv->wdev.current_bss) {
+ if (priv->wdev.connected) {
mwifiex_dbg(adapter, ERROR,
"%s: already connected\n", dev->name);
return -EALREADY;
@@ -2649,7 +2640,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
- if (!priv->wdev.current_bss && priv->scan_block)
+ if (!priv->wdev.connected && priv->scan_block)
priv->scan_block = false;
if (!mwifiex_stop_bg_scan(priv))
@@ -4025,6 +4016,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.h b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
index 530a63f13f14..50f7001f5ef0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: CFG80211
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef __MWIFIEX_CFG80211__
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index fb91ecfc5546..d39092b99212 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: Channel, Frequency and Power
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index d6a61f850c6f..d3339d67e7a0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: commands and events
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index dded92db1f37..bda53cb91f37 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: debugfs
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/debugfs.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 6bd23c9b1eed..88648c062713 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: generic data structures and APIs
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_DECL_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/ethtool.c b/drivers/net/wireless/marvell/mwifiex/ethtool.c
index 9bdad3f59039..17c6e7fedfc4 100644
--- a/drivers/net/wireless/marvell/mwifiex/ethtool.c
+++ b/drivers/net/wireless/marvell/mwifiex/ethtool.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: ethtool
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 63c25c69ed2b..26a48d8f49be 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: Firmware specific macros & structures
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_FW_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 40e99eaf5a30..26694cee15d3 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: management IE handling- setting and
* deleting IE.
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 88c72d1827a0..fc77489cc511 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: HW/FW Initialization
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 3db449efa167..091e7ca79376 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: ioctl data structures & APIs
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_IOCTL_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 173ccf79cbfc..a6e254a1185c 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: association and ad-hoc start/join
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index ace7371c4773..da2e6557e684 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: major functions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/suspend.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 332dd1c8db35..87729d251fed 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: major data structures and prototypes
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_MAIN_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index d5fb29400bad..f7f9277602a5 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: PCIE specific handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/iopoll.h>
@@ -3373,7 +3361,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
} else {
mwifiex_dbg(adapter, INFO,
"%s(): calling free_irq()\n", __func__);
- free_irq(card->dev->irq, &card->share_irq_ctx);
+ free_irq(card->dev->irq, &card->share_irq_ctx);
if (card->msi_enable)
pci_disable_msi(pdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 981e330c77d7..de901b3b59ad 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* @file mwifiex_pcie.h
*
* @brief This file contains definitions for PCI-E interface.
* driver.
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_PCIE_H
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
index 0234cf3c2974..dd6d21f1dbfd 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
@@ -1,19 +1,5 @@
-/*
- * NXP Wireless LAN device driver: PCIE and platform specific quirks
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// NXP Wireless LAN device driver: PCIE and platform specific quirks
#include <linux/dmi.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
index 8ec4176d698f..d6ff964aec5b 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
@@ -1,19 +1,5 @@
-/*
- * NXP Wireless LAN device driver: PCIE and platform specific quirks
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* NXP Wireless LAN device driver: PCIE and platform specific quirks */
#include "pcie.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0b877f3f6b97..ac8001c84293 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: scan ioctl and command handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 76004bda0c02..b8dc3b5c9ad9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: SDIO specific handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/firmware.h>
@@ -1549,7 +1537,7 @@ done:
/*
* This function decode sdio aggreation pkt.
*
- * Based on the the data block size and pkt_len,
+ * Based on the data block size and pkt_len,
* skb data will be decoded to few packets.
*/
static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 28e8f76bdd58..3a24bb48b299 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: SDIO specific definitions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_SDIO_H
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 1e2798dce18f..512b5bb9cf6f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station command handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -1790,29 +1778,31 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
wmm_qos_info->qos_info = 0;
config_len += sizeof(struct mwifiex_ie_types_qos_info);
- if (params->ht_capa) {
+ if (params->link_sta_params.ht_capa) {
ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
config_len);
ht_capab->header.type =
cpu_to_le16(WLAN_EID_HT_CAPABILITY);
ht_capab->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
- memcpy(&ht_capab->ht_cap, params->ht_capa,
+ memcpy(&ht_capab->ht_cap, params->link_sta_params.ht_capa,
sizeof(struct ieee80211_ht_cap));
config_len += sizeof(struct mwifiex_ie_types_htcap);
}
- if (params->supported_rates && params->supported_rates_len) {
+ if (params->link_sta_params.supported_rates &&
+ params->link_sta_params.supported_rates_len) {
tlv_rates = (struct host_cmd_tlv_rates *)(pos +
config_len);
tlv_rates->header.type =
cpu_to_le16(WLAN_EID_SUPP_RATES);
tlv_rates->header.len =
- cpu_to_le16(params->supported_rates_len);
- memcpy(tlv_rates->rates, params->supported_rates,
- params->supported_rates_len);
+ cpu_to_le16(params->link_sta_params.supported_rates_len);
+ memcpy(tlv_rates->rates,
+ params->link_sta_params.supported_rates,
+ params->link_sta_params.supported_rates_len);
config_len += sizeof(struct host_cmd_tlv_rates) +
- params->supported_rates_len;
+ params->link_sta_params.supported_rates_len;
}
if (params->ext_capab && params->ext_capab_len) {
@@ -1826,14 +1816,14 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
config_len += sizeof(struct mwifiex_ie_types_extcap) +
params->ext_capab_len;
}
- if (params->vht_capa) {
+ if (params->link_sta_params.vht_capa) {
vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
config_len);
vht_capab->header.type =
cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
vht_capab->header.len =
cpu_to_le16(sizeof(struct ieee80211_vht_cap));
- memcpy(&vht_capab->vht_cap, params->vht_capa,
+ memcpy(&vht_capab->vht_cap, params->link_sta_params.vht_capa,
sizeof(struct ieee80211_vht_cap));
config_len += sizeof(struct mwifiex_ie_types_vhtcap);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 1a4ae8a42a31..7b69d27e0c0e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station command response handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 7d42c5d2dbf6..b95e90a7d124 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station event handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 4062e515697a..a2ad2b53f016 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: functions for station ioctl
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 0d2adf887900..13659b02ba88 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station RX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <uapi/linux/ipv6.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index a9b5eb992220..13c0e67ededf 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station TX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index a8479b879382..54c204608dab 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: generic TX/RX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 630e1679c3f9..e78a201cd150 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP specific command handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 2e25d72dcac5..58ef5020a46a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP event handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 4e49ed21c5ce..e495f7eaea03 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP TX and RX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 8f01fcbe9396..c2f2ce2a3f95 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: USB specific handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index 61a96b7fbf21..7e920b51994c 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This file contains definitions for mwifiex USB interface driver.
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_USB_H
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index d5edb1e89f5b..94c2d219835d 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: utility functions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index 44aa80eb7827..4699c505c0a0 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: utility functions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_UTIL_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 0b375608df7d..00a5679b5c51 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: WMM
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 1cb3d1804758..4f53a271dae0 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: WMM
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_WMM_H_
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 36c24d17136c..4dc7e2e53b81 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -1880,7 +1880,7 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
* packets ever exceeds the ampdu_min_traffic threshold, we will allow
* an ampdu stream to be started.
*/
- if (jiffies - tx_stats->start_time > HZ) {
+ if (time_after(jiffies, (unsigned long)tx_stats->start_time + HZ)) {
tx_stats->pkts = 0;
tx_stats->start_time = 0;
} else
@@ -3250,7 +3250,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->aid = cpu_to_le16(vif->bss_conf.aid);
+ cmd->aid = cpu_to_le16(vif->cfg.aid);
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
if (vif->bss_conf.use_cts_prot) {
@@ -5013,13 +5013,13 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/*
* No need to capture a beacon if we're no longer associated.
*/
- if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->cfg.assoc)
priv->capture_beacon = false;
/*
* Get the AP's legacy and MCS rates.
*/
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
struct ieee80211_sta *ap;
rcu_read_lock();
@@ -5085,7 +5085,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (vif->bss_conf.assoc && !priv->ap_fw &&
+ if (vif->cfg.assoc && !priv->ap_fw &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_HT))) {
rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
@@ -5093,7 +5093,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
@@ -5147,7 +5147,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
struct sk_buff *skb;
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, 0);
if (skb != NULL) {
mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
kfree_skb(skb);
@@ -5163,7 +5163,7 @@ out:
static void
mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
if (vif->type == NL80211_IFTYPE_STATION)
mwl8k_bss_info_changed_sta(hw, vif, info, changed);
@@ -5365,7 +5365,8 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mwl8k_priv *priv = hw->priv;
@@ -6050,7 +6051,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
goto fail;
for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) {
- rc = mwl8k_conf_tx(hw, NULL, i, &priv->wmm_params[i]);
+ rc = mwl8k_conf_tx(hw, NULL, 0, i, &priv->wmm_params[i]);
if (rc)
goto fail;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 30de8be4aac1..40cb91097b2e 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -349,8 +349,8 @@ error:
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct ieee80211_tx_status status = {
.sta = sta,
@@ -406,7 +406,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
@@ -791,10 +791,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_worker_disable(&dev->tx_worker);
netif_napi_del(&dev->tx_napi);
- for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
- mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
- if (dev->phy2)
- mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ struct mt76_phy *phy = dev->phys[i];
+ int j;
+
+ if (!phy)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
+ mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
}
for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index a499861918fa..9bc8758573fc 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -162,10 +162,13 @@ mt76_find_power_limits_node(struct mt76_dev *dev)
}
if (mt76_string_prop_find(country, dev->alpha2) ||
- mt76_string_prop_find(regd, region_name))
+ mt76_string_prop_find(regd, region_name)) {
+ of_node_put(np);
return cur;
+ }
}
+ of_node_put(np);
return fallback;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 18b5de55334c..253cbc1956d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -178,6 +178,12 @@ static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
{ .start_freq = 5350, .end_freq = 5470, },
{ .start_freq = 5470, .end_freq = 5725, },
{ .start_freq = 5725, .end_freq = 5950, },
+ { .start_freq = 5945, .end_freq = 6165, },
+ { .start_freq = 6165, .end_freq = 6405, },
+ { .start_freq = 6405, .end_freq = 6525, },
+ { .start_freq = 6525, .end_freq = 6705, },
+ { .start_freq = 6705, .end_freq = 6865, },
+ { .start_freq = 6865, .end_freq = 7125, },
};
static const struct cfg80211_sar_capa mt76_sar_capa = {
@@ -210,6 +216,7 @@ static int mt76_led_init(struct mt76_dev *dev)
if (!of_property_read_u32(np, "led-sources", &led_pin))
dev->led_pin = led_pin;
dev->led_al = of_property_read_bool(np, "led-active-low");
+ of_node_put(np);
}
return led_classdev_register(dev->dev, &dev->led_cdev);
@@ -260,6 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
@@ -444,7 +453,7 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
- const struct ieee80211_ops *ops)
+ const struct ieee80211_ops *ops, u8 band_idx)
{
struct ieee80211_hw *hw;
unsigned int phy_size;
@@ -459,6 +468,7 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
phy->dev = dev;
phy->hw = hw;
phy->priv = hw->priv + phy_size;
+ phy->band_idx = band_idx;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->interface_modes =
@@ -511,7 +521,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
if (ret)
return ret;
- phy->dev->phy2 = phy;
+ phy->dev->phys[phy->band_idx] = phy;
return 0;
}
@@ -523,7 +533,7 @@ void mt76_unregister_phy(struct mt76_phy *phy)
mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(phy->hw);
- dev->phy2 = NULL;
+ dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
@@ -550,6 +560,8 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
phy = &dev->phy;
phy->dev = dev;
phy->hw = hw;
+ phy->band_idx = MT_BAND0;
+ dev->phys[phy->band_idx] = phy;
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
@@ -731,7 +743,7 @@ static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
+ struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
dev_kfree_skb(skb);
@@ -1007,10 +1019,10 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(mstat.chain_signal));
*sta = wcid_to_sta(mstat.wcid);
- *hw = mt76_phy_hw(dev, mstat.ext_phy);
+ *hw = mt76_phy_hw(dev, mstat.phy_idx);
}
-static int
+static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -1020,10 +1032,13 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
int ret;
if (!(status->flag & RX_FLAG_DECRYPTED))
- return 0;
+ return;
+
+ if (status->flag & RX_FLAG_ONLY_MONITOR)
+ return;
if (!wcid || !wcid->rx_check_pn)
- return 0;
+ return;
security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
if (status->flag & RX_FLAG_8023)
@@ -1037,7 +1052,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
*/
if (ieee80211_is_frag(hdr) &&
!ieee80211_is_first_frag(hdr->frame_control))
- return 0;
+ return;
}
/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
@@ -1054,15 +1069,15 @@ skip_hdr_check:
BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
sizeof(status->iv));
- if (ret <= 0)
- return -EINVAL; /* replay */
+ if (ret <= 0) {
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+ return;
+ }
memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
if (status->flag & RX_FLAG_IV_STRIPPED)
status->flag |= RX_FLAG_PN_VALIDATED;
-
- return 0;
}
static void
@@ -1167,7 +1182,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
bool ps;
- hw = mt76_phy_hw(dev, status->ext_phy);
+ hw = mt76_phy_hw(dev, status->phy_idx);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
!(status->flag & RX_FLAG_8023)) {
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
@@ -1235,11 +1250,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
while ((skb = __skb_dequeue(frames)) != NULL) {
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
- if (mt76_check_ccmp_pn(skb)) {
- dev_kfree_skb(skb);
- continue;
- }
-
+ mt76_check_ccmp_pn(skb);
skb_shinfo(skb)->frag_list = NULL;
mt76_rx_convert(dev, skb, &hw, &sta);
ieee80211_rx_list(hw, sta, skb, &list);
@@ -1285,10 +1296,11 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
static int
-mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool ext_phy)
+mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct mt76_dev *dev = phy->dev;
int ret;
int i;
@@ -1309,9 +1321,9 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
ewma_signal_init(&wcid->rssi);
- if (ext_phy)
+ if (phy->band_idx == MT_BAND1)
mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
- wcid->ext_phy = ext_phy;
+ wcid->phy_idx = phy->band_idx;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
mt76_packet_id_init(wcid);
@@ -1356,11 +1368,10 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
- return mt76_sta_add(dev, vif, sta, ext_phy);
+ return mt76_sta_add(phy, vif, sta);
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
@@ -1459,7 +1470,7 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
+ if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -1481,7 +1492,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt76_dev *dev = priv;
- if (!vif->csa_active)
+ if (!vif->bss_conf.csa_active)
return;
dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 914ee278e6e2..a8cafa39a56d 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -7,17 +7,19 @@
struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
- int data_len, gfp_t gfp)
+ int len, int data_len, gfp_t gfp)
{
const struct mt76_mcu_ops *ops = dev->mcu_ops;
- int length = ops->headroom + data_len + ops->tailroom;
struct sk_buff *skb;
- skb = alloc_skb(length, gfp);
+ len = max_t(int, len, data_len);
+ len = ops->headroom + len + ops->tailroom;
+
+ skb = alloc_skb(len, gfp);
if (!skb)
return NULL;
- memset(skb->head, 0, length);
+ memset(skb->head, 0, len);
skb_reserve(skb, ops->headroom);
if (data && data_len)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4e8997c45c1b..4da77d47b0a6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -99,12 +99,21 @@ enum mt76_rxq_id {
MT_RXQ_MAIN,
MT_RXQ_MCU,
MT_RXQ_MCU_WA,
- MT_RXQ_EXT,
- MT_RXQ_EXT_WA,
+ MT_RXQ_BAND1,
+ MT_RXQ_BAND1_WA,
MT_RXQ_MAIN_WA,
+ MT_RXQ_BAND2,
+ MT_RXQ_BAND2_WA,
__MT_RXQ_MAX
};
+enum mt76_band_id {
+ MT_BAND0,
+ MT_BAND1,
+ MT_BAND2,
+ __MT_MAX_BAND
+};
+
enum mt76_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
@@ -185,7 +194,6 @@ struct mt76_queue {
u8 buf_offset;
u8 hw_idx;
- u8 qid;
u8 flags;
u32 wed_regs;
@@ -223,8 +231,8 @@ struct mt76_queue_ops {
u32 ring_base);
int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta);
int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info);
@@ -254,7 +262,7 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
-#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
+#define MT_TX_HW_QUEUE_PHY GENMASK(3, 2)
DECLARE_EWMA(signal, 10, 8);
@@ -279,8 +287,8 @@ struct mt76_wcid {
u8 hw_key_idx2;
u8 sta:1;
- u8 ext_phy:1;
u8 amsdu:1;
+ u8 phy_idx:2;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
@@ -573,7 +581,7 @@ struct mt76_rx_status {
u8 iv[6];
- u8 ext_phy:1;
+ u8 phy_idx:2;
u8 aggr:1;
u8 qos_ctl;
u16 seqno;
@@ -660,6 +668,7 @@ struct mt76_phy {
void *priv;
unsigned long state;
+ u8 band_idx;
struct mt76_queue *q_tx[__MT_TXQ_MAX];
@@ -699,8 +708,7 @@ struct mt76_phy {
struct mt76_dev {
struct mt76_phy phy; /* must be first */
-
- struct mt76_phy *phy2;
+ struct mt76_phy *phys[__MT_MAX_BAND];
struct ieee80211_hw *hw;
@@ -885,16 +893,6 @@ extern struct ieee80211_rate mt76_rates[12];
#define mt76_hw(dev) (dev)->mphy.hw
-static inline struct ieee80211_hw *
-mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
-{
- if (wcid <= MT76_N_WCIDS &&
- mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
- return dev->phy2->hw;
-
- return dev->phy.hw;
-}
-
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
@@ -945,7 +943,8 @@ void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
- const struct ieee80211_ops *ops);
+ const struct ieee80211_ops *ops,
+ u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates);
@@ -977,7 +976,6 @@ static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = qid;
phy->q_tx[qid] = q;
return 0;
@@ -992,24 +990,25 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = __MT_TXQ_MAX + qid;
dev->q_mcu[qid] = q;
return 0;
}
static inline struct mt76_phy *
-mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
- if (phy_ext && dev->phy2)
- return dev->phy2;
+ if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
+ (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
+ return dev->phys[phy_idx];
+
return &dev->phy;
}
static inline struct ieee80211_hw *
-mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
- return mt76_dev_phy(dev, phy_ext)->hw;
+ return mt76_dev_phy(dev, phy_idx)->hw;
}
static inline u8 *
@@ -1120,13 +1119,17 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
- if (skb == dev->phy.test.tx_skb)
- *hw = dev->phy.hw;
- else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
- *hw = dev->phy2->hw;
- else
- return false;
- return true;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ struct mt76_phy *phy = dev->phys[i];
+
+ if (phy && skb == phy->test.tx_skb) {
+ *hw = dev->phys[i]->hw;
+ return true;
+ }
+ }
+ return false;
#else
return false;
#endif
@@ -1242,12 +1245,10 @@ static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hw *hw = dev->phy.hw;
-
- if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
- hw = dev->phy2->hw;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);
- info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+ info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;
return hw;
}
@@ -1346,12 +1347,12 @@ int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
- int data_len, gfp_t gfp);
+ int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len)
{
- return __mt76_mcu_msg_alloc(dev, data, data_len, GFP_KERNEL);
+ return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index b5e8308e0cc7..b65b0a88c1de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -20,12 +20,12 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!(mdev->beacon_mask & BIT(mvif->idx)))
return;
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif, 0);
if (!skb)
return;
- mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], skb,
- &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+ MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
@@ -123,7 +123,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
- mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, q, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
spin_unlock(&q->lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 91425b454cae..051715ed90dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -297,7 +297,7 @@ mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
static void
mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct mt7603_dev *dev = hw->priv;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
@@ -305,7 +305,7 @@ mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
- if (info->assoc || info->ibss_joined) {
+ if (vif->cfg.assoc || vif->cfg.ibss_joined) {
mt76_wr(dev, MT_BSSID0(mvif->idx),
get_unaligned_le32(info->bssid));
mt76_wr(dev, MT_BSSID1(mvif->idx),
@@ -527,7 +527,8 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
static int
-mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7603_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index ce19f57de475..f1914431ff7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -44,7 +44,7 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
static int
mt7615_init_tx_queues(struct mt7615_dev *dev)
{
- int ret, i;
+ int ret;
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
@@ -54,14 +54,11 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
if (!is_mt7615(&dev->mt76))
return mt7622_init_tx_queues_multi(dev);
- ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE,
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
- for (i = 1; i <= MT_TXQ_PSD ; i++)
- dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
-
return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index a06dcbb8c673..07a1fea94f66 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -401,6 +401,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (is_mt7615(&phy->dev->mt76))
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
@@ -458,7 +459,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
return 0;
mt7615_cap_dbdc_enable(dev);
- mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops, MT_BAND1);
if (!mphy)
return -ENOMEM;
@@ -508,7 +509,7 @@ EXPORT_SYMBOL_GPL(mt7615_register_ext_phy);
void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
{
struct mt7615_phy *phy = mt7615_ext_phy(dev);
- struct mt76_phy *mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = dev->mt76.phys[MT_BAND1];
if (!phy)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index bd687f7de628..ad6c7d632eed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -109,6 +109,7 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
int i;
for (i = 0; i < 4; i++) {
@@ -118,8 +119,8 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
dev->mt76.phy.survey_time = ktime_get_boottime();
- if (dev->mt76.phy2)
- dev->mt76.phy2->survey_time = ktime_get_boottime();
+ if (mphy_ext)
+ mphy_ext->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@@ -336,9 +337,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7615_phy *phy = &dev->phy;
- struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
+ struct mt7615_phy *phy2;
__le32 *rxd = (__le32 *)skb->data;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
@@ -355,6 +356,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+
+ phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (!phy2)
phy_idx = 0;
else if (phy2->chfreq == phy->chfreq)
@@ -498,9 +501,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (phy_idx == 1 && phy2) {
- mphy = dev->mt76.phy2;
+ mphy = dev->mt76.phys[MT_BAND1];
phy = phy2;
- status->ext_phy = true;
+ status->phy_idx = phy_idx;
}
if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
@@ -717,13 +720,14 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key, bool beacon)
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, bool beacon)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
- bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
bool is_mmio = mt76_is_mmio(&dev->mt76);
@@ -746,18 +750,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
tx_count = msta->rate_count;
}
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
if (beacon) {
p_fmt = MT_TX_TYPE_FW;
- q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+ } else if (qid >= MT_TXQ_PSD) {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
+ q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
@@ -876,60 +880,6 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);
-static void
-mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
-{
- int i;
-
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
-mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
-{
- u32 last_mask;
- int i;
-
- last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;
-
- for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
- struct mt7615_txp_ptr *ptr = &txp->ptr[i];
- bool last;
- u16 len;
-
- len = le16_to_cpu(ptr->len0);
- last = len & last_mask;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
- DMA_TO_DEVICE);
- if (last)
- break;
-
- len = le16_to_cpu(ptr->len1);
- last = len & last_mask;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
- DMA_TO_DEVICE);
- if (last)
- break;
- }
-}
-
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
-{
- struct mt7615_txp_common *txp;
-
- txp = mt7615_txwi_to_txp(dev, t);
- if (is_mt7615(dev))
- mt7615_txp_skb_unmap_fw(dev, &txp->fw);
- else
- mt7615_txp_skb_unmap_hw(dev, &txp->hw);
-}
-EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);
-
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
@@ -1439,8 +1389,8 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
if (sta->rate_probe) {
struct mt7615_phy *phy = &dev->phy;
- if (sta->wcid.ext_phy && dev->mt76.phy2)
- phy = dev->mt76.phy2->priv;
+ if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
+ phy = dev->mt76.phys[MT_BAND1]->priv;
mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
}
@@ -1482,8 +1432,8 @@ out:
fallthrough;
case MT_PHY_TYPE_OFDM:
mphy = &dev->mphy;
- if (sta->wcid.ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
@@ -1590,8 +1540,8 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcidx >= MT7615_WTBL_STA || !sta)
goto out;
- if (wcid->ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
if (mt7615_fill_txs(dev, msta, &info, txs_data))
ieee80211_tx_status_noskb(mphy->hw, sta, &info);
@@ -1608,7 +1558,7 @@ mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
u32 val;
u8 wcid;
- mt7615_txp_skb_unmap(mdev, txwi);
+ mt76_connac_txp_skb_unmap(mdev, txwi);
if (!txwi->skb)
goto out;
@@ -1638,7 +1588,8 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)data;
+ struct mt76_connac_tx_free *free = data;
+ void *tx_token = data + sizeof(*free);
void *end = data + len;
u8 i, count;
@@ -1652,7 +1603,7 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
if (is_mt7615(&dev->mt76)) {
- __le16 *token = &free->token[0];
+ __le16 *token = tx_token;
if (WARN_ON_ONCE((void *)&token[count] > end))
return;
@@ -1660,7 +1611,7 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
- __le32 *token = (__le32 *)&free->token[0];
+ __le32 *token = tx_token;
if (WARN_ON_ONCE((void *)&token[count] > end))
return;
@@ -2007,6 +1958,7 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
static void mt7615_update_survey(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
ktime_t cur_time;
/* MT7615 can only update both phys simultaneously
@@ -2014,14 +1966,14 @@ static void mt7615_update_survey(struct mt7615_dev *dev)
*/
mt7615_phy_update_channel(&mdev->phy, 0);
- if (mdev->phy2)
- mt7615_phy_update_channel(mdev->phy2, 1);
+ if (mphy_ext)
+ mt7615_phy_update_channel(mphy_ext, 1);
cur_time = ktime_get_boottime();
mt76_update_survey_active_time(&mdev->phy, cur_time);
- if (mdev->phy2)
- mt76_update_survey_active_time(mdev->phy2, cur_time);
+ if (mphy_ext)
+ mt76_update_survey_active_time(mphy_ext, cur_time);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
@@ -2094,8 +2046,10 @@ void mt7615_pm_wake_work(struct work_struct *work)
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i)
napi_schedule(&mdev->napi[i]);
+ local_bh_enable();
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
false);
@@ -2282,6 +2236,7 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
enum mt76_dfs_state dfs_state, prev_state;
@@ -2292,13 +2247,13 @@ int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ dfs_state < MT_DFS_STATE_CAC)
+ dfs_state = MT_DFS_STATE_ACTIVE;
if (prev_state == dfs_state)
return 0;
- if (prev_state == MT_DFS_STATE_UNKNOWN)
- mt7615_dfs_stop_radar_detector(phy);
-
if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index e241c613091c..880c9f74a7f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -165,12 +165,6 @@ enum tx_phy_bandwidth {
#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
#define MT_CT_INFO_HSR2_TX BIT(4)
-#define MT_TXD_SIZE (8 * 4)
-
-#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
-#define MT_USB_HDR_SIZE 4
-#define MT_USB_TAIL_SIZE 4
-
#define MT_TXD0_P_IDX BIT(31)
#define MT_TXD0_Q_IDX GENMASK(30, 26)
#define MT_TXD0_UDP_TCP_SUM BIT(24)
@@ -250,56 +244,6 @@ enum tx_phy_bandwidth {
#define MT_TX_RATE_MODE GENMASK(8, 6)
#define MT_TX_RATE_IDX GENMASK(5, 0)
-#define MT_TXP_MAX_BUF_NUM 6
-#define MT_HW_TXP_MAX_MSDU_NUM 4
-#define MT_HW_TXP_MAX_BUF_NUM 4
-
-#define MT_MSDU_ID_VALID BIT(15)
-
-#define MT_TXD_LEN_MASK GENMASK(11, 0)
-#define MT_TXD_LEN_MSDU_LAST BIT(14)
-#define MT_TXD_LEN_AMSDU_LAST BIT(15)
-/* mt7663 */
-#define MT_TXD_LEN_LAST BIT(15)
-
-struct mt7615_txp_ptr {
- __le32 buf0;
- __le16 len0;
- __le16 len1;
- __le32 buf1;
-} __packed __aligned(4);
-
-struct mt7615_hw_txp {
- __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
- struct mt7615_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
-} __packed __aligned(4);
-
-struct mt7615_fw_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- u8 rept_wds_wcid;
- u8 rsv;
- u8 nbuf;
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
-struct mt7615_txp_common {
- union {
- struct mt7615_fw_txp fw;
- struct mt7615_hw_txp hw;
- };
-};
-
-struct mt7615_tx_free {
- __le16 rx_byte_cnt;
- __le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
- __le16 token[];
-} __packed __aligned(4);
-
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
#define MT_TXS0_PID GENMASK(31, 24)
@@ -385,19 +329,6 @@ struct mt7615_dfs_radar_spec {
struct mt7615_dfs_pattern radar_pattern[16];
};
-static inline struct mt7615_txp_common *
-mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
-}
-
static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
{
return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index a9c9b97d173e..9bf8545c8c17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -224,7 +224,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -282,26 +282,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
}
-static void mt7615_init_dfs_state(struct mt7615_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
-
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- return;
-
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
- if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
- mphy->chandef.width == chandef->width)
- return;
-
- phy->dfs_state = -1;
-}
-
int mt7615_set_channel(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
@@ -314,7 +294,6 @@ int mt7615_set_channel(struct mt7615_phy *phy)
set_bit(MT76_RESET, &phy->mt76->state);
- mt7615_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
@@ -494,7 +473,8 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
}
static int
-mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
@@ -576,7 +556,7 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -616,7 +596,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC)
- mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
+ mt7615_mac_set_beacon_filter(phy, vif, vif->cfg.assoc);
mt7615_mutex_release(dev);
}
@@ -650,7 +630,7 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.phy_idx = mvif->mt76.band_idx;
phy = mvif->mt76.band_idx ? mt7615_ext_phy(dev) : &dev->phy;
err = mt76_connac_pm_wake(phy->mt76, &dev->pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 97e2a85cb728..3dac76e6df4d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -40,18 +40,6 @@ struct mt7615_fw_trailer {
#define FW_START_DLYCAL BIT(1)
#define FW_START_WORKING_PDA_CR4 BIT(2)
-struct mt7663_fw_trailer {
- u8 chip_id;
- u8 eco_code;
- u8 n_region;
- u8 format_ver;
- u8 format_flag;
- u8 reserv[2];
- char fw_ver[10];
- char build_date[15];
- __le32 crc;
-} __packed;
-
struct mt7663_fw_buf {
__le32 crc;
__le32 d_img_size;
@@ -350,10 +338,11 @@ static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
}
mt7622_trigger_hif_int(dev, false);
-
- pm->stats.last_doze_event = jiffies;
- pm->stats.awake_time += pm->stats.last_doze_event -
- pm->stats.last_wake_event;
+ if (!err) {
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
+ }
out:
mutex_unlock(&pm->mutex);
@@ -363,7 +352,7 @@ out:
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active)
+ if (vif->bss_conf.csa_active)
ieee80211_csa_finish(vif);
}
@@ -380,7 +369,7 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
return;
if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
- mphy = dev->mt76.phy2;
+ mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -399,8 +388,11 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
!r->constant_prf_detected && !r->staggered_prf_detected)
return;
- if (r->band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (r->band_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
+ if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
+ return;
ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
@@ -456,8 +448,8 @@ mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_phy *phy;
struct mt76_phy *mphy;
- if (*seq_num & BIT(7) && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (*seq_num & BIT(7) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -482,8 +474,8 @@ mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
event = (struct mt7615_roc_tlv *)skb->data;
- if (event->dbdc_band && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (event->dbdc_band && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -507,8 +499,8 @@ mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
event = (struct mt76_connac_beacon_loss_event *)skb->data;
- if (band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (band_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -528,8 +520,8 @@ mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
event = (struct mt76_connac_mcu_bss_event *)skb->data;
- if (band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (band_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -706,7 +698,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
if (!enable)
goto out;
- skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!skb)
return -EINVAL;
@@ -716,13 +708,11 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
return -EINVAL;
}
- if (mvif->mt76.band_idx) {
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- }
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mvif->mt76.band_idx);
mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
- 0, NULL, true);
+ 0, NULL, 0, true);
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
@@ -855,6 +845,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct mt7615_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
+ bool new_entry = true;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
@@ -864,7 +855,13 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (IS_ERR(sskb))
return PTR_ERR(sskb);
- mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, true);
+ if (!sta) {
+ if (mvif->sta_added)
+ new_entry = false;
+ else
+ mvif->sta_added = true;
+ }
+ mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1076,7 +1073,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
if (!enable)
goto out;
- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
if (!skb)
return -EINVAL;
@@ -1087,7 +1084,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
}
mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
- wcid, NULL, 0, NULL, true);
+ wcid, NULL, 0, NULL, 0, true);
memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
@@ -1518,7 +1515,7 @@ static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev)
static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
{
u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL;
- const struct mt7663_fw_trailer *hdr;
+ const struct mt76_connac2_fw_trailer *hdr;
const struct mt7663_fw_buf *buf;
const struct firmware *fw;
const u8 *base_addr;
@@ -1534,9 +1531,7 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
goto out;
}
- hdr = (const struct mt7663_fw_trailer *)(fw->data + fw->size -
- FW_V3_COMMON_TAILER_SIZE);
-
+ hdr = (const void *)(fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE);
dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
hdr->fw_ver, hdr->build_date);
dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region);
@@ -2333,7 +2328,7 @@ int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy)
.bw = mt7615_mcu_chan_bw(chandef),
.band = chandef->center_freq1 > 4000,
- .dbdc_en = !!dev->mt76.phy2,
+ .dbdc_en = !!dev->mt76.phys[MT_BAND1],
};
u16 center_freq = chandef->center_freq1;
int freq_idx;
@@ -2454,7 +2449,7 @@ int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy)
.bw = mt7615_mcu_chan_bw(chandef),
.band = chandef->center_freq1 > 4000,
- .dbdc_en = !!dev->mt76.phy2,
+ .dbdc_en = !!dev->mt76.phys[MT_BAND1],
};
u16 center_freq = chandef->center_freq1;
int freq_idx;
@@ -2530,7 +2525,7 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
u8 pad;
} req = {
.bss_idx = mvif->mt76.idx,
- .aid = cpu_to_le16(vif->bss_conf.aid),
+ .aid = cpu_to_le16(vif->cfg.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 47863ae9f30b..615956acc6b5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -201,9 +201,6 @@ struct mt7615_mcu_rdd_report {
} hw_pulse[32];
};
-#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
-#define MCU_PKT_ID 0xa0
-
enum {
MCU_ATE_SET_FREQ_OFFSET = 0xa,
MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index a208035e197a..a784f9d9e935 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -186,14 +186,14 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_txp_common),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
- .tx_complete_skb = mt7615_tx_complete_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_check = mt7615_rx_check,
.rx_skb = mt7615_queue_rx_skb,
.rx_poll_complete = mt7615_rx_poll_complete,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 2e91f6a27d0f..060d52c81d9e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -141,6 +141,7 @@ struct mt7615_sta {
struct mt7615_vif {
struct mt76_vif mt76; /* must be first */
struct mt7615_sta sta;
+ bool sta_added;
};
struct mib_stats {
@@ -177,7 +178,6 @@ struct mt7615_phy {
u8 chfreq;
u8 rdd_state;
- int dfs_state;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@@ -345,7 +345,7 @@ mt7615_hw_dev(struct ieee80211_hw *hw)
static inline struct mt7615_phy *
mt7615_ext_phy(struct mt7615_dev *dev)
{
- struct mt76_phy *phy = dev->mt76.phy2;
+ struct mt76_phy *phy = dev->mt76.phys[MT_BAND1];
if (!phy)
return NULL;
@@ -477,7 +477,8 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key, bool beacon);
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, bool beacon);
void mt7615_mac_set_timing(struct mt7615_phy *phy);
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
@@ -507,7 +508,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7615_tx_worker(struct mt76_worker *w);
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -518,8 +518,6 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7615_mac_work(struct work_struct *work);
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *txwi);
int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index d1806f198aed..0019890fdb78 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -14,75 +14,6 @@
#include "../dma.h"
#include "mac.h"
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7615_dev *dev;
- struct mt7615_txp_common *txp;
- u16 token;
-
- dev = container_of(mdev, struct mt7615_dev, mt76);
- txp = mt7615_txwi_to_txp(mdev, e->txwi);
-
- if (is_mt7615(&dev->mt76))
- token = le16_to_cpu(txp->fw.token);
- else
- token = le16_to_cpu(txp->hw.msdu_id[0]) &
- ~MT_MSDU_ID_VALID;
-
- t = mt76_token_put(mdev, token);
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
-static void
-mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct mt7615_hw_txp *txp = txp_ptr;
- struct mt7615_txp_ptr *ptr = &txp->ptr[0];
- int i, nbuf = tx_info->nbuf - 1;
- u32 last_mask;
-
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->nbuf = 1;
-
- txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
-
- if (is_mt7663(&dev->mt76))
- last_mask = MT_TXD_LEN_LAST;
- else
- last_mask = MT_TXD_LEN_AMSDU_LAST |
- MT_TXD_LEN_MSDU_LAST;
-
- for (i = 0; i < nbuf; i++) {
- u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
- u32 addr = tx_info->buf[i + 1].addr;
-
- if (i == nbuf - 1)
- len |= last_mask;
-
- if (i & 1) {
- ptr->buf1 = cpu_to_le32(addr);
- ptr->len1 = cpu_to_le16(len);
- ptr++;
- } else {
- ptr->buf0 = cpu_to_le32(addr);
- ptr->len0 = cpu_to_le16(len);
- }
- }
-}
-
static void
mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
void *txp_ptr, u32 id)
@@ -91,7 +22,8 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt7615_fw_txp *txp = txp_ptr;
+ struct mt76_connac_fw_txp *txp = txp_ptr;
+ u8 *rept_wds_wcid = (u8 *)&txp->rept_wds_wcid;
int nbuf = tx_info->nbuf - 1;
int i;
@@ -122,7 +54,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
}
txp->token = cpu_to_le16(id);
- txp->rept_wds_wcid = 0xff;
+ *rept_wds_wcid = 0xff;
}
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
@@ -145,9 +77,10 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
struct mt7615_phy *phy = &dev->phy;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
- if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
- phy = mdev->phy2->priv;
+ if (phy_idx && mdev->phys[MT_BAND1])
+ phy = mdev->phys[MT_BAND1]->priv;
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
@@ -164,14 +97,14 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
- pid, key, false);
+ pid, key, qid, false);
txp = txwi + MT_TXD_SIZE;
- memset(txp, 0, sizeof(struct mt7615_txp_common));
+ memset(txp, 0, sizeof(struct mt76_connac_txp_common));
if (is_mt7615(&dev->mt76))
mt7615_write_fw_txp(dev, tx_info, txp, id);
else
- mt7615_write_hw_txp(dev, tx_info, txp, id);
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
tx_info->skb = DMA_DUMMY_DATA;
@@ -250,16 +183,18 @@ mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+
ieee80211_iterate_active_interfaces(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_update_vif_beacon, dev->mt76.hw);
- if (!dev->mt76.phy2)
+ if (!mphy_ext)
return;
- ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ ieee80211_iterate_active_interfaces(mphy_ext->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+ mt7615_update_vif_beacon, mphy_ext->hw);
}
void mt7615_mac_reset_work(struct work_struct *work)
@@ -268,9 +203,10 @@ void mt7615_mac_reset_work(struct work_struct *work)
struct mt76_phy *ext_phy;
struct mt7615_dev *dev;
unsigned long timeout;
+ int i;
dev = container_of(work, struct mt7615_dev, reset_work);
- ext_phy = dev->mt76.phy2;
+ ext_phy = dev->mt76.phys[MT_BAND1];
phy2 = ext_phy ? ext_phy->priv : NULL;
if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
@@ -299,8 +235,8 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt76_txq_schedule_all(ext_phy);
mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[0]);
- napi_disable(&dev->mt76.napi[1]);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
mt7615_mutex_acquire(dev);
@@ -330,11 +266,10 @@ void mt7615_mac_reset_work(struct work_struct *work)
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
- napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
- napi_enable(&dev->mt76.napi[1]);
- napi_schedule(&dev->mt76.napi[1]);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
local_bh_enable();
ieee80211_wake_queues(mt76_hw(dev));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 5a6d7829c6e0..0052d103e276 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -49,7 +49,7 @@ mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
__le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
- mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
+ mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, qid, false);
skb_push(skb, MT_USB_TXD_SIZE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 400ba514460e..75afcb469d3c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -12,9 +12,28 @@
#define MT76_CONNAC_MAX_SCHED_SCAN_SSID 10
#define MT76_CONNAC_MAX_SCAN_MATCH 16
+#define MT76_CONNAC_MAX_WMM_SETS 4
+
#define MT76_CONNAC_COREDUMP_TIMEOUT (HZ / 20)
#define MT76_CONNAC_COREDUMP_SZ (1300 * 1024)
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_USB_HDR_SIZE 4
+#define MT_USB_TAIL_SIZE 4
+
+#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_SDIO_TAIL_SIZE 8
+#define MT_SDIO_HDR_SIZE 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_LAST BIT(15)
+#define MT_TXD_LEN_MASK GENMASK(11, 0)
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
+
enum {
CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40,
@@ -90,6 +109,46 @@ struct mt76_connac_sta_key_conf {
u8 key[16];
};
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt76_connac_fw_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ __le16 rept_wds_wcid;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+struct mt76_connac_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
+
+struct mt76_connac_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt76_connac_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt76_connac_txp_common {
+ union {
+ struct mt76_connac_fw_txp fw;
+ struct mt76_connac_hw_txp hw;
+ };
+};
+
+struct mt76_connac_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ __le32 txd;
+} __packed __aligned(4);
+
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
static inline bool is_mt7922(struct mt76_dev *dev)
@@ -145,6 +204,19 @@ static inline bool is_connac_v1(struct mt76_dev *dev)
return is_mt7615(dev) || is_mt7663(dev) || is_mt7622(dev);
}
+static inline bool is_mt76_fw_txp(struct mt76_dev *dev)
+{
+ switch (mt76_chip(dev)) {
+ case 0x7961:
+ case 0x7922:
+ case 0x7663:
+ case 0x7622:
+ return false;
+ default:
+ return true;
+ }
+}
+
static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
{
static const u8 width_to_bw[] = {
@@ -170,12 +242,31 @@ static inline u8 mt76_connac_lmac_mapping(u8 ac)
return 3 - ac;
}
+static inline void *
+mt76_connac_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (void *)(txwi + MT_TXD_SIZE);
+}
+
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
struct mt76_wcid *wcid);
+static inline void mt76_connac_tx_cleanup(struct mt76_dev *dev)
+{
+ dev->queue_ops->tx_cleanup(dev, dev->q_mcu[MT_MCUQ_WM], false);
+ dev->queue_ops->tx_cleanup(dev, dev->q_mcu[MT_MCUQ_WA], false);
+}
+
static inline bool
mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
@@ -238,11 +329,36 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
mutex_unlock(&dev->mutex);
}
+int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
+ int ring_base, u32 flags);
+void mt76_connac_write_hw_txp(struct mt76_dev *dev,
+ struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id);
+void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e);
void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
struct mt76_connac_pm *pm,
struct mt76_wcid *wcid,
struct sk_buff *skb);
void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
+void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, int pid,
+ enum mt76_txq_id qid, u32 changed);
+bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data,
+ struct mt76_sta_stats *stats);
+void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
+ struct sk_buff *skb,
+ __le32 *rxv, u32 mode);
+int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
+ struct sk_buff *skb, u16 hdr_offset);
+int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv, u8 *mode);
#endif /* __MT76_CONNAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
new file mode 100644
index 000000000000..67ce216fb564
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#ifndef __MT76_CONNAC2_MAC_H
+#define __MT76_CONNAC2_MAC_H
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum {
+ MT_CTX0,
+ MT_HIF0 = 0x0,
+
+ MT_LMAC_AC00 = 0x0,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+};
+
+#define MT_TXD0_Q_IDX GENMASK(31, 25)
+#define MT_TXD0_PKT_FMT GENMASK(24, 23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_LONG_FORMAT BIT(31)
+#define MT_TXD1_TGID BIT(30)
+#define MT_TXD1_OWN_MAC GENMASK(29, 24)
+#define MT_TXD1_AMSDU BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_HDR_PAD GENMASK(19, 18)
+#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
+#define MT_TXD1_HDR_INFO GENMASK(15, 11)
+#define MT_TXD1_ETH_802_3 BIT(15)
+#define MT_TXD1_VTA BIT(10)
+#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_FIXED_RATE BIT(30)
+#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SW_POWER_MGMT BIT(29)
+#define MT_TXD3_BA_DISABLE BIT(28)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_TIMING_MEASURE BIT(5)
+#define MT_TXD3_DAS BIT(4)
+#define MT_TXD3_EEOSP BIT(3)
+#define MT_TXD3_EMRD BIT(2)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_MD BIT(15)
+#define MT_TXD5_ADD_BA BIT(14)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_TX_IBF BIT(31)
+#define MT_TXD6_TX_EBF BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 16)
+#define MT_TXD6_SGI GENMASK(15, 14)
+#define MT_TXD6_HELTF GENMASK(13, 12)
+#define MT_TXD6_LDPC BIT(11)
+#define MT_TXD6_SPE_ID_IDX BIT(10)
+#define MT_TXD6_ANT_ID GENMASK(7, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(1, 0)
+
+#define MT_TXD7_TXD_LEN GENMASK(31, 30)
+#define MT_TXD7_UDP_TCP_SUM BIT(29)
+#define MT_TXD7_IP_SUM BIT(28)
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TXD7_PSE_FID GENMASK(27, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_HW_AMSDU BIT(10)
+#define MT_TXD7_TX_TIME GENMASK(9, 0)
+
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TX_RATE_STBC BIT(13)
+#define MT_TX_RATE_NSS GENMASK(12, 10)
+#define MT_TX_RATE_MODE GENMASK(9, 6)
+#define MT_TX_RATE_SU_EXT_TONE BIT(5)
+#define MT_TX_RATE_DCM BIT(4)
+/* VHT/HE only use bits 0-3 */
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXS0_FIXED_RATE BIT(31)
+#define MT_TXS0_BW GENMASK(30, 29)
+#define MT_TXS0_TID GENMASK(28, 26)
+#define MT_TXS0_AMPDU BIT(25)
+#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TX_RATE GENMASK(13, 0)
+
+#define MT_TXS1_SEQNO GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
+#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
+
+#define MT_TXS2_BF_STATUS GENMASK(31, 30)
+#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27)
+#define MT_TXS2_SHARED_ANTENNA BIT(26)
+#define MT_TXS2_WCID GENMASK(25, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_PID GENMASK(31, 24)
+#define MT_TXS3_ANT_ID GENMASK(23, 0)
+
+#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
+
+/* RXD DW1 */
+#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
+#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
+#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
+#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
+#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
+#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
+#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
+#define MT_RXD1_NORMAL_CM BIT(23)
+#define MT_RXD1_NORMAL_CLM BIT(24)
+#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
+#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
+#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
+#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
+#define MT_RXD1_NORMAL_SPP_EN BIT(29)
+#define MT_RXD1_NORMAL_ADD_OM BIT(30)
+#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
+
+/* RXD DW2 */
+#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
+#define MT_RXD2_NORMAL_CO_ANT BIT(6)
+#define MT_RXD2_NORMAL_BF_CQI BIT(7)
+#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
+#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
+#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
+#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
+#define MT_RXD2_NORMAL_MU_BAR BIT(21)
+#define MT_RXD2_NORMAL_SW_BIT BIT(22)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
+#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
+
+/* RXD DW4 */
+#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
+#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD4_NORMAL_CLS BIT(10)
+#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+
+#define MT_RXV_HDR_BAND_IDX BIT(24)
+
+/* RXD DW3 */
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
+#define MT_RXD3_NORMAL_U2M BIT(0)
+#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
+#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
+#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
+#define MT_RXD3_NORMAL_AMSDU BIT(22)
+#define MT_RXD3_NORMAL_MESH BIT(23)
+#define MT_RXD3_NORMAL_MHCP BIT(24)
+#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
+#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
+#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
+#define MT_RXD3_NORMAL_MORE BIT(28)
+#define MT_RXD3_NORMAL_UNWANT BIT(29)
+#define MT_RXD3_NORMAL_RX_DROP BIT(30)
+#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
+
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
+#define MT_RXD6_TA_LO GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
+
+/* P-RXV DW0 */
+#define MT_PRXV_TX_RATE GENMASK(6, 0)
+#define MT_PRXV_TX_DCM BIT(4)
+#define MT_PRXV_TX_ER_SU_106T BIT(5)
+#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_TXBF BIT(10)
+#define MT_PRXV_HT_AD_CODE BIT(11)
+#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
+
+#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
+#define MT_PRXV_HT_SGI GENMASK(16, 15)
+#define MT_PRXV_HT_STBC GENMASK(23, 22)
+#define MT_PRXV_TX_MODE GENMASK(27, 24)
+#define MT_PRXV_DCM BIT(17)
+#define MT_PRXV_NUM_RX GENMASK(20, 18)
+
+/* P-RXV DW1 */
+#define MT_PRXV_RCPI3 GENMASK(31, 24)
+#define MT_PRXV_RCPI2 GENMASK(23, 16)
+#define MT_PRXV_RCPI1 GENMASK(15, 8)
+#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC GENMASK(1, 0)
+#define MT_CRXV_TX_MODE GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
+#define MT_CRXV_HE_UPLINK BIT(31)
+
+#define MT_CRXV_HE_RU0 GENMASK(7, 0)
+#define MT_CRXV_HE_RU1 GENMASK(15, 8)
+#define MT_CRXV_HE_RU2 GENMASK(23, 16)
+#define MT_CRXV_HE_RU3 GENMASK(31, 24)
+
+#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
+
+#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG BIT(13)
+#define MT_CRXV_HE_DOPPLER BIT(16)
+
+#define MT_CRXV_SNR GENMASK(18, 13)
+#define MT_CRXV_FOE_LO GENMASK(31, 19)
+#define MT_CRXV_FOE_HI GENMASK(6, 0)
+#define MT_CRXV_FOE_SHIFT 13
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+#define MT_CT_INFO_FROM_HOST BIT(7)
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0x20,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+#endif /* __MT76_CONNAC2_MAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 306e9eaea917..18dea8e1fb20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -2,6 +2,12 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include "mt76_connac.h"
+#include "mt76_connac2_mac.h"
+#include "dma.h"
+
+#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
+ IEEE80211_RADIOTAP_HE_##f)
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
@@ -115,3 +121,917 @@ void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_connac_pm_dequeue_skbs);
+
+void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_connac_txp_common *txp;
+ struct mt76_txwi_cache *t;
+ u16 token;
+
+ txp = mt76_connac_txwi_to_txp(mdev, e->txwi);
+ if (is_mt76_fw_txp(mdev))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
+ t = mt76_token_put(mdev, token);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_tx_complete_skb);
+
+void mt76_connac_write_hw_txp(struct mt76_dev *dev,
+ struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct mt76_connac_hw_txp *txp = txp_ptr;
+ struct mt76_connac_txp_ptr *ptr = &txp->ptr[0];
+ int i, nbuf = tx_info->nbuf - 1;
+ u32 last_mask;
+
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+ if (is_mt7663(dev) || is_mt7921(dev))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_AMSDU_LAST |
+ MT_TXD_LEN_MSDU_LAST;
+
+ for (i = 0; i < nbuf; i++) {
+ u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
+ u32 addr = tx_info->buf[i + 1].addr;
+
+ if (i == nbuf - 1)
+ len |= last_mask;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_write_hw_txp);
+
+static void
+mt76_connac_txp_skb_unmap_fw(struct mt76_dev *mdev,
+ struct mt76_connac_fw_txp *txp)
+{
+ struct device *dev = is_connac_v1(mdev) ? mdev->dev : mdev->dma_dev;
+ int i;
+
+ for (i = 0; i < txp->nbuf; i++)
+ dma_unmap_single(dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+static void
+mt76_connac_txp_skb_unmap_hw(struct mt76_dev *dev,
+ struct mt76_connac_hw_txp *txp)
+{
+ u32 last_mask;
+ int i;
+
+ if (is_mt7663(dev) || is_mt7921(dev))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_MSDU_LAST;
+
+ for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
+ struct mt76_connac_txp_ptr *ptr = &txp->ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt76_connac_txp_common *txp;
+
+ txp = mt76_connac_txwi_to_txp(dev, t);
+ if (is_mt76_fw_txp(dev))
+ mt76_connac_txp_skb_unmap_fw(dev, &txp->fw);
+ else
+ mt76_connac_txp_skb_unmap_hw(dev, &txp->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_txp_skb_unmap);
+
+int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
+ int ring_base, u32 flags)
+{
+ int i, err;
+
+ err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base, flags);
+ if (err < 0)
+ return err;
+
+ for (i = 1; i <= MT_TXQ_PSD; i++)
+ phy->q_tx[i] = phy->q_tx[0];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
+
+static u16
+mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
+{
+ u8 mode = 0, band = mphy->chandef.chan->band;
+ int rateidx = 0, mcast_rate;
+
+ if (!vif)
+ goto legacy;
+
+ if (is_mt7921(mphy->dev)) {
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ goto legacy;
+ }
+
+ if (beacon) {
+ struct cfg80211_bitrate_mask *mask;
+
+ mask = &vif->bss_conf.beacon_tx_rate;
+ if (hweight16(mask->control[band].he_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HE_SU;
+ goto out;
+ } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_VHT;
+ goto out;
+ } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HT;
+ goto out;
+ } else if (hweight32(mask->control[band].legacy) == 1) {
+ rateidx = ffs(mask->control[band].legacy) - 1;
+ goto legacy;
+ }
+ }
+
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast && mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+legacy:
+ rateidx = mt76_calculate_default_rate(mphy, rateidx);
+ mode = rateidx >> 8;
+ rateidx &= GENMASK(7, 0);
+
+out:
+ return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
+ FIELD_PREP(MT_TX_RATE_MODE, mode);
+}
+
+static void
+mt76_connac2_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
+ struct mt76_wcid *wcid)
+{
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ u8 fc_type, fc_stype;
+ u16 ethertype;
+ bool wmm = false;
+ u32 val;
+
+ if (wcid->sta) {
+ struct ieee80211_sta *sta;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ wmm = sta->wme;
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
+ val |= MT_TXD1_ETH_802_3;
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = IEEE80211_FTYPE_DATA >> 2;
+ fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+
+ txwi[2] |= cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+
+ txwi[7] |= cpu_to_le32(val);
+}
+
+static void
+mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ __le16 fc = hdr->frame_control;
+ u8 fc_type, fc_stype;
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+
+ txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
+ tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
+ u16 control = le16_to_cpu(bar->control);
+
+ tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+
+ if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD2_BIP;
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+
+ if (!ieee80211_is_data(fc) || multicast ||
+ info->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ val |= MT_TXD2_FIX_RATE;
+
+ txwi[2] |= cpu_to_le32(val);
+
+ if (ieee80211_is_beacon(fc)) {
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
+ txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+ if (!is_mt7921(dev))
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
+ 0x18));
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
+ }
+
+ if (mt76_is_mmio(dev)) {
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] |= cpu_to_le32(val);
+ } else {
+ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ txwi[8] |= cpu_to_le32(val);
+ }
+}
+
+void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, int pid,
+ enum mt76_txq_id qid, u32 changed)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->phy;
+ u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
+ u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
+ bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED));
+ bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY));
+
+ if (vif) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ wmm_idx = mvif->wmm_idx;
+ band_idx = mvif->band_idx;
+ }
+
+ if (phy_idx && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
+
+ if (inband_disc) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_ALTX0;
+ } else if (beacon) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_BCN0;
+ } else if (qid >= MT_TXQ_PSD) {
+ p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
+ q_idx = MT_LMAC_ALTX0;
+ } else {
+ p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
+ q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+ if (!is_mt7921(dev))
+ val |= MT_TXD1_VTA;
+ if (phy_idx || band_idx)
+ val |= MT_TXD1_TGID;
+
+ txwi[1] = cpu_to_le32(val);
+ txwi[2] = 0;
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);
+ if (!is_mt7921(dev))
+ val |= MT_TXD3_SW_POWER_MGMT;
+ if (key)
+ val |= MT_TXD3_PROTECT_FRAME;
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ val |= MT_TXD3_NO_ACK;
+
+ txwi[3] = cpu_to_le32(val);
+ txwi[4] = 0;
+
+ val = FIELD_PREP(MT_TXD5_PID, pid);
+ if (pid >= MT_PACKET_ID_FIRST)
+ val |= MT_TXD5_TX_STATUS_HOST;
+
+ txwi[5] = cpu_to_le32(val);
+ txwi[6] = 0;
+ txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
+
+ if (is_8023)
+ mt76_connac2_mac_write_txwi_8023(txwi, skb, wcid);
+ else
+ mt76_connac2_mac_write_txwi_80211(dev, txwi, skb, key);
+
+ if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
+ /* Fixed rate is available just for 802.11 txd */
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ multicast);
+ u32 val = MT_TXD6_FIXED_BW;
+
+ /* hardware won't add HTC for mgmt/ctrl frame */
+ txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
+
+ val |= FIELD_PREP(MT_TXD6_TX_RATE, rate);
+ txwi[6] |= cpu_to_le32(val);
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
+
+bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data,
+ struct mt76_sta_stats *stats)
+{
+ struct ieee80211_supported_band *sband;
+ struct mt76_phy *mphy;
+ struct ieee80211_tx_info *info;
+ struct sk_buff_head list;
+ struct rate_info rate = {};
+ struct sk_buff *skb;
+ bool cck = false;
+ u32 txrate, txs, mode;
+
+ mt76_tx_status_lock(dev, &list);
+ skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
+ if (!skb)
+ goto out;
+
+ txs = le32_to_cpu(txs_data[0]);
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ info->status.rates[0].idx = -1;
+
+ txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+
+ rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
+ rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
+
+ if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
+ stats->tx_nss[rate.nss - 1]++;
+ if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
+ stats->tx_mcs[rate.mcs]++;
+
+ mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ mphy = &dev->phy;
+ if (wcid->phy_idx == MT_BAND1 && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ if (rate.mcs > 31)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ if (rate.mcs > 9)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (rate.mcs > 11)
+ goto out;
+
+ rate.he_gi = wcid->rate.he_gi;
+ rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
+ rate.flags = RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ goto out;
+ }
+
+ stats->tx_mode[mode]++;
+
+ switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_160:
+ rate.bw = RATE_INFO_BW_160;
+ stats->tx_bw[3]++;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate.bw = RATE_INFO_BW_80;
+ stats->tx_bw[2]++;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate.bw = RATE_INFO_BW_40;
+ stats->tx_bw[1]++;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ stats->tx_bw[0]++;
+ break;
+ }
+ wcid->rate = rate;
+
+out:
+ if (skb)
+ mt76_tx_status_skb_done(dev, skb, &list);
+
+ mt76_tx_status_unlock(dev, &list);
+
+ return !!skb;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_add_txs_skb);
+
+static void
+mt76_connac2_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ struct ieee80211_radiotap_he *he,
+ __le32 *rxv)
+{
+ u32 ru_h, ru_l;
+ u8 ru, offs = 0;
+
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
+ ru = (u8)(ru_l | ru_h << 4);
+
+ status->bw = RATE_INFO_BW_HE_RU;
+
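+	/* map the RU allocation index to RU size and starting offset */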
+ switch (ru) {
+ case 0 ... 36:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+
+ he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+}
+
+static void
+mt76_connac2_mac_decode_he_mu_radiotap(struct mt76_dev *dev, struct sk_buff *skb,
+ __le32 *rxv)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ static struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
+ HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
+ .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ struct ieee80211_radiotap_he_mu *he_mu;
+
+ if (is_mt7921(dev)) {
+ mu_known.flags1 |= HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN);
+ mu_known.flags2 |= HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN);
+ }
+
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+
+ he_mu = skb_push(skb, sizeof(mu_known));
+ memcpy(he_mu, &mu_known, sizeof(mu_known));
+
+#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
+
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
+ if (status->he_dcm)
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
+
+ he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
+ MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
+ le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
+
+ he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
+
+ if (status->bw >= RATE_INFO_BW_40) {
+ he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
+ he_mu->ru_ch2[0] =
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
+ }
+
+ if (status->bw >= RATE_INFO_BW_80) {
+ he_mu->ru_ch1[1] =
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
+ he_mu->ru_ch2[1] =
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
+ }
+}
+
+void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
+ struct sk_buff *skb,
+ __le32 *rxv, u32 mode)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
+ HE_BITS(DATA1_DATA_DCM_KNOWN) |
+ HE_BITS(DATA1_STBC_KNOWN) |
+ HE_BITS(DATA1_CODING_KNOWN) |
+ HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
+ HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
+ HE_BITS(DATA1_BSS_COLOR_KNOWN),
+ .data2 = HE_BITS(DATA2_GI_KNOWN) |
+ HE_BITS(DATA2_TXBF_KNOWN) |
+ HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
+ HE_BITS(DATA2_TXOP_KNOWN),
+ };
+ u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+ struct ieee80211_radiotap_he *he;
+
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he = skb_push(skb, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+
+ he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+ HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
+ le16_encode_bits(ltf_size,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ he->data5 |= HE_BITS(DATA5_TXBF);
+ he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+ HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
+
+ switch (mode) {
+ case MT_PHY_TYPE_HE_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+ HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_EXT_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ HE_BITS(DATA1_UL_DL_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
+
+ mt76_connac2_mac_decode_he_radiotap_ru(status, he, rxv);
+ mt76_connac2_mac_decode_he_mu_radiotap(dev, skb, rxv);
+ break;
+ case MT_PHY_TYPE_HE_TB:
+ he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
+ HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
+
+ mt76_connac2_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_decode_he_radiotap);
+
+/* The HW does not translate the mac header to 802.3 for mesh points */
+int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
+ struct sk_buff *skb, u16 hdr_offset)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_offset);
+ __le32 *rxd = (__le32 *)skb->data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr hdr;
+ u16 frame_control;
+
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
+ MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
+ if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
+ return -EINVAL;
+
+ sta = container_of((void *)status->wcid, struct ieee80211_sta, drv_priv);
+
+ /* store the info from RXD and ethhdr to avoid being overridden */
+ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
+ hdr.duration_id = 0;
+
+ ether_addr_copy(hdr.addr1, vif->addr);
+ ether_addr_copy(hdr.addr2, sta->addr);
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
+ case 0:
+ ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
+ break;
+ case IEEE80211_FCTL_TODS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ break;
+ case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
+ break;
+ default:
+ break;
+ }
+
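+	/* strip the ethernet header and rebuild the LLC/SNAP encapsulation where needed */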
+ skb_pull(skb, hdr_offset + sizeof(struct ethhdr) - 2);
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
+ ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
+ else
+ skb_pull(skb, 2);
+
+ if (ieee80211_has_order(hdr.frame_control))
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
+ if (ieee80211_has_a4(hdr.frame_control))
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ else
+ memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_reverse_frag0_hdr_trans);
+
+int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv, u8 *mode)
+{
+ u32 v0, v2;
+ u8 stbc, gi, bw, dcm, nss;
+ int i, idx;
+ bool cck = false;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+
+ if (!is_mt7915(dev)) {
+ stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
+ gi = FIELD_GET(MT_PRXV_HT_SGI, v0);
+ *mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ if (is_mt7921(dev))
+ dcm = !!(idx & MT_PRXV_TX_DCM);
+ else
+ dcm = FIELD_GET(MT_PRXV_DCM, v0);
+ bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
+ } else {
+ stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ *mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+ dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
+ bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
+ }
+
+ switch (*mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(dev, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = nss;
+ status->encoding = RX_ENC_VHT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 11)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = dcm;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (*mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_rx_rate);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index faa279bbbcb2..9b17bd97ec09 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/firmware.h>
+#include "mt76_connac2_mac.h"
#include "mt76_connac_mcu.h"
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
@@ -193,7 +195,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
*/
} req = {
.bss_idx = mvif->idx,
- .ps_state = vif->bss_conf.ps ? 2 : 0,
+ .ps_state = vif->cfg.ps ? 2 : 0,
};
if (vif->type != NL80211_IFTYPE_STATION)
@@ -402,7 +404,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
else
conn_type = CONNECTION_INFRA_AP;
basic->conn_type = cpu_to_le32(conn_type);
- basic->aid = cpu_to_le16(vif->bss_conf.aid);
+ basic->aid = cpu_to_le16(vif->cfg.aid);
break;
case NL80211_IFTYPE_ADHOC:
basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
@@ -546,7 +548,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
if (sta) {
if (vif->type == NL80211_IFTYPE_STATION)
- generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ generic->partial_aid = cpu_to_le16(vif->cfg.aid);
else
generic->partial_aid = cpu_to_le16(sta->aid);
memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
@@ -1403,6 +1405,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
else
conn_type = CONNECTION_INFRA_AP;
basic_req.basic.conn_type = cpu_to_le32(conn_type);
+		/* Fully activate/deactivate BSS network in AP mode only */
+ basic_req.basic.active = enable;
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p)
@@ -2157,8 +2161,10 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
struct mt76_vif *vif,
struct ieee80211_bss_conf *info)
{
+ struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
+ bss_conf);
struct sk_buff *skb;
- int i, len = min_t(int, info->arp_addr_cnt,
+ int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
struct {
struct {
@@ -2186,7 +2192,7 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < len; i++)
- skb_put_data(skb, &info->arp_addr_list[i], sizeof(__be32));
+ skb_put_data(skb, &mvif->cfg.arp_addr_list[i], sizeof(__be32));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true);
}
@@ -2806,5 +2812,304 @@ int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_rdd_cmd);
+static int
+mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
+ const struct mt76_connac2_fw_trailer *hdr,
+ const u8 *data, bool is_wa)
+{
+ int i, offset = 0, max_len = mt76_is_sdio(dev) ? 2048 : 4096;
+ u32 override = 0, option = 0;
+
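+	/* region descriptors are stored back to back right before the trailer */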
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt76_connac2_fw_region *region;
+ u32 len, addr, mode;
+ int err;
+
+ region = (const void *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ mode = mt76_connac_mcu_gen_dl_mode(dev, region->feature_set,
+ is_wa);
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+ err = mt76_connac_mcu_init_download(dev, addr, len, mode);
+ if (err) {
+ dev_err(dev->dev, "Download request failed\n");
+ return err;
+ }
+
+ err = __mt76_mcu_send_firmware(dev, MCU_CMD(FW_SCATTER),
+ data + offset, len, max_len);
+ if (err) {
+ dev_err(dev->dev, "Failed to send firmware.\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ if (override)
+ option |= FW_START_OVERRIDE;
+ if (is_wa)
+ option |= FW_START_WORKING_PDA_CR4;
+
+ return mt76_connac_mcu_start_firmware(dev, override, option);
+}
+
+int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
+ const char *fw_wa)
+{
+ const struct mt76_connac2_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_wm, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
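+	/* the trailer carrying version info sits at the end of the firmware image */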
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+ dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret) {
+ dev_err(dev->dev, "Failed to start WM firmware\n");
+ goto out;
+ }
+
+ release_firmware(fw);
+
+ if (!fw_wa)
+ return 0;
+
+ ret = request_firmware(&fw, fw_wa, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+ dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+ if (ret) {
+ dev_err(dev->dev, "Failed to start WA firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->hw->wiphy->fw_version,
+ sizeof(dev->hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_load_ram);
+
+static u32 mt76_connac2_get_data_mode(struct mt76_dev *dev, u32 info)
+{
+ u32 mode = DL_MODE_NEED_RSP;
+
+ if (!is_mt7921(dev) || info == PATCH_SEC_NOT_SUPPORT)
+ return mode;
+
+ switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
+ case PATCH_SEC_ENC_TYPE_PLAIN:
+ break;
+ case PATCH_SEC_ENC_TYPE_AES:
+ mode |= DL_MODE_ENCRYPT;
+ mode |= FIELD_PREP(DL_MODE_KEY_IDX,
+ (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
+ mode |= DL_MODE_RESET_SEC_IV;
+ break;
+ case PATCH_SEC_ENC_TYPE_SCRAMBLE:
+ mode |= DL_MODE_ENCRYPT;
+ mode |= DL_CONFIG_ENCRY_MODE_SEL;
+ mode |= DL_MODE_RESET_SEC_IV;
+ break;
+ default:
+		dev_err(dev->dev, "Encryption type not supported!\n");
+ }
+
+ return mode;
+}
+
+int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
+{
+ int i, ret, sem, max_len = mt76_is_sdio(dev) ? 2048 : 4096;
+ const struct mt76_connac2_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+
+ sem = mt76_connac_mcu_patch_sem_ctrl(dev, true);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, fw_name, dev->dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
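+	/* the patch header is at the start of the image, followed by the section table */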
+ hdr = (const void *)fw->data;
+ dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt76_connac2_patch_sec *sec;
+ u32 len, addr, mode;
+ const u8 *dl;
+ u32 sec_info;
+
+ sec = (void *)(fw->data + sizeof(*hdr) + i * sizeof(*sec));
+ if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+ PATCH_SEC_TYPE_INFO) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ dl = fw->data + be32_to_cpu(sec->offs);
+ sec_info = be32_to_cpu(sec->info.sec_key_idx);
+ mode = mt76_connac2_get_data_mode(dev, sec_info);
+
+ ret = mt76_connac_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = __mt76_mcu_send_firmware(dev, MCU_CMD(FW_SCATTER),
+ dl, len, max_len);
+ if (ret) {
+ dev_err(dev->dev, "Failed to send patch\n");
+ goto out;
+ }
+ }
+
+ ret = mt76_connac_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->dev, "Failed to start patch\n");
+
+out:
+ sem = mt76_connac_mcu_patch_sem_ctrl(dev, false);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->dev, "Failed to release patch semaphore\n");
+ break;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_load_patch);
+
+int mt76_connac2_mcu_fill_message(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
+ struct mt76_connac2_mcu_uni_txd *uni_txd;
+ struct mt76_connac2_mcu_txd *mcu_txd;
+ __le32 *txd;
+ u32 val;
+ u8 seq;
+
+ /* TODO: make dynamic based on msg type */
+ dev->mcu.timeout = 20 * HZ;
+
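+	/* allocate a non-zero 4-bit sequence number to match the firmware response */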
+ seq = ++dev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
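+	/* FW_SCATTER carries raw firmware chunks and is sent without a mcu txd */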
+ if (cmd == MCU_CMD(FW_SCATTER))
+ goto exit;
+
+ txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
+ FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
+ txd[1] = cpu_to_le32(val);
+
+ if (cmd & __MCU_CMD_FIELD_UNI) {
+ uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2N;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ goto exit;
+ }
+
+ mcu_txd = (struct mt76_connac2_mcu_txd *)txd;
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
+ MT_TX_MCU_PORT_RX_Q0));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+ mcu_txd->cid = mcu_cmd;
+ mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
+
+ if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
+ } else {
+ mcu_txd->set_query = MCU_Q_NA;
+ }
+
+ if (cmd & __MCU_CMD_FIELD_WA)
+ mcu_txd->s2d_index = MCU_S2D_H2C;
+ else
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+
+exit:
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 561fb0368708..f1d7c05bd794 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -26,6 +26,157 @@
#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
#define PATCH_SEC_TYPE_INFO 0x2
+#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
+#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
+#define PATCH_SEC_ENC_TYPE_AES 0x01
+#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02
+#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
+#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+struct mt76_connac2_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+	u8 set_query; /* FW doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 rsv[5];
+} __packed __aligned(4);
+
+/**
+ * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for firmware v3
+ * @txd: hardware descriptor
+ * @len: total length not including txd
+ * @cid: command identifier
+ * @pkt_type: must be 0xa0 (cmd packet in long format)
+ * @frag_n: fragment number
+ * @seq: sequence number
+ * @checksum: 0 means there is no checksum
+ * @s2d_index: index for command source and destination
+ * Definition | value | note
+ * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
+ * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
+ * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
+ * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
+ *
+ * @option: command option
+ * BIT[0]: UNI_CMD_OPT_BIT_ACK
+ * set to 1 to request a fw reply
+ * if UNI_CMD_OPT_BIT_0_ACK is set and UNI_CMD_OPT_BIT_2_SET_QUERY
+ * is set, mcu firmware will send response event EID = 0x01
+ * (UNI_EVENT_ID_CMD_RESULT) to the host.
+ * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
+ * 0: original command
+ * 1: unified command
+ * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
+ * 0: QUERY command
+ * 1: SET command
+ */
+struct mt76_connac2_mcu_uni_txd {
+ __le32 txd[8];
+
+ /* DW1 */
+ __le16 len;
+ __le16 cid;
+
+ /* DW2 */
+ u8 rsv;
+ u8 pkt_type;
+ u8 frag_n;
+ u8 seq;
+
+ /* DW3 */
+ __le16 checksum;
+ u8 s2d_index;
+ u8 option;
+
+ /* DW4 */
+ u8 rsv1[4];
+} __packed __aligned(4);
+
+struct mt76_connac2_mcu_rxd {
+ __le32 rxd[6];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ u8 rsv[2];
+
+ u8 ext_eid;
+ u8 rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt76_connac2_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+ u16 rsv;
+ struct {
+ __be32 patch_ver;
+ __be32 subsys;
+ __be32 feature;
+ __be32 n_region;
+ __be32 crc;
+ u32 rsv[11];
+ } desc;
+} __packed;
+
+struct mt76_connac2_patch_sec {
+ __be32 type;
+ __be32 offs;
+ __be32 size;
+ union {
+ __be32 spec[13];
+ struct {
+ __be32 addr;
+ __be32 len;
+ __be32 sec_key_idx;
+ __be32 align_len;
+ u32 rsv[9];
+ } info;
+ };
+} __packed;
+
+struct mt76_connac2_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 rsv[2];
+ char fw_ver[10];
+ char build_date[15];
+ __le32 crc;
+} __packed;
+
+struct mt76_connac2_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 rsv[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 rsv1[15];
+} __packed;
+
struct tlv {
__le16 tag;
__le16 len;
@@ -1653,4 +1804,9 @@ int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
+int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
+ const char *fw_wa);
+int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name);
+int mt76_connac2_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq);
#endif /* __MT76_CONNAC_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index f76fd22ee035..50eaeff11af3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -156,7 +156,8 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -187,7 +188,7 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed);
+ struct ieee80211_bss_conf *info, u64 changed);
void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 5d034cec191b..ad4dc8e17b58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -57,8 +57,11 @@ void mt76x02_mac_set_beacon(struct mt76x02_dev *dev,
int bcn_len = dev->beacon_ops->slot_size;
int bcn_addr = MT_BEACON_BASE + (bcn_len * dev->beacon_data_count);
- if (!mt76x02_write_beacon(dev, bcn_addr, skb))
+ if (!mt76x02_write_beacon(dev, bcn_addr, skb)) {
+ if (!dev->beacon_data_count)
+ dev->beacon_hang_check++;
dev->beacon_data_count++;
+ }
dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
@@ -74,6 +77,7 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
if (!dev->mt76.beacon_mask)
dev->tbtt_count = 0;
+ dev->beacon_hang_check = 0;
if (enable) {
dev->mt76.beacon_mask |= BIT(mvif->idx);
} else {
@@ -139,7 +143,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
return;
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif, 0);
if (!skb)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index cf4d4110cc99..de30cf5e2d2f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -1044,10 +1044,9 @@ static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
return;
}
- if (++dev->beacon_hang_check < 10)
+ if (dev->beacon_hang_check < 10)
return;
- dev->beacon_hang_check = 0;
} else {
u32 val = mt76_rr(dev, 0x10f4);
if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
@@ -1057,10 +1056,16 @@ static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
dev_err(dev->mt76.dev, "MAC error detected\n");
mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
- mt76x02_wait_for_txrx_idle(&dev->mt76);
+ if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) {
+ dev_err(dev->mt76.dev, "MAC stop failed\n");
+ goto out;
+ }
+ dev->beacon_hang_check = 0;
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
+
+out:
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 96ec96df6a3c..e9c5e85ec07c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -59,7 +59,8 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
+ mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock(&q->lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 2953df7d8388..c6c16fe8ee85 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -108,7 +108,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
MT_EP_OUT_INBAND_CMD);
if (ret)
- return ret;
+ goto out;
if (wait_resp)
ret = mt76x02u_mcu_wait_resp(dev, seq);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 5bd0a0bae688..604ddcc21123 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -487,7 +487,8 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(mt76x02_set_key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct mt76x02_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, qid;
@@ -636,7 +637,7 @@ EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index cab6e02e1f8c..fd76db8f5269 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -976,7 +976,7 @@ mt7915_rf_regval_get(void *data, u64 *val)
if (ret)
return ret;
- *val = le32_to_cpu(regval);
+ *val = regval;
return 0;
}
@@ -985,8 +985,9 @@ static int
mt7915_rf_regval_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
+ u32 val32 = val;
- return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
+ return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &val32, true);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7915_rf_regval_get,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index f3d608d2d3b2..00aafc2422f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -9,29 +9,14 @@ static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
struct mt7915_dev *dev = phy->dev;
- int i, err;
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
ring_base = MT_WED_TX_RING_BASE;
idx -= MT_TXQ_ID(0);
}
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base,
- MT_WED_Q_TX(idx));
- if (err < 0)
- return err;
-
- for (i = 0; i <= MT_TXQ_PSD; i++)
- phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
-
- return 0;
-}
-
-static void
-mt7915_tx_cleanup(struct mt7915_dev *dev)
-{
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
+ return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
+ MT_WED_Q_TX(idx));
}
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
@@ -40,8 +25,7 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
- mt7915_tx_cleanup(dev);
-
+ mt76_connac_tx_cleanup(&dev->mt76);
if (napi_complete_done(napi, 0))
mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);
@@ -65,8 +49,8 @@ static void mt7915_dma_config(struct mt7915_dev *dev)
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
- RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
@@ -77,8 +61,8 @@ static void mt7915_dma_config(struct mt7915_dev *dev)
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
- RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
@@ -100,24 +84,33 @@ static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x140, 0x4));
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x180, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
+ PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
+ PREFETCH(0x180, 0x4));
if (!is_mt7915(&dev->mt76)) {
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x1c0, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
+ PREFETCH(0x1c0, 0x4));
base = 0x40;
}
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x1c0 + base, 0x4));
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x200 + base, 0x4));
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x240 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
+ PREFETCH(0x1c0 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
+ PREFETCH(0x200 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
+ PREFETCH(0x240 + base, 0x4));
/* for mt7915, the ring which is next the last
* used ring must be initialized.
*/
if (is_mt7915(&dev->mt76)) {
ofs += 0x4;
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x140, 0x0));
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x200 + base, 0x0));
- mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x280 + base, 0x0));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
+ PREFETCH(0x140, 0x0));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
+ PREFETCH(0x200 + base, 0x0));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
+ PREFETCH(0x280 + base, 0x0));
}
}
@@ -455,20 +448,20 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (dev->dbdc_support || dev->phy.band_idx) {
/* rx data queue for band1 */
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
- MT_RXQ_ID(MT_RXQ_EXT),
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
+ MT_RXQ_ID(MT_RXQ_BAND1),
MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_EXT) + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
if (ret)
return ret;
/* tx free notify event from WA for band1 */
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
- MT_RXQ_ID(MT_RXQ_EXT_WA),
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_EXT_WA) + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
if (ret)
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 01169853355e..cc2aac86bcfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -365,18 +365,24 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
hw->max_tx_fragments = 4;
- if (phy->mt76->cap.has_2ghz)
+ if (phy->mt76->cap.has_2ghz) {
phy->mt76->sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_4;
+ }
if (phy->mt76->cap.has_5ghz) {
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_4;
if (is_mt7915(&dev->mt76)) {
phy->mt76->sband_5g.sband.vht_cap.cap |=
@@ -498,7 +504,7 @@ mt7915_alloc_ext_phy(struct mt7915_dev *dev)
if (!dev->dbdc_support)
return NULL;
- mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops, MT_BAND1);
if (!mphy)
return ERR_PTR(-ENOMEM);
@@ -752,9 +758,10 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
- c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US;
+ if (!is_mt7915(&dev->mt76))
+ c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -978,7 +985,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
- cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_8,
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
@@ -1031,7 +1038,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
{
struct mt7915_phy *phy = mt7915_ext_phy(dev);
- struct mt76_phy *mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = dev->mt76.phys[MT_BAND1];
if (!phy)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 086244d9be76..60ae834d95a6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -10,10 +10,6 @@
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
-#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
-#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
- IEEE80211_RADIOTAP_HE_##f)
-
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
@@ -180,7 +176,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
/*
* We don't support reading GI info from txs packets.
* For accurate tx status reporting and AQL improvement,
- * we need to make sure that flags match so polling GI
+	 * we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
rate = &msta->wcid.rate;
@@ -218,360 +214,6 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
rcu_read_unlock();
}
-static void
-mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
- struct ieee80211_radiotap_he *he,
- __le32 *rxv)
-{
- u32 ru_h, ru_l;
- u8 ru, offs = 0;
-
- ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
- ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
- ru = (u8)(ru_l | ru_h << 4);
-
- status->bw = RATE_INFO_BW_HE_RU;
-
- switch (ru) {
- case 0 ... 36:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
-
- he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
- he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-}
-
-static void
-mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he_mu mu_known = {
- .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
- HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
- .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
- };
- struct ieee80211_radiotap_he_mu *he_mu = NULL;
-
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
-
- he_mu = skb_push(skb, sizeof(mu_known));
- memcpy(he_mu, &mu_known, sizeof(mu_known));
-
-#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
-
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
- if (status->he_dcm)
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
-
- he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
- MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
- le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
-
- he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
-
- if (status->bw >= RATE_INFO_BW_40) {
- he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
- he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
- }
-
- if (status->bw >= RATE_INFO_BW_80) {
- he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
- he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
- }
-}
-
-static void
-mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he known = {
- .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
- HE_BITS(DATA1_DATA_DCM_KNOWN) |
- HE_BITS(DATA1_STBC_KNOWN) |
- HE_BITS(DATA1_CODING_KNOWN) |
- HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
- HE_BITS(DATA1_DOPPLER_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
- HE_BITS(DATA1_BSS_COLOR_KNOWN),
- .data2 = HE_BITS(DATA2_GI_KNOWN) |
- HE_BITS(DATA2_TXBF_KNOWN) |
- HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
- HE_BITS(DATA2_TXOP_KNOWN),
- };
- struct ieee80211_radiotap_he *he = NULL;
- u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
-
- status->flag |= RX_FLAG_RADIOTAP_HE;
-
- he = skb_push(skb, sizeof(known));
- memcpy(he, &known, sizeof(known));
-
- he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
- HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
- he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
- he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
- le16_encode_bits(ltf_size,
- IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
- if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
- he->data5 |= HE_BITS(DATA5_TXBF);
- he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
- HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
-
- switch (mode) {
- case MT_PHY_TYPE_HE_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
- HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
- HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_EXT_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_MU:
- he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
-
- mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
- mt7915_mac_decode_he_mu_radiotap(skb, rxv);
- break;
- case MT_PHY_TYPE_HE_TB:
- he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
-
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
-
- mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
- break;
- default:
- break;
- }
-}
-
-/* The HW does not translate the mac header to 802.3 for mesh point */
-static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
- struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
- __le32 *rxd = (__le32 *)skb->data;
- struct ieee80211_sta *sta;
- struct ieee80211_vif *vif;
- struct ieee80211_hdr hdr;
- u16 frame_control;
-
- if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
- MT_RXD3_NORMAL_U2M)
- return -EINVAL;
-
- if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
- return -EINVAL;
-
- if (!msta || !msta->vif)
- return -EINVAL;
-
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
-
- /* store the info from RXD and ethhdr to avoid being overridden */
- frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
- hdr.frame_control = cpu_to_le16(frame_control);
- hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
- hdr.duration_id = 0;
-
- ether_addr_copy(hdr.addr1, vif->addr);
- ether_addr_copy(hdr.addr2, sta->addr);
- switch (frame_control & (IEEE80211_FCTL_TODS |
- IEEE80211_FCTL_FROMDS)) {
- case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
- break;
- case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr->h_source);
- break;
- case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
- break;
- case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr->h_source);
- break;
- default:
- break;
- }
-
- skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
- eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
- ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
- ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
- else
- skb_pull(skb, 2);
-
- if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
- IEEE80211_HT_CTL_LEN);
- if (ieee80211_is_data_qos(hdr.frame_control)) {
- __le16 qos_ctrl;
-
- qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
- memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
- IEEE80211_QOS_CTL_LEN);
- }
-
- if (ieee80211_has_a4(hdr.frame_control))
- memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
- else
- memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
-
- return 0;
-}
-
-static int
-mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
- struct mt76_rx_status *status,
- struct ieee80211_supported_band *sband,
- __le32 *rxv, u8 *mode)
-{
- u32 v0, v2;
- u8 stbc, gi, bw, dcm, nss;
- int i, idx;
- bool cck = false;
-
- v0 = le32_to_cpu(rxv[0]);
- v2 = le32_to_cpu(rxv[2]);
-
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
-
- if (!is_mt7915(&dev->mt76)) {
- stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
- gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
- *mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
- dcm = FIELD_GET(MT_PRXV_DCM, v0);
- bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
- } else {
- stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
- gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- *mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
- dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
- bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
- }
-
- switch (*mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss = nss;
- status->encoding = RX_ENC_VHT;
- if (gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (i > 11)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss = nss;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = dcm;
- break;
- default:
- return -EINVAL;
- }
- status->rate_idx = i;
-
- switch (bw) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (*mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- return 0;
-}
-
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
@@ -590,6 +232,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
+ struct mt7915_sta *msta;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
@@ -599,12 +242,12 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
- mphy = dev->mt76.phy2;
+ mphy = dev->mt76.phys[MT_BAND1];
if (!mphy)
return -EINVAL;
phy = mphy->priv;
- status->ext_phy = true;
+ status->phy_idx = 1;
}
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
@@ -626,8 +269,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- struct mt7915_sta *msta;
-
msta = container_of(status->wcid, struct mt7915_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
@@ -765,8 +406,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
- ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv,
- &mode);
+ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
+ sband, rxv, &mode);
if (ret < 0)
return ret;
}
@@ -781,8 +422,18 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
- if (mt7915_reverse_frag0_hdr_trans(skb, hdr_gap))
+ struct ieee80211_vif *vif;
+ int err;
+
+ if (!msta || !msta->vif)
return -EINVAL;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
+ if (err)
+ return err;
+
hdr_trans = false;
} else {
int pad_start = 0;
@@ -832,7 +483,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt7915_mac_decode_he_radiotap(skb, rxv, mode);
+ mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -1009,266 +660,19 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
#endif
}
-static void
-mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid)
-{
-
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- u8 fc_type, fc_stype;
- u16 ethertype;
- bool wmm = false;
- u32 val;
-
- if (wcid->sta) {
- struct ieee80211_sta *sta;
-
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
- wmm = sta->wme;
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
- FIELD_PREP(MT_TXD1_TID, tid);
-
- ethertype = get_unaligned_be16(&skb->data[12]);
- if (ethertype >= ETH_P_802_3_MIN)
- val |= MT_TXD1_ETH_802_3;
-
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = IEEE80211_FTYPE_DATA >> 2;
- fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
-
- txwi[2] |= cpu_to_le32(val);
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-static void
-mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key,
- bool *mcast)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
- u8 fc_type, fc_stype;
- u32 val;
-
- *mcast = is_multicast_ether_addr(hdr->addr1);
-
- if (ieee80211_is_action(fc) &&
- mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
- u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
-
- txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
- tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
- } else if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
- u16 control = le16_to_cpu(bar->control);
-
- tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
- FIELD_PREP(MT_TXD1_HDR_INFO,
- ieee80211_get_hdrlen_from_skb(skb) / 2) |
- FIELD_PREP(MT_TXD1_TID, tid);
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
- fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
- FIELD_PREP(MT_TXD2_MULTICAST, *mcast);
-
- if (key && *mcast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- val |= MT_TXD2_BIP;
- txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
- }
-
- if (!ieee80211_is_data(fc) || *mcast ||
- info->flags & IEEE80211_TX_CTL_USE_MINRATE)
- val |= MT_TXD2_FIX_RATE;
-
- txwi[2] |= cpu_to_le32(val);
-
- if (ieee80211_is_beacon(fc)) {
- txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
- txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 0x18));
- }
-
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
-
- if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar;
-
- bar = (struct ieee80211_bar *)skb->data;
- seqno = le16_to_cpu(bar->start_seq_num);
- }
-
- val = MT_TXD3_SN_VALID |
- FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
- txwi[3] |= cpu_to_le32(val);
- txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
- }
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-static u16
-mt7915_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- bool beacon, bool mcast)
-{
- u8 mode = 0, band = mphy->chandef.chan->band;
- int rateidx = 0, mcast_rate;
-
- if (beacon) {
- struct cfg80211_bitrate_mask *mask;
-
- mask = &vif->bss_conf.beacon_tx_rate;
- if (hweight16(mask->control[band].he_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HE_SU;
- goto out;
- } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_VHT;
- goto out;
- } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HT;
- goto out;
- } else if (hweight32(mask->control[band].legacy) == 1) {
- rateidx = ffs(mask->control[band].legacy) - 1;
- goto legacy;
- }
- }
-
- mcast_rate = vif->bss_conf.mcast_rate[band];
- if (mcast && mcast_rate > 0)
- rateidx = mcast_rate - 1;
- else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
-
-legacy:
- rateidx = mt76_calculate_default_rate(mphy, rateidx);
- mode = rateidx >> 8;
- rateidx &= GENMASK(7, 0);
-
-out:
- return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
- FIELD_PREP(MT_TX_RATE_MODE, mode);
-}
-
-void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
- struct ieee80211_key_conf *key, u32 changed)
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, u32 changed)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_phy *mphy = &dev->mphy;
- bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
- u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
- bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- bool mcast = false;
- u16 tx_count = 15;
- u32 val;
- bool beacon = !!(changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED));
- bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
- BSS_CHANGED_FILS_DISCOVERY));
-
- if (vif) {
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct mt76_phy *mphy = &dev->phy;
- omac_idx = mvif->mt76.omac_idx;
- wmm_idx = mvif->mt76.wmm_idx;
- band_idx = mvif->mt76.band_idx;
- }
+ if (phy_idx && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
-
- if (inband_disc) {
- p_fmt = MT_TX_TYPE_FW;
- q_idx = MT_LMAC_ALTX0;
- } else if (beacon) {
- p_fmt = MT_TX_TYPE_FW;
- q_idx = MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
- p_fmt = MT_TX_TYPE_CT;
- q_idx = MT_LMAC_ALTX0;
- } else {
- p_fmt = MT_TX_TYPE_CT;
- q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
- mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
- }
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
- FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
- FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
- txwi[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT | MT_TXD1_VTA |
- FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
- FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
-
- if (ext_phy || band_idx)
- val |= MT_TXD1_TGID;
-
- txwi[1] = cpu_to_le32(val);
-
- txwi[2] = 0;
-
- val = MT_TXD3_SW_POWER_MGMT |
- FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
- if (key)
- val |= MT_TXD3_PROTECT_FRAME;
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- val |= MT_TXD3_NO_ACK;
-
- txwi[3] = cpu_to_le32(val);
- txwi[4] = 0;
-
- val = FIELD_PREP(MT_TXD5_PID, pid);
- if (pid >= MT_PACKET_ID_FIRST)
- val |= MT_TXD5_TX_STATUS_HOST;
- txwi[5] = cpu_to_le32(val);
-
- txwi[6] = 0;
- txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
-
- if (is_8023)
- mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid);
- else
- mt7915_mac_write_txwi_80211(dev, txwi, skb, key, &mcast);
-
- if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- u16 rate = mt7915_mac_tx_rate_val(mphy, vif, beacon, mcast);
-
- /* hardware won't add HTC for mgmt/ctrl frame */
- txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
-
- val = MT_TXD6_FIXED_BW |
- FIELD_PREP(MT_TXD6_TX_RATE, rate);
- txwi[6] |= cpu_to_le32(val);
- txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
- }
+ mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
if (mt76_testmode_enabled(mphy))
mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
@@ -1284,8 +688,8 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_connac_fw_txp *txp;
struct mt76_txwi_cache *t;
- struct mt7915_txp *txp;
int id, i, nbuf = tx_info->nbuf - 1;
u8 *txwi = (u8 *)txwi_ptr;
int pid;
@@ -1315,9 +719,10 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return id;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, 0);
+ mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
+ qid, 0);
- txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
+ txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
@@ -1356,7 +761,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
- struct mt7915_txp *txp = ptr + MT_TXD_SIZE;
+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
__le32 *txwi = ptr;
u32 val;
@@ -1403,18 +808,6 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
}
static void
-mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- struct mt7915_txp *txp;
- int i;
-
- txp = mt7915_txwi_to_txp(dev, t);
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dma_dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, struct list_head *free_list)
{
@@ -1424,7 +817,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
__le32 *txwi;
u16 wcid_idx;
- mt7915_txp_skb_unmap(mdev, t);
+ mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
@@ -1461,7 +854,7 @@ static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_phy *mphy_ext = mdev->phy2;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1494,7 +887,8 @@ mt7915_mac_tx_free_done(struct mt7915_dev *dev,
static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
- struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
@@ -1509,10 +903,10 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
- if (WARN_ON_ONCE((void *)&free->info[total >> v3] > end))
+ if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
return;
- for (cur_info = &free->info[0]; count < total; cur_info++) {
+ for (cur_info = tx_info; count < total; cur_info++) {
u32 msdu, info = le32_to_cpu(*cur_info);
u8 i;
@@ -1565,9 +959,9 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
- struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
+ struct mt76_connac_tx_free *free = data;
+ __le16 *info = (__le16 *)(data + sizeof(*free));
struct mt76_dev *mdev = &dev->mt76;
- __le16 *info = (__le16 *)free->info;
void *end = data + len;
LIST_HEAD(free_list);
bool wake = false;
@@ -1593,128 +987,6 @@ mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
mt7915_mac_tx_free_done(dev, &free_list, wake);
}
-static bool
-mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
- __le32 *txs_data, struct mt76_sta_stats *stats)
-{
- struct ieee80211_supported_band *sband;
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_phy *mphy;
- struct ieee80211_tx_info *info;
- struct sk_buff_head list;
- struct rate_info rate = {};
- struct sk_buff *skb;
- bool cck = false;
- u32 txrate, txs, mode;
-
- mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (!skb)
- goto out_no_skb;
-
- txs = le32_to_cpu(txs_data[0]);
-
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
-
- info->status.rates[0].idx = -1;
-
- txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
-
- rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
- rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
-
- if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
- stats->tx_nss[rate.nss - 1]++;
- if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
- stats->tx_mcs[rate.mcs]++;
-
- mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- mphy = &dev->mphy;
- if (wcid->ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
-
- if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &mphy->sband_5g.sband;
- else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
- sband = &mphy->sband_6g.sband;
- else
- sband = &mphy->sband_2g.sband;
-
- rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
- rate.legacy = sband->bitrates[rate.mcs].bitrate;
- break;
- case MT_PHY_TYPE_HT:
- case MT_PHY_TYPE_HT_GF:
- if (rate.mcs > 31)
- goto out;
-
- rate.flags = RATE_INFO_FLAGS_MCS;
- if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
- rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_VHT:
- if (rate.mcs > 9)
- goto out;
-
- rate.flags = RATE_INFO_FLAGS_VHT_MCS;
- break;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- case MT_PHY_TYPE_HE_MU:
- if (rate.mcs > 11)
- goto out;
-
- rate.he_gi = wcid->rate.he_gi;
- rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
- rate.flags = RATE_INFO_FLAGS_HE_MCS;
- break;
- default:
- goto out;
- }
-
- stats->tx_mode[mode]++;
-
- switch (FIELD_GET(MT_TXS0_BW, txs)) {
- case IEEE80211_STA_RX_BW_160:
- rate.bw = RATE_INFO_BW_160;
- stats->tx_bw[3]++;
- break;
- case IEEE80211_STA_RX_BW_80:
- rate.bw = RATE_INFO_BW_80;
- stats->tx_bw[2]++;
- break;
- case IEEE80211_STA_RX_BW_40:
- rate.bw = RATE_INFO_BW_40;
- stats->tx_bw[1]++;
- break;
- default:
- rate.bw = RATE_INFO_BW_20;
- stats->tx_bw[0]++;
- break;
- }
- wcid->rate = rate;
-
-out:
- mt76_tx_status_skb_done(mdev, skb, &list);
-
-out_no_skb:
- mt76_tx_status_unlock(mdev, &list);
-
- return !!skb;
-}
-
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
struct mt7915_sta *msta = NULL;
@@ -1743,8 +1015,8 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
msta = container_of(wcid, struct mt7915_sta, wcid);
- mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);
-
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
+ &msta->stats);
if (!wcid->sta)
goto out;
@@ -1831,27 +1103,6 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7915_txp *txp;
-
- txp = mt7915_txwi_to_txp(mdev, e->txwi);
- t = mt76_token_put(mdev, le16_to_cpu(txp->token));
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -2025,22 +1276,24 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+
ieee80211_iterate_active_interfaces(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7915_update_vif_beacon, dev->mt76.hw);
- if (!dev->mt76.phy2)
+ if (!mphy_ext)
return;
- ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ ieee80211_iterate_active_interfaces(mphy_ext->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7915_update_vif_beacon, dev->mt76.phy2->hw);
+ mt7915_update_vif_beacon, mphy_ext->hw);
}
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
- struct mt76_phy *mphy_ext = dev->mt76.phy2;
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
@@ -2124,9 +1377,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
struct mt7915_phy *phy2;
struct mt76_phy *ext_phy;
struct mt7915_dev *dev;
+ int i;
dev = container_of(work, struct mt7915_dev, reset_work);
- ext_phy = dev->mt76.phy2;
+ ext_phy = dev->mt76.phys[MT_BAND1];
phy2 = ext_phy ? ext_phy->priv : NULL;
if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
@@ -2145,9 +1399,8 @@ void mt7915_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[0]);
- napi_disable(&dev->mt76.napi[1]);
- napi_disable(&dev->mt76.napi[2]);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
@@ -2170,14 +1423,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
clear_bit(MT76_RESET, &phy2->mt76->state);
local_bh_disable();
- napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
- napi_enable(&dev->mt76.napi[1]);
- napi_schedule(&dev->mt76.napi[1]);
-
- napi_enable(&dev->mt76.napi[2]);
- napi_schedule(&dev->mt76.napi[2]);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
local_bh_enable();
tasklet_schedule(&dev->irq_tasklet);
@@ -2187,8 +1436,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_worker_enable(&dev->mt76.tx_worker);
+ local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
ieee80211_wake_queues(mt76_hw(dev));
if (ext_phy)
@@ -2214,7 +1465,8 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
u32 val;
cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
- mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
@@ -2227,19 +1479,28 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
- mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
+ mib->rx_vector_mismatch_cnt +=
+ FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
- mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
+ mib->rx_delimiter_fail_cnt +=
+ FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
+ mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
- mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
+ mib->rx_len_mismatch_cnt +=
+ FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
mib->tx_ampdu_cnt += cnt;
cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
- mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
+ mib->tx_stop_q_empty_cnt +=
+ FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
@@ -2251,6 +1512,29 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
+ mib->primary_cca_busy_time +=
+ FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
+ mib->secondary_cca_busy_time +=
+ FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
+ mib->primary_energy_detect_time +=
+ FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
+ mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
+ mib->ofdm_mdrdy_time +=
+ FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
+ mib->green_mdrdy_time +=
+ FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
+
cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
mib->rx_ampdu_cnt += cnt;
@@ -2266,10 +1550,12 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
- mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
+ mib->tx_rwp_fail_cnt +=
+ FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
- mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
+ mib->tx_rwp_need_cnt +=
+ FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
@@ -2311,7 +1597,8 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
if (is_mt7915(&dev->mt76)) {
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
- mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ba_miss_cnt +=
+ FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
mib->ack_fail_cnt +=
FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index c5fd1a618ae7..6fa9c79f3e5f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -4,6 +4,8 @@
#ifndef __MT7915_MAC_H
#define __MT7915_MAC_H
+#include "../mt76_connac2_mac.h"
+
#define MT_CT_PARSE_LEN 72
#define MT_CT_DMA_BUF_NUM 2
@@ -27,289 +29,6 @@ enum rx_pkt_type {
PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
};
-/* RXD DW1 */
-#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
-#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
-#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
-#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
-#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
-#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
-#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
-#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
-#define MT_RXD1_NORMAL_CM BIT(23)
-#define MT_RXD1_NORMAL_CLM BIT(24)
-#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
-#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
-#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
-#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
-#define MT_RXD1_NORMAL_SPP_EN BIT(29)
-#define MT_RXD1_NORMAL_ADD_OM BIT(30)
-#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
-
-/* RXD DW2 */
-#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
-#define MT_RXD2_NORMAL_CO_ANT BIT(6)
-#define MT_RXD2_NORMAL_BF_CQI BIT(7)
-#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
-#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
-#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
-#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
-#define MT_RXD2_NORMAL_MU_BAR BIT(21)
-#define MT_RXD2_NORMAL_SW_BIT BIT(22)
-#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
-#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
-#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
-#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
-#define MT_RXD2_NORMAL_FRAG BIT(27)
-#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
-#define MT_RXD2_NORMAL_NDATA BIT(29)
-#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
-#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
-
-/* RXD DW3 */
-#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
-#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
-#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
-#define MT_RXD3_NORMAL_U2M BIT(0)
-#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
-#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
-#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
-#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
-#define MT_RXD3_NORMAL_AMSDU BIT(22)
-#define MT_RXD3_NORMAL_MESH BIT(23)
-#define MT_RXD3_NORMAL_MHCP BIT(24)
-#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
-#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
-#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
-#define MT_RXD3_NORMAL_MORE BIT(28)
-#define MT_RXD3_NORMAL_UNWANT BIT(29)
-#define MT_RXD3_NORMAL_RX_DROP BIT(30)
-#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
-
-/* RXD DW4 */
-#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
-#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
-#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
-#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
-
-#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
-#define MT_RXD4_NORMAL_CLS BIT(10)
-#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
-#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
-#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
-#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
-#define MT_RXD3_NORMAL_PF_MODE BIT(29)
-#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
-
-#define MT_RXV_HDR_BAND_IDX BIT(24)
-
-/* RXD GROUP4 */
-#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
-#define MT_RXD6_TA_LO GENMASK(31, 16)
-
-#define MT_RXD7_TA_HI GENMASK(31, 0)
-
-#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
-#define MT_RXD8_QOS_CTL GENMASK(31, 16)
-
-#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
-
-/* P-RXV */
-#define MT_PRXV_TX_RATE GENMASK(6, 0)
-#define MT_PRXV_TX_DCM BIT(4)
-#define MT_PRXV_TX_ER_SU_106T BIT(5)
-#define MT_PRXV_NSTS GENMASK(9, 7)
-#define MT_PRXV_TXBF BIT(10)
-#define MT_PRXV_HT_AD_CODE BIT(11)
-#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
-#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
-#define MT_PRXV_RCPI3 GENMASK(31, 24)
-#define MT_PRXV_RCPI2 GENMASK(23, 16)
-#define MT_PRXV_RCPI1 GENMASK(15, 8)
-#define MT_PRXV_RCPI0 GENMASK(7, 0)
-#define MT_PRXV_HT_SHORT_GI GENMASK(16, 15)
-#define MT_PRXV_HT_STBC GENMASK(23, 22)
-#define MT_PRXV_TX_MODE GENMASK(27, 24)
-#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
-#define MT_PRXV_DCM BIT(17)
-#define MT_PRXV_NUM_RX BIT(20, 18)
-
-/* C-RXV */
-#define MT_CRXV_HT_STBC GENMASK(1, 0)
-#define MT_CRXV_TX_MODE GENMASK(7, 4)
-#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
-#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
-#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
-#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
-#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
-#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
-#define MT_CRXV_HE_UPLINK BIT(31)
-#define MT_CRXV_HE_RU0 GENMASK(7, 0)
-#define MT_CRXV_HE_RU1 GENMASK(15, 8)
-#define MT_CRXV_HE_RU2 GENMASK(23, 16)
-#define MT_CRXV_HE_RU3 GENMASK(31, 24)
-
-#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
-
-#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
-#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
-#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
-#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
-
-#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
-#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
-#define MT_CRXV_HE_BEAM_CHNG BIT(13)
-#define MT_CRXV_HE_DOPPLER BIT(16)
-
-#define MT_CRXV_SNR GENMASK(18, 13)
-#define MT_CRXV_FOE_LO GENMASK(31, 19)
-#define MT_CRXV_FOE_HI GENMASK(6, 0)
-#define MT_CRXV_FOE_SHIFT 13
-
-enum tx_header_format {
- MT_HDR_FORMAT_802_3,
- MT_HDR_FORMAT_CMD,
- MT_HDR_FORMAT_802_11,
- MT_HDR_FORMAT_802_11_EXT,
-};
-
-enum tx_pkt_type {
- MT_TX_TYPE_CT,
- MT_TX_TYPE_SF,
- MT_TX_TYPE_CMD,
- MT_TX_TYPE_FW,
-};
-
-enum tx_port_idx {
- MT_TX_PORT_IDX_LMAC,
- MT_TX_PORT_IDX_MCU
-};
-
-enum tx_mcu_port_q_idx {
- MT_TX_MCU_PORT_RX_Q0 = 0x20,
- MT_TX_MCU_PORT_RX_Q1,
- MT_TX_MCU_PORT_RX_Q2,
- MT_TX_MCU_PORT_RX_Q3,
- MT_TX_MCU_PORT_RX_FWDL = 0x3e
-};
-
-#define MT_CT_INFO_APPLY_TXD BIT(0)
-#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
-#define MT_CT_INFO_MGMT_FRAME BIT(2)
-#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
-#define MT_CT_INFO_HSR2_TX BIT(4)
-#define MT_CT_INFO_FROM_HOST BIT(7)
-
-#define MT_TXD_SIZE (8 * 4)
-
-#define MT_TXD0_Q_IDX GENMASK(31, 25)
-#define MT_TXD0_PKT_FMT GENMASK(24, 23)
-#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
-#define MT_TXD0_TX_BYTES GENMASK(15, 0)
-
-#define MT_TXD1_LONG_FORMAT BIT(31)
-#define MT_TXD1_TGID BIT(30)
-#define MT_TXD1_OWN_MAC GENMASK(29, 24)
-#define MT_TXD1_AMSDU BIT(23)
-#define MT_TXD1_TID GENMASK(22, 20)
-#define MT_TXD1_HDR_PAD GENMASK(19, 18)
-#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
-#define MT_TXD1_HDR_INFO GENMASK(15, 11)
-#define MT_TXD1_ETH_802_3 BIT(15)
-#define MT_TXD1_VTA BIT(10)
-#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
-
-#define MT_TXD2_FIX_RATE BIT(31)
-#define MT_TXD2_FIXED_RATE BIT(30)
-#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
-#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
-#define MT_TXD2_FRAG GENMASK(15, 14)
-#define MT_TXD2_HTC_VLD BIT(13)
-#define MT_TXD2_DURATION BIT(12)
-#define MT_TXD2_BIP BIT(11)
-#define MT_TXD2_MULTICAST BIT(10)
-#define MT_TXD2_RTS BIT(9)
-#define MT_TXD2_SOUNDING BIT(8)
-#define MT_TXD2_NDPA BIT(7)
-#define MT_TXD2_NDP BIT(6)
-#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
-#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
-
-#define MT_TXD3_SN_VALID BIT(31)
-#define MT_TXD3_PN_VALID BIT(30)
-#define MT_TXD3_SW_POWER_MGMT BIT(29)
-#define MT_TXD3_BA_DISABLE BIT(28)
-#define MT_TXD3_SEQ GENMASK(27, 16)
-#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
-#define MT_TXD3_TX_COUNT GENMASK(10, 6)
-#define MT_TXD3_TIMING_MEASURE BIT(5)
-#define MT_TXD3_DAS BIT(4)
-#define MT_TXD3_EEOSP BIT(3)
-#define MT_TXD3_EMRD BIT(2)
-#define MT_TXD3_PROTECT_FRAME BIT(1)
-#define MT_TXD3_NO_ACK BIT(0)
-
-#define MT_TXD4_PN_LOW GENMASK(31, 0)
-
-#define MT_TXD5_PN_HIGH GENMASK(31, 16)
-#define MT_TXD5_MD BIT(15)
-#define MT_TXD5_ADD_BA BIT(14)
-#define MT_TXD5_TX_STATUS_HOST BIT(10)
-#define MT_TXD5_TX_STATUS_MCU BIT(9)
-#define MT_TXD5_TX_STATUS_FMT BIT(8)
-#define MT_TXD5_PID GENMASK(7, 0)
-
-#define MT_TXD6_TX_IBF BIT(31)
-#define MT_TXD6_TX_EBF BIT(30)
-#define MT_TXD6_TX_RATE GENMASK(29, 16)
-#define MT_TXD6_SGI GENMASK(15, 14)
-#define MT_TXD6_HELTF GENMASK(13, 12)
-#define MT_TXD6_LDPC BIT(11)
-#define MT_TXD6_SPE_ID_IDX BIT(10)
-#define MT_TXD6_ANT_ID GENMASK(7, 4)
-#define MT_TXD6_DYN_BW BIT(3)
-#define MT_TXD6_FIXED_BW BIT(2)
-#define MT_TXD6_BW GENMASK(1, 0)
-
-#define MT_TXD7_TXD_LEN GENMASK(31, 30)
-#define MT_TXD7_UDP_TCP_SUM BIT(29)
-#define MT_TXD7_IP_SUM BIT(28)
-
-#define MT_TXD7_TYPE GENMASK(21, 20)
-#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
-
-#define MT_TXD7_PSE_FID GENMASK(27, 16)
-#define MT_TXD7_SPE_IDX GENMASK(15, 11)
-#define MT_TXD7_HW_AMSDU BIT(10)
-#define MT_TXD7_TX_TIME GENMASK(9, 0)
-
-#define MT_TX_RATE_STBC BIT(13)
-#define MT_TX_RATE_NSS GENMASK(12, 10)
-#define MT_TX_RATE_MODE GENMASK(9, 6)
-#define MT_TX_RATE_SU_EXT_TONE BIT(5)
-#define MT_TX_RATE_DCM BIT(4)
-/* VHT/HE only use bits 0-3 */
-#define MT_TX_RATE_IDX GENMASK(5, 0)
-
-#define MT_TXP_MAX_BUF_NUM 6
-
-struct mt7915_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- __le16 rept_wds_wcid;
- u8 nbuf;
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
-struct mt7915_tx_free {
- __le16 rx_byte_cnt;
- __le16 ctrl;
- __le32 txd;
- __le32 info[];
-} __packed __aligned(4);
-
#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
@@ -324,41 +43,6 @@ struct mt7915_tx_free {
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
-#define MT_TXS0_FIXED_RATE BIT(31)
-#define MT_TXS0_BW GENMASK(30, 29)
-#define MT_TXS0_TID GENMASK(28, 26)
-#define MT_TXS0_AMPDU BIT(25)
-#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
-#define MT_TXS0_BA_ERROR BIT(22)
-#define MT_TXS0_PS_FLAG BIT(21)
-#define MT_TXS0_TXOP_TIMEOUT BIT(20)
-#define MT_TXS0_BIP_ERROR BIT(19)
-
-#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
-#define MT_TXS0_RTS_TIMEOUT BIT(17)
-#define MT_TXS0_ACK_TIMEOUT BIT(16)
-#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
-
-#define MT_TXS0_TX_STATUS_HOST BIT(15)
-#define MT_TXS0_TX_STATUS_MCU BIT(14)
-#define MT_TXS0_TX_RATE GENMASK(13, 0)
-
-#define MT_TXS1_SEQNO GENMASK(31, 20)
-#define MT_TXS1_RESP_RATE GENMASK(19, 16)
-#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
-#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
-
-#define MT_TXS2_BF_STATUS GENMASK(31, 30)
-#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27)
-#define MT_TXS2_SHARED_ANTENNA BIT(26)
-#define MT_TXS2_WCID GENMASK(25, 16)
-#define MT_TXS2_TX_DELAY GENMASK(15, 0)
-
-#define MT_TXS3_PID GENMASK(31, 24)
-#define MT_TXS3_ANT_ID GENMASK(23, 0)
-
-#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
-
#define MT_TXS5_F0_FINAL_MPDU BIT(31)
#define MT_TXS5_F0_QOS BIT(30)
#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25)
@@ -414,17 +98,4 @@ struct mt7915_dfs_radar_spec {
struct mt7915_dfs_pattern radar_pattern[16];
};
-static inline struct mt7915_txp *
-mt7915_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 710ca757fb52..bd3386bf0f8a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -227,7 +227,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = ext_phy;
+ mvif->sta.wcid.phy_idx = ext_phy;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -235,7 +235,6 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
@@ -251,6 +250,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_mcu_add_bss_info(phy, vif, true);
mt7915_mcu_add_sta(dev, vif, NULL, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -478,7 +478,8 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
}
static int
-mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
@@ -573,7 +574,7 @@ mt7915_update_bss_color(struct ieee80211_hw *hw,
static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = mt7915_hw_dev(hw);
@@ -593,7 +594,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- mt7915_mcu_add_bss_info(phy, vif, info->assoc);
+ mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
}
@@ -660,7 +661,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = ext_phy;
+ msta->wcid.phy_idx = ext_phy;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
@@ -1146,8 +1147,15 @@ static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_fifo_full_cnt",
"rx_mpdu_cnt",
"channel_idle_cnt",
+ "primary_cca_busy_time",
+ "secondary_cca_busy_time",
+ "primary_energy_detect_time",
+ "cck_mdrdy_time",
+ "ofdm_mdrdy_time",
+ "green_mdrdy_time",
"rx_vector_mismatch_cnt",
"rx_delimiter_fail_cnt",
+ "rx_mrdy_cnt",
"rx_len_mismatch_cnt",
"rx_ampdu_cnt",
"rx_ampdu_bytes_cnt",
@@ -1287,8 +1295,15 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->rx_fifo_full_cnt;
data[ei++] = mib->rx_mpdu_cnt;
data[ei++] = mib->channel_idle_cnt;
+ data[ei++] = mib->primary_cca_busy_time;
+ data[ei++] = mib->secondary_cca_busy_time;
+ data[ei++] = mib->primary_energy_detect_time;
+ data[ei++] = mib->cck_mdrdy_time;
+ data[ei++] = mib->ofdm_mdrdy_time;
+ data[ei++] = mib->green_mdrdy_time;
data[ei++] = mib->rx_vector_mismatch_cnt;
data[ei++] = mib->rx_delimiter_fail_cnt;
+ data[ei++] = mib->rx_mrdy_cnt;
data[ei++] = mib->rx_len_mismatch_cnt;
data[ei++] = mib->rx_ampdu_cnt;
data[ei++] = mib->rx_ampdu_bytes_cnt;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index b7e2b365356c..f83067961945 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -1,69 +1,12 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
-#include <linux/firmware.h>
#include <linux/fs.h>
#include "mt7915.h"
#include "mcu.h"
#include "mac.h"
#include "eeprom.h"
-struct mt7915_patch_hdr {
- char build_date[16];
- char platform[4];
- __be32 hw_sw_ver;
- __be32 patch_ver;
- __be16 checksum;
- u16 reserved;
- struct {
- __be32 patch_ver;
- __be32 subsys;
- __be32 feature;
- __be32 n_region;
- __be32 crc;
- u32 reserved[11];
- } desc;
-} __packed;
-
-struct mt7915_patch_sec {
- __be32 type;
- __be32 offs;
- __be32 size;
- union {
- __be32 spec[13];
- struct {
- __be32 addr;
- __be32 len;
- __be32 sec_key_idx;
- __be32 align_len;
- u32 reserved[9];
- } info;
- };
-} __packed;
-
-struct mt7915_fw_trailer {
- u8 chip_id;
- u8 eco_code;
- u8 n_region;
- u8 format_ver;
- u8 format_flag;
- u8 reserved[2];
- char fw_ver[10];
- char build_date[15];
- u32 crc;
-} __packed;
-
-struct mt7915_fw_region {
- __le32 decomp_crc;
- __le32 decomp_len;
- __le32 decomp_blk_sz;
- u8 reserved[4];
- __le32 addr;
- __le32 len;
- u8 feature_set;
- u8 reserved1[15];
-} __packed;
-
#define fw_name(_dev, name, ...) ({ \
char *_fw; \
switch (mt76_chip(&(_dev)->mt76)) { \
@@ -207,7 +150,7 @@ static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
- struct mt7915_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
@@ -216,7 +159,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return -ETIMEDOUT;
}
- rxd = (struct mt7915_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (seq != rxd->seq)
return -EAGAIN;
@@ -227,7 +170,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
skb_pull(skb, sizeof(*rxd) + 4);
ret = le32_to_cpu(*(__le32 *)skb->data);
} else {
- skb_pull(skb, sizeof(struct mt7915_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
}
return ret;
@@ -238,69 +181,20 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- struct mt7915_mcu_txd *mcu_txd;
enum mt76_mcuq_id qid;
- __le32 *txd;
- u32 val;
- u8 seq;
-
- /* TODO: make dynamic based on msg type */
- mdev->mcu.timeout = 20 * HZ;
+ int ret;
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
+ if (ret)
+ return ret;
- if (cmd == MCU_CMD(FW_SCATTER)) {
+ if (cmd == MCU_CMD(FW_SCATTER))
qid = MT_MCUQ_FWDL;
- goto exit;
- }
-
- mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ else if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
qid = MT_MCUQ_WA;
else
qid = MT_MCUQ_WM;
- txd = mcu_txd->txd;
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
- FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
- FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
- txd[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
- txd[1] = cpu_to_le32(val);
-
- mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
- mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
- MT_TX_MCU_PORT_RX_Q0));
- mcu_txd->pkt_type = MCU_PKT_ID;
- mcu_txd->seq = seq;
-
- mcu_txd->cid = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
- mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
- if (mcu_txd->ext_cid) {
- mcu_txd->ext_cid_ack = 1;
-
- /* do not use Q_SET for efuse */
- if (cmd & __MCU_CMD_FIELD_QUERY)
- mcu_txd->set_query = MCU_Q_QUERY;
- else
- mcu_txd->set_query = MCU_Q_SET;
- }
-
- if (cmd & __MCU_CMD_FIELD_WA)
- mcu_txd->s2d_index = MCU_S2D_H2C;
- else
- mcu_txd->s2d_index = MCU_S2D_H2N;
-
-exit:
- if (wait_seq)
- *wait_seq = seq;
-
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
@@ -322,7 +216,7 @@ int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
static void
mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active)
+ if (vif->bss_conf.csa_active)
ieee80211_csa_finish(vif);
}
@@ -334,8 +228,8 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
- if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -353,8 +247,8 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
- if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
phy = (struct mt7915_phy *)mphy->priv;
phy->throttle_state = t->ctrl.duty.duty_cycle;
@@ -368,8 +262,8 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
if (r->band_idx == MT_RX_SEL2)
cfg80211_background_radar_event(mphy->hw->wiphy,
@@ -383,10 +277,12 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
static void
mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
- const char *data = (char *)&rxd[1];
- const char *type;
+ struct mt76_connac2_mcu_rxd *rxd;
int len = skb->len - sizeof(*rxd);
+ const char *data, *type;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ data = (char *)&rxd[1];
switch (rxd->s2d_index) {
case 0:
@@ -409,7 +305,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
static void
mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (!vif->color_change_active)
+ if (!vif->bss_conf.color_change_active)
return;
ieee80211_color_change_finish(vif);
@@ -423,8 +319,8 @@ mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
b = (struct mt7915_mcu_bcc_notify *)skb->data;
- if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -434,8 +330,9 @@ mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
static void
mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->ext_eid) {
case MCU_EXT_EVENT_THERMAL_PROTECT:
mt7915_mcu_rx_thermal_notify(dev, skb);
@@ -460,8 +357,9 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
static void
mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->eid) {
case MCU_EVENT_EXT:
mt7915_mcu_rx_ext_event(dev, skb);
@@ -474,8 +372,9 @@ mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
@@ -927,8 +826,8 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static void
-mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
+mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct ieee80211_vif *vif)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
@@ -946,7 +845,8 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
mvif->cap.vht_mu_ebfer ||
mvif->cap.vht_mu_ebfee;
- muru->cfg.mimo_ul_en = true;
+ if (!is_mt7915(&dev->mt76))
+ muru->cfg.mimo_ul_en = true;
muru->cfg.ofdma_dl_en = true;
if (sta->deflink.vht_cap.vht_supported)
@@ -1720,7 +1620,8 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable, true);
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable,
+ !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
if (!enable)
goto out;
@@ -1748,7 +1649,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
/* starec he */
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* starec muru */
- mt7915_mcu_sta_muru_tlv(skb, sta, vif);
+ mt7915_mcu_sta_muru_tlv(dev, skb, sta, vif);
/* starec bfee */
mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta);
}
@@ -1818,7 +1719,7 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
if (!offs->cntdwn_counter_offs[0])
return;
- sub_tag = vif->csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
+ sub_tag = vif->bss_conf.csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info),
&bcn->sub_ntlv, &bcn->len);
info = (struct bss_info_bcn_cntdwn *)tlv;
@@ -1903,15 +1804,15 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
if (offs->cntdwn_counter_offs[0]) {
u16 offset = offs->cntdwn_counter_offs[0];
- if (vif->csa_active)
+ if (vif->bss_conf.csa_active)
cont->csa_ofs = cpu_to_le16(offset - 4);
- if (vif->color_change_active)
+ if (vif->bss_conf.color_change_active)
cont->bcc_ofs = cpu_to_le16(offset - 3);
}
buf = (u8 *)tlv + sizeof(*cont);
- mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
- BSS_CHANGED_BEACON);
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, BSS_CHANGED_BEACON);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
@@ -2031,12 +1932,17 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
info->control.vif = vif;
info->band = band;
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+ if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceeded\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
len, &bcn->sub_ntlv, &bcn->len);
discov = (struct bss_info_inband_discovery *)tlv;
@@ -2049,8 +1955,8 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
buf = (u8 *)tlv + sizeof(*discov);
- mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
- changed);
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, changed);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
dev_kfree_skb(skb);
@@ -2059,7 +1965,6 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int en, u32 changed)
{
-#define MAX_BEACON_SIZE 512
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
@@ -2068,7 +1973,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct sk_buff *skb, *rskb;
struct tlv *tlv;
struct bss_info_bcn *bcn;
- int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+ int len = MT7915_MAX_BSS_OFFLOAD_SIZE;
bool ext_phy = phy != &dev->phy;
if (vif->bss_conf.nontransmitted)
@@ -2086,20 +1991,18 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!en)
goto out;
- skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!skb)
return -EINVAL;
- if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
+ if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) {
dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
dev_kfree_skb(skb);
return -EINVAL;
}
- if (ext_phy) {
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- }
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue = FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
mt7915_mcu_beacon_check_caps(phy, vif, skb);
@@ -2134,203 +2037,6 @@ static int mt7915_driver_own(struct mt7915_dev *dev, u8 band)
return 0;
}
-static int mt7915_load_patch(struct mt7915_dev *dev)
-{
- const struct mt7915_patch_hdr *hdr;
- const struct firmware *fw = NULL;
- int i, ret, sem;
-
- sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 1);
- switch (sem) {
- case PATCH_IS_DL:
- return 0;
- case PATCH_NOT_DL_SEM_SUCCESS:
- break;
- default:
- dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
- return -EAGAIN;
- }
-
- ret = request_firmware(&fw, fw_name_var(dev, ROM_PATCH),
- dev->mt76.dev);
- if (ret)
- goto out;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7915_patch_hdr *)(fw->data);
-
- dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
- be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
-
- for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
- struct mt7915_patch_sec *sec;
- const u8 *dl;
- u32 len, addr;
-
- sec = (struct mt7915_patch_sec *)(fw->data + sizeof(*hdr) +
- i * sizeof(*sec));
- if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
- PATCH_SEC_TYPE_INFO) {
- ret = -EINVAL;
- goto out;
- }
-
- addr = be32_to_cpu(sec->info.addr);
- len = be32_to_cpu(sec->info.len);
- dl = fw->data + be32_to_cpu(sec->offs);
-
- ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- DL_MODE_NEED_RSP);
- if (ret) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- goto out;
- }
-
- ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- dl, len, 4096);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to send patch\n");
- goto out;
- }
- }
-
- ret = mt76_connac_mcu_start_patch(&dev->mt76);
- if (ret)
- dev_err(dev->mt76.dev, "Failed to start patch\n");
-
-out:
- sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 0);
- switch (sem) {
- case PATCH_REL_SEM_SUCCESS:
- break;
- default:
- ret = -EAGAIN;
- dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
- break;
- }
- release_firmware(fw);
-
- return ret;
-}
-
-static int
-mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
- const struct mt7915_fw_trailer *hdr,
- const u8 *data, bool is_wa)
-{
- int i, offset = 0;
- u32 override = 0, option = 0;
-
- for (i = 0; i < hdr->n_region; i++) {
- const struct mt7915_fw_region *region;
- int err;
- u32 len, addr, mode;
-
- region = (const struct mt7915_fw_region *)((const u8 *)hdr -
- (hdr->n_region - i) * sizeof(*region));
- mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
- region->feature_set, is_wa);
- len = le32_to_cpu(region->len);
- addr = le32_to_cpu(region->addr);
-
- if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
- override = addr;
-
- err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- mode);
- if (err) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- return err;
- }
-
- err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- data + offset, len, 4096);
- if (err) {
- dev_err(dev->mt76.dev, "Failed to send firmware.\n");
- return err;
- }
-
- offset += len;
- }
-
- if (override)
- option |= FW_START_OVERRIDE;
-
- if (is_wa)
- option |= FW_START_WORKING_PDA_CR4;
-
- return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
-}
-
-static int mt7915_load_ram(struct mt7915_dev *dev)
-{
- const struct mt7915_fw_trailer *hdr;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, fw_name_var(dev, FIRMWARE_WM),
- dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
- sizeof(*hdr));
-
- dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
- hdr->fw_ver, hdr->build_date);
-
- ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, false);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
- goto out;
- }
-
- release_firmware(fw);
-
- ret = request_firmware(&fw, fw_name(dev, FIRMWARE_WA),
- dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
- sizeof(*hdr));
-
- dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
- hdr->fw_ver, hdr->build_date);
-
- ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, true);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to start WA firmware\n");
- goto out;
- }
-
- snprintf(dev->mt76.hw->wiphy->fw_version,
- sizeof(dev->mt76.hw->wiphy->fw_version),
- "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
static int
mt7915_firmware_state(struct mt7915_dev *dev, bool wa)
{
@@ -2361,11 +2067,12 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
}
}
- ret = mt7915_load_patch(dev);
+ ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH));
if (ret)
return ret;
- ret = mt7915_load_ram(dev);
+ ret = mt76_connac2_load_ram(&dev->mt76, fw_name_var(dev, FIRMWARE_WM),
+ fw_name(dev, FIRMWARE_WA));
if (ret)
return ret;
@@ -2528,7 +2235,7 @@ mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
- .headroom = sizeof(struct mt7915_mcu_txd),
+ .headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
.mcu_restart = mt76_connac_mcu_restart,
@@ -2685,7 +2392,7 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
struct edca *e = &req.edca[ac];
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT7915_MAX_WMM_SETS;
+ e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 5abde482a97f..cd1edf553fc1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -6,25 +6,6 @@
#include "../mt76_connac_mcu.h"
-struct mt7915_mcu_txd {
- __le32 txd[8];
-
- __le16 len;
- __le16 pq_id;
-
- u8 cid;
- u8 pkt_type;
- u8 set_query; /* FW don't care */
- u8 seq;
-
- u8 uc_d2b0_rev;
- u8 ext_cid;
- u8 s2d_index;
- u8 ext_cid_ack;
-
- u32 reserved[5];
-} __packed __aligned(4);
-
enum {
MCU_ATE_SET_TRX = 0x1,
MCU_ATE_SET_FREQ_OFFSET = 0xa,
@@ -32,21 +13,6 @@ enum {
MCU_ATE_CLEAN_TXQUEUE = 0x1c,
};
-struct mt7915_mcu_rxd {
- __le32 rxd[6];
-
- __le16 len;
- __le16 pkt_type_id;
-
- u8 eid;
- u8 seq;
- __le16 __rsv;
-
- u8 ext_eid;
- u8 __rsv1[2];
- u8 s2d_index;
-};
-
struct mt7915_mcu_thermal_ctrl {
u8 ctrl_id;
u8 band_idx;
@@ -63,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl {
} __packed;
struct mt7915_mcu_thermal_notify {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
struct mt7915_mcu_thermal_ctrl ctrl;
__le32 temperature;
@@ -71,7 +37,7 @@ struct mt7915_mcu_thermal_notify {
} __packed;
struct mt7915_mcu_csa_notify {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
u8 omac_idx;
u8 csa_count;
@@ -80,7 +46,7 @@ struct mt7915_mcu_csa_notify {
} __packed;
struct mt7915_mcu_bcc_notify {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
u8 band_idx;
u8 omac_idx;
@@ -89,7 +55,7 @@ struct mt7915_mcu_bcc_notify {
} __packed;
struct mt7915_mcu_rdd_report {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
u8 band_idx;
u8 long_detected;
@@ -267,9 +233,6 @@ struct mt7915_mcu_muru_stats {
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET GENMASK(3, 0)
-#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
-#define MCU_PKT_ID 0xa0
-
enum {
MCU_FW_LOG_WM,
MCU_FW_LOG_WA,
@@ -489,6 +452,12 @@ enum {
SER_RECOVER
};
+#define MT7915_MAX_BEACON_SIZE 512
+#define MT7915_MAX_INBAND_FRAME_SIZE 256
+#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+ MT7915_MAX_INBAND_FRAME_SIZE + \
+ MT7915_BEACON_UPDATE_SIZE)
+
#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_omac) + \
sizeof(struct bss_info_basic) +\
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 46ee8a7db7bc..4499a630e8f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -591,8 +591,8 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
if (intr & MT_INT_RX(MT_RXQ_MAIN))
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
- if (intr & MT_INT_RX(MT_RXQ_EXT))
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
+ if (intr & MT_INT_RX(MT_RXQ_BAND1))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1]);
if (intr & MT_INT_RX(MT_RXQ_MCU))
napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
@@ -604,8 +604,8 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
(intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
- if (intr & MT_INT_RX(MT_RXQ_EXT_WA))
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
+ if (intr & MT_INT_RX(MT_RXQ_BAND1_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1_WA]);
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
@@ -645,14 +645,14 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7915_TOKEN_SIZE,
.tx_prepare_skb = mt7915_tx_prepare_skb,
- .tx_complete_skb = mt7915_tx_complete_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
@@ -661,16 +661,11 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
};
- struct ieee80211_ops *ops;
struct mt7915_dev *dev;
struct mt76_dev *mdev;
int ret;
- ops = devm_kmemdup(pdev, &mt7915_ops, sizeof(mt7915_ops), GFP_KERNEL);
- if (!ops)
- return ERR_PTR(-ENOMEM);
-
- mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7915_ops, &drv_ops);
if (!mdev)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 4dcae6991669..54ef2a12a443 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -10,7 +10,6 @@
#include "regs.h"
#define MT7915_MAX_INTERFACES 19
-#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
#define MT7916_WTBL_SIZE 544
#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
@@ -67,7 +66,7 @@
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
#define MT7915_MIN_TWT_DUR 64
-#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
+#define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
@@ -200,8 +199,15 @@ struct mib_stats {
/* rx stats */
u32 rx_fifo_full_cnt;
u32 channel_idle_cnt;
+ u32 primary_cca_busy_time;
+ u32 secondary_cca_busy_time;
+ u32 primary_energy_detect_time;
+ u32 cck_mdrdy_time;
+ u32 ofdm_mdrdy_time;
+ u32 green_mdrdy_time;
u32 rx_vector_mismatch_cnt;
u32 rx_delimiter_fail_cnt;
+ u32 rx_mrdy_cnt;
u32 rx_len_mismatch_cnt;
u32 rx_mpdu_cnt;
u32 rx_ampdu_cnt;
@@ -342,20 +348,6 @@ enum {
};
enum {
- MT_CTX0,
- MT_HIF0 = 0x0,
-
- MT_LMAC_AC00 = 0x0,
- MT_LMAC_AC01,
- MT_LMAC_AC02,
- MT_LMAC_AC03,
- MT_LMAC_ALTX0 = 0x10,
- MT_LMAC_BMC0,
- MT_LMAC_BCN0,
- MT_LMAC_PSMP0,
-};
-
-enum {
MT_RX_SEL0,
MT_RX_SEL1,
MT_RX_SEL2, /* monitor chain */
@@ -396,7 +388,7 @@ mt7915_hw_dev(struct ieee80211_hw *hw)
static inline struct mt7915_phy *
mt7915_ext_phy(struct mt7915_dev *dev)
{
- struct mt76_phy *phy = dev->mt76.phy2;
+ struct mt76_phy *phy = dev->mt76.phys[MT_BAND1];
if (!phy)
return NULL;
@@ -557,9 +549,10 @@ bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
-void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
- struct ieee80211_key_conf *key, u32 changed);
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, u32 changed);
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -579,7 +572,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 4953be208c5e..2493c3ad3c56 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -305,7 +305,7 @@ enum offs_rev {
#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
+#define MT_MIB_SDR10(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
@@ -329,24 +329,24 @@ enum offs_rev {
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
/* in units of 'us' */
-#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
+#define MT_MIB_SDR17(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
/* units are us */
-#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
+#define MT_MIB_SDR19(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
+#define MT_MIB_SDR20(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
-#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR21(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
+#define MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
/* rx ampdu count, 32-bit */
#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
@@ -623,7 +623,7 @@ enum offs_rev {
/* WFDMA COMMON */
#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
-#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
+#define __TXQ(q) (__RXQ(q) + MT_RXQ_BAND2)
#define MT_Q_ID(q) (dev->q_id[(q)])
#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
@@ -639,7 +639,7 @@ enum offs_rev {
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q)* 0x4)
-#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
MT_RXQ_ID(q)* 0x4)
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
MT_TXQ_ID(q)* 0x4)
@@ -671,8 +671,8 @@ enum offs_rev {
#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
MT_INT_RX(MT_RXQ_MAIN_WA))
-#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_EXT) | \
- MT_INT_RX(MT_RXQ_EXT_WA) | \
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
+ MT_INT_RX(MT_RXQ_BAND1_WA) | \
MT_INT_RX(MT_RXQ_MAIN_WA))
#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
@@ -940,7 +940,7 @@ enum offs_rev {
#define MT_ADIE_TYPE_MASK BIT(1)
/* FW MODE SYNC */
-#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
+#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 20f63644e929..efb9bb8231e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -168,13 +168,14 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
}
static int
-mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
+mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
u16 cw_max, u16 txop)
{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
struct mt7915_mcu_tx req = { .total = 1 };
struct edca *e = &req.edca[0];
- e->queue = qid;
+ e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
e->set = WMM_PARAM_SET;
e->aifs = aifs;
@@ -182,7 +183,7 @@ mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
e->cw_max = cpu_to_le16(cw_max);
e->txop = cpu_to_le16(txop);
- return mt7915_mcu_update_edca(dev, &req);
+ return mt7915_mcu_update_edca(phy->dev, &req);
}
static int
@@ -244,7 +245,7 @@ done:
mt7915_tm_set_slot_time(phy, slot_time, sifs);
- return mt7915_tm_set_wmm_qid(dev,
+ return mt7915_tm_set_wmm_qid(phy,
mt76_connac_lmac_mapping(IEEE80211_AC_BE),
aifsn, cw, cw, 0);
}
@@ -774,7 +775,7 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
- q = phy->band_idx ? MT_RXQ_EXT : MT_RXQ_MAIN;
+ q = phy->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;
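Both the mcu.c and testmode.c hunks above now derive the firmware EDCA queue from the interface's WMM index as "ac + wmm_idx * MT76_CONNAC_MAX_WMM_SETS", replacing the driver-local MT7915_MAX_WMM_SETS. A minimal stand-alone sketch of that mapping follows; the constant value 4 is taken from the MT7915_MAX_WMM_SETS definition removed later in this patch, and the sample values are purely illustrative.

#include <stdio.h>

/* Four access categories per WMM parameter set (matches the removed
 * MT7915_MAX_WMM_SETS value; the shared connac constant is assumed
 * to be identical).
 */
#define MAX_WMM_SETS 4

static int edca_queue(int ac, int wmm_idx)
{
	return ac + wmm_idx * MAX_WMM_SETS;
}

int main(void)
{
	/* e.g. LMAC AC index 2 on WMM set 1 maps to firmware queue 6 */
	printf("%d\n", edca_queue(2, 1));
	return 0;
}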
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index 0a146818c623..e5d2d2e131a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -9,6 +9,7 @@ CFLAGS_trace.o := -I$(src)
mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o
mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7921-common-$(CONFIG_ACPI) += acpi_sar.o
mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o
mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o
mt7921u-y := usb.o usb_mac.o
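The Makefile hunk above builds the new acpi_sar.o object only when CONFIG_ACPI is set, so non-ACPI kernels never pull in the table-parsing code. Below is a hedged sketch of the declaration/stub split such a gate typically relies on; the real declarations live in mt7921.h, which is not part of this hunk, so treat the exact names and signatures as assumptions.

#ifdef CONFIG_ACPI
int mt7921_init_acpi_sar(struct mt7921_dev *dev);
int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default);
#else
/* No-op stubs keep callers unconditional when ACPI support is compiled out. */
static inline int mt7921_init_acpi_sar(struct mt7921_dev *dev)
{
	return 0;
}

static inline int mt7921_init_acpi_sar_power(struct mt7921_phy *phy,
					     bool set_default)
{
	return 0;
}
#endif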
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
new file mode 100644
index 000000000000..be4f07ad3af9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/acpi.h>
+#include "mt7921.h"
+
+static int
+mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *sar_root, *sar_unit;
+ struct mt76_dev *mdev = &dev->mt76;
+ acpi_handle root, handle;
+ acpi_status status;
+ u32 i = 0;
+
+ root = ACPI_HANDLE(mdev->dev);
+ if (!root)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root, method, &handle);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ sar_root = buf.pointer;
+ if (sar_root->type != ACPI_TYPE_PACKAGE ||
+ sar_root->package.count < 4 ||
+ sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
+ dev_err(mdev->dev, "sar cnt = %d\n",
+ sar_root->package.count);
+ goto free;
+ }
+
+ if (!*tbl) {
+ *tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
+ GFP_KERNEL);
+ if (!*tbl)
+ goto free;
+ }
+ if (len)
+ *len = sar_root->package.count;
+
+ for (i = 0; i < sar_root->package.count; i++) {
+ sar_unit = &sar_root->package.elements[i];
+
+ if (sar_unit->type != ACPI_TYPE_INTEGER)
+ break;
+ *(*tbl + i) = (u8)sar_unit->integer.value;
+ }
+free:
+ kfree(sar_root);
+
+ return (i == sar_root->package.count) ? 0 : -EINVAL;
+}
+
+/* MTCL : Country List Table for 6G band */
+static int
+mt7921_asar_acpi_read_mtcl(struct mt7921_dev *dev, u8 **table, u8 *version)
+{
+ *version = (mt7921_acpi_read(dev, MT7921_ACPI_MTCL, table, NULL) < 0)
+ ? 1 : 2;
+ return 0;
+}
+
+/* MTDS : Dynamic SAR Power Table */
+static int
+mt7921_asar_acpi_read_mtds(struct mt7921_dev *dev, u8 **table, u8 version)
+{
+ int len, ret, sarlen, prelen, tblcnt;
+ bool enable;
+
+ ret = mt7921_acpi_read(dev, MT7921_ACPI_MTDS, table, &len);
+ if (ret)
+ return ret;
+
+ /* Table content validation */
+ switch (version) {
+ case 1:
+ enable = ((struct mt7921_asar_dyn *)*table)->enable;
+ sarlen = sizeof(struct mt7921_asar_dyn_limit);
+ prelen = sizeof(struct mt7921_asar_dyn);
+ break;
+ case 2:
+ enable = ((struct mt7921_asar_dyn_v2 *)*table)->enable;
+ sarlen = sizeof(struct mt7921_asar_dyn_limit_v2);
+ prelen = sizeof(struct mt7921_asar_dyn_v2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tblcnt = (len - prelen) / sarlen;
+ if (!enable ||
+ tblcnt > MT7921_ASAR_MAX_DYN || tblcnt < MT7921_ASAR_MIN_DYN)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* MTGS : Geo SAR Power Table */
+static int
+mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version)
+{
+ int len, ret = 0, sarlen, prelen, tblcnt;
+
+ ret = mt7921_acpi_read(dev, MT7921_ACPI_MTGS, table, &len);
+ if (ret)
+ return ret;
+
+ /* Table content validation */
+ switch (version) {
+ case 1:
+ sarlen = sizeof(struct mt7921_asar_geo_limit);
+ prelen = sizeof(struct mt7921_asar_geo);
+ break;
+ case 2:
+ sarlen = sizeof(struct mt7921_asar_geo_limit_v2);
+ prelen = sizeof(struct mt7921_asar_geo_v2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tblcnt = (len - prelen) / sarlen;
+ if (tblcnt > MT7921_ASAR_MAX_GEO || tblcnt < MT7921_ASAR_MIN_GEO)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int mt7921_init_acpi_sar(struct mt7921_dev *dev)
+{
+ struct mt7921_acpi_sar *asar;
+ int ret;
+
+ asar = devm_kzalloc(dev->mt76.dev, sizeof(*asar), GFP_KERNEL);
+ if (!asar)
+ return -ENOMEM;
+
+ mt7921_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+
+ /* MTDS is mandatory. Return error if table is invalid */
+ ret = mt7921_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->dyn);
+ devm_kfree(dev->mt76.dev, asar->countrylist);
+ devm_kfree(dev->mt76.dev, asar);
+ return ret;
+ }
+
+ /* MTGS is optional */
+ ret = mt7921_asar_acpi_read_mtgs(dev, (u8 **)&asar->geo, asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->geo);
+ asar->geo = NULL;
+ }
+
+ dev->phy.acpisar = asar;
+
+ return 0;
+}
+
+static s8
+mt7921_asar_get_geo_pwr(struct mt7921_phy *phy,
+ enum nl80211_band band, s8 dyn_power)
+{
+ struct mt7921_acpi_sar *asar = phy->acpisar;
+ struct mt7921_asar_geo_band *band_pwr;
+ s8 geo_power;
+ u8 idx, max;
+
+ if (!asar->geo)
+ return dyn_power;
+
+ switch (phy->mt76->dev->region) {
+ case NL80211_DFS_FCC:
+ idx = 0;
+ break;
+ case NL80211_DFS_ETSI:
+ idx = 1;
+ break;
+ default: /* WW */
+ idx = 2;
+ break;
+ }
+
+ if (asar->ver == 1) {
+ band_pwr = &asar->geo->tbl[idx].band[0];
+ max = ARRAY_SIZE(asar->geo->tbl[idx].band);
+ } else {
+ band_pwr = &asar->geo_v2->tbl[idx].band[0];
+ max = ARRAY_SIZE(asar->geo_v2->tbl[idx].band);
+ }
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ idx = 0;
+ break;
+ case NL80211_BAND_5GHZ:
+ idx = 1;
+ break;
+ case NL80211_BAND_6GHZ:
+ idx = 2;
+ break;
+ default:
+ return dyn_power;
+ }
+
+ if (idx >= max)
+ return dyn_power;
+
+ geo_power = (band_pwr + idx)->pwr;
+ dyn_power += (band_pwr + idx)->offset;
+
+ return min(geo_power, dyn_power);
+}
+
+static s8
+mt7921_asar_range_pwr(struct mt7921_phy *phy,
+ const struct cfg80211_sar_freq_ranges *range,
+ u8 idx)
+{
+ const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+ struct mt7921_acpi_sar *asar = phy->acpisar;
+ u8 *limit, band, max;
+
+ if (!capa)
+ return 127;
+
+ if (asar->ver == 1) {
+ limit = &asar->dyn->tbl[0].frp[0];
+ max = ARRAY_SIZE(asar->dyn->tbl[0].frp);
+ } else {
+ limit = &asar->dyn_v2->tbl[0].frp[0];
+ max = ARRAY_SIZE(asar->dyn_v2->tbl[0].frp);
+ }
+
+ if (idx >= max)
+ return 127;
+
+ if (range->start_freq >= 5945)
+ band = NL80211_BAND_6GHZ;
+ else if (range->start_freq >= 5150)
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_2GHZ;
+
+ return mt7921_asar_get_geo_pwr(phy, band, limit[idx]);
+}
+
+int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
+{
+ const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+ int i;
+
+ if (!phy->acpisar)
+ return 0;
+
+ /* When ACPI SAR is enabled in HW, we should apply rules for .frp
+ * 1. w/o .sar_specs : set ACPI SAR power as the default value
+ * 2. w/ .sar_specs : set power with min(.sar_specs, ACPI_SAR)
+ */
+ for (i = 0; i < capa->num_freq_ranges; i++) {
+ struct mt76_freq_range_power *frp = &phy->mt76->frp[i];
+
+ frp->range = set_default ? &capa->freq_ranges[i] : frp->range;
+ if (!frp->range)
+ continue;
+
+ frp->power = min_t(s8, set_default ? 127 : frp->power,
+ mt7921_asar_range_pwr(phy, frp->range, i));
+ }
+
+ return 0;
+}
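mt7921_asar_get_geo_pwr() above combines the two tables by adding the per-band geo offset to the dynamic SAR limit and then capping the result at the geo power. The following is a self-contained user-space sketch of just that arithmetic; the function and variable names are illustrative, not driver API.

#include <stdio.h>

typedef signed char s8;

/* Mirrors the tail of mt7921_asar_get_geo_pwr(): the adjusted dynamic
 * limit may never exceed the geo limit for the chosen region/band.
 */
static s8 combine_sar_limits(s8 dyn_power, s8 geo_power, s8 geo_offset)
{
	dyn_power += geo_offset;

	return dyn_power < geo_power ? dyn_power : geo_power;
}

int main(void)
{
	/* dyn limit 20, geo limit 18, offset +1 -> clamped to 18 */
	printf("%d\n", combine_sar_limits(20, 18, 1));
	return 0;
}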
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
new file mode 100644
index 000000000000..23f86bfae0c0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#ifndef __MT7921_ACPI_SAR_H
+#define __MT7921_ACPI_SAR_H
+
+#define MT7921_ASAR_MIN_DYN 1
+#define MT7921_ASAR_MAX_DYN 8
+#define MT7921_ASAR_MIN_GEO 3
+#define MT7921_ASAR_MAX_GEO 8
+
+#define MT7921_ACPI_MTCL "MTCL"
+#define MT7921_ACPI_MTDS "MTDS"
+#define MT7921_ACPI_MTGS "MTGS"
+
+struct mt7921_asar_dyn_limit {
+ u8 idx;
+ u8 frp[5];
+} __packed;
+
+struct mt7921_asar_dyn {
+ u8 names[4];
+ u8 enable;
+ u8 nr_tbl;
+ struct mt7921_asar_dyn_limit tbl[0];
+} __packed;
+
+struct mt7921_asar_dyn_limit_v2 {
+ u8 idx;
+ u8 frp[11];
+} __packed;
+
+struct mt7921_asar_dyn_v2 {
+ u8 names[4];
+ u8 enable;
+ u8 rsvd;
+ u8 nr_tbl;
+ struct mt7921_asar_dyn_limit_v2 tbl[0];
+} __packed;
+
+struct mt7921_asar_geo_band {
+ u8 pwr;
+ u8 offset;
+} __packed;
+
+struct mt7921_asar_geo_limit {
+ u8 idx;
+ /* 0:2G, 1:5G */
+ struct mt7921_asar_geo_band band[2];
+} __packed;
+
+struct mt7921_asar_geo {
+ u8 names[4];
+ u8 version;
+ u8 nr_tbl;
+ struct mt7921_asar_geo_limit tbl[0];
+} __packed;
+
+struct mt7921_asar_geo_limit_v2 {
+ u8 idx;
+ /* 0:2G, 1:5G, 2:6G */
+ struct mt7921_asar_geo_band band[3];
+} __packed;
+
+struct mt7921_asar_geo_v2 {
+ u8 names[4];
+ u8 version;
+ u8 rsvd;
+ u8 nr_tbl;
+ struct mt7921_asar_geo_limit_v2 tbl[0];
+} __packed;
+
+struct mt7921_asar_cl {
+ u8 names[4];
+ u8 version;
+ u8 mode_6g;
+ u8 cl6g[6];
+} __packed;
+
+struct mt7921_acpi_sar {
+ u8 ver;
+ union {
+ struct mt7921_asar_dyn *dyn;
+ struct mt7921_asar_dyn_v2 *dyn_v2;
+ };
+ union {
+ struct mt7921_asar_geo *geo;
+ struct mt7921_asar_geo_v2 *geo_v2;
+ };
+ struct mt7921_asar_cl *countrylist;
+};
+
+#endif
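Each ASAR table above is a short fixed preamble followed by an array of per-entry records, which is why mt7921_asar_acpi_read_mtds()/_mtgs() recover the entry count as (len - prelen) / sarlen and then range-check it. Here is a stand-alone sketch of that computation using the v1 MTDS layout; the byte sizes come from struct mt7921_asar_dyn (6 bytes) and struct mt7921_asar_dyn_limit (6 bytes) above, and the limits mirror MT7921_ASAR_MIN_DYN/MT7921_ASAR_MAX_DYN.

#include <stdio.h>

#define ASAR_MIN_DYN 1
#define ASAR_MAX_DYN 8

/* Entry count = (package length - preamble length) / per-entry length,
 * rejected if it falls outside the supported range.
 */
static int asar_dyn_entry_count(int len, int prelen, int sarlen)
{
	int tblcnt = (len - prelen) / sarlen;

	if (tblcnt < ASAR_MIN_DYN || tblcnt > ASAR_MAX_DYN)
		return -1;	/* invalid table */

	return tblcnt;
}

int main(void)
{
	/* 30-byte v1 table: 6-byte preamble + 4 entries of 6 bytes each */
	printf("%d\n", asar_dyn_entry_count(30, 6, 6));
	return 0;
}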
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index 3a6b158b779e..d1f10f6d9adc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -5,20 +5,6 @@
#include "../dma.h"
#include "mac.h"
-static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
-{
- int i, err;
-
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0);
- if (err < 0)
- return err;
-
- for (i = 0; i <= MT_TXQ_PSD; i++)
- phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
-
- return 0;
-}
-
static int mt7921_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7921_dev *dev;
@@ -31,7 +17,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
- mt7921_mcu_tx_cleanup(dev);
+ mt76_connac_tx_cleanup(&dev->mt76);
if (napi_complete(napi))
mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
@@ -250,8 +236,9 @@ int mt7921_dma_init(struct mt7921_dev *dev)
return ret;
/* init tx queue */
- ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
- MT7921_TX_RING_SIZE);
+ ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
+ MT7921_TX_RING_SIZE,
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 4a8675634f80..cd960e23770f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -34,14 +34,13 @@ mt7921_regd_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
mt76_connac_mcu_set_channel_domain(hw->priv);
- mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ mt7921_set_tx_sar_pwr(hw, NULL);
mt7921_mutex_release(dev);
}
@@ -53,8 +52,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
struct wiphy *wiphy = hw->wiphy;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = 64;
- hw->max_tx_aggregation_subframes = 128;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -97,6 +96,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -289,6 +290,8 @@ int mt7921_register_device(struct mt7921_dev *dev)
if (!mt76_is_mmio(&dev->mt76))
hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
+ mt7921_init_acpi_sar(dev);
+
ret = mt7921_init_wcid(dev);
if (ret)
return ret;
@@ -304,7 +307,7 @@ int mt7921_register_device(struct mt7921_dev *dev)
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index a630ddbf19e5..47f0aa81ab02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -9,10 +9,6 @@
#include "mac.h"
#include "mcu.h"
-#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
-#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
- IEEE80211_RADIOTAP_HE_##f)
-
static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
u16 idx, bool unicast)
{
@@ -169,183 +165,6 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll);
static void
-mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
- struct ieee80211_radiotap_he *he,
- __le32 *rxv)
-{
- u32 ru_h, ru_l;
- u8 ru, offs = 0;
-
- ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
- ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
- ru = (u8)(ru_l | ru_h << 4);
-
- status->bw = RATE_INFO_BW_HE_RU;
-
- switch (ru) {
- case 0 ... 36:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
-
- he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
- he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-}
-
-static void
-mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he_mu mu_known = {
- .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
- HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN),
- .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN) |
- HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
- };
- struct ieee80211_radiotap_he_mu *he_mu;
-
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
-
- he_mu = skb_push(skb, sizeof(mu_known));
- memcpy(he_mu, &mu_known, sizeof(mu_known));
-
-#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
-
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
- if (status->he_dcm)
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
-
- he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
- MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
- le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
-
- he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
-
- if (status->bw >= RATE_INFO_BW_40) {
- he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
- he_mu->ru_ch2[0] =
- le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
- }
-
- if (status->bw >= RATE_INFO_BW_80) {
- he_mu->ru_ch1[1] =
- le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
- he_mu->ru_ch2[1] =
- le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
- }
-}
-
-static void
-mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he known = {
- .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
- HE_BITS(DATA1_DATA_DCM_KNOWN) |
- HE_BITS(DATA1_STBC_KNOWN) |
- HE_BITS(DATA1_CODING_KNOWN) |
- HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
- HE_BITS(DATA1_DOPPLER_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
- HE_BITS(DATA1_BSS_COLOR_KNOWN),
- .data2 = HE_BITS(DATA2_GI_KNOWN) |
- HE_BITS(DATA2_TXBF_KNOWN) |
- HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
- HE_BITS(DATA2_TXOP_KNOWN),
- };
- struct ieee80211_radiotap_he *he = NULL;
- u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
-
- status->flag |= RX_FLAG_RADIOTAP_HE;
-
- he = skb_push(skb, sizeof(known));
- memcpy(he, &known, sizeof(known));
-
- he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
- HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
- he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
- he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
- le16_encode_bits(ltf_size,
- IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
- if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
- he->data5 |= HE_BITS(DATA5_TXBF);
- he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
- HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
-
- switch (mode) {
- case MT_PHY_TYPE_HE_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
- HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
- HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_EXT_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_MU:
- he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
-
- mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
- mt7921_mac_decode_he_mu_radiotap(skb, rxv);
- break;
- case MT_PHY_TYPE_HE_TB:
- he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
-
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
-
- mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
- break;
- default:
- break;
- }
-}
-
-static void
mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
struct mt76_rx_status *status, u8 chfreq)
{
@@ -399,86 +218,6 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_rssi_iter, skb);
}
-/* The HW does not translate the mac header to 802.3 for mesh point */
-static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
- struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
- __le32 *rxd = (__le32 *)skb->data;
- struct ieee80211_sta *sta;
- struct ieee80211_vif *vif;
- struct ieee80211_hdr hdr;
- u16 frame_control;
-
- if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
- MT_RXD3_NORMAL_U2M)
- return -EINVAL;
-
- if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
- return -EINVAL;
-
- if (!msta || !msta->vif)
- return -EINVAL;
-
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
-
- /* store the info from RXD and ethhdr to avoid being overridden */
- frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
- hdr.frame_control = cpu_to_le16(frame_control);
- hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
- hdr.duration_id = 0;
-
- ether_addr_copy(hdr.addr1, vif->addr);
- ether_addr_copy(hdr.addr2, sta->addr);
- switch (frame_control & (IEEE80211_FCTL_TODS |
- IEEE80211_FCTL_FROMDS)) {
- case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
- break;
- case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr->h_source);
- break;
- case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
- break;
- case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr->h_source);
- break;
- default:
- break;
- }
-
- skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
- eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
- ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
- ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
- else
- skb_pull(skb, 2);
-
- if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
- IEEE80211_HT_CTL_LEN);
- if (ieee80211_is_data_qos(hdr.frame_control)) {
- __le16 qos_ctrl;
-
- qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
- memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
- IEEE80211_QOS_CTL_LEN);
- }
-
- if (ieee80211_has_a4(hdr.frame_control))
- memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
- else
- memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
-
- return 0;
-}
-
static int
mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
@@ -496,9 +235,10 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
+ struct mt7921_sta *msta;
u16 seq_ctrl = 0;
__le16 fc = 0;
- u32 mode = 0;
+ u8 mode = 0;
int i, idx;
memset(status, 0, sizeof(*status));
@@ -526,8 +266,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- struct mt7921_sta *msta;
-
msta = container_of(status->wcid, struct mt7921_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
@@ -642,9 +380,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u8 stbc, gi;
u32 v0, v1;
- bool cck;
+ int ret;
rxv = rxd;
rxd += 2;
@@ -672,79 +409,10 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
- stbc = FIELD_GET(MT_PRXV_STBC, v0);
- gi = FIELD_GET(MT_PRXV_SGI, v0);
- cck = false;
-
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 11)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
- return -EINVAL;
- }
-
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
+ rxv, &mode);
+ if (ret < 0)
+ return ret;
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
rxd += 18;
@@ -762,8 +430,18 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
- if (mt7921_reverse_frag0_hdr_trans(skb, hdr_gap))
+ struct ieee80211_vif *vif;
+ int err;
+
+ if (!msta || !msta->vif)
return -EINVAL;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
+ if (err)
+ return err;
+
hdr_trans = false;
} else {
skb_pull(skb, hdr_gap);
@@ -796,7 +474,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_assoc_rssi(dev, skb);
if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt7921_mac_decode_he_radiotap(skb, rxv, mode);
+ mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -808,217 +486,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
return 0;
}
-static void
-mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid)
-{
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- u8 fc_type, fc_stype;
- u16 ethertype;
- bool wmm = false;
- u32 val;
-
- if (wcid->sta) {
- struct ieee80211_sta *sta;
-
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
- wmm = sta->wme;
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
- FIELD_PREP(MT_TXD1_TID, tid);
-
- ethertype = get_unaligned_be16(&skb->data[12]);
- if (ethertype >= ETH_P_802_3_MIN)
- val |= MT_TXD1_ETH_802_3;
-
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = IEEE80211_FTYPE_DATA >> 2;
- fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
-
- txwi[2] |= cpu_to_le32(val);
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-static void
-mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- bool multicast = is_multicast_ether_addr(hdr->addr1);
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
- u8 fc_type, fc_stype;
- u32 val;
-
- if (ieee80211_is_action(fc) &&
- mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
- u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
-
- txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
- tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
- } else if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
- u16 control = le16_to_cpu(bar->control);
-
- tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
- FIELD_PREP(MT_TXD1_HDR_INFO,
- ieee80211_get_hdrlen_from_skb(skb) / 2) |
- FIELD_PREP(MT_TXD1_TID, tid);
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
- fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
- FIELD_PREP(MT_TXD2_MULTICAST, multicast);
-
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- val |= MT_TXD2_BIP;
- txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
- }
-
- if (!ieee80211_is_data(fc) || multicast ||
- info->flags & IEEE80211_TX_CTL_USE_MINRATE)
- val |= MT_TXD2_FIX_RATE;
-
- txwi[2] |= cpu_to_le32(val);
-
- if (ieee80211_is_beacon(fc)) {
- txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
- txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
- }
-
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
-
- if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar;
-
- bar = (struct ieee80211_bar *)skb->data;
- seqno = le16_to_cpu(bar->start_seq_num);
- }
-
- val = MT_TXD3_SN_VALID |
- FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
- txwi[3] |= cpu_to_le32(val);
- txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
- }
-
- if (mt76_is_mmio(&dev->mt76)) {
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
- } else {
- val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
- FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
- txwi[8] |= cpu_to_le32(val);
- }
-}
-
-void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key, int pid,
- bool beacon)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_phy *mphy = &dev->mphy;
- u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
- bool is_mmio = mt76_is_mmio(&dev->mt76);
- u32 sz_txd = is_mmio ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
- bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- u16 tx_count = 15;
- u32 val;
-
- if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-
- omac_idx = mvif->omac_idx;
- wmm_idx = mvif->wmm_idx;
- }
-
- if (beacon) {
- p_fmt = MT_TX_TYPE_FW;
- q_idx = MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
- p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = MT_LMAC_ALTX0;
- } else {
- p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
- mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
- }
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
- FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
- FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
- txwi[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
- FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
-
- txwi[1] = cpu_to_le32(val);
- txwi[2] = 0;
-
- val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
- if (key)
- val |= MT_TXD3_PROTECT_FRAME;
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- val |= MT_TXD3_NO_ACK;
-
- txwi[3] = cpu_to_le32(val);
- txwi[4] = 0;
-
- val = FIELD_PREP(MT_TXD5_PID, pid);
- if (pid >= MT_PACKET_ID_FIRST)
- val |= MT_TXD5_TX_STATUS_HOST;
- txwi[5] = cpu_to_le32(val);
-
- txwi[6] = 0;
- txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
-
- if (is_8023)
- mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
- else
- mt7921_mac_write_txwi_80211(dev, txwi, skb, key);
-
- if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- int rateidx = vif ? ffs(vif->bss_conf.basic_rates) - 1 : 0;
- u16 rate, mode;
-
- /* hardware won't add HTC for mgmt/ctrl frame */
- txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
-
- rate = mt76_calculate_default_rate(mphy, rateidx);
- mode = rate >> 8;
- rate &= GENMASK(7, 0);
- rate |= FIELD_PREP(MT_TX_RATE_MODE, mode);
-
- val = MT_TXD6_FIXED_BW |
- FIELD_PREP(MT_TXD6_TX_RATE, rate);
- txwi[6] |= cpu_to_le32(val);
- txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
- }
-}
-EXPORT_SYMBOL_GPL(mt7921_mac_write_txwi);
-
void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
struct mt7921_sta *msta;
@@ -1044,123 +511,6 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
}
EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr);
-static bool
-mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
- __le32 *txs_data)
-{
- struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
- struct mt76_sta_stats *stats = &msta->stats;
- struct ieee80211_supported_band *sband;
- struct mt76_dev *mdev = &dev->mt76;
- struct ieee80211_tx_info *info;
- struct rate_info rate = {};
- struct sk_buff_head list;
- u32 txrate, txs, mode;
- struct sk_buff *skb;
- bool cck = false;
-
- mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (!skb)
- goto out;
-
- info = IEEE80211_SKB_CB(skb);
- txs = le32_to_cpu(txs_data[0]);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
-
- info->status.rates[0].idx = -1;
-
- if (!wcid->sta)
- goto out;
-
- txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
-
- rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
- rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
-
- if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
- stats->tx_nss[rate.nss - 1]++;
- if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
- stats->tx_mcs[rate.mcs]++;
-
- mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mphy.sband_5g.sband;
- else
- sband = &dev->mphy.sband_2g.sband;
-
- rate.mcs = mt76_get_rate(dev->mphy.dev, sband, rate.mcs, cck);
- rate.legacy = sband->bitrates[rate.mcs].bitrate;
- break;
- case MT_PHY_TYPE_HT:
- case MT_PHY_TYPE_HT_GF:
- if (rate.mcs > 31)
- goto out;
-
- rate.flags = RATE_INFO_FLAGS_MCS;
- if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
- rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_VHT:
- if (rate.mcs > 9)
- goto out;
-
- rate.flags = RATE_INFO_FLAGS_VHT_MCS;
- break;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- case MT_PHY_TYPE_HE_MU:
- if (rate.mcs > 11)
- goto out;
-
- rate.he_gi = wcid->rate.he_gi;
- rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
- rate.flags = RATE_INFO_FLAGS_HE_MCS;
- break;
- default:
- goto out;
- }
- stats->tx_mode[mode]++;
-
- switch (FIELD_GET(MT_TXS0_BW, txs)) {
- case IEEE80211_STA_RX_BW_160:
- rate.bw = RATE_INFO_BW_160;
- stats->tx_bw[3]++;
- break;
- case IEEE80211_STA_RX_BW_80:
- rate.bw = RATE_INFO_BW_80;
- stats->tx_bw[2]++;
- break;
- case IEEE80211_STA_RX_BW_40:
- rate.bw = RATE_INFO_BW_40;
- stats->tx_bw[1]++;
- break;
- default:
- rate.bw = RATE_INFO_BW_20;
- stats->tx_bw[0]++;
- break;
- }
- wcid->rate = rate;
-
-out:
- if (skb)
- mt76_tx_status_skb_done(mdev, skb, &list);
- mt76_tx_status_unlock(mdev, &list);
-
- return !!skb;
-}
-
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
struct mt7921_sta *msta = NULL;
@@ -1187,12 +537,13 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
if (!wcid)
goto out;
- mt7921_mac_add_txs_skb(dev, wcid, pid, txs_data);
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
+ &msta->stats);
if (!wcid->sta)
goto out;
- msta = container_of(wcid, struct mt7921_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
@@ -1387,9 +738,9 @@ void mt7921_mac_reset_work(struct work_struct *work)
reset_work);
struct ieee80211_hw *hw = mt76_hw(dev);
struct mt76_connac_pm *pm = &dev->pm;
- int i;
+ int i, ret;
- dev_err(dev->mt76.dev, "chip reset\n");
+ dev_dbg(dev->mt76.dev, "chip reset\n");
dev->hw_full_reset = true;
ieee80211_stop_queues(hw);
@@ -1397,11 +748,14 @@ void mt7921_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
- mutex_lock(&dev->mt76.mutex);
- for (i = 0; i < 10; i++)
- if (!mt7921_dev_reset(dev))
+ for (i = 0; i < 10; i++) {
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt7921_dev_reset(dev);
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (!ret)
break;
- mutex_unlock(&dev->mt76.mutex);
+ }
if (i == 10)
dev_err(dev->mt76.dev, "chip reset failed\n");
@@ -1538,10 +892,12 @@ void mt7921_pm_wake_work(struct work_struct *work)
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i)
napi_schedule(&mdev->napi[i]);
+ local_bh_enable();
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
- mt7921_mcu_tx_cleanup(dev);
+ mt76_connac_tx_cleanup(mdev);
}
if (test_bit(MT76_STATE_RUNNING, &mphy->state))
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
@@ -1617,7 +973,7 @@ void mt7921_coredump_work(struct work_struct *work)
if (!skb)
break;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
dev_kfree_skb(skb);
continue;
@@ -1646,7 +1002,7 @@ mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
__le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
memset(txwi, 0, MT_SDIO_TXD_SIZE);
- mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
+ mt76_connac2_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
skb_push(skb, MT_SDIO_TXD_SIZE);
}
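One subtle change in the mac.c hunks above: mt7921_pm_wake_work() now brackets the mt76_for_each_q_rx()/napi_schedule() loop with local_bh_disable()/local_bh_enable(). napi_schedule() raises a softirq and is normally invoked from interrupt context; when it is called from process context (here, a workqueue), bottom halves must be disabled around it so the raised softirq is actually processed on return. A hedged kernel-style sketch of the pattern; the function below is a placeholder, not part of this driver.

#include <linux/netdevice.h>
#include <linux/bottom_half.h>

/* Sketch: scheduling NAPI instances from process context must be done
 * with bottom halves disabled, matching the hunk above.
 */
static void example_wake_rx(struct napi_struct *napis, int n)
{
	int i;

	local_bh_disable();
	for (i = 0; i < n; i++)
		napi_schedule(&napis[i]);
	local_bh_enable();
}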
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index 79447e2d0143..8afec600364f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -4,6 +4,8 @@
#ifndef __MT7921_MAC_H
#define __MT7921_MAC_H
+#include "../mt76_connac2_mac.h"
+
#define MT_CT_PARSE_LEN 72
#define MT_CT_DMA_BUF_NUM 2
@@ -27,294 +29,6 @@ enum rx_pkt_type {
PKT_TYPE_NORMAL_MCU,
};
-/* RXD DW1 */
-#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
-#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
-#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
-#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
-#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
-#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
-#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
-#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
-#define MT_RXD1_NORMAL_CM BIT(23)
-#define MT_RXD1_NORMAL_CLM BIT(24)
-#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
-#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
-#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
-#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
-#define MT_RXD1_NORMAL_SPP_EN BIT(29)
-#define MT_RXD1_NORMAL_ADD_OM BIT(30)
-#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
-
-/* RXD DW2 */
-#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
-#define MT_RXD2_NORMAL_CO_ANT BIT(6)
-#define MT_RXD2_NORMAL_BF_CQI BIT(7)
-#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
-#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
-#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
-#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
-#define MT_RXD2_NORMAL_MU_BAR BIT(21)
-#define MT_RXD2_NORMAL_SW_BIT BIT(22)
-#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
-#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
-#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
-#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
-#define MT_RXD2_NORMAL_FRAG BIT(27)
-#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
-#define MT_RXD2_NORMAL_NDATA BIT(29)
-#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
-#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
-
-/* RXD DW3 */
-#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
-#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
-#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
-#define MT_RXD3_NORMAL_U2M BIT(0)
-#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
-#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
-#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
-#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
-#define MT_RXD3_NORMAL_AMSDU BIT(22)
-#define MT_RXD3_NORMAL_MESH BIT(23)
-#define MT_RXD3_NORMAL_MHCP BIT(24)
-#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
-#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
-#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
-#define MT_RXD3_NORMAL_MORE BIT(28)
-#define MT_RXD3_NORMAL_UNWANT BIT(29)
-#define MT_RXD3_NORMAL_RX_DROP BIT(30)
-#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
-
-/* RXD DW4 */
-#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
-#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
-#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
-#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
-#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
-#define MT_RXD4_NORMAL_CLS BIT(10)
-#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
-#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
-#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
-#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
-#define MT_RXD3_NORMAL_PF_MODE BIT(29)
-#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
-
-/* RXD GROUP4 */
-#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
-#define MT_RXD6_TA_LO GENMASK(31, 16)
-
-#define MT_RXD7_TA_HI GENMASK(31, 0)
-
-#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
-#define MT_RXD8_QOS_CTL GENMASK(31, 16)
-
-#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
-
-/* P-RXV DW0 */
-#define MT_PRXV_TX_RATE GENMASK(6, 0)
-#define MT_PRXV_TX_DCM BIT(4)
-#define MT_PRXV_TX_ER_SU_106T BIT(5)
-#define MT_PRXV_NSTS GENMASK(9, 7)
-#define MT_PRXV_TXBF BIT(10)
-#define MT_PRXV_HT_AD_CODE BIT(11)
-#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
-#define MT_PRXV_SGI GENMASK(16, 15)
-#define MT_PRXV_STBC GENMASK(23, 22)
-#define MT_PRXV_TX_MODE GENMASK(27, 24)
-#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
-
-/* P-RXV DW1 */
-#define MT_PRXV_RCPI3 GENMASK(31, 24)
-#define MT_PRXV_RCPI2 GENMASK(23, 16)
-#define MT_PRXV_RCPI1 GENMASK(15, 8)
-#define MT_PRXV_RCPI0 GENMASK(7, 0)
-#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
-
-/* C-RXV */
-#define MT_CRXV_HT_STBC GENMASK(1, 0)
-#define MT_CRXV_TX_MODE GENMASK(7, 4)
-#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
-#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
-#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
-#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
-#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
-#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
-#define MT_CRXV_HE_UPLINK BIT(31)
-
-#define MT_CRXV_HE_RU0 GENMASK(7, 0)
-#define MT_CRXV_HE_RU1 GENMASK(15, 8)
-#define MT_CRXV_HE_RU2 GENMASK(23, 16)
-#define MT_CRXV_HE_RU3 GENMASK(31, 24)
-#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
-
-#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
-#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
-#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
-#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
-
-#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
-#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
-#define MT_CRXV_HE_BEAM_CHNG BIT(13)
-#define MT_CRXV_HE_DOPPLER BIT(16)
-
-#define MT_CRXV_SNR GENMASK(18, 13)
-#define MT_CRXV_FOE_LO GENMASK(31, 19)
-#define MT_CRXV_FOE_HI GENMASK(6, 0)
-#define MT_CRXV_FOE_SHIFT 13
-
-enum tx_header_format {
- MT_HDR_FORMAT_802_3,
- MT_HDR_FORMAT_CMD,
- MT_HDR_FORMAT_802_11,
- MT_HDR_FORMAT_802_11_EXT,
-};
-
-enum tx_pkt_type {
- MT_TX_TYPE_CT,
- MT_TX_TYPE_SF,
- MT_TX_TYPE_CMD,
- MT_TX_TYPE_FW,
-};
-
-enum tx_port_idx {
- MT_TX_PORT_IDX_LMAC,
- MT_TX_PORT_IDX_MCU
-};
-
-enum tx_mcu_port_q_idx {
- MT_TX_MCU_PORT_RX_Q0 = 0x20,
- MT_TX_MCU_PORT_RX_Q1,
- MT_TX_MCU_PORT_RX_Q2,
- MT_TX_MCU_PORT_RX_Q3,
- MT_TX_MCU_PORT_RX_FWDL = 0x3e
-};
-
-#define MT_CT_INFO_APPLY_TXD BIT(0)
-#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
-#define MT_CT_INFO_MGMT_FRAME BIT(2)
-#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
-#define MT_CT_INFO_HSR2_TX BIT(4)
-#define MT_CT_INFO_FROM_HOST BIT(7)
-
-#define MT_TXD_SIZE (8 * 4)
-
-#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
-#define MT_SDIO_TAIL_SIZE 8
-#define MT_SDIO_HDR_SIZE 4
-#define MT_USB_TAIL_SIZE 4
-
-#define MT_TXD0_Q_IDX GENMASK(31, 25)
-#define MT_TXD0_PKT_FMT GENMASK(24, 23)
-#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
-#define MT_TXD0_TX_BYTES GENMASK(15, 0)
-
-#define MT_TXD1_LONG_FORMAT BIT(31)
-#define MT_TXD1_TGID BIT(30)
-#define MT_TXD1_OWN_MAC GENMASK(29, 24)
-#define MT_TXD1_AMSDU BIT(23)
-#define MT_TXD1_TID GENMASK(22, 20)
-#define MT_TXD1_HDR_PAD GENMASK(19, 18)
-#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
-#define MT_TXD1_HDR_INFO GENMASK(15, 11)
-#define MT_TXD1_ETH_802_3 BIT(15)
-#define MT_TXD1_VTA BIT(10)
-#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
-
-#define MT_TXD2_FIX_RATE BIT(31)
-#define MT_TXD2_FIXED_RATE BIT(30)
-#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
-#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
-#define MT_TXD2_FRAG GENMASK(15, 14)
-#define MT_TXD2_HTC_VLD BIT(13)
-#define MT_TXD2_DURATION BIT(12)
-#define MT_TXD2_BIP BIT(11)
-#define MT_TXD2_MULTICAST BIT(10)
-#define MT_TXD2_RTS BIT(9)
-#define MT_TXD2_SOUNDING BIT(8)
-#define MT_TXD2_NDPA BIT(7)
-#define MT_TXD2_NDP BIT(6)
-#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
-#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
-
-#define MT_TXD3_SN_VALID BIT(31)
-#define MT_TXD3_PN_VALID BIT(30)
-#define MT_TXD3_SW_POWER_MGMT BIT(29)
-#define MT_TXD3_BA_DISABLE BIT(28)
-#define MT_TXD3_SEQ GENMASK(27, 16)
-#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
-#define MT_TXD3_TX_COUNT GENMASK(10, 6)
-#define MT_TXD3_TIMING_MEASURE BIT(5)
-#define MT_TXD3_DAS BIT(4)
-#define MT_TXD3_EEOSP BIT(3)
-#define MT_TXD3_EMRD BIT(2)
-#define MT_TXD3_PROTECT_FRAME BIT(1)
-#define MT_TXD3_NO_ACK BIT(0)
-
-#define MT_TXD4_PN_LOW GENMASK(31, 0)
-
-#define MT_TXD5_PN_HIGH GENMASK(31, 16)
-#define MT_TXD5_MD BIT(15)
-#define MT_TXD5_ADD_BA BIT(14)
-#define MT_TXD5_TX_STATUS_HOST BIT(10)
-#define MT_TXD5_TX_STATUS_MCU BIT(9)
-#define MT_TXD5_TX_STATUS_FMT BIT(8)
-#define MT_TXD5_PID GENMASK(7, 0)
-
-#define MT_TXD6_TX_IBF BIT(31)
-#define MT_TXD6_TX_EBF BIT(30)
-#define MT_TXD6_TX_RATE GENMASK(29, 16)
-#define MT_TXD6_SGI GENMASK(15, 14)
-#define MT_TXD6_HELTF GENMASK(13, 12)
-#define MT_TXD6_LDPC BIT(11)
-#define MT_TXD6_SPE_ID_IDX BIT(10)
-#define MT_TXD6_ANT_ID GENMASK(7, 4)
-#define MT_TXD6_DYN_BW BIT(3)
-#define MT_TXD6_FIXED_BW BIT(2)
-#define MT_TXD6_BW GENMASK(1, 0)
-
-#define MT_TXD7_TXD_LEN GENMASK(31, 30)
-#define MT_TXD7_UDP_TCP_SUM BIT(29)
-#define MT_TXD7_IP_SUM BIT(28)
-
-#define MT_TXD7_TYPE GENMASK(21, 20)
-#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
-
-#define MT_TXD7_PSE_FID GENMASK(27, 16)
-#define MT_TXD7_SPE_IDX GENMASK(15, 11)
-#define MT_TXD7_HW_AMSDU BIT(10)
-#define MT_TXD7_TX_TIME GENMASK(9, 0)
-
-#define MT_TXD8_L_TYPE GENMASK(5, 4)
-#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
-
-#define MT_TX_RATE_STBC BIT(13)
-#define MT_TX_RATE_NSS GENMASK(12, 10)
-#define MT_TX_RATE_MODE GENMASK(9, 6)
-#define MT_TX_RATE_SU_EXT_TONE BIT(5)
-#define MT_TX_RATE_DCM BIT(4)
-#define MT_TX_RATE_IDX GENMASK(3, 0)
-
-#define MT_TXP_MAX_BUF_NUM 6
-
-struct mt7921_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- __le16 rept_wds_wcid;
- u8 nbuf;
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
-struct mt7921_tx_free {
- __le16 rx_byte_cnt;
- __le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
- __le32 info[];
-} __packed __aligned(4);
-
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
@@ -325,56 +39,6 @@ struct mt7921_tx_free {
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
-#define MT_TXS0_BW GENMASK(30, 29)
-#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
-#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
-#define MT_TXS0_TX_RATE GENMASK(13, 0)
-
-#define MT_TXS2_WCID GENMASK(25, 16)
-
-#define MT_TXS3_PID GENMASK(31, 24)
-
-static inline struct mt7921_txp_common *
-mt7921_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
-}
-
-#define MT_HW_TXP_MAX_MSDU_NUM 4
-#define MT_HW_TXP_MAX_BUF_NUM 4
-
-#define MT_MSDU_ID_VALID BIT(15)
-
-#define MT_TXD_LEN_MASK GENMASK(11, 0)
-#define MT_TXD_LEN_MSDU_LAST BIT(14)
-#define MT_TXD_LEN_AMSDU_LAST BIT(15)
-#define MT_TXD_LEN_LAST BIT(15)
-
-struct mt7921_txp_ptr {
- __le32 buf0;
- __le16 len0;
- __le16 len1;
- __le32 buf1;
-} __packed __aligned(4);
-
-struct mt7921_hw_txp {
- __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
- struct mt7921_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
-} __packed __aligned(4);
-
-struct mt7921_txp_common {
- union {
- struct mt7921_hw_txp hw;
- };
-};
-
#define MT_WTBL_TXRX_CAP_RATE_OFFSET 7
#define MT_WTBL_TXRX_RATE_G2_HE 24
#define MT_WTBL_TXRX_RATE_G2 12
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 80279f342109..1438a9f8d1fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -257,7 +257,7 @@ int __mt7921_start(struct mt7921_phy *phy)
if (err)
return err;
- err = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ err = mt7921_set_tx_sar_pwr(mphy->hw, NULL);
if (err)
return err;
@@ -322,7 +322,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = mvif->mt76.idx;
mvif->phy = phy;
mvif->mt76.band_idx = 0;
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
+ mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
true);
@@ -336,7 +336,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -352,6 +352,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mtxq->wcid = idx;
}
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
out:
mt7921_mutex_release(dev);
@@ -495,8 +496,21 @@ static void
mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7921_dev *dev = priv;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ bool pm_enable = dev->pm.enable;
+ int err;
+
+ err = mt7921_mcu_set_beacon_filter(dev, vif, pm_enable);
+ if (err < 0)
+ return;
- mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
+ if (pm_enable) {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ } else {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
+ }
}
static void
@@ -548,7 +562,7 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
mt7921_mutex_acquire(dev);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ret = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ ret = mt7921_set_tx_sar_pwr(hw, NULL);
if (ret)
goto out;
}
@@ -567,7 +581,8 @@ out:
}
static int
-mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -637,7 +652,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@@ -653,15 +668,6 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
-
- mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
- true);
- mt7921_mcu_sta_update(dev, NULL, vif, true,
- MT76_STA_INFO_STATE_NONE);
- }
-
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED))
mt7921_mcu_uni_add_beacon_offload(dev, hw, vif,
@@ -677,8 +683,7 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
- if (dev->pm.enable)
- mt7921_mcu_set_beacon_filter(dev, vif, info->assoc);
+ mt7921_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
}
if (changed & BSS_CHANGED_ARP_FILTER) {
@@ -707,7 +712,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.phy_idx = mvif->mt76.band_idx;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->last_txs = jiffies;
@@ -1177,7 +1182,7 @@ void mt7921_scan_work(struct work_struct *work)
scan_work.work);
while (true) {
- struct mt7921_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
struct sk_buff *skb;
spin_lock_bh(&phy->dev->mt76.lock);
@@ -1187,7 +1192,7 @@ void mt7921_scan_work(struct work_struct *work)
if (!skb)
break;
- rxd = (struct mt7921_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) {
ieee80211_sched_scan_results(phy->mt76->hw);
} else if (test_and_clear_bit(MT76_HW_SCANNING,
@@ -1450,15 +1455,14 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
if (!idx)
return;
- skb = __mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) +
- idx * sizeof(struct in6_addr), GFP_ATOMIC);
- if (!skb)
- return;
-
req_hdr.arpns.ips_num = idx;
req_hdr.arpns.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)
+ idx * sizeof(struct in6_addr));
- skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ skb = __mt76_mcu_msg_alloc(&dev->mt76, &req_hdr,
+ sizeof(req_hdr) + idx * sizeof(struct in6_addr),
+ sizeof(req_hdr), GFP_ATOMIC);
+ if (!skb)
+ return;
for (i = 0; i < idx; i++)
skb_put_data(skb, &ns_addrs[i].in6_u, sizeof(struct in6_addr));
@@ -1469,20 +1473,33 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
}
#endif
+int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ if (sar) {
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ return err;
+ }
+
+ mt7921_init_acpi_sar_power(mt7921_hw_phy(hw), !sar);
+
+ err = mt76_connac_mcu_set_rate_txpower(mphy);
+
+ return err;
+}
+
static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt76_phy *mphy = hw->priv;
int err;
mt7921_mutex_acquire(dev);
- err = mt76_init_sar_power(hw, sar);
- if (err)
- goto out;
-
- err = mt76_connac_mcu_set_rate_txpower(mphy);
-out:
+ err = mt7921_set_tx_sar_pwr(hw, sar);
mt7921_mutex_release(dev);
return err;
@@ -1500,6 +1517,44 @@ mt7921_channel_switch_beacon(struct ieee80211_hw *hw,
mt7921_mutex_release(dev);
}
+static int
+mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ int err;
+
+ err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
+ true);
+ if (err)
+ return err;
+
+ err = mt7921_mcu_set_bss_pm(dev, vif, true);
+ if (err)
+ return err;
+
+ return mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+}
+
+static void
+mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ int err;
+
+ err = mt7921_mcu_set_bss_pm(dev, vif, false);
+ if (err)
+ return;
+
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1510,6 +1565,8 @@ const struct ieee80211_ops mt7921_ops = {
.conf_tx = mt7921_conf_tx,
.configure_filter = mt7921_configure_filter,
.bss_info_changed = mt7921_bss_info_changed,
+ .start_ap = mt7921_start_ap,
+ .stop_ap = mt7921_stop_ap,
.sta_state = mt7921_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7921_set_key,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 12bab18c4171..da12d0ae0835 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1,79 +1,15 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
-#include <linux/firmware.h>
#include <linux/fs.h>
#include "mt7921.h"
#include "mt7921_trace.h"
#include "mcu.h"
#include "mac.h"
-struct mt7921_patch_hdr {
- char build_date[16];
- char platform[4];
- __be32 hw_sw_ver;
- __be32 patch_ver;
- __be16 checksum;
- u16 reserved;
- struct {
- __be32 patch_ver;
- __be32 subsys;
- __be32 feature;
- __be32 n_region;
- __be32 crc;
- u32 reserved[11];
- } desc;
-} __packed;
-
-struct mt7921_patch_sec {
- __be32 type;
- __be32 offs;
- __be32 size;
- union {
- __be32 spec[13];
- struct {
- __be32 addr;
- __be32 len;
- __be32 sec_key_idx;
- __be32 align_len;
- u32 reserved[9];
- } info;
- };
-} __packed;
-
-struct mt7921_fw_trailer {
- u8 chip_id;
- u8 eco_code;
- u8 n_region;
- u8 format_ver;
- u8 format_flag;
- u8 reserved[2];
- char fw_ver[10];
- char build_date[15];
- u32 crc;
-} __packed;
-
-struct mt7921_fw_region {
- __le32 decomp_crc;
- __le32 decomp_len;
- __le32 decomp_blk_sz;
- u8 reserved[4];
- __le32 addr;
- __le32 len;
- u8 feature_set;
- u8 reserved1[15];
-} __packed;
-
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
-#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
-#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
-#define PATCH_SEC_ENC_TYPE_AES 0x01
-#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02
-#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
-#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
-
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
@@ -83,7 +19,7 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
if (!skb)
return -EINVAL;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
res = (struct mt7921_mcu_eeprom_info *)skb->data;
buf = dev->eeprom.data + le32_to_cpu(res->addr);
@@ -96,7 +32,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
- struct mt7921_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
@@ -107,11 +43,12 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return -ETIMEDOUT;
}
- rxd = (struct mt7921_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (seq != rxd->seq)
return -EAGAIN;
- if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
+ cmd == MCU_CMD(PATCH_FINISH_REQ)) {
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
@@ -140,90 +77,13 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
event = (struct mt7921_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
} else {
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
}
return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
-int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, int *wait_seq)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
- struct mt7921_uni_txd *uni_txd;
- struct mt7921_mcu_txd *mcu_txd;
- __le32 *txd;
- u32 val;
- u8 seq;
-
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
-
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
-
- if (cmd == MCU_CMD(FW_SCATTER))
- goto exit;
-
- txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
- txd = (__le32 *)skb_push(skb, txd_len);
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
- FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
- FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
- txd[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
- txd[1] = cpu_to_le32(val);
-
- if (cmd & __MCU_CMD_FIELD_UNI) {
- uni_txd = (struct mt7921_uni_txd *)txd;
- uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
- uni_txd->option = MCU_CMD_UNI_EXT_ACK;
- uni_txd->cid = cpu_to_le16(mcu_cmd);
- uni_txd->s2d_index = MCU_S2D_H2N;
- uni_txd->pkt_type = MCU_PKT_ID;
- uni_txd->seq = seq;
-
- goto exit;
- }
-
- mcu_txd = (struct mt7921_mcu_txd *)txd;
- mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
- mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
- MT_TX_MCU_PORT_RX_Q0));
- mcu_txd->pkt_type = MCU_PKT_ID;
- mcu_txd->seq = seq;
- mcu_txd->cid = mcu_cmd;
- mcu_txd->s2d_index = MCU_S2D_H2N;
- mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
-
- if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
- if (cmd & __MCU_CMD_FIELD_QUERY)
- mcu_txd->set_query = MCU_Q_QUERY;
- else
- mcu_txd->set_query = MCU_Q_SET;
- mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
- } else {
- mcu_txd->set_query = MCU_Q_NA;
- }
-
-exit:
- if (wait_seq)
- *wait_seq = seq;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message);
-
#ifdef CONFIG_PM
static int
@@ -304,7 +164,7 @@ mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_connac_beacon_loss_event *event;
struct mt76_phy *mphy = &dev->mt76.phy;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt76_connac_beacon_loss_event *)skb->data;
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -318,7 +178,7 @@ mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_mcu_bss_event *event;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt76_connac_mcu_bss_event *)skb->data;
if (event->is_absent)
ieee80211_stop_queues(mphy->hw);
@@ -338,7 +198,7 @@ mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
u8 content[512];
} __packed * msg;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
msg = (struct mt7921_debug_msg *)skb->data;
if (msg->type == 3) { /* fw log */
@@ -361,7 +221,7 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
u8 reserved[3];
} __packed * event;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt7921_mcu_lp_event *)skb->data;
trace_lp_event(dev, event->state);
@@ -372,7 +232,7 @@ mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_mcu_tx_done_event *event;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt7921_mcu_tx_done_event *)skb->data;
mt7921_mac_add_txs(dev, event->txs);
@@ -381,8 +241,9 @@ mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
- struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->eid) {
case MCU_EVENT_BSS_BEACON_LOSS:
mt7921_mcu_connection_loss_event(dev, skb);
@@ -416,12 +277,12 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
- struct mt7921_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
if (skb_linearize(skb))
return;
- rxd = (struct mt7921_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->eid == 0x6) {
mt76_mcu_rx_event(&dev->mt76, skb);
@@ -469,34 +330,6 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
enable, false);
}
-static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
-{
- u32 mode = DL_MODE_NEED_RSP;
-
- if (info == PATCH_SEC_NOT_SUPPORT)
- return mode;
-
- switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
- case PATCH_SEC_ENC_TYPE_PLAIN:
- break;
- case PATCH_SEC_ENC_TYPE_AES:
- mode |= DL_MODE_ENCRYPT;
- mode |= FIELD_PREP(DL_MODE_KEY_IDX,
- (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
- mode |= DL_MODE_RESET_SEC_IV;
- break;
- case PATCH_SEC_ENC_TYPE_SCRAMBLE:
- mode |= DL_MODE_ENCRYPT;
- mode |= DL_CONFIG_ENCRY_MODE_SEL;
- mode |= DL_MODE_RESET_SEC_IV;
- break;
- default:
- dev_err(dev->mt76.dev, "Encryption type not support!\n");
- }
-
- return mode;
-}
-
static char *mt7921_patch_name(struct mt7921_dev *dev)
{
char *ret;
@@ -509,152 +342,6 @@ static char *mt7921_patch_name(struct mt7921_dev *dev)
return ret;
}
-static int mt7921_load_patch(struct mt7921_dev *dev)
-{
- const struct mt7921_patch_hdr *hdr;
- const struct firmware *fw = NULL;
- int i, ret, sem, max_len;
-
- max_len = mt76_is_sdio(&dev->mt76) ? 2048 : 4096;
-
- sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
- switch (sem) {
- case PATCH_IS_DL:
- return 0;
- case PATCH_NOT_DL_SEM_SUCCESS:
- break;
- default:
- dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
- return -EAGAIN;
- }
-
- ret = request_firmware(&fw, mt7921_patch_name(dev), dev->mt76.dev);
- if (ret)
- goto out;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7921_patch_hdr *)(fw->data);
-
- dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
- be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
-
- for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
- struct mt7921_patch_sec *sec;
- const u8 *dl;
- u32 len, addr, mode;
- u32 sec_info = 0;
-
- sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) +
- i * sizeof(*sec));
- if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
- PATCH_SEC_TYPE_INFO) {
- ret = -EINVAL;
- goto out;
- }
-
- addr = be32_to_cpu(sec->info.addr);
- len = be32_to_cpu(sec->info.len);
- dl = fw->data + be32_to_cpu(sec->offs);
- sec_info = be32_to_cpu(sec->info.sec_key_idx);
- mode = mt7921_get_data_mode(dev, sec_info);
-
- ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- mode);
- if (ret) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- goto out;
- }
-
- ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- dl, len, max_len);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to send patch\n");
- goto out;
- }
- }
-
- ret = mt76_connac_mcu_start_patch(&dev->mt76);
- if (ret)
- dev_err(dev->mt76.dev, "Failed to start patch\n");
-
- if (mt76_is_sdio(&dev->mt76)) {
- /* activate again */
- ret = __mt7921_mcu_fw_pmctrl(dev);
- if (!ret)
- ret = __mt7921_mcu_drv_pmctrl(dev);
- }
-
-out:
- sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
- switch (sem) {
- case PATCH_REL_SEM_SUCCESS:
- break;
- default:
- ret = -EAGAIN;
- dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
- break;
- }
- release_firmware(fw);
-
- return ret;
-}
-
-static int
-mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
- const struct mt7921_fw_trailer *hdr,
- const u8 *data, bool is_wa)
-{
- int i, offset = 0, max_len;
- u32 override = 0, option = 0;
-
- max_len = mt76_is_sdio(&dev->mt76) ? 2048 : 4096;
-
- for (i = 0; i < hdr->n_region; i++) {
- const struct mt7921_fw_region *region;
- int err;
- u32 len, addr, mode;
-
- region = (const struct mt7921_fw_region *)((const u8 *)hdr -
- (hdr->n_region - i) * sizeof(*region));
- mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
- region->feature_set, is_wa);
- len = le32_to_cpu(region->len);
- addr = le32_to_cpu(region->addr);
-
- if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
- override = addr;
-
- err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- mode);
- if (err) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- return err;
- }
-
- err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- data + offset, len, max_len);
- if (err) {
- dev_err(dev->mt76.dev, "Failed to send firmware.\n");
- return err;
- }
-
- offset += len;
- }
-
- if (override)
- option |= FW_START_OVERRIDE;
-
- if (is_wa)
- option |= FW_START_WORKING_PDA_CR4;
-
- return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
-}
-
static char *mt7921_ram_name(struct mt7921_dev *dev)
{
char *ret;
@@ -667,44 +354,6 @@ static char *mt7921_ram_name(struct mt7921_dev *dev)
return ret;
}
-static int mt7921_load_ram(struct mt7921_dev *dev)
-{
- const struct mt7921_fw_trailer *hdr;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, mt7921_ram_name(dev), dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7921_fw_trailer *)(fw->data + fw->size -
- sizeof(*hdr));
-
- dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
- hdr->fw_ver, hdr->build_date);
-
- ret = mt7921_mcu_send_ram_firmware(dev, hdr, fw->data, false);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
- goto out;
- }
-
- snprintf(dev->mt76.hw->wiphy->fw_version,
- sizeof(dev->mt76.hw->wiphy->fw_version),
- "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
static int mt7921_load_firmware(struct mt7921_dev *dev)
{
int ret;
@@ -715,11 +364,18 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
goto fw_loaded;
}
- ret = mt7921_load_patch(dev);
+ ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
if (ret)
return ret;
- ret = mt7921_load_ram(dev);
+ if (mt76_is_sdio(&dev->mt76)) {
+ /* activate again */
+ ret = __mt7921_mcu_fw_pmctrl(dev);
+ if (!ret)
+ ret = __mt7921_mcu_drv_pmctrl(dev);
+ }
+
+ ret = mt76_connac2_load_ram(&dev->mt76, mt7921_ram_name(dev), NULL);
if (ret)
return ret;
@@ -771,12 +427,6 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
-void mt7921_mcu_exit(struct mt7921_dev *dev)
-{
- skb_queue_purge(&dev->mt76.mcu.res_q);
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
-
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -972,7 +622,7 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.ps = {
.tag = cpu_to_le16(UNI_BSS_INFO_PS),
.len = cpu_to_le16(sizeof(struct ps_tlv)),
- .ps_state = vif->bss_conf.ps ? 2 : 0,
+ .ps_state = vif->cfg.ps ? 2 : 0,
},
};
@@ -1019,7 +669,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
&bcnft_req, sizeof(bcnft_req), true);
}
-static int
+int
mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
@@ -1036,7 +686,7 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
u8 pad;
} req = {
.bss_idx = mvif->mt76.idx,
- .aid = cpu_to_le16(vif->bss_conf.aid),
+ .aid = cpu_to_le16(vif->cfg.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
};
@@ -1048,9 +698,6 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
};
int err;
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
-
err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
&req_hdr, sizeof(req_hdr), false);
if (err < 0 || !enable)
@@ -1132,7 +779,6 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
struct ieee80211_vif *vif,
bool enable)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
int err;
if (enable) {
@@ -1140,8 +786,6 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- ieee80211_hw_set(hw, CONNECTION_MONITOR);
mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
return 0;
@@ -1151,8 +795,6 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
- __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
return 0;
@@ -1255,10 +897,13 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
};
struct sk_buff *skb;
+ /* support enable/update process only
+ * disable flow would be handled in bss stop handler automatically
+ */
if (!enable)
- goto out;
+ return -EOPNOTSUPP;
- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
if (!skb)
return -EINVAL;
@@ -1268,8 +913,8 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
return -EINVAL;
}
- mt7921_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
- wcid, NULL, 0, true);
+ mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
+ skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
@@ -1282,7 +927,6 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
}
dev_kfree_skb(skb);
-out:
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&req, sizeof(req), true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index 77cc0cc5b436..0d20f7d8d474 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -6,76 +6,6 @@
#include "../mt76_connac_mcu.h"
-struct mt7921_mcu_txd {
- __le32 txd[8];
-
- __le16 len;
- __le16 pq_id;
-
- u8 cid;
- u8 pkt_type;
- u8 set_query; /* FW don't care */
- u8 seq;
-
- u8 uc_d2b0_rev;
- u8 ext_cid;
- u8 s2d_index;
- u8 ext_cid_ack;
-
- u32 reserved[5];
-} __packed __aligned(4);
-
-/**
- * struct mt7921_uni_txd - mcu command descriptor for firmware v3
- * @txd: hardware descriptor
- * @len: total length not including txd
- * @cid: command identifier
- * @pkt_type: must be 0xa0 (cmd packet by long format)
- * @frag_n: fragment number
- * @seq: sequence number
- * @checksum: 0 mean there is no checksum
- * @s2d_index: index for command source and destination
- * Definition | value | note
- * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
- * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
- * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
- * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
- *
- * @option: command option
- * BIT[0]: UNI_CMD_OPT_BIT_ACK
- * set to 1 to request a fw reply
- * if UNI_CMD_OPT_BIT_0_ACK is set and UNI_CMD_OPT_BIT_2_SET_QUERY
- * is set, mcu firmware will send response event EID = 0x01
- * (UNI_EVENT_ID_CMD_RESULT) to the host.
- * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
- * 0: original command
- * 1: unified command
- * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
- * 0: QUERY command
- * 1: SET command
- */
-struct mt7921_uni_txd {
- __le32 txd[8];
-
- /* DW1 */
- __le16 len;
- __le16 cid;
-
- /* DW2 */
- u8 reserved;
- u8 pkt_type;
- u8 frag_n;
- u8 seq;
-
- /* DW3 */
- __le16 checksum;
- u8 s2d_index;
- u8 option;
-
- /* DW4 */
- u8 reserved2[4];
-} __packed __aligned(4);
-
struct mt7921_mcu_tx_done_event {
u8 pid;
u8 status;
@@ -108,21 +38,6 @@ enum {
MCU_EXT_EVENT_RATE_REPORT = 0x87,
};
-struct mt7921_mcu_rxd {
- __le32 rxd[6];
-
- __le16 len;
- __le16 pkt_type_id;
-
- u8 eid;
- u8 seq;
- __le16 __rsv;
-
- u8 ext_eid;
- u8 __rsv1[2];
- u8 s2d_index;
-};
-
struct mt7921_mcu_eeprom_info {
__le32 addr;
__le32 valid;
@@ -135,9 +50,6 @@ struct mt7921_mcu_eeprom_info {
#define MT_RA_RATE_DCM_EN BIT(4)
#define MT_RA_RATE_BW GENMASK(14, 13)
-#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
-#define MCU_PKT_ID 0xa0
-
struct mt7921_mcu_uni_event {
u8 cid;
u8 pad[3];
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 5ca584bb2fc6..c161031ac62a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -8,9 +8,9 @@
#include <linux/ktime.h>
#include "../mt76_connac_mcu.h"
#include "regs.h"
+#include "acpi_sar.h"
#define MT7921_MAX_INTERFACES 4
-#define MT7921_MAX_WMM_SETS 4
#define MT7921_WTBL_SIZE 20
#define MT7921_WTBL_RESERVED (MT7921_WTBL_SIZE - 1)
#define MT7921_WTBL_STA (MT7921_WTBL_RESERVED - \
@@ -171,6 +171,9 @@ struct mt7921_phy {
struct sk_buff_head scan_event_list;
struct delayed_work scan_work;
+#ifdef CONFIG_ACPI
+ struct mt7921_acpi_sar *acpisar;
+#endif
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
@@ -247,16 +250,6 @@ struct mt7921_txpwr {
} data[TXPWR_MAX_NUM];
};
-enum {
- MT_LMAC_AC00,
- MT_LMAC_AC01,
- MT_LMAC_AC02,
- MT_LMAC_AC03,
- MT_LMAC_ALTX0 = 0x10,
- MT_LMAC_BMC0,
- MT_LMAC_BCN0,
-};
-
static inline struct mt7921_phy *
mt7921_hw_phy(struct ieee80211_hw *hw)
{
@@ -279,7 +272,6 @@ mt7921_hw_dev(struct ieee80211_hw *hw)
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
extern const struct ieee80211_ops mt7921_ops;
-extern struct pci_driver mt7921_pci_driver;
u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr);
@@ -291,6 +283,8 @@ int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
+int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
@@ -301,7 +295,6 @@ int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb);
-void mt7921_mcu_exit(struct mt7921_dev *dev);
static inline void mt7921_irq_enable(struct mt7921_dev *dev, u32 mask)
{
@@ -352,12 +345,6 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
-static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev)
-{
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
-}
-
static inline void
mt7921_skb_add_usb_sdio_hdr(struct mt7921_dev *dev, struct sk_buff *skb,
int type)
@@ -392,7 +379,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7921_tx_worker(struct mt76_worker *w);
-void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7921_tx_token_put(struct mt7921_dev *dev);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -424,14 +410,8 @@ int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
-void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key, int pid,
- bool beacon);
void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
void mt7921_mac_sta_poll(struct mt7921_dev *dev);
-int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, int *wait_seq);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
@@ -481,4 +461,22 @@ int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
bool enable);
+#ifdef CONFIG_ACPI
+int mt7921_init_acpi_sar(struct mt7921_dev *dev);
+int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default);
+#else
+static inline int
+mt7921_init_acpi_sar(struct mt7921_dev *dev)
+{
+ return 0;
+}
+
+static inline int
+mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
+{
+ return 0;
+}
+#endif
+int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index b5fb22b8e086..ea3069d18c35 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -116,7 +116,7 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt7921_mcu_drv_pmctrl(dev);
mt7921_dma_cleanup(dev);
mt7921_wfsys_reset(dev);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
tasklet_disable(&dev->irq_tasklet);
}
@@ -230,14 +230,14 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7921_txp_common),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
- .tx_complete_skb = mt7921e_tx_complete_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_check = mt7921e_rx_check,
.rx_skb = mt7921e_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
@@ -247,7 +247,6 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt7921_update_channel,
};
-
static const struct mt7921_hif_ops mt7921_pcie_ops = {
.init_reset = mt7921e_init_reset,
.reset = mt7921e_mac_reset,
@@ -359,9 +358,9 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
-#ifdef CONFIG_PM
-static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mt7921_pci_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
@@ -391,8 +390,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
napi_disable(&mdev->napi[i]);
}
- pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
-
/* wait until dma is idle */
mt76_poll(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
@@ -412,8 +409,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (err)
goto restore_napi;
- pci_save_state(pdev);
- err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err)
goto restore_napi;
@@ -436,19 +431,14 @@ restore_suspend:
return err;
}
-static int mt7921_pci_resume(struct pci_dev *pdev)
+static int mt7921_pci_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
int i, err;
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- pci_restore_state(pdev);
-
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
return err;
@@ -488,17 +478,15 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
-struct pci_driver mt7921_pci_driver = {
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7921_pm_ops, mt7921_pci_suspend, mt7921_pci_resume);
+
+static struct pci_driver mt7921_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7921_pci_device_table,
.probe = mt7921_pci_probe,
.remove = mt7921_pci_remove,
-#ifdef CONFIG_PM
- .suspend = mt7921_pci_suspend,
- .resume = mt7921_pci_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = pm_sleep_ptr(&mt7921_pm_ops),
};
module_pci_driver(mt7921_pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 5ca14dbbdd26..e1800674089a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -5,37 +5,6 @@
#include "../dma.h"
#include "mac.h"
-static void
-mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct mt7921_hw_txp *txp = txp_ptr;
- struct mt7921_txp_ptr *ptr = &txp->ptr[0];
- int i, nbuf = tx_info->nbuf - 1;
-
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->nbuf = 1;
-
- txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
-
- for (i = 0; i < nbuf; i++) {
- u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
- u32 addr = tx_info->buf[i + 1].addr;
-
- if (i == nbuf - 1)
- len |= MT_TXD_LEN_LAST;
-
- if (i & 1) {
- ptr->buf1 = cpu_to_le32(addr);
- ptr->len1 = cpu_to_le16(len);
- ptr++;
- } else {
- ptr->buf0 = cpu_to_le32(addr);
- ptr->len0 = cpu_to_le16(len);
- }
- }
-}
-
int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@@ -44,8 +13,8 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct mt76_connac_hw_txp *txp;
struct mt76_txwi_cache *t;
- struct mt7921_txp_common *txp;
int id, pid;
u8 *txwi = (u8 *)txwi_ptr;
@@ -72,12 +41,12 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- pid, false);
+ mt76_connac2_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, key,
+ pid, qid, 0);
- txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
- memset(txp, 0, sizeof(struct mt7921_txp_common));
- mt7921_write_hw_txp(dev, tx_info, txp, id);
+ txp = (struct mt76_connac_hw_txp *)(txwi + MT_TXD_SIZE);
+ memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
tx_info->skb = DMA_DUMMY_DATA;
@@ -85,37 +54,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
static void
-mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- struct mt7921_txp_common *txp;
- int i;
-
- txp = mt7921_txwi_to_txp(dev, t);
-
- for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
- struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
- bool last;
- u16 len;
-
- len = le16_to_cpu(ptr->len0);
- last = len & MT_TXD_LEN_LAST;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
- DMA_TO_DEVICE);
- if (last)
- break;
-
- len = le16_to_cpu(ptr->len1);
- last = len & MT_TXD_LEN_LAST;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
- DMA_TO_DEVICE);
- if (last)
- break;
- }
-}
-
-static void
mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, bool clear_status,
struct list_head *free_list)
@@ -124,7 +62,7 @@ mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
__le32 *txwi;
u16 wcid_idx;
- mt7921_txp_skb_unmap(mdev, t);
+ mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
@@ -150,7 +88,8 @@ out:
static void
mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
- struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
@@ -165,11 +104,11 @@ mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
- if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ if (WARN_ON_ONCE((void *)&tx_info[count] > end))
return;
for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
+ u32 msdu, info = le32_to_cpu(tx_info[i]);
u8 stat;
/* 1'b1: new wcid pair.
@@ -262,29 +201,6 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
-void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7921_txp_common *txp;
- u16 token;
-
- txp = mt7921_txwi_to_txp(mdev, e->txwi);
- token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
- t = mt76_token_put(mdev, token);
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index 36669e5aeef3..5efda694fb9d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -26,10 +26,17 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
enum mt76_mcuq_id txq = MT_MCUQ_WM;
int ret;
- ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, seq);
if (ret)
return ret;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(SUSPEND) ||
+ cmd == MCU_UNI_CMD(OFFLOAD))
+ mdev->mcu.timeout = HZ;
+ else
+ mdev->mcu.timeout = 3 * HZ;
+
if (cmd == MCU_CMD(FW_SCATTER))
txq = MT_MCUQ_FWDL;
@@ -39,7 +46,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int mt7921e_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921_mcu_ops = {
- .headroom = sizeof(struct mt7921_mcu_txd),
+ .headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
.mcu_restart = mt76_connac_mcu_restart,
@@ -102,7 +109,7 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int i, err = 0;
+ int i;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
@@ -114,12 +121,12 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
+ return -EIO;
}
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
- return err;
+ return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index af26d59fa2f0..487acd6e2be8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -48,7 +48,7 @@ static void mt7921s_unregister_device(struct mt7921_dev *dev)
mt76s_deinit(&dev->mt76);
mt7921s_wfsys_reset(dev);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index 54a5c712a3c3..e038d7404323 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -29,10 +29,17 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (dev->fw_assert)
return -EBUSY;
- ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, seq);
if (ret)
return ret;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(SUSPEND) ||
+ cmd == MCU_UNI_CMD(OFFLOAD))
+ mdev->mcu.timeout = HZ;
+ else
+ mdev->mcu.timeout = 3 * HZ;
+
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
@@ -72,7 +79,8 @@ static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
int mt7921s_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921s_mcu_ops = {
- .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+ .headroom = MT_SDIO_HDR_SIZE +
+ sizeof(struct mt76_connac2_mcu_txd),
.tailroom = MT_SDIO_TAIL_SIZE,
.mcu_skb_send_msg = mt7921s_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
@@ -99,8 +107,8 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int err = 0;
u32 status;
+ int err;
sdio_claim_host(func);
@@ -118,8 +126,7 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (err < 0) {
dev_err(dev->mt76.dev, "driver own failed\n");
- err = -EIO;
- goto out;
+ return -EIO;
}
clear_bit(MT76_STATE_PM, &mphy->state);
@@ -127,8 +134,8 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
-out:
- return err;
+
+ return 0;
}
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
@@ -136,8 +143,8 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int err = 0;
u32 status;
+ int err;
sdio_claim_host(func);
@@ -148,7 +155,7 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
2000, 1000000);
if (err < 0) {
dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
- goto err;
+ goto out;
}
}
@@ -156,18 +163,18 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
+out:
sdio_release_host(func);
-err:
if (err < 0) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
+ return -EIO;
}
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
- return err;
+ return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index dc38baef273a..dd3b8884e162 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -102,10 +102,17 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
u32 pad, ep;
int ret;
- ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, seq);
if (ret)
return ret;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(SUSPEND) ||
+ cmd == MCU_UNI_CMD(OFFLOAD))
+ mdev->mcu.timeout = HZ;
+ else
+ mdev->mcu.timeout = 3 * HZ;
+
if (cmd != MCU_CMD(FW_SCATTER))
ep = MT_EP_OUT_INBAND_CMD;
else
@@ -125,7 +132,8 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
static int mt7921u_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mcu_ops = {
- .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+ .headroom = MT_SDIO_HDR_SIZE +
+ sizeof(struct mt76_connac2_mcu_txd),
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7921u_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
@@ -158,7 +166,7 @@ static void mt7921u_cleanup(struct mt7921_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt7921u_wfsys_reset(dev);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
mt76u_queues_deinit(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
index cd2f09743d2f..efbd3954c883 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
@@ -185,7 +185,7 @@ int mt7921u_init_reset(struct mt7921_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
@@ -208,7 +208,7 @@ int mt7921u_mac_reset(struct mt7921_dev *dev)
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index def7f325f5c5..aba2a9865821 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -350,7 +350,6 @@ int mt76s_alloc_tx(struct mt76_dev *dev)
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = i;
dev->phy.q_tx[i] = q;
}
@@ -358,7 +357,6 @@ int mt76s_alloc_tx(struct mt76_dev *dev)
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = MT_MCUQ_WM;
dev->q_mcu[MT_MCUQ_WM] = q;
return 0;
@@ -517,8 +515,8 @@ static void mt76s_tx_status_data(struct work_struct *work)
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
@@ -530,7 +528,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOSPC;
skb->prev = skb->next = NULL;
- err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 382b45639f26..71fd3fbfa7d2 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -50,8 +50,8 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
q->queued < q->ndesc / 2) {
int ret;
- ret = dev->queue_ops->tx_queue_skb(dev, q, skb_get(skb), wcid,
- NULL);
+ ret = dev->queue_ops->tx_queue_skb(dev, q, qid, skb_get(skb),
+ wcid, NULL);
if (ret < 0)
break;
@@ -101,7 +101,6 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_FROMDS;
struct mt76_testmode_data *td = &phy->test;
- bool ext_phy = phy != &phy->dev->phy;
struct sk_buff **frag_tail, *head;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
@@ -136,9 +135,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_NO_PS_BUFFER;
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
-
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
frag_tail = &skb_shinfo(head)->frag_list;
for (i = 0; i < nfrags; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 1d08d99e298c..e67cc7909bce 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -284,7 +284,7 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
int idx;
non_aql = !info->tx_time_est;
- idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
if (idx < 0 || !sta)
return idx;
@@ -310,7 +310,6 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt76_queue *q;
int qid = skb_get_queue_mapping(skb);
- bool ext_phy = phy != &dev->phy;
if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
@@ -327,16 +326,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
qid = MT_TXQ_PSD;
- skb_set_queue_mapping(skb, qid);
}
if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
-
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
q = phy->q_tx[qid];
spin_lock_bh(&q->lock);
@@ -351,7 +347,6 @@ mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_tx_info *info;
- bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
skb = ieee80211_tx_dequeue(phy->hw, txq);
@@ -359,8 +354,7 @@ mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
return NULL;
info = IEEE80211_SKB_CB(skb);
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
return skb;
}
@@ -586,15 +580,25 @@ EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
void mt76_tx_worker_run(struct mt76_dev *dev)
{
- mt76_txq_schedule_all(&dev->phy);
- if (dev->phy2)
- mt76_txq_schedule_all(dev->phy2);
+ struct mt76_phy *phy;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ phy = dev->phys[i];
+ if (!phy)
+ continue;
+
+ mt76_txq_schedule_all(phy);
+ }
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->phy.test.tx_pending)
- mt76_testmode_tx_pending(&dev->phy);
- if (dev->phy2 && dev->phy2->test.tx_pending)
- mt76_testmode_tx_pending(dev->phy2);
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ phy = dev->phys[i];
+ if (!phy || !phy->test.tx_pending)
+ continue;
+
+ mt76_testmode_tx_pending(phy);
+ }
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);
@@ -697,17 +701,23 @@ EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
- struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
- struct mt76_queue *q, *q2 = NULL;
+ struct mt76_phy *phy = &dev->phy;
+ struct mt76_queue *q = phy->q_tx[0];
- q = phy->q_tx[0];
if (blocked == q->blocked)
return;
q->blocked = blocked;
- if (phy2) {
- q2 = phy2->q_tx[0];
- q2->blocked = blocked;
+
+ phy = dev->phys[MT_BAND1];
+ if (phy) {
+ q = phy->q_tx[0];
+ q->blocked = blocked;
+ }
+ phy = dev->phys[MT_BAND2];
+ if (phy) {
+ q = phy->q_tx[0];
+ q->blocked = blocked;
}
if (!blocked)
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 1bb92ca7451b..6b8964c19f50 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -845,8 +845,8 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
@@ -858,7 +858,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOSPC;
skb->prev = skb->next = NULL;
- err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
@@ -937,7 +937,6 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
spin_lock_init(&q->lock);
q->hw_idx = mt76u_ac_to_hwq(dev, i);
- q->qid = i;
dev->phy.q_tx[i] = q;
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 20669eacb66e..230b0e1061a7 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -88,7 +88,7 @@ mt7601u_eeprom_param_show(struct seq_file *file, void *data)
dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
- seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ seq_printf(file, "Reg channels: %hhu-%d\n", dev->ee->reg.start,
dev->ee->reg.start + dev->ee->reg.num - 1);
seq_puts(file, "Per rate power:\n");
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index aa3b64902cf9..625bebe60538 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -188,7 +188,7 @@ mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
if (idx != -1)
dev_info(dev->dev,
- "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ "EEPROM country region %02x (channels %d-%d)\n",
val, chan_bounds[idx].start,
chan_bounds[idx].start + chan_bounds[idx].num - 1);
else
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 671d8897ae76..6c9c7a61c5c9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -132,7 +132,7 @@ mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
static void
mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct mt7601u_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index a122f1dd38f6..118d43707853 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -368,7 +368,8 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
void mt7601u_tx_stat(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 8a00f6a75ca9..d4cd2215aba9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -1097,7 +1097,10 @@ static void mt7601u_phy_freq_cal(struct work_struct *work)
void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
struct ieee80211_bss_conf *info)
{
- if (!info->assoc)
+ struct ieee80211_vif *vif = container_of(info, struct ieee80211_vif,
+ bss_conf);
+
+ if (!vif->cfg.assoc)
cancel_delayed_work_sync(&dev->freq_cal.work);
/* Start/stop collecting beacon data */
@@ -1108,10 +1111,10 @@ void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
spin_unlock_bh(&dev->con_mon_lock);
dev->freq_cal.freq = dev->ee->rf_freq_off;
- dev->freq_cal.enabled = info->assoc;
+ dev->freq_cal.enabled = vif->cfg.assoc;
dev->freq_cal.adjusting = false;
- if (info->assoc)
+ if (vif->cfg.assoc)
ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
MT_FREQ_CAL_INIT_DELAY);
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index f1fa0442a57f..51d977ffc52f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -258,7 +258,8 @@ void mt7601u_tx_stat(struct work_struct *work)
}
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct mt7601u_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 8d8378bafd9b..3ac373d29d93 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -20,9 +20,11 @@
static const struct ieee80211_txrx_stypes
wilc_wfi_cfg80211_mgmt_types[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
- .tx = 0xffff,
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4)
},
[NL80211_IFTYPE_AP] = {
.tx = 0xffff,
@@ -305,6 +307,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
int ret;
u32 i;
u8 security = WILC_FW_SEC_NO;
+ enum mfptype mfp_type = WILC_FW_MFP_NONE;
enum authtype auth_type = WILC_FW_AUTH_ANY;
u32 cipher_group;
struct cfg80211_bss *bss;
@@ -313,32 +316,9 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
vif->connecting = true;
- memset(priv->wep_key, 0, sizeof(priv->wep_key));
- memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
-
cipher_group = sme->crypto.cipher_group;
if (cipher_group != 0) {
- if (cipher_group == WLAN_CIPHER_SUITE_WEP40) {
- security = WILC_FW_SEC_WEP;
-
- priv->wep_key_len[sme->key_idx] = sme->key_len;
- memcpy(priv->wep_key[sme->key_idx], sme->key,
- sme->key_len);
-
- wilc_set_wep_default_keyid(vif, sme->key_idx);
- wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
- sme->key_idx);
- } else if (cipher_group == WLAN_CIPHER_SUITE_WEP104) {
- security = WILC_FW_SEC_WEP_EXTENDED;
-
- priv->wep_key_len[sme->key_idx] = sme->key_len;
- memcpy(priv->wep_key[sme->key_idx], sme->key,
- sme->key_len);
-
- wilc_set_wep_default_keyid(vif, sme->key_idx);
- wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
- sme->key_idx);
- } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
if (cipher_group == WLAN_CIPHER_SUITE_TKIP)
security = WILC_FW_SEC_WPA2_TKIP;
else
@@ -373,8 +353,14 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
auth_type = WILC_FW_AUTH_OPEN_SYSTEM;
break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- auth_type = WILC_FW_AUTH_SHARED_KEY;
+ case NL80211_AUTHTYPE_SAE:
+ auth_type = WILC_FW_AUTH_SAE;
+ if (sme->ssid_len) {
+ memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len);
+ vif->auth.ssid.ssid_len = sme->ssid_len;
+ }
+ vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]);
+ ether_addr_copy(vif->auth.bssid, sme->bssid);
break;
default:
@@ -384,6 +370,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_akm_suites) {
if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X)
auth_type = WILC_FW_AUTH_IEEE8021;
+ else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_PSK_SHA256)
+ auth_type = WILC_FW_AUTH_OPEN_SYSTEM_SHA256;
+ else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X_SHA256)
+ auth_type = WILC_FW_AUTH_IEE8021X_SHA256;
}
if (wfi_drv->usr_scan_req.scan_result) {
@@ -427,6 +417,13 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
wfi_drv->conn_info.arg = priv;
wfi_drv->conn_info.param = join_params;
+ if (sme->mfp == NL80211_MFP_OPTIONAL)
+ mfp_type = WILC_FW_MFP_OPTIONAL;
+ else if (sme->mfp == NL80211_MFP_REQUIRED)
+ mfp_type = WILC_FW_MFP_REQUIRED;
+
+ wfi_drv->conn_info.mfp_type = mfp_type;
+
ret = wilc_set_join_req(vif, bss->bssid, sme->ie, sme->ie_len);
if (ret) {
netdev_err(dev, "wilc_set_join_req(): Error\n");
@@ -487,14 +484,6 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static inline void wilc_wfi_cfg_copy_wep_info(struct wilc_priv *priv,
- u8 key_index,
- struct key_params *params)
-{
- priv->wep_key_len[key_index] = params->key_len;
- memcpy(priv->wep_key[key_index], params->key, params->key_len);
-}
-
static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx)
{
if (!priv->wilc_gtk[idx]) {
@@ -514,6 +503,18 @@ static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx)
return 0;
}
+static int wilc_wfi_cfg_allocate_wpa_igtk_entry(struct wilc_priv *priv, u8 idx)
+{
+ idx -= 4;
+ if (!priv->wilc_igtk[idx]) {
+ priv->wilc_igtk[idx] = kzalloc(sizeof(*priv->wilc_igtk[idx]),
+ GFP_KERNEL);
+ if (!priv->wilc_igtk[idx])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info,
struct key_params *params)
{
@@ -550,35 +551,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
u8 op_mode;
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
+ struct wilc_wfi_key *key;
switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- if (priv->wdev.iftype == NL80211_IFTYPE_AP) {
- wilc_wfi_cfg_copy_wep_info(priv, key_index, params);
-
- if (params->cipher == WLAN_CIPHER_SUITE_WEP40)
- mode = WILC_FW_SEC_WEP;
- else
- mode = WILC_FW_SEC_WEP_EXTENDED;
-
- ret = wilc_add_wep_key_bss_ap(vif, params->key,
- params->key_len,
- key_index, mode,
- WILC_FW_AUTH_OPEN_SYSTEM);
- break;
- }
- if (memcmp(params->key, priv->wep_key[key_index],
- params->key_len)) {
- wilc_wfi_cfg_copy_wep_info(priv, key_index, params);
-
- ret = wilc_add_wep_key_bss_sta(vif, params->key,
- params->key_len,
- key_index);
- }
-
- break;
-
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
if (priv->wdev.iftype == NL80211_IFTYPE_AP ||
@@ -640,6 +615,26 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
key_index);
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ ret = wilc_wfi_cfg_allocate_wpa_igtk_entry(priv, key_index);
+ if (ret)
+ return -ENOMEM;
+
+ key = priv->wilc_igtk[key_index - 4];
+ ret = wilc_wfi_cfg_copy_wpa_info(key, params);
+ if (ret)
+ return -ENOMEM;
+
+ if (priv->wdev.iftype == NL80211_IFTYPE_AP ||
+ priv->wdev.iftype == NL80211_IFTYPE_P2P_GO)
+ op_mode = WILC_AP_MODE;
+ else
+ op_mode = WILC_STATION_MODE;
+
+ ret = wilc_add_igtk(vif, params->key, keylen, params->seq,
+ params->seq_len, mac_addr, op_mode,
+ key_index);
+ break;
default:
netdev_err(netdev, "%s: Unsupported cipher\n", __func__);
@@ -657,30 +652,34 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
- if (priv->wilc_gtk[key_index]) {
- kfree(priv->wilc_gtk[key_index]->key);
- priv->wilc_gtk[key_index]->key = NULL;
- kfree(priv->wilc_gtk[key_index]->seq);
- priv->wilc_gtk[key_index]->seq = NULL;
-
- kfree(priv->wilc_gtk[key_index]);
- priv->wilc_gtk[key_index] = NULL;
- }
-
- if (priv->wilc_ptk[key_index]) {
- kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = NULL;
- kfree(priv->wilc_ptk[key_index]->seq);
- priv->wilc_ptk[key_index]->seq = NULL;
- kfree(priv->wilc_ptk[key_index]);
- priv->wilc_ptk[key_index] = NULL;
- }
-
- if (key_index <= 3 && priv->wep_key_len[key_index]) {
- memset(priv->wep_key[key_index], 0,
- priv->wep_key_len[key_index]);
- priv->wep_key_len[key_index] = 0;
- wilc_remove_wep_key(vif, key_index);
+ if (!pairwise && (key_index == 4 || key_index == 5)) {
+ key_index -= 4;
+ if (priv->wilc_igtk[key_index]) {
+ kfree(priv->wilc_igtk[key_index]->key);
+ priv->wilc_igtk[key_index]->key = NULL;
+ kfree(priv->wilc_igtk[key_index]->seq);
+ priv->wilc_igtk[key_index]->seq = NULL;
+ kfree(priv->wilc_igtk[key_index]);
+ priv->wilc_igtk[key_index] = NULL;
+ }
+ } else {
+ if (priv->wilc_gtk[key_index]) {
+ kfree(priv->wilc_gtk[key_index]->key);
+ priv->wilc_gtk[key_index]->key = NULL;
+ kfree(priv->wilc_gtk[key_index]->seq);
+ priv->wilc_gtk[key_index]->seq = NULL;
+
+ kfree(priv->wilc_gtk[key_index]);
+ priv->wilc_gtk[key_index] = NULL;
+ }
+ if (priv->wilc_ptk[key_index]) {
+ kfree(priv->wilc_ptk[key_index]->key);
+ priv->wilc_ptk[key_index]->key = NULL;
+ kfree(priv->wilc_ptk[key_index]->seq);
+ priv->wilc_ptk[key_index]->seq = NULL;
+ kfree(priv->wilc_ptk[key_index]);
+ priv->wilc_ptk[key_index] = NULL;
+ }
}
return 0;
@@ -695,11 +694,20 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
struct key_params key_params;
if (!pairwise) {
- key_params.key = priv->wilc_gtk[key_index]->key;
- key_params.cipher = priv->wilc_gtk[key_index]->cipher;
- key_params.key_len = priv->wilc_gtk[key_index]->key_len;
- key_params.seq = priv->wilc_gtk[key_index]->seq;
- key_params.seq_len = priv->wilc_gtk[key_index]->seq_len;
+ if (key_index == 4 || key_index == 5) {
+ key_index -= 4;
+ key_params.key = priv->wilc_igtk[key_index]->key;
+ key_params.cipher = priv->wilc_igtk[key_index]->cipher;
+ key_params.key_len = priv->wilc_igtk[key_index]->key_len;
+ key_params.seq = priv->wilc_igtk[key_index]->seq;
+ key_params.seq_len = priv->wilc_igtk[key_index]->seq_len;
+ } else {
+ key_params.key = priv->wilc_gtk[key_index]->key;
+ key_params.cipher = priv->wilc_gtk[key_index]->cipher;
+ key_params.key_len = priv->wilc_gtk[key_index]->key_len;
+ key_params.seq = priv->wilc_gtk[key_index]->seq;
+ key_params.seq_len = priv->wilc_gtk[key_index]->seq_len;
+ }
} else {
key_params.key = priv->wilc_ptk[key_index]->key;
key_params.cipher = priv->wilc_ptk[key_index]->cipher;
@@ -713,14 +721,19 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
return 0;
}
+/* wiphy_new_nm() will WARNON if not present */
static int set_default_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool unicast, bool multicast)
{
- struct wilc_vif *vif = netdev_priv(netdev);
+ return 0;
+}
- wilc_set_wep_default_keyid(vif, key_index);
+static int set_default_mgmt_key(struct wiphy *wiphy, struct net_device *netdev,
+ u8 key_index)
+{
+ struct wilc_vif *vif = netdev_priv(netdev);
- return 0;
+ return wilc_set_default_mgmt_key_index(vif, key_index);
}
static int get_station(struct wiphy *wiphy, struct net_device *dev,
@@ -977,6 +990,18 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch)
}
}
+bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size)
+{
+ struct wilc *wl = vif->wilc;
+ struct wilc_priv *priv = &vif->priv;
+ int freq, ret;
+
+ freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ);
+ ret = cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
+
+ return ret;
+}
+
void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
{
struct wilc *wl = vif->wilc;
@@ -1162,8 +1187,14 @@ static int mgmt_tx(struct wiphy *wiphy,
goto out_txq_add_pkt;
}
- if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len))
+ if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len)) {
+ if (chan)
+ wilc_set_mac_chnl_num(vif, chan->hw_value);
+ else
+ wilc_set_mac_chnl_num(vif, vif->wilc->op_ch);
+
goto out_set_timeout;
+ }
d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action);
if (d->oui_type != WLAN_OUI_TYPE_WFA_P2P ||
@@ -1230,6 +1261,7 @@ void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy,
struct wilc_vif *vif = netdev_priv(wdev->netdev);
u32 presp_bit = BIT(IEEE80211_STYPE_PROBE_REQ >> 4);
u32 action_bit = BIT(IEEE80211_STYPE_ACTION >> 4);
+ u32 pauth_bit = BIT(IEEE80211_STYPE_AUTH >> 4);
if (wl->initialized) {
bool prev = vif->mgmt_reg_stypes & presp_bit;
@@ -1243,10 +1275,26 @@ void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy,
if (now != prev)
wilc_frame_register(vif, IEEE80211_STYPE_ACTION, now);
+
+ prev = vif->mgmt_reg_stypes & pauth_bit;
+ now = upd->interface_stypes & pauth_bit;
+ if (now != prev)
+ wilc_frame_register(vif, IEEE80211_STYPE_AUTH, now);
}
vif->mgmt_reg_stypes =
- upd->interface_stypes & (presp_bit | action_bit);
+ upd->interface_stypes & (presp_bit | action_bit | pauth_bit);
+}
+
+static int external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *auth)
+{
+ struct wilc_vif *vif = netdev_priv(dev);
+
+ if (auth->status == WLAN_STATUS_SUCCESS)
+ wilc_set_external_auth_param(vif, auth);
+
+ return 0;
}
static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
@@ -1264,12 +1312,11 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
if (idx != 0)
return -ENOENT;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
-
ret = wilc_get_rssi(vif, &sinfo->signal);
if (ret)
return ret;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
memcpy(mac, vif->priv.associated_bss, ETH_ALEN);
return 0;
}
@@ -1378,7 +1425,8 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
return wilc_add_beacon(vif, 0, 0, beacon);
}
-static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
int ret;
struct wilc_vif *vif = netdev_priv(dev);
@@ -1647,6 +1695,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.del_key = del_key,
.get_key = get_key,
.set_default_key = set_default_key,
+ .set_default_mgmt_key = set_default_mgmt_key,
.add_virtual_intf = add_virtual_intf,
.del_virtual_intf = del_virtual_intf,
.change_virtual_intf = change_virtual_intf,
@@ -1662,6 +1711,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.change_bss = change_bss,
.set_wiphy_params = set_wiphy_params,
+ .external_auth = external_auth,
.set_pmksa = set_pmksa,
.del_pmksa = del_pmksa,
.flush_pmksa = flush_pmksa,
@@ -1804,7 +1854,7 @@ struct wilc *wilc_create_wiphy(struct device *dev)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
-
+ wiphy->features |= NL80211_FEATURE_SAE;
set_wiphy_dev(wiphy, dev);
wl->wiphy = wiphy;
ret = wiphy_register(wiphy);
diff --git a/drivers/net/wireless/microchip/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h
index 1114530d03e4..5c5cac4aab02 100644
--- a/drivers/net/wireless/microchip/wilc1000/fw.h
+++ b/drivers/net/wireless/microchip/wilc1000/fw.h
@@ -41,21 +41,23 @@ struct wilc_drv_handler {
u8 mode;
} __packed;
-struct wilc_wep_key {
- u8 index;
+struct wilc_sta_wpa_ptk {
+ u8 mac_addr[ETH_ALEN];
u8 key_len;
u8 key[];
} __packed;
-struct wilc_sta_wpa_ptk {
+struct wilc_ap_wpa_ptk {
u8 mac_addr[ETH_ALEN];
+ u8 index;
u8 key_len;
u8 key[];
} __packed;
-struct wilc_ap_wpa_ptk {
- u8 mac_addr[ETH_ALEN];
+struct wilc_wpa_igtk {
u8 index;
+ u8 pn_len;
+ u8 pn[6];
u8 key_len;
u8 key[];
} __packed;
@@ -116,4 +118,13 @@ struct wilc_join_bss_param {
struct wilc_noa_opp_enable opp_en;
};
} __packed;
+
+struct wilc_external_auth_param {
+ u8 action;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ __le32 key_mgmt_suites;
+ __le16 status;
+} __packed;
#endif
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 71b44cfe0dfc..eb1d1ba3a443 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -271,12 +271,19 @@ error:
static int wilc_send_connect_wid(struct wilc_vif *vif)
{
int result = 0;
- struct wid wid_list[4];
+ struct wid wid_list[5];
u32 wid_cnt = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
struct wilc_conn_info *conn_attr = &hif_drv->conn_info;
struct wilc_join_bss_param *bss_param = conn_attr->param;
+
+ wid_list[wid_cnt].id = WID_SET_MFP;
+ wid_list[wid_cnt].type = WID_CHAR;
+ wid_list[wid_cnt].size = sizeof(char);
+ wid_list[wid_cnt].val = (s8 *)&conn_attr->mfp_type;
+ wid_cnt++;
+
wid_list[wid_cnt].id = WID_INFO_ELEMENT_ASSOCIATE;
wid_list[wid_cnt].type = WID_BIN_DATA;
wid_list[wid_cnt].val = conn_attr->req_ies;
@@ -306,7 +313,10 @@ static int wilc_send_connect_wid(struct wilc_vif *vif)
netdev_err(vif->ndev, "failed to send config packet\n");
goto error;
} else {
- hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
+ if (conn_attr->auth_type == WILC_FW_AUTH_SAE)
+ hif_drv->hif_state = HOST_IF_EXTERNAL_AUTH;
+ else
+ hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
}
return 0;
@@ -625,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
conn_info->req_ies_len = 0;
}
-static inline void host_int_handle_disconnect(struct wilc_vif *vif)
+void wilc_handle_disconnect(struct wilc_vif *vif)
{
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -637,8 +647,6 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
if (hif_drv->conn_info.conn_result)
hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
0, hif_drv->conn_info.arg);
- else
- netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
eth_zero_addr(hif_drv->assoc_bssid);
@@ -665,11 +673,16 @@ static void handle_rcvd_gnrl_async_info(struct work_struct *work)
goto free_msg;
}
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
+
+ if (hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH) {
+ cfg80211_external_auth_request(vif->ndev, &vif->auth,
+ GFP_KERNEL);
+ hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
+ } else if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
host_int_parse_assoc_resp_info(vif, mac_info->status);
} else if (mac_info->status == WILC_MAC_STATUS_DISCONNECTED) {
if (hif_drv->hif_state == HOST_IF_CONNECTED) {
- host_int_handle_disconnect(vif);
+ wilc_handle_disconnect(vif);
} else if (hif_drv->usr_scan_req.scan_result) {
del_timer(&hif_drv->scan_timer);
handle_scan_done(vif, SCAN_EVENT_ABORTED);
@@ -710,7 +723,8 @@ int wilc_disconnect(struct wilc_vif *vif)
}
if (conn_info->conn_result) {
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
+ if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
+ hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH)
del_timer(&hif_drv->connect_timer);
conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0,
@@ -800,15 +814,15 @@ static void wilc_hif_pack_sta_param(u8 *cur_byte, const u8 *mac,
put_unaligned_le16(params->aid, cur_byte);
cur_byte += 2;
- *cur_byte++ = params->supported_rates_len;
- if (params->supported_rates_len > 0)
- memcpy(cur_byte, params->supported_rates,
- params->supported_rates_len);
- cur_byte += params->supported_rates_len;
+ *cur_byte++ = params->link_sta_params.supported_rates_len;
+ if (params->link_sta_params.supported_rates_len > 0)
+ memcpy(cur_byte, params->link_sta_params.supported_rates,
+ params->link_sta_params.supported_rates_len);
+ cur_byte += params->link_sta_params.supported_rates_len;
- if (params->ht_capa) {
+ if (params->link_sta_params.ht_capa) {
*cur_byte++ = true;
- memcpy(cur_byte, params->ht_capa,
+ memcpy(cur_byte, params->link_sta_params.ht_capa,
sizeof(struct ieee80211_ht_cap));
} else {
*cur_byte++ = false;
@@ -986,6 +1000,31 @@ void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled)
pr_err("Failed to send wowlan trigger config packet\n");
}
+int wilc_set_external_auth_param(struct wilc_vif *vif,
+ struct cfg80211_external_auth_params *auth)
+{
+ int ret;
+ struct wid wid;
+ struct wilc_external_auth_param *param;
+
+ wid.id = WID_EXTERNAL_AUTH_PARAM;
+ wid.type = WID_BIN_DATA;
+ wid.size = sizeof(*param);
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return -EINVAL;
+
+ wid.val = (u8 *)param;
+ param->action = auth->action;
+ ether_addr_copy(param->bssid, auth->bssid);
+ memcpy(param->ssid, auth->ssid.ssid, auth->ssid.ssid_len);
+ param->ssid_len = auth->ssid.ssid_len;
+ ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+
+ kfree(param);
+ return ret;
+}
+
static void handle_scan_timer(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
@@ -1038,108 +1077,6 @@ static void timer_connect_cb(struct timer_list *t)
kfree(msg);
}
-int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
-{
- struct wid wid;
- int result;
-
- wid.id = WID_REMOVE_WEP_KEY;
- wid.type = WID_STR;
- wid.size = sizeof(char);
- wid.val = &index;
-
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev,
- "Failed to send remove wep key config packet\n");
- return result;
-}
-
-int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
-{
- struct wid wid;
- int result;
-
- wid.id = WID_KEY_ID;
- wid.type = WID_CHAR;
- wid.size = sizeof(char);
- wid.val = &index;
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev,
- "Failed to send wep default key config packet\n");
-
- return result;
-}
-
-int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index)
-{
- struct wid wid;
- int result;
- struct wilc_wep_key *wep_key;
-
- wid.id = WID_ADD_WEP_KEY;
- wid.type = WID_STR;
- wid.size = sizeof(*wep_key) + len;
- wep_key = kzalloc(wid.size, GFP_KERNEL);
- if (!wep_key)
- return -ENOMEM;
-
- wid.val = (u8 *)wep_key;
-
- wep_key->index = index;
- wep_key->key_len = len;
- memcpy(wep_key->key, key, len);
-
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev,
- "Failed to add wep key config packet\n");
-
- kfree(wep_key);
- return result;
-}
-
-int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index, u8 mode, enum authtype auth_type)
-{
- struct wid wid_list[3];
- int result;
- struct wilc_wep_key *wep_key;
-
- wid_list[0].id = WID_11I_MODE;
- wid_list[0].type = WID_CHAR;
- wid_list[0].size = sizeof(char);
- wid_list[0].val = &mode;
-
- wid_list[1].id = WID_AUTH_TYPE;
- wid_list[1].type = WID_CHAR;
- wid_list[1].size = sizeof(char);
- wid_list[1].val = (s8 *)&auth_type;
-
- wid_list[2].id = WID_WEP_KEY_VALUE;
- wid_list[2].type = WID_STR;
- wid_list[2].size = sizeof(*wep_key) + len;
- wep_key = kzalloc(wid_list[2].size, GFP_KERNEL);
- if (!wep_key)
- return -ENOMEM;
-
- wid_list[2].val = (u8 *)wep_key;
-
- wep_key->index = index;
- wep_key->key_len = len;
- memcpy(wep_key->key, key, len);
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
- ARRAY_SIZE(wid_list));
- if (result)
- netdev_err(vif->ndev,
- "Failed to add wep ap key config packet\n");
-
- kfree(wep_key);
- return result;
-}
-
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index)
@@ -1211,6 +1148,36 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
return result;
}
+int wilc_add_igtk(struct wilc_vif *vif, const u8 *igtk, u8 igtk_key_len,
+ const u8 *pn, u8 pn_len, const u8 *mac_addr, u8 mode, u8 index)
+{
+ int result = 0;
+ u8 t_key_len = igtk_key_len;
+ struct wid wid;
+ struct wilc_wpa_igtk *key_buf;
+
+ key_buf = kzalloc(sizeof(*key_buf) + t_key_len, GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ key_buf->index = index;
+
+ memcpy(&key_buf->pn[0], pn, pn_len);
+ key_buf->pn_len = pn_len;
+
+ memcpy(&key_buf->key[0], igtk, igtk_key_len);
+ key_buf->key_len = t_key_len;
+
+ wid.id = WID_ADD_IGTK;
+ wid.type = WID_STR;
+ wid.size = sizeof(*key_buf) + t_key_len;
+ wid.val = (s8 *)key_buf;
+ result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ kfree(key_buf);
+
+ return result;
+}
+
int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 index, u32 key_rsc_len, const u8 *key_rsc,
const u8 *rx_mic, const u8 *tx_mic, u8 mode,
@@ -1749,6 +1716,10 @@ void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
reg_frame.reg_id = WILC_FW_PROBE_REQ_IDX;
break;
+ case IEEE80211_STYPE_AUTH:
+ reg_frame.reg_id = WILC_FW_AUTH_REQ_IDX;
+ break;
+
default:
break;
}
@@ -1826,7 +1797,8 @@ int wilc_add_station(struct wilc_vif *vif, const u8 *mac,
wid.id = WID_ADD_STA;
wid.type = WID_BIN;
- wid.size = WILC_ADD_STA_LENGTH + params->supported_rates_len;
+ wid.size = WILC_ADD_STA_LENGTH +
+ params->link_sta_params.supported_rates_len;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
return -ENOMEM;
@@ -1911,7 +1883,8 @@ int wilc_edit_station(struct wilc_vif *vif, const u8 *mac,
wid.id = WID_EDIT_STA;
wid.type = WID_BIN;
- wid.size = WILC_ADD_STA_LENGTH + params->supported_rates_len;
+ wid.size = WILC_ADD_STA_LENGTH +
+ params->link_sta_params.supported_rates_len;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
return -ENOMEM;
@@ -1996,3 +1969,20 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
return wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1);
}
+
+int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index)
+{
+ struct wid wid;
+ int result;
+
+ wid.id = WID_DEFAULT_MGMT_KEY_ID;
+ wid.type = WID_CHAR;
+ wid.size = sizeof(char);
+ wid.val = &index;
+ result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ if (result)
+ netdev_err(vif->ndev,
+ "Failed to send default mgmt key index\n");
+
+ return result;
+}
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 77616fc77575..baa2881f4465 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -47,6 +47,7 @@ enum host_if_state {
HOST_IF_WAITING_CONN_RESP = 3,
HOST_IF_CONNECTED = 4,
HOST_IF_P2P_LISTEN = 5,
+ HOST_IF_EXTERNAL_AUTH = 6,
HOST_IF_FORCE_32BIT = 0xFFFFFFFF
};
@@ -107,6 +108,7 @@ struct wilc_conn_info {
u8 bssid[ETH_ALEN];
u8 security;
enum authtype auth_type;
+ enum mfptype mfp_type;
u8 ch;
u8 *req_ies;
size_t req_ies_len;
@@ -151,15 +153,12 @@ struct host_if_drv {
};
struct wilc_vif;
-int wilc_remove_wep_key(struct wilc_vif *vif, u8 index);
-int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index);
-int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index);
-int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index, u8 mode, enum authtype auth_type);
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index);
+int wilc_add_igtk(struct wilc_vif *vif, const u8 *igtk, u8 igtk_key_len,
+ const u8 *pn, u8 pn_len, const u8 *mac_addr, u8 mode,
+ u8 index);
s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
u32 *out_val);
int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
@@ -208,9 +207,14 @@ int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled);
+int wilc_set_external_auth_param(struct wilc_vif *vif,
+ struct cfg80211_external_auth_params *param);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto);
+int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
+void wilc_handle_disconnect(struct wilc_vif *vif);
+
#endif
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 3c292e3464c2..9b319a455b96 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -97,12 +97,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
- if (vif->mode == WILC_STATION_MODE)
+ if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
- if (vif->mode == WILC_AP_MODE)
+ if (vif->iftype == WILC_AP_MODE)
if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
ndev = vif->ndev;
goto out;
@@ -122,7 +122,7 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
else
eth_zero_addr(vif->bssid);
- vif->mode = mode;
+ vif->iftype = mode;
}
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -472,7 +472,7 @@ static int wlan_initialize_threads(struct net_device *dev)
"%s-tx", dev->name);
if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
- wilc->close = 0;
+ wilc->close = 1;
return PTR_ERR(wilc->txq_thread);
}
wait_for_completion(&wilc->txq_thread_started);
@@ -780,6 +780,7 @@ static int wilc_mac_close(struct net_device *ndev)
if (vif->ndev) {
netif_stop_queue(vif->ndev);
+ wilc_handle_disconnect(vif);
wilc_deinit_host_int(vif->ndev);
}
@@ -835,15 +836,24 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
}
}
-void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
+void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
{
int srcu_idx;
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
u32 type_bit = BIT(type >> 4);
+ u32 auth_bit = BIT(IEEE80211_STYPE_AUTH >> 4);
+
+ if ((vif->mgmt_reg_stypes & auth_bit &&
+ ieee80211_is_auth(mgmt->frame_control)) &&
+ vif->iftype == WILC_STATION_MODE && is_auth) {
+ wilc_wfi_mgmt_frame_rx(vif, buff, size);
+ break;
+ }
if (vif->priv.p2p_listen_state &&
vif->mgmt_reg_stypes & type_bit)
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index a067274c2014..43c085c74b7a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -45,12 +45,6 @@ struct wilc_wfi_key {
u32 cipher;
};
-struct wilc_wfi_wep_key {
- u8 *key;
- u8 key_len;
- u8 key_idx;
-};
-
struct sta_info {
u8 sta_associated_bss[WILC_MAX_NUM_STA][ETH_ALEN];
};
@@ -63,8 +57,6 @@ struct wilc_wfi_p2p_listen_params {
};
static const u32 wilc_cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_AES_CMAC
@@ -132,13 +124,12 @@ struct wilc_priv {
struct net_device *dev;
struct host_if_drv *hif_drv;
struct wilc_pmkid_attr pmkid_list;
- u8 wep_key[4][WLAN_KEY_LEN_WEP104];
- u8 wep_key_len[4];
/* The real interface that the monitor is on */
struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[WILC_MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[WILC_MAX_NUM_STA];
+ struct wilc_wfi_key *wilc_igtk[2];
u8 wilc_groupkey;
/* mutexes */
@@ -186,7 +177,6 @@ struct wilc_vif {
u8 bssid[ETH_ALEN];
struct host_if_drv *hif_drv;
struct net_device *ndev;
- u8 mode;
struct timer_list during_ip_timer;
struct timer_list periodic_rssi;
struct rf_info periodic_stat;
@@ -195,6 +185,7 @@ struct wilc_vif {
struct wilc_priv priv;
struct list_head list;
struct cfg80211_bss *bss;
+ struct cfg80211_external_auth_params auth;
};
struct wilc_tx_queue_status {
@@ -288,7 +279,7 @@ struct wilc_wfi_mon_priv {
void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc);
void wilc_netdev_cleanup(struct wilc *wilc);
-void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
+void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth);
void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
u8 mode);
struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 7962c11cfe84..600cc57e9da2 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -26,6 +26,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
struct wilc_sdio {
bool irq_gpio;
u32 block_size;
+ bool isinit;
int has_thrpt_enh3;
};
@@ -193,6 +194,13 @@ static int wilc_sdio_reset(struct wilc *wilc)
return 0;
}
+static bool wilc_sdio_is_init(struct wilc *wilc)
+{
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+ return sdio_priv->isinit;
+}
+
static int wilc_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -581,6 +589,9 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
static int wilc_sdio_deinit(struct wilc *wilc)
{
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+ sdio_priv->isinit = false;
return 0;
}
@@ -700,6 +711,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
sdio_priv->has_thrpt_enh3);
}
+ sdio_priv->isinit = true;
return 0;
}
@@ -981,6 +993,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
.hif_reset = wilc_sdio_reset,
+ .hif_is_init = wilc_sdio_is_init,
};
static int wilc_sdio_resume(struct device *dev)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 18420e954402..b0fc5e68feec 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -191,11 +191,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* deassert RESET: */
- gpiod_set_value(gpios->reset, 0);
- } else {
/* assert RESET: */
gpiod_set_value(gpios->reset, 1);
+ } else {
+ /* deassert RESET: */
+ gpiod_set_value(gpios->reset, 0);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
@@ -1029,6 +1029,13 @@ static int wilc_spi_reset(struct wilc *wilc)
return result;
}
+static bool wilc_spi_is_init(struct wilc *wilc)
+{
+ struct wilc_spi *spi_priv = wilc->bus_data;
+
+ return spi_priv->isinit;
+}
+
static int wilc_spi_deinit(struct wilc *wilc)
{
struct wilc_spi *spi_priv = wilc->bus_data;
@@ -1250,4 +1257,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
.hif_block_rx_ext = wilc_spi_read,
.hif_sync_ext = wilc_spi_sync_ext,
.hif_reset = wilc_spi_reset,
+ .hif_is_init = wilc_spi_is_init,
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 48441f0389ca..947d9a0a494e 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -968,7 +968,8 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
if (pkt_offset & IS_MANAGMEMENT) {
buff_ptr += HOST_HDR_OFFSET;
- wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len);
+ wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len,
+ pkt_offset & IS_MGMT_AUTH_PKT);
} else {
if (!is_cfg_packet) {
wilc_frmw_to_host(wilc, buff_ptr, pkt_len,
@@ -1480,9 +1481,12 @@ int wilc_wlan_init(struct net_device *dev)
wilc->quit = 0;
- if (wilc->hif_func->hif_init(wilc, false)) {
- ret = -EIO;
- goto fail;
+ if (!wilc->hif_func->hif_is_init(wilc)) {
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ ret = wilc->hif_func->hif_init(wilc, false);
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ if (ret)
+ goto fail;
}
if (!wilc->tx_buffer)
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index eb7978166d73..a72cd5cac81d 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -305,6 +305,7 @@
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
#define IS_MGMT_STATUS_SUCCES 0x040
+#define IS_MGMT_AUTH_PKT 0x010
#define WILC_WID_TYPE GENMASK(15, 12)
#define WILC_VMM_ENTRY_FULL_RETRY 1
@@ -372,6 +373,7 @@ struct wilc_hif_func {
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
int (*hif_reset)(struct wilc *wilc);
+ bool (*hif_is_init)(struct wilc *wilc);
};
#define WILC_MAX_CFG_FRAME_SIZE 1468
@@ -423,6 +425,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc);
netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size);
+bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size);
void host_wakeup_notify(struct wilc *wilc);
void host_sleep_notify(struct wilc *wilc);
void chip_allow_sleep(struct wilc *wilc);
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index dba301378b7f..131388886acb 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
+ {WID_TX_POWER, 0},
{WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
@@ -180,9 +181,10 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
i++;
if (cfg->s[i].id == wid)
- memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+ memcpy(cfg->s[i].str, &info[2],
+ get_unaligned_le16(&info[2]) + 2);
- len = 2 + info[2];
+ len = 2 + get_unaligned_le16(&info[2]);
break;
default:
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_if.h b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
index 6eb7eb4ac294..df2f5a63bdf6 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_if.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
@@ -85,7 +85,16 @@ enum authtype {
WILC_FW_AUTH_OPEN_SYSTEM = 1,
WILC_FW_AUTH_SHARED_KEY = 2,
WILC_FW_AUTH_ANY = 3,
- WILC_FW_AUTH_IEEE8021 = 5
+ WILC_FW_AUTH_IEEE8021 = 5,
+ WILC_FW_AUTH_SAE = 7,
+ WILC_FW_AUTH_IEE8021X_SHA256 = 9,
+ WILC_FW_AUTH_OPEN_SYSTEM_SHA256 = 13
+};
+
+enum mfptype {
+ WILC_FW_MFP_NONE = 0x0,
+ WILC_FW_MFP_OPTIONAL = 0x1,
+ WILC_FW_MFP_REQUIRED = 0x2
};
enum site_survey {
@@ -176,7 +185,8 @@ enum {
enum {
WILC_FW_ACTION_FRM_IDX = 0,
- WILC_FW_PROBE_REQ_IDX = 1
+ WILC_FW_PROBE_REQ_IDX = 1,
+ WILC_FW_AUTH_REQ_IDX = 2
};
enum wid_type {
@@ -657,6 +667,9 @@ enum {
WID_LOG_TERMINAL_SWITCH = 0x00CD,
WID_TX_POWER = 0x00CE,
WID_WOWLAN_TRIGGER = 0X00CF,
+ WID_SET_MFP = 0x00D0,
+
+ WID_DEFAULT_MGMT_KEY_ID = 0x00D2,
/* EMAC Short WID list */
/* RTS Threshold */
/*
@@ -746,6 +759,7 @@ enum {
WID_REMOVE_KEY = 0x301E,
WID_ASSOC_REQ_INFO = 0x301F,
WID_ASSOC_RES_INFO = 0x3020,
+ WID_ADD_IGTK = 0x3022,
WID_MANUFACTURER = 0x3026, /* Added for CAPI tool */
WID_MODEL_NAME = 0x3027, /* Added for CAPI tool */
WID_MODEL_NUM = 0x3028, /* Added for CAPI tool */
@@ -789,7 +803,7 @@ enum {
WID_ADD_BEACON = 0x408a,
WID_SETUP_MULTICAST_FILTER = 0x408b,
-
+ WID_EXTERNAL_AUTH_PARAM = 0x408d,
/* Miscellaneous WIDs */
WID_ALL = 0x7FFE,
WID_MAX = 0xFFFF
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 90e552532701..d3cdffbded69 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -133,7 +133,7 @@ int plfxlc_restore_settings(struct plfxlc_mac *mac)
return 0;
if (mac->vif) {
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon) {
/*beacon is hardcoded in firmware */
kfree_skb(beacon);
@@ -587,12 +587,12 @@ static void plfxlc_op_configure_filter(struct ieee80211_hw *hw,
static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
int associated;
- dev_dbg(plfxlc_mac_dev(mac), "changes: %x\n", changes);
+ dev_dbg(plfxlc_mac_dev(mac), "changes: %llx\n", changes);
if (mac->type != NL80211_IFTYPE_ADHOC) { /* for STATION */
associated = is_valid_ether_addr(bss_conf->bssid);
@@ -601,7 +601,7 @@ static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw,
/* for ADHOC */
associated = true;
if (changes & BSS_CHANGED_BEACON) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif, 0);
if (beacon) {
/*beacon is hardcoded in firmware */
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 8519cf0adfff..39e54b3787d6 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -562,7 +562,7 @@ static void sta_queue_cleanup_timer_callb(struct timer_list *t)
if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) {
tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG;
} else {
- memset(tx->station[sidx].mac, 0, ETH_ALEN);
+ eth_zero_addr(tx->station[sidx].mac);
tx->station[sidx].flag = 0;
}
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 84b15a655eab..1593e810b3ca 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -352,7 +352,8 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -500,7 +501,7 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
switch (vif->wdev.iftype) {
case NL80211_IFTYPE_STATION:
- if (idx != 0 || !vif->wdev.current_bss)
+ if (idx != 0 || !vif->wdev.connected)
return -ENOENT;
ether_addr_copy(mac, vif->bssid);
@@ -729,7 +730,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
pr_err("VIF%u.%u: failed to disconnect\n",
mac->macid, vif->vifid);
- if (vif->wdev.current_bss) {
+ if (vif->wdev.connected) {
netif_carrier_off(vif->netdev);
cfg80211_disconnected(vif->netdev, reason_code,
NULL, 0, true, GFP_KERNEL);
@@ -745,10 +746,11 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_supported_band *sband;
- const struct cfg80211_chan_def *chandef = &wdev->chandef;
+ const struct cfg80211_chan_def *chandef = wdev_chandef(wdev, 0);
struct ieee80211_channel *chan;
int ret;
+
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -765,7 +767,7 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
survey->channel = chan;
survey->filled = 0x0;
- if (chan == chandef->chan)
+ if (chandef && chan == chandef->chan)
survey->filled = SURVEY_INFO_IN_USE;
ret = qtnf_cmd_get_chan_stats(mac, chan->center_freq, survey);
@@ -778,7 +780,7 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
static int
qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct cfg80211_chan_def *chandef)
+ unsigned int link_id, struct cfg80211_chan_def *chandef)
{
struct net_device *ndev = wdev->netdev;
struct qtnf_vif *vif;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index c68563c83098..0fad53693292 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -241,6 +241,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
struct qlink_auth_encr *aen;
int ret;
int i;
+ int n;
if (!qtnf_cmd_start_ap_can_fit(vif, s))
return -E2BIG;
@@ -280,8 +281,9 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
for (i = 0; i < QLINK_MAX_NR_CIPHER_SUITES; i++)
aen->ciphers_pairwise[i] =
cpu_to_le32(s->crypto.ciphers_pairwise[i]);
- aen->n_akm_suites = cpu_to_le32(s->crypto.n_akm_suites);
- for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
+ n = min(QLINK_MAX_NR_AKM_SUITES, s->crypto.n_akm_suites);
+ aen->n_akm_suites = cpu_to_le32(n);
+ for (i = 0; i < n; i++)
aen->akm_suites[i] = cpu_to_le32(s->crypto.akm_suites[i]);
aen->control_port = s->crypto.control_port;
aen->control_port_no_encrypt = s->crypto.control_port_no_encrypt;
@@ -2005,7 +2007,7 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
dwell_active = scan_req->duration;
dwell_passive = scan_req->duration;
} else if (wdev->iftype == NL80211_IFTYPE_STATION &&
- wdev->current_bss) {
+ wdev->connected) {
/* let device select dwell based on traffic conditions */
dwell_active = QTNF_SCAN_TIME_AUTO;
dwell_passive = QTNF_SCAN_TIME_AUTO;
@@ -2076,6 +2078,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
struct qlink_auth_encr *aen;
int ret;
int i;
+ int n;
u32 connect_flags = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2132,9 +2135,10 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
aen->ciphers_pairwise[i] =
cpu_to_le32(sme->crypto.ciphers_pairwise[i]);
- aen->n_akm_suites = cpu_to_le32(sme->crypto.n_akm_suites);
+ n = min(QLINK_MAX_NR_AKM_SUITES, sme->crypto.n_akm_suites);
+ aen->n_akm_suites = cpu_to_le32(n);
- for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
+ for (i = 0; i < n; i++)
aen->akm_suites[i] = cpu_to_le32(sme->crypto.akm_suites[i]);
aen->control_port = sme->crypto.control_port;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 8dc80574d08d..4fafe370101a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -189,7 +189,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
vif->mac->macid, vif->vifid,
join_info->bssid, chandef.chan->hw_value);
- if (!vif->wdev.ssid_len) {
+ if (!vif->wdev.u.client.ssid_len) {
pr_warn("VIF%u.%u: SSID unknown for BSS:%pM\n",
vif->mac->macid, vif->vifid,
join_info->bssid);
@@ -197,7 +197,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
goto done;
}
- ie = kzalloc(2 + vif->wdev.ssid_len, GFP_KERNEL);
+ ie = kzalloc(2 + vif->wdev.u.client.ssid_len, GFP_KERNEL);
if (!ie) {
pr_warn("VIF%u.%u: IE alloc failed for BSS:%pM\n",
vif->mac->macid, vif->vifid,
@@ -207,14 +207,15 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
}
ie[0] = WLAN_EID_SSID;
- ie[1] = vif->wdev.ssid_len;
- memcpy(ie + 2, vif->wdev.ssid, vif->wdev.ssid_len);
+ ie[1] = vif->wdev.u.client.ssid_len;
+ memcpy(ie + 2, vif->wdev.u.client.ssid,
+ vif->wdev.u.client.ssid_len);
bss = cfg80211_inform_bss(wiphy, chandef.chan,
CFG80211_BSS_FTYPE_UNKNOWN,
join_info->bssid, 0,
WLAN_CAPABILITY_ESS, 100,
- ie, 2 + vif->wdev.ssid_len,
+ ie, 2 + vif->wdev.u.client.ssid_len,
0, GFP_KERNEL);
if (!bss) {
pr_warn("VIF%u.%u: can't connect to unknown BSS: %pM\n",
@@ -470,14 +471,14 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
- !vif->wdev.current_bss)
+ !vif->wdev.connected)
continue;
if (!vif->netdev)
continue;
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->netdev, &chandef);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
mutex_unlock(&vif->wdev.mtx);
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 2dda4c5d7427..674461fa7fb3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -1721,8 +1721,8 @@ enum qlink_chan_stat {
* @time_on: amount of time radio operated on that channel.
* @time_tx: amount of time radio spent transmitting on the channel.
* @time_rx: amount of time radio spent receiving on the channel.
- * @cca_busy: amount of time the the primary channel was busy.
- * @cca_busy_ext: amount of time the the secondary channel was busy.
+ * @cca_busy: amount of time the primary channel was busy.
+ * @cca_busy_ext: amount of time the secondary channel was busy.
* @time_scan: amount of radio spent scanning on the channel.
* @chan_noise: channel noise.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index dec6ffdf07c4..273c5eac3362 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1654,7 +1654,8 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
* IEEE80211 stack callback functions.
*/
static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -1667,7 +1668,7 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
if (queue != 0)
return -EINVAL;
- if (rt2x00mac_conf_tx(hw, vif, queue, params))
+ if (rt2x00mac_conf_tx(hw, vif, link_id, queue, params))
return -EINVAL;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index cbdaf7992f98..18102fbe36d6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -10395,7 +10395,8 @@ int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold);
int rt2800_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -10411,7 +10412,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params);
if (retval)
return retval;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 1139405c0ebb..e1761f467b94 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -245,7 +245,8 @@ void rt2800_get_key_seq(struct ieee80211_hw *hw,
struct ieee80211_key_seq *seq);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
int rt2800_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params);
u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 9f6fc40649be..8f5772b98f58 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -232,7 +232,7 @@ struct link_qual {
* VGC levels
* Hardware driver will tune the VGC level during each call
* to the link_tuner() callback function. This vgc_level is
- * is determined based on the link quality statistics like
+ * determined based on the link quality statistics like
* average RSSI and the false CCA count.
*
* In some cases the drivers need to differentiate between
@@ -1479,9 +1479,10 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes);
+ u64 changes);
int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 6bafdd991171..f895f560a185 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -70,6 +70,8 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
+ bss_conf);
struct rt2x00lib_erp erp;
memset(&erp, 0, sizeof(erp));
@@ -87,7 +89,7 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
erp.beacon_int = bss_conf->beacon_int;
/* Update the AID, this is needed for dynamic PS support */
- rt2x00dev->aid = bss_conf->assoc ? bss_conf->aid : 0;
+ rt2x00dev->aid = vif->cfg.assoc ? vif->cfg.aid : 0;
rt2x00dev->last_beacon = bss_conf->sync_tsf;
/* Update global beacon interval time, this is needed for PS support */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index dea5babd30fe..4202c6517783 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -325,7 +325,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
*/
rt2x00queue_stop_queue(rt2x00dev->rx);
- /* Do not race with with link tuner. */
+ /* Do not race with link tuner. */
mutex_lock(&rt2x00dev->conf_mutex);
/*
@@ -574,7 +574,7 @@ EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
@@ -645,7 +645,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
rt2x00dev->link.count = 0;
- if (bss_conf->assoc)
+ if (vif->cfg.assoc)
rt2x00dev->intf_associated++;
else
rt2x00dev->intf_associated--;
@@ -665,7 +665,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index aa6b2f3d2eff..4d06038afd83 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -758,7 +758,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
rt2x00queue_free_skb(intf->beacon);
- intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
+ intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
if (!intf->beacon->skb)
return -ENOMEM;
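ieee80211_beacon_get() gained a link_id parameter with the MLO rework; drivers that are not multi-link aware pass 0 to request the default link's beacon, as the hunk above does. A short sketch of the updated call, wrapped in a hypothetical helper:

/* Fetch a beacon for the only (default) link of a non-MLO driver. */
static struct sk_buff *example_get_beacon(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	return ieee80211_beacon_get(hw, vif, 0);	/* link_id 0 */
}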
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 82cfc2aadc2b..d92f9eb07dc9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2799,7 +2799,8 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
* IEEE80211 stack callback functions.
*/
static int rt61pci_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2815,7 +2816,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params);
if (retval)
return retval;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 5ff2c740c3ea..e3269fd7c59e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2218,7 +2218,8 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
* IEEE80211 stack callback functions.
*/
static int rt73usb_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2234,7 +2235,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params);
if (retval)
return retval;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 87e98ab068ed..1f57a0055bbd 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1643,38 +1643,34 @@ static void authenticate_timeout(struct timer_list *t)
/*===========================================================================*/
static int parse_addr(char *in_str, UCHAR *out)
{
+ int i, k;
int len;
- int i, j, k;
- int status;
if (in_str == NULL)
return 0;
- if ((len = strlen(in_str)) < 2)
+ len = strnlen(in_str, ADDRLEN * 2 + 1) - 1;
+ if (len < 1)
return 0;
memset(out, 0, ADDRLEN);
- status = 1;
- j = len - 1;
- if (j > 12)
- j = 12;
i = 5;
- while (j > 0) {
- if ((k = hex_to_bin(in_str[j--])) != -1)
+ while (len > 0) {
+ if ((k = hex_to_bin(in_str[len--])) != -1)
out[i] = k;
else
return 0;
- if (j == 0)
+ if (len == 0)
break;
- if ((k = hex_to_bin(in_str[j--])) != -1)
+ if ((k = hex_to_bin(in_str[len--])) != -1)
out[i] += k << 4;
else
return 0;
if (!i--)
break;
}
- return status;
+ return 1;
}
/*===========================================================================*/
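The rewritten parser above bounds the length check with strnlen() instead of an unbounded strlen(), then walks the hex digits right-to-left so a short string only fills the low-order address bytes. A hedged usage sketch; the input string below is purely illustrative:

UCHAR bssid[ADDRLEN];

if (!parse_addr("00e1deadbeef", bssid))
	pr_warn("ray_cs: could not parse BSSID string\n");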
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 025619cd14e8..cdfe08078c57 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1300,7 +1300,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
goto resched;
/* grab a fresh beacon */
- skb = ieee80211_beacon_get(dev, vif);
+ skb = ieee80211_beacon_get(dev, vif, 0);
if (!skb)
goto resched;
@@ -1424,7 +1424,8 @@ static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
}
static int rtl8180_conf_tx(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rtl8180_priv *priv = dev->priv;
@@ -1500,7 +1501,7 @@ static void rtl8180_conf_erp(struct ieee80211_hw *dev,
static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_vif *vif_priv;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index eb68b2d3caa1..c0f6e9c6d03e 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1075,7 +1075,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
goto resched;
/* grab a fresh beacon */
- skb = ieee80211_beacon_get(dev, vif);
+ skb = ieee80211_beacon_get(dev, vif, 0);
if (!skb)
goto resched;
@@ -1251,7 +1251,7 @@ static void rtl8187_conf_erp(struct rtl8187_priv *priv, bool use_short_slot,
static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct rtl8187_priv *priv = dev->priv;
struct rtl8187_vif *vif_priv;
@@ -1338,7 +1338,8 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
}
static int rtl8187_conf_tx(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rtl8187_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 8b2ca9e8eac6..c66f0726b253 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4558,7 +4558,7 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changed)
+ struct ieee80211_bss_conf *bss_conf, u64 changed)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
@@ -4570,11 +4570,11 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rarpt = &priv->ra_report;
if (changed & BSS_CHANGED_ASSOC) {
- dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
+ dev_dbg(dev, "Changed ASSOC: %i!\n", vif->cfg.assoc);
rtl8xxxu_set_linktype(priv, vif->type);
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
u32 ramask;
int sgi = 0;
u8 highest_rate;
@@ -4639,7 +4639,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
- 0xc000 | bss_conf->aid);
+ 0xc000 | vif->cfg.aid);
priv->fops->report_connect(priv, 0, true);
} else {
@@ -5405,7 +5405,7 @@ void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->bss_conf.assoc);
+ wifi_connected = (vif && vif->cfg.assoc);
if (!wifi_connected) {
rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
@@ -5431,7 +5431,7 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->bss_conf.assoc);
+ wifi_connected = (vif && vif->cfg.assoc);
if (wifi_connected) {
u32 val32 = 0;
@@ -5957,7 +5957,8 @@ exit:
}
static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -6657,7 +6658,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (!hw) {
ret = -ENOMEM;
priv = NULL;
- goto exit;
+ goto err_put_dev;
}
priv = hw->priv;
@@ -6679,24 +6680,24 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ret = rtl8xxxu_parse_usb(priv, interface);
if (ret)
- goto exit;
+ goto err_set_intfdata;
ret = rtl8xxxu_identify_chip(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to identify chip\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = rtl8xxxu_read_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = priv->fops->parse_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
- goto exit;
+ goto err_set_intfdata;
}
rtl8xxxu_print_chipinfo(priv);
@@ -6704,12 +6705,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ret = priv->fops->load_firmware(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to load firmware\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = rtl8xxxu_init_device(hw);
if (ret)
- goto exit;
+ goto err_set_intfdata;
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6759,12 +6760,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret) {
dev_err(&udev->dev, "%s: Failed to register: %i\n",
__func__, ret);
- goto exit;
+ goto err_set_intfdata;
}
return 0;
-exit:
+err_set_intfdata:
usb_set_intfdata(interface, NULL);
if (priv) {
@@ -6772,9 +6773,10 @@ exit:
mutex_destroy(&priv->usb_buf_mutex);
mutex_destroy(&priv->h2c_mutex);
}
- usb_put_dev(udev);
ieee80211_free_hw(hw);
+err_put_dev:
+ usb_put_dev(udev);
return ret;
}
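The probe error path above is reshaped into the usual reverse-order unwind: each failure jumps to a label that releases only what has already been acquired, and usb_put_dev() is now reached even when ieee80211_alloc_hw() fails. A self-contained sketch of that shape, with purely illustrative names:

#include <linux/usb.h>
#include <net/mac80211.h>

struct example_priv { int dummy; };
static const struct ieee80211_ops example_ops;	/* placeholder; a real driver fills the mandatory callbacks */
static int example_init_device(struct ieee80211_hw *hw) { return 0; }

static int example_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
	struct ieee80211_hw *hw;
	int ret;

	hw = ieee80211_alloc_hw(sizeof(struct example_priv), &example_ops);
	if (!hw) {
		ret = -ENOMEM;
		goto err_put_dev;	/* nothing else to undo yet */
	}

	ret = example_init_device(hw);
	if (ret)
		goto err_free_hw;

	return 0;

err_free_hw:
	ieee80211_free_hw(hw);
err_put_dev:
	usb_put_dev(udev);
	return ret;
}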
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 99a1d91ced5a..ca01270944fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -671,7 +671,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
/*
*because we should back channel to
- *current_network.chan in in scanning,
+ *current_network.chan in scanning,
*So if set_chan == current_network.chan
*we should set it.
*because mac80211 tell us wrong bw40
@@ -982,7 +982,8 @@ static int _rtl_get_hal_qnum(u16 queue)
*for rtl819x BE = 0, BK = 1, VI = 2, VO = 3
*/
static int rtl_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1009,7 +1010,7 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
struct rtl_tcb_desc tcb_desc;
if (skb) {
@@ -1040,7 +1041,7 @@ EXPORT_SYMBOL_GPL(rtl_update_beacon_work_callback);
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
@@ -1094,7 +1095,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
u8 mstatus;
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
struct ieee80211_sta *sta = NULL;
u8 keep_alive = 10;
@@ -1111,7 +1112,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_LINKED;
mac->cnt_after_linked = 0;
- mac->assoc_id = bss_conf->aid;
+ mac->assoc_id = vif->cfg.aid;
memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
if (rtlpriv->cfg->ops->linked_set_reg)
@@ -1702,7 +1703,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = 0;
eth_zero_addr(mac_addr);
/*
- *mac80211 will delete entrys one by one,
+ *mac80211 will delete entries one by one,
*so don't use rtl_cam_reset_all_entry
*or clear all entry here.
*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 901cdfe3723c..0b1bc04cb6ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -329,8 +329,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -340,8 +340,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
&h2c_data[4], &h2c_data[5],
&h2c_data[6], &h2c_data[7]);
- if (h2c_len <= 0)
- return count;
+ if (h2c_len == 0)
+ return -EINVAL;
for (i = 0; i < h2c_len; i++)
h2c_data_packed[i] = (u8)h2c_data[i];
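The debugfs write handler above now reports real errors: a faulting copy_from_user() becomes -EFAULT and an unparsable buffer becomes -EINVAL, instead of pretending the whole write was consumed. A minimal sketch of that pattern with hypothetical names:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/uaccess.h>

static ssize_t example_debugfs_write(struct file *filp, const char __user *buffer,
				     size_t count, loff_t *loff)
{
	char tmp[32];
	unsigned int val;
	size_t tmp_len = min(count, sizeof(tmp) - 1);

	if (copy_from_user(tmp, buffer, tmp_len))
		return -EFAULT;			/* user pointer faulted */

	tmp[tmp_len] = '\0';

	if (sscanf(tmp, "%x", &val) != 1)
		return -EINVAL;			/* nothing parseable */

	pr_debug("parsed %#x\n", val);
	return count;
}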
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 8e4c15654746..ca79f652fef3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1100,7 +1100,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct tasklet_struct *t)
}
/*NB: the beacon data buffer must be 32-bit aligned. */
- pskb = ieee80211_beacon_get(hw, mac->vif);
+ pskb = ieee80211_beacon_get(hw, mac->vif, 0);
if (!pskb)
return;
hdr = rtl_get_hdr(pskb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 4cf8face0bbd..0bc4afa4fda3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -178,7 +178,7 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
}
}
-/* Allows active scan scan on Ch 12 and 13 */
+/* Allows active scan on Ch 12 and 13 */
static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator
initiator)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 4ca299c9de77..bd0b7e365edb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1407,7 +1407,7 @@ static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
/* If IPS we need to turn LED on. So we not
- * not disable BIT 3/7 of reg3. */
+ * disable BIT 3/7 of reg3. */
if (rtlpriv->psc.rfoff_reason & (RF_CHANGE_BY_IPS | RF_CHANGE_BY_HW))
tmpu1b &= 0xFB;
else
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index e76841d3417b..76c7f3257dd3 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -67,7 +67,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
ether_addr_copy(bfee->mac_addr, bssid);
bfee->role = RTW_BFEE_MU;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
- bfee->aid = bss_conf->aid;
+ bfee->aid = vif->cfg.aid;
bfinfo->bfer_mu_cnt++;
rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 1a52ff585fbc..7cde6bcf253b 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -269,11 +269,7 @@ static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
for (i = 0 ; i < buf_size ; i += 8) {
if (i % page_size == 0)
seq_printf(m, "PAGE %d\n", (i + offset) / page_size);
- seq_printf(m, "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
- *(buf + i), *(buf + i + 1),
- *(buf + i + 2), *(buf + i + 3),
- *(buf + i + 4), *(buf + i + 5),
- *(buf + i + 6), *(buf + i + 7));
+ seq_printf(m, "%8ph\n", buf + i);
}
vfree(buf);
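The hand-rolled eight-byte seq_printf() above collapses into the kernel's %*ph extension, which hex-dumps a small buffer as space-separated bytes. A short sketch with illustrative names:

#include <linux/seq_file.h>
#include <linux/types.h>

static void example_dump_rows(struct seq_file *m, const u8 *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 8 <= len; i += 8)
		seq_printf(m, "%8ph\n", buf + i);	/* eight bytes per row */
}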
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index c3ae631c2264..4fdab0329695 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1070,7 +1070,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
switch (rsvd_pkt->type) {
case RSVD_BEACON:
- skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
+ skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
rsvd_pkt->tim_offset = tim_offset;
break;
case RSVD_PS_POLL:
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 4310362dc333..c7b98a0599d5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -355,7 +355,7 @@ static void rtw_conf_tx(struct rtw_dev *rtwdev,
static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
@@ -369,12 +369,12 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
rtw_vif_assoc_changed(rtwvif, conf);
- if (conf->assoc) {
+ if (vif->cfg.assoc) {
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
rtw_fw_download_rsvd_page(rtwdev);
rtw_send_rsvd_page_h2c(rtwdev);
- rtw_coex_media_status_notify(rtwdev, conf->assoc);
+ rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
rtw_store_op_chan(rtwdev);
@@ -429,7 +429,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
-static int rtw_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int rtw_ops_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -442,7 +444,8 @@ static int rtw_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct rtw_dev *rtwdev = hw->priv;
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index efabd5b1bf5b..76dc9da88f6c 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -171,7 +171,7 @@ static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
if (vif->type == NL80211_IFTYPE_STATION)
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
iter_data->rtwvif = rtwvif;
rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif);
@@ -525,8 +525,13 @@ EXPORT_SYMBOL(rtw_dump_reg);
void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
struct ieee80211_bss_conf *conf)
{
- if (conf && conf->assoc) {
- rtwvif->aid = conf->aid;
+ struct ieee80211_vif *vif = NULL;
+
+ if (conf)
+ vif = container_of(conf, struct ieee80211_vif, bss_conf);
+
+ if (conf && vif->cfg.assoc) {
+ rtwvif->aid = vif->cfg.aid;
rtwvif->net_type = RTW_NET_MGD_LINKED;
} else {
rtwvif->aid = 0;
@@ -1383,9 +1388,12 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
bool hw_scan)
{
- struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
u32 config = 0;
+ if (!rtwvif)
+ return;
+
clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
@@ -1588,13 +1596,13 @@ static void rtw_vif_smps_iter(void *data, u8 *mac,
{
struct rtw_dev *rtwdev = (struct rtw_dev *)data;
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return;
if (rtwdev->hal.txrx_1ss)
- ieee80211_request_smps(vif, IEEE80211_SMPS_STATIC);
+ ieee80211_request_smps(vif, 0, IEEE80211_SMPS_STATIC);
else
- ieee80211_request_smps(vif, IEEE80211_SMPS_OFF);
+ ieee80211_request_smps(vif, 0, IEEE80211_SMPS_OFF);
}
void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
@@ -1984,6 +1992,10 @@ int rtw_core_init(struct rtw_dev *rtwdev)
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!rtwdev->tx_wq) {
+ rtw_warn(rtwdev, "alloc_workqueue rtw_tx_wq failed\n");
+ return -ENOMEM;
+ }
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
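alloc_workqueue() returns NULL on failure, which the added check above propagates as -ENOMEM rather than leaving a NULL workqueue behind for later queue_work() calls. A minimal sketch of the check, with a hypothetical queue name:

struct workqueue_struct *wq;

wq = alloc_workqueue("example_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!wq)
	return -ENOMEM;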
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index c02be4ac159e..7db627fc26be 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -1233,9 +1233,6 @@ struct rtw_chip_info {
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
- /* for 8821c set channel */
- u32 ch_param[3];
-
/* coex paras */
u32 coex_para_ver;
u8 bt_desired_ver;
@@ -1937,6 +1934,9 @@ struct rtw_hal {
enum rtw_sar_bands sar_band;
struct rtw_sar sar;
+
+ /* for 8821c set channel */
+ u32 ch_param[3];
};
struct rtw_path_div {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 93cce44df531..993bd6b1d723 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2701,7 +2701,7 @@ static const struct rtw_reg_domain coex_info_hw_regs_8723d[] = {
{0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
};
-struct rtw_chip_info rtw8723d_hw_spec = {
+const struct rtw_chip_info rtw8723d_hw_spec = {
.ops = &rtw8723d_ops,
.id = RTW_CHIP_TYPE_8723D,
.fw_name = "rtw88/rtw8723d_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 41d35174a542..4641f6e047b4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -72,6 +72,8 @@ struct rtw8723d_efuse {
struct rtw8723de_efuse e;
};
+extern const struct rtw_chip_info rtw8723d_hw_spec;
+
/* phy status page0 */
#define GET_PHY_STAT_P0_PWDB(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
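The rtw88 chip descriptors become const and their extern declarations move into the shared per-chip headers, which lets the small *e.c PCI glue files below drop their one-line private headers. A sketch of the pattern with stand-in names:

/* Stand-in type so the sketch is self-contained. */
struct example_chip_info {
	unsigned int id;
	const char *fw_name;
};

/* Shared chip header: one extern declaration replaces the per-bus header. */
extern const struct example_chip_info example_hw_spec;

/* Chip implementation file: the descriptor itself, now in read-only data. */
const struct example_chip_info example_hw_spec = {
	.id	 = 0x8723,
	.fw_name = "example/example_fw.bin",
};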
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
index 2dd689441e8d..abbaafa32851 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8723de.h"
+#include "rtw8723d.h"
static const struct pci_device_id rtw_8723de_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.h b/drivers/net/wireless/realtek/rtw88/rtw8723de.h
deleted file mode 100644
index 2b4894846a07..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8723DE_H_
-#define __RTW_8723DE_H_
-
-extern struct rtw_chip_info rtw8723d_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index ffee39ea5df6..025262a8970e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -125,6 +125,7 @@ static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev)
static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u8 crystal_cap, val;
/* power on BB/RF domain */
@@ -159,9 +160,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
/* post init after header files config */
rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
- rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
- rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
- rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
+ hal->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
+ hal->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
+ hal->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
rtw_phy_init(rtwdev);
rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f;
@@ -351,6 +352,7 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u32 val32;
if (channel <= 14) {
@@ -367,11 +369,11 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
} else {
rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD,
- rtwdev->chip->ch_param[0]);
+ hal->ch_param[0]);
rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD,
- rtwdev->chip->ch_param[1] & MASKLWORD);
+ hal->ch_param[1] & MASKLWORD);
rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD,
- rtwdev->chip->ch_param[2]);
+ hal->ch_param[2]);
}
} else if (channel > 35) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
@@ -1877,7 +1879,7 @@ static const struct rtw_reg_domain coex_info_hw_regs_8821c[] = {
{0x60A, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
-struct rtw_chip_info rtw8821c_hw_spec = {
+const struct rtw_chip_info rtw8821c_hw_spec = {
.ops = &rtw8821c_ops,
.id = RTW_CHIP_TYPE_8821C,
.fw_name = "rtw88/rtw8821c_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index d9fbddd7b0f3..2698801fc35d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -84,6 +84,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
rtw_write32_mask(rtwdev, addr + 0x200, mask, data);
}
+extern const struct rtw_chip_info rtw8821c_hw_spec;
+
#define rtw_write32s_mask(rtwdev, addr, mask, data) \
do { \
BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00); \
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
index 56d22f9de904..f3d971feda04 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8821ce.h"
+#include "rtw8821c.h"
static const struct pci_device_id rtw_8821ce_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h
deleted file mode 100644
index 54142acca534..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8821CE_H_
-#define __RTW_8821CE_H_
-
-extern struct rtw_chip_info rtw8821c_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index dccd722b8e62..321848870561 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2497,7 +2497,7 @@ static struct rtw_hw_reg_offset rtw8822b_edcca_th[] = {
[EDCCA_TH_H2L_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE1}, .offset = 0},
};
-struct rtw_chip_info rtw8822b_hw_spec = {
+const struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
.fw_name = "rtw88/rtw8822b_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 3fff8b881854..01d3644e0c94 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -187,4 +187,6 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_ANTWT 0x1904
#define REG_IQKFAILMSK 0x1bf0
+extern const struct rtw_chip_info rtw8822b_hw_spec;
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
index 62ee7e62cac0..4994950776cd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8822be.h"
+#include "rtw8822b.h"
static const struct pci_device_id rtw_8822be_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.h b/drivers/net/wireless/realtek/rtw88/rtw8822be.h
deleted file mode 100644
index 6668460d664d..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8822BE_H_
-#define __RTW_8822BE_H_
-
-extern struct rtw_chip_info rtw8822b_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index c043b5c520b9..09f9e4adcf34 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5310,7 +5310,7 @@ static const struct rtw_reg_domain coex_info_hw_regs_8822c[] = {
{0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
-struct rtw_chip_info rtw8822c_hw_spec = {
+const struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
.fw_name = "rtw88/rtw8822c_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 8201955e1f21..479d5d769c52 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -118,6 +118,8 @@ enum rtw8822c_dpk_one_shot_action {
void rtw8822c_parse_tbl_dpk(struct rtw_dev *rtwdev,
const struct rtw_table *tbl);
+extern const struct rtw_chip_info rtw8822c_hw_spec;
+
#define RTW_DECL_TABLE_DPK(name) \
const struct rtw_table name ## _tbl = { \
.data = name, \
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
index 3845b1333dc3..e26c6bc82936 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8822ce.h"
+#include "rtw8822c.h"
static const struct pci_device_id rtw_8822ce_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h
deleted file mode 100644
index fee32d7a4504..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8822CE_H_
-#define __RTW_8822CE_H_
-
-extern struct rtw_chip_info rtw8822c_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 8a26adeb23fb..f5301c2bbf13 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -445,15 +445,22 @@ void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
}
-void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+void rtw89_cam_deinit_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_bssid_cam_entry *bssid_cam)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ bssid_cam->valid = false;
+ clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
+}
+
+void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
rtw89_cam_deinit_addr_cam(rtwdev, addr_cam);
- bssid_cam->valid = false;
- clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
+ rtw89_cam_deinit_bssid_cam(rtwdev, bssid_cam);
}
void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev)
@@ -539,10 +546,11 @@ static int rtw89_cam_get_avail_bssid_cam(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_bssid_cam_entry *bssid_cam,
+ const u8 *bssid)
{
- struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
u8 bssid_cam_idx;
int ret;
@@ -563,7 +571,7 @@ static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
bssid_cam->len = BSSID_CAM_ENT_SIZE;
bssid_cam->offset = 0;
bssid_cam->valid = true;
- ether_addr_copy(bssid_cam->bssid, rtwvif->bssid);
+ ether_addr_copy(bssid_cam->bssid, bssid);
return 0;
}
@@ -581,7 +589,7 @@ int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
int ret;
- ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, rtwvif->bssid);
if (ret) {
rtw89_err(rtwdev, "failed to init bssid cam\n");
return ret;
@@ -597,16 +605,24 @@ int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
}
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, u8 *cmd)
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+ struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
u8 bss_color = vif->bss_conf.he_bss_color.color;
+ u8 bss_mask;
+
+ if (vif->bss_conf.nontransmitted)
+ bss_mask = RTW89_BSSID_MATCH_5_BYTES;
+ else
+ bss_mask = RTW89_BSSID_MATCH_ALL;
FWCMD_SET_ADDR_BSSID_IDX(cmd, bssid_cam->bssid_cam_idx);
FWCMD_SET_ADDR_BSSID_OFFSET(cmd, bssid_cam->offset);
FWCMD_SET_ADDR_BSSID_LEN(cmd, bssid_cam->len);
FWCMD_SET_ADDR_BSSID_VALID(cmd, bssid_cam->valid);
+ FWCMD_SET_ADDR_BSSID_MASK(cmd, bss_mask);
FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, bssid_cam->phy_idx);
FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, bss_color);
@@ -694,7 +710,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind);
FWCMD_SET_ADDR_MACID(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
if (rtwvif->net_type == RTW89_NET_TYPE_INFRA)
- FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff);
+ FWCMD_SET_ADDR_AID12(cmd, vif->cfg.aid & 0xfff);
else if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
FWCMD_SET_ADDR_AID12(cmd, sta ? sta->aid & 0xfff : 0);
FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern);
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index a3931d3e40d2..83c160a614e6 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -9,6 +9,9 @@
#define RTW89_SEC_CAM_LEN 20
+#define RTW89_BSSID_MATCH_ALL GENMASK(5, 0)
+#define RTW89_BSSID_MATCH_5_BYTES GENMASK(4, 0)
+
static inline void FWCMD_SET_ADDR_IDX(void *cmd, u32 value)
{
le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0));
@@ -309,6 +312,11 @@ static inline void FWCMD_SET_ADDR_BSSID_BB_SEL(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1));
}
+static inline void FWCMD_SET_ADDR_BSSID_MASK(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(7, 2));
+}
+
static inline void FWCMD_SET_ADDR_BSSID_BSS_COLOR(void *cmd, u32 value)
{
le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8));
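These setters pack fields into the H2C command buffer with le32p_replace_bits(), a read-modify-write that touches only the masked bits of one little-endian dword. Roughly what it does under the hood, as a non-atomic sketch assuming a non-zero mask:

#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/types.h>

static inline void example_replace_le32_bits(__le32 *p, u32 value, u32 mask)
{
	u32 tmp = le32_to_cpu(*p);

	tmp &= ~mask;
	tmp |= (value << __ffs(mask)) & mask;
	*p = cpu_to_le32(tmp);
}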
@@ -351,6 +359,12 @@ int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
const struct rtw89_bssid_cam_entry *bssid_cam);
void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
struct rtw89_addr_cam_entry *addr_cam);
+int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_bssid_cam_entry *bssid_cam,
+ const u8 *bssid);
+void rtw89_cam_deinit_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_bssid_cam_entry *bssid_cam);
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif,
struct rtw89_sta *rtwsta,
@@ -360,7 +374,8 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta,
u8 *cmd);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif *vif, u8 *cmd);
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, u8 *cmd);
int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a6a90572e74b..a5880a54812e 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -155,18 +155,19 @@ static struct ieee80211_rate rtw89_bitrates[] = {
{ .bitrate = 540, .hw_value = 0x0b, },
};
-u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate)
+bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
- rtw89_info(rtwdev, "invalid rpt rate %d\n", rpt_rate);
- return 0;
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
+ return false;
}
rate = rtw89_bitrates[rpt_rate];
+ *bitrate = rate.bitrate;
- return rate.bitrate;
+ return true;
}
static struct ieee80211_supported_band rtw89_sband_2ghz = {
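rtw89_ra_report_to_bitrate() switches to returning success/failure and writing the bitrate through an out-parameter, so an out-of-range firmware rate index can be distinguished from a legitimate value. A hypothetical caller, sketching the new contract:

static void example_fill_legacy_rate(struct rtw89_dev *rtwdev, u8 rpt_rate,
				     struct rate_info *ri)
{
	u16 bitrate;

	if (!rtw89_ra_report_to_bitrate(rtwdev, rpt_rate, &bitrate))
		return;			/* invalid index: keep the previous report */

	ri->legacy = bitrate;		/* struct ieee80211_rate units: 100 kbit/s */
}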
@@ -408,18 +409,30 @@ rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
static void
rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
- struct rtw89_core_tx_request *tx_req, u8 tid)
+ struct rtw89_core_tx_request *tx_req,
+ enum btc_pkt_type pkt_type)
{
struct ieee80211_sta *sta = tx_req->sta;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct sk_buff *skb = tx_req->skb;
struct rtw89_sta *rtwsta;
u8 ampdu_num;
+ u8 tid;
+
+ if (pkt_type == PACKET_EAPOL) {
+ desc_info->bk = true;
+ return;
+ }
+
+ if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU))
+ return;
if (!sta) {
rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
return;
}
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
rtwsta = (struct rtw89_sta *)sta->drv_priv;
ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
@@ -720,8 +733,6 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
- if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU)
- rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, tid);
if (IEEE80211_SKB_CB(skb)->control.hw_key)
rtw89_core_tx_update_sec_key(rtwdev, tx_req);
@@ -832,6 +843,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
rtw89_core_tx_update_data_info(rtwdev, tx_req);
pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
+ rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type);
rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
break;
case RTW89_CORE_TX_TYPE_FWCMD:
@@ -1232,7 +1244,7 @@ static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
if (RTW89_GET_PHY_STS_LEN(phy_ppdu->buf) << 3 != phy_ppdu->len) {
- rtw89_warn(rtwdev, "phy ppdu len mismatch\n");
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n");
return -EINVAL;
}
rtw89_core_update_phy_ppdu(phy_ppdu);
@@ -1343,6 +1355,47 @@ struct rtw89_vif_rx_stats_iter_data {
const u8 *bssid;
};
+static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data;
+ u8 *pos, *end, type;
+ u16 aid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) ||
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION ||
+ rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ return;
+
+ type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK);
+ if (type != IEEE80211_TRIGGER_TYPE_BASIC)
+ return;
+
+ end = (u8 *)tf + skb->len;
+ pos = tf->variable;
+
+ while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) {
+ aid = RTW89_GET_TF_USER_INFO_AID12(pos);
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "[TF] aid: %d, ul_mcs: %d, rua: %d\n",
+ aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos),
+ RTW89_GET_TF_USER_INFO_RUA(pos));
+
+ if (aid == RTW89_TF_PAD)
+ break;
+
+ if (aid == vif->cfg.aid) {
+ rtwvif->stats.rx_tf_acc++;
+ rtwdev->stats.rx_tf_acc++;
+ break;
+ }
+
+ pos += RTW89_TF_BASIC_USER_INFO_SZ;
+ }
+}
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -1355,6 +1408,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const u8 *bssid = iter_data->bssid;
+ if (ieee80211_is_trigger(hdr->frame_control)) {
+ rtw89_stats_trigger_frame(rtwdev, vif, skb);
+ return;
+ }
+
if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
return;
@@ -1425,11 +1483,17 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct sk_buff *skb_ppdu,
struct ieee80211_rx_status *rx_status)
{
+ struct napi_struct *napi = &rtwdev->napi;
+
+ /* In low power mode, napi isn't scheduled. Receive it to netif. */
+ if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state)))
+ napi = NULL;
+
rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
local_bh_enable();
rtwdev->napi_budget_countdown--;
}
@@ -1608,7 +1672,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
if (rtwdev->scanning &&
RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
- u8 chan = hal->current_channel;
+ u8 chan = hal->current_primary_channel;
u8 band = hal->current_band_type;
enum nl80211_band nl_band;
@@ -1811,6 +1875,55 @@ static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
spin_unlock_bh(&rtwdev->ba_lock);
}
+static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+
+ if (sta == txq->sta) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ list_del_init(&rtwtxq->list);
+ }
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
+static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
+ struct rtw89_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+
+ if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc))
+ return;
+
+ if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) ||
+ test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ return;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ if (!list_empty(&rtwtxq->list)) {
+ list_del_init(&rtwtxq->list);
+ goto out;
+ }
+
+ set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+
+ list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
+ ieee80211_stop_tx_ba_session(sta, txq->tid);
+ cancel_delayed_work(&rtwdev->forbid_ba_work);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
+ RTW89_FORBID_BA_TIMER);
+
+out:
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
struct rtw89_txq *rtwtxq,
struct sk_buff *skb)
@@ -1820,13 +1933,15 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta = txq->sta;
struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
- if (unlikely(skb_get_queue_mapping(skb) == IEEE80211_AC_VO))
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
return;
+ }
- if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ if (unlikely(!sta))
return;
- if (unlikely(!sta))
+ if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
return;
if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))
@@ -1945,6 +2060,10 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
ieee80211_return_txq(hw, txq, sched_txq);
if (frame_cnt != 0)
rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid));
+
+ /* bound of tx_resource could get stuck due to burst traffic */
+ if (frame_cnt == tx_resource)
+ *reinvoke = true;
}
ieee80211_txq_schedule_end(hw, ac);
}
@@ -1982,6 +2101,20 @@ static void rtw89_core_txq_reinvoke_work(struct work_struct *w)
queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
}
+static void rtw89_forbid_ba_work(struct work_struct *w)
+{
+ struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
+ forbid_ba_work.work);
+ struct rtw89_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ list_del_init(&rtwtxq->list);
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
u32 throughput, u64 cnt)
{
@@ -2023,6 +2156,8 @@ static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
stats->rx_unicast = 0;
stats->tx_cnt = 0;
stats->rx_cnt = 0;
+ stats->rx_tf_periodic = stats->rx_tf_acc;
+ stats->rx_tf_acc = 0;
if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv)
return true;
@@ -2238,13 +2373,13 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
ewma_rssi_init(&rtwsta->avg_rssi);
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
rtwsta->mac_id = rtwvif->mac_id;
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
- } else if (vif->type == NL80211_IFTYPE_AP) {
+ } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
RTW89_MAX_MAC_ID_NUM);
}
@@ -2275,10 +2410,13 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
rtw89_mac_bf_disassoc(rtwdev, vif, sta);
rtw89_core_free_sta_pending_ba(rtwdev, sta);
- if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta);
+ if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+ if (sta->tdls)
+ rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
rtw89_vif_type_mapping(vif, false);
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
@@ -2293,7 +2431,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c role info\n");
@@ -2317,9 +2455,10 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
int ret;
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
@@ -2332,7 +2471,15 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, &rtwvif->bssid_cam);
+ if (sta->tdls) {
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n");
+ return ret;
+ }
+ }
+
+ ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, bssid_cam);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
return ret;
@@ -2369,7 +2516,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_mac_bf_assoc(rtwdev, vif, sta);
rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
@@ -2385,10 +2532,10 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
- else if (vif->type == NL80211_IFTYPE_AP)
+ else if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
return 0;
@@ -2776,6 +2923,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+ cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
mutex_lock(&rtwdev->mutex);
@@ -2795,6 +2943,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
u8 band;
INIT_LIST_HEAD(&rtwdev->ba_list);
+ INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
INIT_LIST_HEAD(&rtwdev->early_h2c_list);
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
@@ -2810,6 +2959,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
spin_lock_init(&rtwdev->ba_lock);
spin_lock_init(&rtwdev->rpwm_lock);
@@ -2875,7 +3025,10 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif, bool hw_scan)
{
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_vif *rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+
+ if (!rtwvif)
+ return;
ether_addr_copy(rtwvif->mac_addr, vif->addr);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
@@ -3008,12 +3161,15 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index e8a77225a90f..7a9d6f5d8a51 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -29,6 +29,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define INV_RF_DATA 0xffffffff
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
+#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
#define CFO_TRACK_MAX_USER 64
#define MAX_RSSI 110
#define RSSI_FACTOR 1
@@ -55,6 +56,16 @@ enum htc_om_channel_width {
#define RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR BIT(16)
#define RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS BIT(17)
+#define RTW89_TF_PAD GENMASK(11, 0)
+#define RTW89_TF_BASIC_USER_INFO_SZ 6
+
+#define RTW89_GET_TF_USER_INFO_AID12(data) \
+ le32_get_bits(*((const __le32 *)(data)), GENMASK(11, 0))
+#define RTW89_GET_TF_USER_INFO_RUA(data) \
+ le32_get_bits(*((const __le32 *)(data)), GENMASK(19, 12))
+#define RTW89_GET_TF_USER_INFO_UL_MCS(data) \
+ le32_get_bits(*((const __le32 *)(data)), GENMASK(24, 21))
+
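Decoding sketch for the trigger-frame accessors above: each Basic Trigger user-info field is RTW89_TF_BASIC_USER_INFO_SZ (6) bytes, and its first little-endian dword carries the AID12, RU allocation and UL MCS subfields read by rtw89_stats_trigger_frame(). Illustrative only:

static void example_dump_tf_user_info(const u8 *pos)
{
	u16 aid = RTW89_GET_TF_USER_INFO_AID12(pos);
	u8 rua = RTW89_GET_TF_USER_INFO_RUA(pos);
	u8 ul_mcs = RTW89_GET_TF_USER_INFO_UL_MCS(pos);

	pr_debug("TF user info: aid12=%u rua=%u ul_mcs=%u\n", aid, rua, ul_mcs);
}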
enum rtw89_subband {
RTW89_CH_2G = 0,
RTW89_CH_5G_BAND_1 = 1,
@@ -134,6 +145,7 @@ enum rtw89_core_rx_type {
enum rtw89_txq_flags {
RTW89_TXQ_F_AMPDU = 0,
RTW89_TXQ_F_BLOCK_BA = 1,
+ RTW89_TXQ_F_FORBID_BA = 2,
};
enum rtw89_net_type {
@@ -943,6 +955,10 @@ struct rtw89_traffic_stats {
u32 rx_throughput;
u32 tx_throughput_raw;
u32 rx_throughput_raw;
+
+ u32 rx_tf_acc;
+ u32 rx_tf_periodic;
+
enum rtw89_tfc_lv tx_tfc_lv;
enum rtw89_tfc_lv rx_tfc_lv;
struct ewma_tp tx_ewma_tp;
@@ -1961,7 +1977,8 @@ struct rtw89_sta {
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
- struct rtw89_addr_cam_entry addr_cam; /* AP mode only */
+ struct rtw89_addr_cam_entry addr_cam; /* AP mode or TDLS peer only */
+ struct rtw89_bssid_cam_entry bssid_cam; /* TDLS peer only */
bool use_cfg_mask;
struct cfg80211_bitrate_mask mask;
@@ -2550,9 +2567,24 @@ enum rtw89_sar_sources {
RTW89_SAR_SOURCE_NR,
};
+enum rtw89_sar_subband {
+ RTW89_SAR_2GHZ_SUBBAND,
+ RTW89_SAR_5GHZ_SUBBAND_1_2, /* U-NII-1 and U-NII-2 */
+ RTW89_SAR_5GHZ_SUBBAND_2_E, /* U-NII-2-Extended */
+ RTW89_SAR_5GHZ_SUBBAND_3, /* U-NII-3 */
+ RTW89_SAR_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_SAR_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_SAR_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_SAR_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_SAR_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_SAR_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ RTW89_SAR_SUBBAND_NR,
+};
+
struct rtw89_sar_cfg_common {
- bool set[RTW89_SUBBAND_NR];
- s32 cfg[RTW89_SUBBAND_NR];
+ bool set[RTW89_SAR_SUBBAND_NR];
+ s32 cfg[RTW89_SAR_SUBBAND_NR];
};
struct rtw89_sar_info {
@@ -2646,6 +2678,10 @@ struct rtw89_lck_info {
u8 thermal[RF_PATH_MAX];
};
+struct rtw89_rx_dck_info {
+ u8 thermal[RF_PATH_MAX];
+};
+
struct rtw89_iqk_info {
bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
@@ -2776,13 +2812,20 @@ enum rtw89_multi_cfo_mode {
enum rtw89_phy_cfo_status {
RTW89_PHY_DCFO_STATE_NORMAL = 0,
RTW89_PHY_DCFO_STATE_ENHANCE = 1,
+ RTW89_PHY_DCFO_STATE_HOLD = 2,
RTW89_PHY_DCFO_STATE_MAX
};
+enum rtw89_phy_cfo_ul_ofdma_acc_mode {
+ RTW89_CFO_UL_OFDMA_ACC_DISABLE = 0,
+ RTW89_CFO_UL_OFDMA_ACC_ENABLE = 1
+};
+
struct rtw89_cfo_tracking_info {
u16 cfo_timer_ms;
bool cfo_trig_by_timer_en;
enum rtw89_phy_cfo_status phy_cfo_status;
+ enum rtw89_phy_cfo_ul_ofdma_acc_mode cfo_ul_ofdma_acc_mode;
u8 phy_cfo_trk_cnt;
bool is_adjust;
enum rtw89_multi_cfo_mode rtw89_multi_cfo_mode;
@@ -3096,10 +3139,12 @@ struct rtw89_dev {
struct workqueue_struct *txq_wq;
struct work_struct txq_work;
struct delayed_work txq_reinvoke_work;
- /* used to protect ba_list */
+ /* used to protect ba_list and forbid_ba_list */
spinlock_t ba_lock;
/* txqs to setup ba session */
struct list_head ba_list;
+ /* txqs to forbid ba session */
+ struct list_head forbid_ba_list;
struct work_struct ba_work;
/* used to protect rpwm */
spinlock_t rpwm_lock;
@@ -3125,6 +3170,7 @@ struct rtw89_dev {
struct rtw89_dpk_info dpk;
struct rtw89_mcc_info mcc;
struct rtw89_lck_info lck;
+ struct rtw89_rx_dck_info rx_dck;
bool is_tssi_mode[RF_PATH_MAX];
bool is_bt_iqk_timeout;
@@ -3145,6 +3191,7 @@ struct rtw89_dev {
struct delayed_work coex_bt_devinfo_work;
struct delayed_work coex_rfk_chk_work;
struct delayed_work cfo_track_work;
+ struct delayed_work forbid_ba_work;
struct rtw89_ppdu_sts_info ppdu_sts;
u8 total_sta_assoc;
bool scanning;
@@ -3517,12 +3564,29 @@ static inline
struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta)
{
- if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE && rtwsta)
- return &rtwsta->addr_cam;
+ if (rtwsta) {
+ struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ return &rtwsta->addr_cam;
+ }
return &rtwvif->addr_cam;
}
static inline
+struct rtw89_bssid_cam_entry *rtw89_get_bssid_cam_of(struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ if (rtwsta) {
+ struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+
+ if (sta->tdls)
+ return &rtwsta->bssid_cam;
+ }
+ return &rtwvif->bssid_cam;
+}
+
+static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
struct rtw89_channel_help_params *p)
{
@@ -3674,7 +3738,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
+ if (!vif->bss_conf.he_support || !vif->cfg.assoc)
return;
if (chip->ops->set_txpwr_ul_tb_offset)
@@ -3850,7 +3914,7 @@ int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_id
int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
-u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate);
+bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
int rtw89_regd_init(struct rtw89_dev *rtwdev,
void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request));
void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
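The prototype change to rtw89_ra_report_to_bitrate() above swaps the raw u16 return for a bool success flag plus an out-parameter, so a caller can reject an out-of-range rate index instead of consuming a bogus value (the phy.c hunk further down uses it exactly that way). Below is a minimal standalone sketch of that call pattern, not part of this patch; the stub function and rate table are illustrative only, not the driver's actual lookup.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's legacy-rate lookup: report failure when the
 * firmware-supplied index is out of range, success otherwise. */
static bool report_to_bitrate(uint8_t rpt_rate, uint16_t *bitrate)
{
	static const uint16_t legacy_100kbps[] = { 10, 20, 55, 110, 60, 90, 120, 180 };

	if (rpt_rate >= sizeof(legacy_100kbps) / sizeof(legacy_100kbps[0]))
		return false;	/* invalid report: caller keeps its old state */

	*bitrate = legacy_100kbps[rpt_rate];
	return true;
}

int main(void)
{
	uint16_t bitrate;

	if (report_to_bitrate(3, &bitrate))		/* valid index */
		printf("legacy rate: %u (100 kbps units)\n", (unsigned)bitrate);
	if (!report_to_bitrate(200, &bitrate))		/* bogus index is rejected */
		printf("report dropped, previous rate kept\n");
	return 0;
}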
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 7820bc3ab3b4..829c61da99bb 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -2376,7 +2376,8 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n",
stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv,
stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv);
- seq_printf(m, "Beacon: %u\n", pkt_stat->beacon_nr);
+ seq_printf(m, "Beacon: %u, TF: %u\n", pkt_stat->beacon_nr,
+ stats->rx_tf_periodic);
seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len,
stats->rx_avg_len);
@@ -2437,7 +2438,8 @@ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct seq_file *m = (struct seq_file *)data;
- seq_printf(m, "STA [%d] %pM\n", rtwsta->mac_id, sta->addr);
+ seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
+ sta->tdls ? "(TDLS)" : "");
rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
}
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index de72155ad1fe..6176152dbf6b 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -24,6 +24,9 @@ enum rtw89_debug_mask {
RTW89_DBG_BTC = BIT(13),
RTW89_DBG_BF = BIT(14),
RTW89_DBG_HW_SCAN = BIT(15),
+ RTW89_DBG_SAR = BIT(16),
+
+ RTW89_DBG_UNEXP = BIT(31),
};
enum rtw89_debug_mac_reg_sel {
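RTW89_DBG_SAR and RTW89_DBG_UNEXP extend the debug message mask; UNEXP in particular collects the "unexpected but recoverable" paths that the pci.c hunks below demote from rtw89_err()/rtw89_warn() to rtw89_debug(). A tiny standalone model of how such a bitmask gates output follows; it is not the driver's macro, just the same idea with a plain printf.

#include <stdint.h>
#include <stdio.h>

#define DBG_TXRX	(1u << 8)
#define DBG_UNEXP	(1u << 31)	/* mirrors RTW89_DBG_UNEXP = BIT(31) */

static uint32_t debug_mask = DBG_UNEXP;	/* stand-in for the driver's runtime debug mask */

#define dbg_printf(mask, fmt, ...)			\
	do {						\
		if (debug_mask & (mask))		\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	dbg_printf(DBG_UNEXP, "no last skb\n");		/* printed: bit is set */
	dbg_printf(DBG_TXRX, "RXD unavailable\n");	/* suppressed: bit is clear */
	return 0;
}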
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 4718aced1428..6473015a6b2a 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -579,7 +579,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
}
skb_put(skb, H2C_CAM_LEN);
rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
- rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data);
+ rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
@@ -1043,7 +1043,8 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
u16 tim_offset;
int bcn_total_len;
- skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, NULL);
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
+ NULL, 0);
if (!skb_beacon) {
rtw89_err(rtwdev, "failed to get beacon skb\n");
return -ENOMEM;
@@ -2257,7 +2258,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
list_add_tail(&ch_info->list, &chan_list);
off_chan_time += ch_info->period;
}
- rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+ ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -2339,6 +2340,9 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
rtwdev->scan_info.scanning_vif = NULL;
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
+ rtw89_store_op_chan(rtwdev, false);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
@@ -2365,20 +2369,27 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (ret)
goto out;
}
- rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+ ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
out:
return ret;
}
-void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_hal *hal = &rtwdev->hal;
- scan_info->op_pri_ch = hal->current_primary_channel;
- scan_info->op_chan = hal->current_channel;
- scan_info->op_bw = hal->current_band_width;
- scan_info->op_band = hal->current_band_type;
+ if (backup) {
+ scan_info->op_pri_ch = hal->current_primary_channel;
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_band = hal->current_band_type;
+ } else {
+ hal->current_primary_channel = scan_info->op_pri_ch;
+ hal->current_channel = scan_info->op_chan;
+ hal->current_band_width = scan_info->op_bw;
+ hal->current_band_type = scan_info->op_band;
+ }
}
#define H2C_FW_CPU_EXCEPTION_LEN 4
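rtw89_store_op_chan() now runs in both directions: backup=true snapshots the current operating channel into scan_info (done at association in mac80211.c below), and backup=false writes the snapshot back into hal (done when a scan lands on the operating channel or completes while still linked). A simplified standalone model of that pairing, with stand-in structs instead of the driver's hal/scan_info state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chan { uint8_t pri_ch, ch, bw, band; };

static struct chan hal;		/* live channel state */
static struct chan op_backup;	/* saved operating channel */

/* Mirrors the backup/restore split added to rtw89_store_op_chan(). */
static void store_op_chan(bool backup)
{
	if (backup)
		op_backup = hal;	/* e.g. on BSS_CHANGED_ASSOC */
	else
		hal = op_backup;	/* e.g. back on the op channel during a scan */
}

int main(void)
{
	hal = (struct chan){ .pri_ch = 36, .ch = 38, .bw = 1, .band = 1 };
	store_op_chan(true);				/* associate: remember it */
	hal = (struct chan){ .pri_ch = 1, .ch = 1 };	/* scan hops to another channel */
	store_op_chan(false);				/* restore the operating channel */
	printf("restored primary channel %u\n", (unsigned)hal.pri_ch);
	return 0;
}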
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 95a55c4213db..e75ad22aa85d 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -2633,17 +2633,14 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *c2h_info);
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
-void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup);
void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool aborted);
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
-void rtw89_hw_scan_status_report(struct rtw89_dev *rtwdev, struct sk_buff *skb);
-void rtw89_hw_scan_chan_switch(struct rtw89_dev *rtwdev, struct sk_buff *skb);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
-void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 3cf892912c1d..93124b815825 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -3681,17 +3681,20 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
rtw89_hw_scan_complete(rtwdev, vif, false);
break;
case RTW89_SCAN_ENTER_CH_NOTIFY:
- if (rtw89_is_op_chan(rtwdev, band, chan))
+ hal->prev_band_type = hal->current_band_type;
+ hal->current_band_type = band;
+ hal->prev_primary_channel = hal->current_primary_channel;
+ hal->current_primary_channel = chan;
+ hal->current_channel = chan;
+ hal->current_band_width = RTW89_CHANNEL_WIDTH_20;
+ if (rtw89_is_op_chan(rtwdev, band, chan)) {
+ rtw89_store_op_chan(rtwdev, false);
ieee80211_wake_queues(rtwdev->hw);
+ }
break;
default:
return;
}
-
- hal->prev_band_type = hal->current_band_type;
- hal->prev_primary_channel = hal->current_channel;
- hal->current_channel = chan;
- hal->current_band_type = band;
}
static void
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 9f511c8d8a37..f66619354734 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -666,6 +666,7 @@ enum mac_ax_err_info {
MAC_AX_ERR_L2_ERR_APB_BBRF_TO_RX4281 = 0x2360,
MAC_AX_ERR_L2_ERR_APB_BBRF_TO_OTHERS = 0x2370,
MAC_AX_ERR_L2_RESET_DONE = 0x2400,
+ MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT = 0x2599,
MAC_AX_ERR_CPU_EXCEPTION = 0x3000,
MAC_AX_ERR_ASSERTION = 0x4000,
MAC_AX_GET_ERR_MAX,
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index f24e4a208376..cef27e781ae2 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -336,7 +336,7 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -345,12 +345,12 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_leave_ps_mode(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
- if (conf->assoc) {
+ if (vif->cfg.assoc) {
rtw89_station_mode_sta_assoc(rtwdev, vif, conf);
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
- rtw89_store_op_chan(rtwdev);
+ rtw89_store_op_chan(rtwdev, true);
} else {
/* Abort ongoing scan if cancel_scan isn't issued
* when disconnected by peer
@@ -381,7 +381,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
-static int rtw89_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -401,7 +403,8 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif
}
static
-void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -425,7 +428,8 @@ static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
}
static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -454,7 +458,7 @@ static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0; /* defer to bss_info_changed to have vif info */
return rtw89_core_sta_assoc(rtwdev, vif, sta);
}
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 0ef7821b2e0f..c68fec9eb5a6 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -228,7 +228,8 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
if (fs) {
if (new) {
- rtw89_err(rtwdev, "skb should not be ready before first segment start\n");
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "skb should not be ready before first segment start\n");
goto err_sync_device;
}
if (desc_info->ready) {
@@ -251,7 +252,7 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
} else {
offset = sizeof(struct rtw89_pci_rxbd_info);
if (!new) {
- rtw89_warn(rtwdev, "no last skb\n");
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
goto err_sync_device;
}
}
@@ -605,7 +606,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
hw_idx_next = (hw_idx + 1) % bd_ring->len;
if (hw_idx_next == host_idx)
- rtw89_warn(rtwdev, "%d RXD unavailable\n", i);
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"%d RXD unavailable, idx=0x%08x, len=%d\n",
@@ -738,6 +739,9 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
+ if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
+ rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
+
if (unlikely(rtwpci->under_recovery))
goto enable_intr;
@@ -948,9 +952,10 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
if (wd_cnt == 0 || bd_cnt == 0) {
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
- if (!cnt)
+ if (cnt)
+ rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
+ else if (wd_cnt == 0)
goto out_unlock;
- rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
if (bd_cnt == 0)
@@ -961,7 +966,9 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
wd_cnt = wd_ring->curr_num;
min_cnt = min(bd_cnt, wd_cnt);
if (min_cnt == 0)
- rtw89_warn(rtwdev, "still no tx resource after reclaim\n");
+ rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP,
+ "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
+ wd_cnt, bd_cnt);
out_unlock:
spin_unlock_bh(&rtwpci->trx_lock);
@@ -3104,7 +3111,7 @@ void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
if (rtwpci->under_recovery) {
- rtwpci->intrs[0] = 0;
+ rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN;
rtwpci->intrs[1] = 0;
} else {
rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
@@ -3126,7 +3133,7 @@ static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
- rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
rtwpci->intrs[1] = 0;
}
@@ -3138,7 +3145,7 @@ static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
B_AX_HS1ISR_IND_INT_EN |
B_AX_HS0ISR_IND_INT_EN;
- rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
B_AX_RXDMA_INT_EN |
B_AX_RXP1DMA_INT_EN |
@@ -3155,7 +3162,7 @@ static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
B_AX_HS0ISR_IND_INT_EN;
- rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}
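The new B_AX_WDT_TIMEOUT_INT_EN bit (defined in pci.h below) is OR'ed into halt_c2h_intrs for all three v1 interrupt-mask profiles, and the threaded ISR above forwards a set bit straight into SER recovery as MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT (0x2599, added in mac.h). A compact standalone model of that dispatch step, with a placeholder notify function standing in for rtw89_ser_notify():

#include <stdint.h>
#include <stdio.h>

#define HALT_C2H_INT	(1u << 21)	/* mirrors B_AX_HALT_C2H_INT_EN */
#define WDT_TIMEOUT_INT	(1u << 22)	/* mirrors B_AX_WDT_TIMEOUT_INT_EN */
#define ERR_WDT_TIMEOUT	0x2599u		/* mirrors MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT */

static void ser_notify(uint32_t err)
{
	printf("SER notified with error 0x%04x\n", (unsigned)err);
}

static void handle_halt_c2h(uint32_t isrs, uint32_t fw_err_status)
{
	if (isrs & HALT_C2H_INT)
		ser_notify(fw_err_status);	/* the driver reads this status from the firmware */
	if (isrs & WDT_TIMEOUT_INT)
		ser_notify(ERR_WDT_TIMEOUT);	/* watchdog timeout uses a fixed error code */
}

int main(void)
{
	handle_halt_c2h(WDT_TIMEOUT_INT, 0);
	return 0;
}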
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index bb585ed19190..a118647213e3 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -94,6 +94,7 @@
/* Interrupts */
#define R_AX_HIMR0 0x01A0
+#define B_AX_WDT_TIMEOUT_INT_EN BIT(22)
#define B_AX_HALT_C2H_INT_EN BIT(21)
#define R_AX_HISR0 0x01A4
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 762cdba9d3cf..1532c0a6bbc4 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -1918,21 +1918,29 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
struct sk_buff *c2h = ra_data->c2h;
u8 mode, rate, bw, giltf, mac_id;
+ u16 legacy_bitrate;
+ bool valid;
mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data);
if (mac_id != rtwsta->mac_id)
return;
- memset(ra_report, 0, sizeof(*ra_report));
-
rate = RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h->data);
bw = RTW89_GET_PHY_C2H_RA_RPT_BW(c2h->data);
giltf = RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h->data);
mode = RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h->data);
+ if (mode == RTW89_RA_RPT_MODE_LEGACY) {
+ valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
+ if (!valid)
+ return;
+ }
+
+ memset(ra_report, 0, sizeof(*ra_report));
+
switch (mode) {
case RTW89_RA_RPT_MODE_LEGACY:
- ra_report->txrate.legacy = rtw89_ra_report_to_bitrate(rtwdev, rate);
+ ra_report->txrate.legacy = legacy_bitrate;
break;
case RTW89_RA_RPT_MODE_HT:
ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
@@ -2151,6 +2159,7 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
cfo->cfo_trig_by_timer_en = false;
cfo->phy_cfo_trk_cnt = 0;
cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+ cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
@@ -2419,6 +2428,13 @@ void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ bool is_ul_ofdma = false, ofdma_acc_en = false;
+
+ if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
+ is_ul_ofdma = true;
+ if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
+ is_ul_ofdma)
+ ofdma_acc_en = true;
switch (cfo->phy_cfo_status) {
case RTW89_PHY_DCFO_STATE_NORMAL:
@@ -2430,16 +2446,26 @@ void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
}
break;
case RTW89_PHY_DCFO_STATE_ENHANCE:
- if (cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT) {
+ if (stats->tx_throughput <= CFO_TP_LOWER)
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+ else if (ofdma_acc_en &&
+ cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
+ else
+ cfo->phy_cfo_trk_cnt++;
+
+ if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
cfo->phy_cfo_trk_cnt = 0;
cfo->cfo_trig_by_timer_en = false;
}
- if (cfo->cfo_trig_by_timer_en == 1)
- cfo->phy_cfo_trk_cnt++;
+ break;
+ case RTW89_PHY_DCFO_STATE_HOLD:
if (stats->tx_throughput <= CFO_TP_LOWER) {
cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
cfo->phy_cfo_trk_cnt = 0;
cfo->cfo_trig_by_timer_en = false;
+ } else {
+ cfo->phy_cfo_trk_cnt++;
}
break;
default:
@@ -3099,11 +3125,9 @@ static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
u8 i;
- if (chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
- rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);
+ rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);
for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
if (i >= RTW89_CCK_PKT)
@@ -3612,7 +3636,7 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
u8 bss_color;
- if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
+ if (!vif->bss_conf.he_support || !vif->cfg.assoc)
return;
bss_color = vif->bss_conf.he_bss_color.color;
@@ -3622,7 +3646,7 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_TGT, bss_color,
phy_idx);
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
- vif->bss_conf.aid, phy_idx);
+ vif->cfg.aid, phy_idx);
}
static void
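The CFO tracker gains a third state: ENHANCE falls back to NORMAL when TX throughput drops to CFO_TP_LOWER or below, advances to HOLD after CFO_PERIOD_CNT iterations only while UL-OFDMA accumulation is enabled and periodic trigger frames exceed CFO_TF_CNT_TH, and HOLD itself returns to NORMAL once throughput falls. A simplified standalone model of those transitions follows, not part of this patch; the thresholds are illustrative, not the driver's tunables.

#include <stdbool.h>
#include <stdio.h>

enum cfo_state { CFO_NORMAL, CFO_ENHANCE, CFO_HOLD };

/* Models the ENHANCE/HOLD transitions added to rtw89_phy_cfo_track(). */
static enum cfo_state cfo_step(enum cfo_state s, unsigned int *trk_cnt,
			       unsigned int tx_tp, bool ofdma_acc_en)
{
	const unsigned int tp_lower = 2, period_cnt = 8;

	switch (s) {
	case CFO_ENHANCE:
		if (tx_tp <= tp_lower) {
			*trk_cnt = 0;
			return CFO_NORMAL;	/* low traffic: stop enhanced tracking */
		}
		if (ofdma_acc_en && *trk_cnt >= period_cnt)
			return CFO_HOLD;	/* UL-OFDMA heavy: freeze compensation */
		(*trk_cnt)++;
		return CFO_ENHANCE;
	case CFO_HOLD:
		if (tx_tp <= tp_lower) {
			*trk_cnt = 0;
			return CFO_NORMAL;
		}
		(*trk_cnt)++;
		return CFO_HOLD;
	default:
		return s;
	}
}

int main(void)
{
	enum cfo_state s = CFO_ENHANCE;
	unsigned int cnt = 0;

	while (s != CFO_HOLD)
		s = cfo_step(s, &cnt, 100, true);	/* high throughput, UL-OFDMA active */
	printf("entered HOLD after %u steps\n", cnt);
	return 0;
}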
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 291660154d58..e20636f54b55 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -62,6 +62,7 @@
#define CFO_COMP_PERIOD 250
#define CFO_COMP_WEIGHT 8
#define MAX_CFO_TOLERANCE 30
+#define CFO_TF_CNT_TH 300
#define CCX_MAX_PERIOD 2097
#define CCX_MAX_PERIOD_UNIT 32
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index e3c2fce32651..3d60feb78312 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -2330,8 +2330,8 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
val2_q = abs(sign_extend32(val2_q, 11));
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
- (val1_i * val1_i + val1_q * val1_q) /
- (val2_i * val2_i + val2_q * val2_q));
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
} else {
for (i = 0; i < 32; i++) {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
index 99479bbb0939..320bcd4852c6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
@@ -1281,7 +1281,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x018, 0x00011124},
{0x000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x055, 0x00080000},
{0x056, 0x0008FFF0},
{0x057, 0x0000C485},
@@ -20496,7 +20495,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20516,7 +20515,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20542,7 +20541,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20562,7 +20561,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20588,7 +20587,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20608,7 +20607,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20622,17 +20621,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0xB0000000, 0x00000000},
{0x033, 0x0000002E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20644,15 +20643,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20664,21 +20663,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000002F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20690,15 +20689,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20710,21 +20709,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20736,15 +20735,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20756,21 +20755,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000031},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20782,15 +20781,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20802,21 +20801,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000032},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20828,15 +20827,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20848,21 +20847,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000033},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20874,15 +20873,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20894,21 +20893,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000034},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20920,15 +20919,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20940,21 +20939,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000035},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20966,15 +20965,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20986,21 +20985,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000036},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21012,15 +21011,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21032,21 +21031,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000037},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21058,15 +21057,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21078,21 +21077,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21104,15 +21103,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21124,21 +21123,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000039},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21150,15 +21149,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21170,21 +21169,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003A},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21196,15 +21195,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21216,21 +21215,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003B},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21242,15 +21241,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21262,21 +21261,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003C},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21288,15 +21287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21308,21 +21307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003D},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21334,15 +21333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21354,21 +21353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21380,15 +21379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21400,21 +21399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21426,15 +21425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21446,7 +21445,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -21596,8 +21595,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x087, 0x00000427},
{0xB0000000, 0x00000000},
{0x002, 0x00000000},
- {0x067, 0x00000052},
-
+ {0x067, 0x00000056},
};
static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
@@ -21671,7 +21669,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x018, 0x00011124},
{0x000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x055, 0x00080000},
{0x056, 0x0008FFF0},
{0x057, 0x0000C485},
@@ -41142,7 +41139,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41162,7 +41159,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41188,7 +41185,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41208,7 +41205,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41234,7 +41231,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41254,7 +41251,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41268,17 +41265,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0xB0000000, 0x00000000},
{0x033, 0x0000002E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41290,15 +41287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41310,21 +41307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000002F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41336,15 +41333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41356,21 +41353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41382,15 +41379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41402,21 +41399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000031},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41428,15 +41425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41448,21 +41445,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000032},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41474,15 +41471,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41494,21 +41491,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000033},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41520,15 +41517,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41540,21 +41537,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000034},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41566,15 +41563,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41586,21 +41583,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000035},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41612,15 +41609,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41632,21 +41629,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000036},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41658,15 +41655,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41678,21 +41675,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000037},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41704,15 +41701,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41724,21 +41721,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41750,15 +41747,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41770,21 +41767,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000039},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41796,15 +41793,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41816,21 +41813,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003A},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41842,15 +41839,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41862,21 +41859,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003B},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41888,15 +41885,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41908,21 +41905,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003C},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41934,15 +41931,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41954,21 +41951,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003D},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41980,15 +41977,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42000,21 +41997,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42026,15 +42023,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42046,21 +42043,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42072,15 +42069,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42092,7 +42089,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -42243,8 +42240,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x087, 0x00000427},
{0xB0000000, 0x00000000},
{0x002, 0x00000000},
- {0x067, 0x00000052},
-
+ {0x067, 0x00000056},
};
static const struct rtw89_reg2_def rtw89_8852a_phy_nctl_regs[] = {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 64840c8d9efe..b697aef2faf2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1861,6 +1861,7 @@ static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
{
rtw8852c_dpk_track(rtwdev);
rtw8852c_lck_track(rtwdev);
+ rtw8852c_rx_dck_track(rtwdev);
}
static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index dfb9caba9bc4..4186d825d19b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -3864,6 +3864,7 @@ void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
{
+ struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
u8 path, kpath;
u32 rf_reg5;
@@ -3883,6 +3884,7 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
_set_rx_dck(rtwdev, phy, path, is_afe);
+ rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
if (rtwdev->is_tssi_mode[path])
@@ -3891,6 +3893,31 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
}
}
+#define RTW8852C_RX_DCK_TH 8
+
+void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+ u8 cur_thermal;
+ int delta;
+ int path;
+
+ for (path = 0; path < RF_PATH_NUM_8852C; path++) {
+ cur_thermal =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ delta = abs((int)cur_thermal - rx_dck->thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
+ path, cur_thermal, delta);
+
+ if (delta >= RTW8852C_RX_DCK_TH) {
+ rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
+ return;
+ }
+ }
+}
+
void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
u32 tx_en;
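A note on the rtw8852c_rx_dck_track() hunk above: it re-runs the RX DC offset calibration (rtw8852c_rx_dck()) whenever the averaged thermal reading of any RF path has drifted by at least RTW8852C_RX_DCK_TH (8) from the value stored at the previous calibration; the baseline itself is refreshed inside rtw8852c_rx_dck(), which records ewma_thermal_read() per path. The following is a minimal standalone sketch of the same track-and-retrigger pattern; NUM_PATHS, read_thermal() and run_calibration() are illustrative stand-ins, not the driver's API.

/* Standalone sketch of the thermal-delta recalibration pattern used by
 * rtw8852c_rx_dck_track(); all names here are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PATHS 2
#define RECAL_TH  8			/* same threshold as RTW8852C_RX_DCK_TH */

static unsigned char last_thermal[NUM_PATHS];	/* thermal at last calibration */

static unsigned char read_thermal(int path)	/* stand-in for the EWMA read */
{
	return 40 + 5 * path;			/* fake sensor value */
}

static void run_calibration(void)		/* stand-in for rtw8852c_rx_dck() */
{
	int path;

	for (path = 0; path < NUM_PATHS; path++)
		last_thermal[path] = read_thermal(path);
	printf("recalibrated\n");
}

static void recal_track(void)
{
	int path;

	for (path = 0; path < NUM_PATHS; path++) {
		int delta = abs((int)read_thermal(path) - (int)last_thermal[path]);

		if (delta >= RECAL_TH) {
			run_calibration();	/* one run covers all paths */
			return;
		}
	}
}

int main(void)
{
	run_calibration();	/* initial calibration records the baseline */
	recal_track();		/* no drift with the fake sensor, so no re-run */
	return 0;
}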
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index c32756f0c01a..5118a49da8d3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -12,6 +12,7 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev);
void rtw8852c_dack(struct rtw89_dev *rtwdev);
void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
+void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev);
void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 097c87899cea..eb2d3ec28775 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -5,15 +5,122 @@
#include "debug.h"
#include "sar.h"
+static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "center freq: %u to SAR subband is unhandled\n",
+ center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_SAR_2GHZ_SUBBAND;
+ case 5180 ... 5320:
+ return RTW89_SAR_5GHZ_SUBBAND_1_2;
+ case 5500 ... 5720:
+ return RTW89_SAR_5GHZ_SUBBAND_2_E;
+ case 5745 ... 5825:
+ return RTW89_SAR_5GHZ_SUBBAND_3;
+ case 5955 ... 6155:
+ return RTW89_SAR_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_SAR_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_SAR_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_SAR_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_SAR_6GHZ_SUBBAND_7_H;
+
+ /* freq 6875 (ch 185, 20MHz) spans RTW89_SAR_6GHZ_SUBBAND_7_H
+ * and RTW89_SAR_6GHZ_SUBBAND_8, so directly describe it with
+ * struct rtw89_sar_span in the following.
+ */
+
+ case 6895 ... 7115:
+ return RTW89_SAR_6GHZ_SUBBAND_8;
+ }
+}
+
+struct rtw89_sar_span {
+ enum rtw89_sar_subband subband_low;
+ enum rtw89_sar_subband subband_high;
+};
+
+#define RTW89_SAR_SPAN_VALID(span) ((span)->subband_high)
+
+#define RTW89_SAR_6GHZ_SPAN_HEAD 6145
+#define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \
+ ((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2)
+
+#define RTW89_DECL_SAR_6GHZ_SPAN(center_freq, subband_l, subband_h) \
+ [RTW89_SAR_6GHZ_SPAN_IDX(center_freq)] = { \
+ .subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
+ .subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ }
+
+/* Since 6GHz SAR subbands are not edge aligned, some cases span two SAR
+ * subbands. In the following, we describe each of them with rtw89_sar_span.
+ */
+static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
+ RTW89_DECL_SAR_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_SAR_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_SAR_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_SAR_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_SAR_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_SAR_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
+};
+
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- enum rtw89_subband subband = rtwdev->hal.current_subband;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_band band = hal->current_band_type;
+ u32 center_freq = hal->current_freq;
+ const struct rtw89_sar_span *span = NULL;
+ enum rtw89_sar_subband subband_l, subband_h;
+ int idx;
+
+ if (band == RTW89_BAND_6G) {
+ idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq);
+ /* To decrease size of rtw89_sar_overlapping_6ghz[],
+ * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs
+ * to make first span as index 0 of the table. So, if center
+ * frequency is less than the first one, it will get negative.
+ */
+ if (idx >= 0 && idx < ARRAY_SIZE(rtw89_sar_overlapping_6ghz))
+ span = &rtw89_sar_overlapping_6ghz[idx];
+ }
+
+ if (span && RTW89_SAR_SPAN_VALID(span)) {
+ subband_l = span->subband_low;
+ subband_h = span->subband_high;
+ } else {
+ subband_l = rtw89_sar_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "for {band %u, center_freq %u}, SAR subband: {%u, %u}\n",
+ band, center_freq, subband_l, subband_h);
- if (!rtwsar->set[subband])
+ if (!rtwsar->set[subband_l] && !rtwsar->set[subband_h])
return -ENODATA;
- *cfg = rtwsar->cfg[subband];
+ if (!rtwsar->set[subband_l])
+ *cfg = rtwsar->cfg[subband_h];
+ else if (!rtwsar->set[subband_h])
+ *cfg = rtwsar->cfg[subband_l];
+ else
+ *cfg = min(rtwsar->cfg[subband_l], rtwsar->cfg[subband_h]);
+
return 0;
}
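The table and query hunks above map a 6 GHz center frequency to a slot in rtw89_sar_overlapping_6ghz[] purely by arithmetic: RTW89_SAR_6GHZ_SPAN_IDX() subtracts the 6145 MHz head, so every 10 MHz step in center frequency advances the index by one. Slots not declared with RTW89_DECL_SAR_6GHZ_SPAN() stay zero-initialized and fail RTW89_SAR_SPAN_VALID() (subband_high left at 0), in which case the code falls back to rtw89_sar_get_subband(); when a span is valid and both subbands carry SAR data, the stricter limit wins via min(). Below is a small standalone check of the index arithmetic; the macro mirrors the hunk, the rest exists only for illustration.

/* Standalone check of the 6 GHz SAR span-index arithmetic. */
#include <stdio.h>

#define SPAN_HEAD 6145
#define SPAN_IDX(center_freq) ((((int)(center_freq) - SPAN_HEAD) / 5) / 2)

int main(void)
{
	printf("6145 -> %d\n", SPAN_IDX(6145)); /*  0: first declared span */
	printf("6155 -> %d\n", SPAN_IDX(6155)); /*  1: undeclared slot, stays zeroed */
	printf("6875 -> %d\n", SPAN_IDX(6875)); /* 73: SUBBAND_7_H / SUBBAND_8 span */
	printf("6135 -> %d\n", SPAN_IDX(6135)); /* -1: below head, caller skips it */
	return 0;
}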
@@ -128,21 +235,20 @@ exit:
return ret;
}
-static const u8 rtw89_common_sar_subband_map[] = {
- RTW89_CH_2G,
- RTW89_CH_5G_BAND_1,
- RTW89_CH_5G_BAND_3,
- RTW89_CH_5G_BAND_4,
-};
-
static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = {
{ .start_freq = 2412, .end_freq = 2484, },
{ .start_freq = 5180, .end_freq = 5320, },
{ .start_freq = 5500, .end_freq = 5720, },
{ .start_freq = 5745, .end_freq = 5825, },
+ { .start_freq = 5955, .end_freq = 6155, },
+ { .start_freq = 6175, .end_freq = 6415, },
+ { .start_freq = 6435, .end_freq = 6515, },
+ { .start_freq = 6535, .end_freq = 6695, },
+ { .start_freq = 6715, .end_freq = 6875, },
+ { .start_freq = 6875, .end_freq = 7115, },
};
-static_assert(ARRAY_SIZE(rtw89_common_sar_subband_map) ==
+static_assert(RTW89_SAR_SUBBAND_NR ==
ARRAY_SIZE(rtw89_common_sar_freq_ranges));
const struct cfg80211_sar_capa rtw89_sar_capa = {
@@ -159,7 +265,6 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
u8 fct;
u32 freq_start;
u32 freq_end;
- u32 band;
s32 power;
u32 i, idx;
@@ -175,15 +280,14 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
freq_start = rtw89_common_sar_freq_ranges[idx].start_freq;
freq_end = rtw89_common_sar_freq_ranges[idx].end_freq;
- band = rtw89_common_sar_subband_map[idx];
power = sar->sub_specs[i].power;
- rtw89_info(rtwdev, "On freq %u to %u, ", freq_start, freq_end);
- rtw89_info(rtwdev, "set SAR power limit %d (unit: 1/%lu dBm)\n",
- power, BIT(fct));
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "On freq %u to %u, set SAR limit %d (unit: 1/%lu dBm)\n",
+ freq_start, freq_end, power, BIT(fct));
- sar_common.set[band] = true;
- sar_common.cfg[band] = power;
+ sar_common.set[idx] = true;
+ sar_common.cfg[idx] = power;
}
return rtw89_apply_sar_common(rtwdev, &sar_common);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 9e95ed972710..726223f25dc6 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -300,18 +300,21 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
{
- struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
- rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+ if (sta->tdls)
+ rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
}
static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
- ieee80211_iterate_stations_atomic(rtwdev->hw,
- ser_sta_deinit_addr_cam_iter,
- rtwdev);
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ ser_sta_deinit_addr_cam_iter,
+ rtwvif);
rtw89_cam_deinit(rtwdev, rtwvif);
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index ff2448394a1e..05524291d60c 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2813,8 +2813,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
resp_ie_len, 0, GFP_KERNEL);
} else {
struct cfg80211_roam_info roam_info = {
- .channel = get_current_channel(usbdev, NULL),
- .bssid = bssid,
+ .links[0].channel =
+ get_current_channel(usbdev, NULL),
+ .links[0].bssid = bssid,
.req_ie = req_ie,
.req_ie_len = req_ie_len,
.resp_ie = resp_ie,
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 6bfaab48b507..0f3a80f66b61 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -420,7 +420,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
rsi_hal_send_sta_notify_frame(common,
RSI_IFTYPE_STATION,
STA_CONNECTED, bss->bssid,
- bss->qos, bss->aid, 0,
+ bss->qos, vif->cfg.aid,
+ 0,
vif);
}
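Most of the rsi and wfx hunks that follow are one mechanical conversion: with mac80211's multi-link rework, interface-wide state such as assoc, aid, ssid and ps moved out of struct ieee80211_bss_conf (now per-link data) into the new cfg member of struct ieee80211_vif, so drivers read vif->cfg.assoc and vif->cfg.aid where they previously read bss_conf->assoc and bss_conf->aid. The sketch below shows the new layout and access pattern with cut-down stand-in structs, not the real include/net/mac80211.h definitions.

/* Reduced stand-in structs showing the field move; the real mac80211
 * definitions carry many more members.
 */
#include <stdbool.h>
#include <stdio.h>

struct ieee80211_bss_conf {		/* per-link data after the rework */
	const unsigned char *bssid;
	bool qos;
};

struct ieee80211_vif_cfg {		/* per-interface data, new home of assoc/aid */
	bool assoc;
	unsigned int aid;
};

struct ieee80211_vif {
	struct ieee80211_vif_cfg cfg;
	struct ieee80211_bss_conf bss_conf;
};

static void report(const struct ieee80211_vif *vif)
{
	/* old code read vif->bss_conf.assoc / vif->bss_conf.aid here */
	printf("assoc=%d aid=%u\n", vif->cfg.assoc, vif->cfg.aid);
}

int main(void)
{
	struct ieee80211_vif vif = { .cfg = { .assoc = true, .aid = 1 } };

	report(&vif);
	return 0;
}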
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index dca81a4bbdd7..c61f83a7333b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -295,7 +295,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct rsi_hw *adapter = common->priv;
struct ieee80211_vif *vif;
struct ieee80211_tx_info *info;
- struct ieee80211_bss_conf *bss;
int status = -EINVAL;
if (!skb)
@@ -307,11 +306,10 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
if (!info->control.vif)
goto err;
vif = info->control.vif;
- bss = &vif->bss_conf;
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
- (!bss->assoc))
+ (!vif->cfg.assoc))
goto err;
status = rsi_send_pkt_to_bus(common, skb);
@@ -336,7 +334,6 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_bss_conf *bss;
struct ieee80211_hdr *wh;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
@@ -361,13 +358,13 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
return status;
}
- bss = &info->control.vif->bss_conf;
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
/* Indicate to firmware to give cfm for probe */
- if (ieee80211_is_probe_req(wh->frame_control) && !bss->assoc) {
+ if (ieee80211_is_probe_req(wh->frame_control) &&
+ !info->control.vif->cfg.assoc) {
rsi_dbg(INFO_ZONE,
"%s: blocking mgmt queue\n", __func__);
mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST;
@@ -444,7 +441,7 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
return -EINVAL;
mac_bcn = ieee80211_beacon_get_tim(adapter->hw,
vif,
- &tim_offset, NULL);
+ &tim_offset, NULL, 0);
if (!mac_bcn) {
rsi_dbg(ERR_ZONE, "Failed to get beacon from mac80211\n");
return -EINVAL;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index f01e82b90c07..bf39c4bda26f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -237,7 +237,6 @@ static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
struct cfg80211_scan_request *scan_req = &hw_req->req;
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- struct ieee80211_bss_conf *bss = &vif->bss_conf;
rsi_dbg(INFO_ZONE, "***** Hardware scan start *****\n");
common->mac_ops_resumed = false;
@@ -256,7 +255,7 @@ static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
/* If STA is not connected, return with special value 1, in order
* to start sw_scan in mac80211
*/
- if (!bss->assoc)
+ if (!vif->cfg.assoc)
return 1;
mutex_lock(&common->mutex);
@@ -579,7 +578,6 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
u16 channel = curchan->hw_value;
struct ieee80211_vif *vif;
- struct ieee80211_bss_conf *bss;
bool assoc = false;
int i;
@@ -593,8 +591,7 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
if (!vif)
continue;
if (vif->type == NL80211_IFTYPE_STATION) {
- bss = &vif->bss_conf;
- if (bss->assoc) {
+ if (vif->cfg.assoc) {
assoc = true;
break;
}
@@ -700,7 +697,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
}
if ((vif->type == NL80211_IFTYPE_STATION ||
vif->type == NL80211_IFTYPE_P2P_CLIENT) &&
- (!sta_vif || vif->bss_conf.assoc))
+ (!sta_vif || vif->cfg.assoc))
sta_vif = vif;
}
if (set_ps && sta_vif) {
@@ -786,7 +783,7 @@ static void rsi_switch_channel(struct rsi_hw *adapter,
static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
@@ -797,8 +794,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
if (changed & BSS_CHANGED_ASSOC) {
rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
- __func__, bss_conf->assoc);
- if (bss_conf->assoc) {
+ __func__, vif->cfg.assoc);
+ if (vif->cfg.assoc) {
/* Send the RX filter frame */
rx_filter_word = (ALLOW_DATA_ASSOC_PEER |
ALLOW_CTRL_ASSOC_PEER |
@@ -807,17 +804,17 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
}
rsi_inform_bss_status(common,
RSI_OPMODE_STA,
- bss_conf->assoc,
+ vif->cfg.assoc,
bss_conf->bssid,
bss_conf->qos,
- bss_conf->aid,
+ vif->cfg.aid,
NULL, 0,
bss_conf->assoc_capability, vif);
adapter->ps_info.dtim_interval_duration = bss->dtim_period;
adapter->ps_info.listen_interval = conf->listen_interval;
/* If U-APSD is updated, send ps parameters to firmware */
- if (bss->assoc) {
+ if (vif->cfg.assoc) {
if (common->uapsd_bitmap) {
rsi_dbg(INFO_ZONE, "Configuring UAPSD\n");
rsi_conf_uapsd(adapter, vif);
@@ -898,7 +895,8 @@ static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
* Return: 0 on success, negative error code on failure.
*/
static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rsi_hw *adapter = hw->priv;
@@ -1359,7 +1357,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
if (!bss)
return;
/* CQM only for connected AP beacons, the RSSI is a weighted avg */
- if (bss->assoc && !(memcmp(bss->bssid, hdr->addr2, ETH_ALEN))) {
+ if (vif->cfg.assoc && !(memcmp(bss->bssid, hdr->addr2, ETH_ALEN))) {
if (ieee80211_is_beacon(hdr->frame_control))
rsi_perform_cqm(common, hdr->addr2, rxs->signal, vif);
}
@@ -1737,7 +1735,7 @@ static void rsi_resume_conn_channel(struct rsi_common *common)
}
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
- vif->bss_conf.assoc) {
+ vif->cfg.assoc) {
rsi_switch_channel(adapter, vif);
break;
}
@@ -1862,17 +1860,15 @@ static u16 rsi_wow_map_triggers(struct rsi_common *common,
int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
{
struct rsi_common *common = adapter->priv;
+ struct ieee80211_vif *vif = adapter->vifs[0];
u16 triggers = 0;
u16 rx_filter_word = 0;
- struct ieee80211_bss_conf *bss = NULL;
rsi_dbg(INFO_ZONE, "Config WoWLAN to device\n");
- if (!adapter->vifs[0])
+ if (!vif)
return -EINVAL;
- bss = &adapter->vifs[0]->bss_conf;
-
if (WARN_ON(!wowlan)) {
rsi_dbg(ERR_ZONE, "WoW triggers not enabled\n");
return -EINVAL;
@@ -1884,7 +1880,7 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
rsi_dbg(ERR_ZONE, "%s:No valid WoW triggers\n", __func__);
return -EINVAL;
}
- if (!bss->assoc) {
+ if (!vif->cfg.assoc) {
rsi_dbg(ERR_ZONE,
"Cannot configure WoWLAN (Station not connected)\n");
common->wow_flags |= RSI_WOW_NO_CONNECTION;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index c14689266fec..1b309e47a1f1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1635,7 +1635,6 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
struct ieee80211_vif *vif)
{
struct rsi_common *common = adapter->priv;
- struct ieee80211_bss_conf *bss = &vif->bss_conf;
struct rsi_request_ps *ps;
struct rsi_ps_info *ps_info;
struct sk_buff *skb;
@@ -1669,7 +1668,7 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
ps->ps_sleep.sleep_duration =
cpu_to_le32(ps_info->deep_sleep_wakeup_period);
- if (bss->assoc)
+ if (vif->cfg.assoc)
ps->ps_sleep.connected_sleep = RSI_CONNECTED_SLEEP;
else
ps->ps_sleep.connected_sleep = RSI_DEEP_SLEEP;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 9f16128e4ffa..d09998796ac0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -796,7 +796,7 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
* rsi_sdio_host_intf_read_pkt() - This function reads the packet
* from the device.
* @adapter: Pointer to the adapter data structure.
- * @pkt: Pointer to the packet data to be read from the the device.
+ * @pkt: Pointer to the packet data to be read from the device.
* @length: Length of the data to be read from the device.
*
* Return: 0 on success, -1 on failure.
diff --git a/drivers/net/wireless/silabs/wfx/fwio.c b/drivers/net/wireless/silabs/wfx/fwio.c
index 3d1b8a135dc0..52c7f560b062 100644
--- a/drivers/net/wireless/silabs/wfx/fwio.c
+++ b/drivers/net/wireless/silabs/wfx/fwio.c
@@ -286,8 +286,7 @@ static int load_firmware_secure(struct wfx_dev *wdev)
error:
kfree(buf);
- if (fw)
- release_firmware(fw);
+ release_firmware(fw);
if (ret)
print_boot_status(wdev);
return ret;
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c
index d35dd940d968..9402503fbde3 100644
--- a/drivers/net/wireless/silabs/wfx/hif_tx.c
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.c
@@ -282,6 +282,8 @@ int wfx_hif_stop_scan(struct wfx_vif *wvif)
int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssid_len)
{
+ struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif,
+ bss_conf);
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
@@ -289,10 +291,10 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
WARN_ON(!conf->beacon_int);
WARN_ON(!conf->basic_rates);
WARN_ON(sizeof(body->ssid) < ssid_len);
- WARN(!conf->ibss_joined && !ssid_len, "joining an unknown BSS");
+ WARN(!vif->cfg.ibss_joined && !ssid_len, "joining an unknown BSS");
if (!hif)
return -ENOMEM;
- body->infrastructure_bss_mode = !conf->ibss_joined;
+ body->infrastructure_bss_mode = !vif->cfg.ibss_joined;
body->short_preamble = conf->use_short_preamble;
body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR);
body->channel_number = channel->hw_value;
@@ -417,6 +419,8 @@ int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
const struct ieee80211_channel *channel)
{
+ struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif,
+ bss_conf);
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
@@ -429,8 +433,8 @@ int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
- body->ssid_length = conf->ssid_len;
- memcpy(body->ssid, conf->ssid, conf->ssid_len);
+ body->ssid_length = vif->cfg.ssid_len;
+ memcpy(body->ssid, vif->cfg.ssid, vif->cfg.ssid_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
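The wfx_hif_join()/wfx_hif_start() hunks above recover the owning struct ieee80211_vif from a struct ieee80211_bss_conf pointer with container_of(), which is valid because bss_conf is embedded directly inside the vif. A self-contained miniature of that idiom follows, using a simplified macro and generic structure names rather than the kernel's definitions.

/* container_of() in miniature: given a pointer to an embedded member,
 * recover the enclosing structure by subtracting the member's offset.
 * The kernel's macro additionally type-checks the pointer.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bss_conf { int beacon_int; };

struct vif {
	int id;
	struct bss_conf bss_conf;	/* embedded member */
};

static void use_conf(const struct bss_conf *conf)
{
	const struct vif *vif = container_of(conf, struct vif, bss_conf);

	printf("vif %d, beacon_int %d\n", vif->id, conf->beacon_int);
}

int main(void)
{
	struct vif v = { .id = 3, .bss_conf = { .beacon_int = 100 } };

	use_conf(&v.bss_conf);
	return 0;
}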
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 329d7f4a2b2e..626dfb4b7a55 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -156,7 +156,7 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
struct ieee80211_vif *vif = wvif_to_vif(wvif);
- WARN(!vif->bss_conf.assoc && enable_ps,
+ WARN(!vif->cfg.assoc && enable_ps,
"enable_ps is reliable only if associated");
if (wdev_to_wvif(wvif->wdev, 0)) {
struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
@@ -175,7 +175,7 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
/* It is useless to enable PS if channels are the same. */
if (enable_ps)
*enable_ps = false;
- if (vif->bss_conf.assoc && vif->bss_conf.ps)
+ if (vif->cfg.assoc && vif->cfg.ps)
dev_info(wvif->wdev->dev, "ignoring requested PS mode");
return -1;
}
@@ -188,8 +188,8 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
return 30;
}
if (enable_ps)
- *enable_ps = vif->bss_conf.ps;
- if (vif->bss_conf.assoc && vif->bss_conf.ps)
+ *enable_ps = vif->cfg.ps;
+ if (vif->cfg.assoc && vif->cfg.ps)
return conf->dynamic_ps_timeout;
else
return -1;
@@ -201,7 +201,7 @@ int wfx_update_pm(struct wfx_vif *wvif)
int ps_timeout;
bool ps;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return 0;
ps_timeout = wfx_get_ps_timeout(wvif, &ps);
if (!ps)
@@ -216,7 +216,8 @@ int wfx_update_pm(struct wfx_vif *wvif)
}
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
@@ -339,7 +340,7 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
- skb = ieee80211_beacon_get(wvif->wdev->hw, vif);
+ skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
if (!skb)
return -ENOMEM;
wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS);
@@ -356,7 +357,7 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
static void wfx_set_mfp_ap(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
- struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif);
+ struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
@@ -378,7 +379,8 @@ static void wfx_set_mfp_ap(struct wfx_vif *wvif)
}
}
-int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct wfx_dev *wdev = wvif->wdev;
@@ -396,7 +398,8 @@ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return ret;
}
-void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
@@ -417,7 +420,7 @@ static void wfx_join(struct wfx_vif *wvif)
bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel, conf->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
- if (!bss && !conf->ibss_joined) {
+ if (!bss && !vif->cfg.ibss_joined) {
wfx_tx_unlock(wvif->wdev);
return;
}
@@ -458,7 +461,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *i
bool greenfield = false;
rcu_read_lock(); /* protect sta */
- if (info->bssid && !info->ibss_joined)
+ if (info->bssid && !vif->cfg.ibss_joined)
sta = ieee80211_find_sta(vif, info->bssid);
if (sta && sta->deflink.ht_cap.ht_supported)
ampdu_density = sta->deflink.ht_cap.ampdu_density;
@@ -471,7 +474,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *i
wfx_hif_set_association_mode(wvif, ampdu_density, greenfield, info->use_short_preamble);
wfx_hif_keep_alive_period(wvif, 0);
/* beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use the same value. */
- wfx_hif_set_bss_params(wvif, info->aid, 7);
+ wfx_hif_set_bss_params(wvif, vif->cfg.aid, 7);
wfx_hif_set_beacon_wakeup_period(wvif, 1, 1);
wfx_update_pm(wvif);
}
@@ -506,7 +509,7 @@ static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable)
}
void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
@@ -522,9 +525,9 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc || info->ibss_joined)
+ if (vif->cfg.assoc || vif->cfg.ibss_joined)
wfx_join_finalize(wvif, info);
- else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION)
+ else if (!vif->cfg.assoc && vif->type == NL80211_IFTYPE_STATION)
wfx_reset(wvif);
else
dev_warn(wdev->dev, "misunderstood change: ASSOC\n");
@@ -540,11 +543,11 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_ARP_FILTER) {
for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
- __be32 *arp_addr = &info->arp_addr_list[i];
+ __be32 *arp_addr = &vif->cfg.arp_addr_list[i];
- if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ if (vif->cfg.arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
arp_addr = NULL;
- if (i >= info->arp_addr_cnt)
+ if (i >= vif->cfg.arp_addr_cnt)
arp_addr = NULL;
wfx_hif_set_arp_ipv4_filter(wvif, i, arp_addr);
}
@@ -586,7 +589,7 @@ static int wfx_update_tim(struct wfx_vif *wvif)
u8 *tim_ptr;
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, vif, &tim_offset,
- &tim_length);
+ &tim_length, 0);
if (!skb)
return -ENOENT;
tim_ptr = skb->data + tim_offset;
@@ -680,6 +683,7 @@ void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *
}
int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
@@ -692,6 +696,7 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index c69b2227e9ac..888db5cd3206 100644
--- a/drivers/net/wireless/silabs/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -29,14 +29,17 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed);
+ struct ieee80211_bss_conf *info, u64 changed);
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta);
int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta);
void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -48,8 +51,10 @@ int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf
void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf);
void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed);
int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf);
void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf);
/* Hardware API Callbacks */
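/* Illustrative sketch, not part of the patch: how the updated prototypes in
 * sta.h above slot into a driver's struct ieee80211_ops.  The member names
 * are the real mac80211 callback slots; this excerpt assumes the wfx_*
 * declarations from sta.h, and the driver's actual table lives in main.c.
 */
#include <net/mac80211.h>
#include "sta.h"

static const struct ieee80211_ops example_wfx_ops_excerpt = {
	.conf_tx		= wfx_conf_tx,
	.start_ap		= wfx_start_ap,
	.stop_ap		= wfx_stop_ap,
	.bss_info_changed	= wfx_bss_info_changed,
	.assign_vif_chanctx	= wfx_assign_vif_chanctx,
	.unassign_vif_chanctx	= wfx_unassign_vif_chanctx,
};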
diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c
index 10e019cddcc6..3b4ded2ac801 100644
--- a/drivers/net/wireless/st/cw1200/bh.c
+++ b/drivers/net/wireless/st/cw1200/bh.c
@@ -327,18 +327,12 @@ static int cw1200_bh_rx_helper(struct cw1200_common *priv,
if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
goto err;
- if (skb_rx) {
- dev_kfree_skb(skb_rx);
- skb_rx = NULL;
- }
+ dev_kfree_skb(skb_rx);
return 0;
err:
- if (skb_rx) {
- dev_kfree_skb(skb_rx);
- skb_rx = NULL;
- }
+ dev_kfree_skb(skb_rx);
return -1;
}
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 321df124d449..26d3614519b1 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -606,7 +606,8 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
}
int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct cw1200_common *priv = dev->priv;
int ret = 0;
@@ -1208,8 +1209,8 @@ static void cw1200_do_join(struct cw1200_common *priv)
struct cfg80211_bss *bss = NULL;
struct wsm_protected_mgmt_policy mgmt_policy;
struct wsm_join join = {
- .mode = conf->ibss_joined ?
- WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
+ .mode = priv->vif->cfg.ibss_joined ?
+ WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
.preamble_type = WSM_JOIN_PREAMBLE_LONG,
.probe_for_join = 1,
.atim_window = 0,
@@ -1230,7 +1231,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
- if (!bss && !conf->ibss_joined) {
+ if (!bss && !priv->vif->cfg.ibss_joined) {
wsm_unlock_tx(priv);
return;
}
@@ -1284,7 +1285,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
join.bssid,
join.dtim_period, priv->beacon_int);
- if (!conf->ibss_joined) {
+ if (!priv->vif->cfg.ibss_joined) {
const u8 *ssidie;
rcu_read_lock();
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
@@ -1302,7 +1303,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
}
/* Enable asynchronous join calls */
- if (!conf->ibss_joined) {
+ if (!priv->vif->cfg.ibss_joined) {
join.flags |= WSM_JOIN_FLAGS_FORCE;
join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND;
}
@@ -1671,7 +1672,7 @@ static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set)
pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis");
skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
- &tim_offset, &tim_length);
+ &tim_offset, &tim_length, 0);
if (!skb) {
if (!__cw1200_flush(priv, true))
wsm_unlock_tx(priv);
@@ -1796,14 +1797,14 @@ static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
void cw1200_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct cw1200_common *priv = dev->priv;
bool do_join = false;
mutex_lock(&priv->conf_mutex);
- pr_debug("BSS CHANGED: %08x\n", changed);
+ pr_debug("BSS CHANGED: %llx\n", changed);
/* TODO: BSS_CHANGED_QOS */
/* TODO: BSS_CHANGED_TXPOWER */
@@ -1813,15 +1814,15 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
int i;
pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n",
- info->arp_addr_cnt);
+ vif->cfg.arp_addr_cnt);
/* Currently only one IP address is supported by firmware.
* In case of more IPs arp filtering will be disabled.
*/
- if (info->arp_addr_cnt > 0 &&
- info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
- for (i = 0; i < info->arp_addr_cnt; i++) {
- filter.ipv4addrs[i] = info->arp_addr_list[i];
+ if (vif->cfg.arp_addr_cnt > 0 &&
+ vif->cfg.arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
+ for (i = 0; i < vif->cfg.arp_addr_cnt; i++) {
+ filter.ipv4addrs[i] = vif->cfg.arp_addr_list[i];
pr_debug("[STA] addr[%d]: 0x%X\n",
i, filter.ipv4addrs[i]);
}
@@ -1857,7 +1858,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
if (changed & BSS_CHANGED_BEACON_INT) {
pr_debug("CHANGED_BEACON_INT\n");
- if (info->ibss_joined)
+ if (vif->cfg.ibss_joined)
do_join = true;
else if (priv->join_status == CW1200_JOIN_STATUS_AP)
cw1200_update_beaconing(priv);
@@ -1882,7 +1883,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_HT)) {
pr_debug("BSS_CHANGED_ASSOC\n");
- if (info->assoc) {
+ if (vif->cfg.assoc) {
if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) {
ieee80211_connection_loss(vif);
mutex_unlock(&priv->conf_mutex);
@@ -1894,7 +1895,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
do_join = true;
}
- if (info->assoc || info->ibss_joined) {
+ if (vif->cfg.assoc || vif->cfg.ibss_joined) {
struct ieee80211_sta *sta = NULL;
__le32 htprot = 0;
@@ -1904,7 +1905,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
rcu_read_lock();
- if (info->bssid && !info->ibss_joined)
+ if (info->bssid && !vif->cfg.ibss_joined)
sta = ieee80211_find_sta(vif, info->bssid);
if (sta) {
priv->ht_info.ht_cap = sta->deflink.ht_cap;
@@ -1958,7 +1959,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
cancel_work_sync(&priv->unjoin_work);
priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count;
- priv->bss_params.aid = info->aid;
+ priv->bss_params.aid = vif->cfg.aid;
if (priv->join_dtim_period < 1)
priv->join_dtim_period = 1;
@@ -1973,7 +1974,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
priv->association_mode.basic_rate_set);
wsm_set_association_mode(priv, &priv->association_mode);
- if (!info->ibss_joined) {
+ if (!vif->cfg.ibss_joined) {
wsm_keep_alive_period(priv, 30 /* sec */);
wsm_set_bss_params(priv, &priv->bss_params);
priv->setbssparams_done = true;
@@ -2203,7 +2204,7 @@ static int cw1200_upload_beacon(struct cw1200_common *priv)
frame.rate = WSM_TRANSMIT_RATE_6;
frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
- &tim_offset, &tim_len);
+ &tim_offset, &tim_len, 0);
if (!frame.skb)
return -ENOMEM;
@@ -2330,8 +2331,8 @@ static int cw1200_start_ap(struct cw1200_common *priv)
memset(start.ssid, 0, sizeof(start.ssid));
if (!conf->hidden_ssid) {
- start.ssid_len = conf->ssid_len;
- memcpy(start.ssid, conf->ssid, start.ssid_len);
+ start.ssid_len = priv->vif->cfg.ssid_len;
+ memcpy(start.ssid, priv->vif->cfg.ssid, start.ssid_len);
}
priv->beacon_int = conf->beacon_int;
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index 706dab8e73bf..a49f187c7049 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -28,7 +28,8 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
unsigned int *total_flags,
u64 multicast);
int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
int cw1200_get_stats(struct ieee80211_hw *dev,
struct ieee80211_low_level_stats *stats);
int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
@@ -103,7 +104,7 @@ void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
void cw1200_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed);
+ u64 changed);
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params);
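/* Illustrative sketch, not part of the patch: widening 'changed' to u64
 * makes room for BSS_CHANGED_* bits above bit 31, and printk-style format
 * strings move from %x/%08x to %llx as in the hunks above.
 * example_log_changed() is a hypothetical helper.
 */
#include <linux/device.h>

static void example_log_changed(struct device *dev, u64 changed)
{
	dev_dbg(dev, "bss info changed: 0x%llx\n", changed);
}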
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index 7de666b90ff5..fde21fca6c5e 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1183,8 +1183,8 @@ void cw1200_rx_cb(struct cw1200_common *priv,
/* Disable beacon filter once we're associated... */
if (priv->disable_beacon_filter &&
- (priv->vif->bss_conf.assoc ||
- priv->vif->bss_conf.ibss_joined)) {
+ (priv->vif->cfg.assoc ||
+ priv->vif->cfg.ibss_joined)) {
priv->disable_beacon_filter = false;
queue_work(priv->workqueue,
&priv->update_filtering_work);
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index 1da6ba95d3d4..1da6ab664e41 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -1229,7 +1229,7 @@ struct wl1251_acx_arp_filter {
u8 address[16]; /* The IP address used to filter ARP packets.
ARP packets that do not match this address are
dropped. When the IP Version is 4, the last 12
- bytes of the the address are ignored. */
+ bytes of the address are ignored. */
} __attribute__((packed));
struct wl1251_acx_ac_cfg {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index a25a6143e65f..9144ef5538a8 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1077,7 +1077,7 @@ out:
static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct wl1251 *wl = hw->priv;
struct sk_buff *beacon, *skb;
@@ -1123,7 +1123,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
wl->beacon_int = bss_conf->beacon_int;
skb = ieee80211_pspoll_get(wl->hw, wl->vif);
@@ -1137,7 +1137,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1251_acx_aid(wl, bss_conf->aid);
+ ret = wl1251_acx_aid(wl, vif->cfg.aid);
if (ret < 0)
goto out_sleep;
} else {
@@ -1176,17 +1176,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ARP_FILTER) {
- __be32 addr = bss_conf->arp_addr_list[0];
+ __be32 addr = vif->cfg.arp_addr_list[0];
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
- enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+ enable = vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc;
ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
if (changed & BSS_CHANGED_BEACON) {
- beacon = ieee80211_beacon_get(hw, vif);
+ beacon = ieee80211_beacon_get(hw, vif, 0);
if (!beacon)
goto out_sleep;
@@ -1282,7 +1282,8 @@ static struct ieee80211_channel wl1251_channels[] = {
};
static int wl1251_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
enum wl1251_acx_ps_scheme ps_scheme;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index c6da0cfb4afb..d06a2c419447 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1924,13 +1924,10 @@ static int wl12xx_remove(struct platform_device *pdev)
struct wl1271 *wl = platform_get_drvdata(pdev);
struct wl12xx_priv *priv;
- if (!wl)
- goto out;
priv = wl->priv;
kfree(priv->rx_mem_addr);
-out:
return wlcore_remove(pdev);
}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index df6029ef6304..138edd28b0de 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -675,8 +675,8 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
- cmd->ap.ssid_len = bss_conf->ssid_len;
- memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
+ cmd->ap.ssid_len = vif->cfg.ssid_len;
+ memcpy(cmd->ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
}
supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 6959efa4bfa9..3e3922d4c788 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2904,10 +2904,12 @@ static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_bss_conf *bss_conf,
u32 sta_rate_set)
{
+ struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
+ bss_conf);
int ieoffset;
int ret;
- wlvif->aid = bss_conf->aid;
+ wlvif->aid = vif->cfg.aid;
wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
@@ -3935,7 +3937,6 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
u32 rates)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
@@ -3948,7 +3949,7 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
probe_rsp_len, 0,
rates);
- if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
+ if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
wl1271_error("probe_rsp template too big");
return -EINVAL;
}
@@ -3970,12 +3971,12 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
/* insert SSID from bss_conf */
probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
- probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
+ probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
memcpy(probe_rsp_templ + ssid_ie_offset + 2,
- bss_conf->ssid, bss_conf->ssid_len);
- templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
+ vif->cfg.ssid, vif->cfg.ssid_len);
+ templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
- memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
+ memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
ptr, probe_rsp_len - (ptr - probe_rsp_data));
templ_len += probe_rsp_len - (ptr - probe_rsp_data);
@@ -4038,7 +4039,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
u32 min_rate;
int ret;
int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
- struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
u16 tmpl_id;
if (!beacon) {
@@ -4255,15 +4256,15 @@ out:
}
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct ieee80211_bss_conf *bss_conf,
- u32 sta_rate_set)
+ struct ieee80211_vif *vif, u32 sta_rate_set)
{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u32 rates;
int ret;
wl1271_debug(DEBUG_MAC80211,
"changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
- bss_conf->bssid, bss_conf->aid,
+ bss_conf->bssid, vif->cfg.aid,
bss_conf->beacon_int,
bss_conf->basic_rates, sta_rate_set);
@@ -4351,7 +4352,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_IBSS) {
- if (bss_conf->ibss_joined) {
+ if (vif->cfg.ibss_joined) {
set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
@@ -4375,7 +4376,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_IDLE && !is_ibss)
- wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+ wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
@@ -4411,7 +4412,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(bss_conf->bssid)) {
- ret = wlcore_set_bssid(wl, wlvif, bss_conf,
+ ret = wlcore_set_bssid(wl, wlvif, vif,
sta_rate_set);
if (ret < 0)
goto out;
@@ -4427,9 +4428,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_IBSS) {
wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
- bss_conf->ibss_joined);
+ vif->cfg.ibss_joined);
- if (bss_conf->ibss_joined) {
+ if (vif->cfg.ibss_joined) {
u32 rates = bss_conf->basic_rates;
wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
@@ -4466,7 +4467,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
ret = wlcore_set_assoc(wl, wlvif, bss_conf,
sta_rate_set);
if (ret < 0)
@@ -4480,7 +4481,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_PS) {
- if ((bss_conf->ps) &&
+ if (vif->cfg.ps &&
test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
int ps_mode;
@@ -4500,7 +4501,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (ret < 0)
wl1271_warning("enter %s ps failed %d",
ps_mode_str, ret);
- } else if (!bss_conf->ps &&
+ } else if (!vif->cfg.ps &&
test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "auto ps disabled");
@@ -4541,11 +4542,11 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* Handle arp filtering. Done after join. */
if ((changed & BSS_CHANGED_ARP_FILTER) ||
(!is_ibss && (changed & BSS_CHANGED_QOS))) {
- __be32 addr = bss_conf->arp_addr_list[0];
+ __be32 addr = vif->cfg.arp_addr_list[0];
wlvif->sta.qos = bss_conf->qos;
WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
- if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
+ if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
wlvif->ip_addr = addr;
/*
* The template should have been configured only upon
@@ -4579,7 +4580,7 @@ out:
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
@@ -4675,7 +4676,7 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
rcu_read_lock();
- if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
rcu_read_unlock();
continue;
}
@@ -4700,6 +4701,7 @@ out:
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct wl1271 *wl = hw->priv;
@@ -4750,6 +4752,7 @@ out:
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct wl1271 *wl = hw->priv;
@@ -4861,7 +4864,8 @@ out:
}
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
@@ -5490,7 +5494,7 @@ static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
{
int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
struct sk_buff *beacon =
- ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
+ ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
if (!beacon)
return NULL;
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 514f2c1124b6..ba14d83353a4 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -654,7 +654,7 @@ static int __init virt_wifi_init_module(void)
{
int err;
- /* Guaranteed to be locallly-administered and not multicast. */
+ /* Guaranteed to be locally-administered and not multicast. */
eth_random_addr(fake_router_bssid);
err = register_netdevice_notifier(&virt_wifi_notifier);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 3ef8533205f9..80b905d49954 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -398,7 +398,7 @@ int zd_restore_settings(struct zd_mac *mac)
mac->type == NL80211_IFTYPE_ADHOC ||
mac->type == NL80211_IFTYPE_AP) {
if (mac->vif != NULL) {
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon)
zd_mac_config_beacon(mac->hw, beacon, false);
}
@@ -1167,7 +1167,7 @@ static void zd_beacon_done(struct zd_mac *mac)
/*
* Fetch next beacon so that tim_count is updated.
*/
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon)
zd_mac_config_beacon(mac->hw, beacon, true);
@@ -1278,19 +1278,20 @@ static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct zd_mac *mac = zd_hw_mac(hw);
int associated;
- dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+ dev_dbg_f(zd_mac_dev(mac), "changes: %llx\n", changes);
if (mac->type == NL80211_IFTYPE_MESH_POINT ||
mac->type == NL80211_IFTYPE_ADHOC ||
mac->type == NL80211_IFTYPE_AP) {
associated = true;
if (changes & BSS_CHANGED_BEACON) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif,
+ 0);
if (beacon) {
zd_chip_disable_hwint(&mac->chip);
@@ -1447,7 +1448,7 @@ static void beacon_watchdog_handler(struct work_struct *work)
zd_chip_disable_hwint(&mac->chip);
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon) {
zd_mac_free_cur_beacon(mac);
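/* Illustrative sketch, not part of the patch: ieee80211_beacon_get() and
 * ieee80211_beacon_get_tim() now take a link_id argument; drivers without
 * MLO support, as in the hunks above, pass 0 to select the default link.
 * example_get_beacon() is a hypothetical wrapper.
 */
#include <net/mac80211.h>

static struct sk_buff *example_get_beacon(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	return ieee80211_beacon_get(hw, vif, 0);	/* link_id 0: default link */
}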
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index d9dea4829c86..8174d7b2966c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,7 +48,6 @@
#include <linux/debugfs.h>
typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
@@ -82,8 +81,6 @@ struct xenvif_rx_meta {
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
-#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
-
#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* The maximum number of frags is derived from the size of a grant (same
@@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
-int xenvif_schedulable(struct xenvif *vif);
-
-int xenvif_queue_stopped(struct xenvif_queue *queue);
-void xenvif_wake_queue(struct xenvif_queue *queue);
-
/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
@@ -394,7 +386,6 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -403,9 +394,6 @@ void xenvif_carrier_on(struct xenvif *vif);
void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
bool zerocopy_success);
-/* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
-
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 8e035374a370..fb32ae82d9b0 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -69,7 +69,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
wake_up(&queue->dealloc_wq);
}
-int xenvif_schedulable(struct xenvif *vif)
+static int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@@ -177,20 +177,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int xenvif_queue_stopped(struct xenvif_queue *queue)
-{
- struct net_device *dev = queue->vif->dev;
- unsigned int id = queue->id;
- return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
-}
-
-void xenvif_wake_queue(struct xenvif_queue *queue)
-{
- struct net_device *dev = queue->vif->dev;
- unsigned int id = queue->id;
- netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
-}
-
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d93814c14a23..a256695fc89e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -112,6 +112,8 @@ static void make_tx_response(struct xenvif_queue *queue,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+
static inline int tx_work_todo(struct xenvif_queue *queue);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
@@ -1199,9 +1201,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
}
mss = skb_shinfo(skb)->gso_size;
- hdrlen = skb_transport_header(skb) -
- skb_mac_header(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
skb_shinfo(skb)->gso_segs =
DIV_ROUND_UP(skb->len - hdrlen, mss);
@@ -1418,7 +1418,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
notify_remote_via_irq(queue->tx_irq);
}
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;
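/* Illustrative sketch, not part of the patch: the hdrlen hunk above uses
 * skb_tcp_all_headers(), a helper returning
 * skb_transport_offset(skb) + tcp_hdrlen(skb); assuming the mac header
 * starts at skb->data on this path, that matches the old
 * skb_transport_header() - skb_mac_header() + tcp_hdrlen() arithmetic.
 * example_gso_segs() is a hypothetical helper.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int example_gso_segs(const struct sk_buff *skb,
				     unsigned int mss)
{
	unsigned int hdrlen = skb_tcp_all_headers(skb);

	return DIV_ROUND_UP(skb->len - hdrlen, mss);
}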
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index dbac4c03d21a..932762177110 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -486,7 +486,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
#define RX_BATCH_SIZE 64
-void xenvif_rx_action(struct xenvif_queue *queue)
+static void xenvif_rx_action(struct xenvif_queue *queue)
{
struct sk_buff_head completed_skbs;
unsigned int work_done = 0;
@@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)
queue->rx_copy.completed = &completed_skbs;
while (xenvif_rx_ring_slots_available(queue) &&
+ !skb_queue_empty(&queue->rx_queue) &&
work_done < RX_BATCH_SIZE) {
xenvif_rx_skb(queue);
work_done++;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2409007f1fd9..27a11cc08c61 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1043,16 +1043,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
for (;;) {
- if (unlikely(rx->status < 0 ||
- rx->offset + rx->status > XEN_PAGE_SIZE)) {
- if (net_ratelimit())
- dev_warn(dev, "rx->offset: %u, size: %d\n",
- rx->offset, rx->status);
- xennet_move_rx_slot(queue, skb, ref);
- err = -EINVAL;
- goto next;
- }
-
/*
* This definitely indicates a bug, either in this driver or in
* the backend driver. In future this should flag the bad
@@ -1066,6 +1056,16 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}
+ if (unlikely(rx->status < 0 ||
+ rx->offset + rx->status > XEN_PAGE_SIZE)) {
+ if (net_ratelimit())
+ dev_warn(dev, "rx->offset: %u, size: %d\n",
+ rx->offset, rx->status);
+ xennet_move_rx_slot(queue, skb, ref);
+ err = -EINVAL;
+ goto next;
+ }
+
if (!gnttab_end_foreign_access_ref(ref)) {
dev_alert(dev,
"Grant still in use by backend domain\n");
@@ -2464,10 +2464,6 @@ static int xennet_connect(struct net_device *dev)
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
- spin_lock_irq(&queue->tx_lock);
- xennet_tx_buf_gc(queue);
- spin_unlock_irq(&queue->tx_lock);
-
spin_lock_bh(&queue->rx_lock);
xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index 518e2afb43a8..7c93d484dc1b 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -27,6 +27,9 @@
NFC_PROTO_ISO14443_B_MASK | \
NFC_PROTO_NFC_DEP_MASK)
+#define NXP_NCI_RF_PLL_UNLOCKED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x21)
+#define NXP_NCI_RF_TXLDO_ERROR_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x23)
+
static int nxp_nci_open(struct nci_dev *ndev)
{
struct nxp_nci_info *info = nci_get_drvdata(ndev);
@@ -83,11 +86,42 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
return r;
}
+static int nxp_nci_rf_pll_unlocked_ntf(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ nfc_err(&ndev->nfc_dev->dev,
+ "PLL didn't lock. Missing or unstable clock?\n");
+
+ return 0;
+}
+
+static int nxp_nci_rf_txldo_error_ntf(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ nfc_err(&ndev->nfc_dev->dev,
+ "RF transmitter couldn't start. Bad power and/or configuration?\n");
+
+ return 0;
+}
+
+static const struct nci_driver_ops nxp_nci_core_ops[] = {
+ {
+ .opcode = NXP_NCI_RF_PLL_UNLOCKED_NTF,
+ .ntf = nxp_nci_rf_pll_unlocked_ntf,
+ },
+ {
+ .opcode = NXP_NCI_RF_TXLDO_ERROR_NTF,
+ .ntf = nxp_nci_rf_txldo_error_ntf,
+ },
+};
+
static const struct nci_ops nxp_nci_ops = {
.open = nxp_nci_open,
.close = nxp_nci_close,
.send = nxp_nci_send,
.fw_download = nxp_nci_fw_download,
+ .core_ops = nxp_nci_core_ops,
+ .n_core_ops = ARRAY_SIZE(nxp_nci_core_ops),
};
int nxp_nci_probe(void *phy_id, struct device *pdev,
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index b019755e4e21..3ece49cb18ff 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -45,7 +45,6 @@
#define NTB_EPF_MIN_DB_COUNT 3
#define NTB_EPF_MAX_DB_COUNT 31
-#define NTB_EPF_MW_OFFSET 2
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
@@ -67,6 +66,7 @@ struct ntb_epf_dev {
enum pci_barno ctrl_reg_bar;
enum pci_barno peer_spad_reg_bar;
enum pci_barno db_reg_bar;
+ enum pci_barno mw_bar;
unsigned int mw_count;
unsigned int spad_count;
@@ -92,6 +92,8 @@ struct ntb_epf_data {
enum pci_barno peer_spad_reg_bar;
/* BAR that contains Doorbell region and Memory window '1' */
enum pci_barno db_reg_bar;
+ /* BAR that contains memory windows*/
+ enum pci_barno mw_bar;
};
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
@@ -411,7 +413,7 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EINVAL;
}
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
mw_size = pci_resource_len(ntb->pdev, bar);
@@ -453,7 +455,7 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
@@ -565,6 +567,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
struct pci_dev *pdev)
{
struct device *dev = ndev->dev;
+ size_t spad_sz, spad_off;
int ret;
pci_set_drvdata(pdev, ndev);
@@ -599,10 +602,16 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
goto err_dma_mask;
}
- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
- if (!ndev->peer_spad_reg) {
- ret = -EIO;
- goto err_dma_mask;
+ if (ndev->peer_spad_reg_bar) {
+ ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (!ndev->peer_spad_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+ } else {
+ spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+ spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
@@ -657,6 +666,7 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
enum pci_barno peer_spad_reg_bar = BAR_1;
enum pci_barno ctrl_reg_bar = BAR_0;
enum pci_barno db_reg_bar = BAR_2;
+ enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
@@ -671,17 +681,16 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
data = (struct ntb_epf_data *)id->driver_data;
if (data) {
- if (data->peer_spad_reg_bar)
- peer_spad_reg_bar = data->peer_spad_reg_bar;
- if (data->ctrl_reg_bar)
- ctrl_reg_bar = data->ctrl_reg_bar;
- if (data->db_reg_bar)
- db_reg_bar = data->db_reg_bar;
+ peer_spad_reg_bar = data->peer_spad_reg_bar;
+ ctrl_reg_bar = data->ctrl_reg_bar;
+ db_reg_bar = data->db_reg_bar;
+ mw_bar = data->mw_bar;
}
ndev->peer_spad_reg_bar = peer_spad_reg_bar;
ndev->ctrl_reg_bar = ctrl_reg_bar;
ndev->db_reg_bar = db_reg_bar;
+ ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
@@ -729,6 +738,14 @@ static const struct ntb_epf_data j721e_data = {
.ctrl_reg_bar = BAR_0,
.peer_spad_reg_bar = BAR_1,
.db_reg_bar = BAR_2,
+ .mw_bar = BAR_2,
+};
+
+static const struct ntb_epf_data mx8_data = {
+ .ctrl_reg_bar = BAR_0,
+ .peer_spad_reg_bar = BAR_0,
+ .db_reg_bar = BAR_2,
+ .mw_bar = BAR_4,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
@@ -737,6 +754,11 @@ static const struct pci_device_id ntb_epf_pci_tbl[] = {
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)&mx8_data,
+ },
{ },
};
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 733557231ed0..0ed6f809ff2e 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2406,7 +2406,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
idt_get_mw_name(data), ndev->mws[idx].bar);
@@ -2435,7 +2435,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off,
"%s BAR%hhu, ", idt_get_mw_name(data),
@@ -2480,7 +2480,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
int src;
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
- "\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
+ "\t%hhu. 0x%08x from peer %d (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
}
off += scnprintf(strbuf + off, size - off, "\n");
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index e5f14e20a9ff..84772013812b 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -763,7 +763,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
- else if (pdev_is_gen4(ndev->ntb.pdev))
+ else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
@@ -1874,7 +1874,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
- } else if (pdev_is_gen4(pdev)) {
+ } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
ndev->ntb.ops = &intel_ntb4_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
@@ -1904,7 +1904,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -1920,7 +1921,8 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2047,6 +2049,8 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
/* GEN4 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
+ /* GEN5 PCIe */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index 4081fc538ff4..22cac7975b3c 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -197,7 +197,7 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
if (pdev_is_ICX(pdev))
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
- else if (pdev_is_SPR(pdev))
+ else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index b233d1c6ba2d..da4d5fe55bab 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -70,6 +70,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_GNR 0x0db4
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
@@ -228,4 +229,10 @@ static inline int pdev_is_gen4(struct pci_dev *pdev)
return 0;
}
+
+static inline int pdev_is_gen5(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_GNR;
+}
+
#endif
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index b7bf3f863d79..5ee0afa621a9 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
u64 bits;
int n;
+ if (*offp)
+ return 0;
+
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
- if (ret < 0) {
+ if (copy_from_user(buf, ubuf, size)) {
kfree(buf);
- return ret;
+ return -EFAULT;
}
buf[size] = 0;
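/* Illustrative sketch, not part of the patch: the ntb_tool hunk above
 * replaces simple_write_to_buffer() with an explicit "*offp must be zero"
 * check plus copy_from_user(), so a command is parsed only when written in
 * one pass from the start of the file.  example_write() is a hypothetical
 * debugfs write handler following the same pattern.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t size, loff_t *offp)
{
	char *buf;

	if (*offp)
		return 0;

	buf = kmalloc(size + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, size)) {
		kfree(buf);
		return -EFAULT;
	}
	buf[size] = 0;

	/* ... parse the command held in buf ... */

	kfree(buf);
	return size;
}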
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9613e54c7a67..0297b7882e33 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1422,7 +1422,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- unsigned int op, sector_t sector)
+ enum req_op op, sector_t sector)
{
int ret;
@@ -1483,7 +1483,7 @@ static void btt_submit_bio(struct bio *bio)
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
@@ -1548,14 +1548,14 @@ static int btt_blk_init(struct btt *btt)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(btt->btt_disk);
+ put_disk(btt->btt_disk);
return rc;
}
static void btt_blk_cleanup(struct btt *btt)
{
del_gendisk(btt->btt_disk);
- blk_cleanup_disk(btt->btt_disk);
+ put_disk(btt->btt_disk);
}
/**
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 629d10fcf53b..7e88cd242380 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -239,7 +239,7 @@ static void pmem_submit_bio(struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct pmem_device *pmem = bdev->bd_disk->private_data;
blk_status_t rc;
@@ -450,9 +450,24 @@ static void pmem_release_disk(void *__pmem)
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
- blk_cleanup_disk(pmem->disk);
+ put_disk(pmem->disk);
}
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+ unsigned long pfn, unsigned long nr_pages, int mf_flags)
+{
+ struct pmem_device *pmem =
+ container_of(pgmap, struct pmem_device, pgmap);
+ u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+ u64 len = nr_pages << PAGE_SHIFT;
+
+ return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+ .memory_failure = pmem_pagemap_memory_failure,
+};
+
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
@@ -514,6 +529,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -527,6 +543,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
@@ -596,7 +613,7 @@ out_cleanup_dax:
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
out:
- blk_cleanup_disk(pmem->disk);
+ put_disk(pmem->disk);
return rc;
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index d976260eca7a..473a71bbd9c9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -133,7 +133,8 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- memregion_free(nd_region->id);
+ if (!test_bit(ND_REGION_CXL, &nd_region->flags))
+ memregion_free(nd_region->id);
kfree(nd_region);
}
@@ -982,9 +983,14 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!nd_region)
return NULL;
- nd_region->id = memregion_alloc(GFP_KERNEL);
- if (nd_region->id < 0)
- goto err_id;
+ /* CXL pre-assigns memregion ids before creating nvdimm regions */
+ if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
+ nd_region->id = ndr_desc->memregion;
+ } else {
+ nd_region->id = memregion_alloc(GFP_KERNEL);
+ if (nd_region->id < 0)
+ goto err_id;
+ }
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
if (!nd_region->lane)
@@ -1043,9 +1049,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
- err_percpu:
- memregion_free(nd_region->id);
- err_id:
+err_percpu:
+ if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
+ memregion_free(nd_region->id);
+err_id:
kfree(nd_region);
return NULL;
}
@@ -1068,6 +1075,13 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+void nvdimm_region_delete(struct nd_region *nd_region)
+{
+ if (nd_region)
+ nd_device_unregister(&nd_region->dev, ND_SYNC);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_delete);
+
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 995b6cdc67ed..20da455d2ef6 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -81,17 +81,24 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
ndr_desc.res = &res;
ndr_desc.numa_node = nid;
ndr_desc.flush = async_pmem_flush;
+ ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ /*
+ * The NVDIMM region could be available before the
+ * virtio_device_ready() that is called by
+ * virtio_dev_probe(), so we set device ready here.
+ */
+ virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
- nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
out_nd:
+ virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index 87ae409a32b9..656e46d938da 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVME Support"
+source "drivers/nvme/common/Kconfig"
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index fb42c44609a8..eedca8c72098 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NVME_COMMON) += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
new file mode 100644
index 000000000000..4514f44362dd
--- /dev/null
+++ b/drivers/nvme/common/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_COMMON
+ tristate
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
new file mode 100644
index 000000000000..720c625b8a52
--- /dev/null
+++ b/drivers/nvme/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+
+nvme-common-y += auth.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
new file mode 100644
index 000000000000..04bd28f17dcc
--- /dev/null
+++ b/drivers/nvme/common/auth.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <linux/nvme.h>
+#include <linux/nvme-auth.h>
+
+static u32 nvme_dhchap_seqnum;
+static DEFINE_MUTEX(nvme_dhchap_mutex);
+
+u32 nvme_auth_get_seqnum(void)
+{
+ u32 seqnum;
+
+ mutex_lock(&nvme_dhchap_mutex);
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum = prandom_u32();
+ else {
+ nvme_dhchap_seqnum++;
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum++;
+ }
+ seqnum = nvme_dhchap_seqnum;
+ mutex_unlock(&nvme_dhchap_mutex);
+ return seqnum;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
+
+static struct nvme_auth_dhgroup_map {
+ const char name[16];
+ const char kpp[16];
+} dhgroup_map[] = {
+ [NVME_AUTH_DHGROUP_NULL] = {
+ .name = "null", .kpp = "null" },
+ [NVME_AUTH_DHGROUP_2048] = {
+ .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
+ [NVME_AUTH_DHGROUP_3072] = {
+ .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
+ [NVME_AUTH_DHGROUP_4096] = {
+ .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
+ [NVME_AUTH_DHGROUP_6144] = {
+ .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
+ [NVME_AUTH_DHGROUP_8192] = {
+ .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
+};
+
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].name;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].kpp;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+ int i;
+
+ if (!dhgroup_name || !strlen(dhgroup_name))
+ return NVME_AUTH_DHGROUP_INVALID;
+ for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+ if (!strlen(dhgroup_map[i].name))
+ continue;
+ if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+ strlen(dhgroup_map[i].name)))
+ return i;
+ }
+ return NVME_AUTH_DHGROUP_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+ int len;
+ const char hmac[15];
+ const char digest[8];
+} hash_map[] = {
+ [NVME_AUTH_HASH_SHA256] = {
+ .len = 32,
+ .hmac = "hmac(sha256)",
+ .digest = "sha256",
+ },
+ [NVME_AUTH_HASH_SHA384] = {
+ .len = 48,
+ .hmac = "hmac(sha384)",
+ .digest = "sha384",
+ },
+ [NVME_AUTH_HASH_SHA512] = {
+ .len = 64,
+ .hmac = "hmac(sha512)",
+ .digest = "sha512",
+ },
+};
+
+const char *nvme_auth_hmac_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].hmac;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].digest;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+u8 nvme_auth_hmac_id(const char *hmac_name)
+{
+ int i;
+
+ if (!hmac_name || !strlen(hmac_name))
+ return NVME_AUTH_HASH_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+ if (!strlen(hash_map[i].hmac))
+ continue;
+ if (!strncmp(hash_map[i].hmac, hmac_name,
+ strlen(hash_map[i].hmac)))
+ return i;
+ }
+ return NVME_AUTH_HASH_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+size_t nvme_auth_hmac_hash_len(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return 0;
+ return hash_map[hmac_id].len;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash)
+{
+ struct nvme_dhchap_key *key;
+ unsigned char *p;
+ u32 crc;
+ int ret, key_len;
+ size_t allocated_len = strlen(secret);
+
+ /* The secret may carry a trailing ':' */
+ p = strrchr(secret, ':');
+ if (p)
+ allocated_len = p - secret;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+ key->key = kzalloc(allocated_len, GFP_KERNEL);
+ if (!key->key) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_len = base64_decode(secret, allocated_len, key->key);
+ if (key_len < 0) {
+ pr_debug("base64 key decoding error %d\n",
+ key_len);
+ ret = key_len;
+ goto out_free_secret;
+ }
+
+ if (key_len != 36 && key_len != 52 &&
+ key_len != 68) {
+ pr_err("Invalid key len %d\n", key_len);
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ if (key_hash > 0 &&
+ (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+ pr_err("Mismatched key len %d for %s\n", key_len,
+ nvme_auth_hmac_name(key_hash));
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ /* The last four bytes are the CRC in little-endian format */
+ key_len -= 4;
+ /*
+  * The Linux crc32() implementation doesn't do the pre- and post-inversion
+  * (conditioning), so we have to do it manually.
+ */
+ crc = ~crc32(~0, key->key, key_len);
+
+ if (get_unaligned_le32(key->key + key_len) != crc) {
+ pr_err("key crc mismatch (key %08x, crc %08x)\n",
+ get_unaligned_le32(key->key + key_len), crc);
+ ret = -EKEYREJECTED;
+ goto out_free_secret;
+ }
+ key->len = key_len;
+ key->hash = key_hash;
+ return key;
+out_free_secret:
+ kfree_sensitive(key->key);
+out_free_key:
+ kfree(key);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+
+void nvme_auth_free_key(struct nvme_dhchap_key *key)
+{
+ if (!key)
+ return;
+ kfree_sensitive(key->key);
+ kfree(key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free_key);
+
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+{
+ const char *hmac_name;
+ struct crypto_shash *key_tfm;
+ struct shash_desc *shash;
+ u8 *transformed_key;
+ int ret;
+
+ if (!key || !key->key) {
+ pr_warn("No key specified\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ if (key->hash == 0) {
+ transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
+ return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ }
+ hmac_name = nvme_auth_hmac_name(key->hash);
+ if (!hmac_name) {
+ pr_warn("Invalid key hash id %d\n", key->hash);
+ return ERR_PTR(-EINVAL);
+ }
+
+ key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(key_tfm))
+ return (u8 *)key_tfm;
+
+ shash = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(key_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ if (!transformed_key) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ shash->tfm = key_tfm;
+ ret = crypto_shash_setkey(key_tfm, key->key, key->len);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_init(shash);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, nqn, strlen(nqn));
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_final(shash, transformed_key);
+ if (ret < 0)
+ goto out_free_transformed_key;
+
+ kfree(shash);
+ crypto_free_shash(key_tfm);
+
+ return transformed_key;
+
+out_free_transformed_key:
+ kfree_sensitive(transformed_key);
+out_free_shash:
+ kfree(shash);
+out_free_key:
+ crypto_free_shash(key_tfm);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
+
+static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
+{
+ const char *digest_name;
+ struct crypto_shash *tfm;
+ int ret;
+
+ digest_name = nvme_auth_digest_name(hmac_id);
+ if (!digest_name) {
+ pr_debug("%s: failed to get digest for %d\n", __func__,
+ hmac_id);
+ return -EINVAL;
+ }
+ tfm = crypto_alloc_shash(digest_name, 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
+ if (ret < 0)
+ pr_debug("%s: Failed to hash digest len %zu\n", __func__,
+ skey_len);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ u8 *hashed_key;
+ const char *hmac_name;
+ int ret;
+
+ hashed_key = kmalloc(hlen, GFP_KERNEL);
+ if (!hashed_key)
+ return -ENOMEM;
+
+ ret = nvme_auth_hash_skey(hmac_id, skey,
+ skey_len, hashed_key);
+ if (ret < 0)
+ goto out_free_key;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ ret = -EINVAL;
+ goto out_free_key;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_key;
+ }
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_free_hash;
+ }
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, hashed_key, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_update(desc, challenge, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_final(desc, aug);
+out_free_desc:
+ kfree_sensitive(desc);
+out_free_hash:
+ crypto_free_shash(tfm);
+out_free_key:
+ kfree_sensitive(hashed_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
+
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
+{
+ int ret;
+
+ ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
+ if (ret)
+ pr_debug("failed to set private key, error %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
+
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, host_key, host_key_len);
+ kpp_request_set_output(req, &dst, host_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
+
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist src, dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ sg_init_one(&src, ctrl_key, ctrl_key_len);
+ kpp_request_set_input(req, &src, ctrl_key_len);
+ sg_init_one(&dst, sess_key, sess_key_len);
+ kpp_request_set_output(req, &dst, sess_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
+
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
+{
+ struct nvme_dhchap_key *key;
+ u8 key_hash;
+
+ if (!secret) {
+ *ret_key = NULL;
+ return 0;
+ }
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+ return -EINVAL;
+
+ /* Pass in the secret without the 'DHHC-1:XX:' prefix */
+ key = nvme_auth_extract_key(secret + 10, key_hash);
+ if (IS_ERR(key)) {
+ *ret_key = NULL;
+ return PTR_ERR(key);
+ }
+
+ *ret_key = key;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+MODULE_LICENSE("GPL v2");
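For reference, the secret handled by nvme_auth_extract_key() and nvme_auth_generate_key() above is a "DHHC-1:<hash>:<base64>:" string whose base64 payload decodes to the raw key followed by a CRC-32 trailer, which is why only decoded lengths of 36, 52 and 68 bytes are accepted. A minimal user-space sketch of the same check follows; it is not part of this patch, and it assumes zlib's crc32() and OpenSSL's EVP_DecodeBlock() as stand-ins for the in-kernel crc32() and base64_decode() helpers.

/*
 * Illustrative user-space validation of a DH-HMAC-CHAP secret in the
 * "DHHC-1:<hash>:<base64>:" format parsed by nvme_auth_extract_key().
 * Assumed build: cc check_dhchap.c -lcrypto -lz
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h>
#include <openssl/evp.h>

static int check_dhchap_secret(const char *secret)
{
	unsigned char raw[128];
	const char *b64;
	size_t b64_len;
	int decoded, pad = 0;
	uint32_t crc, trailer;

	if (strncmp(secret, "DHHC-1:", 7))
		return -1;			/* missing prefix */
	b64 = strchr(secret + 7, ':');
	if (!b64)
		return -1;
	b64++;					/* start of the base64 payload */
	b64_len = strcspn(b64, ":");		/* payload ends at the optional trailing ':' */
	if (!b64_len || b64_len % 4 || b64_len / 4 * 3 > sizeof(raw))
		return -1;

	decoded = EVP_DecodeBlock(raw, (const unsigned char *)b64, b64_len);
	if (decoded < 0)
		return -1;
	/* EVP_DecodeBlock() counts '=' padding as data; strip it again. */
	while (pad < 2 && b64[b64_len - 1 - pad] == '=')
		pad++;
	decoded -= pad;

	/* Valid lengths are the hash length (32/48/64) plus a 4-byte CRC. */
	if (decoded != 36 && decoded != 52 && decoded != 68)
		return -1;
	decoded -= 4;

	/* The trailer is the CRC-32 of the key, stored little-endian. */
	crc = crc32(0L, raw, decoded);
	trailer = raw[decoded] | raw[decoded + 1] << 8 |
		  raw[decoded + 2] << 16 | (uint32_t)raw[decoded + 3] << 24;
	return crc == trailer ? 0 : -1;
}

int main(int argc, char **argv)
{
	if (argc != 2)
		return 1;
	printf("%s\n", check_dhchap_secret(argv[1]) ? "bad secret" : "secret ok");
	return 0;
}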
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 877d2ec4ea9f..2f6a7f8c94e8 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -92,6 +92,21 @@ config NVME_TCP
If unsure, say N.
+config NVME_AUTH
+ bool "NVM Express over Fabrics In-Band Authentication"
+ depends on NVME_CORE
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This provides support for NVMe over Fabrics In-Band Authentication.
+
+ If unsure, say N.
+
config NVME_APPLE
tristate "Apple ANS2 NVM Express host driver"
depends on OF && BLOCK
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a36ae1612059..e27202d22c7d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -10,12 +10,14 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y := core.o ioctl.o constants.o
+nvme-core-y += core.o ioctl.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index d702d7d60235..5fc5ea196b40 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -845,11 +845,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
apple_nvme_handle_cq(&anv->adminq, true);
spin_unlock_irqrestore(&anv->lock, flags);
- blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl);
- blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request,
- &anv->ctrl);
- blk_mq_tagset_wait_completed_request(&anv->tagset);
- blk_mq_tagset_wait_completed_request(&anv->admin_tagset);
+ nvme_cancel_tagset(&anv->ctrl);
+ nvme_cancel_admin_tagset(&anv->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -862,8 +859,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
}
}
-static enum blk_eh_timer_return apple_nvme_timeout(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct apple_nvme_queue *q = iod->q;
@@ -1223,6 +1219,11 @@ static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
nvme_put_ctrl(&anv->ctrl);
}
+static void devm_apple_nvme_put_tag_set(void *data)
+{
+ blk_mq_free_tag_set(data);
+}
+
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
int ret;
@@ -1239,8 +1240,7 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(anv->dev,
- (void (*)(void *))blk_mq_free_tag_set,
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
&anv->admin_tagset);
if (ret)
return ret;
@@ -1264,8 +1264,8 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset);
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->tagset);
if (ret)
return ret;
@@ -1366,6 +1366,11 @@ static int apple_nvme_attach_genpd(struct apple_nvme *anv)
return 0;
}
+static void devm_apple_nvme_mempool_destroy(void *data)
+{
+ mempool_destroy(data);
+}
+
static int apple_nvme_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1463,8 +1468,8 @@ static int apple_nvme_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto put_dev;
}
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool);
+ ret = devm_add_action_or_reset(anv->dev,
+ devm_apple_nvme_mempool_destroy, anv->iod_mempool);
if (ret)
goto put_dev;
@@ -1502,7 +1507,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
if (!blk_get_queue(anv->ctrl.admin_q)) {
nvme_start_admin_queue(&anv->ctrl);
- blk_cleanup_queue(anv->ctrl.admin_q);
+ blk_mq_destroy_queue(anv->ctrl.admin_q);
anv->ctrl.admin_q = NULL;
ret = -ENODEV;
goto put_dev;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 000000000000..c8a6db7c4498
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+struct nvme_dhchap_queue_context {
+ struct list_head entry;
+ struct work_struct auth_work;
+ struct nvme_ctrl *ctrl;
+ struct crypto_shash *shash_tfm;
+ struct crypto_kpp *dh_tfm;
+ void *buf;
+ size_t buf_size;
+ int qid;
+ int error;
+ u32 s1;
+ u32 s2;
+ u16 transaction;
+ u8 status;
+ u8 hash_id;
+ size_t hash_len;
+ u8 dhgroup_id;
+ u8 c1[64];
+ u8 c2[64];
+ u8 response[64];
+ u8 *host_response;
+ u8 *ctrl_key;
+ int ctrl_key_len;
+ u8 *host_key;
+ int host_key_len;
+ u8 *sess_key;
+ int sess_key_len;
+};
+
+#define nvme_auth_flags_from_qid(qid) \
+ (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+ (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+ void *data, size_t data_len, bool auth_send)
+{
+ struct nvme_command cmd = {};
+ blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+ struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ int ret;
+
+ cmd.auth_common.opcode = nvme_fabrics_command;
+ cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+ cmd.auth_common.spsp0 = 0x01;
+ cmd.auth_common.spsp1 = 0x01;
+ if (auth_send) {
+ cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+ cmd.auth_send.tl = cpu_to_le32(data_len);
+ } else {
+ cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+ cmd.auth_receive.al = cpu_to_le32(data_len);
+ }
+
+ ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+ qid == 0 ? NVME_QID_ANY : qid,
+ 0, flags);
+ if (ret > 0)
+ dev_warn(ctrl->device,
+ "qid %d auth_send failed with status %d\n", qid, ret);
+ else if (ret < 0)
+ dev_err(ctrl->device,
+ "qid %d auth_send failed with error %d\n", qid, ret);
+ return ret;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+ struct nvmf_auth_dhchap_failure_data *data,
+ u16 transaction, u8 expected_msg)
+{
+ dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+ __func__, qid, data->auth_type, data->auth_id);
+
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ return data->rescode_exp;
+ }
+ if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+ data->auth_id != expected_msg) {
+ dev_warn(ctrl->device,
+ "qid %d invalid message %02x/%02x\n",
+ qid, data->auth_type, data->auth_id);
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ if (le16_to_cpu(data->t_id) != transaction) {
+ dev_warn(ctrl->device,
+ "qid %d invalid transaction ID %d\n",
+ qid, le16_to_cpu(data->t_id));
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+ size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+ memset((u8 *)chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->sc_c = 0; /* No secure channel concatenation */
+ data->napd = 1;
+ data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+ data->auth_protocol[0].dhchap.halen = 3;
+ data->auth_protocol[0].dhchap.dhlen = 6;
+ data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
+ data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
+ data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
+ data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
+ data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
+ data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
+ data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
+ data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
+ data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ size_t size = sizeof(*data) + data->hl + dhvlen;
+ const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
+ const char *hmac_name, *kpp_name;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ hmac_name = nvme_auth_hmac_name(data->hashid);
+ if (!hmac_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid HASH ID %d\n",
+ chap->qid, data->hashid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (chap->hash_id == data->hashid && chap->shash_tfm &&
+ !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+ crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing hash %s\n",
+ chap->qid, hmac_name);
+ goto select_kpp;
+ }
+
+ /* Reset if hash cannot be reused */
+ if (chap->shash_tfm) {
+ crypto_free_shash(chap->shash_tfm);
+ chap->hash_id = 0;
+ chap->hash_len = 0;
+ }
+ chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(chap->shash_tfm)) {
+ dev_warn(ctrl->device,
+ "qid %d: failed to allocate hash %s, error %ld\n",
+ chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %d\n",
+ chap->qid, data->hl);
+ crypto_free_shash(chap->shash_tfm);
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Reset the host response if the hash has changed */
+ if (chap->hash_id != data->hashid) {
+ kfree(chap->host_response);
+ chap->host_response = NULL;
+ }
+
+ chap->hash_id = data->hashid;
+ chap->hash_len = data->hl;
+ dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+ chap->qid, hmac_name);
+
+select_kpp:
+ kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+ if (!kpp_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH group id %d\n",
+ chap->qid, data->dhgid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ /* Leave previous dh_tfm intact */
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Clear host and controller keys to avoid accidental reuse */
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+
+ if (chap->dhgroup_id == data->dhgid &&
+ (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing DH group %s\n",
+ chap->qid, gid_name);
+ goto skip_kpp;
+ }
+
+ /* Reset dh_tfm if it can't be reused */
+ if (chap->dh_tfm) {
+ crypto_free_kpp(chap->dh_tfm);
+ chap->dh_tfm = NULL;
+ }
+
+ if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
+ if (dhvlen == 0) {
+ dev_warn(ctrl->device,
+ "qid %d: empty DH value\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
+ if (IS_ERR(chap->dh_tfm)) {
+ int ret = PTR_ERR(chap->dh_tfm);
+
+ dev_warn(ctrl->device,
+ "qid %d: error %d initializing DH group %s\n",
+ chap->qid, ret, gid_name);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ chap->dh_tfm = NULL;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
+ chap->qid, gid_name);
+ } else if (dhvlen != 0) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH value for NULL DH\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+ chap->dhgroup_id = data->dhgid;
+
+skip_kpp:
+ chap->s1 = le32_to_cpu(data->seqnum);
+ memcpy(chap->c1, data->cval, chap->hash_len);
+ if (dhvlen) {
+ chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
+ if (!chap->ctrl_key) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ chap->ctrl_key_len = dhvlen;
+ memcpy(chap->ctrl_key, data->cval + chap->hash_len,
+ dhvlen);
+ dev_dbg(ctrl->device, "ctrl public key %*ph\n",
+ (int)chap->ctrl_key_len, chap->ctrl_key);
+ }
+
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ size += 2 * chap->hash_len;
+
+ if (chap->host_key_len)
+ size += chap->host_key_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->hl = chap->hash_len;
+ data->dhvlen = cpu_to_le16(chap->host_key_len);
+ memcpy(data->rval, chap->response, chap->hash_len);
+ if (ctrl->ctrl_key) {
+ get_random_bytes(chap->c2, chap->hash_len);
+ data->cvalid = 1;
+ chap->s2 = nvme_auth_get_seqnum();
+ memcpy(data->rval + chap->hash_len, chap->c2,
+ chap->hash_len);
+ dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, chap->c2);
+ } else {
+ memset(chap->c2, 0, chap->hash_len);
+ chap->s2 = 0;
+ }
+ data->seqnum = cpu_to_le32(chap->s2);
+ if (chap->host_key_len) {
+ dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
+ __func__, chap->qid,
+ chap->host_key_len, chap->host_key);
+ memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
+ chap->host_key_len);
+ }
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ if (ctrl->ctrl_key)
+ size += chap->hash_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (data->hl != chap->hash_len) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %u\n",
+ chap->qid, data->hl);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: authenticated with hash %s dhgroup %s\n",
+ nvme_auth_hmac_name(chap->hash_id),
+ nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+ if (!data->rvalid)
+ return 0;
+
+ /* Validate controller response */
+ if (memcmp(chap->response, data->rval, data->hl)) {
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, data->rval);
+ dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len,
+ chap->response);
+ dev_warn(ctrl->device,
+ "qid %d: controller authentication failed\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: controller authenticated\n");
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ data->t_id = cpu_to_le16(chap->transaction);
+
+ return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = chap->status;
+
+ return size;
+}
+
+static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 buf[4], *challenge = chap->c1;
+ int ret;
+
+ dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s1, chap->transaction);
+
+ if (!chap->host_response) {
+ chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->opts->host->nqn);
+ if (IS_ERR(chap->host_response)) {
+ ret = PTR_ERR(chap->host_response);
+ chap->host_response = NULL;
+ return ret;
+ }
+ } else {
+ dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+ __func__, chap->qid);
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ chap->host_response, ctrl->host_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c1)
+ kfree(challenge);
+ return ret;
+}
+
+static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 *ctrl_response;
+ u8 buf[4], *challenge = chap->c2;
+ int ret;
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->opts->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ return ret;
+ }
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ ctrl_response, ctrl->ctrl_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c2, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s2, chap->transaction);
+ dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, challenge);
+ dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+ __func__, chap->qid, ctrl->opts->subsysnqn);
+ dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+ __func__, chap->qid, ctrl->opts->host->nqn);
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c2)
+ kfree(challenge);
+ kfree(ctrl_response);
+ return ret;
+}
+
+static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ int ret;
+
+ if (chap->host_key && chap->host_key_len) {
+ dev_dbg(ctrl->device,
+ "qid %d: reusing host key\n", chap->qid);
+ goto gen_sesskey;
+ }
+ ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
+ if (ret < 0) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+ chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
+
+ chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
+ if (!chap->host_key) {
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(chap->dh_tfm,
+ chap->host_key, chap->host_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate public key, error %d\n", ret);
+ kfree(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+gen_sesskey:
+ chap->sess_key_len = chap->host_key_len;
+ chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
+ if (!chap->sess_key) {
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
+ chap->ctrl_key, chap->ctrl_key_len,
+ chap->sess_key, chap->sess_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate shared secret, error %d\n", ret);
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "shared secret %*ph\n",
+ (int)chap->sess_key_len, chap->sess_key);
+ return 0;
+}
+
+static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+{
+ kfree_sensitive(chap->host_response);
+ chap->host_response = NULL;
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = 0;
+ chap->error = 0;
+ chap->s1 = 0;
+ chap->s2 = 0;
+ chap->transaction = 0;
+ memset(chap->c1, 0, sizeof(chap->c1));
+ memset(chap->c2, 0, sizeof(chap->c2));
+}
+
+static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+{
+ __nvme_auth_reset(chap);
+ if (chap->shash_tfm)
+ crypto_free_shash(chap->shash_tfm);
+ if (chap->dh_tfm)
+ crypto_free_kpp(chap->dh_tfm);
+ kfree_sensitive(chap->ctrl_key);
+ kfree_sensitive(chap->host_key);
+ kfree_sensitive(chap->sess_key);
+ kfree_sensitive(chap->host_response);
+ kfree(chap->buf);
+ kfree(chap);
+}
+
+static void __nvme_auth_work(struct work_struct *work)
+{
+ struct nvme_dhchap_queue_context *chap =
+ container_of(work, struct nvme_dhchap_queue_context, auth_work);
+ struct nvme_ctrl *ctrl = chap->ctrl;
+ size_t tl;
+ int ret = 0;
+
+ chap->transaction = ctrl->transaction++;
+
+ /* DH-HMAC-CHAP Step 1: send negotiate */
+ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ return;
+ }
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ return;
+ }
+
+ /* DH-HMAC-CHAP Step 2: receive challenge */
+ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive challenge, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+ if (ret) {
+ /* Invalid challenge parameters */
+ chap->error = ret;
+ goto fail2;
+ }
+
+ if (chap->ctrl_key_len) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d DH exponential\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_exponential(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ dev_dbg(ctrl->device, "%s: qid %d host response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 3: send reply */
+ dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 4: receive success1 */
+ dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive success1, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid,
+ chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ if (ctrl->ctrl_key) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d controller response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+ if (ret) {
+ /* Controller authentication failed */
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ goto fail2;
+ }
+
+ if (ctrl->ctrl_key) {
+ /* DH-HMAC-CHAP Step 5: send success2 */
+ dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ __func__, chap->qid);
+ tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret)
+ chap->error = ret;
+ }
+ if (!ret) {
+ chap->error = 0;
+ return;
+ }
+
+fail2:
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ /*
+  * Only update the error if sending failure2 failed and no other
+  * error has been set during authentication.
+ */
+ if (ret && !chap->error)
+ chap->error = ret;
+}
+
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ if (!ctrl->host_key) {
+ dev_warn(ctrl->device, "qid %d: no key\n", qid);
+ return -ENOKEY;
+ }
+
+ if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
+ dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
+ return -ENOKEY;
+ }
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ /* Check if the context is already queued */
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ WARN_ON(!chap->buf);
+ if (chap->qid == qid) {
+ dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+ }
+ }
+ chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+ if (!chap) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENOMEM;
+ }
+ chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
+ chap->ctrl = ctrl;
+
+ /*
+ * Allocate a large enough buffer for the entire negotiation:
+  * 4k should be enough even for ffdhe8192.
+ */
+ chap->buf_size = 4096;
+ chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
+ if (!chap->buf) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ kfree(chap);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&chap->auth_work, __nvme_auth_work);
+ list_add(&chap->entry, &ctrl->dhchap_auth_list);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int ret;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ if (chap->qid != qid)
+ continue;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ return ret;
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+void nvme_auth_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_reset);
+
+static void nvme_dhchap_auth_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, dhchap_auth_work);
+ int ret, q;
+
+ /* Authenticate admin queue first */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: error %d setting up authentication\n", ret);
+ return;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ return;
+ }
+
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_negotiate(ctrl, q);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: error %d setting up authentication\n",
+ q, ret);
+ break;
+ }
+ }
+
+ /*
+  * Failure is a soft state; credentials remain valid until
+ * the controller terminates the connection.
+ */
+}
+
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+ mutex_init(&ctrl->dhchap_auth_mutex);
+ if (!ctrl->opts)
+ return;
+ nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
+ nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ cancel_work_sync(&ctrl->dhchap_auth_work);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+ cancel_work_sync(&chap->auth_work);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+ list_del_init(&chap->entry);
+ flush_work(&chap->auth_work);
+ __nvme_auth_free(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
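To make the byte layout easier to follow: the host response built step by step in nvme_auth_dhchap_setup_host_response() above is, for the NULL DH group, R1 = HMAC(K', C1 || le32(SC) || le16(TID) || 0x00 || "HostHost" || NQNh || 0x00 || NQNc), where K' is the secret transformed by nvme_auth_transform_key() (HMAC of the host NQN plus "NVMe-over-Fabrics", keyed with the secret, when a transformation hash is set). For non-NULL DH groups the challenge C1 is first replaced by the augmented challenge from nvme_auth_augmented_challenge(). The user-space sketch below mirrors that layout with SHA-256; it is not part of this patch, OpenSSL's HMAC() is an assumed stand-in for the kernel shash API, and all input values are placeholders.

/*
 * Illustrative sketch of the DH-HMAC-CHAP host response layout.
 * Assumed build: cc dhchap_response.c -lcrypto
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/sha.h>

int main(void)
{
	const char *host_nqn = "nqn.2014-08.org.nvmexpress:uuid:host";	/* placeholder */
	const char *subsys_nqn = "nqn.2014-08.org.nvmexpress:uuid:subsys";	/* placeholder */
	unsigned char secret[32] = { 0 };	/* decoded DHCHAP key, placeholder */
	unsigned char c1[32] = { 0 };		/* controller challenge, placeholder */
	uint32_t seqnum = 1;			/* SC from the challenge */
	uint16_t tid = 1;			/* transaction ID */
	unsigned char key[SHA256_DIGEST_LENGTH], resp[SHA256_DIGEST_LENGTH];
	unsigned char msg[512];
	unsigned int klen, rlen;
	size_t off;

	/* Step 1: transform the secret (cf. nvme_auth_transform_key()). */
	off = 0;
	memcpy(msg + off, host_nqn, strlen(host_nqn));	off += strlen(host_nqn);
	memcpy(msg + off, "NVMe-over-Fabrics", 17);	off += 17;
	HMAC(EVP_sha256(), secret, sizeof(secret), msg, off, key, &klen);

	/* Step 2: concatenate the response message in the order used above. */
	off = 0;
	memcpy(msg + off, c1, sizeof(c1));		off += sizeof(c1);
	msg[off++] = seqnum & 0xff;			/* le32(SC) */
	msg[off++] = (seqnum >> 8) & 0xff;
	msg[off++] = (seqnum >> 16) & 0xff;
	msg[off++] = (seqnum >> 24) & 0xff;
	msg[off++] = tid & 0xff;			/* le16(TID) */
	msg[off++] = (tid >> 8) & 0xff;
	msg[off++] = 0x00;				/* no secure channel concatenation */
	memcpy(msg + off, "HostHost", 8);		off += 8;
	memcpy(msg + off, host_nqn, strlen(host_nqn));	off += strlen(host_nqn);
	msg[off++] = 0x00;				/* NUL separator between the NQNs */
	memcpy(msg + off, subsys_nqn, strlen(subsys_nqn)); off += strlen(subsys_nqn);

	HMAC(EVP_sha256(), key, klen, msg, off, resp, &rlen);

	for (unsigned int i = 0; i < rlen; i++)
		printf("%02x", resp[i]);
	printf("\n");
	return 0;
}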
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 4910543f00ff..e958d5015585 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -6,7 +6,6 @@
#include "nvme.h"
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
static const char * const nvme_ops[] = {
[nvme_cmd_flush] = "Flush",
[nvme_cmd_write] = "Write",
@@ -178,6 +177,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
return nvme_ops[opcode];
return "Unknown";
}
+EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
@@ -185,4 +185,3 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
return nvme_admin_ops[opcode];
return "Unknown";
}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ec6ac298d8de..af367b22871b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -24,12 +24,22 @@
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS (1U << MINORBITS)
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+};
+
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
@@ -330,6 +340,7 @@ enum nvme_disposition {
COMPLETE,
RETRY,
FAILOVER,
+ AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -337,6 +348,9 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
@@ -375,11 +389,13 @@ static inline void nvme_end_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ if (ctrl->kas)
+ ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
case COMPLETE:
@@ -391,6 +407,14 @@ void nvme_complete_rq(struct request *req)
case FAILOVER:
nvme_failover_req(req);
return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -418,7 +442,7 @@ blk_status_t nvme_host_path_error(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+bool nvme_cancel_request(struct request *req, void *data)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
@@ -702,7 +726,9 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
- req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
return true;
break;
default:
@@ -990,8 +1016,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1000,15 +1025,12 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
+ qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
- if (timeout)
- req->timeout = timeout;
-
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
if (ret)
@@ -1028,7 +1050,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1329,8 +1351,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
}
}
-static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
struct nvme_command c = { };
bool csi_seen = false;
@@ -1343,7 +1365,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
return 0;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.nsid = cpu_to_le32(info->nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
@@ -1355,7 +1377,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
- nsid, status);
+ info->nsid, status);
goto free_data;
}
@@ -1365,7 +1387,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
if (len < 0)
break;
@@ -1374,7 +1396,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (nvme_multi_css(ctrl) && !csi_seen) {
dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
- nsid);
+ info->nsid);
status = -EINVAL;
}
@@ -1384,7 +1406,7 @@ free_data:
}
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids, struct nvme_id_ns **id)
+ struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1407,51 +1429,66 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
error = NVME_SC_INVALID_NS | NVME_SC_DNR;
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
+ return 0;
+out_free_id:
+ kfree(*id);
+ return error;
+}
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
dev_info(ctrl->device,
"Ignoring bogus Namespace Identifiers\n");
} else {
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0) &&
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
-
+ kfree(id);
return 0;
-
-out_free_id:
- kfree(*id);
- return error;
}
-static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_id_ns_cs_indep **id)
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
+ struct nvme_id_ns_cs_indep *id;
struct nvme_command c = {
.identify.opcode = nvme_admin_identify,
- .identify.nsid = cpu_to_le32(nsid),
+ .identify.nsid = cpu_to_le32(info->nsid),
.identify.cns = NVME_ID_CNS_NS_CS_INDEP,
};
int ret;
- *id = kmalloc(sizeof(**id), GFP_KERNEL);
- if (!*id)
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
return -ENOMEM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
- if (ret) {
- dev_warn(ctrl->device,
- "Identify namespace (CS independent) failed (%d)\n",
- ret);
- kfree(*id);
- return ret;
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
}
-
- return 0;
+ kfree(id);
+ return ret;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
@@ -1466,7 +1503,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1875,6 +1912,11 @@ static void nvme_update_disk_info(struct gendisk *disk,
ns->ctrl->max_zeroes_sectors);
}
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
+
static inline bool nvme_first_scan(struct gendisk *disk)
{
/* nvme_alloc_ns() scans the disk prior to adding it */
@@ -1912,12 +1954,44 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
{
- unsigned lbaf = nvme_lbaf_index(id->flbas);
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ /* Hide the block interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
int ret;
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
@@ -1927,36 +2001,35 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
- if (ret)
- goto out_unfreeze;
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
}
- set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- return ret;
+ goto out;
}
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
- set_disk_ro(ns->head->disk,
- (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
- return 0;
-out_unfreeze:
+ ret = 0;
+out:
/*
* If probing fails due to an unsupported feature, hide the block device,
* but still allow other access.
@@ -1966,10 +2039,31 @@ out_unfreeze:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
- blk_mq_unfreeze_queue(ns->disk->queue);
+ kfree(id);
return ret;
}
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -2103,7 +2197,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
@@ -2123,6 +2217,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -3613,6 +3708,108 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
+ if (ret) {
+ kfree(dhchap_secret);
+ return ret;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3636,6 +3833,10 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
NULL
};
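A side note on the secret format enforced above: both the new dhchap_secret/dhchap_ctrl_secret sysfs attributes and the fabrics option parser only accept ASCII secrets carrying the "DHHC-1:" prefix used for DH-HMAC-CHAP. A minimal userspace sketch of that check follows; the helper name and the sample value are illustrative and not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the checks applied by the store handlers (prefix) and the
 * fabrics option parser (minimum length); helper name is illustrative. */
static bool dhchap_secret_looks_valid(const char *buf, size_t len)
{
	if (len < 11)			/* shortest secret the parser accepts */
		return false;
	if (memcmp(buf, "DHHC-1:", 7))	/* mandatory DH-HMAC-CHAP prefix */
		return false;
	return true;
}

int main(void)
{
	const char *s = "DHHC-1:00:ABCDEFGHIJ:";	/* sample value, not a real key */

	printf("%s -> %s\n", s,
	       dhchap_secret_looks_valid(s, strlen(s)) ? "ok" : "invalid");
	return 0;
}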
@@ -3659,6 +3860,12 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
return a->mode;
}
@@ -3786,7 +3993,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_ns_ids *ids)
+ struct nvme_ns_info *info)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3808,8 +4015,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
if (ret)
goto out_ida_remove;
head->subsys = ctrl->subsys;
- head->ns_id = nsid;
- head->ids = *ids;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3866,55 +4074,54 @@ static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
return ret;
}
-static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_ns_ids *ids, bool is_shared)
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
int ret;
- ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
- "globally duplicate IDs for nsid %d\n", nsid);
+ "globally duplicate IDs for nsid %d\n", info->nsid);
nvme_print_device_info(ctrl);
return ret;
}
mutex_lock(&ctrl->subsys->lock);
- head = nvme_find_ns_head(ctrl, nsid);
+ head = nvme_find_ns_head(ctrl, info->nsid);
if (!head) {
- ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs in subsystem for nsid %d\n",
- nsid);
+ info->nsid);
goto out_unlock;
}
- head = nvme_alloc_ns_head(ctrl, nsid, ids);
+ head = nvme_alloc_ns_head(ctrl, info);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
- head->shared = is_shared;
} else {
ret = -EINVAL;
- if (!is_shared || !head->shared) {
+ if (!info->is_shared || !head->shared) {
dev_err(ctrl->device,
- "Duplicate unshared namespace %d\n", nsid);
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
goto out_put_ns_head;
}
- if (!nvme_ns_ids_equal(&head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
- nsid);
+ info->nsid);
goto out_put_ns_head;
}
if (!multipath && !list_empty(&head->list)) {
dev_warn(ctrl->device,
"Found shared namespace %d, but multipathing not supported.\n",
- nsid);
+ info->nsid);
dev_warn_once(ctrl->device,
"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
}
@@ -3968,20 +4175,15 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_add(&ns->list, &ns->ctrl->namespaces);
}
-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct nvme_ns *ns;
struct gendisk *disk;
- struct nvme_id_ns *id;
int node = ctrl->numa_node;
- if (nvme_identify_ns(ctrl, nsid, ids, &id))
- return;
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- goto out_free_id;
+ return;
disk = blk_mq_alloc_disk(ctrl->tagset, ns);
if (IS_ERR(disk))
@@ -3996,13 +4198,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
ns->ctrl = ctrl;
kref_init(&ns->kref);
- if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
+ if (nvme_init_ns_head(ns, info))
goto out_cleanup_disk;
/*
@@ -4028,7 +4231,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->head->instance);
}
- if (nvme_update_ns_info(ns, id))
+ if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
@@ -4042,9 +4245,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
- nvme_mpath_add_disk(ns, id);
+ nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
- kfree(id);
return;
@@ -4061,11 +4263,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_ns:
kfree(ns);
- out_free_id:
- kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4103,7 +4303,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
- blk_cleanup_queue(ns->queue);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
@@ -4124,29 +4323,21 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
}
}
-static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- struct nvme_id_ns *id;
int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
if (test_bit(NVME_NS_DEAD, &ns->flags))
goto out;
- ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
- if (ret)
- goto out;
-
ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
- if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
- goto out_free_id;
+ goto out;
}
- ret = nvme_update_ns_info(ns, id);
-
-out_free_id:
- kfree(id);
+ ret = nvme_update_ns_info(ns, info);
out:
/*
* Only remove the namespace if we got a fatal error back from the
@@ -4158,59 +4349,47 @@ out:
nvme_ns_remove(ns);
}
-static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns_ids ids = { };
- struct nvme_id_ns_cs_indep *id;
+ struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- bool ready = true;
- if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ if (nvme_identify_ns_descs(ctrl, &info))
return;
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
/*
- * Check if the namespace is ready. If not ignore it, we will get an
- * AEN once it becomes ready and restart the scan.
+ * If available, try to use the Command Set Independent Identify Namespace
+ * data structure to find all the generic information that is needed to
+ * set up a namespace. If not fall back to the legacy version.
*/
- if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
- !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
- ready = id->nstat & NVME_NSTAT_NRDY;
- kfree(id);
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+ if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+ return;
+ } else {
+ if (nvme_ns_info_from_identify(ctrl, &info))
+ return;
}
- if (!ready)
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (!info.is_ready)
return;
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- nvme_validate_ns(ns, &ids);
+ nvme_validate_ns(ns, &info);
nvme_put_ns(ns);
- return;
- }
-
- switch (ids.csi) {
- case NVME_CSI_NVM:
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- case NVME_CSI_ZNS:
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
- dev_warn(ctrl->device,
- "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
- nsid);
- break;
- }
- if (!nvme_multi_css(ctrl)) {
- dev_warn(ctrl->device,
- "command set not reported for nsid: %d\n",
- nsid);
- break;
- }
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- default:
- dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
- ids.csi, nsid);
- break;
+ } else {
+ nvme_alloc_ns(ctrl, &info);
}
}
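The rework above boils the scan-time decision down to one predicate: use the Command Set Independent Identify Namespace data when the controller advertises CAP.CRMS.CRIMS, or when the namespace uses a command set other than NVM or ZNS, for which the legacy Identify data does not apply. A standalone sketch of that predicate, with the CSI values taken from the NVMe spec and an illustrative function name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { CSI_NVM = 0, CSI_ZNS = 2 };	/* NVMe Command Set Identifiers */

/* crims: controller sets CAP.CRMS.CRIMS (ready independent of media) */
static bool use_cs_independent_identify(bool crims, uint8_t csi)
{
	return crims || (csi != CSI_NVM && csi != CSI_ZNS);
}

int main(void)
{
	printf("%d %d %d\n",
	       use_cs_independent_identify(true, CSI_NVM),	/* 1 */
	       use_cs_independent_identify(false, CSI_ZNS),	/* 0: legacy Identify path */
	       use_cs_independent_identify(false, 1));		/* 1: non-NVM/ZNS command set */
	return 0;
}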
@@ -4266,7 +4445,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
if (!nsid) /* end of the list? */
goto out;
- nvme_validate_or_alloc_ns(ctrl, nsid);
+ nvme_scan_ns(ctrl, nsid);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
@@ -4289,7 +4468,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_or_alloc_ns(ctrl, i);
+ nvme_scan_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -4526,9 +4705,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ u32 aer_notice_type = nvme_aer_subtype(result);
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4543,8 +4732,10 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
- if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ nvme_auth_stop(ctrl);
queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -4561,11 +4752,19 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
@@ -4575,6 +4774,15 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
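For reference, the new nvme_aer_type()/nvme_aer_subtype() helpers decode the AER completion result dword as laid out in the NVMe spec: event type in bits 2:0 and event information (the subtype) in bits 15:8. A standalone sketch, with the example value chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

static unsigned int aer_type(uint32_t result)    { return result & 0x7; }
static unsigned int aer_subtype(uint32_t result) { return (result & 0xff00) >> 8; }

int main(void)
{
	uint32_t result = 0x0102;	/* example: notice (type 2), fw activation (subtype 1) */

	printf("type=%u subtype=%u\n", aer_type(result), aer_subtype(result));
	return 0;
}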
@@ -4591,6 +4799,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
@@ -4650,6 +4859,8 @@ static void nvme_free_ctrl(struct device *dev)
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4740,6 +4951,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
nvme_mpath_init_ctrl(ctrl);
+ nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..10cc4a814602 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -198,7 +198,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
@@ -270,6 +270,12 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
{
int err_sctype = errval & ~NVME_SC_DNR;
+ if (errval < 0) {
+ dev_err(ctrl->device,
+ "Connect command failed, errno: %d\n", errval);
+ return;
+ }
+
switch (err_sctype) {
case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
@@ -331,6 +337,10 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
dev_err(ctrl->device,
"Connect command failed: host path error\n");
break;
+ case NVME_SC_AUTH_REQUIRED:
+ dev_err(ctrl->device,
+ "Connect command failed: authentication required\n");
+ break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
@@ -365,6 +375,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -389,7 +400,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -397,8 +408,25 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(res.u16);
-
+ result = le32_to_cpu(res.u32);
+ ctrl->cntlid = result & 0xFFFF;
+ if ((result >> 16) & 0x3) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication setup failed\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ else
+ dev_info(ctrl->device,
+ "qid 0: authenticated\n");
+ }
out_free_data:
kfree(data);
return ret;
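The admin Connect handling above now consumes the full 32-bit result rather than just a 16-bit controller ID: bits 15:0 carry CNTLID and bits 17:16 carry AUTHREQ, which, when non-zero, makes the host run DH-HMAC-CHAP negotiation before using the queue. A standalone sketch of the decoding, with an illustrative example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t result = 0x00010042;		/* example value, illustrative only */
	uint16_t cntlid = result & 0xFFFF;	/* bits 15:0: controller ID */
	uint32_t authreq = (result >> 16) & 0x3; /* bits 17:16: AUTHREQ */

	printf("cntlid=%u, authentication %s\n", (unsigned int)cntlid,
	       authreq ? "required" : "not required");
	return 0;
}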
@@ -431,6 +459,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -450,12 +479,27 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 0, qid, 1,
+ data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
+ result = le32_to_cpu(res.u32);
+ if ((result >> 16) & 2) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: authentication setup failed\n", qid);
+ ret = NVME_SC_AUTH_REQUIRED;
+ } else {
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed\n", qid);
+ }
+ }
kfree(data);
return ret;
}
@@ -548,6 +592,8 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -829,6 +875,34 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
+ case NVMF_OPT_DHCHAP_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = p;
+ break;
+ case NVMF_OPT_DHCHAP_CTRL_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -947,6 +1021,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
kfree(opts->host_iface);
+ kfree(opts->dhchap_secret);
+ kfree(opts->dhchap_ctrl_secret);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -956,7 +1032,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
- NVMF_OPT_FAIL_FAST_TMO)
+ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+ NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1159,7 +1236,7 @@ static int __init nvmf_init(void)
nvmf_device =
device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
- pr_err("couldn't create nvme-fabris device!\n");
+ pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
goto out_destroy_class;
}
@@ -1192,7 +1269,14 @@ static void __exit nvmf_exit(void)
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..a6e22116e139 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,8 @@ enum {
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_DHCHAP_SECRET = 1 << 23,
+ NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
};
/**
@@ -97,6 +99,9 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -121,6 +126,8 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3c778bb0c294..127abaf9ba5d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
unsigned long flags;
if (ctrl->ctrl.tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(&ctrl->tag_set);
}
@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref)
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
nvme_start_admin_queue(&ctrl->ctrl);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
kfree(ctrl->queues);
@@ -2456,8 +2456,7 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
* status. The done path will return the io request back to the block
* layer with an error status.
*/
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+static bool nvme_fc_terminate_exchange(struct request *req, void *data)
{
struct nvme_ctrl *nctrl = data;
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -2534,6 +2533,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ if (start_queues)
+ nvme_start_admin_queue(&ctrl->ctrl);
}
static void
@@ -2565,8 +2566,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
nvme_reset_ctrl(&ctrl->ctrl);
}
-static enum blk_eh_timer_return
-nvme_fc_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
@@ -2953,7 +2953,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
out_delete_hw_queues:
nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
blk_mq_free_tag_set(&ctrl->tag_set);
nvme_fc_free_io_queues(ctrl);
@@ -3642,9 +3642,9 @@ fail_ctrl:
return ERR_PTR(-EIO);
out_cleanup_admin_q:
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_admin_tag_set:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
@@ -3880,6 +3880,7 @@ static int fc_parse_cgrpid(const char *buf, u64 *id)
static ssize_t fc_appid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
+ size_t orig_count = count;
u64 cgrp_id;
int appid_len = 0;
int cgrpid_len = 0;
@@ -3904,7 +3905,7 @@ static ssize_t fc_appid_store(struct device *dev,
ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
if (ret < 0)
return ret;
- return count;
+ return orig_count;
}
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
#endif /* CONFIG_BLK_CGROUP_FC_APPID */
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a2e89db1cd63..27614bee7380 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -68,7 +68,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, void **metap, unsigned timeout, bool vec,
- unsigned int rq_flags, blk_mq_req_flags_t blk_flags)
+ blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -407,7 +407,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_uring_data d;
struct nvme_command c;
struct request *req;
- unsigned int rq_flags = 0;
+ blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d3e2440d8abb..6ef497c75a16 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -346,7 +346,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
* different queue via blk_steal_bios(), so we need to use the bio_split
* pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
@@ -408,6 +408,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
@@ -800,16 +801,16 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
return -ENXIO; /* just break out of the loop */
}
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
struct nvme_ana_group_desc desc = {
- .grpid = id->anagrpid,
+ .grpid = anagrpid,
.state = 0,
};
mutex_lock(&ns->ctrl->ana_lock);
- ns->ana_grpid = le32_to_cpu(id->anagrpid);
+ ns->ana_grpid = le32_to_cpu(anagrpid);
nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
if (desc.state) {
@@ -830,7 +831,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
- ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+ ns->head->disk->nr_zones = ns->disk->nr_zones;
#endif
}
@@ -853,7 +854,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
- blk_cleanup_disk(head->disk);
+ put_disk(head->disk);
}
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5558f8812157..1bdf714dcd9e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -140,7 +140,7 @@ enum nvme_quirks {
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
/*
- * The controller requires the command_id value be be limited, so skip
+ * The controller requires the command_id value be limited, so skip
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
@@ -328,6 +328,15 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
+#ifdef CONFIG_NVME_AUTH
+ struct work_struct dhchap_auth_work;
+ struct list_head dhchap_auth_list;
+ struct mutex dhchap_auth_mutex;
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u16 transaction;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -495,7 +504,6 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
-#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -505,6 +513,7 @@ struct nvme_ctrl_ops {
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
void (*print_device_info)(struct nvme_ctrl *ctrl);
+ bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};
/*
@@ -698,7 +707,7 @@ static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
}
blk_status_t nvme_host_path_error(struct request *req);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -734,7 +743,7 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}
@@ -781,7 +790,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
+ int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
@@ -837,7 +846,7 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
@@ -879,8 +888,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
- struct nvme_id_ns *id)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -992,6 +1000,27 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
+#ifdef CONFIG_NVME_AUTH
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_reset(struct nvme_ctrl *ctrl);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+#endif
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 193b44755662..3a1c37f32f30 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -230,11 +230,10 @@ struct nvme_iod {
bool use_sgl;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
- int nents; /* Used in scatterlist */
dma_addr_t first_dma;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t meta_dma;
- struct scatterlist *sg;
+ struct sg_table sgt;
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
}
}
-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
return;
}
- WARN_ON_ONCE(!iod->nents);
+ WARN_ON_ONCE(!iod->sgt.nents);
+
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- nvme_unmap_sg(dev, req);
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
iod->first_dma);
@@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
nvme_free_sgls(dev, req);
else
nvme_free_prps(dev, req);
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -670,7 +659,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->first_dma = dma_addr;
iod->npages = -1;
return BLK_STS_RESOURCE;
}
@@ -703,16 +691,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_len = sg_dma_len(sg);
}
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
nvme_free_prps(dev, req);
return BLK_STS_RESOURCE;
bad_sgl:
- WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
"Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->nents);
+ blk_rq_payload_bytes(req), iod->sgt.nents);
return BLK_STS_IOERR;
}
@@ -738,12 +726,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd, int entries)
+ struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
+ unsigned int entries = iod->sgt.nents;
dma_addr_t sgl_dma;
int i = 0;
@@ -841,7 +830,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
- int nr_mapped;
+ int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
struct bio_vec bv = req_bvec(req);
@@ -859,26 +848,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
iod->dma_len = 0;
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
+ iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
- if (!iod->nents)
+ sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+ iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ if (!iod->sgt.orig_nents)
goto out_free_sg;
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
- iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
- else
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req), DMA_ATTR_NO_WARN);
- if (!nr_mapped)
+ rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+ DMA_ATTR_NO_WARN);
+ if (rc) {
+ if (rc == -EREMOTEIO)
+ ret = BLK_STS_TARGET;
goto out_free_sg;
+ }
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
if (ret != BLK_STS_OK)
@@ -886,9 +874,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return BLK_STS_OK;
out_unmap_sg:
- nvme_unmap_sg(dev, req);
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
return ret;
}
@@ -912,7 +900,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
iod->aborted = 0;
iod->npages = -1;
- iod->nents = 0;
+ iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -1344,7 +1332,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
"Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
@@ -1435,8 +1423,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ "I/O %d (%s) QID %d timeout, aborting\n",
+ req->tag,
+ nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+ nvmeq->qid);
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -1760,42 +1750,40 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
* queue to flush these to completion.
*/
nvme_start_admin_queue(&dev->ctrl);
- blk_cleanup_queue(dev->ctrl.admin_q);
+ blk_mq_destroy_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
}
-static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
{
- if (!dev->ctrl.admin_q) {
- dev->admin_tagset.ops = &nvme_mq_admin_ops;
- dev->admin_tagset.nr_hw_queues = 1;
+ struct blk_mq_tag_set *set = &dev->admin_tagset;
- dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev->ctrl.numa_node;
- dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
- dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- dev->admin_tagset.driver_data = dev;
+ set->ops = &nvme_mq_admin_ops;
+ set->nr_hw_queues = 1;
- if (blk_mq_alloc_tag_set(&dev->admin_tagset))
- return -ENOMEM;
- dev->ctrl.admin_tagset = &dev->admin_tagset;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_NO_SCHED;
+ set->driver_data = dev;
- dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (IS_ERR(dev->ctrl.admin_q)) {
- blk_mq_free_tag_set(&dev->admin_tagset);
- dev->ctrl.admin_q = NULL;
- return -ENOMEM;
- }
- if (!blk_get_queue(dev->ctrl.admin_q)) {
- nvme_dev_remove_admin(dev);
- dev->ctrl.admin_q = NULL;
- return -ENODEV;
- }
- } else
- nvme_start_admin_queue(&dev->ctrl);
+ if (blk_mq_alloc_tag_set(set))
+ return -ENOMEM;
+ dev->ctrl.admin_tagset = set;
+ dev->ctrl.admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(dev->ctrl.admin_q)) {
+ blk_mq_free_tag_set(set);
+ dev->ctrl.admin_q = NULL;
+ return -ENOMEM;
+ }
+ if (!blk_get_queue(dev->ctrl.admin_q)) {
+ nvme_dev_remove_admin(dev);
+ dev->ctrl.admin_q = NULL;
+ return -ENODEV;
+ }
return 0;
}
@@ -2534,47 +2522,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
{
+ struct blk_mq_tag_set *set = &dev->tagset;
int ret;
- if (!dev->ctrl.tagset) {
- dev->tagset.ops = &nvme_mq_ops;
- dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 2; /* default + read */
- if (dev->io_queues[HCTX_TYPE_POLL])
- dev->tagset.nr_maps++;
- dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev->ctrl.numa_node;
- dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
- BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = sizeof(struct nvme_iod);
- dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
- dev->tagset.driver_data = dev;
-
- /*
- * Some Apple controllers requires tags to be unique
- * across admin and IO queue, so reserve the first 32
- * tags of the IO queue.
- */
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+ set->ops = &nvme_mq_ops;
+ set->nr_hw_queues = dev->online_queues - 1;
+ set->nr_maps = 2; /* default + read */
+ if (dev->io_queues[HCTX_TYPE_POLL])
+ set->nr_maps++;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->driver_data = dev;
- ret = blk_mq_alloc_tag_set(&dev->tagset);
- if (ret) {
- dev_warn(dev->ctrl.device,
- "IO queues tagset allocation failed %d\n", ret);
- return;
- }
- dev->ctrl.tagset = &dev->tagset;
- } else {
- blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /*
+ * Some Apple controllers require tags to be unique
+ * across admin and IO queue, so reserve the first 32
+ * tags of the IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, dev->online_queues);
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return;
}
+ dev->ctrl.tagset = set;
+}
- nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2690,8 +2676,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
struct pci_dev *pdev = to_pci_dev(dev->dev);
mutex_lock(&dev->shutdown_lock);
- if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
- u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ if (pci_is_enabled(pdev)) {
+ u32 csts;
+
+ if (pci_device_is_present(pdev))
+ csts = readl(dev->bar + NVME_REG_CSTS);
+ else
+ csts = ~0;
if (dev->ctrl.state == NVME_CTRL_LIVE ||
dev->ctrl.state == NVME_CTRL_RESETTING) {
@@ -2720,10 +2711,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_pci_disable(dev);
nvme_reap_pending_cqes(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_wait_completed_request(&dev->tagset);
- blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
+ nvme_cancel_tagset(&dev->ctrl);
+ nvme_cancel_admin_tagset(&dev->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -2837,9 +2826,13 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out_unlock;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto out_unlock;
+ if (!dev->ctrl.admin_q) {
+ result = nvme_pci_alloc_admin_tag_set(dev);
+ if (result)
+ goto out_unlock;
+ } else {
+ nvme_start_admin_queue(&dev->ctrl);
+ }
/*
* Limit the max command size to prevent iod->sg allocations going
@@ -2918,7 +2911,11 @@ static void nvme_reset_work(struct work_struct *work)
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ if (!dev->ctrl.tagset)
+ nvme_pci_alloc_tag_set(dev);
+ else
+ nvme_pci_update_nr_queues(dev);
+ nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
}
@@ -2984,7 +2981,6 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
-
static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
@@ -2999,11 +2995,17 @@ static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
subsys->firmware_rev);
}
+static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ return dma_pci_p2pdma_supported(dev->dev);
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED |
- NVME_F_PCI_P2PDMA,
+ .flags = NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -3011,6 +3013,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
+ .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -3508,8 +3511,12 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 46c2dcf72f7e..3100643be299 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -29,7 +29,7 @@
#include "fabrics.h"
-#define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */
+#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 seconds */
#define NVME_RDMA_MAX_SEGMENTS 256
@@ -248,12 +248,9 @@ static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
int ret;
- ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
- msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
- if (ret < 0)
+ ret = wait_for_completion_interruptible(&queue->cm_done);
+ if (ret)
return ret;
- if (ret == 0)
- return -ETIMEDOUT;
WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
@@ -612,7 +609,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cm_error = -ETIMEDOUT;
ret = rdma_resolve_addr(queue->cm_id, src_addr,
(struct sockaddr *)&ctrl->addr,
- NVME_RDMA_CONNECT_TIMEOUT_MS);
+ NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_info(ctrl->ctrl.device,
"rdma_resolve_addr failed (%d).\n", ret);
@@ -790,58 +787,62 @@ out_free_queues:
return ret;
}
-static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- if (nctrl->max_integrity_segments)
- set->cmd_size += sizeof(struct nvme_rdma_sgl) +
- NVME_RDMA_METADATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ ctrl->ctrl.admin_tagset = set;
+ return ret;
+}
+
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
- return set;
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ if (nctrl->max_integrity_segments)
+ set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ ctrl->ctrl.tagset = set;
+ return ret;
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (remove) {
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
@@ -885,11 +886,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset)) {
- error = PTR_ERR(ctrl->ctrl.admin_tagset);
+ error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+ if (error)
goto out_free_async_qe;
- }
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.fabrics_q)) {
@@ -935,10 +934,10 @@ out_stop_queue:
nvme_cancel_admin_tagset(&ctrl->ctrl);
out_cleanup_queue:
if (new)
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
if (new)
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
if (new)
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -957,7 +956,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (remove) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
@@ -972,11 +971,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset)) {
- ret = PTR_ERR(ctrl->ctrl.tagset);
+ ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
@@ -1012,7 +1009,7 @@ out_wait_freeze_timed_out:
out_cleanup_connect_q:
nvme_cancel_tagset(&ctrl->ctrl);
if (new)
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
blk_mq_free_tag_set(ctrl->ctrl.tagset);
@@ -1205,6 +1202,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
@@ -1894,7 +1892,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
if (ctrl->opts->tos >= 0)
rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
- ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
queue->cm_error);
@@ -2021,8 +2019,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
nvmf_complete_timed_out_request(rq);
}
-static enum blk_eh_timer_return
-nvme_rdma_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7a9e6ffa2342..044da18c06f5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -209,9 +209,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
- return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+ if (nvme_is_fabrics(req->req.cmd))
+ return NVME_TCP_ADMIN_CCSZ;
+ return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -229,7 +231,7 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
rq = blk_mq_rq_from_pdu(req);
return rq_data_dir(rq) == WRITE && req->data_len &&
- req->data_len <= nvme_tcp_inline_data_size(req->queue);
+ req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1658,6 +1660,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1685,45 +1690,49 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ nctrl->admin_tagset = set;
+ return ret;
+}
- return set;
+static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ nctrl->tagset = set;
+ return ret;
}
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
@@ -1884,7 +1893,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
if (remove) {
- blk_cleanup_queue(ctrl->connect_q);
+ blk_mq_destroy_queue(ctrl->connect_q);
blk_mq_free_tag_set(ctrl->tagset);
}
nvme_tcp_free_io_queues(ctrl);
@@ -1899,11 +1908,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
- if (IS_ERR(ctrl->tagset)) {
- ret = PTR_ERR(ctrl->tagset);
+ ret = nvme_tcp_alloc_tag_set(ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(ctrl);
if (ret)
@@ -1939,7 +1946,7 @@ out_wait_freeze_timed_out:
out_cleanup_connect_q:
nvme_cancel_tagset(ctrl);
if (new)
- blk_cleanup_queue(ctrl->connect_q);
+ blk_mq_destroy_queue(ctrl->connect_q);
out_free_tag_set:
if (new)
blk_mq_free_tag_set(ctrl->tagset);
@@ -1952,8 +1959,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_queue(ctrl, 0);
if (remove) {
- blk_cleanup_queue(ctrl->admin_q);
- blk_cleanup_queue(ctrl->fabrics_q);
+ blk_mq_destroy_queue(ctrl->admin_q);
+ blk_mq_destroy_queue(ctrl->fabrics_q);
blk_mq_free_tag_set(ctrl->admin_tagset);
}
nvme_tcp_free_admin_queue(ctrl);
@@ -1968,11 +1975,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
- if (IS_ERR(ctrl->admin_tagset)) {
- error = PTR_ERR(ctrl->admin_tagset);
+ error = nvme_tcp_alloc_admin_tag_set(ctrl);
+ if (error)
goto out_free_queue;
- }
ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
if (IS_ERR(ctrl->fabrics_q)) {
@@ -2011,10 +2016,10 @@ out_stop_queue:
nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
if (new)
- blk_cleanup_queue(ctrl->admin_q);
+ blk_mq_destroy_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
if (new)
- blk_cleanup_queue(ctrl->fabrics_q);
+ blk_mq_destroy_queue(ctrl->fabrics_q);
out_free_tagset:
if (new)
blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -2173,6 +2178,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2323,8 +2329,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
nvmf_complete_timed_out_request(rq);
}
-static enum blk_eh_timer_return
-nvme_tcp_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
@@ -2372,7 +2377,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
nvme_tcp_set_sg_null(c);
else if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
nvme_tcp_set_sg_host_data(c, req->data_len);
@@ -2407,7 +2412,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
req->pdu_len = req->data_len;
pdu->hdr.type = nvme_tcp_cmd;
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 2a89c5aa0790..1c36fcedea20 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -287,6 +287,34 @@ static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
return ret;
}
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -306,6 +334,10 @@ const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvme_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvme_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvme_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvme_trace_fabrics_auth_receive(p, spc);
default:
return nvme_trace_fabrics_common(p, spc);
}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 37c7f4c89f92..6f0eaf6a1528 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -98,7 +98,7 @@ TRACE_EVENT(nvme_complete_rq,
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
- __entry->cid = req->tag;
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9f81beb4df4e..12316ab51bda 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -109,10 +109,10 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
+ disk_set_zoned(ns->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
- blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+ disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
+ disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
free_data:
kfree(id);
return status;
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 973561c93888..79fc64035ee3 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -83,3 +83,18 @@ config NVME_TARGET_TCP
devices over TCP.
If unsure, say N.
+
+config NVME_TARGET_AUTH
+ bool "NVMe over Fabrics In-band Authentication support"
+ depends on NVME_TARGET
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This enables support for NVMe over Fabrics In-band Authentication.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 9837e580fa7e..c66820102493 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -13,6 +13,7 @@ nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 397daaf51f1b..fc8a957fad0a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1017,7 +1017,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
u16 ret;
if (nvme_is_fabrics(cmd))
- return nvmet_parse_fabrics_cmd(req);
+ return nvmet_parse_fabrics_admin_cmd(req);
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 000000000000..cf690df34775
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl)
+{
+ unsigned char key_hash;
+ char *dhchap_secret;
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
+ return -EINVAL;
+ if (key_hash > 3) {
+ pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+ key_hash);
+ return -EINVAL;
+ }
+ if (key_hash > 0) {
+ /* Validate selected hash algorithm */
+ const char *hmac = nvme_auth_hmac_name(key_hash);
+
+ if (!crypto_has_shash(hmac, 0, 0)) {
+ pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+ return -ENOTSUPP;
+ }
+ }
+ dhchap_secret = kstrdup(secret, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ if (set_ctrl) {
+ host->dhchap_ctrl_secret = strim(dhchap_secret);
+ host->dhchap_ctrl_key_hash = key_hash;
+ } else {
+ host->dhchap_secret = strim(dhchap_secret);
+ host->dhchap_key_hash = key_hash;
+ }
+ return 0;
+}
+
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
+{
+ const char *dhgroup_kpp;
+ int ret = 0;
+
+ pr_debug("%s: ctrl %d selecting dhgroup %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+
+ if (ctrl->dh_tfm) {
+ if (ctrl->dh_gid == dhgroup_id) {
+ pr_debug("%s: ctrl %d reuse existing DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return 0;
+ }
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+
+ if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
+ return 0;
+
+ dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+ if (!dhgroup_kpp) {
+ pr_debug("%s: ctrl %d invalid DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return -EINVAL;
+ }
+ ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
+ if (IS_ERR(ctrl->dh_tfm)) {
+ pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
+ __func__, ctrl->cntlid, dhgroup_id,
+ PTR_ERR(ctrl->dh_tfm));
+ ret = PTR_ERR(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ } else {
+ ctrl->dh_gid = dhgroup_id;
+ pr_debug("%s: ctrl %d setup DH group %d\n",
+ __func__, ctrl->cntlid, ctrl->dh_gid);
+ ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
+ if (ret < 0) {
+ pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
+ __func__, ctrl->cntlid, ret);
+ kfree_sensitive(ctrl->dh_key);
+ return ret;
+ }
+ ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d failed to allocate public key\n",
+ ctrl->cntlid);
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
+ ctrl->dh_keysize);
+ if (ret < 0) {
+ pr_warn("ctrl %d failed to generate public key\n",
+ ctrl->cntlid);
+ kfree(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+ }
+ }
+
+ return ret;
+}
+
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ int ret = 0;
+ struct nvmet_host_link *p;
+ struct nvmet_host *host = NULL;
+ const char *hash_name;
+
+ down_read(&nvmet_config_sem);
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ goto out_unlock;
+
+ if (ctrl->subsys->allow_any_host)
+ goto out_unlock;
+
+ list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+ pr_debug("check %s\n", nvmet_host_name(p->host));
+ if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+ continue;
+ host = p->host;
+ break;
+ }
+ if (!host) {
+ pr_debug("host %s not found\n", ctrl->hostnqn);
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
+ if (ret < 0)
+ pr_warn("Failed to setup DH group");
+
+ if (!host->dhchap_secret) {
+ pr_debug("No authentication provided\n");
+ goto out_unlock;
+ }
+
+ if (host->dhchap_hash_id == ctrl->shash_id) {
+ pr_debug("Re-use existing hash ID %d\n",
+ ctrl->shash_id);
+ } else {
+ hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ctrl->shash_id = host->dhchap_hash_id;
+ }
+
+ /* Skip the 'DHHC-1:XX:' prefix */
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
+ host->dhchap_key_hash);
+ if (IS_ERR(ctrl->host_key)) {
+ ret = PTR_ERR(ctrl->host_key);
+ ctrl->host_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using hash %s key %*ph\n", __func__,
+ ctrl->host_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
+ (int)ctrl->host_key->len, ctrl->host_key->key);
+
+ nvme_auth_free_key(ctrl->ctrl_key);
+ if (!host->dhchap_ctrl_secret) {
+ ctrl->ctrl_key = NULL;
+ goto out_unlock;
+ }
+
+ ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
+ host->dhchap_ctrl_key_hash);
+ if (IS_ERR(ctrl->ctrl_key)) {
+ ret = PTR_ERR(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+ pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
+ ctrl->ctrl_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
+ (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
+
+out_free_hash:
+ if (ret) {
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ ctrl->shash_id = 0;
+ }
+out_unlock:
+ up_read(&nvmet_config_sem);
+
+ return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+ cancel_delayed_work(&sq->auth_expired_work);
+ kfree(sq->dhchap_c1);
+ sq->dhchap_c1 = NULL;
+ kfree(sq->dhchap_c2);
+ sq->dhchap_c2 = NULL;
+ kfree(sq->dhchap_skey);
+ sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+ ctrl->shash_id = 0;
+
+ if (ctrl->dh_tfm) {
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ if (req->sq->ctrl->host_key &&
+ !req->sq->authenticated)
+ return false;
+ return true;
+}
+
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
+ if (IS_ERR(host_response)) {
+ ret = PTR_ERR(host_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, host_response,
+ ctrl->host_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid);
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(host_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+ ctrl->ctrl_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(ctrl_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+ return -ENOKEY;
+ }
+ if (buf_size != ctrl->dh_keysize) {
+ pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+ ctrl->cntlid, ctrl->dh_keysize, buf_size);
+ ret = -EINVAL;
+ } else {
+ memcpy(buf, ctrl->dh_key, buf_size);
+ pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+ ctrl->cntlid, (int)buf_size, buf);
+ }
+
+ return ret;
+}
+
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *pkey, int pkey_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret;
+
+ req->sq->dhchap_skey_len = ctrl->dh_keysize;
+ req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+ if (!req->sq->dhchap_skey)
+ return -ENOMEM;
+ ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+ pkey, pkey_size,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len);
+ if (ret)
+ pr_debug("failed to compute shared secret, err %d\n", ret);
+ else
+ pr_debug("%s: shared secret %*ph\n", __func__,
+ (int)req->sq->dhchap_skey_len,
+ req->sq->dhchap_skey);
+
+ return ret;
+}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ff77c3d2354f..2bcd60758919 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -11,6 +11,11 @@
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include <linux/nvme-auth.h>
+#endif
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include "nvmet.h"
@@ -1680,10 +1685,133 @@ static const struct config_item_type nvmet_ports_type = {
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, false);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, true);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+ return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ u8 hmac_id;
+
+ hmac_id = nvme_auth_hmac_id(page);
+ if (hmac_id == NVME_AUTH_HASH_INVALID)
+ return -EINVAL;
+ if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+ return -ENOTSUPP;
+ host->dhchap_hash_id = hmac_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
+
+ return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
+}
+
+static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int dhgroup_id;
+
+ dhgroup_id = nvme_auth_dhgroup_id(page);
+ if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
+ return -EINVAL;
+ if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
+ const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+
+ if (!crypto_has_kpp(kpp, 0, 0))
+ return -EINVAL;
+ }
+ host->dhchap_dhgroup_id = dhgroup_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+ &nvmet_host_attr_dhchap_key,
+ &nvmet_host_attr_dhchap_ctrl_key,
+ &nvmet_host_attr_dhchap_hash,
+ &nvmet_host_attr_dhchap_dhgroup,
+ NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
+
static void nvmet_host_release(struct config_item *item)
{
struct nvmet_host *host = to_host(item);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ kfree(host->dhchap_secret);
+#endif
kfree(host);
}
@@ -1693,6 +1821,9 @@ static struct configfs_item_operations nvmet_host_item_ops = {
static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+ .ct_attrs = nvmet_host_attrs,
+#endif
.ct_owner = THIS_MODULE,
};
@@ -1705,6 +1836,11 @@ static struct config_group *nvmet_hosts_make_group(struct config_group *group,
if (!host)
return ERR_PTR(-ENOMEM);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ /* Default to SHA256 */
+ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
+#endif
+
config_group_init_type_name(&host->group, name, &nvmet_host_type);
return &host->group;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c27660a660d9..a1345790005f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -795,6 +795,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
if (ctrl) {
/*
@@ -865,8 +866,15 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
+ struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_io_cmd(req);
+
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
return ret;
@@ -1271,6 +1279,11 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
req->cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
+
+ if (unlikely(!nvmet_check_auth_status(req))) {
+ pr_warn("qid %d not authenticated\n", req->sq->qid);
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ }
return 0;
}
@@ -1467,6 +1480,8 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
+ nvmet_destroy_auth(ctrl);
+
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 000000000000..ebdf9aa81041
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+ struct nvmet_sq *sq = container_of(to_delayed_work(work),
+ struct nvmet_sq, auth_expired_work);
+
+ pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+ __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ sq->dhchap_tid = -1;
+}
+
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ u32 result = le32_to_cpu(req->cqe->result.u32);
+
+ /* Initialize in-band authentication */
+ INIT_DELAYED_WORK(&req->sq->auth_expired_work,
+ nvmet_auth_expired_work);
+ req->sq->authenticated = false;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
+ req->cqe->result.u32 = cpu_to_le32(result);
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_negotiate_data *data = d;
+ int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+ pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+ data->auth_protocol[0].dhchap.halen,
+ data->auth_protocol[0].dhchap.dhlen);
+ req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ if (data->sc_c)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+ if (data->napd != 1)
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+ if (data->auth_protocol[0].dhchap.authid !=
+ NVME_AUTH_DHCHAP_AUTH_ID)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+ for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+ u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+ if (!fallback_hash_id &&
+ crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+ fallback_hash_id = host_hmac_id;
+ if (ctrl->shash_id != host_hmac_id)
+ continue;
+ hash_id = ctrl->shash_id;
+ break;
+ }
+ if (hash_id == 0) {
+ if (fallback_hash_id == 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_hmac_name(fallback_hash_id));
+ ctrl->shash_id = fallback_hash_id;
+ }
+
+ dhgid = -1;
+ fallback_dhgid = -1;
+ for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+ int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+ if (tmp_dhgid != ctrl->dh_gid) {
+ dhgid = tmp_dhgid;
+ break;
+ }
+ if (fallback_dhgid < 0) {
+ const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+ if (crypto_has_kpp(kpp, 0, 0))
+ fallback_dhgid = tmp_dhgid;
+ }
+ }
+ if (dhgid < 0) {
+ if (fallback_dhgid < 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(fallback_dhgid));
+ ctrl->dh_gid = fallback_dhgid;
+ }
+ pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+ return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_reply_data *data = d;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ u8 *response;
+
+ pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->hl, data->cvalid, dhvlen);
+
+ if (dhvlen) {
+ if (!ctrl->dh_tfm)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+ dhvlen) < 0)
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+
+ response = kmalloc(data->hl, GFP_KERNEL);
+ if (!response)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ if (!ctrl->host_key) {
+ pr_warn("ctrl %d qid %d no host key\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+ pr_debug("ctrl %d qid %d host hash failed\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+
+ if (memcmp(data->rval, response, data->hl)) {
+ pr_info("ctrl %d qid %d host response mismatch\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ kfree(response);
+ pr_debug("%s: ctrl %d qid %d host authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ if (data->cvalid) {
+ req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
+ GFP_KERNEL);
+ if (!req->sq->dhchap_c2)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+ __func__, ctrl->cntlid, req->sq->qid, data->hl,
+ req->sq->dhchap_c2);
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else {
+ req->sq->authenticated = true;
+ req->sq->dhchap_c2 = NULL;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_success2_data *data;
+ void *d;
+ u32 tl;
+ u16 status = 0;
+
+ if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp1);
+ goto done;
+ }
+ tl = le32_to_cpu(req->cmd->auth_send.tl);
+ if (!tl) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, tl);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, tl)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+ return;
+ }
+
+ d = kmalloc(tl, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, tl);
+ if (status) {
+ kfree(d);
+ goto done;
+ }
+
+ data = d;
+ pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+ req->sq->dhchap_step);
+ if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+ goto done_failure1;
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+ if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+ /* Restart negotiation */
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ if (!req->sq->qid) {
+ if (nvmet_setup_auth(ctrl) < 0) {
+ status = NVME_SC_INTERNAL;
+ pr_err("ctrl %d qid 0 failed to setup"
+ "re-authentication",
+ ctrl->cntlid);
+ goto done_failure1;
+ }
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ } else if (data->auth_id != req->sq->dhchap_step)
+ goto done_failure1;
+ /* Validate negotiation parameters */
+ status = nvmet_auth_negotiate(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ }
+ if (data->auth_id != req->sq->dhchap_step) {
+ pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->auth_id, req->sq->dhchap_step);
+ goto done_failure1;
+ }
+ if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+ pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ le16_to_cpu(data->t_id),
+ req->sq->dhchap_tid);
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ goto done_kfree;
+ }
+
+ switch (data->auth_id) {
+ case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+ status = nvmet_auth_reply(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ req->sq->authenticated = true;
+ pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ goto done_kfree;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+ status = nvmet_auth_failure2(req, d);
+ if (status) {
+ pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ status = 0;
+ }
+ goto done_kfree;
+ break;
+ default:
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ req->sq->authenticated = false;
+ goto done_kfree;
+ break;
+ }
+done_failure1:
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+ kfree(d);
+done:
+ pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status, req->sq->dhchap_step);
+ if (status)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+
+ mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ auth_expire_secs * HZ);
+ return;
+ }
+ /* Final states, clear up variables */
+ nvmet_auth_sq_free(req->sq);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ nvmet_ctrl_fatal_error(ctrl);
+}
+
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+ int data_size = sizeof(*d) + hash_len;
+
+ if (ctrl->dh_tfm)
+ data_size += ctrl->dh_keysize;
+ if (al < data_size) {
+ pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+ al, data_size);
+ return -EINVAL;
+ }
+ memset(data, 0, data_size);
+ req->sq->dhchap_s1 = nvme_auth_get_seqnum();
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hashid = ctrl->shash_id;
+ data->hl = hash_len;
+ data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+ req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c1)
+ return -ENOMEM;
+ get_random_bytes(req->sq->dhchap_c1, data->hl);
+ memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+ if (ctrl->dh_tfm) {
+ data->dhgid = ctrl->dh_gid;
+ data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
+ ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
+ ctrl->dh_keysize);
+ }
+ pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
+ __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
+ return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_success1_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+
+ WARN_ON(al < sizeof(*data));
+ memset(data, 0, sizeof(*data));
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hl = hash_len;
+ if (req->sq->dhchap_c2) {
+ if (!ctrl->ctrl_key) {
+ pr_warn("ctrl %d qid %d no ctrl key\n",
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ data->rvalid = 1;
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ }
+ return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ WARN_ON(al < sizeof(*data));
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = req->sq->dhchap_status;
+}
+
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *d;
+ u32 al;
+ u16 status = 0;
+
+ if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp1);
+ goto done;
+ }
+ al = le32_to_cpu(req->cmd->auth_receive.al);
+ if (!al) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, al);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, al)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+ return;
+ }
+
+ d = kmalloc(al, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+ pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ switch (req->sq->dhchap_step) {
+ case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+ if (nvmet_auth_challenge(req, d, al) < 0) {
+ pr_warn("ctrl %d qid %d: challenge error (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ status = NVME_SC_INTERNAL;
+ break;
+ }
+ if (status) {
+ req->sq->dhchap_status = status;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: challenge status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ status = 0;
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+ status = nvmet_auth_success1(req, d, al);
+ if (status) {
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d failure1 (%x)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+ break;
+ default:
+ pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ nvmet_auth_failure1(req, d, al);
+ status = 0;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+done:
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ nvmet_auth_sq_free(req->sq);
+ nvmet_ctrl_fatal_error(ctrl);
+ }
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 70fb587e9413..f91a56180d3d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -82,7 +82,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -93,6 +93,37 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
case nvme_fabrics_type_property_get:
req->execute = nvmet_execute_prop_get;
break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
default:
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
@@ -173,6 +204,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -215,18 +247,32 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
uuid_copy(&ctrl->hostid, &d->hostid);
+ ret = nvmet_setup_auth(ctrl);
+ if (ret < 0) {
+ pr_err("Failed to setup authentication, error %d\n", ret);
+ nvmet_ctrl_put(ctrl);
+ if (ret == -EPERM)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ else
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
status = nvmet_install_queue(ctrl, req);
if (status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "");
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
complete:
@@ -286,6 +332,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 27a72504d31c..2dc1c1035626 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -246,7 +246,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
struct scatterlist *sg;
struct blk_plug plug;
sector_t sector;
- int op, i, rc;
+ blk_opf_t opf;
+ int i, rc;
struct sg_mapping_iter prot_miter;
unsigned int iter_flags;
unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
@@ -260,26 +261,26 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op |= REQ_FUA;
+ opf |= REQ_FUA;
iter_flags = SG_MITER_TO_SG;
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
iter_flags = SG_MITER_FROM_SG;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op |= REQ_NOMERGE;
+ opf |= REQ_NOMERGE;
sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->ns->bdev, req->inline_bvec,
- ARRAY_SIZE(req->inline_bvec), op);
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
GFP_KERNEL);
}
bio->bi_iter.bi_sector = sector;
@@ -306,7 +307,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
- op, GFP_KERNEL);
+ opf, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
bio_chain(bio, prev);
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index f3d58abf11e0..64b47e2a4633 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -112,7 +112,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
- iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
+ iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags;
return call_iter(iocb, &iter);
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..9750a7fca268 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
}
@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
mutex_unlock(&nvme_loop_ctrl_mutex);
if (nctrl->tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(&ctrl->tag_set);
}
kfree(ctrl->queues);
@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
out_cleanup_queue:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
@@ -424,9 +424,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
@@ -434,9 +432,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
}
@@ -554,7 +550,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
return 0;
out_cleanup_connect_q:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 2b3e5719f24e..6ffeeb0a1c49 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -108,6 +108,19 @@ struct nvmet_sq {
u16 size;
u32 sqhd;
bool sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct delayed_work auth_expired_work;
+ bool authenticated;
+ u16 dhchap_tid;
+ u16 dhchap_status;
+ int dhchap_step;
+ u8 *dhchap_c1;
+ u8 *dhchap_c2;
+ u32 dhchap_s1;
+ u32 dhchap_s2;
+ u8 *dhchap_skey;
+ int dhchap_skey_len;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -209,6 +222,15 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u8 shash_id;
+ struct crypto_kpp *dh_tfm;
+ u8 dh_gid;
+ u8 *dh_key;
+ size_t dh_keysize;
+#endif
};
struct nvmet_subsys {
@@ -271,6 +293,12 @@ static inline struct nvmet_subsys *namespaces_to_subsys(
struct nvmet_host {
struct config_group group;
+ u8 *dhchap_secret;
+ u8 *dhchap_ctrl_secret;
+ u8 dhchap_key_hash;
+ u8 dhchap_ctrl_key_hash;
+ u8 dhchap_hash_id;
+ u8 dhchap_dhgroup_id;
};
static inline struct nvmet_host *to_host(struct config_item *item)
@@ -420,7 +448,8 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
@@ -668,4 +697,48 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->host_key != NULL;
+}
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req) {};
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+#endif
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 09fdcac87d17..4597bca43a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -415,7 +415,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- if (!ib_uses_virt_dma(ndev->device))
+ if (ib_dma_pci_p2p_dma_supported(ndev->device))
r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 0a9542599ad1..dc3b4dc8fe08 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1839,7 +1839,8 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 82b61acf7a72..c7ef69f29fe4 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -57,10 +57,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
* zones, reject the device. Otherwise, use report zones to detect if
* the device has conventional zones.
*/
- if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
+ if (ns->bdev->bd_disk->conv_zones_bitmap)
return false;
- ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
+ ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
return false;
@@ -241,7 +241,7 @@ static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
- return blkdev_nr_zones(req->ns->bdev->bd_disk) -
+ return bdev_nr_zones(req->ns->bdev) -
(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}
@@ -308,7 +308,7 @@ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
queue_work(zbd_wq, &req->z.zmgmt_work);
}
-static inline enum req_opf zsa_req_op(u8 zsa)
+static inline enum req_op zsa_req_op(u8 zsa)
{
switch (zsa) {
case NVME_ZONE_OPEN:
@@ -386,7 +386,7 @@ static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
struct block_device *bdev = req->ns->bdev;
- unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
+ unsigned int nr_zones = bdev_nr_zones(bdev);
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
sector_t sector = 0;
@@ -413,8 +413,8 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
ret = 0;
}
- while (sector < get_capacity(bdev->bd_disk)) {
- if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
+ while (sector < bdev_nr_sectors(bdev)) {
+ if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
bio = blk_next_bio(bio, bdev, 0,
zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
GFP_KERNEL);
@@ -422,7 +422,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
/* This may take a while, so be nice to others */
cond_resched();
}
- sector += blk_queue_zone_sectors(q);
+ sector += bdev_zone_sectors(bdev);
}
if (bio) {
@@ -465,7 +465,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
- enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+ enum req_op op = zsa_req_op(req->cmd->zms.zsa);
struct block_device *bdev = req->ns->bdev;
sector_t zone_sectors = bdev_zone_sectors(bdev);
u16 status = NVME_SC_SUCCESS;
@@ -525,7 +525,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
@@ -556,9 +556,9 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
bio_init(bio, req->ns->bdev, req->inline_bvec,
- ARRAY_SIZE(req->inline_bvec), op);
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
}
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 967d0084800e..d72d879a6d34 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -107,6 +107,13 @@ config MTK_EFUSE
This driver can also be built as a module. If so, the module
will be called efuse-mtk.
+config MICROCHIP_OTPC
+ tristate "Microchip OTPC support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+	  This driver enables the OTP controller available on Microchip SAMA7G5
+	  SoCs. It controls the access to the OTP memory connected to it.
+
config NVMEM_NINTENDO_OTP
tristate "Nintendo Wii and Wii U OTP Support"
depends on WII || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 00e136a0a123..c710b64f9fe4 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -67,3 +67,5 @@ obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
nvmem_sunplus_ocotp-y := sunplus-ocotp.o
obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
nvmem-apple-efuses-y := apple-efuses.o
+obj-$(CONFIG_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
+nvmem-microchip-otpc-y := microchip-otpc.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index dfea96c52463..a128c7f5e351 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2016 Broadcom
#include <linux/acpi.h>
#include <linux/delay.h>
diff --git a/drivers/nvmem/microchip-otpc.c b/drivers/nvmem/microchip-otpc.c
new file mode 100644
index 000000000000..436e0dc4f337
--- /dev/null
+++ b/drivers/nvmem/microchip-otpc.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OTP Memory controller
+ *
+ * Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define MCHP_OTPC_CR (0x0)
+#define MCHP_OTPC_CR_READ BIT(6)
+#define MCHP_OTPC_MR (0x4)
+#define MCHP_OTPC_MR_ADDR GENMASK(31, 16)
+#define MCHP_OTPC_AR (0x8)
+#define MCHP_OTPC_SR (0xc)
+#define MCHP_OTPC_SR_READ BIT(6)
+#define MCHP_OTPC_HR (0x20)
+#define MCHP_OTPC_HR_SIZE GENMASK(15, 8)
+#define MCHP_OTPC_DR (0x24)
+
+#define MCHP_OTPC_NAME "mchp-otpc"
+#define MCHP_OTPC_SIZE (11 * 1024)
+
+/**
+ * struct mchp_otpc - OTPC private data structure
+ * @base: base address
+ * @dev: struct device pointer
+ * @packets: list of packets in OTP memory
+ * @npackets: number of packets in OTP memory
+ */
+struct mchp_otpc {
+ void __iomem *base;
+ struct device *dev;
+ struct list_head packets;
+ u32 npackets;
+};
+
+/**
+ * struct mchp_otpc_packet - OTPC packet data structure
+ * @list: list head
+ * @id: packet ID
+ * @offset: packet offset (in words) in OTP memory
+ */
+struct mchp_otpc_packet {
+ struct list_head list;
+ u32 id;
+ u32 offset;
+};
+
+static struct mchp_otpc_packet *mchp_otpc_id_to_packet(struct mchp_otpc *otpc,
+ u32 id)
+{
+ struct mchp_otpc_packet *packet;
+
+ if (id >= otpc->npackets)
+ return NULL;
+
+ list_for_each_entry(packet, &otpc->packets, list) {
+ if (packet->id == id)
+ return packet;
+ }
+
+ return NULL;
+}
+
+static int mchp_otpc_prepare_read(struct mchp_otpc *otpc,
+ unsigned int offset)
+{
+ u32 tmp;
+
+ /* Set address. */
+ tmp = readl_relaxed(otpc->base + MCHP_OTPC_MR);
+ tmp &= ~MCHP_OTPC_MR_ADDR;
+ tmp |= FIELD_PREP(MCHP_OTPC_MR_ADDR, offset);
+ writel_relaxed(tmp, otpc->base + MCHP_OTPC_MR);
+
+ /* Set read. */
+ tmp = readl_relaxed(otpc->base + MCHP_OTPC_CR);
+ tmp |= MCHP_OTPC_CR_READ;
+ writel_relaxed(tmp, otpc->base + MCHP_OTPC_CR);
+
+ /* Wait for packet to be transferred into temporary buffers. */
+ return read_poll_timeout(readl_relaxed, tmp, !(tmp & MCHP_OTPC_SR_READ),
+ 10000, 2000, false, otpc->base + MCHP_OTPC_SR);
+}
+
+/*
+ * OTPC memory is organized into packets. Each packet contains a header and
+ * a payload. The header is 4 bytes long and contains the size of the payload.
+ * The payload size varies. The memory footprint is as follows:
+ *
+ * Memory offset Memory footprint Packet ID
+ * ------------- ---------------- ---------
+ *
+ * 0x0 +------------+ <-- packet 0
+ * | header 0 |
+ * 0x4 +------------+
+ * | payload 0 |
+ * . .
+ * . ... .
+ * . .
+ * offset1 +------------+ <-- packet 1
+ * | header 1 |
+ * offset1 + 0x4 +------------+
+ * | payload 1 |
+ * . .
+ * . ... .
+ * . .
+ * offset2 +------------+ <-- packet 2
+ * . .
+ * . ... .
+ * . .
+ * offsetN +------------+ <-- packet N
+ * | header N |
+ * offsetN + 0x4 +------------+
+ * | payload N |
+ * . .
+ * . ... .
+ * . .
+ * +------------+
+ *
+ * where offset1, offset2, ..., offsetN depend on the sizes of payload 0,
+ * payload 1, ..., payload N-1.
+ *
+ * Memory is accessed on a per-packet basis: the control registers need to be
+ * updated with an offset address (within a packet range) and the data
+ * registers are then updated by the controller with the information contained
+ * in that packet. E.g. if the control registers are updated with any address
+ * within the range [offset1, offset2), the data registers are updated by the
+ * controller with packet 1. Header data is accessible through the MCHP_OTPC_HR
+ * register. Payload data is accessible through the MCHP_OTPC_DR and
+ * MCHP_OTPC_AR registers. There is no direct mapping between the offset
+ * requested by software and the offset returned by hardware.
+ *
+ * Because of this, the read function returns the requested bytes starting at
+ * the beginning of the packet. The user has to be aware of the memory
+ * footprint before issuing a read request.
+ */
+static int mchp_otpc_read(void *priv, unsigned int off, void *val,
+ size_t bytes)
+{
+ struct mchp_otpc *otpc = priv;
+ struct mchp_otpc_packet *packet;
+ u32 *buf = val;
+ u32 offset;
+ size_t len = 0;
+ int ret, payload_size;
+
+ /*
+	 * We reach this point with off being a multiple of stride = 4, as
+	 * enforced by the nvmem subsystem. Inside the driver we use consecutive
+	 * unsigned integer numbers for the packet id, thus divide off by 4
+	 * before passing it to mchp_otpc_id_to_packet().
+ */
+ packet = mchp_otpc_id_to_packet(otpc, off / 4);
+ if (!packet)
+ return -EINVAL;
+ offset = packet->offset;
+
+ while (len < bytes) {
+ ret = mchp_otpc_prepare_read(otpc, offset);
+ if (ret)
+ return ret;
+
+ /* Read and save header content. */
+ *buf++ = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+ len += sizeof(*buf);
+ offset++;
+ if (len >= bytes)
+ break;
+
+ /* Read and save payload content. */
+ payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, *(buf - 1));
+ writel_relaxed(0UL, otpc->base + MCHP_OTPC_AR);
+ do {
+ *buf++ = readl_relaxed(otpc->base + MCHP_OTPC_DR);
+ len += sizeof(*buf);
+ offset++;
+ payload_size--;
+ } while (payload_size >= 0 && len < bytes);
+ }
+
+ return 0;
+}
+
+static int mchp_otpc_init_packets_list(struct mchp_otpc *otpc, u32 *size)
+{
+ struct mchp_otpc_packet *packet;
+ u32 word, word_pos = 0, id = 0, npackets = 0, payload_size;
+ int ret;
+
+ INIT_LIST_HEAD(&otpc->packets);
+ *size = 0;
+
+ while (*size < MCHP_OTPC_SIZE) {
+ ret = mchp_otpc_prepare_read(otpc, word_pos);
+ if (ret)
+ return ret;
+
+ word = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+ payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, word);
+ if (!payload_size)
+ break;
+
+ packet = devm_kzalloc(otpc->dev, sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->id = id++;
+ packet->offset = word_pos;
+ INIT_LIST_HEAD(&packet->list);
+ list_add_tail(&packet->list, &otpc->packets);
+
+		/* Count the size by adding the header and payload sizes. */
+ *size += 4 * (payload_size + 1);
+ /* Next word: this packet (header, payload) position + 1. */
+ word_pos += payload_size + 2;
+
+ npackets++;
+ }
+
+ otpc->npackets = npackets;
+
+ return 0;
+}
+
+static struct nvmem_config mchp_nvmem_config = {
+ .name = MCHP_OTPC_NAME,
+ .type = NVMEM_TYPE_OTP,
+ .read_only = true,
+ .word_size = 4,
+ .stride = 4,
+ .reg_read = mchp_otpc_read,
+};
+
+static int mchp_otpc_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct mchp_otpc *otpc;
+ u32 size;
+ int ret;
+
+ otpc = devm_kzalloc(&pdev->dev, sizeof(*otpc), GFP_KERNEL);
+ if (!otpc)
+ return -ENOMEM;
+
+ otpc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otpc->base))
+ return PTR_ERR(otpc->base);
+
+ otpc->dev = &pdev->dev;
+ ret = mchp_otpc_init_packets_list(otpc, &size);
+ if (ret)
+ return ret;
+
+ mchp_nvmem_config.dev = otpc->dev;
+ mchp_nvmem_config.size = size;
+ mchp_nvmem_config.priv = otpc;
+ nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id __maybe_unused mchp_otpc_ids[] = {
+ { .compatible = "microchip,sama7g5-otpc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mchp_otpc_ids);
+
+static struct platform_driver mchp_otpc_driver = {
+ .probe = mchp_otpc_probe,
+ .driver = {
+ .name = MCHP_OTPC_NAME,
+ .of_match_table = of_match_ptr(mchp_otpc_ids),
+ },
+};
+module_platform_driver(mchp_otpc_driver);
+
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
+MODULE_DESCRIPTION("Microchip SAMA7G5 OTPC driver");
+MODULE_LICENSE("GPL");
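As a side note for readers (not part of the patch): a minimal, hypothetical nvmem consumer sketch showing the packet-id read convention described in the driver comment above. The function name and sizes are illustrative assumptions only; the nvmem offset is the packet id multiplied by the 4-byte stride, and the returned data starts with the packet header.

#include <linux/nvmem-consumer.h>

/*
 * Hypothetical consumer: read the header and payload of OTP packet @id.
 * Assumes the calling device has access to the "mchp-otpc" nvmem device.
 */
static int example_read_otp_packet(struct device *dev, u32 id, u32 *buf,
				   size_t bytes)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = devm_nvmem_device_get(dev, NULL);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* Offset is the packet id times the 4-byte stride, see mchp_otpc_read(). */
	ret = nvmem_device_read(nvmem, id * 4, bytes, buf);

	return ret < 0 ? ret : 0;
}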
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index e9a375dd84af..a08e0aedd21c 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -41,8 +41,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 94f017d808c4..96f0a12e507c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1045,26 +1045,29 @@ phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
*
* It returns true if "dma-coherent" property was found
* for this device in the DT, or if DMA is coherent by
- * default for OF devices on the current platform.
+ * default for OF devices on the current platform and no
+ * "dma-noncoherent" property was found for this device.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node;
-
- if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
- return true;
+ bool is_coherent = IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT);
node = of_node_get(np);
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
- of_node_put(node);
- return true;
+ is_coherent = true;
+ break;
+ }
+ if (of_property_read_bool(node, "dma-noncoherent")) {
+ is_coherent = false;
+ break;
}
node = of_get_next_dma_parent(node);
}
of_node_put(node);
- return false;
+ return is_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d4f98c8469ed..7fa960bd3df1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1919,6 +1919,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
of_property_read_string(of_aliases, "stdout", &name);
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
+ if (of_stdout)
+ of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
}
if (!of_aliases)
@@ -2077,7 +2079,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
*
* @cpu: cpu number(logical index) for which the last cache level is needed
*
- * Return: The the level at which the last cache is present. It is exactly
+ * Return: The level at which the last cache is present. It is exactly
* same as the total number of cache levels for the given logical cpu.
*/
int of_find_last_cache_level(unsigned int cpu)
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 874f031442dc..75b6cbffa755 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -81,8 +81,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
* restricted-dma-pool region is allowed.
*/
if (of_device_is_compatible(node, "restricted-dma-pool") &&
- of_device_is_available(node))
+ of_device_is_available(node)) {
+ of_node_put(node);
break;
+ }
+ of_node_put(node);
}
/*
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a8f5b6532165..7bc92923104c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -246,7 +246,7 @@ static int populate_node(const void *blob,
}
*pnp = np;
- return true;
+ return 0;
}
static void reverse_nodes(struct device_node *parent)
@@ -477,8 +477,8 @@ void *initial_boot_params __ro_after_init;
static u32 of_fdt_crc32;
-static int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
- phys_addr_t size, bool nomap)
+static int __init early_init_dt_reserve_memory(phys_addr_t base,
+ phys_addr_t size, bool nomap)
{
if (nomap) {
/*
@@ -525,15 +525,15 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory_arch(base, size, nomap) == 0) {
+ early_init_dt_reserve_memory(base, size, nomap) == 0) {
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
if (!nomap)
- kmemleak_alloc_phys(base, size, 0, 0);
+ kmemleak_alloc_phys(base, size, 0);
}
else
- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
- uname, &base, (unsigned long)(size / SZ_1M));
+ pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
len -= t_len;
if (first) {
@@ -644,7 +644,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
if (!size)
break;
- early_init_dt_reserve_memory_arch(base, size, false);
+ memblock_reserve(base, size);
}
fdt_scan_reserved_mem();
@@ -661,9 +661,8 @@ void __init early_init_fdt_reserve_self(void)
return;
/* Reserve the dtb region */
- early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
- fdt_totalsize(initial_boot_params),
- false);
+ memblock_reserve(__pa(initial_boot_params),
+ fdt_totalsize(initial_boot_params));
}
/**
@@ -1025,6 +1024,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
int l;
const struct earlycon_id *match;
const void *fdt = initial_boot_params;
+ int ret;
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0)
@@ -1057,7 +1057,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- if (of_setup_earlycon(match, offset, options) == 0)
+ ret = of_setup_earlycon(match, offset, options);
+ if (!ret || ret == -EALREADY)
return 0;
}
return -ENODEV;
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index 8d374cc552be..e6c01db393f9 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -9,6 +9,7 @@
* Copyright (C) 2016 IBM Corporation
*/
+#include <linux/ima.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
@@ -115,6 +116,7 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
return 0;
}
+#ifdef CONFIG_HAVE_IMA_KEXEC
/**
* ima_get_kexec_buffer - get IMA buffer from the previous kernel
* @addr: On successful return, set to point to the buffer contents.
@@ -122,16 +124,14 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
*
* Return: 0 on success, negative errno on error.
*/
-int ima_get_kexec_buffer(void **addr, size_t *size)
+int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
int ret, len;
unsigned long tmp_addr;
+ unsigned long start_pfn, end_pfn;
size_t tmp_size;
const void *prop;
- if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC))
- return -ENOTSUPP;
-
prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
if (!prop)
return -ENOENT;
@@ -140,6 +140,22 @@ int ima_get_kexec_buffer(void **addr, size_t *size)
if (ret)
return ret;
+	/* Do a sanity check on the returned size of the ima-kexec buffer */
+ if (!tmp_size)
+ return -ENOENT;
+
+ /*
+ * Calculate the PFNs for the buffer and ensure
+	 * they are within addressable memory.
+ */
+ start_pfn = PHYS_PFN(tmp_addr);
+ end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);
+ if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {
+ pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n",
+ tmp_addr, tmp_size);
+ return -EINVAL;
+ }
+
*addr = __va(tmp_addr);
*size = tmp_size;
@@ -149,16 +165,13 @@ int ima_get_kexec_buffer(void **addr, size_t *size)
/**
* ima_free_kexec_buffer - free memory used by the IMA buffer
*/
-int ima_free_kexec_buffer(void)
+int __init ima_free_kexec_buffer(void)
{
int ret;
unsigned long addr;
size_t size;
struct property *prop;
- if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC))
- return -ENOTSUPP;
-
prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
if (!prop)
return -ENOENT;
@@ -173,6 +186,7 @@ int ima_free_kexec_buffer(void)
return memblock_phys_free(addr, size);
}
+#endif
/**
* remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75caa6f5d36f..65f3b02a0e4e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -156,7 +156,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
if (base == 0) {
- pr_info("failed to allocate memory for node '%s'\n", uname);
+ pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
+ uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 4044ddcb02c6..bd8ff4df723d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -903,12 +903,6 @@ static int of_overlay_apply(struct overlay_changeset *ovcs)
{
int ret = 0, ret_revert, ret_tmp;
- if (devicetree_corrupt()) {
- pr_err("devicetree state suspect, refuse to apply overlay\n");
- ret = -EBUSY;
- goto out;
- }
-
ret = of_resolve_phandles(ovcs->overlay_root);
if (ret)
goto out;
@@ -983,6 +977,11 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
*ret_ovcs_id = 0;
+ if (devicetree_corrupt()) {
+ pr_err("devicetree state suspect, refuse to apply overlay\n");
+ return -EBUSY;
+ }
+
if (overlay_fdt_size < sizeof(struct fdt_header) ||
fdt_check_header(overlay_fdt)) {
pr_err("Invalid overlay_fdt header\n");
@@ -1044,20 +1043,15 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
* goto err_free_ovcs. Instead, the caller of of_overlay_fdt_apply()
* can call of_overlay_remove();
*/
-
- mutex_unlock(&of_mutex);
- of_overlay_mutex_unlock();
-
*ret_ovcs_id = ovcs->id;
-
- return ret;
+ goto out_unlock;
err_free_ovcs:
free_overlay_changeset(ovcs);
+out_unlock:
mutex_unlock(&of_mutex);
of_overlay_mutex_unlock();
-
return ret;
}
EXPORT_SYMBOL_GPL(of_overlay_fdt_apply);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 7f6bba18c515..eafa8ffefbd0 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1602,7 +1602,7 @@ static int unittest_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, devptr);
- devptr->chip.of_node = pdev->dev.of_node;
+ devptr->chip.fwnode = dev_fwnode(&pdev->dev);
devptr->chip.label = "of-unittest-gpio";
devptr->chip.base = -1; /* dynamic allocation */
devptr->chip.ngpio = 5;
@@ -1611,7 +1611,7 @@ static int unittest_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&devptr->chip, NULL);
unittest(!ret,
- "gpiochip_add_data() for node @%pOF failed, ret = %d\n", devptr->chip.of_node, ret);
+ "gpiochip_add_data() for node @%pfw failed, ret = %d\n", devptr->chip.fwnode, ret);
if (!ret)
unittest_gpio_probe_pass_count++;
@@ -1620,20 +1620,19 @@ static int unittest_gpio_probe(struct platform_device *pdev)
static int unittest_gpio_remove(struct platform_device *pdev)
{
- struct unittest_gpio_dev *gdev = platform_get_drvdata(pdev);
+ struct unittest_gpio_dev *devptr = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
- dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
+ dev_dbg(dev, "%s for node @%pfw\n", __func__, devptr->chip.fwnode);
- if (!gdev)
+ if (!devptr)
return -EINVAL;
- if (gdev->chip.base != -1)
- gpiochip_remove(&gdev->chip);
+ if (devptr->chip.base != -1)
+ gpiochip_remove(&devptr->chip);
platform_set_drvdata(pdev, NULL);
- kfree(gdev);
+ kfree(devptr);
return 0;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 84063eaebb91..77d1ba3a4154 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -13,11 +13,12 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
#include "opp.h"
@@ -36,6 +37,9 @@ DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;
+/* OPP ID allocator */
+static DEFINE_XARRAY_ALLOC1(opp_configs);
+
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
@@ -93,6 +97,18 @@ struct opp_table *_find_opp_table(struct device *dev)
return opp_table;
}
+/*
+ * Returns true if multiple clocks aren't used, else returns false with a WARN.
+ *
+ * We don't force clk_count == 1 here as there are users who don't have a clock
+ * representation in the OPP table and manage the clock configuration themselves
+ * in a platform-specific way.
+ */
+static bool assert_single_clk(struct opp_table *opp_table)
+{
+ return !WARN_ON(opp_table->clk_count > 1);
+}
+
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@@ -114,6 +130,31 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
+ * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
+ * @opp: opp for which the supply information has to be returned
+ * @supplies: Placeholder for copying the supply information.
+ *
+ * Return: negative error number on failure, 0 otherwise on success after
+ * setting @supplies.
+ *
+ * This can be used for devices with any number of power supplies. The caller
+ * must ensure the @supplies array contains space for each regulator.
+ */
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
+ struct dev_pm_opp_supply *supplies)
+{
+ if (IS_ERR_OR_NULL(opp) || !supplies) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(supplies, opp->supplies,
+ sizeof(*supplies) * opp->opp_table->regulator_count);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
+
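A brief, hypothetical usage sketch (not part of the patch): a caller that knows how many regulators it registered can size the @supplies array accordingly and copy the supply data out of an OPP. The supply count of 2 and the function name are assumptions for illustration.

#include <linux/pm_opp.h>

/* Hypothetical caller: copy out the supplies of the OPP matching @freq. */
static int example_get_opp_supplies(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp_supply supplies[2];	/* caller registered 2 regulators */
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_get_supplies(opp, supplies);
	dev_pm_opp_put(opp);

	return ret;
}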
+/**
* dev_pm_opp_get_power() - Gets the power corresponding to an opp
* @opp: opp for which power has to be returned for
*
@@ -152,7 +193,10 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
- return opp->rate;
+ if (!assert_single_clk(opp->opp_table))
+ return 0;
+
+ return opp->rates[0];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
@@ -398,6 +442,154 @@ int dev_pm_opp_get_opp_count(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
+/* Helpers to read keys */
+static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
+{
+ return opp->rates[0];
+}
+
+static unsigned long _read_level(struct dev_pm_opp *opp, int index)
+{
+ return opp->level;
+}
+
+static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
+{
+ return opp->bandwidth[index].peak;
+}
+
+/* Generic comparison helpers */
+static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key == key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key >= key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key > key)
+ return true;
+
+ *opp = temp_opp;
+ return false;
+}
+
+/* Generic key finding helpers */
+static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ /* Assert that the requirement is met */
+ if (assert && !assert(opp_table))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ if (compare(&opp, temp_opp, read(temp_opp, index), *key))
+ break;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp)) {
+ *key = read(opp, index);
+ dev_pm_opp_get(opp);
+ }
+
+ mutex_unlock(&opp_table->lock);
+
+ return opp;
+}
+
+static struct dev_pm_opp *
+_find_key(struct device *dev, unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ opp = _opp_table_find_key(opp_table, key, index, available, read,
+ compare, assert);
+
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+
+static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ unsigned long key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ /*
+ * The value of key will be updated here, but will be ignored as the
+ * caller doesn't need it.
+ */
+ return _find_key(dev, &key, index, available, read, _compare_exact,
+ assert);
+}
+
+static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _opp_table_find_key(opp_table, key, index, available, read,
+ _compare_ceil, assert);
+}
+
+static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_ceil,
+ assert);
+}
+
+static struct dev_pm_opp *_find_key_floor(struct device *dev,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_floor,
+ assert);
+}
+
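To show how the generic read/compare/assert callbacks compose (the finders below are built exactly this way), here is a hypothetical finder that is not part of the patch: an exact peak-bandwidth lookup reusing the same helpers.

/* Hypothetical: exact peak-bandwidth lookup built on the generic helpers. */
static struct dev_pm_opp *example_find_bw_exact(struct device *dev,
						unsigned int bw, int index)
{
	return _find_key_exact(dev, bw, index, true, _read_bw, NULL);
}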
/**
* dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
@@ -422,61 +614,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq,
- bool available)
+ unsigned long freq, bool available)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available == available &&
- temp_opp->rate == freq) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, freq, 0, available, _read_freq,
+ assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- return opp;
+ return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
+ assert_single_clk);
}
/**
@@ -500,23 +649,7 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- opp = _find_freq_ceil(opp_table, freq);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -541,98 +674,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- /* go to the next node, before choosing prev */
- if (temp_opp->rate > *freq)
- break;
- else
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *freq = opp->rate;
-
- return opp;
+ return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
- * target voltage.
- * @dev: Device for which we do this operation.
- * @u_volt: Target voltage.
- *
- * Search for OPP with highest (ceil) frequency and has voltage <= u_volt.
- *
- * Return: matching *opp, else returns ERR_PTR in case of error which should be
- * handled using IS_ERR.
- *
- * Error return values can be:
- * EINVAL: bad parameters
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !u_volt) {
- dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
- u_volt);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- if (temp_opp->supplies[0].u_volt > u_volt)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
-
-/**
* dev_pm_opp_find_level_exact() - search for an exact level
* @dev: device for which we do this operation
* @level: level to search for
@@ -650,33 +696,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->level == level) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
@@ -698,33 +718,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->level >= *level) {
- opp = temp_opp;
- *level = opp->level;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *level;
+ struct dev_pm_opp *opp;
+ opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
+ *level = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
@@ -732,7 +730,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
/**
* dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching floor *available* OPP from a starting bandwidth
@@ -748,42 +746,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
* The callers are required to call dev_pm_opp_put() for the returned OPP after
* use.
*/
-struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
- unsigned int *bw, int index)
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+ int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- if (temp_opp->bandwidth[index].peak >= *bw) {
- opp = temp_opp;
- *bw = opp->bandwidth[index].peak;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+ opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
@@ -791,7 +761,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
/**
* dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching floor *available* OPP from a starting bandwidth
@@ -810,41 +780,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- /* go to the next node, before choosing prev */
- if (temp_opp->bandwidth[index].peak > *bw)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *bw = opp->bandwidth[index].peak;
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+ opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
@@ -874,80 +814,97 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return ret;
}
-static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long freq)
+static int
+_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down)
{
+ unsigned long *target = data;
+ unsigned long freq;
int ret;
- /* We may reach here for devices which don't change frequency */
- if (IS_ERR(clk))
- return 0;
+ /* One of target and opp must be available */
+ if (target) {
+ freq = *target;
+ } else if (opp) {
+ freq = opp->rates[0];
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
- ret = clk_set_rate(clk, freq);
+ ret = clk_set_rate(opp_table->clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
+ } else {
+ opp_table->rate_clk_single = freq;
}
return ret;
}
-static int _generic_set_opp_regulator(struct opp_table *opp_table,
- struct device *dev,
- struct dev_pm_opp *opp,
- unsigned long freq,
- int scaling_down)
+/*
+ * Simple implementation for configuring multiple clocks. Configure clocks in
+ * the order in which they are present in the array while scaling up, and in
+ * the reverse order while scaling down.
+ */
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- struct regulator *reg = opp_table->regulators[0];
- struct dev_pm_opp *old_opp = opp_table->current_opp;
+ int ret, i;
+
+ if (scaling_down) {
+ for (i = opp_table->clk_count - 1; i >= 0; i--) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ for (i = 0; i < opp_table->clk_count; i++) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
+
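As a hedged illustration of the config_clks callback contract (not part of the patch), a platform could supply its own implementation with the same signature and delegate the common case to dev_pm_opp_config_clks_simple(); how the callback gets registered is left to the OPP configuration interface and is not shown here.

/* Hypothetical platform callback: do platform prep, then use the simple helper. */
static int example_config_clks(struct device *dev, struct opp_table *opp_table,
			       struct dev_pm_opp *opp, void *data,
			       bool scaling_down)
{
	/* Platform-specific preparation could go here. */
	return dev_pm_opp_config_clks_simple(dev, opp_table, opp, data,
					     scaling_down);
}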
+static int _opp_config_regulator_single(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
+{
+ struct regulator *reg = regulators[0];
int ret;
/* This function only supports single regulator per device */
- if (WARN_ON(opp_table->regulator_count > 1)) {
+ if (WARN_ON(count > 1)) {
dev_err(dev, "multiple regulators are not supported\n");
return -EINVAL;
}
- /* Scaling up? Scale voltage before frequency */
- if (!scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ ret = _set_opp_voltage(dev, reg, new_opp->supplies);
if (ret)
- goto restore_voltage;
-
- /* Scaling down? Scale voltage after frequency */
- if (scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_freq;
- }
+ return ret;
/*
* Enable the regulator after setting its voltages, otherwise it breaks
* some boot-enabled regulators.
*/
- if (unlikely(!opp_table->enabled)) {
+ if (unlikely(!new_opp->opp_table->enabled)) {
ret = regulator_enable(reg);
if (ret < 0)
dev_warn(dev, "Failed to enable regulator: %d", ret);
}
return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_opp->rate);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- _set_opp_voltage(dev, reg, old_opp->supplies);
-
- return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
@@ -978,36 +935,6 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
-static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, struct dev_pm_opp *opp,
- unsigned long freq)
-{
- struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
- struct dev_pm_opp *old_opp = opp_table->current_opp;
- int size;
-
- /*
- * We support this only if dev_pm_opp_set_regulators() was called
- * earlier.
- */
- if (opp_table->sod_supplies) {
- size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
- memcpy(data->new_opp.supplies, opp->supplies, size);
- data->regulator_count = opp_table->regulator_count;
- } else {
- data->regulator_count = 0;
- }
-
- data->regulators = opp_table->regulators;
- data->clk = opp_table->clk;
- data->dev = dev;
- data->old_opp.rate = old_opp->rate;
- data->new_opp.rate = freq;
-
- return opp_table->set_opp(data);
-}
-
static int _set_required_opp(struct device *dev, struct device *pd_dev,
struct dev_pm_opp *opp, int i)
{
@@ -1019,7 +946,7 @@ static int _set_required_opp(struct device *dev, struct device *pd_dev,
ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
if (ret) {
- dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+ dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
dev_name(pd_dev), pstate, ret);
}
@@ -1138,7 +1065,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
}
static int _set_opp(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp, unsigned long freq)
+ struct dev_pm_opp *opp, void *clk_data, bool forced)
{
struct dev_pm_opp *old_opp;
int scaling_down, ret;
@@ -1153,18 +1080,17 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
old_opp = opp_table->current_opp;
/* Return early if nothing to do */
- if (old_opp == opp && opp_table->current_rate == freq &&
- opp_table->enabled) {
+ if (!forced && old_opp == opp && opp_table->enabled) {
dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
return 0;
}
dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
- __func__, opp_table->current_rate, freq, old_opp->level,
+ __func__, old_opp->rates[0], opp->rates[0], old_opp->level,
opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
opp->bandwidth ? opp->bandwidth[0].peak : 0);
- scaling_down = _opp_compare_key(old_opp, opp);
+ scaling_down = _opp_compare_key(opp_table, old_opp, opp);
if (scaling_down == -1)
scaling_down = 0;
@@ -1181,23 +1107,38 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
dev_err(dev, "Failed to set bw: %d\n", ret);
return ret;
}
- }
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, opp, freq);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
- scaling_down);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
}
- if (ret)
- return ret;
+ if (opp_table->config_clks) {
+ ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
+ if (ret)
+ return ret;
+ }
/* Scaling down? Configure required OPPs after frequency */
if (scaling_down) {
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = _set_opp_bw(opp_table, opp, dev);
if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret);
@@ -1217,7 +1158,6 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
/* Make sure current_opp doesn't get freed */
dev_pm_opp_get(opp);
opp_table->current_opp = opp;
- opp_table->current_rate = freq;
return ret;
}
@@ -1238,6 +1178,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
struct opp_table *opp_table;
unsigned long freq = 0, temp_freq;
struct dev_pm_opp *opp = NULL;
+ bool forced = false;
int ret;
opp_table = _find_opp_table(dev);
@@ -1255,7 +1196,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* equivalent to a clk_set_rate()
*/
if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ ret = opp_table->config_clks(dev, opp_table, NULL,
+ &target_freq, false);
goto put_opp_table;
}
@@ -1276,12 +1218,22 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, freq, ret);
goto put_opp_table;
}
+
+ /*
+ * An OPP entry specifies the highest frequency at which other
+ * properties of the OPP entry apply. Even if the new OPP is
+		 * the same as the old one, we may still reach here for a different
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+ forced = opp_table->rate_clk_single != target_freq;
}
- ret = _set_opp(dev, opp_table, opp, freq);
+ ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
if (target_freq)
dev_pm_opp_put(opp);
+
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
return ret;
@@ -1309,7 +1261,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
return PTR_ERR(opp_table);
}
- ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
+ ret = _set_opp(dev, opp_table, opp, NULL, false);
dev_pm_opp_put_opp_table(opp_table);
return ret;
@@ -1366,6 +1318,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
INIT_LIST_HEAD(&opp_table->dev_list);
INIT_LIST_HEAD(&opp_table->lazy);
+ opp_table->clk = ERR_PTR(-ENODEV);
+
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1412,20 +1366,38 @@ static struct opp_table *_update_opp_table_clk(struct device *dev,
int ret;
/*
- * Return early if we don't need to get clk or we have already tried it
+ * Return early if we don't need to get clk or we have already done it
* earlier.
*/
- if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
+ opp_table->clks)
return opp_table;
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
ret = PTR_ERR_OR_ZERO(opp_table->clk);
- if (!ret)
+ if (!ret) {
+ opp_table->config_clks = _opp_config_clk_single;
+ opp_table->clk_count = 1;
return opp_table;
+ }
if (ret == -ENOENT) {
+ /*
+		 * There are a few platforms which don't want the OPP core to
+		 * manage the device's clock settings. In such cases neither the
+ * platform provides the clks explicitly to us, nor the DT
+ * contains a valid clk entry. The OPP nodes in DT may still
+ * contain "opp-hz" property though, which we need to parse and
+ * allow the platform to find an OPP based on freq later on.
+ *
+ * This is a simple solution to take care of such corner cases,
+ * i.e. make the clk_count 1, which lets us allocate space for
+ * frequency in opp->rates and also parse the entries in DT.
+ */
+ opp_table->clk_count = 1;
+
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
return opp_table;
}
@@ -1528,7 +1500,7 @@ static void _opp_table_kref_release(struct kref *kref)
_of_clear_opp_table(opp_table);
- /* Release clk */
+ /* Release automatically acquired single clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
@@ -1581,7 +1553,7 @@ static void _opp_kref_release(struct kref *kref)
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
- _of_opp_free_required_opps(opp_table, opp);
+ _of_clear_opp(opp_table, opp);
opp_debug_remove_one(opp);
kfree(opp);
}
@@ -1613,10 +1585,13 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
+ if (!assert_single_clk(opp_table))
+ goto put_table;
+
mutex_lock(&opp_table->lock);
list_for_each_entry(iter, &opp_table->opp_list, node) {
- if (iter->rate == freq) {
+ if (iter->rates[0] == freq) {
opp = iter;
break;
}
@@ -1634,6 +1609,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
__func__, freq);
}
+put_table:
/* Drop the reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
@@ -1720,26 +1696,31 @@ void dev_pm_opp_remove_all_dynamic(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
-struct dev_pm_opp *_opp_allocate(struct opp_table *table)
+struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- int supply_count, supply_size, icc_size;
+ int supply_count, supply_size, icc_size, clk_size;
/* Allocate space for at least one supply */
- supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_count = opp_table->regulator_count > 0 ?
+ opp_table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * supply_count;
- icc_size = sizeof(*opp->bandwidth) * table->path_count;
+ clk_size = sizeof(*opp->rates) * opp_table->clk_count;
+ icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
/* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
-
+ opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
if (!opp)
return NULL;
- /* Put the supplies at the end of the OPP structure as an empty array */
+ /* Put the supplies, bw and clock at the end of the OPP structure */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+
+ opp->rates = (unsigned long *)(opp->supplies + supply_count);
+
if (icc_size)
- opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
+ opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
+
INIT_LIST_HEAD(&opp->node);
return opp;
@@ -1770,15 +1751,57 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+static int _opp_compare_rate(struct opp_table *opp_table,
+ struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+ int i;
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ if (opp1->rates[i] != opp2->rates[i])
+ return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
+ }
+
+ /* Same rates for both OPPs */
+ return 0;
+}
+
+static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
{
- if (opp1->rate != opp2->rate)
- return opp1->rate < opp2->rate ? -1 : 1;
- if (opp1->bandwidth && opp2->bandwidth &&
- opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
- return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+ int i;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
+ return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
+ }
+
+ /* Same bw for both OPPs */
+ return 0;
+}
+
+/*
+ * Returns
+ * 0: opp1 == opp2
+ * 1: opp1 > opp2
+ * -1: opp1 < opp2
+ */
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
+{
+ int ret;
+
+ ret = _opp_compare_rate(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
+ ret = _opp_compare_bw(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
if (opp1->level != opp2->level)
return opp1->level < opp2->level ? -1 : 1;
+
+ /* Duplicate OPPs */
return 0;
}
@@ -1798,7 +1821,7 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
- opp_cmp = _opp_compare_key(new_opp, opp);
+ opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
if (opp_cmp > 0) {
*head = &opp->node;
continue;
@@ -1809,8 +1832,8 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->supplies[0].u_volt,
- opp->available, new_opp->rate,
+ __func__, opp->rates[0], opp->supplies[0].u_volt,
+ opp->available, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
@@ -1831,7 +1854,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
opp->available = false;
pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
- __func__, opp->required_opps[i]->np, opp->rate);
+ __func__, opp->required_opps[i]->np, opp->rates[0]);
return;
}
}
@@ -1847,7 +1870,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
* should be considered an error by the callers of _opp_add().
*/
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table, bool rate_not_available)
+ struct opp_table *opp_table)
{
struct list_head *head;
int ret;
@@ -1872,7 +1895,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
- __func__, new_opp->rate);
+ __func__, new_opp->rates[0]);
}
/* required-opps not fully initialized yet */
@@ -1913,12 +1936,15 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol;
int ret;
+ if (!assert_single_clk(opp_table))
+ return -EINVAL;
+
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return -ENOMEM;
/* populate the opp table */
- new_opp->rate = freq;
+ new_opp->rates[0] = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->supplies[0].u_volt = u_volt;
new_opp->supplies[0].u_volt_min = u_volt - tol;
@@ -1926,7 +1952,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, opp_table, false);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -1948,7 +1974,7 @@ free_opp:
}
/**
- * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * _opp_set_supported_hw() - Set supported platforms
* @dev: Device for which supported-hw has to be set.
* @versions: Array of hierarchy of versions to match.
* @count: Number of elements in the array.
@@ -1958,87 +1984,42 @@ free_opp:
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
*/
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions, unsigned int count)
+static int _opp_set_supported_hw(struct opp_table *opp_table,
+ const u32 *versions, unsigned int count)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
if (opp_table->supported_hw)
- return opp_table;
+ return 0;
opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
GFP_KERNEL);
- if (!opp_table->supported_hw) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
- }
+ if (!opp_table->supported_hw)
+ return -ENOMEM;
opp_table->supported_hw_count = count;
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
+ * _opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @opp_table: OPP table for which supported-hw was set via _opp_set_supported_hw().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * _opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->supported_hw);
- opp_table->supported_hw = NULL;
- opp_table->supported_hw_count = 0;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
-
-static void devm_pm_opp_supported_hw_release(void *data)
+static void _opp_put_supported_hw(struct opp_table *opp_table)
{
- dev_pm_opp_put_supported_hw(data);
-}
-
-/**
- * devm_pm_opp_set_supported_hw() - Set supported platforms
- * @dev: Device for which supported-hw has to be set.
- * @versions: Array of hierarchy of versions to match.
- * @count: Number of elements in the array.
- *
- * This is a resource-managed variant of dev_pm_opp_set_supported_hw().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
- unsigned int count)
-{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_set_supported_hw(dev, versions, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_supported_hw_release,
- opp_table);
+ if (opp_table->supported_hw) {
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
+ }
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * _opp_set_prop_name() - Set prop-extn name
* @dev: Device for which the prop-name has to be set.
* @name: name to postfix to properties.
*
@@ -2047,53 +2028,36 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
*/
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
- if (opp_table->prop_name)
- return opp_table;
-
- opp_table->prop_name = kstrdup(name, GFP_KERNEL);
if (!opp_table->prop_name) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name)
+ return -ENOMEM;
}
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
- * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
+ * _opp_put_prop_name() - Releases resources blocked for prop-name
+ * @opp_table: OPP table for which the prop-name was set via _opp_set_prop_name().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * _opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
+static void _opp_put_prop_name(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->prop_name);
- opp_table->prop_name = NULL;
-
- dev_pm_opp_put_opp_table(opp_table);
+ if (opp_table->prop_name) {
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
+ }
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/**
- * dev_pm_opp_set_regulators() - Set regulator names for the device
+ * _opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
* @names: Array of pointers to the names of the regulator.
* @count: Number of regulators.
@@ -2104,36 +2068,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
+static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
+ const char * const names[])
{
- struct dev_pm_opp_supply *supplies;
- struct opp_table *opp_table;
+ const char * const *temp = names;
struct regulator *reg;
- int ret, i;
+ int count = 0, ret, i;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of regulators */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ if (!count)
+ return -EINVAL;
/* Another CPU that shares the OPP table has set the regulators ? */
if (opp_table->regulators)
- return opp_table;
+ return 0;
opp_table->regulators = kmalloc_array(count,
sizeof(*opp_table->regulators),
GFP_KERNEL);
- if (!opp_table->regulators) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!opp_table->regulators)
+ return -ENOMEM;
for (i = 0; i < count; i++) {
reg = regulator_get_optional(dev, names[i]);
@@ -2149,21 +2106,11 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
- if (!supplies) {
- ret = -ENOMEM;
- goto free_regulators;
- }
-
- mutex_lock(&opp_table->lock);
- opp_table->sod_supplies = supplies;
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = supplies;
- opp_table->set_opp_data->new_opp.supplies = supplies + count;
- }
- mutex_unlock(&opp_table->lock);
+ /* Set generic config_regulators() for single regulators here */
+ if (count == 1)
+ opp_table->config_regulators = _opp_config_regulator_single;
- return opp_table;
+ return 0;
free_regulators:
while (i != 0)
@@ -2172,26 +2119,20 @@ free_regulators:
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-err:
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(ret);
+ return ret;
}
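
With the count argument gone, the regulator names array is expected to be NULL terminated. An illustrative sketch, not part of the patch, of what a caller would now pass (the "vdd"/"vbb" names are borrowed from the TI supply driver below):

	static const char * const example_reg_names[] = { "vdd", "vbb", NULL };
	/* _opp_set_regulators() walks the array up to the NULL sentinel, so two
	 * supplies are counted here without a separate count argument. */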
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
/**
- * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
- * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ * _opp_put_regulators() - Releases resources blocked for regulator
+ * @opp_table: OPP table for which regulators were set via _opp_set_regulators().
*/
-void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+static void _opp_put_regulators(struct opp_table *opp_table)
{
int i;
- if (unlikely(!opp_table))
- return;
-
if (!opp_table->regulators)
- goto put_opp_table;
+ return;
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
@@ -2201,252 +2142,158 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- mutex_lock(&opp_table->lock);
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = NULL;
- opp_table->set_opp_data->new_opp.supplies = NULL;
- }
-
- kfree(opp_table->sod_supplies);
- opp_table->sod_supplies = NULL;
- mutex_unlock(&opp_table->lock);
-
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
-static void devm_pm_opp_regulators_release(void *data)
+static void _put_clks(struct opp_table *opp_table, int count)
{
- dev_pm_opp_put_regulators(data);
-}
-
-/**
- * devm_pm_opp_set_regulators() - Set regulator names for the device
- * @dev: Device for which regulator name is being set.
- * @names: Array of pointers to the names of the regulator.
- * @count: Number of regulators.
- *
- * This is a resource-managed variant of dev_pm_opp_set_regulators().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
-{
- struct opp_table *opp_table;
+ int i;
- opp_table = dev_pm_opp_set_regulators(dev, names, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ for (i = count - 1; i >= 0; i--)
+ clk_put(opp_table->clks[i]);
- return devm_add_action_or_reset(dev, devm_pm_opp_regulators_release,
- opp_table);
+ kfree(opp_table->clks);
+ opp_table->clks = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_regulators);
/**
- * dev_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * In order to support OPP switching, OPP layer needs to get pointer to the
- * clock for the device. Simple cases work fine without using this routine (i.e.
- * by passing connection-id as NULL), but for a device with multiple clocks
- * available, the OPP core needs to know the exact name of the clk to use.
+ * _opp_set_clknames() - Set clk names for the device
+ * @dev: Device for which clk names are being set.
+ * @names: Clk names.
+ * @config_clks: Platform specific config_clks() callback, mandatory for multiple clocks.
+ *
+ * In order to support OPP switching, OPP layer needs to get pointers to the
+ * clocks for the device. Simple cases work fine without using this routine
+ * (i.e. by passing connection-id as NULL), but for a device with multiple
+ * clocks available, the OPP core needs to know the exact names of the clks to
+ * use.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
+ const char * const names[],
+ config_clks_t config_clks)
{
- struct opp_table *opp_table;
- int ret;
+ const char * const *temp = names;
+ int count = 0, ret, i;
+ struct clk *clk;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of clks */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ /*
+ * This is a special case where we have a single clock, whose connection
+ * id name is NULL, i.e. first two entries are NULL in the array.
+ */
+ if (!count && !names[1])
+ count = 1;
- /* clk shouldn't be initialized at this point */
- if (WARN_ON(opp_table->clk)) {
- ret = -EBUSY;
- goto err;
- }
+ /* Fail early for invalid configurations */
+ if (!count || (!config_clks && count > 1))
+ return -EINVAL;
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, name);
- if (IS_ERR(opp_table->clk)) {
- ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
- "%s: Couldn't find clock\n", __func__);
- goto err;
- }
+ /* Another CPU that shares the OPP table has set the clkname ? */
+ if (opp_table->clks)
+ return 0;
- return opp_table;
+ opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
+ GFP_KERNEL);
+ if (!opp_table->clks)
+ return -ENOMEM;
-err:
- dev_pm_opp_put_opp_table(opp_table);
+ /* Find clks for the device */
+ for (i = 0; i < count; i++) {
+ clk = clk_get(dev, names[i]);
+ if (IS_ERR(clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(clk),
+ "%s: Couldn't find clock with name: %s\n",
+ __func__, names[i]);
+ goto free_clks;
+ }
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
+ opp_table->clks[i] = clk;
+ }
-/**
- * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
- * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
- */
-void dev_pm_opp_put_clkname(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
+ opp_table->clk_count = count;
+ opp_table->config_clks = config_clks;
- clk_put(opp_table->clk);
- opp_table->clk = ERR_PTR(-EINVAL);
+	/* Set the generic single-clk handling here */
+ if (count == 1) {
+ if (!opp_table->config_clks)
+ opp_table->config_clks = _opp_config_clk_single;
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
+ /*
+ * We could have just dropped the "clk" field and used "clks"
+ * everywhere. Instead we kept the "clk" field around for
+ * following reasons:
+ *
+ * - avoiding clks[0] everywhere else.
+ * - not running single clk helpers for multiple clk usecase by
+ * mistake.
+ *
+ * Since this is single-clk case, just update the clk pointer
+ * too.
+ */
+ opp_table->clk = opp_table->clks[0];
+ }
-static void devm_pm_opp_clkname_release(void *data)
-{
- dev_pm_opp_put_clkname(data);
+ return 0;
+
+free_clks:
+ _put_clks(opp_table, i);
+ return ret;
}
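
The clk names follow the same NULL-terminated convention. An illustrative sketch, not part of the patch, of the two array shapes accepted by the checks above:

	static const char * const two_clks[] = { "core", "mem", NULL };	/* requires a config_clks() callback */
	static const char * const default_clk[] = { NULL, NULL };	/* single clock with a NULL connection id */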
/**
- * devm_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * This is a resource-managed variant of dev_pm_opp_set_clkname().
- *
- * Return: 0 on success and errorno otherwise.
+ * _opp_put_clknames() - Releases resources blocked for clks.
+ * @opp_table: OPP table for which clk names were set via _opp_set_clknames().
*/
-int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+static void _opp_put_clknames(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
+ if (!opp_table->clks)
+ return;
- opp_table = dev_pm_opp_set_clkname(dev, name);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ opp_table->config_clks = NULL;
+ opp_table->clk = ERR_PTR(-ENODEV);
- return devm_add_action_or_reset(dev, devm_pm_opp_clkname_release,
- opp_table);
+ _put_clks(opp_table, opp_table->clk_count);
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_clkname);
/**
- * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * _opp_set_config_regulators_helper() - Register custom set regulator helper.
* @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
+ * @config_regulators: Custom set regulator helper.
*
- * This is useful to support complex platforms (like platforms with multiple
- * regulators per device), instead of the generic OPP set rate helper.
+ * This is useful to support platforms with multiple regulators per device.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
+ struct device *dev, config_regulators_t config_regulators)
{
- struct dev_pm_set_opp_data *data;
- struct opp_table *opp_table;
-
- if (!set_opp)
- return ERR_PTR(-EINVAL);
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-EBUSY);
- }
-
/* Another CPU that shares the OPP table has set the helper ? */
- if (opp_table->set_opp)
- return opp_table;
+ if (!opp_table->config_regulators)
+ opp_table->config_regulators = config_regulators;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&opp_table->lock);
- opp_table->set_opp_data = data;
- if (opp_table->sod_supplies) {
- data->old_opp.supplies = opp_table->sod_supplies;
- data->new_opp.supplies = opp_table->sod_supplies +
- opp_table->regulator_count;
- }
- mutex_unlock(&opp_table->lock);
-
- opp_table->set_opp = set_opp;
-
- return opp_table;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
-
-/**
- * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
- * set_opp helper
- * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
- *
- * Release resources blocked for platform specific set_opp helper.
- */
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- opp_table->set_opp = NULL;
-
- mutex_lock(&opp_table->lock);
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
- mutex_unlock(&opp_table->lock);
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
-
-static void devm_pm_opp_unregister_set_opp_helper(void *data)
-{
- dev_pm_opp_unregister_set_opp_helper(data);
+ return 0;
}
/**
- * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
- * @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
- *
- * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ * _opp_put_config_regulators_helper() - Releases resources blocked for
+ * config_regulators helper.
+ * @opp_table: OPP table for which the helper was set via _opp_set_config_regulators_helper().
*
- * Return: 0 on success and errorno otherwise.
+ * Release resources blocked for platform specific config_regulators helper.
*/
-int devm_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
- opp_table);
+ if (opp_table->config_regulators)
+ opp_table->config_regulators = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
-static void _opp_detach_genpd(struct opp_table *opp_table)
+static void _detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2466,7 +2313,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
}
/**
- * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
* @dev: Consumer device for which the genpd is getting attached.
* @names: Null terminated array of pointers containing names of genpd to attach.
* @virt_devs: Pointer to return the array of virtual devices.
@@ -2487,30 +2334,23 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
* The order of entries in the names array must match the order in which
* "required-opps" are added in DT.
*/
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
- const char * const *names, struct device ***virt_devs)
+static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ const char * const *names, struct device ***virt_devs)
{
- struct opp_table *opp_table;
struct device *virt_dev;
int index = 0, ret = -EINVAL;
const char * const *name = names;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
if (opp_table->genpd_virt_devs)
- return opp_table;
+ return 0;
/*
* If the genpd's OPP table isn't already initialized, parsing of the
* required-opps fail for dev. We should retry this after genpd's OPP
* table is added.
*/
- if (!opp_table->required_opp_count) {
- ret = -EPROBE_DEFER;
- goto put_table;
- }
+ if (!opp_table->required_opp_count)
+ return -EPROBE_DEFER;
mutex_lock(&opp_table->genpd_virt_dev_lock);
@@ -2528,8 +2368,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
}
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
- if (IS_ERR(virt_dev)) {
- ret = PTR_ERR(virt_dev);
+ if (IS_ERR_OR_NULL(virt_dev)) {
+ ret = PTR_ERR(virt_dev) ? : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
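
A note on the IS_ERR_OR_NULL() change above, illustrative only:

	/*
	 * dev_pm_domain_attach_by_name() may return NULL as well as an ERR_PTR,
	 * and PTR_ERR(NULL) evaluates to 0, so the "?:" fallback is what maps a
	 * NULL result to -ENODEV instead of silently reporting success.
	 */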
@@ -2543,73 +2383,230 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
*virt_devs = opp_table->genpd_virt_devs;
mutex_unlock(&opp_table->genpd_virt_dev_lock);
- return opp_table;
+ return 0;
err:
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
unlock:
mutex_unlock(&opp_table->genpd_virt_dev_lock);
+ return ret;
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
/**
- * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
- * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
+ * _opp_detach_genpd() - Detach genpd(s) from the device.
+ * @opp_table: OPP table for which genpd(s) were attached via _opp_attach_genpd().
*
* This detaches the genpd(s), resets the virtual device pointers, and puts the
* OPP table.
*/
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
+static void _opp_detach_genpd(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
*/
mutex_lock(&opp_table->genpd_virt_dev_lock);
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
mutex_unlock(&opp_table->genpd_virt_dev_lock);
-
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
-static void devm_pm_opp_detach_genpd(void *data)
+static void _opp_clear_config(struct opp_config_data *data)
{
- dev_pm_opp_detach_genpd(data);
+ if (data->flags & OPP_CONFIG_GENPD)
+ _opp_detach_genpd(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR)
+ _opp_put_regulators(data->opp_table);
+ if (data->flags & OPP_CONFIG_SUPPORTED_HW)
+ _opp_put_supported_hw(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
+ _opp_put_config_regulators_helper(data->opp_table);
+ if (data->flags & OPP_CONFIG_PROP_NAME)
+ _opp_put_prop_name(data->opp_table);
+ if (data->flags & OPP_CONFIG_CLK)
+ _opp_put_clknames(data->opp_table);
+
+ dev_pm_opp_put_opp_table(data->opp_table);
+ kfree(data);
}
/**
- * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
- * device pointer
- * @dev: Consumer device for which the genpd is getting attached.
- * @names: Null terminated array of pointers containing names of genpd to attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * dev_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
*
- * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ * This allows all device OPP configurations to be performed at once.
*
- * Return: 0 on success and errorno otherwise.
+ * This must be called before any OPPs are initialized for the device. This may
+ * be called multiple times for the same OPP table, for example once for each
+ * CPU that share the same table. This must be balanced by the same number of
+ * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
+ *
+ * This returns a token to the caller, which must be passed to
+ * dev_pm_opp_clear_config() to free the resources later. The value of the
+ * returned token will be >= 1 for success and negative for errors. The minimum
+ * value of 1 is chosen here to make it easy for callers to manage the resource.
*/
-int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names,
- struct device ***virt_devs)
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
struct opp_table *opp_table;
+ struct opp_config_data *data;
+ unsigned int id;
+ int ret;
- opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
- if (IS_ERR(opp_table))
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ opp_table = _add_opp_table(dev, false);
+ if (IS_ERR(opp_table)) {
+ kfree(data);
return PTR_ERR(opp_table);
+ }
+
+ data->opp_table = opp_table;
+ data->flags = 0;
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Configure clocks */
+ if (config->clk_names) {
+ ret = _opp_set_clknames(opp_table, dev, config->clk_names,
+ config->config_clks);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_CLK;
+ } else if (config->config_clks) {
+ /* Don't allow config callback without clocks */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Configure property names */
+ if (config->prop_name) {
+ ret = _opp_set_prop_name(opp_table, config->prop_name);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_PROP_NAME;
+ }
+
+ /* Configure config_regulators helper */
+ if (config->config_regulators) {
+ ret = _opp_set_config_regulators_helper(opp_table, dev,
+ config->config_regulators);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR_HELPER;
+ }
- return devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
- opp_table);
+ /* Configure supported hardware */
+ if (config->supported_hw) {
+ ret = _opp_set_supported_hw(opp_table, config->supported_hw,
+ config->supported_hw_count);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_SUPPORTED_HW;
+ }
+
+ /* Configure supplies */
+ if (config->regulator_names) {
+ ret = _opp_set_regulators(opp_table, dev,
+ config->regulator_names);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR;
+ }
+
+ /* Attach genpds */
+ if (config->genpd_names) {
+ ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
+ config->virt_devs);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_GENPD;
+ }
+
+ ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ return id;
+
+err:
+ _opp_clear_config(data);
+ return ret;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
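
An illustrative usage sketch of the new token-based API, not part of the patch; the field names follow the struct dev_pm_opp_config members handled above, and the "core"/"vdd" names are placeholders:

	#include <linux/pm_opp.h>

	static int example_opp_setup(struct device *dev)
	{
		struct dev_pm_opp_config config = {
			.clk_names = (const char * const []){ "core", NULL },
			.regulator_names = (const char * const []){ "vdd", NULL },
		};
		int token;

		token = dev_pm_opp_set_config(dev, &config);
		if (token < 0)
			return token;

		/* ... add OPPs, scale frequencies ... */

		dev_pm_opp_clear_config(token);
		return 0;
	}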
+
+/**
+ * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
+ * @token: The token returned by dev_pm_opp_set_config() previously.
+ *
+ * This allows all device OPP configurations to be cleared at once. This must be
+ * called once for each call made to dev_pm_opp_set_config(), in order to free
+ * the OPPs properly.
+ *
+ * Currently the first call itself ends up freeing all the OPP configurations,
+ * while the later ones only drop the OPP table reference. This works well for
+ * now as we would never want to use a half-initialized OPP table and want to
+ * remove the configurations together.
+ */
+void dev_pm_opp_clear_config(int token)
+{
+ struct opp_config_data *data;
+
+ /*
+ * This lets the callers call this unconditionally and keep their code
+ * simple.
+ */
+ if (unlikely(token <= 0))
+ return;
+
+ data = xa_erase(&opp_configs, token);
+ if (WARN_ON(!data))
+ return;
+
+ _opp_clear_config(data);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
+
+static void devm_pm_opp_config_release(void *token)
+{
+ dev_pm_opp_clear_config((unsigned long)token);
+}
+
+/**
+ * devm_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
+ *
+ * This allows all device OPP configurations to be performed at once.
+ * This is a resource-managed variant of dev_pm_opp_set_config().
+ *
+ * Return: 0 on success and a negative error value otherwise.
+ */
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
+{
+ int token = dev_pm_opp_set_config(dev, config);
+
+ if (token < 0)
+ return token;
+
+ return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
+ (void *) ((unsigned long) token));
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
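
With the managed variant the token is kept by devres, so a probe path reduces to the following illustrative sketch (not part of the patch):

	static int example_probe(struct device *dev, struct dev_pm_opp_config *config)
	{
		/* dev_pm_opp_clear_config() runs automatically on driver detach */
		return devm_pm_opp_set_config(dev, config);
	}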
/**
* dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
@@ -2795,11 +2792,16 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2866,11 +2868,16 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2897,11 +2904,11 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
opp);
dev_pm_opp_put(opp);
- goto adjust_put_table;
+ goto put_table;
adjust_unlock:
mutex_unlock(&opp_table->lock);
-adjust_put_table:
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 5004335cf0de..3c3506021501 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -41,7 +41,7 @@
* the table if any of the mentioned functions have been invoked in the interim.
*/
int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = NULL;
@@ -76,7 +76,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
- *table = &freq_table[0];
+ *opp_table = &freq_table[0];
out:
if (ret)
@@ -94,13 +94,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
- if (!table)
+ if (!opp_table)
return;
- kfree(*table);
- *table = NULL;
+ kfree(*opp_table);
+ *opp_table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 1b6e5c55c3ed..96a30a032c5f 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -74,6 +74,24 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp,
}
}
+static void opp_debug_create_clks(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ char name[12];
+ int i;
+
+ if (opp_table->clk_count == 1) {
+ debugfs_create_ulong("rate_hz", S_IRUGO, pdentry, &opp->rates[0]);
+ return;
+ }
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ snprintf(name, sizeof(name), "rate_hz_%d", i);
+ debugfs_create_ulong(name, S_IRUGO, pdentry, &opp->rates[i]);
+ }
+}
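
The resulting debugfs layout, derived from the snprintf() pattern above (illustrative):

	/*
	 * Single clock:     <opp dir>/rate_hz                     (unchanged name)
	 * Multiple clocks:  <opp dir>/rate_hz_0, rate_hz_1, ...   (one file per clock)
	 */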
+
static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
@@ -117,10 +135,11 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
* Get directory name for OPP.
*
* - Normally rate is unique to each OPP, use it to get unique opp-name.
- * - For some devices rate isn't available, use index instead.
+ * - For some devices the rate isn't available or there are multiple rates,
+ *   so use the index for them instead.
*/
- if (likely(opp->rate))
- id = opp->rate;
+ if (likely(opp_table->clk_count == 1 && opp->rates[0]))
+ id = opp->rates[0];
else
id = _get_opp_count(opp_table);
@@ -134,7 +153,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
- debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
debugfs_create_u32("level", S_IRUGO, d, &opp->level);
debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
&opp->clock_latency_ns);
@@ -142,6 +160,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
opp->of_name = of_node_full_name(opp->np);
debugfs_create_str("of_name", S_IRUGO, d, (char **)&opp->of_name);
+ opp_debug_create_clks(opp, opp_table, d);
opp_debug_create_supplies(opp, opp_table, d);
opp_debug_create_bw(opp, opp_table, d);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 30394929d700..605d68673f92 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -242,20 +242,20 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
opp_table->np = opp_np;
_opp_table_alloc_required_tables(opp_table, dev, opp_np);
- of_node_put(opp_np);
}
void _of_clear_opp_table(struct opp_table *opp_table)
{
_opp_table_free_required_tables(opp_table);
+ of_node_put(opp_table->np);
}
/*
* Release all resources previously acquired with a call to
* _of_opp_alloc_required_opps().
*/
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+static void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
{
struct dev_pm_opp **required_opps = opp->required_opps;
int i;
@@ -275,6 +275,12 @@ void _of_opp_free_required_opps(struct opp_table *opp_table,
kfree(required_opps);
}
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
+{
+ _of_opp_free_required_opps(opp_table, opp);
+ of_node_put(opp->np);
+}
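
Illustrative note on the reference counting introduced here:

	/*
	 * The matching of_node_get() is taken in _opp_add_static_v2() below
	 * (new_opp->np = of_node_get(np)), so every static OPP now pins its DT
	 * node until _of_clear_opp() drops it, while the table node itself is
	 * held until _of_clear_opp_table().
	 */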
+
/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
struct dev_pm_opp *opp)
@@ -767,7 +773,51 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ struct property *prop;
+ int i, count, ret;
+ u64 *rates;
+
+ prop = of_find_property(np, "opp-hz", NULL);
+ if (!prop)
+ return -ENODEV;
+
+ count = prop->length / sizeof(u64);
+ if (opp_table->clk_count != count) {
+ pr_err("%s: Count mismatch between opp-hz and clk_count (%d %d)\n",
+ __func__, count, opp_table->clk_count);
+ return -EINVAL;
+ }
+
+ rates = kmalloc_array(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ ret = of_property_read_u64_array(np, "opp-hz", rates, count);
+ if (ret) {
+ pr_err("%s: Error parsing opp-hz: %d\n", __func__, ret);
+ } else {
+ /*
+		 * Rate is defined as an unsigned long in the clk API, so cast
+		 * explicitly to its type. This must be fixed once the clk API
+		 * guarantees 64-bit rates.
+ */
+ for (i = 0; i < count; i++) {
+ new_opp->rates[i] = (unsigned long)rates[i];
+
+ /* This will happen for frequencies > 4.29 GHz */
+ WARN_ON(new_opp->rates[i] != rates[i]);
+ }
+ }
+
+ kfree(rates);
+
+ return ret;
+}
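
A hedged note on the WARN_ON() above, assuming a 32-bit build where unsigned long is 32 bits wide:

	/*
	 * Illustrative only: (unsigned long)5000000000ULL == 705032704 on 32-bit,
	 * so any opp-hz entry above ~4.29 GHz fails the new_opp->rates[i] !=
	 * rates[i] round-trip check and triggers the warning.
	 */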
+
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
struct device_node *np, bool peak)
{
const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
@@ -780,9 +830,9 @@ static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
return -ENODEV;
count = prop->length / sizeof(u32);
- if (table->path_count != count) {
+ if (opp_table->path_count != count) {
pr_err("%s: Mismatch between %s and paths (%d %d)\n",
- __func__, name, count, table->path_count);
+ __func__, name, count, opp_table->path_count);
return -EINVAL;
}
@@ -808,34 +858,27 @@ out:
return ret;
}
-static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
- struct device_node *np, bool *rate_not_available)
+static int _read_opp_key(struct dev_pm_opp *new_opp,
+ struct opp_table *opp_table, struct device_node *np)
{
bool found = false;
- u64 rate;
int ret;
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (!ret) {
- /*
- * Rate is defined as an unsigned long in clk API, and so
- * casting explicitly to its type. Must be fixed once rate is 64
- * bit guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
+ ret = _read_rate(new_opp, opp_table, np);
+ if (!ret)
found = true;
- }
- *rate_not_available = !!ret;
+ else if (ret != -ENODEV)
+ return ret;
/*
* Bandwidth consists of peak and average (optional) values:
* opp-peak-kBps = <path1_value path2_value>;
* opp-avg-kBps = <path1_value path2_value>;
*/
- ret = _read_bw(new_opp, table, np, true);
+ ret = _read_bw(new_opp, opp_table, np, true);
if (!ret) {
found = true;
- ret = _read_bw(new_opp, table, np, false);
+ ret = _read_bw(new_opp, opp_table, np, false);
}
/* The properties were found but we failed to parse them */
@@ -881,13 +924,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
struct dev_pm_opp *new_opp;
u32 val;
int ret;
- bool rate_not_available = false;
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return ERR_PTR(-ENOMEM);
- ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+ ret = _read_opp_key(new_opp, opp_table, np);
if (ret < 0) {
dev_err(dev, "%s: opp key field not found\n", __func__);
goto free_opp;
@@ -895,14 +937,14 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %lu\n",
- new_opp->rate);
+ dev_dbg(dev, "OPP not supported by hardware: %s\n",
+ of_node_full_name(np));
goto free_opp;
}
new_opp->turbo = of_property_read_bool(np, "turbo-mode");
- new_opp->np = np;
+ new_opp->np = of_node_get(np);
new_opp->dynamic = false;
new_opp->available = true;
@@ -920,7 +962,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (opp_table->is_genpd)
new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
- ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -931,8 +973,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
if (opp_table->suspend_opp) {
- /* Pick the OPP with higher rate as suspend OPP */
- if (new_opp->rate > opp_table->suspend_opp->rate) {
+ /* Pick the OPP with higher rate/bw/level as suspend OPP */
+ if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) {
opp_table->suspend_opp->suspend = false;
new_opp->suspend = true;
opp_table->suspend_opp = new_opp;
@@ -947,7 +989,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
- __func__, new_opp->turbo, new_opp->rate,
+ __func__, new_opp->turbo, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
new_opp->level);
@@ -1084,7 +1126,7 @@ remove_static_opp:
return ret;
}
-static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _of_add_table_indexed(struct device *dev, int index)
{
struct opp_table *opp_table;
int ret, count;
@@ -1100,7 +1142,7 @@ static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
index = 0;
}
- opp_table = _add_opp_table_indexed(dev, index, getclk);
+ opp_table = _add_opp_table_indexed(dev, index, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -1124,11 +1166,11 @@ static void devm_pm_opp_of_table_release(void *data)
dev_pm_opp_of_remove_table(data);
}
-static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _devm_of_add_table_indexed(struct device *dev, int index)
{
int ret;
- ret = _of_add_table_indexed(dev, index, getclk);
+ ret = _of_add_table_indexed(dev, index);
if (ret)
return ret;
@@ -1156,7 +1198,7 @@ static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk
*/
int devm_pm_opp_of_add_table(struct device *dev)
{
- return _devm_of_add_table_indexed(dev, 0, true);
+ return _devm_of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
@@ -1179,7 +1221,7 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table(struct device *dev)
{
- return _of_add_table_indexed(dev, 0, true);
+ return _of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
@@ -1195,7 +1237,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _of_add_table_indexed(dev, index, true);
+ return _of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
@@ -1208,42 +1250,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
*/
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _devm_of_add_table_indexed(dev, index, true);
+ return _devm_of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
-/**
- * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * Register the initial OPP table with the OPP library for given device only
- * using the "operating-points-v2" property. Do not try to get the clk for the
- * device.
- *
- * Return: Refer to dev_pm_opp_of_add_table() for return values.
- */
-int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
-
-/**
- * devm_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * This is a resource-managed variant of dev_pm_opp_of_add_table_noclk().
- */
-int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _devm_of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_noclk);
-
/* CPU device specific helpers */
/**
@@ -1443,12 +1453,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
* It provides the power used by @dev at @kHz if it is the frequency of an
* existing OPP, or at the frequency of the first OPP above @kHz otherwise
* (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
- * frequency and @mW to the associated power.
+ * frequency and @uW to the associated power.
*
* Returns 0 on success or a proper -EINVAL value in case of error.
*/
static int __maybe_unused
-_get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz)
+_get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
{
struct dev_pm_opp *opp;
unsigned long opp_freq, opp_power;
@@ -1465,7 +1475,7 @@ _get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz)
return -EINVAL;
*kHz = opp_freq / 1000;
- *mW = opp_power / 1000;
+ *uW = opp_power;
return 0;
}
@@ -1475,14 +1485,14 @@ _get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz)
* This computes the power estimated by @dev at @kHz if it is the frequency
* of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
* (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
- * frequency and @mW to the associated power. The power is estimated as
+ * frequency and @uW to the associated power. The power is estimated as
* P = C * V^2 * f with C being the device's capacitance and V and f
* respectively the voltage and frequency of the OPP.
*
* Returns -EINVAL if the power calculation failed because of missing
* parameters, 0 otherwise.
*/
-static int __maybe_unused _get_power(struct device *dev, unsigned long *mW,
+static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
unsigned long *kHz)
{
struct dev_pm_opp *opp;
@@ -1512,9 +1522,10 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *mW,
return -EINVAL;
tmp = (u64)cap * mV * mV * (Hz / 1000000);
- do_div(tmp, 1000000000);
+ /* Provide power in micro-Watts */
+ do_div(tmp, 1000000);
- *mW = (unsigned long)tmp;
+ *uW = (unsigned long)tmp;
*kHz = Hz / 1000;
return 0;
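
A quick check of the new scaling, assuming cap is the DT dynamic-power-coefficient in uW/MHz/V^2 as used by this helper (illustrative only):

	/*
	 *   cap = 500, mV = 1000, Hz = 1000000000:
	 *
	 *   tmp = 500 * 1000 * 1000 * (1000000000 / 1000000) = 5e11
	 *   tmp / 1000000 = 500000   -> reported as 500000 uW (0.5 W)
	 *
	 * i.e. dividing by 1e6 instead of 1e9 is what makes the result micro-Watts.
	 */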
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 45e3a55239a1..3a6e077df386 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -28,6 +28,27 @@ extern struct mutex opp_table_lock;
extern struct list_head opp_tables, lazy_opp_tables;
+/* OPP Config flags */
+#define OPP_CONFIG_CLK BIT(0)
+#define OPP_CONFIG_REGULATOR BIT(1)
+#define OPP_CONFIG_REGULATOR_HELPER BIT(2)
+#define OPP_CONFIG_PROP_NAME BIT(3)
+#define OPP_CONFIG_SUPPORTED_HW BIT(4)
+#define OPP_CONFIG_GENPD BIT(5)
+
+/**
+ * struct opp_config_data - data for set config operations
+ * @opp_table: OPP table
+ * @flags: OPP config flags
+ *
+ * This structure stores the OPP configuration done by a caller for an OPP
+ * table, so that it can be cleared later via dev_pm_opp_clear_config().
+ */
+struct opp_config_data {
+ struct opp_table *opp_table;
+ unsigned int flags;
+};
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -58,7 +79,7 @@ extern struct list_head opp_tables, lazy_opp_tables;
* @suspend: true if suspend OPP
* @removed: flag indicating that OPP's reference is dropped by OPP core.
* @pstate: Device's power domain's performance state.
- * @rate: Frequency in hertz
+ * @rates: Frequencies in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
* @bandwidth: Interconnect bandwidth values
@@ -81,7 +102,7 @@ struct dev_pm_opp {
bool suspend;
bool removed;
unsigned int pstate;
- unsigned long rate;
+ unsigned long *rates;
unsigned int level;
struct dev_pm_opp_supply *supplies;
@@ -138,7 +159,7 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
- * @current_rate: Currently configured frequency.
+ * @rate_clk_single: Currently configured frequency for single clk.
* @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -149,7 +170,11 @@ enum opp_table_access {
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
- * @clk: Device's clock handle
+ * @config_clks: Platform specific config_clks() callback.
+ * @clks: Device's clock handles, for multiple clocks.
+ * @clk: Device's clock handle, for single clock.
+ * @clk_count: Number of clocks.
+ * @config_regulators: Platform specific config_regulators() callback.
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
@@ -159,9 +184,6 @@ enum opp_table_access {
* @enabled: Set to true if the device's resources are enabled/configured.
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
- * @set_opp: Platform specific set_opp callback
- * @sod_supplies: Set opp data supplies
- * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -188,7 +210,7 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
- unsigned long current_rate;
+ unsigned long rate_clk_single;
struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
@@ -200,7 +222,11 @@ struct opp_table {
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
+ config_clks_t config_clks;
+ struct clk **clks;
struct clk *clk;
+ int clk_count;
+ config_regulators_t config_regulators;
struct regulator **regulators;
int regulator_count;
struct icc_path **paths;
@@ -209,10 +235,6 @@ struct opp_table {
bool genpd_performance_state;
bool is_genpd;
- int (*set_opp)(struct dev_pm_set_opp_data *data);
- struct dev_pm_opp_supply *sod_supplies;
- struct dev_pm_set_opp_data *set_opp_data;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
char dentry_name[NAME_MAX];
@@ -228,8 +250,8 @@ struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
@@ -245,14 +267,12 @@ static inline bool lazy_linking_pending(struct opp_table *opp_table)
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
void _of_clear_opp_table(struct opp_table *opp_table);
struct opp_table *_managed_opp(struct device *dev, int index);
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp);
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
-static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp) {}
+static inline void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) {}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index bd4771f388ab..8f3f13fbbb25 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -36,11 +36,15 @@ struct ti_opp_supply_optimum_voltage_table {
* @vdd_table: Optimized voltage mapping table
* @num_vdd_table: number of entries in vdd_table
* @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ * @old_supplies: Placeholder for supplies information for old OPP.
+ * @new_supplies: Placeholder for supplies information for new OPP.
*/
struct ti_opp_supply_data {
struct ti_opp_supply_optimum_voltage_table *vdd_table;
u32 num_vdd_table;
u32 vdd_absolute_max_voltage_uv;
+ struct dev_pm_opp_supply old_supplies[2];
+ struct dev_pm_opp_supply new_supplies[2];
};
static struct ti_opp_supply_data opp_data;
@@ -266,27 +270,32 @@ static int _opp_set_voltage(struct device *dev,
return 0;
}
-/**
- * ti_opp_supply_set_opp() - do the opp supply transition
- * @data: information on regulators and new and old opps provided by
- * opp core to use in transition
- *
- * Return: If successful, 0, else appropriate error value.
- */
-static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+/* Do the opp supply transition */
+static int ti_opp_config_regulators(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
{
- struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
- struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
- struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
- struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
- struct device *dev = data->dev;
- unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
- struct clk *clk = data->clk;
- struct regulator *vdd_reg = data->regulators[0];
- struct regulator *vbb_reg = data->regulators[1];
+ struct dev_pm_opp_supply *old_supply_vdd = &opp_data.old_supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &opp_data.old_supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &opp_data.new_supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &opp_data.new_supplies[1];
+ struct regulator *vdd_reg = regulators[0];
+ struct regulator *vbb_reg = regulators[1];
+ unsigned long old_freq, freq;
int vdd_uv;
int ret;
+ /* We must have two regulators here */
+ WARN_ON(count != 2);
+
+ /* Fetch supplies and freq information from OPP core */
+ ret = dev_pm_opp_get_supplies(new_opp, opp_data.new_supplies);
+ WARN_ON(ret);
+
+ old_freq = dev_pm_opp_get_freq(old_opp);
+ freq = dev_pm_opp_get_freq(new_opp);
+ WARN_ON(!old_freq || !freq);
+
vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
new_supply_vdd->u_volt);
@@ -303,39 +312,24 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
goto restore_voltage;
- }
-
- /* Change frequency */
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
- }
-
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
+ } else {
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
"vdd");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
}
return 0;
-restore_freq:
- ret = clk_set_rate(clk, old_freq);
- if (ret)
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
restore_voltage:
+ /* Fetch old supplies information only if required */
+ ret = dev_pm_opp_get_supplies(old_opp, opp_data.old_supplies);
+ WARN_ON(ret);
+
/* This shouldn't harm even if the voltages weren't updated earlier */
if (old_supply_vdd->u_volt) {
ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
@@ -405,9 +399,8 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
return ret;
}
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
- ti_opp_supply_set_opp));
- if (ret)
+ ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
+ if (ret < 0)
_free_optimized_voltages(dev, &opp_data);
return ret;
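
The ti-opp-supply hunk above tracks the OPP core's move from the all-in-one set_opp() helper to a narrower config_regulators() callback: the core now performs clk_set_rate() itself and only calls the driver back to sequence its regulators, with supply and frequency data fetched on demand through dev_pm_opp_get_supplies() and dev_pm_opp_get_freq(). A minimal sketch of the new registration pattern for a hypothetical single-supply device (my_config_regulators() and my_probe() are illustrative names, not part of this patch):

    #include <linux/pm_opp.h>
    #include <linux/regulator/consumer.h>

    static int my_config_regulators(struct device *dev,
                                    struct dev_pm_opp *old_opp,
                                    struct dev_pm_opp *new_opp,
                                    struct regulator **regulators,
                                    unsigned int count)
    {
            struct dev_pm_opp_supply supply;
            int ret;

            /* One regulator expected for this hypothetical device */
            if (WARN_ON(count != 1))
                    return -EINVAL;

            ret = dev_pm_opp_get_supplies(new_opp, &supply);
            if (ret)
                    return ret;

            /* Only the voltage is handled here; the OPP core sets the clock rate */
            return regulator_set_voltage_triplet(regulators[0],
                                                 supply.u_volt_min,
                                                 supply.u_volt,
                                                 supply.u_volt_max);
    }

    static int my_probe(struct device *dev)
    {
            int ret;

            /* The OPP core must already know about this device's regulators */
            ret = dev_pm_opp_set_config_regulators(dev, my_config_regulators);
            if (ret < 0)
                    return ret;

            return 0;
    }
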
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 8a3b0c3a1e92..3a8c98615634 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -677,7 +677,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d,
if (dest_cpu < 0)
return -1;
- cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
+ irq_data_update_affinity(d, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags);
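
irq_data_update_affinity() is the accessor that replaces open-coded cpumask_copy() on the mask returned by irq_data_get_affinity_mask(), so drivers keep working even if the affinity mask is stored out of line. A minimal sketch of the pattern in an irq_set_affinity handler (names are illustrative, not taken from this patch):

    #include <linux/irq.h>

    static int my_set_affinity(struct irq_data *d, const struct cpumask *mask,
                               bool force)
    {
            int cpu = cpumask_first_and(mask, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* Record the effective affinity instead of writing the mask directly */
            irq_data_update_affinity(d, cpumask_of(cpu));

            return IRQ_SET_MASK_OK;
    }
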
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 732b516c7bf8..afc6e66ddc31 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1476,9 +1476,13 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap(dev->hpa.start, 4096);
+ void __iomem *addr;
int max;
+ addr = ioremap(dev->hpa.start, 4096);
+ if (addr == NULL)
+ return -ENOMEM;
+
/* Read HW Rev First */
func_class = READ_REG32(addr + LBA_FCLASS);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 133c73207782..55c028af4bd9 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -121,6 +121,9 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_DOE
+ bool
+
config PCI_ECAM
bool
@@ -164,6 +167,11 @@ config PCI_PASID
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
depends on ZONE_DEVICE
+ #
+ # The need for the scatterlist DMA bus address flag means PCI P2PDMA
+ # requires 64bit
+ #
+ depends on 64BIT
select GENERIC_ALLOCATOR
help
	  Enables drivers to do PCI peer-to-peer transactions to and from
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0da6b1ebc694..2680e4c92f0a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
+obj-$(CONFIG_PCI_DOE) += doe.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index b8d96d38064d..d1c5fcf00a8a 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -237,7 +237,7 @@ config PCIE_ROCKCHIP_EP
config PCIE_MEDIATEK
tristate "MediaTek PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -293,7 +293,7 @@ config PCI_HYPERV_INTERFACE
config PCI_LOONGSON
bool "LOONGSON PCI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST
- depends on OF
+ depends on OF || ACPI
depends on PCI_QUIRKS
default MACH_LOONGSON64
help
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 52767f26048f..13c4032ca379 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -243,7 +243,6 @@ err_phy:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
struct cdns_pcie *pcie = dev_get_drvdata(dev);
@@ -266,9 +265,8 @@ static int cdns_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
const struct dev_pm_ops cdns_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
- cdns_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+ cdns_pcie_resume_noirq)
};
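
The pcie-cadence change (and the matching dra7xx, exynos and imx6 hunks below) switches from SET_NOIRQ_SYSTEM_SLEEP_PM_OPS under #ifdef CONFIG_PM_SLEEP to the newer NOIRQ_SYSTEM_SLEEP_PM_OPS/SYSTEM_SLEEP_PM_OPS macros, which reference the callbacks unconditionally and let the compiler discard them when sleep support is disabled, so the #ifdef and __maybe_unused annotations can go. A minimal sketch of the resulting pattern (driver and callback names are hypothetical):

    #include <linux/pm.h>

    static int my_suspend_noirq(struct device *dev)
    {
            /* quiesce the hardware */
            return 0;
    }

    static int my_resume_noirq(struct device *dev)
    {
            /* re-initialise the hardware */
            return 0;
    }

    /*
     * No #ifdef CONFIG_PM_SLEEP and no __maybe_unused needed: the macro always
     * references the callbacks and the compiler drops them when unused.
     */
    static const struct dev_pm_ops my_pm_ops = {
            NOIRQ_SYSTEM_SLEEP_PM_OPS(my_suspend_noirq, my_resume_noirq)
    };
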
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dfcdeb432dc8..38462ed11d07 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -178,7 +178,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -202,7 +202,7 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
@@ -224,7 +224,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
return 1;
}
-static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret, i, count, num_ctrls;
@@ -255,8 +255,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
- struct pcie_port *pp;
unsigned long reg;
u32 bit;
@@ -344,7 +344,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -475,7 +475,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
{
int ret;
struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
pp->irq = platform_get_irq(pdev, 1);
@@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return pp->irq;
/* MSI IRQ is muxed */
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
@@ -862,7 +862,6 @@ err_link:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
@@ -919,7 +918,6 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
@@ -940,9 +938,9 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
- dra7xx_pcie_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
};
static struct platform_driver dra7xx_pcie_driver = {
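
Most of the DesignWare hunks in this series are mechanical: the host-bridge state struct pcie_port is renamed to the less ambiguous dw_pcie_rp, and pp->msi_irq becomes a small array so controllers with several MSI controller blocks can wire one interrupt per group (dra7xx and exynos mark slot 0 with -ENODEV because their MSI is muxed onto the main IRQ). A minimal sketch against the renamed type, assuming a hypothetical wrapper driver (my_pcie, my_pcie_host_init and friends are illustrative):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include "pcie-designware.h"

    /* Hypothetical controller wrapper, mirroring the drivers touched above */
    struct my_pcie {
            struct dw_pcie pci;
    };

    static int my_pcie_host_init(struct dw_pcie_rp *pp)
    {
            struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

            dev_dbg(pci->dev, "root port initialised\n");
            return 0;
    }

    static const struct dw_pcie_host_ops my_pcie_host_ops = {
            .host_init = my_pcie_host_init,
    };

    static int my_add_pcie_port(struct my_pcie *priv, struct platform_device *pdev)
    {
            struct dw_pcie_rp *pp = &priv->pci.pp;

            pp->irq = platform_get_irq(pdev, 1);
            if (pp->irq < 0)
                    return pp->irq;

            /* MSI is muxed onto the main IRQ, as on dra7xx/exynos */
            pp->msi_irq[0] = -ENODEV;

            pp->ops = &my_pcie_host_ops;
            return dw_pcie_host_init(pp);
    }
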
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 467c8d1cd7e4..ec5611005566 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -249,7 +249,7 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
return (val & PCIE_ELBI_XMLH_LINKUP);
}
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
@@ -258,9 +258,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
exynos_pcie_assert_core_reset(ep);
- phy_reset(ep->phy);
- phy_power_on(ep->phy);
phy_init(ep->phy);
+ phy_power_on(ep->phy);
exynos_pcie_deassert_core_reset(ep);
exynos_pcie_enable_irq_pulse(ep);
@@ -276,7 +275,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -292,7 +291,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
}
pp->ops = &exynos_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -390,7 +389,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+static int exynos_pcie_suspend_noirq(struct device *dev)
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
@@ -402,11 +401,11 @@ static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+static int exynos_pcie_resume_noirq(struct device *dev)
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
@@ -421,8 +420,8 @@ static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops exynos_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
- exynos_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
};
static const struct of_device_id exynos_pcie_of_match[] = {
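
The exynos hunk reorders PHY bring-up to match the generic PHY framework's expected sequence: phy_init() before phy_power_on(), with the mirrored phy_power_off()/phy_exit() on teardown (the old phy_reset() call is dropped). A minimal sketch of the expected pairing, assuming the driver already holds a struct phy * (helper names are illustrative):

    #include <linux/phy/phy.h>

    static int my_phy_up(struct phy *phy)
    {
            int ret;

            ret = phy_init(phy);          /* configure the PHY first */
            if (ret)
                    return ret;

            ret = phy_power_on(phy);      /* then power it up */
            if (ret)
                    phy_exit(phy);

            return ret;
    }

    static void my_phy_down(struct phy *phy)
    {
            phy_power_off(phy);           /* reverse order on the way down */
            phy_exit(phy);
    }
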
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 7a285fb0f619..6e5debdbc55b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -67,6 +67,7 @@ struct imx6_pcie {
struct dw_pcie *pci;
int reset_gpio;
bool gpio_active_high;
+ bool link_is_up;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
@@ -146,6 +147,31 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MM);
+ return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+ unsigned int mask, val;
+
+ if (imx6_pcie->drvdata->variant == IMX8MQ &&
+ imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -271,6 +297,134 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ /*
+ * The PHY initialization had been done in the PHY
+ * driver, break here directly.
+ */
+ break;
+ case IMX8MQ:
+ /*
+ * TODO: Currently this code assumes external
+ * oscillator is being used
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * Regarding the datasheet, the PCIE_VPH is suggested
+ * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
+ * VREG_BYPASS should be cleared to zero.
+ */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+ fallthrough;
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
+
+ imx6_pcie_configure_type(imx6_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u16 val;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return 0;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u16 tmp;
@@ -367,61 +521,6 @@ static int imx6_pcie_attach_pd(struct device *dev)
return 0;
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- }
-
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- int ret = regulator_disable(imx6_pcie->vpcie);
-
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
-
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
-}
-
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
-{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
-}
-
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -482,38 +581,44 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
- u32 val;
- struct device *dev = imx6_pcie->pci->dev;
-
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
- IOMUXC_GPR22, val,
- val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX,
- PHY_PLL_LOCK_WAIT_TIMEOUT))
- dev_err(dev, "PCIe PLL lock timeout\n");
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD,
+ IMX6Q_GPR1_PCIE_TEST_PD);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MM:
+ case IMX8MQ:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ default:
+ break;
+ }
}
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
int ret;
- if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
- ret = regulator_enable(imx6_pcie->vpcie);
- if (ret) {
- dev_err(dev, "failed to enable vpcie regulator: %d\n",
- ret);
- return;
- }
- }
-
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
+ return ret;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -534,25 +639,75 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
goto err_ref_clk;
}
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
+
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+
+ return ret;
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ imx6_pcie_disable_ref_clk(imx6_pcie);
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX8MQ:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
case IMX8MM:
- if (phy_power_on(imx6_pcie->phy))
- dev_err(dev, "unable to power on PHY\n");
+ reset_control_assert(imx6_pcie->apps_reset);
break;
- default:
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
- /* allow the clocks to stabilize */
- usleep_range(200, 500);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
+}
+
+static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
- case IMX8MM:
- if (phy_init(imx6_pcie->phy))
- dev_err(dev, "waiting for phy ready timeout!\n");
- break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -588,6 +743,7 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
usleep_range(200, 500);
break;
case IMX6Q: /* Nothing to do */
+ case IMX8MM:
break;
}
@@ -600,153 +756,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
msleep(100);
}
- return;
-
-err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- ret = regulator_disable(imx6_pcie->vpcie);
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
-}
-
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
-{
- unsigned int mask, val;
-
- if (imx6_pcie->drvdata->variant == IMX8MQ &&
- imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- }
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
-}
-
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
-{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
-
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
-
- imx6_pcie_configure_type(imx6_pcie);
-}
-
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
-{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
- int mult, div;
- u16 val;
-
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
- return 0;
-
- switch (phy_rate) {
- case 125000000:
- /*
- * The default settings of the MPLL are for a 125MHz input
- * clock, so no need to reconfigure anything in that case.
- */
- return 0;
- case 100000000:
- mult = 25;
- div = 0;
- break;
- case 200000000:
- mult = 25;
- div = 1;
- break;
- default:
- dev_err(imx6_pcie->pci->dev,
- "Unsupported PHY reference clock rate %lu\n", phy_rate);
- return -EINVAL;
- }
-
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
- val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
- PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
- val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
- val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
-
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
- val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
- PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
- val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
- val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
-
return 0;
}
@@ -789,6 +798,25 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
}
}
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MM:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
static int imx6_pcie_start_link(struct dw_pcie *pci)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -802,21 +830,26 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
+ dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- dw_pcie_wait_for_link(pci);
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
- if (pci->link_gen == 2) {
- /* Allow Gen2 mode after the link is up. */
+ if (pci->link_gen > 1) {
+ /* Allow faster modes after the link is up */
+ dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ tmp |= pci->link_gen;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -826,6 +859,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
if (imx6_pcie->drvdata->flags &
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
@@ -846,34 +880,110 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
}
/* Make sure link training is finished as well! */
- dw_pcie_wait_for_link(pci);
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
} else {
- dev_info(dev, "Link: Gen2 disabled\n");
+ dev_info(dev, "Link: Only Gen1 is enabled\n");
}
+ imx6_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
+ imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
- return ret;
+ return 0;
+}
+
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+
+ /* Turn off PCIe LTSSM */
+ imx6_pcie_ltssm_disable(dev);
}
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ int ret;
+
+ if (imx6_pcie->vpcie) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
+
+ ret = imx6_pcie_clk_enable(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+ goto err_reg_disable;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY power up failed\n");
+ goto err_clk_disable;
+ }
+ }
+
+ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_init(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "waiting for PHY ready timeout!\n");
+ goto err_phy_off;
+ }
+ }
imx6_setup_phy_mpll(imx6_pcie);
return 0;
+
+err_phy_off:
+ if (imx6_pcie->phy)
+ phy_power_off(imx6_pcie->phy);
+err_clk_disable:
+ imx6_pcie_clk_disable(imx6_pcie);
+err_reg_disable:
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+ return ret;
+}
+
+static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ if (imx6_pcie->phy) {
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(pci->dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
+ }
+ imx6_pcie_clk_disable(imx6_pcie);
+
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
}
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
@@ -884,26 +994,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx6_pcie_start_link,
};
-#ifdef CONFIG_PM_SLEEP
-static void imx6_pcie_ltssm_disable(struct device *dev)
-{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MM:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- default:
- dev_err(dev, "ltssm_disable not supported\n");
- }
-}
-
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@@ -941,49 +1031,17 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
-{
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MQ:
- case IMX8MM:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_ltssm_disable(dev);
- imx6_pcie_clk_disable(imx6_pcie);
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- if (phy_power_off(imx6_pcie->phy))
- dev_err(dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
- break;
- default:
- break;
- }
+ imx6_pcie_stop_link(imx6_pcie->pci);
+ imx6_pcie_host_exit(pp);
return 0;
}
@@ -992,27 +1050,25 @@ static int imx6_pcie_resume_noirq(struct device *dev)
{
int ret;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct pcie_port *pp = &imx6_pcie->pci->pp;
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx6_pcie_host_init(pp);
+ if (ret)
+ return ret;
dw_pcie_setup_rc(pp);
- ret = imx6_pcie_start_link(imx6_pcie->pci);
- if (ret < 0)
- dev_info(dev, "pcie link is down after resume.\n");
+ if (imx6_pcie->link_is_up)
+ imx6_pcie_start_link(imx6_pcie->pci);
return 0;
}
-#endif
static const struct dev_pm_ops imx6_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
};
static int imx6_pcie_probe(struct platform_device *pdev)
@@ -1291,7 +1347,7 @@ static struct platform_driver imx6_pcie_driver = {
static void imx6_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
/* Bus parent is the PCI bridge, its parent is this platform driver */
if (!bus->dev.parent || !bus->dev.parent->parent)
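
The imx6 rework concentrates all power-up (regulator, clocks, PHY, resets) in imx6_pcie_host_init() with the mirror teardown in imx6_pcie_host_exit(), so system suspend/resume can reuse those paths instead of duplicating the sequence, and a new link_is_up flag records whether retraining is worth attempting on resume. A condensed sketch of that suspend/resume symmetry, with hypothetical my_* names standing in for the imx6 functions shown above:

    static int my_suspend_noirq(struct device *dev)
    {
            struct my_pcie *priv = dev_get_drvdata(dev);

            my_pcie_stop_link(&priv->pci);        /* turn off the LTSSM */
            my_pcie_host_exit(&priv->pci.pp);     /* PHY, clocks, regulator off */
            return 0;
    }

    static int my_resume_noirq(struct device *dev)
    {
            struct my_pcie *priv = dev_get_drvdata(dev);
            struct dw_pcie_rp *pp = &priv->pci.pp;
            int ret;

            ret = my_pcie_host_init(pp);          /* same path as cold probe */
            if (ret)
                    return ret;

            dw_pcie_setup_rc(pp);

            /* Only retrain if the link was up before suspend */
            if (priv->link_is_up)
                    my_pcie_start_link(&priv->pci);

            return 0;
    }
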
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index d10e5fd0f83c..78818853af9e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -109,7 +109,7 @@ struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
- unsigned int version;
+ u32 version;
};
struct keystone_pcie {
@@ -147,7 +147,7 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -167,7 +167,7 @@ static void ks_pcie_msi_irq_ack(struct irq_data *data)
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
u64 msi_target;
@@ -192,7 +192,7 @@ static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
static void ks_pcie_msi_mask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -216,7 +216,7 @@ static void ks_pcie_msi_mask(struct irq_data *data)
static void ks_pcie_msi_unmask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -247,7 +247,7 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.irq_unmask = ks_pcie_msi_unmask,
};
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
@@ -390,7 +390,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
u64 start, end;
struct resource *mem;
int i;
@@ -428,7 +428,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
@@ -456,7 +456,7 @@ static struct pci_ops ks_child_pcie_ops = {
*/
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -574,7 +574,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 vector, reg, pos;
@@ -799,7 +799,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
return 0;
}
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -1069,19 +1069,19 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
- .version = 0x365A,
+ .version = DW_PCIE_VER_365A,
};
static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
.host_ops = &ks_pcie_am654_host_ops,
.mode = DW_PCIE_RC_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
.ep_ops = &ks_pcie_am654_ep_ops,
.mode = DW_PCIE_EP_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct of_device_id ks_pcie_of_match[] = {
@@ -1114,12 +1114,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct device_link **link;
struct gpio_desc *gpiod;
struct resource *res;
- unsigned int version;
void __iomem *base;
u32 num_viewport;
struct phy **phy;
u32 num_lanes;
char name[10];
+ u32 version;
int ret;
int irq;
int i;
@@ -1233,7 +1233,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- if (pci->version >= 0x480A)
+ if (dw_pcie_ver_is_ge(pci, 480A))
ret = ks_pcie_am654_set_mode(dev, mode);
else
ret = ks_pcie_set_mode(dev);
@@ -1324,7 +1324,7 @@ static struct platform_driver ks_pcie_driver __refdata = {
.remove = __exit_p(ks_pcie_remove),
.driver = {
.name = "keystone-pcie",
- .of_match_table = of_match_ptr(ks_pcie_of_match),
+ .of_match_table = ks_pcie_of_match,
},
};
builtin_platform_driver(ks_pcie_driver);
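
The keystone changes replace raw hex IP-version literals with the DW_PCIE_VER_* constants provided by the core, and version checks go through helpers such as dw_pcie_ver_is_ge() instead of open-coded integer compares; of_match_ptr() is also dropped so the match table is always referenced. A small sketch of a version-gated decision using the helper (the quirk itself is hypothetical; only the helper usage follows the hunk above):

    #include "pcie-designware.h"

    static void my_apply_quirks(struct dw_pcie *pci)
    {
            /* Cores 4.80a and newer take the AM654-style path, as above */
            if (dw_pcie_ver_is_ge(pci, 480A))
                    dev_info(pci->dev, "using AM654-style mode selection\n");
            else
                    dev_info(pci->dev, "using legacy mode selection\n");
    }
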
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 39f4664bd84c..ad99707b3b99 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -32,15 +32,6 @@ struct ls_pcie_ep {
const struct ls_pcie_ep_drvdata *drvdata;
};
-static int ls_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
- .start_link = ls_pcie_establish_link,
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
@@ -106,19 +97,16 @@ static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
.func_offset = 0x20000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
.func_offset = 0x8000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct of_device_id ls_pcie_ep_of_match[] = {
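
The layerscape-ep driver can drop its stub start_link() because the endpoint core now goes through dw_pcie_start_link()/dw_pcie_stop_link() wrappers (see the designware-ep.c hunk further down) that treat a missing ops->start_link/stop_link as a no-op rather than an error. A sketch of what such a tolerant wrapper looks like; this is an assumption about the helpers added in pcie-designware.h, which are not shown in this diff:

    /* Sketch of the wrappers assumed by dw_pcie_ep_start()/dw_pcie_ep_stop() */
    static inline int dw_pcie_start_link(struct dw_pcie *pci)
    {
            if (pci->ops && pci->ops->start_link)
                    return pci->ops->start_link(pci);

            return 0;       /* no controller-specific handling needed */
    }

    static inline void dw_pcie_stop_link(struct dw_pcie *pci)
    {
            if (pci->ops && pci->ops->stop_link)
                    pci->ops->stop_link(pci);
    }
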
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 6a4f0619bb1c..879b8692f96a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -74,7 +74,7 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
-static int ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index f44bf347904a..c1527693bed9 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -370,7 +370,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index e8afa50129a8..b8cb77c9c4bd 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -217,7 +217,7 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
@@ -245,7 +245,7 @@ static struct pci_ops al_child_pci_ops = {
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
- struct pcie_port *pp = &pcie->pci->pp;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
u32 cfg_control_offset;
u8 subordinate_bus;
@@ -289,7 +289,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
}
-static int al_pcie_host_init(struct pcie_port *pp)
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct al_pcie *pcie = to_al_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 4e2552dcf982..dc469ef8e99b 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -166,7 +166,7 @@ static int armada8k_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
{
u32 reg;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -233,7 +233,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -343,7 +343,7 @@ static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
.driver = {
.name = "armada8k-pcie",
- .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ .of_match_table = armada8k_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 2f15441770e1..98102079e26d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -97,7 +97,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct dw_pcie_ep *ep = &pci->ep;
switch (artpec6_pcie->mode) {
@@ -315,7 +315,7 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0eda8236c125..83ddb190292e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -154,22 +154,25 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_barno bar, dma_addr_t cpu_addr,
- enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t cpu_addr, enum pci_barno bar)
{
int ret;
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar];
+
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
- as_type);
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -185,8 +188,9 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
- u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
if (free_win >= pci->num_ob_windows) {
@@ -194,8 +198,10 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+ if (ret)
+ return ret;
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
@@ -213,38 +219,40 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
clear_bit(atu_index, ep->ib_window_map);
ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- enum dw_pcie_as_type as_type;
- u32 reg;
unsigned int func_offset = 0;
+ int ret, type;
+ u32 reg;
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
- as_type = DW_PCIE_AS_MEM;
+ type = PCIE_ATU_TYPE_MEM;
else
- as_type = DW_PCIE_AS_IO;
+ type = PCIE_ATU_TYPE_IO;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
- epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
if (ret)
return ret;
+ if (ep->epf_bar[bar])
+ return 0;
+
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
@@ -289,7 +297,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if (ret < 0)
return;
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -435,8 +443,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (pci->ops && pci->ops->stop_link)
- pci->ops->stop_link(pci);
+ dw_pcie_stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -444,10 +451,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops || !pci->ops->start_link)
- return -EINVAL;
-
- return pci->ops->start_link(pci);
+ return dw_pcie_start_link(pci);
}
static const struct pci_epc_features*
@@ -699,17 +703,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (!pci->dbi_base2) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- if (!res)
+ if (!res) {
pci->dbi_base2 = pci->dbi_base + SZ_4K;
- else {
+ } else {
pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
}
}
- dw_pcie_iatu_detect(pci);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
@@ -717,17 +719,17 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ib_windows),
- sizeof(long),
- GFP_KERNEL);
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
- ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ob_windows),
- sizeof(long),
- GFP_KERNEL);
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
@@ -780,8 +782,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->window.page_size);
if (!ep->msi_mem) {
+ ret = -ENOMEM;
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
- return -ENOMEM;
+ goto err_exit_epc_mem;
}
if (ep->ops->get_features) {
@@ -790,6 +793,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
- return dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_free_epc_mem;
+
+ return 0;
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
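
Two smaller cleanups in dw_pcie_ep_init() are worth noting: the iATU window bitmaps move from open-coded devm_kcalloc(BITS_TO_LONGS(...)) allocations to devm_bitmap_zalloc(), and a failed MSI-memory allocation now unwinds the endpoint memory region instead of leaking it. A minimal sketch of the bitmap helper (the wrapper function is illustrative):

    #include <linux/bitmap.h>
    #include <linux/device.h>

    static int my_alloc_window_map(struct device *dev, unsigned int num_windows,
                                   unsigned long **map)
    {
            /* One bit per iATU window; devm_bitmap_zalloc sizes it correctly */
            *map = devm_bitmap_zalloc(dev, num_windows, GFP_KERNEL);
            if (!*map)
                    return -ENOMEM;

            return 0;
    }
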
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 9979302532b7..7746f94a715f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -53,7 +53,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
};
/* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
int i, pos;
unsigned long val;
@@ -88,7 +88,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
chained_irq_enter(chip, desc);
@@ -100,7 +100,7 @@ static void dw_chained_msi_isr(struct irq_desc *desc)
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
@@ -123,7 +123,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
static void dw_pci_bottom_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -142,7 +142,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
static void dw_pci_bottom_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -161,7 +161,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
@@ -185,7 +185,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
@@ -213,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
@@ -229,7 +229,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
.free = dw_pcie_irq_domain_free,
};
-int dw_pcie_allocate_domains(struct pcie_port *pp)
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
@@ -255,10 +255,15 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return 0;
}
-static void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
- if (pp->msi_irq)
- irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
@@ -267,12 +272,13 @@ static void dw_pcie_free_msi(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
}
-static void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
@@ -285,7 +291,112 @@ static void dw_pcie_msi_init(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-int dw_pcie_host_init(struct pcie_port *pp)
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+ /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+ u32 ctrl, num_ctrls;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ }
+
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ pp->msi_page = alloc_page(GFP_DMA32);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dev, pp->msi_data);
+ if (ret) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
+ pp->msi_data = 0;
+ dw_pcie_free_msi(pp);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -293,17 +404,17 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
struct pci_host_bridge *bridge;
- struct resource *cfg_res;
+ struct resource *res;
int ret;
- raw_spin_lock_init(&pci->pp.lock);
+ raw_spin_lock_init(&pp->lock);
- cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res);
- pp->cfg0_base = cfg_res->start;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (res) {
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
} else {
@@ -312,8 +423,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pci->dbi_base) {
- struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
}
@@ -350,67 +461,39 @@ int dw_pcie_host_init(struct pcie_port *pp)
of_property_read_bool(np, "msi-parent") ||
of_property_read_bool(np, "msi-map"));
- if (!pp->num_vectors) {
+ /*
+ * For the has_msi_ctrl case the default assignment is handled
+ * in the dw_pcie_msi_host_init().
+ */
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
} else if (pp->num_vectors > MAX_MSI_IRQS) {
dev_err(dev, "Invalid number of vectors\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_deinit_host;
}
if (pp->ops->msi_host_init) {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- return ret;
+ goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
- u32 ctrl, num_ctrls;
-
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- pp->irq_mask[ctrl] = ~0;
-
- if (!pp->msi_irq) {
- pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
- if (pp->msi_irq < 0) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
- }
-
- pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
-
- ret = dw_pcie_allocate_domains(pp);
- if (ret)
- return ret;
-
- if (pp->msi_irq > 0)
- irq_set_chained_handler_and_data(pp->msi_irq,
- dw_chained_msi_isr,
- pp);
-
- ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
-
- pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
- sizeof(pp->msi_msg),
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- ret = dma_mapping_error(pci->dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- pp->msi_data = 0;
- goto err_free_msi;
- }
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
}
}
+ dw_pcie_version_detect(pci);
+
dw_pcie_iatu_detect(pci);
- dw_pcie_setup_rc(pp);
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_free_msi;
- if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
- ret = pci->ops->start_link(pci);
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
if (ret)
goto err_free_msi;
}
@@ -421,32 +504,50 @@ int dw_pcie_host_init(struct pcie_port *pp)
bridge->sysdata = pp;
ret = pci_host_probe(bridge);
- if (!ret)
- return 0;
+ if (ret)
+ goto err_stop_link;
+
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
-void dw_pcie_host_deinit(struct pcie_port *pp)
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
+
+ dw_pcie_stop_link(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- int type;
- u32 busdev;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int type, ret;
+ u32 busdev;
/*
* Checking whether the link is up here is a last line of defense
@@ -467,8 +568,10 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
-
- dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
+ ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+ pp->cfg0_size);
+ if (ret)
+ return NULL;
return pp->va_cfg0_base + where;
}
@@ -476,33 +579,45 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops dw_child_pcie_ops = {
@@ -513,7 +628,7 @@ static struct pci_ops dw_child_pcie_ops = {
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (PCI_SLOT(devfn) > 0)
@@ -529,11 +644,72 @@ static struct pci_ops dw_pcie_ops = {
.write = pci_generic_config_write,
};
-void dw_pcie_setup_rc(struct pcie_port *pp)
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
- int i;
- u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *entry;
+ int i, ret;
+
+ /* Note the very first outbound ATU is used for CFG IOs */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure all outbound windows are disabled before proceeding with
+ * the MEM/IO ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+ pp->io_base,
+ pp->io_bus_addr,
+ pp->io_size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set IO range %pr\n",
+ entry->res);
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ pci->num_ob_windows);
+
+ return 0;
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val, ctrl, num_ctrls;
+ int ret;
/*
* Enable DBI read-only registers for writing/updating configuration.
@@ -582,45 +758,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
- /* Ensure all outbound windows are disabled so there are multiple matches */
- for (i = 0; i < pci->num_ob_windows; i++)
- dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
-
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
* ATU, so we should not program the ATU here.
*/
if (pp->bridge->child_ops == &dw_child_pcie_ops) {
- int atu_idx = 0;
- struct resource_entry *entry;
-
- /* Get last memory resource entry */
- resource_list_for_each_entry(entry, &pp->bridge->windows) {
- if (resource_type(entry->res) != IORESOURCE_MEM)
- continue;
-
- if (pci->num_ob_windows <= ++atu_idx)
- break;
-
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_MEM, entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
- }
-
- if (pp->io_size) {
- if (pci->num_ob_windows > ++atu_idx)
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
- else
- pci->io_cfg_atu_shared = true;
- }
-
- if (pci->num_ob_windows <= atu_idx)
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
- pci->num_ob_windows);
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
}
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -633,5 +779,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
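A minimal sketch, not part of the patch, of how a glue driver can pair the new host_deinit callback with host_init now that dw_pcie_host_init() unwinds through it on failure and dw_pcie_host_deinit() invokes it on teardown; all "my_*" names are hypothetical:

#include "pcie-designware.h"

static int my_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* bring up clocks, resets, PHY, ... before the root port is configured */
	dev_dbg(pci->dev, "root port resources powered up\n");
	return 0;
}

static void my_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* runs from the dw_pcie_host_init() error path and from host deinit */
	dev_dbg(pci->dev, "root port resources powered down\n");
}

static const struct dw_pcie_host_ops my_pcie_host_ops = {
	.host_init = my_pcie_host_init,
	.host_deinit = my_pcie_host_deinit,
};

The same callback covers both paths, so platform teardown no longer has to be duplicated in the glue driver's probe error handling and remove routine.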
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 0c5de87d3cc6..1fcfb840f238 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -17,13 +17,11 @@
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/types.h>
-#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
struct dw_pcie *pci;
- struct regmap *regmap;
enum dw_pcie_device_mode mode;
};
@@ -31,20 +29,9 @@ struct dw_plat_pcie_of_data {
enum dw_pcie_device_mode mode;
};
-static const struct of_device_id dw_plat_pcie_of_match[];
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
};
-static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = dw_plat_pcie_establish_link,
-};
-
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -96,7 +83,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = dw_plat_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -140,7 +127,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
@@ -153,20 +139,21 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENODEV;
ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
break;
case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
}
- return 0;
+ return ret;
}
static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index d92c8a25094f..c6725c519a47 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -8,14 +8,41 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "../../pci.h"
#include "pcie-designware.h"
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
/*
* These interfaces resemble the pci_find_*capability() interfaces, but these
* are for configuring host controllers, which are bridges *to* PCI devices but
@@ -181,48 +208,61 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
dev_err(pci->dev, "write DBI address failed\n");
}
-static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
{
+ if (pci->iatu_unroll_enabled)
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
+}
+
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
+{
+ void __iomem *base;
int ret;
u32 val;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
+ return pci->ops->read_dbi(pci, base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
+ ret = dw_pcie_read(base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
{
+ void __iomem *base;
int ret;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
+ pci->ops->write_dbi(pci, base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, 4, val);
+ ret = dw_pcie_write(base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
-static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}
-static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}
static inline u32 dw_pcie_enable_ecrc(u32 val)
@@ -266,264 +306,160 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int type,
- u64 cpu_addr, u64 pci_addr,
- u64 size)
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
{
u32 retries, val;
- u64 limit_addr = cpu_addr + size - 1;
-
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
- lower_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
- upper_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = upper_32_bits(size - 1) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
- val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
+ u64 limit_addr;
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ob_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return;
+ if (pci->ops && pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
- mdelay(LINK_WAIT_IATU);
+ limit_addr = cpu_addr + size - 1;
+
+ if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
}
- dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- u32 retries, val;
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
- if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
- return;
- }
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- if (pci->version >= 0x460A)
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
- upper_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
+ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
- return;
+ return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
+ return -ETIMEDOUT;
}
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
{
- __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
+ return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
}
-static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
}
-static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- int type;
- u32 retries, val;
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
- return -EINVAL;
- }
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_ENABLE |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
-
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ib_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return 0;
-
- mdelay(LINK_WAIT_IATU);
- }
- dev_err(pci->dev, "Inbound iATU is not being enabled\n");
-
- return -EBUSY;
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+ int type, u64 cpu_addr, u8 bar)
{
- int type;
u32 retries, val;
- if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
- cpu_addr, as_type);
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
- index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
+ if (!IS_ALIGNED(cpu_addr, pci->region_align))
return -EINVAL;
- }
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
- return -EBUSY;
+ return -ETIMEDOUT;
}
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type)
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
- int region;
-
- switch (type) {
- case DW_PCIE_REGION_INBOUND:
- region = PCIE_ATU_REGION_INBOUND;
- break;
- case DW_PCIE_REGION_OUTBOUND:
- region = PCIE_ATU_REGION_OUTBOUND;
- break;
- default:
- return;
- }
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link up\n");
- return 0;
- }
+ if (dw_pcie_link_up(pci))
+ break;
+
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_info(pci->dev, "Phy link never came up\n");
+ if (retries >= LINK_WAIT_MAX_RETRIES) {
+ dev_err(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
- return -ETIMEDOUT;
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
@@ -534,7 +470,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
@@ -586,95 +522,81 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
- return 1;
+ return true;
- return 0;
+ return false;
}
-static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
- max_region = min((int)pci->atu_size / 512, 256);
-
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
+ if (pci->iatu_unroll_enabled) {
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
- val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
-
- val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- pci->num_ib_windows = ib;
- pci->num_ob_windows = ob;
-}
-
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
-{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
- max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
- break;
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
- break;
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
}
- pci->num_ib_windows = ib;
pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
}
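/*
 * Worked example (annotation, not part of the patch): the LIMIT register bits
 * below the minimal supported region size are hardwired and read back as ones,
 * so if the read after writing zero returns, say, 0xFFFF the code above yields
 * region_align = 1 << fls(0xFFFF) = 64K. If UPPER_LIMIT reads back 0xFFFFFFFF,
 * region_limit = (0xFFFFFFFFULL << 32) | (SZ_4G - 1) spans the whole 64-bit
 * range; on cores older than v4.60a max stays 0 and regions are capped at 4 GB.
 */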
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(pci->dev);
- if (pci->version >= 0x480A || (!pci->version &&
- dw_pcie_iatu_unroll_enabled(pci))) {
- pci->iatu_unroll_enabled = true;
+ pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
+ if (pci->iatu_unroll_enabled) {
if (!pci->atu_base) {
struct resource *res =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
if (res) {
pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
}
if (!pci->atu_base || IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
@@ -683,23 +605,25 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
if (!pci->atu_size)
/* Pick a minimal default, enough for 8 in and 8 out windows */
pci->atu_size = SZ_4K;
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+ }
- dw_pcie_iatu_detect_regions_unroll(pci);
- } else
- dw_pcie_iatu_detect_regions(pci);
+ dw_pcie_iatu_detect_regions(pci);
dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
- dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
- pci->num_ob_windows, pci->num_ib_windows);
+ dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
+ struct device_node *np = pci->dev->of_node;
u32 val;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
@@ -726,6 +650,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+ if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
@@ -772,11 +703,4 @@ void dw_pcie_setup(struct dw_pcie *pci)
break;
}
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
- val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
- PCIE_PL_CHK_REG_CHK_REG_START;
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
- }
}
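A minimal sketch, not part of the patch, of the calling-convention change above: the iATU programming helpers now return an error that callers are expected to propagate instead of silently carrying on; the window index and addresses below are made up for the example:

static int my_pcie_map_mem_window(struct dw_pcie *pci)
{
	int ret;

	/* -EINVAL for unaligned/empty regions, -ETIMEDOUT if enable never latches */
	ret = dw_pcie_prog_outbound_atu(pci, 1, PCIE_ATU_TYPE_MEM,
					0x40000000, 0x40000000, SZ_16M);
	if (ret)
		dev_err(pci->dev, "failed to program MEM window: %d\n", ret);

	return ret;
}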
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7d6e9b7576be..09b887093a84 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -20,6 +20,29 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+
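/*
 * Illustrative sketch (annotation, not part of the patch): a glue driver whose
 * IP-core predates v4.70a, where PCIE_VERSION_NUMBER reads back as zero, can
 * still seed pci->version with one of the constants above, e.g.
 * pci->version = DW_PCIE_VER_460A, and feature checks such as
 * dw_pcie_ver_is_ge(pci, 460A) or dw_pcie_ver_is(pci, 490A) then replace the
 * raw hexadecimal comparisons used before this series.
 */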
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -74,13 +97,34 @@
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
+#define GEN3_RELATED_OFF 0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound windows CSRs. Before the IP-core v4.80a each
+ * iATU region CSRs had been indirectly accessible by means of the dedicated
+ * viewport selector. The iATU/eDMA CSRs space was re-designed in DWC PCIe
+ * v4.80a in a way so the viewport was unrolled into the directly accessible
+ * iATU/eDMA CSRs space.
+ */
#define PCIE_ATU_VIEWPORT 0x900
-#define PCIE_ATU_REGION_INBOUND BIT(31)
-#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+ (((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
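/*
 * Worked example (annotation, not part of the patch): in the unrolled layout
 * each region index owns a 0x200-byte slot and inbound CSRs sit at BIT(8)
 * inside it, so PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, 1) = 0x200 and
 * PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_IB, 1) = 0x300; the per-region
 * control, base, limit and target offsets defined here are then relative to
 * that slot, or to the 0x904 viewport window on older, viewport-based cores.
 */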
@@ -88,19 +132,19 @@
#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
-#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
-#define PCIE_ATU_LOWER_BASE 0x90C
-#define PCIE_ATU_UPPER_BASE 0x910
-#define PCIE_ATU_LIMIT 0x914
-#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
-#define PCIE_ATU_UPPER_TARGET 0x91C
-#define PCIE_ATU_UPPER_LIMIT 0x924
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
@@ -131,6 +175,25 @@
#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
+ * RAS-DES register definitions
+ */
+#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
+#define EVENT_COUNTER_ALL_CLEAR 0x3
+#define EVENT_COUNTER_ENABLE_ALL 0x7
+#define EVENT_COUNTER_ENABLE_SHIFT 2
+#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
+#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
+#define EVENT_COUNTER_EVENT_L1 0x5
+#define EVENT_COUNTER_EVENT_L1_1 0x7
+#define EVENT_COUNTER_EVENT_L1_2 0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
+#define EVENT_COUNTER_GROUP_5 0x5
+
+#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+
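/*
 * Illustrative sketch (annotation, not part of the patch): with the RAS-DES
 * definitions above, a counter is selected and read back roughly as follows,
 * assuming "ras_des" holds the offset of the vendor-specific RAS-DES extended
 * capability:
 *
 *	val = EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT |
 *	      EVENT_COUNTER_EVENT_L1 << EVENT_COUNTER_EVENT_SEL_SHIFT |
 *	      EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
 *	dw_pcie_writel_dbi(pci, ras_des + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
 *	val = dw_pcie_readl_dbi(pci, ras_des + PCIE_RAS_DES_EVENT_COUNTER_DATA);
 */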
+/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
* default; the driver core automatically derives atu_base from dbi_base using
@@ -138,13 +201,6 @@
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((region) << 9)
-
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- (((region) << 9) | BIT(8))
-
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -155,16 +211,10 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
-struct pcie_port;
struct dw_pcie;
+struct dw_pcie_rp;
struct dw_pcie_ep;
-enum dw_pcie_region_type {
- DW_PCIE_REGION_UNKNOWN,
- DW_PCIE_REGION_INBOUND,
- DW_PCIE_REGION_OUTBOUND,
-};
-
enum dw_pcie_device_mode {
DW_PCIE_UNKNOWN_TYPE,
DW_PCIE_EP_TYPE,
@@ -173,12 +223,14 @@ enum dw_pcie_device_mode {
};
struct dw_pcie_host_ops {
- int (*host_init)(struct pcie_port *pp);
- int (*msi_host_init)(struct pcie_port *pp);
+ int (*host_init)(struct dw_pcie_rp *pp);
+ void (*host_deinit)(struct dw_pcie_rp *pp);
+ int (*msi_host_init)(struct dw_pcie_rp *pp);
};
-struct pcie_port {
+struct dw_pcie_rp {
bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
@@ -187,11 +239,11 @@ struct pcie_port {
u32 io_size;
int irq;
const struct dw_pcie_host_ops *ops;
- int msi_irq;
+ int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
- u16 msi_msg;
dma_addr_t msi_data;
+ struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
@@ -200,12 +252,6 @@ struct pcie_port {
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
-enum dw_pcie_as_type {
- DW_PCIE_AS_UNKNOWN,
- DW_PCIE_AS_MEM,
- DW_PCIE_AS_IO,
-};
-
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
@@ -261,20 +307,21 @@ struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
- /* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;
- struct pcie_port pp;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
- unsigned int version;
+ u32 version;
+ u32 type;
int num_lanes;
int link_gen;
u8 n_fts[2];
bool iatu_unroll_enabled: 1;
- bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -282,6 +329,8 @@ struct dw_pcie {
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
@@ -294,17 +343,13 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type);
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type);
+ int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
@@ -365,34 +410,49 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, reg, val);
}
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
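/*
 * Annotation, not part of the patch: through these wrappers the core only
 * calls pci->ops->start_link()/stop_link() when a glue driver provides them,
 * so empty stubs such as the dw_plat_pcie_establish_link() removed from
 * pcie-designware-plat.c above are no longer needed. A driver that does need
 * the hook fills in the callback (my_pcie_start_link is hypothetical):
 *
 *	static const struct dw_pcie_ops my_pcie_ops = {
 *		.start_link = my_pcie_start_link,
 *	};
 */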
#ifdef CONFIG_PCIE_DW_HOST
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_setup_rc(struct pcie_port *pp);
-int dw_pcie_host_init(struct pcie_port *pp);
-void dw_pcie_host_deinit(struct pcie_port *pp);
-int dw_pcie_allocate_domains(struct pcie_port *pp);
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
-static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
}
-static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
+ return 0;
}
-static inline int dw_pcie_host_init(struct pcie_port *pp)
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
return 0;
}
-static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
}
-static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 8c5bb9d7cc36..c1e7653e508e 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -186,7 +186,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int rockchip_pcie_host_init(struct pcie_port *pp)
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
@@ -288,7 +288,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
int ret;
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 02cc70d8cc06..0c90583c078b 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -16,11 +16,9 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -236,7 +234,7 @@ err:
return ret;
}
-static int fu740_pcie_host_init(struct pcie_port *pp)
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fu740_pcie *afp = to_fu740_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 410555dccb6d..e2b80f10030d 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
writel(val, histb_pcie->ctrl + reg);
}
-static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
}
-static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -180,7 +180,7 @@ static int histb_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int histb_pcie_host_init(struct pcie_port *pp)
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -219,7 +219,7 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
regulator_disable(hipcie->vpcie);
}
-static int histb_pcie_host_enable(struct pcie_port *pp)
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -297,7 +297,7 @@ static int histb_pcie_probe(struct platform_device *pdev)
{
struct histb_pcie *hipcie;
struct dw_pcie *pci;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
enum of_gpio_flags of_flags;
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 5ba144924ff8..333c33d98a70 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -58,10 +58,6 @@
#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
-struct intel_pcie_soc {
- unsigned int pcie_ver;
-};
-
struct intel_pcie {
struct dw_pcie pci;
void __iomem *app_base;
@@ -306,7 +302,11 @@ static int intel_pcie_host_setup(struct intel_pcie *pcie)
intel_pcie_ltssm_disable(pcie);
intel_pcie_link_setup(pcie);
intel_pcie_init_n_fts(pci);
- dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
dw_pcie_upconfig_setup(pci);
intel_pcie_device_rst_deassert(pcie);
@@ -343,7 +343,7 @@ static void __intel_pcie_remove(struct intel_pcie *pcie)
static int intel_pcie_remove(struct platform_device *pdev)
{
struct intel_pcie *pcie = platform_get_drvdata(pdev);
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
__intel_pcie_remove(pcie);
@@ -351,7 +351,7 @@ static int intel_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
+static int intel_pcie_suspend_noirq(struct device *dev)
{
struct intel_pcie *pcie = dev_get_drvdata(dev);
int ret;
@@ -366,14 +366,14 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
return ret;
}
-static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
+static int intel_pcie_resume_noirq(struct device *dev)
{
struct intel_pcie *pcie = dev_get_drvdata(dev);
return intel_pcie_host_setup(pcie);
}
-static int intel_pcie_rc_init(struct pcie_port *pp)
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -394,16 +394,11 @@ static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
.host_init = intel_pcie_rc_init,
};
-static const struct intel_pcie_soc pcie_data = {
- .pcie_ver = 0x520A,
-};
-
static int intel_pcie_probe(struct platform_device *pdev)
{
- const struct intel_pcie_soc *data;
struct device *dev = &pdev->dev;
struct intel_pcie *pcie;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
int ret;
@@ -424,12 +419,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- data = device_get_match_data(dev);
- if (!data)
- return -ENODEV;
-
pci->ops = &intel_pcie_ops;
- pci->version = data->pcie_ver;
pp->ops = &intel_pcie_dw_ops;
ret = dw_pcie_host_init(pp);
@@ -442,12 +432,12 @@ static int intel_pcie_probe(struct platform_device *pdev)
}
static const struct dev_pm_ops intel_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
- intel_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
};
static const struct of_device_id of_intel_pcie_match[] = {
- { .compatible = "intel,lgm-pcie", .data = &pcie_data },
+ { .compatible = "intel,lgm-pcie" },
{}
};
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 1ac29a6eef22..f90f36bac018 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -231,7 +231,7 @@ static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 val, mask, status;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
/*
* Keem Bay PCIe Controller provides an additional IP logic on top of
@@ -332,13 +332,13 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
u32 val;
int ret;
pp->ops = &keembay_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = keembay_pcie_setup_msi_irq(pcie);
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index a52cad269f85..7f67aad71df4 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -620,7 +620,7 @@ static int kirin_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
{
pp->bridge->ops = &kirin_pci_ops;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 2ea13750b492..66886dc6e777 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -41,6 +41,9 @@
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)
+#define PCIE20_PARF_PM_CTRL 0x20
+#define REQ_NOT_ENTR_L1 BIT(5)
+
#define PCIE20_PARF_PHY_CTRL 0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
@@ -52,6 +55,10 @@
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define AHB_CLK_EN BIT(0)
+#define MSTR_AXI_CLK_EN BIT(1)
+#define BYPASS BIT(4)
+
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
@@ -69,7 +76,20 @@
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE_CAP_LINK1_VAL 0x2FD7F
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
+ 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
+ 1)
+#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
+ PCI_EXP_SLTCAP_PCP | \
+ PCI_EXP_SLTCAP_MRLSP | \
+ PCI_EXP_SLTCAP_AIP | \
+ PCI_EXP_SLTCAP_PIP | \
+ PCI_EXP_SLTCAP_HPS | \
+ PCI_EXP_SLTCAP_HPC | \
+ PCI_EXP_SLTCAP_EIP | \
+ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
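/*
 * Worked check (annotation, not part of the patch): the composed value equals
 * the PCIE_CAP_LINK1_VAL magic number it replaces. Bits 6:0 cover ABP through
 * HPC, SPLV = 250 with SPLS = 1 encodes a 25.0 W slot power limit, and EIP
 * sets bit 17: 0x7f | (250 << 7) | (1 << 15) | BIT(17) = 0x2fd7f.
 */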
#define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -128,7 +148,6 @@ struct qcom_pcie_resources_2_3_2 {
struct clk *master_clk;
struct clk *slave_clk;
struct clk *cfg_clk;
- struct clk *pipe_clk;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
@@ -165,10 +184,11 @@ struct qcom_pcie_resources_2_7_0 {
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
- struct clk *pipe_clk;
- struct clk *pipe_clk_src;
- struct clk *phy_pipe_clk;
- struct clk *ref_clk_src;
+};
+
+struct qcom_pcie_resources_2_9_0 {
+ struct clk_bulk_data clks[5];
+ struct reset_control *rst;
};
union qcom_pcie_resources {
@@ -178,6 +198,7 @@ union qcom_pcie_resources {
struct qcom_pcie_resources_2_3_3 v2_3_3;
struct qcom_pcie_resources_2_4_0 v2_4_0;
struct qcom_pcie_resources_2_7_0 v2_7_0;
+ struct qcom_pcie_resources_2_9_0 v2_9_0;
};
struct qcom_pcie;
@@ -194,7 +215,6 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
- unsigned int pipe_clk_need_muxing:1;
unsigned int has_tbu_clk:1;
unsigned int has_ddrss_sf_tbu_clk:1;
unsigned int has_aggre0_clk:1;
@@ -325,8 +345,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- struct device_node *node = dev->of_node;
- u32 val;
int ret;
/* reset the PCIe interface as uboot can leave it undefined state */
@@ -337,8 +355,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->ext_reset);
reset_control_assert(res->phy_reset);
- writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -381,15 +397,42 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
goto err_deassert_axi;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
- if (ret)
- goto err_clks;
+ return 0;
+
+err_deassert_axi:
+ reset_control_assert(res->por_reset);
+err_deassert_por:
+ reset_control_assert(res->pci_reset);
+err_deassert_pci:
+ reset_control_assert(res->phy_reset);
+err_deassert_phy:
+ reset_control_assert(res->ext_reset);
+err_deassert_ext:
+ reset_control_assert(res->ahb_reset);
+err_deassert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ return ret;
+
if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
@@ -428,23 +471,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
return 0;
-
-err_clks:
- reset_control_assert(res->axi_reset);
-err_deassert_axi:
- reset_control_assert(res->por_reset);
-err_deassert_por:
- reset_control_assert(res->pci_reset);
-err_deassert_pci:
- reset_control_assert(res->phy_reset);
-err_deassert_phy:
- reset_control_assert(res->ext_reset);
-err_deassert_ext:
- reset_control_assert(res->ahb_reset);
-err_deassert_ahb:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
@@ -532,16 +558,6 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
goto err_slave;
}
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- }
-
return 0;
err_slave:
clk_disable_unprepare(res->slave_bus);
@@ -557,6 +573,21 @@ err_res:
return ret;
}
+static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+{
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+}
+
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -597,8 +628,7 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (IS_ERR(res->slave_clk))
return PTR_ERR(res->slave_clk);
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
@@ -613,19 +643,11 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
-
- clk_disable_unprepare(res->pipe_clk);
-}
-
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -658,6 +680,25 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
goto err_slave_clk;
}
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ u32 val;
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
@@ -680,34 +721,6 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
-
-err_slave_clk:
- clk_disable_unprepare(res->master_clk);
-err_master_clk:
- clk_disable_unprepare(res->cfg_clk);
-err_cfg_clk:
- clk_disable_unprepare(res->aux_clk);
-
-err_aux_clk:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
-}
-
-static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- int ret;
-
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- return ret;
- }
-
- return 0;
}
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
@@ -814,7 +827,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = reset_control_assert(res->axi_m_reset);
@@ -939,6 +951,33 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
if (ret)
goto err_clks;
+ return 0;
+
+err_clks:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
+{
+ u32 val;
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
@@ -961,26 +1000,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
-
-err_clks:
- reset_control_assert(res->ahb_reset);
-err_rst_ahb:
- reset_control_assert(res->pwr_reset);
-err_rst_pwr:
- reset_control_assert(res->axi_s_reset);
-err_rst_axi_s:
- reset_control_assert(res->axi_m_sticky_reset);
-err_rst_axi_m_sticky:
- reset_control_assert(res->axi_m_reset);
-err_rst_axi_m:
- reset_control_assert(res->pipe_sticky_reset);
-err_rst_pipe_sticky:
- reset_control_assert(res->pipe_reset);
-err_rst_pipe:
- reset_control_assert(res->phy_reset);
-err_rst_phy:
- reset_control_assert(res->phy_ahb_reset);
- return ret;
}
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
@@ -1038,9 +1057,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int i, ret;
- u32 val;
for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
ret = reset_control_assert(res->rst[i]);
@@ -1097,6 +1114,33 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
goto err_clk_aux;
}
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+ /*
+ * Not checking for failure; the original failure will be
+ * returned in 'ret' anyway.
+ */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
writel(SLV_ADDR_SPACE_SZ,
pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
@@ -1114,7 +1158,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_ASPMS;
@@ -1124,24 +1168,6 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
PCI_EXP_DEVCTL2);
return 0;
-
-err_clk_aux:
- clk_disable_unprepare(res->ahb_clk);
-err_clk_ahb:
- clk_disable_unprepare(res->axi_s_clk);
-err_clk_axi_s:
- clk_disable_unprepare(res->axi_m_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->iface);
-err_clk_iface:
- /*
- * Not checking for failure, will anyway return
- * the original failure in 'ret'.
- */
- for (i = 0; i < ARRAY_SIZE(res->rst); i++)
- reset_control_assert(res->rst[i]);
-
- return ret;
}
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
@@ -1184,22 +1210,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret < 0)
return ret;
- if (pcie->cfg->pipe_clk_need_muxing) {
- res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
- if (IS_ERR(res->pipe_clk_src))
- return PTR_ERR(res->pipe_clk_src);
-
- res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe");
- if (IS_ERR(res->phy_pipe_clk))
- return PTR_ERR(res->phy_pipe_clk);
-
- res->ref_clk_src = devm_clk_get(dev, "ref");
- if (IS_ERR(res->ref_clk_src))
- return PTR_ERR(res->ref_clk_src);
- }
-
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
@@ -1216,10 +1227,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
return ret;
}
- /* Set TCXO as clock source for pcie_pipe_clk_src */
- if (pcie->cfg->pipe_clk_need_muxing)
- clk_set_parent(res->pipe_clk_src, res->ref_clk_src);
-
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret < 0)
goto err_disable_regulators;
@@ -1261,6 +1268,11 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val |= BIT(4);
writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ /* Enable L1 and L1SS */
+ val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
+
if (IS_ENABLED(CONFIG_PCI_MSI)) {
val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
val |= BIT(31);
@@ -1281,25 +1293,114 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
clk_bulk_disable_unprepare(res->num_clks, res->clks);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
- /* Set pipe clock as clock source for pcie_pipe_clk_src */
- if (pcie->cfg->pipe_clk_need_muxing)
- clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "axi_bridge";
+ res->clks[4].id = "rchng";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
- return clk_prepare_enable(res->pipe_clk);
+ return 0;
}
-static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Delay periods before and after reset deassert are working values
+ * from the downstream Codeaurora kernel.
+ */
+ usleep_range(2000, 2500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
- clk_disable_unprepare(res->pipe_clk);
+ return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int i;
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+ writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+ pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ pci->dbi_base + GEN3_RELATED_OFF);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ for (i = 0; i < 256; i++)
+ writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));
+
+ return 0;
}
static int qcom_pcie_link_up(struct dw_pcie *pci)
@@ -1381,7 +1482,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1433,6 +1534,7 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
static const struct qcom_pcie_ops ops_2_1_0 = {
.get_resources = qcom_pcie_get_resources_2_1_0,
.init = qcom_pcie_init_2_1_0,
+ .post_init = qcom_pcie_post_init_2_1_0,
.deinit = qcom_pcie_deinit_2_1_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1441,6 +1543,7 @@ static const struct qcom_pcie_ops ops_2_1_0 = {
static const struct qcom_pcie_ops ops_1_0_0 = {
.get_resources = qcom_pcie_get_resources_1_0_0,
.init = qcom_pcie_init_1_0_0,
+ .post_init = qcom_pcie_post_init_1_0_0,
.deinit = qcom_pcie_deinit_1_0_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1451,7 +1554,6 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
.init = qcom_pcie_init_2_3_2,
.post_init = qcom_pcie_post_init_2_3_2,
.deinit = qcom_pcie_deinit_2_3_2,
- .post_deinit = qcom_pcie_post_deinit_2_3_2,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1459,6 +1561,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
static const struct qcom_pcie_ops ops_2_4_0 = {
.get_resources = qcom_pcie_get_resources_2_4_0,
.init = qcom_pcie_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_4_0,
.deinit = qcom_pcie_deinit_2_4_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1467,6 +1570,7 @@ static const struct qcom_pcie_ops ops_2_4_0 = {
static const struct qcom_pcie_ops ops_2_3_3 = {
.get_resources = qcom_pcie_get_resources_2_3_3,
.init = qcom_pcie_init_2_3_3,
+ .post_init = qcom_pcie_post_init_2_3_3,
.deinit = qcom_pcie_deinit_2_3_3,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1477,8 +1581,6 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
.init = qcom_pcie_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .post_init = qcom_pcie_post_init_2_7_0,
- .post_deinit = qcom_pcie_post_deinit_2_7_0,
};
/* Qcom IP rev.: 1.9.0 */
@@ -1487,11 +1589,18 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.init = qcom_pcie_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .post_init = qcom_pcie_post_init_2_7_0,
- .post_deinit = qcom_pcie_post_deinit_2_7_0,
.config_sid = qcom_pcie_config_sid_sm8250,
};
+/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+static const struct qcom_pcie_ops ops_2_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_9_0,
+ .init = qcom_pcie_init_2_9_0,
+ .post_init = qcom_pcie_post_init_2_9_0,
+ .deinit = qcom_pcie_deinit_2_9_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
static const struct qcom_pcie_cfg apq8084_cfg = {
.ops = &ops_1_0_0,
};
@@ -1533,7 +1642,6 @@ static const struct qcom_pcie_cfg sm8250_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
.ops = &ops_1_9_0,
.has_ddrss_sf_tbu_clk = true,
- .pipe_clk_need_muxing = true,
.has_aggre0_clk = true,
.has_aggre1_clk = true,
};
@@ -1541,14 +1649,12 @@ static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
.ops = &ops_1_9_0,
.has_ddrss_sf_tbu_clk = true,
- .pipe_clk_need_muxing = true,
.has_aggre1_clk = true,
};
static const struct qcom_pcie_cfg sc7280_cfg = {
.ops = &ops_1_9_0,
.has_tbu_clk = true,
- .pipe_clk_need_muxing = true,
};
static const struct qcom_pcie_cfg sc8180x_cfg = {
@@ -1556,6 +1662,10 @@ static const struct qcom_pcie_cfg sc8180x_cfg = {
.has_tbu_clk = true,
};
+static const struct qcom_pcie_cfg ipq6018_cfg = {
+ .ops = &ops_2_9_0,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1564,7 +1674,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
const struct qcom_pcie_cfg *pcie_cfg;
@@ -1666,6 +1776,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
+ { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 1569e82b5568..99d47ae80331 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -85,7 +85,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
struct spear13xx_pcie *spear13xx_pcie = arg;
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
@@ -121,7 +121,7 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
@@ -155,7 +155,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -258,7 +258,7 @@ static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
- .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ .of_match_table = spear13xx_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
index c2de6ed4d86f..55f61914a986 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -39,7 +39,8 @@ static int tegra194_acpi_init(struct pci_config_window *cfg)
static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
u32 val, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
writel(val, pcie_ecam->iatu_base + offset + reg);
}
@@ -58,8 +59,8 @@ static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
PCIE_ATU_LIMIT);
atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
}
static void __iomem *tegra194_map_bus(struct pci_bus *bus,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cc2678490162..1b6b437823d2 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * PCIe host controller driver for Tegra194 SoC
+ * PCIe host controller driver for the following SoCs
+ * Tegra194
+ * Tegra234
*
- * Copyright (C) 2019 NVIDIA Corporation.
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
*
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
@@ -35,6 +37,9 @@
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"
+#define TEGRA194_DWC_IP_VER 0x490A
+#define TEGRA234_DWC_IP_VER 0x562A
+
#define APPL_PINMUX 0x0
#define APPL_PINMUX_PEX_RST BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
@@ -49,6 +54,7 @@
#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2
#define APPL_INTR_EN_L0_0 0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
@@ -170,19 +176,6 @@
#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
-#define EVENT_COUNTER_ALL_CLEAR 0x3
-#define EVENT_COUNTER_ENABLE_ALL 0x7
-#define EVENT_COUNTER_ENABLE_SHIFT 2
-#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
-#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
-#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
-#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
-#define EVENT_COUNTER_EVENT_L1 0x5
-#define EVENT_COUNTER_EVENT_L1_1 0x7
-#define EVENT_COUNTER_EVENT_L1_2 0x8
-#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
-#define EVENT_COUNTER_GROUP_5 0x5
-
#define N_FTS_VAL 52
#define FTS_VAL 52
@@ -191,12 +184,6 @@
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-#define GEN3_RELATED_OFF 0x890
-#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
-#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
@@ -243,7 +230,19 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-struct tegra194_pcie {
+struct tegra_pcie_dw_of_data {
+ u32 version;
+ enum dw_pcie_device_mode mode;
+ bool has_msix_doorbell_access_fix;
+ bool has_sbr_reset_fix;
+ bool has_l1ss_exit_fix;
+ bool has_ltr_req_fix;
+ u32 cdm_chk_int_en_bit;
+ u32 gen4_preset_vec;
+ u8 n_fts[2];
+};
+
+struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
struct resource *dbi_res;
@@ -255,17 +254,20 @@ struct tegra194_pcie {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
- enum dw_pcie_device_mode mode;
+ struct tegra_pcie_dw_of_data *of_data;
bool supports_clkreq;
bool enable_cdm_check;
+ bool enable_srns;
bool link_state;
bool update_fc_fixup;
+ bool enable_ext_refclk;
u8 init_link_width;
u32 msi_ctrl_int;
u32 num_lanes;
u32 cid;
u32 cfg_link_cap_l1sub;
+ u32 ras_des_cap;
u32 pcie_cap_base;
u32 aspm_cmrt;
u32 aspm_pwr_on_t;
@@ -287,22 +289,18 @@ struct tegra194_pcie {
int ep_state;
};
-struct tegra194_pcie_of_data {
- enum dw_pcie_device_mode mode;
-};
-
-static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
- return container_of(pci, struct tegra194_pcie, pci);
+ return container_of(pci, struct tegra_pcie_dw, pci);
}
-static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
const u32 reg)
{
writel_relaxed(value, pcie->appl_base + reg);
}
-static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
return readl_relaxed(pcie->appl_base + reg);
}
@@ -311,10 +309,10 @@ struct tegra_pcie_soc {
enum dw_pcie_device_mode mode;
};
-static void apply_bad_link_workaround(struct pcie_port *pp)
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 current_link_width;
u16 val;
@@ -347,18 +345,18 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val, tmp;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, status_l0, status_l1;
u16 val_w;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
-
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix &&
+ status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
/* SBR & Surprise Link Down WAR */
val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
@@ -374,15 +372,21 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
}
}
- if (val & APPL_INTR_STATUS_L0_INT_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
- if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
APPL_INTR_STATUS_L1_8_0);
apply_bad_link_workaround(pp);
}
- if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_w |= PCI_EXP_LNKSTA_LBMS;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA, val_w);
+
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
APPL_INTR_STATUS_L1_8_0);
@@ -394,31 +398,30 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
}
}
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
dev_info(pci->dev, "CDM check complete\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
dev_err(pci->dev, "CDM comparison mismatch\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
dev_err(pci->dev, "CDM Logic error\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
}
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
- dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
}
return IRQ_HANDLED;
}
-static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -446,7 +449,7 @@ static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
@@ -454,6 +457,9 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
PCI_EXP_LNKSTA_CLS;
clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+
/* If EP doesn't advertise L1SS, just return */
val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
@@ -492,7 +498,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -535,16 +541,21 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
/*
* This is an endpoint-mode-specific register that happens to appear even
* when the controller is operating in root port mode, and the system
* hangs when it is accessed while the link is in the ASPM-L1 state.
* So skip accessing it altogether.
*/
- if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
*val = 0x00000000;
return PCIBIOS_SUCCESSFUL;
}
@@ -552,16 +563,21 @@ static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
/*
* This is an endpoint-mode-specific register that happens to appear even
* when the controller is operating in root port mode, and the system
* hangs when it is accessed while the link is in the ASPM-L1 state.
* So skip accessing it altogether.
*/
- if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
return PCIBIOS_SUCCESSFUL;
return pci_generic_config_write(bus, devfn, where, size, val);
@@ -569,30 +585,12 @@ static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
static struct pci_ops tegra_pci_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
- .read = tegra194_pcie_rd_own_conf,
- .write = tegra194_pcie_wr_own_conf,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
};
#if defined(CONFIG_PCIEASPM)
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
-static void disable_aspm_l11(struct tegra194_pcie *pcie)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -601,7 +599,7 @@ static void disable_aspm_l11(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static void disable_aspm_l12(struct tegra194_pcie *pcie)
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -610,24 +608,27 @@ static void disable_aspm_l12(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
u32 val;
- val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
- val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_DATA);
return val;
}
static int aspm_state_cnt(struct seq_file *s, void *data)
{
- struct tegra194_pcie *pcie = (struct tegra194_pcie *)
+ struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
dev_get_drvdata(s->private);
u32 val;
@@ -647,18 +648,20 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
/* Clear all counters */
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
EVENT_COUNTER_ALL_CLEAR);
/* Re-enable counting */
val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
return 0;
}
-static void init_host_aspm(struct tegra194_pcie *pcie)
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val;
@@ -666,10 +669,14 @@ static void init_host_aspm(struct tegra194_pcie *pcie)
val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+ pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
+ PCI_EXT_CAP_ID_VNDR);
+
/* Enable ASPM counters */
val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
- dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
+ dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
/* Program T_cmrt and T_pwr_on values */
val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
@@ -686,22 +693,22 @@ static void init_host_aspm(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static void init_debugfs(struct tegra194_pcie *pcie)
+static void init_debugfs(struct tegra_pcie_dw *pcie)
{
debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
-static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
-static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
-static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
u16 val_w;
@@ -709,13 +716,15 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
- val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
- val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
- appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ }
if (pcie->enable_cdm_check) {
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
- val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
+ val |= pcie->of_data->cdm_chk_int_en_bit;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
val = appl_readl(pcie, APPL_INTR_EN_L1_18);
@@ -736,10 +745,10 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable legacy interrupt generation */
@@ -757,10 +766,10 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}
-static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable MSI interrupt generation */
@@ -770,10 +779,10 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}
-static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
/* Clear interrupt statuses before enabling interrupts */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
@@ -798,7 +807,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
tegra_pcie_enable_msi_interrupts(pp);
}
-static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val, offset, i;
@@ -842,7 +851,8 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val |= (pcie->of_data->gen4_preset_vec <<
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
@@ -851,11 +861,12 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static int tegra194_pcie_host_init(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
+ u16 val_16;
pp->bridge->ops = &tegra_pci_ops;
@@ -863,6 +874,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+ val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -887,6 +903,15 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
config_gen3_gen4_eq_presets(pcie);
init_host_aspm(pcie);
@@ -897,9 +922,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
disable_aspm_l12(pcie);
}
- val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
if (pcie->update_fc_fixup) {
val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
@@ -912,14 +939,14 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static int tegra194_pcie_start_link(struct dw_pcie *pci)
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
u32 val, offset, speed, tmp;
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
bool retry = true;
- if (pcie->mode == DW_PCIE_EP_TYPE) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
enable_irq(pcie->pex_rst_irq);
return 0;
}
@@ -978,9 +1005,9 @@ retry_link:
offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
val &= ~PCI_DLF_EXCHANGE_ENABLE;
- dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
- tegra194_pcie_host_init(pp);
+ tegra_pcie_dw_host_init(pp);
dw_pcie_setup_rc(pp);
retry = false;
@@ -996,32 +1023,32 @@ retry_link:
return 0;
}
-static int tegra194_pcie_link_up(struct dw_pcie *pci)
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra194_pcie_stop_link(struct dw_pcie *pci)
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
- .link_up = tegra194_pcie_link_up,
- .start_link = tegra194_pcie_start_link,
- .stop_link = tegra194_pcie_stop_link,
+ .link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
-static const struct dw_pcie_host_ops tegra194_pcie_host_ops = {
- .host_init = tegra194_pcie_host_init,
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+ .host_init = tegra_pcie_dw_host_init,
};
-static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
unsigned int phy_count = pcie->phy_count;
@@ -1031,7 +1058,7 @@ static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
}
}
-static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie)
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
unsigned int i;
int ret;
@@ -1058,7 +1085,7 @@ phy_exit:
return ret;
}
-static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
@@ -1111,13 +1138,27 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
pcie->update_fc_fixup = true;
+ /* RP using an external REFCLK is supported only in Tegra234 */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ pcie->enable_ext_refclk = true;
+ } else {
+ pcie->enable_ext_refclk =
+ of_property_read_bool(pcie->dev->of_node,
+ "nvidia,enable-ext-refclk");
+ }
+
pcie->supports_clkreq =
of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
- if (pcie->mode == DW_PCIE_RC_TYPE)
+ if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
+ pcie->enable_srns =
+ of_property_read_bool(np, "nvidia,enable-srns");
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
return 0;
/* Endpoint mode specific DT entries */
@@ -1154,15 +1195,18 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
return 0;
}
-static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
bool enable)
{
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
- /* Controller-5 doesn't need to have its state set by BPMP-FW */
- if (pcie->cid == 5)
+ /*
+ * Controller-5 doesn't need to have its state set by BPMP-FW in
+ * Tegra194
+ */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
return 0;
memset(&req, 0, sizeof(req));
@@ -1182,7 +1226,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1210,9 +1254,9 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
struct pci_bus *child, *root_bus = NULL;
struct pci_dev *pdev;
@@ -1248,7 +1292,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
}
}
-static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
if (IS_ERR(pcie->slot_ctl_3v3)) {
@@ -1269,7 +1313,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
return 0;
}
-static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie)
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
int ret;
@@ -1307,7 +1351,7 @@ fail_12v_enable:
return ret;
}
-static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
if (pcie->slot_ctl_12v)
regulator_disable(pcie->slot_ctl_12v);
@@ -1315,7 +1359,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
regulator_disable(pcie->slot_ctl_3v3);
}
-static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
bool en_hw_hot_rst)
{
int ret;
@@ -1328,6 +1372,14 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
return ret;
}
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
+ goto fail_pll_init;
+ }
+ }
+
ret = tegra_pcie_enable_slot_regulators(pcie);
if (ret < 0)
goto fail_slot_reg_en;
@@ -1351,11 +1403,13 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
goto fail_core_apb_rst;
}
- if (en_hw_hot_rst) {
+ if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
/* Enable HW_HOT_RST mode */
val = appl_readl(pcie, APPL_CTRL);
val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
val |= APPL_CTRL_HW_HOT_RST_EN;
appl_writel(pcie, val, APPL_CTRL);
}
@@ -1382,6 +1436,19 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
appl_writel(pcie, val, APPL_CFG_MISC);
+ if (pcie->enable_srns || pcie->enable_ext_refclk) {
+ /*
+ * When the Tegra PCIe RP is using an external clock, it cannot supply
+ * the same clock to its downstream hierarchy. Hence, gate the PCIe RP
+ * REFCLK out pads when RP & EP are using separate clocks or RP
+ * is using an external REFCLK.
+ */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
if (!pcie->supports_clkreq) {
val = appl_readl(pcie, APPL_PINMUX);
val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
@@ -1407,12 +1474,15 @@ fail_core_clk:
fail_reg_en:
tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
+ if (pcie->enable_ext_refclk)
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
tegra_pcie_bpmp_set_ctrl_state(pcie, false);
return ret;
}
-static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
int ret;
@@ -1434,23 +1504,29 @@ static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
tegra_pcie_disable_slot_regulators(pcie);
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
+ }
+
ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
pcie->cid, ret);
}
-static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = tegra_pcie_config_controller(pcie, false);
if (ret < 0)
return ret;
- pp->ops = &tegra194_pcie_host_ops;
+ pp->ops = &tegra_pcie_dw_host_ops;
ret = dw_pcie_host_init(pp);
if (ret < 0) {
@@ -1465,11 +1541,11 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
u32 val;
- if (!tegra194_pcie_link_up(&pcie->pci))
+ if (!tegra_pcie_dw_link_up(&pcie->pci))
return 0;
val = appl_readl(pcie, APPL_RADM_STATUS);
@@ -1481,12 +1557,12 @@ static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
1, PME_ACK_TIMEOUT);
}
-static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
u32 data;
int err;
- if (!tegra194_pcie_link_up(&pcie->pci)) {
+ if (!tegra_pcie_dw_link_up(&pcie->pci)) {
dev_dbg(pcie->dev, "PCIe link is not up...!\n");
return;
}
@@ -1543,15 +1619,15 @@ static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie)
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
- tegra194_pcie_pme_turnoff(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
char *name;
@@ -1578,7 +1654,7 @@ static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
goto fail_pm_get_sync;
}
- pcie->link_state = tegra194_pcie_link_up(&pcie->pci);
+ pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
if (!pcie->link_state) {
ret = -ENOMEDIUM;
goto fail_host_init;
@@ -1603,7 +1679,7 @@ fail_pm_get_sync:
return ret;
}
-static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
u32 val;
int ret;
@@ -1634,6 +1710,13 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
pm_runtime_put_sync(pcie->dev);
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
+ ret);
+ }
+
ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
if (ret)
dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
@@ -1642,13 +1725,14 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
-static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct dw_pcie_ep *ep = &pci->ep;
struct device *dev = pcie->dev;
u32 val;
int ret;
+ u16 val_16;
if (pcie->ep_state == EP_STATE_ENABLED)
return;
@@ -1660,10 +1744,20 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
return;
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
if (ret) {
- dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
- goto fail_pll_init;
+ dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
+ pcie->cid, ret);
+ goto fail_set_ctrl_state;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
+ ret);
+ goto fail_pll_init;
+ }
}
ret = clk_prepare_enable(pcie->core_clk);
@@ -1760,12 +1854,29 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
disable_aspm_l12(pcie);
}
- val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
+
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+ val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
@@ -1782,6 +1893,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
dw_pcie_ep_init_notify(ep);
+ /* Program the private control to allow sending LTR upstream */
+ if (pcie->of_data->has_ltr_req_fix) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+ }
+
/* Enable LTSSM */
val = appl_readl(pcie, APPL_CTRL);
val |= APPL_CTRL_LTSSM_EN;
@@ -1802,12 +1920,14 @@ fail_core_apb_rst:
fail_core_clk_enable:
tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+fail_set_ctrl_state:
pm_runtime_put_sync(dev);
}
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
if (gpiod_get_value(pcie->pex_rst_gpiod))
pex_ep_event_pex_rst_assert(pcie);
@@ -1817,7 +1937,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1829,7 +1949,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
if (unlikely(irq > 31))
return -EINVAL;
@@ -1839,7 +1959,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
struct dw_pcie_ep *ep = &pcie->pci.ep;
@@ -1853,7 +1973,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
@@ -1894,7 +2014,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.get_features = tegra_pcie_ep_get_features,
};
-static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
@@ -1949,19 +2069,20 @@ static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
if (ret) {
dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
ret);
+ pm_runtime_disable(dev);
return ret;
}
return 0;
}
-static int tegra194_pcie_probe(struct platform_device *pdev)
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
- const struct tegra194_pcie_of_data *data;
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
- struct tegra194_pcie *pcie;
- struct pcie_port *pp;
+ struct tegra_pcie_dw *pcie;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct phy **phys;
char *name;
@@ -1977,16 +2098,14 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
pci = &pcie->pci;
pci->dev = &pdev->dev;
pci->ops = &tegra_dw_pcie_ops;
- pci->n_fts[0] = N_FTS_VAL;
- pci->n_fts[1] = FTS_VAL;
- pci->version = 0x490A;
-
+ pcie->dev = &pdev->dev;
+ pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
+ pci->n_fts[0] = pcie->of_data->n_fts[0];
+ pci->n_fts[1] = pcie->of_data->n_fts[1];
pp = &pci->pp;
pp->num_vectors = MAX_MSI_IRQS;
- pcie->dev = &pdev->dev;
- pcie->mode = (enum dw_pcie_device_mode)data->mode;
- ret = tegra194_pcie_parse_dt(pcie);
+ ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
const char *level = KERN_ERR;
@@ -2101,7 +2220,7 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- switch (pcie->mode) {
+ switch (pcie->of_data->mode) {
case DW_PCIE_RC_TYPE:
ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
IRQF_SHARED, "tegra-pcie-intr", pcie);
@@ -2136,7 +2255,8 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
break;
default:
- dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
}
fail:
@@ -2144,16 +2264,22 @@ fail:
return ret;
}
-static int tegra194_pcie_remove(struct platform_device *pdev)
+static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
- struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
- if (!pcie->link_state)
- return 0;
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return 0;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_deinit_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
- debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_deinit_controller(pcie);
- pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
if (pcie->pex_refclk_sel_gpiod)
@@ -2162,41 +2288,48 @@ static int tegra194_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int tegra194_pcie_suspend_late(struct device *dev)
+static int tegra_pcie_dw_suspend_late(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
+ return -EPERM;
+ }
+
if (!pcie->link_state)
return 0;
/* Enable HW_HOT_RST mode */
- val = appl_readl(pcie, APPL_CTRL);
- val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
- val |= APPL_CTRL_HW_HOT_RST_EN;
- appl_writel(pcie, val, APPL_CTRL);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
return 0;
}
-static int tegra194_pcie_suspend_noirq(struct device *dev)
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
if (!pcie->link_state)
return 0;
tegra_pcie_downstream_dev_to_D0(pcie);
- tegra194_pcie_pme_turnoff(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
return 0;
}
-static int tegra194_pcie_resume_noirq(struct device *dev)
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
int ret;
if (!pcie->link_state)
@@ -2206,7 +2339,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
if (ret < 0)
return ret;
- ret = tegra194_pcie_host_init(&pcie->pci.pp);
+ ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
if (ret < 0) {
dev_err(dev, "Failed to init host: %d\n", ret);
goto fail_host_init;
@@ -2214,7 +2347,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
dw_pcie_setup_rc(&pcie->pci.pp);
- ret = tegra194_pcie_start_link(&pcie->pci);
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
if (ret < 0)
goto fail_host_init;
@@ -2225,12 +2358,12 @@ fail_host_init:
return ret;
}
-static int tegra194_pcie_resume_early(struct device *dev)
+static int tegra_pcie_dw_resume_early(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
- if (pcie->mode == DW_PCIE_EP_TYPE) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
dev_err(dev, "Suspend is not supported in EP mode");
return -ENOTSUPP;
}
@@ -2239,75 +2372,124 @@ static int tegra194_pcie_resume_early(struct device *dev)
return 0;
/* Disable HW_HOT_RST mode */
- val = appl_readl(pcie, APPL_CTRL);
- val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
- val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
- val &= ~APPL_CTRL_HW_HOT_RST_EN;
- appl_writel(pcie, val, APPL_CTRL);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+ val &= ~APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
return 0;
}
-static void tegra194_pcie_shutdown(struct platform_device *pdev)
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
- struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
- if (!pcie->link_state)
- return;
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
- debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_downstream_dev_to_D0(pcie);
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_downstream_dev_to_D0(pcie);
- disable_irq(pcie->pci.pp.irq);
- if (IS_ENABLED(CONFIG_PCI_MSI))
- disable_irq(pcie->pci.pp.msi_irq);
+ disable_irq(pcie->pci.pp.irq);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ disable_irq(pcie->pci.pp.msi_irq[0]);
- tegra194_pcie_pme_turnoff(pcie);
- tegra_pcie_unconfig_controller(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
}
-static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = {
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
.mode = DW_PCIE_RC_TYPE,
+ .has_msix_doorbell_access_fix = true,
+ .has_sbr_reset_fix = true,
+ .has_l1ss_exit_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
};
-static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = {
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
.mode = DW_PCIE_EP_TYPE,
+ .has_l1ss_exit_fix = true,
+ .has_ltr_req_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
};
-static const struct of_device_id tegra194_pcie_of_match[] = {
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
- .data = &tegra194_pcie_rc_of_data,
+ .data = &tegra194_pcie_dw_rc_of_data,
},
{
.compatible = "nvidia,tegra194-pcie-ep",
- .data = &tegra194_pcie_ep_of_data,
+ .data = &tegra194_pcie_dw_ep_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie",
+ .data = &tegra234_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie-ep",
+ .data = &tegra234_pcie_dw_ep_of_data,
},
- {},
+ {}
};
-static const struct dev_pm_ops tegra194_pcie_pm_ops = {
- .suspend_late = tegra194_pcie_suspend_late,
- .suspend_noirq = tegra194_pcie_suspend_noirq,
- .resume_noirq = tegra194_pcie_resume_noirq,
- .resume_early = tegra194_pcie_resume_early,
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend_late = tegra_pcie_dw_suspend_late,
+ .suspend_noirq = tegra_pcie_dw_suspend_noirq,
+ .resume_noirq = tegra_pcie_dw_resume_noirq,
+ .resume_early = tegra_pcie_dw_resume_early,
};
-static struct platform_driver tegra194_pcie_driver = {
- .probe = tegra194_pcie_probe,
- .remove = tegra194_pcie_remove,
- .shutdown = tegra194_pcie_shutdown,
+static struct platform_driver tegra_pcie_dw_driver = {
+ .probe = tegra_pcie_dw_probe,
+ .remove = tegra_pcie_dw_remove,
+ .shutdown = tegra_pcie_dw_shutdown,
.driver = {
.name = "tegra194-pcie",
- .pm = &tegra194_pcie_pm_ops,
- .of_match_table = tegra194_pcie_of_match,
+ .pm = &tegra_pcie_dw_pm_ops,
+ .of_match_table = tegra_pcie_dw_of_match,
},
};
-module_platform_driver(tegra194_pcie_driver);
+module_platform_driver(tegra_pcie_dw_driver);
-MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match);
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index b45ac3754242..48c3eba817b4 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -171,7 +171,7 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
@@ -188,7 +188,7 @@ static void uniphier_pcie_irq_mask(struct irq_data *d)
static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
@@ -225,7 +225,7 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = {
static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
- struct pcie_port *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -258,7 +258,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -295,7 +295,7 @@ out_put_node:
return ret;
}
-static int uniphier_pcie_host_init(struct pcie_port *pp)
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
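
The pcie-uniphier.c changes are part of the tree-wide rename of struct pcie_port to struct dw_pcie_rp; the glue code still climbs from the root-port structure back to its wrapper with container_of() helpers such as to_dw_pcie_from_pp(). A standalone, compilable illustration of that recovery pattern is sketched below with made-up structure contents; it demonstrates only the pointer arithmetic, not the DWC API.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dw_pcie_rp { int irq; };
struct dw_pcie { int id; struct dw_pcie_rp pp; };

int main(void)
{
	struct dw_pcie pci = { .id = 7 };
	struct dw_pcie_rp *pp = &pci.pp;

	/* recover the enclosing dw_pcie from its embedded root-port member */
	struct dw_pcie *back = container_of(pp, struct dw_pcie, pp);

	printf("%d\n", back->id);	/* prints 7 */
	return 0;
}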
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 50f80f07e4db..71026fefa366 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -178,7 +178,7 @@ static void visconti_pcie_stop_link(struct dw_pcie *pci)
*/
static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
return cpu_addr & ~pp->io_base;
}
@@ -190,7 +190,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = visconti_pcie_stop_link,
};
-static int visconti_pcie_host_init(struct pcie_port *pp)
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -278,7 +278,7 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
pp->irq = platform_get_irq_byname(pdev, "intr");
if (pp->irq < 0)
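
In pcie-visconti.c the cpu_addr_fixup() callback keeps its job under the new dw_pcie_rp type: the DWC core expects bus addresses when it programs ATU windows, so the glue strips the CPU-side base from the incoming address. A standalone sketch of that masking follows; the addresses are illustrative, not taken from the hardware manual.

#include <stdint.h>
#include <stdio.h>

/* mirror of "cpu_addr & ~pp->io_base" with example values */
static uint64_t cpu_addr_fixup(uint64_t cpu_addr, uint64_t io_base)
{
	return cpu_addr & ~io_base;
}

int main(void)
{
	/* a CPU window at 0x28000000 whose bus addresses start at 0 */
	printf("0x%llx\n",
	       (unsigned long long)cpu_addr_fixup(0x28001000ULL, 0x28000000ULL));
	return 0;
}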
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index ffec82c8a523..966c8b48bd96 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -8,6 +8,7 @@
* Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_PCIEXP_CAP 0xc0
+#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -857,14 +859,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
switch (reg) {
- case PCI_EXP_SLTCTL:
- *value = PCI_EXP_SLTSTA_PDS << 16;
- return PCI_BRIDGE_EMUL_HANDLED;
-
/*
- * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
- * to be handled here, because their values are stored in emulated
- * config space buffer, and we read them from there when needed.
+ * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
+ * also supported, but do not need to be handled here, because their
+ * values are stored in emulated config space buffer, and we read them
+ * from there when needed.
*/
case PCI_EXP_LNKCAP: {
@@ -944,11 +943,89 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case 0:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+
+ /*
+ * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
+ * 3700 Functional Specification does not document registers
+ * at those addresses.
+ *
+ * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
+ * Reporting Capability header the last Extended Capability.
+ * If we obtain documentation for those registers in the
+ * future, this can be changed.
+ */
+ *value &= 0x000fffff;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
.read_base = advk_pci_bridge_emul_base_conf_read,
.write_base = advk_pci_bridge_emul_base_conf_write,
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+ .read_ext = advk_pci_bridge_emul_ext_conf_read,
+ .write_ext = advk_pci_bridge_emul_ext_conf_write,
};
/*
@@ -977,8 +1054,25 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCI_INTERRUPT_INTA;
- /* Aardvark HW provides PCIe Capability structure in version 2 */
- bridge->pcie_conf.cap = cpu_to_le16(2);
+ /*
+ * Aardvark HW provides PCIe Capability structure in version 2 and
+	 * indicates slot support, which is emulated.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);
+
+ /*
+ * Set Presence Detect State bit permanently since there is no support
+ * for unplugging the card nor detecting whether it is plugged. (If a
+ * platform exists in the future that supports it, via a GPIO for
+ * example, it should be implemented via this bit.)
+ *
+ * Set physical slot number to 1 since there is only one port and zero
+ * value is reserved for ports within the same silicon as Root Port
+ * which is not our case.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
+ 1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
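
The new aardvark handlers emulate the Advanced Error Reporting extended capability, and the write path has to respect that the status registers are write-one-to-clear: only bits the caller actually wrote may be forwarded, otherwise stale 1s would clear unrelated errors. The standalone model below shows the effect of the "new &= mask" step; it is a toy, not the driver.

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_status = 0x0000000f;	/* pretend four error bits are set */

static void w1c_write(uint32_t new, uint32_t mask)
{
	new &= mask;		/* keep only the bits covered by this write */
	hw_status &= ~new;	/* hardware clears every bit written as 1 */
}

int main(void)
{
	w1c_write(0x5, 0x5);		/* caller acknowledges bits 0 and 2 */
	printf("0x%x\n", hw_status);	/* prints 0xa */
	return 0;
}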
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index db814f7b93ba..e7c6f6629e7c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -642,7 +642,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
struct hv_retarget_device_interrupt *params;
struct tran_int_desc *int_desc;
struct hv_pcibus_device *hbus;
- struct cpumask *dest;
+ const struct cpumask *dest;
cpumask_var_t tmp;
struct pci_bus *pbus;
struct pci_dev *pdev;
@@ -1613,7 +1613,7 @@ out:
}
static u32 hv_compose_msi_req_v1(
- struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+ struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
u32 slot, u8 vector, u8 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
@@ -1635,13 +1635,13 @@ static u32 hv_compose_msi_req_v1(
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
* by subsequent retarget in hv_irq_unmask().
*/
-static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
+static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{
return cpumask_first_and(affinity, cpu_online_mask);
}
static u32 hv_compose_msi_req_v2(
- struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+ struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
u32 slot, u8 vector, u8 vector_count)
{
int cpu;
@@ -1660,7 +1660,7 @@ static u32 hv_compose_msi_req_v2(
}
static u32 hv_compose_msi_req_v3(
- struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
+ struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
u32 slot, u32 vector, u8 vector_count)
{
int cpu;
@@ -1697,7 +1697,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
- struct cpumask *dest;
+ const struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct msi_desc *msi_desc;
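
The Hyper-V changes only constify the affinity masks passed through the MSI composition helpers; the dummy target vCPU is still the first CPU that is both in the mask and online. A hedged sketch of that selection, with an illustrative "my_" wrapper:

#include <linux/cpumask.h>

static unsigned int my_pick_target_cpu(const struct cpumask *affinity)
{
	/* first CPU present in the affinity mask and currently online */
	return cpumask_first_and(affinity, cpu_online_mask);
}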
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 50a8e1d6f70a..05c50408f13b 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -9,6 +9,8 @@
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include "../pci.h"
@@ -18,18 +20,31 @@
#define DEV_PCIE_PORT_2 0x7a29
#define DEV_LS2K_APB 0x7a02
-#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GMAC 0x7a03
+#define DEV_LS7A_DC1 0x7a06
#define DEV_LS7A_LPC 0x7a0c
+#define DEV_LS7A_AHCI 0x7a08
+#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GNET 0x7a13
+#define DEV_LS7A_EHCI 0x7a14
+#define DEV_LS7A_DC2 0x7a36
+#define DEV_LS7A_HDMI 0x7a37
#define FLAG_CFG0 BIT(0)
#define FLAG_CFG1 BIT(1)
#define FLAG_DEV_FIX BIT(2)
+#define FLAG_DEV_HIDDEN BIT(3)
+
+struct loongson_pci_data {
+ u32 flags;
+ struct pci_ops *ops;
+};
struct loongson_pci {
void __iomem *cfg0_base;
void __iomem *cfg1_base;
struct platform_device *pdev;
- u32 flags;
+ const struct loongson_pci_data *data;
};
/* Fixup wrong class code in PCIe bridges */
@@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
-static void __iomem *cfg1_map(struct loongson_pci *priv, int bus,
- unsigned int devfn, int where)
+static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{
- unsigned long addroff = 0x0;
+ pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC1, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC2, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GMAC, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_AHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_EHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GNET, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+
+static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
- if (bus != 0)
- addroff |= BIT(28); /* Type 1 Access */
- addroff |= (where & 0xff) | ((where & 0xf00) << 16);
- addroff |= (bus << 16) | (devfn << 8);
- return priv->cfg1_base + addroff;
+ if (acpi_disabled)
+ return (struct loongson_pci *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct loongson_pci *)(cfg->priv);
}
-static void __iomem *cfg0_map(struct loongson_pci *priv, int bus,
- unsigned int devfn, int where)
+static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
{
unsigned long addroff = 0x0;
+ unsigned char busnum = bus->number;
- if (bus != 0)
+ if (!pci_is_root_bus(bus)) {
addroff |= BIT(24); /* Type 1 Access */
- addroff |= (bus << 16) | (devfn << 8) | where;
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | where;
return priv->cfg0_base + addroff;
}
-static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ unsigned long addroff = 0x0;
unsigned char busnum = bus->number;
- struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
- struct loongson_pci *priv = pci_host_bridge_priv(bridge);
+
+ if (!pci_is_root_bus(bus)) {
+ addroff |= BIT(28); /* Type 1 Access */
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
+ return priv->cfg1_base + addroff;
+}
+
+static bool pdev_may_exist(struct pci_bus *bus, unsigned int device,
+ unsigned int function)
+{
+ return !(pci_is_root_bus(bus) &&
+ (device >= 9 && device <= 20) && (function > 0));
+}
+
+static void __iomem *pci_loongson_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ unsigned int device = PCI_SLOT(devfn);
+ unsigned int function = PCI_FUNC(devfn);
+ struct loongson_pci *priv = pci_bus_to_loongson_pci(bus);
/*
* Do not read more than one device on the bus other than
- * the host bus. For our hardware the root bus is always bus 0.
+ * the host bus.
*/
- if (priv->flags & FLAG_DEV_FIX && busnum != 0 &&
- PCI_SLOT(devfn) > 0)
- return NULL;
+ if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) {
+ if (!pci_is_root_bus(bus) && (device > 0))
+ return NULL;
+ }
+
+ /* Don't access non-existent devices */
+ if (priv->data->flags & FLAG_DEV_HIDDEN) {
+ if (!pdev_may_exist(bus, device, function))
+ return NULL;
+ }
/* CFG0 can only access standard space */
if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base)
- return cfg0_map(priv, busnum, devfn, where);
+ return cfg0_map(priv, bus, devfn, where);
/* CFG1 can access extended space */
if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base)
- return cfg1_map(priv, busnum, devfn, where);
+ return cfg1_map(priv, bus, devfn, where);
return NULL;
}
+#ifdef CONFIG_OF
+
static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
@@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return val;
}
-/* H/w only accept 32-bit PCI operations */
+/* LS2K/LS7A accept 8/16/32-bit PCI config operations */
static struct pci_ops loongson_pci_ops = {
.map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/* RS780/SR5690 only accept 32-bit PCI config operations */
+static struct pci_ops loongson_pci_ops32 = {
+ .map_bus = pci_loongson_map_bus,
.read = pci_generic_config_read32,
.write = pci_generic_config_write32,
};
+static const struct loongson_pci_data ls2k_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data ls7a_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data rs780e_pci_data = {
+ .flags = FLAG_CFG0,
+ .ops = &loongson_pci_ops32,
+};
+
static const struct of_device_id loongson_pci_of_match[] = {
{ .compatible = "loongson,ls2k-pci",
- .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ .data = &ls2k_pci_data, },
{ .compatible = "loongson,ls7a-pci",
- .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ .data = &ls7a_pci_data, },
{ .compatible = "loongson,rs780e-pci",
- .data = (void *)(FLAG_CFG0), },
+ .data = &rs780e_pci_data, },
{}
};
@@ -193,20 +281,20 @@ static int loongson_pci_probe(struct platform_device *pdev)
priv = pci_host_bridge_priv(bridge);
priv->pdev = pdev;
- priv->flags = (unsigned long)of_device_get_match_data(dev);
+ priv->data = of_device_get_match_data(dev);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(dev, "missing mem resources for cfg0\n");
- return -EINVAL;
+ if (priv->data->flags & FLAG_CFG0) {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ dev_err(dev, "missing mem resources for cfg0\n");
+ else {
+ priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg0_base))
+ return PTR_ERR(priv->cfg0_base);
+ }
}
- priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
- if (IS_ERR(priv->cfg0_base))
- return PTR_ERR(priv->cfg0_base);
-
- /* CFG1 is optional */
- if (priv->flags & FLAG_CFG1) {
+ if (priv->data->flags & FLAG_CFG1) {
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!regs)
dev_info(dev, "missing mem resource for cfg1\n");
@@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev)
}
bridge->sysdata = priv;
- bridge->ops = &loongson_pci_ops;
+ bridge->ops = priv->data->ops;
bridge->map_irq = loongson_map_irq;
return pci_host_probe(bridge);
@@ -232,3 +320,41 @@ static struct platform_driver loongson_pci_driver = {
.probe = loongson_pci_probe,
};
builtin_platform_driver(loongson_pci_driver);
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static int loongson_pci_ecam_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct loongson_pci *priv;
+ struct loongson_pci_data *data;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ cfg->priv = priv;
+ data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN;
+ priv->data = data;
+ priv->cfg1_base = cfg->win - (cfg->busr.start << 16);
+
+ return 0;
+}
+
+const struct pci_ecam_ops loongson_pci_ecam_ops = {
+ .bus_shift = 16,
+ .init = loongson_pci_ecam_init,
+ .pci_ops = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif
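
pci-loongson.c now derives its private data from either OF match data or an ACPI/ECAM config window, hides the root-bus functions that would hang on access (FLAG_DEV_HIDDEN), and encodes extended config offsets by splitting the register number across two bit ranges. The standalone sketch below reproduces only the CFG1 offset arithmetic from cfg1_map() with example values; it is illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

static uint64_t ls_cfg1_offset(unsigned int bus, unsigned int devfn,
			       unsigned int where, int is_root_bus)
{
	uint64_t off = 0;

	if (!is_root_bus) {
		off |= 1ULL << 28;		/* Type 1 access */
		off |= (uint64_t)bus << 16;
	}
	/* low byte of the offset in [7:0], extended bits [11:8] in [27:24] */
	off |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
	return off;
}

int main(void)
{
	/* bus 1, device 2 function 0, extended config offset 0x100 */
	printf("0x%llx\n",
	       (unsigned long long)ls_cfg1_offset(1, 2 << 3, 0x100, 0));
	return 0;
}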
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index c1ffdb06c971..af915c951f06 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1216,7 +1216,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
struct mvebu_pcie *pcie;
@@ -1249,7 +1248,6 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
-#endif
static void mvebu_pcie_port_clk_put(void *data)
{
@@ -1737,7 +1735,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
};
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
static struct platform_driver mvebu_pcie_driver = {
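
Several drivers in this series, pci-mvebu.c included, switch from SET_NOIRQ_SYSTEM_SLEEP_PM_OPS plus CONFIG_PM_SLEEP guards to the newer NOIRQ_SYSTEM_SLEEP_PM_OPS macro, which references the callbacks unconditionally and lets dead-code elimination drop them when sleep support is disabled, so the #ifdef and __maybe_unused annotations can go. A hedged sketch of the resulting shape, with illustrative "my_" names:

#include <linux/device.h>
#include <linux/pm.h>

static int my_suspend_noirq(struct device *dev)
{
	/* quiesce the controller */
	return 0;
}

static int my_resume_noirq(struct device *dev)
{
	/* bring the controller back up */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(my_suspend_noirq, my_resume_noirq)
};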
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index 35804ea394fd..839695791757 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -328,6 +328,7 @@ static const struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
{ .compatible = "renesas,pci-rcar-gen2", },
+ { .compatible = "renesas,pci-rzn1", },
{ },
};
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 0457ec02ab70..8e323e93be91 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2707,7 +2707,7 @@ static int tegra_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
+static int tegra_pcie_pm_suspend(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
struct tegra_pcie_port *port;
@@ -2742,7 +2742,7 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
+static int tegra_pcie_pm_resume(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -2798,9 +2798,8 @@ poweroff:
}
static const struct dev_pm_ops tegra_pcie_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
- tegra_pcie_pm_resume)
+ RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
};
static struct platform_driver tegra_pcie_driver = {
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index eb6240958bb0..549d3bd6d1c2 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -641,7 +641,7 @@ static const struct of_device_id xgene_pcie_match_table[] = {
static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
- .of_match_table = of_match_ptr(xgene_pcie_match_table),
+ .of_match_table = xgene_pcie_match_table,
.suppress_bind_attrs = true,
},
.probe = xgene_pcie_probe,
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e61058e13818..521acd632f1a 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -190,11 +191,6 @@
/* Forward declarations */
struct brcm_pcie;
-static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
enum {
RGR1_SW_INIT_1,
@@ -223,64 +219,9 @@ struct pcie_cfg_data {
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
-static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
-};
-
-static const struct pcie_cfg_data generic_cfg = {
- .offsets = pcie_offsets,
- .type = GENERIC,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm4908_cfg = {
- .offsets = pcie_offsets,
- .type = BCM4908,
- .perst_set = brcm_pcie_perst_set_4908,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
- .perst_set = brcm_pcie_perst_set_7278,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
-};
-
-static const struct pcie_cfg_data bcm2711_cfg = {
- .offsets = pcie_offsets,
- .type = BCM2711,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
};
struct brcm_msi {
@@ -320,6 +261,8 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
@@ -741,52 +684,48 @@ static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
return dla && plu;
}
-static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + where;
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + where;
+ return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
}
-static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
- idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
writel(idx, base + IDX_ADDR(pcie));
return base + DATA_ADDR(pcie);
}
-static struct pci_ops brcm_pcie_ops = {
- .map_bus = brcm_pcie_map_conf,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
-};
-
-static struct pci_ops brcm_pcie_ops32 = {
- .map_bus = brcm_pcie_map_conf32,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
-};
-
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
@@ -926,17 +865,13 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- struct device *dev = pcie->dev;
+ struct pci_host_bridge *bridge;
struct resource_entry *entry;
- bool ssc_good = false;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- int i, ret, memc;
u32 tmp, burst, aspm_support;
+ int num_out_wins = 0;
+ int ret, memc;
/* Reset the bridge */
pcie->bridge_sw_init_set(pcie, 1);
@@ -1012,6 +947,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
+
/* disable the PCIe->GISB memory window (RC_BAR1) */
tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
@@ -1022,31 +962,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
- /* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
/*
- * Give the RC/EP time to wake up, before trying to configure RC.
- * Intermittently check status for link-up, up to a total of 100ms.
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
*/
- for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
- msleep(5);
-
- if (!brcm_pcie_link_up(pcie)) {
- dev_err(dev, "link down\n");
- return -ENODEV;
- }
-
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(dev, "PCIe misconfigured; is in EP mode\n");
- return -EINVAL;
- }
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ bridge = pci_host_bridge_from_priv(pcie);
resource_list_for_each_entry(entry, &bridge->windows) {
- res = entry->res;
+ struct resource *res = entry->res;
if (resource_type(res) != IORESOURCE_MEM)
continue;
@@ -1075,23 +1011,41 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* PCIe->SCB endian mode for BAR */
+ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+ writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+ return 0;
+}
+
+static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
+ /* Unassert the fundamental reset */
+ pcie->perst_set(pcie, 0);
/*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ * Give the RC/EP time to wake up, before trying to configure RC.
+ * Intermittently check status for link-up, up to a total of 100ms.
*/
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+ msleep(5);
+
+ if (!brcm_pcie_link_up(pcie)) {
+ dev_err(dev, "link down\n");
+ return -ENODEV;
+ }
+
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
@@ -1108,12 +1062,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /* PCIe->SCB endian mode for BAR */
- tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
- PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
- writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
-
/*
* Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
* is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
@@ -1125,6 +1073,82 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
return 0;
}
+static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+};
+
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ const size_t size = sizeof(struct subdev_regulators) +
+ sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->of_node) {
+ sr = alloc_subdev_regulators(dev);
+ if (!sr) {
+ dev_info(dev, "Can't allocate regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ pcie->sr = sr;
+
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_info(dev, "No regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "Can't enable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+ }
+ }
+
+no_regulators:
+ brcm_pcie_start_link(pcie);
+ return 0;
+}
+
+static void brcm_pcie_remove_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct subdev_regulators *sr = pcie->sr;
+ struct device *dev = &bus->dev;
+
+ if (!sr)
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "Failed to disable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+}
+
/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
@@ -1221,9 +1245,21 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
-static int brcm_pcie_suspend(struct device *dev)
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
+ }
+ return (int) *ret;
+}
+
+static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
@@ -1241,12 +1277,31 @@ static int brcm_pcie_suspend(struct device *dev)
return ret;
}
+ if (pcie->sr) {
+ /*
+ * Now turn off the regulators, but if at least one
+ * downstream device is enabled as a wake-up source, do not
+ * turn off regulators.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
clk_disable_unprepare(pcie->clk);
return 0;
}
-static int brcm_pcie_resume(struct device *dev)
+static int brcm_pcie_resume_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
@@ -1281,11 +1336,37 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+ * We are resuming from a suspend. In the suspend we
+ * did not disable the power supplies, so there is
+ * no need to enable them (and falsely increase their
+ * usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_reset;
+ }
+ }
+ }
+
+ ret = brcm_pcie_start_link(pcie);
+ if (ret)
+ goto err_regulator;
+
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
reset_control_rearm(pcie->rescal);
err_disable_clk:
@@ -1316,6 +1397,66 @@ static int brcm_pcie_remove(struct platform_device *pdev)
return 0;
}
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm4908_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM4908,
+ .perst_set = brcm_pcie_perst_set_4908,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
@@ -1328,6 +1469,22 @@ static const struct of_device_id brcm_pcie_match[] = {
{},
};
+static struct pci_ops brcm_pcie_ops = {
+ .map_bus = brcm_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
+static struct pci_ops brcm7425_pcie_ops = {
+ .map_bus = brcm7425_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
@@ -1414,12 +1571,22 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
+ bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
- return pci_host_probe(bridge);
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1428,8 +1595,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend = brcm_pcie_suspend,
- .resume = brcm_pcie_resume,
+ .suspend_noirq = brcm_pcie_suspend_noirq,
+ .resume_noirq = brcm_pcie_resume_noirq,
};
static struct platform_driver brcm_pcie_driver = {
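
pcie-brcmstb.c grows add_bus/remove_bus hooks that manage optional vpcie regulators for the downstream device, and the suspend path only powers those supplies off when no device behind the bridge is configured as a wakeup source. A hedged sketch of that wakeup scan, using pci_walk_bus() as the driver does but with illustrative "my_" names:

#include <linux/pci.h>
#include <linux/pm_wakeup.h>

static int my_dev_may_wakeup(struct pci_dev *dev, void *data)
{
	bool *any = data;

	if (device_may_wakeup(&dev->dev))
		*any = true;

	return *any;	/* non-zero stops the bus walk early */
}

static bool my_bus_has_wakeup_source(struct pci_bus *bus)
{
	bool any = false;

	pci_walk_bus(bus, my_dev_may_wakeup, &any);
	return any;
}

If this returns true, the regulators stay on across suspend and are deliberately not re-enabled on resume, so their usage counts stay balanced.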
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 757b7fbcdc59..fee036b07cd4 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -589,8 +589,8 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->has_inten_reg = true;
msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
- msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
- sizeof(*msi->bitmap), GFP_KERNEL);
+ msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
+ GFP_KERNEL);
if (!msi->bitmap)
return -ENOMEM;
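
The iproc MSI change swaps an open-coded BITS_TO_LONGS()/devm_kcalloc() pair for devm_bitmap_zalloc(), which sizes the allocation in bits and returns a zeroed, device-managed bitmap. A hedged sketch of the helper's use; the "my_" wrapper is illustrative.

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/gfp.h>

static unsigned long *my_alloc_msi_bitmap(struct device *dev,
					   unsigned int nr_vecs)
{
	/* one bit per MSI vector, freed automatically with the device */
	return devm_bitmap_zalloc(dev, nr_vecs, GFP_KERNEL);
}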
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 5d9fd36b02d1..11cdb9b6f109 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -153,6 +153,37 @@ struct mtk_gen3_pcie {
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
+/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
+static const char *const ltssm_str[] = {
+ "detect.quiet", /* 0x00 */
+ "detect.active", /* 0x01 */
+ "polling.active", /* 0x02 */
+ "polling.compliance", /* 0x03 */
+ "polling.configuration", /* 0x04 */
+ "config.linkwidthstart", /* 0x05 */
+ "config.linkwidthaccept", /* 0x06 */
+ "config.lanenumwait", /* 0x07 */
+ "config.lanenumaccept", /* 0x08 */
+ "config.complete", /* 0x09 */
+ "config.idle", /* 0x0A */
+ "recovery.receiverlock", /* 0x0B */
+ "recovery.equalization", /* 0x0C */
+ "recovery.speed", /* 0x0D */
+ "recovery.receiverconfig", /* 0x0E */
+ "recovery.idle", /* 0x0F */
+ "L0", /* 0x10 */
+ "L0s", /* 0x11 */
+ "L1.entry", /* 0x12 */
+ "L1.idle", /* 0x13 */
+ "L2.idle", /* 0x14 */
+ "L2.transmitwake", /* 0x15 */
+ "disable", /* 0x16 */
+ "loopback.entry", /* 0x17 */
+ "loopback.active", /* 0x18 */
+ "loopback.exit", /* 0x19 */
+ "hotreset", /* 0x1A */
+};
+
/**
* mtk_pcie_config_tlp_header() - Configure a configuration TLP header
* @bus: PCI bus to query
@@ -327,8 +358,16 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
!!(val & PCIE_PORT_LINKUP), 20,
PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
if (err) {
+ const char *ltssm_state;
+ int ltssm_index;
+
val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
- dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ ltssm_index = PCIE_LTSSM_STATE(val);
+ ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
+ "Unknown state" : ltssm_str[ltssm_index];
+ dev_err(pcie->dev,
+ "PCIe link down, current LTSSM state: %s (%#x)\n",
+ ltssm_state, val);
return err;
}
@@ -600,7 +639,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
&intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
/* Setup MSI */
@@ -623,13 +663,15 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
goto err_msi_domain;
}
+ of_node_put(intc_node);
return 0;
err_msi_domain:
irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
-
+out_put_node:
+ of_node_put(intc_node);
return ret;
}
@@ -917,7 +959,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)
return 0;
}
-static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
+static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
int i;
@@ -935,7 +977,7 @@ static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
raw_spin_unlock(&pcie->irq_lock);
}
-static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
+static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
int i;
@@ -953,7 +995,7 @@ static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
raw_spin_unlock(&pcie->irq_lock);
}
-static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
+static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
u32 val;
@@ -968,7 +1010,7 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
50 * USEC_PER_MSEC);
}
-static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -994,7 +1036,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+static int mtk_pcie_resume_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -1015,8 +1057,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
- mtk_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
};
static const struct of_device_id mtk_pcie_of_match[] = {
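
pcie-mediatek-gen3.c now decodes the LTSSM state field when link-up times out, so the error message names the training state instead of only dumping a raw register, and the IRQ-domain setup gains the missing of_node_put() on its error paths. The standalone snippet below models just the table lookup with a truncated table; it is illustrative, not the driver's table.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const ltssm_str[] = {
	"detect.quiet",		/* 0x00 */
	"detect.active",	/* 0x01 */
	"polling.active",	/* 0x02 */
};

static const char *ltssm_name(unsigned int state)
{
	return state >= ARRAY_SIZE(ltssm_str) ? "Unknown state"
					      : ltssm_str[state];
}

int main(void)
{
	printf("%s\n", ltssm_name(1));	/* detect.active */
	printf("%s\n", ltssm_name(9));	/* Unknown state */
	return 0;
}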
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index be8bd919cb88..ae5ad05ddc1d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1150,7 +1150,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_pcie *pcie = dev_get_drvdata(dev);
struct mtk_pcie_port *port;
@@ -1174,7 +1174,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+static int mtk_pcie_resume_noirq(struct device *dev)
{
struct mtk_pcie *pcie = dev_get_drvdata(dev);
struct mtk_pcie_port *port, *tmp;
@@ -1195,8 +1195,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
- mtk_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index dd5dba419047..7263d175b5ad 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -904,6 +904,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
&event_domain_ops, port);
if (!port->event_domain) {
dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
return -ENOMEM;
}
@@ -913,6 +914,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
&intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
return -ENOMEM;
}
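Both the MediaTek Gen3 and Microchip fixes above enforce the same rule: a node obtained with of_get_child_by_name() holds a reference that must be dropped with of_node_put() on every exit path, error paths included. A minimal sketch of that pattern with a hypothetical init function (names are illustrative only):

#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/pci.h>

static int foo_init_intx_domain(struct device_node *np)
{
	struct device_node *intc_node;
	struct irq_domain *domain;
	int ret = 0;

	intc_node = of_get_child_by_name(np, "interrupt-controller");
	if (!intc_node)
		return -ENODEV;

	domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
				       &irq_domain_simple_ops, NULL);
	if (!domain)
		ret = -ENOMEM;

	/* Balance of_get_child_by_name() on both success and error paths. */
	of_node_put(intc_node);
	return ret;
}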
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 997c4df6a1e7..e4faf90feaf5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -1072,7 +1072,7 @@ err_pm_put:
return err;
}
-static int __maybe_unused rcar_pcie_resume(struct device *dev)
+static int rcar_pcie_resume(struct device *dev)
{
struct rcar_pcie_host *host = dev_get_drvdata(dev);
struct rcar_pcie *pcie = &host->pcie;
@@ -1127,7 +1127,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops rcar_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
+ SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
.resume_noirq = rcar_pcie_resume_noirq,
};
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 7f56f99b4116..7352b5ff8d35 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -864,7 +864,7 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
return 0;
}
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+static int rockchip_pcie_suspend_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int ret;
@@ -889,7 +889,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
return ret;
}
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+static int rockchip_pcie_resume_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
@@ -1035,8 +1035,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
- rockchip_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
};
static const struct of_device_id rockchip_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index c7cd44ed4dfc..e4ab48041eb6 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -35,6 +35,10 @@
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+
/* Interrupt registers definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
@@ -98,6 +102,19 @@
/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
+enum xilinx_cpm_version {
+ CPM,
+ CPM5,
+};
+
+/**
+ * struct xilinx_cpm_variant - CPM variant information
+ * @version: CPM version
+ */
+struct xilinx_cpm_variant {
+ enum xilinx_cpm_version version;
+};
+
/**
* struct xilinx_cpm_pcie - PCIe port information
* @dev: Device pointer
@@ -109,6 +126,7 @@
* @intx_irq: legacy interrupt number
* @irq: Error interrupt number
* @lock: lock protecting shared register access
+ * @variant: pointer to CPM variant specific data
*/
struct xilinx_cpm_pcie {
struct device *dev;
@@ -120,6 +138,7 @@ struct xilinx_cpm_pcie {
int intx_irq;
int irq;
raw_spinlock_t lock;
+ const struct xilinx_cpm_variant *variant;
};
static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
@@ -285,6 +304,13 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
+ if (port->variant->version == CPM5) {
+ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (val)
+ writel_relaxed(val, port->cpm_base +
+ XILINX_CPM_PCIE_IR_STATUS);
+ }
+
/*
* XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
* CPM SLCR block.
@@ -484,6 +510,12 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
*/
writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
+
+ if (port->variant->version == CPM5) {
+ writel(XILINX_CPM_PCIE_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ }
+
/* Enable the Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
@@ -518,7 +550,14 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- port->reg_base = port->cfg->win;
+ if (port->variant->version == CPM5) {
+ port->reg_base = devm_platform_ioremap_resource_byname(pdev,
+ "cpm_csr");
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ } else {
+ port->reg_base = port->cfg->win;
+ }
return 0;
}
@@ -559,6 +598,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -591,8 +632,23 @@ err_parse_dt:
return err;
}
+static const struct xilinx_cpm_variant cpm_host = {
+ .version = CPM,
+};
+
+static const struct xilinx_cpm_variant cpm5_host = {
+ .version = CPM5,
+};
+
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
- { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {
+ .compatible = "xlnx,versal-cpm-host-1.00",
+ .data = &cpm_host,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5-host",
+ .data = &cpm5_host,
+ },
{}
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94a14a3d7e55..e06e9f4fc50f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -898,7 +898,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (vmd->instance < 0)
return vmd->instance;
- vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance);
+ vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d",
+ vmd->instance);
if (!vmd->name) {
err = -ENOMEM;
goto out_release_instance;
@@ -936,7 +937,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_release_instance:
ida_simple_remove(&vmd_instance_ida, vmd->instance);
- kfree(vmd->name);
return err;
}
@@ -959,7 +959,6 @@ static void vmd_remove(struct pci_dev *dev)
vmd_detach_resources(vmd);
vmd_remove_irq_domain(vmd);
ida_simple_remove(&vmd_instance_ida, vmd->instance);
- kfree(vmd->name);
}
#ifdef CONFIG_PM_SLEEP
@@ -1013,6 +1012,14 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
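The vmd change above swaps kasprintf()/kfree() for devm_kasprintf(), which ties the string's lifetime to the struct device so the explicit kfree() calls in the probe error path and in remove() can be deleted. A minimal sketch of the pattern, using a hypothetical probe helper:

#include <linux/device.h>
#include <linux/slab.h>

/*
 * Sketch only: the allocation is released automatically when the device
 * is unbound, so no kfree() is needed in error handling or remove().
 */
static int foo_set_name(struct device *dev, int instance)
{
	const char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "foo%d", instance);
	if (!name)
		return -ENOMEM;

	dev_set_drvdata(dev, (void *)name);
	return 0;
}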
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
new file mode 100644
index 000000000000..e402f05068a5
--- /dev/null
+++ b/drivers/pci/doe.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#define dev_fmt(fmt) "DOE: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/workqueue.h>
+
+#define PCI_DOE_PROTOCOL_DISCOVERY 0
+
+/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
+#define PCI_DOE_TIMEOUT HZ
+#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
+
+#define PCI_DOE_FLAG_CANCEL 0
+#define PCI_DOE_FLAG_DEAD 1
+
+/**
+ * struct pci_doe_mb - State for a single DOE mailbox
+ *
+ * This state is used to manage a single DOE mailbox capability. All fields
+ * should be considered opaque to the consumers and the structure passed into
+ * the helpers below after being created by pcim_doe_create_mb().
+ *
+ * @pdev: PCI device this mailbox belongs to
+ * @cap_offset: Capability offset
+ * @prots: Array of protocols supported (encoded as long values)
+ * @wq: Wait queue for work item
+ * @work_queue: Queue of pci_doe_work items
+ * @flags: Bit array of PCI_DOE_FLAG_* flags
+ */
+struct pci_doe_mb {
+ struct pci_dev *pdev;
+ u16 cap_offset;
+ struct xarray prots;
+
+ wait_queue_head_t wq;
+ struct workqueue_struct *work_queue;
+ unsigned long flags;
+};
+
+static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
+{
+ if (wait_event_timeout(doe_mb->wq,
+ test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
+ timeout))
+ return -EIO;
+ return 0;
+}
+
+static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
+}
+
+static int pci_doe_abort(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+
+ pci_dbg(pdev, "[%x] Issuing Abort\n", offset);
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);
+
+ do {
+ int rc;
+ u32 val;
+
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc)
+ return rc;
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+
+ /* Abort success! */
+ if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
+ !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return 0;
+
+ } while (!time_after(jiffies, timeout_jiffies));
+
+ /* Abort has timed out and the MB is dead */
+ pci_err(pdev, "[%x] ABORT timed out\n", offset);
+ return -EIO;
+}
+
+static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+ int i;
+
+ /*
+ * Check that the DOE busy bit is not set. If it is set, this could indicate
+ * that someone other than Linux (e.g. firmware) is using the mailbox. Note
+ * that the firmware and the OS are expected to negotiate access rights via
+ * a yet-to-be-defined method.
+ */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return -EBUSY;
+
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ /* Write DOE Header */
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
+ /* Length is 2 DW of header + length of payload in DW */
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+ 2 + task->request_pl_sz /
+ sizeof(u32)));
+ for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ task->request_pl[i]);
+
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
+
+ return 0;
+}
+
+static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
+ return true;
+ return false;
+}
+
+static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ size_t length, payload_length;
+ u32 val;
+ int i;
+
+ /* Read the first dword to get the protocol */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
+ return -EIO;
+ }
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ /* Read the second dword to get the length */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+
+ length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
+ if (length > SZ_1M || length < 2)
+ return -EIO;
+
+ /* First 2 dwords have already been read */
+ length -= 2;
+ payload_length = min(length, task->response_pl_sz / sizeof(u32));
+ /* Read the rest of the response payload */
+ for (i = 0; i < payload_length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ,
+ &task->response_pl[i]);
+ /* Prior to the last ack, ensure Data Object Ready */
+ if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
+ return -EIO;
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Flush excess length */
+ for (; i < length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Final error check to pick up on any since Data Object Ready */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+}
+
+static void signal_task_complete(struct pci_doe_task *task, int rv)
+{
+ task->rv = rv;
+ task->complete(task);
+}
+
+static void signal_task_abort(struct pci_doe_task *task, int rv)
+{
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+
+ if (pci_doe_abort(doe_mb)) {
+ /*
+ * If the device can't process an abort, mark the mailbox dead so that
+ * no further submissions are accepted.
+ */
+ pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
+ doe_mb->cap_offset);
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+ }
+ signal_task_complete(task, rv);
+}
+
+static void doe_statemachine_work(struct work_struct *work)
+{
+ struct pci_doe_task *task = container_of(work, struct pci_doe_task,
+ work);
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+ u32 val;
+ int rc;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
+ signal_task_complete(task, -EIO);
+ return;
+ }
+
+ /* Send request */
+ rc = pci_doe_send_req(doe_mb, task);
+ if (rc) {
+ /*
+ * The specification does not provide any guidance on how to
+ * resolve conflicting requests from other entities.
+ * Furthermore, it is likely that busy will not be detected
+ * most of the time. Flag any detection of status busy with an
+ * error.
+ */
+ if (rc == -EBUSY)
+ dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
+ offset);
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ /* Poll for response */
+retry_resp:
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+
+ if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc) {
+ signal_task_abort(task, rc);
+ return;
+ }
+ goto retry_resp;
+ }
+
+ rc = pci_doe_recv_resp(doe_mb, task);
+ if (rc < 0) {
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ signal_task_complete(task, rc);
+}
+
+static void pci_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ u8 *protocol)
+{
+ u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
+ *index);
+ u32 response_pl;
+ DECLARE_COMPLETION_ONSTACK(c);
+ struct pci_doe_task task = {
+ .prot.vid = PCI_VENDOR_ID_PCI_SIG,
+ .prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
+ .request_pl = &request_pl,
+ .request_pl_sz = sizeof(request_pl),
+ .response_pl = &response_pl,
+ .response_pl_sz = sizeof(response_pl),
+ .complete = pci_doe_task_complete,
+ .private = &c,
+ };
+ int rc;
+
+ rc = pci_doe_submit_task(doe_mb, &task);
+ if (rc < 0)
+ return rc;
+
+ wait_for_completion(&c);
+
+ if (task.rv != sizeof(response_pl))
+ return -EIO;
+
+ *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
+ *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ response_pl);
+ *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
+ response_pl);
+
+ return 0;
+}
+
+static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+{
+ return xa_mk_value((vid << 8) | prot);
+}
+
+static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+{
+ u8 index = 0;
+ u8 xa_idx = 0;
+
+ do {
+ int rc;
+ u16 vid;
+ u8 prot;
+
+ rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ if (rc)
+ return rc;
+
+ pci_dbg(doe_mb->pdev,
+ "[%x] Found protocol %d vid: %x prot: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, prot);
+
+ rc = xa_insert(&doe_mb->prots, xa_idx++,
+ pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ if (rc)
+ return rc;
+ } while (index);
+
+ return 0;
+}
+
+static void pci_doe_xa_destroy(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ xa_destroy(&doe_mb->prots);
+}
+
+static void pci_doe_destroy_workqueue(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ destroy_workqueue(doe_mb->work_queue);
+}
+
+static void pci_doe_flush_mb(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ /* Stop all pending work items from starting */
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+
+ /* Cancel an in progress work item, if necessary */
+ set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
+ wake_up(&doe_mb->wq);
+
+ /* Flush all work items */
+ flush_workqueue(doe_mb->work_queue);
+}
+
+/**
+ * pcim_doe_create_mb() - Create a DOE mailbox object
+ *
+ * @pdev: PCI device to create the DOE mailbox for
+ * @cap_offset: Offset of the DOE mailbox
+ *
+ * Create a single mailbox object to manage the mailbox protocol at the
+ * cap_offset specified.
+ *
+ * RETURNS: created mailbox object on success
+ * ERR_PTR(-errno) on failure
+ */
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
+{
+ struct pci_doe_mb *doe_mb;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
+ if (!doe_mb)
+ return ERR_PTR(-ENOMEM);
+
+ doe_mb->pdev = pdev;
+ doe_mb->cap_offset = cap_offset;
+ init_waitqueue_head(&doe_mb->wq);
+
+ xa_init(&doe_mb->prots);
+ rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
+ dev_driver_string(&pdev->dev),
+ pci_name(pdev),
+ doe_mb->cap_offset);
+ if (!doe_mb->work_queue) {
+ pci_err(pdev, "[%x] failed to allocate work queue\n",
+ doe_mb->cap_offset);
+ return ERR_PTR(-ENOMEM);
+ }
+ rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* Reset the mailbox by issuing an abort */
+ rc = pci_doe_abort(doe_mb);
+ if (rc) {
+ pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * The state machine and the mailbox should be in sync now;
+ * Set up mailbox flush prior to using the mailbox to query protocols.
+ */
+ rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = pci_doe_cache_protocols(doe_mb);
+ if (rc) {
+ pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ return doe_mb;
+}
+EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
+
+/**
+ * pci_doe_supports_prot() - Return if the DOE instance supports the given
+ * protocol
+ * @doe_mb: DOE mailbox capability to query
+ * @vid: Protocol Vendor ID
+ * @type: Protocol type
+ *
+ * RETURNS: True if the DOE mailbox supports the protocol specified
+ */
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+{
+ unsigned long index;
+ void *entry;
+
+ /* The discovery protocol must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ return true;
+
+ xa_for_each(&doe_mb->prots, index, entry)
+ if (entry == pci_doe_xa_prot_entry(vid, type))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
+
+/**
+ * pci_doe_submit_task() - Submit a task to be processed by the state machine
+ *
+ * @doe_mb: DOE mailbox capability to submit to
+ * @task: task to be queued
+ *
+ * Submit a DOE task (request/response) to the DOE mailbox to be processed.
+ * Returns upon queueing the task object. If the queue is full this function
+ * will sleep until there is room in the queue.
+ *
+ * task->complete will be called when the state machine is done processing this
+ * task.
+ *
+ * Excess data will be discarded.
+ *
+ * RETURNS: 0 when task has been successfully queued, -ERRNO on error
+ */
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ return -EINVAL;
+
+ /*
+ * DOE requests must be a whole number of DW and the response needs to
+ * be big enough for at least 1 DW
+ */
+ if (task->request_pl_sz % sizeof(u32) ||
+ task->response_pl_sz < sizeof(u32))
+ return -EINVAL;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
+ return -EIO;
+
+ task->doe_mb = doe_mb;
+ INIT_WORK(&task->work, doe_statemachine_work);
+ queue_work(doe_mb->work_queue, &task->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_doe_submit_task);
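The new DOE core exposes a small consumer API: pcim_doe_create_mb() to instantiate a mailbox, pci_doe_supports_prot() to check a [vendor ID, type] pair, and pci_doe_submit_task() for asynchronous request/response exchange. A minimal synchronous usage sketch, mirroring the internal pci_doe_discovery() helper; the function names and payload handling are illustrative, and the mailbox is assumed to have been created with pcim_doe_create_mb() at probe time:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>

static void foo_doe_done(struct pci_doe_task *task)
{
	complete(task->private);
}

/* Sketch only: synchronously exchange one request/response pair. */
static int foo_doe_exchange(struct pci_doe_mb *mb, u16 vid, u8 type,
			    u32 *req, size_t req_sz, u32 *rsp, size_t rsp_sz)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = vid,
		.prot.type = type,
		.request_pl = req,
		.request_pl_sz = req_sz,
		.response_pl = rsp,
		.response_pl_sz = rsp_sz,
		.complete = foo_doe_done,
		.private = &c,
	};
	int rc;

	if (!pci_doe_supports_prot(mb, vid, type))
		return -EOPNOTSUPP;

	rc = pci_doe_submit_task(mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&c);
	return task.rv;	/* response payload length in bytes, or -errno */
}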
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..295a033ee9a2 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,15 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_VNTB
+ tristate "PCI Endpoint Virtual NTB driver"
+ depends on PCI_ENDPOINT
+ depends on NTB
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+ Bridge (NTB) driver for PCIe Endpoint. This driver implements NTB
+ functionality between a PCI Root Port and a PCIe Endpoint.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..5c13001deaba 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5b833f00e980..36b1801a061b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -52,9 +52,11 @@ struct pci_epf_test {
enum pci_barno test_reg_bar;
size_t msix_table_offset;
struct delayed_work cmd_handler;
- struct dma_chan *dma_chan;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
struct completion transfer_complete;
bool dma_supported;
+ bool dma_private;
const struct pci_epc_features *epc_features;
};
@@ -96,6 +98,8 @@ static void pci_epf_test_dma_callback(void *param)
* @dma_src: The source address of the data transfer. It can be a physical
* address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
* @len: The size of the data transfer
+ * @dma_remote: remote RC physical address
+ * @dir: DMA transfer direction
*
* Function that uses dmaengine API to transfer data between PCIe EP and remote
* PCIe RC. The source and destination address can be a physical address given
@@ -105,12 +109,16 @@ static void pci_epf_test_dma_callback(void *param)
*/
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len)
+ size_t len, dma_addr_t dma_remote,
+ enum dma_transfer_direction dir)
{
+ struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
+ epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+ dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
- struct dma_chan *chan = epf_test->dma_chan;
struct pci_epf *epf = epf_test->epf;
struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config sconf = {};
struct device *dev = &epf->dev;
dma_cookie_t cookie;
int ret;
@@ -120,7 +128,24 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
return -EINVAL;
}
- tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (epf_test->dma_private) {
+ sconf.direction = dir;
+ if (dir == DMA_MEM_TO_DEV)
+ sconf.dst_addr = dma_remote;
+ else
+ sconf.src_addr = dma_remote;
+
+ if (dmaengine_slave_config(chan, &sconf)) {
+ dev_err(dev, "DMA slave config fail\n");
+ return -EIO;
+ }
+ tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
+ flags);
+ } else {
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ flags);
+ }
+
if (!tx) {
dev_err(dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -148,6 +173,23 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
return 0;
}
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev
+ && (filter->dma_mask & caps.directions);
+}
+
/**
* pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
* @epf_test: the EPF test device that performs data transfer operation
@@ -158,10 +200,44 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
+ struct epf_dma_filter filter;
struct dma_chan *dma_chan;
dma_cap_mask_t mask;
int ret;
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
+ goto fail_back_tx;
+ }
+
+ epf_test->dma_chan_rx = dma_chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
+ goto fail_back_rx;
+ }
+
+ epf_test->dma_chan_tx = dma_chan;
+ epf_test->dma_private = true;
+
+ init_completion(&epf_test->transfer_complete);
+
+ return 0;
+
+fail_back_rx:
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_tx = NULL;
+
+fail_back_tx:
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -174,7 +250,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
}
init_completion(&epf_test->transfer_complete);
- epf_test->dma_chan = dma_chan;
+ epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
return 0;
}
@@ -190,8 +266,17 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan);
- epf_test->dma_chan = NULL;
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
+
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+
+ return;
}
static void pci_epf_test_print_rate(const char *ops, u64 size,
@@ -280,8 +365,15 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_map_addr;
}
+ if (epf_test->dma_private) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- src_phys_addr, reg->size);
+ src_phys_addr, reg->size, 0,
+ DMA_MEM_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
} else {
@@ -373,7 +465,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- phys_addr, reg->size);
+ phys_addr, reg->size,
+ reg->src_addr, DMA_DEV_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
@@ -463,8 +556,11 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
}
ktime_get_ts64(&start);
+
ret = pci_epf_test_data_transfer(epf_test, phys_addr,
- src_phys_addr, reg->size);
+ src_phys_addr, reg->size,
+ reg->dst_addr,
+ DMA_MEM_TO_DEV);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
@@ -627,7 +723,6 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
- pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
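The pci-epf-test rework above requests dedicated slave channels and, when a private channel is in use, programs the remote RC bus address with dmaengine_slave_config() before handing the local buffer to dmaengine_prep_slave_single(). A minimal sketch of one such DMA_MEM_TO_DEV transfer on an already-requested channel; the function name and the synchronous wait are illustrative only:

#include <linux/dmaengine.h>

/* Sketch only: one slave transfer from a local buffer to a remote address. */
static int foo_slave_write(struct dma_chan *chan, dma_addr_t local_buf,
			   dma_addr_t remote_addr, size_t len)
{
	struct dma_slave_config sconf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = remote_addr,	/* remote (RC) bus address */
	};
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	if (dmaengine_slave_config(chan, &sconf))
		return -EIO;

	tx = dmaengine_prep_slave_single(chan, local_buf, len, DMA_MEM_TO_DEV,
					 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/* Busy-wait for completion; a real driver would use a callback. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;

	return 0;
}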
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
new file mode 100644
index 000000000000..0ea85e1d292e
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -0,0 +1,1442 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ * Between PCI RC and EP
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Copyright (C) 2022 NXP
+ *
+ * Based on pci-epf-ntb.c
+ * Author: Frank Li <Frank.Li@nxp.com>
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/**
+ * +------------+ +---------------------------------------+
+ * | | | |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | NetDev | | | NetDev |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | Transfer | | | Transfer |
+ * +------------+ | +--------------+
+ * | | | | |
+ * | PCI NTB | | | |
+ * | EPF | | | |
+ * | Driver | | | PCI Virtual |
+ * | | +---------------+ | NTB Driver |
+ * | | | PCI EP NTB |<------>| |
+ * | | | FN Driver | | |
+ * +------------+ +---------------+ +--------------+
+ * | | | | | |
+ * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
+ * | | PCI | | | Bus |
+ * +------------+ +---------------+--------+--------------+
+ * PCIe Root Port PCI EP
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/ntb.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_DB,
+ BAR_MW0,
+ BAR_MW1,
+ BAR_MW2,
+};
+
+/*
+ * +--------------------------------------------------+ Base
+ * | |
+ * | |
+ * | |
+ * | Common Control Register |
+ * | |
+ * | |
+ * | |
+ * +-----------------------+--------------------------+ Base+span_offset
+ * | | |
+ * | Peer Span Space | Span Space |
+ * | | |
+ * | | |
+ * +-----------------------+--------------------------+ Base+span_offset
+ * | | | +span_count * 4
+ * | | |
+ * | Span Space | Peer Span Space |
+ * | | |
+ * +-----------------------+--------------------------+
+ * Virtual PCI PCIe Endpoint
+ * NTB Driver NTB Driver
+ */
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 reserved;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
+
+struct epf_ntb {
+ struct ntb_dev ntb;
+ struct pci_epf *epf;
+ struct config_group group;
+
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ u64 mws_size[MAX_MW];
+ u64 db;
+ u32 vbus_number;
+ u16 vntb_pid;
+ u16 vntb_vid;
+
+ bool linkup;
+ u32 spad_size;
+
+ enum pci_barno epf_ntb_bar[6];
+
+ struct epf_ntb_ctrl *reg;
+
+ phys_addr_t epf_db_phy;
+ void __iomem *epf_db;
+
+ phys_addr_t vpci_mw_phy[MAX_MW];
+ void __iomem *vpci_mw_addr[MAX_MW];
+
+ struct delayed_work cmd_handler;
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function on the HOST side invokes ntb_link_enable(),
+ * this NTB function driver triggers a link event to the vhost.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ if (link_up)
+ ntb->reg->link_status |= LINK_STATUS_UP;
+ else
+ ntb->reg->link_status &= ~LINK_STATUS_UP;
+
+ ntb_link_event(&ntb->ntb);
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost
+ * to access the memory window of host
+ * @ntb: NTB device that facilitates communication between host and vhost
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * EP Outbound Window
+ * +--------+ +-----------+
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | +-----------+
+ * | Virtual| | Memory Win|
+ * | NTB | -----------> | |
+ * | Driver | | |
+ * | | +-----------+
+ * | | | |
+ * | | | |
+ * +--------+ +-----------+
+ * VHost PCI EP
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+{
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ u64 addr, size;
+ int ret = 0;
+
+ phys_addr = ntb->vpci_mw_phy[mw];
+ addr = ntb->reg->addr;
+ size = ntb->reg->size;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&ntb->epf->epc->dev,
+ "Failed to map memory window %d address\n", mw);
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
+{
+ pci_epc_unmap_addr(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ ntb->vpci_mw_phy[mw]);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct for the epf_ntb_epc
+ *
+ * Workqueue function that gets invoked periodically (once every 5ms) to
+ * check whether any commands have been received from the NTB host. The
+ * host can send commands to configure a doorbell, configure a memory
+ * window, or update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ int ret;
+ int i;
+
+ ntb = container_of(work, struct epf_ntb, cmd_handler.work);
+
+ for (i = 1; i < ntb->db_count; i++) {
+ if (readl(ntb->epf_db + i * 4)) {
+ if (readl(ntb->epf_db + i * 4))
+ ntb->db |= 1 << (i - 1);
+
+ ntb_db_event(&ntb->ntb, i);
+ writel(0, ntb->epf_db + i * 4);
+ }
+ }
+
+ ctrl = ntb->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ ctrl = ntb->reg;
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb->linkup = true;
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ case COMMAND_LINK_DOWN:
+ ntb->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "UNKNOWN command: %d\n", command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
+ msecs_to_jiffies(5));
+}
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Note that the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR. Also note that HOST2's
+ * peer scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region.
+ *
+ * Note that the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+ size_t align;
+ enum pci_barno barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size;
+ struct pci_epf *epf = ntb->epf;
+ struct device *dev = &epf->dev;
+ u32 spad_count;
+ void *base;
+ int i;
+ const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
+ epf->func_no,
+ epf->vfunc_no);
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ if ((!IS_ALIGNED(size, align)))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = 2 * spad_count * 4;
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ if (!base) {
+ dev_err(dev, "Config/Status/SPAD alloc region fail\n");
+ return -ENOMEM;
+ }
+
+ ntb->reg = base;
+
+ ctrl = ntb->reg;
+ ctrl->spad_offset = ctrl_size;
+
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ntb->spad_size = spad_size;
+
+ ctrl->db_entry_size = 4;
+
+ for (i = 0; i < ntb->db_count; i++) {
+ ntb->reg->db_data[i] = 1 + i;
+ ntb->reg->db_offset[i] = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Configure MSI/MSI-X capability for each interface with number of
+ * interrupts equal to "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ if (!(epc_features->msix_capable || epc_features->msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+
+ if (epc_features->msi_capable) {
+ ret = pci_epc_set_msi(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ 16);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_bar_init() - Configure Doorbell window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ u32 align;
+ struct device *dev = &ntb->epf->dev;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t size = 4 * ntb->db_count;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no);
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ if (!mw_addr) {
+ dev_err(dev, "Failed to allocate OB address\n");
+ return -ENOMEM;
+ }
+
+ ntb->epf_db = mw_addr;
+
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Doorbell BAR set failed\n");
+ goto err_alloc_peer_mem;
+ }
+ return ret;
+
+err_alloc_peer_mem:
+ pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size);
+ return -1;
+}
+
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
+
+/**
+ * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
+ * allocated in peer's outbound address space
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+ pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+}
+
+/**
+ * epf_ntb_mw_bar_init() - Configure Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ */
+static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
+{
+ int ret = 0;
+ int i;
+ u64 size;
+ enum pci_barno barno;
+ struct device *dev = &ntb->epf->dev;
+
+ for (i = 0; i < ntb->num_mws; i++) {
+ size = ntb->mws_size[i];
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+
+ ntb->epf->bar[barno].barno = barno;
+ ntb->epf->bar[barno].size = size;
+ ntb->epf->bar[barno].addr = NULL;
+ ntb->epf->bar[barno].phys_addr = 0;
+ ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+ if (ret) {
+ dev_err(dev, "MW set failed\n");
+ goto err_alloc_mem;
+ }
+
+ /* Allocate EPC outbound memory windows to vpci vntb device */
+ ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
+ &ntb->vpci_mw_phy[i],
+ size);
+ if (!ntb->vpci_mw_addr[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to allocate source address\n");
+ goto err_set_bar;
+ }
+ }
+
+ return ret;
+
+err_set_bar:
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+err_alloc_mem:
+ epf_ntb_mw_bar_clear(ntb, i);
+ return ret;
+}
+
+/**
+ * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+{
+ enum pci_barno barno;
+ int i;
+
+ for (i = 0; i < num_mws; i++) {
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+
+ pci_epc_mem_free_addr(ntb->epf->epc,
+ ntb->vpci_mw_phy[i],
+ ntb->vpci_mw_addr[i],
+ ntb->mws_size[i]);
+ }
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Detach the endpoint function from the endpoint controller and release the EPC reference.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
+ pci_epc_put(ntb->epf->epc);
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memorywindow)
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "Fail to get NTB function BAR\n");
+ return barno;
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * to check for commands from host. This function will write to the
+ * EP controller HW for configuring it.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = epf->epc;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb);
+ if (ret) {
+ dev_err(dev, "Config/self SPAD BAR init failed");
+ return ret;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb);
+ if (ret) {
+ dev_err(dev, "Interrupt configuration failed\n");
+ goto err_config_interrupt;
+ }
+
+ ret = epf_ntb_db_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "DB BAR init failed\n");
+ goto err_db_bar_init;
+ }
+
+ ret = epf_ntb_mw_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "MW BAR init failed\n");
+ goto err_mw_bar_init;
+ }
+
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ goto err_write_header;
+ }
+ }
+
+ INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+err_mw_bar_init:
+ epf_ntb_db_bar_clear(ntb);
+err_db_bar_init:
+err_config_interrupt:
+ epf_ntb_config_sspad_bar_clear(ntb);
+
+ return ret;
+}
+
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to cleanup all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_R(vbus_number)
+EPF_NTB_W(vbus_number)
+EPF_NTB_R(vntb_pid)
+EPF_NTB_W(vntb_pid)
+EPF_NTB_R(vntb_vid)
+EPF_NTB_W(vntb_vid)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+CONFIGFS_ATTR(epf_ntb_, vbus_number);
+CONFIGFS_ATTR(epf_ntb_, vntb_pid);
+CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ &epf_ntb_attr_vbus_number,
+ &epf_ntb_attr_vntb_pid,
+ &epf_ntb_attr_vntb_vid,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count, num_mws, etc.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
+
+/*==== virtual PCI bus driver, which only load virtual NTB PCI driver ====*/
+
+static u32 pci_space[] = {
+ 0xffffffff, /*DeviceID, Vendor ID*/
+ 0, /*Status, Command*/
+ 0xffffffff, /*Class code, subclass, prog if, revision id*/
+ 0x40, /*bist, header type, latency Timer, cache line size*/
+ 0, /*BAR 0*/
+ 0, /*BAR 1*/
+ 0, /*BAR 2*/
+ 0, /*BAR 3*/
+ 0, /*BAR 4*/
+ 0, /*BAR 5*/
+ 0, /*Cardbus cis point*/
+ 0, /*Subsystem ID Subystem vendor id*/
+ 0, /*ROM Base Address*/
+ 0, /*Reserved, Cap. Point*/
+ 0, /*Reserved,*/
+ 0, /*Max Lat, Min Gnt, interrupt pin, interrupt line*/
+};
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ if (devfn == 0) {
+ memcpy(val, ((u8 *)pci_space) + where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return 0;
+}
+
+static struct pci_ops vpci_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+static int vpci_scan_bus(void *sysdata)
+{
+ struct pci_bus *vpci_bus;
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+ if (!vpci_bus) {
+ pr_err("failed to create pci bus\n");
+ return -EINVAL;
+ }
+
+ pci_bus_add_devices(vpci_bus);
+
+ return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct epf_ntb *ndev = ntb_ndev(ntb);
+
+ return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ int ret;
+ struct device *dev;
+
+ dev = &ntb->ntb.dev;
+ barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ epf_bar = &ntb->epf->bar[barno];
+ epf_bar->phys_addr = addr;
+ epf_bar->barno = barno;
+ epf_bar->size = size;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+ if (ret) {
+		dev_err(dev, "failed to set mw trans\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (base)
+ *base = ntb->vpci_mw_phy[idx];
+
+ if (size)
+ *size = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4;
+ u32 val;
+ void __iomem *base = ntb->reg;
+
+ val = readl(base + off + ct + idx * 4);
+ return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset, ct = ctrl->spad_count * 4;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + ct + idx * 4);
+ return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+ u32 val;
+
+ val = readl(base + off + idx * 4);
+ return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + idx * 4);
+ return 0;
+}
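+
+/*
+ * Scratchpad layout implied by the accessors above: peer scratchpads start at
+ * reg->spad_offset, and the local scratchpads follow directly after them:
+ *
+ *	peer  spad[idx] at spad_offset + idx * 4
+ *	local spad[idx] at spad_offset + spad_count * 4 + idx * 4
+ */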
+
+static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
+{
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ u8 func_no, vfunc_no;
+ int ret;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_raise_irq(ntb->epf->epc,
+ func_no,
+ vfunc_no,
+ PCI_EPC_IRQ_MSI,
+ interrupt_num + 1);
+ if (ret)
+ dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
+
+ return ret;
+}
+
+static u64 vntb_epf_db_read(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->db;
+}
+
+static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->reg->link_status;
+}
+
+static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ ntb->db &= ~db_bits;
+ return 0;
+}
+
+static int vntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ return 0;
+}
+
+static const struct ntb_dev_ops vntb_epf_ops = {
+ .mw_count = vntb_epf_mw_count,
+ .spad_count = vntb_epf_spad_count,
+ .peer_mw_count = vntb_epf_peer_mw_count,
+ .db_valid_mask = vntb_epf_db_valid_mask,
+ .db_set_mask = vntb_epf_db_set_mask,
+ .mw_set_trans = vntb_epf_mw_set_trans,
+ .mw_clear_trans = vntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
+ .link_enable = vntb_epf_link_enable,
+ .spad_read = vntb_epf_spad_read,
+ .spad_write = vntb_epf_spad_write,
+ .peer_spad_read = vntb_epf_peer_spad_read,
+ .peer_spad_write = vntb_epf_peer_spad_write,
+ .peer_db_set = vntb_epf_peer_db_set,
+ .db_read = vntb_epf_db_read,
+ .mw_get_align = vntb_epf_mw_get_align,
+ .link_is_up = vntb_epf_link_is_up,
+ .db_clear_mask = vntb_epf_db_clear_mask,
+ .db_clear = vntb_epf_db_clear,
+ .link_disable = vntb_epf_link_disable,
+};
+
+static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
+ struct device *dev = &pdev->dev;
+
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &vntb_epf_ops;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ goto err_register_dev;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+
+err_register_dev:
+	return ret;
+}
+
+static struct pci_device_id pci_vntb_table[] = {
+ {
+ PCI_DEVICE(0xffff, 0xffff),
+ },
+ {},
+};
+
+static struct pci_driver vntb_pci_driver = {
+ .name = "pci-vntb",
+ .id_table = pci_vntb_table,
+ .probe = pci_vntb_probe,
+};
+
+/* ============ PCIe EPF Driver Bind ====================*/
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize the endpoint controller associated with the NTB function
+ * device. Invoked when the primary interface is bound to the EPC device.
+ * This function will succeed only once the EPC interface is bound.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
+ pci_vntb_table[0].vendor = ntb->vntb_vid;
+ pci_vntb_table[0].device = ntb->vntb_pid;
+
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+		dev_err(dev, "failed to register vntb pci driver\n");
+ goto err_bar_alloc;
+ }
+
+ vpci_scan_bus(ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+
+ pci_unregister_driver(&vntb_pci_driver);
+}
+
+/* EPF function driver ops */
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when the endpoint function bus detects an NTB
+ * endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ ntb->vbus_number = 0xff;
+ epf_set_drvdata(epf, ntb);
+
+ dev_info(dev, "pci-ep epf driver loaded\n");
+ return 0;
+}
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_vntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_vntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+					    WQ_HIGHPRI, 0);
+	if (!kpcintb_workqueue)
+		return -ENOMEM;
+
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index b8c9011987f4..4504039056d1 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -13,27 +13,6 @@
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
-/*
- * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
- * pci_mmap_page_range() (if needed) as a wrapper round it.
- */
-
-#ifdef HAVE_PCI_MMAP
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
-
- /* Adjust vm_pgoff to be the offset within the resource */
- vma->vm_pgoff -= start >> PAGE_SHIFT;
- return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
- write_combine);
-}
-#endif
-
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
@@ -70,27 +49,4 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
vma->vm_page_prot);
}
-#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
-
-/*
- * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
- * the architecture's pci_mmap_page_range(), converting to "user visible"
- * addresses as necessary.
- */
-
-int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- /*
- * pci_mmap_page_range() expects the same kind of entry as coming
- * from /proc/bus/pci/ which is a "user visible" value. If this is
- * different from the resource itself, arch will do necessary fixup.
- */
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
- vma->vm_pgoff += start >> PAGE_SHIFT;
- return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
-}
#endif
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 462b429ad243..4496a7c5c478 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
+#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,13 +21,6 @@
#include <linux/seq_buf.h>
#include <linux/xarray.h>
-enum pci_p2pdma_map_type {
- PCI_P2PDMA_MAP_UNKNOWN = 0,
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
- PCI_P2PDMA_MAP_BUS_ADDR,
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
@@ -854,6 +848,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
struct pci_dev *client;
struct pci_p2pdma *p2pdma;
+ int dist;
if (!provider->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
@@ -870,74 +865,48 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
type = xa_to_value(xa_load(&p2pdma->map_types,
map_types_idx(client)));
rcu_read_unlock();
- return type;
-}
-static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
- struct device *dev, struct scatterlist *sg, int nents)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
- sg_dma_len(s) = s->length;
- }
+ if (type == PCI_P2PDMA_MAP_UNKNOWN)
+ return calc_map_type_and_dist(provider, client, &dist, true);
- return nents;
+ return type;
}
/**
- * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: elements in the scatterlist
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_map_sg() (if called)
+ * pci_p2pdma_map_segment - map an sg segment determining the mapping type
+ * @state: State structure that should be declared outside of the for_each_sg()
+ * loop and initialized to zero.
+ * @dev: DMA device that's doing the mapping operation
+ * @sg: scatterlist segment to map
*
- * Scatterlists mapped with this function should be unmapped using
- * pci_p2pdma_unmap_sg_attrs().
+ * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
+ * the sg segment is the same for the page_link and the dma_address.
*
- * Returns the number of SG entries mapped or 0 on error.
+ * Attempt to map a single segment in an SGL with the PCI bus address.
+ * The segment must point to a PCI P2PDMA page and thus must be
+ * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
+ *
+ * Returns the type of mapping used and maps the page if the type is
+ * PCI_P2PDMA_MAP_BUS_ADDR.
*/
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
{
- struct pci_p2pdma_pagemap *p2p_pgmap =
- to_p2p_pgmap(sg_page(sg)->pgmap);
-
- switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
- case PCI_P2PDMA_MAP_BUS_ADDR:
- return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
- default:
- WARN_ON_ONCE(1);
- return 0;
+ if (state->pgmap != sg_page(sg)->pgmap) {
+ state->pgmap = sg_page(sg)->pgmap;
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
-}
-EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
-/**
- * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
- * mapped with pci_p2pdma_map_sg()
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: number of elements returned by pci_p2pdma_map_sg()
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
- */
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- enum pci_p2pdma_map_type map_type;
-
- map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);
+ if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
+ sg->dma_address = sg_phys(sg) + state->bus_off;
+ sg_dma_len(sg) = sg->length;
+ sg_dma_mark_bus_address(sg);
+ }
- if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
- dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
+ return state->map;
}
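+
+/*
+ * Usage sketch (illustrative only; callers are added elsewhere): a non-IOMMU
+ * dma_map_sg() implementation keeps one zero-initialized state across the
+ * whole scatterlist and only calls this helper for P2PDMA pages:
+ *
+ *	struct pci_p2pdma_map_state p2pdma_state = {};
+ *	struct scatterlist *s;
+ *	int i;
+ *
+ *	for_each_sg(sgl, s, nents, i) {
+ *		if (is_pci_p2pdma_page(sg_page(s))) {
+ *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, s)) {
+ *			case PCI_P2PDMA_MAP_BUS_ADDR:
+ *				continue;	(already mapped above)
+ *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ *				break;		(map through the normal path)
+ *			default:
+ *				goto failure_path;	(not supported)
+ *			}
+ *		}
+ *		(normal per-segment DMA mapping goes here)
+ *	}
+ */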
-EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
/**
* pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3760d85c10d2..a46fec776ad7 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -21,8 +21,9 @@
#include "pci.h"
/*
- * The GUID is defined in the PCI Firmware Specification available here:
- * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
+ * The GUID is defined in the PCI Firmware Specification available
+ * here to PCI-SIG members:
+ * https://members.pcisig.com/wg/PCI-SIG/document/15350
*/
const guid_t pci_acpi_dsm_guid =
GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cfaf40a540a8..95bc329e74c0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -41,8 +41,10 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
+#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
+#endif
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
@@ -1293,9 +1295,6 @@ static int pci_set_full_power_state(struct pci_dev *dev)
pci_restore_bars(dev);
}
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
@@ -1390,9 +1389,6 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(dev->current_state),
pci_power_name(state));
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e10cdec6c56e..785f31086313 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -560,12 +560,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 7952e5efd6cf..e2d8a74f83c3 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -392,6 +392,11 @@ void pci_aer_init(struct pci_dev *dev)
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_aer_clear_status(dev);
+
+ if (pci_aer_available())
+ pci_enable_pcie_error_reporting(dev);
+
+ pcie_set_ecrc_checking(dev);
}
void pci_aer_exit(struct pci_dev *dev)
@@ -538,7 +543,7 @@ static const char *aer_agent_string[] = {
u64 *stats = pdev->aer_stats->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -1228,9 +1233,6 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data)
pci_disable_pcie_error_reporting(dev);
}
- if (enable)
- pcie_set_ecrc_checking(dev);
-
return 0;
}
@@ -1347,6 +1349,11 @@ static int aer_probe(struct pcie_device *dev)
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
+ BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
+ AER_MAX_TYPEOF_COR_ERRS);
+ BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
+ AER_MAX_TYPEOF_UNCOR_ERRS);
+
/* Limit to Root Ports or Root Complex Event Collectors */
if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
(pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a96b7424c9bc..a8aec190986c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1012,25 +1012,6 @@ out:
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
- struct pcie_link_state *link = pdev->link_state;
-
- if (aspm_disabled || !link)
- return;
- /*
- * Devices changed PM state, we should recheck if latency
- * meets all functions' requirement
- */
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- pcie_update_aspm_capable(link->root);
- pcie_config_aspm_path(link);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1366,4 +1347,3 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
-EXPORT_SYMBOL(pcie_aspm_support_enabled);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 0c5a143025af..59c90d04a609 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -55,10 +55,14 @@ static int report_error_detected(struct pci_dev *dev,
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pci_dev_set_io_state(dev, state) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->error_detected) {
+ if (pci_dev_is_disconnected(dev)) {
+ vote = PCI_ERS_RESULT_DISCONNECT;
+ } else if (!pci_dev_set_io_state(dev, state)) {
+ pci_info(dev, "can't recover (state transition %u -> %u invalid)\n",
+ dev->error_state, state);
+ vote = PCI_ERS_RESULT_NONE;
+ } else if (!pdrv || !pdrv->err_handler ||
+ !pdrv->err_handler->error_detected) {
/*
* If any device in the subtree does not have an error_detected
* callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 604feeb84ee4..1ac7fec47d6f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -222,15 +222,8 @@ static int get_port_device_capability(struct pci_dev *dev)
#ifdef CONFIG_PCIEAER
if (dev->aer_cap && pci_aer_available() &&
- (pcie_ports_native || host->native_aer)) {
+ (pcie_ports_native || host->native_aer))
services |= PCIE_PORT_SERVICE_AER;
-
- /*
- * Disable AER on this port in case it's been enabled by the
- * BIOS (the AER service driver will enable it when necessary).
- */
- pci_disable_pcie_error_reporting(dev);
- }
#endif
/* Root Ports and Root Complex Event Collectors may generate PMEs */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 17a969942d37..c5286b027f00 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1890,6 +1890,9 @@ int pci_setup_device(struct pci_dev *dev)
dev->broken_intx_masking = pci_intx_mask_broken(dev);
+ /* Clear errors left from system firmware */
+ pci_write_config_word(dev, PCI_STATUS, 0xffff);
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
@@ -2312,7 +2315,7 @@ EXPORT_SYMBOL(pci_alloc_dev);
static bool pci_bus_crs_vendor_id(u32 l)
{
- return (l & 0xffff) == 0x0001;
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
}
static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
@@ -2579,33 +2582,39 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned int next_fn(struct pci_bus *bus, struct pci_dev *dev,
- unsigned int fn)
+static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
{
int pos;
u16 cap = 0;
unsigned int next_fn;
- if (pci_ari_enabled(bus)) {
- if (!dev)
- return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
+ if (!dev)
+ return -ENODEV;
- pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
- next_fn = PCI_ARI_CAP_NFN(cap);
- if (next_fn <= fn)
- return 0; /* protect against malformed list */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return -ENODEV;
- return next_fn;
- }
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return -ENODEV; /* protect against malformed list */
- /* dev may be NULL for non-contiguous multifunction devices */
- if (!dev || dev->multifunction)
- return (fn + 1) % 8;
+ return next_fn;
+}
- return 0;
+static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
+{
+ if (pci_ari_enabled(bus))
+ return next_ari_fn(bus, dev, fn);
+
+ if (fn >= 7)
+ return -ENODEV;
+ /* only multifunction devices may have more functions */
+ if (dev && !dev->multifunction)
+ return -ENODEV;
+
+ return fn + 1;
}
static int only_one_child(struct pci_bus *bus)
@@ -2643,26 +2652,30 @@ static int only_one_child(struct pci_bus *bus)
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- unsigned int fn, nr = 0;
struct pci_dev *dev;
+ int fn = 0, nr = 0;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
- dev = pci_scan_single_device(bus, devfn);
- if (!dev)
- return 0;
- if (!pci_dev_is_added(dev))
- nr++;
-
- for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
+ do {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!pci_dev_is_added(dev))
nr++;
- dev->multifunction = 1;
+ if (fn > 0)
+ dev->multifunction = 1;
+ } else if (fn == 0) {
+ /*
+ * Function 0 is required unless we are running on
+ * a hypervisor that passes through individual PCI
+ * functions.
+ */
+ if (!hypervisor_isolated_pci_functions())
+ break;
}
- }
+ fn = next_fn(bus, dev, fn);
+ } while (fn >= 0);
/* Only one slot has PCIe device */
if (bus->self && nr)
@@ -2858,29 +2871,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, fn, cmax, max = start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
- int nr_devs;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8) {
- nr_devs = pci_scan_slot(bus, devfn);
-
- /*
- * The Jailhouse hypervisor may pass individual functions of a
- * multi-function device to a guest without passing function 0.
- * Look for them as well.
- */
- if (jailhouse_paravirt() && nr_devs == 0) {
- for (fn = 1; fn < 8; fn++) {
- dev = pci_scan_single_device(bus, devfn + fn);
- if (dev)
- dev->multifunction = 1;
- }
- }
- }
+ for (devfn = 0; devfn < 256; devfn += 8)
+ pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 31b26d8ea6cc..f967709082d6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -244,6 +244,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = pde_data(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
+ resource_size_t start, end;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO) ||
@@ -278,7 +279,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
iomem_is_exclusive(dev->resource[i].start))
return -EINVAL;
- ret = pci_mmap_page_range(dev, i, vma,
+ pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
+
+ /* Adjust vm_pgoff to be the offset within the resource */
+ vma->vm_pgoff -= start >> PAGE_SHIFT;
+ ret = pci_mmap_resource_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 41aeaa235132..4944798e75b5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
+#include <linux/isa-dma.h> /* isa_dma_bridge_buggy */
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -30,7 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/switchtec.h>
-#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -239,6 +239,7 @@ static void quirk_passive_release(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
+#ifdef CONFIG_X86_32
/*
* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
* workaround but VIA don't answer queries. If you happen to have good
@@ -265,6 +266,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
+#endif
/*
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
@@ -2709,10 +2711,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
nvenet_msi_disable);
/*
- * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled,
- * then the device can't use INTx interrupts. Tegra's PCIe root ports don't
- * generate MSI interrupts for PME and AER events instead only INTx interrupts
- * are generated. Though Tegra's PCIe root ports can generate MSI interrupts
+ * PCIe spec r6.0 sec 6.1.4.3 says that if MSI/MSI-X is enabled, the device
+ * can't use INTx interrupts. Tegra's PCIe Root Ports don't generate MSI
+ * interrupts for PME and AER events; instead only INTx interrupts are
+ * generated. Though Tegra's PCIe Root Ports can generate MSI interrupts
* for other events, since PCIe specification doesn't support using a mix of
* INTx and MSI/MSI-X, it is required to disable MSI interrupts to avoid port
* service drivers registering their respective ISRs for MSIs.
@@ -2760,6 +2762,15 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
PCI_CLASS_BRIDGE_PCI, 8,
pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229a,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229c,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229e,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
/*
* Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
@@ -4924,6 +4935,9 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
/* Broadcom multi-function device */
{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index c36c1238c604..75be4fe22509 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1376,8 +1376,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
- minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
- GFP_KERNEL);
+ minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
if (minor < 0) {
rc = minor;
goto err_put;
@@ -1692,7 +1691,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
err_put:
- ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
return rc;
}
@@ -1704,7 +1703,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
cdev_device_del(&stdev->cdev, &stdev->dev);
- ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
stdev_kill(stdev);
put_device(&stdev->dev);
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 96e09fa40909..03b1309875ae 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1139,7 +1139,7 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags)
/*
* To handle interrupt latency, we always reprogram the period
- * regardlesss of PERF_EF_RELOAD.
+ * regardless of PERF_EF_RELOAD.
*/
if (pmu_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
@@ -1261,7 +1261,7 @@ static int validate_group(struct perf_event *event)
*/
.used_mask = mask,
};
- memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
+ bitmap_zero(mask, cci_pmu->num_cntrs);
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
@@ -1629,10 +1629,9 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev)
GFP_KERNEL);
if (!cci_pmu->hw_events.events)
return ERR_PTR(-ENOMEM);
- cci_pmu->hw_events.used_mask = devm_kcalloc(dev,
- BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
- sizeof(*cci_pmu->hw_events.used_mask),
- GFP_KERNEL);
+ cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
+ CCI_PMU_MAX_HW_CNTRS(model),
+ GFP_KERNEL);
if (!cci_pmu->hw_events.used_mask)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 40b352e8aa7f..728d13d8e98a 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1250,7 +1250,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
/* Get a convenient /sys/event_source/devices/ name */
- ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+ ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
if (ccn->dt.id == 0) {
name = "ccn";
} else {
@@ -1312,7 +1312,7 @@ error_pmu_register:
&ccn->dt.node);
error_set_affinity:
error_choose_name:
- ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+ ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1329,7 +1329,7 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
perf_pmu_unregister(&ccn->dt.pmu);
- ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+ ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
}
static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index db670b265897..b65a7d9640e1 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -39,6 +39,24 @@
#include <asm/mmu.h>
#include <asm/sysreg.h>
+/*
+ * Cache if the event is allowed to trace Context information.
+ * This allows us to perform the check, i.e, perfmon_capable(),
+ * in the context of the event owner, once, during the event_init().
+ */
+#define SPE_PMU_HW_FLAGS_CX BIT(0)
+
+static void set_spe_event_has_cx(struct perf_event *event)
+{
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+ event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+}
+
+static bool get_spe_event_has_cx(struct perf_event *event)
+{
+ return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
+}
+
#define ARM_SPE_BUF_PAD_BYTE 0
struct arm_spe_pmu_buf {
@@ -272,7 +290,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
if (!attr->exclude_kernel)
reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+ if (get_spe_event_has_cx(event))
reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
return reg;
@@ -709,10 +727,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (!perfmon_capable() &&
(reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
- BIT(SYS_PMSCR_EL1_CX_SHIFT) |
BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
return -EACCES;
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index b1b2a55de77f..8e058e08fe81 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -611,7 +611,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
.dev = dev,
};
- pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
return pmu->id;
}
@@ -765,7 +765,7 @@ ddr_perf_err:
cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
@@ -779,7 +779,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
return 0;
}
diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig
index 5546218b5598..171bfc1b6bc2 100644
--- a/drivers/perf/hisilicon/Kconfig
+++ b/drivers/perf/hisilicon/Kconfig
@@ -14,3 +14,13 @@ config HISI_PCIE_PMU
RCiEP devices.
Adds the PCIe PMU into perf events system for monitoring latency,
bandwidth etc.
+
+config HNS3_PMU
+ tristate "HNS3 PERF PMU"
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI
+ help
+ Provide support for HNS3 performance monitoring unit (PMU) RCiEP
+ devices.
+ Adds the HNS3 PMU into perf events system for monitoring latency,
+ bandwidth etc.
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 6be83517acaa..4d2c9abe3372 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
+obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 62299ab5a9be..50d0c0a2f1fe 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -516,21 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
"hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
ddrc_pmu->index_id);
- ddrc_pmu->pmu = (struct pmu) {
- .name = name,
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = ddrc_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
+ hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE);
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 393513150106..13017b3412a5 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -519,21 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
hha_pmu->sccl_id, hha_pmu->index_id);
- hha_pmu->pmu = (struct pmu) {
- .name = name,
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = hha_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
+ hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE);
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 560ab964c8b5..2995f3630d49 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -557,21 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
*/
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
l3c_pmu->sccl_id, l3c_pmu->ccl_id);
- l3c_pmu->pmu = (struct pmu) {
- .name = name,
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = l3c_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
+ hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE);
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index a0ee84d97c41..47d3cc9b6eec 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -412,21 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
- pa_pmu->pmu = (struct pmu) {
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = pa_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
-
+ hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE);
ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
if (ret) {
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 980b9ee6eb14..fbc8a93d5eac 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -531,4 +531,22 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
+void hisi_pmu_init(struct pmu *pmu, const char *name,
+ const struct attribute_group **attr_groups, struct module *module)
+{
+ pmu->name = name;
+ pmu->module = module;
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->event_init = hisi_uncore_pmu_event_init;
+ pmu->pmu_enable = hisi_uncore_pmu_enable;
+ pmu->pmu_disable = hisi_uncore_pmu_disable;
+ pmu->add = hisi_uncore_pmu_add;
+ pmu->del = hisi_uncore_pmu_del;
+ pmu->start = hisi_uncore_pmu_start;
+ pmu->stop = hisi_uncore_pmu_stop;
+ pmu->read = hisi_uncore_pmu_read;
+ pmu->attr_groups = attr_groups;
+}
+EXPORT_SYMBOL_GPL(hisi_pmu_init);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 96eeddad55ff..b59de33cd059 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -121,4 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
+void hisi_pmu_init(struct pmu *pmu, const char *name,
+ const struct attribute_group **attr_groups, struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index 6aedc303ff56..b9c79f17230c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -445,20 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
- sllc_pmu->pmu = (struct pmu) {
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = sllc_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
+ hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE);
ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
new file mode 100644
index 000000000000..e0457d84af6b
--- /dev/null
+++ b/drivers/perf/hisilicon/hns3_pmu.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This driver adds support for the HNS3 PMU iEP device. Related perf events
+ * are bandwidth, latency, packet rate, interrupt rate, etc.
+ *
+ * Copyright (C) 2022 HiSilicon Limited
+ */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/perf_event.h>
+#include <linux/smp.h>
+
+/* registers offset address */
+#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000
+#define HNS3_PMU_REG_CLOCK_FREQ 0x0020
+#define HNS3_PMU_REG_BDF 0x0fe0
+#define HNS3_PMU_REG_VERSION 0x0fe4
+#define HNS3_PMU_REG_DEVICE_ID 0x0fe8
+
+#define HNS3_PMU_REG_EVENT_OFFSET 0x1000
+#define HNS3_PMU_REG_EVENT_SIZE 0x1000
+#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00
+#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04
+#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08
+#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c
+#define HNS3_PMU_REG_EVENT_COUNTER 0x10
+#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18
+#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28
+#define HNS3_PMU_REG_EVENT_QID_PARA 0x2c
+
+#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0)
+#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1)
+#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5)
+
+#define HNS3_PMU_FILTER_ALL_TC 0xf
+#define HNS3_PMU_FILTER_ALL_QUEUE 0xffff
+
+#define HNS3_PMU_CTRL_SUBEVENT_S 4
+#define HNS3_PMU_CTRL_FILTER_MODE_S 24
+
+#define HNS3_PMU_GLOBAL_START BIT(0)
+
+#define HNS3_PMU_EVENT_STATUS_RESET BIT(11)
+#define HNS3_PMU_EVENT_EN BIT(12)
+#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15)
+
+#define HNS3_PMU_QID_PARA_FUNC_S 0
+#define HNS3_PMU_QID_PARA_QUEUE_S 16
+
+#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0)
+#define HNS3_PMU_QID_CTRL_DONE BIT(1)
+#define HNS3_PMU_QID_CTRL_MISS BIT(2)
+
+#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1)
+
+#define HNS3_PMU_MAX_HW_EVENTS 8
+
+/*
+ * Each hardware event contains two registers (counter and ext_counter) for
+ * bandwidth, packet rate, latency and interrupt rate. These two registers are
+ * triggered to run at the same time when a hardware event is enabled. The
+ * meaning of counter and ext_counter differs between event types, as shown
+ * below:
+ *
+ * +----------------+------------------+---------------+
+ * | event type | counter | ext_counter |
+ * +----------------+------------------+---------------+
+ * | bandwidth | byte number | cycle number |
+ * +----------------+------------------+---------------+
+ * | packet rate | packet number | cycle number |
+ * +----------------+------------------+---------------+
+ * | latency | cycle number | packet number |
+ * +----------------+------------------+---------------+
+ * | interrupt rate | interrupt number | cycle number |
+ * +----------------+------------------+---------------+
+ *
+ * The cycle number is the increment of the hardware timer's counter; the
+ * frequency of the hardware timer can be read from the hw_clk_freq file.
+ *
+ * Performance of each hardware event is calculated by: counter / ext_counter.
+ *
+ * Since processing of data is preferred to be done in userspace, we expose
+ * ext_counter as a separate event for userspace and use bit 16 to indicate it.
+ * For example, events 0x00001 and 0x10001 are actually one hardware event
+ * because bits 0-15 are the same. If bit 16 of an event is 0, the counter
+ * register is read; otherwise the ext_counter register is read.
+ */
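+/*
+ * Worked example (illustrative): for a bandwidth event, counter accumulates
+ * bytes while ext_counter accumulates hardware timer cycles, so with a timer
+ * frequency f taken from hw_clk_freq:
+ *
+ *	bandwidth = counter / (ext_counter / f)   [bytes per second]
+ *
+ * e.g. counter = 1000000 bytes, ext_counter = 500000 cycles and f = 1 GHz
+ * give 1000000 / (500000 / 1e9) = 2e9 bytes per second.
+ */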
+/* bandwidth events */
+#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001
+#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001
+#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002
+#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002
+#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003
+#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003
+#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004
+#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004
+#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005
+#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005
+#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006
+#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006
+#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008
+#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008
+#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009
+#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009
+#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a
+#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a
+#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b
+#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b
+#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c
+#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c
+#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d
+#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d
+#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e
+#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e
+#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f
+#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f
+#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010
+#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010
+
+/* packet rate events */
+#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100
+#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100
+#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101
+#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101
+#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102
+#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102
+#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103
+#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103
+#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104
+#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104
+#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105
+#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105
+#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106
+#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106
+#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107
+#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107
+#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108
+#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108
+#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109
+#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109
+#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a
+#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a
+#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b
+#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b
+#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c
+#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c
+#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d
+#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d
+#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e
+#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e
+#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f
+#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f
+#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110
+#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110
+#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111
+#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111
+#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112
+#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112
+
+/* latency events */
+#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202
+#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202
+#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204
+#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204
+#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206
+#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206
+#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207
+#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207
+#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208
+#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208
+#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209
+#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209
+#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e
+#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e
+#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f
+#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f
+#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210
+#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210
+#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211
+#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211
+#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212
+#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212
+#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213
+#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213
+#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214
+#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214
+#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215
+#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215
+#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216
+#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216
+#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217
+#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217
+#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218
+#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218
+#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219
+#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219
+#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a
+#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a
+#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c
+#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c
+
+/* interrupt rate events */
+#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300
+#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300
+
+/* filter mode supported by each bandwidth event */
+#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07
+#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f
+#define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f
+#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f
+#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f
+#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11
+#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11
+#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b
+#define HNS3_PMU_FILTER_BW_WR_EBD 0x11
+#define HNS3_PMU_FILTER_BW_RD_FBD 0x01
+#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b
+#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01
+#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01
+#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01
+#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01
+
+/* filter mode supported by each packet rate event */
+#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07
+#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07
+#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f
+#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f
+#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f
+#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f
+#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11
+#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f
+#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11
+#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b
+#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11
+#define HNS3_PMU_FILTER_PPS_RD_FBD 0x01
+#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b
+#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01
+#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01
+#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01
+#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01
+#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01
+#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01
+
+/* filter mode supported by each latency event */
+#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01
+#define HNS3_PMU_FILTER_DLY_TX 0x01
+#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07
+#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07
+#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07
+#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07
+#define HNS3_PMU_FILTER_DLY_RPU 0x11
+#define HNS3_PMU_FILTER_DLY_TPU 0x1f
+#define HNS3_PMU_FILTER_DLY_RPE 0x01
+#define HNS3_PMU_FILTER_DLY_TPE 0x0b
+#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b
+#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b
+#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11
+#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01
+#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b
+#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01
+#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01
+#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01
+#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01
+#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01
+
+/* filter mode supported by each interrupt rate event */
+#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01
+
+enum hns3_pmu_hw_filter_mode {
+ HNS3_PMU_HW_FILTER_GLOBAL,
+ HNS3_PMU_HW_FILTER_PORT,
+ HNS3_PMU_HW_FILTER_PORT_TC,
+ HNS3_PMU_HW_FILTER_FUNC,
+ HNS3_PMU_HW_FILTER_FUNC_QUEUE,
+ HNS3_PMU_HW_FILTER_FUNC_INTR,
+};
+
+struct hns3_pmu_event_attr {
+ u32 event;
+ u16 filter_support;
+};
+
+struct hns3_pmu {
+ struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS];
+ struct hlist_node node;
+ struct pci_dev *pdev;
+ struct pmu pmu;
+ void __iomem *base;
+ int irq;
+ int on_cpu;
+ u32 identifier;
+ u32 hw_clk_freq; /* hardware clock frequency of PMU */
+ /* maximum and minimum bdf allowed by PMU */
+ u16 bdf_min;
+ u16 bdf_max;
+};
+
+#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu))
+
+#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff)
+
+#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff)
+#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
+#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))
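+
+/*
+ * Example (illustrative): FILTER_CONDITION_PORT_TC(2, 1) evaluates to
+ * (2 << 3) | (1 & 0x07) = 0x11, i.e. the port number sits in bits 3 and up
+ * and the TC in bits 0-2.
+ */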
+
+#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \
+ static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK_ULL(_end, _start), \
+ event->attr._config); \
+ }
+
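+/*
+ * The helpers below decode the user-visible config layout that is also
+ * advertised through the "format" sysfs group: config carries the event
+ * selection and config1 carries the filter parameters. As an illustrative
+ * example (assuming SICL 0), a request such as
+ *   perf stat -a -e hns3_pmu_sicl_0/bw_ssu_egu_byte_num,global=1/ -I 1000
+ * is decoded with these extractors.
+ */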
+HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7);
+HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15);
+HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16);
+HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
+HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
+HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
+HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
+HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
+HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
+
+#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \
+ HNS3_PMU_FILTER_BW_##_name})
+#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_BW_##_name##_TIME, \
+ HNS3_PMU_FILTER_BW_##_name})
+#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \
+ HNS3_PMU_FILTER_PPS_##_name})
+#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_PPS_##_name##_TIME, \
+ HNS3_PMU_FILTER_PPS_##_name})
+#define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_DLY_##_name##_TIME, \
+ HNS3_PMU_FILTER_DLY_##_name})
+#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \
+ HNS3_PMU_FILTER_DLY_##_name})
+#define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \
+ HNS3_PMU_FILTER_INTR_##_name})
+#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
+ HNS3_PMU_EVT_PPS_##_name##_TIME, \
+ HNS3_PMU_FILTER_INTR_##_name})
+
+static ssize_t hns3_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t hns3_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hns3_pmu_event_attr *event;
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ event = eattr->var;
+
+ return sysfs_emit(buf, "config=0x%x\n", event->event);
+}
+
+static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hns3_pmu_event_attr *event;
+ struct dev_ext_attribute *eattr;
+ int len;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ event = eattr->var;
+
+ len = sysfs_emit_at(buf, 0, "filter mode supported: ");
+ if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
+ len += sysfs_emit_at(buf, len, "global ");
+ if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
+ len += sysfs_emit_at(buf, len, "port ");
+ if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
+ len += sysfs_emit_at(buf, len, "port-tc ");
+ if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
+ len += sysfs_emit_at(buf, len, "func ");
+ if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
+ len += sysfs_emit_at(buf, len, "func-queue ");
+ if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
+ len += sysfs_emit_at(buf, len, "func-intr ");
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+#define HNS3_PMU_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
+ })[0].attr.attr)
+
+#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
+ HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
+#define HNS3_PMU_EVENT_ATTR(_name, _event) \
+ HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
+#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
+ HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)
+
+#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \
+ HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
+ HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
+#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \
+ HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
+ HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
+#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \
+ HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
+ HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
+#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \
+ HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
+ HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
+
+#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
+#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
+#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
+#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
+ HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
+
+static u8 hns3_pmu_hw_filter_modes[] = {
+ HNS3_PMU_HW_FILTER_GLOBAL,
+ HNS3_PMU_HW_FILTER_PORT,
+ HNS3_PMU_HW_FILTER_PORT_TC,
+ HNS3_PMU_HW_FILTER_FUNC,
+ HNS3_PMU_HW_FILTER_FUNC_QUEUE,
+ HNS3_PMU_HW_FILTER_FUNC_INTR,
+};
+
+#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
+ ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])
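+
+/*
+ * hwc->addr_filters is not otherwise used by this PMU, so it is repurposed
+ * to stash a pointer to the selected hardware filter mode; it is read back
+ * as a u8 in hns3_pmu_get_filter_condition() and hns3_pmu_config_filter().
+ */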
+
+static ssize_t identifier_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
+}
+static DEVICE_ATTR_RO(identifier);
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+ u16 bdf = hns3_pmu->bdf_min;
+
+ return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
+ PCI_SLOT(bdf), PCI_FUNC(bdf));
+}
+static DEVICE_ATTR_RO(bdf_min);
+
+static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+ u16 bdf = hns3_pmu->bdf_max;
+
+ return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
+ PCI_SLOT(bdf), PCI_FUNC(bdf));
+}
+static DEVICE_ATTR_RO(bdf_max);
+
+static ssize_t hw_clk_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
+}
+static DEVICE_ATTR_RO(hw_clk_freq);
+
+static struct attribute *hns3_pmu_events_attr[] = {
+ /* bandwidth events */
+ HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU),
+ HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU),
+ HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE),
+ HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU),
+ HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU),
+ HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
+ HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
+ HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD),
+ HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD),
+ HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD),
+ HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD),
+ HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0),
+ HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1),
+ HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0),
+ HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1),
+
+ /* packet rate events */
+ HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU),
+ HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU),
+ HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU),
+ HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE),
+ HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU),
+ HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
+ HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD),
+ HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0),
+ HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1),
+ HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0),
+ HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1),
+ HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
+ HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
+
+ /* latency events */
+ HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH),
+ HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX),
+ HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
+ HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
+ HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
+ HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
+ HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU),
+ HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU),
+ HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE),
+ HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE),
+ HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH),
+ HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD),
+ HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD),
+ HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD),
+ HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD),
+ HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0),
+ HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1),
+ HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0),
+ HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1),
+ HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE),
+
+ /* interrupt rate events */
+ HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC),
+
+ NULL
+};
+
+static struct attribute *hns3_pmu_filter_mode_attr[] = {
+ /* bandwidth events */
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0),
+ HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1),
+
+ /* packet rate events */
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
+ HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
+
+ /* latency events */
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1),
+ HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE),
+
+ /* interrupt rate events */
+ HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC),
+
+ NULL
+};
+
+static struct attribute_group hns3_pmu_events_group = {
+ .name = "events",
+ .attrs = hns3_pmu_events_attr,
+};
+
+static struct attribute_group hns3_pmu_filter_mode_group = {
+ .name = "filtermode",
+ .attrs = hns3_pmu_filter_mode_attr,
+};
+
+static struct attribute *hns3_pmu_format_attr[] = {
+ HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
+ HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
+ HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
+ HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
+ HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
+ HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
+ HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
+ HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
+ HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
+ NULL
+};
+
+static struct attribute_group hns3_pmu_format_group = {
+ .name = "format",
+ .attrs = hns3_pmu_format_attr,
+};
+
+static struct attribute *hns3_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group hns3_pmu_cpumask_attr_group = {
+ .attrs = hns3_pmu_cpumask_attrs,
+};
+
+static struct attribute *hns3_pmu_identifier_attrs[] = {
+ &dev_attr_identifier.attr,
+ NULL
+};
+
+static struct attribute_group hns3_pmu_identifier_attr_group = {
+ .attrs = hns3_pmu_identifier_attrs,
+};
+
+static struct attribute *hns3_pmu_bdf_range_attrs[] = {
+ &dev_attr_bdf_min.attr,
+ &dev_attr_bdf_max.attr,
+ NULL
+};
+
+static struct attribute_group hns3_pmu_bdf_range_attr_group = {
+ .attrs = hns3_pmu_bdf_range_attrs,
+};
+
+static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = {
+ &dev_attr_hw_clk_freq.attr,
+ NULL
+};
+
+static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = {
+ .attrs = hns3_pmu_hw_clk_freq_attrs,
+};
+
+static const struct attribute_group *hns3_pmu_attr_groups[] = {
+ &hns3_pmu_events_group,
+ &hns3_pmu_filter_mode_group,
+ &hns3_pmu_format_group,
+ &hns3_pmu_cpumask_attr_group,
+ &hns3_pmu_identifier_attr_group,
+ &hns3_pmu_bdf_range_attr_group,
+ &hns3_pmu_hw_clk_freq_attr_group,
+ NULL
+};
+
+static u32 hns3_pmu_get_event(struct perf_event *event)
+{
+ return hns3_pmu_get_ext_counter_used(event) << 16 |
+ hns3_pmu_get_event_type(event) << 8 |
+ hns3_pmu_get_subevent(event);
+}
+
+static u32 hns3_pmu_get_real_event(struct perf_event *event)
+{
+ return hns3_pmu_get_event_type(event) << 8 |
+ hns3_pmu_get_subevent(event);
+}
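+
+/*
+ * The composed event id is ext_counter_used << 16 | event_type << 8 |
+ * subevent: e.g. a hypothetical config of 0x10204 selects subevent 0x04 of
+ * event type 0x02 counted on the extended counter, while the "real" event
+ * seen by the hardware is just 0x0204.
+ */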
+
+static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
+{
+ return offset + HNS3_PMU_REG_EVENT_OFFSET +
+ HNS3_PMU_REG_EVENT_SIZE * idx;
+}
+
+static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
+{
+ u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+ return readl(hns3_pmu->base + offset);
+}
+
+static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
+ u32 val)
+{
+ u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+ writel(val, hns3_pmu->base + offset);
+}
+
+static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
+{
+ u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+ return readq(hns3_pmu->base + offset);
+}
+
+static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
+ u64 val)
+{
+ u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+ writeq(val, hns3_pmu->base + offset);
+}
+
+static bool hns3_pmu_cmp_event(struct perf_event *target,
+ struct perf_event *event)
+{
+ return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
+}
+
+static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu,
+ struct perf_event *event)
+{
+ struct perf_event *sibling;
+ int hw_event_used = 0;
+ int idx;
+
+ for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+ sibling = hns3_pmu->hw_events[idx];
+ if (!sibling)
+ continue;
+
+ hw_event_used++;
+
+ if (!hns3_pmu_cmp_event(sibling, event))
+ continue;
+
+		/* The related event is already in use within the same group */
+ if (sibling->group_leader == event->group_leader)
+ return idx;
+ }
+
+ /* No related event and all hardware events are used up */
+ if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS)
+ return -EBUSY;
+
+	/* No related event, but there are spare hardware events that can be used */
+ return -ENOENT;
+}
+
+static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
+{
+ int idx;
+
+ for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+ if (!hns3_pmu->hw_events[idx])
+ return idx;
+ }
+
+ return -EBUSY;
+}
+
+static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
+{
+ struct pci_dev *pdev;
+
+ if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
+ pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
+ return false;
+ }
+
+ pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
+ PCI_BUS_NUM(bdf),
+ GET_PCI_DEVFN(bdf));
+ if (!pdev) {
+ pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
+ return false;
+ }
+
+ pci_dev_put(pdev);
+ return true;
+}
+
+static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
+ u16 queue)
+{
+ u32 val;
+
+ val = GET_PCI_DEVFN(bdf);
+ val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
+}
+
+static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
+{
+ bool queue_id_valid = false;
+ u32 reg_qid_ctrl, val;
+ int err;
+
+ /* enable queue id request */
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
+ HNS3_PMU_QID_CTRL_REQ_ENABLE);
+
+ reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
+ err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
+ val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
+ if (err == -ETIMEDOUT) {
+ pci_err(hns3_pmu->pdev, "QID request timeout!\n");
+ goto out;
+ }
+
+ queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS);
+
+out:
+ /* disable qid request and clear status */
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
+
+ return queue_id_valid;
+}
+
+static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
+ u16 queue)
+{
+ hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);
+
+ return hns3_pmu_qid_req_start(hns3_pmu, idx);
+}
+
+static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event)
+{
+ struct hns3_pmu_event_attr *pmu_event;
+ struct dev_ext_attribute *eattr;
+ struct device_attribute *dattr;
+ struct attribute *attr;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
+ attr = hns3_pmu_events_attr[i];
+ dattr = container_of(attr, struct device_attribute, attr);
+ eattr = container_of(dattr, struct dev_ext_attribute, attr);
+ pmu_event = eattr->var;
+
+ if (event == pmu_event->event)
+ return pmu_event;
+ }
+
+ return NULL;
+}
+
+static int hns3_pmu_set_func_mode(struct perf_event *event,
+ struct hns3_pmu *hns3_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u16 bdf = hns3_pmu_get_bdf(event);
+
+ if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
+ return -ENOENT;
+
+ HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);
+
+ return 0;
+}
+
+static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
+ struct hns3_pmu *hns3_pmu)
+{
+ u16 queue_id = hns3_pmu_get_queue(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u16 bdf = hns3_pmu_get_bdf(event);
+
+ if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
+ return -ENOENT;
+
+ if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
+ pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
+ return -ENOENT;
+ }
+
+ HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);
+
+ return 0;
+}
+
+static bool
+hns3_pmu_is_enabled_global_mode(struct perf_event *event,
+ struct hns3_pmu_event_attr *pmu_event)
+{
+ u8 global = hns3_pmu_get_global(event);
+
+ if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL))
+ return false;
+
+ return global;
+}
+
+static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
+ struct hns3_pmu_event_attr *pmu_event)
+{
+ u16 queue_id = hns3_pmu_get_queue(event);
+ u16 bdf = hns3_pmu_get_bdf(event);
+
+ if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC))
+ return false;
+ else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE)
+ return false;
+
+ return bdf;
+}
+
+static bool
+hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
+ struct hns3_pmu_event_attr *pmu_event)
+{
+ u16 queue_id = hns3_pmu_get_queue(event);
+ u16 bdf = hns3_pmu_get_bdf(event);
+
+ if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE))
+ return false;
+ else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE)
+ return false;
+
+ return bdf;
+}
+
+static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
+ struct hns3_pmu_event_attr *pmu_event)
+{
+ u8 tc_id = hns3_pmu_get_tc(event);
+
+ if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT))
+ return false;
+
+ return tc_id == HNS3_PMU_FILTER_ALL_TC;
+}
+
+static bool
+hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
+ struct hns3_pmu_event_attr *pmu_event)
+{
+ u8 tc_id = hns3_pmu_get_tc(event);
+
+ if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC))
+ return false;
+
+ return tc_id != HNS3_PMU_FILTER_ALL_TC;
+}
+
+static bool
+hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
+ struct hns3_pmu *hns3_pmu,
+ struct hns3_pmu_event_attr *pmu_event)
+{
+ u16 bdf = hns3_pmu_get_bdf(event);
+
+ if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR))
+ return false;
+
+ return hns3_pmu_valid_bdf(hns3_pmu, bdf);
+}
+
+static int hns3_pmu_select_filter_mode(struct perf_event *event,
+ struct hns3_pmu *hns3_pmu)
+{
+ u32 event_id = hns3_pmu_get_event(event);
+ struct hw_perf_event *hwc = &event->hw;
+ struct hns3_pmu_event_attr *pmu_event;
+
+ pmu_event = hns3_pmu_get_pmu_event(event_id);
+ if (!pmu_event) {
+ pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
+ return -ENOENT;
+ }
+
+ if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
+ HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
+ return 0;
+ }
+
+ if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
+ return hns3_pmu_set_func_mode(event, hns3_pmu);
+
+ if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
+ return hns3_pmu_set_func_queue_mode(event, hns3_pmu);
+
+ if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
+ HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
+ return 0;
+ }
+
+ if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
+ HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
+ return 0;
+ }
+
+ if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
+ HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
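+/*
+ * Events that map to the same real event (event_type plus subevent) can
+ * share one hardware event slot, so a group is accepted as long as it
+ * needs at most HNS3_PMU_MAX_HW_EVENTS distinct hardware events.
+ */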
+static bool hns3_pmu_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS];
+ int counters = 1;
+ int num;
+
+ event_group[0] = leader;
+ if (!is_software_event(leader)) {
+ if (leader->pmu != event->pmu)
+ return false;
+
+ if (leader != event && !hns3_pmu_cmp_event(leader, event))
+ event_group[counters++] = event;
+ }
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (is_software_event(sibling))
+ continue;
+
+ if (sibling->pmu != event->pmu)
+ return false;
+
+ for (num = 0; num < counters; num++) {
+ if (hns3_pmu_cmp_event(event_group[num], sibling))
+ break;
+ }
+
+ if (num == counters)
+ event_group[counters++] = sibling;
+ }
+
+ return counters <= HNS3_PMU_MAX_HW_EVENTS;
+}
+
+static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u16 intr_id = hns3_pmu_get_intr(event);
+ u8 port_id = hns3_pmu_get_port(event);
+ u16 bdf = hns3_pmu_get_bdf(event);
+ u8 tc_id = hns3_pmu_get_tc(event);
+ u8 filter_mode;
+
+ filter_mode = *(u8 *)hwc->addr_filters;
+ switch (filter_mode) {
+ case HNS3_PMU_HW_FILTER_PORT:
+ return FILTER_CONDITION_PORT(port_id);
+ case HNS3_PMU_HW_FILTER_PORT_TC:
+ return FILTER_CONDITION_PORT_TC(port_id, tc_id);
+ case HNS3_PMU_HW_FILTER_FUNC:
+ case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
+ return GET_PCI_DEVFN(bdf);
+ case HNS3_PMU_HW_FILTER_FUNC_INTR:
+ return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void hns3_pmu_config_filter(struct perf_event *event)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ u8 event_type = hns3_pmu_get_event_type(event);
+ u8 subevent_id = hns3_pmu_get_subevent(event);
+ u16 queue_id = hns3_pmu_get_queue(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u8 filter_mode = *(u8 *)hwc->addr_filters;
+ u16 bdf = hns3_pmu_get_bdf(event);
+ u32 idx = hwc->idx;
+ u32 val;
+
+ val = event_type;
+ val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
+ val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
+ val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+
+ val = hns3_pmu_get_filter_condition(event);
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);
+
+ if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
+ hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
+}
+
+static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u32 val;
+
+ val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+ val |= HNS3_PMU_EVENT_EN;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u32 val;
+
+ val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+ val &= ~HNS3_PMU_EVENT_EN;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u32 val;
+
+ val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
+ val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
+}
+
+static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u32 val;
+
+ val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
+ val |= HNS3_PMU_INTR_MASK_OVERFLOW;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
+}
+
+static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
+{
+ u32 val;
+
+ val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+ val |= HNS3_PMU_EVENT_STATUS_RESET;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+
+ val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+ val &= ~HNS3_PMU_EVENT_STATUS_RESET;
+ hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static u64 hns3_pmu_read_counter(struct perf_event *event)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+
+ return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
+}
+
+static void hns3_pmu_write_counter(struct perf_event *event, u64 value)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ u32 idx = event->hw.idx;
+
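+	/*
+	 * Both the normal and the extended counter are written so that the
+	 * event starts from the same value whichever event_base it uses.
+	 */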
+ hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
+ hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
+}
+
+static void hns3_pmu_init_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ local64_set(&hwc->prev_count, 0);
+ hns3_pmu_write_counter(event, 0);
+}
+
+static int hns3_pmu_event_init(struct perf_event *event)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int ret;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Sampling is not supported */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ event->cpu = hns3_pmu->on_cpu;
+
+ idx = hns3_pmu_get_event_idx(hns3_pmu);
+ if (idx < 0) {
+ pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
+ HNS3_PMU_MAX_HW_EVENTS);
+ return -EBUSY;
+ }
+
+ hwc->idx = idx;
+
+ ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
+ if (ret) {
+ pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
+ return ret;
+ }
+
+ if (!hns3_pmu_validate_event_group(event)) {
+ pci_err(hns3_pmu->pdev, "Invalid event group.\n");
+ return -EINVAL;
+ }
+
+ if (hns3_pmu_get_ext_counter_used(event))
+ hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER;
+ else
+ hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER;
+
+ return 0;
+}
+
+static void hns3_pmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 new_cnt, prev_cnt, delta;
+
+ do {
+ prev_cnt = local64_read(&hwc->prev_count);
+ new_cnt = hns3_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
+ prev_cnt);
+
+ delta = new_cnt - prev_cnt;
+ local64_add(delta, &event->count);
+}
+
+static void hns3_pmu_start(struct perf_event *event, int flags)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ hwc->state = 0;
+
+ hns3_pmu_config_filter(event);
+ hns3_pmu_init_counter(event);
+ hns3_pmu_enable_intr(hns3_pmu, hwc);
+ hns3_pmu_enable_counter(hns3_pmu, hwc);
+
+ perf_event_update_userpage(event);
+}
+
+static void hns3_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hns3_pmu_disable_counter(hns3_pmu, hwc);
+ hns3_pmu_disable_intr(hns3_pmu, hwc);
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ /* Read hardware counter and update the perf counter statistics */
+ hns3_pmu_read(event);
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+static int hns3_pmu_add(struct perf_event *event, int flags)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ /* Check all working events to find a related event. */
+ idx = hns3_pmu_find_related_event_idx(hns3_pmu, event);
+ if (idx < 0 && idx != -ENOENT)
+ return idx;
+
+	/* The current event shares an enabled hardware event with the related event */
+ if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) {
+ hwc->idx = idx;
+ goto start_count;
+ }
+
+ idx = hns3_pmu_get_event_idx(hns3_pmu);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hns3_pmu->hw_events[idx] = event;
+
+start_count:
+ if (flags & PERF_EF_START)
+ hns3_pmu_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void hns3_pmu_del(struct perf_event *event, int flags)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hns3_pmu_stop(event, PERF_EF_UPDATE);
+ hns3_pmu->hw_events[hwc->idx] = NULL;
+ perf_event_update_userpage(event);
+}
+
+static void hns3_pmu_enable(struct pmu *pmu)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
+ u32 val;
+
+ val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+ val |= HNS3_PMU_GLOBAL_START;
+ writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+}
+
+static void hns3_pmu_disable(struct pmu *pmu)
+{
+ struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
+ u32 val;
+
+ val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+ val &= ~HNS3_PMU_GLOBAL_START;
+ writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+}
+
+static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+{
+ u16 device_id;
+ char *name;
+ u32 val;
+
+ hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2];
+ if (!hns3_pmu->base) {
+ pci_err(pdev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);
+
+ val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
+ hns3_pmu->bdf_min = val & 0xffff;
+ hns3_pmu->bdf_max = val >> 16;
+
+ val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
+ device_id = val & 0xffff;
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
+ if (!name)
+ return -ENOMEM;
+
+ hns3_pmu->pdev = pdev;
+ hns3_pmu->on_cpu = -1;
+ hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
+ hns3_pmu->pmu = (struct pmu) {
+ .name = name,
+ .module = THIS_MODULE,
+ .event_init = hns3_pmu_event_init,
+ .pmu_enable = hns3_pmu_enable,
+ .pmu_disable = hns3_pmu_disable,
+ .add = hns3_pmu_add,
+ .del = hns3_pmu_del,
+ .start = hns3_pmu_start,
+ .stop = hns3_pmu_stop,
+ .read = hns3_pmu_read,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = hns3_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ return 0;
+}
+
+static irqreturn_t hns3_pmu_irq(int irq, void *data)
+{
+ struct hns3_pmu *hns3_pmu = data;
+ u32 intr_status, idx;
+
+ for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+ intr_status = hns3_pmu_readl(hns3_pmu,
+ HNS3_PMU_REG_EVENT_INTR_STATUS,
+ idx);
+
+		/*
+		 * As each counter restarts from 0 when it overflows, no extra
+		 * processing is needed; just clear the interrupt status.
+		 */
+ if (intr_status)
+ hns3_pmu_clear_intr_status(hns3_pmu, idx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hns3_pmu *hns3_pmu;
+
+ hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
+ if (!hns3_pmu)
+ return -ENODEV;
+
+ if (hns3_pmu->on_cpu == -1) {
+ hns3_pmu->on_cpu = cpu;
+ irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
+ }
+
+ return 0;
+}
+
+static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hns3_pmu *hns3_pmu;
+ unsigned int target;
+
+ hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
+ if (!hns3_pmu)
+ return -ENODEV;
+
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (hns3_pmu->on_cpu != cpu)
+ return 0;
+
+	/* Choose a new CPU from all online CPUs */
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
+ hns3_pmu->on_cpu = target;
+ irq_set_affinity(hns3_pmu->irq, cpumask_of(target));
+
+ return 0;
+}
+
+static void hns3_pmu_free_irq(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int hns3_pmu_irq_register(struct pci_dev *pdev,
+ struct hns3_pmu *hns3_pmu)
+{
+ int irq, ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+ if (ret) {
+ pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
+ return ret;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
+ hns3_pmu->pmu.name, hns3_pmu);
+ if (ret) {
+ pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hns3_pmu->irq = irq;
+
+ return 0;
+}
+
+static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+{
+ int ret;
+
+ ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
+ if (ret)
+ return ret;
+
+ ret = hns3_pmu_irq_register(pdev, hns3_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+ &hns3_pmu->node);
+ if (ret) {
+ pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+ if (ret) {
+ pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+ &hns3_pmu->node);
+ }
+
+ return ret;
+}
+
+static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+{
+ struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+
+ perf_pmu_unregister(&hns3_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+ &hns3_pmu->node);
+}
+
+static int hns3_pmu_init_dev(struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
+ if (ret < 0) {
+ pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hns3_pmu *hns3_pmu;
+ int ret;
+
+ hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
+ if (!hns3_pmu)
+ return -ENOMEM;
+
+ ret = hns3_pmu_init_dev(pdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_pmu_init_pmu(pdev, hns3_pmu);
+ if (ret) {
+ pci_clear_master(pdev);
+ return ret;
+ }
+
+ pci_set_drvdata(pdev, hns3_pmu);
+
+ return ret;
+}
+
+static void hns3_pmu_remove(struct pci_dev *pdev)
+{
+ hns3_pmu_uninit_pmu(pdev);
+ pci_clear_master(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id hns3_pmu_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);
+
+static struct pci_driver hns3_pmu_driver = {
+ .name = "hns3_pmu",
+ .id_table = hns3_pmu_ids,
+ .probe = hns3_pmu_probe,
+ .remove = hns3_pmu_remove,
+};
+
+static int __init hns3_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+ "AP_PERF_ARM_HNS3_PMU_ONLINE",
+ hns3_pmu_online_cpu,
+ hns3_pmu_offline_cpu);
+ if (ret) {
+ pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&hns3_pmu_driver);
+ if (ret) {
+ pr_err("failed to register pci driver, ret = %d.\n", ret);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
+ }
+
+ return ret;
+}
+module_init(hns3_pmu_module_init);
+
+static void __exit hns3_pmu_module_exit(void)
+{
+ pci_unregister_driver(&hns3_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
+}
+module_exit(hns3_pmu_module_exit);
+
+MODULE_DESCRIPTION("HNS3 PMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index 282d3a071a67..69c3050a4348 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -2,10 +2,6 @@
/* Marvell CN10K LLC-TAD perf driver
*
* Copyright (C) 2021 Marvell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "tad_pmu: " fmt
@@ -18,9 +14,9 @@
#include <linux/perf_event.h>
#include <linux/platform_device.h>
-#define TAD_PFC_OFFSET 0x0
+#define TAD_PFC_OFFSET 0x800
#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
-#define TAD_PRF_OFFSET 0x100
+#define TAD_PRF_OFFSET 0x900
#define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3))
#define TAD_PRF_CNTSEL_MASK 0xFF
#define TAD_MAX_COUNTERS 8
@@ -100,9 +96,7 @@ static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
* which sets TAD()_PRF()[CNTSEL] != 0
*/
for (i = 0; i < tad_pmu->region_cnt; i++) {
- reg_val = readq_relaxed(tad_pmu->regions[i].base +
- TAD_PRF(counter_idx));
- reg_val |= (event_idx & 0xFF);
+ reg_val = event_idx & 0xFF;
writeq_relaxed(reg_val, tad_pmu->regions[i].base +
TAD_PRF(counter_idx));
}
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index b2b8d2074ed0..ebca5eab9c9b 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -121,7 +121,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
return delta;
}
-static void riscv_pmu_stop(struct perf_event *event, int flags)
+void riscv_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
@@ -170,12 +170,11 @@ int riscv_pmu_event_set_period(struct perf_event *event)
left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
- perf_event_update_userpage(event);
return overflow;
}
-static void riscv_pmu_start(struct perf_event *event, int flags)
+void riscv_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..2c20b0de8cb0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
-/**
+/*
* This is just a simple implementation to allow legacy implementations
* compatible with new RISC-V PMU driver framework.
* This driver only allows reading two counters i.e CYCLE & INSTRET.
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index dca3537a8dcc..6f6681bbfd36 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -17,22 +17,28 @@
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
+#include <linux/cpu_pm.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>
-union sbi_pmu_ctr_info {
- unsigned long value;
- struct {
- unsigned long csr:12;
- unsigned long width:6;
-#if __riscv_xlen == 32
- unsigned long reserved:13;
-#else
- unsigned long reserved:45;
-#endif
- unsigned long type:1;
- };
+PMU_FORMAT_ATTR(event, "config:0-47");
+PMU_FORMAT_ATTR(firmware, "config:63");
+
+static struct attribute *riscv_arch_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_firmware.attr,
+ NULL,
+};
+
+static struct attribute_group riscv_pmu_format_group = {
+ .name = "format",
+ .attrs = riscv_arch_formats_attr,
+};
+
+static const struct attribute_group *riscv_pmu_attr_groups[] = {
+ &riscv_pmu_format_group,
+ NULL,
};
/*
@@ -274,8 +280,13 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
cflags |= SBI_PMU_CFG_FLAG_SET_UINH;
/* retrieve the available counter index */
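+	/*
+	 * hwc->config is 64 bits wide while SBI call arguments are xlen
+	 * sized, so on 32-bit kernels the upper half has to be passed in
+	 * the next argument slot.
+	 */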
+#if defined(CONFIG_32BIT)
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
+ cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
cflags, hwc->event_base, hwc->config, 0);
+#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
hwc->event_base, hwc->config);
@@ -417,8 +428,13 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
+#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
+#else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
+ 1, flag, ival, 0, 0);
+#endif
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
@@ -525,8 +541,14 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
hwc = &event->hw;
max_period = riscv_pmu_ctr_get_width_mask(event);
init_val = local64_read(&hwc->prev_count) & max_period;
+#if defined(CONFIG_32BIT)
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
+ flag, init_val, init_val >> 32, 0);
+#else
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
flag, init_val, 0, 0);
+#endif
+ perf_event_update_userpage(event);
}
ctr_ovf_mask = ctr_ovf_mask >> 1;
idx++;
@@ -666,12 +688,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
child = of_get_compatible_child(cpu, "riscv,cpu-intc");
if (!child) {
pr_err("Failed to find INTC node\n");
+ of_node_put(cpu);
return -ENODEV;
}
domain = irq_find_host(child);
of_node_put(child);
- if (domain)
+ if (domain) {
+ of_node_put(cpu);
break;
+ }
}
if (!domain) {
pr_err("Failed to find INTC IRQ root domain\n");
@@ -693,6 +718,73 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
return 0;
}
+#ifdef CONFIG_CPU_PM
+static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+ void *v)
+{
+ struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
+ int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
+ struct perf_event *event;
+ int idx;
+
+ if (!enabled)
+ return NOTIFY_OK;
+
+ for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
+ event = cpuc->events[idx];
+ if (!event)
+ continue;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /*
+ * Stop and update the counter
+ */
+ riscv_pmu_stop(event, PERF_EF_UPDATE);
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ /*
+ * Restore and enable the counter.
+ *
+			 * Requires RCU read locking to be functional; wrap the
+			 * call within RCU_NONIDLE to make the RCU subsystem
+			 * aware that this CPU is not idle from an RCU
+			 * perspective for the duration of the
+			 * riscv_pmu_start() call.
+ */
+ RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
+{
+ pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
+ return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
+}
+
+static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
+{
+ cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
+}
+#else
+static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
+static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
+#endif
+
+static void riscv_pmu_destroy(struct riscv_pmu *pmu)
+{
+ riscv_pm_pmu_unregister(pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
+}
+
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
@@ -720,6 +812,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
}
+ pmu->pmu.attr_groups = riscv_pmu_attr_groups;
pmu->num_counters = num_counters;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
@@ -733,14 +826,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = riscv_pm_pmu_register(pmu);
+ if (ret)
+ goto out_unregister;
+
ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
- if (ret) {
- cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
- return ret;
- }
+ if (ret)
+ goto out_unregister;
return 0;
+out_unregister:
+ riscv_pmu_destroy(pmu);
+
out_free:
kfree(pmu);
return ret;
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index 486ca23aba32..ce7ba3eb2a8e 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -37,6 +37,18 @@ config PHY_MESON_GXL_USB2
GXL and GXM SoCs.
If unsure, say N.
+config PHY_MESON_G12A_MIPI_DPHY_ANALOG
+ tristate "Meson G12A MIPI Analog DPHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select MFD_SYSCON
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Enable this to support the Meson MIPI Analog DPHY found in Meson G12A
+ SoCs.
+ If unsure, say N.
+
config PHY_MESON_G12A_USB2
tristate "Meson G12A USB2 PHY driver"
default ARCH_MESON
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index c0886c850bb0..91e3b9790c03 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
+obj-$(CONFIG_PHY_MESON_G12A_MIPI_DPHY_ANALOG) += phy-meson-g12a-mipi-dphy-analog.o
obj-$(CONFIG_PHY_MESON_AXG_PCIE) += phy-meson-axg-pcie.o
obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG) += phy-meson-axg-mipi-pcie-analog.o
obj-$(CONFIG_PHY_MESON_AXG_MIPI_DPHY) += phy-meson-axg-mipi-dphy.o
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index fdbd64c03e12..32d1ff09befb 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -136,7 +136,7 @@
/* TWAKEUP. */
#define MIPI_DSI_WAKEUP_TIM 0x20
-/* when in RxULPS check state, after the the logic enable the analog,
+/* when in RxULPS check state, after the logic enables the analog,
* how long we should wait to check the lP state .
*/
#define MIPI_DSI_LPOK_TIM 0x24
diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
new file mode 100644
index 000000000000..c14089fa7db4
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Meson G12A MIPI DSI Analog PHY
+ *
+ * Copyright (C) 2018 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2022 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+
+#define HHI_MIPI_CNTL0 0x00
+#define HHI_MIPI_CNTL0_DIF_REF_CTL1 GENMASK(31, 16)
+#define HHI_MIPI_CNTL0_DIF_REF_CTL0 GENMASK(15, 0)
+
+#define HHI_MIPI_CNTL1 0x04
+#define HHI_MIPI_CNTL1_BANDGAP BIT(16)
+#define HHI_MIPI_CNTL2_DIF_REF_CTL2 GENMASK(15, 0)
+
+#define HHI_MIPI_CNTL2 0x08
+#define HHI_MIPI_CNTL2_DIF_TX_CTL1 GENMASK(31, 16)
+#define HHI_MIPI_CNTL2_CH_EN GENMASK(15, 11)
+#define HHI_MIPI_CNTL2_DIF_TX_CTL0 GENMASK(10, 0)
+
+#define DSI_LANE_0 BIT(4)
+#define DSI_LANE_1 BIT(3)
+#define DSI_LANE_CLK BIT(2)
+#define DSI_LANE_2 BIT(1)
+#define DSI_LANE_3 BIT(0)
+
+struct phy_g12a_mipi_dphy_analog_priv {
+ struct phy *phy;
+ struct regmap *regmap;
+ struct phy_configure_opts_mipi_dphy config;
+};
+
+static int phy_g12a_mipi_dphy_analog_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct phy_g12a_mipi_dphy_analog_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+ if (ret)
+ return ret;
+
+ memcpy(&priv->config, opts, sizeof(priv->config));
+
+ return 0;
+}
+
+static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy)
+{
+ struct phy_g12a_mipi_dphy_analog_priv *priv = phy_get_drvdata(phy);
+ unsigned int reg;
+
+ regmap_write(priv->regmap, HHI_MIPI_CNTL0,
+ FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL0, 0x8) |
+ FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL1, 0xa487));
+
+ regmap_write(priv->regmap, HHI_MIPI_CNTL1,
+ FIELD_PREP(HHI_MIPI_CNTL2_DIF_REF_CTL2, 0x2e) |
+ HHI_MIPI_CNTL1_BANDGAP);
+
+ regmap_write(priv->regmap, HHI_MIPI_CNTL2,
+ FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) |
+ FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680));
+
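+	/* The fallthroughs below accumulate the enable bit of each active data lane. */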
+ reg = DSI_LANE_CLK;
+ switch (priv->config.lanes) {
+ case 4:
+ reg |= DSI_LANE_3;
+ fallthrough;
+ case 3:
+ reg |= DSI_LANE_2;
+ fallthrough;
+ case 2:
+ reg |= DSI_LANE_1;
+ fallthrough;
+ case 1:
+ reg |= DSI_LANE_0;
+ break;
+ default:
+ reg = 0;
+ }
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL2,
+ HHI_MIPI_CNTL2_CH_EN,
+ FIELD_PREP(HHI_MIPI_CNTL2_CH_EN, reg));
+
+ return 0;
+}
+
+static int phy_g12a_mipi_dphy_analog_power_off(struct phy *phy)
+{
+ struct phy_g12a_mipi_dphy_analog_priv *priv = phy_get_drvdata(phy);
+
+ regmap_write(priv->regmap, HHI_MIPI_CNTL0, 0);
+ regmap_write(priv->regmap, HHI_MIPI_CNTL1, 0);
+ regmap_write(priv->regmap, HHI_MIPI_CNTL2, 0);
+
+ return 0;
+}
+
+static const struct phy_ops phy_g12a_mipi_dphy_analog_ops = {
+ .configure = phy_g12a_mipi_dphy_analog_configure,
+ .power_on = phy_g12a_mipi_dphy_analog_power_on,
+ .power_off = phy_g12a_mipi_dphy_analog_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_g12a_mipi_dphy_analog_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_g12a_mipi_dphy_analog_priv *priv;
+ struct device_node *np = dev->of_node, *parent_np;
+ struct regmap *map;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Get the hhi system controller node */
+ parent_np = of_get_parent(np);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map), "failed to get HHI regmap\n");
+
+ priv->regmap = map;
+
+ priv->phy = devm_phy_create(dev, np, &phy_g12a_mipi_dphy_analog_ops);
+ if (IS_ERR(priv->phy))
+ return dev_err_probe(dev, PTR_ERR(priv->phy), "failed to create PHY\n");
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+
+ phy = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy);
+}
+
+static const struct of_device_id phy_g12a_mipi_dphy_analog_of_match[] = {
+ {
+ .compatible = "amlogic,g12a-mipi-dphy-analog",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, phy_g12a_mipi_dphy_analog_of_match);
+
+static struct platform_driver phy_g12a_mipi_dphy_analog_driver = {
+ .probe = phy_g12a_mipi_dphy_analog_probe,
+ .driver = {
+ .name = "phy-meson-g12a-mipi-dphy-analog",
+ .of_match_table = phy_g12a_mipi_dphy_analog_of_match,
+ },
+};
+module_platform_driver(phy_g12a_mipi_dphy_analog_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Meson G12A MIPI Analog D-PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 849c4204f550..93a6a8ee4716 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -83,7 +83,7 @@ config PHY_NS2_USB_DRD
config PHY_BRCM_SATA
tristate "Broadcom SATA PHY driver"
depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \
- ARCH_BCM_63XX || COMPILE_TEST
+ ARCH_BCMBCA || COMPILE_TEST
depends on OF
select GENERIC_PHY
default ARCH_BCM_IPROC
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index 548e46776100..cc29b08e49eb 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
#include <linux/delay.h>
#include <linux/io.h>
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
index 9e7434a0d3e0..2eaa41f8fc70 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2016 Broadcom
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 65a399acc845..36ad02c33ac5 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
#include <linux/delay.h>
#include <linux/extcon-provider.h>
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index dd0f66288fbd..dddcbd3cd5f3 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -861,7 +861,7 @@ static void usb_init_common(struct brcm_usb_init_params *params)
brcmusb_usb2_eye_fix(ctrl);
/*
- * Make sure the the second and third memory controller
+ * Make sure the second and third memory controller
* interfaces are enabled if they exist.
*/
if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN))
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index ba042e39cfaf..3dfdfb33cd0a 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -3,9 +3,11 @@
* Copyright: 2017-2018 Cadence Design Systems, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -17,6 +19,7 @@
#define REG_WAKEUP_TIME_NS 800
#define DPHY_PLL_RATE_HZ 108000000
+#define POLL_TIMEOUT_US 1000
/* DPHY registers */
#define DPHY_PMA_CMN(reg) (reg)
@@ -45,6 +48,10 @@
#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
#define DPHY_CMN_OPDIV(x) ((x) << 7)
+#define DPHY_BAND_CFG DPHY_PCS(0x0)
+#define DPHY_BAND_CFG_LEFT_BAND GENMASK(4, 0)
+#define DPHY_BAND_CFG_RIGHT_BAND GENMASK(9, 5)
+
#define DPHY_PSM_CFG DPHY_PCS(0x4)
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
@@ -57,6 +64,18 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+#define DPHY_TX_J721E_WIZ_PLL_CTRL 0xF04
+#define DPHY_TX_J721E_WIZ_STATUS 0xF08
+#define DPHY_TX_J721E_WIZ_RST_CTRL 0xF0C
+#define DPHY_TX_J721E_WIZ_PSM_FREQ 0xF10
+
+#define DPHY_TX_J721E_WIZ_IPDIV GENMASK(4, 0)
+#define DPHY_TX_J721E_WIZ_OPDIV GENMASK(13, 8)
+#define DPHY_TX_J721E_WIZ_FBDIV GENMASK(25, 16)
+#define DPHY_TX_J721E_WIZ_LANE_RSTB BIT(31)
+#define DPHY_TX_WIZ_PLL_LOCK BIT(31)
+#define DPHY_TX_WIZ_O_CMN_READY BIT(31)
+
struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
@@ -92,6 +111,12 @@ struct cdns_dphy {
struct phy *phy;
};
+/* Order of bands is important since the index is the band number. */
+static const unsigned int tx_bands[] = {
+ 80, 100, 120, 160, 200, 240, 320, 390, 450, 510, 560, 640, 690, 770,
+ 870, 950, 1000, 1200, 1400, 1600, 1800, 2000, 2200, 2500
+};
+
static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
struct cdns_dphy_cfg *cfg,
struct phy_configure_opts_mipi_dphy *opts,
@@ -199,6 +224,46 @@ static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
dphy->regs + DPHY_PSM_CFG);
}
+static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ /* Minimum wakeup time as per MIPI D-PHY spec v1.2 */
+ return 1000000;
+}
+
+static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ u32 status;
+
+ /*
+ * Set the PWM and PLL byte-clock divider settings to the recommended
+ * values, the same ones used by the reference ops.
+ */
+ writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+ DPHY_CMN_PWM_DIV(0x8),
+ dphy->regs + DPHY_CMN_PWM);
+
+ writel((FIELD_PREP(DPHY_TX_J721E_WIZ_IPDIV, cfg->pll_ipdiv) |
+ FIELD_PREP(DPHY_TX_J721E_WIZ_OPDIV, cfg->pll_opdiv) |
+ FIELD_PREP(DPHY_TX_J721E_WIZ_FBDIV, cfg->pll_fbdiv)),
+ dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL);
+
+ writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
+ dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
+
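+ /* Wait, best effort, for the PLL to lock and the common block to become ready. */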
+ readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+ (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
+
+ readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+ (status & DPHY_TX_WIZ_O_CMN_READY), 0,
+ POLL_TIMEOUT_US);
+}
+
+static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+ writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
+}
+
/*
* This is the reference implementation of DPHY hooks. Specific integration of
* this IP may have to re-implement some of them depending on how they decided
@@ -210,6 +275,12 @@ static const struct cdns_dphy_ops ref_dphy_ops = {
.set_psm_div = cdns_dphy_ref_set_psm_div,
};
+static const struct cdns_dphy_ops j721e_dphy_ops = {
+ .get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
+ .set_psm_div = cdns_dphy_j721e_set_psm_div,
+};
+
static int cdns_dphy_config_from_opts(struct phy *phy,
struct phy_configure_opts_mipi_dphy *opts,
struct cdns_dphy_cfg *cfg)
@@ -232,6 +303,24 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
return 0;
}
+static int cdns_dphy_tx_get_band_ctrl(unsigned long hs_clk_rate)
+{
+ unsigned int rate;
+ int i;
+
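+ /* tx_bands[] entries are in MHz, so convert the HS clock rate to MHz first. */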
+ rate = hs_clk_rate / 1000000UL;
+
+ if (rate < tx_bands[0])
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(tx_bands) - 1; i++) {
+ if (rate >= tx_bands[i] && rate < tx_bands[i + 1])
+ return i;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
{
@@ -247,7 +336,8 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
struct cdns_dphy_cfg cfg = { 0 };
- int ret;
+ int ret, band_ctrl;
+ unsigned int reg;
ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
if (ret)
@@ -276,6 +366,14 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
*/
cdns_dphy_set_pll_cfg(dphy, &cfg);
+ band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (band_ctrl < 0)
+ return band_ctrl;
+
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
return 0;
}
@@ -370,6 +468,7 @@ static int cdns_dphy_remove(struct platform_device *pdev)
static const struct of_device_id cdns_dphy_of_match[] = {
{ .compatible = "cdns,dphy", .data = &ref_dphy_ops },
+ { .compatible = "ti,j721e-dphy", .data = &j721e_dphy_ops },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_dphy_of_match);
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 73fb99ccd525..6e86a6517f37 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -348,7 +348,6 @@ struct cdns_regmap_cdb_context {
struct cdns_sierra_phy {
struct device *dev;
- struct regmap *regmap;
const struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 7c4b8050485f..f099053c583c 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -331,7 +331,6 @@ struct cdns_torrent_phy {
struct cdns_torrent_inst phys[MAX_NUM_LANES];
int nsubnodes;
const struct cdns_torrent_data *init_data;
- struct regmap *regmap;
struct regmap *regmap_common_cdb;
struct regmap *regmap_phy_pcs_common_cdb;
struct regmap *regmap_phy_pma_common_cdb;
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index f9c54cd02036..853958fb2c06 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -8,6 +8,15 @@ config PHY_FSL_IMX8MQ_USB
select GENERIC_PHY
default ARCH_MXC && ARM64
+config PHY_MIXEL_LVDS_PHY
+ tristate "Mixel LVDS PHY support"
+ depends on OF
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Enable this to add support for the Mixel LVDS PHY as found
+ on NXP's i.MX8qm SoC.
+
config PHY_MIXEL_MIPI_DPHY
tristate "Mixel MIPI DSI PHY support"
depends on OF && HAS_IOMEM
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index 3518d5dbe8a7..cedb328bc4d2 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PHY_FSL_IMX8MQ_USB) += phy-fsl-imx8mq-usb.o
+obj-$(CONFIG_PHY_MIXEL_LVDS_PHY) += phy-fsl-imx8qm-lvds-phy.o
obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o
obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o
obj-$(CONFIG_PHY_FSL_LYNX_28G) += phy-fsl-lynx-28g.o
diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c
new file mode 100644
index 000000000000..e514b64bfdab
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2017-2020,2022 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#define REG_SET 0x4
+#define REG_CLR 0x8
+
+#define PHY_CTRL 0x0
+#define M_MASK GENMASK(18, 17)
+#define M(n) FIELD_PREP(M_MASK, (n))
+#define CCM_MASK GENMASK(16, 14)
+#define CCM(n) FIELD_PREP(CCM_MASK, (n))
+#define CA_MASK GENMASK(13, 11)
+#define CA(n) FIELD_PREP(CA_MASK, (n))
+#define TST_MASK GENMASK(10, 5)
+#define TST(n) FIELD_PREP(TST_MASK, (n))
+#define CH_EN(id) BIT(3 + (id))
+#define NB BIT(2)
+#define RFB BIT(1)
+#define PD BIT(0)
+
+/* Power On Reset(POR) value */
+#define CTRL_RESET_VAL (M(0x0) | CCM(0x4) | CA(0x4) | TST(0x25))
+
+/* PHY initialization value and mask */
+#define CTRL_INIT_MASK (M_MASK | CCM_MASK | CA_MASK | TST_MASK | NB | RFB)
+#define CTRL_INIT_VAL (M(0x0) | CCM(0x5) | CA(0x4) | TST(0x25) | RFB)
+
+#define PHY_STATUS 0x10
+#define LOCK BIT(0)
+
+#define PHY_NUM 2
+
+#define MIN_CLKIN_FREQ (25 * MEGA)
+#define MAX_CLKIN_FREQ (165 * MEGA)
+
+#define PLL_LOCK_SLEEP 10
+#define PLL_LOCK_TIMEOUT 1000
+
+struct mixel_lvds_phy {
+ struct phy *phy;
+ struct phy_configure_opts_lvds cfg;
+ unsigned int id;
+};
+
+struct mixel_lvds_phy_priv {
+ struct regmap *regmap;
+ struct mutex lock; /* protect regmap access and our own cfg */
+ struct clk *phy_ref_clk;
+ struct mixel_lvds_phy *phys[PHY_NUM];
+};
+
+static int mixel_lvds_phy_init(struct phy *phy)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
+
+ mutex_lock(&priv->lock);
+ regmap_update_bits(priv->regmap,
+ PHY_CTRL, CTRL_INIT_MASK, CTRL_INIT_VAL);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int mixel_lvds_phy_power_on(struct phy *phy)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
+ struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy);
+ struct mixel_lvds_phy *companion = priv->phys[lvds_phy->id ^ 1];
+ struct phy_configure_opts_lvds *cfg = &lvds_phy->cfg;
+ u32 val = 0;
+ u32 locked;
+ int ret;
+
+ /* The master PHY would power on the slave PHY. */
+ if (cfg->is_slave)
+ return 0;
+
+ ret = clk_prepare_enable(priv->phy_ref_clk);
+ if (ret < 0) {
+ dev_err(&phy->dev,
+ "failed to enable PHY reference clock: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
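+ /* Select the M field (and NB for the 10-bit mode) from the differential clock rate. */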
+ if (cfg->bits_per_lane_and_dclk_cycle == 7) {
+ if (cfg->differential_clk_rate < 44000000)
+ val |= M(0x2);
+ else if (cfg->differential_clk_rate < 90000000)
+ val |= M(0x1);
+ else
+ val |= M(0x0);
+ } else {
+ val = NB;
+
+ if (cfg->differential_clk_rate < 32000000)
+ val |= M(0x2);
+ else if (cfg->differential_clk_rate < 63000000)
+ val |= M(0x1);
+ else
+ val |= M(0x0);
+ }
+ regmap_update_bits(priv->regmap, PHY_CTRL, M_MASK | NB, val);
+
+ /*
+ * Enable both channels synchronously if the companion PHY
+ * is a slave PHY.
+ */
+ if (companion->cfg.is_slave)
+ val = CH_EN(0) | CH_EN(1);
+ else
+ val = CH_EN(lvds_phy->id);
+ regmap_write(priv->regmap, PHY_CTRL + REG_SET, val);
+
+ ret = regmap_read_poll_timeout(priv->regmap, PHY_STATUS, locked,
+ locked, PLL_LOCK_SLEEP,
+ PLL_LOCK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&phy->dev, "failed to get PHY lock: %d\n", ret);
+ clk_disable_unprepare(priv->phy_ref_clk);
+ }
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int mixel_lvds_phy_power_off(struct phy *phy)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
+ struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy);
+ struct mixel_lvds_phy *companion = priv->phys[lvds_phy->id ^ 1];
+ struct phy_configure_opts_lvds *cfg = &lvds_phy->cfg;
+
+ /* The master PHY would power off the slave PHY. */
+ if (cfg->is_slave)
+ return 0;
+
+ mutex_lock(&priv->lock);
+ if (companion->cfg.is_slave)
+ regmap_write(priv->regmap, PHY_CTRL + REG_CLR,
+ CH_EN(0) | CH_EN(1));
+ else
+ regmap_write(priv->regmap, PHY_CTRL + REG_CLR,
+ CH_EN(lvds_phy->id));
+ mutex_unlock(&priv->lock);
+
+ clk_disable_unprepare(priv->phy_ref_clk);
+
+ return 0;
+}
+
+static int mixel_lvds_phy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
+ struct phy_configure_opts_lvds *cfg = &opts->lvds;
+ int ret;
+
+ ret = clk_set_rate(priv->phy_ref_clk, cfg->differential_clk_rate);
+ if (ret)
+ dev_err(&phy->dev, "failed to set PHY reference clock rate(%lu): %d\n",
+ cfg->differential_clk_rate, ret);
+
+ return ret;
+}
+
+/* Assume the master PHY's configuration set is cached first. */
+static int mixel_lvds_phy_check_slave(struct phy *slave_phy)
+{
+ struct device *dev = &slave_phy->dev;
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev->parent);
+ struct mixel_lvds_phy *slv = phy_get_drvdata(slave_phy);
+ struct mixel_lvds_phy *mst = priv->phys[slv->id ^ 1];
+ struct phy_configure_opts_lvds *mst_cfg = &mst->cfg;
+ struct phy_configure_opts_lvds *slv_cfg = &slv->cfg;
+
+ if (mst_cfg->bits_per_lane_and_dclk_cycle !=
+ slv_cfg->bits_per_lane_and_dclk_cycle) {
+ dev_err(dev, "number bits mismatch(mst: %u vs slv: %u)\n",
+ mst_cfg->bits_per_lane_and_dclk_cycle,
+ slv_cfg->bits_per_lane_and_dclk_cycle);
+ return -EINVAL;
+ }
+
+ if (mst_cfg->differential_clk_rate !=
+ slv_cfg->differential_clk_rate) {
+ dev_err(dev, "dclk rate mismatch(mst: %lu vs slv: %lu)\n",
+ mst_cfg->differential_clk_rate,
+ slv_cfg->differential_clk_rate);
+ return -EINVAL;
+ }
+
+ if (mst_cfg->lanes != slv_cfg->lanes) {
+ dev_err(dev, "lanes mismatch(mst: %u vs slv: %u)\n",
+ mst_cfg->lanes, slv_cfg->lanes);
+ return -EINVAL;
+ }
+
+ if (mst_cfg->is_slave == slv_cfg->is_slave) {
+ dev_err(dev, "master PHY is not found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mixel_lvds_phy_validate(struct phy *phy, enum phy_mode mode,
+ int submode, union phy_configure_opts *opts)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
+ struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy);
+ struct phy_configure_opts_lvds *cfg = &opts->lvds;
+ int ret = 0;
+
+ if (mode != PHY_MODE_LVDS) {
+ dev_err(&phy->dev, "invalid PHY mode(%d)\n", mode);
+ return -EINVAL;
+ }
+
+ if (cfg->bits_per_lane_and_dclk_cycle != 7 &&
+ cfg->bits_per_lane_and_dclk_cycle != 10) {
+ dev_err(&phy->dev, "invalid bits per data lane(%u)\n",
+ cfg->bits_per_lane_and_dclk_cycle);
+ return -EINVAL;
+ }
+
+ if (cfg->lanes != 4 && cfg->lanes != 3) {
+ dev_err(&phy->dev, "invalid data lanes(%u)\n", cfg->lanes);
+ return -EINVAL;
+ }
+
+ if (cfg->differential_clk_rate < MIN_CLKIN_FREQ ||
+ cfg->differential_clk_rate > MAX_CLKIN_FREQ) {
+ dev_err(&phy->dev, "invalid differential clock rate(%lu)\n",
+ cfg->differential_clk_rate);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->lock);
+ /* cache our own configuration set for later checking */
+ memcpy(&lvds_phy->cfg, cfg, sizeof(*cfg));
+
+ if (cfg->is_slave) {
+ ret = mixel_lvds_phy_check_slave(phy);
+ if (ret)
+ dev_err(&phy->dev, "failed to check slave PHY: %d\n", ret);
+ }
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct phy_ops mixel_lvds_phy_ops = {
+ .init = mixel_lvds_phy_init,
+ .power_on = mixel_lvds_phy_power_on,
+ .power_off = mixel_lvds_phy_power_off,
+ .configure = mixel_lvds_phy_configure,
+ .validate = mixel_lvds_phy_validate,
+ .owner = THIS_MODULE,
+};
+
+static int mixel_lvds_phy_reset(struct device *dev)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get PM runtime: %d\n", ret);
+ return ret;
+ }
+
+ regmap_write(priv->regmap, PHY_CTRL, CTRL_RESET_VAL);
+
+ ret = pm_runtime_put(dev);
+ if (ret < 0)
+ dev_err(dev, "failed to put PM runtime: %d\n", ret);
+
+ return ret;
+}
+
+static struct phy *mixel_lvds_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
+ unsigned int phy_id;
+
+ if (args->args_count != 1) {
+ dev_err(dev,
+ "invalid argument number(%d) for 'phys' property\n",
+ args->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ phy_id = args->args[0];
+
+ if (phy_id >= PHY_NUM) {
+ dev_err(dev, "invalid PHY index(%d)\n", phy_id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return priv->phys[phy_id]->phy;
+}
+
+static int mixel_lvds_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct mixel_lvds_phy_priv *priv;
+ struct mixel_lvds_phy *lvds_phy;
+ struct phy *phy;
+ int i;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = syscon_node_to_regmap(dev->of_node->parent);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap),
+ "failed to get regmap\n");
+
+ priv->phy_ref_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->phy_ref_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->phy_ref_clk),
+ "failed to get PHY reference clock\n");
+
+ mutex_init(&priv->lock);
+
+ dev_set_drvdata(dev, priv);
+
+ pm_runtime_enable(dev);
+
+ ret = mixel_lvds_phy_reset(dev);
+ if (ret) {
+ dev_err(dev, "failed to do POR reset: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < PHY_NUM; i++) {
+ lvds_phy = devm_kzalloc(dev, sizeof(*lvds_phy), GFP_KERNEL);
+ if (!lvds_phy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ phy = devm_phy_create(dev, NULL, &mixel_lvds_phy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ dev_err(dev, "failed to create PHY for channel%d: %d\n",
+ i, ret);
+ goto err;
+ }
+
+ lvds_phy->phy = phy;
+ lvds_phy->id = i;
+ priv->phys[i] = lvds_phy;
+
+ phy_set_drvdata(phy, lvds_phy);
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, mixel_lvds_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register PHY provider: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int mixel_lvds_phy_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused mixel_lvds_phy_runtime_suspend(struct device *dev)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
+
+ /* power down */
+ mutex_lock(&priv->lock);
+ regmap_write(priv->regmap, PHY_CTRL + REG_SET, PD);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int __maybe_unused mixel_lvds_phy_runtime_resume(struct device *dev)
+{
+ struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
+
+ /* power up + control initialization */
+ mutex_lock(&priv->lock);
+ regmap_update_bits(priv->regmap, PHY_CTRL,
+ CTRL_INIT_MASK | PD, CTRL_INIT_VAL);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mixel_lvds_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(mixel_lvds_phy_runtime_suspend,
+ mixel_lvds_phy_runtime_resume, NULL)
+};
+
+static const struct of_device_id mixel_lvds_phy_of_match[] = {
+ { .compatible = "fsl,imx8qm-lvds-phy" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mixel_lvds_phy_of_match);
+
+static struct platform_driver mixel_lvds_phy_driver = {
+ .probe = mixel_lvds_phy_probe,
+ .remove = mixel_lvds_phy_remove,
+ .driver = {
+ .pm = &mixel_lvds_phy_pm_ops,
+ .name = "mixel-lvds-phy",
+ .of_match_table = mixel_lvds_phy_of_match,
+ }
+};
+module_platform_driver(mixel_lvds_phy_driver);
+
+MODULE_DESCRIPTION("Mixel LVDS PHY driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 55f8e6c048ab..3125ecb5d119 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -2,6 +2,17 @@
#
# Phy drivers for Mediatek devices
#
+config PHY_MTK_PCIE
+ tristate "MediaTek PCIe-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Say 'Y' here to add support for the MediaTek PCIe PHY driver.
+ This driver creates the basic PHY instance and provides an
+ initialization callback for the PCIe GEN3 port. It also supports
+ software eFuse initialization.
+
config PHY_MTK_TPHY
tristate "MediaTek T-PHY Driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
@@ -55,3 +66,11 @@ config PHY_MTK_MIPI_DSI
select GENERIC_PHY
help
Support MIPI DSI for Mediatek SoCs.
+
+config PHY_MTK_DP
+ tristate "MediaTek DP-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Support DisplayPort PHY for MediaTek SoCs.
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile
index ace660fbed3a..fb1f8edaffa7 100644
--- a/drivers/phy/mediatek/Makefile
+++ b/drivers/phy/mediatek/Makefile
@@ -3,6 +3,8 @@
# Makefile for the phy drivers.
#
+obj-$(CONFIG_PHY_MTK_DP) += phy-mtk-dp.o
+obj-$(CONFIG_PHY_MTK_PCIE) += phy-mtk-pcie.o
obj-$(CONFIG_PHY_MTK_TPHY) += phy-mtk-tphy.o
obj-$(CONFIG_PHY_MTK_UFS) += phy-mtk-ufs.o
obj-$(CONFIG_PHY_MTK_XSPHY) += phy-mtk-xsphy.o
diff --git a/drivers/phy/mediatek/phy-mtk-dp.c b/drivers/phy/mediatek/phy-mtk-dp.c
new file mode 100644
index 000000000000..31266e7ca324
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-dp.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek DisplayPort PHY driver
+ *
+ * Copyright (c) 2022, BayLibre Inc.
+ * Copyright (c) 2022, MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define PHY_OFFSET 0x1000
+
+#define MTK_DP_PHY_DIG_PLL_CTL_1 (PHY_OFFSET + 0x14)
+#define TPLL_SSC_EN BIT(3)
+
+#define MTK_DP_PHY_DIG_BIT_RATE (PHY_OFFSET + 0x3C)
+#define BIT_RATE_RBR 0
+#define BIT_RATE_HBR 1
+#define BIT_RATE_HBR2 2
+#define BIT_RATE_HBR3 3
+
+#define MTK_DP_PHY_DIG_SW_RST (PHY_OFFSET + 0x38)
+#define DP_GLB_SW_RST_PHYD BIT(0)
+
+#define MTK_DP_LANE0_DRIVING_PARAM_3 (PHY_OFFSET + 0x138)
+#define MTK_DP_LANE1_DRIVING_PARAM_3 (PHY_OFFSET + 0x238)
+#define MTK_DP_LANE2_DRIVING_PARAM_3 (PHY_OFFSET + 0x338)
+#define MTK_DP_LANE3_DRIVING_PARAM_3 (PHY_OFFSET + 0x438)
+#define XTP_LN_TX_LCTXC0_SW0_PRE0_DEFAULT BIT(4)
+#define XTP_LN_TX_LCTXC0_SW0_PRE1_DEFAULT (BIT(10) | BIT(12))
+#define XTP_LN_TX_LCTXC0_SW0_PRE2_DEFAULT GENMASK(20, 19)
+#define XTP_LN_TX_LCTXC0_SW0_PRE3_DEFAULT GENMASK(29, 29)
+#define DRIVING_PARAM_3_DEFAULT (XTP_LN_TX_LCTXC0_SW0_PRE0_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW0_PRE1_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW0_PRE2_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW0_PRE3_DEFAULT)
+
+#define XTP_LN_TX_LCTXC0_SW1_PRE0_DEFAULT GENMASK(4, 3)
+#define XTP_LN_TX_LCTXC0_SW1_PRE1_DEFAULT GENMASK(12, 9)
+#define XTP_LN_TX_LCTXC0_SW1_PRE2_DEFAULT (BIT(18) | BIT(21))
+#define XTP_LN_TX_LCTXC0_SW2_PRE0_DEFAULT GENMASK(29, 29)
+#define DRIVING_PARAM_4_DEFAULT (XTP_LN_TX_LCTXC0_SW1_PRE0_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW1_PRE1_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW1_PRE2_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW2_PRE0_DEFAULT)
+
+#define XTP_LN_TX_LCTXC0_SW2_PRE1_DEFAULT (BIT(3) | BIT(5))
+#define XTP_LN_TX_LCTXC0_SW3_PRE0_DEFAULT GENMASK(13, 12)
+#define DRIVING_PARAM_5_DEFAULT (XTP_LN_TX_LCTXC0_SW2_PRE1_DEFAULT | \
+ XTP_LN_TX_LCTXC0_SW3_PRE0_DEFAULT)
+
+#define XTP_LN_TX_LCTXCP1_SW0_PRE0_DEFAULT 0
+#define XTP_LN_TX_LCTXCP1_SW0_PRE1_DEFAULT GENMASK(10, 10)
+#define XTP_LN_TX_LCTXCP1_SW0_PRE2_DEFAULT GENMASK(19, 19)
+#define XTP_LN_TX_LCTXCP1_SW0_PRE3_DEFAULT GENMASK(28, 28)
+#define DRIVING_PARAM_6_DEFAULT (XTP_LN_TX_LCTXCP1_SW0_PRE0_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW0_PRE1_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW0_PRE2_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW0_PRE3_DEFAULT)
+
+#define XTP_LN_TX_LCTXCP1_SW1_PRE0_DEFAULT 0
+#define XTP_LN_TX_LCTXCP1_SW1_PRE1_DEFAULT GENMASK(10, 9)
+#define XTP_LN_TX_LCTXCP1_SW1_PRE2_DEFAULT GENMASK(19, 18)
+#define XTP_LN_TX_LCTXCP1_SW2_PRE0_DEFAULT 0
+#define DRIVING_PARAM_7_DEFAULT (XTP_LN_TX_LCTXCP1_SW1_PRE0_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW1_PRE1_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW1_PRE2_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW2_PRE0_DEFAULT)
+
+#define XTP_LN_TX_LCTXCP1_SW2_PRE1_DEFAULT GENMASK(3, 3)
+#define XTP_LN_TX_LCTXCP1_SW3_PRE0_DEFAULT 0
+#define DRIVING_PARAM_8_DEFAULT (XTP_LN_TX_LCTXCP1_SW2_PRE1_DEFAULT | \
+ XTP_LN_TX_LCTXCP1_SW3_PRE0_DEFAULT)
+
+struct mtk_dp_phy {
+ struct regmap *regs;
+};
+
+static int mtk_dp_phy_init(struct phy *phy)
+{
+ struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
+ u32 driving_params[] = {
+ DRIVING_PARAM_3_DEFAULT,
+ DRIVING_PARAM_4_DEFAULT,
+ DRIVING_PARAM_5_DEFAULT,
+ DRIVING_PARAM_6_DEFAULT,
+ DRIVING_PARAM_7_DEFAULT,
+ DRIVING_PARAM_8_DEFAULT
+ };
+
+ regmap_bulk_write(dp_phy->regs, MTK_DP_LANE0_DRIVING_PARAM_3,
+ driving_params, ARRAY_SIZE(driving_params));
+ regmap_bulk_write(dp_phy->regs, MTK_DP_LANE1_DRIVING_PARAM_3,
+ driving_params, ARRAY_SIZE(driving_params));
+ regmap_bulk_write(dp_phy->regs, MTK_DP_LANE2_DRIVING_PARAM_3,
+ driving_params, ARRAY_SIZE(driving_params));
+ regmap_bulk_write(dp_phy->regs, MTK_DP_LANE3_DRIVING_PARAM_3,
+ driving_params, ARRAY_SIZE(driving_params));
+
+ return 0;
+}
+
+static int mtk_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
+ u32 val;
+
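+ /* Map the requested DP link rate to the PHY's bit-rate register code. */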
+ if (opts->dp.set_rate) {
+ switch (opts->dp.link_rate) {
+ default:
+ dev_err(&phy->dev,
+ "Implementation error, unknown linkrate %x\n",
+ opts->dp.link_rate);
+ return -EINVAL;
+ case 1620:
+ val = BIT_RATE_RBR;
+ break;
+ case 2700:
+ val = BIT_RATE_HBR;
+ break;
+ case 5400:
+ val = BIT_RATE_HBR2;
+ break;
+ case 8100:
+ val = BIT_RATE_HBR3;
+ break;
+ }
+ regmap_write(dp_phy->regs, MTK_DP_PHY_DIG_BIT_RATE, val);
+ }
+
+ regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_PLL_CTL_1,
+ TPLL_SSC_EN, opts->dp.ssc ? TPLL_SSC_EN : 0);
+
+ return 0;
+}
+
+static int mtk_dp_phy_reset(struct phy *phy)
+{
+ struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
+
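+ /* Toggle the global PHYD software reset bit, with a short delay in between. */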
+ regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_SW_RST,
+ DP_GLB_SW_RST_PHYD, 0);
+ usleep_range(50, 200);
+ regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_SW_RST,
+ DP_GLB_SW_RST_PHYD, 1);
+
+ return 0;
+}
+
+static const struct phy_ops mtk_dp_phy_dev_ops = {
+ .init = mtk_dp_phy_init,
+ .configure = mtk_dp_phy_configure,
+ .reset = mtk_dp_phy_reset,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_dp_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_dp_phy *dp_phy;
+ struct phy *phy;
+ struct regmap *regs;
+
+ regs = *(struct regmap **)dev->platform_data;
+ if (!regs)
+ return dev_err_probe(dev, -EINVAL,
+ "No data passed, requires struct regmap**\n");
+
+ dp_phy = devm_kzalloc(dev, sizeof(*dp_phy), GFP_KERNEL);
+ if (!dp_phy)
+ return -ENOMEM;
+
+ dp_phy->regs = regs;
+ phy = devm_phy_create(dev, NULL, &mtk_dp_phy_dev_ops);
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy),
+ "Failed to create DP PHY\n");
+
+ phy_set_drvdata(phy, dp_phy);
+ if (!dev->of_node)
+ phy_create_lookup(phy, "dp", dev_name(dev));
+
+ return 0;
+}
+
+static struct platform_driver mtk_dp_phy_driver = {
+ .probe = mtk_dp_phy_probe,
+ .driver = {
+ .name = "mediatek-dp-phy",
+ },
+};
+module_platform_driver(mtk_dp_phy_driver);
+
+MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>");
+MODULE_DESCRIPTION("MediaTek DP PHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/mediatek/phy-mtk-pcie.c b/drivers/phy/mediatek/phy-mtk-pcie.c
new file mode 100644
index 000000000000..7f29d43442bf
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-pcie.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jianjun Wang <jianjun.wang@mediatek.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "phy-mtk-io.h"
+
+#define PEXTP_ANA_GLB_00_REG 0x9000
+/* Internal Resistor Selection of TX Bias Current */
+#define EFUSE_GLB_INTR_SEL GENMASK(28, 24)
+
+#define PEXTP_ANA_LN0_TRX_REG 0xa000
+
+#define PEXTP_ANA_TX_REG 0x04
+/* TX PMOS impedance selection */
+#define EFUSE_LN_TX_PMOS_SEL GENMASK(5, 2)
+/* TX NMOS impedance selection */
+#define EFUSE_LN_TX_NMOS_SEL GENMASK(11, 8)
+
+#define PEXTP_ANA_RX_REG 0x3c
+/* RX impedance selection */
+#define EFUSE_LN_RX_SEL GENMASK(3, 0)
+
+#define PEXTP_ANA_LANE_OFFSET 0x100
+
+/**
+ * struct mtk_pcie_lane_efuse - eFuse data for each lane
+ * @tx_pmos: TX PMOS impedance selection data
+ * @tx_nmos: TX NMOS impedance selection data
+ * @rx_data: RX impedance selection data
+ * @lane_efuse_supported: software eFuse data is supported for this lane
+ */
+struct mtk_pcie_lane_efuse {
+ u32 tx_pmos;
+ u32 tx_nmos;
+ u32 rx_data;
+ bool lane_efuse_supported;
+};
+
+/**
+ * struct mtk_pcie_phy_data - phy data for each SoC
+ * @num_lanes: supported lane numbers
+ * @sw_efuse_supported: support software to load eFuse data
+ */
+struct mtk_pcie_phy_data {
+ int num_lanes;
+ bool sw_efuse_supported;
+};
+
+/**
+ * struct mtk_pcie_phy - PCIe phy driver main structure
+ * @dev: pointer to device
+ * @phy: pointer to generic phy
+ * @sif_base: IO mapped register base address of system interface
+ * @data: pointer to SoC dependent data
+ * @sw_efuse_en: software eFuse enable status
+ * @efuse_glb_intr: internal resistor selection of TX bias current data
+ * @efuse: pointer to eFuse data for each lane
+ */
+struct mtk_pcie_phy {
+ struct device *dev;
+ struct phy *phy;
+ void __iomem *sif_base;
+ const struct mtk_pcie_phy_data *data;
+
+ bool sw_efuse_en;
+ u32 efuse_glb_intr;
+ struct mtk_pcie_lane_efuse *efuse;
+};
+
+static void mtk_pcie_efuse_set_lane(struct mtk_pcie_phy *pcie_phy,
+ unsigned int lane)
+{
+ struct mtk_pcie_lane_efuse *data = &pcie_phy->efuse[lane];
+ void __iomem *addr;
+
+ if (!data->lane_efuse_supported)
+ return;
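+ /* Per-lane analog TX/RX registers sit at a fixed offset from lane 0. */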
+
+ addr = pcie_phy->sif_base + PEXTP_ANA_LN0_TRX_REG +
+ lane * PEXTP_ANA_LANE_OFFSET;
+
+ mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL,
+ FIELD_PREP(EFUSE_LN_TX_PMOS_SEL, data->tx_pmos));
+
+ mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL,
+ FIELD_PREP(EFUSE_LN_TX_NMOS_SEL, data->tx_nmos));
+
+ mtk_phy_update_bits(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL,
+ FIELD_PREP(EFUSE_LN_RX_SEL, data->rx_data));
+}
+
+/**
+ * mtk_pcie_phy_init() - Initialize the phy
+ * @phy: the phy to be initialized
+ *
+ * Initialize the phy by setting the efuse data.
+ * The hardware settings are reset during suspend, so the PHY should be
+ * reinitialized when the consumer calls phy_init() again on resume.
+ */
+static int mtk_pcie_phy_init(struct phy *phy)
+{
+ struct mtk_pcie_phy *pcie_phy = phy_get_drvdata(phy);
+ int i;
+
+ if (!pcie_phy->sw_efuse_en)
+ return 0;
+
+ /* Set global data */
+ mtk_phy_update_bits(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG,
+ EFUSE_GLB_INTR_SEL,
+ FIELD_PREP(EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr));
+
+ for (i = 0; i < pcie_phy->data->num_lanes; i++)
+ mtk_pcie_efuse_set_lane(pcie_phy, i);
+
+ return 0;
+}
+
+static const struct phy_ops mtk_pcie_phy_ops = {
+ .init = mtk_pcie_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_pcie_efuse_read_for_lane(struct mtk_pcie_phy *pcie_phy,
+ unsigned int lane)
+{
+ struct mtk_pcie_lane_efuse *efuse = &pcie_phy->efuse[lane];
+ struct device *dev = pcie_phy->dev;
+ char efuse_id[16];
+ int ret;
+
+ snprintf(efuse_id, sizeof(efuse_id), "tx_ln%d_pmos", lane);
+ ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->tx_pmos);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id);
+
+ snprintf(efuse_id, sizeof(efuse_id), "tx_ln%d_nmos", lane);
+ ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->tx_nmos);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id);
+
+ snprintf(efuse_id, sizeof(efuse_id), "rx_ln%d", lane);
+ ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->rx_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id);
+
+ if (!(efuse->tx_pmos || efuse->tx_nmos || efuse->rx_data))
+ return dev_err_probe(dev, -EINVAL,
+ "No eFuse data found for lane%d, but dts enable it\n",
+ lane);
+
+ efuse->lane_efuse_supported = true;
+
+ return 0;
+}
+
+static int mtk_pcie_read_efuse(struct mtk_pcie_phy *pcie_phy)
+{
+ struct device *dev = pcie_phy->dev;
+ bool nvmem_enabled;
+ int ret, i;
+
+ /* nvmem data is optional */
+ nvmem_enabled = device_property_present(dev, "nvmem-cells");
+ if (!nvmem_enabled)
+ return 0;
+
+ ret = nvmem_cell_read_variable_le_u32(dev, "glb_intr",
+ &pcie_phy->efuse_glb_intr);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read glb_intr\n");
+
+ pcie_phy->sw_efuse_en = true;
+
+ pcie_phy->efuse = devm_kzalloc(dev, pcie_phy->data->num_lanes *
+ sizeof(*pcie_phy->efuse), GFP_KERNEL);
+ if (!pcie_phy->efuse)
+ return -ENOMEM;
+
+ for (i = 0; i < pcie_phy->data->num_lanes; i++) {
+ ret = mtk_pcie_efuse_read_for_lane(pcie_phy, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct mtk_pcie_phy *pcie_phy;
+ int ret;
+
+ pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL);
+ if (!pcie_phy)
+ return -ENOMEM;
+
+ pcie_phy->sif_base = devm_platform_ioremap_resource_byname(pdev, "sif");
+ if (IS_ERR(pcie_phy->sif_base))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->sif_base),
+ "Failed to map phy-sif base\n");
+
+ pcie_phy->phy = devm_phy_create(dev, dev->of_node, &mtk_pcie_phy_ops);
+ if (IS_ERR(pcie_phy->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
+ "Failed to create PCIe phy\n");
+
+ pcie_phy->dev = dev;
+ pcie_phy->data = of_device_get_match_data(dev);
+ if (!pcie_phy->data)
+ return dev_err_probe(dev, -EINVAL, "Failed to get phy data\n");
+
+ if (pcie_phy->data->sw_efuse_supported) {
+ /*
+ * Failing to read the eFuse data is not fatal; ignore the
+ * failure and keep going, except for probe deferral and
+ * memory allocation errors.
+ */
+ ret = mtk_pcie_read_efuse(pcie_phy);
+ if (ret == -EPROBE_DEFER || ret == -ENOMEM)
+ return ret;
+ }
+
+ phy_set_drvdata(pcie_phy->phy, pcie_phy);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider))
+ return dev_err_probe(dev, PTR_ERR(provider),
+ "PCIe phy probe failed\n");
+
+ return 0;
+}
+
+static const struct mtk_pcie_phy_data mt8195_data = {
+ .num_lanes = 2,
+ .sw_efuse_supported = true,
+};
+
+static const struct of_device_id mtk_pcie_phy_of_match[] = {
+ { .compatible = "mediatek,mt8195-pcie-phy", .data = &mt8195_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mtk_pcie_phy_of_match);
+
+static struct platform_driver mtk_pcie_phy_driver = {
+ .probe = mtk_pcie_phy_probe,
+ .driver = {
+ .name = "mtk-pcie-phy",
+ .of_match_table = mtk_pcie_phy_of_match,
+ },
+};
+module_platform_driver(mtk_pcie_phy_driver);
+
+MODULE_DESCRIPTION("MediaTek PCIe PHY driver");
+MODULE_AUTHOR("Jianjun Wang <jianjun.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 6ee478bc5211..2f8210167b77 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Motorola CPCAP PMIC USB PHY driver
* Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
@@ -5,15 +6,6 @@
* Some parts based on earlier Motorola Linux kernel tree code in
* board-mapphone-usb.c and cpcap-usb-det.c:
* Copyright (C) 2007 - 2011 Motorola, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/atomic.h>
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index e9e3b1a4dbb0..65f6c30a3e93 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -5,7 +5,13 @@ obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
-obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
+obj-$(CONFIG_PHY_QCOM_QMP) += \
+ phy-qcom-qmp-combo.o \
+ phy-qcom-qmp-pcie.o \
+ phy-qcom-qmp-pcie-msm8996.o \
+ phy-qcom-qmp-ufs.o \
+ phy-qcom-qmp-usb.o
+
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index cacd32f6e0cc..7e3570789845 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -639,6 +639,18 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = regulator_set_load(edp->supplies[0].consumer, 21800); /* 1.2 V vdda-phy */
+ if (ret) {
+ dev_err(dev, "failed to set load at %s\n", edp->supplies[0].supply);
+ return ret;
+ }
+
+ ret = regulator_set_load(edp->supplies[1].consumer, 36000); /* 0.9 V vdda-pll */
+ if (ret) {
+ dev_err(dev, "failed to set load at %s\n", edp->supplies[1].supply);
+ return ret;
+ }
+
ret = qcom_edp_clks_register(edp, pdev->dev.of_node);
if (ret)
return ret;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
new file mode 100644
index 000000000000..4b1828976104
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -0,0 +1,2621 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees. */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * Is the register part of the layout?
+ * If so, offset is an index into the reg layout.
+ */
+ bool in_layout;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* PCS_MISC registers */
+ QPHY_PCS_MISC_TYPEC_CTRL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+};
+
+static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x008,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x014,
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x8c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRANSCEIVER_BIAS_EN, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_INTERFACE_SELECT, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRAN_DRVR_EMP_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_BAND, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_POL_INV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_DRV_LVL, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_EMP_POST1_LVL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
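+/* entries created with QMP_PHY_INIT_CFG_LANE() apply only to the matching lane */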
+static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
+};
+
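+/* per-link-rate serdes overlays, applied on top of qmp_v4_dp_serdes_tbl */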
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x8c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_INTERFACE_SELECT, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_BAND, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_POL_INV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_DRV_LVL, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20),
+};
+
+/* list of regulators */
+struct qmp_regulator_data {
+ const char *name;
+ unsigned int enable_load;
+};
+
+static struct qmp_regulator_data qmp_phy_vreg_l[] = {
+ { .name = "vdda-phy", .enable_load = 21800 },
+ { .name = "vdda-pll", .enable_load = 36000 },
+};
+
+struct qmp_phy;
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_usb_tbl;
+ int pcs_usb_tbl_num;
+
+ /* Init sequence for DP PHY block link rates */
+ const struct qmp_phy_init_tbl *serdes_tbl_rbr;
+ int serdes_tbl_rbr_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_hbr;
+ int serdes_tbl_hbr_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_hbr2;
+ int serdes_tbl_hbr2_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
+ int serdes_tbl_hbr3_num;
+
+ /* DP PHY callbacks */
+ int (*configure_dp_phy)(struct qmp_phy *qphy);
+ void (*configure_dp_tx)(struct qmp_phy *qphy);
+ int (*calibrate_dp_phy)(struct qmp_phy *qphy);
+ void (*dp_aux_init)(struct qmp_phy *qphy);
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const struct qmp_regulator_data *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
+
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+ /* true, if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+ /* true, if PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
+
+ /* Offset from PCS to PCS_USB region */
+ unsigned int pcs_usb_offset;
+};
+
+struct qmp_phy_combo_cfg {
+ const struct qmp_phy_cfg *usb_cfg;
+ const struct qmp_phy_cfg *dp_cfg;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
+ * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pcs_usb: iomapped memory space for lane's pcs_usb
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ * @mode: current PHY mode
+ * @dp_aux_cfg: Display port aux config
+ * @dp_opts: Display port optional config
+ * @dp_clks: Display port clocks
+ */
+struct qmp_phy {
+ struct phy *phy;
+ const struct qmp_phy_cfg *cfg;
+ void __iomem *serdes;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *tx2;
+ void __iomem *rx2;
+ void __iomem *pcs_misc;
+ void __iomem *pcs_usb;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+ enum phy_mode mode;
+ unsigned int dp_aux_cfg;
+ struct phy_configure_opts_dp dp_opts;
+ struct qmp_phy_dp_clks *dp_clks;
+};
+
+struct qmp_phy_dp_clks {
+ struct qmp_phy *qphy;
+ struct clk_hw dp_link_hw;
+ struct clk_hw dp_pixel_hw;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @dp_com: iomapped memory space for phy's dp_com control block
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ * @ufs_reset: optional UFS PHY reset handle
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *dp_com;
+
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+
+ struct reset_control *ufs_reset;
+};
+
+static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy);
+static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy);
+static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy);
+static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy);
+
+static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy);
+static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy);
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy);
+static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy);
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const qmp_v4_phy_clk_l[] = {
+ "aux", "ref_clk_src", "ref", "com_aux",
+};
+
+/* the primary usb3 phy on sm8250 doesn't have a ref clock */
+static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
+ "aux", "ref_clk_src", "com_aux"
+};
+
+/* list of resets */
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const sc7180_usb3phy_reset_l[] = {
+ "phy",
+};
+
+static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
+ .type = PHY_TYPE_DP,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
+ .tx_tbl = qmp_v3_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
+
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+
+ .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
+ .usb_cfg = &sc7180_usb3phy_cfg,
+ .dp_cfg = &sc7180_dpphy_cfg,
+};
+
+static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8150_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
+ .rx_tbl = sm8150_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
+ .pcs_tbl = sm8150_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
+ .type = PHY_TYPE_DP,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v4_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
+ .tx_tbl = qmp_v4_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+
+ .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = {
+ .usb_cfg = &sm8150_usb3phy_cfg,
+ .dp_cfg = &sc8180x_dpphy_cfg,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8250_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+ .rx_tbl = sm8250_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+ .pcs_tbl = sm8250_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
+ .type = PHY_TYPE_DP,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v4_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
+ .tx_tbl = qmp_v4_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+
+ .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = {
+ .usb_cfg = &sm8250_usb3phy_cfg,
+ .dp_cfg = &sm8250_dpphy_cfg,
+};
+
+static void qcom_qmp_phy_combo_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
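+/* program an init table on all lanes of a PHY block */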
+static void qcom_qmp_phy_combo_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_combo_configure_lane(base, regs, tbl, num, 0xff);
+}
+
+static int qcom_qmp_phy_combo_serdes_init(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+
+ qcom_qmp_phy_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+
+ if (cfg->type == PHY_TYPE_DP) {
+ switch (dp_opts->link_rate) {
+ case 1620:
+ qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_rbr,
+ cfg->serdes_tbl_rbr_num);
+ break;
+ case 2700:
+ qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_hbr,
+ cfg->serdes_tbl_hbr_num);
+ break;
+ case 5400:
+ qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_hbr2,
+ cfg->serdes_tbl_hbr2_num);
+ break;
+ case 8100:
+ qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_hbr3,
+ cfg->serdes_tbl_hbr3_num);
+ break;
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy)
+{
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
+ QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
+ qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN |
+ DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN |
+ DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ writel(QSERDES_V3_COM_BIAS_EN |
+ QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
+ QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL |
+ QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
+ qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
+ writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0x24, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
+ writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
+ writel(0xbb, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
+ qphy->dp_aux_cfg = 0;
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK,
+ qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
+}
+
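+/*
+ * Lookup tables for DP voltage swing and pre-emphasis, indexed by
+ * [voltage swing level][pre-emphasis level]; 0xff marks an unsupported
+ * combination.
+ */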
+static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1a },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
+ { 0x02, 0x12, 0x16, 0x1a },
+ { 0x09, 0x19, 0x1f, 0xff },
+ { 0x10, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x00, 0x0c, 0x14, 0x19 },
+ { 0x00, 0x0b, 0x12, 0xff },
+ { 0x00, 0x0b, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x19, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static int qcom_qmp_phy_combo_configure_dp_swing(struct qmp_phy *qphy,
+ unsigned int drv_lvl_reg, unsigned int emp_post_reg)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ unsigned int v_level = 0, p_level = 0;
+ u8 voltage_swing_cfg, pre_emphasis_cfg;
+ int i;
+
+ for (i = 0; i < dp_opts->lanes; i++) {
+ v_level = max(v_level, dp_opts->voltage[i]);
+ p_level = max(p_level, dp_opts->pre[i]);
+ }
+
+ if (dp_opts->link_rate <= 2700) {
+ voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
+ pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
+ } else {
+ voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level];
+ pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level];
+ }
+
+ /* TODO: Move check to config check */
+ if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF)
+ return -EINVAL;
+
+ /* Enable MUX to use Cursor values from these registers */
+ voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
+ pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
+
+ writel(voltage_swing_cfg, qphy->tx + drv_lvl_reg);
+ writel(pre_emphasis_cfg, qphy->tx + emp_post_reg);
+ writel(voltage_swing_cfg, qphy->tx2 + drv_lvl_reg);
+ writel(pre_emphasis_cfg, qphy->tx2 + emp_post_reg);
+
+ return 0;
+}
+
+static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 bias_en, drvr_en;
+
+ if (qcom_qmp_phy_combo_configure_dp_swing(qphy,
+ QSERDES_V3_TX_TX_DRV_LVL,
+ QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
+ return;
+
+ if (dp_opts->lanes == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+ writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+ writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+}
+
+static bool qcom_qmp_phy_combo_configure_dp_mode(struct qmp_phy *qphy)
+{
+ u32 val;
+ bool reverse = false;
+
+ val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN;
+
+ /*
+ * TODO: Assume orientation is CC1 for now and two lanes, need to
+ * use type-c connector to understand orientation and lanes.
+ *
+ * Otherwise val changes to be like below if this code understood
+ * the orientation of the type-c cable.
+ *
+ * if (lane_cnt == 4 || orientation == ORIENTATION_CC2)
+ * val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN;
+ * if (lane_cnt == 4 || orientation == ORIENTATION_CC1)
+ * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
+ * if (orientation == ORIENTATION_CC2)
+ * writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
+ */
+ val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
+ writel(val, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ writel(0x5c, qphy->pcs + QSERDES_DP_PHY_MODE);
+
+ return reverse;
+}
+
+static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 phy_vco_div, status;
+ unsigned long pixel_freq;
+
+ qcom_qmp_phy_combo_configure_dp_mode(qphy);
+
+ writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
+
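+ /* the negotiated link rate selects the VCO divider and pixel clock */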
+ switch (dp_opts->link_rate) {
+ case 1620:
+ phy_vco_div = 0x1;
+ pixel_freq = 1620000000UL / 2;
+ break;
+ case 2700:
+ phy_vco_div = 0x1;
+ pixel_freq = 2700000000UL / 2;
+ break;
+ case 5400:
+ phy_vco_div = 0x2;
+ pixel_freq = 5400000000UL / 4;
+ break;
+ case 8100:
+ phy_vco_div = 0x0;
+ pixel_freq = 8100000000UL / 6;
+ break;
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+ writel(phy_vco_div, qphy->pcs + QSERDES_V3_DP_PHY_VCO_DIV);
+
+ clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
+
+ writel(0x04, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL);
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V3_COM_C_READY_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000);
+}
+
+/*
+ * Cycle through the AUX CFG1 settings, advancing to the next value on
+ * each calibration attempt by the caller.
+ */
+static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy)
+{
+ static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
+ u8 val;
+
+ qphy->dp_aux_cfg++;
+ qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
+ val = cfg1_settings[qphy->dp_aux_cfg];
+
+ writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+
+ return 0;
+}
+
+static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy)
+{
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x17, qphy->serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
+ writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
+ writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
+ writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
+ writel(0xb7, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
+ writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
+ qphy->dp_aux_cfg = 0;
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK,
+ qphy->pcs + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
+}
+
+static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy)
+{
+ /* Program default values before writing proper values */
+ writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+ writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+ qcom_qmp_phy_combo_configure_dp_swing(qphy,
+ QSERDES_V4_TX_TX_DRV_LVL,
+ QSERDES_V4_TX_TX_EMP_POST1_LVL);
+}
+
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 phy_vco_div, status;
+ unsigned long pixel_freq;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse;
+
+ writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1);
+
+ reverse = qcom_qmp_phy_combo_configure_dp_mode(qphy);
+
+ writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+
+ writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL);
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ phy_vco_div = 0x1;
+ pixel_freq = 1620000000UL / 2;
+ break;
+ case 2700:
+ phy_vco_div = 0x1;
+ pixel_freq = 2700000000UL / 2;
+ break;
+ case 5400:
+ phy_vco_div = 0x2;
+ pixel_freq = 5400000000UL / 4;
+ break;
+ case 8100:
+ phy_vco_div = 0x0;
+ pixel_freq = 8100000000UL / 6;
+ break;
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+ writel(phy_vco_div, qphy->pcs + QSERDES_V4_DP_PHY_VCO_DIV);
+
+ clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
+
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+ writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ writel(0x20, qphy->serdes + QSERDES_V4_COM_RESETSM_CNTRL);
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_C_READY_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ /*
+ * At least for 7nm DP PHY this has to be done after enabling link
+ * clock.
+ */
+
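+ /* lane count and plug orientation select the TX bias and driver enables */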
+ if (dp_opts->lanes == 1) {
+ bias0_en = reverse ? 0x3e : 0x15;
+ bias1_en = reverse ? 0x15 : 0x3e;
+ drvr0_en = reverse ? 0x13 : 0x10;
+ drvr1_en = reverse ? 0x10 : 0x13;
+ } else if (dp_opts->lanes == 2) {
+ bias0_en = reverse ? 0x3f : 0x15;
+ bias1_en = reverse ? 0x15 : 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ } else {
+ bias0_en = 0x3f;
+ bias1_en = 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ }
+
+ writel(drvr0_en, qphy->tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+ writel(bias0_en, qphy->tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, qphy->tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+ writel(bias1_en, qphy->tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+
+ writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x0a, qphy->tx + QSERDES_V4_TX_TX_POL_INV);
+ writel(0x0a, qphy->tx2 + QSERDES_V4_TX_TX_POL_INV);
+
+ writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+ writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+ return 0;
+}
+
+/*
+ * Cycle through the AUX CFG1 settings, advancing to the next value on
+ * each calibration attempt by the caller.
+ */
+static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy)
+{
+ static const u8 cfg1_settings[] = { 0x20, 0x13, 0x23, 0x1d };
+ u8 val;
+
+ qphy->dp_aux_cfg++;
+ qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
+ val = cfg1_settings[qphy->dp_aux_cfg];
+
+ writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+
+ return 0;
+}
+
+static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ const struct phy_configure_opts_dp *dp_opts = &opts->dp;
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
+ if (qphy->dp_opts.set_voltages) {
+ cfg->configure_dp_tx(qphy);
+ qphy->dp_opts.set_voltages = 0;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ if (cfg->calibrate_dp_phy)
+ return cfg->calibrate_dp_phy(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_com_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ int ret;
+
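+ /* the common block is shared by the USB and DP PHYs; set it up only once */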
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ goto err_unlock;
+ }
+
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ if (cfg->has_phy_dp_com_ctrl) {
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ /* Default type-c orientation, i.e CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+ }
+
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+ qphy_setbits(pcs,
+ cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ else
+ qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+err_unlock:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_combo_com_exit(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
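+ /* tear down the common block only after the last PHY has exited */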
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ reset_control_assert(qmp->ufs_reset);
+
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ int ret;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_combo_com_init(qphy);
+ if (ret)
+ return ret;
+
+ if (cfg->type == PHY_TYPE_DP)
+ cfg->dp_aux_init(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_power_on(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+ qcom_qmp_phy_combo_serdes_init(qphy);
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_combo_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
+
+ /* Configure the second lane of the USB-DP combo PHY */
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_combo_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ }
+
+ /* Configure special DP tx tunings */
+ if (cfg->type == PHY_TYPE_DP)
+ cfg->configure_dp_tx(qphy);
+
+ qcom_qmp_phy_combo_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_combo_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ }
+
+ /* Configure link rate, swing, etc. */
+ if (cfg->type == PHY_TYPE_DP) {
+ cfg->configure_dp_phy(qphy);
+ } else {
+ qcom_qmp_phy_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ }
+
+ ret = reset_control_deassert(qmp->ufs_reset);
+ if (ret)
+ goto err_disable_pipe_clk;
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ if (cfg->type != PHY_TYPE_DP) {
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = cfg->phy_status;
+ ready = 0;
+
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+ }
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_combo_power_off(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ if (cfg->type == PHY_TYPE_DP) {
+ /* Assert DP PHY power down */
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+ } else {
+ /* PHY reset */
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qcom_qmp_phy_combo_com_exit(qphy);
+
+ return 0;
+}
+
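+/* the USB PHY's .init op also powers the PHY on, so combine the two steps here */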
+static int qcom_qmp_phy_combo_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_combo_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_combo_power_on(phy);
+ if (ret)
+ qcom_qmp_phy_combo_exit(phy);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_combo_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_combo_power_off(phy);
+ if (ret)
+ return ret;
+ return qcom_qmp_phy_combo_exit(phy);
+}
+
+static int qcom_qmp_phy_combo_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qphy->mode = mode;
+
+ return 0;
+}
+
+static void qcom_qmp_phy_combo_enable_autonomous_mode(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ u32 intr_mask;
+
+ if (qphy->mode == PHY_MODE_USB_HOST_SS ||
+ qphy->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupts status */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_combo_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
+
+ /* Supported only for USB3 PHY and luckily USB3 is the first phy */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qcom_qmp_phy_combo_enable_autonomous_mode(qphy);
+
+ clk_disable_unprepare(qphy->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_combo_runtime_resume(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
+
+ /* Supported only for USB3 PHY and luckily USB3 is the first phy */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qmp->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qcom_qmp_phy_combo_disable_autonomous_mode(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_vregs;
+ int ret, i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i].name;
+
+ ret = devm_regulator_bulk_get(dev, num, qmp->vregs);
+ if (ret) {
+ dev_err(dev, "failed at devm_regulator_bulk_get\n");
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = regulator_set_load(qmp->vregs[i].consumer,
+ cfg->vreg_list[i].enable_load);
+ if (ret) {
+ dev_err(dev, "failed to set load at %s\n",
+ qmp->vregs[i].supply);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_resets; i++)
+ qmp->resets[i].id = cfg->reset_list[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qcom_qmp_phy_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the <s>_pipe_clksrc here. The gcc driver takes care
+ * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * Below picture shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ * +------------------------------+
+ * | DP_VCO_CLK |
+ * | |
+ * | +-------------------+ |
+ * | | (DP PLL/VCO) | |
+ * | +---------+---------+ |
+ * | v |
+ * | +----------+-----------+ |
+ * | | hsclk_divsel_clk_src | |
+ * | +----------+-----------+ |
+ * +------------------------------+
+ * |
+ * +---------<---------v------------>----------+
+ * | |
+ * +--------v----------------+ |
+ * | dp_phy_pll_link_clk | |
+ * | link_clk | |
+ * +--------+----------------+ |
+ * | |
+ * | |
+ * v v
+ * Input to DISPCC block |
+ * for link clk, crypto clk |
+ * and interface clock |
+ * |
+ * |
+ * +--------<------------+-----------------+---<---+
+ * | | |
+ * +----v---------+ +--------v-----+ +--------v------+
+ * | vco_divided | | vco_divided | | vco_divided |
+ * | _clk_src | | _clk_src | | _clk_src |
+ * | | | | | |
+ * |divsel_six | | divsel_two | | divsel_four |
+ * +-------+------+ +-----+--------+ +--------+------+
+ * | | |
+ * v---->----------v-------------<------v
+ * |
+ * +----------+-----------------+
+ * | dp_phy_pll_vco_div_clk |
+ * +---------+------------------+
+ * |
+ * v
+ * Input to DISPCC block
+ * for DP pixel clock
+ *
+ */
+static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 1620000000UL / 2:
+ case 2700000000UL / 2:
+ /* 5.4 and 8.1 GHz link rates (div 4 and div 6) yield the same pixel clock as 2.7 GHz */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qmp_phy_dp_clks *dp_clks;
+ const struct qmp_phy *qphy;
+ const struct phy_configure_opts_dp *dp_opts;
+
+ dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_pixel_hw);
+ qphy = dp_clks->qphy;
+ dp_opts = &qphy->dp_opts;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ return 1620000000UL / 2;
+ case 2700:
+ return 2700000000UL / 2;
+ case 5400:
+ return 5400000000UL / 4;
+ case 8100:
+ return 8100000000UL / 6;
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_qmp_dp_pixel_clk_ops = {
+ .determine_rate = qcom_qmp_dp_pixel_clk_determine_rate,
+ .recalc_rate = qcom_qmp_dp_pixel_clk_recalc_rate,
+};
+
+static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 162000000:
+ case 270000000:
+ case 540000000:
+ case 810000000:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qmp_phy_dp_clks *dp_clks;
+ const struct qmp_phy *qphy;
+ const struct phy_configure_opts_dp *dp_opts;
+
+ dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_link_hw);
+ qphy = dp_clks->qphy;
+ dp_opts = &qphy->dp_opts;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ case 2700:
+ case 5400:
+ case 8100:
+ return dp_opts->link_rate * 100000;
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_qmp_dp_link_clk_ops = {
+ .determine_rate = qcom_qmp_dp_link_clk_determine_rate,
+ .recalc_rate = qcom_qmp_dp_link_clk_recalc_rate,
+};
+
+static struct clk_hw *
+qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct qmp_phy_dp_clks *dp_clks = data;
+ unsigned int idx = clkspec->args[0];
+
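+ /* index 0 maps to the DP link clock, index 1 to the pixel clock */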
+ if (idx >= 2) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (idx == 0)
+ return &dp_clks->dp_link_hw;
+
+ return &dp_clks->dp_pixel_hw;
+}
+
+static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy,
+ struct device_node *np)
+{
+ struct clk_init_data init = { };
+ struct qmp_phy_dp_clks *dp_clks;
+ char name[64];
+ int ret;
+
+ dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL);
+ if (!dp_clks)
+ return -ENOMEM;
+
+ dp_clks->qphy = qphy;
+ qphy->dp_clks = dp_clks;
+
+ snprintf(name, sizeof(name), "%s::link_clk", dev_name(qmp->dev));
+ init.ops = &qcom_qmp_dp_link_clk_ops;
+ init.name = name;
+ dp_clks->dp_link_hw.init = &init;
+ ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw);
+ if (ret)
+ return ret;
+
+ snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(qmp->dev));
+ init.ops = &qcom_qmp_dp_pixel_clk_ops;
+ init.name = name;
+ dp_clks->dp_pixel_hw.init = &init;
+ ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, qcom_qmp_dp_clks_hw_get, dp_clks);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+static const struct phy_ops qcom_qmp_phy_combo_usb_ops = {
+ .init = qcom_qmp_phy_combo_enable,
+ .exit = qcom_qmp_phy_combo_disable,
+ .set_mode = qcom_qmp_phy_combo_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static const struct phy_ops qcom_qmp_phy_combo_dp_ops = {
+ .init = qcom_qmp_phy_combo_init,
+ .configure = qcom_qmp_dp_phy_configure,
+ .power_on = qcom_qmp_phy_combo_power_on,
+ .calibrate = qcom_qmp_dp_phy_calibrate,
+ .power_off = qcom_qmp_phy_combo_power_off,
+ .exit = qcom_qmp_phy_combo_exit,
+ .set_mode = qcom_qmp_phy_combo_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static
+int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ const struct phy_ops *ops;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ if (cfg->pcs_usb_offset)
+ qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset;
+
+ /*
+ * If this is a dual-lane PHY, then there should be registers for the
+	 * second lane. Some old device trees did not specify this, so fall
+	 * back to the legacy behavior of assuming they can be reached at a
+	 * fixed offset from the first lane.
+ */
+ if (cfg->is_dual_lane_phy) {
+ qphy->tx2 = of_iomap(np, 3);
+ qphy->rx2 = of_iomap(np, 4);
+ if (!qphy->tx2 || !qphy->rx2) {
+ dev_warn(dev,
+ "Underspecified device tree, falling back to legacy register regions\n");
+
+ /* In the old version, pcs_misc is at index 3. */
+ qphy->pcs_misc = qphy->tx2;
+ qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
+ qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 5);
+ }
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 3);
+ }
+
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+	/*
+	 * Get the PHY's pipe clock, if any. USB3 PHYs are PIPE3 based and
+	 * therefore require a pipe clock, so a missing clock is an error
+	 * for them. For PHYs that do not need it (such as DP), the pipe
+	 * clock is simply initialized to NULL.
+	 */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (cfg->type == PHY_TYPE_USB3) {
+ ret = PTR_ERR(qphy->pipe_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get lane%d pipe_clk, %d\n",
+ id, ret);
+ return ret;
+ }
+ qphy->pipe_clk = NULL;
+ }
+
+ if (cfg->type == PHY_TYPE_DP)
+ ops = &qcom_qmp_phy_combo_dp_ops;
+ else
+ ops = &qcom_qmp_phy_combo_usb_ops;
+
+ generic_phy = devm_phy_create(dev, np, ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,sc7180-qmp-usb3-dp-phy",
+ .data = &sc7180_usb3dpphy_cfg,
+ },
+ {
+ .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
+ .data = &sm8250_usb3dpphy_cfg,
+ },
+ {
+ .compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
+ .data = &sc8180x_usb3dpphy_cfg,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_combo_phy_of_match_table);
+
+static const struct dev_pm_ops qcom_qmp_phy_combo_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qmp_phy_combo_runtime_suspend,
+ qcom_qmp_phy_combo_runtime_resume, NULL)
+};
+
+static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *serdes;
+ void __iomem *usb_serdes;
+ void __iomem *dp_serdes = NULL;
+ const struct qmp_phy_combo_cfg *combo_cfg = NULL;
+ const struct qmp_phy_cfg *cfg = NULL;
+ const struct qmp_phy_cfg *usb_cfg = NULL;
+ const struct qmp_phy_cfg *dp_cfg = NULL;
+ int num, id, expected_phys;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ combo_cfg = of_device_get_match_data(dev);
+ if (!combo_cfg)
+ return -EINVAL;
+
+ usb_cfg = combo_cfg->usb_cfg;
+ cfg = usb_cfg; /* Setup clks and regulators */
+
+ /* per PHY serdes; usually located at base address */
+ usb_serdes = serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ /* per PHY dp_com; if PHY has dp_com control block */
+ if (cfg->has_phy_dp_com_ctrl) {
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
+ }
+
+ /* Only two serdes for combo PHY */
+ dp_serdes = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(dp_serdes))
+ return PTR_ERR(dp_serdes);
+
+ dp_cfg = combo_cfg->dp_cfg;
+ expected_phys = 2;
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qcom_qmp_phy_combo_clk_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_combo_reset_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_combo_vreg_init(dev, cfg);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > expected_phys)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ id = 0;
+ for_each_available_child_of_node(dev->of_node, child) {
+ if (of_node_name_eq(child, "dp-phy")) {
+ cfg = dp_cfg;
+ serdes = dp_serdes;
+
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ goto err_node_put;
+ }
+
+ ret = phy_dp_clks_register(qmp, qmp->phys[id], child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register DP clock source\n");
+ goto err_node_put;
+ }
+ } else if (of_node_name_eq(child, "usb3-phy")) {
+ cfg = usb_cfg;
+ serdes = usb_serdes;
+
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ goto err_node_put;
+ }
+
+			/*
+			 * Register the pipe clock provided by the PHY.
+			 * See phy_pipe_clk_register() for details.
+			 */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ goto err_node_put;
+ }
+ }
+
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
+}
+
+static struct platform_driver qcom_qmp_phy_combo_driver = {
+ .probe = qcom_qmp_phy_combo_probe,
+ .driver = {
+ .name = "qcom-qmp-combo-phy",
+ .pm = &qcom_qmp_phy_combo_pm_ops,
+ .of_match_table = qcom_qmp_combo_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_combo_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP USB+DP combo PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
new file mode 100644
index 000000000000..be6a94439b6c
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -0,0 +1,1054 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees. */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+	 * true if this register is part of the reg-layout;
+	 * if so, offset is an index into the layout array
+ */
+ bool in_layout;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* PCS_MISC registers */
+ QPHY_PCS_MISC_TYPEC_CTRL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x174,
+};
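+
+/*
+ * Illustrative lookup through the layout table above: with this layout,
+ * cfg->regs[QPHY_PCS_STATUS] resolves to offset 0x174, so a poll such as
+ * readl_poll_timeout(pcs + cfg->regs[QPHY_PCS_STATUS], ...) in the
+ * power-on path reads the PCS status register of this PHY generation.
+ */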
+
+static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x05),
+
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_DOWN_CONTROL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG1, 0xa3),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
+};
+
+struct qmp_phy;
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_sec;
+ int serdes_tbl_num_sec;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl_sec;
+ int tx_tbl_num_sec;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl_sec;
+ int rx_tbl_num_sec;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl_sec;
+ int pcs_tbl_num_sec;
+ const struct qmp_phy_init_tbl *pcs_misc_tbl;
+ int pcs_misc_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
+ int pcs_misc_tbl_num_sec;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_com_pcs_ready;
+ /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
+
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ * @mode: current PHY mode
+ */
+struct qmp_phy {
+ struct phy *phy;
+ const struct qmp_phy_cfg *cfg;
+ void __iomem *serdes;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+ enum phy_mode mode;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ */
+struct qcom_qmp {
+ struct device *dev;
+
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+/* list of resets */
+static const char * const msm8996_pciephy_reset_l[] = {
+ "phy", "common", "cfg",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 3,
+
+ .serdes_tbl = msm8996_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
+ .tx_tbl = msm8996_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
+ .rx_tbl = msm8996_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
+ .pcs_tbl = msm8996_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static void qcom_qmp_phy_pcie_msm8996_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
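+
+/*
+ * Illustrative use of the lane mask handled above: an entry created with,
+ * say, QMP_PHY_INIT_CFG_LANE(<offset>, <val>, 0x2) is written only when
+ * this helper is called with bit 1 set in lane_mask, while plain
+ * QMP_PHY_INIT_CFG() entries (lane_mask 0xff) are written for every lane.
+ */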
+
+static void qcom_qmp_phy_pcie_msm8996_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff);
+}
+
+static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret;
+
+ qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ if (cfg->serdes_tbl_sec)
+ qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
+ cfg->serdes_tbl_num_sec);
+
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_com_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ int ret;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ goto err_unlock;
+ }
+
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+err_unlock:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_com_exit(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+	int ret;
+
+	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_pcie_msm8996_com_init(qphy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+ qcom_qmp_phy_pcie_msm8996_serdes_init(qphy);
+
+ ret = reset_control_deassert(qphy->lane_rst);
+ if (ret) {
+ dev_err(qmp->dev, "lane%d reset deassert failed\n",
+ qphy->index);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ goto err_reset_lane;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ if (cfg->tx_tbl_sec)
+ qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
+ cfg->tx_tbl_num_sec, 1);
+
+ qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ if (cfg->rx_tbl_sec)
+ qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+
+ qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (cfg->pcs_tbl_sec)
+ qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
+ cfg->pcs_tbl_num_sec);
+
+ qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
+ cfg->pcs_misc_tbl_num);
+ if (cfg->pcs_misc_tbl_sec)
+ qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
+ cfg->pcs_misc_tbl_num_sec);
+
+ /*
+	 * Pull the PHY out of the POWER DOWN state.
+	 * The power-down control is active low, so setting these bits
+	 * powers the PHY back up.
+ */
+ qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+	/* start the SerDes and Physical Coding Sublayer (PCS) */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = cfg->phy_status;
+ ready = 0;
+
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qphy->pipe_clk);
+err_reset_lane:
+ reset_control_assert(qphy->lane_rst);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_power_off(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+	/* stop the SerDes and Physical Coding Sublayer (PCS) */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ reset_control_assert(qphy->lane_rst);
+
+ qcom_qmp_phy_pcie_msm8996_com_exit(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_pcie_msm8996_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_pcie_msm8996_power_on(phy);
+ if (ret)
+ qcom_qmp_phy_pcie_msm8996_exit(phy);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_pcie_msm8996_power_off(phy);
+ if (ret)
+ return ret;
+ return qcom_qmp_phy_pcie_msm8996_exit(phy);
+}
+
+static int qcom_qmp_phy_pcie_msm8996_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qphy->mode = mode;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_resets; i++)
+ qmp->resets[i].id = cfg->reset_list[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the <s>_pipe_clksrc here; the GCC driver takes care of
+ * assigning it as the parent of <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
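+
+/*
+ * Illustrative (hypothetical) device-tree fragment for a lane node feeding
+ * this registration; the node and clock names are examples only:
+ *
+ *	pciephy_0: phy@35000 {
+ *		#clock-cells = <0>;
+ *		clock-output-names = "pcie_0_pipe_clk_src";
+ *	};
+ *
+ * clock-output-names supplies init.name above, and #clock-cells = <0>
+ * matches the of_clk_hw_simple_get() provider.
+ */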
+
+static const struct phy_ops qcom_qmp_phy_pcie_msm8996_ops = {
+ .power_on = qcom_qmp_phy_pcie_msm8996_enable,
+ .power_off = qcom_qmp_phy_pcie_msm8996_disable,
+ .set_mode = qcom_qmp_phy_pcie_msm8996_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static void qcom_qmp_reset_control_put(void *data)
+{
+ reset_control_put(data);
+}
+
+static
+int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ qphy->pcs_misc = of_iomap(np, 3);
+
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
+ "failed to get lane%d pipe clock\n", id);
+ }
+
+ /* Get lane reset, if any */
+ snprintf(prop_name, sizeof(prop_name), "lane%d", id);
+ qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
+ if (IS_ERR(qphy->lane_rst)) {
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
+ ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
+ qphy->lane_rst);
+ if (ret)
+ return ret;
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_msm8996_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_pcie_msm8996_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-pcie-phy",
+ .data = &msm8996_pciephy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_msm8996_of_match_table);
+
+static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *serdes;
+ const struct qmp_phy_cfg *cfg = NULL;
+ int num, id, expected_phys;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+
+ /* per PHY serdes; usually located at base address */
+ serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ expected_phys = cfg->nlanes;
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qcom_qmp_phy_pcie_msm8996_clk_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_pcie_msm8996_reset_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_pcie_msm8996_vreg_init(dev, cfg);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > expected_phys)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ id = 0;
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_pcie_msm8996_create(dev, child, id, serdes, cfg);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ goto err_node_put;
+ }
+
+		/*
+		 * Register the pipe clock provided by the PHY.
+		 * See phy_pipe_clk_register() for details.
+		 */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ goto err_node_put;
+ }
+
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
+}
+
+static struct platform_driver qcom_qmp_phy_pcie_msm8996_driver = {
+ .probe = qcom_qmp_phy_pcie_msm8996_probe,
+ .driver = {
+ .name = "qcom-qmp-msm8996-pcie-phy",
+ .of_match_table = qcom_qmp_phy_pcie_msm8996_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_pcie_msm8996_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP MSM8996 PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h
new file mode 100644
index 000000000000..e4a4d2cd85eb
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-qhp.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCIE_QHP_H_
+#define QCOM_PHY_QMP_PCIE_QHP_H_
+
+/* PCIE GEN3 COM registers */
+#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14
+#define PCIE_GEN3_QHP_COM_SSC_PER1 0x20
+#define PCIE_GEN3_QHP_COM_SSC_PER2 0x24
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1 0x28
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2 0x2c
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1 0x34
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1 0x38
+#define PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN 0x54
+#define PCIE_GEN3_QHP_COM_CLK_ENABLE1 0x58
+#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0 0x6c
+#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0 0x70
+#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1 0x78
+#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1 0x7c
+#define PCIE_GEN3_QHP_COM_BGV_TRIM 0x98
+#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE0 0xb4
+#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE1 0xb8
+#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0 0xc0
+#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1 0xc4
+#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0 0xcc
+#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1 0xd0
+#define PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL 0xdc
+#define PCIE_GEN3_QHP_COM_RESTRIM_CTRL2 0xf0
+#define PCIE_GEN3_QHP_COM_LOCK_CMP_EN 0xf8
+#define PCIE_GEN3_QHP_COM_DEC_START_MODE0 0x100
+#define PCIE_GEN3_QHP_COM_DEC_START_MODE1 0x108
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0 0x11c
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0 0x120
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0 0x124
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1 0x128
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1 0x12c
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1 0x130
+#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0 0x150
+#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1 0x158
+#define PCIE_GEN3_QHP_COM_VCO_TUNE_MAP 0x178
+#define PCIE_GEN3_QHP_COM_BG_CTRL 0x1c8
+#define PCIE_GEN3_QHP_COM_CLK_SELECT 0x1cc
+#define PCIE_GEN3_QHP_COM_HSCLK_SEL1 0x1d0
+#define PCIE_GEN3_QHP_COM_CORECLK_DIV 0x1e0
+#define PCIE_GEN3_QHP_COM_CORE_CLK_EN 0x1e8
+#define PCIE_GEN3_QHP_COM_CMN_CONFIG 0x1f0
+#define PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL 0x1fc
+#define PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1 0x21c
+#define PCIE_GEN3_QHP_COM_CMN_MODE 0x224
+#define PCIE_GEN3_QHP_COM_VREGCLK_DIV1 0x228
+#define PCIE_GEN3_QHP_COM_VREGCLK_DIV2 0x22c
+
+/* PCIE GEN3 QHP Lane registers */
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL0 0xc
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL1 0x10
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL2 0x14
+#define PCIE_GEN3_QHP_L0_DRVR_TAP_EN 0x18
+#define PCIE_GEN3_QHP_L0_TX_BAND_MODE 0x60
+#define PCIE_GEN3_QHP_L0_LANE_MODE 0x64
+#define PCIE_GEN3_QHP_L0_PARALLEL_RATE 0x7c
+#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE0 0xc0
+#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE1 0xc4
+#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE2 0xc8
+#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1 0xd0
+#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2 0xd4
+#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0 0xd8
+#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1 0xdc
+#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2 0xe0
+#define PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE 0xfc
+#define PCIE_GEN3_QHP_L0_CGA_THRESH_DFE 0x100
+#define PCIE_GEN3_QHP_L0_RXENGINE_EN0 0x108
+#define PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME 0x114
+#define PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME 0x118
+#define PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME 0x11c
+#define PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME 0x120
+#define PCIE_GEN3_QHP_L0_VGA_GAIN 0x124
+#define PCIE_GEN3_QHP_L0_DFE_GAIN 0x128
+#define PCIE_GEN3_QHP_L0_EQ_GAIN 0x130
+#define PCIE_GEN3_QHP_L0_OFFSET_GAIN 0x134
+#define PCIE_GEN3_QHP_L0_PRE_GAIN 0x138
+#define PCIE_GEN3_QHP_L0_VGA_INITVAL 0x13c
+#define PCIE_GEN3_QHP_L0_EQ_INTVAL 0x154
+#define PCIE_GEN3_QHP_L0_EDAC_INITVAL 0x160
+#define PCIE_GEN3_QHP_L0_RXEQ_INITB0 0x168
+#define PCIE_GEN3_QHP_L0_RXEQ_INITB1 0x16c
+#define PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1 0x178
+#define PCIE_GEN3_QHP_L0_RXEQ_CTRL 0x180
+#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0 0x184
+#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1 0x188
+#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2 0x18c
+#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0 0x190
+#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1 0x194
+#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2 0x198
+#define PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG 0x19c
+#define PCIE_GEN3_QHP_L0_RX_BAND 0x1a4
+#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0 0x1c0
+#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1 0x1c4
+#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2 0x1c8
+#define PCIE_GEN3_QHP_L0_SIGDET_ENABLES 0x230
+#define PCIE_GEN3_QHP_L0_SIGDET_CNTRL 0x234
+#define PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL 0x238
+#define PCIE_GEN3_QHP_L0_DCC_GAIN 0x2a4
+#define PCIE_GEN3_QHP_L0_RSM_START 0x2a8
+#define PCIE_GEN3_QHP_L0_RX_EN_SIGNAL 0x2ac
+#define PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL 0x2b0
+#define PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0 0x2b8
+#define PCIE_GEN3_QHP_L0_TS0_TIMER 0x2c0
+#define PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE 0x2c4
+#define PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET 0x2cc
+
+/* PCIE GEN3 PCS registers */
+#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB 0x2c
+#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB 0x40
+#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB 0x54
+#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB 0x68
+#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG 0x15c
+#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c
+#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
new file mode 100644
index 000000000000..2d65e1f56bfc
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees. */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+	 * true if this register is part of the reg-layout;
+	 * if so, offset is an index into the layout array
+ */
+ bool in_layout;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* PCS_MISC registers */
+ QPHY_PCS_MISC_TYPEC_CTRL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int ipq_pciephy_gen3_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+};
+
+static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x174,
+};
+
+static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x174,
+};
+
+static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x2ac,
+};
+
+static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15),
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x73),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x01),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x11),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_TX_EMP_POST1_LVL, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0xe),
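+	/* _L entries are looked up through the regs layout rather than written at a raw offset */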
+ QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0),
+ QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0x355),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0x35555),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0x1a0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0xb),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x2aa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x2aaab),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0x3414),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0xe),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x73),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNT_VAL_L, 0x9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x1),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x11),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0xb),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x6),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x007),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_01, 0x59),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xbb),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG4, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5, 0x06),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL, 0x27),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_ENABLE1, 0xb0),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0, 0x8c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0, 0x20),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_RESTRIM_CTRL2, 0x05),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VCO_TUNE_MAP, 0x10),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_SELECT, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_HSCLK_SEL1, 0x30),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORE_CLK_EN, 0x73),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV1, 0x22),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV2, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BGV_TRIM, 0x20),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BG_CTRL, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL0, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_TAP_EN, 0x0d),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TX_BAND_MODE, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_LANE_MODE, 0x1a),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PARALLEL_RATE, 0x2f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE2, 0x1b),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0, 0x31),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1, 0x31),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2, 0x03),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE, 0x02),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CGA_THRESH_DFE, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXENGINE_EN0, 0x12),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME, 0x25),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME, 0x05),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_GAIN, 0x26),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_GAIN, 0x12),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_OFFSET_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PRE_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_INTVAL, 0x15),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EDAC_INITVAL, 0x28),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB0, 0x7f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB1, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_CTRL, 0x70),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0, 0x8b),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2, 0x0a),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_BAND, 0x02),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0, 0x5c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1, 0x3e),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_ENABLES, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_CNTRL, 0xa0),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL, 0x08),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DCC_GAIN, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_EN_SIGNAL, 0xc3),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0, 0xbc),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TS0_TIMER, 0x7f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE, 0x15),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_INITVAL, 0x20),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RSM_START, 0x01),
+};
+
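+/*
+ * The QHP lane block mixes tx and rx registers in a single table (note the
+ * L0_RX* entries above), so the dedicated rx table stays empty.
+ */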
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_rx_tbl[] = {
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG, 0x50),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB, 0x19),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB, 0x17),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5, 0x9f),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x5),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x6e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x6e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x39),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x39),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x30),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE, 0x33),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC2, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MODE, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_DC_LEVEL_CTRL, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x56),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x22),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_VMODE_CTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_PI_QEC_CTRL, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_3, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_VGA_CAL_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B0, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B1, 0xf9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B3, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B4, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B0, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B3, 0xcf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B4, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_PHPRE_CTRL, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_MARG_COARSE_CTRL2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG5, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_EQ_CONFIG1, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
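+/*
+ * Illustrative sketch of how the init tables above are applied (the helper,
+ * field and parameter names here are assumptions based on the usual QMP
+ * pattern): each entry boils down to a register write into the given block.
+ *
+ *	static void qmp_phy_write_tbl(void __iomem *base,
+ *				      const struct qmp_phy_init_tbl tbl[],
+ *				      int num)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < num; i++)
+ *			writel(tbl[i].val, base + tbl[i].offset);
+ *	}
+ */
+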
+struct qmp_phy;
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
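+	/* *_sec tables, when set, are programmed after their primary tables */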
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_sec;
+ int serdes_tbl_num_sec;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl_sec;
+ int tx_tbl_num_sec;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl_sec;
+ int rx_tbl_num_sec;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl_sec;
+ int pcs_tbl_num_sec;
+ const struct qmp_phy_init_tbl *pcs_misc_tbl;
+ int pcs_misc_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
+ int pcs_misc_tbl_num_sec;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+	/* maps generic QPHY_* register indices to this PHY's register offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_com_pcs_ready;
+ /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
+
+	/* true if the PHY needs a delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+	/* true if the PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
+
+ /* QMP PHY pipe clock interface rate */
+ unsigned long pipe_clock_rate;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
+ * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @mode: current PHY mode
+ */
+struct qmp_phy {
+ struct phy *phy;
+ const struct qmp_phy_cfg *cfg;
+ void __iomem *serdes;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *tx2;
+ void __iomem *rx2;
+ void __iomem *pcs_misc;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ enum phy_mode mode;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ */
+struct qcom_qmp {
+ struct device *dev;
+
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ struct qmp_phy **phys;
+};
+
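+/*
+ * Read-modify-write helpers used for the register pokes below; the
+ * read back ensures the write has reached the PHY before returning.
+ */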
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+
+static const char * const sdm845_pciephy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "refgen",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const char * const ipq8074_pciephy_clk_l[] = {
+ "aux", "cfg_ahb",
+};
+
+/* list of resets */
+static const char * const ipq8074_pciephy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const sdm845_pciephy_reset_l[] = {
+ "phy",
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+ .tx_tbl = ipq8074_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+ .rx_tbl = ipq8074_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_pcie_gen3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
+ .tx_tbl = ipq8074_pcie_gen3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
+ .rx_tbl = ipq8074_pcie_gen3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_gen3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl),
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = ipq_pciephy_gen3_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+
+ .pipe_clock_rate = 250000000,
+};
+
+static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq6018_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
+ .tx_tbl = ipq6018_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl),
+ .rx_tbl = ipq6018_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl),
+ .pcs_tbl = ipq6018_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
+ .pcs_misc_tbl = ipq6018_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = ipq_pciephy_gen3_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
+ .tx_tbl = sdm845_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
+ .rx_tbl = sdm845_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
+ .pcs_tbl = sdm845_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
+ .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_qmp_pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
+ .tx_tbl = sdm845_qhp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
+ .rx_tbl = sdm845_qhp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
+ .pcs_tbl = sdm845_qhp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_qhp_pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+ .serdes_tbl_sec = sm8250_qmp_gen3x1_pcie_serdes_tbl,
+ .serdes_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
+ .tx_tbl = sm8250_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+ .rx_tbl = sm8250_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+ .rx_tbl_sec = sm8250_qmp_gen3x1_pcie_rx_tbl,
+ .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
+ .pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+ .pcs_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_tbl,
+ .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+ .pcs_misc_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+ .tx_tbl = sm8250_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+ .tx_tbl_sec = sm8250_qmp_gen3x2_pcie_tx_tbl,
+ .tx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
+ .rx_tbl = sm8250_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+ .rx_tbl_sec = sm8250_qmp_gen3x2_pcie_rx_tbl,
+ .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
+ .pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+ .pcs_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_tbl,
+ .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+ .pcs_misc_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8998_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
+ .tx_tbl = msm8998_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8998_pcie_tx_tbl),
+ .rx_tbl = msm8998_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8998_pcie_rx_tbl),
+ .pcs_tbl = msm8998_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
+static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sc8180x_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
+ .tx_tbl = sc8180x_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
+ .rx_tbl = sc8180x_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
+ .pcs_tbl = sc8180x_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
+ .pcs_misc_tbl = sc8180x_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+	.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
+ .tx_tbl = sdx55_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
+ .rx_tbl = sdx55_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
+ .pcs_tbl = sdx55_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
+ .pcs_misc_tbl = sdx55_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS_4_20,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
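+/*
+ * Write an init table to a PHY register region. Entries whose lane mask
+ * does not match @lane_mask are skipped; entries marked "in_layout" are
+ * written via the per-version register layout instead of a raw offset.
+ */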
+static void qcom_qmp_phy_pcie_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
+static void qcom_qmp_phy_pcie_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_pcie_configure_lane(base, regs, tbl, num, 0xff);
+}
+
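+/* Program the common SerDes/PLL block from the primary and optional secondary tables. */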
+static int qcom_qmp_phy_pcie_serdes_init(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+
+ qcom_qmp_phy_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ if (cfg->serdes_tbl_sec)
+ qcom_qmp_phy_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
+ cfg->serdes_tbl_num_sec);
+
+ return 0;
+}
+
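+/*
+ * Common init: enable the supplies, cycle the resets, enable the clocks
+ * and take the PCS out of software power down.
+ */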
+static int qcom_qmp_phy_pcie_com_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs = qphy->pcs;
+ int ret;
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+ qphy_setbits(pcs,
+ cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ else
+ qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
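+/* Undo com_init: assert the resets and disable the clocks and supplies. */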
+static int qcom_qmp_phy_pcie_com_exit(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ int ret;
+
+	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_pcie_com_init(qphy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
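+/*
+ * Bring the PHY up: program the serdes, enable the pipe clock, program
+ * the per-lane tx/rx and the PCS/PCS_MISC blocks, release SW_RESET,
+ * start the SerDes and PCS and poll until the PHY reports ready.
+ */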
+static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+ qcom_qmp_phy_pcie_serdes_init(qphy);
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ if (cfg->tx_tbl_sec)
+ qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
+ cfg->tx_tbl_num_sec, 1);
+
+	/* Configure the second lane of a dual-lane PHY */
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->tx_tbl_sec)
+ qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl_sec,
+ cfg->tx_tbl_num_sec, 2);
+ }
+
+ qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ if (cfg->rx_tbl_sec)
+ qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->rx_tbl_sec)
+ qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl_sec,
+ cfg->rx_tbl_num_sec, 2);
+ }
+
+ qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (cfg->pcs_tbl_sec)
+ qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
+ cfg->pcs_tbl_num_sec);
+
+ qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
+ cfg->pcs_misc_tbl_num);
+ if (cfg->pcs_misc_tbl_sec)
+ qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
+ cfg->pcs_misc_tbl_num_sec);
+
+ /*
+	 * Pull the PHY out of the POWER DOWN state.
+	 * The power-down control signal is active low.
+ */
+ qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = cfg->phy_status;
+ ready = 0;
+
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ return ret;
+}
+
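+/*
+ * Power the PHY down: stop the pipe clock, assert SW_RESET, stop the
+ * SerDes and PCS and put the PHY back into power down.
+ */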
+static int qcom_qmp_phy_pcie_power_off(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qcom_qmp_phy_pcie_com_exit(qphy);
+
+ return 0;
+}
+
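+/* phy_ops .power_on/.power_off callbacks: combine common init/exit with the power sequence. */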
+static int qcom_qmp_phy_pcie_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_pcie_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_pcie_power_on(phy);
+ if (ret)
+ qcom_qmp_phy_pcie_exit(phy);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_pcie_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_pcie_power_off(phy);
+ if (ret)
+ return ret;
+ return qcom_qmp_phy_pcie_exit(phy);
+}
+
+static int qcom_qmp_phy_pcie_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qphy->mode = mode;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_resets; i++)
+ qmp->resets[i].id = cfg->reset_list[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qcom_qmp_phy_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /*
+	 * Controllers using QMP PHYs use a 125 MHz pipe clock interface
+	 * unless a different frequency is specified in the PHY config.
+ */
+ if (qmp->phys[0]->cfg->pipe_clock_rate)
+ fixed->fixed_rate = qmp->phys[0]->cfg->pipe_clock_rate;
+ else
+ fixed->fixed_rate = 125000000;
+
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+static const struct phy_ops qcom_qmp_phy_pcie_ops = {
+ .power_on = qcom_qmp_phy_pcie_enable,
+ .power_off = qcom_qmp_phy_pcie_disable,
+ .set_mode = qcom_qmp_phy_pcie_set_mode,
+ .owner = THIS_MODULE,
+};
+
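+/*
+ * Create one per-lane PHY: map the child node's tx/rx/pcs (and, for
+ * dual-lane PHYs, tx2/rx2) regions, get its pipe clock and register a
+ * generic PHY for it.
+ */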
+static
+int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ /*
+ * If this is a dual-lane PHY, then there should be registers for the
+ * second lane. Some old device trees did not specify this, so fall
+ * back to old legacy behavior of assuming they can be reached at an
+ * offset from the first lane.
+ */
+ if (cfg->is_dual_lane_phy) {
+ qphy->tx2 = of_iomap(np, 3);
+ qphy->rx2 = of_iomap(np, 4);
+ if (!qphy->tx2 || !qphy->rx2) {
+ dev_warn(dev,
+ "Underspecified device tree, falling back to legacy register regions\n");
+
+ /* In the old version, pcs_misc is at index 3. */
+ qphy->pcs_misc = qphy->tx2;
+ qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
+ qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 5);
+ }
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 3);
+ }
+
+ if (!qphy->pcs_misc &&
+ of_device_is_compatible(dev->of_node, "qcom,ipq6018-qmp-pcie-phy"))
+ qphy->pcs_misc = qphy->pcs + 0x400;
+
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
+ "failed to get lane%d pipe clock\n", id);
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_pcie_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8998-qmp-pcie-phy",
+ .data = &msm8998_pciephy_cfg,
+ }, {
+ .compatible = "qcom,ipq8074-qmp-pcie-phy",
+ .data = &ipq8074_pciephy_cfg,
+ }, {
+ .compatible = "qcom,ipq8074-qmp-gen3-pcie-phy",
+ .data = &ipq8074_pciephy_gen3_cfg,
+ }, {
+ .compatible = "qcom,ipq6018-qmp-pcie-phy",
+ .data = &ipq6018_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sc8180x-qmp-pcie-phy",
+ .data = &sc8180x_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qhp-pcie-phy",
+ .data = &sdm845_qhp_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-pcie-phy",
+ .data = &sdm845_qmp_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy",
+ .data = &sm8250_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy",
+ .data = &sm8250_qmp_gen3x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-modem-pcie-phy",
+ .data = &sm8250_qmp_gen3x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdx55-qmp-pcie-phy",
+ .data = &sdx55_qmp_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy",
+ .data = &sm8450_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy",
+ .data = &sm8450_qmp_gen4x2_pciephy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_of_match_table);
+
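+/*
+ * Probe: map the serdes region, look up the clocks, resets and
+ * regulators listed in the matched config, then register one PHY and
+ * its pipe clock per child node.
+ */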
+static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *serdes;
+ const struct qmp_phy_cfg *cfg = NULL;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+
+ /* per PHY serdes; usually located at base address */
+ serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ ret = qcom_qmp_phy_pcie_clk_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_pcie_reset_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_pcie_vreg_init(dev, cfg);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > 1)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ id = 0;
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_pcie_create(dev, child, id, serdes, cfg);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ goto err_node_put;
+ }
+
+ /*
+ * Register the pipe clock provided by phy.
+ * See function description to see details of this pipe clock.
+ */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ goto err_node_put;
+ }
+
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
+}
+
+static struct platform_driver qcom_qmp_phy_pcie_driver = {
+ .probe = qcom_qmp_phy_pcie_probe,
+ .driver = {
+ .name = "qcom-qmp-pcie-phy",
+ .of_match_table = qcom_qmp_phy_pcie_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_pcie_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h
new file mode 100644
index 000000000000..a45bd301bc9e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v3.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_MISC_V3_H_
+#define QCOM_PHY_QMP_PCS_MISC_V3_H_
+
+/* Only for QMP V3 PHY - PCS_MISC registers */
+#define QPHY_V3_PCS_MISC_CLAMP_ENABLE 0x0c
+#define QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2 0x2c
+#define QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1 0x44
+#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2 0x54
+#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
+#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h
new file mode 100644
index 000000000000..4cc02288d418
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V4_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V4_H_
+
+/* Only for QMP V4 PHY - PCS_PCIE registers (same as PCS_MISC?) */
+#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_STATUS 0x00
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_STATUS 0x04
+#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG1 0x08
+#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2 0x0c
+#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG3 0x10
+#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4 0x14
+#define QPHY_V4_PCS_PCIE_PCS_TX_RX_CONFIG 0x18
+#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x1c
+#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_CNTRL 0x20
+#define QPHY_V4_PCS_PCIE_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK 0x24
+#define QPHY_V4_PCS_PCIE_EPCLK_DLY_COUNT_VAL_L 0x28
+#define QPHY_V4_PCS_PCIE_EPCLK_DLY_COUNT_VAL_H 0x2c
+#define QPHY_V4_PCS_PCIE_RX_IDLE_DTCT_CNTRL1 0x30
+#define QPHY_V4_PCS_PCIE_RX_IDLE_DTCT_CNTRL2 0x34
+#define QPHY_V4_PCS_PCIE_SIGDET_CNTRL 0x38
+#define QPHY_V4_PCS_PCIE_SIGDET_LOW_2_IDLE_TIME 0x3c
+#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x40
+#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x44
+#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x48
+#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x4c
+#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x50
+#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG2 0x54
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG1 0x58
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2 0x5c
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG3 0x60
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG4 0x64
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG5 0x68
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG6 0x6c
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG7 0x70
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG1 0x74
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x78
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG3 0x7c
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x80
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x84
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6 0x88
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG7 0x8c
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS 0x90
+#define QPHY_V4_PCS_PCIE_LOCAL_FS 0x94
+#define QPHY_V4_PCS_PCIE_LOCAL_LF 0x98
+#define QPHY_V4_PCS_PCIE_LOCAL_FS_RS 0x9c
+#define QPHY_V4_PCS_PCIE_EQ_CONFIG1 0xa0
+#define QPHY_V4_PCS_PCIE_EQ_CONFIG2 0xa4
+#define QPHY_V4_PCS_PCIE_PRESET_P0_P1_PRE 0xa8
+#define QPHY_V4_PCS_PCIE_PRESET_P2_P3_PRE 0xac
+#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_PRE 0xb0
+#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE 0xb4
+#define QPHY_V4_PCS_PCIE_PRESET_P8_P9_PRE 0xb8
+#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc
+#define QPHY_V4_PCS_PCIE_PRESET_P1_P3_PRE_RS 0xc0
+#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_PRE_RS 0xc4
+#define QPHY_V4_PCS_PCIE_PRESET_P6_P9_PRE_RS 0xc8
+#define QPHY_V4_PCS_PCIE_PRESET_P0_P1_POST 0xcc
+#define QPHY_V4_PCS_PCIE_PRESET_P2_P3_POST 0xd0
+#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_POST 0xd4
+#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_POST 0xd8
+#define QPHY_V4_PCS_PCIE_PRESET_P8_P9_POST 0xdc
+#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0
+#define QPHY_V4_PCS_PCIE_PRESET_P1_P3_POST_RS 0xe4
+#define QPHY_V4_PCS_PCIE_PRESET_P4_P5_POST_RS 0xe8
+#define QPHY_V4_PCS_PCIE_PRESET_P6_P9_POST_RS 0xec
+#define QPHY_V4_PCS_PCIE_RXEQEVAL_TIME 0xf0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
new file mode 100644
index 000000000000..af273602998e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V4_20_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V4_20_H_
+
+#define QPHY_V4_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME 0x0f0
+#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
+#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
new file mode 100644
index 000000000000..2e19fb3f051e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V5_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V5_H_
+
+/* Only for QMP V5 PHY - PCS_PCIE registers */
+#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
+#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
new file mode 100644
index 000000000000..1eedf50cf9cb
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V5_20_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V5_20_H_
+
+/* Only for QMP V5_20 PHY - PCIe PCS registers */
+#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
+#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
+#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
+#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
new file mode 100644
index 000000000000..ba1ea29d2884
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V3_H_
+#define QCOM_PHY_QMP_PCS_UFS_V3_H_
+
+#define QPHY_V3_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x02c
+#define QPHY_V3_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x034
+#define QPHY_V3_PCS_UFS_RX_SYM_RESYNC_CTRL 0x134
+#define QPHY_V3_PCS_UFS_RX_MIN_HIBERN8_TIME 0x138
+#define QPHY_V3_PCS_UFS_RX_SIGDET_CTRL1 0x13c
+#define QPHY_V3_PCS_UFS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_UFS_TX_MID_TERM_CTRL1 0x1bc
+#define QPHY_V3_PCS_UFS_MULTI_LANE_CTRL1 0x1c4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h
new file mode 100644
index 000000000000..a1c7d3d17150
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v4.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V4_H_
+#define QCOM_PHY_QMP_PCS_UFS_V4_H_
+
+/* Only for QMP V4 PHY - UFS PCS registers */
+#define QPHY_V4_PCS_UFS_PHY_START 0x000
+#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_PCS_UFS_SW_RESET 0x008
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_PCS_UFS_LINECFG_DISABLE 0x148
+#define QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_UFS_READY_STATUS 0x180
+#define QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
new file mode 100644
index 000000000000..bcca23493b7e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V5_H_
+#define QCOM_PHY_QMP_PCS_UFS_V5_H_
+
+/* Only for QMP V5 PHY - UFS PCS registers */
+#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V5_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1 0x154
+#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h
new file mode 100644
index 000000000000..d7fd4ac0fc55
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v4.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V4_H_
+#define QCOM_PHY_QMP_PCS_USB_V4_H_
+
+/* Only for QMP V4 PHY - USB3 PCS registers */
+#define QPHY_V4_PCS_USB3_POWER_STATE_CONFIG1 0x000
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x004
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x008
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x00c
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x010
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x014
+#define QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x018
+#define QPHY_V4_PCS_USB3_LFPS_TX_ECSTART 0x01c
+#define QPHY_V4_PCS_USB3_LFPS_PER_TIMER_VAL 0x020
+#define QPHY_V4_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x024
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x028
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x02c
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x030
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x034
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x038
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x03c
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x040
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x044
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_CM_DLY 0x048
+#define QPHY_V4_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x04c
+#define QPHY_V4_PCS_USB3_ALFPS_DEGLITCH_VAL 0x050
+#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x054
+#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x058
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h
new file mode 100644
index 000000000000..73de626223ed
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V5_H_
+#define QCOM_PHY_QMP_PCS_USB_V5_H_
+
+/* Only for QMP V5 PHY - USB3 PCS registers have different offsets than V4 */
+#define QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1 0x000
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x004
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x008
+#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x00c
+#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x010
+#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x014
+#define QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x018
+#define QPHY_V5_PCS_USB3_LFPS_TX_ECSTART 0x01c
+#define QPHY_V5_PCS_USB3_LFPS_PER_TIMER_VAL 0x020
+#define QPHY_V5_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x024
+#define QPHY_V5_PCS_USB3_LFPS_CONFIG1 0x028
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x02c
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x030
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x034
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x038
+#define QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x03c
+#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x040
+#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x044
+#define QPHY_V5_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x048
+#define QPHY_V5_PCS_USB3_ARCVR_DTCT_CM_DLY 0x04c
+#define QPHY_V5_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x050
+#define QPHY_V5_PCS_USB3_ALFPS_DEGLITCH_VAL 0x054
+#define QPHY_V5_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x058
+#define QPHY_V5_PCS_USB3_TEST_CONTROL 0x05c
+#define QPHY_V5_PCS_USB3_RXTERMINATION_DLY_SEL 0x060
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
new file mode 100644
index 000000000000..c8515f506872
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V2_H_
+#define QCOM_PHY_QMP_PCS_V2_H_
+
+/* Only for QMP V2 PHY - PCS registers */
+#define QPHY_V2_PCS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V2_PCS_TXDEEMPH_M6DB_V0 0x024
+#define QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0 0x028
+#define QPHY_V2_PCS_TX_LARGE_AMP_DRV_LVL 0x034
+#define QPHY_V2_PCS_TX_LARGE_AMP_POST_EMP_LVL 0x038
+#define QPHY_V2_PCS_TX_SMALL_AMP_DRV_LVL 0x03c
+#define QPHY_V2_PCS_TX_SMALL_AMP_POST_EMP_LVL 0x040
+#define QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE 0x054
+#define QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL 0x058
+#define QPHY_V2_PCS_POWER_STATE_CONFIG1 0x060
+#define QPHY_V2_PCS_POWER_STATE_CONFIG2 0x064
+#define QPHY_V2_PCS_POWER_STATE_CONFIG4 0x06c
+#define QPHY_V2_PCS_LOCK_DETECT_CONFIG1 0x080
+#define QPHY_V2_PCS_LOCK_DETECT_CONFIG2 0x084
+#define QPHY_V2_PCS_LOCK_DETECT_CONFIG3 0x088
+#define QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x0a0
+#define QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK 0x0a4
+#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME 0x0a8
+#define QPHY_V2_PCS_FLL_CNTRL1 0x0c0
+#define QPHY_V2_PCS_FLL_CNTRL2 0x0c4
+#define QPHY_V2_PCS_FLL_CNT_VAL_L 0x0c8
+#define QPHY_V2_PCS_FLL_CNT_VAL_H_TOL 0x0cc
+#define QPHY_V2_PCS_FLL_MAN_CODE 0x0d0
+
+/* UFS only? */
+#define QPHY_V2_PCS_RX_MIN_STALL_NOCONFIG_TIME_CAP 0x0cc
+#define QPHY_V2_PCS_RX_SYM_RESYNC_CTRL 0x13c
+#define QPHY_V2_PCS_RX_MIN_HIBERN8_TIME 0x140
+#define QPHY_V2_PCS_RX_SIGDET_CTRL2 0x148
+#define QPHY_V2_PCS_RX_PWM_GEAR_BAND 0x154
+#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1a8
+#define QPHY_V2_PCS_OSC_DTCT_ACTIONS 0x1ac
+#define QPHY_V2_PCS_RX_SIGDET_LVL 0x1d8
+#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
+#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h
new file mode 100644
index 000000000000..10dbbb006201
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v3.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V3_H_
+#define QCOM_PHY_QMP_PCS_V3_H_
+
+/* Only for QMP V3 PHY - PCS registers */
+#define QPHY_V3_PCS_SW_RESET 0x000
+#define QPHY_V3_PCS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V3_PCS_START_CONTROL 0x008
+#define QPHY_V3_PCS_TXMGN_V0 0x00c
+#define QPHY_V3_PCS_TXMGN_V1 0x010
+#define QPHY_V3_PCS_TXMGN_V2 0x014
+#define QPHY_V3_PCS_TXMGN_V3 0x018
+#define QPHY_V3_PCS_TXMGN_V4 0x01c
+#define QPHY_V3_PCS_TXMGN_LS 0x020
+#define QPHY_V3_PCS_TXDEEMPH_M6DB_V0 0x024
+#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0 0x028
+#define QPHY_V3_PCS_TXDEEMPH_M6DB_V1 0x02c
+#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1 0x030
+#define QPHY_V3_PCS_TXDEEMPH_M6DB_V2 0x034
+#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2 0x038
+#define QPHY_V3_PCS_TXDEEMPH_M6DB_V3 0x03c
+#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3 0x040
+#define QPHY_V3_PCS_TXDEEMPH_M6DB_V4 0x044
+#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4 0x048
+#define QPHY_V3_PCS_TXDEEMPH_M6DB_LS 0x04c
+#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS 0x050
+#define QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE 0x054
+#define QPHY_V3_PCS_RX_IDLE_DTCT_CNTRL 0x058
+#define QPHY_V3_PCS_RATE_SLEW_CNTRL 0x05c
+#define QPHY_V3_PCS_POWER_STATE_CONFIG1 0x060
+#define QPHY_V3_PCS_POWER_STATE_CONFIG2 0x064
+#define QPHY_V3_PCS_POWER_STATE_CONFIG3 0x068
+#define QPHY_V3_PCS_POWER_STATE_CONFIG4 0x06c
+#define QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L 0x070
+#define QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H 0x074
+#define QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L 0x078
+#define QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H 0x07c
+#define QPHY_V3_PCS_LOCK_DETECT_CONFIG1 0x080
+#define QPHY_V3_PCS_LOCK_DETECT_CONFIG2 0x084
+#define QPHY_V3_PCS_LOCK_DETECT_CONFIG3 0x088
+#define QPHY_V3_PCS_TSYNC_RSYNC_TIME 0x08c
+#define QPHY_V3_PCS_SIGDET_LOW_2_IDLE_TIME 0x090
+#define QPHY_V3_PCS_BEACON_2_IDLE_TIME_L 0x094
+#define QPHY_V3_PCS_BEACON_2_IDLE_TIME_H 0x098
+#define QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_SYSCLK 0x09c
+#define QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x0a0
+#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK 0x0a4
+#define QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME 0x0a8
+#define QPHY_V3_PCS_LFPS_DET_HIGH_COUNT_VAL 0x0ac
+#define QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK 0x0b0
+#define QPHY_V3_PCS_LFPS_TX_END_CNT_P2U3_START 0x0b4
+#define QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME 0x0b8
+#define QPHY_V3_PCS_RXEQTRAINING_RUN_TIME 0x0bc
+#define QPHY_V3_PCS_TXONESZEROS_RUN_LENGTH 0x0c0
+#define QPHY_V3_PCS_FLL_CNTRL1 0x0c4
+#define QPHY_V3_PCS_FLL_CNTRL2 0x0c8
+#define QPHY_V3_PCS_FLL_CNT_VAL_L 0x0cc
+#define QPHY_V3_PCS_FLL_CNT_VAL_H_TOL 0x0d0
+#define QPHY_V3_PCS_FLL_MAN_CODE 0x0d4
+#define QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL 0x0d8
+#define QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR 0x0dc
+#define QPHY_V3_PCS_ARCVR_DTCT_EN_PERIOD 0x0e0
+#define QPHY_V3_PCS_ARCVR_DTCT_CM_DLY 0x0e4
+#define QPHY_V3_PCS_ALFPS_DEGLITCH_VAL 0x0e8
+#define QPHY_V3_PCS_INSIG_SW_CTRL1 0x0ec
+#define QPHY_V3_PCS_INSIG_SW_CTRL2 0x0f0
+#define QPHY_V3_PCS_INSIG_SW_CTRL3 0x0f4
+#define QPHY_V3_PCS_INSIG_MX_CTRL1 0x0f8
+#define QPHY_V3_PCS_INSIG_MX_CTRL2 0x0fc
+#define QPHY_V3_PCS_INSIG_MX_CTRL3 0x100
+#define QPHY_V3_PCS_OUTSIG_SW_CTRL1 0x104
+#define QPHY_V3_PCS_OUTSIG_MX_CTRL1 0x108
+#define QPHY_V3_PCS_CLK_DEBUG_BYPASS_CTRL 0x10c
+#define QPHY_V3_PCS_TEST_CONTROL 0x110
+#define QPHY_V3_PCS_TEST_CONTROL2 0x114
+#define QPHY_V3_PCS_TEST_CONTROL3 0x118
+#define QPHY_V3_PCS_TEST_CONTROL4 0x11c
+#define QPHY_V3_PCS_TEST_CONTROL5 0x120
+#define QPHY_V3_PCS_TEST_CONTROL6 0x124
+#define QPHY_V3_PCS_TEST_CONTROL7 0x128
+#define QPHY_V3_PCS_COM_RESET_CONTROL 0x12c
+#define QPHY_V3_PCS_BIST_CTRL 0x130
+#define QPHY_V3_PCS_PRBS_POLY0 0x134
+#define QPHY_V3_PCS_PRBS_POLY1 0x138
+#define QPHY_V3_PCS_PRBS_SEED0 0x13c
+#define QPHY_V3_PCS_PRBS_SEED1 0x140
+#define QPHY_V3_PCS_FIXED_PAT_CTRL 0x144
+#define QPHY_V3_PCS_FIXED_PAT0 0x148
+#define QPHY_V3_PCS_FIXED_PAT1 0x14c
+#define QPHY_V3_PCS_FIXED_PAT2 0x150
+#define QPHY_V3_PCS_FIXED_PAT3 0x154
+#define QPHY_V3_PCS_COM_CLK_SWITCH_CTRL 0x158
+#define QPHY_V3_PCS_ELECIDLE_DLY_SEL 0x15c
+#define QPHY_V3_PCS_SPARE1 0x160
+#define QPHY_V3_PCS_BIST_CHK_ERR_CNT_L_STATUS 0x164
+#define QPHY_V3_PCS_BIST_CHK_ERR_CNT_H_STATUS 0x168
+#define QPHY_V3_PCS_BIST_CHK_STATUS 0x16c
+#define QPHY_V3_PCS_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x170
+#define QPHY_V3_PCS_PCS_STATUS 0x174
+#define QPHY_V3_PCS_PCS_STATUS2 0x178
+#define QPHY_V3_PCS_PCS_STATUS3 0x17c
+#define QPHY_V3_PCS_COM_RESET_STATUS 0x180
+#define QPHY_V3_PCS_OSC_DTCT_STATUS 0x184
+#define QPHY_V3_PCS_REVISION_ID0 0x188
+#define QPHY_V3_PCS_REVISION_ID1 0x18c
+#define QPHY_V3_PCS_REVISION_ID2 0x190
+#define QPHY_V3_PCS_REVISION_ID3 0x194
+#define QPHY_V3_PCS_DEBUG_BUS_0_STATUS 0x198
+#define QPHY_V3_PCS_DEBUG_BUS_1_STATUS 0x19c
+#define QPHY_V3_PCS_DEBUG_BUS_2_STATUS 0x1a0
+#define QPHY_V3_PCS_DEBUG_BUS_3_STATUS 0x1a4
+#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1a8
+#define QPHY_V3_PCS_OSC_DTCT_ACTIONS 0x1ac
+#define QPHY_V3_PCS_SIGDET_CNTRL 0x1b0
+#define QPHY_V3_PCS_IDAC_CAL_CNTRL 0x1b4
+#define QPHY_V3_PCS_CMN_ACK_OUT_SEL 0x1b8
+#define QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME_SYSCLK 0x1bc
+#define QPHY_V3_PCS_AUTONOMOUS_MODE_STATUS 0x1c0
+#define QPHY_V3_PCS_ENDPOINT_REFCLK_CNTRL 0x1c4
+#define QPHY_V3_PCS_EPCLK_PRE_PLL_LOCK_DLY_SYSCLK 0x1c8
+#define QPHY_V3_PCS_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK 0x1cc
+#define QPHY_V3_PCS_EPCLK_DLY_COUNT_VAL_L 0x1d0
+#define QPHY_V3_PCS_EPCLK_DLY_COUNT_VAL_H 0x1d4
+#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8
+#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
+#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
+#define QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL2 0x1e4
+#define QPHY_V3_PCS_RXTERMINATION_DLY_SEL 0x1e8
+#define QPHY_V3_PCS_LFPS_PER_TIMER_VAL 0x1ec
+#define QPHY_V3_PCS_SIGDET_STARTUP_TIMER_VAL 0x1f0
+#define QPHY_V3_PCS_LOCK_DETECT_CONFIG4 0x1f4
+#define QPHY_V3_PCS_RX_SIGDET_DTCT_CNTRL 0x1f8
+#define QPHY_V3_PCS_PCS_STATUS4 0x1fc
+#define QPHY_V3_PCS_PCS_STATUS4_CLEAR 0x200
+#define QPHY_V3_PCS_DEC_ERROR_COUNT_STATUS 0x204
+#define QPHY_V3_PCS_COMMA_POS_STATUS 0x208
+#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c
+#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210
+#define QPHY_V3_PCS_REFGEN_REQ_CONFIG3 0x214
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h
new file mode 100644
index 000000000000..a2c1eba2b693
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V4_H_
+#define QCOM_PHY_QMP_PCS_V4_H_
+
+/* Only for QMP V4 PHY - USB/PCIe PCS registers */
+#define QPHY_V4_PCS_SW_RESET 0x000
+#define QPHY_V4_PCS_REVISION_ID0 0x004
+#define QPHY_V4_PCS_REVISION_ID1 0x008
+#define QPHY_V4_PCS_REVISION_ID2 0x00c
+#define QPHY_V4_PCS_REVISION_ID3 0x010
+#define QPHY_V4_PCS_PCS_STATUS1 0x014
+#define QPHY_V4_PCS_PCS_STATUS2 0x018
+#define QPHY_V4_PCS_PCS_STATUS3 0x01c
+#define QPHY_V4_PCS_PCS_STATUS4 0x020
+#define QPHY_V4_PCS_PCS_STATUS5 0x024
+#define QPHY_V4_PCS_PCS_STATUS6 0x028
+#define QPHY_V4_PCS_PCS_STATUS7 0x02c
+#define QPHY_V4_PCS_DEBUG_BUS_0_STATUS 0x030
+#define QPHY_V4_PCS_DEBUG_BUS_1_STATUS 0x034
+#define QPHY_V4_PCS_DEBUG_BUS_2_STATUS 0x038
+#define QPHY_V4_PCS_DEBUG_BUS_3_STATUS 0x03c
+#define QPHY_V4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V4_PCS_START_CONTROL 0x044
+#define QPHY_V4_PCS_INSIG_SW_CTRL1 0x048
+#define QPHY_V4_PCS_INSIG_SW_CTRL2 0x04c
+#define QPHY_V4_PCS_INSIG_SW_CTRL3 0x050
+#define QPHY_V4_PCS_INSIG_SW_CTRL4 0x054
+#define QPHY_V4_PCS_INSIG_SW_CTRL5 0x058
+#define QPHY_V4_PCS_INSIG_SW_CTRL6 0x05c
+#define QPHY_V4_PCS_INSIG_SW_CTRL7 0x060
+#define QPHY_V4_PCS_INSIG_SW_CTRL8 0x064
+#define QPHY_V4_PCS_INSIG_MX_CTRL1 0x068
+#define QPHY_V4_PCS_INSIG_MX_CTRL2 0x06c
+#define QPHY_V4_PCS_INSIG_MX_CTRL3 0x070
+#define QPHY_V4_PCS_INSIG_MX_CTRL4 0x074
+#define QPHY_V4_PCS_INSIG_MX_CTRL5 0x078
+#define QPHY_V4_PCS_INSIG_MX_CTRL7 0x07c
+#define QPHY_V4_PCS_INSIG_MX_CTRL8 0x080
+#define QPHY_V4_PCS_OUTSIG_SW_CTRL1 0x084
+#define QPHY_V4_PCS_OUTSIG_MX_CTRL1 0x088
+#define QPHY_V4_PCS_CLAMP_ENABLE 0x08c
+#define QPHY_V4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V4_PCS_POWER_STATE_CONFIG2 0x094
+#define QPHY_V4_PCS_FLL_CNTRL1 0x098
+#define QPHY_V4_PCS_FLL_CNTRL2 0x09c
+#define QPHY_V4_PCS_FLL_CNT_VAL_L 0x0a0
+#define QPHY_V4_PCS_FLL_CNT_VAL_H_TOL 0x0a4
+#define QPHY_V4_PCS_FLL_MAN_CODE 0x0a8
+#define QPHY_V4_PCS_TEST_CONTROL1 0x0ac
+#define QPHY_V4_PCS_TEST_CONTROL2 0x0b0
+#define QPHY_V4_PCS_TEST_CONTROL3 0x0b4
+#define QPHY_V4_PCS_TEST_CONTROL4 0x0b8
+#define QPHY_V4_PCS_TEST_CONTROL5 0x0bc
+#define QPHY_V4_PCS_TEST_CONTROL6 0x0c0
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG4 0x0d0
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG5 0x0d4
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG2 0x0e0
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG3 0x0e4
+#define QPHY_V4_PCS_BIST_CTRL 0x0e8
+#define QPHY_V4_PCS_PRBS_POLY0 0x0ec
+#define QPHY_V4_PCS_PRBS_POLY1 0x0f0
+#define QPHY_V4_PCS_FIXED_PAT0 0x0f4
+#define QPHY_V4_PCS_FIXED_PAT1 0x0f8
+#define QPHY_V4_PCS_FIXED_PAT2 0x0fc
+#define QPHY_V4_PCS_FIXED_PAT3 0x100
+#define QPHY_V4_PCS_FIXED_PAT4 0x104
+#define QPHY_V4_PCS_FIXED_PAT5 0x108
+#define QPHY_V4_PCS_FIXED_PAT6 0x10c
+#define QPHY_V4_PCS_FIXED_PAT7 0x110
+#define QPHY_V4_PCS_FIXED_PAT8 0x114
+#define QPHY_V4_PCS_FIXED_PAT9 0x118
+#define QPHY_V4_PCS_FIXED_PAT10 0x11c
+#define QPHY_V4_PCS_FIXED_PAT11 0x120
+#define QPHY_V4_PCS_FIXED_PAT12 0x124
+#define QPHY_V4_PCS_FIXED_PAT13 0x128
+#define QPHY_V4_PCS_FIXED_PAT14 0x12c
+#define QPHY_V4_PCS_FIXED_PAT15 0x130
+#define QPHY_V4_PCS_TXMGN_CONFIG 0x134
+#define QPHY_V4_PCS_G12S1_TXMGN_V0 0x138
+#define QPHY_V4_PCS_G12S1_TXMGN_V1 0x13c
+#define QPHY_V4_PCS_G12S1_TXMGN_V2 0x140
+#define QPHY_V4_PCS_G12S1_TXMGN_V3 0x144
+#define QPHY_V4_PCS_G12S1_TXMGN_V4 0x148
+#define QPHY_V4_PCS_G12S1_TXMGN_V0_RS 0x14c
+#define QPHY_V4_PCS_G12S1_TXMGN_V1_RS 0x150
+#define QPHY_V4_PCS_G12S1_TXMGN_V2_RS 0x154
+#define QPHY_V4_PCS_G12S1_TXMGN_V3_RS 0x158
+#define QPHY_V4_PCS_G12S1_TXMGN_V4_RS 0x15c
+#define QPHY_V4_PCS_G3S2_TXMGN_MAIN 0x160
+#define QPHY_V4_PCS_G3S2_TXMGN_MAIN_RS 0x164
+#define QPHY_V4_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB 0x16c
+#define QPHY_V4_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V4_PCS_G3S2_POST_GAIN 0x174
+#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET 0x178
+#define QPHY_V4_PCS_G3S2_PRE_GAIN_RS 0x17c
+#define QPHY_V4_PCS_G3S2_POST_GAIN_RS 0x180
+#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET_RS 0x184
+#define QPHY_V4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V4_PCS_RX_SIGDET_DTCT_CNTRL 0x18c
+#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V4_PCS_RATE_SLEW_CNTRL2 0x19c
+#define QPHY_V4_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1a0
+#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4
+#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8
+#define QPHY_V4_PCS_TSYNC_RSYNC_TIME 0x1ac
+#define QPHY_V4_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V4_PCS_TSYNC_DLY_TIME 0x1b4
+#define QPHY_V4_PCS_ELECIDLE_DLY_SEL 0x1b8
+#define QPHY_V4_PCS_CMN_ACK_OUT_SEL 0x1bc
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG3 0x1c8
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG4 0x1cc
+#define QPHY_V4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V4_PCS_RX_IDLE_DTCT_CNTRL 0x1d4
+#define QPHY_V4_PCS_RX_DCC_CAL_CONFIG 0x1d8
+#define QPHY_V4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V4_PCS_EQ_CONFIG3 0x1e4
+#define QPHY_V4_PCS_EQ_CONFIG4 0x1e8
+#define QPHY_V4_PCS_EQ_CONFIG5 0x1ec
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h
new file mode 100644
index 000000000000..08c3dd115488
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v4_20.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V4_20_H_
+#define QCOM_PHY_QMP_PCS_V4_20_H_
+
+/* Only for QMP V4_20 PHY - USB/PCIe PCS registers */
+#define QPHY_V4_20_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V4_20_PCS_EQ_CONFIG2 0x1d8
+#define QPHY_V4_20_PCS_EQ_CONFIG4 0x1e0
+#define QPHY_V4_20_PCS_EQ_CONFIG5 0x1e4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
new file mode 100644
index 000000000000..61a44519f969
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V5_H_
+#define QCOM_PHY_QMP_PCS_V5_H_
+
+/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h
new file mode 100644
index 000000000000..c0bd54e0e7b6
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v3.h
@@ -0,0 +1,111 @@
+
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V3_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V3_H_
+
+/* Only for QMP V3 PHY - QSERDES COM registers */
+#define QSERDES_V3_COM_ATB_SEL1 0x000
+#define QSERDES_V3_COM_ATB_SEL2 0x004
+#define QSERDES_V3_COM_FREQ_UPDATE 0x008
+#define QSERDES_V3_COM_BG_TIMER 0x00c
+#define QSERDES_V3_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V3_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_V3_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_V3_COM_SSC_PER1 0x01c
+#define QSERDES_V3_COM_SSC_PER2 0x020
+#define QSERDES_V3_COM_SSC_STEP_SIZE1 0x024
+#define QSERDES_V3_COM_SSC_STEP_SIZE2 0x028
+#define QSERDES_V3_COM_POST_DIV 0x02c
+#define QSERDES_V3_COM_POST_DIV_MUX 0x030
+#define QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN 0x034
+#define QSERDES_V3_COM_CLK_ENABLE1 0x038
+#define QSERDES_V3_COM_SYS_CLK_CTRL 0x03c
+#define QSERDES_V3_COM_SYSCLK_BUF_ENABLE 0x040
+#define QSERDES_V3_COM_PLL_EN 0x044
+#define QSERDES_V3_COM_PLL_IVCO 0x048
+#define QSERDES_V3_COM_CMN_IETRIM 0x04c
+#define QSERDES_V3_COM_CMN_IPTRIM 0x050
+#define QSERDES_V3_COM_EP_CLOCK_DETECT_CTR 0x054
+#define QSERDES_V3_COM_SYSCLK_DET_COMP_STATUS 0x058
+#define QSERDES_V3_COM_CLK_EP_DIV 0x05c
+#define QSERDES_V3_COM_CP_CTRL_MODE0 0x060
+#define QSERDES_V3_COM_CP_CTRL_MODE1 0x064
+#define QSERDES_V3_COM_PLL_RCTRL_MODE0 0x068
+#define QSERDES_V3_COM_PLL_RCTRL_MODE1 0x06c
+#define QSERDES_V3_COM_PLL_CCTRL_MODE0 0x070
+#define QSERDES_V3_COM_PLL_CCTRL_MODE1 0x074
+#define QSERDES_V3_COM_PLL_CNTRL 0x078
+#define QSERDES_V3_COM_BIAS_EN_CTRL_BY_PSM 0x07c
+#define QSERDES_V3_COM_SYSCLK_EN_SEL 0x080
+#define QSERDES_V3_COM_CML_SYSCLK_SEL 0x084
+#define QSERDES_V3_COM_RESETSM_CNTRL 0x088
+#define QSERDES_V3_COM_RESETSM_CNTRL2 0x08c
+#define QSERDES_V3_COM_LOCK_CMP_EN 0x090
+#define QSERDES_V3_COM_LOCK_CMP_CFG 0x094
+#define QSERDES_V3_COM_LOCK_CMP1_MODE0 0x098
+#define QSERDES_V3_COM_LOCK_CMP2_MODE0 0x09c
+#define QSERDES_V3_COM_LOCK_CMP3_MODE0 0x0a0
+#define QSERDES_V3_COM_LOCK_CMP1_MODE1 0x0a4
+#define QSERDES_V3_COM_LOCK_CMP2_MODE1 0x0a8
+#define QSERDES_V3_COM_LOCK_CMP3_MODE1 0x0ac
+#define QSERDES_V3_COM_DEC_START_MODE0 0x0b0
+#define QSERDES_V3_COM_DEC_START_MODE1 0x0b4
+#define QSERDES_V3_COM_DIV_FRAC_START1_MODE0 0x0b8
+#define QSERDES_V3_COM_DIV_FRAC_START2_MODE0 0x0bc
+#define QSERDES_V3_COM_DIV_FRAC_START3_MODE0 0x0c0
+#define QSERDES_V3_COM_DIV_FRAC_START1_MODE1 0x0c4
+#define QSERDES_V3_COM_DIV_FRAC_START2_MODE1 0x0c8
+#define QSERDES_V3_COM_DIV_FRAC_START3_MODE1 0x0cc
+#define QSERDES_V3_COM_INTEGLOOP_INITVAL 0x0d0
+#define QSERDES_V3_COM_INTEGLOOP_EN 0x0d4
+#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0 0x0d8
+#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0 0x0dc
+#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1 0x0e0
+#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1 0x0e4
+#define QSERDES_V3_COM_VCOCAL_DEADMAN_CTRL 0x0e8
+#define QSERDES_V3_COM_VCO_TUNE_CTRL 0x0ec
+#define QSERDES_V3_COM_VCO_TUNE_MAP 0x0f0
+#define QSERDES_V3_COM_VCO_TUNE1_MODE0 0x0f4
+#define QSERDES_V3_COM_VCO_TUNE2_MODE0 0x0f8
+#define QSERDES_V3_COM_VCO_TUNE1_MODE1 0x0fc
+#define QSERDES_V3_COM_VCO_TUNE2_MODE1 0x100
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL1 0x104
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL2 0x108
+#define QSERDES_V3_COM_VCO_TUNE_MINVAL1 0x10c
+#define QSERDES_V3_COM_VCO_TUNE_MINVAL2 0x110
+#define QSERDES_V3_COM_VCO_TUNE_MAXVAL1 0x114
+#define QSERDES_V3_COM_VCO_TUNE_MAXVAL2 0x118
+#define QSERDES_V3_COM_VCO_TUNE_TIMER1 0x11c
+#define QSERDES_V3_COM_VCO_TUNE_TIMER2 0x120
+#define QSERDES_V3_COM_CMN_STATUS 0x124
+#define QSERDES_V3_COM_RESET_SM_STATUS 0x128
+#define QSERDES_V3_COM_RESTRIM_CODE_STATUS 0x12c
+#define QSERDES_V3_COM_PLLCAL_CODE1_STATUS 0x130
+#define QSERDES_V3_COM_PLLCAL_CODE2_STATUS 0x134
+#define QSERDES_V3_COM_CLK_SELECT 0x138
+#define QSERDES_V3_COM_HSCLK_SEL 0x13c
+#define QSERDES_V3_COM_INTEGLOOP_BINCODE_STATUS 0x140
+#define QSERDES_V3_COM_PLL_ANALOG 0x144
+#define QSERDES_V3_COM_CORECLK_DIV_MODE0 0x148
+#define QSERDES_V3_COM_CORECLK_DIV_MODE1 0x14c
+#define QSERDES_V3_COM_SW_RESET 0x150
+#define QSERDES_V3_COM_CORE_CLK_EN 0x154
+#define QSERDES_V3_COM_C_READY_STATUS 0x158
+#define QSERDES_V3_COM_CMN_CONFIG 0x15c
+#define QSERDES_V3_COM_CMN_RATE_OVERRIDE 0x160
+#define QSERDES_V3_COM_SVS_MODE_CLK_SEL 0x164
+#define QSERDES_V3_COM_DEBUG_BUS0 0x168
+#define QSERDES_V3_COM_DEBUG_BUS1 0x16c
+#define QSERDES_V3_COM_DEBUG_BUS2 0x170
+#define QSERDES_V3_COM_DEBUG_BUS3 0x174
+#define QSERDES_V3_COM_DEBUG_BUS_SEL 0x178
+#define QSERDES_V3_COM_CMN_MISC1 0x17c
+#define QSERDES_V3_COM_CMN_MISC2 0x180
+#define QSERDES_V3_COM_CMN_MODE 0x184
+#define QSERDES_V3_COM_CMN_VREG_SEL 0x188
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h
new file mode 100644
index 000000000000..b0e3298d990d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v4.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V4_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V4_H_
+
+/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_ATB_SEL1 0x000
+#define QSERDES_V4_COM_ATB_SEL2 0x004
+#define QSERDES_V4_COM_FREQ_UPDATE 0x008
+#define QSERDES_V4_COM_BG_TIMER 0x00c
+#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V4_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_V4_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_V4_COM_SSC_PER1 0x01c
+#define QSERDES_V4_COM_SSC_PER2 0x020
+#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_V4_COM_SSC_STEP_SIZE3_MODE0 0x02c
+#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030
+#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V4_COM_SSC_STEP_SIZE3_MODE1 0x038
+#define QSERDES_V4_COM_POST_DIV 0x03c
+#define QSERDES_V4_COM_POST_DIV_MUX 0x040
+#define QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN 0x044
+#define QSERDES_V4_COM_CLK_ENABLE1 0x048
+#define QSERDES_V4_COM_SYS_CLK_CTRL 0x04c
+#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050
+#define QSERDES_V4_COM_PLL_EN 0x054
+#define QSERDES_V4_COM_PLL_IVCO 0x058
+#define QSERDES_V4_COM_CMN_IETRIM 0x05c
+#define QSERDES_V4_COM_CMN_IPTRIM 0x060
+#define QSERDES_V4_COM_EP_CLOCK_DETECT_CTRL 0x064
+#define QSERDES_V4_COM_SYSCLK_DET_COMP_STATUS 0x068
+#define QSERDES_V4_COM_CLK_EP_DIV_MODE0 0x06c
+#define QSERDES_V4_COM_CLK_EP_DIV_MODE1 0x070
+#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V4_COM_PLL_CNTRL 0x08c
+#define QSERDES_V4_COM_BIAS_EN_CTRL_BY_PSM 0x090
+#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V4_COM_CML_SYSCLK_SEL 0x098
+#define QSERDES_V4_COM_RESETSM_CNTRL 0x09c
+#define QSERDES_V4_COM_RESETSM_CNTRL2 0x0a0
+#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP_CFG 0x0a8
+#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V4_COM_DEC_START_MSB_MODE0 0x0c0
+#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_DEC_START_MSB_MODE1 0x0c8
+#define QSERDES_V4_COM_DIV_FRAC_START1_MODE0 0x0cc
+#define QSERDES_V4_COM_DIV_FRAC_START2_MODE0 0x0d0
+#define QSERDES_V4_COM_DIV_FRAC_START3_MODE0 0x0d4
+#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8
+#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc
+#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
+#define QSERDES_V4_COM_INTEGLOOP_INITVAL 0x0e4
+#define QSERDES_V4_COM_INTEGLOOP_EN 0x0e8
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0 0x0ec
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0 0x0f0
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1 0x0f4
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1 0x0f8
+#define QSERDES_V4_COM_INTEGLOOP_P_PATH_GAIN0 0x0fc
+#define QSERDES_V4_COM_INTEGLOOP_P_PATH_GAIN1 0x100
+#define QSERDES_V4_COM_VCOCAL_DEADMAN_CTRL 0x104
+#define QSERDES_V4_COM_VCO_TUNE_CTRL 0x108
+#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
+#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114
+#define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118
+#define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c
+#define QSERDES_V4_COM_VCO_TUNE_INITVAL1 0x120
+#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V4_COM_VCO_TUNE_MINVAL1 0x128
+#define QSERDES_V4_COM_VCO_TUNE_MINVAL2 0x12c
+#define QSERDES_V4_COM_VCO_TUNE_MAXVAL1 0x130
+#define QSERDES_V4_COM_VCO_TUNE_MAXVAL2 0x134
+#define QSERDES_V4_COM_VCO_TUNE_TIMER1 0x138
+#define QSERDES_V4_COM_VCO_TUNE_TIMER2 0x13c
+#define QSERDES_V4_COM_CMN_STATUS 0x140
+#define QSERDES_V4_COM_RESET_SM_STATUS 0x144
+#define QSERDES_V4_COM_RESTRIM_CODE_STATUS 0x148
+#define QSERDES_V4_COM_PLLCAL_CODE1_STATUS 0x14c
+#define QSERDES_V4_COM_PLLCAL_CODE2_STATUS 0x150
+#define QSERDES_V4_COM_CLK_SELECT 0x154
+#define QSERDES_V4_COM_HSCLK_SEL 0x158
+#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_INTEGLOOP_BINCODE_STATUS 0x160
+#define QSERDES_V4_COM_PLL_ANALOG 0x164
+#define QSERDES_V4_COM_CORECLK_DIV_MODE0 0x168
+#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V4_COM_SW_RESET 0x170
+#define QSERDES_V4_COM_CORE_CLK_EN 0x174
+#define QSERDES_V4_COM_C_READY_STATUS 0x178
+#define QSERDES_V4_COM_CMN_CONFIG 0x17c
+#define QSERDES_V4_COM_CMN_RATE_OVERRIDE 0x180
+#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
+#define QSERDES_V4_COM_DEBUG_BUS0 0x188
+#define QSERDES_V4_COM_DEBUG_BUS1 0x18c
+#define QSERDES_V4_COM_DEBUG_BUS2 0x190
+#define QSERDES_V4_COM_DEBUG_BUS3 0x194
+#define QSERDES_V4_COM_DEBUG_BUS_SEL 0x198
+#define QSERDES_V4_COM_CMN_MISC1 0x19c
+#define QSERDES_V4_COM_CMN_MISC2 0x1a0
+#define QSERDES_V4_COM_CMN_MODE 0x1a4
+#define QSERDES_V4_COM_VCO_DC_LEVEL_CTRL 0x1a8
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h
new file mode 100644
index 000000000000..c8afdf7bc1ee
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v5.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V5_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V5_H_
+
+/* Only for QMP V5 PHY - QSERDES COM registers */
+#define QSERDES_V5_COM_ATB_SEL1 0x000
+#define QSERDES_V5_COM_ATB_SEL2 0x004
+#define QSERDES_V5_COM_FREQ_UPDATE 0x008
+#define QSERDES_V5_COM_BG_TIMER 0x00c
+#define QSERDES_V5_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V5_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_V5_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_V5_COM_SSC_PER1 0x01c
+#define QSERDES_V5_COM_SSC_PER2 0x020
+#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_V5_COM_SSC_STEP_SIZE3_MODE0 0x02c
+#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1 0x030
+#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V5_COM_SSC_STEP_SIZE3_MODE1 0x038
+#define QSERDES_V5_COM_POST_DIV 0x03c
+#define QSERDES_V5_COM_POST_DIV_MUX 0x040
+#define QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN 0x044
+#define QSERDES_V5_COM_CLK_ENABLE1 0x048
+#define QSERDES_V5_COM_SYS_CLK_CTRL 0x04c
+#define QSERDES_V5_COM_SYSCLK_BUF_ENABLE 0x050
+#define QSERDES_V5_COM_PLL_EN 0x054
+#define QSERDES_V5_COM_PLL_IVCO 0x058
+#define QSERDES_V5_COM_CMN_IETRIM 0x05c
+#define QSERDES_V5_COM_CMN_IPTRIM 0x060
+#define QSERDES_V5_COM_EP_CLOCK_DETECT_CTRL 0x064
+#define QSERDES_V5_COM_SYSCLK_DET_COMP_STATUS 0x068
+#define QSERDES_V5_COM_CLK_EP_DIV_MODE0 0x06c
+#define QSERDES_V5_COM_CLK_EP_DIV_MODE1 0x070
+#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V5_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V5_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V5_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V5_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V5_COM_PLL_CNTRL 0x08c
+#define QSERDES_V5_COM_BIAS_EN_CTRL_BY_PSM 0x090
+#define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V5_COM_CML_SYSCLK_SEL 0x098
+#define QSERDES_V5_COM_RESETSM_CNTRL 0x09c
+#define QSERDES_V5_COM_RESETSM_CNTRL2 0x0a0
+#define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V5_COM_LOCK_CMP_CFG 0x0a8
+#define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V5_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V5_COM_DEC_START_MSB_MODE0 0x0c0
+#define QSERDES_V5_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V5_COM_DEC_START_MSB_MODE1 0x0c8
+#define QSERDES_V5_COM_DIV_FRAC_START1_MODE0 0x0cc
+#define QSERDES_V5_COM_DIV_FRAC_START2_MODE0 0x0d0
+#define QSERDES_V5_COM_DIV_FRAC_START3_MODE0 0x0d4
+#define QSERDES_V5_COM_DIV_FRAC_START1_MODE1 0x0d8
+#define QSERDES_V5_COM_DIV_FRAC_START2_MODE1 0x0dc
+#define QSERDES_V5_COM_DIV_FRAC_START3_MODE1 0x0e0
+#define QSERDES_V5_COM_INTEGLOOP_INITVAL 0x0e4
+#define QSERDES_V5_COM_INTEGLOOP_EN 0x0e8
+#define QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0 0x0ec
+#define QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE0 0x0f0
+#define QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1 0x0f4
+#define QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE1 0x0f8
+#define QSERDES_V5_COM_INTEGLOOP_P_PATH_GAIN0 0x0fc
+#define QSERDES_V5_COM_INTEGLOOP_P_PATH_GAIN1 0x100
+#define QSERDES_V5_COM_VCOCAL_DEADMAN_CTRL 0x104
+#define QSERDES_V5_COM_VCO_TUNE_CTRL 0x108
+#define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V5_COM_VCO_TUNE1_MODE0 0x110
+#define QSERDES_V5_COM_VCO_TUNE2_MODE0 0x114
+#define QSERDES_V5_COM_VCO_TUNE1_MODE1 0x118
+#define QSERDES_V5_COM_VCO_TUNE2_MODE1 0x11c
+#define QSERDES_V5_COM_VCO_TUNE_INITVAL1 0x120
+#define QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V5_COM_VCO_TUNE_MINVAL1 0x128
+#define QSERDES_V5_COM_VCO_TUNE_MINVAL2 0x12c
+#define QSERDES_V5_COM_VCO_TUNE_MAXVAL1 0x130
+#define QSERDES_V5_COM_VCO_TUNE_MAXVAL2 0x134
+#define QSERDES_V5_COM_VCO_TUNE_TIMER1 0x138
+#define QSERDES_V5_COM_VCO_TUNE_TIMER2 0x13c
+#define QSERDES_V5_COM_CMN_STATUS 0x140
+#define QSERDES_V5_COM_RESET_SM_STATUS 0x144
+#define QSERDES_V5_COM_RESTRIM_CODE_STATUS 0x148
+#define QSERDES_V5_COM_PLLCAL_CODE1_STATUS 0x14c
+#define QSERDES_V5_COM_PLLCAL_CODE2_STATUS 0x150
+#define QSERDES_V5_COM_CLK_SELECT 0x154
+#define QSERDES_V5_COM_HSCLK_SEL 0x158
+#define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V5_COM_INTEGLOOP_BINCODE_STATUS 0x160
+#define QSERDES_V5_COM_PLL_ANALOG 0x164
+#define QSERDES_V5_COM_CORECLK_DIV_MODE0 0x168
+#define QSERDES_V5_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V5_COM_SW_RESET 0x170
+#define QSERDES_V5_COM_CORE_CLK_EN 0x174
+#define QSERDES_V5_COM_C_READY_STATUS 0x178
+#define QSERDES_V5_COM_CMN_CONFIG 0x17c
+#define QSERDES_V5_COM_CMN_RATE_OVERRIDE 0x180
+#define QSERDES_V5_COM_SVS_MODE_CLK_SEL 0x184
+#define QSERDES_V5_COM_DEBUG_BUS0 0x188
+#define QSERDES_V5_COM_DEBUG_BUS1 0x18c
+#define QSERDES_V5_COM_DEBUG_BUS2 0x190
+#define QSERDES_V5_COM_DEBUG_BUS3 0x194
+#define QSERDES_V5_COM_DEBUG_BUS_SEL 0x198
+#define QSERDES_V5_COM_CMN_MISC1 0x19c
+#define QSERDES_V5_COM_CMN_MODE 0x1a0
+#define QSERDES_V5_COM_CMN_MODE_CONTD 0x1a4
+#define QSERDES_V5_COM_VCO_DC_LEVEL_CTRL 0x1a8
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+#define QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+#define QSERDES_V5_COM_RESERVED_1 0x1c0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h
new file mode 100644
index 000000000000..fbaf6ef467f8
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_H_
+#define QCOM_PHY_QMP_QSERDES_COM_H_
+
+/* Only for QMP V2 PHY - QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1 0x000
+#define QSERDES_COM_ATB_SEL2 0x004
+#define QSERDES_COM_FREQ_UPDATE 0x008
+#define QSERDES_COM_BG_TIMER 0x00c
+#define QSERDES_COM_SSC_EN_CENTER 0x010
+#define QSERDES_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_COM_SSC_PER1 0x01c
+#define QSERDES_COM_SSC_PER2 0x020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x028
+#define QSERDES_COM_POST_DIV 0x02c
+#define QSERDES_COM_POST_DIV_MUX 0x030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
+#define QSERDES_COM_CLK_ENABLE1 0x038
+#define QSERDES_COM_SYS_CLK_CTRL 0x03c
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
+#define QSERDES_COM_PLL_EN 0x044
+#define QSERDES_COM_PLL_IVCO 0x048
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x04c
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
+#define QSERDES_COM_LOCK_CMP1_MODE1 0x058
+#define QSERDES_COM_LOCK_CMP2_MODE1 0x05c
+#define QSERDES_COM_LOCK_CMP3_MODE1 0x060
+#define QSERDES_COM_LOCK_CMP1_MODE2 0x064
+#define QSERDES_COM_CMN_RSVD0 0x064
+#define QSERDES_COM_LOCK_CMP2_MODE2 0x068
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x068
+#define QSERDES_COM_LOCK_CMP3_MODE2 0x06c
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x06c
+#define QSERDES_COM_BG_TRIM 0x070
+#define QSERDES_COM_CLK_EP_DIV 0x074
+#define QSERDES_COM_CP_CTRL_MODE0 0x078
+#define QSERDES_COM_CP_CTRL_MODE1 0x07c
+#define QSERDES_COM_CP_CTRL_MODE2 0x080
+#define QSERDES_COM_CMN_RSVD1 0x080
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x088
+#define QSERDES_COM_PLL_RCTRL_MODE2 0x08c
+#define QSERDES_COM_CMN_RSVD2 0x08c
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x094
+#define QSERDES_COM_PLL_CCTRL_MODE2 0x098
+#define QSERDES_COM_CMN_RSVD3 0x098
+#define QSERDES_COM_PLL_CNTRL 0x09c
+#define QSERDES_COM_PHASE_SEL_CTRL 0x0a0
+#define QSERDES_COM_PHASE_SEL_DC 0x0a4
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x0a8
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x0a8
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0ac
+#define QSERDES_COM_CML_SYSCLK_SEL 0x0b0
+#define QSERDES_COM_RESETSM_CNTRL 0x0b4
+#define QSERDES_COM_RESETSM_CNTRL2 0x0b8
+#define QSERDES_COM_RESTRIM_CTRL 0x0bc
+#define QSERDES_COM_RESTRIM_CTRL2 0x0c0
+#define QSERDES_COM_RESCODE_DIV_NUM 0x0c4
+#define QSERDES_COM_LOCK_CMP_EN 0x0c8
+#define QSERDES_COM_LOCK_CMP_CFG 0x0cc
+#define QSERDES_COM_DEC_START_MODE0 0x0d0
+#define QSERDES_COM_DEC_START_MODE1 0x0d4
+#define QSERDES_COM_DEC_START_MODE2 0x0d8
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x0d8
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0dc
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0e0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0e4
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x0e8
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x0ec
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x0f0
+#define QSERDES_COM_DIV_FRAC_START1_MODE2 0x0f4
+#define QSERDES_COM_VCO_TUNE_MINVAL1 0x0f4
+#define QSERDES_COM_DIV_FRAC_START2_MODE2 0x0f8
+#define QSERDES_COM_VCO_TUNE_MINVAL2 0x0f8
+#define QSERDES_COM_DIV_FRAC_START3_MODE2 0x0fc
+#define QSERDES_COM_CMN_RSVD4 0x0fc
+#define QSERDES_COM_INTEGLOOP_INITVAL 0x100
+#define QSERDES_COM_INTEGLOOP_EN 0x104
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10c
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x110
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x114
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x118
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 0x118
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x11c
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 0x11c
+#define QSERDES_COM_RES_TRIM_CONTROL2 0x120
+#define QSERDES_COM_VCO_TUNE_CTRL 0x124
+#define QSERDES_COM_VCO_TUNE_MAP 0x128
+#define QSERDES_COM_VCO_TUNE1_MODE0 0x12c
+#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
+#define QSERDES_COM_VCO_TUNE1_MODE1 0x134
+#define QSERDES_COM_VCO_TUNE2_MODE1 0x138
+#define QSERDES_COM_VCO_TUNE1_MODE2 0x13c
+#define QSERDES_COM_VCO_TUNE_INITVAL1 0x13c
+#define QSERDES_COM_VCO_TUNE2_MODE2 0x140
+#define QSERDES_COM_VCO_TUNE_INITVAL2 0x140
+#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
+#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
+#define QSERDES_COM_SAR 0x14c
+#define QSERDES_COM_SAR_CLK 0x150
+#define QSERDES_COM_SAR_CODE_OUT_STATUS 0x154
+#define QSERDES_COM_SAR_CODE_READY_STATUS 0x158
+#define QSERDES_COM_CMN_STATUS 0x15c
+#define QSERDES_COM_RESET_SM_STATUS 0x160
+#define QSERDES_COM_RESTRIM_CODE_STATUS 0x164
+#define QSERDES_COM_PLLCAL_CODE1_STATUS 0x168
+#define QSERDES_COM_PLLCAL_CODE2_STATUS 0x16c
+#define QSERDES_COM_BG_CTRL 0x170
+#define QSERDES_COM_CLK_SELECT 0x174
+#define QSERDES_COM_HSCLK_SEL 0x178
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x17c
+#define QSERDES_COM_PLL_ANALOG 0x180
+#define QSERDES_COM_CORECLK_DIV 0x184
+#define QSERDES_COM_SW_RESET 0x188
+#define QSERDES_COM_CORE_CLK_EN 0x18c
+#define QSERDES_COM_C_READY_STATUS 0x190
+#define QSERDES_COM_CMN_CONFIG 0x194
+#define QSERDES_COM_CMN_RATE_OVERRIDE 0x198
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19c
+#define QSERDES_COM_DEBUG_BUS0 0x1a0
+#define QSERDES_COM_DEBUG_BUS1 0x1a4
+#define QSERDES_COM_DEBUG_BUS2 0x1a8
+#define QSERDES_COM_DEBUG_BUS3 0x1ac
+#define QSERDES_COM_DEBUG_BUS_SEL 0x1b0
+#define QSERDES_COM_CMN_MISC1 0x1b4
+#define QSERDES_COM_CMN_MISC2 0x1b8
+#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
+#define QSERDES_COM_CORECLK_DIV_MODE2 0x1c0
+#define QSERDES_COM_CMN_RSVD5 0x1c0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
new file mode 100644
index 000000000000..ad326e301a3a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_PLL_H_
+#define QCOM_PHY_QMP_QSERDES_PLL_H_
+
+/* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
+#define QSERDES_PLL_BG_TIMER 0x00c
+#define QSERDES_PLL_SSC_PER1 0x01c
+#define QSERDES_PLL_SSC_PER2 0x020
+#define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_PLL_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_PLL_SSC_STEP_SIZE1_MODE1 0x02c
+#define QSERDES_PLL_SSC_STEP_SIZE2_MODE1 0x030
+#define QSERDES_PLL_BIAS_EN_CLKBUFLR_EN 0x03c
+#define QSERDES_PLL_CLK_ENABLE1 0x040
+#define QSERDES_PLL_SYS_CLK_CTRL 0x044
+#define QSERDES_PLL_SYSCLK_BUF_ENABLE 0x048
+#define QSERDES_PLL_PLL_IVCO 0x050
+#define QSERDES_PLL_LOCK_CMP1_MODE0 0x054
+#define QSERDES_PLL_LOCK_CMP2_MODE0 0x058
+#define QSERDES_PLL_LOCK_CMP1_MODE1 0x060
+#define QSERDES_PLL_LOCK_CMP2_MODE1 0x064
+#define QSERDES_PLL_BG_TRIM 0x074
+#define QSERDES_PLL_CLK_EP_DIV_MODE0 0x078
+#define QSERDES_PLL_CLK_EP_DIV_MODE1 0x07c
+#define QSERDES_PLL_CP_CTRL_MODE0 0x080
+#define QSERDES_PLL_CP_CTRL_MODE1 0x084
+#define QSERDES_PLL_PLL_RCTRL_MODE0 0x088
+#define QSERDES_PLL_PLL_RCTRL_MODE1 0x08c
+#define QSERDES_PLL_PLL_CCTRL_MODE0 0x090
+#define QSERDES_PLL_PLL_CCTRL_MODE1 0x094
+#define QSERDES_PLL_BIAS_EN_CTRL_BY_PSM 0x0a4
+#define QSERDES_PLL_SYSCLK_EN_SEL 0x0a8
+#define QSERDES_PLL_RESETSM_CNTRL 0x0b0
+#define QSERDES_PLL_LOCK_CMP_EN 0x0c4
+#define QSERDES_PLL_DEC_START_MODE0 0x0cc
+#define QSERDES_PLL_DEC_START_MODE1 0x0d0
+#define QSERDES_PLL_DIV_FRAC_START1_MODE0 0x0d8
+#define QSERDES_PLL_DIV_FRAC_START2_MODE0 0x0dc
+#define QSERDES_PLL_DIV_FRAC_START3_MODE0 0x0e0
+#define QSERDES_PLL_DIV_FRAC_START1_MODE1 0x0e4
+#define QSERDES_PLL_DIV_FRAC_START2_MODE1 0x0e8
+#define QSERDES_PLL_DIV_FRAC_START3_MODE1 0x0ec
+#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE0 0x100
+#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE0 0x104
+#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE1 0x108
+#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE1 0x10c
+#define QSERDES_PLL_VCO_TUNE_MAP 0x120
+#define QSERDES_PLL_VCO_TUNE1_MODE0 0x124
+#define QSERDES_PLL_VCO_TUNE2_MODE0 0x128
+#define QSERDES_PLL_VCO_TUNE1_MODE1 0x12c
+#define QSERDES_PLL_VCO_TUNE2_MODE1 0x130
+#define QSERDES_PLL_VCO_TUNE_TIMER1 0x13c
+#define QSERDES_PLL_VCO_TUNE_TIMER2 0x140
+#define QSERDES_PLL_CLK_SELECT 0x16c
+#define QSERDES_PLL_HSCLK_SEL 0x170
+#define QSERDES_PLL_CORECLK_DIV 0x17c
+#define QSERDES_PLL_CORE_CLK_EN 0x184
+#define QSERDES_PLL_CMN_CONFIG 0x18c
+#define QSERDES_PLL_SVS_MODE_CLK_SEL 0x194
+#define QSERDES_PLL_CORECLK_DIV_MODE1 0x1b4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h
new file mode 100644
index 000000000000..161e6df30ea8
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v3.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V3_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V3_H_
+
+/* Only for QMP V3 PHY - TX registers */
+#define QSERDES_V3_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V3_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V3_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V3_TX_TX_DRV_LVL 0x01c
+#define QSERDES_V3_TX_RESET_TSYNC_EN 0x024
+#define QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN 0x028
+#define QSERDES_V3_TX_TX_BAND 0x02c
+#define QSERDES_V3_TX_SLEW_CNTL 0x030
+#define QSERDES_V3_TX_INTERFACE_SELECT 0x034
+#define QSERDES_V3_TX_RES_CODE_LANE_TX 0x03c
+#define QSERDES_V3_TX_RES_CODE_LANE_RX 0x040
+#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044
+#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX 0x048
+#define QSERDES_V3_TX_DEBUG_BUS_SEL 0x058
+#define QSERDES_V3_TX_TRANSCEIVER_BIAS_EN 0x05c
+#define QSERDES_V3_TX_HIGHZ_DRVR_EN 0x060
+#define QSERDES_V3_TX_TX_POL_INV 0x064
+#define QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN 0x068
+#define QSERDES_V3_TX_LANE_MODE_1 0x08c
+#define QSERDES_V3_TX_LANE_MODE_2 0x090
+#define QSERDES_V3_TX_LANE_MODE_3 0x094
+#define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4
+#define QSERDES_V3_TX_TRAN_DRVR_EMP_EN 0x0c0
+#define QSERDES_V3_TX_TX_INTERFACE_MODE 0x0c4
+#define QSERDES_V3_TX_VMODE_CTRL1 0x0f0
+
+/* Only for QMP V3 PHY - RX registers */
+#define QSERDES_V3_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
+#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN 0x02c
+#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V3_RX_RX_TERM_BW 0x07c
+#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
+#define QSERDES_V3_RX_VGA_CAL_CNTRL2 0x0c0
+#define QSERDES_V3_RX_RX_EQ_GAIN2_LSB 0x0c8
+#define QSERDES_V3_RX_RX_EQ_GAIN2_MSB 0x0cc
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL1 0x0d0
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d4
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3 0x0d8
+#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4 0x0dc
+#define QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x0f8
+#define QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x0fc
+#define QSERDES_V3_RX_SIGDET_ENABLES 0x100
+#define QSERDES_V3_RX_SIGDET_CNTRL 0x104
+#define QSERDES_V3_RX_SIGDET_LVL 0x108
+#define QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL 0x10c
+#define QSERDES_V3_RX_RX_BAND 0x110
+#define QSERDES_V3_RX_RX_INTERFACE_MODE 0x11c
+#define QSERDES_V3_RX_RX_MODE_00 0x164
+#define QSERDES_V3_RX_RX_MODE_01 0x168
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h
new file mode 100644
index 000000000000..6ee3bec9ac4a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V4_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V4_H_
+
+/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V4_TX_BIST_INVERT 0x004
+#define QSERDES_V4_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V4_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V4_TX_TX_IDLE_LVL_LARGE_AMP 0x010
+#define QSERDES_V4_TX_TX_DRV_LVL 0x014
+#define QSERDES_V4_TX_TX_DRV_LVL_OFFSET 0x018
+#define QSERDES_V4_TX_RESET_TSYNC_EN 0x01c
+#define QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN 0x020
+#define QSERDES_V4_TX_TX_BAND 0x024
+#define QSERDES_V4_TX_SLEW_CNTL 0x028
+#define QSERDES_V4_TX_INTERFACE_SELECT 0x02c
+#define QSERDES_V4_TX_LPB_EN 0x030
+#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x034
+#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x038
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX 0x03c
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX 0x040
+#define QSERDES_V4_TX_PERL_LENGTH1 0x044
+#define QSERDES_V4_TX_PERL_LENGTH2 0x048
+#define QSERDES_V4_TX_SERDES_BYP_EN_OUT 0x04c
+#define QSERDES_V4_TX_DEBUG_BUS_SEL 0x050
+#define QSERDES_V4_TX_TRANSCEIVER_BIAS_EN 0x054
+#define QSERDES_V4_TX_HIGHZ_DRVR_EN 0x058
+#define QSERDES_V4_TX_TX_POL_INV 0x05c
+#define QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN 0x060
+#define QSERDES_V4_TX_BIST_PATTERN1 0x064
+#define QSERDES_V4_TX_BIST_PATTERN2 0x068
+#define QSERDES_V4_TX_BIST_PATTERN3 0x06c
+#define QSERDES_V4_TX_BIST_PATTERN4 0x070
+#define QSERDES_V4_TX_BIST_PATTERN5 0x074
+#define QSERDES_V4_TX_BIST_PATTERN6 0x078
+#define QSERDES_V4_TX_BIST_PATTERN7 0x07c
+#define QSERDES_V4_TX_BIST_PATTERN8 0x080
+#define QSERDES_V4_TX_LANE_MODE_1 0x084
+#define QSERDES_V4_TX_LANE_MODE_2 0x088
+#define QSERDES_V4_TX_LANE_MODE_3 0x08c
+#define QSERDES_V4_TX_ATB_SEL1 0x090
+#define QSERDES_V4_TX_ATB_SEL2 0x094
+#define QSERDES_V4_TX_RCV_DETECT_LVL 0x098
+#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x09c
+#define QSERDES_V4_TX_PRBS_SEED1 0x0a0
+#define QSERDES_V4_TX_PRBS_SEED2 0x0a4
+#define QSERDES_V4_TX_PRBS_SEED3 0x0a8
+#define QSERDES_V4_TX_PRBS_SEED4 0x0ac
+#define QSERDES_V4_TX_RESET_GEN 0x0b0
+#define QSERDES_V4_TX_RESET_GEN_MUXES 0x0b4
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0x0b8
+#define QSERDES_V4_TX_TX_INTERFACE_MODE 0x0bc
+#define QSERDES_V4_TX_PWM_CTRL 0x0c0
+#define QSERDES_V4_TX_PWM_ENCODED_OR_DATA 0x0c4
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND2 0x0c8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND2 0x0cc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND2 0x0d0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND2 0x0d4
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x0d8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x0dc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x0e0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x0e4
+#define QSERDES_V4_TX_VMODE_CTRL1 0x0e8
+#define QSERDES_V4_TX_ALOG_OBSV_BUS_CTRL_1 0x0ec
+#define QSERDES_V4_TX_BIST_STATUS 0x0f0
+#define QSERDES_V4_TX_BIST_ERROR_COUNT1 0x0f4
+#define QSERDES_V4_TX_BIST_ERROR_COUNT2 0x0f8
+#define QSERDES_V4_TX_ALOG_OBSV_BUS_STATUS_1 0x0fc
+#define QSERDES_V4_TX_LANE_DIG_CONFIG 0x100
+#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
+#define QSERDES_V4_TX_PRE_EMPH 0x108
+#define QSERDES_V4_TX_SW_RESET 0x10c
+#define QSERDES_V4_TX_DCC_OFFSET 0x110
+#define QSERDES_V4_TX_DIG_BKUP_CTRL 0x114
+#define QSERDES_V4_TX_DEBUG_BUS0 0x118
+#define QSERDES_V4_TX_DEBUG_BUS1 0x11c
+#define QSERDES_V4_TX_DEBUG_BUS2 0x120
+#define QSERDES_V4_TX_DEBUG_BUS3 0x124
+#define QSERDES_V4_TX_READ_EQCODE 0x128
+#define QSERDES_V4_TX_READ_OFFSETCODE 0x12c
+#define QSERDES_V4_TX_IA_ERROR_COUNTER_LOW 0x130
+#define QSERDES_V4_TX_IA_ERROR_COUNTER_HIGH 0x134
+#define QSERDES_V4_TX_VGA_READ_CODE 0x138
+#define QSERDES_V4_TX_VTH_READ_CODE 0x13c
+#define QSERDES_V4_TX_DFE_TAP1_READ_CODE 0x140
+#define QSERDES_V4_TX_DFE_TAP2_READ_CODE 0x144
+#define QSERDES_V4_TX_IDAC_STATUS_I 0x148
+#define QSERDES_V4_TX_IDAC_STATUS_IBAR 0x14c
+#define QSERDES_V4_TX_IDAC_STATUS_Q 0x150
+#define QSERDES_V4_TX_IDAC_STATUS_QBAR 0x154
+#define QSERDES_V4_TX_IDAC_STATUS_A 0x158
+#define QSERDES_V4_TX_IDAC_STATUS_ABAR 0x15c
+#define QSERDES_V4_TX_IDAC_STATUS_SM_ON 0x160
+#define QSERDES_V4_TX_IDAC_STATUS_CAL_DONE 0x164
+#define QSERDES_V4_TX_IDAC_STATUS_SIGNERROR 0x168
+#define QSERDES_V4_TX_DCC_CAL_STATUS 0x16c
+
+/* Only for QMP V4 PHY - RX registers */
+#define QSERDES_V4_RX_UCDR_FO_GAIN_HALF 0x000
+#define QSERDES_V4_RX_UCDR_FO_GAIN_QUARTER 0x004
+#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V4_RX_UCDR_SO_GAIN_HALF 0x00c
+#define QSERDES_V4_RX_UCDR_SO_GAIN_QUARTER 0x010
+#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V4_RX_UCDR_SVS_FO_GAIN_HALF 0x018
+#define QSERDES_V4_RX_UCDR_SVS_FO_GAIN_QUARTER 0x01c
+#define QSERDES_V4_RX_UCDR_SVS_FO_GAIN 0x020
+#define QSERDES_V4_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V4_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V4_RX_UCDR_SVS_SO_GAIN 0x02c
+#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V4_RX_UCDR_FO_TO_SO_DELAY 0x038
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V4_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V4_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V4_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V4_RX_AUX_CONTROL 0x05c
+#define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE 0x060
+#define QSERDES_V4_RX_RCLK_AUXDATA_SEL 0x064
+#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V4_RX_AC_JTAG_INITP 0x06c
+#define QSERDES_V4_RX_AC_JTAG_INITN 0x070
+#define QSERDES_V4_RX_AC_JTAG_LVL 0x074
+#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V4_RX_AC_JTAG_RESET 0x07c
+#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_RX_RCVR_IQ_EN 0x084
+#define QSERDES_V4_RX_RX_IDAC_I_DC_OFFSETS 0x088
+#define QSERDES_V4_RX_RX_IDAC_IBAR_DC_OFFSETS 0x08c
+#define QSERDES_V4_RX_RX_IDAC_Q_DC_OFFSETS 0x090
+#define QSERDES_V4_RX_RX_IDAC_QBAR_DC_OFFSETS 0x094
+#define QSERDES_V4_RX_RX_IDAC_A_DC_OFFSETS 0x098
+#define QSERDES_V4_RX_RX_IDAC_ABAR_DC_OFFSETS 0x09c
+#define QSERDES_V4_RX_RX_IDAC_EN 0x0a0
+#define QSERDES_V4_RX_RX_IDAC_ENABLES 0x0a4
+#define QSERDES_V4_RX_RX_IDAC_SIGN 0x0a8
+#define QSERDES_V4_RX_RX_HIGHZ_HIGHRATE 0x0ac
+#define QSERDES_V4_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x0b0
+#define QSERDES_V4_RX_DFE_1 0x0b4
+#define QSERDES_V4_RX_DFE_2 0x0b8
+#define QSERDES_V4_RX_DFE_3 0x0bc
+#define QSERDES_V4_RX_DFE_4 0x0c0
+#define QSERDES_V4_RX_TX_ADAPT_PRE_THRESH1 0x0c4
+#define QSERDES_V4_RX_TX_ADAPT_PRE_THRESH2 0x0c8
+#define QSERDES_V4_RX_TX_ADAPT_POST_THRESH 0x0cc
+#define QSERDES_V4_RX_TX_ADAPT_MAIN_THRESH 0x0d0
+#define QSERDES_V4_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V4_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V4_RX_GM_CAL 0x0dc
+#define QSERDES_V4_RX_RX_VGA_GAIN2_LSB 0x0e0
+#define QSERDES_V4_RX_RX_VGA_GAIN2_MSB 0x0e4
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_IDAC_ACCUMULATOR 0x104
+#define QSERDES_V4_RX_RX_EQ_OFFSET_LSB 0x108
+#define QSERDES_V4_RX_RX_EQ_OFFSET_MSB 0x10c
+#define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V4_RX_SIGDET_ENABLES 0x118
+#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V4_RX_SIGDET_LVL 0x120
+#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V4_RX_RX_BAND 0x128
+#define QSERDES_V4_RX_CDR_FREEZE_UP_DN 0x12c
+#define QSERDES_V4_RX_CDR_RESET_OVERRIDE 0x130
+#define QSERDES_V4_RX_RX_INTERFACE_MODE 0x134
+#define QSERDES_V4_RX_JITTER_GEN_MODE 0x138
+#define QSERDES_V4_RX_SJ_AMP1 0x13c
+#define QSERDES_V4_RX_SJ_AMP2 0x140
+#define QSERDES_V4_RX_SJ_PER1 0x144
+#define QSERDES_V4_RX_SJ_PER2 0x148
+#define QSERDES_V4_RX_PPM_OFFSET1 0x14c
+#define QSERDES_V4_RX_PPM_OFFSET2 0x150
+#define QSERDES_V4_RX_SIGN_PPM_PERIOD1 0x154
+#define QSERDES_V4_RX_SIGN_PPM_PERIOD2 0x158
+#define QSERDES_V4_RX_RX_PWM_ENABLE_AND_DATA 0x15c
+#define QSERDES_V4_RX_RX_PWM_GEAR1_TIMEOUT_COUNT 0x160
+#define QSERDES_V4_RX_RX_PWM_GEAR2_TIMEOUT_COUNT 0x164
+#define QSERDES_V4_RX_RX_PWM_GEAR3_TIMEOUT_COUNT 0x168
+#define QSERDES_V4_RX_RX_PWM_GEAR4_TIMEOUT_COUNT 0x16c
+#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
+#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
+#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
+#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
+#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
+#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
+#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
+#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
+#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
+#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
+#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_PHPRE_CTRL 0x1ac
+#define QSERDES_V4_RX_PHPRE_INITVAL 0x1b0
+#define QSERDES_V4_RX_DFE_EN_TIMER 0x1b4
+#define QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET 0x1b8
+#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+#define QSERDES_V4_RX_DCC_CTRL2 0x1c0
+#define QSERDES_V4_RX_VTH_CODE 0x1c4
+#define QSERDES_V4_RX_VTH_MIN_THRESH 0x1c8
+#define QSERDES_V4_RX_VTH_MAX_THRESH 0x1cc
+#define QSERDES_V4_RX_ALOG_OBSV_BUS_CTRL_1 0x1d0
+#define QSERDES_V4_RX_PI_CTRL1 0x1d4
+#define QSERDES_V4_RX_PI_CTRL2 0x1d8
+#define QSERDES_V4_RX_PI_QUAD 0x1dc
+#define QSERDES_V4_RX_IDATA1 0x1e0
+#define QSERDES_V4_RX_IDATA2 0x1e4
+#define QSERDES_V4_RX_AUX_DATA1 0x1e8
+#define QSERDES_V4_RX_AUX_DATA2 0x1ec
+#define QSERDES_V4_RX_AC_JTAG_OUTP 0x1f0
+#define QSERDES_V4_RX_AC_JTAG_OUTN 0x1f4
+#define QSERDES_V4_RX_RX_SIGDET 0x1f8
+#define QSERDES_V4_RX_ALOG_OBSV_BUS_STATUS_1 0x1fc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h
new file mode 100644
index 000000000000..114570f3017f
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v4_20.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V4_20_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V4_20_H_
+
+/* Only for QMP V4_20 PHY - TX registers */
+#define QSERDES_V4_20_TX_LANE_MODE_1 0x88
+#define QSERDES_V4_20_TX_LANE_MODE_2 0x8c
+#define QSERDES_V4_20_TX_LANE_MODE_3 0x90
+#define QSERDES_V4_20_TX_VMODE_CTRL1 0xc4
+#define QSERDES_V4_20_TX_PI_QEC_CTRL 0xe0
+
+/* Only for QMP V4_20 PHY - RX registers */
+#define QSERDES_V4_20_RX_FO_GAIN_RATE2 0x008
+#define QSERDES_V4_20_RX_UCDR_PI_CONTROLS 0x058
+#define QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE 0x0ac
+#define QSERDES_V4_20_RX_DFE_3 0x110
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE1 0x134
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE2 0x138
+#define QSERDES_V4_20_RX_VGA_CAL_CNTRL2 0x150
+#define QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x178
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1 0x1c8
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2 0x1cc
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3 0x1d0
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4 0x1d4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B0 0x1d8
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B1 0x1dc
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B2 0x1e0
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B3 0x1e4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B4 0x1e8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B0 0x1ec
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B1 0x1f0
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B2 0x1f4
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B3 0x1f8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B4 0x1fc
+#define QSERDES_V4_20_RX_PHPRE_CTRL 0x200
+#define QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x20c
+#define QSERDES_V4_20_RX_MARG_COARSE_CTRL2 0x23c
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h
new file mode 100644
index 000000000000..fe8f3e330d09
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5.h
@@ -0,0 +1,231 @@
+
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V5_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V5_H_
+
+/* Only for QMP V5 PHY - TX registers */
+#define QSERDES_V5_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V5_TX_BIST_INVERT 0x004
+#define QSERDES_V5_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V5_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V5_TX_TX_IDLE_LVL_LARGE_AMP 0x010
+#define QSERDES_V5_TX_TX_DRV_LVL 0x014
+#define QSERDES_V5_TX_TX_DRV_LVL_OFFSET 0x018
+#define QSERDES_V5_TX_RESET_TSYNC_EN 0x01c
+#define QSERDES_V5_TX_PRE_STALL_LDO_BOOST_EN 0x020
+#define QSERDES_V5_TX_TX_BAND 0x024
+#define QSERDES_V5_TX_SLEW_CNTL 0x028
+#define QSERDES_V5_TX_INTERFACE_SELECT 0x02c
+#define QSERDES_V5_TX_LPB_EN 0x030
+#define QSERDES_V5_TX_RES_CODE_LANE_TX 0x034
+#define QSERDES_V5_TX_RES_CODE_LANE_RX 0x038
+#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX 0x03c
+#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX 0x040
+#define QSERDES_V5_TX_PERL_LENGTH1 0x044
+#define QSERDES_V5_TX_PERL_LENGTH2 0x048
+#define QSERDES_V5_TX_SERDES_BYP_EN_OUT 0x04c
+#define QSERDES_V5_TX_DEBUG_BUS_SEL 0x050
+#define QSERDES_V5_TX_TRANSCEIVER_BIAS_EN 0x054
+#define QSERDES_V5_TX_HIGHZ_DRVR_EN 0x058
+#define QSERDES_V5_TX_TX_POL_INV 0x05c
+#define QSERDES_V5_TX_PARRATE_REC_DETECT_IDLE_EN 0x060
+#define QSERDES_V5_TX_BIST_PATTERN1 0x064
+#define QSERDES_V5_TX_BIST_PATTERN2 0x068
+#define QSERDES_V5_TX_BIST_PATTERN3 0x06c
+#define QSERDES_V5_TX_BIST_PATTERN4 0x070
+#define QSERDES_V5_TX_BIST_PATTERN5 0x074
+#define QSERDES_V5_TX_BIST_PATTERN6 0x078
+#define QSERDES_V5_TX_BIST_PATTERN7 0x07c
+#define QSERDES_V5_TX_BIST_PATTERN8 0x080
+#define QSERDES_V5_TX_LANE_MODE_1 0x084
+#define QSERDES_V5_TX_LANE_MODE_2 0x088
+#define QSERDES_V5_TX_LANE_MODE_3 0x08c
+#define QSERDES_V5_TX_LANE_MODE_4 0x090
+#define QSERDES_V5_TX_LANE_MODE_5 0x094
+#define QSERDES_V5_TX_ATB_SEL1 0x098
+#define QSERDES_V5_TX_ATB_SEL2 0x09c
+#define QSERDES_V5_TX_RCV_DETECT_LVL 0x0a0
+#define QSERDES_V5_TX_RCV_DETECT_LVL_2 0x0a4
+#define QSERDES_V5_TX_PRBS_SEED1 0x0a8
+#define QSERDES_V5_TX_PRBS_SEED2 0x0ac
+#define QSERDES_V5_TX_PRBS_SEED3 0x0b0
+#define QSERDES_V5_TX_PRBS_SEED4 0x0b4
+#define QSERDES_V5_TX_RESET_GEN 0x0b8
+#define QSERDES_V5_TX_RESET_GEN_MUXES 0x0bc
+#define QSERDES_V5_TX_TRAN_DRVR_EMP_EN 0x0c0
+#define QSERDES_V5_TX_TX_INTERFACE_MODE 0x0c4
+#define QSERDES_V5_TX_VMODE_CTRL1 0x0c8
+#define QSERDES_V5_TX_ALOG_OBSV_BUS_CTRL_1 0x0cc
+#define QSERDES_V5_TX_BIST_STATUS 0x0d0
+#define QSERDES_V5_TX_BIST_ERROR_COUNT1 0x0d4
+#define QSERDES_V5_TX_BIST_ERROR_COUNT2 0x0d8
+#define QSERDES_V5_TX_ALOG_OBSV_BUS_STATUS_1 0x0dc
+#define QSERDES_V5_TX_LANE_DIG_CONFIG 0x0e0
+#define QSERDES_V5_TX_PI_QEC_CTRL 0x0e4
+#define QSERDES_V5_TX_PRE_EMPH 0x0e8
+#define QSERDES_V5_TX_SW_RESET 0x0ec
+#define QSERDES_V5_TX_DCC_OFFSET 0x0f0
+#define QSERDES_V5_TX_DCC_CMUX_POSTCAL_OFFSET 0x0f4
+#define QSERDES_V5_TX_DCC_CMUX_CAL_CTRL1 0x0f8
+#define QSERDES_V5_TX_DCC_CMUX_CAL_CTRL2 0x0fc
+#define QSERDES_V5_TX_DIG_BKUP_CTRL 0x100
+#define QSERDES_V5_TX_DEBUG_BUS0 0x104
+#define QSERDES_V5_TX_DEBUG_BUS1 0x108
+#define QSERDES_V5_TX_DEBUG_BUS2 0x10c
+#define QSERDES_V5_TX_DEBUG_BUS3 0x110
+#define QSERDES_V5_TX_READ_EQCODE 0x114
+#define QSERDES_V5_TX_READ_OFFSETCODE 0x118
+#define QSERDES_V5_TX_IA_ERROR_COUNTER_LOW 0x11c
+#define QSERDES_V5_TX_IA_ERROR_COUNTER_HIGH 0x120
+#define QSERDES_V5_TX_VGA_READ_CODE 0x124
+#define QSERDES_V5_TX_VTH_READ_CODE 0x128
+#define QSERDES_V5_TX_DFE_TAP1_READ_CODE 0x12c
+#define QSERDES_V5_TX_DFE_TAP2_READ_CODE 0x130
+#define QSERDES_V5_TX_IDAC_STATUS_I 0x134
+#define QSERDES_V5_TX_IDAC_STATUS_IBAR 0x138
+#define QSERDES_V5_TX_IDAC_STATUS_Q 0x13c
+#define QSERDES_V5_TX_IDAC_STATUS_QBAR 0x140
+#define QSERDES_V5_TX_IDAC_STATUS_A 0x144
+#define QSERDES_V5_TX_IDAC_STATUS_ABAR 0x148
+#define QSERDES_V5_TX_IDAC_STATUS_SM_ON 0x14c
+#define QSERDES_V5_TX_IDAC_STATUS_CAL_DONE 0x150
+#define QSERDES_V5_TX_IDAC_STATUS_SIGNERROR 0x154
+#define QSERDES_V5_TX_DCC_CAL_STATUS 0x158
+#define QSERDES_V5_TX_DCC_READ_CODE_STATUS 0x15c
+
+/* Only for QMP V5 PHY - RX registers */
+#define QSERDES_V5_RX_UCDR_FO_GAIN_HALF 0x000
+#define QSERDES_V5_RX_UCDR_FO_GAIN_QUARTER 0x004
+#define QSERDES_V5_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V5_RX_UCDR_SO_GAIN_HALF 0x00c
+#define QSERDES_V5_RX_UCDR_SO_GAIN_QUARTER 0x010
+#define QSERDES_V5_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V5_RX_UCDR_SVS_FO_GAIN_HALF 0x018
+#define QSERDES_V5_RX_UCDR_SVS_FO_GAIN_QUARTER 0x01c
+#define QSERDES_V5_RX_UCDR_SVS_FO_GAIN 0x020
+#define QSERDES_V5_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V5_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V5_RX_UCDR_SVS_SO_GAIN 0x02c
+#define QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V5_RX_UCDR_FO_TO_SO_DELAY 0x038
+#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V5_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V5_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V5_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V5_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V5_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V5_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V5_RX_AUX_CONTROL 0x05c
+#define QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE 0x060
+#define QSERDES_V5_RX_RCLK_AUXDATA_SEL 0x064
+#define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V5_RX_AC_JTAG_INITP 0x06c
+#define QSERDES_V5_RX_AC_JTAG_INITN 0x070
+#define QSERDES_V5_RX_AC_JTAG_LVL 0x074
+#define QSERDES_V5_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V5_RX_AC_JTAG_RESET 0x07c
+#define QSERDES_V5_RX_RX_TERM_BW 0x080
+#define QSERDES_V5_RX_RX_RCVR_IQ_EN 0x084
+#define QSERDES_V5_RX_RX_IDAC_I_DC_OFFSETS 0x088
+#define QSERDES_V5_RX_RX_IDAC_IBAR_DC_OFFSETS 0x08c
+#define QSERDES_V5_RX_RX_IDAC_Q_DC_OFFSETS 0x090
+#define QSERDES_V5_RX_RX_IDAC_QBAR_DC_OFFSETS 0x094
+#define QSERDES_V5_RX_RX_IDAC_A_DC_OFFSETS 0x098
+#define QSERDES_V5_RX_RX_IDAC_ABAR_DC_OFFSETS 0x09c
+#define QSERDES_V5_RX_RX_IDAC_EN 0x0a0
+#define QSERDES_V5_RX_RX_IDAC_ENABLES 0x0a4
+#define QSERDES_V5_RX_RX_IDAC_SIGN 0x0a8
+#define QSERDES_V5_RX_RX_HIGHZ_HIGHRATE 0x0ac
+#define QSERDES_V5_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x0b0
+#define QSERDES_V5_RX_DFE_1 0x0b4
+#define QSERDES_V5_RX_DFE_2 0x0b8
+#define QSERDES_V5_RX_DFE_3 0x0bc
+#define QSERDES_V5_RX_DFE_4 0x0c0
+#define QSERDES_V5_RX_TX_ADAPT_PRE_THRESH1 0x0c4
+#define QSERDES_V5_RX_TX_ADAPT_PRE_THRESH2 0x0c8
+#define QSERDES_V5_RX_TX_ADAPT_POST_THRESH 0x0cc
+#define QSERDES_V5_RX_TX_ADAPT_MAIN_THRESH 0x0d0
+#define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V5_RX_GM_CAL 0x0dc
+#define QSERDES_V5_RX_RX_VGA_GAIN2_LSB 0x0e0
+#define QSERDES_V5_RX_RX_VGA_GAIN2_MSB 0x0e4
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V5_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V5_RX_RX_IDAC_ACCUMULATOR 0x104
+#define QSERDES_V5_RX_RX_EQ_OFFSET_LSB 0x108
+#define QSERDES_V5_RX_RX_EQ_OFFSET_MSB 0x10c
+#define QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V5_RX_SIGDET_ENABLES 0x118
+#define QSERDES_V5_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V5_RX_SIGDET_LVL 0x120
+#define QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V5_RX_RX_BAND 0x128
+#define QSERDES_V5_RX_CDR_FREEZE_UP_DN 0x12c
+#define QSERDES_V5_RX_CDR_RESET_OVERRIDE 0x130
+#define QSERDES_V5_RX_RX_INTERFACE_MODE 0x134
+#define QSERDES_V5_RX_JITTER_GEN_MODE 0x138
+#define QSERDES_V5_RX_SJ_AMP1 0x13c
+#define QSERDES_V5_RX_SJ_AMP2 0x140
+#define QSERDES_V5_RX_SJ_PER1 0x144
+#define QSERDES_V5_RX_SJ_PER2 0x148
+#define QSERDES_V5_RX_PPM_OFFSET1 0x14c
+#define QSERDES_V5_RX_PPM_OFFSET2 0x150
+#define QSERDES_V5_RX_SIGN_PPM_PERIOD1 0x154
+#define QSERDES_V5_RX_SIGN_PPM_PERIOD2 0x158
+#define QSERDES_V5_RX_RX_MODE_00_LOW 0x15c
+#define QSERDES_V5_RX_RX_MODE_00_HIGH 0x160
+#define QSERDES_V5_RX_RX_MODE_00_HIGH2 0x164
+#define QSERDES_V5_RX_RX_MODE_00_HIGH3 0x168
+#define QSERDES_V5_RX_RX_MODE_00_HIGH4 0x16c
+#define QSERDES_V5_RX_RX_MODE_01_LOW 0x170
+#define QSERDES_V5_RX_RX_MODE_01_HIGH 0x174
+#define QSERDES_V5_RX_RX_MODE_01_HIGH2 0x178
+#define QSERDES_V5_RX_RX_MODE_01_HIGH3 0x17c
+#define QSERDES_V5_RX_RX_MODE_01_HIGH4 0x180
+#define QSERDES_V5_RX_RX_MODE_10_LOW 0x184
+#define QSERDES_V5_RX_RX_MODE_10_HIGH 0x188
+#define QSERDES_V5_RX_RX_MODE_10_HIGH2 0x18c
+#define QSERDES_V5_RX_RX_MODE_10_HIGH3 0x190
+#define QSERDES_V5_RX_RX_MODE_10_HIGH4 0x194
+#define QSERDES_V5_RX_PHPRE_CTRL 0x198
+#define QSERDES_V5_RX_PHPRE_INITVAL 0x19c
+#define QSERDES_V5_RX_DFE_EN_TIMER 0x1a0
+#define QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
+#define QSERDES_V5_RX_DCC_CTRL1 0x1a8
+#define QSERDES_V5_RX_DCC_CTRL2 0x1ac
+#define QSERDES_V5_RX_VTH_CODE 0x1b0
+#define QSERDES_V5_RX_VTH_MIN_THRESH 0x1b4
+#define QSERDES_V5_RX_VTH_MAX_THRESH 0x1b8
+#define QSERDES_V5_RX_ALOG_OBSV_BUS_CTRL_1 0x1bc
+#define QSERDES_V5_RX_PI_CTRL1 0x1c0
+#define QSERDES_V5_RX_PI_CTRL2 0x1c4
+#define QSERDES_V5_RX_PI_QUAD 0x1c8
+#define QSERDES_V5_RX_IDATA1 0x1cc
+#define QSERDES_V5_RX_IDATA2 0x1d0
+#define QSERDES_V5_RX_AUX_DATA1 0x1d4
+#define QSERDES_V5_RX_AUX_DATA2 0x1d8
+#define QSERDES_V5_RX_AC_JTAG_OUTP 0x1dc
+#define QSERDES_V5_RX_AC_JTAG_OUTN 0x1e0
+#define QSERDES_V5_RX_RX_SIGDET 0x1e4
+#define QSERDES_V5_RX_ALOG_OBSV_BUS_STATUS_1 0x1e8
+
+/* Only for QMP V5 PHY - UFS TX registers */
+#define QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x178
+#define QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x17c
+#define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180
+#define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
new file mode 100644
index 000000000000..86c01104799e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V5_20_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V5_20_H_
+
+/* Only for QMP V5_20 PHY - TX registers */
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_20_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_20_TX_LANE_MODE_2 0x7c
+
+/* Only for QMP V5_20 PHY - RX registers */
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
+#define QSERDES_V5_20_RX_RX_IDAC_SAOFFSET 0x07c
+#define QSERDES_V5_20_RX_DFE_3 0x090
+#define QSERDES_V5_20_RX_DFE_DAC_ENABLE1 0x0b4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1 0x0c4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2 0x0c8
+#define QSERDES_V5_20_RX_VGA_CAL_MAN_VAL 0x0dc
+#define QSERDES_V5_20_RX_GM_CAL 0x0ec
+#define QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4 0x108
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1 0x164
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2 0x168
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3 0x16c
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5 0x174
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6 0x178
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B0 0x17c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B1 0x180
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B2 0x184
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B3 0x188
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B4 0x18c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B5 0x190
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B6 0x194
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B0 0x198
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B1 0x19c
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B2 0x1a0
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B3 0x1a4
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B4 0x1a8
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B5 0x1ac
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B6 0x1b0
+#define QSERDES_V5_20_RX_PHPRE_CTRL 0x1b4
+#define QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x1c0
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210 0x1f4
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3 0x1f8
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210 0x1fc
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3 0x200
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210 0x204
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3 0x208
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h
new file mode 100644
index 000000000000..d20694513eb4
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_H_
+
+/* Only for QMP V2 PHY - TX registers */
+#define QSERDES_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_TX_BIST_INVERT 0x004
+#define QSERDES_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_TX_CMN_CONTROL_ONE 0x00c
+#define QSERDES_TX_CMN_CONTROL_TWO 0x010
+#define QSERDES_TX_CMN_CONTROL_THREE 0x014
+#define QSERDES_TX_TX_EMP_POST1_LVL 0x018
+#define QSERDES_TX_TX_POST2_EMPH 0x01c
+#define QSERDES_TX_TX_BOOST_LVL_UP_DN 0x020
+#define QSERDES_TX_HP_PD_ENABLES 0x024
+#define QSERDES_TX_TX_IDLE_LVL_LARGE_AMP 0x028
+#define QSERDES_TX_TX_DRV_LVL 0x02c
+#define QSERDES_TX_TX_DRV_LVL_OFFSET 0x030
+#define QSERDES_TX_RESET_TSYNC_EN 0x034
+#define QSERDES_TX_PRE_STALL_LDO_BOOST_EN 0x038
+#define QSERDES_TX_TX_BAND 0x03c
+#define QSERDES_TX_SLEW_CNTL 0x040
+#define QSERDES_TX_INTERFACE_SELECT 0x044
+#define QSERDES_TX_LPB_EN 0x048
+#define QSERDES_TX_RES_CODE_LANE_TX 0x04c
+#define QSERDES_TX_RES_CODE_LANE_RX 0x050
+#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
+#define QSERDES_TX_PERL_LENGTH1 0x058
+#define QSERDES_TX_PERL_LENGTH2 0x05c
+#define QSERDES_TX_SERDES_BYP_EN_OUT 0x060
+#define QSERDES_TX_DEBUG_BUS_SEL 0x064
+#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
+#define QSERDES_TX_TX_POL_INV 0x06c
+#define QSERDES_TX_PARRATE_REC_DETECT_IDLE_EN 0x070
+#define QSERDES_TX_BIST_PATTERN1 0x074
+#define QSERDES_TX_BIST_PATTERN2 0x078
+#define QSERDES_TX_BIST_PATTERN3 0x07c
+#define QSERDES_TX_BIST_PATTERN4 0x080
+#define QSERDES_TX_BIST_PATTERN5 0x084
+#define QSERDES_TX_BIST_PATTERN6 0x088
+#define QSERDES_TX_BIST_PATTERN7 0x08c
+#define QSERDES_TX_BIST_PATTERN8 0x090
+#define QSERDES_TX_LANE_MODE 0x094
+#define QSERDES_TX_IDAC_CAL_LANE_MODE 0x098
+#define QSERDES_TX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x09c
+#define QSERDES_TX_ATB_SEL1 0x0a0
+#define QSERDES_TX_ATB_SEL2 0x0a4
+#define QSERDES_TX_RCV_DETECT_LVL 0x0a8
+#define QSERDES_TX_RCV_DETECT_LVL_2 0x0ac
+#define QSERDES_TX_PRBS_SEED1 0x0b0
+#define QSERDES_TX_PRBS_SEED2 0x0b4
+#define QSERDES_TX_PRBS_SEED3 0x0b8
+#define QSERDES_TX_PRBS_SEED4 0x0bc
+#define QSERDES_TX_RESET_GEN 0x0c0
+#define QSERDES_TX_RESET_GEN_MUXES 0x0c4
+#define QSERDES_TX_TRAN_DRVR_EMP_EN 0x0c8
+#define QSERDES_TX_TX_INTERFACE_MODE 0x0cc
+#define QSERDES_TX_PWM_CTRL 0x0d0
+#define QSERDES_TX_PWM_ENCODED_OR_DATA 0x0d4
+#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND2 0x0d8
+#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND2 0x0dc
+#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND2 0x0e0
+#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND2 0x0e4
+#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x0e8
+#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x0ec
+#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x0f0
+#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x0f4
+#define QSERDES_TX_VMODE_CTRL1 0x0f8
+#define QSERDES_TX_VMODE_CTRL2 0x0fc
+#define QSERDES_TX_TX_ALOG_INTF_OBSV_CNTL 0x100
+#define QSERDES_TX_BIST_STATUS 0x104
+#define QSERDES_TX_BIST_ERROR_COUNT1 0x108
+#define QSERDES_TX_BIST_ERROR_COUNT2 0x10c
+#define QSERDES_TX_TX_ALOG_INTF_OBSV 0x110
+
+/* Only for QMP V2 PHY - RX registers */
+#define QSERDES_RX_UCDR_FO_GAIN_HALF 0x000
+#define QSERDES_RX_UCDR_FO_GAIN_QUARTER 0x004
+#define QSERDES_RX_UCDR_FO_GAIN_EIGHTH 0x008
+#define QSERDES_RX_UCDR_FO_GAIN 0x00c
+#define QSERDES_RX_UCDR_SO_GAIN_HALF 0x010
+#define QSERDES_RX_UCDR_SO_GAIN_QUARTER 0x014
+#define QSERDES_RX_UCDR_SO_GAIN_EIGHTH 0x018
+#define QSERDES_RX_UCDR_SO_GAIN 0x01c
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_HALF 0x020
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_QUARTER 0x024
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_EIGHTH 0x028
+#define QSERDES_RX_UCDR_SVS_FO_GAIN 0x02c
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF 0x030
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER 0x034
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH 0x038
+#define QSERDES_RX_UCDR_SVS_SO_GAIN 0x03c
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x040
+#define QSERDES_RX_UCDR_FD_GAIN 0x044
+#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x048
+#define QSERDES_RX_UCDR_FO_TO_SO_DELAY 0x04c
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0x050
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x054
+#define QSERDES_RX_UCDR_MODULATE 0x058
+#define QSERDES_RX_UCDR_PI_CONTROLS 0x05c
+#define QSERDES_RX_RBIST_CONTROL 0x060
+#define QSERDES_RX_AUX_CONTROL 0x064
+#define QSERDES_RX_AUX_DATA_TCOARSE 0x068
+#define QSERDES_RX_AUX_DATA_TFINE_LSB 0x06c
+#define QSERDES_RX_AUX_DATA_TFINE_MSB 0x070
+#define QSERDES_RX_RCLK_AUXDATA_SEL 0x074
+#define QSERDES_RX_AC_JTAG_ENABLE 0x078
+#define QSERDES_RX_AC_JTAG_INITP 0x07c
+#define QSERDES_RX_AC_JTAG_INITN 0x080
+#define QSERDES_RX_AC_JTAG_LVL 0x084
+#define QSERDES_RX_AC_JTAG_MODE 0x088
+#define QSERDES_RX_AC_JTAG_RESET 0x08c
+#define QSERDES_RX_RX_TERM_BW 0x090
+#define QSERDES_RX_RX_RCVR_IQ_EN 0x094
+#define QSERDES_RX_RX_IDAC_I_DC_OFFSETS 0x098
+#define QSERDES_RX_RX_IDAC_IBAR_DC_OFFSETS 0x09c
+#define QSERDES_RX_RX_IDAC_Q_DC_OFFSETS 0x0a0
+#define QSERDES_RX_RX_IDAC_QBAR_DC_OFFSETS 0x0a4
+#define QSERDES_RX_RX_IDAC_A_DC_OFFSETS 0x0a8
+#define QSERDES_RX_RX_IDAC_ABAR_DC_OFFSETS 0x0ac
+#define QSERDES_RX_RX_IDAC_EN 0x0b0
+#define QSERDES_RX_RX_IDAC_ENABLES 0x0b4
+#define QSERDES_RX_RX_IDAC_SIGN 0x0b8
+#define QSERDES_RX_RX_HIGHZ_HIGHRATE 0x0bc
+#define QSERDES_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x0c0
+#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x0c4
+#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x0c8
+#define QSERDES_RX_RX_EQ_GAIN2_LSB 0x0cc
+#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x0d0
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 0x0d4
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d8
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x0dc
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x0e0
+#define QSERDES_RX_RX_IDAC_CAL_CONFIGURATION 0x0e4
+#define QSERDES_RX_RX_IDAC_TSETTLE_LOW 0x0e8
+#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x0ec
+#define QSERDES_RX_RX_IDAC_ENDSAMP_LOW 0x0f0
+#define QSERDES_RX_RX_IDAC_ENDSAMP_HIGH 0x0f4
+#define QSERDES_RX_RX_IDAC_MIDPOINT_LOW 0x0f8
+#define QSERDES_RX_RX_IDAC_MIDPOINT_HIGH 0x0fc
+#define QSERDES_RX_RX_EQ_OFFSET_LSB 0x100
+#define QSERDES_RX_RX_EQ_OFFSET_MSB 0x104
+#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x108
+#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x10c
+#define QSERDES_RX_SIGDET_ENABLES 0x110
+#define QSERDES_RX_SIGDET_CNTRL 0x114
+#define QSERDES_RX_SIGDET_LVL 0x118
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x11c
+#define QSERDES_RX_RX_BAND 0x120
+#define QSERDES_RX_CDR_FREEZE_UP_DN 0x124
+#define QSERDES_RX_CDR_RESET_OVERRIDE 0x128
+#define QSERDES_RX_RX_INTERFACE_MODE 0x12c
+#define QSERDES_RX_JITTER_GEN_MODE 0x130
+#define QSERDES_RX_BUJ_AMP 0x134
+#define QSERDES_RX_SJ_AMP1 0x138
+#define QSERDES_RX_SJ_AMP2 0x13c
+#define QSERDES_RX_SJ_PER1 0x140
+#define QSERDES_RX_SJ_PER2 0x144
+#define QSERDES_RX_BUJ_STEP_FREQ1 0x148
+#define QSERDES_RX_BUJ_STEP_FREQ2 0x14c
+#define QSERDES_RX_PPM_OFFSET1 0x150
+#define QSERDES_RX_PPM_OFFSET2 0x154
+#define QSERDES_RX_SIGN_PPM_PERIOD1 0x158
+#define QSERDES_RX_SIGN_PPM_PERIOD2 0x15c
+#define QSERDES_RX_SSC_CTRL 0x160
+#define QSERDES_RX_SSC_COUNT1 0x164
+#define QSERDES_RX_SSC_COUNT2 0x168
+#define QSERDES_RX_RX_ALOG_INTF_OBSV_CNTL 0x16c
+#define QSERDES_RX_RX_PWM_ENABLE_AND_DATA 0x170
+#define QSERDES_RX_RX_PWM_GEAR1_TIMEOUT_COUNT 0x174
+#define QSERDES_RX_RX_PWM_GEAR2_TIMEOUT_COUNT 0x178
+#define QSERDES_RX_RX_PWM_GEAR3_TIMEOUT_COUNT 0x17c
+#define QSERDES_RX_RX_PWM_GEAR4_TIMEOUT_COUNT 0x180
+#define QSERDES_RX_PI_CTRL1 0x184
+#define QSERDES_RX_PI_CTRL2 0x188
+#define QSERDES_RX_PI_QUAD 0x18c
+#define QSERDES_RX_IDATA1 0x190
+#define QSERDES_RX_IDATA2 0x194
+#define QSERDES_RX_AUX_DATA1 0x198
+#define QSERDES_RX_AUX_DATA2 0x19c
+#define QSERDES_RX_AC_JTAG_OUTP 0x1a0
+#define QSERDES_RX_AC_JTAG_OUTN 0x1a4
+#define QSERDES_RX_RX_SIGDET 0x1a8
+#define QSERDES_RX_RX_VDCOFF 0x1ac
+#define QSERDES_RX_IDAC_CAL_ON 0x1b0
+#define QSERDES_RX_IDAC_STATUS_I 0x1b4
+#define QSERDES_RX_IDAC_STATUS_IBAR 0x1b8
+#define QSERDES_RX_IDAC_STATUS_Q 0x1bc
+#define QSERDES_RX_IDAC_STATUS_QBAR 0x1c0
+#define QSERDES_RX_IDAC_STATUS_A 0x1c4
+#define QSERDES_RX_IDAC_STATUS_ABAR 0x1c8
+#define QSERDES_RX_CALST_STATUS_I 0x1cc
+#define QSERDES_RX_CALST_STATUS_Q 0x1d0
+#define QSERDES_RX_CALST_STATUS_A 0x1d4
+#define QSERDES_RX_RX_ALOG_INTF_OBSV 0x1d8
+#define QSERDES_RX_READ_EQCODE 0x1dc
+#define QSERDES_RX_READ_OFFSETCODE 0x1e0
+#define QSERDES_RX_IA_ERROR_COUNTER_LOW 0x1e4
+#define QSERDES_RX_IA_ERROR_COUNTER_HIGH 0x1e8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
new file mode 100644
index 000000000000..c8583f5a54bd
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -0,0 +1,1383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
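+/* timeout (in microseconds) for polling QPHY_PCS_READY_STATUS after init */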
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees. */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+	/*
+	 * Is this register part of the reg-layout?
+	 * If so, 'offset' is an index into the reg-layout array.
+	 */
+ bool in_layout;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* PCS_MISC registers */
+ QPHY_PCS_MISC_TYPEC_CTRL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x160,
+};
+
+static const unsigned int sm6115_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
+static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02),
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+};
+
+static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl sm6115_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm6115_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5B),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5B),
+};
+
+static const struct qmp_phy_init_tbl sm6115_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_PWM_GEAR_BAND, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SYM_RESYNC_CTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_LARGE_AMP_POST_EMP_LVL, 0x12),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_SMALL_AMP_POST_EMP_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_MIN_HIBERN8_TIME, 0x9a), /* 8 us */
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SIGDET_CTRL2, 0x6e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SYM_RESYNC_CTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SIGDET_CTRL1, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_MIN_HIBERN8_TIME, 0x9a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x65),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0x3c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_PLL_CNTL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB, 0xd8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND, 0x06),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+struct qmp_phy;
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+	/* bit mask of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
+
+ /* true, if PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
+
+ /* true, if PCS block has no separate SW_RESET register */
+ bool no_pcs_sw_reset;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
+ * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @mode: current PHY mode
+ */
+struct qmp_phy {
+ struct phy *phy;
+ const struct qmp_phy_cfg *cfg;
+ void __iomem *serdes;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *tx2;
+ void __iomem *rx2;
+ void __iomem *pcs_misc;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ enum phy_mode mode;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ *
+ * @clks: array of clocks required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ * @ufs_reset: optional UFS PHY reset handle
+ */
+struct qcom_qmp {
+ struct device *dev;
+
+ struct clk_bulk_data *clks;
+ struct regulator_bulk_data *vregs;
+
+ struct qmp_phy **phys;
+
+ struct reset_control *ufs_reset;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that above write is through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_ufs_phy_clk_l[] = {
+ "ref",
+};
+
+/* the sm8450 UFS phy also requires a "qref" clock */
+static const char * const sm8450_ufs_phy_clk_l[] = {
+ "qref", "ref", "ref_aux",
+};
+
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref", "ref_aux",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_ufs_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_ufs_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
+ .tx_tbl = msm8996_ufs_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl),
+ .rx_tbl = msm8996_ufs_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl),
+
+ .clk_list = msm8996_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l),
+
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+
+ .regs = msm8996_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .no_pcs_sw_reset = true,
+};
+
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sdm845_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
+ .tx_tbl = sdm845_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl),
+ .rx_tbl = sdm845_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl),
+ .pcs_tbl = sdm845_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+ .no_pcs_sw_reset = true,
+};
+
+static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 1,
+
+ .serdes_tbl = sm6115_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl),
+ .tx_tbl = sm6115_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_tx_tbl),
+ .rx_tbl = sm6115_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_rx_tbl),
+ .pcs_tbl = sm6115_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm6115_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm6115_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .no_pcs_sw_reset = true,
+};
+
+static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8150_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
+ .tx_tbl = sm8150_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
+ .rx_tbl = sm8150_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
+ .pcs_tbl = sm8150_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8350_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
+ .tx_tbl = sm8350_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
+ .rx_tbl = sm8350_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
+ .pcs_tbl = sm8350_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8350_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
+ .tx_tbl = sm8350_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
+ .rx_tbl = sm8350_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
+ .pcs_tbl = sm8350_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .clk_list = sm8450_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static void qcom_qmp_phy_ufs_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
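+/* program an init table that is not lane-specific (applies to all lanes) */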
+static void qcom_qmp_phy_ufs_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_ufs_configure_lane(base, regs, tbl, num, 0xff);
+}
+
+static int qcom_qmp_phy_ufs_serdes_init(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+
+ qcom_qmp_phy_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_com_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs = qphy->pcs;
+ int ret;
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_disable_regulators;
+
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+ qphy_setbits(pcs,
+ cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ else
+ qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+
+ return 0;
+
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_ufs_com_exit(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ reset_control_assert(qmp->ufs_reset);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+	int ret;
+
+	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ if (cfg->no_pcs_sw_reset) {
+ /*
+ * Get UFS reset, which is delayed until now to avoid a
+ * circular dependency where UFS needs its PHY, but the PHY
+ * needs this UFS reset.
+ */
+ if (!qmp->ufs_reset) {
+ qmp->ufs_reset =
+ devm_reset_control_get_exclusive(qmp->dev,
+ "ufsphy");
+
+ if (IS_ERR(qmp->ufs_reset)) {
+ ret = PTR_ERR(qmp->ufs_reset);
+ dev_err(qmp->dev,
+ "failed to get UFS reset: %d\n",
+ ret);
+
+ qmp->ufs_reset = NULL;
+ return ret;
+ }
+ }
+
+ ret = reset_control_assert(qmp->ufs_reset);
+ if (ret)
+ return ret;
+ }
+
+ ret = qcom_qmp_phy_ufs_com_init(qphy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+ qcom_qmp_phy_ufs_serdes_init(qphy);
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_ufs_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
+
+	/* Configure the second lane of a dual-lane PHY, if present */
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_ufs_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ }
+
+ qcom_qmp_phy_ufs_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_ufs_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ }
+
+ qcom_qmp_phy_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ ret = reset_control_deassert(qmp->ufs_reset);
+ if (ret)
+ return ret;
+
+ /* Pull PHY out of reset state */
+ if (!cfg->no_pcs_sw_reset)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = PCS_READY;
+ ready = PCS_READY;
+
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_power_off(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ /* PHY reset */
+ if (!cfg->no_pcs_sw_reset)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qcom_qmp_phy_ufs_com_exit(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_ufs_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_ufs_power_on(phy);
+ if (ret)
+ qcom_qmp_phy_ufs_exit(phy);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_ufs_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_ufs_power_off(phy);
+ if (ret)
+ return ret;
+ return qcom_qmp_phy_ufs_exit(phy);
+}
+
+static int qcom_qmp_phy_ufs_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qphy->mode = mode;
+
+ return 0;
+}
+
+static int qcom_qmp_phy_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static const struct phy_ops qcom_qmp_ufs_ops = {
+ .power_on = qcom_qmp_phy_ufs_enable,
+ .power_off = qcom_qmp_phy_ufs_disable,
+ .set_mode = qcom_qmp_phy_ufs_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static
+int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ /*
+ * If this is a dual-lane PHY, then there should be registers for the
+ * second lane. Some old device trees did not specify this, so fall
+ * back to old legacy behavior of assuming they can be reached at an
+ * offset from the first lane.
+ */
+ if (cfg->is_dual_lane_phy) {
+ qphy->tx2 = of_iomap(np, 3);
+ qphy->rx2 = of_iomap(np, 4);
+ if (!qphy->tx2 || !qphy->rx2) {
+ dev_warn(dev,
+ "Underspecified device tree, falling back to legacy register regions\n");
+
+ /* In the old version, pcs_misc is at index 3. */
+ qphy->pcs_misc = qphy->tx2;
+ qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
+ qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 5);
+ }
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 3);
+ }
+
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_ufs_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_ufs_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-ufs-phy",
+ .data = &msm8996_ufs_cfg,
+ }, {
+ .compatible = "qcom,msm8998-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sc8180x-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sc8280xp-qmp-ufs-phy",
+ .data = &sm8350_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm6115-qmp-ufs-phy",
+ .data = &sm6115_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm6350-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-ufs-phy",
+ .data = &sm8350_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-ufs-phy",
+ .data = &sm8450_ufsphy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_ufs_of_match_table);
+
+static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *serdes;
+ const struct qmp_phy_cfg *cfg = NULL;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+
+	/* per-PHY serdes; usually located at the base address */
+ serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ ret = qcom_qmp_phy_ufs_clk_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_ufs_vreg_init(dev, cfg);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+	/* do we have a rogue child node? */
+ if (num > 1)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+	 * Prevent runtime PM from being enabled by default. Users can
+	 * enable it via the power/control attribute in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ id = 0;
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_ufs_create(dev, child, id, serdes, cfg);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ goto err_node_put;
+ }
+
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
+}
+
+static struct platform_driver qcom_qmp_phy_ufs_driver = {
+ .probe = qcom_qmp_phy_ufs_probe,
+ .driver = {
+ .name = "qcom-qmp-ufs-phy",
+ .of_match_table = qcom_qmp_phy_ufs_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_ufs_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP UFS PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
new file mode 100644
index 000000000000..1d270356a97f
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -0,0 +1,2765 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control; 0: HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control; 0: HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
+#define RCVR_DETECT BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+/* Define the assumed distance between lanes for underspecified device trees. */
+#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
+
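+/*
+ * Illustrative sketch only (the lane-creation helper does the equivalent):
+ * when a dual-lane PHY's device tree omits the second-lane register regions,
+ * they are assumed to sit one stride above the first lane, roughly:
+ *
+ *	qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
+ *	qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+ */
+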
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+	 * Is this register part of the reg layout?
+	 * If so, "offset" is an index into the regs_layout array.
+ */
+ bool in_layout;
+ /*
+	 * Mask of lanes for which this register is written,
+	 * for cases where the second lane needs different values.
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
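+/*
+ * Usage sketch, for orientation only (the register/value pairs are
+ * illustrative, taken from the tables below): plain entries use
+ * QMP_PHY_INIT_CFG(), layout-indexed entries use QMP_PHY_INIT_CFG_L(),
+ * and per-lane entries use QMP_PHY_INIT_CFG_LANE(), e.g.
+ *
+ *	QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ *	QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ */
+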
+/* set of registers whose offsets differ from one PHY to another */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* PCS_MISC registers */
+ QPHY_PCS_MISC_TYPEC_CTRL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
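+/*
+ * Sketch of how the layout indirection is typically applied (illustrative,
+ * not the exact code): entries flagged .in_layout are resolved through the
+ * per-PHY regs_layout array before the write, roughly:
+ *
+ *	offset = tbl->in_layout ? regs[tbl->offset] : tbl->offset;
+ *	writel(tbl->val, base + offset);
+ */
+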
+static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x17c,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+};
+
+static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x008,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x014,
+};
+
+static const unsigned int qcm2290_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0xd8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0xdc,
+ [QPHY_PCS_STATUS] = 0x174,
+ [QPHY_PCS_MISC_TYPEC_CTRL] = 0x00,
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_FLL_MAN_CODE, 0x85),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG2, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x95),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+};
+
+static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x26),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x048),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+};
+
+struct qmp_phy;
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_usb_tbl;
+ int pcs_usb_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
+
+ /* true if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+
+ /* true if PHY has a separate DP_COM control block */
+ bool has_phy_dp_com_ctrl;
+ /* true if PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
+
+ /* Offset from PCS to PCS_USB region */
+ unsigned int pcs_usb_offset;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
+ * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
+ * @pcs_misc: iomapped memory space for lane's pcs_misc
+ * @pcs_usb: iomapped memory space for lane's pcs_usb
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @mode: current PHY mode
+ */
+struct qmp_phy {
+ struct phy *phy;
+ const struct qmp_phy_cfg *cfg;
+ void __iomem *serdes;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ void __iomem *tx2;
+ void __iomem *rx2;
+ void __iomem *pcs_misc;
+ void __iomem *pcs_usb;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ enum phy_mode mode;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @dp_com: iomapped memory space for phy's dp_com control block
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @phys: array of per-lane phy descriptors
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *dp_com;
+
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ struct qmp_phy **phys;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const qmp_v4_phy_clk_l[] = {
+ "aux", "ref_clk_src", "ref", "com_aux",
+};
+
+/* the primary usb3 phy on sm8250 doesn't have a ref clock */
+static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
+ "aux", "ref_clk_src", "com_aux"
+};
+
+/* usb3 phy on sdx55 doesn't have com_aux clock */
+static const char * const qmp_v4_sdx55_usbphy_clk_l[] = {
+ "aux", "cfg_ahb", "ref"
+};
+
+static const char * const qcm2290_usb3phy_clk_l[] = {
+ "cfg_ahb", "ref", "com_aux",
+};
+
+/* list of resets */
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const sc7180_usb3phy_reset_l[] = {
+ "phy",
+};
+
+static const char * const qcm2290_usb3phy_reset_l[] = {
+ "phy_phy", "phy",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = ipq8074_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
+ .pcs_tbl = ipq8074_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+};
+
+static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = msm8996_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
+ .pcs_tbl = msm8996_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8998_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
+ .tx_tbl = msm8998_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
+ .rx_tbl = msm8998_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
+ .pcs_tbl = msm8998_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8150_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
+ .rx_tbl = sm8150_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
+ .pcs_tbl = sm8150_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8150_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8150_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8150_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sm8150_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_usb_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x600,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8250_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+ .rx_tbl = sm8250_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+ .pcs_tbl = sm8250_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8250_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8250_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x600,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sdx55_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_tx_tbl),
+ .rx_tbl = sdx55_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl),
+ .clk_list = qmp_v4_sdx55_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x600,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sdx65_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl),
+ .rx_tbl = sdx65_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl),
+ .clk_list = qmp_v4_sdx55_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x1000,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8350_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
+ .rx_tbl = sm8350_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
+ .pcs_tbl = sm8350_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8350_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8350_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x1000,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qcm2290_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
+ .tx_tbl = qcm2290_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
+ .rx_tbl = qcm2290_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
+ .pcs_tbl = qcm2290_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
+ .clk_list = qcm2290_usb3phy_clk_l,
+ .num_clks = ARRAY_SIZE(qcm2290_usb3phy_clk_l),
+ .reset_list = qcm2290_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qcm2290_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
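+/*
+ * Write one init table to the hardware block at @base. Entries are applied
+ * only when their lane_mask matches the requested @lane_mask.
+ */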
+static void qcom_qmp_phy_usb_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
+
+static void qcom_qmp_phy_usb_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_usb_configure_lane(base, regs, tbl, num, 0xff);
+}
+
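+/* Program the SerDes (PLL) block using the per-SoC serdes init table. */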
+static int qcom_qmp_phy_usb_serdes_init(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+
+ qcom_qmp_phy_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+
+ return 0;
+}
+
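+/*
+ * Common initialization: enable the regulator supplies, cycle the PHY resets,
+ * enable the core clocks, set up the DP_COM block (if present) and power up
+ * the PCS.
+ */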
+static int qcom_qmp_phy_usb_com_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ int ret;
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ if (cfg->has_phy_dp_com_ctrl) {
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ /* Default Type-C orientation, i.e. CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+ }
+
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+ qphy_setbits(pcs,
+ cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ else
+ qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_usb_com_exit(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_usb_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ int ret;
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ ret = qcom_qmp_phy_usb_com_init(qphy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
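+/*
+ * Power-on sequence: program the SerDes, Tx/Rx and PCS tables, release the
+ * software reset, start the SerDes and PCS, then poll until PHYSTATUS clears.
+ */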
+static int qcom_qmp_phy_usb_power_on(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+ qcom_qmp_phy_usb_serdes_init(qphy);
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_usb_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
+
+ /* Configure the second lane of the USB-DP combo PHY */
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_usb_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ }
+
+ qcom_qmp_phy_usb_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ if (cfg->is_dual_lane_phy) {
+ qcom_qmp_phy_usb_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ }
+
+ /* Configure link rate, swing, etc. */
+ qcom_qmp_phy_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = cfg->phy_status;
+ ready = 0;
+
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_usb_power_off(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_usb_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qcom_qmp_phy_usb_com_exit(qphy);
+
+ return 0;
+}
+
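+/* phy_ops .init callback: common init followed by power-on, undone on failure. */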
+static int qcom_qmp_phy_usb_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_usb_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_usb_power_on(phy);
+ if (ret)
+ qcom_qmp_phy_usb_exit(phy);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_usb_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_usb_power_off(phy);
+ if (ret)
+ return ret;
+ return qcom_qmp_phy_usb_exit(phy);
+}
+
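+/* Record the requested mode; it selects the wakeup interrupts used on suspend. */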
+static int qcom_qmp_phy_usb_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ qphy->mode = mode;
+
+ return 0;
+}
+
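+/*
+ * Arm the PHY for wakeup while the link is suspended: clear stale interrupt
+ * status, enable the receiver/LFPS detect interrupts for the current mode and
+ * configure the I/O clamp.
+ */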
+static void qcom_qmp_phy_usb_enable_autonomous_mode(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ u32 intr_mask;
+
+ if (qphy->mode == PHY_MODE_USB_HOST_SS ||
+ qphy->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupt status */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qcom_qmp_phy_usb_disable_autonomous_mode(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
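+/* Runtime suspend: switch to autonomous (wakeup) mode and gate the clocks. */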
+static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
+
+ /* Supported only for the USB3 PHY; conveniently, USB3 is the first phy */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qphy->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qcom_qmp_phy_usb_enable_autonomous_mode(qphy);
+
+ clk_disable_unprepare(qphy->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct qmp_phy *qphy = qmp->phys[0];
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
+
+ /* Supported only for the USB3 PHY; conveniently, USB3 is the first phy */
+ if (cfg->type != PHY_TYPE_USB3)
+ return 0;
+
+ if (!qphy->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qcom_qmp_phy_usb_disable_autonomous_mode(qphy);
+
+ return 0;
+}
+
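+/* Build the regulator bulk data from the per-SoC supply list and request it. */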
+static int qcom_qmp_phy_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_resets; i++)
+ qmp->resets[i].id = cfg->reset_list[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qcom_qmp_phy_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
+ * driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP PHYs use a 125 MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+static const struct phy_ops qcom_qmp_phy_usb_ops = {
+ .init = qcom_qmp_phy_usb_enable,
+ .exit = qcom_qmp_phy_usb_disable,
+ .set_mode = qcom_qmp_phy_usb_set_mode,
+ .owner = THIS_MODULE,
+};
+
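+/*
+ * Create one per-lane PHY: map the tx/rx/pcs (and, where present, tx2/rx2 and
+ * pcs_misc) regions from the child node, get its pipe clock and register the
+ * generic phy.
+ */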
+static
+int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ if (cfg->pcs_usb_offset)
+ qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset;
+
+ /*
+ * If this is a dual-lane PHY, then there should be registers for the
+ * second lane. Some old device trees did not specify this, so fall
+ * back to old legacy behavior of assuming they can be reached at an
+ * offset from the first lane.
+ */
+ if (cfg->is_dual_lane_phy) {
+ qphy->tx2 = of_iomap(np, 3);
+ qphy->rx2 = of_iomap(np, 4);
+ if (!qphy->tx2 || !qphy->rx2) {
+ dev_warn(dev,
+ "Underspecified device tree, falling back to legacy register regions\n");
+
+ /* In the old version, pcs_misc is at index 3. */
+ qphy->pcs_misc = qphy->tx2;
+ qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
+ qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 5);
+ }
+
+ } else {
+ qphy->pcs_misc = of_iomap(np, 3);
+ }
+
+ if (!qphy->pcs_misc)
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
+ "failed to get lane%d pipe clock\n", id);
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_usb_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
+ {
+ .compatible = "qcom,ipq8074-qmp-usb3-phy",
+ .data = &ipq8074_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,msm8996-qmp-usb3-phy",
+ .data = &msm8996_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,ipq6018-qmp-usb3-phy",
+ .data = &ipq8074_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sc7180-qmp-usb3-phy",
+ .data = &sc7180_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sc8180x-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-uni-phy",
+ .data = &qmp_v3_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,msm8998-qmp-usb3-phy",
+ .data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-uni-phy",
+ .data = &sm8150_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-phy",
+ .data = &sm8250_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-uni-phy",
+ .data = &sm8250_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sdx55-qmp-usb3-uni-phy",
+ .data = &sdx55_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sdx65-qmp-usb3-uni-phy",
+ .data = &sdx65_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-usb3-uni-phy",
+ .data = &sm8350_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,qcm2290-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_usb_of_match_table);
+
+static const struct dev_pm_ops qcom_qmp_phy_usb_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qmp_phy_usb_runtime_suspend,
+ qcom_qmp_phy_usb_runtime_resume, NULL)
+};
+
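+/*
+ * Probe: map the serdes (and optional dp_com) region, acquire clocks, resets
+ * and regulators, then create a PHY and register a pipe clock source for each
+ * child node.
+ */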
+static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *serdes;
+ const struct qmp_phy_cfg *cfg = NULL;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ /* Get the specific init parameters of QMP phy */
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+
+ /* per PHY serdes; usually located at base address */
+ serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ /* per PHY dp_com; if PHY has dp_com control block */
+ if (cfg->has_phy_dp_com_ctrl) {
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
+ }
+
+ ret = qcom_qmp_phy_usb_clk_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_usb_reset_init(dev, cfg);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_usb_vreg_init(dev, cfg);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+ /* do we have a rogue child node? */
+ if (num > 1)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ id = 0;
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_usb_create(dev, child, id, serdes, cfg);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ goto err_node_put;
+ }
+
+ /*
+ * Register the pipe clock provided by phy.
+ * See function description to see details of this pipe clock.
+ */
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ goto err_node_put;
+ }
+
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+ else
+ pm_runtime_disable(dev);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
+}
+
+static struct platform_driver qcom_qmp_phy_usb_driver = {
+ .probe = qcom_qmp_phy_usb_probe,
+ .driver = {
+ .name = "qcom-qmp-usb-phy",
+ .pm = &qcom_qmp_phy_usb_pm_ops,
+ .of_match_table = qcom_qmp_phy_usb_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_usb_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP USB PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
deleted file mode 100644
index c7309e981bfb..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ /dev/null
@@ -1,6350 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-
-#include <dt-bindings/phy/phy.h>
-
-#include "phy-qcom-qmp.h"
-
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
-/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
-/* QPHY_PCS_STATUS bit */
-#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
-
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
-#define PHY_INIT_COMPLETE_TIMEOUT 10000
-#define POWER_DOWN_DELAY_US_MIN 10
-#define POWER_DOWN_DELAY_US_MAX 11
-
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
-struct qmp_phy_init_tbl {
- unsigned int offset;
- unsigned int val;
- /*
- * register part of layout ?
- * if yes, then offset gives index in the reg-layout
- */
- bool in_layout;
- /*
- * mask of lanes for which this register is written
- * for cases when second lane needs different values
- */
- u8 lane_mask;
-};
-
-#define QMP_PHY_INIT_CFG(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_L(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .in_layout = true, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = l, \
- }
-
-/* set of registers with offsets different per-PHY */
-enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
- /* PCS registers */
- QPHY_PLL_LOCK_CHK_DLY_TIME,
- QPHY_FLL_CNTRL1,
- QPHY_FLL_CNTRL2,
- QPHY_FLL_CNT_VAL_L,
- QPHY_FLL_CNT_VAL_H_TOL,
- QPHY_FLL_MAN_CODE,
- QPHY_SW_RESET,
- QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
- QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
- QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
- /* Keep last to ensure regs_layout arrays are properly initialized */
- QPHY_LAYOUT_SIZE
-};
-
-static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x168,
-};
-
-static const unsigned int ipq_pciephy_gen3_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
-};
-
-static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_COM_SW_RESET] = 0x400,
- [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
- [QPHY_COM_START_CONTROL] = 0x408,
- [QPHY_COM_PCS_READY_STATUS] = 0x448,
- [QPHY_PLL_LOCK_CHK_DLY_TIME] = 0xa8,
- [QPHY_FLL_CNTRL1] = 0xc4,
- [QPHY_FLL_CNTRL2] = 0xc8,
- [QPHY_FLL_CNT_VAL_L] = 0xcc,
- [QPHY_FLL_CNT_VAL_H_TOL] = 0xd0,
- [QPHY_FLL_MAN_CODE] = 0xd4,
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
-};
-
-static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_FLL_CNTRL1] = 0xc0,
- [QPHY_FLL_CNTRL2] = 0xc4,
- [QPHY_FLL_CNT_VAL_L] = 0xc8,
- [QPHY_FLL_CNT_VAL_H_TOL] = 0xcc,
- [QPHY_FLL_MAN_CODE] = 0xd0,
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x17c,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
-};
-
-static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
- [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
-};
-
-static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
-};
-
-static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x2ac,
-};
-
-static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x308,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x314,
-};
-
-static const unsigned int qmp_v4_usb3_uniphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x608,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x614,
-};
-
-static const unsigned int sm8350_usb3_uniphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x1008,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x1014,
-};
-
-static const unsigned int qcm2290_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0xd8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0xdc,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_MISC_TYPEC_CTRL] = 0x00,
-};
-
-static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x160,
-};
-
-static const unsigned int sm6115_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x168,
-};
-
-static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
-};
-
-static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START,
- [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS,
- [QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
-};
-
-static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- /* PLL and Loop filter settings */
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- /* SSC settings */
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl ipq8074_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0),
-};
-
-static const struct qmp_phy_init_tbl ipq8074_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
-};
-
-static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
-};
-
-static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
- QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
-};
-
-static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
-};
-
-static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_RX_IDLE_DTCT_CNTRL, 0x4c),
- QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
- QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
-
- QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x05),
-
- QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x05),
- QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG4, 0x00),
- QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG1, 0xa3),
- QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
-};
-
-static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15),
-};
-
-static const struct qmp_phy_init_tbl msm8998_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
-};
-
-static const struct qmp_phy_init_tbl msm8998_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40),
-};
-
-static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x99),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
-};
-
-static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
-};
-
-static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
- QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02),
-};
-
-static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5b),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
-};
-
-static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
- /* PLL and Loop filter settings */
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- /* SSC settings */
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
- QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
-};
-
-static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
-};
-
-static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
- /* FLL settings */
- QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03),
- QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_H_TOL, 0x42),
- QMP_PHY_INIT_CFG_L(QPHY_FLL_MAN_CODE, 0x85),
-
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
-};
-
-static const struct qmp_phy_init_tbl ipq6018_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
- QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
- QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
- QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
- QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
-};
-
-static const struct qmp_phy_init_tbl ipq6018_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_TX0_LANE_MODE_1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_TX0_RCV_DETECT_LVL_2, 0x12),
-};
-
-static const struct qmp_phy_init_tbl ipq6018_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_FO_GAIN, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_SO_GAIN, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_PI_CONTROLS, 0x70),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x61),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x73),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_RX0_SIGDET_ENABLES, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_RX0_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_LOW, 0xf0),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH2, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH3, 0xd3),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH4, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_LOW, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH2, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH3, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH4, 0xb1),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH2, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH3, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH4, 0xb1),
- QMP_PHY_INIT_CFG(QSERDES_RX0_DFE_EN_TIMER, 0x04),
-};
-
-static const struct qmp_phy_init_tbl ipq6018_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(PCS_COM_FLL_CNTRL1, 0x01),
- QMP_PHY_INIT_CFG(PCS_COM_REFGEN_REQ_CONFIG1, 0x0d),
- QMP_PHY_INIT_CFG(PCS_COM_G12S1_TXDEEMPH_M3P5DB, 0x10),
- QMP_PHY_INIT_CFG(PCS_COM_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(PCS_COM_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(PCS_COM_RX_DCC_CAL_CONFIG, 0x01),
- QMP_PHY_INIT_CFG(PCS_COM_EQ_CONFIG5, 0x01),
- QMP_PHY_INIT_CFG(PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
- QMP_PHY_INIT_CFG(PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
- QMP_PHY_INIT_CFG(PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
- QMP_PHY_INIT_CFG(PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(PCS_PCIE_EQ_CONFIG1, 0x11),
- QMP_PHY_INIT_CFG(PCS_PCIE_PRESET_P10_PRE, 0x00),
- QMP_PHY_INIT_CFG(PCS_PCIE_PRESET_P10_POST, 0x58),
-};
-
-static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xa),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xD),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xD04),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0xb),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
-};
-
-static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
- QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
- QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
- QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
-};
-
-static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
-};
-
-static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x4),
- QMP_PHY_INIT_CFG(QPHY_OSC_DTCT_ACTIONS, 0x0),
- QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
- QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
- QMP_PHY_INIT_CFG(QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
- QMP_PHY_INIT_CFG(QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
- QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
- QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x73),
- QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_LVL, 0x99),
- QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0xe),
- QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0),
- QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x007),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qmp_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qmp_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x71),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_01, 0x59),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xbb),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x0d),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG4, 0x00),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4, 0x1a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5, 0x06),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qhp_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL, 0x27),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1, 0xde),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2, 0x07),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1, 0x06),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN, 0x18),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_ENABLE1, 0xb0),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0, 0x8c),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0, 0x20),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1, 0x14),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1, 0x34),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_RESTRIM_CTRL2, 0x05),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE1, 0x68),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1, 0xaa),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VCO_TUNE_MAP, 0x10),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_SELECT, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_HSCLK_SEL1, 0x30),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORE_CLK_EN, 0x73),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL, 0x15),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_MODE, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV1, 0x22),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV2, 0x00),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BGV_TRIM, 0x20),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BG_CTRL, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qhp_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL0, 0x00),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_TAP_EN, 0x0d),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TX_BAND_MODE, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_LANE_MODE, 0x1a),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PARALLEL_RATE, 0x2f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE0, 0x09),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE1, 0x09),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE2, 0x1b),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2, 0x07),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0, 0x31),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1, 0x31),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2, 0x03),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE, 0x02),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CGA_THRESH_DFE, 0x00),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXENGINE_EN0, 0x12),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME, 0x25),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME, 0x00),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME, 0x05),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_GAIN, 0x26),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_GAIN, 0x12),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_GAIN, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_OFFSET_GAIN, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PRE_GAIN, 0x09),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_INTVAL, 0x15),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EDAC_INITVAL, 0x28),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB0, 0x7f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB1, 0x07),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_CTRL, 0x70),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0, 0x8b),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1, 0x08),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2, 0x0a),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0, 0x03),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_BAND, 0x02),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0, 0x5c),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1, 0x3e),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2, 0x3f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_ENABLES, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_CNTRL, 0xa0),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL, 0x08),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DCC_GAIN, 0x01),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_EN_SIGNAL, 0xc3),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL, 0x00),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0, 0xbc),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TS0_TIMER, 0x7f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE, 0x15),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL2, 0x0f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET, 0x04),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_INITVAL, 0x20),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RSM_START, 0x01),
-};
-
-static const struct qmp_phy_init_tbl sdm845_qhp_pcie_rx_tbl[] = {
-};
-
-static const struct qmp_phy_init_tbl sdm845_qhp_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG, 0x3f),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG, 0x50),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB, 0x19),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB, 0x07),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB, 0x17),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB, 0x09),
- QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5, 0x9f),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x37),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_rbr[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x6f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr2[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x8c),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr3[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x2a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x08),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_dp_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRANSCEIVER_BIAS_EN, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_VMODE_CTRL1, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_INTERFACE_SELECT, 0x3d),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_CLKBUF_ENABLE, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RESET_TSYNC_EN, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRAN_DRVR_EMP_EN, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_INTERFACE_MODE, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_BAND, 0x4),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_POL_INV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_DRV_LVL, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_EMP_POST1_LVL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
- /* FLL settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
-
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x50),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
- /* FLL settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
-
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb5),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4c),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x64),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
-};
-
-static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
-
- /* Rate B */
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x44),
-};
-
-static const struct qmp_phy_init_tbl sm6115_ufsphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
- QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
-};
-
-static const struct qmp_phy_init_tbl sm6115_ufsphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x0F),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5B),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
- QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5B),
-};
-
-static const struct qmp_phy_init_tbl sm6115_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_RX_PWM_GEAR_BAND, 0x15),
- QMP_PHY_INIT_CFG(QPHY_RX_SIGDET_CTRL2, 0x6d),
- QMP_PHY_INIT_CFG(QPHY_TX_LARGE_AMP_DRV_LVL, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
- QMP_PHY_INIT_CFG(QPHY_RX_SYM_RESYNC_CTRL, 0x03),
- QMP_PHY_INIT_CFG(QPHY_TX_LARGE_AMP_POST_EMP_LVL, 0x12),
- QMP_PHY_INIT_CFG(QPHY_TX_SMALL_AMP_POST_EMP_LVL, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_RX_MIN_HIBERN8_TIME, 0x9a), /* 8 us */
-};
-
-static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
-
- /* Rate B */
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
-};
-
-static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
-};
-
-static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL2, 0x6e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SYM_RESYNC_CTRL, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_MID_TERM_CTRL1, 0x43),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL1, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_MIN_HIBERN8_TIME, 0x9a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-};
-
-static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
-
- /* Rate B */
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
-};
-
-static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
-};
-
-static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
-
-};
-
-static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
-
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x95),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x05),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x37),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xef),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
-};
-
-static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
-};
-
-static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_rbr[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x6f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
-};
-
-static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
-};
-
-static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr2[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x8c),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
-};
-
-static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr3[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x2a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
-};
-
-static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_VMODE_CTRL1, 0x40),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_INTERFACE_SELECT, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_CLKBUF_ENABLE, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RESET_TSYNC_EN, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_INTERFACE_MODE, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_BAND, 0x4),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_POL_INV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_DRV_LVL, 0x2a),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20),
-};
-
-static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
-};
-
-static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x5),
-};
-
-static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x6e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x6e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x37),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x39),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x39),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
-};
-
-static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x01),
-};
-
-static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x35),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x30),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0x77),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x12),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE, 0x33),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0f),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x05),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG2, 0x0f),
-};
-
-static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x08),
-};
-
-static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x26),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x048),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
-};
-
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x46),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_CFG, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x50),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC1, 0x88),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTERNAL_DIG_CORECLK_DIV, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MODE, 0x17),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_DC_LEVEL_CTRL, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x56),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1d),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x4b),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x22),
-};
-
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_2, 0xf6),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_3, 0x13),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_VMODE_CTRL1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_PI_QEC_CTRL, 0x00),
-};
-
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_FO_GAIN_RATE2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_UCDR_PI_CONTROLS, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_3, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_VGA_CAL_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2, 0x5a),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4, 0x37),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B0, 0xbd),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B1, 0xf9),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B3, 0xce),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B4, 0x62),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B0, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B1, 0x7d),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B3, 0xcf),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B4, 0xd6),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_PHPRE_CTRL, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_MARG_COARSE_CTRL2, 0x12),
-};
-
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_RX_SIGDET_LVL, 0x77),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG2, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG4, 0x16),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG5, 0x02),
-};
-
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_EQ_CONFIG1, 0x17),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2, 0x01),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
-};
-
-static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b),
-};
-
-static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
-};
-
-static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x98),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x65),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
-
- /* Rate B */
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x06),
-};
-
-static const struct qmp_phy_init_tbl sm8350_ufsphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xf5),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0c),
-};
-
-static const struct qmp_phy_init_tbl sm8350_ufsphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_LVL, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_BAND, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf1),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_TERM_BW, 0x1b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x6d),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x6d),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xed),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0x3c),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xe0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xb7),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_LOW, 0xe0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH2, 0xc8),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x3b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0xb7),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
-};
-
-static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_PLL_CNTL, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB, 0xd8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND, 0x06),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1, 0x0e),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
-	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05),
-	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
-	QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
-
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
-};
-
-/* Register names should be validated, as they might be different for this PHY */
-static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
-};
-
-static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
-};
-
-struct qmp_phy;
-
-/* struct qmp_phy_cfg - per-PHY initialization config */
-struct qmp_phy_cfg {
-	/* phy-type - PCIE/UFS/USB/DP */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
-
- /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
- const struct qmp_phy_init_tbl *serdes_tbl;
- int serdes_tbl_num;
- const struct qmp_phy_init_tbl *serdes_tbl_sec;
- int serdes_tbl_num_sec;
- const struct qmp_phy_init_tbl *tx_tbl;
- int tx_tbl_num;
- const struct qmp_phy_init_tbl *tx_tbl_sec;
- int tx_tbl_num_sec;
- const struct qmp_phy_init_tbl *rx_tbl;
- int rx_tbl_num;
- const struct qmp_phy_init_tbl *rx_tbl_sec;
- int rx_tbl_num_sec;
- const struct qmp_phy_init_tbl *pcs_tbl;
- int pcs_tbl_num;
- const struct qmp_phy_init_tbl *pcs_tbl_sec;
- int pcs_tbl_num_sec;
- const struct qmp_phy_init_tbl *pcs_misc_tbl;
- int pcs_misc_tbl_num;
- const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
- int pcs_misc_tbl_num_sec;
-
- /* Init sequence for DP PHY block link rates */
- const struct qmp_phy_init_tbl *serdes_tbl_rbr;
- int serdes_tbl_rbr_num;
- const struct qmp_phy_init_tbl *serdes_tbl_hbr;
- int serdes_tbl_hbr_num;
- const struct qmp_phy_init_tbl *serdes_tbl_hbr2;
- int serdes_tbl_hbr2_num;
- const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
- int serdes_tbl_hbr3_num;
-
- /* DP PHY callbacks */
- int (*configure_dp_phy)(struct qmp_phy *qphy);
- void (*configure_dp_tx)(struct qmp_phy *qphy);
- int (*calibrate_dp_phy)(struct qmp_phy *qphy);
- void (*dp_aux_init)(struct qmp_phy *qphy);
-
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
- /* resets to be requested */
- const char * const *reset_list;
- int num_resets;
- /* regulators to be requested */
- const char * const *vreg_list;
- int num_vregs;
-
-	/* array of register offsets, indexed by the generic QPHY_* register ids */
- const unsigned int *regs;
-
- unsigned int start_ctrl;
- unsigned int pwrdn_ctrl;
- unsigned int mask_com_pcs_ready;
- /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
- unsigned int phy_status;
-
-	/* true if PHY has a separate PHY_COM control block */
- bool has_phy_com_ctrl;
-	/* true if PHY has a reset for individual lanes */
- bool has_lane_rst;
-	/* true if PHY needs a delay after POWER_DOWN */
- bool has_pwrdn_delay;
- /* power_down delay in usec */
- int pwrdn_delay_min;
- int pwrdn_delay_max;
-
-	/* true if PHY has a separate DP_COM control block */
- bool has_phy_dp_com_ctrl;
-	/* true if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
-	/* true if PCS block has no separate SW_RESET register */
- bool no_pcs_sw_reset;
-};
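The *_tbl arrays referenced above are plain {offset, value} pairs, so applying one table to a PHY block boils down to a loop of register writes. A minimal sketch, assuming struct qmp_phy_init_tbl carries offset, val and a lane_mask field (as set up by QMP_PHY_INIT_CFG and QMP_PHY_INIT_CFG_LANE); the helper name below is illustrative rather than the driver's own:

/* Illustrative sketch: program one init table into an iomapped block. */
static void example_qmp_configure_lane(void __iomem *base,
				       const struct qmp_phy_init_tbl tbl[],
				       int num, u8 lane_mask)
{
	int i;

	for (i = 0; i < num; i++) {
		const struct qmp_phy_init_tbl *t = &tbl[i];

		/* Entries added with QMP_PHY_INIT_CFG_LANE only apply to their lane(s). */
		if (t->lane_mask && !(t->lane_mask & lane_mask))
			continue;

		writel(t->val, base + t->offset);
	}
}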
-
-struct qmp_phy_combo_cfg {
- const struct qmp_phy_cfg *usb_cfg;
- const struct qmp_phy_cfg *dp_cfg;
-};
-
-/**
- * struct qmp_phy - per-lane phy descriptor
- *
- * @phy: generic phy
- * @cfg: phy specific configuration
- * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
- * @tx: iomapped memory space for lane's tx
- * @rx: iomapped memory space for lane's rx
- * @pcs: iomapped memory space for lane's pcs
- * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
- * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
- * @pcs_misc: iomapped memory space for lane's pcs_misc
- * @pipe_clk: pipe clock
- * @index: lane index
- * @qmp: QMP phy to which this lane belongs
- * @lane_rst: lane's reset controller
- * @mode: current PHY mode
- * @dp_aux_cfg: Display port aux config
- * @dp_opts: Display port optional config
- * @dp_clks: Display port clocks
- */
-struct qmp_phy {
- struct phy *phy;
- const struct qmp_phy_cfg *cfg;
- void __iomem *serdes;
- void __iomem *tx;
- void __iomem *rx;
- void __iomem *pcs;
- void __iomem *tx2;
- void __iomem *rx2;
- void __iomem *pcs_misc;
- struct clk *pipe_clk;
- unsigned int index;
- struct qcom_qmp *qmp;
- struct reset_control *lane_rst;
- enum phy_mode mode;
- unsigned int dp_aux_cfg;
- struct phy_configure_opts_dp dp_opts;
- struct qmp_phy_dp_clks *dp_clks;
-};
-
-struct qmp_phy_dp_clks {
- struct qmp_phy *qphy;
- struct clk_hw dp_link_hw;
- struct clk_hw dp_pixel_hw;
-};
-
-/**
- * struct qcom_qmp - structure holding QMP phy block attributes
- *
- * @dev: device
- * @dp_com: iomapped memory space for phy's dp_com control block
- *
- * @clks: array of clocks required by phy
- * @resets: array of resets required by phy
- * @vregs: regulator supplies bulk data
- *
- * @phys: array of per-lane phy descriptors
- * @phy_mutex: mutex lock for PHY common block initialization
- * @init_count: phy common block initialization count
- * @ufs_reset: optional UFS PHY reset handle
- */
-struct qcom_qmp {
- struct device *dev;
- void __iomem *dp_com;
-
- struct clk_bulk_data *clks;
- struct reset_control **resets;
- struct regulator_bulk_data *vregs;
-
- struct qmp_phy **phys;
-
- struct mutex phy_mutex;
- int init_count;
-
- struct reset_control *ufs_reset;
-};
-
-static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy);
-static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy);
-static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy);
-static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy);
-
-static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy);
-static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy);
-static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy);
-static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy);
-
-static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
-{
- u32 reg;
-
- reg = readl(base + offset);
- reg |= val;
- writel(reg, base + offset);
-
-	/* ensure the above write has gone through before returning */
- readl(base + offset);
-}
-
-static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
-{
- u32 reg;
-
- reg = readl(base + offset);
- reg &= ~val;
- writel(reg, base + offset);
-
-	/* ensure the above write has gone through before returning */
- readl(base + offset);
-}
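These two helpers are the driver's read-modify-write primitives; the readl() at the end forces the posted write out to the device before the caller proceeds. A minimal sketch of how they tend to be combined, assuming the QPHY_PCS_POWER_DOWN_CONTROL and QPHY_START_CTRL indices from the regs layout used elsewhere in this file; the function itself is illustrative, not the driver's actual power-on path:

/* Illustrative sketch: release power-down, then start the PHY blocks. */
static void example_qmp_phy_start(struct qmp_phy *qphy)
{
	const struct qmp_phy_cfg *cfg = qphy->cfg;

	qphy_setbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
		     cfg->pwrdn_ctrl);
	qphy_setbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
}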
-
-/* list of clocks required by phy */
-static const char * const msm8996_phy_clk_l[] = {
- "aux", "cfg_ahb", "ref",
-};
-
-static const char * const msm8996_ufs_phy_clk_l[] = {
- "ref",
-};
-
-static const char * const qmp_v3_phy_clk_l[] = {
- "aux", "cfg_ahb", "ref", "com_aux",
-};
-
-static const char * const sdm845_pciephy_clk_l[] = {
- "aux", "cfg_ahb", "ref", "refgen",
-};
-
-static const char * const qmp_v4_phy_clk_l[] = {
- "aux", "ref_clk_src", "ref", "com_aux",
-};
-
-/* the primary usb3 phy on sm8250 doesn't have a ref clock */
-static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
- "aux", "ref_clk_src", "com_aux"
-};
-
-static const char * const sm8450_ufs_phy_clk_l[] = {
- "qref", "ref", "ref_aux",
-};
-
-static const char * const sdm845_ufs_phy_clk_l[] = {
- "ref", "ref_aux",
-};
-
-/* the usb3 phy on sdx55 doesn't have a com_aux clock */
-static const char * const qmp_v4_sdx55_usbphy_clk_l[] = {
- "aux", "cfg_ahb", "ref"
-};
-
-static const char * const qcm2290_usb3phy_clk_l[] = {
- "cfg_ahb", "ref", "com_aux",
-};
-
-/* list of resets */
-static const char * const msm8996_pciephy_reset_l[] = {
- "phy", "common", "cfg",
-};
-
-static const char * const msm8996_usb3phy_reset_l[] = {
- "phy", "common",
-};
-
-static const char * const sc7180_usb3phy_reset_l[] = {
- "phy",
-};
-
-static const char * const qcm2290_usb3phy_reset_l[] = {
- "phy_phy", "phy",
-};
-
-static const char * const sdm845_pciephy_reset_l[] = {
- "phy",
-};
-
-/* list of regulators */
-static const char * const qmp_phy_vreg_l[] = {
- "vdda-phy", "vdda-pll",
-};
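These name lists pair with the num_clks/num_resets/num_vregs counts in struct qmp_phy_cfg and are handed to the kernel's bulk APIs at probe time. A minimal sketch of that step, assuming qmp->clks and qmp->vregs were already allocated with cfg->num_clks and cfg->num_vregs entries; the helper name is illustrative:

/* Illustrative sketch: request the clocks and regulators named by a cfg. */
static int example_qmp_get_resources(struct qcom_qmp *qmp,
				     const struct qmp_phy_cfg *cfg)
{
	int i, ret;

	for (i = 0; i < cfg->num_clks; i++)
		qmp->clks[i].id = cfg->clk_list[i];
	ret = devm_clk_bulk_get(qmp->dev, cfg->num_clks, qmp->clks);
	if (ret)
		return ret;

	for (i = 0; i < cfg->num_vregs; i++)
		qmp->vregs[i].supply = cfg->vreg_list[i];
	return devm_regulator_bulk_get(qmp->dev, cfg->num_vregs, qmp->vregs);
}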
-
-static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = ipq8074_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
- .tx_tbl = msm8996_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
- .rx_tbl = ipq8074_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
- .pcs_tbl = ipq8074_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-};
-
-static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 3,
-
- .serdes_tbl = msm8996_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
- .tx_tbl = msm8996_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
- .rx_tbl = msm8996_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
- .pcs_tbl = msm8996_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = pciephy_regs_layout,
-
- .start_ctrl = PCS_START | PLL_READY_GATE_EN,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_com_pcs_ready = PCS_READY,
- .phy_status = PHYSTATUS,
-
- .has_phy_com_ctrl = true,
- .has_lane_rst = true,
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg msm8996_ufs_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 1,
-
- .serdes_tbl = msm8996_ufs_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
- .tx_tbl = msm8996_ufs_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl),
- .rx_tbl = msm8996_ufs_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl),
-
- .clk_list = msm8996_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l),
-
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
-
- .regs = msm8996_ufsphy_regs_layout,
-
- .start_ctrl = SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .no_pcs_sw_reset = true,
-};
-
-static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = msm8996_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
- .tx_tbl = msm8996_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
- .rx_tbl = msm8996_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
- .pcs_tbl = msm8996_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-};
-
-static const char * const ipq8074_pciephy_clk_l[] = {
- "aux", "cfg_ahb",
-};
-/* list of resets */
-static const char * const ipq8074_pciephy_reset_l[] = {
- "phy", "common",
-};
-
-static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = ipq8074_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
- .tx_tbl = ipq8074_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl),
- .rx_tbl = ipq8074_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
- .pcs_tbl = ipq8074_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
- .reset_list = ipq8074_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
- .vreg_list = NULL,
- .num_vregs = 0,
- .regs = pciephy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-
- .has_phy_com_ctrl = false,
- .has_lane_rst = false,
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = ipq6018_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
- .tx_tbl = ipq6018_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl),
- .rx_tbl = ipq6018_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl),
- .pcs_tbl = ipq6018_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
- .reset_list = ipq8074_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
- .vreg_list = NULL,
- .num_vregs = 0,
- .regs = ipq_pciephy_gen3_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
-
- .has_phy_com_ctrl = false,
- .has_lane_rst = false,
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
- .tx_tbl = sdm845_qmp_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
- .rx_tbl = sdm845_qmp_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
- .pcs_tbl = sdm845_qmp_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
- .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sdm845_qmp_pciephy_regs_layout,
-
- .start_ctrl = PCS_START | SERDES_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
- .tx_tbl = sdm845_qhp_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
- .rx_tbl = sdm845_qhp_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
- .pcs_tbl = sdm845_qhp_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sdm845_qhp_pciephy_regs_layout,
-
- .start_ctrl = PCS_START | SERDES_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
- .serdes_tbl_sec = sm8250_qmp_gen3x1_pcie_serdes_tbl,
- .serdes_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
- .tx_tbl = sm8250_qmp_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
- .rx_tbl = sm8250_qmp_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
- .rx_tbl_sec = sm8250_qmp_gen3x1_pcie_rx_tbl,
- .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
- .pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
- .pcs_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_tbl,
- .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
- .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
- .pcs_misc_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
-
- .start_ctrl = PCS_START | SERDES_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
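Note how this config layers the gen3x1-specific *_sec tables on top of the common sm8250 tables; reading the struct, the secondary table is meant to be applied after the primary one so the variant values land last. A short sketch of that ordering, reusing the illustrative example_qmp_configure_lane() helper sketched earlier (not the driver's actual code):

/* Illustrative sketch: common table first, then the SoC-variant additions. */
example_qmp_configure_lane(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num, 0xff);
if (cfg->serdes_tbl_sec)
	example_qmp_configure_lane(serdes, cfg->serdes_tbl_sec,
				   cfg->serdes_tbl_num_sec, 0xff);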
-
-static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
-
- .serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
- .tx_tbl = sm8250_qmp_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
- .tx_tbl_sec = sm8250_qmp_gen3x2_pcie_tx_tbl,
- .tx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
- .rx_tbl = sm8250_qmp_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
- .rx_tbl_sec = sm8250_qmp_gen3x2_pcie_rx_tbl,
- .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
- .pcs_tbl = sm8250_qmp_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
- .pcs_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_tbl,
- .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
- .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
- .pcs_misc_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
-
- .start_ctrl = PCS_START | SERDES_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = qmp_v3_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
- .rx_tbl = qmp_v3_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = qmp_v3_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
- .rx_tbl = qmp_v3_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = sc7180_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
- .type = PHY_TYPE_DP,
- .nlanes = 1,
-
- .serdes_tbl = qmp_v3_dp_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
- .tx_tbl = qmp_v3_dp_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl),
-
- .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
-
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = sc7180_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
- .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
- .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
- .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
- .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate,
-};
-
-static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
- .usb_cfg = &sc7180_usb3phy_cfg,
- .dp_cfg = &sc7180_dpphy_cfg,
-};
-
-static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_tx_tbl),
- .rx_tbl = qmp_v3_usb3_uniphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
-
- .serdes_tbl = sdm845_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
- .tx_tbl = sdm845_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl),
- .rx_tbl = sdm845_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl),
- .pcs_tbl = sdm845_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl),
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sdm845_ufsphy_regs_layout,
-
- .start_ctrl = SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
- .no_pcs_sw_reset = true,
-};
-
-static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 1,
-
- .serdes_tbl = sm6115_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl),
- .tx_tbl = sm6115_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_tx_tbl),
- .rx_tbl = sm6115_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_rx_tbl),
- .pcs_tbl = sm6115_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm6115_ufsphy_pcs_tbl),
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm6115_ufsphy_regs_layout,
-
- .start_ctrl = SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
-
- .is_dual_lane_phy = false,
- .no_pcs_sw_reset = true,
-};
-
-static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = msm8998_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
- .tx_tbl = msm8998_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8998_pcie_tx_tbl),
- .rx_tbl = msm8998_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8998_pcie_rx_tbl),
- .pcs_tbl = msm8998_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = ipq8074_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = pciephy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-};
-
-static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = msm8998_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
- .tx_tbl = msm8998_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
- .rx_tbl = msm8998_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
- .pcs_tbl = msm8998_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
-
- .serdes_tbl = sm8150_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
- .tx_tbl = sm8150_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
- .rx_tbl = sm8150_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
- .pcs_tbl = sm8150_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
-
- .start_ctrl = SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8150_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
- .rx_tbl = sm8150_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
- .pcs_tbl = sm8150_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = sc8180x_qmp_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
- .tx_tbl = sc8180x_qmp_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
- .rx_tbl = sc8180x_qmp_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
- .pcs_tbl = sc8180x_qmp_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
- .pcs_misc_tbl = sc8180x_qmp_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
-
- .start_ctrl = PCS_START | SERDES_START,
-	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.phy_status		= PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
- .type = PHY_TYPE_DP,
- .nlanes = 1,
-
- .serdes_tbl = qmp_v4_dp_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
- .tx_tbl = qmp_v4_dp_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl),
-
- .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
-
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = sc7180_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
- .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
- .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
- .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
- .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
-};
-
-static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = {
- .usb_cfg = &sm8150_usb3phy_cfg,
- .dp_cfg = &sc8180x_dpphy_cfg,
-};
-
-static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
- .tx_tbl = sm8150_usb3_uniphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_tx_tbl),
- .rx_tbl = sm8150_usb3_uniphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_rx_tbl),
- .pcs_tbl = sm8150_usb3_uniphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3_uniphy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8250_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
- .rx_tbl = sm8250_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
- .pcs_tbl = sm8250_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
- .tx_tbl = sm8250_usb3_uniphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_tx_tbl),
- .rx_tbl = sm8250_usb3_uniphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_rx_tbl),
- .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3_uniphy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
- .type = PHY_TYPE_DP,
- .nlanes = 1,
-
- .serdes_tbl = qmp_v4_dp_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
- .tx_tbl = qmp_v4_dp_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl),
-
- .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
-
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
- .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
- .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
- .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
- .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
-};
-
-static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = {
- .usb_cfg = &sm8250_usb3phy_cfg,
- .dp_cfg = &sm8250_dpphy_cfg,
-};
-
-static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
- .tx_tbl = sdx55_usb3_uniphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_tx_tbl),
- .rx_tbl = sdx55_usb3_uniphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdx55_usb3_uniphy_rx_tbl),
- .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v4_sdx55_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3_uniphy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
-
- .serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
- .tx_tbl = sdx55_qmp_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
- .rx_tbl = sdx55_qmp_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
- .pcs_tbl = sdx55_qmp_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
- .pcs_misc_tbl = sdx55_qmp_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
-
- .start_ctrl = PCS_START | SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS_4_20,
-
- .is_dual_lane_phy = true,
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
- .tx_tbl = sdx65_usb3_uniphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl),
- .rx_tbl = sdx65_usb3_uniphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl),
- .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v4_sdx55_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8350_usb3_uniphy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
-
- .serdes_tbl = sm8350_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
- .tx_tbl = sm8350_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
- .rx_tbl = sm8350_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
- .pcs_tbl = sm8350_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
-
- .start_ctrl = SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8350_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
- .rx_tbl = sm8350_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
- .pcs_tbl = sm8350_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
- .tx_tbl = sm8350_usb3_uniphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_tx_tbl),
- .rx_tbl = sm8350_usb3_uniphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_rx_tbl),
- .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8350_usb3_uniphy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
- .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-};
-
-static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
-
- .serdes_tbl = sm8350_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
- .tx_tbl = sm8350_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
- .rx_tbl = sm8350_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
- .pcs_tbl = sm8350_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
- .clk_list = sm8450_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
-
- .start_ctrl = SERDES_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
-};
-
-static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
-
- .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
- .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
- .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
- .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
- .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS,
-
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
-
- .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
- .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
- .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
- .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
- .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
- .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
- .reset_list = sdm845_pciephy_reset_l,
- .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .phy_status = PHYSTATUS_4_20,
-
- .is_dual_lane_phy = true,
- .has_pwrdn_delay = true,
- .pwrdn_delay_min = 995, /* us */
- .pwrdn_delay_max = 1005, /* us */
-};
-
-static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
-
- .serdes_tbl = qcm2290_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
- .tx_tbl = qcm2290_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
- .rx_tbl = qcm2290_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
- .pcs_tbl = qcm2290_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
- .clk_list = qcm2290_usb3phy_clk_l,
- .num_clks = ARRAY_SIZE(qcm2290_usb3phy_clk_l),
- .reset_list = qcm2290_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qcm2290_usb3phy_regs_layout,
-
- .start_ctrl = SERDES_START | PCS_START,
- .pwrdn_ctrl = SW_PWRDN,
- .phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
-};
-
-static void qcom_qmp_phy_configure_lane(void __iomem *base,
- const unsigned int *regs,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
-{
- int i;
- const struct qmp_phy_init_tbl *t = tbl;
-
- if (!t)
- return;
-
- for (i = 0; i < num; i++, t++) {
- if (!(t->lane_mask & lane_mask))
- continue;
-
- if (t->in_layout)
- writel(t->val, base + regs[t->offset]);
- else
- writel(t->val, base + t->offset);
- }
-}
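
Editorial aside: a minimal, hypothetical sketch (not part of this diff) of how such an init table is declared and programmed for a single lane. The field names follow struct qmp_phy_init_tbl as used by qcom_qmp_phy_configure_lane() above; the offsets, values and the tx_base/cfg names are placeholders.

	/* Hypothetical table: register offsets and values are made up. */
	static const struct qmp_phy_init_tbl example_tx_tbl[] = {
		{ .offset = 0x044, .val = 0x1a, .lane_mask = 0xff },	/* all lanes */
		{ .offset = 0x0d4, .val = 0x08, .lane_mask = BIT(1) },	/* lane 2 only */
	};

	/* Program lane 1: the BIT(1) entry above is skipped by the mask check. */
	qcom_qmp_phy_configure_lane(tx_base, cfg->regs, example_tx_tbl,
				    ARRAY_SIZE(example_tx_tbl), 1);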
-
-static void qcom_qmp_phy_configure(void __iomem *base,
- const unsigned int *regs,
- const struct qmp_phy_init_tbl tbl[],
- int num)
-{
- qcom_qmp_phy_configure_lane(base, regs, tbl, num, 0xff);
-}
-
-static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy)
-{
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *serdes = qphy->serdes;
- const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
- const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
- int serdes_tbl_num = cfg->serdes_tbl_num;
- int ret;
-
- qcom_qmp_phy_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
- if (cfg->serdes_tbl_sec)
- qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
- cfg->serdes_tbl_num_sec);
-
- if (cfg->type == PHY_TYPE_DP) {
- switch (dp_opts->link_rate) {
- case 1620:
- qcom_qmp_phy_configure(serdes, cfg->regs,
- cfg->serdes_tbl_rbr,
- cfg->serdes_tbl_rbr_num);
- break;
- case 2700:
- qcom_qmp_phy_configure(serdes, cfg->regs,
- cfg->serdes_tbl_hbr,
- cfg->serdes_tbl_hbr_num);
- break;
- case 5400:
- qcom_qmp_phy_configure(serdes, cfg->regs,
- cfg->serdes_tbl_hbr2,
- cfg->serdes_tbl_hbr2_num);
- break;
- case 8100:
- qcom_qmp_phy_configure(serdes, cfg->regs,
- cfg->serdes_tbl_hbr3,
- cfg->serdes_tbl_hbr3_num);
- break;
- default:
- /* Other link rates aren't supported */
- return -EINVAL;
- }
- }
-
- if (cfg->has_phy_com_ctrl) {
- void __iomem *status;
- unsigned int mask, val;
-
- qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
- qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
- SERDES_START | PCS_START);
-
- status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
- mask = cfg->mask_com_pcs_ready;
-
- ret = readl_poll_timeout(status, val, (val & mask), 10,
- PHY_INIT_COMPLETE_TIMEOUT);
- if (ret) {
- dev_err(qmp->dev,
- "phy common block init timed-out\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy)
-{
- writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
- qphy->pcs + QSERDES_DP_PHY_PD_CTL);
-
- /* Turn on BIAS current for PHY/PLL */
- writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
- QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
- qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
-
- writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
-
- writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_LANE_0_1_PWRDN |
- DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN |
- DP_PHY_PD_CTL_DP_CLAMP_EN,
- qphy->pcs + QSERDES_DP_PHY_PD_CTL);
-
- writel(QSERDES_V3_COM_BIAS_EN |
- QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
- QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL |
- QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
- qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
-
- writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
- writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
- writel(0x24, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
- writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
- writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
- writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
- writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
- writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
- writel(0xbb, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
- writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
- qphy->dp_aux_cfg = 0;
-
- writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
- PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
- PHY_AUX_REQ_ERR_MASK,
- qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
-}
-
-static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
- { 0x00, 0x0c, 0x15, 0x1a },
- { 0x02, 0x0e, 0x16, 0xff },
- { 0x02, 0x11, 0xff, 0xff },
- { 0x04, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
- { 0x02, 0x12, 0x16, 0x1a },
- { 0x09, 0x19, 0x1f, 0xff },
- { 0x10, 0x1f, 0xff, 0xff },
- { 0x1f, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
- { 0x00, 0x0c, 0x14, 0x19 },
- { 0x00, 0x0b, 0x12, 0xff },
- { 0x00, 0x0b, 0xff, 0xff },
- { 0x04, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
- { 0x08, 0x0f, 0x16, 0x1f },
- { 0x11, 0x1e, 0x1f, 0xff },
- { 0x19, 0x1f, 0xff, 0xff },
- { 0x1f, 0xff, 0xff, 0xff }
-};
-
-static int qcom_qmp_phy_configure_dp_swing(struct qmp_phy *qphy,
- unsigned int drv_lvl_reg, unsigned int emp_post_reg)
-{
- const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
- unsigned int v_level = 0, p_level = 0;
- u8 voltage_swing_cfg, pre_emphasis_cfg;
- int i;
-
- for (i = 0; i < dp_opts->lanes; i++) {
- v_level = max(v_level, dp_opts->voltage[i]);
- p_level = max(p_level, dp_opts->pre[i]);
- }
-
- if (dp_opts->link_rate <= 2700) {
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
- } else {
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level];
- }
-
- /* TODO: Move check to config check */
- if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF)
- return -EINVAL;
-
- /* Enable MUX to use Cursor values from these registers */
- voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
- pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
-
- writel(voltage_swing_cfg, qphy->tx + drv_lvl_reg);
- writel(pre_emphasis_cfg, qphy->tx + emp_post_reg);
- writel(voltage_swing_cfg, qphy->tx2 + drv_lvl_reg);
- writel(pre_emphasis_cfg, qphy->tx2 + emp_post_reg);
-
- return 0;
-}
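
Editorial aside: a worked lookup, using the tables above, for a link rate at or below HBR (2700) with voltage swing level 1 and pre-emphasis level 1. Rows index the swing level, columns the pre-emphasis level, and 0xff marks an unsupported combination that makes the function return -EINVAL.

	u8 swing = qmp_dp_v3_voltage_swing_hbr_rbr[1][1];	/* 0x1e */
	u8 emph = qmp_dp_v3_pre_emphasis_hbr_rbr[1][1];		/* 0x0b */

	/* OR in the MUX enables so the hardware uses these register values. */
	swing |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
	emph |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;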
-
-static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
-{
- const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
- u32 bias_en, drvr_en;
-
- if (qcom_qmp_phy_configure_dp_swing(qphy,
- QSERDES_V3_TX_TX_DRV_LVL,
- QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
- return;
-
- if (dp_opts->lanes == 1) {
- bias_en = 0x3e;
- drvr_en = 0x13;
- } else {
- bias_en = 0x3f;
- drvr_en = 0x10;
- }
-
- writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
- writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
- writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
-}
-
-static bool qcom_qmp_phy_configure_dp_mode(struct qmp_phy *qphy)
-{
- u32 val;
- bool reverse = false;
-
- val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN;
-
- /*
- * TODO: Assume orientation is CC1 for now and two lanes, need to
- * use type-c connector to understand orientation and lanes.
- *
- * Otherwise val changes to be like below if this code understood
- * the orientation of the type-c cable.
- *
- * if (lane_cnt == 4 || orientation == ORIENTATION_CC2)
- * val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN;
- * if (lane_cnt == 4 || orientation == ORIENTATION_CC1)
- * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
- * if (orientation == ORIENTATION_CC2)
- * writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
- */
- val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
- writel(val, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
-
- writel(0x5c, qphy->pcs + QSERDES_DP_PHY_MODE);
-
- return reverse;
-}
-
-static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy)
-{
- const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
- const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
- u32 phy_vco_div, status;
- unsigned long pixel_freq;
-
- qcom_qmp_phy_configure_dp_mode(qphy);
-
- writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
- writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
-
- switch (dp_opts->link_rate) {
- case 1620:
- phy_vco_div = 0x1;
- pixel_freq = 1620000000UL / 2;
- break;
- case 2700:
- phy_vco_div = 0x1;
- pixel_freq = 2700000000UL / 2;
- break;
- case 5400:
- phy_vco_div = 0x2;
- pixel_freq = 5400000000UL / 4;
- break;
- case 8100:
- phy_vco_div = 0x0;
- pixel_freq = 8100000000UL / 6;
- break;
- default:
- /* Other link rates aren't supported */
- return -EINVAL;
- }
- writel(phy_vco_div, qphy->pcs + QSERDES_V3_DP_PHY_VCO_DIV);
-
- clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
- clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
-
- writel(0x04, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
- writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
- writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
- writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
- writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
-
- writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL);
-
- if (readl_poll_timeout(qphy->serdes + QSERDES_V3_COM_C_READY_STATUS,
- status,
- ((status & BIT(0)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
- udelay(2000);
- writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
-
- return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000);
-}
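
Editorial aside: concrete numbers for one arm of the link-rate switch above. For HBR2 (link_rate == 5400) the VCO divider is 0x2, the pixel clock is a quarter of the 5.4 GHz line rate, and the link clock requested from the clock framework is link_rate * 100 kHz.

	u32 phy_vco_div = 0x2;
	unsigned long pixel_freq = 5400000000UL / 4;	/* 1350000000 Hz */
	unsigned long link_freq = 5400 * 100000UL;	/* 540000000 Hz */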
-
-/*
- * We need to calibrate the aux setting here as many times
- * as the caller tries
- */
-static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy)
-{
- static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
- u8 val;
-
- qphy->dp_aux_cfg++;
- qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
- val = cfg1_settings[qphy->dp_aux_cfg];
-
- writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
-
- return 0;
-}
-
-static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy)
-{
- writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
- qphy->pcs + QSERDES_DP_PHY_PD_CTL);
-
- /* Turn on BIAS current for PHY/PLL */
- writel(0x17, qphy->serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
-
- writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
- writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
- writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
- writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
- writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
- writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
- writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
- writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
- writel(0xb7, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
- writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
- qphy->dp_aux_cfg = 0;
-
- writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
- PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
- PHY_AUX_REQ_ERR_MASK,
- qphy->pcs + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
-}
-
-static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy)
-{
- /* Program default values before writing proper values */
- writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
-
- writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
-
- qcom_qmp_phy_configure_dp_swing(qphy,
- QSERDES_V4_TX_TX_DRV_LVL,
- QSERDES_V4_TX_TX_EMP_POST1_LVL);
-}
-
-static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
-{
- const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
- const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
- u32 phy_vco_div, status;
- unsigned long pixel_freq;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- bool reverse;
-
- writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1);
-
- reverse = qcom_qmp_phy_configure_dp_mode(qphy);
-
- writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
- writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
-
- writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL);
- writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL);
-
- switch (dp_opts->link_rate) {
- case 1620:
- phy_vco_div = 0x1;
- pixel_freq = 1620000000UL / 2;
- break;
- case 2700:
- phy_vco_div = 0x1;
- pixel_freq = 2700000000UL / 2;
- break;
- case 5400:
- phy_vco_div = 0x2;
- pixel_freq = 5400000000UL / 4;
- break;
- case 8100:
- phy_vco_div = 0x0;
- pixel_freq = 8100000000UL / 6;
- break;
- default:
- /* Other link rates aren't supported */
- return -EINVAL;
- }
- writel(phy_vco_div, qphy->pcs + QSERDES_V4_DP_PHY_VCO_DIV);
-
- clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
- clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
-
- writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
- writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
- writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
- writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
-
- writel(0x20, qphy->serdes + QSERDES_V4_COM_RESETSM_CNTRL);
-
- if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_C_READY_STATUS,
- status,
- ((status & BIT(0)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
- status,
- ((status & BIT(0)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
- status,
- ((status & BIT(0)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- /*
- * At least for 7nm DP PHY this has to be done after enabling link
- * clock.
- */
-
- if (dp_opts->lanes == 1) {
- bias0_en = reverse ? 0x3e : 0x15;
- bias1_en = reverse ? 0x15 : 0x3e;
- drvr0_en = reverse ? 0x13 : 0x10;
- drvr1_en = reverse ? 0x10 : 0x13;
- } else if (dp_opts->lanes == 2) {
- bias0_en = reverse ? 0x3f : 0x15;
- bias1_en = reverse ? 0x15 : 0x3f;
- drvr0_en = 0x10;
- drvr1_en = 0x10;
- } else {
- bias0_en = 0x3f;
- bias1_en = 0x3f;
- drvr0_en = 0x10;
- drvr1_en = 0x10;
- }
-
- writel(drvr0_en, qphy->tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qphy->tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qphy->tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qphy->tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
-
- writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
- udelay(2000);
- writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x0a, qphy->tx + QSERDES_V4_TX_TX_POL_INV);
- writel(0x0a, qphy->tx2 + QSERDES_V4_TX_TX_POL_INV);
-
- writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
-
- writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
-
- return 0;
-}
-
-/*
- * We need to calibrate the aux setting here as many times
- * as the caller tries
- */
-static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy)
-{
- static const u8 cfg1_settings[] = { 0x20, 0x13, 0x23, 0x1d };
- u8 val;
-
- qphy->dp_aux_cfg++;
- qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
- val = cfg1_settings[qphy->dp_aux_cfg];
-
- writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
-
- return 0;
-}
-
-static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
-{
- const struct phy_configure_opts_dp *dp_opts = &opts->dp;
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- const struct qmp_phy_cfg *cfg = qphy->cfg;
-
- memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
- if (qphy->dp_opts.set_voltages) {
- cfg->configure_dp_tx(qphy);
- qphy->dp_opts.set_voltages = 0;
- }
-
- return 0;
-}
-
-static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- const struct qmp_phy_cfg *cfg = qphy->cfg;
-
- if (cfg->calibrate_dp_phy)
- return cfg->calibrate_dp_phy(qphy);
-
- return 0;
-}
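
Editorial aside: a hypothetical caller sketch, assuming the driver wires qcom_qmp_dp_phy_calibrate() into its phy_ops .calibrate hook (the ops table is outside this hunk). A DisplayPort controller that keeps failing AUX transfers can call phy_calibrate() to rotate AUX_CFG1 through the settings in the v3/v4 helpers above; dp_phy and ret are placeholder names.

	/* After a failed AUX transfer, try the next AUX_CFG1 setting. */
	ret = phy_calibrate(dp_phy);	/* v3 cycles 0x13 -> 0x23 -> 0x1d -> ... */
	if (ret)
		return ret;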
-
-static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
-{
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *serdes = qphy->serdes;
- void __iomem *pcs = qphy->pcs;
- void __iomem *dp_com = qmp->dp_com;
- int ret, i;
-
- mutex_lock(&qmp->phy_mutex);
- if (qmp->init_count++) {
- mutex_unlock(&qmp->phy_mutex);
- return 0;
- }
-
- /* turn on regulator supplies */
- ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
- if (ret) {
- dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_unlock;
- }
-
- for (i = 0; i < cfg->num_resets; i++) {
- ret = reset_control_assert(qmp->resets[i]);
- if (ret) {
- dev_err(qmp->dev, "%s reset assert failed\n",
- cfg->reset_list[i]);
- goto err_disable_regulators;
- }
- }
-
- for (i = cfg->num_resets - 1; i >= 0; i--) {
- ret = reset_control_deassert(qmp->resets[i]);
- if (ret) {
- dev_err(qmp->dev, "%s reset deassert failed\n",
- qphy->cfg->reset_list[i]);
- goto err_assert_reset;
- }
- }
-
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
- if (ret)
- goto err_assert_reset;
-
- if (cfg->has_phy_dp_com_ctrl) {
- qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
- SW_PWRDN);
- /* override hardware control for reset of qmp phy */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
-
- /* Default type-c orientation, i.e CC1 */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
-
- qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
- USB3_MODE | DP_MODE);
-
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
-
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
- }
-
- if (cfg->has_phy_com_ctrl) {
- qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
- SW_PWRDN);
- } else {
- if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
- qphy_setbits(pcs,
- cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
- cfg->pwrdn_ctrl);
- else
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL,
- cfg->pwrdn_ctrl);
- }
-
- mutex_unlock(&qmp->phy_mutex);
-
- return 0;
-
-err_assert_reset:
- while (++i < cfg->num_resets)
- reset_control_assert(qmp->resets[i]);
-err_disable_regulators:
- regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
- mutex_unlock(&qmp->phy_mutex);
-
- return ret;
-}
-
-static int qcom_qmp_phy_com_exit(struct qmp_phy *qphy)
-{
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *serdes = qphy->serdes;
- int i = cfg->num_resets;
-
- mutex_lock(&qmp->phy_mutex);
- if (--qmp->init_count) {
- mutex_unlock(&qmp->phy_mutex);
- return 0;
- }
-
- reset_control_assert(qmp->ufs_reset);
- if (cfg->has_phy_com_ctrl) {
- qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
- SERDES_START | PCS_START);
- qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
- SW_RESET);
- qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
- SW_PWRDN);
- }
-
- while (--i >= 0)
- reset_control_assert(qmp->resets[i]);
-
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
-
- regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-
- mutex_unlock(&qmp->phy_mutex);
-
- return 0;
-}
-
-static int qcom_qmp_phy_init(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qphy->cfg;
-	int ret;
-
-	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
-
- if (cfg->no_pcs_sw_reset) {
- /*
- * Get UFS reset, which is delayed until now to avoid a
- * circular dependency where UFS needs its PHY, but the PHY
- * needs this UFS reset.
- */
- if (!qmp->ufs_reset) {
- qmp->ufs_reset =
- devm_reset_control_get_exclusive(qmp->dev,
- "ufsphy");
-
- if (IS_ERR(qmp->ufs_reset)) {
- ret = PTR_ERR(qmp->ufs_reset);
- dev_err(qmp->dev,
- "failed to get UFS reset: %d\n",
- ret);
-
- qmp->ufs_reset = NULL;
- return ret;
- }
- }
-
- ret = reset_control_assert(qmp->ufs_reset);
- if (ret)
- return ret;
- }
-
- ret = qcom_qmp_phy_com_init(qphy);
- if (ret)
- return ret;
-
- if (cfg->type == PHY_TYPE_DP)
- cfg->dp_aux_init(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_power_on(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *tx = qphy->tx;
- void __iomem *rx = qphy->rx;
- void __iomem *pcs = qphy->pcs;
- void __iomem *pcs_misc = qphy->pcs_misc;
- void __iomem *status;
- unsigned int mask, val, ready;
- int ret;
-
- qcom_qmp_phy_serdes_init(qphy);
-
- if (cfg->has_lane_rst) {
- ret = reset_control_deassert(qphy->lane_rst);
- if (ret) {
- dev_err(qmp->dev, "lane%d reset deassert failed\n",
- qphy->index);
- return ret;
- }
- }
-
- ret = clk_prepare_enable(qphy->pipe_clk);
- if (ret) {
- dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
- goto err_reset_lane;
- }
-
- /* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 1);
-
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 2);
- }
-
- /* Configure special DP tx tunings */
- if (cfg->type == PHY_TYPE_DP)
- cfg->configure_dp_tx(qphy);
-
- qcom_qmp_phy_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_configure_lane(rx, cfg->regs,
- cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
-
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl_sec,
- cfg->rx_tbl_num_sec, 2);
- }
-
- /* Configure link rate, swing, etc. */
- if (cfg->type == PHY_TYPE_DP) {
- cfg->configure_dp_phy(qphy);
- } else {
- qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- if (cfg->pcs_tbl_sec)
- qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
- cfg->pcs_tbl_num_sec);
- }
-
- ret = reset_control_deassert(qmp->ufs_reset);
- if (ret)
- goto err_disable_pipe_clk;
-
- qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
- cfg->pcs_misc_tbl_num);
- if (cfg->pcs_misc_tbl_sec)
- qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
- cfg->pcs_misc_tbl_num_sec);
-
- /*
-	 * Pull the PHY out of the POWER DOWN state.
-	 * The power-down control is an active-low enable signal.
- */
-	if (cfg->type == PHY_TYPE_PCIE)
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
-
- if (cfg->has_pwrdn_delay)
- usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
-
- if (cfg->type != PHY_TYPE_DP) {
- /* Pull PHY out of reset state */
- if (!cfg->no_pcs_sw_reset)
- qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
- /* start SerDes and Phy-Coding-Sublayer */
- qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
-
- if (cfg->type == PHY_TYPE_UFS) {
- status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
- mask = PCS_READY;
- ready = PCS_READY;
- } else {
- status = pcs + cfg->regs[QPHY_PCS_STATUS];
- mask = cfg->phy_status;
- ready = 0;
- }
-
- ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
- PHY_INIT_COMPLETE_TIMEOUT);
- if (ret) {
- dev_err(qmp->dev, "phy initialization timed-out\n");
- goto err_disable_pipe_clk;
- }
- }
- return 0;
-
-err_disable_pipe_clk:
- clk_disable_unprepare(qphy->pipe_clk);
-err_reset_lane:
- if (cfg->has_lane_rst)
- reset_control_assert(qphy->lane_rst);
-
- return ret;
-}
-
-static int qcom_qmp_phy_power_off(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- const struct qmp_phy_cfg *cfg = qphy->cfg;
-
- clk_disable_unprepare(qphy->pipe_clk);
-
- if (cfg->type == PHY_TYPE_DP) {
- /* Assert DP PHY power down */
- writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
- } else {
- /* PHY reset */
- if (!cfg->no_pcs_sw_reset)
- qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
-
- /* stop SerDes and Phy-Coding-Sublayer */
- qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
-
- /* Put PHY into POWER DOWN state: active low */
- if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
- qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
- cfg->pwrdn_ctrl);
- } else {
- qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL,
- cfg->pwrdn_ctrl);
- }
- }
-
- return 0;
-}
-
-static int qcom_qmp_phy_exit(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- const struct qmp_phy_cfg *cfg = qphy->cfg;
-
- if (cfg->has_lane_rst)
- reset_control_assert(qphy->lane_rst);
-
- qcom_qmp_phy_com_exit(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_enable(struct phy *phy)
-{
- int ret;
-
- ret = qcom_qmp_phy_init(phy);
- if (ret)
- return ret;
-
- ret = qcom_qmp_phy_power_on(phy);
- if (ret)
- qcom_qmp_phy_exit(phy);
-
- return ret;
-}
-
-static int qcom_qmp_phy_disable(struct phy *phy)
-{
- int ret;
-
- ret = qcom_qmp_phy_power_off(phy);
- if (ret)
- return ret;
- return qcom_qmp_phy_exit(phy);
-}
-
-static int qcom_qmp_phy_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qphy->mode = mode;
-
- return 0;
-}
-
-static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
-{
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *pcs = qphy->pcs;
- void __iomem *pcs_misc = qphy->pcs_misc;
- u32 intr_mask;
-
- if (qphy->mode == PHY_MODE_USB_HOST_SS ||
- qphy->mode == PHY_MODE_USB_DEVICE_SS)
- intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
- else
- intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
-
- /* Clear any pending interrupts status */
- qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
- /* Writing 1 followed by 0 clears the interrupt */
- qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
-
- qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
- ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
-
- /* Enable required PHY autonomous mode interrupts */
- qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
-
- /* Enable i/o clamp_n for autonomous mode */
- if (pcs_misc)
- qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
-}
-
-static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy)
-{
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *pcs = qphy->pcs;
- void __iomem *pcs_misc = qphy->pcs_misc;
-
- /* Disable i/o clamp_n on resume for normal mode */
- if (pcs_misc)
- qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
-
- qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
- ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
-
- qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
- /* Writing 1 followed by 0 clears the interrupt */
- qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
-}
-
-static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev)
-{
- struct qcom_qmp *qmp = dev_get_drvdata(dev);
- struct qmp_phy *qphy = qmp->phys[0];
- const struct qmp_phy_cfg *cfg = qphy->cfg;
-
- dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
-
- /* Supported only for USB3 PHY and luckily USB3 is the first phy */
- if (cfg->type != PHY_TYPE_USB3)
- return 0;
-
- if (!qmp->init_count) {
- dev_vdbg(dev, "PHY not initialized, bailing out\n");
- return 0;
- }
-
- qcom_qmp_phy_enable_autonomous_mode(qphy);
-
- clk_disable_unprepare(qphy->pipe_clk);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
-
- return 0;
-}
-
-static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
-{
- struct qcom_qmp *qmp = dev_get_drvdata(dev);
- struct qmp_phy *qphy = qmp->phys[0];
- const struct qmp_phy_cfg *cfg = qphy->cfg;
- int ret = 0;
-
- dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
-
- /* Supported only for USB3 PHY and luckily USB3 is the first phy */
- if (cfg->type != PHY_TYPE_USB3)
- return 0;
-
- if (!qmp->init_count) {
- dev_vdbg(dev, "PHY not initialized, bailing out\n");
- return 0;
- }
-
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(qphy->pipe_clk);
- if (ret) {
- dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
- return ret;
- }
-
- qcom_qmp_phy_disable_autonomous_mode(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
-{
- struct qcom_qmp *qmp = dev_get_drvdata(dev);
- int num = cfg->num_vregs;
- int i;
-
- qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
- if (!qmp->vregs)
- return -ENOMEM;
-
- for (i = 0; i < num; i++)
- qmp->vregs[i].supply = cfg->vreg_list[i];
-
- return devm_regulator_bulk_get(dev, num, qmp->vregs);
-}
-
-static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
-{
- struct qcom_qmp *qmp = dev_get_drvdata(dev);
- int i;
-
- qmp->resets = devm_kcalloc(dev, cfg->num_resets,
- sizeof(*qmp->resets), GFP_KERNEL);
- if (!qmp->resets)
- return -ENOMEM;
-
- for (i = 0; i < cfg->num_resets; i++) {
- struct reset_control *rst;
- const char *name = cfg->reset_list[i];
-
- rst = devm_reset_control_get_exclusive(dev, name);
- if (IS_ERR(rst)) {
- dev_err(dev, "failed to get %s reset\n", name);
- return PTR_ERR(rst);
- }
- qmp->resets[i] = rst;
- }
-
- return 0;
-}
-
-static int qcom_qmp_phy_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
-{
- struct qcom_qmp *qmp = dev_get_drvdata(dev);
- int num = cfg->num_clks;
- int i;
-
- qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
- if (!qmp->clks)
- return -ENOMEM;
-
- for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
-
- return devm_clk_bulk_get(dev, num, qmp->clks);
-}
-
-static void phy_clk_release_provider(void *res)
-{
- of_clk_del_provider(res);
-}
-
-/*
- * Register a fixed rate pipe clock.
- *
- * The <s>_pipe_clksrc generated by the PHY is fed to the GCC, which
- * gates and controls it. The <s>_pipe_clk coming out of the GCC is
- * what the PHY driver requests for its own operations.
- * We register the <s>_pipe_clksrc here; the GCC driver takes care of
- * assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
- * The picture below shows this relationship; a consumer-side usage
- * sketch follows the function below.
- *
- * +---------------+
- * | PHY block |<<---------------------------------------+
- * | | |
- * | +-------+ | +-----+ |
- * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
- * clk | +-------+ | +-----+
- * +---------------+
- */
-static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
-{
- struct clk_fixed_rate *fixed;
- struct clk_init_data init = { };
- int ret;
-
- ret = of_property_read_string(np, "clock-output-names", &init.name);
- if (ret) {
- dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
- return ret;
- }
-
- fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
- if (!fixed)
- return -ENOMEM;
-
- init.ops = &clk_fixed_rate_ops;
-
-	/* controllers using QMP PHYs use a 125 MHz pipe clock interface */
- fixed->fixed_rate = 125000000;
- fixed->hw.init = &init;
-
- ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
- if (ret)
- return ret;
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
- if (ret)
- return ret;
-
- /*
- * Roll a devm action because the clock provider is the child node, but
- * the child node is not actually a device.
- */
- return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
-}
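A consumer-side sketch of the pipe clock path described above (illustrative only, not part of this driver; it assumes the GCC driver has parented <s>_pipe_clk to the <s>_pipe_clksrc registered here and that the consumer's clock-names include a "pipe" entry):

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_pipe_clk(struct device *dev)
{
	/* "pipe" is an assumed clock-names entry in the consumer's node */
	struct clk *pipe_clk = devm_clk_get(dev, "pipe");

	if (IS_ERR(pipe_clk))
		return PTR_ERR(pipe_clk);

	/* the fixed-rate <s>_pipe_clksrc makes this read back as 125 MHz */
	dev_dbg(dev, "pipe clk rate: %lu Hz\n", clk_get_rate(pipe_clk));

	return clk_prepare_enable(pipe_clk);
}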
-
-/*
- * Display Port PLL driver block diagram for branch clocks
- *
- * +------------------------------+
- * | DP_VCO_CLK |
- * | |
- * | +-------------------+ |
- * | | (DP PLL/VCO) | |
- * | +---------+---------+ |
- * | v |
- * | +----------+-----------+ |
- * | | hsclk_divsel_clk_src | |
- * | +----------+-----------+ |
- * +------------------------------+
- * |
- * +---------<---------v------------>----------+
- * | |
- * +--------v----------------+ |
- * | dp_phy_pll_link_clk | |
- * | link_clk | |
- * +--------+----------------+ |
- * | |
- * | |
- * v v
- * Input to DISPCC block |
- * for link clk, crypto clk |
- * and interface clock |
- * |
- * |
- * +--------<------------+-----------------+---<---+
- * | | |
- * +----v---------+ +--------v-----+ +--------v------+
- * | vco_divided | | vco_divided | | vco_divided |
- * | _clk_src | | _clk_src | | _clk_src |
- * | | | | | |
- * |divsel_six | | divsel_two | | divsel_four |
- * +-------+------+ +-----+--------+ +--------+------+
- * | | |
- * v---->----------v-------------<------v
- * |
- * +----------+-----------------+
- * | dp_phy_pll_vco_div_clk |
- * +---------+------------------+
- * |
- * v
- * Input to DISPCC block
- * for DP pixel clock
- *
- */
-static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- switch (req->rate) {
- case 1620000000UL / 2:
- case 2700000000UL / 2:
-		/*
-		 * 5.4 and 8.1 Gbps use div 4 and div 6, so they produce the
-		 * same divided rate as 2.7 Gbps (see the arithmetic check
-		 * after the pixel clk_ops below)
-		 */
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static unsigned long
-qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- const struct qmp_phy_dp_clks *dp_clks;
- const struct qmp_phy *qphy;
- const struct phy_configure_opts_dp *dp_opts;
-
- dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_pixel_hw);
- qphy = dp_clks->qphy;
- dp_opts = &qphy->dp_opts;
-
- switch (dp_opts->link_rate) {
- case 1620:
- return 1620000000UL / 2;
- case 2700:
- return 2700000000UL / 2;
- case 5400:
- return 5400000000UL / 4;
- case 8100:
- return 8100000000UL / 6;
- default:
- return 0;
- }
-}
-
-static const struct clk_ops qcom_qmp_dp_pixel_clk_ops = {
- .determine_rate = qcom_qmp_dp_pixel_clk_determine_rate,
- .recalc_rate = qcom_qmp_dp_pixel_clk_recalc_rate,
-};
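A plain-arithmetic check of the divisor comment in qcom_qmp_dp_pixel_clk_determine_rate() above (illustrative only, not driver code): HBR2 and HBR3 reuse the 1.35 GHz vco_div rate already produced for HBR, which is why only the two half-rate values need to be accepted there.

#include <assert.h>

int main(void)
{
	assert(1620000000ULL / 2 ==  810000000ULL);	/* RBR  (1.62 Gbps), div 2 */
	assert(2700000000ULL / 2 == 1350000000ULL);	/* HBR  (2.7 Gbps),  div 2 */
	assert(5400000000ULL / 4 == 1350000000ULL);	/* HBR2 (5.4 Gbps),  div 4 */
	assert(8100000000ULL / 6 == 1350000000ULL);	/* HBR3 (8.1 Gbps),  div 6 */
	return 0;
}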
-
-static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- switch (req->rate) {
- case 162000000:
- case 270000000:
- case 540000000:
- case 810000000:
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static unsigned long
-qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- const struct qmp_phy_dp_clks *dp_clks;
- const struct qmp_phy *qphy;
- const struct phy_configure_opts_dp *dp_opts;
-
- dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_link_hw);
- qphy = dp_clks->qphy;
- dp_opts = &qphy->dp_opts;
-
- switch (dp_opts->link_rate) {
- case 1620:
- case 2700:
- case 5400:
- case 8100:
- return dp_opts->link_rate * 100000;
- default:
- return 0;
- }
-}
-
-static const struct clk_ops qcom_qmp_dp_link_clk_ops = {
- .determine_rate = qcom_qmp_dp_link_clk_determine_rate,
- .recalc_rate = qcom_qmp_dp_link_clk_recalc_rate,
-};
-
-static struct clk_hw *
-qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data)
-{
- struct qmp_phy_dp_clks *dp_clks = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx >= 2) {
- pr_err("%s: invalid index %u\n", __func__, idx);
- return ERR_PTR(-EINVAL);
- }
-
- if (idx == 0)
- return &dp_clks->dp_link_hw;
-
- return &dp_clks->dp_pixel_hw;
-}
-
-static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy,
- struct device_node *np)
-{
- struct clk_init_data init = { };
- struct qmp_phy_dp_clks *dp_clks;
- char name[64];
- int ret;
-
- dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL);
- if (!dp_clks)
- return -ENOMEM;
-
- dp_clks->qphy = qphy;
- qphy->dp_clks = dp_clks;
-
- snprintf(name, sizeof(name), "%s::link_clk", dev_name(qmp->dev));
- init.ops = &qcom_qmp_dp_link_clk_ops;
- init.name = name;
- dp_clks->dp_link_hw.init = &init;
- ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw);
- if (ret)
- return ret;
-
- snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(qmp->dev));
- init.ops = &qcom_qmp_dp_pixel_clk_ops;
- init.name = name;
- dp_clks->dp_pixel_hw.init = &init;
- ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw);
- if (ret)
- return ret;
-
- ret = of_clk_add_hw_provider(np, qcom_qmp_dp_clks_hw_get, dp_clks);
- if (ret)
- return ret;
-
- /*
- * Roll a devm action because the clock provider is the child node, but
- * the child node is not actually a device.
- */
- return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
-}
-
-static const struct phy_ops qcom_qmp_phy_gen_ops = {
- .init = qcom_qmp_phy_enable,
- .exit = qcom_qmp_phy_disable,
- .set_mode = qcom_qmp_phy_set_mode,
- .owner = THIS_MODULE,
-};
-
-static const struct phy_ops qcom_qmp_phy_dp_ops = {
- .init = qcom_qmp_phy_init,
- .configure = qcom_qmp_dp_phy_configure,
- .power_on = qcom_qmp_phy_power_on,
- .calibrate = qcom_qmp_dp_phy_calibrate,
- .power_off = qcom_qmp_phy_power_off,
- .exit = qcom_qmp_phy_exit,
- .set_mode = qcom_qmp_phy_set_mode,
- .owner = THIS_MODULE,
-};
-
-static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
- .power_on = qcom_qmp_phy_enable,
- .power_off = qcom_qmp_phy_disable,
- .set_mode = qcom_qmp_phy_set_mode,
- .owner = THIS_MODULE,
-};
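For orientation, a hypothetical consumer-side sketch of how these ops tables are reached through the generic PHY framework (the consumer device and the "usb3-phy" phy-names entry are assumptions for illustration): phy_init() dispatches to .init and phy_power_on() to .power_on of whichever ops table was attached to the PHY at creation time.

#include <linux/device.h>
#include <linux/phy/phy.h>

static int example_bring_up_phy(struct device *dev)
{
	/* "usb3-phy" is an assumed phy-names entry in the consumer's node */
	struct phy *phy = devm_phy_get(dev, "usb3-phy");
	int ret;

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* e.g. qcom_qmp_phy_enable via qcom_qmp_phy_gen_ops */
	if (ret)
		return ret;

	return phy_power_on(phy);	/* .power_on, where the ops table provides it */
}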
-
-static void qcom_qmp_reset_control_put(void *data)
-{
- reset_control_put(data);
-}
-
-static
-int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
- void __iomem *serdes, const struct qmp_phy_cfg *cfg)
-{
- struct qcom_qmp *qmp = dev_get_drvdata(dev);
- struct phy *generic_phy;
- struct qmp_phy *qphy;
- const struct phy_ops *ops;
- char prop_name[MAX_PROP_NAME];
- int ret;
-
- qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
- if (!qphy)
- return -ENOMEM;
-
- qphy->cfg = cfg;
- qphy->serdes = serdes;
- /*
- * Get memory resources for each phy lane:
- * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
- * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
- * For single lane PHYs: pcs_misc (optional) -> 3.
- */
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
-
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
-
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
-
- /*
- * If this is a dual-lane PHY, then there should be registers for the
-	 * second lane. Some old device trees did not specify this, so fall
-	 * back to the legacy behavior of assuming they can be reached at an
-	 * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
-
- } else {
- qphy->pcs_misc = of_iomap(np, 3);
- }
-
- if (!qphy->pcs_misc)
- dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
-
- /*
-	 * Get the PHY's pipe clock, if any. USB3 and PCIe PHYs are
-	 * PIPE3-based, so they always have a pipe clock; return an error
-	 * if such a PHY's pipe clock is missing. For all other PHY types,
-	 * which don't need it, initialize the pipe clock to NULL.
- */
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
- if (IS_ERR(qphy->pipe_clk)) {
- if (cfg->type == PHY_TYPE_PCIE ||
- cfg->type == PHY_TYPE_USB3) {
- ret = PTR_ERR(qphy->pipe_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "failed to get lane%d pipe_clk, %d\n",
- id, ret);
- return ret;
- }
- qphy->pipe_clk = NULL;
- }
-
- /* Get lane reset, if any */
- if (cfg->has_lane_rst) {
- snprintf(prop_name, sizeof(prop_name), "lane%d", id);
- qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
- if (IS_ERR(qphy->lane_rst)) {
- dev_err(dev, "failed to get lane%d reset\n", id);
- return PTR_ERR(qphy->lane_rst);
- }
- ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
- qphy->lane_rst);
- if (ret)
- return ret;
- }
-
- if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
- ops = &qcom_qmp_pcie_ufs_ops;
- else if (cfg->type == PHY_TYPE_DP)
- ops = &qcom_qmp_phy_dp_ops;
- else
- ops = &qcom_qmp_phy_gen_ops;
-
- generic_phy = devm_phy_create(dev, np, ops);
- if (IS_ERR(generic_phy)) {
- ret = PTR_ERR(generic_phy);
- dev_err(dev, "failed to create qphy %d\n", ret);
- return ret;
- }
-
- qphy->phy = generic_phy;
- qphy->index = id;
- qphy->qmp = qmp;
- qmp->phys[id] = qphy;
- phy_set_drvdata(generic_phy, qphy);
-
- return 0;
-}
-
-static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
- {
- .compatible = "qcom,ipq8074-qmp-usb3-phy",
- .data = &ipq8074_usb3phy_cfg,
- }, {
- .compatible = "qcom,msm8996-qmp-pcie-phy",
- .data = &msm8996_pciephy_cfg,
- }, {
- .compatible = "qcom,msm8996-qmp-ufs-phy",
- .data = &msm8996_ufs_cfg,
- }, {
- .compatible = "qcom,msm8996-qmp-usb3-phy",
- .data = &msm8996_usb3phy_cfg,
- }, {
- .compatible = "qcom,msm8998-qmp-pcie-phy",
- .data = &msm8998_pciephy_cfg,
- }, {
- .compatible = "qcom,msm8998-qmp-ufs-phy",
- .data = &sdm845_ufsphy_cfg,
- }, {
- .compatible = "qcom,ipq8074-qmp-pcie-phy",
- .data = &ipq8074_pciephy_cfg,
- }, {
- .compatible = "qcom,ipq6018-qmp-pcie-phy",
- .data = &ipq6018_pciephy_cfg,
- }, {
- .compatible = "qcom,ipq6018-qmp-usb3-phy",
- .data = &ipq8074_usb3phy_cfg,
- }, {
- .compatible = "qcom,sc7180-qmp-usb3-phy",
- .data = &sc7180_usb3phy_cfg,
- }, {
- .compatible = "qcom,sc7180-qmp-usb3-dp-phy",
- /* It's a combo phy */
- }, {
- .compatible = "qcom,sc8180x-qmp-pcie-phy",
- .data = &sc8180x_pciephy_cfg,
- }, {
- .compatible = "qcom,sc8180x-qmp-ufs-phy",
- .data = &sm8150_ufsphy_cfg,
- }, {
- .compatible = "qcom,sc8280xp-qmp-ufs-phy",
- .data = &sm8350_ufsphy_cfg,
- }, {
- .compatible = "qcom,sc8180x-qmp-usb3-phy",
- .data = &sm8150_usb3phy_cfg,
- }, {
- .compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
- /* It's a combo phy */
- }, {
- .compatible = "qcom,sdm845-qhp-pcie-phy",
- .data = &sdm845_qhp_pciephy_cfg,
- }, {
- .compatible = "qcom,sdm845-qmp-pcie-phy",
- .data = &sdm845_qmp_pciephy_cfg,
- }, {
- .compatible = "qcom,sdm845-qmp-usb3-phy",
- .data = &qmp_v3_usb3phy_cfg,
- }, {
- .compatible = "qcom,sdm845-qmp-usb3-uni-phy",
- .data = &qmp_v3_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sdm845-qmp-ufs-phy",
- .data = &sdm845_ufsphy_cfg,
- }, {
- .compatible = "qcom,msm8998-qmp-usb3-phy",
- .data = &msm8998_usb3phy_cfg,
- }, {
- .compatible = "qcom,sm6115-qmp-ufs-phy",
- .data = &sm6115_ufsphy_cfg,
- }, {
- .compatible = "qcom,sm6350-qmp-ufs-phy",
- .data = &sdm845_ufsphy_cfg,
- }, {
- .compatible = "qcom,sm8150-qmp-ufs-phy",
- .data = &sm8150_ufsphy_cfg,
- }, {
- .compatible = "qcom,sm8250-qmp-ufs-phy",
- .data = &sm8150_ufsphy_cfg,
- }, {
- .compatible = "qcom,sm8150-qmp-usb3-phy",
- .data = &sm8150_usb3phy_cfg,
- }, {
- .compatible = "qcom,sm8150-qmp-usb3-uni-phy",
- .data = &sm8150_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sm8250-qmp-usb3-phy",
- .data = &sm8250_usb3phy_cfg,
- }, {
- .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
- /* It's a combo phy */
- }, {
- .compatible = "qcom,sm8250-qmp-usb3-uni-phy",
- .data = &sm8250_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy",
- .data = &sm8250_qmp_gen3x1_pciephy_cfg,
- }, {
- .compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy",
- .data = &sm8250_qmp_gen3x2_pciephy_cfg,
- }, {
- .compatible = "qcom,sm8350-qmp-ufs-phy",
- .data = &sm8350_ufsphy_cfg,
- }, {
- .compatible = "qcom,sm8250-qmp-modem-pcie-phy",
- .data = &sm8250_qmp_gen3x2_pciephy_cfg,
- }, {
- .compatible = "qcom,sdx55-qmp-pcie-phy",
- .data = &sdx55_qmp_pciephy_cfg,
- }, {
- .compatible = "qcom,sdx55-qmp-usb3-uni-phy",
- .data = &sdx55_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sdx65-qmp-usb3-uni-phy",
- .data = &sdx65_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sm8350-qmp-usb3-phy",
- .data = &sm8350_usb3phy_cfg,
- }, {
- .compatible = "qcom,sm8350-qmp-usb3-uni-phy",
- .data = &sm8350_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy",
- .data = &sm8450_qmp_gen3x1_pciephy_cfg,
- }, {
- .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy",
- .data = &sm8450_qmp_gen4x2_pciephy_cfg,
- }, {
- .compatible = "qcom,sm8450-qmp-ufs-phy",
- .data = &sm8450_ufsphy_cfg,
- }, {
- .compatible = "qcom,sm8450-qmp-usb3-phy",
- .data = &sm8350_usb3phy_cfg,
- }, {
- .compatible = "qcom,qcm2290-qmp-usb3-phy",
- .data = &qcm2290_usb3phy_cfg,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
-
-static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
- {
- .compatible = "qcom,sc7180-qmp-usb3-dp-phy",
- .data = &sc7180_usb3dpphy_cfg,
- },
- {
- .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
- .data = &sm8250_usb3dpphy_cfg,
- },
- {
- .compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
- .data = &sc8180x_usb3dpphy_cfg,
- },
- { }
-};
-
-static const struct dev_pm_ops qcom_qmp_phy_pm_ops = {
- SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend,
- qcom_qmp_phy_runtime_resume, NULL)
-};
-
-static int qcom_qmp_phy_probe(struct platform_device *pdev)
-{
- struct qcom_qmp *qmp;
- struct device *dev = &pdev->dev;
- struct device_node *child;
- struct phy_provider *phy_provider;
- void __iomem *serdes;
- void __iomem *usb_serdes;
- void __iomem *dp_serdes = NULL;
- const struct qmp_phy_combo_cfg *combo_cfg = NULL;
- const struct qmp_phy_cfg *cfg = NULL;
- const struct qmp_phy_cfg *usb_cfg = NULL;
- const struct qmp_phy_cfg *dp_cfg = NULL;
- int num, id, expected_phys;
- int ret;
-
- qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
- if (!qmp)
- return -ENOMEM;
-
- qmp->dev = dev;
- dev_set_drvdata(dev, qmp);
-
- /* Get the specific init parameters of QMP phy */
- cfg = of_device_get_match_data(dev);
- if (!cfg) {
- const struct of_device_id *match;
-
- match = of_match_device(qcom_qmp_combo_phy_of_match_table, dev);
- if (!match)
- return -EINVAL;
-
- combo_cfg = match->data;
- if (!combo_cfg)
- return -EINVAL;
-
- usb_cfg = combo_cfg->usb_cfg;
- cfg = usb_cfg; /* Setup clks and regulators */
- }
-
- /* per PHY serdes; usually located at base address */
- usb_serdes = serdes = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- /* per PHY dp_com; if PHY has dp_com control block */
- if (combo_cfg || cfg->has_phy_dp_com_ctrl) {
- qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(qmp->dp_com))
- return PTR_ERR(qmp->dp_com);
- }
-
- if (combo_cfg) {
- /* Only two serdes for combo PHY */
- dp_serdes = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(dp_serdes))
- return PTR_ERR(dp_serdes);
-
- dp_cfg = combo_cfg->dp_cfg;
- expected_phys = 2;
- } else {
- expected_phys = cfg->nlanes;
- }
-
- mutex_init(&qmp->phy_mutex);
-
- ret = qcom_qmp_phy_clk_init(dev, cfg);
- if (ret)
- return ret;
-
- ret = qcom_qmp_phy_reset_init(dev, cfg);
- if (ret)
- return ret;
-
- ret = qcom_qmp_phy_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
-
- num = of_get_available_child_count(dev->of_node);
-	/* do we have a rogue child node? */
- if (num > expected_phys)
- return -EINVAL;
-
- qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
- if (!qmp->phys)
- return -ENOMEM;
-
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
- id = 0;
- for_each_available_child_of_node(dev->of_node, child) {
- if (of_node_name_eq(child, "dp-phy")) {
- cfg = dp_cfg;
- serdes = dp_serdes;
- } else if (of_node_name_eq(child, "usb3-phy")) {
- cfg = usb_cfg;
- serdes = usb_serdes;
- }
-
- /* Create per-lane phy */
- ret = qcom_qmp_phy_create(dev, child, id, serdes, cfg);
- if (ret) {
- dev_err(dev, "failed to create lane%d phy, %d\n",
- id, ret);
- goto err_node_put;
- }
-
- /*
- * Register the pipe clock provided by phy.
- * See function description to see details of this pipe clock.
- */
- if (cfg->type == PHY_TYPE_USB3 || cfg->type == PHY_TYPE_PCIE) {
- ret = phy_pipe_clk_register(qmp, child);
- if (ret) {
- dev_err(qmp->dev,
- "failed to register pipe clock source\n");
- goto err_node_put;
- }
- } else if (cfg->type == PHY_TYPE_DP) {
- ret = phy_dp_clks_register(qmp, qmp->phys[id], child);
- if (ret) {
- dev_err(qmp->dev,
- "failed to register DP clock source\n");
- goto err_node_put;
- }
- }
- id++;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
-
- return PTR_ERR_OR_ZERO(phy_provider);
-
-err_node_put:
- pm_runtime_disable(dev);
- of_node_put(child);
- return ret;
-}
-
-static struct platform_driver qcom_qmp_phy_driver = {
- .probe = qcom_qmp_phy_probe,
- .driver = {
- .name = "qcom-qmp-phy",
- .pm = &qcom_qmp_phy_pm_ops,
- .of_match_table = qcom_qmp_phy_of_match_table,
- },
-};
-
-module_platform_driver(qcom_qmp_phy_driver);
-
-MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
-MODULE_DESCRIPTION("Qualcomm QMP PHY driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 06b2556ed93a..b139c8af5e8b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -6,275 +6,44 @@
#ifndef QCOM_PHY_QMP_H_
#define QCOM_PHY_QMP_H_
-/* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
+#include "phy-qcom-qmp-qserdes-com.h"
+#include "phy-qcom-qmp-qserdes-txrx.h"
-#define QSERDES_PLL_BG_TIMER 0x00c
-#define QSERDES_PLL_SSC_PER1 0x01c
-#define QSERDES_PLL_SSC_PER2 0x020
-#define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
-#define QSERDES_PLL_SSC_STEP_SIZE2_MODE0 0x028
-#define QSERDES_PLL_SSC_STEP_SIZE1_MODE1 0x02c
-#define QSERDES_PLL_SSC_STEP_SIZE2_MODE1 0x030
-#define QSERDES_PLL_BIAS_EN_CLKBUFLR_EN 0x03c
-#define QSERDES_PLL_CLK_ENABLE1 0x040
-#define QSERDES_PLL_SYS_CLK_CTRL 0x044
-#define QSERDES_PLL_SYSCLK_BUF_ENABLE 0x048
-#define QSERDES_PLL_PLL_IVCO 0x050
-#define QSERDES_PLL_LOCK_CMP1_MODE0 0x054
-#define QSERDES_PLL_LOCK_CMP2_MODE0 0x058
-#define QSERDES_PLL_LOCK_CMP1_MODE1 0x060
-#define QSERDES_PLL_LOCK_CMP2_MODE1 0x064
-#define QSERDES_PLL_BG_TRIM 0x074
-#define QSERDES_PLL_CLK_EP_DIV_MODE0 0x078
-#define QSERDES_PLL_CLK_EP_DIV_MODE1 0x07c
-#define QSERDES_PLL_CP_CTRL_MODE0 0x080
-#define QSERDES_PLL_CP_CTRL_MODE1 0x084
-#define QSERDES_PLL_PLL_RCTRL_MODE0 0x088
-#define QSERDES_PLL_PLL_RCTRL_MODE1 0x08C
-#define QSERDES_PLL_PLL_CCTRL_MODE0 0x090
-#define QSERDES_PLL_PLL_CCTRL_MODE1 0x094
-#define QSERDES_PLL_BIAS_EN_CTRL_BY_PSM 0x0a4
-#define QSERDES_PLL_SYSCLK_EN_SEL 0x0a8
-#define QSERDES_PLL_RESETSM_CNTRL 0x0b0
-#define QSERDES_PLL_LOCK_CMP_EN 0x0c4
-#define QSERDES_PLL_DEC_START_MODE0 0x0cc
-#define QSERDES_PLL_DEC_START_MODE1 0x0d0
-#define QSERDES_PLL_DIV_FRAC_START1_MODE0 0x0d8
-#define QSERDES_PLL_DIV_FRAC_START2_MODE0 0x0dc
-#define QSERDES_PLL_DIV_FRAC_START3_MODE0 0x0e0
-#define QSERDES_PLL_DIV_FRAC_START1_MODE1 0x0e4
-#define QSERDES_PLL_DIV_FRAC_START2_MODE1 0x0e8
-#define QSERDES_PLL_DIV_FRAC_START3_MODE1 0x0eC
-#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE0 0x100
-#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE0 0x104
-#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE1 0x108
-#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE1 0x10c
-#define QSERDES_PLL_VCO_TUNE_MAP 0x120
-#define QSERDES_PLL_VCO_TUNE1_MODE0 0x124
-#define QSERDES_PLL_VCO_TUNE2_MODE0 0x128
-#define QSERDES_PLL_VCO_TUNE1_MODE1 0x12c
-#define QSERDES_PLL_VCO_TUNE2_MODE1 0x130
-#define QSERDES_PLL_VCO_TUNE_TIMER1 0x13c
-#define QSERDES_PLL_VCO_TUNE_TIMER2 0x140
-#define QSERDES_PLL_CLK_SELECT 0x16c
-#define QSERDES_PLL_HSCLK_SEL 0x170
-#define QSERDES_PLL_CORECLK_DIV 0x17c
-#define QSERDES_PLL_CORE_CLK_EN 0x184
-#define QSERDES_PLL_CMN_CONFIG 0x18c
-#define QSERDES_PLL_SVS_MODE_CLK_SEL 0x194
-#define QSERDES_PLL_CORECLK_DIV_MODE1 0x1b4
+#include "phy-qcom-qmp-qserdes-com-v3.h"
+#include "phy-qcom-qmp-qserdes-txrx-v3.h"
-/* QMP V2 PHY for PCIE gen3 ports - QSERDES TX registers */
+#include "phy-qcom-qmp-qserdes-com-v4.h"
+#include "phy-qcom-qmp-qserdes-txrx-v4.h"
+#include "phy-qcom-qmp-qserdes-txrx-v4_20.h"
-#define QSERDES_TX0_RES_CODE_LANE_OFFSET_TX 0x03c
-#define QSERDES_TX0_HIGHZ_DRVR_EN 0x058
-#define QSERDES_TX0_LANE_MODE_1 0x084
-#define QSERDES_TX0_RCV_DETECT_LVL_2 0x09c
+#include "phy-qcom-qmp-qserdes-com-v5.h"
+#include "phy-qcom-qmp-qserdes-txrx-v5.h"
+#include "phy-qcom-qmp-qserdes-txrx-v5_20.h"
-/* QMP V2 PHY for PCIE gen3 ports - QSERDES RX registers */
+#include "phy-qcom-qmp-qserdes-pll.h"
-#define QSERDES_RX0_UCDR_FO_GAIN 0x008
-#define QSERDES_RX0_UCDR_SO_GAIN 0x014
-#define QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE 0x034
-#define QSERDES_RX0_UCDR_PI_CONTROLS 0x044
-#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2 0x0ec
-#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3 0x0f0
-#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4 0x0f4
-#define QSERDES_RX0_RX_IDAC_TSETTLE_LOW 0x0f8
-#define QSERDES_RX0_RX_IDAC_TSETTLE_HIGH 0x0fc
-#define QSERDES_RX0_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
-#define QSERDES_RX0_RX_OFFSET_ADAPTOR_CNTRL2 0x114
-#define QSERDES_RX0_SIGDET_ENABLES 0x118
-#define QSERDES_RX0_SIGDET_CNTRL 0x11c
-#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL 0x124
-#define QSERDES_RX0_RX_MODE_00_LOW 0x170
-#define QSERDES_RX0_RX_MODE_00_HIGH 0x174
-#define QSERDES_RX0_RX_MODE_00_HIGH2 0x178
-#define QSERDES_RX0_RX_MODE_00_HIGH3 0x17c
-#define QSERDES_RX0_RX_MODE_00_HIGH4 0x180
-#define QSERDES_RX0_RX_MODE_01_LOW 0x184
-#define QSERDES_RX0_RX_MODE_01_HIGH 0x188
-#define QSERDES_RX0_RX_MODE_01_HIGH2 0x18c
-#define QSERDES_RX0_RX_MODE_01_HIGH3 0x190
-#define QSERDES_RX0_RX_MODE_01_HIGH4 0x194
-#define QSERDES_RX0_RX_MODE_10_LOW 0x198
-#define QSERDES_RX0_RX_MODE_10_HIGH 0x19c
-#define QSERDES_RX0_RX_MODE_10_HIGH2 0x1a0
-#define QSERDES_RX0_RX_MODE_10_HIGH3 0x1a4
-#define QSERDES_RX0_RX_MODE_10_HIGH4 0x1a8
-#define QSERDES_RX0_DFE_EN_TIMER 0x1b4
+#include "phy-qcom-qmp-pcs-v2.h"
-/* QMP V2 PHY for PCIE gen3 ports - PCS registers */
+#include "phy-qcom-qmp-pcs-v3.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-ufs-v3.h"
-#define PCS_COM_FLL_CNTRL1 0x098
-#define PCS_COM_FLL_CNTRL2 0x09c
-#define PCS_COM_FLL_CNT_VAL_L 0x0a0
-#define PCS_COM_FLL_CNT_VAL_H_TOL 0x0a4
-#define PCS_COM_FLL_MAN_CODE 0x0a8
-#define PCS_COM_REFGEN_REQ_CONFIG1 0x0dc
-#define PCS_COM_G12S1_TXDEEMPH_M3P5DB 0x16c
-#define PCS_COM_RX_SIGDET_LVL 0x188
-#define PCS_COM_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4
-#define PCS_COM_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8
-#define PCS_COM_RX_DCC_CAL_CONFIG 0x1d8
-#define PCS_COM_EQ_CONFIG5 0x1ec
+#include "phy-qcom-qmp-pcs-v4.h"
+#include "phy-qcom-qmp-pcs-pcie-v4.h"
+#include "phy-qcom-qmp-pcs-usb-v4.h"
+#include "phy-qcom-qmp-pcs-ufs-v4.h"
-/* QMP V2 PHY for PCIE gen3 ports - PCS Misc registers */
+#include "phy-qcom-qmp-pcs-v4_20.h"
+#include "phy-qcom-qmp-pcs-pcie-v4_20.h"
-#define PCS_PCIE_POWER_STATE_CONFIG2 0x40c
-#define PCS_PCIE_POWER_STATE_CONFIG4 0x414
-#define PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x41c
-#define PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x440
-#define PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x444
-#define PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x448
-#define PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x44c
-#define PCS_PCIE_OSC_DTCT_CONFIG2 0x45c
-#define PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x478
-#define PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x480
-#define PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x484
-#define PCS_PCIE_OSC_DTCT_ACTIONS 0x490
-#define PCS_PCIE_EQ_CONFIG1 0x4a0
-#define PCS_PCIE_EQ_CONFIG2 0x4a4
-#define PCS_PCIE_PRESET_P10_PRE 0x4bc
-#define PCS_PCIE_PRESET_P10_POST 0x4e0
+#include "phy-qcom-qmp-pcs-v5.h"
+#include "phy-qcom-qmp-pcs-pcie-v5.h"
+#include "phy-qcom-qmp-pcs-usb-v5.h"
+#include "phy-qcom-qmp-pcs-ufs-v5.h"
-/* Only for QMP V2 PHY - QSERDES COM registers */
-#define QSERDES_COM_BG_TIMER 0x00c
-#define QSERDES_COM_SSC_EN_CENTER 0x010
-#define QSERDES_COM_SSC_ADJ_PER1 0x014
-#define QSERDES_COM_SSC_ADJ_PER2 0x018
-#define QSERDES_COM_SSC_PER1 0x01c
-#define QSERDES_COM_SSC_PER2 0x020
-#define QSERDES_COM_SSC_STEP_SIZE1 0x024
-#define QSERDES_COM_SSC_STEP_SIZE2 0x028
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
-#define QSERDES_COM_CLK_ENABLE1 0x038
-#define QSERDES_COM_SYS_CLK_CTRL 0x03c
-#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
-#define QSERDES_COM_PLL_IVCO 0x048
-#define QSERDES_COM_LOCK_CMP1_MODE0 0x04c
-#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
-#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
-#define QSERDES_COM_LOCK_CMP1_MODE1 0x058
-#define QSERDES_COM_LOCK_CMP2_MODE1 0x05c
-#define QSERDES_COM_LOCK_CMP3_MODE1 0x060
-#define QSERDES_COM_BG_TRIM 0x070
-#define QSERDES_COM_CLK_EP_DIV 0x074
-#define QSERDES_COM_CP_CTRL_MODE0 0x078
-#define QSERDES_COM_CP_CTRL_MODE1 0x07c
-#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
-#define QSERDES_COM_PLL_RCTRL_MODE1 0x088
-#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
-#define QSERDES_COM_PLL_CCTRL_MODE1 0x094
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x0a8
-#define QSERDES_COM_SYSCLK_EN_SEL 0x0ac
-#define QSERDES_COM_RESETSM_CNTRL 0x0b4
-#define QSERDES_COM_RESETSM_CNTRL2 0x0b8
-#define QSERDES_COM_RESTRIM_CTRL 0x0bc
-#define QSERDES_COM_RESCODE_DIV_NUM 0x0c4
-#define QSERDES_COM_LOCK_CMP_EN 0x0c8
-#define QSERDES_COM_LOCK_CMP_CFG 0x0cc
-#define QSERDES_COM_DEC_START_MODE0 0x0d0
-#define QSERDES_COM_DEC_START_MODE1 0x0d4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0dc
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0e0
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0e4
-#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x0e8
-#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x0ec
-#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x0f0
-#define QSERDES_COM_INTEGLOOP_INITVAL 0x100
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10c
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x110
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x114
-#define QSERDES_COM_VCO_TUNE_CTRL 0x124
-#define QSERDES_COM_VCO_TUNE_MAP 0x128
-#define QSERDES_COM_VCO_TUNE1_MODE0 0x12c
-#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
-#define QSERDES_COM_VCO_TUNE1_MODE1 0x134
-#define QSERDES_COM_VCO_TUNE2_MODE1 0x138
-#define QSERDES_COM_VCO_TUNE_INITVAL1 0x13c
-#define QSERDES_COM_VCO_TUNE_INITVAL2 0x140
-#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
-#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
-#define QSERDES_COM_BG_CTRL 0x170
-#define QSERDES_COM_CLK_SELECT 0x174
-#define QSERDES_COM_HSCLK_SEL 0x178
-#define QSERDES_COM_CORECLK_DIV 0x184
-#define QSERDES_COM_CORE_CLK_EN 0x18c
-#define QSERDES_COM_C_READY_STATUS 0x190
-#define QSERDES_COM_CMN_CONFIG 0x194
-#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19c
-#define QSERDES_COM_DEBUG_BUS0 0x1a0
-#define QSERDES_COM_DEBUG_BUS1 0x1a4
-#define QSERDES_COM_DEBUG_BUS2 0x1a8
-#define QSERDES_COM_DEBUG_BUS3 0x1ac
-#define QSERDES_COM_DEBUG_BUS_SEL 0x1b0
-#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
+#include "phy-qcom-qmp-pcs-pcie-v5_20.h"
-/* Only for QMP V2 PHY - TX registers */
-#define QSERDES_TX_EMP_POST1_LVL 0x018
-#define QSERDES_TX_SLEW_CNTL 0x040
-#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
-#define QSERDES_TX_DEBUG_BUS_SEL 0x064
-#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
-#define QSERDES_TX_LANE_MODE 0x094
-#define QSERDES_TX_RCV_DETECT_LVL_2 0x0ac
-
-/* Only for QMP V2 PHY - RX registers */
-#define QSERDES_RX_UCDR_SO_GAIN_HALF 0x010
-#define QSERDES_RX_UCDR_SO_GAIN 0x01c
-#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF 0x030
-#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER 0x034
-#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH 0x038
-#define QSERDES_RX_UCDR_SVS_SO_GAIN 0x03c
-#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x040
-#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x048
-#define QSERDES_RX_RX_TERM_BW 0x090
-#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x0c4
-#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x0c8
-#define QSERDES_RX_RX_EQ_GAIN2_LSB 0x0cc
-#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x0d0
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d8
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x0dc
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x0e0
-#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x108
-#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x10c
-#define QSERDES_RX_SIGDET_ENABLES 0x110
-#define QSERDES_RX_SIGDET_CNTRL 0x114
-#define QSERDES_RX_SIGDET_LVL 0x118
-#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x11c
-#define QSERDES_RX_RX_BAND 0x120
-#define QSERDES_RX_RX_INTERFACE_MODE 0x12c
-
-/* Only for QMP V2 PHY - PCS registers */
-#define QPHY_POWER_DOWN_CONTROL 0x04
-#define QPHY_TXDEEMPH_M6DB_V0 0x24
-#define QPHY_TXDEEMPH_M3P5DB_V0 0x28
-#define QPHY_TX_LARGE_AMP_DRV_LVL 0x34
-#define QPHY_TX_LARGE_AMP_POST_EMP_LVL 0x38
-#define QPHY_TX_SMALL_AMP_DRV_LVL 0x3c
-#define QPHY_TX_SMALL_AMP_POST_EMP_LVL 0x40
-#define QPHY_ENDPOINT_REFCLK_DRIVE 0x54
-#define QPHY_RX_IDLE_DTCT_CNTRL 0x58
-#define QPHY_POWER_STATE_CONFIG1 0x60
-#define QPHY_POWER_STATE_CONFIG2 0x64
-#define QPHY_POWER_STATE_CONFIG4 0x6c
-#define QPHY_LOCK_DETECT_CONFIG1 0x80
-#define QPHY_LOCK_DETECT_CONFIG2 0x84
-#define QPHY_LOCK_DETECT_CONFIG3 0x88
-#define QPHY_PWRUP_RESET_DLY_TIME_AUXCLK 0xa0
-#define QPHY_LP_WAKEUP_DLY_TIME_AUXCLK 0xa4
-#define QPHY_RX_MIN_STALL_NOCONFIG_TIME_CAP 0xcc
-#define QPHY_RX_SYM_RESYNC_CTRL 0x13c
-#define QPHY_RX_MIN_HIBERN8_TIME 0x140
-#define QPHY_RX_SIGDET_CTRL2 0x148
-#define QPHY_RX_PWM_GEAR_BAND 0x154
-#define QPHY_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1A8
-#define QPHY_OSC_DTCT_ACTIONS 0x1AC
-#define QPHY_RX_SIGDET_LVL 0x1D8
-#define QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1DC
-#define QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1E0
+#include "phy-qcom-qmp-pcie-qhp.h"
/* Only for QMP V3 & V4 PHY - DP COM registers */
#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
@@ -285,21 +54,7 @@
#define QPHY_V3_DP_COM_TYPEC_PWRDN_CTRL 0x14
#define QPHY_V3_DP_COM_RESET_OVRD_CTRL 0x1c
-/* Only for QMP V3 PHY - QSERDES COM registers */
-#define QSERDES_V3_COM_ATB_SEL1 0x000
-#define QSERDES_V3_COM_ATB_SEL2 0x004
-#define QSERDES_V3_COM_FREQ_UPDATE 0x008
-#define QSERDES_V3_COM_BG_TIMER 0x00c
-#define QSERDES_V3_COM_SSC_EN_CENTER 0x010
-#define QSERDES_V3_COM_SSC_ADJ_PER1 0x014
-#define QSERDES_V3_COM_SSC_ADJ_PER2 0x018
-#define QSERDES_V3_COM_SSC_PER1 0x01c
-#define QSERDES_V3_COM_SSC_PER2 0x020
-#define QSERDES_V3_COM_SSC_STEP_SIZE1 0x024
-#define QSERDES_V3_COM_SSC_STEP_SIZE2 0x028
-#define QSERDES_V3_COM_POST_DIV 0x02c
-#define QSERDES_V3_COM_POST_DIV_MUX 0x030
-#define QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN 0x034
+/* QSERDES V3 COM bits */
# define QSERDES_V3_COM_BIAS_EN 0x0001
# define QSERDES_V3_COM_BIAS_EN_MUX 0x0002
# define QSERDES_V3_COM_CLKBUF_R_EN 0x0004
@@ -307,200 +62,13 @@
# define QSERDES_V3_COM_EN_SYSCLK_TX_SEL 0x0010
# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_L 0x0020
# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_R 0x0040
-#define QSERDES_V3_COM_CLK_ENABLE1 0x038
-#define QSERDES_V3_COM_SYS_CLK_CTRL 0x03c
-#define QSERDES_V3_COM_SYSCLK_BUF_ENABLE 0x040
-#define QSERDES_V3_COM_PLL_EN 0x044
-#define QSERDES_V3_COM_PLL_IVCO 0x048
-#define QSERDES_V3_COM_LOCK_CMP1_MODE0 0x098
-#define QSERDES_V3_COM_LOCK_CMP2_MODE0 0x09c
-#define QSERDES_V3_COM_LOCK_CMP3_MODE0 0x0a0
-#define QSERDES_V3_COM_LOCK_CMP1_MODE1 0x0a4
-#define QSERDES_V3_COM_LOCK_CMP2_MODE1 0x0a8
-#define QSERDES_V3_COM_LOCK_CMP3_MODE1 0x0ac
-#define QSERDES_V3_COM_CLK_EP_DIV 0x05c
-#define QSERDES_V3_COM_CP_CTRL_MODE0 0x060
-#define QSERDES_V3_COM_CP_CTRL_MODE1 0x064
-#define QSERDES_V3_COM_PLL_RCTRL_MODE0 0x068
-#define QSERDES_V3_COM_PLL_RCTRL_MODE1 0x06c
-#define QSERDES_V3_COM_PLL_CCTRL_MODE0 0x070
-#define QSERDES_V3_COM_PLL_CCTRL_MODE1 0x074
-#define QSERDES_V3_COM_SYSCLK_EN_SEL 0x080
-#define QSERDES_V3_COM_RESETSM_CNTRL 0x088
-#define QSERDES_V3_COM_RESETSM_CNTRL2 0x08c
-#define QSERDES_V3_COM_LOCK_CMP_EN 0x090
-#define QSERDES_V3_COM_LOCK_CMP_CFG 0x094
-#define QSERDES_V3_COM_DEC_START_MODE0 0x0b0
-#define QSERDES_V3_COM_DEC_START_MODE1 0x0b4
-#define QSERDES_V3_COM_DIV_FRAC_START1_MODE0 0x0b8
-#define QSERDES_V3_COM_DIV_FRAC_START2_MODE0 0x0bc
-#define QSERDES_V3_COM_DIV_FRAC_START3_MODE0 0x0c0
-#define QSERDES_V3_COM_DIV_FRAC_START1_MODE1 0x0c4
-#define QSERDES_V3_COM_DIV_FRAC_START2_MODE1 0x0c8
-#define QSERDES_V3_COM_DIV_FRAC_START3_MODE1 0x0cc
-#define QSERDES_V3_COM_INTEGLOOP_INITVAL 0x0d0
-#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0 0x0d8
-#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0 0x0dc
-#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1 0x0e0
-#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1 0x0e4
-#define QSERDES_V3_COM_VCO_TUNE_CTRL 0x0ec
-#define QSERDES_V3_COM_VCO_TUNE_MAP 0x0f0
-#define QSERDES_V3_COM_VCO_TUNE1_MODE0 0x0f4
-#define QSERDES_V3_COM_VCO_TUNE2_MODE0 0x0f8
-#define QSERDES_V3_COM_VCO_TUNE1_MODE1 0x0fc
-#define QSERDES_V3_COM_VCO_TUNE2_MODE1 0x100
-#define QSERDES_V3_COM_VCO_TUNE_INITVAL1 0x104
-#define QSERDES_V3_COM_VCO_TUNE_INITVAL2 0x108
-#define QSERDES_V3_COM_VCO_TUNE_TIMER1 0x11c
-#define QSERDES_V3_COM_VCO_TUNE_TIMER2 0x120
-#define QSERDES_V3_COM_CLK_SELECT 0x138
-#define QSERDES_V3_COM_HSCLK_SEL 0x13c
-#define QSERDES_V3_COM_CORECLK_DIV_MODE0 0x148
-#define QSERDES_V3_COM_CORECLK_DIV_MODE1 0x14c
-#define QSERDES_V3_COM_CORE_CLK_EN 0x154
-#define QSERDES_V3_COM_C_READY_STATUS 0x158
-#define QSERDES_V3_COM_CMN_CONFIG 0x15c
-#define QSERDES_V3_COM_SVS_MODE_CLK_SEL 0x164
-#define QSERDES_V3_COM_DEBUG_BUS0 0x168
-#define QSERDES_V3_COM_DEBUG_BUS1 0x16c
-#define QSERDES_V3_COM_DEBUG_BUS2 0x170
-#define QSERDES_V3_COM_DEBUG_BUS3 0x174
-#define QSERDES_V3_COM_DEBUG_BUS_SEL 0x178
-#define QSERDES_V3_COM_CMN_MODE 0x184
-/* Only for QMP V3 PHY - TX registers */
-#define QSERDES_V3_TX_BIST_MODE_LANENO 0x000
-#define QSERDES_V3_TX_CLKBUF_ENABLE 0x008
-#define QSERDES_V3_TX_TX_EMP_POST1_LVL 0x00c
+/* QSERDES V3 TX bits */
# define DP_PHY_TXn_TX_EMP_POST1_LVL_MASK 0x001f
# define DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN 0x0020
-
-#define QSERDES_V3_TX_TX_DRV_LVL 0x01c
# define DP_PHY_TXn_TX_DRV_LVL_MASK 0x001f
# define DP_PHY_TXn_TX_DRV_LVL_MUX_EN 0x0020
-#define QSERDES_V3_TX_RESET_TSYNC_EN 0x024
-#define QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN 0x028
-
-#define QSERDES_V3_TX_TX_BAND 0x02c
-#define QSERDES_V3_TX_SLEW_CNTL 0x030
-#define QSERDES_V3_TX_INTERFACE_SELECT 0x034
-#define QSERDES_V3_TX_RES_CODE_LANE_TX 0x03c
-#define QSERDES_V3_TX_RES_CODE_LANE_RX 0x040
-#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044
-#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX 0x048
-#define QSERDES_V3_TX_DEBUG_BUS_SEL 0x058
-#define QSERDES_V3_TX_TRANSCEIVER_BIAS_EN 0x05c
-#define QSERDES_V3_TX_HIGHZ_DRVR_EN 0x060
-#define QSERDES_V3_TX_TX_POL_INV 0x064
-#define QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN 0x068
-#define QSERDES_V3_TX_LANE_MODE_1 0x08c
-#define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4
-#define QSERDES_V3_TX_TRAN_DRVR_EMP_EN 0x0c0
-#define QSERDES_V3_TX_TX_INTERFACE_MODE 0x0c4
-#define QSERDES_V3_TX_VMODE_CTRL1 0x0f0
-
-/* Only for QMP V3 PHY - RX registers */
-#define QSERDES_V3_RX_UCDR_FO_GAIN 0x008
-#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
-#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
-#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
-#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
-#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN 0x02c
-#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
-#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
-#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
-#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
-#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
-#define QSERDES_V3_RX_RX_TERM_BW 0x07c
-#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
-#define QSERDES_V3_RX_VGA_CAL_CNTRL2 0x0c0
-#define QSERDES_V3_RX_RX_EQ_GAIN2_LSB 0x0c8
-#define QSERDES_V3_RX_RX_EQ_GAIN2_MSB 0x0cc
-#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d4
-#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3 0x0d8
-#define QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4 0x0dc
-#define QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x0f8
-#define QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x0fc
-#define QSERDES_V3_RX_SIGDET_ENABLES 0x100
-#define QSERDES_V3_RX_SIGDET_CNTRL 0x104
-#define QSERDES_V3_RX_SIGDET_LVL 0x108
-#define QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL 0x10c
-#define QSERDES_V3_RX_RX_BAND 0x110
-#define QSERDES_V3_RX_RX_INTERFACE_MODE 0x11c
-#define QSERDES_V3_RX_RX_MODE_00 0x164
-#define QSERDES_V3_RX_RX_MODE_01 0x168
-
-/* Only for QMP V3 PHY - PCS registers */
-#define QPHY_V3_PCS_POWER_DOWN_CONTROL 0x004
-#define QPHY_V3_PCS_TXMGN_V0 0x00c
-#define QPHY_V3_PCS_TXMGN_V1 0x010
-#define QPHY_V3_PCS_TXMGN_V2 0x014
-#define QPHY_V3_PCS_TXMGN_V3 0x018
-#define QPHY_V3_PCS_TXMGN_V4 0x01c
-#define QPHY_V3_PCS_TXMGN_LS 0x020
-#define QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL 0x02c
-#define QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL 0x034
-#define QPHY_V3_PCS_TXDEEMPH_M6DB_V0 0x024
-#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0 0x028
-#define QPHY_V3_PCS_TXDEEMPH_M6DB_V1 0x02c
-#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1 0x030
-#define QPHY_V3_PCS_TXDEEMPH_M6DB_V2 0x034
-#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2 0x038
-#define QPHY_V3_PCS_TXDEEMPH_M6DB_V3 0x03c
-#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3 0x040
-#define QPHY_V3_PCS_TXDEEMPH_M6DB_V4 0x044
-#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4 0x048
-#define QPHY_V3_PCS_TXDEEMPH_M6DB_LS 0x04c
-#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS 0x050
-#define QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE 0x054
-#define QPHY_V3_PCS_RX_IDLE_DTCT_CNTRL 0x058
-#define QPHY_V3_PCS_RATE_SLEW_CNTRL 0x05c
-#define QPHY_V3_PCS_POWER_STATE_CONFIG1 0x060
-#define QPHY_V3_PCS_POWER_STATE_CONFIG2 0x064
-#define QPHY_V3_PCS_POWER_STATE_CONFIG4 0x06c
-#define QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L 0x070
-#define QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H 0x074
-#define QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L 0x078
-#define QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H 0x07c
-#define QPHY_V3_PCS_LOCK_DETECT_CONFIG1 0x080
-#define QPHY_V3_PCS_LOCK_DETECT_CONFIG2 0x084
-#define QPHY_V3_PCS_LOCK_DETECT_CONFIG3 0x088
-#define QPHY_V3_PCS_TSYNC_RSYNC_TIME 0x08c
-#define QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x0a0
-#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK 0x0a4
-#define QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME 0x0a8
-#define QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK 0x0b0
-#define QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME 0x0b8
-#define QPHY_V3_PCS_RXEQTRAINING_RUN_TIME 0x0bc
-#define QPHY_V3_PCS_FLL_CNTRL1 0x0c4
-#define QPHY_V3_PCS_FLL_CNTRL2 0x0c8
-#define QPHY_V3_PCS_FLL_CNT_VAL_L 0x0cc
-#define QPHY_V3_PCS_FLL_CNT_VAL_H_TOL 0x0d0
-#define QPHY_V3_PCS_FLL_MAN_CODE 0x0d4
-#define QPHY_V3_PCS_RX_SYM_RESYNC_CTRL 0x134
-#define QPHY_V3_PCS_RX_MIN_HIBERN8_TIME 0x138
-#define QPHY_V3_PCS_RX_SIGDET_CTRL1 0x13c
-#define QPHY_V3_PCS_RX_SIGDET_CTRL2 0x140
-#define QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1a8
-#define QPHY_V3_PCS_OSC_DTCT_ACTIONS 0x1ac
-#define QPHY_V3_PCS_SIGDET_CNTRL 0x1b0
-#define QPHY_V3_PCS_TX_MID_TERM_CTRL1 0x1bc
-#define QPHY_V3_PCS_MULTI_LANE_CTRL1 0x1c4
-#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8
-#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
-#define QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
-#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c
-#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210
-
-/* Only for QMP V3 PHY - PCS_MISC registers */
-#define QPHY_V3_PCS_MISC_CLAMP_ENABLE 0x0c
-#define QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2 0x2c
-#define QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1 0x44
-#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2 0x54
-#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
-#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
-
/* QMP PHY - DP PHY registers */
#define QSERDES_DP_PHY_REVISION_ID0 0x000
#define QSERDES_DP_PHY_REVISION_ID1 0x004
@@ -548,165 +116,6 @@
#define QSERDES_V3_DP_PHY_STATUS 0x0c0
-/* Only for QMP V4 PHY - QSERDES COM registers */
-#define QSERDES_V4_COM_BG_TIMER 0x00c
-#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
-#define QSERDES_V4_COM_SSC_ADJ_PER1 0x014
-#define QSERDES_V4_COM_SSC_PER1 0x01c
-#define QSERDES_V4_COM_SSC_PER2 0x020
-#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024
-#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028
-#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030
-#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034
-#define QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN 0x044
-#define QSERDES_V4_COM_CLK_ENABLE1 0x048
-#define QSERDES_V4_COM_SYS_CLK_CTRL 0x04c
-#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050
-#define QSERDES_V4_COM_PLL_IVCO 0x058
-#define QSERDES_V4_COM_CMN_IPTRIM 0x060
-#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
-#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078
-#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c
-#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080
-#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
-#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
-#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
-#define QSERDES_V4_COM_RESETSM_CNTRL 0x09c
-#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
-#define QSERDES_V4_COM_LOCK_CMP_CFG 0x0a8
-#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
-#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
-#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
-#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
-#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
-#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
-#define QSERDES_V4_COM_DIV_FRAC_START1_MODE0 0x0cc
-#define QSERDES_V4_COM_DIV_FRAC_START2_MODE0 0x0d0
-#define QSERDES_V4_COM_DIV_FRAC_START3_MODE0 0x0d4
-#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8
-#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc
-#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
-#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0 0x0ec
-#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0 0x0f0
-#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1 0x0f4
-#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1 0x0f8
-#define QSERDES_V4_COM_VCO_TUNE_CTRL 0x108
-#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
-#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
-#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114
-#define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118
-#define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c
-#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
-#define QSERDES_V4_COM_CMN_STATUS 0x140
-#define QSERDES_V4_COM_CLK_SELECT 0x154
-#define QSERDES_V4_COM_HSCLK_SEL 0x158
-#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
-#define QSERDES_V4_COM_CORECLK_DIV_MODE0 0x168
-#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c
-#define QSERDES_V4_COM_CORE_CLK_EN 0x174
-#define QSERDES_V4_COM_C_READY_STATUS 0x178
-#define QSERDES_V4_COM_CMN_CONFIG 0x17c
-#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
-#define QSERDES_V4_COM_CMN_MISC1 0x19c
-#define QSERDES_V4_COM_INTERNAL_DIG_CORECLK_DIV 0x1a0
-#define QSERDES_V4_COM_CMN_MODE 0x1a4
-#define QSERDES_V4_COM_VCO_DC_LEVEL_CTRL 0x1a8
-#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
-#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
-#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
-#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
-#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
-
-/* Only for QMP V4 PHY - TX registers */
-#define QSERDES_V4_TX_CLKBUF_ENABLE 0x08
-#define QSERDES_V4_TX_TX_EMP_POST1_LVL 0x0c
-#define QSERDES_V4_TX_TX_DRV_LVL 0x14
-#define QSERDES_V4_TX_RESET_TSYNC_EN 0x1c
-#define QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN 0x20
-#define QSERDES_V4_TX_TX_BAND 0x24
-#define QSERDES_V4_TX_INTERFACE_SELECT 0x2c
-#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
-#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x38
-#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX 0x3c
-#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX 0x40
-#define QSERDES_V4_TX_TRANSCEIVER_BIAS_EN 0x54
-#define QSERDES_V4_TX_HIGHZ_DRVR_EN 0x58
-#define QSERDES_V4_TX_TX_POL_INV 0x5c
-#define QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
-#define QSERDES_V4_TX_LANE_MODE_1 0x84
-#define QSERDES_V4_TX_LANE_MODE_2 0x88
-#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x9c
-#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
-#define QSERDES_V4_TX_TX_INTERFACE_MODE 0xbc
-#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
-#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdC
-#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
-#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
-#define QSERDES_V4_TX_VMODE_CTRL1 0xe8
-#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
-
-/* Only for QMP V4_20 PHY - TX registers */
-#define QSERDES_V4_20_TX_LANE_MODE_1 0x88
-#define QSERDES_V4_20_TX_LANE_MODE_2 0x8c
-#define QSERDES_V4_20_TX_LANE_MODE_3 0x90
-#define QSERDES_V4_20_TX_VMODE_CTRL1 0xc4
-#define QSERDES_V4_20_TX_PI_QEC_CTRL 0xe0
-
-/* Only for QMP V4 PHY - RX registers */
-#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
-#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
-#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
-#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
-#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
-#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
-#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
-#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
-#define QSERDES_V4_RX_UCDR_SB2_THRESH1 0x04c
-#define QSERDES_V4_RX_UCDR_SB2_THRESH2 0x050
-#define QSERDES_V4_RX_UCDR_SB2_GAIN1 0x054
-#define QSERDES_V4_RX_UCDR_SB2_GAIN2 0x058
-#define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE 0x060
-#define QSERDES_V4_RX_RCLK_AUXDATA_SEL 0x064
-#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
-#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
-#define QSERDES_V4_RX_RX_TERM_BW 0x080
-#define QSERDES_V4_RX_VGA_CAL_CNTRL1 0x0d4
-#define QSERDES_V4_RX_VGA_CAL_CNTRL2 0x0d8
-#define QSERDES_V4_RX_GM_CAL 0x0dc
-#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
-#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
-#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
-#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
-#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
-#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
-#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
-#define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
-#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
-#define QSERDES_V4_RX_SIGDET_ENABLES 0x118
-#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
-#define QSERDES_V4_RX_SIGDET_LVL 0x120
-#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
-#define QSERDES_V4_RX_RX_BAND 0x128
-#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
-#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
-#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
-#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
-#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
-#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
-#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
-#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
-#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
-#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
-#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
-#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
-#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
-#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
-#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
-#define QSERDES_V4_RX_DFE_EN_TIMER 0x1b4
-#define QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET 0x1b8
-#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
-#define QSERDES_V4_RX_VTH_CODE 0x1c4
-
/* Only for QMP V4 PHY - DP PHY registers */
#define QSERDES_V4_DP_PHY_CFG_1 0x014
#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK 0x054
@@ -718,328 +127,6 @@
#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V4_DP_PHY_STATUS 0x0dc
-/* Only for QMP V4_20 PHY - RX registers */
-#define QSERDES_V4_20_RX_FO_GAIN_RATE2 0x008
-#define QSERDES_V4_20_RX_UCDR_PI_CONTROLS 0x058
-#define QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE 0x0ac
-#define QSERDES_V4_20_RX_DFE_3 0x110
-#define QSERDES_V4_20_RX_DFE_DAC_ENABLE1 0x134
-#define QSERDES_V4_20_RX_DFE_DAC_ENABLE2 0x138
-#define QSERDES_V4_20_RX_VGA_CAL_CNTRL2 0x150
-#define QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x178
-#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1 0x1c8
-#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2 0x1cc
-#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3 0x1d0
-#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4 0x1d4
-#define QSERDES_V4_20_RX_RX_MODE_RATE2_B0 0x1d8
-#define QSERDES_V4_20_RX_RX_MODE_RATE2_B1 0x1dc
-#define QSERDES_V4_20_RX_RX_MODE_RATE2_B2 0x1e0
-#define QSERDES_V4_20_RX_RX_MODE_RATE2_B3 0x1e4
-#define QSERDES_V4_20_RX_RX_MODE_RATE2_B4 0x1e8
-#define QSERDES_V4_20_RX_RX_MODE_RATE3_B0 0x1ec
-#define QSERDES_V4_20_RX_RX_MODE_RATE3_B1 0x1f0
-#define QSERDES_V4_20_RX_RX_MODE_RATE3_B2 0x1f4
-#define QSERDES_V4_20_RX_RX_MODE_RATE3_B3 0x1f8
-#define QSERDES_V4_20_RX_RX_MODE_RATE3_B4 0x1fc
-#define QSERDES_V4_20_RX_PHPRE_CTRL 0x200
-#define QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x20c
-#define QSERDES_V4_20_RX_MARG_COARSE_CTRL2 0x23c
-
-/* Only for QMP V4 PHY - UFS PCS registers */
-#define QPHY_V4_PCS_UFS_PHY_START 0x000
-#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
-#define QPHY_V4_PCS_UFS_SW_RESET 0x008
-#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
-#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
-#define QPHY_V4_PCS_UFS_PLL_CNTL 0x02c
-#define QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
-#define QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
-#define QPHY_V4_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
-#define QPHY_V4_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
-#define QPHY_V4_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
-#define QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
-#define QPHY_V4_PCS_UFS_LINECFG_DISABLE 0x148
-#define QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
-#define QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2 0x158
-#define QPHY_V4_PCS_UFS_TX_PWM_GEAR_BAND 0x160
-#define QPHY_V4_PCS_UFS_TX_HS_GEAR_BAND 0x168
-#define QPHY_V4_PCS_UFS_READY_STATUS 0x180
-#define QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
-#define QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
-
-/* PCIE GEN3 COM registers */
-#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14
-#define PCIE_GEN3_QHP_COM_SSC_PER1 0x20
-#define PCIE_GEN3_QHP_COM_SSC_PER2 0x24
-#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1 0x28
-#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2 0x2c
-#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1 0x34
-#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1 0x38
-#define PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN 0x54
-#define PCIE_GEN3_QHP_COM_CLK_ENABLE1 0x58
-#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0 0x6c
-#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0 0x70
-#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1 0x78
-#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1 0x7c
-#define PCIE_GEN3_QHP_COM_BGV_TRIM 0x98
-#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE0 0xb4
-#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE1 0xb8
-#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0 0xc0
-#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1 0xc4
-#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0 0xcc
-#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1 0xd0
-#define PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL 0xdc
-#define PCIE_GEN3_QHP_COM_RESTRIM_CTRL2 0xf0
-#define PCIE_GEN3_QHP_COM_LOCK_CMP_EN 0xf8
-#define PCIE_GEN3_QHP_COM_DEC_START_MODE0 0x100
-#define PCIE_GEN3_QHP_COM_DEC_START_MODE1 0x108
-#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0 0x11c
-#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0 0x120
-#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0 0x124
-#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1 0x128
-#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1 0x12c
-#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1 0x130
-#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0 0x150
-#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1 0x158
-#define PCIE_GEN3_QHP_COM_VCO_TUNE_MAP 0x178
-#define PCIE_GEN3_QHP_COM_BG_CTRL 0x1c8
-#define PCIE_GEN3_QHP_COM_CLK_SELECT 0x1cc
-#define PCIE_GEN3_QHP_COM_HSCLK_SEL1 0x1d0
-#define PCIE_GEN3_QHP_COM_CORECLK_DIV 0x1e0
-#define PCIE_GEN3_QHP_COM_CORE_CLK_EN 0x1e8
-#define PCIE_GEN3_QHP_COM_CMN_CONFIG 0x1f0
-#define PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL 0x1fc
-#define PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1 0x21c
-#define PCIE_GEN3_QHP_COM_CMN_MODE 0x224
-#define PCIE_GEN3_QHP_COM_VREGCLK_DIV1 0x228
-#define PCIE_GEN3_QHP_COM_VREGCLK_DIV2 0x22c
-
-/* PCIE GEN3 QHP Lane registers */
-#define PCIE_GEN3_QHP_L0_DRVR_CTRL0 0xc
-#define PCIE_GEN3_QHP_L0_DRVR_CTRL1 0x10
-#define PCIE_GEN3_QHP_L0_DRVR_CTRL2 0x14
-#define PCIE_GEN3_QHP_L0_DRVR_TAP_EN 0x18
-#define PCIE_GEN3_QHP_L0_TX_BAND_MODE 0x60
-#define PCIE_GEN3_QHP_L0_LANE_MODE 0x64
-#define PCIE_GEN3_QHP_L0_PARALLEL_RATE 0x7c
-#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE0 0xc0
-#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE1 0xc4
-#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE2 0xc8
-#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1 0xd0
-#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2 0xd4
-#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0 0xd8
-#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1 0xdc
-#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2 0xe0
-#define PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE 0xfc
-#define PCIE_GEN3_QHP_L0_CGA_THRESH_DFE 0x100
-#define PCIE_GEN3_QHP_L0_RXENGINE_EN0 0x108
-#define PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME 0x114
-#define PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME 0x118
-#define PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME 0x11c
-#define PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME 0x120
-#define PCIE_GEN3_QHP_L0_VGA_GAIN 0x124
-#define PCIE_GEN3_QHP_L0_DFE_GAIN 0x128
-#define PCIE_GEN3_QHP_L0_EQ_GAIN 0x130
-#define PCIE_GEN3_QHP_L0_OFFSET_GAIN 0x134
-#define PCIE_GEN3_QHP_L0_PRE_GAIN 0x138
-#define PCIE_GEN3_QHP_L0_VGA_INITVAL 0x13c
-#define PCIE_GEN3_QHP_L0_EQ_INTVAL 0x154
-#define PCIE_GEN3_QHP_L0_EDAC_INITVAL 0x160
-#define PCIE_GEN3_QHP_L0_RXEQ_INITB0 0x168
-#define PCIE_GEN3_QHP_L0_RXEQ_INITB1 0x16c
-#define PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1 0x178
-#define PCIE_GEN3_QHP_L0_RXEQ_CTRL 0x180
-#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0 0x184
-#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1 0x188
-#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2 0x18c
-#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0 0x190
-#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1 0x194
-#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2 0x198
-#define PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG 0x19c
-#define PCIE_GEN3_QHP_L0_RX_BAND 0x1a4
-#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0 0x1c0
-#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1 0x1c4
-#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2 0x1c8
-#define PCIE_GEN3_QHP_L0_SIGDET_ENABLES 0x230
-#define PCIE_GEN3_QHP_L0_SIGDET_CNTRL 0x234
-#define PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL 0x238
-#define PCIE_GEN3_QHP_L0_DCC_GAIN 0x2a4
-#define PCIE_GEN3_QHP_L0_RSM_START 0x2a8
-#define PCIE_GEN3_QHP_L0_RX_EN_SIGNAL 0x2ac
-#define PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL 0x2b0
-#define PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0 0x2b8
-#define PCIE_GEN3_QHP_L0_TS0_TIMER 0x2c0
-#define PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE 0x2c4
-#define PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET 0x2cc
-
-/* PCIE GEN3 PCS registers */
-#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB 0x2c
-#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB 0x40
-#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB 0x54
-#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB 0x68
-#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG 0x15c
-#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c
-#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174
-
-/* Only for QMP V4 PHY - USB/PCIe PCS registers */
-#define QPHY_V4_PCS_SW_RESET 0x000
-#define QPHY_V4_PCS_REVISION_ID0 0x004
-#define QPHY_V4_PCS_REVISION_ID1 0x008
-#define QPHY_V4_PCS_REVISION_ID2 0x00c
-#define QPHY_V4_PCS_REVISION_ID3 0x010
-#define QPHY_V4_PCS_PCS_STATUS1 0x014
-#define QPHY_V4_PCS_PCS_STATUS2 0x018
-#define QPHY_V4_PCS_PCS_STATUS3 0x01c
-#define QPHY_V4_PCS_PCS_STATUS4 0x020
-#define QPHY_V4_PCS_PCS_STATUS5 0x024
-#define QPHY_V4_PCS_PCS_STATUS6 0x028
-#define QPHY_V4_PCS_PCS_STATUS7 0x02c
-#define QPHY_V4_PCS_DEBUG_BUS_0_STATUS 0x030
-#define QPHY_V4_PCS_DEBUG_BUS_1_STATUS 0x034
-#define QPHY_V4_PCS_DEBUG_BUS_2_STATUS 0x038
-#define QPHY_V4_PCS_DEBUG_BUS_3_STATUS 0x03c
-#define QPHY_V4_PCS_POWER_DOWN_CONTROL 0x040
-#define QPHY_V4_PCS_START_CONTROL 0x044
-#define QPHY_V4_PCS_INSIG_SW_CTRL1 0x048
-#define QPHY_V4_PCS_INSIG_SW_CTRL2 0x04c
-#define QPHY_V4_PCS_INSIG_SW_CTRL3 0x050
-#define QPHY_V4_PCS_INSIG_SW_CTRL4 0x054
-#define QPHY_V4_PCS_INSIG_SW_CTRL5 0x058
-#define QPHY_V4_PCS_INSIG_SW_CTRL6 0x05c
-#define QPHY_V4_PCS_INSIG_SW_CTRL7 0x060
-#define QPHY_V4_PCS_INSIG_SW_CTRL8 0x064
-#define QPHY_V4_PCS_INSIG_MX_CTRL1 0x068
-#define QPHY_V4_PCS_INSIG_MX_CTRL2 0x06c
-#define QPHY_V4_PCS_INSIG_MX_CTRL3 0x070
-#define QPHY_V4_PCS_INSIG_MX_CTRL4 0x074
-#define QPHY_V4_PCS_INSIG_MX_CTRL5 0x078
-#define QPHY_V4_PCS_INSIG_MX_CTRL7 0x07c
-#define QPHY_V4_PCS_INSIG_MX_CTRL8 0x080
-#define QPHY_V4_PCS_OUTSIG_SW_CTRL1 0x084
-#define QPHY_V4_PCS_OUTSIG_MX_CTRL1 0x088
-#define QPHY_V4_PCS_CLAMP_ENABLE 0x08c
-#define QPHY_V4_PCS_POWER_STATE_CONFIG1 0x090
-#define QPHY_V4_PCS_POWER_STATE_CONFIG2 0x094
-#define QPHY_V4_PCS_FLL_CNTRL1 0x098
-#define QPHY_V4_PCS_FLL_CNTRL2 0x09c
-#define QPHY_V4_PCS_FLL_CNT_VAL_L 0x0a0
-#define QPHY_V4_PCS_FLL_CNT_VAL_H_TOL 0x0a4
-#define QPHY_V4_PCS_FLL_MAN_CODE 0x0a8
-#define QPHY_V4_PCS_TEST_CONTROL1 0x0ac
-#define QPHY_V4_PCS_TEST_CONTROL2 0x0b0
-#define QPHY_V4_PCS_TEST_CONTROL3 0x0b4
-#define QPHY_V4_PCS_TEST_CONTROL4 0x0b8
-#define QPHY_V4_PCS_TEST_CONTROL5 0x0bc
-#define QPHY_V4_PCS_TEST_CONTROL6 0x0c0
-#define QPHY_V4_PCS_LOCK_DETECT_CONFIG1 0x0c4
-#define QPHY_V4_PCS_LOCK_DETECT_CONFIG2 0x0c8
-#define QPHY_V4_PCS_LOCK_DETECT_CONFIG3 0x0cc
-#define QPHY_V4_PCS_LOCK_DETECT_CONFIG4 0x0d0
-#define QPHY_V4_PCS_LOCK_DETECT_CONFIG5 0x0d4
-#define QPHY_V4_PCS_LOCK_DETECT_CONFIG6 0x0d8
-#define QPHY_V4_PCS_REFGEN_REQ_CONFIG1 0x0dc
-#define QPHY_V4_PCS_REFGEN_REQ_CONFIG2 0x0e0
-#define QPHY_V4_PCS_REFGEN_REQ_CONFIG3 0x0e4
-#define QPHY_V4_PCS_BIST_CTRL 0x0e8
-#define QPHY_V4_PCS_PRBS_POLY0 0x0ec
-#define QPHY_V4_PCS_PRBS_POLY1 0x0f0
-#define QPHY_V4_PCS_FIXED_PAT0 0x0f4
-#define QPHY_V4_PCS_FIXED_PAT1 0x0f8
-#define QPHY_V4_PCS_FIXED_PAT2 0x0fc
-#define QPHY_V4_PCS_FIXED_PAT3 0x100
-#define QPHY_V4_PCS_FIXED_PAT4 0x104
-#define QPHY_V4_PCS_FIXED_PAT5 0x108
-#define QPHY_V4_PCS_FIXED_PAT6 0x10c
-#define QPHY_V4_PCS_FIXED_PAT7 0x110
-#define QPHY_V4_PCS_FIXED_PAT8 0x114
-#define QPHY_V4_PCS_FIXED_PAT9 0x118
-#define QPHY_V4_PCS_FIXED_PAT10 0x11c
-#define QPHY_V4_PCS_FIXED_PAT11 0x120
-#define QPHY_V4_PCS_FIXED_PAT12 0x124
-#define QPHY_V4_PCS_FIXED_PAT13 0x128
-#define QPHY_V4_PCS_FIXED_PAT14 0x12c
-#define QPHY_V4_PCS_FIXED_PAT15 0x130
-#define QPHY_V4_PCS_TXMGN_CONFIG 0x134
-#define QPHY_V4_PCS_G12S1_TXMGN_V0 0x138
-#define QPHY_V4_PCS_G12S1_TXMGN_V1 0x13c
-#define QPHY_V4_PCS_G12S1_TXMGN_V2 0x140
-#define QPHY_V4_PCS_G12S1_TXMGN_V3 0x144
-#define QPHY_V4_PCS_G12S1_TXMGN_V4 0x148
-#define QPHY_V4_PCS_G12S1_TXMGN_V0_RS 0x14c
-#define QPHY_V4_PCS_G12S1_TXMGN_V1_RS 0x150
-#define QPHY_V4_PCS_G12S1_TXMGN_V2_RS 0x154
-#define QPHY_V4_PCS_G12S1_TXMGN_V3_RS 0x158
-#define QPHY_V4_PCS_G12S1_TXMGN_V4_RS 0x15c
-#define QPHY_V4_PCS_G3S2_TXMGN_MAIN 0x160
-#define QPHY_V4_PCS_G3S2_TXMGN_MAIN_RS 0x164
-#define QPHY_V4_PCS_G12S1_TXDEEMPH_M6DB 0x168
-#define QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB 0x16c
-#define QPHY_V4_PCS_G3S2_PRE_GAIN 0x170
-#define QPHY_V4_PCS_G3S2_POST_GAIN 0x174
-#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET 0x178
-#define QPHY_V4_PCS_G3S2_PRE_GAIN_RS 0x17c
-#define QPHY_V4_PCS_G3S2_POST_GAIN_RS 0x180
-#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET_RS 0x184
-#define QPHY_V4_PCS_RX_SIGDET_LVL 0x188
-#define QPHY_V4_PCS_RX_SIGDET_DTCT_CNTRL 0x18c
-#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
-#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
-#define QPHY_V4_PCS_RATE_SLEW_CNTRL1 0x198
-#define QPHY_V4_PCS_RATE_SLEW_CNTRL2 0x19c
-#define QPHY_V4_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1a0
-#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4
-#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8
-#define QPHY_V4_PCS_TSYNC_RSYNC_TIME 0x1ac
-#define QPHY_V4_PCS_CDR_RESET_TIME 0x1b0
-#define QPHY_V4_PCS_TSYNC_DLY_TIME 0x1b4
-#define QPHY_V4_PCS_ELECIDLE_DLY_SEL 0x1b8
-#define QPHY_V4_PCS_CMN_ACK_OUT_SEL 0x1bc
-#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
-#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
-#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG3 0x1c8
-#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG4 0x1cc
-#define QPHY_V4_PCS_PCS_TX_RX_CONFIG 0x1d0
-#define QPHY_V4_PCS_RX_IDLE_DTCT_CNTRL 0x1d4
-#define QPHY_V4_PCS_RX_DCC_CAL_CONFIG 0x1d8
-#define QPHY_V4_PCS_EQ_CONFIG1 0x1dc
-#define QPHY_V4_PCS_EQ_CONFIG2 0x1e0
-#define QPHY_V4_PCS_EQ_CONFIG3 0x1e4
-#define QPHY_V4_PCS_EQ_CONFIG4 0x1e8
-#define QPHY_V4_PCS_EQ_CONFIG5 0x1ec
-#define QPHY_V4_PCS_USB3_POWER_STATE_CONFIG1 0x300
-#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x304
-#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x308
-#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x30c
-#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x310
-#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x314
-#define QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x318
-#define QPHY_V4_PCS_USB3_LFPS_TX_ECSTART 0x31c
-#define QPHY_V4_PCS_USB3_LFPS_PER_TIMER_VAL 0x320
-#define QPHY_V4_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x324
-#define QPHY_V4_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x328
-#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x32c
-#define QPHY_V4_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x330
-#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x334
-#define QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x338
-#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x33c
-#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x340
-#define QPHY_V4_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x344
-#define QPHY_V4_PCS_USB3_ARCVR_DTCT_CM_DLY 0x348
-#define QPHY_V4_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x34c
-#define QPHY_V4_PCS_USB3_ALFPS_DEGLITCH_VAL 0x350
-#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
-#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
-
-/* Only for QMP V4_20 PHY - USB/PCIe PCS registers */
-#define QPHY_V4_20_PCS_RX_SIGDET_LVL 0x188
-#define QPHY_V4_20_PCS_EQ_CONFIG2 0x1d8
-#define QPHY_V4_20_PCS_EQ_CONFIG4 0x1e0
-#define QPHY_V4_20_PCS_EQ_CONFIG5 0x1e4
-
-/* Only for QMP V4 PHY - UNI has 0x300 offset for PCS_USB3 regs */
-#define QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x618
-#define QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x638
-
/* Only for QMP V4 PHY - PCS_MISC registers */
#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
@@ -1048,275 +135,4 @@
#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
-/* Only for QMP V4 PHY - PCS_PCIE registers (same as PCS_MISC?) */
-#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2 0x0c
-#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4 0x14
-#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x1c
-#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x40
-#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x48
-#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x50
-#define QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS 0x90
-#define QPHY_V4_PCS_PCIE_EQ_CONFIG2 0xa4
-#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE 0xb4
-#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc
-#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0
-
-#define QPHY_V4_20_PCS_PCIE_EQ_CONFIG1 0x0a0
-#define QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME 0x0f0
-#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
-#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
-#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
-#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
-#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
-
-/* Only for QMP V5 PHY - QSERDES COM registers */
-#define QSERDES_V5_COM_SSC_EN_CENTER 0x010
-#define QSERDES_V5_COM_SSC_PER1 0x01c
-#define QSERDES_V5_COM_SSC_PER2 0x020
-#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0 0x024
-#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0 0x028
-#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1 0x030
-#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1 0x034
-#define QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN 0x044
-#define QSERDES_V5_COM_CLK_ENABLE1 0x048
-#define QSERDES_V5_COM_SYSCLK_BUF_ENABLE 0x050
-#define QSERDES_V5_COM_PLL_IVCO 0x058
-#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
-#define QSERDES_V5_COM_CP_CTRL_MODE1 0x078
-#define QSERDES_V5_COM_PLL_RCTRL_MODE0 0x07c
-#define QSERDES_V5_COM_PLL_RCTRL_MODE1 0x080
-#define QSERDES_V5_COM_PLL_CCTRL_MODE0 0x084
-#define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088
-#define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094
-#define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4
-#define QSERDES_V5_COM_LOCK_CMP_CFG 0x0a8
-#define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac
-#define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0
-#define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4
-#define QSERDES_V5_COM_DEC_START_MODE0 0x0bc
-#define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8
-#define QSERDES_V5_COM_DEC_START_MODE1 0x0c4
-#define QSERDES_V5_COM_DIV_FRAC_START1_MODE0 0x0cc
-#define QSERDES_V5_COM_DIV_FRAC_START2_MODE0 0x0d0
-#define QSERDES_V5_COM_DIV_FRAC_START3_MODE0 0x0d4
-#define QSERDES_V5_COM_DIV_FRAC_START1_MODE1 0x0d8
-#define QSERDES_V5_COM_DIV_FRAC_START2_MODE1 0x0dc
-#define QSERDES_V5_COM_DIV_FRAC_START3_MODE1 0x0e0
-#define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c
-#define QSERDES_V5_COM_VCO_TUNE1_MODE0 0x110
-#define QSERDES_V5_COM_VCO_TUNE2_MODE0 0x114
-#define QSERDES_V5_COM_VCO_TUNE1_MODE1 0x118
-#define QSERDES_V5_COM_VCO_TUNE2_MODE1 0x11c
-#define QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124
-#define QSERDES_V5_COM_CLK_SELECT 0x154
-#define QSERDES_V5_COM_HSCLK_SEL 0x158
-#define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c
-#define QSERDES_V5_COM_CORECLK_DIV_MODE0 0x168
-#define QSERDES_V5_COM_CORECLK_DIV_MODE1 0x16c
-#define QSERDES_V5_COM_CORE_CLK_EN 0x174
-#define QSERDES_V5_COM_CMN_CONFIG 0x17c
-#define QSERDES_V5_COM_CMN_MISC1 0x19c
-#define QSERDES_V5_COM_CMN_MODE 0x1a4
-#define QSERDES_V5_COM_VCO_DC_LEVEL_CTRL 0x1a8
-#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
-#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
-#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
-#define QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
-#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
-
-/* Only for QMP V5 PHY - TX registers */
-#define QSERDES_V5_TX_RES_CODE_LANE_TX 0x34
-#define QSERDES_V5_TX_RES_CODE_LANE_RX 0x38
-#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX 0x3c
-#define QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX 0x40
-#define QSERDES_V5_TX_LANE_MODE_1 0x84
-#define QSERDES_V5_TX_LANE_MODE_2 0x88
-#define QSERDES_V5_TX_LANE_MODE_3 0x8c
-#define QSERDES_V5_TX_LANE_MODE_4 0x90
-#define QSERDES_V5_TX_LANE_MODE_5 0x94
-#define QSERDES_V5_TX_RCV_DETECT_LVL_2 0xa4
-#define QSERDES_V5_TX_TRAN_DRVR_EMP_EN 0xc0
-#define QSERDES_V5_TX_PI_QEC_CTRL 0xe4
-#define QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x178
-#define QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x17c
-#define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180
-#define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184
-
-/* Only for QMP V5_20 PHY - TX registers */
-#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX 0x30
-#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX 0x34
-#define QSERDES_V5_20_TX_LANE_MODE_1 0x78
-#define QSERDES_V5_20_TX_LANE_MODE_2 0x7c
-
-/* Only for QMP V5 PHY - RX registers */
-#define QSERDES_V5_RX_UCDR_FO_GAIN 0x008
-#define QSERDES_V5_RX_UCDR_SO_GAIN 0x014
-#define QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN 0x030
-#define QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
-#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
-#define QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
-#define QSERDES_V5_RX_UCDR_PI_CONTROLS 0x044
-#define QSERDES_V5_RX_UCDR_PI_CTRL2 0x048
-#define QSERDES_V5_RX_UCDR_SB2_THRESH1 0x04c
-#define QSERDES_V5_RX_UCDR_SB2_THRESH2 0x050
-#define QSERDES_V5_RX_UCDR_SB2_GAIN1 0x054
-#define QSERDES_V5_RX_UCDR_SB2_GAIN2 0x058
-#define QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE 0x060
-#define QSERDES_V5_RX_RCLK_AUXDATA_SEL 0x064
-#define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068
-#define QSERDES_V5_RX_AC_JTAG_MODE 0x078
-#define QSERDES_V5_RX_RX_TERM_BW 0x080
-#define QSERDES_V5_RX_TX_ADAPT_POST_THRESH 0x0cc
-#define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4
-#define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8
-#define QSERDES_V5_RX_GM_CAL 0x0dc
-#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8
-#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
-#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
-#define QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
-#define QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW 0x0f8
-#define QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
-#define QSERDES_V5_RX_RX_IDAC_MEASURE_TIME 0x100
-#define QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
-#define QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
-#define QSERDES_V5_RX_SIGDET_ENABLES 0x118
-#define QSERDES_V5_RX_SIGDET_CNTRL 0x11c
-#define QSERDES_V5_RX_SIGDET_LVL 0x120
-#define QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL 0x124
-#define QSERDES_V5_RX_RX_BAND 0x128
-#define QSERDES_V5_RX_RX_MODE_00_LOW 0x15c
-#define QSERDES_V5_RX_RX_MODE_00_HIGH 0x160
-#define QSERDES_V5_RX_RX_MODE_00_HIGH2 0x164
-#define QSERDES_V5_RX_RX_MODE_00_HIGH3 0x168
-#define QSERDES_V5_RX_RX_MODE_00_HIGH4 0x16c
-#define QSERDES_V5_RX_RX_MODE_01_LOW 0x170
-#define QSERDES_V5_RX_RX_MODE_01_HIGH 0x174
-#define QSERDES_V5_RX_RX_MODE_01_HIGH2 0x178
-#define QSERDES_V5_RX_RX_MODE_01_HIGH3 0x17c
-#define QSERDES_V5_RX_RX_MODE_01_HIGH4 0x180
-#define QSERDES_V5_RX_RX_MODE_10_LOW 0x184
-#define QSERDES_V5_RX_RX_MODE_10_HIGH 0x188
-#define QSERDES_V5_RX_RX_MODE_10_HIGH2 0x18c
-#define QSERDES_V5_RX_RX_MODE_10_HIGH3 0x190
-#define QSERDES_V5_RX_RX_MODE_10_HIGH4 0x194
-#define QSERDES_V5_RX_DFE_EN_TIMER 0x1a0
-#define QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
-#define QSERDES_V5_RX_DCC_CTRL1 0x1a8
-#define QSERDES_V5_RX_VTH_CODE 0x1b0
-
-/* Only for QMP V5_20 PHY - RX registers */
-#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
-#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
-#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
-#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
-#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
-#define QSERDES_V5_20_RX_RX_IDAC_SAOFFSET 0x07c
-#define QSERDES_V5_20_RX_DFE_3 0x090
-#define QSERDES_V5_20_RX_DFE_DAC_ENABLE1 0x0b4
-#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1 0x0c4
-#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2 0x0c8
-#define QSERDES_V5_20_RX_VGA_CAL_MAN_VAL 0x0dc
-#define QSERDES_V5_20_RX_GM_CAL 0x0ec
-#define QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4 0x108
-#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1 0x164
-#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2 0x168
-#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3 0x16c
-#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5 0x174
-#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6 0x178
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B0 0x17c
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B1 0x180
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B2 0x184
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B3 0x188
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B4 0x18c
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B5 0x190
-#define QSERDES_V5_20_RX_RX_MODE_RATE2_B6 0x194
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B0 0x198
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B1 0x19c
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B2 0x1a0
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B3 0x1a4
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B4 0x1a8
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B5 0x1ac
-#define QSERDES_V5_20_RX_RX_MODE_RATE3_B6 0x1b0
-#define QSERDES_V5_20_RX_PHPRE_CTRL 0x1b4
-#define QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x1c0
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210 0x1f4
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3 0x1f8
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210 0x1fc
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3 0x200
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210 0x204
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3 0x208
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
-#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
-
-/* Only for QMP V5 PHY - USB/PCIe PCS registers */
-#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
-#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
-#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
-#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
-#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
-#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
-
-/* Only for QMP V5 PHY - PCS_PCIE registers */
-#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
-#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
-#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
-#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
-
-/* Only for QMP V5_20 PHY - PCIe PCS registers */
-#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
-#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
-#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
-#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
-#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
-#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
-
-/* Only for QMP V5 PHY - UFS PCS registers */
-#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
-#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
-#define QPHY_V5_PCS_UFS_PLL_CNTL 0x02c
-#define QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
-#define QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
-#define QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
-#define QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
-#define QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
-#define QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
-#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1 0x154
-#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2 0x158
-#define QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND 0x160
-#define QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND 0x168
-#define QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
-#define QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
-
-/* Only for QMP V5 PHY - USB3 have different offsets than V4 */
-#define QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1 0x300
-#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x304
-#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x308
-#define QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x30c
-#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x310
-#define QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x314
-#define QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x318
-#define QPHY_V5_PCS_USB3_LFPS_TX_ECSTART 0x31c
-#define QPHY_V5_PCS_USB3_LFPS_PER_TIMER_VAL 0x320
-#define QPHY_V5_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x324
-#define QPHY_V5_PCS_USB3_LFPS_CONFIG1 0x328
-#define QPHY_V5_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x32c
-#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x330
-#define QPHY_V5_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x334
-#define QPHY_V5_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x338
-#define QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x33c
-#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x340
-#define QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x344
-#define QPHY_V5_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x348
-#define QPHY_V5_PCS_USB3_ARCVR_DTCT_CM_DLY 0x34c
-#define QPHY_V5_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x350
-#define QPHY_V5_PCS_USB3_ALFPS_DEGLITCH_VAL 0x354
-#define QPHY_V5_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x358
-#define QPHY_V5_PCS_USB3_TEST_CONTROL 0x35c
-#define QPHY_V5_PCS_USB3_RXTERMINATION_DLY_SEL 0x360
-
-/* Only for QMP V5 PHY - UNI has 0x1000 offset for PCS_USB3 regs */
-#define QPHY_V5_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x1018
-#define QPHY_V5_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x103c
-
#endif
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 6711659f727c..0b1e9337ee8e 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -978,7 +978,9 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
switch (rport->port_id) {
case USB2PHY_PORT_OTG:
- ret |= rockchip_usb2phy_otg_mux_irq(irq, rport);
+ if (rport->mode != USB_DR_MODE_HOST &&
+ rport->mode != USB_DR_MODE_UNKNOWN)
+ ret |= rockchip_usb2phy_otg_mux_irq(irq, rport);
break;
case USB2PHY_PORT_HOST:
ret |= rockchip_usb2phy_linestate_irq(irq, rport);
@@ -1162,6 +1164,12 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
EXTCON_USB_HOST, &rport->event_nb);
if (ret)
dev_err(rphy->dev, "register USB HOST notifier failed\n");
+
+ if (!of_property_read_bool(rphy->dev->of_node, "extcon")) {
+ /* do initial sync of usb state */
+ ret = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
+ extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !ret);
+ }
}
out:
@@ -1283,7 +1291,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy\n");
+ dev_err_probe(dev, PTR_ERR(phy), "failed to create phy\n");
ret = PTR_ERR(phy);
goto put_child;
}
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
index 65e4cc59403f..afb34a153e34 100644
--- a/drivers/phy/samsung/Makefile
+++ b/drivers/phy/samsung/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PHY_SAMSUNG_UFS) += phy-exynos-ufs.o
phy-exynos-ufs-y += phy-samsung-ufs.o
phy-exynos-ufs-y += phy-exynos7-ufs.o
phy-exynos-ufs-y += phy-exynosautov9-ufs.o
+phy-exynos-ufs-y += phy-fsd-ufs.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
phy-exynos-usb2-y += phy-samsung-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 578cfe07d07a..53c9230c2907 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -51,6 +51,13 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
+ regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
+ BIT(0), 1);
+ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
+ PCIE_APP_REQ_EXIT_L1_MODE, 0);
+ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
+ PCIE_REFCLK_GATING_EN, 0);
+
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
PCIE_PHY_RESET, 1);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
@@ -109,20 +116,7 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
return 0;
}
-static int exynos5433_pcie_phy_power_on(struct phy *phy)
-{
- struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
-
- regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
- BIT(0), 1);
- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
- PCIE_APP_REQ_EXIT_L1_MODE, 0);
- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
- PCIE_REFCLK_GATING_EN, 0);
- return 0;
-}
-
-static int exynos5433_pcie_phy_power_off(struct phy *phy)
+static int exynos5433_pcie_phy_exit(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
@@ -135,8 +129,7 @@ static int exynos5433_pcie_phy_power_off(struct phy *phy)
static const struct phy_ops exynos5433_phy_ops = {
.init = exynos5433_pcie_phy_init,
- .power_on = exynos5433_pcie_phy_power_on,
- .power_off = exynos5433_pcie_phy_power_off,
+ .exit = exynos5433_pcie_phy_exit,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/samsung/phy-exynos7-ufs.c b/drivers/phy/samsung/phy-exynos7-ufs.c
index 7c9008e163db..a982e7c128c5 100644
--- a/drivers/phy/samsung/phy-exynos7-ufs.c
+++ b/drivers/phy/samsung/phy-exynos7-ufs.c
@@ -11,6 +11,8 @@
#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS 0x5e
+
/* Calibration for phy initialization */
static const struct samsung_ufs_phy_cfg exynos7_pre_init_cfg[] = {
PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_ANY),
@@ -66,12 +68,18 @@ static const struct samsung_ufs_phy_cfg *exynos7_ufs_phy_cfgs[CFG_TAG_MAX] = {
[CFG_POST_PWR_HS] = exynos7_post_pwr_hs_cfg,
};
+static const char * const exynos7_ufs_phy_clks[] = {
+ "tx0_symbol_clk", "rx0_symbol_clk", "rx1_symbol_clk", "ref_clk",
+};
+
const struct samsung_ufs_phy_drvdata exynos7_ufs_phy = {
- .cfg = exynos7_ufs_phy_cfgs,
+ .cfgs = exynos7_ufs_phy_cfgs,
.isol = {
.offset = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL,
.mask = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK,
.en = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN,
},
- .has_symbol_clk = 1,
+ .clk_list = exynos7_ufs_phy_clks,
+ .num_clks = ARRAY_SIZE(exynos7_ufs_phy_clks),
+ .cdr_lock_status_offset = EXYNOS7_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
};
diff --git a/drivers/phy/samsung/phy-exynosautov9-ufs.c b/drivers/phy/samsung/phy-exynosautov9-ufs.c
index 36398a15c2db..49e2bcbef0b4 100644
--- a/drivers/phy/samsung/phy-exynosautov9-ufs.c
+++ b/drivers/phy/samsung/phy-exynosautov9-ufs.c
@@ -10,6 +10,7 @@
#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL 0x728
#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
+#define EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS 0x5e
#define PHY_TRSV_REG_CFG_AUTOV9(o, v, d) \
PHY_TRSV_REG_CFG_OFFSET(o, v, d, 0x50)
@@ -31,22 +32,22 @@ static const struct samsung_ufs_phy_cfg exynosautov9_pre_init_cfg[] = {
PHY_COMN_REG_CFG(0x023, 0xc0, PWR_MODE_ANY),
PHY_COMN_REG_CFG(0x023, 0x00, PWR_MODE_ANY),
- PHY_TRSV_REG_CFG(0x042, 0x5d, PWR_MODE_ANY),
- PHY_TRSV_REG_CFG(0x043, 0x80, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV9(0x042, 0x5d, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV9(0x043, 0x80, PWR_MODE_ANY),
END_UFS_PHY_CFG,
};
/* Calibration for HS mode series A/B */
static const struct samsung_ufs_phy_cfg exynosautov9_pre_pwr_hs_cfg[] = {
- PHY_TRSV_REG_CFG(0x032, 0xbc, PWR_MODE_HS_ANY),
- PHY_TRSV_REG_CFG(0x03c, 0x7f, PWR_MODE_HS_ANY),
- PHY_TRSV_REG_CFG(0x048, 0xc0, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG_AUTOV9(0x032, 0xbc, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG_AUTOV9(0x03c, 0x7f, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG_AUTOV9(0x048, 0xc0, PWR_MODE_HS_ANY),
- PHY_TRSV_REG_CFG(0x04a, 0x00, PWR_MODE_HS_G3_SER_B),
- PHY_TRSV_REG_CFG(0x04b, 0x10, PWR_MODE_HS_G1_SER_B |
- PWR_MODE_HS_G3_SER_B),
- PHY_TRSV_REG_CFG(0x04d, 0x63, PWR_MODE_HS_G3_SER_B),
+ PHY_TRSV_REG_CFG_AUTOV9(0x04a, 0x00, PWR_MODE_HS_G3_SER_B),
+ PHY_TRSV_REG_CFG_AUTOV9(0x04b, 0x10, PWR_MODE_HS_G1_SER_B |
+ PWR_MODE_HS_G3_SER_B),
+ PHY_TRSV_REG_CFG_AUTOV9(0x04d, 0x63, PWR_MODE_HS_G3_SER_B),
END_UFS_PHY_CFG,
};
@@ -56,12 +57,18 @@ static const struct samsung_ufs_phy_cfg *exynosautov9_ufs_phy_cfgs[CFG_TAG_MAX]
[CFG_PRE_PWR_HS] = exynosautov9_pre_pwr_hs_cfg,
};
+static const char * const exynosautov9_ufs_phy_clks[] = {
+ "ref_clk",
+};
+
const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy = {
- .cfg = exynosautov9_ufs_phy_cfgs,
+ .cfgs = exynosautov9_ufs_phy_cfgs,
.isol = {
.offset = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL,
.mask = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_MASK,
.en = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CTRL_EN,
},
- .has_symbol_clk = 0,
+ .clk_list = exynosautov9_ufs_phy_clks,
+ .num_clks = ARRAY_SIZE(exynosautov9_ufs_phy_clks),
+ .cdr_lock_status_offset = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
};
diff --git a/drivers/phy/samsung/phy-fsd-ufs.c b/drivers/phy/samsung/phy-fsd-ufs.c
new file mode 100644
index 000000000000..d36cabd53434
--- /dev/null
+++ b/drivers/phy/samsung/phy-fsd-ufs.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS PHY driver data for FSD SoC
+ *
+ * Copyright (C) 2022 Samsung Electronics Co., Ltd.
+ *
+ */
+#include "phy-samsung-ufs.h"
+
+#define FSD_EMBEDDED_COMBO_PHY_CTRL 0x724
+#define FSD_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
+#define FSD_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
+#define FSD_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS 0x6e
+
+static const struct samsung_ufs_phy_cfg fsd_pre_init_cfg[] = {
+ PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x017, 0x94, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x037, 0x40, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x03b, 0x83, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x04c, 0x5b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x05c, 0x14, PWR_MODE_ANY),
+ END_UFS_PHY_CFG
+};
+
+/* Calibration for HS mode series A/B */
+static const struct samsung_ufs_phy_cfg fsd_pre_pwr_hs_cfg[] = {
+ END_UFS_PHY_CFG
+};
+
+/* Calibration for HS mode series A/B after PMC */
+static const struct samsung_ufs_phy_cfg fsd_post_pwr_hs_cfg[] = {
+ END_UFS_PHY_CFG
+};
+
+static const struct samsung_ufs_phy_cfg *fsd_ufs_phy_cfgs[CFG_TAG_MAX] = {
+ [CFG_PRE_INIT] = fsd_pre_init_cfg,
+ [CFG_PRE_PWR_HS] = fsd_pre_pwr_hs_cfg,
+ [CFG_POST_PWR_HS] = fsd_post_pwr_hs_cfg,
+};
+
+static const char * const fsd_ufs_phy_clks[] = {
+ "ref_clk",
+};
+
+const struct samsung_ufs_phy_drvdata fsd_ufs_phy = {
+ .cfgs = fsd_ufs_phy_cfgs,
+ .isol = {
+ .offset = FSD_EMBEDDED_COMBO_PHY_CTRL,
+ .mask = FSD_EMBEDDED_COMBO_PHY_CTRL_MASK,
+ .en = FSD_EMBEDDED_COMBO_PHY_CTRL_EN,
+ },
+ .clk_list = fsd_ufs_phy_clks,
+ .num_clks = ARRAY_SIZE(fsd_ufs_phy_clks),
+ .cdr_lock_status_offset = FSD_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
+};
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
index 602ddef259eb..183c88e3d1ec 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.c
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -63,7 +63,8 @@ static int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy)
}
err = readl_poll_timeout(
- ufs_phy->reg_pma + PHY_APB_ADDR(PHY_CDR_LOCK_STATUS),
+ ufs_phy->reg_pma +
+ PHY_APB_ADDR(ufs_phy->drvdata->cdr_lock_status_offset),
val, (val & PHY_CDR_LOCK_BIT), sleep_us, timeout_us);
if (err)
dev_err(ufs_phy->dev,
@@ -75,7 +76,7 @@ out:
static int samsung_ufs_phy_calibrate(struct phy *phy)
{
struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
- struct samsung_ufs_phy_cfg **cfgs = ufs_phy->cfg;
+ const struct samsung_ufs_phy_cfg * const *cfgs = ufs_phy->cfgs;
const struct samsung_ufs_phy_cfg *cfg;
int err = 0;
int i;
@@ -130,113 +131,63 @@ out:
return err;
}
-static int samsung_ufs_phy_symbol_clk_init(struct samsung_ufs_phy *phy)
+static int samsung_ufs_phy_clks_init(struct samsung_ufs_phy *phy)
{
- int ret;
-
- phy->tx0_symbol_clk = devm_clk_get(phy->dev, "tx0_symbol_clk");
- if (IS_ERR(phy->tx0_symbol_clk)) {
- dev_err(phy->dev, "failed to get tx0_symbol_clk clock\n");
- return PTR_ERR(phy->tx0_symbol_clk);
- }
+ int i;
+ const struct samsung_ufs_phy_drvdata *drvdata = phy->drvdata;
+ int num_clks = drvdata->num_clks;
- phy->rx0_symbol_clk = devm_clk_get(phy->dev, "rx0_symbol_clk");
- if (IS_ERR(phy->rx0_symbol_clk)) {
- dev_err(phy->dev, "failed to get rx0_symbol_clk clock\n");
- return PTR_ERR(phy->rx0_symbol_clk);
- }
+ phy->clks = devm_kcalloc(phy->dev, num_clks, sizeof(*phy->clks),
+ GFP_KERNEL);
+ if (!phy->clks)
+ return -ENOMEM;
- phy->rx1_symbol_clk = devm_clk_get(phy->dev, "rx1_symbol_clk");
- if (IS_ERR(phy->rx1_symbol_clk)) {
- dev_err(phy->dev, "failed to get rx1_symbol_clk clock\n");
- return PTR_ERR(phy->rx1_symbol_clk);
- }
+ for (i = 0; i < num_clks; i++)
+ phy->clks[i].id = drvdata->clk_list[i];
- ret = clk_prepare_enable(phy->tx0_symbol_clk);
- if (ret) {
- dev_err(phy->dev, "%s: tx0_symbol_clk enable failed %d\n", __func__, ret);
- goto out;
- }
+ return devm_clk_bulk_get(phy->dev, num_clks, phy->clks);
+}
- ret = clk_prepare_enable(phy->rx0_symbol_clk);
- if (ret) {
- dev_err(phy->dev, "%s: rx0_symbol_clk enable failed %d\n", __func__, ret);
- goto out_disable_tx0_clk;
- }
+static int samsung_ufs_phy_init(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
- ret = clk_prepare_enable(phy->rx1_symbol_clk);
- if (ret) {
- dev_err(phy->dev, "%s: rx1_symbol_clk enable failed %d\n", __func__, ret);
- goto out_disable_rx0_clk;
- }
+ ss_phy->lane_cnt = phy->attrs.bus_width;
+ ss_phy->ufs_phy_state = CFG_PRE_INIT;
return 0;
-
-out_disable_rx0_clk:
- clk_disable_unprepare(phy->rx0_symbol_clk);
-out_disable_tx0_clk:
- clk_disable_unprepare(phy->tx0_symbol_clk);
-out:
- return ret;
}
-static int samsung_ufs_phy_clks_init(struct samsung_ufs_phy *phy)
+static int samsung_ufs_phy_power_on(struct phy *phy)
{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
int ret;
- phy->ref_clk = devm_clk_get(phy->dev, "ref_clk");
- if (IS_ERR(phy->ref_clk))
- dev_err(phy->dev, "failed to get ref_clk clock\n");
+ samsung_ufs_phy_ctrl_isol(ss_phy, false);
- ret = clk_prepare_enable(phy->ref_clk);
+ ret = clk_bulk_prepare_enable(ss_phy->drvdata->num_clks, ss_phy->clks);
if (ret) {
- dev_err(phy->dev, "%s: ref_clk enable failed %d\n", __func__, ret);
+ dev_err(ss_phy->dev, "failed to enable ufs phy clocks\n");
return ret;
}
- dev_dbg(phy->dev, "UFS MPHY ref_clk_rate = %ld\n", clk_get_rate(phy->ref_clk));
-
- return 0;
-}
-
-static int samsung_ufs_phy_init(struct phy *phy)
-{
- struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
- int ret;
-
- ss_phy->lane_cnt = phy->attrs.bus_width;
- ss_phy->ufs_phy_state = CFG_PRE_INIT;
-
- if (ss_phy->drvdata->has_symbol_clk) {
- ret = samsung_ufs_phy_symbol_clk_init(ss_phy);
+ if (ss_phy->ufs_phy_state == CFG_PRE_INIT) {
+ ret = samsung_ufs_phy_calibrate(phy);
if (ret)
- dev_err(ss_phy->dev, "failed to set ufs phy symbol clocks\n");
+ dev_err(ss_phy->dev, "ufs phy calibration failed\n");
}
- ret = samsung_ufs_phy_clks_init(ss_phy);
- if (ret)
- dev_err(ss_phy->dev, "failed to set ufs phy clocks\n");
-
- ret = samsung_ufs_phy_calibrate(phy);
- if (ret)
- dev_err(ss_phy->dev, "ufs phy calibration failed\n");
-
return ret;
}
-static int samsung_ufs_phy_power_on(struct phy *phy)
-{
- struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
-
- samsung_ufs_phy_ctrl_isol(ss_phy, false);
- return 0;
-}
-
static int samsung_ufs_phy_power_off(struct phy *phy)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+ clk_bulk_disable_unprepare(ss_phy->drvdata->num_clks, ss_phy->clks);
+
samsung_ufs_phy_ctrl_isol(ss_phy, true);
+
return 0;
}
@@ -257,13 +208,7 @@ static int samsung_ufs_phy_exit(struct phy *phy)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
- clk_disable_unprepare(ss_phy->ref_clk);
-
- if (ss_phy->drvdata->has_symbol_clk) {
- clk_disable_unprepare(ss_phy->tx0_symbol_clk);
- clk_disable_unprepare(ss_phy->rx0_symbol_clk);
- clk_disable_unprepare(ss_phy->rx1_symbol_clk);
- }
+ ss_phy->ufs_phy_state = CFG_TAG_MAX;
return 0;
}
@@ -288,6 +233,7 @@ static int samsung_ufs_phy_probe(struct platform_device *pdev)
struct phy *gen_phy;
struct phy_provider *phy_provider;
const struct samsung_ufs_phy_drvdata *drvdata;
+ u32 isol_offset;
int err = 0;
match = of_match_node(samsung_ufs_phy_match, dev->of_node);
@@ -327,10 +273,21 @@ static int samsung_ufs_phy_probe(struct platform_device *pdev)
drvdata = match->data;
phy->dev = dev;
phy->drvdata = drvdata;
- phy->cfg = (struct samsung_ufs_phy_cfg **)drvdata->cfg;
- phy->isol = &drvdata->isol;
+ phy->cfgs = drvdata->cfgs;
+ memcpy(&phy->isol, &drvdata->isol, sizeof(phy->isol));
+
+ if (!of_property_read_u32_index(dev->of_node, "samsung,pmu-syscon", 1,
+ &isol_offset))
+ phy->isol.offset = isol_offset;
+
phy->lane_cnt = PHY_DEF_LANE_CNT;
+ err = samsung_ufs_phy_clks_init(phy);
+ if (err) {
+ dev_err(dev, "failed to get phy clocks\n");
+ goto out;
+ }
+
phy_set_drvdata(gen_phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -350,6 +307,9 @@ static const struct of_device_id samsung_ufs_phy_match[] = {
}, {
.compatible = "samsung,exynosautov9-ufs-phy",
.data = &exynosautov9_ufs_phy,
+ }, {
+ .compatible = "tesla,fsd-ufs-phy",
+ .data = &fsd_ufs_phy,
},
{},
};
diff --git a/drivers/phy/samsung/phy-samsung-ufs.h b/drivers/phy/samsung/phy-samsung-ufs.h
index 91a0e9f94f98..e122960cfee8 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.h
+++ b/drivers/phy/samsung/phy-samsung-ufs.h
@@ -40,7 +40,6 @@
/* UFS PHY registers */
#define PHY_PLL_LOCK_STATUS 0x1e
-#define PHY_CDR_LOCK_STATUS 0x5e
#define PHY_PLL_LOCK_BIT BIT(5)
#define PHY_CDR_LOCK_BIT BIT(4)
@@ -101,28 +100,28 @@ struct samsung_ufs_phy_cfg {
u8 id;
};
+struct samsung_ufs_phy_pmu_isol {
+ u32 offset;
+ u32 mask;
+ u32 en;
+};
+
struct samsung_ufs_phy_drvdata {
- const struct samsung_ufs_phy_cfg **cfg;
- struct pmu_isol {
- u32 offset;
- u32 mask;
- u32 en;
- } isol;
- bool has_symbol_clk;
+ const struct samsung_ufs_phy_cfg **cfgs;
+ struct samsung_ufs_phy_pmu_isol isol;
+ const char * const *clk_list;
+ int num_clks;
+ u32 cdr_lock_status_offset;
};
struct samsung_ufs_phy {
struct device *dev;
void __iomem *reg_pma;
struct regmap *reg_pmu;
- struct clk *ref_clk;
- struct clk *ref_clk_parent;
- struct clk *tx0_symbol_clk;
- struct clk *rx0_symbol_clk;
- struct clk *rx1_symbol_clk;
+ struct clk_bulk_data *clks;
const struct samsung_ufs_phy_drvdata *drvdata;
- struct samsung_ufs_phy_cfg **cfg;
- const struct pmu_isol *isol;
+ const struct samsung_ufs_phy_cfg * const *cfgs;
+ struct samsung_ufs_phy_pmu_isol isol;
u8 lane_cnt;
int ufs_phy_state;
enum phy_mode mode;
@@ -136,11 +135,12 @@ static inline struct samsung_ufs_phy *get_samsung_ufs_phy(struct phy *phy)
static inline void samsung_ufs_phy_ctrl_isol(
struct samsung_ufs_phy *phy, u32 isol)
{
- regmap_update_bits(phy->reg_pmu, phy->isol->offset,
- phy->isol->mask, isol ? 0 : phy->isol->en);
+ regmap_update_bits(phy->reg_pmu, phy->isol.offset,
+ phy->isol.mask, isol ? 0 : phy->isol.en);
}
extern const struct samsung_ufs_phy_drvdata exynos7_ufs_phy;
extern const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy;
+extern const struct samsung_ufs_phy_drvdata fsd_ufs_phy;
#endif /* _PHY_SAMSUNG_UFS_ */
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 007a23c78d56..a98c911cc37a 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -358,7 +358,9 @@ static int stm32_usbphyc_phy_init(struct phy *phy)
return 0;
pll_disable:
- return stm32_usbphyc_pll_disable(usbphyc);
+ stm32_usbphyc_pll_disable(usbphyc);
+
+ return ret;
}
static int stm32_usbphyc_phy_exit(struct phy *phy)
diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c
index 3ee02b9eb04f..1415ca71de38 100644
--- a/drivers/phy/tegra/phy-tegra194-p2u.c
+++ b/drivers/phy/tegra/phy-tegra194-p2u.c
@@ -2,7 +2,7 @@
/*
* P2U (PIPE to UPHY) driver for Tegra T194 SoC
*
- * Copyright (C) 2019 NVIDIA Corporation.
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
*
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
@@ -14,6 +14,9 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#define P2U_CONTROL_CMN 0x74
+#define P2U_CONTROL_CMN_SKP_SIZE_PROTECTION_EN BIT(20)
+
#define P2U_PERIODIC_EQ_CTRL_GEN3 0xc0
#define P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN BIT(0)
#define P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN BIT(1)
@@ -24,8 +27,17 @@
#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK 0xffff
#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL 160
+#define P2U_DIR_SEARCH_CTRL 0xd4
+#define P2U_DIR_SEARCH_CTRL_GEN4_FINE_GRAIN_SEARCH_TWICE BIT(18)
+
+struct tegra_p2u_of_data {
+ bool one_dir_search;
+};
+
struct tegra_p2u {
void __iomem *base;
+ bool skip_sz_protection_en; /* Needed to support two retimers */
+ struct tegra_p2u_of_data *of_data;
};
static inline void p2u_writel(struct tegra_p2u *phy, const u32 value,
@@ -44,6 +56,12 @@ static int tegra_p2u_power_on(struct phy *x)
struct tegra_p2u *phy = phy_get_drvdata(x);
u32 val;
+ if (phy->skip_sz_protection_en) {
+ val = p2u_readl(phy, P2U_CONTROL_CMN);
+ val |= P2U_CONTROL_CMN_SKP_SIZE_PROTECTION_EN;
+ p2u_writel(phy, val, P2U_CONTROL_CMN);
+ }
+
val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN3);
val &= ~P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN;
val |= P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN;
@@ -58,6 +76,12 @@ static int tegra_p2u_power_on(struct phy *x)
val |= P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL;
p2u_writel(phy, val, P2U_RX_DEBOUNCE_TIME);
+ if (phy->of_data->one_dir_search) {
+ val = p2u_readl(phy, P2U_DIR_SEARCH_CTRL);
+ val &= ~P2U_DIR_SEARCH_CTRL_GEN4_FINE_GRAIN_SEARCH_TWICE;
+ p2u_writel(phy, val, P2U_DIR_SEARCH_CTRL);
+ }
+
return 0;
}
@@ -77,10 +101,19 @@ static int tegra_p2u_probe(struct platform_device *pdev)
if (!phy)
return -ENOMEM;
+ phy->of_data =
+ (struct tegra_p2u_of_data *)of_device_get_match_data(dev);
+ if (!phy->of_data)
+ return -EINVAL;
+
phy->base = devm_platform_ioremap_resource_byname(pdev, "ctl");
if (IS_ERR(phy->base))
return PTR_ERR(phy->base);
+ phy->skip_sz_protection_en =
+ of_property_read_bool(dev->of_node,
+ "nvidia,skip-sz-protect-en");
+
platform_set_drvdata(pdev, phy);
generic_phy = devm_phy_create(dev, NULL, &ops);
@@ -96,9 +129,22 @@ static int tegra_p2u_probe(struct platform_device *pdev)
return 0;
}
+static const struct tegra_p2u_of_data tegra194_p2u_of_data = {
+ .one_dir_search = false,
+};
+
+static const struct tegra_p2u_of_data tegra234_p2u_of_data = {
+ .one_dir_search = true,
+};
+
static const struct of_device_id tegra_p2u_id_table[] = {
{
.compatible = "nvidia,tegra194-p2u",
+ .data = &tegra194_p2u_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-p2u",
+ .data = &tegra234_p2u_of_data,
},
{}
};
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index 9fe6ea6fdae5..fb619908f912 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -1,13 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index da546c35d1d5..70bac931f99a 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -253,6 +253,14 @@ enum wiz_type {
AM64_WIZ_10G,
};
+struct wiz_data {
+ enum wiz_type type;
+ const struct reg_field *refclk_dig_sel;
+ const struct reg_field *pma_cmn_refclk1_dig_div;
+ const struct wiz_clk_mux_sel *clk_mux_sel;
+ unsigned int clk_div_sel_num;
+};
+
#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
#define WIZ_TYPEC_DIR_DEBOUNCE_MAX 1000
@@ -290,6 +298,7 @@ struct wiz {
struct clk *input_clks[WIZ_MAX_INPUT_CLOCKS];
struct clk *output_clks[WIZ_MAX_OUTPUT_CLOCKS];
struct clk_onecell_data clk_data;
+ const struct wiz_data *data;
};
static int wiz_reset(struct wiz *wiz)
@@ -409,6 +418,7 @@ static int wiz_regfield_init(struct wiz *wiz)
struct regmap *regmap = wiz->regmap;
int num_lanes = wiz->num_lanes;
struct device *dev = wiz->dev;
+ const struct wiz_data *data = wiz->data;
int i;
wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
@@ -445,10 +455,10 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV]);
}
- if (wiz->type == J721E_WIZ_16G) {
+ if (data->pma_cmn_refclk1_dig_div) {
wiz->div_sel_field[CMN_REFCLK1_DIG_DIV] =
devm_regmap_field_alloc(dev, regmap,
- pma_cmn_refclk1_dig_div);
+ *data->pma_cmn_refclk1_dig_div);
if (IS_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
return PTR_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV]);
@@ -469,15 +479,8 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
}
- if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
- wiz->mux_sel_field[REFCLK_DIG] =
- devm_regmap_field_alloc(dev, regmap,
- refclk_dig_sel_10g);
- else
- wiz->mux_sel_field[REFCLK_DIG] =
- devm_regmap_field_alloc(dev, regmap,
- refclk_dig_sel_16g);
-
+ wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, regmap,
+ *data->refclk_dig_sel);
if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
@@ -1078,15 +1081,37 @@ static const struct regmap_config wiz_regmap_config = {
.fast_io = true,
};
+static struct wiz_data j721e_16g_data = {
+ .type = J721E_WIZ_16G,
+ .refclk_dig_sel = &refclk_dig_sel_16g,
+ .pma_cmn_refclk1_dig_div = &pma_cmn_refclk1_dig_div,
+ .clk_mux_sel = clk_mux_sel_16g,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G,
+};
+
+static struct wiz_data j721e_10g_data = {
+ .type = J721E_WIZ_10G,
+ .refclk_dig_sel = &refclk_dig_sel_10g,
+ .clk_mux_sel = clk_mux_sel_10g,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
+static struct wiz_data am64_10g_data = {
+ .type = AM64_WIZ_10G,
+ .refclk_dig_sel = &refclk_dig_sel_10g,
+ .clk_mux_sel = clk_mux_sel_10g,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
static const struct of_device_id wiz_id_table[] = {
{
- .compatible = "ti,j721e-wiz-16g", .data = (void *)J721E_WIZ_16G
+ .compatible = "ti,j721e-wiz-16g", .data = &j721e_16g_data,
},
{
- .compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
+ .compatible = "ti,j721e-wiz-10g", .data = &j721e_10g_data,
},
{
- .compatible = "ti,am64-wiz-10g", .data = (void *)AM64_WIZ_10G
+ .compatible = "ti,am64-wiz-10g", .data = &am64_10g_data,
},
{}
};
@@ -1145,12 +1170,20 @@ static int wiz_probe(struct platform_device *pdev)
struct wiz *wiz;
int ret, val, i;
u32 num_lanes;
+ const struct wiz_data *data;
wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
if (!wiz)
return -ENOMEM;
- wiz->type = (enum wiz_type)of_device_get_match_data(dev);
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "NULL device data\n");
+ return -EINVAL;
+ }
+
+ wiz->data = data;
+ wiz->type = data->type;
child_node = of_get_child_by_name(node, "serdes");
if (!child_node) {
@@ -1226,17 +1259,9 @@ static int wiz_probe(struct platform_device *pdev)
wiz->dev = dev;
wiz->regmap = regmap;
wiz->num_lanes = num_lanes;
- if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
- wiz->clk_mux_sel = clk_mux_sel_10g;
- else
- wiz->clk_mux_sel = clk_mux_sel_16g;
-
+ wiz->clk_mux_sel = data->clk_mux_sel;
wiz->clk_div_sel = clk_div_sel;
-
- if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
- wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
- else
- wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+ wiz->clk_div_sel_num = data->clk_div_sel_num;
platform_set_drvdata(pdev, wiz);
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index c3ab4b69ea68..669c13d6e402 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -105,8 +105,9 @@ static int tusb1210_power_on(struct phy *phy)
msleep(TUSB1210_RESET_TIME_MS);
/* Restore the optional eye diagram optimization value */
- return tusb1210_ulpi_write(tusb, TUSB1210_VENDOR_SPECIFIC2,
- tusb->vendor_specific2);
+ tusb1210_ulpi_write(tusb, TUSB1210_VENDOR_SPECIFIC2, tusb->vendor_specific2);
+
+ return 0;
}
static int tusb1210_power_off(struct phy *phy)
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index f52960d2dfbe..1cf74b0c42e5 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,7 +32,7 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_AMD
- tristate "AMD GPIO pin control"
+ bool "AMD GPIO pin control"
depends on HAS_IOMEM
depends on ACPI || COMPILE_TEST
select GPIOLIB
@@ -311,7 +311,7 @@ config PINCTRL_MICROCHIP_SGPIO
LED controller.
config PINCTRL_OCELOT
- bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
+ tristate "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
depends on HAS_IOMEM
select GPIOLIB
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 4d7548686f39..aaa78a613196 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -632,7 +632,7 @@ struct aspeed_pin_desc {
SIG_EXPR_LIST_ALIAS(pin, sig, group)
/**
- * Similar to the above, but for pins with a dual expressions (DE) and
+ * Similar to the above, but for pins with a dual expressions (DE)
* and a single group (SG) of pins.
*
* @pin: The pin the signal will be routed to
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 9ab1f427286a..fd52a83387ef 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2013-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013-2017 Broadcom
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index dad453054776..7857e612a100 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -507,7 +507,7 @@ static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
}
}
-static void bcm2835_gpio_irq_enable(struct irq_data *data)
+static void bcm2835_gpio_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -516,13 +516,15 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
unsigned bank = GPIO_REG_OFFSET(gpio);
unsigned long flags;
+ gpiochip_enable_irq(chip, gpio);
+
raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
set_bit(offset, &pc->enabled_irq_map[bank]);
bcm2835_gpio_irq_config(pc, gpio, true);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
-static void bcm2835_gpio_irq_disable(struct irq_data *data)
+static void bcm2835_gpio_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -537,6 +539,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+
+ gpiochip_disable_irq(chip, gpio);
}
static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
@@ -693,16 +697,15 @@ static int bcm2835_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
return ret;
}
-static struct irq_chip bcm2835_gpio_irq_chip = {
+static const struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
- .irq_enable = bcm2835_gpio_irq_enable,
- .irq_disable = bcm2835_gpio_irq_disable,
.irq_set_type = bcm2835_gpio_irq_set_type,
.irq_ack = bcm2835_gpio_irq_ack,
- .irq_mask = bcm2835_gpio_irq_disable,
- .irq_unmask = bcm2835_gpio_irq_enable,
+ .irq_mask = bcm2835_gpio_irq_mask,
+ .irq_unmask = bcm2835_gpio_irq_unmask,
.irq_set_wake = bcm2835_gpio_irq_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .flags = (IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE),
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1280,7 +1283,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
girq = &pc->gpio_chip.irq;
- girq->chip = &bcm2835_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &bcm2835_gpio_irq_chip);
girq->parent_handler = bcm2835_gpio_irq_handler;
girq->num_parents = BCM2835_NUM_IRQS;
girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index 4344c5732400..5251460f6327 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014-2017 Broadcom
/*
* Broadcom Cygnus IOMUX driver
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 0fe4a1fcdf00..960e253f0be4 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2016 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This file contains the Northstar2 IOMUX driver that supports group
* based PINMUX configuration. The PWM is functional only when the
* corresponding mfio pin group is selected as gpio.
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 643dbd315033..3c792bf03bda 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014-2017 Broadcom
/*
* This file contains the Broadcom Northstar Plus (NSP) GPIO driver that
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index f1d60a708815..db8f79920ff0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2015 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This file contains the Northstar plus (NSP) IOMUX driver that supports
* group based PINMUX configuration. The Northstar plus IOMUX controller
* allows pins to be individually muxed to GPIO function. The NAND and MMC is
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ffe39336fcac..9e57f4c62e60 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -126,7 +126,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node)
- if (pctldev->dev->of_node == np) {
+ if (device_match_of_node(pctldev->dev, np)) {
mutex_unlock(&pinctrldev_list_mutex);
return pctldev;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 3fb238714718..ef898ee8ca6b 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -129,7 +129,7 @@ static int dt_to_map_one_config(struct pinctrl *p,
np_pctldev = of_get_next_parent(np_pctldev);
if (!np_pctldev || of_node_is_root(np_pctldev)) {
of_node_put(np_pctldev);
- ret = driver_deferred_probe_check_state(p->dev);
+ ret = -ENODEV;
/* keep deferring if modules are enabled */
if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
ret = -EPROBE_DEFER;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx93.c b/drivers/pinctrl/freescale/pinctrl-imx93.c
index 417e41b37a6f..91b3ee1e6fa9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx93.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx93.c
@@ -247,6 +247,7 @@ static const struct of_device_id imx93_pinctrl_of_match[] = {
{ .compatible = "fsl,imx93-iomuxc", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx93_pinctrl_of_match);
static int imx93_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index e5ec8b8956da..078eec8af4a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -151,6 +151,14 @@ config PINCTRL_LEWISBURG
This pinctrl driver provides an interface that allows configuring
of Intel Lewisburg pins and using them as GPIOs.
+config PINCTRL_METEORLAKE
+ tristate "Intel Meteor Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Meteor Lake pins and using them as GPIOs.
+
config PINCTRL_SUNRISEPOINT
tristate "Intel Sunrisepoint pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 181ffcf34d62..bb87e7bc7b20 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -18,5 +18,6 @@ obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LAKEFIELD) += pinctrl-lakefield.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
+obj-$(CONFIG_PINCTRL_METEORLAKE) += pinctrl-meteorlake.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 31f8f271628c..67db79f38051 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -603,7 +603,7 @@ static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc->groups[selector].name;
+ return vg->soc->groups[selector].grp.name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -613,8 +613,8 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc->groups[selector].pins;
- *num_pins = vg->soc->groups[selector].npins;
+ *pins = vg->soc->groups[selector].grp.pins;
+ *num_pins = vg->soc->groups[selector].grp.npins;
return 0;
}
@@ -662,15 +662,15 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
@@ -692,15 +692,15 @@ static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 26b2a425d201..5c4fd16e5b01 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -627,7 +627,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -635,8 +635,8 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -721,16 +721,16 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&chv_lock, flags);
/* Check first that the pad is not locked */
- for (i = 0; i < grp->npins; i++) {
- if (chv_pad_locked(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (chv_pad_locked(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&chv_lock, flags);
- dev_warn(dev, "unable to set mode for locked pin %u\n", grp->pins[i]);
+ dev_warn(dev, "unable to set mode for locked pin %u\n", grp->grp.pins[i]);
return -EBUSY;
}
}
- for (i = 0; i < grp->npins; i++) {
- int pin = grp->pins[i];
+ for (i = 0; i < grp->grp.npins; i++) {
+ int pin = grp->grp.pins[i];
unsigned int mode;
bool invert_oe;
u32 value;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index ffc045f7bf00..52ecd66ce357 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -279,7 +279,7 @@ static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -287,8 +287,8 @@ static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -391,19 +391,19 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!intel_pad_usable(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!intel_pad_usable(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
+ padcfg0 = intel_get_padcfg(pctrl, grp->grp.pins[i], PADCFG0);
value = readl(padcfg0);
value &= ~PADCFG0_PMODE_MASK;
@@ -1641,16 +1641,14 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
{
+ const struct intel_pinctrl_soc_data * const *table;
const struct intel_pinctrl_soc_data *data = NULL;
- const struct intel_pinctrl_soc_data **table;
- struct acpi_device *adev;
- unsigned int i;
- adev = ACPI_COMPANION(&pdev->dev);
- if (adev) {
- const void *match = device_get_match_data(&pdev->dev);
+ table = device_get_match_data(&pdev->dev);
+ if (table) {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ unsigned int i;
- table = (const struct intel_pinctrl_soc_data **)match;
for (i = 0; table[i]; i++) {
if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
data = table[i];
@@ -1664,7 +1662,7 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
if (!id)
return ERR_PTR(-ENODEV);
- table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+ table = (const struct intel_pinctrl_soc_data * const *)id->driver_data;
data = table[pdev->id];
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 710341bb67cc..65628423bf63 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -24,17 +24,12 @@ struct device;
/**
* struct intel_pingroup - Description about group of pins
- * @name: Name of the groups
- * @pins: All pins in this group
- * @npins: Number of pins in this groups
- * @mode: Native mode in which the group is muxed out @pins. Used if @modes
- * is %NULL.
+ * @grp: Generic data of the pin group (name and pins)
+ * @mode: Native mode in which the group is muxed out @pins. Used if @modes is %NULL.
* @modes: If not %NULL this will hold mode for each pin in @pins
*/
struct intel_pingroup {
- const char *name;
- const unsigned int *pins;
- size_t npins;
+ struct pingroup grp;
unsigned short mode;
const unsigned int *modes;
};
@@ -156,15 +151,11 @@ struct intel_community {
* a single integer or an array of integers in which case mode is per
* pin.
*/
-#define PIN_GROUP(n, p, m) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .mode = __builtin_choose_expr( \
- __builtin_constant_p((m)), (m), 0), \
- .modes = __builtin_choose_expr( \
- __builtin_constant_p((m)), NULL, (m)), \
+#define PIN_GROUP(n, p, m) \
+ { \
+ .grp = PINCTRL_PINGROUP((n), (p), ARRAY_SIZE((p))), \
+ .mode = __builtin_choose_expr(__builtin_constant_p((m)), (m), 0), \
+ .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
#define FUNCTION(n, g) \
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 4fb39eb30902..5d1abee30f8f 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -282,7 +282,7 @@ static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- return lg->soc->groups[selector].name;
+ return lg->soc->groups[selector].grp.name;
}
static int lp_get_group_pins(struct pinctrl_dev *pctldev,
@@ -292,8 +292,8 @@ static int lp_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- *pins = lg->soc->groups[selector].pins;
- *num_pins = lg->soc->groups[selector].npins;
+ *pins = lg->soc->groups[selector].grp.pins;
+ *num_pins = lg->soc->groups[selector].grp.npins;
return 0;
}
@@ -366,8 +366,8 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&lg->lock, flags);
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
- void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ for (i = 0; i < grp->grp.npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->grp.pins[i], LP_CONFIG1);
u32 value;
value = ioread32(reg);
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 3ae141e0b421..5e752818adb4 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -520,7 +520,7 @@ static const char *mrfld_get_group_name(struct pinctrl_dev *pctldev,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- return mp->groups[group].name;
+ return mp->groups[group].grp.name;
}
static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -528,8 +528,8 @@ static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- *pins = mp->groups[group].pins;
- *npins = mp->groups[group].npins;
+ *pins = mp->groups[group].grp.pins;
+ *npins = mp->groups[group].grp.npins;
return 0;
}
@@ -604,15 +604,15 @@ static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!mrfld_buf_available(mp, grp->pins[i]))
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!mrfld_buf_available(mp, grp->grp.pins[i]))
return -EBUSY;
}
/* Now enable the mux setting for each pin in the group */
raw_spin_lock_irqsave(&mp->lock, flags);
- for (i = 0; i < grp->npins; i++)
- mrfld_update_bufcfg(mp, grp->pins[i], bits, mask);
+ for (i = 0; i < grp->grp.npins; i++)
+ mrfld_update_bufcfg(mp, grp->grp.pins[i], bits, mask);
raw_spin_unlock_irqrestore(&mp->lock, flags);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
new file mode 100644
index 000000000000..9576dcd1cb29
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Meteor Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2022, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define MTL_PAD_OWN 0x0b0
+#define MTL_PADCFGLOCK 0x110
+#define MTL_HOSTSW_OWN 0x140
+#define MTL_GPI_IS 0x200
+#define MTL_GPI_IE 0x210
+
+#define MTL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define MTL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = MTL_PAD_OWN, \
+ .padcfglock_offset = MTL_PADCFGLOCK, \
+ .hostown_offset = MTL_HOSTSW_OWN, \
+ .is_offset = MTL_GPI_IS, \
+ .ie_offset = MTL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Meteor Lake-P */
+static const struct pinctrl_pin_desc mtlp_pins[] = {
+ /* CPU */
+ PINCTRL_PIN(0, "PECI"),
+ PINCTRL_PIN(1, "UFS_RESET_B"),
+ PINCTRL_PIN(2, "VIDSOUT"),
+ PINCTRL_PIN(3, "VIDSCK"),
+ PINCTRL_PIN(4, "VIDALERT_B"),
+ /* GPP_V */
+ PINCTRL_PIN(5, "BATLOW_B"),
+ PINCTRL_PIN(6, "AC_PRESENT"),
+ PINCTRL_PIN(7, "SOC_WAKE_B"),
+ PINCTRL_PIN(8, "PWRBTN_B"),
+ PINCTRL_PIN(9, "SLP_S3_B"),
+ PINCTRL_PIN(10, "SLP_S4_B"),
+ PINCTRL_PIN(11, "SLP_A_B"),
+ PINCTRL_PIN(12, "GPP_V_7"),
+ PINCTRL_PIN(13, "SUSCLK"),
+ PINCTRL_PIN(14, "SLP_WLAN_B"),
+ PINCTRL_PIN(15, "SLP_S5_B"),
+ PINCTRL_PIN(16, "LANPHYPC"),
+ PINCTRL_PIN(17, "SLP_LAN_B"),
+ PINCTRL_PIN(18, "GPP_V_13"),
+ PINCTRL_PIN(19, "WAKE_B"),
+ PINCTRL_PIN(20, "GPP_V_15"),
+ PINCTRL_PIN(21, "GPP_V_16"),
+ PINCTRL_PIN(22, "GPP_V_17"),
+ PINCTRL_PIN(23, "GPP_V_18"),
+ PINCTRL_PIN(24, "CATERR_B"),
+ PINCTRL_PIN(25, "PROCHOT_B"),
+ PINCTRL_PIN(26, "THERMTRIP_B"),
+ PINCTRL_PIN(27, "DSI_DE_TE_2_GENLOCK_REF"),
+ PINCTRL_PIN(28, "DSI_DE_TE_1_DISP_UTILS"),
+ /* GPP_C */
+ PINCTRL_PIN(29, "SMBCLK"),
+ PINCTRL_PIN(30, "SMBDATA"),
+ PINCTRL_PIN(31, "SMBALERT_B"),
+ PINCTRL_PIN(32, "SML0CLK"),
+ PINCTRL_PIN(33, "SML0DATA"),
+ PINCTRL_PIN(34, "GPP_C_5"),
+ PINCTRL_PIN(35, "GPP_C_6"),
+ PINCTRL_PIN(36, "GPP_C_7"),
+ PINCTRL_PIN(37, "GPP_C_8"),
+ PINCTRL_PIN(38, "GPP_C_9"),
+ PINCTRL_PIN(39, "GPP_C_10"),
+ PINCTRL_PIN(40, "GPP_C_11"),
+ PINCTRL_PIN(41, "GPP_C_12"),
+ PINCTRL_PIN(42, "GPP_C_13"),
+ PINCTRL_PIN(43, "GPP_C_14"),
+ PINCTRL_PIN(44, "GPP_C_15"),
+ PINCTRL_PIN(45, "GPP_C_16"),
+ PINCTRL_PIN(46, "GPP_C_17"),
+ PINCTRL_PIN(47, "GPP_C_18"),
+ PINCTRL_PIN(48, "GPP_C_19"),
+ PINCTRL_PIN(49, "GPP_C_20"),
+ PINCTRL_PIN(50, "GPP_C_21"),
+ PINCTRL_PIN(51, "GPP_C_22"),
+ PINCTRL_PIN(52, "GPP_C_23"),
+ /* GPP_A */
+ PINCTRL_PIN(53, "ESPI_IO_0"),
+ PINCTRL_PIN(54, "ESPI_IO_1"),
+ PINCTRL_PIN(55, "ESPI_IO_2"),
+ PINCTRL_PIN(56, "ESPI_IO_3"),
+ PINCTRL_PIN(57, "ESPI_CS0_B"),
+ PINCTRL_PIN(58, "ESPI_CLK"),
+ PINCTRL_PIN(59, "ESPI_RESET_B"),
+ PINCTRL_PIN(60, "GPP_A_7"),
+ PINCTRL_PIN(61, "GPP_A_8"),
+ PINCTRL_PIN(62, "GPP_A_9"),
+ PINCTRL_PIN(63, "GPP_A_10"),
+ PINCTRL_PIN(64, "GPP_A_11"),
+ PINCTRL_PIN(65, "GPP_A_12"),
+ PINCTRL_PIN(66, "ESPI_CS1_B"),
+ PINCTRL_PIN(67, "ESPI_CS2_B"),
+ PINCTRL_PIN(68, "ESPI_CS3_B"),
+ PINCTRL_PIN(69, "ESPI_ALERT0_B"),
+ PINCTRL_PIN(70, "ESPI_ALERT1_B"),
+ PINCTRL_PIN(71, "ESPI_ALERT2_B"),
+ PINCTRL_PIN(72, "ESPI_ALERT3_B"),
+ PINCTRL_PIN(73, "GPP_A_20"),
+ PINCTRL_PIN(74, "GPP_A_21"),
+ PINCTRL_PIN(75, "GPP_A_22"),
+ PINCTRL_PIN(76, "GPP_A_23"),
+ PINCTRL_PIN(77, "ESPI_CLK_LOOPBK"),
+ /* GPP_E */
+ PINCTRL_PIN(78, "GPP_E_0"),
+ PINCTRL_PIN(79, "GPP_E_1"),
+ PINCTRL_PIN(80, "GPP_E_2"),
+ PINCTRL_PIN(81, "GPP_E_3"),
+ PINCTRL_PIN(82, "GPP_E_4"),
+ PINCTRL_PIN(83, "GPP_E_5"),
+ PINCTRL_PIN(84, "GPP_E_6"),
+ PINCTRL_PIN(85, "GPP_E_7"),
+ PINCTRL_PIN(86, "GPP_E_8"),
+ PINCTRL_PIN(87, "GPP_E_9"),
+ PINCTRL_PIN(88, "GPP_E_10"),
+ PINCTRL_PIN(89, "GPP_E_11"),
+ PINCTRL_PIN(90, "GPP_E_12"),
+ PINCTRL_PIN(91, "GPP_E_13"),
+ PINCTRL_PIN(92, "GPP_E_14"),
+ PINCTRL_PIN(93, "SLP_DRAM_B"),
+ PINCTRL_PIN(94, "GPP_E_16"),
+ PINCTRL_PIN(95, "GPP_E_17"),
+ PINCTRL_PIN(96, "GPP_E_18"),
+ PINCTRL_PIN(97, "GPP_E_19"),
+ PINCTRL_PIN(98, "GPP_E_20"),
+ PINCTRL_PIN(99, "GPP_E_21"),
+ PINCTRL_PIN(100, "DNX_FORCE_RELOAD"),
+ PINCTRL_PIN(101, "GPP_E_23"),
+ PINCTRL_PIN(102, "THC0_GSPI0_CLK_LOOPBK"),
+ /* GPP_H */
+ PINCTRL_PIN(103, "GPP_H_0"),
+ PINCTRL_PIN(104, "GPP_H_1"),
+ PINCTRL_PIN(105, "GPP_H_2"),
+ PINCTRL_PIN(106, "GPP_H_3"),
+ PINCTRL_PIN(107, "GPP_H_4"),
+ PINCTRL_PIN(108, "GPP_H_5"),
+ PINCTRL_PIN(109, "GPP_H_6"),
+ PINCTRL_PIN(110, "GPP_H_7"),
+ PINCTRL_PIN(111, "GPP_H_8"),
+ PINCTRL_PIN(112, "GPP_H_9"),
+ PINCTRL_PIN(113, "GPP_H_10"),
+ PINCTRL_PIN(114, "GPP_H_11"),
+ PINCTRL_PIN(115, "GPP_H_12"),
+ PINCTRL_PIN(116, "CPU_C10_GATE_B"),
+ PINCTRL_PIN(117, "GPP_H_14"),
+ PINCTRL_PIN(118, "GPP_H_15"),
+ PINCTRL_PIN(119, "GPP_H_16"),
+ PINCTRL_PIN(120, "GPP_H_17"),
+ PINCTRL_PIN(121, "GPP_H_18"),
+ PINCTRL_PIN(122, "GPP_H_19"),
+ PINCTRL_PIN(123, "GPP_H_20"),
+ PINCTRL_PIN(124, "GPP_H_21"),
+ PINCTRL_PIN(125, "GPP_H_22"),
+ PINCTRL_PIN(126, "GPP_H_23"),
+ PINCTRL_PIN(127, "LPI3C1_CLK_LOOPBK"),
+ PINCTRL_PIN(128, "I3C0_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(129, "CNV_BRI_DT"),
+ PINCTRL_PIN(130, "CNV_BRI_RSP"),
+ PINCTRL_PIN(131, "CNV_RGI_DT"),
+ PINCTRL_PIN(132, "CNV_RGI_RSP"),
+ PINCTRL_PIN(133, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(134, "CRF_CLKREQ"),
+ PINCTRL_PIN(135, "GPP_F_6"),
+ PINCTRL_PIN(136, "FUSA_DIAGTEST_EN"),
+ PINCTRL_PIN(137, "FUSA_DIAGTEST_MODE"),
+ PINCTRL_PIN(138, "BOOTMPC"),
+ PINCTRL_PIN(139, "GPP_F_10"),
+ PINCTRL_PIN(140, "GPP_F_11"),
+ PINCTRL_PIN(141, "GSXDOUT"),
+ PINCTRL_PIN(142, "GSXSLOAD"),
+ PINCTRL_PIN(143, "GSXDIN"),
+ PINCTRL_PIN(144, "GSXSRESETB"),
+ PINCTRL_PIN(145, "GSXCLK"),
+ PINCTRL_PIN(146, "GMII_MDC_0"),
+ PINCTRL_PIN(147, "GMII_MDIO_0"),
+ PINCTRL_PIN(148, "GPP_F_19"),
+ PINCTRL_PIN(149, "GPP_F_20"),
+ PINCTRL_PIN(150, "GPP_F_21"),
+ PINCTRL_PIN(151, "GPP_F_22"),
+ PINCTRL_PIN(152, "GPP_F_23"),
+ PINCTRL_PIN(153, "THC1_GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(154, "GSPI0A_CLK_LOOPBK"),
+ /* SPI0 */
+ PINCTRL_PIN(155, "SPI0_IO_2"),
+ PINCTRL_PIN(156, "SPI0_IO_3"),
+ PINCTRL_PIN(157, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(158, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(159, "SPI0_TPM_CS_B"),
+ PINCTRL_PIN(160, "SPI0_FLASH_0_CS_B"),
+ PINCTRL_PIN(161, "SPI0_FLASH_1_CS_B"),
+ PINCTRL_PIN(162, "SPI0_CLK"),
+ PINCTRL_PIN(163, "L_BKLTEN"),
+ PINCTRL_PIN(164, "L_BKLTCTL"),
+ PINCTRL_PIN(165, "L_VDDEN"),
+ PINCTRL_PIN(166, "SYS_PWROK"),
+ PINCTRL_PIN(167, "SYS_RESET_B"),
+ PINCTRL_PIN(168, "MLK_RST_B"),
+ PINCTRL_PIN(169, "SPI0_CLK_LOOPBK"),
+ /* vGPIO_3 */
+ PINCTRL_PIN(170, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(171, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(172, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(173, "ESPI_USB_OCB_3"),
+ PINCTRL_PIN(174, "USB_CPU_OCB_0"),
+ PINCTRL_PIN(175, "USB_CPU_OCB_1"),
+ PINCTRL_PIN(176, "USB_CPU_OCB_2"),
+ PINCTRL_PIN(177, "USB_CPU_OCB_3"),
+ PINCTRL_PIN(178, "TS0_IN_INT"),
+ PINCTRL_PIN(179, "TS1_IN_INT"),
+ PINCTRL_PIN(180, "THC0_WOT_INT"),
+ PINCTRL_PIN(181, "THC1_WOT_INT"),
+ PINCTRL_PIN(182, "THC0_WHC_INT"),
+ PINCTRL_PIN(183, "THC1_WHC_INT"),
+ /* GPP_S */
+ PINCTRL_PIN(184, "GPP_S_0"),
+ PINCTRL_PIN(185, "GPP_S_1"),
+ PINCTRL_PIN(186, "GPP_S_2"),
+ PINCTRL_PIN(187, "GPP_S_3"),
+ PINCTRL_PIN(188, "GPP_S_4"),
+ PINCTRL_PIN(189, "GPP_S_5"),
+ PINCTRL_PIN(190, "GPP_S_6"),
+ PINCTRL_PIN(191, "GPP_S_7"),
+ /* JTAG */
+ PINCTRL_PIN(192, "JTAG_MBPB0"),
+ PINCTRL_PIN(193, "JTAG_MBPB1"),
+ PINCTRL_PIN(194, "JTAG_MBPB2"),
+ PINCTRL_PIN(195, "JTAG_MBPB3"),
+ PINCTRL_PIN(196, "JTAG_TDO"),
+ PINCTRL_PIN(197, "PRDY_B"),
+ PINCTRL_PIN(198, "PREQ_B"),
+ PINCTRL_PIN(199, "JTAG_TDI"),
+ PINCTRL_PIN(200, "JTAG_TMS"),
+ PINCTRL_PIN(201, "JTAG_TCK"),
+ PINCTRL_PIN(202, "DBG_PMODE"),
+ PINCTRL_PIN(203, "JTAG_TRST_B"),
+ /* GPP_B */
+ PINCTRL_PIN(204, "ADM_VID_0"),
+ PINCTRL_PIN(205, "ADM_VID_1"),
+ PINCTRL_PIN(206, "GPP_B_2"),
+ PINCTRL_PIN(207, "GPP_B_3"),
+ PINCTRL_PIN(208, "GPP_B_4"),
+ PINCTRL_PIN(209, "GPP_B_5"),
+ PINCTRL_PIN(210, "GPP_B_6"),
+ PINCTRL_PIN(211, "GPP_B_7"),
+ PINCTRL_PIN(212, "GPP_B_8"),
+ PINCTRL_PIN(213, "GPP_B_9"),
+ PINCTRL_PIN(214, "GPP_B_10"),
+ PINCTRL_PIN(215, "GPP_B_11"),
+ PINCTRL_PIN(216, "SLP_S0_B"),
+ PINCTRL_PIN(217, "PLTRST_B"),
+ PINCTRL_PIN(218, "GPP_B_14"),
+ PINCTRL_PIN(219, "GPP_B_15"),
+ PINCTRL_PIN(220, "GPP_B_16"),
+ PINCTRL_PIN(221, "GPP_B_17"),
+ PINCTRL_PIN(222, "GPP_B_18"),
+ PINCTRL_PIN(223, "GPP_B_19"),
+ PINCTRL_PIN(224, "GPP_B_20"),
+ PINCTRL_PIN(225, "GPP_B_21"),
+ PINCTRL_PIN(226, "GPP_B_22"),
+ PINCTRL_PIN(227, "GPP_B_23"),
+ PINCTRL_PIN(228, "ISH_I3C0_CLK_LOOPBK"),
+ /* GPP_D */
+ PINCTRL_PIN(229, "GPP_D_0"),
+ PINCTRL_PIN(230, "GPP_D_1"),
+ PINCTRL_PIN(231, "GPP_D_2"),
+ PINCTRL_PIN(232, "GPP_D_3"),
+ PINCTRL_PIN(233, "GPP_D_4"),
+ PINCTRL_PIN(234, "GPP_D_5"),
+ PINCTRL_PIN(235, "GPP_D_6"),
+ PINCTRL_PIN(236, "GPP_D_7"),
+ PINCTRL_PIN(237, "GPP_D_8"),
+ PINCTRL_PIN(238, "GPP_D_9"),
+ PINCTRL_PIN(239, "HDA_BCLK"),
+ PINCTRL_PIN(240, "HDA_SYNC"),
+ PINCTRL_PIN(241, "HDA_SDO"),
+ PINCTRL_PIN(242, "HDA_SDI_0"),
+ PINCTRL_PIN(243, "GPP_D_14"),
+ PINCTRL_PIN(244, "GPP_D_15"),
+ PINCTRL_PIN(245, "GPP_D_16"),
+ PINCTRL_PIN(246, "HDA_RST_B"),
+ PINCTRL_PIN(247, "GPP_D_18"),
+ PINCTRL_PIN(248, "GPP_D_19"),
+ PINCTRL_PIN(249, "GPP_D_20"),
+ PINCTRL_PIN(250, "UFS_REFCLK"),
+ PINCTRL_PIN(251, "BPKI3C_SDA"),
+ PINCTRL_PIN(252, "BPKI3C_SCL"),
+ PINCTRL_PIN(253, "BOOTHALT_B"),
+ /* vGPIO */
+ PINCTRL_PIN(254, "CNV_BTEN"),
+ PINCTRL_PIN(255, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(256, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(257, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(258, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(259, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(260, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(261, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(262, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(263, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(264, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(265, "vUART0_TXD"),
+ PINCTRL_PIN(266, "vUART0_RXD"),
+ PINCTRL_PIN(267, "vUART0_CTS_B"),
+ PINCTRL_PIN(268, "vUART0_RTS_B"),
+ PINCTRL_PIN(269, "vISH_UART0_TXD"),
+ PINCTRL_PIN(270, "vISH_UART0_RXD"),
+ PINCTRL_PIN(271, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(272, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(273, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(274, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(275, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(276, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(277, "vI2S2_SCLK"),
+ PINCTRL_PIN(278, "vI2S2_SFRM"),
+ PINCTRL_PIN(279, "vI2S2_TXD"),
+ PINCTRL_PIN(280, "vI2S2_RXD"),
+ PINCTRL_PIN(281, "vCNV_BT_I2S_BCLK_2"),
+ PINCTRL_PIN(282, "vCNV_BT_I2S_WS_SYNC_2"),
+ PINCTRL_PIN(283, "vCNV_BT_I2S_SDO_2"),
+ PINCTRL_PIN(284, "vCNV_BT_I2S_SDI_2"),
+ PINCTRL_PIN(285, "vI2S2_SCLK_2"),
+ PINCTRL_PIN(286, "vI2S2_SFRM_2"),
+ PINCTRL_PIN(287, "vI2S2_TXD_2"),
+ PINCTRL_PIN(288, "vI2S2_RXD_2"),
+};
+
+static const struct intel_padgroup mtlp_community0_gpps[] = {
+ MTL_GPP(0, 0, 4, 0), /* CPU */
+ MTL_GPP(1, 5, 28, 32), /* GPP_V */
+ MTL_GPP(2, 29, 52, 64), /* GPP_C */
+};
+
+static const struct intel_padgroup mtlp_community1_gpps[] = {
+ MTL_GPP(0, 53, 77, 96), /* GPP_A */
+ MTL_GPP(1, 78, 102, 128), /* GPP_E */
+};
+
+static const struct intel_padgroup mtlp_community3_gpps[] = {
+ MTL_GPP(0, 103, 128, 160), /* GPP_H */
+ MTL_GPP(1, 129, 154, 192), /* GPP_F */
+ MTL_GPP(2, 155, 169, 224), /* SPI0 */
+ MTL_GPP(3, 170, 183, 256), /* vGPIO_3 */
+};
+
+static const struct intel_padgroup mtlp_community4_gpps[] = {
+ MTL_GPP(0, 184, 191, 288), /* GPP_S */
+ MTL_GPP(1, 192, 203, 320), /* JTAG */
+};
+
+static const struct intel_padgroup mtlp_community5_gpps[] = {
+ MTL_GPP(0, 204, 228, 352), /* GPP_B */
+ MTL_GPP(1, 229, 253, 384), /* GPP_D */
+ MTL_GPP(2, 254, 285, 416), /* vGPIO_0 */
+ MTL_GPP(3, 286, 288, 448), /* vGPIO_1 */
+};
+
+static const struct intel_community mtlp_communities[] = {
+ MTL_COMMUNITY(0, 0, 52, mtlp_community0_gpps),
+ MTL_COMMUNITY(1, 53, 102, mtlp_community1_gpps),
+ MTL_COMMUNITY(2, 103, 183, mtlp_community3_gpps),
+ MTL_COMMUNITY(3, 184, 203, mtlp_community4_gpps),
+ MTL_COMMUNITY(4, 204, 288, mtlp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data mtlp_soc_data = {
+ .pins = mtlp_pins,
+ .npins = ARRAY_SIZE(mtlp_pins),
+ .communities = mtlp_communities,
+ .ncommunities = ARRAY_SIZE(mtlp_communities),
+};
+
+static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
+ { "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops);
+
+static struct platform_driver mtl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "meteorlake-pinctrl",
+ .acpi_match_table = mtl_pinctrl_acpi_match,
+ .pm = &mtl_pinctrl_pm_ops,
+ },
+};
+module_platform_driver(mtl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Meteor Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index acccde9262ba..78c02b7c81f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1107,24 +1107,10 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0060, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 31, 1),
PIN_FIELD_BASE(152, 152, 7, 0x0090, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x0090, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x0090, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 31, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 31, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0030, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0030, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0030, 0x10, 4, 1),
@@ -1137,12 +1123,6 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0030, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0030, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0030, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 31, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
@@ -1164,24 +1144,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00c0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00c0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00c0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00c0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0040, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0040, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0040, 0x10, 4, 1),
@@ -1194,12 +1160,6 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0040, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0040, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
@@ -1221,24 +1181,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 13, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 11, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 23, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 9, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 21, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 7, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 19, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 5, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 17, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 15, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00d0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00d0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00d0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00d0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 3, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0050, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0050, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0050, 0x10, 4, 1),
@@ -1251,83 +1197,169 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0050, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0050, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0050, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 7, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 5, 1),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1e0en_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 1),
-};
+static const struct mtk_pin_field_calc mt8192_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(89, 89, 2, 0x0040, 0x10, 0, 5),
+ PIN_FIELD_BASE(90, 90, 2, 0x0040, 0x10, 5, 5),
-static const struct mtk_pin_field_calc mt8192_pin_e0_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 19, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 16, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 4, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 13, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 1, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 10, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 28, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 7, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 25, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 4, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 22, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 4, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 4, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 10, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 4, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 7, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 3),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 2, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 20, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 17, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 5, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 14, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 2, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 29, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 8, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 26, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 5, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 23, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 2, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 5, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 5, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 11, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 2, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 5, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 2, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 8, 1),
+static const struct mtk_pin_field_calc mt8192_pin_rsel_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 2),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 2),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 2),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 2),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 2),
};
+static const unsigned int mt8192_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/
+ MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/
+ MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/
+ MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/
+ MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PUPD_R1R0_TYPE,/*13*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*14*/ MTK_PULL_PUPD_R1R0_TYPE,/*15*/
+ MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/
+ MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/
+ MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/
+ MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/
+ MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/
+ MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/
+ MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/
+ MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/
+ MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/
+ MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/
+ MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/
+ MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/
+ MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
+ MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
+ MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/
+ MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/
+ MTK_PULL_PU_PD_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/
+ MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/
+ MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/
+ MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/
+ MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_TYPE,/*99*/
+ MTK_PULL_PU_PD_TYPE,/*100*/ MTK_PULL_PU_PD_TYPE,/*101*/
+ MTK_PULL_PU_PD_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/
+ MTK_PULL_PU_PD_TYPE,/*104*/ MTK_PULL_PU_PD_TYPE,/*105*/
+ MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/
+ MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/
+ MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/
+ MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/
+ MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/
+ MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*118*/ MTK_PULL_PU_PD_RSEL_TYPE,/*119*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*120*/ MTK_PULL_PU_PD_RSEL_TYPE,/*121*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PU_PD_RSEL_TYPE,/*125*/
+ MTK_PULL_PU_PD_TYPE,/*126*/ MTK_PULL_PU_PD_TYPE,/*127*/
+ MTK_PULL_PU_PD_TYPE,/*128*/ MTK_PULL_PU_PD_TYPE,/*129*/
+ MTK_PULL_PU_PD_TYPE,/*130*/ MTK_PULL_PU_PD_TYPE,/*131*/
+ MTK_PULL_PU_PD_TYPE,/*132*/ MTK_PULL_PU_PD_TYPE,/*133*/
+ MTK_PULL_PU_PD_TYPE,/*134*/ MTK_PULL_PU_PD_TYPE,/*135*/
+ MTK_PULL_PU_PD_TYPE,/*136*/ MTK_PULL_PU_PD_TYPE,/*137*/
+ MTK_PULL_PU_PD_TYPE,/*138*/ MTK_PULL_PU_PD_RSEL_TYPE,/*139*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*140*/ MTK_PULL_PU_PD_RSEL_TYPE,/*141*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*142*/ MTK_PULL_PU_PD_TYPE,/*143*/
+ MTK_PULL_PU_PD_TYPE,/*144*/ MTK_PULL_PU_PD_TYPE,/*145*/
+ MTK_PULL_PU_PD_TYPE,/*146*/ MTK_PULL_PU_PD_TYPE,/*147*/
+ MTK_PULL_PU_PD_TYPE,/*148*/ MTK_PULL_PU_PD_TYPE,/*149*/
+ MTK_PULL_PU_PD_TYPE,/*150*/ MTK_PULL_PU_PD_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PU_PD_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/
+ MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*160*/ MTK_PULL_PU_PD_RSEL_TYPE,/*161*/
+ MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/
+ MTK_PULL_PU_PD_TYPE,/*164*/ MTK_PULL_PU_PD_TYPE,/*165*/
+ MTK_PULL_PU_PD_TYPE,/*166*/ MTK_PULL_PU_PD_TYPE,/*167*/
+ MTK_PULL_PU_PD_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/
+ MTK_PULL_PU_PD_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/
+ MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/
+ MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/
+ MTK_PULL_PU_PD_TYPE,/*176*/ MTK_PULL_PU_PD_TYPE,/*177*/
+ MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/
+ MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/
+ MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PUPD_R1R0_TYPE,/*183*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*184*/ MTK_PULL_PUPD_R1R0_TYPE,/*185*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*186*/ MTK_PULL_PUPD_R1R0_TYPE,/*187*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*188*/ MTK_PULL_PUPD_R1R0_TYPE,/*189*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*190*/ MTK_PULL_PUPD_R1R0_TYPE,/*191*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*192*/ MTK_PULL_PUPD_R1R0_TYPE,/*193*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*194*/ MTK_PULL_PU_PD_TYPE,/*195*/
+ MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/
+ MTK_PULL_PU_PD_TYPE,/*198*/ MTK_PULL_PU_PD_TYPE,/*199*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*204*/ MTK_PULL_PU_PD_RSEL_TYPE,/*205*/
+ MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/
+ MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/
+ MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/
+ MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/
+ MTK_PULL_PU_PD_TYPE,/*214*/ MTK_PULL_PU_PD_TYPE,/*215*/
+ MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PU_PD_TYPE,/*217*/
+ MTK_PULL_PU_PD_TYPE,/*218*/ MTK_PULL_PU_PD_TYPE,/*219*/
+};
static const char * const mt8192_pinctrl_register_base_names[] = {
"iocfg0", "iocfg_rm", "iocfg_bm", "iocfg_bl", "iocfg_br",
@@ -1355,9 +1387,8 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8192_pin_pupd_range),
[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8192_pin_r0_range),
[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8192_pin_r1_range),
- [PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8192_pin_e1e0en_range),
- [PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8192_pin_e0_range),
- [PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8192_pin_e1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8192_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8192_pin_rsel_range),
};
static const struct mtk_pin_soc mt8192_data = {
@@ -1367,17 +1398,16 @@ static const struct mtk_pin_soc mt8192_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt8192),
.base_names = mt8192_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt8192_pinctrl_register_base_names),
+ .pull_type = mt8192_pull_type,
.eint_hw = &mt8192_eint_hw,
.nfuncs = 8,
.gpio_m = 0,
.bias_set_combo = mtk_pinconf_bias_set_combo,
.bias_get_combo = mtk_pinconf_bias_get_combo,
- .drive_set = mtk_pinconf_drive_set_raw,
- .drive_get = mtk_pinconf_drive_get_raw,
- .adv_pull_get = mtk_pinconf_adv_pull_get,
- .adv_pull_set = mtk_pinconf_adv_pull_set,
- .adv_drive_get = mtk_pinconf_adv_drive_get,
- .adv_drive_set = mtk_pinconf_adv_drive_set,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
};
static const struct of_device_id mt8192_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index a140b6bfbfaa..bcde042d29dc 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
struct device *dev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- spinlock_t irq_lock;
+ raw_spinlock_t irq_lock;
struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev;
struct armada_37xx_pin_group *groups;
@@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
writel(d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static void armada_37xx_irq_mask(struct irq_data *d)
@@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
writel(val & ~d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static void armada_37xx_irq_unmask(struct irq_data *d)
@@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
writel(val | d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
if (on)
val |= (BIT(d->hwirq % GPIO_PER_REG));
else
val &= ~(BIT(d->hwirq % GPIO_PER_REG));
writel(val, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0;
}
@@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
u32 val, reg = IRQ_POL;
unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
armada_37xx_irq_update_reg(&reg, d);
val = readl(info->base + reg);
switch (type) {
@@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
default:
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL;
}
writel(val, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0;
}
@@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
p = readl(info->base + IRQ_POL + 4 * reg_idx);
if ((p ^ l) & (1 << bit_num)) {
/*
@@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
ret = -1;
}
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return ret;
}
@@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 status;
unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
while (status) {
u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq +
@@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
update_status:
/* Update status in case a new IRQ appears */
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base +
IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
}
chained_irq_exit(chip, desc);
@@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device *dev = &pdev->dev;
unsigned int i, nr_irq_parent;
- spin_lock_init(&info->irq_lock);
+ raw_spin_lock_init(&info->irq_lock);
nr_irq_parent = of_irq_count(np);
if (!nr_irq_parent) {
@@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
{ },
};
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .use_raw_spinlock = true,
+};
+
static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
{
struct armada_37xx_pinctrl *info;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct regmap *regmap;
+ void __iomem *base;
int ret;
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to ioremap base address: %pe\n", base);
+ return PTR_ERR(base);
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base,
+ &armada_37xx_pinctrl_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to create regmap: %pe\n", regmap);
+ return PTR_ERR(regmap);
+ }
+
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
-
- regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
info->regmap = regmap;
-
info->data = of_device_get_match_data(dev);
ret = armada_37xx_pinctrl_register(pdev, info);
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index a1f93859e7ca..8ef0a97d2bf5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -96,10 +96,12 @@ static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_groups; n++) {
if (strcmp(name, pctl->groups[n].name) == 0)
return &pctl->groups[n];
}
+
return NULL;
}
@@ -108,6 +110,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
unsigned long config)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (config == grp->settings[n].val) {
if (!pctl->variant || (pctl->variant &
@@ -115,6 +118,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -123,6 +127,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
const char *name)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (strcmp(name, grp->settings[n].name) == 0) {
if (!pctl->variant || (pctl->variant &
@@ -130,6 +135,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -137,6 +143,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (grp->settings[n].flags &
(MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
@@ -145,6 +152,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -152,10 +160,12 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_functions; n++) {
if (strcmp(name, pctl->functions[n].name) == 0)
return &pctl->functions[n];
}
+
return NULL;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 640e50d94f27..f5014d09d81a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1421,8 +1421,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
has_config = nmk_pinctrl_dt_get_config(np, &configs);
np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
+ if (np_config) {
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ of_node_put(np_config);
+ }
if (has_config) {
const char *gpio_name;
const char *pin;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 0645c2c24f50..4691a33bc374 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -6,8 +6,6 @@
* Authors: Ken Xue <Ken.Xue@amd.com>
* Wu, Jeff <Jeff.Wu@amd.com>
*
- * Contact Information: Nehal Shah <Nehal-bakulchandra.Shah@amd.com>
- * Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
#include <linux/err.h>
@@ -31,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include "core.h"
#include "pinctrl-utils.h"
@@ -203,8 +202,6 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
bool tmr_out_unit;
- unsigned int time;
- unsigned int unit;
bool tmr_large;
char *level_trig;
@@ -218,13 +215,13 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *pull_up_sel;
char *pull_up_enable;
char *pull_down_enable;
- char *output_value;
- char *output_enable;
+ char *orientation;
char debounce_value[40];
char *debounce_enable;
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
- seq_printf(s, "GPIO bank%d\t", bank);
+ unsigned int time = 0;
+ unsigned int unit = 0;
switch (bank) {
case 0:
@@ -247,8 +244,9 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
/* Illegal bank number, ignore */
continue;
}
+ seq_printf(s, "GPIO bank%d\n", bank);
for (; i < pin_num; i++) {
- seq_printf(s, "pin%d\t", i);
+ seq_printf(s, "📌%d\t", i);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -256,84 +254,91 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
ACTIVE_LEVEL_MASK;
- interrupt_enable = "interrupt is enabled|";
+ interrupt_enable = "+";
if (level == ACTIVE_LEVEL_HIGH)
- active_level = "Active high|";
+ active_level = "↑";
else if (level == ACTIVE_LEVEL_LOW)
- active_level = "Active low|";
+ active_level = "↓";
else if (!(pin_reg & BIT(LEVEL_TRIG_OFF)) &&
level == ACTIVE_LEVEL_BOTH)
- active_level = "Active on both|";
+ active_level = "b";
else
- active_level = "Unknown Active level|";
+ active_level = "?";
if (pin_reg & BIT(LEVEL_TRIG_OFF))
- level_trig = "Level trigger|";
+ level_trig = "level";
else
- level_trig = "Edge trigger|";
+ level_trig = " edge";
} else {
- interrupt_enable =
- "interrupt is disabled|";
- active_level = " ";
- level_trig = " ";
+ interrupt_enable = "∅";
+ active_level = "∅";
+ level_trig = " ∅";
}
if (pin_reg & BIT(INTERRUPT_MASK_OFF))
- interrupt_mask =
- "interrupt is unmasked|";
+ interrupt_mask = "-";
else
- interrupt_mask =
- "interrupt is masked|";
+ interrupt_mask = "+";
+ seq_printf(s, "int %s (🎭 %s)| active-%s| %s-🔫| ",
+ interrupt_enable,
+ interrupt_mask,
+ active_level,
+ level_trig);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
- wake_cntrl0 = "enable wakeup in S0i3 state|";
+ wake_cntrl0 = "+";
else
- wake_cntrl0 = "disable wakeup in S0i3 state|";
+ wake_cntrl0 = "∅";
+ seq_printf(s, "S0i3 🌅 %s| ", wake_cntrl0);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
- wake_cntrl1 = "enable wakeup in S3 state|";
+ wake_cntrl1 = "+";
else
- wake_cntrl1 = "disable wakeup in S3 state|";
+ wake_cntrl1 = "∅";
+ seq_printf(s, "S3 🌅 %s| ", wake_cntrl1);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
- wake_cntrl2 = "enable wakeup in S4/S5 state|";
+ wake_cntrl2 = "+";
else
- wake_cntrl2 = "disable wakeup in S4/S5 state|";
+ wake_cntrl2 = "∅";
+ seq_printf(s, "S4/S5 🌅 %s| ", wake_cntrl2);
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
- pull_up_enable = "pull-up is enabled|";
+ pull_up_enable = "+";
if (pin_reg & BIT(PULL_UP_SEL_OFF))
- pull_up_sel = "8k pull-up|";
+ pull_up_sel = "8k";
else
- pull_up_sel = "4k pull-up|";
+ pull_up_sel = "4k";
} else {
- pull_up_enable = "pull-up is disabled|";
- pull_up_sel = " ";
+ pull_up_enable = "∅";
+ pull_up_sel = " ";
}
+ seq_printf(s, "pull-↑ %s (%s)| ",
+ pull_up_enable,
+ pull_up_sel);
if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF))
- pull_down_enable = "pull-down is enabled|";
+ pull_down_enable = "+";
else
- pull_down_enable = "Pull-down is disabled|";
+ pull_down_enable = "∅";
+ seq_printf(s, "pull-↓ %s| ", pull_down_enable);
if (pin_reg & BIT(OUTPUT_ENABLE_OFF)) {
- pin_sts = " ";
- output_enable = "output is enabled|";
+ pin_sts = "output";
if (pin_reg & BIT(OUTPUT_VALUE_OFF))
- output_value = "output is high|";
+ orientation = "↑";
else
- output_value = "output is low|";
+ orientation = "↓";
} else {
- output_enable = "output is disabled|";
- output_value = " ";
-
+ pin_sts = "input ";
if (pin_reg & BIT(PIN_STS_OFF))
- pin_sts = "input is high|";
+ orientation = "↑";
else
- pin_sts = "input is low|";
+ orientation = "↓";
}
+ seq_printf(s, "%s %s| ", pin_sts, orientation);
db_cntrl = (DB_CNTRl_MASK << DB_CNTRL_OFF) & pin_reg;
if (db_cntrl) {
@@ -352,27 +357,18 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
unit = 61;
}
if ((DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (high and low) enabled|";
+ debounce_enable = "b +";
else if ((DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (low) enabled|";
+ debounce_enable = "↓ +";
else
- debounce_enable = "debouncing filter (high) enabled|";
+ debounce_enable = "↑ +";
- snprintf(debounce_value, sizeof(debounce_value),
- "debouncing timeout is %u (us)|", time * unit);
} else {
- debounce_enable = "debouncing filter disabled|";
- snprintf(debounce_value, sizeof(debounce_value), " ");
+ debounce_enable = " ∅";
}
-
- seq_printf(s, "%s %s %s %s %s %s\n"
- " %s %s %s %s %s %s %s %s %s 0x%x\n",
- level_trig, active_level, interrupt_enable,
- interrupt_mask, wake_cntrl0, wake_cntrl1,
- wake_cntrl2, pin_sts, pull_up_sel,
- pull_up_enable, pull_down_enable,
- output_value, output_enable,
- debounce_enable, debounce_value, pin_reg);
+ snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
+ seq_printf(s, "debounce %s (⏰ %sus)| ", debounce_enable, debounce_value);
+ seq_printf(s, " 0x%x\n", pin_reg);
}
}
}
@@ -917,6 +913,7 @@ static int amd_gpio_suspend(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -925,7 +922,9 @@ static int amd_gpio_suspend(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -935,6 +934,7 @@ static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -943,7 +943,10 @@ static int amd_gpio_resume(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -955,14 +958,115 @@ static const struct dev_pm_ops amd_gpio_pm_ops = {
};
#endif
+static int amd_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pmx_functions);
+}
+
+static const char *amd_get_fname(struct pinctrl_dev *pctrldev, unsigned int selector)
+{
+ return pmx_functions[selector].name;
+}
+
+static int amd_get_groups(struct pinctrl_dev *pctrldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (!gpio_dev->iomux_base) {
+ dev_err(&gpio_dev->pdev->dev, "iomux function %d group not supported\n", selector);
+ return -EINVAL;
+ }
+
+ *groups = pmx_functions[selector].groups;
+ *num_groups = pmx_functions[selector].ngroups;
+ return 0;
+}
+
+static int amd_set_mux(struct pinctrl_dev *pctrldev, unsigned int function, unsigned int group)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+ struct device *dev = &gpio_dev->pdev->dev;
+ struct pin_desc *pd;
+ int ind, index;
+
+ if (!gpio_dev->iomux_base)
+ return -EINVAL;
+
+ for (index = 0; index < NSELECTS; index++) {
+ if (strcmp(gpio_dev->groups[group].name, pmx_functions[function].groups[index]))
+ continue;
+
+ if (readb(gpio_dev->iomux_base + pmx_functions[function].index) ==
+ FUNCTION_INVALID) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ writeb(index, gpio_dev->iomux_base + pmx_functions[function].index);
+
+ if (index != (readb(gpio_dev->iomux_base + pmx_functions[function].index) &
+ FUNCTION_MASK)) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ for (ind = 0; ind < gpio_dev->groups[group].npins; ind++) {
+ if (strncmp(gpio_dev->groups[group].name, "IMX_F", strlen("IMX_F")))
+ continue;
+
+ pd = pin_desc_get(gpio_dev->pctrl, gpio_dev->groups[group].pins[ind]);
+ pd->mux_owner = gpio_dev->groups[group].name;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops amd_pmxops = {
+ .get_functions_count = amd_get_functions_count,
+ .get_function_name = amd_get_fname,
+ .get_function_groups = amd_get_groups,
+ .set_mux = amd_set_mux,
+};
+
static struct pinctrl_desc amd_pinctrl_desc = {
.pins = kerncz_pins,
.npins = ARRAY_SIZE(kerncz_pins),
.pctlops = &amd_pinctrl_ops,
+ .pmxops = &amd_pmxops,
.confops = &amd_pinconf_ops,
.owner = THIS_MODULE,
};
+static void amd_get_iomux_res(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = &amd_pinctrl_desc;
+ struct device *dev = &gpio_dev->pdev->dev;
+ int index;
+
+ index = device_property_match_string(dev, "pinctrl-resource-names", "iomux");
+ if (index < 0) {
+ dev_warn(dev, "failed to get iomux index\n");
+ goto out_no_pinmux;
+ }
+
+ gpio_dev->iomux_base = devm_platform_ioremap_resource(gpio_dev->pdev, index);
+ if (IS_ERR(gpio_dev->iomux_base)) {
+ dev_warn(dev, "Failed to get iomux %d io resource\n", index);
+ goto out_no_pinmux;
+ }
+
+ return;
+
+out_no_pinmux:
+ desc->pmxops = NULL;
+}
+
static int amd_gpio_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -977,17 +1081,12 @@ static int amd_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&gpio_dev->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ gpio_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(gpio_dev->base)) {
dev_err(&pdev->dev, "Failed to get gpio io resource.\n");
- return -EINVAL;
+ return PTR_ERR(gpio_dev->base);
}
- gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!gpio_dev->base)
- return -ENOMEM;
-
gpio_dev->irq = platform_get_irq(pdev, 0);
if (gpio_dev->irq < 0)
return gpio_dev->irq;
@@ -1020,6 +1119,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
amd_pinctrl_desc.name = dev_name(&pdev->dev);
+ amd_get_iomux_res(gpio_dev);
gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
gpio_dev);
if (IS_ERR(gpio_dev->pctrl)) {
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 1d4317073654..c8635998465d 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -74,23 +74,24 @@
#define CLR_INTR_STAT 0x1UL
-struct amd_pingroup {
- const char *name;
- const unsigned *pins;
- unsigned npins;
-};
+#define NSELECTS 0x4
+
+#define FUNCTION_MASK GENMASK(1, 0)
+#define FUNCTION_INVALID GENMASK(7, 0)
struct amd_function {
const char *name;
- const char * const *groups;
+ const char * const groups[NSELECTS];
unsigned ngroups;
+ int index;
};
struct amd_gpio {
raw_spinlock_t lock;
void __iomem *base;
+ void __iomem *iomux_base;
- const struct amd_pingroup *groups;
+ const struct pingroup *groups;
u32 ngroups;
struct pinctrl_dev *pctrl;
struct gpio_chip gc;
@@ -288,45 +289,1332 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(183, "GPIO_183"),
};
-static const unsigned i2c0_pins[] = {145, 146};
-static const unsigned i2c1_pins[] = {147, 148};
-static const unsigned i2c2_pins[] = {113, 114};
-static const unsigned i2c3_pins[] = {19, 20};
+#define AMD_PINS(...) (const unsigned int []){__VA_ARGS__}
+
+enum amd_functions {
+ IMX_F0_GPIO0,
+ IMX_F1_GPIO0,
+ IMX_F2_GPIO0,
+ IMX_F3_GPIO0,
+ IMX_F0_GPIO1,
+ IMX_F1_GPIO1,
+ IMX_F2_GPIO1,
+ IMX_F3_GPIO1,
+ IMX_F0_GPIO2,
+ IMX_F1_GPIO2,
+ IMX_F2_GPIO2,
+ IMX_F3_GPIO2,
+ IMX_F0_GPIO3,
+ IMX_F1_GPIO3,
+ IMX_F2_GPIO3,
+ IMX_F3_GPIO3,
+ IMX_F0_GPIO4,
+ IMX_F1_GPIO4,
+ IMX_F2_GPIO4,
+ IMX_F3_GPIO4,
+ IMX_F0_GPIO5,
+ IMX_F1_GPIO5,
+ IMX_F2_GPIO5,
+ IMX_F3_GPIO5,
+ IMX_F0_GPIO6,
+ IMX_F1_GPIO6,
+ IMX_F2_GPIO6,
+ IMX_F3_GPIO6,
+ IMX_F0_GPIO7,
+ IMX_F1_GPIO7,
+ IMX_F2_GPIO7,
+ IMX_F3_GPIO7,
+ IMX_F0_GPIO8,
+ IMX_F1_GPIO8,
+ IMX_F2_GPIO8,
+ IMX_F3_GPIO8,
+ IMX_F0_GPIO9,
+ IMX_F1_GPIO9,
+ IMX_F2_GPIO9,
+ IMX_F3_GPIO9,
+ IMX_F0_GPIO10,
+ IMX_F1_GPIO10,
+ IMX_F2_GPIO10,
+ IMX_F3_GPIO10,
+ IMX_F0_GPIO11,
+ IMX_F1_GPIO11,
+ IMX_F2_GPIO11,
+ IMX_F3_GPIO11,
+ IMX_F0_GPIO12,
+ IMX_F1_GPIO12,
+ IMX_F2_GPIO12,
+ IMX_F3_GPIO12,
+ IMX_F0_GPIO13,
+ IMX_F1_GPIO13,
+ IMX_F2_GPIO13,
+ IMX_F3_GPIO13,
+ IMX_F0_GPIO14,
+ IMX_F1_GPIO14,
+ IMX_F2_GPIO14,
+ IMX_F3_GPIO14,
+ IMX_F0_GPIO15,
+ IMX_F1_GPIO15,
+ IMX_F2_GPIO15,
+ IMX_F3_GPIO15,
+ IMX_F0_GPIO16,
+ IMX_F1_GPIO16,
+ IMX_F2_GPIO16,
+ IMX_F3_GPIO16,
+ IMX_F0_GPIO17,
+ IMX_F1_GPIO17,
+ IMX_F2_GPIO17,
+ IMX_F3_GPIO17,
+ IMX_F0_GPIO18,
+ IMX_F1_GPIO18,
+ IMX_F2_GPIO18,
+ IMX_F3_GPIO18,
+ IMX_F0_GPIO19,
+ IMX_F1_GPIO19,
+ IMX_F2_GPIO19,
+ IMX_F3_GPIO19,
+ IMX_F0_GPIO20,
+ IMX_F1_GPIO20,
+ IMX_F2_GPIO20,
+ IMX_F3_GPIO20,
+ IMX_F0_GPIO21,
+ IMX_F1_GPIO21,
+ IMX_F2_GPIO21,
+ IMX_F3_GPIO21,
+ IMX_F0_GPIO22,
+ IMX_F1_GPIO22,
+ IMX_F2_GPIO22,
+ IMX_F3_GPIO22,
+ IMX_F0_GPIO23,
+ IMX_F1_GPIO23,
+ IMX_F2_GPIO23,
+ IMX_F3_GPIO23,
+ IMX_F0_GPIO24,
+ IMX_F1_GPIO24,
+ IMX_F2_GPIO24,
+ IMX_F3_GPIO24,
+ IMX_F0_GPIO25,
+ IMX_F1_GPIO25,
+ IMX_F2_GPIO25,
+ IMX_F3_GPIO25,
+ IMX_F0_GPIO26,
+ IMX_F1_GPIO26,
+ IMX_F2_GPIO26,
+ IMX_F3_GPIO26,
+ IMX_F0_GPIO27,
+ IMX_F1_GPIO27,
+ IMX_F2_GPIO27,
+ IMX_F3_GPIO27,
+ IMX_F0_GPIO28,
+ IMX_F1_GPIO28,
+ IMX_F2_GPIO28,
+ IMX_F3_GPIO28,
+ IMX_F0_GPIO29,
+ IMX_F1_GPIO29,
+ IMX_F2_GPIO29,
+ IMX_F3_GPIO29,
+ IMX_F0_GPIO30,
+ IMX_F1_GPIO30,
+ IMX_F2_GPIO30,
+ IMX_F3_GPIO30,
+ IMX_F0_GPIO31,
+ IMX_F1_GPIO31,
+ IMX_F2_GPIO31,
+ IMX_F3_GPIO31,
+ IMX_F0_GPIO32,
+ IMX_F1_GPIO32,
+ IMX_F2_GPIO32,
+ IMX_F3_GPIO32,
+ IMX_F0_GPIO33,
+ IMX_F1_GPIO33,
+ IMX_F2_GPIO33,
+ IMX_F3_GPIO33,
+ IMX_F0_GPIO34,
+ IMX_F1_GPIO34,
+ IMX_F2_GPIO34,
+ IMX_F3_GPIO34,
+ IMX_F0_GPIO35,
+ IMX_F1_GPIO35,
+ IMX_F2_GPIO35,
+ IMX_F3_GPIO35,
+ IMX_F0_GPIO36,
+ IMX_F1_GPIO36,
+ IMX_F2_GPIO36,
+ IMX_F3_GPIO36,
+ IMX_F0_GPIO37,
+ IMX_F1_GPIO37,
+ IMX_F2_GPIO37,
+ IMX_F3_GPIO37,
+ IMX_F0_GPIO38,
+ IMX_F1_GPIO38,
+ IMX_F2_GPIO38,
+ IMX_F3_GPIO38,
+ IMX_F0_GPIO39,
+ IMX_F1_GPIO39,
+ IMX_F2_GPIO39,
+ IMX_F3_GPIO39,
+ IMX_F0_GPIO40,
+ IMX_F1_GPIO40,
+ IMX_F2_GPIO40,
+ IMX_F3_GPIO40,
+ IMX_F0_GPIO41,
+ IMX_F1_GPIO41,
+ IMX_F2_GPIO41,
+ IMX_F3_GPIO41,
+ IMX_F0_GPIO42,
+ IMX_F1_GPIO42,
+ IMX_F2_GPIO42,
+ IMX_F3_GPIO42,
+ IMX_F0_GPIO43,
+ IMX_F1_GPIO43,
+ IMX_F2_GPIO43,
+ IMX_F3_GPIO43,
+ IMX_F0_GPIO44,
+ IMX_F1_GPIO44,
+ IMX_F2_GPIO44,
+ IMX_F3_GPIO44,
+ IMX_F0_GPIO45,
+ IMX_F1_GPIO45,
+ IMX_F2_GPIO45,
+ IMX_F3_GPIO45,
+ IMX_F0_GPIO46,
+ IMX_F1_GPIO46,
+ IMX_F2_GPIO46,
+ IMX_F3_GPIO46,
+ IMX_F0_GPIO47,
+ IMX_F1_GPIO47,
+ IMX_F2_GPIO47,
+ IMX_F3_GPIO47,
+ IMX_F0_GPIO48,
+ IMX_F1_GPIO48,
+ IMX_F2_GPIO48,
+ IMX_F3_GPIO48,
+ IMX_F0_GPIO49,
+ IMX_F1_GPIO49,
+ IMX_F2_GPIO49,
+ IMX_F3_GPIO49,
+ IMX_F0_GPIO50,
+ IMX_F1_GPIO50,
+ IMX_F2_GPIO50,
+ IMX_F3_GPIO50,
+ IMX_F0_GPIO51,
+ IMX_F1_GPIO51,
+ IMX_F2_GPIO51,
+ IMX_F3_GPIO51,
+ IMX_F0_GPIO52,
+ IMX_F1_GPIO52,
+ IMX_F2_GPIO52,
+ IMX_F3_GPIO52,
+ IMX_F0_GPIO53,
+ IMX_F1_GPIO53,
+ IMX_F2_GPIO53,
+ IMX_F3_GPIO53,
+ IMX_F0_GPIO54,
+ IMX_F1_GPIO54,
+ IMX_F2_GPIO54,
+ IMX_F3_GPIO54,
+ IMX_F0_GPIO55,
+ IMX_F1_GPIO55,
+ IMX_F2_GPIO55,
+ IMX_F3_GPIO55,
+ IMX_F0_GPIO56,
+ IMX_F1_GPIO56,
+ IMX_F2_GPIO56,
+ IMX_F3_GPIO56,
+ IMX_F0_GPIO57,
+ IMX_F1_GPIO57,
+ IMX_F2_GPIO57,
+ IMX_F3_GPIO57,
+ IMX_F0_GPIO58,
+ IMX_F1_GPIO58,
+ IMX_F2_GPIO58,
+ IMX_F3_GPIO58,
+ IMX_F0_GPIO59,
+ IMX_F1_GPIO59,
+ IMX_F2_GPIO59,
+ IMX_F3_GPIO59,
+ IMX_F0_GPIO60,
+ IMX_F1_GPIO60,
+ IMX_F2_GPIO60,
+ IMX_F3_GPIO60,
+ IMX_F0_GPIO61,
+ IMX_F1_GPIO61,
+ IMX_F2_GPIO61,
+ IMX_F3_GPIO61,
+ IMX_F0_GPIO62,
+ IMX_F1_GPIO62,
+ IMX_F2_GPIO62,
+ IMX_F3_GPIO62,
+ IMX_F0_GPIO64,
+ IMX_F1_GPIO64,
+ IMX_F2_GPIO64,
+ IMX_F3_GPIO64,
+ IMX_F0_GPIO65,
+ IMX_F1_GPIO65,
+ IMX_F2_GPIO65,
+ IMX_F3_GPIO65,
+ IMX_F0_GPIO66,
+ IMX_F1_GPIO66,
+ IMX_F2_GPIO66,
+ IMX_F3_GPIO66,
+ IMX_F0_GPIO67,
+ IMX_F1_GPIO67,
+ IMX_F2_GPIO67,
+ IMX_F3_GPIO67,
+ IMX_F0_GPIO68,
+ IMX_F1_GPIO68,
+ IMX_F2_GPIO68,
+ IMX_F3_GPIO68,
+ IMX_F0_GPIO69,
+ IMX_F1_GPIO69,
+ IMX_F2_GPIO69,
+ IMX_F3_GPIO69,
+ IMX_F0_GPIO70,
+ IMX_F1_GPIO70,
+ IMX_F2_GPIO70,
+ IMX_F3_GPIO70,
+ IMX_F0_GPIO71,
+ IMX_F1_GPIO71,
+ IMX_F2_GPIO71,
+ IMX_F3_GPIO71,
+ IMX_F0_GPIO72,
+ IMX_F1_GPIO72,
+ IMX_F2_GPIO72,
+ IMX_F3_GPIO72,
+ IMX_F0_GPIO73,
+ IMX_F1_GPIO73,
+ IMX_F2_GPIO73,
+ IMX_F3_GPIO73,
+ IMX_F0_GPIO74,
+ IMX_F1_GPIO74,
+ IMX_F2_GPIO74,
+ IMX_F3_GPIO74,
+ IMX_F0_GPIO75,
+ IMX_F1_GPIO75,
+ IMX_F2_GPIO75,
+ IMX_F3_GPIO75,
+ IMX_F0_GPIO76,
+ IMX_F1_GPIO76,
+ IMX_F2_GPIO76,
+ IMX_F3_GPIO76,
+ IMX_F0_GPIO77,
+ IMX_F1_GPIO77,
+ IMX_F2_GPIO77,
+ IMX_F3_GPIO77,
+ IMX_F0_GPIO78,
+ IMX_F1_GPIO78,
+ IMX_F2_GPIO78,
+ IMX_F3_GPIO78,
+ IMX_F0_GPIO79,
+ IMX_F1_GPIO79,
+ IMX_F2_GPIO79,
+ IMX_F3_GPIO79,
+ IMX_F0_GPIO80,
+ IMX_F1_GPIO80,
+ IMX_F2_GPIO80,
+ IMX_F3_GPIO80,
+ IMX_F0_GPIO81,
+ IMX_F1_GPIO81,
+ IMX_F2_GPIO81,
+ IMX_F3_GPIO81,
+ IMX_F0_GPIO82,
+ IMX_F1_GPIO82,
+ IMX_F2_GPIO82,
+ IMX_F3_GPIO82,
+ IMX_F0_GPIO83,
+ IMX_F1_GPIO83,
+ IMX_F2_GPIO83,
+ IMX_F3_GPIO83,
+ IMX_F0_GPIO84,
+ IMX_F1_GPIO84,
+ IMX_F2_GPIO84,
+ IMX_F3_GPIO84,
+ IMX_F0_GPIO85,
+ IMX_F1_GPIO85,
+ IMX_F2_GPIO85,
+ IMX_F3_GPIO85,
+ IMX_F0_GPIO86,
+ IMX_F1_GPIO86,
+ IMX_F2_GPIO86,
+ IMX_F3_GPIO86,
+ IMX_F0_GPIO87,
+ IMX_F1_GPIO87,
+ IMX_F2_GPIO87,
+ IMX_F3_GPIO87,
+ IMX_F0_GPIO88,
+ IMX_F1_GPIO88,
+ IMX_F2_GPIO88,
+ IMX_F3_GPIO88,
+ IMX_F0_GPIO89,
+ IMX_F1_GPIO89,
+ IMX_F2_GPIO89,
+ IMX_F3_GPIO89,
+ IMX_F0_GPIO90,
+ IMX_F1_GPIO90,
+ IMX_F2_GPIO90,
+ IMX_F3_GPIO90,
+ IMX_F0_GPIO91,
+ IMX_F1_GPIO91,
+ IMX_F2_GPIO91,
+ IMX_F3_GPIO91,
+ IMX_F0_GPIO92,
+ IMX_F1_GPIO92,
+ IMX_F2_GPIO92,
+ IMX_F3_GPIO92,
+ IMX_F0_GPIO93,
+ IMX_F1_GPIO93,
+ IMX_F2_GPIO93,
+ IMX_F3_GPIO93,
+ IMX_F0_GPIO94,
+ IMX_F1_GPIO94,
+ IMX_F2_GPIO94,
+ IMX_F3_GPIO94,
+ IMX_F0_GPIO95,
+ IMX_F1_GPIO95,
+ IMX_F2_GPIO95,
+ IMX_F3_GPIO95,
+ IMX_F0_GPIO96,
+ IMX_F1_GPIO96,
+ IMX_F2_GPIO96,
+ IMX_F3_GPIO96,
+ IMX_F0_GPIO97,
+ IMX_F1_GPIO97,
+ IMX_F2_GPIO97,
+ IMX_F3_GPIO97,
+ IMX_F0_GPIO98,
+ IMX_F1_GPIO98,
+ IMX_F2_GPIO98,
+ IMX_F3_GPIO98,
+ IMX_F0_GPIO99,
+ IMX_F1_GPIO99,
+ IMX_F2_GPIO99,
+ IMX_F3_GPIO99,
+ IMX_F0_GPIO100,
+ IMX_F1_GPIO100,
+ IMX_F2_GPIO100,
+ IMX_F3_GPIO100,
+ IMX_F0_GPIO101,
+ IMX_F1_GPIO101,
+ IMX_F2_GPIO101,
+ IMX_F3_GPIO101,
+ IMX_F0_GPIO102,
+ IMX_F1_GPIO102,
+ IMX_F2_GPIO102,
+ IMX_F3_GPIO102,
+ IMX_F0_GPIO103,
+ IMX_F1_GPIO103,
+ IMX_F2_GPIO103,
+ IMX_F3_GPIO103,
+ IMX_F0_GPIO104,
+ IMX_F1_GPIO104,
+ IMX_F2_GPIO104,
+ IMX_F3_GPIO104,
+ IMX_F0_GPIO105,
+ IMX_F1_GPIO105,
+ IMX_F2_GPIO105,
+ IMX_F3_GPIO105,
+ IMX_F0_GPIO106,
+ IMX_F1_GPIO106,
+ IMX_F2_GPIO106,
+ IMX_F3_GPIO106,
+ IMX_F0_GPIO107,
+ IMX_F1_GPIO107,
+ IMX_F2_GPIO107,
+ IMX_F3_GPIO107,
+ IMX_F0_GPIO108,
+ IMX_F1_GPIO108,
+ IMX_F2_GPIO108,
+ IMX_F3_GPIO108,
+ IMX_F0_GPIO109,
+ IMX_F1_GPIO109,
+ IMX_F2_GPIO109,
+ IMX_F3_GPIO109,
+ IMX_F0_GPIO110,
+ IMX_F1_GPIO110,
+ IMX_F2_GPIO110,
+ IMX_F3_GPIO110,
+ IMX_F0_GPIO111,
+ IMX_F1_GPIO111,
+ IMX_F2_GPIO111,
+ IMX_F3_GPIO111,
+ IMX_F0_GPIO112,
+ IMX_F1_GPIO112,
+ IMX_F2_GPIO112,
+ IMX_F3_GPIO112,
+ IMX_F0_GPIO113,
+ IMX_F1_GPIO113,
+ IMX_F2_GPIO113,
+ IMX_F3_GPIO113,
+ IMX_F0_GPIO114,
+ IMX_F1_GPIO114,
+ IMX_F2_GPIO114,
+ IMX_F3_GPIO114,
+ IMX_F0_GPIO115,
+ IMX_F1_GPIO115,
+ IMX_F2_GPIO115,
+ IMX_F3_GPIO115,
+ IMX_F0_GPIO116,
+ IMX_F1_GPIO116,
+ IMX_F2_GPIO116,
+ IMX_F3_GPIO116,
+ IMX_F0_GPIO117,
+ IMX_F1_GPIO117,
+ IMX_F2_GPIO117,
+ IMX_F3_GPIO117,
+ IMX_F0_GPIO118,
+ IMX_F1_GPIO118,
+ IMX_F2_GPIO118,
+ IMX_F3_GPIO118,
+ IMX_F0_GPIO119,
+ IMX_F1_GPIO119,
+ IMX_F2_GPIO119,
+ IMX_F3_GPIO119,
+ IMX_F0_GPIO120,
+ IMX_F1_GPIO120,
+ IMX_F2_GPIO120,
+ IMX_F3_GPIO120,
+ IMX_F0_GPIO121,
+ IMX_F1_GPIO121,
+ IMX_F2_GPIO121,
+ IMX_F3_GPIO121,
+ IMX_F0_GPIO122,
+ IMX_F1_GPIO122,
+ IMX_F2_GPIO122,
+ IMX_F3_GPIO122,
+ IMX_F0_GPIO123,
+ IMX_F1_GPIO123,
+ IMX_F2_GPIO123,
+ IMX_F3_GPIO123,
+ IMX_F0_GPIO124,
+ IMX_F1_GPIO124,
+ IMX_F2_GPIO124,
+ IMX_F3_GPIO124,
+ IMX_F0_GPIO125,
+ IMX_F1_GPIO125,
+ IMX_F2_GPIO125,
+ IMX_F3_GPIO125,
+ IMX_F0_GPIO126,
+ IMX_F1_GPIO126,
+ IMX_F2_GPIO126,
+ IMX_F3_GPIO126,
+ IMX_F0_GPIO127,
+ IMX_F1_GPIO127,
+ IMX_F2_GPIO127,
+ IMX_F3_GPIO127,
+ IMX_F0_GPIO128,
+ IMX_F1_GPIO128,
+ IMX_F2_GPIO128,
+ IMX_F3_GPIO128,
+ IMX_F0_GPIO129,
+ IMX_F1_GPIO129,
+ IMX_F2_GPIO129,
+ IMX_F3_GPIO129,
+ IMX_F0_GPIO130,
+ IMX_F1_GPIO130,
+ IMX_F2_GPIO130,
+ IMX_F3_GPIO130,
+ IMX_F0_GPIO131,
+ IMX_F1_GPIO131,
+ IMX_F2_GPIO131,
+ IMX_F3_GPIO131,
+ IMX_F0_GPIO132,
+ IMX_F1_GPIO132,
+ IMX_F2_GPIO132,
+ IMX_F3_GPIO132,
+ IMX_F0_GPIO133,
+ IMX_F1_GPIO133,
+ IMX_F2_GPIO133,
+ IMX_F3_GPIO133,
+ IMX_F0_GPIO134,
+ IMX_F1_GPIO134,
+ IMX_F2_GPIO134,
+ IMX_F3_GPIO134,
+ IMX_F0_GPIO135,
+ IMX_F1_GPIO135,
+ IMX_F2_GPIO135,
+ IMX_F3_GPIO135,
+ IMX_F0_GPIO136,
+ IMX_F1_GPIO136,
+ IMX_F2_GPIO136,
+ IMX_F3_GPIO136,
+ IMX_F0_GPIO137,
+ IMX_F1_GPIO137,
+ IMX_F2_GPIO137,
+ IMX_F3_GPIO137,
+ IMX_F0_GPIO138,
+ IMX_F1_GPIO138,
+ IMX_F2_GPIO138,
+ IMX_F3_GPIO138,
+ IMX_F0_GPIO139,
+ IMX_F1_GPIO139,
+ IMX_F2_GPIO139,
+ IMX_F3_GPIO139,
+ IMX_F0_GPIO140,
+ IMX_F1_GPIO140,
+ IMX_F2_GPIO140,
+ IMX_F3_GPIO140,
+ IMX_F0_GPIO141,
+ IMX_F1_GPIO141,
+ IMX_F2_GPIO141,
+ IMX_F3_GPIO141,
+ IMX_F0_GPIO142,
+ IMX_F1_GPIO142,
+ IMX_F2_GPIO142,
+ IMX_F3_GPIO142,
+ IMX_F0_GPIO143,
+ IMX_F1_GPIO143,
+ IMX_F2_GPIO143,
+ IMX_F3_GPIO143,
+ IMX_F0_GPIO144,
+ IMX_F1_GPIO144,
+ IMX_F2_GPIO144,
+ IMX_F3_GPIO144,
+};
+
+#define AMD_PINCTRL_FUNC_GRP(_number, _func) \
+ [IMX_F##_func##_GPIO##_number] = \
+ PINCTRL_PINGROUP("IMX_F"#_func "_GPIO"#_number, AMD_PINS(_number), 1)
+
+static const struct pingroup kerncz_groups[] = {
+ AMD_PINCTRL_FUNC_GRP(0, 0),
+ AMD_PINCTRL_FUNC_GRP(0, 1),
+ AMD_PINCTRL_FUNC_GRP(0, 2),
+ AMD_PINCTRL_FUNC_GRP(0, 3),
+ AMD_PINCTRL_FUNC_GRP(1, 0),
+ AMD_PINCTRL_FUNC_GRP(1, 1),
+ AMD_PINCTRL_FUNC_GRP(1, 2),
+ AMD_PINCTRL_FUNC_GRP(1, 3),
+ AMD_PINCTRL_FUNC_GRP(2, 0),
+ AMD_PINCTRL_FUNC_GRP(2, 1),
+ AMD_PINCTRL_FUNC_GRP(2, 2),
+ AMD_PINCTRL_FUNC_GRP(2, 3),
+ AMD_PINCTRL_FUNC_GRP(3, 0),
+ AMD_PINCTRL_FUNC_GRP(3, 1),
+ AMD_PINCTRL_FUNC_GRP(3, 2),
+ AMD_PINCTRL_FUNC_GRP(3, 3),
+ AMD_PINCTRL_FUNC_GRP(4, 0),
+ AMD_PINCTRL_FUNC_GRP(4, 1),
+ AMD_PINCTRL_FUNC_GRP(4, 2),
+ AMD_PINCTRL_FUNC_GRP(4, 3),
+ AMD_PINCTRL_FUNC_GRP(5, 0),
+ AMD_PINCTRL_FUNC_GRP(5, 1),
+ AMD_PINCTRL_FUNC_GRP(5, 2),
+ AMD_PINCTRL_FUNC_GRP(5, 3),
+ AMD_PINCTRL_FUNC_GRP(6, 0),
+ AMD_PINCTRL_FUNC_GRP(6, 1),
+ AMD_PINCTRL_FUNC_GRP(6, 2),
+ AMD_PINCTRL_FUNC_GRP(6, 3),
+ AMD_PINCTRL_FUNC_GRP(7, 0),
+ AMD_PINCTRL_FUNC_GRP(7, 1),
+ AMD_PINCTRL_FUNC_GRP(7, 2),
+ AMD_PINCTRL_FUNC_GRP(7, 3),
+ AMD_PINCTRL_FUNC_GRP(8, 0),
+ AMD_PINCTRL_FUNC_GRP(8, 1),
+ AMD_PINCTRL_FUNC_GRP(8, 2),
+ AMD_PINCTRL_FUNC_GRP(8, 3),
+ AMD_PINCTRL_FUNC_GRP(9, 0),
+ AMD_PINCTRL_FUNC_GRP(9, 1),
+ AMD_PINCTRL_FUNC_GRP(9, 2),
+ AMD_PINCTRL_FUNC_GRP(9, 3),
+ AMD_PINCTRL_FUNC_GRP(10, 0),
+ AMD_PINCTRL_FUNC_GRP(10, 1),
+ AMD_PINCTRL_FUNC_GRP(10, 2),
+ AMD_PINCTRL_FUNC_GRP(10, 3),
+ AMD_PINCTRL_FUNC_GRP(11, 0),
+ AMD_PINCTRL_FUNC_GRP(11, 1),
+ AMD_PINCTRL_FUNC_GRP(11, 2),
+ AMD_PINCTRL_FUNC_GRP(11, 3),
+ AMD_PINCTRL_FUNC_GRP(12, 0),
+ AMD_PINCTRL_FUNC_GRP(12, 1),
+ AMD_PINCTRL_FUNC_GRP(12, 2),
+ AMD_PINCTRL_FUNC_GRP(12, 3),
+ AMD_PINCTRL_FUNC_GRP(13, 0),
+ AMD_PINCTRL_FUNC_GRP(13, 1),
+ AMD_PINCTRL_FUNC_GRP(13, 2),
+ AMD_PINCTRL_FUNC_GRP(13, 3),
+ AMD_PINCTRL_FUNC_GRP(14, 0),
+ AMD_PINCTRL_FUNC_GRP(14, 1),
+ AMD_PINCTRL_FUNC_GRP(14, 2),
+ AMD_PINCTRL_FUNC_GRP(14, 3),
+ AMD_PINCTRL_FUNC_GRP(15, 0),
+ AMD_PINCTRL_FUNC_GRP(15, 1),
+ AMD_PINCTRL_FUNC_GRP(15, 2),
+ AMD_PINCTRL_FUNC_GRP(15, 3),
+ AMD_PINCTRL_FUNC_GRP(16, 0),
+ AMD_PINCTRL_FUNC_GRP(16, 1),
+ AMD_PINCTRL_FUNC_GRP(16, 2),
+ AMD_PINCTRL_FUNC_GRP(16, 3),
+ AMD_PINCTRL_FUNC_GRP(17, 0),
+ AMD_PINCTRL_FUNC_GRP(17, 1),
+ AMD_PINCTRL_FUNC_GRP(17, 2),
+ AMD_PINCTRL_FUNC_GRP(17, 3),
+ AMD_PINCTRL_FUNC_GRP(18, 0),
+ AMD_PINCTRL_FUNC_GRP(18, 1),
+ AMD_PINCTRL_FUNC_GRP(18, 2),
+ AMD_PINCTRL_FUNC_GRP(18, 3),
+ AMD_PINCTRL_FUNC_GRP(19, 0),
+ AMD_PINCTRL_FUNC_GRP(19, 1),
+ AMD_PINCTRL_FUNC_GRP(19, 2),
+ AMD_PINCTRL_FUNC_GRP(19, 3),
+ AMD_PINCTRL_FUNC_GRP(20, 0),
+ AMD_PINCTRL_FUNC_GRP(20, 1),
+ AMD_PINCTRL_FUNC_GRP(20, 2),
+ AMD_PINCTRL_FUNC_GRP(20, 3),
+ AMD_PINCTRL_FUNC_GRP(21, 0),
+ AMD_PINCTRL_FUNC_GRP(21, 1),
+ AMD_PINCTRL_FUNC_GRP(21, 2),
+ AMD_PINCTRL_FUNC_GRP(21, 3),
+ AMD_PINCTRL_FUNC_GRP(22, 0),
+ AMD_PINCTRL_FUNC_GRP(22, 1),
+ AMD_PINCTRL_FUNC_GRP(22, 2),
+ AMD_PINCTRL_FUNC_GRP(22, 3),
+ AMD_PINCTRL_FUNC_GRP(23, 0),
+ AMD_PINCTRL_FUNC_GRP(23, 1),
+ AMD_PINCTRL_FUNC_GRP(23, 2),
+ AMD_PINCTRL_FUNC_GRP(23, 3),
+ AMD_PINCTRL_FUNC_GRP(24, 0),
+ AMD_PINCTRL_FUNC_GRP(24, 1),
+ AMD_PINCTRL_FUNC_GRP(24, 2),
+ AMD_PINCTRL_FUNC_GRP(24, 3),
+ AMD_PINCTRL_FUNC_GRP(25, 0),
+ AMD_PINCTRL_FUNC_GRP(25, 1),
+ AMD_PINCTRL_FUNC_GRP(25, 2),
+ AMD_PINCTRL_FUNC_GRP(25, 3),
+ AMD_PINCTRL_FUNC_GRP(26, 0),
+ AMD_PINCTRL_FUNC_GRP(26, 1),
+ AMD_PINCTRL_FUNC_GRP(26, 2),
+ AMD_PINCTRL_FUNC_GRP(26, 3),
+ AMD_PINCTRL_FUNC_GRP(27, 0),
+ AMD_PINCTRL_FUNC_GRP(27, 1),
+ AMD_PINCTRL_FUNC_GRP(27, 2),
+ AMD_PINCTRL_FUNC_GRP(27, 3),
+ AMD_PINCTRL_FUNC_GRP(28, 0),
+ AMD_PINCTRL_FUNC_GRP(28, 1),
+ AMD_PINCTRL_FUNC_GRP(28, 2),
+ AMD_PINCTRL_FUNC_GRP(28, 3),
+ AMD_PINCTRL_FUNC_GRP(29, 0),
+ AMD_PINCTRL_FUNC_GRP(29, 1),
+ AMD_PINCTRL_FUNC_GRP(29, 2),
+ AMD_PINCTRL_FUNC_GRP(29, 3),
+ AMD_PINCTRL_FUNC_GRP(30, 0),
+ AMD_PINCTRL_FUNC_GRP(30, 1),
+ AMD_PINCTRL_FUNC_GRP(30, 2),
+ AMD_PINCTRL_FUNC_GRP(30, 3),
+ AMD_PINCTRL_FUNC_GRP(31, 0),
+ AMD_PINCTRL_FUNC_GRP(31, 1),
+ AMD_PINCTRL_FUNC_GRP(31, 2),
+ AMD_PINCTRL_FUNC_GRP(31, 3),
+ AMD_PINCTRL_FUNC_GRP(32, 0),
+ AMD_PINCTRL_FUNC_GRP(32, 1),
+ AMD_PINCTRL_FUNC_GRP(32, 2),
+ AMD_PINCTRL_FUNC_GRP(32, 3),
+ AMD_PINCTRL_FUNC_GRP(33, 0),
+ AMD_PINCTRL_FUNC_GRP(33, 1),
+ AMD_PINCTRL_FUNC_GRP(33, 2),
+ AMD_PINCTRL_FUNC_GRP(33, 3),
+ AMD_PINCTRL_FUNC_GRP(34, 0),
+ AMD_PINCTRL_FUNC_GRP(34, 1),
+ AMD_PINCTRL_FUNC_GRP(34, 2),
+ AMD_PINCTRL_FUNC_GRP(34, 3),
+ AMD_PINCTRL_FUNC_GRP(35, 0),
+ AMD_PINCTRL_FUNC_GRP(35, 1),
+ AMD_PINCTRL_FUNC_GRP(35, 2),
+ AMD_PINCTRL_FUNC_GRP(35, 3),
+ AMD_PINCTRL_FUNC_GRP(36, 0),
+ AMD_PINCTRL_FUNC_GRP(36, 1),
+ AMD_PINCTRL_FUNC_GRP(36, 2),
+ AMD_PINCTRL_FUNC_GRP(36, 3),
+ AMD_PINCTRL_FUNC_GRP(37, 0),
+ AMD_PINCTRL_FUNC_GRP(37, 1),
+ AMD_PINCTRL_FUNC_GRP(37, 2),
+ AMD_PINCTRL_FUNC_GRP(37, 3),
+ AMD_PINCTRL_FUNC_GRP(38, 0),
+ AMD_PINCTRL_FUNC_GRP(38, 1),
+ AMD_PINCTRL_FUNC_GRP(38, 2),
+ AMD_PINCTRL_FUNC_GRP(38, 3),
+ AMD_PINCTRL_FUNC_GRP(39, 0),
+ AMD_PINCTRL_FUNC_GRP(39, 1),
+ AMD_PINCTRL_FUNC_GRP(39, 2),
+ AMD_PINCTRL_FUNC_GRP(39, 3),
+ AMD_PINCTRL_FUNC_GRP(40, 0),
+ AMD_PINCTRL_FUNC_GRP(40, 1),
+ AMD_PINCTRL_FUNC_GRP(40, 2),
+ AMD_PINCTRL_FUNC_GRP(40, 3),
+ AMD_PINCTRL_FUNC_GRP(41, 0),
+ AMD_PINCTRL_FUNC_GRP(41, 1),
+ AMD_PINCTRL_FUNC_GRP(41, 2),
+ AMD_PINCTRL_FUNC_GRP(41, 3),
+ AMD_PINCTRL_FUNC_GRP(42, 0),
+ AMD_PINCTRL_FUNC_GRP(42, 1),
+ AMD_PINCTRL_FUNC_GRP(42, 2),
+ AMD_PINCTRL_FUNC_GRP(42, 3),
+ AMD_PINCTRL_FUNC_GRP(43, 0),
+ AMD_PINCTRL_FUNC_GRP(43, 1),
+ AMD_PINCTRL_FUNC_GRP(43, 2),
+ AMD_PINCTRL_FUNC_GRP(43, 3),
+ AMD_PINCTRL_FUNC_GRP(44, 0),
+ AMD_PINCTRL_FUNC_GRP(44, 1),
+ AMD_PINCTRL_FUNC_GRP(44, 2),
+ AMD_PINCTRL_FUNC_GRP(44, 3),
+ AMD_PINCTRL_FUNC_GRP(45, 0),
+ AMD_PINCTRL_FUNC_GRP(45, 1),
+ AMD_PINCTRL_FUNC_GRP(45, 2),
+ AMD_PINCTRL_FUNC_GRP(45, 3),
+ AMD_PINCTRL_FUNC_GRP(46, 0),
+ AMD_PINCTRL_FUNC_GRP(46, 1),
+ AMD_PINCTRL_FUNC_GRP(46, 2),
+ AMD_PINCTRL_FUNC_GRP(46, 3),
+ AMD_PINCTRL_FUNC_GRP(47, 0),
+ AMD_PINCTRL_FUNC_GRP(47, 1),
+ AMD_PINCTRL_FUNC_GRP(47, 2),
+ AMD_PINCTRL_FUNC_GRP(47, 3),
+ AMD_PINCTRL_FUNC_GRP(48, 0),
+ AMD_PINCTRL_FUNC_GRP(48, 1),
+ AMD_PINCTRL_FUNC_GRP(48, 2),
+ AMD_PINCTRL_FUNC_GRP(48, 3),
+ AMD_PINCTRL_FUNC_GRP(49, 0),
+ AMD_PINCTRL_FUNC_GRP(49, 1),
+ AMD_PINCTRL_FUNC_GRP(49, 2),
+ AMD_PINCTRL_FUNC_GRP(49, 3),
+ AMD_PINCTRL_FUNC_GRP(50, 0),
+ AMD_PINCTRL_FUNC_GRP(50, 1),
+ AMD_PINCTRL_FUNC_GRP(50, 2),
+ AMD_PINCTRL_FUNC_GRP(50, 3),
+ AMD_PINCTRL_FUNC_GRP(51, 0),
+ AMD_PINCTRL_FUNC_GRP(51, 1),
+ AMD_PINCTRL_FUNC_GRP(51, 2),
+ AMD_PINCTRL_FUNC_GRP(51, 3),
+ AMD_PINCTRL_FUNC_GRP(52, 0),
+ AMD_PINCTRL_FUNC_GRP(52, 1),
+ AMD_PINCTRL_FUNC_GRP(52, 2),
+ AMD_PINCTRL_FUNC_GRP(52, 3),
+ AMD_PINCTRL_FUNC_GRP(53, 0),
+ AMD_PINCTRL_FUNC_GRP(53, 1),
+ AMD_PINCTRL_FUNC_GRP(53, 2),
+ AMD_PINCTRL_FUNC_GRP(53, 3),
+ AMD_PINCTRL_FUNC_GRP(54, 0),
+ AMD_PINCTRL_FUNC_GRP(54, 1),
+ AMD_PINCTRL_FUNC_GRP(54, 2),
+ AMD_PINCTRL_FUNC_GRP(54, 3),
+ AMD_PINCTRL_FUNC_GRP(55, 0),
+ AMD_PINCTRL_FUNC_GRP(55, 1),
+ AMD_PINCTRL_FUNC_GRP(55, 2),
+ AMD_PINCTRL_FUNC_GRP(55, 3),
+ AMD_PINCTRL_FUNC_GRP(56, 0),
+ AMD_PINCTRL_FUNC_GRP(56, 1),
+ AMD_PINCTRL_FUNC_GRP(56, 2),
+ AMD_PINCTRL_FUNC_GRP(56, 3),
+ AMD_PINCTRL_FUNC_GRP(57, 0),
+ AMD_PINCTRL_FUNC_GRP(57, 1),
+ AMD_PINCTRL_FUNC_GRP(57, 2),
+ AMD_PINCTRL_FUNC_GRP(57, 3),
+ AMD_PINCTRL_FUNC_GRP(58, 0),
+ AMD_PINCTRL_FUNC_GRP(58, 1),
+ AMD_PINCTRL_FUNC_GRP(58, 2),
+ AMD_PINCTRL_FUNC_GRP(58, 3),
+ AMD_PINCTRL_FUNC_GRP(59, 0),
+ AMD_PINCTRL_FUNC_GRP(59, 1),
+ AMD_PINCTRL_FUNC_GRP(59, 2),
+ AMD_PINCTRL_FUNC_GRP(59, 3),
+ AMD_PINCTRL_FUNC_GRP(60, 0),
+ AMD_PINCTRL_FUNC_GRP(60, 1),
+ AMD_PINCTRL_FUNC_GRP(60, 2),
+ AMD_PINCTRL_FUNC_GRP(60, 3),
+ AMD_PINCTRL_FUNC_GRP(61, 0),
+ AMD_PINCTRL_FUNC_GRP(61, 1),
+ AMD_PINCTRL_FUNC_GRP(61, 2),
+ AMD_PINCTRL_FUNC_GRP(61, 3),
+ AMD_PINCTRL_FUNC_GRP(62, 0),
+ AMD_PINCTRL_FUNC_GRP(62, 1),
+ AMD_PINCTRL_FUNC_GRP(62, 2),
+ AMD_PINCTRL_FUNC_GRP(62, 3),
+ AMD_PINCTRL_FUNC_GRP(64, 0),
+ AMD_PINCTRL_FUNC_GRP(64, 1),
+ AMD_PINCTRL_FUNC_GRP(64, 2),
+ AMD_PINCTRL_FUNC_GRP(64, 3),
+ AMD_PINCTRL_FUNC_GRP(65, 0),
+ AMD_PINCTRL_FUNC_GRP(65, 1),
+ AMD_PINCTRL_FUNC_GRP(65, 2),
+ AMD_PINCTRL_FUNC_GRP(65, 3),
+ AMD_PINCTRL_FUNC_GRP(66, 0),
+ AMD_PINCTRL_FUNC_GRP(66, 1),
+ AMD_PINCTRL_FUNC_GRP(66, 2),
+ AMD_PINCTRL_FUNC_GRP(66, 3),
+ AMD_PINCTRL_FUNC_GRP(67, 0),
+ AMD_PINCTRL_FUNC_GRP(67, 1),
+ AMD_PINCTRL_FUNC_GRP(67, 2),
+ AMD_PINCTRL_FUNC_GRP(67, 3),
+ AMD_PINCTRL_FUNC_GRP(68, 0),
+ AMD_PINCTRL_FUNC_GRP(68, 1),
+ AMD_PINCTRL_FUNC_GRP(68, 2),
+ AMD_PINCTRL_FUNC_GRP(68, 3),
+ AMD_PINCTRL_FUNC_GRP(69, 0),
+ AMD_PINCTRL_FUNC_GRP(69, 1),
+ AMD_PINCTRL_FUNC_GRP(69, 2),
+ AMD_PINCTRL_FUNC_GRP(69, 3),
+ AMD_PINCTRL_FUNC_GRP(70, 0),
+ AMD_PINCTRL_FUNC_GRP(70, 1),
+ AMD_PINCTRL_FUNC_GRP(70, 2),
+ AMD_PINCTRL_FUNC_GRP(70, 3),
+ AMD_PINCTRL_FUNC_GRP(71, 0),
+ AMD_PINCTRL_FUNC_GRP(71, 1),
+ AMD_PINCTRL_FUNC_GRP(71, 2),
+ AMD_PINCTRL_FUNC_GRP(71, 3),
+ AMD_PINCTRL_FUNC_GRP(72, 0),
+ AMD_PINCTRL_FUNC_GRP(72, 1),
+ AMD_PINCTRL_FUNC_GRP(72, 2),
+ AMD_PINCTRL_FUNC_GRP(72, 3),
+ AMD_PINCTRL_FUNC_GRP(73, 0),
+ AMD_PINCTRL_FUNC_GRP(73, 1),
+ AMD_PINCTRL_FUNC_GRP(73, 2),
+ AMD_PINCTRL_FUNC_GRP(73, 3),
+ AMD_PINCTRL_FUNC_GRP(74, 0),
+ AMD_PINCTRL_FUNC_GRP(74, 1),
+ AMD_PINCTRL_FUNC_GRP(74, 2),
+ AMD_PINCTRL_FUNC_GRP(74, 3),
+ AMD_PINCTRL_FUNC_GRP(75, 0),
+ AMD_PINCTRL_FUNC_GRP(75, 1),
+ AMD_PINCTRL_FUNC_GRP(75, 2),
+ AMD_PINCTRL_FUNC_GRP(75, 3),
+ AMD_PINCTRL_FUNC_GRP(76, 0),
+ AMD_PINCTRL_FUNC_GRP(76, 1),
+ AMD_PINCTRL_FUNC_GRP(76, 2),
+ AMD_PINCTRL_FUNC_GRP(76, 3),
+ AMD_PINCTRL_FUNC_GRP(77, 0),
+ AMD_PINCTRL_FUNC_GRP(77, 1),
+ AMD_PINCTRL_FUNC_GRP(77, 2),
+ AMD_PINCTRL_FUNC_GRP(77, 3),
+ AMD_PINCTRL_FUNC_GRP(78, 0),
+ AMD_PINCTRL_FUNC_GRP(78, 1),
+ AMD_PINCTRL_FUNC_GRP(78, 2),
+ AMD_PINCTRL_FUNC_GRP(78, 3),
+ AMD_PINCTRL_FUNC_GRP(79, 0),
+ AMD_PINCTRL_FUNC_GRP(79, 1),
+ AMD_PINCTRL_FUNC_GRP(79, 2),
+ AMD_PINCTRL_FUNC_GRP(79, 3),
+ AMD_PINCTRL_FUNC_GRP(80, 0),
+ AMD_PINCTRL_FUNC_GRP(80, 1),
+ AMD_PINCTRL_FUNC_GRP(80, 2),
+ AMD_PINCTRL_FUNC_GRP(80, 3),
+ AMD_PINCTRL_FUNC_GRP(81, 0),
+ AMD_PINCTRL_FUNC_GRP(81, 1),
+ AMD_PINCTRL_FUNC_GRP(81, 2),
+ AMD_PINCTRL_FUNC_GRP(81, 3),
+ AMD_PINCTRL_FUNC_GRP(82, 0),
+ AMD_PINCTRL_FUNC_GRP(82, 1),
+ AMD_PINCTRL_FUNC_GRP(82, 2),
+ AMD_PINCTRL_FUNC_GRP(82, 3),
+ AMD_PINCTRL_FUNC_GRP(83, 0),
+ AMD_PINCTRL_FUNC_GRP(83, 1),
+ AMD_PINCTRL_FUNC_GRP(83, 2),
+ AMD_PINCTRL_FUNC_GRP(83, 3),
+ AMD_PINCTRL_FUNC_GRP(84, 0),
+ AMD_PINCTRL_FUNC_GRP(84, 1),
+ AMD_PINCTRL_FUNC_GRP(84, 2),
+ AMD_PINCTRL_FUNC_GRP(84, 3),
+ AMD_PINCTRL_FUNC_GRP(85, 0),
+ AMD_PINCTRL_FUNC_GRP(85, 1),
+ AMD_PINCTRL_FUNC_GRP(85, 2),
+ AMD_PINCTRL_FUNC_GRP(85, 3),
+ AMD_PINCTRL_FUNC_GRP(86, 0),
+ AMD_PINCTRL_FUNC_GRP(86, 1),
+ AMD_PINCTRL_FUNC_GRP(86, 2),
+ AMD_PINCTRL_FUNC_GRP(86, 3),
+ AMD_PINCTRL_FUNC_GRP(87, 0),
+ AMD_PINCTRL_FUNC_GRP(87, 1),
+ AMD_PINCTRL_FUNC_GRP(87, 2),
+ AMD_PINCTRL_FUNC_GRP(87, 3),
+ AMD_PINCTRL_FUNC_GRP(88, 0),
+ AMD_PINCTRL_FUNC_GRP(88, 1),
+ AMD_PINCTRL_FUNC_GRP(88, 2),
+ AMD_PINCTRL_FUNC_GRP(88, 3),
+ AMD_PINCTRL_FUNC_GRP(89, 0),
+ AMD_PINCTRL_FUNC_GRP(89, 1),
+ AMD_PINCTRL_FUNC_GRP(89, 2),
+ AMD_PINCTRL_FUNC_GRP(89, 3),
+ AMD_PINCTRL_FUNC_GRP(90, 0),
+ AMD_PINCTRL_FUNC_GRP(90, 1),
+ AMD_PINCTRL_FUNC_GRP(90, 2),
+ AMD_PINCTRL_FUNC_GRP(90, 3),
+ AMD_PINCTRL_FUNC_GRP(91, 0),
+ AMD_PINCTRL_FUNC_GRP(91, 1),
+ AMD_PINCTRL_FUNC_GRP(91, 2),
+ AMD_PINCTRL_FUNC_GRP(91, 3),
+ AMD_PINCTRL_FUNC_GRP(92, 0),
+ AMD_PINCTRL_FUNC_GRP(92, 1),
+ AMD_PINCTRL_FUNC_GRP(92, 2),
+ AMD_PINCTRL_FUNC_GRP(92, 3),
+ AMD_PINCTRL_FUNC_GRP(93, 0),
+ AMD_PINCTRL_FUNC_GRP(93, 1),
+ AMD_PINCTRL_FUNC_GRP(93, 2),
+ AMD_PINCTRL_FUNC_GRP(93, 3),
+ AMD_PINCTRL_FUNC_GRP(94, 0),
+ AMD_PINCTRL_FUNC_GRP(94, 1),
+ AMD_PINCTRL_FUNC_GRP(94, 2),
+ AMD_PINCTRL_FUNC_GRP(94, 3),
+ AMD_PINCTRL_FUNC_GRP(95, 0),
+ AMD_PINCTRL_FUNC_GRP(95, 1),
+ AMD_PINCTRL_FUNC_GRP(95, 2),
+ AMD_PINCTRL_FUNC_GRP(95, 3),
+ AMD_PINCTRL_FUNC_GRP(96, 0),
+ AMD_PINCTRL_FUNC_GRP(96, 1),
+ AMD_PINCTRL_FUNC_GRP(96, 2),
+ AMD_PINCTRL_FUNC_GRP(96, 3),
+ AMD_PINCTRL_FUNC_GRP(97, 0),
+ AMD_PINCTRL_FUNC_GRP(97, 1),
+ AMD_PINCTRL_FUNC_GRP(97, 2),
+ AMD_PINCTRL_FUNC_GRP(97, 3),
+ AMD_PINCTRL_FUNC_GRP(98, 0),
+ AMD_PINCTRL_FUNC_GRP(98, 1),
+ AMD_PINCTRL_FUNC_GRP(98, 2),
+ AMD_PINCTRL_FUNC_GRP(98, 3),
+ AMD_PINCTRL_FUNC_GRP(99, 0),
+ AMD_PINCTRL_FUNC_GRP(99, 1),
+ AMD_PINCTRL_FUNC_GRP(99, 2),
+ AMD_PINCTRL_FUNC_GRP(99, 3),
+ AMD_PINCTRL_FUNC_GRP(100, 0),
+ AMD_PINCTRL_FUNC_GRP(100, 1),
+ AMD_PINCTRL_FUNC_GRP(100, 2),
+ AMD_PINCTRL_FUNC_GRP(100, 3),
+ AMD_PINCTRL_FUNC_GRP(101, 0),
+ AMD_PINCTRL_FUNC_GRP(101, 1),
+ AMD_PINCTRL_FUNC_GRP(101, 2),
+ AMD_PINCTRL_FUNC_GRP(101, 3),
+ AMD_PINCTRL_FUNC_GRP(102, 0),
+ AMD_PINCTRL_FUNC_GRP(102, 1),
+ AMD_PINCTRL_FUNC_GRP(102, 2),
+ AMD_PINCTRL_FUNC_GRP(102, 3),
+ AMD_PINCTRL_FUNC_GRP(103, 0),
+ AMD_PINCTRL_FUNC_GRP(103, 1),
+ AMD_PINCTRL_FUNC_GRP(103, 2),
+ AMD_PINCTRL_FUNC_GRP(103, 3),
+ AMD_PINCTRL_FUNC_GRP(104, 0),
+ AMD_PINCTRL_FUNC_GRP(104, 1),
+ AMD_PINCTRL_FUNC_GRP(104, 2),
+ AMD_PINCTRL_FUNC_GRP(104, 3),
+ AMD_PINCTRL_FUNC_GRP(105, 0),
+ AMD_PINCTRL_FUNC_GRP(105, 1),
+ AMD_PINCTRL_FUNC_GRP(105, 2),
+ AMD_PINCTRL_FUNC_GRP(105, 3),
+ AMD_PINCTRL_FUNC_GRP(106, 0),
+ AMD_PINCTRL_FUNC_GRP(106, 1),
+ AMD_PINCTRL_FUNC_GRP(106, 2),
+ AMD_PINCTRL_FUNC_GRP(106, 3),
+ AMD_PINCTRL_FUNC_GRP(107, 0),
+ AMD_PINCTRL_FUNC_GRP(107, 1),
+ AMD_PINCTRL_FUNC_GRP(107, 2),
+ AMD_PINCTRL_FUNC_GRP(107, 3),
+ AMD_PINCTRL_FUNC_GRP(108, 0),
+ AMD_PINCTRL_FUNC_GRP(108, 1),
+ AMD_PINCTRL_FUNC_GRP(108, 2),
+ AMD_PINCTRL_FUNC_GRP(108, 3),
+ AMD_PINCTRL_FUNC_GRP(109, 0),
+ AMD_PINCTRL_FUNC_GRP(109, 1),
+ AMD_PINCTRL_FUNC_GRP(109, 2),
+ AMD_PINCTRL_FUNC_GRP(109, 3),
+ AMD_PINCTRL_FUNC_GRP(110, 0),
+ AMD_PINCTRL_FUNC_GRP(110, 1),
+ AMD_PINCTRL_FUNC_GRP(110, 2),
+ AMD_PINCTRL_FUNC_GRP(110, 3),
+ AMD_PINCTRL_FUNC_GRP(111, 0),
+ AMD_PINCTRL_FUNC_GRP(111, 1),
+ AMD_PINCTRL_FUNC_GRP(111, 2),
+ AMD_PINCTRL_FUNC_GRP(111, 3),
+ AMD_PINCTRL_FUNC_GRP(112, 0),
+ AMD_PINCTRL_FUNC_GRP(112, 1),
+ AMD_PINCTRL_FUNC_GRP(112, 2),
+ AMD_PINCTRL_FUNC_GRP(112, 3),
+ AMD_PINCTRL_FUNC_GRP(113, 0),
+ AMD_PINCTRL_FUNC_GRP(113, 1),
+ AMD_PINCTRL_FUNC_GRP(113, 2),
+ AMD_PINCTRL_FUNC_GRP(113, 3),
+ AMD_PINCTRL_FUNC_GRP(114, 0),
+ AMD_PINCTRL_FUNC_GRP(114, 1),
+ AMD_PINCTRL_FUNC_GRP(114, 2),
+ AMD_PINCTRL_FUNC_GRP(114, 3),
+ AMD_PINCTRL_FUNC_GRP(115, 0),
+ AMD_PINCTRL_FUNC_GRP(115, 1),
+ AMD_PINCTRL_FUNC_GRP(115, 2),
+ AMD_PINCTRL_FUNC_GRP(115, 3),
+ AMD_PINCTRL_FUNC_GRP(116, 0),
+ AMD_PINCTRL_FUNC_GRP(116, 1),
+ AMD_PINCTRL_FUNC_GRP(116, 2),
+ AMD_PINCTRL_FUNC_GRP(116, 3),
+ AMD_PINCTRL_FUNC_GRP(117, 0),
+ AMD_PINCTRL_FUNC_GRP(117, 1),
+ AMD_PINCTRL_FUNC_GRP(117, 2),
+ AMD_PINCTRL_FUNC_GRP(117, 3),
+ AMD_PINCTRL_FUNC_GRP(118, 0),
+ AMD_PINCTRL_FUNC_GRP(118, 1),
+ AMD_PINCTRL_FUNC_GRP(118, 2),
+ AMD_PINCTRL_FUNC_GRP(118, 3),
+ AMD_PINCTRL_FUNC_GRP(119, 0),
+ AMD_PINCTRL_FUNC_GRP(119, 1),
+ AMD_PINCTRL_FUNC_GRP(119, 2),
+ AMD_PINCTRL_FUNC_GRP(119, 3),
+ AMD_PINCTRL_FUNC_GRP(120, 0),
+ AMD_PINCTRL_FUNC_GRP(120, 1),
+ AMD_PINCTRL_FUNC_GRP(120, 2),
+ AMD_PINCTRL_FUNC_GRP(120, 3),
+ AMD_PINCTRL_FUNC_GRP(121, 0),
+ AMD_PINCTRL_FUNC_GRP(121, 1),
+ AMD_PINCTRL_FUNC_GRP(121, 2),
+ AMD_PINCTRL_FUNC_GRP(121, 3),
+ AMD_PINCTRL_FUNC_GRP(122, 0),
+ AMD_PINCTRL_FUNC_GRP(122, 1),
+ AMD_PINCTRL_FUNC_GRP(122, 2),
+ AMD_PINCTRL_FUNC_GRP(122, 3),
+ AMD_PINCTRL_FUNC_GRP(123, 0),
+ AMD_PINCTRL_FUNC_GRP(123, 1),
+ AMD_PINCTRL_FUNC_GRP(123, 2),
+ AMD_PINCTRL_FUNC_GRP(123, 3),
+ AMD_PINCTRL_FUNC_GRP(124, 0),
+ AMD_PINCTRL_FUNC_GRP(124, 1),
+ AMD_PINCTRL_FUNC_GRP(124, 2),
+ AMD_PINCTRL_FUNC_GRP(124, 3),
+ AMD_PINCTRL_FUNC_GRP(125, 0),
+ AMD_PINCTRL_FUNC_GRP(125, 1),
+ AMD_PINCTRL_FUNC_GRP(125, 2),
+ AMD_PINCTRL_FUNC_GRP(125, 3),
+ AMD_PINCTRL_FUNC_GRP(126, 0),
+ AMD_PINCTRL_FUNC_GRP(126, 1),
+ AMD_PINCTRL_FUNC_GRP(126, 2),
+ AMD_PINCTRL_FUNC_GRP(126, 3),
+ AMD_PINCTRL_FUNC_GRP(127, 0),
+ AMD_PINCTRL_FUNC_GRP(127, 1),
+ AMD_PINCTRL_FUNC_GRP(127, 2),
+ AMD_PINCTRL_FUNC_GRP(127, 3),
+ AMD_PINCTRL_FUNC_GRP(128, 0),
+ AMD_PINCTRL_FUNC_GRP(128, 1),
+ AMD_PINCTRL_FUNC_GRP(128, 2),
+ AMD_PINCTRL_FUNC_GRP(128, 3),
+ AMD_PINCTRL_FUNC_GRP(129, 0),
+ AMD_PINCTRL_FUNC_GRP(129, 1),
+ AMD_PINCTRL_FUNC_GRP(129, 2),
+ AMD_PINCTRL_FUNC_GRP(129, 3),
+ AMD_PINCTRL_FUNC_GRP(130, 0),
+ AMD_PINCTRL_FUNC_GRP(130, 1),
+ AMD_PINCTRL_FUNC_GRP(130, 2),
+ AMD_PINCTRL_FUNC_GRP(130, 3),
+ AMD_PINCTRL_FUNC_GRP(131, 0),
+ AMD_PINCTRL_FUNC_GRP(131, 1),
+ AMD_PINCTRL_FUNC_GRP(131, 2),
+ AMD_PINCTRL_FUNC_GRP(131, 3),
+ AMD_PINCTRL_FUNC_GRP(132, 0),
+ AMD_PINCTRL_FUNC_GRP(132, 1),
+ AMD_PINCTRL_FUNC_GRP(132, 2),
+ AMD_PINCTRL_FUNC_GRP(132, 3),
+ AMD_PINCTRL_FUNC_GRP(133, 0),
+ AMD_PINCTRL_FUNC_GRP(133, 1),
+ AMD_PINCTRL_FUNC_GRP(133, 2),
+ AMD_PINCTRL_FUNC_GRP(133, 3),
+ AMD_PINCTRL_FUNC_GRP(134, 0),
+ AMD_PINCTRL_FUNC_GRP(134, 1),
+ AMD_PINCTRL_FUNC_GRP(134, 2),
+ AMD_PINCTRL_FUNC_GRP(134, 3),
+ AMD_PINCTRL_FUNC_GRP(135, 0),
+ AMD_PINCTRL_FUNC_GRP(135, 1),
+ AMD_PINCTRL_FUNC_GRP(135, 2),
+ AMD_PINCTRL_FUNC_GRP(135, 3),
+ AMD_PINCTRL_FUNC_GRP(136, 0),
+ AMD_PINCTRL_FUNC_GRP(136, 1),
+ AMD_PINCTRL_FUNC_GRP(136, 2),
+ AMD_PINCTRL_FUNC_GRP(136, 3),
+ AMD_PINCTRL_FUNC_GRP(137, 0),
+ AMD_PINCTRL_FUNC_GRP(137, 1),
+ AMD_PINCTRL_FUNC_GRP(137, 2),
+ AMD_PINCTRL_FUNC_GRP(137, 3),
+ AMD_PINCTRL_FUNC_GRP(138, 0),
+ AMD_PINCTRL_FUNC_GRP(138, 1),
+ AMD_PINCTRL_FUNC_GRP(138, 2),
+ AMD_PINCTRL_FUNC_GRP(138, 3),
+ AMD_PINCTRL_FUNC_GRP(139, 0),
+ AMD_PINCTRL_FUNC_GRP(139, 1),
+ AMD_PINCTRL_FUNC_GRP(139, 2),
+ AMD_PINCTRL_FUNC_GRP(139, 3),
+ AMD_PINCTRL_FUNC_GRP(140, 0),
+ AMD_PINCTRL_FUNC_GRP(140, 1),
+ AMD_PINCTRL_FUNC_GRP(140, 2),
+ AMD_PINCTRL_FUNC_GRP(140, 3),
+ AMD_PINCTRL_FUNC_GRP(141, 0),
+ AMD_PINCTRL_FUNC_GRP(141, 1),
+ AMD_PINCTRL_FUNC_GRP(141, 2),
+ AMD_PINCTRL_FUNC_GRP(141, 3),
+ AMD_PINCTRL_FUNC_GRP(142, 0),
+ AMD_PINCTRL_FUNC_GRP(142, 1),
+ AMD_PINCTRL_FUNC_GRP(142, 2),
+ AMD_PINCTRL_FUNC_GRP(142, 3),
+ AMD_PINCTRL_FUNC_GRP(143, 0),
+ AMD_PINCTRL_FUNC_GRP(143, 1),
+ AMD_PINCTRL_FUNC_GRP(143, 2),
+ AMD_PINCTRL_FUNC_GRP(143, 3),
+ AMD_PINCTRL_FUNC_GRP(144, 0),
+ AMD_PINCTRL_FUNC_GRP(144, 1),
+ AMD_PINCTRL_FUNC_GRP(144, 2),
+ AMD_PINCTRL_FUNC_GRP(144, 3),
+
+ PINCTRL_PINGROUP("i2c0", AMD_PINS(145, 146), 2),
+ PINCTRL_PINGROUP("i2c1", AMD_PINS(147, 148), 2),
+ PINCTRL_PINGROUP("i2c2", AMD_PINS(113, 114), 2),
+ PINCTRL_PINGROUP("i2c3", AMD_PINS(19, 20), 2),
+ PINCTRL_PINGROUP("uart0", AMD_PINS(135, 136, 137, 138, 139), 5),
+ PINCTRL_PINGROUP("uart1", AMD_PINS(140, 141, 142, 143, 144), 5),
+};
-static const unsigned uart0_pins[] = {135, 136, 137, 138, 139};
-static const unsigned uart1_pins[] = {140, 141, 142, 143, 144};
+#define AMD_PMUX_FUNC(_number) { \
+ .name = "iomux_gpio_"#_number, \
+ .groups = { \
+ "IMX_F0_GPIO"#_number, "IMX_F1_GPIO"#_number, \
+ "IMX_F2_GPIO"#_number, "IMX_F3_GPIO"#_number, \
+ }, \
+ .index = _number, \
+ .ngroups = NSELECTS, \
+}
-static const struct amd_pingroup kerncz_groups[] = {
- {
- .name = "i2c0",
- .pins = i2c0_pins,
- .npins = 2,
- },
- {
- .name = "i2c1",
- .pins = i2c1_pins,
- .npins = 2,
- },
- {
- .name = "i2c2",
- .pins = i2c2_pins,
- .npins = 2,
- },
- {
- .name = "i2c3",
- .pins = i2c3_pins,
- .npins = 2,
- },
- {
- .name = "uart0",
- .pins = uart0_pins,
- .npins = 5,
- },
- {
- .name = "uart1",
- .pins = uart1_pins,
- .npins = 5,
- },
+static const struct amd_function pmx_functions[] = {
+ AMD_PMUX_FUNC(0),
+ AMD_PMUX_FUNC(1),
+ AMD_PMUX_FUNC(2),
+ AMD_PMUX_FUNC(3),
+ AMD_PMUX_FUNC(4),
+ AMD_PMUX_FUNC(5),
+ AMD_PMUX_FUNC(6),
+ AMD_PMUX_FUNC(7),
+ AMD_PMUX_FUNC(8),
+ AMD_PMUX_FUNC(9),
+ AMD_PMUX_FUNC(10),
+ AMD_PMUX_FUNC(11),
+ AMD_PMUX_FUNC(12),
+ AMD_PMUX_FUNC(13),
+ AMD_PMUX_FUNC(14),
+ AMD_PMUX_FUNC(15),
+ AMD_PMUX_FUNC(16),
+ AMD_PMUX_FUNC(17),
+ AMD_PMUX_FUNC(18),
+ AMD_PMUX_FUNC(19),
+ AMD_PMUX_FUNC(20),
+ AMD_PMUX_FUNC(21),
+ AMD_PMUX_FUNC(22),
+ AMD_PMUX_FUNC(23),
+ AMD_PMUX_FUNC(24),
+ AMD_PMUX_FUNC(25),
+ AMD_PMUX_FUNC(26),
+ AMD_PMUX_FUNC(27),
+ AMD_PMUX_FUNC(28),
+ AMD_PMUX_FUNC(29),
+ AMD_PMUX_FUNC(30),
+ AMD_PMUX_FUNC(31),
+ AMD_PMUX_FUNC(32),
+ AMD_PMUX_FUNC(33),
+ AMD_PMUX_FUNC(34),
+ AMD_PMUX_FUNC(35),
+ AMD_PMUX_FUNC(36),
+ AMD_PMUX_FUNC(37),
+ AMD_PMUX_FUNC(38),
+ AMD_PMUX_FUNC(39),
+ AMD_PMUX_FUNC(40),
+ AMD_PMUX_FUNC(41),
+ AMD_PMUX_FUNC(42),
+ AMD_PMUX_FUNC(43),
+ AMD_PMUX_FUNC(44),
+ AMD_PMUX_FUNC(45),
+ AMD_PMUX_FUNC(46),
+ AMD_PMUX_FUNC(47),
+ AMD_PMUX_FUNC(48),
+ AMD_PMUX_FUNC(49),
+ AMD_PMUX_FUNC(50),
+ AMD_PMUX_FUNC(51),
+ AMD_PMUX_FUNC(52),
+ AMD_PMUX_FUNC(53),
+ AMD_PMUX_FUNC(54),
+ AMD_PMUX_FUNC(55),
+ AMD_PMUX_FUNC(56),
+ AMD_PMUX_FUNC(57),
+ AMD_PMUX_FUNC(58),
+ AMD_PMUX_FUNC(59),
+ AMD_PMUX_FUNC(60),
+ AMD_PMUX_FUNC(61),
+ AMD_PMUX_FUNC(62),
+ AMD_PMUX_FUNC(64),
+ AMD_PMUX_FUNC(65),
+ AMD_PMUX_FUNC(66),
+ AMD_PMUX_FUNC(67),
+ AMD_PMUX_FUNC(68),
+ AMD_PMUX_FUNC(69),
+ AMD_PMUX_FUNC(70),
+ AMD_PMUX_FUNC(71),
+ AMD_PMUX_FUNC(72),
+ AMD_PMUX_FUNC(73),
+ AMD_PMUX_FUNC(74),
+ AMD_PMUX_FUNC(75),
+ AMD_PMUX_FUNC(76),
+ AMD_PMUX_FUNC(77),
+ AMD_PMUX_FUNC(78),
+ AMD_PMUX_FUNC(79),
+ AMD_PMUX_FUNC(80),
+ AMD_PMUX_FUNC(81),
+ AMD_PMUX_FUNC(82),
+ AMD_PMUX_FUNC(83),
+ AMD_PMUX_FUNC(84),
+ AMD_PMUX_FUNC(85),
+ AMD_PMUX_FUNC(86),
+ AMD_PMUX_FUNC(87),
+ AMD_PMUX_FUNC(88),
+ AMD_PMUX_FUNC(89),
+ AMD_PMUX_FUNC(90),
+ AMD_PMUX_FUNC(91),
+ AMD_PMUX_FUNC(92),
+ AMD_PMUX_FUNC(93),
+ AMD_PMUX_FUNC(94),
+ AMD_PMUX_FUNC(95),
+ AMD_PMUX_FUNC(96),
+ AMD_PMUX_FUNC(97),
+ AMD_PMUX_FUNC(98),
+ AMD_PMUX_FUNC(99),
+ AMD_PMUX_FUNC(100),
+ AMD_PMUX_FUNC(101),
+ AMD_PMUX_FUNC(102),
+ AMD_PMUX_FUNC(103),
+ AMD_PMUX_FUNC(104),
+ AMD_PMUX_FUNC(105),
+ AMD_PMUX_FUNC(106),
+ AMD_PMUX_FUNC(107),
+ AMD_PMUX_FUNC(108),
+ AMD_PMUX_FUNC(109),
+ AMD_PMUX_FUNC(110),
+ AMD_PMUX_FUNC(111),
+ AMD_PMUX_FUNC(112),
+ AMD_PMUX_FUNC(113),
+ AMD_PMUX_FUNC(114),
+ AMD_PMUX_FUNC(115),
+ AMD_PMUX_FUNC(116),
+ AMD_PMUX_FUNC(117),
+ AMD_PMUX_FUNC(118),
+ AMD_PMUX_FUNC(119),
+ AMD_PMUX_FUNC(120),
+ AMD_PMUX_FUNC(121),
+ AMD_PMUX_FUNC(122),
+ AMD_PMUX_FUNC(123),
+ AMD_PMUX_FUNC(124),
+ AMD_PMUX_FUNC(125),
+ AMD_PMUX_FUNC(126),
+ AMD_PMUX_FUNC(127),
+ AMD_PMUX_FUNC(128),
+ AMD_PMUX_FUNC(129),
+ AMD_PMUX_FUNC(130),
+ AMD_PMUX_FUNC(131),
+ AMD_PMUX_FUNC(132),
+ AMD_PMUX_FUNC(133),
+ AMD_PMUX_FUNC(134),
+ AMD_PMUX_FUNC(135),
+ AMD_PMUX_FUNC(136),
+ AMD_PMUX_FUNC(137),
+ AMD_PMUX_FUNC(138),
+ AMD_PMUX_FUNC(139),
+ AMD_PMUX_FUNC(140),
+ AMD_PMUX_FUNC(141),
+ AMD_PMUX_FUNC(142),
+ AMD_PMUX_FUNC(143),
+ AMD_PMUX_FUNC(144),
};
#endif
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 4313756b52e6..f0e5d87ac50b 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ams AS3722 pin control and GPIO driver.
*
* Copyright (c) 2013, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/delay.h>
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 517f2a6330ad..82b921fd630d 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -237,8 +237,6 @@ static void atmel_gpio_irq_unmask(struct irq_data *d)
BIT(pin->line));
}
-#ifdef CONFIG_PM_SLEEP
-
static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
@@ -255,9 +253,6 @@ static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-#else
-#define atmel_gpio_irq_set_wake NULL
-#endif /* CONFIG_PM_SLEEP */
static struct irq_chip atmel_gpio_irq_chip = {
.name = "GPIO",
@@ -265,7 +260,7 @@ static struct irq_chip atmel_gpio_irq_chip = {
.irq_mask = atmel_gpio_irq_mask,
.irq_unmask = atmel_gpio_irq_unmask,
.irq_set_type = atmel_gpio_irq_set_type,
- .irq_set_wake = atmel_gpio_irq_set_wake,
+ .irq_set_wake = pm_sleep_ptr(atmel_gpio_irq_set_wake),
};
static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d91a010e65f5..5634fa063ebf 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1615,8 +1615,6 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-#ifdef CONFIG_PM
-
static u32 wakeups[MAX_GPIO_BANKS];
static u32 backups[MAX_GPIO_BANKS];
@@ -1683,10 +1681,6 @@ void at91_pinctrl_gpio_resume(void)
}
}
-#else
-#define gpio_irq_set_wake NULL
-#endif /* CONFIG_PM */
-
static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1741,14 +1735,14 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
gpio_irqchip->irq_disable = gpio_irq_mask;
gpio_irqchip->irq_mask = gpio_irq_mask;
gpio_irqchip->irq_unmask = gpio_irq_unmask;
- gpio_irqchip->irq_set_wake = gpio_irq_set_wake;
+ gpio_irqchip->irq_set_wake = pm_ptr(gpio_irq_set_wake);
gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
/*
- * Let the generic code handle this edge IRQ, the the chained
+ * Let the generic code handle this edge IRQ, the chained
* handler will perform the actual work of handling the parent
* interrupt.
*/
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 207cbae3a7bf..7ab20ac15391 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -73,7 +73,7 @@ static const struct pinctrl_pin_desc axp209_pins[] = {
PINCTRL_PIN(2, "GPIO2"),
};
-static const struct pinctrl_pin_desc axp813_pins[] = {
+static const struct pinctrl_pin_desc axp22x_pins[] = {
PINCTRL_PIN(0, "GPIO0"),
PINCTRL_PIN(1, "GPIO1"),
};
@@ -87,9 +87,16 @@ static const struct axp20x_pctrl_desc axp20x_data = {
.adc_mux = AXP20X_MUX_ADC,
};
+static const struct axp20x_pctrl_desc axp22x_data = {
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 0,
+};
+
static const struct axp20x_pctrl_desc axp813_data = {
- .pins = axp813_pins,
- .npins = ARRAY_SIZE(axp813_pins),
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
.ldo_mask = BIT(0) | BIT(1),
.adc_mask = BIT(0),
.gpio_status_offset = 0,
@@ -388,6 +395,7 @@ static int axp20x_build_funcs_groups(struct platform_device *pdev)
static const struct of_device_id axp20x_pctl_match[] = {
{ .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp221-gpio", .data = &axp22x_data, },
{ .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 1ca11616db74..3a9ee9c8af11 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include "core.h"
@@ -135,7 +136,6 @@ struct ingenic_pinctrl {
struct ingenic_gpio_chip {
struct ingenic_pinctrl *jzpc;
struct gpio_chip gc;
- struct irq_chip irq_chip;
unsigned int irq, reg_base;
};
@@ -3393,7 +3393,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
@@ -3405,7 +3405,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
@@ -3417,7 +3417,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ gpiochip_enable_irq(gc, irq);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
@@ -3433,7 +3435,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
ingenic_gpio_irq_mask(irqd);
@@ -3443,13 +3445,15 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
+
+ gpiochip_disable_irq(gc, irq);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
bool high;
if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
@@ -3477,6 +3481,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
@@ -3498,12 +3503,12 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
* best we can do is to set up a single-edge interrupt and then
* switch to the opposing edge when ACKing the interrupt.
*/
- bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
+ bool high = ingenic_gpio_get_value(jzgc, irq);
type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
}
- irq_set_type(jzgc, irqd->hwirq, type);
+ irq_set_type(jzgc, irq, type);
return 0;
}
@@ -3668,22 +3673,45 @@ static const struct pinctrl_ops ingenic_pctlops = {
static int ingenic_gpio_irq_request(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
int ret;
- ret = ingenic_gpio_direction_input(gpio_chip, data->hwirq);
+ ret = ingenic_gpio_direction_input(gpio_chip, irq);
if (ret)
return ret;
- return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+ return gpiochip_reqres_irq(gpio_chip, irq);
}
static void ingenic_gpio_irq_release(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
+
+ return gpiochip_relres_irq(gpio_chip, irq);
+}
+
+static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- return gpiochip_relres_irq(gpio_chip, data->hwirq);
+ seq_printf(p, "%s", gpio_chip->label);
}
+static const struct irq_chip ingenic_gpio_irqchip = {
+ .irq_enable = ingenic_gpio_irq_enable,
+ .irq_disable = ingenic_gpio_irq_disable,
+ .irq_unmask = ingenic_gpio_irq_unmask,
+ .irq_mask = ingenic_gpio_irq_mask,
+ .irq_ack = ingenic_gpio_irq_ack,
+ .irq_set_type = ingenic_gpio_irq_set_type,
+ .irq_set_wake = ingenic_gpio_irq_set_wake,
+ .irq_request_resources = ingenic_gpio_irq_request,
+ .irq_release_resources = ingenic_gpio_irq_release,
+ .irq_print_chip = ingenic_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+};
+
static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
int pin, int func)
{
@@ -4172,20 +4200,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
if (!jzgc->irq)
return -EINVAL;
- jzgc->irq_chip.name = jzgc->gc.label;
- jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
- jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
- jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
- jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
- jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
- jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
- jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
- jzgc->irq_chip.irq_request_resources = ingenic_gpio_irq_request;
- jzgc->irq_chip.irq_release_resources = ingenic_gpio_irq_release;
- jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
-
girq = &jzgc->gc.irq;
- girq->chip = &jzgc->irq_chip;
+ gpio_irq_chip_set_chip(girq, &ingenic_gpio_irqchip);
girq->parent_handler = ingenic_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 5f4a8c5c6650..c5fd154990c8 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -29,19 +29,12 @@
#define ocelot_clrsetbits(addr, clear, set) \
writel((readl(addr) & ~(clear)) | (set), (addr))
-/* PINCONFIG bits (sparx5 only) */
enum {
PINCONF_BIAS,
PINCONF_SCHMITT,
PINCONF_DRIVE_STRENGTH,
};
-#define BIAS_PD_BIT BIT(4)
-#define BIAS_PU_BIT BIT(3)
-#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT)
-#define SCHMITT_BIT BIT(2)
-#define DRIVE_BITS GENMASK(1, 0)
-
/* GPIO standard registers */
#define OCELOT_GPIO_OUT_SET 0x0
#define OCELOT_GPIO_OUT_CLR 0x4
@@ -321,6 +314,13 @@ struct ocelot_pin_caps {
unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
};
+struct ocelot_pincfg_data {
+ u8 pd_bit;
+ u8 pu_bit;
+ u8 drive_bits;
+ u8 schmitt_bit;
+};
+
struct ocelot_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
@@ -328,10 +328,16 @@ struct ocelot_pinctrl {
struct regmap *map;
struct regmap *pincfg;
struct pinctrl_desc *desc;
+ const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
};
+struct ocelot_match_data {
+ struct pinctrl_desc desc;
+ struct ocelot_pincfg_data pincfg_data;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 regcfg;
- ret = regmap_read(info->pincfg, pin, &regcfg);
+ ret = regmap_read(info->pincfg,
+ pin * regmap_get_reg_stride(info->pincfg),
+ &regcfg);
if (ret)
return ret;
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- *val = regcfg & BIAS_BITS;
+ *val = regcfg & (opd->pd_bit | opd->pu_bit);
break;
case PINCONF_SCHMITT:
- *val = regcfg & SCHMITT_BIT;
+ *val = regcfg & opd->schmitt_bit;
break;
case PINCONF_DRIVE_STRENGTH:
- *val = regcfg & DRIVE_BITS;
+ *val = regcfg & opd->drive_bits;
break;
default:
@@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
u32 val;
int ret;
- ret = regmap_read(info->pincfg, regaddr, &val);
+ ret = regmap_read(info->pincfg,
+ regaddr * regmap_get_reg_stride(info->pincfg),
+ &val);
if (ret)
return ret;
val &= ~clrbits;
val |= setbits;
- ret = regmap_write(info->pincfg, regaddr, val);
+ ret = regmap_write(info->pincfg,
+ regaddr * regmap_get_reg_stride(info->pincfg),
+ val);
return ret;
}
@@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ opd->pd_bit | opd->pu_bit,
val);
break;
case PINCONF_SCHMITT:
- ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ opd->schmitt_bit,
val);
break;
case PINCONF_DRIVE_STRENGTH:
if (val <= 3)
ret = ocelot_pincfg_clrsetbits(info, pin,
- DRIVE_BITS, val);
+ opd->drive_bits,
+ val);
else
ret = -EINVAL;
break;
@@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
if (param == PIN_CONFIG_BIAS_DISABLE)
val = (val == 0);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
- val = (val & BIAS_PD_BIT ? true : false);
+ val = !!(val & info->pincfg_data->pd_bit);
else /* PIN_CONFIG_BIAS_PULL_UP */
- val = (val & BIAS_PU_BIT ? true : false);
+ val = !!(val & info->pincfg_data->pu_bit);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (!info->pincfg_data->schmitt_bit)
+ return -EOPNOTSUPP;
+
err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
if (err)
return err;
- val = (val & SCHMITT_BIT ? true : false);
+ val = !!(val & info->pincfg_data->schmitt_bit);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 param, arg, p;
int cfg, err = 0;
@@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
- (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
- BIAS_PD_BIT;
+ (param == PIN_CONFIG_BIAS_PULL_UP) ?
+ opd->pu_bit : opd->pd_bit;
err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
if (err)
@@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- arg = arg ? SCHMITT_BIT : 0;
+ if (!opd->schmitt_bit)
+ return -EOPNOTSUPP;
+
+ arg = arg ? opd->schmitt_bit : 0;
err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
arg);
if (err)
@@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct pinctrl_desc luton_desc = {
- .name = "luton-pinctrl",
- .pins = luton_pins,
- .npins = ARRAY_SIZE(luton_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data luton_desc = {
+ .desc = {
+ .name = "luton-pinctrl",
+ .pins = luton_pins,
+ .npins = ARRAY_SIZE(luton_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc serval_desc = {
- .name = "serval-pinctrl",
- .pins = serval_pins,
- .npins = ARRAY_SIZE(serval_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data serval_desc = {
+ .desc = {
+ .name = "serval-pinctrl",
+ .pins = serval_pins,
+ .npins = ARRAY_SIZE(serval_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc ocelot_desc = {
- .name = "ocelot-pinctrl",
- .pins = ocelot_pins,
- .npins = ARRAY_SIZE(ocelot_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data ocelot_desc = {
+ .desc = {
+ .name = "ocelot-pinctrl",
+ .pins = ocelot_pins,
+ .npins = ARRAY_SIZE(ocelot_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc jaguar2_desc = {
- .name = "jaguar2-pinctrl",
- .pins = jaguar2_pins,
- .npins = ARRAY_SIZE(jaguar2_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data jaguar2_desc = {
+ .desc = {
+ .name = "jaguar2-pinctrl",
+ .pins = jaguar2_pins,
+ .npins = ARRAY_SIZE(jaguar2_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc servalt_desc = {
- .name = "servalt-pinctrl",
- .pins = servalt_pins,
- .npins = ARRAY_SIZE(servalt_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data servalt_desc = {
+ .desc = {
+ .name = "servalt-pinctrl",
+ .pins = servalt_pins,
+ .npins = ARRAY_SIZE(servalt_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc sparx5_desc = {
- .name = "sparx5-pinctrl",
- .pins = sparx5_pins,
- .npins = ARRAY_SIZE(sparx5_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .confops = &ocelot_confops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data sparx5_desc = {
+ .desc = {
+ .name = "sparx5-pinctrl",
+ .pins = sparx5_pins,
+ .npins = ARRAY_SIZE(sparx5_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+ },
+ .pincfg_data = {
+ .pd_bit = BIT(4),
+ .pu_bit = BIT(3),
+ .drive_bits = GENMASK(1, 0),
+ .schmitt_bit = BIT(2),
+ },
};
-static struct pinctrl_desc lan966x_desc = {
- .name = "lan966x-pinctrl",
- .pins = lan966x_pins,
- .npins = ARRAY_SIZE(lan966x_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &lan966x_pmx_ops,
- .confops = &ocelot_confops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data lan966x_desc = {
+ .desc = {
+ .name = "lan966x-pinctrl",
+ .pins = lan966x_pins,
+ .npins = ARRAY_SIZE(lan966x_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &lan966x_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+ },
+ .pincfg_data = {
+ .pd_bit = BIT(3),
+ .pu_bit = BIT(2),
+ .drive_bits = GENMASK(1, 0),
+ },
};
static int ocelot_create_group_func_map(struct device *dev,
@@ -1761,6 +1810,7 @@ static void ocelot_irq_mask(struct irq_data *data)
regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
BIT(gpio % 32), 0);
+ gpiochip_disable_irq(chip, gpio);
}
static void ocelot_irq_unmask(struct irq_data *data)
@@ -1769,6 +1819,7 @@ static void ocelot_irq_unmask(struct irq_data *data)
struct ocelot_pinctrl *info = gpiochip_get_data(chip);
unsigned int gpio = irqd_to_hwirq(data);
+ gpiochip_enable_irq(chip, gpio);
regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
BIT(gpio % 32), BIT(gpio % 32));
}
@@ -1790,8 +1841,10 @@ static struct irq_chip ocelot_eoi_irqchip = {
.irq_mask = ocelot_irq_mask,
.irq_eoi = ocelot_irq_ack,
.irq_unmask = ocelot_irq_unmask,
- .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ IRQCHIP_IMMUTABLE,
.irq_set_type = ocelot_irq_set_type,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS
};
static struct irq_chip ocelot_irqchip = {
@@ -1800,6 +1853,8 @@ static struct irq_chip ocelot_irqchip = {
.irq_ack = ocelot_irq_ack,
.irq_unmask = ocelot_irq_unmask,
.irq_set_type = ocelot_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS
};
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
@@ -1863,7 +1918,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
girq = &gc->irq;
- girq->chip = &ocelot_irqchip;
+ gpio_irq_chip_set_chip(girq, &ocelot_irqchip);
girq->parent_handler = ocelot_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
@@ -1889,8 +1944,10 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{ .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc },
{},
};
+MODULE_DEVICE_TABLE(of, ocelot_pinctrl_of_match);
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+ const struct ocelot_pinctrl *info)
{
void __iomem *base;
@@ -1898,7 +1955,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = 32,
+ .max_register = info->desc->npins * 4,
.name = "pincfg",
};
@@ -1913,6 +1970,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
static int ocelot_pinctrl_probe(struct platform_device *pdev)
{
+ const struct ocelot_match_data *data;
struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info;
struct reset_control *reset;
@@ -1929,7 +1987,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
+ GFP_KERNEL);
+ if (!info->desc)
+ return -ENOMEM;
+
+ info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
if (IS_ERR(reset))
@@ -1956,7 +2023,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */
if (info->desc->confops) {
- pincfg = ocelot_pinctrl_create_pincfg(pdev);
+ pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
if (IS_ERR(pincfg))
dev_dbg(dev, "Failed to create pincfg regmap\n");
else
@@ -1984,4 +2051,5 @@ static struct platform_driver ocelot_pinctrl_driver = {
},
.probe = ocelot_pinctrl_probe,
};
-builtin_platform_driver(ocelot_pinctrl_driver);
+module_platform_driver(ocelot_pinctrl_driver);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index d42f18cb1bc7..fecc25d35d02 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* pinctrl-palmas.c -- TI PALMAS series pin control driver.
*
* Copyright (c) 2013, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/delay.h>
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 2a86c1035cc8..3eb40e230d98 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -207,6 +207,7 @@ struct starfive_pinctrl {
void __iomem *base;
void __iomem *padctl;
struct pinctrl_dev *pctl;
+ struct mutex mutex; /* serialize adding groups and functions */
};
static inline unsigned int starfive_pin_to_gpio(const struct starfive_pinctrl *sfp,
@@ -522,6 +523,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
+ mutex_lock(&sfp->mutex);
for_each_child_of_node(np, child) {
int npins;
int i;
@@ -615,12 +617,14 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
*maps = map;
*num_maps = nmaps;
+ mutex_unlock(&sfp->mutex);
return 0;
put_child:
of_node_put(child);
free_map:
pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&sfp->mutex);
return ret;
}
@@ -1267,6 +1271,7 @@ static int starfive_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
sfp->gc.parent = dev;
raw_spin_lock_init(&sfp->lock);
+ mutex_init(&sfp->mutex);
ret = devm_pinctrl_register_and_init(dev, &starfive_desc, sfp, &sfp->pctl);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index 93df0d4c0a24..3580e0fd94ed 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Utils functions to implement the pincontrol driver.
*
* Copyright (c) 2013, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/pinctrl/pinctrl-utils.h b/drivers/pinctrl/pinctrl-utils.h
index 8f9f2d28c5b8..cec407a8cc4e 100644
--- a/drivers/pinctrl/pinctrl-utils.h
+++ b/drivers/pinctrl/pinctrl-utils.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Utils functions to implement the pincontrol driver.
*
* Copyright (c) 2013, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#ifndef __PINCTRL_UTILS_H__
#define __PINCTRL_UTILS_H__
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index e14012209992..7d2fbf8a02cd 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -163,6 +163,8 @@ static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
* @num_groups: Number of function groups.
*
* Get function's group count and group names.
+ *
+ * Return: 0
*/
static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
@@ -410,6 +412,10 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_ENABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
case PIN_CONFIG_MODE_LOW_POWER:
/*
* These cases are mentioned in dts but configurable
@@ -418,6 +424,11 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
*/
ret = 0;
break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_DISABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
default:
dev_warn(pctldev->dev,
"unsupported configuration parameter '%u'\n",
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3daeb9772391..f415c13caae0 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -113,6 +113,14 @@ config PINCTRL_MSM8X74
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
+config PINCTRL_MSM8909
+ tristate "Qualcomm 8909 pin controller driver"
+ depends on OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8909 platform.
+
config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver"
depends on OF
@@ -320,6 +328,15 @@ config PINCTRL_SM6350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM6350 platform.
+config PINCTRL_SM6375
+ tristate "Qualcomm Technologies Inc SM6375 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM6375 platform.
+
config PINCTRL_SDX65
tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
depends on GPIOLIB && OF
@@ -367,7 +384,7 @@ config PINCTRL_SM8350
config PINCTRL_SM8450
tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 4f0ee7597f81..fbd64853a24d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PINCTRL_MSM8226) += pinctrl-msm8226.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
+obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
+obj-$(CONFIG_PINCTRL_SM6375) += pinctrl-sm6375.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 74810ec4df44..e97ce45b6d53 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -401,7 +401,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
"Slew resource not provided\n");
- if (data->is_clk_optional)
+ if (of_property_read_bool(dev->of_node, "qcom,adsp-bypass-mode"))
ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
else
ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index 759d5d8da562..afbac2a6c82c 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -77,7 +77,6 @@ struct lpi_pinctrl_variant_data {
int ngroups;
const struct lpi_function *functions;
int nfunctions;
- bool is_clk_optional;
};
int lpi_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
new file mode 100644
index 000000000000..6dd15b910632
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2022, Kernkonzept GmbH.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc msm8909_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "SDC1_CLK"),
+ PINCTRL_PIN(114, "SDC1_CMD"),
+ PINCTRL_PIN(115, "SDC1_DATA"),
+ PINCTRL_PIN(116, "SDC2_CLK"),
+ PINCTRL_PIN(117, "SDC2_CMD"),
+ PINCTRL_PIN(118, "SDC2_DATA"),
+ PINCTRL_PIN(119, "QDSD_CLK"),
+ PINCTRL_PIN(120, "QDSD_CMD"),
+ PINCTRL_PIN(121, "QDSD_DATA0"),
+ PINCTRL_PIN(122, "QDSD_DATA1"),
+ PINCTRL_PIN(123, "QDSD_DATA2"),
+ PINCTRL_PIN(124, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+
+static const unsigned int sdc1_clk_pins[] = { 113 };
+static const unsigned int sdc1_cmd_pins[] = { 114 };
+static const unsigned int sdc1_data_pins[] = { 115 };
+static const unsigned int sdc2_clk_pins[] = { 116 };
+static const unsigned int sdc2_cmd_pins[] = { 117 };
+static const unsigned int sdc2_data_pins[] = { 118 };
+static const unsigned int qdsd_clk_pins[] = { 119 };
+static const unsigned int qdsd_cmd_pins[] = { 120 };
+static const unsigned int qdsd_data0_pins[] = { 121 };
+static const unsigned int qdsd_data1_pins[] = { 122 };
+static const unsigned int qdsd_data2_pins[] = { 123 };
+static const unsigned int qdsd_data3_pins[] = { 124 };
+
+enum msm8909_functions {
+ msm_mux_gpio,
+ msm_mux_adsp_ext,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac,
+ msm_mux_atest_gpsadc0,
+ msm_mux_atest_gpsadc1,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi1_cs1,
+ msm_mux_blsp_spi1_cs2,
+ msm_mux_blsp_spi1_cs3,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi2_cs1,
+ msm_mux_blsp_spi2_cs2,
+ msm_mux_blsp_spi2_cs3,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi3_cs1,
+ msm_mux_blsp_spi3_cs2,
+ msm_mux_blsp_spi3_cs3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uim1,
+ msm_mux_blsp_uim2,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cdc_pdm0,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_ext_lpass,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gsm0_tx,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mdp_vsync,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_tsync,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s_data0_a,
+ msm_mux_pri_mi2s_data0_b,
+ msm_mux_pri_mi2s_data1_a,
+ msm_mux_pri_mi2s_data1_b,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_sck_a,
+ msm_mux_pri_mi2s_sck_b,
+ msm_mux_pri_mi2s_ws_a,
+ msm_mux_pri_mi2s_ws_b,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sec_mi2s,
+ msm_mux_smb_int,
+ msm_mux_ssbi0,
+ msm_mux_ssbi1,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim3_clk,
+ msm_mux_uim3_data,
+ msm_mux_uim3_present,
+ msm_mux_uim3_reset,
+ msm_mux_uim_batt,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux__,
+};
+
+static const char * const adsp_ext_groups[] = { "gpio38" };
+static const char * const atest_bbrx0_groups[] = { "gpio37" };
+static const char * const atest_bbrx1_groups[] = { "gpio36" };
+static const char * const atest_char0_groups[] = { "gpio62" };
+static const char * const atest_char1_groups[] = { "gpio61" };
+static const char * const atest_char2_groups[] = { "gpio60" };
+static const char * const atest_char3_groups[] = { "gpio59" };
+static const char * const atest_char_groups[] = { "gpio63" };
+static const char * const atest_combodac_groups[] = {
+ "gpio32", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio47", "gpio48", "gpio66", "gpio81", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio94", "gpio95", "gpio110"
+};
+static const char * const atest_gpsadc0_groups[] = { "gpio65" };
+static const char * const atest_gpsadc1_groups[] = { "gpio79" };
+static const char * const atest_wlan0_groups[] = { "gpio96" };
+static const char * const atest_wlan1_groups[] = { "gpio97" };
+static const char * const bimc_dte0_groups[] = { "gpio6", "gpio59" };
+static const char * const bimc_dte1_groups[] = { "gpio7", "gpio60" };
+static const char * const blsp_i2c1_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_i2c2_groups[] = { "gpio111", "gpio112" };
+static const char * const blsp_i2c3_groups[] = { "gpio29", "gpio30" };
+static const char * const blsp_i2c4_groups[] = { "gpio14", "gpio15" };
+static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" };
+static const char * const blsp_i2c6_groups[] = { "gpio10", "gpio11" };
+static const char * const blsp_spi1_cs1_groups[] = { "gpio97" };
+static const char * const blsp_spi1_cs2_groups[] = { "gpio37" };
+static const char * const blsp_spi1_cs3_groups[] = { "gpio65" };
+static const char * const blsp_spi1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_spi2_cs1_groups[] = { "gpio98" };
+static const char * const blsp_spi2_cs2_groups[] = { "gpio17" };
+static const char * const blsp_spi2_cs3_groups[] = { "gpio5" };
+static const char * const blsp_spi2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_spi3_cs1_groups[] = { "gpio95" };
+static const char * const blsp_spi3_cs2_groups[] = { "gpio65" };
+static const char * const blsp_spi3_cs3_groups[] = { "gpio4" };
+static const char * const blsp_spi3_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_uim1_groups[] = { "gpio4", "gpio5" };
+static const char * const blsp_uim2_groups[] = { "gpio20", "gpio21" };
+static const char * const cam_mclk_groups[] = { "gpio26", "gpio27" };
+static const char * const cci_async_groups[] = { "gpio33" };
+static const char * const cci_timer0_groups[] = { "gpio31" };
+static const char * const cci_timer1_groups[] = { "gpio32" };
+static const char * const cci_timer2_groups[] = { "gpio38" };
+static const char * const cdc_pdm0_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64"
+};
+static const char * const dbg_out_groups[] = { "gpio10" };
+static const char * const dmic0_clk_groups[] = { "gpio4" };
+static const char * const dmic0_data_groups[] = { "gpio5" };
+static const char * const ebi0_wrcdc_groups[] = { "gpio64" };
+static const char * const ebi2_a_groups[] = { "gpio99" };
+static const char * const ebi2_lcd_groups[] = {
+ "gpio24", "gpio24", "gpio25", "gpio95"
+};
+static const char * const ext_lpass_groups[] = { "gpio45" };
+static const char * const gcc_gp1_clk_a_groups[] = { "gpio49" };
+static const char * const gcc_gp1_clk_b_groups[] = { "gpio14" };
+static const char * const gcc_gp2_clk_a_groups[] = { "gpio50" };
+static const char * const gcc_gp2_clk_b_groups[] = { "gpio12" };
+static const char * const gcc_gp3_clk_a_groups[] = { "gpio51" };
+static const char * const gcc_gp3_clk_b_groups[] = { "gpio13" };
+static const char * const gcc_plltest_groups[] = { "gpio66", "gpio67" };
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112"
+};
+static const char * const gsm0_tx_groups[] = { "gpio85" };
+static const char * const ldo_en_groups[] = { "gpio99" };
+static const char * const ldo_update_groups[] = { "gpio98" };
+static const char * const m_voc_groups[] = { "gpio8", "gpio95" };
+static const char * const mdp_vsync_groups[] = { "gpio24", "gpio25" };
+static const char * const modem_tsync_groups[] = { "gpio83" };
+static const char * const nav_pps_groups[] = { "gpio83" };
+static const char * const nav_tsync_groups[] = { "gpio83" };
+static const char * const pa_indicator_groups[] = { "gpio82" };
+static const char * const pbs0_groups[] = { "gpio90" };
+static const char * const pbs1_groups[] = { "gpio91" };
+static const char * const pbs2_groups[] = { "gpio92" };
+static const char * const pri_mi2s_data0_a_groups[] = { "gpio62" };
+static const char * const pri_mi2s_data0_b_groups[] = { "gpio95" };
+static const char * const pri_mi2s_data1_a_groups[] = { "gpio63" };
+static const char * const pri_mi2s_data1_b_groups[] = { "gpio96" };
+static const char * const pri_mi2s_mclk_a_groups[] = { "gpio59" };
+static const char * const pri_mi2s_mclk_b_groups[] = { "gpio98" };
+static const char * const pri_mi2s_sck_a_groups[] = { "gpio60" };
+static const char * const pri_mi2s_sck_b_groups[] = { "gpio94" };
+static const char * const pri_mi2s_ws_a_groups[] = { "gpio61" };
+static const char * const pri_mi2s_ws_b_groups[] = { "gpio110" };
+static const char * const prng_rosc_groups[] = { "gpio43" };
+static const char * const pwr_crypto_enabled_a_groups[] = { "gpio35" };
+static const char * const pwr_crypto_enabled_b_groups[] = { "gpio96" };
+static const char * const pwr_modem_enabled_a_groups[] = { "gpio28" };
+static const char * const pwr_modem_enabled_b_groups[] = { "gpio94" };
+static const char * const pwr_nav_enabled_a_groups[] = { "gpio34" };
+static const char * const pwr_nav_enabled_b_groups[] = { "gpio95" };
+static const char * const qdss_cti_trig_in_a0_groups[] = { "gpio20" };
+static const char * const qdss_cti_trig_in_a1_groups[] = { "gpio49" };
+static const char * const qdss_cti_trig_in_b0_groups[] = { "gpio21" };
+static const char * const qdss_cti_trig_in_b1_groups[] = { "gpio50" };
+static const char * const qdss_cti_trig_out_a0_groups[] = { "gpio23" };
+static const char * const qdss_cti_trig_out_a1_groups[] = { "gpio52" };
+static const char * const qdss_cti_trig_out_b0_groups[] = { "gpio22" };
+static const char * const qdss_cti_trig_out_b1_groups[] = { "gpio51" };
+static const char * const qdss_traceclk_a_groups[] = { "gpio46" };
+static const char * const qdss_tracectl_a_groups[] = { "gpio45" };
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio47", "gpio48", "gpio58", "gpio65", "gpio94", "gpio96",
+ "gpio97"
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio14", "gpio16", "gpio17", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio93"
+};
+static const char * const sd_write_groups[] = { "gpio99" };
+static const char * const sec_mi2s_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio98"
+};
+static const char * const smb_int_groups[] = { "gpio58" };
+static const char * const ssbi0_groups[] = { "gpio88" };
+static const char * const ssbi1_groups[] = { "gpio89" };
+static const char * const uim1_clk_groups[] = { "gpio54" };
+static const char * const uim1_data_groups[] = { "gpio53" };
+static const char * const uim1_present_groups[] = { "gpio56" };
+static const char * const uim1_reset_groups[] = { "gpio55" };
+static const char * const uim2_clk_groups[] = { "gpio50" };
+static const char * const uim2_data_groups[] = { "gpio49" };
+static const char * const uim2_present_groups[] = { "gpio52" };
+static const char * const uim2_reset_groups[] = { "gpio51" };
+static const char * const uim3_clk_groups[] = { "gpio23" };
+static const char * const uim3_data_groups[] = { "gpio20" };
+static const char * const uim3_present_groups[] = { "gpio21" };
+static const char * const uim3_reset_groups[] = { "gpio22" };
+static const char * const uim_batt_groups[] = { "gpio57" };
+static const char * const wcss_bt_groups[] = { "gpio39", "gpio47", "gpio48" };
+static const char * const wcss_fm_groups[] = { "gpio45", "gpio46" };
+static const char * const wcss_wlan_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
+};
+
+static const struct msm_function msm8909_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_combodac),
+ FUNCTION(atest_gpsadc0),
+ FUNCTION(atest_gpsadc1),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi2_cs1),
+ FUNCTION(blsp_spi2_cs2),
+ FUNCTION(blsp_spi2_cs3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi3_cs3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(dbg_out),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ext_lpass),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mdp_vsync),
+ FUNCTION(modem_tsync),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_tsync),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pri_mi2s_data0_a),
+ FUNCTION(pri_mi2s_data0_b),
+ FUNCTION(pri_mi2s_data1_a),
+ FUNCTION(pri_mi2s_data1_b),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_sck_a),
+ FUNCTION(pri_mi2s_sck_b),
+ FUNCTION(pri_mi2s_ws_a),
+ FUNCTION(pri_mi2s_ws_b),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(smb_int),
+ FUNCTION(ssbi0),
+ FUNCTION(ssbi1),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim3_clk),
+ FUNCTION(uim3_data),
+ FUNCTION(uim3_present),
+ FUNCTION(uim3_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(wcss_bt),
+ FUNCTION(wcss_fm),
+ FUNCTION(wcss_wlan),
+};
+
+static const struct msm_pingroup msm8909_groups[] = {
+ PINGROUP(0, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(1, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(2, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(3, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi3_cs3, dmic0_clk, _, _, _, _),
+ PINGROUP(5, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi2_cs3, dmic0_data, _, _, _, _),
+ PINGROUP(6, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte0),
+ PINGROUP(7, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte1),
+ PINGROUP(8, blsp_spi6, m_voc, _, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(9, blsp_spi6, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(10, blsp_spi6, blsp_i2c6, dbg_out, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(11, blsp_spi6, blsp_i2c6, _, _, _, _, _, _, _),
+ PINGROUP(12, blsp_spi4, gcc_gp2_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(13, blsp_spi4, gcc_gp3_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_i2c4, gcc_gp1_clk_b, _, _, _, _, _, qdss_tracedata_b),
+ PINGROUP(15, blsp_spi4, blsp_i2c4, _, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(17, blsp_spi5, blsp_spi2_cs2, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(18, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(20, uim3_data, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_a0, _, _, _),
+ PINGROUP(21, uim3_present, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_b0, _, _, _),
+ PINGROUP(22, uim3_reset, _, qdss_cti_trig_out_b0, _, _, _, _, _, _),
+ PINGROUP(23, uim3_clk, qdss_cti_trig_out_a0, _, _, _, _, _, _, _),
+ PINGROUP(24, mdp_vsync, ebi2_lcd, ebi2_lcd, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, ebi2_lcd, _, _, _, _, _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(28, _, pwr_modem_enabled_a, _, _, _, _, _, _, _),
+ PINGROUP(29, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(30, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(31, cci_timer0, _, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(32, cci_timer1, _, qdss_tracedata_b, _, atest_combodac, _, _, _, _),
+ PINGROUP(33, cci_async, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(34, pwr_nav_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(35, pwr_crypto_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(36, qdss_tracedata_b, _, atest_bbrx1, _, _, _, _, _, _),
+ PINGROUP(37, blsp_spi1_cs2, qdss_tracedata_b, _, atest_bbrx0, _, _, _, _, _),
+ PINGROUP(38, cci_timer2, adsp_ext, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(39, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(40, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(41, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(42, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(43, wcss_wlan, prng_rosc, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(44, wcss_wlan, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(45, wcss_fm, ext_lpass, qdss_tracectl_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(46, wcss_fm, qdss_traceclk_a, _, _, _, _, _, _, _),
+ PINGROUP(47, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(48, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(49, uim2_data, gcc_gp1_clk_a, qdss_cti_trig_in_a1, _, _, _, _, _, _),
+ PINGROUP(50, uim2_clk, gcc_gp2_clk_a, qdss_cti_trig_in_b1, _, _, _, _, _, _),
+ PINGROUP(51, uim2_reset, gcc_gp3_clk_a, qdss_cti_trig_out_b1, _, _, _, _, _, _),
+ PINGROUP(52, uim2_present, qdss_cti_trig_out_a1, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(58, qdss_tracedata_a, smb_int, _, _, _, _, _, _, _),
+ PINGROUP(59, cdc_pdm0, pri_mi2s_mclk_a, atest_char3, _, _, _, _, _, bimc_dte0),
+ PINGROUP(60, cdc_pdm0, pri_mi2s_sck_a, atest_char2, _, _, _, _, _, bimc_dte1),
+ PINGROUP(61, cdc_pdm0, pri_mi2s_ws_a, atest_char1, _, _, _, _, _, _),
+ PINGROUP(62, cdc_pdm0, pri_mi2s_data0_a, atest_char0, _, _, _, _, _, _),
+ PINGROUP(63, cdc_pdm0, pri_mi2s_data1_a, atest_char, _, _, _, _, _, _),
+ PINGROUP(64, cdc_pdm0, _, _, _, _, _, ebi0_wrcdc, _, _),
+ PINGROUP(65, blsp_spi3_cs2, blsp_spi1_cs3, qdss_tracedata_a, _, atest_gpsadc0, _, _, _, _),
+ PINGROUP(66, _, gcc_plltest, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(67, _, gcc_plltest, _, _, _, _, _, _, _),
+ PINGROUP(68, _, _, _, _, _, _, _, _, _),
+ PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ PINGROUP(75, _, _, _, _, _, _, _, _, _),
+ PINGROUP(76, _, _, _, _, _, _, _, _, _),
+ PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ PINGROUP(78, _, _, _, _, _, _, _, _, _),
+ PINGROUP(79, _, _, atest_gpsadc1, _, _, _, _, _, _),
+ PINGROUP(80, _, _, _, _, _, _, _, _, _),
+ PINGROUP(81, _, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(82, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(83, _, modem_tsync, nav_tsync, nav_pps, _, atest_combodac, _, _, _),
+ PINGROUP(84, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(85, gsm0_tx, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(86, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ PINGROUP(88, _, ssbi0, _, _, _, _, _, _, _),
+ PINGROUP(89, _, ssbi1, _, _, _, _, _, _, _),
+ PINGROUP(90, pbs0, _, _, _, _, _, _, _, _),
+ PINGROUP(91, pbs1, _, _, _, _, _, _, _, _),
+ PINGROUP(92, pbs2, _, _, _, _, _, _, _, _),
+ PINGROUP(93, qdss_tracedata_b, _, _, _, _, _, _, _, _),
+ PINGROUP(94, pri_mi2s_sck_b, pwr_modem_enabled_b, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(95, blsp_spi3_cs1, pri_mi2s_data0_b, ebi2_lcd, m_voc, pwr_nav_enabled_b, _, atest_combodac, _, _),
+ PINGROUP(96, pri_mi2s_data1_b, _, pwr_crypto_enabled_b, qdss_tracedata_a, _, atest_wlan0, _, _, _),
+ PINGROUP(97, blsp_spi1_cs1, qdss_tracedata_a, _, atest_wlan1, _, _, _, _, _),
+ PINGROUP(98, sec_mi2s, pri_mi2s_mclk_b, blsp_spi2_cs1, ldo_update, _, _, _, _, _),
+ PINGROUP(99, ebi2_a, sd_write, ldo_en, _, _, _, _, _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, pri_mi2s_ws_b, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(111, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ PINGROUP(112, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_gpio_wakeirq_map msm8909_mpm_map[] = {
+ { 65, 3 }, { 5, 4 }, { 11, 5 }, { 12, 6 }, { 64, 7 }, { 58, 8 },
+ { 50, 9 }, { 13, 10 }, { 49, 11 }, { 20, 12 }, { 21, 13 }, { 25, 14 },
+ { 46, 15 }, { 45, 16 }, { 28, 17 }, { 44, 18 }, { 31, 19 }, { 43, 20 },
+ { 42, 21 }, { 34, 22 }, { 35, 23 }, { 36, 24 }, { 37, 25 }, { 38, 26 },
+ { 39, 27 }, { 40, 28 }, { 41, 29 }, { 90, 30 }, { 91, 32 }, { 92, 33 },
+ { 94, 34 }, { 95, 35 }, { 96, 36 }, { 97, 37 }, { 98, 38 },
+ { 110, 39 }, { 111, 40 }, { 112, 41 }, { 105, 42 }, { 107, 43 },
+ { 47, 50 }, { 48, 51 },
+};
+
+static const struct msm_pinctrl_soc_data msm8909_pinctrl = {
+ .pins = msm8909_pins,
+ .npins = ARRAY_SIZE(msm8909_pins),
+ .functions = msm8909_functions,
+ .nfunctions = ARRAY_SIZE(msm8909_functions),
+ .groups = msm8909_groups,
+ .ngroups = ARRAY_SIZE(msm8909_groups),
+ .ngpios = 113,
+ .wakeirq_map = msm8909_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8909_mpm_map),
+};
+
+static int msm8909_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8909_pinctrl);
+}
+
+static const struct of_device_id msm8909_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8909-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8909_pinctrl_of_match);
+
+static struct platform_driver msm8909_pinctrl_driver = {
+ .driver = {
+ .name = "msm8909-pinctrl",
+ .of_match_table = msm8909_pinctrl_of_match,
+ },
+ .probe = msm8909_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8909_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8909_pinctrl_driver);
+}
+arch_initcall(msm8909_pinctrl_init);
+
+static void __exit msm8909_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8909_pinctrl_driver);
+}
+module_exit(msm8909_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm MSM8909 TLMM pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 396db12ae904..bf68913ba821 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = {
PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
index 2add9a4520c2..d615b6c55b89 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -141,7 +141,6 @@ static const struct lpi_pinctrl_variant_data sc7280_lpi_data = {
.ngroups = ARRAY_SIZE(sc7280_groups),
.functions = sc7280_functions,
.nfunctions = ARRAY_SIZE(sc7280_functions),
- .is_clk_optional = true,
};
static const struct of_device_id lpi_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
new file mode 100644
index 000000000000..1138e683e6f4
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -0,0 +1,1544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm6375_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "UFS_RESET"),
+ PINCTRL_PIN(157, "SDC1_RCLK"),
+ PINCTRL_PIN(158, "SDC1_CLK"),
+ PINCTRL_PIN(159, "SDC1_CMD"),
+ PINCTRL_PIN(160, "SDC1_DATA"),
+ PINCTRL_PIN(161, "SDC2_CLK"),
+ PINCTRL_PIN(162, "SDC2_CMD"),
+ PINCTRL_PIN(163, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+static const unsigned int sdc1_rclk_pins[] = { 157 };
+static const unsigned int sdc1_clk_pins[] = { 158 };
+static const unsigned int sdc1_cmd_pins[] = { 159 };
+static const unsigned int sdc1_data_pins[] = { 160 };
+static const unsigned int sdc2_clk_pins[] = { 161 };
+static const unsigned int sdc2_cmd_pins[] = { 162 };
+static const unsigned int sdc2_data_pins[] = { 163 };
+static const unsigned int ufs_reset_pins[] = { 156 };
+
+enum sm6375_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb20,
+ msm_mux_atest_usb21,
+ msm_mux_atest_usb22,
+ msm_mux_atest_usb23,
+ msm_mux_audio_ref,
+ msm_mux_btfm_slimbus,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gp_pdm0,
+ msm_mux_gp_pdm1,
+ msm_mux_gp_pdm2,
+ msm_mux_gpio,
+ msm_mux_gps_tx,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_ext,
+ msm_mux_m_voc,
+ msm_mux_mclk,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s_0,
+ msm_mux_mi2s_1,
+ msm_mux_mi2s_2,
+ msm_mux_mss_lte,
+ msm_mux_nav_gpio,
+ msm_mux_nav_pps,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag0,
+ msm_mux_phase_flag1,
+ msm_mux_phase_flag10,
+ msm_mux_phase_flag11,
+ msm_mux_phase_flag12,
+ msm_mux_phase_flag13,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_phase_flag16,
+ msm_mux_phase_flag17,
+ msm_mux_phase_flag18,
+ msm_mux_phase_flag19,
+ msm_mux_phase_flag2,
+ msm_mux_phase_flag20,
+ msm_mux_phase_flag21,
+ msm_mux_phase_flag22,
+ msm_mux_phase_flag23,
+ msm_mux_phase_flag24,
+ msm_mux_phase_flag25,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag3,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_phase_flag4,
+ msm_mux_phase_flag5,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag7,
+ msm_mux_phase_flag8,
+ msm_mux_phase_flag9,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_gpio0,
+ msm_mux_qdss_gpio1,
+ msm_mux_qdss_gpio10,
+ msm_mux_qdss_gpio11,
+ msm_mux_qdss_gpio12,
+ msm_mux_qdss_gpio13,
+ msm_mux_qdss_gpio14,
+ msm_mux_qdss_gpio15,
+ msm_mux_qdss_gpio2,
+ msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio4,
+ msm_mux_qdss_gpio5,
+ msm_mux_qdss_gpio6,
+ msm_mux_qdss_gpio7,
+ msm_mux_qdss_gpio8,
+ msm_mux_qdss_gpio9,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup10,
+ msm_mux_qup11_f1,
+ msm_mux_qup11_f2,
+ msm_mux_qup12,
+ msm_mux_qup13_f1,
+ msm_mux_qup13_f2,
+ msm_mux_qup14,
+ msm_mux_sd_write,
+ msm_mux_sdc1_tb,
+ msm_mux_sdc2_tb,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux_wlan2_adc0,
+ msm_mux_wlan2_adc1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+ "gpio51", "gpio52", "gpio53", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68", "gpio69", "gpio75", "gpio76", "gpio77", "gpio78",
+ "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85",
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", "gpio92",
+ "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+ "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio119", "gpio120", "gpio124", "gpio125", "gpio126",
+ "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", "gpio132",
+ "gpio133", "gpio134", "gpio135", "gpio136", "gpio141", "gpio142",
+ "gpio143", "gpio150", "gpio151", "gpio152", "gpio153", "gpio154",
+ "gpio155",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio89",
+};
+static const char * const cci_async_groups[] = {
+ "gpio35", "gpio36", "gpio48", "gpio52", "gpio53",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio2", "gpio3", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44",
+};
+static const char * const gps_tx_groups[] = {
+ "gpio101", "gpio102", "gpio107", "gpio108",
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio37", "gpio68",
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio8", "gpio52",
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio57",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio90",
+};
+static const char * const mclk_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio6", "gpio23", "gpio24", "gpio27", "gpio28",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio65", "gpio66",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio101", "gpio101", "gpio102", "gpio102",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio27",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio103",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio106",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio124",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio87",
+};
+static const char * const atest_char_groups[] = {
+ "gpio95",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio96",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio97",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio98",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio99",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio92",
+};
+static const char * const atest_tsens2_groups[] = {
+ "gpio93",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio83",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio84",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio85",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio86",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio87",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio88",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio89",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio90",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio91",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio92",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio60",
+};
+static const char * const btfm_slimbus_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio34",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio35",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio36",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio37",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio0", "gpio1", "gpio2",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio86", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio87", "gpio91",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio88", "gpio92",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio89", "gpio93",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio12", "gpio118",
+};
+static const char * const edp_lcd_groups[] = {
+ "gpio23",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio48", "gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio21",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio22",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio95",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio96",
+};
+static const char * const lpass_ext_groups[] = {
+ "gpio60", "gpio93",
+};
+static const char * const m_voc_groups[] = {
+ "gpio12",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio47",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio48",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio56",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio57",
+};
+static const char * const mi2s_0_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+static const char * const mi2s_1_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const mi2s_2_groups[] = {
+ "gpio60",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio101", "gpio102",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio118",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio12",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio17",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio41",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio42",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio43",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio44",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio45",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio46",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio47",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio48",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio49",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio50",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio18",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio51",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio52",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio56",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio60",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio61",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio63",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio64",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio34",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio67",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio68",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio35",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio36",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio37",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio38",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio39",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio40",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio13",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio98",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio14",
+};
+static const char * const prng_rosc0_groups[] = {
+ "gpio97",
+};
+static const char * const prng_rosc1_groups[] = {
+ "gpio98",
+};
+static const char * const prng_rosc2_groups[] = {
+ "gpio99",
+};
+static const char * const prng_rosc3_groups[] = {
+ "gpio100",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio3", "gpio6", "gpio7", "gpio61", "gpio62", "gpio86",
+ "gpio87",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio8", "gpio9", "gpio63", "gpio64",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio39", "gpio65",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio40", "gpio66",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio50", "gpio56",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio51", "gpio57",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio34", "gpio52",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio35", "gpio53",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio27", "gpio36",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio28", "gpio37",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio38", "gpio41",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio42", "gpio47",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio43", "gpio88",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio44", "gpio89",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio45", "gpio90",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio46", "gpio91",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio48", "gpio92",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio49", "gpio93",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio105",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio104",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio108",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio107",
+};
+static const char * const qup00_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup01_groups[] = {
+ "gpio61", "gpio62", "gpio63", "gpio64",
+};
+static const char * const qup02_groups[] = {
+ "gpio45", "gpio46", "gpio48", "gpio56", "gpio57",
+};
+static const char * const qup10_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup11_f1_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup11_f2_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup12_groups[] = {
+ "gpio19", "gpio19", "gpio20", "gpio20",
+};
+static const char * const qup13_f1_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup13_f2_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup14_groups[] = {
+ "gpio4", "gpio4", "gpio5", "gpio5",
+};
+static const char * const sd_write_groups[] = {
+ "gpio85",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio4",
+};
+static const char * const sdc2_tb_groups[] = {
+ "gpio5",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio3",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio61",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio62",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio63",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio64",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio88",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio88",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio80",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio79",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio82",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio81",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio76",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio75",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio78",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio77",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio47",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio49",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio89",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio90",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio92",
+};
+static const char * const wlan2_adc0_groups[] = {
+ "gpio91",
+};
+static const char * const wlan2_adc1_groups[] = {
+ "gpio93",
+};
+
+static const struct msm_function sm6375_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb20),
+ FUNCTION(atest_usb21),
+ FUNCTION(atest_usb22),
+ FUNCTION(atest_usb23),
+ FUNCTION(audio_ref),
+ FUNCTION(btfm_slimbus),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gpio),
+ FUNCTION(gps_tx),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(lpass_ext),
+ FUNCTION(m_voc),
+ FUNCTION(mclk),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s_0),
+ FUNCTION(mi2s_1),
+ FUNCTION(mi2s_2),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_gpio),
+ FUNCTION(nav_pps),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag0),
+ FUNCTION(phase_flag1),
+ FUNCTION(phase_flag10),
+ FUNCTION(phase_flag11),
+ FUNCTION(phase_flag12),
+ FUNCTION(phase_flag13),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(phase_flag16),
+ FUNCTION(phase_flag17),
+ FUNCTION(phase_flag18),
+ FUNCTION(phase_flag19),
+ FUNCTION(phase_flag2),
+ FUNCTION(phase_flag20),
+ FUNCTION(phase_flag21),
+ FUNCTION(phase_flag22),
+ FUNCTION(phase_flag23),
+ FUNCTION(phase_flag24),
+ FUNCTION(phase_flag25),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag3),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(phase_flag4),
+ FUNCTION(phase_flag5),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag7),
+ FUNCTION(phase_flag8),
+ FUNCTION(phase_flag9),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_gpio0),
+ FUNCTION(qdss_gpio1),
+ FUNCTION(qdss_gpio10),
+ FUNCTION(qdss_gpio11),
+ FUNCTION(qdss_gpio12),
+ FUNCTION(qdss_gpio13),
+ FUNCTION(qdss_gpio14),
+ FUNCTION(qdss_gpio15),
+ FUNCTION(qdss_gpio2),
+ FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(qdss_gpio6),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup10),
+ FUNCTION(qup11_f1),
+ FUNCTION(qup11_f2),
+ FUNCTION(qup12),
+ FUNCTION(qup13_f1),
+ FUNCTION(qup13_f2),
+ FUNCTION(qup14),
+ FUNCTION(sd_write),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sdc2_tb),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(wlan2_adc1),
+};
+
+/*
+ * Every pin is maintained as a single group; any missing or non-existent pin
+ * is represented by a dummy group to keep the pin group index in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm6375_groups[] = {
+ [0] = PINGROUP(0, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [1] = PINGROUP(1, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup00, cci_i2c, cri_trng, qdss_cti, _, _, _, _, _),
+ [3] = PINGROUP(3, qup00, cci_i2c, sp_cmu, dbg_out, qdss_cti, _, _, _, _),
+ [4] = PINGROUP(4, qup14, qup14, sdc1_tb, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup14, qup14, sdc2_tb, _, _, _, _, _, _),
+ [6] = PINGROUP(6, mdp_vsync, qdss_cti, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qdss_cti, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, gp_pdm1, qdss_gpio, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qdss_gpio, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, m_voc, dp_hot, _, phase_flag0, _, _, _, _, _),
+ [13] = PINGROUP(13, qup10, pll_bypassnl, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup10, pll_reset, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup10, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup10, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, _, phase_flag1, qup10, _, _, _, _, _, _),
+ [18] = PINGROUP(18, _, phase_flag2, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [21] = PINGROUP(21, gcc_gp2, ddr_bist, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, gcc_gp3, ddr_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, mdp_vsync, edp_lcd, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup11_f1, qup11_f2, mdp_vsync, pll_bist, _, qdss_gpio14, _, _, _),
+ [28] = PINGROUP(28, qup11_f1, qup11_f2, mdp_vsync, _, qdss_gpio15, _, _, _, _),
+ [29] = PINGROUP(29, cam_mclk, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, cam_mclk, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, cam_mclk, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, cam_mclk, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, cam_mclk, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, cci_timer0, _, phase_flag3, qdss_gpio12, _, _, _, _, _),
+ [35] = PINGROUP(35, cci_timer1, cci_async, _, phase_flag4, qdss_gpio13, _, _, _, _),
+ [36] = PINGROUP(36, cci_timer2, cci_async, _, phase_flag5, qdss_gpio14, _, _, _, _),
+ [37] = PINGROUP(37, cci_timer3, gp_pdm0, _, phase_flag6, qdss_gpio15, _, _, _, _),
+ [38] = PINGROUP(38, cci_timer4, _, phase_flag7, qdss_gpio2, _, _, _, _, _),
+ [39] = PINGROUP(39, cci_i2c, _, phase_flag8, qdss_gpio0, _, _, _, _, _),
+ [40] = PINGROUP(40, cci_i2c, _, phase_flag9, qdss_gpio1, _, _, _, _, _),
+ [41] = PINGROUP(41, cci_i2c, _, phase_flag10, qdss_gpio2, _, _, _, _, _),
+ [42] = PINGROUP(42, cci_i2c, _, phase_flag11, qdss_gpio3, _, _, _, _, _),
+ [43] = PINGROUP(43, cci_i2c, _, phase_flag12, qdss_gpio4, _, _, _, _, _),
+ [44] = PINGROUP(44, cci_i2c, _, phase_flag13, qdss_gpio5, _, _, _, _, _),
+ [45] = PINGROUP(45, qup02, _, phase_flag14, qdss_gpio6, _, _, _, _, _),
+ [46] = PINGROUP(46, qup02, _, phase_flag15, qdss_gpio7, _, _, _, _, _),
+ [47] = PINGROUP(47, mdp_vsync0, _, phase_flag16, qdss_gpio3, _, _, usb2phy_ac, _, _),
+ [48] = PINGROUP(48, cci_async, mdp_vsync1, gcc_gp1, _, phase_flag17, qdss_gpio8, qup02,
+ _, _),
+ [49] = PINGROUP(49, vfr_1, _, phase_flag18, qdss_gpio9, _, _, _, _, _),
+ [50] = PINGROUP(50, _, phase_flag19, qdss_gpio10, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, phase_flag20, qdss_gpio11, _, _, _, _, _, _),
+ [52] = PINGROUP(52, cci_async, gp_pdm1, _, phase_flag21, qdss_gpio12, _, _, _, _),
+ [53] = PINGROUP(53, cci_async, _, phase_flag22, qdss_gpio13, _, _, _, _, _),
+ [54] = PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup02, mdp_vsync2, _, phase_flag23, qdss_gpio10, _, _, _, _),
+ [57] = PINGROUP(57, qup02, mdp_vsync3, gp_pdm2, _, phase_flag24, qdss_gpio11, _, _, _),
+ [58] = PINGROUP(58, gcc_gp1, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, audio_ref, lpass_ext, mi2s_2, _, phase_flag25, _, _, _, _),
+ [61] = PINGROUP(61, qup01, tgu_ch0, _, phase_flag26, qdss_cti, _, _, _, _),
+ [62] = PINGROUP(62, qup01, tgu_ch1, _, phase_flag27, qdss_cti, _, _, _, _),
+ [63] = PINGROUP(63, qup01, tgu_ch2, _, phase_flag28, qdss_gpio, _, _, _, _),
+ [64] = PINGROUP(64, qup01, tgu_ch3, _, phase_flag29, qdss_gpio, _, _, _, _),
+ [65] = PINGROUP(65, mss_lte, _, qdss_gpio0, _, _, _, _, _, _),
+ [66] = PINGROUP(66, mss_lte, _, qdss_gpio1, _, _, _, _, _, _),
+ [67] = PINGROUP(67, btfm_slimbus, mi2s_1, _, phase_flag30, _, _, _, _, _),
+ [68] = PINGROUP(68, btfm_slimbus, mi2s_1, gp_pdm0, _, phase_flag31, _, _, _, _),
+ [69] = PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, uim2_data, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, uim2_clk, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, uim2_reset, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, uim2_present, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, uim1_data, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, uim1_clk, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, uim1_reset, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, uim1_present, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, atest_usb1, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, _, atest_usb10, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, sd_write, _, atest_usb11, _, _, _, _, _, _),
+ [86] = PINGROUP(86, btfm_slimbus, mi2s_1, _, qdss_cti, atest_usb12, ddr_pxi0, _, _, _),
+ [87] = PINGROUP(87, btfm_slimbus, mi2s_1, adsp_ext, _, qdss_cti, atest_usb13, ddr_pxi1, _,
+ _),
+ [88] = PINGROUP(88, mi2s_0, _, qdss_gpio4, _, atest_usb2, ddr_pxi2, tsense_pwm1,
+ tsense_pwm2, _),
+ [89] = PINGROUP(89, mi2s_0, agera_pll, _, qdss_gpio5, _, vsense_trigger, atest_usb20,
+ ddr_pxi3, _),
+ [90] = PINGROUP(90, mi2s_0, jitter_bist, _, qdss_gpio6, _, wlan1_adc0, atest_usb21,
+ ddr_pxi0, _),
+ [91] = PINGROUP(91, mi2s_0, _, qdss_gpio7, _, wlan2_adc0, atest_usb22, ddr_pxi1, _, _),
+ [92] = PINGROUP(92, _, qdss_gpio8, atest_tsens, wlan1_adc1, atest_usb23, ddr_pxi2, _, _,
+ _),
+ [93] = PINGROUP(93, mclk, lpass_ext, _, qdss_gpio9, atest_tsens2, wlan2_adc1, ddr_pxi3,
+ _, _),
+ [94] = PINGROUP(94, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, ldo_en, _, atest_char, _, _, _, _, _, _),
+ [96] = PINGROUP(96, ldo_update, _, atest_char0, _, _, _, _, _, _),
+ [97] = PINGROUP(97, prng_rosc0, _, atest_char1, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, atest_char2, _, _, prng_rosc1, pll_clk, _, _, _),
+ [99] = PINGROUP(99, _, atest_char3, _, _, prng_rosc2, _, _, _, _),
+ [100] = PINGROUP(100, _, _, prng_rosc3, _, _, _, _, _, _),
+ [101] = PINGROUP(101, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [102] = PINGROUP(102, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [103] = PINGROUP(103, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, qlink0_request, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, qlink0_enable, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qlink1_request, gps_tx, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qlink1_enable, gps_tx, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, pa_indicator, dp_hot, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb_phy, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = UFS_RESET(ufs_reset, 0x1ae000),
+ [157] = SDC_PINGROUP(sdc1_rclk, 0x1a1000, 0, 0),
+ [158] = SDC_PINGROUP(sdc1_clk, 0x1a0000, 13, 6),
+ [159] = SDC_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3),
+ [160] = SDC_PINGROUP(sdc1_data, 0x1a0000, 9, 0),
+ [161] = SDC_PINGROUP(sdc2_clk, 0x1a2000, 14, 6),
+ [162] = SDC_PINGROUP(sdc2_cmd, 0x1a2000, 11, 3),
+ [163] = SDC_PINGROUP(sdc2_data, 0x1a2000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm6375_mpm_map[] = {
+ { 0, 84 }, { 3, 6 }, { 4, 7 }, { 7, 8 }, { 8, 9 }, { 9, 10 }, { 11, 11 }, { 12, 13 },
+ { 13, 14 }, { 16, 16 }, { 17, 17 }, { 18, 18 }, { 19, 19 }, { 21, 20 }, { 22, 21 },
+ { 23, 23 }, { 24, 24 }, { 25, 25 }, { 27, 26 }, { 28, 27 }, { 37, 28 }, { 38, 29 },
+ { 48, 30 }, { 50, 31 }, { 51, 32 }, { 52, 33 }, { 57, 34 }, { 59, 35 }, { 60, 37 },
+ { 61, 38 }, { 62, 39 }, { 64, 40 }, { 66, 41 }, { 67, 42 }, { 68, 43 }, { 69, 44 },
+ { 78, 45 }, { 82, 36 }, { 83, 47 }, { 84, 48 }, { 85, 49 }, { 87, 50 }, { 88, 51 },
+ { 91, 52 }, { 94, 53 }, { 95, 54 }, { 96, 55 }, { 97, 56 }, { 98, 57 }, { 99, 58 },
+ { 100, 59 }, { 104, 60 }, { 107, 61 }, { 118, 62 }, { 124, 63 }, { 125, 64 }, { 126, 65 },
+ { 128, 66 }, { 129, 67 }, { 131, 69 }, { 133, 70 }, { 134, 71 }, { 136, 73 }, { 142, 74 },
+ { 150, 75 }, { 153, 76 }, { 155, 77 },
+};
+
+static const struct msm_pinctrl_soc_data sm6375_tlmm = {
+ .pins = sm6375_pins,
+ .npins = ARRAY_SIZE(sm6375_pins),
+ .functions = sm6375_functions,
+ .nfunctions = ARRAY_SIZE(sm6375_functions),
+ .groups = sm6375_groups,
+ .ngroups = ARRAY_SIZE(sm6375_groups),
+ .ngpios = 157,
+ .wakeirq_map = sm6375_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6375_mpm_map),
+};
+
+static int sm6375_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm6375_tlmm);
+}
+
+static const struct of_device_id sm6375_tlmm_of_match[] = {
+ { .compatible = "qcom,sm6375-tlmm", },
+ { },
+};
+
+static struct platform_driver sm6375_tlmm_driver = {
+ .driver = {
+ .name = "sm6375-tlmm",
+ .of_match_table = sm6375_tlmm_of_match,
+ },
+ .probe = sm6375_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm6375_tlmm_init(void)
+{
+ return platform_driver_register(&sm6375_tlmm_driver);
+}
+arch_initcall(sm6375_tlmm_init);
+
+static void __exit sm6375_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm6375_tlmm_driver);
+}
+module_exit(sm6375_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM6375 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm6375_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index af144e724bd9..3bd7f9fedcc3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1316,7 +1316,7 @@ static const struct msm_pingroup sm8250_groups[] = {
static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
{ 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
{ 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
- { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 },
{ 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
{ 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
{ 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index fd5fff9adff0..ccaf40a9c0e6 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -966,16 +966,13 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
return 0;
}
-static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- unsigned int parent_hwirq,
- unsigned int parent_type)
+static int pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
{
struct pmic_gpio_state *state = gpiochip_get_data(chip);
- struct irq_fwspec *fwspec;
-
- fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
- if (!fwspec)
- return NULL;
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
fwspec->fwnode = chip->irq.parent_domain->fwnode;
@@ -985,7 +982,7 @@ static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
/* param[2] must be left as 0 */
fwspec->param[3] = parent_type;
- return fwspec;
+ return 0;
}
static int pmic_gpio_probe(struct platform_device *pdev)
@@ -1162,6 +1159,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmc8180c-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8226-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
@@ -1178,6 +1176,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
+ /* pmp8074 has 12 GPIOs with holes on 1 and 12 */
+ { .compatible = "qcom,pmp8074-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
/* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c
index 63429a287434..770862f45b3f 100644
--- a/drivers/pinctrl/ralink/pinctrl-ralink.c
+++ b/drivers/pinctrl/ralink/pinctrl-ralink.c
@@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
p->func[i]->pin_count,
sizeof(int),
GFP_KERNEL);
+ if (!p->func[i]->pins)
+ return -ENOMEM;
for (j = 0; j < p->func[i]->pin_count; j++)
p->func[i]->pins[j] = p->func[i]->pin_first + j;
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 961007ce7b3a..0903a0a41831 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,7 +38,9 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
+ select PINCTRL_PFC_R8A779G0 if ARCH_R8A779G0
select PINCTRL_RZG2L if ARCH_RZG2L
+ select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -153,6 +155,10 @@ config PINCTRL_PFC_R8A779A0
bool "pin control support for R-Car V3U" if COMPILE_TEST
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A779G0
+ bool "pin control support for R-Car V4H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
@@ -237,6 +243,18 @@ config PINCTRL_RZN1
help
This selects pinctrl driver for Renesas RZ/N1 devices.
+config PINCTRL_RZV2M
+ bool "pin control support for RZ/V2M"
+ depends on OF
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects GPIO and pinctrl driver for Renesas RZ/V2M
+ platforms.
+
config PINCTRL_PFC_SH7203
bool "pin control support for SH7203" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 5d936c154a6f..558b30ce0dec 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779F0) += pfc-r8a779f0.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779G0) += pfc-r8a779g0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+obj-$(CONFIG_PINCTRL_RZV2M) += pinctrl-rzv2m.o
ifeq ($(CONFIG_COMPILE_TEST),y)
CFLAGS_pfc-sh7203.o += -I$(srctree)/arch/sh/include/cpu-sh2a
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 8c14b2021bf0..c91102d3f1d1 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -644,6 +644,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a779f0_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779G0
+ {
+ .compatible = "renesas,pfc-r8a779g0",
+ .data = &r8a779g0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
index aaca4ee2af55..417c357f16b1 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
@@ -1902,7 +1902,6 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POC0,
POC1,
- POC2,
POC3,
TD0SEL1,
};
@@ -1910,7 +1909,6 @@ enum ioctrl_regs {
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POC0] = { 0xe60500a0, },
[POC1] = { 0xe60508a0, },
- [POC2] = { 0xe60510a0, },
[POC3] = { 0xe60518a0, },
[TD0SEL1] = { 0xe6050920, },
{ /* sentinel */ },
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
new file mode 100644
index 000000000000..5dd1c2c7708a
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -0,0 +1,4262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779G0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * This file is based on drivers/pinctrl/renesas/pfc-r8a779a0.c.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_20(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_25(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33)
+
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD7_RX, IP2SR2_15_12)
+#define GPSR2_18 F_(CANFD7_TX, IP2SR2_11_8)
+#define GPSR2_17 F_(CANFD4_RX, IP2SR2_7_4)
+#define GPSR2_16 F_(CANFD4_TX, IP2SR2_3_0)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(IPC_CLKOUT, IP1SR3_27_24)
+#define GPSR3_13 F_(IPC_CLKIN, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
+/* GPSR4 */
+#define GPSR4_24 FM(AVS1)
+#define GPSR4_23 FM(AVS0)
+#define GPSR4_22 FM(PCIE1_CLKREQ_N)
+#define GPSR4_21 FM(PCIE0_CLKREQ_N)
+#define GPSR4_20 FM(TSN0_TXCREFCLK)
+#define GPSR4_19 FM(TSN0_TD2)
+#define GPSR4_18 FM(TSN0_TD3)
+#define GPSR4_17 FM(TSN0_RD2)
+#define GPSR4_16 FM(TSN0_RD3)
+#define GPSR4_15 FM(TSN0_TD0)
+#define GPSR4_14 FM(TSN0_TD1)
+#define GPSR4_13 FM(TSN0_RD1)
+#define GPSR4_12 FM(TSN0_TXC)
+#define GPSR4_11 FM(TSN0_RXC)
+#define GPSR4_10 FM(TSN0_RD0)
+#define GPSR4_9 FM(TSN0_TX_CTL)
+#define GPSR4_8 FM(TSN0_AVTP_PPS0)
+#define GPSR4_7 FM(TSN0_RX_CTL)
+#define GPSR4_6 FM(TSN0_AVTP_CAPTURE)
+#define GPSR4_5 FM(TSN0_AVTP_MATCH)
+#define GPSR4_4 FM(TSN0_LINK)
+#define GPSR4_3 FM(TSN0_PHY_INT)
+#define GPSR4_2 FM(TSN0_AVTP_PPS1)
+#define GPSR4_1 FM(TSN0_MDC)
+#define GPSR4_0 FM(TSN0_MDIO)
+
+/* GPSR5 */
+#define GPSR5_20 FM(AVB2_RX_CTL)
+#define GPSR5_19 FM(AVB2_TX_CTL)
+#define GPSR5_18 FM(AVB2_RXC)
+#define GPSR5_17 FM(AVB2_RD0)
+#define GPSR5_16 FM(AVB2_TXC)
+#define GPSR5_15 FM(AVB2_TD0)
+#define GPSR5_14 FM(AVB2_RD1)
+#define GPSR5_13 FM(AVB2_RD2)
+#define GPSR5_12 FM(AVB2_TD1)
+#define GPSR5_11 FM(AVB2_TD2)
+#define GPSR5_10 FM(AVB2_MDIO)
+#define GPSR5_9 FM(AVB2_RD3)
+#define GPSR5_8 FM(AVB2_TD3)
+#define GPSR5_7 FM(AVB2_TXCREFCLK)
+#define GPSR5_6 FM(AVB2_MDC)
+#define GPSR5_5 FM(AVB2_MAGIC)
+#define GPSR5_4 FM(AVB2_PHY_INT)
+#define GPSR5_3 FM(AVB2_LINK)
+#define GPSR5_2 FM(AVB2_AVTP_MATCH)
+#define GPSR5_1 FM(AVB2_AVTP_CAPTURE)
+#define GPSR5_0 FM(AVB2_AVTP_PPS)
+
+/* GPSR6 */
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+
+/* GPSR7 */
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+
+/* GPSR8 */
+#define GPSR8_13 F_(GP8_13, IP1SR8_23_20)
+#define GPSR8_12 F_(GP8_12, IP1SR8_19_16)
+#define GPSR8_11 F_(SDA5, IP1SR8_15_12)
+#define GPSR8_10 F_(SCL5, IP1SR8_11_8)
+#define GPSR8_9 F_(SDA4, IP1SR8_7_4)
+#define GPSR8_8 F_(SCL4, IP1SR8_3_0)
+#define GPSR8_7 F_(SDA3, IP0SR8_31_28)
+#define GPSR8_6 F_(SCL3, IP0SR8_27_24)
+#define GPSR8_5 F_(SDA2, IP0SR8_23_20)
+#define GPSR8_4 F_(SCL2, IP0SR8_19_16)
+#define GPSR8_3 F_(SDA1, IP0SR8_15_12)
+#define GPSR8_2 F_(SCL1, IP0SR8_11_8)
+#define GPSR8_1 F_(SDA0, IP0SR8_7_4)
+#define GPSR8_0 F_(SCL0, IP0SR8_3_0)
+
+/* SR0 */
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR1 */
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR2 */
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR3 */
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR6 */
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR7 */
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR8 */
+/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR \
+ GPSR3_29 \
+ GPSR1_28 GPSR3_28 \
+ GPSR1_27 GPSR3_27 \
+ GPSR1_26 GPSR3_26 \
+ GPSR1_25 GPSR3_25 \
+ GPSR1_24 GPSR3_24 GPSR4_24 \
+ GPSR1_23 GPSR3_23 GPSR4_23 \
+ GPSR1_22 GPSR3_22 GPSR4_22 \
+ GPSR1_21 GPSR3_21 GPSR4_21 \
+ GPSR1_20 GPSR3_20 GPSR4_20 GPSR5_20 GPSR6_20 GPSR7_20 \
+ GPSR1_19 GPSR2_19 GPSR3_19 GPSR4_19 GPSR5_19 GPSR6_19 GPSR7_19 \
+GPSR0_18 GPSR1_18 GPSR2_18 GPSR3_18 GPSR4_18 GPSR5_18 GPSR6_18 GPSR7_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR4_17 GPSR5_17 GPSR6_17 GPSR7_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR3_16 GPSR4_16 GPSR5_16 GPSR6_16 GPSR7_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 GPSR8_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 GPSR8_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 GPSR8_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 GPSR8_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 GPSR8_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 GPSR8_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 GPSR8_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 GPSR8_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 GPSR8_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 GPSR8_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 GPSR8_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 GPSR8_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 GPSR8_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0 GPSR8_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \
+FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \
+FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
+FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \
+FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 FM(IP2SR2_11_8) IP2SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 \
+\
+FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
+FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
+FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
+FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \
+FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \
+FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \
+FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \
+FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \
+\
+FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \
+FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \
+FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \
+FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \
+FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \
+FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \
+FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \
+FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \
+\
+FM(IP0SR8_3_0) IP0SR8_3_0 FM(IP1SR8_3_0) IP1SR8_3_0 \
+FM(IP0SR8_7_4) IP0SR8_7_4 FM(IP1SR8_7_4) IP1SR8_7_4 \
+FM(IP0SR8_11_8) IP0SR8_11_8 FM(IP1SR8_11_8) IP1SR8_11_8 \
+FM(IP0SR8_15_12) IP0SR8_15_12 FM(IP1SR8_15_12) IP1SR8_15_12 \
+FM(IP0SR8_19_16) IP0SR8_19_16 FM(IP1SR8_19_16) IP1SR8_19_16 \
+FM(IP0SR8_23_20) IP0SR8_23_20 FM(IP1SR8_23_20) IP1SR8_23_20 \
+FM(IP0SR8_27_24) IP0SR8_27_24 \
+FM(IP0SR8_31_28) IP0SR8_31_28
+
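+/*
+ * MOD_SELn_m lists both settings (0 and 1) of bit m in the MOD_SELn
+ * register; the pinmux data below uses these to qualify which of the
+ * multiplexed functions is active on the corresponding pin.
+ */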
+/* MOD_SEL4 */ /* 0 */ /* 1 */
+#define MOD_SEL4_19 FM(SEL_TSN0_TD2_0) FM(SEL_TSN0_TD2_1)
+#define MOD_SEL4_18 FM(SEL_TSN0_TD3_0) FM(SEL_TSN0_TD3_1)
+#define MOD_SEL4_15 FM(SEL_TSN0_TD0_0) FM(SEL_TSN0_TD0_1)
+#define MOD_SEL4_14 FM(SEL_TSN0_TD1_0) FM(SEL_TSN0_TD1_1)
+#define MOD_SEL4_12 FM(SEL_TSN0_TXC_0) FM(SEL_TSN0_TXC_1)
+#define MOD_SEL4_9 FM(SEL_TSN0_TX_CTL_0) FM(SEL_TSN0_TX_CTL_1)
+#define MOD_SEL4_8 FM(SEL_TSN0_AVTP_PPS0_0) FM(SEL_TSN0_AVTP_PPS0_1)
+#define MOD_SEL4_5 FM(SEL_TSN0_AVTP_MATCH_0) FM(SEL_TSN0_AVTP_MATCH_1)
+#define MOD_SEL4_2 FM(SEL_TSN0_AVTP_PPS1_0) FM(SEL_TSN0_AVTP_PPS1_1)
+#define MOD_SEL4_1 FM(SEL_TSN0_MDC_0) FM(SEL_TSN0_MDC_1)
+
+/* MOD_SEL5 */ /* 0 */ /* 1 */
+#define MOD_SEL5_19 FM(SEL_AVB2_TX_CTL_0) FM(SEL_AVB2_TX_CTL_1)
+#define MOD_SEL5_16 FM(SEL_AVB2_TXC_0) FM(SEL_AVB2_TXC_1)
+#define MOD_SEL5_15 FM(SEL_AVB2_TD0_0) FM(SEL_AVB2_TD0_1)
+#define MOD_SEL5_12 FM(SEL_AVB2_TD1_0) FM(SEL_AVB2_TD1_1)
+#define MOD_SEL5_11 FM(SEL_AVB2_TD2_0) FM(SEL_AVB2_TD2_1)
+#define MOD_SEL5_8 FM(SEL_AVB2_TD3_0) FM(SEL_AVB2_TD3_1)
+#define MOD_SEL5_6 FM(SEL_AVB2_MDC_0) FM(SEL_AVB2_MDC_1)
+#define MOD_SEL5_5 FM(SEL_AVB2_MAGIC_0) FM(SEL_AVB2_MAGIC_1)
+#define MOD_SEL5_2 FM(SEL_AVB2_AVTP_MATCH_0) FM(SEL_AVB2_AVTP_MATCH_1)
+#define MOD_SEL5_0 FM(SEL_AVB2_AVTP_PPS_0) FM(SEL_AVB2_AVTP_PPS_1)
+
+/* MOD_SEL6 */ /* 0 */ /* 1 */
+#define MOD_SEL6_18 FM(SEL_AVB1_TD3_0) FM(SEL_AVB1_TD3_1)
+#define MOD_SEL6_16 FM(SEL_AVB1_TD2_0) FM(SEL_AVB1_TD2_1)
+#define MOD_SEL6_13 FM(SEL_AVB1_TD0_0) FM(SEL_AVB1_TD0_1)
+#define MOD_SEL6_12 FM(SEL_AVB1_TD1_0) FM(SEL_AVB1_TD1_1)
+#define MOD_SEL6_10 FM(SEL_AVB1_AVTP_PPS_0) FM(SEL_AVB1_AVTP_PPS_1)
+#define MOD_SEL6_7 FM(SEL_AVB1_TX_CTL_0) FM(SEL_AVB1_TX_CTL_1)
+#define MOD_SEL6_6 FM(SEL_AVB1_TXC_0) FM(SEL_AVB1_TXC_1)
+#define MOD_SEL6_5 FM(SEL_AVB1_AVTP_MATCH_0) FM(SEL_AVB1_AVTP_MATCH_1)
+#define MOD_SEL6_2 FM(SEL_AVB1_MDC_0) FM(SEL_AVB1_MDC_1)
+#define MOD_SEL6_1 FM(SEL_AVB1_MAGIC_0) FM(SEL_AVB1_MAGIC_1)
+
+/* MOD_SEL7 */ /* 0 */ /* 1 */
+#define MOD_SEL7_16 FM(SEL_AVB0_TX_CTL_0) FM(SEL_AVB0_TX_CTL_1)
+#define MOD_SEL7_15 FM(SEL_AVB0_TXC_0) FM(SEL_AVB0_TXC_1)
+#define MOD_SEL7_13 FM(SEL_AVB0_MDC_0) FM(SEL_AVB0_MDC_1)
+#define MOD_SEL7_11 FM(SEL_AVB0_TD0_0) FM(SEL_AVB0_TD0_1)
+#define MOD_SEL7_10 FM(SEL_AVB0_MAGIC_0) FM(SEL_AVB0_MAGIC_1)
+#define MOD_SEL7_7 FM(SEL_AVB0_TD1_0) FM(SEL_AVB0_TD1_1)
+#define MOD_SEL7_6 FM(SEL_AVB0_TD2_0) FM(SEL_AVB0_TD2_1)
+#define MOD_SEL7_3 FM(SEL_AVB0_TD3_0) FM(SEL_AVB0_TD3_1)
+#define MOD_SEL7_2 FM(SEL_AVB0_AVTP_MATCH_0) FM(SEL_AVB0_AVTP_MATCH_1)
+#define MOD_SEL7_0 FM(SEL_AVB0_AVTP_PPS_0) FM(SEL_AVB0_AVTP_PPS_1)
+
+/* MOD_SEL8 */ /* 0 */ /* 1 */
+#define MOD_SEL8_11 FM(SEL_SDA5_0) FM(SEL_SDA5_1)
+#define MOD_SEL8_10 FM(SEL_SCL5_0) FM(SEL_SCL5_1)
+#define MOD_SEL8_9 FM(SEL_SDA4_0) FM(SEL_SDA4_1)
+#define MOD_SEL8_8 FM(SEL_SCL4_0) FM(SEL_SCL4_1)
+#define MOD_SEL8_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1)
+#define MOD_SEL8_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1)
+#define MOD_SEL8_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1)
+#define MOD_SEL8_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1)
+#define MOD_SEL8_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1)
+#define MOD_SEL8_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1)
+#define MOD_SEL8_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1)
+#define MOD_SEL8_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL4_19 MOD_SEL5_19 \
+MOD_SEL4_18 MOD_SEL6_18 \
+ \
+ MOD_SEL5_16 MOD_SEL6_16 MOD_SEL7_16 \
+MOD_SEL4_15 MOD_SEL5_15 MOD_SEL7_15 \
+MOD_SEL4_14 \
+ MOD_SEL6_13 MOD_SEL7_13 \
+MOD_SEL4_12 MOD_SEL5_12 MOD_SEL6_12 \
+ MOD_SEL5_11 MOD_SEL7_11 MOD_SEL8_11 \
+ MOD_SEL6_10 MOD_SEL7_10 MOD_SEL8_10 \
+MOD_SEL4_9 MOD_SEL8_9 \
+MOD_SEL4_8 MOD_SEL5_8 MOD_SEL8_8 \
+ MOD_SEL6_7 MOD_SEL7_7 MOD_SEL8_7 \
+ MOD_SEL5_6 MOD_SEL6_6 MOD_SEL7_6 MOD_SEL8_6 \
+MOD_SEL4_5 MOD_SEL5_5 MOD_SEL6_5 MOD_SEL8_5 \
+ MOD_SEL8_4 \
+ MOD_SEL7_3 MOD_SEL8_3 \
+MOD_SEL4_2 MOD_SEL5_2 MOD_SEL6_2 MOD_SEL7_2 MOD_SEL8_2 \
+MOD_SEL4_1 MOD_SEL6_1 MOD_SEL8_1 \
+ MOD_SEL5_0 MOD_SEL7_0 MOD_SEL8_0
+
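+/*
+ * Expand the pin/function tables twice: once with FM() producing FN_*
+ * enum identifiers and once producing the matching *_MARK values.
+ * F_() expands to nothing in both passes; the functions it names are
+ * emitted through the IPSR tables instead.
+ */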
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
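+/*
+ * PINMUX_SINGLE() covers pins with a single dedicated function,
+ * PINMUX_IPSR_GPSR() covers functions selected via GPSR/IPSR only,
+ * PINMUX_IPSR_MSEL() adds a MOD_SEL qualification, and
+ * PINMUX_IPSR_NOGM() records a function together with its MOD_SEL
+ * setting for pins without an IPSR field (the first argument is 0).
+ */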
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS0),
+ PINMUX_SINGLE(PCIE1_CLKREQ_N),
+ PINMUX_SINGLE(PCIE0_CLKREQ_N),
+
+ /* TSN0 without MODSEL4 */
+ PINMUX_SINGLE(TSN0_TXCREFCLK),
+ PINMUX_SINGLE(TSN0_RD2),
+ PINMUX_SINGLE(TSN0_RD3),
+ PINMUX_SINGLE(TSN0_RD1),
+ PINMUX_SINGLE(TSN0_RXC),
+ PINMUX_SINGLE(TSN0_RD0),
+ PINMUX_SINGLE(TSN0_RX_CTL),
+ PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
+ PINMUX_SINGLE(TSN0_LINK),
+ PINMUX_SINGLE(TSN0_PHY_INT),
+ PINMUX_SINGLE(TSN0_MDIO),
+ /* TSN0 with MODSEL4 */
+ PINMUX_IPSR_NOGM(0, TSN0_TD2, SEL_TSN0_TD2_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD3, SEL_TSN0_TD3_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD0, SEL_TSN0_TD0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD1, SEL_TSN0_TD1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TXC, SEL_TSN0_TXC_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TX_CTL, SEL_TSN0_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0, SEL_TSN0_AVTP_PPS0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH, SEL_TSN0_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1, SEL_TSN0_AVTP_PPS1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_MDC, SEL_TSN0_MDC_1),
+
+	/* AVB2 without MODSEL5 */
+ PINMUX_SINGLE(AVB2_RX_CTL),
+ PINMUX_SINGLE(AVB2_RXC),
+ PINMUX_SINGLE(AVB2_RD0),
+ PINMUX_SINGLE(AVB2_RD1),
+ PINMUX_SINGLE(AVB2_RD2),
+ PINMUX_SINGLE(AVB2_MDIO),
+ PINMUX_SINGLE(AVB2_RD3),
+ PINMUX_SINGLE(AVB2_TXCREFCLK),
+ PINMUX_SINGLE(AVB2_PHY_INT),
+ PINMUX_SINGLE(AVB2_LINK),
+ PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+	/* AVB2 with MODSEL5 */
+ PINMUX_IPSR_NOGM(0, AVB2_TX_CTL, SEL_AVB2_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TXC, SEL_AVB2_TXC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD0, SEL_AVB2_TD0_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD1, SEL_AVB2_TD1_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD2, SEL_AVB2_TD2_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD3, SEL_AVB2_TD3_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MDC, SEL_AVB2_MDC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MAGIC, SEL_AVB2_MAGIC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH, SEL_AVB2_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS, SEL_AVB2_AVTP_PPS_1),
+
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+
+ /* IP1SR0 */
+ PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC),
+
+ PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK),
+
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B),
+
+ /* IP1SR2 */
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, CANFD4_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, PWM4),
+
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD4_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM5),
+
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, CANFD7_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, PWM6),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD7_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM7),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1),
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0),
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2),
+ PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK),
+ PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS),
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3),
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5),
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_A),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+ /* IP2SR3 */
+ PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3),
+ PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1),
+ PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1),
+
+ /* IP3SR3 */
+ PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2),
+ PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL),
+ PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3),
+ PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N),
+ PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+ PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+ /* IP0SR6 */
+ PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+ PINMUX_IPSR_MSEL(IP0SR6_7_4, AVB1_MAGIC, SEL_AVB1_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP0SR6_11_8, AVB1_MDC, SEL_AVB1_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_MII_RX_ER, SEL_AVB1_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_TXC, SEL_AVB1_TXC_1),
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_MII_TXC, SEL_AVB1_TXC_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_TX_CTL, SEL_AVB1_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_MII_TX_EN, SEL_AVB1_TX_CTL_0),
+
+ /* IP1SR6 */
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_AVTP_PPS, SEL_AVB1_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_MII_COL, SEL_AVB1_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_TD1, SEL_AVB1_TD1_1),
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_MII_TD1, SEL_AVB1_TD1_0),
+
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_TD0, SEL_AVB1_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_MII_TD0, SEL_AVB1_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+ /* IP2SR6 */
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_TD2, SEL_AVB1_TD2_1),
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_MII_TD2, SEL_AVB1_TD2_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_TD3, SEL_AVB1_TD3_1),
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_MII_TD3, SEL_AVB1_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+ /* IP0SR7 */
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_AVTP_PPS, SEL_AVB0_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_MII_COL, SEL_AVB0_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_AVTP_MATCH, SEL_AVB0_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_MII_RX_ER, SEL_AVB0_AVTP_MATCH_0),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, CC5_OSCOUT, SEL_AVB0_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_TD3, SEL_AVB0_TD3_1),
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_MII_TD3, SEL_AVB0_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_TD2, SEL_AVB0_TD2_1),
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_MII_TD2, SEL_AVB0_TD2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_TD1, SEL_AVB0_TD1_1),
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_MII_TD1, SEL_AVB0_TD1_0),
+
+ /* IP1SR7 */
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_MSEL(IP1SR7_11_8, AVB0_MAGIC, SEL_AVB0_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_TD0, SEL_AVB0_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_MII_TD0, SEL_AVB0_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP1SR7_23_20, AVB0_MDC, SEL_AVB0_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_TXC, SEL_AVB0_TXC_1),
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_MII_TXC, SEL_AVB0_TXC_0),
+
+ /* IP2SR7 */
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_TX_CTL, SEL_AVB0_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_MII_TX_EN, SEL_AVB0_TX_CTL_0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+
+ /* IP0SR8 */
+ PINMUX_IPSR_MSEL(IP0SR8_3_0, SCL0, SEL_SCL0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_7_4, SDA0, SEL_SDA0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_11_8, SCL1, SEL_SCL1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_15_12, SDA1, SEL_SDA1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_19_16, SCL2, SEL_SCL2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_23_20, SDA2, SEL_SDA2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_27_24, SCL3, SEL_SCL3_0),
+ PINMUX_IPSR_MSEL(IP0SR8_31_28, SDA3, SEL_SDA3_0),
+
+ /* IP1SR8 */
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCL4, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, HRX2, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCK4, SEL_SCL4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, SDA4, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, HTX2, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, CTS4_N, SEL_SDA4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, SCL5, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, HRTS2_N, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, RTS4_N, SEL_SCL5_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SDA5, SEL_SDA5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SCIF_CLK2, SEL_SDA5_0),
+
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, HCTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, TX4),
+
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, HSCK2),
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, RX4),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK,
+ AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK,
+ AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK,
+ AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK,
+ AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK,
+ AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK,
+ AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD4 ----------------------------------------------------------------- */
+static const unsigned int canfd4_data_pins[] = {
+ /* CANFD4_TX, CANFD4_RX */
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int canfd4_data_mux[] = {
+ CANFD4_TX_MARK, CANFD4_RX_MARK,
+};
+
+/* - CANFD5 ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_pins[] = {
+ /* CANFD5_TX, CANFD5_RX */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int canfd5_data_mux[] = {
+ CANFD5_TX_MARK, CANFD5_RX_MARK,
+};
+
+/* - CANFD5_B ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_b_pins[] = {
+ /* CANFD5_TX_B, CANFD5_RX_B */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int canfd5_data_b_mux[] = {
+ CANFD5_TX_B_MARK, CANFD5_RX_B_MARK,
+};
+
+/* - CANFD6 ----------------------------------------------------------------- */
+static const unsigned int canfd6_data_pins[] = {
+ /* CANFD6_TX, CANFD6_RX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int canfd6_data_mux[] = {
+ CANFD6_TX_MARK, CANFD6_RX_MARK,
+};
+
+/* - CANFD7 ----------------------------------------------------------------- */
+static const unsigned int canfd7_data_pins[] = {
+ /* CANFD7_TX, CANFD7_RX */
+ RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd7_data_mux[] = {
+ CANFD7_TX_MARK, CANFD7_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0_N, HCTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* HRX1, HTX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_mux[] = {
+ HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* HSCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* HRTS1_N, HCTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HRTS1_N_MARK, HCTS1_N_MARK,
+};
+
+/* - HSCIF1_X ---------------------------------------------------------------- */
+static const unsigned int hscif1_data_x_pins[] = {
+ /* HRX1_X, HTX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_x_mux[] = {
+ HRX1_X_MARK, HTX1_X_MARK,
+};
+static const unsigned int hscif1_clk_x_pins[] = {
+ /* HSCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_x_mux[] = {
+ HSCK1_X_MARK,
+};
+static const unsigned int hscif1_ctrl_x_pins[] = {
+ /* HRTS1_N_X, HCTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_x_mux[] = {
+ HRTS1_N_X_MARK, HCTS1_N_X_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(8, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2_N, HCTS2_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_pins[] = {
+ /* HRX3, HTX3 */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_mux[] = {
+ HRX3_MARK, HTX3_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* HSCK3 */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* HRTS3_N, HCTS3_N */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 0),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 2),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 4),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 6),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+ /* SDA4, SCL4 */
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 8),
+};
+static const unsigned int i2c4_mux[] = {
+ SDA4_MARK, SCL4_MARK,
+};
+
+/* - I2C5 ------------------------------------------------------------------- */
+static const unsigned int i2c5_pins[] = {
+ /* SDA5, SCL5 */
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 10),
+};
+static const unsigned int i2c5_mux[] = {
+ SDA5_MARK, SCL5_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+	/* PCIE0_CLKREQ_N */
+	RCAR_GP_PIN(4, 21),
+};
+static const unsigned int pcie0_clkreq_n_mux[] = {
+	PCIE0_CLKREQ_N_MARK,
+};
+static const unsigned int pcie1_clkreq_n_pins[] = {
+	/* PCIE1_CLKREQ_N */
+	RCAR_GP_PIN(4, 22),
+};
+static const unsigned int pcie1_clkreq_n_mux[] = {
+	PCIE1_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM0_A */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM1_A */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM1_B */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM2_B */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM3_A */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM3_B */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - PWM5 ------------------------------------------------------------------- */
+static const unsigned int pwm5_pins[] = {
+ /* PWM5 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm5_mux[] = {
+ PWM5_MARK,
+};
+
+/* - PWM6 ------------------------------------------------------------------- */
+static const unsigned int pwm6_pins[] = {
+ /* PWM6 */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int pwm6_mux[] = {
+ PWM6_MARK,
+};
+
+/* - PWM7 ------------------------------------------------------------------- */
+static const unsigned int pwm7_pins[] = {
+ /* PWM7 */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm7_mux[] = {
+ PWM7_MARK,
+};
+
+/* - PWM8_A ------------------------------------------------------------------- */
+static const unsigned int pwm8_a_pins[] = {
+ /* PWM8_A */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int pwm8_a_mux[] = {
+ PWM8_A_MARK,
+};
+
+/* - PWM9_A ------------------------------------------------------------------- */
+static const unsigned int pwm9_a_pins[] = {
+ /* PWM9_A */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm9_a_mux[] = {
+ PWM9_A_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+	QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+	QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0_N, CTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX1, TX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS1_N, CTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF1_X ------------------------------------------------------------------ */
+static const unsigned int scif1_data_x_pins[] = {
+ /* RX1_X, TX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_x_mux[] = {
+ RX1_X_MARK, TX1_X_MARK,
+};
+static const unsigned int scif1_clk_x_pins[] = {
+ /* SCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_x_mux[] = {
+ SCK1_X_MARK,
+};
+static const unsigned int scif1_ctrl_x_pins[] = {
+ /* RTS1_N_X, CTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_x_mux[] = {
+ RTS1_N_X_MARK, CTS1_N_X_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX3, TX3 */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK3 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+ /* RTS3_N, CTS3_N */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+ RTS3_N_MARK, CTS3_N_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK3_A */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS3_N_A, CTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(8, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4_N, CTS4_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+ /* TPU0TO0 */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_mux[] = {
+ TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+ /* TPU0TO1 */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_mux[] = {
+ TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+ /* TPU0TO2 */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_mux[] = {
+ TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+ /* TPU0TO3 */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_mux[] = {
+ TPU0TO3_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
+};
+
+/* - TSN0 ------------------------------------------------ */
+static const unsigned int tsn0_link_pins[] = {
+ /* TSN0_LINK */
+ RCAR_GP_PIN(4, 4),
+};
+static const unsigned int tsn0_link_mux[] = {
+ TSN0_LINK_MARK,
+};
+static const unsigned int tsn0_phy_int_pins[] = {
+ /* TSN0_PHY_INT */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int tsn0_phy_int_mux[] = {
+ TSN0_PHY_INT_MARK,
+};
+static const unsigned int tsn0_mdio_pins[] = {
+ /* TSN0_MDC, TSN0_MDIO */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int tsn0_mdio_mux[] = {
+ TSN0_MDC_MARK, TSN0_MDIO_MARK,
+};
+static const unsigned int tsn0_rgmii_pins[] = {
+ /*
+ * TSN0_TX_CTL, TSN0_TXC, TSN0_TD0, TSN0_TD1, TSN0_TD2, TSN0_TD3,
+ * TSN0_RX_CTL, TSN0_RXC, TSN0_RD0, TSN0_RD1, TSN0_RD2, TSN0_RD3,
+ */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 11),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int tsn0_rgmii_mux[] = {
+ TSN0_TX_CTL_MARK, TSN0_TXC_MARK,
+ TSN0_TD0_MARK, TSN0_TD1_MARK,
+ TSN0_TD2_MARK, TSN0_TD3_MARK,
+ TSN0_RX_CTL_MARK, TSN0_RXC_MARK,
+ TSN0_RD0_MARK, TSN0_RD1_MARK,
+ TSN0_RD2_MARK, TSN0_RD3_MARK,
+};
+static const unsigned int tsn0_txcrefclk_pins[] = {
+ /* TSN0_TXCREFCLK */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int tsn0_txcrefclk_mux[] = {
+ TSN0_TXCREFCLK_MARK,
+};
+static const unsigned int tsn0_avtp_pps_pins[] = {
+ /* TSN0_AVTP_PPS0, TSN0_AVTP_PPS1 */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int tsn0_avtp_pps_mux[] = {
+ TSN0_AVTP_PPS0_MARK, TSN0_AVTP_PPS1_MARK,
+};
+static const unsigned int tsn0_avtp_capture_pins[] = {
+ /* TSN0_AVTP_CAPTURE */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int tsn0_avtp_capture_mux[] = {
+ TSN0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int tsn0_avtp_match_pins[] = {
+ /* TSN0_AVTP_MATCH */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int tsn0_avtp_match_mux[] = {
+ TSN0_AVTP_MATCH_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(canfd4_data),
+ SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd6_data),
+ SH_PFC_PIN_GROUP(canfd7_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+
+ BUS_DATA_PIN_GROUP(mmc_data, 1),
+ BUS_DATA_PIN_GROUP(mmc_data, 4),
+ BUS_DATA_PIN_GROUP(mmc_data, 8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+ SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+
+ SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(pwm7),
+ SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+ BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ BUS_DATA_PIN_GROUP(qspi1_data, 2),
+ BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+
+ SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(tsn0_link),
+ SH_PFC_PIN_GROUP(tsn0_phy_int),
+ SH_PFC_PIN_GROUP(tsn0_mdio),
+ SH_PFC_PIN_GROUP(tsn0_rgmii),
+ SH_PFC_PIN_GROUP(tsn0_txcrefclk),
+ SH_PFC_PIN_GROUP(tsn0_avtp_pps),
+ SH_PFC_PIN_GROUP(tsn0_avtp_capture),
+ SH_PFC_PIN_GROUP(tsn0_avtp_match),
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const canfd4_groups[] = {
+ "canfd4_data",
+};
+
+static const char * const canfd5_groups[] = {
+ /* suffix might be updated */
+ "canfd5_data",
+ "canfd5_data_b",
+};
+
+static const char * const canfd6_groups[] = {
+ "canfd6_data",
+};
+
+static const char * const canfd7_groups[] = {
+ "canfd7_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ /* suffix might be updated */
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+ "hscif1_data_x",
+ "hscif1_clk_x",
+ "hscif1_ctrl_x",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ /* suffix might be updated */
+ "hscif3_data",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+ "pcie0_clkreq_n",
+ "pcie1_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+ /* suffix might be updated */
+ "pwm0_a",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ /* suffix might be updated */
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6",
+};
+
+static const char * const pwm7_groups[] = {
+ "pwm7",
+};
+
+static const char * const pwm8_groups[] = {
+ /* suffix might be updated */
+ "pwm8_a",
+};
+
+static const char * const pwm9_groups[] = {
+ /* suffix might be updated */
+ "pwm9_a",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ /* suffix might be updated */
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+ "scif1_data_x",
+ "scif1_clk_x",
+ "scif1_ctrl_x",
+};
+
+static const char * const scif3_groups[] = {
+ /* suffix might be updated */
+ "scif3_data",
+ "scif3_clk",
+ "scif3_ctrl",
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const tpu_groups[] = {
+ /* suffix might be updated */
+ "tpu_to0",
+ "tpu_to0_a",
+ "tpu_to1",
+ "tpu_to1_a",
+ "tpu_to2",
+ "tpu_to2_a",
+ "tpu_to3",
+ "tpu_to3_a",
+};
+
+static const char * const tsn0_groups[] = {
+ "tsn0_link",
+ "tsn0_phy_int",
+ "tsn0_mdio",
+ "tsn0_rgmii",
+ "tsn0_txcrefclk",
+ "tsn0_avtp_pps",
+ "tsn0_avtp_capture",
+ "tsn0_avtp_match",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(canfd4),
+ SH_PFC_FUNCTION(canfd5),
+ SH_PFC_FUNCTION(canfd6),
+ SH_PFC_FUNCTION(canfd7),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pcie),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(pwm7),
+ SH_PFC_FUNCTION(pwm8),
+ SH_PFC_FUNCTION(pwm9),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+
+ SH_PFC_FUNCTION(tpu),
+
+ SH_PFC_FUNCTION(tsn0),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_19 RESERVED */
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+ GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_20 RESERVED */
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ GP_3_29_FN, GPSR3_29,
+ GP_3_28_FN, GPSR3_28,
+ GP_3_27_FN, GPSR3_27,
+ GP_3_26_FN, GPSR3_26,
+ GP_3_25_FN, GPSR3_25,
+ GP_3_24_FN, GPSR3_24,
+ GP_3_23_FN, GPSR3_23,
+ GP_3_22_FN, GPSR3_22,
+ GP_3_21_FN, GPSR3_21,
+ GP_3_20_FN, GPSR3_20,
+ GP_3_19_FN, GPSR3_19,
+ GP_3_18_FN, GPSR3_18,
+ GP_3_17_FN, GPSR3_17,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ GP_4_22_FN, GPSR4_22,
+ GP_4_21_FN, GPSR4_21,
+ GP_4_20_FN, GPSR4_20,
+ GP_4_19_FN, GPSR4_19,
+ GP_4_18_FN, GPSR4_18,
+ GP_4_17_FN, GPSR4_17,
+ GP_4_16_FN, GPSR4_16,
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR8", 0xE6068040, 32,
+ GROUP(-18, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP8_31_14 RESERVED */
+ GP_8_13_FN, GPSR8_13,
+ GP_8_12_FN, GPSR8_12,
+ GP_8_11_FN, GPSR8_11,
+ GP_8_10_FN, GPSR8_10,
+ GP_8_9_FN, GPSR8_9,
+ GP_8_8_FN, GPSR8_8,
+ GP_8_7_FN, GPSR8_7,
+ GP_8_6_FN, GPSR8_6,
+ GP_8_5_FN, GPSR8_5,
+ GP_8_4_FN, GPSR8_4,
+ GP_8_3_FN, GPSR8_3,
+ GP_8_2_FN, GPSR8_2,
+ GP_8_1_FN, GPSR8_1,
+ GP_8_0_FN, GPSR8_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+ IP0SR0_31_28
+ IP0SR0_27_24
+ IP0SR0_23_20
+ IP0SR0_19_16
+ IP0SR0_15_12
+ IP0SR0_11_8
+ IP0SR0_7_4
+ IP0SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+ IP1SR0_31_28
+ IP1SR0_27_24
+ IP1SR0_23_20
+ IP1SR0_19_16
+ IP1SR0_15_12
+ IP1SR0_11_8
+ IP1SR0_7_4
+ IP1SR0_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+ GROUP(-20, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_12 RESERVED */
+ IP2SR0_11_8
+ IP2SR0_7_4
+ IP2SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_20 RESERVED */
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+ GROUP(-16, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR2_31_16 RESERVED */
+ IP2SR2_15_12
+ IP2SR2_11_8
+ IP2SR2_7_4
+ IP2SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+ IP2SR3_31_28
+ IP2SR3_27_24
+ IP2SR3_23_20
+ IP2SR3_19_16
+ IP2SR3_15_12
+ IP2SR3_11_8
+ IP2SR3_7_4
+ IP2SR3_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR3", 0xE605886C, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR3_31_24 RESERVED */
+ IP3SR3_23_20
+ IP3SR3_19_16
+ IP3SR3_15_12
+ IP3SR3_11_8
+ IP3SR3_7_4
+ IP3SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ IP0SR6_31_28
+ IP0SR6_27_24
+ IP0SR6_23_20
+ IP0SR6_19_16
+ IP0SR6_15_12
+ IP0SR6_11_8
+ IP0SR6_7_4
+ IP0SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+ IP1SR6_31_28
+ IP1SR6_27_24
+ IP1SR6_23_20
+ IP1SR6_19_16
+ IP1SR6_15_12
+ IP1SR6_11_8
+ IP1SR6_7_4
+ IP1SR6_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR6_31_20 RESERVED */
+ IP2SR6_19_16
+ IP2SR6_15_12
+ IP2SR6_11_8
+ IP2SR6_7_4
+ IP2SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+ IP0SR7_31_28
+ IP0SR7_27_24
+ IP0SR7_23_20
+ IP0SR7_19_16
+ IP0SR7_15_12
+ IP0SR7_11_8
+ IP0SR7_7_4
+ IP0SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+ IP1SR7_31_28
+ IP1SR7_27_24
+ IP1SR7_23_20
+ IP1SR7_19_16
+ IP1SR7_15_12
+ IP1SR7_11_8
+ IP1SR7_7_4
+ IP1SR7_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR7_31_20 RESERVED */
+ IP2SR7_19_16
+ IP2SR7_15_12
+ IP2SR7_11_8
+ IP2SR7_7_4
+ IP2SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR8", 0xE6068060, 32, 4, GROUP(
+ IP0SR8_31_28
+ IP0SR8_27_24
+ IP0SR8_23_20
+ IP0SR8_19_16
+ IP0SR8_15_12
+ IP0SR8_11_8
+ IP0SR8_7_4
+ IP0SR8_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP1SR8", 0xE6068064, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP1SR8_31_24 RESERVED */
+ IP1SR8_23_20
+ IP1SR8_19_16
+ IP1SR8_15_12
+ IP1SR8_11_8
+ IP1SR8_7_4
+ IP1SR8_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+ GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+ -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL4_19
+ MOD_SEL4_18
+ /* RESERVED 17-16 */
+ MOD_SEL4_15
+ MOD_SEL4_14
+ /* RESERVED 13 */
+ MOD_SEL4_12
+ /* RESERVED 11-10 */
+ MOD_SEL4_9
+ MOD_SEL4_8
+ /* RESERVED 7-6 */
+ MOD_SEL4_5
+ /* RESERVED 4-3 */
+ MOD_SEL4_2
+ MOD_SEL4_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+ GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+ 1, 1, -2, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL5_19
+ /* RESERVED 18-17 */
+ MOD_SEL5_16
+ MOD_SEL5_15
+ /* RESERVED 14-13 */
+ MOD_SEL5_12
+ MOD_SEL5_11
+ /* RESERVED 10-9 */
+ MOD_SEL5_8
+ /* RESERVED 7 */
+ MOD_SEL5_6
+ MOD_SEL5_5
+ /* RESERVED 4-3 */
+ MOD_SEL5_2
+ /* RESERVED 1 */
+ MOD_SEL5_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+ GROUP(-13, 1, -1, 1, -2, 1, 1,
+ -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-19 */
+ MOD_SEL6_18
+ /* RESERVED 17 */
+ MOD_SEL6_16
+ /* RESERVED 15-14 */
+ MOD_SEL6_13
+ MOD_SEL6_12
+ /* RESERVED 11 */
+ MOD_SEL6_10
+ /* RESERVED 9-8 */
+ MOD_SEL6_7
+ MOD_SEL6_6
+ MOD_SEL6_5
+ /* RESERVED 4-3 */
+ MOD_SEL6_2
+ MOD_SEL6_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+ GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+ -2, 1, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-17 */
+ MOD_SEL7_16
+ MOD_SEL7_15
+ /* RESERVED 14 */
+ MOD_SEL7_13
+ /* RESERVED 12 */
+ MOD_SEL7_11
+ MOD_SEL7_10
+ /* RESERVED 9-8 */
+ MOD_SEL7_7
+ MOD_SEL7_6
+ /* RESERVED 5-4 */
+ MOD_SEL7_3
+ MOD_SEL7_2
+ /* RESERVED 1 */
+ MOD_SEL7_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED 31-12 */
+ MOD_SEL8_11
+ MOD_SEL8_10
+ MOD_SEL8_9
+ MOD_SEL8_8
+ MOD_SEL8_7
+ MOD_SEL8_6
+ MOD_SEL8_5
+ MOD_SEL8_4
+ MOD_SEL8_3
+ MOD_SEL8_2
+ MOD_SEL8_1
+ MOD_SEL8_0))
+ },
+ { },
+};
+
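+/*
+ * Drive strength control registers. Each field is described as
+ * { pin, bit offset within the register, field width in bits }.
+ */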
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+ { RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+ { RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+ { RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+ { RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+ { RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+ { RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+ { RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+ { RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+ { RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+ { RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD7_RX */
+ { RCAR_GP_PIN(2, 18), 8, 3 }, /* CANFD7_TX */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD4_RX */
+ { RCAR_GP_PIN(2, 16), 0, 3 }, /* CANFD4_TX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(3, 14), 24, 2 }, /* IPC_CLKOUT */
+ { RCAR_GP_PIN(3, 13), 20, 2 }, /* IPC_CLKIN */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* MMC_D6 */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) {
+ { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */
+ { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) {
+ { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* TSN0_RX_CTL */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* TSN0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* TSN0_AVTP_MATCH */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* TSN0_LINK */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* TSN0_PHY_INT */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* TSN0_AVTP_PPS1 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* TSN0_MDC */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* TSN0_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* TSN0_TD0 */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* TSN0_TD1 */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* TSN0_RD1 */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* TSN0_TXC */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* TSN0_RXC */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* TSN0_RD0 */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* TSN0_TX_CTL */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* TSN0_AVTP_PPS0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 22), 24, 3 }, /* PCIE1_CLKREQ_N */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ { RCAR_GP_PIN(4, 20), 16, 3 }, /* TSN0_TXCREFCLK */
+ { RCAR_GP_PIN(4, 19), 12, 3 }, /* TSN0_TD2 */
+ { RCAR_GP_PIN(4, 18), 8, 3 }, /* TSN0_TD3 */
+ { RCAR_GP_PIN(4, 17), 4, 3 }, /* TSN0_RD2 */
+ { RCAR_GP_PIN(4, 16), 0, 3 }, /* TSN0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) {
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL8", 0xE6068080) {
+ { RCAR_GP_PIN(8, 7), 28, 3 }, /* SDA3 */
+ { RCAR_GP_PIN(8, 6), 24, 3 }, /* SCL3 */
+ { RCAR_GP_PIN(8, 5), 20, 3 }, /* SDA2 */
+ { RCAR_GP_PIN(8, 4), 16, 3 }, /* SCL2 */
+ { RCAR_GP_PIN(8, 3), 12, 3 }, /* SDA1 */
+ { RCAR_GP_PIN(8, 2), 8, 3 }, /* SCL1 */
+ { RCAR_GP_PIN(8, 1), 4, 3 }, /* SDA0 */
+ { RCAR_GP_PIN(8, 0), 0, 3 }, /* SCL0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL8", 0xE6068084) {
+ { RCAR_GP_PIN(8, 13), 20, 3 }, /* GP8_13 */
+ { RCAR_GP_PIN(8, 12), 16, 3 }, /* GP8_12 */
+ { RCAR_GP_PIN(8, 11), 12, 3 }, /* SDA5 */
+ { RCAR_GP_PIN(8, 10), 8, 3 }, /* SCL5 */
+ { RCAR_GP_PIN(8, 9), 4, 3 }, /* SDA4 */
+ { RCAR_GP_PIN(8, 8), 0, 3 }, /* SCL4 */
+ } },
+ { },
+};
+
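+/* POCn registers control the I/O voltage of the corresponding GPIO bank */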
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC3,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+ POC8,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xE60500A0, },
+ [POC1] = { 0xE60508A0, },
+ [POC3] = { 0xE60588A0, },
+ [POC4] = { 0xE60600A0, },
+ [POC5] = { 0xE60608A0, },
+ [POC6] = { 0xE60610A0, },
+ [POC7] = { 0xE60618A0, },
+ [POC8] = { 0xE60680A0, },
+ { /* sentinel */ },
+};
+
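+/*
+ * Look up the POC control register and bit for a pin. Only banks 0, 1, 3 and
+ * 8 support I/O voltage switching; all other pins return -EINVAL.
+ */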
+static int r8a779g0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 18))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ if (pin >= RCAR_GP_PIN(1, 0) && pin <= RCAR_GP_PIN(1, 22))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC3].reg;
+ if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 12))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC8].reg;
+ if (pin >= RCAR_GP_PIN(8, 0) && pin <= RCAR_GP_PIN(8, 13))
+ return bit;
+
+ return -EINVAL;
+}
+
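+/*
+ * Bias control registers. PUENn enables the pull resistor for a pin and PUDn
+ * selects between pull-up and pull-down; each table maps register bits to
+ * GPIO pins, with unused bits marked SH_PFC_PIN_NONE.
+ */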
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */
+ [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */
+ [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */
+ [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */
+ [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */
+ [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */
+ [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */
+ [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */
+ [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */
+ [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */
+ [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */
+ [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */
+ [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */
+ [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */
+ [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */
+ [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */
+ [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */
+ [12] = RCAR_GP_PIN(1, 12), /* HTX0 */
+ [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */
+ [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */
+ [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */
+ [16] = RCAR_GP_PIN(1, 16), /* HRX0 */
+ [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */
+ [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */
+ [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */
+ [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */
+ [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */
+ [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */
+ [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */
+ [24] = RCAR_GP_PIN(1, 24), /* HRX3 */
+ [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */
+ [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */
+ [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */
+ [28] = RCAR_GP_PIN(1, 28), /* HTX3 */
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */
+ [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */
+ [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */
+ [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */
+ [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */
+ [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */
+ [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */
+ [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */
+ [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */
+ [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */
+ [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */
+ [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */
+ [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */
+ [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */
+ [16] = RCAR_GP_PIN(2, 16), /* CANFD4_TX */
+ [17] = RCAR_GP_PIN(2, 17), /* CANFD4_RX */
+ [18] = RCAR_GP_PIN(2, 18), /* CANFD7_TX */
+ [19] = RCAR_GP_PIN(2, 19), /* CANFD7_RX */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */
+ [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */
+ [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */
+ [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */
+ [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */
+ [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */
+ [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */
+ [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */
+ [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */
+ [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */
+ [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */
+ [11] = RCAR_GP_PIN(3, 11), /* SD_CD */
+ [12] = RCAR_GP_PIN(3, 12), /* SD_WP */
+ [13] = RCAR_GP_PIN(3, 13), /* IPC_CLKIN */
+ [14] = RCAR_GP_PIN(3, 14), /* IPC_CLKOUT */
+ [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */
+ [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */
+ [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */
+ [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */
+ [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */
+ [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */
+ [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */
+ [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */
+ [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */
+ [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */
+ [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */
+ [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */
+ [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */
+ [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */
+ [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* TSN0_MDIO */
+ [ 1] = RCAR_GP_PIN(4, 1), /* TSN0_MDC */
+ [ 2] = RCAR_GP_PIN(4, 2), /* TSN0_AVTP_PPS1 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* TSN0_PHY_INT */
+ [ 4] = RCAR_GP_PIN(4, 4), /* TSN0_LINK */
+ [ 5] = RCAR_GP_PIN(4, 5), /* TSN0_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(4, 6), /* TSN0_AVTP_CAPTURE */
+ [ 7] = RCAR_GP_PIN(4, 7), /* TSN0_RX_CTL */
+ [ 8] = RCAR_GP_PIN(4, 8), /* TSN0_AVTP_PPS0 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* TSN0_TX_CTL */
+ [10] = RCAR_GP_PIN(4, 10), /* TSN0_RD0 */
+ [11] = RCAR_GP_PIN(4, 11), /* TSN0_RXC */
+ [12] = RCAR_GP_PIN(4, 12), /* TSN0_TXC */
+ [13] = RCAR_GP_PIN(4, 13), /* TSN0_RD1 */
+ [14] = RCAR_GP_PIN(4, 14), /* TSN0_TD1 */
+ [15] = RCAR_GP_PIN(4, 15), /* TSN0_TD0 */
+ [16] = RCAR_GP_PIN(4, 16), /* TSN0_RD3 */
+ [17] = RCAR_GP_PIN(4, 17), /* TSN0_RD2 */
+ [18] = RCAR_GP_PIN(4, 18), /* TSN0_TD3 */
+ [19] = RCAR_GP_PIN(4, 19), /* TSN0_TD2 */
+ [20] = RCAR_GP_PIN(4, 20), /* TSN0_TXCREFCLK */
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = RCAR_GP_PIN(4, 22), /* PCIE1_CLKREQ_N */
+ [23] = RCAR_GP_PIN(4, 23), /* AVS0 */
+ [24] = RCAR_GP_PIN(4, 24), /* AVS1 */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(5, 1), /* AVB2_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */
+ [14] = RCAR_GP_PIN(6, 14), /* AVB1_RD1 */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN8", 0xE60680C0, "PUD8", 0xE60680E0) {
+ [ 0] = RCAR_GP_PIN(8, 0), /* SCL0 */
+ [ 1] = RCAR_GP_PIN(8, 1), /* SDA0 */
+ [ 2] = RCAR_GP_PIN(8, 2), /* SCL1 */
+ [ 3] = RCAR_GP_PIN(8, 3), /* SDA1 */
+ [ 4] = RCAR_GP_PIN(8, 4), /* SCL2 */
+ [ 5] = RCAR_GP_PIN(8, 5), /* SDA2 */
+ [ 6] = RCAR_GP_PIN(8, 6), /* SCL3 */
+ [ 7] = RCAR_GP_PIN(8, 7), /* SDA3 */
+ [ 8] = RCAR_GP_PIN(8, 8), /* SCL4 */
+ [ 9] = RCAR_GP_PIN(8, 9), /* SDA4 */
+ [10] = RCAR_GP_PIN(8, 10), /* SCL5 */
+ [11] = RCAR_GP_PIN(8, 11), /* SDA5 */
+ [12] = RCAR_GP_PIN(8, 12), /* GP8_12 */
+ [13] = RCAR_GP_PIN(8, 13), /* GP8_13 */
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations r8a779g0_pin_ops = {
+ .pin_to_pocctrl = r8a779g0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779g0_pinmux_info = {
+ .name = "r8a779g0_pfc",
+ .ops = &r8a779g0_pin_ops,
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index a48cac55152c..a43824fd9505 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -9,8 +9,10 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -89,6 +91,7 @@
#define PIN(n) (0x0800 + 0x10 + (n))
#define IOLH(n) (0x1000 + (n) * 8)
#define IEN(n) (0x1800 + (n) * 8)
+#define ISEL(n) (0x2c80 + (n) * 8)
#define PWPR (0x3014)
#define SD_CH(n) (0x3000 + (n) * 4)
#define QSPI (0x3008)
@@ -112,6 +115,10 @@
#define RZG2L_PIN_ID_TO_PORT_OFFSET(id) (RZG2L_PIN_ID_TO_PORT(id) + 0x10)
#define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT)
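+/*
+ * Up to 32 GPIO interrupts can be routed to the parent TINT controller. The
+ * parent hwirq packs the GPIOINT number into the upper 16 bits and the
+ * allocated TINT slot (offset by RZG2L_TINT_IRQ_START_INDEX) into the lower
+ * 16 bits.
+ */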
+#define RZG2L_TINT_MAX_INTERRUPT 32
+#define RZG2L_TINT_IRQ_START_INDEX 9
+#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i))
+
struct rzg2l_dedicated_configs {
const char *name;
u32 config;
@@ -137,6 +144,9 @@ struct rzg2l_pinctrl {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
+ DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT);
+ spinlock_t bitmap_lock;
+ unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT];
spinlock_t lock;
};
@@ -517,6 +527,8 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
+ if (!arg)
+ return -EINVAL;
break;
case PIN_CONFIG_POWER_SOURCE: {
@@ -883,8 +895,14 @@ static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void rzg2l_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
+ unsigned int virq;
+
pinctrl_gpio_free(chip->base + offset);
+ virq = irq_find_mapping(chip->irq.domain, offset);
+ if (virq)
+ irq_dispose_mapping(virq);
+
/*
* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
@@ -1104,14 +1122,221 @@ static struct {
}
};
+static int rzg2l_gpio_get_gpioint(unsigned int virq)
+{
+ unsigned int gpioint;
+ unsigned int i;
+ u32 port, bit;
+
+ port = virq / 8;
+ bit = virq % 8;
+
+ if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
+ bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
+ return -EINVAL;
+
+ gpioint = bit;
+ for (i = 0; i < port; i++)
+ gpioint += RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[i]);
+
+ return gpioint;
+}
+
+static void rzg2l_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ void __iomem *addr;
+ u32 port;
+ u8 bit;
+
+ port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+
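+ /*
+  * Each port has two ISEL words with one byte per pin: pins 0-3 sit in
+  * the first word, pins 4-7 in the second.
+  */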
+ addr = pctrl->base + ISEL(port);
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ writel(readl(addr) & ~BIT(bit * 8), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
+ irq_chip_disable_parent(d);
+}
+
+static void rzg2l_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ void __iomem *addr;
+ u32 port;
+ u8 bit;
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+
+ addr = pctrl->base + ISEL(port);
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ writel(readl(addr) | BIT(bit * 8), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ irq_chip_enable_parent(d);
+}
+
+static int rzg2l_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ return irq_chip_set_type_parent(d, type);
+}
+
+static void rzg2l_gpio_irqc_eoi(struct irq_data *d)
+{
+ irq_chip_eoi_parent(d);
+}
+
+static void rzg2l_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ seq_puts(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip rzg2l_gpio_irqchip = {
+ .name = "rzg2l-gpio",
+ .irq_disable = rzg2l_gpio_irq_disable,
+ .irq_enable = rzg2l_gpio_irq_enable,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = rzg2l_gpio_irq_set_type,
+ .irq_eoi = rzg2l_gpio_irqc_eoi,
+ .irq_print_chip = rzg2l_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc);
+ unsigned long flags;
+ int gpioint, irq;
+
+ gpioint = rzg2l_gpio_get_gpioint(child);
+ if (gpioint < 0)
+ return gpioint;
+
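+ /* Reserve a free TINT slot and remember which GPIO owns it so it can be released later */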
+ spin_lock_irqsave(&pctrl->bitmap_lock, flags);
+ irq = bitmap_find_free_region(pctrl->tint_slot, RZG2L_TINT_MAX_INTERRUPT, get_order(1));
+ spin_unlock_irqrestore(&pctrl->bitmap_lock, flags);
+ if (irq < 0)
+ return -ENOSPC;
+ pctrl->hwirq[irq] = child;
+ irq += RZG2L_TINT_IRQ_START_INDEX;
+
+ /* All these interrupts are level high in the CPU */
+ *parent_type = IRQ_TYPE_LEVEL_HIGH;
+ *parent = RZG2L_PACK_HWIRQ(gpioint, irq);
+ return 0;
+}
+
+static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec = &gfwspec->fwspec;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->param_count = 2;
+ fwspec->param[0] = parent_hwirq;
+ fwspec->param[1] = parent_type;
+
+ return 0;
+}
+
+static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+
+ d = irq_domain_get_irq_data(domain, virq);
+ if (d) {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ unsigned int i;
+
+ for (i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
+ if (pctrl->hwirq[i] == hwirq) {
+ spin_lock_irqsave(&pctrl->bitmap_lock, flags);
+ bitmap_release_region(pctrl->tint_slot, i, get_order(1));
+ spin_unlock_irqrestore(&pctrl->bitmap_lock, flags);
+ pctrl->hwirq[i] = 0;
+ break;
+ }
+ }
+ }
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ unsigned int offset;
+
+ /* Forbid unused lines to be mapped as IRQs */
+ for (offset = 0; offset < chip->ngpio; offset++) {
+ u32 port, bit;
+
+ port = offset / 8;
+ bit = offset % 8;
+
+ if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
+ bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
+ clear_bit(offset, valid_mask);
+ }
+}
+
static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
{
struct device_node *np = pctrl->dev->of_node;
struct gpio_chip *chip = &pctrl->gpio_chip;
const char *name = dev_name(pctrl->dev);
+ struct irq_domain *parent_domain;
struct of_phandle_args of_args;
+ struct device_node *parent_np;
+ struct gpio_irq_chip *girq;
int ret;
+ parent_np = of_irq_find_parent(np);
+ if (!parent_np)
+ return -ENXIO;
+
+ parent_domain = irq_find_host(parent_np);
+ of_node_put(parent_np);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
if (ret) {
dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
@@ -1138,6 +1363,15 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
chip->base = -1;
chip->ngpio = of_args.args[2];
+ girq = &chip->irq;
+ gpio_irq_chip_set_chip(girq, &rzg2l_gpio_irqchip);
+ girq->fwnode = of_node_to_fwnode(np);
+ girq->parent_domain = parent_domain;
+ girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq;
+ girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec;
+ girq->child_irq_domain_ops.free = rzg2l_gpio_irq_domain_free;
+ girq->init_valid_mask = rzg2l_init_irq_valid_mask;
+
pctrl->gpio_range.id = 0;
pctrl->gpio_range.pin_base = 0;
pctrl->gpio_range.base = 0;
@@ -1253,6 +1487,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
}
spin_lock_init(&pctrl->lock);
+ spin_lock_init(&pctrl->bitmap_lock);
platform_set_drvdata(pdev, pctrl);
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
new file mode 100644
index 000000000000..e8c18198bebd
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2M Pin Control and GPIO driver core
+ *
+ * Based on:
+ * Renesas RZ/G2L Pin Control and GPIO driver core
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/pinctrl/rzv2m-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzv2m"
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 16 higher bits [31:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(31, 16)
+#define MUX_FUNC(pinconf) FIELD_GET(MUX_FUNC_MASK, (pinconf))
+
+/* PIN capabilities */
+#define PIN_CFG_GRP_1_8V_2 1
+#define PIN_CFG_GRP_1_8V_3 2
+#define PIN_CFG_GRP_SWIO_1 3
+#define PIN_CFG_GRP_SWIO_2 4
+#define PIN_CFG_GRP_3_3V 5
+#define PIN_CFG_GRP_MASK GENMASK(2, 0)
+#define PIN_CFG_BIAS BIT(3)
+#define PIN_CFG_DRV BIT(4)
+#define PIN_CFG_SLEW BIT(5)
+
+#define RZV2M_MPXED_PIN_FUNCS (PIN_CFG_BIAS | \
+ PIN_CFG_DRV | \
+ PIN_CFG_SLEW)
+
+/*
+ * n indicates number of pins in the port, a is the register index
+ * and f is pin configuration capabilities supported.
+ */
+#define RZV2M_GPIO_PORT_PACK(n, a, f) (((n) << 24) | ((a) << 16) | (f))
+#define RZV2M_GPIO_PORT_GET_PINCNT(x) FIELD_GET(GENMASK(31, 24), (x))
+#define RZV2M_GPIO_PORT_GET_INDEX(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_GPIO_PORT_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
+
+#define RZV2M_DEDICATED_PORT_IDX 22
+
+/*
+ * BIT(31) indicates dedicated pin, b is the register bits (b * 16)
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZV2M_SINGLE_PIN BIT(31)
+#define RZV2M_SINGLE_PIN_PACK(b, f) (RZV2M_SINGLE_PIN | \
+ ((RZV2M_DEDICATED_PORT_IDX) << 24) | \
+ ((b) << 16) | (f))
+#define RZV2M_SINGLE_PIN_GET_PORT(x) FIELD_GET(GENMASK(30, 24), (x))
+#define RZV2M_SINGLE_PIN_GET_BIT(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_SINGLE_PIN_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
+
+#define RZV2M_PIN_ID_TO_PORT(id) ((id) / RZV2M_PINS_PER_PORT)
+#define RZV2M_PIN_ID_TO_PIN(id) ((id) % RZV2M_PINS_PER_PORT)
+
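+/*
+ * Per-port register offsets (0x40 stride per port). The dedicated-pin port
+ * (index 22) uses shared DRV and SR registers at fixed offsets.
+ */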
+#define DO(n) (0x00 + (n) * 0x40)
+#define OE(n) (0x04 + (n) * 0x40)
+#define IE(n) (0x08 + (n) * 0x40)
+#define PFSEL(n) (0x10 + (n) * 0x40)
+#define DI(n) (0x20 + (n) * 0x40)
+#define PUPD(n) (0x24 + (n) * 0x40)
+#define DRV(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x28 + (n) * 0x40) \
+ : 0x590)
+#define SR(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x2c + (n) * 0x40) \
+ : 0x594)
+#define DI_MSK(n) (0x30 + (n) * 0x40)
+#define EN_MSK(n) (0x34 + (n) * 0x40)
+
+#define PFC_MASK 0x07
+#define PUPD_MASK 0x03
+#define DRV_MASK 0x03
+
+struct rzv2m_dedicated_configs {
+ const char *name;
+ u32 config;
+};
+
+struct rzv2m_pinctrl_data {
+ const char * const *port_pins;
+ const u32 *port_pin_configs;
+ const struct rzv2m_dedicated_configs *dedicated_pins;
+ unsigned int n_port_pins;
+ unsigned int n_dedicated_pins;
+};
+
+struct rzv2m_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+
+ const struct rzv2m_pinctrl_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+
+ spinlock_t lock;
+};
+
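+/* Selectable drive strengths in uA, indexed by the 2-bit DRV field, one table per pin group */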
+static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
+static const unsigned int drv_1_8V_group3_uA[] = { 1600, 3200, 6400, 9600 };
+static const unsigned int drv_SWIO_group2_3_3V_uA[] = { 9000, 11000, 13000, 18000 };
+static const unsigned int drv_3_3V_group_uA[] = { 2000, 4000, 8000, 12000 };
+
+/* Helper for registers that have a write enable bit in the upper word */
+static void rzv2m_writel_we(void __iomem *addr, u8 shift, u8 value)
+{
+ writel((BIT(16) | value) << shift, addr);
+}
+
+static void rzv2m_pinctrl_set_pfc_mode(struct rzv2m_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ void __iomem *addr;
+
+ /* Mask input/output */
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 1);
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 1);
+
+ /* Select the function and set the write enable bits */
+ addr = pctrl->base + PFSEL(port) + (pin / 4) * 4;
+ writel(((PFC_MASK << 16) | func) << ((pin % 4) * 4), addr);
+
+ /* Unmask input/output */
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 0);
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 0);
+};
+
+static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ unsigned int i, *psel_val;
+ struct group_desc *group;
+ int *pins;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->pins;
+
+ for (i = 0; i < group->num_pins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
+ RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ rzv2m_pinctrl_set_pfc_mode(pctrl, RZV2M_PIN_ID_TO_PORT(pins[i]),
+ RZV2M_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+};
+
+static int rzv2m_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
+ GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
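+/*
+ * Parse one DT subnode into pinctrl maps: either a "pins" list with generic
+ * configs, or a "pinmux" array that is turned into a generated group and
+ * function (the two are mutually exclusive).
+ */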
+static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int *pins, *psel_val;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ const char *pin;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+ "DT node must contain either a pinmux or pins and not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain a config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux)
+ nmaps += 1;
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzv2m_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = value & MUX_PIN_ID_MASK;
+ psel_val[i] = MUX_FUNC(value);
+ }
+
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+ /*
+ * Register a single group function where the 'data' is an array PSEL
+ * register values read from DT.
+ */
+ pin_fn[0] = np->name;
+ fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+ psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = np->name;
+ maps[idx].data.mux.function = np->name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
+
+static void rzv2m_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device_node *child;
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node(np, child) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
+ num_maps, &index);
+ if (ret < 0) {
+ of_node_put(child);
+ goto done;
+ }
+ }
+
+ if (*num_maps == 0) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ if (ret < 0)
+ rzv2m_dt_free_map(pctldev, *map, *num_maps);
+
+ return ret;
+}
+
+static int rzv2m_validate_gpio_pin(struct rzv2m_pinctrl *pctrl,
+ u32 cfg, u32 port, u8 bit)
+{
+ u8 pincount = RZV2M_GPIO_PORT_GET_PINCNT(cfg);
+ u32 port_index = RZV2M_GPIO_PORT_GET_INDEX(cfg);
+ u32 data;
+
+ if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (port_index != RZV2M_GPIO_PORT_GET_INDEX(data))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void rzv2m_rmw_pin_config(struct rzv2m_pinctrl *pctrl, u32 offset,
+ u8 shift, u32 mask, u32 val)
+{
+ void __iomem *addr = pctrl->base + offset;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << shift);
+ writel(reg | (val << shift), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int rzv2m_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *config)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ unsigned int arg = 0;
+ u32 port;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ enum pin_config_param bias;
+
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
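+ /* PUPD field: 0 = pull-down, 2 = pull-up, other values = bias disabled */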
+ switch ((readl(pctrl->base + PUPD(port)) >> bit) & PUPD_MASK) {
+ case 0:
+ bias = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ bias = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ bias = PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ if (bias != param)
+ return -EINVAL;
+ break;
+ }
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ val = (readl(pctrl->base + DRV(port)) >> bit) & DRV_MASK;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ arg = drv_1_8V_group2_uA[val];
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ arg = drv_1_8V_group3_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ arg = drv_SWIO_group2_3_3V_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ arg = drv_3_3V_group_uA[val];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ arg = readl(pctrl->base + SR(port)) & BIT(bit);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+};
+
+static int rzv2m_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *_configs,
+ unsigned int num_configs)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ enum pin_config_param param;
+ u32 port;
+ unsigned int i;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(_configs[i]);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2 bits per pin */
+ bit *= 2;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = 2;
+ break;
+ default:
+ val = 1;
+ }
+
+ rzv2m_rmw_pin_config(pctrl, PUPD(port), bit, PUPD_MASK, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ const unsigned int *drv_strengths;
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ drv_strengths = drv_1_8V_group2_uA;
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ drv_strengths = drv_1_8V_group3_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ drv_strengths = drv_SWIO_group2_3_3V_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ drv_strengths = drv_3_3V_group_uA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (index = 0; index < 4; index++) {
+ if (arg == drv_strengths[index])
+ break;
+ }
+ if (index >= 4)
+ return -EINVAL;
+
+ /* DRV uses 2 bits per pin */
+ bit *= 2;
+
+ rzv2m_rmw_pin_config(pctrl, DRV(port), bit, DRV_MASK, index);
+ break;
+ }
+
+ case PIN_CONFIG_SLEW_RATE: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ rzv2m_writel_we(pctrl->base + SR(port), bit, !arg);
+ break;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_set(pctldev, pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, prev_config = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_get(pctldev, pins[i], config);
+ if (ret)
+ return ret;
+
+ /* Check config matches previous pins */
+ if (i && prev_config != *config)
+ return -EOPNOTSUPP;
+
+ prev_config = *config;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops rzv2m_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzv2m_dt_node_to_map,
+ .dt_free_map = rzv2m_dt_free_map,
+};
+
+static const struct pinmux_ops rzv2m_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzv2m_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct pinconf_ops rzv2m_pinctrl_confops = {
+ .is_generic = true,
+ .pin_config_get = rzv2m_pinctrl_pinconf_get,
+ .pin_config_set = rzv2m_pinctrl_pinconf_set,
+ .pin_config_group_set = rzv2m_pinctrl_pinconf_group_set,
+ .pin_config_group_get = rzv2m_pinctrl_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
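+/* Claim the pin from pinctrl and switch it to its GPIO function (PFC mode 0). */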
+static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int ret;
+
+ ret = pinctrl_gpio_request(chip->base + offset);
+ if (ret)
+ return ret;
+
+ rzv2m_pinctrl_set_pfc_mode(pctrl, port, bit, 0);
+
+ return 0;
+}
+
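+/* Drive the output-enable and input-enable bits so that exactly one is set. */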
+static void rzv2m_gpio_set_direction(struct rzv2m_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ rzv2m_writel_we(pctrl->base + OE(port), bit, output);
+ rzv2m_writel_we(pctrl->base + IE(port), bit, !output);
+}
+
+static int rzv2m_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ if (!(readl(pctrl->base + IE(port)) & BIT(bit)))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rzv2m_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_writel_we(pctrl->base + DO(port), bit, !!value);
+}
+
+static int rzv2m_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set(chip, offset, value);
+ rzv2m_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int direction = rzv2m_gpio_get_direction(chip, offset);
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ return !!(readl(pctrl->base + DI(port)) & BIT(bit));
+ else
+ return !!(readl(pctrl->base + DO(port)) & BIT(bit));
+}
+
+static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip->base + offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzv2m_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzv2m_gpio_names[] = {
+ "P0_0", "P0_1", "P0_2", "P0_3", "P0_4", "P0_5", "P0_6", "P0_7",
+ "P0_8", "P0_9", "P0_10", "P0_11", "P0_12", "P0_13", "P0_14", "P0_15",
+ "P1_0", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P1_7",
+ "P1_8", "P1_9", "P1_10", "P1_11", "P1_12", "P1_13", "P1_14", "P1_15",
+ "P2_0", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6", "P2_7",
+ "P2_8", "P2_9", "P2_10", "P2_11", "P2_12", "P2_13", "P2_14", "P2_15",
+ "P3_0", "P3_1", "P3_2", "P3_3", "P3_4", "P3_5", "P3_6", "P3_7",
+ "P3_8", "P3_9", "P3_10", "P3_11", "P3_12", "P3_13", "P3_14", "P3_15",
+ "P4_0", "P4_1", "P4_2", "P4_3", "P4_4", "P4_5", "P4_6", "P4_7",
+ "P4_8", "P4_9", "P4_10", "P4_11", "P4_12", "P4_13", "P4_14", "P4_15",
+ "P5_0", "P5_1", "P5_2", "P5_3", "P5_4", "P5_5", "P5_6", "P5_7",
+ "P5_8", "P5_9", "P5_10", "P5_11", "P5_12", "P5_13", "P5_14", "P5_15",
+ "P6_0", "P6_1", "P6_2", "P6_3", "P6_4", "P6_5", "P6_6", "P6_7",
+ "P6_8", "P6_9", "P6_10", "P6_11", "P6_12", "P6_13", "P6_14", "P6_15",
+ "P7_0", "P7_1", "P7_2", "P7_3", "P7_4", "P7_5", "P7_6", "P7_7",
+ "P7_8", "P7_9", "P7_10", "P7_11", "P7_12", "P7_13", "P7_14", "P7_15",
+ "P8_0", "P8_1", "P8_2", "P8_3", "P8_4", "P8_5", "P8_6", "P8_7",
+ "P8_8", "P8_9", "P8_10", "P8_11", "P8_12", "P8_13", "P8_14", "P8_15",
+ "P9_0", "P9_1", "P9_2", "P9_3", "P9_4", "P9_5", "P9_6", "P9_7",
+ "P9_8", "P9_9", "P9_10", "P9_11", "P9_12", "P9_13", "P9_14", "P9_15",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P10_8", "P10_9", "P10_10", "P10_11", "P10_12", "P10_13", "P10_14", "P10_15",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P11_8", "P11_9", "P11_10", "P11_11", "P11_12", "P11_13", "P11_14", "P11_15",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P12_8", "P12_9", "P12_10", "P12_11", "P12_12", "P12_13", "P12_14", "P12_15",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P13_8", "P13_9", "P13_10", "P13_11", "P13_12", "P13_13", "P13_14", "P13_15",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P14_8", "P14_9", "P14_10", "P14_11", "P14_12", "P14_13", "P14_14", "P14_15",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P15_8", "P15_9", "P15_10", "P15_11", "P15_12", "P15_13", "P15_14", "P15_15",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P16_8", "P16_9", "P16_10", "P16_11", "P16_12", "P16_13", "P16_14", "P16_15",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P17_8", "P17_9", "P17_10", "P17_11", "P17_12", "P17_13", "P17_14", "P17_15",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P18_8", "P18_9", "P18_10", "P18_11", "P18_12", "P18_13", "P18_14", "P18_15",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P19_8", "P19_9", "P19_10", "P19_11", "P19_12", "P19_13", "P19_14", "P19_15",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P20_8", "P20_9", "P20_10", "P20_11", "P20_12", "P20_13", "P20_14", "P20_15",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P21_8", "P21_9", "P21_10", "P21_11", "P21_12", "P21_13", "P21_14", "P21_15",
+};
+
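+/* Pin count and default configuration capabilities of each GPIO port. */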
+static const u32 rzv2m_gpio_configs[] = {
+ RZV2M_GPIO_PORT_PACK(14, 0, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 1, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 2, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 3, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 4, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 5, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 6, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(6, 7, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 8, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 9, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 10, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 11, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 12, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 13, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 14, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 15, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(14, 16, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(1, 17, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(0, 18, 0),
+ RZV2M_GPIO_PORT_PACK(0, 19, 0),
+ RZV2M_GPIO_PORT_PACK(3, 20, PIN_CFG_GRP_1_8V_2 | PIN_CFG_DRV),
+ RZV2M_GPIO_PORT_PACK(1, 21, PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW),
+};
+
+static const struct rzv2m_dedicated_configs rzv2m_dedicated_pins[] = {
+ { "NAWPN", RZV2M_SINGLE_PIN_PACK(0,
+ (PIN_CFG_GRP_SWIO_2 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM0CLK", RZV2M_SINGLE_PIN_PACK(1,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM1CLK", RZV2M_SINGLE_PIN_PACK(2,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETDO", RZV2M_SINGLE_PIN_PACK(5,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETMS", RZV2M_SINGLE_PIN_PACK(6,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "PCRSTOUTB", RZV2M_SINGLE_PIN_PACK(12,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "USPWEN", RZV2M_SINGLE_PIN_PACK(14,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+};
+
+static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct device_node *np = pctrl->dev->of_node;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ const char *name = dev_name(pctrl->dev);
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
+ if (ret) {
+ dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
+ return ret;
+ }
+
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != pctrl->data->n_port_pins) {
+ dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
+ return -EINVAL;
+ }
+
+ chip->names = pctrl->data->port_pins;
+ chip->request = rzv2m_gpio_request;
+ chip->free = rzv2m_gpio_free;
+ chip->get_direction = rzv2m_gpio_get_direction;
+ chip->direction_input = rzv2m_gpio_direction_input;
+ chip->direction_output = rzv2m_gpio_direction_output;
+ chip->get = rzv2m_gpio_get;
+ chip->set = rzv2m_gpio_set;
+ chip->label = name;
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = -1;
+ chip->ngpio = of_args.args[2];
+
+ pctrl->gpio_range.id = 0;
+ pctrl->gpio_range.pin_base = 0;
+ pctrl->gpio_range.base = 0;
+ pctrl->gpio_range.npins = chip->ngpio;
+ pctrl->gpio_range.name = chip->label;
+ pctrl->gpio_range.gc = chip;
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO controller\n");
+ return ret;
+ }
+
+ dev_dbg(pctrl->dev, "Registered gpio controller\n");
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int i, j;
+ u32 *pin_data;
+ int ret;
+
+ pctrl->desc.name = DRV_NAME;
+ pctrl->desc.npins = pctrl->data->n_port_pins + pctrl->data->n_dedicated_pins;
+ pctrl->desc.pctlops = &rzv2m_pinctrl_pctlops;
+ pctrl->desc.pmxops = &rzv2m_pinctrl_pmxops;
+ pctrl->desc.confops = &rzv2m_pinctrl_confops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(pctrl->dev, pctrl->desc.npins,
+ sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ pctrl->desc.pins = pins;
+
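+ /* One port_pin_configs entry covers RZV2M_PINS_PER_PORT consecutive pins. */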
+ for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) {
+ pins[i].number = i;
+ pins[i].name = pctrl->data->port_pins[i];
+ if (i && !(i % RZV2M_PINS_PER_PORT))
+ j++;
+ pin_data[i] = pctrl->data->port_pin_configs[j];
+ pins[i].drv_data = &pin_data[i];
+ }
+
+ for (i = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ unsigned int index = pctrl->data->n_port_pins + i;
+
+ pins[index].number = index;
+ pins[index].name = pctrl->data->dedicated_pins[i].name;
+ pin_data[index] = pctrl->data->dedicated_pins[i].config;
+ pins[index].drv_data = &pin_data[index];
+ }
+
+ ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
+ &pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ ret = rzv2m_gpio_register(pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzv2m_pinctrl_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int rzv2m_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rzv2m_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+
+ pctrl->data = of_device_get_match_data(&pdev->dev);
+ if (!pctrl->data)
+ return -EINVAL;
+
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ pctrl->clk = devm_clk_get(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ ret = PTR_ERR(pctrl->clk);
+ dev_err(pctrl->dev, "failed to get GPIO clk : %i\n", ret);
+ return ret;
+ }
+
+ spin_lock_init(&pctrl->lock);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to enable GPIO clk: %i\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2m_pinctrl_clk_disable,
+ pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev,
+ "failed to register GPIO clk disable action, %i\n",
+ ret);
+ return ret;
+ }
+
+ ret = rzv2m_pinctrl_register(pctrl);
+ if (ret)
+ return ret;
+
+ dev_info(pctrl->dev, "%s support registered\n", DRV_NAME);
+ return 0;
+}
+
+static struct rzv2m_pinctrl_data r9a09g011_data = {
+ .port_pins = rzv2m_gpio_names,
+ .port_pin_configs = rzv2m_gpio_configs,
+ .dedicated_pins = rzv2m_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(rzv2m_gpio_configs) * RZV2M_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2m_dedicated_pins),
+};
+
+static const struct of_device_id rzv2m_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a09g011-pinctrl",
+ .data = &r9a09g011_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzv2m_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzv2m_pinctrl_of_table),
+ },
+ .probe = rzv2m_pinctrl_probe,
+};
+
+static int __init rzv2m_pinctrl_init(void)
+{
+ return platform_driver_register(&rzv2m_pinctrl_driver);
+}
+core_initcall(rzv2m_pinctrl_init);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/V2M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 12bc279f5733..0fcb29ab0c84 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -325,6 +325,7 @@ extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779f0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779g0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
@@ -492,9 +493,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
-#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_13(bank, fn, sfx, cfg) \
PORT_GP_CFG_12(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 12, fn, sfx, cfg)
+#define PORT_GP_13(bank, fn, sfx) PORT_GP_CFG_13(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_13(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 13, fn, sfx, cfg)
#define PORT_GP_14(bank, fn, sfx) PORT_GP_CFG_14(bank, fn, sfx, 0)
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 6d7ca1758292..a8212fc126bf 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -27,8 +27,6 @@
#include <linux/soc/samsung/exynos-pmu.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -173,7 +171,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_EINT << shift;
+ con |= EXYNOS_PIN_CON_FUNC_EINT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -196,7 +194,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_INPUT << shift;
+ con |= PIN_CON_FUNC_INPUT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index bfad1ced8017..7bd6d82c9f36 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -16,6 +16,9 @@
#ifndef __PINCTRL_SAMSUNG_EXYNOS_H
#define __PINCTRL_SAMSUNG_EXYNOS_H
+/* Values for the pin CON register */
+#define EXYNOS_PIN_CON_FUNC_EINT 0xf
+
/* External GPIO and wakeup interrupt related definitions */
#define EXYNOS_GPIO_ECON_OFFSET 0x700
#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 26d309d2516d..4837bceb767b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -26,8 +26,6 @@
#include <linux/of_device.h>
#include <linux/spinlock.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "../core.h"
#include "pinctrl-samsung.h"
@@ -614,7 +612,7 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
data = readl(reg);
data &= ~(mask << shift);
if (!input)
- data |= EXYNOS_PIN_FUNC_OUTPUT << shift;
+ data |= PIN_CON_FUNC_OUTPUT << shift;
writel(data, reg);
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index fc6f5199c548..9af93e3d8d9f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -53,6 +53,14 @@ enum pincfg_type {
#define PINCFG_UNPACK_TYPE(cfg) ((cfg) & PINCFG_TYPE_MASK)
#define PINCFG_UNPACK_VALUE(cfg) (((cfg) & PINCFG_VALUE_MASK) >> \
PINCFG_VALUE_SHIFT)
+/*
+ * Values for the pin CON register, selecting the pin function.
+ * The basic set (input and output) is the same across S3C24xx, S3C64xx,
+ * S5PV210, Exynos ARMv7, Exynos ARMv8 and Tesla FSD.
+ */
+#define PIN_CON_FUNC_INPUT 0x0
+#define PIN_CON_FUNC_OUTPUT 0x1
+
/**
* enum eint_type - possible external interrupt types.
* @EINT_TYPE_NONE: bank does not support external interrupts
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index 3ba47040ac42..2b3335ab56c6 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
}
*map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+ if (*map == NULL)
+ return -ENOMEM;
+
for (i = 0; i < (*num_maps); i++) {
dt_pin = be32_to_cpu(list[i]);
pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 33751a6a0757..a78fdbbdfc0c 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -29,7 +29,6 @@ config PINCTRL_SUN6I_A31
config PINCTRL_SUN6I_A31_R
bool "Support for the Allwinner A31 R-PIO"
default MACH_SUN6I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23
@@ -55,7 +54,6 @@ config PINCTRL_SUN8I_A83T_R
config PINCTRL_SUN8I_A23_R
bool "Support for the Allwinner A23 and A33 R-PIO"
default MACH_SUN8I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3
@@ -81,7 +79,11 @@ config PINCTRL_SUN9I_A80
config PINCTRL_SUN9I_A80_R
bool "Support for the Allwinner A80 R-PIO"
default MACH_SUN9I
- depends on RESET_CONTROLLER
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN20I_D1
+ bool "Support for the Allwinner D1 PIO"
+ default MACH_SUN8I || (RISCV && ARCH_SUNXI)
select PINCTRL_SUNXI
config PINCTRL_SUN50I_A64
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index d3440c42b9d6..2ff5a55927ad 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_SUN8I_A83T_R) += pinctrl-sun8i-a83t-r.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
+obj-$(CONFIG_PINCTRL_SUN20I_D1) += pinctrl-sun20i-d1.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
new file mode 100644
index 000000000000..40858b881298
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner D1 SoC pinctrl driver.
+ *
+ * Copyright (c) 2020 wuyan@allwinnertech.com
+ * Copyright (c) 2021-2022 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin d1_pins[] = {
+ /* PB */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "ir"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x8, "spdif"), /* OUT */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm4"),
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x8, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x8, "bist0"), /* BIST_RESULT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x8, "bist1"), /* BIST_RESULT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x3, "pwm6"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x3, "pwm7"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x3, "pwm2"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x3, "pwm0"),
+ SUNXI_FUNCTION(0x4, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x7, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 12)),
+ /* PC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x6, "pll"), /* DBG-CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 7)),
+ /* PD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0P */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1P */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKP */
+ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKN */
+ SUNXI_FUNCTION(0x5, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2P */
+ SUNXI_FUNCTION(0x5, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2N */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3P */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3N */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x5, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 18)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x5, "pwm3"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 19)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 20)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 21)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x3, "ir"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 22)),
+ /* PE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCTL/CRS_DV */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x4, "pwm2"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDC */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x4, "pwm3"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDIO */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x4, "pwm4"),
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x3, "ncsi0"), /* FIELD */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "ir"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 17)),
+ /* PF */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x4, "ir"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 6)),
+ /* PG */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCTRL/CRS_DV */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD0 */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD1 */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCK */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD0 */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD1 */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD3 */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD2 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD3 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "pwm0"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* CLKIN/RXER */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION(0x6, "ledc"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDC */
+ SUNXI_FUNCTION(0x5, "i2s1_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDIO */
+ SUNXI_FUNCTION(0x5, "i2s1_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir"), /* RX */
+ SUNXI_FUNCTION(0x3, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION(0x4, "pwm5"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x7, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x7, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 18)),
+};
+
+static const unsigned int d1_irq_bank_map[] = { 1, 2, 3, 4, 5, 6 };
+
+static const struct sunxi_pinctrl_desc d1_pinctrl_data = {
+ .pins = d1_pins,
+ .npins = ARRAY_SIZE(d1_pins),
+ .irq_banks = ARRAY_SIZE(d1_irq_bank_map),
+ .irq_bank_map = d1_irq_bank_map,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
+};
+
+static int d1_pinctrl_probe(struct platform_device *pdev)
+{
+ unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &d1_pinctrl_data, variant);
+}
+
+static const struct of_device_id d1_pinctrl_match[] = {
+ {
+ .compatible = "allwinner,sun20i-d1-pinctrl",
+ .data = (void *)PINCTRL_SUN20I_D1
+ },
+ {}
+};
+
+static struct platform_driver d1_pinctrl_driver = {
+ .probe = d1_pinctrl_probe,
+ .driver = {
+ .name = "sun20i-d1-pinctrl",
+ .of_match_table = d1_pinctrl_match,
+ },
+};
+builtin_platform_driver(d1_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index 21054fcacd34..afc1f5df7545 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -82,6 +82,7 @@ static const struct sunxi_pinctrl_desc a100_r_pinctrl_data = {
.npins = ARRAY_SIZE(a100_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index e69f6da40dc0..f682e0e4244d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -684,7 +684,7 @@ static const struct sunxi_pinctrl_desc a100_pinctrl_data = {
.npins = ARRAY_SIZE(a100_pins),
.irq_banks = 7,
.irq_bank_map = a100_irq_bank_map,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
index e69c8dae121a..ef261eccda56 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
@@ -24,7 +24,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index c7d90c44e87a..3aba0aec3d78 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -107,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun50i_h6_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
index 8e4f10ab96ce..c39ea46046c2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index 152b71226a80..d6ca720ee8d8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -525,7 +525,7 @@ static const struct sunxi_pinctrl_desc h616_pinctrl_data = {
.irq_banks = ARRAY_SIZE(h616_irq_bank_map),
.irq_bank_map = h616_irq_bank_map,
.irq_read_needs_mux = true,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int h616_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index a00246d3dd49..2486cdf345e1 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -111,26 +110,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- dev_err(&pdev->dev, "Reset controller missing\n");
- return PTR_ERR(rstc);
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun6i_a31_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun6i_a31_r_pinctrl_data);
}
static const struct of_device_id sun6i_a31_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 9e5b61449999..4fae12c905b7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -98,29 +97,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret == -EPROBE_DEFER)
- return ret;
- dev_err(&pdev->dev, "Reset controller missing err=%d\n", ret);
- return ret;
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun8i_a23_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun8i_a23_r_pinctrl_data);
}
static const struct of_device_id sun8i_a23_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
index 6531cf67958e..0cb6c1a970c9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
@@ -27,7 +27,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index a191a65217ac..f11cb5bba0f7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dd928402af99..6c04027d0dd9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -46,6 +46,67 @@ static struct lock_class_key sunxi_pinctrl_irq_request_class;
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
+/*
+ * The sunXi PIO registers are organized as a series of banks, with registers
+ * for each bank in the following order:
+ * - Mux config
+ * - Data value
+ * - Drive level
+ * - Pull direction
+ *
+ * Multiple consecutive registers are used for fields wider than one bit.
+ *
+ * The following functions calculate the register and the bit offset to access.
+ * They take a pin number which is relative to the start of the current device.
+ */
+static void sunxi_mux_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + MUX_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(MUX_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_data_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * DATA_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + DATA_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(DATA_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_dlevel_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * pctl->dlevel_field_width;
+
+ *reg = bank * pctl->bank_mem_size + DLEVEL_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(pctl->dlevel_field_width) - 1) << *shift;
+}
+
+static void sunxi_pull_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * PULL_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + pctl->pull_regs_offset +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(PULL_FIELD_WIDTH) - 1) << *shift;
+}
+
static struct sunxi_pinctrl_group *
sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
{
@@ -451,22 +512,19 @@ static const struct pinctrl_ops sunxi_pctrl_ops = {
.get_group_pins = sunxi_pctrl_get_group_pins,
};
-static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param,
- u32 *offset, u32 *shift, u32 *mask)
+static int sunxi_pconf_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, enum pin_config_param param,
+ u32 *reg, u32 *shift, u32 *mask)
{
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH:
- *offset = sunxi_dlevel_reg(pin);
- *shift = sunxi_dlevel_offset(pin);
- *mask = DLEVEL_PINS_MASK;
+ sunxi_dlevel_reg(pctl, pin, reg, shift, mask);
break;
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_DISABLE:
- *offset = sunxi_pull_reg(pin);
- *shift = sunxi_pull_offset(pin);
- *mask = PULL_PINS_MASK;
+ sunxi_pull_reg(pctl, pin, reg, shift, mask);
break;
default:
@@ -481,17 +539,17 @@ static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- u32 offset, shift, mask, val;
+ u32 reg, shift, mask, val;
u16 arg;
int ret;
pin -= pctl->desc->pin_base;
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
- val = (readl(pctl->membase + offset) >> shift) & mask;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
switch (pinconf_to_config_param(*config)) {
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -547,16 +605,15 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
pin -= pctl->desc->pin_base;
for (i = 0; i < num_configs; i++) {
+ u32 arg, reg, shift, mask, val;
enum pin_config_param param;
unsigned long flags;
- u32 offset, shift, mask, reg;
- u32 arg, val;
int ret;
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
@@ -593,9 +650,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
}
raw_spin_lock_irqsave(&pctl->lock, flags);
- reg = readl(pctl->membase + offset);
- reg &= ~(mask << shift);
- writel(reg | val << shift, pctl->membase + offset);
+ writel((readl(pctl->membase + reg) & ~mask) | val << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
} /* for each config */
@@ -624,7 +680,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
unsigned pin,
struct regulator *supply)
{
- unsigned short bank = pin / PINS_PER_BANK;
+ unsigned short bank;
unsigned long flags;
u32 val, reg;
int uV;
@@ -640,6 +696,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
if (uV == 0)
return 0;
+ pin -= pctl->desc->pin_base;
+ bank = pin / PINS_PER_BANK;
+
switch (pctl->desc->io_bias_cfg_variant) {
case BIAS_VOLTAGE_GRP_CONFIG:
/*
@@ -657,12 +716,20 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
else
val = 0xD; /* 3.3V */
- pin -= pctl->desc->pin_base;
-
reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
reg &= ~IO_BIAS_MASK;
writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
return 0;
+ case BIAS_VOLTAGE_PIO_POW_MODE_CTL:
+ val = uV > 1800000 && uV <= 2500000 ? BIT(bank) : 0;
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ reg = readl(pctl->membase + PIO_POW_MOD_CTL_REG);
+ reg &= ~BIT(bank);
+ writel(reg | val, pctl->membase + PIO_POW_MOD_CTL_REG);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+
+ fallthrough;
case BIAS_VOLTAGE_PIO_POW_MODE_SEL:
val = uV <= 1800000 ? 1 : 0;
@@ -710,16 +777,16 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ u32 reg, shift, mask;
unsigned long flags;
- u32 val, mask;
+
+ pin -= pctl->desc->pin_base;
+ sunxi_mux_reg(pctl, pin, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- pin -= pctl->desc->pin_base;
- val = readl(pctl->membase + sunxi_mux_reg(pin));
- mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
- writel((val & ~mask) | config << sunxi_mux_offset(pin),
- pctl->membase + sunxi_mux_reg(pin));
+ writel((readl(pctl->membase + reg) & ~mask) | config << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -852,43 +919,43 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
bool set_mux = pctl->desc->irq_read_needs_mux &&
gpiochip_line_is_irq(chip, offset);
u32 pin = offset + chip->base;
- u32 val;
+ u32 reg, shift, mask, val;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_INPUT);
- val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_IRQ);
- return !!val;
+ return val;
}
static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
+ u32 reg, shift, mask, val;
unsigned long flags;
- u32 regval;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- regval = readl(pctl->membase + reg);
+ val = readl(pctl->membase + reg);
if (value)
- regval |= BIT(index);
+ val |= mask;
else
- regval &= ~(BIT(index));
+ val &= ~mask;
- writel(regval, pctl->membase + reg);
+ writel(val, pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -1232,11 +1299,11 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
/*
* Find an upper bound for the maximum number of functions: in
- * the worst case we have gpio_in, gpio_out, irq and up to four
+ * the worst case we have gpio_in, gpio_out, irq and up to seven
* special functions per pin, plus one entry for the sentinel.
* We'll reallocate that later anyway.
*/
- pctl->functions = kcalloc(4 * pctl->ngroups + 4,
+ pctl->functions = kcalloc(7 * pctl->ngroups + 4,
sizeof(*pctl->functions),
GFP_KERNEL);
if (!pctl->functions)
@@ -1429,6 +1496,15 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
pctl->variant = variant;
+ if (pctl->variant >= PINCTRL_SUN20I_D1) {
+ pctl->bank_mem_size = D1_BANK_MEM_SIZE;
+ pctl->pull_regs_offset = D1_PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = D1_DLEVEL_FIELD_WIDTH;
+ } else {
+ pctl->bank_mem_size = BANK_MEM_SIZE;
+ pctl->pull_regs_offset = PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = DLEVEL_FIELD_WIDTH;
+ }
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
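
For reference, the new helpers above reduce to plain arithmetic on the device-relative pin index. A stand-alone user-space sketch (not part of the patch) that mirrors sunxi_mux_reg() and shows what it produces for pin 50 (bank 1, index 18 within the bank) under the legacy 0x24-byte bank layout and the D1 0x30-byte layout:

#include <stdint.h>
#include <stdio.h>

#define PINS_PER_BANK	32
#define MUX_REGS_OFFSET	0x0
#define MUX_FIELD_WIDTH	4

/* Same arithmetic as sunxi_mux_reg(), with bank_mem_size passed in directly. */
static void mux_reg(uint32_t bank_mem_size, uint32_t pin,
		    uint32_t *reg, uint32_t *shift, uint32_t *mask)
{
	uint32_t bank = pin / PINS_PER_BANK;
	uint32_t offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;	/* offset in bits */

	*reg = bank * bank_mem_size + MUX_REGS_OFFSET + offset / 32 * 4;
	*shift = offset % 32;
	*mask = ((1u << MUX_FIELD_WIDTH) - 1) << *shift;
}

int main(void)
{
	uint32_t reg, shift, mask;

	mux_reg(0x24, 50, &reg, &shift, &mask);		/* legacy BANK_MEM_SIZE */
	printf("legacy: reg=0x%02x shift=%u mask=0x%03x\n", reg, shift, mask);
	/* legacy: reg=0x2c shift=8 mask=0xf00 */

	mux_reg(0x30, 50, &reg, &shift, &mask);		/* D1_BANK_MEM_SIZE */
	printf("d1:     reg=0x%02x shift=%u mask=0x%03x\n", reg, shift, mask);
	/* d1:     reg=0x38 shift=8 mask=0xf00 */

	return 0;
}
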
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a32bb5bcb754..a87a2f944d60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -36,23 +36,19 @@
#define BANK_MEM_SIZE 0x24
#define MUX_REGS_OFFSET 0x0
+#define MUX_FIELD_WIDTH 4
#define DATA_REGS_OFFSET 0x10
+#define DATA_FIELD_WIDTH 1
#define DLEVEL_REGS_OFFSET 0x14
+#define DLEVEL_FIELD_WIDTH 2
#define PULL_REGS_OFFSET 0x1c
+#define PULL_FIELD_WIDTH 2
+
+#define D1_BANK_MEM_SIZE 0x30
+#define D1_DLEVEL_FIELD_WIDTH 4
+#define D1_PULL_REGS_OFFSET 0x24
#define PINS_PER_BANK 32
-#define MUX_PINS_PER_REG 8
-#define MUX_PINS_BITS 4
-#define MUX_PINS_MASK 0x0f
-#define DATA_PINS_PER_REG 32
-#define DATA_PINS_BITS 1
-#define DATA_PINS_MASK 0x01
-#define DLEVEL_PINS_PER_REG 16
-#define DLEVEL_PINS_BITS 2
-#define DLEVEL_PINS_MASK 0x03
-#define PULL_PINS_PER_REG 16
-#define PULL_PINS_BITS 2
-#define PULL_PINS_MASK 0x03
#define IRQ_PER_BANK 32
@@ -96,8 +92,11 @@
#define PINCTRL_SUN8I_R40 BIT(8)
#define PINCTRL_SUN8I_V3 BIT(9)
#define PINCTRL_SUN8I_V3S BIT(10)
+/* Variants below here have an updated register layout. */
+#define PINCTRL_SUN20I_D1 BIT(11)
#define PIO_POW_MOD_SEL_REG 0x340
+#define PIO_POW_MOD_CTL_REG 0x344
enum sunxi_desc_bias_voltage {
BIAS_VOLTAGE_NONE,
@@ -111,6 +110,12 @@ enum sunxi_desc_bias_voltage {
* register, as seen on H6 SoC, for example.
*/
BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ /*
+ * Bias voltage is set through PIO_POW_MOD_SEL_REG
+	 * and PIO_POW_MOD_CTL_REG registers, as seen on
+	 * the A100 and D1 SoCs, for example.
+ */
+ BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
struct sunxi_desc_function {
@@ -170,6 +175,9 @@ struct sunxi_pinctrl {
raw_spinlock_t lock;
struct pinctrl_dev *pctl_dev;
unsigned long variant;
+ u32 bank_mem_size;
+ u32 pull_regs_offset;
+ u32 dlevel_field_width;
};
#define SUNXI_PIN(_pin, ...) \
@@ -215,83 +223,6 @@ struct sunxi_pinctrl {
.irqnum = _irq, \
}
-/*
- * The sunXi PIO registers are organized as is:
- * 0x00 - 0x0c Muxing values.
- * 8 pins per register, each pin having a 4bits value
- * 0x10 Pin values
- * 32 bits per register, each pin corresponding to one bit
- * 0x14 - 0x18 Drive level
- * 16 pins per register, each pin having a 2bits value
- * 0x1c - 0x20 Pull-Up values
- * 16 pins per register, each pin having a 2bits value
- *
- * This is for the first bank. Each bank will have the same layout,
- * with an offset being a multiple of 0x24.
- *
- * The following functions calculate from the pin number the register
- * and the bit offset that we should access.
- */
-static inline u32 sunxi_mux_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += MUX_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_mux_offset(u16 pin)
-{
- u32 pin_num = pin % MUX_PINS_PER_REG;
- return pin_num * MUX_PINS_BITS;
-}
-
-static inline u32 sunxi_data_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DATA_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_data_offset(u16 pin)
-{
- u32 pin_num = pin % DATA_PINS_PER_REG;
- return pin_num * DATA_PINS_BITS;
-}
-
-static inline u32 sunxi_dlevel_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DLEVEL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_dlevel_offset(u16 pin)
-{
- u32 pin_num = pin % DLEVEL_PINS_PER_REG;
- return pin_num * DLEVEL_PINS_BITS;
-}
-
-static inline u32 sunxi_pull_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += PULL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_pull_offset(u16 pin)
-{
- u32 pin_num = pin % PULL_PINS_PER_REG;
- return pin_num * PULL_PINS_BITS;
-}
-
static inline u32 sunxi_irq_hw_bank_num(const struct sunxi_pinctrl_desc *desc, u8 bank)
{
if (!desc->irq_bank_map)
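
The new BIAS_VOLTAGE_PIO_POW_MODE_CTL variant (switched to by the A100, A100-R, H616 and D1 descriptors above) first programs one bit per bank in PIO_POW_MOD_CTL_REG and then falls through to the existing PIO_POW_MOD_SEL_REG handling. A small stand-alone sketch (not kernel code) of the per-bank bit values this yields for common I/O supplies, assuming the MOD_SEL bit is set exactly when the supply is at or below 1.8 V, as in the pre-existing MODE_SEL case:

#include <stdbool.h>
#include <stdio.h>

/* Per-bank bit values derived from the supply voltage in microvolts. */
static void io_bias_bits(int uV, bool *mod_sel, bool *mod_ctl)
{
	*mod_sel = uV <= 1800000;			/* PIO_POW_MOD_SEL_REG bit */
	*mod_ctl = uV > 1800000 && uV <= 2500000;	/* PIO_POW_MOD_CTL_REG bit */
}

int main(void)
{
	const int supplies[] = { 1800000, 2500000, 3300000 };
	bool sel, ctl;
	unsigned int i;

	for (i = 0; i < sizeof(supplies) / sizeof(supplies[0]); i++) {
		io_bias_bits(supplies[i], &sel, &ctl);
		printf("%d uV: MOD_SEL=%d MOD_CTL=%d\n", supplies[i], sel, ctl);
	}
	/*
	 * 1800000 uV: MOD_SEL=1 MOD_CTL=0
	 * 2500000 uV: MOD_SEL=0 MOD_CTL=1
	 * 3300000 uV: MOD_SEL=0 MOD_CTL=0
	 */
	return 0;
}
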
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 18fc6a08569e..b437847b6237 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-if X86
-source "drivers/platform/x86/Kconfig"
-endif
if MIPS
source "drivers/platform/mips/Kconfig"
endif
@@ -15,3 +12,5 @@ source "drivers/platform/mellanox/Kconfig"
source "drivers/platform/olpc/Kconfig"
source "drivers/platform/surface/Kconfig"
+
+source "drivers/platform/x86/Kconfig"
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 717299cbccac..c45fb376d653 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -139,7 +139,7 @@ config CROS_EC_PROTO
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
- depends on LEDS_CLASS && ACPI
+ depends on LEDS_CLASS && (ACPI || CROS_EC)
help
This option enables support for the keyboard backlight LEDs on
select Chrome OS systems.
@@ -267,4 +267,13 @@ config CHROMEOS_PRIVACY_SCREEN
source "drivers/platform/chrome/wilco_ec/Kconfig"
+# Kunit test cases
+config CROS_KUNIT
+ tristate "Kunit tests for ChromeOS" if !KUNIT_ALL_TESTS
+ depends on KUNIT && CROS_EC
+ default KUNIT_ALL_TESTS
+ select CROS_EC_PROTO
+ help
+ ChromeOS Kunit tests.
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 52f5a2dde8b8..f7e74a845afc 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -30,3 +30,8 @@ obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
+
+# Kunit test cases
+obj-$(CONFIG_CROS_KUNIT) += cros_kunit.o
+cros_kunit-objs := cros_kunit_util.o
+cros_kunit-objs += cros_ec_proto_test.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index b3e94cdf7d1a..8aace50d446d 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -19,9 +19,6 @@
#include "cros_ec.h"
-#define CROS_EC_DEV_EC_INDEX 0
-#define CROS_EC_DEV_PD_INDEX 1
-
static struct cros_ec_platform ec_p = {
.ec_name = CROS_EC_DEV_NAME,
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_EC_INDEX),
@@ -135,16 +132,16 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
-
- /* For now, report failure to transition to S0ix with a warning. */
+ /* Report failure to transition to system wide suspend with a warning. */
if (ret >= 0 && ec_dev->host_sleep_v1 &&
- (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME)) {
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
+ sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
ec_dev->last_resume_result =
buf.u.resp1.resume_response.sleep_transitions;
WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TIMEOUT,
- "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+ "EC detected sleep transition timeout. Total sleep transitions: %d",
buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
}
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index ff767dccdf0f..05d2e8765a66 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -52,8 +52,8 @@ static int cros_ec_map_error(uint32_t result)
return ret;
}
-static int prepare_packet(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
struct ec_host_request *request;
u8 *out;
@@ -85,8 +85,29 @@ static int prepare_packet(struct cros_ec_device *ec_dev,
return sizeof(*request) + msg->outsize;
}
-static int send_command(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int prepare_tx_legacy(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ u8 *out;
+ u8 csum;
+ int i;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
+ return -EINVAL;
+
+ out = ec_dev->dout;
+ out[0] = EC_CMD_VERSION0 + msg->version;
+ out[1] = msg->command;
+ out[2] = msg->outsize;
+ csum = out[0] + out[1] + out[2];
+ for (i = 0; i < msg->outsize; i++)
+ csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
+ out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
+
+ return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+}
+
+static int cros_ec_xfer_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
int ret;
int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
@@ -102,57 +123,68 @@ static int send_command(struct cros_ec_device *ec_dev,
* the EC is trying to use protocol v2, on an underlying
* communication mechanism that does not support v2.
*/
- dev_err_once(ec_dev->dev,
- "missing EC transfer API, cannot send command\n");
+ dev_err_once(ec_dev->dev, "missing EC transfer API, cannot send command\n");
return -EIO;
}
trace_cros_ec_request_start(msg);
ret = (*xfer_fxn)(ec_dev, msg);
trace_cros_ec_request_done(msg, ret);
- if (msg->result == EC_RES_IN_PROGRESS) {
- int i;
- struct cros_ec_command *status_msg;
- struct ec_response_get_comms_status *status;
- status_msg = kmalloc(sizeof(*status_msg) + sizeof(*status),
- GFP_KERNEL);
- if (!status_msg)
- return -ENOMEM;
+ return ret;
+}
- status_msg->version = 0;
- status_msg->command = EC_CMD_GET_COMMS_STATUS;
- status_msg->insize = sizeof(*status);
- status_msg->outsize = 0;
+static int cros_ec_wait_until_complete(struct cros_ec_device *ec_dev, uint32_t *result)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_comms_status status;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_comms_status *status = &buf.status;
+ int ret = 0, i;
- /*
- * Query the EC's status until it's no longer busy or
- * we encounter an error.
- */
- for (i = 0; i < EC_COMMAND_RETRIES; i++) {
- usleep_range(10000, 11000);
-
- trace_cros_ec_request_start(status_msg);
- ret = (*xfer_fxn)(ec_dev, status_msg);
- trace_cros_ec_request_done(status_msg, ret);
- if (ret == -EAGAIN)
- continue;
- if (ret < 0)
- break;
-
- msg->result = status_msg->result;
- if (status_msg->result != EC_RES_SUCCESS)
- break;
-
- status = (struct ec_response_get_comms_status *)
- status_msg->data;
- if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
- break;
+ msg->version = 0;
+ msg->command = EC_CMD_GET_COMMS_STATUS;
+ msg->insize = sizeof(*status);
+ msg->outsize = 0;
+
+ /* Query the EC's status until it's no longer busy or we encounter an error. */
+ for (i = 0; i < EC_COMMAND_RETRIES; ++i) {
+ usleep_range(10000, 11000);
+
+ ret = cros_ec_xfer_command(ec_dev, msg);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+
+ *result = msg->result;
+ if (msg->result != EC_RES_SUCCESS)
+ return ret;
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ break;
}
- kfree(status_msg);
+ if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
+ return ret;
}
+ if (i >= EC_COMMAND_RETRIES)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int cros_ec_send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ int ret = cros_ec_xfer_command(ec_dev, msg);
+
+ if (msg->result == EC_RES_IN_PROGRESS)
+ ret = cros_ec_wait_until_complete(ec_dev, &msg->result);
+
return ret;
}
@@ -161,35 +193,18 @@ static int send_command(struct cros_ec_device *ec_dev,
* @ec_dev: Device to register.
* @msg: Message to write.
*
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
+ * This is used by all ChromeOS EC drivers to prepare the outgoing message
+ * according to different protocol versions.
*
* Return: number of prepared bytes on success or negative error code.
*/
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
- u8 *out;
- u8 csum;
- int i;
-
if (ec_dev->proto_version > 2)
- return prepare_packet(ec_dev, msg);
-
- if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
- return -EINVAL;
+ return prepare_tx(ec_dev, msg);
- out = ec_dev->dout;
- out[0] = EC_CMD_VERSION0 + msg->version;
- out[1] = msg->command;
- out[2] = msg->outsize;
- csum = out[0] + out[1] + out[2];
- for (i = 0; i < msg->outsize; i++)
- csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
- out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
-
- return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+ return prepare_tx_legacy(ec_dev, msg);
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
@@ -199,9 +214,12 @@ EXPORT_SYMBOL(cros_ec_prepare_tx);
* @msg: Message to check.
*
* This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
+ * EC_RES_IN_PROGRESS and to warn about it.
*
- * Return: 0 on success or negative error code.
+ * The function should not check for any other error codes. Otherwise,
+ * it would break the ABI.
+ *
+ * Return: -EAGAIN if ec_msg->result == EC_RES_IN_PROGRESS. Otherwise, 0.
*/
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
@@ -228,59 +246,66 @@ EXPORT_SYMBOL(cros_ec_check_result);
*
* @ec_dev: EC device to call
* @msg: message structure to use
- * @mask: result when function returns >=0.
+ * @mask: result when function returns 0.
*
* LOCKING:
* the caller has ec_dev->lock mutex, or the caller knows there is
* no other command in progress.
*/
-static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg,
- uint32_t *mask)
+static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, uint32_t *mask)
{
+ struct cros_ec_command *msg;
struct ec_response_host_event_mask *r;
- int ret;
+ int ret, mapped;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
msg->command = EC_CMD_HOST_EVENT_GET_WAKE_MASK;
- msg->version = 0;
- msg->outsize = 0;
msg->insize = sizeof(*r);
- ret = send_command(ec_dev, msg);
- if (ret >= 0) {
- if (msg->result == EC_RES_INVALID_COMMAND)
- return -EOPNOTSUPP;
- if (msg->result != EC_RES_SUCCESS)
- return -EPROTO;
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
}
- if (ret > 0) {
- r = (struct ec_response_host_event_mask *)msg->data;
- *mask = r->mask;
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
}
+ r = (struct ec_response_host_event_mask *)msg->data;
+ *mask = r->mask;
+ ret = 0;
+exit:
+ kfree(msg);
return ret;
}
-static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
- int devidx,
- struct cros_ec_command *msg)
+static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
{
- /*
- * Try using v3+ to query for supported protocols. If this
- * command fails, fall back to v2. Returns the highest protocol
- * supported by the EC.
- * Also sets the max request/response/passthru size.
- */
- int ret;
+ struct cros_ec_command *msg;
+ struct ec_response_get_protocol_info *info;
+ int ret, mapped;
- if (!ec_dev->pkt_xfer)
- return -EPROTONOSUPPORT;
+ ec_dev->proto_version = 3;
+ if (devidx > 0)
+ ec_dev->max_passthru = 0;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*info), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
- memset(msg, 0, sizeof(*msg));
msg->command = EC_CMD_PASSTHRU_OFFSET(devidx) | EC_CMD_GET_PROTOCOL_INFO;
- msg->insize = sizeof(struct ec_response_get_protocol_info);
+ msg->insize = sizeof(*info);
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
/*
* Send command once again when timeout occurred.
* Fingerprint MCU (FPMCU) is restarted during system boot which
@@ -289,68 +314,115 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
* attempt because we waited at least EC_MSG_DEADLINE_MS.
*/
if (ret == -ETIMEDOUT)
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
"failed to check for EC[%d] protocol version: %d\n",
devidx, ret);
- return ret;
+ goto exit;
}
- if (devidx > 0 && msg->result == EC_RES_INVALID_COMMAND)
- return -ENODEV;
- else if (msg->result != EC_RES_SUCCESS)
- return msg->result;
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
+ }
- return 0;
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ info = (struct ec_response_get_protocol_info *)msg->data;
+
+ switch (devidx) {
+ case CROS_EC_DEV_EC_INDEX:
+ ec_dev->max_request = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+ ec_dev->max_response = info->max_response_packet_size -
+ sizeof(struct ec_host_response);
+ ec_dev->proto_version = min(EC_HOST_REQUEST_VERSION,
+ fls(info->protocol_versions) - 1);
+ ec_dev->din_size = info->max_response_packet_size + EC_MAX_RESPONSE_OVERHEAD;
+ ec_dev->dout_size = info->max_request_packet_size + EC_MAX_REQUEST_OVERHEAD;
+
+ dev_dbg(ec_dev->dev, "using proto v%u\n", ec_dev->proto_version);
+ break;
+ case CROS_EC_DEV_PD_INDEX:
+ ec_dev->max_passthru = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+
+ dev_dbg(ec_dev->dev, "found PD chip\n");
+ break;
+ default:
+ dev_dbg(ec_dev->dev, "unknown passthru index: %d\n", devidx);
+ break;
+ }
+
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
}
-static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
+static int cros_ec_get_proto_info_legacy(struct cros_ec_device *ec_dev)
{
struct cros_ec_command *msg;
- struct ec_params_hello *hello_params;
- struct ec_response_hello *hello_response;
- int ret;
- int len = max(sizeof(*hello_params), sizeof(*hello_response));
+ struct ec_params_hello *params;
+ struct ec_response_hello *response;
+ int ret, mapped;
+
+ ec_dev->proto_version = 2;
- msg = kmalloc(sizeof(*msg) + len, GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- msg->version = 0;
msg->command = EC_CMD_HELLO;
- hello_params = (struct ec_params_hello *)msg->data;
- msg->outsize = sizeof(*hello_params);
- hello_response = (struct ec_response_hello *)msg->data;
- msg->insize = sizeof(*hello_response);
-
- hello_params->in_data = 0xa0b0c0d0;
+ msg->insize = sizeof(*response);
+ msg->outsize = sizeof(*params);
- ret = send_command(ec_dev, msg);
+ params = (struct ec_params_hello *)msg->data;
+ params->in_data = 0xa0b0c0d0;
+ ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
- dev_dbg(ec_dev->dev,
- "EC failed to respond to v2 hello: %d\n",
- ret);
+ dev_dbg(ec_dev->dev, "EC failed to respond to v2 hello: %d\n", ret);
goto exit;
- } else if (msg->result != EC_RES_SUCCESS) {
- dev_err(ec_dev->dev,
- "EC responded to v2 hello with error: %d\n",
- msg->result);
- ret = msg->result;
+ }
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ dev_err(ec_dev->dev, "EC responded to v2 hello with error: %d\n", msg->result);
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
goto exit;
- } else if (hello_response->out_data != 0xa1b2c3d4) {
+ }
+
+ response = (struct ec_response_hello *)msg->data;
+ if (response->out_data != 0xa1b2c3d4) {
dev_err(ec_dev->dev,
"EC responded to v2 hello with bad result: %u\n",
- hello_response->out_data);
+ response->out_data);
ret = -EBADMSG;
goto exit;
}
- ret = 0;
+ ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_passthru = 0;
+ ec_dev->pkt_xfer = NULL;
+ ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+ ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
- exit:
+ dev_dbg(ec_dev->dev, "falling back to proto v2\n");
+ ret = 0;
+exit:
kfree(msg);
return ret;
}
@@ -371,13 +443,12 @@ static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
* the caller has ec_dev->lock mutex or the caller knows there is
* no other command in progress.
*/
-static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
- u16 cmd, u32 *mask)
+static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, u16 cmd, u32 *mask)
{
struct ec_params_get_cmd_versions *pver;
struct ec_response_get_cmd_versions *rver;
struct cros_ec_command *msg;
- int ret;
+ int ret, mapped;
msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)),
GFP_KERNEL);
@@ -392,14 +463,26 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
pver = (struct ec_params_get_cmd_versions *)msg->data;
pver->cmd = cmd;
- ret = send_command(ec_dev, msg);
- if (ret > 0) {
- rver = (struct ec_response_get_cmd_versions *)msg->data;
- *mask = rver->version_mask;
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
}
- kfree(msg);
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+ rver = (struct ec_response_get_cmd_versions *)msg->data;
+ *mask = rver->version_mask;
+ ret = 0;
+exit:
+ kfree(msg);
return ret;
}
@@ -413,71 +496,17 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
- struct cros_ec_command *proto_msg;
- struct ec_response_get_protocol_info *proto_info;
- u32 ver_mask = 0;
+ u32 ver_mask;
int ret;
- proto_msg = kzalloc(sizeof(*proto_msg) + sizeof(*proto_info),
- GFP_KERNEL);
- if (!proto_msg)
- return -ENOMEM;
-
/* First try sending with proto v3. */
- ec_dev->proto_version = 3;
- ret = cros_ec_host_command_proto_query(ec_dev, 0, proto_msg);
-
- if (ret == 0) {
- proto_info = (struct ec_response_get_protocol_info *)
- proto_msg->data;
- ec_dev->max_request = proto_info->max_request_packet_size -
- sizeof(struct ec_host_request);
- ec_dev->max_response = proto_info->max_response_packet_size -
- sizeof(struct ec_host_response);
- ec_dev->proto_version =
- min(EC_HOST_REQUEST_VERSION,
- fls(proto_info->protocol_versions) - 1);
- dev_dbg(ec_dev->dev,
- "using proto v%u\n",
- ec_dev->proto_version);
-
- ec_dev->din_size = ec_dev->max_response +
- sizeof(struct ec_host_response) +
- EC_MAX_RESPONSE_OVERHEAD;
- ec_dev->dout_size = ec_dev->max_request +
- sizeof(struct ec_host_request) +
- EC_MAX_REQUEST_OVERHEAD;
-
- /*
- * Check for PD
- */
- ret = cros_ec_host_command_proto_query(ec_dev, 1, proto_msg);
-
- if (ret) {
- dev_dbg(ec_dev->dev, "no PD chip found: %d\n", ret);
- ec_dev->max_passthru = 0;
- } else {
- dev_dbg(ec_dev->dev, "found PD chip\n");
- ec_dev->max_passthru =
- proto_info->max_request_packet_size -
- sizeof(struct ec_host_request);
- }
+ if (!cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_EC_INDEX)) {
+ /* Check for PD. */
+ cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_PD_INDEX);
} else {
/* Try querying with a v2 hello message. */
- ec_dev->proto_version = 2;
- ret = cros_ec_host_command_proto_query_v2(ec_dev);
-
- if (ret == 0) {
- /* V2 hello succeeded. */
- dev_dbg(ec_dev->dev, "falling back to proto v2\n");
-
- ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
- ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
- ec_dev->max_passthru = 0;
- ec_dev->pkt_xfer = NULL;
- ec_dev->din_size = EC_PROTO2_MSG_BYTES;
- ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
- } else {
+ ret = cros_ec_get_proto_info_legacy(ec_dev);
+ if (ret) {
/*
* It's possible for a test to occur too early when
* the EC isn't listening. If this happens, we'll
@@ -485,7 +514,7 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
*/
ec_dev->proto_version = EC_PROTO_VERSION_UNKNOWN;
dev_dbg(ec_dev->dev, "EC query failed: %d\n", ret);
- goto exit;
+ return ret;
}
}
@@ -506,26 +535,21 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
}
/* Probe if MKBP event is supported */
- ret = cros_ec_get_host_command_version_mask(ec_dev,
- EC_CMD_GET_NEXT_EVENT,
- &ver_mask);
- if (ret < 0 || ver_mask == 0)
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_GET_NEXT_EVENT, &ver_mask);
+ if (ret < 0 || ver_mask == 0) {
ec_dev->mkbp_event_supported = 0;
- else
+ } else {
ec_dev->mkbp_event_supported = fls(ver_mask);
- dev_dbg(ec_dev->dev, "MKBP support version %u\n",
- ec_dev->mkbp_event_supported - 1);
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
+ }
/* Probe if host sleep v1 is supported for S0ix failure detection. */
- ret = cros_ec_get_host_command_version_mask(ec_dev,
- EC_CMD_HOST_SLEEP_EVENT,
- &ver_mask);
- ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_HOST_SLEEP_EVENT, &ver_mask);
+ ec_dev->host_sleep_v1 = (ret == 0 && (ver_mask & EC_VER_MASK(1)));
/* Get host event wake mask. */
- ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg,
- &ec_dev->host_event_wake_mask);
+ ret = cros_ec_get_host_event_wake_mask(ec_dev, &ec_dev->host_event_wake_mask);
if (ret < 0) {
/*
* If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
@@ -556,7 +580,6 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
ret = 0;
exit:
- kfree(proto_msg);
return ret;
}
EXPORT_SYMBOL(cros_ec_query_all);
@@ -601,7 +624,7 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
msg->insize = ec_dev->max_response;
}
- if (msg->command < EC_CMD_PASSTHRU_OFFSET(1)) {
+ if (msg->command < EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX)) {
if (msg->outsize > ec_dev->max_request) {
dev_err(ec_dev->dev,
"request of size %u is too big (max: %u)\n",
@@ -621,7 +644,7 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
}
}
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
mutex_unlock(&ec_dev->lock);
return ret;
@@ -852,8 +875,8 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature)
if (features->flags[0] == -1U && features->flags[1] == -1U) {
/* features bitmap not read yet */
- ret = cros_ec_command(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
- NULL, 0, features, sizeof(*features));
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
+ NULL, 0, features, sizeof(*features));
if (ret < 0) {
dev_warn(ec->dev, "cannot get EC features: %d\n", ret);
memset(features, 0, sizeof(*features));
@@ -934,7 +957,7 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
/**
- * cros_ec_command - Send a command to the EC.
+ * cros_ec_cmd - Send a command to the EC.
*
* @ec_dev: EC device
* @version: EC command version
@@ -946,13 +969,13 @@ EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
*
* Return: >= 0 on success, negative error number on failure.
*/
-int cros_ec_command(struct cros_ec_device *ec_dev,
- unsigned int version,
- int command,
- void *outdata,
- int outsize,
- void *indata,
- int insize)
+int cros_ec_cmd(struct cros_ec_device *ec_dev,
+ unsigned int version,
+ int command,
+ void *outdata,
+ size_t outsize,
+ void *indata,
+ size_t insize)
{
struct cros_ec_command *msg;
int ret;
@@ -979,4 +1002,4 @@ error:
kfree(msg);
return ret;
}
-EXPORT_SYMBOL_GPL(cros_ec_command);
+EXPORT_SYMBOL_GPL(cros_ec_cmd);
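
For reference, prepare_tx_legacy() above frames a proto-v2 message as a three-byte header (version byte, command, payload length), the payload itself, and a trailing 8-bit additive checksum over every preceding byte; cros_ec_proto_test_prepare_tx_legacy_normal() in the new KUnit file below verifies exactly this layout. A stand-alone sketch of the same framing (the 0xdc/0x01 bytes are illustrative placeholders rather than quotes of the cros_ec_commands.h constants):

#include <stdint.h>
#include <stdio.h>

#define HDR_BYTES 3	/* mirrors EC_MSG_TX_HEADER_BYTES, which the test pins to 3 */

/* Build a proto-v2 style frame: header, payload, 8-bit additive checksum. */
static size_t build_legacy_frame(uint8_t *out, uint8_t version_byte,
				 uint8_t command, const uint8_t *data,
				 uint8_t len)
{
	uint8_t csum;
	int i;

	out[0] = version_byte;
	out[1] = command;
	out[2] = len;
	csum = out[0] + out[1] + out[2];
	for (i = 0; i < len; i++)
		csum += out[HDR_BYTES + i] = data[i];
	out[HDR_BYTES + len] = csum;

	return HDR_BYTES + len + 1;	/* mirrors EC_MSG_TX_PROTO_BYTES + outsize */
}

int main(void)
{
	uint8_t frame[16];
	const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
	size_t i, n = build_legacy_frame(frame, 0xdc, 0x01, payload, sizeof(payload));

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");	/* prints: dc 01 04 de ad be ef 19 */
	return 0;
}
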
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
new file mode 100644
index 000000000000..c6a83df91ae1
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit tests for ChromeOS Embedded Controller protocol.
+ */
+
+#include <kunit/test.h>
+
+#include <asm-generic/unaligned.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+#define BUFSIZE 512
+
+struct cros_ec_proto_test_priv {
+ struct cros_ec_device ec_dev;
+ u8 dout[BUFSIZE];
+ u8 din[BUFSIZE];
+ struct cros_ec_command *msg;
+ u8 _msg[BUFSIZE];
+};
+
+static void cros_ec_proto_test_prepare_tx_legacy_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ u8 csum;
+
+ ec_dev->proto_version = 2;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, EC_MSG_TX_PROTO_BYTES + EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[0], EC_CMD_VERSION0);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[1], EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[2], EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, EC_MSG_TX_HEADER_BYTES, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 3], 0xef);
+ for (i = 4; i < EC_PROTO2_MAX_PARAM_SIZE; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + i], 0);
+
+ csum = EC_CMD_VERSION0;
+ csum += EC_CMD_HELLO;
+ csum += EC_PROTO2_MAX_PARAM_SIZE;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test,
+ ec_dev->dout[EC_MSG_TX_HEADER_BYTES + EC_PROTO2_MAX_PARAM_SIZE],
+ csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ ec_dev->proto_version = 2;
+
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_prepare_tx_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ struct ec_host_request *request = (struct ec_host_request *)ec_dev->dout;
+ int ret, i;
+ u8 csum;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = 0x88;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, sizeof(*request) + 0x88);
+
+ KUNIT_EXPECT_EQ(test, request->struct_version, EC_HOST_REQUEST_VERSION);
+ KUNIT_EXPECT_EQ(test, request->command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, request->command_version, 0);
+ KUNIT_EXPECT_EQ(test, request->data_len, 0x88);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 3], 0xef);
+ for (i = 4; i < 0x88; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + i], 0);
+
+ csum = EC_HOST_REQUEST_VERSION;
+ csum += EC_CMD_HELLO;
+ csum += 0x88;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test, request->checksum, (u8)-csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ msg->outsize = ec_dev->dout_size - sizeof(struct ec_host_request) + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_check_result(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ static enum ec_status status[] = {
+ EC_RES_SUCCESS,
+ EC_RES_INVALID_COMMAND,
+ EC_RES_ERROR,
+ EC_RES_INVALID_PARAM,
+ EC_RES_ACCESS_DENIED,
+ EC_RES_INVALID_RESPONSE,
+ EC_RES_INVALID_VERSION,
+ EC_RES_INVALID_CHECKSUM,
+ EC_RES_UNAVAILABLE,
+ EC_RES_TIMEOUT,
+ EC_RES_OVERFLOW,
+ EC_RES_INVALID_HEADER,
+ EC_RES_REQUEST_TRUNCATED,
+ EC_RES_RESPONSE_TOO_BIG,
+ EC_RES_BUS_ERROR,
+ EC_RES_BUSY,
+ EC_RES_INVALID_HEADER_VERSION,
+ EC_RES_INVALID_HEADER_CRC,
+ EC_RES_INVALID_DATA_CRC,
+ EC_RES_DUP_UNAVAILABLE,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(status); ++i) {
+ msg->result = status[i];
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ }
+
+ msg->result = EC_RES_IN_PROGRESS;
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+}
+
+static void cros_ec_proto_test_query_all_pretest(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ /*
+	 * cros_ec_query_all() frees din and dout and allocates them again (via devm_kfree() and
+	 * devm_kzalloc()) to fit the sizes it probes. Set them to NULL as they aren't managed by
+ * ec_dev->dev but allocated statically in struct cros_ec_proto_test_priv
+ * (see cros_ec_proto_test_init()).
+ */
+ ec_dev->din = NULL;
+ ec_dev->dout = NULL;
+}
+
+static void cros_ec_proto_test_query_all_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->protocol_versions = BIT(3) | BIT(2);
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbf;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(6) | BIT(5);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ struct ec_response_host_event_mask *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_host_event_mask *)mock->o_data;
+ data->mask = 0xbeef;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, 0xbe - sizeof(struct ec_host_request));
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, 0xef - sizeof(struct ec_host_response));
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, 0xef + EC_MAX_RESPONSE_OVERHEAD);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, 0xbe + EC_MAX_REQUEST_OVERHEAD);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0xbf - sizeof(struct ec_host_request));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 7);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_HOST_SLEEP_EVENT);
+
+ KUNIT_EXPECT_TRUE(test, ec_dev->host_sleep_v1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->host_event_wake_mask, 0xbeef);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+		 * Although the test doesn't check these values, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+		 * Although the test doesn't check these values, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
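+/*
+ * EC_CMD_GET_PROTOCOL_INFO fails with EC_RES_INVALID_COMMAND, so cros_ec_query_all()
+ * falls back to the legacy EC_CMD_HELLO probe and settles on protocol v2.
+ */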
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
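+/*
+ * A zero-length EC_CMD_GET_PROTOCOL_INFO reply triggers the same legacy fallback
+ * and ends up with the same protocol v2 settings.
+ */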
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
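+/*
+ * The legacy EC_CMD_HELLO transfer fails with -EIO, so cros_ec_query_all() returns
+ * -EIO and the protocol version stays unknown.
+ */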
+static void cros_ec_proto_test_query_all_legacy_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
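+/*
+ * EC_CMD_HELLO succeeds but returns an unexpected out_data value, so the legacy
+ * probe fails with -EBADMSG and the protocol version stays unknown.
+ */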
+static void cros_ec_proto_test_query_all_legacy_data_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xbeefbfbf;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EBADMSG);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
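+/*
+ * EC_CMD_GET_CMD_VERSIONS reports a zero version mask for EC_CMD_GET_NEXT_EVENT,
+ * so ec_dev->mkbp_event_supported must be cleared.
+ */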
+static void cros_ec_proto_test_query_all_no_mkbp(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
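+/*
+ * The host command version mask for host sleep comes back as 0, so
+ * ec_dev->host_sleep_v1 must be cleared.
+ */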
+static void cros_ec_proto_test_query_all_no_host_sleep(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_host_sleep_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Return a nonzero mask in order to pollute the next
+		 * cros_ec_get_host_command_version_mask() call.
+		 */
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0xbeef;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
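+/*
+ * EC_CMD_HOST_EVENT_GET_WAKE_MASK fails, so the driver falls back to a default
+ * wake mask that excludes the events checked below.
+ */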
+static void cros_ec_proto_test_query_all_default_wake_mask_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_default_wake_mask_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * Although the test doesn't check these values, provide valid sizes
+		 * so that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+	/* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+	/* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 2;
+ buf.data[0] = 0x55;
+ buf.data[1] = 0xaa;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 4);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x55;
+ data[2] = 0xcc;
+ data[3] = 0x33;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 4);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 4);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0x55);
+ KUNIT_EXPECT_EQ(test, data[1], 0xaa);
+
+ KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
+ KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
+ KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
+ KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
+ }
+}
+
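+/*
+ * An insize larger than ec_dev->max_response is clamped to max_response before
+ * the transfer is issued.
+ */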
+static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 0xee + 1;
+ buf.msg.outsize = 2;
+
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 0xcc);
+
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 0xee);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xff + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xdd + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 0);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 1);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = NULL;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 1);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = NULL;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
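+/*
+ * The first transfer returns EC_RES_IN_PROGRESS, so cros_ec_cmd_xfer() polls
+ * EC_CMD_GET_COMMS_STATUS until the EC reports it is no longer busy.
+ */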
+static void cros_ec_proto_test_cmd_xfer_in_progress_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ struct ec_response_get_comms_status *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags = 0;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_comms_status));
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_SUCCESS);
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_COMMS_STATUS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_comms_status));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ cros_kunit_ec_xfer_mock_default_ret = -EAGAIN;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
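+	/* 51 == the initial host command plus EC_COMMAND_RETRIES (50) status polls. */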
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ {
+ struct ec_response_get_comms_status *data;
+ int i;
+
+ for (i = 0; i < 50; ++i) {
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags |= EC_COMMS_STATUS_PROCESSING;
+ }
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_INVALID_COMMAND);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_command msg;
+ static const int map[] = {
+ [EC_RES_SUCCESS] = 0,
+ [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
+ [EC_RES_ERROR] = -EIO,
+ [EC_RES_INVALID_PARAM] = -EINVAL,
+ [EC_RES_ACCESS_DENIED] = -EACCES,
+ [EC_RES_INVALID_RESPONSE] = -EPROTO,
+ [EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
+ [EC_RES_INVALID_CHECKSUM] = -EBADMSG,
+ /*
+		 * EC_RES_IN_PROGRESS is special because cros_ec_send_command() has extra logic
+		 * to handle it. Note that the default cros_kunit_ec_xfer_mock_default_ret is 0,
+		 * so cros_ec_xfer_command() in cros_ec_wait_until_complete() returns 0. As a
+		 * result, it returns -EPROTO without calling cros_ec_map_error().
+ */
+ [EC_RES_IN_PROGRESS] = -EPROTO,
+ [EC_RES_UNAVAILABLE] = -ENODATA,
+ [EC_RES_TIMEOUT] = -ETIMEDOUT,
+ [EC_RES_OVERFLOW] = -EOVERFLOW,
+ [EC_RES_INVALID_HEADER] = -EBADR,
+ [EC_RES_REQUEST_TRUNCATED] = -EBADR,
+ [EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
+ [EC_RES_BUS_ERROR] = -EFAULT,
+ [EC_RES_BUSY] = -EBUSY,
+ [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
+ [EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
+ [EC_RES_INVALID_DATA_CRC] = -EBADMSG,
+ [EC_RES_DUP_UNAVAILABLE] = -ENODATA,
+ };
+
+ memset(&msg, 0, sizeof(msg));
+
+ for (i = 0; i < ARRAY_SIZE(map); ++i) {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, i, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, map[i]);
+ }
+}
+
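+/*
+ * Without MKBP event support, cros_ec_get_next_event() falls back to
+ * EC_CMD_MKBP_STATE; the key-matrix event counts as a wake event and no more
+ * events are pending.
+ */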
+static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 0;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_keyboard_state_event(). */
+ {
+ union ec_response_get_next_data_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (union ec_response_get_next_data_v1 *)mock->o_data;
+ data->host_event = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_keyboard_state_event(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->suspended = true;
+
+ ret = cros_ec_get_next_event(ec_dev, NULL, NULL);
+ KUNIT_EXPECT_EQ(test, ret, -EHOSTDOWN);
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_version0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 1;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+ more_events = false;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_SENSOR_FIFO | EC_MKBP_HAS_MORE_EVENTS;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_SENSOR_FIFO);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+ KUNIT_EXPECT_TRUE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_next_event));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_version2(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_FINGERPRINT;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_FINGERPRINT);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
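+/* An RTC host event is delivered but must not be reported as a wake event. */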
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
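+/*
+ * A host event that is excluded from host_event_wake_mask must not be reported
+ * as a wake event.
+ */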
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX & ~EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED);
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED),
+ &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_host_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 0;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_not_host_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_FINGERPRINT;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_wrong_event_size(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = 0xff;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = sizeof(ec_dev->event_data.data.host_event);
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC),
+ &ec_dev->event_data.data.host_event);
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC));
+}
+
+static void cros_ec_proto_test_check_features_cached(struct kunit *test)
+{
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+}
+
+static void cros_ec_proto_test_check_features_not_cached(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+ ec.features.flags[0] = -1;
+ ec.features.flags[1] = -1;
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ struct ec_response_get_features *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_features *)mock->o_data;
+ data->flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ data->flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+ }
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_FEATURES);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_features));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_response_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_motion_sense *)mock->o_data;
+ data->dump.sensor_count = 0xbf;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, 0xbf);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+ struct {
+ u8 readmem_data;
+ int expected_result;
+ } test_data[] = {
+ { 0, 0 },
+ { EC_MEMMAP_ACC_STATUS_PRESENCE_BIT, 2 },
+ };
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->cmd_readmem = cros_kunit_readmem_mock;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For readmem. */
+ {
+ cros_kunit_readmem_mock_data = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_ASSERT_PTR_NE(test, cros_kunit_readmem_mock_data, NULL);
+ cros_kunit_readmem_mock_data[0] = test_data[i].readmem_data;
+
+ cros_kunit_ec_xfer_mock_default_ret = 1;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, test_data[i].expected_result);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+
+ /* For readmem. */
+ {
+ KUNIT_EXPECT_EQ(test, cros_kunit_readmem_mock_offset, EC_MEMMAP_ACC_STATUS);
+ }
+ }
+}
+
+static void cros_ec_proto_test_ec_cmd(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ u8 out[3], in[2];
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+
+ out[0] = 0xdd;
+ out[1] = 0xcc;
+ out[2] = 0xbb;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 2);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x99;
+ }
+
+ ret = cros_ec_cmd(ec_dev, 0x88, 0x77, out, ARRAY_SIZE(out), in, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, ret, 2);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0x88);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, 0x77);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, ARRAY_SIZE(out));
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0xdd);
+ KUNIT_EXPECT_EQ(test, data[1], 0xcc);
+ KUNIT_EXPECT_EQ(test, data[2], 0xbb);
+ }
+}
+
+static void cros_ec_proto_test_release(struct device *dev)
+{
+}
+
+static int cros_ec_proto_test_init(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv;
+ struct cros_ec_device *ec_dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ec_dev = &priv->ec_dev;
+ ec_dev->dout = (u8 *)priv->dout;
+ ec_dev->dout_size = ARRAY_SIZE(priv->dout);
+ ec_dev->din = (u8 *)priv->din;
+ ec_dev->din_size = ARRAY_SIZE(priv->din);
+ ec_dev->proto_version = EC_HOST_REQUEST_VERSION;
+ ec_dev->dev = kunit_kzalloc(test, sizeof(*ec_dev->dev), GFP_KERNEL);
+ if (!ec_dev->dev)
+ return -ENOMEM;
+ device_initialize(ec_dev->dev);
+ dev_set_name(ec_dev->dev, "cros_ec_proto_test");
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+ cros_kunit_mock_reset();
+
+ return 0;
+}
+
+static void cros_ec_proto_test_exit(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ put_device(ec_dev->dev);
+}
+
+static struct kunit_case cros_ec_proto_test_cases[] = {
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_check_result),
+ KUNIT_CASE(cros_ec_proto_test_query_all_normal),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_data_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_insize),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_return_error),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version0),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version2),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_not_host_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_wrong_event_size),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_normal),
+ KUNIT_CASE(cros_ec_proto_test_check_features_cached),
+ KUNIT_CASE(cros_ec_proto_test_check_features_not_cached),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_normal),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_legacy),
+ KUNIT_CASE(cros_ec_proto_test_ec_cmd),
+ {}
+};
+
+static struct kunit_suite cros_ec_proto_test_suite = {
+ .name = "cros_ec_proto_test",
+ .init = cros_ec_proto_test_init,
+ .exit = cros_ec_proto_test_exit,
+ .test_cases = cros_ec_proto_test_cases,
+};
+
+kunit_test_suite(cros_ec_proto_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 9bb5cd2c98b8..d7e407de88df 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -30,8 +30,8 @@ TRACE_EVENT(cros_ec_request_start,
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
- __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
),
@@ -55,8 +55,8 @@ TRACE_EVENT(cros_ec_request_done,
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
- __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
__entry->result = cmd->result;
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 7cb2e35c4ded..de6ee0f926a6 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -25,6 +25,8 @@
#define DRV_NAME "cros-ec-typec"
+#define DP_PORT_VDO (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) | DP_CAP_DFP_D)
+
/* Supported alt modes. */
enum {
CROS_EC_ALTMODE_DP = 0,
@@ -60,8 +62,7 @@ struct cros_typec_port {
uint8_t mux_flags;
uint8_t role;
- /* Port alt modes. */
- struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
+ struct typec_altmode *port_altmode[CROS_EC_ALTMODE_MAX];
/* Flag indicating that PD partner discovery data parsing is completed. */
bool sop_disc_done;
@@ -254,6 +255,14 @@ static void cros_typec_remove_cable(struct cros_typec_data *typec,
port->sop_prime_disc_done = false;
}
+static void cros_typec_unregister_port_altmodes(struct cros_typec_port *port)
+{
+ int i;
+
+ for (i = 0; i < CROS_EC_ALTMODE_MAX; i++)
+ typec_unregister_altmode(port->port_altmode[i]);
+}
+
static void cros_unregister_ports(struct cros_typec_data *typec)
{
int i;
@@ -268,34 +277,49 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
usb_role_switch_put(typec->ports[i]->role_sw);
typec_switch_put(typec->ports[i]->ori_sw);
typec_mux_put(typec->ports[i]->mux);
+ cros_typec_unregister_port_altmodes(typec->ports[i]);
typec_unregister_port(typec->ports[i]->port);
}
}
/*
- * Fake the alt mode structs until we actually start registering Type C port
- * and partner alt modes.
+ * Register port alt modes with known values until we start retrieving
+ * port capabilities from the EC.
*/
-static void cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
int port_num)
{
struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_altmode_desc desc = { };
+ struct typec_altmode *amode;
/* All PD capable CrOS devices are assumed to support DP altmode. */
- port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID;
- port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE;
+ desc.svid = USB_TYPEC_DP_SID;
+ desc.mode = USB_TYPEC_DP_MODE;
+ desc.vdo = DP_PORT_VDO;
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
* if it doesn't support it, so it's safe to register it unconditionally
* here for now.
*/
- port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID;
- port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE;
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = USB_TYPEC_TBT_SID;
+ desc.mode = TYPEC_ANY_MODE;
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
+
+ return 0;
}
static int cros_typec_init_ports(struct cros_typec_data *typec)
@@ -352,8 +376,8 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
cros_port->port = typec_register_port(dev, cap);
if (IS_ERR(cros_port->port)) {
- dev_err(dev, "Failed to register port %d\n", port_num);
ret = PTR_ERR(cros_port->port);
+ dev_err_probe(dev, ret, "Failed to register port %d\n", port_num);
goto unregister_ports;
}
@@ -362,7 +386,11 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
dev_dbg(dev, "No switch control for port %d\n",
port_num);
- cros_typec_register_port_altmodes(typec, port_num);
+ ret = cros_typec_register_port_altmodes(typec, port_num);
+ if (ret) {
+ dev_err(dev, "Failed to register port altmodes\n");
+ goto unregister_ports;
+ }
cros_port->disc_data = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
if (!cros_port->disc_data) {
@@ -431,7 +459,7 @@ static int cros_typec_enable_tbt(struct cros_typec_data *typec,
data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
if (!port->state.alt) {
- port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT];
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_TBT];
ret = cros_typec_usb_safe_state(port);
if (ret)
return ret;
@@ -473,7 +501,7 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
/* Configuration VDO. */
dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
if (!port->state.alt) {
- port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP];
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_DP];
ret = cros_typec_usb_safe_state(port);
if (ret)
return ret;
@@ -525,8 +553,8 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
enum typec_orientation orientation;
int ret;
- ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
- &req, sizeof(req), &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
port_num, ret);
@@ -585,8 +613,8 @@ mux_ack:
/* Sending Acknowledgment to EC */
mux_ack.port = port_num;
- if (cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
- sizeof(mux_ack), NULL, 0) < 0)
+ if (cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
+ sizeof(mux_ack), NULL, 0) < 0)
dev_warn(typec->dev,
"Failed to send Mux ACK to EC for port: %d\n",
port_num);
@@ -754,8 +782,8 @@ static int cros_typec_handle_sop_prime_disc(struct cros_typec_data *typec, int p
int ret = 0;
memset(disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
- disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ disc, EC_PROTO2_MAX_RESPONSE_SIZE);
if (ret < 0) {
dev_err(typec->dev, "Failed to get SOP' discovery data for port: %d\n", port_num);
goto sop_prime_disc_exit;
@@ -837,8 +865,8 @@ static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_nu
typec_partner_set_pd_revision(port->partner, pd_revision);
memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
- sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
if (ret < 0) {
dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
goto disc_exit;
@@ -870,8 +898,8 @@ static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_n
.clear_events_mask = events_mask,
};
- return cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
- sizeof(req), NULL, 0);
+ return cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
}
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
@@ -882,8 +910,8 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
};
int ret;
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
return;
@@ -960,9 +988,9 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
req.swap = USB_PD_CTRL_SWAP_NONE;
- ret = cros_ec_command(typec->ec, typec->pd_ctrl_ver,
- EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, typec->pd_ctrl_ver,
+ EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
+ &resp, sizeof(resp));
if (ret < 0)
return ret;
@@ -997,9 +1025,8 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
/* We're interested in the PD control command version. */
req_v1.cmd = EC_CMD_USB_PD_CONTROL;
- ret = cros_ec_command(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
- &req_v1, sizeof(req_v1), &resp,
- sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp, sizeof(resp));
if (ret < 0)
return ret;
@@ -1090,8 +1117,8 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp, sizeof(resp));
if (ret < 0)
return ret;
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index aa409f0201fb..793fd3f1015d 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -4,24 +4,60 @@
// Copyright (C) 2012 Google, Inc.
#include <linux/acpi.h>
-#include <linux/leds.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
+struct keyboard_led {
+ struct led_classdev cdev;
+ struct cros_ec_device *ec;
+};
+
+/**
+ * struct keyboard_led_drvdata - keyboard LED driver data.
+ * @init: Init function.
+ * @brightness_get: Get LED brightness level.
+ * @brightness_set: Set LED brightness level. Must not sleep.
+ * @brightness_set_blocking: Set LED brightness level. It can block the
+ * caller for the time required to access a
+ * LED device register.
+ * @max_brightness: Maximum brightness.
+ *
+ * See struct led_classdev in include/linux/leds.h for more details.
+ */
+struct keyboard_led_drvdata {
+ int (*init)(struct platform_device *pdev);
+
+ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
+
+ void (*brightness_set)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+ int (*brightness_set_blocking)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+
+ enum led_brightness max_brightness;
+};
+
+#define KEYBOARD_BACKLIGHT_MAX 100
+
+#ifdef CONFIG_ACPI
+
/* Keyboard LED ACPI Device must be defined in firmware */
#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
-#define ACPI_KEYBOARD_BACKLIGHT_MAX 100
-
-static void keyboard_led_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
+static void keyboard_led_set_brightness_acpi(struct led_classdev *cdev,
+ enum led_brightness brightness)
{
union acpi_object param;
struct acpi_object_list input;
@@ -40,7 +76,7 @@ static void keyboard_led_set_brightness(struct led_classdev *cdev,
}
static enum led_brightness
-keyboard_led_get_brightness(struct led_classdev *cdev)
+keyboard_led_get_brightness_acpi(struct led_classdev *cdev)
{
unsigned long long brightness;
acpi_status status;
@@ -56,12 +92,10 @@ keyboard_led_get_brightness(struct led_classdev *cdev)
return brightness;
}
-static int keyboard_led_probe(struct platform_device *pdev)
+static int keyboard_led_init_acpi(struct platform_device *pdev)
{
- struct led_classdev *cdev;
acpi_handle handle;
acpi_status status;
- int error;
/* Look for the keyboard LED ACPI Device */
status = acpi_get_handle(ACPI_ROOT_OBJECT,
@@ -73,33 +107,151 @@ static int keyboard_led_probe(struct platform_device *pdev)
return -ENXIO;
}
- cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
+ return 0;
+}
+
+static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
+ .init = keyboard_led_init_acpi,
+ .brightness_set = keyboard_led_set_brightness_acpi,
+ .brightness_get = keyboard_led_get_brightness_acpi,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_CROS_EC)
+
+static int
+keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_pwm_set_keyboard_backlight params;
+ } __packed buf;
+ struct ec_params_pwm_set_keyboard_backlight *params = &buf.params;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT;
+ msg->outsize = sizeof(*params);
+
+ params->percent = brightness;
+
+ return cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_pwm_get_keyboard_backlight resp;
+ } __packed buf;
+ struct ec_response_pwm_get_keyboard_backlight *resp = &buf.resp;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+ int ret;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT;
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+ if (ret < 0)
+ return ret;
+
+ return resp->percent;
+}
+
+static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
+{
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!keyboard_led->ec) {
+ dev_err(&pdev->dev, "no parent EC device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
+ .init = keyboard_led_init_ec_pwm,
+ .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
+ .brightness_get = keyboard_led_get_brightness_ec_pwm,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#else /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
+
+#endif /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+ const struct keyboard_led_drvdata *drvdata;
+ struct keyboard_led *keyboard_led;
+ int error;
+
+ drvdata = device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ keyboard_led = devm_kzalloc(&pdev->dev, sizeof(*keyboard_led), GFP_KERNEL);
+ if (!keyboard_led)
return -ENOMEM;
+ platform_set_drvdata(pdev, keyboard_led);
+
+ if (drvdata->init) {
+ error = drvdata->init(pdev);
+ if (error)
+ return error;
+ }
- cdev->name = "chromeos::kbd_backlight";
- cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
- cdev->flags |= LED_CORE_SUSPENDRESUME;
- cdev->brightness_set = keyboard_led_set_brightness;
- cdev->brightness_get = keyboard_led_get_brightness;
+ keyboard_led->cdev.name = "chromeos::kbd_backlight";
+ keyboard_led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ keyboard_led->cdev.max_brightness = drvdata->max_brightness;
+ keyboard_led->cdev.brightness_set = drvdata->brightness_set;
+ keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
+ keyboard_led->cdev.brightness_get = drvdata->brightness_get;
- error = devm_led_classdev_register(&pdev->dev, cdev);
+ error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
if (error)
return error;
return 0;
}
-static const struct acpi_device_id keyboard_led_id[] = {
- { "GOOG0002", 0 },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id keyboard_led_acpi_match[] = {
+ { "GOOG0002", (kernel_ulong_t)&keyboard_led_drvdata_acpi },
{ }
};
-MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
+MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id keyboard_led_of_match[] = {
+ {
+ .compatible = "google,cros-kbd-led-backlight",
+ .data = &keyboard_led_drvdata_ec_pwm,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
+#endif
static struct platform_driver keyboard_led_driver = {
.driver = {
.name = "chromeos-keyboard-leds",
- .acpi_match_table = ACPI_PTR(keyboard_led_id),
+ .acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
+ .of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
};
diff --git a/drivers/platform/chrome/cros_kunit_util.c b/drivers/platform/chrome/cros_kunit_util.c
new file mode 100644
index 000000000000..f0fda96b11bd
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CrOS Kunit tests utilities.
+ */
+
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+int cros_kunit_ec_xfer_mock_default_result;
+int cros_kunit_ec_xfer_mock_default_ret;
+int cros_kunit_ec_cmd_xfer_mock_called;
+int cros_kunit_ec_pkt_xfer_mock_called;
+
+static struct list_head cros_kunit_ec_xfer_mock_in;
+static struct list_head cros_kunit_ec_xfer_mock_out;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_in, struct ec_xfer_mock, list);
+ if (!mock) {
+ msg->result = cros_kunit_ec_xfer_mock_default_result;
+ return cros_kunit_ec_xfer_mock_default_ret;
+ }
+
+ list_del(&mock->list);
+
+ memcpy(&mock->msg, msg, sizeof(*msg));
+ if (msg->outsize) {
+ mock->i_data = kunit_kzalloc(mock->test, msg->outsize, GFP_KERNEL);
+ if (mock->i_data)
+ memcpy(mock->i_data, msg->data, msg->outsize);
+ }
+
+ msg->result = mock->result;
+ if (msg->insize)
+ memcpy(msg->data, mock->o_data, min(msg->insize, mock->o_data_len));
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_out);
+
+ return mock->ret;
+}
+
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_cmd_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_pkt_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size)
+{
+ return cros_kunit_ec_xfer_mock_addx(test, size, EC_RES_SUCCESS, size);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ if (!mock)
+ return NULL;
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_in);
+ mock->test = test;
+
+ mock->ret = ret;
+ mock->result = result;
+ mock->o_data = kunit_kzalloc(test, size, GFP_KERNEL);
+ if (!mock->o_data)
+ return NULL;
+ mock->o_data_len = size;
+
+ return mock;
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_out, struct ec_xfer_mock, list);
+ if (mock)
+ list_del(&mock->list);
+
+ return mock;
+}
+
+int cros_kunit_readmem_mock_offset;
+u8 *cros_kunit_readmem_mock_data;
+int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ cros_kunit_readmem_mock_offset = offset;
+
+ memcpy(dest, cros_kunit_readmem_mock_data, bytes);
+
+ return cros_kunit_readmem_mock_ret;
+}
+
+void cros_kunit_mock_reset(void)
+{
+ cros_kunit_ec_xfer_mock_default_result = 0;
+ cros_kunit_ec_xfer_mock_default_ret = 0;
+ cros_kunit_ec_cmd_xfer_mock_called = 0;
+ cros_kunit_ec_pkt_xfer_mock_called = 0;
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_in);
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_out);
+
+ cros_kunit_readmem_mock_offset = 0;
+ cros_kunit_readmem_mock_data = NULL;
+ cros_kunit_readmem_mock_ret = 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_kunit_util.h b/drivers/platform/chrome/cros_kunit_util.h
new file mode 100644
index 000000000000..414002271c9c
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CrOS Kunit tests utilities.
+ */
+
+#ifndef _CROS_KUNIT_UTIL_H_
+#define _CROS_KUNIT_UTIL_H_
+
+#include <linux/platform_data/cros_ec_proto.h>
+
+struct ec_xfer_mock {
+ struct list_head list;
+ struct kunit *test;
+
+ /* input */
+ struct cros_ec_command msg;
+ void *i_data;
+
+ /* output */
+ int ret;
+ int result;
+ void *o_data;
+ u32 o_data_len;
+};
+
+extern int cros_kunit_ec_xfer_mock_default_result;
+extern int cros_kunit_ec_xfer_mock_default_ret;
+extern int cros_kunit_ec_cmd_xfer_mock_called;
+extern int cros_kunit_ec_pkt_xfer_mock_called;
+
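+/*
+ * Host-command transfer mocks: _add() queues a mock that succeeds
+ * (EC_RES_SUCCESS) with a zeroed response buffer of the given size and
+ * returns that size from the transfer; _addx() additionally lets the caller
+ * choose the transfer return value and EC result code; _next() pops the next
+ * consumed mock so tests can inspect the message that was actually sent.
+ */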
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void);
+
+extern int cros_kunit_readmem_mock_offset;
+extern u8 *cros_kunit_readmem_mock_data;
+extern int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+void cros_kunit_mock_reset(void);
+
+#endif
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index 91ce6be91aac..4b5a81c9dc6d 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -71,8 +71,8 @@ static void cros_usbpd_get_event_and_notify(struct device *dev,
}
/* Check for PD host events on EC. */
- ret = cros_ec_command(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
- NULL, 0, &host_event_status, sizeof(host_event_status));
+ ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
+ NULL, 0, &host_event_status, sizeof(host_event_status));
if (ret < 0) {
dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
goto send_notify;
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index 814518509739..32e400590be5 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -343,7 +343,7 @@ static __poll_t event_poll(struct file *filp, poll_table *wait)
*
* Removes the first event from the queue, places it in the passed buffer.
*
- * If there are no events in the the queue, then one of two things happens,
+ * If there are no events in the queue, then one of two things happens,
* depending on if the file was opened in nonblocking mode: If in nonblocking
* mode, then return -EAGAIN to say there's no data. If in blocking mode, then
* block until an event is available.
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 38800e86ed8a..1ae3c56b66b0 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -959,6 +959,8 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
goto error;
}
+ vq->num_max = vring->num;
+
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 2c2686d5c2fc..ddc08abf398c 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -31,6 +31,7 @@
* @group: sysfs attribute group;
* @groups: list of sysfs attribute group for hwmon registration;
* @regsize: size of a register value;
+ * @io_lock: user access locking;
*/
struct mlxreg_io_priv_data {
struct platform_device *pdev;
@@ -41,6 +42,7 @@ struct mlxreg_io_priv_data {
struct attribute_group group;
const struct attribute_group *groups[2];
int regsize;
+ struct mutex io_lock; /* Protects user access. */
};
static int
@@ -116,14 +118,19 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
u32 regval = 0;
int ret;
+ mutex_lock(&priv->io_lock);
+
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true,
priv->regsize, &regval);
if (ret)
goto access_error;
+ mutex_unlock(&priv->io_lock);
+
return sprintf(buf, "%u\n", regval);
access_error:
+ mutex_unlock(&priv->io_lock);
return ret;
}
@@ -145,6 +152,8 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ mutex_lock(&priv->io_lock);
+
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
priv->regsize, &regval);
if (ret)
@@ -154,9 +163,12 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
if (ret)
goto access_error;
+ mutex_unlock(&priv->io_lock);
+
return len;
access_error:
+ mutex_unlock(&priv->io_lock);
dev_err(&priv->pdev->dev, "Bus access error\n");
return ret;
}
@@ -246,16 +258,27 @@ static int mlxreg_io_probe(struct platform_device *pdev)
return PTR_ERR(priv->hwmon);
}
+ mutex_init(&priv->io_lock);
dev_set_drvdata(&pdev->dev, priv);
return 0;
}
+static int mlxreg_io_remove(struct platform_device *pdev)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&priv->io_lock);
+
+ return 0;
+}
+
static struct platform_driver mlxreg_io_driver = {
.driver = {
.name = "mlxreg-io",
},
.probe = mlxreg_io_probe,
+ .remove = mlxreg_io_remove,
};
module_platform_driver(mlxreg_io_driver);
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index c897a2f15840..55834ccb4ac7 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -716,8 +716,12 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
switch (regval) {
case MLXREG_LC_SN4800_C16:
err = mlxreg_lc_sn4800_c16_config_init(mlxreg_lc, regmap, data);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to config client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
return err;
+ }
break;
default:
return -ENODEV;
@@ -730,8 +734,11 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
mlxreg_lc->mux = platform_device_register_resndata(dev, "i2c-mux-mlxcpld", data->hpdev.nr,
NULL, 0, mlxreg_lc->mux_data,
sizeof(*mlxreg_lc->mux_data));
- if (IS_ERR(mlxreg_lc->mux))
+ if (IS_ERR(mlxreg_lc->mux)) {
+ dev_err(dev, "Failed to create mux infra for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
return PTR_ERR(mlxreg_lc->mux);
+ }
/* Register IO access driver. */
if (mlxreg_lc->io_data) {
@@ -740,6 +747,9 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
platform_device_register_resndata(dev, "mlxreg-io", data->hpdev.nr, NULL, 0,
mlxreg_lc->io_data, sizeof(*mlxreg_lc->io_data));
if (IS_ERR(mlxreg_lc->io_regs)) {
+ dev_err(dev, "Failed to create regio for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->io_regs);
goto fail_register_io;
}
@@ -753,6 +763,9 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
mlxreg_lc->led_data,
sizeof(*mlxreg_lc->led_data));
if (IS_ERR(mlxreg_lc->led)) {
+ dev_err(dev, "Failed to create LED objects for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->led);
goto fail_register_led;
}
@@ -809,7 +822,8 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
data->hpdev.nr);
- return -EFAULT;
+ err = -EFAULT;
+ goto i2c_get_adapter_fail;
}
/* Create device at the top of line card I2C tree.*/
@@ -818,32 +832,40 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (IS_ERR(data->hpdev.client)) {
dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
-
- i2c_put_adapter(data->hpdev.adapter);
- data->hpdev.adapter = NULL;
- return PTR_ERR(data->hpdev.client);
+ err = PTR_ERR(data->hpdev.client);
+ goto i2c_new_device_fail;
}
regmap = devm_regmap_init_i2c(data->hpdev.client,
&mlxreg_lc_regmap_conf);
if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
err = PTR_ERR(regmap);
- goto mlxreg_lc_probe_fail;
+ goto devm_regmap_init_i2c_fail;
}
/* Set default registers. */
for (i = 0; i < mlxreg_lc_regmap_conf.num_reg_defaults; i++) {
err = regmap_write(regmap, mlxreg_lc_regmap_default[i].reg,
mlxreg_lc_regmap_default[i].def);
- if (err)
- goto mlxreg_lc_probe_fail;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set default regmap %d for client %s at bus %d at addr 0x%02x\n",
+ i, data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ goto regmap_write_fail;
+ }
}
/* Sync registers with hardware. */
regcache_mark_dirty(regmap);
err = regcache_sync(regmap);
- if (err)
- goto mlxreg_lc_probe_fail;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ goto regcache_sync_fail;
+ }
par_pdata = data->hpdev.brdinfo->platform_data;
mlxreg_lc->par_regmap = par_pdata->regmap;
@@ -854,12 +876,27 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
/* Configure line card. */
err = mlxreg_lc_config_init(mlxreg_lc, regmap, data);
if (err)
- goto mlxreg_lc_probe_fail;
+ goto mlxreg_lc_config_init_fail;
return err;
-mlxreg_lc_probe_fail:
+mlxreg_lc_config_init_fail:
+regcache_sync_fail:
+regmap_write_fail:
+devm_regmap_init_i2c_fail:
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ }
+i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+i2c_get_adapter_fail:
+ /* Clear event notification callback and handle. */
+ if (data->notifier) {
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
+ }
return err;
}
@@ -868,11 +905,18 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
- /* Clear event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = NULL;
- data->notifier->handle = NULL;
- }
+ /*
+ * Probing and removal are invoked by hotplug events raised upon line card insertion and
+ * removal. If the probing procedure fails, all data is cleared. However, a hotplug event
+ * will still be raised on line card removal and will trigger the removal procedure. In
+ * that case there is nothing to remove.
+ */
+ if (!data->notifier || !data->notifier->handle)
+ return 0;
+
+ /* Clear event notification callback and handle. */
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
/* Destroy static I2C device feeding by main power. */
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 4ff5c3a12991..921520475ff6 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -264,7 +264,7 @@ static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
int i, m;
unsigned char ec_cmd[EC_MAX_CMD_ARGS];
unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
- char cmdbuf[64];
+ char cmdbuf[64] = "";
int ec_cmd_bytes;
mutex_lock(&ec_dbgfs_lock);
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index eb79fbed8059..b629e82af97c 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -72,18 +72,45 @@ config SURFACE_AGGREGATOR_CDEV
The provided interface is intended for debugging and development only,
and should not be used otherwise.
+config SURFACE_AGGREGATOR_HUB
+ tristate "Surface System Aggregator Module Subsystem Device Hubs"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-hub drivers for Surface System Aggregator Module (SSAM) subsystem
+ devices.
+
+ Provides subsystem hub drivers which manage client devices on various
+ SSAM subsystems. In some subsystems, notably the BAS subsystem managing
+ devices contained in the base of the Surface Book 3 and the KIP subsystem
+ managing type-cover devices in the Surface Pro 8 and Surface Pro X,
+ devices can be (hot-)removed. Hub devices and drivers are required to
+ manage these subdevices.
+
+ Devices managed via these hubs are:
+ - Battery/AC devices (Surface Book 3).
+ - HID input devices (7th-generation and later models with detachable
+ input devices).
+
+ Select M (recommended) or Y here if you want support for the devices
+ mentioned above on the corresponding Surface models. Without this
+ module, the respective devices mentioned above will not be instantiated
+ and thus any functionality provided by them will be missing, even when
+ drivers for these devices are present. This module only provides the
+ respective subsystem hubs. Both drivers and device specification (e.g.
+ via the Surface Aggregator Registry) for these devices still need to be
+ selected via other options.
+
config SURFACE_AGGREGATOR_REGISTRY
tristate "Surface System Aggregator Module Device Registry"
depends on SURFACE_AGGREGATOR
depends on SURFACE_AGGREGATOR_BUS
help
- Device-registry and device-hubs for Surface System Aggregator Module
- (SSAM) devices.
+ Device-registry for Surface System Aggregator Module (SSAM) devices.
Provides a module and driver which act as a device-registry for SSAM
client devices that cannot be detected automatically, e.g. via ACPI.
- Such devices are instead provided via this registry and attached via
- device hubs, also provided in this module.
+ Such devices are instead provided and managed via this registry.
Devices provided via this registry are:
- Platform profile (performance-/cooling-mode) device (5th- and later
@@ -99,6 +126,29 @@ config SURFACE_AGGREGATOR_REGISTRY
the respective client devices. Drivers for these devices still need to
be selected via the other options.
+config SURFACE_AGGREGATOR_TABLET_SWITCH
+ tristate "Surface Aggregator Generic Tablet-Mode Switch Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ depends on INPUT
+ help
+ Provides a tablet-mode switch input device on Microsoft Surface models
+ using the KIP subsystem for detachable keyboards (e.g. keyboard covers)
+ or the POS subsystem for device/screen posture changes.
+
+ The KIP subsystem is used on newer Surface generations to handle
+ detachable input peripherals, specifically the keyboard cover (containing
+ keyboard and touchpad) on the Surface Pro 8 and Surface Pro X. The POS
+ subsystem is used for device posture change notifications on the Surface
+ Laptop Studio. This module provides a driver to let user-space know when
+ the device should be considered in tablet-mode due to the keyboard cover
+ being detached or folded back (essentially signaling when the keyboard is
+ not available for input). It does so by creating a tablet-mode switch
+ input device, sending the standard SW_TABLET_MODE event on mode change.
+
+ Select M or Y here if you want to provide tablet-mode switch input
+ events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio.
+
config SURFACE_DTX
tristate "Surface DTX (Detachment System) Driver"
depends on SURFACE_AGGREGATOR
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
index 0fc9cd3e4dd9..53344330939b 100644
--- a/drivers/platform/surface/Makefile
+++ b/drivers/platform/surface/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_HUB) += surface_aggregator_hub.o
obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH) += surface_aggregator_tabletsw.o
obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index cab020324256..c114f9dd5fe1 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
index c0d550eda5cd..fdf664a217f9 100644
--- a/drivers/platform/surface/aggregator/Makefile
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
# For include/trace/define_trace.h to include trace.h
CFLAGS_core.o = -I$(src)
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index abbbb5b08b07..de539938896e 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -2,10 +2,11 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/surface_aggregator/controller.h>
@@ -14,6 +15,9 @@
#include "bus.h"
#include "controller.h"
+
+/* -- Device and bus functions. --------------------------------------------- */
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -46,6 +50,7 @@ static void ssam_device_release(struct device *dev)
struct ssam_device *sdev = to_ssam_device(dev);
ssam_controller_put(sdev->ctrl);
+ fwnode_handle_put(sdev->dev.fwnode);
kfree(sdev);
}
@@ -363,6 +368,134 @@ void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
}
EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
+
+/* -- Bus registration. ----------------------------------------------------- */
+
+/**
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+ return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+ return bus_unregister(&ssam_bus_type);
+}
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+static int ssam_device_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+ u8 d, tc, tid, iid, fn;
+ int n;
+
+ n = sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+ if (n != 5)
+ return -EINVAL;
+
+ uid->domain = d;
+ uid->category = tc;
+ uid->target = tid;
+ uid->instance = iid;
+ uid->function = fn;
+
+ return 0;
+}
+
+static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
+{
+ const char *str = fwnode_get_name(node);
+
+ /*
+ * To simplify definitions of firmware nodes, we set the device name
+ * based on the UID of the device, prefixed with "ssam:".
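+ * A node name therefore has the form "ssam:dd:cc:tt:ii:ff", with the five
+ * hex fields denoting domain, category, target, instance and function
+ * (e.g. "ssam:01:02:03:04:05"; values purely illustrative).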
+ */
+ if (strncmp(str, "ssam:", strlen("ssam:")) != 0)
+ return -ENODEV;
+
+ str += strlen("ssam:");
+ return ssam_device_uid_from_string(str, uid);
+}
+
+static int ssam_add_client_device(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct ssam_device_uid uid;
+ struct ssam_device *sdev;
+ int status;
+
+ status = ssam_get_uid_for_node(node, &uid);
+ if (status)
+ return status;
+
+ sdev = ssam_device_alloc(ctrl, uid);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev.parent = parent;
+ sdev->dev.fwnode = fwnode_handle_get(node);
+
+ status = ssam_device_add(sdev);
+ if (status)
+ ssam_device_put(sdev);
+
+ return status;
+}
+
+/**
+ * __ssam_register_clients() - Register client devices defined under the
+ * given firmware node as children of the given device.
+ * @parent: The parent device under which clients should be registered.
+ * @ctrl: The controller with which the clients should be registered.
+ * @node: The firmware node holding definitions of the devices to be added.
+ *
+ * Register all clients that have been defined as children of the given root
+ * firmware node as children of the given parent device. The respective child
+ * firmware nodes will be associated with the correspondingly created child
+ * devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Note that, generally, the use of either ssam_device_register_clients() or
+ * ssam_register_clients() should be preferred as they directly use the
+ * firmware node and/or controller associated with the given device. This
+ * function is only intended for use when different device specifications (e.g.
+ * ACPI and firmware nodes) need to be combined (as is done in the platform hub
+ * of the device registry).
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct fwnode_handle *child;
+ int status;
+
+ fwnode_for_each_child_node(node, child) {
+ /*
+ * Try to add the device specified in the firmware node. If
+ * this fails with -ENODEV, the node does not specify any SSAM
+ * device, so ignore it and continue with the next one.
+ */
+ status = ssam_add_client_device(parent, ctrl, child);
+ if (status && status != -ENODEV)
+ goto err;
+ }
+
+ return 0;
+err:
+ ssam_remove_clients(parent);
+ return status;
+}
+EXPORT_SYMBOL_GPL(__ssam_register_clients);
+
static int ssam_remove_device(struct device *dev, void *_data)
{
struct ssam_device *sdev = to_ssam_device(dev);
@@ -387,19 +520,3 @@ void ssam_remove_clients(struct device *dev)
device_for_each_child_reverse(dev, NULL, ssam_remove_device);
}
EXPORT_SYMBOL_GPL(ssam_remove_clients);
-
-/**
- * ssam_bus_register() - Register and set-up the SSAM client device bus.
- */
-int ssam_bus_register(void)
-{
- return bus_register(&ssam_bus_type);
-}
-
-/**
- * ssam_bus_unregister() - Unregister the SSAM client device bus.
- */
-void ssam_bus_unregister(void)
-{
- return bus_unregister(&ssam_bus_type);
-}
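
The node names consumed here follow the "ssam:<domain>:<category>:<target>:<instance>:<function>" convention, each field a two-digit hex byte, matching the sscanf() format in ssam_device_uid_from_string(). A minimal usage sketch of the newly exported helper follows; the function names are hypothetical, only __ssam_register_clients() and ssam_remove_clients() are taken from the patch:

/* Illustrative sketch: populate and depopulate a hub's client devices. */
static int example_hub_populate(struct ssam_device *sdev)
{
	/* Children named "ssam:..." under this node become client devices. */
	return __ssam_register_clients(&sdev->dev, sdev->ctrl,
				       dev_fwnode(&sdev->dev));
}

static void example_hub_depopulate(struct ssam_device *sdev)
{
	/* Removes all client devices previously registered under the hub. */
	ssam_remove_clients(&sdev->dev);
}
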
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
index 6964ee84e79c..5b4dbf21906c 100644
--- a/drivers/platform/surface/aggregator/bus.h
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -2,7 +2,7 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_BUS_H
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index b8c377b3f932..43e765199137 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
@@ -2199,16 +2199,26 @@ static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
}
/**
- * ssam_nf_refcount_disable_free() - Disable event for reference count entry if it is
- * no longer in use and free the corresponding entry.
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if
+ * it is no longer in use and free the corresponding entry.
* @ctrl: The controller to disable the event on.
* @entry: The reference count entry for the event to be disabled.
* @flags: The flags used for enabling the event on the EC.
+ * @ec: Flag specifying if the event should actually be disabled on the EC.
*
- * If the reference count equals zero, i.e. the event is no longer requested by
- * any client, the event will be disabled and the corresponding reference count
- * entry freed. The reference count entry must not be used any more after a
- * call to this function.
+ * If ``ec`` equals ``true`` and the reference count equals zero (i.e. the
+ * event is no longer requested by any client), the specified event will be
+ * disabled on the EC via the corresponding request.
+ *
+ * If ``ec`` equals ``false``, no request will be sent to the EC and the event
+ * can be considered in a detached state (i.e. no longer used but still
+ * enabled). Disabling an event via this method may be required for
+ * hot-removable devices, where event disable requests may time out after the
+ * device has been physically removed.
+ *
+ * In both cases, if the reference count equals zero, the corresponding
+ * reference count entry will be freed. The reference count entry must not be
+ * used any more after a call to this function.
*
* Also checks if the flags used for disabling the event match the flags used
* for enabling the event and warns if they do not (regardless of reference
@@ -2223,7 +2233,7 @@ static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
 * returns the status of the event-disable EC command.
*/
static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
- struct ssam_nf_refcount_entry *entry, u8 flags)
+ struct ssam_nf_refcount_entry *entry, u8 flags, bool ec)
{
const struct ssam_event_registry reg = entry->key.reg;
const struct ssam_event_id id = entry->key.id;
@@ -2232,8 +2242,9 @@ static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
lockdep_assert_held(&nf->lock);
- ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
- reg.target_category, id.target_category, id.instance, entry->refcount);
+ ssam_dbg(ctrl, "%s event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ ec ? "disabling" : "detaching", reg.target_category, id.target_category,
+ id.instance, entry->refcount);
if (entry->flags != flags) {
ssam_warn(ctrl,
@@ -2242,7 +2253,7 @@ static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
id.instance);
}
- if (entry->refcount == 0) {
+ if (ec && entry->refcount == 0) {
status = ssam_ssh_event_disable(ctrl, reg, id, flags);
kfree(entry);
}
@@ -2322,20 +2333,26 @@ int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notif
EXPORT_SYMBOL_GPL(ssam_notifier_register);
/**
- * ssam_notifier_unregister() - Unregister an event notifier.
- * @ctrl: The controller the notifier has been registered on.
- * @n: The event notifier to unregister.
+ * __ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ * @disable: Whether to disable the corresponding event on the EC.
*
* Unregister an event notifier. Decrement the usage counter of the associated
* SAM event if the notifier is not marked as an observer. If the usage counter
- * reaches zero, the event will be disabled.
+ * reaches zero and ``disable`` equals ``true``, the event will be disabled.
+ *
+ * Useful for hot-removable devices, where communication may fail once the
+ * device has been physically removed. In that case, specifying ``disable`` as
+ * ``false`` avoids communication with the EC.
*
* Return: Returns zero on success, %-ENOENT if the given notifier block has
* not been registered on the controller. If the given notifier block was the
* last one associated with its specific event, returns the status of the
* event-disable EC-command.
*/
-int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
+int __ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n,
+ bool disable)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
struct ssam_nf_refcount_entry *entry;
@@ -2373,7 +2390,7 @@ int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_not
goto remove;
}
- status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags, disable);
}
remove:
@@ -2383,7 +2400,7 @@ remove:
return status;
}
-EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
+EXPORT_SYMBOL_GPL(__ssam_notifier_unregister);
/**
* ssam_controller_event_enable() - Enable the specified event.
@@ -2477,7 +2494,7 @@ int ssam_controller_event_disable(struct ssam_controller *ctrl,
return -ENOENT;
}
- status = ssam_nf_refcount_disable_free(ctrl, entry, flags);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, flags, true);
mutex_unlock(&nf->lock);
return status;
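
Splitting the unregistration path into __ssam_notifier_unregister() with a disable flag lets clients of hot-removable devices skip the event-disable request, which would otherwise time out once the hardware is gone. Below is a minimal sketch of the intended call pattern; the public wrapper keeping the old behaviour (disable == true) is assumed to live in the header and is not visible in these hunks:

/* Illustrative sketch: tear down a notifier for a possibly removed device. */
static int example_notifier_teardown(struct ssam_controller *ctrl,
				     struct ssam_event_notifier *nf,
				     bool hot_removed)
{
	/* Skip the EC round-trip if the device is already physically gone. */
	return __ssam_notifier_unregister(ctrl, nf, !hot_removed);
}
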
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
index a0963c3562ff..f0d987abc51e 100644
--- a/drivers/platform/surface/aggregator/controller.h
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index a62c5dfe42d6..1a6373dea109 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -7,7 +7,7 @@
* Handles communication via requests as well as enabling, disabling, and
* relaying of events.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
index e562958ffdf0..f3ecad92eefd 100644
--- a/drivers/platform/surface/aggregator/ssh_msgb.h
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -2,7 +2,7 @@
/*
* SSH message builder functions.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 8a4451c1ffe5..6748fe4ac5d5 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
index 2eb329f0b91a..64633522f971 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
index b77912f8f13b..a6f668694365 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.c
+++ b/drivers/platform/surface/aggregator/ssh_parser.c
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
index 3bd6e180fd16..801d8fa69fb5 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.h
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 790f7f0eee98..f5565570f16c 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
index 9c3cbae2d4bd..4e387a031351 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
index de64cf169060..2a2c17771d01 100644
--- a/drivers/platform/surface/aggregator/trace.h
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -2,7 +2,7 @@
/*
* Trace points for SSAM/SSH.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#undef TRACE_SYSTEM
@@ -76,7 +76,7 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
-TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC0);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
@@ -85,6 +85,11 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SPT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SYS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC1);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SHB);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_POS);
#define SSAM_PTR_UID_LEN 9
#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
@@ -229,40 +234,45 @@ static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
#define ssam_show_ssh_tc(rqid) \
__print_symbolic(rqid, \
- { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
- { SSAM_SSH_TC_SAM, "SAM" }, \
- { SSAM_SSH_TC_BAT, "BAT" }, \
- { SSAM_SSH_TC_TMP, "TMP" }, \
- { SSAM_SSH_TC_PMC, "PMC" }, \
- { SSAM_SSH_TC_FAN, "FAN" }, \
- { SSAM_SSH_TC_PoM, "PoM" }, \
- { SSAM_SSH_TC_DBG, "DBG" }, \
- { SSAM_SSH_TC_KBD, "KBD" }, \
- { SSAM_SSH_TC_FWU, "FWU" }, \
- { SSAM_SSH_TC_UNI, "UNI" }, \
- { SSAM_SSH_TC_LPC, "LPC" }, \
- { SSAM_SSH_TC_TCL, "TCL" }, \
- { SSAM_SSH_TC_SFL, "SFL" }, \
- { SSAM_SSH_TC_KIP, "KIP" }, \
- { SSAM_SSH_TC_EXT, "EXT" }, \
- { SSAM_SSH_TC_BLD, "BLD" }, \
- { SSAM_SSH_TC_BAS, "BAS" }, \
- { SSAM_SSH_TC_SEN, "SEN" }, \
- { SSAM_SSH_TC_SRQ, "SRQ" }, \
- { SSAM_SSH_TC_MCU, "MCU" }, \
- { SSAM_SSH_TC_HID, "HID" }, \
- { SSAM_SSH_TC_TCH, "TCH" }, \
- { SSAM_SSH_TC_BKL, "BKL" }, \
- { SSAM_SSH_TC_TAM, "TAM" }, \
- { SSAM_SSH_TC_ACC, "ACC" }, \
- { SSAM_SSH_TC_UFI, "UFI" }, \
- { SSAM_SSH_TC_USC, "USC" }, \
- { SSAM_SSH_TC_PEN, "PEN" }, \
- { SSAM_SSH_TC_VID, "VID" }, \
- { SSAM_SSH_TC_AUD, "AUD" }, \
- { SSAM_SSH_TC_SMC, "SMC" }, \
- { SSAM_SSH_TC_KPD, "KPD" }, \
- { SSAM_SSH_TC_REG, "REG" } \
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TC_SAM, "SAM" }, \
+ { SSAM_SSH_TC_BAT, "BAT" }, \
+ { SSAM_SSH_TC_TMP, "TMP" }, \
+ { SSAM_SSH_TC_PMC, "PMC" }, \
+ { SSAM_SSH_TC_FAN, "FAN" }, \
+ { SSAM_SSH_TC_PoM, "PoM" }, \
+ { SSAM_SSH_TC_DBG, "DBG" }, \
+ { SSAM_SSH_TC_KBD, "KBD" }, \
+ { SSAM_SSH_TC_FWU, "FWU" }, \
+ { SSAM_SSH_TC_UNI, "UNI" }, \
+ { SSAM_SSH_TC_LPC, "LPC" }, \
+ { SSAM_SSH_TC_TCL, "TCL" }, \
+ { SSAM_SSH_TC_SFL, "SFL" }, \
+ { SSAM_SSH_TC_KIP, "KIP" }, \
+ { SSAM_SSH_TC_EXT, "EXT" }, \
+ { SSAM_SSH_TC_BLD, "BLD" }, \
+ { SSAM_SSH_TC_BAS, "BAS" }, \
+ { SSAM_SSH_TC_SEN, "SEN" }, \
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
+ { SSAM_SSH_TC_MCU, "MCU" }, \
+ { SSAM_SSH_TC_HID, "HID" }, \
+ { SSAM_SSH_TC_TCH, "TCH" }, \
+ { SSAM_SSH_TC_BKL, "BKL" }, \
+ { SSAM_SSH_TC_TAM, "TAM" }, \
+ { SSAM_SSH_TC_ACC0, "ACC0" }, \
+ { SSAM_SSH_TC_UFI, "UFI" }, \
+ { SSAM_SSH_TC_USC, "USC" }, \
+ { SSAM_SSH_TC_PEN, "PEN" }, \
+ { SSAM_SSH_TC_VID, "VID" }, \
+ { SSAM_SSH_TC_AUD, "AUD" }, \
+ { SSAM_SSH_TC_SMC, "SMC" }, \
+ { SSAM_SSH_TC_KPD, "KPD" }, \
+ { SSAM_SSH_TC_REG, "REG" }, \
+ { SSAM_SSH_TC_SPT, "SPT" }, \
+ { SSAM_SSH_TC_SYS, "SYS" }, \
+ { SSAM_SSH_TC_ACC1, "ACC1" }, \
+	{ SSAM_SSH_TC_SHB,              "SHB" },		\
+ { SSAM_SSH_TC_POS, "POS" } \
)
DECLARE_EVENT_CLASS(ssam_frame_class,
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 7b758f8cc137..44e317970557 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -8,7 +8,7 @@
* notifications sent from ACPI via the SAN interface by providing them to any
* registered external driver.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
@@ -37,6 +37,7 @@ struct san_data {
#define to_san_data(ptr, member) \
container_of(ptr, struct san_data, member)
+static struct workqueue_struct *san_wq;
/* -- dGPU notifier interface. ---------------------------------------------- */
@@ -356,7 +357,7 @@ static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
- schedule_delayed_work(&work->work, delay);
+ queue_delayed_work(san_wq, &work->work, delay);
return SSAM_NOTIF_HANDLED;
}
@@ -861,7 +862,7 @@ static int san_remove(struct platform_device *pdev)
* We have unregistered our event sources. Now we need to ensure that
* all delayed works they may have spawned are run to completion.
*/
- flush_scheduled_work();
+ flush_workqueue(san_wq);
return 0;
}
@@ -881,7 +882,27 @@ static struct platform_driver surface_acpi_notify = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
-module_platform_driver(surface_acpi_notify);
+
+static int __init san_init(void)
+{
+ int ret;
+
+ san_wq = alloc_workqueue("san_wq", 0, 0);
+ if (!san_wq)
+ return -ENOMEM;
+ ret = platform_driver_register(&surface_acpi_notify);
+ if (ret)
+ destroy_workqueue(san_wq);
+ return ret;
+}
+module_init(san_init);
+
+static void __exit san_exit(void)
+{
+ platform_driver_unregister(&surface_acpi_notify);
+ destroy_workqueue(san_wq);
+}
+module_exit(san_exit);
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 30fb50fde450..492c82e69182 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -3,7 +3,7 @@
* Provides user-space access to the SSAM EC via the /dev/surface/aggregator
* misc device. Intended for debugging and development.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c
new file mode 100644
index 000000000000..43061514be38
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_hub.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Surface System Aggregator Module (SSAM) subsystem device hubs.
+ *
+ * Provides a driver for SSAM subsystem device hubs. This driver performs
+ * instantiation of the devices managed by said hubs and takes care of
+ * (hot-)removal.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic subsystem hub driver framework. -------------------------- */
+
+enum ssam_hub_state {
+ SSAM_HUB_UNINITIALIZED, /* Only set during initialization. */
+ SSAM_HUB_CONNECTED,
+ SSAM_HUB_DISCONNECTED,
+};
+
+enum ssam_hub_flags {
+ SSAM_HUB_HOT_REMOVED,
+};
+
+struct ssam_hub;
+
+struct ssam_hub_ops {
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+};
+
+struct ssam_hub {
+ struct ssam_device *sdev;
+
+ enum ssam_hub_state state;
+ unsigned long flags;
+
+ struct delayed_work update_work;
+ unsigned long connect_delay;
+
+ struct ssam_event_notifier notif;
+ struct ssam_hub_ops ops;
+};
+
+struct ssam_hub_desc {
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ } event;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+ } ops;
+
+ unsigned long connect_delay_ms;
+};
+
+static void ssam_hub_update_workfn(struct work_struct *work)
+{
+ struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work);
+ enum ssam_hub_state state;
+ int status = 0;
+
+ status = hub->ops.get_state(hub, &state);
+ if (status)
+ return;
+
+ /*
+ * There is a small possibility that hub devices were hot-removed and
+ * re-added before we were able to remove them here. In that case, both
+ * the state returned by get_state() and the state of the hub will
+ * equal SSAM_HUB_CONNECTED and we would bail early below, which would
+ * leave child devices without proper (re-)initialization and the
+ * hot-remove flag set.
+ *
+ * Therefore, we check whether devices have been hot-removed via an
+ * additional flag on the hub and, in this case, override the returned
+ * hub state. In case of a missed disconnect (i.e. get_state returned
+ * "connected"), we further need to re-schedule this work (with the
+ * appropriate delay) as the actual connect work submission might have
+ * been merged with this one.
+ *
+ * This then leads to one of two cases: Either we submit an unnecessary
+ * work item (which will get ignored via either the queue or the state
+ * checks) or, in the unlikely case that the work is actually required,
+ * double the normal connect delay.
+ */
+ if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) {
+ if (state == SSAM_HUB_CONNECTED)
+ schedule_delayed_work(&hub->update_work, hub->connect_delay);
+
+ state = SSAM_HUB_DISCONNECTED;
+ }
+
+ if (hub->state == state)
+ return;
+ hub->state = state;
+
+ if (hub->state == SSAM_HUB_CONNECTED)
+ status = ssam_device_register_clients(hub->sdev);
+ else
+ ssam_remove_clients(&hub->sdev->dev);
+
+ if (status)
+ dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status);
+}
+
+static int ssam_hub_mark_hot_removed(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_mark_hot_removed(sdev);
+
+ return 0;
+}
+
+static void ssam_hub_update(struct ssam_hub *hub, bool connected)
+{
+ unsigned long delay;
+
+ /* Mark devices as hot-removed before we remove any. */
+ if (!connected) {
+ set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags);
+ device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed);
+ }
+
+ /*
+ * Delay update when the base/keyboard cover is being connected to give
+ * devices/EC some time to set up.
+ */
+ delay = connected ? hub->connect_delay : 0;
+
+ schedule_delayed_work(&hub->update_work, delay);
+}
+
+static int __maybe_unused ssam_hub_resume(struct device *dev)
+{
+ struct ssam_hub *hub = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume);
+
+static int ssam_hub_probe(struct ssam_device *sdev)
+{
+ const struct ssam_hub_desc *desc;
+ struct ssam_hub *hub;
+ int status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->sdev = sdev;
+ hub->state = SSAM_HUB_UNINITIALIZED;
+
+ hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
+ hub->notif.base.fn = desc->ops.notify;
+ hub->notif.event.reg = desc->event.reg;
+ hub->notif.event.id = desc->event.id;
+ hub->notif.event.mask = desc->event.mask;
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms);
+ hub->ops.get_state = desc->ops.get_state;
+
+ INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn);
+
+ ssam_device_set_drvdata(sdev, hub);
+
+ status = ssam_device_notifier_register(sdev, &hub->notif);
+ if (status)
+ return status;
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+
+static void ssam_hub_remove(struct ssam_device *sdev)
+{
+ struct ssam_hub *hub = ssam_device_get_drvdata(sdev);
+
+ ssam_device_notifier_unregister(sdev, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_remove_clients(&sdev->dev);
+}
+
+
+/* -- SSAM base-subsystem hub driver. --------------------------------------- */
+
+/*
+ * Some devices (especially battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET 0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
+
+static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ u8 opmode;
+ int status;
+
+ status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+ return status;
+ }
+
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
+ *state = SSAM_HUB_CONNECTED;
+ else
+ *state = SSAM_HUB_DISCONNECTED;
+
+ return 0;
+}
+
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+ return 0;
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+
+ /*
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+ * consumed by the detachment system driver. We're just a (more or less)
+ * silent observer.
+ */
+ return 0;
+}
+
+static const struct ssam_hub_desc base_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_BAS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_NONE,
+ },
+ .ops = {
+ .notify = ssam_base_hub_notif,
+ .get_state = ssam_base_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */
+
+/*
+ * Some devices may need a bit of time to be fully usable after being
+ * (re-)connected. This delay has been determined via experimentation.
+ */
+#define SSAM_KIP_UPDATE_CONNECT_DELAY 250
+
+#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x2c,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ int status;
+ u8 connected;
+
+ status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status);
+ return status;
+ }
+
+ *state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED;
+ return 0;
+}
+
+static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_hub_desc kip_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+ .ops = {
+ .notify = ssam_kip_hub_notif,
+ .get_state = ssam_kip_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_hub_match[] = {
+ { SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
+ { SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
+ { }
+};
+MODULE_DEVICE_TABLE(ssam, ssam_hub_match);
+
+static struct ssam_device_driver ssam_subsystem_hub_driver = {
+ .probe = ssam_hub_probe,
+ .remove = ssam_hub_remove,
+ .match_table = ssam_hub_match,
+ .driver = {
+ .name = "surface_aggregator_subsystem_hub",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_hub_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_subsystem_hub_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
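
The SSAM_VDEV() match entries above pair with the hub firmware nodes added to the device registry later in this patch; decoding those node names with the domain:category:target:instance:function convention used by ssam_device_uid_from_string() makes the correspondence explicit. The values below are read off this diff, not an external reference:

/*
 * "ssam:00:00:01:0e:00" -> SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00)
 *   domain 0x00 (virtual), category 0x00 (HUB), target 0x01,
 *   instance 0x0e (KIP), function 0x00
 *
 * "ssam:00:00:02:11:00" -> SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00)
 *   domain 0x00 (virtual), category 0x00 (HUB), target 0x02,
 *   instance 0x11 (BAS), function 0x00
 */
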
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index ce2bd88feeaa..d5655f6a4a41 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -6,19 +6,16 @@
* cannot be auto-detected. Provides device-hubs and performs instantiation
* for these devices.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
-#include <linux/limits.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
@@ -41,9 +38,15 @@ static const struct software_node ssam_node_root = {
.name = "ssam_platform_hub",
};
+/* KIP device hub (connects keyboard cover devices on Surface Pro 8). */
+static const struct software_node ssam_node_hub_kip = {
+ .name = "ssam:00:00:01:0e:00",
+ .parent = &ssam_node_root,
+};
+
/* Base device hub (devices attached to Surface Book 3 base). */
static const struct software_node ssam_node_hub_base = {
- .name = "ssam:00:00:02:00:00",
+ .name = "ssam:00:00:02:11:00",
.parent = &ssam_node_root,
};
@@ -71,6 +74,12 @@ static const struct software_node ssam_node_tmp_pprof = {
.parent = &ssam_node_root,
};
+/* Tablet-mode switch via KIP subsystem. */
+static const struct software_node ssam_node_kip_tablet_switch = {
+ .name = "ssam:01:0e:01:00:01",
+ .parent = &ssam_node_root,
+};
+
/* DTX / detachment-system device (Surface Book 3). */
static const struct software_node ssam_node_bas_dtx = {
.name = "ssam:01:11:01:00:00",
@@ -155,6 +164,36 @@ static const struct software_node ssam_node_hid_base_iid6 = {
.parent = &ssam_node_hub_base,
};
+/* HID keyboard (KIP hub). */
+static const struct software_node ssam_node_hid_kip_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID pen stash (KIP hub; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_kip_penstash = {
+ .name = "ssam:01:15:02:02:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID touchpad (KIP hub). */
+static const struct software_node ssam_node_hid_kip_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID device instance 5 (KIP hub, unknown HID device). */
+static const struct software_node ssam_node_hid_kip_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* Tablet-mode switch via POS subsystem. */
+static const struct software_node ssam_node_pos_tablet_switch = {
+ .name = "ssam:01:26:01:00:01",
+ .parent = &ssam_node_root,
+};
+
/*
 * Devices for 5th- and 6th-generation models:
* - Surface Book 2,
@@ -201,6 +240,7 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
+ &ssam_node_pos_tablet_switch,
&ssam_node_hid_tid1_keyboard,
&ssam_node_hid_tid1_penstash,
&ssam_node_hid_tid1_touchpad,
@@ -230,289 +270,18 @@ static const struct software_node *ssam_node_group_sp7[] = {
static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_root,
+ &ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
- /* TODO: Add support for keyboard cover. */
- NULL,
-};
-
-
-/* -- Device registry helper functions. ------------------------------------- */
-
-static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
-{
- u8 d, tc, tid, iid, fn;
- int n;
-
- n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
- if (n != 5)
- return -EINVAL;
-
- uid->domain = d;
- uid->category = tc;
- uid->target = tid;
- uid->instance = iid;
- uid->function = fn;
-
- return 0;
-}
-
-static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
-{
- struct ssam_device_uid uid;
- struct ssam_device *sdev;
- int status;
-
- status = ssam_uid_from_string(fwnode_get_name(node), &uid);
- if (status)
- return status;
-
- sdev = ssam_device_alloc(ctrl, uid);
- if (!sdev)
- return -ENOMEM;
-
- sdev->dev.parent = parent;
- sdev->dev.fwnode = node;
-
- status = ssam_device_add(sdev);
- if (status)
- ssam_device_put(sdev);
-
- return status;
-}
-
-static int ssam_hub_register_clients(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
-{
- struct fwnode_handle *child;
- int status;
-
- fwnode_for_each_child_node(node, child) {
- /*
- * Try to add the device specified in the firmware node. If
- * this fails with -EINVAL, the node does not specify any SSAM
- * device, so ignore it and continue with the next one.
- */
-
- status = ssam_hub_add_device(parent, ctrl, child);
- if (status && status != -EINVAL)
- goto err;
- }
-
- return 0;
-err:
- ssam_remove_clients(parent);
- return status;
-}
-
-
-/* -- SSAM base-hub driver. ------------------------------------------------- */
-
-/*
- * Some devices (especially battery) may need a bit of time to be fully usable
- * after being (re-)connected. This delay has been determined via
- * experimentation.
- */
-#define SSAM_BASE_UPDATE_CONNECT_DELAY msecs_to_jiffies(2500)
-
-enum ssam_base_hub_state {
- SSAM_BASE_HUB_UNINITIALIZED,
- SSAM_BASE_HUB_CONNECTED,
- SSAM_BASE_HUB_DISCONNECTED,
-};
-
-struct ssam_base_hub {
- struct ssam_device *sdev;
-
- enum ssam_base_hub_state state;
- struct delayed_work update_work;
-
- struct ssam_event_notifier notif;
-};
-
-SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
- .target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
- .command_id = 0x0d,
- .instance_id = 0x00,
-});
-
-#define SSAM_BAS_OPMODE_TABLET 0x00
-#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
-
-static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
-{
- u8 opmode;
- int status;
-
- status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
- if (status < 0) {
- dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
- return status;
- }
-
- if (opmode != SSAM_BAS_OPMODE_TABLET)
- *state = SSAM_BASE_HUB_CONNECTED;
- else
- *state = SSAM_BASE_HUB_DISCONNECTED;
-
- return 0;
-}
-
-static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ssam_base_hub *hub = dev_get_drvdata(dev);
- bool connected = hub->state == SSAM_BASE_HUB_CONNECTED;
-
- return sysfs_emit(buf, "%d\n", connected);
-}
-
-static struct device_attribute ssam_base_hub_attr_state =
- __ATTR(state, 0444, ssam_base_hub_state_show, NULL);
-
-static struct attribute *ssam_base_hub_attrs[] = {
- &ssam_base_hub_attr_state.attr,
+ &ssam_node_kip_tablet_switch,
+ &ssam_node_hid_kip_keyboard,
+ &ssam_node_hid_kip_penstash,
+ &ssam_node_hid_kip_touchpad,
+ &ssam_node_hid_kip_iid5,
NULL,
};
-static const struct attribute_group ssam_base_hub_group = {
- .attrs = ssam_base_hub_attrs,
-};
-
-static void ssam_base_hub_update_workfn(struct work_struct *work)
-{
- struct ssam_base_hub *hub = container_of(work, struct ssam_base_hub, update_work.work);
- struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
- enum ssam_base_hub_state state;
- int status = 0;
-
- status = ssam_base_hub_query_state(hub, &state);
- if (status)
- return;
-
- if (hub->state == state)
- return;
- hub->state = state;
-
- if (hub->state == SSAM_BASE_HUB_CONNECTED)
- status = ssam_hub_register_clients(&hub->sdev->dev, hub->sdev->ctrl, node);
- else
- ssam_remove_clients(&hub->sdev->dev);
-
- if (status)
- dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
-}
-
-static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
-{
- struct ssam_base_hub *hub = container_of(nf, struct ssam_base_hub, notif);
- unsigned long delay;
-
- if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
- return 0;
-
- if (event->length < 1) {
- dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
- return 0;
- }
-
- /*
- * Delay update when the base is being connected to give devices/EC
- * some time to set up.
- */
- delay = event->data[0] ? SSAM_BASE_UPDATE_CONNECT_DELAY : 0;
-
- schedule_delayed_work(&hub->update_work, delay);
-
- /*
- * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
- * consumed by the detachment system driver. We're just a (more or less)
- * silent observer.
- */
- return 0;
-}
-
-static int __maybe_unused ssam_base_hub_resume(struct device *dev)
-{
- struct ssam_base_hub *hub = dev_get_drvdata(dev);
-
- schedule_delayed_work(&hub->update_work, 0);
- return 0;
-}
-static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
-
-static int ssam_base_hub_probe(struct ssam_device *sdev)
-{
- struct ssam_base_hub *hub;
- int status;
-
- hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
- if (!hub)
- return -ENOMEM;
-
- hub->sdev = sdev;
- hub->state = SSAM_BASE_HUB_UNINITIALIZED;
-
- hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
- hub->notif.base.fn = ssam_base_hub_notif;
- hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
- hub->notif.event.id.target_category = SSAM_SSH_TC_BAS,
- hub->notif.event.id.instance = 0,
- hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
- hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
-
- INIT_DELAYED_WORK(&hub->update_work, ssam_base_hub_update_workfn);
-
- ssam_device_set_drvdata(sdev, hub);
-
- status = ssam_notifier_register(sdev->ctrl, &hub->notif);
- if (status)
- return status;
-
- status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
- if (status)
- goto err;
-
- schedule_delayed_work(&hub->update_work, 0);
- return 0;
-
-err:
- ssam_notifier_unregister(sdev->ctrl, &hub->notif);
- cancel_delayed_work_sync(&hub->update_work);
- ssam_remove_clients(&sdev->dev);
- return status;
-}
-
-static void ssam_base_hub_remove(struct ssam_device *sdev)
-{
- struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
-
- sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
-
- ssam_notifier_unregister(sdev->ctrl, &hub->notif);
- cancel_delayed_work_sync(&hub->update_work);
- ssam_remove_clients(&sdev->dev);
-}
-
-static const struct ssam_device_id ssam_base_hub_match[] = {
- { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
- { },
-};
-
-static struct ssam_device_driver ssam_base_hub_driver = {
- .probe = ssam_base_hub_probe,
- .remove = ssam_base_hub_remove,
- .match_table = ssam_base_hub_match,
- .driver = {
- .name = "surface_aggregator_base_hub",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &ssam_base_hub_pm_ops,
- },
-};
-
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
@@ -597,7 +366,7 @@ static int ssam_platform_hub_probe(struct platform_device *pdev)
set_secondary_fwnode(&pdev->dev, root);
- status = ssam_hub_register_clients(&pdev->dev, ctrl, root);
+ status = __ssam_register_clients(&pdev->dev, ctrl, root);
if (status) {
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
@@ -626,32 +395,7 @@ static struct platform_driver ssam_platform_hub_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
-
-
-/* -- Module initialization. ------------------------------------------------ */
-
-static int __init ssam_device_hub_init(void)
-{
- int status;
-
- status = platform_driver_register(&ssam_platform_hub_driver);
- if (status)
- return status;
-
- status = ssam_device_driver_register(&ssam_base_hub_driver);
- if (status)
- platform_driver_unregister(&ssam_platform_hub_driver);
-
- return status;
-}
-module_init(ssam_device_hub_init);
-
-static void __exit ssam_device_hub_exit(void)
-{
- ssam_device_driver_unregister(&ssam_base_hub_driver);
- platform_driver_unregister(&ssam_platform_hub_driver);
-}
-module_exit(ssam_device_hub_exit);
+module_platform_driver(ssam_platform_hub_driver);
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
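
Further client entries in this registry all follow the same two-step pattern seen in the hunks above: define a software node named after the device UID and list it in the relevant per-model node group. A sketch with a hypothetical UID; only the naming convention and the parent linkage are taken from the patch:

/* Hypothetical additional KIP client device (illustrative only). */
static const struct software_node ssam_node_example_device = {
	.name   = "ssam:01:15:02:07:00",	/* HID subsystem, instance 7 */
	.parent = &ssam_node_hub_kip,
};

Such a node would then be added to ssam_node_group_sp8[] ahead of the terminating NULL entry.
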
diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
new file mode 100644
index 000000000000..27d95a6a7851
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) tablet mode switch driver.
+ *
+ * Copyright (C) 2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic tablet switch driver framework. -------------------------- */
+
+struct ssam_tablet_sw;
+
+struct ssam_tablet_sw_ops {
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+};
+
+struct ssam_tablet_sw {
+ struct ssam_device *sdev;
+
+ u32 state;
+ struct work_struct update_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_tablet_sw_ops ops;
+ struct ssam_event_notifier notif;
+};
+
+struct ssam_tablet_sw_desc {
+ struct {
+ const char *name;
+ const char *phys;
+ } dev;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+ } ops;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+ const char *state = sw->ops.state_name(sw, sw->state);
+
+ return sysfs_emit(buf, "%s\n", state);
+}
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ssam_tablet_sw_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group ssam_tablet_sw_group = {
+ .attrs = ssam_tablet_sw_attrs,
+};
+
+static void ssam_tablet_sw_update_workfn(struct work_struct *work)
+{
+ struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work);
+ int tablet, status;
+ u32 state;
+
+ status = sw->ops.get_state(sw, &state);
+ if (status)
+ return;
+
+ if (sw->state == state)
+ return;
+ sw->state = state;
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = sw->ops.state_is_tablet_mode(sw, state);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(sw->mode_switch);
+}
+
+static int __maybe_unused ssam_tablet_sw_resume(struct device *dev)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+
+ schedule_work(&sw->update_work);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume);
+
+static int ssam_tablet_sw_probe(struct ssam_device *sdev)
+{
+ const struct ssam_tablet_sw_desc *desc;
+ struct ssam_tablet_sw *sw;
+ int tablet, status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return -ENOMEM;
+
+ sw->sdev = sdev;
+
+ sw->ops.get_state = desc->ops.get_state;
+ sw->ops.state_name = desc->ops.state_name;
+ sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode;
+
+ INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn);
+
+ ssam_device_set_drvdata(sdev, sw);
+
+ /* Get initial state. */
+ status = sw->ops.get_state(sw, &sw->state);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ sw->mode_switch = devm_input_allocate_device(&sdev->dev);
+ if (!sw->mode_switch)
+ return -ENOMEM;
+
+ sw->mode_switch->name = desc->dev.name;
+ sw->mode_switch->phys = desc->dev.phys;
+ sw->mode_switch->id.bustype = BUS_HOST;
+ sw->mode_switch->dev.parent = &sdev->dev;
+
+ tablet = sw->ops.state_is_tablet_mode(sw, sw->state);
+ input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+
+ status = input_register_device(sw->mode_switch);
+ if (status)
+ return status;
+
+ /* Set up notifier. */
+ sw->notif.base.priority = 0;
+ sw->notif.base.fn = desc->ops.notify;
+ sw->notif.event.reg = desc->event.reg;
+ sw->notif.event.id = desc->event.id;
+ sw->notif.event.mask = desc->event.mask;
+ sw->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_device_notifier_register(sdev, &sw->notif);
+ if (status)
+ return status;
+
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+ if (status)
+ goto err;
+
+ /* We might have missed events during setup, so check again. */
+ schedule_work(&sw->update_work);
+ return 0;
+
+err:
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+ return status;
+}
+
+static void ssam_tablet_sw_remove(struct ssam_device *sdev)
+{
+ struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev);
+
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+}
+
+
+/* -- SSAM KIP tablet switch implementation. -------------------------------- */
+
+#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d
+
+enum ssam_kip_cover_state {
+ SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01,
+ SSAM_KIP_COVER_STATE_CLOSED = 0x02,
+ SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
+ SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
+ SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
+};
+
+static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ return "disconnected";
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ return "closed";
+
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return "laptop";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ return "folded-canvas";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return "folded-back";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return true;
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return false;
+
+ default:
+		dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+ return true;
+ }
+}
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x1d,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, u32 *state)
+{
+ int status;
+ u8 raw;
+
+ status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw);
+ if (status < 0) {
+ dev_err(&sw->sdev->dev, "failed to query KIP lid state: %d\n", status);
+ return status;
+ }
+
+ *state = raw;
+ return 0;
+}
+
+static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface KIP Tablet Mode Switch",
+ .phys = "ssam/01:0e:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_kip_sw_notif,
+ .get_state = ssam_kip_get_cover_state,
+ .state_name = ssam_kip_cover_state_name,
+ .state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- SSAM POS tablet switch implementation. -------------------------------- */
+
+static bool tablet_mode_in_slate_state = true;
+module_param(tablet_mode_in_slate_state, bool, 0644);
+MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'");
+
+#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03
+#define SSAM_POS_MAX_SOURCES 4
+
+enum ssam_pos_state {
+ SSAM_POS_POSTURE_LID_CLOSED = 0x00,
+ SSAM_POS_POSTURE_LAPTOP = 0x01,
+ SSAM_POS_POSTURE_SLATE = 0x02,
+ SSAM_POS_POSTURE_TABLET = 0x03,
+};
+
+struct ssam_sources_list {
+ __le32 count;
+ __le32 id[SSAM_POS_MAX_SOURCES];
+} __packed;
+
+static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return "closed";
+
+ case SSAM_POS_POSTURE_LAPTOP:
+ return "laptop";
+
+ case SSAM_POS_POSTURE_SLATE:
+ return "slate";
+
+ case SSAM_POS_POSTURE_TABLET:
+ return "tablet";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LAPTOP:
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return false;
+
+ case SSAM_POS_POSTURE_SLATE:
+ return tablet_mode_in_slate_state;
+
+ case SSAM_POS_POSTURE_TABLET:
+ return true;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return true;
+ }
+}
+
+static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
+ rqst.target_category = SSAM_SSH_TC_POS;
+ rqst.target_id = 0x01;
+ rqst.command_id = 0x01;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = 0;
+ rqst.payload = NULL;
+
+ rsp.capacity = sizeof(*sources);
+ rsp.length = 0;
+ rsp.pointer = (u8 *)sources;
+
+ status = ssam_retry(ssam_request_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
+ if (status)
+ return status;
+
+ /* We need at least the 'sources->count' field. */
+ if (rsp.length < sizeof(__le32)) {
+ dev_err(&sw->sdev->dev, "received source list response is too small\n");
+ return -EPROTO;
+ }
+
+ /* Make sure 'sources->count' matches with the response length. */
+ if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) {
+ dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n");
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id)
+{
+ struct ssam_sources_list sources = {};
+ int status;
+
+ status = ssam_pos_get_sources_list(sw, &sources);
+ if (status)
+ return status;
+
+ if (get_unaligned_le32(&sources.count) == 0) {
+ dev_err(&sw->sdev->dev, "no posture sources found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently don't know what to do with more than one posture
+ * source. At the moment, only one source seems to be used/provided.
+ * The WARN_ON() here should hopefully let us know quickly once there
+ * is a device that provides multiple sources, at which point we can
+ * then try to figure out how to handle them.
+ */
+ WARN_ON(get_unaligned_le32(&sources.count) > 1);
+
+ *source_id = get_unaligned_le32(&sources.id[0]);
+ return 0;
+}
+
+SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, {
+ .target_category = SSAM_SSH_TC_POS,
+ .target_id = 0x01,
+ .command_id = 0x02,
+ .instance_id = 0x00,
+});
+
+static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture)
+{
+ __le32 source_le = cpu_to_le32(source_id);
+ __le32 rspval_le = 0;
+ int status;
+
+ status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl,
+ &source_le, &rspval_le);
+ if (status)
+ return status;
+
+ *posture = le32_to_cpu(rspval_le);
+ return 0;
+}
+
+static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, u32 *state)
+{
+ u32 source_id;
+ int status;
+
+ status = ssam_pos_get_source(sw, &source_id);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status);
+ return status;
+ }
+
+ status = ssam_pos_get_posture_for_source(sw, source_id, state);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n",
+ source_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length != sizeof(__le32) * 3)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface POS Tablet Mode Switch",
+ .phys = "ssam/01:26:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_pos_sw_notif,
+ .get_state = ssam_pos_get_posture,
+ .state_name = ssam_pos_state_name,
+ .state_is_tablet_mode = ssam_pos_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_POS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_tablet_sw_match[] = {
+ { SSAM_SDEV(KIP, 0x01, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
+ { SSAM_SDEV(POS, 0x01, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
+
+static struct ssam_device_driver ssam_tablet_sw_driver = {
+ .probe = ssam_tablet_sw_probe,
+ .remove = ssam_tablet_sw_remove,
+ .match_table = ssam_tablet_sw_match,
+ .driver = {
+ .name = "surface_aggregator_tablet_mode_switch",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_tablet_sw_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_tablet_sw_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module");
+MODULE_LICENSE("GPL");
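
The consistency check in ssam_pos_get_sources_list() ties the reported source count to the raw response length. A worked example for the common single-source case, using only the struct ssam_sources_list layout defined in the file above:

/*
 * Worked example (count == 1):
 *
 *   expected length = count * sizeof(__le32) + sizeof(__le32)
 *                   = 1 * 4 + 4
 *                   = 8 bytes  (the 'count' field plus one source ID)
 *
 * Any other rsp.length for count == 1 is rejected with -EPROTO.
 */
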
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 1203b9a82993..ed36944467f9 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -8,7 +8,7 @@
* acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
* use), or request detachment via user-space.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index ec66fde28e75..c219b840d491 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -4,7 +4,7 @@
* properly configuring the respective GPEs. Required for wakeup via lid on
* newer Intel-based Microsoft Surface devices.
*
- * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -172,6 +172,18 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
.driver_data = (void *)lid_device_props_l4D,
},
{
+ .ident = "Surface Laptop 4 (Intel 13\")",
+ .matches = {
+ /*
+ * We match for SKU here due to different variants: The
+ * AMD (15") version does not rely on GPEs.
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1950:1951"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
.ident = "Surface Laptop Studio",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
index cfcc15cfbacb..f004a2495201 100644
--- a/drivers/platform/surface/surface_hotplug.c
+++ b/drivers/platform/surface/surface_hotplug.c
@@ -10,7 +10,7 @@
* Event signaling is handled via ACPI, which will generate the appropriate
* device-check notifications to be picked up by the PCIe hot-plug driver.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index 6373d3b5eb7f..fbf2e11fd6ce 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -3,7 +3,7 @@
* Surface Platform Profile / Performance Mode driver for Surface System
* Aggregator Module (thermal subsystem).
*
- * Copyright (C) 2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2021-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bc4013e950ed..f2f98e942cf2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -177,17 +177,15 @@ config ACER_WIRELESS
config ACER_WMI
tristate "Acer WMI Laptop Extras"
- depends on ACPI
- select LEDS_CLASS
- select NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on SERIO_I8042
depends on INPUT
depends on RFKILL || RFKILL = n
depends on ACPI_WMI
+ select ACPI_VIDEO
select INPUT_SPARSEKMAP
- # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
- select ACPI_VIDEO if ACPI
+ select LEDS_CLASS
+ select NEW_LEDS
help
This is a driver for newer Acer (and Wistron) laptops. It adds
wireless radio and bluetooth control, and on some laptops,
@@ -196,32 +194,7 @@ config ACER_WMI
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
-config AMD_PMC
- tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS
- help
- The driver provides support for AMD Power Management Controller
- primarily responsible for S2Idle transactions that are driven from
- a platform firmware running on SMU. This driver also provides a debug
- mechanism to investigate the S2Idle transactions and failures.
-
- Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
-
- If you choose to compile this driver as a module the module will be
- called amd-pmc.
-
-config AMD_HSMP
- tristate "AMD HSMP Driver"
- depends on AMD_NB && X86_64
- help
- The driver provides a way for user space tools to monitor and manage
- system management functionality on EPYC server CPUs from AMD.
-
- Host System Management Port (HSMP) interface is a mailbox interface
- between the x86 core and the System Management Unit (SMU) firmware.
-
- If you choose to compile this driver as a module the module will be
- called amd_hsmp.
+source "drivers/platform/x86/amd/Kconfig"
config ADV_SWBUTTON
tristate "Advantech ACPI Software Button Driver"
@@ -300,6 +273,8 @@ config ASUS_WMI
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
select ACPI_PLATFORM_PROFILE
help
Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
@@ -1164,7 +1139,14 @@ config WINMATE_FM07_KEYS
endif # X86_PLATFORM_DEVICES
-config PMC_ATOM
- def_bool y
- depends on PCI
- select COMMON_CLK
+config P2SB
+ bool "Primary to Sideband (P2SB) bridge access support"
+ depends on PCI && X86
+ help
+	  The Primary to Sideband (P2SB) bridge is an interface to some
+	  PCI devices connected through it. In particular, the SPI NOR
+	  controller in the Intel Apollo Lake SoC is one such device.
+
+	  The main purpose of this library is to unhide the P2SB device on
+	  platforms where firmware keeps it hidden, so that the devices
+	  behind it can be accessed.
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 4a59f47a46e2..5a428caa654a 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -23,8 +23,7 @@ obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
# AMD
-obj-$(CONFIG_AMD_PMC) += amd-pmc.o
-obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-y += amd/
# Advantech
obj-$(CONFIG_ADV_SWBUTTON) += adv_swbutton.o
@@ -120,13 +119,17 @@ obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
# Intel uncore drivers
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
+# Intel miscellaneous drivers
+intel_p2sb-y := p2sb.o
+obj-$(CONFIG_P2SB) += intel_p2sb.o
+
# Intel PMIC / PMC / P-Unit devices
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
+obj-$(CONFIG_X86_INTEL_LPSS) += pmc_atom.o
# Siemens Simatic Industrial PCs
obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 9c6943e401a6..e0230ea0cb7e 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1615,12 +1615,7 @@ static int read_brightness(struct backlight_device *bd)
static int update_bl_status(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
+ int intensity = backlight_get_brightness(bd);
set_u32(intensity, ACER_CAP_BRIGHTNESS);
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 000000000000..c0d0a3c5170c
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+config AMD_PMC
+ tristate "AMD SoC PMC driver"
+ depends on ACPI && PCI && RTC_CLASS
+ help
+	  The driver provides support for the AMD Power Management Controller,
+	  which is primarily responsible for the S2Idle transactions driven by
+	  platform firmware running on the SMU. This driver also provides a
+	  debug mechanism to investigate S2Idle transactions and failures.
+
+	  Say Y or M here if you have a notebook powered by an AMD Ryzen CPU/APU.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd-pmc.
+
+config AMD_HSMP
+ tristate "AMD HSMP Driver"
+ depends on AMD_NB && X86_64
+ help
+ The driver provides a way for user space tools to monitor and manage
+ system management functionality on EPYC server CPUs from AMD.
+
+	  The Host System Management Port (HSMP) interface is a mailbox
+	  interface between the x86 core and the System Management Unit (SMU)
+	  firmware.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd_hsmp.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 000000000000..a03fbb08e808
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+amd-pmc-y := pmc.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd_hsmp-y := hsmp.o
+obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
diff --git a/drivers/platform/x86/amd_hsmp.c b/drivers/platform/x86/amd/hsmp.c
index a0c54b838c11..a0c54b838c11 100644
--- a/drivers/platform/x86/amd_hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd/pmc.c
index f11d18beac18..700eb19e8450 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -91,6 +91,8 @@
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
#define AMD_CPU_ID_YC 0x14B5
+#define AMD_CPU_ID_CB 0x14D8
+#define AMD_CPU_ID_PS 0x14E8
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
@@ -318,6 +320,8 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
break;
case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
break;
default:
@@ -491,7 +495,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&amd_pmc_idlemask_fops);
/* Enable STB only when the module_param is set */
if (enable_stb) {
- if (dev->cpu_id == AMD_CPU_ID_YC)
+ if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB ||
+ dev->cpu_id == AMD_CPU_ID_PS)
debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
&amd_pmc_stb_debugfs_fops_v2);
else
@@ -615,6 +620,8 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
return MSG_OS_HINT_PCO;
case AMD_CPU_ID_RN:
case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
return MSG_OS_HINT_RN;
}
return -EINVAL;
@@ -735,6 +742,8 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
#endif
static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
@@ -877,7 +886,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
- if (enable_stb && dev->cpu_id == AMD_CPU_ID_YC) {
+ if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
err = amd_pmc_s2d_init(dev);
if (err)
return err;
@@ -915,6 +924,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0005", 0},
{"AMDI0006", 0},
{"AMDI0007", 0},
+ {"AMDI0008", 0},
{"AMD0004", 0},
{"AMD0005", 0},
{ }
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 57553f9b4d1d..ffe98a18440b 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -291,10 +291,7 @@ static int gmux_get_brightness(struct backlight_device *bd)
static int gmux_update_status(struct backlight_device *bd)
{
struct apple_gmux_data *gmux_data = bl_get_data(bd);
- u32 brightness = bd->props.brightness;
-
- if (bd->props.state & BL_CORE_SUSPENDED)
- return 0;
+ u32 brightness = backlight_get_brightness(bd);
gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 57a07db659cb..478dd300b9c9 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -522,6 +522,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
@@ -574,6 +575,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
{ KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
{ KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
{ KE_KEY, 0xB5, { KEY_CALC } },
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 62ce198a3463..89b604e04d7f 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -208,6 +208,7 @@ struct asus_wmi {
int kbd_led_wk;
struct led_classdev lightbar_led;
int lightbar_led_wk;
+ struct led_classdev micmute_led;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct wlan_led_work;
@@ -1028,12 +1029,23 @@ static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
}
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int state = brightness != LED_OFF;
+ int err;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MICMUTE_LED, state, NULL);
+ return err < 0 ? err : 0;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
led_classdev_unregister(&asus->kbd_led);
led_classdev_unregister(&asus->tpd_led);
led_classdev_unregister(&asus->wlan_led);
led_classdev_unregister(&asus->lightbar_led);
+ led_classdev_unregister(&asus->micmute_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
@@ -1105,6 +1117,19 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
&asus->lightbar_led);
}
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
+ asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.max_brightness = 1;
+ asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ asus->micmute_led.brightness_set_blocking = micmute_led_set;
+ asus->micmute_led.default_trigger = "audio-micmute";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->micmute_led);
+ if (rv)
+ goto error;
+ }
+
error:
if (rv)
asus_wmi_led_exit(asus);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index ab610376fdad..0942f50bd793 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -324,9 +324,7 @@ static int bl_update_status(struct backlight_device *b)
if (ret)
return ret;
- set_backlight_state((b->props.power == FB_BLANK_UNBLANK)
- && !(b->props.state & BL_CORE_SUSPENDED)
- && !(b->props.state & BL_CORE_FBBLANK));
+ set_backlight_state(!backlight_is_blank(b));
return 0;
}
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index fe224a54f24c..25421e061c47 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -5,7 +5,6 @@
menuconfig X86_PLATFORM_DRIVERS_DELL
bool "Dell X86 Platform Specific Device Drivers"
- depends on X86_PLATFORM_DEVICES
help
Say Y here to get to see options for device drivers for various
Dell x86 platforms, including vendor-specific laptop extension drivers.
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 497ad2f64a51..5e7e6659a849 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -150,6 +150,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z490 AORUS ELITE AC"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
diff --git a/drivers/platform/x86/intel/atomisp2/led.c b/drivers/platform/x86/intel/atomisp2/led.c
index 5935dfca166f..10077a61d8c5 100644
--- a/drivers/platform/x86/intel/atomisp2/led.c
+++ b/drivers/platform/x86/intel/atomisp2/led.c
@@ -50,7 +50,8 @@ static const struct dmi_system_id atomisp2_led_systems[] __initconst = {
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+			/* Non-exact match, to also match the T100TAF */
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
.driver_data = &asus_t100ta_lookup,
},
diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig
index 7ce896434b8f..c341a27cc1a3 100644
--- a/drivers/platform/x86/intel/ifs/Kconfig
+++ b/drivers/platform/x86/intel/ifs/Kconfig
@@ -1,6 +1,9 @@
config INTEL_IFS
tristate "Intel In Field Scan"
depends on X86 && CPU_SUP_INTEL && 64BIT && SMP
+	# Discussion on the list has shown that the sysfs API needs a bit
+	# more work, so mark this as BROKEN for now
+ depends on BROKEN
select INTEL_IFS_DEVICE
help
Enable support for the In Field Scan capability in select
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 1c9e3f3ea41c..53d7fd2943b4 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -20,25 +20,16 @@
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
-/*
- * Early implementations of PMT on client platforms have some
- * differences from the server platforms (which use the Out Of Band
- * Management Services Module OOBMSM). This list tracks those
- * platforms as needed to handle those differences. Newer client
- * platforms are expected to be fully compatible with server.
- */
-static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
- { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
- { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
- { }
-};
-
bool intel_pmt_is_early_client_hw(struct device *dev)
{
- struct pci_dev *parent = to_pci_dev(dev->parent);
+ struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
- return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
+ /*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module OOBMSM).
+ */
+ return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index f73ecfd4a309..5e4009c05ecf 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -23,12 +23,19 @@
#define TELEM_GUID_OFFSET 0x4
#define TELEM_BASE_OFFSET 0x8
#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
+#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
/* size is in bytes */
#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)
/* Used by client hardware to identify a fixed telemetry entry*/
#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+enum telem_type {
+ TELEM_TYPE_PUNIT = 0,
+ TELEM_TYPE_CRASHLOG,
+ TELEM_TYPE_PUNIT_FIXED,
+};
+
struct pmt_telem_priv {
int num_entries;
struct intel_pmt_entry entry[];
@@ -39,10 +46,15 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
{
u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
- if (guid != TELEM_CLIENT_FIXED_BLOCK_GUID)
- return false;
+ if (intel_pmt_is_early_client_hw(dev)) {
+ u32 type = TELEM_TYPE(readl(entry->disc_table));
+
+ if ((type == TELEM_TYPE_PUNIT_FIXED) ||
+ (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
+ return true;
+ }
- return intel_pmt_is_early_client_hw(dev);
+ return false;
}
static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index e8424e70d81d..fd102678c75f 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -277,29 +277,38 @@ static int isst_if_get_platform_info(void __user *argp)
return 0;
}
+#define ISST_MAX_BUS_NUMBER 2
struct isst_if_cpu_info {
/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
- int bus_info[2];
- struct pci_dev *pci_dev[2];
+ int bus_info[ISST_MAX_BUS_NUMBER];
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
int punit_cpu_id;
int numa_node;
};
+struct isst_if_pkg_info {
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+};
+
static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
#define ISST_MAX_PCI_DOMAINS 8
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *matched_pci_dev = NULL;
struct pci_dev *pci_dev = NULL;
- int no_matches = 0;
+ int no_matches = 0, pkg_id;
int i, bus_number;
- if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
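+	/* Look up the package this CPU belongs to; matched PCI devices are cached per package below. */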
+ pkg_id = topology_physical_package_id(cpu);
+
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
if (bus_number < 0)
return NULL;
@@ -324,6 +333,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
}
if (node == isst_cpu_info[cpu].numa_node) {
+ isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
pci_dev = _pci_dev;
break;
}
@@ -342,6 +353,10 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
if (!pci_dev && no_matches == 1)
pci_dev = matched_pci_dev;
+ /* Return pci_dev pointer for any matched CPU in the package */
+ if (!pci_dev)
+ pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
return pci_dev;
}
@@ -361,8 +376,8 @@ struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *pci_dev;
- if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
@@ -417,10 +432,19 @@ static int isst_if_cpu_info_init(void)
if (!isst_cpu_info)
return -ENOMEM;
+ isst_pkg_info = kcalloc(topology_max_packages(),
+ sizeof(*isst_pkg_info),
+ GFP_KERNEL);
+ if (!isst_pkg_info) {
+ kfree(isst_cpu_info);
+ return -ENOMEM;
+ }
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/isst-if:online",
isst_if_cpu_online, NULL);
if (ret < 0) {
+ kfree(isst_pkg_info);
kfree(isst_cpu_info);
return ret;
}
@@ -433,6 +457,7 @@ static int isst_if_cpu_info_init(void)
static void isst_if_cpu_info_exit(void)
{
cpuhp_remove_state(isst_if_online_id);
+ kfree(isst_pkg_info);
kfree(isst_cpu_info);
};
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index bed436bf181f..bb81b8b1f7e9 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -15,6 +15,7 @@
#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/module.h>
@@ -30,9 +31,13 @@
#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
#define TABLE_OFFSET_SHIFT 3
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);
+static DEFINE_XARRAY_ALLOC(auxdev_array);
/**
* struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
@@ -54,12 +59,6 @@ struct intel_vsec_header {
u32 offset;
};
-/* Platform specific data */
-struct intel_vsec_platform_info {
- struct intel_vsec_header **capabilities;
- unsigned long quirks;
-};
-
enum intel_vsec_id {
VSEC_ID_TELEMETRY = 2,
VSEC_ID_WATCHER = 3,
@@ -138,7 +137,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
const char *name)
{
struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
- int ret;
+ int ret, id;
ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
if (ret < 0) {
@@ -165,14 +164,26 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
return ret;
}
- return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
+ ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux,
+ auxdev);
+ if (ret < 0)
+ return ret;
+
+ /* Add auxdev to list */
+ ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
- unsigned long quirks)
+ struct intel_vsec_platform_info *info)
{
struct intel_vsec_device *intel_vsec_dev;
struct resource *res, *tmp;
+ unsigned long quirks = info->quirks;
int i;
if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
@@ -216,7 +227,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
intel_vsec_dev->pcidev = pdev;
intel_vsec_dev->resource = res;
intel_vsec_dev->num_resources = header->num_entries;
- intel_vsec_dev->quirks = quirks;
+ intel_vsec_dev->info = info;
if (header->id == VSEC_ID_SDSI)
intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
@@ -226,14 +237,15 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
}
-static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
- struct intel_vsec_header **header)
+static bool intel_vsec_walk_header(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
+ struct intel_vsec_header **header = info->capabilities;
bool have_devices = false;
int ret;
for ( ; *header; header++) {
- ret = intel_vsec_add_dev(pdev, *header, quirks);
+ ret = intel_vsec_add_dev(pdev, *header, info);
if (ret)
dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
(*header)->id);
@@ -244,7 +256,8 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
return have_devices;
}
-static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
bool have_devices = false;
int pos = 0;
@@ -283,7 +296,7 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
header.id = PCI_DVSEC_HEADER2_ID(hdr);
- ret = intel_vsec_add_dev(pdev, &header, quirks);
+ ret = intel_vsec_add_dev(pdev, &header, info);
if (ret)
continue;
@@ -293,7 +306,8 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
return have_devices;
}
-static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
bool have_devices = false;
int pos = 0;
@@ -327,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
header.tbir = INTEL_DVSEC_TABLE_BAR(table);
header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
- ret = intel_vsec_add_dev(pdev, &header, quirks);
+ ret = intel_vsec_add_dev(pdev, &header, info);
if (ret)
continue;
@@ -341,25 +355,25 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct intel_vsec_platform_info *info;
bool have_devices = false;
- unsigned long quirks = 0;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
+ pci_save_state(pdev);
info = (struct intel_vsec_platform_info *)id->driver_data;
- if (info)
- quirks = info->quirks;
+ if (!info)
+ return -EINVAL;
- if (intel_vsec_walk_dvsec(pdev, quirks))
+ if (intel_vsec_walk_dvsec(pdev, info))
have_devices = true;
- if (intel_vsec_walk_vsec(pdev, quirks))
+ if (intel_vsec_walk_vsec(pdev, info))
have_devices = true;
if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
- intel_vsec_walk_header(pdev, quirks, info->capabilities))
+ intel_vsec_walk_header(pdev, info))
have_devices = true;
if (!have_devices)
@@ -370,7 +384,8 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
- .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG |
+ VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};
/* DG1 info */
@@ -390,26 +405,89 @@ static struct intel_vsec_header *dg1_capabilities[] = {
static const struct intel_vsec_platform_info dg1_info = {
.capabilities = dg1_capabilities,
- .quirks = VSEC_QUIRK_NO_DVSEC,
+ .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
- { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;
+
+	dev_info(&pdev->dev, "PCI error detected, state %d\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ status = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
+ const struct pci_device_id *pci_dev_id;
+ unsigned long index;
+
+ dev_info(&pdev->dev, "Resetting PCI slot\n");
+
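+	/* Give the slot time to settle after the reset before re-enabling the device. */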
+ msleep(2000);
+ if (pci_enable_device(pdev)) {
+ dev_info(&pdev->dev,
+ "Failed to re-enable PCI device after reset.\n");
+ goto out;
+ }
+
+ status = PCI_ERS_RESULT_RECOVERED;
+
+ xa_for_each(&auxdev_array, index, intel_vsec_dev) {
+ /* check if pdev doesn't match */
+		/* Skip auxiliary devices that do not belong to this PCI device */
+ continue;
+ devm_release_action(&pdev->dev, intel_vsec_remove_aux,
+ &intel_vsec_dev->auxdev);
+ }
+ pci_disable_device(pdev);
+ pci_restore_state(pdev);
+ pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
+ intel_vsec_pci_probe(pdev, pci_dev_id);
+
+out:
+ return status;
+}
+
+static void intel_vsec_pci_resume(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Done resuming PCI device\n");
+}
+
+static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
+ .error_detected = intel_vsec_pci_error_detected,
+ .slot_reset = intel_vsec_pci_slot_reset,
+ .resume = intel_vsec_pci_resume,
+};
+
static struct pci_driver intel_vsec_pci_driver = {
.name = "intel_vsec",
.id_table = intel_vsec_pci_ids,
.probe = intel_vsec_pci_probe,
+ .err_handler = &intel_vsec_pci_err_handlers,
};
module_pci_driver(intel_vsec_pci_driver);
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
index 4cc36678e8c5..3deeb05cf394 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/drivers/platform/x86/intel/vsec.h
@@ -20,6 +20,15 @@ enum intel_vsec_quirks {
/* DVSEC not present (provided in driver data) */
VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **capabilities;
+ unsigned long quirks;
};
struct intel_vsec_device {
@@ -27,7 +36,7 @@ struct intel_vsec_device {
struct pci_dev *pcidev;
struct resource *resource;
struct ida *ida;
- unsigned long quirks;
+ struct intel_vsec_platform_info *info;
int num_resources;
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 447044fdcb77..5e072a0666f4 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -34,6 +34,7 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
@@ -66,9 +67,15 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
+#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
+#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
+#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET 0x53
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET 0x54
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET 0x55
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET 0x56
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET 0x57
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
@@ -143,6 +150,7 @@
#define MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET 0xfa
#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
+#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -193,6 +201,7 @@
MLXPLAT_CPLD_AGGR_MASK_LC_ACT | \
MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
@@ -204,6 +213,7 @@
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
@@ -588,6 +598,15 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_asic_items_data[] = {
},
};
+static struct mlxreg_core_data mlxplat_mlxcpld_default_asic2_items_data[] = {
+ {
+ .label = "asic2",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
{
.data = mlxplat_mlxcpld_default_psu_items_data,
@@ -1151,6 +1170,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
.inversed = 0,
.health = true,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic2_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic2_items_data),
+ .inversed = 0,
+ .health = true,
+ }
};
static
@@ -1160,7 +1188,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
- .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
@@ -2004,6 +2032,38 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
+ {
+ .label = "global_wp_grant",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_nvlink_blade_items[] = {
+ {
+ .data = mlxplat_mlxcpld_global_wp_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_global_wp_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_nvlink_blade_data = {
+ .items = mlxplat_mlxcpld_nvlink_blade_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -2102,6 +2162,25 @@ static struct mlxreg_core_platform_data mlxplat_default_led_wc_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_wc_data),
};
+/* Platform led default data for water cooling Ethernet switch blade */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_eth_wc_blade_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_eth_wc_blade_data = {
+ .data = mlxplat_mlxcpld_default_led_eth_wc_blade_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_eth_wc_blade_data),
+};
+
/* Platform led MSN21xx system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_led_data[] = {
{
@@ -2857,6 +2936,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "asic_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "asic2_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -2996,6 +3087,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "asic2_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
.label = "fan_dir",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
.bit = GENMASK(7, 0),
@@ -3057,6 +3155,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
@@ -3535,6 +3639,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_modular_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
@@ -3547,6 +3657,209 @@ static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
+/* Platform register access for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_nvlink_blade_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_comex",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_voltmon_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_reload_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "global_wp_request",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "comm_chnl_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0200,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "global_wp_response",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_nvlink_blade_regs_io_data = {
+ .data = mlxplat_mlxcpld_nvlink_blade_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_regs_io_data),
+};
+
/* Platform FAN default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
@@ -3932,8 +4245,12 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
@@ -4023,9 +4340,15 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -4100,6 +4423,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
@@ -4150,9 +4474,15 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -4221,6 +4551,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
@@ -4417,6 +4748,31 @@ static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi
return 1;
}
+static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_eth_wc_blade_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
+
+ return 1;
+}
+
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
@@ -4579,6 +4935,28 @@ static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
return 1;
}
+static int __init mlxplat_dmi_nvlink_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_nvlink_blade_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_regs_io = &mlxplat_nvlink_blade_regs_io_data;
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
.callback = mlxplat_dmi_default_wc_matched,
@@ -4612,6 +4990,13 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_default_eth_wc_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI139"),
+ },
+ },
+ {
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
@@ -4642,6 +5027,12 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_nvlink_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0015"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -4830,22 +5221,20 @@ static int __init mlxplat_init(void)
nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
- priv->pdev_i2c = platform_device_register_resndata(
- &mlxplat_dev->dev, "i2c_mlxcpld",
- nr, mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_i2c, sizeof(*mlxplat_i2c));
+ priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
+ nr, mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_i2c, sizeof(*mlxplat_i2c));
if (IS_ERR(priv->pdev_i2c)) {
err = PTR_ERR(priv->pdev_i2c);
goto fail_alloc;
}
for (i = 0; i < mlxplat_mux_num; i++) {
- priv->pdev_mux[i] = platform_device_register_resndata(
- &priv->pdev_i2c->dev,
- "i2c-mux-reg", i, NULL,
- 0, &mlxplat_mux_data[i],
- sizeof(mlxplat_mux_data[i]));
+ priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
+ "i2c-mux-reg", i, NULL, 0,
+ &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
if (IS_ERR(priv->pdev_mux[i])) {
err = PTR_ERR(priv->pdev_mux[i]);
goto fail_platform_mux_register;
@@ -4853,16 +5242,18 @@ static int __init mlxplat_init(void)
}
/* Add hotplug driver */
- mlxplat_hotplug->regmap = priv->regmap;
- priv->pdev_hotplug = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-hotplug",
- PLATFORM_DEVID_NONE,
- mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_hotplug, sizeof(*mlxplat_hotplug));
- if (IS_ERR(priv->pdev_hotplug)) {
- err = PTR_ERR(priv->pdev_hotplug);
- goto fail_platform_mux_register;
+ if (mlxplat_hotplug) {
+ mlxplat_hotplug->regmap = priv->regmap;
+ priv->pdev_hotplug =
+ platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-hotplug", PLATFORM_DEVID_NONE,
+ mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_hotplug, sizeof(*mlxplat_hotplug));
+ if (IS_ERR(priv->pdev_hotplug)) {
+ err = PTR_ERR(priv->pdev_hotplug);
+ goto fail_platform_mux_register;
+ }
}
/* Set default registers. */
@@ -4875,24 +5266,26 @@ static int __init mlxplat_init(void)
}
/* Add LED driver. */
- mlxplat_led->regmap = priv->regmap;
- priv->pdev_led = platform_device_register_resndata(
- &mlxplat_dev->dev, "leds-mlxreg",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_led, sizeof(*mlxplat_led));
- if (IS_ERR(priv->pdev_led)) {
- err = PTR_ERR(priv->pdev_led);
- goto fail_platform_hotplug_register;
+ if (mlxplat_led) {
+ mlxplat_led->regmap = priv->regmap;
+ priv->pdev_led =
+ platform_device_register_resndata(&mlxplat_dev->dev, "leds-mlxreg",
+ PLATFORM_DEVID_NONE, NULL, 0, mlxplat_led,
+ sizeof(*mlxplat_led));
+ if (IS_ERR(priv->pdev_led)) {
+ err = PTR_ERR(priv->pdev_led);
+ goto fail_platform_hotplug_register;
+ }
}
/* Add registers io access driver. */
if (mlxplat_regs_io) {
mlxplat_regs_io->regmap = priv->regmap;
- priv->pdev_io_regs = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-io",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_regs_io,
- sizeof(*mlxplat_regs_io));
+ priv->pdev_io_regs = platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-io",
+ PLATFORM_DEVID_NONE, NULL,
+ 0, mlxplat_regs_io,
+ sizeof(*mlxplat_regs_io));
if (IS_ERR(priv->pdev_io_regs)) {
err = PTR_ERR(priv->pdev_io_regs);
goto fail_platform_led_register;
@@ -4902,11 +5295,10 @@ static int __init mlxplat_init(void)
/* Add FAN driver. */
if (mlxplat_fan) {
mlxplat_fan->regmap = priv->regmap;
- priv->pdev_fan = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-fan",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_fan,
- sizeof(*mlxplat_fan));
+ priv->pdev_fan = platform_device_register_resndata(&mlxplat_dev->dev, "mlxreg-fan",
+ PLATFORM_DEVID_NONE, NULL, 0,
+ mlxplat_fan,
+ sizeof(*mlxplat_fan));
if (IS_ERR(priv->pdev_fan)) {
err = PTR_ERR(priv->pdev_fan);
goto fail_platform_io_regs_register;
@@ -4920,11 +5312,10 @@ static int __init mlxplat_init(void)
for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
if (mlxplat_wd_data[j]) {
mlxplat_wd_data[j]->regmap = priv->regmap;
- priv->pdev_wd[j] = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlx-wdt",
- j, NULL, 0,
- mlxplat_wd_data[j],
- sizeof(*mlxplat_wd_data[j]));
+ priv->pdev_wd[j] =
+ platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", j,
+ NULL, 0, mlxplat_wd_data[j],
+ sizeof(*mlxplat_wd_data[j]));
if (IS_ERR(priv->pdev_wd[j])) {
err = PTR_ERR(priv->pdev_wd[j]);
goto fail_platform_wd_register;
@@ -4949,9 +5340,11 @@ fail_platform_io_regs_register:
if (mlxplat_regs_io)
platform_device_unregister(priv->pdev_io_regs);
fail_platform_led_register:
- platform_device_unregister(priv->pdev_led);
+ if (mlxplat_led)
+ platform_device_unregister(priv->pdev_led);
fail_platform_hotplug_register:
- platform_device_unregister(priv->pdev_hotplug);
+ if (mlxplat_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
fail_platform_mux_register:
while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
@@ -4974,8 +5367,10 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
platform_device_unregister(priv->pdev_io_regs);
- platform_device_unregister(priv->pdev_led);
- platform_device_unregister(priv->pdev_hotplug);
+ if (priv->pdev_led)
+ platform_device_unregister(priv->pdev_led);
+ if (priv->pdev_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
new file mode 100644
index 000000000000..fb2e141f3eb8
--- /dev/null
+++ b/drivers/platform/x86/p2sb.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation.
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Jonathan Yong <jonathan.yong@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define P2SBC 0xe0
+#define P2SBC_HIDE BIT(8)
+
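+/* P2SB slot and function per CPU model, encoded as PCI_DEVFN() in driver_data. */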
+static const struct x86_cpu_id p2sb_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, PCI_DEVFN(31, 1)),
+ {}
+};
+
+static int p2sb_get_devfn(unsigned int *devfn)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(p2sb_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ *devfn = (unsigned int)id->driver_data;
+ return 0;
+}
+
+static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+{
+ /* Copy resource from the first BAR of the device in question */
+ *mem = pdev->resource[0];
+ return 0;
+}
+
+static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_scan_single_device(bus, devfn);
+ if (!pdev)
+ return -ENODEV;
+
+ ret = p2sb_read_bar0(pdev, mem);
+
+ pci_stop_and_remove_bus_device(pdev);
+ return ret;
+}
+
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide it and hide it again to look up the BAR.
+ *
+ * If @bus is NULL, bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by the devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Locking is handled by pci_rescan_remove_lock mutex.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct pci_dev *pdev_p2sb;
+ unsigned int devfn_p2sb;
+ u32 value = P2SBC_HIDE;
+ int ret;
+
+ /* Get devfn for P2SB device itself */
+ ret = p2sb_get_devfn(&devfn_p2sb);
+ if (ret)
+ return ret;
+
+ /* if @bus is NULL, use bus 0 in domain 0 */
+ bus = bus ?: pci_find_bus(0, 0);
+
+ /*
+ * Prevent a concurrent PCI bus scan from seeing the P2SB device, and
+ * prevent its removal via sysfs, while it is temporarily exposed.
+ */
+ pci_lock_rescan_remove();
+
+ /* Unhide the P2SB device, if needed */
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+ pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+ if (devfn)
+ ret = p2sb_scan_and_read(bus, devfn, mem);
+ else
+ ret = p2sb_read_bar0(pdev_p2sb, mem);
+ pci_stop_and_remove_bus_device(pdev_p2sb);
+
+ /* Hide the P2SB device, if it was hidden */
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
+
+ pci_unlock_rescan_remove();
+
+ if (ret)
+ return ret;
+
+ if (mem->flags == 0)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(p2sb_bar);
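For orientation, the sketch below shows how a platform driver might consume the p2sb_bar() helper added above to map the sideband MMIO window; the devfn value and the example_* names are illustrative assumptions, not part of this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

static void __iomem *example_map_sideband(struct device *dev)
{
	struct resource mem;
	int ret;

	/* Ask the helper for BAR0 of a (hypothetical) hidden sideband function */
	ret = p2sb_bar(NULL, PCI_DEVFN(13, 0), &mem);
	if (ret)
		return ERR_PTR(ret);

	/* Map the window reported by the hidden device */
	return devm_ioremap(dev, mem.start, resource_size(&mem));
}

The helper unhides the device, reads the BAR and hides it again under the rescan/remove lock, so the caller only sees a filled-in struct resource.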
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 615e39cbbbf1..d9a095d2c0eb 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -998,19 +998,23 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pr_err("Couldn't retrieve BIOS data\n");
goto out_input;
}
- /* initialize backlight */
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
- pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
- &pcc_backlight_ops, &props);
- if (IS_ERR(pcc->backlight)) {
- result = PTR_ERR(pcc->backlight);
- goto out_input;
- }
- /* read the initial brightness setting from the hardware */
- pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ /* initialize backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
+
+ pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+ &pcc_backlight_ops, &props);
+ if (IS_ERR(pcc->backlight)) {
+ result = PTR_ERR(pcc->backlight);
+ goto out_input;
+ }
+
+ /* read the initial brightness setting from the hardware */
+ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ }
/* Reset initial sticky key mode since the hardware register state is not consistent */
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index b8b1ed1406de..154317e9910d 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -389,21 +389,16 @@ static const struct dmi_system_id critclk_systems[] = {
},
},
{
- /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
- .ident = "Lex 3I380D",
+ /*
+ * Lex System / Lex Computech Co. makes a lot of Bay Trail
+ * based embedded boards which often come with multiple
+ * ethernet controllers using multiple pmc_plt_clks. See:
+ * https://www.lex.com.tw/products/embedded-ipc-board/
+ */
+ .ident = "Lex BayTrail",
.callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
- DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Lex 2I385SW",
- .callback = dmi_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2I385SW"),
},
},
{
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 1e8063b7c169..5362f1a7b77c 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -61,36 +61,35 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
default:
return 0;
}
-
if (ret < 0)
- dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d: %d\n",
- inst->irq_idx, ret);
+ return dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d\n",
+ inst->irq_idx);
return ret;
}
static void smi_devs_unregister(struct smi *smi)
{
- while (smi->i2c_num > 0)
- i2c_unregister_device(smi->i2c_devs[--smi->i2c_num]);
+ while (smi->i2c_num--)
+ i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
- while (smi->spi_num > 0)
- spi_unregister_device(smi->spi_devs[--smi->spi_num]);
+ while (smi->spi_num--)
+ spi_unregister_device(smi->spi_devs[smi->spi_num]);
}
/**
* smi_spi_probe - Instantiate multiple SPI devices from inst array
* @pdev: Platform device
- * @adev: ACPI device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
 * Returns the number of SPI devices instantiated, zero if none are found, or a negative error code.
*/
-static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct spi_controller *ctlr;
struct spi_device *spi_dev;
char name[50];
@@ -99,8 +98,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
ret = acpi_spi_count_resources(adev);
if (ret < 0)
return ret;
- else if (!ret)
- return -ENODEV;
+ if (!ret)
+ return -ENOENT;
count = ret;
@@ -112,9 +111,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
spi_dev = acpi_spi_device_alloc(NULL, adev, i);
if (IS_ERR(spi_dev)) {
- ret = PTR_ERR(spi_dev);
- dev_err_probe(dev, ret, "failed to allocate SPI device %s from ACPI: %d\n",
- dev_name(&adev->dev), ret);
+ ret = dev_err_probe(dev, PTR_ERR(spi_dev), "failed to allocate SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
goto error;
}
@@ -135,9 +133,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
ret = spi_add_device(spi_dev);
if (ret) {
- dev_err_probe(&ctlr->dev, ret,
- "failed to add SPI device %s from ACPI: %d\n",
- dev_name(&adev->dev), ret);
+ dev_err_probe(&ctlr->dev, ret, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
spi_dev_put(spi_dev);
goto error;
}
@@ -166,25 +163,25 @@ error:
/**
* smi_i2c_probe - Instantiate multiple I2C devices from inst array
* @pdev: Platform device
- * @adev: ACPI device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
 * Returns the number of I2C devices instantiated, zero if none are found, or a negative error code.
*/
-static int smi_i2c_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct i2c_board_info board_info = {};
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
char name[32];
int i, ret, count;
ret = i2c_acpi_client_count(adev);
if (ret < 0)
return ret;
- else if (!ret)
- return -ENODEV;
+ if (!ret)
+ return -ENOENT;
count = ret;
@@ -230,12 +227,8 @@ static int smi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct smi_node *node;
- struct acpi_device *adev;
struct smi *smi;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ int ret;
node = device_get_match_data(dev);
if (!node) {
@@ -251,19 +244,25 @@ static int smi_probe(struct platform_device *pdev)
switch (node->bus_type) {
case SMI_I2C:
- return smi_i2c_probe(pdev, adev, smi, node->instances);
+ return smi_i2c_probe(pdev, smi, node->instances);
case SMI_SPI:
- return smi_spi_probe(pdev, adev, smi, node->instances);
+ return smi_spi_probe(pdev, smi, node->instances);
case SMI_AUTO_DETECT:
- if (i2c_acpi_client_count(adev) > 0)
- return smi_i2c_probe(pdev, adev, smi, node->instances);
- else
- return smi_spi_probe(pdev, adev, smi, node->instances);
+ /*
+ * For backwards-compatibility with existing nodes, I2C is
+ * checked first and, if such entries are found, ONLY I2C
+ * devices are created. Some nodes already handled by this
+ * driver may also contain unrelated SpiSerialBus entries
+ * that were previously ignored, and this check order
+ * preserves that behavior.
+ */
+ ret = smi_i2c_probe(pdev, smi, node->instances);
+ if (ret != -ENOENT)
+ return ret;
+ return smi_spi_probe(pdev, smi, node->instances);
default:
return -EINVAL;
}
-
- return 0; /* never reached */
}
static int smi_remove(struct platform_device *pdev)
@@ -325,10 +324,11 @@ static const struct smi_node cs35l41_hda = {
static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
- { "INT3515", (unsigned long)&int3515_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
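As an aside, the conversion above leans on dev_err_probe() returning the error code it logs, which lets each failure path collapse into a single return statement; a minimal sketch of the idiom, using a made-up helper name and nothing beyond the generic driver-model API:

#include <linux/device.h>
#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	/* Logs the failure (quietly for -EPROBE_DEFER) and returns irq unchanged */
	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "failed to get IRQ\n");

	return irq;
}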
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
index b599cda5ba3c..ca3647b751d5 100644
--- a/drivers/platform/x86/simatic-ipc.c
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -51,6 +51,7 @@ static int register_platform_devices(u32 station_id)
{
u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
+ char *pdevname = KBUILD_MODNAME "_leds";
int i;
platform_data.devmode = SIMATIC_IPC_DEVICE_NONE;
@@ -64,10 +65,12 @@ static int register_platform_devices(u32 station_id)
}
if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
+ if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ pdevname = KBUILD_MODNAME "_leds_gpio";
platform_data.devmode = ledmode;
ipc_led_platform_device =
platform_device_register_data(NULL,
- KBUILD_MODNAME "_leds", PLATFORM_DEVID_NONE,
+ pdevname, PLATFORM_DEVID_NONE,
&platform_data,
sizeof(struct simatic_ipc_platform));
if (IS_ERR(ipc_led_platform_device))
@@ -101,44 +104,6 @@ static int register_platform_devices(u32 station_id)
return 0;
}
-/* FIXME: this should eventually be done with generic P2SB discovery code
- * the individual drivers for watchdogs and LEDs access memory that implements
- * GPIO, but pinctrl will not come up because of missing ACPI entries
- *
- * While there is no conflict a cleaner solution would be to somehow bring up
- * pinctrl even with these ACPI entries missing, and base the drivers on pinctrl.
- * After which the following function could be dropped, together with the code
- * poking the memory.
- */
-/*
- * Get membase address from PCI, used in leds and wdt module. Here we read
- * the bar0. The final address calculation is done in the appropriate modules
- */
-u32 simatic_ipc_get_membase0(unsigned int p2sb)
-{
- struct pci_bus *bus;
- u32 bar0 = 0;
- /*
- * The GPIO memory is in bar0 of the hidden P2SB device.
- * Unhide the device to have a quick look at it, before we hide it
- * again.
- * Also grab the pci rescan lock so that device does not get discovered
- * and remapped while it is visible.
- * This code is inspired by drivers/mfd/lpc_ich.c
- */
- bus = pci_find_bus(0, 0);
- pci_lock_rescan_remove();
- pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x0);
- pci_bus_read_config_dword(bus, p2sb, PCI_BASE_ADDRESS_0, &bar0);
-
- bar0 &= ~0xf;
- pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x1);
- pci_unlock_rescan_remove();
-
- return bar0;
-}
-EXPORT_SYMBOL(simatic_ipc_get_membase0);
-
static int __init simatic_ipc_init_module(void)
{
const struct dmi_system_id *match;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d8d0c0bed5e9..07ef05f727a2 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4341,7 +4341,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
{
struct acpi_resource_irq *p = &resource->data.irq;
struct sony_pic_irq *interrupt = NULL;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
 * particularly those w/ _STA disabled
@@ -4374,11 +4374,6 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
struct acpi_resource_io *io = &resource->data.io;
struct sony_pic_ioport *ioport =
list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
- if (!io) {
- dprintk("Blank IO resource\n");
- return AE_OK;
- }
-
if (!ioport->io1.minimum) {
memcpy(&ioport->io1, io, sizeof(*io));
dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index 7299ad08c838..958df41ad509 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -339,7 +339,7 @@ static ssize_t kb_led_color_show(
struct led_classdev *led;
struct system76_data *data;
- led = (struct led_classdev *)dev->driver_data;
+ led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
return sysfs_emit(buf, "%06X\n", data->kb_color);
}
@@ -356,7 +356,7 @@ static ssize_t kb_led_color_store(
unsigned int val;
int ret;
- led = (struct led_classdev *)dev->driver_data;
+ led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
ret = kstrtouint(buf, 16, &val);
if (ret)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a8b383051528..22d4e8633e30 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -34,46 +34,51 @@
* thanks to Chris Wright <chrisw@osdl.org>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/sched/signal.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/nvram.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/sysfs.h>
+#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/fb.h>
-#include <linux/platform_device.h>
+#include <linux/freezer.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
#include <linux/input.h>
-#include <linux/leds.h>
-#include <linux/rfkill.h>
-#include <linux/dmi.h>
#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvram.h>
#include <linux/pci.h>
-#include <linux/power_supply.h>
+#include <linux/platform_device.h>
#include <linux/platform_profile.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/initval.h>
+#include <linux/power_supply.h>
+#include <linux/proc_fs.h>
+#include <linux/rfkill.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
#include <acpi/battery.h>
#include <acpi/video.h>
+
#include <drm/drm_privacy_screen_driver.h>
+
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+
#include "dual_accel_detect.h"
/* ThinkPad CMOS commands */
@@ -159,6 +164,7 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
+ TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -257,8 +263,6 @@ enum tpacpi_hkey_event_t {
#define TPACPI_DBG_BRGHT 0x0020
#define TPACPI_DBG_MIXER 0x0040
-#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
-#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
@@ -1312,9 +1316,7 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, struct seq_file *
return status;
}
- seq_printf(m, "status:\t\t%s\n",
- (status == TPACPI_RFK_RADIO_ON) ?
- "enabled" : "disabled");
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status == TPACPI_RFK_RADIO_ON));
seq_printf(m, "commands:\tenable, disable\n");
}
@@ -1341,8 +1343,7 @@ static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
if (status != -1) {
tpacpi_disclose_usertask("procfs", "attempt to %s %s\n",
- (status == TPACPI_RFK_RADIO_ON) ?
- "enable" : "disable",
+ str_enable_disable(status == TPACPI_RFK_RADIO_ON),
tpacpi_rfkill_names[id]);
res = (tpacpi_rfkill_switches[id]->ops->set_status)(status);
tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
@@ -3499,8 +3500,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!status;
- pr_info("radio switch found; radios are %s\n",
- enabled(status, 0));
+ pr_info("radio switch found; radios are %s\n", str_enabled_disabled(status & BIT(0)));
}
tabletsw_state = hotkey_init_tablet_mode();
@@ -3735,6 +3735,7 @@ static bool hotkey_notify_extended_hotkey(const u32 hkey)
switch (hkey) {
case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
+ case TP_HKEY_EV_AMT_TOGGLE:
tpacpi_driver_event(hkey);
return true;
}
@@ -4159,7 +4160,7 @@ static int hotkey_read(struct seq_file *m)
if (res)
return res;
- seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
if (hotkey_all_mask) {
seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
@@ -4292,9 +4293,8 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s bluetooth\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s bluetooth\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
@@ -4659,9 +4659,8 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s wwan\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s wwan\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
@@ -4837,9 +4836,8 @@ static int uwb_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s UWB\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s UWB\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
@@ -5193,11 +5191,11 @@ static int video_read(struct seq_file *m)
return autosw;
seq_printf(m, "status:\t\tsupported\n");
- seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
- seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
+ seq_printf(m, "lcd:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
+ seq_printf(m, "crt:\t\t%s\n", str_enabled_disabled(status & BIT(1)));
if (video_supported == TPACPI_VIDEO_NEW)
- seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
- seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
+ seq_printf(m, "dvi:\t\t%s\n", str_enabled_disabled(status & BIT(3)));
+ seq_printf(m, "auto:\t\t%s\n", str_enabled_disabled(autosw & BIT(0)));
seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
if (video_supported == TPACPI_VIDEO_NEW)
@@ -5628,7 +5626,7 @@ static int light_read(struct seq_file *m)
status = light_get_status();
if (status < 0)
return status;
- seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
+ seq_printf(m, "status:\t\t%s\n", str_on_off(status & BIT(0)));
seq_printf(m, "commands:\ton, off\n");
}
@@ -6084,9 +6082,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
return 0;
}
-#define str_led_status(s) \
- ((s) == TPACPI_LED_OFF ? "off" : \
- ((s) == TPACPI_LED_ON ? "on" : "blinking"))
+#define str_led_status(s) ((s) >= TPACPI_LED_BLINK ? "blinking" : str_on_off(s))
static int led_read(struct seq_file *m)
{
@@ -6103,8 +6099,7 @@ static int led_read(struct seq_file *m)
status = led_get_status(i);
if (status < 0)
return -EIO;
- seq_printf(m, "%d:\t\t%s\n",
- i, str_led_status(status));
+ seq_printf(m, "%d:\t\t%s\n", i, str_led_status(status));
}
}
@@ -6797,10 +6792,7 @@ static int brightness_set(unsigned int value)
static int brightness_update_status(struct backlight_device *bd)
{
- unsigned int level =
- (bd->props.fb_blank == FB_BLANK_UNBLANK &&
- bd->props.power == FB_BLANK_UNBLANK) ?
- bd->props.brightness : 0;
+ int level = backlight_get_brightness(bd);
dbg_printk(TPACPI_DBG_BRGHT,
"backlight: attempt to set level to %d\n",
@@ -6842,6 +6834,31 @@ static const struct backlight_ops ibm_backlight_data = {
/* --------------------------------------------------------------------- */
+static int __init tpacpi_evaluate_bcl(struct acpi_device *adev, void *not_used)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int rc;
+
+ status = acpi_evaluate_object(adev->handle, "_BCL", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ obj = buffer.pointer;
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE) {
+ acpi_handle_info(adev->handle,
+ "Unknown _BCL data, please report this to %s\n",
+ TPACPI_MAIL);
+ rc = 0;
+ } else {
+ rc = obj->package.count;
+ }
+ kfree(obj);
+
+ return rc;
+}
+
/*
* Call _BCL method of video device. On some ThinkPads this will
* switch the firmware to the ACPI brightness control mode.
@@ -6849,37 +6866,13 @@ static const struct backlight_ops ibm_backlight_data = {
static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_device *device, *child;
- int rc;
+ struct acpi_device *device;
device = acpi_fetch_acpi_dev(handle);
if (!device)
return 0;
- rc = 0;
- list_for_each_entry(child, &device->children, node) {
- acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
- NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- buffer.length = ACPI_ALLOCATE_BUFFER;
- continue;
- }
-
- obj = (union acpi_object *)buffer.pointer;
- if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- pr_err("Unknown _BCL data, please report this to %s\n",
- TPACPI_MAIL);
- rc = 0;
- } else {
- rc = obj->package.count;
- }
- break;
- }
-
- kfree(buffer.pointer);
- return rc;
+ return acpi_dev_for_each_child(device, tpacpi_evaluate_bcl, NULL);
}
@@ -7829,8 +7822,7 @@ static int volume_read(struct seq_file *m)
seq_printf(m, "level:\t\t%d\n",
status & TP_EC_AUDIO_LVL_MSK);
- seq_printf(m, "mute:\t\t%s\n",
- onoff(status, TP_EC_AUDIO_MUTESW));
+ seq_printf(m, "mute:\t\t%s\n", str_on_off(status & BIT(TP_EC_AUDIO_MUTESW)));
if (volume_control_allowed) {
seq_printf(m, "commands:\tunmute, mute\n");
@@ -9059,7 +9051,7 @@ static int fan_read(struct seq_file *m)
seq_printf(m, "status:\t\t%s\n"
"level:\t\t%d\n",
- (status != 0) ? "enabled" : "disabled", status);
+ str_enabled_disabled(status), status);
break;
case TPACPI_FAN_RD_TPEC:
@@ -9068,8 +9060,7 @@ static int fan_read(struct seq_file *m)
if (rc)
return rc;
- seq_printf(m, "status:\t\t%s\n",
- (status != 0) ? "enabled" : "disabled");
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status));
rc = fan_get_speed(&speed);
if (rc < 0)
@@ -10267,6 +10258,7 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
#define DYTC_FC_MMC 27 /* MMC Mode supported */
#define DYTC_FC_PSC 29 /* PSC Mode supported */
+#define DYTC_FC_AMT 31 /* AMT mode supported */
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
@@ -10279,6 +10271,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
#define DYTC_FUNCTION_MMC 11 /* Function = 11, MMC mode */
#define DYTC_FUNCTION_PSC 13 /* Function = 13, PSC mode */
+#define DYTC_FUNCTION_AMT 15 /* Function = 15, AMT mode */
+
+#define DYTC_MODE_AMT_ENABLE 0x1 /* Enable AMT (in balanced mode) */
+#define DYTC_MODE_AMT_DISABLE 0xF /* Disable AMT (in other modes) */
#define DYTC_MODE_MMC_PERFORM 2 /* High power mode aka performance */
#define DYTC_MODE_MMC_LOWPOWER 3 /* Low power mode */
@@ -10299,6 +10295,8 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0)
#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1)
+static int dytc_control_amt(bool enable);
+static bool dytc_amt_active;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
@@ -10381,6 +10379,30 @@ static int dytc_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int dytc_control_amt(bool enable)
+{
+ int dummy;
+ int err;
+ int cmd;
+
+ if (!(dytc_capabilities & BIT(DYTC_FC_AMT))) {
+ pr_warn("Attempting to toggle AMT on a system that doesn't advertise support\n");
+ return -ENODEV;
+ }
+
+ if (enable)
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_ENABLE, enable);
+ else
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_DISABLE, enable);
+
+ pr_debug("%sabling AMT (cmd 0x%x)", enable ? "en":"dis", cmd);
+ err = dytc_command(cmd, &dummy);
+ if (err)
+ return err;
+ dytc_amt_active = enable;
+ return 0;
+}
+
/*
* Helper function - check if we are in CQL mode and if we are
* - disable CQL,
@@ -10463,6 +10485,9 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
if (err)
goto unlock;
+ /* system supports AMT, activate it when on balanced */
+ if (dytc_capabilities & BIT(DYTC_FC_AMT))
+ dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
}
/* Success - update current profile */
dytc_current_profile = profile;
@@ -10567,6 +10592,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
+ /* Set AMT correctly now we know current profile */
+ if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
+ (dytc_capabilities & BIT(DYTC_FC_AMT)))
+ dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+
return 0;
}
@@ -11010,6 +11040,15 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
if (changed)
drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
}
+ if (hkey_event == TP_HKEY_EV_AMT_TOGGLE) {
+ /* If we're enabling AMT we need to force balanced mode */
+ if (!dytc_amt_active)
+ /* This will also set AMT mode enabled */
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+ else
+ dytc_control_amt(!dytc_amt_active);
+ }
+
}
static void hotkey_driver_event(const unsigned int scancode)
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index f446be72e539..480375977435 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -27,8 +27,8 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/lp855x.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
#include <linux/power/bq24190_charger.h>
+#include <linux/reboot.h>
#include <linux/rmi.h>
#include <linux/serdev.h>
#include <linux/spi/spi.h>
@@ -889,6 +889,7 @@ static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map =
"INT33FC:02", "pmu_clk2_grp", "pmu_clk");
static struct pinctrl *lenovo_yoga_tab2_830_1050_codec_pinctrl;
+static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler;
static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
{
@@ -933,9 +934,11 @@ err_put_device:
* followed by a normal 3 second press to recover. Avoid this by doing an EFI
* poweroff instead.
*/
-static void lenovo_yoga_tab2_830_1050_power_off(void)
+static int lenovo_yoga_tab2_830_1050_power_off(struct sys_off_data *data)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+
+ return NOTIFY_DONE;
}
static int __init lenovo_yoga_tab2_830_1050_init(void)
@@ -950,13 +953,19 @@ static int __init lenovo_yoga_tab2_830_1050_init(void)
if (ret)
return ret;
- pm_power_off = lenovo_yoga_tab2_830_1050_power_off;
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ lenovo_yoga_tab2_830_1050_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
+ lenovo_yoga_tab2_830_1050_power_off, NULL);
+ if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler))
+ return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler);
+
return 0;
}
static void lenovo_yoga_tab2_830_1050_exit(void)
{
- pm_power_off = NULL; /* Just turn poweroff into halt on module unload */
+ unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler);
if (lenovo_yoga_tab2_830_1050_codec_pinctrl) {
pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
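For reference, the sys-off handler API used above replaces direct assignment of the global pm_power_off hook; a hedged, self-contained sketch follows, where the example_* names and the priority are assumptions for illustration.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static struct sys_off_handler *example_handler;

static int example_power_off(struct sys_off_data *data)
{
	/* Platform-specific power-off sequence would run here */
	return NOTIFY_DONE;
}

static int __init example_init(void)
{
	example_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
						   SYS_OFF_PRIO_DEFAULT,
						   example_power_off, NULL);
	return PTR_ERR_OR_ZERO(example_handler);
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_sys_off_handler(example_handler);
}
module_exit(example_exit);

Unlike pm_power_off, several handlers can coexist, which is why the Yoga Tab change above registers at SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off.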
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 2fa0f7d55259..8f7695624c8c 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -17,6 +17,7 @@
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/pci.h>
+#include <linux/libata.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -322,8 +323,8 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
* treat the compatibility IRQs as busy.
*/
if ((progif & 0x5) != 0x5)
- if (pci_get_legacy_ide_irq(pci, 0) == irq ||
- pci_get_legacy_ide_irq(pci, 1) == irq) {
+ if (ATA_PRIMARY_IRQ(pci) == irq ||
+ ATA_SECONDARY_IRQ(pci) == irq) {
pnp_dbg(&pnp->dev, " legacy IDE device %s "
"using irq %d\n", pci_name(pci), irq);
return 1;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 4b563db3ab3e..a8c46ba5878f 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -297,4 +297,10 @@ config NVMEM_REBOOT_MODE
then the bootloader can read it and take different
action according to the mode.
+config POWER_MLXBF
+ tristate "Mellanox BlueField power handling driver"
+ depends on (GPIO_MLXBF2 && ACPI)
+ help
+ This driver supports reset or low power mode handling for Mellanox BlueField.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index f606a2f60539..0a39424fc558 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
obj-$(CONFIG_POWER_RESET_SC27XX) += sc27xx-poweroff.o
obj-$(CONFIG_NVMEM_REBOOT_MODE) += nvmem-reboot-mode.o
+obj-$(CONFIG_POWER_MLXBF) += pwr-mlxbf.o
diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
index 08d0a07b58ef..c7624d7611a7 100644
--- a/drivers/power/reset/arm-versatile-reboot.c
+++ b/drivers/power/reset/arm-versatile-reboot.c
@@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void)
versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
syscon_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 64def79d557a..741e44a017c3 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -17,10 +17,13 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/reset-controller.h>
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
+#include <dt-bindings/reset/sama7g5-reset.h>
+
#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */
#define AT91_RSTC_PERRST BIT(2) /* Peripheral Reset */
@@ -39,6 +42,17 @@
#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
+/**
+ * enum reset_type - reset types
+ * @RESET_TYPE_GENERAL: first power-up reset
+ * @RESET_TYPE_WAKEUP: return from backup mode
+ * @RESET_TYPE_WATCHDOG: watchdog fault
+ * @RESET_TYPE_SOFTWARE: processor reset required by software
+ * @RESET_TYPE_USER: NRST pin detected low
+ * @RESET_TYPE_CPU_FAIL: CPU clock failure detection
+ * @RESET_TYPE_XTAL_FAIL: 32KHz crystal failure detection fault
+ * @RESET_TYPE_ULP2: ULP2 reset
+ */
enum reset_type {
RESET_TYPE_GENERAL = 0,
RESET_TYPE_WAKEUP = 1,
@@ -50,15 +64,48 @@ enum reset_type {
RESET_TYPE_ULP2 = 8,
};
+/**
+ * struct at91_reset - AT91 reset specific data structure
+ * @rstc_base: base address for system reset
+ * @ramc_base: array with base addresses of RAM controllers
+ * @dev_base: base address for devices reset
+ * @sclk: slow clock
+ * @data: platform specific reset data
+ * @rcdev: reset controller device
+ * @lock: lock for devices reset register access
+ * @nb: reset notifier block
+ * @args: SoC specific system reset arguments
+ * @ramc_lpr: SDRAM Controller Low Power Register
+ */
struct at91_reset {
void __iomem *rstc_base;
void __iomem *ramc_base[2];
+ void __iomem *dev_base;
struct clk *sclk;
+ const struct at91_reset_data *data;
+ struct reset_controller_dev rcdev;
+ spinlock_t lock;
struct notifier_block nb;
u32 args;
u32 ramc_lpr;
};
+#define to_at91_reset(r) container_of(r, struct at91_reset, rcdev)
+
+/**
+ * struct at91_reset_data - AT91 reset data
+ * @reset_args: SoC specific system reset arguments
+ * @n_device_reset: number of device resets
+ * @device_reset_min_id: min id for device reset
+ * @device_reset_max_id: max id for device reset
+ */
+struct at91_reset_data {
+ u32 reset_args;
+ u32 n_device_reset;
+ u8 device_reset_min_id;
+ u8 device_reset_max_id;
+};
+
/*
* unless the SDRAM is cleanly shutdown before we hit the
* reset register it can be left driving the data bus and
@@ -95,7 +142,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
"r" (reset->rstc_base),
"r" (1),
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
- "r" (reset->args),
+ "r" (reset->data->reset_args),
"r" (reset->ramc_lpr)
: "r4");
@@ -153,34 +200,133 @@ static const struct of_device_id at91_ramc_of_match[] = {
{ /* sentinel */ }
};
+static const struct at91_reset_data sam9260 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data samx7 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data sama7g5 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+ .n_device_reset = 3,
+ .device_reset_min_id = SAMA7G5_RESET_USB_PHY1,
+ .device_reset_max_id = SAMA7G5_RESET_USB_PHY3,
+};
+
static const struct of_device_id at91_reset_of_match[] = {
{
.compatible = "atmel,at91sam9260-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST),
+ .data = &sam9260,
},
{
.compatible = "atmel,at91sam9g45-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,sama5d3-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,samx7-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
},
{
.compatible = "microchip,sam9x60-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
+ },
+ {
+ .compatible = "microchip,sama7g5-rstc",
+ .data = &sama7g5,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
+static int at91_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&reset->lock, flags);
+ val = readl_relaxed(reset->dev_base);
+ if (assert)
+ val |= BIT(id);
+ else
+ val &= ~BIT(id);
+ writel_relaxed(val, reset->dev_base);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int at91_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, true);
+}
+
+static int at91_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, false);
+}
+
+static int at91_reset_dev_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ u32 val;
+
+ val = readl_relaxed(reset->dev_base);
+
+ return !!(val & BIT(id));
+}
+
+static const struct reset_control_ops at91_reset_ops = {
+ .assert = at91_reset_assert,
+ .deassert = at91_reset_deassert,
+ .status = at91_reset_dev_status,
+};
+
+static int at91_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+
+ if (!reset->data->n_device_reset ||
+ (reset_spec->args[0] < reset->data->device_reset_min_id ||
+ reset_spec->args[0] > reset->data->device_reset_max_id))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+
+static int at91_rcdev_init(struct at91_reset *reset,
+ struct platform_device *pdev)
+{
+ if (!reset->data->n_device_reset)
+ return 0;
+
+ reset->dev_base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 1,
+ NULL);
+ if (IS_ERR(reset->dev_base))
+ return -ENODEV;
+
+ spin_lock_init(&reset->lock);
+ reset->rcdev.ops = &at91_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = reset->data->n_device_reset;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->rcdev.of_xlate = at91_reset_of_xlate;
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -212,10 +358,12 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
}
- match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+ reset->data = device_get_match_data(&pdev->dev);
+ if (!reset->data)
+ return -ENODEV;
+
reset->nb.notifier_call = at91_reset;
reset->nb.priority = 192;
- reset->args = (u32)match->data;
reset->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(reset->sclk))
@@ -229,6 +377,10 @@ static int __init at91_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, reset);
+ ret = at91_rcdev_init(reset, pdev);
+ if (ret)
+ goto disable_clk;
+
if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) {
u32 val = readl(reset->rstc_base + AT91_RSTC_MR);
@@ -237,14 +389,16 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
ret = register_restart_handler(&reset->nb);
- if (ret) {
- clk_disable_unprepare(reset->sclk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
at91_reset_status(pdev, reset->rstc_base);
return 0;
+
+disable_clk:
+ clk_disable_unprepare(reset->sclk);
+ return ret;
}
static int __exit at91_reset_remove(struct platform_device *pdev)
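For orientation, the device resets registered through at91_rcdev_init() above are consumed via the generic reset framework; a minimal consumer sketch follows, where the example_* name is an assumption and the reset line is looked up from the consumer's "resets" devicetree property.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_cycle_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	/* Resolves the reset specifier through the provider's of_xlate hook */
	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	return reset_control_deassert(rst);
}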
diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c
index 8eaa959d8be6..3de024e3ceb7 100644
--- a/drivers/power/reset/brcm-kona-reset.c
+++ b/drivers/power/reset/brcm-kona-reset.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2016 Broadcom
#include <linux/io.h>
#include <linux/of_address.h>
diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
index 884b53c483c0..0f2944dc9355 100644
--- a/drivers/power/reset/brcmstb-reboot.c
+++ b/drivers/power/reset/brcmstb-reboot.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2013 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013 Broadcom Corporation
#include <linux/bitops.h>
#include <linux/device.h>
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
new file mode 100644
index 000000000000..12dedf841a44
--- /dev/null
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/*
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/types.h>
+
+struct pwr_mlxbf {
+ struct work_struct send_work;
+ const char *hid;
+};
+
+static void pwr_mlxbf_send_work(struct work_struct *work)
+{
+ acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
+}
+
+static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
+{
+ const char *rst_pwr_hid = "MLNXBF24";
+ const char *low_pwr_hid = "MLNXBF29";
+ struct pwr_mlxbf *priv = ptr;
+
+ if (!strncmp(priv->hid, rst_pwr_hid, 8))
+ emergency_restart();
+
+ if (!strncmp(priv->hid, low_pwr_hid, 8))
+ schedule_work(&priv->send_work);
+
+ return IRQ_HANDLED;
+}
+
+static int pwr_mlxbf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct pwr_mlxbf *priv;
+ const char *hid;
+ int irq, err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENXIO;
+
+ hid = acpi_device_hid(adev);
+ priv->hid = hid;
+
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
+
+ err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev, irq, pwr_mlxbf_irq, 0, hid, priv);
+ if (err)
+ dev_err(dev, "Failed request of %s irq\n", priv->hid);
+
+ return err;
+}
+
+static const struct acpi_device_id __maybe_unused pwr_mlxbf_acpi_match[] = {
+ { "MLNXBF24", 0 },
+ { "MLNXBF29", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, pwr_mlxbf_acpi_match);
+
+static struct platform_driver pwr_mlxbf_driver = {
+ .driver = {
+ .name = "pwr_mlxbf",
+ .acpi_match_table = pwr_mlxbf_acpi_match,
+ },
+ .probe = pwr_mlxbf_probe,
+};
+
+module_platform_driver(pwr_mlxbf_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField power driver");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/power/supply/ab8500-chargalg.h b/drivers/power/supply/ab8500-chargalg.h
index f47a0061c36a..8534d067ba95 100644
--- a/drivers/power/supply/ab8500-chargalg.h
+++ b/drivers/power/supply/ab8500-chargalg.h
@@ -34,7 +34,6 @@ struct ux500_charger_ops {
* @max_out_volt_uv maximum output charger voltage in uV
* @max_out_curr_ua maximum output charger current in uA
* @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
*/
struct ux500_charger {
struct power_supply *psy;
@@ -43,9 +42,6 @@ struct ux500_charger {
int max_out_curr_ua;
int wdt_refresh;
bool enabled;
- bool external;
};
-extern struct blocking_notifier_head charger_notifier_list;
-
#endif /* _AB8500_CHARGALG_H_ */
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b7e842dff567..863fabe05bdc 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -697,7 +697,6 @@ static void ab8500_btemp_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->btemp_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_btemp_component_ops = {
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 431bbc352d1b..ae4be553f424 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -246,9 +246,6 @@ struct ab8500_chargalg {
struct kobject chargalg_kobject;
};
-/*External charger prepare notifier*/
-BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
-
/* Main battery properties */
static enum power_supply_property ab8500_chargalg_props[] = {
POWER_SUPPLY_PROP_STATUS,
@@ -343,8 +340,7 @@ static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
return di->usb_chg->ops.check_enable(di->usb_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
- } else if ((di->chg_info.charger_type & AC_CHG) &&
- !(di->ac_chg->external)) {
+ } else if (di->chg_info.charger_type & AC_CHG) {
return di->ac_chg->ops.check_enable(di->ac_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
@@ -473,15 +469,6 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
/* Check if charger exists and kick watchdog if charging */
if (di->ac_chg && di->ac_chg->ops.kick_wd &&
di->chg_info.online_chg & AC_CHG) {
- /*
- * If AB charger watchdog expired, pm2xxx charging
- * gets disabled. To be safe, kick both AB charger watchdog
- * and pm2xxx watchdog.
- */
- if (di->ac_chg->external &&
- di->usb_chg && di->usb_chg->ops.kick_wd)
- di->usb_chg->ops.kick_wd(di->usb_chg);
-
return di->ac_chg->ops.kick_wd(di->ac_chg);
} else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
di->chg_info.online_chg & USB_CHG)
@@ -517,14 +504,6 @@ static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
di->chg_info.ac_iset_ua = iset_ua;
di->chg_info.ac_vset_uv = vset_uv;
- /* Enable external charger */
- if (enable && di->ac_chg->external &&
- !ab8500_chargalg_ex_ac_enable_toggle) {
- blocking_notifier_call_chain(&charger_notifier_list,
- 0, di->dev);
- ab8500_chargalg_ex_ac_enable_toggle++;
- }
-
return di->ac_chg->ops.enable(di->ac_chg, enable, vset_uv, iset_ua);
}
@@ -1217,6 +1196,34 @@ static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
}
/**
+ * ab8500_chargalg_time_to_restart() - time to restart CC/CV charging?
+ * @di: charging algorithm state
+ *
+ * This checks if the voltage or capacity of the battery has fallen so
+ * low that we need to restart the CC/CV charge cycle.
+ */
+static bool ab8500_chargalg_time_to_restart(struct ab8500_chargalg *di)
+{
+ struct power_supply_battery_info *bi = di->bm->bi;
+
+ /* Sanity check - these need to have some reasonable values */
+ if (!di->batt_data.volt_uv || !di->batt_data.percent)
+ return false;
+
+ /* Some batteries tell us at which voltage we should restart charging */
+ if (bi->charge_restart_voltage_uv > 0) {
+ if (di->batt_data.volt_uv <= bi->charge_restart_voltage_uv)
+ return true;
+ /* Else we restart as we reach a certain capacity */
+ } else {
+ if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ab8500_chargalg_algorithm() - Main function for the algorithm
* @di: pointer to the ab8500_chargalg structure
*
@@ -1459,7 +1466,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
fallthrough;
case STATE_WAIT_FOR_RECHARGE:
- if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ if (ab8500_chargalg_time_to_restart(di))
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
break;
@@ -1486,6 +1493,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state A - battery getting old?\n");
+ }
break;
case STATE_MAINTENANCE_B_INIT:
@@ -1510,6 +1525,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state B - battery getting old?\n");
+ }
break;
case STATE_TEMP_LOWHIGH_INIT:
@@ -1746,7 +1769,6 @@ static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->chargalg_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_chargalg_component_ops = {
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index d04d087caa50..c19c50442761 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1716,29 +1716,6 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return ret;
}
-static int ab8500_external_charger_prepare(struct notifier_block *charger_nb,
- unsigned long event, void *data)
-{
- int ret;
- struct device *dev = data;
- /*Toggle External charger control pin*/
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_DISABLE_REG_VAL);
- if (ret < 0) {
- dev_err(dev, "write reg failed %d\n", ret);
- goto out;
- }
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_ENABLE_REG_VAL);
- if (ret < 0)
- dev_err(dev, "Write reg failed %d\n", ret);
-
-out:
- return ret;
-}
-
/**
* ab8500_charger_usb_check_enable() - enable usb charging
* @charger: pointer to the ux500_charger structure
@@ -3316,10 +3293,6 @@ static int __maybe_unused ab8500_charger_suspend(struct device *dev)
return 0;
}
-static struct notifier_block charger_nb = {
- .notifier_call = ab8500_external_charger_prepare,
-};
-
static char *supply_interface[] = {
"ab8500_chargalg",
"ab8500_fg",
@@ -3378,6 +3351,7 @@ static int ab8500_charger_bind(struct device *dev)
ret = component_bind_all(dev, di);
if (ret) {
dev_err(dev, "can't bind component devices\n");
+ destroy_workqueue(di->charger_wq);
return ret;
}
@@ -3404,8 +3378,6 @@ static void ab8500_charger_unbind(struct device *dev)
/* Delete the work queue */
destroy_workqueue(di->charger_wq);
- flush_scheduled_work();
-
/* Unbind fg, btemp, algorithm */
component_unbind_all(dev, di);
}
@@ -3540,7 +3512,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
*/
if (!is_ab8505(di->parent))
di->ac_chg.enabled = true;
- di->ac_chg.external = false;
/* USB supply */
/* ux500_charger sub-class */
@@ -3553,7 +3524,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.max_out_curr_ua =
ab8500_charge_output_curr_map[ARRAY_SIZE(ab8500_charge_output_curr_map) - 1];
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
- di->usb_chg.external = false;
di->usb_state.usb_current_ua = -1;
mutex_init(&di->charger_attached_mutex);
@@ -3677,17 +3647,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto remove_ab8500_bm;
}
- /* Notifier for external charger enabling */
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_register(
- &charger_notifier_list, &charger_nb);
-
-
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(di->usb_phy)) {
dev_err(dev, "failed to get usb transceiver\n");
ret = -EINVAL;
- goto out_charger_notifier;
+ goto remove_ab8500_bm;
}
di->nb.notifier_call = ab8500_charger_usb_notifier_call;
ret = usb_register_notifier(di->usb_phy, &di->nb);
@@ -3696,7 +3660,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto put_usb_phy;
}
-
ret = component_master_add_with_match(&pdev->dev,
&ab8500_charger_comp_ops,
match);
@@ -3711,10 +3674,6 @@ free_notifier:
usb_unregister_notifier(di->usb_phy, &di->nb);
put_usb_phy:
usb_put_phy(di->usb_phy);
-out_charger_notifier:
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
remove_ab8500_bm:
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
return ret;
@@ -3729,9 +3688,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
usb_unregister_notifier(di->usb_phy, &di->nb);
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
usb_put_phy(di->usb_phy);
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
return 0;
}
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index ec8a404d71b4..c6c9804280db 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -412,7 +412,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
* ab8500_fg_clear_cap_samples() - Clear average filter
* @di: pointer to the ab8500_fg structure
*
- * The capacity filter is is reset to zero.
+ * The capacity filter is reset to zero.
*/
static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
{
@@ -3148,6 +3148,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_init_hw_registers(di);
if (ret) {
dev_err(dev, "failed to initialize registers\n");
+ destroy_workqueue(di->fg_wq);
return ret;
}
@@ -3159,6 +3160,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
if (IS_ERR(di->fg_psy)) {
dev_err(dev, "failed to register FG psy\n");
+ destroy_workqueue(di->fg_wq);
return PTR_ERR(di->fg_psy);
}
@@ -3174,8 +3176,10 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
- if (irq < 0)
+ if (irq < 0) {
+ destroy_workqueue(di->fg_wq);
return irq;
+ }
ret = devm_request_threaded_irq(dev, irq, NULL,
ab8500_fg_irq[i].isr,
@@ -3185,6 +3189,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq[i].name, irq, ret);
+ destroy_workqueue(di->fg_wq);
return ret;
}
dev_dbg(dev, "Requested %s IRQ %d: %d\n",
@@ -3200,6 +3205,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_sysfs_init(di);
if (ret) {
dev_err(dev, "failed to create sysfs entry\n");
+ destroy_workqueue(di->fg_wq);
return ret;
}
@@ -3207,6 +3213,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to create FG psy\n");
ab8500_fg_sysfs_exit(di);
+ destroy_workqueue(di->fg_wq);
return ret;
}
@@ -3227,7 +3234,6 @@ static int ab8500_fg_remove(struct platform_device *pdev)
struct ab8500_fg *di = platform_get_drvdata(pdev);
destroy_workqueue(di->fg_wq);
- flush_scheduled_work();
component_del(&pdev->dev, &ab8500_fg_component_ops);
list_del(&di->node);
ab8500_fg_sysfs_exit(di);
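The destroy_workqueue() calls added above release the fuel-gauge workqueue on every early return from probe. The same cleanup is often expressed with a single goto unwind label instead of repeating the call; a minimal sketch of that pattern, with purely hypothetical helper names:

#include <linux/platform_device.h>
#include <linux/workqueue.h>

/* Hypothetical helpers standing in for the driver's setup steps. */
static int example_init_hw(void) { return 0; }
static int example_register_irqs(void) { return 0; }

static int example_probe(struct platform_device *pdev)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_ordered_workqueue("example_fg", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	ret = example_init_hw();
	if (ret)
		goto err_destroy_wq;

	ret = example_register_irqs();
	if (ret)
		goto err_destroy_wq;

	platform_set_drvdata(pdev, wq);
	return 0;

err_destroy_wq:
	destroy_workqueue(wq);
	return ret;
}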
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 96cb3290bcaa..ecba9ab86faf 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -287,7 +287,7 @@ static int bq24257_set_input_current_limit(struct bq24257_device *bq,
{
/*
* Address the case where the user manually sets an input current limit
- * while the charger auto-detection mechanism is is active. In this
+ * while the charger auto-detection mechanism is active. In this
* case we want to abort and go straight to the user-specified value.
*/
if (bq->iilimit_autoset_enable)
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index ae284bdd6cc3..d98d9244e394 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Battery driver for CPCAP PMIC
*
@@ -7,15 +8,6 @@
* drivers:
*
* Copyright (C) 2009-2010 Motorola, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
index 9fe6d826148d..1379afd9698d 100644
--- a/drivers/power/supply/cros_peripheral_charger.c
+++ b/drivers/power/supply/cros_peripheral_charger.c
@@ -63,7 +63,7 @@ static int cros_pchg_ec_command(const struct charger_data *charger,
struct cros_ec_command *msg;
int ret;
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
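The struct_size() helper used above computes the allocation size of a structure with a trailing flexible array and saturates on overflow, so kzalloc() fails cleanly instead of under-allocating. A small illustrative sketch of the idiom (the struct is hypothetical, not the EC message type):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_msg {
	u16 len;
	u8 data[];	/* flexible array member */
};

static struct example_msg *example_alloc(size_t payload)
{
	struct example_msg *m;

	/* struct_size(m, data, payload) == sizeof(*m) + payload, overflow-checked */
	m = kzalloc(struct_size(m, data, payload), GFP_KERNEL);
	return m;
}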
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index bf1754355c9f..a58d713d75ce 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -221,10 +221,8 @@ static int goldfish_battery_probe(struct platform_device *pdev)
}
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
ret = devm_request_irq(&pdev->dev, data->irq,
goldfish_battery_interrupt,
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 397e5a03b7d9..56c57529c228 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -376,7 +376,7 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
return 0;
}
- /* settting charging parameters */
+ /* setting charging parameters */
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 8b6c8cfa7503..4fed74511931 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -3,7 +3,7 @@
* max77976_charger.c - Driver for the Maxim MAX77976 battery charger
*
* Copyright (C) 2021 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -504,6 +504,6 @@ static struct i2c_driver max77976_driver = {
};
module_i2c_driver(max77976_driver);
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_DESCRIPTION("Maxim MAX77976 charger driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index e0476ec06601..a5da20ffd685 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -635,6 +635,7 @@ static int olpc_battery_probe(struct platform_device *pdev)
struct power_supply_config bat_psy_cfg = {};
struct power_supply_config ac_psy_cfg = {};
struct olpc_battery_data *data;
+ struct device_node *np;
uint8_t status;
uint8_t ecver;
int ret;
@@ -649,7 +650,9 @@ static int olpc_battery_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ np = of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec");
+ if (np) {
+ of_node_put(np);
/* XO 1.75 */
data->new_proto = true;
data->little_endian = true;
diff --git a/drivers/power/supply/pm2301_charger.h b/drivers/power/supply/pm2301_charger.h
deleted file mode 100644
index 74397e377982..000000000000
--- a/drivers/power/supply/pm2301_charger.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2012
- *
- * PM2301 power supply interface
- */
-
-#ifndef PM2301_CHARGER_H
-#define PM2301_CHARGER_H
-
-/* Watchdog timeout constant */
-#define WD_TIMER 0x30 /* 4min */
-#define WD_KICK_INTERVAL (30 * HZ)
-
-#define PM2XXX_NUM_INT_REG 0x6
-
-/* Constant voltage/current */
-#define PM2XXX_CONST_CURR 0x0
-#define PM2XXX_CONST_VOLT 0x1
-
-/* Lowest charger voltage is 3.39V -> 0x4E */
-#define LOW_VOLT_REG 0x4E
-
-#define PM2XXX_BATT_CTRL_REG1 0x00
-#define PM2XXX_BATT_CTRL_REG2 0x01
-#define PM2XXX_BATT_CTRL_REG3 0x02
-#define PM2XXX_BATT_CTRL_REG4 0x03
-#define PM2XXX_BATT_CTRL_REG5 0x04
-#define PM2XXX_BATT_CTRL_REG6 0x05
-#define PM2XXX_BATT_CTRL_REG7 0x06
-#define PM2XXX_BATT_CTRL_REG8 0x07
-#define PM2XXX_NTC_CTRL_REG1 0x08
-#define PM2XXX_NTC_CTRL_REG2 0x09
-#define PM2XXX_BATT_CTRL_REG9 0x0A
-#define PM2XXX_BATT_STAT_REG1 0x0B
-#define PM2XXX_INP_VOLT_VPWR2 0x11
-#define PM2XXX_INP_DROP_VPWR2 0x13
-#define PM2XXX_INP_VOLT_VPWR1 0x15
-#define PM2XXX_INP_DROP_VPWR1 0x17
-#define PM2XXX_INP_MODE_VPWR 0x18
-#define PM2XXX_BATT_WD_KICK 0x70
-#define PM2XXX_DEV_VER_STAT 0x0C
-#define PM2XXX_THERM_WARN_CTRL_REG 0x20
-#define PM2XXX_BATT_DISC_REG 0x21
-#define PM2XXX_BATT_LOW_LEV_COMP_REG 0x22
-#define PM2XXX_BATT_LOW_LEV_VAL_REG 0x23
-#define PM2XXX_I2C_PAD_CTRL_REG 0x24
-#define PM2XXX_SW_CTRL_REG 0x26
-#define PM2XXX_LED_CTRL_REG 0x28
-
-#define PM2XXX_REG_INT1 0x40
-#define PM2XXX_MASK_REG_INT1 0x50
-#define PM2XXX_SRCE_REG_INT1 0x60
-#define PM2XXX_REG_INT2 0x41
-#define PM2XXX_MASK_REG_INT2 0x51
-#define PM2XXX_SRCE_REG_INT2 0x61
-#define PM2XXX_REG_INT3 0x42
-#define PM2XXX_MASK_REG_INT3 0x52
-#define PM2XXX_SRCE_REG_INT3 0x62
-#define PM2XXX_REG_INT4 0x43
-#define PM2XXX_MASK_REG_INT4 0x53
-#define PM2XXX_SRCE_REG_INT4 0x63
-#define PM2XXX_REG_INT5 0x44
-#define PM2XXX_MASK_REG_INT5 0x54
-#define PM2XXX_SRCE_REG_INT5 0x64
-#define PM2XXX_REG_INT6 0x45
-#define PM2XXX_MASK_REG_INT6 0x55
-#define PM2XXX_SRCE_REG_INT6 0x65
-
-#define VPWR_OVV 0x0
-#define VSYSTEM_OVV 0x1
-
-/* control Reg 1 */
-#define PM2XXX_CH_RESUME_EN 0x1
-#define PM2XXX_CH_RESUME_DIS 0x0
-
-/* control Reg 2 */
-#define PM2XXX_CH_AUTO_RESUME_EN 0X2
-#define PM2XXX_CH_AUTO_RESUME_DIS 0X0
-#define PM2XXX_CHARGER_ENA 0x4
-#define PM2XXX_CHARGER_DIS 0x0
-
-/* control Reg 3 */
-#define PM2XXX_CH_WD_CC_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_CC_PHASE_5MIN 0x1
-#define PM2XXX_CH_WD_CC_PHASE_10MIN 0x2
-#define PM2XXX_CH_WD_CC_PHASE_30MIN 0x3
-#define PM2XXX_CH_WD_CC_PHASE_60MIN 0x4
-#define PM2XXX_CH_WD_CC_PHASE_120MIN 0x5
-#define PM2XXX_CH_WD_CC_PHASE_240MIN 0x6
-#define PM2XXX_CH_WD_CC_PHASE_360MIN 0x7
-
-#define PM2XXX_CH_WD_CV_PHASE_OFF (0x0<<3)
-#define PM2XXX_CH_WD_CV_PHASE_5MIN (0x1<<3)
-#define PM2XXX_CH_WD_CV_PHASE_10MIN (0x2<<3)
-#define PM2XXX_CH_WD_CV_PHASE_30MIN (0x3<<3)
-#define PM2XXX_CH_WD_CV_PHASE_60MIN (0x4<<3)
-#define PM2XXX_CH_WD_CV_PHASE_120MIN (0x5<<3)
-#define PM2XXX_CH_WD_CV_PHASE_240MIN (0x6<<3)
-#define PM2XXX_CH_WD_CV_PHASE_360MIN (0x7<<3)
-
-/* control Reg 4 */
-#define PM2XXX_CH_WD_PRECH_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_PRECH_PHASE_1MIN 0x1
-#define PM2XXX_CH_WD_PRECH_PHASE_5MIN 0x2
-#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
-#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
-#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
-#define PM2XXX_CH_WD_PRECH_PHASE_120MIN 0x6
-#define PM2XXX_CH_WD_PRECH_PHASE_240MIN 0x7
-
-/* control Reg 5 */
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN 0x1
-
-/* control Reg 6 */
-#define PM2XXX_DIR_CH_CC_CURRENT_MASK 0x0F
-#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
-#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
-#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
-#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
-#define PM2XXX_DIR_CH_CC_CURRENT_1000MA 0x5
-#define PM2XXX_DIR_CH_CC_CURRENT_1200MA 0x6
-#define PM2XXX_DIR_CH_CC_CURRENT_1400MA 0x7
-#define PM2XXX_DIR_CH_CC_CURRENT_1600MA 0x8
-#define PM2XXX_DIR_CH_CC_CURRENT_1800MA 0x9
-#define PM2XXX_DIR_CH_CC_CURRENT_2000MA 0xA
-#define PM2XXX_DIR_CH_CC_CURRENT_2200MA 0xB
-#define PM2XXX_DIR_CH_CC_CURRENT_2400MA 0xC
-#define PM2XXX_DIR_CH_CC_CURRENT_2600MA 0xD
-#define PM2XXX_DIR_CH_CC_CURRENT_2800MA 0xE
-#define PM2XXX_DIR_CH_CC_CURRENT_3000MA 0xF
-
-#define PM2XXX_CH_PRECH_CURRENT_MASK 0x30
-#define PM2XXX_CH_PRECH_CURRENT_25MA (0x0<<4)
-#define PM2XXX_CH_PRECH_CURRENT_50MA (0x1<<4)
-#define PM2XXX_CH_PRECH_CURRENT_75MA (0x2<<4)
-#define PM2XXX_CH_PRECH_CURRENT_100MA (0x3<<4)
-
-#define PM2XXX_CH_EOC_CURRENT_MASK 0xC0
-#define PM2XXX_CH_EOC_CURRENT_100MA (0x0<<6)
-#define PM2XXX_CH_EOC_CURRENT_150MA (0x1<<6)
-#define PM2XXX_CH_EOC_CURRENT_300MA (0x2<<6)
-#define PM2XXX_CH_EOC_CURRENT_400MA (0x3<<6)
-
-/* control Reg 7 */
-#define PM2XXX_CH_PRECH_VOL_2_5 0x0
-#define PM2XXX_CH_PRECH_VOL_2_7 0x1
-#define PM2XXX_CH_PRECH_VOL_2_9 0x2
-#define PM2XXX_CH_PRECH_VOL_3_1 0x3
-
-#define PM2XXX_CH_VRESUME_VOL_3_2 (0x0<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_4 (0x1<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_6 (0x2<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_8 (0x3<<2)
-
-/* control Reg 8 */
-#define PM2XXX_CH_VOLT_MASK 0x3F
-#define PM2XXX_CH_VOLT_3_5 0x0
-#define PM2XXX_CH_VOLT_3_5225 0x1
-#define PM2XXX_CH_VOLT_3_6 0x4
-#define PM2XXX_CH_VOLT_3_7 0x8
-#define PM2XXX_CH_VOLT_4_0 0x14
-#define PM2XXX_CH_VOLT_4_175 0x1B
-#define PM2XXX_CH_VOLT_4_2 0x1C
-#define PM2XXX_CH_VOLT_4_275 0x1F
-#define PM2XXX_CH_VOLT_4_3 0x20
-
-/*NTC control register 1*/
-#define PM2XXX_BTEMP_HIGH_TH_45 0x0
-#define PM2XXX_BTEMP_HIGH_TH_50 0x1
-#define PM2XXX_BTEMP_HIGH_TH_55 0x2
-#define PM2XXX_BTEMP_HIGH_TH_60 0x3
-#define PM2XXX_BTEMP_HIGH_TH_65 0x4
-
-#define PM2XXX_BTEMP_LOW_TH_N5 (0x0<<3)
-#define PM2XXX_BTEMP_LOW_TH_0 (0x1<<3)
-#define PM2XXX_BTEMP_LOW_TH_5 (0x2<<3)
-#define PM2XXX_BTEMP_LOW_TH_10 (0x3<<3)
-
-/*NTC control register 2*/
-#define PM2XXX_NTC_BETA_COEFF_3477 0x0
-#define PM2XXX_NTC_BETA_COEFF_3964 0x1
-
-#define PM2XXX_NTC_RES_10K (0x0<<2)
-#define PM2XXX_NTC_RES_47K (0x1<<2)
-#define PM2XXX_NTC_RES_100K (0x2<<2)
-#define PM2XXX_NTC_RES_NO_NTC (0x3<<2)
-
-/* control Reg 9 */
-#define PM2XXX_CH_CC_MODEDROP_EN 1
-#define PM2XXX_CH_CC_MODEDROP_DIS 0
-
-#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA (0x0<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA (0x1<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA (0x2<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT (0x3<<1)
-
-#define PM2XXX_CHARCHING_INFO_DIS (0<<3)
-#define PM2XXX_CHARCHING_INFO_EN (1<<3)
-
-#define PM2XXX_CH_150MV_DROP_300MV (0<<4)
-#define PM2XXX_CH_150MV_DROP_150MV (1<<4)
-
-
-/* charger status register */
-#define PM2XXX_CHG_STATUS_OFF 0x0
-#define PM2XXX_CHG_STATUS_ON 0x1
-#define PM2XXX_CHG_STATUS_FULL 0x2
-#define PM2XXX_CHG_STATUS_ERR 0x3
-#define PM2XXX_CHG_STATUS_WAIT 0x4
-#define PM2XXX_CHG_STATUS_NOBAT 0x5
-
-/* Input charger voltage VPWR2 */
-#define PM2XXX_VPWR2_OVV_6_0 0x0
-#define PM2XXX_VPWR2_OVV_6_3 0x1
-#define PM2XXX_VPWR2_OVV_10 0x2
-#define PM2XXX_VPWR2_OVV_NONE 0x3
-
-/* Input charger drop VPWR2 */
-#define PM2XXX_VPWR2_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR2_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR2_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR2_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR2_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR2_DROP_DIS (0x0<<2)
-
-/* Input charger voltage VPWR1 */
-#define PM2XXX_VPWR1_OVV_6_0 0x0
-#define PM2XXX_VPWR1_OVV_6_3 0x1
-#define PM2XXX_VPWR1_OVV_10 0x2
-#define PM2XXX_VPWR1_OVV_NONE 0x3
-
-/* Input charger drop VPWR1 */
-#define PM2XXX_VPWR1_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR1_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR1_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR1_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR1_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR1_DROP_DIS (0x0<<2)
-
-/* Battery low level comparator control register */
-#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
-#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
-
-/* Battery low level value control register */
-#define PM2XXX_VBAT_LOW_LEVEL_2_3 0x0
-#define PM2XXX_VBAT_LOW_LEVEL_2_4 0x1
-#define PM2XXX_VBAT_LOW_LEVEL_2_5 0x2
-#define PM2XXX_VBAT_LOW_LEVEL_2_6 0x3
-#define PM2XXX_VBAT_LOW_LEVEL_2_7 0x4
-#define PM2XXX_VBAT_LOW_LEVEL_2_8 0x5
-#define PM2XXX_VBAT_LOW_LEVEL_2_9 0x6
-#define PM2XXX_VBAT_LOW_LEVEL_3_0 0x7
-#define PM2XXX_VBAT_LOW_LEVEL_3_1 0x8
-#define PM2XXX_VBAT_LOW_LEVEL_3_2 0x9
-#define PM2XXX_VBAT_LOW_LEVEL_3_3 0xA
-#define PM2XXX_VBAT_LOW_LEVEL_3_4 0xB
-#define PM2XXX_VBAT_LOW_LEVEL_3_5 0xC
-#define PM2XXX_VBAT_LOW_LEVEL_3_6 0xD
-#define PM2XXX_VBAT_LOW_LEVEL_3_7 0xE
-#define PM2XXX_VBAT_LOW_LEVEL_3_8 0xF
-#define PM2XXX_VBAT_LOW_LEVEL_3_9 0x10
-#define PM2XXX_VBAT_LOW_LEVEL_4_0 0x11
-#define PM2XXX_VBAT_LOW_LEVEL_4_1 0x12
-#define PM2XXX_VBAT_LOW_LEVEL_4_2 0x13
-
-/* SW CTRL */
-#define PM2XXX_SWCTRL_HW 0x0
-#define PM2XXX_SWCTRL_SW 0x1
-
-
-/* LED Driver Control */
-#define PM2XXX_LED_CURRENT_MASK 0x0C
-#define PM2XXX_LED_CURRENT_2_5MA (0X0<<2)
-#define PM2XXX_LED_CURRENT_1MA (0X1<<2)
-#define PM2XXX_LED_CURRENT_5MA (0X2<<2)
-#define PM2XXX_LED_CURRENT_10MA (0X3<<2)
-
-#define PM2XXX_LED_SELECT_MASK 0x02
-#define PM2XXX_LED_SELECT_EN (0X0<<1)
-#define PM2XXX_LED_SELECT_DIS (0X1<<1)
-
-#define PM2XXX_ANTI_OVERSHOOT_MASK 0x01
-#define PM2XXX_ANTI_OVERSHOOT_DIS 0X0
-#define PM2XXX_ANTI_OVERSHOOT_EN 0X1
-
-enum pm2xxx_reg_int1 {
- PM2XXX_INT1_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_ITVBATLOWR = 0x04,
- PM2XXX_INT1_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_mask_reg_int1 {
- PM2XXX_INT1_M_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_M_ITVBATLOWR = 0x04,
- PM2XXX_INT1_M_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_source_reg_int1 {
- PM2XXX_INT1_S_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_S_ITVBATLOWR = 0x04,
- PM2XXX_INT1_S_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_reg_int2 {
- PM2XXX_INT2_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_mask_reg_int2 {
- PM2XXX_INT2_M_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_M_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_M_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_M_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_source_reg_int2 {
- PM2XXX_INT2_S_ITVPWR2PLUG = 0x03,
- PM2XXX_INT2_S_ITVPWR1PLUG = 0x0c,
-};
-
-enum pm2xxx_reg_int3 {
- PM2XXX_INT3_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_ITCHCCWD = 0x02,
- PM2XXX_INT3_ITCHCVWD = 0x04,
- PM2XXX_INT3_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_mask_reg_int3 {
- PM2XXX_INT3_M_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_M_ITCHCCWD = 0x02,
- PM2XXX_INT3_M_ITCHCVWD = 0x04,
- PM2XXX_INT3_M_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_source_reg_int3 {
- PM2XXX_INT3_S_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_S_ITCHCCWD = 0x02,
- PM2XXX_INT3_S_ITCHCVWD = 0x04,
- PM2XXX_INT3_S_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_reg_int4 {
- PM2XXX_INT4_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_ITCHARGINGON = 0x10,
- PM2XXX_INT4_ITVRESUME = 0x20,
- PM2XXX_INT4_ITBATTFULL = 0x40,
- PM2XXX_INT4_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_mask_reg_int4 {
- PM2XXX_INT4_M_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_M_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_M_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_M_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_M_ITCHARGINGON = 0x10,
- PM2XXX_INT4_M_ITVRESUME = 0x20,
- PM2XXX_INT4_M_ITBATTFULL = 0x40,
- PM2XXX_INT4_M_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_source_reg_int4 {
- PM2XXX_INT4_S_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_S_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_S_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_S_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_S_ITCHARGINGON = 0x10,
- PM2XXX_INT4_S_ITVRESUME = 0x20,
- PM2XXX_INT4_S_ITBATTFULL = 0x40,
- PM2XXX_INT4_S_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_reg_int5 {
- PM2XXX_INT5_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_mask_reg_int5 {
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_M_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_M_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_M_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_source_reg_int5 {
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_S_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_S_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_S_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_reg_int6 {
- PM2XXX_INT6_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_mask_reg_int6 {
- PM2XXX_INT6_M_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_M_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_M_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_M_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_M_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_M_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_source_reg_int6 {
- PM2XXX_INT6_S_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_S_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_S_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_S_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_S_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_S_ITVPWR1VALIDFALL = 0x20,
-};
-
-struct pm2xxx_charger_info {
- int charger_connected;
- int charger_online;
- int cv_active;
- bool wd_expired;
-};
-
-struct pm2xxx_charger_event_flags {
- bool mainextchnotok;
- bool main_thermal_prot;
- bool ovv;
- bool chgwdexp;
-};
-
-struct pm2xxx_interrupts {
- u8 reg[PM2XXX_NUM_INT_REG];
- int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
-};
-
-struct pm2xxx_config {
- struct i2c_client *pm2xxx_i2c;
- struct i2c_device_id *pm2xxx_id;
-};
-
-struct pm2xxx_irq {
- char *name;
- irqreturn_t (*isr)(int irq, void *data);
-};
-
-struct pm2xxx_charger {
- struct device *dev;
- u8 chip_id;
- bool vddadc_en_ac;
- struct pm2xxx_config config;
- bool ac_conn;
- unsigned int gpio_irq;
- int vbat;
- int old_vbat;
- int failure_case;
- int failure_input_ovv;
- unsigned int lpn_pin;
- struct pm2xxx_interrupts *pm2_int;
- struct regulator *regu;
- struct pm2xxx_bm_data *bat;
- struct mutex lock;
- struct ab8500 *parent;
- struct pm2xxx_charger_info ac;
- struct pm2xxx_charger_platform_data *pdata;
- struct workqueue_struct *charger_wq;
- struct delayed_work check_vbat_work;
- struct work_struct ac_work;
- struct work_struct check_main_thermal_prot_work;
- struct delayed_work check_hw_failure_work;
- struct ux500_charger ac_chg;
- struct power_supply_desc ac_chg_desc;
- struct pm2xxx_charger_event_flags flags;
-};
-
-#endif /* PM2301_CHARGER_H */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index fad5890c899e..4b5fb172fa99 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -263,13 +263,13 @@ static int power_supply_check_supplies(struct power_supply *psy)
return 0;
/* All supplies found, allocate char ** array for filling */
- psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(psy->supplied_from),
+ psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(*psy->supplied_from),
GFP_KERNEL);
if (!psy->supplied_from)
return -ENOMEM;
*psy->supplied_from = devm_kcalloc(&psy->dev,
- cnt - 1, sizeof(char *),
+ cnt - 1, sizeof(**psy->supplied_from),
GFP_KERNEL);
if (!*psy->supplied_from)
return -ENOMEM;
@@ -846,17 +846,17 @@ int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *t
{
int i, high, low;
- /* Break loop at table_len - 1 because that is the highest index */
- for (i = 0; i < table_len - 1; i++)
+ for (i = 0; i < table_len; i++)
if (temp > table[i].temp)
break;
/* The library function will deal with high == low */
- if ((i == 0) || (i == (table_len - 1)))
- high = i;
+ if (i == 0)
+ high = low = i;
+ else if (i == table_len)
+ high = low = i - 1;
else
- high = i - 1;
- low = i;
+ high = (low = i) - 1;
return fixp_linear_interpolate(table[low].temp,
table[low].resistance,
@@ -958,17 +958,17 @@ int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
{
int i, high, low;
- /* Break loop at table_len - 1 because that is the highest index */
- for (i = 0; i < table_len - 1; i++)
+ for (i = 0; i < table_len; i++)
if (ocv > table[i].ocv)
break;
/* The library function will deal with high == low */
- if ((i == 0) || (i == (table_len - 1)))
- high = i - 1;
+ if (i == 0)
+ high = low = i;
+ else if (i == table_len)
+ high = low = i - 1;
else
- high = i; /* i.e. i == 0 */
- low = i;
+ high = (low = i) - 1;
return fixp_linear_interpolate(table[low].ocv,
table[low].capacity,
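Both interpolation helpers above now walk the full table and clamp the chosen indices to the first or last entry when the lookup value falls outside it, so fixp_linear_interpolate() is handed high == low at the boundaries and returns that entry unchanged. A hedged sketch of the index selection, assuming a table whose key column is sorted in descending order as in these helpers:

/* Illustrative only: pick interpolation neighbours with boundary clamping. */
static void pick_neighbours(int value, const int *key, int table_len,
			    int *low, int *high)
{
	int i;

	for (i = 0; i < table_len; i++)
		if (value > key[i])
			break;

	if (i == 0) {
		*high = *low = 0;		/* above the first entry */
	} else if (i == table_len) {
		*high = *low = table_len - 1;	/* below the last entry */
	} else {
		*low = i;
		*high = i - 1;
	}
}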
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index 5ec2e6bb2465..540707882bb0 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -802,7 +802,7 @@ static int spwr_battery_register(struct spwr_battery_device *bat)
if (IS_ERR(bat->psy))
return PTR_ERR(bat->psy);
- return ssam_notifier_register(bat->sdev->ctrl, &bat->notif);
+ return ssam_device_notifier_register(bat->sdev, &bat->notif);
}
@@ -837,7 +837,7 @@ static void surface_battery_remove(struct ssam_device *sdev)
{
struct spwr_battery_device *bat = ssam_device_get_drvdata(sdev);
- ssam_notifier_unregister(sdev->ctrl, &bat->notif);
+ ssam_device_notifier_unregister(sdev, &bat->notif);
cancel_delayed_work_sync(&bat->update_work);
}
diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
index a060c36c7766..59182d55742d 100644
--- a/drivers/power/supply/surface_charger.c
+++ b/drivers/power/supply/surface_charger.c
@@ -216,7 +216,7 @@ static int spwr_ac_register(struct spwr_ac_device *ac)
if (IS_ERR(ac->psy))
return PTR_ERR(ac->psy);
- return ssam_notifier_register(ac->sdev->ctrl, &ac->notif);
+ return ssam_device_notifier_register(ac->sdev, &ac->notif);
}
@@ -251,7 +251,7 @@ static void surface_ac_remove(struct ssam_device *sdev)
{
struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
- ssam_notifier_unregister(sdev->ctrl, &ac->notif);
+ ssam_device_notifier_unregister(sdev, &ac->notif);
}
static const struct spwr_psy_properties spwr_psy_props_adp1 = {
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index f5eced0842b3..2ff7717530bf 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -53,7 +53,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
for (i = 0; i < pd->nr_perf_states; i++) {
- power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus;
+ power = pd->table[i].power * nr_cpus;
if (power > power_limit)
break;
@@ -63,42 +63,26 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
freq_qos_update_request(&dtpm_cpu->qos_req, freq);
- power_limit = pd->table[i - 1].power *
- MICROWATT_PER_MILLIWATT * nr_cpus;
+ power_limit = pd->table[i - 1].power * nr_cpus;
return power_limit;
}
static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
{
- unsigned long max = 0, sum_util = 0;
+ unsigned long max, sum_util = 0;
int cpu;
- for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
-
- /*
- * The capacity is the same for all CPUs belonging to
- * the same perf domain, so a single call to
- * arch_scale_cpu_capacity() is enough. However, we
- * need the CPU parameter to be initialized by the
- * loop, so the call ends up in this block.
- *
- * We can initialize 'max' with a cpumask_first() call
- * before the loop but the bits computation is not
- * worth given the arch_scale_cpu_capacity() just
- * returns a value where the resulting assembly code
- * will be optimized by the compiler.
- */
- max = arch_scale_cpu_capacity(cpu);
- sum_util += sched_cpu_util(cpu, max);
- }
-
/*
- * In the improbable case where all the CPUs of the perf
- * domain are offline, 'max' will be zero and will lead to an
- * illegal operation with a zero division.
+ * The capacity is the same for all CPUs belonging to
+ * the same perf domain.
*/
- return max ? (power * ((sum_util << 10) / max)) >> 10 : 0;
+ max = arch_scale_cpu_capacity(cpumask_first(pd_mask));
+
+ for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
+ sum_util += sched_cpu_util(cpu);
+
+ return (power * ((sum_util << 10) / max)) >> 10;
}
static u64 get_pd_power_uw(struct dtpm *dtpm)
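scale_pd_power_uw() now reads the CPU capacity once, since it is uniform across a performance domain, and scales the requested power by the summed utilisation using 10-bit fixed point. A minimal sketch of that arithmetic with hypothetical numbers:

/* Illustrative fixed-point scaling, mirroring the << 10 shifts above. */
static u64 scale_power_by_util(u64 power, unsigned long sum_util,
			       unsigned long max_capacity)
{
	/* e.g. power = 2000000 uW, sum_util = 512, max_capacity = 1024 -> 1000000 uW */
	return (power * ((sum_util << 10) / max_capacity)) >> 10;
}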
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index a9c99d9e8b42..21d624f9f5fb 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1109,6 +1109,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 9d23984d8931..bc6adda58883 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -140,7 +140,9 @@ static const struct x86_cpu_id pl4_support_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY },
{}
};
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 458218f88c5e..fe4971b65c64 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
depends on !S390
depends on COMMON_CLK
select NET_DEVLINK
+ select CRC16
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 82d31ba32690..8641fd060491 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -1,15 +1,5 @@
-/*
- * Copyright 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2017 Broadcom
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 4519ef42b458..e59ea2173aac 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
+#include <linux/bits.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -88,10 +89,10 @@ struct tod_reg {
#define TOD_CTRL_DISABLE_FMT_A BIT(17)
#define TOD_CTRL_DISABLE_FMT_B BIT(16)
#define TOD_CTRL_ENABLE BIT(0)
-#define TOD_CTRL_GNSS_MASK ((1U << 4) - 1)
+#define TOD_CTRL_GNSS_MASK GENMASK(3, 0)
#define TOD_CTRL_GNSS_SHIFT 24
-#define TOD_STATUS_UTC_MASK 0xff
+#define TOD_STATUS_UTC_MASK GENMASK(7, 0)
#define TOD_STATUS_UTC_VALID BIT(8)
#define TOD_STATUS_LEAP_ANNOUNCE BIT(12)
#define TOD_STATUS_LEAP_VALID BIT(16)
@@ -205,7 +206,7 @@ struct frequency_reg {
#define FREQ_STATUS_VALID BIT(31)
#define FREQ_STATUS_ERROR BIT(30)
#define FREQ_STATUS_OVERRUN BIT(29)
-#define FREQ_STATUS_MASK (BIT(24) - 1)
+#define FREQ_STATUS_MASK GENMASK(23, 0)
struct ptp_ocp_flash_info {
const char *name;
@@ -674,9 +675,9 @@ static const struct ocp_selector ptp_ocp_clock[] = {
{ }
};
+#define SMA_DISABLE BIT(16)
#define SMA_ENABLE BIT(15)
-#define SMA_SELECT_MASK ((1U << 15) - 1)
-#define SMA_DISABLE 0x10000
+#define SMA_SELECT_MASK GENMASK(14, 0)
static const struct ocp_selector ptp_ocp_sma_in[] = {
{ .name = "10Mhz", .value = 0x0000 },
@@ -2154,7 +2155,7 @@ ptp_ocp_fb_set_pins(struct ptp_ocp *bp)
struct ptp_pin_desc *config;
int i;
- config = kzalloc(sizeof(*config) * 4, GFP_KERNEL);
+ config = kcalloc(4, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
@@ -3440,7 +3441,7 @@ ptp_ocp_tod_status_show(struct seq_file *s, void *data)
val = ioread32(&bp->tod->utc_status);
seq_printf(s, "UTC status register: 0x%08X\n", val);
- seq_printf(s, "UTC offset: %d valid:%d\n",
+ seq_printf(s, "UTC offset: %ld valid:%d\n",
val & TOD_STATUS_UTC_MASK, val & TOD_STATUS_UTC_VALID ? 1 : 0);
seq_printf(s, "Leap second info valid:%d, Leap second announce %d\n",
val & TOD_STATUS_LEAP_VALID ? 1 : 0,
@@ -3700,10 +3701,8 @@ ptp_ocp_detach(struct ptp_ocp *bp)
serial8250_unregister_port(bp->mac_port);
if (bp->nmea_port != -1)
serial8250_unregister_port(bp->nmea_port);
- if (bp->spi_flash)
- platform_device_unregister(bp->spi_flash);
- if (bp->i2c_ctrl)
- platform_device_unregister(bp->i2c_ctrl);
+ platform_device_unregister(bp->spi_flash);
+ platform_device_unregister(bp->i2c_ctrl);
if (bp->i2c_clk)
clk_hw_unregister_fixed_rate(bp->i2c_clk);
if (bp->n_irqs)
@@ -3773,7 +3772,6 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out:
ptp_ocp_detach(bp);
- pci_set_drvdata(pdev, NULL);
out_disable:
pci_disable_device(pdev);
out_free:
@@ -3789,7 +3787,6 @@ ptp_ocp_remove(struct pci_dev *pdev)
devlink_unregister(devlink);
ptp_ocp_detach(bp);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
devlink_free(devlink);
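The mask conversions in ptp_ocp.c above rely on GENMASK(h, l) from <linux/bits.h>, which expands to a contiguous mask covering bits l through h: GENMASK(3, 0) is 0xf, GENMASK(7, 0) is 0xff and GENMASK(23, 0) is 0xffffff, matching the open-coded (1U << n) - 1 and 0xff values they replace. A small userspace illustration of the same arithmetic for 32-bit masks:

#include <stdint.h>
#include <stdio.h>

static uint32_t genmask32(unsigned int h, unsigned int l)
{
	return ((~0u) >> (31 - h)) & ((~0u) << l);
}

int main(void)
{
	printf("0x%x 0x%x 0x%x\n",
	       genmask32(3, 0),		/* 0xf      */
	       genmask32(7, 0),		/* 0xff     */
	       genmask32(23, 0));	/* 0xffffff */
	return 0;
}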
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 904de8d61828..60d13a949bc5 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -140,6 +140,16 @@ config PWM_BRCMSTB
To compile this driver as a module, choose M Here: the module
will be called pwm-brcmstb.c.
+config PWM_CLK
+ tristate "Clock based PWM support"
+ depends on HAVE_CLK || COMPILE_TEST
+ help
+ Generic PWM framework driver for outputs that can be
+ muxed to clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-clk.
+
config PWM_CLPS711X
tristate "CLPS711X PWM support"
depends on ARCH_CLPS711X || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 5c08bdb817b4..7bf1a29f02b8 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BERLIN) += pwm-berlin.o
obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o
+obj-$(CONFIG_PWM_CLK) += pwm-clk.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index c7552df32082..0e042410f6b9 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -235,18 +235,8 @@ EXPORT_SYMBOL_GPL(pwm_get_chip_data);
static bool pwm_ops_check(const struct pwm_chip *chip)
{
-
const struct pwm_ops *ops = chip->ops;
- /* driver supports legacy, non-atomic operation */
- if (ops->config && ops->enable && ops->disable) {
- if (IS_ENABLED(CONFIG_PWM_DEBUG))
- dev_warn(chip->dev,
- "Driver needs updating to atomic API\n");
-
- return true;
- }
-
if (!ops->apply)
return false;
@@ -548,73 +538,6 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
}
}
-static int pwm_apply_legacy(struct pwm_chip *chip, struct pwm_device *pwm,
- const struct pwm_state *state)
-{
- int err;
- struct pwm_state initial_state = pwm->state;
-
- if (state->polarity != pwm->state.polarity) {
- if (!chip->ops->set_polarity)
- return -EINVAL;
-
- /*
- * Changing the polarity of a running PWM is only allowed when
- * the PWM driver implements ->apply().
- */
- if (pwm->state.enabled) {
- chip->ops->disable(chip, pwm);
-
- /*
- * Update pwm->state already here in case
- * .set_polarity() or another callback depend on that.
- */
- pwm->state.enabled = false;
- }
-
- err = chip->ops->set_polarity(chip, pwm, state->polarity);
- if (err)
- goto rollback;
-
- pwm->state.polarity = state->polarity;
- }
-
- if (!state->enabled) {
- if (pwm->state.enabled)
- chip->ops->disable(chip, pwm);
-
- return 0;
- }
-
- /*
- * We cannot skip calling ->config even if state->period ==
- * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
- * because we might have exited early in the last call to
- * pwm_apply_state because of !state->enabled and so the two values in
- * pwm->state might not be configured in hardware.
- */
- err = chip->ops->config(pwm->chip, pwm,
- state->duty_cycle,
- state->period);
- if (err)
- goto rollback;
-
- pwm->state.period = state->period;
- pwm->state.duty_cycle = state->duty_cycle;
-
- if (!pwm->state.enabled) {
- err = chip->ops->enable(chip, pwm);
- if (err)
- goto rollback;
- }
-
- return 0;
-
-rollback:
- pwm->state = initial_state;
- return err;
-}
-
/**
* pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
@@ -647,10 +570,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
state->usage_power == pwm->state.usage_power)
return 0;
- if (chip->ops->apply)
- err = chip->ops->apply(chip, pwm, state);
- else
- err = pwm_apply_legacy(chip, pwm, state);
+ err = chip->ops->apply(chip, pwm, state);
if (err)
return err;
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 3977a0f9d132..2837b4ce8053 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -304,7 +304,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* Find best clk divisor:
* the smallest divisor which can fulfill the period_ns requirements.
- * If there is a gclk, the first divisor is actuallly the gclk selector
+ * If there is a gclk, the first divisor is actually the gclk selector
*/
if (tcbpwmc->gclk)
i = 1;
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 0226bf697f09..7251037d4dd5 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2016 Broadcom
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index f171169c1c1f..4fa6e249e4cf 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c
new file mode 100644
index 000000000000..c2a503d684a7
--- /dev/null
+++ b/drivers/pwm/pwm-clk.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock based PWM controller
+ *
+ * Copyright (c) 2021 Nikita Travkin <nikita@trvn.ru>
+ *
+ * This is an "adapter" driver that allows PWM consumers to use
+ * system clocks with duty cycle control as PWM outputs.
+ *
+ * Limitations:
+ * - Due to the fact that exact behavior depends on the underlying
+ * clock driver, various limitations are possible.
+ * - Underlying clock may not be able to give 0% or 100% duty cycle
+ * (constant off or on), exact behavior will depend on the clock.
+ * - When the PWM is disabled, the clock will be disabled as well,
+ * line state will depend on the clock.
+ * - The clk API doesn't expose the necessary calls to implement
+ * .get_state().
+ */
+
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pwm.h>
+
+struct pwm_clk_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ bool clk_enabled;
+};
+
+#define to_pwm_clk_chip(_chip) container_of(_chip, struct pwm_clk_chip, chip)
+
+static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct pwm_clk_chip *pcchip = to_pwm_clk_chip(chip);
+ int ret;
+ u32 rate;
+ u64 period = state->period;
+ u64 duty_cycle = state->duty_cycle;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled) {
+ clk_disable(pcchip->clk);
+ pcchip->clk_enabled = false;
+ }
+ return 0;
+ } else if (!pwm->state.enabled) {
+ ret = clk_enable(pcchip->clk);
+ if (ret)
+ return ret;
+ pcchip->clk_enabled = true;
+ }
+
+ /*
+ * We have to enable the clk before setting the rate and duty_cycle,
+ * that however results in a window where the clk is on with a
+ * (potentially) different setting. Also setting period and duty_cycle
+ * are two separate calls, so that probably isn't atomic either.
+ */
+
+ rate = DIV64_U64_ROUND_UP(NSEC_PER_SEC, period);
+ ret = clk_set_rate(pcchip->clk, rate);
+ if (ret)
+ return ret;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ duty_cycle = period - duty_cycle;
+
+ return clk_set_duty_cycle(pcchip->clk, duty_cycle, period);
+}
+
+static const struct pwm_ops pwm_clk_ops = {
+ .apply = pwm_clk_apply,
+ .owner = THIS_MODULE,
+};
+
+static int pwm_clk_probe(struct platform_device *pdev)
+{
+ struct pwm_clk_chip *pcchip;
+ int ret;
+
+ pcchip = devm_kzalloc(&pdev->dev, sizeof(*pcchip), GFP_KERNEL);
+ if (!pcchip)
+ return -ENOMEM;
+
+ pcchip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pcchip->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pcchip->clk),
+ "Failed to get clock\n");
+
+ pcchip->chip.dev = &pdev->dev;
+ pcchip->chip.ops = &pwm_clk_ops;
+ pcchip->chip.npwm = 1;
+
+ ret = clk_prepare(pcchip->clk);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to prepare clock\n");
+
+ ret = pwmchip_add(&pcchip->chip);
+ if (ret < 0) {
+ clk_unprepare(pcchip->clk);
+ return dev_err_probe(&pdev->dev, ret, "Failed to add pwm chip\n");
+ }
+
+ platform_set_drvdata(pdev, pcchip);
+ return 0;
+}
+
+static int pwm_clk_remove(struct platform_device *pdev)
+{
+ struct pwm_clk_chip *pcchip = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&pcchip->chip);
+
+ if (pcchip->clk_enabled)
+ clk_disable(pcchip->clk);
+
+ clk_unprepare(pcchip->clk);
+
+ return 0;
+}
+
+static const struct of_device_id pwm_clk_dt_ids[] = {
+ { .compatible = "clk-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pwm_clk_dt_ids);
+
+static struct platform_driver pwm_clk_driver = {
+ .driver = {
+ .name = "pwm-clk",
+ .of_match_table = pwm_clk_dt_ids,
+ },
+ .probe = pwm_clk_probe,
+ .remove = pwm_clk_remove,
+};
+module_platform_driver(pwm_clk_driver);
+
+MODULE_ALIAS("platform:pwm-clk");
+MODULE_AUTHOR("Nikita Travkin <nikita@trvn.ru>");
+MODULE_DESCRIPTION("Clock based PWM driver");
+MODULE_LICENSE("GPL");
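To a consumer, the new clock-backed chip looks like any other PWM provider and is driven through the generic atomic API. A hedged sketch of consumer-side usage (the device is assumed to carry a pwms = <...> reference to the pwm-clk node; error handling trimmed to the essentials):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int example_set_half_duty(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);		/* seeds period/polarity from the DT args */
	state.duty_cycle = state.period / 2;	/* 50% duty cycle */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}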
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 272e0b5d01b8..763f2e3a146d 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -98,7 +98,7 @@ struct lpc18xx_pwm_chip {
unsigned long clk_rate;
unsigned int period_ns;
unsigned int min_period_ns;
- unsigned int max_period_ns;
+ u64 max_period_ns;
unsigned int period_event;
unsigned long event_map;
struct mutex res_lock;
@@ -145,40 +145,48 @@ static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm,
mutex_unlock(&lpc18xx_pwm->res_lock);
}
-static void lpc18xx_pwm_config_period(struct pwm_chip *chip, int period_ns)
+static void lpc18xx_pwm_config_period(struct pwm_chip *chip, u64 period_ns)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
- u64 val;
+ u32 val;
- val = (u64)period_ns * lpc18xx_pwm->clk_rate;
- do_div(val, NSEC_PER_SEC);
+ /*
+ * With clk_rate < NSEC_PER_SEC this cannot overflow.
+ * With period_ns < max_period_ns this also fits into an u32.
+ * As period_ns >= min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, lpc18xx_pwm->clk_rate);
+ * we have val >= 1.
+ */
+ val = mul_u64_u64_div_u64(period_ns, lpc18xx_pwm->clk_rate, NSEC_PER_SEC);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCH(lpc18xx_pwm->period_event),
- (u32)val - 1);
+ val - 1);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCHREL(lpc18xx_pwm->period_event),
- (u32)val - 1);
+ val - 1);
}
static void lpc18xx_pwm_config_duty(struct pwm_chip *chip,
- struct pwm_device *pwm, int duty_ns)
+ struct pwm_device *pwm, u64 duty_ns)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
- u64 val;
+ u32 val;
- val = (u64)duty_ns * lpc18xx_pwm->clk_rate;
- do_div(val, NSEC_PER_SEC);
+ /*
+ * With clk_rate < NSEC_PER_SEC this cannot overflow.
+ * With duty_ns <= period_ns < max_period_ns this also fits into an u32.
+ */
+ val = mul_u64_u64_div_u64(duty_ns, lpc18xx_pwm->clk_rate, NSEC_PER_SEC);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCH(lpc18xx_data->duty_event),
- (u32)val);
+ val);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCHREL(lpc18xx_data->duty_event),
- (u32)val);
+ val);
}
static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -359,30 +367,35 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
return PTR_ERR(lpc18xx_pwm->base);
lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
- if (IS_ERR(lpc18xx_pwm->pwm_clk)) {
- dev_err(&pdev->dev, "failed to get pwm clock\n");
- return PTR_ERR(lpc18xx_pwm->pwm_clk);
- }
+ if (IS_ERR(lpc18xx_pwm->pwm_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lpc18xx_pwm->pwm_clk),
+ "failed to get pwm clock\n");
ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not prepare or enable pwm clock\n");
lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
if (!lpc18xx_pwm->clk_rate) {
- dev_err(&pdev->dev, "pwm clock has no frequency\n");
- ret = -EINVAL;
+ ret = dev_err_probe(&pdev->dev,
+ -EINVAL, "pwm clock has no frequency\n");
+ goto disable_pwmclk;
+ }
+
+ /*
+ * If clkrate is too fast, the calculations in .apply() might overflow.
+ */
+ if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC) {
+ ret = dev_err_probe(&pdev->dev, -EINVAL, "pwm clock to fast\n");
goto disable_pwmclk;
}
mutex_init(&lpc18xx_pwm->res_lock);
mutex_init(&lpc18xx_pwm->period_lock);
- val = (u64)NSEC_PER_SEC * LPC18XX_PWM_TIMER_MAX;
- do_div(val, lpc18xx_pwm->clk_rate);
- lpc18xx_pwm->max_period_ns = val;
+ lpc18xx_pwm->max_period_ns =
+ mul_u64_u64_div_u64(NSEC_PER_SEC, LPC18XX_PWM_TIMER_MAX, lpc18xx_pwm->clk_rate);
lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
lpc18xx_pwm->clk_rate);
@@ -423,7 +436,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&lpc18xx_pwm->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
goto disable_pwmclk;
}
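The lpc18xx conversion above leans on mul_u64_u64_div_u64() so the nanoseconds-to-ticks computation cannot overflow, and the new check caps the clock rate at NSEC_PER_SEC so the result is guaranteed to fit in 32 bits. A small sketch of the same conversion with hypothetical numbers:

#include <linux/math64.h>
#include <linux/time64.h>

/*
 * Illustrative only: with rate <= NSEC_PER_SEC and period_ns bounded by the
 * timer width, the quotient fits in a u32.
 * e.g. rate = 180000000 Hz, period_ns = 1000000 -> 180000 ticks.
 */
static u32 period_to_ticks(u64 period_ns, unsigned long rate)
{
	return mul_u64_u64_div_u64(period_ns, rate, NSEC_PER_SEC);
}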
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index d28c0874c7f2..6901a44dc428 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -323,6 +323,12 @@ static const struct pwm_mediatek_of_data mt8183_pwm_data = {
.has_ck_26m_sel = true,
};
+static const struct pwm_mediatek_of_data mt8365_pwm_data = {
+ .num_pwms = 3,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = true,
+};
+
static const struct pwm_mediatek_of_data mt8516_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = false,
@@ -337,6 +343,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
{ .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
{ .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
+ { .compatible = "mediatek,mt8365-pwm", .data = &mt8365_pwm_data },
{ .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
{ },
};
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index e6d05a329002..2d4fa5e5fdd4 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -23,7 +23,7 @@
#define PWM_SIFIVE_PWMCFG 0x0
#define PWM_SIFIVE_PWMCOUNT 0x8
#define PWM_SIFIVE_PWMS 0x10
-#define PWM_SIFIVE_PWMCMP0 0x20
+#define PWM_SIFIVE_PWMCMP(i) (0x20 + 4 * (i))
/* PWMCFG fields */
#define PWM_SIFIVE_PWMCFG_SCALE GENMASK(3, 0)
@@ -36,14 +36,12 @@
#define PWM_SIFIVE_PWMCFG_GANG BIT(24)
#define PWM_SIFIVE_PWMCFG_IP BIT(28)
-/* PWM_SIFIVE_SIZE_PWMCMP is used to calculate offset for pwmcmpX registers */
-#define PWM_SIFIVE_SIZE_PWMCMP 4
#define PWM_SIFIVE_CMPWIDTH 16
#define PWM_SIFIVE_DEFAULT_PERIOD 10000000
struct pwm_sifive_ddata {
struct pwm_chip chip;
- struct mutex lock; /* lock to protect user_count */
+ struct mutex lock; /* lock to protect user_count and approx_period */
struct notifier_block notifier;
struct clk *clk;
void __iomem *regs;
@@ -78,6 +76,7 @@ static void pwm_sifive_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&ddata->lock);
}
+/* Called holding ddata->lock */
static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
unsigned long rate)
{
@@ -112,8 +111,7 @@ static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
u32 duty, val;
- duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP0 +
- pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+ duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
state->enabled = duty > 0;
@@ -127,24 +125,6 @@ static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->polarity = PWM_POLARITY_INVERSED;
}
-static int pwm_sifive_enable(struct pwm_chip *chip, bool enable)
-{
- struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
- int ret;
-
- if (enable) {
- ret = clk_enable(ddata->clk);
- if (ret) {
- dev_err(ddata->chip.dev, "Enable clk failed\n");
- return ret;
- }
- } else {
- clk_disable(ddata->clk);
- }
-
- return 0;
-}
-
static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
@@ -159,13 +139,6 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->polarity != PWM_POLARITY_INVERSED)
return -EINVAL;
- ret = clk_enable(ddata->clk);
- if (ret) {
- dev_err(ddata->chip.dev, "Enable clk failed\n");
- return ret;
- }
-
- mutex_lock(&ddata->lock);
cur_state = pwm->state;
enabled = cur_state.enabled;
@@ -184,25 +157,36 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* The hardware cannot generate a 100% duty cycle */
frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1);
+ mutex_lock(&ddata->lock);
if (state->period != ddata->approx_period) {
if (ddata->user_count != 1) {
- ret = -EBUSY;
- goto exit;
+ mutex_unlock(&ddata->lock);
+ return -EBUSY;
}
ddata->approx_period = state->period;
pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk));
}
+ mutex_unlock(&ddata->lock);
- writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP0 +
- pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+ /*
+ * If the PWM is enabled the clk is already on. So only enable it
+ * conditionally to have it on exactly once afterwards independent of
+ * the PWM state.
+ */
+ if (!enabled) {
+ ret = clk_enable(ddata->clk);
+ if (ret) {
+ dev_err(ddata->chip.dev, "Enable clk failed\n");
+ return ret;
+ }
+ }
- if (state->enabled != enabled)
- pwm_sifive_enable(chip, state->enabled);
+ writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
-exit:
- clk_disable(ddata->clk);
- mutex_unlock(&ddata->lock);
- return ret;
+ if (!state->enabled)
+ clk_disable(ddata->clk);
+
+ return 0;
}
static const struct pwm_ops pwm_sifive_ops = {
@@ -232,6 +216,8 @@ static int pwm_sifive_probe(struct platform_device *pdev)
struct pwm_sifive_ddata *ddata;
struct pwm_chip *chip;
int ret;
+ u32 val;
+ unsigned int enabled_pwms = 0, enabled_clks = 1;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -258,6 +244,33 @@ static int pwm_sifive_probe(struct platform_device *pdev)
return ret;
}
+ val = readl(ddata->regs + PWM_SIFIVE_PWMCFG);
+ if (val & PWM_SIFIVE_PWMCFG_EN_ALWAYS) {
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; ++i) {
+ val = readl(ddata->regs + PWM_SIFIVE_PWMCMP(i));
+ if (val > 0)
+ ++enabled_pwms;
+ }
+ }
+
+ /* The clk should be on once for each running PWM. */
+ if (enabled_pwms) {
+ while (enabled_clks < enabled_pwms) {
+ /* This is not expected to fail as the clk is already on */
+ ret = clk_enable(ddata->clk);
+ if (unlikely(ret)) {
+ dev_err_probe(dev, ret, "Failed to enable clk\n");
+ goto disable_clk;
+ }
+ ++enabled_clks;
+ }
+ } else {
+ clk_disable(ddata->clk);
+ enabled_clks = 0;
+ }
+
/* Watch for changes to underlying clock frequency */
ddata->notifier.notifier_call = pwm_sifive_clock_notifier;
ret = clk_notifier_register(ddata->clk, &ddata->notifier);
@@ -280,7 +293,11 @@ static int pwm_sifive_probe(struct platform_device *pdev)
unregister_clk:
clk_notifier_unregister(ddata->clk, &ddata->notifier);
disable_clk:
- clk_disable_unprepare(ddata->clk);
+ while (enabled_clks) {
+ clk_disable(ddata->clk);
+ --enabled_clks;
+ }
+ clk_unprepare(ddata->clk);
return ret;
}
@@ -288,23 +305,19 @@ disable_clk:
static int pwm_sifive_remove(struct platform_device *dev)
{
struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
- bool is_enabled = false;
struct pwm_device *pwm;
int ch;
+ pwmchip_remove(&ddata->chip);
+ clk_notifier_unregister(ddata->clk, &ddata->notifier);
+
for (ch = 0; ch < ddata->chip.npwm; ch++) {
pwm = &ddata->chip.pwms[ch];
- if (pwm->state.enabled) {
- is_enabled = true;
- break;
- }
+ if (pwm->state.enabled)
+ clk_disable(ddata->clk);
}
- if (is_enabled)
- clk_disable(ddata->clk);
- clk_disable_unprepare(ddata->clk);
- pwmchip_remove(&ddata->chip);
- clk_notifier_unregister(ddata->clk, &ddata->notifier);
+ clk_unprepare(ddata->clk);
return 0;
}
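The reworked pwm-sifive probe and remove paths keep the clock's enable count equal to the number of channels that are actually running, so .apply() only toggles the clock when a channel changes state. The invariant, sketched as a hypothetical helper:

#include <linux/clk.h>

/*
 * Illustrative only: one clk_enable() per running channel, so the enable
 * count drops to zero exactly when the last channel is switched off.
 */
static int example_channel_set_enabled(struct clk *clk, bool was_enabled,
					bool enable)
{
	if (enable && !was_enabled)
		return clk_enable(clk);		/* channel turns on  */
	if (!enable && was_enabled)
		clk_disable(clk);		/* channel turns off */
	return 0;
}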
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index ed0b63dd38f1..8fb84b441853 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -7,6 +7,22 @@
*
* This driver is a complete rewrite of the former pwm-twl6030.c authorded by:
* Hemanth V <hemanthv@ti.com>
+ *
+ * Reference manual for the twl6030 is available at:
+ * https://www.ti.com/lit/ds/symlink/twl6030.pdf
+ *
+ * Limitations:
+ * - The twl6030 hardware only supports two period lengths (128 clock ticks and
+ * 64 clock ticks), the driver only uses 128 ticks
+ * - The hardware doesn't support ON = 0, so the active part of a period doesn't
+ * start at its beginning.
+ * - The hardware could support inverted polarity (with a similar limitation as
+ * for normal: the last clock tick is always inactive).
+ * - The hardware emits a constant low output when disabled.
+ * - A request for .duty_cycle = 0 results in an output wave with one active
+ * clock tick per period. This should better use the disabled state.
+ * - The driver only implements setting the relative duty cycle.
+ * - The driver doesn't implement .get_state().
*/
#include <linux/module.h>
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index cbe0f96ca342..23e3e4a35cc9 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -546,6 +546,16 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX597X
+ tristate "Maxim 597x power switch and monitor"
+ depends on I2C
+ depends on OF
+ depends on MFD_MAX597X
+ help
+ This driver controls a Maxim 5970/5978 switch via I2C bus.
+ The MAX5970/5978 is a smart switch with no output regulation, but
+ fault protection and voltage and current monitoring capabilities.
+
config REGULATOR_MAX77620
tristate "Maxim 77620/MAX20024 voltage regulator"
depends on MFD_MAX77620 || COMPILE_TEST
@@ -804,6 +814,14 @@ config REGULATOR_MT6360
2-channel buck with Thermal Shutdown and Overload Protection
6-channel High PSRR and Low Dropout LDO.
+config REGULATOR_MT6370
+ tristate "MT6370 SubPMIC Regulator"
+ depends on MFD_MT6370
+ help
+ Say Y here to enable MT6370 regulator support.
+ This driver supports the control for DisplayBias voltages and one
+ general purpose LDO which is commonly used to drive the vibrator.
+
config REGULATOR_MT6380
tristate "MediaTek MT6380 PMIC"
depends on MTK_PMIC_WRAP
@@ -1047,6 +1065,16 @@ config REGULATOR_RT5033
RT5033 PMIC. The device supports multiple regulators like
current source, LDO and Buck.
+config REGULATOR_RT5120
+ tristate "Richtek RT5120 PMIC Regulators"
+ depends on MFD_RT5120
+ help
+ This adds support for voltage regulator in Richtek RT5120 PMIC.
+ It integrates 4 channels buck controller, 1 channel LDO, 1 EXTEN
+ to control external power source. Only BUCK1 is adjustable from
+ 600mV to 1395mV, per step 6.250mV. The others are all fixed voltage
+ by external hardware circuit.
+
config REGULATOR_RT5190A
tristate "Richtek RT5190A PMIC"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 8d3ee8b6d41d..fa49bb6cc544 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
+obj-$(CONFIG_REGULATOR_MAX597X) += max597x-regulator.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
@@ -97,6 +98,7 @@ obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_MT6359) += mt6359-regulator.o
obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
+obj-$(CONFIG_REGULATOR_MT6370) += mt6370-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_MTK_DVFSRC) += mtk-dvfsrc-regulator.o
@@ -126,6 +128,7 @@ obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
+obj-$(CONFIG_REGULATOR_RT5120) += rt5120-regulator.o
obj-$(CONFIG_REGULATOR_RT5190A) += rt5190a-regulator.o
obj-$(CONFIG_REGULATOR_RT5759) += rt5759-regulator.o
obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1e54a833f2cf..d8373cb04f90 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1565,6 +1565,9 @@ static int set_machine_constraints(struct regulator_dev *rdev)
rdev->constraints->always_on = true;
}
+ if (rdev->desc->off_on_delay)
+ rdev->last_off = ktime_get();
+
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
@@ -1592,8 +1595,6 @@ static int set_machine_constraints(struct regulator_dev *rdev)
if (rdev->constraints->always_on)
rdev->use_count++;
- } else if (rdev->desc->off_on_delay) {
- rdev->last_off = ktime_get();
}
print_constraints(rdev);
@@ -4783,22 +4784,26 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- ret = PTR_ERR(consumers[i].consumer);
+ ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
+ "Failed to get supply '%s'",
+ consumers[i].supply);
consumers[i].consumer = NULL;
goto err;
}
+
+ if (consumers[i].init_load_uA > 0) {
+ ret = regulator_set_load(consumers[i].consumer,
+ consumers[i].init_load_uA);
+ if (ret) {
+ i++;
+ goto err;
+ }
+ }
}
return 0;
err:
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get supply '%s': %pe\n",
- consumers[i].supply, ERR_PTR(ret));
- else
- dev_dbg(dev, "Failed to get supply '%s', deferring\n",
- consumers[i].supply);
-
while (--i >= 0)
regulator_put(consumers[i].consumer);
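
With the hunk above, regulator_bulk_get() applies an initial load when a
consumer fills in init_load_uA and reports fetch failures through
dev_err_probe() itself. A minimal consumer-side sketch; the supply names and
microamp values below are illustrative assumptions, not taken from this patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdda",  .init_load_uA = 21800 },
	{ .supply = "vddio", .init_load_uA = 62000 },
};

static int example_get_supplies(struct device *dev)
{
	/*
	 * regulator_bulk_get() (also via the devm_ wrapper) now calls
	 * regulator_set_load() for each entry with a non-zero init_load_uA
	 * and logs/defers failed lookups with dev_err_probe().
	 */
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				       example_supplies);
}
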
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index 79b3eb3222c6..b0c225d98631 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Motorola CPCAP PMIC regulator driver
*
@@ -6,15 +7,6 @@
*
* Rewritten for mainline kernel to use device tree and regmap
* Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/err.h>
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index c4754f3cf233..1591636f86c3 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -22,36 +22,6 @@ struct cros_ec_regulator_data {
u16 num_voltages;
};
-static int cros_ec_cmd(struct cros_ec_device *ec, u32 version, u32 command,
- void *outdata, u32 outsize, void *indata, u32 insize)
-{
- struct cros_ec_command *msg;
- int ret;
-
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->version = version;
- msg->command = command;
- msg->outsize = outsize;
- msg->insize = insize;
-
- if (outdata && outsize > 0)
- memcpy(msg->data, outdata, outsize);
-
- ret = cros_ec_cmd_xfer_status(ec, msg);
- if (ret < 0)
- goto cleanup;
-
- if (insize)
- memcpy(indata, msg->data, insize);
-
-cleanup:
- kfree(msg);
- return ret;
-}
-
static int cros_ec_regulator_enable(struct regulator_dev *dev)
{
struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
@@ -61,7 +31,7 @@ static int cros_ec_regulator_enable(struct regulator_dev *dev)
};
return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
- sizeof(cmd), NULL, 0);
+ sizeof(cmd), NULL, 0);
}
static int cros_ec_regulator_disable(struct regulator_dev *dev)
@@ -73,7 +43,7 @@ static int cros_ec_regulator_disable(struct regulator_dev *dev)
};
return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
- sizeof(cmd), NULL, 0);
+ sizeof(cmd), NULL, 0);
}
static int cros_ec_regulator_is_enabled(struct regulator_dev *dev)
@@ -161,7 +131,7 @@ static int cros_ec_regulator_init_info(struct device *dev,
int ret;
ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_INFO, &cmd,
- sizeof(cmd), &resp, sizeof(resp));
+ sizeof(cmd), &resp, sizeof(resp));
if (ret < 0)
return ret;
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 9113233f41cd..32823a87fd40 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -166,6 +166,34 @@ int devm_regulator_bulk_get(struct device *dev, int num_consumers,
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
+/**
+ * devm_regulator_bulk_get_const - devm_regulator_bulk_get() w/ const data
+ *
+ * @dev: device to supply
+ * @num_consumers: number of consumers to register
+ * @in_consumers: const configuration of consumers
+ * @out_consumers: in_consumers is copied here and this is passed to
+ * devm_regulator_bulk_get().
+ *
+ * This is a convenience function to allow bulk regulator configuration
+ * to be stored "static const" in files.
+ *
+ * Return: 0 on success, an errno on failure.
+ */
+int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers)
+{
+ *out_consumers = devm_kmemdup(dev, in_consumers,
+ num_consumers * sizeof(*in_consumers),
+ GFP_KERNEL);
+ if (*out_consumers == NULL)
+ return -ENOMEM;
+
+ return devm_regulator_bulk_get(dev, num_consumers, *out_consumers);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_const);
+
static void devm_rdev_release(struct device *dev, void *res)
{
regulator_unregister(*(struct regulator_dev **)res);
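
A brief usage sketch for the devm_regulator_bulk_get_const() helper added
above; the supply names are assumptions for illustration only. The const
table can live in rodata, and the devres-managed copy comes back through
out_consumers:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const struct regulator_bulk_data example_consumers[] = {
	{ .supply = "vdd" },
	{ .supply = "vccio" },
};

static int example_probe(struct device *dev)
{
	struct regulator_bulk_data *consumers;

	/* The const table is devm_kmemdup()ed; the copy is what gets used. */
	return devm_regulator_bulk_get_const(dev, ARRAY_SIZE(example_consumers),
					     example_consumers, &consumers);
}
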
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 6f28bba81d13..591a64e1ca61 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* isl6271a-regulator.c
*
* Support for Intersil ISL6271A voltage regulator
*
* Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index c38387e0fbb2..d6e597922cb5 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for LP873X PMIC
*
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#include <linux/module.h>
diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c
new file mode 100644
index 000000000000..03c6027682d8
--- /dev/null
+++ b/drivers/regulator/max597x-regulator.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for regulators in MAX5970 and MAX5978 IC
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/max597x.h>
+
+struct max597x_regulator {
+ int num_switches, mon_rng, irng, shunt_micro_ohms, lim_uA;
+ struct regmap *regmap;
+};
+
+enum max597x_regulator_id {
+ MAX597X_SW0,
+ MAX597X_SW1,
+};
+
+static int max597x_uvp_ovp_check_mode(struct regulator_dev *rdev, int severity)
+{
+ int ret, reg;
+
+ /* Status1 register contains the soft strap values sampled at POR */
+ ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS1, &reg);
+ if (ret)
+ return ret;
+
+ /* Check soft straps match requested mode */
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (STATUS1_PROT(reg) != STATUS1_PROT_SHUTDOWN)
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+ if (STATUS1_PROT(reg) == STATUS1_PROT_SHUTDOWN)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int max597x_set_vp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable, bool overvoltage)
+{
+ int off_h, off_l, reg, ret;
+ struct max597x_regulator *data = rdev_get_drvdata(rdev);
+ int channel = rdev_get_id(rdev);
+
+ if (overvoltage) {
+ if (severity == REGULATOR_SEVERITY_WARN) {
+ off_h = MAX5970_REG_CH_OV_WARN_H(channel);
+ off_l = MAX5970_REG_CH_OV_WARN_L(channel);
+ } else {
+ off_h = MAX5970_REG_CH_OV_CRIT_H(channel);
+ off_l = MAX5970_REG_CH_OV_CRIT_L(channel);
+ }
+ } else {
+ if (severity == REGULATOR_SEVERITY_WARN) {
+ off_h = MAX5970_REG_CH_UV_WARN_H(channel);
+ off_l = MAX5970_REG_CH_UV_WARN_L(channel);
+ } else {
+ off_h = MAX5970_REG_CH_UV_CRIT_H(channel);
+ off_l = MAX5970_REG_CH_UV_CRIT_L(channel);
+ }
+ }
+
+ if (enable)
+ /* reg = ADC_MASK * (lim_uV / 1000000) / (data->mon_rng / 1000000) */
+ reg = ADC_MASK * lim_uV / data->mon_rng;
+ else
+ reg = 0;
+
+ ret = regmap_write(rdev->regmap, off_h, MAX5970_VAL2REG_H(reg));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rdev->regmap, off_l, MAX5970_VAL2REG_L(reg));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int max597x_set_uvp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable)
+{
+ int ret;
+
+ /*
+	 * MAX5970 implements the enable control as a special value in the
+	 * limit register. We can't set a limit while keeping the feature
+	 * disabled, nor enable it without a given limit.
+ */
+ if ((lim_uV && !enable) || (!lim_uV && enable))
+ return -EINVAL;
+
+ ret = max597x_uvp_ovp_check_mode(rdev, severity);
+ if (ret)
+ return ret;
+
+ return max597x_set_vp(rdev, lim_uV, severity, enable, false);
+}
+
+static int max597x_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable)
+{
+ int ret;
+
+ /*
+	 * MAX5970 implements the enable control as a special value in the
+	 * limit register. We can't set a limit while keeping the feature
+	 * disabled, nor enable it without a given limit.
+ */
+ if ((lim_uV && !enable) || (!lim_uV && enable))
+ return -EINVAL;
+
+ ret = max597x_uvp_ovp_check_mode(rdev, severity);
+ if (ret)
+ return ret;
+
+ return max597x_set_vp(rdev, lim_uV, severity, enable, true);
+}
+
+static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
+ int severity, bool enable)
+{
+ int ret, val, reg;
+ unsigned int vthst, vthfst;
+
+ struct max597x_regulator *data = rdev_get_drvdata(rdev);
+ int rdev_id = rdev_get_id(rdev);
+ /*
+	 * MAX5970 doesn't have an enable control for OCP.
+	 * If a limit is specified but enable is not set, hold the value in a
+	 * variable and use it later when OCP needs to be enabled.
+ */
+ if (lim_uA != 0 && lim_uA != data->lim_uA)
+ data->lim_uA = lim_uA;
+
+ if (severity != REGULATOR_SEVERITY_PROT)
+ return -EINVAL;
+
+	if (enable) {
+		/* Calc Vtrip threshold in uV. */
+		vthst = div_u64(mul_u32_u32(data->shunt_micro_ohms,
+					    data->lim_uA), 1000000);
+
+ /*
+		 * As recommended in the datasheet, add a 20% margin to avoid
+		 * spurious events and to cover passive component tolerance.
+ */
+ vthst = div_u64(mul_u32_u32(vthst, 120), 100);
+
+ /* Calc fast Vtrip threshold in uV */
+ vthfst = vthst * (MAX5970_FAST2SLOW_RATIO / 100);
+
+ if (vthfst > data->irng) {
+ dev_err(&rdev->dev, "Current limit out of range\n");
+ return -EINVAL;
+ }
+ /* Fast trip threshold to be programmed */
+ val = div_u64(mul_u32_u32(0xFF, vthfst), data->irng);
+	} else {
+		/*
+		 * Since there is no option to disable ocp, set limit to max
+		 * value
+		 */
+		val = 0xFF;
+	}
+
+ reg = MAX5970_REG_DAC_FAST(rdev_id);
+ ret = regmap_write(rdev->regmap, reg, val);
+
+ return ret;
+}
+
+static int max597x_get_status(struct regulator_dev *rdev)
+{
+ int val, ret;
+
+ ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS3, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ if (val & MAX5970_STATUS3_ALERT)
+ return REGULATOR_STATUS_ERROR;
+
+ ret = regulator_is_enabled_regmap(rdev);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return REGULATOR_STATUS_ON;
+
+ return REGULATOR_STATUS_OFF;
+}
+
+static const struct regulator_ops max597x_switch_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = max597x_get_status,
+ .set_over_voltage_protection = max597x_set_ovp,
+ .set_under_voltage_protection = max597x_set_uvp,
+ .set_over_current_protection = max597x_set_ocp,
+};
+
+static int max597x_dt_parse(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct max597x_regulator *data = cfg->driver_data;
+ int ret = 0;
+
+	ret = of_property_read_u32(np, "shunt-resistor-micro-ohms",
+				   &data->shunt_micro_ohms);
+ if (ret < 0)
+ dev_err(cfg->dev,
+ "property 'shunt-resistor-micro-ohms' not found, err %d\n",
+ ret);
+	return ret;
+}
+
+#define MAX597X_SWITCH(_ID, _ereg, _chan, _supply) { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .ops = &max597x_switch_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MAX597X_##_ID, \
+ .owner = THIS_MODULE, \
+ .supply_name = _supply, \
+ .enable_reg = _ereg, \
+ .enable_mask = CHXEN((_chan)), \
+ .of_parse_cb = max597x_dt_parse, \
+}
+
+static const struct regulator_desc regulators[] = {
+ MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
+ MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+};
+
+static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(map, reg, val);
+ if (ret)
+ return ret;
+
+ if (*val)
+ return regmap_write(map, reg, *val);
+
+ return 0;
+}
+
+static int max597x_irq_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ struct regulator_err_state *stat;
+ struct max597x_regulator *d = (struct max597x_regulator *)rid->data;
+ int val, ret, i;
+
+ ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT0, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ *dev_mask = 0;
+ for (i = 0; i < d->num_switches; i++) {
+ stat = &rid->states[i];
+ stat->notifs = 0;
+ stat->errors = 0;
+ }
+
+ for (i = 0; i < d->num_switches; i++) {
+ stat = &rid->states[i];
+
+ if (val & UV_STATUS_CRIT(i)) {
+ *dev_mask |= 1 << i;
+ stat->notifs |= REGULATOR_EVENT_UNDER_VOLTAGE;
+ stat->errors |= REGULATOR_ERROR_UNDER_VOLTAGE;
+ } else if (val & UV_STATUS_WARN(i)) {
+ *dev_mask |= 1 << i;
+ stat->notifs |= REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ stat->errors |= REGULATOR_ERROR_UNDER_VOLTAGE_WARN;
+ }
+ }
+
+ ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT1, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ for (i = 0; i < d->num_switches; i++) {
+ stat = &rid->states[i];
+
+ if (val & OV_STATUS_CRIT(i)) {
+ *dev_mask |= 1 << i;
+ stat->notifs |= REGULATOR_EVENT_REGULATION_OUT;
+ stat->errors |= REGULATOR_ERROR_REGULATION_OUT;
+ } else if (val & OV_STATUS_WARN(i)) {
+ *dev_mask |= 1 << i;
+ stat->notifs |= REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ stat->errors |= REGULATOR_ERROR_OVER_VOLTAGE_WARN;
+ }
+ }
+
+ ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT2, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ for (i = 0; i < d->num_switches; i++) {
+ stat = &rid->states[i];
+
+ if (val & OC_STATUS_WARN(i)) {
+ *dev_mask |= 1 << i;
+ stat->notifs |= REGULATOR_EVENT_OVER_CURRENT_WARN;
+ stat->errors |= REGULATOR_ERROR_OVER_CURRENT_WARN;
+ }
+ }
+
+ ret = regmap_read(d->regmap, MAX5970_REG_STATUS0, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ for (i = 0; i < d->num_switches; i++) {
+ stat = &rid->states[i];
+
+		if ((val & MAX5970_CB_IFAULTF(i)) ||
+		    (val & MAX5970_CB_IFAULTS(i))) {
+ *dev_mask |= 1 << i;
+ stat->notifs |=
+ REGULATOR_EVENT_OVER_CURRENT |
+ REGULATOR_EVENT_DISABLE;
+ stat->errors |=
+ REGULATOR_ERROR_OVER_CURRENT | REGULATOR_ERROR_FAIL;
+
+ /* Clear the sub-IRQ status */
+ regulator_disable_regmap(stat->rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct regmap_config max597x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX_REGISTERS,
+};
+
+static int max597x_adc_range(struct regmap *regmap, const int ch,
+ u32 *irng, u32 *mon_rng)
+{
+ unsigned int reg;
+ int ret;
+
+ /* Decode current ADC range */
+ ret = regmap_read(regmap, MAX5970_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ switch (MAX5970_IRNG(reg, ch)) {
+ case 0:
+ *irng = 100000; /* 100 mV */
+ break;
+ case 1:
+ *irng = 50000; /* 50 mV */
+ break;
+ case 2:
+ *irng = 25000; /* 25 mV */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Decode current voltage monitor range */
+ ret = regmap_read(regmap, MAX5970_REG_MON_RANGE, &reg);
+ if (ret)
+ return ret;
+
+ *mon_rng = MAX5970_MON_MAX_RANGE_UV >> MAX5970_MON(reg, ch);
+
+ return 0;
+}
+
+static int max597x_setup_irq(struct device *dev,
+ int irq,
+ struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES],
+ int num_switches, struct max597x_regulator *data)
+{
+ struct regulator_irq_desc max597x_notif = {
+ .name = "max597x-irq",
+ .map_event = max597x_irq_handler,
+ .data = data,
+ };
+ int errs = REGULATOR_ERROR_UNDER_VOLTAGE |
+ REGULATOR_ERROR_UNDER_VOLTAGE_WARN |
+ REGULATOR_ERROR_OVER_VOLTAGE_WARN |
+ REGULATOR_ERROR_REGULATION_OUT |
+ REGULATOR_ERROR_OVER_CURRENT |
+ REGULATOR_ERROR_OVER_CURRENT_WARN | REGULATOR_ERROR_FAIL;
+ void *irq_helper;
+
+ /* Register notifiers - can fail if IRQ is not given */
+ irq_helper = devm_regulator_irq_helper(dev, &max597x_notif,
+ irq, 0, errs, NULL,
+ &rdevs[0], num_switches);
+ if (IS_ERR(irq_helper)) {
+ if (PTR_ERR(irq_helper) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(dev, "IRQ disabled %pe\n", irq_helper);
+ }
+
+ return 0;
+}
+
+static int max597x_regulator_probe(struct platform_device *pdev)
+{
+ struct max597x_data *max597x = dev_get_drvdata(pdev->dev.parent);
+ struct max597x_regulator *data;
+
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES];
+ int num_switches = max597x->num_switches;
+ int ret, i;
+
+ for (i = 0; i < num_switches; i++) {
+		data = devm_kzalloc(max597x->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->num_switches = num_switches;
+ data->regmap = max597x->regmap;
+
+ ret = max597x_adc_range(data->regmap, i, &max597x->irng[i], &max597x->mon_rng[i]);
+ if (ret < 0)
+ return ret;
+
+ data->irng = max597x->irng[i];
+ data->mon_rng = max597x->mon_rng[i];
+
+ config.dev = max597x->dev;
+ config.driver_data = (void *)data;
+ config.regmap = data->regmap;
+ rdev = devm_regulator_register(max597x->dev,
+ &regulators[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(max597x->dev, "failed to register regulator %s\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ rdevs[i] = rdev;
+ max597x->shunt_micro_ohms[i] = data->shunt_micro_ohms;
+ }
+
+ if (max597x->irq) {
+		ret = max597x_setup_irq(max597x->dev, max597x->irq, rdevs,
+					num_switches, data);
+ if (ret) {
+			dev_err(max597x->dev, "IRQ setup failed\n");
+ return ret;
+ }
+ }
+
+	return 0;
+}
+
+static struct platform_driver max597x_regulator_driver = {
+ .driver = {
+ .name = "max597x-regulator",
+ },
+ .probe = max597x_regulator_probe,
+};
+
+module_platform_driver(max597x_regulator_driver);
+
+MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
+MODULE_DESCRIPTION("MAX5970 hot-swap controller driver");
+MODULE_LICENSE("GPL v2");
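
To make the threshold arithmetic in max597x_set_ocp() above concrete, here is
a worked example with assumed numbers (a 2000 micro-ohm shunt, a 5 A limit, a
50 mV current-sense range and a fast-to-slow ratio of 200; none of these
values come from this patch):

	vthst  = 2000 * 5000000 / 1000000      = 10000 uV
	vthst += 20% margin                    = 12000 uV
	vthfst = 12000 * (200 / 100)           = 24000 uV
	val    = 0xFF * 24000 / 50000          = 122   (fast-trip DAC code)

Since 24000 uV is below the assumed 50 mV range, the limit is accepted and
the code is written to MAX5970_REG_DAC_FAST for the channel.
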
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index cb7e50003f70..fdcb0f508984 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* max8973-regulator.c -- Maxim max8973A
*
@@ -6,20 +7,6 @@
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
index 39cebec0edb6..82892d71c2c9 100644
--- a/drivers/regulator/mp5416.c
+++ b/drivers/regulator/mp5416.c
@@ -6,14 +6,14 @@
//
// Author: Saravanan Sekar <sravanhome@gmail.com>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
-#include <linux/i2c.h>
#define MP5416_REG_CTL0 0x00
#define MP5416_REG_CTL1 0x01
@@ -174,10 +174,22 @@ static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416LDO("ldo4", 4, BIT(1)),
};
+static struct regulator_desc mp5496_regulators_desc[MP5416_MAX_REGULATORS] = {
+ MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1),
+ MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 1),
+ MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1),
+ MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 1),
+ MP5416LDO("ldo1", 1, BIT(4)),
+ MP5416LDO("ldo2", 2, BIT(3)),
+ MP5416LDO("ldo3", 3, BIT(2)),
+ MP5416LDO("ldo4", 4, BIT(1)),
+};
+
static int mp5416_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator_config config = { NULL, };
+ static const struct regulator_desc *desc;
struct regulator_dev *rdev;
struct regmap *regmap;
int i;
@@ -188,12 +200,16 @@ static int mp5416_i2c_probe(struct i2c_client *client)
return PTR_ERR(regmap);
}
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
+
config.dev = dev;
config.regmap = regmap;
for (i = 0; i < MP5416_MAX_REGULATORS; i++) {
rdev = devm_regulator_register(dev,
- &mp5416_regulators_desc[i],
+ &desc[i],
&config);
if (IS_ERR(rdev)) {
dev_err(dev, "Failed to register regulator!\n");
@@ -205,13 +221,15 @@ static int mp5416_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id mp5416_of_match[] = {
- { .compatible = "mps,mp5416" },
+ { .compatible = "mps,mp5416", .data = &mp5416_regulators_desc },
+ { .compatible = "mps,mp5496", .data = &mp5496_regulators_desc },
{},
};
MODULE_DEVICE_TABLE(of, mp5416_of_match);
static const struct i2c_device_id mp5416_id[] = {
{ "mp5416", },
+ { "mp5496", },
{ },
};
MODULE_DEVICE_TABLE(i2c, mp5416_id);
diff --git a/drivers/regulator/mt6370-regulator.c b/drivers/regulator/mt6370-regulator.c
new file mode 100644
index 000000000000..e73f5a46cb9a
--- /dev/null
+++ b/drivers/regulator/mt6370-regulator.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bits.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+enum {
+ MT6370_IDX_DSVBOOST = 0,
+ MT6370_IDX_DSVPOS,
+ MT6370_IDX_DSVNEG,
+ MT6370_IDX_VIBLDO,
+ MT6370_MAX_IDX
+};
+
+#define MT6370_REG_LDO_CFG 0x180
+#define MT6370_REG_LDO_VOUT 0x181
+#define MT6370_REG_DB_CTRL1 0x1B0
+#define MT6370_REG_DB_CTRL2 0x1B1
+#define MT6370_REG_DB_VBST 0x1B2
+#define MT6370_REG_DB_VPOS 0x1B3
+#define MT6370_REG_DB_VNEG 0x1B4
+#define MT6370_REG_LDO_STAT 0x1DC
+#define MT6370_REG_DB_STAT 0x1DF
+
+#define MT6370_LDOOMS_MASK BIT(7)
+#define MT6370_LDOEN_MASK BIT(7)
+#define MT6370_LDOVOUT_MASK GENMASK(3, 0)
+#define MT6370_DBPERD_MASK (BIT(7) | BIT(4))
+#define MT6370_DBEXTEN_MASK BIT(0)
+#define MT6370_DBVPOSEN_MASK BIT(6)
+#define MT6370_DBVPOSDISG_MASK BIT(5)
+#define MT6370_DBVNEGEN_MASK BIT(3)
+#define MT6370_DBVNEGDISG_MASK BIT(2)
+#define MT6370_DBALLON_MASK (MT6370_DBVPOSEN_MASK | MT6370_DBVNEGEN_MASK)
+#define MT6370_DBSLEW_MASK GENMASK(7, 6)
+#define MT6370_DBVOUT_MASK GENMASK(5, 0)
+#define MT6370_LDOOC_EVT_MASK BIT(7)
+#define MT6370_POSSCP_EVT_MASK BIT(7)
+#define MT6370_NEGSCP_EVT_MASK BIT(6)
+#define MT6370_BSTOCP_EVT_MASK BIT(5)
+#define MT6370_POSOCP_EVT_MASK BIT(4)
+#define MT6370_NEGOCP_EVT_MASK BIT(3)
+
+#define MT6370_LDO_MINUV 1600000
+#define MT6370_LDO_STPUV 200000
+#define MT6370_LDO_N_VOLT 13
+#define MT6370_DBVBOOST_MINUV 4000000
+#define MT6370_DBVBOOST_STPUV 50000
+#define MT6370_DBVBOOST_N_VOLT 45
+#define MT6370_DBVOUT_MINUV 4000000
+#define MT6370_DBVOUT_STPUV 50000
+#define MT6370_DBVOUT_N_VOLT 41
+
+struct mt6370_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_dev *rdev[MT6370_MAX_IDX];
+ bool use_external_ctrl;
+};
+
+static const unsigned int mt6370_vpos_ramp_tbl[] = { 8540, 5840, 4830, 3000 };
+static const unsigned int mt6370_vneg_ramp_tbl[] = { 10090, 6310, 5050, 3150 };
+
+static int mt6370_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int stat_reg, stat, rpt_flags = 0;
+ int rid = rdev_get_id(rdev), ret;
+
+ if (rid == MT6370_IDX_VIBLDO)
+ stat_reg = MT6370_REG_LDO_STAT;
+ else
+ stat_reg = MT6370_REG_DB_STAT;
+
+ ret = regmap_read(regmap, stat_reg, &stat);
+ if (ret)
+ return ret;
+
+ switch (rid) {
+ case MT6370_IDX_DSVBOOST:
+ if (stat & MT6370_BSTOCP_EVT_MASK)
+ rpt_flags |= REGULATOR_ERROR_OVER_CURRENT;
+ break;
+ case MT6370_IDX_DSVPOS:
+ if (stat & MT6370_POSSCP_EVT_MASK)
+ rpt_flags |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+ if (stat & MT6370_POSOCP_EVT_MASK)
+ rpt_flags |= REGULATOR_ERROR_OVER_CURRENT;
+ break;
+ case MT6370_IDX_DSVNEG:
+ if (stat & MT6370_NEGSCP_EVT_MASK)
+ rpt_flags |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+ if (stat & MT6370_NEGOCP_EVT_MASK)
+ rpt_flags |= REGULATOR_ERROR_OVER_CURRENT;
+ break;
+ default:
+ if (stat & MT6370_LDOOC_EVT_MASK)
+ rpt_flags |= REGULATOR_ERROR_OVER_CURRENT;
+ break;
+ }
+
+ *flags = rpt_flags;
+ return 0;
+}
+
+static const struct regulator_ops mt6370_dbvboost_ops = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+ .get_error_flags = mt6370_get_error_flags,
+};
+
+static const struct regulator_ops mt6370_dbvout_ops = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .get_error_flags = mt6370_get_error_flags,
+};
+
+static const struct regulator_ops mt6370_ldo_ops = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = mt6370_get_error_flags,
+};
+
+static int mt6370_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct mt6370_priv *priv = config->driver_data;
+ struct gpio_desc *enable_gpio;
+ int ret;
+
+ enable_gpio = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
+ GPIOD_OUT_HIGH |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ desc->name);
+ if (IS_ERR(enable_gpio)) {
+ config->ena_gpiod = NULL;
+ return 0;
+ }
+
+ /*
+	 * RG (register) control by default.
+	 * Only when all regulators use the external pin is everything
+	 * switched to external control.
+ */
+ if (priv->use_external_ctrl) {
+ ret = regmap_update_bits(priv->regmap, MT6370_REG_DB_CTRL1,
+ MT6370_DBEXTEN_MASK,
+ MT6370_DBEXTEN_MASK);
+ if (ret)
+ return ret;
+ }
+
+ config->ena_gpiod = enable_gpio;
+ priv->use_external_ctrl = true;
+ return 0;
+}
+
+static const struct regulator_desc mt6370_regulator_descs[] = {
+ {
+ .name = "mt6370-dsv-vbst",
+ .of_match = of_match_ptr("dsvbst"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = MT6370_IDX_DSVBOOST,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &mt6370_dbvboost_ops,
+ .min_uV = MT6370_DBVBOOST_MINUV,
+ .uV_step = MT6370_DBVBOOST_STPUV,
+ .n_voltages = MT6370_DBVBOOST_N_VOLT,
+ .vsel_reg = MT6370_REG_DB_VBST,
+ .vsel_mask = MT6370_DBVOUT_MASK,
+ .bypass_reg = MT6370_REG_DB_CTRL1,
+ .bypass_mask = MT6370_DBPERD_MASK,
+ .bypass_val_on = MT6370_DBPERD_MASK,
+ },
+ {
+ .name = "mt6370-dsv-vpos",
+ .of_match = of_match_ptr("dsvpos"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = MT6370_IDX_DSVPOS,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .of_parse_cb = mt6370_of_parse_cb,
+ .ops = &mt6370_dbvout_ops,
+ .min_uV = MT6370_DBVOUT_MINUV,
+ .uV_step = MT6370_DBVOUT_STPUV,
+ .n_voltages = MT6370_DBVOUT_N_VOLT,
+ .vsel_reg = MT6370_REG_DB_VPOS,
+ .vsel_mask = MT6370_DBVOUT_MASK,
+ .enable_reg = MT6370_REG_DB_CTRL2,
+ .enable_mask = MT6370_DBVPOSEN_MASK,
+ .ramp_reg = MT6370_REG_DB_VPOS,
+ .ramp_mask = MT6370_DBSLEW_MASK,
+ .ramp_delay_table = mt6370_vpos_ramp_tbl,
+ .n_ramp_values = ARRAY_SIZE(mt6370_vpos_ramp_tbl),
+ .active_discharge_reg = MT6370_REG_DB_CTRL2,
+ .active_discharge_mask = MT6370_DBVPOSDISG_MASK,
+ .active_discharge_on = MT6370_DBVPOSDISG_MASK,
+ },
+ {
+ .name = "mt6370-dsv-vneg",
+ .of_match = of_match_ptr("dsvneg"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = MT6370_IDX_DSVNEG,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .of_parse_cb = mt6370_of_parse_cb,
+ .ops = &mt6370_dbvout_ops,
+ .min_uV = MT6370_DBVOUT_MINUV,
+ .uV_step = MT6370_DBVOUT_STPUV,
+ .n_voltages = MT6370_DBVOUT_N_VOLT,
+ .vsel_reg = MT6370_REG_DB_VNEG,
+ .vsel_mask = MT6370_DBVOUT_MASK,
+ .enable_reg = MT6370_REG_DB_CTRL2,
+ .enable_mask = MT6370_DBVNEGEN_MASK,
+ .ramp_reg = MT6370_REG_DB_VNEG,
+ .ramp_mask = MT6370_DBSLEW_MASK,
+ .ramp_delay_table = mt6370_vneg_ramp_tbl,
+ .n_ramp_values = ARRAY_SIZE(mt6370_vneg_ramp_tbl),
+ .active_discharge_reg = MT6370_REG_DB_CTRL2,
+ .active_discharge_mask = MT6370_DBVNEGDISG_MASK,
+ .active_discharge_on = MT6370_DBVNEGDISG_MASK,
+ },
+ {
+ .name = "mt6370-vib-ldo",
+ .of_match = of_match_ptr("vibldo"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = MT6370_IDX_VIBLDO,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &mt6370_ldo_ops,
+ .min_uV = MT6370_LDO_MINUV,
+ .uV_step = MT6370_LDO_STPUV,
+ .n_voltages = MT6370_LDO_N_VOLT,
+ .vsel_reg = MT6370_REG_LDO_VOUT,
+ .vsel_mask = MT6370_LDOVOUT_MASK,
+ .enable_reg = MT6370_REG_LDO_VOUT,
+ .enable_mask = MT6370_LDOEN_MASK,
+ .active_discharge_reg = MT6370_REG_LDO_CFG,
+ .active_discharge_mask = MT6370_LDOOMS_MASK,
+ .active_discharge_on = MT6370_LDOOMS_MASK,
+ }
+};
+
+static irqreturn_t mt6370_scp_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = data;
+
+ regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6370_ocp_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = data;
+
+ regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL);
+ return IRQ_HANDLED;
+}
+
+static int mt6370_regulator_irq_register(struct mt6370_priv *priv)
+{
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ static const struct {
+ const char *name;
+ int rid;
+ irq_handler_t handler;
+ } mt6370_irqs[] = {
+ { "db_vpos_scp", MT6370_IDX_DSVPOS, mt6370_scp_handler },
+ { "db_vneg_scp", MT6370_IDX_DSVNEG, mt6370_scp_handler },
+ { "db_vbst_ocp", MT6370_IDX_DSVBOOST, mt6370_ocp_handler },
+ { "db_vpos_ocp", MT6370_IDX_DSVPOS, mt6370_ocp_handler },
+ { "db_vneg_ocp", MT6370_IDX_DSVNEG, mt6370_ocp_handler },
+ { "ldo_oc", MT6370_IDX_VIBLDO, mt6370_ocp_handler }
+ };
+ struct regulator_dev *rdev;
+ int i, irq, ret;
+
+ for (i = 0; i < ARRAY_SIZE(mt6370_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, mt6370_irqs[i].name);
+
+ rdev = priv->rdev[mt6370_irqs[i].rid];
+
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
+ mt6370_irqs[i].handler, 0,
+ mt6370_irqs[i].name, rdev);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to register (%d) interrupt\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mt6370_regulator_register(struct mt6370_priv *priv)
+{
+ struct regulator_dev *rdev;
+ struct regulator_config cfg = {};
+ struct device *parent = priv->dev->parent;
+ int i;
+
+ cfg.dev = parent;
+ cfg.driver_data = priv;
+
+ for (i = 0; i < MT6370_MAX_IDX; i++) {
+ rdev = devm_regulator_register(priv->dev,
+ mt6370_regulator_descs + i,
+ &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(priv->dev,
+ "Failed to register (%d) regulator\n", i);
+ return PTR_ERR(rdev);
+ }
+
+ priv->rdev[i] = rdev;
+ }
+
+ return 0;
+}
+
+static int mt6370_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6370_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap) {
+ dev_err(&pdev->dev, "Failed to init regmap\n");
+ return -ENODEV;
+ }
+
+	ret = mt6370_regulator_register(priv);
+ if (ret)
+ return ret;
+
+ return mt6370_regulator_irq_register(priv);
+}
+
+static const struct platform_device_id mt6370_devid_table[] = {
+ { "mt6370-regulator", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mt6370_devid_table);
+
+static struct platform_driver mt6370_regulator_driver = {
+ .driver = {
+ .name = "mt6370-regulator",
+ },
+ .id_table = mt6370_devid_table,
+ .probe = mt6370_regulator_probe,
+};
+module_platform_driver(mt6370_regulator_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mt6380-regulator.c b/drivers/regulator/mt6380-regulator.c
index 2e6b61d3b0cf..43234296df36 100644
--- a/drivers/regulator/mt6380-regulator.c
+++ b/drivers/regulator/mt6380-regulator.c
@@ -319,7 +319,7 @@ static const struct platform_device_id mt6380_platform_ids[] = {
};
MODULE_DEVICE_TABLE(platform, mt6380_platform_ids);
-static const struct of_device_id mt6380_of_match[] = {
+static const struct of_device_id __maybe_unused mt6380_of_match[] = {
{ .compatible = "mediatek,mt6380-regulator", },
{ /* sentinel */ },
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index f54d4f176882..e12b681c72e5 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -264,8 +264,12 @@ static int of_get_regulation_constraints(struct device *dev,
}
suspend_np = of_get_child_by_name(np, regulator_states[i]);
- if (!suspend_np || !suspend_state)
+ if (!suspend_np)
continue;
+ if (!suspend_state) {
+ of_node_put(suspend_np);
+ continue;
+ }
if (!of_property_read_u32(suspend_np, "regulator-mode",
&pval)) {
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index ef6e47d025ca..59024c639141 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -205,6 +205,7 @@ static const struct regulator_ops rpm_mp5496_ops = {
.is_enabled = rpm_reg_is_enabled,
.list_voltage = regulator_list_voltage_linear_range,
+ .get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
};
@@ -357,10 +358,10 @@ static const struct regulator_desc pm8941_switch = {
static const struct regulator_desc pm8916_pldo = {
.linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
+ REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
},
.n_linear_ranges = 1,
- .n_voltages = 209,
+ .n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
@@ -783,6 +784,29 @@ static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8909_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_hvo_smps, "vdd_s2" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l2_l5" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l3_l6_l10" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l7" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_pldo, "vdd_l2_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l3_l6_l10" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l4_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l11_l15_l18" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_nldo, "vdd_l3_l6_l10" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l8_l11_l15_l18" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l13" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l11_l15_l18" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l11_l15_l18" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_lvo_smps, "vdd_s2" },
@@ -1221,6 +1245,7 @@ static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
+ { .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 02bfce981150..a2d0292a92fd 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -164,6 +164,8 @@ enum spmi_regulator_subtype {
SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL3 = 0x0f,
SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL4 = 0x10,
SPMI_REGULATOR_SUBTYPE_HFS430 = 0x0a,
+ SPMI_REGULATOR_SUBTYPE_HT_P150 = 0x35,
+ SPMI_REGULATOR_SUBTYPE_HT_P600 = 0x3d,
};
enum spmi_common_regulator_registers {
@@ -544,6 +546,14 @@ static struct spmi_voltage_range hfs430_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 320000, 320000, 2040000, 2040000, 8000),
};
+static struct spmi_voltage_range ht_p150_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 1616000, 1616000, 3304000, 3304000, 8000),
+};
+
+static struct spmi_voltage_range ht_p600_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 1704000, 1704000, 1896000, 1896000, 8000),
+};
+
static DEFINE_SPMI_SET_POINTS(pldo);
static DEFINE_SPMI_SET_POINTS(nldo1);
static DEFINE_SPMI_SET_POINTS(nldo2);
@@ -564,6 +574,8 @@ static DEFINE_SPMI_SET_POINTS(nldo660);
static DEFINE_SPMI_SET_POINTS(ht_lvpldo);
static DEFINE_SPMI_SET_POINTS(ht_nldo);
static DEFINE_SPMI_SET_POINTS(hfs430);
+static DEFINE_SPMI_SET_POINTS(ht_p150);
+static DEFINE_SPMI_SET_POINTS(ht_p600);
static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf,
int len)
@@ -1458,6 +1470,8 @@ static const struct regulator_ops spmi_hfs430_ops = {
static const struct spmi_regulator_mapping supported_regulators[] = {
/* type subtype dig_min dig_max ltype ops setpoints hpm_min */
+ SPMI_VREG(LDO, HT_P600, 0, INF, HFS430, hfs430, ht_p600, 10000),
+ SPMI_VREG(LDO, HT_P150, 0, INF, HFS430, hfs430, ht_p150, 10000),
SPMI_VREG(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000),
SPMI_VREG(BUCK, HFS430, 0, INF, HFS430, hfs430, hfs430, 10000),
SPMI_VREG(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
@@ -2125,6 +2139,28 @@ static const struct spmi_regulator_data pm8005_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pmp8074_regulators[] = {
+ { "s1", 0x1400, "vdd_s1"},
+ { "s2", 0x1700, "vdd_s2"},
+ { "s3", 0x1a00, "vdd_s3"},
+ { "s4", 0x1d00, "vdd_s4"},
+ { "s5", 0x2000, "vdd_s5"},
+ { "l1", 0x4000, "vdd_l1_l2"},
+ { "l2", 0x4100, "vdd_l1_l2"},
+ { "l3", 0x4200, "vdd_l3_l8"},
+ { "l4", 0x4300, "vdd_l4"},
+ { "l5", 0x4400, "vdd_l5_l6_l15"},
+ { "l6", 0x4500, "vdd_l5_l6_l15"},
+ { "l7", 0x4600, "vdd_l7"},
+ { "l8", 0x4700, "vdd_l3_l8"},
+ { "l9", 0x4800, "vdd_l9"},
+ /* l10 is currently unsupported HT_P50 */
+ { "l11", 0x4a00, "vdd_l10_l11_l12_l13"},
+ { "l12", 0x4b00, "vdd_l10_l11_l12_l13"},
+ { "l13", 0x4c00, "vdd_l10_l11_l12_l13"},
+ { }
+};
+
static const struct spmi_regulator_data pms405_regulators[] = {
{ "s3", 0x1a00, "vdd_s3"},
{ }
@@ -2142,6 +2178,7 @@ static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
{ .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
+ { .compatible = "qcom,pmp8074-regulators", .data = &pmp8074_regulators },
{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
{ }
};
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index fa8706a352ce..105f694a67e6 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -187,15 +187,11 @@ static int attiny_update_status(struct backlight_device *bl)
{
struct attiny_lcd *state = bl_get_data(bl);
struct regmap *regmap = state->regmap;
- int brightness = bl->props.brightness;
+ int brightness = backlight_get_brightness(bl);
int ret, i;
mutex_lock(&state->lock);
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
for (i = 0; i < 10; i++) {
ret = regmap_write(regmap, REG_PWM, brightness);
if (!ret)
diff --git a/drivers/regulator/rt5120-regulator.c b/drivers/regulator/rt5120-regulator.c
new file mode 100644
index 000000000000..8173ede09414
--- /dev/null
+++ b/drivers/regulator/rt5120-regulator.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT5120_REG_PGSTAT 0x03
+#define RT5120_REG_CH1VID 0x06
+#define RT5120_REG_CH1SLPVID 0x07
+#define RT5120_REG_ENABLE 0x08
+#define RT5120_REG_MODECTL 0x09
+#define RT5120_REG_UVOVPROT 0x0A
+#define RT5120_REG_SLPCTL 0x0C
+#define RT5120_REG_INTSTAT 0x1E
+#define RT5120_REG_DISCHG 0x1F
+
+#define RT5120_OUTPG_MASK(rid) BIT(rid + 1)
+#define RT5120_OUTUV_MASK(rid) BIT(rid + 9)
+#define RT5120_OUTOV_MASK(rid) BIT(rid + 16)
+#define RT5120_CH1VID_MASK GENMASK(6, 0)
+#define RT5120_RIDEN_MASK(rid) BIT(rid + 1)
+#define RT5120_RADEN_MASK(rid) BIT(rid)
+#define RT5120_FPWM_MASK(rid) BIT(rid + 1)
+#define RT5120_UVHICCUP_MASK BIT(1)
+#define RT5120_OVHICCUP_MASK BIT(0)
+#define RT5120_HOTDIE_MASK BIT(1)
+
+#define RT5120_BUCK1_MINUV 600000
+#define RT5120_BUCK1_MAXUV 1393750
+#define RT5120_BUCK1_STEPUV 6250
+#define RT5120_BUCK1_NUM_VOLT 0x80
+
+#define RT5120_AUTO_MODE 0
+#define RT5120_FPWM_MODE 1
+
+enum {
+ RT5120_REGULATOR_BUCK1 = 0,
+ RT5120_REGULATOR_BUCK2,
+ RT5120_REGULATOR_BUCK3,
+ RT5120_REGULATOR_BUCK4,
+ RT5120_REGULATOR_LDO,
+ RT5120_REGULATOR_EXTEN,
+ RT5120_MAX_REGULATOR
+};
+
+struct rt5120_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_desc rdesc[RT5120_MAX_REGULATOR];
+};
+
+static int rt5120_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int mask = RT5120_FPWM_MASK(rid), val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = RT5120_FPWM_MASK(rid);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(regmap, RT5120_REG_MODECTL, mask, val);
+}
+
+static unsigned int rt5120_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int ret, rid = rdev_get_id(rdev);
+ unsigned int val;
+
+ ret = regmap_read(regmap, RT5120_REG_MODECTL, &val);
+ if (ret)
+ return REGULATOR_MODE_INVALID;
+
+ if (val & RT5120_FPWM_MASK(rid))
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int rt5120_regulator_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int stat, hd_stat, cur_flags = 0;
+ int rid = rdev_get_id(rdev), ret;
+
+ /*
+	 * Regs 0x03/0x04/0x05 indicate PG/UV/OV status.
+	 * Use a block read to decrease I/O transfer time.
+ */
+ ret = regmap_raw_read(regmap, RT5120_REG_PGSTAT, &stat, 3);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, RT5120_REG_INTSTAT, &hd_stat);
+ if (ret)
+ return ret;
+
+ if (!(stat & RT5120_OUTPG_MASK(rid))) {
+ if (stat & RT5120_OUTUV_MASK(rid))
+ cur_flags |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+ if (stat & RT5120_OUTOV_MASK(rid))
+ cur_flags |= REGULATOR_ERROR_REGULATION_OUT;
+ }
+
+ if (hd_stat & RT5120_HOTDIE_MASK)
+ cur_flags |= REGULATOR_ERROR_OVER_TEMP;
+
+ *flags = cur_flags;
+ return 0;
+}
+
+static int rt5120_buck1_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int sel;
+
+ if (uV < RT5120_BUCK1_MINUV || uV > RT5120_BUCK1_MAXUV)
+ return -EINVAL;
+
+ sel = (uV - RT5120_BUCK1_MINUV) / RT5120_BUCK1_STEPUV;
+ return regmap_write(regmap, RT5120_REG_CH1SLPVID, sel);
+}
+
+static int rt5120_regulator_set_suspend_enable(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int mask = RT5120_RIDEN_MASK(rid);
+
+ return regmap_update_bits(regmap, RT5120_REG_SLPCTL, mask, mask);
+}
+
+static int rt5120_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int rid = rdev_get_id(rdev);
+ unsigned int mask = RT5120_RIDEN_MASK(rid);
+
+ return regmap_update_bits(regmap, RT5120_REG_SLPCTL, mask, 0);
+}
+
+static const struct regulator_ops rt5120_buck1_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_mode = rt5120_buck_set_mode,
+ .get_mode = rt5120_buck_get_mode,
+ .get_error_flags = rt5120_regulator_get_error_flags,
+ .set_suspend_voltage = rt5120_buck1_set_suspend_voltage,
+ .set_suspend_enable = rt5120_regulator_set_suspend_enable,
+ .set_suspend_disable = rt5120_regulator_set_suspend_disable,
+};
+
+static const struct regulator_ops rt5120_buck234_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_mode = rt5120_buck_set_mode,
+ .get_mode = rt5120_buck_get_mode,
+ .get_error_flags = rt5120_regulator_get_error_flags,
+ .set_suspend_enable = rt5120_regulator_set_suspend_enable,
+ .set_suspend_disable = rt5120_regulator_set_suspend_disable,
+};
+
+static const struct regulator_ops rt5120_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_error_flags = rt5120_regulator_get_error_flags,
+ .set_suspend_enable = rt5120_regulator_set_suspend_enable,
+ .set_suspend_disable = rt5120_regulator_set_suspend_disable,
+};
+
+static const struct regulator_ops rt5120_exten_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_enable = rt5120_regulator_set_suspend_enable,
+ .set_suspend_disable = rt5120_regulator_set_suspend_disable,
+};
+
+static unsigned int rt5120_buck_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RT5120_AUTO_MODE:
+ return REGULATOR_MODE_NORMAL;
+ case RT5120_FPWM_MODE:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static void rt5120_fillin_regulator_desc(struct regulator_desc *desc, int rid)
+{
+ static const char * const name[] = {
+ "buck1", "buck2", "buck3", "buck4", "ldo", "exten" };
+ static const char * const sname[] = {
+ "vin1", "vin2", "vin3", "vin4", "vinldo", NULL };
+
+ /* Common regulator property */
+ desc->name = name[rid];
+ desc->supply_name = sname[rid];
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->id = rid;
+ desc->enable_reg = RT5120_REG_ENABLE;
+ desc->enable_mask = RT5120_RIDEN_MASK(rid);
+ desc->active_discharge_reg = RT5120_REG_DISCHG;
+ desc->active_discharge_mask = RT5120_RADEN_MASK(rid);
+ desc->active_discharge_on = RT5120_RADEN_MASK(rid);
+	/* Config n_voltages to 1 for all */
+ desc->n_voltages = 1;
+
+	/* Only the bucks support mode changes */
+ if (rid >= RT5120_REGULATOR_BUCK1 && rid <= RT5120_REGULATOR_BUCK4)
+ desc->of_map_mode = rt5120_buck_of_map_mode;
+
+ /* RID specific property init */
+ switch (rid) {
+ case RT5120_REGULATOR_BUCK1:
+		/* Only buck1 supports voltage changes via I2C */
+ desc->n_voltages = RT5120_BUCK1_NUM_VOLT;
+ desc->min_uV = RT5120_BUCK1_MINUV;
+ desc->uV_step = RT5120_BUCK1_STEPUV;
+		desc->vsel_reg = RT5120_REG_CH1VID;
+		desc->vsel_mask = RT5120_CH1VID_MASK;
+ desc->ops = &rt5120_buck1_ops;
+ break;
+ case RT5120_REGULATOR_BUCK2 ... RT5120_REGULATOR_BUCK4:
+ desc->ops = &rt5120_buck234_ops;
+ break;
+ case RT5120_REGULATOR_LDO:
+ desc->ops = &rt5120_ldo_ops;
+ break;
+ default:
+ desc->ops = &rt5120_exten_ops;
+ }
+}
+
+static int rt5120_of_parse_cb(struct rt5120_priv *priv, int rid,
+ struct of_regulator_match *match)
+{
+ struct regulator_desc *desc = priv->rdesc + rid;
+ struct regulator_init_data *init_data = match->init_data;
+
+ if (!init_data || rid == RT5120_REGULATOR_BUCK1)
+ return 0;
+
+ if (init_data->constraints.min_uV != init_data->constraints.max_uV) {
+ dev_err(priv->dev, "Variable voltage for fixed regulator\n");
+ return -EINVAL;
+ }
+
+ desc->fixed_uV = init_data->constraints.min_uV;
+ return 0;
+}
+
+static struct of_regulator_match rt5120_regu_match[RT5120_MAX_REGULATOR] = {
+ [RT5120_REGULATOR_BUCK1] = { .name = "buck1", },
+ [RT5120_REGULATOR_BUCK2] = { .name = "buck2", },
+ [RT5120_REGULATOR_BUCK3] = { .name = "buck3", },
+ [RT5120_REGULATOR_BUCK4] = { .name = "buck4", },
+ [RT5120_REGULATOR_LDO] = { .name = "ldo", },
+ [RT5120_REGULATOR_EXTEN] = { .name = "exten", }
+};
+
+static int rt5120_parse_regulator_dt_data(struct rt5120_priv *priv)
+{
+ struct device *dev = priv->dev->parent;
+ struct device_node *reg_node;
+ int i, ret;
+
+ for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
+ rt5120_fillin_regulator_desc(priv->rdesc + i, i);
+
+ rt5120_regu_match[i].desc = priv->rdesc + i;
+ }
+
+ reg_node = of_get_child_by_name(dev->of_node, "regulators");
+ if (!reg_node) {
+ dev_err(priv->dev, "Couldn't find 'regulators' node\n");
+ return -ENODEV;
+ }
+
+ ret = of_regulator_match(priv->dev, reg_node, rt5120_regu_match,
+ ARRAY_SIZE(rt5120_regu_match));
+
+ of_node_put(reg_node);
+
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "Error parsing regulator init data (%d)\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
+ ret = rt5120_of_parse_cb(priv, i, rt5120_regu_match + i);
+ if (ret) {
+			dev_err(priv->dev, "Failed in [%d] of_parse_cb\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rt5120_device_property_init(struct rt5120_priv *priv)
+{
+ struct device *dev = priv->dev->parent;
+ struct device_node *np = dev->of_node;
+ bool prot_enable;
+ unsigned int prot_enable_val = 0;
+
+ /* Assign UV/OV HW protection behavior */
+ prot_enable = of_property_read_bool(np,
+ "richtek,enable-undervolt-hiccup");
+ if (prot_enable)
+ prot_enable_val |= RT5120_UVHICCUP_MASK;
+
+ prot_enable = of_property_read_bool(np,
+ "richtek,enable-overvolt-hiccup");
+ if (prot_enable)
+ prot_enable_val |= RT5120_OVHICCUP_MASK;
+
+ return regmap_update_bits(priv->regmap, RT5120_REG_UVOVPROT,
+ RT5120_UVHICCUP_MASK | RT5120_OVHICCUP_MASK,
+ prot_enable_val);
+}
+
+static int rt5120_regulator_probe(struct platform_device *pdev)
+{
+ struct rt5120_priv *priv;
+ struct regulator_dev *rdev;
+ struct regulator_config config = {};
+ int i, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap) {
+ dev_err(&pdev->dev, "Failed to init regmap\n");
+ return -ENODEV;
+ }
+
+ ret = rt5120_device_property_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to do property init\n");
+ return ret;
+ }
+
+ ret = rt5120_parse_regulator_dt_data(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse dt data\n");
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.regmap = priv->regmap;
+
+ for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
+ config.of_node = rt5120_regu_match[i].of_node;
+ config.init_data = rt5120_regu_match[i].init_data;
+
+ rdev = devm_regulator_register(&pdev->dev, priv->rdesc + i,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "Failed to register regulator [%d]\n", i);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id rt5120_regulator_dev_table[] = {
+ { "rt5120-regulator", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, rt5120_regulator_dev_table);
+
+static struct platform_driver rt5120_regulator_driver = {
+ .driver = {
+ .name = "rt5120-regulator",
+ },
+ .id_table = rt5120_regulator_dev_table,
+ .probe = rt5120_regulator_probe,
+};
+module_platform_driver(rt5120_regulator_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5120 regulator driver");
+MODULE_LICENSE("GPL v2");
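
As a quick sanity check of the BUCK1 constants defined above: with
RT5120_BUCK1_NUM_VOLT (0x80, i.e. 128) selector codes, the highest code is
127, so the top voltage is 600000 + 127 * 6250 = 1393750 uV, matching
RT5120_BUCK1_MAXUV. rt5120_buck1_set_suspend_voltage() inverts the same
linear map; for an assumed 1.0 V request, sel = (1000000 - 600000) / 6250 = 64.
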
diff --git a/drivers/regulator/rt5190a-regulator.c b/drivers/regulator/rt5190a-regulator.c
index 155d4afd00b1..4a3397b32582 100644
--- a/drivers/regulator/rt5190a-regulator.c
+++ b/drivers/regulator/rt5190a-regulator.c
@@ -224,6 +224,9 @@ static int rt5190a_of_parse_cb(struct rt5190a_priv *priv, int rid,
bool latchup_enable;
unsigned int mask = RT5190A_RID_BITMASK(rid), val;
+ if (!init_data)
+ return 0;
+
switch (rid) {
case RT5190A_IDX_BUCK1:
case RT5190A_IDX_BUCK4:
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 41ae7ac27ff6..b9918f4fd241 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -343,6 +343,7 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
* plausible SCMI Voltage Domain number, all belonging to this SCMI
* platform instance node (handle->dev->of_node).
*/
+ of_node_get(handle->dev->of_node);
np = of_find_node_by_name(handle->dev->of_node, "regulators");
for_each_child_of_node(np, child) {
ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index bd7b2f287250..ce00db27589a 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instruments SoC Adaptive Body Bias(ABB) Regulator
*
@@ -7,15 +8,6 @@
* Copyright (C) 2012-2013 Texas Instruments, Inc.
* Andrii Tseglytskyi <andrii.tseglytskyi@ti.com>
* Nishanth Menon <nm@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
@@ -309,7 +301,7 @@ out:
*
* Return: 0 on success or appropriate error value when fails
*/
-static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
+static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
{
const struct regulator_desc *desc = rdev->desc;
struct ti_abb *abb = rdev_get_drvdata(rdev);
@@ -344,7 +336,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
info = &abb->info[sel];
/*
- * When Linux kernel is starting up, we are'nt sure of the
+ * When Linux kernel is starting up, we aren't sure of the
* Bias configuration that bootloader has configured.
* So, we get to know the actual setting the first time
* we are asked to transition.
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index a15e415e61d5..85e3326b99eb 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps51632-regulator.c -- TI TPS51632
*
@@ -7,20 +8,6 @@
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/err.h>
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index 574958690ace..7c697bdf344e 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps62360.c -- TI tps62360
*
@@ -6,20 +7,6 @@
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index f25806531c7e..d24333344f93 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps65023-regulator.c
*
* Supports TPS65023 Regulator
*
* Copyright (C) 2009 Texas Instrument Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index eafbc2bb4b57..b83816ee6867 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps6507x-regulator.c
*
* Regulator driver for TPS65073 PMIC
*
* Copyright (C) 2009 Texas Instrument Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 070c956216b0..f1bc54c825dd 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
*
* Author: Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index e88ed96f4744..6bb5b02e19e2 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps65217-regulator.c
*
* Regulator driver for TPS65217 PMIC
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index fa263545a70e..48809c3b3abc 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps65218-regulator.c
*
* Regulator driver for TPS65218 PMIC
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index b52d4f2874b7..76f90202ae09 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for TI TPS65912x PMICs
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 4a3352821b1d..38383e7de3c1 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -594,16 +594,17 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
node = of_parse_phandle(np, "memory-region", a);
/* Not map vdevbuffer, vdevring region */
- if (!strncmp(node->name, "vdev", strlen("vdev")))
+ if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ of_node_put(node);
continue;
+ }
err = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- of_node_put(node);
-
if (b >= IMX_RPROC_MEM_MAX)
break;
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 54781f553f4e..594a9b43b7ae 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -410,10 +410,9 @@ static int keystone_rproc_probe(struct platform_device *pdev)
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to enable clock, status = %d\n", ret);
- pm_runtime_put_noidle(dev);
goto disable_rpm;
}
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 47b2a40e1b4a..d421a2ccaa1e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -401,6 +401,14 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
return 0;
}
@@ -943,7 +951,19 @@ static const struct mtk_scp_of_data mt8186_of_data = {
.scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
- .ipi_buf_offset = 0x7bdb0,
+ .ipi_buf_offset = 0x3bdb0,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct mtk_scp_of_data mt8192_of_data = {
@@ -973,6 +993,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
+ { .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{},
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 32a588fefbdc..430fab0266ed 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -243,7 +243,7 @@ static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
* omap_rproc_ack_timer_irq() - acknowledge a timer irq
* @timer: handle to a OMAP rproc timer
*
- * This function is used to clear the irq associated with a watchdog timer. The
+ * This function is used to clear the irq associated with a watchdog timer.
* The function is called by the OMAP remoteproc upon a watchdog event on the
* remote processor to clear the interrupt status of the watchdog timer.
*/
@@ -303,7 +303,7 @@ static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
* @configure: boolean flag used to acquire and configure the timer handle
*
* This function is used primarily to enable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either acquire and start a timer (during device initialization) or
* to just start a timer (during a resume operation).
*
@@ -443,7 +443,7 @@ free_timers:
* @configure: boolean flag used to release the timer handle
*
* This function is used primarily to disable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either stop and release a timer (during device shutdown) or to just
* stop a timer (during a suspend operation).
*
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 1777a01fa84e..128bf9912f2c 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -897,6 +897,7 @@ static const struct of_device_id pru_rproc_match[] = {
{ .compatible = "ti,j721e-pru", .data = &k3_pru_data },
{ .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am625-pru", .data = &k3_pru_data },
{},
};
MODULE_DEVICE_TABLE(of, pru_rproc_match);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 4b91e3c9eafa..020349f8979d 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -50,7 +50,7 @@ struct minidump_region {
};
/**
- * struct minidump_subsystem_toc: Subsystem's SMEM Table of content
+ * struct minidump_subsystem - Subsystem's SMEM Table of content
* @status : Subsystem toc init status
* @enabled : if set to 1, this region would be copied during coredump
* @encryption_status: Encryption status for this subsystem
@@ -68,7 +68,7 @@ struct minidump_subsystem {
};
/**
- * struct minidump_global_toc: Global Table of Content
+ * struct minidump_global_toc - Global Table of Content
* @status : Global Minidump init status
* @md_revision : Minidump revision
* @enabled : Minidump enable status
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 5280ec9b5449..497acfb33f8f 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -112,6 +112,7 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
else
dev_err(q6v5->dev, "watchdog without message\n");
+ q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
@@ -123,6 +124,9 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
size_t len;
char *msg;
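+	/* Ignore the fatal interrupt if the remote processor is already marked as not running */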
+ if (!q6v5->running)
+ return IRQ_HANDLED;
+
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(q6v5->dev, "fatal error received: %s\n", msg);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 2f3b9f54251e..4c9a1b99cd51 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -175,9 +175,8 @@ static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
- ret = pm_runtime_get_sync(pds[i]);
+ ret = pm_runtime_resume_and_get(pds[i]);
if (ret < 0) {
- pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index af217de75e4d..fddb63cffee0 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -932,27 +933,52 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
const char *fw_name)
{
- unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
+ unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
+ struct page **pages;
+ struct page *page;
dma_addr_t phys;
void *metadata;
int mdata_perm;
int xferop_ret;
size_t size;
- void *ptr;
+ void *vaddr;
+ int count;
int ret;
+ int i;
metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
- ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
- if (!ptr) {
+ page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+ if (!page) {
kfree(metadata);
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
return -ENOMEM;
}
- memcpy(ptr, metadata, size);
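+	/* The buffer has no kernel mapping (DMA_ATTR_NO_KERNEL_MAPPING), so vmap() its pages temporarily to copy the metadata in */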
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto free_dma_attrs;
+ }
+
+ for (i = 0; i < count; i++)
+ pages[i] = nth_page(page, i);
+
+ vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
+ kfree(pages);
+ if (!vaddr) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
+ ret = -EBUSY;
+ goto free_dma_attrs;
+ }
+
+ memcpy(vaddr, metadata, size);
+
+ vunmap(vaddr);
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
@@ -982,7 +1008,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
"mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs:
- dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+ dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
kfree(metadata);
return ret < 0 ? ret : 0;
@@ -1102,6 +1128,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
if (ret)
goto reclaim_mba;
+ if (qproc->has_mba_logs)
+ qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "MBA boot timed out\n");
@@ -1594,11 +1623,19 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
return ret;
}
+static unsigned long q6v5_panic(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+
+ return qcom_q6v5_panic(&qproc->q6v5);
+}
+
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
+ .panic = q6v5_panic,
};
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
@@ -2188,6 +2225,11 @@ static const struct rproc_hexagon_res msm8996_mss = {
"mnoc_axi",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 6ae39c5653b1..6afd0941e552 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -29,6 +30,8 @@
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
+#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
@@ -36,6 +39,7 @@ struct adsp_data {
unsigned int minidump_id;
bool has_aggre2_clk;
bool auto_boot;
+ bool decrypt_shutdown;
char **proxy_pd_names;
@@ -65,6 +69,7 @@ struct qcom_adsp {
unsigned int minidump_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ bool decrypt_shutdown;
const char *info_name;
struct completion start_done;
@@ -87,6 +92,9 @@ static void adsp_minidump(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
+ if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
qcom_minidump(rproc, adsp->minidump_id);
}
@@ -128,6 +136,19 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
}
}
+static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
+{
+ unsigned int retry_num = 50;
+ int ret;
+
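+	/* Retry the SCM shutdown while it returns -EINVAL, up to 50 attempts spaced ADSP_DECRYPT_SHUTDOWN_DELAY_MS apart */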
+ do {
+ msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
+ ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ } while (ret == -EINVAL && --retry_num);
+
+ return ret;
+}
+
static int adsp_unprepare(struct rproc *rproc)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -185,13 +206,17 @@ static int adsp_start(struct rproc *rproc)
if (ret)
goto disable_xo_clk;
- ret = regulator_enable(adsp->cx_supply);
- if (ret)
- goto disable_aggre2_clk;
+ if (adsp->cx_supply) {
+ ret = regulator_enable(adsp->cx_supply);
+ if (ret)
+ goto disable_aggre2_clk;
+ }
- ret = regulator_enable(adsp->px_supply);
- if (ret)
- goto disable_cx_supply;
+ if (adsp->px_supply) {
+ ret = regulator_enable(adsp->px_supply);
+ if (ret)
+ goto disable_cx_supply;
+ }
ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
if (ret) {
@@ -212,9 +237,11 @@ static int adsp_start(struct rproc *rproc)
return 0;
disable_px_supply:
- regulator_disable(adsp->px_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
disable_cx_supply:
- regulator_disable(adsp->cx_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
@@ -231,8 +258,10 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
{
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
- regulator_disable(adsp->px_supply);
- regulator_disable(adsp->cx_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
@@ -249,6 +278,9 @@ static int adsp_stop(struct rproc *rproc)
dev_err(adsp->dev, "timed out on wait\n");
ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ if (ret && adsp->decrypt_shutdown)
+ ret = adsp_shutdown_poll_decrypt(adsp);
+
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
@@ -268,6 +300,9 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iom
if (offset < 0 || offset + len > adsp->mem_size)
return NULL;
+ if (is_iomem)
+ *is_iomem = true;
+
return adsp->mem_region + offset;
}
@@ -326,14 +361,26 @@ static int adsp_init_clock(struct qcom_adsp *adsp)
static int adsp_init_regulator(struct qcom_adsp *adsp)
{
- adsp->cx_supply = devm_regulator_get(adsp->dev, "cx");
- if (IS_ERR(adsp->cx_supply))
- return PTR_ERR(adsp->cx_supply);
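+	/* The cx/px supplies are optional; -ENODEV simply means they are not provided */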
+ adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
+ if (IS_ERR(adsp->cx_supply)) {
+ if (PTR_ERR(adsp->cx_supply) == -ENODEV)
+ adsp->cx_supply = NULL;
+ else
+ return PTR_ERR(adsp->cx_supply);
+ }
- regulator_set_load(adsp->cx_supply, 100000);
+ if (adsp->cx_supply)
+ regulator_set_load(adsp->cx_supply, 100000);
- adsp->px_supply = devm_regulator_get(adsp->dev, "px");
- return PTR_ERR_OR_ZERO(adsp->px_supply);
+ adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
+ if (IS_ERR(adsp->px_supply)) {
+ if (PTR_ERR(adsp->px_supply) == -ENODEV)
+ adsp->px_supply = NULL;
+ else
+ return PTR_ERR(adsp->px_supply);
+ }
+
+ return 0;
}
static int adsp_pds_attach(struct device *dev, struct device **devs,
@@ -459,9 +506,12 @@ static int adsp_probe(struct platform_device *pdev)
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
adsp->info_name = desc->sysmon_name;
+ adsp->decrypt_shutdown = desc->decrypt_shutdown;
platform_set_drvdata(pdev, adsp);
- device_wakeup_enable(adsp->dev);
+ ret = device_init_wakeup(adsp->dev, true);
+ if (ret)
+ goto free_rproc;
ret = adsp_alloc_memory_region(adsp);
if (ret)
@@ -877,6 +927,25 @@ static const struct adsp_data sdx55_mpss_resource = {
.ssctl_id = 0x22,
};
+static const struct adsp_data sm8450_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
@@ -916,7 +985,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
- { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 9fca81492863..57dde2a69b9d 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -41,6 +41,7 @@ struct qcom_sysmon {
struct completion comp;
struct completion ind_comp;
struct completion shutdown_comp;
+ struct completion ssctl_comp;
struct mutex lock;
bool ssr_ack;
@@ -445,6 +446,8 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
svc->priv = sysmon;
+ complete(&sysmon->ssctl_comp);
+
return 0;
}
@@ -501,6 +504,7 @@ static int sysmon_start(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
+ reinit_completion(&sysmon->ssctl_comp);
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
@@ -508,10 +512,12 @@ static int sysmon_start(struct rproc_subdev *subdev)
mutex_lock(&sysmon_lock);
list_for_each_entry(target, &sysmon_list, node) {
- if (target == sysmon)
+ mutex_lock(&target->state_lock);
+ if (target == sysmon || target->state != SSCTL_SSR_EVENT_AFTER_POWERUP) {
+ mutex_unlock(&target->state_lock);
continue;
+ }
- mutex_lock(&target->state_lock);
event.subsys_name = target->name;
event.ssr_event = target->state;
@@ -545,6 +551,11 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
if (crashed)
return;
+ if (sysmon->ssctl_instance) {
+ if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+ dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+ }
+
if (sysmon->ssctl_version)
sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
@@ -631,6 +642,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
init_completion(&sysmon->comp);
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
+ init_completion(&sysmon->ssctl_comp);
mutex_init(&sysmon->lock);
mutex_init(&sysmon->state_lock);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 9a223d394087..68f37296b151 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -467,6 +467,7 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
irq_handler_t thread_fn)
{
int ret;
+ int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
@@ -477,14 +478,19 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
return ret;
}
+ irq_number = ret;
+
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
- return ret;
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 02a04ab34a23..e5279ed9a8d7 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -59,6 +59,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
+static struct workqueue_struct *rproc_recovery_wq;
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
@@ -334,7 +335,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
size_t size;
/* actual size of vring (in bytes) */
- size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
@@ -401,7 +402,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return -EINVAL;
}
- rvring->len = vring->num;
+ rvring->num = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
@@ -461,6 +462,7 @@ static void rproc_rvdev_release(struct device *dev)
struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
of_reserved_mem_device_release(dev);
+ dma_release_coherent_memory(dev);
kfree(rvdev);
}
@@ -970,7 +972,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
return 0;
}
- /* Register carveout in in list */
+ /* Register carveout in list */
carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
@@ -2434,7 +2436,7 @@ static void rproc_type_release(struct device *dev)
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
- ida_simple_remove(&rproc_dev_index, rproc->index);
+ ida_free(&rproc_dev_index, rproc->index);
kfree_const(rproc->firmware);
kfree_const(rproc->name);
@@ -2551,9 +2553,9 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
goto put_device;
/* Assign a unique device index and name */
- rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
if (rproc->index < 0) {
- dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
goto put_device;
}
@@ -2762,8 +2764,7 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* Have a worker handle the error; ensure system is not suspended */
- queue_work(system_freezable_wq, &rproc->crash_handler);
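+	/* Defer crash handling to the dedicated unbound, freezable recovery workqueue */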
+ queue_work(rproc_recovery_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
@@ -2812,6 +2813,13 @@ static void __exit rproc_exit_panic(void)
static int __init remoteproc_init(void)
{
+ rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!rproc_recovery_wq) {
+ pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
+ return -ENOMEM;
+ }
+
rproc_init_sysfs();
rproc_init_debugfs();
rproc_init_cdev();
@@ -2825,9 +2833,13 @@ static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ if (!rproc_recovery_wq)
+ return;
+
rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
+ destroy_workqueue(rproc_recovery_wq);
}
module_exit(remoteproc_exit);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 70ab496d0431..0f7706e23eb9 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -87,7 +87,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
- int len, size;
+ int num, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
@@ -104,20 +104,20 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rvring = &rvdev->vring[id];
addr = mem->va;
- len = rvring->len;
+ num = rvring->num;
/* zero vring */
- size = vring_size(len, rvring->align);
+ size = vring_size(num, rvring->align);
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+ id, addr, num, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
+ vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
@@ -125,6 +125,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
return ERR_PTR(-ENOMEM);
}
+ vq->num_max = num;
+
rvring->vq = vq;
vq->priv = rvring;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 4840ad906018..0481926c6975 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1655,6 +1655,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
+ of_node_put(child);
goto fail;
}
@@ -1663,6 +1664,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
+ of_node_put(child);
goto fail;
}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 93c8d07ee328..806773e88832 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -17,7 +17,7 @@ if RESET_CONTROLLER
config RESET_A10SR
tristate "Altera Arria10 System Resource Reset"
- depends on MFD_ALTERA_A10SR
+ depends on MFD_ALTERA_A10SR || COMPILE_TEST
help
This option enables support for the external reset functions for
peripheral PHYs on the Altera Arria10 System Resource Chip.
@@ -200,8 +200,9 @@ config RESET_SCMI
firmware controlling all the reset signals.
config RESET_SIMPLE
- bool "Simple Reset Controller Driver" if COMPILE_TEST
+ bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,
@@ -231,6 +232,15 @@ config RESET_STARFIVE_JH7100
help
This enables the reset controller driver for the StarFive JH7100 SoC.
+config RESET_SUNPLUS
+ bool "Sunplus SoCs Reset Driver" if COMPILE_TEST
+ default ARCH_SUNPLUS
+ help
+ This enables the reset driver support for Sunplus SoCs.
+	  Reset lines can be asserted and deasserted by toggling bits
+	  in a contiguous, exclusive register space. The registers are HIWORD_MASKED,
+	  which means each register holds 16 reset lines.
+
config RESET_SUNXI
bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
default ARCH_SUNXI
@@ -256,6 +266,14 @@ config RESET_TI_SYSCON
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_TI_TPS380X
+ tristate "TI TPS380x Reset Driver"
+ select GPIOLIB
+ help
+ This enables the reset driver support for TI TPS380x devices. If
+ you wish to use the reset framework for such devices, say Y here.
+ Otherwise, say N.
+
config RESET_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD reset controller"
depends on MFD_TN48M_CPLD || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index a80a9c4008a7..cd5cf8e7c6a7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -30,9 +30,11 @@ obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_RESET_STARFIVE_JH7100) += reset-starfive-jh7100.o
+obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o
obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index 2ea4d3136e15..24c55efa98e5 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -17,13 +17,20 @@
/* NPCM7xx GCR registers */
#define NPCM_MDLR_OFFSET 0x7C
-#define NPCM_MDLR_USBD0 BIT(9)
-#define NPCM_MDLR_USBD1 BIT(8)
-#define NPCM_MDLR_USBD2_4 BIT(21)
-#define NPCM_MDLR_USBD5_9 BIT(22)
+#define NPCM7XX_MDLR_USBD0 BIT(9)
+#define NPCM7XX_MDLR_USBD1 BIT(8)
+#define NPCM7XX_MDLR_USBD2_4 BIT(21)
+#define NPCM7XX_MDLR_USBD5_9 BIT(22)
+
+/* NPCM8xx MDLR bits */
+#define NPCM8XX_MDLR_USBD0_3 BIT(9)
+#define NPCM8XX_MDLR_USBD4_7 BIT(22)
+#define NPCM8XX_MDLR_USBD8 BIT(24)
+#define NPCM8XX_MDLR_USBD9 BIT(21)
#define NPCM_USB1PHYCTL_OFFSET 0x140
#define NPCM_USB2PHYCTL_OFFSET 0x144
+#define NPCM_USB3PHYCTL_OFFSET 0x148
#define NPCM_USBXPHYCTL_RS BIT(28)
/* NPCM7xx Reset registers */
@@ -49,12 +56,38 @@
#define NPCM_IPSRST3_USBPHY1 BIT(24)
#define NPCM_IPSRST3_USBPHY2 BIT(25)
+#define NPCM_IPSRST4 0x74
+#define NPCM_IPSRST4_USBPHY3 BIT(25)
+#define NPCM_IPSRST4_USB_HOST2 BIT(31)
+
#define NPCM_RC_RESETS_PER_REG 32
#define NPCM_MASK_RESETS GENMASK(4, 0)
+enum {
+ BMC_NPCM7XX = 0,
+ BMC_NPCM8XX,
+};
+
+static const u32 npxm7xx_ipsrst[] = {NPCM_IPSRST1, NPCM_IPSRST2, NPCM_IPSRST3};
+static const u32 npxm8xx_ipsrst[] = {NPCM_IPSRST1, NPCM_IPSRST2, NPCM_IPSRST3,
+ NPCM_IPSRST4};
+
+struct npcm_reset_info {
+ u32 bmc_id;
+ u32 num_ipsrst;
+ const u32 *ipsrst;
+};
+
+static const struct npcm_reset_info npxm7xx_reset_info[] = {
+ {.bmc_id = BMC_NPCM7XX, .num_ipsrst = 3, .ipsrst = npxm7xx_ipsrst}};
+static const struct npcm_reset_info npxm8xx_reset_info[] = {
+ {.bmc_id = BMC_NPCM8XX, .num_ipsrst = 4, .ipsrst = npxm8xx_ipsrst}};
+
struct npcm_rc_data {
struct reset_controller_dev rcdev;
struct notifier_block restart_nb;
+ const struct npcm_reset_info *info;
+ struct regmap *gcr_regmap;
u32 sw_reset_number;
void __iomem *base;
spinlock_t lock;
@@ -120,14 +153,24 @@ static int npcm_rc_status(struct reset_controller_dev *rcdev,
static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
+ struct npcm_rc_data *rc = to_rc_data(rcdev);
unsigned int offset, bit;
+ bool offset_found = false;
+ int off_num;
offset = reset_spec->args[0];
- if (offset != NPCM_IPSRST1 && offset != NPCM_IPSRST2 &&
- offset != NPCM_IPSRST3) {
+ for (off_num = 0 ; off_num < rc->info->num_ipsrst ; off_num++) {
+ if (offset == rc->info->ipsrst[off_num]) {
+ offset_found = true;
+ break;
+ }
+ }
+
+ if (!offset_found) {
dev_err(rcdev->dev, "Error reset register (0x%x)\n", offset);
return -EINVAL;
}
+
bit = reset_spec->args[1];
if (bit >= NPCM_RC_RESETS_PER_REG) {
dev_err(rcdev->dev, "Error reset number (%d)\n", bit);
@@ -138,45 +181,29 @@ static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
}
static const struct of_device_id npcm_rc_match[] = {
- { .compatible = "nuvoton,npcm750-reset",
- .data = (void *)"nuvoton,npcm750-gcr" },
+ { .compatible = "nuvoton,npcm750-reset", .data = &npxm7xx_reset_info},
+ { .compatible = "nuvoton,npcm845-reset", .data = &npxm8xx_reset_info},
{ }
};
-/*
- * The following procedure should be observed in USB PHY, USB device and
- * USB host initialization at BMC boot
- */
-static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
+static void npcm_usb_reset_npcm7xx(struct npcm_rc_data *rc)
{
u32 mdlr, iprst1, iprst2, iprst3;
- struct device *dev = &pdev->dev;
- struct regmap *gcr_regmap;
u32 ipsrst1_bits = 0;
u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
u32 ipsrst3_bits = 0;
- const char *gcr_dt;
-
- gcr_dt = (const char *)
- of_match_device(dev->driver->of_match_table, dev)->data;
-
- gcr_regmap = syscon_regmap_lookup_by_compatible(gcr_dt);
- if (IS_ERR(gcr_regmap)) {
- dev_err(&pdev->dev, "Failed to find %s\n", gcr_dt);
- return PTR_ERR(gcr_regmap);
- }
/* checking which USB device is enabled */
- regmap_read(gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
- if (!(mdlr & NPCM_MDLR_USBD0))
+ regmap_read(rc->gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
+ if (!(mdlr & NPCM7XX_MDLR_USBD0))
ipsrst3_bits |= NPCM_IPSRST3_USBD0;
- if (!(mdlr & NPCM_MDLR_USBD1))
+ if (!(mdlr & NPCM7XX_MDLR_USBD1))
ipsrst1_bits |= NPCM_IPSRST1_USBD1;
- if (!(mdlr & NPCM_MDLR_USBD2_4))
+ if (!(mdlr & NPCM7XX_MDLR_USBD2_4))
ipsrst1_bits |= (NPCM_IPSRST1_USBD2 |
NPCM_IPSRST1_USBD3 |
NPCM_IPSRST1_USBD4);
- if (!(mdlr & NPCM_MDLR_USBD0)) {
+ if (!(mdlr & NPCM7XX_MDLR_USBD0)) {
ipsrst1_bits |= (NPCM_IPSRST1_USBD5 |
NPCM_IPSRST1_USBD6);
ipsrst3_bits |= (NPCM_IPSRST3_USBD7 |
@@ -199,9 +226,9 @@ static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
writel(iprst3, rc->base + NPCM_IPSRST3);
/* clear USB PHY RS bit */
- regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
- regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
/* deassert reset USB PHY */
@@ -211,19 +238,131 @@ static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
udelay(50);
/* set USB PHY RS bit */
- regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+
+	/* deassert reset USB devices */
+ iprst1 &= ~ipsrst1_bits;
+ iprst2 &= ~ipsrst2_bits;
+ iprst3 &= ~ipsrst3_bits;
+
+ writel(iprst1, rc->base + NPCM_IPSRST1);
+ writel(iprst2, rc->base + NPCM_IPSRST2);
+ writel(iprst3, rc->base + NPCM_IPSRST3);
+}
+
+static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
+{
+ u32 mdlr, iprst1, iprst2, iprst3, iprst4;
+ u32 ipsrst1_bits = 0;
+ u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
+ u32 ipsrst3_bits = 0;
+ u32 ipsrst4_bits = NPCM_IPSRST4_USB_HOST2 | NPCM_IPSRST4_USBPHY3;
+
+ /* checking which USB device is enabled */
+ regmap_read(rc->gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
+ if (!(mdlr & NPCM8XX_MDLR_USBD0_3)) {
+ ipsrst3_bits |= NPCM_IPSRST3_USBD0;
+ ipsrst1_bits |= (NPCM_IPSRST1_USBD1 |
+ NPCM_IPSRST1_USBD2 |
+ NPCM_IPSRST1_USBD3);
+ }
+ if (!(mdlr & NPCM8XX_MDLR_USBD4_7)) {
+ ipsrst1_bits |= (NPCM_IPSRST1_USBD4 |
+ NPCM_IPSRST1_USBD5 |
+ NPCM_IPSRST1_USBD6);
+ ipsrst3_bits |= NPCM_IPSRST3_USBD7;
+ }
+
+ if (!(mdlr & NPCM8XX_MDLR_USBD8))
+ ipsrst3_bits |= NPCM_IPSRST3_USBD8;
+ if (!(mdlr & NPCM8XX_MDLR_USBD9))
+ ipsrst3_bits |= NPCM_IPSRST3_USBD9;
+
+ /* assert reset USB PHY and USB devices */
+ iprst1 = readl(rc->base + NPCM_IPSRST1);
+ iprst2 = readl(rc->base + NPCM_IPSRST2);
+ iprst3 = readl(rc->base + NPCM_IPSRST3);
+ iprst4 = readl(rc->base + NPCM_IPSRST4);
+
+ iprst1 |= ipsrst1_bits;
+ iprst2 |= ipsrst2_bits;
+ iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
+ NPCM_IPSRST3_USBPHY2);
+ iprst2 |= ipsrst4_bits;
+
+ writel(iprst1, rc->base + NPCM_IPSRST1);
+ writel(iprst2, rc->base + NPCM_IPSRST2);
+ writel(iprst3, rc->base + NPCM_IPSRST3);
+ writel(iprst4, rc->base + NPCM_IPSRST4);
+
+ /* clear USB PHY RS bit */
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, 0);
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, 0);
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB3PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, 0);
+
+ /* deassert reset USB PHY */
+ iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2);
+ writel(iprst3, rc->base + NPCM_IPSRST3);
+ iprst4 &= ~NPCM_IPSRST4_USBPHY3;
+ writel(iprst4, rc->base + NPCM_IPSRST4);
+
+ /* set USB PHY RS bit */
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
- regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+ regmap_update_bits(rc->gcr_regmap, NPCM_USB3PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
	/* deassert reset USB devices */
iprst1 &= ~ipsrst1_bits;
iprst2 &= ~ipsrst2_bits;
iprst3 &= ~ipsrst3_bits;
+ iprst4 &= ~ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
writel(iprst3, rc->base + NPCM_IPSRST3);
+ writel(iprst4, rc->base + NPCM_IPSRST4);
+}
+
+/*
+ * The following procedure should be observed in USB PHY, USB device and
+ * USB host initialization at BMC boot
+ */
+static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
+{
+ struct device *dev = &pdev->dev;
+
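+	/* Look up the GCR syscon via the "nuvoton,sysgcr" phandle; fall back to the legacy compatible lookup for old device trees */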
+ rc->gcr_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "nuvoton,sysgcr");
+ if (IS_ERR(rc->gcr_regmap)) {
+ dev_warn(&pdev->dev, "Failed to find nuvoton,sysgcr property, please update the device tree\n");
+ dev_info(&pdev->dev, "Using nuvoton,npcm750-gcr for Poleg backward compatibility\n");
+ rc->gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ if (IS_ERR(rc->gcr_regmap)) {
+ dev_err(&pdev->dev, "Failed to find nuvoton,npcm750-gcr");
+ return PTR_ERR(rc->gcr_regmap);
+ }
+ }
+
+ rc->info = (const struct npcm_reset_info *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
+ switch (rc->info->bmc_id) {
+ case BMC_NPCM7XX:
+ npcm_usb_reset_npcm7xx(rc);
+ break;
+ case BMC_NPCM8XX:
+ npcm_usb_reset_npcm8xx(rc);
+ break;
+ default:
+ return -ENODEV;
+ }
return 0;
}
diff --git a/drivers/reset/reset-sunplus.c b/drivers/reset/reset-sunplus.c
new file mode 100644
index 000000000000..2f23ecaa7b98
--- /dev/null
+++ b/drivers/reset/reset-sunplus.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * SP7021 reset driver
+ *
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/reboot.h>
+
+/* HIWORD_MASK_REG BITS */
+#define BITS_PER_HWM_REG 16
+
+/* Reset line HW info: each entry encodes register index and bit shift (value = index * BITS_PER_HWM_REG + shift) */
+static const u32 sp_resets[] = {
+/* SP7021: mo_reset0 ~ mo_reset9 */
+ 0x00,
+ 0x02,
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07,
+ 0x08,
+ 0x09,
+ 0x0a,
+ 0x0b,
+ 0x0d,
+ 0x0e,
+ 0x0f,
+ 0x10,
+ 0x12,
+ 0x14,
+ 0x15,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x20,
+ 0x21,
+ 0x22,
+ 0x23,
+ 0x24,
+ 0x25,
+ 0x26,
+ 0x2a,
+ 0x2b,
+ 0x2d,
+ 0x2e,
+ 0x30,
+ 0x31,
+ 0x32,
+ 0x33,
+ 0x3d,
+ 0x3e,
+ 0x3f,
+ 0x42,
+ 0x44,
+ 0x4b,
+ 0x4c,
+ 0x4d,
+ 0x4e,
+ 0x4f,
+ 0x50,
+ 0x55,
+ 0x60,
+ 0x61,
+ 0x6a,
+ 0x6f,
+ 0x70,
+ 0x73,
+ 0x74,
+ 0x86,
+ 0x8a,
+ 0x8b,
+ 0x8d,
+ 0x8e,
+ 0x8f,
+ 0x90,
+ 0x92,
+ 0x93,
+ 0x94,
+ 0x95,
+ 0x96,
+ 0x97,
+ 0x98,
+ 0x99,
+};
+
+struct sp_reset {
+ struct reset_controller_dev rcdev;
+ struct notifier_block notifier;
+ void __iomem *base;
+};
+
+static inline struct sp_reset *to_sp_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct sp_reset, rcdev);
+}
+
+static int sp_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct sp_reset *reset = to_sp_reset(rcdev);
+ int index = sp_resets[id] / BITS_PER_HWM_REG;
+ int shift = sp_resets[id] % BITS_PER_HWM_REG;
+ u32 val;
+
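+	/* HIWORD-masked write: bit (shift + 16) enables the update, bit (shift) carries the new assert state */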
+ val = (1 << (16 + shift)) | (assert << shift);
+ writel(val, reset->base + (index * 4));
+
+ return 0;
+}
+
+static int sp_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return sp_reset_update(rcdev, id, true);
+}
+
+static int sp_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return sp_reset_update(rcdev, id, false);
+}
+
+static int sp_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct sp_reset *reset = to_sp_reset(rcdev);
+ int index = sp_resets[id] / BITS_PER_HWM_REG;
+ int shift = sp_resets[id] % BITS_PER_HWM_REG;
+ u32 reg;
+
+ reg = readl(reset->base + (index * 4));
+
+ return !!(reg & BIT(shift));
+}
+
+static const struct reset_control_ops sp_reset_ops = {
+ .assert = sp_reset_assert,
+ .deassert = sp_reset_deassert,
+ .status = sp_reset_status,
+};
+
+static int sp_restart(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct sp_reset *reset = container_of(nb, struct sp_reset, notifier);
+
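+	/* Assert then deassert reset line 0 to perform the system restart */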
+ sp_reset_assert(&reset->rcdev, 0);
+ sp_reset_deassert(&reset->rcdev, 0);
+
+ return NOTIFY_DONE;
+}
+
+static int sp_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_reset *reset;
+ struct resource *res;
+ int ret;
+
+ reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reset->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reset->base))
+ return PTR_ERR(reset->base);
+
+ reset->rcdev.ops = &sp_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_HWM_REG;
+
+ ret = devm_reset_controller_register(dev, &reset->rcdev);
+ if (ret)
+ return ret;
+
+ reset->notifier.notifier_call = sp_restart;
+ reset->notifier.priority = 192;
+
+ return register_restart_handler(&reset->notifier);
+}
+
+static const struct of_device_id sp_reset_dt_ids[] = {
+ {.compatible = "sunplus,sp7021-reset",},
+ { /* sentinel */ },
+};
+
+static struct platform_driver sp_reset_driver = {
+ .probe = sp_reset_probe,
+ .driver = {
+ .name = "sunplus-reset",
+ .of_match_table = sp_reset_dt_ids,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(sp_reset_driver);
diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c
index b799aefad547..cc01fa5b0bea 100644
--- a/drivers/reset/reset-ti-sci.c
+++ b/drivers/reset/reset-ti-sci.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instrument's System Control Interface (TI-SCI) reset driver
*
* Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/idr.h>
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index 2b92775d58f0..f0dd7ffc3b72 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI SYSCON regmap reset driver
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
* Suman Anna <afd@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/mfd/syscon.h>
diff --git a/drivers/reset/reset-tps380x.c b/drivers/reset/reset-tps380x.c
new file mode 100644
index 000000000000..09d511f069ba
--- /dev/null
+++ b/drivers/reset/reset-tps380x.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TI TPS380x Supply Voltage Supervisor and Reset Controller Driver
+ *
+ * Copyright (C) 2022 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ *
+ * Based on Simple Reset Controller Driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset-controller.h>
+
+struct tps380x_reset {
+ struct reset_controller_dev rcdev;
+ struct gpio_desc *reset_gpio;
+ unsigned int reset_ms;
+};
+
+struct tps380x_reset_devdata {
+ unsigned int min_reset_ms;
+ unsigned int typ_reset_ms;
+ unsigned int max_reset_ms;
+};
+
+static inline
+struct tps380x_reset *to_tps380x_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tps380x_reset, rcdev);
+}
+
+static int
+tps380x_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 1);
+
+ return 0;
+}
+
+static int
+tps380x_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 0);
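+	/* Wait out the device's worst-case reset period before the line is treated as deasserted */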
+ msleep(tps380x->reset_ms);
+
+ return 0;
+}
+
+static const struct reset_control_ops reset_tps380x_ops = {
+ .assert = tps380x_reset_assert,
+ .deassert = tps380x_reset_deassert,
+};
+
+static int tps380x_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* No special handling needed, we have only one reset line per device */
+ return 0;
+}
+
+static int tps380x_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct tps380x_reset_devdata *devdata;
+ struct tps380x_reset *tps380x;
+
+ devdata = device_get_match_data(dev);
+ if (!devdata)
+ return -EINVAL;
+
+ tps380x = devm_kzalloc(dev, sizeof(*tps380x), GFP_KERNEL);
+ if (!tps380x)
+ return -ENOMEM;
+
+ tps380x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps380x->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(tps380x->reset_gpio),
+ "Failed to get GPIO\n");
+
+ tps380x->reset_ms = devdata->max_reset_ms;
+
+ tps380x->rcdev.ops = &reset_tps380x_ops;
+ tps380x->rcdev.owner = THIS_MODULE;
+ tps380x->rcdev.dev = dev;
+ tps380x->rcdev.of_node = dev->of_node;
+ tps380x->rcdev.of_reset_n_cells = 0;
+ tps380x->rcdev.of_xlate = tps380x_reset_of_xlate;
+ tps380x->rcdev.nr_resets = 1;
+
+ return devm_reset_controller_register(dev, &tps380x->rcdev);
+}
+
+static const struct tps380x_reset_devdata tps3801_reset_data = {
+ .min_reset_ms = 120,
+ .typ_reset_ms = 200,
+ .max_reset_ms = 280,
+};
+
+static const struct of_device_id tps380x_reset_dt_ids[] = {
+ { .compatible = "ti,tps3801", .data = &tps3801_reset_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tps380x_reset_dt_ids);
+
+static struct platform_driver tps380x_reset_driver = {
+ .probe = tps380x_reset_probe,
+ .driver = {
+ .name = "tps380x-reset",
+ .of_match_table = tps380x_reset_dt_ids,
+ },
+};
+module_platform_driver(tps380x_reset_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("TI TPS380x Supply Voltage Supervisor and Reset Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 5b4404b8be4c..d1213c33da20 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -234,7 +234,9 @@ static void mtk_register_device_work_function(struct work_struct *register_work)
if (info->registered)
continue;
+ mutex_unlock(&subdev->channels_lock);
ret = mtk_rpmsg_register_device(subdev, &info->info);
+ mutex_lock(&subdev->channels_lock);
if (ret) {
dev_err(&pdev->dev, "Can't create rpmsg_device\n");
continue;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 07586514991f..115c0a1eddb1 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -98,8 +98,6 @@ struct glink_core_rx_intent {
struct qcom_glink {
struct device *dev;
- const char *name;
-
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
@@ -1546,7 +1544,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
@@ -1674,7 +1672,7 @@ static ssize_t rpmsg_name_show(struct device *dev,
if (ret < 0)
name = dev->of_node->name;
- return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);
@@ -1755,10 +1753,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
if (ret)
dev_err(dev, "failed to add groups\n");
- ret = of_property_read_string(dev->of_node, "label", &glink->name);
- if (ret < 0)
- glink->name = dev->of_node->name;
-
glink->mbox_client.dev = dev;
glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
diff --git a/drivers/rpmsg/qcom_glink_ssr.c b/drivers/rpmsg/qcom_glink_ssr.c
index dea929c6045d..776d64446879 100644
--- a/drivers/rpmsg/qcom_glink_ssr.c
+++ b/drivers/rpmsg/qcom_glink_ssr.c
@@ -39,7 +39,7 @@ struct cleanup_done_msg {
__le32 seq_num;
};
-/**
+/*
* G-Link SSR protocol commands
*/
#define GLINK_SSR_DO_CLEANUP 0
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 1957b27c4cf3..1044cf03c542 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -729,11 +729,11 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
}
/**
- * qcom_smd_send - write data to smd channel
+ * __qcom_smd_send - write data to smd channel
* @channel: channel handle
* @data: buffer of data to write
* @len: number of bytes to write
- * @wait: flag to indicate if write has ca wait
+ * @wait: flag to indicate if write can wait
*
* This is a blocking write of len bytes into the channel's tx ring buffer and
* signal the remote end. It will sleep until there is enough space available
@@ -1089,7 +1089,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1323,7 +1323,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
spin_unlock_irqrestore(&edge->channels_lock, flags);
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
rpmsg_unregister_device(&edge->dev, &chinfo);
@@ -1383,6 +1383,7 @@ static int qcom_smd_parse_edge(struct device *dev,
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
goto put_node;
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index b6183d4f62a2..4f2189111494 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -120,8 +120,11 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
struct rpmsg_device *rpdev = eptdev->rpdev;
struct device *dev = &eptdev->dev;
- if (eptdev->ept)
+ mutex_lock(&eptdev->ept_lock);
+ if (eptdev->ept) {
+ mutex_unlock(&eptdev->ept_lock);
return -EBUSY;
+ }
get_device(dev);
@@ -137,11 +140,13 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
if (!ept) {
dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
put_device(dev);
+ mutex_unlock(&eptdev->ept_lock);
return -EINVAL;
}
eptdev->ept = ept;
filp->private_data = eptdev;
+ mutex_unlock(&eptdev->ept_lock);
return 0;
}
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 290c1f02da10..d6dde00efdae 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -604,7 +604,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
int ret;
if (driver_override)
- strcpy(rpdev->id.name, driver_override);
+ strscpy_pad(rpdev->id.name, driver_override, RPMSG_NAME_SIZE);
dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
@@ -618,6 +618,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
strlen(driver_override));
if (ret) {
dev_err(dev, "device_set_override failed: %d\n", ret);
+ put_device(dev);
return ret;
}
}
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index a22cd4abe7d1..39b646d0d40d 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -41,8 +41,8 @@ struct rpmsg_device_ops {
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
- int (*announce_create)(struct rpmsg_device *ept);
- int (*announce_destroy)(struct rpmsg_device *ept);
+ int (*announce_create)(struct rpmsg_device *rpdev);
+ int (*announce_destroy)(struct rpmsg_device *rpdev);
};
/**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a00f901b5c1d..b8de25118ad0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -383,6 +383,16 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
+config RTC_DRV_NCT3018Y
+ tristate "Nuvoton NCT3018Y"
+ depends on OF
+ help
+ If you say yes here you get support for the Nuvoton NCT3018Y I2C RTC
+ chip.
+
+ This driver can also be built as a module, if so, the module will be
+ called "rtc-nct3018y".
+
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1478,16 +1488,6 @@ config RTC_DRV_SUNPLUS
This driver can also be built as a module. If so, the module
will be called rtc-sunplus.
-config RTC_DRV_VR41XX
- tristate "NEC VR41XX"
- depends on CPU_VR41XX || COMPILE_TEST
- help
- If you say Y here you will get access to the real time clock
- built into your NEC VR41XX CPU.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc-vr41xx.
-
config RTC_DRV_PL030
tristate "ARM AMBA PL030 RTC"
depends on ARM_AMBA
@@ -1929,6 +1929,17 @@ config RTC_DRV_ASPEED
This driver can also be built as a module, if so, the module
will be called "rtc-aspeed".
+config RTC_DRV_TI_K3
+ tristate "TI K3 RTC"
+ depends on ARCH_K3 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments
+ Real Time Clock for K3 architecture.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-ti-k3".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
@@ -1973,4 +1984,14 @@ config RTC_DRV_MSC313
This driver can also be built as a module, if so, the module
will be called "rtc-msc313".
+config RTC_DRV_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC built-in RTC"
+ depends on SOC_MICROCHIP_POLARFIRE
+ help
+ If you say yes here you will get support for the
+ built-in RTC on PolarFire SoC.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mpfs".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index fb04467b652d..aab22bc63432 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
+obj-$(CONFIG_RTC_DRV_NCT3018Y) += rtc-nct3018y.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -130,6 +131,7 @@ obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
+obj-$(CONFIG_RTC_DRV_POLARFIRE_SOC) += rtc-mpfs.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
@@ -172,11 +174,11 @@ obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
-obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 3c8eec2218df..e48223c00c67 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -36,7 +36,7 @@ static void rtc_device_release(struct device *dev)
cancel_work_sync(&rtc->irqwork);
- ida_simple_remove(&rtc_ida, rtc->id);
+ ida_free(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);
kfree(rtc);
}
@@ -262,7 +262,7 @@ static int rtc_device_get_id(struct device *dev)
}
if (id < 0)
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&rtc_ida, GFP_KERNEL);
return id;
}
@@ -368,7 +368,7 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
rtc = rtc_allocate_device();
if (!rtc) {
- ida_simple_remove(&rtc_ida, id);
+ ida_free(&rtc_ida, id);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..4aad9bb99868 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -96,7 +96,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_scheduled_work();
+ flush_work(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
@@ -566,9 +566,3 @@ void __init rtc_dev_init(void)
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
-
-void __exit rtc_dev_exit(void)
-{
- if (rtc_devt)
- unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
-}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 6e3e320dc727..f2b0971d2c65 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -817,8 +817,7 @@ static const struct regmap_config abb5zes3_rtc_regmap_config = {
.val_bits = 8,
};
-static int abb5zes3_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abb5zes3_probe(struct i2c_client *client)
{
struct abb5zes3_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -945,7 +944,7 @@ static struct i2c_driver abb5zes3_driver = {
.pm = &abb5zes3_rtc_pm_ops,
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
- .probe = abb5zes3_probe,
+ .probe_new = abb5zes3_probe,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index e188ab517f1e..2f8deb8c4cd3 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -495,8 +495,7 @@ static void abeoz9_hwmon_register(struct device *dev,
#endif
-static int abeoz9_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abeoz9_probe(struct i2c_client *client)
{
struct abeoz9_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -580,7 +579,7 @@ static struct i2c_driver abeoz9_driver = {
.name = "rtc-ab-eoz9",
.of_match_table = of_match_ptr(abeoz9_dt_match),
},
- .probe = abeoz9_probe,
+ .probe_new = abeoz9_probe,
.id_table = abeoz9_id,
};
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 2235c968842d..e0bbb11d912e 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -249,8 +249,7 @@ static void bq32k_sysfs_unregister(struct device *dev)
device_remove_file(dev, &dev_attr_trickle_charge_bypass);
}
-static int bq32k_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bq32k_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct rtc_device *rtc;
@@ -322,7 +321,7 @@ static struct i2c_driver bq32k_driver = {
.name = "bq32k",
.of_match_table = of_match_ptr(bq32k_of_match),
},
- .probe = bq32k_probe,
+ .probe_new = bq32k_probe,
.remove = bq32k_remove,
.id_table = bq32k_id,
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7c006c2b125f..bdb1df843c78 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1260,9 +1260,6 @@ static void use_acpi_alarm_quirks(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return;
-
if (!is_hpet_enabled())
return;
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..4b10a1b8f370 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -2,7 +2,6 @@
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
-extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
#else
@@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
{
}
-static inline void rtc_dev_exit(void)
-{
-}
-
static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 70626793ca69..887f5193e253 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -375,10 +375,8 @@ static int cros_ec_rtc_remove(struct platform_device *pdev)
ret = blocking_notifier_chain_unregister(
&cros_ec_rtc->cros_ec->event_notifier,
&cros_ec_rtc->notifier);
- if (ret) {
+ if (ret)
dev_err(dev, "failed to unregister notifier\n");
- return ret;
- }
return 0;
}
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 8db5a631bca8..b19de5100b1a 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -467,8 +467,7 @@ static const struct watchdog_ops ds1374_wdt_ops = {
*
*****************************************************************************
*/
-static int ds1374_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1374_probe(struct i2c_client *client)
{
struct ds1374 *ds1374;
int ret;
@@ -575,7 +574,7 @@ static struct i2c_driver ds1374_driver = {
.of_match_table = of_match_ptr(ds1374_of_match),
.pm = &ds1374_pm,
},
- .probe = ds1374_probe,
+ .probe_new = ds1374_probe,
.remove = ds1374_remove,
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 4cd8efbef6cf..a3bb2cd9c881 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -106,8 +106,7 @@ static const struct rtc_class_ops ds1672_rtc_ops = {
.set_time = ds1672_set_time,
};
-static int ds1672_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1672_probe(struct i2c_client *client)
{
int err = 0;
struct rtc_device *rtc;
@@ -150,7 +149,7 @@ static struct i2c_driver ds1672_driver = {
.name = "rtc-ds1672",
.of_match_table = of_match_ptr(ds1672_of_match),
},
- .probe = &ds1672_probe,
+ .probe_new = ds1672_probe,
.id_table = ds1672_id,
};
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 168bc27f1f5a..dd31a60c1fc6 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -566,8 +566,7 @@ static const struct dev_pm_ops ds3232_pm_ops = {
#if IS_ENABLED(CONFIG_I2C)
-static int ds3232_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds3232_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
static const struct regmap_config config = {
@@ -604,7 +603,7 @@ static struct i2c_driver ds3232_driver = {
.of_match_table = of_match_ptr(ds3232_of_match),
.pm = &ds3232_pm_ops,
},
- .probe = ds3232_i2c_probe,
+ .probe_new = ds3232_i2c_probe,
.id_table = ds3232_id,
};
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 9f176bce48ba..53f9f9391a5f 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -111,8 +111,7 @@ static const struct rtc_class_ops em3027_rtc_ops = {
.set_time = em3027_set_time,
};
-static int em3027_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int em3027_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -148,7 +147,7 @@ static struct i2c_driver em3027_driver = {
.name = "rtc-em3027",
.of_match_table = of_match_ptr(em3027_of_match),
},
- .probe = &em3027_probe,
+ .probe_new = em3027_probe,
.id_table = em3027_id,
};
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 677ec2da13d8..f59bb81f23c0 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -340,8 +340,7 @@ static const struct rtc_class_ops fm3130_rtc_ops = {
static struct i2c_driver fm3130_driver;
-static int fm3130_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int fm3130_probe(struct i2c_client *client)
{
struct fm3130 *fm3130;
int err = -ENODEV;
@@ -518,7 +517,7 @@ static struct i2c_driver fm3130_driver = {
.driver = {
.name = "rtc-fm3130",
},
- .probe = fm3130_probe,
+ .probe_new = fm3130_probe,
.id_table = fm3130_id,
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 90e602e99d03..cc710d682121 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -495,8 +495,7 @@ static int hym8563_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
-static int hym8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hym8563_probe(struct i2c_client *client)
{
struct hym8563 *hym8563;
int ret;
@@ -572,7 +571,7 @@ static struct i2c_driver hym8563_driver = {
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
- .probe = hym8563_probe,
+ .probe_new = hym8563_probe,
.id_table = hym8563_id,
};
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 961bd5d1d109..79461ded1a48 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -232,8 +232,7 @@ static const struct rtc_class_ops isl12022_rtc_ops = {
.set_time = isl12022_rtc_set_time,
};
-static int isl12022_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int isl12022_probe(struct i2c_client *client)
{
struct isl12022 *isl12022;
@@ -275,7 +274,7 @@ static struct i2c_driver isl12022_driver = {
.of_match_table = of_match_ptr(isl12022_dt_match),
#endif
},
- .probe = isl12022_probe,
+ .probe_new = isl12022_probe,
.id_table = isl12022_id,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 182dfa605515..f448a525333e 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -880,10 +880,14 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (rc)
return rc;
- if (client->irq > 0)
+ if (client->irq > 0) {
rc = isl1208_setup_irq(client, client->irq);
- if (rc)
- return rc;
+ if (rc)
+ return rc;
+
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, isl1208->rtc->features);
+ }
if (evdet_irq > 0 && evdet_irq != client->irq)
rc = isl1208_setup_irq(client, evdet_irq);
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 4beadfa41644..0a33851cc51f 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -197,8 +197,7 @@ static const struct rtc_class_ops max6900_rtc_ops = {
.set_time = max6900_rtc_set_time,
};
-static int
-max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int max6900_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -225,7 +224,7 @@ static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
- .probe = max6900_probe,
+ .probe_new = max6900_probe,
.id_table = max6900_id,
};
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 522449b25921..f1c09f1db044 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -21,13 +21,13 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
unsigned long flags;
unsigned char seconds;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 100; i++) {
spin_lock_irqsave(&rtc_lock, flags);
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
- * every msec for completion.
+ * every 100 usec for completion.
*
* Store the second value before checking UIP so a long lasting
* NMI which happens to hit after the UIP check cannot make
@@ -37,7 +37,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
@@ -56,7 +56,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
*/
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
new file mode 100644
index 000000000000..f14d1925e0c9
--- /dev/null
+++ b/drivers/rtc/rtc-mpfs.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip MPFS RTC driver
+ *
+ * Copyright (c) 2021-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * & Conor Dooley <conor.dooley@microchip.com>
+ */
+#include "linux/bits.h"
+#include "linux/iopoll.h"
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+
+#define CONTROL_REG 0x00
+#define MODE_REG 0x04
+#define PRESCALER_REG 0x08
+#define ALARM_LOWER_REG 0x0c
+#define ALARM_UPPER_REG 0x10
+#define COMPARE_LOWER_REG 0x14
+#define COMPARE_UPPER_REG 0x18
+#define DATETIME_LOWER_REG 0x20
+#define DATETIME_UPPER_REG 0x24
+
+#define CONTROL_RUNNING_BIT BIT(0)
+#define CONTROL_START_BIT BIT(0)
+#define CONTROL_STOP_BIT BIT(1)
+#define CONTROL_ALARM_ON_BIT BIT(2)
+#define CONTROL_ALARM_OFF_BIT BIT(3)
+#define CONTROL_RESET_BIT BIT(4)
+#define CONTROL_UPLOAD_BIT BIT(5)
+#define CONTROL_DOWNLOAD_BIT BIT(6)
+#define CONTROL_MATCH_BIT BIT(7)
+#define CONTROL_WAKEUP_CLR_BIT BIT(8)
+#define CONTROL_WAKEUP_SET_BIT BIT(9)
+#define CONTROL_UPDATED_BIT BIT(10)
+
+#define MODE_CLOCK_CALENDAR BIT(0)
+#define MODE_WAKE_EN BIT(1)
+#define MODE_WAKE_RESET BIT(2)
+#define MODE_WAKE_CONTINUE BIT(3)
+
+#define MAX_PRESCALER_COUNT GENMASK(25, 0)
+#define DATETIME_UPPER_MASK GENMASK(29, 0)
+#define ALARM_UPPER_MASK GENMASK(10, 0)
+
+#define UPLOAD_TIMEOUT_US 50
+
+struct mpfs_rtc_dev {
+ struct rtc_device *rtc;
+ void __iomem *base;
+};
+
+static void mpfs_rtc_start(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+}
+
+static void mpfs_rtc_clear_irq(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 val = readl(rtcdev->base + CONTROL_REG);
+
+ val &= ~(CONTROL_ALARM_ON_BIT | CONTROL_STOP_BIT);
+ val |= CONTROL_ALARM_OFF_BIT;
+ writel(val, rtcdev->base + CONTROL_REG);
+ /*
+ * Ensure that the posted write to the CONTROL_REG register completed before
+ * returning from this function. Not doing this may result in the interrupt
+ * only being cleared some time after this function returns.
+ */
+ (void)readl(rtcdev->base + CONTROL_REG);
+}
+
+static int mpfs_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u64 time;
+
+ time = readl(rtcdev->base + DATETIME_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + DATETIME_UPPER_REG) & DATETIME_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, tm);
+
+ return 0;
+}
+
+static int mpfs_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl, prog;
+ u64 time;
+ int ret;
+
+ time = rtc_tm_to_time64(tm);
+
+ writel((u32)time, rtcdev->base + DATETIME_LOWER_REG);
+ writel((u32)(time >> 32) & DATETIME_UPPER_MASK, rtcdev->base + DATETIME_UPPER_REG);
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_UPLOAD_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
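+ /* Wait for the upload to be reflected in the control register before restarting the counter */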
+ ret = read_poll_timeout(readl, prog, prog & CONTROL_UPLOAD_BIT, 0, UPLOAD_TIMEOUT_US,
+ false, rtcdev->base + CONTROL_REG);
+ if (ret) {
+ dev_err(dev, "timed out uploading time to rtc");
+ return ret;
+ }
+ mpfs_rtc_start(rtcdev);
+
+ return 0;
+}
+
+static int mpfs_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode = readl(rtcdev->base + MODE_REG);
+ u64 time;
+
+ alrm->enabled = mode & MODE_WAKE_EN;
+
+ time = readl(rtcdev->base + ALARM_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + ALARM_UPPER_REG) & ALARM_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, &alrm->time);
+
+ return 0;
+}
+
+static int mpfs_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode, ctrl;
+ u64 time;
+
+ /* Disable the alarm before updating */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ time = rtc_tm_to_time64(&alrm->time);
+
+ writel((u32)time, rtcdev->base + ALARM_LOWER_REG);
+ writel((u32)(time >> 32) & ALARM_UPPER_MASK, rtcdev->base + ALARM_UPPER_REG);
+
+ /* Bypass compare register in alarm mode */
+ writel(GENMASK(31, 0), rtcdev->base + COMPARE_LOWER_REG);
+ writel(GENMASK(29, 0), rtcdev->base + COMPARE_UPPER_REG);
+
+ /* Configure the RTC to enable the alarm. */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ mode = readl(rtcdev->base + MODE_REG);
+ if (alrm->enabled) {
+ mode = MODE_WAKE_EN | MODE_WAKE_CONTINUE;
+ /* Enable the alarm */
+ ctrl &= ~CONTROL_ALARM_OFF_BIT;
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ }
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+ writel(mode, rtcdev->base + MODE_REG);
+
+ return 0;
+}
+
+static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~(CONTROL_ALARM_ON_BIT | CONTROL_ALARM_OFF_BIT | CONTROL_STOP_BIT);
+
+ if (enabled)
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ else
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ return 0;
+}
+
+static inline struct clk *mpfs_rtc_init_clk(struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, "rtc");
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
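+ /* Disable and unprepare the clock automatically when the device is unbound */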
+ devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+ return clk;
+}
+
+static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev)
+{
+ struct mpfs_rtc_dev *rtcdev = dev;
+
+ mpfs_rtc_clear_irq(rtcdev);
+
+ rtc_update_irq(rtcdev->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops mpfs_rtc_ops = {
+ .read_time = mpfs_rtc_readtime,
+ .set_time = mpfs_rtc_settime,
+ .read_alarm = mpfs_rtc_readalarm,
+ .set_alarm = mpfs_rtc_setalarm,
+ .alarm_irq_enable = mpfs_rtc_alarm_irq_enable,
+};
+
+static int mpfs_rtc_probe(struct platform_device *pdev)
+{
+ struct mpfs_rtc_dev *rtcdev;
+ struct clk *clk;
+ u32 prescaler;
+ int wakeup_irq, ret;
+
+ rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL);
+ if (!rtcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtcdev);
+
+ rtcdev->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev->rtc))
+ return PTR_ERR(rtcdev->rtc);
+
+ rtcdev->rtc->ops = &mpfs_rtc_ops;
+
+ /* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */
+ rtcdev->rtc->range_max = GENMASK_ULL(42, 0);
+
+ clk = mpfs_rtc_init_clk(&pdev->dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rtcdev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtcdev->base)) {
+ dev_dbg(&pdev->dev, "invalid ioremap resources\n");
+ return PTR_ERR(rtcdev->base);
+ }
+
+ wakeup_irq = platform_get_irq(pdev, 0);
+ if (wakeup_irq <= 0) {
+ dev_dbg(&pdev->dev, "could not get wakeup irq\n");
+ return wakeup_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, wakeup_irq, mpfs_rtc_wakeup_irq_handler, 0,
+ dev_name(&pdev->dev), rtcdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request wakeup irq\n");
+ return ret;
+ }
+
+ /* prescaler hardware adds 1 to reg value */
+ prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1;
+
+ if (prescaler > MAX_PRESCALER_COUNT) {
+ dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler);
+ return -EINVAL;
+ }
+
+ writel(prescaler, rtcdev->base + PRESCALER_REG);
+ dev_info(&pdev->dev, "prescaler set to: 0x%X \r\n", prescaler);
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
+
+ return devm_rtc_register_device(rtcdev->rtc);
+}
+
+static int mpfs_rtc_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_rtc_of_match[] = {
+ { .compatible = "microchip,mpfs-rtc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
+
+static struct platform_driver mpfs_rtc_driver = {
+ .probe = mpfs_rtc_probe,
+ .remove = mpfs_rtc_remove,
+ .driver = {
+ .name = "mpfs_rtc",
+ .of_match_table = mpfs_rtc_of_match,
+ },
+};
+
+module_platform_driver(mpfs_rtc_driver);
+
+MODULE_DESCRIPTION("Real time clock for Microchip Polarfire SoC");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
new file mode 100644
index 000000000000..d43acd3920ed
--- /dev/null
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Nuvoton Technology Corporation
+
+#include <linux/bcd.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define NCT3018Y_REG_SC 0x00 /* seconds */
+#define NCT3018Y_REG_SCA 0x01 /* alarm */
+#define NCT3018Y_REG_MN 0x02
+#define NCT3018Y_REG_MNA 0x03 /* alarm */
+#define NCT3018Y_REG_HR 0x04
+#define NCT3018Y_REG_HRA 0x05 /* alarm */
+#define NCT3018Y_REG_DW 0x06
+#define NCT3018Y_REG_DM 0x07
+#define NCT3018Y_REG_MO 0x08
+#define NCT3018Y_REG_YR 0x09
+#define NCT3018Y_REG_CTRL 0x0A /* timer control */
+#define NCT3018Y_REG_ST 0x0B /* status */
+#define NCT3018Y_REG_CLKO 0x0C /* clock out */
+
+#define NCT3018Y_BIT_AF BIT(7)
+#define NCT3018Y_BIT_ST BIT(7)
+#define NCT3018Y_BIT_DM BIT(6)
+#define NCT3018Y_BIT_HF BIT(5)
+#define NCT3018Y_BIT_DSM BIT(4)
+#define NCT3018Y_BIT_AIE BIT(3)
+#define NCT3018Y_BIT_OFIE BIT(2)
+#define NCT3018Y_BIT_CIE BIT(1)
+#define NCT3018Y_BIT_TWO BIT(0)
+
+#define NCT3018Y_REG_BAT_MASK 0x07
+#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
+#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enabled */
+
+struct nct3018y {
+ struct rtc_device *rtc;
+ struct i2c_client *client;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+static int nct3018y_set_alarm_mode(struct i2c_client *client, bool on)
+{
+ int err, flags;
+
+ dev_dbg(&client->dev, "%s:on:%d\n", __func__, on);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_CTRL\n");
+ return flags;
+ }
+
+ if (on)
+ flags |= NCT3018Y_BIT_AIE;
+ else
+ flags &= ~NCT3018Y_BIT_AIE;
+
+ flags |= NCT3018Y_BIT_CIE;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_ST\n");
+ return flags;
+ }
+
+ flags &= ~(NCT3018Y_BIT_AF);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_ST\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *alarm_enable,
+ unsigned char *alarm_flag)
+{
+ int flags;
+
+ if (alarm_enable) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_CTRL\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0)
+ return flags;
+ *alarm_enable = flags & NCT3018Y_BIT_AIE;
+ }
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_ST\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0)
+ return flags;
+ *alarm_flag = flags & NCT3018Y_BIT_AF;
+ }
+
+ dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
+ __func__, *alarm_enable, *alarm_flag);
+
+ return 0;
+}
+
+static irqreturn_t nct3018y_irq(int irq, void *dev_id)
+{
+ struct nct3018y *nct3018y = i2c_get_clientdata(dev_id);
+ struct i2c_client *client = nct3018y->client;
+ int err;
+ unsigned char alarm_flag;
+ unsigned char alarm_enable;
+
+ dev_dbg(&client->dev, "%s:irq:%d\n", __func__, irq);
+ err = nct3018y_get_alarm_mode(nct3018y->client, &alarm_enable, &alarm_flag);
+ if (err)
+ return IRQ_NONE;
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:alarm flag:%x\n",
+ __func__, alarm_flag);
+ rtc_update_irq(nct3018y->rtc, 1, RTC_IRQF | RTC_AF);
+ nct3018y_set_alarm_mode(nct3018y->client, 0);
+ dev_dbg(&client->dev, "%s:IRQ_HANDLED\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * In the routines that deal directly with the nct3018y hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[10];
+ int err;
+
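+ /* Check the status register first; if it reads zero the supply dropped too low and the time is unreliable */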
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_ST, 1, buf);
+ if (err < 0)
+ return err;
+
+ if (!buf[0]) {
+ dev_dbg(&client->dev, " voltage <=1.7, date/time is not reliable.\n");
+ return -EINVAL;
+ }
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SC, sizeof(buf), buf);
+ if (err < 0)
+ return err;
+
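+ /* Seconds/minutes/hours are interleaved with their alarm registers, hence the gaps in the buffer indices */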
+ tm->tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[4] & 0x3F);
+ tm->tm_wday = buf[6] & 0x07;
+ tm->tm_mday = bcd2bin(buf[7] & 0x3F);
+ tm->tm_mon = bcd2bin(buf[8] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[9]) + 100;
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[4] = {0};
+ int err;
+
+ buf[0] = bin2bcd(tm->tm_sec);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SC\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_min);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MN, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MN\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_hour);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HR, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HR\n");
+ return err;
+ }
+
+ buf[0] = tm->tm_wday & 0x07;
+ buf[1] = bin2bcd(tm->tm_mday);
+ buf[2] = bin2bcd(tm->tm_mon + 1);
+ buf[3] = bin2bcd(tm->tm_year - 100);
+ err = i2c_smbus_write_i2c_block_data(client, NCT3018Y_REG_DW,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write for day and mon and year\n");
+ return -EIO;
+ }
+
+ return err;
+}
+
+static int nct3018y_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[5];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SCA,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x hr=%02x\n",
+ __func__, buf[0], buf[2], buf[4]);
+
+ tm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->time.tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->time.tm_hour = bcd2bin(buf[4] & 0x3F);
+
+ err = nct3018y_get_alarm_mode(client, &tm->enabled, &tm->pending);
+ if (err < 0)
+ return err;
+
+ dev_dbg(&client->dev, "%s:s=%d m=%d, hr=%d, enabled=%d, pending=%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min,
+ tm->time.tm_hour, tm->enabled, tm->pending);
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+
+ dev_dbg(dev, "%s, sec=%d, min=%d hour=%d tm->enabled:%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min, tm->time.tm_hour,
+ tm->enabled);
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SCA, bin2bcd(tm->time.tm_sec));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SCA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MNA, bin2bcd(tm->time.tm_min));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MNA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HRA, bin2bcd(tm->time.tm_hour));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HRA\n");
+ return err;
+ }
+
+ return nct3018y_set_alarm_mode(client, tm->enabled);
+}
+
+static int nct3018y_irq_enable(struct device *dev, unsigned int enabled)
+{
+ dev_dbg(dev, "%s: alarm enable=%d\n", __func__, enabled);
+
+ return nct3018y_set_alarm_mode(to_i2c_client(dev), enabled);
+}
+
+static int nct3018y_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int status, flags = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ status = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (status < 0)
+ return status;
+
+ if (!(status & NCT3018Y_REG_BAT_MASK))
+ flags |= RTC_VL_DATA_INVALID;
+
+ return put_user(flags, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMMON_CLK
+/*
+ * Handling of the clkout
+ */
+
+#define clkout_hw_to_nct3018y(_hw) container_of(_hw, struct nct3018y, clkout_hw)
+
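+/* Supported clock-out rates, highest first; the array index is also the value programmed into the CLKO frequency field */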
+static const int clkout_rates[] = {
+ 32768,
+ 1024,
+ 32,
+ 1,
+};
+
+static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return 0;
+
+ flags &= NCT3018Y_REG_CLKO_F_MASK;
+ return clkout_rates[flags];
+}
+
+static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
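+ /* clkout_rates[] is sorted in descending order, so this picks the highest supported rate not above the request */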
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int i, flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate) {
+ flags &= ~NCT3018Y_REG_CLKO_F_MASK;
+ flags |= i;
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+ }
+
+ return -EINVAL;
+}
+
+static int nct3018y_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ if (enable)
+ flags |= NCT3018Y_REG_CLKO_CKE;
+ else
+ flags &= ~NCT3018Y_REG_CLKO_CKE;
+
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+}
+
+static int nct3018y_clkout_prepare(struct clk_hw *hw)
+{
+ return nct3018y_clkout_control(hw, 1);
+}
+
+static void nct3018y_clkout_unprepare(struct clk_hw *hw)
+{
+ nct3018y_clkout_control(hw, 0);
+}
+
+static int nct3018y_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ return flags & NCT3018Y_REG_CLKO_CKE;
+}
+
+static const struct clk_ops nct3018y_clkout_ops = {
+ .prepare = nct3018y_clkout_prepare,
+ .unprepare = nct3018y_clkout_unprepare,
+ .is_prepared = nct3018y_clkout_is_prepared,
+ .recalc_rate = nct3018y_clkout_recalc_rate,
+ .round_rate = nct3018y_clkout_round_rate,
+ .set_rate = nct3018y_clkout_set_rate,
+};
+
+static struct clk *nct3018y_clkout_register_clk(struct nct3018y *nct3018y)
+{
+ struct i2c_client *client = nct3018y->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = "nct3018y-clkout";
+ init.ops = &nct3018y_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ nct3018y->clkout_hw.init = &init;
+
+ /* optional override of the clock name */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &nct3018y->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
+static const struct rtc_class_ops nct3018y_rtc_ops = {
+ .read_time = nct3018y_rtc_read_time,
+ .set_time = nct3018y_rtc_set_time,
+ .read_alarm = nct3018y_rtc_read_alarm,
+ .set_alarm = nct3018y_rtc_set_alarm,
+ .alarm_irq_enable = nct3018y_irq_enable,
+ .ioctl = nct3018y_ioctl,
+};
+
+static int nct3018y_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nct3018y *nct3018y;
+ int err, flags;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ nct3018y = devm_kzalloc(&client->dev, sizeof(struct nct3018y),
+ GFP_KERNEL);
+ if (!nct3018y)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, nct3018y);
+ nct3018y->client = client;
+ device_set_wakeup_capable(&client->dev, 1);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev, "%s: read error\n", __func__);
+ return flags;
+ } else if (flags & NCT3018Y_BIT_TWO) {
+ dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
+ }
+
+ flags = NCT3018Y_BIT_TWO;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = 0;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "%s: write error\n", __func__);
+ return err;
+ }
+
+ nct3018y->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(nct3018y->rtc))
+ return PTR_ERR(nct3018y->rtc);
+
+ nct3018y->rtc->ops = &nct3018y_rtc_ops;
+ nct3018y->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ nct3018y->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ if (client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, nct3018y_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ "nct3018y", client);
+ if (err) {
+ dev_dbg(&client->dev, "unable to request IRQ %d\n", client->irq);
+ return err;
+ }
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, nct3018y->rtc->features);
+ clear_bit(RTC_FEATURE_ALARM, nct3018y->rtc->features);
+ }
+
+#ifdef CONFIG_COMMON_CLK
+ /* register clk in common clk framework */
+ nct3018y_clkout_register_clk(nct3018y);
+#endif
+
+ return devm_rtc_register_device(nct3018y->rtc);
+}
+
+static const struct i2c_device_id nct3018y_id[] = {
+ { "nct3018y", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nct3018y_id);
+
+static const struct of_device_id nct3018y_of_match[] = {
+ { .compatible = "nuvoton,nct3018y" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nct3018y_of_match);
+
+static struct i2c_driver nct3018y_driver = {
+ .driver = {
+ .name = "rtc-nct3018y",
+ .of_match_table = of_match_ptr(nct3018y_of_match),
+ },
+ .probe = nct3018y_probe,
+ .id_table = nct3018y_id,
+};
+
+module_i2c_driver(nct3018y_driver);
+
+MODULE_AUTHOR("Medad CChien <ctcchien@nuvoton.com>");
+MODULE_AUTHOR("Mia Lin <mimi05633@gmail.com>");
+MODULE_DESCRIPTION("Nuvoton NCT3018Y RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index b1b1943de844..6174b3fd4b98 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -390,8 +390,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x13,
};
-static int pcf8523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8523_probe(struct i2c_client *client)
{
struct pcf8523 *pcf8523;
struct rtc_device *rtc;
@@ -485,7 +484,7 @@ static struct i2c_driver pcf8523_driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
},
- .probe = pcf8523_probe,
+ .probe_new = pcf8523_probe,
.id_table = pcf8523_id,
};
module_i2c_driver(pcf8523_driver);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index bb3e9ba75f6c..c05b722f0060 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -350,8 +350,7 @@ static const struct pcf85x63_config pcf_85363_config = {
.num_nvram = 2
};
-static int pcf85363_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf85363_probe(struct i2c_client *client)
{
struct pcf85363 *pcf85363;
const struct pcf85x63_config *config = &pcf_85363_config;
@@ -436,7 +435,7 @@ static struct i2c_driver pcf85363_driver = {
.name = "pcf85363",
.of_match_table = of_match_ptr(dev_ids),
},
- .probe = pcf85363_probe,
+ .probe_new = pcf85363_probe,
};
module_i2c_driver(pcf85363_driver);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 9d06813e2e6d..11fa9788558b 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -509,8 +509,7 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
.alarm_irq_enable = pcf8563_irq_enable,
};
-static int pcf8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8563_probe(struct i2c_client *client)
{
struct pcf8563 *pcf8563;
int err;
@@ -606,7 +605,7 @@ static struct i2c_driver pcf8563_driver = {
.name = "rtc-pcf8563",
.of_match_table = of_match_ptr(pcf8563_of_match),
},
- .probe = pcf8563_probe,
+ .probe_new = pcf8563_probe,
.id_table = pcf8563_id,
};
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index c80ca20e5d8d..87074d178274 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -275,8 +275,7 @@ static const struct rtc_class_ops pcf8583_rtc_ops = {
.set_time = pcf8583_rtc_set_time,
};
-static int pcf8583_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8583_probe(struct i2c_client *client)
{
struct pcf8583 *pcf8583;
@@ -307,7 +306,7 @@ static struct i2c_driver pcf8583_driver = {
.driver = {
.name = "pcf8583",
},
- .probe = pcf8583_probe,
+ .probe_new = pcf8583_probe,
.id_table = pcf8583_id,
};
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 8cb84c9595fc..eb483a30bd92 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -784,8 +784,7 @@ static const struct regmap_config config = {
#if IS_ENABLED(CONFIG_I2C)
-static int rv3029_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rv3029_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
@@ -819,7 +818,7 @@ static struct i2c_driver rv3029_driver = {
.name = "rv3029",
.of_match_table = of_match_ptr(rv3029_of_match),
},
- .probe = rv3029_i2c_probe,
+ .probe_new = rv3029_i2c_probe,
.id_table = rv3029_id,
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index f69e0b1137cd..3527a0521e9b 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -9,6 +9,7 @@
#include <linux/bcd.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/log2.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define RV8803_EXT 0x0D
#define RV8803_FLAG 0x0E
#define RV8803_CTRL 0x0F
+#define RV8803_OSC_OFFSET 0x2C
#define RV8803_EXT_WADA BIT(6)
@@ -49,12 +51,15 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8803_CTRL_CSEL GENMASK(7, 6)
+
#define RX8900_BACKUP_CTRL 0x18
#define RX8900_FLAG_SWOFF BIT(2)
#define RX8900_FLAG_VDETOFF BIT(3)
enum rv8803_type {
rv_8803,
+ rx_8803,
rx_8804,
rx_8900
};
@@ -64,6 +69,7 @@ struct rv8803_data {
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ u8 backup;
enum rv8803_type type;
};
@@ -136,6 +142,44 @@ static int rv8803_write_regs(const struct i2c_client *client,
return ret;
}
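+/* Bring the offset, control, alarm and RAM registers back to known defaults */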
+static int rv8803_regs_init(struct rv8803_data *rv8803)
+{
+ int ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_OSC_OFFSET, 0x00);
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ FIELD_PREP(RX8803_CTRL_CSEL, 1)); /* 2s */
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_regs(rv8803->client, RV8803_ALARM_MIN, 3,
+ (u8[]){ 0, 0, 0 });
+ if (ret)
+ return ret;
+
+ return rv8803_write_reg(rv8803->client, RV8803_RAM, 0x00);
+}
+
+static int rv8803_regs_configure(struct rv8803_data *rv8803);
+
+static int rv8803_regs_reset(struct rv8803_data *rv8803)
+{
+ /*
+ * The RV-8803 resets all registers to POR defaults after a voltage loss;
+ * the Epson RTCs don't, so we manually reset the remainder here.
+ */
+ if (rv8803->type == rx_8803 || rv8803->type == rx_8900) {
+ int ret = rv8803_regs_init(rv8803);
+ if (ret)
+ return ret;
+ }
+
+ return rv8803_regs_configure(rv8803);
+}
+
static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -269,6 +313,14 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
return flags;
}
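+ /* If the voltage-loss flag is set, the registers may have reverted; reinitialise them before clearing the flags */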
+ if (flags & RV8803_FLAG_V2F) {
+ ret = rv8803_regs_reset(rv8803);
+ if (ret) {
+ mutex_unlock(&rv8803->flags_lock);
+ return ret;
+ }
+ }
+
ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
flags & ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F));
@@ -498,18 +550,32 @@ static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
if (err < 0)
return err;
- flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
-
- if (of_property_read_bool(node, "epson,vdet-disable"))
- flags |= RX8900_FLAG_VDETOFF;
-
- if (of_property_read_bool(node, "trickle-diode-disable"))
- flags |= RX8900_FLAG_SWOFF;
+ flags = (u8)err;
+ flags &= ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF);
+ flags |= rv8803->backup;
return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
flags);
}
+/* configure registers with values different from the power-on reset defaults */
+static int rv8803_regs_configure(struct rv8803_data *rv8803)
+{
+ int err;
+
+ err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
+ if (err)
+ return err;
+
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&rv8803->client->dev, "failed to init charger\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -576,15 +642,15 @@ static int rv8803_probe(struct i2c_client *client,
if (!client->irq)
clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
- err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
- if (err)
- return err;
+ if (of_property_read_bool(client->dev.of_node, "epson,vdet-disable"))
+ rv8803->backup |= RX8900_FLAG_VDETOFF;
- err = rx8900_trickle_charger_init(rv8803);
- if (err) {
- dev_err(&client->dev, "failed to init charger\n");
+ if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable"))
+ rv8803->backup |= RX8900_FLAG_SWOFF;
+
+ err = rv8803_regs_configure(rv8803);
+ if (err)
return err;
- }
rv8803->rtc->ops = &rv8803_rtc_ops;
rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
@@ -603,7 +669,7 @@ static int rv8803_probe(struct i2c_client *client,
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
{ "rv8804", rx_8804 },
- { "rx8803", rv_8803 },
+ { "rx8803", rx_8803 },
{ "rx8900", rx_8900 },
{ }
};
@@ -616,7 +682,7 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = {
},
{
.compatible = "epson,rx8803",
- .data = (void *)rv_8803
+ .data = (void *)rx_8803
},
{
.compatible = "epson,rx8804",
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 758fd6e11a15..cc634558b928 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -419,8 +419,7 @@ static struct regmap_config regmap_i2c_config = {
.read_flag_mask = 0x80,
};
-static int rx6110_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx6110_i2c_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct rx6110_data *rx6110;
@@ -464,7 +463,7 @@ static struct i2c_driver rx6110_i2c_driver = {
.name = RX6110_DRIVER_NAME,
.acpi_match_table = rx6110_i2c_acpi_match,
},
- .probe = rx6110_i2c_probe,
+ .probe_new = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
};
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b32117ccd74b..dde86f3e2a4b 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -55,6 +55,8 @@
#define RX8025_BIT_CTRL2_XST BIT(5)
#define RX8025_BIT_CTRL2_VDET BIT(6)
+#define RX8035_BIT_HOUR_1224 BIT(7)
+
/* Clock precision adjustment */
#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
#define RX8025_ADJ_DATA_MAX 62
@@ -78,6 +80,7 @@ struct rx8025_data {
struct rtc_device *rtc;
enum rx_model model;
u8 ctrl1;
+ int is_24;
};
static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
@@ -226,7 +229,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
else
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
@@ -254,7 +257,7 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
*/
date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
else
date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
@@ -279,6 +282,7 @@ static int rx8025_init_client(struct i2c_client *client)
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
u8 ctrl[2], ctrl2;
int need_clear = 0;
+ int hour_reg;
int err;
err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
@@ -303,6 +307,16 @@ static int rx8025_init_client(struct i2c_client *client)
err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
}
+
+ if (rx8025->model == model_rx_8035) {
+ /* In RX-8035, 12/24 flag is in the hour register */
+ hour_reg = rx8025_read_reg(client, RX8025_REG_HOUR);
+ if (hour_reg < 0)
+ return hour_reg;
+ rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
+ } else {
+ rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224);
+ }
out:
return err;
}
@@ -329,7 +343,7 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
/* Hardware alarms precision is 1 minute! */
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(ald[0] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
else
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
@@ -350,7 +364,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
int err;
ald[0] = bin2bcd(t->time.tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
ald[1] = bin2bcd(t->time.tm_hour);
else
ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index aed4898a0ff4..14edb7534c97 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -248,8 +248,7 @@ static const struct rx85x1_config rx8571_config = {
.num_nvram = 2
};
-static int rx8581_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8581_probe(struct i2c_client *client)
{
struct rx8581 *rx8581;
const struct rx85x1_config *config = &rx8581_config;
@@ -326,7 +325,7 @@ static struct i2c_driver rx8581_driver = {
.name = "rtc-rx8581",
.of_match_table = of_match_ptr(rx8581_of_match),
},
- .probe = rx8581_probe,
+ .probe_new = rx8581_probe,
.id_table = rx8581_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 26278c770731..81d97b1d3159 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -420,8 +420,7 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static int s35390a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s35390a_probe(struct i2c_client *client)
{
int err, err_read;
unsigned int i;
@@ -502,7 +501,7 @@ static struct i2c_driver s35390a_driver = {
.name = "rtc-s35390a",
.of_match_table = of_match_ptr(s35390a_of_match),
},
- .probe = s35390a_probe,
+ .probe_new = s35390a_probe,
.id_table = s35390a_id,
};
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index 24e8528e23ec..e2f90d768ca8 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -163,8 +163,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x11,
};
-static int sd3078_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sd3078_probe(struct i2c_client *client)
{
int ret;
struct sd3078 *sd3078;
@@ -218,7 +217,7 @@ static struct i2c_driver sd3078_driver = {
.name = "sd3078",
.of_match_table = of_match_ptr(rtc_dt_match),
},
- .probe = sd3078_probe,
+ .probe_new = sd3078_probe,
.id_table = sd3078_id,
};
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index d4777b01ab22..736fe535cd45 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -388,7 +388,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
config->rtc->ops = &spear_rtc_ops;
config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
- config->rtc->range_min = RTC_TIMESTAMP_END_9999;
+ config->rtc->range_max = RTC_TIMESTAMP_END_9999;
status = devm_rtc_register_device(config->rtc);
if (status)
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 57540727ce1c..ed5516089e9a 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -875,6 +875,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun50i-h6-rtc" },
{ .compatible = "allwinner,sun50i-h616-rtc",
.data = (void *)RTC_LINEAR_DAY },
+ { .compatible = "allwinner,sun50i-r329-rtc",
+ .data = (void *)RTC_LINEAR_DAY },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
new file mode 100644
index 000000000000..7a0f181d3fef
--- /dev/null
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments K3 RTC driver
+ *
+ * Copyright (C) 2021-2022 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Registers */
+#define REG_K3RTC_S_CNT_LSW 0x08
+#define REG_K3RTC_S_CNT_MSW 0x0c
+#define REG_K3RTC_COMP 0x10
+#define REG_K3RTC_ON_OFF_S_CNT_LSW 0x20
+#define REG_K3RTC_ON_OFF_S_CNT_MSW 0x24
+#define REG_K3RTC_SCRATCH0 0x30
+#define REG_K3RTC_SCRATCH7 0x4c
+#define REG_K3RTC_GENERAL_CTL 0x50
+#define REG_K3RTC_IRQSTATUS_RAW_SYS 0x54
+#define REG_K3RTC_IRQSTATUS_SYS 0x58
+#define REG_K3RTC_IRQENABLE_SET_SYS 0x5c
+#define REG_K3RTC_IRQENABLE_CLR_SYS 0x60
+#define REG_K3RTC_SYNCPEND 0x68
+#define REG_K3RTC_KICK0 0x70
+#define REG_K3RTC_KICK1 0x74
+
+/* Freeze when lsw is read and unfreeze when msw is read */
+#define K3RTC_CNT_FMODE_S_CNT_VALUE (0x2 << 24)
+
+/* Magic values for lock/unlock */
+#define K3RTC_KICK0_UNLOCK_VALUE 0x83e70b13
+#define K3RTC_KICK1_UNLOCK_VALUE 0x95a4f1e0
+
+/* Multiplier for ppb conversions */
+#define K3RTC_PPB_MULT (1000000000LL)
+/* Min and max values supported with 'offset' interface (swapped sign) */
+#define K3RTC_MIN_OFFSET (-277761)
+#define K3RTC_MAX_OFFSET (277778)
+
+/**
+ * struct ti_k3_rtc_soc_data - Private per-compatible match data for ti-k3-rtc

+ * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327)
+ */
+struct ti_k3_rtc_soc_data {
+ const bool unlock_irq_erratum;
+};
+
+static const struct regmap_config ti_k3_rtc_regmap_config = {
+ .name = "peripheral-registers",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_K3RTC_KICK1,
+};
+
+enum ti_k3_rtc_fields {
+ K3RTC_KICK0,
+ K3RTC_KICK1,
+ K3RTC_S_CNT_LSW,
+ K3RTC_S_CNT_MSW,
+ K3RTC_O32K_OSC_DEP_EN,
+ K3RTC_UNLOCK,
+ K3RTC_CNT_FMODE,
+ K3RTC_PEND,
+ K3RTC_RELOAD_FROM_BBD,
+ K3RTC_COMP,
+
+ K3RTC_ALM_S_CNT_LSW,
+ K3RTC_ALM_S_CNT_MSW,
+ K3RTC_IRQ_STATUS_RAW,
+ K3RTC_IRQ_STATUS,
+ K3RTC_IRQ_ENABLE_SET,
+ K3RTC_IRQ_ENABLE_CLR,
+
+ K3RTC_IRQ_STATUS_ALT,
+ K3RTC_IRQ_ENABLE_CLR_ALT,
+
+ K3_RTC_MAX_FIELDS
+};
+
+static const struct reg_field ti_rtc_reg_fields[] = {
+ [K3RTC_KICK0] = REG_FIELD(REG_K3RTC_KICK0, 0, 31),
+ [K3RTC_KICK1] = REG_FIELD(REG_K3RTC_KICK1, 0, 31),
+ [K3RTC_S_CNT_LSW] = REG_FIELD(REG_K3RTC_S_CNT_LSW, 0, 31),
+ [K3RTC_S_CNT_MSW] = REG_FIELD(REG_K3RTC_S_CNT_MSW, 0, 15),
+ [K3RTC_O32K_OSC_DEP_EN] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 21, 21),
+ [K3RTC_UNLOCK] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 23, 23),
+ [K3RTC_CNT_FMODE] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 24, 25),
+ [K3RTC_PEND] = REG_FIELD(REG_K3RTC_SYNCPEND, 0, 1),
+ [K3RTC_RELOAD_FROM_BBD] = REG_FIELD(REG_K3RTC_SYNCPEND, 31, 31),
+ [K3RTC_COMP] = REG_FIELD(REG_K3RTC_COMP, 0, 31),
+
+ /* We use on to off as alarm trigger */
+ [K3RTC_ALM_S_CNT_LSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_LSW, 0, 31),
+ [K3RTC_ALM_S_CNT_MSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_MSW, 0, 15),
+ [K3RTC_IRQ_STATUS_RAW] = REG_FIELD(REG_K3RTC_IRQSTATUS_RAW_SYS, 0, 0),
+ [K3RTC_IRQ_STATUS] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_SET] = REG_FIELD(REG_K3RTC_IRQENABLE_SET_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_CLR] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 0, 0),
+ /* Off to on is alternate */
+ [K3RTC_IRQ_STATUS_ALT] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 1, 1),
+ [K3RTC_IRQ_ENABLE_CLR_ALT] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 1, 1),
+};
+
+/**
+ * struct ti_k3_rtc - Private data for ti-k3-rtc
+ * @irq: IRQ
+ * @sync_timeout_us: data sync timeout period in uSec
+ * @rate_32k: 32k clock rate in Hz
+ * @rtc_dev: rtc device
+ * @regmap: rtc mmio regmap
+ * @r_fields: rtc register fields
+ * @soc: SoC compatible match data
+ */
+struct ti_k3_rtc {
+ unsigned int irq;
+ u32 sync_timeout_us;
+ unsigned long rate_32k;
+ struct rtc_device *rtc_dev;
+ struct regmap *regmap;
+ struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
+ const struct ti_k3_rtc_soc_data *soc;
+};
+
+static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
+{
+ int ret;
+ int val;
+
+ ret = regmap_field_read(priv->r_fields[f], &val);
+ /*
+ * We shouldn't be seeing regmap fail on us for mmio reads
+ * This is possible if clock context fails, but that isn't the case for us
+ */
+ if (WARN_ON_ONCE(ret))
+ return ret;
+ return val;
+}
+
+static void k3rtc_field_write(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f, u32 val)
+{
+ regmap_field_write(priv->r_fields[f], val);
+}
+
+/**
+ * k3rtc_fence - Ensure a register sync took place between the two domains
+ * @priv: pointer to priv data
+ *
+ * Return: 0 if the sync took place, else returns -ETIMEDOUT
+ */
+static int k3rtc_fence(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_PEND], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
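k3rtc_fence() is just a poll-until-clear with a timeout, built on regmap_field_read_poll_timeout(). The sketch below shows the same pattern in plain userspace C, with read_pending() standing in for the real K3RTC_PEND register read (the fake counter is purely illustrative):

/* Userspace sketch of the poll-until-clear-with-timeout pattern
 * that k3rtc_fence() relies on. read_pending() is a stand-in.
 */
#include <stdio.h>
#include <unistd.h>

static int fake_pend = 3;
static int read_pending(void) { return fake_pend ? fake_pend-- : 0; }

static int poll_sync(unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (read_pending()) {
		if (waited >= timeout_us)
			return -1;		/* -ETIMEDOUT in the driver */
		usleep(sleep_us);
		waited += sleep_us;
	}
	return 0;
}

int main(void)
{
	printf("sync %s\n", poll_sync(2, 124) ? "timed out" : "ok");
	return 0;
}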
+
+static inline int k3rtc_check_unlocked(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_field_read(priv, K3RTC_UNLOCK);
+ if (ret < 0)
+ return ret;
+
+ return (ret) ? 0 : 1;
+}
+
+static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_check_unlocked(priv);
+ if (!ret)
+ return ret;
+
+ k3rtc_field_write(priv, K3RTC_KICK0, K3RTC_KICK0_UNLOCK_VALUE);
+ k3rtc_field_write(priv, K3RTC_KICK1, K3RTC_KICK1_UNLOCK_VALUE);
+
+ /* Skip fence since we are going to check the unlock bit as fence */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+static int k3rtc_configure(struct device *dev)
+{
+ int ret;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ /*
+ * HWBUG: The compare state machine is broken if the RTC module
+ * is NOT unlocked within one second of boot - which is a pretty long
+ * time from the perspective of the Linux driver (module load, u-boot
+ * shell, etc. can all take much longer than this).
+ *
+ * If that happens, the RTC module is assumed to be unusable.
+ */
+ if (priv->soc->unlock_irq_erratum) {
+ ret = k3rtc_check_unlocked(priv);
+ /* If there is an error OR if we are locked, return error */
+ if (ret) {
+ dev_err(dev,
+ HW_ERR "Erratum i2327 unlock QUIRK! Cannot operate!!\n");
+ return -EFAULT;
+ }
+ } else {
+ /* May need to explicitly unlock first time */
+ ret = k3rtc_unlock_rtc(priv);
+ if (ret) {
+ dev_err(dev, "Failed to unlock(%d)!\n", ret);
+ return ret;
+ }
+ }
+
+ /* Enable Shadow register sync on 32k clock boundary */
+ k3rtc_field_write(priv, K3RTC_O32K_OSC_DEP_EN, 0x1);
+
+ /*
+ * Wait at least clock sync time before proceeding further programming.
+ * This ensures that the 32k based sync is active.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 5);
+
+ /* Fence here to make sure the sync above actually went through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev,
+ "Failed fence osc_dep enable(%d) - is 32k clk working?!\n", ret);
+ return ret;
+ }
+
+ /*
+ * FMODE setting: Reading lower seconds will freeze value on higher
+ * seconds. This also implies that we must *ALWAYS* read lower seconds
+ * prior to reading higher seconds
+ */
+ k3rtc_field_write(priv, K3RTC_CNT_FMODE, K3RTC_CNT_FMODE_S_CNT_VALUE);
+
+ /* Clear any spurious IRQ sources if any */
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS, 0x1);
+ /* Disable all IRQs */
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR, 0x1);
+
+ /* And.. Let us Sync the writes in */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, tm);
+
+ return 0;
+}
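Because the FMODE setting freezes the counter on the LSW read, the two halves above form one consistent 48-bit seconds value; the assembly itself is a plain shift-and-or. A trivial standalone check with made-up register values:

/* Illustration of assembling the 48-bit seconds counter from MSW/LSW. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t seconds_lo = 0x89abcdefU;	/* hypothetical LSW */
	uint32_t seconds_hi = 0x0123U;		/* hypothetical 16-bit MSW */
	uint64_t seconds = ((uint64_t)seconds_hi << 32) | seconds_lo;

	printf("seconds = 0x%012llx\n", (unsigned long long)seconds);
	return 0;
}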
+
+static int ti_k3_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+
+ seconds = rtc_tm_to_time64(tm);
+
+ /*
+ * Read operation on LSW will freeze the RTC, so to update
+ * the time, we cannot use field operations. Just write since the
+ * reserved bits are ignored.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_LSW, seconds);
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_MSW, seconds >> 32);
+
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ u32 offset = enabled ? K3RTC_IRQ_ENABLE_SET : K3RTC_IRQ_ENABLE_CLR;
+
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+ if ((enabled && reg) || (!enabled && !reg))
+ return 0;
+
+ k3rtc_field_write(priv, offset, 0x1);
+
+ /*
+ * Ensure the write sync is through - NOTE: it should be OK to have
+ * ISR to fire as we are checking sync (which should be done in a 32k
+ * cycle or so).
+ */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, &alarm->time);
+
+ alarm->enabled = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+ int ret;
+
+ seconds = rtc_tm_to_time64(&alarm->time);
+
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_LSW, seconds);
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_MSW, (seconds >> 32));
+
+ /* Make sure the alarm time is synced in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence(%d)! Potential config issue?\n", ret);
+ return ret;
+ }
+
+ /* Alarm IRQ enable will do a sync */
+ return ti_k3_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static int ti_k3_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ comp = k3rtc_field_read(priv, K3RTC_COMP);
+
+ /* Convert from RTC calibration register format to ppb format */
+ tmp = comp * (s64)K3RTC_PPB_MULT;
+ if (tmp < 0)
+ tmp -= ticks_per_hr / 2LL;
+ else
+ tmp += ticks_per_hr / 2LL;
+ tmp = div_s64(tmp, ticks_per_hr);
+
+ /* Offset value operates in negative way, so swap sign */
+ *offset = (long)-tmp;
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_offset(struct device *dev, long offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ /* Make sure offset value is within supported range */
+ if (offset < K3RTC_MIN_OFFSET || offset > K3RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Convert from ppb format to RTC calibration register format */
+ tmp = offset * (s64)ticks_per_hr;
+ if (tmp < 0)
+ tmp -= K3RTC_PPB_MULT / 2LL;
+ else
+ tmp += K3RTC_PPB_MULT / 2LL;
+ tmp = div_s64(tmp, K3RTC_PPB_MULT);
+
+ /* Offset value operates in negative way, so swap sign */
+ comp = (int)-tmp;
+
+ k3rtc_field_write(priv, K3RTC_COMP, comp);
+
+ return k3rtc_fence(priv);
+}
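The two functions above convert between a ppb offset and the COMP register value (ticks per hour, sign swapped). A rough userspace check of that arithmetic, assuming a 32768 Hz clock so ticks_per_hr is 117964800; plain 64-bit division stands in for the kernel's div_s64() and the values are only illustrative:

/* Standalone check of the ppb <-> COMP conversion logic above. */
#include <stdio.h>
#include <stdint.h>

#define PPB_MULT	1000000000LL

static int64_t ppb_to_comp(int64_t offset, int64_t ticks_per_hr)
{
	int64_t tmp = offset * ticks_per_hr;

	tmp += (tmp < 0) ? -PPB_MULT / 2 : PPB_MULT / 2;	/* round */
	return -(tmp / PPB_MULT);				/* swap sign */
}

static int64_t comp_to_ppb(int64_t comp, int64_t ticks_per_hr)
{
	int64_t tmp = comp * PPB_MULT;

	tmp += (tmp < 0) ? -ticks_per_hr / 2 : ticks_per_hr / 2;
	return -(tmp / ticks_per_hr);
}

int main(void)
{
	int64_t ticks_per_hr = 32768LL * 3600;	/* 117964800 */
	int64_t offset = 10000;			/* +10 ppm requested */
	int64_t comp = ppb_to_comp(offset, ticks_per_hr);

	/* prints comp = -1180, back to ppb = 10003 (rounding error of ~3 ppb) */
	printf("comp = %lld, back to ppb = %lld\n",
	       (long long)comp, (long long)comp_to_ppb(comp, ticks_per_hr));
	return 0;
}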
+
+static irqreturn_t ti_k3_rtc_interrupt(s32 irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int ret;
+
+ /*
+ * IRQ assertion can be very fast, however, the IRQ Status clear
+ * de-assert depends on 32k clock edge in the 32k domain
+ * If we clear the status prior to the first 32k clock edge,
+ * the status bit is cleared, but the IRQ stays re-asserted.
+ *
+ * To prevent this condition, we need to wait for clock sync time.
+ * We can either do that by polling the 32k observability signal for
+ * a toggle OR we could just sleep and let the processor do other
+ * stuff.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 2);
+
+ /* Let's make sure that this is a valid interrupt */
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_STATUS);
+
+ if (!reg) {
+ u32 raw = k3rtc_field_read(priv, K3RTC_IRQ_STATUS_RAW);
+
+ dev_err(dev,
+ HW_ERR
+ "Erratum i2327/IRQ trig: status: 0x%08x / 0x%08x\n", reg, raw);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Write 1 to clear status reg
+ * We cannot use a field operation here due to a potential race between
+ * 32k domain and vbus domain.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_IRQSTATUS_SYS, 0x1);
+
+ /* Sync the write in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence irq status clr(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Force the 32k status to be reloaded back in to ensure status is
+ * reflected back correctly.
+ */
+ k3rtc_field_write(priv, K3RTC_RELOAD_FROM_BBD, 0x1);
+
+ /* Ensure the write sync is through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence reload from bbd(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Now we ensure that the status bit is cleared */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_IRQ_STATUS],
+ ret, !ret, 2, priv->sync_timeout_us);
+ if (ret) {
+ dev_err(dev, "Time out waiting for status clear\n");
+ return IRQ_NONE;
+ }
+
+ /* Notify RTC core on event */
+ rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ti_k3_rtc_ops = {
+ .read_time = ti_k3_rtc_read_time,
+ .set_time = ti_k3_rtc_set_time,
+ .read_alarm = ti_k3_rtc_read_alarm,
+ .set_alarm = ti_k3_rtc_set_alarm,
+ .read_offset = ti_k3_rtc_read_offset,
+ .set_offset = ti_k3_rtc_set_offset,
+ .alarm_irq_enable = ti_k3_rtc_alarm_irq_enable,
+};
+
+static int ti_k3_rtc_scratch_read(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+
+ return regmap_bulk_read(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+}
+
+static int ti_k3_rtc_scratch_write(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+ if (ret)
+ return ret;
+
+ return k3rtc_fence(priv);
+}
+
+static struct nvmem_config ti_k3_rtc_nvmem_config = {
+ .name = "ti_k3_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = REG_K3RTC_SCRATCH7 - REG_K3RTC_SCRATCH0 + 4,
+ .reg_read = ti_k3_rtc_scratch_read,
+ .reg_write = ti_k3_rtc_scratch_write,
+};
+
+static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, "osc32k");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ priv->rate_32k = clk_get_rate(clk);
+
+ /* Expect an exact 32768 Hz clock; otherwise warn and compensate the sync delay below */
+ if (priv->rate_32k != 32768)
+ dev_warn(dev, "Clock rate %ld is not 32768! Could misbehave!\n",
+ priv->rate_32k);
+
+ /*
+ * Sync timeout should be two 32k clk sync cycles = ~61 uS. We double
+ * it to accommodate intermediate bus-segment and CPU frequency
+ * deltas.
+ */
+ priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
+
+ return ret;
+}
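For a nominal 32768 Hz clock the timeout above works out to DIV_ROUND_UP(1000000, 32768) = 31 us per sync cycle, quadrupled to 124 us (two cycles are the ~61 us mentioned in the comment, then doubled). A trivial check:

/* Standalone check of the sync timeout calculation for 32768 Hz. */
#include <stdio.h>

int main(void)
{
	unsigned long rate_32k = 32768;
	unsigned int per_cycle_us = (1000000 + rate_32k - 1) / rate_32k; /* DIV_ROUND_UP */
	unsigned int sync_timeout_us = per_cycle_us * 4;

	printf("%u us per cycle, timeout %u us\n", per_cycle_us, sync_timeout_us);
	return 0;
}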
+
+static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ /* Note: VBUS isn't a context clock, it is needed for hardware operation */
+ clk = devm_clk_get(dev, "vbus");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+}
+
+static int ti_k3_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_k3_rtc *priv;
+ void __iomem *rtc_base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct ti_k3_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc_base))
+ return PTR_ERR(rtc_base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, rtc_base, &ti_k3_rtc_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ ret = devm_regmap_field_bulk_alloc(dev, priv->regmap, priv->r_fields,
+ ti_rtc_reg_fields, K3_RTC_MAX_FIELDS);
+ if (ret)
+ return ret;
+
+ ret = k3rtc_get_32kclk(dev, priv);
+ if (ret)
+ return ret;
+ ret = k3rtc_get_vbusclk(dev, priv);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ priv->irq = (unsigned int)ret;
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->soc = of_device_get_match_data(dev);
+
+ priv->rtc_dev->ops = &ti_k3_rtc_ops;
+ priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48-bit seconds counter */
+ ti_k3_rtc_nvmem_config.priv = priv;
+
+ ret = devm_request_threaded_irq(dev, priv->irq, NULL,
+ ti_k3_rtc_interrupt,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "Could not request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = k3rtc_configure(dev);
+ if (ret)
+ return ret;
+
+ if (device_property_present(dev, "wakeup-source"))
+ device_init_wakeup(dev, true);
+ else
+ device_set_wakeup_capable(dev, true);
+
+ ret = devm_rtc_register_device(priv->rtc_dev);
+ if (ret)
+ return ret;
+
+ return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
+}
+
+static const struct ti_k3_rtc_soc_data ti_k3_am62_data = {
+ .unlock_irq_erratum = true,
+};
+
+static const struct of_device_id ti_k3_rtc_of_match_table[] = {
+ {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
+
+static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+ return 0;
+}
+
+static int __maybe_unused ti_k3_rtc_resume(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_k3_rtc_pm_ops, ti_k3_rtc_suspend, ti_k3_rtc_resume);
+
+static struct platform_driver ti_k3_rtc_driver = {
+ .probe = ti_k3_rtc_probe,
+ .driver = {
+ .name = "rtc-ti-k3",
+ .of_match_table = ti_k3_rtc_of_match_table,
+ .pm = &ti_k3_rtc_pm_ops,
+ },
+};
+module_platform_driver(ti_k3_rtc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 RTC driver");
+MODULE_AUTHOR("Nishanth Menon");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
deleted file mode 100644
index 5a9f9ad86d32..000000000000
--- a/drivers/rtc/rtc-vr41xx.c
+++ /dev/null
@@ -1,363 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for NEC VR4100 series Real Time Clock unit.
- *
- * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/compat.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/log2.h>
-
-#include <asm/div64.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
-MODULE_LICENSE("GPL v2");
-
-/* RTC 1 registers */
-#define ETIMELREG 0x00
-#define ETIMEMREG 0x02
-#define ETIMEHREG 0x04
-/* RFU */
-#define ECMPLREG 0x08
-#define ECMPMREG 0x0a
-#define ECMPHREG 0x0c
-/* RFU */
-#define RTCL1LREG 0x10
-#define RTCL1HREG 0x12
-#define RTCL1CNTLREG 0x14
-#define RTCL1CNTHREG 0x16
-#define RTCL2LREG 0x18
-#define RTCL2HREG 0x1a
-#define RTCL2CNTLREG 0x1c
-#define RTCL2CNTHREG 0x1e
-
-/* RTC 2 registers */
-#define TCLKLREG 0x00
-#define TCLKHREG 0x02
-#define TCLKCNTLREG 0x04
-#define TCLKCNTHREG 0x06
-/* RFU */
-#define RTCINTREG 0x1e
- #define TCLOCK_INT 0x08
- #define RTCLONG2_INT 0x04
- #define RTCLONG1_INT 0x02
- #define ELAPSEDTIME_INT 0x01
-
-#define RTC_FREQUENCY 32768
-#define MAX_PERIODIC_RATE 6553
-
-static void __iomem *rtc1_base;
-static void __iomem *rtc2_base;
-
-#define rtc1_read(offset) readw(rtc1_base + (offset))
-#define rtc1_write(offset, value) writew((value), rtc1_base + (offset))
-
-#define rtc2_read(offset) readw(rtc2_base + (offset))
-#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))
-
-/* 32-bit compat for ioctls that nobody else uses */
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)
-
-static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
-
-static DEFINE_SPINLOCK(rtc_lock);
-static char rtc_name[] = "RTC";
-static unsigned long periodic_count;
-static unsigned int alarm_enabled;
-static int aie_irq;
-static int pie_irq;
-
-static inline time64_t read_elapsed_second(void)
-{
-
- unsigned long first_low, first_mid, first_high;
-
- unsigned long second_low, second_mid, second_high;
-
- do {
- first_low = rtc1_read(ETIMELREG);
- first_mid = rtc1_read(ETIMEMREG);
- first_high = rtc1_read(ETIMEHREG);
- second_low = rtc1_read(ETIMELREG);
- second_mid = rtc1_read(ETIMEMREG);
- second_high = rtc1_read(ETIMEHREG);
- } while (first_low != second_low || first_mid != second_mid ||
- first_high != second_high);
-
- return ((u64)first_high << 17) | (first_mid << 1) | (first_low >> 15);
-}
-
-static inline void write_elapsed_second(time64_t sec)
-{
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ETIMELREG, (uint16_t)(sec << 15));
- rtc1_write(ETIMEMREG, (uint16_t)(sec >> 1));
- rtc1_write(ETIMEHREG, (uint16_t)(sec >> 17));
-
- spin_unlock_irq(&rtc_lock);
-}
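The (now removed) read/write helpers above pack the elapsed seconds across three 16-bit registers, with the seconds LSB living in bit 15 of the low register. A standalone round-trip check of that packing, using an arbitrary test value:

/* Userspace check of the vr41xx elapsed-seconds register packing. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sec = 1234567890ULL;			/* arbitrary, < 2^33 */
	uint16_t lo  = (uint16_t)(sec << 15);
	uint16_t mid = (uint16_t)(sec >> 1);
	uint16_t hi  = (uint16_t)(sec >> 17);
	uint64_t back = ((uint64_t)hi << 17) | ((uint64_t)mid << 1) | (lo >> 15);

	printf("wrote %llu, read back %llu\n",
	       (unsigned long long)sec, (unsigned long long)back);
	return 0;
}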
-
-static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, elapsed_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- elapsed_sec = read_elapsed_second();
-
- rtc_time64_to_tm(epoch_sec + elapsed_sec, time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, current_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- current_sec = rtc_tm_to_time64(time);
-
- write_elapsed_second(current_sec - epoch_sec);
-
- return 0;
-}
-
-static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- unsigned long low, mid, high;
- struct rtc_time *time = &wkalrm->time;
-
- spin_lock_irq(&rtc_lock);
-
- low = rtc1_read(ECMPLREG);
- mid = rtc1_read(ECMPMREG);
- high = rtc1_read(ECMPHREG);
- wkalrm->enabled = alarm_enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- rtc_time64_to_tm((high << 17) | (mid << 1) | (low >> 15), time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- time64_t alarm_sec;
-
- alarm_sec = rtc_tm_to_time64(&wkalrm->time);
-
- spin_lock_irq(&rtc_lock);
-
- if (alarm_enabled)
- disable_irq(aie_irq);
-
- rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15));
- rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1));
- rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17));
-
- if (wkalrm->enabled)
- enable_irq(aie_irq);
-
- alarm_enabled = wkalrm->enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case RTC_EPOCH_READ:
- return put_user(epoch, (unsigned long __user *)arg);
-#ifdef CONFIG_64BIT
- case RTC_EPOCH_READ32:
- return put_user(epoch, (unsigned int __user *)arg);
-#endif
- case RTC_EPOCH_SET:
- /* Doesn't support before 1900 */
- if (arg < 1900)
- return -EINVAL;
- epoch = arg;
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
- return 0;
-}
-
-static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- spin_lock_irq(&rtc_lock);
- if (enabled) {
- if (!alarm_enabled) {
- enable_irq(aie_irq);
- alarm_enabled = 1;
- }
- } else {
- if (alarm_enabled) {
- disable_irq(aie_irq);
- alarm_enabled = 0;
- }
- }
- spin_unlock_irq(&rtc_lock);
- return 0;
-}
-
-static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- rtc2_write(RTCINTREG, ELAPSEDTIME_INT);
-
- rtc_update_irq(rtc, 1, RTC_AF);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
- unsigned long count = periodic_count;
-
- rtc2_write(RTCINTREG, RTCLONG1_INT);
-
- rtc1_write(RTCL1LREG, count);
- rtc1_write(RTCL1HREG, count >> 16);
-
- rtc_update_irq(rtc, 1, RTC_PF);
-
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops vr41xx_rtc_ops = {
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
- .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
-};
-
-static int rtc_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct rtc_device *rtc;
- int retval;
-
- if (pdev->num_resources != 4)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc1_base)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc2_base) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
- goto err_iounmap_all;
- }
-
- rtc->ops = &vr41xx_rtc_ops;
-
- /* 48-bit counter at 32.768 kHz */
- rtc->range_max = (1ULL << 33) - 1;
- rtc->max_user_freq = MAX_PERIODIC_RATE;
-
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ECMPLREG, 0);
- rtc1_write(ECMPMREG, 0);
- rtc1_write(ECMPHREG, 0);
- rtc1_write(RTCL1LREG, 0);
- rtc1_write(RTCL1HREG, 0);
-
- spin_unlock_irq(&rtc_lock);
-
- aie_irq = platform_get_irq(pdev, 0);
- if (aie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0,
- "elapsed_time", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- pie_irq = platform_get_irq(pdev, 1);
- if (pie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0,
- "rtclong1", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- platform_set_drvdata(pdev, rtc);
-
- disable_irq(aie_irq);
- disable_irq(pie_irq);
-
- dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
-
- retval = devm_rtc_register_device(rtc);
- if (retval)
- goto err_iounmap_all;
-
- return 0;
-
-err_iounmap_all:
- rtc2_base = NULL;
-
-err_rtc1_iounmap:
- rtc1_base = NULL;
-
- return retval;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:RTC");
-
-static struct platform_driver rtc_platform_driver = {
- .probe = rtc_probe,
- .driver = {
- .name = rtc_name,
- },
-};
-
-module_platform_driver(rtc_platform_driver);
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index d1d5a44d9122..ba0d22a5b421 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -614,8 +614,7 @@ static void x1205_sysfs_unregister(struct device *dev)
}
-static int x1205_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int x1205_probe(struct i2c_client *client)
{
int err = 0;
unsigned char sr;
@@ -681,7 +680,7 @@ static struct i2c_driver x1205_driver = {
.name = "rtc-x1205",
.of_match_table = x1205_dt_ids,
},
- .probe = x1205_probe,
+ .probe_new = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index f440bb52be92..c9b85c838ebe 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -36,17 +37,23 @@
#define RTC_OSC_EN BIT(24)
#define RTC_BATT_EN BIT(31)
-#define RTC_CALIB_DEF 0x198233
+#define RTC_CALIB_DEF 0x7FFF
#define RTC_CALIB_MASK 0x1FFFFF
#define RTC_ALRM_MASK BIT(1)
#define RTC_MSEC 1000
+#define RTC_FR_MASK 0xF0000
+#define RTC_FR_MAX_TICKS 16
+#define RTC_PPB 1000000000LL
+#define RTC_MIN_OFFSET -32768000
+#define RTC_MAX_OFFSET 32767000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- unsigned int calibval;
+ struct clk *rtc_clk;
+ unsigned int freq;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -61,13 +68,6 @@ static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
*/
new_time = rtc_tm_to_time64(tm) + 1;
- /*
- * Writing into calibration register will clear the Tick Counter and
- * force the next second to be signaled exactly in 1 second period
- */
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
-
writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
/*
@@ -173,15 +173,76 @@ static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev)
rtc_ctrl = readl(xrtcdev->reg_base + RTC_CTRL);
rtc_ctrl |= RTC_BATT_EN;
writel(rtc_ctrl, xrtcdev->reg_base + RTC_CTRL);
+}
- /*
- * Based on crystal freq of 33.330 KHz
- * set the seconds counter and enable, set fractions counter
- * to default value suggested as per design spec
- * to correct RTC delay in frequency over period of time.
+static int xlnx_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned int calibval;
+ long offset_val;
+
+ calibval = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ /* Offset with seconds ticks */
+ offset_val = calibval & RTC_TICK_MASK;
+ offset_val = offset_val - RTC_CALIB_DEF;
+ offset_val = offset_val * tick_mult;
+
+ /* Offset with fractional ticks */
+ if (calibval & RTC_FR_EN)
+ offset_val += ((calibval & RTC_FR_MASK) >> RTC_FR_DATSHIFT)
+ * (tick_mult / RTC_FR_MAX_TICKS);
+ *offset = offset_val;
+
+ return 0;
+}
+
+static int xlnx_rtc_set_offset(struct device *dev, long offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned char fract_tick = 0;
+ unsigned int calibval;
+ short int max_tick;
+ int fract_offset;
+
+ if (offset < RTC_MIN_OFFSET || offset > RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Number ticks for given offset */
+ max_tick = div_s64_rem(offset, tick_mult, &fract_offset);
+
+ /* Number fractional ticks for given offset */
+ if (fract_offset) {
+ if (fract_offset < 0) {
+ fract_offset = fract_offset + tick_mult;
+ max_tick--;
+ }
+ if (fract_offset > (tick_mult / RTC_FR_MAX_TICKS)) {
+ for (fract_tick = 1; fract_tick < 16; fract_tick++) {
+ if (fract_offset <=
+ (fract_tick *
+ (tick_mult / RTC_FR_MAX_TICKS)))
+ break;
+ }
+ }
+ }
+
+ /* Zynqmp RTC uses second and fractional tick
+ * counters for compensation
*/
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+ calibval = max_tick + RTC_CALIB_DEF;
+
+ if (fract_tick)
+ calibval |= RTC_FR_EN;
+
+ calibval |= (fract_tick << RTC_FR_DATSHIFT);
+
+ writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+
+ return 0;
}
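The calibration arithmetic above splits a requested ppb offset into whole calibration ticks plus sixteenths of a tick. A rough userspace sketch, assuming tick_mult is the ppb weight of one tick (RTC_PPB divided by the clock frequency) and that fractional ticks are sixteenths of that; the values are illustrative, not taken from hardware:

/* Standalone sketch of splitting a ppb offset into whole and
 * fractional (1/16th) calibration ticks.
 */
#include <stdio.h>

int main(void)
{
	long long freq = 32768;				/* nominal crystal, Hz */
	long long tick_mult = 1000000000LL / freq;	/* ~30517 ppb per tick */
	long long offset = 100000;			/* requested offset, ppb */
	long long ticks = offset / tick_mult;
	long long rem = offset % tick_mult;
	long long fract = (rem * 16 + tick_mult - 1) / tick_mult; /* round up */

	/* prints tick_mult=30517, whole ticks=3, fractional 16ths=5 */
	printf("tick_mult=%lld ppb, whole ticks=%lld, fractional 16ths=%lld\n",
	       tick_mult, ticks, fract);
	return 0;
}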
static const struct rtc_class_ops xlnx_rtc_ops = {
@@ -190,6 +251,8 @@ static const struct rtc_class_ops xlnx_rtc_ops = {
.read_alarm = xlnx_rtc_read_alarm,
.set_alarm = xlnx_rtc_set_alarm,
.alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+ .read_offset = xlnx_rtc_read_offset,
+ .set_offset = xlnx_rtc_set_offset,
};
static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
@@ -255,10 +318,22 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node, "calibration",
- &xrtcdev->calibval);
- if (ret)
- xrtcdev->calibval = RTC_CALIB_DEF;
+ /* Getting the rtc_clk info */
+ xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
+ if (IS_ERR(xrtcdev->rtc_clk)) {
+ if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "Device clock not found.\n");
+ }
+ xrtcdev->freq = clk_get_rate(xrtcdev->rtc_clk);
+ if (!xrtcdev->freq) {
+ ret = of_property_read_u32(pdev->dev.of_node, "calibration",
+ &xrtcdev->freq);
+ if (ret)
+ xrtcdev->freq = RTC_CALIB_DEF;
+ }
+ ret = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ if (!ret)
+ writel(xrtcdev->freq, (xrtcdev->reg_base + RTC_CALIB_WR));
xlnx_init_rtc(xrtcdev);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ba6d78789660..ea82821599f6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1725,7 +1725,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
}
- /* check for for attention message */
+ /* check for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
@@ -3145,7 +3145,7 @@ out:
* BLK_EH_DONE if the request is handled or terminated
* by the driver.
*/
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
+enum blk_eh_timer_return dasd_times_out(struct request *req)
{
struct dasd_block *block = req->q->queuedata;
struct dasd_device *device;
@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
static void dasd_free_queue(struct dasd_block *block)
{
if (block->request_queue) {
- blk_cleanup_queue(block->request_queue);
+ blk_mq_destroy_queue(block->request_queue);
blk_mq_free_tag_set(&block->tag_set);
block->request_queue = NULL;
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e9edf3b6ed7c..94ee59864971 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -639,6 +639,7 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static int dasd_diag_pe_handler(struct dasd_device *device,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 836838f7d686..3cc93e2e4e15 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6626,6 +6626,7 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index a7a33ebf4bbe..5a83f0a39901 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
- gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
- &dasd_bio_compl_lkclass);
+ gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
+ &dasd_bio_compl_lkclass);
if (!gdp)
return -ENOMEM;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 83b918b84b4a..333a399f754e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -795,7 +795,7 @@ void dasd_free_device(struct dasd_device *);
struct dasd_block *dasd_alloc_block(void);
void dasd_free_block(struct dasd_block *);
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
+enum blk_eh_timer_return dasd_times_out(struct request *req);
void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 8d0d0eaa3059..5187705bd0f3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -414,7 +414,7 @@ removeseg:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
if (device_remove_file_self(dev, attr)) {
@@ -712,7 +712,7 @@ out_dax:
put_dax(dev_info->dax_dev);
put_dev:
list_del(&dev_info->lh);
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
segment_unload(seg_info->segment_name);
}
@@ -722,7 +722,7 @@ put_dev:
dev_list_del:
list_del(&dev_info->lh);
release_gd:
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
seg_list_del:
if (dev_info == NULL)
@@ -790,7 +790,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
@@ -863,7 +863,7 @@ dcssblk_submit_bio(struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 2a9c0ddcade5..0c1df1d5f1ac 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -501,7 +501,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(bdev->gendisk);
+ put_disk(bdev->gendisk);
out_tag:
blk_mq_free_tag_set(&bdev->tag_set);
out:
@@ -512,7 +512,7 @@ out:
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
del_gendisk(bdev->gendisk);
- blk_cleanup_disk(bdev->gendisk);
+ put_disk(bdev->gendisk);
blk_mq_free_tag_set(&bdev->tag_set);
}
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 57f41efb8043..7d1749b0d378 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -89,7 +89,7 @@ config HMC_DRV
Management Console (HMC) drive CD/DVD-ROM. It is available as a
module, called 'hmcdrv', and also as kernel built-in. There is one
optional parameter for this module: cachesize=N, which modifies the
- transfer cache size from it's default value 0.5MB to N bytes. If N
+ transfer cache size from its default value 0.5MB to N bytes. If N
is zero, then no caching is performed.
config SCLP_OFB
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index dd313ff57df3..d15b0d541de3 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -45,6 +45,10 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
+ sclp.has_aisii = !!(sccb->fac118 & 0x40);
+ sclp.has_aeni = !!(sccb->fac118 & 0x20);
+ sclp.has_aisi = !!(sccb->fac118 & 0x10);
+ sclp.has_zpci_lsi = !!(sccb->fac118 & 0x01);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 38cc1565d6ae..751945fb6793 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -548,7 +548,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
case 0x2e:
/*
* Not capable. This indicates either that the drive fails
- * reading the format id mark or that that format specified
+ * reading the format id mark or that format specified
* is not supported by the drive.
*/
dev_warn (&device->cdev->dev, "The tape unit cannot process "
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 66505d7166a6..1d40457c7b10 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -27,6 +27,7 @@
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/cpufeature.h>
#include <asm/uvdevice.h>
#include <asm/uv.h>
@@ -244,12 +245,10 @@ static void __exit uvio_dev_exit(void)
static int __init uvio_dev_init(void)
{
- if (!test_facility(158))
- return -ENXIO;
return misc_register(&uvio_dev_miscdev);
}
-module_init(uvio_dev_init);
+module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init);
module_exit(uvio_dev_exit);
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 516783ba950f..f6da215ccf9f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
+#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
@@ -50,36 +51,41 @@ static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
+static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
- * Copy memory from HSA to user memory (not reentrant):
+ * Copy memory from HSA to iterator (not reentrant):
*
- * @dest: User buffer where memory should be copied to
+ * @iter: Iterator where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ size_t bytes, copied, res = 0;
+ unsigned long offset;
if (!hsa_available)
- return -ENODATA;
+ return 0;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
+ break;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
- if (copy_to_user(dest, hsa_buf + offset, bytes))
- return -EFAULT;
- src += bytes;
- dest += bytes;
- count -= bytes;
+ copied = copy_to_iter(hsa_buf + offset, bytes, iter);
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < bytes)
+ break;
}
- return 0;
+ mutex_unlock(&hsa_buf_mutex);
+ return res;
}
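Each loop iteration of memcpy_hsa_iter() above copies at most up to the next page boundary: the offset into the page comes from src modulo the page size, and the chunk length is clamped to both the remaining page and the remaining count. That slicing can be checked on its own:

/* Standalone sketch of the per-page offset/length slicing; PAGE_SIZE
 * is assumed to be 4096 and the addresses are hypothetical.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long src = 0x1234;	/* hypothetical HSA offset */
	unsigned long count = 10000;	/* bytes to copy */

	while (count) {
		unsigned long offset = src % PAGE_SIZE;
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > count)
			bytes = count;
		printf("page %lu: copy %lu bytes from offset %lu\n",
		       src / PAGE_SIZE, bytes, offset);
		src += bytes;
		count -= bytes;
	}
	return 0;
}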
/*
@@ -89,25 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ struct iov_iter iter;
+ struct kvec kvec;
- if (!hsa_available)
- return -ENODATA;
-
- while (count) {
- if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
- TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
- }
- offset = src % PAGE_SIZE;
- bytes = min(PAGE_SIZE - offset, count);
- memcpy(dest, hsa_buf + offset, bytes);
- src += bytes;
- dest += bytes;
- count -= bytes;
- }
+ kvec.iov_base = dst;
+ kvec.iov_len = count;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ if (memcpy_hsa_iter(&iter, src, count) < count)
+ return -EIO;
return 0;
}
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index c0ed364bf446..34967e67249e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -99,7 +99,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
- airq->handler(airq, !tpi_info->directed_irq);
+ airq->handler(airq, tpi_info);
rcu_read_unlock();
return IRQ_HANDLED;
@@ -122,10 +122,12 @@ static inline unsigned long iv_size(unsigned long bits)
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
* @flags: allocation flags
+ * @vec: pointer to pinned guest memory if AIRQ_IV_GUESTVEC
*
* Returns a pointer to an interrupt vector structure
*/
-struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags,
+ unsigned long *vec)
{
struct airq_iv *iv;
unsigned long size;
@@ -146,6 +148,8 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
&iv->vector_dma);
if (!iv->vector)
goto out_free;
+ } else if (flags & AIRQ_IV_GUESTVEC) {
+ iv->vector = vec;
} else {
iv->vector = cio_dma_zalloc(size);
if (!iv->vector)
@@ -185,7 +189,7 @@ out_free:
kfree(iv->avail);
if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
- else
+ else if (!(iv->flags & AIRQ_IV_GUESTVEC))
cio_dma_free(iv->vector, size);
kfree(iv);
out:
@@ -204,7 +208,7 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->bitlock);
if (iv->flags & AIRQ_IV_CACHELINE)
dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
- else
+ else if (!(iv->flags & AIRQ_IV_GUESTVEC))
cio_dma_free(iv->vector, iv_size(iv->bits));
kfree(iv->avail);
kfree(iv);
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8e09bf3a2fcd..9b9335dd06db 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -15,6 +15,7 @@
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
+#include <asm/tpi.h>
#include "cio.h"
#include "ioasm.h"
@@ -93,9 +94,10 @@ static inline u32 clear_shared_ind(void)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
- * @floating: flag to recognize floating vs. directed interrupts (unused)
+ * @tpi_info: interrupt information (e.g. floating vs directed -- unused)
*/
-static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
+static void tiqdio_thinint_handler(struct airq_struct *airq,
+ struct tpi_info *tpi_info)
{
u64 irq_time = S390_lowcore.int_clock;
u32 si_used = clear_shared_ind();
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 7a838e3d7c0f..420d89ba7f83 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -8,7 +8,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0c2be9421ab7..7b02e97f4b29 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>
@@ -18,13 +19,11 @@
#include "vfio_ccw_cp.h"
#include "vfio_ccw_private.h"
-struct pfn_array {
- /* Starting guest physical I/O address. */
- unsigned long pa_iova;
- /* Array that stores PFNs of the pages need to pin. */
- unsigned long *pa_iova_pfn;
- /* Array that receives PFNs of the pages pinned. */
- unsigned long *pa_pfn;
+struct page_array {
+ /* Array that stores pages need to pin. */
+ dma_addr_t *pa_iova;
+ /* Array that receives the pinned pages. */
+ struct page **pa_page;
/* Number of pages pinned from @pa_iova. */
int pa_nr;
};
@@ -37,116 +36,158 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array *ch_pa;
+ struct page_array *ch_pa;
};
/*
- * pfn_array_alloc() - alloc memory for PFNs
- * @pa: pfn_array on which to perform the operation
+ * page_array_alloc() - alloc memory for page array
+ * @pa: page_array on which to perform the operation
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs.
+ * Attempt to allocate memory for page array.
*
- * Usage of pfn_array:
- * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * Usage of page_array:
+ * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
* this structure will be filled in by this function.
*
* Returns:
- * 0 if PFNs are allocated
- * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * 0 if page array is allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
{
int i;
- if (pa->pa_nr || pa->pa_iova_pfn)
+ if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_iova = iova;
-
pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (!pa->pa_nr)
return -EINVAL;
- pa->pa_iova_pfn = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova_pfn) +
- sizeof(*pa->pa_pfn),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_iova = kcalloc(pa->pa_nr,
+ sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
+ GFP_KERNEL);
+ if (unlikely(!pa->pa_iova)) {
pa->pa_nr = 0;
return -ENOMEM;
}
- pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+ pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- pa->pa_pfn[0] = -1ULL;
+ pa->pa_iova[0] = iova;
+ pa->pa_page[0] = NULL;
for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
- pa->pa_pfn[i] = -1ULL;
+ pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
+ pa->pa_page[i] = NULL;
}
return 0;
}
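The pa_nr computation above simply counts how many pages the [iova, iova+len) range touches, including a leading partial page. A quick standalone check with a range that straddles one page boundary (PAGE_SIZE assumed 4096, addresses made up):

/* Standalone check of the page-count computation in page_array_alloc(). */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_SHIFT	12

int main(void)
{
	unsigned long long iova = 0x10ff0;	/* hypothetical guest address */
	unsigned int len = 0x40;		/* spans a page boundary */
	int pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	printf("pa_nr = %d\n", pa_nr);		/* 2: 0x10ff0..0x1102f */
	return 0;
}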
/*
- * pfn_array_pin() - Pin user pages in memory
- * @pa: pfn_array on which to perform the operation
+ * page_array_unpin() - Unpin user pages in memory
+ * @pa: page_array on which to perform the operation
+ * @vdev: the vfio device to perform the operation
+ * @pa_nr: number of user pages to unpin
+ *
+ * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
+ * otherwise only clear pa->pa_nr
+ */
+static void page_array_unpin(struct page_array *pa,
+ struct vfio_device *vdev, int pa_nr)
+{
+ int unpinned = 0, npage = 1;
+
+ while (unpinned < pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[unpinned];
+ dma_addr_t *last = &first[npage];
+
+ if (unpinned + npage < pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ vfio_unpin_pages(vdev, *first, npage);
+ unpinned += npage;
+ npage = 1;
+ }
+
+ pa->pa_nr = 0;
+}
+
+/*
+ * page_array_pin() - Pin user pages in memory
+ * @pa: page_array on which to perform the operation
* @mdev: the mediated device to perform pin operations
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
{
+ int pinned = 0, npage = 1;
int ret = 0;
- ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
- IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+ while (pinned < pa->pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[pinned];
+ dma_addr_t *last = &first[npage];
- if (ret < 0) {
- goto err_out;
- } else if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
- ret = -EINVAL;
- goto err_out;
+ if (pinned + npage < pa->pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ ret = vfio_pin_pages(vdev, *first, npage,
+ IOMMU_READ | IOMMU_WRITE,
+ &pa->pa_page[pinned]);
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != npage) {
+ pinned += ret;
+ ret = -EINVAL;
+ goto err_out;
+ }
+ pinned += npage;
+ npage = 1;
}
return ret;
err_out:
- pa->pa_nr = 0;
-
+ page_array_unpin(pa, vdev, pinned);
return ret;
}
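The pin and unpin loops above batch runs of contiguous IOVAs so that each vfio_pin_pages()/vfio_unpin_pages() call covers as many pages as possible. The grouping logic on its own looks like the userspace sketch below, which just prints each contiguous run it finds; the addresses are made up:

/* Standalone sketch of the contiguous-range grouping done by
 * page_array_pin()/page_array_unpin().
 */
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	/* three contiguous pages, then a gap, then one more page */
	unsigned long long iova[] = { 0x1000, 0x2000, 0x3000, 0x9000 };
	int nr = sizeof(iova) / sizeof(iova[0]);
	int done = 0, npage = 1;

	while (done < nr) {
		unsigned long long first = iova[done];

		if (done + npage < nr &&
		    first + npage * PAGE_SIZE == iova[done + npage]) {
			npage++;
			continue;
		}
		printf("pin %d page(s) starting at 0x%llx\n", npage, first);
		done += npage;
		npage = 1;
	}
	return 0;
}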
/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
{
- /* Only unpin if any pages were pinned to begin with */
- if (pa->pa_nr)
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
- pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
+ page_array_unpin(pa, vdev, pa->pa_nr);
+ kfree(pa->pa_iova);
}
-static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
{
- unsigned long iova_pfn = iova >> PAGE_SHIFT;
+ u64 iova_pfn_start = iova >> PAGE_SHIFT;
+ u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
+ u64 pfn;
int i;
- for (i = 0; i < pa->pa_nr; i++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ for (i = 0; i < pa->pa_nr; i++) {
+ pfn = pa->pa_iova[i] >> PAGE_SHIFT;
+ if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
return true;
+ }
return false;
}
-/* Create the list of IDAL words for a pfn_array. */
-static inline void pfn_array_idal_create_words(
- struct pfn_array *pa,
- unsigned long *idaws)
+/* Create the list of IDAL words for a page_array. */
+static inline void page_array_idal_create_words(struct page_array *pa,
+ unsigned long *idaws)
{
int i;
@@ -159,10 +200,10 @@ static inline void pfn_array_idal_create_words(
*/
for (i = 0; i < pa->pa_nr; i++)
- idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+ idaws[i] = page_to_phys(pa->pa_page[i]);
/* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+ idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -194,24 +235,24 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
unsigned long n)
{
- struct pfn_array pa = {0};
- u64 from;
+ struct page_array pa = {0};
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc(&pa, iova, n);
+ ret = page_array_alloc(&pa, iova, n);
if (ret < 0)
return ret;
- ret = pfn_array_pin(&pa, vdev);
+ ret = page_array_pin(&pa, vdev);
if (ret < 0) {
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return ret;
}
l = n;
for (i = 0; i < pa.pa_nr; i++) {
- from = pa.pa_pfn[i] << PAGE_SHIFT;
+ void *from = kmap_local_page(pa.pa_page[i]);
+
m = PAGE_SIZE;
if (i == 0) {
from += iova & (PAGE_SIZE - 1);
@@ -219,14 +260,15 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
}
m = min(l, m);
- memcpy(to + (n - l), (void *)from, m);
+ memcpy(to + (n - l), from, m);
+ kunmap_local(from);
l -= m;
if (l == 0)
break;
}
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return l;
}
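
The per-page accounting in copy_from_iova() can be sketched on its own; here 'pages' stands in for the kmap_local_page() mappings of the pinned pages, and the function is illustrative rather than part of the driver:

#include <string.h>

static unsigned long copy_pages(void *to, void *const *pages, int nr,
				unsigned long iova, unsigned long n,
				unsigned long page_size)
{
	unsigned long l = n, m;
	int i;

	for (i = 0; i < nr; i++) {
		const char *from = pages[i];

		m = page_size;
		if (i == 0) {
			/* The first page may start mid-page. */
			from += iova & (page_size - 1);
			m -= iova & (page_size - 1);
		}

		m = m < l ? m : l;
		memcpy((char *)to + (n - l), from, m);

		l -= m;
		if (l == 0)
			break;
	}

	return l;	/* 0 on success; otherwise bytes left uncopied */
}
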
@@ -329,7 +371,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct pfn_array *)data;
+ chain->ch_pa = (struct page_array *)data;
chain->ch_len = len;
@@ -513,7 +555,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccw1 *ccw;
- struct pfn_array *pa;
+ struct page_array *pa;
u64 iova;
unsigned long *idaws;
int ret;
@@ -547,13 +589,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
}
/*
- * Allocate an array of pfn's for pages to pin/translate.
+ * Allocate an array of pages to pin/translate.
* The number of pages is actually the count of the idaws
 * required for the data transfer, since we only support
* 4K IDAWs today.
*/
pa = chain->ch_pa + idx;
- ret = pfn_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, iova, bytes);
if (ret < 0)
goto out_free_idaws;
@@ -564,21 +606,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
goto out_unpin;
/*
- * Copy guest IDAWs into pfn_array, in case the memory they
+ * Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
for (i = 0; i < idaw_nr; i++)
- pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ pa->pa_iova[i] = idaws[i];
} else {
/*
- * No action is required here; the iova addresses in pfn_array
- * were initialized sequentially in pfn_array_alloc() beginning
+ * No action is required here; the iova addresses in page_array
+ * were initialized sequentially in page_array_alloc() beginning
* with the contents of ccw->cda.
*/
}
if (ccw_does_data_transfer(ccw)) {
- ret = pfn_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev);
if (ret < 0)
goto out_unpin;
} else {
@@ -588,13 +630,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
- /* Populate the IDAL with pinned/translated addresses from pfn */
- pfn_array_idal_create_words(pa, idaws);
+ /* Populate the IDAL with pinned/translated addresses from page */
+ page_array_idal_create_words(pa, idaws);
return 0;
out_unpin:
- pfn_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev);
out_free_idaws:
kfree(idaws);
out_init:
@@ -700,7 +742,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- pfn_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(chain->ch_pa + i, vdev);
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -862,11 +904,12 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
* @cp: channel_program on which to perform the operation
* @iova: the iova to check
+ * @length: the length to check from @iova
*
* If the @iova is currently pinned for the ccw chain, return true;
* else return false.
*/
-bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
{
struct ccwchain *chain;
int i;
@@ -876,7 +919,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+ if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
return true;
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index e4c436199b4c..54d26e242533 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -41,11 +41,11 @@ struct channel_program {
struct ccw1 *guest_cp;
};
-extern int cp_init(struct channel_program *cp, union orb *orb);
-extern void cp_free(struct channel_program *cp);
-extern int cp_prefetch(struct channel_program *cp);
-extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
-extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
-extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+int cp_init(struct channel_program *cp, union orb *orb);
+void cp_free(struct channel_program *cp);
+int cp_prefetch(struct channel_program *cp);
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ee182cfb467d..86d9e428357b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
DECLARE_COMPLETION_ONSTACK(completion);
int iretry, ret = 0;
- spin_lock_irq(sch->lock);
- if (!sch->schib.pmcw.ena)
- goto out_unlock;
- ret = cio_disable_subchannel(sch);
- if (ret != -EBUSY)
- goto out_unlock;
-
iretry = 255;
do {
@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-out_unlock:
- private->state = VFIO_CCW_STATE_NOT_OPER;
- spin_unlock_irq(sch->lock);
+
return ret;
}
@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
/*
* Reset to IDLE only if processing of a channel program
* has finished. Do not overwrite a possible processing
- * state if the final interrupt was for HSCH or CSCH.
+ * state if the interrupt was unsolicited, or if the final
+ * interrupt was for HSCH or CSCH.
*/
- if (private->mdev && cp_is_finished)
+ if (cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
private->sch = sch;
mutex_init(&private->io_mutex);
- private->state = VFIO_CCW_STATE_NOT_OPER;
+ private->state = VFIO_CCW_STATE_STANDBY;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- spin_lock_irq(sch->lock);
- sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- spin_unlock_irq(sch->lock);
+ ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
if (ret)
goto out_free;
- private->state = VFIO_CCW_STATE_STANDBY;
-
- ret = vfio_ccw_mdev_reg(sch);
- if (ret)
- goto out_disable;
-
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
return 0;
-out_disable:
- cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_private(private);
@@ -261,8 +241,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- vfio_ccw_sch_quiesce(sch);
- vfio_ccw_mdev_unreg(sch);
+ mdev_unregister_device(&sch->dev);
dev_set_drvdata(&sch->dev, NULL);
@@ -275,7 +254,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
- vfio_ccw_sch_quiesce(sch);
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/**
@@ -301,19 +283,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
@@ -358,8 +332,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
return 0;
trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
- VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
- mdev_uuid(private->mdev), sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
+ sch->schid.cssid,
sch->schid.ssid, sch->schid.sch_no,
mask, event);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 8483a266051c..a59c758869f8 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -10,7 +10,8 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
+
+#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
- VFIO_CCW_TRACE_EVENT(2, "notoper");
- VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
+ sch->schid.cssid,
+ sch->schid.ssid,
+ sch->schid.sch_no,
+ event,
+ private->state);
/*
* TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
+
+	/* This is usually handled during the CLOSE event */
+ cp_free(&private->cp);
}
/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
- struct mdev_device *mdev = private->mdev;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): transport mode\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: transport mode\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_init=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_init=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_prefetch=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: fsm_io_helper=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): halt on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: halt on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): clear on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: clear on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
complete(private->completion);
}
+static void fsm_open(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_IDLE;
+ spin_unlock_irq(sch->lock);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
+static void fsm_close(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+
+ if (!sch->schib.pmcw.ena)
+ goto err_unlock;
+
+ ret = cio_disable_subchannel(sch);
+ if (ret == -EBUSY)
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_STANDBY;
+ spin_unlock_irq(sch->lock);
+ cp_free(&private->cp);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
/*
* Device statemachine
*/
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_nop,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_open,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
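
The jump table above follows the usual state/event dispatch pattern: one handler per (state, event) pair, so adding a state or an event is a table edit rather than new branch logic. A self-contained sketch with invented names:

enum demo_state { ST_CLOSED, ST_OPEN, NR_STATES };
enum demo_event { EV_OPEN, EV_CLOSE, NR_EVENTS };

typedef void (demo_fsm_func_t)(void *ctx, enum demo_event event);

static void demo_nop(void *ctx, enum demo_event event) { }
static void demo_open(void *ctx, enum demo_event event) { /* enable */ }
static void demo_close(void *ctx, enum demo_event event) { /* disable */ }

static demo_fsm_func_t *demo_jumptable[NR_STATES][NR_EVENTS] = {
	[ST_CLOSED] = {
		[EV_OPEN]  = demo_open,
		[EV_CLOSE] = demo_nop,
	},
	[ST_OPEN] = {
		[EV_OPEN]  = demo_nop,
		[EV_CLOSE] = demo_close,
	},
};

/* Dispatch: look up the handler for the current state and the event. */
static void demo_fsm_event(int state, void *ctx, enum demo_event event)
{
	demo_jumptable[state][event](ctx, event);
}
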
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index b49e2e9db2dc..4a806a2273b5 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -21,54 +21,28 @@ static const struct vfio_device_ops vfio_ccw_dev_ops;
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
- struct subchannel *sch;
- int ret;
-
- sch = private->sch;
/*
- * TODO:
- * In the cureent stage, some things like "no I/O running" and "no
- * interrupt pending" are clear, but we are not sure what other state
- * we need to care about.
- * There are still a lot more instructions need to be handled. We
- * should come back here later.
+ * If the FSM state is seen as Not Operational after closing
+ * and re-opening the mdev, return an error.
*/
- ret = vfio_ccw_sch_quiesce(sch);
- if (ret)
- return ret;
-
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- if (!ret)
- private->state = VFIO_CCW_STATE_IDLE;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
- return ret;
+ return 0;
}
-static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
struct vfio_ccw_private *private =
- container_of(nb, struct vfio_ccw_private, nb);
-
- /*
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
-
- if (!cp_iova_pinned(&private->cp, unmap->iova))
- return NOTIFY_OK;
+ container_of(vdev, struct vfio_ccw_private, vdev);
- if (vfio_ccw_mdev_reset(private))
- return NOTIFY_BAD;
+ /* Drivers MUST unpin pages in response to an invalidation. */
+ if (!cp_iova_pinned(&private->cp, iova, length))
+ return;
- cp_free(&private->cp);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
+ vfio_ccw_mdev_reset(private);
}
static ssize_t name_show(struct mdev_type *mtype,
@@ -128,11 +102,8 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
vfio_init_group_dev(&private->vdev, &mdev->dev,
&vfio_ccw_dev_ops);
- private->mdev = mdev;
- private->state = VFIO_CCW_STATE_IDLE;
-
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
@@ -145,8 +116,6 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
err_atomic:
vfio_uninit_group_dev(&private->vdev);
atomic_inc(&private->avail);
- private->mdev = NULL;
- private->state = VFIO_CCW_STATE_IDLE;
return ret;
}
@@ -154,23 +123,14 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
vfio_unregister_group_dev(&private->vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_sch_quiesce(private->sch))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
vfio_uninit_group_dev(&private->vdev);
- cp_free(&private->cp);
- private->mdev = NULL;
atomic_inc(&private->avail);
}
@@ -178,19 +138,15 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
- private->nb.notifier_call = vfio_ccw_mdev_notifier;
-
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
- if (ret)
- return ret;
+ /* Device cannot simply be opened again from this state */
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- goto out_unregister;
+ return ret;
ret = vfio_ccw_register_schib_dev_regions(private);
if (ret)
@@ -200,11 +156,16 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
if (ret)
goto out_unregister;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ ret = -EINVAL;
+ goto out_unregister;
+ }
+
return ret;
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
return ret;
}
@@ -213,16 +174,8 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(private))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
- cp_free(&private->cp);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -645,6 +598,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
.request = vfio_ccw_mdev_request,
+ .dma_unmap = vfio_ccw_dma_unmap,
};
struct mdev_driver vfio_ccw_mdev_driver = {
@@ -657,13 +611,3 @@ struct mdev_driver vfio_ccw_mdev_driver = {
.remove = vfio_ccw_mdev_remove,
.supported_type_groups = mdev_type_groups,
};
-
-int vfio_ccw_mdev_reg(struct subchannel *sch)
-{
- return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
-}
-
-void vfio_ccw_mdev_unreg(struct subchannel *sch)
-{
- mdev_unregister_device(&sch->dev);
-}
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 7272eb788612..cd24b7fada91 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -73,8 +73,6 @@ struct vfio_ccw_crw {
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @avail: available for creating a mediated device
- * @mdev: pointer to the mediated device
- * @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -97,8 +95,6 @@ struct vfio_ccw_private {
int state;
struct completion *completion;
atomic_t avail;
- struct mdev_device *mdev;
- struct notifier_block nb;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -119,10 +115,7 @@ struct vfio_ccw_private {
struct work_struct crw_work;
} __aligned(8);
-extern int vfio_ccw_mdev_reg(struct subchannel *sch);
-extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
-
-extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+int vfio_ccw_sch_quiesce(struct subchannel *sch);
extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -147,6 +140,8 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
VFIO_CCW_EVENT_ASYNC_REQ,
+ VFIO_CCW_EVENT_OPEN,
+ VFIO_CCW_EVENT_CLOSE,
/* last element! */
NR_VFIO_CCW_EVENTS
};
@@ -158,7 +153,7 @@ typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
- int event)
+ enum vfio_ccw_event event)
{
trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5c13d2079d96..59ac98f2bd27 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/airq.h>
+#include <asm/tpi.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
@@ -131,7 +132,8 @@ static int ap_max_adapter_id = 63;
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
-static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
+static void ap_interrupt_handler(struct airq_struct *airq,
+ struct tpi_info *tpi_info);
static bool ap_irq_flag;
@@ -452,9 +454,10 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
/**
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
- * @floating: ignored
+ * @tpi_info: ignored
*/
-static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
+static void ap_interrupt_handler(struct airq_struct *airq,
+ struct tpi_info *tpi_info)
{
inc_irq_stat(IRQIO_APB);
tasklet_schedule(&ap_tasklet);
@@ -835,6 +838,17 @@ static void ap_bus_revise_bindings(void)
bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
+/**
+ * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
+ * default host driver or not.
+ * @card: the APID of the adapter card to check
+ * @queue: the APQI of the queue to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether the AP adapter is reserved for the host (1)
+ * or not (0).
+ */
int ap_owned_by_def_drv(int card, int queue)
{
int rc = 0;
@@ -842,25 +856,31 @@ int ap_owned_by_def_drv(int card, int queue)
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
- mutex_lock(&ap_perms_mutex);
-
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
+/**
+ * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
+ * a set is reserved for the host drivers
+ * or not.
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether each APQN is reserved for the host (1) or
+ * not (0)
+ */
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm)
{
int card, queue, rc = 0;
- mutex_lock(&ap_perms_mutex);
-
for (card = 0; !rc && card < AP_DEVICES; card++)
if (test_bit_inv(card, apm) &&
test_bit_inv(card, ap_perms.apm))
@@ -869,8 +889,6 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
@@ -1435,7 +1453,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
if (ap_drv->in_use) {
rc = ap_drv->in_use(ap_perms.apm, newaqm);
if (rc)
- return -EBUSY;
+ rc = -EBUSY;
}
/* release the driver's module */
@@ -2068,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
+ if (!ap_qci_info) /* QCI not supported */
+ return false;
+
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0c40af157df2..0f17933954fb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -148,12 +148,16 @@ struct ap_driver {
/*
* Called at the start of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_config_changed)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
/*
* Called at the end of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_scan_complete)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index c48b0db824e3..a32457b4cbb8 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -34,7 +34,7 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
- status = ap_aqic(aq->qid, qirqctrl, ind);
+ status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7329caa7d467..5a05d1cdfec2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2115,5 +2115,5 @@ static void __exit pkey_exit(void)
pkey_debug_exit();
}
-module_cpu_feature_match(MSA, pkey_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 4ac9c6521ec1..f43cfeabd2cc 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -18,9 +18,6 @@
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_NAME "matrix"
-#define AP_QUEUE_ASSIGNED "assigned"
-#define AP_QUEUE_UNASSIGNED "unassigned"
-#define AP_QUEUE_IN_USE "in use"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
@@ -46,120 +43,12 @@ static struct ap_device_id ap_queue_ids[] = {
{ /* end of sibling */ },
};
-static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
-{
- struct ap_matrix_mdev *matrix_mdev;
- unsigned long apid = AP_QID_CARD(q->apqn);
- unsigned long apqi = AP_QID_QUEUE(q->apqn);
-
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
- if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
- test_bit_inv(apqi, matrix_mdev->matrix.aqm))
- return matrix_mdev;
- }
-
- return NULL;
-}
-
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t nchars = 0;
- struct vfio_ap_queue *q;
- struct ap_matrix_mdev *matrix_mdev;
- struct ap_device *apdev = to_ap_dev(dev);
-
- mutex_lock(&matrix_dev->lock);
- q = dev_get_drvdata(&apdev->device);
- matrix_mdev = vfio_ap_mdev_for_queue(q);
-
- if (matrix_mdev) {
- if (matrix_mdev->kvm)
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_IN_USE);
- else
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_ASSIGNED);
- } else {
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_UNASSIGNED);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return nchars;
-}
-
-static DEVICE_ATTR_RO(status);
-
-static struct attribute *vfio_queue_attrs[] = {
- &dev_attr_status.attr,
- NULL,
-};
-
-static const struct attribute_group vfio_queue_attr_group = {
- .attrs = vfio_queue_attrs,
-};
-
-/**
- * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
- * with the device as driver_data.
- *
- * @apdev: the AP device being probed
- *
- * Return: returns 0 if the probe succeeded; otherwise, returns an error if
- * storage could not be allocated for a vfio_ap_queue object or the
- * sysfs 'status' attribute could not be created for the queue device.
- */
-static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
-{
- int ret;
- struct vfio_ap_queue *q;
-
- q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
- mutex_lock(&matrix_dev->lock);
- dev_set_drvdata(&apdev->device, q);
- q->apqn = to_ap_queue(&apdev->device)->qid;
- q->saved_isc = VFIO_AP_ISC_INVALID;
-
- ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
- if (ret) {
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return ret;
-}
-
-/**
- * vfio_ap_queue_dev_remove: Free the associated vfio_ap_queue structure.
- *
- * @apdev: the AP device being removed
- *
- * Takes the matrix lock to avoid actions on this device while doing the remove.
- */
-static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
-{
- struct vfio_ap_queue *q;
-
- mutex_lock(&matrix_dev->lock);
- sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
- q = dev_get_drvdata(&apdev->device);
- vfio_ap_mdev_reset_queue(q, 1);
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- mutex_unlock(&matrix_dev->lock);
-}
-
static struct ap_driver vfio_ap_drv = {
- .probe = vfio_ap_queue_dev_probe,
- .remove = vfio_ap_queue_dev_remove,
+ .probe = vfio_ap_mdev_probe_queue,
+ .remove = vfio_ap_mdev_remove_queue,
+ .in_use = vfio_ap_mdev_resource_in_use,
+ .on_config_changed = vfio_ap_on_cfg_changed,
+ .on_scan_complete = vfio_ap_on_scan_complete,
.ids = ap_queue_ids,
};
@@ -212,8 +101,9 @@ static int vfio_ap_matrix_dev_create(void)
goto matrix_alloc_err;
}
- mutex_init(&matrix_dev->lock);
+ mutex_init(&matrix_dev->mdevs_lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
+ mutex_init(&matrix_dev->guests_lock);
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a7d2a95796d3..6c8c41fac4e1 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -26,44 +26,193 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
+
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);
+
+/**
+ * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be taken.
+ */
+static inline void get_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (kvm)
+ mutex_lock(&kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_kvm: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be released.
+ */
+static inline void release_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (kvm)
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+/**
+ * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be taken.
+ */
+static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_mdev: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. matrix_mdev->kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be released.
+ */
+static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
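
A hypothetical caller, just to show how the helpers above are meant to be paired around any APCB update (the body is a placeholder, not driver code):

static void demo_update_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	get_update_locks_for_mdev(matrix_mdev);

	/* ... recompute matrix_mdev->shadow_apcb and push it to the guest ... */

	release_update_locks_for_mdev(matrix_mdev);
}
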
-static int match_apqn(struct device *dev, const void *data)
+/**
+ * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
+ * acquire the locks required to update the APCB of
+ * the KVM guest to which the mdev is attached.
+ *
+ * @apqn: the APQN of a queue device.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
+ * will not be taken.
+ *
+ * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
+ * is not assigned to an ap_matrix_mdev.
+ */
+static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
- struct vfio_ap_queue *q = dev_get_drvdata(dev);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
+ test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
+ if (matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return matrix_mdev;
+ }
+ }
- return (q->apqn == *(int *)(data)) ? 1 : 0;
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return NULL;
}
/**
- * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
- * @matrix_mdev: the associated mediated matrix
- * @apqn: The queue APQN
+ * get_update_locks_for_queue: get the locks required to update the APCB of the
+ * KVM guest to which the matrix mdev linked to a
+ * vfio_ap_queue object is attached.
+ *
+ * @q: a pointer to a vfio_ap_queue object.
*
- * Retrieve a queue with a specific APQN from the list of the
- * devices of the vfio_ap_drv.
- * Verify that the APID and the APQI are set in the matrix.
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a
+ * KVM guest's APCB.
+ * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev
+ *
+ * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock
+ * will not be taken.
+ */
+static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (q->matrix_mdev && q->matrix_mdev->kvm)
+ mutex_lock(&q->matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
+ * hash table of queues assigned to a matrix mdev
+ * @matrix_mdev: the matrix mdev
+ * @apqn: The APQN of a queue device
*
- * Return: the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the vfio_ap_queue struct representing the queue or
+ * NULL if the queue is not assigned to @matrix_mdev
*/
-static struct vfio_ap_queue *vfio_ap_get_queue(
+static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
struct ap_matrix_mdev *matrix_mdev,
int apqn)
{
struct vfio_ap_queue *q;
- if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
- return NULL;
- if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
- return NULL;
-
- q = vfio_ap_find_queue(apqn);
- if (q)
- q->matrix_mdev = matrix_mdev;
+ hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
+ apqn) {
+ if (q && q->apqn == apqn)
+ return q;
+ }
- return q;
+ return NULL;
}
/**
@@ -112,7 +261,7 @@ static void vfio_ap_wait_for_irqclear(int apqn)
*
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
* Unpins the guest's page holding the NIB when it exists.
- * Resets the saved_pfn and saved_isc to invalid values.
+ * Resets the saved_iova and saved_isc to invalid values.
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
@@ -123,9 +272,9 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
q->saved_isc = VFIO_AP_ISC_INVALID;
}
- if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
- vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1);
- q->saved_pfn = 0;
+ if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
+ q->saved_iova = 0;
}
}
@@ -154,7 +303,7 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
int retries = 5;
do {
- status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ status = ap_aqic(q->apqn, aqic_gisa, 0);
switch (status.response_code) {
case AP_RESPONSE_OTHERWISE_CHANGED:
case AP_RESPONSE_NORMAL:
@@ -180,7 +329,6 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
status.response_code);
end_free:
vfio_ap_free_aqic_resources(q);
- q->matrix_mdev = NULL;
return status;
}
@@ -189,27 +337,19 @@ end_free:
*
* @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
* @nib: the location for storing the nib address.
- * @g_pfn: the location for storing the page frame number of the page containing
- * the nib.
*
* When the PQAP(AQIC) instruction is executed, general register 2 contains the
* address of the notification indicator byte (nib) used for IRQ notification.
- * This function parses the nib from gr2 and calculates the page frame
- * number for the guest of the page containing the nib. The values are
- * stored in @nib and @g_pfn respectively.
- *
- * The g_pfn of the nib is then validated to ensure the nib address is valid.
+ * This function parses and validates the nib from gr2.
*
 * Return: returns zero if the nib address is valid; otherwise, returns
* -EINVAL.
*/
-static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, unsigned long *nib,
- unsigned long *g_pfn)
+static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
*nib = vcpu->run->s.regs.gprs[2];
- *g_pfn = *nib >> PAGE_SHIFT;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *g_pfn)))
+ if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
return -EINVAL;
return 0;
@@ -239,33 +379,34 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
struct kvm_vcpu *vcpu)
{
- unsigned long nib;
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status = {};
struct kvm_s390_gisa *gisa;
+ struct page *h_page;
int nisc;
struct kvm *kvm;
- unsigned long h_nib, g_pfn, h_pfn;
+ phys_addr_t h_nib;
+ dma_addr_t nib;
int ret;
/* Verify that the notification indicator byte address is valid */
- if (vfio_ap_validate_nib(vcpu, &nib, &g_pfn)) {
- VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, nib, g_pfn, q->apqn);
+ if (vfio_ap_validate_nib(vcpu, &nib)) {
+ VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
+ __func__, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
- ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1,
- IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_page);
switch (ret) {
case 1:
break;
default:
VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
- "nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, ret, nib, g_pfn, q->apqn);
+ "nib=%pad, apqn=%#04x\n",
+ __func__, ret, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
@@ -274,7 +415,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
kvm = q->matrix_mdev->kvm;
gisa = kvm->arch.gisa_int.origin;
- h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
aqic_gisa.gisc = isc;
nisc = kvm_s390_gisc_register(kvm, isc);
@@ -290,17 +431,17 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
aqic_gisa.ir = 1;
aqic_gisa.gisa = (uint64_t)gisa >> 4;
- status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ status = ap_aqic(q->apqn, aqic_gisa, h_nib);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
/* See if we did clear older IRQ configuration */
vfio_ap_free_aqic_resources(q);
- q->saved_pfn = g_pfn;
+ q->saved_iova = nib;
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
 /* We could not modify IRQ settings: clear new configuration */
- vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
default:
@@ -406,10 +547,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
if (!vcpu->kvm->arch.crypto.pqap_hook) {
VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
__func__, apqn);
+
goto out_unlock;
}
@@ -425,7 +568,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
goto out_unlock;
}
- q = vfio_ap_get_queue(matrix_mdev, apqn);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
if (!q) {
VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
__func__, AP_QID_CARD(apqn),
@@ -444,7 +587,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
out_unlock:
memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
vcpu->run->s.regs.gprs[1] >>= 32;
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
}
@@ -456,6 +599,91 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->Nd : 15;
}
+static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm,
+ matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.adm);
+}
+
+static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+{
+ DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);
+
+ bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
+ bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
+ (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);
+
+ return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
+ AP_DOMAINS);
+}
+
+/*
+ * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
+ * to ensure no queue devices are passed through to
+ * the guest that are not bound to the vfio_ap
+ * device driver.
+ *
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
+ *
+ * Note: If an APQN references a queue device that is not bound to the vfio_ap
+ * driver, its APID will be filtered from the guest's APCB. The matrix
+ * structure precludes filtering an individual APQN, so its APID will be
+ * filtered.
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ struct ap_matrix_mdev *matrix_mdev)
+{
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+ DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
+ struct vfio_ap_queue *q;
+
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+ * from the matrix mdev, but only those that are assigned to the host's
+ * AP configuration.
+ */
+ bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
+ (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+ * AP configuration. The AP architecture won't
+ * allow filtering of a single APQN, so let's filter
+ * the APID since an adapter represents a physical
+ * hardware device.
+ */
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_rc) {
+ clear_bit_inv(apid,
+ matrix_mdev->shadow_apcb.apm);
+ break;
+ }
+ }
+ }
+
+ return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
+ AP_DEVICES) ||
+ !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
+ AP_DOMAINS);
+}
+
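
The filtering above boils down to masking the assigned bits with the host configuration and reporting whether the guest-visible view changed; a sketch with a plain word-sized mask instead of the kernel bitmap API (names invented):

static int demo_filter_mask(unsigned long *shadow, unsigned long assigned,
			    unsigned long host_config)
{
	unsigned long prev = *shadow;

	/* Expose to the guest only what the host configuration also has. */
	*shadow = assigned & host_config;

	/* Report whether the guest-visible mask changed. */
	return *shadow != prev;
}
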
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev;
@@ -475,20 +703,19 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
- mutex_lock(&matrix_dev->lock);
- list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
- mutex_unlock(&matrix_dev->lock);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+ hash_init(matrix_mdev->qtable.queues);
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
goto err_list;
dev_set_drvdata(&mdev->dev, matrix_mdev);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
err_list:
- mutex_lock(&matrix_dev->lock);
- list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
err_dec_available:
@@ -496,16 +723,62 @@ err_dec_available:
return ret;
}
+static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
+ struct vfio_ap_queue *q)
+{
+ if (q) {
+ q->matrix_mdev = matrix_mdev;
+ hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
+ }
+}
+
+static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
+{
+ struct vfio_ap_queue *q;
+
+ q = vfio_ap_find_queue(apqn);
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+}
+
+static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
+{
+ hash_del(&q->mdev_qnode);
+}
+
+static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
+{
+ q->matrix_mdev = NULL;
+}
+
+static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct vfio_ap_queue *q;
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ AP_DOMAINS) {
+ q = vfio_ap_mdev_get_queue(matrix_mdev,
+ AP_MKQID(apid, apqi));
+ if (q)
+ q->matrix_mdev = NULL;
+ }
+ }
+}
+
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&matrix_mdev->vdev);
- mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
atomic_inc(&matrix_dev->available_instances);
@@ -554,141 +827,48 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = {
NULL,
};
-struct vfio_ap_queue_reserved {
- unsigned long *apid;
- unsigned long *apqi;
- bool reserved;
-};
-
-/**
- * vfio_ap_has_queue - determines if the AP queue containing the target in @data
- *
- * @dev: an AP queue device
- * @data: a struct vfio_ap_queue_reserved reference
- *
- * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
- * apid or apqi specified in @data:
- *
- * - If @data contains both an apid and apqi value, then @data will be flagged
- * as reserved if the APID and APQI fields for the AP queue device matches
- *
- * - If @data contains only an apid value, @data will be flagged as
- * reserved if the APID field in the AP queue device matches
- *
- * - If @data contains only an apqi value, @data will be flagged as
- * reserved if the APQI field in the AP queue device matches
- *
- * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if
- * @data does not contain either an apid or apqi.
- */
-static int vfio_ap_has_queue(struct device *dev, void *data)
-{
- struct vfio_ap_queue_reserved *qres = data;
- struct ap_queue *ap_queue = to_ap_queue(dev);
- ap_qid_t qid;
- unsigned long id;
-
- if (qres->apid && qres->apqi) {
- qid = AP_MKQID(*qres->apid, *qres->apqi);
- if (qid == ap_queue->qid)
- qres->reserved = true;
- } else if (qres->apid && !qres->apqi) {
- id = AP_QID_CARD(ap_queue->qid);
- if (id == *qres->apid)
- qres->reserved = true;
- } else if (!qres->apid && qres->apqi) {
- id = AP_QID_QUEUE(ap_queue->qid);
- if (id == *qres->apqi)
- qres->reserved = true;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
- * @apid or @aqpi is reserved
- *
- * @apid: an AP adapter ID
- * @apqi: an AP queue index
- *
- * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
- * driver according to the following rules:
- *
- * - If both @apid and @apqi are not NULL, then there must be an AP queue
- * device bound to the vfio_ap driver with the APQN identified by @apid and
- * @apqi
- *
- * - If only @apid is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apid
- *
- * - If only @apqi is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apqi
- *
- * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
- */
-static int vfio_ap_verify_queue_reserved(unsigned long *apid,
- unsigned long *apqi)
-{
- int ret;
- struct vfio_ap_queue_reserved qres;
-
- qres.apid = apid;
- qres.apqi = apqi;
- qres.reserved = false;
-
- ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &qres, vfio_ap_has_queue);
- if (ret)
- return ret;
-
- if (qres.reserved)
- return 0;
-
- return -EADDRNOTAVAIL;
-}
+#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
+ "already assigned to %s"
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apid)
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm,
+ unsigned long *aqm)
{
- int ret;
- unsigned long apqi;
- unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(&apid, NULL);
-
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
+ unsigned long apid, apqi;
+ const struct device *dev = mdev_dev(matrix_mdev->mdev);
+ const char *mdev_name = dev_name(dev);
- return 0;
+ for_each_set_bit_inv(apid, apm, AP_DEVICES)
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
+ dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}
/**
- * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured
+ * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
- * @matrix_mdev: the mediated matrix device
+ * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
+ * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * Verifies that the APQNs derived from the cross product of the AP adapter IDs
- * and AP queue indexes comprising the AP matrix are not configured for another
+ * Verifies that each APQN derived from the Cartesian product of a bitmap of
+ * AP adapter IDs and AP queue indexes is not configured for any matrix
* mediated device. AP queue sharing is not allowed.
*
- * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+ unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *lstdev;
+ struct ap_matrix_mdev *matrix_mdev;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
- if (matrix_mdev == lstdev)
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ /*
+ * If the input apm and aqm are fields of the matrix_mdev
+ * object, then move on to the next matrix_mdev.
+ */
+ if (mdev_apm == matrix_mdev->matrix.apm &&
+ mdev_aqm == matrix_mdev->matrix.aqm)
continue;
memset(apm, 0, sizeof(apm));
@@ -698,14 +878,16 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, matrix_mdev->matrix.apm,
- lstdev->matrix.apm, AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
+ AP_DEVICES))
continue;
- if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
- lstdev->matrix.aqm, AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
+ AP_DOMAINS))
continue;
+ vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+
return -EADDRINUSE;
}
@@ -713,6 +895,41 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
}
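
As an aside, the sharing check above reduces to two bitmap intersections: an APQN is
present in two matrices exactly when its adapter ID is set in both adapter masks and
its queue index is set in both domain masks. A minimal, self-contained sketch of that
reasoning follows; 64-bit masks stand in for the driver's 256-bit bitmaps, and
masks_share_an_apqn is a hypothetical helper, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Return non-zero when the two (adapter mask, domain mask) pairs overlap. */
static int masks_share_an_apqn(uint64_t apm_a, uint64_t aqm_a,
			       uint64_t apm_b, uint64_t aqm_b)
{
	/*
	 * APQN (apid, apqi) lies in both Cartesian products iff apid is set
	 * in both adapter masks and apqi is set in both domain masks, i.e.
	 * iff both intersections are non-empty.
	 */
	return (apm_a & apm_b) && (aqm_a & aqm_b);
}

int main(void)
{
	/* mdev A: adapters {1,2}, domains {3}; mdev B: adapters {2}, domains {3,4} */
	printf("%d\n", masks_share_an_apqn(0x06, 0x08, 0x04, 0x18)); /* 1: APQN (2,3) is shared */
	return 0;
}
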
/**
+ * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
+ * not reserved for the default zcrypt driver and
+ * are not assigned to another mdev.
+ *
+ * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
+ *
+ * Return: One of the following values:
+ * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
+ * most likely -EBUSY indicating the ap_perms_mutex lock is already held.
+ * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
+ *   zcrypt default driver.
+ * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another
+ *   mdev.
+ * o Zero if validation succeeded.
+ */
+static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm))
+ return -EADDRNOTAVAIL;
+
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm);
+}
+
+static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ unsigned long apqi;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
+}
+
+/**
* assign_adapter_store - parses the APID from @buf and sets the
* corresponding bit in the mediated matrix device's APM
*
@@ -741,6 +958,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * A lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_adapter_store(struct device *dev,
struct device_attribute *attr,
@@ -748,15 +969,11 @@ static ssize_t assign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
+ DECLARE_BITMAP(apm_delta, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -767,33 +984,97 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
- /*
- * Set the bit in the AP mask (APM) corresponding to the AP adapter
- * number (APID). The bits in the mask, from most significant to least
- * significant bit, correspond to APIDs 0-255.
- */
- ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
- if (ret)
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
goto done;
+ }
- set_bit_inv(apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+ memset(apm_delta, 0, sizeof(apm_delta));
+ set_bit_inv(apid, apm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(apm_delta,
+ matrix_mdev->matrix.aqm, matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apid, matrix_mdev->matrix.apm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
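
For context, the apm_delta handling above rests on the fact that assigning a single
adapter only introduces the APQNs {apid} x {currently assigned domains}, so only that
slice needs to be filtered and possibly hot plugged. The sketch below illustrates the
idea under that assumption; AP_MKQID here is a local stand-in for the driver's macro
and the example values are made up.

#include <stdio.h>

/* Local stand-in for the AP bus macro: APQN = (adapter ID << 8) | queue index. */
#define AP_MKQID(apid, apqi)	((((apid) & 0xff) << 8) | ((apqi) & 0xff))

int main(void)
{
	unsigned long apid = 0x05;			/* the adapter just assigned */
	unsigned long domains[] = { 0x0004, 0x0047 };	/* domains already assigned  */
	unsigned int i;

	for (i = 0; i < sizeof(domains) / sizeof(domains[0]); i++)
		printf("new APQN %02lx.%04lx -> qid 0x%04lx\n",
		       apid, domains[i], (unsigned long)AP_MKQID(apid, domains[i]));
	return 0;
}
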
+static struct vfio_ap_queue
+*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid, unsigned long apqi)
+{
+ struct vfio_ap_queue *q = NULL;
+
+ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+ /* If the queue is assigned to the matrix mdev, unlink it. */
+ if (q)
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ return q;
+}
+
+/**
+ * vfio_ap_mdev_unlink_adapter - unlink all queues associated with the
+ *				 unassigned adapter from the matrix mdev to
+ *				 which the adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+ * @qtable: table for storing the queues associated with the unassigned adapter.
+ */
+static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int loop_cursor;
+ struct vfio_ap_queue *q;
+ struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+ hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
+
/**
* unassign_adapter_store - parses the APID from @buf and clears the
* corresponding bit in the mediated matrix device's APM
@@ -817,13 +1098,7 @@ static ssize_t unassign_adapter_store(struct device *dev,
unsigned long apid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -835,31 +1110,22 @@ static ssize_t unassign_adapter_store(struct device *dev,
}
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apqi)
+static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
{
- int ret;
unsigned long apid;
- unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(NULL, &apqi);
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
-
- return 0;
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
}
/**
@@ -891,6 +1157,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * The lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_domain_store(struct device *dev,
struct device_attribute *attr,
@@ -898,47 +1168,89 @@ static ssize_t assign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
+ DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If the KVM guest is running, disallow assignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
goto done;
- if (apqi > max_apqi) {
+
+ if (apqi > matrix_mdev->matrix.aqm_max) {
ret = -ENODEV;
goto done;
}
- ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
- if (ret)
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
goto done;
+ }
- set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+ memset(aqm_delta, 0, sizeof(aqm_delta));
+ set_bit_inv(apqi, aqm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_domain);
+static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int loop_cursor;
+ struct vfio_ap_queue *q;
+ struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+ hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
/**
* unassign_domain_store - parses the APQI from @buf and clears the
@@ -963,13 +1275,7 @@ static ssize_t unassign_domain_store(struct device *dev,
unsigned long apqi;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
@@ -981,10 +1287,11 @@ static ssize_t unassign_domain_store(struct device *dev,
}
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
@@ -1011,13 +1318,7 @@ static ssize_t assign_control_domain_store(struct device *dev,
unsigned long id;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &id);
if (ret)
@@ -1034,9 +1335,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
* number of control domains that can be assigned.
*/
set_bit_inv(id, matrix_mdev->matrix.adm);
+ if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
@@ -1062,28 +1366,28 @@ static ssize_t unassign_control_domain_store(struct device *dev,
int ret;
unsigned long domid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_domid = matrix_mdev->matrix.adm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If a KVM guest is running, disallow unassignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &domid);
if (ret)
goto done;
- if (domid > max_domid) {
+
+ if (domid > matrix_mdev->matrix.adm_max) {
ret = -ENODEV;
goto done;
}
clear_bit_inv(domid, matrix_mdev->matrix.adm);
+
+ if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
+ clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);
@@ -1099,40 +1403,36 @@ static ssize_t control_domains_show(struct device *dev,
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
n = sprintf(bufpos, "%04lx\n", id);
bufpos += n;
nchars += n;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(control_domains);
-static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
unsigned long apqi1;
- unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
- unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ unsigned long napm_bits = matrix->apm_max + 1;
+ unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
int n;
- apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
- apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
-
- mutex_lock(&matrix_dev->lock);
+ apid1 = find_first_bit_inv(matrix->apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm,
naqm_bits) {
n = sprintf(bufpos, "%02lx.%04lx\n", apid,
apqi);
@@ -1141,25 +1441,50 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
}
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
n = sprintf(bufpos, "%02lx.\n", apid);
bufpos += n;
nchars += n;
}
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
n = sprintf(bufpos, ".%04lx\n", apqi);
bufpos += n;
nchars += n;
}
}
- mutex_unlock(&matrix_dev->lock);
+ return nchars;
+}
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(matrix);
+static ssize_t guest_matrix_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(guest_matrix);
+
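
For reference, the three line formats emitted by vfio_ap_mdev_matrix_show() above,
depending on whether adapters, domains, or both are assigned, can be reproduced with
the same format strings. A throwaway sketch, not part of the patch:

#include <stdio.h>

int main(void)
{
	printf("%02lx.%04lx\n", 0x05UL, 0x47UL);	/* adapters and domains assigned: "05.0047" */
	printf("%02lx.\n", 0x05UL);			/* only adapters assigned:        "05."     */
	printf(".%04lx\n", 0x47UL);			/* only domains assigned:         ".0047"   */
	return 0;
}
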
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
@@ -1169,6 +1494,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_unassign_control_domain.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
+ &dev_attr_guest_matrix.attr,
NULL,
};
@@ -1201,59 +1527,32 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
if (m != matrix_mdev && m->kvm == kvm) {
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
return -EPERM;
}
}
kvm_get_kvm(kvm);
matrix_mdev->kvm = kvm;
- kvm_arch_crypto_set_masks(kvm,
- matrix_mdev->matrix.apm,
- matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
return 0;
}
-/**
- * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
- *
- * @nb: The notifier block
- * @action: Action to be taken
- * @data: data associated with the request
- *
- * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
- * pinned before). Other requests are ignored.
- *
- * Return: for an UNMAP request, NOFITY_OK; otherwise NOTIFY_DONE.
- */
-static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
+ u64 length)
{
- struct ap_matrix_mdev *matrix_mdev;
-
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
-
- vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1);
- return NOTIFY_OK;
- }
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- return NOTIFY_DONE;
+ vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
}
/**
@@ -1271,36 +1570,36 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
kvm->arch.crypto.pqap_hook = NULL;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
kvm_arch_crypto_clear_masks(kvm);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
}
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
- struct device *dev;
+ struct ap_queue *queue;
struct vfio_ap_queue *q = NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (dev) {
- q = dev_get_drvdata(dev);
- put_device(dev);
- }
+ queue = ap_get_qdev(apqn);
+ if (!queue)
+ return NULL;
+
+ if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
+ q = dev_get_drvdata(&queue->ap_dev.device);
+
+ put_device(&queue->ap_dev.device);
return q;
}
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry)
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry)
{
struct ap_queue_status status;
int ret;
@@ -1308,9 +1607,9 @@ int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
if (!q)
return 0;
-
retry_zapq:
status = ap_zapq(q->apqn);
+ q->reset_rc = status.response_code;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
ret = 0;
@@ -1325,12 +1624,17 @@ retry_zapq:
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
- WARN_ON_ONCE(status.irq_enabled);
+ WARN_ONCE(status.irq_enabled,
+ "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
+ status.response_code);
ret = -EBUSY;
goto free_resources;
default:
/* things are really broken, give up */
- WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ WARN(true,
+ "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
return -EIO;
}
@@ -1342,7 +1646,8 @@ retry_zapq:
msleep(20);
status = ap_tapq(q->apqn, NULL);
}
- WARN_ON_ONCE(retry2 <= 0);
+ WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));
free_resources:
vfio_ap_free_aqic_resources(q);
@@ -1350,27 +1655,20 @@ free_resources:
return ret;
}
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
- int ret;
- int rc = 0;
- unsigned long apid, apqi;
+ int ret, loop_cursor, rc = 0;
struct vfio_ap_queue *q;
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.apm_max + 1) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.aqm_max + 1) {
- q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
- ret = vfio_ap_mdev_reset_queue(q, 1);
- /*
- * Regardless whether a queue turns out to be busy, or
- * is not operational, we need to continue resetting
- * the remaining queues.
- */
- if (ret)
- rc = ret;
- }
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ ret = vfio_ap_mdev_reset_queue(q, 1);
+		/*
+		 * Regardless of whether a queue turns out to be busy or
+		 * is not operational, we need to continue resetting
+		 * the remaining queues.
+		 */
+ if (ret)
+ rc = ret;
}
return rc;
@@ -1380,27 +1678,11 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- unsigned long events;
- int ret;
if (!vdev->kvm)
return -EINVAL;
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
- if (ret)
- return ret;
-
- matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events,
- &matrix_mdev->iommu_notifier);
- if (ret)
- goto err_kvm;
- return 0;
-
-err_kvm:
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- return ret;
+ return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}
static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
@@ -1408,8 +1690,6 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &matrix_mdev->iommu_notifier);
vfio_ap_mdev_unset_kvm(matrix_mdev);
}
@@ -1440,27 +1720,84 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return ret;
}
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long apid = AP_QID_CARD(q->apqn);
+ unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+ test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+ return matrix_mdev;
+ }
+
+ return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+ if (matrix_mdev) {
+ if (matrix_mdev->kvm)
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_IN_USE);
+ else
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_ASSIGNED);
+ } else {
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_UNASSIGNED);
+ }
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *vfio_queue_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+ .attrs = vfio_queue_attrs,
+};
+
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
+ .dma_unmap = vfio_ap_mdev_dma_unmap,
};
static struct mdev_driver vfio_ap_matrix_driver = {
@@ -1500,3 +1837,432 @@ void vfio_ap_mdev_unregister(void)
mdev_unregister_device(&matrix_dev->device);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
+
+int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+{
+ int ret;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ if (ret)
+ return ret;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ matrix_mdev = get_update_locks_by_apqn(q->apqn);
+
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+ return 0;
+}
+
+void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+{
+ unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
+
+ if (matrix_mdev) {
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+
+		/*
+		 * If the queue is assigned to the guest's APCB, then remove
+		 * the adapter's APID from the APCB and hot unplug the adapter
+		 * from the guest.
+		 */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ }
+
+ vfio_ap_mdev_reset_queue(q, 1);
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
+ * assigned to a mediated device under the control
+ * of the vfio_ap device driver.
+ *
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
+ *
+ * Return:
+ * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
+ * assigned to a mediated device under the control of the vfio_ap
+ * device driver.
+ * * Otherwise, return 0.
+ */
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+{
+ int ret;
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+
+ return ret;
+}
+
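
Note that vfio_ap_mdev_resource_in_use() above takes the coarse guests_lock before the
finer mdevs_lock and releases them in the opposite order; acquiring the two locks in a
fixed order on every path is what rules out deadlock between them. A userspace sketch of
that discipline with plain pthread mutexes follows; the names mirror the driver's, but
nothing here is kernel code.

#include <pthread.h>

static pthread_mutex_t guests_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mdevs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Run fn() with both locks held, always acquiring them in the same order. */
static void with_update_locks(void (*fn)(void *), void *arg)
{
	pthread_mutex_lock(&guests_lock);	/* outer lock first */
	pthread_mutex_lock(&mdevs_lock);	/* then the inner lock */
	fn(arg);
	pthread_mutex_unlock(&mdevs_lock);	/* release in reverse order */
	pthread_mutex_unlock(&guests_lock);
}
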
+/**
+ * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
+ * domains that have been removed from the host's
+ * AP configuration from a guest.
+ *
+ * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
+ * @aprem: the adapters that have been removed from the host's AP configuration
+ * @aqrem: the domains that have been removed from the host's AP configuration
+ * @cdrem: the control domains that have been removed from the host's AP
+ * configuration.
+ */
+static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *aprem,
+ unsigned long *aqrem,
+ unsigned long *cdrem)
+{
+ int do_hotplug = 0;
+
+ if (!bitmap_empty(aprem, AP_DEVICES)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.apm,
+ aprem, AP_DEVICES);
+ }
+
+ if (!bitmap_empty(aqrem, AP_DOMAINS)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.aqm,
+					    aqrem, AP_DOMAINS);
+ }
+
+ if (!bitmap_empty(cdrem, AP_DOMAINS))
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
+ matrix_mdev->shadow_apcb.adm,
+ cdrem, AP_DOMAINS);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
+ * domains and control domains that have been removed
+ * from the host AP configuration and unplugs them
+ * from those guests.
+ *
+ * @ap_remove: bitmap specifying which adapters have been removed from the host
+ * config.
+ * @aq_remove: bitmap specifying which domains have been removed from the host
+ * config.
+ * @cd_remove: bitmap specifying which control domains have been removed from
+ * the host config.
+ */
+static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
+ unsigned long *aq_remove,
+ unsigned long *cd_remove)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+ int do_remove = 0;
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ do_remove |= bitmap_and(aprem, ap_remove,
+ matrix_mdev->matrix.apm,
+ AP_DEVICES);
+ do_remove |= bitmap_and(aqrem, aq_remove,
+ matrix_mdev->matrix.aqm,
+ AP_DOMAINS);
+		do_remove |= bitmap_and(cdrem, cd_remove,
+					matrix_mdev->matrix.adm,
+					AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
+ cdrem);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
+ * control domains from the host AP configuration
+ * by unplugging them from the guests that are
+ * using them.
+ * @cur_config_info: the current host AP configuration information
+ * @prev_config_info: the previous host AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ int do_remove;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+
+ do_remove = bitmap_andnot(aprem,
+ (unsigned long *)prev_config_info->apm,
+ (unsigned long *)cur_config_info->apm,
+ AP_DEVICES);
+ do_remove |= bitmap_andnot(aqrem,
+ (unsigned long *)prev_config_info->aqm,
+ (unsigned long *)cur_config_info->aqm,
+				   AP_DOMAINS);
+ do_remove |= bitmap_andnot(cdrem,
+ (unsigned long *)prev_config_info->adm,
+ (unsigned long *)cur_config_info->adm,
+				   AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
+}
+
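
The removal masks above fall out of a single set difference: whatever was in the
previous configuration but is not in the current one, which is exactly what
bitmap_andnot() computes. A toy illustration, with an 8-bit mask standing in for the
256-bit AP bitmaps:

#include <stdio.h>

int main(void)
{
	unsigned int prev = 0xb1;		/* adapters present before the config change */
	unsigned int cur  = 0x91;		/* adapters present after the config change  */
	unsigned int rem  = prev & ~cur;	/* bitmap_andnot(rem, prev, cur, ...)        */

	printf("removed adapters mask: 0x%02x\n", rem);	/* prints 0x20 */
	return 0;
}
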
+/**
+ * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
+ * are older than AP type 10 (CEX4).
+ * @apm: a bitmap of the APIDs to examine
+ * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
+ */
+static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
+{
+ bool apid_cleared;
+ struct ap_queue_status status;
+ unsigned long apid, apqi, info;
+ int qtype, qtype_mask = 0xff000000;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ apid_cleared = false;
+
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
+ switch (status.response_code) {
+ /*
+ * According to the architecture in each case
+ * below, the queue's info should be filled.
+ */
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
+ qtype = info & qtype_mask;
+
+ /*
+ * The vfio_ap device driver only
+ * supports CEX4 and newer adapters, so
+ * remove the APID if the adapter is
+ * older than a CEX4.
+ */
+ if (qtype < AP_DEVICE_TYPE_CEX4) {
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ }
+
+ break;
+
+ default:
+ /*
+ * If we don't know the adapter type,
+ * clear its APID since it can't be
+ * determined whether the vfio_ap
+ * device driver supports it.
+ */
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ break;
+ }
+
+ /*
+ * If we've already cleared the APID from the apm, there
+ * is no need to continue examining the remainin AP
+			 * is no need to continue examining the remaining AP
+ */
+ if (apid_cleared)
+ continue;
+ }
+ }
+}
+
+/**
+ * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
+ * control domains that have been added to the host's
+ * AP configuration for each matrix mdev to which they
+ * are assigned.
+ *
+ * @apm_add: a bitmap specifying the adapters that have been added to the AP
+ * configuration.
+ * @aqm_add: a bitmap specifying the domains that have been added to the AP
+ * configuration.
+ * @adm_add: a bitmap specifying the control domains that have been added to the
+ * AP configuration.
+ */
+static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
+ unsigned long *adm_add)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (list_empty(&matrix_dev->mdev_list))
+ return;
+
+ vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ bitmap_and(matrix_mdev->apm_add,
+ matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
+ bitmap_and(matrix_mdev->aqm_add,
+ matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
+ bitmap_and(matrix_mdev->adm_add,
+			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
+ * control domains to the host AP configuration
+ * by updating the bitmaps that specify what adapters,
+ * domains and control domains have been added so they
+ * can be hot plugged into the guest when the AP bus
+ * scan completes (see vfio_ap_on_scan_complete
+ * function).
+ * @cur_config_info: the current AP configuration information
+ * @prev_config_info: the previous AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ bool do_add;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
+
+ do_add = bitmap_andnot(apm_add,
+ (unsigned long *)cur_config_info->apm,
+ (unsigned long *)prev_config_info->apm,
+ AP_DEVICES);
+ do_add |= bitmap_andnot(aqm_add,
+ (unsigned long *)cur_config_info->aqm,
+ (unsigned long *)prev_config_info->aqm,
+ AP_DOMAINS);
+ do_add |= bitmap_andnot(adm_add,
+ (unsigned long *)cur_config_info->adm,
+ (unsigned long *)prev_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_add)
+ vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
+}
+
+/**
+ * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
+ * configuration.
+ *
+ * @cur_cfg_info: the current host AP configuration
+ * @prev_cfg_info: the previous host AP configuration
+ */
+void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+ struct ap_config_info *prev_cfg_info)
+{
+ if (!cur_cfg_info || !prev_cfg_info)
+ return;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
+ vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
+ memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+{
+ bool do_hotplug = false;
+ int filter_domains = 0;
+ int filter_adapters = 0;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+ matrix_mdev->apm_add, AP_DEVICES);
+ filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ matrix_mdev->aqm_add, AP_DOMAINS);
+
+ if (filter_adapters && filter_domains)
+ do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+ else if (filter_adapters)
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev);
+ else
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+ aqm, matrix_mdev);
+
+ if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+ AP_DOMAINS))
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+}
+
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
+ bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
+ bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
+ continue;
+
+ vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
+ bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
+ bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
+ bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
+ }
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index a26efd804d0d..d782cf463eab 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/hashtable.h>
#include "ap_bus.h"
@@ -32,20 +33,26 @@
* @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
- * @lock: mutex for locking the AP matrix device. This lock will be
+ * @mdevs_lock: mutex for locking the AP matrix device. This lock will be
* taken every time we fiddle with state managed by the vfio_ap
* driver, be it using @mdev_list or writing the state of a
* single ap_matrix_mdev device. It's quite coarse but we don't
* expect much contention.
* @vfio_ap_drv: the vfio_ap device driver
+ * @guests_lock: mutex for controlling access to a guest that is using AP
+ * devices passed through by the vfio_ap device driver. This lock
+ * will be taken when the AP devices are plugged into or unplugged
+ * from a guest, and when an ap_matrix_mdev device is added to or
+ * removed from @mdev_list or the list is iterated.
*/
struct ap_matrix_dev {
struct device device;
atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
- struct mutex lock;
+ struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
struct ap_driver *vfio_ap_drv;
+ struct mutex guests_lock; /* serializes access to each KVM guest */
};
extern struct ap_matrix_dev *matrix_dev;
@@ -75,48 +82,77 @@ struct ap_matrix {
};
/**
+ * struct ap_queue_table - a table of queue objects.
+ *
+ * @queues: a hashtable of queues (struct vfio_ap_queue).
+ */
+struct ap_queue_table {
+ DECLARE_HASHTABLE(queues, 8);
+};
+
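
The 2^8-bucket hashtable above keys struct vfio_ap_queue objects by APQN so that
vfio_ap_mdev_get_queue() stays cheap even with many queues linked to one mdev. Below is
a self-contained userspace sketch of the same lookup structure; it buckets by the low
bits of the APQN, whereas the kernel's hash_add()/hash_for_each_possible() helpers use
their own hash function, but the shape of the lookup is the same.

#include <stdio.h>

#define NBUCKETS 256

struct queue {
	int apqn;
	struct queue *next;	/* per-bucket chain, like an hlist */
};

static struct queue *buckets[NBUCKETS];

static void table_add(struct queue *q)
{
	int b = q->apqn & (NBUCKETS - 1);	/* pick a bucket from the key */

	q->next = buckets[b];
	buckets[b] = q;
}

static struct queue *table_find(int apqn)
{
	struct queue *q;

	for (q = buckets[apqn & (NBUCKETS - 1)]; q; q = q->next)
		if (q->apqn == apqn)	/* buckets can collide, so compare keys */
			return q;
	return NULL;
}

int main(void)
{
	struct queue q = { .apqn = 0x0547 };

	table_add(&q);
	printf("found: %d\n", table_find(0x0547) != NULL);	/* prints 1 */
	return 0;
}
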
+/**
* struct ap_matrix_mdev - Contains the data associated with a matrix mediated
* device.
* @vdev: the vfio device
* @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
- * @iommu_notifier: notifier block used for specifying callback function for
- * handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP even
+ * @shadow_apcb: the shadow copy of the APCB field of the KVM guest's CRYCB
* @kvm: the struct holding guest's state
* @pqap_hook: the function pointer to the interception handler for the
* PQAP(AQIC) instruction.
* @mdev: the mediated device
+ * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
+ * @apm_add: bitmap of APIDs added to the host's AP configuration
+ * @aqm_add: bitmap of APQIs added to the host's AP configuration
+ * @adm_add: bitmap of control domain numbers added to the host's AP
+ * configuration
*/
struct ap_matrix_mdev {
struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
- struct notifier_block iommu_notifier;
+ struct ap_matrix shadow_apcb;
struct kvm *kvm;
crypto_hook pqap_hook;
struct mdev_device *mdev;
+ struct ap_queue_table qtable;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
};
/**
* struct vfio_ap_queue - contains the data associated with a queue bound to the
* vfio_ap device driver
* @matrix_mdev: the matrix mediated device
- * @saved_pfn: the guest PFN pinned for the guest
+ * @saved_iova: the notification indicator byte (nib) address
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+ * @reset_rc: the status response code from the last reset of the queue
*/
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
- unsigned long saved_pfn;
+ dma_addr_t saved_iova;
int apqn;
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
+ unsigned int reset_rc;
};
int vfio_ap_mdev_register(void);
void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry);
+
+int vfio_ap_mdev_probe_queue(struct ap_device *queue);
+void vfio_ap_mdev_remove_queue(struct ap_device *queue);
+
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm);
+
+void vfio_ap_on_cfg_changed(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 5f7e28de8b15..d34bb6ec1490 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -409,20 +409,19 @@ static void ism_create_system_eid(void)
memcpy(&SYSTEM_EID.type, tmp, 4);
}
-static void ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
+static u8 *ism_get_system_eid(void)
{
- *eid = &SYSTEM_EID.seid_string[0];
+ return SYSTEM_EID.seid_string;
}
static u16 ism_get_chid(struct smcd_dev *smcd)
{
- struct ism_dev *ismdev;
+ struct ism_dev *ism = (struct ism_dev *)smcd->priv;
- ismdev = (struct ism_dev *)smcd->priv;
- if (!ismdev || !ismdev->pdev)
+ if (!ism || !ism->pdev)
return 0;
- return to_zpci(ismdev->pdev)->pchid;
+ return to_zpci(ism->pdev)->pchid;
}
static void ism_handle_event(struct ism_dev *ism)
@@ -444,6 +443,7 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
struct ism_dev *ism = data;
unsigned long bit, end;
unsigned long *bv;
+ u16 dmbemask;
bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
@@ -457,9 +457,10 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
break;
clear_bit_inv(bit, bv);
+ dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
- smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
+ smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET, dmbemask);
}
if (ism->sba->e) {
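
The dmbemask handling above follows a snapshot-then-clear pattern: copy the pending
event bits, zero the live mask so the device can post new events, then hand the snapshot
to the handler. A minimal sketch of that idea; take_event_snapshot is a made-up helper,
not an ISM interface.

#include <stdint.h>

static uint16_t take_event_snapshot(volatile uint16_t *mask)
{
	uint16_t snapshot = *mask;	/* capture the events posted so far       */

	*mask = 0;			/* re-arm: later events set fresh bits    */
	__sync_synchronize();		/* order the clear before handling starts */
	return snapshot;
}
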
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9e54fe76a9b2..8bd9fd51208c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -763,6 +763,49 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
ipa_name, com, CARD_DEVID(card));
}
+static void qeth_default_link_info(struct qeth_card *card)
+{
+ struct qeth_link_info *link_info = &card->info.link_info;
+
+ QETH_CARD_TEXT(card, 2, "dftlinfo");
+ link_info->duplex = DUPLEX_FULL;
+
+ if (IS_IQD(card) || IS_VM_NIC(card)) {
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ } else {
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ link_info->speed = SPEED_100;
+ link_info->port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ link_info->speed = SPEED_25000;
+ link_info->port = PORT_FIBRE;
+ break;
+ default:
+ dev_info(&card->gdev->dev,
+ "Unknown link type %x\n",
+ card->info.link_type);
+ link_info->speed = SPEED_UNKNOWN;
+ link_info->port = PORT_OTHER;
+ }
+
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ }
+}
+
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
@@ -790,6 +833,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
+ qeth_default_link_info(card);
}
return NULL;
case IPA_CMD_STARTLAN:
@@ -3565,7 +3609,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (!atomic_read(&queue->set_pci_flags_count)) {
/*
* there's no outstanding PCI any more, so we
- * have to request a PCI to be sure the the PCI
+ * have to request a PCI to be sure the PCI
* will wake at some time in the future then we
* can flush packed buffers that might still be
* hanging around, which can happen if no
@@ -4744,92 +4788,6 @@ out_free:
return rc;
}
-static int qeth_query_card_info_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
-{
- struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
- struct qeth_link_info *link_info = reply->param;
- struct qeth_query_card_info *card_info;
-
- QETH_CARD_TEXT(card, 2, "qcrdincb");
- if (qeth_setadpparms_inspect_rc(cmd))
- return -EIO;
-
- card_info = &cmd->data.setadapterparms.data.card_info;
- netdev_dbg(card->dev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- card_info->card_type, card_info->port_mode,
- card_info->port_speed);
-
- switch (card_info->port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- link_info->duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- link_info->duplex = DUPLEX_HALF;
- break;
- default:
- link_info->duplex = DUPLEX_UNKNOWN;
- }
-
- switch (card_info->card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_TP;
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_FIBRE;
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- link_info->speed = SPEED_10000;
- link_info->port = PORT_FIBRE;
- break;
- default:
- switch (card_info->port_speed) {
- case CARD_INFO_PORTS_10M:
- link_info->speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- link_info->speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- link_info->speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- link_info->speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- link_info->speed = SPEED_25000;
- break;
- default:
- link_info->speed = SPEED_UNKNOWN;
- }
-
- link_info->port = PORT_OTHER;
- }
-
- return 0;
-}
-
-int qeth_query_card_info(struct qeth_card *card,
- struct qeth_link_info *link_info)
-{
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "qcrdinfo");
- if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
- return -EOPNOTSUPP;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
- if (!iob)
- return -ENOMEM;
-
- return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
-}
-
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_reply *reply_priv,
unsigned long data)
@@ -4839,6 +4797,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_query_oat_physical_if *phys_if;
struct qeth_query_oat_reply *reply;
+ QETH_CARD_TEXT(card, 2, "qoatincb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
@@ -4918,41 +4877,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
static void qeth_init_link_info(struct qeth_card *card)
{
- card->info.link_info.duplex = DUPLEX_FULL;
-
- if (IS_IQD(card) || IS_VM_NIC(card)) {
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
- } else {
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- card->info.link_info.speed = SPEED_100;
- card->info.link_info.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- card->info.link_info.speed = SPEED_1000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- card->info.link_info.speed = SPEED_25000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- default:
- dev_info(&card->gdev->dev, "Unknown link type %x\n",
- card->info.link_type);
- card->info.link_info.speed = SPEED_UNKNOWN;
- card->info.link_info.port = PORT_OTHER;
- }
-
- card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
- }
+ qeth_default_link_info(card);
/* Get more accurate data via QUERY OAT: */
if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
@@ -5461,6 +5386,7 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
qeth_clear_working_pool_list(card);
qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
+ qeth_default_link_info(card);
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index b0b36b2132fe..9eba0a32e9f9 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -428,8 +428,8 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
- struct qeth_link_info link_info;
+ QETH_CARD_TEXT(card, 4, "ethtglks");
cmd->base.speed = card->info.link_info.speed;
cmd->base.duplex = card->info.link_info.duplex;
cmd->base.port = card->info.link_info.port;
@@ -439,16 +439,6 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- /* Check if we can obtain more accurate information. */
- if (!qeth_query_card_info(card, &link_info)) {
- if (link_info.speed != SPEED_UNKNOWN)
- cmd->base.speed = link_info.speed;
- if (link_info.duplex != DUPLEX_UNKNOWN)
- cmd->base.duplex = link_info.duplex;
- if (link_info.port != PORT_OTHER)
- cmd->base.port = link_info.port;
- }
-
qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index da55133da8fe..15c25fefe91a 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -2,7 +2,7 @@
/*
* zfcp device driver
*
- * Definitions for handling diagnostics in the the zfcp device driver.
+ * Definitions for handling diagnostics in the zfcp device driver.
*
* Copyright IBM Corp. 2018, 2020
*/
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 511bf8e0a436..b61acbb09be3 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
+ int ret = -EIO;
+
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
- if (zfcp_fsf_open_wka_port(wka_port))
+ if (zfcp_fsf_open_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+ goto out;
+ }
}
- mutex_unlock(&wka_port->mutex);
-
- wait_event(wka_port->completion_wq,
+ wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
- return 0;
+ ret = 0;
+ goto out;
}
- return -EIO;
+out:
+ mutex_unlock(&wka_port->mutex);
+ return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work)
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ goto out;
}
+ wait_event(wka_port->closed,
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
@@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
- schedule_delayed_work(&wka_port->work, HZ / 100);
+ queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
+ msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
- init_waitqueue_head(&wka_port->completion_wq);
+ init_waitqueue_head(&wka_port->opened);
+ init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 8aaf409ce9cb..97755407ce1b 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -185,7 +185,8 @@ enum zfcp_fc_wka_status {
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
- * @completion_wq: Wait for completion of open/close command
+ * @opened: Wait for completion of open command
+ * @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@ enum zfcp_fc_wka_status {
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
+ wait_queue_head_t opened;
+ wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4f1e4385ce58..19223b075568 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1907,7 +1907,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->opened);
}
/**
@@ -1966,7 +1966,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->closed);
}
/**
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index dbf3e50444e6..cb67fa80fb12 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -672,7 +672,7 @@ ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
atomic_read(&zfcp_sdev->status));
-struct attribute *zfcp_sdev_attrs[] = {
+static struct attribute *zfcp_sdev_attrs[] = {
&dev_attr_fcp_lun.attr,
&dev_attr_wwpn.attr,
&dev_attr_hba_id.attr,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 161d3b141f0d..a10dbe632ef9 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -33,6 +33,7 @@
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>
+#include <asm/tpi.h>
/*
* virtio related functions
@@ -204,7 +205,8 @@ static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
write_unlock_irqrestore(&info->lock, flags);
}
-static void virtio_airq_handler(struct airq_struct *airq, bool floating)
+static void virtio_airq_handler(struct airq_struct *airq,
+ struct tpi_info *tpi_info)
{
struct airq_info *info = container_of(airq, struct airq_info, airq);
unsigned long ai;
@@ -240,7 +242,7 @@ static struct airq_info *new_airq_info(int index)
return NULL;
rwlock_init(&info->lock);
info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
- | AIRQ_IV_CACHELINE);
+ | AIRQ_IV_CACHELINE, NULL);
if (!info->aiv) {
kfree(info);
return NULL;
@@ -530,6 +532,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
+
+ vq->num_max = info->num;
+
/* it may have been reduced */
info->num = virtqueue_get_vring_size(vq);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index a897c8f914cf..f2abffce2659 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2515,12 +2515,26 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
return (hoststatus << 16) | tgt_status;
}
+/*
+ * turn the dma address from an inbox into a ccb pointer
+ * This is rather inefficient.
+ */
+static struct blogic_ccb *
+blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox)
+{
+ struct blogic_ccb *ccb;
+
+ for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all)
+ if (inbox->ccb == ccb->dma_handle)
+ break;
+
+ return ccb;
+}
/*
blogic_scan_inbox scans the Incoming Mailboxes saving any
Incoming Mailbox entries for completion processing.
*/
-
static void blogic_scan_inbox(struct blogic_adapter *adapter)
{
/*
@@ -2540,17 +2554,14 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
enum blogic_cmplt_code comp_code;
while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
- /*
- We are only allowed to do this because we limit our
- architectures we run on to machines where bus_to_virt(
- actually works. There *needs* to be a dma_addr_to_virt()
- in the new PCI DMA mapping interface to replace
- bus_to_virt() or else this code is going to become very
- innefficient.
- */
- struct blogic_ccb *ccb =
- (struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
- if (comp_code != BLOGIC_CMD_NOTFOUND) {
+ struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox);
+ if (!ccb) {
+ /*
+ * This should never happen, unless the CCB list is
+ * corrupted in memory.
+ */
+ blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb);
+ } else if (comp_code != BLOGIC_CMD_NOTFOUND) {
if (ccb->status == BLOGIC_CCB_ACTIVE ||
ccb->status == BLOGIC_CCB_RESET) {
/*
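
Editorial sketch: the BusLogic change drops the VIRT_TO_BUS dependency by resolving the DMA address reported in an inbox back to its CCB through the adapter's own list of allocated CCBs, rather than trusting bus_to_virt(). The pattern generalizes to any driver that already tracks its DMA-mapped objects: keep the dma_addr_t next to the virtual pointer and search that bookkeeping, never convert the bus address. A compact, self-contained plain-C sketch with hypothetical names:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

struct ccb {
	dma_addr_t dma_handle;	/* address the hardware reports back */
	struct ccb *next_all;	/* driver-private list of all CCBs */
	/* ... command state ... */
};

/*
 * Reverse lookup: O(n) over the CCBs handed to the hardware; returns
 * NULL if the reported address matches nothing we own (corruption).
 */
static struct ccb *dma_to_ccb(struct ccb *all_ccbs, dma_addr_t reported)
{
	struct ccb *ccb;

	for (ccb = all_ccbs; ccb; ccb = ccb->next_all)
		if (ccb->dma_handle == reported)
			return ccb;
	return NULL;
}

The patch's own comment concedes the linear search is "rather inefficient"; it is acceptable here because the mailbox scan only walks a bounded set of outstanding CCBs, and a hash keyed by DMA address would only pay off with much deeper queues.
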
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 90253208a72f..3d9c56ac8224 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1712,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
- unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1723,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
- if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a9fe5152addd..955cb69a5418 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -458,17 +458,6 @@ config SCSI_MVUMI
To compile this driver as a module, choose M here: the
module will be called mvumi.
-config SCSI_DPT_I2O
- tristate "Adaptec I2O RAID support "
- depends on SCSI && PCI && VIRT_TO_BUS
- help
- This driver supports all of Adaptec's I2O based RAID controllers as
- well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called dpt_i2o.
-
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
depends on SCSI
@@ -513,7 +502,7 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on PCI && SCSI && VIRT_TO_BUS
+ depends on PCI && SCSI
help
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2ad3bc052531..f055bfd54a68 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -63,7 +63,6 @@ obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
-obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index cf703a1ecdda..74312400468b 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -24,8 +24,11 @@
struct a2091_hostdata {
struct WD33C93_hostdata wh;
struct a2091_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a2091_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -45,15 +48,31 @@ static irqreturn_t a2091_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -64,8 +83,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
/* the bounce buffer may not be in the first 16M of physmem */
if (addr & A2091_XFER_MASK) {
@@ -76,11 +108,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
- scsi_pointer->this_residual);
- }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -95,13 +123,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
+
/* start DMA */
regs->ST_DMA = 1;
@@ -142,6 +165,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* restore the CONTROL bits (minus the direction flag) */
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir)
@@ -178,6 +205,11 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wd33c93_regs wdregs;
struct a2091_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) {
+ dev_warn(&z->dev, "cannot use 24 bit DMA\n");
+ return -ENODEV;
+ }
+
if (!request_mem_region(z->resource.start, 256, "wd33c93"))
return -EBUSY;
@@ -198,6 +230,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &z->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
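
Editorial sketch: the a2091 conversion (and the a3000 one below) replaces virt_to_bus() plus manual cache_clear()/cache_push() with the generic DMA API: map the buffer with dma_map_single(), check dma_mapping_error(), fall back to a kmalloc'd bounce buffer when the mapped address violates the controller's addressing limits, and unmap again in dma_stop(). A condensed kernel-context sketch of that map/check/fallback sequence follows; the mask constant and helper name are hypothetical, and cache maintenance is implied by dma_map_single() itself.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

#define XFER_MASK 0xff000001	/* hypothetical controller address limits */

static dma_addr_t map_or_bounce(struct device *dev, void *buf, size_t len,
				enum dma_data_direction dir, void **bounce)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, dir);

	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;	/* caller falls back to PIO */

	if (addr & XFER_MASK) {			/* controller cannot reach it */
		dma_unmap_single(dev, addr, len, dir);	/* drop useless mapping */
		*bounce = kmalloc(len, GFP_KERNEL);
		if (!*bounce)
			return DMA_MAPPING_ERROR;
		if (dir == DMA_TO_DEVICE)
			memcpy(*bounce, buf, len);	/* stage outgoing data */
		addr = dma_map_single(dev, *bounce, len, dir);
		if (dma_mapping_error(dev, addr)) {
			kfree(*bounce);
			*bounce = NULL;
			return DMA_MAPPING_ERROR;
		}
	}
	return addr;	/* dma_unmap_single() later; no manual cache flushing */
}

The real code also rounds the bounce length up to 512 bytes and, for the A2091, re-checks the bounce buffer's mapped address against the controller mask; the sketch keeps only the core sequence.
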
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index dd161885eed1..2c5cb1a02e86 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
@@ -25,8 +26,11 @@
struct a3000_hostdata {
struct WD33C93_hostdata wh;
struct a3000_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a3000_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -49,20 +53,38 @@ static irqreturn_t a3000_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/*
* if the physical address has the wrong alignment, or if
* physical address is bad, or if it is a write and at the
* end of a physical memory chunk, then allocate a bounce
* buffer
+ * MSch 20220629 - only wrong alignment tested - bounce
+ * buffer returned by kmalloc is guaranteed to be aligned
*/
if (addr & A3000_XFER_MASK) {
+ WARN_ONCE(1, "Invalid alignment for DMA!");
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -70,6 +92,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* can't allocate memory; use PIO */
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
return 1;
}
@@ -79,7 +102,15 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
scsi_pointer->this_residual);
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -94,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
/* start DMA */
mb(); /* make sure setup is completed */
@@ -151,6 +176,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
mb(); /* make sure CNTR is updated before next IO */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (SCpnt) {
@@ -193,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wd33c93_regs wdregs;
struct a3000_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -216,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &pdev->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
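
Editorial sketch: both Amiga conversions also declare the controller's addressing capability up front with dma_set_mask_and_coherent() (24-bit for the A2091, 32-bit for the A3000) and abort the probe if the platform cannot honour it, so later dma_map_single() calls only hand out addresses the device can reach. A minimal probe-time sketch, using the 24-bit case as the example; other widths work the same way:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_probe_dma_setup(struct device *dev)
{
	/*
	 * Fail the probe early if the platform cannot restrict DMA
	 * addresses to what this controller can physically reach.
	 */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24))) {
		dev_warn(dev, "cannot use 24 bit DMA\n");
		return -ENODEV;
	}
	return 0;
}
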
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 81462f4ddb90..4d4cb47b3846 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1050,7 +1050,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
vpdpage83data.type1.productid));
/* Convert to ascii based serial number.
- * The LSB is the the end.
+ * The LSB is the end.
*/
for (i = 0; i < 8; i++) {
u8 temp =
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 940a6deab38f..bd99c5492b7d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -272,7 +272,7 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
-static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
+static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data)
{
int *active = data;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9c27bc37e5de..5ba5c18b77b4 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -633,7 +633,7 @@ struct fib_count_data {
int krlcnt;
};
-static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
+static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data)
{
struct fib_count_data *fib_count = data;
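
Editorial sketch: the aacraid hunks follow the SCSI midlayer change that dropped the "reserved" flag from busy-iterator callbacks, so functions passed to scsi_host_busy_iter() now take only the command and the caller's cookie. A small sketch of counting outstanding commands with the new callback shape; the struct and function names are hypothetical.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct busy_count {
	int active;
};

/* New-style iterator: no "reserved" argument any more. */
static bool count_busy_iter(struct scsi_cmnd *cmd, void *data)
{
	struct busy_count *bc = data;

	bc->active++;
	return true;		/* keep iterating */
}

static int count_busy_commands(struct Scsi_Host *shost)
{
	struct busy_count bc = { 0 };

	scsi_host_busy_iter(shost, count_busy_iter, &bc);
	return bc.active;
}
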
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 73506a459bf8..91d196f26b76 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -159,7 +159,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SAS_SATA_DEV) ||
(dev->tproto & SAS_PROTOCOL_STP)) {
- struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ struct smp_rps_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
rps_resp->function == SMP_REPORT_PHY_SATA &&
rps_resp->result == SMP_RESP_FUNC_ACC) {
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3bb0adefbe06..50a577ac3bb4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -231,6 +231,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+completion_check:
/* check if we raced, task just got cleaned up under us */
spin_lock_bh(&session->back_lock);
if (!abrt_task || !abrt_task->sc) {
@@ -238,7 +239,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(abrt_task);
+ if (!iscsi_get_task(abrt_task)) {
+ spin_unlock(&session->back_lock);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(5);
+ goto completion_check;
+ }
+
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
@@ -323,7 +330,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * The task has completed in the driver and is
+ * completing in libiscsi. Just ignore it here. When we
+ * call iscsi_eh_device_reset, it will wait for us.
+ */
+ continue;
+ }
+
io_task = task->dd_data;
/* mark WRB invalid which have been not processed by FW yet */
if (is_chip_be2_be3r(phba)) {
@@ -5745,7 +5760,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
cancel_work_sync(&phba->sess_work);
beiscsi_iface_destroy_default(phba);
- iscsi_host_remove(phba->shost);
+ iscsi_host_remove(phba->shost, false);
beiscsi_disable_port(phba, 1);
/* after cancelling boot_work */
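
Editorial sketch: the beiscsi error-handler hunks switch from unconditionally taking a task reference to iscsi_get_task(), which can fail once the task has started completing; on failure the abort path retries and the device-reset path skips the task. The underlying idiom is the kernel's refcount_inc_not_zero(): take a reference only if the object has not already begun dying. A stripped-down sketch of that idiom with hypothetical names:

#include <linux/refcount.h>

struct tracked_obj {
	refcount_t refs;
	/* ... payload ... */
};

/*
 * Returns true and holds a reference only while the object is still live;
 * a false return means teardown already began and the caller must not
 * touch the object further.
 */
static bool tracked_obj_tryget(struct tracked_obj *obj)
{
	return refcount_inc_not_zero(&obj->refs);
}

static void tracked_obj_put(struct tracked_obj *obj,
			    void (*release)(struct tracked_obj *))
{
	if (refcount_dec_and_test(&obj->refs))
		release(obj);
}
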
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 15fbd09baa94..a3c800e04a2e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -909,7 +909,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
{
struct Scsi_Host *shost = hba->shost;
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 908854869864..7ab29eaec6f3 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -63,7 +63,7 @@ static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
-static int debug = 0;
+static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 4365d52c6430..af281e271f88 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -328,7 +328,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
chba = cdev->hbas[i];
if (chba) {
cdev->hbas[i] = NULL;
- iscsi_host_remove(chba->shost);
+ iscsi_host_remove(chba->shost, false);
pci_dev_put(cdev->pdev);
iscsi_host_free(chba->shost);
}
@@ -1455,7 +1455,7 @@ void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
if (conn) {
log_debug(1 << CXGBI_DBG_SOCK,
"csk 0x%p, cid %d.\n", csk, conn->id);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 244fc27215dc..631eda2d467e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -16,6 +16,7 @@
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1d9be771f3ee..610a51538f03 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -127,7 +127,7 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
u8 cdb[MAX_COMMAND_SIZE];
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the command. */
@@ -157,7 +157,7 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the data buffer */
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index bd28ec6cfb72..2e21ab447873 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -239,7 +239,7 @@ static int send_trespass_cmd(struct scsi_device *sdev,
unsigned char cdb[MAX_COMMAND_SIZE];
int err, res = SCSI_DH_OK, len;
struct scsi_sense_hdr sshdr;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 4a3f7831a2d6..0d2cfa60aa06 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -83,7 +83,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
int ret = SCSI_DH_OK, res;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
@@ -121,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_device *sdev = h->sdev;
int res, rc = SCSI_DH_OK;
int retry_cnt = HP_SW_RETRIES;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 66652ab409cc..bf8754741f85 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -536,7 +536,7 @@ static void send_mode_select(struct work_struct *work)
unsigned char cdb[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int data_size;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
spin_lock(&ctlr->ms_lock);
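
Editorial sketch: the device-handler hunks above retype the REQ_FAILFAST_* flag bundles from plain int/u64 to blk_opf_t, the dedicated __bitwise type the block layer now uses for request operation and flag bits, so sparse can flag mixups between opcodes, flags, and ordinary integers. A one-line sketch of the typed declaration:

#include <linux/blk_types.h>

/*
 * blk_opf_t is a __bitwise type; mixing it with plain integers now
 * triggers sparse warnings instead of passing silently.
 */
static const blk_opf_t failfast_flags =
	REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
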
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
deleted file mode 100644
index e1fbbf55c09d..000000000000
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _SCSI_I2O_H
-#define _SCSI_I2O_H
-
-/* I2O kernel space accessible structures/APIs
- *
- * (c) Copyright 1999, 2000 Red Hat Software
- *
- *************************************************************************
- *
- * This header file defined the I2O APIs/structures for use by
- * the I2O kernel modules.
- */
-
-#ifdef __KERNEL__ /* This file to be included by kernel only */
-
-#include <linux/i2o-dev.h>
-
-#include <linux/notifier.h>
-#include <linux/atomic.h>
-
-
-/*
- * Tunable parameters first
- */
-
-/* How many different OSM's are we allowing */
-#define MAX_I2O_MODULES 64
-
-#define I2O_EVT_CAPABILITY_OTHER 0x01
-#define I2O_EVT_CAPABILITY_CHANGED 0x02
-
-#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
-
-//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
-
-#define I2O_MAX_MANAGERS 4
-
-/*
- * I2O Interface Objects
- */
-
-#include <linux/wait.h>
-typedef wait_queue_head_t adpt_wait_queue_head_t;
-#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_entry_t adpt_wait_queue_entry_t;
-
-/*
- * message structures
- */
-
-struct i2o_message
-{
- u8 version_offset;
- u8 flags;
- u16 size;
- u32 target_tid:12;
- u32 init_tid:12;
- u32 function:8;
- u32 initiator_context;
- /* List follows */
-};
-
-struct adpt_device;
-struct _adpt_hba;
-struct i2o_device
-{
- struct i2o_device *next; /* Chain */
- struct i2o_device *prev;
-
- char dev_name[8]; /* linux /dev name if available */
- i2o_lct_entry lct_data;/* Device LCT information */
- u32 flags;
- struct proc_dir_entry* proc_entry; /* /proc dir */
- struct adpt_device *owner;
- struct _adpt_hba *controller; /* Controlling IOP */
-};
-
-/*
- * Each I2O controller has one of these objects
- */
-
-struct i2o_controller
-{
- char name[16];
- int unit;
- int type;
- int enabled;
-
- struct notifier_block *event_notifer; /* Events */
- atomic_t users;
- struct i2o_device *devices; /* I2O device chain */
- struct i2o_controller *next; /* Controller chain */
-
-};
-
-/*
- * I2O System table entry
- */
-struct i2o_sys_tbl_entry
-{
- u16 org_id;
- u16 reserved1;
- u32 iop_id:12;
- u32 reserved2:20;
- u16 seg_num:12;
- u16 i2o_version:4;
- u8 iop_state;
- u8 msg_type;
- u16 frame_size;
- u16 reserved3;
- u32 last_changed;
- u32 iop_capabilities;
- u32 inbound_low;
- u32 inbound_high;
-};
-
-struct i2o_sys_tbl
-{
- u8 num_entries;
- u8 version;
- u16 reserved1;
- u32 change_ind;
- u32 reserved2;
- u32 reserved3;
- struct i2o_sys_tbl_entry iops[];
-};
-
-/*
- * I2O classes / subclasses
- */
-
-/* Class ID and Code Assignments
- * (LCT.ClassID.Version field)
- */
-#define I2O_CLASS_VERSION_10 0x00
-#define I2O_CLASS_VERSION_11 0x01
-
-/* Class code names
- * (from v1.5 Table 6-1 Class Code Assignments.)
- */
-
-#define I2O_CLASS_EXECUTIVE 0x000
-#define I2O_CLASS_DDM 0x001
-#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
-#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
-#define I2O_CLASS_LAN 0x020
-#define I2O_CLASS_WAN 0x030
-#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
-#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
-#define I2O_CLASS_SCSI_PERIPHERAL 0x051
-#define I2O_CLASS_ATE_PORT 0x060
-#define I2O_CLASS_ATE_PERIPHERAL 0x061
-#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
-#define I2O_CLASS_FLOPPY_DEVICE 0x071
-#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
-#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
-#define I2O_CLASS_PEER_TRANSPORT 0x091
-
-/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
- */
-
-#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
-
-/* Subclasses
- */
-
-#define I2O_SUBCLASS_i960 0x001
-#define I2O_SUBCLASS_HDM 0x020
-#define I2O_SUBCLASS_ISM 0x021
-
-/* Operation functions */
-
-#define I2O_PARAMS_FIELD_GET 0x0001
-#define I2O_PARAMS_LIST_GET 0x0002
-#define I2O_PARAMS_MORE_GET 0x0003
-#define I2O_PARAMS_SIZE_GET 0x0004
-#define I2O_PARAMS_TABLE_GET 0x0005
-#define I2O_PARAMS_FIELD_SET 0x0006
-#define I2O_PARAMS_LIST_SET 0x0007
-#define I2O_PARAMS_ROW_ADD 0x0008
-#define I2O_PARAMS_ROW_DELETE 0x0009
-#define I2O_PARAMS_TABLE_CLEAR 0x000A
-
-/*
- * I2O serial number conventions / formats
- * (circa v1.5)
- */
-
-#define I2O_SNFORMAT_UNKNOWN 0
-#define I2O_SNFORMAT_BINARY 1
-#define I2O_SNFORMAT_ASCII 2
-#define I2O_SNFORMAT_UNICODE 3
-#define I2O_SNFORMAT_LAN48_MAC 4
-#define I2O_SNFORMAT_WAN 5
-
-/* Plus new in v2.0 (Yellowstone pdf doc)
- */
-
-#define I2O_SNFORMAT_LAN64_MAC 6
-#define I2O_SNFORMAT_DDM 7
-#define I2O_SNFORMAT_IEEE_REG64 8
-#define I2O_SNFORMAT_IEEE_REG128 9
-#define I2O_SNFORMAT_UNKNOWN2 0xff
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
-/*
- * Messaging API values
- */
-
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-
-#define I2O_PRIVATE_MSG 0xFF
-
-/*
- * Init Outbound Q status
- */
-
-#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
-#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
-#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
-#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
-
-/*
- * I2O Get Status State values
- */
-
-#define ADAPTER_STATE_INITIALIZING 0x01
-#define ADAPTER_STATE_RESET 0x02
-#define ADAPTER_STATE_HOLD 0x04
-#define ADAPTER_STATE_READY 0x05
-#define ADAPTER_STATE_OPERATIONAL 0x08
-#define ADAPTER_STATE_FAILED 0x10
-#define ADAPTER_STATE_FAULTED 0x11
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
-#define I2OVERSION I2OVER15
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-
-#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
-#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_LAST 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-
-/* Special TID Assignments */
-
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-#define MSG_FRAME_SIZE 128
-#define NMBR_MSG_FRAMES 128
-
-#define MSG_POOL_SIZE 16384
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _SCSI_I2O_H */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
deleted file mode 100644
index 25e9251f8c78..000000000000
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti_ioctl.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-/***************************************************************************
- * This file is generated from osd_unix.h *
- * *************************************************************************/
-
-#ifndef _dpti_ioctl_h
-#define _dpti_ioctl_h
-
-// IOCTL interface commands
-
-#ifndef _IOWR
-# define _IOWR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOW
-# define _IOW(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOR
-# define _IOR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IO
-# define _IO(x,y) (((x)<<8)|y)
-#endif
-/* EATA PassThrough Command */
-#define EATAUSRCMD _IOWR('D',65,EATA_CP)
-/* Set Debug Level If Enabled */
-#define DPT_DEBUG _IOW('D',66,int)
-/* Get Signature Structure */
-#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S)
-#if defined __bsdi__
-#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed)
-#endif
-/* Get Number Of DPT Adapters */
-#define DPT_NUMCTRLS _IOR('D',68,int)
-/* Get Adapter Info Structure */
-#define DPT_CTRLINFO _IOR('D',69,CtrlInfo)
-/* Get Statistics If Enabled */
-#define DPT_STATINFO _IO('D',70)
-/* Clear Stats If Enabled */
-#define DPT_CLRSTAT _IO('D',71)
-/* Get System Info Structure */
-#define DPT_SYSINFO _IOR('D',72,sysInfo_S)
-/* Set Timeout Value */
-#define DPT_TIMEOUT _IO('D',73)
-/* Get config Data */
-#define DPT_CONFIG _IO('D',74)
-/* Get Blink LED Code */
-#define DPT_BLINKLED _IOR('D',75,int)
-/* Get Statistical information (if available) */
-#define DPT_STATS_INFO _IOR('D',80,STATS_DATA)
-/* Clear the statistical information */
-#define DPT_STATS_CLEAR _IO('D',81)
-/* Get Performance metrics */
-#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t)
-/* Send an I2O command */
-#define I2OUSRCMD _IO('D',76)
-/* Inform driver to re-acquire LCT information */
-#define I2ORESCANCMD _IO('D',77)
-/* Inform driver to reset adapter */
-#define I2ORESETCMD _IO('D',78)
-/* See if the target is mounted */
-#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T)
-
-
- /* Structure Returned From Get Controller Info */
-
-typedef struct {
- uCHAR state; /* Operational state */
- uCHAR id; /* Host adapter SCSI id */
- int vect; /* Interrupt vector number */
- int base; /* Base I/O address */
- int njobs; /* # of jobs sent to HA */
- int qdepth; /* Controller queue depth. */
- int wakebase; /* mpx wakeup base index. */
- uINT SGsize; /* Scatter/Gather list size. */
- unsigned heads; /* heads for drives on cntlr. */
- unsigned sectors; /* sectors for drives on cntlr. */
- uCHAR do_drive32; /* Flag for Above 16 MB Ability */
- uCHAR BusQuiet; /* SCSI Bus Quiet Flag */
- char idPAL[4]; /* 4 Bytes Of The ID Pal */
- uCHAR primary; /* 1 For Primary, 0 For Secondary */
- uCHAR eataVersion; /* EATA Version */
- uINT cpLength; /* EATA Command Packet Length */
- uINT spLength; /* EATA Status Packet Length */
- uCHAR drqNum; /* DRQ Index (0,5,6,7) */
- uCHAR flag1; /* EATA Flags 1 (Byte 9) */
- uCHAR flag2; /* EATA Flags 2 (Byte 30) */
-} CtrlInfo;
-
-typedef struct {
- uSHORT length; // Remaining length of this
- uSHORT drvrHBAnum; // Relative HBA # used by the driver
- uINT baseAddr; // Base I/O address
- uSHORT blinkState; // Blink LED state (0=Not in blink LED)
- uCHAR pciBusNum; // PCI Bus # (Optional)
- uCHAR pciDeviceNum; // PCI Device # (Optional)
- uSHORT hbaFlags; // Miscellaneous HBA flags
- uSHORT Interrupt; // Interrupt set for this device.
-# if (defined(_DPT_ARC))
- uINT baseLength;
- ADAPTER_OBJECT *AdapterObject;
- LARGE_INTEGER DmaLogicalAddress;
- PVOID DmaVirtualAddress;
- LARGE_INTEGER ReplyLogicalAddress;
- PVOID ReplyVirtualAddress;
-# else
- uINT reserved1; // Reserved for future expansion
- uINT reserved2; // Reserved for future expansion
- uINT reserved3; // Reserved for future expansion
-# endif
-} drvrHBAinfo_S;
-
-typedef struct TARGET_BUSY
-{
- uLONG channel;
- uLONG id;
- uLONG lun;
- uLONG isBusy;
-} TARGET_BUSY_T;
-
-#endif
-
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
deleted file mode 100644
index a6644b332b53..000000000000
--- a/drivers/scsi/dpt/dptsig.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __DPTSIG_H_
-#define __DPTSIG_H_
-#ifdef _SINIX_ADDON
-#include "dpt.h"
-#endif
-/* DPT SIGNATURE SPEC AND HEADER FILE */
-/* Signature Version 1 (sorry no 'A') */
-
-/* to make sure we are talking the same size under all OS's */
-typedef unsigned char sigBYTE;
-typedef unsigned short sigWORD;
-typedef unsigned int sigINT;
-
-/*
- * use sigWORDLittleEndian for:
- * dsCapabilities
- * dsDeviceSupp
- * dsAdapterSupp
- * dsApplication
- * use sigLONGLittleEndian for:
- * dsOS
- * so that the sig can be standardised to Little Endian
- */
-#if (defined(_DPT_BIG_ENDIAN))
-# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF))
-# define sigLONGLittleEndian(x) \
- ((((x)&0xFF)<<24) | \
- (((x)&0xFF00)<<8) | \
- (((x)&0xFF0000L)>>8) | \
- (((x)&0xFF000000L)>>24))
-#else
-# define sigWORDLittleEndian(x) (x)
-# define sigLONGLittleEndian(x) (x)
-#endif
-
-/* must make sure the structure is not word or double-word aligned */
-/* --------------------------------------------------------------- */
-/* Borland will ignore the following pragma: */
-/* Word alignment is OFF by default. If in the, IDE make */
-/* sure that Options | Compiler | Code Generation | Word Alignment */
-/* is not checked. If using BCC, do not use the -a option. */
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=mac68k
-#endif
-
-
-/* Current Signature Version - sigBYTE dsSigVersion; */
-/* ------------------------------------------------------------------ */
-#define SIG_VERSION 1
-
-/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-/* What type of processor the file is meant to run on. */
-/* This will let us know whether to read sigWORDs as high/low or low/high. */
-#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */
-#define PROC_MOTOROLA 0x01 /* Motorola 68K */
-#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
-#define PROC_ALPHA 0x03 /* DEC Alpha */
-#define PROC_POWERPC 0x04 /* IBM Power PC */
-#define PROC_i960 0x05 /* Intel i960 */
-#define PROC_ULTRASPARC 0x06 /* SPARC processor */
-
-/* Specific Minimim Processor - sigBYTE dsProcessor; FLAG BITS */
-/* ------------------------------------------------------------------ */
-/* Different bit definitions dependent on processor_family */
-
-/* PROC_INTEL: */
-#define PROC_8086 0x01 /* Intel 8086 */
-#define PROC_286 0x02 /* Intel 80286 */
-#define PROC_386 0x04 /* Intel 80386 */
-#define PROC_486 0x08 /* Intel 80486 */
-#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
-#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
-#define PROC_IA64 0x40 /* Intel IA64 processor */
-
-/* PROC_i960: */
-#define PROC_960RX 0x01 /* Intel 80960RC/RD */
-#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
-
-/* PROC_MOTOROLA: */
-#define PROC_68000 0x01 /* Motorola 68000 */
-#define PROC_68010 0x02 /* Motorola 68010 */
-#define PROC_68020 0x04 /* Motorola 68020 */
-#define PROC_68030 0x08 /* Motorola 68030 */
-#define PROC_68040 0x10 /* Motorola 68040 */
-
-/* PROC_POWERPC */
-#define PROC_PPC601 0x01 /* PowerPC 601 */
-#define PROC_PPC603 0x02 /* PowerPC 603 */
-#define PROC_PPC604 0x04 /* PowerPC 604 */
-
-/* PROC_MIPS4000: */
-#define PROC_R4000 0x01 /* MIPS R4000 */
-
-/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define FT_EXECUTABLE 0 /* Executable Program */
-#define FT_SCRIPT 1 /* Script/Batch File??? */
-#define FT_HBADRVR 2 /* HBA Driver */
-#define FT_OTHERDRVR 3 /* Other Driver */
-#define FT_IFS 4 /* Installable Filesystem Driver */
-#define FT_ENGINE 5 /* DPT Engine */
-#define FT_COMPDRVR 6 /* Compressed Driver Disk */
-#define FT_LANGUAGE 7 /* Foreign Language file */
-#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */
-#define FT_COMMMODL 9 /* Communications Module */
-#define FT_INT13 10 /* INT 13 style HBA Driver */
-#define FT_HELPFILE 11 /* Help file */
-#define FT_LOGGER 12 /* Event Logger */
-#define FT_INSTALL 13 /* An Install Program */
-#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
-#define FT_RESOURCE 15 /* Storage Manager Resource File */
-#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
-
-/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define FTF_DLL 0x01 /* Dynamic Link Library */
-#define FTF_NLM 0x02 /* Netware Loadable Module */
-#define FTF_OVERLAYS 0x04 /* Uses overlays */
-#define FTF_DEBUG 0x08 /* Debug version */
-#define FTF_TSR 0x10 /* TSR */
-#define FTF_SYS 0x20 /* DOS Loadable driver */
-#define FTF_PROTECTED 0x40 /* Runs in protected mode */
-#define FTF_APP_SPEC 0x80 /* Application Specific */
-#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */
-
-/* OEM - sigBYTE dsOEM; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define OEM_DPT 0 /* DPT */
-#define OEM_ATT 1 /* ATT */
-#define OEM_NEC 2 /* NEC */
-#define OEM_ALPHA 3 /* Alphatronix */
-#define OEM_AST 4 /* AST */
-#define OEM_OLIVETTI 5 /* Olivetti */
-#define OEM_SNI 6 /* Siemens/Nixdorf */
-#define OEM_SUN 7 /* SUN Microsystems */
-
-/* Operating System - sigLONG dsOS; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define OS_DOS 0x00000001 /* PC/MS-DOS */
-#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */
-#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */
-#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */
-#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */
-#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */
-#define OS_NW286 0x00000040 /* Novell NetWare 286 */
-#define OS_NW386 0x00000080 /* Novell NetWare 386 */
-#define OS_GEN_UNIX 0x00000100 /* Generic Unix */
-#define OS_SCO_UNIX 0x00000200 /* SCO Unix */
-#define OS_ATT_UNIX 0x00000400 /* ATT Unix */
-#define OS_UNIXWARE 0x00000800 /* USL Unix */
-#define OS_INT_UNIX 0x00001000 /* Interactive Unix */
-#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */
-#define OS_QNX 0x00004000 /* QNX for Tom Moch */
-#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */
-#define OS_BANYAN 0x00010000 /* Banyan Vines */
-#define OS_OLIVETTI_UNIX 0x00020000/* Olivetti Unix */
-#define OS_MAC_OS 0x00040000 /* Mac OS */
-#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */
-#define OS_NW4x 0x00100000 /* Novell Netware 4.x */
-#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */
-#define OS_AIX_UNIX 0x00400000 /* AIX Unix */
-#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */
-#define OS_LINUX 0x01000000 /* Linux */
-#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */
-#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
-#define OS_PLAN9 0x08000000 /* ATT Plan 9 */
-#define OS_TSX 0x10000000 /* SNH TSX-32 */
-
-#define OS_OTHER 0x80000000 /* Other */
-
-/* Capabilities - sigWORD dsCapabilities; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define CAP_RAID0 0x0001 /* RAID-0 */
-#define CAP_RAID1 0x0002 /* RAID-1 */
-#define CAP_RAID3 0x0004 /* RAID-3 */
-#define CAP_RAID5 0x0008 /* RAID-5 */
-#define CAP_SPAN 0x0010 /* Spanning */
-#define CAP_PASS 0x0020 /* Provides passthrough */
-#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */
-#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */
-#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */
-#define CAP_EXTEND 0x8000 /* Extended info appears after description */
-#ifdef SNI_MIPS
-#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */
-#endif
-
-/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define DEV_DASD 0x0001 /* DASD (hard drives) */
-#define DEV_TAPE 0x0002 /* Tape drives */
-#define DEV_PRINTER 0x0004 /* Printers */
-#define DEV_PROC 0x0008 /* Processors */
-#define DEV_WORM 0x0010 /* WORM drives */
-#define DEV_CDROM 0x0020 /* CD-ROM drives */
-#define DEV_SCANNER 0x0040 /* Scanners */
-#define DEV_OPTICAL 0x0080 /* Optical Drives */
-#define DEV_JUKEBOX 0x0100 /* Jukebox */
-#define DEV_COMM 0x0200 /* Communications Devices */
-#define DEV_OTHER 0x0400 /* Other Devices */
-#define DEV_ALL 0xFFFF /* All SCSI Devices */
-
-/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define ADF_2001 0x0001 /* PM2001 */
-#define ADF_2012A 0x0002 /* PM2012A */
-#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */
-#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */
-#define ADF_SC3_ISA 0x0010 /* PM2021 */
-#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */
-#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */
-#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */
-#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */
-#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */
-#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */
-/*
- * Combinations of products
- */
-#define ADF_ALL_2000 (ADF_2001|ADF_2012A)
-#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA)
-#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI)
-#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI)
-#define ADF_ALL_SC5 (ADF_SC5_PCI)
-/* All EATA Cacheing Products */
-#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4)
-/* All EATA Bus Mastering Products */
-#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE)
-/* All EATA Adapter Products */
-#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER)
-#define ADF_ALL ADF_ALL_EATA
-
-/* Application - sigWORD dsApplication; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define APP_DPTMGR 0x0001 /* DPT Storage Manager */
-#define APP_ENGINE 0x0002 /* DPT Engine */
-#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */
-#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */
-#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */
-#define APP_NOVABACK 0x0020 /* NovaStor Novaback */
-#define APP_AIM 0x0040 /* Archive Information Manager */
-
-/* Requirements - sigBYTE dsRequirements; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */
-#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */
-#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */
-#define REQ_ASPI_TRAN 0x08 /* Requires an ASPI Transport Modules */
-#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
-#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
-
-/*
- * You may adjust dsDescription_size with an override to a value less than
- * 50 so that the structure allocates less real space.
- */
-#if (!defined(dsDescription_size))
-# define dsDescription_size 50
-#endif
-
-typedef struct dpt_sig {
- char dsSignature[6]; /* ALWAYS "dPtSiG" */
- sigBYTE dsSigVersion; /* signature version (currently 1) */
- sigBYTE dsProcessorFamily; /* what type of processor */
- sigBYTE dsProcessor; /* precise processor */
- sigBYTE dsFiletype; /* type of file */
- sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
- sigBYTE dsOEM; /* OEM file was created for */
- sigINT dsOS; /* which Operating systems */
- sigWORD dsCapabilities; /* RAID levels, etc. */
- sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
- sigWORD dsAdapterSupp; /* DPT adapter families supported */
- sigWORD dsApplication; /* applications file is for */
- sigBYTE dsRequirements; /* Other driver dependencies */
- sigBYTE dsVersion; /* 1 */
- sigBYTE dsRevision; /* 'J' */
- sigBYTE dsSubRevision; /* '9' ' ' if N/A */
- sigBYTE dsMonth; /* creation month */
- sigBYTE dsDay; /* creation day */
- sigBYTE dsYear; /* creation year since 1980 (1993=13) */
- /* description (NULL terminated) */
- char dsDescription[dsDescription_size];
-} dpt_sig_S;
-/* 32 bytes minimum - with no description. Put NULL at description[0] */
-/* 81 bytes maximum - with 49 character description plus NULL. */
-
-/* This line added at Roycroft's request */
-/* Microsoft's NT compiler gets confused if you do a pack and don't */
-/* restore it. */
-
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=reset
-#endif
-
-#endif
diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h
deleted file mode 100644
index de3ae5722982..000000000000
--- a/drivers/scsi/dpt/osd_defs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef _OSD_DEFS_H
-#define _OSD_DEFS_H
-
-/*File - OSD_DEFS.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains the OS dependent defines. This file is included
- *in osd_util.h and provides the OS specific defines for that file.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/31/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
- /* Define the operating system */
-#if (defined(__linux__))
-# define _DPT_LINUX
-#elif (defined(__bsdi__))
-# define _DPT_BSDI
-#elif (defined(__FreeBSD__))
-# define _DPT_FREE_BSD
-#else
-# define _DPT_SCO
-#endif
-
-#if defined (ZIL_CURSES)
-#define _DPT_CURSES
-#else
-#define _DPT_MOTIF
-#endif
-
- /* Redefine 'far' to nothing - no far pointer type required in UNIX */
-#define far
-
- /* Define the mutually exclusive semaphore type */
-#define SEMAPHORE_T unsigned int *
- /* Define a handle to a DLL */
-#define DLL_HANDLE_T unsigned int *
-
-#endif
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
deleted file mode 100644
index b2613c2eaac7..000000000000
--- a/drivers/scsi/dpt/osd_util.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __OSD_UTIL_H
-#define __OSD_UTIL_H
-
-/*File - OSD_UTIL.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains defines and function prototypes that are
- *operating system dependent. The resources defined in this file
- *are not specific to any particular application.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/7/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
-/*----------------------------- */
-/* Operating system selections: */
-/*----------------------------- */
-
-/*#define _DPT_MSDOS */
-/*#define _DPT_WIN_3X */
-/*#define _DPT_WIN_4X */
-/*#define _DPT_WIN_NT */
-/*#define _DPT_NETWARE */
-/*#define _DPT_OS2 */
-/*#define _DPT_SCO */
-/*#define _DPT_UNIXWARE */
-/*#define _DPT_SOLARIS */
-/*#define _DPT_NEXTSTEP */
-/*#define _DPT_BANYAN */
-
-/*-------------------------------- */
-/* Include the OS specific defines */
-/*-------------------------------- */
-
-/*#define OS_SELECTION From Above List */
-/*#define SEMAPHORE_T ??? */
-/*#define DLL_HANDLE_T ??? */
-
-#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
-# include "i386/isa/dpt_osd_defs.h"
-#else
-# include "osd_defs.h"
-#endif
-
-#ifndef DPT_UNALIGNED
- #define DPT_UNALIGNED
-#endif
-
-#ifndef DPT_EXPORT
- #define DPT_EXPORT
-#endif
-
-#ifndef DPT_IMPORT
- #define DPT_IMPORT
-#endif
-
-#ifndef DPT_RUNTIME_IMPORT
- #define DPT_RUNTIME_IMPORT DPT_IMPORT
-#endif
-
-/*--------------------- */
-/* OS dependent defines */
-/*--------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
- #define _DPT_16_BIT
-#else
- #define _DPT_32_BIT
-#endif
-
-#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
- #define _DPT_UNIX
-#endif
-
-#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
- || defined (_DPT_OS2)
- #define _DPT_DLL_SUPPORT
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
- #define _DPT_PREEMPTIVE
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
- #define _DPT_MULTI_THREADED
-#endif
-
-#if !defined (_DPT_MSDOS)
- #define _DPT_MULTI_TASKING
-#endif
-
- /* These exist for platforms that */
- /* chunk when accessing mis-aligned */
- /* data */
-#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
- #if defined (_DPT_BIG_ENDIAN)
- #if !defined (_DPT_STRICT_ALIGN)
- #define _DPT_STRICT_ALIGN
- #endif
- #endif
-#endif
-
- /* Determine if in C or C++ mode */
-#ifdef __cplusplus
- #define _DPT_CPP
-#else
- #define _DPT_C
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Under Solaris the compiler refuses to accept code like: */
-/* { {"DPT"}, 0, NULL .... }, */
-/* and complains about the {"DPT"} part by saying "cannot use { } */
-/* to initialize char*". */
-/* */
-/* By defining these ugly macros we can get around this and also */
-/* not have to copy and #ifdef large sections of code. I know that */
-/* these macros are *really* ugly, but they should help reduce */
-/* maintenance in the long run. */
-/* */
-/*-------------------------------------------------------------------*/
-#if !defined (DPTSQO)
- #if defined (_DPT_SOLARIS)
- #define DPTSQO
- #define DPTSQC
- #else
- #define DPTSQO {
- #define DPTSQC }
- #endif /* solaris */
-#endif /* DPTSQO */
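(Aside, not part of the removed header: the DPTSQO/DPTSQC block above exists only to work around a Solaris compiler complaint about brace-initialized char* fields. A minimal standalone sketch of how a wrapped initializer expands; the struct and values here are hypothetical, not taken from the driver.)

#include <stdio.h>

#if defined (_DPT_SOLARIS)
  #define DPTSQO
  #define DPTSQC
#else
  #define DPTSQO {
  #define DPTSQC }
#endif

struct example { const char *name; int id; };

/* Expands to { {"DPT"}, 0 } on most platforms and to { "DPT", 0 } on Solaris. */
static struct example e = { DPTSQO "DPT" DPTSQC, 0 };

int main(void) { printf("%s %d\n", e.name, e.id); return 0; }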
-
-
-/*---------------------- */
-/* OS dependent typedefs */
-/*---------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
- #define BYTE unsigned char
- #define WORD unsigned short
-#endif
-
-#ifndef _DPT_TYPEDEFS
- #define _DPT_TYPEDEFS
- typedef unsigned char uCHAR;
- typedef unsigned short uSHORT;
- typedef unsigned int uINT;
- typedef unsigned long uLONG;
-
- typedef union {
- uCHAR u8[4];
- uSHORT u16[2];
- uLONG u32;
- } access_U;
-#endif
-
-#if !defined (NULL)
- #define NULL 0
-#endif
-
-
-/*Prototypes - function ----------------------------------------------------- */
-
-#ifdef __cplusplus
- extern "C" { /* Declare all these functions as "C" functions */
-#endif
-
-/*------------------------ */
-/* Byte reversal functions */
-/*------------------------ */
-
- /* Reverses the byte ordering of a 2 byte variable */
-#if (!defined(osdSwap2))
- uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
-#endif // !osdSwap2
-
- /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
-#if (!defined(osdSwap3))
- uLONG osdSwap3(DPT_UNALIGNED uLONG *);
-#endif // !osdSwap3
-
-
-#ifdef _DPT_NETWARE
- #include "novpass.h" /* For DPT_Bswapl() prototype */
- /* Inline the byte swap */
- #ifdef __cplusplus
- inline uLONG osdSwap4(uLONG *inLong) {
- return *inLong = DPT_Bswapl(*inLong);
- }
- #else
- #define osdSwap4(inLong) DPT_Bswapl(inLong)
- #endif // cplusplus
-#else
- /* Reverses the byte ordering of a 4 byte variable */
-# if (!defined(osdSwap4))
- uLONG osdSwap4(DPT_UNALIGNED uLONG *);
-# endif // !osdSwap4
-
- /* The following functions ALWAYS swap regardless of the *
- * presence of DPT_BIG_ENDIAN */
-
- uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
- uLONG trueSwap4(DPT_UNALIGNED uLONG *);
-
-#endif // netware
-
-
-/*-------------------------------------*
- * Network order swap functions *
- * *
- * These functions/macros will be used *
- * by the structure insert()/extract() *
- * functions. *
- *
- * We will enclose all structure *
- * portability modifications inside *
- * #ifdefs. When we are ready, we *
- * will #define DPT_PORTABLE to begin *
- * using the modifications. *
- *-------------------------------------*/
-uLONG netSwap4(uLONG val);
-
-#if defined (_DPT_BIG_ENDIAN)
-
-// for big-endian we need to swap
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) netSwap4((x))
-#endif // NET_SWAP_4
-
-#else
-
-// for little-endian we don't need to do anything
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (x)
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) (x)
-#endif // NET_SWAP_4
-
-#endif // big endian
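(Aside, not part of the removed header: a quick standalone illustration of what the big-endian form of NET_SWAP_2 above computes, using uint16_t in place of uSHORT.)

#include <stdio.h>
#include <stdint.h>

/* Same expression as the swapping NET_SWAP_2 above. */
static uint16_t swap16(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
	uint16_t v = 0x1234;
	printf("0x%04x -> 0x%04x\n", v, swap16(v));	/* prints 0x1234 -> 0x3412 */
	return 0;
}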
-
-
-
-/*----------------------------------- */
-/* Run-time loadable module functions */
-/*----------------------------------- */
-
- /* Loads the specified run-time loadable DLL */
-DLL_HANDLE_T osdLoadModule(uCHAR *);
- /* Unloads the specified run-time loadable DLL */
-uSHORT osdUnloadModule(DLL_HANDLE_T);
- /* Returns a pointer to a function inside a run-time loadable DLL */
-void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
-
-/*--------------------------------------- */
-/* Mutually exclusive semaphore functions */
-/*--------------------------------------- */
-
- /* Create a named semaphore */
-SEMAPHORE_T osdCreateNamedSemaphore(char *);
-	/* Create a mutually exclusive semaphore */
-SEMAPHORE_T osdCreateSemaphore(void);
- /* create an event semaphore */
-SEMAPHORE_T osdCreateEventSemaphore(void);
- /* create a named event semaphore */
-SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
-
- /* Destroy the specified mutually exclusive semaphore object */
-uSHORT osdDestroySemaphore(SEMAPHORE_T);
- /* Request access to the specified mutually exclusive semaphore */
-uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
- /* Release access to the specified mutually exclusive semaphore */
-uSHORT osdReleaseSemaphore(SEMAPHORE_T);
- /* wait for a event to happen */
-uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
- /* signal an event */
-uLONG osdSignalEventSemaphore(SEMAPHORE_T);
- /* reset the event */
-uLONG osdResetEventSemaphore(SEMAPHORE_T);
-
-/*----------------- */
-/* Thread functions */
-/*----------------- */
-
- /* Releases control to the task switcher in non-preemptive */
- /* multitasking operating systems. */
-void osdSwitchThreads(void);
-
- /* Starts a thread function */
-uLONG osdStartThread(void *,void *);
-
-/* what is my thread id */
-uLONG osdGetThreadID(void);
-
-/* wakes up the specified thread */
-void osdWakeThread(uLONG);
-
-/* osd sleep for x milliseconds */
-void osdSleep(uLONG);
-
-#define DPT_THREAD_PRIORITY_LOWEST 0x00
-#define DPT_THREAD_PRIORITY_NORMAL 0x01
-#define DPT_THREAD_PRIORITY_HIGHEST 0x02
-
-uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
-
-#ifdef __cplusplus
-   }    /* end the extern "C" declaration */
-#endif
-
-#endif /* osd_util_h */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
deleted file mode 100644
index a4aa1c31ff72..000000000000
--- a/drivers/scsi/dpt/sys_info.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __SYS_INFO_H
-#define __SYS_INFO_H
-
-/*File - SYS_INFO.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains structure definitions for the OS dependent
- *layer system information buffers.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Don Kemper
- *Date: 5/10/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Include Files ------------------------------------------------------------- */
-
-#include "osd_util.h"
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif // no unpack
-
-
-/*struct - driveParam_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the drive parameters seen during
- *booting.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct driveParam_S {
-#else
- typedef struct {
-#endif
-
- uSHORT cylinders; /* Up to 1024 */
- uCHAR heads; /* Up to 255 */
- uCHAR sectors; /* Up to 63 */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } driveParam_S;
-#endif
-/*driveParam_S - end */
-
-
-/*struct - sysInfo_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the command system information that
- *should be returned by every OS dependent layer.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define SI_CMOS_Valid 0x0001
-#define SI_NumDrivesValid 0x0002
-#define SI_ProcessorValid 0x0004
-#define SI_MemorySizeValid 0x0008
-#define SI_DriveParamsValid 0x0010
-#define SI_SmartROMverValid 0x0020
-#define SI_OSversionValid 0x0040
-#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */
-#define SI_BusTypeValid 0x0100
-
-#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */
-#define SI_NO_SmartROM 0x8000
-
-/*busType - definitions */
-#define SI_ISA_BUS 0x00
-#define SI_MCA_BUS 0x01
-#define SI_EISA_BUS 0x02
-#define SI_PCI_BUS 0x04
-
-#ifdef __cplusplus
- struct sysInfo_S {
-#else
- typedef struct {
-#endif
-
- uCHAR drive0CMOS; /* CMOS Drive 0 Type */
- uCHAR drive1CMOS; /* CMOS Drive 1 Type */
- uCHAR numDrives; /* 0040:0075 contents */
- uCHAR processorFamily; /* Same as DPTSIG's definition */
- uCHAR processorType; /* Same as DPTSIG's definition */
- uCHAR smartROMMajorVersion;
- uCHAR smartROMMinorVersion; /* SmartROM version */
- uCHAR smartROMRevision;
- uSHORT flags; /* See bit definitions above */
- uSHORT conventionalMemSize; /* in KB */
- uINT extendedMemSize; /* in KB */
- uINT osType; /* Same as DPTSIG's definition */
- uCHAR osMajorVersion;
- uCHAR osMinorVersion; /* The OS version */
- uCHAR osRevision;
-#ifdef _SINIX_ADDON
-   uCHAR        busType;                /* See definitions above */
- uSHORT osSubRevision;
- uCHAR pad[2]; /* For alignment */
-#else
- uCHAR osSubRevision;
-   uCHAR        busType;                /* See definitions above */
- uCHAR pad[3]; /* For alignment */
-#endif
- driveParam_S drives[16]; /* SmartROM Logical Drives */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } sysInfo_S;
-#endif
-/*sysInfo_S - end */
-
-
-/*struct - DOS_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *DOS workstation.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define DI_DOS_HIGH 0x01 /* DOS is loaded high */
-#define DI_DPMI_VALID 0x02 /* DPMI version is valid */
-
-#ifdef __cplusplus
- struct DOS_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR flags; /* See bit definitions above */
- uSHORT driverLocation; /* SmartROM BIOS address */
- uSHORT DOS_version;
- uSHORT DPMI_version;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } DOS_Info_S;
-#endif
-/*DOS_Info_S - end */
-
-
-/*struct - Netware_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Netware machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct Netware_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR driverName[13]; /* ie PM12NW31.DSK */
- uCHAR serverName[48];
- uCHAR netwareVersion; /* The Netware OS version */
- uCHAR netwareSubVersion;
- uCHAR netwareRevision;
- uSHORT maxConnections; /* Probably 250 or 1000 */
- uSHORT connectionsInUse;
- uSHORT maxVolumes;
- uCHAR unused;
- uCHAR SFTlevel;
- uCHAR TTSlevel;
-
- uCHAR clibMajorVersion; /* The CLIB.NLM version */
- uCHAR clibMinorVersion;
- uCHAR clibRevision;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } Netware_Info_S;
-#endif
-/*Netware_Info_S - end */
-
-
-/*struct - OS2_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *OS/2 machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct OS2_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } OS2_Info_S;
-#endif
-/*OS2_Info_S - end */
-
-
-/*struct - WinNT_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Windows NT machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct WinNT_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } WinNT_Info_S;
-#endif
-/*WinNT_Info_S - end */
-
-
-/*struct - SCO_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *SCO UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct SCO_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } SCO_Info_S;
-#endif
-/*SCO_Info_S - end */
-
-
-/*struct - USL_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *USL UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct USL_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } USL_Info_S;
-#endif
-/*USL_Info_S - end */
-
-
- /* Restore default structure packing */
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif // no unpack
-
-#endif // __SYS_INFO_H
-
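(Aside, not part of the patch: the header above brackets its structures with #pragma pack(1) so that sysInfo_S and friends keep the exact byte layout the DPT management tools expect. A hedged standalone illustration of why the packing matters, with a hypothetical struct and typical compiler behaviour.)

#include <stdio.h>

struct unpacked { unsigned char a; unsigned short b; };

#pragma pack(1)
struct packed   { unsigned char a; unsigned short b; };
#pragma pack()

int main(void)
{
	/* On common ABIs this prints 4 vs 3: the packed form drops the padding byte. */
	printf("unpacked=%zu packed=%zu\n",
	       sizeof(struct unpacked), sizeof(struct packed));
	return 0;
}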
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
deleted file mode 100644
index 2e9155ba7408..000000000000
--- a/drivers/scsi/dpt_i2o.c
+++ /dev/null
@@ -1,3545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/***************************************************************************
- dpti.c - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2000 by Adaptec
-
- July 30, 2001 First version being submitted
- for inclusion in the kernel. V2.4
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-/***************************************************************************
- * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
- - Support 2.6 kernel and DMA-mapping
- - ioctl fix for raid tools
- - use schedule_timeout in long long loop
- **************************************************************************/
-
-/*#define DEBUG 1 */
-/*#define UARTDELAY 1 */
-
-#include <linux/module.h>
-#include <linux/pgtable.h>
-
-MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
-MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
-
-////////////////////////////////////////////////////////////////
-
-#include <linux/ioctl.h> /* For SCSI-Passthrough */
-#include <linux/uaccess.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h> /* for kmalloc() */
-#include <linux/pci.h> /* for PCI support */
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h> /* for udelay */
-#include <linux/interrupt.h>
-#include <linux/kernel.h> /* for printk */
-#include <linux/sched.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/mutex.h>
-
-#include <asm/processor.h> /* for boot_cpu_data */
-#include <asm/io.h> /* for virt_to_bus, etc. */
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-
-#include "dpt/dptsig.h"
-#include "dpti.h"
-
-/*============================================================================
- * Create a binary signature - this is read by dptsig
- * Needed for our management apps
- *============================================================================
- */
-static DEFINE_MUTEX(adpt_mutex);
-static dpt_sig_S DPTI_sig = {
- {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
-#ifdef __i386__
- PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
-#elif defined(__ia64__)
- PROC_INTEL, PROC_IA64,
-#elif defined(__sparc__)
- PROC_ULTRASPARC, PROC_ULTRASPARC,
-#elif defined(__alpha__)
- PROC_ALPHA, PROC_ALPHA,
-#else
- (-1),(-1),
-#endif
- FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
- ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
- DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
-};
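(Aside, not part of the removed driver: the initializer above places the literal bytes 'd','P','t','S','i','G' at the start of the signature, presumably so management tools can locate the blob by scanning the driver image. A minimal sketch of that kind of scan, with a hypothetical buffer rather than the real dptsig reader.)

#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char image[] = "....dPtSiG<rest of signature>";
	const unsigned char marker[6] = { 'd', 'P', 't', 'S', 'i', 'G' };
	size_t i;

	for (i = 0; i + sizeof(marker) <= sizeof(image); i++) {
		if (memcmp(image + i, marker, sizeof(marker)) == 0) {
			printf("signature found at offset %zu\n", i);
			break;
		}
	}
	return 0;
}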
-
-
-
-
-/*============================================================================
- * Globals
- *============================================================================
- */
-
-static DEFINE_MUTEX(adpt_configuration_lock);
-
-static struct i2o_sys_tbl *sys_tbl;
-static dma_addr_t sys_tbl_pa;
-static int sys_tbl_ind;
-static int sys_tbl_len;
-
-static adpt_hba* hba_chain = NULL;
-static int hba_count = 0;
-
-static struct class *adpt_sysfs_class;
-
-static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
-static const struct file_operations adpt_fops = {
- .unlocked_ioctl = adpt_unlocked_ioctl,
- .open = adpt_open,
- .release = adpt_close,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_adpt_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-/* Structures and definitions for synchronous message posting.
- * See adpt_i2o_post_wait() for description
- * */
-struct adpt_i2o_post_wait_data
-{
- int status;
- u32 id;
- adpt_wait_queue_head_t *wq;
- struct adpt_i2o_post_wait_data *next;
-};
-
-static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
-static u32 adpt_post_wait_id = 0;
-static DEFINE_SPINLOCK(adpt_post_wait_lock);
-
-
-/*============================================================================
- * Functions
- *============================================================================
- */
-
-static inline int dpt_dma64(adpt_hba *pHba)
-{
- return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
-}
-
-static inline u32 dma_high(dma_addr_t addr)
-{
- return upper_32_bits(addr);
-}
-
-static inline u32 dma_low(dma_addr_t addr)
-{
- return (u32)addr;
-}
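(Aside, not part of the removed driver: dpt_dma64(), dma_high() and dma_low() above decide whether 64-bit addressing is in play and split a bus address into the two 32-bit words a 64-bit SG entry carries. A standalone sketch of that split, using a plain uint64_t in user space rather than a kernel dma_addr_t.)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x0000001234abcd00ULL;	/* example bus address */
	uint32_t lo = (uint32_t)addr;		/* mirrors dma_low()  */
	uint32_t hi = (uint32_t)(addr >> 32);	/* mirrors dma_high() */

	printf("low=0x%08x high=0x%08x\n", lo, hi);
	return 0;
}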
-
-static u8 adpt_read_blink_led(adpt_hba* host)
-{
- if (host->FwDebugBLEDflag_P) {
- if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
- return readb(host->FwDebugBLEDvalue_P);
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Scsi host template interface functions
- *============================================================================
- */
-
-#ifdef MODULE
-static struct pci_device_id dptids[] = {
- { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { 0, }
-};
-#endif
-
-MODULE_DEVICE_TABLE(pci,dptids);
-
-static int adpt_detect(struct scsi_host_template* sht)
-{
- struct pci_dev *pDev = NULL;
- adpt_hba *pHba;
- adpt_hba *next;
-
- PINFO("Detecting Adaptec I2O RAID controllers...\n");
-
-	/* search for all Adaptec I2O RAID cards */
- while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
- if(pDev->device == PCI_DPT_DEVICE_ID ||
- pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
- if(adpt_install_hba(sht, pDev) ){
- PERROR("Could not Init an I2O RAID device\n");
- PERROR("Will not try to detect others.\n");
- return hba_count-1;
- }
- pci_dev_get(pDev);
- }
- }
-
- /* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
-		// Activate does get status, init outbound, and get hrt
- if (adpt_i2o_activate_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- }
- }
-
-
- /* Active IOPs in HOLD state */
-
-rebuild_sys_tab:
- if (hba_chain == NULL)
- return 0;
-
- /*
- * If build_sys_table fails, we kill everything and bail
- * as we can't init the IOPs w/o a system table
- */
- if (adpt_i2o_build_sys_table() < 0) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
-
- PDEBUG("HBA's in HOLD state\n");
-
-	/* If an IOP doesn't come online, we need to rebuild the System table */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (adpt_i2o_online_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- goto rebuild_sys_tab;
- }
- }
-
- /* Active IOPs now in OPERATIONAL state */
- PDEBUG("HBA's in OPERATIONAL state\n");
-
- printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
- if (adpt_i2o_lct_get(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
-
- if (adpt_i2o_parse_lct(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- adpt_inquiry(pHba);
- }
-
- adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
- if (IS_ERR(adpt_sysfs_class)) {
- printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
- adpt_sysfs_class = NULL;
- }
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- if (adpt_scsi_host_alloc(pHba, sht) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- pHba->initialized = TRUE;
- pHba->state &= ~DPTI_STATE_RESET;
- if (adpt_sysfs_class) {
- struct device *dev = device_create(adpt_sysfs_class,
- NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
- "dpti%d", pHba->unit);
- if (IS_ERR(dev)) {
- printk(KERN_WARNING"dpti%d: unable to "
- "create device in dpt_i2o class\n",
- pHba->unit);
- }
- }
- }
-
- // Register our control device node
- // nodes will need to be created in /dev to access this
- // the nodes can not be created from within the driver
- if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
- return hba_count;
-}
-
-
-static void adpt_release(adpt_hba *pHba)
-{
- struct Scsi_Host *shost = pHba->host;
-
- scsi_remove_host(shost);
-// adpt_i2o_quiesce_hba(pHba);
- adpt_i2o_delete_hba(pHba);
- scsi_host_put(shost);
-}
-
-
-static void adpt_inquiry(adpt_hba* pHba)
-{
- u32 msg[17];
- u32 *mptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- u32 len;
- u32 reqlen;
- u8* buf;
- dma_addr_t addr;
- u8 scb[16];
- s32 rcode;
-
- memset(msg, 0, sizeof(msg));
- buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
- if(!buf){
- printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
- return;
- }
- memset((void*)buf, 0, 36);
-
- len = 36;
- direction = 0x00000000;
- scsidir =0x40000000; // DATA IN (iop<--dev)
-
- if (dpt_dma64(pHba))
- reqlen = 17; // SINGLE SGE, 64 bit
- else
- reqlen = 14; // SINGLE SGE, 32 bit
- /* Stick the headers on */
- msg[0] = reqlen<<16 | SGL_OFFSET_12;
- msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
- msg[2] = 0;
- msg[3] = 0;
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
- msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
-
- mptr=msg+7;
-
- memset(scb, 0, sizeof(scb));
- // Write SCSI command into the message - always 16 byte block
- scb[0] = INQUIRY;
- scb[1] = 0;
- scb[2] = 0;
- scb[3] = 0;
- scb[4] = 36;
- scb[5] = 0;
- // Don't care about the rest of scb
-
- memcpy(mptr, scb, sizeof(scb));
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
-
- /* Now fill in the SGList and command */
- *lenptr = len;
- if (dpt_dma64(pHba)) {
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = dma_low(addr);
- *mptr++ = dma_high(addr);
- } else {
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = addr;
- }
-
-	// Send it on its way
- rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
- if (rcode != 0) {
- sprintf(pHba->detail, "Adaptec I2O RAID");
- printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
- if (rcode != -ETIME && rcode != -EINTR)
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- } else {
- memset(pHba->detail, 0, sizeof(pHba->detail));
- memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
- memcpy(&(pHba->detail[16]), " Model: ", 8);
- memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
- memcpy(&(pHba->detail[40]), " FW: ", 4);
- memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
- pHba->detail[48] = '\0'; /* precautionary */
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- }
- adpt_i2o_status_get(pHba);
- return ;
-}
-
-
-static int adpt_slave_configure(struct scsi_device * device)
-{
- struct Scsi_Host *host = device->host;
-
- if (host->can_queue && device->tagged_supported) {
- scsi_change_queue_depth(device,
- host->can_queue - 1);
- }
- return 0;
-}
-
-static int adpt_queue_lck(struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba = NULL;
- struct adpt_device* pDev = NULL; /* dpt per device information */
-
- /*
- * SCSI REQUEST_SENSE commands will be executed automatically by the
- * Host Adapter for any errors, so they should not be executed
- * explicitly unless the Sense Data is zero indicating that no error
- * occurred.
- */
-
- if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
- cmd->result = (DID_OK << 16);
- scsi_done(cmd);
- return 0;
- }
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- if (!pHba) {
- return FAILED;
- }
-
- rmb();
- if ((pHba->state) & DPTI_STATE_RESET)
- return SCSI_MLQUEUE_HOST_BUSY;
-
-	// TODO if the cmd->device is offline then I may need to issue a bus rescan
- // followed by a get_lct to see if the device is there anymore
- if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
- /*
- * First command request for this device. Set up a pointer
- * to the device structure. This should be a TEST_UNIT_READY
- * command from scan_scsis_single.
- */
- if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
- // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
- // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
- cmd->result = (DID_NO_CONNECT << 16);
- scsi_done(cmd);
- return 0;
- }
- cmd->device->hostdata = pDev;
- }
- pDev->pScsi_dev = cmd->device;
-
- /*
- * If we are being called from when the device is being reset,
- * delay processing of the command until later.
- */
- if (pDev->state & DPTI_DEV_RESET ) {
- return FAILED;
- }
- return adpt_scsi_to_i2o(pHba, cmd, pDev);
-}
-
-static DEF_SCSI_QCMD(adpt_queue)
-
-static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int geom[])
-{
- int heads=-1;
- int sectors=-1;
- int cylinders=-1;
-
-	// *** First let's set the default geometry ****
-
-	// If the capacity is less than 0x2000
- if (capacity < 0x2000 ) { // floppy
- heads = 18;
- sectors = 2;
- }
- // else if between 0x2000 and 0x20000
- else if (capacity < 0x20000) {
- heads = 64;
- sectors = 32;
- }
- // else if between 0x20000 and 0x40000
- else if (capacity < 0x40000) {
- heads = 65;
- sectors = 63;
- }
-	// else if between 0x40000 and 0x80000
- else if (capacity < 0x80000) {
- heads = 128;
- sectors = 63;
- }
- // else if greater than 0x80000
- else {
- heads = 255;
- sectors = 63;
- }
- cylinders = sector_div(capacity, heads * sectors);
-
- // Special case if CDROM
- if(sdev->type == 5) { // CDROM
- heads = 252;
- sectors = 63;
- cylinders = 1111;
- }
-
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- PDEBUG("adpt_bios_param: exit\n");
- return 0;
-}
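(Aside, not part of the removed driver: adpt_bios_param() above picks a heads/sectors pair from capacity thresholds; the intended geometry follows the usual capacity = C * H * S relationship. A user-space sketch of that relationship with a made-up capacity, using plain 64-bit division rather than the kernel's sector_div().)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity = 0x100000;	/* example capacity in 512-byte sectors */
	int heads, sectors;

	if (capacity < 0x2000)        { heads = 18;  sectors = 2;  }
	else if (capacity < 0x20000)  { heads = 64;  sectors = 32; }
	else if (capacity < 0x40000)  { heads = 65;  sectors = 63; }
	else if (capacity < 0x80000)  { heads = 128; sectors = 63; }
	else                          { heads = 255; sectors = 63; }

	printf("C/H/S = %llu/%d/%d\n",
	       (unsigned long long)(capacity / (uint64_t)(heads * sectors)),
	       heads, sectors);
	return 0;
}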
-
-
-static const char *adpt_info(struct Scsi_Host *host)
-{
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
- return (char *) (pHba->detail);
-}
-
-static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- struct adpt_device* d;
- int id;
- int chan;
- adpt_hba* pHba;
- int unit;
-
- // Find HBA (host bus adapter) we are looking for
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->host == host) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return 0;
- }
- host = pHba->host;
-
- seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
- seq_printf(m, "%s\n", pHba->detail);
- seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
- pHba->host->host_no, pHba->name, host->irq);
- seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
- host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
-
- seq_puts(m, "Devices:\n");
- for(chan = 0; chan < MAX_CHANNEL; chan++) {
- for(id = 0; id < MAX_ID; id++) {
- d = pHba->channel[chan].device[id];
- while(d) {
- seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
- seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
-
- unit = d->pI2o_dev->lct_data.tid;
- seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
- unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
- scsi_device_online(d->pScsi_dev)? "online":"offline");
- d = d->next_lun;
- }
- }
- }
- return 0;
-}
-
-/*
- * Turn a pointer to ioctl reply data into an u32 'context'
- */
-static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
-{
-#if BITS_PER_LONG == 32
- return (u32)(unsigned long)reply;
-#else
- ulong flags = 0;
- u32 nr, i;
-
- spin_lock_irqsave(pHba->host->host_lock, flags);
- nr = ARRAY_SIZE(pHba->ioctl_reply_context);
- for (i = 0; i < nr; i++) {
- if (pHba->ioctl_reply_context[i] == NULL) {
- pHba->ioctl_reply_context[i] = reply;
- break;
- }
- }
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- if (i >= nr) {
- printk(KERN_WARNING"%s: Too many outstanding "
- "ioctl commands\n", pHba->name);
- return (u32)-1;
- }
-
- return i;
-#endif
-}
-
-/*
- * Go from an u32 'context' to a pointer to ioctl reply data.
- */
-static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
-{
-#if BITS_PER_LONG == 32
- return (void *)(unsigned long)context;
-#else
- void *p = pHba->ioctl_reply_context[context];
- pHba->ioctl_reply_context[context] = NULL;
-
- return p;
-#endif
-}
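(Aside, not part of the removed driver: on 64-bit builds a kernel pointer no longer fits in the 32-bit message context, so the two helpers above park the reply pointer in a small per-adapter table and use the slot index as the context. A standalone sketch of that pattern with a hypothetical table size and no locking, unlike the host_lock protection used above.)

#include <stdio.h>
#include <stdint.h>

#define NR_CTX 32
static void *ctx_table[NR_CTX];

static uint32_t ptr_to_context(void *p)
{
	for (uint32_t i = 0; i < NR_CTX; i++) {
		if (ctx_table[i] == NULL) {
			ctx_table[i] = p;
			return i;
		}
	}
	return (uint32_t)-1;		/* table full */
}

static void *context_to_ptr(uint32_t ctx)
{
	void *p = ctx_table[ctx];
	ctx_table[ctx] = NULL;
	return p;
}

int main(void)
{
	int reply = 42;
	uint32_t ctx = ptr_to_context(&reply);
	printf("ctx=%u -> %d\n", ctx, *(int *)context_to_ptr(ctx));
	return 0;
}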
-
-/*===========================================================================
- * Error Handling routines
- *===========================================================================
- */
-
-static int adpt_abort(struct scsi_cmnd * cmd)
-{
- adpt_hba* pHba = NULL; /* host bus adapter structure */
- struct adpt_device* dptdevice; /* dpt per device information */
- u32 msg[5];
- int rcode;
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
- if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
- printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
- return FAILED;
- }
-
- memset(msg, 0, sizeof(msg));
- msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
- msg[2] = 0;
- msg[3]= 0;
- /* Add 1 to avoid firmware treating it as invalid command */
- msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
- return SUCCESS;
-}
-
-
-#define I2O_DEVICE_RESET 0x27
-// This is the same for BLK and SCSI devices
-// NOTE this is wrong in the i2o.h definitions
-// This is not currently supported by our adapter but we issue it anyway
-static int adpt_device_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
- int old_state;
- struct adpt_device* d = cmd->device->hostdata;
-
- pHba = (void*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
- if (!d) {
- printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
- return FAILED;
- }
- memset(msg, 0, sizeof(msg));
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
- msg[2] = 0;
- msg[3] = 0;
-
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- old_state = d->state;
- d->state |= DPTI_DEV_RESET;
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- d->state = old_state;
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
- return SUCCESS;
- }
-}
-
-
-#define I2O_HBA_BUS_RESET 0x87
-// This version of bus reset is called by the eh_error handler
-static int adpt_bus_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- memset(msg, 0, sizeof(msg));
- printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
- msg[2] = 0;
- msg[3] = 0;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
- return SUCCESS;
- }
-}
-
-// This version of reset is called by the eh_error_handler
-static int __adpt_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- int rcode;
- char name[32];
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- strncpy(name, pHba->name, sizeof(name));
- printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
- rcode = adpt_hba_reset(pHba);
- if(rcode == 0){
- printk(KERN_WARNING"%s: HBA reset complete\n", name);
- return SUCCESS;
- } else {
- printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
- return FAILED;
- }
-}
-
-static int adpt_reset(struct scsi_cmnd* cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __adpt_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
-static int adpt_hba_reset(adpt_hba* pHba)
-{
- int rcode;
-
- pHba->state |= DPTI_STATE_RESET;
-
-	// Activate does get status, init outbound, and get hrt
- if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
- printk(KERN_ERR "%s: Could not activate\n", pHba->name);
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_build_sys_table()) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in HOLD state\n",pHba->name);
-
- if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
-
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- pHba->state &= ~DPTI_STATE_RESET;
-
- scsi_host_complete_all_commands(pHba->host, DID_RESET);
- return 0; /* return success */
-}
-
-/*===========================================================================
- *
- *===========================================================================
- */
-
-
-static void adpt_i2o_sys_shutdown(void)
-{
- adpt_hba *pHba, *pNext;
- struct adpt_i2o_post_wait_data *p1, *old;
-
- printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
- printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
- /* Delete all IOPs from the controller chain */
- /* They should have already been released by the
- * scsi-core
- */
- for (pHba = hba_chain; pHba; pHba = pNext) {
- pNext = pHba->next;
- adpt_i2o_delete_hba(pHba);
- }
-
- /* Remove any timedout entries from the wait queue. */
-// spin_lock_irqsave(&adpt_post_wait_lock, flags);
- /* Nothing should be outstanding at this point so just
- * free them
- */
- for(p1 = adpt_post_wait_queue; p1;) {
- old = p1;
- p1 = p1->next;
- kfree(old);
- }
-// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
- adpt_post_wait_queue = NULL;
-
- printk(KERN_INFO "Adaptec I2O controllers down.\n");
-}
-
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
-{
-
- adpt_hba* pHba = NULL;
- adpt_hba* p = NULL;
- ulong base_addr0_phys = 0;
- ulong base_addr1_phys = 0;
- u32 hba_map0_area_size = 0;
- u32 hba_map1_area_size = 0;
- void __iomem *base_addr_virt = NULL;
- void __iomem *msg_addr_virt = NULL;
- int dma64 = 0;
-
- int raptorFlag = FALSE;
-
- if(pci_enable_device(pDev)) {
- return -EINVAL;
- }
-
- if (pci_request_regions(pDev, "dpt_i2o")) {
- PERROR("dpti: adpt_config_hba: pci request region failed\n");
- return -EINVAL;
- }
-
- pci_set_master(pDev);
-
- /*
- * See if we should enable dma64 mode.
- */
- if (sizeof(dma_addr_t) > 4 &&
- dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
- dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
- dma64 = 1;
-
- if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
- return -EINVAL;
-
- /* adapter only supports message blocks below 4GB */
- dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
-
- base_addr0_phys = pci_resource_start(pDev,0);
- hba_map0_area_size = pci_resource_len(pDev,0);
-
- // Check if standard PCI card or single BAR Raptor
- if(pDev->device == PCI_DPT_DEVICE_ID){
- if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
- // Raptor card with this device id needs 4M
- hba_map0_area_size = 0x400000;
- } else { // Not Raptor - it is a PCI card
- if(hba_map0_area_size > 0x100000 ){
- hba_map0_area_size = 0x100000;
- }
- }
- } else {// Raptor split BAR config
- // Use BAR1 in this configuration
- base_addr1_phys = pci_resource_start(pDev,1);
- hba_map1_area_size = pci_resource_len(pDev,1);
- raptorFlag = TRUE;
- }
-
-#if BITS_PER_LONG == 64
- /*
- * The original Adaptec 64 bit driver has this comment here:
- * "x86_64 machines need more optimal mappings"
- *
- * I assume some HBAs report ridiculously large mappings
- * and we need to limit them on platforms with IOMMUs.
- */
- if (raptorFlag == TRUE) {
- if (hba_map0_area_size > 128)
- hba_map0_area_size = 128;
- if (hba_map1_area_size > 524288)
- hba_map1_area_size = 524288;
- } else {
- if (hba_map0_area_size > 524288)
- hba_map0_area_size = 524288;
- }
-#endif
-
- base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
- if (!base_addr_virt) {
- pci_release_regions(pDev);
- PERROR("dpti: adpt_config_hba: io remap failed\n");
- return -EINVAL;
- }
-
- if(raptorFlag == TRUE) {
- msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
- if (!msg_addr_virt) {
- PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -EINVAL;
- }
- } else {
- msg_addr_virt = base_addr_virt;
- }
-
- // Allocate and zero the data structure
- pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
- if (!pHba) {
- if (msg_addr_virt != base_addr_virt)
- iounmap(msg_addr_virt);
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -ENOMEM;
- }
-
- mutex_lock(&adpt_configuration_lock);
-
- if(hba_chain != NULL){
- for(p = hba_chain; p->next; p = p->next);
- p->next = pHba;
- } else {
- hba_chain = pHba;
- }
- pHba->next = NULL;
- pHba->unit = hba_count;
- sprintf(pHba->name, "dpti%d", hba_count);
- hba_count++;
-
- mutex_unlock(&adpt_configuration_lock);
-
- pHba->pDev = pDev;
- pHba->base_addr_phys = base_addr0_phys;
-
- // Set up the Virtual Base Address of the I2O Device
- pHba->base_addr_virt = base_addr_virt;
- pHba->msg_addr_virt = msg_addr_virt;
- pHba->irq_mask = base_addr_virt+0x30;
- pHba->post_port = base_addr_virt+0x40;
- pHba->reply_port = base_addr_virt+0x44;
-
- pHba->hrt = NULL;
- pHba->lct = NULL;
- pHba->lct_size = 0;
- pHba->status_block = NULL;
- pHba->post_count = 0;
- pHba->state = DPTI_STATE_RESET;
- pHba->pDev = pDev;
- pHba->devices = NULL;
- pHba->dma64 = dma64;
-
- // Initializing the spinlocks
- spin_lock_init(&pHba->state_lock);
-
- if(raptorFlag == 0){
- printk(KERN_INFO "Adaptec I2O RAID controller"
- " %d at %p size=%x irq=%d%s\n",
- hba_count-1, base_addr_virt,
- hba_map0_area_size, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- } else {
- printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
- hba_count-1, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
- printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
- }
-
- if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
- printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
- adpt_i2o_delete_hba(pHba);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static void adpt_i2o_delete_hba(adpt_hba* pHba)
-{
- adpt_hba* p1;
- adpt_hba* p2;
- struct i2o_device* d;
- struct i2o_device* next;
- int i;
- int j;
- struct adpt_device* pDev;
- struct adpt_device* pNext;
-
-
- mutex_lock(&adpt_configuration_lock);
- if(pHba->host){
- free_irq(pHba->host->irq, pHba);
- }
- p2 = NULL;
- for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
- if(p1 == pHba) {
- if(p2) {
- p2->next = p1->next;
- } else {
- hba_chain = p1->next;
- }
- break;
- }
- }
-
- hba_count--;
- mutex_unlock(&adpt_configuration_lock);
-
- iounmap(pHba->base_addr_virt);
- pci_release_regions(pHba->pDev);
- if(pHba->msg_addr_virt != pHba->base_addr_virt){
- iounmap(pHba->msg_addr_virt);
- }
- if(pHba->FwDebugBuffer_P)
- iounmap(pHba->FwDebugBuffer_P);
- if(pHba->hrt) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
- pHba->hrt, pHba->hrt_pa);
- }
- if(pHba->lct) {
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- }
- if(pHba->status_block) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
- pHba->status_block, pHba->status_block_pa);
- }
- if(pHba->reply_pool) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- for(d = pHba->devices; d ; d = next){
- next = d->next;
- kfree(d);
- }
- for(i = 0 ; i < pHba->top_scsi_channel ; i++){
- for(j = 0; j < MAX_ID; j++){
- if(pHba->channel[i].device[j] != NULL){
- for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
- pNext = pDev->next_lun;
- kfree(pDev);
- }
- }
- }
- }
- pci_dev_put(pHba->pDev);
- if (adpt_sysfs_class)
- device_destroy(adpt_sysfs_class,
- MKDEV(DPTI_I2O_MAJOR, pHba->unit));
- kfree(pHba);
-
- if(hba_count <= 0){
- unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
- if (adpt_sysfs_class) {
- class_destroy(adpt_sysfs_class);
- adpt_sysfs_class = NULL;
- }
- }
-}
-
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
-{
- struct adpt_device* d;
-
- if (chan >= MAX_CHANNEL)
- return NULL;
-
- d = pHba->channel[chan].device[id];
- if(!d || d->tid == 0) {
- return NULL;
- }
-
- /* If it is the only lun at that address then this should match*/
- if(d->scsi_lun == lun){
- return d;
- }
-
- /* else we need to look through all the luns */
- for(d=d->next_lun ; d ; d = d->next_lun){
- if(d->scsi_lun == lun){
- return d;
- }
- }
- return NULL;
-}
-
-
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
-{
- // I used my own version of the WAIT_QUEUE_HEAD
- // to handle some version differences
- // When embedded in the kernel this could go back to the vanilla one
- ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
- int status = 0;
- ulong flags = 0;
- struct adpt_i2o_post_wait_data *p1, *p2;
- struct adpt_i2o_post_wait_data *wait_data =
- kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
- DECLARE_WAITQUEUE(wait, current);
-
- if (!wait_data)
- return -ENOMEM;
-
- /*
- * The spin locking is needed to keep anyone from playing
- * with the queue pointers and id while we do the same
- */
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- // TODO we need a MORE unique way of getting ids
- // to support async LCT get
- wait_data->next = adpt_post_wait_queue;
- adpt_post_wait_queue = wait_data;
- adpt_post_wait_id++;
- adpt_post_wait_id &= 0x7fff;
- wait_data->id = adpt_post_wait_id;
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- wait_data->wq = &adpt_wq_i2o_post;
- wait_data->status = -ETIMEDOUT;
-
- add_wait_queue(&adpt_wq_i2o_post, &wait);
-
- msg[2] |= 0x80000000 | ((u32)wait_data->id);
- timeout *= HZ;
- if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
- set_current_state(TASK_INTERRUPTIBLE);
- if(pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (!timeout)
- schedule();
- else{
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- // I/O issued, but cannot get result in
-				// specified time. Freeing resources is
- // dangerous.
- status = -ETIME;
- }
- }
- if(pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- }
- remove_wait_queue(&adpt_wq_i2o_post, &wait);
-
- if(status == -ETIMEDOUT){
- printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
- // We will have to free the wait_data memory during shutdown
- return status;
- }
-
- /* Remove the entry from the queue. */
- p2 = NULL;
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
- if(p1 == wait_data) {
- if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
- status = -EOPNOTSUPP;
- }
- if(p2) {
- p2->next = p1->next;
- } else {
- adpt_post_wait_queue = p1->next;
- }
- break;
- }
- }
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- kfree(wait_data);
-
- return status;
-}
-
-
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
-{
-
- u32 m = EMPTY_QUEUE;
- u32 __iomem *msg;
- ulong timeout = jiffies + 30*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg = pHba->msg_addr_virt + m;
- memcpy_toio(msg, data, len);
- wmb();
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- return 0;
-}
-
-
-static void adpt_i2o_post_wait_complete(u32 context, int status)
-{
- struct adpt_i2o_post_wait_data *p1 = NULL;
- /*
- * We need to search through the adpt_post_wait
- * queue to see if the given message is still
- * outstanding. If not, it means that the IOP
- * took longer to respond to the message than we
- * had allowed and timer has already expired.
- * Not much we can do about that except log
- * it for debug purposes, increase timeout, and recompile
- *
- * Lock needed to keep anyone from moving queue pointers
- * around while we're looking through them.
- */
-
- context &= 0x7fff;
-
- spin_lock(&adpt_post_wait_lock);
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- if(p1->id == context) {
- p1->status = status;
- spin_unlock(&adpt_post_wait_lock);
- wake_up_interruptible(p1->wq);
- return;
- }
- }
- spin_unlock(&adpt_post_wait_lock);
- // If this happens we lose commands that probably really completed
- printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
- printk(KERN_DEBUG" Tasks in wait queue:\n");
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- printk(KERN_DEBUG" %d\n",p1->id);
- }
- return;
-}
-
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
-{
- u32 msg[8];
- u8* status;
- dma_addr_t addr;
- u32 m = EMPTY_QUEUE ;
- ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
-
- if(pHba->initialized == FALSE) { // First time reset should be quick
- timeout = jiffies + (25*HZ);
- } else {
- adpt_i2o_quiesce_hba(pHba);
- }
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"Timeout waiting for message!\n");
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if(status == NULL) {
- adpt_send_nop(pHba, m);
- printk(KERN_ERR"IOP reset failed - no free memory.\n");
- return -ENOMEM;
- }
-
- msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]=0;
- msg[3]=0;
- msg[4]=0;
- msg[5]=0;
- msg[6]=dma_low(addr);
- msg[7]=dma_high(addr);
-
- memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
- wmb();
- writel(m, pHba->post_port);
- wmb();
-
- while(*status == 0){
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
-			/* We lose 4 bytes of "status" here, but we cannot
-			   free them because the controller may wake up and
-			   corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
- PDEBUG("%s: Reset in progress...\n", pHba->name);
-		// Here we wait for a message frame to become available,
-		// indicating that the reset has finished
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
-				/* We lose 4 bytes of "status" here, but we
-				   cannot free them because the controller may
-				   wake up and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
- // Flush the offset
- adpt_send_nop(pHba, m);
- }
- adpt_i2o_status_get(pHba);
- if(*status == 0x02 ||
- pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
- pHba->name);
- } else {
- PDEBUG("%s: Reset completed.\n", pHba->name);
- }
-
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-#ifdef UARTDELAY
- // This delay is to allow someone attached to the card through the debug UART to
- // set up the dump levels that they want before the rest of the initialization sequence
- adpt_delay(20000);
-#endif
- return 0;
-}
-
-
-static int adpt_i2o_parse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
-	u32 buf[10]; // needs at least 8 u32s for the scalar query result
- struct adpt_device* pDev;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
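-	/* table_size is in 32-bit words: skip the 3-word LCT header and
-	   divide by the 9-word entry size to get the number of entries. */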
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- /*
- * If we have hidden devices, we need to inform the upper layers about
- * the possible maximum id reference to handle device access when
- * an array is disassembled. This code has no other purpose but to
- * allow us future access to devices that are currently hidden
- * behind arrays, hotspares or have not been configured (JBOD mode).
- */
- if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
- lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
- lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- continue;
- }
- tid = lct->lct_entry[i].tid;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- continue;
- }
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
- continue;
- }
- if (scsi_id >= MAX_ID){
-				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
- continue;
- }
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- }
- d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
- if(d==NULL)
- {
- printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- tid = d->lct_data.tid;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
- }
- bus_no = 0;
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
- tid = d->lct_data.tid;
-			// TODO get the bus_no from the hrt - but for now they are in order
- //bus_no =
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- pHba->channel[bus_no].type = d->lct_data.class_id;
- pHba->channel[bus_no].tid = tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
- {
- pHba->channel[bus_no].scsi_id = buf[1];
- PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
- }
- // TODO remove - this is just until we get from hrt
- bus_no++;
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
- break;
- }
- }
- }
-
- // Setup adpt_device table
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
-
- tid = d->lct_data.tid;
- scsi_id = -1;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- continue;
- }
- if (scsi_id >= MAX_ID) {
- continue;
- }
- if( pHba->channel[bus_no].device[scsi_id] == NULL){
- pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- for( pDev = pHba->channel[bus_no].device[scsi_id];
- pDev->next_lun; pDev = pDev->next_lun){
- }
- pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev->next_lun == NULL) {
- return -ENOMEM;
- }
- pDev = pDev->next_lun;
- }
- pDev->tid = tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- }
- if(scsi_id == -1){
- printk(KERN_WARNING"Could not find SCSI ID for %s\n",
- d->lct_data.identity_tag);
- }
- }
- }
- return 0;
-}
-
-
-/*
- * Each I2O controller has a chain of devices on it - these match
- * the useful parts of the LCT of the board.
- */
-
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
-{
- mutex_lock(&adpt_configuration_lock);
- d->controller=pHba;
- d->owner=NULL;
- d->next=pHba->devices;
- d->prev=NULL;
- if (pHba->devices != NULL){
- pHba->devices->prev=d;
- }
- pHba->devices=d;
- *d->dev_name = 0;
-
- mutex_unlock(&adpt_configuration_lock);
- return 0;
-}
-
-static int adpt_open(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- mutex_lock(&adpt_mutex);
- //TODO check for root access
- //
- minor = iminor(inode);
- if (minor >= hba_count) {
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- if (pHba == NULL) {
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
-
-// if(pHba->in_use){
- // mutex_unlock(&adpt_configuration_lock);
-// return -EBUSY;
-// }
-
- pHba->in_use = 1;
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
-
- return 0;
-}
-
-static int adpt_close(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- minor = iminor(inode);
- if (minor >= hba_count) {
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return -ENXIO;
- }
-
- pHba->in_use = 0;
-
- return 0;
-}
-
-
-static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
-{
- u32 msg[MAX_MESSAGE_SIZE];
- u32* reply = NULL;
- u32 size = 0;
- u32 reply_size = 0;
- u32 __user *user_msg = arg;
- u32 __user * user_reply = NULL;
- void **sg_list = NULL;
- u32 sg_offset = 0;
- u32 sg_count = 0;
- int sg_index = 0;
- u32 i = 0;
- u32 rcode = 0;
- void *p = NULL;
- dma_addr_t addr;
- ulong flags = 0;
-
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- return -EFAULT;
- }
- size = size>>16;
-
- user_reply = &user_msg[size];
- if(size > MAX_MESSAGE_SIZE){
- return -EFAULT;
- }
- size *= 4; // Convert to bytes
-
- /* Copy in the user's I2O command */
- if(copy_from_user(msg, user_msg, size)) {
- return -EFAULT;
- }
- get_user(reply_size, &user_reply[0]);
- reply_size = reply_size>>16;
- if(reply_size > REPLY_FRAME_SIZE){
- reply_size = REPLY_FRAME_SIZE;
- }
- reply_size *= 4;
- reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
- if(reply == NULL) {
- printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
- return -ENOMEM;
- }
- sg_offset = (msg[0]>>4)&0xf;
- msg[2] = 0x40000000; // IOCTL context
- msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1) {
- rcode = -EBUSY;
- goto free;
- }
-
- sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
- if (!sg_list) {
- rcode = -ENOMEM;
- goto free;
- }
- if(sg_offset) {
- // TODO add 64 bit API
- struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- if (sg_count > pHba->sg_tablesize){
- printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- rcode = -EINVAL;
- goto free;
- }
-
- for(i = 0; i < sg_count; i++) {
- int sg_size;
-
- if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
- printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
- rcode = -EINVAL;
- goto cleanup;
- }
- sg_size = sg[i].flag_count & 0xffffff;
- /* Allocate memory for the transfer */
- p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
- if(!p) {
- printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- pHba->name,sg_size,i,sg_count);
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
- /* Copy in the user's SG buffer if necessary */
- if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // sg_simple_element API is 32 bit
- if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
- printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- /* sg_simple_element API is 32 bit, but addr < 4GB */
- sg[i].addr_bus = addr;
- }
- }
-
- do {
- /*
-		 * Stop any new commands from entering the
- * controller while processing the ioctl
- */
- if (pHba->host) {
- scsi_block_requests(pHba->host);
- spin_lock_irqsave(pHba->host->host_lock, flags);
- }
- rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
- if (rcode != 0)
- printk("adpt_i2o_passthru: post wait failed %d %p\n",
- rcode, reply);
- if (pHba->host) {
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- scsi_unblock_requests(pHba->host);
- }
- } while (rcode == -ETIMEDOUT);
-
- if(rcode){
- goto cleanup;
- }
-
- if(sg_offset) {
- /* Copy back the Scatter Gather buffers back to user space */
- u32 j;
- // TODO add 64 bit API
- struct sg_simple_element* sg;
- int sg_size;
-
-		// re-acquire the original message to correctly handle the sg copy operation
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- rcode = -EFAULT;
- goto cleanup;
- }
- size = size>>16;
- size *= 4;
- if (size > MAX_MESSAGE_SIZE) {
- rcode = -EINVAL;
- goto cleanup;
- }
- /* Copy in the user's I2O command */
- if (copy_from_user (msg, user_msg, size)) {
- rcode = -EFAULT;
- goto cleanup;
- }
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
-
- // TODO add 64 bit API
- sg = (struct sg_simple_element*)(msg + sg_offset);
- for (j = 0; j < sg_count; j++) {
- /* Copy out the SG list to user's buffer if necessary */
- if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
- sg_size = sg[j].flag_count & 0xffffff;
- // sg_simple_element API is 32 bit
- if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- }
- }
-
- /* Copy back the reply to user space */
- if (reply_size) {
- // we wrote our own values for context - now restore the user supplied ones
- if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
- printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
- rcode = -EFAULT;
- }
- if(copy_to_user(user_reply, reply, reply_size)) {
- printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
- rcode = -EFAULT;
- }
- }
-
-
-cleanup:
- if (rcode != -ETIME && rcode != -EINTR) {
- struct sg_simple_element *sg =
- (struct sg_simple_element*) (msg +sg_offset);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- dma_free_coherent(&pHba->pDev->dev,
- sg[sg_index].flag_count & 0xffffff,
- sg_list[sg_index],
- sg[sg_index].addr_bus);
- }
- }
- }
-
-free:
- kfree(sg_list);
- kfree(reply);
- return rcode;
-}
-
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- si->processorType = PROC_IA64;
-}
-#endif
-
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- si->processorType = PROC_ULTRASPARC;
-}
-#endif
-#if defined __alpha__
-static void adpt_alpha_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- si->processorType = PROC_ALPHA;
-}
-#endif
-
-#if defined __i386__
-
-#include <uapi/asm/vm86.h>
-
-static void adpt_i386_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- switch (boot_cpu_data.x86) {
- case CPU_386:
- si->processorType = PROC_386;
- break;
- case CPU_486:
- si->processorType = PROC_486;
- break;
- case CPU_586:
- si->processorType = PROC_PENTIUM;
- break;
- default: // Just in case
- si->processorType = PROC_PENTIUM;
- break;
- }
-}
-#endif
-
-/*
- * This routine returns information about the system. This does not affect
- * any logic, and if the info is wrong it doesn't matter.
- */
-
-/* Get all the info we can not get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
- sysInfo_S si;
-
- memset(&si, 0, sizeof(si));
-
- si.osType = OS_LINUX;
- si.osMajorVersion = 0;
- si.osMinorVersion = 0;
- si.osRevision = 0;
- si.busType = SI_PCI_BUS;
- si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
- adpt_i386_info(&si);
-#elif defined (__ia64__)
- adpt_ia64_info(&si);
-#elif defined(__sparc__)
- adpt_sparc_info(&si);
-#elif defined (__alpha__)
- adpt_alpha_info(&si);
-#else
- si.processorType = 0xff ;
-#endif
- if (copy_to_user(buffer, &si, sizeof(si))){
- printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
-{
- int minor;
- int error = 0;
- adpt_hba* pHba;
- ulong flags = 0;
- void __user *argp = (void __user *)arg;
-
- minor = iminor(inode);
- if (minor >= DPTI_MAX_HBA){
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if(pHba == NULL){
- return -ENXIO;
- }
-
- while((volatile u32) pHba->state & DPTI_STATE_RESET )
- schedule_timeout_uninterruptible(2);
-
- switch (cmd) {
- // TODO: handle 3 cases
- case DPT_SIGNATURE:
- if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
- return -EFAULT;
- }
- break;
- case I2OUSRCMD:
- return adpt_i2o_passthru(pHba, argp);
-
- case DPT_CTRLINFO:{
- drvrHBAinfo_S HbaInfo;
-
-#define FLG_OSD_PCI_VALID 0x0001
-#define FLG_OSD_DMA 0x0002
-#define FLG_OSD_I2O 0x0004
- memset(&HbaInfo, 0, sizeof(HbaInfo));
- HbaInfo.drvrHBAnum = pHba->unit;
- HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
- HbaInfo.blinkState = adpt_read_blink_led(pHba);
- HbaInfo.pciBusNum = pHba->pDev->bus->number;
- HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
- HbaInfo.Interrupt = pHba->pDev->irq;
- HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
- if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
- printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
- return -EFAULT;
- }
- break;
- }
- case DPT_SYSINFO:
- return adpt_system_info(argp);
- case DPT_BLINKLED:{
- u32 value;
- value = (u32)adpt_read_blink_led(pHba);
- if (copy_to_user(argp, &value, sizeof(value))) {
- return -EFAULT;
- }
- break;
- }
- case I2ORESETCMD: {
- struct Scsi_Host *shost = pHba->host;
-
- if (shost)
- spin_lock_irqsave(shost->host_lock, flags);
- adpt_hba_reset(pHba);
- if (shost)
- spin_unlock_irqrestore(shost->host_lock, flags);
- break;
- }
- case I2ORESCANCMD:
- adpt_rescan(pHba);
- break;
- default:
- return -EINVAL;
- }
-
- return error;
-}
-
-static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
- ret = adpt_ioctl(inode, file, cmd, arg);
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
-
- switch(cmd) {
- case DPT_SIGNATURE:
- case I2OUSRCMD:
- case DPT_CTRLINFO:
- case DPT_SYSINFO:
- case DPT_BLINKLED:
- case I2ORESETCMD:
- case I2ORESCANCMD:
- case (DPT_TARGET_BUSY & 0xFFFF):
- case DPT_TARGET_BUSY:
- ret = adpt_ioctl(inode, file, cmd, arg);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
-
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-#endif
-
-static irqreturn_t adpt_isr(int irq, void *dev_id)
-{
- struct scsi_cmnd* cmd;
- adpt_hba* pHba = dev_id;
- u32 m;
- void __iomem *reply;
- u32 status=0;
- u32 context;
- ulong flags = 0;
- int handled = 0;
-
- if (pHba == NULL){
- printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
- return IRQ_NONE;
- }
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
-
- while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // Try twice then give up
- rmb();
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // This really should not happen
- printk(KERN_ERR"dpti: Could not get reply frame\n");
- goto out;
- }
- }
- if (pHba->reply_pool_pa <= m &&
- m < pHba->reply_pool_pa +
- (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
- reply = (u8 *)pHba->reply_pool +
- (m - pHba->reply_pool_pa);
- } else {
- /* Ick, we should *never* be here */
- printk(KERN_ERR "dpti: reply frame not from pool\n");
- reply = (u8 *)bus_to_virt(m);
- }
-
- if (readl(reply) & MSG_FAIL) {
- u32 old_m = readl(reply+28);
- void __iomem *msg;
- u32 old_context;
- PDEBUG("%s: Failed message\n",pHba->name);
- if(old_m >= 0x100000){
- printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
- writel(m,pHba->reply_port);
- continue;
- }
- // Transaction context is 0 in failed reply frame
- msg = pHba->msg_addr_virt + old_m;
- old_context = readl(msg+12);
- writel(old_context, reply+12);
- adpt_send_nop(pHba, old_m);
- }
- context = readl(reply+8);
- if(context & 0x40000000){ // IOCTL
- void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
- if( p != NULL) {
- memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
- }
- // All IOCTLs will also be post wait
- }
- if(context & 0x80000000){ // Post wait message
- status = readl(reply+16);
- if(status >> 24){
- status &= 0xffff; /* Get detail status */
- } else {
- status = I2O_POST_WAIT_OK;
- }
- if(!(context & 0x40000000)) {
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL) {
- printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
- }
- }
- adpt_i2o_post_wait_complete(context, status);
- } else { // SCSI message
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL){
- scsi_dma_unmap(cmd);
- adpt_i2o_scsi_complete(reply, cmd);
- }
- }
- writel(m, pHba->reply_port);
- wmb();
- rmb();
- }
- handled = 1;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return IRQ_RETVAL(handled);
-}
-
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
-{
- int i;
- u32 msg[MAX_MESSAGE_SIZE];
- u32* mptr;
- u32* lptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- int nseg;
- u32 len;
- u32 reqlen;
- s32 rcode;
- dma_addr_t addr;
-
- memset(msg, 0 , sizeof(msg));
- len = scsi_bufflen(cmd);
- direction = 0x00000000;
-
- scsidir = 0x00000000; // DATA NO XFER
- if(len) {
- /*
- * Set SCBFlags to indicate if data is being transferred
- * in or out, or no data transfer
- * Note: Do not have to verify index is less than 0 since
- * cmd->cmnd[0] is an unsigned char
- */
- switch(cmd->sc_data_direction){
- case DMA_FROM_DEVICE:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- break;
- case DMA_TO_DEVICE:
- direction=0x04000000; // SGL OUT
- scsidir =0x80000000; // DATA OUT (iop-->dev)
- break;
- case DMA_NONE:
- break;
- case DMA_BIDIRECTIONAL:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- // Assume In - and continue;
- break;
- default:
- printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
- pHba->name, cmd->cmnd[0]);
- cmd->result = (DID_ERROR <<16);
- scsi_done(cmd);
- return 0;
- }
- }
- // msg[0] is set later
- // I2O_CMD_SCSI_EXEC
- msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
- msg[2] = 0;
- /* Add 1 to avoid firmware treating it as invalid command */
- msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
- // Our cards use the transaction context as the tag for queueing
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
- msg[5] = d->tid;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000|cmd->cmd_len;
-
- mptr=msg+7;
-
- // Write SCSI command into the message - always 16 byte block
- memset(mptr, 0, 16);
- memcpy(mptr, cmd->cmnd, cmd->cmd_len);
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
- if (dpt_dma64(pHba)) {
- reqlen = 16; // SINGLE SGE
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- } else {
- reqlen = 14; // SINGLE SGE
- }
- /* Now fill in the SGList and command */
-
- nseg = scsi_dma_map(cmd);
- BUG_ON(nseg < 0);
- if (nseg) {
- struct scatterlist *sg;
-
- len = 0;
- scsi_for_each_sg(cmd, sg, nseg, i) {
- lptr = mptr;
- *mptr++ = direction|0x10000000|sg_dma_len(sg);
- len+=sg_dma_len(sg);
- addr = sg_dma_address(sg);
- *mptr++ = dma_low(addr);
- if (dpt_dma64(pHba))
- *mptr++ = dma_high(addr);
- /* Make this an end of list */
- if (i == nseg - 1)
- *lptr = direction|0xD0000000|sg_dma_len(sg);
- }
- reqlen = mptr - msg;
- *lenptr = len;
-
- if(cmd->underflow && len != cmd->underflow){
- printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
- len, cmd->underflow);
- }
- } else {
- *lenptr = len = 0;
- reqlen = 12;
- }
-
- /* Stick the headers on */
- msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
-
-	// Send it on its way
- rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
- if (rcode == 0) {
- return 0;
- }
- return rcode;
-}
-
-
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
-{
- struct Scsi_Host *host;
-
- host = scsi_host_alloc(sht, sizeof(adpt_hba*));
- if (host == NULL) {
- printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
- return -1;
- }
- host->hostdata[0] = (unsigned long)pHba;
- pHba->host = host;
-
- host->irq = pHba->pDev->irq;
- /* no IO ports, so don't have to set host->io_port and
- * host->n_io_port
- */
- host->io_port = 0;
- host->n_io_port = 0;
- /* see comments in scsi_host.h */
- host->max_id = 16;
- host->max_lun = 256;
- host->max_channel = pHba->top_scsi_channel + 1;
- host->cmd_per_lun = 1;
- host->unique_id = (u32)sys_tbl_pa + pHba->unit;
- host->sg_tablesize = pHba->sg_tablesize;
- host->can_queue = pHba->post_fifo_size;
-
- return 0;
-}
-
-
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba;
- u32 hba_status;
- u32 dev_status;
- u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
-	// I know this would look cleaner if I just read bytes
-	// but the model I have been using for all the rest of the
-	// I/O is in 4-byte words - so I keep that model
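-	// Reply frame layout used below (byte offsets): +16 detailed status,
-	// +20 bytes transferred (for resid/underflow), +28 sense data.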
- u16 detailed_status = readl(reply+16) &0xffff;
- dev_status = (detailed_status & 0xff);
- hba_status = detailed_status >> 8;
-
- // calculate resid for sg
- scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
-
- cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
-
- if(!(reply_flags & MSG_FAIL)) {
- switch(detailed_status & I2O_SCSI_DSC_MASK) {
- case I2O_SCSI_DSC_SUCCESS:
- cmd->result = (DID_OK << 16);
- // handle underflow
- if (readl(reply+20) < cmd->underflow) {
- cmd->result = (DID_ERROR <<16);
- printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
- }
- break;
- case I2O_SCSI_DSC_REQUEST_ABORTED:
- cmd->result = (DID_ABORT << 16);
- break;
- case I2O_SCSI_DSC_PATH_INVALID:
- case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
- case I2O_SCSI_DSC_SELECTION_TIMEOUT:
- case I2O_SCSI_DSC_COMMAND_TIMEOUT:
- case I2O_SCSI_DSC_NO_ADAPTER:
- case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
- printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_TIME_OUT << 16);
- break;
- case I2O_SCSI_DSC_ADAPTER_BUSY:
- case I2O_SCSI_DSC_BUS_BUSY:
- cmd->result = (DID_BUS_BUSY << 16);
- break;
- case I2O_SCSI_DSC_SCSI_BUS_RESET:
- case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
- cmd->result = (DID_RESET << 16);
- break;
- case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
- printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
- cmd->result = (DID_PARITY << 16);
- break;
- case I2O_SCSI_DSC_UNABLE_TO_ABORT:
- case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
- case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
- case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_AUTOSENSE_FAILED:
- case I2O_SCSI_DSC_DATA_OVERRUN:
- case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
- case I2O_SCSI_DSC_SEQUENCE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
- case I2O_SCSI_DSC_PROVIDE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_TERMINATED:
- case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
- case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
- case I2O_SCSI_DSC_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_INVALID_CDB:
- case I2O_SCSI_DSC_LUN_INVALID:
- case I2O_SCSI_DSC_SCSI_TID_INVALID:
- case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
- case I2O_SCSI_DSC_NO_NEXUS:
- case I2O_SCSI_DSC_CDB_RECEIVED:
- case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
- case I2O_SCSI_DSC_QUEUE_FROZEN:
- case I2O_SCSI_DSC_REQUEST_INVALID:
- default:
- printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_ERROR << 16);
- break;
- }
-
- // copy over the request sense data if it was a check
- // condition status
- if (dev_status == SAM_STAT_CHECK_CONDITION) {
- u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
- // Copy over the sense data
- memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
- if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
- cmd->sense_buffer[2] == DATA_PROTECT ){
- /* This is to handle an array failed */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
-
- }
- }
- } else {
-		/* In this condition we could not talk to the tid;
-		 * the card rejected it. We should signal a retry
-		 * for a limited number of retries.
- */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
- }
-
- cmd->result |= (dev_status);
-
- scsi_done(cmd);
-}
-
-
-static s32 adpt_rescan(adpt_hba* pHba)
-{
- s32 rcode;
- ulong flags = 0;
-
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
- goto out;
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
- goto out;
- rcode = 0;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return rcode;
-}
-
-
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // at least 8 u32's
- struct adpt_device* pDev = NULL;
- struct i2o_device* pI2o_dev = NULL;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- // Mark each drive as unscanned
- for (d = pHba->devices; d; d = d->next) {
- pDev =(struct adpt_device*) d->owner;
- if(!pDev){
- continue;
- }
- pDev->state |= DPTI_DEV_UNSCANNED;
- }
-
- printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- continue;
- }
-
- if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- tid = lct->lct_entry[i].tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- printk(KERN_ERR"%s: Could not query device\n",pHba->name);
- continue;
- }
- bus_no = buf[0]>>16;
- if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
- printk(KERN_WARNING
- "%s: Channel number %d out of range\n",
- pHba->name, bus_no);
- continue;
- }
-
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- pDev = pHba->channel[bus_no].device[scsi_id];
-			/* walk the LUN chain looking for a match */
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- break;
- }
- pDev = pDev->next_lun;
- }
- if(!pDev ) { // Something new add it
- d = kmalloc(sizeof(struct i2o_device),
- GFP_ATOMIC);
- if(d==NULL)
- {
- printk(KERN_CRIT "Out of memory for I2O device data.\n");
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
-
- pDev = pHba->channel[bus_no].device[scsi_id];
- if( pDev == NULL){
- pDev =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- while (pDev->next_lun) {
- pDev = pDev->next_lun;
- }
- pDev = pDev->next_lun =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- }
- pDev->tid = d->lct_data.tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
-				// Too late, SCSI system has made up its mind, but what the hey ...
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- } // end of new i2o device
-
- // We found an old device - check it
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- if(!scsi_device_online(pDev->pScsi_dev)) {
- printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
- pHba->name,bus_no,scsi_id,scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
- }
- }
- d = pDev->pI2o_dev;
- if(d->lct_data.tid != tid) { // something changed
- pDev->tid = tid;
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
- if (pDev->pScsi_dev) {
- pDev->pScsi_dev->changed = TRUE;
- pDev->pScsi_dev->removable = TRUE;
- }
- }
- // Found it - mark it scanned
- pDev->state = DPTI_DEV_ONLINE;
- break;
- }
- pDev = pDev->next_lun;
- }
- }
- }
- for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
- pDev =(struct adpt_device*) pI2o_dev->owner;
- if(!pDev){
- continue;
- }
-		// Take offline any drives that previously existed but could
-		// not be found in the LCT table
- if (pDev->state & DPTI_DEV_UNSCANNED){
- pDev->state = DPTI_DEV_OFFLINE;
- printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
- }
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Routines from i2o subsystem
- *============================================================================
- */
-
-
-
-/*
- * Bring an I2O controller into HOLD state. See the spec.
- */
-static int adpt_i2o_activate_hba(adpt_hba* pHba)
-{
- int rcode;
-
- if(pHba->initialized ) {
- if (adpt_i2o_status_get(pHba) < 0) {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
- if (adpt_i2o_status_get(pHba) < 0) {
- printk(KERN_INFO "HBA not responding.\n");
- return -1;
- }
- }
-
- if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
- printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
- return -1;
- }
-
- if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
- pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
- pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
- pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
- adpt_i2o_reset_hba(pHba);
- if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
- return -1;
- }
- }
- } else {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
-
- }
-
- if (adpt_i2o_init_outbound_q(pHba) < 0) {
- return -1;
- }
-
- /* In HOLD state */
-
- if (adpt_i2o_hrt_get(pHba) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Bring a controller online into OPERATIONAL state.
- */
-
-static int adpt_i2o_online_hba(adpt_hba* pHba)
-{
- if (adpt_i2o_systab_send(pHba) < 0)
- return -1;
- /* In READY state */
-
- if (adpt_i2o_enable_hba(pHba) < 0)
- return -1;
-
- /* In OPERATIONAL state */
- return 0;
-}
-
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
-{
- u32 __iomem *msg;
- ulong timeout = jiffies + 5*HZ;
-
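-	/* Claim a message frame (the caller may already have one in 'm') and
-	   post a UtilNOP in it, handing the frame back to the IOP unused. */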
- while(m == EMPTY_QUEUE){
- rmb();
- m = readl(pHba->post_port);
- if(m != EMPTY_QUEUE){
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
- return 2;
- }
- schedule_timeout_uninterruptible(1);
- }
- msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
- writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
- writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
- writel( 0,&msg[2]);
- wmb();
-
- writel(m, pHba->post_port);
- wmb();
- return 0;
-}
-
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
-{
- u8 *status;
- dma_addr_t addr;
- u32 __iomem *msg = NULL;
- int i;
- ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
- u32 m;
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
-
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if (!status) {
- adpt_send_nop(pHba, m);
- printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
- pHba->name);
- return -ENOMEM;
- }
-
- writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
- writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
- writel(0, &msg[2]);
- writel(0x0106, &msg[3]); /* Transaction context */
- writel(4096, &msg[4]); /* Host page frame size */
- writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
- writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
- writel((u32)addr, &msg[7]);
-
- writel(m, pHba->post_port);
- wmb();
-
- // Wait for the reply status to come back
- do {
- if (*status) {
- if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
- break;
- }
- }
- rmb();
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
-			/* We lose 4 bytes of "status" here, but we
-			   cannot free them because the controller may
-			   wake up and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (1);
-
- // If the command was successful, fill the fifo with our reply
- // message packets
- if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
- return -2;
- }
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-
- if(pHba->reply_pool != NULL) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- &pHba->reply_pool_pa, GFP_KERNEL);
- if (!pHba->reply_pool) {
- printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
- return -ENOMEM;
- }
-
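-	/* Prime the outbound FIFO: give the IOP the bus address of every
-	   reply frame so it has somewhere to post replies. */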
- for(i = 0; i < pHba->reply_fifo_size; i++) {
- writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
- pHba->reply_port);
- wmb();
- }
- adpt_i2o_status_get(pHba);
- return 0;
-}
-
-
-/*
- * I2O System Table. Contains information about
- * all the IOPs in the system. Used to inform IOPs
- * about each other's existence.
- *
- * sys_tbl_ver is the CurrentChangeIndicator that is
- * used by IOPs to track changes.
- */
-
-
-
-static s32 adpt_i2o_status_get(adpt_hba* pHba)
-{
- ulong timeout;
- u32 m;
- u32 __iomem *msg;
- u8 *status_block=NULL;
-
- if(pHba->status_block == NULL) {
- pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(i2o_status_block),
- &pHba->status_block_pa, GFP_KERNEL);
- if(pHba->status_block == NULL) {
- printk(KERN_ERR
- "dpti%d: Get Status Block failed; Out of memory. \n",
- pHba->unit);
- return -ENOMEM;
- }
- }
- memset(pHba->status_block, 0, sizeof(i2o_status_block));
- status_block = (u8*)(pHba->status_block);
- timeout = jiffies+TMOUT_GETSTATUS*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message !\n",
- pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m==EMPTY_QUEUE);
-
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
- writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
- writel(1, &msg[2]);
- writel(0, &msg[3]);
- writel(0, &msg[4]);
- writel(0, &msg[5]);
- writel( dma_low(pHba->status_block_pa), &msg[6]);
- writel( dma_high(pHba->status_block_pa), &msg[7]);
- writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
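-	/* The IOP fills the 88-byte status block by DMA; the driver treats
-	   0xff in the final byte (offset 87) as the completion marker. */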
- while(status_block[87]!=0xff){
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR"dpti%d: Get status timeout.\n",
- pHba->unit);
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- // Set up our number of outbound and inbound messages
- pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
- if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
- pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
- }
-
- pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
- if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
- pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
- }
-
- // Calculate the Scatter Gather list size
- if (dpt_dma64(pHba)) {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 14 * sizeof(u32))
- / (sizeof(struct sg_simple_element) + sizeof(u32)));
- } else {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 12 * sizeof(u32))
- / sizeof(struct sg_simple_element));
- }
- if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
- pHba->sg_tablesize = SG_LIST_ELEMENTS;
- }
-
-
-#ifdef DEBUG
- printk("dpti%d: State = ",pHba->unit);
- switch(pHba->status_block->iop_state) {
- case 0x01:
- printk("INIT\n");
- break;
- case 0x02:
- printk("RESET\n");
- break;
- case 0x04:
- printk("HOLD\n");
- break;
- case 0x05:
- printk("READY\n");
- break;
- case 0x08:
- printk("OPERATIONAL\n");
- break;
- case 0x10:
- printk("FAILED\n");
- break;
- case 0x11:
- printk("FAULTED\n");
- break;
- default:
- printk("%x (unknown!!)\n",pHba->status_block->iop_state);
- }
-#endif
- return 0;
-}
-
-/*
- * Get the IOP's Logical Configuration Table
- */
-static int adpt_i2o_lct_get(adpt_hba* pHba)
-{
- u32 msg[8];
- int ret;
- u32 buf[16];
-
- if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
- pHba->lct_size = pHba->status_block->expected_lct_size;
- }
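-	/* The IOP may report a table_size larger than our buffer; if so, free
-	   the buffer and loop to reallocate at the reported size. */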
- do {
- if (pHba->lct == NULL) {
- pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->lct_size, &pHba->lct_pa,
- GFP_ATOMIC);
- if(pHba->lct == NULL) {
- printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- }
- memset(pHba->lct, 0, pHba->lct_size);
-
- msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
- msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0xFFFFFFFF; /* All devices */
- msg[5] = 0x00000000; /* Report now */
- msg[6] = 0xD0000000|pHba->lct_size;
- msg[7] = (u32)pHba->lct_pa;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
-			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
- pHba->name, ret);
- printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
- return ret;
- }
-
- if ((pHba->lct->table_size << 2) > pHba->lct_size) {
- pHba->lct_size = pHba->lct->table_size << 2;
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- pHba->lct = NULL;
- }
- } while (pHba->lct == NULL);
-
-	PDEBUG("%s: Logical configuration table read.\n", pHba->name);
-
-
- // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
- pHba->FwDebugBufferSize = buf[1];
- pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
- pHba->FwDebugBufferSize);
- if (pHba->FwDebugBuffer_P) {
- pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_FLAGS_OFFSET;
- pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_BLED_OFFSET;
- pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
- pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_STR_LENGTH_OFFSET;
- pHba->FwDebugBuffer_P += buf[2];
- pHba->FwDebugFlags = 0;
- }
- }
-
- return 0;
-}
-
-static int adpt_i2o_build_sys_table(void)
-{
- adpt_hba* pHba = hba_chain;
- int count = 0;
-
- if (sys_tbl)
- dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
- sys_tbl, sys_tbl_pa);
-
- sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
- (hba_count) * sizeof(struct i2o_sys_tbl_entry);
-
- sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
- sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
- if (!sys_tbl) {
- printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
- return -ENOMEM;
- }
-
- sys_tbl->num_entries = hba_count;
- sys_tbl->version = I2OVERSION;
- sys_tbl->change_ind = sys_tbl_ind++;
-
- for(pHba = hba_chain; pHba; pHba = pHba->next) {
- u64 addr;
- // Get updated Status Block so we have the latest information
- if (adpt_i2o_status_get(pHba)) {
- sys_tbl->num_entries--;
- continue; // try next one
- }
-
- sys_tbl->iops[count].org_id = pHba->status_block->org_id;
- sys_tbl->iops[count].iop_id = pHba->unit + 2;
- sys_tbl->iops[count].seg_num = 0;
- sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
- sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
- sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
- sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
- sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
- sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
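-		/* Advertise base_addr_phys + 0x40 as this IOP's inbound
-		   message post address in the system table. */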
- addr = pHba->base_addr_phys + 0x40;
- sys_tbl->iops[count].inbound_low = dma_low(addr);
- sys_tbl->iops[count].inbound_high = dma_high(addr);
-
- count++;
- }
-
-#ifdef DEBUG
-{
- u32 *table = (u32*)sys_tbl;
- printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
- for(count = 0; count < (sys_tbl_len >>2); count++) {
- printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
- count, table[count]);
- }
-}
-#endif
-
- return 0;
-}
-
-
-/*
- * Dump the information block associated with a given unit (TID)
- */
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
-{
- char buf[64];
- int unit = d->lct_data.tid;
-
- printk(KERN_INFO "TID %3.3d ", unit);
-
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Vendor: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Device: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
- {
- buf[8]=0;
- printk(" Rev: %-12.12s\n", buf);
- }
-#ifdef DEBUG
- printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
- printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
- printk(KERN_INFO "\tFlags: ");
-
- if(d->lct_data.device_flags&(1<<0))
- printk("C"); // ConfigDialog requested
- if(d->lct_data.device_flags&(1<<1))
- printk("U"); // Multi-user capable
- if(!(d->lct_data.device_flags&(1<<4)))
- printk("P"); // Peer service enabled!
- if(!(d->lct_data.device_flags&(1<<5)))
- printk("M"); // Mgmt service enabled!
- printk("\n");
-#endif
-}
-
-#ifdef DEBUG
-/*
- * Do i2o class name lookup
- */
-static const char *adpt_i2o_get_class_name(int class)
-{
- int idx = 16;
- static char *i2o_class_name[] = {
- "Executive",
- "Device Driver Module",
- "Block Device",
- "Tape Device",
- "LAN Interface",
- "WAN Interface",
- "Fibre Channel Port",
- "Fibre Channel Device",
- "SCSI Device",
- "ATE Port",
- "ATE Device",
- "Floppy Controller",
- "Floppy Device",
- "Secondary Bus Port",
- "Peer Transport Agent",
- "Peer Transport",
- "Unknown"
- };
-
- switch(class&0xFFF) {
- case I2O_CLASS_EXECUTIVE:
- idx = 0; break;
- case I2O_CLASS_DDM:
- idx = 1; break;
- case I2O_CLASS_RANDOM_BLOCK_STORAGE:
- idx = 2; break;
- case I2O_CLASS_SEQUENTIAL_STORAGE:
- idx = 3; break;
- case I2O_CLASS_LAN:
- idx = 4; break;
- case I2O_CLASS_WAN:
- idx = 5; break;
- case I2O_CLASS_FIBRE_CHANNEL_PORT:
- idx = 6; break;
- case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
- idx = 7; break;
- case I2O_CLASS_SCSI_PERIPHERAL:
- idx = 8; break;
- case I2O_CLASS_ATE_PORT:
- idx = 9; break;
- case I2O_CLASS_ATE_PERIPHERAL:
- idx = 10; break;
- case I2O_CLASS_FLOPPY_CONTROLLER:
- idx = 11; break;
- case I2O_CLASS_FLOPPY_DEVICE:
- idx = 12; break;
- case I2O_CLASS_BUS_ADAPTER_PORT:
- idx = 13; break;
- case I2O_CLASS_PEER_TRANSPORT_AGENT:
- idx = 14; break;
- case I2O_CLASS_PEER_TRANSPORT:
- idx = 15; break;
- }
- return i2o_class_name[idx];
-}
-#endif
-
-
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
-{
- u32 msg[6];
- int ret, size = sizeof(i2o_hrt);
-
- do {
- if (pHba->hrt == NULL) {
- pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
- size, &pHba->hrt_pa, GFP_KERNEL);
- if (pHba->hrt == NULL) {
- printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
- }
-
- msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
- msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
- msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
- printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
- return ret;
- }
-
- if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
- int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
- dma_free_coherent(&pHba->pDev->dev, size,
- pHba->hrt, pHba->hrt_pa);
- size = newsize;
- pHba->hrt = NULL;
- }
- } while(pHba->hrt == NULL);
- return 0;
-}
-
-/*
- * Query one scalar group value or a whole scalar group.
- */
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen)
-{
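-	/* OperationBlock (u16 fields): operation count, reserved, operation,
-	   group number, field count, field index; the field count is set to
-	   -1 below when the whole group is requested. */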
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 *opblk_va;
- dma_addr_t opblk_pa;
- u8 *resblk_va;
- dma_addr_t resblk_pa;
-
- int size;
-
- /* 8 bytes for header */
- resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
- if (resblk_va == NULL) {
- printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
-
- opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(opblk), &opblk_pa, GFP_KERNEL);
- if (opblk_va == NULL) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- if (field == -1) /* whole group */
- opblk[4] = -1;
-
- memcpy(opblk_va, opblk, sizeof(opblk));
- size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk_va, opblk_pa, sizeof(opblk),
- resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
- dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
- if (size == -ETIME) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
- return -ETIME;
- } else if (size == -EINTR) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
- return -EINTR;
- }
-
- memcpy(buf, resblk_va+8, buflen); /* cut off header */
-
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- if (size < 0)
- return size;
-
- return buflen;
-}
-
-
-/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
- *
- * This function can be used for all UtilParamsGet/Set operations.
- * The OperationBlock is given in opblk-buffer,
- * and results are returned in resblk-buffer.
- * Note that the minimum sized resblk is 8 bytes and contains
- * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
- */
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk_va, dma_addr_t opblk_pa, int oplen,
- void *resblk_va, dma_addr_t resblk_pa, int reslen)
-{
- u32 msg[9];
- u32 *res = (u32 *)resblk_va;
- int wait_status;
-
- msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
- msg[1] = cmd << 24 | HOST_TID << 12 | tid;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0;
- msg[5] = 0x54000000 | oplen; /* OperationBlock */
- msg[6] = (u32)opblk_pa;
- msg[7] = 0xD0000000 | reslen; /* ResultBlock */
- msg[8] = (u32)resblk_pa;
-
- if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
- printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
- return wait_status; /* -DetailedStatus */
- }
-
- if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- pHba->name,
- (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
- : "PARAMS_GET",
- res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
- return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
- }
-
- return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
-}
-
-
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
-
- /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
-
- if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
- (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
- return 0;
- }
-
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
-
- if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
- pHba->unit, -ret);
- } else {
- printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-/*
- * Enable IOP. Allows the IOP to resume external operations.
- */
-static int adpt_i2o_enable_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
- if(!pHba->status_block){
- return -ENOMEM;
- }
- /* Enable only allowed on READY state */
- if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
- return 0;
-
- if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
- return -EINVAL;
-
- msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
- pHba->name, ret);
- } else {
- PDEBUG("%s: Enabled.\n", pHba->name);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-static int adpt_i2o_systab_send(adpt_hba* pHba)
-{
- u32 msg[12];
- int ret;
-
- msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
- msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
- msg[5] = 0; /* Segment 0 */
-
- /*
- * Provide three SGL-elements:
- * System table (SysTab), Private memory space declaration and
- * Private i/o space declaration
- */
- msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = (u32)sys_tbl_pa;
- msg[8] = 0x54000000 | 0;
- msg[9] = 0;
- msg[10] = 0xD4000000 | 0;
- msg[11] = 0;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
- printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
- pHba->name, ret);
- }
-#ifdef DEBUG
- else {
- PINFO("%s: SysTab set.\n", pHba->name);
- }
-#endif
-
- return ret;
-}
-
-
-/*============================================================================
- *
- *============================================================================
- */
-
-
-#ifdef UARTDELAY
-
-static void adpt_delay(int millisec)
-{
- int i;
- for (i = 0; i < millisec; i++) {
- udelay(1000); /* delay for one millisecond */
- }
-}
-
-#endif
-
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = "dpt_i2o",
- .proc_name = "dpt_i2o",
- .show_info = adpt_show_info,
- .info = adpt_info,
- .queuecommand = adpt_queue,
- .eh_abort_handler = adpt_abort,
- .eh_device_reset_handler = adpt_device_reset,
- .eh_bus_reset_handler = adpt_bus_reset,
- .eh_host_reset_handler = adpt_reset,
- .bios_param = adpt_bios_param,
- .slave_configure = adpt_slave_configure,
- .can_queue = MAX_TO_IOP_MESSAGES,
- .this_id = 7,
-};
-
-static int __init adpt_init(void)
-{
- int error;
- adpt_hba *pHba, *next;
-
- printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-
- error = adpt_detect(&driver_template);
- if (error < 0)
- return error;
- if (hba_chain == NULL)
- return -ENODEV;
-
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- error = scsi_add_host(pHba->host, &pHba->pDev->dev);
- if (error)
- goto fail;
- scsi_scan_host(pHba->host);
- }
- return 0;
-fail:
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- scsi_remove_host(pHba->host);
- }
- return error;
-}
-
-static void __exit adpt_exit(void)
-{
- adpt_hba *pHba, *next;
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- adpt_release(pHba);
- }
-}
-
-module_init(adpt_init);
-module_exit(adpt_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
deleted file mode 100644
index 8a079e8d7f65..000000000000
--- a/drivers/scsi/dpti.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-#ifndef _DPT_H
-#define _DPT_H
-
-#define MAX_TO_IOP_MESSAGES (255)
-#define MAX_FROM_IOP_MESSAGES (255)
-
-
-/*
- * SCSI interface function Prototypes
- */
-
-static int adpt_detect(struct scsi_host_template * sht);
-static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
-static int adpt_abort(struct scsi_cmnd * cmd);
-static int adpt_reset(struct scsi_cmnd* cmd);
-static int adpt_slave_configure(struct scsi_device *);
-
-static const char *adpt_info(struct Scsi_Host *pSHost);
-static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
- sector_t, int geom[]);
-
-static int adpt_bus_reset(struct scsi_cmnd* cmd);
-static int adpt_device_reset(struct scsi_cmnd* cmd);
-
-
-/*
- * struct scsi_host_template (see scsi/scsi_host.h)
- */
-
-#define DPT_DRIVER_NAME "Adaptec I2O RAID"
-
-#ifndef HOSTS_C
-
-#include "dpt/sys_info.h"
-#include <linux/wait.h>
-#include "dpt/dpti_i2o.h"
-#include "dpt/dpti_ioctl.h"
-
-#define DPT_I2O_VERSION "2.4 Build 5go"
-#define DPT_VERSION 2
-#define DPT_REVISION '4'
-#define DPT_SUBREVISION '5'
-#define DPT_BETA ""
-#define DPT_MONTH 8
-#define DPT_DAY 7
-#define DPT_YEAR (2001-1980)
-
-#define DPT_DRIVER "dpt_i2o"
-#define DPTI_I2O_MAJOR (151)
-#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */
-#define DPTI_MAX_HBA (16)
-#define MAX_CHANNEL (5) // Maximum Channel # Supported
-#define MAX_ID (128) // Maximum Target ID Supported
-
-/* Sizes in 4 byte words */
-#define REPLY_FRAME_SIZE (17)
-#define MAX_MESSAGE_SIZE (128)
-#define SG_LIST_ELEMENTS (56)
-
-#define EMPTY_QUEUE 0xffffffff
-#define I2O_INTERRUPT_PENDING_B (0x08)
-
-#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID
-#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
-#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
-
-/* Debugging macro from Linux Device Drivers - Rubini */
-#undef PDEBUG
-#ifdef DEBUG
-//TODO add debug level switch
-# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-#else
-# define PDEBUG(fmt, args...) /* not debugging: nothing */
-# define PDEBUGV(fmt, args...) /* not debugging: nothing */
-#endif
-
-#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args)
-#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args)
-#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args)
-#define PCRIT(fmt, args...) printk(KERN_CRIT fmt, ##args)
-
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-
-// Command timeouts
-#define FOREVER (0)
-#define TMOUT_INQUIRY (20)
-#define TMOUT_FLUSH (360/45)
-#define TMOUT_ABORT (30)
-#define TMOUT_SCSI (300)
-#define TMOUT_IOPRESET (360)
-#define TMOUT_GETSTATUS (15)
-#define TMOUT_INITOUTBOUND (15)
-#define TMOUT_LCT (360)
-
-
-#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF
-
-#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A
-
-#define I2O_SCSI_DSC_MASK 0xFF00
-#define I2O_SCSI_DSC_SUCCESS 0x0000
-#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200
-#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300
-#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400
-#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500
-#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600
-#define I2O_SCSI_DSC_PATH_INVALID 0x0700
-#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800
-#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900
-#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00
-#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00
-#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00
-#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00
-#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00
-#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000
-#define I2O_SCSI_DSC_NO_ADAPTER 0x1100
-#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200
-#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300
-#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400
-#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500
-#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600
-#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700
-#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800
-#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300
-#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400
-#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500
-#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600
-#define I2O_SCSI_DSC_INVALID_CDB 0x3700
-#define I2O_SCSI_DSC_LUN_INVALID 0x3800
-#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900
-#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00
-#define I2O_SCSI_DSC_NO_NEXUS 0x3B00
-#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00
-#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00
-#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00
-#define I2O_SCSI_DSC_BUS_BUSY 0x3F00
-#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000
-
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed
-#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State
-#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */
-#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */
-
-
-// Device state flags
-#define DPTI_DEV_ONLINE 0x00
-#define DPTI_DEV_UNSCANNED 0x01
-#define DPTI_DEV_RESET 0x02
-#define DPTI_DEV_OFFLINE 0x04
-
-
-struct adpt_device {
- struct adpt_device* next_lun;
- u32 flags;
- u32 type;
- u32 capacity;
- u32 block_size;
- u8 scsi_channel;
- u8 scsi_id;
- u64 scsi_lun;
- u8 state;
- u16 tid;
- struct i2o_device* pI2o_dev;
- struct scsi_device *pScsi_dev;
-};
-
-struct adpt_channel {
- struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */
- u8 scsi_id;
- u8 type;
- u16 tid;
- u32 state;
- struct i2o_device* pI2o_dev;
-};
-
-// HBA state flags
-#define DPTI_STATE_RESET (0x01)
-
-typedef struct _adpt_hba {
- struct _adpt_hba *next;
- struct pci_dev *pDev;
- struct Scsi_Host *host;
- u32 state;
- spinlock_t state_lock;
- int unit;
- int host_no; /* SCSI host number */
- u8 initialized;
- u8 in_use; /* is the management node open*/
-
- char name[32];
- char detail[55];
-
- void __iomem *base_addr_virt;
- void __iomem *msg_addr_virt;
- ulong base_addr_phys;
- void __iomem *post_port;
- void __iomem *reply_port;
- void __iomem *irq_mask;
- u16 post_count;
- u32 post_fifo_size;
- u32 reply_fifo_size;
- u32* reply_pool;
- dma_addr_t reply_pool_pa;
- u32 sg_tablesize; // Scatter/Gather List Size.
- u8 top_scsi_channel;
- u8 top_scsi_id;
- u64 top_scsi_lun;
- u8 dma64;
-
- i2o_status_block* status_block;
- dma_addr_t status_block_pa;
- i2o_hrt* hrt;
- dma_addr_t hrt_pa;
- i2o_lct* lct;
- dma_addr_t lct_pa;
- uint lct_size;
- struct i2o_device* devices;
- struct adpt_channel channel[MAX_CHANNEL];
- struct proc_dir_entry* proc_entry; /* /proc dir */
-
- void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
- u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
- void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len
- void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags
- void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
- void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
- u32 FwDebugFlags;
- u32 *ioctl_reply_context[4];
-} adpt_hba;
-
-struct sg_simple_element {
- u32 flag_count;
- u32 addr_bus;
-};
-
-/*
- * Function Prototypes
- */
-
-static void adpt_i2o_sys_shutdown(void);
-static int adpt_init(void);
-static int adpt_i2o_build_sys_table(void);
-static irqreturn_t adpt_isr(int irq, void *dev_id);
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen);
-#ifdef DEBUG
-static const char *adpt_i2o_get_class_name(int class);
-#endif
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, dma_addr_t opblk_pa, int oplen,
- void *resblk, dma_addr_t resblk_pa, int reslen);
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
-static int adpt_i2o_lct_get(adpt_hba* pHba);
-static int adpt_i2o_parse_lct(adpt_hba* pHba);
-static int adpt_i2o_activate_hba(adpt_hba* pHba);
-static int adpt_i2o_enable_hba(adpt_hba* pHba);
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d);
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len);
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba);
-static s32 adpt_i2o_status_get(adpt_hba* pHba);
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd);
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
-static s32 adpt_hba_reset(adpt_hba* pHba);
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
-static s32 adpt_rescan(adpt_hba* pHba);
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
-static void adpt_i2o_delete_hba(adpt_hba* pHba);
-static void adpt_inquiry(adpt_hba* pHba);
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
-static int adpt_i2o_online_hba(adpt_hba* pHba);
-static void adpt_i2o_post_wait_complete(u32, int);
-static int adpt_i2o_systab_send(adpt_hba* pHba);
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg);
-static int adpt_open(struct inode *inode, struct file *file);
-static int adpt_close(struct inode *inode, struct file *file);
-
-
-#ifdef UARTDELAY
-static void adpt_delay(int millisec);
-#endif
-
-#define PRINT_BUFFER_SIZE 512
-
-#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
-#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print
-#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print
-#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point
-#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit
-#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions
-#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints
-#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info
-#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan
-
-#define FW_DEBUG_STR_LENGTH_OFFSET 0
-#define FW_DEBUG_FLAGS_OFFSET 4
-#define FW_DEBUG_BLED_OFFSET 8
-
-#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
-#endif /* !HOSTS_C */
-#endif /* _DPT_H */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c2a59109857a..6ec296321ffc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1488,7 +1488,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
- wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lport->link_up) {
kfree_skb(skb);
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
index d1225cf6320e..0eb4ba277264 100644
--- a/drivers/scsi/fnic/cq_desc.h
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
index a9fa26f82ddd..b6113291cf68 100644
--- a/drivers/scsi/fnic/cq_enet_desc.h
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
index 501660cfe228..4d94329c8ef5 100644
--- a/drivers/scsi/fnic/cq_exch_desc.h
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_EXCH_DESC_H_
#define _CQ_EXCH_DESC_H_
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
index 12d770d885c5..54a0b2ba8f6f 100644
--- a/drivers/scsi/fnic/fcpio.h
+++ b/drivers/scsi/fnic/fcpio.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FCPIO_H_
#define _FCPIO_H_
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 85ec6163ddab..d82de34f6fd7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_H_
#define _FNIC_H_
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index bbe2ca4971b2..a61e0c5e6506 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 866b4c983ace..6fedc3b7d1ab 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1885218f9d15..79ddfaaf71a4 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
index 7761f33ab5d4..79f53029737b 100644
--- a/drivers/scsi/fnic/fnic_fip.h
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_FIP_H_
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 1cb6a68c8e4e..f4c8769df312 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_IO_H_
#define _FNIC_IO_H_
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 2fb2731f50fb..8896758fed8c 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
@@ -332,4 +320,3 @@ void fnic_clear_intr_mode(struct fnic *fnic)
pci_free_irq_vectors(fnic->pdev);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
-
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 51e7c344ddc3..1077110ab273 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
@@ -556,6 +544,39 @@ static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
+static int fnic_scsi_drv_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+
+ /* Configure maximum outstanding IO reqs*/
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+
+ fnic->fnic_max_tag_id = host->can_queue;
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ host->nr_hw_queues = fnic->wq_copy_count;
+ if (host->nr_hw_queues > 1)
+ shost_printk(KERN_ERR, host,
+ "fnic: blk-mq is not supported");
+
+ host->nr_hw_queues = fnic->wq_copy_count = 1;
+
+ shost_printk(KERN_INFO, host,
+ "fnic: can_queue: %d max_lun: %llu",
+ host->can_queue, host->max_lun);
+
+ shost_printk(KERN_INFO, host,
+ "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ host->max_id, host->max_cmd_len, host->nr_hw_queues);
+
+ return 0;
+}
+
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
@@ -696,17 +717,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
- /* Configure Maximum Outstanding IO reqs*/
- if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
- host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
- max_t(u32, FNIC_MIN_IO_REQ,
- fnic->config.io_throttle_count));
- }
- fnic->fnic_max_tag_id = host->can_queue;
-
- host->max_lun = fnic->config.luns_per_tgt;
- host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ fnic_scsi_drv_init(fnic);
fnic_get_res_counts(fnic);
@@ -1159,4 +1170,3 @@ static void __exit fnic_cleanup_module(void)
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);
-
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 50488f8e169d..a1c9cfcace7f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
index ef8aaf2156dd..92a2fcfd3ea9 100644
--- a/drivers/scsi/fnic/fnic_res.h
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_RES_H_
#define _FNIC_RES_H_
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 3d64877bda8d..26dbd347156e 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/mempool.h>
#include <linux/errno.h>
@@ -1350,8 +1338,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
return wq_work_done;
}
-static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fnic *fnic = data;
@@ -1548,8 +1535,7 @@ struct fnic_rport_abort_io_iter_data {
int term_cnt;
};
-static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -2003,8 +1989,7 @@ struct fnic_pending_aborts_iter_data {
int ret;
};
-static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
- void *data, bool reserved)
+static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -2019,8 +2004,6 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
if (sc == iter_data->lr_sc || sc->device != lun_dev)
return true;
- if (reserved)
- return true;
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
@@ -2670,8 +2653,7 @@ call_fc_exch_mgr_reset:
}
-static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 086f729f3c46..bdf639eef8cf 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4a7536bb0ab3..e03967463561 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index 8aa55c1e2025..d1c301bf3fde 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2012 Cisco Systems, Inc. All rights reserved. */
#ifndef __FNIC_TRACE_H__
#define __FNIC_TRACE_H__
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
index 92e80ae6b725..9bc509d355c4 100644
--- a/drivers/scsi/fnic/rq_enet_desc.h
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
index c5db32eda5ef..ed3dd443fe3e 100644
--- a/drivers/scsi/fnic/vnic_cq.c
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
index 4ede6809fb1e..e7cc1f165390 100644
--- a/drivers/scsi/fnic/vnic_cq.h
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
index 7901ce255a81..1b198ee59dd6 100644
--- a/drivers/scsi/fnic/vnic_cq_copy.h
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_CQ_COPY_H_
#define _VNIC_CQ_COPY_H_
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 5988c300cc82..3e5b437c0492 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index ef5309a5df5d..7a568d141cde 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index c20d30e36dfc..f876d223b2b4 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
index 4f4dc8793d23..df7f63acd879 100644
--- a/drivers/scsi/fnic/vnic_intr.c
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
index d5fb40e7c98e..acc194c0f522 100644
--- a/drivers/scsi/fnic/vnic_intr.h
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
index f15b83eeaced..6896f16d564b 100644
--- a/drivers/scsi/fnic/vnic_nic.h
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
index 7c6163f73bd3..3d260b831fc5 100644
--- a/drivers/scsi/fnic/vnic_resource.h
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
index 6a35b1be0032..350607d13c9a 100644
--- a/drivers/scsi/fnic/vnic_rq.c
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -191,4 +179,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
index aebdfbd6ad3c..1066255de808 100644
--- a/drivers/scsi/fnic/vnic_rq.h
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index e343e1d0f801..4e12f7b32d9d 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_SCSI_H_
#define _VNIC_SCSI_H_
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
index 5372e23c1cb3..4396397b089d 100644
--- a/drivers/scsi/fnic/vnic_stats.h
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 442972c04e65..426b901c80f3 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
index 5d1e0a44d94a..041618e13ce2 100644
--- a/drivers/scsi/fnic/vnic_wq.h
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
index 7b18635df7e6..96569d4ccc58 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.c
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -108,4 +96,3 @@ void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
}
-
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
index 6aff9740c3df..2f8340144e79 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.h
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
index b121cbad18b8..9a933a5dee79 100644
--- a/drivers/scsi/fnic/wq_enet_desc.h
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2f6c56aabe1d..7d56a236a011 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -26,8 +26,12 @@
struct gvp11_hostdata {
struct WD33C93_hostdata wh;
struct gvp11_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff))
+
static irqreturn_t gvp11_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -54,17 +58,33 @@ void gvp11_setup(char *str, int *ints)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
int bank_mask;
static int scsi_alloc_out_of_range = 0;
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
+
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
@@ -87,10 +107,32 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev,
+ wh->dma_bounce_buffer,
+ wh->dma_bounce_len,
+ DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ }
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
/* fall back to Chip RAM if address out of range */
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
kfree(wh->dma_bounce_buffer);
@@ -108,15 +150,19 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+ /* chip RAM can be mapped to phys. address directly */
+ addr = virt_to_phys(wh->dma_bounce_buffer);
+ /* no need to flush/invalidate cache */
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
+ /* finally, have OK mapping (punted for PIO else) */
+ scsi_pointer->dma_handle = addr;
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
- scsi_pointer->this_residual);
- }
}
/* setup dma direction */
@@ -129,13 +175,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
if (bank_mask)
@@ -161,6 +201,11 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* remove write bit from CONTROL bits */
regs->CNTR = GVP11_DMAC_INT_ENABLE;
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
@@ -287,6 +332,13 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
default_dma_xfer_mask = ent->driver_data;
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(default_dma_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(default_dma_xfer_mask));
+ return -ENODEV;
+ }
+
/*
* Rumors state that some GVP ram boards use the same product
* code as the SCSI controllers. Therefore if the board-size
@@ -327,9 +379,16 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
- if (gvp11_xfer_mask)
+ if (gvp11_xfer_mask) {
hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
- else
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(gvp11_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(gvp11_xfer_mask));
+ error = -ENODEV;
+ goto fail_check_or_alloc;
+ }
+ } else
hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
hdata->wh.no_sync = 0xff;
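
A minimal sketch (not from this patch) of the dma_map_single()/dma_mapping_error()/dma_unmap_single() pattern the gvp11 hunks above move to in place of virt_to_bus(); the device pointer, buffer and length are placeholders:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a streaming buffer for a device write. */
static int example_map_for_write(struct device *dev, void *buf, size_t len,
				 dma_addr_t *handle)
{
	dma_addr_t addr;

	/*
	 * dma_map_single() also does any cache maintenance the platform
	 * needs, which is why the explicit cache_clear()/cache_push()
	 * calls could be dropped from dma_setup() above.
	 */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* caller falls back to bounce buffer or PIO */

	*handle = addr;
	return 0;
}

static void example_unmap(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}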
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 764e859d0106..33af5b8dede2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -219,10 +219,15 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ if (slot->n_elem) {
+ if (task->task_proto & SAS_PROTOCOL_SSP)
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ else
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ }
if (slot->n_elem_dif) {
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
@@ -269,28 +274,23 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
- struct sas_task *task, int n_elem,
- int n_elem_req)
+ struct sas_task *task, int n_elem)
{
struct device *dev = hisi_hba->dev;
- if (!sas_protocol_ata(task->task_proto)) {
+ if (!sas_protocol_ata(task->task_proto) && n_elem) {
if (task->num_scatter) {
- if (n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(dev, task->scatter, task->num_scatter,
+ task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- if (n_elem_req)
- dma_unmap_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
}
}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
- struct sas_task *task, int *n_elem,
- int *n_elem_req)
+ struct sas_task *task, int *n_elem)
{
struct device *dev = hisi_hba->dev;
int rc;
@@ -308,9 +308,9 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (!*n_elem_req) {
+ *n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!*n_elem) {
rc = -ENOMEM;
goto prep_out;
}
@@ -332,8 +332,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
- hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
- *n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
return rc;
}
@@ -457,7 +456,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
+ int n_elem = 0, n_elem_dif = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -568,8 +567,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
return -EINVAL;
}
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
if (rc < 0)
goto prep_out;
@@ -605,8 +603,7 @@ err_out_dif_dma_unmap:
if (!sas_protocol_ata(task->task_proto))
hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem);
prep_out:
dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
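
The hisi_sas change above can fold n_elem_req into n_elem because a single-entry scatterlist (the SMP request) is mapped and unmapped with the same dma_map_sg()/dma_unmap_sg() calls as a multi-entry list. A small sketch of that, with placeholder names and not taken from the driver:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Illustrative only: one element count describes either kind of mapping. */
static int example_map_smp_req(struct device *dev, struct scatterlist *sg,
			       int *n_elem)
{
	*n_elem = dma_map_sg(dev, sg, 1, DMA_TO_DEVICE);
	if (!*n_elem)
		return -ENOMEM;
	return 0;
}

static void example_unmap_smp_req(struct device *dev, struct scatterlist *sg)
{
	dma_unmap_sg(dev, sg, 1, DMA_TO_DEVICE);
}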
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 4582791def32..349546bacb2b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1282,8 +1282,6 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 455d49299ddf..70e401fd432a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -805,8 +805,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
return -SAS_QUEUE_FULL;
}
/*
- * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
- */
+ * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
+ */
if (sata_dev ^ (start & 1))
break;
start++;
@@ -2428,8 +2428,6 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index eb86afb21aab..efe8c5be5870 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -481,6 +481,9 @@ struct hisi_sas_err_record_v3 {
#define RX_DATA_LEN_UNDERFLOW_OFF 6
#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+#define RX_FIS_STATUS_ERR_OFF 0
+#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
+
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32
@@ -2161,6 +2164,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
@@ -2188,7 +2192,10 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ ts->stat = SAS_PROTO_RESPONSE;
+ } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2311,8 +2318,6 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2778,16 +2783,13 @@ static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
static int slave_configure_v3_hw(struct scsi_device *sdev)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
- struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_hba *hisi_hba = shost_priv(shost);
+ int ret = hisi_sas_slave_configure(sdev);
struct device *dev = hisi_hba->dev;
- int ret = sas_slave_configure(sdev);
unsigned int max_sectors;
if (ret)
return ret;
- if (!dev_is_sata(ddev))
- sas_change_queue_depth(sdev, 64);
if (sdev->type == TYPE_ENCLOSURE)
return 0;
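
The new SATA error check in slot_err_v3_hw() above reads a little-endian field from the error record, converts it to CPU byte order, and tests one status bit. A generic sketch of that idiom, with placeholder names rather than the driver's macros:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_RX_FIS_STATUS_ERR_OFF	0
#define EXAMPLE_RX_FIS_STATUS_ERR_MSK	(1 << EXAMPLE_RX_FIS_STATUS_ERR_OFF)

/* Illustrative only: endian-safe test of a hardware status bit. */
static bool example_fis_status_error(__le16 raw_sipc_rx_err_type)
{
	u16 sipc_rx_err_type = le16_to_cpu(raw_sipc_rx_err_type);

	return sipc_rx_err_type & EXAMPLE_RX_FIS_STATUS_ERR_MSK;
}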
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8352f90d997d..0738238ed6cc 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -190,6 +190,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
+
+ /*
+ * After scsi_remove_host() has returned the scsi LLD module can be
+ * unloaded and/or the host resources can be released. Hence wait until
+ * the dependent SCSI targets and devices are gone before returning.
+ */
+ wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
+
+ scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -236,6 +245,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
+ if (dma_dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
error = scsi_mq_setup_tags(shost);
if (error)
goto fail;
@@ -295,8 +309,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
- * Any host allocation in this function will be freed in
- * scsi_host_dev_release().
+ * Any resources associated with the SCSI host in this function except
+ * the tag set will be freed by scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -312,6 +326,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
+ scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -345,12 +360,9 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
-
kfree(shost->shost_data);
- ida_simple_remove(&host_index_ida, shost->host_no);
+ ida_free(&host_index_ida, shost->host_no);
if (shost->shost_state != SHOST_CREATED)
put_device(parent);
@@ -394,8 +406,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_waitqueue_head(&shost->targets_wq);
- index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
kfree(shost);
return NULL;
@@ -566,8 +579,7 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
-static bool scsi_host_check_in_flight(struct request *rq, void *data,
- bool reserved)
+static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
int *count = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -662,7 +674,7 @@ void scsi_flush_work(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
-static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
+static bool complete_all_cmds_iter(struct request *rq, void *data)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
enum scsi_host_status status = *(enum scsi_host_status *)data;
@@ -693,17 +705,16 @@ void scsi_host_complete_all_commands(struct Scsi_Host *shost,
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
struct scsi_host_busy_iter_data {
- bool (*fn)(struct scsi_cmnd *, void *, bool);
+ bool (*fn)(struct scsi_cmnd *, void *);
void *priv;
};
-static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
- bool reserved)
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
{
struct scsi_host_busy_iter_data *iter_data = priv;
struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
- return iter_data->fn(sc, iter_data->priv, reserved);
+ return iter_data->fn(sc, iter_data->priv);
}
/**
@@ -716,7 +727,7 @@ static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
 * it has to be provided by the caller
**/
void scsi_host_busy_iter(struct Scsi_Host *shost,
- bool (*fn)(struct scsi_cmnd *, void *, bool),
+ bool (*fn)(struct scsi_cmnd *, void *),
void *priv)
{
struct scsi_host_busy_iter_data iter_data = {
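
Among the hosts.c changes above is the conversion from the deprecated ida_simple_get()/ida_simple_remove() helpers to ida_alloc()/ida_free(). A minimal sketch of the replacement pair, using a placeholder IDA rather than host_index_ida:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_index_ida);

/*
 * Illustrative only: ida_alloc() with no range arguments behaves like
 * ida_simple_get(ida, 0, 0, gfp) and returns a negative errno on failure.
 */
static int example_get_index(void)
{
	return ida_alloc(&example_index_ida, GFP_KERNEL);
}

static void example_put_index(int index)
{
	ida_free(&example_index_ida, index);
}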
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9fee70d6434a..29b1bd755afe 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static bool iscsi_recv_from_iscsi_q;
+module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
+MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
+
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
S_IRUGO | S_IWUSR);
@@ -122,20 +126,13 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk)
+static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
- struct iscsi_conn *conn;
- struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
read_descriptor_t rd_desc;
- read_lock_bh(&sk->sk_callback_lock);
- conn = sk->sk_user_data;
- if (!conn) {
- read_unlock_bh(&sk->sk_callback_lock);
- return;
- }
- tcp_conn = conn->dd_data;
-
/*
* Use rd_desc to pass 'conn' to iscsi_tcp_recv.
* We set count to 1 because we want the network layer to
@@ -144,13 +141,48 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
- iscsi_sw_sk_state_check(sk);
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+
+ iscsi_sw_sk_state_check(sk);
+}
+
+static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
+ recvwork);
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ lock_sock(sk);
+ iscsi_sw_tcp_recv_data(conn);
+ release_sock(sk);
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_conn *conn;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ if (tcp_sw_conn->queue_recv)
+ iscsi_conn_queue_recv(conn);
+ else
+ iscsi_sw_tcp_recv_data(conn);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -205,7 +237,7 @@ static void iscsi_sw_tcp_write_space(struct sock *sk)
old_write_space(sk);
ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
@@ -274,7 +306,10 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
copy = segment->size - offset;
if (segment->total_copied + segment->size < segment->total_size)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+
+ if (tcp_sw_conn->queue_recv)
+ flags |= MSG_DONTWAIT;
/* Use sendpage if we can; else fall back to sendmsg */
if (!segment->data) {
@@ -557,6 +592,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
+ INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
+ tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
@@ -610,6 +647,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
+ iscsi_suspend_rx(conn);
+
spin_lock_bh(&session->frwd_lock);
tcp_sw_conn->sock = NULL;
spin_unlock_bh(&session->frwd_lock);
@@ -898,7 +937,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
remove_session:
iscsi_session_teardown(cls_session);
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
@@ -915,7 +954,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -1003,7 +1042,6 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
.slave_configure = iscsi_sw_tcp_slave_configure,
- .target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
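
The recv_from_iscsi_q option added above defers socket receive handling from the ->sk_data_ready() softirq callback to process context on a workqueue. A minimal sketch of that pattern under placeholder names (the connection struct, workqueue and INIT_WORK setup are assumed, not the driver's):

#include <linux/workqueue.h>
#include <net/sock.h>

struct example_conn {
	struct socket		*sock;
	struct work_struct	recvwork;	/* set up with INIT_WORK() */
	struct workqueue_struct	*wq;
};

static void example_recv_work(struct work_struct *work)
{
	struct example_conn *conn = container_of(work, struct example_conn,
						 recvwork);
	struct sock *sk = conn->sock->sk;

	/* process context: taking the socket lock and sleeping is allowed */
	lock_sock(sk);
	/* ... read and process queued data here ... */
	release_sock(sk);
}

static void example_data_ready(struct sock *sk)
{
	struct example_conn *conn;

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn)
		queue_work(conn->wq, &conn->recvwork);
	read_unlock_bh(&sk->sk_callback_lock);
}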
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 791453195099..850a018aefb9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,8 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ struct work_struct recvwork;
+ bool queue_recv;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 797abf4f5399..d95f4bcdeb2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -83,7 +83,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
"%s " dbg_fmt, __func__, ##arg); \
} while (0);
-inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
+#define ISCSI_CMD_COMPL_WAIT 5
+
+inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
@@ -91,7 +93,17 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
if (ihost->workq)
queue_work(ihost->workq, &conn->xmitwork);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);
+
+inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
+ queue_work(ihost->workq, &conn->recvwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);
static void __iscsi_update_cmdsn(struct iscsi_session *session,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -472,12 +484,18 @@ static void iscsi_free_task(struct iscsi_task *task)
}
}
-void __iscsi_get_task(struct iscsi_task *task)
+bool iscsi_get_task(struct iscsi_task *task)
{
- refcount_inc(&task->refcount);
+ return refcount_inc_not_zero(&task->refcount);
}
-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+EXPORT_SYMBOL_GPL(iscsi_get_task);
+/**
+ * __iscsi_put_task - drop the refcount on a task
+ * @task: iscsi_task to drop the refcount on
+ *
+ * The back_lock must be held when calling in case it frees the task.
+ */
void __iscsi_put_task(struct iscsi_task *task)
{
if (refcount_dec_and_test(&task->refcount))
@@ -489,10 +507,11 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ if (refcount_dec_and_test(&task->refcount)) {
+ spin_lock_bh(&session->back_lock);
+ iscsi_free_task(task);
+ spin_unlock_bh(&session->back_lock);
+ }
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
@@ -557,16 +576,19 @@ static bool cleanup_queued_task(struct iscsi_task *task)
struct iscsi_conn *conn = task->conn;
bool early_complete = false;
- /* Bad target might have completed task while it was still running */
+ /*
+ * We might have raced where we handled a R2T early and got a response
+ * but have not yet taken the task off the requeue list, then a TMF or
+ * recovery happened and so we can still see it here.
+ */
if (task->state == ISCSI_TASK_COMPLETED)
early_complete = true;
if (!list_empty(&task->running)) {
list_del_init(&task->running);
/*
- * If it's on a list but still running, this could be from
- * a bad target sending a rsp early, cleanup from a TMF, or
- * session recovery.
+ * If it's on a list but still running this could be cleanup
+ * from a TMF or session recovery.
*/
if (task->state == ISCSI_TASK_RUNNING ||
task->state == ISCSI_TASK_COMPLETED)
@@ -587,20 +609,17 @@ static bool cleanup_queued_task(struct iscsi_task *task)
}
/*
- * session frwd lock must be held and if not called for a task that is still
- * pending or from the xmit thread, then xmit thread must be suspended
+ * session back and frwd lock must be held and if not called for a task that
+ * is still pending or from the xmit thread, then xmit thread must be suspended
*/
-static void fail_scsi_task(struct iscsi_task *task, int err)
+static void __fail_scsi_task(struct iscsi_task *task, int err)
{
struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
int state;
- spin_lock_bh(&conn->session->back_lock);
- if (cleanup_queued_task(task)) {
- spin_unlock_bh(&conn->session->back_lock);
+ if (cleanup_queued_task(task))
return;
- }
if (task->state == ISCSI_TASK_PENDING) {
/*
@@ -619,7 +638,15 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
sc->result = err << 16;
scsi_set_resid(sc, scsi_bufflen(sc));
iscsi_complete_task(task, state);
- spin_unlock_bh(&conn->session->back_lock);
+}
+
+static void fail_scsi_task(struct iscsi_task *task, int err)
+{
+ struct iscsi_session *session = task->conn->session;
+
+ spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, err);
+ spin_unlock_bh(&session->back_lock);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -668,12 +695,18 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
return 0;
}
+/**
+ * iscsi_alloc_mgmt_task - allocate and setup a mgmt task.
+ * @conn: iscsi conn that the task will be sent on.
+ * @hdr: iscsi pdu that will be sent.
+ * @data: buffer for data segment if needed.
+ * @data_size: length of data in bytes.
+ */
static struct iscsi_task *
-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_host *ihost = shost_priv(session->host);
uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
struct iscsi_task *task;
itt_t itt;
@@ -754,28 +787,57 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
- if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
- WRITE_ONCE(conn->ping_task, task);
+ return task;
+
+free_task:
+ iscsi_put_task(task);
+ return NULL;
+}
+
+/**
+ * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task.
+ * @task: iscsi task to send.
+ *
+ * On failure this returns a non-zero error code, and the driver must free
+ * the task with iscsi_put_task;
+ */
+static int iscsi_send_mgmt_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(conn->session->host);
+ int rc = 0;
if (!ihost->workq) {
- if (iscsi_prep_mgmt_task(conn, task))
- goto free_task;
+ rc = iscsi_prep_mgmt_task(conn, task);
+ if (rc)
+ return rc;
- if (session->tt->xmit_task(task))
- goto free_task;
+ rc = session->tt->xmit_task(task);
+ if (rc)
+ return rc;
} else {
list_add_tail(&task->running, &conn->mgmtqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
- return task;
+ return 0;
+}
-free_task:
- /* regular RX path uses back_lock */
- spin_lock(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&session->back_lock);
- return NULL;
+static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_task *task;
+ int rc;
+
+ task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
+ if (!task)
+ return -ENOMEM;
+
+ rc = iscsi_send_mgmt_task(task);
+ if (rc)
+ iscsi_put_task(task);
+ return rc;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -786,7 +848,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
int err = 0;
spin_lock_bh(&session->frwd_lock);
- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
spin_unlock_bh(&session->frwd_lock);
return err;
@@ -959,7 +1021,6 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
if (!rhdr) {
if (READ_ONCE(conn->ping_task))
return -EINVAL;
- WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
}
memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -973,10 +1034,18 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
} else
hdr.ttt = RESERVED_ITT;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!task) {
+ task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
+ return -ENOMEM;
+
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, task);
+
+ if (iscsi_send_mgmt_task(task)) {
if (!rhdr)
WRITE_ONCE(conn->ping_task, NULL);
+ iscsi_put_task(task);
+
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
@@ -1434,11 +1503,17 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
{
int rc;
- spin_lock_bh(&conn->session->back_lock);
-
if (!conn->task) {
- /* Take a ref so we can access it after xmit_task() */
- __iscsi_get_task(task);
+ /*
+ * Take a ref so we can access it after xmit_task().
+ *
+ * This should never fail because the failure paths will have
+ * stopped the xmit thread.
+ */
+ if (!iscsi_get_task(task)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
} else {
/* Already have a ref from when we failed to send it last call */
conn->task = NULL;
@@ -1449,7 +1524,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* case a bad target sends a cmd rsp before we have handled the task.
*/
if (was_requeue)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
/*
* Do this after dropping the extra ref because if this was a requeue
@@ -1461,10 +1536,8 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* task and get woken up again.
*/
conn->task = task;
- spin_unlock_bh(&conn->session->back_lock);
return -ENODATA;
}
- spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
@@ -1472,20 +1545,16 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
- }
- /* regular RX path uses back_lock */
- spin_lock(&conn->session->back_lock);
- if (rc && task->state == ISCSI_TASK_RUNNING) {
+ } else {
/*
* get an extra ref that is released next time we access it
* as conn->task above.
*/
- __iscsi_get_task(task);
+ iscsi_get_task(task);
conn->task = task;
}
- __iscsi_put_task(task);
- spin_unlock(&conn->session->back_lock);
+ iscsi_put_task(task);
return rc;
}
@@ -1513,7 +1582,7 @@ void iscsi_requeue_task(struct iscsi_task *task)
*/
iscsi_put_task(task);
}
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1567,6 +1636,28 @@ check_mgmt:
goto done;
}
+check_requeue:
+ while (!list_empty(&conn->requeue)) {
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ task = list_entry(conn->requeue.next, struct iscsi_task,
+ running);
+
+ if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+ break;
+
+ list_del_init(&task->running);
+ rc = iscsi_xmit_task(conn, task, true);
+ if (rc)
+ goto done;
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+
/* process pending command queue */
while (!list_empty(&conn->cmdqueue)) {
task = list_entry(conn->cmdqueue.next, struct iscsi_task,
@@ -1594,28 +1685,10 @@ check_mgmt:
*/
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
+ if (!list_empty(&conn->requeue))
+ goto check_requeue;
}
- while (!list_empty(&conn->requeue)) {
- /*
- * we always do fastlogout - conn stop code will clean up.
- */
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
- break;
-
- task = list_entry(conn->requeue.next, struct iscsi_task,
- running);
-
- if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
- break;
-
- list_del_init(&task->running);
- rc = iscsi_xmit_task(conn, task, true);
- if (rc)
- goto done;
- if (!list_empty(&conn->mgmtqueue))
- goto check_mgmt;
- }
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1782,7 +1855,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
}
} else {
list_add_tail(&task->running, &conn->cmdqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
session->queued_cmdsn++;
@@ -1843,11 +1916,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
__must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
- struct iscsi_task *task;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (!task) {
+ if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -1895,6 +1965,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
struct iscsi_task *task;
int i;
+restart_cmd_loop:
spin_lock_bh(&session->back_lock);
for (i = 0; i < session->cmds_max; i++) {
task = session->cmds[i];
@@ -1903,22 +1974,25 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
if (lun != -1 && lun != task->sc->device->lun)
continue;
-
- __iscsi_get_task(task);
- spin_unlock_bh(&session->back_lock);
+ /*
+ * The cmd is completing but if this is called from an eh
+ * callout path then when we return scsi-ml owns the cmd. Wait
+ * for the completion path to finish freeing the cmd.
+ */
+ if (!iscsi_get_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ spin_lock_bh(&session->frwd_lock);
+ goto restart_cmd_loop;
+ }
ISCSI_DBG_SESSION(session,
"failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
- fail_scsi_task(task, error);
-
- spin_unlock_bh(&session->frwd_lock);
- iscsi_put_task(task);
- spin_lock_bh(&session->frwd_lock);
-
- spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, error);
+ __iscsi_put_task(task);
}
-
spin_unlock_bh(&session->back_lock);
}
@@ -1943,7 +2017,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
/**
* iscsi_suspend_tx - suspend iscsi_data_xmit
- * @conn: iscsi conn tp stop processing IO on.
+ * @conn: iscsi conn to stop processing IO on.
*
* This function sets the suspend bit to prevent iscsi_data_xmit
* from sending new IO, and if work is queued on the xmit thread
@@ -1956,15 +2030,30 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
- flush_workqueue(ihost->workq);
+ flush_work(&conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
+}
+
+/**
+ * iscsi_suspend_rx - Prevent recvwork from running again.
+ * @conn: iscsi conn to stop.
+ */
+void iscsi_suspend_rx(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ if (ihost->workq)
+ flush_work(&conn->recvwork);
}
+EXPORT_SYMBOL_GPL(iscsi_suspend_rx);
/*
* We want to make sure a ping is in flight. It has timed out.
@@ -2008,7 +2097,16 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
spin_unlock(&session->back_lock);
goto done;
}
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * Racing with the completion path right now, so give it more
+ * time so that path can complete it like normal.
+ */
+ rc = BLK_EH_RESET_TIMER;
+ task = NULL;
+ spin_unlock(&session->back_lock);
+ goto done;
+ }
spin_unlock(&session->back_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -2257,6 +2355,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
+completion_check:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
/*
@@ -2296,13 +2395,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ goto completion_check;
+ }
+
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
conn = session->leadconn;
iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
-
- ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
- __iscsi_get_task(task);
spin_unlock(&session->back_lock);
if (task->state == ISCSI_TASK_PENDING) {
@@ -2828,11 +2934,12 @@ static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
+ * @is_shutdown: true if called from a driver shutdown callout
*
* If there are any sessions left, this will initiate the removal and wait
* for the completion.
*/
-void iscsi_host_remove(struct Scsi_Host *shost)
+void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
{
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
@@ -2841,7 +2948,11 @@ void iscsi_host_remove(struct Scsi_Host *shost)
ihost->state = ISCSI_HOST_REMOVED;
spin_unlock_irqrestore(&ihost->lock, flags);
- iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ if (!is_shutdown)
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ else
+ iscsi_host_for_each_session(shost, iscsi_force_destroy_session);
+
wait_event_interruptible(ihost->session_removal_wq,
ihost->num_sessions == 0);
if (signal_pending(current))
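
The libiscsi hunks above replace an unconditional refcount_inc() with refcount_inc_not_zero(), so a lookup path cannot revive a task whose last reference is already being dropped. A minimal sketch of that get/put pattern on a placeholder object, not the driver's iscsi_task:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_task {
	refcount_t refcount;
	/* ... payload ... */
};

/* Illustrative only: a get that can fail if the object is being freed. */
static bool example_get_task(struct example_task *task)
{
	return refcount_inc_not_zero(&task->refcount);
}

static void example_put_task(struct example_task *task)
{
	if (refcount_dec_and_test(&task->refcount))
		kfree(task);	/* last reference: free the object */
}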
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 883005757ddb..c182aa83f2c9 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -558,7 +558,11 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
return 0;
}
task->last_xfer = jiffies;
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ /* Let the path that got the early rsp complete it */
+ return 0;
+ }
tcp_conn = conn->dd_data;
rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 260e735d06fa..fa2209080cc2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -175,13 +175,13 @@ static enum sas_device_type to_dev_type(struct discover_resp *dr)
return dr->attached_dev_type;
}
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ struct smp_disc_resp *disc_resp)
{
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
- struct smp_resp *resp = rsp;
- struct discover_resp *dr = &resp->disc;
+ struct discover_resp *dr = &disc_resp->disc;
struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -198,7 +198,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
BUG_ON(!phy->phy);
}
- switch (resp->result) {
+ switch (disc_resp->result) {
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
break;
@@ -347,12 +347,13 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
}
#define DISCOVER_REQ_SIZE 16
-#define DISCOVER_RESP_SIZE 56
+#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp)
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
- u8 *disc_resp, int single)
+ struct smp_disc_resp *disc_resp,
+ int single)
{
- struct discover_resp *dr;
+ struct discover_resp *dr = &disc_resp->disc;
int res;
disc_req[9] = single;
@@ -361,7 +362,6 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
- dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
pr_notice("Found loopback topology, just ignore it!\n");
return 0;
@@ -375,7 +375,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
struct expander_device *ex = &dev->ex_dev;
int res = 0;
u8 *disc_req;
- u8 *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
@@ -429,27 +429,14 @@ static int sas_expander_discover(struct domain_device *dev)
#define MAX_EXPANDER_PHYS 128
-static void ex_assign_report_general(struct domain_device *dev,
- struct smp_resp *resp)
-{
- struct report_general_resp *rg = &resp->rg;
-
- dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
- dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
- dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
- dev->ex_dev.t2t_supp = rg->t2t_supp;
- dev->ex_dev.conf_route_table = rg->conf_route_table;
- dev->ex_dev.configuring = rg->configuring;
- memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
-}
-
#define RG_REQ_SIZE 8
-#define RG_RESP_SIZE 32
+#define RG_RESP_SIZE sizeof(struct smp_rg_resp)
static int sas_ex_general(struct domain_device *dev)
{
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
+ struct report_general_resp *rg;
int res;
int i;
@@ -480,7 +467,15 @@ static int sas_ex_general(struct domain_device *dev)
goto out;
}
- ex_assign_report_general(dev, rg_resp);
+ rg = &rg_resp->rg;
+ dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
+ dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
+ dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
+ dev->ex_dev.conf_route_table = rg->conf_route_table;
+ dev->ex_dev.configuring = rg->configuring;
+ memcpy(dev->ex_dev.enclosure_logical_id,
+ rg->enclosure_logical_id, 8);
if (dev->ex_dev.configuring) {
pr_debug("RG: ex %016llx self-configuring...\n",
@@ -681,10 +676,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
-#define RPS_RESP_SIZE 60
+#define RPS_RESP_SIZE sizeof(struct smp_rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp)
+ struct smp_rps_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -1657,7 +1652,7 @@ out_err:
/* ---------- Domain revalidation ---------- */
static int sas_get_phy_discover(struct domain_device *dev,
- int phy_id, struct smp_resp *disc_resp)
+ int phy_id, struct smp_disc_resp *disc_resp)
{
int res;
u8 *disc_req;
@@ -1673,10 +1668,8 @@ static int sas_get_phy_discover(struct domain_device *dev,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out;
- else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
res = disc_resp->result;
- goto out;
- }
out:
kfree(disc_req);
return res;
@@ -1686,7 +1679,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
int phy_id, int *pcc)
{
int res;
- struct smp_resp *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
@@ -1704,19 +1697,17 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type)
{
int res;
- struct smp_resp *disc_resp;
- struct discover_resp *dr;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
- dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
SAS_ADDR_SIZE);
- *type = to_dev_type(dr);
+ *type = to_dev_type(&disc_resp->disc);
if (*type == 0)
memset(sas_addr, 0, SAS_ADDR_SIZE);
}
@@ -1760,7 +1751,7 @@ static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
{
int res;
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
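
The sas_expander changes above size the DISCOVER/REPORT GENERAL/REPORT PHY SATA response buffers from the response structures instead of hard-coded byte counts. A tiny sketch of the idea with a placeholder struct, not the libsas definition:

#include <linux/types.h>

/* Illustrative only: buffer size derived from the response layout. */
struct example_disc_resp {
	u8	frame_type;
	u8	function;
	u8	result;
	u8	reserved[53];
};

#define EXAMPLE_DISC_RESP_SIZE	sizeof(struct example_disc_resp)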
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index dc35f0f8eae3..e4f77072a58d 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -531,6 +531,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
@@ -544,6 +545,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
@@ -558,6 +560,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
@@ -571,6 +574,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
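
The sas_init hunks above bracket the queued phy reset/enable with a runtime-PM reference so the controller is resumed before the work runs and may suspend again afterwards. A minimal sketch of that bracket around a placeholder operation:

#include <linux/pm_runtime.h>
#include <linux/device.h>

/* Illustrative only: hold a runtime-PM usage count across an operation. */
static int example_do_with_device_active(struct device *dev)
{
	int rc;

	pm_runtime_get_sync(dev);	/* resume the device if suspended */
	rc = 0;				/* ... perform the operation ... */
	pm_runtime_put_sync(dev);	/* drop the usage count again */

	return rc;
}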
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 13d0ffaada93..8d0ad3abc7b5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -83,7 +83,7 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
int sas_ex_phy_discover(struct domain_device *dev, int single);
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp);
+ struct smp_rps_resp *rps_resp);
int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index da9070cdad91..e6a083d098a1 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -48,9 +48,6 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
- cmnd for menlo needs nearly twice as for firmware
- downloads using bsg */
#define LPFC_DEFAULT_XPSGL_SIZE 256
#define LPFC_MAX_SG_TABLESIZE 0xffff
@@ -604,7 +601,6 @@ struct lpfc_vport {
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
-#define FC_RSCN_MEMENTO 0x4000000/* RSCN cmd processed */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -987,7 +983,8 @@ struct lpfc_hba {
u8 last_seq, u8 cr_cx_cmd);
void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag,
- u8 ulp_class, u16 cqid, bool ia);
+ u8 ulp_class, u16 cqid, bool ia,
+ bool wqec);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
@@ -1439,8 +1436,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- int wait_4_mlo_maint_flg;
- wait_queue_head_t wait_4_mlo_m_q;
/* data structure used for latency data collection */
#define LPFC_NO_BUCKET 0
#define LPFC_LINEAR_BUCKET 1
@@ -1475,8 +1470,6 @@ struct lpfc_hba {
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
- uint8_t menlo_flag; /* menlo generic flags */
-#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3caaa7c4af48..09cf2cd0ae60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -922,25 +922,6 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the Menlo Maintenance sli flag.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->sli.sli_flag & LPFC_MENLO_MAINT));
-}
-
-/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -1109,10 +1090,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Menlo Maint Mode\n");
- else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -2827,7 +2805,6 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
-static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR_RO(lpfc_drvr_version);
static DEVICE_ATTR_RO(lpfc_enable_fip);
@@ -6220,7 +6197,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version.attr,
&dev_attr_link_state.attr,
&dev_attr_num_discovered_ports.attr,
- &dev_attr_menlo_mgmt_mode.attr,
&dev_attr_lpfc_drvr_version.attr,
&dev_attr_lpfc_enable_fip.attr,
&dev_attr_lpfc_temp_sensor.attr,
@@ -7396,7 +7372,6 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
case PCI_DEVICE_ID_ZEPHYR_DCSP:
- case PCI_DEVICE_ID_HORNET:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 676e7d54b97a..9be3bb01a8ec 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -88,17 +88,9 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct bsg_job *set_job; /* job waiting for this iocb to finish */
@@ -106,7 +98,6 @@ struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -3502,15 +3493,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -4992,283 +4974,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job)
return rc;
}
-/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
- * @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
- *
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
- **/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
-{
- struct bsg_job_data *dd_data;
- struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
-
- dd_data = cmdiocbq->context_un.dd_data;
- cmp = cmdiocbq->cmd_dmabuf;
- bmp = cmdiocbq->bpl_dmabuf;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
-
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- bsg_reply = job->reply;
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
- }
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- /* Copy the job data or set the failing status for the job */
-
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
-
- menlo_resp = (struct menlo_response *)
- bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- bsg_reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
- }
-
- }
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
-
- /* Complete the job if active */
-
- if (job) {
- bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
- }
-
- return;
-}
-
-/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
- * @job: fc_bsg_job to handle
- *
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
- **/
-static int
-lpfc_menlo_cmd(struct bsg_job *job)
-{
- struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct fc_bsg_request *bsg_request = job->request;
- struct fc_bsg_reply *bsg_reply = job->reply;
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
- int rc = 0;
- struct menlo_command *menlo_cmd;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
-
- /* in case no data is returned return just the return code */
- bsg_reply->reply_payload_rcv_len = 0;
-
- if (job->request_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (job->reply_len < sizeof(*bsg_reply) +
- sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
- }
-
- menlo_cmd = (struct menlo_command *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
-
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
- }
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_dd;
- }
-
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
- }
-
- INIT_LIST_HEAD(&bmp->list);
-
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
- }
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
-
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
- }
-
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
- }
-
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context_un.dd_data = dd_data;
- cmdiocbq->cmd_dmabuf = cmp;
- cmdiocbq->bpl_dmabuf = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
- }
-
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- bsg_reply->result = rc;
- job->dd_data = NULL;
- return rc;
-}
-
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
@@ -5823,10 +5528,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
- break;
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
@@ -5979,31 +5680,6 @@ lpfc_bsg_timeout(struct bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 749d6c43cfce..3c04ca2d7455 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -33,8 +33,6 @@
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
@@ -131,16 +129,6 @@ struct dfc_mbox_req {
uint32_t extSeqNum;
};
-/* Used for menlo command or menlo data. The xri is only used for menlo data */
-struct menlo_command {
- uint32_t cmd;
- uint32_t xri;
-};
-
-struct menlo_response {
- uint32_t xri; /* return the xri of the iocb exchange */
-};
-
/*
* macros and data structures for handling sli-config mailbox command
* pass-through support, this header file is shared between user and
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f5d74958b664..bcad91204328 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -370,7 +370,7 @@ void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
u8 cr_cx_cmd);
void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
- bool ia);
+ bool ia, bool wqec);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 7b24c932e812..5037ea09a810 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2607,8 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2688,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2826,8 +2826,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -3060,8 +3060,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3fababb7c181..9e69de9eb992 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1790,18 +1790,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
- /* The new_ndlp is replacing ndlp totally, so we need
- * to put ndlp on UNUSED list and try to free it.
+ /* The ndlp doesn't have a portname yet, but does have an
+ * NPort ID. The new_ndlp portname matches the Rport's
+ * portname. Reinstantiate the new_ndlp and reset the ndlp.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
/* Two ndlps cannot have the same did on the nodelist.
- * Note: for this case, ndlp has a NULL WWPN so setting
- * the nlp_fc4_type isn't required.
- * The keepDID and keep_nlp_fc4_type need to be swapped
- * because ndlp is in flight with no WWPN.
*/
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
@@ -1816,9 +1818,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did and the fc4
- * type must be transferred because the ndlp is in
- * flight.
+ /* The ndlp and new_ndlp both have WWPNs but are swapping
+ * NPort Ids and attributes.
*/
ndlp->nlp_DID = keepDID;
ndlp->nlp_fc4_type = keep_nlp_fc4_type;
@@ -1886,7 +1887,6 @@ lpfc_end_rscn(struct lpfc_vport *vport)
else {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_MODE;
- vport->fc_flag |= FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
}
}
@@ -2434,14 +2434,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 local_nlp_type, elscmd;
/*
- * If discovery was kicked off from RSCN mode,
- * the FC4 types supported from a
+ * If we are in RSCN mode, the FC4 types supported from a
* previous GFT_ID command may not be accurate. So, if we
* are an NVME Initiator, always look for the possibility of
* the remote NPort being an NVME Target.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & (FC_RSCN_MODE | FC_RSCN_MEMENTO) &&
+ vport->fc_flag & FC_RSCN_MODE &&
vport->nvmei_support)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
@@ -4571,15 +4570,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_FLOGI) {
- if (PCI_DEVICE_ID_HORNET ==
- phba->pcidev->device) {
- phba->fc_topology = LPFC_TOPOLOGY_LOOP;
- phba->pport->fc_myDID = 0;
- phba->alpa_map[0] = 0;
- phba->alpa_map[1] = 0;
- }
- }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@@ -7915,7 +7905,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
- vport->fc_flag &= ~FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
if (rscn_cnt) {
cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
@@ -7965,7 +7954,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE;
- vport->fc_flag &= ~FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/* Indicate we are done walking fc_rscn_id_list on this vport */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index fb36f26170e4..2645def612e6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1354,8 +1354,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MEMENTO | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
@@ -3763,18 +3762,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
phba->link_events++;
- if ((attn_type == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3788,15 +3777,13 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
@@ -3816,58 +3803,25 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
- attn_type == LPFC_ATT_LINK_UP) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7b8cf678abb5..071983e2cdfe 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1728,7 +1728,6 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
@@ -1773,7 +1772,6 @@ struct lpfc_fdmi_reg_portattr {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
-#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -3074,7 +3072,6 @@ struct lpfc_mbx_read_top {
#define lpfc_mbx_read_top_topology_WORD word3
#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
/* store the LILP AL_PA position map into */
struct ulp_bde64 lilpBde64;
#define LPFC_ALPA_MAP_SIZE 128
@@ -4423,11 +4420,4 @@ lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
ulp_word4 == IOERR_SLI_DOWN));
}
-#define MENLO_TRANSPORT_TYPE 0xfe
-#define MENLO_CONTEXT 0
-#define MENLO_PU 3
-#define MENLO_TIMEOUT 30
-#define SETVAR_MLOMNT 0x103107
-#define SETVAR_MLORST 0x103007
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f024415731ac..4527fef23ae7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4736,7 +4736,6 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
-#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index a1b9be245560..0b1616e93cf4 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,8 +60,6 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 750dd1e9f2cc..c69c5a0979ec 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -375,6 +375,9 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+ if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
+ phba->sli4_hba.fawwpn_flag &=
+ ~LPFC_FAWWPN_FABRIC;
lpfc_printf_log(phba, KERN_INFO,
LOG_SLI | LOG_DISCOVERY | LOG_ELS,
"2701 FA-PWWN change WWPN from %llx to "
@@ -2682,11 +2685,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
- case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe",
- "Obsolete, Unsupported FCoE Adapter"};
- GE = 1;
- break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
"Obsolete, Unsupported Fibre Channel Adapter"};
@@ -7692,7 +7690,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
/* Initialize the wait queue head for the kernel thread */
init_waitqueue_head(&phba->work_waitq);
@@ -7776,13 +7773,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- }
-
if (!phba->sli.sli3_ring)
phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
sizeof(struct lpfc_sli_ring),
@@ -7958,6 +7948,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -9975,7 +9967,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"configured on\n");
phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
} else {
- phba->sli4_hba.fawwpn_flag = 0;
+ /* Clear FW configured flag, preserve driver flag */
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
}
phba->sli4_hba.conf_trunk =
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index cd10ee6482fc..152245f7cacc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2824,6 +2824,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqep->word0 = 0;
bf_set(lpfc_wcqe_c_status, wcqep, stat);
wcqep->parameter = param;
+ wcqep->total_data_placed = 0;
wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c0ee0b39075d..f7cfac0da9b6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -722,7 +722,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_async_xchg_ctx *ctxp;
- uint32_t status, result, op, start_clean, logerr;
+ uint32_t status, result, op, logerr;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
@@ -820,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, cmd_flag);
- memset(((char *)cmdwqe) + start_clean, 0,
- (sizeof(struct lpfc_iocbq) - start_clean));
+ memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
@@ -3337,46 +3335,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
-/**
- * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-static void
-lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the I/O associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3386,7 +3344,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_iocbq *abts_wqeq;
struct lpfc_nodelist *ndlp;
unsigned long flags;
- u8 opt;
+ bool ia;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3426,7 +3384,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
abts_wqeq = ctxp->abort_wqeq;
ctxp->state = LPFC_NVME_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3472,7 +3430,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
+ abts_wqeq->iotag, CLASS3,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, true);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ba5e4016262e..084c0f9fdc3a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5456,7 +5456,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
}
- atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 80ac3a051c19..608016725db9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2003,10 +2003,12 @@ initpath:
sync_buf->cmd_flag |= LPFC_IO_CMF;
ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
- if (ret_val)
+ if (ret_val) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
ret_val);
+ __lpfc_sli_release_iocbq(phba, sync_buf);
+ }
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret_val;
@@ -5263,7 +5265,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
- phba->sli4_hba.fawwpn_flag = 0;
+ /* Preserve FA-PWWN expectation */
+ phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -6052,6 +6055,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
/* obtain link type and link number via READ_CONFIG */
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
lpfc_sli4_read_config(phba);
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
goto retrieve_ppname;
@@ -10216,16 +10223,6 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
- case CMD_GEN_REQUEST64_CR:
- case CMD_GEN_REQUEST64_CX:
- if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_RCTL_DD_UNSOL_CMD) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Type !=
- MENLO_TRANSPORT_TYPE))
-
- goto iocb_busy;
- break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
@@ -10549,6 +10546,7 @@ __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ cmd->ulpPU = PARM_NPIV_DID;
}
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
@@ -10855,7 +10853,8 @@ lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
static void
__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
- u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
{
IOCB_t *icmd = NULL;
@@ -10884,7 +10883,8 @@ __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
static void
__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
- u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
{
union lpfc_wqe128 *wqe;
@@ -10911,6 +10911,8 @@ __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
/* Word 11 */
+ if (wqec)
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
}
@@ -10918,10 +10920,10 @@ __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
void
lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
- bool ia)
+ bool ia, bool wqec)
{
phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
- cqid, ia);
+ cqid, ia, wqec);
}
/**
@@ -12199,7 +12201,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
cmdiocb->iocb.ulpClass,
- LPFC_WQE_CQ_ID_DEFAULT, ia);
+ LPFC_WQE_CQ_ID_DEFAULT, ia, false);
abtsiocbp->vport = vport;
@@ -12659,7 +12661,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
iocbq->iocb.ulpClass, cqid,
- ia);
+ ia, false);
abtsiocbq->vport = vport;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 0af6860b8936..cd33dfec758c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -355,7 +355,6 @@ struct lpfc_sli {
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2ab6f7db64d8..63eba9928e4b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.4"
+#define LPFC_DRIVER_VERSION "14.2.0.5"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 2a339d4a7e9d..157c3bdb50be 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(cmd_per_lun,
* This would result in non-disk devices being skipped during driver load
* time. These can be later added though, using /proc/scsi/scsi
*/
-static unsigned int megaraid_fast_load = 0;
+static unsigned int megaraid_fast_load;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
"Faster loading of the driver, skips physical devices! (default=0)");
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c95360a3c186..a3e117a4b8e7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3195,6 +3195,9 @@ static int megasas_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
offset += map->nr_queues;
+ /* we never use READ queue, so can't cheat blk-mq */
+ shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
+
/* Setup Poll hctx */
map = &shost->tag_set.map[HCTX_TYPE_POLL];
map->nr_queues = instance->iopoll_q_count;
@@ -3947,9 +3950,9 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- atomic_read(&instance->adprecovery));
+ atomic_read(&instance->adprecovery));
return ;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 5b5885d9732b..e48d4261d0bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3199,7 +3199,6 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
- u8 cmd_type;
u16 pd_index = 0;
u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
@@ -3225,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- switch (cmd_type = megasas_cmd_type(scp)) {
+ switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 322d3ad38159..84b541a57b7b 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -38,7 +38,7 @@
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
@@ -1882,11 +1882,6 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
goto out_release;
}
- /* Old junk for root discovery, that will die ultimately */
-#if !defined(MODULE)
- note_scsi_host(mesh, mesh_host);
-#endif
-
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0e1cb4aa4ca2..0935b2e80662 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -66,12 +66,14 @@ extern atomic64_t event_counter;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
+#define MPI3MR_MAX_SECTORS 2048
+
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
/* Definitions for MAX values for shost */
-#define MPI3MR_MAX_CMDS_LUN 7
+#define MPI3MR_MAX_CMDS_LUN 128
#define MPI3MR_MAX_CDB_LENGTH 32
/* Admin queue management definitions */
@@ -333,6 +335,12 @@ struct mpi3mr_ioc_facts {
u8 sge_mod_mask;
u8 sge_mod_value;
u8 sge_mod_shift;
+ u8 max_dev_per_tg;
+ u16 max_io_throttle_group;
+ u16 io_throttle_data_length;
+ u16 io_throttle_low;
+ u16 io_throttle_high;
+
};
/**
@@ -425,6 +433,31 @@ struct mpi3mr_intr_info {
};
/**
+ * struct mpi3mr_throttle_group_info - Throttle group info
+ *
+ * @io_divert: Flag indicates io divert is on or off for the TG
+ * @need_qd_reduction: Flag to indicate QD reduction is needed
+ * @qd_reduction: Queue Depth reduction in units of 10%
+ * @fw_qd: QueueDepth value reported by the firmware
+ * @modified_qd: Modified QueueDepth value due to throttling
+ * @id: Throttle Group ID.
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @pend_large_data_sz: Counter to track pending large data
+ */
+struct mpi3mr_throttle_group_info {
+ u8 io_divert;
+ u8 need_qd_reduction;
+ u8 qd_reduction;
+ u16 fw_qd;
+ u16 modified_qd;
+ u16 id;
+ u32 high;
+ u32 low;
+ atomic_t pend_large_data_sz;
+};
+
+/**
* struct tgt_dev_sas_sata - SAS/SATA device specific
* information cached from firmware given data
*
@@ -457,22 +490,33 @@ struct tgt_dev_pcie {
};
/**
- * struct tgt_dev_volume - virtual device specific information
+ * struct tgt_dev_vd - virtual device specific information
* cached from firmware given data
*
* @state: State of the VD
+ * @tg_qd_reduction: Queue Depth reduction in units of 10%
+ * @tg_id: VDs throttle group ID
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @tg: Pointer to throttle group info
*/
-struct tgt_dev_volume {
+struct tgt_dev_vd {
u8 state;
+ u8 tg_qd_reduction;
+ u16 tg_id;
+ u32 tg_high;
+ u32 tg_low;
+ struct mpi3mr_throttle_group_info *tg;
};
+
/**
* union _form_spec_inf - union of device specific information
*/
union _form_spec_inf {
struct tgt_dev_sas_sata sas_sata_inf;
struct tgt_dev_pcie pcie_inf;
- struct tgt_dev_volume vol_inf;
+ struct tgt_dev_vd vd_inf;
};
@@ -490,6 +534,7 @@ union _form_spec_inf {
* @dev_type: SAS/SATA/PCIE device type
* @is_hidden: Should be exposed to upper layers or not
* @host_exposed: Already exposed to host or not
+ * @io_throttle_enabled: I/O throttling needed or not
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
* @dev_spec: Device type specific information
@@ -506,6 +551,7 @@ struct mpi3mr_tgt_dev {
u8 dev_type;
u8 is_hidden;
u8 host_exposed;
+ u8 io_throttle_enabled;
u16 q_depth;
u64 wwid;
union _form_spec_inf dev_spec;
@@ -557,6 +603,9 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @io_divert: Flag indicates io divert is on or off for the dev
+ * @throttle_group: Pointer to throttle group info
* @tgt_dev: Internal target device pointer
* @pend_count: Counter to track pending I/Os during error
* handling
@@ -570,6 +619,9 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 io_throttle_enabled;
+ u8 io_divert;
+ struct mpi3mr_throttle_group_info *throttle_group;
struct mpi3mr_tgt_dev *tgt_dev;
u32 pend_count;
};
@@ -796,6 +848,12 @@ struct scmd_priv {
* @logdata_buf: Circular buffer to store log data entries
* @logdata_buf_idx: Index of entry in buffer to store
* @logdata_entry_sz: log data entry size
+ * @pend_large_data_sz: Counter to track pending large data
+ * @io_throttle_data_length: I/O size to track in 512b blocks
+ * @io_throttle_high: I/O size to start throttle in 512b blocks
+ * @io_throttle_low: I/O size to stop throttle in 512b blocks
+ * @num_io_throttle_group: Maximum number of throttle groups
+ * @throttle_groups: Pointer to throttle group info structures
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -960,6 +1018,13 @@ struct mpi3mr_ioc {
u8 *logdata_buf;
u16 logdata_buf_idx;
u16 logdata_entry_sz;
+
+ atomic_t pend_large_data_sz;
+ u32 io_throttle_data_length;
+ u32 io_throttle_high;
+ u32 io_throttle_low;
+ u16 num_io_throttle_group;
+ struct mpi3mr_throttle_group_info *throttle_groups;
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index f1d4ea8ba989..0866dfd43318 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2785,6 +2785,27 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.shutdown_timeout =
le16_to_cpu(facts_data->shutdown_timeout);
+ mrioc->facts.max_dev_per_tg =
+ facts_data->max_devices_per_throttle_group;
+ mrioc->facts.io_throttle_data_length =
+ le16_to_cpu(facts_data->io_throttle_data_length);
+ mrioc->facts.max_io_throttle_group =
+ le16_to_cpu(facts_data->max_io_throttle_group);
+ mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
+ mrioc->facts.io_throttle_high =
+ le16_to_cpu(facts_data->io_throttle_high);
+
+ /* Store in 512b block count */
+ if (mrioc->facts.io_throttle_data_length)
+ mrioc->io_throttle_data_length =
+ (mrioc->facts.io_throttle_data_length * 2 * 4);
+ else
+ /* set the length to 1MB + 1K to disable throttle */
+ mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+
+ mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
+ mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
+
ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
@@ -2798,6 +2819,13 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
mrioc->facts.dma_mask, (facts_flags &
MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ ioc_info(mrioc,
+ "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
+ mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
+ ioc_info(mrioc,
+ "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
+ mrioc->facts.io_throttle_data_length * 4,
+ mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}
/**
@@ -3666,6 +3694,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
int retval = 0;
u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
+ u32 sz;
retry_init:
retval = mpi3mr_bring_ioc_ready(mrioc);
@@ -3691,6 +3720,9 @@ retry_init:
mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+ mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+
if (reset_devices)
mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
MPI3MR_HOST_IOS_KDUMP);
@@ -3760,6 +3792,15 @@ retry_init:
}
}
+ if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
+ kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ if (!mrioc->throttle_groups)
+ goto out_failed_noretry;
+ }
+
retval = mpi3mr_enable_events(mrioc);
if (retval) {
ioc_err(mrioc, "failed to enable events %d\n",
@@ -3981,6 +4022,7 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
+ struct mpi3mr_throttle_group_info *tg;
mrioc->change_count = 0;
mrioc->active_poll_qcount = 0;
@@ -4029,6 +4071,22 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
mpi3mr_memset_op_req_q_buffers(mrioc, i);
}
+
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+ if (mrioc->throttle_groups) {
+ tg = mrioc->throttle_groups;
+ for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
+ tg->id = 0;
+ tg->fw_qd = 0;
+ tg->modified_qd = 0;
+ tg->io_divert = 0;
+ tg->need_qd_reduction = 0;
+ tg->high = 0;
+ tg->low = 0;
+ tg->qd_reduction = 0;
+ atomic_set(&tg->pend_large_data_sz, 0);
+ }
+ }
}
/**
@@ -4663,6 +4721,15 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+ if (mrioc->num_io_throttle_group !=
+ mrioc->facts.max_io_throttle_group) {
+ ioc_err(mrioc,
+ "max io throttle group doesn't match old(%d), new(%d)\n",
+ mrioc->num_io_throttle_group,
+ mrioc->facts.max_io_throttle_group);
+ retval = -EPERM;
+ goto out;
+ }
mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index d8c195b7ca57..bfa1165e23b6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -38,6 +38,8 @@ MODULE_PARM_DESC(logging_level,
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -355,6 +357,50 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to queue a synthetically generated driver event to
+ * the event worker thread; the driver event will be used to
+ * reduce the QD of all VDs in the TG from the worker thread.
+ *
+ * Return: None.
+ */
+static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ struct mpi3mr_fwevt *fwevt;
+ u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
+
+ /*
+ * If the QD reduction event is already queued due to throttle and if
+ * the QD is not restored through device info change event
+ * then don't queue further reduction events
+ */
+ if (tg->fw_qd != tg->modified_qd)
+ return;
+
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
+ return;
+ }
+ *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = sz;
+ tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
+
+ dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
+ tg->id);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
+/**
* mpi3mr_invalidate_devhandles -Invalidate device handles
* @mrioc: Adapter instance reference
*
@@ -373,6 +419,9 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ tgt_priv->io_throttle_enabled = 0;
+ tgt_priv->io_divert = 0;
+ tgt_priv->throttle_group = NULL;
}
}
}
@@ -381,14 +430,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
* mpi3mr_print_scmd - print individual SCSI command
* @rq: Block request
* @data: Adapter instance reference
- * @reserved: N/A. Currently not used
*
* Print the SCSI command details if it is in LLD scope.
*
* Return: true always.
*/
-static bool mpi3mr_print_scmd(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -412,7 +459,6 @@ out:
* mpi3mr_flush_scmd - Flush individual SCSI command
* @rq: Block request
* @data: Adapter instance reference
- * @reserved: N/A. Currently not used
*
* Return the SCSI command to the upper layers if it is in LLD
* scope.
@@ -420,8 +466,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_flush_scmd(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -451,7 +496,6 @@ out:
* mpi3mr_count_dev_pending - Count commands pending for a lun
* @rq: Block request
* @data: SCSI device reference
- * @reserved: Unused
*
* This is an iterator function called for each SCSI command in
* a host and if the command is pending in the LLD for the
@@ -461,8 +505,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_count_dev_pending(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
struct scsi_device *sdev = (struct scsi_device *)data;
struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
@@ -485,7 +528,6 @@ out:
* mpi3mr_count_tgt_pending - Count commands pending for target
* @rq: Block request
* @data: SCSI target reference
- * @reserved: Unused
*
* This is an iterator function called for each SCSI command in
* a host and if the command is pending in the LLD for the
@@ -495,8 +537,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_count_tgt_pending(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
struct scsi_target *starget = (struct scsi_target *)data;
struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
@@ -718,6 +759,35 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
}
/**
+ * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ * @divert_value: 1 or 0
+ *
+ * Accessor to set the io_divert flag to the given value for
+ * each device associated with the given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg, u8 divert_value)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg)
+ tgt_priv->io_divert = divert_value;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
* mpi3mr_print_device_event_notice - print notice related to post processing of
* device event after controller reset.
*
@@ -848,6 +918,7 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
else if (!q_depth)
q_depth = MPI3MR_DEFAULT_SDEV_QD;
retval = scsi_change_queue_depth(sdev, q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
return retval;
}
@@ -934,6 +1005,7 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
* @tgtdev: Target device internal structure
* @dev_pg0: New device page0
+ * @is_added: Flag to indicate the device is just added
*
* Update the information from the device page0 into the driver
* cached target device structure.
@@ -941,10 +1013,11 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* Return: Nothing.
*/
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
+ bool is_added)
{
u16 flags = 0;
- struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
u8 prot_mask = 0;
tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
@@ -959,12 +1032,19 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
flags = le16_to_cpu(dev_pg0->flags);
tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+ if (is_added == true)
+ tgtdev->io_throttle_enabled =
+ (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+
+
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
tgtdev->starget->hostdata;
scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgtdev->io_throttle_enabled;
}
switch (dev_pg0->access_status) {
@@ -1042,10 +1122,32 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
{
struct mpi3_device0_vd_format *vdinf =
&dev_pg0->device_specific.vd_format;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u16 vdinf_io_throttle_group =
+ le16_to_cpu(vdinf->io_throttle_group);
- tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
+ tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
+ tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
+ tgtdev->dev_spec.vd_inf.tg_high =
+ le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
+ tgtdev->dev_spec.vd_inf.tg_low =
+ le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
+ if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
+ tg = mrioc->throttle_groups + vdinf_io_throttle_group;
+ tg->id = vdinf_io_throttle_group;
+ tg->high = tgtdev->dev_spec.vd_inf.tg_high;
+ tg->low = tgtdev->dev_spec.vd_inf.tg_low;
+ tg->qd_reduction =
+ tgtdev->dev_spec.vd_inf.tg_qd_reduction;
+ if (is_added == true)
+ tg->fw_qd = tgtdev->q_depth;
+ tg->modified_qd = tgtdev->q_depth;
+ }
+ tgtdev->dev_spec.vd_inf.tg = tg;
+ if (scsi_tgt_priv_data)
+ scsi_tgt_priv_data->throttle_group = tg;
break;
}
default:
@@ -1142,7 +1244,7 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
if (!tgtdev)
goto out;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
if (!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
if (tgtdev->is_hidden && tgtdev->host_exposed)
@@ -1436,6 +1538,60 @@ static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_update_sdev_qd - Update SCSI device queue depth
+ * @sdev: SCSI device reference
+ * @data: Queue depth reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the QD of each SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
+{
+ u16 *q_depth = (u16 *)data;
+
+ scsi_change_queue_depth(sdev, (int)*q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+}
+
+/**
+ * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to reduce QD for each device associated with the
+ * given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg) {
+ dprint_event_bh(mrioc,
+ "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
+ tgt_priv->perst_id, tgtdev->q_depth,
+ tg->modified_qd);
+ starget_for_each_device(tgtdev->starget,
+ (void *)&tg->modified_qd,
+ mpi3mr_update_sdev_qd);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
* mpi3mr_fwevt_bh - Firmware event bottomhalf handler
* @mrioc: Adapter instance reference
* @fwevt: Firmware event reference
@@ -1492,6 +1648,20 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_logdata_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
+ {
+ struct mpi3mr_throttle_group_info *tg;
+
+ tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
+ dprint_event_bh(mrioc,
+ "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
+ tg->id, tg->need_qd_reduction);
+ if (tg->need_qd_reduction) {
+ mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
+ tg->need_qd_reduction = 0;
+ }
+ break;
+ }
default:
break;
}
@@ -1548,13 +1718,13 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
perst_id = le16_to_cpu(dev_pg0->persistent_id);
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (tgtdev) {
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_put(tgtdev);
} else {
tgtdev = mpi3mr_alloc_tgtdev();
if (!tgtdev)
return -ENOMEM;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
}
@@ -2566,6 +2736,11 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
u32 xfer_count = 0, sense_count = 0, resp_data = 0;
u16 dev_handle = 0xFFFF;
struct scsi_sense_hdr sshdr;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u8 throttle_enabled_dev = 0;
*reply_dma = 0;
reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
@@ -2622,6 +2797,51 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
goto out;
}
priv = scsi_cmd_priv(scmd);
+
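+ /*
+  * Completion-side throttle accounting: express the transfer in
+  * 512-byte blocks; for large I/Os on throttle-enabled devices the
+  * pending counters are decremented and the divert flag is cleared
+  * once both the IOC-wide and throttle-group totals drop back under
+  * their low watermarks.
+  */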
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (stgt_priv_data) {
+ tg = stgt_priv_data->throttle_group;
+ throttle_enabled_dev =
+ stgt_priv_data->io_throttle_enabled;
+ }
+ }
+ if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
+ throttle_enabled_dev)) {
+ ioc_pend_data_len = atomic_sub_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (tg) {
+ tg_pend_data_len = atomic_sub_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (tg->io_divert && ((ioc_pend_data_len <=
+ mrioc->io_throttle_low) &&
+ (tg_pend_data_len <= tg->low))) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ } else {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+ }
+ } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
+ ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
+ if (!tg) {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+
+ } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
+ tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
+ if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ }
+ }
+
if (success_desc) {
scmd->result = DID_OK << 16;
goto out_success;
@@ -3842,6 +4062,11 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
tgt_dev->starget = starget;
atomic_set(&scsi_tgt_priv_data->block_io, 0);
retval = 0;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgt_dev->io_throttle_enabled;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group =
+ tgt_dev->dev_spec.vd_inf.tg;
} else
retval = -ENXIO;
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
@@ -3997,10 +4222,13 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
int retval = 0;
u16 dev_handle;
u16 host_tag;
- u32 scsiio_flags = 0;
+ u32 scsiio_flags = 0, data_len_blks = 0;
struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
u8 is_pcie_dev = 0;
+ u32 tracked_io_sz = 0;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
if (mrioc->unrecoverable) {
scmd->result = DID_ERROR << 16;
@@ -4104,11 +4332,50 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
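+ /*
+  * Submission-side throttle accounting: large I/Os on throttle-enabled
+  * devices are added to the IOC-wide (and, for VDs, throttle-group)
+  * pending counters. Once a high watermark is crossed the I/O is
+  * diverted to firmware, and for throttle groups a queue-depth
+  * reduction event is also queued.
+  */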
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ if ((data_len_blks >= mrioc->io_throttle_data_length) &&
+ stgt_priv_data->io_throttle_enabled) {
+ tracked_io_sz = data_len_blks;
+ tg = stgt_priv_data->throttle_group;
+ if (tg) {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ tg_pend_data_len = atomic_add_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (!tg->io_divert && ((ioc_pend_data_len >=
+ mrioc->io_throttle_high) ||
+ (tg_pend_data_len >= tg->high))) {
+ tg->io_divert = 1;
+ tg->need_qd_reduction = 1;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
+ tg, 1);
+ mpi3mr_queue_qd_reduction_event(mrioc, tg);
+ }
+ } else {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (ioc_pend_data_len >= mrioc->io_throttle_high)
+ stgt_priv_data->io_divert = 1;
+ }
+ }
+
+ if (stgt_priv_data->io_divert) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
+ }
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
if (mpi3mr_op_request_post(mrioc, op_req_q,
scmd_priv_data->mpi3mr_scsiio_req)) {
mpi3mr_clear_scmd_priv(mrioc, scmd);
retval = SCSI_MLQUEUE_HOST_BUSY;
+ if (tracked_io_sz) {
+ atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
+ if (tg)
+ atomic_sub(tracked_io_sz,
+ &tg->pend_large_data_sz);
+ }
goto out;
}
@@ -4321,6 +4588,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
+ shost->host_tagset = 1;
+
if (prot_mask >= 0)
scsi_host_set_prot(shost, prot_mask);
else {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 9a1ae52bb621..565339a0811d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -873,7 +873,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* @fault_code: fault code
*/
void
-mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
@@ -1057,7 +1057,7 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
desc = "config no defaults";
break;
case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
- desc = "config cant commit";
+ desc = "config can't commit";
break;
/****************************************************************************
@@ -1321,7 +1321,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* @log_info: log info
*/
static void
-_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
union loginfo_type {
u32 loginfo;
@@ -1393,7 +1393,7 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
(ioc->logging_level & MPT_DEBUG_REPLY)) {
- _base_sas_ioc_info(ioc , mpi_reply,
+ _base_sas_ioc_info(ioc, mpi_reply,
mpt3sas_base_get_msg_frame(ioc, smid));
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b519f4b59d30..def37a7e5980 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5294,7 +5294,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
}
/**
- * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
@@ -11386,6 +11386,7 @@ scsih_shutdown(struct pci_dev *pdev)
_scsih_ir_shutdown(ioc);
_scsih_nvme_shutdown(ioc);
mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_stop_watchdog(ioc);
ioc->shost_recovery = 1;
mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
@@ -12409,7 +12410,6 @@ scsih_suspend(struct device *dev)
return rc;
mpt3sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
_scsih_nvme_shutdown(ioc);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index f7466a895d3b..91d78d0a38fe 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -699,6 +699,10 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
return 0;
}
+static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
+}
+
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
@@ -3134,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
- * the sas toplogy has formed, please discover the the whole sas domain,
+ * the sas topology has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
@@ -3145,15 +3149,6 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
if (!phy->phy_attached)
return;
- if (sas_phy->phy) {
- struct sas_phy *sphy = sas_phy->phy;
- sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
- sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
- sphy->maximum_linkrate_hw = phy->maximum_linkrate;
- }
-
if (phy->phy_type & PORT_TYPE_SAS) {
struct sas_identify_frame *id;
id = (struct sas_identify_frame *)phy->frame_rcvd;
@@ -3177,26 +3172,22 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
switch (link_rate) {
case PHY_SPEED_120:
phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
break;
case PHY_SPEED_60:
phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
break;
case PHY_SPEED_30:
phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
case PHY_SPEED_15:
phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
}
sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
- sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+ sas_phy->maximum_linkrate_hw = phy->maximum_linkrate;
sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
- sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ sas_phy->maximum_linkrate = phy->maximum_linkrate;
+ sas_phy->minimum_linkrate = phy->minimum_linkrate;
}
/**
@@ -4947,6 +4938,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
const struct pm8001_dispatch pm8001_8001_dispatch = {
.name = "pmc8001",
.chip_init = pm8001_chip_init,
+ .chip_post_init = pm8001_chip_post_init,
.chip_soft_rst = pm8001_chip_soft_rst,
.chip_rst = pm8001_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 9b04f1a6a67d..a0028e130a7e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
-static int pm8001_init_ccb_tag(struct pm8001_hba_info *, struct Scsi_Host *, struct pci_dev *);
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
/*
* chip info structure to identify chip key functionality as
@@ -81,6 +81,18 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
+static int pm8001_map_queues(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
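+ /*
+  * With more than one MSI-X vector, spread the hardware queues
+  * according to the PCI IRQ affinity, skipping vector 0 which is
+  * reserved for housekeeping.
+  */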
+ if (pm8001_ha->number_of_intr > 1)
+ blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+
+ return blk_mq_map_queues(qmap);
+}
+
/*
* The main structure which LLDD must register for scsi core.
*/
@@ -109,6 +121,8 @@ static struct scsi_host_template pm8001_sht = {
#endif
.shost_groups = pm8001_host_groups,
.track_queue_depth = 1,
+ .cmd_per_lun = 32,
+ .map_queues = pm8001_map_queues,
};
/*
@@ -143,6 +157,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
@@ -605,12 +621,8 @@ static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
shost->transportt = pm8001_stt;
shost->max_id = PM8001_MAX_DEVICES;
- shost->max_lun = 8;
- shost->max_channel = 0;
shost->unique_id = pm8001_id;
shost->max_cmd_len = 16;
- shost->can_queue = PM8001_CAN_QUEUE;
- shost->cmd_per_lun = 32;
return 0;
exit_free1:
kfree(arr_port);
@@ -931,31 +943,35 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 number_of_intr;
- int rc, cpu_online_count;
unsigned int allocated_irq_vectors;
+ int rc;
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
- number_of_intr = 1;
+ rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1,
+ PCI_IRQ_MSIX);
} else {
- number_of_intr = PM8001_MAX_MSIX_VEC;
+ /*
+ * Queue index #0 is always used for housekeeping, so don't
+ * include it in the affinity spreading.
+ */
+ struct irq_affinity desc = {
+ .pre_vectors = 1,
+ };
+ rc = pci_alloc_irq_vectors_affinity(
+ pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}
- cpu_online_count = num_online_cpus();
- number_of_intr = min_t(int, cpu_online_count, number_of_intr);
- rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
- number_of_intr, PCI_IRQ_MSIX);
allocated_irq_vectors = rc;
if (rc < 0)
return rc;
/* Assigns the number of interrupts */
- number_of_intr = min_t(int, allocated_irq_vectors, number_of_intr);
- pm8001_ha->number_of_intr = number_of_intr;
+ pm8001_ha->number_of_intr = allocated_irq_vectors;
/* Maximum queue number updating in HBA structure */
- pm8001_ha->max_q_num = number_of_intr;
+ pm8001_ha->max_q_num = allocated_irq_vectors;
pm8001_dbg(pm8001_ha, INIT,
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
@@ -1122,10 +1138,23 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_ha_free;
}
- rc = pm8001_init_ccb_tag(pm8001_ha, shost, pdev);
+ rc = pm8001_init_ccb_tag(pm8001_ha);
if (rc)
goto err_out_enable;
+
+ PM8001_CHIP_DISP->chip_post_init(pm8001_ha);
+
+ if (pm8001_ha->number_of_intr > 1) {
+ shost->nr_hw_queues = pm8001_ha->number_of_intr - 1;
+ /*
+ * For now, ensure we're not sent too many commands by setting
+ * host_tagset. This is also required if we start using request
+ * tag.
+ */
+ shost->host_tagset = 1;
+ }
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
@@ -1175,16 +1204,14 @@ err_out_enable:
/**
* pm8001_init_ccb_tag - allocate memory to CCB and tag.
* @pm8001_ha: our hba card information.
- * @shost: scsi host which has been allocated outside.
- * @pdev: pci device.
*/
-static int
-pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
- struct pci_dev *pdev)
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha)
{
- int i = 0;
+ struct Scsi_Host *shost = pm8001_ha->shost;
+ struct device *dev = pm8001_ha->dev;
u32 max_out_io, ccb_count;
u32 can_queue;
+ int i;
max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
@@ -1207,7 +1234,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out_noccb;
}
for (i = 0; i < ccb_count; i++) {
- pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(&pdev->dev,
+ pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev,
sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
&pm8001_ha->ccb_info[i].ccb_dma_handle,
GFP_KERNEL);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 3a863d776724..8e3f2f9ddaac 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -66,7 +66,11 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag)
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
void *bitmap = pm8001_ha->tags;
- clear_bit(tag, bitmap);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ __clear_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
/**
@@ -76,9 +80,9 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
*/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
- unsigned int tag;
void *bitmap = pm8001_ha->tags;
unsigned long flags;
+ unsigned int tag;
spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
@@ -86,7 +90,7 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
return -SAS_QUEUE_FULL;
}
- set_bit(tag, bitmap);
+ __set_bit(tag, bitmap);
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
*tag_out = tag;
return 0;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 060ab680a7ed..c5e3f380a01c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -55,6 +55,8 @@
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
@@ -172,6 +174,7 @@ struct forensic_data {
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 01c5e8ff4cc5..f8b8624458f7 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -1469,11 +1469,18 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
} else
return -EBUSY;
+ return 0;
+}
+
+static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
/* send SAS protocol timer configuration page to FW */
- ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+ pm80xx_set_sas_protocol_timer_config(pm8001_ha);
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
+ int ret;
+
pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
@@ -1485,7 +1492,6 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
}
}
}
- return 0;
}
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
@@ -3723,8 +3729,12 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
phyid, status);
if (status == PHY_STOP_SUCCESS ||
- status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED;
+ phy->sas_phy.linkrate = SAS_PHY_DISABLED;
+ }
+
return 0;
}
@@ -4345,6 +4355,29 @@ static int check_enc_sat_cmd(struct sas_task *task)
return ret;
}
+static u32 pm80xx_chip_get_q_index(struct sas_task *task)
+{
+ struct scsi_cmnd *scmd = NULL;
+ u32 blk_tag;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(task->dev)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (!scmd)
+ return 0;
+
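+ /* Derive the hardware queue index from the command's blk-mq tag. */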
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ return blk_mq_unique_tag_to_hwq(blk_tag);
+}
+
/**
* pm80xx_chip_ssp_io_req - send an SSP task to FW
* @pm8001_ha: our hba card information.
@@ -4360,7 +4393,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u32 tag = ccb->ccb_tag;
u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
- u32 q_index, cpu_id;
+ u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -4381,8 +4414,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
+ q_index = pm80xx_chip_get_q_index(task);
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
@@ -4511,8 +4543,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
struct ata_queued_cmd *qc = task->uldd_task;
- u32 tag = ccb->ccb_tag;
- u32 q_index, cpu_id;
+ u32 tag = ccb->ccb_tag, q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr, end_addr;
@@ -4522,8 +4553,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
+
+ q_index = pm80xx_chip_get_q_index(task);
if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
@@ -5007,6 +5038,7 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
+ .chip_post_init = pm80xx_chip_post_init,
.chip_soft_rst = pm80xx_chip_soft_rst,
.chip_rst = pm80xx_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 83ffba7f51da..cecfb2cb4c7b 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2414,9 +2414,12 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
int rval;
u16 retry = 10;
- if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
- iscsi_host_remove(qedi->shost);
+ if (mode == QEDI_MODE_NORMAL)
+ iscsi_host_remove(qedi->shost, false);
+ else if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_remove(qedi->shost, true);
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
if (qedi->tmf_thread) {
destroy_workqueue(qedi->tmf_thread);
qedi->tmf_thread = NULL;
@@ -2491,7 +2494,7 @@ static void qedi_board_disable_work(struct work_struct *work)
if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
return;
- __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
+ __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL);
}
static void qedi_shutdown(struct pci_dev *pdev)
@@ -2791,7 +2794,7 @@ remove_host:
#ifdef CONFIG_DEBUG_FS
qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
- iscsi_host_remove(qedi->shost);
+ iscsi_host_remove(qedi->shost, false);
stop_iscsi_func:
qedi_ops->stop(qedi->cdev);
stop_slowpath:
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3b3e4234f37a..fa1fcbfb946f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2476,7 +2476,6 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
-static DEVICE_ATTR_RO(edif_doorbell);
static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version.attr,
@@ -2521,7 +2520,6 @@ static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_port_no.attr,
&dev_attr_fw_attr.attr,
&dev_attr_dport_diagnostics.attr,
- &dev_attr_edif_doorbell.attr,
&dev_attr_mpi_pause.attr,
&dev_attr_qlini_mode.attr,
&dev_attr_ql2xiniexchg.attr,
@@ -2716,17 +2714,27 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- /* Now that the rport has been deleted, set the fcport state to
- FCS_DEVICE_DEAD */
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
+ DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
+ rport->port_state));
+
+ /*
+ * Now that the rport has been deleted, set the fcport state to
+ * FCS_DEVICE_DEAD, if the fcport is still lost.
+ */
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
/*
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
spin_lock_irqsave(host->host_lock, flags);
- fcport->rport = fcport->drport = NULL;
- *((fc_port_t **)rport->dd_data) = NULL;
+ /* Confirm port has not reappeared before clearing pointers. */
+ if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ fcport->rport = fcport->drport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ }
spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
@@ -2759,9 +2767,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
/*
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
+ *
+ * Attempt to cleanup only lost devices.
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- if (IS_FWI2_CAPABLE(fcport->vha->hw)) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
+ fcport->scan_state != QLA_FCPORT_FOUND) {
if (fcport->loop_id != FC_NO_LOOP_ID)
fcport->logout_on_delete = 1;
@@ -2771,7 +2782,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
__LINE__);
qlt_schedule_sess_for_deletion(fcport);
}
- } else {
+ } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
qla2x00_port_logout(fcport->vha, fcport);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c2f00f076f79..5db9bf69dcff 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2425,6 +2425,89 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
}
static int
+qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval;
+ struct qla_dport_diag_v2 *dd;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint16_t options;
+
+ if (!IS_DPORT_CAPABLE(vha->hw))
+ return -EPERM;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+ options = dd->options;
+
+ /* Check if a dport test is already in progress */
+ if (options == QLA_GET_DPORT_RESULT_V2 &&
+ vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_IN_PROCESS;
+ goto dportcomplete;
+ }
+
+ /* Clear the chip-reset flag when a start/restart request arrives */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2)) {
+ vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ }
+
+ /* A get-result request while a chip reset is in progress: report not running */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ options == QLA_GET_DPORT_RESULT_V2) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
+ goto dportcomplete;
+ }
+
+ rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+ if (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2) {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
+ } else if (options == QLA_GET_DPORT_RESULT_V2) {
+ dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
+ dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
+ }
+ } else {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_ERR;
+ }
+
+dportcomplete:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(dd);
+
+ return 0;
+}
+
+static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
@@ -2860,6 +2943,9 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_DPORT_DIAGNOSTICS_V2:
+ return qla2x00_do_dport_diagnostics_v2(bsg_job);
+
case QL_VND_EDIF_MGMT:
return qla_edif_app_mgmt(bsg_job);
@@ -2975,6 +3061,13 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
__func__, bsg_job);
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x9007,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
+
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2992,7 +3085,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
+
+ if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 6d2b0a7436c1..bb64b9c5a74b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -37,6 +37,7 @@
#define QL_VND_GET_TGT_STATS 0x25
#define QL_VND_MANAGE_HOST_PORT 0x26
#define QL_VND_MBX_PASSTHRU 0x2B
+#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -60,6 +61,9 @@
#define EXT_STATUS_TIMEOUT 30
#define EXT_STATUS_THREAD_FAILED 31
#define EXT_STATUS_DATA_CMP_FAILED 32
+#define EXT_STATUS_DPORT_DIAG_ERR 40
+#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41
+#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42
/* BSG definations for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -288,6 +292,17 @@ struct qla_dport_diag {
uint8_t unused[62];
} __packed;
+#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */
+#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */
+#define QLA_START_DPORT_TEST_V2 2 /* Start test */
+struct qla_dport_diag_v2 {
+ uint16_t options;
+ uint16_t mbx1;
+ uint16_t mbx2;
+ uint8_t unused[58];
+ uint8_t buf[1024]; /* Test Result */
+} __packed;
+
/* D_Port options */
#define QLA_DPORT_RESULT 0x0
#define QLA_DPORT_START 0x2
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f1f6c740bdcd..feeb1666227f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -383,5 +383,5 @@ ql_mask_match(uint level)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
- return (level & ql2xextended_error_logging) == level;
+ return level && ((level & ql2xextended_error_logging) == level);
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e8f69c486be1..3ec6a200942e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -78,7 +78,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "QLogic Corporation"
+#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -1173,6 +1173,12 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
+
+/* AEN mailbox Port Diagnostics test */
+#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */
+#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */
+#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error. */
+
/*
* Firmware options 1, 2, 3.
*/
@@ -2158,6 +2164,11 @@ typedef struct {
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
failure */
#define CS_REJECT_RECEIVED 0x4E /* Reject received */
+#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */
+#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */
+#define CS_EDIF_INV_REQ 0x66 /* invalid request */
+#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */
+#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2626,7 +2637,6 @@ typedef struct fc_port {
struct {
uint32_t enable:1; /* device is edif enabled/req'd */
uint32_t app_stop:2;
- uint32_t app_started:1;
uint32_t aes_gmac:1;
uint32_t app_sess_online:1;
uint32_t tx_sa_set:1;
@@ -2637,6 +2647,7 @@ typedef struct fc_port {
uint32_t rx_rekey_cnt;
uint64_t tx_bytes;
uint64_t rx_bytes;
+ uint8_t sess_down_acked;
uint8_t auth_state;
uint16_t authok:1;
uint16_t rekey_cnt;
@@ -3204,6 +3215,8 @@ struct ct_sns_rsp {
#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
+#define FC4_FF_TARGET BIT_0
+#define FC4_FF_INITIATOR BIT_1
} gff_id;
struct {
uint8_t reserved;
@@ -3975,6 +3988,7 @@ struct qla_hw_data {
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
+ u8 port_name[WWN_SIZE];
volatile struct {
uint32_t mbox_int :1;
@@ -4040,6 +4054,9 @@ struct qla_hw_data {
uint32_t n2n_fw_acc_sec:1;
uint32_t plogi_template_valid:1;
uint32_t port_isolated:1;
+ uint32_t eeh_flush:2;
+#define EEH_FLUSH_RDY 1
+#define EEH_FLUSH_DONE 2
} flags;
uint16_t max_exchg;
@@ -4074,6 +4091,7 @@ struct qla_hw_data {
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
+ unsigned long eeh_jif;
/* Multi queue data structs */
device_reg_t *mqiobase;
@@ -4256,8 +4274,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
@@ -5012,6 +5030,10 @@ typedef struct scsi_qla_host {
u64 short_link_down_cnt;
struct edif_dbell e_dbell;
struct pur_core pur_cinfo;
+
+#define DPORT_DIAG_IN_PROGRESS BIT_0
+#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
+ uint16_t dport_status;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5443,4 +5465,10 @@ struct ql_vnd_tgt_stats_resp {
#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
_fcport->disc_state == DSC_DELETED)
+#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \
+ "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \
+ __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \
+ _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
+ _fp->flags
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index cb8145a9ac09..400a8b6f3982 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -52,6 +52,31 @@ const char *sc_to_str(uint16_t cmd)
return "unknown";
}
+static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del_init(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ list_del_init(&node->list);
+ kfree(node);
+}
+
static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
uint16_t handle)
{
@@ -257,14 +282,8 @@ qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
f = NULL;
list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
- if ((f->flags & FCF_FCSP_DEVICE)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
- "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
- f->node_name, f->port_name,
- f->d_id.b24, id->b24);
- if (f->d_id.b24 == id->b24)
- return f;
- }
+ if (f->d_id.b24 == id->b24)
+ return f;
}
return NULL;
}
@@ -280,14 +299,19 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
{
/* check that the app is allow/known to the driver */
- if (appid.app_vid == EDIF_APP_ID) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
- return true;
+ if (appid.app_vid != EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+ return false;
+ }
+
+ if (appid.version != EDIF_VERSION1) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
+ __func__, appid.version);
+ return false;
}
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
- __func__, appid.app_vid);
- return false;
+ return true;
}
static void
@@ -486,16 +510,35 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
/* mark doorbell as active since an app is now present */
vha->e_dbell.db_flags |= EDB_ACTIVE;
} else {
- ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
- __func__);
+ goto out;
}
if (N2N_TOPO(vha->hw)) {
- if (vha->hw->flags.n2n_fw_acc_sec)
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- else
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ fcport->n2n_link_reset_cnt = 0;
+
+ if (vha->hw->flags.n2n_fw_acc_sec) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ qla_edif_sa_ctl_init(vha, fcport);
+
+ /*
+ * While authentication app was not running, remote device
+ * could still try to login with this local port. Let's
+ * clear the state and try again.
+ */
+ qla2x00_wait_for_sess_deletion(vha);
+
+ /* bounce the link to get the other guy to relogin */
+ if (!vha->hw->flags.n2n_bigger) {
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else {
+ qla2x00_wait_for_hba_online(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ }
} else {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_edif, vha, 0x2058,
@@ -517,19 +560,31 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_DOWN)
break;
- fcport->edif.app_started = 1;
fcport->login_retry = vha->hw->login_retry_count;
- /* no activity */
fcport->edif.app_stop = 0;
+ fcport->edif.app_sess_online = 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport, true);
+
+ if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
+ continue;
+
+ rval = 0;
ql_dbg(ql_dbg_edif, vha, 0x911e,
"%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
__func__, fcport->port_name);
- fcport->edif.app_sess_online = 0;
qlt_schedule_sess_for_deletion(fcport);
qla_edif_sa_ctl_init(vha, fcport);
}
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
@@ -540,9 +595,11 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
__func__);
}
+out:
appreply.host_support_edif = vha->hw->flags.edif_enabled;
appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
appreply.edif_edb_active = vha->e_dbell.db_flags;
+ appreply.version = EDIF_VERSION1;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
@@ -610,9 +667,6 @@ qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fcport->send_els_logo = 1;
qlt_schedule_sess_for_deletion(fcport);
-
- /* qla_edif_flush_sa_ctl_lists(fcport); */
- fcport->edif.app_started = 0;
}
}
@@ -672,6 +726,7 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
portid.b.area = appplogiok.u.d_id.b.area;
portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+ appplogireply.version = EDIF_VERSION1;
switch (appplogiok.type) {
case PL_TYPE_WWPN:
fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -864,6 +919,8 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (!(fcport->flags & FCF_FCSP_DEVICE))
continue;
@@ -880,9 +937,25 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
continue;
- app_reply->ports[pcnt].rekey_count =
- fcport->edif.rekey_cnt;
+ if (!N2N_TOPO(vha->hw)) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport,
+ true);
+ if (!rval &&
+ !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type &
+ (FCT_TARGET | FCT_NVME_TARGET)))
+ continue;
+ }
+
+ rval = 0;
+
+ app_reply->ports[pcnt].version = EDIF_VERSION1;
app_reply->ports[pcnt].remote_type =
VND_CMD_RTYPE_UNKNOWN;
if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
@@ -979,6 +1052,8 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
if (pcnt > app_req.num_ports)
@@ -1012,6 +1087,164 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
return rval;
}
+static int32_t
+qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_port *fcport;
+ struct aen_complete_cmd ack;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
+
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: %06x event_code %x\n",
+ __func__, ack.port_id.b24, ack.event_code);
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: unable to find fcport %06x \n",
+ __func__, ack.port_id.b24);
+ return 0;
+ }
+
+ switch (ack.event_code) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ fcport->edif.sess_down_acked = 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ u32 sg_skip, reply_payload_len;
+ bool keep;
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell ap;
+ int dat_size = 0;
+
+ sg_skip = 0;
+ reply_payload_len = bsg_job->reply_payload.payload_len;
+
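+ /*
+  * Drain doorbell events in FIFO order into the bsg reply payload until
+  * the remaining buffer can no longer hold a full entry or the queue is
+  * empty.
+  */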
+ while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ keep = true;
+ dat_size = 0;
+ ap.event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap.port_id = dbnode->u.plogi_did;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap.port_id = dbnode->u.els_sid;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap.port_id = dbnode->u.sa_aen.port_id;
+ memcpy(&ap.event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_size += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ keep = false;
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown DB type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ break;
+ }
+ ap.event_data_size = dat_size;
+ /* 8 = sizeof(ap.event_code) + sizeof(ap.event_data_size) */
+ dat_size += 8;
+ if (keep)
+ sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &ap, dat_size, sg_skip, false);
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+
+ kfree(dbnode);
+ } else {
+ break;
+ }
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ bsg_reply->reply_payload_rcv_len = sg_skip;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ return 0;
+}
+
+static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ u32 delay)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ /* small sleep for doorbell events to accumulate */
+ if (delay)
+ msleep(delay);
+
+ qla_edif_consume_dbell(vha, bsg_job);
+
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+}
+
+static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct bsg_job *prev_bsg_job = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
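+ /* Detach any parked doorbell bsg_job under the lock and complete it. */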
+ if (vha->e_dbell.dbell_bsg_job) {
+ prev_bsg_job = vha->e_dbell.dbell_bsg_job;
+ vha->e_dbell.dbell_bsg_job = NULL;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (prev_bsg_job)
+ __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
+}
+
+static int
+qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ unsigned long flags;
+ bool return_bsg = false;
+
+ /* flush previous dbell bsg */
+ qla_edif_dbell_bsg_done(vha);
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
+ /*
+ * When the next doorbell event arrives, the bsg_job is completed.
+ * Otherwise, the timer completes it.
+ */
+ vha->e_dbell.dbell_bsg_job = bsg_job;
+ vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
+ } else {
+ return_bsg = true;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (return_bsg)
+ __qla_edif_dbell_bsg_done(vha, bsg_job, 1);
+
+ return 0;
+}
+
int32_t
qla_edif_app_mgmt(struct bsg_job *bsg_job)
{
@@ -1023,8 +1256,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
bool done = true;
int32_t rval = 0;
uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+ u32 level = ql_dbg_edif;
+
+ /* doorbell reads are high traffic; keep routine logging quiet */
+ if (vnd_sc == QL_VND_SC_READ_DBELL)
+ level = 0;
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+ ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
__func__, vnd_sc);
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
@@ -1033,7 +1271,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
if (!vha->hw->flags.edif_enabled ||
test_bit(VPORT_DELETE, &vha->dpc_flags)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
__func__, bsg_job, vha->dpc_flags);
@@ -1042,7 +1280,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
}
if (!qla_edif_app_check(vha, appcheck)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s app checked failed.\n",
__func__);
@@ -1074,6 +1312,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
case QL_VND_SC_GET_STATS:
rval = qla_edif_app_getstats(vha, bsg_job);
break;
+ case QL_VND_SC_AEN_COMPLETE:
+ rval = qla_edif_ack(vha, bsg_job);
+ break;
+ case QL_VND_SC_READ_DBELL:
+ rval = qla_edif_dbell_bsg(vha, bsg_job);
+ done = false;
+ break;
default:
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
__func__,
@@ -1085,7 +1330,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
done:
if (done) {
- ql_dbg(ql_dbg_user, vha, 0x7009,
+ ql_dbg(level, vha, 0x7009,
"%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
@@ -1247,6 +1492,8 @@ qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+#define EDIF_MSLEEP_INTERVAL 100
+#define EDIF_RETRY_COUNT 50
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
@@ -1259,7 +1506,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
struct edif_list_entry *edif_entry = NULL;
int found = 0;
int rval = 0;
- int result = 0;
+ int result = 0, cnt;
struct qla_sa_update_frame sa_frame;
struct srb_iocb *iocb_cmd;
port_id_t portid;
@@ -1500,11 +1747,23 @@ force_rx_delete:
sp->done = qla2x00_bsg_job_done;
iocb_cmd = &sp->u.iocb_cmd;
iocb_cmd->u.sa_update.sa_frame = sa_frame;
-
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
ql_log(ql_dbg_edif, vha, 0x70e3,
- "qla2x00_start_sp failed=%d.\n", rval);
+ "%s qla2x00_start_sp failed=%d.\n",
+ __func__, rval);
qla2x00_rel_sp(sp);
rval = -EIO;
@@ -1797,30 +2056,6 @@ qla_edb_init(scsi_qla_host_t *vha)
/* initialize lock which protects doorbell & init list */
spin_lock_init(&vha->e_dbell.db_lock);
INIT_LIST_HEAD(&vha->e_dbell.head);
-
- /* create and initialize doorbell */
- init_completion(&vha->e_dbell.dbell);
-}
-
-static void
-qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
-{
- /*
- * releases the space held by this edb node entry
- * this function does _not_ free the edb node itself
- * NB: the edb node entry passed should not be on any list
- *
- * currently for doorbell there's no additional cleanup
- * needed, but here as a placeholder for furture use.
- */
-
- if (!node) {
- ql_dbg(ql_dbg_edif, vha, 0x09122,
- "%s error - no valid node passed\n", __func__);
- return;
- }
-
- node->ntype = N_UNDEF;
}
static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
@@ -1867,11 +2102,8 @@ static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- list_for_each_entry_safe(e, tmp, &edb_list, list) {
+ list_for_each_entry_safe(e, tmp, &edb_list, list)
qla_edb_node_free(vha, e);
- list_del_init(&e->list);
- kfree(e);
- }
}
/* function called when app is stopping */
@@ -1899,14 +2131,10 @@ qla_edb_stop(scsi_qla_host_t *vha)
"%s freeing edb_node type=%x\n",
__func__, node->ntype);
qla_edb_node_free(vha, node);
- list_del(&node->list);
-
- kfree(node);
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* wake up doorbell waiters - they'll be dismissed with error code */
- complete_all(&vha->e_dbell.dbell);
+ qla_edif_dbell_bsg_done(vha);
}
static struct edb_node *
@@ -1944,9 +2172,6 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
list_add_tail(&ptr->list, &vha->e_dbell.head);
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* ring doorbell for waiters */
- complete(&vha->e_dbell.dbell);
-
return true;
}
@@ -2010,47 +2235,29 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
edbnode->u.sa_aen.port_id = fcport->d_id;
edbnode->u.sa_aen.status = data;
edbnode->u.sa_aen.key_type = data2;
+ edbnode->u.sa_aen.version = EDIF_VERSION1;
break;
default:
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s unknown type: %x\n", __func__, dbtype);
- qla_edb_node_free(vha, edbnode);
kfree(edbnode);
edbnode = NULL;
break;
}
- if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+ if (edbnode) {
+ if (!qla_edb_node_add(vha, edbnode)) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ kfree(edbnode);
+ return;
+ }
ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s unable to add dbnode\n", __func__);
- qla_edb_node_free(vha, edbnode);
- kfree(edbnode);
- return;
- }
- if (edbnode && fcport)
- fcport->edif.auth_state = dbtype;
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
-}
-
-static struct edb_node *
-qla_edb_getnext(scsi_qla_host_t *vha)
-{
- unsigned long flags;
- struct edb_node *edbnode = NULL;
-
- spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
-
- /* db nodes are fifo - no qualifications done */
- if (!list_empty(&vha->e_dbell.head)) {
- edbnode = list_first_entry(&vha->e_dbell.head,
- struct edb_node, list);
- list_del(&edbnode->list);
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+ qla_edif_dbell_bsg_done(vha);
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
}
-
- spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
-
- return edbnode;
}
void
@@ -2078,89 +2285,14 @@ qla_edif_timer(scsi_qla_host_t *vha)
ha->edif_post_stop_cnt_down = 60;
}
}
-}
-/*
- * app uses separate thread to read this. It'll wait until the doorbell
- * is rung by the driver or the max wait time has expired
- */
-ssize_t
-edif_doorbell_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct edb_node *dbnode = NULL;
- struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
- uint32_t dat_siz, buf_size, sz;
-
- /* TODO: app currently hardcoded to 256. Will transition to bsg */
- sz = 256;
-
- /* stop new threads from waiting if we're not init'd */
- if (DBELL_INACTIVE(vha)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif db not enabled\n", __func__);
- return 0;
- }
-
- if (!vha->hw->flags.edif_enabled) {
- /* edif not enabled */
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif not enabled\n", __func__);
- return -1;
- }
-
- buf_size = 0;
- while ((sz - buf_size) >= sizeof(struct edb_node)) {
- /* remove the next item from the doorbell list */
- dat_siz = 0;
- dbnode = qla_edb_getnext(vha);
- if (dbnode) {
- ap->event_code = dbnode->ntype;
- switch (dbnode->ntype) {
- case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
- case VND_CMD_AUTH_STATE_NEEDED:
- ap->port_id = dbnode->u.plogi_did;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_ELS_RCVD:
- ap->port_id = dbnode->u.els_sid;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
- ap->port_id = dbnode->u.sa_aen.port_id;
- memcpy(ap->event_data, &dbnode->u,
- sizeof(struct edif_sa_update_aen));
- dat_siz += sizeof(struct edif_sa_update_aen);
- break;
- default:
- /* unknown node type, rtn unknown ntype */
- ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
- memcpy(ap->event_data, &dbnode->ntype, 4);
- dat_siz += 4;
- break;
- }
-
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell consumed : type=%d %p\n",
- __func__, dbnode->ntype, dbnode);
- /* we're done with the db node, so free it up */
- qla_edb_node_free(vha, dbnode);
- kfree(dbnode);
- } else {
- break;
- }
-
- ap->event_data_size = dat_siz;
- /* 8bytes = ap->event_code + ap->event_data_size */
- buf_size += dat_siz + 8;
- ap = (struct edif_app_dbell *)(buf + buf_size);
- }
- return buf_size;
+ if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
+ qla_edif_dbell_bsg_done(vha);
}
static void qla_noop_sp_done(srb_t *sp, int res)
{
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
@@ -2185,7 +2317,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sa_ctl) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"sa_ctl allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport = sa_ctl->fcport;
@@ -2195,7 +2328,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sp) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"SRB allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -2224,10 +2358,17 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- rval = QLA_FUNCTION_FAILED;
+ if (rval != QLA_SUCCESS) {
+ goto done_free_sp;
+ }
return rval;
+done_free_sp:
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
}
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
@@ -2446,8 +2587,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
- if (DBELL_INACTIVE(vha) ||
- (fcport && EDIF_SESSION_DOWN(fcport))) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
__func__, host->e_dbell.db_flags,
fcport ? fcport->d_id.b24 : 0);
@@ -2457,6 +2597,22 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
return;
}
+ if (fcport && EDIF_SESSION_DOWN(fcport)) {
+ ql_dbg(ql_dbg_edif, host, 0x13b6,
+ "%s terminate exchange. Send logo to 0x%x\n",
+ __func__, a.did.b24);
+
+ a.tx_byte_count = a.tx_len = 0;
+ a.tx_addr = 0;
+ a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ /* send logo to let the remote port know to tear down the session */
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
/* add the local enode to the list */
qla_enode_add(host, ptr);
@@ -2832,6 +2988,12 @@ qla28xx_start_scsi_edif(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
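+ /* Reserve IOCB resources; released on the queuing_error path below. */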
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword(req->req_q_out);
@@ -3023,6 +3185,7 @@ queuing_error:
mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
sp->u.scmd.ct6_ctx = NULL;
}
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(lock, flags);
return QLA_FUNCTION_FAILED;
@@ -3349,10 +3512,14 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fc_port_t *fcport = NULL;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- int rval = (DID_ERROR << 16);
+ int rval = (DID_ERROR << 16), cnt;
port_id_t d_id;
struct qla_bsg_auth_els_request *p =
(struct qla_bsg_auth_els_request *)bsg_job->request;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ rpl->version = EDIF_VERSION1;
d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
@@ -3371,7 +3538,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (qla_bsg_check(vha, bsg_job, fcport))
return 0;
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ if (EDIF_SESS_DELETE(fcport)) {
ql_dbg(ql_dbg_edif, vha, 0x910d,
"%s ELS code %x, no loop id.\n", __func__,
bsg_request->rqst_data.r_els.els_code);
@@ -3440,17 +3607,26 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
-
- ql_dbg(ql_dbg_edif, vha, 0x700a,
- "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
- __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
- p->e.extra_rx_xchg_address, p->e.extra_control_flags,
- sp->handle, sp->remap.req.len, bsg_job);
-
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+ fallthrough;
+ default:
ql_log(ql_log_warn, vha, 0x700e,
- "qla2x00_start_sp failed = %d\n", rval);
+ "%s qla2x00_start_sp failed = %d\n", __func__, rval);
SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
rval = -EIO;
goto done_free_remap_rsp;
@@ -3472,14 +3648,29 @@ done:
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
{
+ u16 cnt = 0;
+
if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0xf09c,
"%s: sess %8phN send port_offline event\n",
__func__, sess->port_name);
sess->edif.app_sess_online = 0;
+ sess->edif.sess_down_acked = 0;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
sess->d_id.b24, 0, sess);
qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+
+ while (!READ_ONCE(sess->edif.sess_down_acked) &&
+ !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ msleep(100);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ sess->edif.sess_down_acked = 0;
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN port_offline event completed\n",
+ __func__, sess->port_name);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index a965ca8e47ce..7cdb89ccdc6e 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -51,7 +51,8 @@ struct edif_dbell {
enum db_flags_t db_flags;
spinlock_t db_lock;
struct list_head head;
- struct completion dbell;
+ struct bsg_job *dbell_bsg_job;
+ unsigned long bsg_expire;
};
#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
@@ -140,4 +141,8 @@ struct enode {
(DBELL_ACTIVE(_fcport->vha) && \
(_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+#define EDIF_SESS_DELETE(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED))
+
#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 5a26c77157da..0931f4e4e127 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -7,13 +7,15 @@
#ifndef __QLA_EDIF_BSG_H
#define __QLA_EDIF_BSG_H
+#define EDIF_VERSION1 1
+
/* BSG Vendor specific commands */
#define ELS_MAX_PAYLOAD 2112
#ifndef WWN_SIZE
#define WWN_SIZE 8
#endif
-#define VND_CMD_APP_RESERVED_SIZE 32
-
+#define VND_CMD_APP_RESERVED_SIZE 28
+#define VND_CMD_PAD_SIZE 3
enum auth_els_sub_cmd {
SEND_ELS = 0,
SEND_ELS_REPLY,
@@ -28,7 +30,9 @@ struct extra_auth_els {
#define BSG_CTL_FLAG_LS_ACC 1
#define BSG_CTL_FLAG_LS_RJT 2
#define BSG_CTL_FLAG_TRM 3
- uint8_t extra_rsvd[3];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct qla_bsg_auth_els_request {
@@ -39,51 +43,46 @@ struct qla_bsg_auth_els_request {
struct qla_bsg_auth_els_reply {
struct fc_bsg_reply r;
uint32_t rx_xchg_address;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
};
struct app_id {
int app_vid;
- uint8_t app_key[32];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start_reply {
uint32_t host_support_edif;
uint32_t edif_enode_active;
uint32_t edif_edb_active;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start {
struct app_id app_info;
- uint32_t prli_to;
- uint32_t key_shred;
uint8_t app_start_flags;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_stop {
struct app_id app_info;
- char buf[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_plogi_reply {
uint32_t prli_status;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
-} __packed;
-
-#define RECFG_TIME 1
-#define RECFG_BYTES 2
-
-struct app_rekey_cfg {
- struct app_id app_info;
- uint8_t rekey_mode;
- port_id_t d_id;
- uint8_t force;
- union {
- int64_t bytes;
- int64_t time;
- } rky_units;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -91,7 +90,9 @@ struct app_pinfo_req {
struct app_id app_info;
uint8_t num_ports;
port_id_t remote_pid;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_pinfo {
@@ -103,11 +104,8 @@ struct app_pinfo {
#define VND_CMD_RTYPE_INITIATOR 2
uint8_t remote_state;
uint8_t auth_state;
- uint8_t rekey_mode;
- int64_t rekey_count;
- int64_t rekey_config_value;
- int64_t rekey_consumed_value;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -120,6 +118,8 @@ struct app_pinfo {
struct app_pinfo_reply {
uint8_t port_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
struct app_pinfo ports[];
} __packed;
@@ -127,6 +127,8 @@ struct app_pinfo_reply {
struct app_sinfo_req {
struct app_id app_info;
uint8_t num_ports;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -140,6 +142,9 @@ struct app_sinfo {
struct app_stats_reply {
uint8_t elem_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
struct app_sinfo elem[];
} __packed;
@@ -163,9 +168,11 @@ struct qla_sa_update_frame {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t port_id;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE];
} __packed;
-// used for edif mgmt bsg interface
#define QL_VND_SC_UNDEF 0
#define QL_VND_SC_SA_UPDATE 1
#define QL_VND_SC_APP_START 2
@@ -175,6 +182,22 @@ struct qla_sa_update_frame {
#define QL_VND_SC_REKEY_CONFIG 6
#define QL_VND_SC_GET_FCINFO 7
#define QL_VND_SC_GET_STATS 8
+#define QL_VND_SC_AEN_COMPLETE 9
+#define QL_VND_SC_READ_DBELL 10
+
+/*
+ * BSG caller provides an empty buffer for doorbell events.
+ *
+ * sg_io_v4.din_xferp = empty buffer for doorbell events
+ * sg_io_v4.dout_xferp = struct edif_read_dbell *buf
+ */
+struct edif_read_dbell {
+ struct app_id app_info;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
/* Application interface data structure for rtn data */
#define EXT_DEF_EVENT_DATA_SIZE 64
@@ -191,7 +214,9 @@ struct edif_sa_update_aen {
port_id_t port_id;
uint32_t key_type; /* Tx (1) or RX (2) */
uint32_t status; /* 0 succes, 1 failed, 2 timeout , 3 error */
- uint8_t reserved[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define QL_VND_SA_STAT_SUCCESS 0
@@ -212,9 +237,22 @@ struct auth_complete_cmd {
uint8_t wwpn[WWN_SIZE];
port_id_t d_id;
} u;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct aen_complete_cmd {
+ struct app_id app_info;
+ port_id_t port_id;
+ uint32_t event_code;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define RX_DELAY_DELETE_TIMEOUT 20
+#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN 1
+
#endif /* QLA_EDIF_BSG_H */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 0bb1d562f0bf..361015b5763e 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -807,7 +807,7 @@ struct els_entry_24xx {
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
#define EPD_ELS_RJT (2 << 13)
-#define EPD_RX_XCHG (3 << 13)
+#define EPD_RX_XCHG (3 << 13) /* terminate exchange */
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
#define ECF_SEC_LOGIN BIT_3
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index dac27b5ff0ac..5dd2932382ee 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,8 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
+extern int ql2xrspq_follow_inptr;
+extern int ql2xrspq_follow_inptr_legacy;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -335,6 +337,7 @@ extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
struct qla_work_evt *e);
void qla2x00_sp_release(struct kref *kref);
+void qla2x00_els_dcmd2_iocb_timeout(void *data);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -433,7 +436,8 @@ extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
@@ -554,6 +558,10 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+extern int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *,
+ struct qla_dport_diag_v2 *, mbx_cmd_t *);
+
int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
@@ -727,7 +735,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
@@ -989,7 +997,6 @@ fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
fc_port_t *fcport);
void qla_edb_stop(scsi_qla_host_t *vha);
-ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
void qla_enode_init(scsi_qla_host_t *vha);
void qla_enode_stop(scsi_qla_host_t *vha);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e811de2f6a25..64ab070b8716 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1596,7 +1596,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
struct ct_fdmi_hba_attr *eiter;
uint16_t alen;
@@ -1617,7 +1616,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = scnprintf(
eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
+ "%s", QLA2XXX_MANUFACTURER);
alen += FDMI_ATTR_ALIGNMENT(alen);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1758,8 +1757,8 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
+
alen = sizeof(eiter->a.max_ct_len);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1851,7 +1850,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
char *hostname = p_sysid ?
p_sysid->nodename : fc_host_system_hostname(vha->host);
@@ -1903,8 +1901,7 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
alen = sizeof(eiter->a.max_frame_size);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -3280,19 +3277,12 @@ done:
return rval;
}
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- qla24xx_post_gnl_work(vha, fcport);
-}
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
- struct event_arg ea;
uint8_t fc4_scsi_feat;
uint8_t fc4_nvme_feat;
@@ -3300,10 +3290,10 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
"Async done-%s res %x ID %x. %8phC\n",
sp->name, res, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
- ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ sp->rc = res;
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
@@ -3324,24 +3314,42 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
}
}
- memset(&ea, 0, sizeof(ea));
- ea.sp = sp;
- ea.fcport = sp->fcport;
- ea.rc = res;
+ if (sp->flags & SRB_WAKEUP_ON_COMP) {
+ complete(sp->comp);
+ } else {
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
- qla24xx_handle_gffid_event(vha, &ea);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /* we should not be here */
+ dump_stack();
+ }
}
/* Get FC4 Feature with Nport ID. */
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
srb_t *sp;
+ DECLARE_COMPLETION_ONSTACK(comp);
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ /* this routine does not handle the no-wait case */
+ if (!vha->flags.online || !wait)
return rval;
/* ref: INIT */
@@ -3349,43 +3357,86 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!sp)
return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gffid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla24xx_async_gffid_sp_done);
+ sp->comp = &comp;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
+
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
/* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
- GFF_ID_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- ql_dbg(ql_dbg_disc, vha, 0x2132,
- "Async-%s hdl=%x %8phC.\n", sp->name,
- sp->handle, fcport->port_name);
-
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
goto done_free_sp;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "Async-%s hdl=%x portid %06x\n",
+ sp->name, sp->handle, fcport->d_id.b24);
+ }
+
+ wait_for_completion(sp->comp);
+ rval = sp->rc;
- return rval;
done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
@@ -3578,7 +3629,7 @@ login_logout:
do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
- fcport->logout_on_delete = 0;
+ continue;
ql_log(ql_log_warn, vha, 0x20f0,
"%s %d %8phC post del sess\n",
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f3417a3e891..e7fe0e52c11d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -47,6 +47,7 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
+ scsi_qla_host_t *vha = sp->vha;
WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
@@ -54,6 +55,12 @@ qla2x00_sp_timeout(struct timer_list *t)
/* ref: TMR */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
+ if (vha && qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9008,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
}
void qla2x00_sp_free(srb_t *sp)
@@ -161,6 +168,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
/* ref: INIT for ABTS command */
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
@@ -168,6 +176,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
if (!sp)
return QLA_MEMORY_ALLOC_FAILED;
+ QLA_VHA_MARK_BUSY(vha, bail);
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
@@ -1480,7 +1489,6 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ql_dbg(ql_dbg_disc, vha, 0x20ef,
"%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
__func__, __LINE__, fcport->port_name);
- fcport->edif.app_started = 1;
fcport->edif.app_sess_online = 1;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
@@ -1763,8 +1771,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_PEND:
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ if (vha->hw->flags.edif_enabled)
+ break;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post %s PRLI\n",
+ __func__, __LINE__, fcport->port_name,
+ NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
qla24xx_post_prli_work(vha, fcport);
+ }
break;
case DSC_UPD_FCPORT:
@@ -1818,7 +1834,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
fcport->d_id.b24, fcport->port_name);
@@ -1850,7 +1867,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_AREA_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
@@ -1861,7 +1879,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_DOM_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
@@ -1873,7 +1892,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_FAB_ADDR:
default:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
fcport->scan_needed = 1;
@@ -2000,12 +2020,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
/* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ QLA_VHA_MARK_BUSY(vha, bail);
sp->type = SRB_TM_CMD;
sp->name = "tmf";
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
@@ -2124,6 +2146,13 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
}
if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt ==
+ vha->hw->login_retry_count &&
+ ea->fcport->flags & FCF_FCSP_DEVICE) {
+ /* remote authentication app just started */
+ ea->fcport->n2n_link_reset_cnt = 0;
+ }
+
if (ea->fcport->n2n_link_reset_cnt <
vha->hw->login_retry_count) {
ea->fcport->n2n_link_reset_cnt++;
@@ -4509,6 +4538,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
+ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
}
/* ELS pass through payload is limit by frame size. */
@@ -5273,9 +5304,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
- if (vha->e_dbell.db_flags == EDB_ACTIVE)
- fcport->edif.app_started = 1;
-
spin_lock_init(&fcport->edif.indx_list_lock);
INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
@@ -5488,6 +5516,22 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5539,6 +5583,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5767,8 +5824,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (atomic_read(&fcport->state) == FCS_ONLINE)
return;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5869,7 +5924,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_reg_remote_port(vha, fcport);
break;
case MODE_TARGET:
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (!vha->vha_tgt.qla_tgt->tgt_stop &&
!vha->vha_tgt.qla_tgt->tgt_stopped)
qlt_fc_port_added(vha, fcport);
@@ -5887,6 +5941,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (NVME_TARGET(vha->hw, fcport))
qla_nvme_register_remote(vha, fcport);
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
@@ -7197,6 +7253,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
+
if (vha->hw->flags.port_isolated)
return status;
@@ -9657,6 +9716,12 @@ int qla2xxx_disable_port(struct Scsi_Host *host)
vha->hw->flags.port_isolated = 1;
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9006,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
if (qla2x00_chip_is_down(vha))
return 0;
@@ -9672,6 +9737,13 @@ int qla2xxx_enable_port(struct Scsi_Host *host)
{
scsi_qla_host_t *vha = shost_priv(host);
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9001,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
vha->hw->flags.port_isolated = 0;
/* Set the flag to 1, so that isp_abort can proceed */
vha->flags.online = 1;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e0fe9ddb4bd2..42ce4e1fe744 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2819,7 +2819,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->vha->qla_stats.control_requests++;
}
-static void
+void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
@@ -2882,6 +2882,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
+ /* For edif, set logout on delete to ensure any residual key from FW is flushed. */
+ fcport->logout_on_delete = 1;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 21b31d6359c8..76e79f350a22 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1354,9 +1354,7 @@ skip_rio:
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
- void *wwpn = ha->init_cb->port_name;
-
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ memcpy(vha->port_name, ha->port_name, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
@@ -1761,6 +1759,9 @@ global_port_update:
break;
case MBA_DPORT_DIAGNOSTICS:
+ if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
+ (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
@@ -2245,9 +2246,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
res = DID_ERROR << 16;
}
- if (logit) {
- if (sp->remap.remapped &&
- ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (logit) {
ql_dbg(ql_dbg_user, vha, 0x503f,
"%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
type, sp->handle, comp_status);
@@ -2259,18 +2260,24 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
pkt)->total_byte_count),
e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
- } else {
- ql_log(ql_log_info, vha, 0x503f,
- "%s IOCB Done hdl=%x comp_status=0x%x\n",
- type, sp->handle, comp_status);
- ql_log(ql_log_info, vha, 0x503f,
- "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
- fw_status[1], fw_status[2],
- le32_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count),
- e->s_id[0], e->s_id[2], e->s_id[1],
- e->d_id[2], e->d_id[1], e->d_id[0]);
}
+ if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s rcv reject. Sched delete\n", __func__);
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ }
+ } else if (logit) {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
}
}
goto els_ct_done;
@@ -2639,7 +2646,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (unlikely(logit))
- ql_log(ql_dbg_io, fcport->vha, 0x5060,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3426,6 +3433,7 @@ check_scsi_status:
case CS_PORT_UNAVAILABLE:
case CS_TIMEOUT:
case CS_RESET:
+ case CS_EDIF_INV_REQ:
/*
* We are going to have the fc class block the rport
@@ -3496,7 +3504,7 @@ check_scsi_status:
out:
if (logit)
- ql_log(ql_dbg_io, fcport->vha, 0x3022,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
@@ -3712,12 +3720,11 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
* Return: 0 all iocbs has arrived, xx- all iocbs have not arrived.
*/
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
- struct rsp_que *rsp, response_t *pkt)
+ struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
- int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
- response_t *end_pkt;
+ int start_pkt_ring_index;
+ u32 iocb_cnt = 0;
int rc = 0;
- u32 rsp_q_in;
if (pkt->entry_count == 1)
return rc;
@@ -3728,34 +3735,18 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
else
start_pkt_ring_index = rsp->ring_index - 1;
- if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
- rsp->length - 1;
+ if (rsp_q_in < start_pkt_ring_index)
+ /* q in ptr is wrapped */
+ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
else
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
+ iocb_cnt = rsp_q_in - start_pkt_ring_index;
- end_pkt = rsp->ring + end_pkt_ring_index;
-
- /* next pkt = end_pkt + 1 */
- n_ring_index = end_pkt_ring_index + 1;
- if (n_ring_index >= rsp->length)
- n_ring_index = 0;
-
- rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
- rd_reg_dword(rsp->rsp_q_in);
-
- /* rsp_q_in is either wrapped or pointing beyond endpkt */
- if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
- rsp_q_in >= n_ring_index)
- /* all IOCBs arrived. */
- rc = 0;
- else
+ if (iocb_cnt < pkt->entry_count)
rc = -EIO;
- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
- "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
- __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
- rsp_q_in, rc);
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
return rc;
}
@@ -3772,6 +3763,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ u16 rsp_in = 0, cur_ring_index;
+ int follow_inptr, is_shadow_hba;
if (!ha->flags.fw_started)
return;
@@ -3781,8 +3774,27 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla_cpu_update(rsp->qpair, smp_processor_id());
}
- while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
+ do { \
+ if (_update) { \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
+ } \
+ } while (0)
+
+ is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
+ follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
+ ql2xrspq_follow_inptr_legacy;
+
+ __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
+
+ while ((likely(follow_inptr &&
+ rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
+ (!follow_inptr &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+ cur_ring_index = rsp->ring_index;
rsp->ring_index++;
if (rsp->ring_index == rsp->length) {
@@ -3894,6 +3906,8 @@ process_err:
}
pure_item = qla27xx_copy_fpin_pkt(vha,
(void **)&pkt, &rsp);
+ __update_rsp_in(follow_inptr, is_shadow_hba,
+ rsp, rsp_in);
if (!pure_item)
break;
qla24xx_queue_purex_item(vha, pure_item,
@@ -3901,7 +3915,17 @@ process_err:
break;
case ELS_AUTH_ELS:
- if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ /*
+ * ring_ptr and ring_index were
+ * pre-incremented above. Reset them
+ * back to current. Wait for next
+ * interrupt with all IOCBs to arrive
+ * and re-process.
+ */
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
ql_dbg(ql_dbg_init, vha, 0x5091,
"Defer processing ELS opcode %#x...\n",
purex_entry->els_frame_payload[3]);
@@ -4420,16 +4444,12 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 892caf2475df..359595a64664 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -238,6 +238,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
wrt_reg_word(optr, *iptr);
+ } else {
+ wrt_reg_word(optr, 0);
}
mboxes >>= 1;
@@ -274,6 +276,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -286,12 +294,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -3066,7 +3068,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -3106,6 +3109,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -6471,6 +6476,54 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
+ struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
+{
+ int rval;
+ dma_addr_t dd_dma;
+ uint size = sizeof(dd->buf);
+ uint16_t options = dd->options;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+ "Entered %s.\n", __func__);
+
+ dd_dma = dma_map_single(&vha->hw->pdev->dev,
+ dd->buf, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+ ql_log(ql_log_warn, vha, 0x1194,
+ "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(dd->buf, 0, size);
+
+ mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(dd_dma));
+ mcp->mb[3] = LSW(LSD(dd_dma));
+ mcp->mb[6] = MSW(MSD(dd_dma));
+ mcp->mb[7] = LSW(MSD(dd_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS * 4;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
+
+ return rval;
+}
+
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
sp->u.iocb_cmd.u.mbx.rc = res;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 346d47b61c07..16a9f22bb860 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -166,9 +166,13 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
- if (vha->hw->flags.edif_enabled)
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_ACTIVE(vha))
+ qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
+ FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
/* delete sessions and flush sa_indexes */
qla2x00_wait_for_sess_deletion(vha);
+ }
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 87c9404aa401..7450c3458be7 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -37,11 +37,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- return 0;
-
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 73073fb08369..0bd0fd1042df 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -333,6 +333,21 @@ MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
+u32 ql2xdelay_before_pci_error_handling = 5;
+module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
+MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
+ "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
+
+int ql2xrspq_follow_inptr = 1;
+module_param(ql2xrspq_follow_inptr, int, 0644);
+MODULE_PARM_DESC(ql2xrspq_follow_inptr,
+ "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");
+
+int ql2xrspq_follow_inptr_legacy = 1;
+module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
+MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
+ "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1).");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
@@ -1337,21 +1352,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
-int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
- uint64_t l, enum nexus_wait_type type)
+static int
+__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
+ scsi_qla_host_t *vha = qpair->vha;
+ struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (cnt = 1; status == QLA_SUCCESS &&
cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
@@ -1378,12 +1392,32 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
if (!match)
continue;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
status = qla2x00_eh_wait_on_command(cmd);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ return status;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
+{
+ struct qla_qpair *qpair;
+ struct qla_hw_data *ha = vha->hw;
+ int i, status = QLA_SUCCESS;
+ status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
+ type);
+ for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
+ qpair = ha->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
+ type);
+ }
return status;
}
@@ -1420,7 +1454,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
@@ -1488,7 +1522,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
@@ -5472,7 +5506,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
e->u.fcport.fcport, false);
break;
case QLA_EVT_SA_REPLACE:
- qla24xx_issue_sa_replace_iocb(vha, e);
+ rc = qla24xx_issue_sa_replace_iocb(vha, e);
break;
}
@@ -7238,6 +7272,44 @@ static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
}
}
+static void qla_wind_down_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->flags.eeh_busy)
+ return;
+ if (ha->pci_error_state)
+ /* system is trying to recover */
+ return;
+
+ /*
+ * The system is not currently handling the PCIe error. At this
+ * point, this is a best-effort attempt to wind down the adapter.
+ */
+ if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
+ !ha->flags.eeh_flush) {
+ ql_log(ql_log_info, vha, 0x9009,
+ "PCI Error detected, attempting to reset hardware.\n");
+
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->disable_intrs(ha);
+
+ ha->flags.eeh_flush = EEH_FLUSH_RDY;
+ ha->eeh_jif = jiffies;
+
+ } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
+ time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
+ pci_clear_master(ha->pdev);
+
+ /* flush all commands */
+ qla2x00_abort_isp_cleanup(vha);
+ ha->flags.eeh_flush = EEH_FLUSH_DONE;
+
+ ql_log(ql_log_info, vha, 0x900a,
+ "PCI Error handling complete, all IOs aborted.\n");
+ }
+}
+
/**************************************************************************
* qla2x00_timer
*
@@ -7261,6 +7333,8 @@ qla2x00_timer(struct timer_list *t)
fc_port_t *fcport = NULL;
if (ha->flags.eeh_busy) {
+ qla_wind_down_chip(vha);
+
ql_dbg(ql_dbg_timer, vha, 0x6000,
"EEH = %d, restarting timer.\n",
ha->flags.eeh_busy);
@@ -7841,6 +7915,9 @@ void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
spin_lock_irqsave(&base_vha->work_lock, flags);
if (!ha->flags.eeh_busy) {
+ ha->eeh_jif = jiffies;
+ ha->flags.eeh_flush = 0;
+
ha->flags.eeh_busy = 1;
do_cleanup = true;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index cb97f625970d..2b2f68288375 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -981,22 +981,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
- if (ha->flags.edif_enabled &&
- (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
- sess->edif.authok = 0;
- if (!ha->flags.host_shutting_down) {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
- __func__, sess->port_name);
- qla2x00_release_all_sadb(vha, sess);
- } else {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s bypassing release_all_sadb\n",
- __func__);
- }
- qla_edif_clear_appdata(vha, sess);
- qla_edif_sess_down(vha, sess);
- }
qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
@@ -1042,6 +1026,25 @@ void qlt_free_session_done(struct work_struct *work)
sess->nvme_flag |= NVME_FLAG_DELETING;
qla_nvme_unregister_remote_port(sess);
}
+
+ if (ha->flags.edif_enabled &&
+ (!own || (own &&
+ own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ sess->edif.authok = 0;
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+
+ qla_edif_clear_appdata(vha, sess);
+ qla_edif_sess_down(vha, sess);
+ }
}
/*
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b09d7d2080c0..f3257d46b6d2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.400-k"
+#define QLA2XXX_VERSION "10.02.07.800-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 400
+#define QLA_DRIVER_BETA_VER 800
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c59eac7a32f2..086ec5b5862d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,10 +586,13 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- struct module *mod = sdev->host->hostt->module;
-
+ /*
+ * Decreasing the module reference count before the device reference
+ * count is safe since scsi_remove_host() only returns after all
+ * devices have been removed.
+ */
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
- module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 49ef864df581..448748e3fba5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -139,7 +139,7 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
*
* Note: this function must be called only for a command that has timed out.
* Because the block layer marks a request as complete before it calls
- * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
+ * scsi_timeout(), a .scsi_done() call from the LLD for a command that has
* timed out do not have any effect. Hence it is safe to call
* scsi_finish_command() from this function.
*/
@@ -316,7 +316,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
}
/**
- * scsi_times_out - Timeout function for normal scsi commands.
+ * scsi_timeout - Timeout function for normal scsi commands.
* @req: request that is timing out.
*
* Notes:
@@ -325,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-enum blk_eh_timer_return scsi_times_out(struct request *req)
+enum blk_eh_timer_return scsi_timeout(struct request *req)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum blk_eh_timer_return rtn = BLK_EH_DONE;
@@ -463,14 +463,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
scsi_report_lun_change(sdev);
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
"LUN assignments on this target have "
"changed. The Linux SCSI layer does not "
"automatically remap LUN assignments.\n");
} else if (sshdr->asc == 0x3f)
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
+ "Operating parameters on this target have "
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
@@ -1779,7 +1777,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
* scsi_noretry_cmd - determine if command should be failed fast
* @scmd: SCSI cmd to examine.
*/
-int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
struct request *req = scsi_cmd_to_rq(scmd);
@@ -1789,19 +1787,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
case DID_TIME_OUT:
goto check_type;
case DID_BUS_BUSY:
- return req->cmd_flags & REQ_FAILFAST_TRANSPORT;
+ return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
case DID_PARITY:
- return req->cmd_flags & REQ_FAILFAST_DEV;
+ return !!(req->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
- return 0;
+ return false;
fallthrough;
case DID_SOFT_ERROR:
- return req->cmd_flags & REQ_FAILFAST_DRIVER;
+ return !!(req->cmd_flags & REQ_FAILFAST_DRIVER);
}
if (!scsi_status_is_check_condition(scmd->result))
- return 0;
+ return false;
check_type:
/*
@@ -1809,9 +1807,9 @@ check_type:
* the check condition was retryable.
*/
if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
- return 1;
+ return true;
- return 0;
+ return false;
}
/**
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a480c4d589f5..729e309e6034 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -450,7 +450,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
goto out_put_request;
ret = 0;
- if (hdr->iovec_count) {
+ if (hdr->iovec_count && hdr->dxfer_len) {
struct iov_iter i;
struct iovec *iov = NULL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6ffc9e4258a8..4dbd29ab1dcc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,13 +75,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
return ret;
}
-/*
- * When to reinvoke queueing after a resource shortage. It's 3 msecs to
- * not change behaviour from the previous unplug mechanism, experimentation
- * may prove this needs changing.
- */
-#define SCSI_QUEUE_DELAY 3
-
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
@@ -163,7 +156,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* Requeue this command. It will go before all other commands
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
- * before blk_cleanup_queue() finishes.
+ * before blk_mq_destroy_queue() finishes.
*/
cmd->result = 0;
@@ -209,8 +202,8 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, u64 flags, req_flags_t rq_flags,
- int *resid)
+ int timeout, int retries, blk_opf_t flags,
+ req_flags_t rq_flags, int *resid)
{
struct request *req;
struct scsi_cmnd *scmd;
@@ -424,9 +417,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
* it and the queue. Mitigate by taking a reference to the
* queue and never touching the sdev again after we drop the
* host lock. Note: if __scsi_remove_device() invokes
- * blk_cleanup_queue() before the queue is run from this
+ * blk_mq_destroy_queue() before the queue is run from this
* function then blk_run_queue() will return immediately since
- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
*/
slq = sdev->request_queue;
if (!blk_get_queue(slq))
@@ -633,7 +626,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
*/
static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
- unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
unsigned int bytes = 0;
struct bio *bio;
@@ -1125,12 +1118,12 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
-struct request *scsi_alloc_request(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags)
+struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags)
{
struct request *rq;
- rq = blk_mq_alloc_request(q, op, flags);
+ rq = blk_mq_alloc_request(q, opf, flags);
if (!IS_ERR(rq))
scsi_initialize_rq(rq);
return rq;
@@ -1648,6 +1641,13 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
sbitmap_put(&sdev->budget_map, budget_token);
}
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
static int scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
@@ -1790,14 +1790,6 @@ out_put_budget:
return ret;
}
-static enum blk_eh_timer_return scsi_timeout(struct request *req,
- bool reserved)
-{
- if (reserved)
- return BLK_EH_RESET_TIMER;
- return scsi_times_out(req);
-}
-
static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
@@ -1884,10 +1876,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- if (dev->dma_mask) {
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) >> SECTOR_SHIFT);
- }
blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5c4786310a31..429663bd78ec 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -72,7 +72,7 @@ extern void scsi_exit_devinfo(void);
/* scsi_error.c */
extern void scmd_eh_abort_handler(struct work_struct *work);
-extern enum blk_eh_timer_return scsi_times_out(struct request *req);
+extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
@@ -82,7 +82,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
-int scsi_noretry_cmd(struct scsi_cmnd *scmd);
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 91ac901a6682..ac6059702d13 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -406,9 +406,14 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
+ struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
+
+ if (atomic_dec_return(&shost->target_count) == 0)
+ wake_up(&shost->targets_wq);
+
put_device(parent);
}
@@ -521,6 +526,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
+ init_waitqueue_head(&starget->sdev_wq);
+
+ atomic_inc(&shost->target_count);
+
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 43949798a2e4..9dad2fd5297f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -443,18 +443,15 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct scsi_device *sdev;
+ struct scsi_device *sdev = container_of(work, struct scsi_device,
+ ew.work);
+ struct scsi_target *starget = sdev->sdev_target;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
- struct module *mod;
-
- sdev = container_of(work, struct scsi_device, ew.work);
-
- mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -516,19 +513,16 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
+ if (starget && atomic_dec_return(&starget->sdev_count) == 0)
+ wake_up(&starget->sdev_wq);
+
if (parent)
put_device(parent);
- module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
-
- /* Set module pointer as NULL in case of module unloading */
- if (!try_module_get(sdp->host->hostt->module))
- sdp->host->hostt->module = NULL;
-
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -1475,7 +1469,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
scsi_device_set_state(sdev, SDEV_DEL);
mutex_unlock(&sdev->state_mutex);
- blk_cleanup_queue(sdev->request_queue);
+ blk_mq_destroy_queue(sdev->request_queue);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
@@ -1535,6 +1529,14 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * After scsi_remove_target() returns its caller can remove resources
+ * associated with @starget, e.g. an rport or session. Wait until all
+ * devices associated with @starget have been removed to prevent that
+ * a SCSI error handling callback function triggers a use-after-free.
+ */
+ wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
/**
@@ -1645,6 +1647,9 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ atomic_inc(&starget->sdev_count);
+
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5d21f07456c6..cd3db9684e52 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1980,7 +1980,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
scsi_remove_target(&session->dev);
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, target_id);
+ ida_free(&iscsi_sess_ida, target_id);
unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
@@ -2049,7 +2049,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
return -ENOMEM;
if (target_id == ISCSI_MAX_TARGET) {
- id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL);
if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
@@ -2088,7 +2088,7 @@ release_dev:
device_del(&session->dev);
release_ida:
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, session->target_id);
+ ida_free(&iscsi_sess_ida, session->target_id);
destroy_wq:
destroy_workqueue(session->workq);
return err;
@@ -2143,8 +2143,6 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
return 0;
iscsi_remove_conn(iscsi_dev_to_conn(dev));
- iscsi_put_conn(iscsi_dev_to_conn(dev));
-
return 0;
}
@@ -2264,18 +2262,20 @@ static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
}
}
-static int iscsi_if_stop_conn(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
{
- int flag = ev->u.stop_conn.flag;
- struct iscsi_cls_conn *conn;
-
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (!conn)
- return -EINVAL;
-
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
/*
+ * For offload, iscsid may not know about the ep like when iscsid is
+ * restarted or for kernel based session shutdown iscsid is not even
+ * up. For these cases, we do the disconnect now.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* If this is a termination we have to call stop_conn with that flag
* so the correct states get set. If we haven't run the work yet try to
* avoid the extra run.
@@ -2285,16 +2285,6 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
- * For offload, when iscsid is restarted it won't know about
- * existing endpoints so it can't do a ep_disconnect. We clean
- * it up here for userspace.
- */
- mutex_lock(&conn->ep_mutex);
- if (conn->ep)
- iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
- mutex_unlock(&conn->ep_mutex);
-
- /*
* Figure out if it was the kernel or userspace initiating this.
*/
spin_lock_irq(&conn->lock);
@@ -2349,6 +2339,55 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
}
+static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data)
+{
+ struct iscsi_transport *transport;
+ struct iscsi_cls_conn *conn;
+
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+
+ conn = iscsi_dev_to_conn(dev);
+ transport = conn->transport;
+
+ if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN)
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
+
+ transport->destroy_conn(conn);
+ return 0;
+}
+
+/**
+ * iscsi_force_destroy_session - destroy a session from the kernel
+ * @session: session to destroy
+ *
+ * Force the destruction of a session from the kernel. This should only be
+ * used when userspace is no longer running during system shutdown.
+ */
+void iscsi_force_destroy_session(struct iscsi_cls_session *session)
+{
+ struct iscsi_transport *transport = session->transport;
+ unsigned long flags;
+
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
+
+ spin_lock_irqsave(&sesslock, flags);
+ if (list_empty(&session->sess_list)) {
+ spin_unlock_irqrestore(&sesslock, flags);
+ /*
+ * Conn/ep is already freed. Session is being torn down via
+ * async path. For shutdown we don't care about it so return.
+ */
+ return;
+ }
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ device_for_each_child(&session->dev, NULL,
+ iscsi_iter_force_destroy_conn_fn);
+ transport->destroy_session(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_force_destroy_session);
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -3720,7 +3759,12 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_DESTROY_CONN:
return iscsi_if_destroy_conn(transport, ev);
case ISCSI_UEVENT_STOP_CONN:
- return iscsi_if_stop_conn(transport, ev);
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid,
+ ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
}
/*
@@ -4812,7 +4856,7 @@ free_priv:
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
-int iscsi_unregister_transport(struct iscsi_transport *tt)
+void iscsi_unregister_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
@@ -4835,8 +4879,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
device_unregister(&priv->dev);
mutex_unlock(&rx_queue_mutex);
-
- return 0;
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 12bff64dade6..2f88c61216ee 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -225,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct device *dma_dev = shost->dma_dev;
INIT_LIST_HEAD(&sas_host->rphy_list);
mutex_init(&sas_host->lock);
@@ -236,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
shost->host_no);
+ if (dma_dev->dma_mask) {
+ shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a2ac09066f..8f79fa6318fe 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -103,6 +103,7 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
+static void sd_start_done_work(struct work_struct *work);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
@@ -2934,15 +2935,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (sdkp->device->type == TYPE_ZBC) {
/* Host-managed */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
} else {
sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
/* Host-aware */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
} else {
/* Regular disk or drive managed disk */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
}
}
@@ -3296,6 +3297,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
(sector_t)BLK_DEF_MAX_SECTORS);
}
+ /*
+ * Limit default to SCSI host optimal sector limit if set. There may be
+ * an impact on performance for when the size of a request exceeds this
+ * host limit.
+ */
+ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
+
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
@@ -3440,8 +3448,8 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
- &sd_bio_compl_lkclass);
+ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
+ &sd_bio_compl_lkclass);
if (!gd)
goto out_free;
@@ -3463,6 +3471,7 @@ static int sd_probe(struct device *dev)
sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
+ INIT_WORK(&sdkp->start_done_work, sd_start_done_work);
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
@@ -3585,12 +3594,69 @@ static void scsi_disk_release(struct device *dev)
kfree(sdkp);
}
+/* Process sense data after a START command finished. */
+static void sd_start_done_work(struct work_struct *work)
+{
+ struct scsi_disk *sdkp = container_of(work, typeof(*sdkp),
+ start_done_work);
+ struct scsi_sense_hdr sshdr;
+ int res = sdkp->start_result;
+
+ if (res == 0)
+ return;
+
+ sd_print_result(sdkp, "Start/Stop Unit failed", res);
+
+ if (res < 0)
+ return;
+
+ if (scsi_normalize_sense(sdkp->start_sense_buffer,
+ sdkp->start_sense_len, &sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+}
+
+/* A START command finished. May be called from interrupt context. */
+static void sd_start_done(struct request *req, blk_status_t status)
+{
+ const struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+ struct scsi_disk *sdkp = scsi_disk(req->q->disk);
+
+ sdkp->start_result = scmd->result;
+ WARN_ON_ONCE(scmd->sense_len > SCSI_SENSE_BUFFERSIZE);
+ sdkp->start_sense_len = scmd->sense_len;
+ memcpy(sdkp->start_sense_buffer, scmd->sense_buffer,
+ ARRAY_SIZE(sdkp->start_sense_buffer));
+ WARN_ON_ONCE(!schedule_work(&sdkp->start_done_work));
+}
+
+/* Submit a START command asynchronously. */
+static int sd_submit_start(struct scsi_disk *sdkp, u8 cmd[], u8 cmd_len)
+{
+ struct scsi_device *sdev = sdkp->device;
+ struct request_queue *q = sdev->request_queue;
+ struct request *req;
+ struct scsi_cmnd *scmd;
+
+ req = scsi_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = cmd_len;
+ memcpy(scmd->cmnd, cmd, cmd_len);
+ scmd->allowed = sdkp->max_retries;
+ req->timeout = SD_TIMEOUT;
+ req->rq_flags |= RQF_PM | RQF_QUIET;
+ req->end_io = sd_start_done;
+ blk_execute_rq_nowait(req, /*at_head=*/true);
+
+ return 0;
+}
+
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
- struct scsi_sense_hdr sshdr;
struct scsi_device *sdp = sdkp->device;
- int res;
if (start)
cmd[4] |= 1; /* START */
@@ -3601,23 +3667,10 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
- if (res) {
- sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (res > 0 && scsi_sense_valid(&sshdr)) {
- sd_print_sense_hdr(sdkp, &sshdr);
- /* 0x3a is medium not present */
- if (sshdr.asc == 0x3a)
- res = 0;
- }
- }
-
- /* SCSI error codes must not go to the generic layer */
- if (res)
- return -EIO;
+ /* Wait until processing of sense data has finished. */
+ flush_work(&sdkp->start_done_work);
- return 0;
+ return sd_submit_start(sdkp, cmd, sizeof(cmd));
}
/*
@@ -3644,6 +3697,8 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
+
+ flush_work(&sdkp->start_done_work);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5eea762f84d1..b89187761d61 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -150,6 +150,11 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
+
+ int start_result;
+ u32 start_sense_len;
+ u8 start_sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ struct work_struct start_done_work;
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6acc4f406eb8..bd15624c6322 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -529,7 +529,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int zno = blk_rq_zone_no(rq);
- enum req_opf op = req_op(rq);
+ enum req_op op = req_op(rq);
unsigned long flags;
/*
@@ -855,7 +855,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
if (sdkp->zone_info.zone_blocks == zone_blocks &&
sdkp->zone_info.nr_zones == nr_zones &&
- disk->queue->nr_zones == nr_zones)
+ disk->nr_zones == nr_zones)
goto unlock;
flags = memalloc_noio_save();
@@ -929,7 +929,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
/*
* This can happen for a host aware disk with partitions.
* The block device zone model was already cleared by
- * blk_queue_set_zoned(). Only free the scsi disk zone
+ * disk_set_zoned(). Only free the scsi disk zone
* information and exit early.
*/
sd_zbc_free_zone_info(sdkp);
@@ -950,10 +950,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
if (sdkp->zones_max_open == U32_MAX)
- blk_queue_max_open_zones(q, 0);
+ disk_set_max_open_zones(disk, 0);
else
- blk_queue_max_open_zones(q, sdkp->zones_max_open);
- blk_queue_max_active_zones(q, 0);
+ disk_set_max_open_zones(disk, sdkp->zones_max_open);
+ disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
/*
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 118c7b4a8af2..340b050ad28d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -195,7 +195,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -444,6 +444,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ -466,20 +467,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
@@ -940,9 +937,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -2079,19 +2074,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2145,6 +2149,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 6f83e2df4d64..973d240649ab 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 2e40320129c0..e550b12e525a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -293,7 +293,8 @@ struct pqi_raid_path_request {
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
u8 cdb[16];
- u8 reserved6[12];
+ u8 reserved6[11];
+ u8 ml_device_lun_number;
__le32 timeout;
struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -467,7 +468,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[2];
+ u8 reserved;
+ u8 ml_device_lun_number;
__le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
@@ -708,6 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
+#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -863,7 +866,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 18
+#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -1081,6 +1085,8 @@ struct pqi_stream_data {
u32 last_accessed;
};
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
@@ -1124,6 +1130,7 @@ struct pqi_scsi_dev {
u8 phy_id;
u8 ncq_prio_enable;
u8 ncq_prio_support;
+ u8 multi_lun_device_lun_count;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
@@ -1139,7 +1146,7 @@ struct pqi_scsi_dev {
struct list_head delete_list_entry;
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
- atomic_t scsi_cmds_outstanding;
+ atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
atomic_t raid_bypass_cnt;
};
@@ -1262,6 +1269,12 @@ struct pqi_event {
#define PQI_CTRL_PRODUCT_REVISION_A 0
#define PQI_CTRL_PRODUCT_REVISION_B 1
+enum pqi_ctrl_removal_state {
+ PQI_CTRL_PRESENT = 0,
+ PQI_CTRL_GRACEFUL_REMOVAL,
+ PQI_CTRL_SURPRISE_REMOVAL
+};
+
struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
@@ -1332,12 +1345,13 @@ struct pqi_ctrl_info {
u8 tmf_iu_timeout_supported : 1;
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
+ u8 multi_lun_device_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
u8 lv_drive_type_mix_valid : 1;
u8 enable_stream_detection : 1;
-
+ u8 disable_managed_interrupts : 1;
u8 ciss_report_log_flags;
u32 max_transfer_encrypted_sas_sata;
u32 max_transfer_encrypted_nvme;
@@ -1381,6 +1395,7 @@ struct pqi_ctrl_info {
struct work_struct ofa_quiesce_work;
u32 ofa_bytes_requested;
u16 ofa_cancel_reason;
+ enum pqi_ctrl_removal_state ctrl_removal_state;
};
enum pqi_ctrl_mode {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7c0d069a3158..7a8c2c75acba 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.14-035"
+#define DRIVER_VERSION "2.1.18-045"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 14
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 18
+#define DRIVER_REVISION 45
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -94,7 +94,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs);
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -174,6 +175,18 @@ module_param_named(hide_vsep,
pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
+static int pqi_disable_managed_interrupts;
+module_param_named(disable_managed_interrupts,
+ pqi_disable_managed_interrupts, int, 0644);
+MODULE_PARM_DESC(disable_managed_interrupts,
+ "Disable the kernel automatically assigning SMP affinity to IRQs.");
+
+static unsigned int pqi_ctrl_ready_timeout_secs;
+module_param_named(ctrl_ready_timeout,
+ pqi_ctrl_ready_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(ctrl_ready_timeout,
+ "Timeout in seconds for driver to wait for controller ready.");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -1597,7 +1610,9 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
&id_phys->alternate_paths_phys_connector,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
-
+ device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+ if (!device->multi_lun_device_lun_count)
+ device->multi_lun_device_lun_count = 1;
if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
id_phys->phy_count)
device->phy_id =
@@ -1880,15 +1895,18 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
+ int lun;
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun,
- atomic_read(&device->scsi_cmds_outstanding));
+ for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, lun,
+ atomic_read(&device->scsi_cmds_outstanding[lun]));
+ }
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
@@ -2020,6 +2038,23 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
+static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
+{
+ u32 raid_map1_size;
+ u32 raid_map2_size;
+
+ if (raid_map1 == NULL || raid_map2 == NULL)
+ return raid_map1 == raid_map2;
+
+ raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
+ raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
+
+ if (raid_map1_size != raid_map2_size)
+ return false;
+
+ return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
+}
+
/* Assumes the SCSI device list lock is held. */
static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
@@ -2033,49 +2068,51 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
existing_device->target_lun_valid = true;
}
- if (pqi_is_logical_device(existing_device) &&
- ctrl_info->logical_volume_rescan_needed)
- existing_device->rescan = true;
-
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->is_external_raid_device =
- new_device->is_external_raid_device;
- existing_device->is_expander_smp_device =
- new_device->is_expander_smp_device;
- existing_device->aio_enabled = new_device->aio_enabled;
- memcpy(existing_device->vendor, new_device->vendor,
- sizeof(existing_device->vendor));
- memcpy(existing_device->model, new_device->model,
- sizeof(existing_device->model));
+ memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
existing_device->sas_address = new_device->sas_address;
- existing_device->raid_level = new_device->raid_level;
existing_device->queue_depth = new_device->queue_depth;
- existing_device->aio_handle = new_device->aio_handle;
- existing_device->volume_status = new_device->volume_status;
- existing_device->active_path_index = new_device->active_path_index;
- existing_device->phy_id = new_device->phy_id;
- existing_device->path_map = new_device->path_map;
- existing_device->bay = new_device->bay;
- existing_device->box_index = new_device->box_index;
- existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
- existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
- memcpy(existing_device->box, new_device->box,
- sizeof(existing_device->box));
- memcpy(existing_device->phys_connector, new_device->phys_connector,
- sizeof(existing_device->phys_connector));
- memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
- kfree(existing_device->raid_map);
- existing_device->raid_map = new_device->raid_map;
- existing_device->raid_bypass_configured =
- new_device->raid_bypass_configured;
- existing_device->raid_bypass_enabled =
- new_device->raid_bypass_enabled;
existing_device->device_offline = false;
- /* To prevent this from being freed later. */
- new_device->raid_map = NULL;
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+ if (ctrl_info->logical_volume_rescan_needed)
+ existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ }
+ existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
+ }
+ } else {
+ existing_device->aio_enabled = new_device->aio_enabled;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+
+ existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+ if (existing_device->multi_lun_device_lun_count == 0)
+ existing_device->multi_lun_device_lun_count = 1;
+ }
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
@@ -2505,23 +2542,6 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
- struct pqi_scsi_dev *next;
-
- list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- list_del(&device->scsi_device_list_entry);
- pqi_free_device(device);
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -3322,6 +3342,9 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
+ case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ rc = -ENODEV;
+ break;
default:
rc = -EIO;
break;
@@ -3663,6 +3686,20 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
return ack_event;
}
+static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->raid_bypass_enabled)
+ device->raid_bypass_enabled = false;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
@@ -3690,6 +3727,8 @@ static void pqi_event_worker(struct work_struct *work)
rescan_needed = true;
if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
ctrl_info->logical_volume_rescan_needed = true;
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
}
if (ack_event)
pqi_acknowledge_event(ctrl_info, event);
@@ -3697,8 +3736,11 @@ static void pqi_event_worker(struct work_struct *work)
event++;
}
+#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
+
if (rescan_needed)
- pqi_schedule_rescan_worker_delayed(ctrl_info);
+ pqi_schedule_rescan_worker_with_delay(ctrl_info,
+ PQI_RESCAN_WORK_FOR_EVENT_DELAY);
out:
pqi_ctrl_unbusy(ctrl_info);
@@ -3992,10 +4034,14 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
int num_vectors_enabled;
+ unsigned int flags = PCI_IRQ_MSIX;
+
+ if (!pqi_disable_managed_interrupts)
+ flags |= PCI_IRQ_AFFINITY;
num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ flags);
if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
"MSI-X init failed with error %d\n",
@@ -5457,6 +5503,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
+ request->ml_device_lun_number = (u8)scmd->device->lun;
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -5484,10 +5531,10 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
@@ -5621,7 +5668,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
@@ -5637,6 +5686,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5846,7 +5897,7 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
return;
}
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5941,7 +5992,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding);
+ atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
ctrl_info = shost_to_hba(shost);
@@ -5987,7 +6038,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
out:
if (rc)
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
return rc;
}
@@ -6127,7 +6178,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs)
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{
int cmds_outstanding;
unsigned long start_jiffies;
@@ -6137,23 +6188,25 @@ static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
start_jiffies = jiffies;
warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
- while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
+ if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ }
msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
if (msecs_waiting >= timeout_msecs) {
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
return -ETIMEDOUT;
}
if (time_after(jiffies, warning_timeout)) {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
@@ -6173,7 +6226,7 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct completion *wait)
+ struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{
int rc;
unsigned int wait_secs;
@@ -6195,10 +6248,10 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
}
wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
- cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
- ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
}
return rc;
@@ -6206,13 +6259,15 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6226,6 +6281,8 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
put_unaligned_le16(io_request->index, &request->request_id);
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ request->ml_device_lun_number = (u8)scmd->device->lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6233,7 +6290,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6247,16 +6304,18 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, device);
- if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, scmd);
+ if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6264,18 +6323,19 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pq
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
@@ -6283,7 +6343,7 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, device);
+ rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
pqi_ctrl_unblock_requests(ctrl_info);
return rc;
@@ -6305,18 +6365,18 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
shost->host_no,
- device->bus, device->target, device->lun,
+ device->bus, device->target, (u32)scmd->device->lun,
scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, device);
+ rc = pqi_device_reset(ctrl_info, scmd);
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6405,6 +6465,35 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ int mutex_acquired;
+ unsigned long flags;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+ if (!mutex_acquired)
+ return;
+
+ device = sdev->hostdata;
+ if (!device) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
@@ -6919,6 +7008,9 @@ static ssize_t pqi_unique_id_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6955,6 +7047,9 @@ static ssize_t pqi_lunid_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6990,6 +7085,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7067,6 +7165,9 @@ static ssize_t pqi_sas_address_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7093,6 +7194,9 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7122,6 +7226,9 @@ static ssize_t pqi_raid_level_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7152,6 +7259,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7179,6 +7289,9 @@ static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7268,6 +7381,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
@@ -7290,6 +7404,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->this_id = -1;
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
- shost->max_lun = ~0;
+ shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
@@ -7358,8 +7473,7 @@ static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
reset_reg.all_bits = readl(&pqi_registers->device_reset);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (!sis_is_firmware_running(ctrl_info)) {
rc = -ENXIO;
break;
}
@@ -7463,6 +7577,9 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
sizeof(identify->vendor_id));
ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Firmware version: %s\n", ctrl_info->firmware_version);
+
out:
kfree(identify);
@@ -7634,6 +7751,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
+ ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7734,6 +7854,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Multi-LUN Target",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -7835,6 +7960,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->tmf_iu_timeout_supported = false;
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
+ ctrl_info->multi_lun_device_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8491,6 +8617,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
ctrl_info->max_write_raid_1_10_2drive = ~0;
ctrl_info->max_write_raid_1_10_3drive = ~0;
+ ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
return ctrl_info;
}
@@ -8508,7 +8635,6 @@ static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
- pqi_stop_heartbeat_timer(ctrl_info);
pqi_free_interrupts(ctrl_info);
if (ctrl_info->queue_memory_base)
dma_free_coherent(&ctrl_info->pci_dev->dev,
@@ -8533,9 +8659,15 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
+ ctrl_info->controller_online = false;
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ }
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8875,11 +9007,18 @@ error:
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
+ u16 vendor_id;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
+ pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
+ if (vendor_id == 0xffff)
+ ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
+ else
+ ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
+
pqi_remove_ctrl(ctrl_info);
}
@@ -8956,9 +9095,31 @@ static void pqi_process_lockup_action_param(void)
DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
+
+static void pqi_process_ctrl_ready_timeout_param(void)
+{
+ if (pqi_ctrl_ready_timeout_secs == 0)
+ return;
+
+ if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
+ } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
+ }
+
+ sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
+}
+
static void pqi_process_module_params(void)
{
pqi_process_lockup_action_param();
+ pqi_process_ctrl_ready_timeout_param();
}
#if defined(CONFIG_PM)
@@ -9275,6 +9436,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0659)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
@@ -9739,6 +9904,46 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0201)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0220)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0221)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0520)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0522)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0620)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0621)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0622)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
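The pqi_process_ctrl_ready_timeout_param() hunk above only forwards the ctrl_ready_timeout module parameter to the SIS layer after clamping it to a 30 s .. 30 min window (0 keeps the built-in default). A hedged, illustrative sketch of that clamp-with-warning pattern follows; clamp_ready_timeout and the TIMEOUT_* names are placeholders, not symbols from the patch.

#include <linux/kernel.h>
#include <linux/printk.h>

#define TIMEOUT_MIN_SECS 30		/* mirrors PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS */
#define TIMEOUT_MAX_SECS (30 * 60)	/* mirrors PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS */

/*
 * Clamp a user-supplied timeout (seconds) into [MIN, MAX].
 * 0 is passed through unchanged and means "keep the built-in default".
 */
static unsigned int clamp_ready_timeout(unsigned int secs)
{
	if (secs == 0)
		return 0;

	if (secs < TIMEOUT_MIN_SECS) {
		pr_warn("timeout %u s below minimum, using %u s\n", secs, TIMEOUT_MIN_SECS);
		return TIMEOUT_MIN_SECS;
	}

	if (secs > TIMEOUT_MAX_SECS) {
		pr_warn("timeout %u s above maximum, using %u s\n", secs, TIMEOUT_MAX_SECS);
		return TIMEOUT_MAX_SECS;
	}

	return secs;
}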
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index dea4ebaf1677..13e8c539010e 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index afc27adf68e9..5811fb3c22a9 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -86,6 +86,8 @@ struct sis_base_struct {
#pragma pack()
+unsigned int sis_ctrl_ready_timeout_secs = SIS_CTRL_READY_TIMEOUT_SECS;
+
static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
unsigned int timeout_secs)
{
@@ -122,7 +124,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
{
return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
- SIS_CTRL_READY_TIMEOUT_SECS);
+ sis_ctrl_ready_timeout_secs);
}
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
@@ -138,7 +140,7 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
status = readl(&ctrl_info->registers->sis_firmware_status);
- if (status & SIS_CTRL_KERNEL_PANIC)
+ if (status != ~0 && (status & SIS_CTRL_KERNEL_PANIC))
running = false;
else
running = true;
@@ -194,6 +196,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
/* Disable doorbell interrupts by masking all interrupts. */
writel(~0, &registers->sis_interrupt_mask);
+ usleep_range(1000, 2000);
/*
* Force the completion of the interrupt mask register write before
@@ -383,6 +386,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
{
writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ usleep_range(1000, 2000);
return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
}
@@ -423,6 +427,7 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
{
writel(value, &ctrl_info->registers->sis_driver_scratch);
+ usleep_range(1000, 2000);
}
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 5f3575261a8e..9dcbae96a5c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -32,4 +32,6 @@ void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+extern unsigned int sis_ctrl_ready_timeout_secs;
+
#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h
index a5290562c1fa..52a916fd0824 100644
--- a/drivers/scsi/snic/cq_desc.h
+++ b/drivers/scsi/snic/cq_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h
index 0a1be2ed0288..bd7381e52521 100644
--- a/drivers/scsi/snic/cq_enet_desc.h
+++ b/drivers/scsi/snic/cq_enet_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index 4ec7e30678e1..32f5a34b6987 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _SNIC_H_
#define _SNIC_H_
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
index dc03ce1ec909..3ddbdbc3ded1 100644
--- a/drivers/scsi/snic/snic_attrs.c
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/string.h>
#include <linux/device.h>
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 703f229862fc..5f4fca96b192 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 5e0faeba516e..57bdc3ba49d9 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 27e98df83b31..9b2b5f8c23b9 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h
index 97fa3f5c5bb4..9ad7f84a3484 100644
--- a/drivers/scsi/snic/snic_disc.h
+++ b/drivers/scsi/snic/snic_disc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index 2a045a57e365..2550ba964b03 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_FWINT_H
#define __SNIC_FWINT_H
@@ -159,7 +145,7 @@ struct snic_exch_ver_req {
* HBA Capabilities
* Bit 1: Reserved.
* Bit 2: Dynamic Discovery of LUNs.
- * Bit 3: Async event notifications on on tgt online/offline events.
+ * Bit 3: Async event notifications on tgt online/offline events.
* Bit 4: IO timeout support in FW.
* Bit 5-31: Reserved.
*/
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 159ee94d2a55..32a77bee41d5 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h
index 093d6524cd42..de6694a24c5f 100644
--- a/drivers/scsi/snic/snic_io.h
+++ b/drivers/scsi/snic/snic_io.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _SNIC_IO_H
#define _SNIC_IO_H
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index c4da3673f2ae..471a37422da9 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/string.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 29d56396058c..174f7811fe50 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c
index b54912c8ca0c..43f1a2823514 100644
--- a/drivers/scsi/snic/snic_res.c
+++ b/drivers/scsi/snic/snic_res.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h
index 273f72f2a023..53cf6b19ab28 100644
--- a/drivers/scsi/snic/snic_res.h
+++ b/drivers/scsi/snic/snic_res.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 5f17666f3e1d..961af6fc21bc 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/mempool.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index faf0cb601954..f0285c5a35f8 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_STATS_H
#define __SNIC_STATS_H
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index f23fe2f88438..c2e5ab7e976c 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
index ce305b4b8fa2..c38e0dadc958 100644
--- a/drivers/scsi/snic/snic_trc.h
+++ b/drivers/scsi/snic/snic_trc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_TRC_H
#define __SNIC_TRC_H
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
index 3455dd7e73f4..0d5d3bd4be1c 100644
--- a/drivers/scsi/snic/vnic_cq.c
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h
index 6e651c3e16f7..6cee911eec5f 100644
--- a/drivers/scsi/snic/vnic_cq.h
+++ b/drivers/scsi/snic/vnic_cq.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h
index c2d1bbd44bd1..d74954bc70e3 100644
--- a/drivers/scsi/snic/vnic_cq_fw.h
+++ b/drivers/scsi/snic/vnic_cq_fw.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_CQ_FW_H_
#define _VNIC_CQ_FW_H_
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index 05e374f80946..760f3f22095c 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h
index e65726da6504..d2f9b6f7b313 100644
--- a/drivers/scsi/snic/vnic_dev.h
+++ b/drivers/scsi/snic/vnic_dev.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
index 0e0fa38f8d90..9d82fcb7414b 100644
--- a/drivers/scsi/snic/vnic_devcmd.h
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c
index a7d54806787d..23627f9591f2 100644
--- a/drivers/scsi/snic/vnic_intr.c
+++ b/drivers/scsi/snic/vnic_intr.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h
index 4547f603fe5e..7bff60fafb07 100644
--- a/drivers/scsi/snic/vnic_intr.h
+++ b/drivers/scsi/snic/vnic_intr.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h
index 9713d6835db3..372596b0915f 100644
--- a/drivers/scsi/snic/vnic_resource.h
+++ b/drivers/scsi/snic/vnic_resource.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h
index 514d39f5cf00..ffc8a0fee577 100644
--- a/drivers/scsi/snic/vnic_snic.h
+++ b/drivers/scsi/snic/vnic_snic.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_SNIC_H_
#define _VNIC_SNIC_H_
diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h
index 370a37c97748..38155aae7a52 100644
--- a/drivers/scsi/snic/vnic_stats.h
+++ b/drivers/scsi/snic/vnic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c
index 1e91d432089e..48be9a3f4c3d 100644
--- a/drivers/scsi/snic/vnic_wq.c
+++ b/drivers/scsi/snic/vnic_wq.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h
index 7cc031c7ceba..1415da4b68dc 100644
--- a/drivers/scsi/snic/vnic_wq.h
+++ b/drivers/scsi/snic/vnic_wq.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h
index 68f62b6d105b..e8025331b503 100644
--- a/drivers/scsi/snic/wq_enet_desc.h
+++ b/drivers/scsi/snic/wq_enet_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 32d3b8274f14..a278b739d0c5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -624,8 +624,8 @@ static int sr_probe(struct device *dev)
if (!cd)
goto fail;
- disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
- &sr_bio_compl_lkclass);
+ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
+ &sr_bio_compl_lkclass);
if (!disk)
goto fail_free;
mutex_init(&cd->lock);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 255a2d48d421..f0db17e34ea0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3598,7 +3598,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num)
}
/*
- * Gerard's alchemy:) that deals with with the data
+ * Gerard's alchemy:) that deals with the data
* pointer for both MDP and the residual calculation.
*
* I didn't want to bloat the code by more than 200
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 358df7510186..828d81e02b37 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data,
if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1;
- cpumask_copy(irq_data_get_affinity_mask(data), cpumask);
+ irq_data_update_affinity(data, cpumask);
return IRQ_SET_MASK_OK_NOCOPY;
}
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 78480e332ab8..219483b79c09 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -250,7 +250,7 @@ int slim_register_controller(struct slim_controller *ctrl)
{
int id;
- id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&ctrl_ida, GFP_KERNEL);
if (id < 0)
return id;
@@ -299,7 +299,7 @@ int slim_unregister_controller(struct slim_controller *ctrl)
{
/* Remove all clients */
device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
- ida_simple_remove(&ctrl_ida, ctrl->id);
+ ida_free(&ctrl_ida, ctrl->id);
return 0;
}
@@ -323,7 +323,7 @@ void slim_report_absent(struct slim_device *sbdev)
sbdev->is_laddr_valid = false;
mutex_unlock(&ctrl->lock);
if (!ctrl->get_laddr)
- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+ ida_free(&ctrl->laddr_ida, sbdev->laddr);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
}
EXPORT_SYMBOL_GPL(slim_report_absent);
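The slimbus hunks above mechanically replace the deprecated ida_simple_get()/ida_simple_remove() wrappers with ida_alloc()/ida_free(); with a lower bound of 0 and no upper bound the old and new calls allocate the same IDs. An illustrative sketch of the new API (controller_ida and the two helpers are made-up names, not taken from the slimbus code):

#include <linux/idr.h>

static DEFINE_IDA(controller_ida);

/* Allocate the smallest free ID; ida_alloc() is equivalent to ida_simple_get(&ida, 0, 0, gfp). */
static int register_controller(void)
{
	int id = ida_alloc(&controller_ida, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	/* ... use id as the controller number ... */
	return id;
}

static void unregister_controller(int id)
{
	ida_free(&controller_ida, id);	/* replaces ida_simple_remove() */
}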
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index e5ae26227bdb..4ce0cb61e481 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -79,7 +79,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
EXPORT_SYMBOL_GPL(slim_alloc_txn_tid);
/**
- * slim_free_txn_tid() - Freee tid of txn
+ * slim_free_txn_tid() - Free tid of txn
*
* @ctrl: Controller handle
* @txn: transaction whose tid should be freed
@@ -101,7 +101,7 @@ EXPORT_SYMBOL_GPL(slim_free_txn_tid);
* @txn: Transaction to be sent over SLIMbus
*
* Called by controller to transmit messaging transactions not dealing with
- * Interface/Value elements. (e.g. transmittting a message to assign logical
+ * Interface/Value elements. (e.g. transmitting a message to assign logical
* address to a slave device
*
* Return: -ETIMEDOUT: If transmission of this message timed out
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 86ccf5970bc1..e461c071189b 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -9,6 +9,7 @@ source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/canaan/Kconfig"
source "drivers/soc/fsl/Kconfig"
+source "drivers/soc/fujitsu/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/litex/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 919716e0e700..69ba6508cf2c 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SOC_CANAAN) += canaan/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
+obj-y += fujitsu/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-y += imx/
obj-y += ixp4xx/
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
index 78f0f1aeca57..92125dd65f33 100644
--- a/drivers/soc/amlogic/meson-mx-socinfo.c
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void)
np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
if (np) {
analog_top_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(analog_top_regmap))
return PTR_ERR(analog_top_regmap);
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index a10a417a87db..e93518763526 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -152,8 +152,10 @@ static int meson_secure_pwrc_probe(struct platform_device *pdev)
}
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
- if (!pwrc)
+ if (!pwrc) {
+ of_node_put(sm_np);
return -ENOMEM;
+ }
pwrc->fw = meson_sm_get(sm_np);
of_node_put(sm_np);
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 1e0041ec8132..5bcd047768b6 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -126,8 +126,7 @@
#define ASB_AXI_BRDG_ID 0x20
-#define ASB_READ(reg) readl(power->asb + (reg))
-#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg))
+#define BCM2835_BRDG_ID 0x62726467
struct bcm2835_power_domain {
struct generic_pm_domain base;
@@ -142,24 +141,41 @@ struct bcm2835_power {
void __iomem *base;
/* AXI Async bridge registers. */
void __iomem *asb;
+ /* RPiVid bridge registers. */
+ void __iomem *rpivid_asb;
struct genpd_onecell_data pd_xlate;
struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
struct reset_controller_dev reset;
};
-static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
+static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable)
{
+ void __iomem *base = power->asb;
u64 start;
+ u32 val;
- if (!reg)
+ switch (reg) {
+ case 0:
return 0;
+ case ASB_V3D_S_CTRL:
+ case ASB_V3D_M_CTRL:
+ if (power->rpivid_asb)
+ base = power->rpivid_asb;
+ break;
+ }
start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
- ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
- while (ASB_READ(reg) & ASB_ACK) {
+ if (enable) {
+ val = readl(base + reg) & ~ASB_REQ_STOP;
+ } else {
+ val = readl(base + reg) | ASB_REQ_STOP;
+ }
+ writel(PM_PASSWORD | val, base + reg);
+
+ while (readl(base + reg) & ASB_ACK) {
cpu_relax();
if (ktime_get_ns() - start >= 1000)
return -ETIMEDOUT;
@@ -168,30 +184,24 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
return 0;
}
-static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
{
- u64 start;
-
- if (!reg)
- return 0;
-
- start = ktime_get_ns();
-
- /* Enable the module's async AXI bridges. */
- ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
- while (!(ASB_READ(reg) & ASB_ACK)) {
- cpu_relax();
- if (ktime_get_ns() - start >= 1000)
- return -ETIMEDOUT;
- }
+ return bcm2835_asb_control(power, reg, true);
+}
- return 0;
+static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+{
+ return bcm2835_asb_control(power, reg, false);
}
static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
{
struct bcm2835_power *power = pd->power;
+ /* We don't run this on BCM2711 */
+ if (power->rpivid_asb)
+ return 0;
+
/* Enable functional isolation */
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
@@ -213,6 +223,10 @@ static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
int inrush;
bool powok;
+ /* We don't run this on BCM2711 */
+ if (power->rpivid_asb)
+ return 0;
+
/* If it was already powered on by the fw, leave it that way. */
if (PM_READ(pm_reg) & PM_POWUP)
return 0;
@@ -626,13 +640,23 @@ static int bcm2835_power_probe(struct platform_device *pdev)
power->dev = dev;
power->base = pm->base;
power->asb = pm->asb;
+ power->rpivid_asb = pm->rpivid_asb;
- id = ASB_READ(ASB_AXI_BRDG_ID);
- if (id != 0x62726467 /* "BRDG" */) {
+ id = readl(power->asb + ASB_AXI_BRDG_ID);
+ if (id != BCM2835_BRDG_ID /* "BRDG" */) {
dev_err(dev, "ASB register ID returned 0x%08x\n", id);
return -ENODEV;
}
+ if (power->rpivid_asb) {
+ id = readl(power->rpivid_asb + ASB_AXI_BRDG_ID);
+ if (id != BCM2835_BRDG_ID /* "BRDG" */) {
+ dev_err(dev, "RPiVid ASB register ID returned 0x%08x\n",
+ id);
+ return -ENODEV;
+ }
+ }
+
power->pd_xlate.domains = devm_kcalloc(dev,
ARRAY_SIZE(power_domain_names),
sizeof(*power->pd_xlate.domains),
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 2c975d79fe8e..1467bbd59690 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -340,12 +340,12 @@ static int __init brcmstb_biuctrl_init(void)
ret = setup_hifcpubiuctrl_regs(np);
if (ret)
- return ret;
+ goto out_put;
ret = mcp_write_pairing_set();
if (ret) {
pr_err("MCP: Unable to disable write pairing!\n");
- return ret;
+ goto out_put;
}
a72_b53_rac_enable_all(np);
@@ -353,6 +353,9 @@ static int __init brcmstb_biuctrl_init(void)
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
- return 0;
+ ret = 0;
+out_put:
+ of_node_put(np);
+ return ret;
}
early_initcall(brcmstb_biuctrl_init);
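Several hunks in this region (meson-mx-socinfo, meson-secure-pwrc, the biuctrl change above, and the fsl guts rewrite that follows) fix the same class of leak: of_find_*() returns a device_node with its refcount raised, so every exit path, including error paths, has to call of_node_put(). An illustrative sketch of the single-exit pattern; the function name and the "vendor,example" compatible string are placeholders, not from any of these drivers.

#include <linux/of.h>
#include <linux/of_address.h>

static int __init example_init(void)
{
	struct device_node *np;
	void __iomem *base;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "vendor,example");
	if (!np)
		return -ENODEV;

	base = of_iomap(np, 0);
	if (!base) {
		ret = -ENOMEM;
		goto out_put;	/* error path must still drop the reference */
	}

	/* ... read what is needed from the mapped registers ... */

	iounmap(base);

out_put:
	of_node_put(np);	/* balances the refcount taken by of_find_compatible_node() */
	return ret;
}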
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index 70ad0f3dce28..d6b30d521307 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -721,7 +721,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
/*
- * Slightly grosss to use the phy ver to get a memc,
+ * Slightly gross to use the phy ver to get a memc,
* offset but that is the only versioned things so far
* we can test for.
*/
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 5ed2fc1c53a0..6bf3e6a980ff 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -14,21 +14,16 @@
#include <linux/platform_device.h>
#include <linux/fsl/guts.h>
-struct guts {
- struct ccsr_guts __iomem *regs;
- bool little_endian;
-};
-
struct fsl_soc_die_attr {
char *die;
u32 svr;
u32 mask;
};
-static struct guts *guts;
-static struct soc_device_attribute soc_dev_attr;
-static struct soc_device *soc_dev;
-
+struct fsl_soc_data {
+ const char *sfp_compat;
+ u32 uid_offset;
+};
/* SoC die attribute definition for QorIQ platform */
static const struct fsl_soc_die_attr fsl_soc_die[] = {
@@ -120,88 +115,36 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
return NULL;
}
-static u32 fsl_guts_get_svr(void)
-{
- u32 svr = 0;
-
- if (!guts || !guts->regs)
- return svr;
-
- if (guts->little_endian)
- svr = ioread32(&guts->regs->svr);
- else
- svr = ioread32be(&guts->regs->svr);
-
- return svr;
-}
-
-static int fsl_guts_probe(struct platform_device *pdev)
+static u64 fsl_guts_get_soc_uid(const char *compat, unsigned int offset)
{
- struct device_node *root, *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- const struct fsl_soc_die_attr *soc_die;
- const char *machine;
- u32 svr;
-
- /* Initialize guts */
- guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
- if (!guts)
- return -ENOMEM;
-
- guts->little_endian = of_property_read_bool(np, "little-endian");
+ struct device_node *np;
+ void __iomem *sfp_base;
+ u64 uid;
- guts->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(guts->regs))
- return PTR_ERR(guts->regs);
+ np = of_find_compatible_node(NULL, NULL, compat);
+ if (!np)
+ return 0;
- /* Register soc device */
- root = of_find_node_by_path("/");
- if (of_property_read_string(root, "model", &machine))
- of_property_read_string_index(root, "compatible", 0, &machine);
- if (machine) {
- soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
- if (!soc_dev_attr.machine) {
- of_node_put(root);
- return -ENOMEM;
- }
+ sfp_base = of_iomap(np, 0);
+ if (!sfp_base) {
+ of_node_put(np);
+ return 0;
}
- of_node_put(root);
- svr = fsl_guts_get_svr();
- soc_die = fsl_soc_die_match(svr, fsl_soc_die);
- if (soc_die) {
- soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
- "QorIQ %s", soc_die->die);
- } else {
- soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
- }
- if (!soc_dev_attr.family)
- return -ENOMEM;
- soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
- "svr:0x%08x", svr);
- if (!soc_dev_attr.soc_id)
- return -ENOMEM;
- soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
- (svr >> 4) & 0xf, svr & 0xf);
- if (!soc_dev_attr.revision)
- return -ENOMEM;
+ uid = ioread32(sfp_base + offset);
+ uid <<= 32;
+ uid |= ioread32(sfp_base + offset + 4);
- soc_dev = soc_device_register(&soc_dev_attr);
- if (IS_ERR(soc_dev))
- return PTR_ERR(soc_dev);
+ iounmap(sfp_base);
+ of_node_put(np);
- pr_info("Machine: %s\n", soc_dev_attr.machine);
- pr_info("SoC family: %s\n", soc_dev_attr.family);
- pr_info("SoC ID: %s, Revision: %s\n",
- soc_dev_attr.soc_id, soc_dev_attr.revision);
- return 0;
+ return uid;
}
-static int fsl_guts_remove(struct platform_device *dev)
-{
- soc_device_unregister(soc_dev);
- return 0;
-}
+static const struct fsl_soc_data ls1028a_data = {
+ .sfp_compat = "fsl,ls1028a-sfp",
+ .uid_offset = 0x21c,
+};
/*
* Table for matching compatible strings, for device tree
@@ -231,28 +174,106 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1012a-dcfg", },
{ .compatible = "fsl,ls1046a-dcfg", },
{ .compatible = "fsl,lx2160a-dcfg", },
- { .compatible = "fsl,ls1028a-dcfg", },
+ { .compatible = "fsl,ls1028a-dcfg", .data = &ls1028a_data},
{}
};
-MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
-
-static struct platform_driver fsl_guts_driver = {
- .driver = {
- .name = "fsl-guts",
- .of_match_table = fsl_guts_of_match,
- },
- .probe = fsl_guts_probe,
- .remove = fsl_guts_remove,
-};
static int __init fsl_guts_init(void)
{
- return platform_driver_register(&fsl_guts_driver);
-}
-core_initcall(fsl_guts_init);
+ struct soc_device_attribute *soc_dev_attr;
+ static struct soc_device *soc_dev;
+ const struct fsl_soc_die_attr *soc_die;
+ const struct fsl_soc_data *soc_data;
+ const struct of_device_id *match;
+ struct ccsr_guts __iomem *regs;
+ const char *machine = NULL;
+ struct device_node *np;
+ bool little_endian;
+ u64 soc_uid = 0;
+ u32 svr;
+ int ret;
-static void __exit fsl_guts_exit(void)
-{
- platform_driver_unregister(&fsl_guts_driver);
+ np = of_find_matching_node_and_match(NULL, fsl_guts_of_match, &match);
+ if (!np)
+ return 0;
+ soc_data = match->data;
+
+ regs = of_iomap(np, 0);
+ if (!regs) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ little_endian = of_property_read_bool(np, "little-endian");
+ if (little_endian)
+ svr = ioread32(&regs->svr);
+ else
+ svr = ioread32be(&regs->svr);
+ iounmap(regs);
+ of_node_put(np);
+
+ /* Register soc device */
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ if (of_property_read_string(of_root, "model", &machine))
+ of_property_read_string_index(of_root, "compatible", 0, &machine);
+ if (machine) {
+ soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL);
+ if (!soc_dev_attr->machine)
+ goto err_nomem;
+ }
+
+ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+ if (soc_die) {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ %s",
+ soc_die->die);
+ } else {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ");
+ }
+ if (!soc_dev_attr->family)
+ goto err_nomem;
+
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr);
+ if (!soc_dev_attr->soc_id)
+ goto err_nomem;
+
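+ /* the SVR major/minor revision are its two low nibbles */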
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr->revision)
+ goto err_nomem;
+
+ if (soc_data)
+ soc_uid = fsl_guts_get_soc_uid(soc_data->sfp_compat,
+ soc_data->uid_offset);
+ if (soc_uid)
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX",
+ soc_uid);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err;
+ }
+
+ pr_info("Machine: %s\n", soc_dev_attr->machine);
+ pr_info("SoC family: %s\n", soc_dev_attr->family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+
+err_nomem:
+ ret = -ENOMEM;
+err:
+ kfree(soc_dev_attr->machine);
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr->serial_number);
+ kfree(soc_dev_attr);
+
+ return ret;
}
-module_exit(fsl_guts_exit);
+core_initcall(fsl_guts_init);
diff --git a/drivers/soc/fujitsu/Kconfig b/drivers/soc/fujitsu/Kconfig
new file mode 100644
index 000000000000..987731e80612
--- /dev/null
+++ b/drivers/soc/fujitsu/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "fujitsu SoC drivers"
+
+config A64FX_DIAG
+ bool "A64FX diag driver"
+ depends on ARM64
+ depends on ACPI
+ help
+ Say Y here if you want to enable the diag interrupt on Fujitsu A64FX.
+ This driver handles diagnostic requests from the BMC by enabling the
+ A64FX-specific interrupt, which allows administrators to obtain
+ kernel dumps via diagnostic requests issued with ipmitool, etc.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/soc/fujitsu/Makefile b/drivers/soc/fujitsu/Makefile
new file mode 100644
index 000000000000..945bc1c14ad0
--- /dev/null
+++ b/drivers/soc/fujitsu/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_A64FX_DIAG) += a64fx-diag.o
diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c
new file mode 100644
index 000000000000..d87f348427bf
--- /dev/null
+++ b/drivers/soc/fujitsu/a64fx-diag.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A64FX diag driver.
+ * Copyright (c) 2022 Fujitsu Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define A64FX_DIAG_IRQ 1
+#define BMC_DIAG_INTERRUPT_ENABLE 0x40
+#define BMC_DIAG_INTERRUPT_STATUS 0x44
+#define BMC_DIAG_INTERRUPT_MASK BIT(31)
+
+struct a64fx_diag_priv {
+ void __iomem *mmsc_reg_base;
+ int irq;
+ bool has_nmi;
+};
+
+static irqreturn_t a64fx_diag_handler_nmi(int irq, void *dev_id)
+{
+ nmi_panic(NULL, "a64fx_diag: interrupt received\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a64fx_diag_handler_irq(int irq, void *dev_id)
+{
+ panic("a64fx_diag: interrupt received\n");
+
+ return IRQ_HANDLED;
+}
+
+static void a64fx_diag_interrupt_clear(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_status_reg_addr;
+ u32 mmsc;
+
+ diag_status_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_STATUS;
+ mmsc = readl(diag_status_reg_addr);
+ if (mmsc & BMC_DIAG_INTERRUPT_MASK)
+ writel(BMC_DIAG_INTERRUPT_MASK, diag_status_reg_addr);
+}
+
+static void a64fx_diag_interrupt_enable(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_enable_reg_addr;
+ u32 mmsc;
+
+ diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE;
+ mmsc = readl(diag_enable_reg_addr);
+ if (!(mmsc & BMC_DIAG_INTERRUPT_MASK)) {
+ mmsc |= BMC_DIAG_INTERRUPT_MASK;
+ writel(mmsc, diag_enable_reg_addr);
+ }
+}
+
+static void a64fx_diag_interrupt_disable(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_enable_reg_addr;
+ u32 mmsc;
+
+ diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE;
+ mmsc = readl(diag_enable_reg_addr);
+ if (mmsc & BMC_DIAG_INTERRUPT_MASK) {
+ mmsc &= ~BMC_DIAG_INTERRUPT_MASK;
+ writel(mmsc, diag_enable_reg_addr);
+ }
+}
+
+static int a64fx_diag_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a64fx_diag_priv *priv;
+ unsigned long irq_flags;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->mmsc_reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmsc_reg_base))
+ return PTR_ERR(priv->mmsc_reg_base);
+
+ priv->irq = platform_get_irq(pdev, A64FX_DIAG_IRQ);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ platform_set_drvdata(pdev, priv);
+
+ irq_flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_AUTOEN |
+ IRQF_NO_THREAD;
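+ /*
+ * Prefer NMI delivery so a diagnostic request can still be serviced
+ * when normal interrupt handling is unavailable; fall back to a
+ * regular IRQ if an NMI cannot be requested.
+ */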
+ ret = request_nmi(priv->irq, &a64fx_diag_handler_nmi, irq_flags,
+ "a64fx_diag_nmi", NULL);
+ if (ret) {
+ ret = request_irq(priv->irq, &a64fx_diag_handler_irq,
+ irq_flags, "a64fx_diag_irq", NULL);
+ if (ret) {
+ dev_err(dev, "cannot register IRQ %d\n", ret);
+ return ret;
+ }
+ enable_irq(priv->irq);
+ } else {
+ enable_nmi(priv->irq);
+ priv->has_nmi = true;
+ }
+
+ a64fx_diag_interrupt_clear(priv);
+ a64fx_diag_interrupt_enable(priv);
+
+ return 0;
+}
+
+static int a64fx_diag_remove(struct platform_device *pdev)
+{
+ struct a64fx_diag_priv *priv = platform_get_drvdata(pdev);
+
+ a64fx_diag_interrupt_disable(priv);
+ a64fx_diag_interrupt_clear(priv);
+
+ if (priv->has_nmi)
+ free_nmi(priv->irq, NULL);
+ else
+ free_irq(priv->irq, NULL);
+
+ return 0;
+}
+
+static const struct acpi_device_id a64fx_diag_acpi_match[] = {
+ { "FUJI2007", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, a64fx_diag_acpi_match);
+
+static struct platform_driver a64fx_diag_driver = {
+ .driver = {
+ .name = "a64fx_diag_driver",
+ .acpi_match_table = ACPI_PTR(a64fx_diag_acpi_match),
+ },
+ .probe = a64fx_diag_probe,
+ .remove = a64fx_diag_remove,
+};
+
+module_platform_driver(a64fx_diag_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>");
+MODULE_DESCRIPTION("A64FX diag driver");
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 85aa86e1338a..6383a4edc360 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -328,7 +328,9 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
if (!IS_ERR(domain->regulator)) {
ret = regulator_enable(domain->regulator);
if (ret) {
- dev_err(domain->dev, "failed to enable regulator\n");
+ dev_err(domain->dev,
+ "failed to enable regulator: %pe\n",
+ ERR_PTR(ret));
goto out_put_pm;
}
}
@@ -467,7 +469,9 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
if (!IS_ERR(domain->regulator)) {
ret = regulator_disable(domain->regulator);
if (ret) {
- dev_err(domain->dev, "failed to disable regulator\n");
+ dev_err(domain->dev,
+ "failed to disable regulator: %pe\n",
+ ERR_PTR(ret));
return ret;
}
}
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 7ebc28709e94..dff7529268e4 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -216,7 +216,7 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus");
if (IS_ERR(bc->bus_power_dev))
return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
- "failed to attach power domain\n");
+ "failed to attach power domain \"bus\"\n");
for (i = 0; i < bc_data->num_domains; i++) {
const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i];
@@ -238,7 +238,8 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
dev_pm_domain_attach_by_name(dev, data->gpc_name);
if (IS_ERR(domain->power_dev)) {
dev_err_probe(dev, PTR_ERR(domain->power_dev),
- "failed to attach power domain\n");
+ "failed to attach power domain \"%s\"\n",
+ data->gpc_name);
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
@@ -251,7 +252,9 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
- dev_err_probe(dev, ret, "failed to init power domain\n");
+ dev_err_probe(dev, ret,
+ "failed to init power domain \"%s\"\n",
+ data->gpc_name);
dev_pm_domain_detach(domain->power_dev, true);
goto cleanup_pds;
}
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index fdd8bc08569e..3c3eedea35f7 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -73,4 +73,14 @@ config MTK_MMSYS
Say yes here to add support for the MediaTek Multimedia
Subsystem (MMSYS).
+config MTK_SVS
+ tristate "MediaTek Smart Voltage Scaling(SVS)"
+ depends on MTK_EFUSE && NVMEM
+ help
+ The Smart Voltage Scaling (SVS) engine is a piece of hardware with
+ several controllers (banks) that calculate suitable voltages for
+ different power domains (CPU/GPU/CCI) according to the chip's process
+ corner, temperature and other factors. The DVFS driver can then apply
+ the SVS bank voltage to the PMIC/buck.
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 90270f8114ed..0e9e703c931a 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
+obj-$(CONFIG_MTK_SVS) += mtk-svs.o
diff --git a/drivers/soc/mediatek/mt6795-pm-domains.h b/drivers/soc/mediatek/mt6795-pm-domains.h
new file mode 100644
index 000000000000..ef07c9dfdd9b
--- /dev/null
+++ b/drivers/soc/mediatek/mt6795-pm-domains.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT6795_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT6795_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt6795-power.h>
+
+/*
+ * MT6795 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = {
+ [MT6795_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT6795_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT6795_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT6795_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1),
+ },
+ },
+ [MT6795_POWER_DOMAIN_MJC] = {
+ .name = "mjc",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x298,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT6795_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT6795_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT6795_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT6795_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT),
+ },
+ },
+};
+
+static const struct scpsys_soc_data mt6795_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt6795,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795),
+};
+
+#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
index 71b8757e552d..99de67fe5de8 100644
--- a/drivers/soc/mediatek/mt8183-pm-domains.h
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -41,6 +41,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.pwr_sta2nd_offs = 0x0184,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8183_POWER_DOMAIN_MFG] = {
.name = "mfg",
diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h
index bf2dd0cdc3a8..108af61854a3 100644
--- a/drivers/soc/mediatek/mt8186-pm-domains.h
+++ b/drivers/soc/mediatek/mt8186-pm-domains.h
@@ -51,7 +51,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
MT8186_TOP_AXI_PROT_EN_1_CLR,
MT8186_TOP_AXI_PROT_EN_1_STA),
},
- .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8186_POWER_DOMAIN_MFG2] = {
.name = "mfg2",
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
index 558c4ee4784a..b97b2051920f 100644
--- a/drivers/soc/mediatek/mt8192-pm-domains.h
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -58,6 +58,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.pwr_sta2nd_offs = 0x0170,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8192_POWER_DOMAIN_MFG1] = {
.name = "mfg1",
@@ -85,6 +86,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
MT8192_TOP_AXI_PROT_EN_2_CLR,
MT8192_TOP_AXI_PROT_EN_2_STA1),
},
+ .caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8192_POWER_DOMAIN_MFG2] = {
.name = "mfg2",
diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h
index 938f4d51f5ae..d7387ea1b9c9 100644
--- a/drivers/soc/mediatek/mt8195-pm-domains.h
+++ b/drivers/soc/mediatek/mt8195-pm-domains.h
@@ -67,7 +67,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
.ctl_offs = 0x334,
.pwr_sta_offs = 0x174,
.pwr_sta2nd_offs = 0x178,
- .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_ALWAYS_ON,
},
[MT8195_POWER_DOMAIN_CSI_RX_TOP] = {
.name = "csi_rx_top",
@@ -162,7 +162,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
},
- .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8195_POWER_DOMAIN_MFG2] = {
.name = "mfg2",
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 24129a6c25f8..7abaf048d91e 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -10,6 +10,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60
#define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64
#define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL 0xfd0
+#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
+#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
#define MT8365_RDMA0_SOUT_COLOR0 0x1
#define MT8365_DITHER_MOUT_EN_DSI0 0x1
@@ -18,6 +21,10 @@
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
#define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0
#define MT8365_OVL0_MOUT_PATH0_SEL BIT(0)
+#define MT8365_RDMA1_SOUT_DPI0 0x1
+#define MT8365_DPI0_SEL_IN_RDMA1 0x0
+#define MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK 0x1
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
{
@@ -55,6 +62,21 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
},
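+ /*
+ * RDMA1 -> DPI0 path: select the LVDS pixel clock, then route the
+ * DPI0 input and the RDMA1 output.
+ */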
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
+ },
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index 7c65ad3d1f8a..fc13334db1b1 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -31,10 +31,7 @@ struct mtk_devapc_vio_dbgs {
u32 vio_dbg1;
};
-struct mtk_devapc_data {
- /* numbers of violation index */
- u32 vio_idx_num;
-
+struct mtk_devapc_regs_ofs {
/* reg offset */
u32 vio_mask_offset;
u32 vio_sta_offset;
@@ -46,6 +43,12 @@ struct mtk_devapc_data {
u32 vio_shift_con_offset;
};
+struct mtk_devapc_data {
+ /* numbers of violation index */
+ u32 vio_idx_num;
+ const struct mtk_devapc_regs_ofs *regs_ofs;
+};
+
struct mtk_devapc_context {
struct device *dev;
void __iomem *infra_base;
@@ -58,7 +61,7 @@ static void clear_vio_status(struct mtk_devapc_context *ctx)
void __iomem *reg;
int i;
- reg = ctx->infra_base + ctx->data->vio_sta_offset;
+ reg = ctx->infra_base + ctx->data->regs_ofs->vio_sta_offset;
for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
writel(GENMASK(31, 0), reg + 4 * i);
@@ -73,7 +76,7 @@ static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask)
u32 val;
int i;
- reg = ctx->infra_base + ctx->data->vio_mask_offset;
+ reg = ctx->infra_base + ctx->data->regs_ofs->vio_mask_offset;
if (mask)
val = GENMASK(31, 0);
@@ -116,11 +119,11 @@ static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx)
u32 val;
pd_vio_shift_sta_reg = ctx->infra_base +
- ctx->data->vio_shift_sta_offset;
+ ctx->data->regs_ofs->vio_shift_sta_offset;
pd_vio_shift_sel_reg = ctx->infra_base +
- ctx->data->vio_shift_sel_offset;
+ ctx->data->regs_ofs->vio_shift_sel_offset;
pd_vio_shift_con_reg = ctx->infra_base +
- ctx->data->vio_shift_con_offset;
+ ctx->data->regs_ofs->vio_shift_con_offset;
/* Find the minimum shift group which has violation */
val = readl(pd_vio_shift_sta_reg);
@@ -161,8 +164,8 @@ static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx)
void __iomem *vio_dbg0_reg;
void __iomem *vio_dbg1_reg;
- vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset;
- vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset;
+ vio_dbg0_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg0_offset;
+ vio_dbg1_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg1_offset;
vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg);
vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg);
@@ -200,7 +203,7 @@ static irqreturn_t devapc_violation_irq(int irq_number, void *data)
*/
static void start_devapc(struct mtk_devapc_context *ctx)
{
- writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset);
+ writel(BIT(31), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset);
mask_module_irq(ctx, false);
}
@@ -212,11 +215,10 @@ static void stop_devapc(struct mtk_devapc_context *ctx)
{
mask_module_irq(ctx, true);
- writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset);
+ writel(BIT(2), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset);
}
-static const struct mtk_devapc_data devapc_mt6779 = {
- .vio_idx_num = 511,
+static const struct mtk_devapc_regs_ofs devapc_regs_ofs_mt6779 = {
.vio_mask_offset = 0x0,
.vio_sta_offset = 0x400,
.vio_dbg0_offset = 0x900,
@@ -227,11 +229,24 @@ static const struct mtk_devapc_data devapc_mt6779 = {
.vio_shift_con_offset = 0xF20,
};
+static const struct mtk_devapc_data devapc_mt6779 = {
+ .vio_idx_num = 511,
+ .regs_ofs = &devapc_regs_ofs_mt6779,
+};
+
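+/* MT8186 reuses the MT6779 register layout but exposes more violation indexes */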
+static const struct mtk_devapc_data devapc_mt8186 = {
+ .vio_idx_num = 519,
+ .regs_ofs = &devapc_regs_ofs_mt6779,
+};
+
static const struct of_device_id mtk_devapc_dt_match[] = {
{
.compatible = "mediatek,mt6779-devapc",
.data = &devapc_mt6779,
}, {
+ .compatible = "mediatek,mt8186-devapc",
+ .data = &devapc_mt8186,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match);
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 981d56967e7a..5ea43de4e410 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -7,10 +7,12 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#define MT2701_MUTEX0_MOD0 0x2c
#define MT2701_MUTEX0_SOF0 0x30
@@ -80,6 +82,15 @@
#define MT8183_MUTEX_MOD_DISP_GAMMA0 16
#define MT8183_MUTEX_MOD_DISP_DITHER0 17
+#define MT8183_MUTEX_MOD_MDP_RDMA0 2
+#define MT8183_MUTEX_MOD_MDP_RSZ0 4
+#define MT8183_MUTEX_MOD_MDP_RSZ1 5
+#define MT8183_MUTEX_MOD_MDP_TDSHP0 6
+#define MT8183_MUTEX_MOD_MDP_WROT0 7
+#define MT8183_MUTEX_MOD_MDP_WDMA 8
+#define MT8183_MUTEX_MOD_MDP_AAL0 23
+#define MT8183_MUTEX_MOD_MDP_CCORR0 24
+
#define MT8173_MUTEX_MOD_DISP_OVL0 11
#define MT8173_MUTEX_MOD_DISP_OVL1 12
#define MT8173_MUTEX_MOD_DISP_RDMA0 13
@@ -110,6 +121,20 @@
#define MT8195_MUTEX_MOD_DISP_DP_INTF0 21
#define MT8195_MUTEX_MOD_DISP_PWM0 27
+#define MT8365_MUTEX_MOD_DISP_OVL0 7
+#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8
+#define MT8365_MUTEX_MOD_DISP_RDMA0 9
+#define MT8365_MUTEX_MOD_DISP_RDMA1 10
+#define MT8365_MUTEX_MOD_DISP_WDMA0 11
+#define MT8365_MUTEX_MOD_DISP_COLOR0 12
+#define MT8365_MUTEX_MOD_DISP_CCORR 13
+#define MT8365_MUTEX_MOD_DISP_AAL 14
+#define MT8365_MUTEX_MOD_DISP_GAMMA 15
+#define MT8365_MUTEX_MOD_DISP_DITHER 16
+#define MT8365_MUTEX_MOD_DISP_DSI0 17
+#define MT8365_MUTEX_MOD_DISP_PWM0 20
+#define MT8365_MUTEX_MOD_DISP_DPI0 22
+
#define MT2712_MUTEX_MOD_DISP_PWM2 10
#define MT2712_MUTEX_MOD_DISP_OVL0 11
#define MT2712_MUTEX_MOD_DISP_OVL1 12
@@ -185,6 +210,7 @@ struct mtk_mutex_data {
const unsigned int *mutex_sof;
const unsigned int mutex_mod_reg;
const unsigned int mutex_sof_reg;
+ const unsigned int *mutex_table_mod;
const bool no_clk;
};
@@ -194,6 +220,8 @@ struct mtk_mutex_ctx {
void __iomem *regs;
struct mtk_mutex mutex[10];
const struct mtk_mutex_data *data;
+ phys_addr_t addr;
+ struct cmdq_client_reg cmdq_reg;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -272,6 +300,17 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
+static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8183_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8183_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_WDMA] = MT8183_MUTEX_MOD_MDP_WDMA,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8183_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0,
+};
+
static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
@@ -315,6 +354,22 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0,
};
+static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR,
+ [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER0] = MT8365_MUTEX_MOD_DISP_DITHER,
+ [DDP_COMPONENT_DPI0] = MT8365_MUTEX_MOD_DISP_DPI0,
+ [DDP_COMPONENT_DSI0] = MT8365_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_GAMMA] = MT8365_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OVL0] = MT8365_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8365_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_PWM0] = MT8365_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_RDMA0] = MT8365_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8365_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0,
+};
+
static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -399,6 +454,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.mutex_sof = mt8183_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8183_mutex_table_mod,
.no_clk = true,
};
@@ -423,6 +479,14 @@ static const struct mtk_mutex_data mt8195_mutex_driver_data = {
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8365_mutex_driver_data = {
+ .mutex_mod = mt8365_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .no_clk = true,
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
@@ -572,6 +636,30 @@ void mtk_mutex_enable(struct mtk_mutex *mutex)
}
EXPORT_SYMBOL_GPL(mtk_mutex_enable);
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, void *pkt)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_pkt *cmdq_pkt = (struct cmdq_pkt *)pkt;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (!mtx->cmdq_reg.size) {
+ dev_err(mtx->dev, "mediatek,gce-client-reg hasn't been set");
+ return -EINVAL;
+ }
+
+ cmdq_pkt_write(cmdq_pkt, mtx->cmdq_reg.subsys,
+ mtx->addr + DISP_REG_MUTEX_EN(mutex->id), 1);
+ return 0;
+#else
+ dev_err(mtx->dev, "Not support for enable MUTEX by CMDQ");
+ return -ENODEV;
+#endif
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_enable_by_cmdq);
+
void mtk_mutex_disable(struct mtk_mutex *mutex)
{
struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
@@ -606,12 +694,67 @@ void mtk_mutex_release(struct mtk_mutex *mutex)
}
EXPORT_SYMBOL_GPL(mtk_mutex_release);
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx, bool clear)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ unsigned int reg;
+ unsigned int offset;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (idx < MUTEX_MOD_IDX_MDP_RDMA0 ||
+ idx >= MUTEX_MOD_IDX_MAX) {
+ dev_err(mtx->dev, "Not supported MOD table index : %d", idx);
+ return -EINVAL;
+ }
+
+ offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
+ mutex->id);
+ reg = readl_relaxed(mtx->regs + offset);
+
+ if (clear)
+ reg &= ~BIT(mtx->data->mutex_table_mod[idx]);
+ else
+ reg |= BIT(mtx->data->mutex_table_mod[idx]);
+
+ writel_relaxed(reg, mtx->regs + offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_write_mod);
+
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (idx < MUTEX_SOF_IDX_SINGLE_MODE ||
+ idx >= MUTEX_SOF_IDX_MAX) {
+ dev_err(mtx->dev, "Not supported SOF index : %d", idx);
+ return -EINVAL;
+ }
+
+ writel_relaxed(idx, mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_write_sof);
+
static int mtk_mutex_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_mutex_ctx *mtx;
struct resource *regs;
int i;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ int ret;
+#endif
mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL);
if (!mtx)
@@ -631,12 +774,18 @@ static int mtk_mutex_probe(struct platform_device *pdev)
}
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mtx->regs = devm_ioremap_resource(dev, regs);
+ mtx->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mtx->regs)) {
dev_err(dev, "Failed to map mutex registers\n");
return PTR_ERR(mtx->regs);
}
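+ /* keep the physical base so CMDQ packets can address the mutex registers */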
+ mtx->addr = regs->start;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &mtx->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "No mediatek,gce-client-reg!\n");
+#endif
platform_set_drvdata(pdev, mtx);
@@ -665,6 +814,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8192_mutex_driver_data},
{ .compatible = "mediatek,mt8195-disp-mutex",
.data = &mt8195_mutex_driver_data},
+ { .compatible = "mediatek,mt8365-disp-mutex",
+ .data = &mt8365_mutex_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 5ced254b082b..9734f1091c69 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -16,6 +16,7 @@
#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/infracfg.h>
+#include "mt6795-pm-domains.h"
#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
@@ -428,6 +429,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
goto err_put_subsys_clocks;
}
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON))
+ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
}
if (scpsys->domains[id]) {
@@ -556,6 +560,10 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys)
static const struct of_device_id scpsys_of_match[] = {
{
+ .compatible = "mediatek,mt6795-power-controller",
+ .data = &mt6795_scpsys_data,
+ },
+ {
.compatible = "mediatek,mt8167-power-controller",
.data = &mt8167_scpsys_data,
},
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
index daa24e890dd4..7d3c0c36316c 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.h
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -8,6 +8,8 @@
#define MTK_SCPD_SRAM_ISO BIT(2)
#define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3)
#define MTK_SCPD_DOMAIN_SUPPLY BIT(4)
+/* can't set MTK_SCPD_KEEP_DEFAULT_OFF at the same time */
+#define MTK_SCPD_ALWAYS_ON BIT(5)
#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
#define SPM_VDE_PWR_CON 0x0210
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index bf39a64f3ecc..d8cb0f833645 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -13,6 +13,9 @@
#include <linux/regmap.h>
#include <linux/reset.h>
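+
+/*
+ * Poll interval and timeout used with readx_poll_timeout() while waiting
+ * for the wrapper FSM to reach a given state.
+ */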
+#define PWRAP_POLL_DELAY_US 10
+#define PWRAP_POLL_TIMEOUT_US 10000
+
#define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4
#define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10
#define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14
@@ -1140,12 +1143,9 @@ enum pwrap_type {
};
struct pmic_wrapper;
-struct pwrap_slv_type {
- const u32 *dew_regs;
- enum pmic_type type;
+
+struct pwrap_slv_regops {
const struct regmap_config *regmap;
- /* Flags indicating the capability for the target slave */
- u32 caps;
/*
* pwrap operations are highly associated with the PMIC types,
* so the pointers added increases flexibility allowing determination
@@ -1155,6 +1155,14 @@ struct pwrap_slv_type {
int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata);
};
+struct pwrap_slv_type {
+ const u32 *dew_regs;
+ enum pmic_type type;
+ const struct pwrap_slv_regops *regops;
+ /* Flags indicating the capability for the target slave */
+ u32 caps;
+};
+
struct pmic_wrapper {
struct device *dev;
void __iomem *base;
@@ -1241,27 +1249,14 @@ static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp)
(val & PWRAP_STATE_SYNC_IDLE0);
}
-static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
- bool (*fp)(struct pmic_wrapper *))
-{
- unsigned long timeout;
-
- timeout = jiffies + usecs_to_jiffies(10000);
-
- do {
- if (time_after(jiffies, timeout))
- return fp(wrp) ? 0 : -ETIMEDOUT;
- if (fp(wrp))
- return 0;
- } while (1);
-}
-
static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
+ bool tmp;
int ret;
u32 val;
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1273,7 +1268,8 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
val = (adr >> 1) << 16;
pwrap_writel(wrp, val, PWRAP_WACS2_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret)
return ret;
@@ -1290,11 +1286,14 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
+ bool tmp;
int ret, msb;
*rdata = 0;
for (msb = 0; msb < 2; msb++) {
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1303,7 +1302,8 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
pwrap_writel(wrp, ((msb << 30) | (adr << 16)),
PWRAP_WACS2_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret)
return ret;
@@ -1318,14 +1318,16 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
- return wrp->slave->pwrap_read(wrp, adr, rdata);
+ return wrp->slave->regops->pwrap_read(wrp, adr, rdata);
}
static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
+ bool tmp;
int ret;
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1344,10 +1346,12 @@ static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
+ bool tmp;
int ret, msb, rdata;
for (msb = 0; msb < 2; msb++) {
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
pwrap_leave_fsm_vldclr(wrp);
return ret;
@@ -1373,7 +1377,7 @@ static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
- return wrp->slave->pwrap_write(wrp, adr, wdata);
+ return wrp->slave->regops->pwrap_write(wrp, adr, wdata);
}
static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
@@ -1388,6 +1392,7 @@ static int pwrap_regmap_write(void *context, u32 adr, u32 wdata)
static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
{
+ bool tmp;
int ret, i;
pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN);
@@ -1407,7 +1412,8 @@ static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
PWRAP_MAN_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle);
+ ret = readx_poll_timeout(pwrap_is_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
return ret;
@@ -1458,14 +1464,15 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
static int pwrap_init_dual_io(struct pmic_wrapper *wrp)
{
int ret;
+ bool tmp;
u32 rdata;
/* Enable dual IO mode */
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
/* Check IDLE & INIT_DONE in advance */
- ret = pwrap_wait_for_state(wrp,
- pwrap_is_fsm_idle_and_sync_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
return ret;
@@ -1570,6 +1577,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
static int pwrap_init_cipher(struct pmic_wrapper *wrp)
{
int ret;
+ bool tmp;
u32 rdata = 0;
pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
@@ -1624,14 +1632,16 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
}
/* wait for cipher data ready@AP */
- ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready);
+ ret = readx_poll_timeout(pwrap_is_cipher_ready, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret);
return ret;
}
/* wait for cipher data ready@PMIC */
- ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready);
+ ret = readx_poll_timeout(pwrap_is_pmic_cipher_ready, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev,
"timeout waiting for cipher data ready@PMIC\n");
@@ -1640,7 +1650,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
/* wait for cipher mode idle */
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
if (ret) {
dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
return ret;
@@ -1885,99 +1896,82 @@ static const struct regmap_config pwrap_regmap_config32 = {
.max_register = 0xffff,
};
+static const struct pwrap_slv_regops pwrap_regops16 = {
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+ .regmap = &pwrap_regmap_config16,
+};
+
+static const struct pwrap_slv_regops pwrap_regops32 = {
+ .pwrap_read = pwrap_read32,
+ .pwrap_write = pwrap_write32,
+ .regmap = &pwrap_regmap_config32,
+};
+
static const struct pwrap_slv_type pmic_mt6323 = {
.dew_regs = mt6323_regs,
.type = PMIC_MT6323,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
PWRAP_SLV_CAP_SECURITY,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6351 = {
.dew_regs = mt6351_regs,
.type = PMIC_MT6351,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = 0,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6357 = {
.dew_regs = mt6357_regs,
.type = PMIC_MT6357,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = 0,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6358 = {
.dew_regs = mt6358_regs,
.type = PMIC_MT6358,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6359 = {
.dew_regs = mt6359_regs,
.type = PMIC_MT6359,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_DUALIO,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct pwrap_slv_type pmic_mt6380 = {
.dew_regs = NULL,
.type = PMIC_MT6380,
- .regmap = &pwrap_regmap_config32,
+ .regops = &pwrap_regops32,
.caps = 0,
- .pwrap_read = pwrap_read32,
- .pwrap_write = pwrap_write32,
};
static const struct pwrap_slv_type pmic_mt6397 = {
.dew_regs = mt6397_regs,
.type = PMIC_MT6397,
- .regmap = &pwrap_regmap_config16,
+ .regops = &pwrap_regops16,
.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
PWRAP_SLV_CAP_SECURITY,
- .pwrap_read = pwrap_read16,
- .pwrap_write = pwrap_write16,
};
static const struct of_device_id of_slave_match_tbl[] = {
- {
- .compatible = "mediatek,mt6323",
- .data = &pmic_mt6323,
- }, {
- .compatible = "mediatek,mt6351",
- .data = &pmic_mt6351,
- }, {
- .compatible = "mediatek,mt6357",
- .data = &pmic_mt6357,
- }, {
- .compatible = "mediatek,mt6358",
- .data = &pmic_mt6358,
- }, {
- .compatible = "mediatek,mt6359",
- .data = &pmic_mt6359,
- }, {
- /* The MT6380 PMIC only implements a regulator, so we bind it
- * directly instead of using a MFD.
- */
- .compatible = "mediatek,mt6380-regulator",
- .data = &pmic_mt6380,
- }, {
- .compatible = "mediatek,mt6397",
- .data = &pmic_mt6397,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt6323", .data = &pmic_mt6323 },
+ { .compatible = "mediatek,mt6351", .data = &pmic_mt6351 },
+ { .compatible = "mediatek,mt6357", .data = &pmic_mt6357 },
+ { .compatible = "mediatek,mt6358", .data = &pmic_mt6358 },
+ { .compatible = "mediatek,mt6359", .data = &pmic_mt6359 },
+
+ /* The MT6380 PMIC only implements a regulator, so we bind it
+ * directly instead of using a MFD.
+ */
+ { .compatible = "mediatek,mt6380-regulator", .data = &pmic_mt6380 },
+ { .compatible = "mediatek,mt6397", .data = &pmic_mt6397 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_slave_match_tbl);
@@ -2136,45 +2130,19 @@ static struct pmic_wrapper_type pwrap_mt8186 = {
};
static const struct of_device_id of_pwrap_match_tbl[] = {
- {
- .compatible = "mediatek,mt2701-pwrap",
- .data = &pwrap_mt2701,
- }, {
- .compatible = "mediatek,mt6765-pwrap",
- .data = &pwrap_mt6765,
- }, {
- .compatible = "mediatek,mt6779-pwrap",
- .data = &pwrap_mt6779,
- }, {
- .compatible = "mediatek,mt6797-pwrap",
- .data = &pwrap_mt6797,
- }, {
- .compatible = "mediatek,mt6873-pwrap",
- .data = &pwrap_mt6873,
- }, {
- .compatible = "mediatek,mt7622-pwrap",
- .data = &pwrap_mt7622,
- }, {
- .compatible = "mediatek,mt8135-pwrap",
- .data = &pwrap_mt8135,
- }, {
- .compatible = "mediatek,mt8173-pwrap",
- .data = &pwrap_mt8173,
- }, {
- .compatible = "mediatek,mt8183-pwrap",
- .data = &pwrap_mt8183,
- }, {
- .compatible = "mediatek,mt8186-pwrap",
- .data = &pwrap_mt8186,
- }, {
- .compatible = "mediatek,mt8195-pwrap",
- .data = &pwrap_mt8195,
- }, {
- .compatible = "mediatek,mt8516-pwrap",
- .data = &pwrap_mt8516,
- }, {
- /* sentinel */
- }
+ { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701 },
+ { .compatible = "mediatek,mt6765-pwrap", .data = &pwrap_mt6765 },
+ { .compatible = "mediatek,mt6779-pwrap", .data = &pwrap_mt6779 },
+ { .compatible = "mediatek,mt6797-pwrap", .data = &pwrap_mt6797 },
+ { .compatible = "mediatek,mt6873-pwrap", .data = &pwrap_mt6873 },
+ { .compatible = "mediatek,mt7622-pwrap", .data = &pwrap_mt7622 },
+ { .compatible = "mediatek,mt8135-pwrap", .data = &pwrap_mt8135 },
+ { .compatible = "mediatek,mt8173-pwrap", .data = &pwrap_mt8173 },
+ { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183 },
+ { .compatible = "mediatek,mt8186-pwrap", .data = &pwrap_mt8186 },
+ { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195 },
+ { .compatible = "mediatek,mt8516-pwrap", .data = &pwrap_mt8516 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
@@ -2185,7 +2153,6 @@ static int pwrap_probe(struct platform_device *pdev)
struct pmic_wrapper *wrp;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_slave_id = NULL;
- struct resource *res;
if (np->child)
of_slave_id = of_match_node(of_slave_match_tbl, np->child);
@@ -2205,8 +2172,7 @@ static int pwrap_probe(struct platform_device *pdev)
wrp->slave = of_slave_id->data;
wrp->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
- wrp->base = devm_ioremap_resource(wrp->dev, res);
+ wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap");
if (IS_ERR(wrp->base))
return PTR_ERR(wrp->base);
@@ -2220,9 +2186,7 @@ static int pwrap_probe(struct platform_device *pdev)
}
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "pwrap-bridge");
- wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
+ wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge");
if (IS_ERR(wrp->bridge_base))
return PTR_ERR(wrp->bridge_base);
@@ -2315,13 +2279,18 @@ static int pwrap_probe(struct platform_device *pdev)
pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_out2;
+ }
+
ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
IRQF_TRIGGER_HIGH,
"mt-pmic-pwrap", wrp);
if (ret)
goto err_out2;
- wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap);
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap);
if (IS_ERR(wrp->regmap)) {
ret = PTR_ERR(wrp->regmap);
goto err_out2;
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
new file mode 100644
index 000000000000..dee8664a12fd
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -0,0 +1,2403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/thermal.h>
+
+/* svs bank 1-line software id */
+#define SVSB_CPU_LITTLE BIT(0)
+#define SVSB_CPU_BIG BIT(1)
+#define SVSB_CCI BIT(2)
+#define SVSB_GPU BIT(3)
+
+/* svs bank 2-line type */
+#define SVSB_LOW BIT(8)
+#define SVSB_HIGH BIT(9)
+
+/* svs bank mode support */
+#define SVSB_MODE_ALL_DISABLE 0
+#define SVSB_MODE_INIT01 BIT(1)
+#define SVSB_MODE_INIT02 BIT(2)
+#define SVSB_MODE_MON BIT(3)
+
+/* svs bank volt flags */
+#define SVSB_INIT01_PD_REQ BIT(0)
+#define SVSB_INIT01_VOLT_IGNORE BIT(1)
+#define SVSB_INIT01_VOLT_INC_ONLY BIT(2)
+#define SVSB_MON_VOLT_IGNORE BIT(16)
+#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24)
+
+/* svs bank register common configuration */
+#define SVSB_DET_MAX 0xffff
+#define SVSB_DET_WINDOW 0xa28
+#define SVSB_DTHI 0x1
+#define SVSB_DTLO 0xfe
+#define SVSB_EN_INIT01 0x1
+#define SVSB_EN_INIT02 0x5
+#define SVSB_EN_MON 0x2
+#define SVSB_EN_OFF 0x0
+#define SVSB_INTEN_INIT0x 0x00005f01
+#define SVSB_INTEN_MONVOPEN 0x00ff0000
+#define SVSB_INTSTS_CLEAN 0x00ffffff
+#define SVSB_INTSTS_COMPLETE 0x1
+#define SVSB_INTSTS_MONVOP 0x00ff0000
+#define SVSB_RUNCONFIG_DEFAULT 0x80000000
+
+/* svs bank related setting */
+#define BITS8 8
+#define MAX_OPP_ENTRIES 16
+#define REG_BYTES 4
+#define SVSB_DC_SIGNED_BIT BIT(15)
+#define SVSB_DET_CLK_EN BIT(31)
+#define SVSB_TEMP_LOWER_BOUND 0xb2
+#define SVSB_TEMP_UPPER_BOUND 0x64
+
+static DEFINE_SPINLOCK(svs_lock);
+
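+/* generate read-only / read-write debugfs file_operations for an svs debug node */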
+#define debug_fops_ro(name) \
+ static int svs_##name##_debug_open(struct inode *inode, \
+ struct file *filp) \
+ { \
+ return single_open(filp, svs_##name##_debug_show, \
+ inode->i_private); \
+ } \
+ static const struct file_operations svs_##name##_debug_fops = { \
+ .owner = THIS_MODULE, \
+ .open = svs_##name##_debug_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define debug_fops_rw(name) \
+ static int svs_##name##_debug_open(struct inode *inode, \
+ struct file *filp) \
+ { \
+ return single_open(filp, svs_##name##_debug_show, \
+ inode->i_private); \
+ } \
+ static const struct file_operations svs_##name##_debug_fops = { \
+ .owner = THIS_MODULE, \
+ .open = svs_##name##_debug_open, \
+ .read = seq_read, \
+ .write = svs_##name##_debug_write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops}
+
+/**
+ * enum svsb_phase - svs bank phase enumeration
+ * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition
+ * @SVSB_PHASE_INIT01: svs bank basic init for data calibration
+ * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table
+ * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect
+ * @SVSB_PHASE_MAX: total number of svs bank phases (debug purpose)
+ *
+ * Each svs bank has its own independent phase, and banks are enabled by
+ * running their phases in order. However, when an svs bank encounters an
+ * unexpected condition, it fires an irq (PHASE_ERROR) to inform the svs software.
+ *
+ * svs bank general phase-enabled order:
+ * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON
+ */
+enum svsb_phase {
+ SVSB_PHASE_ERROR = 0,
+ SVSB_PHASE_INIT01,
+ SVSB_PHASE_INIT02,
+ SVSB_PHASE_MON,
+ SVSB_PHASE_MAX,
+};
+
+enum svs_reg_index {
+ DESCHAR = 0,
+ TEMPCHAR,
+ DETCHAR,
+ AGECHAR,
+ DCCONFIG,
+ AGECONFIG,
+ FREQPCT30,
+ FREQPCT74,
+ LIMITVALS,
+ VBOOT,
+ DETWINDOW,
+ CONFIG,
+ TSCALCS,
+ RUNCONFIG,
+ SVSEN,
+ INIT2VALS,
+ DCVALUES,
+ AGEVALUES,
+ VOP30,
+ VOP74,
+ TEMP,
+ INTSTS,
+ INTSTSRAW,
+ INTEN,
+ CHKINT,
+ CHKSHIFT,
+ STATUS,
+ VDESIGN30,
+ VDESIGN74,
+ DVT30,
+ DVT74,
+ AGECOUNT,
+ SMSTATE0,
+ SMSTATE1,
+ CTL0,
+ DESDETSEC,
+ TEMPAGESEC,
+ CTRLSPARE0,
+ CTRLSPARE1,
+ CTRLSPARE2,
+ CTRLSPARE3,
+ CORESEL,
+ THERMINTST,
+ INTST,
+ THSTAGE0ST,
+ THSTAGE1ST,
+ THSTAGE2ST,
+ THAHBST0,
+ THAHBST1,
+ SPARE0,
+ SPARE1,
+ SPARE2,
+ SPARE3,
+ THSLPEVEB,
+ SVS_REG_MAX,
+};
+
+static const u32 svs_regs_v2[] = {
+ [DESCHAR] = 0xc00,
+ [TEMPCHAR] = 0xc04,
+ [DETCHAR] = 0xc08,
+ [AGECHAR] = 0xc0c,
+ [DCCONFIG] = 0xc10,
+ [AGECONFIG] = 0xc14,
+ [FREQPCT30] = 0xc18,
+ [FREQPCT74] = 0xc1c,
+ [LIMITVALS] = 0xc20,
+ [VBOOT] = 0xc24,
+ [DETWINDOW] = 0xc28,
+ [CONFIG] = 0xc2c,
+ [TSCALCS] = 0xc30,
+ [RUNCONFIG] = 0xc34,
+ [SVSEN] = 0xc38,
+ [INIT2VALS] = 0xc3c,
+ [DCVALUES] = 0xc40,
+ [AGEVALUES] = 0xc44,
+ [VOP30] = 0xc48,
+ [VOP74] = 0xc4c,
+ [TEMP] = 0xc50,
+ [INTSTS] = 0xc54,
+ [INTSTSRAW] = 0xc58,
+ [INTEN] = 0xc5c,
+ [CHKINT] = 0xc60,
+ [CHKSHIFT] = 0xc64,
+ [STATUS] = 0xc68,
+ [VDESIGN30] = 0xc6c,
+ [VDESIGN74] = 0xc70,
+ [DVT30] = 0xc74,
+ [DVT74] = 0xc78,
+ [AGECOUNT] = 0xc7c,
+ [SMSTATE0] = 0xc80,
+ [SMSTATE1] = 0xc84,
+ [CTL0] = 0xc88,
+ [DESDETSEC] = 0xce0,
+ [TEMPAGESEC] = 0xce4,
+ [CTRLSPARE0] = 0xcf0,
+ [CTRLSPARE1] = 0xcf4,
+ [CTRLSPARE2] = 0xcf8,
+ [CTRLSPARE3] = 0xcfc,
+ [CORESEL] = 0xf00,
+ [THERMINTST] = 0xf04,
+ [INTST] = 0xf08,
+ [THSTAGE0ST] = 0xf0c,
+ [THSTAGE1ST] = 0xf10,
+ [THSTAGE2ST] = 0xf14,
+ [THAHBST0] = 0xf18,
+ [THAHBST1] = 0xf1c,
+ [SPARE0] = 0xf20,
+ [SPARE1] = 0xf24,
+ [SPARE2] = 0xf28,
+ [SPARE3] = 0xf2c,
+ [THSLPEVEB] = 0xf30,
+};
+
+/**
+ * struct svs_platform - svs platform control
+ * @name: svs platform name
+ * @base: svs platform register base
+ * @dev: svs platform device
+ * @main_clk: main clock for svs bank
+ * @pbank: svs bank pointer; must only be accessed inside a spin_lock section
+ * @banks: svs banks that svs platform supports
+ * @rst: svs platform reset control
+ * @efuse_parsing: svs platform efuse parsing function pointer
+ * @probe: svs platform probe function pointer
+ * @irqflags: svs platform irq settings flags
+ * @efuse_max: total number of svs efuse
+ * @tefuse_max: total number of thermal efuse
+ * @regs: svs platform registers map
+ * @bank_max: total number of svs banks
+ * @efuse: svs efuse data received from NVMEM framework
+ * @tefuse: thermal efuse data received from NVMEM framework
+ */
+struct svs_platform {
+ char *name;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *main_clk;
+ struct svs_bank *pbank;
+ struct svs_bank *banks;
+ struct reset_control *rst;
+ bool (*efuse_parsing)(struct svs_platform *svsp);
+ int (*probe)(struct svs_platform *svsp);
+ unsigned long irqflags;
+ size_t efuse_max;
+ size_t tefuse_max;
+ const u32 *regs;
+ u32 bank_max;
+ u32 *efuse;
+ u32 *tefuse;
+};
+
+struct svs_platform_data {
+ char *name;
+ struct svs_bank *banks;
+ bool (*efuse_parsing)(struct svs_platform *svsp);
+ int (*probe)(struct svs_platform *svsp);
+ unsigned long irqflags;
+ const u32 *regs;
+ u32 bank_max;
+};
+
+/**
+ * struct svs_bank - svs bank representation
+ * @dev: bank device
+ * @opp_dev: device for opp table/buck control
+ * @init_completion: the timeout completion for bank init
+ * @buck: regulator used by opp_dev
+ * @tzd: thermal zone device for getting temperature
+ * @lock: mutex lock to protect voltage update process
+ * @set_freq_pct: function pointer to set bank frequency percent table
+ * @get_volts: function pointer to get bank voltages
+ * @name: bank name
+ * @buck_name: regulator name
+ * @tzone_name: thermal zone name
+ * @phase: bank current phase
+ * @volt_od: bank voltage overdrive
+ * @reg_data: bank register data in different phase for debug purpose
+ * @pm_runtime_enabled_count: bank pm runtime enabled count
+ * @mode_support: bank mode support.
+ * @freq_base: reference frequency for bank init
+ * @turn_freq_base: reference frequency for 2-line turn point
+ * @vboot: voltage request for bank init01 only
+ * @opp_dfreq: default opp frequency table
+ * @opp_dvolt: default opp voltage table
+ * @freq_pct: frequency percent table for bank init
+ * @volt: bank voltage table
+ * @volt_step: bank voltage step
+ * @volt_base: bank voltage base
+ * @volt_flags: bank voltage flags
+ * @vmax: bank voltage maximum
+ * @vmin: bank voltage minimum
+ * @age_config: bank age configuration
+ * @age_voffset_in: bank age voltage offset
+ * @dc_config: bank dc configuration
+ * @dc_voffset_in: bank dc voltage offset
+ * @dvt_fixed: bank dvt fixed value
+ * @vco: bank VCO value
+ * @chk_shift: bank chicken shift
+ * @core_sel: bank selection
+ * @opp_count: bank opp count
+ * @int_st: bank interrupt identification
+ * @sw_id: bank software identification
+ * @cpu_id: cpu core id for SVS CPU bank use only
+ * @ctl0: TS-x selection
+ * @temp: bank temperature
+ * @tzone_htemp: thermal zone high temperature threshold
+ * @tzone_htemp_voffset: thermal zone high temperature voltage offset
+ * @tzone_ltemp: thermal zone low temperature threshold
+ * @tzone_ltemp_voffset: thermal zone low temperature voltage offset
+ * @bts: svs efuse data
+ * @mts: svs efuse data
+ * @bdes: svs efuse data
+ * @mdes: svs efuse data
+ * @mtdes: svs efuse data
+ * @dcbdet: svs efuse data
+ * @dcmdet: svs efuse data
+ * @turn_pt: 2-line turn point; tells which opp_volt entries come from the high/low bank
+ * @type: bank type indicating whether this is a 2-line (high/low) or 1-line bank
+ *
+ * An svs bank generates suitable voltages by the general math equation below
+ * and provides these voltages to the opp voltage table.
+ *
+ * opp_volt[i] = (volt[i] * volt_step) + volt_base;
+ */
+struct svs_bank {
+ struct device *dev;
+ struct device *opp_dev;
+ struct completion init_completion;
+ struct regulator *buck;
+ struct thermal_zone_device *tzd;
+ struct mutex lock; /* lock to protect voltage update process */
+ void (*set_freq_pct)(struct svs_platform *svsp);
+ void (*get_volts)(struct svs_platform *svsp);
+ char *name;
+ char *buck_name;
+ char *tzone_name;
+ enum svsb_phase phase;
+ s32 volt_od;
+ u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX];
+ u32 pm_runtime_enabled_count;
+ u32 mode_support;
+ u32 freq_base;
+ u32 turn_freq_base;
+ u32 vboot;
+ u32 opp_dfreq[MAX_OPP_ENTRIES];
+ u32 opp_dvolt[MAX_OPP_ENTRIES];
+ u32 freq_pct[MAX_OPP_ENTRIES];
+ u32 volt[MAX_OPP_ENTRIES];
+ u32 volt_step;
+ u32 volt_base;
+ u32 volt_flags;
+ u32 vmax;
+ u32 vmin;
+ u32 age_config;
+ u32 age_voffset_in;
+ u32 dc_config;
+ u32 dc_voffset_in;
+ u32 dvt_fixed;
+ u32 vco;
+ u32 chk_shift;
+ u32 core_sel;
+ u32 opp_count;
+ u32 int_st;
+ u32 sw_id;
+ u32 cpu_id;
+ u32 ctl0;
+ u32 temp;
+ u32 tzone_htemp;
+ u32 tzone_htemp_voffset;
+ u32 tzone_ltemp;
+ u32 tzone_ltemp_voffset;
+ u32 bts;
+ u32 mts;
+ u32 bdes;
+ u32 mdes;
+ u32 mtdes;
+ u32 dcbdet;
+ u32 dcmdet;
+ u32 turn_pt;
+ u32 type;
+};
+
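+/*
+ * percent() - rounded-up percentage of numerator / denominator.
+ *
+ * For example, percent(1196000000, 1989000000) (the CCI and CPU freq_base
+ * values below) computes DIV_ROUND_UP(119600000, 1989000) and returns 61.
+ */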
+static u32 percent(u32 numerator, u32 denominator)
+{
+ /* Divide by 1000 first, otherwise "numerator * 100" may overflow a u32. */
+ numerator /= 1000;
+ denominator /= 1000;
+
+ return DIV_ROUND_UP(numerator * 100, denominator);
+}
+
+static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i)
+{
+ return readl_relaxed(svsp->base + svsp->regs[rg_i]);
+}
+
+static void svs_writel_relaxed(struct svs_platform *svsp, u32 val,
+ enum svs_reg_index rg_i)
+{
+ writel_relaxed(val, svsp->base + svsp->regs[rg_i]);
+}
+
+static void svs_switch_bank(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_writel_relaxed(svsp, svsb->core_sel, CORESEL);
+}
+
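+/*
+ * Bank voltages are stored as small step counts while OPP voltages are in uV.
+ * With the MT8183 values below (volt_step = 6250, volt_base = 500000), a bank
+ * voltage of 0x30 corresponds to 48 * 6250 + 500000 = 800000 uV, and
+ * svs_opp_volt_to_bank_volt() is the exact inverse of this conversion.
+ */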
+static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step,
+ u32 svsb_volt_base)
+{
+ return (svsb_volt * svsb_volt_step) + svsb_volt_base;
+}
+
+static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step,
+ u32 svsb_volt_base)
+{
+ return (opp_u_volt - svsb_volt_base) / svsb_volt_step;
+}
+
+static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
+{
+ struct dev_pm_opp *opp;
+ u32 i, opp_u_volt;
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
+ svsb->opp_dfreq[i],
+ true);
+ if (IS_ERR(opp)) {
+ dev_err(svsb->dev, "cannot find freq = %u (%ld)\n",
+ svsb->opp_dfreq[i], PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ opp_u_volt = dev_pm_opp_get_voltage(opp);
+ svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
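+/*
+ * svs_adjust_pm_opp_volts() applies the bank voltages to the OPP table: in
+ * the error phase the default OPP voltages are restored, in init02 the bank
+ * voltage (lower-clamped to vmin) is used, in mon mode a temperature
+ * dependent offset is added first, and init01 is skipped entirely. The
+ * result is always capped by the default OPP voltage before calling
+ * dev_pm_opp_adjust_voltage().
+ */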
+static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
+{
+ int ret = -EPERM, tzone_temp = 0;
+ u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop;
+
+ mutex_lock(&svsb->lock);
+
+ /*
+ * 2-line bank updates its corresponding opp volts.
+ * 1-line bank updates all opp volts.
+ */
+ if (svsb->type == SVSB_HIGH) {
+ opp_start = 0;
+ opp_stop = svsb->turn_pt;
+ } else if (svsb->type == SVSB_LOW) {
+ opp_start = svsb->turn_pt;
+ opp_stop = svsb->opp_count;
+ } else {
+ opp_start = 0;
+ opp_stop = svsb->opp_count;
+ }
+
+ /* Get thermal effect */
+ if (svsb->phase == SVSB_PHASE_MON) {
+ ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
+ if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND &&
+ svsb->temp < SVSB_TEMP_LOWER_BOUND)) {
+ dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n",
+ svsb->tzone_name, ret, svsb->temp);
+ svsb->phase = SVSB_PHASE_ERROR;
+ }
+
+ if (tzone_temp >= svsb->tzone_htemp)
+ temp_voffset += svsb->tzone_htemp_voffset;
+ else if (tzone_temp <= svsb->tzone_ltemp)
+ temp_voffset += svsb->tzone_ltemp_voffset;
+
+ /* 2-line bank updates all opp volts when running in mon mode */
+ if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ opp_start = 0;
+ opp_stop = svsb->opp_count;
+ }
+ }
+
+ /* vmin <= svsb_volt (opp_volt) <= default opp voltage */
+ for (i = opp_start; i < opp_stop; i++) {
+ switch (svsb->phase) {
+ case SVSB_PHASE_ERROR:
+ opp_volt = svsb->opp_dvolt[i];
+ break;
+ case SVSB_PHASE_INIT01:
+ /* do nothing */
+ goto unlock_mutex;
+ case SVSB_PHASE_INIT02:
+ svsb_volt = max(svsb->volt[i], svsb->vmin);
+ opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ break;
+ case SVSB_PHASE_MON:
+ svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin);
+ opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ break;
+ default:
+ dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ opp_volt = min(opp_volt, svsb->opp_dvolt[i]);
+ ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
+ svsb->opp_dfreq[i],
+ opp_volt, opp_volt,
+ svsb->opp_dvolt[i]);
+ if (ret) {
+ dev_err(svsb->dev, "set %uuV fail: %d\n",
+ opp_volt, ret);
+ goto unlock_mutex;
+ }
+ }
+
+unlock_mutex:
+ mutex_unlock(&svsb->lock);
+
+ return ret;
+}
+
+static int svs_dump_debug_show(struct seq_file *m, void *p)
+{
+ struct svs_platform *svsp = (struct svs_platform *)m->private;
+ struct svs_bank *svsb;
+ unsigned long svs_reg_addr;
+ u32 idx, i, j, bank_id;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse && svsp->efuse[i])
+ seq_printf(m, "M_HW_RES%d = 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ for (i = 0; i < svsp->tefuse_max; i++)
+ if (svsp->tefuse)
+ seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n",
+ i, svsp->tefuse[i]);
+
+ for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) {
+ svsb = &svsp->banks[idx];
+
+ for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) {
+ seq_printf(m, "Bank_number = %u\n", bank_id);
+
+ if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02)
+ seq_printf(m, "mode = init%d\n", i);
+ else if (i == SVSB_PHASE_MON)
+ seq_puts(m, "mode = mon\n");
+ else
+ seq_puts(m, "mode = error\n");
+
+ for (j = DESCHAR; j < SVS_REG_MAX; j++) {
+ svs_reg_addr = (unsigned long)(svsp->base +
+ svsp->regs[j]);
+ seq_printf(m, "0x%08lx = 0x%08x\n",
+ svs_reg_addr, svsb->reg_data[i][j]);
+ }
+ }
+ }
+
+ return 0;
+}
+
+debug_fops_ro(dump);
+
+static int svs_enable_debug_show(struct seq_file *m, void *v)
+{
+ struct svs_bank *svsb = (struct svs_bank *)m->private;
+
+ switch (svsb->phase) {
+ case SVSB_PHASE_ERROR:
+ seq_puts(m, "disabled\n");
+ break;
+ case SVSB_PHASE_INIT01:
+ seq_puts(m, "init1\n");
+ break;
+ case SVSB_PHASE_INIT02:
+ seq_puts(m, "init2\n");
+ break;
+ case SVSB_PHASE_MON:
+ seq_puts(m, "mon mode\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static ssize_t svs_enable_debug_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct svs_bank *svsb = file_inode(filp)->i_private;
+ struct svs_platform *svsp = dev_get_drvdata(svsb->dev);
+ unsigned long flags;
+ int enabled, ret;
+ char *buf = NULL;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)memdup_user_nul(buffer, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = kstrtoint(buf, 10, &enabled);
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+
+ if (!enabled) {
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svsb->mode_support = SVSB_MODE_ALL_DISABLE;
+ svs_switch_bank(svsp);
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_adjust_pm_opp_volts(svsb);
+ }
+
+ kfree(buf);
+
+ return count;
+}
+
+debug_fops_rw(enable);
+
+static int svs_status_debug_show(struct seq_file *m, void *v)
+{
+ struct svs_bank *svsb = (struct svs_bank *)m->private;
+ struct dev_pm_opp *opp;
+ int tzone_temp = 0, ret;
+ u32 i;
+
+ ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
+ if (ret)
+ seq_printf(m, "%s: temperature ignore, turn_pt = %u\n",
+ svsb->name, svsb->turn_pt);
+ else
+ seq_printf(m, "%s: temperature = %d, turn_pt = %u\n",
+ svsb->name, tzone_temp, svsb->turn_pt);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
+ svsb->opp_dfreq[i], true);
+ if (IS_ERR(opp)) {
+ seq_printf(m, "%s: cannot find freq = %u (%ld)\n",
+ svsb->name, svsb->opp_dfreq[i],
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ",
+ i, svsb->opp_dfreq[i], i,
+ dev_pm_opp_get_voltage(opp));
+ seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n",
+ i, svsb->volt[i], i, svsb->freq_pct[i]);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
+debug_fops_ro(status);
+
+static int svs_create_debug_cmds(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct dentry *svs_dir, *svsb_dir, *file_entry;
+ const char *d = "/sys/kernel/debug/svs";
+ u32 i, idx;
+
+ struct svs_dentry {
+ const char *name;
+ const struct file_operations *fops;
+ };
+
+ struct svs_dentry svs_entries[] = {
+ svs_dentry_data(dump),
+ };
+
+ struct svs_dentry svsb_entries[] = {
+ svs_dentry_data(enable),
+ svs_dentry_data(status),
+ };
+
+ svs_dir = debugfs_create_dir("svs", NULL);
+ if (IS_ERR(svs_dir)) {
+ dev_err(svsp->dev, "cannot create %s: %ld\n",
+ d, PTR_ERR(svs_dir));
+ return PTR_ERR(svs_dir);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(svs_entries); i++) {
+ file_entry = debugfs_create_file(svs_entries[i].name, 0664,
+ svs_dir, svsp,
+ svs_entries[i].fops);
+ if (IS_ERR(file_entry)) {
+ dev_err(svsp->dev, "cannot create %s/%s: %ld\n",
+ d, svs_entries[i].name, PTR_ERR(file_entry));
+ return PTR_ERR(file_entry);
+ }
+ }
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (svsb->mode_support == SVSB_MODE_ALL_DISABLE)
+ continue;
+
+ svsb_dir = debugfs_create_dir(svsb->name, svs_dir);
+ if (IS_ERR(svsb_dir)) {
+ dev_err(svsp->dev, "cannot create %s/%s: %ld\n",
+ d, svsb->name, PTR_ERR(svsb_dir));
+ return PTR_ERR(svsb_dir);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) {
+ file_entry = debugfs_create_file(svsb_entries[i].name,
+ 0664, svsb_dir, svsb,
+ svsb_entries[i].fops);
+ if (IS_ERR(file_entry)) {
+ dev_err(svsp->dev, "no %s/%s/%s?: %ld\n",
+ d, svsb->name, svsb_entries[i].name,
+ PTR_ERR(file_entry));
+ return PTR_ERR(file_entry);
+ }
+ }
+ }
+
+ return 0;
+}
+
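+/*
+ * interpolate() - linear interpolation of a voltage at frequency fx between
+ * two known points (f0, v0) and (f1, v1), rounded up.
+ *
+ * For example, interpolate(100, 50, 80, 40, 75) returns 60, the midpoint
+ * voltage for a frequency halfway between the two reference points.
+ */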
+static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
+{
+ u32 vx;
+
+ if (v0 == v1 || f0 == f1)
+ return v0;
+
+ /* *100 to have decimal fraction factor */
+ vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx));
+
+ return DIV_ROUND_UP(vx, 100);
+}
+
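+/*
+ * For the v3 (2-line) layout, VOP30/VOP74 and FREQPCT30/FREQPCT74 each pack
+ * four 8-bit values, one byte per OPP entry. Which OPP entries end up in
+ * which register depends on whether the 2-line turn point falls below or
+ * above the middle of the OPP table; entries not covered by a register are
+ * filled in via interpolate().
+ */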
+static void svs_get_bank_volts_v3(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt;
+ u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0;
+ u32 middle_index = (svsb->opp_count / 2);
+
+ if (svsb->phase == SVSB_PHASE_MON &&
+ svsb->volt_flags & SVSB_MON_VOLT_IGNORE)
+ return;
+
+ vop74 = svs_readl_relaxed(svsp, VOP74);
+ vop30 = svs_readl_relaxed(svsp, VOP30);
+
+ /* Target is to set svsb->volt[] by algorithm */
+ if (turn_pt < middle_index) {
+ if (svsb->type == SVSB_HIGH) {
+ /* volt[0] ~ volt[turn_pt - 1] */
+ for (i = 0; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
+ j = svsb->opp_count - 7;
+ svsb->volt[turn_pt] = vop30 & GENMASK(7, 0);
+ shift_byte++;
+ for (i = j; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+
+ /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */
+ for (i = turn_pt + 1; i < j; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt],
+ svsb->freq_pct[j],
+ svsb->volt[turn_pt],
+ svsb->volt[j],
+ svsb->freq_pct[i]);
+ }
+ } else {
+ if (svsb->type == SVSB_HIGH) {
+ /* volt[0] + volt[j] ~ volt[turn_pt - 1] */
+ j = turn_pt - 7;
+ svsb->volt[0] = vop30 & GENMASK(7, 0);
+ shift_byte++;
+ for (i = j; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+
+ /* volt[1] ~ volt[j - 1] by interpolate */
+ for (i = 1; i < j; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[0],
+ svsb->freq_pct[j],
+ svsb->volt[0],
+ svsb->volt[j],
+ svsb->freq_pct[i]);
+ } else if (svsb->type == SVSB_LOW) {
+ /* volt[turn_pt] ~ volt[opp_count - 1] */
+ for (i = turn_pt; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+ }
+ }
+
+ if (svsb->type == SVSB_HIGH) {
+ opp_start = 0;
+ opp_stop = svsb->turn_pt;
+ } else if (svsb->type == SVSB_LOW) {
+ opp_start = svsb->turn_pt;
+ opp_stop = svsb->opp_count;
+ }
+
+ for (i = opp_start; i < opp_stop; i++)
+ if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT)
+ svsb->volt[i] -= svsb->dvt_fixed;
+}
+
+static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0;
+ u32 b_sft, shift_byte = 0, turn_pt;
+ u32 middle_index = (svsb->opp_count / 2);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) {
+ svsb->turn_pt = i;
+ break;
+ }
+ }
+
+ turn_pt = svsb->turn_pt;
+
+ /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */
+ if (turn_pt < middle_index) {
+ if (svsb->type == SVSB_HIGH) {
+ /*
+ * If we don't handle this situation,
+ * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0"
+ * and this leads SVSB_LOW to work abnormally.
+ */
+ if (turn_pt == 0)
+ freq_pct30 = svsb->freq_pct[0];
+
+ /* freq_pct[0] ~ freq_pct[turn_pt - 1] */
+ for (i = 0; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /*
+ * freq_pct[turn_pt] +
+ * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1]
+ */
+ freq_pct30 = svsb->freq_pct[turn_pt];
+ shift_byte++;
+ j = svsb->opp_count - 7;
+ for (i = j; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ }
+ } else {
+ if (svsb->type == SVSB_HIGH) {
+ /*
+ * freq_pct[0] +
+ * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1]
+ */
+ freq_pct30 = svsb->freq_pct[0];
+ shift_byte++;
+ j = turn_pt - 7;
+ for (i = j; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */
+ for (i = turn_pt; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ }
+ }
+
+ svs_writel_relaxed(svsp, freq_pct74, FREQPCT74);
+ svs_writel_relaxed(svsp, freq_pct30, FREQPCT30);
+}
+
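+/*
+ * For the v2 layout, VOP74 holds the bank voltages of OPP entries 8/10/12/14
+ * and VOP30 those of entries 0/2/4/6 (one byte each, lowest-index entry in
+ * the lowest byte). The odd entries and entry 15 are filled in via
+ * interpolate() below.
+ */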
+static void svs_get_bank_volts_v2(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 temp, i;
+
+ temp = svs_readl_relaxed(svsp, VOP74);
+ svsb->volt[14] = (temp >> 24) & GENMASK(7, 0);
+ svsb->volt[12] = (temp >> 16) & GENMASK(7, 0);
+ svsb->volt[10] = (temp >> 8) & GENMASK(7, 0);
+ svsb->volt[8] = (temp & GENMASK(7, 0));
+
+ temp = svs_readl_relaxed(svsp, VOP30);
+ svsb->volt[6] = (temp >> 24) & GENMASK(7, 0);
+ svsb->volt[4] = (temp >> 16) & GENMASK(7, 0);
+ svsb->volt[2] = (temp >> 8) & GENMASK(7, 0);
+ svsb->volt[0] = (temp & GENMASK(7, 0));
+
+ for (i = 0; i <= 12; i += 2)
+ svsb->volt[i + 1] = interpolate(svsb->freq_pct[i],
+ svsb->freq_pct[i + 2],
+ svsb->volt[i],
+ svsb->volt[i + 2],
+ svsb->freq_pct[i + 1]);
+
+ svsb->volt[15] = interpolate(svsb->freq_pct[12],
+ svsb->freq_pct[14],
+ svsb->volt[12],
+ svsb->volt[14],
+ svsb->freq_pct[15]);
+
+ for (i = 0; i < svsb->opp_count; i++)
+ svsb->volt[i] += svsb->volt_od;
+}
+
+static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_writel_relaxed(svsp,
+ (svsb->freq_pct[14] << 24) |
+ (svsb->freq_pct[12] << 16) |
+ (svsb->freq_pct[10] << 8) |
+ svsb->freq_pct[8],
+ FREQPCT74);
+
+ svs_writel_relaxed(svsp,
+ (svsb->freq_pct[6] << 24) |
+ (svsb->freq_pct[4] << 16) |
+ (svsb->freq_pct[2] << 8) |
+ svsb->freq_pct[0],
+ FREQPCT30);
+}
+
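+/*
+ * svs_set_bank_phase() programs the characterization data (DESCHAR, TEMPCHAR,
+ * DETCHAR), the frequency percentages and the voltage/temperature limits of
+ * the currently selected bank, then enables the requested phase (init01,
+ * init02 or mon) through the INTEN and SVSEN registers.
+ */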
+static void svs_set_bank_phase(struct svs_platform *svsp,
+ enum svsb_phase target_phase)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs;
+
+ svs_switch_bank(svsp);
+
+ des_char = (svsb->bdes << 8) | svsb->mdes;
+ svs_writel_relaxed(svsp, des_char, DESCHAR);
+
+ temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed;
+ svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
+
+ det_char = (svsb->dcbdet << 8) | svsb->dcmdet;
+ svs_writel_relaxed(svsp, det_char, DETCHAR);
+
+ svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
+ svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG);
+ svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG);
+
+ svsb->set_freq_pct(svsp);
+
+ limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) |
+ (SVSB_DTHI << 8) | SVSB_DTLO;
+ svs_writel_relaxed(svsp, limit_vals, LIMITVALS);
+
+ svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
+ svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
+ svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
+ svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+
+ switch (target_phase) {
+ case SVSB_PHASE_INIT01:
+ svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
+ svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
+ svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN);
+ break;
+ case SVSB_PHASE_INIT02:
+ svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
+ init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in;
+ svs_writel_relaxed(svsp, init2vals, INIT2VALS);
+ svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN);
+ break;
+ case SVSB_PHASE_MON:
+ ts_calcs = (svsb->bts << 12) | svsb->mts;
+ svs_writel_relaxed(svsp, ts_calcs, TSCALCS);
+ svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN);
+ svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN);
+ break;
+ default:
+ dev_err(svsb->dev, "requested unknown target phase: %u\n",
+ target_phase);
+ break;
+ }
+}
+
+static inline void svs_save_bank_register_data(struct svs_platform *svsp,
+ enum svsb_phase phase)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ enum svs_reg_index rg_i;
+
+ for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++)
+ svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i);
+}
+
+static inline void svs_error_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, CORESEL));
+ dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n",
+ svs_readl_relaxed(svsp, SVSEN),
+ svs_readl_relaxed(svsp, INTSTS));
+ dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n",
+ svs_readl_relaxed(svsp, SMSTATE0),
+ svs_readl_relaxed(svsp, SMSTATE1));
+ dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+}
+
+static inline void svs_init01_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, VDESIGN74),
+ svs_readl_relaxed(svsp, VDESIGN30),
+ svs_readl_relaxed(svsp, DCVALUES));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01);
+
+ svsb->phase = SVSB_PHASE_INIT01;
+ svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) &
+ GENMASK(15, 0)) + 1;
+ if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE ||
+ (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT &&
+ svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY))
+ svsb->dc_voffset_in = 0;
+
+ svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) &
+ GENMASK(15, 0);
+
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+ svsb->core_sel &= ~SVSB_DET_CLK_EN;
+}
+
+static inline void svs_init02_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, VOP74),
+ svs_readl_relaxed(svsp, VOP30),
+ svs_readl_relaxed(svsp, DCVALUES));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02);
+
+ svsb->phase = SVSB_PHASE_INIT02;
+ svsb->get_volts(svsp);
+
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+}
+
+static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_MON);
+
+ svsb->phase = SVSB_PHASE_MON;
+ svsb->get_volts(svsp);
+
+ svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS);
+}
+
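+/*
+ * The interrupt line is shared by all banks: the handler walks the banks,
+ * picks the one whose int_st bit is cleared in INTST (i.e. the bank that
+ * fired), switches to it and dispatches to the init01/init02/mon/error
+ * handler based on INTSTS and SVSEN, then updates the OPP voltages and
+ * completes any init01/init02 waiter.
+ */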
+static irqreturn_t svs_isr(int irq, void *data)
+{
+ struct svs_platform *svsp = data;
+ struct svs_bank *svsb = NULL;
+ unsigned long flags;
+ u32 idx, int_sts, svs_en;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name);
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+
+ /* Find out which svs bank fires interrupt */
+ if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) {
+ spin_unlock_irqrestore(&svs_lock, flags);
+ continue;
+ }
+
+ svs_switch_bank(svsp);
+ int_sts = svs_readl_relaxed(svsp, INTSTS);
+ svs_en = svs_readl_relaxed(svsp, SVSEN);
+
+ if (int_sts == SVSB_INTSTS_COMPLETE &&
+ svs_en == SVSB_EN_INIT01)
+ svs_init01_isr_handler(svsp);
+ else if (int_sts == SVSB_INTSTS_COMPLETE &&
+ svs_en == SVSB_EN_INIT02)
+ svs_init02_isr_handler(svsp);
+ else if (int_sts & SVSB_INTSTS_MONVOP)
+ svs_mon_mode_isr_handler(svsp);
+ else
+ svs_error_isr_handler(svsp);
+
+ spin_unlock_irqrestore(&svs_lock, flags);
+ break;
+ }
+
+ svs_adjust_pm_opp_volts(svsb);
+
+ if (svsb->phase == SVSB_PHASE_INIT01 ||
+ svsb->phase == SVSB_PHASE_INIT02)
+ complete(&svsb->init_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int svs_init01(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags, time_left;
+ bool search_done;
+ int ret = 0, r;
+ u32 opp_freq, opp_vboot, buck_volt, idx, i;
+
+ /* Keep CPUs' core power on for svs_init01 initialization */
+ cpuidle_pause_and_lock();
+
+ /* Svs bank init01 preparation - power enable */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ ret = regulator_enable(svsb->buck);
+ if (ret) {
+ dev_err(svsb->dev, "%s enable fail: %d\n",
+ svsb->buck_name, ret);
+ goto svs_init01_resume_cpuidle;
+ }
+
+ /* Some bucks don't support mode change; only show a failure message */
+ ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST);
+ if (ret)
+ dev_notice(svsb->dev, "set fast mode fail: %d\n", ret);
+
+ if (svsb->volt_flags & SVSB_INIT01_PD_REQ) {
+ if (!pm_runtime_enabled(svsb->opp_dev)) {
+ pm_runtime_enable(svsb->opp_dev);
+ svsb->pm_runtime_enabled_count++;
+ }
+
+ ret = pm_runtime_get_sync(svsb->opp_dev);
+ if (ret < 0) {
+ dev_err(svsb->dev, "mtcmos on fail: %d\n", ret);
+ goto svs_init01_resume_cpuidle;
+ }
+ }
+ }
+
+ /*
+ * Svs bank init01 preparation - vboot voltage adjustment
+ * Sometimes two svs banks use the same buck. Therefore,
+ * we have to set each svs bank to its target voltage (vboot) first.
+ */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ /*
+ * Find the fastest freq that can be run at vboot and
+ * fix to that freq until svs_init01 is done.
+ */
+ search_done = false;
+ opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
+ svsb->volt_step,
+ svsb->volt_base);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp_freq = svsb->opp_dfreq[i];
+ if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) {
+ ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
+ opp_freq,
+ opp_vboot,
+ opp_vboot,
+ opp_vboot);
+ if (ret) {
+ dev_err(svsb->dev,
+ "set opp %uuV vboot fail: %d\n",
+ opp_vboot, ret);
+ goto svs_init01_finish;
+ }
+
+ search_done = true;
+ } else {
+ ret = dev_pm_opp_disable(svsb->opp_dev,
+ svsb->opp_dfreq[i]);
+ if (ret) {
+ dev_err(svsb->dev,
+ "opp %uHz disable fail: %d\n",
+ svsb->opp_dfreq[i], ret);
+ goto svs_init01_finish;
+ }
+ }
+ }
+ }
+
+ /* Svs bank init01 begins */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
+ svsb->volt_step,
+ svsb->volt_base);
+
+ buck_volt = regulator_get_voltage(svsb->buck);
+ if (buck_volt != opp_vboot) {
+ dev_err(svsb->dev,
+ "buck voltage: %uuV, expected vboot: %uuV\n",
+ buck_volt, opp_vboot);
+ ret = -EPERM;
+ goto svs_init01_finish;
+ }
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_INIT01);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ time_left = wait_for_completion_timeout(&svsb->init_completion,
+ msecs_to_jiffies(5000));
+ if (!time_left) {
+ dev_err(svsb->dev, "init01 completion timeout\n");
+ ret = -EBUSY;
+ goto svs_init01_finish;
+ }
+ }
+
+svs_init01_finish:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ r = dev_pm_opp_enable(svsb->opp_dev,
+ svsb->opp_dfreq[i]);
+ if (r)
+ dev_err(svsb->dev, "opp %uHz enable fail: %d\n",
+ svsb->opp_dfreq[i], r);
+ }
+
+ if (svsb->volt_flags & SVSB_INIT01_PD_REQ) {
+ r = pm_runtime_put_sync(svsb->opp_dev);
+ if (r)
+ dev_err(svsb->dev, "mtcmos off fail: %d\n", r);
+
+ if (svsb->pm_runtime_enabled_count > 0) {
+ pm_runtime_disable(svsb->opp_dev);
+ svsb->pm_runtime_enabled_count--;
+ }
+ }
+
+ r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL);
+ if (r)
+ dev_notice(svsb->dev, "set normal mode fail: %d\n", r);
+
+ r = regulator_disable(svsb->buck);
+ if (r)
+ dev_err(svsb->dev, "%s disable fail: %d\n",
+ svsb->buck_name, r);
+ }
+
+svs_init01_resume_cpuidle:
+ cpuidle_resume_and_unlock();
+
+ return ret;
+}
+
+static int svs_init02(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags, time_left;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT02))
+ continue;
+
+ reinit_completion(&svsb->init_completion);
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_INIT02);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ time_left = wait_for_completion_timeout(&svsb->init_completion,
+ msecs_to_jiffies(5000));
+ if (!time_left) {
+ dev_err(svsb->dev, "init02 completion timeout\n");
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * A 2-line high/low bank updates only its corresponding opp voltages.
+ * Therefore, we sync voltages back from the opp table to keep the
+ * high/low bank voltages consistent.
+ */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT02))
+ continue;
+
+ if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ if (svs_sync_bank_volts_from_opp(svsb)) {
+ dev_err(svsb->dev, "sync volt fail\n");
+ return -EPERM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void svs_mon_mode(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_MON))
+ continue;
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_MON);
+ spin_unlock_irqrestore(&svs_lock, flags);
+ }
+}
+
+static int svs_start(struct svs_platform *svsp)
+{
+ int ret;
+
+ ret = svs_init01(svsp);
+ if (ret)
+ return ret;
+
+ ret = svs_init02(svsp);
+ if (ret)
+ return ret;
+
+ svs_mon_mode(svsp);
+
+ return 0;
+}
+
+static int svs_suspend(struct device *dev)
+{
+ struct svs_platform *svsp = dev_get_drvdata(dev);
+ struct svs_bank *svsb;
+ unsigned long flags;
+ int ret;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ /* This might wait for svs_isr() to finish */
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_switch_bank(svsp);
+ svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_adjust_pm_opp_volts(svsb);
+ }
+
+ ret = reset_control_assert(svsp->rst);
+ if (ret) {
+ dev_err(svsp->dev, "cannot assert reset %d\n", ret);
+ return ret;
+ }
+
+ clk_disable_unprepare(svsp->main_clk);
+
+ return 0;
+}
+
+static int svs_resume(struct device *dev)
+{
+ struct svs_platform *svsp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(svsp->main_clk);
+ if (ret) {
+ dev_err(svsp->dev, "cannot enable main_clk, disable svs\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(svsp->rst);
+ if (ret) {
+ dev_err(svsp->dev, "cannot deassert reset %d\n", ret);
+ goto out_of_resume;
+ }
+
+ ret = svs_init02(svsp);
+ if (ret)
+ goto out_of_resume;
+
+ svs_mon_mode(svsp);
+
+ return 0;
+
+out_of_resume:
+ clk_disable_unprepare(svsp->main_clk);
+ return ret;
+}
+
+static int svs_bank_resource_setup(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int count, ret;
+ u32 idx, i;
+
+ dev_set_drvdata(svsp->dev, svsp);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ svsb->name = "SVSB_CPU_LITTLE";
+ break;
+ case SVSB_CPU_BIG:
+ svsb->name = "SVSB_CPU_BIG";
+ break;
+ case SVSB_CCI:
+ svsb->name = "SVSB_CCI";
+ break;
+ case SVSB_GPU:
+ if (svsb->type == SVSB_HIGH)
+ svsb->name = "SVSB_GPU_HIGH";
+ else if (svsb->type == SVSB_LOW)
+ svsb->name = "SVSB_GPU_LOW";
+ else
+ svsb->name = "SVSB_GPU";
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return -EINVAL;
+ }
+
+ svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev),
+ GFP_KERNEL);
+ if (!svsb->dev)
+ return -ENOMEM;
+
+ ret = dev_set_name(svsb->dev, "%s", svsb->name);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(svsb->dev, svsp);
+
+ ret = dev_pm_opp_of_add_table(svsb->opp_dev);
+ if (ret) {
+ dev_err(svsb->dev, "add opp table fail: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&svsb->lock);
+ init_completion(&svsb->init_completion);
+
+ if (svsb->mode_support & SVSB_MODE_INIT01) {
+ svsb->buck = devm_regulator_get_optional(svsb->opp_dev,
+ svsb->buck_name);
+ if (IS_ERR(svsb->buck)) {
+ dev_err(svsb->dev, "cannot get \"%s-supply\"\n",
+ svsb->buck_name);
+ return PTR_ERR(svsb->buck);
+ }
+ }
+
+ if (svsb->mode_support & SVSB_MODE_MON) {
+ svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name);
+ if (IS_ERR(svsb->tzd)) {
+ dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
+ svsb->tzone_name);
+ return PTR_ERR(svsb->tzd);
+ }
+ }
+
+ count = dev_pm_opp_get_opp_count(svsb->opp_dev);
+ if (svsb->opp_count != count) {
+ dev_err(svsb->dev,
+ "opp_count not \"%u\" but get \"%d\"?\n",
+ svsb->opp_count, count);
+ return count;
+ }
+
+ for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) {
+ opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(svsb->dev, "cannot find freq = %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ svsb->opp_dfreq[i] = freq;
+ svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp);
+ svsb->freq_pct[i] = percent(svsb->opp_dfreq[i],
+ svsb->freq_base);
+ dev_pm_opp_put(opp);
+ }
+ }
+
+ return 0;
+}
+
+static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct nvmem_cell *cell;
+ u32 idx, i, vmin, golden_temp;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse[i])
+ dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ if (!svsp->efuse[9]) {
+ dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n");
+ return false;
+ }
+
+ /* Svs efuse parsing */
+ vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (vmin == 0x1)
+ svsb->vmin = 0x1e;
+
+ if (svsb->type == SVSB_LOW) {
+ svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0);
+ svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0);
+ } else if (svsb->type == SVSB_HIGH) {
+ svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0);
+ svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0);
+ }
+
+ svsb->vmax += svsb->dvt_fixed;
+ }
+
+ /* Thermal efuse parsing */
+ cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
+ if (IS_ERR_OR_NULL(cell)) {
+ dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
+ PTR_ERR(cell));
+ return false;
+ }
+
+ svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
+ if (IS_ERR(svsp->tefuse)) {
+ dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
+ PTR_ERR(svsp->tefuse));
+ nvmem_cell_put(cell);
+ return false;
+ }
+
+ svsp->tefuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ for (i = 0; i < svsp->tefuse_max; i++)
+ if (svsp->tefuse[i] != 0)
+ break;
+
+ if (i == svsp->tefuse_max)
+ golden_temp = 50; /* All thermal efuse data are 0 */
+ else
+ golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mts = 500;
+ svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4;
+ }
+
+ return true;
+}
+
+static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct nvmem_cell *cell;
+ int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
+ int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
+ int o_slope, o_slope_sign, ts_id;
+ u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse[i])
+ dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ if (!svsp->efuse[2]) {
+ dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n");
+ return false;
+ }
+
+ /* Svs efuse parsing */
+ ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (ft_pgm <= 1)
+ svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ svsb->bdes = svsp->efuse[16] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 10;
+ else
+ svsb->volt_od += 2;
+ break;
+ case SVSB_CPU_BIG:
+ svsb->bdes = svsp->efuse[18] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 15;
+ else
+ svsb->volt_od += 12;
+ break;
+ case SVSB_CCI:
+ svsb->bdes = svsp->efuse[4] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 10;
+ else
+ svsb->volt_od += 2;
+ break;
+ case SVSB_GPU:
+ svsb->bdes = svsp->efuse[6] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0);
+
+ if (ft_pgm >= 2) {
+ svsb->freq_base = 800000000; /* 800MHz */
+ svsb->dvt_fixed = 2;
+ }
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return false;
+ }
+ }
+
+ /* Get thermal efuse by nvmem */
+ cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
+ if (IS_ERR(cell)) {
+ dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
+ PTR_ERR(cell));
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
+ if (IS_ERR(svsp->tefuse)) {
+ dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
+ PTR_ERR(svsp->tefuse));
+ nvmem_cell_put(cell);
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ svsp->tefuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ /* Thermal efuse parsing */
+ adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
+ adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0);
+
+ o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0);
+ o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0);
+ o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0);
+ o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0);
+ o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0);
+ o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0);
+
+ degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0);
+ adc_cali_en_t = svsp->tefuse[0] & BIT(0);
+ o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0);
+
+ ts_id = (svsp->tefuse[1] >> 9) & BIT(0);
+ o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0);
+
+ if (adc_cali_en_t == 1) {
+ if (!ts_id)
+ o_slope = 0;
+
+ if (adc_ge_t < 265 || adc_ge_t > 758 ||
+ adc_oe_t < 265 || adc_oe_t > 758 ||
+ o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 ||
+ o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 ||
+ o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 ||
+ o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 ||
+ o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 ||
+ o_vtsabb < -8 || o_vtsabb > 484 ||
+ degc_cali < 1 || degc_cali > 63) {
+ dev_err(svsp->dev, "bad thermal efuse, no mon mode\n");
+ goto remove_mt8183_svsb_mon_mode;
+ }
+ } else {
+ dev_err(svsp->dev, "no thermal efuse, no mon mode\n");
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ ge = ((adc_ge_t - 512) * 10000) / 4096;
+ oe = (adc_oe_t - 512);
+ gain = (10000 + ge);
+
+ format[0] = (o_vtsmcu[0] + 3350 - oe);
+ format[1] = (o_vtsmcu[1] + 3350 - oe);
+ format[2] = (o_vtsmcu[2] + 3350 - oe);
+ format[3] = (o_vtsmcu[3] + 3350 - oe);
+ format[4] = (o_vtsmcu[4] + 3350 - oe);
+ format[5] = (o_vtsabb + 3350 - oe);
+
+ for (i = 0; i < 6; i++)
+ x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain;
+
+ temp0 = (10000 * 100000 / gain) * 15 / 18;
+
+ if (!o_slope_sign)
+ mts = (temp0 * 10) / (1534 + o_slope * 10);
+ else
+ mts = (temp0 * 10) / (1534 - o_slope * 10);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mts = mts;
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ tb_roomt = x_roomt[3];
+ break;
+ case SVSB_CPU_BIG:
+ tb_roomt = x_roomt[4];
+ break;
+ case SVSB_CCI:
+ tb_roomt = x_roomt[3];
+ break;
+ case SVSB_GPU:
+ tb_roomt = x_roomt[1];
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ temp0 = (degc_cali * 10 / 2);
+ temp1 = ((10000 * 100000 / 4096 / gain) *
+ oe + tb_roomt * 10) * 15 / 18;
+
+ if (!o_slope_sign)
+ temp2 = temp1 * 100 / (1534 + o_slope * 10);
+ else
+ temp2 = temp1 * 100 / (1534 - o_slope * 10);
+
+ svsb->bts = (temp0 + temp2 - 250) * 4 / 10;
+ }
+
+ return true;
+
+remove_mt8183_svsb_mon_mode:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mode_support &= ~SVSB_MODE_MON;
+ }
+
+ return true;
+}
+
+static bool svs_is_efuse_data_correct(struct svs_platform *svsp)
+{
+ struct nvmem_cell *cell;
+
+ /* Get svs efuse by nvmem */
+ cell = nvmem_cell_get(svsp->dev, "svs-calibration-data");
+ if (IS_ERR(cell)) {
+ dev_err(svsp->dev, "no \"svs-calibration-data\"? %ld\n",
+ PTR_ERR(cell));
+ return false;
+ }
+
+ svsp->efuse = nvmem_cell_read(cell, &svsp->efuse_max);
+ if (IS_ERR(svsp->efuse)) {
+ dev_err(svsp->dev, "cannot read svs efuse: %ld\n",
+ PTR_ERR(svsp->efuse));
+ nvmem_cell_put(cell);
+ return false;
+ }
+
+ svsp->efuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ return svsp->efuse_parsing(svsp);
+}
+
+static struct device *svs_get_subsys_device(struct svs_platform *svsp,
+ const char *node_name)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, node_name);
+ if (!np) {
+ dev_err(svsp->dev, "cannot find %s node\n", node_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ of_node_put(np);
+ dev_err(svsp->dev, "cannot find pdev by %s\n", node_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ of_node_put(np);
+
+ return &pdev->dev;
+}
+
+static struct device *svs_add_device_link(struct svs_platform *svsp,
+ const char *node_name)
+{
+ struct device *dev;
+ struct device_link *sup_link;
+
+ if (!node_name) {
+ dev_err(svsp->dev, "node name cannot be null\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev = svs_get_subsys_device(svsp, node_name);
+ if (IS_ERR(dev))
+ return dev;
+
+ sup_link = device_link_add(svsp->dev, dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!sup_link) {
+ dev_err(svsp->dev, "sup_link is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return dev;
+}
+
+static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+{
+ struct device *dev;
+ struct svs_bank *svsb;
+ u32 idx;
+
+ svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
+ if (IS_ERR(svsp->rst))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst),
+ "cannot get svs reset control\n");
+
+ dev = svs_add_device_link(svsp, "lvts");
+ if (IS_ERR(dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ "failed to get lvts device\n");
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (svsb->type == SVSB_HIGH)
+ svsb->opp_dev = svs_add_device_link(svsp, "mali");
+ else if (svsb->type == SVSB_LOW)
+ svsb->opp_dev = svs_get_subsys_device(svsp, "mali");
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ "failed to get OPP device for bank %d\n",
+ idx);
+ }
+
+ return 0;
+}
+
+static int svs_mt8183_platform_probe(struct svs_platform *svsp)
+{
+ struct device *dev;
+ struct svs_bank *svsb;
+ u32 idx;
+
+ dev = svs_add_device_link(svsp, "thermal");
+ if (IS_ERR(dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ "failed to get thermal device\n");
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ case SVSB_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(svsb->cpu_id);
+ break;
+ case SVSB_CCI:
+ svsb->opp_dev = svs_add_device_link(svsp, "cci");
+ break;
+ case SVSB_GPU:
+ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ "failed to get OPP device for bank %d\n",
+ idx);
+ }
+
+ return 0;
+}
+
+static struct svs_bank svs_mt8192_banks[] = {
+ {
+ .sw_id = SVSB_GPU,
+ .type = SVSB_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 688000000,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .dvt_fixed = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .core_sel = 0x0fff0100,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ },
+ {
+ .sw_id = SVSB_GPU,
+ .type = SVSB_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu1",
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT |
+ SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 902000000,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .dvt_fixed = 0x6,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .core_sel = 0x0fff0101,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ },
+};
+
+static struct svs_bank svs_mt8183_banks[] = {
+ {
+ .sw_id = SVSB_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1989000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x64,
+ .vmin = 0x18,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0000,
+ .int_st = BIT(0),
+ .ctl0 = 0x00010001,
+ },
+ {
+ .sw_id = SVSB_CPU_BIG,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 4,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1989000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x58,
+ .vmin = 0x10,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0001,
+ .int_st = BIT(1),
+ .ctl0 = 0x00000001,
+ },
+ {
+ .sw_id = SVSB_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1196000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x64,
+ .vmin = 0x18,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0002,
+ .int_st = BIT(2),
+ .ctl0 = 0x00100003,
+ },
+ {
+ .sw_id = SVSB_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "mali",
+ .tzone_name = "tzts2",
+ .volt_flags = SVSB_INIT01_PD_REQ |
+ SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 |
+ SVSB_MODE_MON,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 900000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x40,
+ .vmin = 0x14,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x3,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0003,
+ .int_st = BIT(3),
+ .ctl0 = 0x00050001,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 3,
+ },
+};
+
+static const struct svs_platform_data svs_mt8192_platform_data = {
+ .name = "mt8192-svs",
+ .banks = svs_mt8192_banks,
+ .efuse_parsing = svs_mt8192_efuse_parsing,
+ .probe = svs_mt8192_platform_probe,
+ .irqflags = IRQF_TRIGGER_HIGH,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8192_banks),
+};
+
+static const struct svs_platform_data svs_mt8183_platform_data = {
+ .name = "mt8183-svs",
+ .banks = svs_mt8183_banks,
+ .efuse_parsing = svs_mt8183_efuse_parsing,
+ .probe = svs_mt8183_platform_probe,
+ .irqflags = IRQF_TRIGGER_LOW,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8183_banks),
+};
+
+static const struct of_device_id svs_of_match[] = {
+ {
+ .compatible = "mediatek,mt8192-svs",
+ .data = &svs_mt8192_platform_data,
+ }, {
+ .compatible = "mediatek,mt8183-svs",
+ .data = &svs_mt8183_platform_data,
+ }, {
+ /* Sentinel */
+ },
+};
+
+static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
+{
+ struct svs_platform *svsp;
+ const struct svs_platform_data *svsp_data;
+ int ret;
+
+ svsp_data = of_device_get_match_data(&pdev->dev);
+ if (!svsp_data) {
+ dev_err(&pdev->dev, "no svs platform data?\n");
+ return ERR_PTR(-EPERM);
+ }
+
+ svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL);
+ if (!svsp)
+ return ERR_PTR(-ENOMEM);
+
+ svsp->dev = &pdev->dev;
+ svsp->name = svsp_data->name;
+ svsp->banks = svsp_data->banks;
+ svsp->efuse_parsing = svsp_data->efuse_parsing;
+ svsp->probe = svsp_data->probe;
+ svsp->irqflags = svsp_data->irqflags;
+ svsp->regs = svsp_data->regs;
+ svsp->bank_max = svsp_data->bank_max;
+
+ ret = svsp->probe(svsp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return svsp;
+}
+
+static int svs_probe(struct platform_device *pdev)
+{
+ struct svs_platform *svsp;
+ unsigned int svsp_irq;
+ int ret;
+
+ svsp = svs_platform_probe(pdev);
+ if (IS_ERR(svsp))
+ return PTR_ERR(svsp);
+
+ if (!svs_is_efuse_data_correct(svsp)) {
+ dev_notice(svsp->dev, "efuse data isn't correct\n");
+ ret = -EPERM;
+ goto svs_probe_free_resource;
+ }
+
+ ret = svs_bank_resource_setup(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret);
+ goto svs_probe_free_resource;
+ }
+
+ svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0);
+ ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
+ svsp->irqflags | IRQF_ONESHOT,
+ svsp->name, svsp);
+ if (ret) {
+ dev_err(svsp->dev, "register irq(%d) failed: %d\n",
+ svsp_irq, ret);
+ goto svs_probe_free_resource;
+ }
+
+ svsp->main_clk = devm_clk_get(svsp->dev, "main");
+ if (IS_ERR(svsp->main_clk)) {
+ dev_err(svsp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(svsp->main_clk));
+ ret = PTR_ERR(svsp->main_clk);
+ goto svs_probe_free_resource;
+ }
+
+ ret = clk_prepare_enable(svsp->main_clk);
+ if (ret) {
+ dev_err(svsp->dev, "cannot enable main clk: %d\n", ret);
+ goto svs_probe_free_resource;
+ }
+
+ svsp->base = of_iomap(svsp->dev->of_node, 0);
+ if (IS_ERR_OR_NULL(svsp->base)) {
+ dev_err(svsp->dev, "cannot find svs register base\n");
+ ret = -EINVAL;
+ goto svs_probe_clk_disable;
+ }
+
+ ret = svs_start(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs start fail: %d\n", ret);
+ goto svs_probe_iounmap;
+ }
+
+ ret = svs_create_debug_cmds(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
+ goto svs_probe_iounmap;
+ }
+
+ return 0;
+
+svs_probe_iounmap:
+ iounmap(svsp->base);
+
+svs_probe_clk_disable:
+ clk_disable_unprepare(svsp->main_clk);
+
+svs_probe_free_resource:
+ if (!IS_ERR_OR_NULL(svsp->efuse))
+ kfree(svsp->efuse);
+ if (!IS_ERR_OR_NULL(svsp->tefuse))
+ kfree(svsp->tefuse);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume);
+
+static struct platform_driver svs_driver = {
+ .probe = svs_probe,
+ .driver = {
+ .name = "mtk-svs",
+ .pm = &svs_pm_ops,
+ .of_match_table = of_match_ptr(svs_of_match),
+ },
+};
+
+module_platform_driver(svs_driver);
+
+MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SVS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e718b8735444..e0d7a5459562 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -129,7 +129,10 @@ config QCOM_RPMHPD
config QCOM_RPMPD
tristate "Qualcomm RPM Power domain driver"
+ depends on PM
depends on QCOM_SMD_RPM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
help
QCOM RPM Power domain driver to support power-domains with
performance states. The driver communicates a performance state
@@ -228,4 +231,19 @@ config QCOM_APR
application processor and QDSP6. APR is
used by audio driver to configure QDSP6
ASM, ADM and AFE modules.
+
+config QCOM_ICC_BWMON
+ tristate "QCOM Interconnect Bandwidth Monitor driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select PM_OPP
+ help
+ Sets up a driver that monitors bandwidth on various interconnects
+ and, based on the measurements, votes for interconnect bandwidth,
+ adjusting the interconnect speed to the current demand.
+ The current implementation supports BWMON v4, used for example on
+ SDM845 to measure bandwidth between the CPU (gladiator_noc) and the
+ Last Level Cache (memnoc). Using this BWMON allows removing some of
+ the fixed bandwidth votes from cpufreq (CPU nodes) and thus achieves
+ high memory throughput even at lower CPU frequencies.
+
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 70d5de69fd7b..d66604aff2b0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
+obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 3caabd873322..b4046f393575 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -377,17 +377,14 @@ static int apr_device_probe(struct device *dev)
static void apr_device_remove(struct device *dev)
{
struct apr_device *adev = to_apr_device(dev);
- struct apr_driver *adrv;
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
- if (dev->driver) {
- adrv = to_apr_driver(dev->driver);
- if (adrv->remove)
- adrv->remove(adev);
- spin_lock(&apr->svcs_lock);
- idr_remove(&apr->svcs_idr, adev->svc.id);
- spin_unlock(&apr->svcs_lock);
- }
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+ idr_remove(&apr->svcs_idr, adev->svc.id);
+ spin_unlock(&apr->svcs_lock);
}
static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index dd872017f345..629a7188b576 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -141,13 +141,17 @@ static int cmd_db_get_header(const char *id, const struct entry_header **eh,
const struct rsc_hdr *rsc_hdr;
const struct entry_header *ent;
int ret, i, j;
- u8 query[8];
+ u8 query[sizeof(ent->id)] __nonstring;
ret = cmd_db_ready();
if (ret)
return ret;
- /* Pad out query string to same length as in DB */
+ /*
+ * Pad out query string to same length as in DB. NOTE: the output
+ * query string is not necessarily '\0' terminated if it bumps up
+ * against the max size. That's OK and expected.
+ */
strncpy(query, id, sizeof(query));
for (i = 0; i < MAX_SLV_ID; i++) {
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
new file mode 100644
index 000000000000..7f8aca533cd3
--- /dev/null
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2021-2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on
+ * previous work of Thara Gopinath and msm-4.9 downstream sources.
+ */
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/sizes.h>
+
+/*
+ * The BWMON samples data throughput within 'sample_ms' time. Three
+ * configurable thresholds (Low, Medium and High) give four windows (called
+ * zones) of current bandwidth:
+ *
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HIGH
+ * Zone 3: THRES_HIGH < byte count
+ *
+ * Zones 0 and 2 are not used by this driver.
+ */
+
+/* Internal sampling clock frequency */
+#define HW_TIMER_HZ 19200000
+
+#define BWMON_GLOBAL_IRQ_STATUS 0x0
+#define BWMON_GLOBAL_IRQ_CLEAR 0x8
+#define BWMON_GLOBAL_IRQ_ENABLE 0xc
+#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+
+#define BWMON_IRQ_STATUS 0x100
+#define BWMON_IRQ_STATUS_ZONE_SHIFT 4
+#define BWMON_IRQ_CLEAR 0x108
+#define BWMON_IRQ_ENABLE 0x10c
+#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5
+#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6
+#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7
+#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \
+ BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT))
+
+#define BWMON_ENABLE 0x2a0
+#define BWMON_ENABLE_ENABLE BIT(0)
+
+#define BWMON_CLEAR 0x2a4
+#define BWMON_CLEAR_CLEAR BIT(0)
+
+#define BWMON_SAMPLE_WINDOW 0x2a8
+#define BWMON_THRESHOLD_HIGH 0x2ac
+#define BWMON_THRESHOLD_MED 0x2b0
+#define BWMON_THRESHOLD_LOW 0x2b4
+
+#define BWMON_ZONE_ACTIONS 0x2b8
+/*
+ * Actions to perform on some zone 'z' when current zone hits the threshold:
+ * Increment counter of zone 'z'
+ */
+#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2))
+/* Clear counter of zone 'z' */
+#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2))
+
+/* Zone 0 threshold hit: Clear zone count */
+#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 1 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 2 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 3 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \
+ BWMON_ZONE_ACTIONS_CLEAR(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+/* Value for BWMON_ZONE_ACTIONS */
+#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \
+ BWMON_ZONE_ACTIONS_ZONE1 << 8 | \
+ BWMON_ZONE_ACTIONS_ZONE2 << 16 | \
+ BWMON_ZONE_ACTIONS_ZONE3 << 24)
+
+/*
+ * There is no clear documentation/explanation of BWMON_THRESHOLD_COUNT
+ * register. Based on observations, it is the number of times a threshold
+ * has to be reached to trigger an interrupt in a given zone.
+ *
+ * 0xff is the maximum value and is used to effectively ignore zones 0 and 2.
+ */
+#define BWMON_THRESHOLD_COUNT 0x2bc
+#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8
+#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16
+#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24
+#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff
+#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff
+
+/* BWMONv4 count registers use count unit of 64 kB */
+#define BWMON_COUNT_UNIT_KB 64
+#define BWMON_ZONE_COUNT 0x2d8
+#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+
+struct icc_bwmon_data {
+ unsigned int sample_ms;
+ unsigned int default_highbw_kbps;
+ unsigned int default_medbw_kbps;
+ unsigned int default_lowbw_kbps;
+ u8 zone1_thres_count;
+ u8 zone3_thres_count;
+};
+
+struct icc_bwmon {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+
+ unsigned int default_lowbw_kbps;
+ unsigned int sample_ms;
+ unsigned int max_bw_kbps;
+ unsigned int min_bw_kbps;
+ unsigned int target_kbps;
+ unsigned int current_kbps;
+};
+
+static void bwmon_clear_counters(struct icc_bwmon *bwmon)
+{
+ /*
+ * Clear counters. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * The counter clear and IRQ clear bits are not in the same 4KB
+ * region. So, we need to make sure the counter clear is completed
+ * before we try to clear the IRQ or do any other counter operations.
+ */
+ writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR);
+}
+
+static void bwmon_clear_irq(struct icc_bwmon *bwmon)
+{
+ /*
+ * Clear zone and global interrupts. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * Synchronize the local interrupt clear in mon_irq_clear()
+ * with the global interrupt clear here. Otherwise, the CPU
+ * may reorder the two writes and clear the global interrupt
+ * before the local interrupt, causing the global interrupt
+ * to be retriggered by the local interrupt still being high.
+ *
+ * Similarly, because the global registers are in a different
+ * region than the local registers, we need to ensure any register
+ * writes to enable the monitor after this call are ordered with the
+ * clearing here so that local writes don't happen before the
+ * interrupt is cleared.
+ */
+ writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR);
+ writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR);
+}
+
+static void bwmon_disable(struct icc_bwmon *bwmon)
+{
+ /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
+ writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
+ writel(0x0, bwmon->base + BWMON_IRQ_ENABLE);
+
+ /*
+ * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious
+ * IRQ.
+ */
+ writel(0x0, bwmon->base + BWMON_ENABLE);
+}
+
+static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
+{
+ /* Enable interrupts */
+ writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE,
+ bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
+ writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE);
+
+ /* Enable bwmon */
+ writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE);
+}
+
+static unsigned int bwmon_kbps_to_count(unsigned int kbps)
+{
+ return kbps / BWMON_COUNT_UNIT_KB;
+}
+
+static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg,
+ unsigned int kbps)
+{
+ unsigned int thres;
+
+ thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms,
+ MSEC_PER_SEC);
+ writel_relaxed(thres, bwmon->base + reg);
+}
+
+static void bwmon_start(struct icc_bwmon *bwmon,
+ const struct icc_bwmon_data *data)
+{
+ unsigned int thres_count;
+ int window;
+
+ bwmon_clear_counters(bwmon);
+
+ window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
+ /* Maximum sampling window: 0xfffff */
+ writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW);
+
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH,
+ data->default_highbw_kbps);
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED,
+ data->default_medbw_kbps);
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW,
+ data->default_lowbw_kbps);
+
+ thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT |
+ BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT |
+ data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT |
+ BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT;
+ writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT);
+ writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT,
+ bwmon->base + BWMON_ZONE_ACTIONS);
+ /* Write barriers in bwmon_clear_irq() */
+
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK);
+}
+
+static irqreturn_t bwmon_intr(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int status, max;
+ int zone;
+
+ status = readl(bwmon->base + BWMON_IRQ_STATUS);
+ status &= BWMON_IRQ_ENABLE_MASK;
+ if (!status) {
+ /*
+ * Only the zone 1 and zone 3 interrupts are enabled, but the zone 2
+ * threshold could still be hit and trigger an interrupt even though
+ * it is not enabled.
+ * Such a spurious interrupt may or may not carry a useful max count,
+ * so the robust solution would be to always check all the
+ * BWMON_ZONE_MAX() registers and pick the highest value.
+ * That case is currently ignored.
+ */
+ return IRQ_NONE;
+ }
+
+ bwmon_disable(bwmon);
+
+ zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1;
+ /*
+ * The zone max byte count register returns count units within the
+ * sampling window. The downstream kernel for BWMONv4 (called BWMON
+ * type 2 downstream) always increments the max byte count by one.
+ */
+ max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1;
+ max *= BWMON_COUNT_UNIT_KB;
+ bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int irq_enable = 0;
+ struct dev_pm_opp *opp, *target_opp;
+ unsigned int bw_kbps, up_kbps, down_kbps;
+
+ bw_kbps = bwmon->target_kbps;
+
+ target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
+ target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+
+ bwmon->target_kbps = bw_kbps;
+
+ bw_kbps--;
+ opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ down_kbps = bwmon->target_kbps;
+ else
+ down_kbps = bw_kbps;
+
+ up_kbps = bwmon->target_kbps + 1;
+
+ if (bwmon->target_kbps >= bwmon->max_bw_kbps)
+ irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT);
+ else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
+ irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT);
+ else
+ irq_enable = BWMON_IRQ_ENABLE_MASK;
+
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps);
+ bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps);
+ /* Write barriers in bwmon_clear_counters() */
+ bwmon_clear_counters(bwmon);
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, irq_enable);
+
+ if (bwmon->target_kbps == bwmon->current_kbps)
+ goto out;
+
+ dev_pm_opp_set_opp(bwmon->dev, target_opp);
+ bwmon->current_kbps = bwmon->target_kbps;
+
+out:
+ dev_pm_opp_put(target_opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ return IRQ_HANDLED;
+}
+
+static int bwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
+ struct icc_bwmon *bwmon;
+ const struct icc_bwmon_data *data;
+ int ret;
+
+ bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
+ if (!bwmon)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(dev);
+
+ bwmon->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bwmon->base)) {
+ dev_err(dev, "failed to map bwmon registers\n");
+ return PTR_ERR(bwmon->base);
+ }
+
+ bwmon->irq = platform_get_irq(pdev, 0);
+ if (bwmon->irq < 0)
+ return bwmon->irq;
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
+
+ bwmon->max_bw_kbps = UINT_MAX;
+ opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+
+ bwmon->min_bw_kbps = 0;
+ opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+
+ bwmon->sample_ms = data->sample_ms;
+ bwmon->default_lowbw_kbps = data->default_lowbw_kbps;
+ bwmon->dev = dev;
+
+ bwmon_disable(bwmon);
+ ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr,
+ bwmon_intr_thread,
+ IRQF_ONESHOT, dev_name(dev), bwmon);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ\n");
+
+ platform_set_drvdata(pdev, bwmon);
+ bwmon_start(bwmon, data);
+
+ return 0;
+}
+
+static int bwmon_remove(struct platform_device *pdev)
+{
+ struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
+
+ bwmon_disable(bwmon);
+
+ return 0;
+}
+
+/* BWMON v4 */
+static const struct icc_bwmon_data msm8998_bwmon_data = {
+ .sample_ms = 4,
+ .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
+ .default_medbw_kbps = 512 * 1024, /* 512 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+};
+
+static const struct of_device_id bwmon_of_match[] = {
+ { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bwmon_of_match);
+
+static struct platform_driver bwmon_driver = {
+ .probe = bwmon_probe,
+ .remove = bwmon_remove,
+ .driver = {
+ .name = "qcom-bwmon",
+ .of_match_table = bwmon_of_match,
+ },
+};
+module_platform_driver(bwmon_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("QCOM BWMON driver");
+MODULE_LICENSE("GPL");
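As a worked illustration of the fixed-point math used by bwmon_set_threshold() and bwmon_intr() (not driver code; mult_frac() is simplified from the kernel macro and the numbers are the msm8998 defaults from above):

    #include <stdio.h>

    #define BWMON_COUNT_UNIT_KB	64
    #define MSEC_PER_SEC		1000
    /* simplified form of the kernel's mult_frac() */
    #define mult_frac(x, n, d)	(((x) / (d)) * (n) + (((x) % (d)) * (n)) / (d))

    int main(void)
    {
            unsigned int sample_ms = 4;
            unsigned int kbps = 4800 * 1024;	/* default high threshold, 4.8 GBps */
            unsigned int count, max, back_kbps;

            /* bwmon_set_threshold(): kB/s -> 64 kB count units per sample window */
            count = mult_frac(kbps / BWMON_COUNT_UNIT_KB, sample_ms, MSEC_PER_SEC);
            printf("threshold count: %u units\n", count);

            /* bwmon_intr(): zone max count (+1) converted back to kB/s */
            max = (count + 1) * BWMON_COUNT_UNIT_KB;
            back_kbps = mult_frac(max, MSEC_PER_SEC, sample_ms);
            printf("reported bandwidth: %u kB/s\n", back_kbps);

            return 0;
    }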
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 4b143cf7b4ce..38d7296315a2 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -382,7 +382,7 @@ static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
* llcc_slice_getd - get llcc slice descriptor
* @uid: usecase_id for the client
*
- * A pointer to llcc slice descriptor will be returned on success and
+ * A pointer to llcc slice descriptor will be returned on success
* and error pointer is returned on failure
*/
struct llcc_slice_desc *llcc_slice_getd(u32 uid)
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 366db493579b..3f11554df2f3 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -108,6 +108,8 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
* qcom_mdt_read_metadata() - read header and metadata from mdt or mbn
* @fw: firmware of mdt header or mbn
* @data_len: length of the read metadata blob
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @dev: device handle to associate resources with
*
* The mechanism that performs the authentication of the loading firmware
* expects an ELF header directly followed by the segment of hashes, with no
@@ -192,7 +194,7 @@ EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata);
* qcom_mdt_pas_init() - initialize PAS region for firmware loading
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
+ * @fw_name: name of the firmware, for construction of segment file names
* @pas_id: PAS identifier
* @mem_phys: physical address of allocated memory region
* @ctx: PAS metadata context, to be released by caller
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index 97fd24c178f8..c92d26b73e6f 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -194,14 +194,17 @@ struct ocmem *of_get_ocmem(struct device *dev)
devnode = of_parse_phandle(dev->of_node, "sram", 0);
if (!devnode || !devnode->parent) {
dev_err(dev, "Cannot look up sram phandle\n");
+ of_node_put(devnode);
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(devnode->parent);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ of_node_put(devnode);
return ERR_PTR(-EPROBE_DEFER);
}
+ of_node_put(devnode);
ocmem = platform_get_drvdata(pdev);
if (!ocmem) {
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 28a8c0dda66c..a0ceeede450f 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/slab.h>
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index a59bb34e5eba..18c856056475 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -399,8 +399,10 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
goto unroll;
+ }
}
if (!count)
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 05fff8691ee3..092f6ab09acf 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -23,8 +23,8 @@
/**
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
- * @pd: generic_pm_domain corrresponding to the power domain
- * @parent: generic_pm_domain corrresponding to the parent's power domain
+ * @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case Active only Voting is
* supported
* @active_only: True if it represents an Active only peer
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 3b5b91621532..5803038c744e 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -453,6 +453,7 @@ static const struct rpmpd_desc qcm2290_desc = {
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc },
+ { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 30dda1af63c8..413f9f4ae9cd 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -234,6 +234,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
{ .compatible = "qcom,rpm-ipq6018" },
{ .compatible = "qcom,rpm-msm8226" },
+ { .compatible = "qcom,rpm-msm8909" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8936" },
{ .compatible = "qcom,rpm-msm8953" },
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 59dbf4b61e6c..d9c28a8a7cbf 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -119,6 +119,9 @@ struct smp2p_entry {
* @out: pointer to the outbound smem item
* @smem_items: ids of the two smem items
* @valid_entries: already scanned inbound entries
+ * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
+ * @ssr_ack: current cached state of the local ack bit
+ * @negotiation_done: whether feature negotiation has finished
* @local_pid: processor id of the inbound edge
* @remote_pid: processor id of the outbound edge
* @ipc_regmap: regmap for the outbound ipc
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index cee579a267a6..4554fb8655d3 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -328,10 +328,12 @@ static const struct soc_id soc_id[] = {
{ 455, "QRB5165" },
{ 457, "SM8450" },
{ 459, "SM7225" },
- { 460, "SA8540P" },
+ { 460, "SA8295P" },
+ { 461, "SA8540P" },
{ 480, "SM8450" },
{ 482, "SM8450" },
{ 487, "SC7280" },
+ { 495, "SC7180P" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index f831420b7fd4..484b42b7454e 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -74,6 +74,18 @@ static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = {
[SPM_REG_SEQ_ENTRY] = 0x400,
};
+/* SPM register data for 8909 */
+static const struct spm_reg_data spm_reg_8909_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x76, 0x76, 0x0B, 0x94, 0x5B, 0x80,
+ 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
/* SPM register data for 8916 */
static const struct spm_reg_data spm_reg_8916_cpu = {
.reg_offset = spm_reg_offset_v3_0,
@@ -195,6 +207,8 @@ static const struct of_device_id spm_match_table[] = {
.data = &spm_reg_660_silver_l2 },
{ .compatible = "qcom,msm8226-saw2-v2.1-cpu",
.data = &spm_reg_8226_cpu },
+ { .compatible = "qcom,msm8909-saw2-v3.0-cpu",
+ .data = &spm_reg_8909_cpu },
{ .compatible = "qcom,msm8916-saw2-v3.0-cpu",
.data = &spm_reg_8916_cpu },
{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
index fdfc857df334..04f1bc322ae7 100644
--- a/drivers/soc/renesas/r8a779a0-sysc.c
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -57,11 +57,11 @@ static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = {
{ "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
{ "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
{ "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
- { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
- { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
- { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
- { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
- { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR },
+ { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR },
+ { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR },
+ { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR },
+ { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR },
{ "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR },
{ "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 },
{ "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 },
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h
index fe2d98254754..388cfa8f8f9f 100644
--- a/drivers/soc/renesas/rcar-gen4-sysc.h
+++ b/drivers/soc/renesas/rcar-gen4-sysc.h
@@ -25,8 +25,8 @@
struct rcar_gen4_sysc_area {
const char *name;
u8 pdr; /* PDRn */
- int parent; /* -1 if none */
- unsigned int flags; /* See PD_* */
+ s8 parent; /* -1 if none */
+ u8 flags; /* See PD_* */
};
/*
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 8d861c1cfdf7..266c599a0a9b 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -31,8 +31,8 @@ struct rcar_sysc_area {
u16 chan_offs; /* Offset of PWRSR register for this area */
u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */
u8 isr_bit; /* Bit in SYSCI*R */
- int parent; /* -1 if none */
- unsigned int flags; /* See PD_* */
+ s8 parent; /* -1 if none */
+ u8 flags; /* See PD_* */
};
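A throwaway user-space sketch (only the fields visible in the hunk above, sizes depend on the ABI) of what narrowing parent/flags buys per rcar_sysc_area table entry:

    #include <stdio.h>
    #include <stdint.h>

    struct area_old {	/* int parent + unsigned int flags */
            uint16_t chan_offs;
            uint8_t chan_bit;
            uint8_t isr_bit;
            int parent;
            unsigned int flags;
    };

    struct area_new {	/* s8 parent + u8 flags */
            uint16_t chan_offs;
            uint8_t chan_bit;
            uint8_t isr_bit;
            int8_t parent;
            uint8_t flags;
    };

    int main(void)
    {
            printf("old: %zu bytes/entry, new: %zu bytes/entry\n",
                   sizeof(struct area_old), sizeof(struct area_new));
            return 0;
    }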
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index 1fef0e711056..8aecbc9b1976 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -6,6 +6,7 @@
config SUNXI_MBUS
bool
default ARCH_SUNXI
+ depends on ARM || ARM64
help
Say y to enable the fixups needed to support the Allwinner
MBUS DMA quirks.
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index 32c346b72635..dff6d5ef4e46 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -107,30 +107,47 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
{
u32 hw_version;
int err;
-
- err = devm_pm_opp_set_clkname(dev, NULL);
- if (err) {
- dev_err(dev, "failed to set OPP clk: %d\n", err);
- return err;
- }
-
- /* Tegra114+ doesn't support OPP yet */
- if (!of_machine_is_compatible("nvidia,tegra20") &&
- !of_machine_is_compatible("nvidia,tegra30"))
- return -ENODEV;
-
- if (of_machine_is_compatible("nvidia,tegra20"))
+ /*
+ * The clk's connection id to set is NULL, and this is a NULL-terminated
+ * array, hence the two NULL entries.
+ */
+ const char *clk_names[] = { NULL, NULL };
+ struct dev_pm_opp_config config = {
+ /*
+ * For some devices we don't have any OPP table in the DT, and
+ * in order to use the same code path for all the devices, we
+ * create a dummy OPP table for them via this. The dummy OPP
+ * table is only capable of doing clk_set_rate() on invocation
+ * of dev_pm_opp_set_rate() and doesn't provide any other
+ * functionality.
+ */
+ .clk_names = clk_names,
+ };
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
hw_version = BIT(tegra_sku_info.soc_process_id);
- else
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ } else if (of_machine_is_compatible("nvidia,tegra30")) {
hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ }
- err = devm_pm_opp_set_supported_hw(dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(dev, &config);
if (err) {
- dev_err(dev, "failed to set OPP supported HW: %d\n", err);
+ dev_err(dev, "failed to set OPP config: %d\n", err);
return err;
}
/*
+ * Tegra114+ doesn't support OPP yet; return early for the non-Tegra20/30
+ * case.
+ */
+ if (!config.supported_hw)
+ return -ENODEV;
+
+ /*
* Older device-trees have an empty OPP table, we will get
* -ENODEV from devm_pm_opp_of_add_table() in this case.
*/
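A quick stand-alone illustration of the supported-hw encoding used above (BIT() is redefined for user space and the mask values are invented): the driver advertises BIT(process_id) or BIT(speedo_id), and an OPP stays enabled when its opp-supported-hw mask matches (bitwise AND) that value:

    #include <stdio.h>

    #define BIT(n)	(1U << (n))

    int main(void)
    {
            unsigned int soc_process_id = 2;		/* hypothetical Tegra20 SKU */
            unsigned int hw_version = BIT(soc_process_id);	/* what the driver reports */
            /* hypothetical opp-supported-hw masks from three DT OPP entries */
            unsigned int opp_masks[] = { 0x1, 0x6, 0xf };
            unsigned int i;

            for (i = 0; i < sizeof(opp_masks) / sizeof(opp_masks[0]); i++)
                    printf("OPP %u (mask 0x%x): %s\n", i, opp_masks[i],
                           (opp_masks[i] & hw_version) ? "enabled" : "disabled");

            return 0;
    }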
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 5611d14d3ba2..6a4b8f7e7948 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1384,7 +1384,7 @@ tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct generic_pm_domain *genpd;
- const char *rname = "core";
+ const char *rname[] = { "core", NULL};
int err;
genpd = devm_kzalloc(pmc->dev, sizeof(*genpd), GFP_KERNEL);
@@ -1395,7 +1395,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
- err = devm_pm_opp_set_regulators(pmc->dev, &rname, 1);
+ err = devm_pm_opp_set_regulators(pmc->dev, rname);
if (err)
return dev_err_probe(pmc->dev, err,
"failed to set core OPP regulator\n");
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index d756591de973..84afebd355be 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Texas Instruments Incorporated
* Authors: Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 0e4ba0f89533..6882c86b3ce5 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -338,6 +338,7 @@ static const struct of_device_id pruss_of_match[] = {
{ .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
{ .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
{ .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, },
{},
};
MODULE_DEVICE_TABLE(of, pruss_of_match);
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 0076d467ff6b..343c58ed5896 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -688,7 +688,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
&m3_ipc->sd_fw_name);
if (ret) {
dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
- };
+ }
/*
* Wait for firmware loading completion in a thread so we
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index 5dcb7665fe22..2de082765bef 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -647,8 +647,7 @@ static int xlnx_event_manager_probe(struct platform_device *pdev)
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);
- ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
- 0, NULL);
+ ret = zynqmp_pm_register_sgi(sgi_num, 0);
if (ret) {
dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
xlnx_event_cleanup_sgi(pdev);
@@ -681,7 +680,7 @@ static int xlnx_event_manager_remove(struct platform_device *pdev)
kfree(eve_data);
}
- ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL);
+ ret = zynqmp_pm_register_sgi(0, 1);
if (ret)
dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index a2bfb0434a67..8d4000664fa3 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "sysfs_local.h"
@@ -842,15 +843,21 @@ static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type)
{
- int ret;
+ int ret = 0;
- if (slave->ops && slave->ops->clk_stop) {
- ret = slave->ops->clk_stop(slave, mode, type);
- if (ret < 0)
- return ret;
+ mutex_lock(&slave->sdw_dev_lock);
+
+ if (slave->probed) {
+ struct device *dev = &slave->dev;
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+ if (drv->ops && drv->ops->clk_stop)
+ ret = drv->ops->clk_stop(slave, mode, type);
}
- return 0;
+ mutex_unlock(&slave->sdw_dev_lock);
+
+ return ret;
}
static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
@@ -1611,14 +1618,24 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
}
/* Update the Slave driver */
- if (slave_notify && slave->ops &&
- slave->ops->interrupt_callback) {
- slave_intr.sdca_cascade = sdca_cascade;
- slave_intr.control_port = clear;
- memcpy(slave_intr.port, &port_status,
- sizeof(slave_intr.port));
-
- slave->ops->interrupt_callback(slave, &slave_intr);
+ if (slave_notify) {
+ mutex_lock(&slave->sdw_dev_lock);
+
+ if (slave->probed) {
+ struct device *dev = &slave->dev;
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+ if (drv->ops && drv->ops->interrupt_callback) {
+ slave_intr.sdca_cascade = sdca_cascade;
+ slave_intr.control_port = clear;
+ memcpy(slave_intr.port, &port_status,
+ sizeof(slave_intr.port));
+
+ drv->ops->interrupt_callback(slave, &slave_intr);
+ }
+ }
+
+ mutex_unlock(&slave->sdw_dev_lock);
}
/* Ack interrupt */
@@ -1692,29 +1709,21 @@ io_err:
static int sdw_update_slave_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
- unsigned long time;
+ int ret = 0;
- if (!slave->probed) {
- /*
- * the slave status update is typically handled in an
- * interrupt thread, which can race with the driver
- * probe, e.g. when a module needs to be loaded.
- *
- * make sure the probe is complete before updating
- * status.
- */
- time = wait_for_completion_timeout(&slave->probe_complete,
- msecs_to_jiffies(DEFAULT_PROBE_TIMEOUT));
- if (!time) {
- dev_err(&slave->dev, "Probe not complete, timed out\n");
- return -ETIMEDOUT;
- }
+ mutex_lock(&slave->sdw_dev_lock);
+
+ if (slave->probed) {
+ struct device *dev = &slave->dev;
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+ if (drv->ops && drv->ops->update_status)
+ ret = drv->ops->update_status(slave, status);
}
- if (!slave->ops || !slave->ops->update_status)
- return 0;
+ mutex_unlock(&slave->sdw_dev_lock);
- return slave->ops->update_status(slave, status);
+ return ret;
}
/**
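The clk_stop, interrupt_callback and update_status hunks above (and the stream.c changes later in this series) all reduce to the same shape; a condensed sketch of that pattern, with some_callback standing in for a real ops member:

    static int sdw_call_driver_op(struct sdw_slave *slave)
    {
            int ret = 0;

            mutex_lock(&slave->sdw_dev_lock);

            /*
             * 'probed' is set and cleared under the same lock in sdw_drv_probe()
             * and sdw_drv_remove(), so dev->driver and drv->ops cannot disappear
             * while the callback runs.
             */
            if (slave->probed) {
                    struct sdw_driver *drv = drv_to_sdw_driver(slave->dev.driver);

                    if (drv->ops && drv->ops->some_callback)
                            ret = drv->ops->some_callback(slave);
            }

            mutex_unlock(&slave->sdw_dev_lock);

            return ret;
    }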
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 893296f3fe39..04b3529f8929 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -98,8 +98,6 @@ static int sdw_drv_probe(struct device *dev)
if (!id)
return -ENODEV;
- slave->ops = drv->ops;
-
/*
* attach to power domain but don't turn on (last arg)
*/
@@ -107,19 +105,23 @@ static int sdw_drv_probe(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&slave->sdw_dev_lock);
+
ret = drv->probe(slave, id);
if (ret) {
name = drv->name;
if (!name)
name = drv->driver.name;
+ mutex_unlock(&slave->sdw_dev_lock);
+
dev_err(dev, "Probe of %s failed: %d\n", name, ret);
dev_pm_domain_detach(dev, false);
return ret;
}
/* device is probed so let's read the properties now */
- if (slave->ops && slave->ops->read_prop)
- slave->ops->read_prop(slave);
+ if (drv->ops && drv->ops->read_prop)
+ drv->ops->read_prop(slave);
/* init the sysfs as we have properties now */
ret = sdw_slave_sysfs_init(slave);
@@ -139,7 +141,19 @@ static int sdw_drv_probe(struct device *dev)
slave->prop.clk_stop_timeout);
slave->probed = true;
- complete(&slave->probe_complete);
+
+ /*
+ * If the probe happened after the bus was started, notify the codec driver
+ * of the current hardware status so that it can e.g. start its initialization.
+ * Errors are only logged as warnings to avoid failing the probe.
+ */
+ if (drv->ops && drv->ops->update_status) {
+ ret = drv->ops->update_status(slave, slave->status);
+ if (ret < 0)
+ dev_warn(dev, "%s: update_status failed with status %d\n", __func__, ret);
+ }
+
+ mutex_unlock(&slave->sdw_dev_lock);
dev_dbg(dev, "probe complete\n");
@@ -152,9 +166,15 @@ static int sdw_drv_remove(struct device *dev)
struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
int ret = 0;
+ mutex_lock(&slave->sdw_dev_lock);
+
+ slave->probed = false;
+
if (drv->remove)
ret = drv->remove(slave);
+ mutex_unlock(&slave->sdw_dev_lock);
+
dev_pm_domain_detach(dev, false);
return ret;
@@ -193,12 +213,8 @@ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
drv->driver.owner = owner;
drv->driver.probe = sdw_drv_probe;
-
- if (drv->remove)
- drv->driver.remove = sdw_drv_remove;
-
- if (drv->shutdown)
- drv->driver.shutdown = sdw_drv_shutdown;
+ drv->driver.remove = sdw_drv_remove;
+ drv->driver.shutdown = sdw_drv_shutdown;
return driver_register(&drv->driver);
}
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 505c5ef061e3..89d1d0d021fc 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1004,9 +1004,18 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_cdns_dma_data *dma;
int ret = 0;
+ /*
+ * The .trigger callback is used to send required IPC to audio
+ * firmware. The .free_stream callback will still be called
+ * by intel_free_stream() in the TRIGGER_SUSPEND case.
+ */
+ if (res->ops && res->ops->trigger)
+ res->ops->trigger(dai, cmd, substream->stream);
+
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma) {
dev_err(dai->dev, "failed to get dma data in %s\n",
@@ -1043,6 +1052,23 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
return ret;
}
+static int intel_component_probe(struct snd_soc_component *component)
+{
+ int ret;
+
+ /*
+ * Make sure the device is pm_runtime_active before initiating
+ * bus transactions during card registration. We use
+ * pm_runtime_resume() here rather than taking a reference and
+ * releasing it immediately.
+ */
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ return 0;
+}
+
static int intel_component_dais_suspend(struct snd_soc_component *component)
{
struct snd_soc_dai *dai;
@@ -1097,8 +1123,10 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
};
static const struct snd_soc_component_driver dai_component = {
- .name = "soundwire",
- .suspend = intel_component_dais_suspend
+ .name = "soundwire",
+ .probe = intel_component_probe,
+ .suspend = intel_component_dais_suspend,
+ .legacy_dai_naming = 1,
};
static int intel_create_dai(struct sdw_cdns *cdns,
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 22b706350ead..9df970eeca45 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -13,6 +13,7 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/pm_wakeirq.h>
#include <linux/slimbus.h>
@@ -142,6 +143,7 @@ struct qcom_swrm_ctrl {
struct device *dev;
struct regmap *regmap;
void __iomem *mmio;
+ struct reset_control *audio_cgcr;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
@@ -179,6 +181,7 @@ struct qcom_swrm_ctrl {
struct qcom_swrm_data {
u32 default_cols;
u32 default_rows;
+ bool sw_clk_gate_required;
};
static const struct qcom_swrm_data swrm_v1_3_data = {
@@ -191,6 +194,12 @@ static const struct qcom_swrm_data swrm_v1_5_data = {
.default_cols = 16,
};
+static const struct qcom_swrm_data swrm_v1_6_data = {
+ .default_rows = 50,
+ .default_cols = 16,
+ .sw_clk_gate_required = true,
+};
+
#define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus)
static int qcom_swrm_ahb_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
@@ -471,6 +480,10 @@ static int qcom_swrm_enumerate(struct sdw_bus *bus)
char *buf1 = (char *)&val1, *buf2 = (char *)&val2;
for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ /* skip devices whose status is Not Present */
+ if (!ctrl->status[i])
+ continue;
+
/*SCP_Devid5 - Devid 4*/
ctrl->reg_read(ctrl, SWRM_ENUMERATOR_SLAVE_DEV_ID_1(i), &val1);
@@ -656,6 +669,8 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
val = FIELD_PREP(SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK, ctrl->rows_index);
val |= FIELD_PREP(SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK, ctrl->cols_index);
+ reset_control_reset(ctrl->audio_cgcr);
+
ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
/* Enable Auto enumeration */
@@ -1307,6 +1322,15 @@ static int qcom_swrm_probe(struct platform_device *pdev)
return PTR_ERR(ctrl->mmio);
}
+ if (data->sw_clk_gate_required) {
+ ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr");
+ if (IS_ERR_OR_NULL(ctrl->audio_cgcr)) {
+ dev_err(dev, "Failed to get cgcr reset ctrl required for SW gating\n");
+ ret = PTR_ERR(ctrl->audio_cgcr);
+ goto err_init;
+ }
+ }
+
ctrl->irq = of_irq_get(dev->of_node, 0);
if (ctrl->irq < 0) {
ret = ctrl->irq;
@@ -1332,6 +1356,10 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ctrl->bus.clk_stop_timeout = 300;
+ ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr");
+ if (IS_ERR(ctrl->audio_cgcr))
+ dev_err(dev, "Failed to get audio_cgcr reset required for soundwire-v1.6.0\n");
+
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
goto err_clk;
@@ -1485,6 +1513,8 @@ static int __maybe_unused swrm_runtime_resume(struct device *dev)
qcom_swrm_get_device_status(ctrl);
sdw_handle_slave_status(&ctrl->bus, ctrl->status);
} else {
+ reset_control_reset(ctrl->audio_cgcr);
+
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR,
SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET);
@@ -1548,7 +1578,7 @@ static const struct dev_pm_ops swrm_dev_pm_ops = {
static const struct of_device_id qcom_swrm_of_match[] = {
{ .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data },
{ .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
- { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_5_data },
+ { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_6_data },
{/* sentinel */},
};
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 669d7573320b..c1c1a2ac293a 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -12,6 +12,7 @@ static void sdw_slave_release(struct device *dev)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ mutex_destroy(&slave->sdw_dev_lock);
kfree(slave);
}
@@ -58,9 +59,9 @@ int sdw_slave_add(struct sdw_bus *bus,
init_completion(&slave->enumeration_complete);
init_completion(&slave->initialization_complete);
slave->dev_num = 0;
- init_completion(&slave->probe_complete);
slave->probed = false;
slave->first_interrupt_done = false;
+ mutex_init(&slave->sdw_dev_lock);
for (i = 0; i < SDW_MAX_PORTS; i++)
init_completion(&slave->port_ready[i]);
@@ -127,6 +128,71 @@ static bool find_slave(struct sdw_bus *bus,
return true;
}
+struct sdw_acpi_child_walk_data {
+ struct sdw_bus *bus;
+ struct acpi_device *adev;
+ struct sdw_slave_id id;
+ bool ignore_unique_id;
+};
+
+static int sdw_acpi_check_duplicate(struct acpi_device *adev, void *data)
+{
+ struct sdw_acpi_child_walk_data *cwd = data;
+ struct sdw_bus *bus = cwd->bus;
+ struct sdw_slave_id id;
+
+ if (adev == cwd->adev)
+ return 0;
+
+ if (!find_slave(bus, adev, &id))
+ return 0;
+
+ if (cwd->id.sdw_version != id.sdw_version || cwd->id.mfg_id != id.mfg_id ||
+ cwd->id.part_id != id.part_id || cwd->id.class_id != id.class_id)
+ return 0;
+
+ if (cwd->id.unique_id != id.unique_id) {
+ dev_dbg(bus->dev,
+ "Valid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n",
+ cwd->id.unique_id, id.unique_id, cwd->id.mfg_id,
+ cwd->id.part_id);
+ cwd->ignore_unique_id = false;
+ return 0;
+ }
+
+ dev_err(bus->dev,
+ "Invalid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n",
+ cwd->id.unique_id, id.unique_id, cwd->id.mfg_id, cwd->id.part_id);
+ return -ENODEV;
+}
+
+static int sdw_acpi_find_one(struct acpi_device *adev, void *data)
+{
+ struct sdw_bus *bus = data;
+ struct sdw_acpi_child_walk_data cwd = {
+ .bus = bus,
+ .adev = adev,
+ .ignore_unique_id = true,
+ };
+ int ret;
+
+ if (!find_slave(bus, adev, &cwd.id))
+ return 0;
+
+ /* Brute-force O(N^2) search for duplicates. */
+ ret = acpi_dev_for_each_child(ACPI_COMPANION(bus->dev),
+ sdw_acpi_check_duplicate, &cwd);
+ if (ret)
+ return ret;
+
+ if (cwd.ignore_unique_id)
+ cwd.id.unique_id = SDW_IGNORED_UNIQUE_ID;
+
+ /* Ignore errors and continue. */
+ sdw_slave_add(bus, &cwd.id, acpi_fwnode_handle(adev));
+ return 0;
+}
+
/*
* sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
* @bus: SDW bus instance
@@ -135,8 +201,7 @@ static bool find_slave(struct sdw_bus *bus,
*/
int sdw_acpi_find_slaves(struct sdw_bus *bus)
{
- struct acpi_device *adev, *parent;
- struct acpi_device *adev2, *parent2;
+ struct acpi_device *parent;
parent = ACPI_COMPANION(bus->dev);
if (!parent) {
@@ -144,54 +209,7 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
return -ENODEV;
}
- list_for_each_entry(adev, &parent->children, node) {
- struct sdw_slave_id id;
- struct sdw_slave_id id2;
- bool ignore_unique_id = true;
-
- if (!find_slave(bus, adev, &id))
- continue;
-
- /* brute-force O(N^2) search for duplicates */
- parent2 = parent;
- list_for_each_entry(adev2, &parent2->children, node) {
-
- if (adev == adev2)
- continue;
-
- if (!find_slave(bus, adev2, &id2))
- continue;
-
- if (id.sdw_version != id2.sdw_version ||
- id.mfg_id != id2.mfg_id ||
- id.part_id != id2.part_id ||
- id.class_id != id2.class_id)
- continue;
-
- if (id.unique_id != id2.unique_id) {
- dev_dbg(bus->dev,
- "Valid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n",
- id.unique_id, id2.unique_id, id.mfg_id, id.part_id);
- ignore_unique_id = false;
- } else {
- dev_err(bus->dev,
- "Invalid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n",
- id.unique_id, id2.unique_id, id.mfg_id, id.part_id);
- return -ENODEV;
- }
- }
-
- if (ignore_unique_id)
- id.unique_id = SDW_IGNORED_UNIQUE_ID;
-
- /*
- * don't error check for sdw_slave_add as we want to continue
- * adding Slaves
- */
- sdw_slave_add(bus, &id, acpi_fwnode_handle(adev));
- }
-
- return 0;
+ return acpi_dev_for_each_child(parent, sdw_acpi_find_one, bus);
}
#endif
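The rewrite above swaps open-coded walks of parent->children for acpi_dev_for_each_child() callbacks carrying a context struct; a stripped-down, stand-alone analogue of the same shape (plain arrays instead of ACPI devices, names invented for the demo):

    #include <stdio.h>

    struct item { int id; };

    /* stand-in for acpi_dev_for_each_child(): walk items, stop on non-zero */
    static int for_each_item(struct item *items, int n,
                             int (*fn)(struct item *, void *), void *data)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = fn(&items[i], data);
                    if (ret)
                            return ret;
            }
            return 0;
    }

    struct walk_data {
            struct item *items;
            int n;
            struct item *self;
            int duplicates;
    };

    static int check_duplicate(struct item *it, void *data)
    {
            struct walk_data *wd = data;

            if (it == wd->self)
                    return 0;
            if (it->id == wd->self->id)
                    wd->duplicates++;
            return 0;
    }

    static int find_one(struct item *it, void *data)
    {
            struct walk_data *wd = data;

            wd->self = it;
            wd->duplicates = 0;
            /* inner walk over the same list, like sdw_acpi_check_duplicate() */
            for_each_item(wd->items, wd->n, check_duplicate, wd);
            printf("item %d: %d duplicate(s)\n", it->id, wd->duplicates);
            return 0;
    }

    int main(void)
    {
            struct item items[] = { { 1 }, { 2 }, { 1 } };
            struct walk_data wd = { .items = items, .n = 3 };

            return for_each_item(items, 3, find_one, &wd);
    }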
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index d34150559142..bd502368339e 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
#include <sound/soc.h>
#include "bus.h"
@@ -401,20 +402,26 @@ static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
struct sdw_prepare_ch prep_ch,
enum sdw_port_prep_ops cmd)
{
- const struct sdw_slave_ops *ops = s_rt->slave->ops;
- int ret;
+ int ret = 0;
+ struct sdw_slave *slave = s_rt->slave;
- if (ops->port_prep) {
- ret = ops->port_prep(s_rt->slave, &prep_ch, cmd);
- if (ret < 0) {
- dev_err(&s_rt->slave->dev,
- "Slave Port Prep cmd %d failed: %d\n",
- cmd, ret);
- return ret;
+ mutex_lock(&slave->sdw_dev_lock);
+
+ if (slave->probed) {
+ struct device *dev = &slave->dev;
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+ if (drv->ops && drv->ops->port_prep) {
+ ret = drv->ops->port_prep(slave, &prep_ch, cmd);
+ if (ret < 0)
+ dev_err(dev, "Slave Port Prep cmd %d failed: %d\n",
+ cmd, ret);
}
}
- return 0;
+ mutex_unlock(&slave->sdw_dev_lock);
+
+ return ret;
}
static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
@@ -578,7 +585,7 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
struct sdw_slave_runtime *s_rt;
struct sdw_bus *bus = m_rt->bus;
struct sdw_slave *slave;
- int ret = 0;
+ int ret;
if (bus->ops->set_bus_conf) {
ret = bus->ops->set_bus_conf(bus, &bus->params);
@@ -589,17 +596,27 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
slave = s_rt->slave;
- if (slave->ops->bus_config) {
- ret = slave->ops->bus_config(slave, &bus->params);
- if (ret < 0) {
- dev_err(bus->dev, "Notify Slave: %d failed\n",
- slave->dev_num);
- return ret;
+ mutex_lock(&slave->sdw_dev_lock);
+
+ if (slave->probed) {
+ struct device *dev = &slave->dev;
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+ if (drv->ops && drv->ops->bus_config) {
+ ret = drv->ops->bus_config(slave, &bus->params);
+ if (ret < 0) {
+ dev_err(dev, "Notify Slave: %d failed\n",
+ slave->dev_num);
+ mutex_unlock(&slave->sdw_dev_lock);
+ return ret;
+ }
}
}
+
+ mutex_unlock(&slave->sdw_dev_lock);
}
- return ret;
+ return 0;
}
/**
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3b1044ebc400..e32f6a2058ae 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -183,7 +183,7 @@ config SPI_BCM63XX
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
- depends on BCM63XX || BMIPS_GENERIC || ARCH_BCM_63XX || COMPILE_TEST
+ depends on BCM63XX || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
help
This enables support for the High Speed SPI controller present on
newer Broadcom BCM63XX SoCs.
@@ -371,6 +371,13 @@ config SPI_FSL_QUADSPI
This controller does not support generic SPI messages. It only
supports the high-level SPI memory interface.
+config SPI_GXP
+ tristate "GXP SPI driver"
+ depends on ARCH_HPE || COMPILE_TEST
+ help
+ This enables support for GXP bus-attached SPI controllers.
+
config SPI_HISI_KUNPENG
tristate "HiSilicon SPI Controller for Kunpeng SoCs"
depends on (ARM64 && ACPI) || COMPILE_TEST
@@ -575,6 +582,15 @@ config SPI_MESON_SPIFC
This enables master mode support for the SPIFC (SPI flash
controller) available in Amlogic Meson SoCs.
+config SPI_MICROCHIP_CORE
+ tristate "Microchip FPGA SPI controllers"
+ depends on SPI_MASTER
+ help
+ This enables the SPI driver for Microchip FPGA SPI controllers.
+ Say Y or M here if you want to use the "hard" controllers on
+ PolarFire SoC.
+ If built as a module, it will be called spi-microchip-core.
+
config SPI_MT65XX
tristate "MediaTek SPI controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0f44eb6083a5..15d2f3835e45 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_GXP) += spi-gxp.o
obj-$(CONFIG_SPI_HISI_KUNPENG) += spi-hisi-kunpeng.o
obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
+obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 480c0c8c18e4..976a217e356d 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi-mem.h>
/* QSPI register offsets */
@@ -285,7 +286,7 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
/* special case not supported by hardware */
if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
- op->dummy.nbytes == 0)
+ op->dummy.nbytes == 0)
return false;
return true;
@@ -417,9 +418,13 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->addr.val + op->data.nbytes > aq->mmap_size)
return -ENOTSUPP;
+ err = pm_runtime_resume_and_get(&aq->pdev->dev);
+ if (err < 0)
+ return err;
+
err = atmel_qspi_set_cfg(aq, op, &offset);
if (err)
- return err;
+ goto pm_runtime_put;
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
@@ -441,7 +446,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Poll INSTRuction End status */
sr = atmel_qspi_read(aq, QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
- return err;
+ goto pm_runtime_put;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
@@ -452,6 +457,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
err = -ETIMEDOUT;
atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
+pm_runtime_put:
+ pm_runtime_mark_last_busy(&aq->pdev->dev);
+ pm_runtime_put_autosuspend(&aq->pdev->dev);
return err;
}
@@ -472,6 +480,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
u32 scbr;
+ int ret;
if (ctrl->busy)
return -EBUSY;
@@ -488,9 +497,16 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (scbr > 0)
scbr--;
+ ret = pm_runtime_resume_and_get(ctrl->dev.parent);
+ if (ret < 0)
+ return ret;
+
aq->scr = QSPI_SCR_SCBR(scbr);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+ pm_runtime_mark_last_busy(ctrl->dev.parent);
+ pm_runtime_put_autosuspend(ctrl->dev.parent);
+
return 0;
}
@@ -621,11 +637,24 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (err)
goto disable_qspick;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
atmel_qspi_init(aq);
err = spi_register_controller(ctrl);
- if (err)
+ if (err) {
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
goto disable_qspick;
+ }
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -641,9 +670,18 @@ static int atmel_qspi_remove(struct platform_device *pdev)
{
struct spi_controller *ctrl = platform_get_drvdata(pdev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
spi_unregister_controller(ctrl);
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
return 0;
@@ -653,10 +691,19 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
- clk_disable_unprepare(aq->qspick);
- clk_disable_unprepare(aq->pclk);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_force_suspend(dev);
+
+ clk_unprepare(aq->qspick);
+ clk_unprepare(aq->pclk);
return 0;
}
@@ -665,19 +712,54 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
- clk_prepare_enable(aq->pclk);
- clk_prepare_enable(aq->qspick);
+ clk_prepare(aq->pclk);
+ clk_prepare(aq->qspick);
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
atmel_qspi_init(aq);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+
+ clk_disable(aq->qspick);
+ clk_disable(aq->pclk);
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
- atmel_qspi_resume);
+static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ ret = clk_enable(aq->pclk);
+ if (ret)
+ return ret;
+
+ return clk_enable(aq->qspick);
+}
+
+static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
+ SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
+ atmel_qspi_runtime_resume, NULL)
+};
static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
@@ -704,7 +786,7 @@ static struct platform_driver atmel_qspi_driver = {
.driver = {
.name = "atmel_qspi",
.of_match_table = atmel_qspi_dt_ids,
- .pm = &atmel_qspi_pm_ops,
+ .pm = pm_ptr(&atmel_qspi_pm_ops),
},
.probe = atmel_qspi_probe,
.remove = atmel_qspi_remove,
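For orientation, the autosuspend bracket that the atmel-quadspi hunks add around exec_op() and setup() boils down to this sketch (do_transfer() is a placeholder, not a driver function; the 500 ms delay is the autosuspend value set in probe):

    static int qspi_do_one_op(struct device *dev)
    {
            int err;

            err = pm_runtime_resume_and_get(dev);	/* wake the controller, take a ref */
            if (err < 0)
                    return err;

            err = do_transfer(dev);			/* placeholder for the real work */

            pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
            pm_runtime_put_autosuspend(dev);	/* drop the ref; suspend after 500 ms idle */

            return err;
    }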
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index ca40923258af..596e181ae136 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -128,9 +128,9 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
struct spi_master *master;
struct altera_spi *hw;
void __iomem *base;
- int err = -ENODEV;
+ int err;
- master = spi_alloc_master(dev, sizeof(struct altera_spi));
+ master = devm_spi_alloc_master(dev, sizeof(struct altera_spi));
if (!master)
return -ENOMEM;
@@ -159,10 +159,9 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
altera_spi_init_master(master);
err = devm_spi_register_master(dev, master);
- if (err) {
- dev_err(dev, "%s failed to register spi master %d\n", __func__, err);
- goto exit;
- }
+ if (err)
+ return dev_err_probe(dev, err, "%s failed to register spi master\n",
+ __func__);
if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010)
strscpy(board_info.modalias, "m10-n5010", SPI_NAME_SIZE);
@@ -179,9 +178,6 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
}
return 0;
-exit:
- spi_master_put(master);
- return err;
}
static const struct dfl_device_id dfl_spi_altera_ids[] = {
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index cba6a4486c24..08df4f8d0531 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -33,20 +33,30 @@
#define AMD_SPI_RX_COUNT_REG 0x4B
#define AMD_SPI_STATUS_REG 0x4C
+#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
/* M_CMD OP codes for SPI */
#define AMD_SPI_XFER_TX 1
#define AMD_SPI_XFER_RX 2
+/**
+ * enum amd_spi_versions - SPI controller versions
+ * @AMD_SPI_V1: AMDI0061 hardware version
+ * @AMD_SPI_V2: AMDI0062 hardware version
+ */
enum amd_spi_versions {
- AMD_SPI_V1 = 1, /* AMDI0061 */
- AMD_SPI_V2, /* AMDI0062 */
+ AMD_SPI_V1 = 1,
+ AMD_SPI_V2,
};
+/**
+ * struct amd_spi - SPI driver instance
+ * @io_remap_addr: Start address of the SPI controller registers
+ * @version: SPI controller hardware version
+ */
struct amd_spi {
void __iomem *io_remap_addr;
- unsigned long io_base_addr;
enum amd_spi_versions version;
};
@@ -270,27 +280,29 @@ static int amd_spi_master_transfer(struct spi_master *master,
return 0;
}
+static size_t amd_spi_max_transfer_size(struct spi_device *spi)
+{
+ return AMD_SPI_FIFO_SIZE;
+}
+
static int amd_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_master *master;
struct amd_spi *amd_spi;
- int err = 0;
+ int err;
/* Allocate storage for spi_master and driver private data */
- master = spi_alloc_master(dev, sizeof(struct amd_spi));
- if (!master) {
- dev_err(dev, "Error allocating SPI master\n");
- return -ENOMEM;
- }
+ master = devm_spi_alloc_master(dev, sizeof(struct amd_spi));
+ if (!master)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI master\n");
amd_spi = spi_master_get_devdata(master);
amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(amd_spi->io_remap_addr)) {
- err = PTR_ERR(amd_spi->io_remap_addr);
- dev_err(dev, "error %d ioremap of SPI registers failed\n", err);
- goto err_free_master;
- }
+ if (IS_ERR(amd_spi->io_remap_addr))
+ return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
+ "ioremap of SPI registers failed\n");
+
dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev);
@@ -302,20 +314,15 @@ static int amd_spi_probe(struct platform_device *pdev)
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = amd_spi_master_setup;
master->transfer_one_message = amd_spi_master_transfer;
+ master->max_transfer_size = amd_spi_max_transfer_size;
+ master->max_message_size = amd_spi_max_transfer_size;
/* Register the controller with SPI framework */
err = devm_spi_register_master(dev, master);
- if (err) {
- dev_err(dev, "error %d registering SPI controller\n", err);
- goto err_free_master;
- }
+ if (err)
+ return dev_err_probe(dev, err, "error registering SPI controller\n");
return 0;
-
-err_free_master:
- spi_master_put(master);
-
- return err;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index d8cc4b270644..9df9fc40b783 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -497,7 +497,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
val = *(u32 *)a3700_spi->tx_buf;
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, cpu_to_le32(val));
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
}
@@ -519,7 +519,7 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
-
+ val = le32_to_cpu(val);
memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index 496f3e1e9079..3e891bf22470 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -558,6 +558,14 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
u32 ctl_val;
int ret = 0;
+ dev_dbg(aspi->dev,
+ "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
+ chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
+ desc->info.offset, desc->info.offset + desc->info.length,
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.nbytes, op->dummy.nbytes);
+
chip->clk_freq = desc->mem->spi->max_speed_hz;
/* Only for reads */
@@ -574,9 +582,11 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
ctl_val |= aspeed_spi_get_io_mode(op) |
op->cmd.opcode << CTRL_COMMAND_SHIFT |
- CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth) |
CTRL_IO_MODE_READ;
+ if (op->dummy.nbytes)
+ ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
+
/* Tune 4BYTE address mode */
if (op->addr.nbytes) {
u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 9e300a932699..c4f22d50dba5 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1631,7 +1631,6 @@ static int atmel_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int atmel_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -1653,7 +1652,6 @@ static int atmel_spi_runtime_resume(struct device *dev)
return clk_prepare_enable(as->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int atmel_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -1693,17 +1691,12 @@ static int atmel_spi_resume(struct device *dev)
/* Start the queue running */
return spi_master_resume(master);
}
-#endif
static const struct dev_pm_ops atmel_spi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
- SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
- atmel_spi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
+ RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
+ atmel_spi_runtime_resume, NULL)
};
-#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
-#else
-#define ATMEL_SPI_PM_OPS NULL
-#endif
static const struct of_device_id atmel_spi_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-spi" },
@@ -1715,7 +1708,7 @@ MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
- .pm = ATMEL_SPI_PM_OPS,
+ .pm = pm_ptr(&atmel_spi_pm_ops),
.of_match_table = atmel_spi_dt_ids,
},
.probe = atmel_spi_probe,
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 775c0bf2f923..747e03228c48 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -372,6 +372,10 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
struct bcm2835_spi *bs = dev_id;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ /* Bail out early if interrupts are not enabled */
+ if (!(cs & BCM2835_SPI_CS_INTR))
+ return IRQ_NONE;
+
/*
* An interrupt is signaled either if DONE is set (TX FIFO empty)
* or if RXR is set (RX FIFO >= ¾ full).
@@ -1138,10 +1142,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
- dmaengine_terminate_sync(ctlr->dma_tx);
- bs->tx_dma_active = false;
- dmaengine_terminate_sync(ctlr->dma_rx);
- bs->rx_dma_active = false;
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ }
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ bs->rx_dma_active = false;
+ }
bcm2835_spi_undo_prologue(bs);
/* and reset */
@@ -1365,8 +1373,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
- err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), bs);
+ err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), bs);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_dma_release;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 2b9fc8449a62..72b1a5a2298c 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1578,8 +1578,7 @@ static int cqspi_probe(struct platform_device *pdev)
ret = cqspi_of_get_pdata(cqspi);
if (ret) {
dev_err(dev, "Cannot get mandatory OF data.\n");
- ret = -ENODEV;
- goto probe_master_put;
+ return -ENODEV;
}
/* Obtain QSPI clock. */
@@ -1587,7 +1586,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->clk)) {
dev_err(dev, "Cannot claim QSPI clock.\n");
ret = PTR_ERR(cqspi->clk);
- goto probe_master_put;
+ return ret;
}
/* Obtain and remap controller address. */
@@ -1596,7 +1595,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->iobase)) {
dev_err(dev, "Cannot remap controller address.\n");
ret = PTR_ERR(cqspi->iobase);
- goto probe_master_put;
+ return ret;
}
/* Obtain and remap AHB address. */
@@ -1605,7 +1604,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->ahb_base)) {
dev_err(dev, "Cannot remap AHB address.\n");
ret = PTR_ERR(cqspi->ahb_base);
- goto probe_master_put;
+ return ret;
}
cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
cqspi->ahb_size = resource_size(res_ahb);
@@ -1614,15 +1613,13 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = -ENXIO;
- goto probe_master_put;
- }
+ if (irq < 0)
+ return -ENXIO;
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto probe_master_put;
+ return ret;
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
@@ -1716,8 +1713,6 @@ probe_reset_failed:
probe_clk_failed:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-probe_master_put:
- spi_master_put(master);
return ret;
}
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 31d778e9d255..6a7f7df1e776 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -69,7 +69,7 @@
#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
-#define CDNS_SPI_NOSS 0x3C /* No Slave select */
+#define CDNS_SPI_NOSS 0xF /* No Slave select */
/*
* SPI Interrupt Registers bit Masks
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ecea471ff42c..f87d97ccd2d6 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -307,8 +307,9 @@ static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
if (spi->mode & SPI_LOOP)
cr0 |= DW_HSSI_CTRLR0_SRL;
- if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
- cr0 |= DW_HSSI_CTRLR0_KEEMBAY_MST;
+ /* CTRLR0[31] MST */
+ if (dw_spi_ver_is_ge(dws, HSSI, 102A))
+ cr0 |= DW_HSSI_CTRLR0_MST;
}
return cr0;
@@ -942,7 +943,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dev, dws);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ goto err_free_irq;
+ } else if (ret) {
dev_warn(dev, "DMA init failed\n");
} else {
master->can_dma = dws->dma_ops->can_dma;
@@ -963,6 +966,7 @@ err_dma_exit:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
dw_spi_enable_chip(dws, 0);
+err_free_irq:
free_irq(dws->irq, master);
err_free_master:
spi_controller_put(master);
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index 63e5260100ec..1322b8cce5b7 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -139,15 +139,20 @@ err_exit:
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
- dws->rxchan = dma_request_slave_channel(dev, "rx");
- if (!dws->rxchan)
- return -ENODEV;
+ int ret;
- dws->txchan = dma_request_slave_channel(dev, "tx");
- if (!dws->txchan) {
- dma_release_channel(dws->rxchan);
+ dws->rxchan = dma_request_chan(dev, "rx");
+ if (IS_ERR(dws->rxchan)) {
+ ret = PTR_ERR(dws->rxchan);
dws->rxchan = NULL;
- return -ENODEV;
+ goto err_exit;
+ }
+
+ dws->txchan = dma_request_chan(dev, "tx");
+ if (IS_ERR(dws->txchan)) {
+ ret = PTR_ERR(dws->txchan);
+ dws->txchan = NULL;
+ goto free_rxchan;
}
dws->master->dma_rx = dws->rxchan;
@@ -160,6 +165,12 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
dw_spi_dma_sg_burst_init(dws);
return 0;
+
+free_rxchan:
+ dma_release_channel(dws->rxchan);
+ dws->rxchan = NULL;
+err_exit:
+ return ret;
}
static void dw_spi_dma_exit(struct dw_spi *dws)
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 5101c4c6017b..26c40ea6dd12 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -214,11 +214,10 @@ static int dw_spi_hssi_init(struct platform_device *pdev,
return 0;
}
-static int dw_spi_keembay_init(struct platform_device *pdev,
- struct dw_spi_mmio *dwsmmio)
+static int dw_spi_intel_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
{
dwsmmio->dws.ip = DW_HSSI_ID;
- dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST;
return 0;
}
@@ -349,7 +348,8 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
{ .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init},
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
- { .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init},
+ { .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
+ { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
{ .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init},
{ /* end of table */}
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index d5ee5130601e..9e8eb2b52d5c 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -23,7 +23,7 @@
((_dws)->ip == DW_ ## _ip ## _ID)
#define __dw_spi_ver_cmp(_dws, _ip, _ver, _op) \
- (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ver)
+ (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ ## _ver)
#define dw_spi_ver_is(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, ==)
@@ -31,8 +31,7 @@
/* DW SPI controller capabilities */
#define DW_SPI_CAP_CS_OVERRIDE BIT(0)
-#define DW_SPI_CAP_KEEMBAY_MST BIT(1)
-#define DW_SPI_CAP_DFS32 BIT(2)
+#define DW_SPI_CAP_DFS32 BIT(1)
/* Register offsets (Generic for both DWC APB SSI and DWC SSI IP-cores) */
#define DW_SPI_CTRLR0 0x00
@@ -94,13 +93,7 @@
#define DW_HSSI_CTRLR0_SCPOL BIT(9)
#define DW_HSSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
#define DW_HSSI_CTRLR0_SRL BIT(13)
-
-/*
- * For Keem Bay, CTRLR0[31] is used to select controller mode.
- * 0: SSI is slave
- * 1: SSI is master
- */
-#define DW_HSSI_CTRLR0_KEEMBAY_MST BIT(31)
+#define DW_HSSI_CTRLR0_MST BIT(31)
/* Bit fields in CTRLR1 */
#define DW_SPI_NDF_MASK GENMASK(15, 0)
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index 72ab066ce552..cf1e4f9ebd72 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -24,8 +24,7 @@
#define FSI2SPI_IRQ 0x20
#define SPI_FSI_BASE 0x70000
-#define SPI_FSI_INIT_TIMEOUT_MS 1000
-#define SPI_FSI_STATUS_TIMEOUT_MS 100
+#define SPI_FSI_TIMEOUT_MS 1000
#define SPI_FSI_MAX_RX_SIZE 8
#define SPI_FSI_MAX_TX_SIZE 40
@@ -299,6 +298,7 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
static int fsi_spi_transfer_data(struct fsi_spi *ctx,
struct spi_transfer *transfer)
{
+ int loops;
int rc = 0;
unsigned long end;
u64 status = 0ULL;
@@ -317,9 +317,10 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
if (rc)
return rc;
- end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
+ loops = 0;
+ end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
- if (time_after(jiffies, end))
+ if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_status(ctx, &status, "TX");
@@ -335,9 +336,10 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
u8 *rx = transfer->rx_buf;
while (transfer->len > recv) {
- end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
+ loops = 0;
+ end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
- if (time_after(jiffies, end))
+ if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_status(ctx, &status, "RX");
@@ -359,6 +361,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
static int fsi_spi_transfer_init(struct fsi_spi *ctx)
{
+ int loops = 0;
int rc;
bool reset = false;
unsigned long end;
@@ -369,9 +372,9 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
- end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
+ end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
- if (time_after(jiffies, end))
+ if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
new file mode 100644
index 000000000000..9ea355f7d64f
--- /dev/null
+++ b/drivers/spi/spi-gxp.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */
+
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define GXP_SPI0_MAX_CHIPSELECT 2
+#define GXP_SPI_SLEEP_TIME 1
+#define GXP_SPI_TIMEOUT (130 * 1000000 / GXP_SPI_SLEEP_TIME)
+
+#define MANUAL_MODE 0
+#define DIRECT_MODE 1
+#define SPILDAT_LEN 256
+
+#define OFFSET_SPIMCFG 0x0
+#define OFFSET_SPIMCTRL 0x4
+#define OFFSET_SPICMD 0x5
+#define OFFSET_SPIDCNT 0x6
+#define OFFSET_SPIADDR 0x8
+#define OFFSET_SPIINTSTS 0xc
+
+#define SPIMCTRL_START 0x01
+#define SPIMCTRL_BUSY 0x02
+#define SPIMCTRL_DIR 0x08
+
+struct gxp_spi;
+
+struct gxp_spi_chip {
+ struct gxp_spi *spifi;
+ u32 cs;
+};
+
+struct gxp_spi_data {
+ u32 max_cs;
+ u32 mode_bits;
+};
+
+struct gxp_spi {
+ const struct gxp_spi_data *data;
+ void __iomem *reg_base;
+ void __iomem *dat_base;
+ void __iomem *dir_base;
+ struct device *dev;
+ struct gxp_spi_chip chips[GXP_SPI0_MAX_CHIPSELECT];
+};
+
+static void gxp_spi_set_mode(struct gxp_spi *spifi, int mode)
+{
+ u8 value;
+ void __iomem *reg_base = spifi->reg_base;
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+
+ if (mode == MANUAL_MODE) {
+ writeb(0x55, reg_base + OFFSET_SPICMD);
+ writeb(0xaa, reg_base + OFFSET_SPICMD);
+ value &= ~0x30;
+ } else {
+ value |= 0x30;
+ }
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+}
+
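+/*
+ * Manual-mode register read: select the target chip in SPIMCFG and clear the
+ * address-related fields, write the opcode and byte count, start the transfer
+ * with the DIR bit cleared (a read), poll BUSY, then copy the reply out of
+ * the data buffer.
+ */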
+static int gxp_spi_read_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ int ret;
+ struct gxp_spi *spifi = chip->spifi;
+ void __iomem *reg_base = spifi->reg_base;
+ u32 value;
+
+ value = readl(reg_base + OFFSET_SPIMCFG);
+ value &= ~(1 << 24);
+ value |= (chip->cs << 24);
+ value &= ~(0x07 << 16);
+ value &= ~(0x1f << 19);
+ writel(value, reg_base + OFFSET_SPIMCFG);
+
+ writel(0, reg_base + OFFSET_SPIADDR);
+
+ writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+ writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+ value &= ~SPIMCTRL_DIR;
+ value |= SPIMCTRL_START;
+
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+ ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+ !(value & SPIMCTRL_BUSY),
+ GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+ if (ret) {
+ dev_warn(spifi->dev, "read reg busy time out\n");
+ return ret;
+ }
+
+ memcpy_fromio(op->data.buf.in, spifi->dat_base, op->data.nbytes);
+ return ret;
+}
+
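+/*
+ * Manual-mode register write: same SPIMCFG/SPICMD/SPIDCNT setup as the
+ * register read, but the payload is copied into the data buffer first and the
+ * transfer is started with the DIR bit set (a write).
+ */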
+static int gxp_spi_write_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ int ret;
+ struct gxp_spi *spifi = chip->spifi;
+ void __iomem *reg_base = spifi->reg_base;
+ u32 value;
+
+ value = readl(reg_base + OFFSET_SPIMCFG);
+ value &= ~(1 << 24);
+ value |= (chip->cs << 24);
+ value &= ~(0x07 << 16);
+ value &= ~(0x1f << 19);
+ writel(value, reg_base + OFFSET_SPIMCFG);
+
+ writel(0, reg_base + OFFSET_SPIADDR);
+
+ writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+ memcpy_toio(spifi->dat_base, op->data.buf.out, op->data.nbytes);
+
+ writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+ value |= SPIMCTRL_DIR;
+ value |= SPIMCTRL_START;
+
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+ ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+ !(value & SPIMCTRL_BUSY),
+ GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+ if (ret)
+ dev_warn(spifi->dev, "write reg busy time out\n");
+
+ return ret;
+}
+
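+/*
+ * Data reads are serviced from the direct-mapped window at dir_base; chip
+ * select 0 appears to be offset by 64 MiB (0x4000000) within that window.
+ */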
+static ssize_t gxp_spi_read(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ struct gxp_spi *spifi = chip->spifi;
+ u32 offset = op->addr.val;
+
+ if (chip->cs == 0)
+ offset += 0x4000000;
+
+ memcpy_fromio(op->data.buf.in, spifi->dir_base + offset, op->data.nbytes);
+
+ return 0;
+}
+
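+/*
+ * Manual-mode data write: program the flash address and address byte count in
+ * SPIMCFG, copy at most SPILDAT_LEN (256) bytes into the data buffer, start
+ * the transfer with the DIR bit set and return the number of bytes actually
+ * queued.
+ */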
+static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ struct gxp_spi *spifi = chip->spifi;
+ void __iomem *reg_base = spifi->reg_base;
+ u32 write_len;
+ u32 value;
+ int ret;
+
+ write_len = op->data.nbytes;
+ if (write_len > SPILDAT_LEN)
+ write_len = SPILDAT_LEN;
+
+ value = readl(reg_base + OFFSET_SPIMCFG);
+ value &= ~(1 << 24);
+ value |= (chip->cs << 24);
+ value &= ~(0x07 << 16);
+ value |= (op->addr.nbytes << 16);
+ value &= ~(0x1f << 19);
+ writel(value, reg_base + OFFSET_SPIMCFG);
+
+ writel(op->addr.val, reg_base + OFFSET_SPIADDR);
+
+ writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+ writew(write_len, reg_base + OFFSET_SPIDCNT);
+
+ memcpy_toio(spifi->dat_base, op->data.buf.out, write_len);
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+ value |= SPIMCTRL_DIR;
+ value |= SPIMCTRL_START;
+
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+ ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+ !(value & SPIMCTRL_BUSY),
+ GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+ if (ret) {
+ dev_warn(spifi->dev, "write busy time out\n");
+ return ret;
+ }
+
+ return write_len;
+}
+
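+/*
+ * Ops with no address phase are treated as register accesses; ops that do
+ * carry an address use the direct-mapped read path or the buffered
+ * manual-mode write path.
+ */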
+static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->master);
+ struct gxp_spi_chip *chip = &spifi->chips[mem->spi->chip_select];
+ int ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!op->addr.nbytes)
+ ret = gxp_spi_read_reg(chip, op);
+ else
+ ret = gxp_spi_read(chip, op);
+ } else {
+ if (!op->addr.nbytes)
+ ret = gxp_spi_write_reg(chip, op);
+ else
+ ret = gxp_spi_write(chip, op);
+ }
+
+ return ret;
+}
+
+static int gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ int ret;
+
+ ret = do_gxp_exec_mem_op(mem, op);
+ if (ret)
+ dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops gxp_spi_mem_ops = {
+ .exec_op = gxp_exec_mem_op,
+};
+
+static int gxp_spi_setup(struct spi_device *spi)
+{
+ struct gxp_spi *spifi = spi_controller_get_devdata(spi->master);
+ unsigned int cs = spi->chip_select;
+ struct gxp_spi_chip *chip = &spifi->chips[cs];
+
+ chip->spifi = spifi;
+ chip->cs = cs;
+
+ gxp_spi_set_mode(spifi, MANUAL_MODE);
+
+ return 0;
+}
+
+static int gxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct gxp_spi_data *data;
+ struct spi_controller *ctlr;
+ struct gxp_spi *spifi;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ ctlr = devm_spi_alloc_master(dev, sizeof(*spifi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ spifi = spi_controller_get_devdata(ctlr);
+
+ platform_set_drvdata(pdev, spifi);
+ spifi->data = data;
+ spifi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spifi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->reg_base))
+ return PTR_ERR(spifi->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ spifi->dat_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->dat_base))
+ return PTR_ERR(spifi->dat_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ spifi->dir_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->dir_base))
+ return PTR_ERR(spifi->dir_base);
+
+ ctlr->mode_bits = data->mode_bits;
+ ctlr->bus_num = pdev->id;
+ ctlr->mem_ops = &gxp_spi_mem_ops;
+ ctlr->setup = gxp_spi_setup;
+ ctlr->num_chipselect = data->max_cs;
+ ctlr->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register spi controller\n");
+ }
+
+ return 0;
+}
+
+static const struct gxp_spi_data gxp_spifi_data = {
+ .max_cs = 2,
+ .mode_bits = 0,
+};
+
+static const struct of_device_id gxp_spifi_match[] = {
+ {.compatible = "hpe,gxp-spifi", .data = &gxp_spifi_data },
+ { /* null */ }
+};
+MODULE_DEVICE_TABLE(of, gxp_spifi_match);
+
+static struct platform_driver gxp_spifi_driver = {
+ .probe = gxp_spifi_probe,
+ .driver = {
+ .name = "gxp-spifi",
+ .of_match_table = gxp_spifi_match,
+ },
+};
+module_platform_driver(gxp_spifi_driver);
+
+MODULE_DESCRIPTION("HPE GXP SPI Flash Interface driver");
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index f6eec7a869b6..f0d532ea40e8 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -74,6 +74,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 50f42983b950..66063687ae27 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -1236,8 +1236,8 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
return -ENOMEM;
pdata->nr_parts = 1;
- pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts),
- pdata->nr_parts, GFP_KERNEL);
+ pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
+ sizeof(*pdata->parts), GFP_KERNEL);
if (!pdata->parts)
return -ENOMEM;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa7afc8..e4cb52e1fe26 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@ struct meson_spicc_device {
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+static const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
new file mode 100644
index 000000000000..ce4385330b19
--- /dev/null
+++ b/drivers/spi/spi-microchip-core.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip CoreSPI SPI controller driver
+ *
+ * Copyright (c) 2018-2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define MAX_LEN (0xffff)
+#define MAX_CS (8)
+#define DEFAULT_FRAMESIZE (8)
+#define FIFO_DEPTH (32)
+#define CLK_GEN_MODE1_MAX (255)
+#define CLK_GEN_MODE0_MAX (15)
+#define CLK_GEN_MIN (0)
+#define MODE_X_MASK_SHIFT (24)
+
+#define CONTROL_ENABLE BIT(0)
+#define CONTROL_MASTER BIT(1)
+#define CONTROL_RX_DATA_INT BIT(4)
+#define CONTROL_TX_DATA_INT BIT(5)
+#define CONTROL_RX_OVER_INT BIT(6)
+#define CONTROL_TX_UNDER_INT BIT(7)
+#define CONTROL_SPO BIT(24)
+#define CONTROL_SPH BIT(25)
+#define CONTROL_SPS BIT(26)
+#define CONTROL_FRAMEURUN BIT(27)
+#define CONTROL_CLKMODE BIT(28)
+#define CONTROL_BIGFIFO BIT(29)
+#define CONTROL_OENOFF BIT(30)
+#define CONTROL_RESET BIT(31)
+
+#define CONTROL_MODE_MASK GENMASK(3, 2)
+#define MOTOROLA_MODE (0)
+#define CONTROL_FRAMECNT_MASK GENMASK(23, 8)
+#define CONTROL_FRAMECNT_SHIFT (8)
+
+#define STATUS_ACTIVE BIT(14)
+#define STATUS_SSEL BIT(13)
+#define STATUS_FRAMESTART BIT(12)
+#define STATUS_TXFIFO_EMPTY_NEXT_READ BIT(11)
+#define STATUS_TXFIFO_EMPTY BIT(10)
+#define STATUS_TXFIFO_FULL_NEXT_WRITE BIT(9)
+#define STATUS_TXFIFO_FULL BIT(8)
+#define STATUS_RXFIFO_EMPTY_NEXT_READ BIT(7)
+#define STATUS_RXFIFO_EMPTY BIT(6)
+#define STATUS_RXFIFO_FULL_NEXT_WRITE BIT(5)
+#define STATUS_RXFIFO_FULL BIT(4)
+#define STATUS_TX_UNDERRUN BIT(3)
+#define STATUS_RX_OVERFLOW BIT(2)
+#define STATUS_RXDAT_RXED BIT(1)
+#define STATUS_TXDAT_SENT BIT(0)
+
+#define INT_TXDONE BIT(0)
+#define INT_RXRDY BIT(1)
+#define INT_RX_CHANNEL_OVERFLOW BIT(2)
+#define INT_TX_CHANNEL_UNDERRUN BIT(3)
+
+#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
+ CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+
+#define REG_CONTROL (0x00)
+#define REG_FRAME_SIZE (0x04)
+#define REG_STATUS (0x08)
+#define REG_INT_CLEAR (0x0c)
+#define REG_RX_DATA (0x10)
+#define REG_TX_DATA (0x14)
+#define REG_CLK_GEN (0x18)
+#define REG_SLAVE_SELECT (0x1c)
+#define SSEL_MASK GENMASK(7, 0)
+#define SSEL_DIRECT BIT(8)
+#define SSELOUT_SHIFT 9
+#define SSELOUT BIT(SSELOUT_SHIFT)
+#define REG_MIS (0x20)
+#define REG_RIS (0x24)
+#define REG_CONTROL2 (0x28)
+#define REG_COMMAND (0x2c)
+#define REG_PKTSIZE (0x30)
+#define REG_CMD_SIZE (0x34)
+#define REG_HWSTATUS (0x38)
+#define REG_STAT8 (0x3c)
+#define REG_CTRL2 (0x48)
+#define REG_FRAMESUP (0x50)
+
+struct mchp_corespi {
+ void __iomem *regs;
+ struct clk *clk;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ u32 clk_gen; /* divider for spi output clock generated by the controller */
+ u32 clk_mode;
+ int irq;
+ int tx_len;
+ int rx_len;
+ int pending;
+};
+
+static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg)
+{
+ return readl(spi->regs + reg);
+}
+
+static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val)
+{
+ writel(val, spi->regs + reg);
+}
+
+static inline void mchp_corespi_enable(struct mchp_corespi *spi)
+{
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control |= CONTROL_ENABLE;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_disable(struct mchp_corespi *spi)
+{
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_ENABLE;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
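+/*
+ * Drain up to FIFO_DEPTH frames, or stop early if the RX FIFO reports empty,
+ * and update the remaining-receive and pending counters.
+ */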
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+{
+ u8 data;
+ int fifo_max, i = 0;
+
+ fifo_max = min(spi->rx_len, FIFO_DEPTH);
+
+ while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
+ data = mchp_corespi_read(spi, REG_RX_DATA);
+
+ if (spi->rx_buf)
+ *spi->rx_buf++ = data;
+ i++;
+ }
+ spi->rx_len -= i;
+ spi->pending -= i;
+}
+
+static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
+{
+ u32 control, mask = INT_ENABLE_MASK;
+
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control |= mask;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
+{
+ u32 control, mask = INT_ENABLE_MASK;
+
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~mask;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
+{
+ u32 control;
+ u16 lenpart;
+
+ /*
+ * Disable the SPI controller. Writes to transfer length have
+ * no effect when the controller is enabled.
+ */
+ mchp_corespi_disable(spi);
+
+ /*
+ * The lower 16 bits of the frame count are stored in the control reg
+ * for legacy reasons, but the upper 16 written to a different register:
+ * FRAMESUP. While both the upper and lower bits can be *READ* from the
+ * FRAMESUP register, writing to the lower 16 bits is a NOP
+ */
+ lenpart = len & 0xffff;
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_FRAMECNT_MASK;
+ control |= lenpart << CONTROL_FRAMECNT_SHIFT;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ lenpart = len & 0xffff0000;
+ mchp_corespi_write(spi, REG_FRAMESUP, lenpart);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
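+/*
+ * Queue up to FIFO_DEPTH bytes for transmit: the frame count is programmed
+ * first, then bytes are pushed until the TX FIFO fills or the chunk is
+ * complete.
+ */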
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+{
+ u8 byte;
+ int fifo_max, i = 0;
+
+ fifo_max = min(spi->tx_len, FIFO_DEPTH);
+ mchp_corespi_set_xfer_size(spi, fifo_max);
+
+ while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
+ byte = spi->tx_buf ? *spi->tx_buf++ : 0xaa;
+ mchp_corespi_write(spi, REG_TX_DATA, byte);
+ i++;
+ }
+
+ spi->tx_len -= i;
+ spi->pending += i;
+}
+
+static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+{
+ u32 control;
+
+ /*
+ * Disable the SPI controller. Writes to the frame size have
+ * no effect when the controller is enabled.
+ */
+ mchp_corespi_disable(spi);
+
+ mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
+{
+ u32 reg;
+ struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
+
+ reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ reg &= ~BIT(spi->chip_select);
+ reg |= !disable << spi->chip_select;
+
+ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+}
+
+static int mchp_corespi_setup(struct spi_device *spi)
+{
+ struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ /*
+ * Active high slaves need to be specifically set to their inactive
+ * states during probe by adding them to the "control group" & thus
+ * driving their select line low.
+ */
+ if (spi->mode & SPI_CS_HIGH) {
+ reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ reg |= BIT(spi->chip_select);
+ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+ }
+ return 0;
+}
+
+static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi)
+{
+ unsigned long clk_hz;
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control |= CONTROL_MASTER;
+
+ control &= ~CONTROL_MODE_MASK;
+ control |= MOTOROLA_MODE;
+
+ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+
+ /* max. possible spi clock rate is the apb clock rate */
+ clk_hz = clk_get_rate(spi->clk);
+ master->max_speed_hz = clk_hz;
+
+ /*
+ * The controller must be configured so that it doesn't remove Chip
+ * Select until the entire message has been transferred, even if at
+ * some points TX FIFO becomes empty.
+ *
+ * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames
+ * for the 8 bit transfers that this driver uses.
+ */
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control |= CONTROL_SPS | CONTROL_BIGFIFO;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ mchp_corespi_enable_ints(spi);
+
+ /*
+ * It is required to enable direct mode, otherwise control over the chip
+ * select is relinquished to the hardware. SSELOUT is enabled too so we
+ * can deal with active high slaves.
+ */
+ mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_RESET;
+ control |= CONTROL_ENABLE;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
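+/*
+ * As with the frame size and transfer length, the divider and CLKMODE bit are
+ * reprogrammed with the controller disabled, and the controller is then
+ * re-enabled.
+ */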
+static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+{
+ u32 control;
+
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ if (spi->clk_mode)
+ control |= CONTROL_CLKMODE;
+ else
+ control &= ~CONTROL_CLKMODE;
+
+ mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
+ mchp_corespi_write(spi, REG_CONTROL, control);
+ mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
+}
+
+static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
+{
+ u32 control, mode_val;
+
+ switch (mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ mode_val = 0;
+ break;
+ case SPI_MODE_1:
+ mode_val = CONTROL_SPH;
+ break;
+ case SPI_MODE_2:
+ mode_val = CONTROL_SPO;
+ break;
+ case SPI_MODE_3:
+ mode_val = CONTROL_SPH | CONTROL_SPO;
+ break;
+ }
+
+ /*
+ * Disable the SPI controller. Writes to the frame size have
+ * no effect when the controller is enabled.
+ */
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
+ control |= mode_val;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+ u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
+ bool finalise = false;
+
+ /* Interrupt line may be shared and not for us at all */
+ if (intfield == 0)
+ return IRQ_NONE;
+
+ if (intfield & INT_TXDONE) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+
+ if (spi->rx_len)
+ mchp_corespi_read_fifo(spi);
+
+ if (spi->tx_len)
+ mchp_corespi_write_fifo(spi);
+
+ if (!spi->rx_len)
+ finalise = true;
+ }
+
+ if (intfield & INT_RXRDY)
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
+
+ if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+ finalise = true;
+ dev_err(&master->dev,
+ "%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ spi->rx_len, spi->tx_len);
+ }
+
+ if (intfield & INT_TX_CHANNEL_UNDERRUN) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
+ finalise = true;
+ dev_err(&master->dev,
+ "%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ spi->rx_len, spi->tx_len);
+ }
+
+ if (finalise)
+ spi_finalize_current_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+ unsigned long target_hz)
+{
+ unsigned long clk_hz, spi_hz, clk_gen;
+
+ clk_hz = clk_get_rate(spi->clk);
+ if (!clk_hz)
+ return -EINVAL;
+ spi_hz = min(target_hz, clk_hz);
+
+ /*
+ * There are two possible clock modes for the controller generated
+ * clock's division ratio:
+ * CLK_MODE = 0: 1 / (2^(CLK_GEN + 1)) where CLK_GEN = 0 to 15.
+ * CLK_MODE = 1: 1 / (2 * (CLK_GEN + 1)) where CLK_GEN = 0 to 255.
+ * First try mode 1, then fall back to mode 0; if we have tried both modes
+ * and we /still/ can't get a good setting, we then throw the toys out of
+ * the pram and give up.
+ * clk_gen is the register name for the clock divider on MPFS.
+ */
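+ /*
+ * As a worked example (with a purely illustrative 150 MHz input clock and
+ * a 10 MHz target): mode 1 gives clk_gen = DIV_ROUND_UP(150, 20) - 1 = 7,
+ * i.e. a divider of 2 * (7 + 1) = 16 and an SPI clock of 9.375 MHz.
+ */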
+ clk_gen = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
+ if (clk_gen > CLK_GEN_MODE1_MAX || clk_gen <= CLK_GEN_MIN) {
+ clk_gen = DIV_ROUND_UP(clk_hz, spi_hz);
+ clk_gen = fls(clk_gen) - 1;
+
+ if (clk_gen > CLK_GEN_MODE0_MAX)
+ return -EINVAL;
+
+ spi->clk_mode = 0;
+ } else {
+ spi->clk_mode = 1;
+ }
+
+ spi->clk_gen = clk_gen;
+ return 0;
+}
+
+static int mchp_corespi_transfer_one(struct spi_master *master,
+ struct spi_device *spi_dev,
+ struct spi_transfer *xfer)
+{
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
+ if (ret) {
+ dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
+ return ret;
+ }
+
+ mchp_corespi_set_clk_gen(spi);
+
+ spi->tx_buf = xfer->tx_buf;
+ spi->rx_buf = xfer->rx_buf;
+ spi->tx_len = xfer->len;
+ spi->rx_len = xfer->len;
+ spi->pending = 0;
+
+ mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH)
+ ? FIFO_DEPTH : spi->tx_len);
+
+ if (spi->tx_len)
+ mchp_corespi_write_fifo(spi);
+ return 1;
+}
+
+static int mchp_corespi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi_dev = msg->spi;
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+
+ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+ mchp_corespi_set_mode(spi, spi_dev->mode);
+
+ return 0;
+}
+
+static int mchp_corespi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mchp_corespi *spi;
+ struct resource *res;
+ u32 num_cs;
+ int ret = 0;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (!master)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "unable to allocate master for SPI controller\n");
+
+ platform_set_drvdata(pdev, master);
+
+ if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
+ num_cs = MAX_CS;
+
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = mchp_corespi_setup;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->transfer_one = mchp_corespi_transfer_one;
+ master->prepare_message = mchp_corespi_prepare_message;
+ master->set_cs = mchp_corespi_set_cs;
+ master->dev.of_node = pdev->dev.of_node;
+
+ spi = spi_master_get_devdata(master);
+
+ spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(spi->regs))
+ return PTR_ERR(spi->regs);
+
+ spi->irq = platform_get_irq(pdev, 0);
+ if (spi->irq <= 0)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "invalid IRQ %d for SPI controller\n",
+ spi->irq);
+
+ ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), master);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not request irq: %d\n", ret);
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk),
+ "could not get clk: %d\n", ret);
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+ mchp_corespi_init(master, spi);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ mchp_corespi_disable(spi);
+ clk_disable_unprepare(spi->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "unable to register master for SPI controller\n");
+ }
+
+ dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num);
+
+ return 0;
+}
+
+static int mchp_corespi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+
+ mchp_corespi_disable_ints(spi);
+ clk_disable_unprepare(spi->clk);
+ mchp_corespi_disable(spi);
+
+ return 0;
+}
+
+#define MICROCHIP_SPI_PM_OPS (NULL)
+
+/*
+ * Platform driver data structure
+ */
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mchp_corespi_dt_ids[] = {
+ { .compatible = "microchip,mpfs-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids);
+#endif
+
+static struct platform_driver mchp_corespi_driver = {
+ .probe = mchp_corespi_probe,
+ .driver = {
+ .name = "microchip-corespi",
+ .pm = MICROCHIP_SPI_PM_OPS,
+ .of_match_table = of_match_ptr(mchp_corespi_dt_ids),
+ },
+ .remove = mchp_corespi_remove,
+};
+module_platform_driver(mchp_corespi_driver);
+MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 7654736c2c0e..609311231e64 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -37,12 +37,6 @@ struct mpc52xx_psc_spi {
struct mpc52xx_psc_fifo __iomem *fifo;
unsigned int irq;
u8 bits_per_word;
- u8 busy;
-
- struct work_struct work;
-
- struct list_head queue;
- spinlock_t lock;
struct completion done;
};
@@ -198,69 +192,53 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
return 0;
}
-static void mpc52xx_psc_spi_work(struct work_struct *work)
+static int mpc52xx_psc_spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *m)
{
- struct mpc52xx_psc_spi *mps =
- container_of(work, struct mpc52xx_psc_spi, work);
-
- spin_lock_irq(&mps->lock);
- mps->busy = 1;
- while (!list_empty(&mps->queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- unsigned cs_change;
- int status;
-
- m = container_of(mps->queue.next, struct spi_message, queue);
- list_del_init(&m->queue);
- spin_unlock_irq(&mps->lock);
-
- spi = m->spi;
- cs_change = 1;
- status = 0;
- list_for_each_entry (t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- status = mpc52xx_psc_spi_transfer_setup(spi, t);
- if (status < 0)
- break;
- }
-
- if (cs_change)
- mpc52xx_psc_spi_activate_cs(spi);
- cs_change = t->cs_change;
-
- status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
- if (status)
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc52xx_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
break;
- m->actual_length += t->len;
+ }
- spi_transfer_delay_exec(t);
+ if (cs_change)
+ mpc52xx_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
- if (cs_change)
- mpc52xx_psc_spi_deactivate_cs(spi);
- }
+ status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
- m->status = status;
- if (m->complete)
- m->complete(m->context);
+ spi_transfer_delay_exec(t);
- if (status || !cs_change)
+ if (cs_change)
mpc52xx_psc_spi_deactivate_cs(spi);
+ }
- mpc52xx_psc_spi_transfer_setup(spi, NULL);
+ m->status = status;
+ if (status || !cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
- spin_lock_irq(&mps->lock);
- }
- mps->busy = 0;
- spin_unlock_irq(&mps->lock);
+ mpc52xx_psc_spi_transfer_setup(spi, NULL);
+
+ spi_finalize_current_message(ctlr);
+
+ return 0;
}
static int mpc52xx_psc_spi_setup(struct spi_device *spi)
{
- struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
- unsigned long flags;
if (spi->bits_per_word%8)
return -EINVAL;
@@ -275,28 +253,6 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
cs->bits_per_word = spi->bits_per_word;
cs->speed_hz = spi->max_speed_hz;
- spin_lock_irqsave(&mps->lock, flags);
- if (!mps->busy)
- mpc52xx_psc_spi_deactivate_cs(spi);
- spin_unlock_irqrestore(&mps->lock, flags);
-
- return 0;
-}
-
-static int mpc52xx_psc_spi_transfer(struct spi_device *spi,
- struct spi_message *m)
-{
- struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- spin_lock_irqsave(&mps->lock, flags);
- list_add_tail(&m->queue, &mps->queue);
- schedule_work(&mps->work);
- spin_unlock_irqrestore(&mps->lock, flags);
-
return 0;
}
@@ -391,7 +347,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->num_chipselect = pdata->max_chipselect;
}
master->setup = mpc52xx_psc_spi_setup;
- master->transfer = mpc52xx_psc_spi_transfer;
+ master->transfer_one_message = mpc52xx_psc_spi_transfer_one_message;
master->cleanup = mpc52xx_psc_spi_cleanup;
master->dev.of_node = dev->of_node;
@@ -415,10 +371,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
goto free_irq;
}
- spin_lock_init(&mps->lock);
init_completion(&mps->done);
- INIT_WORK(&mps->work, mpc52xx_psc_spi_work);
- INIT_LIST_HEAD(&mps->queue);
ret = spi_register_master(master);
if (ret < 0)
@@ -470,7 +423,6 @@ static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
struct spi_master *master = spi_master_get(platform_get_drvdata(op));
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
- flush_work(&mps->work);
spi_unregister_master(master);
free_irq(mps->irq, mps);
if (mps->psc)
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 3ebdce804b90..bc5e36fd4288 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -437,7 +437,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
- ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index ba67dbed9fb8..49f6424e35af 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -36,6 +36,7 @@
#define NPCM_FIU_UMA_DR1 0x34
#define NPCM_FIU_UMA_DR2 0x38
#define NPCM_FIU_UMA_DR3 0x3C
+#define NPCM_FIU_CFG 0x78
#define NPCM_FIU_MAX_REG_LIMIT 0x80
/* FIU Direct Read Configuration Register */
@@ -151,6 +152,9 @@
#define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8)
#define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0)
+/* FIU Configuration Register */
+#define NPCM_FIU_CFG_FIU_FIX BIT(31)
+
/* FIU Read Mode */
enum {
DRD_SINGLE_WIRE_MODE = 0,
@@ -187,6 +191,7 @@ enum {
FIU0 = 0,
FIU3,
FIUX,
+ FIU1,
};
struct npcm_fiu_info {
@@ -214,6 +219,21 @@ static const struct fiu_data npcm7xx_fiu_data = {
.fiu_max = 3,
};
+static const struct npcm_fiu_info npxm8xx_fiu_info[] = {
+ {.name = "FIU0", .fiu_id = FIU0,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 2},
+ {.name = "FIU3", .fiu_id = FIU3,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 4},
+ {.name = "FIUX", .fiu_id = FIUX,
+ .max_map_size = MAP_SIZE_16MB, .max_cs = 2},
+ {.name = "FIU1", .fiu_id = FIU1,
+ .max_map_size = MAP_SIZE_16MB, .max_cs = 4} };
+
+static const struct fiu_data npxm8xx_fiu_data = {
+ .npcm_fiu_data_info = npxm8xx_fiu_info,
+ .fiu_max = 4,
+};
+
struct npcm_fiu_spi;
struct npcm_fiu_chip {
@@ -252,8 +272,7 @@ static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
fiu->drd_op.addr.buswidth = op->addr.buswidth;
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_DBW,
- ((op->dummy.nbytes * ilog2(op->addr.buswidth)) / BITS_PER_BYTE)
- << NPCM_FIU_DRD_DBW_SHIFT);
+ op->dummy.nbytes << NPCM_FIU_DRD_DBW_SHIFT);
fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
@@ -625,6 +644,10 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET,
NPCM7XX_INTCR3_FIU_FIX,
NPCM7XX_INTCR3_FIU_FIX);
+ } else {
+ regmap_update_bits(fiu->regmap, NPCM_FIU_CFG,
+ NPCM_FIU_CFG_FIU_FIX,
+ NPCM_FIU_CFG_FIU_FIX);
}
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) {
@@ -665,6 +688,7 @@ static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
static const struct of_device_id npcm_fiu_dt_ids[] = {
{ .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data },
+ { .compatible = "nuvoton,npcm845-fiu", .data = &npxm8xx_fiu_data },
{ /* sentinel */ }
};
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index edb42d08857d..838d12e65144 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1404,6 +1404,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x7af9), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x7afb), LPSS_CNL_SSP },
+ /* MTL-P */
+ { PCI_VDEVICE(INTEL, 0x7e27), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7e30), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7e46), LPSS_CNL_SSP },
/* CNL-LP */
{ PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP },
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7a014eeec2d0..411b1307b7fd 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
+ if (tx)
+ dmaengine_synchronize(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_synchronize(rspi->ctlr->dma_rx);
} else {
if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index c26440e9058d..7f346866614a 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -18,7 +18,7 @@
#include <linux/platform_data/spi-s3c64xx.h>
-#define MAX_SPI_PORTS 6
+#define MAX_SPI_PORTS 12
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
#define AUTOSUSPEND_TIMEOUT 2000
@@ -59,6 +59,7 @@
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3)
#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
#define S3C64XX_SPI_MODE_4BURST (1<<0)
@@ -130,11 +131,13 @@ struct s3c64xx_spi_dma_data {
* @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
* @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter.
* @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter.
+ * @clk_div: Internal clock divider
* @quirks: Bitmask of known quirks
* @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
* @clk_from_cmu: True, if the controller does not include a clock mux and
* prescaler unit.
* @clk_ioclk: True if clock is present on this device
+ * @has_loopback: True if loopback mode can be supported
*
 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
* differ in some aspects such as the size of the fifo and spi bus clock
@@ -146,9 +149,11 @@ struct s3c64xx_spi_port_config {
int rx_lvl_offset;
int tx_st_done;
int quirks;
+ int clk_div;
bool high_speed;
bool clk_from_cmu;
bool clk_ioclk;
+ bool has_loopback;
};
/**
@@ -350,19 +355,59 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
if (is_polling(sdd))
return 0;
+ /* Requests DMA channels */
+ sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx");
+ if (IS_ERR(sdd->rx_dma.ch)) {
+ dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n");
+ sdd->rx_dma.ch = NULL;
+ return 0;
+ }
+
+ sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx");
+ if (IS_ERR(sdd->tx_dma.ch)) {
+ dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n");
+ dma_release_channel(sdd->rx_dma.ch);
+ sdd->tx_dma.ch = NULL;
+ sdd->rx_dma.ch = NULL;
+ return 0;
+ }
+
spi->dma_rx = sdd->rx_dma.ch;
spi->dma_tx = sdd->tx_dma.ch;
return 0;
}
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ if (is_polling(sdd))
+ return 0;
+
+ /* Releases DMA channels if they are allocated */
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+ sdd->rx_dma.ch = 0;
+ sdd->tx_dma.ch = 0;
+ }
+
+ return 0;
+}
+
static bool s3c64xx_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ } else {
+ return false;
+ }
+
}
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
@@ -577,6 +622,7 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
void __iomem *regs = sdd->regs;
int ret;
u32 val;
+ int div = sdd->port_conf->clk_div;
/* Disable Clock */
if (!sdd->port_conf->clk_from_cmu) {
@@ -619,19 +665,21 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
break;
}
+ if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
+ val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;
+
writel(val, regs + S3C64XX_SPI_MODE_CFG);
if (sdd->port_conf->clk_from_cmu) {
- /* The src_clk clock is divided internally by 2 */
- ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * div);
if (ret)
return ret;
- sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
+ sdd->cur_speed = clk_get_rate(sdd->src_clk) / div;
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_PSR_MASK;
- val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
+ val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / div - 1)
& S3C64XX_SPI_PSR_MASK);
writel(val, regs + S3C64XX_SPI_CLK_CFG);
@@ -697,7 +745,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
- } else if (is_polling(sdd) && xfer->len > fifo_len) {
+ } else if (xfer->len > fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
origin_len = xfer->len;
@@ -825,6 +873,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
int err;
+ int div;
sdd = spi_master_get_devdata(spi->master);
if (spi->dev.of_node) {
@@ -843,22 +892,24 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_get_sync(&sdd->pdev->dev);
+ div = sdd->port_conf->clk_div;
+
/* Check if we can provide the requested rate */
if (!sdd->port_conf->clk_from_cmu) {
u32 psr, speed;
/* Max possible */
- speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
+ speed = clk_get_rate(sdd->src_clk) / div / (0 + 1);
if (spi->max_speed_hz > speed)
spi->max_speed_hz = speed;
- psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
+ psr = clk_get_rate(sdd->src_clk) / div / spi->max_speed_hz - 1;
psr &= S3C64XX_SPI_PSR_MASK;
if (psr == S3C64XX_SPI_PSR_MASK)
psr--;
- speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
+ speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
if (spi->max_speed_hz < speed) {
if (psr+1 < S3C64XX_SPI_PSR_MASK) {
psr++;
@@ -868,7 +919,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
}
- speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
+ speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
} else {
@@ -1098,6 +1149,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->setup = s3c64xx_spi_setup;
master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
+ master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
master->num_chipselect = sci->num_cs;
@@ -1107,6 +1159,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (sdd->port_conf->has_loopback)
+ master->mode_bits |= SPI_LOOP;
master->auto_runtime_pm = true;
if (!is_polling(sdd))
master->can_dma = s3c64xx_spi_can_dma;
@@ -1167,22 +1221,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
}
}
- if (!is_polling(sdd)) {
- /* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
- if (IS_ERR(sdd->rx_dma.ch)) {
- dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
- ret = PTR_ERR(sdd->rx_dma.ch);
- goto err_disable_io_clk;
- }
- sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR(sdd->tx_dma.ch)) {
- dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
- ret = PTR_ERR(sdd->tx_dma.ch);
- goto err_release_rx_dma;
- }
- }
-
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -1228,12 +1266,6 @@ err_pm_put:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
- if (!is_polling(sdd))
- dma_release_channel(sdd->tx_dma.ch);
-err_release_rx_dma:
- if (!is_polling(sdd))
- dma_release_channel(sdd->rx_dma.ch);
-err_disable_io_clk:
clk_disable_unprepare(sdd->ioclk);
err_disable_src_clk:
clk_disable_unprepare(sdd->src_clk);
@@ -1369,6 +1401,7 @@ static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
.fifo_lvl_mask = { 0x7f },
.rx_lvl_offset = 13,
.tx_st_done = 21,
+ .clk_div = 2,
.high_speed = true,
};
@@ -1376,12 +1409,14 @@ static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7F },
.rx_lvl_offset = 13,
.tx_st_done = 21,
+ .clk_div = 2,
};
static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
+ .clk_div = 2,
.high_speed = true,
};
@@ -1389,6 +1424,7 @@ static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
+ .clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
@@ -1398,6 +1434,7 @@ static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
.rx_lvl_offset = 15,
.tx_st_done = 25,
+ .clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
@@ -1407,16 +1444,31 @@ static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
.rx_lvl_offset = 15,
.tx_st_done = 25,
+ .clk_div = 2,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 4,
.high_speed = true,
.clk_from_cmu = true,
.clk_ioclk = true,
+ .has_loopback = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
-static struct s3c64xx_spi_port_config fsd_spi_port_config = {
+static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
.rx_lvl_offset = 15,
.tx_st_done = 25,
+ .clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.clk_ioclk = false,
@@ -1453,6 +1505,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,exynos5433-spi",
.data = (void *)&exynos5433_spi_port_config,
},
+ { .compatible = "samsung,exynosautov9-spi",
+ .data = (void *)&exynosautov9_spi_port_config,
+ },
{ .compatible = "tesla,fsd-spi",
.data = (void *)&fsd_spi_port_config,
},
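
The s3c64xx changes above move DMA channel acquisition out of probe() and into the controller's prepare/unprepare hooks, with can_dma() falling back to PIO when no channel was obtained. A minimal sketch of that pattern, using illustrative my_* names rather than the driver's own structures:

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/spi/spi.h>

struct my_spi_priv {
        struct device *dev;
        struct dma_chan *rx_ch;
        struct dma_chan *tx_ch;
};

static int my_prepare_hw(struct spi_controller *ctlr)
{
        struct my_spi_priv *priv = spi_controller_get_devdata(ctlr);

        priv->rx_ch = dma_request_chan(priv->dev, "rx");
        if (IS_ERR(priv->rx_ch)) {
                priv->rx_ch = NULL;             /* fall back to PIO */
                return 0;
        }

        priv->tx_ch = dma_request_chan(priv->dev, "tx");
        if (IS_ERR(priv->tx_ch)) {
                dma_release_channel(priv->rx_ch);
                priv->rx_ch = NULL;
                priv->tx_ch = NULL;
                return 0;
        }

        ctlr->dma_rx = priv->rx_ch;
        ctlr->dma_tx = priv->tx_ch;
        return 0;
}

static int my_unprepare_hw(struct spi_controller *ctlr)
{
        struct my_spi_priv *priv = spi_controller_get_devdata(ctlr);

        if (priv->rx_ch && priv->tx_ch) {
                dma_release_channel(priv->rx_ch);
                dma_release_channel(priv->tx_ch);
                priv->rx_ch = NULL;
                priv->tx_ch = NULL;
        }
        return 0;
}

static bool my_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
                       struct spi_transfer *xfer)
{
        struct my_spi_priv *priv = spi_controller_get_devdata(ctlr);

        /* Use DMA only when both channels exist and the transfer is large */
        return priv->rx_ch && priv->tx_ch && xfer->len > 64;
}
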
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 45f304935332..3e72fad99adf 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -73,11 +73,8 @@ struct spi_sh_data {
void __iomem *addr;
int irq;
struct spi_master *master;
- struct list_head queue;
- struct work_struct ws;
unsigned long cr1;
wait_queue_head_t wait;
- spinlock_t lock;
int width;
};
@@ -271,47 +268,39 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
return 0;
}
-static void spi_sh_work(struct work_struct *work)
+static int spi_sh_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *mesg)
{
- struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
- struct spi_message *mesg;
+ struct spi_sh_data *ss = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
- unsigned long flags;
int ret;
pr_debug("%s: enter\n", __func__);
- spin_lock_irqsave(&ss->lock, flags);
- while (!list_empty(&ss->queue)) {
- mesg = list_entry(ss->queue.next, struct spi_message, queue);
- list_del_init(&mesg->queue);
-
- spin_unlock_irqrestore(&ss->lock, flags);
- list_for_each_entry(t, &mesg->transfers, transfer_list) {
- pr_debug("tx_buf = %p, rx_buf = %p\n",
- t->tx_buf, t->rx_buf);
- pr_debug("len = %d, delay.value = %d\n",
- t->len, t->delay.value);
-
- if (t->tx_buf) {
- ret = spi_sh_send(ss, mesg, t);
- if (ret < 0)
- goto error;
- }
- if (t->rx_buf) {
- ret = spi_sh_receive(ss, mesg, t);
- if (ret < 0)
- goto error;
- }
- mesg->actual_length += t->len;
- }
- spin_lock_irqsave(&ss->lock, flags);
+ spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
- mesg->status = 0;
- if (mesg->complete)
- mesg->complete(mesg->context);
+ list_for_each_entry(t, &mesg->transfers, transfer_list) {
+ pr_debug("tx_buf = %p, rx_buf = %p\n",
+ t->tx_buf, t->rx_buf);
+ pr_debug("len = %d, delay.value = %d\n",
+ t->len, t->delay.value);
+
+ if (t->tx_buf) {
+ ret = spi_sh_send(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ if (t->rx_buf) {
+ ret = spi_sh_receive(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ mesg->actual_length += t->len;
}
+ mesg->status = 0;
+ spi_finalize_current_message(ctlr);
+
clear_fifo(ss);
spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
udelay(100);
@@ -321,12 +310,11 @@ static void spi_sh_work(struct work_struct *work)
clear_fifo(ss);
- spin_unlock_irqrestore(&ss->lock, flags);
-
- return;
+ return 0;
error:
mesg->status = ret;
+ spi_finalize_current_message(ctlr);
if (mesg->complete)
mesg->complete(mesg->context);
@@ -334,6 +322,7 @@ static void spi_sh_work(struct work_struct *work)
SPI_SH_CR1);
clear_fifo(ss);
+ return ret;
}
static int spi_sh_setup(struct spi_device *spi)
@@ -355,29 +344,6 @@ static int spi_sh_setup(struct spi_device *spi)
return 0;
}
-static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
-{
- struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- pr_debug("%s: enter\n", __func__);
- pr_debug("\tmode = %02x\n", spi->mode);
-
- spin_lock_irqsave(&ss->lock, flags);
-
- mesg->actual_length = 0;
- mesg->status = -EINPROGRESS;
-
- spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
-
- list_add_tail(&mesg->queue, &ss->queue);
- schedule_work(&ss->ws);
-
- spin_unlock_irqrestore(&ss->lock, flags);
-
- return 0;
-}
-
static void spi_sh_cleanup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
@@ -416,7 +382,6 @@ static int spi_sh_remove(struct platform_device *pdev)
struct spi_sh_data *ss = platform_get_drvdata(pdev);
spi_unregister_master(ss->master);
- flush_work(&ss->ws);
free_irq(ss->irq, ss);
return 0;
@@ -467,9 +432,6 @@ static int spi_sh_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "ioremap error.\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&ss->queue);
- spin_lock_init(&ss->lock);
- INIT_WORK(&ss->ws, spi_sh_work);
init_waitqueue_head(&ss->wait);
ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
@@ -481,7 +443,7 @@ static int spi_sh_probe(struct platform_device *pdev)
master->num_chipselect = 2;
master->bus_num = pdev->id;
master->setup = spi_sh_setup;
- master->transfer = spi_sh_transfer;
+ master->transfer_one_message = spi_sh_transfer_one_message;
master->cleanup = spi_sh_cleanup;
ret = spi_register_master(master);
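
The spi-sh conversion above drops the driver's private queue and workqueue along with the legacy ->transfer hook, in favour of the core-managed queue and ->transfer_one_message(). A minimal sketch of such a handler, where my_do_transfer() is a stand-in for the hardware-specific send/receive code; the one hard rule is that spi_finalize_current_message() must be called on every exit path:

#include <linux/spi/spi.h>

static int my_do_transfer(struct spi_controller *ctlr, struct spi_transfer *t)
{
        /* stub: program FIFOs/DMA, wait for completion, return 0 or -errno */
        return 0;
}

static int my_transfer_one_message(struct spi_controller *ctlr,
                                   struct spi_message *msg)
{
        struct spi_transfer *t;
        int ret = 0;

        list_for_each_entry(t, &msg->transfers, transfer_list) {
                ret = my_do_transfer(ctlr, t);
                if (ret)
                        break;
                msg->actual_length += t->len;
        }

        msg->status = ret;
        /* Mandatory: lets the core queue advance past this message */
        spi_finalize_current_message(ctlr);
        return ret;
}
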
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
index f7c1e20432e0..e29e85cee88a 100644
--- a/drivers/spi/spi-sifive.c
+++ b/drivers/spi/spi-sifive.c
@@ -427,6 +427,44 @@ static int sifive_spi_remove(struct platform_device *pdev)
return 0;
}
+static int sifive_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ /* Disable all the interrupts just in case */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ clk_disable_unprepare(spi->clk);
+
+ return ret;
+}
+
+static int sifive_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret)
+ return ret;
+ ret = spi_master_resume(master);
+ if (ret)
+ clk_disable_unprepare(spi->clk);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(sifive_spi_pm_ops,
+ sifive_spi_suspend, sifive_spi_resume);
+
+
static const struct of_device_id sifive_spi_of_match[] = {
{ .compatible = "sifive,spi0", },
{}
@@ -438,6 +476,7 @@ static struct platform_driver sifive_spi_driver = {
.remove = sifive_spi_remove,
.driver = {
.name = SIFIVE_SPI_DRIVER_NAME,
+ .pm = &sifive_spi_pm_ops,
.of_match_table = sifive_spi_of_match,
},
};
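
The sifive hunk above wires system sleep support through DEFINE_SIMPLE_DEV_PM_OPS(), which packs the suspend/resume callbacks into a dev_pm_ops without needing __maybe_unused annotations or CONFIG_PM_SLEEP ifdefs around them. A generic sketch with hypothetical my_* names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
        /* quiesce the controller, mask interrupts, gate clocks, ... */
        return 0;
}

static int my_resume(struct device *dev)
{
        /* ungate clocks and restore controller state */
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct platform_driver my_driver = {
        .driver = {
                .name = "my-spi",
                .pm = &my_pm_ops,
        },
};
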
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index c0239e405c39..f3fe92300639 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -299,8 +299,7 @@ static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
STM32_BUSY_TIMEOUT_US);
}
-static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
- const struct spi_mem_op *op)
+static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
{
u32 cr, sr;
int err = 0;
@@ -331,8 +330,7 @@ out:
return err;
}
-static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi,
- const struct spi_mem_op *op)
+static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
{
u32 cr;
@@ -349,7 +347,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi,
return 0;
}
-static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
+static int stm32_qspi_get_mode(u8 buswidth)
{
if (buswidth == 4)
return CCR_BUSWIDTH_4;
@@ -382,11 +380,11 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
ccr = qspi->fmode;
ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
ccr |= FIELD_PREP(CCR_IMODE_MASK,
- stm32_qspi_get_mode(qspi, op->cmd.buswidth));
+ stm32_qspi_get_mode(op->cmd.buswidth));
if (op->addr.nbytes) {
ccr |= FIELD_PREP(CCR_ADMODE_MASK,
- stm32_qspi_get_mode(qspi, op->addr.buswidth));
+ stm32_qspi_get_mode(op->addr.buswidth));
ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
}
@@ -396,7 +394,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->data.nbytes) {
ccr |= FIELD_PREP(CCR_DMODE_MASK,
- stm32_qspi_get_mode(qspi, op->data.buswidth));
+ stm32_qspi_get_mode(op->data.buswidth));
}
writel_relaxed(ccr, qspi->io_base + QSPI_CCR);
@@ -405,7 +403,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
if (qspi->fmode == CCR_FMODE_APM)
- err_poll_status = stm32_qspi_wait_poll_status(qspi, op);
+ err_poll_status = stm32_qspi_wait_poll_status(qspi);
err = stm32_qspi_tx(qspi, op);
@@ -420,7 +418,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
goto abort;
/* wait end of tx in indirect mode */
- err = stm32_qspi_wait_cmd(qspi, op);
+ err = stm32_qspi_wait_cmd(qspi);
if (err)
goto abort;
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index ea706d9629cb..47cbe73137c2 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -783,6 +783,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev)
ret = synquacer_spi_enable(master);
if (ret) {
+ clk_disable_unprepare(sspi->clk);
dev_err(dev, "failed to enable spi (%d)\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 38360434d6e9..148043d0c2b8 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1136,7 +1136,7 @@ exit_free_master:
static int tegra_slink_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
spi_unregister_master(master);
@@ -1151,6 +1151,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
if (tspi->rx_dma_chan)
tegra_slink_deinit_dma_param(tspi, true);
+ spi_master_put(master);
return 0;
}
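
The tegra20-slink fix above takes an extra reference in remove() because spi_unregister_master() drops the controller's refcount, after which the driver data may no longer be valid. A generic sketch of the pattern (the my_* names and the clk member are illustrative, not the Tegra driver's):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct my_priv {
        struct clk *clk;
};

static int my_spi_remove(struct platform_device *pdev)
{
        /* Hold our own reference across the teardown */
        struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
        struct my_priv *priv = spi_master_get_devdata(master);

        spi_unregister_master(master);

        /* devdata is still safe to use because of the reference above */
        clk_disable_unprepare(priv->clk);

        spi_master_put(master);
        return 0;
}
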
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 66f647f32876..c89592b21ffc 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -37,6 +37,16 @@
#define QSPI_RX_EN BIT(12)
#define QSPI_CS_SW_VAL BIT(20)
#define QSPI_CS_SW_HW BIT(21)
+
+#define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
+#define QSPI_CS_POL_INACTIVE_MASK (0xF << 22)
+#define QSPI_CS_SEL_0 (0 << 26)
+#define QSPI_CS_SEL_1 (1 << 26)
+#define QSPI_CS_SEL_2 (2 << 26)
+#define QSPI_CS_SEL_3 (3 << 26)
+#define QSPI_CS_SEL_MASK (3 << 26)
+#define QSPI_CS_SEL(x) (((x) & 0x3) << 26)
+
#define QSPI_CONTROL_MODE_0 (0 << 28)
#define QSPI_CONTROL_MODE_3 (3 << 28)
#define QSPI_CONTROL_MODE_MASK (3 << 28)
@@ -154,6 +164,7 @@
struct tegra_qspi_soc_data {
bool has_dma;
bool cmb_xfer_capable;
+ unsigned int cs_count;
};
struct tegra_qspi_client_data {
@@ -812,6 +823,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
tegra_qspi_mask_clear_irq(tqspi);
command1 = tqspi->def_command1_reg;
+ command1 |= QSPI_CS_SEL(spi->chip_select);
command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
command1 &= ~QSPI_CONTROL_MODE_MASK;
@@ -941,10 +953,11 @@ static int tegra_qspi_setup(struct spi_device *spi)
/* keep default cs state to inactive */
val = tqspi->def_command1_reg;
+ val |= QSPI_CS_SEL(spi->chip_select);
if (spi->mode & SPI_CS_HIGH)
- val &= ~QSPI_CS_SW_VAL;
+ val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
else
- val |= QSPI_CS_SW_VAL;
+ val |= QSPI_CS_POL_INACTIVE(spi->chip_select);
tqspi->def_command1_reg = val;
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
@@ -1425,16 +1438,25 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
.has_dma = true,
.cmb_xfer_capable = false,
+ .cs_count = 1,
};
static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
.has_dma = true,
.cmb_xfer_capable = true,
+ .cs_count = 1,
};
static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
.has_dma = false,
.cmb_xfer_capable = true,
+ .cs_count = 1,
+};
+
+static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
+ .has_dma = false,
+ .cmb_xfer_capable = true,
+ .cs_count = 4,
};
static const struct of_device_id tegra_qspi_of_match[] = {
@@ -1450,6 +1472,9 @@ static const struct of_device_id tegra_qspi_of_match[] = {
}, {
.compatible = "nvidia,tegra234-qspi",
.data = &tegra234_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra241-qspi",
+ .data = &tegra241_qspi_soc_data,
},
{}
};
@@ -1467,6 +1492,9 @@ static const struct acpi_device_id tegra_qspi_acpi_match[] = {
}, {
.id = "NVDA1413",
.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
+ }, {
+ .id = "NVDA1513",
+ .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
},
{}
};
@@ -1506,6 +1534,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
spin_lock_init(&tqspi->lock);
tqspi->soc_data = device_get_match_data(&pdev->dev);
+ master->num_chipselect = tqspi->soc_data->cs_count;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tqspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tqspi->base))
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index b5b65d882d7a..60086869bcae 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -57,7 +57,6 @@ struct ti_qspi {
void *rx_bb_addr;
struct dma_chan *rx_chan;
- u32 spi_max_frequency;
u32 cmd;
u32 dc;
@@ -140,37 +139,19 @@ static inline void ti_qspi_write(struct ti_qspi *qspi,
static int ti_qspi_setup(struct spi_device *spi)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
- struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
- int clk_div = 0, ret;
- u32 clk_ctrl_reg, clk_rate, clk_mask;
+ int ret;
if (spi->master->busy) {
dev_dbg(qspi->dev, "master busy doing other transfers\n");
return -EBUSY;
}
- if (!qspi->spi_max_frequency) {
+ if (!qspi->master->max_speed_hz) {
dev_err(qspi->dev, "spi max frequency not defined\n");
return -EINVAL;
}
- clk_rate = clk_get_rate(qspi->fclk);
-
- clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
-
- if (clk_div < 0) {
- dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
- return -EINVAL;
- }
-
- if (clk_div > QSPI_CLK_DIV_MAX) {
- dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
- QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
- return -EINVAL;
- }
-
- dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
- qspi->spi_max_frequency, clk_div);
+ spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0) {
@@ -178,18 +159,6 @@ static int ti_qspi_setup(struct spi_device *spi)
return ret;
}
- clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
-
- clk_ctrl_reg &= ~QSPI_CLK_EN;
-
- /* disable SCLK */
- ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
-
- /* enable SCLK */
- clk_mask = QSPI_CLK_EN | clk_div;
- ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
- ctx_reg->clkctrl = clk_mask;
-
pm_runtime_mark_last_busy(qspi->dev);
ret = pm_runtime_put_autosuspend(qspi->dev);
if (ret < 0) {
@@ -200,6 +169,37 @@ static int ti_qspi_setup(struct spi_device *spi)
return 0;
}
+static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
+{
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+ int clk_div;
+ u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;
+
+ clk_rate = clk_get_rate(qspi->fclk);
+ clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
+ clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
+ dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);
+
+ pm_runtime_resume_and_get(qspi->dev);
+
+ clk_ctrl_new = QSPI_CLK_EN | clk_div;
+ if (ctx_reg->clkctrl != clk_ctrl_new) {
+ clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ clk_ctrl_reg &= ~QSPI_CLK_EN;
+
+ /* disable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ /* enable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
+ ctx_reg->clkctrl = clk_ctrl_new;
+ }
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+}
+
static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
@@ -623,8 +623,10 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
mutex_lock(&qspi->list_lock);
- if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select)
+ if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) {
+ ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
ti_qspi_enable_memory_map(mem->spi);
+ }
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes);
@@ -701,6 +703,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
wlen = t->bits_per_word >> 3;
transfer_len_words = min(t->len / wlen, frame_len_words);
+ ti_qspi_setup_clk(qspi, t->speed_hz);
ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
if (ret) {
dev_dbg(qspi->dev, "transfer message failed\n");
@@ -851,7 +854,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
- qspi->spi_max_frequency = max_freq;
+ master->max_speed_hz = max_freq;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
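
The ti-qspi rework above programs the clock divider per transfer from t->speed_hz instead of once at setup time, clamping the divider to the hardware range rather than failing. A condensed sketch of that arithmetic, where MY_DIV_MAX is a hypothetical register-field limit rather than the TI definition:

#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define MY_DIV_MAX      0xffff  /* hypothetical width of the divider field */

static u32 my_calc_div(unsigned long fclk_rate, u32 speed_hz)
{
        int div = DIV_ROUND_UP(fclk_rate, speed_hz) - 1;

        /* 0 means "divide by 1"; anything larger saturates at the field max */
        return clamp(div, 0, MY_DIV_MAX);
}
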
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index dfaa1d79a78b..cbb60198a7f0 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -455,35 +455,10 @@ static void pch_spi_reset(struct spi_master *master)
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
-
- struct spi_transfer *transfer;
struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
int retval;
unsigned long flags;
- spin_lock_irqsave(&data->lock, flags);
- /* validate Tx/Rx buffers and Transfer length */
- list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
- if (!transfer->tx_buf && !transfer->rx_buf) {
- dev_err(&pspi->dev,
- "%s Tx and Rx buffer NULL\n", __func__);
- retval = -EINVAL;
- goto err_return_spinlock;
- }
-
- if (!transfer->len) {
- dev_err(&pspi->dev, "%s Transfer length invalid\n",
- __func__);
- retval = -EINVAL;
- goto err_return_spinlock;
- }
-
- dev_dbg(&pspi->dev,
- "%s Tx/Rx buffer valid. Transfer length valid\n",
- __func__);
- }
- spin_unlock_irqrestore(&data->lock, flags);
-
/* We won't process any messages if we have been asked to terminate */
if (data->status == STATUS_EXITING) {
dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
@@ -518,10 +493,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
err_out:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
return retval;
-err_return_spinlock:
- dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
- spin_unlock_irqrestore(&data->lock, flags);
- return retval;
}
static inline void pch_spi_select_chip(struct pch_spi_data *data,
@@ -1365,6 +1336,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->max_speed_hz = PCH_MAX_BAUDRATE;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 2b5afae8ff7f..c760aac070e5 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -134,6 +134,8 @@
#define GQSPI_DMA_UNALIGN 0x3
#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
+#define GQSPI_MAX_NUM_CS 2 /* Maximum number of chip selects */
+
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
@@ -363,8 +365,13 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
if (!is_high) {
- xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
- xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER;
+ if (!qspi->chip_select) {
+ xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
+ xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER;
+ } else {
+ xqspi->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
+ xqspi->genfifocs = GQSPI_GENFIFO_CS_UPPER;
+ }
genfifoentry |= xqspi->genfifobus;
genfifoentry |= xqspi->genfifocs;
genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
@@ -1099,6 +1106,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ u32 num_cs;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
@@ -1176,8 +1184,19 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
if (ret)
goto clk_dis_all;
+ ret = of_property_read_u32(np, "num-cs", &num_cs);
+ if (ret < 0) {
+ ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ } else if (num_cs > GQSPI_MAX_NUM_CS) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "only %d chip selects are available\n",
+ GQSPI_MAX_NUM_CS);
+ goto clk_dis_all;
+ } else {
+ ctlr->num_chipselect = num_cs;
+ }
+
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
ctlr->setup = zynqmp_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ea09d1b42bf6..83da8862b8f2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -33,6 +33,7 @@
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/percpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
@@ -49,6 +50,7 @@ static void spidev_release(struct device *dev)
spi_controller_put(spi->controller);
kfree(spi->driver_override);
+ free_percpu(spi->pcpu_statistics);
kfree(spi);
}
@@ -93,6 +95,47 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
+{
+ struct spi_statistics __percpu *pcpu_stats;
+
+ if (dev)
+ pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
+ else
+ pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
+
+ if (pcpu_stats) {
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct spi_statistics *stat;
+
+ stat = per_cpu_ptr(pcpu_stats, cpu);
+ u64_stats_init(&stat->syncp);
+ }
+ }
+ return pcpu_stats;
+}
+
+#define spi_pcpu_stats_totalize(ret, in, field) \
+do { \
+ int i; \
+ ret = 0; \
+ for_each_possible_cpu(i) { \
+ const struct spi_statistics *pcpu_stats; \
+ u64 inc; \
+ unsigned int start; \
+ pcpu_stats = per_cpu_ptr(in, i); \
+ do { \
+ start = u64_stats_fetch_begin_irq( \
+ &pcpu_stats->syncp); \
+ inc = u64_stats_read(&pcpu_stats->field); \
+ } while (u64_stats_fetch_retry_irq( \
+ &pcpu_stats->syncp, start)); \
+ ret += inc; \
+ } \
+} while (0)
+
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
struct device_attribute *attr, \
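
The spi.c hunk above replaces the spinlock-protected statistics with per-cpu counters guarded by u64_stats_sync: writers bump their local copy between u64_stats_update_begin()/end(), and readers sum every CPU inside a fetch_begin/fetch_retry loop. A condensed, self-contained sketch of the same pattern (my_* names are illustrative; each syncp must have been u64_stats_init()'d at allocation time, as the code above does):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
        u64_stats_t count;
        struct u64_stats_sync syncp;
};

static void my_stats_inc(struct my_stats __percpu *pcpu)
{
        struct my_stats *s = get_cpu_ptr(pcpu);

        u64_stats_update_begin(&s->syncp);
        u64_stats_inc(&s->count);
        u64_stats_update_end(&s->syncp);
        put_cpu_ptr(pcpu);
}

static u64 my_stats_read(struct my_stats __percpu *pcpu)
{
        u64 total = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                const struct my_stats *s = per_cpu_ptr(pcpu, cpu);
                unsigned int start;
                u64 val;

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        val = u64_stats_read(&s->count);
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                total += val;
        }
        return total;
}
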
@@ -100,7 +143,7 @@ static ssize_t spi_controller_##field##_show(struct device *dev, \
{ \
struct spi_controller *ctlr = container_of(dev, \
struct spi_controller, dev); \
- return spi_statistics_##field##_show(&ctlr->statistics, buf); \
+ return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
.attr = { .name = file, .mode = 0444 }, \
@@ -111,47 +154,46 @@ static ssize_t spi_device_##field##_show(struct device *dev, \
char *buf) \
{ \
struct spi_device *spi = to_spi_device(dev); \
- return spi_statistics_##field##_show(&spi->statistics, buf); \
+ return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
.attr = { .name = file, .mode = 0444 }, \
.show = spi_device_##field##_show, \
}
-#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
- unsigned long flags; \
ssize_t len; \
- spin_lock_irqsave(&stat->lock, flags); \
- len = sysfs_emit(buf, format_string "\n", stat->field); \
- spin_unlock_irqrestore(&stat->lock, flags); \
+ u64 val; \
+ spi_pcpu_stats_totalize(val, stat, field); \
+ len = sysfs_emit(buf, "%llu\n", val); \
return len; \
} \
SPI_STATISTICS_ATTRS(name, file)
-#define SPI_STATISTICS_SHOW(field, format_string) \
+#define SPI_STATISTICS_SHOW(field) \
SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
- field, format_string)
+ field)
-SPI_STATISTICS_SHOW(messages, "%lu");
-SPI_STATISTICS_SHOW(transfers, "%lu");
-SPI_STATISTICS_SHOW(errors, "%lu");
-SPI_STATISTICS_SHOW(timedout, "%lu");
+SPI_STATISTICS_SHOW(messages);
+SPI_STATISTICS_SHOW(transfers);
+SPI_STATISTICS_SHOW(errors);
+SPI_STATISTICS_SHOW(timedout);
-SPI_STATISTICS_SHOW(spi_sync, "%lu");
-SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
-SPI_STATISTICS_SHOW(spi_async, "%lu");
+SPI_STATISTICS_SHOW(spi_sync);
+SPI_STATISTICS_SHOW(spi_sync_immediate);
+SPI_STATISTICS_SHOW(spi_async);
-SPI_STATISTICS_SHOW(bytes, "%llu");
-SPI_STATISTICS_SHOW(bytes_rx, "%llu");
-SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+SPI_STATISTICS_SHOW(bytes);
+SPI_STATISTICS_SHOW(bytes_rx);
+SPI_STATISTICS_SHOW(bytes_tx);
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
"transfer_bytes_histo_" number, \
- transfer_bytes_histo[index], "%lu")
+ transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
@@ -170,7 +212,7 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
-SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
+SPI_STATISTICS_SHOW(transfers_split_maxsize);
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
@@ -267,30 +309,33 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
- unsigned long flags;
int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
+ struct spi_statistics *stats;
if (l2len < 0)
l2len = 0;
- spin_lock_irqsave(&stats->lock, flags);
+ get_cpu();
+ stats = this_cpu_ptr(pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
- stats->transfers++;
- stats->transfer_bytes_histo[l2len]++;
+ u64_stats_inc(&stats->transfers);
+ u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
- stats->bytes += xfer->len;
+ u64_stats_add(&stats->bytes, xfer->len);
if ((xfer->tx_buf) &&
(xfer->tx_buf != ctlr->dummy_tx))
- stats->bytes_tx += xfer->len;
+ u64_stats_add(&stats->bytes_tx, xfer->len);
if ((xfer->rx_buf) &&
(xfer->rx_buf != ctlr->dummy_rx))
- stats->bytes_rx += xfer->len;
+ u64_stats_add(&stats->bytes_rx, xfer->len);
- spin_unlock_irqrestore(&stats->lock, flags);
+ u64_stats_update_end(&stats->syncp);
+ put_cpu();
}
/*
@@ -519,14 +564,19 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
return NULL;
}
+ spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
+ if (!spi->pcpu_statistics) {
+ kfree(spi);
+ spi_controller_put(ctlr);
+ return NULL;
+ }
+
spi->master = spi->controller = ctlr;
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->mode = ctlr->buswidth_override_bits;
- spin_lock_init(&spi->statistics.lock);
-
device_initialize(&spi->dev);
return spi;
}
@@ -1225,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = &ctlr->statistics;
- struct spi_statistics *stats = &msg->spi->statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
@@ -1304,7 +1354,7 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
/* Nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
- /* clock cycles need to be obtained from spi_transfer */
+ /* Clock cycles need to be obtained from spi_transfer */
if (!xfer)
return -EINVAL;
/*
@@ -1353,7 +1403,7 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg,
u32 unit = xfer->cs_change_delay.unit;
int ret;
- /* return early on "fast" mode - for everything but USECS */
+ /* Return early on "fast" mode - for everything but USECS */
if (!delay) {
if (unit == SPI_DELAY_UNIT_USECS)
_spi_transfer_delay_ns(default_delay_ns);
@@ -1382,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = &ctlr->statistics;
- struct spi_statistics *stats = &msg->spi->statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
spi_set_cs(msg->spi, true, false);
@@ -1499,6 +1549,103 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr)
}
}
+static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+ struct spi_message *msg, bool was_busy)
+{
+ struct spi_transfer *xfer;
+ int ret;
+
+ if (!was_busy && ctlr->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(ctlr->dev.parent);
+ if (ret < 0) {
+ pm_runtime_put_noidle(ctlr->dev.parent);
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!was_busy)
+ trace_spi_controller_busy(ctlr);
+
+ if (!was_busy && ctlr->prepare_transfer_hardware) {
+ ret = ctlr->prepare_transfer_hardware(ctlr);
+ if (ret) {
+ dev_err(&ctlr->dev,
+ "failed to prepare transfer hardware: %d\n",
+ ret);
+
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+ }
+ }
+
+ trace_spi_message_start(msg);
+
+ if (ctlr->prepare_message) {
+ ret = ctlr->prepare_message(ctlr, msg);
+ if (ret) {
+ dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+ ret);
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+ msg->prepared = true;
+ }
+
+ ret = spi_map_msg(ctlr, msg);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
+ /*
+ * A driver's implementation of transfer_one_message() must arrange for
+ * spi_finalize_current_message() to get called. Most drivers will do
+ * this in the calling context, but some don't. For those cases, a
+ * completion is used to guarantee that this function does not return
+ * until spi_finalize_current_message() is done accessing
+ * ctlr->cur_msg.
+ * Use of the following two flags enables opportunistically skipping the
+ * completion, since its use involves expensive spin locks.
+ * In case of a race with the context that calls
+ * spi_finalize_current_message() the completion will always be used,
+ * due to strict ordering of these flags using barriers.
+ */
+ WRITE_ONCE(ctlr->cur_msg_incomplete, true);
+ WRITE_ONCE(ctlr->cur_msg_need_completion, false);
+ reinit_completion(&ctlr->cur_msg_completion);
+ smp_wmb(); /* Make these available to spi_finalize_current_message() */
+
+ ret = ctlr->transfer_one_message(ctlr, msg);
+ if (ret) {
+ dev_err(&ctlr->dev,
+ "failed to transfer one message from queue\n");
+ return ret;
+ }
+
+ WRITE_ONCE(ctlr->cur_msg_need_completion, true);
+ smp_mb(); /* See spi_finalize_current_message()... */
+ if (READ_ONCE(ctlr->cur_msg_incomplete))
+ wait_for_completion(&ctlr->cur_msg_completion);
+
+ return 0;
+}
+
/**
* __spi_pump_messages - function which processes spi message queue
* @ctlr: controller to process queue for
@@ -1514,34 +1661,25 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr)
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
- struct spi_transfer *xfer;
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
int ret;
+ /* Take the IO mutex */
+ mutex_lock(&ctlr->io_mutex);
+
/* Lock queue */
spin_lock_irqsave(&ctlr->queue_lock, flags);
/* Make sure we are not already running a message */
- if (ctlr->cur_msg) {
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
-
- /* If another context is idling the device then defer */
- if (ctlr->idling) {
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
+ if (ctlr->cur_msg)
+ goto out_unlock;
/* Check if the queue is idle */
if (list_empty(&ctlr->queue) || !ctlr->running) {
- if (!ctlr->busy) {
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
+ if (!ctlr->busy)
+ goto out_unlock;
/* Defer any non-atomic teardown to the thread */
if (!in_kthread) {
@@ -1549,17 +1687,16 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
!ctlr->unprepare_transfer_hardware) {
spi_idle_runtime_pm(ctlr);
ctlr->busy = false;
+ ctlr->queue_empty = true;
trace_spi_controller_idle(ctlr);
} else {
kthread_queue_work(ctlr->kworker,
&ctlr->pump_messages);
}
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
+ goto out_unlock;
}
ctlr->busy = false;
- ctlr->idling = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
kfree(ctlr->dummy_rx);
@@ -1574,9 +1711,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
- ctlr->idling = false;
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
+ ctlr->queue_empty = true;
+ goto out_unlock;
}
/* Extract head of queue */
@@ -1590,81 +1726,23 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
ctlr->busy = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- mutex_lock(&ctlr->io_mutex);
-
- if (!was_busy && ctlr->auto_runtime_pm) {
- ret = pm_runtime_resume_and_get(ctlr->dev.parent);
- if (ret < 0) {
- dev_err(&ctlr->dev, "Failed to power device: %d\n",
- ret);
- mutex_unlock(&ctlr->io_mutex);
- return;
- }
- }
-
- if (!was_busy)
- trace_spi_controller_busy(ctlr);
-
- if (!was_busy && ctlr->prepare_transfer_hardware) {
- ret = ctlr->prepare_transfer_hardware(ctlr);
- if (ret) {
- dev_err(&ctlr->dev,
- "failed to prepare transfer hardware: %d\n",
- ret);
-
- if (ctlr->auto_runtime_pm)
- pm_runtime_put(ctlr->dev.parent);
-
- msg->status = ret;
- spi_finalize_current_message(ctlr);
-
- mutex_unlock(&ctlr->io_mutex);
- return;
- }
- }
-
- trace_spi_message_start(msg);
-
- if (ctlr->prepare_message) {
- ret = ctlr->prepare_message(ctlr, msg);
- if (ret) {
- dev_err(&ctlr->dev, "failed to prepare message: %d\n",
- ret);
- msg->status = ret;
- spi_finalize_current_message(ctlr);
- goto out;
- }
- ctlr->cur_msg_prepared = true;
- }
-
- ret = spi_map_msg(ctlr, msg);
- if (ret) {
- msg->status = ret;
- spi_finalize_current_message(ctlr);
- goto out;
- }
-
- if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- xfer->ptp_sts_word_pre = 0;
- ptp_read_system_prets(xfer->ptp_sts);
- }
- }
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (!ret)
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
- ret = ctlr->transfer_one_message(ctlr, msg);
- if (ret) {
- dev_err(&ctlr->dev,
- "failed to transfer one message from queue: %d\n",
- ret);
- goto out;
- }
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
-out:
mutex_unlock(&ctlr->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
cond_resched();
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ mutex_unlock(&ctlr->io_mutex);
}
/**
@@ -1789,6 +1867,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
{
ctlr->running = false;
ctlr->busy = false;
+ ctlr->queue_empty = true;
ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
@@ -1826,7 +1905,7 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
struct spi_message *next;
unsigned long flags;
- /* get a pointer to the next message, if any */
+ /* Get a pointer to the next message, if any */
spin_lock_irqsave(&ctlr->queue_lock, flags);
next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
queue);
@@ -1847,12 +1926,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
{
struct spi_transfer *xfer;
struct spi_message *mesg;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&ctlr->queue_lock, flags);
mesg = ctlr->cur_msg;
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
@@ -1876,7 +1952,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
*/
spi_res_release(ctlr, mesg);
- if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+ if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
@@ -1884,12 +1960,12 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
- spin_lock_irqsave(&ctlr->queue_lock, flags);
- ctlr->cur_msg = NULL;
- ctlr->cur_msg_prepared = false;
- ctlr->fallback = false;
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ mesg->prepared = false;
+
+ WRITE_ONCE(ctlr->cur_msg_incomplete, false);
+ smp_mb(); /* See __spi_pump_transfer_message()... */
+ if (READ_ONCE(ctlr->cur_msg_need_completion))
+ complete(&ctlr->cur_msg_completion);
trace_spi_message_done(mesg);
@@ -1992,6 +2068,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &ctlr->queue);
+ ctlr->queue_empty = false;
if (!ctlr->busy && need_pump)
kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
@@ -2376,9 +2453,6 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
if (lookup->index != -1 && lookup->n++ != lookup->index)
return 1;
- if (lookup->index == -1 && !ctlr)
- return -ENODEV;
-
status = acpi_get_handle(NULL,
sb->resource_source.string_ptr,
&parent_handle);
@@ -2398,7 +2472,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
ctlr = acpi_spi_find_controller_by_adev(adev);
if (!ctlr)
- return -ENODEV;
+ return -EPROBE_DEFER;
lookup->ctlr = ctlr;
}
@@ -2481,8 +2555,8 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
acpi_dev_free_resource_list(&resource_list);
if (ret < 0)
- /* found SPI in _CRS but it points to another controller */
- return ERR_PTR(-ENODEV);
+ /* Found SPI in _CRS but it points to another controller */
+ return ERR_PTR(ret);
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
@@ -2613,11 +2687,6 @@ int spi_slave_abort(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
-static int match_true(struct device *dev, void *data)
-{
- return 1;
-}
-
static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -2625,7 +2694,7 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
dev);
struct device *child;
- child = device_find_child(&ctlr->dev, NULL, match_true);
+ child = device_find_any_child(&ctlr->dev);
return sprintf(buf, "%s\n",
child ? to_spi_device(child)->modalias : NULL);
}
@@ -2644,7 +2713,7 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
if (rc != 1 || !name[0])
return -EINVAL;
- child = device_find_child(&ctlr->dev, NULL, match_true);
+ child = device_find_any_child(&ctlr->dev);
if (child) {
/* Remove registered slave */
device_unregister(child);
@@ -2937,7 +3006,7 @@ int spi_register_controller(struct spi_controller *ctlr)
return status;
if (ctlr->bus_num >= 0) {
- /* devices with a fixed bus num must check-in with the num */
+ /* Devices with a fixed bus num must check-in with the num */
mutex_lock(&board_lock);
id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
ctlr->bus_num + 1, GFP_KERNEL);
@@ -2946,7 +3015,7 @@ int spi_register_controller(struct spi_controller *ctlr)
return id == -ENOSPC ? -EBUSY : id;
ctlr->bus_num = id;
} else if (ctlr->dev.of_node) {
- /* allocate dynamic bus number using Linux idr */
+ /* Allocate dynamic bus number using Linux idr */
id = of_alias_get_id(ctlr->dev.of_node, "spi");
if (id >= 0) {
ctlr->bus_num = id;
@@ -2975,6 +3044,7 @@ int spi_register_controller(struct spi_controller *ctlr)
}
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
+ init_completion(&ctlr->cur_msg_completion);
if (!ctlr->max_dma_len)
ctlr->max_dma_len = INT_MAX;
@@ -3004,7 +3074,7 @@ int spi_register_controller(struct spi_controller *ctlr)
goto free_bus_id;
}
- /* setting last_cs to -1 means no chip selected */
+ /* Setting last_cs to -1 means no chip selected */
ctlr->last_cs = -1;
status = device_add(&ctlr->dev);
@@ -3028,8 +3098,13 @@ int spi_register_controller(struct spi_controller *ctlr)
goto free_bus_id;
}
}
- /* add statistics */
- spin_lock_init(&ctlr->statistics.lock);
+ /* Add statistics */
+ ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
+ if (!ctlr->pcpu_statistics) {
+ dev_err(dev, "Error allocating per-cpu statistics\n");
+ status = -ENOMEM;
+ goto destroy_queue;
+ }
mutex_lock(&board_lock);
list_add_tail(&ctlr->list, &spi_controller_list);
@@ -3042,6 +3117,8 @@ int spi_register_controller(struct spi_controller *ctlr)
acpi_register_spi_devices(ctlr);
return status;
+destroy_queue:
+ spi_destroy_queue(ctlr);
free_bus_id:
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
@@ -3050,9 +3127,9 @@ free_bus_id:
}
EXPORT_SYMBOL_GPL(spi_register_controller);
-static void devm_spi_unregister(void *ctlr)
+static void devm_spi_unregister(struct device *dev, void *res)
{
- spi_unregister_controller(ctlr);
+ spi_unregister_controller(*(struct spi_controller **)res);
}
/**
@@ -3071,13 +3148,22 @@ static void devm_spi_unregister(void *ctlr)
int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr)
{
+ struct spi_controller **ptr;
int ret;
+ ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
ret = spi_register_controller(ctlr);
- if (ret)
- return ret;
+ if (!ret) {
+ *ptr = ctlr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
- return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr);
+ return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
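
The devm_spi_register_controller() change above goes back to an explicit devres node, so the controller is only scheduled for unregistration when registration actually succeeded. A generic sketch of that devres_alloc()/devres_add() pattern, with hypothetical my_* helpers standing in for the real setup and teardown:

#include <linux/device.h>
#include <linux/gfp.h>

struct my_obj {
        int id;
};

static int my_setup(struct my_obj *obj) { return 0; }          /* stub */
static void my_teardown(struct my_obj *obj) { }                /* stub */

static void devm_my_release(struct device *dev, void *res)
{
        my_teardown(*(struct my_obj **)res);
}

static int devm_my_setup(struct device *dev, struct my_obj *obj)
{
        struct my_obj **ptr;
        int ret;

        ptr = devres_alloc(devm_my_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = my_setup(obj);
        if (ret) {
                /* Nothing was set up, so drop the devres node again */
                devres_free(ptr);
                return ret;
        }

        *ptr = obj;
        devres_add(dev, ptr);   /* released automatically on device detach */
        return 0;
}
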
@@ -3124,7 +3210,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_del(&ctlr->dev);
- /* free bus id */
+ /* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
idr_remove(&spi_master_idr, id);
@@ -3183,14 +3269,14 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr,
struct spi_replaced_transfers *rxfer = res;
size_t i;
- /* call extra callback if requested */
+ /* Call extra callback if requested */
if (rxfer->release)
rxfer->release(ctlr, msg, res);
- /* insert replaced transfers back into the message */
+ /* Insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
- /* remove the formerly inserted entries */
+ /* Remove the formerly inserted entries */
for (i = 0; i < rxfer->inserted; i++)
list_del(&rxfer->inserted_transfers[i].transfer_list);
}
@@ -3223,7 +3309,7 @@ static struct spi_replaced_transfers *spi_replace_transfers(
struct spi_transfer *xfer;
size_t i;
- /* allocate the structure using spi_res */
+ /* Allocate the structure using spi_res */
rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
struct_size(rxfer, inserted_transfers, insert)
+ extradatasize,
@@ -3231,15 +3317,15 @@ static struct spi_replaced_transfers *spi_replace_transfers(
if (!rxfer)
return ERR_PTR(-ENOMEM);
- /* the release code to invoke before running the generic release */
+ /* The release code to invoke before running the generic release */
rxfer->release = release;
- /* assign extradata */
+ /* Assign extradata */
if (extradatasize)
rxfer->extradata =
&rxfer->inserted_transfers[insert];
- /* init the replaced_transfers list */
+ /* Init the replaced_transfers list */
INIT_LIST_HEAD(&rxfer->replaced_transfers);
/*
@@ -3248,7 +3334,7 @@ static struct spi_replaced_transfers *spi_replace_transfers(
*/
rxfer->replaced_after = xfer_first->transfer_list.prev;
- /* remove the requested number of transfers */
+ /* Remove the requested number of transfers */
for (i = 0; i < remove; i++) {
/*
* If the entry after replaced_after it is msg->transfers
@@ -3258,14 +3344,14 @@ static struct spi_replaced_transfers *spi_replace_transfers(
if (rxfer->replaced_after->next == &msg->transfers) {
dev_err(&msg->spi->dev,
"requested to remove more spi_transfers than are available\n");
- /* insert replaced transfers back into the message */
+ /* Insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers,
rxfer->replaced_after);
- /* free the spi_replace_transfer structure */
+ /* Free the spi_replace_transfer structure... */
spi_res_free(rxfer);
- /* and return with an error */
+ /* ...and return with an error */
return ERR_PTR(-EINVAL);
}
@@ -3282,26 +3368,26 @@ static struct spi_replaced_transfers *spi_replace_transfers(
* based on the first transfer to get removed.
*/
for (i = 0; i < insert; i++) {
- /* we need to run in reverse order */
+ /* We need to run in reverse order */
xfer = &rxfer->inserted_transfers[insert - 1 - i];
- /* copy all spi_transfer data */
+ /* Copy all spi_transfer data */
memcpy(xfer, xfer_first, sizeof(*xfer));
- /* add to list */
+ /* Add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
- /* clear cs_change and delay for all but the last */
+ /* Clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
xfer->delay.value = 0;
}
}
- /* set up inserted */
+ /* Set up inserted... */
rxfer->inserted = insert;
- /* and register it with spi_res/spi_message */
+ /* ...and register it with spi_res/spi_message */
spi_res_add(msg, rxfer);
return rxfer;
@@ -3318,10 +3404,10 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
size_t offset;
size_t count, i;
- /* calculate how many we have to replace */
+ /* Calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
- /* create replacement */
+ /* Create replacement */
srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
if (IS_ERR(srt))
return PTR_ERR(srt);
@@ -3344,9 +3430,9 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
*/
xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
- /* all the others need rx_buf/tx_buf also set */
+ /* All the others need rx_buf/tx_buf also set */
for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
- /* update rx_buf, tx_buf and dma */
+ /* Update rx_buf, tx_buf and dma */
if (xfers[i].rx_buf)
xfers[i].rx_buf += offset;
if (xfers[i].rx_dma)
@@ -3356,7 +3442,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
if (xfers[i].tx_dma)
xfers[i].tx_dma += offset;
- /* update length */
+ /* Update length */
xfers[i].len = min(maxsize, xfers[i].len - offset);
}
@@ -3366,10 +3452,10 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
*/
*xferp = &xfers[count - 1];
- /* increment statistics counters */
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
+ /* Increment statistics counters */
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
transfers_split_maxsize);
- SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
+ SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
transfers_split_maxsize);
return 0;
@@ -3628,7 +3714,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return ret;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
- /* don't change cs_change on the last entry in the list */
+ /* Don't change cs_change on the last entry in the list */
if (list_is_last(&xfer->transfer_list, &message->transfers))
break;
xfer->cs_change = 1;
@@ -3721,7 +3807,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
!(spi->mode & SPI_TX_QUAD))
return -EINVAL;
}
- /* check transfer rx_nbits */
+ /* Check transfer rx_nbits */
if (xfer->rx_buf) {
if (spi->mode & SPI_NO_RX)
return -EINVAL;
@@ -3760,8 +3846,8 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
message->spi = spi;
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
- SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
trace_spi_message_submit(message);
@@ -3880,6 +3966,39 @@ static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
}
+static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ bool was_busy;
+ int ret;
+
+ mutex_lock(&ctlr->io_mutex);
+
+ was_busy = ctlr->busy;
+
+ ctlr->cur_msg = msg;
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (ret)
+ goto out;
+
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
+ if (!was_busy) {
+ kfree(ctlr->dummy_rx);
+ ctlr->dummy_rx = NULL;
+ kfree(ctlr->dummy_tx);
+ ctlr->dummy_tx = NULL;
+ if (ctlr->unprepare_transfer_hardware &&
+ ctlr->unprepare_transfer_hardware(ctlr))
+ dev_err(&ctlr->dev,
+ "failed to unprepare transfer hardware\n");
+ spi_idle_runtime_pm(ctlr);
+ }
+
+out:
+ mutex_unlock(&ctlr->io_mutex);
+}
+
/*-------------------------------------------------------------------------*/
/*
@@ -3898,51 +4017,51 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
DECLARE_COMPLETION_ONSTACK(done);
int status;
struct spi_controller *ctlr = spi->controller;
- unsigned long flags;
status = __spi_validate(spi, message);
if (status != 0)
return status;
- message->complete = spi_complete;
- message->context = &done;
message->spi = spi;
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
- SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
/*
- * If we're not using the legacy transfer method then we will
- * try to transfer in the calling context so special case.
- * This code would be less tricky if we could remove the
- * support for driver implemented message queues.
+ * Checking queue_empty here only guarantees async/sync message
+ * ordering when coming from the same context. It does not need to
+ * guard against reentrancy from a different context. The io_mutex
+ * will catch those cases.
*/
- if (ctlr->transfer == spi_queued_transfer) {
- spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ if (READ_ONCE(ctlr->queue_empty)) {
+ message->actual_length = 0;
+ message->status = -EINPROGRESS;
trace_spi_message_submit(message);
- status = __spi_queued_transfer(spi, message, false);
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
- spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- } else {
- status = spi_async_locked(spi, message);
+ __spi_transfer_message_noqueue(ctlr, message);
+
+ return message->status;
}
+ /*
+ * There are messages in the async queue that could have originated
+ * from the same context, so we need to preserve ordering.
+ * Therefore we send the message to the async queue and wait until they
+ * are completed.
+ */
+ message->complete = spi_complete;
+ message->context = &done;
+ status = spi_async_locked(spi, message);
if (status == 0) {
- /* Push out the messages in the calling context if we can */
- if (ctlr->transfer == spi_queued_transfer) {
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
- spi_sync_immediate);
- SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
- spi_sync_immediate);
- __spi_pump_messages(ctlr, false);
- }
-
wait_for_completion(&done);
status = message->status;
}
message->context = NULL;
+
return status;
}
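
For reference, the fast path introduced above is exercised by any ordinary synchronous caller; a minimal consumer built on public SPI APIs might look like the sketch below (the 0x9f opcode and the buffer handling are arbitrary example values, not taken from this patch):

/* Read a few ID bytes after sending a one-byte command (illustrative only). */
static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
{
	u8 cmd = 0x9f;				/* example opcode */
	struct spi_transfer xfers[] = {
		{ .tx_buf = &cmd, .len = 1 },
		{ .rx_buf = id,   .len = len },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	/* Takes the new noqueue path when the controller queue is empty. */
	return spi_sync(spi, &msg);
}
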
@@ -4026,7 +4145,7 @@ int spi_bus_lock(struct spi_controller *ctlr)
ctlr->bus_lock_flag = 1;
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- /* mutex remains locked until spi_bus_unlock is called */
+ /* Mutex remains locked until spi_bus_unlock() is called */
return 0;
}
@@ -4055,7 +4174,7 @@ int spi_bus_unlock(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
-/* portable code must never pass more than 32 bytes */
+/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
static u8 *buf;
@@ -4121,7 +4240,7 @@ int spi_write_then_read(struct spi_device *spi,
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
- /* do the i/o */
+ /* Do the i/o */
status = spi_sync(spi, &message);
if (status == 0)
memcpy(rxbuf, x[1].rx_buf, n_rx);
@@ -4138,7 +4257,7 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
-/* must call put_device() when done with returned spi_device device */
+/* Must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
@@ -4146,7 +4265,7 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
return dev ? to_spi_device(dev) : NULL;
}
-/* the spi controllers are not using spi_bus, so we find it with another way */
+/* The spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
struct device *dev;
@@ -4157,7 +4276,7 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node
if (!dev)
return NULL;
- /* reference got in class_find_device */
+ /* Reference got in class_find_device */
return container_of(dev, struct spi_controller, dev);
}
@@ -4172,7 +4291,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
case OF_RECONFIG_CHANGE_ADD:
ctlr = of_find_spi_controller_by_node(rd->dn->parent);
if (ctlr == NULL)
- return NOTIFY_OK; /* not for us */
+ return NOTIFY_OK; /* Not for us */
if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
put_device(&ctlr->dev);
@@ -4191,19 +4310,19 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
break;
case OF_RECONFIG_CHANGE_REMOVE:
- /* already depopulated? */
+ /* Already depopulated? */
if (!of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
- /* find our device by node */
+ /* Find our device by node */
spi = of_find_spi_device_by_node(rd->dn);
if (spi == NULL)
- return NOTIFY_OK; /* no? not meant for us */
+ return NOTIFY_OK; /* No? not meant for us */
- /* unregister takes one ref away */
+ /* Unregister takes one ref away */
spi_unregister_device(spi);
- /* and put the reference of the find */
+ /* And put the reference of the find */
put_device(&spi->dev);
break;
}
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index b37ead9e2fad..a456ce5141e1 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -387,6 +387,23 @@ static struct bus_type spmi_bus_type = {
};
/**
+ * spmi_device_from_of() - get the associated SPMI device from a device node
+ *
+ * @np: device node
+ *
+ * Returns the struct spmi_device associated with a device node or NULL.
+ */
+struct spmi_device *spmi_device_from_of(struct device_node *np)
+{
+ struct device *dev = bus_find_device_by_of_node(&spmi_bus_type, np);
+
+ if (dev)
+ return to_spmi_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_device_from_of);
+
+/**
* spmi_controller_alloc() - Allocate a new SPMI device
* @ctrl: associated controller
*
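
A caller of the new spmi_device_from_of() helper above owns a device reference on success and must drop it with put_device() when done; a hedged usage sketch (the "example,pmic" phandle property name is an assumption):

/* Resolve an SPMI device referenced by a phandle in a consumer's DT node. */
static struct spmi_device *example_get_pmic(struct device_node *consumer)
{
	struct device_node *np;
	struct spmi_device *sdev;

	np = of_parse_phandle(consumer, "example,pmic", 0);
	if (!np)
		return NULL;

	sdev = spmi_device_from_of(np);	/* holds a device reference on success */
	of_node_put(np);
	return sdev;			/* caller must put_device(&sdev->dev) when done */
}
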
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0a993c47273e..3bd80f9695ac 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -42,8 +42,6 @@ source "drivers/staging/rts5208/Kconfig"
source "drivers/staging/octeon/Kconfig"
-source "drivers/staging/octeon-usb/Kconfig"
-
source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2800ab9b2d1d..1d9ae39fea14 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += r8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
-obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme_user/
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index 6fd549a424d5..b8d55aa8c5c7 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -196,8 +196,7 @@ static int update_onboard_backlight(struct backlight_device *bd)
"%s: power=%d, fb_blank=%d\n",
__func__, bd->props.power, bd->props.fb_blank);
- on = (bd->props.power == FB_BLANK_UNBLANK) &&
- (bd->props.fb_blank == FB_BLANK_UNBLANK);
+ on = !backlight_is_blank(bd);
/* Onboard backlight connected to GPIO0 on SSD1351, GPIO1 unused */
write_reg(par, 0xB5, on ? 0x03 : 0x02);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 60b2278d8b16..afaba94d1d1c 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -137,8 +137,7 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
"%s: polarity=%d, power=%d, fb_blank=%d\n",
__func__, polarity, bd->props.power, bd->props.fb_blank);
- if ((bd->props.power == FB_BLANK_UNBLANK) &&
- (bd->props.fb_blank == FB_BLANK_UNBLANK))
+ if (!backlight_is_blank(bd))
gpiod_set_value(par->gpio.led[0], polarity);
else
gpiod_set_value(par->gpio.led[0], !polarity);
@@ -655,7 +654,6 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbdefio->delay = HZ / fps;
fbdefio->sort_pagereflist = true;
fbdefio->deferred_io = fbtft_deferred_io;
- fb_deferred_io_init(info);
snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -666,6 +664,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
info->fix.line_length = width * bpp / 8;
info->fix.accel = FB_ACCEL_NONE;
info->fix.smem_len = vmem_size;
+ fb_deferred_io_init(info);
info->var.rotate = pdata->rotate;
info->var.xres = width;
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index 04df6f9f5403..cc6d80554c98 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -17,12 +17,6 @@
#define GDM_TTY_MAJOR 0
#define GDM_TTY_MINOR 32
-#define ACM_CTRL_DTR 0x01
-#define ACM_CTRL_RTS 0x02
-#define ACM_CTRL_DSR 0x02
-#define ACM_CTRL_RI 0x08
-#define ACM_CTRL_DCD 0x01
-
#define WRITE_SIZE 2048
#define MUX_TX_MAX_SIZE 2048
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 843760675876..05e91e6bc2a0 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -115,7 +115,7 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
int num)
{
int i;
- struct snd_soc_dapm_widget *w, *next_w;
+ struct snd_soc_dapm_widget *w, *tmp_w;
#ifdef CONFIG_DEBUG_FS
struct dentry *parent = dapm->debugfs_dapm;
struct dentry *debugfs_w = NULL;
@@ -124,13 +124,13 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
mutex_lock(&dapm->card->dapm_mutex);
for (i = 0; i < num; i++) {
/* below logic can be optimized to identify widget pointer */
- list_for_each_entry_safe(w, next_w, &dapm->card->widgets,
- list) {
- if (w->dapm != dapm)
- continue;
- if (!strcmp(w->name, widget->name))
+ w = NULL;
+ list_for_each_entry(tmp_w, &dapm->card->widgets, list) {
+ if (tmp_w->dapm == dapm &&
+ !strcmp(tmp_w->name, widget->name)) {
+ w = tmp_w;
break;
- w = NULL;
+ }
}
if (!w) {
dev_err(dapm->dev, "%s: widget not found\n",
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 687c6405c65b..3342b84597da 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -102,7 +102,7 @@ unlock:
}
static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
- struct fw_mgmt_ioc_get_intf_version *fw_info)
+ struct fw_mgmt_ioc_get_intf_version *fw_info)
{
struct gb_connection *connection = fw_mgmt->connection;
struct gb_fw_mgmt_interface_fw_version_response response;
@@ -240,7 +240,7 @@ static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
}
static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
- struct fw_mgmt_ioc_get_backend_version *fw_info)
+ struct fw_mgmt_ioc_get_backend_version *fw_info)
{
struct gb_connection *connection = fw_mgmt->connection;
struct gb_fw_mgmt_backend_fw_version_request request;
@@ -473,7 +473,7 @@ static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
return -EFAULT;
ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
- backend_update.firmware_tag);
+ backend_update.firmware_tag);
if (ret)
return ret;
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 2471448ba42a..1a61fce98056 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -870,7 +870,7 @@ static int gb_loopback_fn(void *data)
if (gb->send_count == gb->iteration_max) {
mutex_unlock(&gb->mutex);
- /* Wait for synchronous and asynchronus completion */
+ /* Wait for synchronous and asynchronous completion */
gb_loopback_async_wait_all(gb);
/* Mark complete unless user-space has poked us */
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 1fd6a0c6e1d8..421ce9dbf44c 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -22,10 +22,14 @@ if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/atomisp/Kconfig"
+source "drivers/staging/media/av7110/Kconfig"
+
source "drivers/staging/media/hantro/Kconfig"
source "drivers/staging/media/imx/Kconfig"
+source "drivers/staging/media/ipu3/Kconfig"
+
source "drivers/staging/media/max96712/Kconfig"
source "drivers/staging/media/meson/vdec/Kconfig"
@@ -34,14 +38,12 @@ source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/rkvdec/Kconfig"
-source "drivers/staging/media/sunxi/Kconfig"
+source "drivers/staging/media/stkwebcam/Kconfig"
-source "drivers/staging/media/zoran/Kconfig"
+source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-video/Kconfig"
-source "drivers/staging/media/ipu3/Kconfig"
-
-source "drivers/staging/media/av7110/Kconfig"
+source "drivers/staging/media/zoran/Kconfig"
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 66d6f6d51c86..950e96f10aad 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_VIDEO_MAX96712) += max96712/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
+obj-$(CONFIG_VIDEO_STKWEBCAM) += stkwebcam/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 2485d7b3fee2..fb7b406f50bf 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -13,7 +13,6 @@ atomisp = $(srctree)/drivers/staging/media/atomisp/
# SPDX-License-Identifier: GPL-2.0
atomisp-objs += \
- pci/atomisp_acc.o \
pci/atomisp_cmd.o \
pci/atomisp_compat_css20.o \
pci/atomisp_csi2.o \
@@ -45,9 +44,7 @@ atomisp-objs += \
pci/camera/pipe/src/pipe_util.o \
pci/camera/util/src/util.o \
pci/hmm/hmm_bo.o \
- pci/hmm/hmm_dynamic_pool.o \
pci/hmm/hmm.o \
- pci/hmm/hmm_reserved_pool.o \
pci/ia_css_device_access.o \
pci/ia_css_isp_configs.o \
pci/ia_css_isp_states.o \
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 00d6842c07d6..3c81ab73cdae 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -616,13 +616,15 @@ static int mt9m114_get_intg_factor(struct i2c_client *client,
struct camera_mipi_info *info,
const struct mt9m114_res_struct *res)
{
- struct atomisp_sensor_mode_data *buf = &info->data;
+ struct atomisp_sensor_mode_data *buf;
u32 reg_val;
int ret;
if (!info)
return -EINVAL;
+ buf = &info->data;
+
ret = mt9m114_read_reg(client, MISENSOR_32BIT,
REG_PIXEL_CLK, &reg_val);
if (ret)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index da98094d7094..d5d099ac1b70 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -906,22 +906,17 @@ static int ov2722_get_fmt(struct v4l2_subdev *sd,
static int ov2722_detect(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
- u16 high, low;
- int ret;
+ u16 high = 0, low = 0;
u16 id;
u8 revision;
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -ENODEV;
- ret = ov2722_read_reg(client, OV2722_8BIT,
- OV2722_SC_CMMN_CHIP_ID_H, &high);
- if (ret) {
- dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
- return -ENODEV;
- }
- ret = ov2722_read_reg(client, OV2722_8BIT,
- OV2722_SC_CMMN_CHIP_ID_L, &low);
+ ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_H, &high);
+ ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_L, &low);
id = (high << 8) | low;
if ((id != OV2722_ID) && (id != OV2720_ID)) {
@@ -929,8 +924,9 @@ static int ov2722_detect(struct i2c_client *client)
return -ENODEV;
}
- ret = ov2722_read_reg(client, OV2722_8BIT,
- OV2722_SC_CMMN_SUB_ID, &high);
+ high = 0;
+ ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_SUB_ID, &high);
revision = (u8)high & 0x0f;
dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index 79df07bd69b6..a1366666f49c 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -855,7 +855,7 @@ static struct ov5693_reg const ov5693_1616x1216_30fps[] = {
{OV5693_8BIT, 0x3813, 0x06}, /*{3812,3813} windowing Y offset*/
{OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
{OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
- {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binnning control*/
+ {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binning control*/
{OV5693_8BIT, 0x3821, 0x1e}, /*MIRROR control*/
{OV5693_8BIT, 0x5002, 0x00},
{OV5693_8BIT, 0x5041, 0x84},
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm.h b/drivers/staging/media/atomisp/include/hmm/hmm.h
index b48bdf5c274c..c0384bb0a762 100644
--- a/drivers/staging/media/atomisp/include/hmm/hmm.h
+++ b/drivers/staging/media/atomisp/include/hmm/hmm.h
@@ -26,21 +26,18 @@
#include <linux/slab.h>
#include <linux/mm.h>
-#include "hmm/hmm_pool.h"
+#include "hmm_common.h"
+#include "hmm/hmm_bo.h"
#include "ia_css_types.h"
#define mmgr_NULL ((ia_css_ptr)0)
#define mmgr_EXCEPTION ((ia_css_ptr) - 1)
-int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type);
-void hmm_pool_unregister(enum hmm_pool_type pool_type);
-
int hmm_init(void);
void hmm_cleanup(void);
-ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
- int from_highmem, const void __user *userptr,
- const uint16_t attrs);
+ia_css_ptr hmm_alloc(size_t bytes);
+ia_css_ptr hmm_create_from_userdata(size_t bytes, const void __user *userptr);
void hmm_free(ia_css_ptr ptr);
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes);
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes);
@@ -69,17 +66,6 @@ void hmm_vunmap(ia_css_ptr virt);
void hmm_flush_vmap(ia_css_ptr virt);
/*
- * Address translation from ISP shared memory address to kernel virtual address
- * if the memory is not vmmaped, then do it.
- */
-void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached);
-
-/*
- * Address translation from kernel virtual address to ISP shared memory address
- */
-ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr);
-
-/*
* map ISP memory starts with virt to specific vma.
*
* used for mmap operation.
@@ -89,16 +75,6 @@ ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr);
*/
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt);
-/* show memory statistic
- */
-void hmm_show_mem_stat(const char *func, const int line);
-
-/* init memory statistic
- */
-void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr);
-
-extern bool dypool_enable;
-extern unsigned int dypool_pgnr;
extern struct hmm_bo_device bo_device;
#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
index 8c78a5d87b65..385e22fc4a46 100644
--- a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
@@ -76,17 +76,10 @@
enum hmm_bo_type {
HMM_BO_PRIVATE,
- HMM_BO_SHARE,
HMM_BO_USER,
HMM_BO_LAST,
};
-enum hmm_page_type {
- HMM_PAGE_TYPE_RESERVED,
- HMM_PAGE_TYPE_DYNAMIC,
- HMM_PAGE_TYPE_GENERAL,
-};
-
#define HMM_BO_MASK 0x1
#define HMM_BO_FREE 0x0
#define HMM_BO_ALLOCED 0x1
@@ -121,11 +114,6 @@ struct hmm_bo_device {
struct kmem_cache *bo_cache;
};
-struct hmm_page_object {
- struct page *page;
- enum hmm_page_type type;
-};
-
struct hmm_buffer_object {
struct hmm_bo_device *bdev;
struct list_head list;
@@ -136,8 +124,6 @@ struct hmm_buffer_object {
/* mutex protecting this BO */
struct mutex mutex;
enum hmm_bo_type type;
- struct hmm_page_object *page_obj; /* physical pages */
- int from_highmem;
int mmap_count;
int status;
int mem_type;
@@ -218,34 +204,20 @@ void hmm_bo_ref(struct hmm_buffer_object *bo);
*/
void hmm_bo_unref(struct hmm_buffer_object *bo);
-/*
- * allocate/free physical pages for the bo. will try to alloc mem
- * from highmem if from_highmem is set, and type indicate that the
- * pages will be allocated by using video driver (for share buffer)
- * or by ISP driver itself.
- */
-
int hmm_bo_allocated(struct hmm_buffer_object *bo);
/*
- * allocate/free physical pages for the bo. will try to alloc mem
- * from highmem if from_highmem is set, and type indicate that the
+ * Allocate/Free physical pages for the bo. Type indicates if the
* pages will be allocated by using video driver (for share buffer)
* or by ISP driver itself.
*/
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
- enum hmm_bo_type type, int from_highmem,
- const void __user *userptr, bool cached);
+ enum hmm_bo_type type,
+ const void __user *userptr);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
/*
- * get physical page info of the bo.
- */
-int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
- struct hmm_page_object **page_obj, int *pgnr);
-
-/*
* bind/unbind the physical pages to a virtual address space.
*/
int hmm_bo_bind(struct hmm_buffer_object *bo);
@@ -280,9 +252,6 @@ void hmm_bo_vunmap(struct hmm_buffer_object *bo);
int hmm_bo_mmap(struct vm_area_struct *vma,
struct hmm_buffer_object *bo);
-extern struct hmm_pool dynamic_pool;
-extern struct hmm_pool reserved_pool;
-
/*
* find the buffer object by its virtual address vaddr.
* return NULL if no such buffer object found.
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/include/hmm/hmm_common.h
index 7152e9b52ba4..d8610b135de0 100644
--- a/drivers/staging/media/atomisp/include/hmm/hmm_common.h
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_common.h
@@ -68,30 +68,4 @@
#define check_null_return_void(ptr, fmt, arg ...) \
var_equal_return_void(ptr, NULL, fmt, ## arg)
-/* hmm_mem_stat is used to trace the hmm mem used by ISP pipe. The unit is page
- * number.
- *
- * res_size: reserved mem pool size, being allocated from system at system boot time.
- * res_size >= res_cnt.
- * sys_size: system mem pool size, being allocated from system at camera running time.
- * dyc_size: dynamic mem pool size.
- * dyc_thr: dynamic mem pool high watermark.
- * dyc_size <= dyc_thr.
- * usr_size: user ptr mem size.
- *
- * res_cnt: track the mem allocated from reserved pool at camera running time.
- * tol_cnt: track the total mem used by ISP pipe at camera running time.
- */
-struct _hmm_mem_stat {
- int res_size;
- int sys_size;
- int dyc_size;
- int dyc_thr;
- int usr_size;
- int res_cnt;
- int tol_cnt;
-};
-
-extern struct _hmm_mem_stat hmm_mem_stat;
-
#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/include/hmm/hmm_pool.h
deleted file mode 100644
index 3fef57de973c..000000000000
--- a/drivers/staging/media/atomisp/include/hmm/hmm_pool.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Medifield PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-#ifndef __HMM_POOL_H__
-#define __HMM_POOL_H__
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include "hmm_common.h"
-#include "hmm/hmm_bo.h"
-
-#define ALLOC_PAGE_FAIL_NUM 5
-
-enum hmm_pool_type {
- HMM_POOL_TYPE_RESERVED,
- HMM_POOL_TYPE_DYNAMIC,
-};
-
-/**
- * struct hmm_pool_ops - memory pool callbacks.
- *
- * @pool_init: initialize the memory pool.
- * @pool_exit: uninitialize the memory pool.
- * @pool_alloc_pages: allocate pages from memory pool.
- * @pool_free_pages: free pages to memory pool.
- * @pool_inited: check whether memory pool is initialized.
- */
-struct hmm_pool_ops {
- int (*pool_init)(void **pool, unsigned int pool_size);
- void (*pool_exit)(void **pool);
- unsigned int (*pool_alloc_pages)(void *pool,
- struct hmm_page_object *page_obj,
- unsigned int size, bool cached);
- void (*pool_free_pages)(void *pool,
- struct hmm_page_object *page_obj);
- int (*pool_inited)(void *pool);
-};
-
-struct hmm_pool {
- struct hmm_pool_ops *pops;
-
- void *pool_info;
-};
-
-/**
- * struct hmm_reserved_pool_info - represents reserved pool private data.
- * @pages: a array that store physical pages.
- * The array is as reserved memory pool.
- * @index: to indicate the first blank page number
- * in reserved memory pool(pages array).
- * @pgnr: the valid page amount in reserved memory
- * pool.
- * @list_lock: list lock is used to protect the operation
- * to reserved memory pool.
- * @flag: reserved memory pool state flag.
- */
-struct hmm_reserved_pool_info {
- struct page **pages;
-
- unsigned int index;
- unsigned int pgnr;
- spinlock_t list_lock;
- bool initialized;
-};
-
-/**
- * struct hmm_dynamic_pool_info - represents dynamic pool private data.
- * @pages_list: a list that store physical pages.
- * The pages list is as dynamic memory pool.
- * @list_lock: list lock is used to protect the operation
- * to dynamic memory pool.
- * @flag: dynamic memory pool state flag.
- * @pgptr_cache: struct kmem_cache, manages a cache.
- */
-struct hmm_dynamic_pool_info {
- struct list_head pages_list;
-
- /* list lock is used to protect the free pages block lists */
- spinlock_t list_lock;
-
- struct kmem_cache *pgptr_cache;
- bool initialized;
-
- unsigned int pool_size;
- unsigned int pgnr;
-};
-
-struct hmm_page {
- struct page *page;
- struct list_head list;
-};
-
-extern struct hmm_pool_ops reserved_pops;
-extern struct hmm_pool_ops dynamic_pops;
-
-#endif
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index 22c4103b0385..f96f5adbd9de 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -740,24 +740,6 @@ enum atomisp_frame_status {
ATOMISP_FRAME_STATUS_FLASH_FAILED,
};
-enum atomisp_acc_type {
- ATOMISP_ACC_STANDALONE, /* Stand-alone acceleration */
- ATOMISP_ACC_OUTPUT, /* Accelerator stage on output frame */
- ATOMISP_ACC_VIEWFINDER /* Accelerator stage on viewfinder frame */
-};
-
-enum atomisp_acc_arg_type {
- ATOMISP_ACC_ARG_SCALAR_IN, /* Scalar input argument */
- ATOMISP_ACC_ARG_SCALAR_OUT, /* Scalar output argument */
- ATOMISP_ACC_ARG_SCALAR_IO, /* Scalar in/output argument */
- ATOMISP_ACC_ARG_PTR_IN, /* Pointer input argument */
- ATOMISP_ACC_ARG_PTR_OUT, /* Pointer output argument */
- ATOMISP_ACC_ARG_PTR_IO, /* Pointer in/output argument */
- ATOMISP_ARG_PTR_NOFLUSH, /* Pointer argument will not be flushed */
- ATOMISP_ARG_PTR_STABLE, /* Pointer input argument that is stable */
- ATOMISP_ACC_ARG_FRAME /* Frame argument */
-};
-
/* ISP memories, isp2400 */
enum atomisp_acc_memory {
ATOMISP_ACC_MEMORY_PMEM0 = 0,
@@ -836,56 +818,6 @@ enum atomisp_burst_capture_options {
#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10
#define EXT_ISP_SHOT_MODE_SPORTS 11
-struct atomisp_sp_arg {
- enum atomisp_acc_arg_type type; /* Type of SP argument */
- void *value; /* Value of SP argument */
- unsigned int size; /* Size of SP argument */
-};
-
-/* Acceleration API */
-
-/* For CSS 1.0 only */
-struct atomisp_acc_fw_arg {
- unsigned int fw_handle;
- unsigned int index;
- void __user *value;
- size_t size;
-};
-
-/*
- * Set arguments after first mapping with ATOMISP_IOC_ACC_S_MAPPED_ARG.
- */
-struct atomisp_acc_s_mapped_arg {
- unsigned int fw_handle;
- __u32 memory; /* one of enum atomisp_acc_memory */
- size_t length;
- unsigned long css_ptr;
-};
-
-struct atomisp_acc_fw_abort {
- unsigned int fw_handle;
- /* Timeout in us */
- unsigned int timeout;
-};
-
-struct atomisp_acc_fw_load {
- unsigned int size;
- unsigned int fw_handle;
- void __user *data;
-};
-
-/*
- * Load firmware to specified pipeline.
- */
-struct atomisp_acc_fw_load_to_pipe {
- __u32 flags; /* Flags, see below for valid values */
- unsigned int fw_handle; /* Handle, filled by kernel. */
- __u32 size; /* Firmware binary size */
- void __user *data; /* Pointer to firmware */
- __u32 type; /* Binary type */
- __u32 reserved[3]; /* Set to zero */
-};
-
/*
* Set Senor run mode
*/
@@ -893,37 +825,6 @@ struct atomisp_s_runmode {
__u32 mode;
};
-#define ATOMISP_ACC_FW_LOAD_FL_PREVIEW BIT(0)
-#define ATOMISP_ACC_FW_LOAD_FL_COPY BIT(1)
-#define ATOMISP_ACC_FW_LOAD_FL_VIDEO BIT(2)
-#define ATOMISP_ACC_FW_LOAD_FL_CAPTURE BIT(3)
-#define ATOMISP_ACC_FW_LOAD_FL_ACC BIT(4)
-#define ATOMISP_ACC_FW_LOAD_FL_ENABLE BIT(16)
-
-#define ATOMISP_ACC_FW_LOAD_TYPE_NONE 0 /* Normal binary: don't use */
-#define ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT 1 /* Stage on output */
-#define ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER 2 /* Stage on viewfinder */
-#define ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE 3 /* Stand-alone acceleration */
-
-struct atomisp_acc_map {
- __u32 flags; /* Flags, see list below */
- __u32 length; /* Length of data in bytes */
- void __user *user_ptr; /* Pointer into user space */
- unsigned long css_ptr; /* Pointer into CSS address space */
- __u32 reserved[4]; /* Set to zero */
-};
-
-#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */
-#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */
-#define ATOMISP_MAP_FLAG_CONTIGUOUS 0x0004
-#define ATOMISP_MAP_FLAG_CLEARED 0x0008
-
-struct atomisp_acc_state {
- __u32 flags; /* Flags, see list below */
-#define ATOMISP_STATE_FLAG_ENABLE ATOMISP_ACC_FW_LOAD_FL_ENABLE
- unsigned int fw_handle;
-};
-
struct atomisp_update_exposure {
unsigned int gain;
unsigned int digi_gain;
@@ -1091,29 +992,6 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_S_3A_CONFIG \
_IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
-/* Accelerate ioctls */
-#define ATOMISP_IOC_ACC_LOAD \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load)
-
-#define ATOMISP_IOC_ACC_UNLOAD \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
-
-/* For CSS 1.0 only */
-#define ATOMISP_IOC_ACC_S_ARG \
- _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg)
-
-#define ATOMISP_IOC_ACC_START \
- _IOW('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
-
-#define ATOMISP_IOC_ACC_WAIT \
- _IOW('v', BASE_VIDIOC_PRIVATE + 25, unsigned int)
-
-#define ATOMISP_IOC_ACC_ABORT \
- _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_abort)
-
-#define ATOMISP_IOC_ACC_DESTAB \
- _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg)
-
/* sensor OTP memory read */
#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \
_IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data)
@@ -1133,24 +1011,6 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \
_IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data)
-/*
- * Ioctls to map and unmap user buffers to CSS address space for acceleration.
- * User fills fields length and user_ptr and sets other fields to zero,
- * kernel may modify the flags and sets css_ptr.
- */
-#define ATOMISP_IOC_ACC_MAP \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
-
-/* User fills fields length, user_ptr, and css_ptr and zeroes other fields. */
-#define ATOMISP_IOC_ACC_UNMAP \
- _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
-
-#define ATOMISP_IOC_ACC_S_MAPPED_ARG \
- _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg)
-
-#define ATOMISP_IOC_ACC_LOAD_TO_PIPE \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe)
-
#define ATOMISP_IOC_S_PARAMETERS \
_IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters)
@@ -1184,12 +1044,6 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_S_EXPOSURE_WINDOW \
_IOW('v', BASE_VIDIOC_PRIVATE + 40, struct atomisp_ae_window)
-#define ATOMISP_IOC_S_ACC_STATE \
- _IOW('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state)
-
-#define ATOMISP_IOC_G_ACC_STATE \
- _IOR('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state)
-
#define ATOMISP_IOC_INJECT_A_FAKE_EVENT \
_IOW('v', BASE_VIDIOC_PRIVATE + 42, int)
diff --git a/drivers/staging/media/atomisp/notes.txt b/drivers/staging/media/atomisp/notes.txt
new file mode 100644
index 000000000000..d128b792e05f
--- /dev/null
+++ b/drivers/staging/media/atomisp/notes.txt
@@ -0,0 +1,30 @@
+Some notes about the working of the atomisp drivers (learned while working
+on cleaning it up).
+
+The atomisp seems to be a generic DSP (ISP)-like processor without a fixed
+pipeline. It does not have its own memory, but instead uses main memory.
+The ISP has its own address-space and main memory needs to be mapped into
+its address space through the ISP's MMU.
+
+Memory is allocated by the hmm code. hmm_alloc() returns an ISP virtual
+address. The hmm code keeps a list of all allocations and when necessary
+the hmm code finds the backing hmm-buffer-object (hmm_bo) by looking
+up the hmm_bo based on the ISP virtual address.
+
+The actual processing pipeline is made by loading one or more programs,
+called binaries. The shisp_240??0_v21.bin firmware file contains many
+different binaries. Binaries are picked by filling a ia_css_binary_descr
+struct with various input and output parameters and then calling
+ia_css_binary_find(). Some binaries support creating multiple outputs
+(preview + video frame?) at the same time.
+
+For example for the /dev/video0 preview node load_preview_binaries()
+from atomisp/pci/sh_css.c is called and then loads a preview and
+optionally a scalar binary. Note when digital zoom is disabled
+(it is enabled by default) only the preview binary is loaded.
+So in this case a single binary handles the entire pipeline.
+
+Since getting a picture requires multiple processing steps,
+this means that unlike in fixed pipelines the soft pipelines
+on the ISP can do multiple processing steps in a single pipeline
+element (in a single binary).
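
As a rough illustration of the hmm_alloc()/hmm_store()/hmm_load() flow described in the notes above (using the simplified hmm_alloc() signature from this series; the buffer size and contents are arbitrary):

/* Allocate ISP memory, copy data in, read it back, and free it again. */
static int example_hmm_roundtrip(void)
{
	char host_buf[64] = "example payload";	/* arbitrary test data */
	ia_css_ptr isp_addr;

	isp_addr = hmm_alloc(sizeof(host_buf));	/* returns an ISP virtual address */
	if (!isp_addr)
		return -ENOMEM;

	hmm_store(isp_addr, host_buf, sizeof(host_buf));	/* host -> ISP memory */
	hmm_load(isp_addr, host_buf, sizeof(host_buf));		/* ISP memory -> host */

	hmm_free(isp_addr);
	return 0;
}
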
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
deleted file mode 100644
index 28cb271663c4..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp_acc.c
+++ /dev/null
@@ -1,625 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Clovertrail PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-/*
- * This file implements loadable acceleration firmware API,
- * including ioctls to map and unmap acceleration parameters and buffers.
- */
-
-#include <linux/init.h>
-#include <media/v4l2-event.h>
-
-#include "hmm.h"
-
-#include "atomisp_acc.h"
-#include "atomisp_internal.h"
-#include "atomisp_compat.h"
-#include "atomisp_cmd.h"
-
-#include "ia_css.h"
-
-static const struct {
- unsigned int flag;
- enum ia_css_pipe_id pipe_id;
-} acc_flag_to_pipe[] = {
- { ATOMISP_ACC_FW_LOAD_FL_PREVIEW, IA_CSS_PIPE_ID_PREVIEW },
- { ATOMISP_ACC_FW_LOAD_FL_COPY, IA_CSS_PIPE_ID_COPY },
- { ATOMISP_ACC_FW_LOAD_FL_VIDEO, IA_CSS_PIPE_ID_VIDEO },
- { ATOMISP_ACC_FW_LOAD_FL_CAPTURE, IA_CSS_PIPE_ID_CAPTURE },
- { ATOMISP_ACC_FW_LOAD_FL_ACC, IA_CSS_PIPE_ID_ACC }
-};
-
-/*
- * Allocate struct atomisp_acc_fw along with space for firmware.
- * The returned struct atomisp_acc_fw is cleared (firmware region is not).
- */
-static struct atomisp_acc_fw *acc_alloc_fw(unsigned int fw_size)
-{
- struct atomisp_acc_fw *acc_fw;
-
- acc_fw = kzalloc(sizeof(*acc_fw), GFP_KERNEL);
- if (!acc_fw)
- return NULL;
-
- acc_fw->fw = vmalloc(fw_size);
- if (!acc_fw->fw) {
- kfree(acc_fw);
- return NULL;
- }
-
- return acc_fw;
-}
-
-static void acc_free_fw(struct atomisp_acc_fw *acc_fw)
-{
- vfree(acc_fw->fw);
- kfree(acc_fw);
-}
-
-static struct atomisp_acc_fw *
-acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle)
-{
- struct atomisp_acc_fw *acc_fw;
-
- list_for_each_entry(acc_fw, &asd->acc.fw, list)
- if (acc_fw->handle == handle)
- return acc_fw;
-
- return NULL;
-}
-
-static struct atomisp_map *acc_get_map(struct atomisp_sub_device *asd,
- unsigned long css_ptr, size_t length)
-{
- struct atomisp_map *atomisp_map;
-
- list_for_each_entry(atomisp_map, &asd->acc.memory_maps, list) {
- if (atomisp_map->ptr == css_ptr &&
- atomisp_map->length == length)
- return atomisp_map;
- }
- return NULL;
-}
-
-static int acc_stop_acceleration(struct atomisp_sub_device *asd)
-{
- int ret;
-
- ret = atomisp_css_stop_acc_pipe(asd);
- atomisp_css_destroy_acc_pipe(asd);
-
- return ret;
-}
-
-void atomisp_acc_cleanup(struct atomisp_device *isp)
-{
- int i;
-
- for (i = 0; i < isp->num_of_streams; i++)
- ida_destroy(&isp->asd[i].acc.ida);
-}
-
-void atomisp_acc_release(struct atomisp_sub_device *asd)
-{
- struct atomisp_acc_fw *acc_fw, *ta;
- struct atomisp_map *atomisp_map, *tm;
-
- /* Stop acceleration if already running */
- if (asd->acc.pipeline)
- acc_stop_acceleration(asd);
-
- /* Unload all loaded acceleration binaries */
- list_for_each_entry_safe(acc_fw, ta, &asd->acc.fw, list) {
- list_del(&acc_fw->list);
- ida_free(&asd->acc.ida, acc_fw->handle);
- acc_free_fw(acc_fw);
- }
-
- /* Free all mapped memory blocks */
- list_for_each_entry_safe(atomisp_map, tm, &asd->acc.memory_maps, list) {
- list_del(&atomisp_map->list);
- hmm_free(atomisp_map->ptr);
- kfree(atomisp_map);
- }
-}
-
-int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
- struct atomisp_acc_fw_load_to_pipe *user_fw)
-{
- static const unsigned int pipeline_flags =
- ATOMISP_ACC_FW_LOAD_FL_PREVIEW | ATOMISP_ACC_FW_LOAD_FL_COPY |
- ATOMISP_ACC_FW_LOAD_FL_VIDEO |
- ATOMISP_ACC_FW_LOAD_FL_CAPTURE | ATOMISP_ACC_FW_LOAD_FL_ACC;
-
- struct atomisp_acc_fw *acc_fw;
- int handle;
-
- if (!user_fw->data || user_fw->size < sizeof(*acc_fw->fw))
- return -EINVAL;
-
- /* Binary has to be enabled at least for one pipeline */
- if (!(user_fw->flags & pipeline_flags))
- return -EINVAL;
-
- /* We do not support other flags yet */
- if (user_fw->flags & ~pipeline_flags)
- return -EINVAL;
-
- if (user_fw->type < ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT ||
- user_fw->type > ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
- return -EINVAL;
-
- if (asd->acc.pipeline || asd->acc.extension_mode)
- return -EBUSY;
-
- acc_fw = acc_alloc_fw(user_fw->size);
- if (!acc_fw)
- return -ENOMEM;
-
- if (copy_from_user(acc_fw->fw, user_fw->data, user_fw->size)) {
- acc_free_fw(acc_fw);
- return -EFAULT;
- }
-
- handle = ida_alloc(&asd->acc.ida, GFP_KERNEL);
- if (handle < 0) {
- acc_free_fw(acc_fw);
- return -ENOSPC;
- }
-
- user_fw->fw_handle = handle;
- acc_fw->handle = handle;
- acc_fw->flags = user_fw->flags;
- acc_fw->type = user_fw->type;
- acc_fw->fw->handle = handle;
-
- /*
- * correct isp firmware type in order ISP firmware can be appended
- * to correct pipe properly
- */
- if (acc_fw->fw->type == ia_css_isp_firmware) {
- static const int type_to_css[] = {
- [ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT] =
- IA_CSS_ACC_OUTPUT,
- [ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER] =
- IA_CSS_ACC_VIEWFINDER,
- [ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE] =
- IA_CSS_ACC_STANDALONE,
- };
- acc_fw->fw->info.isp.type = type_to_css[acc_fw->type];
- }
-
- list_add_tail(&acc_fw->list, &asd->acc.fw);
- return 0;
-}
-
-int atomisp_acc_load(struct atomisp_sub_device *asd,
- struct atomisp_acc_fw_load *user_fw)
-{
- struct atomisp_acc_fw_load_to_pipe ltp = {0};
- int r;
-
- ltp.flags = ATOMISP_ACC_FW_LOAD_FL_ACC;
- ltp.type = ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE;
- ltp.size = user_fw->size;
- ltp.data = user_fw->data;
- r = atomisp_acc_load_to_pipe(asd, &ltp);
- user_fw->fw_handle = ltp.fw_handle;
- return r;
-}
-
-int atomisp_acc_unload(struct atomisp_sub_device *asd, unsigned int *handle)
-{
- struct atomisp_acc_fw *acc_fw;
-
- if (asd->acc.pipeline || asd->acc.extension_mode)
- return -EBUSY;
-
- acc_fw = acc_get_fw(asd, *handle);
- if (!acc_fw)
- return -EINVAL;
-
- list_del(&acc_fw->list);
- ida_free(&asd->acc.ida, acc_fw->handle);
- acc_free_fw(acc_fw);
-
- return 0;
-}
-
-int atomisp_acc_start(struct atomisp_sub_device *asd, unsigned int *handle)
-{
- struct atomisp_device *isp = asd->isp;
- struct atomisp_acc_fw *acc_fw;
- int ret;
- unsigned int nbin;
-
- if (asd->acc.pipeline || asd->acc.extension_mode)
- return -EBUSY;
-
- /* Invalidate caches. FIXME: should flush only necessary buffers */
- wbinvd();
-
- ret = atomisp_css_create_acc_pipe(asd);
- if (ret)
- return ret;
-
- nbin = 0;
- list_for_each_entry(acc_fw, &asd->acc.fw, list) {
- if (*handle != 0 && *handle != acc_fw->handle)
- continue;
-
- if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
- continue;
-
- /* Add the binary into the pipeline */
- ret = atomisp_css_load_acc_binary(asd, acc_fw->fw, nbin);
- if (ret < 0) {
- dev_err(isp->dev, "acc_load_binary failed\n");
- goto err_stage;
- }
-
- ret = atomisp_css_set_acc_parameters(acc_fw);
- if (ret < 0) {
- dev_err(isp->dev, "acc_set_parameters failed\n");
- goto err_stage;
- }
- nbin++;
- }
- if (nbin < 1) {
- /* Refuse creating pipelines with no binaries */
- dev_err(isp->dev, "%s: no acc binary available\n", __func__);
- ret = -EINVAL;
- goto err_stage;
- }
-
- ret = atomisp_css_start_acc_pipe(asd);
- if (ret) {
- dev_err(isp->dev, "%s: atomisp_acc_start_acc_pipe failed\n",
- __func__);
- goto err_stage;
- }
-
- return 0;
-
-err_stage:
- atomisp_css_destroy_acc_pipe(asd);
- return ret;
-}
-
-int atomisp_acc_wait(struct atomisp_sub_device *asd, unsigned int *handle)
-{
- struct atomisp_device *isp = asd->isp;
- int ret;
-
- if (!asd->acc.pipeline)
- return -ENOENT;
-
- if (*handle && !acc_get_fw(asd, *handle))
- return -EINVAL;
-
- ret = atomisp_css_wait_acc_finish(asd);
- if (acc_stop_acceleration(asd) == -EIO) {
- atomisp_reset(isp);
- return -EINVAL;
- }
-
- return ret;
-}
-
-void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle)
-{
- struct v4l2_event event = { 0 };
-
- event.type = V4L2_EVENT_ATOMISP_ACC_COMPLETE;
- event.u.frame_sync.frame_sequence = atomic_read(&asd->sequence);
- event.id = handle;
-
- v4l2_event_queue(asd->subdev.devnode, &event);
-}
-
-int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
-{
- struct atomisp_map *atomisp_map;
- ia_css_ptr cssptr;
- int pgnr;
-
- if (map->css_ptr)
- return -EINVAL;
-
- if (asd->acc.pipeline)
- return -EBUSY;
-
- if (map->user_ptr) {
- /* Buffer to map must be page-aligned */
- if ((unsigned long)map->user_ptr & ~PAGE_MASK) {
- dev_err(asd->isp->dev,
- "%s: mapped buffer address %p is not page aligned\n",
- __func__, map->user_ptr);
- return -EINVAL;
- }
-
- pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE);
- if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
- dev_err(asd->isp->dev,
- "user space memory size is less than the expected size..\n");
- return -ENOMEM;
- } else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
- dev_err(asd->isp->dev,
- "user space memory size is large than the expected size..\n");
- return -ENOMEM;
- }
-
- cssptr = hmm_alloc(map->length, HMM_BO_USER, 0, map->user_ptr,
- map->flags & ATOMISP_MAP_FLAG_CACHED);
-
- } else {
- /* Allocate private buffer. */
- cssptr = hmm_alloc(map->length, HMM_BO_PRIVATE, 0, NULL,
- map->flags & ATOMISP_MAP_FLAG_CACHED);
- }
-
- if (!cssptr)
- return -ENOMEM;
-
- atomisp_map = kmalloc(sizeof(*atomisp_map), GFP_KERNEL);
- if (!atomisp_map) {
- hmm_free(cssptr);
- return -ENOMEM;
- }
- atomisp_map->ptr = cssptr;
- atomisp_map->length = map->length;
- list_add(&atomisp_map->list, &asd->acc.memory_maps);
-
- dev_dbg(asd->isp->dev, "%s: userptr %p, css_address 0x%x, size %d\n",
- __func__, map->user_ptr, cssptr, map->length);
- map->css_ptr = cssptr;
- return 0;
-}
-
-int atomisp_acc_unmap(struct atomisp_sub_device *asd,
- struct atomisp_acc_map *map)
-{
- struct atomisp_map *atomisp_map;
-
- if (asd->acc.pipeline)
- return -EBUSY;
-
- atomisp_map = acc_get_map(asd, map->css_ptr, map->length);
- if (!atomisp_map)
- return -EINVAL;
-
- list_del(&atomisp_map->list);
- hmm_free(atomisp_map->ptr);
- kfree(atomisp_map);
- return 0;
-}
-
-int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
- struct atomisp_acc_s_mapped_arg *arg)
-{
- struct atomisp_acc_fw *acc_fw;
-
- if (arg->memory >= ATOMISP_ACC_NR_MEMORY)
- return -EINVAL;
-
- if (asd->acc.pipeline)
- return -EBUSY;
-
- acc_fw = acc_get_fw(asd, arg->fw_handle);
- if (!acc_fw)
- return -EINVAL;
-
- if (arg->css_ptr != 0 || arg->length != 0) {
- /* Unless the parameter is cleared, check that it exists */
- if (!acc_get_map(asd, arg->css_ptr, arg->length))
- return -EINVAL;
- }
-
- acc_fw->args[arg->memory].length = arg->length;
- acc_fw->args[arg->memory].css_ptr = arg->css_ptr;
-
- dev_dbg(asd->isp->dev, "%s: mem %d, address %p, size %ld\n",
- __func__, arg->memory, (void *)arg->css_ptr,
- (unsigned long)arg->length);
- return 0;
-}
-
-static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd,
- int i,
- struct atomisp_acc_fw *acc_fw)
-{
- while (--i >= 0) {
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- atomisp_css_unload_acc_extension(asd, acc_fw->fw,
- acc_flag_to_pipe[i].pipe_id);
- }
- }
-}
-
-/*
- * Appends the loaded acceleration binary extensions to the
- * current ISP mode. Must be called just before sh_css_start().
- */
-int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
-{
- struct atomisp_acc_fw *acc_fw;
- bool ext_loaded = false;
- bool continuous = asd->continuous_mode->val &&
- asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW;
- int ret = 0, i = -1;
- struct atomisp_device *isp = asd->isp;
-
- if (asd->acc.pipeline || asd->acc.extension_mode)
- return -EBUSY;
-
- /* Invalidate caches. FIXME: should flush only necessary buffers */
- wbinvd();
-
- list_for_each_entry(acc_fw, &asd->acc.fw, list) {
- if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
- acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
- continue;
-
- for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
- /*
- * QoS (ACC pipe) acceleration stages are
- * currently allowed only in continuous mode.
- * Skip them for all other modes.
- */
- if (!continuous &&
- acc_flag_to_pipe[i].flag ==
- ATOMISP_ACC_FW_LOAD_FL_ACC)
- continue;
-
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- ret = atomisp_css_load_acc_extension(asd,
- acc_fw->fw,
- acc_flag_to_pipe[i].pipe_id,
- acc_fw->type);
- if (ret) {
- atomisp_acc_unload_some_extensions(asd, i, acc_fw);
- goto error;
- }
-
- ext_loaded = true;
- }
- }
-
- ret = atomisp_css_set_acc_parameters(acc_fw);
- if (ret < 0) {
- atomisp_acc_unload_some_extensions(asd, i, acc_fw);
- goto error;
- }
- }
-
- if (!ext_loaded)
- return ret;
-
- ret = atomisp_css_update_stream(asd);
- if (ret) {
- dev_err(isp->dev, "%s: update stream failed.\n", __func__);
- atomisp_acc_unload_extensions(asd);
- goto error;
- }
-
- asd->acc.extension_mode = true;
- return 0;
-
-error:
- list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
- if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
- acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
- continue;
-
- for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
- if (!continuous &&
- acc_flag_to_pipe[i].flag ==
- ATOMISP_ACC_FW_LOAD_FL_ACC)
- continue;
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- atomisp_css_unload_acc_extension(asd,
- acc_fw->fw,
- acc_flag_to_pipe[i].pipe_id);
- }
- }
- }
- return ret;
-}
-
-void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd)
-{
- struct atomisp_acc_fw *acc_fw;
- int i;
-
- if (!asd->acc.extension_mode)
- return;
-
- list_for_each_entry_reverse(acc_fw, &asd->acc.fw, list) {
- if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
- acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
- continue;
-
- for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- atomisp_css_unload_acc_extension(asd,
- acc_fw->fw,
- acc_flag_to_pipe[i].pipe_id);
- }
- }
- }
-
- asd->acc.extension_mode = false;
-}
-
-int atomisp_acc_set_state(struct atomisp_sub_device *asd,
- struct atomisp_acc_state *arg)
-{
- struct atomisp_acc_fw *acc_fw;
- bool enable = (arg->flags & ATOMISP_STATE_FLAG_ENABLE) != 0;
- struct ia_css_pipe *pipe;
- int r;
- int i;
-
- if (!asd->acc.extension_mode)
- return -EBUSY;
-
- if (arg->flags & ~ATOMISP_STATE_FLAG_ENABLE)
- return -EINVAL;
-
- acc_fw = acc_get_fw(asd, arg->fw_handle);
- if (!acc_fw)
- return -EINVAL;
-
- if (enable)
- wbinvd();
-
- for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- pipe = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
- pipes[acc_flag_to_pipe[i].pipe_id];
- r = ia_css_pipe_set_qos_ext_state(pipe, acc_fw->handle,
- enable);
- if (r)
- return -EBADRQC;
- }
- }
-
- if (enable)
- acc_fw->flags |= ATOMISP_ACC_FW_LOAD_FL_ENABLE;
- else
- acc_fw->flags &= ~ATOMISP_ACC_FW_LOAD_FL_ENABLE;
-
- return 0;
-}
-
-int atomisp_acc_get_state(struct atomisp_sub_device *asd,
- struct atomisp_acc_state *arg)
-{
- struct atomisp_acc_fw *acc_fw;
-
- if (!asd->acc.extension_mode)
- return -EBUSY;
-
- acc_fw = acc_get_fw(asd, arg->fw_handle);
- if (!acc_fw)
- return -EINVAL;
-
- arg->flags = acc_fw->flags;
-
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp_acc.h
deleted file mode 100644
index 48d94232229b..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp_acc.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Clovertrail PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __ATOMISP_ACC_H__
-#define __ATOMISP_ACC_H__
-
-#include "../../include/linux/atomisp.h"
-#include "atomisp_internal.h"
-
-#include "ia_css_types.h"
-
-/*
- * Interface functions for AtomISP driver acceleration API implementation.
- */
-
-struct atomisp_sub_device;
-
-void atomisp_acc_cleanup(struct atomisp_device *isp);
-
-/*
- * Free up any allocated resources.
- * Must be called each time when the device is closed.
- * Note that there isn't corresponding open() call;
- * this function may be called sequentially multiple times.
- * Must be called to free up resources before driver is unloaded.
- */
-void atomisp_acc_release(struct atomisp_sub_device *asd);
-
-/* Load acceleration binary. DEPRECATED. */
-int atomisp_acc_load(struct atomisp_sub_device *asd,
- struct atomisp_acc_fw_load *fw);
-
-/* Load acceleration binary with specified properties */
-int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
- struct atomisp_acc_fw_load_to_pipe *fw);
-
-/* Unload specified acceleration binary */
-int atomisp_acc_unload(struct atomisp_sub_device *asd,
- unsigned int *handle);
-
-/*
- * Map a memory region into ISP memory space.
- */
-int atomisp_acc_map(struct atomisp_sub_device *asd,
- struct atomisp_acc_map *map);
-
-/*
- * Unmap a mapped memory region.
- */
-int atomisp_acc_unmap(struct atomisp_sub_device *asd,
- struct atomisp_acc_map *map);
-
-/*
- * Set acceleration binary argument to a previously mapped memory region.
- */
-int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
- struct atomisp_acc_s_mapped_arg *arg);
-
-/*
- * Start acceleration.
- * Return immediately, acceleration is left running in background.
- * Specify either acceleration binary or pipeline which to start.
- */
-int atomisp_acc_start(struct atomisp_sub_device *asd,
- unsigned int *handle);
-
-/*
- * Wait until acceleration finishes.
- * This MUST be called after each acceleration has been started.
- * Specify either acceleration binary or pipeline handle.
- */
-int atomisp_acc_wait(struct atomisp_sub_device *asd,
- unsigned int *handle);
-
-/*
- * Used by ISR to notify ACC stage finished.
- * This is internally used and does not export as IOCTL.
- */
-void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle);
-
-/*
- * Appends the loaded acceleration binary extensions to the
- * current ISP mode. Must be called just before atomisp_css_start().
- */
-int atomisp_acc_load_extensions(struct atomisp_sub_device *asd);
-
-/*
- * Must be called after streaming is stopped:
- * unloads any loaded acceleration extensions.
- */
-void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd);
-
-/*
- * Set acceleration firmware flags.
- */
-int atomisp_acc_set_state(struct atomisp_sub_device *asd,
- struct atomisp_acc_state *arg);
-
-/*
- * Get acceleration firmware flags.
- */
-int atomisp_acc_get_state(struct atomisp_sub_device *asd,
- struct atomisp_acc_state *arg);
-
-#endif /* __ATOMISP_ACC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 97d5a528969b..c932f340068f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -42,7 +42,6 @@
#include "atomisp_ioctl.h"
#include "atomisp-regs.h"
#include "atomisp_tables.h"
-#include "atomisp_acc.h"
#include "atomisp_compat.h"
#include "atomisp_subdev.h"
#include "atomisp_dfs_tables.h"
@@ -539,7 +538,7 @@ irqreturn_t atomisp_isr(int irq, void *dev)
clear_irq_reg(isp);
- if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp))
+ if (!atomisp_streaming_count(isp))
goto out_nowake;
for (i = 0; i < isp->num_of_streams; i++) {
@@ -901,9 +900,9 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
int err;
unsigned long irqflags;
struct ia_css_frame *frame = NULL;
- struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp;
- struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp;
- struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp, *s3a_iter;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp, *dis_iter;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp, *md_iter;
enum atomisp_metadata_type md_type;
struct atomisp_device *isp = asd->isp;
struct v4l2_control ctrl;
@@ -942,60 +941,75 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
switch (buf_type) {
case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
- list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp,
+ list_for_each_entry_safe(s3a_iter, _s3a_buf_tmp,
&asd->s3a_stats_in_css, list) {
- if (s3a_buf->s3a_data ==
+ if (s3a_iter->s3a_data ==
buffer.css_buffer.data.stats_3a) {
- list_del_init(&s3a_buf->list);
- list_add_tail(&s3a_buf->list,
+ list_del_init(&s3a_iter->list);
+ list_add_tail(&s3a_iter->list,
&asd->s3a_stats_ready);
+ s3a_buf = s3a_iter;
break;
}
}
asd->s3a_bufs_in_css[css_pipe_id]--;
atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id);
- dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
- __func__, s3a_buf->s3a_data->exp_id);
+ if (s3a_buf)
+ dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
+ __func__, s3a_buf->s3a_data->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: s3a stat is ready with no exp_id found\n",
+ __func__);
break;
case IA_CSS_BUFFER_TYPE_METADATA:
if (error)
break;
md_type = atomisp_get_metadata_type(asd, css_pipe_id);
- list_for_each_entry_safe(md_buf, _md_buf_tmp,
+ list_for_each_entry_safe(md_iter, _md_buf_tmp,
&asd->metadata_in_css[md_type], list) {
- if (md_buf->metadata ==
+ if (md_iter->metadata ==
buffer.css_buffer.data.metadata) {
- list_del_init(&md_buf->list);
- list_add_tail(&md_buf->list,
+ list_del_init(&md_iter->list);
+ list_add_tail(&md_iter->list,
&asd->metadata_ready[md_type]);
+ md_buf = md_iter;
break;
}
}
asd->metadata_bufs_in_css[stream_id][css_pipe_id]--;
atomisp_metadata_ready_event(asd, md_type);
- dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
- __func__, md_buf->metadata->exp_id);
+ if (md_buf)
+ dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
+ __func__, md_buf->metadata->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: metadata is ready with no exp_id found\n",
+ __func__);
break;
case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
- list_for_each_entry_safe(dis_buf, _dis_buf_tmp,
+ list_for_each_entry_safe(dis_iter, _dis_buf_tmp,
&asd->dis_stats_in_css, list) {
- if (dis_buf->dis_data ==
+ if (dis_iter->dis_data ==
buffer.css_buffer.data.stats_dvs) {
spin_lock_irqsave(&asd->dis_stats_lock,
irqflags);
- list_del_init(&dis_buf->list);
- list_add(&dis_buf->list, &asd->dis_stats);
+ list_del_init(&dis_iter->list);
+ list_add(&dis_iter->list, &asd->dis_stats);
asd->params.dis_proj_data_valid = true;
spin_unlock_irqrestore(&asd->dis_stats_lock,
irqflags);
+ dis_buf = dis_iter;
break;
}
}
asd->dis_bufs_in_css--;
- dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
- __func__, dis_buf->dis_data->exp_id);
+ if (dis_buf)
+ dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
+ __func__, dis_buf->dis_data->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: dis stat is ready with no exp_id found\n",
+ __func__);
break;
case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
@@ -1302,34 +1316,11 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
for (i = 0; i < isp->num_of_streams; i++) {
struct atomisp_sub_device *asd = &isp->asd[i];
- struct ia_css_pipeline *acc_pipeline;
- struct ia_css_pipe *acc_pipe = NULL;
if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED &&
!asd->stream_prepared)
continue;
- /*
- * AtomISP::waitStageUpdate is blocked when WDT happens.
- * By calling acc_done() for all loaded fw_handles,
- * HAL will be unblocked.
- */
- acc_pipe = asd->stream_env[i].pipes[IA_CSS_PIPE_ID_ACC];
- if (acc_pipe) {
- acc_pipeline = ia_css_pipe_get_pipeline(acc_pipe);
- if (acc_pipeline) {
- struct ia_css_pipeline_stage *stage;
-
- for (stage = acc_pipeline->stages; stage;
- stage = stage->next) {
- const struct ia_css_fw_info *fw;
-
- fw = stage->firmware;
- atomisp_acc_done(asd, fw->handle);
- }
- }
- }
-
depth_cnt++;
if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED)
@@ -1350,8 +1341,6 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
dev_warn(isp->dev,
"can't stop streaming on sensor!\n");
- atomisp_acc_unload_extensions(asd);
-
atomisp_clear_css_buffer_counters(asd);
css_pipe_id = atomisp_get_css_pipe_id(asd);
@@ -1863,7 +1852,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
spin_lock_irqsave(&isp->lock, flags);
- if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) {
+ if (!atomisp_streaming_count(isp)) {
spin_unlock_irqrestore(&isp->lock, flags);
return IRQ_HANDLED;
}
@@ -1914,9 +1903,6 @@ out:
&& isp->sw_contex.file_input)
v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
video, s_stream, 1);
- /* FIXME! FIX ACC implementation */
- if (asd->acc.pipeline && css_pipe_done[asd->index])
- atomisp_css_acc_done(asd);
}
dev_dbg(isp->dev, "<%s\n", __func__);
@@ -6504,7 +6490,7 @@ int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
ret = atomisp_css_exp_id_unlock(asd, exp_id);
if (ret) {
dev_err(asd->isp->dev,
- "%s exp_id is wrapping back to %d but force unlock failed,, err %d.\n",
+ "%s exp_id is wrapping back to %d but force unlock failed, err %d.\n",
__func__, exp_id, ret);
return ret;
}
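
Annotation: the three buffer-done hunks above share one pattern — the list_for_each_entry_safe() cursor (s3a_iter, md_iter, dis_iter) is never dereferenced after the loop; the match is copied into a pointer that starts out NULL and the dev_dbg() is guarded on it. A minimal stand-alone sketch of that idiom, with generic names that are not part of the driver:

	#include <linux/list.h>
	#include <linux/printk.h>

	struct item {
		struct list_head list;
		int id;
	};

	/* Move the entry matching @id from @pending to @done; NULL if not found. */
	static struct item *claim_item(struct list_head *pending,
				       struct list_head *done, int id)
	{
		struct item *match = NULL, *iter, *tmp;

		list_for_each_entry_safe(iter, tmp, pending, list) {
			if (iter->id != id)
				continue;
			list_move_tail(&iter->list, done);
			match = iter;	/* remember the hit outside the loop */
			break;
		}

		if (match)
			pr_debug("item %d is ready\n", match->id);
		else
			pr_debug("no pending item matches id %d\n", id);

		return match;
	}

Keeping the cursor local to the loop avoids relying on its value once the list is exhausted, which is why the driver gains the NULL checks before the debug prints.
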
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h
index 64c1bf0943e6..3393ae6824f0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h
@@ -240,7 +240,7 @@ int atomisp_css_input_configure_port(struct atomisp_sub_device *asd,
unsigned int metadata_width,
unsigned int metadata_height);
-void atomisp_create_pipes_stream(struct atomisp_sub_device *asd);
+int atomisp_create_pipes_stream(struct atomisp_sub_device *asd);
void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd);
void atomisp_css_stop(struct atomisp_sub_device *asd,
@@ -442,33 +442,6 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
int atomisp_css_update_stream(struct atomisp_sub_device *asd);
-int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd);
-
-int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd);
-
-int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd);
-
-void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd);
-
-int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd,
- struct ia_css_fw_info *fw,
- enum ia_css_pipe_id pipe_id,
- unsigned int type);
-
-void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd,
- struct ia_css_fw_info *fw,
- enum ia_css_pipe_id pipe_id);
-
-int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd);
-
-void atomisp_css_acc_done(struct atomisp_sub_device *asd);
-
-int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
- struct ia_css_fw_info *fw,
- unsigned int index);
-
-void atomisp_css_unload_acc_binary(struct atomisp_sub_device *asd);
-
struct atomisp_acc_fw;
int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 781a11cca599..5aa108a1724c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -31,7 +31,6 @@
#include "atomisp-regs.h"
#include "atomisp_fops.h"
#include "atomisp_ioctl.h"
-#include "atomisp_acc.h"
#include "ia_css_debug.h"
#include "ia_css_isp_param.h"
@@ -419,24 +418,14 @@ static void __dump_stream_config(struct atomisp_sub_device *asd,
}
static int __destroy_stream(struct atomisp_sub_device *asd,
- struct atomisp_stream_env *stream_env, bool force)
+ struct atomisp_stream_env *stream_env)
{
struct atomisp_device *isp = asd->isp;
- int i;
unsigned long timeout;
if (!stream_env->stream)
return 0;
- if (!force) {
- for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
- if (stream_env->update_pipe[i])
- break;
-
- if (i == IA_CSS_PIPE_ID_NUM)
- return 0;
- }
-
if (stream_env->stream_state == CSS_STREAM_STARTED
&& ia_css_stream_stop(stream_env->stream) != 0) {
dev_err(isp->dev, "stop stream failed.\n");
@@ -470,12 +459,12 @@ static int __destroy_stream(struct atomisp_sub_device *asd,
return 0;
}
-static int __destroy_streams(struct atomisp_sub_device *asd, bool force)
+static int __destroy_streams(struct atomisp_sub_device *asd)
{
int ret, i;
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
- ret = __destroy_stream(asd, &asd->stream_env[i], force);
+ ret = __destroy_stream(asd, &asd->stream_env[i]);
if (ret)
return ret;
}
@@ -530,21 +519,19 @@ static int __create_streams(struct atomisp_sub_device *asd)
return 0;
rollback:
for (i--; i >= 0; i--)
- __destroy_stream(asd, &asd->stream_env[i], true);
+ __destroy_stream(asd, &asd->stream_env[i]);
return ret;
}
static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
- struct atomisp_stream_env *stream_env,
- bool force)
+ struct atomisp_stream_env *stream_env)
{
struct atomisp_device *isp = asd->isp;
int ret = 0;
int i;
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
- if (!stream_env->pipes[i] ||
- !(force || stream_env->update_pipe[i]))
+ if (!stream_env->pipes[i])
continue;
if (ia_css_pipe_destroy(stream_env->pipes[i])
!= 0) {
@@ -558,7 +545,7 @@ static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
return ret;
}
-static int __destroy_pipes(struct atomisp_sub_device *asd, bool force)
+static int __destroy_pipes(struct atomisp_sub_device *asd)
{
struct atomisp_device *isp = asd->isp;
int i;
@@ -572,7 +559,7 @@ static int __destroy_pipes(struct atomisp_sub_device *asd, bool force)
continue;
}
- ret = __destroy_stream_pipes(asd, &asd->stream_env[i], force);
+ ret = __destroy_stream_pipes(asd, &asd->stream_env[i]);
if (ret)
return ret;
}
@@ -582,8 +569,11 @@ static int __destroy_pipes(struct atomisp_sub_device *asd, bool force)
void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd)
{
- __destroy_streams(asd, true);
- __destroy_pipes(asd, true);
+ if (__destroy_streams(asd))
+ dev_warn(asd->isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd))
+ dev_warn(asd->isp->dev, "destroy pipe failed.\n");
}
static void __apply_additional_pipe_config(
@@ -786,39 +776,32 @@ pipe_err:
return -EINVAL;
}
-void atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
-{
- __create_pipes(asd);
- __create_streams(asd);
-}
-
-int atomisp_css_update_stream(struct atomisp_sub_device *asd)
+int atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
{
int ret;
- struct atomisp_device *isp = asd->isp;
-
- if (__destroy_streams(asd, true))
- dev_warn(isp->dev, "destroy stream failed.\n");
-
- if (__destroy_pipes(asd, true))
- dev_warn(isp->dev, "destroy pipe failed.\n");
ret = __create_pipes(asd);
if (ret) {
- dev_err(isp->dev, "create pipe failed %d.\n", ret);
- return -EIO;
+ dev_err(asd->isp->dev, "create pipe failed %d.\n", ret);
+ return ret;
}
ret = __create_streams(asd);
if (ret) {
- dev_warn(isp->dev, "create stream failed %d.\n", ret);
- __destroy_pipes(asd, true);
- return -EIO;
+ dev_warn(asd->isp->dev, "create stream failed %d.\n", ret);
+ __destroy_pipes(asd);
+ return ret;
}
return 0;
}
+int atomisp_css_update_stream(struct atomisp_sub_device *asd)
+{
+ atomisp_destroy_pipes_stream_force(asd);
+ return atomisp_create_pipes_stream(asd);
+}
+
int atomisp_css_init(struct atomisp_device *isp)
{
unsigned int mmu_base_addr;
@@ -1103,23 +1086,12 @@ int atomisp_css_start(struct atomisp_sub_device *asd,
int ret = 0, i = 0;
if (in_reset) {
- if (__destroy_streams(asd, true))
- dev_warn(isp->dev, "destroy stream failed.\n");
-
- if (__destroy_pipes(asd, true))
- dev_warn(isp->dev, "destroy pipe failed.\n");
+ ret = atomisp_css_update_stream(asd);
+ if (ret)
+ return ret;
- if (__create_pipes(asd)) {
- dev_err(isp->dev, "create pipe error.\n");
- return -EINVAL;
- }
- if (__create_streams(asd)) {
- dev_err(isp->dev, "create stream error.\n");
- ret = -EINVAL;
- goto stream_err;
- }
- /* in_reset == true, extension firmwares are reloaded after the recovery */
- atomisp_acc_load_extensions(asd);
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
}
/*
@@ -1134,15 +1106,9 @@ int atomisp_css_start(struct atomisp_sub_device *asd,
* recreated in the next stream on.
*/
if (!asd->stream_prepared) {
- if (__create_pipes(asd)) {
- dev_err(isp->dev, "create pipe error.\n");
- return -EINVAL;
- }
- if (__create_streams(asd)) {
- dev_err(isp->dev, "create stream error.\n");
- ret = -EINVAL;
- goto stream_err;
- }
+ ret = atomisp_create_pipes_stream(asd);
+ if (ret)
+ return ret;
}
/*
* SP can only be started one time
@@ -1181,9 +1147,7 @@ int atomisp_css_start(struct atomisp_sub_device *asd,
return 0;
start_err:
- __destroy_streams(asd, true);
-stream_err:
- __destroy_pipes(asd, true);
+ atomisp_destroy_pipes_stream_force(asd);
/* css 2.0 API limitation: ia_css_stop_sp() could be only called after
* destroy all pipes
@@ -2088,13 +2052,8 @@ void atomisp_css_stop(struct atomisp_sub_device *asd,
unsigned long irqflags;
unsigned int i;
- /* if is called in atomisp_reset(), force destroy stream */
- if (__destroy_streams(asd, true))
- dev_err(isp->dev, "destroy stream failed.\n");
-
- /* if is called in atomisp_reset(), force destroy all pipes */
- if (__destroy_pipes(asd, true))
- dev_err(isp->dev, "destroy pipes failed.\n");
+ /* if is called in atomisp_reset(), force destroy streams and pipes */
+ atomisp_destroy_pipes_stream_force(asd);
atomisp_init_raw_buffer_bitmap(asd);
@@ -2634,27 +2593,15 @@ static int __get_frame_info(struct atomisp_sub_device *asd,
struct ia_css_pipe_info p_info;
/* FIXME! No need to destroy/recreate all streams */
- if (__destroy_streams(asd, true))
- dev_warn(isp->dev, "destroy stream failed.\n");
-
- if (__destroy_pipes(asd, true))
- dev_warn(isp->dev, "destroy pipe failed.\n");
-
- if (__create_pipes(asd)) {
- dev_err(isp->dev, "can't create pipes\n");
- return -EINVAL;
- }
-
- if (__create_streams(asd)) {
- dev_err(isp->dev, "can't create streams\n");
- goto stream_err;
- }
+ ret = atomisp_css_update_stream(asd);
+ if (ret)
+ return ret;
ret = ia_css_pipe_get_info(asd->stream_env[stream_index].pipes[pipe_id],
&p_info);
if (ret) {
dev_err(isp->dev, "can't get info from pipe\n");
- goto stream_err;
+ goto get_info_err;
}
switch (type) {
@@ -2685,8 +2632,8 @@ static int __get_frame_info(struct atomisp_sub_device *asd,
return 0;
-stream_err:
- __destroy_pipes(asd, true);
+get_info_err:
+ atomisp_destroy_pipes_stream_force(asd);
return -EINVAL;
}
@@ -3824,30 +3771,6 @@ void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
return;
}
-void atomisp_css_acc_done(struct atomisp_sub_device *asd)
-{
- complete(&asd->acc.acc_done);
-}
-
-int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd)
-{
- int ret = 0;
- struct atomisp_device *isp = asd->isp;
-
- /* Unlock the isp mutex taken in IOCTL handler before sleeping! */
- rt_mutex_unlock(&isp->mutex);
- if (wait_for_completion_interruptible_timeout(&asd->acc.acc_done,
- ATOMISP_ISP_TIMEOUT_DURATION) == 0) {
- dev_err(isp->dev, "<%s: completion timeout\n", __func__);
- ia_css_debug_dump_sp_sw_debug_info();
- ia_css_debug_dump_debug_info(__func__);
- ret = -EIO;
- }
- rt_mutex_lock(&isp->mutex);
-
- return ret;
-}
-
/* Set the ACC binary arguments */
int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw)
{
@@ -3866,204 +3789,6 @@ int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw)
return 0;
}
-/* Load acc binary extension */
-int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd,
- struct ia_css_fw_info *fw,
- enum ia_css_pipe_id pipe_id,
- unsigned int type)
-{
- struct ia_css_fw_info **hd;
-
- fw->next = NULL;
- hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
- .pipe_configs[pipe_id].acc_extension);
- while (*hd)
- hd = &(*hd)->next;
- *hd = fw;
-
- asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
- .update_pipe[pipe_id] = true;
- return 0;
-}
-
-/* Unload acc binary extension */
-void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd,
- struct ia_css_fw_info *fw,
- enum ia_css_pipe_id pipe_id)
-{
- struct ia_css_fw_info **hd;
-
- hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
- .pipe_configs[pipe_id].acc_extension);
- while (*hd && *hd != fw)
- hd = &(*hd)->next;
- if (!*hd) {
- dev_err(asd->isp->dev, "did not find acc fw for removal\n");
- return;
- }
- *hd = fw->next;
- fw->next = NULL;
-
- asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
- .update_pipe[pipe_id] = true;
-}
-
-int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd)
-{
- struct atomisp_device *isp = asd->isp;
- struct ia_css_pipe_config *pipe_config;
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
-
- if (stream_env->acc_stream) {
- if (stream_env->acc_stream_state == CSS_STREAM_STARTED) {
- if (ia_css_stream_stop(stream_env->acc_stream)
- != 0) {
- dev_err(isp->dev, "stop acc_stream failed.\n");
- return -EBUSY;
- }
- }
-
- if (ia_css_stream_destroy(stream_env->acc_stream)
- != 0) {
- dev_err(isp->dev, "destroy acc_stream failed.\n");
- return -EBUSY;
- }
- stream_env->acc_stream = NULL;
- }
-
- pipe_config = &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC];
- ia_css_pipe_config_defaults(pipe_config);
- asd->acc.acc_stages = kzalloc(MAX_ACC_STAGES *
- sizeof(void *), GFP_KERNEL);
- if (!asd->acc.acc_stages)
- return -ENOMEM;
- pipe_config->acc_stages = asd->acc.acc_stages;
- pipe_config->mode = IA_CSS_PIPE_MODE_ACC;
- pipe_config->num_acc_stages = 0;
-
- /*
- * We delay the ACC pipeline creation to atomisp_css_start_acc_pipe,
- * because pipe configuration will soon be changed by
- * atomisp_css_load_acc_binary()
- */
- return 0;
-}
-
-int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd)
-{
- struct atomisp_device *isp = asd->isp;
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
- struct ia_css_pipe_config *pipe_config =
- &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC];
-
- if (ia_css_pipe_create(pipe_config,
- &stream_env->pipes[IA_CSS_PIPE_ID_ACC]) != 0) {
- dev_err(isp->dev, "%s: ia_css_pipe_create failed\n",
- __func__);
- return -EBADE;
- }
-
- memset(&stream_env->acc_stream_config, 0,
- sizeof(struct ia_css_stream_config));
- if (ia_css_stream_create(&stream_env->acc_stream_config, 1,
- &stream_env->pipes[IA_CSS_PIPE_ID_ACC],
- &stream_env->acc_stream) != 0) {
- dev_err(isp->dev, "%s: create acc_stream error.\n", __func__);
- return -EINVAL;
- }
- stream_env->acc_stream_state = CSS_STREAM_CREATED;
-
- init_completion(&asd->acc.acc_done);
- asd->acc.pipeline = stream_env->pipes[IA_CSS_PIPE_ID_ACC];
-
- atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false);
-
- if (ia_css_start_sp()) {
- dev_err(isp->dev, "start sp error.\n");
- return -EIO;
- }
-
- if (ia_css_stream_start(stream_env->acc_stream)
- != 0) {
- dev_err(isp->dev, "acc_stream start error.\n");
- return -EIO;
- }
-
- stream_env->acc_stream_state = CSS_STREAM_STARTED;
- return 0;
-}
-
-int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd)
-{
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
- if (stream_env->acc_stream_state == CSS_STREAM_STARTED) {
- ia_css_stream_stop(stream_env->acc_stream);
- stream_env->acc_stream_state = CSS_STREAM_STOPPED;
- }
- return 0;
-}
-
-void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd)
-{
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
- if (stream_env->acc_stream) {
- if (ia_css_stream_destroy(stream_env->acc_stream)
- != 0)
- dev_warn(asd->isp->dev,
- "destroy acc_stream failed.\n");
- stream_env->acc_stream = NULL;
- }
-
- if (stream_env->pipes[IA_CSS_PIPE_ID_ACC]) {
- if (ia_css_pipe_destroy(stream_env->pipes[IA_CSS_PIPE_ID_ACC])
- != 0)
- dev_warn(asd->isp->dev,
- "destroy ACC pipe failed.\n");
- stream_env->pipes[IA_CSS_PIPE_ID_ACC] = NULL;
- stream_env->update_pipe[IA_CSS_PIPE_ID_ACC] = false;
- ia_css_pipe_config_defaults(
- &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]);
- ia_css_pipe_extra_config_defaults(
- &stream_env->pipe_extra_configs[IA_CSS_PIPE_ID_ACC]);
- }
- asd->acc.pipeline = NULL;
-
- /* css 2.0 API limitation: ia_css_stop_sp() could be only called after
- * destroy all pipes
- */
- ia_css_stop_sp();
-
- kfree(asd->acc.acc_stages);
- asd->acc.acc_stages = NULL;
-
- atomisp_freq_scaling(asd->isp, ATOMISP_DFS_MODE_LOW, false);
-}
-
-int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
- struct ia_css_fw_info *fw,
- unsigned int index)
-{
- struct ia_css_pipe_config *pipe_config =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
- .pipe_configs[IA_CSS_PIPE_ID_ACC];
-
- if (index >= MAX_ACC_STAGES) {
- dev_dbg(asd->isp->dev, "%s: index(%d) out of range\n",
- __func__, index);
- return -ENOMEM;
- }
-
- pipe_config->acc_stages[index] = fw;
- pipe_config->num_acc_stages = index + 1;
- pipe_config->acc_num_execs = 1;
-
- return 0;
-}
-
static struct atomisp_sub_device *__get_atomisp_subdev(
struct ia_css_pipe *css_pipe,
struct atomisp_device *isp,
@@ -4075,8 +3800,7 @@ static struct atomisp_sub_device *__get_atomisp_subdev(
for (i = 0; i < isp->num_of_streams; i++) {
asd = &isp->asd[i];
- if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED &&
- !asd->acc.pipeline)
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED)
continue;
for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) {
stream_env = &asd->stream_env[j];
@@ -4211,8 +3935,7 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
css_pipe_done[asd->index] = true;
break;
case IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE:
- dev_dbg(isp->dev, "event: acc stage done");
- atomisp_acc_done(asd, current_event.event.fw_handle);
+ dev_warn(isp->dev, "unexpected event: acc stage done");
break;
default:
dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n",
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
index 86d3fbe01378..33821b51d90e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
@@ -140,19 +140,6 @@ struct atomisp_calibration_group32 {
compat_uptr_t calb_grp_values;
};
-struct atomisp_acc_fw_load32 {
- unsigned int size;
- unsigned int fw_handle;
- compat_uptr_t data;
-};
-
-struct atomisp_acc_fw_arg32 {
- unsigned int fw_handle;
- unsigned int index;
- compat_uptr_t value;
- compat_size_t size;
-};
-
struct v4l2_private_int_data32 {
__u32 size;
compat_uptr_t data;
@@ -170,21 +157,6 @@ struct atomisp_shading_table32 {
compat_uptr_t data[ATOMISP_NUM_SC_COLORS];
};
-struct atomisp_acc_map32 {
- __u32 flags; /* Flags, see list below */
- __u32 length; /* Length of data in bytes */
- compat_uptr_t user_ptr; /* Pointer into user space */
- compat_ulong_t css_ptr; /* Pointer into CSS address space */
- __u32 reserved[4]; /* Set to zero */
-};
-
-struct atomisp_acc_s_mapped_arg32 {
- unsigned int fw_handle;
- __u32 memory; /* one of enum atomisp_acc_memory */
- compat_size_t length;
- compat_ulong_t css_ptr;
-};
-
struct atomisp_parameters32 {
compat_uptr_t wb_config; /* White Balance config */
compat_uptr_t cc_config; /* Color Correction config */
@@ -265,15 +237,6 @@ struct atomisp_parameters32 {
u32 per_frame_setting;
};
-struct atomisp_acc_fw_load_to_pipe32 {
- __u32 flags; /* Flags, see below for valid values */
- unsigned int fw_handle; /* Handle, filled by kernel. */
- __u32 size; /* Firmware binary size */
- compat_uptr_t data; /* Pointer to firmware */
- __u32 type; /* Binary type */
- __u32 reserved[3]; /* Set to zero */
-};
-
struct atomisp_dvs_6axis_config32 {
u32 exp_id;
u32 width_y;
@@ -323,15 +286,6 @@ struct atomisp_sensor_ae_bracketing_lut32 {
#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32 \
_IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group32)
-#define ATOMISP_IOC_ACC_LOAD32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load32)
-
-#define ATOMISP_IOC_ACC_S_ARG32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg32)
-
-#define ATOMISP_IOC_ACC_DESTAB32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg32)
-
#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32 \
_IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data32)
@@ -341,18 +295,6 @@ struct atomisp_sensor_ae_bracketing_lut32 {
#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32 \
_IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data32)
-#define ATOMISP_IOC_ACC_MAP32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32)
-
-#define ATOMISP_IOC_ACC_UNMAP32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32)
-
-#define ATOMISP_IOC_ACC_S_MAPPED_ARG32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg32)
-
-#define ATOMISP_IOC_ACC_LOAD_TO_PIPE32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe32)
-
#define ATOMISP_IOC_S_PARAMETERS32 \
_IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
index dcb571f515a7..3ddc935ec01d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
@@ -45,10 +45,8 @@ struct _iunit_debug {
#define OPTION_BIN_LIST BIT(0)
#define OPTION_BIN_RUN BIT(1)
-#define OPTION_MEM_STAT BIT(2)
#define OPTION_VALID (OPTION_BIN_LIST \
- | OPTION_BIN_RUN \
- | OPTION_MEM_STAT)
+ | OPTION_BIN_RUN)
static struct _iunit_debug iunit_debug = {
.dbglvl = 0,
@@ -81,9 +79,6 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
goto opt_err;
}
}
-
- if (opt & OPTION_MEM_STAT)
- hmm_show_mem_stat(__func__, __LINE__);
} else {
ret = -EINVAL;
dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index be6a74d5ac19..77150e4ae144 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -38,8 +38,6 @@
#include "type_support.h"
#include "device_access/device_access.h"
-#include "atomisp_acc.h"
-
#define ISP_LEFT_PAD 128 /* equal to 2*NWAY */
/*
@@ -865,12 +863,6 @@ dev_init:
goto error;
}
- if (dypool_enable) {
- ret = hmm_pool_register(dypool_pgnr, HMM_POOL_TYPE_DYNAMIC);
- if (ret)
- dev_err(isp->dev, "Failed to register dynamic memory pool.\n");
- }
-
/* Init ISP */
if (atomisp_css_init(isp)) {
ret = -EINVAL;
@@ -910,7 +902,6 @@ css_error:
atomisp_css_uninit(isp);
pm_runtime_put(vdev->v4l2_dev->dev);
error:
- hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
rt_mutex_unlock(&isp->mutex);
return ret;
}
@@ -1021,8 +1012,6 @@ subdev_uninit:
if (atomisp_dev_users(isp))
goto done;
- atomisp_acc_release(asd);
-
atomisp_destroy_pipes_stream_force(asd);
atomisp_css_uninit(isp);
@@ -1032,8 +1021,6 @@ subdev_uninit:
isp->css_env.isp_css_fw.bytes = 0;
}
- hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
-
ret = v4l2_subdev_call(isp->flash, core, s_power, 0);
if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD)
dev_warn(isp->dev, "Failed to power-off flash\n");
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index 7e47db82de07..bf527b366ab3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -1284,7 +1284,7 @@ static int gmin_get_config_var(struct device *maindev,
const struct dmi_system_id *id;
struct device *dev = maindev;
char var8[CFG_VAR_NAME_MAX];
- struct efivar_entry *ev;
+ efi_status_t status;
int i, ret;
/* For sensors, try first to use the _DSM table */
@@ -1326,24 +1326,11 @@ static int gmin_get_config_var(struct device *maindev,
for (i = 0; i < sizeof(var8) && var8[i]; i++)
var16[i] = var8[i];
- /* Not sure this API usage is kosher; efivar_entry_get()'s
- * implementation simply uses VariableName and VendorGuid from
- * the struct and ignores the rest, but it seems like there
- * ought to be an "official" efivar_entry registered
- * somewhere?
- */
- ev = kzalloc(sizeof(*ev), GFP_KERNEL);
- if (!ev)
- return -ENOMEM;
- memcpy(&ev->var.VariableName, var16, sizeof(var16));
- ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID;
- ev->var.DataSize = *out_len;
-
- ret = efivar_entry_get(ev, &ev->var.Attributes,
- &ev->var.DataSize, ev->var.Data);
- if (ret == 0) {
- memcpy(out, ev->var.Data, ev->var.DataSize);
- *out_len = ev->var.DataSize;
+ status = EFI_UNSUPPORTED;
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ status = efi.get_variable(var16, &GMIN_CFG_VAR_EFI_GUID, NULL,
+ (unsigned long *)out_len, out);
+ if (status == EFI_SUCCESS) {
dev_info(maindev, "found EFI entry for '%s'\n", var8);
} else if (is_gmin) {
dev_info(maindev, "Failed to find EFI gmin variable %s\n", var8);
@@ -1351,8 +1338,6 @@ static int gmin_get_config_var(struct device *maindev,
dev_info(maindev, "Failed to find EFI variable %s\n", var8);
}
- kfree(ev);
-
return ret;
}
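
Annotation: the EFI fallback now calls the firmware's runtime service directly instead of building a throwaway efivar_entry. A stand-alone sketch of that read pattern (the variable name, buffer and GUID below are placeholders for illustration, not values from the driver):

	#include <linux/efi.h>

	static efi_guid_t demo_guid = EFI_GUID(0x01234567, 0x89ab, 0xcdef,
					       0x01, 0x23, 0x45, 0x67,
					       0x89, 0xab, 0xcd, 0xef);

	/* Read an EFI variable into @buf; @size is in/out, as in the hunk above. */
	static efi_status_t demo_read_var(efi_char16_t *name, void *buf,
					  unsigned long *size)
	{
		if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
			return EFI_UNSUPPORTED;

		/* the attributes pointer may be NULL when the caller does not care */
		return efi.get_variable(name, &demo_guid, NULL, size, buf);
	}

On EFI_SUCCESS the variable data lands directly in the caller's buffer and *size holds the actual length, which is what lets gmin_get_config_var() fill out/out_len without the intermediate copy the old efivar_entry code needed.
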
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 8fd470efd658..459645c2e2a7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -25,7 +25,6 @@
#include <media/v4l2-event.h>
#include <media/videobuf-vmalloc.h>
-#include "atomisp_acc.h"
#include "atomisp_cmd.h"
#include "atomisp_common.h"
#include "atomisp_fops.h"
@@ -625,17 +624,6 @@ unsigned int atomisp_streaming_count(struct atomisp_device *isp)
return sum;
}
-unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp)
-{
- unsigned int i;
-
- for (i = 0; i < isp->num_of_streams; i++)
- if (isp->asd[i].acc.pipeline)
- return 1;
-
- return 0;
-}
-
/*
* get input are used to get current primary/secondary camera
*/
@@ -1371,7 +1359,7 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
ret = ia_css_frame_map(&handle, &frame_info,
(void __user *)buf->m.userptr,
- 0, pgnr);
+ pgnr);
if (ret) {
dev_err(isp->dev, "Failed to map user buffer\n");
goto error;
@@ -1913,11 +1901,8 @@ static int atomisp_streamon(struct file *file, void *fh,
css_pipe_id = atomisp_get_css_pipe_id(asd);
- ret = atomisp_acc_load_extensions(asd);
- if (ret < 0) {
- dev_err(isp->dev, "acc extension failed to load\n");
- goto out;
- }
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
if (asd->params.css_update_params_needed) {
atomisp_apply_css_parameters(asd, &asd->params.css_param);
@@ -2154,7 +2139,6 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
video, s_stream, 0);
rt_mutex_lock(&isp->mutex);
- atomisp_acc_unload_extensions(asd);
}
spin_lock_irqsave(&isp->lock, flags);
@@ -2283,8 +2267,17 @@ stopsensor:
dev_err(isp->dev, "atomisp_reset");
atomisp_reset(isp);
for (i = 0; i < isp->num_of_streams; i++) {
- if (recreate_streams[i])
- atomisp_create_pipes_stream(&isp->asd[i]);
+ if (recreate_streams[i]) {
+ int ret2;
+
+ ret2 = atomisp_create_pipes_stream(&isp->asd[i]);
+ if (ret2) {
+ dev_err(isp->dev, "%s error re-creating streams: %d\n",
+ __func__, ret2);
+ if (!ret)
+ ret = ret2;
+ }
+ }
}
isp->isp_timeout = false;
}
@@ -3118,38 +3111,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
err = -EINVAL;
break;
- case ATOMISP_IOC_ACC_LOAD:
- err = atomisp_acc_load(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
- err = atomisp_acc_load_to_pipe(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_UNLOAD:
- err = atomisp_acc_unload(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_START:
- err = atomisp_acc_start(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_WAIT:
- err = atomisp_acc_wait(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_MAP:
- err = atomisp_acc_map(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_UNMAP:
- err = atomisp_acc_unmap(asd, arg);
- break;
-
- case ATOMISP_IOC_ACC_S_MAPPED_ARG:
- err = atomisp_acc_s_mapped_arg(asd, arg);
- break;
-
case ATOMISP_IOC_S_ISP_SHD_TAB:
err = atomisp_set_shading_table(asd, arg);
break;
@@ -3198,12 +3159,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
case ATOMISP_IOC_S_EXPOSURE_WINDOW:
err = atomisp_s_ae_window(asd, arg);
break;
- case ATOMISP_IOC_S_ACC_STATE:
- err = atomisp_acc_set_state(asd, arg);
- break;
- case ATOMISP_IOC_G_ACC_STATE:
- err = atomisp_acc_get_state(asd, arg);
- break;
case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
err = atomisp_inject_a_fake_event(asd, arg);
break;
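
Annotation: one behavioural detail in the stream-off path above — when several streams need re-creating after a reset, the loop keeps going on failure and only remembers the first error. A stand-alone sketch of that "first error wins, keep going" idiom (hypothetical names, not driver code):

	#include <linux/types.h>

	/* Re-create every marked entry; report failures but return the first one. */
	static int recreate_marked(int (*recreate)(int idx), const bool *marked, int n)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			int ret2;

			if (!marked[i])
				continue;

			ret2 = recreate(i);
			if (ret2 && !ret)
				ret = ret2;	/* remember the first failure only */
		}

		return ret;
	}

Later failures are still logged by the real code, but they do not overwrite the error that is eventually returned to user space.
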
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
index 412bfcf33c0f..d85e0d697a4e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
@@ -57,7 +57,6 @@ extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops;
unsigned int atomisp_streaming_count(struct atomisp_device *isp);
-unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp);
/* compat_ioctl for 32bit userland app and 64bit kernel */
long atomisp_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 1807cfa786a7..394fe6959033 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1081,9 +1081,6 @@ static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd,
{
pipe->asd = asd;
pipe->isp = asd->isp;
- INIT_LIST_HEAD(&asd->acc.fw);
- INIT_LIST_HEAD(&asd->acc.memory_maps);
- ida_init(&asd->acc.ida);
}
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
index 7d731f1fee72..798a93793a9a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -322,16 +322,6 @@ struct atomisp_sub_device {
struct v4l2_ctrl *disable_dz;
- struct {
- struct list_head fw;
- struct list_head memory_maps;
- struct ia_css_pipe *pipeline;
- bool extension_mode;
- struct ida ida;
- struct completion acc_done;
- void *acc_stages;
- } acc;
-
struct atomisp_subdev_params params;
struct atomisp_stream_env stream_env[ATOMISP_INPUT_STREAM_NUM];
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 49ccfb1646da..643ba981601b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -37,7 +37,6 @@
#include "atomisp_file.h"
#include "atomisp_ioctl.h"
#include "atomisp_internal.h"
-#include "atomisp_acc.h"
#include "atomisp-regs.h"
#include "atomisp_dfs_tables.h"
#include "atomisp_drvfs.h"
@@ -59,23 +58,6 @@ static uint skip_fwload;
module_param(skip_fwload, uint, 0644);
MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load");
-/* set reserved memory pool size in page */
-static unsigned int repool_pgnr = 32768;
-module_param(repool_pgnr, uint, 0644);
-MODULE_PARM_DESC(repool_pgnr,
- "Set the reserved memory pool size in page (default:32768)");
-
-/* set dynamic memory pool size in page */
-unsigned int dypool_pgnr = UINT_MAX;
-module_param(dypool_pgnr, uint, 0644);
-MODULE_PARM_DESC(dypool_pgnr,
- "Set the dynamic memory pool size in page (default: unlimited)");
-
-bool dypool_enable = true;
-module_param(dypool_enable, bool, 0644);
-MODULE_PARM_DESC(dypool_enable,
- "dynamic memory pool enable/disable (default:enabled)");
-
/* memory optimization: deferred firmware loading */
bool defer_fw_load;
module_param(defer_fw_load, bool, 0644);
@@ -1770,13 +1752,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
- hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr);
- err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
- if (err) {
- dev_err(&pdev->dev, "Failed to register reserved memory pool.\n");
- goto hmm_pool_fail;
- }
-
/* Init ISP memory management */
hmm_init();
@@ -1813,12 +1788,9 @@ css_init_fail:
devm_free_irq(&pdev->dev, pdev->irq, isp);
request_irq_fail:
hmm_cleanup();
- hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
-hmm_pool_fail:
pm_runtime_get_noresume(&pdev->dev);
destroy_workqueue(isp->wdt_work_queue);
wdt_work_queue_fail:
- atomisp_acc_cleanup(isp);
atomisp_unregister_entities(isp);
register_entities_fail:
atomisp_uninitialize_modules(isp);
@@ -1869,8 +1841,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev)
atomisp_drvfs_exit();
- atomisp_acc_cleanup(isp);
-
ia_css_unload_firmware();
hmm_cleanup();
@@ -1885,8 +1855,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev)
atomisp_file_input_cleanup(isp);
release_firmware(isp->firmware);
-
- hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
}
static const struct pci_device_id atomisp_pci_tbl[] = {
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
index ee861ddb8e92..5660bd4221be 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
@@ -25,7 +25,7 @@
* Simple queuing trace buffer for debug data
* instantiatable in SP DMEM
*
- * The buffer has a remote and and a local store
+ * The buffer has a remote and a local store
* which contain duplicate data (when in sync).
* The buffers are automatically synched when the
* user dequeues, or manualy using the synch function
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index c1cda16f2dc0..fc6cfe9f7744 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -28,7 +28,6 @@
#include <linux/sysfs.h>
#include "hmm/hmm.h"
-#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"
#include "atomisp_internal.h"
@@ -37,11 +36,8 @@
#include "mmu/sh_mmu_mrfld.h"
struct hmm_bo_device bo_device;
-struct hmm_pool dynamic_pool;
-struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;
-struct _hmm_mem_stat hmm_mem_stat;
/*
* p: private
@@ -113,62 +109,13 @@ static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}
-static ssize_t reserved_pool_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t ret = 0;
-
- struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
- unsigned long flags;
-
- if (!pinfo || !pinfo->initialized)
- return 0;
-
- spin_lock_irqsave(&pinfo->list_lock, flags);
- ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
- pinfo->index, pinfo->pgnr);
- spin_unlock_irqrestore(&pinfo->list_lock, flags);
-
- if (ret > 0)
- ret++; /* Add trailing zero, not included by scnprintf */
-
- return ret;
-};
-
-static ssize_t dynamic_pool_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t ret = 0;
-
- struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
- unsigned long flags;
-
- if (!pinfo || !pinfo->initialized)
- return 0;
-
- spin_lock_irqsave(&pinfo->list_lock, flags);
- ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
- pinfo->pgnr, pinfo->pool_size);
- spin_unlock_irqrestore(&pinfo->list_lock, flags);
-
- if (ret > 0)
- ret++; /* Add trailing zero, not included by scnprintf */
-
- return ret;
-};
static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);
-static DEVICE_ATTR_RO(reserved_pool);
-static DEVICE_ATTR_RO(dynamic_pool);
static struct attribute *sysfs_attrs_ctrl[] = {
&dev_attr_active_bo.attr,
&dev_attr_free_bo.attr,
- &dev_attr_reserved_pool.attr,
- &dev_attr_dynamic_pool.attr,
NULL
};
@@ -194,7 +141,7 @@ int hmm_init(void)
* at the beginning, to avoid hmm_alloc return 0 in the
* further allocation.
*/
- dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, 0);
+ dummy_ptr = hmm_alloc(1);
if (!ret) {
ret = sysfs_create_group(&atomisp_dev->kobj,
@@ -221,17 +168,12 @@ void hmm_cleanup(void)
hmm_initialized = false;
}
-ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
- int from_highmem, const void __user *userptr,
- const uint16_t attrs)
+static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type, const void __user *userptr)
{
unsigned int pgnr;
struct hmm_buffer_object *bo;
- bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;
int ret;
- WARN_ON(attrs & ATOMISP_MAP_FLAG_CONTIGUOUS);
-
/*
* Check if we are initialized. In the ideal world we wouldn't need
* this but we can tackle it once the driver is a lot cleaner
@@ -250,7 +192,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
}
/* Allocate pages for memory */
- ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
+ ret = hmm_bo_alloc_pages(bo, type, userptr);
if (ret) {
dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
goto alloc_page_err;
@@ -263,14 +205,9 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
goto bind_err;
}
- hmm_mem_stat.tol_cnt += pgnr;
-
- if (attrs & ATOMISP_MAP_FLAG_CLEARED)
- hmm_set(bo->start, 0, bytes);
-
dev_dbg(atomisp_dev,
- "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
- __func__, bo->start, bytes, type, from_highmem, userptr, cached);
+ "%s: pages: 0x%08x (%zu bytes), type: %d, user ptr %p\n",
+ __func__, bo->start, bytes, type, userptr);
return bo->start;
@@ -282,6 +219,16 @@ create_bo_err:
return 0;
}
+ia_css_ptr hmm_alloc(size_t bytes)
+{
+ return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
+}
+
+ia_css_ptr hmm_create_from_userdata(size_t bytes, const void __user *userptr)
+{
+ return __hmm_alloc(bytes, HMM_BO_USER, userptr);
+}
+
void hmm_free(ia_css_ptr virt)
{
struct hmm_buffer_object *bo;
@@ -300,8 +247,6 @@ void hmm_free(ia_css_ptr virt)
return;
}
- hmm_mem_stat.tol_cnt -= bo->pgnr;
-
hmm_bo_unbind(bo);
hmm_bo_free_pages(bo);
hmm_bo_unref(bo);
@@ -350,7 +295,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
idx = (virt - bo->start) >> PAGE_SHIFT;
offset = (virt - bo->start) - (idx << PAGE_SHIFT);
- src = (char *)kmap(bo->page_obj[idx].page) + offset;
+ src = (char *)kmap_local_page(bo->pages[idx]) + offset;
if ((bytes + offset) >= PAGE_SIZE) {
len = PAGE_SIZE - offset;
@@ -369,7 +314,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
clflush_cache_range(src, len);
- kunmap(bo->page_obj[idx].page);
+ kunmap_local(src);
}
return 0;
@@ -482,10 +427,7 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
idx = (virt - bo->start) >> PAGE_SHIFT;
offset = (virt - bo->start) - (idx << PAGE_SHIFT);
- if (in_atomic())
- des = (char *)kmap_atomic(bo->page_obj[idx].page);
- else
- des = (char *)kmap(bo->page_obj[idx].page);
+ des = (char *)kmap_local_page(bo->pages[idx]);
if (!des) {
dev_err(atomisp_dev,
@@ -512,14 +454,7 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
clflush_cache_range(des, len);
- if (in_atomic())
- /*
- * Note: kunmap_atomic requires return addr from
- * kmap_atomic, not the page. See linux/highmem.h
- */
- kunmap_atomic(des - offset);
- else
- kunmap(bo->page_obj[idx].page);
+ kunmap_local(des);
}
return 0;
@@ -563,7 +498,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
idx = (virt - bo->start) >> PAGE_SHIFT;
offset = (virt - bo->start) - (idx << PAGE_SHIFT);
- des = (char *)kmap(bo->page_obj[idx].page) + offset;
+ des = (char *)kmap_local_page(bo->pages[idx]) + offset;
if ((bytes + offset) >= PAGE_SIZE) {
len = PAGE_SIZE - offset;
@@ -579,7 +514,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
clflush_cache_range(des, len);
- kunmap(bo->page_obj[idx].page);
+ kunmap_local(des);
}
return 0;
@@ -602,7 +537,7 @@ phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
idx = (virt - bo->start) >> PAGE_SHIFT;
offset = (virt - bo->start) - (idx << PAGE_SHIFT);
- return page_to_phys(bo->page_obj[idx].page) + offset;
+ return page_to_phys(bo->pages[idx]) + offset;
}
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
@@ -671,96 +606,3 @@ void hmm_vunmap(ia_css_ptr virt)
hmm_bo_vunmap(bo);
}
-
-int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
-{
-#if 0 // Just use the "normal" pool
- switch (pool_type) {
- case HMM_POOL_TYPE_RESERVED:
- reserved_pool.pops = &reserved_pops;
- return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
- pool_size);
- case HMM_POOL_TYPE_DYNAMIC:
- dynamic_pool.pops = &dynamic_pops;
- return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
- pool_size);
- default:
- dev_err(atomisp_dev, "invalid pool type.\n");
- return -EINVAL;
- }
-#else
- return 0;
-#endif
-}
-
-void hmm_pool_unregister(enum hmm_pool_type pool_type)
-{
-#if 0 // Just use the "normal" pool
- switch (pool_type) {
- case HMM_POOL_TYPE_RESERVED:
- if (reserved_pool.pops && reserved_pool.pops->pool_exit)
- reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
- break;
- case HMM_POOL_TYPE_DYNAMIC:
- if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
- dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
- break;
- default:
- dev_err(atomisp_dev, "invalid pool type.\n");
- break;
- }
-#endif
-
- return;
-}
-
-void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
-{
- return hmm_vmap(ptr, cached);
- /* vmunmap will be done in hmm_bo_release() */
-}
-
-ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
-{
- struct hmm_buffer_object *bo;
-
- bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
- if (bo)
- return bo->start;
-
- dev_err(atomisp_dev,
- "can not find buffer object whose kernel virtual address is %p\n",
- ptr);
- return 0;
-}
-
-void hmm_show_mem_stat(const char *func, const int line)
-{
- pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
- hmm_mem_stat.tol_cnt,
- hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
- hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
- hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
-}
-
-void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
-{
- hmm_mem_stat.res_size = res_pgnr;
- /* If reserved mem pool is not enabled, set its "mem stat" values as -1. */
- if (hmm_mem_stat.res_size == 0) {
- hmm_mem_stat.res_size = -1;
- hmm_mem_stat.res_cnt = -1;
- }
-
- /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */
- if (!dyc_en) {
- hmm_mem_stat.dyc_size = -1;
- hmm_mem_stat.dyc_thr = -1;
- } else {
- hmm_mem_stat.dyc_size = 0;
- hmm_mem_stat.dyc_thr = dyc_pgnr;
- }
- hmm_mem_stat.usr_size = 0;
- hmm_mem_stat.sys_size = 0;
- hmm_mem_stat.tol_cnt = 0;
-}
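
Annotation: after this cleanup the hmm allocator exposes just two entry points, hmm_alloc() for ISP-private buffers and hmm_create_from_userdata() for wrapping a user-space buffer, both thin wrappers around __hmm_alloc(). A hedged usage sketch — the sizes, user pointer and helper name are placeholders:

	#include "hmm/hmm.h"

	static int demo_alloc_buffers(size_t frame_bytes, const void __user *user_ptr)
	{
		ia_css_ptr scratch, frame;

		/* ISP-private buffer; was hmm_alloc(bytes, HMM_BO_PRIVATE, 0, NULL, 0) */
		scratch = hmm_alloc(16 * PAGE_SIZE);
		if (!scratch)
			return -ENOMEM;

		/* wrap a frame buffer handed in from user space (V4L2 USERPTR style) */
		frame = hmm_create_from_userdata(frame_bytes, user_ptr);
		if (!frame) {
			hmm_free(scratch);
			return -ENOMEM;
		}

		/* ... hand the ia_css_ptr cookies to the CSS firmware ... */

		hmm_free(frame);
		hmm_free(scratch);
		return 0;
	}

Both paths now always allocate uncached lowmem pages — the cached/from_highmem/attrs knobs are gone — and cache coherency is handled wholesale by the wbinvd() calls added elsewhere in this patch (see the FIXME comments in atomisp_ioctl.c and atomisp_compat_css20.c).
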
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 0168f9839c90..f50494123f03 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -42,7 +42,6 @@
#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
-#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"
static unsigned int order_to_nr(unsigned int order)
@@ -627,75 +626,31 @@ found:
}
static void free_private_bo_pages(struct hmm_buffer_object *bo,
- struct hmm_pool *dypool,
- struct hmm_pool *repool,
int free_pgnr)
{
int i, ret;
for (i = 0; i < free_pgnr; i++) {
- switch (bo->page_obj[i].type) {
- case HMM_PAGE_TYPE_RESERVED:
- if (repool->pops
- && repool->pops->pool_free_pages) {
- repool->pops->pool_free_pages(repool->pool_info,
- &bo->page_obj[i]);
- hmm_mem_stat.res_cnt--;
- }
- break;
- /*
- * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
- * memory, so when free them, they should be put into dynamic
- * pool.
- */
- case HMM_PAGE_TYPE_DYNAMIC:
- case HMM_PAGE_TYPE_GENERAL:
- if (dypool->pops
- && dypool->pops->pool_inited
- && dypool->pops->pool_inited(dypool->pool_info)) {
- if (dypool->pops->pool_free_pages)
- dypool->pops->pool_free_pages(
- dypool->pool_info,
- &bo->page_obj[i]);
- break;
- }
-
- fallthrough;
-
+ ret = set_pages_wb(bo->pages[i], 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret = %d\n",
+ ret);
/*
- * if dynamic memory pool doesn't exist, need to free
- * pages to system directly.
- */
- default:
- ret = set_pages_wb(bo->page_obj[i].page, 1);
- if (ret)
- dev_err(atomisp_dev,
- "set page to WB err ...ret = %d\n",
- ret);
- /*
- W/A: set_pages_wb seldom return value = -EFAULT
- indicate that address of page is not in valid
- range(0xffff880000000000~0xffffc7ffffffffff)
- then, _free_pages would panic; Do not know why page
- address be valid,it maybe memory corruption by lowmemory
- */
- if (!ret) {
- __free_pages(bo->page_obj[i].page, 0);
- hmm_mem_stat.sys_size--;
- }
- break;
+ W/A: set_pages_wb seldom return value = -EFAULT
+ indicate that address of page is not in valid
+ range(0xffff880000000000~0xffffc7ffffffffff)
+ then, _free_pages would panic; Do not know why page
+ address be valid,it maybe memory corruption by lowmemory
+ */
+ if (!ret) {
+ __free_pages(bo->pages[i], 0);
}
}
-
- return;
}
/*Allocate pages which will be used only by ISP*/
-static int alloc_private_pages(struct hmm_buffer_object *bo,
- int from_highmem,
- bool cached,
- struct hmm_pool *dypool,
- struct hmm_pool *repool)
+static int alloc_private_pages(struct hmm_buffer_object *bo)
{
int ret;
unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
@@ -706,50 +661,11 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
bool reduce_order = false;
bool lack_mem = true;
- if (from_highmem)
- gfp |= __GFP_HIGHMEM;
-
pgnr = bo->pgnr;
- bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
- GFP_KERNEL);
- if (unlikely(!bo->page_obj))
- return -ENOMEM;
-
i = 0;
alloc_pgnr = 0;
- /*
- * get physical pages from dynamic pages pool.
- */
- if (dypool->pops && dypool->pops->pool_alloc_pages) {
- alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
- bo->page_obj, pgnr,
- cached);
- hmm_mem_stat.dyc_size -= alloc_pgnr;
-
- if (alloc_pgnr == pgnr)
- return 0;
- }
-
- pgnr -= alloc_pgnr;
- i += alloc_pgnr;
-
- /*
- * get physical pages from reserved pages pool for atomisp.
- */
- if (repool->pops && repool->pops->pool_alloc_pages) {
- alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
- &bo->page_obj[i], pgnr,
- cached);
- hmm_mem_stat.res_cnt += alloc_pgnr;
- if (alloc_pgnr == pgnr)
- return 0;
- }
-
- pgnr -= alloc_pgnr;
- i += alloc_pgnr;
-
while (pgnr) {
order = nr_to_order_bottom(pgnr);
/*
@@ -804,28 +720,24 @@ retry:
} else {
blk_pgnr = order_to_nr(order);
- if (!cached) {
- /*
- * set memory to uncacheable -- UC_MINUS
- */
- ret = set_pages_uc(pages, blk_pgnr);
- if (ret) {
- dev_err(atomisp_dev,
- "set page uncacheablefailed.\n");
+ /*
+ * set memory to uncacheable -- UC_MINUS
+ */
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set page uncacheablefailed.\n");
- __free_pages(pages, order);
+ __free_pages(pages, order);
- goto cleanup;
- }
+ goto cleanup;
}
- for (j = 0; j < blk_pgnr; j++) {
- bo->page_obj[i].page = pages + j;
- bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
+ for (j = 0; j < blk_pgnr; j++, i++) {
+ bo->pages[i] = pages + j;
}
pgnr -= blk_pgnr;
- hmm_mem_stat.sys_size += blk_pgnr;
/*
* if order is not reduced this time, clear
@@ -841,60 +753,31 @@ retry:
return 0;
cleanup:
alloc_pgnr = i;
- free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
-
- kfree(bo->page_obj);
-
+ free_private_bo_pages(bo, alloc_pgnr);
return -ENOMEM;
}
-static void free_private_pages(struct hmm_buffer_object *bo,
- struct hmm_pool *dypool,
- struct hmm_pool *repool)
-{
- free_private_bo_pages(bo, dypool, repool, bo->pgnr);
-
- kfree(bo->page_obj);
-}
-
static void free_user_pages(struct hmm_buffer_object *bo,
unsigned int page_nr)
{
int i;
- hmm_mem_stat.usr_size -= bo->pgnr;
-
if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
unpin_user_pages(bo->pages, page_nr);
} else {
for (i = 0; i < page_nr; i++)
put_page(bo->pages[i]);
}
- kfree(bo->pages);
- kfree(bo->page_obj);
}
/*
* Convert user space virtual address into pages list
*/
static int alloc_user_pages(struct hmm_buffer_object *bo,
- const void __user *userptr, bool cached)
+ const void __user *userptr)
{
int page_nr;
- int i;
struct vm_area_struct *vma;
- struct page **pages;
-
- pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
- if (unlikely(!pages))
- return -ENOMEM;
-
- bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
- GFP_KERNEL);
- if (unlikely(!bo->page_obj)) {
- kfree(pages);
- return -ENOMEM;
- }
mutex_unlock(&bo->mutex);
mmap_read_lock(current->mm);
@@ -902,8 +785,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
mmap_read_unlock(current->mm);
if (!vma) {
dev_err(atomisp_dev, "find_vma failed\n");
- kfree(bo->page_obj);
- kfree(pages);
mutex_lock(&bo->mutex);
return -EFAULT;
}
@@ -915,18 +796,16 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
userptr = untagged_addr(userptr);
- bo->pages = pages;
-
if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
FOLL_LONGTERM | FOLL_WRITE,
- pages, NULL);
+ bo->pages, NULL);
bo->mem_type = HMM_BO_MEM_TYPE_PFN;
} else {
/*Handle frame buffer allocated in user space*/
mutex_unlock(&bo->mutex);
page_nr = get_user_pages_fast((unsigned long)userptr,
- (int)(bo->pgnr), 1, pages);
+ (int)(bo->pgnr), 1, bo->pages);
mutex_lock(&bo->mutex);
bo->mem_type = HMM_BO_MEM_TYPE_USER;
}
@@ -936,8 +815,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
bo->pgnr,
bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);
- hmm_mem_stat.usr_size += bo->pgnr;
-
/* can be written by caller, not forced */
if (page_nr != bo->pgnr) {
dev_err(atomisp_dev,
@@ -948,11 +825,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
goto out_of_mem;
}
- for (i = 0; i < bo->pgnr; i++) {
- bo->page_obj[i].page = pages[i];
- bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
- }
-
return 0;
out_of_mem:
@@ -966,20 +838,14 @@ out_of_mem:
* allocate/free physical pages for the bo.
*
* type indicate where are the pages from. currently we have 3 types
- * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
- *
- * from_highmem is only valid when type is HMM_BO_PRIVATE, it will
- * try to alloc memory from highmem if from_highmem is set.
+ * of memory: HMM_BO_PRIVATE, HMM_BO_USER.
*
* userptr is only valid when type is HMM_BO_USER, it indicates
* the start address from user space task.
- *
- * from_highmem and userptr will both be ignored when type is
- * HMM_BO_SHARE.
*/
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
- enum hmm_bo_type type, int from_highmem,
- const void __user *userptr, bool cached)
+ enum hmm_bo_type type,
+ const void __user *userptr)
{
int ret = -EINVAL;
@@ -988,15 +854,20 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
mutex_lock(&bo->mutex);
check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+ bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(!bo->pages)) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+
/*
* TO DO:
* add HMM_BO_USER type
*/
if (type == HMM_BO_PRIVATE) {
- ret = alloc_private_pages(bo, from_highmem,
- cached, &dynamic_pool, &reserved_pool);
+ ret = alloc_private_pages(bo);
} else if (type == HMM_BO_USER) {
- ret = alloc_user_pages(bo, userptr, cached);
+ ret = alloc_user_pages(bo, userptr);
} else {
dev_err(atomisp_dev, "invalid buffer type.\n");
ret = -EINVAL;
@@ -1013,6 +884,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
return 0;
alloc_err:
+ kfree(bo->pages);
mutex_unlock(&bo->mutex);
dev_err(atomisp_dev, "alloc pages err...\n");
return ret;
@@ -1038,11 +910,13 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
bo->status &= (~HMM_BO_PAGE_ALLOCED);
if (bo->type == HMM_BO_PRIVATE)
- free_private_pages(bo, &dynamic_pool, &reserved_pool);
+ free_private_bo_pages(bo, bo->pgnr);
else if (bo->type == HMM_BO_USER)
free_user_pages(bo, bo->pgnr);
else
dev_err(atomisp_dev, "invalid buffer type.\n");
+
+ kfree(bo->pages);
mutex_unlock(&bo->mutex);
return;
@@ -1061,32 +935,6 @@ int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
}
/*
- * get physical page info of the bo.
- */
-int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
- struct hmm_page_object **page_obj, int *pgnr)
-{
- check_bo_null_return(bo, -EINVAL);
-
- mutex_lock(&bo->mutex);
-
- check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
-
- *page_obj = bo->page_obj;
- *pgnr = bo->pgnr;
-
- mutex_unlock(&bo->mutex);
-
- return 0;
-
-status_err:
- dev_err(atomisp_dev,
- "buffer object not page allocated yet.\n");
- mutex_unlock(&bo->mutex);
- return -EINVAL;
-}
-
-/*
* bind the physical pages to a virtual address space.
*/
int hmm_bo_bind(struct hmm_buffer_object *bo)
@@ -1113,7 +961,7 @@ int hmm_bo_bind(struct hmm_buffer_object *bo)
for (i = 0; i < bo->pgnr; i++) {
ret =
isp_mmu_map(&bdev->mmu, virt,
- page_to_phys(bo->page_obj[i].page), 1);
+ page_to_phys(bo->pages[i]), 1);
if (ret)
goto map_err;
virt += (1 << PAGE_SHIFT);
@@ -1227,9 +1075,6 @@ int hmm_bo_binded(struct hmm_buffer_object *bo)
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
- struct page **pages;
- int i;
-
check_bo_null_return(bo, NULL);
mutex_lock(&bo->mutex);
@@ -1246,27 +1091,15 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
}
- pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
- if (unlikely(!pages)) {
- mutex_unlock(&bo->mutex);
- return NULL;
- }
-
- for (i = 0; i < bo->pgnr; i++)
- pages[i] = bo->page_obj[i].page;
-
- bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+ bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
if (unlikely(!bo->vmap_addr)) {
- kfree(pages);
mutex_unlock(&bo->mutex);
dev_err(atomisp_dev, "vmap failed...\n");
return NULL;
}
bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
- kfree(pages);
-
mutex_unlock(&bo->mutex);
return bo->vmap_addr;
}
@@ -1396,7 +1229,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
virt = vma->vm_start;
for (i = 0; i < pgnr; i++) {
- pfn = page_to_pfn(bo->page_obj[i].page);
+ pfn = page_to_pfn(bo->pages[i]);
if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
dev_warn(atomisp_dev,
"remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c
deleted file mode 100644
index eaf97e5f3b68..000000000000
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Medifield PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-/*
- * This file contains functions for dynamic memory pool management
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-
-#include <asm/set_memory.h>
-
-#include "atomisp_internal.h"
-
-#include "hmm/hmm_pool.h"
-
-/*
- * dynamic memory pool ops.
- */
-static unsigned int get_pages_from_dynamic_pool(void *pool,
- struct hmm_page_object *page_obj,
- unsigned int size, bool cached)
-{
- struct hmm_page *hmm_page;
- unsigned long flags;
- unsigned int i = 0;
- struct hmm_dynamic_pool_info *dypool_info = pool;
-
- if (!dypool_info)
- return 0;
-
- spin_lock_irqsave(&dypool_info->list_lock, flags);
- if (dypool_info->initialized) {
- while (!list_empty(&dypool_info->pages_list)) {
- hmm_page = list_entry(dypool_info->pages_list.next,
- struct hmm_page, list);
-
- list_del(&hmm_page->list);
- dypool_info->pgnr--;
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
-
- page_obj[i].page = hmm_page->page;
- page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
- kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-
- if (i == size)
- return i;
-
- spin_lock_irqsave(&dypool_info->list_lock, flags);
- }
- }
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
-
- return i;
-}
-
-static void free_pages_to_dynamic_pool(void *pool,
- struct hmm_page_object *page_obj)
-{
- struct hmm_page *hmm_page;
- unsigned long flags;
- int ret;
- struct hmm_dynamic_pool_info *dypool_info = pool;
-
- if (!dypool_info)
- return;
-
- spin_lock_irqsave(&dypool_info->list_lock, flags);
- if (!dypool_info->initialized) {
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
-
- if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
- return;
-
- if (dypool_info->pgnr >= dypool_info->pool_size) {
- /* free page directly back to system */
- ret = set_pages_wb(page_obj->page, 1);
- if (ret)
- dev_err(atomisp_dev,
- "set page to WB err ...ret=%d\n", ret);
- /*
- W/A: set_pages_wb seldom return value = -EFAULT
- indicate that address of page is not in valid
- range(0xffff880000000000~0xffffc7ffffffffff)
- then, _free_pages would panic; Do not know why page
- address be valid, it maybe memory corruption by lowmemory
- */
- if (!ret) {
- __free_pages(page_obj->page, 0);
- hmm_mem_stat.sys_size--;
- }
- return;
- }
- hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
- GFP_KERNEL);
- if (!hmm_page) {
- /* free page directly */
- ret = set_pages_wb(page_obj->page, 1);
- if (ret)
- dev_err(atomisp_dev,
- "set page to WB err ...ret=%d\n", ret);
- if (!ret) {
- __free_pages(page_obj->page, 0);
- hmm_mem_stat.sys_size--;
- }
- return;
- }
-
- hmm_page->page = page_obj->page;
-
- /*
- * add to pages_list of pages_pool
- */
- spin_lock_irqsave(&dypool_info->list_lock, flags);
- list_add_tail(&hmm_page->list, &dypool_info->pages_list);
- dypool_info->pgnr++;
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
- hmm_mem_stat.dyc_size++;
-}
-
-static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
-{
- struct hmm_dynamic_pool_info *dypool_info;
-
- if (pool_size == 0)
- return 0;
-
- dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
- GFP_KERNEL);
- if (unlikely(!dypool_info))
- return -ENOMEM;
-
- dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
- sizeof(struct hmm_page), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!dypool_info->pgptr_cache) {
- kfree(dypool_info);
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&dypool_info->pages_list);
- spin_lock_init(&dypool_info->list_lock);
- dypool_info->initialized = true;
- dypool_info->pool_size = pool_size;
- dypool_info->pgnr = 0;
-
- *pool = dypool_info;
-
- return 0;
-}
-
-static void hmm_dynamic_pool_exit(void **pool)
-{
- struct hmm_dynamic_pool_info *dypool_info = *pool;
- struct hmm_page *hmm_page;
- unsigned long flags;
- int ret;
-
- if (!dypool_info)
- return;
-
- spin_lock_irqsave(&dypool_info->list_lock, flags);
- if (!dypool_info->initialized) {
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
- return;
- }
- dypool_info->initialized = false;
-
- while (!list_empty(&dypool_info->pages_list)) {
- hmm_page = list_entry(dypool_info->pages_list.next,
- struct hmm_page, list);
-
- list_del(&hmm_page->list);
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
-
- /* can cause thread sleep, so cannot be put into spin_lock */
- ret = set_pages_wb(hmm_page->page, 1);
- if (ret)
- dev_err(atomisp_dev,
- "set page to WB err...ret=%d\n", ret);
- if (!ret) {
- __free_pages(hmm_page->page, 0);
- hmm_mem_stat.dyc_size--;
- hmm_mem_stat.sys_size--;
- }
- kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
- spin_lock_irqsave(&dypool_info->list_lock, flags);
- }
-
- spin_unlock_irqrestore(&dypool_info->list_lock, flags);
-
- kmem_cache_destroy(dypool_info->pgptr_cache);
-
- kfree(dypool_info);
-
- *pool = NULL;
-}
-
-static int hmm_dynamic_pool_inited(void *pool)
-{
- struct hmm_dynamic_pool_info *dypool_info = pool;
-
- if (!dypool_info)
- return 0;
-
- return dypool_info->initialized;
-}
-
-struct hmm_pool_ops dynamic_pops = {
- .pool_init = hmm_dynamic_pool_init,
- .pool_exit = hmm_dynamic_pool_exit,
- .pool_alloc_pages = get_pages_from_dynamic_pool,
- .pool_free_pages = free_pages_to_dynamic_pool,
- .pool_inited = hmm_dynamic_pool_inited,
-};
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c
deleted file mode 100644
index 57525fece921..000000000000
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c
+++ /dev/null
@@ -1,253 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Medifield PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-/*
- * This file contains functions for reserved memory pool management
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-
-#include <asm/set_memory.h>
-
-#include "atomisp_internal.h"
-#include "hmm/hmm_pool.h"
-
-/*
- * reserved memory pool ops.
- */
-static unsigned int get_pages_from_reserved_pool(void *pool,
- struct hmm_page_object *page_obj,
- unsigned int size, bool cached)
-{
- unsigned long flags;
- unsigned int i = 0;
- unsigned int repool_pgnr;
- int j;
- struct hmm_reserved_pool_info *repool_info = pool;
-
- if (!repool_info)
- return 0;
-
- spin_lock_irqsave(&repool_info->list_lock, flags);
- if (repool_info->initialized) {
- repool_pgnr = repool_info->index;
-
- for (j = repool_pgnr - 1; j >= 0; j--) {
- page_obj[i].page = repool_info->pages[j];
- page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
- i++;
- repool_info->index--;
- if (i == size)
- break;
- }
- }
- spin_unlock_irqrestore(&repool_info->list_lock, flags);
- return i;
-}
-
-static void free_pages_to_reserved_pool(void *pool,
- struct hmm_page_object *page_obj)
-{
- unsigned long flags;
- struct hmm_reserved_pool_info *repool_info = pool;
-
- if (!repool_info)
- return;
-
- spin_lock_irqsave(&repool_info->list_lock, flags);
-
- if (repool_info->initialized &&
- repool_info->index < repool_info->pgnr &&
- page_obj->type == HMM_PAGE_TYPE_RESERVED) {
- repool_info->pages[repool_info->index++] = page_obj->page;
- }
-
- spin_unlock_irqrestore(&repool_info->list_lock, flags);
-}
-
-static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
- unsigned int pool_size)
-{
- struct hmm_reserved_pool_info *pool_info;
-
- pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
- GFP_KERNEL);
- if (unlikely(!pool_info))
- return -ENOMEM;
-
- pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
- GFP_KERNEL);
- if (unlikely(!pool_info->pages)) {
- kfree(pool_info);
- return -ENOMEM;
- }
-
- pool_info->index = 0;
- pool_info->pgnr = 0;
- spin_lock_init(&pool_info->list_lock);
- pool_info->initialized = true;
-
- *repool_info = pool_info;
-
- return 0;
-}
-
-static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
-{
- int ret;
- unsigned int blk_pgnr;
- unsigned int pgnr = pool_size;
- unsigned int order = 0;
- unsigned int i = 0;
- int fail_number = 0;
- struct page *pages;
- int j;
- struct hmm_reserved_pool_info *repool_info;
-
- if (pool_size == 0)
- return 0;
-
- ret = hmm_reserved_pool_setup(&repool_info, pool_size);
- if (ret) {
- dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
- return ret;
- }
-
- pgnr = pool_size;
-
- i = 0;
- order = MAX_ORDER;
-
- while (pgnr) {
- blk_pgnr = 1U << order;
- while (blk_pgnr > pgnr) {
- order--;
- blk_pgnr >>= 1U;
- }
- BUG_ON(order > MAX_ORDER);
-
- pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
- if (unlikely(!pages)) {
- if (order == 0) {
- fail_number++;
- dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
- __func__, fail_number);
- /* if fail five times, will goto end */
-
- /* FIXME: whether is the mechanism is ok? */
- if (fail_number == ALLOC_PAGE_FAIL_NUM)
- goto end;
- } else {
- order--;
- }
- } else {
- blk_pgnr = 1U << order;
-
- ret = set_pages_uc(pages, blk_pgnr);
- if (ret) {
- dev_err(atomisp_dev,
- "set pages uncached failed\n");
- __free_pages(pages, order);
- goto end;
- }
-
- for (j = 0; j < blk_pgnr; j++)
- repool_info->pages[i++] = pages + j;
-
- repool_info->index += blk_pgnr;
- repool_info->pgnr += blk_pgnr;
-
- pgnr -= blk_pgnr;
-
- fail_number = 0;
- }
- }
-
-end:
- repool_info->initialized = true;
-
- *pool = repool_info;
-
- dev_info(atomisp_dev,
- "hmm_reserved_pool init successfully,hmm_reserved_pool is with %d pages.\n",
- repool_info->pgnr);
- return 0;
-}
-
-static void hmm_reserved_pool_exit(void **pool)
-{
- unsigned long flags;
- int i, ret;
- unsigned int pgnr;
- struct hmm_reserved_pool_info *repool_info = *pool;
-
- if (!repool_info)
- return;
-
- spin_lock_irqsave(&repool_info->list_lock, flags);
- if (!repool_info->initialized) {
- spin_unlock_irqrestore(&repool_info->list_lock, flags);
- return;
- }
- pgnr = repool_info->pgnr;
- repool_info->index = 0;
- repool_info->pgnr = 0;
- repool_info->initialized = false;
- spin_unlock_irqrestore(&repool_info->list_lock, flags);
-
- for (i = 0; i < pgnr; i++) {
- ret = set_pages_wb(repool_info->pages[i], 1);
- if (ret)
- dev_err(atomisp_dev,
- "set page to WB err...ret=%d\n", ret);
- /*
- W/A: set_pages_wb seldom return value = -EFAULT
- indicate that address of page is not in valid
- range(0xffff880000000000~0xffffc7ffffffffff)
- then, _free_pages would panic; Do not know why
- page address be valid, it maybe memory corruption by lowmemory
- */
- if (!ret)
- __free_pages(repool_info->pages[i], 0);
- }
-
- kfree(repool_info->pages);
- kfree(repool_info);
-
- *pool = NULL;
-}
-
-static int hmm_reserved_pool_inited(void *pool)
-{
- struct hmm_reserved_pool_info *repool_info = pool;
-
- if (!repool_info)
- return 0;
-
- return repool_info->initialized;
-}
-
-struct hmm_pool_ops reserved_pops = {
- .pool_init = hmm_reserved_pool_init,
- .pool_exit = hmm_reserved_pool_exit,
- .pool_alloc_pages = get_pages_from_reserved_pool,
- .pool_free_pages = free_pages_to_reserved_pool,
- .pool_inited = hmm_reserved_pool_inited,
-};
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
index 96c86f0dc81c..514d933f934d 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
@@ -169,7 +169,6 @@ struct ia_css_frame {
/** exposure id, see ia_css_event_public.h for more detail */
u32 isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
bool valid; /** First video output frame is not valid */
- bool contiguous; /** memory is allocated physically contiguously */
union {
unsigned int _initialisation_dummy;
struct ia_css_frame_plane raw;
@@ -245,44 +244,6 @@ ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
void
ia_css_frame_free(struct ia_css_frame *frame);
-/* @brief Allocate a contiguous CSS frame structure
- *
- * @param frame The allocated frame.
- * @param width The width (in pixels) of the frame.
- * @param height The height (in lines) of the frame.
- * @param format The frame format.
- * @param stride The padded stride, in pixels.
- * @param raw_bit_depth The raw bit depth, in bits.
- * @return The error code.
- *
- * Contiguous frame allocation, only for FPGA display driver which needs
- * physically contiguous memory.
- * Deprecated.
- */
-int
-ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
- unsigned int width,
- unsigned int height,
- enum ia_css_frame_format format,
- unsigned int stride,
- unsigned int raw_bit_depth);
-
-/* @brief Allocate a contiguous CSS frame from a frame info structure.
- *
- * @param frame The allocated frame.
- * @param[in] info The frame info structure.
- * @return The error code.
- *
- * Allocate a frame using the resolution and format from a frame info struct.
- * This is a convenience function, implemented on top of
- * ia_css_frame_allocate_contiguous().
- * Only for FPGA display driver which needs physically contiguous memory.
- * Deprecated.
- */
-int
-ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
- const struct ia_css_frame_info *info);
-
/* @brief Allocate a CSS frame structure using a frame info structure.
*
* @param frame The allocated frame.
@@ -334,7 +295,6 @@ int
ia_css_frame_map(struct ia_css_frame **frame,
const struct ia_css_frame_info *info,
const void __user *data,
- u16 attribute,
unsigned int pgnr);
/* @brief Unmap a CSS frame structure.
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
index 13caa55fd51a..bf0a768f8fe1 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -331,7 +331,7 @@ ia_css_isp_dvs_statistics_allocate(
HIVE_ISP_DDR_WORD_BYTES);
me->size = hor_size + ver_size;
- me->data_ptr = hmm_alloc(me->size, HMM_BO_PRIVATE, 0, NULL, 0);
+ me->data_ptr = hmm_alloc(me->size);
if (me->data_ptr == mmgr_NULL)
goto err;
me->hor_size = hor_size;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
index f608740e8340..c13de289a3db 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
@@ -294,7 +294,7 @@ ia_css_isp_dvs2_statistics_allocate(
* grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES;
me->size = 2 * size;
- me->data_ptr = hmm_alloc(me->size, HMM_BO_PRIVATE, 0, NULL, 0);
+ me->data_ptr = hmm_alloc(me->size);
if (me->data_ptr == mmgr_NULL)
goto err;
me->hor_proj = me->data_ptr;
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
index bfe4f5976771..73432dc35ae3 100644
--- a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
@@ -145,12 +145,6 @@ more details.
#define RAW_BUF_LINES ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2)
-#define RAW_BUF_STRIDE \
- (BINARY_ID == SH_CSS_BINARY_ID_POST_ISP ? MAX_VECTORS_PER_INPUT_CHUNK : \
- ISP_NUM_STRIPES > 1 ? MAX_VECTORS_PER_INPUT_STRIPE + _ISP_EXTRA_PADDING_VECS : \
- !ENABLE_CONTINUOUS ? MAX_VECTORS_PER_INPUT_LINE : \
- MAX_VECTORS_PER_INPUT_CHUNK)
-
/* [isp vmem] table size[vectors] per line per color (GR,R,B,GB),
multiples of NWAY */
#define ISP2400_SCTBL_VECTORS_PER_LINE_PER_COLOR \
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
index f46238725eea..3d269bd23207 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -1305,8 +1305,6 @@ void ia_css_debug_frame_print(const struct ia_css_frame *frame,
ia_css_debug_dtrace(2, " padded width = %d\n",
frame->info.padded_width);
ia_css_debug_dtrace(2, " format = %d\n", frame->info.format);
- ia_css_debug_dtrace(2, " is contiguous = %s\n",
- frame->contiguous ? "yes" : "no");
switch (frame->info.format) {
case IA_CSS_FRAME_FORMAT_NV12:
case IA_CSS_FRAME_FORMAT_NV16:
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
index c756a134efc3..700070c58eda 100644
--- a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
@@ -109,16 +109,13 @@ void ia_css_frame_free_multiple(unsigned int num_frames,
*
* @param frame The allocated frame.
* @param[in] size_bytes The frame size in bytes.
- * @param[in] contiguous Allocate memory physically contiguously or not.
* @return The error code.
*
* Allocate a frame using the given size in bytes.
* The frame structure is partially null initialized.
*/
-int ia_css_frame_allocate_with_buffer_size(
- struct ia_css_frame **frame,
- const unsigned int size_bytes,
- const bool contiguous);
+int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame,
+ const unsigned int size_bytes);
/* @brief Check whether 2 frames are same type
*
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
index a3aae638b0bf..5a7058320ee6 100644
--- a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
@@ -48,12 +48,6 @@ static void frame_init_raw_single_plane(
unsigned int subpixels_per_line,
unsigned int bits_per_pixel);
-static void frame_init_mipi_plane(struct ia_css_frame *frame,
- struct ia_css_frame_plane *plane,
- unsigned int height,
- unsigned int subpixels_per_line,
- unsigned int bytes_per_pixel);
-
static void frame_init_nv_planes(struct ia_css_frame *frame,
unsigned int horizontal_decimation,
unsigned int vertical_decimation,
@@ -77,15 +71,13 @@ static int frame_allocate_with_data(struct ia_css_frame **frame,
unsigned int height,
enum ia_css_frame_format format,
unsigned int padded_width,
- unsigned int raw_bit_depth,
- bool contiguous);
+ unsigned int raw_bit_depth);
static struct ia_css_frame *frame_create(unsigned int width,
unsigned int height,
enum ia_css_frame_format format,
unsigned int padded_width,
unsigned int raw_bit_depth,
- bool contiguous,
bool valid);
static unsigned
@@ -137,7 +129,7 @@ int ia_css_frame_allocate(struct ia_css_frame **frame,
width, height, format, padded_width, raw_bit_depth);
err = frame_allocate_with_data(frame, width, height, format,
- padded_width, raw_bit_depth, false);
+ padded_width, raw_bit_depth);
if ((*frame) && err == 0)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
@@ -154,7 +146,6 @@ int ia_css_frame_allocate(struct ia_css_frame **frame,
int ia_css_frame_map(struct ia_css_frame **frame,
const struct ia_css_frame_info *info,
const void __user *data,
- u16 attribute,
unsigned int pgnr)
{
int err = 0;
@@ -180,9 +171,7 @@ int ia_css_frame_map(struct ia_css_frame **frame,
goto error;
}
- me->data = hmm_alloc(me->data_bytes, HMM_BO_USER, 0, data,
- attribute & ATOMISP_MAP_FLAG_CACHED);
-
+ me->data = hmm_create_from_userdata(me->data_bytes, data);
if (me->data == mmgr_NULL)
err = -EINVAL;
@@ -216,7 +205,6 @@ int ia_css_frame_create_from_info(struct ia_css_frame **frame,
info->format,
info->padded_width,
info->raw_bit_depth,
- false,
false);
if (!me) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
@@ -270,49 +258,6 @@ int ia_css_frame_set_data(struct ia_css_frame *frame,
return err;
}
-int ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
- unsigned int width,
- unsigned int height,
- enum ia_css_frame_format format,
- unsigned int padded_width,
- unsigned int raw_bit_depth)
-{
- int err = 0;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_frame_allocate_contiguous() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
- width, height, format, padded_width, raw_bit_depth);
-
- err = frame_allocate_with_data(frame, width, height, format,
- padded_width, raw_bit_depth, true);
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_frame_allocate_contiguous() leave: frame=%p\n",
- frame ? *frame : (void *)-1);
-
- return err;
-}
-
-int ia_css_frame_allocate_contiguous_from_info(
- struct ia_css_frame **frame,
- const struct ia_css_frame_info *info)
-{
- int err = 0;
-
- assert(frame);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_frame_allocate_contiguous_from_info() enter:\n");
- err = ia_css_frame_allocate_contiguous(frame,
- info->res.width,
- info->res.height,
- info->format,
- info->padded_width,
- info->raw_bit_depth);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_frame_allocate_contiguous_from_info() leave:\n");
- return err;
-}
-
void ia_css_frame_free(struct ia_css_frame *frame)
{
IA_CSS_ENTER_PRIVATE("frame = %p", frame);
@@ -343,11 +288,9 @@ int ia_css_frame_init_planes(struct ia_css_frame *frame)
switch (frame->info.format) {
case IA_CSS_FRAME_FORMAT_MIPI:
- frame_init_mipi_plane(frame, &frame->planes.raw,
- frame->info.res.height,
- frame->info.padded_width,
- frame->info.raw_bit_depth <= 8 ? 1 : 2);
- break;
+ dev_err(atomisp_dev,
+ "%s: unexpected use of IA_CSS_FRAME_FORMAT_MIPI\n", __func__);
+ return -EINVAL;
case IA_CSS_FRAME_FORMAT_RAW_PACKED:
frame_init_raw_single_plane(frame, &frame->planes.raw,
frame->info.res.height,
@@ -460,10 +403,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
IA_CSS_LEAVE_PRIVATE("");
return;
}
- if (min_padded_width > width)
- align = min_padded_width;
- else
- align = width;
+ align = max(min_padded_width, width);
info->res.width = width;
/* frames with a U and V plane of 8 bits per pixel need to have
@@ -529,16 +469,14 @@ void ia_css_frame_free_multiple(unsigned int num_frames,
}
}
-int ia_css_frame_allocate_with_buffer_size(
- struct ia_css_frame **frame,
- const unsigned int buffer_size_bytes,
- const bool contiguous)
+int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame,
+ const unsigned int buffer_size_bytes)
{
	/* AM: Body copied from frame_allocate_with_data(). */
int err;
struct ia_css_frame *me = frame_create(0, 0,
IA_CSS_FRAME_FORMAT_NUM,/* Not valid format yet */
- 0, 0, contiguous, false);
+ 0, 0, false);
if (!me)
return -ENOMEM;
@@ -670,22 +608,6 @@ static void frame_init_raw_single_plane(
return;
}
-static void frame_init_mipi_plane(struct ia_css_frame *frame,
- struct ia_css_frame_plane *plane,
- unsigned int height,
- unsigned int subpixels_per_line,
- unsigned int bytes_per_pixel)
-{
- unsigned int stride;
-
- stride = subpixels_per_line * bytes_per_pixel;
- frame->data_bytes = 8388608; /* 8*1024*1024 */
- frame->valid = false;
- frame->contiguous = true;
- frame_init_plane(plane, subpixels_per_line, stride, height, 0);
- return;
-}
-
static void frame_init_nv_planes(struct ia_css_frame *frame,
unsigned int horizontal_decimation,
unsigned int vertical_decimation,
@@ -803,11 +725,7 @@ static int frame_allocate_buffer_data(struct ia_css_frame *frame)
#ifdef ISP2401
IA_CSS_ENTER_LEAVE_PRIVATE("frame->data_bytes=%d\n", frame->data_bytes);
#endif
- frame->data = hmm_alloc(frame->data_bytes,
- HMM_BO_PRIVATE, 0, NULL,
- frame->contiguous ?
- ATOMISP_MAP_FLAG_CONTIGUOUS : 0);
-
+ frame->data = hmm_alloc(frame->data_bytes);
if (frame->data == mmgr_NULL)
return -ENOMEM;
return 0;
@@ -818,8 +736,7 @@ static int frame_allocate_with_data(struct ia_css_frame **frame,
unsigned int height,
enum ia_css_frame_format format,
unsigned int padded_width,
- unsigned int raw_bit_depth,
- bool contiguous)
+ unsigned int raw_bit_depth)
{
int err;
struct ia_css_frame *me = frame_create(width,
@@ -827,7 +744,6 @@ static int frame_allocate_with_data(struct ia_css_frame **frame,
format,
padded_width,
raw_bit_depth,
- contiguous,
true);
if (!me)
@@ -857,7 +773,6 @@ static struct ia_css_frame *frame_create(unsigned int width,
enum ia_css_frame_format format,
unsigned int padded_width,
unsigned int raw_bit_depth,
- bool contiguous,
bool valid)
{
struct ia_css_frame *me = kvmalloc(sizeof(*me), GFP_KERNEL);
@@ -871,7 +786,6 @@ static struct ia_css_frame *frame_create(unsigned int width,
me->info.format = format;
me->info.padded_width = padded_width;
me->info.raw_bit_depth = raw_bit_depth;
- me->contiguous = contiguous;
me->valid = valid;
me->data_bytes = 0;
me->data = mmgr_NULL;
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
index 823ec54b6281..99c2f3a533ab 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
@@ -131,7 +131,7 @@ ia_css_isp_param_allocate_isp_parameters(
goto cleanup;
}
if (pclass != IA_CSS_PARAM_CLASS_PARAM) {
- css_params->params[pclass][mem].address = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, 0);
+ css_params->params[pclass][mem].address = hmm_alloc(size);
if (!css_params->params[pclass][mem].address) {
err = -ENOMEM;
goto cleanup;
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
index 39604752785b..2e07dab8bf51 100644
--- a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
@@ -254,14 +254,15 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
struct ia_css_rmgr_vbuf_handle **handle)
{
- struct ia_css_rmgr_vbuf_handle h = { 0 };
-
if ((!pool) || (!handle) || (!*handle)) {
IA_CSS_LOG("Invalid inputs");
return;
}
if (pool->copy_on_write) {
+ struct ia_css_rmgr_vbuf_handle *new_handle;
+ struct ia_css_rmgr_vbuf_handle h = { 0 };
+
/* only one reference, reuse (no new retain) */
if ((*handle)->count == 1)
return;
@@ -272,23 +273,29 @@ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
h.size = (*handle)->size;
/* release ref to current buffer */
ia_css_rmgr_refcount_release_vbuf(handle);
- **handle = h;
+ new_handle = &h;
+ } else {
+ new_handle = *handle;
}
/* get new buffer for needed size */
- if ((*handle)->vptr == 0x0) {
+ if (new_handle->vptr == 0x0) {
if (pool->recycle) {
/* try and pop from pool */
- rmgr_pop_handle(pool, handle);
+ rmgr_pop_handle(pool, &new_handle);
}
- if ((*handle)->vptr == 0x0) {
+ if (new_handle->vptr == 0x0) {
/* we need to allocate */
- (*handle)->vptr = hmm_alloc((*handle)->size,
- HMM_BO_PRIVATE, 0, NULL, 0);
+ new_handle->vptr = hmm_alloc(new_handle->size);
} else {
/* we popped a buffer */
+ *handle = new_handle;
return;
}
}
+ /* Note that new_handle will change to an internally maintained one */
+ ia_css_rmgr_refcount_retain_vbuf(&new_handle);
+ *handle = new_handle;
+ return;
}
/* Note that handle will change to an internally maintained one */
ia_css_rmgr_refcount_retain_vbuf(handle);
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
index 7f4592565af6..c34bfc5f970d 100644
--- a/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
@@ -64,7 +64,7 @@ int ia_css_spctrl_load_fw(sp_ID_t sp_id, ia_css_spctrl_cfg *spctrl_cfg)
* Data used to be stored separately, because of access alignment constraints,
* fix the FW generation instead
*/
- code_addr = hmm_alloc(spctrl_cfg->code_size, HMM_BO_PRIVATE, 0, NULL, 0);
+ code_addr = hmm_alloc(spctrl_cfg->code_size);
if (code_addr == mmgr_NULL)
return -ENOMEM;
hmm_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 1d605e533e29..da96aaffebc1 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -3061,7 +3061,6 @@ init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
assert(vf_frame);
sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->info, idx);
- vf_frame->contiguous = false;
vf_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id);
@@ -3243,7 +3242,6 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
in_frame->info.raw_bit_depth =
ia_css_pipe_util_pipe_input_format_bpp(pipe);
ia_css_frame_info_set_width(&in_frame->info, pipe->stream->config.input_config.input_res.width, 0);
- in_frame->contiguous = false;
in_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
@@ -3271,7 +3269,6 @@ init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
assert(out_frame);
sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, idx);
- out_frame->contiguous = false;
out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, thread_id, &queue_id);
@@ -3510,8 +3507,7 @@ create_host_acc_pipeline(struct ia_css_pipe *pipe)
if (pipe->config.acc_extension)
pipe->pipeline.pipe_qos_config = 0;
- fw = pipe->vf_stage;
- for (i = 0; fw; fw = fw->next) {
+ for (fw = pipe->vf_stage; fw; fw = fw->next) {
err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
if (err)
goto ERR;
@@ -7158,7 +7154,6 @@ create_host_copy_pipeline(struct ia_css_pipe *pipe,
ia_css_pipeline_clean(me);
/* Construct out_frame info */
- out_frame->contiguous = false;
out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
if (copy_on_sp(pipe) &&
@@ -7208,7 +7203,6 @@ create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe)
err = sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, 0);
if (err)
return err;
- out_frame->contiguous = false;
out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, &queue_id);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
index dd688f8ab649..e7ef578db8ab 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -369,7 +369,7 @@ void sh_css_unload_firmware(void)
ia_css_ptr
sh_css_load_blob(const unsigned char *blob, unsigned int size)
{
- ia_css_ptr target_addr = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, 0);
+ ia_css_ptr target_addr = hmm_alloc(size);
/*
* this will allocate memory aligned to a DDR word boundary which
* is required for the CSS DMA to read the instructions.
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index 0acf75497ae7..bc6e8598a776 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -431,8 +431,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
/* allocate new frame */
err = ia_css_frame_allocate_with_buffer_size(
&my_css.mipi_frames[port][i],
- my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES,
- false);
+ my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES);
if (err) {
for (j = 0; j < i; j++) {
if (my_css.mipi_frames[port][j]) {
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
index 09f87c285b8d..0e7c38b2bfe3 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -2072,8 +2072,7 @@ static bool realloc_isp_css_mm_buf(
size_t *curr_size,
size_t needed_size,
bool force,
- int *err,
- uint16_t mmgr_attribute)
+ int *err)
{
s32 id;
@@ -2095,11 +2094,7 @@ static bool realloc_isp_css_mm_buf(
id = IA_CSS_REFCOUNT_PARAM_BUFFER;
ia_css_refcount_decrement(id, *curr_buf);
- *curr_buf = ia_css_refcount_increment(id, hmm_alloc(needed_size,
- HMM_BO_PRIVATE, 0,
- NULL,
- mmgr_attribute));
-
+ *curr_buf = ia_css_refcount_increment(id, hmm_alloc(needed_size));
if (!*curr_buf) {
*err = -ENOMEM;
*curr_size = 0;
@@ -2122,7 +2117,7 @@ static bool reallocate_buffer(
IA_CSS_ENTER_PRIVATE("void");
ret = realloc_isp_css_mm_buf(curr_buf,
- curr_size, needed_size, force, err, 0);
+ curr_size, needed_size, force, err);
IA_CSS_LEAVE_PRIVATE("ret=%d", ret);
return ret;
@@ -2161,7 +2156,7 @@ ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES);
me->size = me->dmem_size + me->vmem_size * 2 + me->hmem_size;
- me->data_ptr = hmm_alloc(me->size, HMM_BO_PRIVATE, 0, NULL, 0);
+ me->data_ptr = hmm_alloc(me->size);
if (me->data_ptr == mmgr_NULL) {
kvfree(me);
me = NULL;
@@ -2211,7 +2206,7 @@ ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info)
md->info = *metadata_info;
md->exp_id = 0;
- md->address = hmm_alloc(metadata_info->size, HMM_BO_PRIVATE, 0, NULL, 0);
+ md->address = hmm_alloc(metadata_info->size);
if (md->address == mmgr_NULL)
goto error;
@@ -2364,13 +2359,13 @@ sh_css_create_isp_params(struct ia_css_stream *stream,
ddr_ptrs_size->isp_param = params_size;
ddr_ptrs->isp_param =
ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
- hmm_alloc(params_size, HMM_BO_PRIVATE, 0, NULL, 0));
+ hmm_alloc(params_size));
succ &= (ddr_ptrs->isp_param != mmgr_NULL);
ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table);
ddr_ptrs->macc_tbl =
ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
- hmm_alloc(sizeof(struct ia_css_macc_table), HMM_BO_PRIVATE, 0, NULL, 0));
+ hmm_alloc(sizeof(struct ia_css_macc_table)));
succ &= (ddr_ptrs->macc_tbl != mmgr_NULL);
*isp_params_out = params;
@@ -2584,14 +2579,10 @@ sh_css_params_init(void)
for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
xmem_sp_stage_ptrs[p][i] =
ia_css_refcount_increment(-1,
- hmm_alloc(sizeof(struct sh_css_sp_stage),
- HMM_BO_PRIVATE, 0, NULL,
- ATOMISP_MAP_FLAG_CLEARED));
+ hmm_alloc(sizeof(struct sh_css_sp_stage)));
xmem_isp_stage_ptrs[p][i] =
ia_css_refcount_increment(-1,
- hmm_alloc(sizeof(struct sh_css_sp_stage),
- HMM_BO_PRIVATE, 0, NULL,
- ATOMISP_MAP_FLAG_CLEARED));
+ hmm_alloc(sizeof(struct sh_css_sp_stage)));
if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) ||
(xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) {
@@ -2599,6 +2590,9 @@ sh_css_params_init(void)
IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
return -ENOMEM;
}
+
+ hmm_set(xmem_sp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage));
+ hmm_set(xmem_isp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage));
}
}
@@ -2609,13 +2603,9 @@ sh_css_params_init(void)
sp_ddr_ptrs = ia_css_refcount_increment(-1,
hmm_alloc(CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
- HIVE_ISP_DDR_WORD_BYTES),
- HMM_BO_PRIVATE, 0, NULL,
- ATOMISP_MAP_FLAG_CLEARED));
+ HIVE_ISP_DDR_WORD_BYTES)));
xmem_sp_group_ptrs = ia_css_refcount_increment(-1,
- hmm_alloc(sizeof(struct sh_css_sp_group),
- HMM_BO_PRIVATE, 0, NULL,
- ATOMISP_MAP_FLAG_CLEARED));
+ hmm_alloc(sizeof(struct sh_css_sp_group)));
if ((sp_ddr_ptrs == mmgr_NULL) ||
(xmem_sp_group_ptrs == mmgr_NULL)) {
@@ -2623,6 +2613,9 @@ sh_css_params_init(void)
IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
return -ENOMEM;
}
+ hmm_set(sp_ddr_ptrs, 0, CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
+ HIVE_ISP_DDR_WORD_BYTES));
+ hmm_set(xmem_sp_group_ptrs, 0, sizeof(struct sh_css_sp_group));
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
@@ -2667,7 +2660,7 @@ int ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
}
if (!stream_started) {
- pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
+ pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table));
if (pipe->scaler_pp_lut == mmgr_NULL) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
@@ -2709,7 +2702,7 @@ int sh_css_params_map_and_store_default_gdc_lut(void)
host_lut_store((void *)zoom_table);
- default_gdc_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
+ default_gdc_lut = hmm_alloc(sizeof(zoom_table));
if (default_gdc_lut == mmgr_NULL)
return -ENOMEM;
@@ -3802,7 +3795,7 @@ static int write_ia_css_isp_parameter_set_info_to_ddr(
assert(out);
*out = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL,
- hmm_alloc(sizeof(struct ia_css_isp_parameter_set_info), HMM_BO_PRIVATE, 0, NULL, 0));
+ hmm_alloc(sizeof(struct ia_css_isp_parameter_set_info)));
succ = (*out != mmgr_NULL);
if (succ)
hmm_store(*out,
diff --git a/drivers/staging/media/av7110/av7110.c b/drivers/staging/media/av7110/av7110.c
index d74ee0ecfb36..df81a9b744c2 100644
--- a/drivers/staging/media/av7110/av7110.c
+++ b/drivers/staging/media/av7110/av7110.c
@@ -2364,7 +2364,7 @@ static int av7110_attach(struct saa7146_dev* dev,
budgetpatch = 0;
/* autodetect the presence of budget patch
* this only works if saa7146 has been recently
- * reset with with MASK_31 to MC1
+ * reset with MASK_31 to MC1
*
* will wait for VBI_B event (vertical blank at port B)
* and will reset GPIO3 after VBI_B is detected.
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index 26308bb29adc..2989ebc631cc 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -227,6 +227,7 @@ struct hantro_dev {
*
* @ctrl_handler: Control handler used to register controls.
* @jpeg_quality: User-specified JPEG compression quality.
+ * @bit_depth: Bit depth of current frame
*
* @codec_ops: Set of operations related to codec mode.
* @postproc: Post-processing context.
@@ -252,6 +253,7 @@ struct hantro_ctx {
struct v4l2_ctrl_handler ctrl_handler;
int jpeg_quality;
+ int bit_depth;
const struct hantro_codec_ops *codec_ops;
struct hantro_postproc_ctx postproc;
@@ -277,6 +279,7 @@ struct hantro_ctx {
* @enc_fmt: Format identifier for encoder registers.
* @frmsize: Supported range of frame sizes (only for bitstream formats).
* @postprocessed: Indicates if this format needs the post-processor.
+ * @match_depth: Indicates if format bit depth must match video bit depth
*/
struct hantro_fmt {
char *name;
@@ -287,6 +290,7 @@ struct hantro_fmt {
enum hantro_enc_fmt enc_fmt;
struct v4l2_frmsize_stepwise frmsize;
bool postprocessed;
+ bool match_depth;
};
struct hantro_reg {
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index ac232b5f7825..2036f72eeb4a 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -47,12 +47,10 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
struct vb2_buffer *buf;
- int index;
- index = vb2_find_timestamp(q, ts, 0);
- if (index < 0)
+ buf = vb2_find_buffer(q, ts);
+ if (!buf)
return 0;
- buf = vb2_get_buffer(q, index);
return hantro_get_dec_buf_addr(ctx, buf);
}
@@ -265,7 +263,7 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
- } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) {
+ } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
@@ -304,18 +302,16 @@ static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
-static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
+static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct hantro_ctx *ctx;
ctx = container_of(ctrl->handler,
struct hantro_ctx, ctrl_handler);
- vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
-
switch (ctrl->id) {
- case V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP:
- ctx->hevc_dec.ctrls.hevc_hdr_skip_length = ctrl->val;
+ case V4L2_CID_STATELESS_VP9_FRAME:
+ ctx->bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;
break;
default:
return -EINVAL;
@@ -332,8 +328,8 @@ static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
.s_ctrl = hantro_jpeg_s_ctrl,
};
-static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
- .s_ctrl = hantro_hevc_s_ctrl,
+static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
+ .s_ctrl = hantro_vp9_s_ctrl,
};
#define HANTRO_JPEG_ACTIVE_MARKERS (V4L2_JPEG_ACTIVE_MARKER_APP0 | \
@@ -438,18 +434,18 @@ static const struct hantro_ctrl controls[] = {
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
- .min = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
- .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
- .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
},
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
- .min = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
- .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
- .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+ .id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
},
}, {
.codec = HANTRO_HEVC_DECODER,
@@ -469,40 +465,29 @@ static const struct hantro_ctrl controls[] = {
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
.ops = &hantro_ctrl_ops,
},
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
},
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS,
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
},
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
- },
- }, {
- .codec = HANTRO_HEVC_DECODER,
- .cfg = {
- .id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP,
- .name = "Hantro HEVC slice header skip bytes",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 0x100,
- .step = 1,
- .ops = &hantro_hevc_ctrl_ops,
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
},
}, {
.codec = HANTRO_VP9_DECODER,
.cfg = {
.id = V4L2_CID_STATELESS_VP9_FRAME,
+ .ops = &hantro_vp9_ctrl_ops,
},
}, {
.codec = HANTRO_VP9_DECODER,
@@ -638,6 +623,7 @@ static const struct of_device_id of_hantro_match[] = {
{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
index 5df6f08e26f5..233ecd863d5f 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -117,6 +117,41 @@ static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
vpu_debug(1, "%s: no chroma!\n", __func__);
}
+static int compute_header_skip_length(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ int skip = 0;
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT)
+ /* size of pic_output_flag */
+ skip++;
+
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE)
+ /* size of pic_order_cnt_lsb */
+ skip += 2;
+
+ if (!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC)) {
+ /* size of pic_order_cnt_lsb */
+ skip += sps->log2_max_pic_order_cnt_lsb_minus4 + 4;
+
+ /* size of short_term_ref_pic_set_sps_flag */
+ skip++;
+
+ if (decode_params->short_term_ref_pic_set_size)
+ /* size of st_ref_pic_set( num_short_term_ref_pic_sets ) */
+ skip += decode_params->short_term_ref_pic_set_size;
+ else if (sps->num_short_term_ref_pic_sets > 1)
+ skip += fls(sps->num_short_term_ref_pic_sets - 1);
+
+ skip += decode_params->long_term_ref_pic_set_size;
+ }
+
+ return skip;
+}
+
static void set_params(struct hantro_ctx *ctx)
{
const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
@@ -134,7 +169,7 @@ static void set_params(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &g2_output_8_bits, 0);
- hantro_reg_write(vpu, &g2_hdr_skip_length, ctrls->hevc_hdr_skip_length);
+ hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx));
min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3;
max_log2_ctb_size = min_log2_cb_size + sps->log2_diff_max_min_luma_coding_block_size;
@@ -390,11 +425,10 @@ static int set_ref(struct hantro_ctx *ctx)
!!(pps->flags & V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED));
/*
- * Write POC count diff from current pic. For frame decoding only compute
- * pic_order_cnt[0] and ignore pic_order_cnt[1] used in field-coding.
+ * Write POC count diff from current pic.
*/
for (i = 0; i < decode_params->num_active_dpb_entries && i < ARRAY_SIZE(cur_poc); i++) {
- char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt[0];
+ char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt_val;
hantro_reg_write(vpu, &cur_poc[i], poc_diff);
}
@@ -421,7 +455,7 @@ static int set_ref(struct hantro_ctx *ctx)
dpb_longterm_e = 0;
for (i = 0; i < decode_params->num_active_dpb_entries &&
i < (V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1); i++) {
- luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt[0]);
+ luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt_val);
if (!luma_addr)
return -ENOMEM;
diff --git a/drivers/staging/media/hantro/hantro_g2_regs.h b/drivers/staging/media/hantro/hantro_g2_regs.h
index 877d663a8181..82606783591a 100644
--- a/drivers/staging/media/hantro/hantro_g2_regs.h
+++ b/drivers/staging/media/hantro/hantro_g2_regs.h
@@ -107,7 +107,7 @@
#define g2_start_code_e G2_DEC_REG(10, 31, 0x1)
#define g2_init_qp_old G2_DEC_REG(10, 25, 0x3f)
-#define g2_init_qp G2_DEC_REG(10, 24, 0x3f)
+#define g2_init_qp G2_DEC_REG(10, 24, 0x7f)
#define g2_num_tile_cols_old G2_DEC_REG(10, 20, 0x1f)
#define g2_num_tile_cols G2_DEC_REG(10, 19, 0x1f)
#define g2_num_tile_rows_old G2_DEC_REG(10, 15, 0x1f)
diff --git a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c b/drivers/staging/media/hantro/hantro_g2_vp9_dec.c
index 91c21b634fab..6fc4b555517f 100644
--- a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_vp9_dec.c
@@ -111,17 +111,17 @@ get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
{
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
- int buf_idx;
+ struct vb2_buffer *buf;
/*
* If a ref is unused or invalid, address of current destination
* buffer is returned.
*/
- buf_idx = vb2_find_timestamp(cap_q, timestamp, 0);
- if (buf_idx < 0)
- return vb2_to_hantro_decoded_buf(&dst->vb2_buf);
+ buf = vb2_find_buffer(cap_q, timestamp);
+ if (!buf)
+ buf = &dst->vb2_buf;
- return vb2_to_hantro_decoded_buf(vb2_get_buffer(cap_q, buf_idx));
+ return vb2_to_hantro_decoded_buf(buf);
}
static void update_dec_buf_info(struct hantro_decoded_buffer *buf,
@@ -515,16 +515,8 @@ static void
config_bit_depth(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
{
if (ctx->dev->variant->legacy_regs) {
- u8 pp_shift = 0;
-
hantro_reg_write(ctx->dev, &g2_bit_depth_y, dec_params->bit_depth);
hantro_reg_write(ctx->dev, &g2_bit_depth_c, dec_params->bit_depth);
- hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, dec_params->bit_depth);
-
- if (dec_params->bit_depth > 8)
- pp_shift = 16 - dec_params->bit_depth;
-
- hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
hantro_reg_write(ctx->dev, &g2_pix_shift, 0);
} else {
hantro_reg_write(ctx->dev, &g2_bit_depth_y_minus8, dec_params->bit_depth - 8);
diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/staging/media/hantro/hantro_hevc.c
index f86c98e19177..b990bc98164c 100644
--- a/drivers/staging/media/hantro/hantro_hevc.c
+++ b/drivers/staging/media/hantro/hantro_hevc.c
@@ -33,7 +33,7 @@ void hantro_hevc_ref_init(struct hantro_ctx *ctx)
}
dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx,
- int poc)
+ s32 poc)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
int i;
@@ -154,6 +154,25 @@ err_free_tile_buffers:
return -ENOMEM;
}
+static int hantro_hevc_validate_sps(struct hantro_ctx *ctx, const struct v4l2_ctrl_hevc_sps *sps)
+{
+ /*
+ * for tile pixel format check if the width and height match
+ * hardware constraints
+ */
+ if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_NV12_4L4) {
+ if (ctx->dst_fmt.width !=
+ ALIGN(sps->pic_width_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_width))
+ return -EINVAL;
+
+ if (ctx->dst_fmt.height !=
+ ALIGN(sps->pic_height_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_height))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
{
struct hantro_hevc_dec_hw_ctx *hevc_ctx = &ctx->hevc_dec;
@@ -163,22 +182,26 @@ int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
hantro_start_prepare_run(ctx);
ctrls->decode_params =
- hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS);
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
if (WARN_ON(!ctrls->decode_params))
return -EINVAL;
ctrls->scaling =
- hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX);
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
if (WARN_ON(!ctrls->scaling))
return -EINVAL;
ctrls->sps =
- hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SPS);
if (WARN_ON(!ctrls->sps))
return -EINVAL;
+ ret = hantro_hevc_validate_sps(ctx, ctrls->sps);
+ if (ret)
+ return ret;
+
ctrls->pps =
- hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_PPS);
if (WARN_ON(!ctrls->pps))
return -EINVAL;
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 52a960f6fa4a..e83f0c523a30 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -18,9 +18,21 @@
#define DEC_8190_ALIGN_MASK 0x07U
#define MB_DIM 16
+#define TILE_MB_DIM 4
#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
+#define FMT_MIN_WIDTH 48
+#define FMT_MIN_HEIGHT 48
+#define FMT_HD_WIDTH 1280
+#define FMT_HD_HEIGHT 720
+#define FMT_FHD_WIDTH 1920
+#define FMT_FHD_HEIGHT 1088
+#define FMT_UHD_WIDTH 3840
+#define FMT_UHD_HEIGHT 2160
+#define FMT_4K_WIDTH 4096
+#define FMT_4K_HEIGHT 2304
+
#define NUM_REF_PICTURES (V4L2_HEVC_DPB_ENTRIES_NUM_MAX + 1)
struct hantro_dev;
@@ -133,7 +145,7 @@ struct hantro_hevc_dec_hw_ctx {
struct hantro_aux_buf tile_bsd;
struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES];
struct hantro_aux_buf scaling_lists;
- int ref_bufs_poc[NUM_REF_PICTURES];
+ s32 ref_bufs_poc[NUM_REF_PICTURES];
u32 ref_bufs_used;
struct hantro_hevc_dec_ctrls ctrls;
unsigned int num_tile_cols_allocated;
@@ -306,6 +318,7 @@ extern const struct hantro_variant rk3066_vpu_variant;
extern const struct hantro_variant rk3288_vpu_variant;
extern const struct hantro_variant rk3328_vpu_variant;
extern const struct hantro_variant rk3399_vpu_variant;
+extern const struct hantro_variant rk3568_vepu_variant;
extern const struct hantro_variant rk3568_vpu_variant;
extern const struct hantro_variant sama5d4_vdec_variant;
extern const struct hantro_variant sunxi_vpu_variant;
@@ -345,9 +358,10 @@ void hantro_hevc_dec_exit(struct hantro_ctx *ctx);
int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx);
int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx);
void hantro_hevc_ref_init(struct hantro_ctx *ctx);
-dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, int poc);
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, s32 poc);
int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr);
+
static inline unsigned short hantro_vp9_num_sbs(unsigned short dimension)
{
return (dimension + 63) / 64;
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
index ab168c1c0d28..a0928c508434 100644
--- a/drivers/staging/media/hantro/hantro_postproc.c
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -12,6 +12,7 @@
#include "hantro_hw.h"
#include "hantro_g1_regs.h"
#include "hantro_g2_regs.h"
+#include "hantro_v4l2.h"
#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
{ \
@@ -112,12 +113,14 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *dst_buf;
- size_t chroma_offset = ctx->dst_fmt.width * ctx->dst_fmt.height;
int down_scale = down_scale_factor(ctx);
+ size_t chroma_offset;
dma_addr_t dst_dma;
dst_buf = hantro_get_dst_buf(ctx);
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
+ ctx->dst_fmt.height;
if (down_scale) {
hantro_reg_write(vpu, &g2_down_scale_e, 1);
@@ -129,6 +132,16 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
}
+ if (ctx->dev->variant->legacy_regs) {
+ int out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat);
+ u8 pp_shift = 0;
+
+ if (out_depth > 8)
+ pp_shift = 16 - out_depth;
+
+ hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth);
+ hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
+ }
hantro_reg_write(vpu, &g2_out_rs_e, 1);
}
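
A short illustration of why chroma_offset is now derived from bytesperline rather than width (the 1920x1088 P010 figures are only an assumed example, not part of the patch): with 10-bit samples stored in two bytes, width * height = 1920 * 1088 = 2088960 undershoots the luma plane, while bytesperline * height = (1920 * 2) * 1088 = 4177920 matches the actual layout. On legacy-register cores the same hunk also programs the output bit depth and pixel shift, so a 10-bit output ends up with pp_shift = 16 - 10 = 6.
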
@@ -174,18 +187,27 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
unsigned int num_buffers = cap_queue->num_buffers;
+ struct v4l2_pix_format_mplane pix_mp;
+ const struct hantro_fmt *fmt;
unsigned int i, buf_size;
- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
+ /* This should always pick the native format. */
+ fmt = hantro_get_default_fmt(ctx, false);
+ if (!fmt)
+ return -EINVAL;
+ v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
+ ctx->src_fmt.height);
+
+ buf_size = pix_mp.plane_fmt[0].sizeimage;
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
- buf_size += hantro_h264_mv_size(ctx->dst_fmt.width,
- ctx->dst_fmt.height);
+ buf_size += hantro_h264_mv_size(pix_mp.width,
+ pix_mp.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
- buf_size += hantro_vp9_mv_size(ctx->dst_fmt.width,
- ctx->dst_fmt.height);
+ buf_size += hantro_vp9_mv_size(pix_mp.width,
+ pix_mp.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
- buf_size += hantro_hevc_mv_size(ctx->dst_fmt.width,
- ctx->dst_fmt.height);
+ buf_size += hantro_hevc_mv_size(pix_mp.width,
+ pix_mp.height);
for (i = 0; i < num_buffers; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 22ad182ee972..2c7a805289e7 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -64,6 +64,42 @@ hantro_get_postproc_formats(const struct hantro_ctx *ctx,
return ctx->dev->variant->postproc_fmts;
}
+int hantro_get_format_depth(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_P010:
+ case V4L2_PIX_FMT_P010_4L4:
+ return 10;
+ default:
+ return 8;
+ }
+}
+
+static bool
+hantro_check_depth_match(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt)
+{
+ int fmt_depth, ctx_depth = 8;
+
+ if (!fmt->match_depth && !fmt->postprocessed)
+ return true;
+
+ /* 0 means default depth, which is 8 */
+ if (ctx->bit_depth)
+ ctx_depth = ctx->bit_depth;
+
+ fmt_depth = hantro_get_format_depth(fmt->fourcc);
+
+ /*
+ * Allow only downconversion for postproc formats for now.
+ * It may be possible to relax that on some HW.
+ */
+ if (!fmt->match_depth)
+ return fmt_depth <= ctx_depth;
+
+ return fmt_depth == ctx_depth;
+}
+
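A rough trace of the depth check, assuming a 10-bit stream (ctx->bit_depth == 10) and format entries like the sunxi ones further down in this diff (the variable names here are illustrative only):

    hantro_check_depth_match(ctx, &nv12_postproc); /* depth 8, postprocessed: 8 <= 10, accepted (downconversion) */
    hantro_check_depth_match(ctx, &nv12_4l4);      /* depth 8, match_depth:   8 != 10, rejected */
    hantro_check_depth_match(ctx, &p010_4l4);      /* depth 10, match_depth: 10 == 10, accepted */
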
static const struct hantro_fmt *
hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
{
@@ -82,7 +118,7 @@ hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
return NULL;
}
-static const struct hantro_fmt *
+const struct hantro_fmt *
hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream)
{
const struct hantro_fmt *formats;
@@ -91,7 +127,8 @@ hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream)
formats = hantro_get_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
if (bitstream == (formats[i].codec_mode !=
- HANTRO_MODE_NONE))
+ HANTRO_MODE_NONE) &&
+ hantro_check_depth_match(ctx, &formats[i]))
return &formats[i];
}
return NULL;
@@ -162,11 +199,13 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
formats = hantro_get_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
+ fmt = &formats[i];
if (skip_mode_none == mode_none)
continue;
+ if (!hantro_check_depth_match(ctx, fmt))
+ continue;
if (j == f->index) {
- fmt = &formats[i];
f->pixelformat = fmt->fourcc;
return 0;
}
@@ -182,8 +221,11 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
return -EINVAL;
formats = hantro_get_postproc_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
+ fmt = &formats[i];
+
+ if (!hantro_check_depth_match(ctx, fmt))
+ continue;
if (j == f->index) {
- fmt = &formats[i];
f->pixelformat = fmt->fourcc;
return 0;
}
@@ -259,7 +301,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
} else if (ctx->is_encoder) {
vpu_fmt = ctx->vpu_dst_fmt;
} else {
- vpu_fmt = ctx->vpu_src_fmt;
+ vpu_fmt = fmt;
/*
* Width/height on the CAPTURE end of a decoder are ignored and
* replaced by the OUTPUT ones.
diff --git a/drivers/staging/media/hantro/hantro_v4l2.h b/drivers/staging/media/hantro/hantro_v4l2.h
index 18bc682c8556..64f6f57e9d7a 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.h
+++ b/drivers/staging/media/hantro/hantro_v4l2.h
@@ -22,5 +22,8 @@ extern const struct v4l2_ioctl_ops hantro_ioctl_ops;
extern const struct vb2_ops hantro_queue_ops;
void hantro_reset_fmts(struct hantro_ctx *ctx);
+int hantro_get_format_depth(u32 fourcc);
+const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream);
#endif /* HANTRO_V4L2_H_ */
diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c
index 9802508bade2..77f574fdfa77 100644
--- a/drivers/staging/media/hantro/imx8m_vpu_hw.c
+++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c
@@ -83,6 +83,14 @@ static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
};
@@ -90,17 +98,25 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -109,11 +125,11 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -122,11 +138,11 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -137,6 +153,14 @@ static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = {
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
};
@@ -144,18 +168,26 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12_4L4,
.codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
},
{
.fourcc = V4L2_PIX_FMT_HEVC_SLICE,
.codec_mode = HANTRO_MODE_HEVC_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
- .step_height = MB_DIM,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
},
},
{
@@ -163,12 +195,12 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
.codec_mode = HANTRO_MODE_VP9_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
- .step_height = MB_DIM,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rockchip_vpu_hw.c b/drivers/staging/media/hantro/rockchip_vpu_hw.c
index fc96501f3bc8..8de6fd2e8eef 100644
--- a/drivers/staging/media/hantro/rockchip_vpu_hw.c
+++ b/drivers/staging/media/hantro/rockchip_vpu_hw.c
@@ -63,6 +63,14 @@ static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
};
@@ -70,17 +78,25 @@ static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -89,11 +105,11 @@ static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -102,11 +118,11 @@ static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -116,17 +132,25 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 4096,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2304,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -135,11 +159,11 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -148,31 +172,80 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
-static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+static const struct hantro_fmt rockchip_vdpu2_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -181,11 +254,11 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1920,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -194,11 +267,11 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -417,6 +490,14 @@ static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
},
};
+static const struct hantro_codec_ops rk3568_vepu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rockchip_vpu2_jpeg_enc_run,
+ .reset = rockchip_vpu2_enc_reset,
+ .done = rockchip_vpu2_jpeg_enc_done,
+ },
+};
+
/*
* VPU variant.
*/
@@ -439,6 +520,10 @@ static const struct hantro_irq rockchip_vpu2_irqs[] = {
{ "vdpu", rockchip_vpu2_vdpu_irq },
};
+static const struct hantro_irq rk3568_vepu_irqs[] = {
+ { "vepu", rockchip_vpu2_vepu_irq },
+};
+
static const char * const rk3066_vpu_clk_names[] = {
"aclk_vdpu", "hclk_vdpu",
"aclk_vepu", "hclk_vepu"
@@ -516,8 +601,8 @@ const struct hantro_variant rk3288_vpu_variant = {
const struct hantro_variant rk3328_vpu_variant = {
.dec_offset = 0x400,
- .dec_fmts = rk3399_vpu_dec_fmts,
- .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
@@ -528,6 +613,11 @@ const struct hantro_variant rk3328_vpu_variant = {
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names),
};
+/*
+ * H.264 decoding is explicitly disabled on the RK3399.
+ * This ensures userspace applications use the Rockchip VDEC core,
+ * which has better performance.
+ */
const struct hantro_variant rk3399_vpu_variant = {
.enc_offset = 0x0,
.enc_fmts = rockchip_vpu_enc_fmts,
@@ -545,10 +635,23 @@ const struct hantro_variant rk3399_vpu_variant = {
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
+const struct hantro_variant rk3568_vepu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .codec = HANTRO_JPEG_ENCODER,
+ .codec_ops = rk3568_vepu_codec_ops,
+ .irqs = rk3568_vepu_irqs,
+ .num_irqs = ARRAY_SIZE(rk3568_vepu_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
const struct hantro_variant rk3568_vpu_variant = {
.dec_offset = 0x400,
- .dec_fmts = rk3399_vpu_dec_fmts,
- .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
.codec = HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
@@ -564,8 +667,8 @@ const struct hantro_variant px30_vpu_variant = {
.enc_fmts = rockchip_vpu_enc_fmts,
.num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
.dec_offset = 0x400,
- .dec_fmts = rk3399_vpu_dec_fmts,
- .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
diff --git a/drivers/staging/media/hantro/sama5d4_vdec_hw.c b/drivers/staging/media/hantro/sama5d4_vdec_hw.c
index b2fc1c5613e1..b205e2db5b04 100644
--- a/drivers/staging/media/hantro/sama5d4_vdec_hw.c
+++ b/drivers/staging/media/hantro/sama5d4_vdec_hw.c
@@ -16,6 +16,14 @@ static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
};
@@ -23,17 +31,25 @@ static const struct hantro_fmt sama5d4_vdec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1280,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 720,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -42,11 +58,11 @@ static const struct hantro_fmt sama5d4_vdec_fmts[] = {
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1280,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 720,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -55,11 +71,11 @@ static const struct hantro_fmt sama5d4_vdec_fmts[] = {
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 1280,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
- .min_height = 48,
- .max_height = 720,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
diff --git a/drivers/staging/media/hantro/sunxi_vpu_hw.c b/drivers/staging/media/hantro/sunxi_vpu_hw.c
index c0edd5856a0c..02ce8b064a8f 100644
--- a/drivers/staging/media/hantro/sunxi_vpu_hw.c
+++ b/drivers/staging/media/hantro/sunxi_vpu_hw.c
@@ -14,6 +14,27 @@ static const struct hantro_fmt sunxi_vpu_postproc_fmts[] = {
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
},
};
@@ -21,17 +42,39 @@ static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12_4L4,
.codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
},
{
.fourcc = V4L2_PIX_FMT_VP9_FRAME,
.codec_mode = HANTRO_MODE_VP9_DEC,
.max_depth = 2,
.frmsize = {
- .min_width = 48,
- .max_width = 3840,
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
.step_width = 32,
- .min_height = 48,
- .max_height = 2160,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
.step_height = 32,
},
},
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index 80b69a9a752c..e6d6ed3b1161 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -235,7 +235,7 @@ static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
if (!(spad->flags & MEDIA_PAD_FL_SINK))
continue;
- pad = media_entity_remote_pad(spad);
+ pad = media_pad_remote_pad_first(spad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
continue;
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 94bc866ca28c..294c808b2ebe 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -698,7 +698,7 @@ imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
(!upstream && !(spad->flags & MEDIA_PAD_FL_SOURCE)))
continue;
- pad = media_entity_remote_pad(spad);
+ pad = media_pad_remote_pad_first(spad);
if (!pad)
continue;
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 8467a1491048..a0553c24cce4 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -17,18 +17,17 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
-#include <media/imx.h>
-#include "imx-media.h"
-
#define IMX7_CSI_PAD_SINK 0
#define IMX7_CSI_PAD_SRC 1
#define IMX7_CSI_PADS_NUM 2
@@ -159,45 +158,102 @@
#define CSI_CSICR18 0x48
#define CSI_CSICR19 0x4c
+#define IMX7_CSI_VIDEO_NAME "imx-capture"
+/* In bytes, per queue */
+#define IMX7_CSI_VIDEO_MEM_LIMIT SZ_64M
+#define IMX7_CSI_VIDEO_EOF_TIMEOUT 2000
+
+#define IMX7_CSI_DEF_MBUS_CODE MEDIA_BUS_FMT_UYVY8_2X8
+#define IMX7_CSI_DEF_PIX_FORMAT V4L2_PIX_FMT_UYVY
+#define IMX7_CSI_DEF_PIX_WIDTH 640
+#define IMX7_CSI_DEF_PIX_HEIGHT 480
+
enum imx_csi_model {
IMX7_CSI_IMX7 = 0,
IMX7_CSI_IMX8MQ,
};
+struct imx7_csi_pixfmt {
+ /* the in-memory FourCC pixel format */
+ u32 fourcc;
+ /*
+ * the set of equivalent media bus codes for the fourcc.
+ * NOTE! codes pointer is NULL for in-memory-only formats.
+ */
+ const u32 *codes;
+ int bpp; /* total bpp */
+ bool yuv;
+};
+
+struct imx7_csi_vb2_buffer {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+};
+
+static inline struct imx7_csi_vb2_buffer *
+to_imx7_csi_vb2_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct imx7_csi_vb2_buffer, vbuf);
+}
+
+struct imx7_csi_dma_buf {
+ void *virt;
+ dma_addr_t phys;
+ unsigned long len;
+};
+
struct imx7_csi {
struct device *dev;
- struct v4l2_subdev sd;
- struct v4l2_async_notifier notifier;
- struct imx_media_video_dev *vdev;
- struct imx_media_dev *imxmd;
- struct media_pad pad[IMX7_CSI_PADS_NUM];
- /* lock to protect members below */
- struct mutex lock;
- /* lock to protect irq handler when stop streaming */
- spinlock_t irqlock;
+ /* Resources and locks */
+ void __iomem *regbase;
+ int irq;
+ struct clk *mclk;
+
+ struct mutex lock; /* Protects is_streaming, format_mbus, cc */
+ spinlock_t irqlock; /* Protects last_eof */
+
+ /* Media and V4L2 device */
+ struct media_device mdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
+ struct media_pipeline pipe;
struct v4l2_subdev *src_sd;
+ bool is_csi2;
+
+ /* V4L2 subdev */
+ struct v4l2_subdev sd;
+ struct media_pad pad[IMX7_CSI_PADS_NUM];
struct v4l2_mbus_framefmt format_mbus[IMX7_CSI_PADS_NUM];
- const struct imx_media_pixfmt *cc[IMX7_CSI_PADS_NUM];
- struct v4l2_fract frame_interval[IMX7_CSI_PADS_NUM];
+ const struct imx7_csi_pixfmt *cc[IMX7_CSI_PADS_NUM];
- void __iomem *regbase;
- int irq;
- struct clk *mclk;
+ /* Video device */
+ struct video_device *vdev; /* Video device */
+ struct media_pad vdev_pad; /* Video device pad */
+
+ struct v4l2_pix_format vdev_fmt; /* The user format */
+ const struct imx7_csi_pixfmt *vdev_cc;
+ struct v4l2_rect vdev_compose; /* The compose rectangle */
- /* active vb2 buffers to send to video dev sink */
- struct imx_media_buffer *active_vb2_buf[2];
- struct imx_media_dma_buf underrun_buf;
+ struct mutex vdev_mutex; /* Protect vdev operations */
+ struct vb2_queue q; /* The videobuf2 queue */
+ struct list_head ready_q; /* List of queued buffers */
+ spinlock_t q_lock; /* Protect ready_q */
+
+ /* Buffers and streaming state */
+ struct imx7_csi_vb2_buffer *active_vb2_buf[2];
+ struct imx7_csi_dma_buf underrun_buf;
+
+ bool is_streaming;
int buf_num;
u32 frame_sequence;
bool last_eof;
- bool is_streaming;
- bool is_csi2;
-
struct completion last_eof_completion;
enum imx_csi_model model;
@@ -242,7 +298,8 @@ static void imx7_csi_init_default(struct imx7_csi *csi)
imx7_csi_reg_write(csi, 0, CSI_CSICR2);
imx7_csi_reg_write(csi, BIT_FRMCNT_RST, CSI_CSICR3);
- imx7_csi_reg_write(csi, BIT_IMAGE_WIDTH(800) | BIT_IMAGE_HEIGHT(600),
+ imx7_csi_reg_write(csi, BIT_IMAGE_WIDTH(IMX7_CSI_DEF_PIX_WIDTH) |
+ BIT_IMAGE_HEIGHT(IMX7_CSI_DEF_PIX_HEIGHT),
CSI_CSIIMAG_PARA);
imx7_csi_reg_write(csi, BIT_DMA_REFLASH_RFF, CSI_CSICR3);
@@ -336,16 +393,17 @@ static void imx7_csi_update_buf(struct imx7_csi *csi, dma_addr_t phys,
imx7_csi_reg_write(csi, phys, CSI_CSIDMASA_FB1);
}
+static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi);
+
static void imx7_csi_setup_vb2_buf(struct imx7_csi *csi)
{
- struct imx_media_video_dev *vdev = csi->vdev;
- struct imx_media_buffer *buf;
+ struct imx7_csi_vb2_buffer *buf;
struct vb2_buffer *vb2_buf;
dma_addr_t phys[2];
int i;
for (i = 0; i < 2; i++) {
- buf = imx_media_capture_device_next_buf(vdev);
+ buf = imx7_csi_video_next_buf(csi);
if (buf) {
csi->active_vb2_buf[i] = buf;
vb2_buf = &buf->vbuf.vb2_buf;
@@ -362,7 +420,7 @@ static void imx7_csi_setup_vb2_buf(struct imx7_csi *csi)
static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
enum vb2_buffer_state return_status)
{
- struct imx_media_buffer *buf;
+ struct imx7_csi_vb2_buffer *buf;
int i;
/* return any remaining active frames with return_status */
@@ -378,13 +436,36 @@ static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
}
}
+static void imx7_csi_free_dma_buf(struct imx7_csi *csi,
+ struct imx7_csi_dma_buf *buf)
+{
+ if (buf->virt)
+ dma_free_coherent(csi->dev, buf->len, buf->virt, buf->phys);
+
+ buf->virt = NULL;
+ buf->phys = 0;
+}
+
+static int imx7_csi_alloc_dma_buf(struct imx7_csi *csi,
+ struct imx7_csi_dma_buf *buf, int size)
+{
+ imx7_csi_free_dma_buf(csi, buf);
+
+ buf->len = PAGE_ALIGN(size);
+ buf->virt = dma_alloc_coherent(csi->dev, buf->len, &buf->phys,
+ GFP_DMA | GFP_KERNEL);
+ if (!buf->virt)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int imx7_csi_dma_setup(struct imx7_csi *csi)
{
- struct imx_media_video_dev *vdev = csi->vdev;
int ret;
- ret = imx_media_alloc_dma_buf(csi->dev, &csi->underrun_buf,
- vdev->fmt.sizeimage);
+ ret = imx7_csi_alloc_dma_buf(csi, &csi->underrun_buf,
+ csi->vdev_fmt.sizeimage);
if (ret < 0) {
v4l2_warn(&csi->sd, "consider increasing the CMA area\n");
return ret;
@@ -403,7 +484,7 @@ static void imx7_csi_dma_cleanup(struct imx7_csi *csi,
enum vb2_buffer_state return_status)
{
imx7_csi_dma_unsetup_vb2_buf(csi, return_status);
- imx_media_free_dma_buf(csi->dev, &csi->underrun_buf);
+ imx7_csi_free_dma_buf(csi, &csi->underrun_buf);
}
static void imx7_csi_dma_stop(struct imx7_csi *csi)
@@ -420,7 +501,7 @@ static void imx7_csi_dma_stop(struct imx7_csi *csi)
/*
* and then wait for interrupt handler to mark completion.
*/
- timeout_jiffies = msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT);
+ timeout_jiffies = msecs_to_jiffies(IMX7_CSI_VIDEO_EOF_TIMEOUT);
ret = wait_for_completion_timeout(&csi->last_eof_completion,
timeout_jiffies);
if (ret == 0)
@@ -431,8 +512,7 @@ static void imx7_csi_dma_stop(struct imx7_csi *csi)
static void imx7_csi_configure(struct imx7_csi *csi)
{
- struct imx_media_video_dev *vdev = csi->vdev;
- struct v4l2_pix_format *out_pix = &vdev->fmt;
+ struct v4l2_pix_format *out_pix = &csi->vdev_fmt;
int width = out_pix->width;
u32 stride = 0;
u32 cr3 = BIT_FRMCNT_RST;
@@ -631,14 +711,13 @@ static void imx7_csi_error_recovery(struct imx7_csi *csi)
static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
{
- struct imx_media_video_dev *vdev = csi->vdev;
- struct imx_media_buffer *done, *next;
+ struct imx7_csi_vb2_buffer *done, *next;
struct vb2_buffer *vb;
dma_addr_t phys;
done = csi->active_vb2_buf[csi->buf_num];
if (done) {
- done->vbuf.field = vdev->fmt.field;
+ done->vbuf.field = csi->vdev_fmt.field;
done->vbuf.sequence = csi->frame_sequence;
vb = &done->vbuf.vb2_buf;
vb->timestamp = ktime_get_ns();
@@ -647,7 +726,7 @@ static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
csi->frame_sequence++;
/* get next queued buffer */
- next = imx_media_capture_device_next_buf(vdev);
+ next = imx7_csi_video_next_buf(csi);
if (next) {
phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
csi->active_vb2_buf[csi->buf_num] = next;
@@ -718,6 +797,831 @@ static irqreturn_t imx7_csi_irq_handler(int irq, void *data)
}
/* -----------------------------------------------------------------------------
+ * Format Helpers
+ */
+
+#define IMX_BUS_FMTS(fmt...) (const u32[]) {fmt, 0}
+
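For reference, the variadic macro expands to a zero-terminated compound literal, e.g. IMX_BUS_FMTS(MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_1X16) yields (const u32[]){ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_1X16, 0 }, which is what lets the lookup loops below stop at fmt->codes[j] == 0.
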
+/*
+ * List of supported pixel formats for the subdevs. Keep V4L2_PIX_FMT_UYVY and
+ * MEDIA_BUS_FMT_UYVY8_2X8 first to match IMX7_CSI_DEF_PIX_FORMAT and
+ * IMX7_CSI_DEF_MBUS_CODE.
+ */
+static const struct imx7_csi_pixfmt pixel_formats[] = {
+ /*** YUV formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16
+ ),
+ .yuv = true,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16
+ ),
+ .yuv = true,
+ .bpp = 16,
+ },
+ /*** raw bayer and grayscale formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y14_1X14),
+ .bpp = 16,
+ },
+};
+
+/*
+ * Search the pixel_formats[] array for an entry with the given fourcc and
+ * return it.
+ */
+static const struct imx7_csi_pixfmt *imx7_csi_find_pixel_format(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/*
+ * Search in the pixel_formats[] array for an entry with the given media
+ * bus code and return it.
+ */
+static const struct imx7_csi_pixfmt *imx7_csi_find_mbus_format(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (code == fmt->codes[j])
+ return fmt;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested search criteria. Return the media-bus code that matches
+ * the search criteria at the requested match index.
+ *
+ * @code: The returned media-bus code that matches the search criteria at
+ * the requested match index.
+ * @index: The requested match index.
+ */
+static int imx7_csi_enum_mbus_formats(u32 *code, u32 index)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (index == 0) {
+ *code = fmt->codes[j];
+ return 0;
+ }
+
+ index--;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int imx7_csi_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ const struct v4l2_mbus_framefmt *mbus,
+ const struct imx7_csi_pixfmt *cc)
+{
+ u32 width;
+ u32 stride;
+
+ if (!cc) {
+ cc = imx7_csi_find_mbus_format(mbus->code);
+ if (!cc)
+ return -EINVAL;
+ }
+
+ /* Round up width for minimum burst size */
+ width = round_up(mbus->width, 8);
+
+ /* Round up stride for IDMAC line start address alignment */
+ stride = round_up((width * cc->bpp) >> 3, 8);
+
+ pix->width = width;
+ pix->height = mbus->height;
+ pix->pixelformat = cc->fourcc;
+ pix->colorspace = mbus->colorspace;
+ pix->xfer_func = mbus->xfer_func;
+ pix->ycbcr_enc = mbus->ycbcr_enc;
+ pix->quantization = mbus->quantization;
+ pix->field = mbus->field;
+ pix->bytesperline = stride;
+ pix->sizeimage = stride * pix->height;
+
+ return 0;
+}
+
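Both round-ups are to a multiple of 8; as a quick example (assumed figures, not from the patch), a 1366-pixel-wide UYVY mbus format (16 bpp) becomes width = round_up(1366, 8) = 1368 and bytesperline = round_up(1368 * 16 / 8, 8) = 2736, giving sizeimage = 2736 * height.
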
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - IOCTLs
+ */
+
+static int imx7_csi_video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+
+ strscpy(cap->driver, IMX7_CSI_VIDEO_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX7_CSI_VIDEO_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(csi->dev));
+
+ return 0;
+}
+
+static int imx7_csi_video_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int index = f->index;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+
+ /*
+ * If a media bus code is specified, only consider formats that
+ * match it.
+ */
+ if (f->mbus_code) {
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (f->mbus_code == fmt->codes[j])
+ break;
+ }
+
+ if (!fmt->codes[j])
+ continue;
+ }
+
+ if (index == 0) {
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ index--;
+ }
+
+ return -EINVAL;
+}
+
+static int imx7_csi_video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct imx7_csi_pixfmt *cc;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ cc = imx7_csi_find_pixel_format(fsize->pixel_format);
+ if (!cc)
+ return -EINVAL;
+
+ /*
+ * TODO: The constraints are hardware-specific and may depend on the
+ * pixel format. This should come from the driver using
+ * imx_media_capture.
+ */
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = 1;
+ fsize->stepwise.max_width = 65535;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = 65535;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int imx7_csi_video_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+
+ f->fmt.pix = csi->vdev_fmt;
+
+ return 0;
+}
+
+static const struct imx7_csi_pixfmt *
+__imx7_csi_video_try_fmt(struct v4l2_pix_format *pixfmt,
+ struct v4l2_rect *compose)
+{
+ struct v4l2_mbus_framefmt fmt_src;
+ const struct imx7_csi_pixfmt *cc;
+
+ /*
+ * Find the pixel format; default to the first supported format if it is
+ * not found.
+ */
+ cc = imx7_csi_find_pixel_format(pixfmt->pixelformat);
+ if (!cc) {
+ pixfmt->pixelformat = IMX7_CSI_DEF_PIX_FORMAT;
+ cc = imx7_csi_find_pixel_format(pixfmt->pixelformat);
+ }
+
+ /* Allow IDMAC interweave but enforce field order from source. */
+ if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
+ switch (pixfmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ pixfmt->field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ pixfmt->field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ v4l2_fill_mbus_format(&fmt_src, pixfmt, 0);
+ imx7_csi_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc);
+
+ if (compose) {
+ compose->width = fmt_src.width;
+ compose->height = fmt_src.height;
+ }
+
+ return cc;
+}
+
+static int imx7_csi_video_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ __imx7_csi_video_try_fmt(&f->fmt.pix, NULL);
+ return 0;
+}
+
+static int imx7_csi_video_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+ const struct imx7_csi_pixfmt *cc;
+
+ if (vb2_is_busy(&csi->q)) {
+ dev_err(csi->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ cc = __imx7_csi_video_try_fmt(&f->fmt.pix, &csi->vdev_compose);
+
+ csi->vdev_cc = cc;
+ csi->vdev_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int imx7_csi_video_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* The compose rectangle is fixed to the source format. */
+ s->r = csi->vdev_compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /*
+ * The hardware writes with a configurable but fixed DMA burst
+ * size. If the source format width is not burst size aligned,
+ * the written frame contains padding to the right.
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = csi->vdev_fmt.width;
+ s->r.height = csi->vdev_fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops imx7_csi_video_ioctl_ops = {
+ .vidioc_querycap = imx7_csi_video_querycap,
+
+ .vidioc_enum_fmt_vid_cap = imx7_csi_video_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = imx7_csi_video_enum_framesizes,
+
+ .vidioc_g_fmt_vid_cap = imx7_csi_video_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = imx7_csi_video_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = imx7_csi_video_s_fmt_vid_cap,
+
+ .vidioc_g_selection = imx7_csi_video_g_selection,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - Queue Operations
+ */
+
+static int imx7_csi_video_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &csi->vdev_fmt;
+ unsigned int count = *nbuffers;
+
+ if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (*nplanes != 1 || sizes[0] < pix->sizeimage)
+ return -EINVAL;
+ count += vq->num_buffers;
+ }
+
+ count = min_t(__u32, IMX7_CSI_VIDEO_MEM_LIMIT / pix->sizeimage, count);
+
+ if (*nplanes)
+ *nbuffers = (count < vq->num_buffers) ? 0 :
+ count - vq->num_buffers;
+ else
+ *nbuffers = count;
+
+ *nplanes = 1;
+ sizes[0] = pix->sizeimage;
+
+ return 0;
+}
+
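As a rough sanity check on the memory cap (assumed figures): with the default 640x480 UYVY format, sizeimage = 1280 * 480 = 614400 bytes, so IMX7_CSI_VIDEO_MEM_LIMIT / sizeimage allows up to SZ_64M / 614400 = 109 buffers per queue; a 1920x1080 UYVY capture (sizeimage = 3840 * 1080 = 4147200) would be capped at 16.
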
+static int imx7_csi_video_buf_init(struct vb2_buffer *vb)
+{
+ struct imx7_csi_vb2_buffer *buf = to_imx7_csi_vb2_buffer(vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int imx7_csi_video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format *pix = &csi->vdev_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix->sizeimage) {
+ dev_err(csi->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), (long)pix->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, pix->sizeimage);
+
+ return 0;
+}
+
+static void imx7_csi_video_buf_queue(struct vb2_buffer *vb)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
+ struct imx7_csi_vb2_buffer *buf = to_imx7_csi_vb2_buffer(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->q_lock, flags);
+
+ list_add_tail(&buf->list, &csi->ready_q);
+
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+}
+
+static int imx7_csi_video_validate_fmt(struct imx7_csi *csi)
+{
+ struct v4l2_subdev_format fmt_src;
+ const struct imx7_csi_pixfmt *cc;
+ int ret;
+
+ /* Retrieve the media bus format on the source subdev. */
+ fmt_src.pad = IMX7_CSI_PAD_SRC;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(&csi->sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ /*
+ * Verify that the media bus size matches the size set on the video
+ * node. It is sufficient to check the compose rectangle size without
+ * checking the rounded size from pix_fmt, as the rounded size is
+ * derived directly from the compose rectangle size, and will thus
+ * always match if the compose rectangle matches.
+ */
+ if (csi->vdev_compose.width != fmt_src.format.width ||
+ csi->vdev_compose.height != fmt_src.format.height)
+ return -EPIPE;
+
+ /*
+ * Verify that the media bus code is compatible with the pixel format
+ * set on the video node.
+ */
+ cc = imx7_csi_find_mbus_format(fmt_src.format.code);
+ if (!cc || csi->vdev_cc->yuv != cc->yuv)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int imx7_csi_video_start_streaming(struct vb2_queue *vq,
+ unsigned int count)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ struct imx7_csi_vb2_buffer *buf, *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = imx7_csi_video_validate_fmt(csi);
+ if (ret) {
+ dev_err(csi->dev, "capture format not valid\n");
+ goto err_buffers;
+ }
+
+ mutex_lock(&csi->mdev.graph_mutex);
+
+ ret = __media_pipeline_start(&csi->sd.entity, &csi->pipe);
+ if (ret)
+ goto err_unlock;
+
+ ret = v4l2_subdev_call(&csi->sd, video, s_stream, 1);
+ if (ret)
+ goto err_stop;
+
+ mutex_unlock(&csi->mdev.graph_mutex);
+
+ return 0;
+
+err_stop:
+ __media_pipeline_stop(&csi->sd.entity);
+err_unlock:
+ mutex_unlock(&csi->mdev.graph_mutex);
+ dev_err(csi->dev, "pipeline start failed with %d\n", ret);
+err_buffers:
+ spin_lock_irqsave(&csi->q_lock, flags);
+ list_for_each_entry_safe(buf, tmp, &csi->ready_q, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+ return ret;
+}
+
+static void imx7_csi_video_stop_streaming(struct vb2_queue *vq)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ struct imx7_csi_vb2_buffer *frame;
+ struct imx7_csi_vb2_buffer *tmp;
+ unsigned long flags;
+
+ mutex_lock(&csi->mdev.graph_mutex);
+ v4l2_subdev_call(&csi->sd, video, s_stream, 0);
+ __media_pipeline_stop(&csi->sd.entity);
+ mutex_unlock(&csi->mdev.graph_mutex);
+
+ /* release all active buffers */
+ spin_lock_irqsave(&csi->q_lock, flags);
+ list_for_each_entry_safe(frame, tmp, &csi->ready_q, list) {
+ list_del(&frame->list);
+ vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+}
+
+static const struct vb2_ops imx7_csi_video_qops = {
+ .queue_setup = imx7_csi_video_queue_setup,
+ .buf_init = imx7_csi_video_buf_init,
+ .buf_prepare = imx7_csi_video_buf_prepare,
+ .buf_queue = imx7_csi_video_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = imx7_csi_video_start_streaming,
+ .stop_streaming = imx7_csi_video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - File Operations
+ */
+
+static int imx7_csi_video_open(struct file *file)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&csi->vdev_mutex))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ dev_err(csi->dev, "v4l2_fh_open failed\n");
+ goto out;
+ }
+
+ ret = v4l2_pipeline_pm_get(&csi->vdev->entity);
+ if (ret)
+ v4l2_fh_release(file);
+
+out:
+ mutex_unlock(&csi->vdev_mutex);
+ return ret;
+}
+
+static int imx7_csi_video_release(struct file *file)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+ struct vb2_queue *vq = &csi->q;
+
+ mutex_lock(&csi->vdev_mutex);
+
+ if (file->private_data == vq->owner) {
+ vb2_queue_release(vq);
+ vq->owner = NULL;
+ }
+
+ v4l2_pipeline_pm_put(&csi->vdev->entity);
+
+ v4l2_fh_release(file);
+ mutex_unlock(&csi->vdev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations imx7_csi_video_fops = {
+ .owner = THIS_MODULE,
+ .open = imx7_csi_video_open,
+ .release = imx7_csi_video_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - Init & Cleanup
+ */
+
+static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi)
+{
+ struct imx7_csi_vb2_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->q_lock, flags);
+
+ /* get next queued buffer */
+ if (!list_empty(&csi->ready_q)) {
+ buf = list_entry(csi->ready_q.next, struct imx7_csi_vb2_buffer,
+ list);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+
+ return buf;
+}
+
+static int imx7_csi_video_init_format(struct imx7_csi *csi)
+{
+ struct v4l2_subdev_format fmt_src = {
+ .pad = IMX7_CSI_PAD_SRC,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ fmt_src.format.code = IMX7_CSI_DEF_MBUS_CODE;
+ fmt_src.format.width = IMX7_CSI_DEF_PIX_WIDTH;
+ fmt_src.format.height = IMX7_CSI_DEF_PIX_HEIGHT;
+
+ imx7_csi_mbus_fmt_to_pix_fmt(&csi->vdev_fmt, &fmt_src.format, NULL);
+ csi->vdev_compose.width = fmt_src.format.width;
+ csi->vdev_compose.height = fmt_src.format.height;
+
+ csi->vdev_cc = imx7_csi_find_pixel_format(csi->vdev_fmt.pixelformat);
+
+ return 0;
+}
+
+static int imx7_csi_video_register(struct imx7_csi *csi)
+{
+ struct v4l2_subdev *sd = &csi->sd;
+ struct v4l2_device *v4l2_dev = sd->v4l2_dev;
+ struct video_device *vdev = csi->vdev;
+ int ret;
+
+ vdev->v4l2_dev = v4l2_dev;
+
+ /* Initialize the default format and compose rectangle. */
+ ret = imx7_csi_video_init_format(csi);
+ if (ret < 0)
+ return ret;
+
+ /* Register the video device. */
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(csi->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_info(csi->dev, "Registered %s as /dev/%s\n", vdev->name,
+ video_device_node_name(vdev));
+
+ /* Create the link from the CSI subdev to the video device. */
+ ret = media_create_pad_link(&sd->entity, IMX7_CSI_PAD_SRC,
+ &vdev->entity, 0, MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(csi->dev, "failed to create link to device node\n");
+ video_unregister_device(vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx7_csi_video_unregister(struct imx7_csi *csi)
+{
+ media_entity_cleanup(&csi->vdev->entity);
+ video_unregister_device(csi->vdev);
+}
+
+static int imx7_csi_video_init(struct imx7_csi *csi)
+{
+ struct video_device *vdev;
+ struct vb2_queue *vq;
+ int ret;
+
+ mutex_init(&csi->vdev_mutex);
+ INIT_LIST_HEAD(&csi->ready_q);
+ spin_lock_init(&csi->q_lock);
+
+ /* Allocate and initialize the video device. */
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->fops = &imx7_csi_video_fops;
+ vdev->ioctl_ops = &imx7_csi_video_ioctl_ops;
+ vdev->minor = -1;
+ vdev->release = video_device_release;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
+ | V4L2_CAP_IO_MC;
+ vdev->lock = &csi->vdev_mutex;
+ vdev->queue = &csi->q;
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s capture", csi->sd.name);
+
+ video_set_drvdata(vdev, csi);
+ csi->vdev = vdev;
+
+ /* Initialize the video device pad. */
+ csi->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &csi->vdev_pad);
+ if (ret) {
+ video_device_release(vdev);
+ return ret;
+ }
+
+ /* Initialize the vb2 queue. */
+ vq = &csi->q;
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = csi;
+ vq->buf_struct_size = sizeof(struct imx7_csi_vb2_buffer);
+ vq->ops = &imx7_csi_video_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &csi->vdev_mutex;
+ vq->min_buffers_needed = 2;
+ vq->dev = csi->dev;
+
+ ret = vb2_queue_init(vq);
+ if (ret) {
+ dev_err(csi->dev, "vb2_queue_init failed\n");
+ video_device_release(vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
* V4L2 Subdev Operations
*/
@@ -764,36 +1668,48 @@ out_unlock:
return ret;
}
+static struct v4l2_mbus_framefmt *
+imx7_csi_get_format(struct imx7_csi *csi,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
+
+ return &csi->format_mbus[pad];
+}
+
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
+ const enum v4l2_subdev_format_whence which =
+ sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *mf;
- int ret;
+ const struct imx7_csi_pixfmt *cc;
int i;
+ cc = imx7_csi_find_mbus_format(IMX7_CSI_DEF_MBUS_CODE);
+
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, i);
+ struct v4l2_mbus_framefmt *mf =
+ imx7_csi_get_format(csi, sd_state, i, which);
- ret = imx_media_init_mbus_fmt(mf, 800, 600, 0, V4L2_FIELD_NONE,
- &csi->cc[i]);
- if (ret < 0)
- return ret;
- }
+ mf->code = IMX7_CSI_DEF_MBUS_CODE;
+ mf->width = IMX7_CSI_DEF_PIX_WIDTH;
+ mf->height = IMX7_CSI_DEF_PIX_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
- return 0;
-}
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mf->colorspace);
+ mf->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mf->colorspace);
+ mf->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(!cc->yuv,
+ mf->colorspace, mf->ycbcr_enc);
-static struct v4l2_mbus_framefmt *
-imx7_csi_get_format(struct imx7_csi *csi,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
+ csi->cc[i] = cc;
+ }
- return &csi->format_mbus[pad];
+ return 0;
}
static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
@@ -811,8 +1727,7 @@ static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
switch (code->pad) {
case IMX7_CSI_PAD_SINK:
- ret = imx_media_enum_mbus_formats(&code->code, code->index,
- PIXFMT_SEL_ANY);
+ ret = imx7_csi_enum_mbus_formats(&code->code, code->index);
break;
case IMX7_CSI_PAD_SRC:
if (code->index != 0) {
@@ -857,12 +1772,58 @@ out_unlock:
return ret;
}
+/*
+ * Default the colorspace in tryfmt to SRGB if set to an unsupported
+ * colorspace or not initialized. Then set the remaining colorimetry
+ * parameters based on the colorspace if they are uninitialized.
+ *
+ * tryfmt->code must be set on entry.
+ */
+static void imx7_csi_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt)
+{
+ const struct imx7_csi_pixfmt *cc;
+ bool is_rgb = false;
+
+ cc = imx7_csi_find_mbus_format(tryfmt->code);
+ if (cc && !cc->yuv)
+ is_rgb = true;
+
+ switch (tryfmt->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_BT2020:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_DCI_P3:
+ case V4L2_COLORSPACE_RAW:
+ break;
+ default:
+ tryfmt->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+ if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ tryfmt->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace);
+
+ if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ tryfmt->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace);
+
+ if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ tryfmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb,
+ tryfmt->colorspace,
+ tryfmt->ycbcr_enc);
+}
+
static int imx7_csi_try_fmt(struct imx7_csi *csi,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
- const struct imx_media_pixfmt **cc)
+ const struct imx7_csi_pixfmt **cc)
{
- const struct imx_media_pixfmt *in_cc;
+ const struct imx7_csi_pixfmt *in_cc;
struct v4l2_mbus_framefmt *in_fmt;
u32 code;
@@ -873,8 +1834,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
switch (sdformat->pad) {
case IMX7_CSI_PAD_SRC:
- in_cc = imx_media_find_mbus_format(in_fmt->code,
- PIXFMT_SEL_ANY);
+ in_cc = imx7_csi_find_mbus_format(in_fmt->code);
sdformat->format.width = in_fmt->width;
sdformat->format.height = in_fmt->height;
@@ -888,14 +1848,11 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
sdformat->format.ycbcr_enc = in_fmt->ycbcr_enc;
break;
case IMX7_CSI_PAD_SINK:
- *cc = imx_media_find_mbus_format(sdformat->format.code,
- PIXFMT_SEL_ANY);
+ *cc = imx7_csi_find_mbus_format(sdformat->format.code);
if (!*cc) {
- imx_media_enum_mbus_formats(&code, 0,
- PIXFMT_SEL_YUV_RGB);
- *cc = imx_media_find_mbus_format(code,
- PIXFMT_SEL_YUV_RGB);
- sdformat->format.code = (*cc)->codes[0];
+ code = IMX7_CSI_DEF_MBUS_CODE;
+ *cc = imx7_csi_find_mbus_format(code);
+ sdformat->format.code = code;
}
if (sdformat->format.field != V4L2_FIELD_INTERLACED)
@@ -905,7 +1862,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
return -EINVAL;
}
- imx_media_try_colorimetry(&sdformat->format, false);
+ imx7_csi_try_colorimetry(&sdformat->format);
return 0;
}
@@ -915,9 +1872,9 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- const struct imx_media_pixfmt *outcc;
+ const struct imx7_csi_pixfmt *outcc;
struct v4l2_mbus_framefmt *outfmt;
- const struct imx_media_pixfmt *cc;
+ const struct imx7_csi_pixfmt *cc;
struct v4l2_mbus_framefmt *fmt;
struct v4l2_subdev_format format;
int ret = 0;
@@ -977,9 +1934,8 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sink_fmt)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- struct imx_media_video_dev *vdev = csi->vdev;
- const struct v4l2_pix_format *out_pix = &vdev->fmt;
- struct media_pad *pad;
+ struct media_pad *pad = NULL;
+ unsigned int i;
int ret;
if (!csi->src_sd)
@@ -1001,7 +1957,17 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
case MEDIA_ENT_F_VID_MUX:
/* The input is the mux, check its input. */
- pad = imx_media_pipeline_pad(&csi->src_sd->entity, 0, 0, true);
+ for (i = 0; i < csi->src_sd->entity.num_pads; i++) {
+ struct media_pad *spad = &csi->src_sd->entity.pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_pad_remote_pad_first(spad);
+ if (pad)
+ break;
+ }
+
if (!pad)
return -ENODEV;
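The loop above replaces the old imx-media helper with an open-coded walk over the mux's sink pads. A standalone sketch of the same pattern on plain structs; struct fake_pad and first_connected_sink_remote() are made-up stand-ins for the media-controller API, purely illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pad {
	bool is_sink;
	struct fake_pad *remote;	/* pad at the other end of the link, if any */
};

static struct fake_pad *first_connected_sink_remote(struct fake_pad *pads,
						    size_t num_pads)
{
	size_t i;

	for (i = 0; i < num_pads; i++) {
		if (!pads[i].is_sink)
			continue;
		if (pads[i].remote)
			return pads[i].remote;
	}

	return NULL;	/* no connected sink pad: the driver returns -ENODEV */
}

int main(void)
{
	struct fake_pad sensor_src = { false, NULL };
	struct fake_pad mux_pads[] = {
		{ true, NULL },		/* unconnected sink */
		{ true, &sensor_src },	/* sink linked to the sensor */
		{ false, NULL },	/* the mux source pad */
	};

	printf("found remote: %s\n",
	       first_connected_sink_remote(mux_pads, 3) ? "yes" : "no");
	return 0;
}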
@@ -1017,29 +1983,6 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
break;
}
- /* Validate the sink link, ensure the pixel format is supported. */
- switch (out_pix->pixelformat) {
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_GREY:
- case V4L2_PIX_FMT_Y10:
- case V4L2_PIX_FMT_Y12:
- case V4L2_PIX_FMT_SBGGR8:
- case V4L2_PIX_FMT_SGBRG8:
- case V4L2_PIX_FMT_SGRBG8:
- case V4L2_PIX_FMT_SRGGB8:
- case V4L2_PIX_FMT_SBGGR16:
- case V4L2_PIX_FMT_SGBRG16:
- case V4L2_PIX_FMT_SGRBG16:
- case V4L2_PIX_FMT_SRGGB16:
- break;
-
- default:
- dev_dbg(csi->dev, "Invalid capture pixel format 0x%08x\n",
- out_pix->pixelformat);
- return -EINVAL;
- }
-
return 0;
}
@@ -1047,31 +1990,27 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
int ret;
- int i;
- for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- /* set a default mbus format */
- ret = imx_media_init_mbus_fmt(&csi->format_mbus[i],
- 800, 600, 0, V4L2_FIELD_NONE,
- &csi->cc[i]);
- if (ret < 0)
- return ret;
+ ret = imx7_csi_video_init(csi);
+ if (ret)
+ return ret;
- /* init default frame interval */
- csi->frame_interval[i].numerator = 1;
- csi->frame_interval[i].denominator = 30;
- }
+ ret = imx7_csi_video_register(csi);
+ if (ret)
+ return ret;
- csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
- IMX7_CSI_PAD_SRC, false);
- if (IS_ERR(csi->vdev))
- return PTR_ERR(csi->vdev);
+ ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+ if (ret)
+ goto err_unreg;
- ret = imx_media_capture_device_register(csi->vdev,
- MEDIA_LNK_FL_IMMUTABLE);
+ ret = media_device_register(&csi->mdev);
if (ret)
- imx_media_capture_device_remove(csi->vdev);
+ goto err_unreg;
+
+ return 0;
+err_unreg:
+ imx7_csi_video_unregister(csi);
return ret;
}
@@ -1079,8 +2018,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
+ imx7_csi_video_unregister(csi);
}
static const struct v4l2_subdev_video_ops imx7_csi_video_ops = {
@@ -1125,21 +2063,22 @@ static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
- /*
- * If the subdev is a video mux, it must be one of the CSI
- * muxes. Mark it as such via its group id.
- */
- if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
- sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
-
csi->src_sd = sd;
return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
}
+static int imx7_csi_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
+
+ return v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+}
+
static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = {
.bound = imx7_csi_notify_bound,
+ .complete = imx7_csi_notify_complete,
};
static int imx7_csi_async_register(struct imx7_csi *csi)
@@ -1168,48 +2107,133 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
csi->notifier.ops = &imx7_csi_notify_ops;
- ret = v4l2_async_subdev_nf_register(&csi->sd, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
if (ret)
return ret;
- return v4l2_async_register_subdev(&csi->sd);
+ return 0;
+}
+
+static void imx7_csi_media_cleanup(struct imx7_csi *csi)
+{
+ v4l2_device_unregister(&csi->v4l2_dev);
+ media_device_unregister(&csi->mdev);
+ media_device_cleanup(&csi->mdev);
+}
+
+static const struct media_device_ops imx7_csi_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+static int imx7_csi_media_dev_init(struct imx7_csi *csi)
+{
+ int ret;
+
+ strscpy(csi->mdev.model, "imx-media", sizeof(csi->mdev.model));
+ csi->mdev.ops = &imx7_csi_media_ops;
+ csi->mdev.dev = csi->dev;
+
+ csi->v4l2_dev.mdev = &csi->mdev;
+ strscpy(csi->v4l2_dev.name, "imx-media",
+ sizeof(csi->v4l2_dev.name));
+ snprintf(csi->mdev.bus_info, sizeof(csi->mdev.bus_info),
+ "platform:%s", dev_name(csi->mdev.dev));
+
+ media_device_init(&csi->mdev);
+
+ ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&csi->v4l2_dev,
+ "Failed to register v4l2_device: %d\n", ret);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ media_device_cleanup(&csi->mdev);
+
+ return ret;
+}
+
+static int imx7_csi_media_init(struct imx7_csi *csi)
+{
+ unsigned int i;
+ int ret;
+
+ /* add media device */
+ ret = imx7_csi_media_dev_init(csi);
+ if (ret)
+ return ret;
+
+ v4l2_subdev_init(&csi->sd, &imx7_csi_subdev_ops);
+ v4l2_set_subdevdata(&csi->sd, csi);
+ csi->sd.internal_ops = &imx7_csi_internal_ops;
+ csi->sd.entity.ops = &imx7_csi_entity_ops;
+ csi->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi->sd.dev = csi->dev;
+ csi->sd.owner = THIS_MODULE;
+ csi->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
+
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
+ csi->pad);
+ if (ret)
+ goto error;
+
+ ret = v4l2_device_register_subdev(&csi->v4l2_dev, &csi->sd);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ imx7_csi_media_cleanup(csi);
+ return ret;
}
static int imx7_csi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- int i, ret;
+ int ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
return -ENOMEM;
csi->dev = dev;
+ platform_set_drvdata(pdev, csi);
+
+ spin_lock_init(&csi->irqlock);
+ mutex_init(&csi->lock);
+ /* Acquire resources and install interrupt handler. */
csi->mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(csi->mclk)) {
ret = PTR_ERR(csi->mclk);
dev_err(dev, "Failed to get mclk: %d", ret);
- return ret;
+ goto destroy_mutex;
}
csi->irq = platform_get_irq(pdev, 0);
- if (csi->irq < 0)
- return csi->irq;
+ if (csi->irq < 0) {
+ ret = csi->irq;
+ goto destroy_mutex;
+ }
csi->regbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(csi->regbase))
- return PTR_ERR(csi->regbase);
+ if (IS_ERR(csi->regbase)) {
+ ret = PTR_ERR(csi->regbase);
+ goto destroy_mutex;
+ }
csi->model = (enum imx_csi_model)(uintptr_t)of_device_get_match_data(&pdev->dev);
- spin_lock_init(&csi->irqlock);
- mutex_init(&csi->lock);
-
- /* install interrupt handler */
ret = devm_request_irq(dev, csi->irq, imx7_csi_irq_handler, 0, "csi",
(void *)csi);
if (ret < 0) {
@@ -1217,42 +2241,15 @@ static int imx7_csi_probe(struct platform_device *pdev)
goto destroy_mutex;
}
- /* add media device */
- imxmd = imx_media_dev_init(dev, NULL);
- if (IS_ERR(imxmd)) {
- ret = PTR_ERR(imxmd);
+ /* Initialize all the media device infrastructure. */
+ ret = imx7_csi_media_init(csi);
+ if (ret)
goto destroy_mutex;
- }
- platform_set_drvdata(pdev, &csi->sd);
-
- ret = imx_media_of_add_csi(imxmd, node);
- if (ret < 0 && ret != -ENODEV && ret != -EEXIST)
- goto cleanup;
-
- ret = imx_media_dev_notifier_register(imxmd, NULL);
- if (ret < 0)
- goto cleanup;
-
- csi->imxmd = imxmd;
- v4l2_subdev_init(&csi->sd, &imx7_csi_subdev_ops);
- v4l2_set_subdevdata(&csi->sd, csi);
- csi->sd.internal_ops = &imx7_csi_internal_ops;
- csi->sd.entity.ops = &imx7_csi_entity_ops;
- csi->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
- csi->sd.dev = &pdev->dev;
- csi->sd.owner = THIS_MODULE;
- csi->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
- csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
- snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
-
- for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
- csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
- csi->pad);
- if (ret < 0)
- goto cleanup;
+ /* Set the default mbus formats. */
+ ret = imx7_csi_init_cfg(&csi->sd, NULL);
+ if (ret)
+ goto media_cleanup;
ret = imx7_csi_async_register(csi);
if (ret)
@@ -1263,13 +2260,8 @@ static int imx7_csi_probe(struct platform_device *pdev)
subdev_notifier_cleanup:
v4l2_async_nf_unregister(&csi->notifier);
v4l2_async_nf_cleanup(&csi->notifier);
-
-cleanup:
- v4l2_async_nf_unregister(&imxmd->notifier);
- v4l2_async_nf_cleanup(&imxmd->notifier);
- v4l2_device_unregister(&imxmd->v4l2_dev);
- media_device_unregister(&imxmd->md);
- media_device_cleanup(&imxmd->md);
+media_cleanup:
+ imx7_csi_media_cleanup(csi);
destroy_mutex:
mutex_destroy(&csi->lock);
@@ -1279,20 +2271,13 @@ destroy_mutex:
static int imx7_csi_remove(struct platform_device *pdev)
{
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
- struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- struct imx_media_dev *imxmd = csi->imxmd;
-
- v4l2_async_nf_unregister(&imxmd->notifier);
- v4l2_async_nf_cleanup(&imxmd->notifier);
+ struct imx7_csi *csi = platform_get_drvdata(pdev);
- media_device_unregister(&imxmd->md);
- v4l2_device_unregister(&imxmd->v4l2_dev);
- media_device_cleanup(&imxmd->md);
+ imx7_csi_media_cleanup(csi);
v4l2_async_nf_unregister(&csi->notifier);
v4l2_async_nf_cleanup(&csi->notifier);
- v4l2_async_unregister_subdev(sd);
+ v4l2_async_unregister_subdev(&csi->sd);
mutex_destroy(&csi->lock);
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 68588e9dab0b..28aacda0f5a7 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -395,7 +395,7 @@ static int iss_pipeline_disable(struct iss_pipeline *pipe,
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -464,7 +464,7 @@ static int iss_pipeline_enable(struct iss_pipeline *pipe,
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
@@ -553,7 +553,7 @@ static int iss_pipeline_is_last(struct media_entity *me)
pipe = to_iss_pipeline(me);
if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
return 0;
- pad = media_entity_remote_pad(&pipe->output->pad);
+ pad = media_pad_remote_pad_first(&pipe->output->pad);
return pad->entity == me;
}
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 124ab2f44fbf..04ce0e7eb557 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -538,7 +538,7 @@ static int csi2_configure(struct iss_csi2_device *csi2)
if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
return -EBUSY;
- pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+ pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
pdata = sensor->host_priv;
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index d0da083deed5..9512cd3314f2 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -190,7 +190,7 @@ iss_video_remote_subdev(struct iss_video *video, u32 *pad)
{
struct media_pad *remote;
- remote = media_entity_remote_pad(&video->pad);
+ remote = media_pad_remote_pad_first(&video->pad);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 2992fb87cf72..4af5a831bde0 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -109,7 +109,7 @@ struct rkvdec_h264_run {
const struct v4l2_ctrl_h264_sps *sps;
const struct v4l2_ctrl_h264_pps *pps;
const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
- int ref_buf_idx[V4L2_H264_NUM_DPB_ENTRIES];
+ struct vb2_buffer *ref_buf[V4L2_H264_NUM_DPB_ENTRIES];
};
struct rkvdec_h264_ctx {
@@ -742,17 +742,16 @@ static void lookup_ref_buf_idx(struct rkvdec_ctx *ctx,
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb;
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
- int buf_idx = -1;
+ struct vb2_buffer *buf = NULL;
if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE) {
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[i].reference_ts, 0);
- if (buf_idx < 0)
+ buf = vb2_find_buffer(cap_q, dpb[i].reference_ts);
+ if (!buf)
pr_debug("No buffer for reference_ts %llu",
dpb[i].reference_ts);
}
- run->ref_buf_idx[i] = buf_idx;
+ run->ref_buf[i] = buf;
}
}
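The rkvdec change above keeps a buffer pointer (or NULL) per DPB entry instead of a vb2 index. A self-contained sketch of that lookup-by-timestamp pattern on a plain array; struct fake_buf and find_by_timestamp() are invented for the example and are not vb2 API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_buf {
	uint64_t timestamp;
	int index;
};

static struct fake_buf *find_by_timestamp(struct fake_buf *bufs, size_t n,
					  uint64_t ts)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (bufs[i].timestamp == ts)
			return &bufs[i];

	return NULL;	/* caller falls back to the destination buffer */
}

int main(void)
{
	struct fake_buf bufs[] = { { 1000, 0 }, { 2000, 1 } };
	struct fake_buf *ref = find_by_timestamp(bufs, 2, 2000);

	printf("ref index: %d\n", ref ? ref->index : -1);
	return 0;
}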
@@ -805,7 +804,7 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx,
if (WARN_ON(ref->index >= ARRAY_SIZE(dec_params->dpb)))
continue;
- dpb_valid = run->ref_buf_idx[ref->index] >= 0;
+ dpb_valid = run->ref_buf[ref->index] != NULL;
bottom = ref->fields == V4L2_H264_BOTTOM_FIELD_REF;
set_ps_field(hw_rps, DPB_INFO(i, j),
@@ -881,24 +880,6 @@ static const u32 poc_reg_tbl_bottom_field[16] = {
RKVDEC_REG_H264_POC_REFER2(1)
};
-static struct vb2_buffer *
-get_ref_buf(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run,
- unsigned int dpb_idx)
-{
- struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
- struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
- int buf_idx = run->ref_buf_idx[dpb_idx];
-
- /*
- * If a DPB entry is unused or invalid, address of current destination
- * buffer is returned.
- */
- if (buf_idx < 0)
- return &run->base.bufs.dst->vb2_buf;
-
- return vb2_get_buffer(cap_q, buf_idx);
-}
-
static void config_registers(struct rkvdec_ctx *ctx,
struct rkvdec_h264_run *run)
{
@@ -971,8 +952,14 @@ static void config_registers(struct rkvdec_ctx *ctx,
/* config ref pic address & poc */
for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
- struct vb2_buffer *vb_buf = get_ref_buf(ctx, run, i);
-
+ struct vb2_buffer *vb_buf = run->ref_buf[i];
+
+ /*
+ * If a DPB entry is unused or invalid, address of current destination
+ * buffer is returned.
+ */
+ if (!vb_buf)
+ vb_buf = &dst_buf->vb2_buf;
refer_addr = vb2_dma_contig_plane_dma_addr(vb_buf, 0);
if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/staging/media/rkvdec/rkvdec-vp9.c
index 311a12656072..d8c1c0db15c7 100644
--- a/drivers/staging/media/rkvdec/rkvdec-vp9.c
+++ b/drivers/staging/media/rkvdec/rkvdec-vp9.c
@@ -383,17 +383,17 @@ get_ref_buf(struct rkvdec_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
{
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
- int buf_idx;
+ struct vb2_buffer *buf;
/*
* If a ref is unused or invalid, address of current destination
* buffer is returned.
*/
- buf_idx = vb2_find_timestamp(cap_q, timestamp, 0);
- if (buf_idx < 0)
- return vb2_to_rkvdec_decoded_buf(&dst->vb2_buf);
+ buf = vb2_find_buffer(cap_q, timestamp);
+ if (!buf)
+ buf = &dst->vb2_buf;
- return vb2_to_rkvdec_decoded_buf(vb2_get_buffer(cap_q, buf_idx));
+ return vb2_to_rkvdec_decoded_buf(buf);
}
static dma_addr_t get_mv_base_addr(struct rkvdec_decoded_buffer *buf)
@@ -1015,7 +1015,6 @@ static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
vp9_ctx->priv_tbl.size = sizeof(*priv_tbl);
vp9_ctx->priv_tbl.cpu = priv_tbl;
- memset(priv_tbl, 0, sizeof(*priv_tbl));
count_tbl = dma_alloc_coherent(rkvdec->dev, RKVDEC_VP9_COUNT_SIZE,
&vp9_ctx->count_tbl.dma, GFP_KERNEL);
@@ -1026,7 +1025,6 @@ static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
vp9_ctx->count_tbl.size = RKVDEC_VP9_COUNT_SIZE;
vp9_ctx->count_tbl.cpu = count_tbl;
- memset(count_tbl, 0, sizeof(*count_tbl));
rkvdec_init_v4l2_vp9_count_tbl(ctx);
return 0;
diff --git a/drivers/media/usb/stkwebcam/Kconfig b/drivers/staging/media/stkwebcam/Kconfig
index d94d023f1aa0..4450403dff41 100644
--- a/drivers/media/usb/stkwebcam/Kconfig
+++ b/drivers/staging/media/stkwebcam/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-config USB_STKWEBCAM
- tristate "USB Syntek DC1125 Camera support"
+config VIDEO_STKWEBCAM
+ tristate "USB Syntek DC1125 Camera support (DEPRECATED)"
depends on VIDEO_DEV
+ depends on USB
help
Say Y here if you want to use this type of camera.
Supported devices are typically found in some Asus laptops,
@@ -9,6 +10,9 @@ config USB_STKWEBCAM
may be supported by the stk11xx driver, from which this is
derived, see <http://sourceforge.net/projects/syntekdriver/>
+ This driver is deprecated and is scheduled for removal by
+ the end of 2022. See the TODO file for more information.
+
To compile this driver as a module, choose M here: the
module will be called stkwebcam.
diff --git a/drivers/media/usb/stkwebcam/Makefile b/drivers/staging/media/stkwebcam/Makefile
index daa9ae6d48c2..17ad7b6f43d0 100644
--- a/drivers/media/usb/stkwebcam/Makefile
+++ b/drivers/staging/media/stkwebcam/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
stkwebcam-objs := stk-webcam.o stk-sensor.o
-obj-$(CONFIG_USB_STKWEBCAM) += stkwebcam.o
+obj-$(CONFIG_VIDEO_STKWEBCAM) += stkwebcam.o
diff --git a/drivers/staging/media/stkwebcam/TODO b/drivers/staging/media/stkwebcam/TODO
new file mode 100644
index 000000000000..735304a72729
--- /dev/null
+++ b/drivers/staging/media/stkwebcam/TODO
@@ -0,0 +1,12 @@
+This is a very old driver for very old hardware (specifically
+laptops that use this sensor). In addition, according to reports,
DUMMY
+the picture quality is quite bad.
+
+This is also one of the few drivers still not using the vb2
+framework (or even the old videobuf framework!), so this driver
+is now deprecated with the intent of removing it altogether by
+the end of 2022.
+
+In order to keep this driver, it has to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/staging/media/stkwebcam/stk-sensor.c
index 94aa6a27f934..94aa6a27f934 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/staging/media/stkwebcam/stk-sensor.c
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/staging/media/stkwebcam/stk-webcam.c
index 787edb3d47c2..787edb3d47c2 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/staging/media/stkwebcam/stk-webcam.c
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/staging/media/stkwebcam/stk-webcam.h
index 136decffe9ce..136decffe9ce 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/staging/media/stkwebcam/stk-webcam.h
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 68b3dcdb5df3..960a0130cd62 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -42,7 +42,7 @@ static int cedrus_try_ctrl(struct v4l2_ctrl *ctrl)
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
- } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) {
+ } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
struct cedrus_ctx *ctx = container_of(ctrl->handler, struct cedrus_ctx, hdl);
@@ -164,42 +164,54 @@ static const struct cedrus_control cedrus_controls[] = {
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
.ops = &cedrus_ctrl_ops,
},
.codec = CEDRUS_CODEC_H265,
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
},
.codec = CEDRUS_CODEC_H265,
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ .id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
+ /* The driver can only handle 1 entry per slice for now */
+ .dims = { 1 },
},
.codec = CEDRUS_CODEC_H265,
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
},
.codec = CEDRUS_CODEC_H265,
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
- .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
- .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ .id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
+ /* maximum 256 entry point offsets per slice */
+ .dims = { 256 },
+ .max = 0xffffffff,
+ .step = 1,
},
.codec = CEDRUS_CODEC_H265,
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
- .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
- .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .max = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .max = V4L2_STATELESS_HEVC_START_CODE_NONE,
+ .def = V4L2_STATELESS_HEVC_START_CODE_NONE,
},
.codec = CEDRUS_CODEC_H265,
},
@@ -211,7 +223,7 @@ static const struct cedrus_control cedrus_controls[] = {
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS,
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
},
.codec = CEDRUS_CODEC_H265,
},
@@ -230,6 +242,17 @@ void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id)
return NULL;
}
+u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; ctx->ctrls[i]; i++)
+ if (ctx->ctrls[i]->id == id)
+ return ctx->ctrls[i]->elems;
+
+ return 0;
+}
+
static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
{
struct v4l2_ctrl_handler *hdl = &ctx->hdl;
@@ -240,7 +263,8 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT);
if (hdl->error) {
v4l2_err(&dev->v4l2_dev,
- "Failed to initialize control handler\n");
+ "Failed to initialize control handler: %d\n",
+ hdl->error);
return hdl->error;
}
@@ -255,7 +279,9 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
NULL);
if (hdl->error) {
v4l2_err(&dev->v4l2_dev,
- "Failed to create new custom control\n");
+ "Failed to create %s control: %d\n",
+ v4l2_ctrl_get_name(cedrus_controls[i].cfg.id),
+ hdl->error);
v4l2_ctrl_handler_free(hdl);
kfree(ctx->ctrls);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 3bc094eb497f..084193019350 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -81,6 +81,8 @@ struct cedrus_h265_run {
const struct v4l2_ctrl_hevc_slice_params *slice_params;
const struct v4l2_ctrl_hevc_decode_params *decode_params;
const struct v4l2_ctrl_hevc_scaling_matrix *scaling_matrix;
+ const u32 *entry_points;
+ u32 entry_points_count;
};
struct cedrus_vp8_run {
@@ -146,6 +148,8 @@ struct cedrus_ctx {
ssize_t mv_col_buf_unit_size;
void *neighbor_info_buf;
dma_addr_t neighbor_info_buf_addr;
+ void *entry_points_buf;
+ dma_addr_t entry_points_buf_addr;
} h265;
struct {
unsigned int last_frame_p_type;
@@ -162,7 +166,7 @@ struct cedrus_dec_ops {
void (*irq_clear)(struct cedrus_ctx *ctx);
void (*irq_disable)(struct cedrus_ctx *ctx);
enum cedrus_irq_status (*irq_status)(struct cedrus_ctx *ctx);
- void (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run);
+ int (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run);
int (*start)(struct cedrus_ctx *ctx);
void (*stop)(struct cedrus_ctx *ctx);
void (*trigger)(struct cedrus_ctx *ctx);
@@ -261,5 +265,6 @@ vb2_to_cedrus_buffer(const struct vb2_buffer *p)
}
void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id);
+u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id);
#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 9c7200299465..3b6aa78a2985 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -28,6 +28,7 @@ void cedrus_device_run(void *priv)
struct cedrus_dev *dev = ctx->dev;
struct cedrus_run run = {};
struct media_request *src_req;
+ int error;
run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -65,15 +66,19 @@ void cedrus_device_run(void *priv)
case V4L2_PIX_FMT_HEVC_SLICE:
run.h265.sps = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ V4L2_CID_STATELESS_HEVC_SPS);
run.h265.pps = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ V4L2_CID_STATELESS_HEVC_PPS);
run.h265.slice_params = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
+ V4L2_CID_STATELESS_HEVC_SLICE_PARAMS);
run.h265.decode_params = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS);
+ V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
run.h265.scaling_matrix = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX);
+ V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
+ run.h265.entry_points = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS);
+ run.h265.entry_points_count = cedrus_get_num_of_controls(ctx,
+ V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS);
break;
case V4L2_PIX_FMT_VP8_FRAME:
@@ -89,16 +94,26 @@ void cedrus_device_run(void *priv)
cedrus_dst_format_set(dev, &ctx->dst_fmt);
- dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
+ error = dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
+ if (error)
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Failed to setup decoding job: %d\n", error);
/* Complete request(s) controls if needed. */
if (src_req)
v4l2_ctrl_request_complete(src_req, &ctx->hdl);
- dev->dec_ops[ctx->current_codec]->trigger(ctx);
-
- /* Start the watchdog timer. */
- schedule_delayed_work(&dev->watchdog_work,
- msecs_to_jiffies(2000));
+ /* Trigger decoding if setup went well, bail out otherwise. */
+ if (!error) {
+ dev->dec_ops[ctx->current_codec]->trigger(ctx);
+
+ /* Start the watchdog timer. */
+ schedule_delayed_work(&dev->watchdog_work,
+ msecs_to_jiffies(2000));
+ } else {
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev,
+ ctx->fh.m2m_ctx,
+ VB2_BUF_STATE_ERROR);
+ }
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index d8fb93035470..c345e67ba9bc 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -493,8 +493,7 @@ static void cedrus_h264_irq_disable(struct cedrus_ctx *ctx)
reg & ~VE_H264_CTRL_INT_MASK);
}
-static void cedrus_h264_setup(struct cedrus_ctx *ctx,
- struct cedrus_run *run)
+static int cedrus_h264_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
struct cedrus_dev *dev = ctx->dev;
@@ -510,6 +509,8 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
cedrus_write_frame_list(ctx, run);
cedrus_set_params(ctx, run);
+
+ return 0;
}
static int cedrus_h264_start(struct cedrus_ctx *ctx)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 44f385be9f6c..687f87598f78 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -143,10 +143,13 @@ static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
for (i = 0; i < num_active_dpb_entries; i++) {
int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0);
u32 pic_order_cnt[2] = {
- dpb[i].pic_order_cnt[0],
- dpb[i].pic_order_cnt[1]
+ dpb[i].pic_order_cnt_val,
+ dpb[i].pic_order_cnt_val
};
+ if (buffer_index < 0)
+ continue;
+
cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
pic_order_cnt,
buffer_index);
@@ -301,8 +304,91 @@ static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
}
}
-static void cedrus_h265_setup(struct cedrus_ctx *ctx,
- struct cedrus_run *run)
+static int cedrus_h265_is_low_delay(struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_hevc_dpb_entry *dpb;
+ s32 poc;
+ int i;
+
+ slice_params = run->h265.slice_params;
+ poc = run->h265.decode_params->pic_order_cnt_val;
+ dpb = run->h265.decode_params->dpb;
+
+ for (i = 0; i < slice_params->num_ref_idx_l0_active_minus1 + 1; i++)
+ if (dpb[slice_params->ref_idx_l0[i]].pic_order_cnt_val > poc)
+ return 1;
+
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_B)
+ return 0;
+
+ for (i = 0; i < slice_params->num_ref_idx_l1_active_minus1 + 1; i++)
+ if (dpb[slice_params->ref_idx_l1[i]].pic_order_cnt_val > poc)
+ return 1;
+
+ return 0;
+}
+
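cedrus_h265_is_low_delay() above reports whether any active L0 (and, for B slices, L1) reference has a picture order count greater than the current picture's. A standalone sketch of that check over plain arrays, with illustrative names rather than the V4L2 control layout:

#include <stdio.h>

/*
 * Returns 1 when some active reference picture has a POC larger than the
 * current picture's, which is the comparison the driver helper performs
 * over its reference index lists.
 */
static int has_ref_after_current(const int *ref_poc, int num_refs, int cur_poc)
{
	int i;

	for (i = 0; i < num_refs; i++)
		if (ref_poc[i] > cur_poc)
			return 1;

	return 0;
}

int main(void)
{
	int refs[] = { 8, 12, 16 };

	printf("%d\n", has_ref_after_current(refs, 3, 20));	/* 0: all refs precede POC 20 */
	printf("%d\n", has_ref_after_current(refs, 3, 10));	/* 1: POCs 12 and 16 follow POC 10 */
	return 0;
}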
+static void cedrus_h265_write_tiles(struct cedrus_ctx *ctx,
+ struct cedrus_run *run,
+ unsigned int ctb_addr_x,
+ unsigned int ctb_addr_y)
+{
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ struct cedrus_dev *dev = ctx->dev;
+ const u32 *entry_points;
+ u32 *entry_points_buf;
+ int i, x, tx, y, ty;
+
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ entry_points = run->h265.entry_points;
+ entry_points_buf = ctx->codec.h265.entry_points_buf;
+
+ for (x = 0, tx = 0; tx < pps->num_tile_columns_minus1 + 1; tx++) {
+ if (x + pps->column_width_minus1[tx] + 1 > ctb_addr_x)
+ break;
+
+ x += pps->column_width_minus1[tx] + 1;
+ }
+
+ for (y = 0, ty = 0; ty < pps->num_tile_rows_minus1 + 1; ty++) {
+ if (y + pps->row_height_minus1[ty] + 1 > ctb_addr_y)
+ break;
+
+ y += pps->row_height_minus1[ty] + 1;
+ }
+
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, (y << 16) | (x << 0));
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB,
+ ((y + pps->row_height_minus1[ty]) << 16) |
+ ((x + pps->column_width_minus1[tx]) << 0));
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED) {
+ for (i = 0; i < slice_params->num_entry_point_offsets; i++)
+ entry_points_buf[i] = entry_points[i];
+ } else {
+ for (i = 0; i < slice_params->num_entry_point_offsets; i++) {
+ if (tx + 1 >= pps->num_tile_columns_minus1 + 1) {
+ x = 0;
+ tx = 0;
+ y += pps->row_height_minus1[ty++] + 1;
+ } else {
+ x += pps->column_width_minus1[tx++] + 1;
+ }
+
+ entry_points_buf[i * 4 + 0] = entry_points[i];
+ entry_points_buf[i * 4 + 1] = 0x0;
+ entry_points_buf[i * 4 + 2] = (y << 16) | (x << 0);
+ entry_points_buf[i * 4 + 3] =
+ ((y + pps->row_height_minus1[ty]) << 16) |
+ ((x + pps->column_width_minus1[tx]) << 0);
+ }
+ }
+}
+
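cedrus_h265_write_tiles() above locates the tile containing the first CTB of the slice by accumulating the PPS column widths (and row heights) until the CTB address is passed. A small sketch of that column walk on plain arrays; names and layout are illustrative only:

#include <stdio.h>

static void find_tile_column(const unsigned *col_width, unsigned num_cols,
			     unsigned ctb_addr_x, unsigned *first_ctb_x,
			     unsigned *tile_col)
{
	unsigned x = 0, tx;

	/* Stop at the first column whose span covers ctb_addr_x. */
	for (tx = 0; tx < num_cols; tx++) {
		if (x + col_width[tx] > ctb_addr_x)
			break;
		x += col_width[tx];
	}

	*first_ctb_x = x;
	*tile_col = tx;
}

int main(void)
{
	/* Three tile columns, 4 + 4 + 2 CTBs wide. */
	unsigned widths[] = { 4, 4, 2 };
	unsigned x, tx;

	find_tile_column(widths, 3, 6, &x, &tx);
	printf("CTB 6 lies in tile column %u starting at CTB %u\n", tx, x);
	return 0;
}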
+static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
struct cedrus_dev *dev = ctx->dev;
const struct v4l2_ctrl_hevc_sps *sps;
@@ -312,11 +398,15 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
const struct v4l2_hevc_pred_weight_table *pred_weight_table;
unsigned int width_in_ctb_luma, ctb_size_luma;
unsigned int log2_max_luma_coding_block_size;
+ unsigned int ctb_addr_x, ctb_addr_y;
dma_addr_t src_buf_addr;
dma_addr_t src_buf_end_addr;
u32 chroma_log2_weight_denom;
+ u32 num_entry_point_offsets;
u32 output_pic_list_index;
u32 pic_order_cnt[2];
+ u8 *padding;
+ int count;
u32 reg;
sps = run->h265.sps;
@@ -324,6 +414,15 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
slice_params = run->h265.slice_params;
decode_params = run->h265.decode_params;
pred_weight_table = &slice_params->pred_weight_table;
+ num_entry_point_offsets = slice_params->num_entry_point_offsets;
+
+ /*
+	 * If entry point offsets are present, the control must carry
+	 * exactly as many offsets as the slice parameters announce.
+ */
+ if (num_entry_point_offsets &&
+ num_entry_point_offsets != run->h265.entry_points_count)
+ return -ERANGE;
log2_max_luma_coding_block_size =
sps->log2_min_luma_coding_block_size_minus3 + 3 +
@@ -358,8 +457,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h265.mv_col_buf) {
ctx->codec.h265.mv_col_buf_size = 0;
- // TODO: Abort the process here.
- return;
+ return -ENOMEM;
}
}
@@ -391,12 +489,19 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
/* Coding tree block address */
- reg = VE_DEC_H265_DEC_CTB_ADDR_X(slice_params->slice_segment_addr % width_in_ctb_luma);
- reg |= VE_DEC_H265_DEC_CTB_ADDR_Y(slice_params->slice_segment_addr / width_in_ctb_luma);
+ ctb_addr_x = slice_params->slice_segment_addr % width_in_ctb_luma;
+ ctb_addr_y = slice_params->slice_segment_addr / width_in_ctb_luma;
+ reg = VE_DEC_H265_DEC_CTB_ADDR_X(ctb_addr_x);
+ reg |= VE_DEC_H265_DEC_CTB_ADDR_Y(ctb_addr_y);
cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
- cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
- cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED)) {
+ cedrus_h265_write_tiles(ctx, run, ctb_addr_x, ctb_addr_y);
+ } else {
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+ }
/* Clear the number of correctly-decoded coding tree blocks. */
if (ctx->fh.m2m_ctx->new_frame)
@@ -405,7 +510,30 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Initialize bitstream access. */
cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
- cedrus_h265_skip_bits(dev, slice_params->data_bit_offset);
+ /*
+	 * Cedrus expects the bitstream pointer to be at the end of the slice header,
+	 * not at the start of the slice data. Padding is 8 bits at most (one bit set to
+	 * 1 and at most seven bits set to 0), so only one byte before the slice data needs inspecting.
+ */
+
+ if (slice_params->data_byte_offset == 0)
+ return -EOPNOTSUPP;
+
+ padding = (u8 *)vb2_plane_vaddr(&run->src->vb2_buf, 0) +
+ slice_params->data_byte_offset - 1;
+
+ /* at least one bit must be set in that byte */
+ if (*padding == 0)
+ return -EINVAL;
+
+ for (count = 0; count < 8; count++)
+ if (*padding & (1 << count))
+ break;
+
+ /* Include the one bit. */
+ count++;
+
+ cedrus_h265_skip_bits(dev, slice_params->data_byte_offset * 8 - count);
/* Bitstream parameters. */
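The padding scan above rewinds the hardware bitstream pointer to the end of the slice header by counting the RBSP alignment zeros plus the stop bit in the byte preceding the slice data. A standalone sketch of that bit count; the helper name is illustrative, not driver API:

#include <stdio.h>

static int rbsp_trailing_bits(unsigned char padding)
{
	int count;

	if (padding == 0)
		return -1;	/* malformed: the stop bit must be present */

	/* Scan from the least significant bit for the first bit set to 1. */
	for (count = 0; count < 8; count++)
		if (padding & (1 << count))
			break;

	return count + 1;	/* zeros scanned plus the stop bit itself */
}

int main(void)
{
	/*
	 * 0x80 = stop bit in the MSB, seven alignment zeros: 8 bits are
	 * subtracted from data_byte_offset * 8 before skipping.
	 */
	printf("trailing bits: %d\n", rbsp_trailing_bits(0x80));
	return 0;
}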
@@ -500,7 +628,9 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
pps->flags);
- /* TODO: VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED */
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TILES_ENABLED,
+ pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
@@ -559,7 +689,6 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
- VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(decode_params->num_poc_st_curr_after == 0) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
@@ -572,16 +701,22 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
slice_params->flags);
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I && !cedrus_h265_is_low_delay(run))
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY;
+
cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
pred_weight_table->delta_chroma_log2_weight_denom;
- reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(0) |
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(num_entry_point_offsets) |
VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
+ cedrus_write(dev, VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR,
+ ctx->codec.h265.entry_points_buf_addr >> 8);
+
/* Decoded picture size. */
reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
@@ -659,6 +794,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Enable appropriate interruptions. */
cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
+
+ return 0;
}
static int cedrus_h265_start(struct cedrus_ctx *ctx)
@@ -676,6 +813,18 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
if (!ctx->codec.h265.neighbor_info_buf)
return -ENOMEM;
+ ctx->codec.h265.entry_points_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE,
+ &ctx->codec.h265.entry_points_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.entry_points_buf) {
+ dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -696,6 +845,9 @@ static void cedrus_h265_stop(struct cedrus_ctx *ctx)
ctx->codec.h265.neighbor_info_buf,
ctx->codec.h265.neighbor_info_buf_addr,
DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE,
+ ctx->codec.h265.entry_points_buf,
+ ctx->codec.h265.entry_points_buf_addr);
}
static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 5dad2f296c6d..4cfc4a3c8a7f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -48,7 +48,7 @@ static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx)
cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
}
-static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+static int cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
const struct v4l2_ctrl_mpeg2_sequence *seq;
const struct v4l2_ctrl_mpeg2_picture *pic;
@@ -185,6 +185,8 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
VE_DEC_MPEG_CTRL_MC_CACHE_EN;
cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+
+ return 0;
}
static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index bdb062ad8682..d81f7513ade0 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -377,13 +377,12 @@
#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23)
#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY BIT(21)
#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \
SHIFT_AND_MASK_BITS(v, 31, 28)
#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \
SHIFT_AND_MASK_BITS(v, 27, 24)
-#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(v) \
- ((v) ? BIT(21) : 0)
#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \
SHIFT_AND_MASK_BITS(v, 20, 16)
#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 33726175d980..66714609b577 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -568,7 +568,6 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
- src_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
src_vq->ops = &cedrus_qops;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
index f4016684b32d..3f750d1795b6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
@@ -651,8 +651,7 @@ static void cedrus_vp8_irq_disable(struct cedrus_ctx *ctx)
reg & ~VE_H264_CTRL_INT_MASK);
}
-static void cedrus_vp8_setup(struct cedrus_ctx *ctx,
- struct cedrus_run *run)
+static int cedrus_vp8_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
const struct v4l2_ctrl_vp8_frame *slice = run->vp8.frame_params;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
@@ -855,6 +854,8 @@ static void cedrus_vp8_setup(struct cedrus_ctx *ctx,
ctx->codec.vp8.last_sharpness_level =
slice->lf.sharpness_level;
}
+
+ return 0;
}
static int cedrus_vp8_start(struct cedrus_ctx *ctx)
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 8e184aa4c252..9d46a36cc014 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -157,7 +157,7 @@ tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan)
{
struct media_pad *pad;
- pad = media_entity_remote_pad(&chan->pad);
+ pad = media_pad_remote_pad_first(&chan->pad);
if (!pad)
return NULL;
@@ -177,7 +177,7 @@ tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan)
pad = &subdev->entity.pads[0];
while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
- pad = media_entity_remote_pad(pad);
+ pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
entity = pad->entity;
diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/staging/media/zoran/videocodec.c
index 3af7d02bd910..a0c8bde5ec11 100644
--- a/drivers/staging/media/zoran/videocodec.c
+++ b/drivers/staging/media/zoran/videocodec.c
@@ -16,16 +16,6 @@
#include "videocodec.h"
-static int videocodec_debug;
-module_param(videocodec_debug, int, 0);
-MODULE_PARM_DESC(videocodec_debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (videocodec_debug >= num) \
- printk(format, ##args); \
- } while (0)
-
struct attached_list {
struct videocodec *codec;
struct attached_list *next;
@@ -47,6 +37,7 @@ static struct codec_list *codeclist_top;
struct videocodec *videocodec_attach(struct videocodec_master *master)
{
struct codec_list *h = codeclist_top;
+ struct zoran *zr;
struct attached_list *a, *ptr;
struct videocodec *codec;
int res;
@@ -56,11 +47,13 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
return NULL;
}
- dprintk(2, "%s: '%s', flags %lx, magic %lx\n", __func__,
- master->name, master->flags, master->magic);
+ zr = videocodec_master_to_zoran(master);
+
+ zrdev_dbg(zr, "%s: '%s', flags %lx, magic %lx\n", __func__,
+ master->name, master->flags, master->magic);
if (!h) {
- pr_err("%s: no device available\n", __func__);
+ zrdev_err(zr, "%s: no device available\n", __func__);
return NULL;
}
@@ -68,7 +61,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
// attach only if the slave has at least the flags
// expected by the master
if ((master->flags & h->codec->flags) == master->flags) {
- dprintk(4, "%s: try '%s'\n", __func__, h->codec->name);
+ zrdev_dbg(zr, "%s: try '%s'\n", __func__, h->codec->name);
codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL);
if (!codec)
@@ -79,7 +72,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
codec->master_data = master;
res = codec->setup(codec);
if (res == 0) {
- dprintk(3, "%s: '%s'\n", __func__, codec->name);
+ zrdev_dbg(zr, "%s: '%s'\n", __func__, codec->name);
ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
if (!ptr)
goto out_kfree;
@@ -88,12 +81,13 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
a = h->list;
if (!a) {
h->list = ptr;
- dprintk(4, "videocodec: first element\n");
+ zrdev_dbg(zr, "videocodec: first element\n");
} else {
while (a->next)
a = a->next; // find end
a->next = ptr;
- dprintk(4, "videocodec: in after '%s'\n", h->codec->name);
+ zrdev_dbg(zr, "videocodec: in after '%s'\n",
+ h->codec->name);
}
h->attached += 1;
@@ -105,7 +99,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
h = h->next;
}
- pr_err("%s: no codec found!\n", __func__);
+ zrdev_err(zr, "%s: no codec found!\n", __func__);
return NULL;
out_kfree:
@@ -116,6 +110,7 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
int videocodec_detach(struct videocodec *codec)
{
struct codec_list *h = codeclist_top;
+ struct zoran *zr;
struct attached_list *a, *prev;
int res;
@@ -124,11 +119,13 @@ int videocodec_detach(struct videocodec *codec)
return -EINVAL;
}
- dprintk(2, "%s: '%s', type: %x, flags %lx, magic %lx\n", __func__,
- codec->name, codec->type, codec->flags, codec->magic);
+ zr = videocodec_to_zoran(codec);
+
+ zrdev_dbg(zr, "%s: '%s', type: %x, flags %lx, magic %lx\n", __func__,
+ codec->name, codec->type, codec->flags, codec->magic);
if (!h) {
- pr_err("%s: no device left...\n", __func__);
+ zrdev_err(zr, "%s: no device left...\n", __func__);
return -ENXIO;
}
@@ -139,18 +136,19 @@ int videocodec_detach(struct videocodec *codec)
if (codec == a->codec) {
res = a->codec->unset(a->codec);
if (res >= 0) {
- dprintk(3, "%s: '%s'\n", __func__, a->codec->name);
+ zrdev_dbg(zr, "%s: '%s'\n", __func__,
+ a->codec->name);
a->codec->master_data = NULL;
} else {
- pr_err("%s: '%s'\n", __func__, a->codec->name);
+ zrdev_err(zr, "%s: '%s'\n", __func__, a->codec->name);
a->codec->master_data = NULL;
}
if (!prev) {
h->list = a->next;
- dprintk(4, "videocodec: delete first\n");
+ zrdev_dbg(zr, "videocodec: delete first\n");
} else {
prev->next = a->next;
- dprintk(4, "videocodec: delete middle\n");
+ zrdev_dbg(zr, "videocodec: delete middle\n");
}
kfree(a->codec);
kfree(a);
@@ -163,22 +161,25 @@ int videocodec_detach(struct videocodec *codec)
h = h->next;
}
- pr_err("%s: given codec not found!\n", __func__);
+ zrdev_err(zr, "%s: given codec not found!\n", __func__);
return -EINVAL;
}
int videocodec_register(const struct videocodec *codec)
{
struct codec_list *ptr, *h = codeclist_top;
+ struct zoran *zr;
if (!codec) {
pr_err("%s: no data!\n", __func__);
return -EINVAL;
}
- dprintk(2,
- "videocodec: register '%s', type: %x, flags %lx, magic %lx\n",
- codec->name, codec->type, codec->flags, codec->magic);
+ zr = videocodec_to_zoran((struct videocodec *)codec);
+
+ zrdev_dbg(zr,
+ "videocodec: register '%s', type: %x, flags %lx, magic %lx\n",
+ codec->name, codec->type, codec->flags, codec->magic);
ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -187,13 +188,13 @@ int videocodec_register(const struct videocodec *codec)
if (!h) {
codeclist_top = ptr;
- dprintk(4, "videocodec: hooked in as first element\n");
+ zrdev_dbg(zr, "videocodec: hooked in as first element\n");
} else {
while (h->next)
h = h->next; // find the end
h->next = ptr;
- dprintk(4, "videocodec: hooked in after '%s'\n",
- h->codec->name);
+ zrdev_dbg(zr, "videocodec: hooked in after '%s'\n",
+ h->codec->name);
}
return 0;
@@ -202,37 +203,41 @@ int videocodec_register(const struct videocodec *codec)
int videocodec_unregister(const struct videocodec *codec)
{
struct codec_list *prev = NULL, *h = codeclist_top;
+ struct zoran *zr;
if (!codec) {
pr_err("%s: no data!\n", __func__);
return -EINVAL;
}
- dprintk(2,
- "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n",
- codec->name, codec->type, codec->flags, codec->magic);
+ zr = videocodec_to_zoran((struct videocodec *)codec);
+
+ zrdev_dbg(zr,
+ "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n",
+ codec->name, codec->type, codec->flags, codec->magic);
if (!h) {
- pr_err("%s: no device left...\n", __func__);
+ zrdev_err(zr, "%s: no device left...\n", __func__);
return -ENXIO;
}
while (h) {
if (codec == h->codec) {
if (h->attached) {
- pr_err("videocodec: '%s' is used\n", h->codec->name);
+ zrdev_err(zr, "videocodec: '%s' is used\n",
+ h->codec->name);
return -EBUSY;
}
- dprintk(3, "videocodec: unregister '%s' is ok.\n",
- h->codec->name);
+ zrdev_dbg(zr, "videocodec: unregister '%s' is ok.\n",
+ h->codec->name);
if (!prev) {
codeclist_top = h->next;
- dprintk(4,
- "videocodec: delete first element\n");
+ zrdev_dbg(zr,
+ "videocodec: delete first element\n");
} else {
prev->next = h->next;
- dprintk(4,
- "videocodec: delete middle element\n");
+ zrdev_dbg(zr,
+ "videocodec: delete middle element\n");
}
kfree(h);
return 0;
@@ -241,7 +246,7 @@ int videocodec_unregister(const struct videocodec *codec)
h = h->next;
}
- pr_err("%s: given codec not found!\n", __func__);
+ zrdev_err(zr, "%s: given codec not found!\n", __func__);
return -EINVAL;
}
diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/staging/media/zoran/videocodec.h
index 9dea348fee40..5e6057edd339 100644
--- a/drivers/staging/media/zoran/videocodec.h
+++ b/drivers/staging/media/zoran/videocodec.h
@@ -307,4 +307,19 @@ extern int videocodec_unregister(const struct videocodec *);
int videocodec_debugfs_show(struct seq_file *m);
+#include "zoran.h"
+static inline struct zoran *videocodec_master_to_zoran(struct videocodec_master *master)
+{
+ struct zoran *zr = master->data;
+
+ return zr;
+}
+
+static inline struct zoran *videocodec_to_zoran(struct videocodec *codec)
+{
+ struct videocodec_master *master = codec->master_data;
+
+ return videocodec_master_to_zoran(master);
+}
+
#endif /*ifndef __LINUX_VIDEOCODEC_H */
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
index 654c95fa5aba..05227e5298f6 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/staging/media/zoran/zoran.h
@@ -19,6 +19,8 @@
#define _BUZ_H_
#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/i2c-algo-bit.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/videobuf2-core.h>
@@ -301,6 +303,18 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
#endif
+/*
+ * Debugging macros
+ */
+#define zrdev_dbg(zr, format, args...) \
+	pci_dbg((zr)->pci_dev, format, ##args)
+
+#define zrdev_err(zr, format, args...) \
+	pci_err((zr)->pci_dev, format, ##args)
+
+#define zrdev_info(zr, format, args...) \
+	pci_info((zr)->pci_dev, format, ##args)
+
int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir);
void zoran_queue_exit(struct zoran *zr);
int zr_set_buf(struct zoran *zr);
diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c
index 26c7c32b6bc0..0e0532537a3e 100644
--- a/drivers/staging/media/zoran/zr36016.c
+++ b/drivers/staging/media/zoran/zr36016.c
@@ -22,17 +22,6 @@
/* amount of chips attached via this driver */
static int zr36016_codecs;
-static int zr36016_debug;
-module_param(zr36016_debug, int, 0);
-MODULE_PARM_DESC(zr36016_debug, "Debug level (0-4)");
-
-
-#define dprintk(num, format, args...) \
- do { \
- if (zr36016_debug >= num) \
- printk(format, ##args); \
- } while (0)
-
/* =========================================================================
Local hardware I/O functions:
@@ -43,27 +32,30 @@ MODULE_PARM_DESC(zr36016_debug, "Debug level (0-4)");
static u8 zr36016_read(struct zr36016 *ptr, u16 reg)
{
u8 value = 0;
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
/* just in case something is wrong... */
if (ptr->codec->master_data->readreg)
value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF;
else
- pr_err("%s: invalid I/O setup, nothing read!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing read!\n", ptr->name);
- dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value);
+ zrdev_dbg(zr, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value);
return value;
}
static void zr36016_write(struct zr36016 *ptr, u16 reg, u8 value)
{
- dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
+ zrdev_dbg(zr, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);
// just in case something is wrong...
if (ptr->codec->master_data->writereg)
ptr->codec->master_data->writereg(ptr->codec, reg, value);
else
- pr_err("%s: invalid I/O setup, nothing written!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n", ptr->name);
}
/* indirect read and write functions */
@@ -72,30 +64,34 @@ static void zr36016_write(struct zr36016 *ptr, u16 reg, u8 value)
static u8 zr36016_readi(struct zr36016 *ptr, u16 reg)
{
u8 value = 0;
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
/* just in case something is wrong... */
if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) {
ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA
} else {
- pr_err("%s: invalid I/O setup, nothing read (i)!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing read (i)!\n", ptr->name);
}
- dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, reg, value);
+ zrdev_dbg(zr, "%s: reading indirect from 0x%04x: %02x\n",
+ ptr->name, reg, value);
return value;
}
static void zr36016_writei(struct zr36016 *ptr, u16 reg, u8 value)
{
- dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name,
- value, reg);
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
+ zrdev_dbg(zr, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name,
+ value, reg);
/* just in case something is wrong... */
if (ptr->codec->master_data->writereg) {
ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA
} else {
- pr_err("%s: invalid I/O setup, nothing written (i)!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing written (i)!\n", ptr->name);
}
}
@@ -120,32 +116,34 @@ static u8 zr36016_read_version(struct zr36016 *ptr)
static int zr36016_basic_test(struct zr36016 *ptr)
{
- if (zr36016_debug) {
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
+ if (*KERN_INFO <= CONSOLE_LOGLEVEL_DEFAULT) {
int i;
zr36016_writei(ptr, ZR016I_PAX_LO, 0x55);
- dprintk(1, KERN_INFO "%s: registers: ", ptr->name);
+ zrdev_dbg(zr, "%s: registers: ", ptr->name);
for (i = 0; i <= 0x0b; i++)
- dprintk(1, "%02x ", zr36016_readi(ptr, i));
- dprintk(1, "\n");
+ zrdev_dbg(zr, "%02x ", zr36016_readi(ptr, i));
+ zrdev_dbg(zr, "\n");
}
// for testing just write 0, then the default value to a register and read
// it back in both cases
zr36016_writei(ptr, ZR016I_PAX_LO, 0x00);
if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) {
- pr_err("%s: attach failed, can't connect to vfe processor!\n", ptr->name);
+ zrdev_err(zr, "%s: attach failed, can't connect to vfe processor!\n", ptr->name);
return -ENXIO;
}
zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0);
if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) {
- pr_err("%s: attach failed, can't connect to vfe processor!\n", ptr->name);
+ zrdev_err(zr, "%s: attach failed, can't connect to vfe processor!\n", ptr->name);
return -ENXIO;
}
// we allow version numbers from 0-3, should be enough, though
zr36016_read_version(ptr);
if (ptr->version & 0x0c) {
- pr_err("%s: attach failed, suspicious version %d found...\n", ptr->name,
- ptr->version);
+ zrdev_err(zr, "%s: attach failed, suspicious version %d found...\n", ptr->name,
+ ptr->version);
return -ENXIO;
}
@@ -164,10 +162,11 @@ static int zr36016_pushit(struct zr36016 *ptr,
u16 len,
const char *data)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
int i = 0;
- dprintk(4, "%s: write data block to 0x%04x (len=%d)\n",
- ptr->name, startreg, len);
+ zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n",
+ ptr->name, startreg, len);
while (i < len) {
zr36016_writei(ptr, startreg++, data[i++]);
}
@@ -225,8 +224,9 @@ static void zr36016_init(struct zr36016 *ptr)
static int zr36016_set_mode(struct videocodec *codec, int mode)
{
struct zr36016 *ptr = (struct zr36016 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
- dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
+ zrdev_dbg(zr, "%s: set_mode %d call\n", ptr->name, mode);
if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
return -EINVAL;
@@ -242,11 +242,12 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm
struct vfe_settings *cap, struct vfe_polarity *pol)
{
struct zr36016 *ptr = (struct zr36016 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
- dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n",
- ptr->name, norm->h_start, norm->v_start,
- cap->x, cap->y, cap->width, cap->height,
- cap->decimation);
+ zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n",
+ ptr->name, norm->h_start, norm->v_start,
+ cap->x, cap->y, cap->width, cap->height,
+ cap->decimation);
/* if () return -EINVAL;
* trust the master driver that it knows what it does - so
@@ -276,9 +277,11 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm
static int zr36016_control(struct videocodec *codec, int type, int size, void *data)
{
struct zr36016 *ptr = (struct zr36016 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
int *ival = (int *)data;
- dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size);
+ zrdev_dbg(zr, "%s: control %d call with %d byte\n",
+ ptr->name, type, size);
switch (type) {
case CODEC_G_STATUS: /* get last status - we don't know it ... */
@@ -325,11 +328,12 @@ static int zr36016_control(struct videocodec *codec, int type, int size, void *d
static int zr36016_unset(struct videocodec *codec)
{
struct zr36016 *ptr = codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
if (ptr) {
/* do wee need some codec deinit here, too ???? */
- dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num);
+ zrdev_dbg(zr, "%s: finished codec #%d\n", ptr->name, ptr->num);
kfree(ptr);
codec->data = NULL;
@@ -352,12 +356,13 @@ static int zr36016_unset(struct videocodec *codec)
static int zr36016_setup(struct videocodec *codec)
{
struct zr36016 *ptr;
+ struct zoran *zr = videocodec_to_zoran(codec);
int res;
- dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs);
+ zrdev_dbg(zr, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs);
if (zr36016_codecs == MAX_CODECS) {
- pr_err("zr36016: Can't attach more codecs!\n");
+ zrdev_err(zr, "zr36016: Can't attach more codecs!\n");
return -ENOSPC;
}
//mem structure init
@@ -384,7 +389,8 @@ static int zr36016_setup(struct videocodec *codec)
ptr->ydec = 0;
zr36016_init(ptr);
- dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", ptr->name, ptr->version);
+ zrdev_dbg(zr, "%s: codec v%d attached and running\n",
+ ptr->name, ptr->version);
return 0;
}
@@ -417,9 +423,8 @@ int zr36016_init_module(void)
void zr36016_cleanup_module(void)
{
if (zr36016_codecs) {
- dprintk(1,
- "zr36016: something's wrong - %d codecs left somehow.\n",
- zr36016_codecs);
+ pr_debug("zr36016: something's wrong - %d codecs left somehow.\n",
+ zr36016_codecs);
}
videocodec_unregister(&zr36016_codec);
}
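/*
 * Editor's note (not part of the diff): the zr36016, zr36050 and zr36060 hunks
 * all apply the same conversion. The per-module debug level and its local
 * dprintk() macro go away, each function looks up the owning struct zoran via
 * videocodec_to_zoran(), and messages are emitted through the device-aware
 * zrdev_dbg()/zrdev_err()/zrdev_info() helpers defined elsewhere in the zoran
 * driver. A minimal sketch of the call-site change, using names taken from the
 * hunks above (the helper definitions themselves are not shown in this diff):
 */
	/* before: gated by the zr36016_debug module parameter */
	dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);

	/* after: tied to the zoran device, typically filterable via dynamic debug */
	struct zoran *zr = videocodec_to_zoran(ptr->codec);

	zrdev_dbg(zr, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);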
diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c
index 38f7021e7b06..6a7ef28d996c 100644
--- a/drivers/staging/media/zoran/zr36050.c
+++ b/drivers/staging/media/zoran/zr36050.c
@@ -29,17 +29,6 @@
/* amount of chips attached via this driver */
static int zr36050_codecs;
-/* debugging is available via module parameter */
-static int zr36050_debug;
-module_param(zr36050_debug, int, 0);
-MODULE_PARM_DESC(zr36050_debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (zr36050_debug >= num) \
- printk(format, ##args); \
- } while (0)
-
/* =========================================================================
Local hardware I/O functions:
@@ -49,32 +38,32 @@ MODULE_PARM_DESC(zr36050_debug, "Debug level (0-4)");
/* read and write functions */
static u8 zr36050_read(struct zr36050 *ptr, u16 reg)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
u8 value = 0;
/* just in case something is wrong... */
if (ptr->codec->master_data->readreg)
value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF;
else
- dprintk(1,
- KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing read!\n", ptr->name);
- dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value);
+ zrdev_dbg(zr, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value);
return value;
}
static void zr36050_write(struct zr36050 *ptr, u16 reg, u8 value)
{
- dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
+ zrdev_dbg(zr, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);
/* just in case something is wrong... */
if (ptr->codec->master_data->writereg)
ptr->codec->master_data->writereg(ptr->codec, reg, value);
else
- dprintk(1,
- KERN_ERR
- "%s: invalid I/O setup, nothing written!\n",
- ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n",
+ ptr->name);
}
/* =========================================================================
@@ -117,14 +106,15 @@ static u16 zr36050_read_scalefactor(struct zr36050 *ptr)
static void zr36050_wait_end(struct zr36050 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
int i = 0;
while (!(zr36050_read_status1(ptr) & 0x4)) {
udelay(1);
if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
- dprintk(1,
- "%s: timeout at wait_end (last status: 0x%02x)\n",
- ptr->name, ptr->status1);
+ zrdev_err(zr,
+ "%s: timeout at wait_end (last status: 0x%02x)\n",
+ ptr->name, ptr->status1);
break;
}
}
@@ -138,33 +128,32 @@ static void zr36050_wait_end(struct zr36050 *ptr)
static int zr36050_basic_test(struct zr36050 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
zr36050_write(ptr, ZR050_SOF_IDX, 0x00);
zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00);
if ((zr36050_read(ptr, ZR050_SOF_IDX) |
zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to jpeg processor!\n",
- ptr->name);
+ zrdev_err(zr,
+ "%s: attach failed, can't connect to jpeg processor!\n",
+ ptr->name);
return -ENXIO;
}
zr36050_write(ptr, ZR050_SOF_IDX, 0xff);
zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0);
if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) |
zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, can't connect to jpeg processor!\n",
- ptr->name);
+ zrdev_err(zr,
+ "%s: attach failed, can't connect to jpeg processor!\n",
+ ptr->name);
return -ENXIO;
}
zr36050_wait_end(ptr);
if ((ptr->status1 & 0x4) == 0) {
- dprintk(1,
- KERN_ERR
- "%s: attach failed, jpeg processor failed (end flag)!\n",
- ptr->name);
+ zrdev_err(zr,
+ "%s: attach failed, jpeg processor failed (end flag)!\n",
+ ptr->name);
return -EBUSY;
}
@@ -179,10 +168,11 @@ static int zr36050_basic_test(struct zr36050 *ptr)
static int zr36050_pushit(struct zr36050 *ptr, u16 startreg, u16 len, const char *data)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
int i = 0;
- dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
- startreg, len);
+ zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
+ startreg, len);
while (i < len)
zr36050_write(ptr, startreg++, data[i++]);
@@ -305,11 +295,12 @@ static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
static int zr36050_set_sof(struct zr36050 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
char sof_data[34]; // max. size of register set
int i;
- dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
- ptr->width, ptr->height, NO_OF_COMPONENTS);
+ zrdev_dbg(zr, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
+ ptr->width, ptr->height, NO_OF_COMPONENTS);
sof_data[0] = 0xff;
sof_data[1] = 0xc0;
sof_data[2] = 0x00;
@@ -336,10 +327,11 @@ static int zr36050_set_sof(struct zr36050 *ptr)
static int zr36050_set_sos(struct zr36050 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
char sos_data[16]; // max. size of register set
int i;
- dprintk(3, "%s: write SOS\n", ptr->name);
+ zrdev_dbg(zr, "%s: write SOS\n", ptr->name);
sos_data[0] = 0xff;
sos_data[1] = 0xda;
sos_data[2] = 0x00;
@@ -363,9 +355,10 @@ static int zr36050_set_sos(struct zr36050 *ptr)
static int zr36050_set_dri(struct zr36050 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
char dri_data[6]; // max. size of register set
- dprintk(3, "%s: write DRI\n", ptr->name);
+ zrdev_dbg(zr, "%s: write DRI\n", ptr->name);
dri_data[0] = 0xff;
dri_data[1] = 0xdd;
dri_data[2] = 0x00;
@@ -387,9 +380,10 @@ static void zr36050_init(struct zr36050 *ptr)
{
int sum = 0;
long bitcnt, tmp;
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
if (ptr->mode == CODEC_DO_COMPRESSION) {
- dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name);
+ zrdev_dbg(zr, "%s: COMPRESSION SETUP\n", ptr->name);
/* 050 communicates with 057 in master mode */
zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR);
@@ -419,7 +413,7 @@ static void zr36050_init(struct zr36050 *ptr)
/* setup the fixed jpeg tables - maybe variable, though -
* (see table init section above) */
- dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name);
+ zrdev_dbg(zr, "%s: write DQT, DHT, APP\n", ptr->name);
sum += zr36050_pushit(ptr, ZR050_DQT_IDX,
sizeof(zr36050_dqt), zr36050_dqt);
sum += zr36050_pushit(ptr, ZR050_DHT_IDX,
@@ -442,11 +436,11 @@ static void zr36050_init(struct zr36050 *ptr)
zr36050_write(ptr, ZR050_GO, 1); // launch codec
zr36050_wait_end(ptr);
- dprintk(2, "%s: Status after table preload: 0x%02x\n",
- ptr->name, ptr->status1);
+ zrdev_dbg(zr, "%s: Status after table preload: 0x%02x\n",
+ ptr->name, ptr->status1);
if ((ptr->status1 & 0x4) == 0) {
- pr_err("%s: init aborted!\n", ptr->name);
+ zrdev_err(zr, "%s: init aborted!\n", ptr->name);
return; // something is wrong, its timed out!!!!
}
@@ -457,9 +451,9 @@ static void zr36050_init(struct zr36050 *ptr)
bitcnt = sum << 3; /* need the size in bits */
tmp = bitcnt >> 16;
- dprintk(3,
- "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
- ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
+ zrdev_dbg(zr,
+ "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
+ ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8);
zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff);
tmp = bitcnt & 0xffff;
@@ -470,8 +464,8 @@ static void zr36050_init(struct zr36050 *ptr)
bitcnt -= ((bitcnt * 5) >> 6); // bits without eob
tmp = bitcnt >> 16;
- dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n",
- ptr->name, bitcnt, tmp);
+ zrdev_dbg(zr, "%s: code: nettobit=%ld, highnettobits=%ld\n",
+ ptr->name, bitcnt, tmp);
zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8);
zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff);
tmp = bitcnt & 0xffff;
@@ -489,7 +483,7 @@ static void zr36050_init(struct zr36050 *ptr)
((ptr->app.len > 0) ? ZR050_ME_APP : 0) |
((ptr->com.len > 0) ? ZR050_ME_COM : 0));
} else {
- dprintk(2, "%s: EXPANSION SETUP\n", ptr->name);
+ zrdev_dbg(zr, "%s: EXPANSION SETUP\n", ptr->name);
/* 050 communicates with 055 in master mode */
zr36050_write(ptr, ZR050_HARDWARE,
@@ -502,7 +496,7 @@ static void zr36050_init(struct zr36050 *ptr)
zr36050_write(ptr, ZR050_INT_REQ_0, 0);
zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1
- dprintk(3, "%s: write DHT\n", ptr->name);
+ zrdev_dbg(zr, "%s: write DHT\n", ptr->name);
zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht),
zr36050_dht);
@@ -511,11 +505,11 @@ static void zr36050_init(struct zr36050 *ptr)
zr36050_write(ptr, ZR050_GO, 1); // launch codec
zr36050_wait_end(ptr);
- dprintk(2, "%s: Status after table preload: 0x%02x\n",
- ptr->name, ptr->status1);
+ zrdev_dbg(zr, "%s: Status after table preload: 0x%02x\n",
+ ptr->name, ptr->status1);
if ((ptr->status1 & 0x4) == 0) {
- pr_err("%s: init aborted!\n", ptr->name);
+ zrdev_err(zr, "%s: init aborted!\n", ptr->name);
return; // something is wrong, its timed out!!!!
}
@@ -539,8 +533,9 @@ static void zr36050_init(struct zr36050 *ptr)
static int zr36050_set_mode(struct videocodec *codec, int mode)
{
struct zr36050 *ptr = (struct zr36050 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
- dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
+ zrdev_dbg(zr, "%s: set_mode %d call\n", ptr->name, mode);
if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
return -EINVAL;
@@ -556,12 +551,13 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm
struct vfe_settings *cap, struct vfe_polarity *pol)
{
struct zr36050 *ptr = (struct zr36050 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
int size;
- dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n",
- ptr->name, norm->h_start, norm->v_start,
- cap->x, cap->y, cap->width, cap->height,
- cap->decimation, cap->quality);
+ zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n",
+ ptr->name, norm->h_start, norm->v_start,
+ cap->x, cap->y, cap->width, cap->height,
+ cap->decimation, cap->quality);
/* if () return -EINVAL;
* trust the master driver that it knows what it does - so
* we allow invalid startx/y and norm for now ... */
@@ -594,10 +590,11 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm
static int zr36050_control(struct videocodec *codec, int type, int size, void *data)
{
struct zr36050 *ptr = (struct zr36050 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
int *ival = (int *)data;
- dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
- size);
+ zrdev_dbg(zr, "%s: control %d call with %d byte\n", ptr->name, type,
+ size);
switch (type) {
case CODEC_G_STATUS: /* get last status */
@@ -713,12 +710,13 @@ static int zr36050_control(struct videocodec *codec, int type, int size, void *d
static int zr36050_unset(struct videocodec *codec)
{
struct zr36050 *ptr = codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
if (ptr) {
/* do wee need some codec deinit here, too ???? */
- dprintk(1, "%s: finished codec #%d\n", ptr->name,
- ptr->num);
+ zrdev_dbg(zr, "%s: finished codec #%d\n", ptr->name,
+ ptr->num);
kfree(ptr);
codec->data = NULL;
@@ -741,14 +739,15 @@ static int zr36050_unset(struct videocodec *codec)
static int zr36050_setup(struct videocodec *codec)
{
struct zr36050 *ptr;
+ struct zoran *zr = videocodec_to_zoran(codec);
int res;
- dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n",
- zr36050_codecs);
+ zrdev_dbg(zr, "zr36050: initializing MJPEG subsystem #%d.\n",
+ zr36050_codecs);
if (zr36050_codecs == MAX_CODECS) {
- dprintk(1,
- KERN_ERR "zr36050: Can't attach more codecs!\n");
+ zrdev_err(zr,
+ "zr36050: Can't attach more codecs!\n");
return -ENOSPC;
}
//mem structure init
@@ -789,8 +788,8 @@ static int zr36050_setup(struct videocodec *codec)
zr36050_init(ptr);
- dprintk(1, KERN_INFO "%s: codec attached and running\n",
- ptr->name);
+ zrdev_info(zr, "%s: codec attached and running\n",
+ ptr->name);
return 0;
}
@@ -823,9 +822,8 @@ int zr36050_init_module(void)
void zr36050_cleanup_module(void)
{
if (zr36050_codecs) {
- dprintk(1,
- "zr36050: something's wrong - %d codecs left somehow.\n",
- zr36050_codecs);
+ pr_debug("zr36050: something's wrong - %d codecs left somehow.\n",
+ zr36050_codecs);
}
videocodec_unregister(&zr36050_codec);
}
diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c
index d0c369e31c81..7798016f1f96 100644
--- a/drivers/staging/media/zoran/zr36060.c
+++ b/drivers/staging/media/zoran/zr36060.c
@@ -32,16 +32,6 @@ static bool low_bitrate;
module_param(low_bitrate, bool, 0);
MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate");
-static int zr36060_debug;
-module_param(zr36060_debug, int, 0);
-MODULE_PARM_DESC(zr36060_debug, "Debug level (0-4)");
-
-#define dprintk(num, format, args...) \
- do { \
- if (zr36060_debug >= num) \
- printk(format, ##args); \
- } while (0)
-
/* =========================================================================
* Local hardware I/O functions:
* read/write via codec layer (registers are located in the master device)
@@ -51,25 +41,28 @@ MODULE_PARM_DESC(zr36060_debug, "Debug level (0-4)");
static u8 zr36060_read(struct zr36060 *ptr, u16 reg)
{
u8 value = 0;
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
// just in case something is wrong...
if (ptr->codec->master_data->readreg)
value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xff;
else
- pr_err("%s: invalid I/O setup, nothing read!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing read!\n", ptr->name);
return value;
}
static void zr36060_write(struct zr36060 *ptr, u16 reg, u8 value)
{
- dprintk(4, "0x%02x @0x%04x\n", value, reg);
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
+ zrdev_dbg(zr, "0x%02x @0x%04x\n", value, reg);
// just in case something is wrong...
if (ptr->codec->master_data->writereg)
ptr->codec->master_data->writereg(ptr->codec, reg, value);
else
- pr_err("%s: invalid I/O setup, nothing written!\n", ptr->name);
+ zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n", ptr->name);
}
/* =========================================================================
@@ -101,14 +94,15 @@ static u16 zr36060_read_scalefactor(struct zr36060 *ptr)
/* wait if codec is ready to proceed (end of processing) or time is over */
static void zr36060_wait_end(struct zr36060 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
int i = 0;
while (zr36060_read_status(ptr) & ZR060_CFSR_BUSY) {
udelay(1);
if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
- dprintk(1,
- "%s: timeout at wait_end (last status: 0x%02x)\n",
- ptr->name, ptr->status);
+ zrdev_dbg(zr,
+ "%s: timeout at wait_end (last status: 0x%02x)\n",
+ ptr->name, ptr->status);
break;
}
}
@@ -117,15 +111,17 @@ static void zr36060_wait_end(struct zr36060 *ptr)
/* Basic test of "connectivity", writes/reads to/from memory the SOF marker */
static int zr36060_basic_test(struct zr36060 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
+
if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) &&
(zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) {
- pr_err("%s: attach failed, can't connect to jpeg processor!\n", ptr->name);
+ zrdev_err(zr, "%s: attach failed, can't connect to jpeg processor!\n", ptr->name);
return -ENXIO;
}
zr36060_wait_end(ptr);
if (ptr->status & ZR060_CFSR_BUSY) {
- pr_err("%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name);
+ zrdev_err(zr, "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name);
return -EBUSY;
}
@@ -135,10 +131,11 @@ static int zr36060_basic_test(struct zr36060 *ptr)
/* simple loop for pushing the init datasets */
static int zr36060_pushit(struct zr36060 *ptr, u16 startreg, u16 len, const char *data)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
int i = 0;
- dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
- startreg, len);
+ zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
+ startreg, len);
while (i < len)
zr36060_write(ptr, startreg++, data[i++]);
@@ -249,11 +246,12 @@ static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
/* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */
static int zr36060_set_sof(struct zr36060 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
char sof_data[34]; // max. size of register set
int i;
- dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
- ptr->width, ptr->height, NO_OF_COMPONENTS);
+ zrdev_dbg(zr, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
+ ptr->width, ptr->height, NO_OF_COMPONENTS);
sof_data[0] = 0xff;
sof_data[1] = 0xc0;
sof_data[2] = 0x00;
@@ -277,10 +275,11 @@ static int zr36060_set_sof(struct zr36060 *ptr)
/* SOS (start of scan) segment depends on the used scan components of each color component */
static int zr36060_set_sos(struct zr36060 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
char sos_data[16]; // max. size of register set
int i;
- dprintk(3, "%s: write SOS\n", ptr->name);
+ zrdev_dbg(zr, "%s: write SOS\n", ptr->name);
sos_data[0] = 0xff;
sos_data[1] = 0xda;
sos_data[2] = 0x00;
@@ -302,9 +301,10 @@ static int zr36060_set_sos(struct zr36060 *ptr)
/* DRI (define restart interval) */
static int zr36060_set_dri(struct zr36060 *ptr)
{
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
char dri_data[6]; // max. size of register set
- dprintk(3, "%s: write DRI\n", ptr->name);
+ zrdev_dbg(zr, "%s: write DRI\n", ptr->name);
dri_data[0] = 0xff;
dri_data[1] = 0xdd;
dri_data[2] = 0x00;
@@ -321,9 +321,10 @@ static void zr36060_init(struct zr36060 *ptr)
{
int sum = 0;
long bitcnt, tmp;
+ struct zoran *zr = videocodec_to_zoran(ptr->codec);
if (ptr->mode == CODEC_DO_COMPRESSION) {
- dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name);
+ zrdev_dbg(zr, "%s: COMPRESSION SETUP\n", ptr->name);
zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST);
@@ -376,9 +377,9 @@ static void zr36060_init(struct zr36060 *ptr)
bitcnt = sum << 3; /* need the size in bits */
tmp = bitcnt >> 16;
- dprintk(3,
- "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
- ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
+ zrdev_dbg(zr,
+ "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
+ ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8);
zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff);
tmp = bitcnt & 0xffff;
@@ -389,8 +390,8 @@ static void zr36060_init(struct zr36060 *ptr)
bitcnt -= ((bitcnt * 5) >> 6); // bits without eob
tmp = bitcnt >> 16;
- dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n",
- ptr->name, bitcnt, tmp);
+ zrdev_dbg(zr, "%s: code: nettobit=%ld, highnettobits=%ld\n",
+ ptr->name, bitcnt, tmp);
zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8);
zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff);
tmp = bitcnt & 0xffff;
@@ -408,7 +409,7 @@ static void zr36060_init(struct zr36060 *ptr)
zr36060_write(ptr, ZR060_VCR, ZR060_VCR_RANGE);
} else {
- dprintk(2, "%s: EXPANSION SETUP\n", ptr->name);
+ zrdev_dbg(zr, "%s: EXPANSION SETUP\n", ptr->name);
zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST);
@@ -441,10 +442,11 @@ static void zr36060_init(struct zr36060 *ptr)
/* Load the tables */
zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST | ZR060_LOAD_LOAD);
zr36060_wait_end(ptr);
- dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status);
+ zrdev_dbg(zr, "%s: Status after table preload: 0x%02x\n",
+ ptr->name, ptr->status);
if (ptr->status & ZR060_CFSR_BUSY) {
- pr_err("%s: init aborted!\n", ptr->name);
+ zrdev_err(zr, "%s: init aborted!\n", ptr->name);
return; // something is wrong, its timed out!!!!
}
}
@@ -461,8 +463,9 @@ static void zr36060_init(struct zr36060 *ptr)
static int zr36060_set_mode(struct videocodec *codec, int mode)
{
struct zr36060 *ptr = (struct zr36060 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
- dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
+ zrdev_dbg(zr, "%s: set_mode %d call\n", ptr->name, mode);
if (mode != CODEC_DO_EXPANSION && mode != CODEC_DO_COMPRESSION)
return -EINVAL;
@@ -478,11 +481,12 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm
struct vfe_settings *cap, struct vfe_polarity *pol)
{
struct zr36060 *ptr = (struct zr36060 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
u32 reg;
int size;
- dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name,
- cap->x, cap->y, cap->width, cap->height, cap->decimation);
+ zrdev_dbg(zr, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name,
+ cap->x, cap->y, cap->width, cap->height, cap->decimation);
/* if () return -EINVAL;
* trust the master driver that it knows what it does - so
@@ -637,10 +641,11 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm
static int zr36060_control(struct videocodec *codec, int type, int size, void *data)
{
struct zr36060 *ptr = (struct zr36060 *)codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
int *ival = (int *)data;
- dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
- size);
+ zrdev_dbg(zr, "%s: control %d call with %d byte\n", ptr->name, type,
+ size);
switch (type) {
case CODEC_G_STATUS: /* get last status */
@@ -753,11 +758,12 @@ static int zr36060_control(struct videocodec *codec, int type, int size, void *d
static int zr36060_unset(struct videocodec *codec)
{
struct zr36060 *ptr = codec->data;
+ struct zoran *zr = videocodec_to_zoran(codec);
if (ptr) {
/* do wee need some codec deinit here, too ???? */
- dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num);
+ zrdev_dbg(zr, "%s: finished codec #%d\n", ptr->name, ptr->num);
kfree(ptr);
codec->data = NULL;
@@ -778,12 +784,14 @@ static int zr36060_unset(struct videocodec *codec)
static int zr36060_setup(struct videocodec *codec)
{
struct zr36060 *ptr;
+ struct zoran *zr = videocodec_to_zoran(codec);
int res;
- dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", zr36060_codecs);
+ zrdev_dbg(zr, "zr36060: initializing MJPEG subsystem #%d.\n",
+ zr36060_codecs);
if (zr36060_codecs == MAX_CODECS) {
- pr_err("zr36060: Can't attach more codecs!\n");
+ zrdev_err(zr, "zr36060: Can't attach more codecs!\n");
return -ENOSPC;
}
//mem structure init
@@ -823,7 +831,7 @@ static int zr36060_setup(struct videocodec *codec)
zr36060_init(ptr);
- dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name);
+ zrdev_info(zr, "%s: codec attached and running\n", ptr->name);
return 0;
}
@@ -852,9 +860,8 @@ int zr36060_init_module(void)
void zr36060_cleanup_module(void)
{
if (zr36060_codecs) {
- dprintk(1,
- "zr36060: something's wrong - %d codecs left somehow.\n",
- zr36060_codecs);
+ pr_debug("zr36060: something's wrong - %d codecs left somehow.\n",
+ zr36060_codecs);
}
/* however, we can't just stay alive */
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
deleted file mode 100644
index 6a5d842ee0f2..000000000000
--- a/drivers/staging/octeon-usb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_USB
- tristate "Cavium Networks Octeon USB support"
- depends on CAVIUM_OCTEON_SOC && USB
- help
- This driver supports USB host controller on some Cavium
- Networks' products in the Octeon family.
-
- To compile this driver as a module, choose M here. The module
- will be called octeon-hcd.
-
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
deleted file mode 100644
index 9873a0130ad5..000000000000
--- a/drivers/staging/octeon-usb/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
deleted file mode 100644
index 2b29acca5caa..000000000000
--- a/drivers/staging/octeon-usb/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-This driver is functional and has been tested on EdgeRouter Lite,
-D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
-
-TODO:
- - kernel coding style
- - checkpatch warnings
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 9ebd665e5d42..965330eec80a 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -469,8 +469,8 @@ void cvm_oct_rx_initialize(void)
if (!(pow_receive_groups & BIT(i)))
continue;
- netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
- cvm_oct_napi_poll, rx_napi_weight);
+ netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi,
+ cvm_oct_napi_poll, rx_napi_weight);
napi_enable(&oct_rx_group[i].napi);
oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
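/*
 * Editor's note (not part of the diff): this hunk (and the qlge one further
 * down) moves callers that pass an explicit NAPI poll weight over to
 * netif_napi_add_weight(); presumably this is groundwork for netif_napi_add()
 * itself taking only the default weight. Call shape as used above:
 */
	netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi,
			      cvm_oct_napi_poll, rx_napi_weight);
	napi_enable(&oct_rx_group[i].napi);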
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 7284cb4ac395..9363c5cfe50f 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -383,7 +383,7 @@ static void dcon_set_source(struct dcon_priv *dcon, int arg)
static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
{
dcon_set_source(dcon, arg);
- flush_scheduled_work();
+ flush_work(&dcon->switch_source);
}
static ssize_t dcon_mode_show(struct device *dev,
@@ -517,10 +517,7 @@ static struct device_attribute dcon_device_files[] = {
static int dcon_bl_update(struct backlight_device *dev)
{
struct dcon_priv *dcon = bl_get_data(dev);
- u8 level = dev->props.brightness & 0x0F;
-
- if (dev->props.power != FB_BLANK_UNBLANK)
- level = 0;
+ u8 level = backlight_get_brightness(dev) & 0x0F;
if (level != dcon->bl_val)
dcon_set_backlight(dcon, level);
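/*
 * Editor's note (not part of the diff): the two olpc_dcon hunks above swap
 * generic helpers for open-coded logic -- flush_scheduled_work() becomes a
 * targeted flush_work() on the one work item that matters, and the backlight
 * update uses backlight_get_brightness(), which already returns 0 when the
 * device should be blanked. Rough equivalence for the latter, assuming the
 * helper's usual semantics:
 */
	/* old: level = dev->props.brightness & 0x0F;
	 *      if (dev->props.power != FB_BLANK_UNBLANK)
	 *              level = 0;
	 */
	u8 level = backlight_get_brightness(dev) & 0x0F;	/* 0 when blanked */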
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 941aaa7eab2e..df02335fdbab 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1406,7 +1406,7 @@ static int __init pi433_init(void)
/*
* Claim device numbers. Then register a class
- * that will key udev/mdev to add/remove /dev nodes. Last, register
+ * that will key udev/mdev to add/remove /dev nodes.
* Last, register the driver which manages those device numbers.
*/
status = alloc_chrdev_region(&pi433_dev, 0, N_PI433_MINORS, "pi433");
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 113a3efd12e9..ca6b966f5dd3 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -1976,7 +1976,7 @@ static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
vlan_id);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
- * can be processed corrrectly by the split frame logic.
+ * can be processed correctly by the split frame logic.
*/
qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
vlan_id);
@@ -2461,7 +2461,7 @@ static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_io
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cpu_to_le16(skb_tcp_all_headers(skb));
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb)
@@ -2955,7 +2955,7 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
- u64 tmp;
+ u64 dma;
__le64 *base_indirect_ptr;
int page_entries;
@@ -3004,15 +3004,15 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
FLAGS_LI; /* Load irq delay values */
if (rx_ring->cq_id < qdev->rss_ring_count) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq.base_dma;
+ dma = (u64)rx_ring->lbq.base_dma;
base_indirect_ptr = rx_ring->lbq.base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+
+ for (page_entries = 0;
+ page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
+ page_entries++) {
+ base_indirect_ptr[page_entries] = cpu_to_le64(dma);
+ dma += DB_PAGE_SIZE;
+ }
cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
cqicb->lbq_buf_size =
cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
@@ -3021,15 +3021,15 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
rx_ring->lbq.next_to_clean = 0;
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq.base_dma;
+ dma = (u64)rx_ring->sbq.base_dma;
base_indirect_ptr = rx_ring->sbq.base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+
+ for (page_entries = 0;
+ page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
+ page_entries++) {
+ base_indirect_ptr[page_entries] = cpu_to_le64(dma);
+ dma += DB_PAGE_SIZE;
+ }
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq.base_indirect_dma);
cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
@@ -3041,8 +3041,8 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
- netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
- 64);
+ netif_napi_add_weight(qdev->ndev, &rx_ring->napi,
+ qlge_napi_poll_msix, 64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
} else {
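/*
 * Editor's note (not part of the diff): two smaller qlge cleanups above.
 * skb_tcp_all_headers() stands in for the old open-coded sum -- assuming the
 * helper's usual definition, skb_tcp_all_headers(skb) equals
 * skb_transport_offset(skb) + tcp_hdrlen(skb) -- and the indirect-page fill
 * loops are rewritten from do/while with a moving pointer to plain for loops
 * indexing base_indirect_ptr[], writing the same entries either way.
 */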
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
index 1d7982b618ba..eea16eb7caa0 100644
--- a/drivers/staging/r8188eu/Makefile
+++ b/drivers/staging/r8188eu/Makefile
@@ -5,7 +5,6 @@ r8188eu-y = \
hal/HalHWImg8188E_RF.o \
hal/HalPhyRf_8188e.o \
hal/HalPwrSeqCmd.o \
- hal/Hal8188EPwrSeq.o \
hal/Hal8188ERateAdaptive.o \
hal/hal_intf.o \
hal/hal_com.o \
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index ac6effbecf6d..5bd9dfa57cc5 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -654,18 +654,17 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
set_tx_beacon_cmd(padapter);
}
-/*
-op_mode
-Set to 0 (HT pure) under the following conditions
- - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
-Set to 1 (HT non-member protection) if there may be non-HT STAs
- in both the primary and the secondary channel
-Set to 2 if only HT STAs are associated in BSS,
- however and at least one 20 MHz HT STA is associated
-Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
- (currently non-GF HT station is considered as non-HT STA also)
-*/
+/* op_mode
+ * Set to 0 (HT pure) under the following conditions
+ * - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
+ * - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
+ * Set to 1 (HT non-member protection) if there may be non-HT STAs
+ * in both the primary and the secondary channel
+ * Set to 2 if only HT STAs are associated in BSS,
+ * however and at least one 20 MHz HT STA is associated
+ * Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ * (currently non-GF HT station is considered as non-HT STA also)
+ */
static int rtw_ht_operation_update(struct adapter *padapter)
{
u16 cur_op_mode, new_op_mode;
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 06523d91939a..5b6a891b5d67 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -898,8 +898,12 @@ static void traffic_status_watchdog(struct adapter *padapter)
static void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
{
u32 txdma_status;
+ int res;
+
+ res = rtw_read32(padapter, REG_TXDMA_STATUS, &txdma_status);
+ if (res)
+ return;
- txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
if (txdma_status != 0x00)
rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
/* total xmit irp = 4 */
@@ -1177,7 +1181,14 @@ exit:
static bool rtw_is_hi_queue_empty(struct adapter *adapter)
{
- return (rtw_read32(adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0;
+ int res;
+ u32 reg;
+
+ res = rtw_read32(adapter, REG_HGQ_INFORMATION, &reg);
+ if (res)
+ return false;
+
+ return (reg & 0x0000ff00) == 0;
}
static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
diff --git a/drivers/staging/r8188eu/core/rtw_efuse.c b/drivers/staging/r8188eu/core/rtw_efuse.c
index 0e0e60638880..df9534dd25cb 100644
--- a/drivers/staging/r8188eu/core/rtw_efuse.c
+++ b/drivers/staging/r8188eu/core/rtw_efuse.c
@@ -28,22 +28,35 @@ ReadEFuseByte(
u32 value32;
u8 readbyte;
u16 retry;
+ int res;
/* Write Address */
rtw_write8(Adapter, EFUSE_CTRL + 1, (_offset & 0xff));
- readbyte = rtw_read8(Adapter, EFUSE_CTRL + 2);
+ res = rtw_read8(Adapter, EFUSE_CTRL + 2, &readbyte);
+ if (res)
+ return;
+
rtw_write8(Adapter, EFUSE_CTRL + 2, ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
/* Write bit 32 0 */
- readbyte = rtw_read8(Adapter, EFUSE_CTRL + 3);
+ res = rtw_read8(Adapter, EFUSE_CTRL + 3, &readbyte);
+ if (res)
+ return;
+
rtw_write8(Adapter, EFUSE_CTRL + 3, (readbyte & 0x7f));
/* Check bit 32 read-ready */
- retry = 0;
- value32 = rtw_read32(Adapter, EFUSE_CTRL);
- while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
- value32 = rtw_read32(Adapter, EFUSE_CTRL);
- retry++;
+ res = rtw_read32(Adapter, EFUSE_CTRL, &value32);
+ if (res)
+ return;
+
+ for (retry = 0; retry < 10000; retry++) {
+ res = rtw_read32(Adapter, EFUSE_CTRL, &value32);
+ if (res)
+ continue;
+
+ if (((value32 >> 24) & 0xff) & 0x80)
+ break;
}
/* 20100205 Joseph: Add delay suggested by SD1 Victor. */
@@ -51,37 +64,11 @@ ReadEFuseByte(
/* Designer says that there shall be some delay after ready bit is set, or the */
/* result will always stay on last data we read. */
udelay(50);
- value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ res = rtw_read32(Adapter, EFUSE_CTRL, &value32);
+ if (res)
+ return;
*pbuf = (u8)(value32 & 0xff);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: EFUSE_ShadowMapUpdate
- *
- * Overview: Transfer current EFUSE content to shadow init and modify map.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/13/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void EFUSE_ShadowMapUpdate(struct adapter *pAdapter)
-{
- struct eeprom_priv *pEEPROM = &pAdapter->eeprompriv;
-
- if (pEEPROM->bautoload_fail_flag) {
- memset(pEEPROM->efuse_eeprom_data, 0xFF, EFUSE_MAP_LEN_88E);
- return;
- }
- rtl8188e_EfusePowerSwitch(pAdapter, true);
- rtl8188e_ReadEFuse(pAdapter, 0, EFUSE_MAP_LEN_88E, pEEPROM->efuse_eeprom_data);
- rtl8188e_EfusePowerSwitch(pAdapter, false);
+ /* FIXME: return an error to caller */
}
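/*
 * Editor's note (not part of the diff): the r8188eu hunks above and below all
 * track the same io_ops change -- rtw_read8/16/32() now return a status code
 * and pass the register value back through a pointer, so every call site grows
 * a local "res" and an early bail-out. The recurring shape, with names taken
 * from the hunks (how a failure is propagated varies per caller):
 */
	u8 tmp;
	int res;

	res = rtw_read8(padapter, REG_MCUFWDL, &tmp);
	if (res)
		return;		/* or return _FAIL, goto exit, ... as the caller requires */

	rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01);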
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
index 0451e5177644..95534f9c7a0f 100644
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ b/drivers/staging/r8188eu/core/rtw_fw.c
@@ -44,18 +44,28 @@ static_assert(sizeof(struct rt_firmware_hdr) == 32);
static void fw_download_enable(struct adapter *padapter, bool enable)
{
u8 tmp;
+ int res;
if (enable) {
/* MCU firmware download enable. */
- tmp = rtw_read8(padapter, REG_MCUFWDL);
+ res = rtw_read8(padapter, REG_MCUFWDL, &tmp);
+ if (res)
+ return;
+
rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01);
/* 8051 reset */
- tmp = rtw_read8(padapter, REG_MCUFWDL + 2);
+ res = rtw_read8(padapter, REG_MCUFWDL + 2, &tmp);
+ if (res)
+ return;
+
rtw_write8(padapter, REG_MCUFWDL + 2, tmp & 0xf7);
} else {
/* MCU firmware download disable. */
- tmp = rtw_read8(padapter, REG_MCUFWDL);
+ res = rtw_read8(padapter, REG_MCUFWDL, &tmp);
+ if (res)
+ return;
+
rtw_write8(padapter, REG_MCUFWDL, tmp & 0xfe);
/* Reserved for fw extension. */
@@ -125,8 +135,13 @@ static int page_write(struct adapter *padapter, u32 page, u8 *buffer, u32 size)
{
u8 value8;
u8 u8Page = (u8)(page & 0x07);
+ int res;
+
+ res = rtw_read8(padapter, REG_MCUFWDL + 2, &value8);
+ if (res)
+ return _FAIL;
- value8 = (rtw_read8(padapter, REG_MCUFWDL + 2) & 0xF8) | u8Page;
+ value8 = (value8 & 0xF8) | u8Page;
rtw_write8(padapter, REG_MCUFWDL + 2, value8);
return block_write(padapter, buffer, size);
@@ -165,8 +180,12 @@ exit:
void rtw_reset_8051(struct adapter *padapter)
{
u8 val8;
+ int res;
+
+ res = rtw_read8(padapter, REG_SYS_FUNC_EN + 1, &val8);
+ if (res)
+ return;
- val8 = rtw_read8(padapter, REG_SYS_FUNC_EN + 1);
rtw_write8(padapter, REG_SYS_FUNC_EN + 1, val8 & (~BIT(2)));
rtw_write8(padapter, REG_SYS_FUNC_EN + 1, val8 | (BIT(2)));
}
@@ -175,10 +194,14 @@ static int fw_free_to_go(struct adapter *padapter)
{
u32 counter = 0;
u32 value32;
+ int res;
/* polling CheckSum report */
do {
- value32 = rtw_read32(padapter, REG_MCUFWDL);
+ res = rtw_read32(padapter, REG_MCUFWDL, &value32);
+ if (res)
+ continue;
+
if (value32 & FWDL_CHKSUM_RPT)
break;
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
@@ -186,7 +209,10 @@ static int fw_free_to_go(struct adapter *padapter)
if (counter >= POLLING_READY_TIMEOUT_COUNT)
return _FAIL;
- value32 = rtw_read32(padapter, REG_MCUFWDL);
+ res = rtw_read32(padapter, REG_MCUFWDL, &value32);
+ if (res)
+ return _FAIL;
+
value32 |= MCUFWDL_RDY;
value32 &= ~WINTINI_RDY;
rtw_write32(padapter, REG_MCUFWDL, value32);
@@ -196,9 +222,10 @@ static int fw_free_to_go(struct adapter *padapter)
/* polling for FW ready */
counter = 0;
do {
- value32 = rtw_read32(padapter, REG_MCUFWDL);
- if (value32 & WINTINI_RDY)
+ res = rtw_read32(padapter, REG_MCUFWDL, &value32);
+ if (!res && value32 & WINTINI_RDY)
return _SUCCESS;
+
udelay(5);
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
@@ -239,7 +266,7 @@ exit:
int rtl8188e_firmware_download(struct adapter *padapter)
{
int ret = _SUCCESS;
- u8 write_fw_retry = 0;
+ u8 reg;
unsigned long fwdl_timeout;
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
struct device *device = dvobj_to_dev(dvobj);
@@ -259,9 +286,9 @@ int rtl8188e_firmware_download(struct adapter *padapter)
fwhdr = (struct rt_firmware_hdr *)dvobj->firmware.data;
if (IS_FW_HEADER_EXIST(fwhdr)) {
- pr_info_once("R8188EU: Firmware Version %d, SubVersion %d, Signature 0x%x\n",
- le16_to_cpu(fwhdr->version), fwhdr->subversion,
- le16_to_cpu(fwhdr->signature));
+ dev_info_once(device, "Firmware Version %d, SubVersion %d, Signature 0x%x\n",
+ le16_to_cpu(fwhdr->version), fwhdr->subversion,
+ le16_to_cpu(fwhdr->signature));
fw_data = fw_data + sizeof(struct rt_firmware_hdr);
fw_size = fw_size - sizeof(struct rt_firmware_hdr);
@@ -269,23 +296,34 @@ int rtl8188e_firmware_download(struct adapter *padapter)
/* Suggested by Filen. If 8051 is running in RAM code, driver should inform Fw to reset by itself, */
/* or it will cause download Fw fail. 2010.02.01. by tynli. */
- if (rtw_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) { /* 8051 RAM code */
+ ret = rtw_read8(padapter, REG_MCUFWDL, &reg);
+ if (ret) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (reg & RAM_DL_SEL) { /* 8051 RAM code */
rtw_write8(padapter, REG_MCUFWDL, 0x00);
rtw_reset_8051(padapter);
}
fw_download_enable(padapter, true);
fwdl_timeout = jiffies + msecs_to_jiffies(500);
- while (1) {
+ do {
/* reset the FWDL chksum */
- rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_CHKSUM_RPT);
+ ret = rtw_read8(padapter, REG_MCUFWDL, &reg);
+ if (ret) {
+ ret = _FAIL;
+ continue;
+ }
- ret = write_fw(padapter, fw_data, fw_size);
+ rtw_write8(padapter, REG_MCUFWDL, reg | FWDL_CHKSUM_RPT);
- if (ret == _SUCCESS ||
- (time_after(jiffies, fwdl_timeout) && write_fw_retry++ >= 3))
+ ret = write_fw(padapter, fw_data, fw_size);
+ if (ret == _SUCCESS)
break;
- }
+ } while (!time_after(jiffies, fwdl_timeout));
+
fw_download_enable(padapter, false);
if (ret != _SUCCESS)
goto exit;
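/*
 * Editor's note (not part of the diff): the firmware-download loop above drops
 * the write_fw_retry counter and bounds retries purely by the 500 ms
 * fwdl_timeout deadline; a failed checksum-register read now retries the
 * iteration rather than aborting the download. Skeleton of the resulting
 * control flow (checksum-reset details omitted):
 */
	fwdl_timeout = jiffies + msecs_to_jiffies(500);
	do {
		ret = write_fw(padapter, fw_data, fw_size);
		if (ret == _SUCCESS)
			break;
	} while (!time_after(jiffies, fwdl_timeout));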
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
index 385a9ed8eff7..bc8543ea2e66 100644
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c
@@ -1048,6 +1048,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
unsigned char *pbuf;
int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
int ret = _FAIL;
+
pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 7ba75f73e47e..17f6bcbeebf4 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -71,7 +71,6 @@ u8 rtw_do_join(struct adapter *padapter)
pibss = padapter->registrypriv.dev_network.MacAddress;
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(padapter);
diff --git a/drivers/staging/r8188eu/core/rtw_iol.c b/drivers/staging/r8188eu/core/rtw_iol.c
index af8e84a41b85..31e196ccd899 100644
--- a/drivers/staging/r8188eu/core/rtw_iol.c
+++ b/drivers/staging/r8188eu/core/rtw_iol.c
@@ -67,7 +67,7 @@ bool rtw_IOL_applied(struct adapter *adapter)
return false;
}
-int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask)
+int rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask)
{
struct ioreg_cfg cmd = {8, IOREG_CMD_WB_REG, 0x0, 0x0, 0x0};
@@ -81,7 +81,7 @@ int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8
return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
}
-int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask)
+int rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask)
{
struct ioreg_cfg cmd = {8, IOREG_CMD_WW_REG, 0x0, 0x0, 0x0};
@@ -95,7 +95,7 @@ int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u
return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
}
-int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask)
+int rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask)
{
struct ioreg_cfg cmd = {8, IOREG_CMD_WD_REG, 0x0, 0x0, 0x0};
@@ -109,7 +109,7 @@ int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u
return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
}
-int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask)
+int rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask)
{
struct ioreg_cfg cmd = {8, IOREG_CMD_W_RF, 0x0, 0x0, 0x0};
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index 2f3000428af7..d5c6c5e29621 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -16,7 +16,7 @@
(l)->CurrLedState == LED_BLINK_WPS_STOP || \
(l)->bLedWPSBlinkInProgress)
-static void ResetLedStatus(struct LED_871x *pLed)
+static void ResetLedStatus(struct led_priv *pLed)
{
pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
@@ -32,30 +32,40 @@ static void ResetLedStatus(struct LED_871x *pLed)
pLed->bLedScanBlinkInProgress = false;
}
-static void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
+static void SwLedOn(struct adapter *padapter, struct led_priv *pLed)
{
u8 LedCfg;
+ int res;
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
return;
- LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);
+ if (res)
+ return;
+
rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */
pLed->bLedOn = true;
}
-static void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
+static void SwLedOff(struct adapter *padapter, struct led_priv *pLed)
{
u8 LedCfg;
+ int res;
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
goto exit;
- LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
+ res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */
+ if (res)
+ goto exit;
LedCfg &= 0x90; /* Set to software control. */
rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
+ res = rtw_read8(padapter, REG_MAC_PINMUX_CFG, &LedCfg);
+ if (res)
+ goto exit;
+
LedCfg &= 0xFE;
rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
exit:
@@ -65,7 +75,7 @@ exit:
static void blink_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct LED_871x *pLed = container_of(dwork, struct LED_871x, blink_work);
+ struct led_priv *pLed = container_of(dwork, struct led_priv, blink_work);
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -172,35 +182,32 @@ static void blink_work(struct work_struct *work)
void rtl8188eu_InitSwLeds(struct adapter *padapter)
{
struct led_priv *pledpriv = &padapter->ledpriv;
- struct LED_871x *pLed = &pledpriv->SwLed0;
- pLed->padapter = padapter;
- ResetLedStatus(pLed);
- INIT_DELAYED_WORK(&pLed->blink_work, blink_work);
+ pledpriv->padapter = padapter;
+ ResetLedStatus(pledpriv);
+ INIT_DELAYED_WORK(&pledpriv->blink_work, blink_work);
}
void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
{
struct led_priv *ledpriv = &padapter->ledpriv;
- struct LED_871x *pLed = &ledpriv->SwLed0;
- cancel_delayed_work_sync(&pLed->blink_work);
- ResetLedStatus(pLed);
- SwLedOff(padapter, pLed);
+ cancel_delayed_work_sync(&ledpriv->blink_work);
+ ResetLedStatus(ledpriv);
+ SwLedOff(padapter, ledpriv);
}
void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
- struct led_priv *ledpriv = &padapter->ledpriv;
+ struct led_priv *pLed = &padapter->ledpriv;
struct registry_priv *registry_par;
- struct LED_871x *pLed = &ledpriv->SwLed0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
(!padapter->hw_init_completed))
return;
- if (!ledpriv->bRegUseLed)
+ if (!pLed->bRegUseLed)
return;
registry_par = &padapter->registrypriv;
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 5a815642c3f6..2705c9d87b14 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -676,7 +676,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -1118,7 +1117,7 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
/* MACID|OPMODE:1 connect */
media_status_rpt = (u16)((psta->mac_id << 8) | mstatus);
- SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt);
+ rtl8188e_set_FwMediaStatus_cmd(adapter, media_status_rpt);
}
void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
@@ -1196,7 +1195,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
u16 media_status;
media_status = (mac_id << 8) | 0; /* MACID|OPMODE:0 means disconnect */
/* for STA, AP, ADHOC mode, report disconnect stauts to FW */
- SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ rtl8188e_set_FwMediaStatus_cmd(adapter, media_status);
}
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -1253,7 +1252,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index faf23fc950c5..32d0e101d0c2 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -428,6 +428,58 @@ static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
return _SUCCESS;
}
+static void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe)
+{
+ u8 *pIE;
+ __le32 *pbuf;
+
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
+ pbuf = (__le32 *)pIE;
+
+ pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1));
+
+ pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
+
+ pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
+}
+
+static void correct_TSF(struct adapter *padapter)
+{
+ u8 reg;
+ int res;
+ u64 tsf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ tsf = pmlmeext->TSFValue - do_div(pmlmeext->TSFValue,
+ pmlmeinfo->bcn_interval * 1024) - 1024; /* us */
+
+ if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
+ rtw_stop_tx_beacon(padapter);
+
+ /* disable related TSF function */
+ res = rtw_read8(padapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(padapter, REG_BCN_CTRL, reg & (~BIT(3)));
+
+ rtw_write32(padapter, REG_TSFTR, tsf);
+ rtw_write32(padapter, REG_TSFTR + 4, tsf >> 32);
+
+ /* enable related TSF function */
+ res = rtw_read8(padapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(padapter, REG_BCN_CTRL, reg | BIT(3));
+
+ if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
+ rtw_resume_tx_beacon(padapter);
+}
+
/****************************************************************************
Following are the callback functions for each subtype of the management frames
@@ -582,7 +634,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
/* update TSF Value */
- update_TSF(pmlmeext, pframe, len);
+ update_TSF(pmlmeext, pframe);
/* start auth */
start_clnt_auth(padapter);
@@ -625,7 +677,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
}
/* update TSF Value */
- update_TSF(pmlmeext, pframe, len);
+ update_TSF(pmlmeext, pframe);
/* report sta add event */
report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx);
@@ -5363,26 +5415,20 @@ exit:
return ret;
}
-void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, u16 status)
{
- u8 category = WLAN_CATEGORY_BACK;
u16 start_seq;
- u16 BA_para_set;
- u16 reason_code;
- u16 BA_timeout_value;
- __le16 le_tmp;
u16 BA_starting_seqctrl = 0;
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
- u8 *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct ieee80211_mgmt *mgmt;
+ u16 capab, params;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -5394,81 +5440,70 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
+ mgmt = (struct ieee80211_mgmt *)(pmgntframe->buf_addr + TXDESC_OFFSET);
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
+ mgmt->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION | IEEE80211_FTYPE_MGMT);
- /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(mgmt->da, raddr, ETH_ALEN);
+ memcpy(mgmt->sa, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ mgmt->seq_ctrl = cpu_to_le16(pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
- pframe = rtw_set_fixed_ie(pframe, 1, &(category), &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &(action), &pattrib->pktlen);
+ mgmt->u.action.category = WLAN_CATEGORY_BACK;
- if (category == 3) {
- switch (action) {
- case 0: /* ADDBA req */
- do {
- pmlmeinfo->dialogToken++;
- } while (pmlmeinfo->dialogToken == 0);
- pframe = rtw_set_fixed_ie(pframe, 1, &pmlmeinfo->dialogToken, &pattrib->pktlen);
+ switch (action) {
+ case WLAN_ACTION_ADDBA_REQ:
+ mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
+ do {
+ pmlmeinfo->dialogToken++;
+ } while (pmlmeinfo->dialogToken == 0);
+ mgmt->u.action.u.addba_req.dialog_token = pmlmeinfo->dialogToken;
- BA_para_set = (0x1002 | ((status & 0xf) << 2)); /* immediate ack & 64 buffer size */
- le_tmp = cpu_to_le16(BA_para_set);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
+ /* immediate ack & 64 buffer size */
+ capab = u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
+ capab |= u16_encode_bits(1, IEEE80211_ADDBA_PARAM_POLICY_MASK);
+ capab |= u16_encode_bits(status, IEEE80211_ADDBA_PARAM_TID_MASK);
+ mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
- BA_timeout_value = 5000;/* 5ms */
- le_tmp = cpu_to_le16(BA_timeout_value);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
+ mgmt->u.action.u.addba_req.timeout = cpu_to_le16(5000); /* 5 ms */
- psta = rtw_get_stainfo(pstapriv, raddr);
- if (psta) {
- start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] & 0xfff) + 1;
+ psta = rtw_get_stainfo(pstapriv, raddr);
+ if (psta) {
+ start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] & 0xfff) + 1;
- psta->BA_starting_seqctrl[status & 0x07] = start_seq;
+ psta->BA_starting_seqctrl[status & 0x07] = start_seq;
- BA_starting_seqctrl = start_seq << 4;
- }
- le_tmp = cpu_to_le16(BA_starting_seqctrl);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
- break;
- case 1: /* ADDBA rsp */
- pframe = rtw_set_fixed_ie(pframe, 1, &pmlmeinfo->ADDBA_req.dialog_token, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&status, &pattrib->pktlen);
- BA_para_set = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f;
- BA_para_set |= 0x1000; /* 64 buffer size */
-
- if (pregpriv->ampdu_amsdu == 0)/* disabled */
- BA_para_set = BA_para_set & ~BIT(0);
- else if (pregpriv->ampdu_amsdu == 1)/* enabled */
- BA_para_set = BA_para_set | BIT(0);
- le_tmp = cpu_to_le16(BA_para_set);
-
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&pmlmeinfo->ADDBA_req.BA_timeout_value, &pattrib->pktlen);
- break;
- case 2:/* DELBA */
- BA_para_set = (status & 0x1F) << 3;
- le_tmp = cpu_to_le16(BA_para_set);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
-
- reason_code = 37;/* Requested from peer STA as it does not want to use the mechanism */
- le_tmp = cpu_to_le16(reason_code);
- pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
- break;
- default:
- break;
+ BA_starting_seqctrl = start_seq << 4;
}
+ mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(BA_starting_seqctrl);
+
+ pattrib->pktlen = offsetofend(struct ieee80211_mgmt,
+ u.action.u.addba_req.start_seq_num);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
+ mgmt->u.action.u.addba_resp.dialog_token = pmlmeinfo->ADDBA_req.dialog_token;
+ mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
+ capab = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f;
+ capab |= u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
+ capab |= u16_encode_bits(pregpriv->ampdu_amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK);
+ mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
+ mgmt->u.action.u.addba_resp.timeout = pmlmeinfo->ADDBA_req.BA_timeout_value;
+ pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.addba_resp.timeout);
+ break;
+ case WLAN_ACTION_DELBA:
+ mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
+ params = u16_encode_bits((status & 0x1), IEEE80211_DELBA_PARAM_INITIATOR_MASK);
+ params |= u16_encode_bits((status >> 1) & 0xF, IEEE80211_DELBA_PARAM_TID_MASK);
+ mgmt->u.action.u.delba.params = cpu_to_le16(params);
+ mgmt->u.action.u.delba.reason_code = cpu_to_le16(WLAN_STATUS_REQUEST_DECLINED);
+ pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.delba.reason_code);
+ break;
+ default:
+ break;
}
pattrib->last_txcmdsz = pattrib->pktlen;
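
The reworked issue_action_BA() builds the ADDBA request capability field with u16_encode_bits() and the IEEE80211_ADDBA_PARAM_* masks instead of the hand-coded 0x1002 constant. The sketch below reproduces that packing in a standalone program; the mask values follow the mainline <linux/ieee80211.h> definitions and encode_bits() is a local stand-in for the kernel's u16_encode_bits(), so treat it as an illustration rather than driver code.

/* Illustrative sketch: how the ADDBA request capability field is packed.
 * Mask values mirror include/linux/ieee80211.h; encode_bits() is a local
 * stand-in for the kernel's u16_encode_bits() from <linux/bitfield.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_ADDBA_PARAM_AMSDU_MASK    0x0001
#define IEEE80211_ADDBA_PARAM_POLICY_MASK   0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK      0x003C
#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0

static uint16_t encode_bits(uint16_t v, uint16_t mask)
{
	/* shift the value up to the lowest set bit of the mask */
	return (v * (mask & -mask)) & mask;
}

int main(void)
{
	uint16_t tid = 5;	/* example TID, passed in via "status" */
	uint16_t capab;

	capab  = encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); /* 64-frame reorder buffer */
	capab |= encode_bits(1, IEEE80211_ADDBA_PARAM_POLICY_MASK);    /* immediate block ack */
	capab |= encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK);

	printf("ADDBA capab = 0x%04x\n", (unsigned int)capab); /* 0x1016 for TID 5 */
	return 0;
}
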
@@ -5623,7 +5658,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
if (initiator == 0) { /* recipient */
for (tid = 0; tid < MAXTID; tid++) {
if (psta->recvreorder_ctrl[tid].enable) {
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
+ issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
psta->recvreorder_ctrl[tid].enable = false;
psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
}
@@ -5631,7 +5666,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
} else if (initiator == 1) { /* originator */
for (tid = 0; tid < MAXTID; tid++) {
if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
+ issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
}
@@ -5667,14 +5702,129 @@ unsigned int send_beacon(struct adapter *padapter)
bool get_beacon_valid_bit(struct adapter *adapter)
{
+ int res;
+ u8 reg;
+
+ res = rtw_read8(adapter, REG_TDECTRL + 2, &reg);
+ if (res)
+ return false;
+
/* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
- return BIT(0) & rtw_read8(adapter, REG_TDECTRL + 2);
+ return BIT(0) & reg;
}
void clear_beacon_valid_bit(struct adapter *adapter)
{
+ int res;
+ u8 reg;
+
+ res = rtw_read8(adapter, REG_TDECTRL + 2, &reg);
+ if (res)
+ return;
+
/* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */
- rtw_write8(adapter, REG_TDECTRL + 2, rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0));
+ rtw_write8(adapter, REG_TDECTRL + 2, reg | BIT(0));
+}
+
+void rtw_resume_tx_beacon(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = &adapt->haldata;
+
+ /* 2010.03.01. Marked by tynli. No need to call a workitem because we record the value */
+ /* which should be read from register to a global variable. */
+
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) | BIT(6));
+ haldata->RegFwHwTxQCtrl |= BIT(6);
+ rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0xff);
+ haldata->RegReg542 |= BIT(0);
+ rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
+}
+
+void rtw_stop_tx_beacon(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = &adapt->haldata;
+
+ /* 2010.03.01. Marked by tynli. No need to call a workitem because we record the value */
+ /* which should be read from register to a global variable. */
+
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) & (~BIT(6)));
+ haldata->RegFwHwTxQCtrl &= (~BIT(6));
+ rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0x64);
+ haldata->RegReg542 &= ~(BIT(0));
+ rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
+
+ /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */
+}
+
+static void rtw_set_opmode(struct adapter *adapter, u8 mode)
+{
+ u8 val8;
+ int res;
+
+ /* disable Port0 TSF update */
+ res = rtw_read8(adapter, REG_BCN_CTRL, &val8);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL, val8 | BIT(4));
+
+ /* set net_type */
+ res = rtw_read8(adapter, MSR, &val8);
+ if (res)
+ return;
+
+ val8 &= 0x0c;
+ val8 |= mode;
+ rtw_write8(adapter, MSR, val8);
+
+ if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
+ rtw_stop_tx_beacon(adapter);
+
+ rtw_write8(adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
+ } else if (mode == _HW_STATE_ADHOC_) {
+ rtw_resume_tx_beacon(adapter);
+ rtw_write8(adapter, REG_BCN_CTRL, 0x1a);
+ } else if (mode == _HW_STATE_AP_) {
+ rtw_resume_tx_beacon(adapter);
+
+ rtw_write8(adapter, REG_BCN_CTRL, 0x12);
+
+ /* Set RCR */
+ rtw_write32(adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must be set to 0, reject ICV_ERR packets */
+ /* enable to rx data frame */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable to rx ps-poll */
+ rtw_write16(adapter, REG_RXFLTMAP1, 0x0400);
+
+ /* Beacon Control related register for first time */
+ rtw_write8(adapter, REG_BCNDMATIM, 0x02); /* 2ms */
+
+ rtw_write8(adapter, REG_ATIMWND, 0x0a); /* 10ms */
+ rtw_write16(adapter, REG_BCNTCFG, 0x00);
+ rtw_write16(adapter, REG_TBTT_PROHIBIT, 0xff04);
+ rtw_write16(adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */
+
+ /* reset TSF */
+ rtw_write8(adapter, REG_DUAL_TSF_RST, BIT(0));
+
+ /* BIT(3) - If set to 0, hw will clr bcnq when tx beacon ok/fail or port 0 */
+ res = rtw_read8(adapter, REG_MBID_NUM, &val8);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_MBID_NUM, val8 | BIT(3) | BIT(4));
+
+ /* enable BCN0 Function for if1 */
+ /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
+ rtw_write8(adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP | EN_BCN_FUNCTION | BIT(1)));
+
+ /* dis BCN1 ATIM WND if if2 is station */
+ res = rtw_read8(adapter, REG_BCN_CTRL_1, &val8);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL_1, val8 | BIT(0));
+ }
}
/****************************************************************************
@@ -5698,9 +5848,70 @@ static void rtw_set_initial_gain(struct adapter *adapter, u8 gain)
}
}
+void rtw_mlme_under_site_survey(struct adapter *adapter)
+{
+ /* config RCR to receive different BSSID & not to receive data frame */
+
+ int res;
+ u8 reg;
+ u32 v;
+
+ res = rtw_read32(adapter, REG_RCR, &v);
+ if (res)
+ return;
+
+ v &= ~(RCR_CBSSID_BCN);
+ rtw_write32(adapter, REG_RCR, v);
+ /* reject all data frame */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+
+ /* disable update TSF */
+ res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL, reg | BIT(4));
+}
+
+void rtw_mlme_site_survey_done(struct adapter *adapter)
+{
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u32 reg32;
+ int res;
+ u8 reg;
+
+ if ((is_client_associated_to_ap(adapter)) ||
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) {
+ /* enable to rx data frame */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ /* enable update TSF */
+ res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4)));
+ } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
+ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable update TSF */
+ res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4)));
+ }
+
+ res = rtw_read32(adapter, REG_RCR, &reg32);
+ if (res)
+ return;
+
+ rtw_write32(adapter, REG_RCR, reg32 | RCR_CBSSID_BCN);
+}
+
void site_survey(struct adapter *padapter)
{
- unsigned char survey_channel = 0, val8;
+ unsigned char survey_channel = 0;
enum rt_scan_type ScanType = SCAN_PASSIVE;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -5824,8 +6035,7 @@ void site_survey(struct adapter *padapter)
if (is_client_associated_to_ap(padapter))
issue_nulldata(padapter, NULL, 0, 3, 500);
- val8 = 0; /* survey done */
- SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ rtw_mlme_site_survey_done(padapter);
report_surveydone_event(padapter);
@@ -6002,7 +6212,9 @@ static void rtw_set_bssid(struct adapter *adapter, u8 *bssid)
static void mlme_join(struct adapter *adapter, int type)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
- u8 retry_limit = 0x30;
+ u8 retry_limit = 0x30, reg;
+ u32 reg32;
+ int res;
switch (type) {
case 0:
@@ -6010,8 +6222,12 @@ static void mlme_join(struct adapter *adapter, int type)
/* enable to rx data frame, accept all data frame */
rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+ res = rtw_read32(adapter, REG_RCR, &reg32);
+ if (res)
+ return;
+
rtw_write32(adapter, REG_RCR,
- rtw_read32(adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ reg32 | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
retry_limit = 48;
@@ -6027,7 +6243,11 @@ static void mlme_join(struct adapter *adapter, int type)
case 2:
/* sta add event call back */
/* enable update TSF */
- rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) & (~BIT(4)));
+ res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4)));
if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
retry_limit = 0x7;
@@ -6184,14 +6404,14 @@ void start_clnt_assoc(struct adapter *padapter)
set_link_timer(pmlmeext, REASSOC_TO);
}
-unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+void receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
/* check A3 */
if (!(!memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
- return _SUCCESS;
+ return;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
@@ -6202,7 +6422,6 @@ unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr
report_join_res(padapter, -2);
}
}
- return _SUCCESS;
}
static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid)
@@ -6640,6 +6859,23 @@ void update_sta_info(struct adapter *padapter, struct sta_info *psta)
psta->state = _FW_LINKED;
}
+static void rtw_reset_dm_func_flag(struct adapter *adapter)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct dm_priv *dmpriv = &haldata->dmpriv;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ odmpriv->SupportAbility = dmpriv->InitODMFlag;
+}
+
+static void rtw_clear_dm_func_flag(struct adapter *adapter)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ odmpriv->SupportAbility = 0;
+}
+
void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
{
struct sta_info *psta, *psta_bmc;
@@ -6670,12 +6906,12 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
}
/* turn on dynamic functions */
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_RESET, NULL);
+ rtw_reset_dm_func_flag(padapter);
/* update IOT-related issue */
update_IOT_info(padapter);
- SetHwReg8188EU(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
+ rtw_set_basic_rate(padapter, cur_network->SupportedRates);
/* BCN interval */
rtw_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
@@ -6702,14 +6938,14 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
rtw_set_max_rpt_macid(padapter, psta->mac_id);
media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
- SetHwReg8188EU(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ rtl8188e_set_FwMediaStatus_cmd(padapter, media_status);
}
mlme_join(padapter, 2);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
/* correcting TSF */
- correct_TSF(padapter, pmlmeext);
+ correct_TSF(padapter);
}
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
}
@@ -6724,7 +6960,7 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
/* nothing to do */
} else { /* adhoc client */
/* correcting TSF */
- correct_TSF(padapter, pmlmeext);
+ correct_TSF(padapter);
/* start beacon */
if (send_beacon(padapter) == _FAIL) {
@@ -6748,6 +6984,9 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
static void mlme_disconnect(struct adapter *adapter)
{
+ int res;
+ u8 reg;
+
/* Set RCR to not to receive data frame when NO LINK state */
/* reject all data frames */
rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
@@ -6756,7 +6995,12 @@ static void mlme_disconnect(struct adapter *adapter)
rtw_write8(adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
/* disable update TSF */
- rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) | BIT(4));
+
+ res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_BCN_CTRL, reg | BIT(4));
}
void mlmeext_sta_del_event_callback(struct adapter *padapter)
@@ -6810,14 +7054,20 @@ static u8 chk_ap_is_alive(struct sta_info *psta)
return ret;
}
-static void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
+static int rtl8188e_sreset_linked_status_check(struct adapter *padapter)
{
- u32 rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
+ u32 rx_dma_status;
+ int res;
+ u8 reg;
+
+ res = rtw_read32(padapter, REG_RXDMA_STATUS, &rx_dma_status);
+ if (res)
+ return res;
if (rx_dma_status != 0x00)
rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
- rtw_read8(padapter, REG_FMETHR);
+ return rtw_read8(padapter, REG_FMETHR, &reg);
}
void linked_status_chk(struct adapter *padapter)
@@ -7045,7 +7295,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
type = _HW_STATE_NOLINK_;
}
- SetHwReg8188EU(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
+ rtw_set_opmode(padapter, type);
return H2C_SUCCESS;
}
@@ -7081,7 +7331,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* disable dynamic functions, such as high power, DIG */
Save_DM_Func_Flag(padapter);
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
+ rtw_clear_dm_func_flag(padapter);
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7089,7 +7339,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* clear CAM */
flush_all_cam_entry(padapter);
- memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
@@ -7146,7 +7396,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeinfo->candidate_tid_bitmap = 0;
pmlmeinfo->bwmode_updated = false;
- memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
@@ -7219,6 +7469,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
u8 val8;
+ int res;
if (is_client_associated_to_ap(padapter))
issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
@@ -7231,7 +7482,10 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
/* Stop BCN */
- val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ res = rtw_read8(padapter, REG_BCN_CTRL, &val8);
+ if (res)
+ return H2C_DROPPED;
+
rtw_write8(padapter, REG_BCN_CTRL, val8 & (~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
}
@@ -7302,7 +7556,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
u8 bdelayscan = false;
- u8 val8;
u32 i;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
@@ -7347,7 +7600,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
/* disable dynamic functions, such as high power, DIG */
Save_DM_Func_Flag(padapter);
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
+ rtw_clear_dm_func_flag(padapter);
/* config the initial gain under scanning, need to write the BB registers */
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
@@ -7359,8 +7612,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* set MSR to no link state */
Set_MSR(padapter, _HW_STATE_NOLINK_);
- val8 = 1; /* under site survey */
- SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ rtw_mlme_under_site_survey(padapter);
pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
}
@@ -7475,7 +7727,7 @@ u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
- issue_action_BA(padapter, pparm->addr, RTW_WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
+ issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
_set_timer(&psta->addba_retry_timer, ADDBA_TO);
} else {
psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
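
send_delba() passes the TID and initiator packed as ((tid << 1) | initiator) in the low five bits of the status argument, and issue_action_BA() unpacks them into the DELBA parameter set with u16_encode_bits(). A minimal sketch of that round trip, assuming the mainline mask values for the DELBA initiator and TID fields; everything below is illustrative, not driver code.

/* Illustrative sketch: the (tid, initiator) pair is packed into the low
 * five bits of "status" by send_delba() and unpacked into the DELBA
 * parameter set by issue_action_BA(). Masks mirror include/linux/ieee80211.h.
 */
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
#define IEEE80211_DELBA_PARAM_TID_MASK       0xF000

int main(void)
{
	uint8_t tid = 3, initiator = 1;
	uint16_t status = ((tid << 1) | initiator) & 0x1F;
	uint16_t params = 0;

	if (status & 0x1)	/* initiator bit */
		params |= IEEE80211_DELBA_PARAM_INITIATOR_MASK;
	params |= ((uint16_t)((status >> 1) & 0xF) << 12) & IEEE80211_DELBA_PARAM_TID_MASK;

	printf("status=0x%02x -> delba params=0x%04x\n",
	       (unsigned int)status, (unsigned int)params);	/* 0x07 -> 0x3800 */
	return 0;
}
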
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index beffe5b16f1e..bd654d4ff8b4 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -1450,10 +1450,9 @@ static void restore_p2p_state_handler(struct adapter *padapter)
static void pre_tx_invitereq_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 val8 = 1;
set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ rtw_mlme_under_site_survey(padapter);
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -1462,10 +1461,9 @@ static void pre_tx_invitereq_handler(struct adapter *padapter)
static void pre_tx_provdisc_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 val8 = 1;
set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ rtw_mlme_under_site_survey(padapter);
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -1474,10 +1472,9 @@ static void pre_tx_provdisc_handler(struct adapter *padapter)
static void pre_tx_negoreq_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 val8 = 1;
set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ rtw_mlme_under_site_survey(padapter);
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -1891,7 +1888,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
/* leave IPS/Autosuspend */
- if (rtw_pwr_wakeup(padapter) == _FAIL) {
+ if (rtw_pwr_wakeup(padapter)) {
ret = _FAIL;
goto exit;
}
@@ -1905,7 +1902,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
init_wifidirect_info(padapter, role);
} else if (role == P2P_ROLE_DISABLE) {
- if (rtw_pwr_wakeup(padapter) == _FAIL) {
+ if (rtw_pwr_wakeup(padapter)) {
ret = _FAIL;
goto exit;
}
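
rtw_pwr_wakeup() now follows the usual kernel convention of returning 0 on success and a negative errno on failure, which is why the P2P callers simply test if (rtw_pwr_wakeup(padapter)) instead of comparing against _FAIL. A minimal sketch of that convention; wake_device() is a made-up stand-in, not driver code.

/* Sketch of the 0 / -errno return convention the driver is moved to.
 * wake_device() stands in for rtw_pwr_wakeup(); it is not driver code.
 */
#include <errno.h>
#include <stdio.h>

static int wake_device(int device_ready)
{
	if (!device_ready)
		return -EBUSY;	/* negative errno on failure */
	return 0;		/* 0 on success */
}

int main(void)
{
	int ret = wake_device(0);

	if (ret)	/* any non-zero return is an error */
		fprintf(stderr, "wakeup failed: %d\n", ret);
	return 0;
}
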
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 7b816b824947..10550bd2c16d 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -229,6 +229,9 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
static bool lps_rf_on(struct adapter *adapter)
{
+ int res;
+ u32 reg;
+
/* When we halt NIC, we should check if FW LPS is leave. */
if (adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
/* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
@@ -236,7 +239,11 @@ static bool lps_rf_on(struct adapter *adapter)
return true;
}
- if (rtw_read32(adapter, REG_RCR) & 0x00070000)
+ res = rtw_read32(adapter, REG_RCR, &reg);
+ if (res)
+ return false;
+
+ if (reg & 0x00070000)
return false;
return true;
@@ -266,7 +273,7 @@ static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
err = -1;
break;
}
- rtw_usleep_os(100);
+ msleep(1);
}
return err;
@@ -374,24 +381,24 @@ int rtw_pwr_wakeup(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
unsigned long timeout = jiffies + msecs_to_jiffies(3000);
unsigned long deny_time;
- int ret = _SUCCESS;
+ int ret;
while (pwrpriv->ps_processing && time_before(jiffies, timeout))
msleep(10);
/* I think this should be check in IPS, LPS, autosuspend functions... */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- ret = _SUCCESS;
+ /* Below goto is a success path taken for already linked devices */
+ ret = 0;
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
goto exit;
- }
if (pwrpriv->rf_pwrstate == rf_off && ips_leave(padapter) == _FAIL) {
- ret = _FAIL;
+ ret = -ENOMEM;
goto exit;
}
if (padapter->bDriverStopped || !padapter->bup || !padapter->hw_init_completed) {
- ret = _FAIL;
+ ret = -EBUSY;
goto exit;
}
@@ -432,7 +439,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
return 0;
} else if (mode == IPS_NONE) {
rtw_ips_mode_req(pwrctrlpriv, mode);
- if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
+ if ((padapter->bSurpriseRemoved == 0) && rtw_pwr_wakeup(padapter))
return -EFAULT;
} else {
return -EINVAL;
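
lps_rf_on() shows the pattern used throughout this series: rtw_read32()/rtw_read8() now return an error code and hand back the register value through a pointer, so every read is checked before its bits are inspected. A sketch of that shape, with a fake accessor standing in for rtw_read32() and an example bit mask; none of the names or values below are hardware definitions.

/* Sketch of the "check the read, then test the bits" pattern.
 * fake_read32() stands in for rtw_read32(); EXAMPLE_BITS is only an
 * example mask, not a register definition.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_BITS 0x00070000u

static int fake_read32(uint32_t addr, uint32_t *val)
{
	(void)addr;
	*val = 0x00010000;	/* pretend the hardware returned this */
	return 0;		/* 0 on success, -errno on I/O failure */
}

static bool rf_is_on(void)
{
	uint32_t reg;

	if (fake_read32(0x608, &reg))
		return false;	/* treat a failed read as "not on" */

	return !(reg & EXAMPLE_BITS);
}

int main(void)
{
	printf("rf on: %d\n", rf_is_on());
	return 0;
}
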
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index df518439aea2..e5a7b7dfc387 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -17,14 +17,14 @@ static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static u8 rtw_bridge_tunnel_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
};
static u8 rtw_rfc1042_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
-void rtw_signal_stat_timer_hdl(struct timer_list *);
+static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
@@ -62,7 +62,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
goto exit;
}
- precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
+ precvpriv->precv_frame_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
@@ -166,10 +166,8 @@ int rtw_free_recvframe(struct recv_frame *precvframe, struct __queue *pfree_recv
list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue));
- if (padapter) {
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
+ if (padapter && (pfree_recv_queue == &precvpriv->free_recv_queue))
+ precvpriv->free_recvframe_cnt++;
spin_unlock_bh(&pfree_recv_queue->lock);
@@ -204,12 +202,12 @@ int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
}
/*
-caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread (passive)
-
-using spinlock to protect
-
-*/
+ * caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
+ *
+ * using spinlock to protect
+ *
+ */
void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
{
@@ -237,6 +235,7 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
{
u32 cnt = 0;
struct recv_frame *pending_frame;
+
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
cnt++;
@@ -327,6 +326,7 @@ static struct recv_frame *decryptor(struct adapter *padapter, struct recv_frame
if (prxattrib->encrypt > 0) {
u8 *iv = precv_frame->rx_data + prxattrib->hdrlen;
+
prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -452,8 +452,7 @@ static int recv_decache(struct recv_frame *precv_frame, u8 bretry, struct stainf
return _SUCCESS;
}
-void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame);
-void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame)
+static void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned char pwrbit;
u8 *ptr = precv_frame->rx_data;
@@ -557,15 +556,9 @@ static void count_rx_stats(struct adapter *padapter, struct recv_frame *prframe,
}
}
-int sta2sta_data_frame(
- struct adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta
-);
-
-int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame, struct sta_info **psta)
+static int sta2sta_data_frame(struct adapter *adapter,
+ struct recv_frame *precv_frame, struct sta_info **psta)
{
- u8 *ptr = precv_frame->rx_data;
int ret = _SUCCESS;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
@@ -620,12 +613,6 @@ int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame,
sta_addr = pattrib->src;
}
} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
- memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
-
sta_addr = mybssid;
} else {
ret = _FAIL;
@@ -650,6 +637,7 @@ static int ap2sta_data_frame(
struct sta_info **psta)
{
u8 *ptr = precv_frame->rx_data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
int ret = _SUCCESS;
struct sta_priv *pstapriv = &adapter->stapriv;
@@ -694,24 +682,16 @@ static int ap2sta_data_frame(
goto exit;
}
- /* if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { */
- /* */
-
- if (GetFrameSubType(ptr) & BIT(6)) {
- /* No data, will not indicate to upper layer, temporily count it here */
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ /* We count the nullfunc frame, but we'll not pass it on to higher layers. */
count_rx_stats(adapter, precv_frame, *psta);
ret = RTW_RX_HANDLED;
goto exit;
}
} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
check_fwstate(pmlmepriv, _FW_LINKED)) {
- memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
- memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
- /* */
memcpy(pattrib->bssid, mybssid, ETH_ALEN);
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
@@ -778,6 +758,7 @@ static int sta2ap_data_frame(struct adapter *adapter,
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
+
if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
ret = RTW_RX_HANDLED;
goto exit;
@@ -1023,6 +1004,7 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
+
if (ch_set_idx >= 0)
pmlmeext->channel_set[ch_set_idx].rx_count++;
}
@@ -1050,6 +1032,7 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
struct recv_priv *precvpriv = &adapter->recvpriv;
+
precvpriv->rx_drop++;
}
}
@@ -1313,9 +1296,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
struct rx_pkt_attrib *pattrib;
unsigned char *data_ptr;
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
+
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
int ret = _SUCCESS;
+
nr_subframes = 0;
pattrib = &prframe->attrib;
@@ -1366,13 +1351,12 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
a_len -= nSubframe_Length;
if (a_len != 0) {
padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4 - 1));
- if (padding_len == 4) {
+ if (padding_len == 4)
padding_len = 0;
- }
- if (a_len < padding_len) {
+ if (a_len < padding_len)
goto exit;
- }
+
pdata += padding_len;
a_len -= padding_len;
}
@@ -1747,9 +1731,11 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
!psecuritypriv->busetkipkey) {
rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
if (recvpriv->free_recvframe_cnt < NR_RECVFRAME / 4) {
- /* to prevent from recvframe starvation,
+ /*
+ * to prevent from recvframe starvation,
* get recvframe from uc_swdec_pending_queue to
- * free_recvframe_cnt */
+ * free_recvframe_cnt
+ */
rframe = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue);
if (rframe)
goto do_posthandle;
@@ -1787,7 +1773,7 @@ _recv_entry_drop:
return ret;
}
-void rtw_signal_stat_timer_hdl(struct timer_list *t)
+static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
struct adapter *adapter = from_timer(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
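
ap2sta_data_frame() now recognises null-function frames via ieee80211_is_nullfunc() on the frame_control word rather than testing BIT(6) of the raw subtype byte. The helper is a simple type/subtype comparison; the sketch below re-implements it locally with the frame-control constants as defined in mainline <linux/ieee80211.h> (the kernel helper operates on an __le16; host byte order is used here for brevity).

/* Sketch of the nullfunc test done by ieee80211_is_nullfunc().
 * Constants mirror include/linux/ieee80211.h; is_nullfunc() is a local
 * re-implementation for illustration only.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define IEEE80211_FCTL_FTYPE     0x000c
#define IEEE80211_FCTL_STYPE     0x00f0
#define IEEE80211_FTYPE_DATA     0x0008
#define IEEE80211_STYPE_NULLFUNC 0x0040

static bool is_nullfunc(uint16_t fc)	/* fc in host byte order here */
{
	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
}

int main(void)
{
	printf("%d\n", is_nullfunc(0x0048));	/* data/nullfunc -> 1 */
	printf("%d\n", is_nullfunc(0x0008));	/* plain data    -> 0 */
	return 0;
}
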
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 392a65783f32..3a002cb6834f 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -264,23 +264,30 @@ void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
void Save_DM_Func_Flag(struct adapter *padapter)
{
- u8 saveflag = true;
+ struct hal_data_8188e *haldata = &padapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+ odmpriv->BK_SupportAbility = odmpriv->SupportAbility;
}
void Restore_DM_Func_Flag(struct adapter *padapter)
{
- u8 saveflag = false;
+ struct hal_data_8188e *haldata = &padapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+ odmpriv->SupportAbility = odmpriv->BK_SupportAbility;
}
void Set_MSR(struct adapter *padapter, u8 type)
{
u8 val8;
+ int res;
+
+ res = rtw_read8(padapter, MSR, &val8);
+ if (res)
+ return;
- val8 = rtw_read8(padapter, MSR) & 0x0c;
+ val8 &= 0x0c;
val8 |= type;
rtw_write8(padapter, MSR, val8);
}
@@ -505,7 +512,11 @@ int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
static void set_acm_ctrl(struct adapter *adapter, u8 acm_mask)
{
- u8 acmctrl = rtw_read8(adapter, REG_ACMHWCTRL);
+ u8 acmctrl;
+ int res = rtw_read8(adapter, REG_ACMHWCTRL, &acmctrl);
+
+ if (res)
+ return;
if (acm_mask > 1)
acmctrl = acmctrl | 0x1;
@@ -765,6 +776,7 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing)
{
u8 sec_spacing;
+ int res;
if (spacing <= 7) {
switch (adapter->securitypriv.dot11PrivacyAlgrthm) {
@@ -786,8 +798,38 @@ static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing)
if (spacing < sec_spacing)
spacing = sec_spacing;
+ res = rtw_read8(adapter, REG_AMPDU_MIN_SPACE, &sec_spacing);
+ if (res)
+ return;
+
rtw_write8(adapter, REG_AMPDU_MIN_SPACE,
- (rtw_read8(adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | spacing);
+ (sec_spacing & 0xf8) | spacing);
+ }
+}
+
+static void set_ampdu_factor(struct adapter *adapter, u8 factor)
+{
+ u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 FactorToSet;
+ u8 *pRegToSet;
+ u8 index = 0;
+
+ pRegToSet = RegToSet_Normal; /* 0xb972a841; */
+ FactorToSet = factor;
+ if (FactorToSet <= 3) {
+ FactorToSet = (1 << (FactorToSet + 2));
+ if (FactorToSet > 0xf)
+ FactorToSet = 0xf;
+
+ for (index = 0; index < 4; index++) {
+ if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4))
+ pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4);
+
+ if ((pRegToSet[index] & 0x0f) > FactorToSet)
+ pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet);
+
+ rtw_write8(adapter, (REG_AGGLEN_LMT + index), pRegToSet[index]);
+ }
}
}
@@ -817,7 +859,7 @@ void HTOnAssocRsp(struct adapter *padapter)
set_min_ampdu_spacing(padapter, min_MPDU_spacing);
- SetHwReg8188EU(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ set_ampdu_factor(padapter, max_AMPDU_len);
}
void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
@@ -1225,6 +1267,45 @@ void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
enable_rate_adaptive(padapter, psta->mac_id);
}
+void rtw_set_basic_rate(struct adapter *adapter, u8 *rates)
+{
+ u16 BrateCfg = 0;
+ u8 RateIndex = 0;
+ int res;
+ u8 reg;
+
+ /* 2007.01.16, by Emily */
+ /* Select RRSR (in Legacy-OFDM and CCK) */
+ /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */
+ /* We do not use other rates. */
+ HalSetBrateCfg(adapter, rates, &BrateCfg);
+
+ /* 2011.03.30 add by Luke Lee */
+ /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
+ /* because CCK 2M has poor TXEVM */
+ /* CCK 5.5M & 11M ACK should be enabled for better performance */
+
+ BrateCfg = (BrateCfg | 0xd) & 0x15d;
+
+ BrateCfg |= 0x01; /* default enable 1M ACK rate */
+ /* Set RRSR rate table. */
+ rtw_write8(adapter, REG_RRSR, BrateCfg & 0xff);
+ rtw_write8(adapter, REG_RRSR + 1, (BrateCfg >> 8) & 0xff);
+ res = rtw_read8(adapter, REG_RRSR + 2, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapter, REG_RRSR + 2, reg & 0xf0);
+
+ /* Set RTS initial rate */
+ while (BrateCfg > 0x1) {
+ BrateCfg = (BrateCfg >> 1);
+ RateIndex++;
+ }
+ /* Ziv - Check */
+ rtw_write8(adapter, REG_INIRTS_RATE_SEL, RateIndex);
+}
+
/* Update RRSR and Rate for USERATE */
void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
{
@@ -1250,7 +1331,7 @@ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
else
update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
- SetHwReg8188EU(padapter, HW_VAR_BASIC_RATE, supported_rates);
+ rtw_set_basic_rate(padapter, supported_rates);
}
unsigned char check_assoc_AP(u8 *pframe, uint len)
@@ -1348,6 +1429,30 @@ static void set_ack_preamble(struct adapter *adapter, bool short_preamble)
rtw_write8(adapter, REG_RRSR + 2, val8);
};
+static void set_slot_time(struct adapter *adapter, u8 slot_time)
+{
+ u8 u1bAIFS, aSifsTime;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+
+ rtw_write8(adapter, REG_SLOT, slot_time);
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
+
+ /* <Roger_EXP> Temporary removed, 2008.06.20. */
+ rtw_write8(adapter, REG_EDCA_VO_PARAM, u1bAIFS);
+ rtw_write8(adapter, REG_EDCA_VI_PARAM, u1bAIFS);
+ rtw_write8(adapter, REG_EDCA_BE_PARAM, u1bAIFS);
+ rtw_write8(adapter, REG_EDCA_BK_PARAM, u1bAIFS);
+ }
+}
+
void update_capinfo(struct adapter *Adapter, u16 updateCap)
{
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
@@ -1386,7 +1491,7 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
}
}
- SetHwReg8188EU(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
+ set_slot_time(Adapter, pmlmeinfo->slotTime);
}
void update_wireless_mode(struct adapter *padapter)
@@ -1466,26 +1571,6 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
return _SUCCESS;
}
-void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
-{
- u8 *pIE;
- __le32 *pbuf;
-
- pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
- pbuf = (__le32 *)pIE;
-
- pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1));
-
- pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
-
- pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
-}
-
-void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
-{
- SetHwReg8188EU(padapter, HW_VAR_CORRECT_TSF, NULL);
-}
-
void beacon_timing_control(struct adapter *padapter)
{
SetBeaconRelatedRegisters8188EUsb(padapter);
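
rtw_set_basic_rate() derives the RTS initial rate index by shifting BrateCfg right until a single bit remains, i.e. it takes the index of the highest basic rate that survived the 0x15d mask. A small standalone sketch of that loop; the bitmap value is just an example.

/* Sketch of the RTS initial rate selection in rtw_set_basic_rate():
 * the loop finds the bit index of the highest rate left in BrateCfg.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t brate_cfg = 0x015d;	/* example basic-rate bitmap after masking */
	uint8_t rate_index = 0;

	while (brate_cfg > 0x1) {
		brate_cfg >>= 1;
		rate_index++;
	}

	printf("RTS initial rate index = %u\n", rate_index);	/* 8 for 0x015d */
	return 0;
}
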
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 7135d89caac1..24401f3ae2a0 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -16,16 +16,13 @@ static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
static void _init_txservq(struct tx_servq *ptxservq)
{
-
INIT_LIST_HEAD(&ptxservq->tx_pending);
rtw_init_queue(&ptxservq->sta_pending);
ptxservq->qcnt = 0;
-
}
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
-
memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
@@ -34,7 +31,6 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
_init_txservq(&psta_xmitpriv->vo_q);
INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz);
INIT_LIST_HEAD(&psta_xmitpriv->apsd);
-
}
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
@@ -78,7 +74,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
res = _FAIL;
goto exit;
}
- pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_frame_buf), 4);
+ pxmitpriv->pxmit_frame_buf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_frame_buf), 4);
/* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
/* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */
@@ -115,7 +111,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
- pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
+ pxmitpriv->pxmitbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
/* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
/* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */
@@ -155,7 +151,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
- pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
+ pxmitpriv->pxmit_extbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
@@ -299,6 +295,7 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
/* check HT op mode */
if (pattrib->ht_en) {
u8 htopmode = pmlmeinfo->HT_protection;
+
if ((pmlmeext->cur_bwmode && (htopmode == 2 || htopmode == 3)) ||
(!pmlmeext->cur_bwmode && htopmode == 3)) {
pattrib->vcs_mode = RTS_CTS;
@@ -445,10 +442,11 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->pktlen = pktfile.pkt_len;
- if (ETH_P_IP == pattrib->ether_type) {
+ if (pattrib->ether_type == ETH_P_IP) {
/* The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time */
/* to prevent DHCP protocol fail */
u8 tmp[24];
+
_rtw_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
@@ -627,7 +625,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
if (pframe[1] & 2) /* From Ds == 1 */
rtw_secmicappend(&micdata, &pframe[24], 6);
else
- rtw_secmicappend(&micdata, &pframe[10], 6);
+ rtw_secmicappend(&micdata, &pframe[10], 6);
} else { /* ToDS == 0 */
rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
if (pframe[1] & 2) /* From Ds == 1 */
@@ -953,12 +951,11 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
mpdu_len -= llc_sz;
}
- if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc))
mpdu_len -= pattrib->icv_len;
- }
if (bmcst) {
- /* don't do fragment to broadcat/multicast packets */
+ /* don't do fragment to broadcast/multicast packets */
mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
} else {
mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
@@ -1068,7 +1065,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
}
break;
}
-
}
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
@@ -1315,7 +1311,6 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
spin_unlock_bh(&pframequeue->lock);
-
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1505,7 +1500,6 @@ void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
for (i = 0; i < entry; i++, phwxmit++)
phwxmit->accnt = 0;
-
}
static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
@@ -1732,7 +1726,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
bool bmcst = is_multicast_ether_addr(pattrib->ra);
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return ret;
+ return ret;
if (pattrib->psta)
psta = pattrib->psta;
@@ -1760,8 +1754,8 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
pstapriv->tim_bitmap |= BIT(0);/* */
pstapriv->sta_dz_bitmap |= BIT(0);
-
- update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after upate bcn */
+ /* tx bc/mc packets after update bcn */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
ret = true;
}
@@ -1811,7 +1805,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
pstapriv->tim_bitmap |= BIT(psta->aid);
if (psta->sleepq_len == 1) {
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
}
@@ -2080,7 +2074,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
}
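
The driver-private N_BYTE_ALIGMENT() macro is replaced by the kernel's ALIGN(), which rounds an address up to the next multiple of a power-of-two boundary. The sketch below uses the classic round-up expression to show what such a call evaluates to; ALIGN_UP() here is a local illustration, not the kernel macro itself.

/* Sketch: ALIGN(x, a) rounds x up to the next multiple of a (a must be a
 * power of two). ALIGN_UP() below is the usual round-up idiom.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t buf = 0x1003;

	printf("0x%lx\n", (unsigned long)ALIGN_UP(buf, 4));	/* 0x1004 */
	return 0;
}
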
diff --git a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c b/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
deleted file mode 100644
index 6505e1fcb070..000000000000
--- a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/Hal8188EPwrSeq.h"
-#include "../include/rtl8188e_hal.h"
-
-struct wl_pwr_cfg rtl8188E_power_on_flow[] = {
- { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) },
- { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */
- { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
- { 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (control by DRV)*/
- { 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend*/
- { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) },
- { 0x0005, PWR_CMD_POLLING, BIT(0), 0 },
- { 0x0023, PWR_CMD_WRITE, BIT(4), 0 },
- { 0xFFFF, PWR_CMD_END, 0, 0 },
-};
-
-struct wl_pwr_cfg rtl8188E_card_disable_flow[] = {
- { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */
- { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */
- { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */
- { 0x0005, PWR_CMD_POLLING, BIT(1), 0 },
- { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
- { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */
- { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */
- { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */
- { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */
- { 0xFFFF, PWR_CMD_END, 0, 0 },
-};
-
-/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
-struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = {
- { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */
- { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */
- { 0x0002, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US },
- { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */
- { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */
- { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */
- { 0xFFFF, PWR_CMD_END, 0, 0 },
-};
diff --git a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
index 57e8f5573846..1e04de3a6622 100644
--- a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
@@ -279,6 +279,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
{ /* Wilson 2011/10/26 */
u32 MaskFromReg;
s8 i;
+ int res;
switch (pRaInfo->RateID) {
case RATR_INX_WIRELESS_NGB:
@@ -303,19 +304,31 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0000000d;
break;
case 12:
- MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR0);
+ res = rtw_read32(dm_odm->Adapter, REG_ARFR0, &MaskFromReg);
+ if (res)
+ return res;
+
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 13:
- MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR1);
+ res = rtw_read32(dm_odm->Adapter, REG_ARFR1, &MaskFromReg);
+ if (res)
+ return res;
+
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 14:
- MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR2);
+ res = rtw_read32(dm_odm->Adapter, REG_ARFR2, &MaskFromReg);
+ if (res)
+ return res;
+
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 15:
- MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR3);
+ res = rtw_read32(dm_odm->Adapter, REG_ARFR3, &MaskFromReg);
+ if (res)
+ return res;
+
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
default:
@@ -601,12 +614,12 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
pRAInfo = &dm_odm->RAInfo[MacId];
if (valid) {
- pRAInfo->RTY[0] = (u16)GET_TX_REPORT_TYPE1_RERTY_0(pBuffer);
- pRAInfo->RTY[1] = (u16)GET_TX_REPORT_TYPE1_RERTY_1(pBuffer);
- pRAInfo->RTY[2] = (u16)GET_TX_REPORT_TYPE1_RERTY_2((u8 *)pBuffer);
- pRAInfo->RTY[3] = (u16)GET_TX_REPORT_TYPE1_RERTY_3(pBuffer);
- pRAInfo->RTY[4] = (u16)GET_TX_REPORT_TYPE1_RERTY_4(pBuffer);
- pRAInfo->DROP = (u16)GET_TX_REPORT_TYPE1_DROP_0(pBuffer);
+ pRAInfo->RTY[0] = le16_to_cpup((__le16 *)pBuffer);
+ pRAInfo->RTY[1] = pBuffer[2];
+ pRAInfo->RTY[2] = pBuffer[3];
+ pRAInfo->RTY[3] = pBuffer[4];
+ pRAInfo->RTY[4] = pBuffer[5];
+ pRAInfo->DROP = pBuffer[6];
pRAInfo->TOTAL = pRAInfo->RTY[0] + pRAInfo->RTY[1] +
pRAInfo->RTY[2] + pRAInfo->RTY[3] +
pRAInfo->RTY[4] + pRAInfo->DROP;
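
ODM_RA_TxRPT2Handle_8188E() now reads the type-1 TX report fields straight from the buffer: a little-endian 16-bit first retry counter, four one-byte retry counters and a drop counter, replacing the GET_TX_REPORT_TYPE1_* accessor macros. A sketch of that layout as the updated code interprets it; the sample bytes are invented.

/* Sketch of the type-1 TX report layout as read by the updated code:
 * bytes 0-1: RTY[0] (little endian), bytes 2-5: RTY[1..4], byte 6: DROP.
 * The sample buffer is invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rpt[8] = { 0x34, 0x12, 2, 1, 0, 0, 3, 0 };
	uint16_t rty0 = rpt[0] | (rpt[1] << 8);	/* le16_to_cpup() equivalent */
	uint16_t total = rty0 + rpt[2] + rpt[3] + rpt[4] + rpt[5] + rpt[6];

	printf("RTY[0]=%u DROP=%u TOTAL=%u\n", rty0, rpt[6], total);
	return 0;
}
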
diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
index b944c8071a3b..525deab10820 100644
--- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
@@ -463,6 +463,7 @@ void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup
}
}
+/* FIXME: return an error to caller */
static void _PHY_SaveMACRegisters(
struct adapter *adapt,
u32 *MACReg,
@@ -470,11 +471,20 @@ static void _PHY_SaveMACRegisters(
)
{
u32 i;
+ int res;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- MACBackup[i] = rtw_read8(adapt, MACReg[i]);
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ u8 reg;
+
+ res = rtw_read8(adapt, MACReg[i], &reg);
+ if (res)
+ return;
- MACBackup[i] = rtw_read32(adapt, MACReg[i]);
+ MACBackup[i] = reg;
+ }
+
+ res = rtw_read32(adapt, MACReg[i], MACBackup + i);
+ (void)res;
}
static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
@@ -739,9 +749,12 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt)
{
u8 tmpreg;
u32 RF_Amode = 0, LC_Cal;
+ int res;
/* Check continuous TX and Packet TX */
- tmpreg = rtw_read8(adapt, 0xd03);
+ res = rtw_read8(adapt, 0xd03, &tmpreg);
+ if (res)
+ return;
if ((tmpreg & 0x70) != 0) /* Deal with continuous TX case */
rtw_write8(adapt, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */
diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
index 150ea380c39e..6c0b1368383d 100644
--- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
@@ -3,25 +3,116 @@
#include "../include/HalPwrSeqCmd.h"
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
+#define PWR_CMD_WRITE 0x01
+ /* offset: the read register offset */
+ /* msk: the mask of the write bits */
+ /* value: write value */
+ /* note: driver shall implement this cmd by read & msk after write */
+
+#define PWR_CMD_POLLING 0x02
+ /* offset: the read register offset */
+ /* msk: the mask of the polled value */
+ /* value: the value to be polled, masked by the msk field. */
+ /* note: driver shall implement this cmd by */
+ /* do{ */
+ /* if ( (Read(offset) & msk) == (value & msk) ) */
+ /* break; */
+ /* } while (not timeout); */
+
+#define PWR_CMD_DELAY 0x03
+ /* offset: the value to delay (in us) */
+ /* msk: N/A */
+ /* value: N/A */
+
+struct wl_pwr_cfg {
+ u16 offset;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+static struct wl_pwr_cfg rtl8188E_power_on_flow[] = {
+ { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) },
+ { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */
+ { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
+ { 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (control by DRV)*/
+ { 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend*/
+ { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) },
+ { 0x0005, PWR_CMD_POLLING, BIT(0), 0 },
+ { 0x0023, PWR_CMD_WRITE, BIT(4), 0 },
+};
+
+static struct wl_pwr_cfg rtl8188E_card_disable_flow[] = {
+ { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */
+ { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */
+ { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */
+ { 0x0005, PWR_CMD_POLLING, BIT(1), 0 },
+ { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
+ { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */
+ { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */
+ { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */
+ { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */
+};
+
+/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
+static struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = {
+ { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */
+ { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */
+ { 0x0002, PWR_CMD_DELAY, 0, 0 },
+ { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */
+ { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */
+ { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */
+};
+
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, enum r8188eu_pwr_seq seq)
{
struct wl_pwr_cfg pwrcfgcmd = {0};
+ struct wl_pwr_cfg *pwrseqcmd;
u8 poll_bit = false;
- u32 aryidx = 0;
+ u8 idx, num_steps;
u8 value = 0;
u32 offset = 0;
u32 poll_count = 0; /* polling autoload done. */
u32 max_poll_count = 5000;
+ int res;
+
+ switch (seq) {
+ case PWR_ON_FLOW:
+ pwrseqcmd = rtl8188E_power_on_flow;
+ num_steps = ARRAY_SIZE(rtl8188E_power_on_flow);
+ break;
+ case DISABLE_FLOW:
+ pwrseqcmd = rtl8188E_card_disable_flow;
+ num_steps = ARRAY_SIZE(rtl8188E_card_disable_flow);
+ break;
+ case LPS_ENTER_FLOW:
+ pwrseqcmd = rtl8188E_enter_lps_flow;
+ num_steps = ARRAY_SIZE(rtl8188E_enter_lps_flow);
+ break;
+ default:
+ return false;
+ }
- do {
- pwrcfgcmd = pwrseqcmd[aryidx];
+ for (idx = 0; idx < num_steps; idx++) {
+ pwrcfgcmd = pwrseqcmd[idx];
switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
case PWR_CMD_WRITE:
offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
/* Read the value from system register */
- value = rtw_read8(padapter, offset);
+ res = rtw_read8(padapter, offset, &value);
+ if (res)
+ return false;
value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
@@ -33,7 +124,9 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
poll_bit = false;
offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
do {
- value = rtw_read8(padapter, offset);
+ res = rtw_read8(padapter, offset, &value);
+ if (res)
+ return false;
value &= GET_PWR_CFG_MASK(pwrcfgcmd);
if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
@@ -46,20 +139,11 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
} while (!poll_bit);
break;
case PWR_CMD_DELAY:
- if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
- else
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000);
- break;
- case PWR_CMD_END:
- /* When this command is parsed, end the process */
- return true;
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
break;
default:
break;
}
-
- aryidx++;/* Add Array Index */
- } while (1);
+ }
return true;
}
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
index 910cc07f656c..6a1cdc67335b 100644
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ b/drivers/staging/r8188eu/hal/hal_com.c
@@ -10,45 +10,6 @@
#define _HAL_INIT_C_
-void dump_chip_info(struct HAL_VERSION chip_vers)
-{
- uint cnt = 0;
- char buf[128];
-
- cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
- cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
- "Normal_Chip" : "Test_Chip");
- cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
- "TSMC" : "UMC");
-
- switch (chip_vers.CUTVersion) {
- case A_CUT_VERSION:
- cnt += sprintf((buf + cnt), "A_CUT_");
- break;
- case B_CUT_VERSION:
- cnt += sprintf((buf + cnt), "B_CUT_");
- break;
- case C_CUT_VERSION:
- cnt += sprintf((buf + cnt), "C_CUT_");
- break;
- case D_CUT_VERSION:
- cnt += sprintf((buf + cnt), "D_CUT_");
- break;
- case E_CUT_VERSION:
- cnt += sprintf((buf + cnt), "E_CUT_");
- break;
- default:
- cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion);
- break;
- }
-
- cnt += sprintf((buf + cnt), "1T1R_");
-
- cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
-
- pr_info("%s", buf);
-}
-
#define CHAN_PLAN_HW 0x80
u8 /* return the final channel plan decision */
@@ -303,7 +264,9 @@ s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
if (!buf)
goto exit;
- trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR);
+ ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger);
+ if (ret)
+ return _FAIL;
if (trigger == C2H_EVT_HOST_CLOSE)
goto exit; /* Not ready */
@@ -314,13 +277,26 @@ s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
memset(c2h_evt, 0, 16);
- *buf = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL);
- *(buf + 1) = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1);
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
/* Read the content */
- for (i = 0; i < c2h_evt->plen; i++)
- c2h_evt->payload[i] = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
- sizeof(*c2h_evt) + i);
+ for (i = 0; i < c2h_evt->plen; i++) {
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
+ sizeof(*c2h_evt) + i, c2h_evt->payload + i);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+ }
ret = _SUCCESS;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index 475650dc7301..b01ee1695fee 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -18,13 +18,18 @@
static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
{
- u8 read_down = false;
+ u8 read_down = false, reg;
int retry_cnts = 100;
+ int res;
u8 valid;
do {
- valid = rtw_read8(adapt, REG_HMETFR) & BIT(msgbox_num);
+ res = rtw_read8(adapt, REG_HMETFR, &reg);
+ if (res)
+ continue;
+
+ valid = reg & BIT(msgbox_num);
if (0 == valid)
read_down = true;
} while ((!read_down) && (retry_cnts--));
@@ -533,6 +538,8 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
bool bcn_valid = false;
u8 DLBcnCount = 0;
u32 poll = 0;
+ u8 reg;
+ int res;
if (mstatus == 1) {
/* We should set AID, correct TSF, HW seq enable before set JoinBssReport to Fw in 88/92C. */
@@ -547,8 +554,17 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
 /* Disable Hw protection for a time which is reserved for Hw sending beacon. */
 /* Fix the failure to download the reserved page packet caused by access collisions during the protection time. */
/* 2010.05.11. Added by tynli. */
- rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) & (~BIT(3)));
- rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) | BIT(4));
+ res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapt, REG_BCN_CTRL, reg & (~BIT(3)));
+
+ res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapt, REG_BCN_CTRL, reg | BIT(4));
if (haldata->RegFwHwTxQCtrl & BIT(6))
bSendBeacon = true;
@@ -581,8 +597,17 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* */
/* Enable Bcn */
- rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) | BIT(3));
- rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL) & (~BIT(4)));
+ res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapt, REG_BCN_CTRL, reg | BIT(3));
+
+ res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
+ if (res)
+ return;
+
+ rtw_write8(adapt, REG_BCN_CTRL, reg & (~BIT(4)));
/* To make sure that if there exists an adapter which would like to send beacon. */
 /* If exists, the original value of 0x422[6] will be 1, we should check this to */
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_dm.c b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
index 6d28e3dc0d26..0399872c4546 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
@@ -12,8 +12,12 @@
static void dm_InitGPIOSetting(struct adapter *Adapter)
{
u8 tmp1byte;
+ int res;
+
+ res = rtw_read8(Adapter, REG_GPIO_MUXCFG, &tmp1byte);
+ if (res)
+ return;
- tmp1byte = rtw_read8(Adapter, REG_GPIO_MUXCFG);
tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
rtw_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte);
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index e17375a74f17..5b8f1a912bbb 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -13,10 +13,14 @@
static void iol_mode_enable(struct adapter *padapter, u8 enable)
{
u8 reg_0xf0 = 0;
+ int res;
if (enable) {
/* Enable initial offload */
- reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG);
+ res = rtw_read8(padapter, REG_SYS_CFG, &reg_0xf0);
+ if (res)
+ return;
+
rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 | SW_OFFLOAD_EN);
if (!padapter->bFWReady)
@@ -24,7 +28,10 @@ static void iol_mode_enable(struct adapter *padapter, u8 enable)
} else {
/* disable initial offload */
- reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG);
+ res = rtw_read8(padapter, REG_SYS_CFG, &reg_0xf0);
+ if (res)
+ return;
+
rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 & ~SW_OFFLOAD_EN);
}
}
@@ -34,17 +41,31 @@ static s32 iol_execute(struct adapter *padapter, u8 control)
s32 status = _FAIL;
u8 reg_0x88 = 0;
unsigned long timeout;
+ int res;
control = control & 0x0f;
- reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
+ res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88);
+ if (res)
+ return _FAIL;
+
rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control);
timeout = jiffies + msecs_to_jiffies(1000);
- while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
- time_before(jiffies, timeout))
- ;
- reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
+ do {
+ res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88);
+ if (res)
+ continue;
+
+ if (!(reg_0x88 & control))
+ break;
+
+ } while (time_before(jiffies, timeout));
+
+ res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88);
+ if (res)
+ return _FAIL;
+
status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
if (reg_0x88 & control << 4)
status = _FAIL;
@@ -62,7 +83,7 @@ static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
}
static void
-efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
+efuse_phymap_to_logical(u8 *phymap, u16 _size_byte, u8 *pbuf)
{
u8 *efuseTbl = NULL;
u8 rtemp8;
@@ -70,7 +91,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
u8 offset, wren;
u16 i, j;
u16 **eFuseWord = NULL;
- u16 efuse_utilized = 0;
u8 u1temp = 0;
efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
@@ -92,7 +112,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* */
rtemp8 = *(phymap + eFuse_Addr);
if (rtemp8 != 0xFF) {
- efuse_utilized++;
eFuse_Addr++;
} else {
goto exit;
@@ -130,13 +149,11 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
if (!(wren & 0x01)) {
rtemp8 = *(phymap + eFuse_Addr);
eFuse_Addr++;
- efuse_utilized++;
eFuseWord[offset][i] = (rtemp8 & 0xff);
if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
break;
rtemp8 = *(phymap + eFuse_Addr);
eFuse_Addr++;
- efuse_utilized++;
eFuseWord[offset][i] |= (((u16)rtemp8 << 8) & 0xff00);
if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
@@ -149,7 +166,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
rtemp8 = *(phymap + eFuse_Addr);
if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
- efuse_utilized++;
eFuse_Addr++;
}
}
@@ -167,59 +183,70 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* */
/* 4. Copy from Efuse map to output pointer memory!!! */
/* */
- for (i = 0; i < _size_byte; i++)
- pbuf[i] = efuseTbl[_offset + i];
-
- /* */
- /* 5. Calculate Efuse utilization. */
- /* */
+ memcpy(pbuf, efuseTbl, _size_byte);
exit:
kfree(efuseTbl);
kfree(eFuseWord);
}
-static void efuse_read_phymap_from_txpktbuf(
+/* FIXME: add error handling in callers */
+static int efuse_read_phymap_from_txpktbuf(
struct adapter *adapter,
- int bcnhead, /* beacon head, where FW store len(2-byte) and efuse physical map. */
u8 *content, /* buffer to store efuse physical map */
u16 *size /* for efuse content: the max byte to read. will update to byte read */
)
{
unsigned long timeout;
- u16 dbg_addr = 0;
__le32 lo32 = 0, hi32 = 0;
u16 len = 0, count = 0;
- int i = 0;
+ int i = 0, res;
u16 limit = *size;
-
+ u8 reg;
u8 *pos = content;
-
- if (bcnhead < 0) /* if not valid */
- bcnhead = rtw_read8(adapter, REG_TDECTRL + 1);
+ u32 reg32;
rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
- dbg_addr = bcnhead * 128 / 8; /* 8-bytes addressing */
-
while (1) {
- rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr + i);
+ rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, i);
rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
timeout = jiffies + msecs_to_jiffies(1000);
- while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) && time_before(jiffies, timeout))
- rtw_usleep_os(100);
+ do {
+ res = rtw_read8(adapter, REG_TXPKTBUF_DBG, &reg);
+ if (res)
+ continue;
+
+ if (reg)
+ break;
+
+ msleep(1);
+ } while (time_before(jiffies, timeout));
/* data from EEPROM needs to be in LE */
- lo32 = cpu_to_le32(rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L));
- hi32 = cpu_to_le32(rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H));
+ res = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L, &reg32);
+ if (res)
+ return res;
+
+ lo32 = cpu_to_le32(reg32);
+
+ res = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H, &reg32);
+ if (res)
+ return res;
+
+ hi32 = cpu_to_le32(reg32);
if (i == 0) {
+ u16 reg;
+
/* Although lenc is only used in a debug statement,
* do not remove it as the rtw_read16() call consumes
* 2 bytes from the EEPROM source.
*/
- rtw_read16(adapter, REG_PKTBUF_DBG_DATA_L);
+ res = rtw_read16(adapter, REG_PKTBUF_DBG_DATA_L, &reg);
+ if (res)
+ return res;
len = le32_to_cpu(lo32) & 0x0000ffff;
@@ -246,21 +273,23 @@ static void efuse_read_phymap_from_txpktbuf(
}
rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS);
*size = count;
+
+ return 0;
}
-static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset, u16 size_byte, u8 *logical_map)
+static s32 iol_read_efuse(struct adapter *padapter, u16 size_byte, u8 *logical_map)
{
s32 status = _FAIL;
u8 physical_map[512];
u16 size = 512;
- rtw_write8(padapter, REG_TDECTRL + 1, txpktbuf_bndy);
+ rtw_write8(padapter, REG_TDECTRL + 1, 0);
memset(physical_map, 0xFF, 512);
rtw_write8(padapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
status = iol_execute(padapter, CMD_READ_EFUSE_MAP);
if (status == _SUCCESS)
- efuse_read_phymap_from_txpktbuf(padapter, txpktbuf_bndy, physical_map, &size);
- efuse_phymap_to_logical(physical_map, offset, size_byte, logical_map);
+ efuse_read_phymap_from_txpktbuf(padapter, physical_map, &size);
+ efuse_phymap_to_logical(physical_map, size_byte, logical_map);
return status;
}
@@ -321,25 +350,35 @@ exit:
void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState)
{
u16 tmpV16;
+ int res;
if (PwrState) {
rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
 /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
- tmpV16 = rtw_read16(pAdapter, REG_SYS_ISO_CTRL);
+ res = rtw_read16(pAdapter, REG_SYS_ISO_CTRL, &tmpV16);
+ if (res)
+ return;
+
if (!(tmpV16 & PWC_EV12V)) {
tmpV16 |= PWC_EV12V;
rtw_write16(pAdapter, REG_SYS_ISO_CTRL, tmpV16);
}
/* Reset: 0x0000h[28], default valid */
- tmpV16 = rtw_read16(pAdapter, REG_SYS_FUNC_EN);
+ res = rtw_read16(pAdapter, REG_SYS_FUNC_EN, &tmpV16);
+ if (res)
+ return;
+
if (!(tmpV16 & FEN_ELDR)) {
tmpV16 |= FEN_ELDR;
rtw_write16(pAdapter, REG_SYS_FUNC_EN, tmpV16);
}
/* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
- tmpV16 = rtw_read16(pAdapter, REG_SYS_CLKR);
+ res = rtw_read16(pAdapter, REG_SYS_CLKR, &tmpV16);
+ if (res)
+ return;
+
if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
tmpV16 |= (LOADER_CLK_EN | ANA8M);
rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16);
@@ -470,26 +509,60 @@ exit:
kfree(eFuseWord);
}
-static void ReadEFuseByIC(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf)
{
int ret = _FAIL;
if (rtw_IOL_applied(Adapter)) {
rtl8188eu_InitPowerOn(Adapter);
iol_mode_enable(Adapter, 1);
- ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
+ ret = iol_read_efuse(Adapter, _size_byte, pbuf);
iol_mode_enable(Adapter, 0);
if (_SUCCESS == ret)
return;
}
- Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf);
+ Hal_EfuseReadEFuse88E(Adapter, 0, _size_byte, pbuf);
}
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
+static void dump_chip_info(struct HAL_VERSION chip_vers)
{
- ReadEFuseByIC(Adapter, _offset, _size_byte, pbuf);
+ uint cnt = 0;
+ char buf[128];
+
+ cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
+ cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
+ "Normal_Chip" : "Test_Chip");
+ cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
+ "TSMC" : "UMC");
+
+ switch (chip_vers.CUTVersion) {
+ case A_CUT_VERSION:
+ cnt += sprintf((buf + cnt), "A_CUT_");
+ break;
+ case B_CUT_VERSION:
+ cnt += sprintf((buf + cnt), "B_CUT_");
+ break;
+ case C_CUT_VERSION:
+ cnt += sprintf((buf + cnt), "C_CUT_");
+ break;
+ case D_CUT_VERSION:
+ cnt += sprintf((buf + cnt), "D_CUT_");
+ break;
+ case E_CUT_VERSION:
+ cnt += sprintf((buf + cnt), "E_CUT_");
+ break;
+ default:
+ cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion);
+ break;
+ }
+
+ cnt += sprintf((buf + cnt), "1T1R_");
+
+ cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
+
+ pr_info("%s", buf);
}
void rtl8188e_read_chip_version(struct adapter *padapter)
@@ -497,8 +570,12 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
u32 value32;
struct HAL_VERSION ChipVersion;
struct hal_data_8188e *pHalData = &padapter->haldata;
+ int res;
+
+ res = rtw_read32(padapter, REG_SYS_CFG, &value32);
+ if (res)
+ return;
- value32 = rtw_read32(padapter, REG_SYS_CFG);
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
@@ -525,10 +602,17 @@ void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet)
void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
{
+ int res;
+ u8 reg;
+
+ res = rtw_read8(adapter, rOFDM0_RxDSP + 1, &reg);
+ if (res)
+ return;
+
if (enable)
- rtw_write8(adapter, rOFDM0_RxDSP + 1, rtw_read8(adapter, rOFDM0_RxDSP + 1) | BIT(1));
+ rtw_write8(adapter, rOFDM0_RxDSP + 1, reg | BIT(1));
else
- rtw_write8(adapter, rOFDM0_RxDSP + 1, rtw_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT(1));
+ rtw_write8(adapter, rOFDM0_RxDSP + 1, reg & ~BIT(1));
}
/* */
@@ -538,26 +622,24 @@ void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
/* */
static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data)
{
- s32 status = _SUCCESS;
- s32 count = 0;
+ s32 count;
u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
u16 LLTReg = REG_LLT_INIT;
+ int res;
rtw_write32(padapter, LLTReg, value);
/* polling */
- do {
- value = rtw_read32(padapter, LLTReg);
- if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
- break;
+ for (count = 0; count <= POLLING_LLT_THRESHOLD; count++) {
+ res = rtw_read32(padapter, LLTReg, &value);
+ if (res)
+ continue;
- if (count > POLLING_LLT_THRESHOLD) {
- status = _FAIL;
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
break;
- }
- } while (count++);
+ }
- return status;
+ return count > POLLING_LLT_THRESHOLD ? _FAIL : _SUCCESS;
}
s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index 4864dafd887b..dea6d915a1f4 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -56,8 +56,12 @@ rtl8188e_PHY_QueryBBReg(
)
{
u32 ReturnValue = 0, OriginalValue, BitShift;
+ int res;
+
+ res = rtw_read32(Adapter, RegAddr, &OriginalValue);
+ if (res)
+ return 0;
- OriginalValue = rtw_read32(Adapter, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
ReturnValue = (OriginalValue & BitMask) >> BitShift;
return ReturnValue;
@@ -84,9 +88,13 @@ rtl8188e_PHY_QueryBBReg(
void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
{
u32 OriginalValue, BitShift;
+ int res;
if (BitMask != bMaskDWord) { /* if not "double word" write */
- OriginalValue = rtw_read32(Adapter, RegAddr);
+ res = rtw_read32(Adapter, RegAddr, &OriginalValue);
+ if (res)
+ return;
+
BitShift = phy_CalculateBitShift(BitMask);
Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
}
@@ -484,13 +492,17 @@ PHY_BBConfig8188E(
{
int rtStatus = _SUCCESS;
struct hal_data_8188e *pHalData = &Adapter->haldata;
- u32 RegVal;
+ u16 RegVal;
u8 CrystalCap;
+ int res;
phy_InitBBRFRegisterDefinition(Adapter);
/* Enable BB and RF */
- RegVal = rtw_read16(Adapter, REG_SYS_FUNC_EN);
+ res = rtw_read16(Adapter, REG_SYS_FUNC_EN, &RegVal);
+ if (res)
+ return _FAIL;
+
rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal | BIT(13) | BIT(0) | BIT(1)));
/* 20090923 Joseph: Advised by Steven and Jenyu. Power sequence before init RF. */
@@ -594,6 +606,7 @@ _PHY_SetBWMode92C(
struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 regBwOpMode;
u8 regRRSR_RSC;
+ int res;
if (Adapter->bDriverStopped)
return;
@@ -602,8 +615,13 @@ _PHY_SetBWMode92C(
/* 3<1>Set MAC register */
/* 3 */
- regBwOpMode = rtw_read8(Adapter, REG_BWOPMODE);
- regRRSR_RSC = rtw_read8(Adapter, REG_RRSR + 2);
+ res = rtw_read8(Adapter, REG_BWOPMODE, &regBwOpMode);
+ if (res)
+ return;
+
+ res = rtw_read8(Adapter, REG_RRSR + 2, &regRRSR_RSC);
+ if (res)
+ return;
switch (pHalData->CurrentChannelBW) {
case HT_CHANNEL_WIDTH_20:
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
index 727e1adce1dc..def6d0d6e402 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
@@ -32,7 +32,7 @@ int rtl8188eu_init_recv_priv(struct adapter *padapter)
goto exit;
}
- precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_recv_buf), 4);
+ precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index a217272a07f8..ff074d246dab 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -11,7 +11,7 @@
#include "../include/rtw_iol.h"
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
-#include "../include/Hal8188EPwrSeq.h"
+#include "../include/HalPwrSeqCmd.h"
static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
{
@@ -52,12 +52,14 @@ void rtl8188eu_interface_configure(struct adapter *adapt)
u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
{
u16 value16;
+ int res;
+
/* HW Power on sequence */
struct hal_data_8188e *haldata = &adapt->haldata;
if (haldata->bMacPwrCtrlOn)
return _SUCCESS;
- if (!HalPwrSeqCmdParsing(adapt, Rtl8188E_NIC_PWR_ON_FLOW))
+ if (!HalPwrSeqCmdParsing(adapt, PWR_ON_FLOW))
return _FAIL;
/* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
@@ -65,7 +67,10 @@ u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
 rtw_write16(adapt, REG_CR, 0x00); /* suggested by zhouzhou, by page, 20111230 */
/* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
- value16 = rtw_read16(adapt, REG_CR);
+ res = rtw_read16(adapt, REG_CR, &value16);
+ if (res)
+ return _FAIL;
+
value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN
| PROTOCOL_EN | SCHEDULE_EN | ENSEC | CALTMR_EN);
/* for SDIO - Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
@@ -81,6 +86,7 @@ static void _InitInterrupt(struct adapter *Adapter)
{
u32 imr, imr_ex;
u8 usb_opt;
+ int res;
/* HISR write one to clear */
rtw_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF);
@@ -94,7 +100,9 @@ static void _InitInterrupt(struct adapter *Adapter)
/* REG_USB_SPECIAL_OPTION - BIT(4) */
/* 0; Use interrupt endpoint to upload interrupt pkt */
/* 1; Use bulk endpoint to upload interrupt pkt, */
- usb_opt = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION);
+ res = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION, &usb_opt);
+ if (res)
+ return;
if (adapter_to_dvobj(Adapter)->pusbdev->speed == USB_SPEED_HIGH)
usb_opt = usb_opt | (INT_BULK_SEL);
@@ -163,7 +171,14 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
u16 bkQ, u16 viQ, u16 voQ, u16 mgtQ,
u16 hiQ)
{
- u16 value16 = (rtw_read16(Adapter, REG_TRXDMA_CTRL) & 0x7);
+ u16 value16;
+ int res;
+
+ res = rtw_read16(Adapter, REG_TRXDMA_CTRL, &value16);
+ if (res)
+ return;
+
+ value16 &= 0x7;
value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
_TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
@@ -282,8 +297,12 @@ static void _InitQueuePriority(struct adapter *Adapter)
static void _InitNetworkType(struct adapter *Adapter)
{
u32 value32;
+ int res;
+
+ res = rtw_read32(Adapter, REG_CR, &value32);
+ if (res)
+ return;
- value32 = rtw_read32(Adapter, REG_CR);
/* TODO: use the other function to set network type */
value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);
@@ -323,9 +342,13 @@ static void _InitAdaptiveCtrl(struct adapter *Adapter)
{
u16 value16;
u32 value32;
+ int res;
/* Response Rate Set */
- value32 = rtw_read32(Adapter, REG_RRSR);
+ res = rtw_read32(Adapter, REG_RRSR, &value32);
+ if (res)
+ return;
+
value32 &= ~RATE_BITMAP_ALL;
value32 |= RATE_RRSR_CCK_ONLY_1M;
rtw_write32(Adapter, REG_RRSR, value32);
@@ -363,8 +386,12 @@ static void _InitEDCA(struct adapter *Adapter)
static void _InitRetryFunction(struct adapter *Adapter)
{
u8 value8;
+ int res;
+
+ res = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL, &value8);
+ if (res)
+ return;
- value8 = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL);
value8 |= EN_AMPDU_RTY_NEW;
rtw_write8(Adapter, REG_FWHW_TXQ_CTRL, value8);
@@ -390,11 +417,15 @@ static void _InitRetryFunction(struct adapter *Adapter)
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
u32 value32;
+ int res;
if (Adapter->registrypriv.wifi_spec)
return;
- value32 = rtw_read32(Adapter, REG_TDECTRL);
+ res = rtw_read32(Adapter, REG_TDECTRL, &value32);
+ if (res)
+ return;
+
value32 = value32 & ~(BLK_DESC_NUM_MASK << BLK_DESC_NUM_SHIFT);
value32 |= ((USB_TXAGG_DESC_NUM & BLK_DESC_NUM_MASK) << BLK_DESC_NUM_SHIFT);
@@ -423,9 +454,15 @@ usb_AggSettingRxUpdate(
{
u8 valueDMA;
u8 valueUSB;
+ int res;
- valueDMA = rtw_read8(Adapter, REG_TRXDMA_CTRL);
- valueUSB = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION);
+ res = rtw_read8(Adapter, REG_TRXDMA_CTRL, &valueDMA);
+ if (res)
+ return;
+
+ res = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION, &valueUSB);
+ if (res)
+ return;
valueDMA |= RXDMA_AGG_EN;
valueUSB &= ~USB_AGG_EN;
@@ -446,9 +483,11 @@ static void InitUsbAggregationSetting(struct adapter *Adapter)
usb_AggSettingRxUpdate(Adapter);
}
-static void _InitBeaconParameters(struct adapter *Adapter)
+/* FIXME: add error handling in callers */
+static int _InitBeaconParameters(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = &Adapter->haldata;
+ int res;
rtw_write16(Adapter, REG_BCN_CTRL, 0x1010);
@@ -461,9 +500,19 @@ static void _InitBeaconParameters(struct adapter *Adapter)
 /* because the test chip does not contend before sending a beacon. by tynli. 2009.11.03 */
rtw_write16(Adapter, REG_BCNTCFG, 0x660F);
- haldata->RegFwHwTxQCtrl = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL + 2);
- haldata->RegReg542 = rtw_read8(Adapter, REG_TBTT_PROHIBIT + 2);
- haldata->RegCR_1 = rtw_read8(Adapter, REG_CR + 1);
+ res = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL + 2, &haldata->RegFwHwTxQCtrl);
+ if (res)
+ return res;
+
+ res = rtw_read8(Adapter, REG_TBTT_PROHIBIT + 2, &haldata->RegReg542);
+ if (res)
+ return res;
+
+ res = rtw_read8(Adapter, REG_CR + 1, &haldata->RegCR_1);
+ if (res)
+ return res;
+
+ return 0;
}
static void _BeaconFunctionEnable(struct adapter *Adapter,
@@ -484,11 +533,17 @@ static void _BBTurnOnBlock(struct adapter *Adapter)
static void _InitAntenna_Selection(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = &Adapter->haldata;
+ int res;
+ u32 reg;
if (haldata->AntDivCfg == 0)
return;
- rtw_write32(Adapter, REG_LEDCFG0, rtw_read32(Adapter, REG_LEDCFG0) | BIT(23));
+ res = rtw_read32(Adapter, REG_LEDCFG0, &reg);
+ if (res)
+ return;
+
+ rtw_write32(Adapter, REG_LEDCFG0, reg | BIT(23));
rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
if (rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
@@ -514,9 +569,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
u16 value16;
u8 txpktbuf_bndy;
u32 status = _SUCCESS;
+ int res;
struct hal_data_8188e *haldata = &Adapter->haldata;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 reg;
if (Adapter->pwrctrlpriv.bkeepfwalive) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
@@ -614,13 +671,19 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Hw bug which Hw initials RxFF boundary size to a value which is larger than the real Rx buffer size in 88E. */
/* */
/* Enable MACTXEN/MACRXEN block */
- value16 = rtw_read16(Adapter, REG_CR);
+ res = rtw_read16(Adapter, REG_CR, &value16);
+ if (res)
+ return _FAIL;
+
value16 |= (MACTXEN | MACRXEN);
rtw_write8(Adapter, REG_CR, value16);
/* Enable TX Report */
/* Enable Tx Report Timer */
- value8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
+ res = rtw_read8(Adapter, REG_TX_RPT_CTRL, &value8);
+ if (res)
+ return _FAIL;
+
rtw_write8(Adapter, REG_TX_RPT_CTRL, (value8 | BIT(1) | BIT(0)));
/* Set MAX RPT MACID */
rtw_write8(Adapter, REG_TX_RPT_CTRL + 1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */
@@ -684,7 +747,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
/* enable tx DMA to drop the redundate data of packet */
- rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
+ res = rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK, &value16);
+ if (res)
+ return _FAIL;
+
+ rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (value16 | DROP_DATA_EN));
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
@@ -704,7 +771,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write8(Adapter, REG_USB_HRPWM, 0);
/* ack for xmit mgmt frames. */
- rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL) | BIT(12));
+ res = rtw_read32(Adapter, REG_FWHW_TXQ_CTRL, &reg);
+ if (res)
+ return _FAIL;
+
+ rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, reg | BIT(12));
exit:
return status;
@@ -714,23 +785,33 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
{
u8 val8;
struct hal_data_8188e *haldata = &Adapter->haldata;
+ int res;
/* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
- val8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
+ res = rtw_read8(Adapter, REG_TX_RPT_CTRL, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, REG_TX_RPT_CTRL, val8 & (~BIT(1)));
/* stop rx */
rtw_write8(Adapter, REG_CR, 0x0);
/* Run LPS WL RFOFF flow */
- HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_LPS_ENTER_FLOW);
+ HalPwrSeqCmdParsing(Adapter, LPS_ENTER_FLOW);
/* 2. 0x1F[7:0] = 0 turn off RF */
- val8 = rtw_read8(Adapter, REG_MCUFWDL);
+ res = rtw_read8(Adapter, REG_MCUFWDL, &val8);
+ if (res)
+ return;
+
if ((val8 & RAM_DL_SEL) && Adapter->bFWReady) { /* 8051 RAM code */
/* Reset MCU 0x2[10]=0. */
- val8 = rtw_read8(Adapter, REG_SYS_FUNC_EN + 1);
+ res = rtw_read8(Adapter, REG_SYS_FUNC_EN + 1, &val8);
+ if (res)
+ return;
+
val8 &= ~BIT(2); /* 0x2[10], FEN_CPUEN */
rtw_write8(Adapter, REG_SYS_FUNC_EN + 1, val8);
}
@@ -740,26 +821,45 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
/* YJ,add,111212 */
/* Disable 32k */
- val8 = rtw_read8(Adapter, REG_32K_CTRL);
+ res = rtw_read8(Adapter, REG_32K_CTRL, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, REG_32K_CTRL, val8 & (~BIT(0)));
/* Card disable power action flow */
- HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_DISABLE_FLOW);
+ HalPwrSeqCmdParsing(Adapter, DISABLE_FLOW);
/* Reset MCU IO Wrapper */
- val8 = rtw_read8(Adapter, REG_RSV_CTRL + 1);
+ res = rtw_read8(Adapter, REG_RSV_CTRL + 1, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, REG_RSV_CTRL + 1, (val8 & (~BIT(3))));
- val8 = rtw_read8(Adapter, REG_RSV_CTRL + 1);
+
+ res = rtw_read8(Adapter, REG_RSV_CTRL + 1, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, REG_RSV_CTRL + 1, val8 | BIT(3));
/* YJ,test add, 111207. For Power Consumption. */
- val8 = rtw_read8(Adapter, GPIO_IN);
+ res = rtw_read8(Adapter, GPIO_IN, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, GPIO_OUT, val8);
rtw_write8(Adapter, GPIO_IO_SEL, 0xFF);/* Reg0x46 */
- val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL);
+ res = rtw_read8(Adapter, REG_GPIO_IO_SEL, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, REG_GPIO_IO_SEL, (val8 << 4));
- val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL + 1);
+ res = rtw_read8(Adapter, REG_GPIO_IO_SEL + 1, &val8);
+ if (res)
+ return;
+
rtw_write8(Adapter, REG_GPIO_IO_SEL + 1, val8 | 0x0F);/* Reg0x43 */
 rtw_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808); /* set LNA, TRSW, EX_PA pins to output mode */
haldata->bMacPwrCtrlOn = false;
@@ -812,13 +912,10 @@ exit:
static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
{
- u16 i;
- u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x88, 0x02};
struct eeprom_priv *eeprom = &adapt->eeprompriv;
if (AutoLoadFail) {
- for (i = 0; i < 6; i++)
- eeprom->mac_addr[i] = sMacAddr[i];
+ eth_random_addr(eeprom->mac_addr);
} else {
/* Read Permanent MAC address */
memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN);
@@ -829,285 +926,41 @@ void ReadAdapterInfo8188EU(struct adapter *Adapter)
{
struct eeprom_priv *eeprom = &Adapter->eeprompriv;
struct led_priv *ledpriv = &Adapter->ledpriv;
+ u8 *efuse_buf;
u8 eeValue;
+ int res;
/* check system boot selection */
- eeValue = rtw_read8(Adapter, REG_9346CR);
- eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM);
- eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
-
- if (!is_boot_from_eeprom(Adapter))
- EFUSE_ShadowMapUpdate(Adapter);
-
- /* parse the eeprom/efuse content */
- Hal_EfuseParseIDCode88E(Adapter, eeprom->efuse_eeprom_data);
- Hal_EfuseParseMACAddr_8188EU(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
- Hal_ReadPowerSavingMode88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadTxPowerInfo88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- rtl8188e_EfuseParseChnlPlan(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseXtal_8188E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadAntennaDiversity88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadThermalMeter_88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
- ledpriv->bRegUseLed = true;
-}
-
-static void ResumeTxBeacon(struct adapter *adapt)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
-
- /* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */
- /* which should be read from register to a global variable. */
-
- rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) | BIT(6));
- haldata->RegFwHwTxQCtrl |= BIT(6);
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0xff);
- haldata->RegReg542 |= BIT(0);
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
-}
-
-static void StopTxBeacon(struct adapter *adapt)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
-
- /* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */
- /* which should be read from register to a global variable. */
-
- rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) & (~BIT(6)));
- haldata->RegFwHwTxQCtrl &= (~BIT(6));
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0x64);
- haldata->RegReg542 &= ~(BIT(0));
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
-
- /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */
-}
-
-static void hw_var_set_opmode(struct adapter *Adapter, u8 *val)
-{
- u8 val8;
- u8 mode = *((u8 *)val);
-
- /* disable Port0 TSF update */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4));
-
- /* set net_type */
- val8 = rtw_read8(Adapter, MSR) & 0x0c;
- val8 |= mode;
- rtw_write8(Adapter, MSR, val8);
-
- if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
- StopTxBeacon(Adapter);
-
- rtw_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
- } else if (mode == _HW_STATE_ADHOC_) {
- ResumeTxBeacon(Adapter);
- rtw_write8(Adapter, REG_BCN_CTRL, 0x1a);
- } else if (mode == _HW_STATE_AP_) {
- ResumeTxBeacon(Adapter);
-
- rtw_write8(Adapter, REG_BCN_CTRL, 0x12);
-
- /* Set RCR */
- rtw_write32(Adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must set to 0,reject ICV_ERR packet */
- /* enable to rx data frame */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
- /* enable to rx ps-poll */
- rtw_write16(Adapter, REG_RXFLTMAP1, 0x0400);
-
- /* Beacon Control related register for first time */
- rtw_write8(Adapter, REG_BCNDMATIM, 0x02); /* 2ms */
-
- rtw_write8(Adapter, REG_ATIMWND, 0x0a); /* 10ms */
- rtw_write16(Adapter, REG_BCNTCFG, 0x00);
- rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0xff04);
- rtw_write16(Adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */
-
- /* reset TSF */
- rtw_write8(Adapter, REG_DUAL_TSF_RST, BIT(0));
+ res = rtw_read8(Adapter, REG_9346CR, &eeValue);
+ if (res)
+ return;
- /* BIT(3) - If set 0, hw will clr bcnq when tx becon ok/fail or port 0 */
- rtw_write8(Adapter, REG_MBID_NUM, rtw_read8(Adapter, REG_MBID_NUM) | BIT(3) | BIT(4));
+ eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
- /* enable BCN0 Function for if1 */
- /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
- rtw_write8(Adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP | EN_BCN_FUNCTION | BIT(1)));
+ efuse_buf = kmalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
+ if (!efuse_buf)
+ return;
+ memset(efuse_buf, 0xFF, EFUSE_MAP_LEN_88E);
- /* dis BCN1 ATIM WND if if2 is station */
- rtw_write8(Adapter, REG_BCN_CTRL_1, rtw_read8(Adapter, REG_BCN_CTRL_1) | BIT(0));
+ if (!(eeValue & BOOT_FROM_EEPROM) && !eeprom->bautoload_fail_flag) {
+ rtl8188e_EfusePowerSwitch(Adapter, true);
+ rtl8188e_ReadEFuse(Adapter, EFUSE_MAP_LEN_88E, efuse_buf);
+ rtl8188e_EfusePowerSwitch(Adapter, false);
}
-}
-
-void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
- struct dm_priv *pdmpriv = &haldata->dmpriv;
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
- switch (variable) {
- case HW_VAR_SET_OPMODE:
- hw_var_set_opmode(Adapter, val);
- break;
- case HW_VAR_BASIC_RATE:
- {
- u16 BrateCfg = 0;
- u8 RateIndex = 0;
-
- /* 2007.01.16, by Emily */
- /* Select RRSR (in Legacy-OFDM and CCK) */
- /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */
- /* We do not use other rates. */
- HalSetBrateCfg(Adapter, val, &BrateCfg);
-
- /* 2011.03.30 add by Luke Lee */
- /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
- /* because CCK 2M has poor TXEVM */
- /* CCK 5.5M & 11M ACK should be enabled for better performance */
-
- BrateCfg = (BrateCfg | 0xd) & 0x15d;
-
- BrateCfg |= 0x01; /* default enable 1M ACK rate */
- /* Set RRSR rate table. */
- rtw_write8(Adapter, REG_RRSR, BrateCfg & 0xff);
- rtw_write8(Adapter, REG_RRSR + 1, (BrateCfg >> 8) & 0xff);
- rtw_write8(Adapter, REG_RRSR + 2, rtw_read8(Adapter, REG_RRSR + 2) & 0xf0);
-
- /* Set RTS initial rate */
- while (BrateCfg > 0x1) {
- BrateCfg = (BrateCfg >> 1);
- RateIndex++;
- }
- /* Ziv - Check */
- rtw_write8(Adapter, REG_INIRTS_RATE_SEL, RateIndex);
- }
- break;
- case HW_VAR_CORRECT_TSF:
- {
- u64 tsf;
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- tsf = pmlmeext->TSFValue - do_div(pmlmeext->TSFValue,
- pmlmeinfo->bcn_interval * 1024) - 1024; /* us */
- if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
- StopTxBeacon(Adapter);
-
- /* disable related TSF function */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(3)));
-
- rtw_write32(Adapter, REG_TSFTR, tsf);
- rtw_write32(Adapter, REG_TSFTR + 4, tsf >> 32);
-
- /* enable related TSF function */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(3));
+ /* parse the eeprom/efuse content */
+ Hal_EfuseParseIDCode88E(Adapter, efuse_buf);
+ Hal_EfuseParseMACAddr_8188EU(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
- if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
- ResumeTxBeacon(Adapter);
- }
- break;
- case HW_VAR_MLME_SITESURVEY:
- if (*((u8 *)val)) { /* under sitesurvey */
- /* config RCR to receive different BSSID & not to receive data frame */
- u32 v = rtw_read32(Adapter, REG_RCR);
- v &= ~(RCR_CBSSID_BCN);
- rtw_write32(Adapter, REG_RCR, v);
- /* reject all data frame */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
-
- /* disable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4));
- } else { /* sitesurvey done */
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if ((is_client_associated_to_ap(Adapter)) ||
- ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) {
- /* enable to rx data frame */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
-
- /* enable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
- } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
- /* enable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
- }
- rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_BCN);
- }
- break;
- case HW_VAR_SLOT_TIME:
- {
- u8 u1bAIFS, aSifsTime;
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- rtw_write8(Adapter, REG_SLOT, val[0]);
-
- if (pmlmeinfo->WMM_enable == 0) {
- if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
- aSifsTime = 10;
- else
- aSifsTime = 16;
-
- u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
-
- /* <Roger_EXP> Temporary removed, 2008.06.20. */
- rtw_write8(Adapter, REG_EDCA_VO_PARAM, u1bAIFS);
- rtw_write8(Adapter, REG_EDCA_VI_PARAM, u1bAIFS);
- rtw_write8(Adapter, REG_EDCA_BE_PARAM, u1bAIFS);
- rtw_write8(Adapter, REG_EDCA_BK_PARAM, u1bAIFS);
- }
- }
- break;
- case HW_VAR_DM_FLAG:
- podmpriv->SupportAbility = *((u8 *)val);
- break;
- case HW_VAR_DM_FUNC_OP:
- if (val[0])
- podmpriv->BK_SupportAbility = podmpriv->SupportAbility;
- else
- podmpriv->SupportAbility = podmpriv->BK_SupportAbility;
- break;
- case HW_VAR_DM_FUNC_RESET:
- podmpriv->SupportAbility = pdmpriv->InitODMFlag;
- break;
- case HW_VAR_DM_FUNC_CLR:
- podmpriv->SupportAbility = 0;
- break;
- case HW_VAR_AMPDU_FACTOR:
- {
- u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9};
- u8 FactorToSet;
- u8 *pRegToSet;
- u8 index = 0;
-
- pRegToSet = RegToSet_Normal; /* 0xb972a841; */
- FactorToSet = *((u8 *)val);
- if (FactorToSet <= 3) {
- FactorToSet = (1 << (FactorToSet + 2));
- if (FactorToSet > 0xf)
- FactorToSet = 0xf;
-
- for (index = 0; index < 4; index++) {
- if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4))
- pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4);
-
- if ((pRegToSet[index] & 0x0f) > FactorToSet)
- pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet);
-
- rtw_write8(Adapter, (REG_AGGLEN_LMT + index), pRegToSet[index]);
- }
- }
- }
- break;
- case HW_VAR_H2C_MEDIA_STATUS_RPT:
- rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
- break;
- default:
- break;
- }
+ Hal_ReadPowerSavingMode88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
+ Hal_ReadTxPowerInfo88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
+ rtl8188e_EfuseParseChnlPlan(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8188E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
+ Hal_ReadAntennaDiversity88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
+ Hal_ReadThermalMeter_88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
+ ledpriv->bRegUseLed = true;
+ kfree(efuse_buf);
}
void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
@@ -1190,6 +1043,8 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u32 bcn_ctrl_reg = REG_BCN_CTRL;
+ int res;
+ u8 reg;
/* reset TSF, enable update TSF, correcting TSF On Beacon */
/* BCN interval */
@@ -1200,7 +1055,10 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
rtw_write8(adapt, REG_SLOT, 0x09);
- value32 = rtw_read32(adapt, REG_TCR);
+ res = rtw_read32(adapt, REG_TCR, &value32);
+ if (res)
+ return;
+
value32 &= ~TSFRST;
rtw_write32(adapt, REG_TCR, value32);
@@ -1213,9 +1071,13 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
_BeaconFunctionEnable(adapt, true, true);
- ResumeTxBeacon(adapt);
+ rtw_resume_tx_beacon(adapt);
+
+ res = rtw_read8(adapt, bcn_ctrl_reg, &reg);
+ if (res)
+ return;
- rtw_write8(adapt, bcn_ctrl_reg, rtw_read8(adapt, bcn_ctrl_reg) | BIT(1));
+ rtw_write8(adapt, bcn_ctrl_reg, reg | BIT(1));
}
void rtl8188eu_init_default_value(struct adapter *adapt)
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index d5e674542a78..c1a4d023f627 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -94,40 +94,47 @@ static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
return status;
}
-u8 rtw_read8(struct adapter *adapter, u32 addr)
+int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data)
{
struct io_priv *io_priv = &adapter->iopriv;
struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
- u8 data;
- usb_read(intf, value, &data, 1);
-
- return data;
+ return usb_read(intf, value, data, 1);
}
-u16 rtw_read16(struct adapter *adapter, u32 addr)
+int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data)
{
struct io_priv *io_priv = &adapter->iopriv;
struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
- __le16 data;
+ __le16 le_data;
+ int res;
+
+ res = usb_read(intf, value, &le_data, 2);
+ if (res)
+ return res;
- usb_read(intf, value, &data, 2);
+ *data = le16_to_cpu(le_data);
- return le16_to_cpu(data);
+ return 0;
}
-u32 rtw_read32(struct adapter *adapter, u32 addr)
+int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data)
{
struct io_priv *io_priv = &adapter->iopriv;
struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
- __le32 data;
+ __le32 le_data;
+ int res;
+
+ res = usb_read(intf, value, &le_data, 4);
+ if (res)
+ return res;
- usb_read(intf, value, &data, 4);
+ *data = le32_to_cpu(le_data);
- return le32_to_cpu(data);
+ return 0;
}
int rtw_write8(struct adapter *adapter, u32 addr, u8 val)
diff --git a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
deleted file mode 100644
index e4c5b5d23cb4..000000000000
--- a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __HAL8188EPWRSEQ_H__
-#define __HAL8188EPWRSEQ_H__
-
-#include "HalPwrSeqCmd.h"
-
-extern struct wl_pwr_cfg rtl8188E_power_on_flow[];
-extern struct wl_pwr_cfg rtl8188E_card_disable_flow[];
-extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[];
-
-#endif /* __HAL8188EPWRSEQ_H__ */
diff --git a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
index 20d73ca781e8..c571ad9478ea 100644
--- a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
+++ b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
@@ -22,19 +22,6 @@
le32_to_cpu((*(__le32 *)(__rxstatusdesc + 16))
#define GET_TX_RPT2_DESC_MACID_VALID_2_88E(__rxstatusdesc) \
le32_to_cpu((*(__le32 *)(__rxstatusdesc + 20))
-
-#define GET_TX_REPORT_TYPE1_RERTY_0(__paddr) \
- le16_get_bits(*(__le16 *)__paddr, GENMASK(15, 0))
-#define GET_TX_REPORT_TYPE1_RERTY_1(__paddr) \
- LE_BITS_TO_1BYTE(__paddr + 2, 0, 8)
-#define GET_TX_REPORT_TYPE1_RERTY_2(__paddr) \
- LE_BITS_TO_1BYTE(__paddr + 3, 0, 8)
-#define GET_TX_REPORT_TYPE1_RERTY_3(__paddr) \
- LE_BITS_TO_1BYTE(__paddr + 4, 0, 8)
-#define GET_TX_REPORT_TYPE1_RERTY_4(__paddr) \
- LE_BITS_TO_1BYTE(__paddr + 5, 0, 8)
-#define GET_TX_REPORT_TYPE1_DROP_0(__paddr) \
- LE_BITS_TO_1BYTE(__paddr + 6, 0, 8)
/* End rate adaptive define */
int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm);
diff --git a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
index 49c02cce569e..0886300d26bf 100644
--- a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
+++ b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
@@ -6,54 +6,13 @@
#include "drv_types.h"
-/*---------------------------------------------*/
-/* 3 The value of cmd: 4 bits */
-/*---------------------------------------------*/
-
-#define PWR_CMD_WRITE 0x01
- /* offset: the read register offset */
- /* msk: the mask of the write bits */
- /* value: write value */
- /* note: driver shall implement this cmd by read & msk after write */
-
-#define PWR_CMD_POLLING 0x02
- /* offset: the read register offset */
- /* msk: the mask of the polled value */
- /* value: the value to be polled, masked by the msd field. */
- /* note: driver shall implement this cmd by */
- /* do{ */
- /* if ( (Read(offset) & msk) == (value & msk) ) */
- /* break; */
- /* } while (not timeout); */
-
-#define PWR_CMD_DELAY 0x03
- /* offset: the value to delay */
- /* msk: N/A */
- /* value: the unit of delay, 0: us, 1: ms */
-
-#define PWR_CMD_END 0x04
- /* offset: N/A */
- /* msk: N/A */
- /* value: N/A */
-
-enum pwrseq_cmd_delat_unit {
- PWRSEQ_DELAY_US,
- PWRSEQ_DELAY_MS,
-};
-
-struct wl_pwr_cfg {
- u16 offset;
- u8 cmd:4;
- u8 msk;
- u8 value;
+enum r8188eu_pwr_seq {
+ PWR_ON_FLOW,
+ DISABLE_FLOW,
+ LPS_ENTER_FLOW,
};
-#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
-#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
-#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
-#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
-
/* Prototype of protected function. */
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg PwrCfgCmd[]);
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, enum r8188eu_pwr_seq seq);
#endif
diff --git a/drivers/staging/r8188eu/include/basic_types.h b/drivers/staging/r8188eu/include/basic_types.h
deleted file mode 100644
index ffb21170e898..000000000000
--- a/drivers/staging/r8188eu/include/basic_types.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __BASIC_TYPES_H__
-#define __BASIC_TYPES_H__
-
-#include <linux/types.h>
-#define NDIS_OID uint
-
-typedef void (*proc_t)(void *);
-
-#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
-
-/* port from fw */
-/* TODO: Macros Below are Sync from SD7-Driver. It is necessary
- * to check correctness */
-
-/*
- * Call endian free function when
- * 1. Read/write packet content.
- * 2. Before write integer to IO.
- * 3. After read integer from IO.
-*/
-
-/* Convert little data endian to host ordering */
-#define EF1BYTE(_val) \
- ((u8)(_val))
-
-/* Create a bit mask */
-#define BIT_LEN_MASK_8(__bitlen) \
- (0xFF >> (8 - (__bitlen)))
-
-/*Description:
- * Return 4-byte value in host byte ordering from
- * 4-byte pointer in little-endian system.
- */
-#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
- (EF1BYTE(*((u8 *)(__pstart))))
-
-/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_8(__bitlen) \
- )
-
-#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
- (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
-
-#endif /* __BASIC_TYPES_H__ */
diff --git a/drivers/staging/r8188eu/include/hal_com.h b/drivers/staging/r8188eu/include/hal_com.h
index 56ba356b5371..d7e333f6ce39 100644
--- a/drivers/staging/r8188eu/include/hal_com.h
+++ b/drivers/staging/r8188eu/include/hal_com.h
@@ -131,9 +131,6 @@
#define REG_NOA_DESC_START 0x05E8
#define REG_NOA_DESC_COUNT 0x05EC
-#include "HalVerDef.h"
-void dump_chip_info(struct HAL_VERSION ChipVersion);
-
/* return the final channel plan decision */
u8 hal_com_get_channel_plan(struct adapter *padapter,
u8 hw_channel_plan,
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index a56f3d6ca399..ab6856d8a090 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -8,31 +8,15 @@
#include "drv_types.h"
#include "Hal8188EPhyCfg.h"
-enum hw_variables {
- HW_VAR_SET_OPMODE,
- HW_VAR_BASIC_RATE,
- HW_VAR_CORRECT_TSF,
- HW_VAR_MLME_SITESURVEY,
- HW_VAR_SLOT_TIME,
- HW_VAR_DM_FLAG,
- HW_VAR_DM_FUNC_OP,
- HW_VAR_DM_FUNC_RESET,
- HW_VAR_DM_FUNC_CLR,
- HW_VAR_AMPDU_FACTOR,
- HW_VAR_H2C_MEDIA_STATUS_RPT,
-};
-
typedef s32 (*c2h_id_filter)(u8 id);
-#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
-
void rtl8188eu_interface_configure(struct adapter *adapt);
void ReadAdapterInfo8188EU(struct adapter *Adapter);
void rtl8188eu_init_default_value(struct adapter *adapt);
void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet);
u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState);
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf);
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf);
void hal_notch_filter_8188e(struct adapter *adapter, bool enable);
@@ -44,8 +28,6 @@ int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
unsigned int rtl8188eu_inirp_init(struct adapter *Adapter);
-void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
-
uint rtw_hal_init(struct adapter *padapter);
uint rtw_hal_deinit(struct adapter *padapter);
void rtw_hal_stop(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index 15636a808f52..e7a4f8af497a 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -624,13 +624,6 @@ enum _PUBLIC_ACTION {
ACT_PUBLIC_MAX
};
-/* BACK action code */
-enum rtw_ieee80211_back_actioncode {
- RTW_WLAN_ACTION_ADDBA_REQ = 0,
- RTW_WLAN_ACTION_ADDBA_RESP = 1,
- RTW_WLAN_ACTION_DELBA = 2,
-};
-
#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
* 00:50:F2 */
#define WME_OUI_TYPE 2
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
index f1a703643e74..72990a1cdc66 100644
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ b/drivers/staging/r8188eu/include/osdep_service.h
@@ -5,7 +5,6 @@
#define __OSDEP_SERVICE_H_
#include <linux/sched/signal.h>
-#include "basic_types.h"
#define _FAIL 0
#define _SUCCESS 1
@@ -77,8 +76,6 @@ void *rtw_malloc2d(int h, int w, int size);
spin_lock_init(&((q)->lock)); \
} while (0)
-void rtw_usleep_os(int us);
-
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
{
return del_timer_sync(ptimer);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index d2a069d4e1cc..5cd62b216720 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -26,11 +26,6 @@
#include "odm_RegConfig8188E.h"
#include "odm_RTL8188E.h"
-/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
-#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
-#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow
-#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188E_enter_lps_flow
-
#define DRVINFO_SZ 4 /* unit is 8bytes */
#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len) & 0x7F ? 1 : 0))
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index ef42c4b2f20c..9e7b1f89037c 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -9,7 +9,6 @@
#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
-#define MAC_ADDR_LEN 6
/* 8188E PKT_BUFF_ACCESS_CTRL value */
#define TXPKT_BUF_SELECT 0x69
#define RXPKT_BUF_SELECT 0xA5
@@ -427,12 +426,6 @@
#define MAX_MSS_DENSITY_2T 0x13
#define MAX_MSS_DENSITY_1T 0x0A
-/* EEPROM enable when set 1 */
-#define CmdEEPROM_En BIT(5)
-/* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
-#define CmdEERPOMSEL BIT(4)
-#define Cmd9346CR_9356SEL BIT(4)
-
/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
#define GPIOSEL_GPIO 0
#define GPIOSEL_ENBT BIT(5)
@@ -1059,142 +1052,6 @@ Current IOREG MAP
#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
-/* RTL8188E SDIO Configuration */
-
-/* I/O bus domain address mapping */
-#define SDIO_LOCAL_BASE 0x10250000
-#define WLAN_IOREG_BASE 0x10260000
-#define FIRMWARE_FIFO_BASE 0x10270000
-#define TX_HIQ_BASE 0x10310000
-#define TX_MIQ_BASE 0x10320000
-#define TX_LOQ_BASE 0x10330000
-#define RX_RX0FF_BASE 0x10340000
-
-/* SDIO host local register space mapping. */
-#define SDIO_LOCAL_MSK 0x0FFF
-#define WLAN_IOREG_MSK 0x7FFF
-#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
-#define WLAN_RX0FF_MSK 0x0003
-
-/* Without ref to the SDIO Device ID */
-#define SDIO_WITHOUT_REF_DEVICE_ID 0
-#define SDIO_LOCAL_DEVICE_ID 0 /* 0b[16], 000b[15:13] */
-#define WLAN_TX_HIQ_DEVICE_ID 4 /* 0b[16], 100b[15:13] */
-#define WLAN_TX_MIQ_DEVICE_ID 5 /* 0b[16], 101b[15:13] */
-#define WLAN_TX_LOQ_DEVICE_ID 6 /* 0b[16], 110b[15:13] */
-#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
-#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
-
-/* SDIO Tx Free Page Index */
-#define HI_QUEUE_IDX 0
-#define MID_QUEUE_IDX 1
-#define LOW_QUEUE_IDX 2
-#define PUBLIC_QUEUE_IDX 3
-
-#define SDIO_MAX_TX_QUEUE 3 /* HIQ, MIQ and LOQ */
-#define SDIO_MAX_RX_QUEUE 1
-
-/* SDIO Tx Control */
-#define SDIO_REG_TX_CTRL 0x0000
-/* SDIO Host Interrupt Mask */
-#define SDIO_REG_HIMR 0x0014
-/* SDIO Host Interrupt Service Routine */
-#define SDIO_REG_HISR 0x0018
-/* HCI Current Power Mode */
-#define SDIO_REG_HCPWM 0x0019
-/* RXDMA Request Length */
-#define SDIO_REG_RX0_REQ_LEN 0x001C
-/* Free Tx Buffer Page */
-#define SDIO_REG_FREE_TXPG 0x0020
-/* HCI Current Power Mode 1 */
-#define SDIO_REG_HCPWM1 0x0024
-/* HCI Current Power Mode 2 */
-#define SDIO_REG_HCPWM2 0x0026
-/* HTSF Informaion */
-#define SDIO_REG_HTSFR_INFO 0x0030
-/* HCI Request Power Mode 1 */
-#define SDIO_REG_HRPWM1 0x0080
-/* HCI Request Power Mode 2 */
-#define SDIO_REG_HRPWM2 0x0082
-/* HCI Power Save Clock */
-#define SDIO_REG_HPS_CLKR 0x0084
-/* SDIO HCI Suspend Control */
-#define SDIO_REG_HSUS_CTRL 0x0086
-/* SDIO Host Extension Interrupt Mask Always */
-#define SDIO_REG_HIMR_ON 0x0090
-/* SDIO Host Extension Interrupt Status Always */
-#define SDIO_REG_HISR_ON 0x0091
-
-#define SDIO_HIMR_DISABLED 0
-
-/* RTL8188E SDIO Host Interrupt Mask Register */
-#define SDIO_HIMR_RX_REQUEST_MSK BIT(0)
-#define SDIO_HIMR_AVAL_MSK BIT(1)
-#define SDIO_HIMR_TXERR_MSK BIT(2)
-#define SDIO_HIMR_RXERR_MSK BIT(3)
-#define SDIO_HIMR_TXFOVW_MSK BIT(4)
-#define SDIO_HIMR_RXFOVW_MSK BIT(5)
-#define SDIO_HIMR_TXBCNOK_MSK BIT(6)
-#define SDIO_HIMR_TXBCNERR_MSK BIT(7)
-#define SDIO_HIMR_BCNERLY_INT_MSK BIT(16)
-#define SDIO_HIMR_C2HCMD_MSK BIT(17)
-#define SDIO_HIMR_CPWM1_MSK BIT(18)
-#define SDIO_HIMR_CPWM2_MSK BIT(19)
-#define SDIO_HIMR_HSISR_IND_MSK BIT(20)
-#define SDIO_HIMR_GTINT3_IND_MSK BIT(21)
-#define SDIO_HIMR_GTINT4_IND_MSK BIT(22)
-#define SDIO_HIMR_PSTIMEOUT_MSK BIT(23)
-#define SDIO_HIMR_OCPINT_MSK BIT(24)
-#define SDIO_HIMR_ATIMEND_MSK BIT(25)
-#define SDIO_HIMR_ATIMEND_E_MSK BIT(26)
-#define SDIO_HIMR_CTWEND_MSK BIT(27)
-
-/* RTL8188E SDIO Specific */
-#define SDIO_HIMR_MCU_ERR_MSK BIT(28)
-#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT(29)
-
-/* SDIO Host Interrupt Service Routine */
-#define SDIO_HISR_RX_REQUEST BIT(0)
-#define SDIO_HISR_AVAL BIT(1)
-#define SDIO_HISR_TXERR BIT(2)
-#define SDIO_HISR_RXERR BIT(3)
-#define SDIO_HISR_TXFOVW BIT(4)
-#define SDIO_HISR_RXFOVW BIT(5)
-#define SDIO_HISR_TXBCNOK BIT(6)
-#define SDIO_HISR_TXBCNERR BIT(7)
-#define SDIO_HISR_BCNERLY_INT BIT(16)
-#define SDIO_HISR_C2HCMD BIT(17)
-#define SDIO_HISR_CPWM1 BIT(18)
-#define SDIO_HISR_CPWM2 BIT(19)
-#define SDIO_HISR_HSISR_IND BIT(20)
-#define SDIO_HISR_GTINT3_IND BIT(21)
-#define SDIO_HISR_GTINT4_IND BIT(22)
-#define SDIO_HISR_PSTIME BIT(23)
-#define SDIO_HISR_OCPINT BIT(24)
-#define SDIO_HISR_ATIMEND BIT(25)
-#define SDIO_HISR_ATIMEND_E BIT(26)
-#define SDIO_HISR_CTWEND BIT(27)
-
-/* RTL8188E SDIO Specific */
-#define SDIO_HISR_MCU_ERR BIT(28)
-#define SDIO_HISR_TSF_BIT32_TOGGLE BIT(29)
-
-#define MASK_SDIO_HISR_CLEAR \
- (SDIO_HISR_TXERR | SDIO_HISR_RXERR | SDIO_HISR_TXFOVW |\
- SDIO_HISR_RXFOVW | SDIO_HISR_TXBCNOK | SDIO_HISR_TXBCNERR |\
- SDIO_HISR_C2HCMD | SDIO_HISR_CPWM1 | SDIO_HISR_CPWM2 |\
- SDIO_HISR_HSISR_IND | SDIO_HISR_GTINT3_IND | SDIO_HISR_GTINT4_IND |\
- SDIO_HISR_PSTIMEOUT | SDIO_HISR_OCPINT)
-
-/* SDIO HCI Suspend Control Register */
-#define HCI_RESUME_PWR_RDY BIT(1)
-#define HCI_SUS_CTRL BIT(0)
-
-/* SDIO Tx FIFO related */
-/* The number of Tx FIFO free page */
-#define SDIO_TX_FREE_PG_QUEUE 4
-#define SDIO_TX_FIFO_PAGE_SZ 128
-
/* 0xFE00h ~ 0xFE55h USB Configuration */
/* 2 USB Information (0xFE17) */
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
index d8d48ace356c..94d735b1d0db 100644
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/r8188eu/include/rtw_eeprom.h
@@ -7,19 +7,9 @@
#include "osdep_service.h"
#include "drv_types.h"
-#define HWSET_MAX_SIZE_512 512
-
struct eeprom_priv {
u8 bautoload_fail_flag;
u8 mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */
- u8 EepromOrEfuse;
- u8 efuse_eeprom_data[HWSET_MAX_SIZE_512] __aligned(4);
};
-void eeprom_write16(struct adapter *padapter, u16 reg, u16 data);
-u16 eeprom_read16(struct adapter *padapter, u16 reg);
-void read_eeprom_content(struct adapter *padapter);
-void eeprom_read_sz(struct adapter *adapt, u16 reg, u8 *data, u32 sz);
-void read_eeprom_content_by_attrib(struct adapter *padapter);
-
#endif /* __RTL871X_EEPROM_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_efuse.h b/drivers/staging/r8188eu/include/rtw_efuse.h
index 2daf69f554d5..3d688a0e6dfb 100644
--- a/drivers/staging/r8188eu/include/rtw_efuse.h
+++ b/drivers/staging/r8188eu/include/rtw_efuse.h
@@ -8,6 +8,4 @@
void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf);
-void EFUSE_ShadowMapUpdate(struct adapter *adapter);
-
#endif
diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h
index 6910e2b430e2..925c7967ac04 100644
--- a/drivers/staging/r8188eu/include/rtw_io.h
+++ b/drivers/staging/r8188eu/include/rtw_io.h
@@ -220,9 +220,9 @@ void unregister_intf_hdl(struct intf_hdl *pintfhdl);
void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u8 rtw_read8(struct adapter *adapter, u32 addr);
-u16 rtw_read16(struct adapter *adapter, u32 addr);
-u32 rtw_read32(struct adapter *adapter, u32 addr);
+int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data);
+int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data);
+int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data);
void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
u32 rtw_read_port(struct adapter *adapter, u8 *pmem);
void rtw_read_port_cancel(struct adapter *adapter);
@@ -283,7 +283,7 @@ void free_io_queue(struct adapter *adapter);
void async_bus_io(struct io_queue *pio_q);
void bus_sync_io(struct io_queue *pio_q);
u32 _ioreq2rwmem(struct io_queue *pio_q);
-void dev_power_down(struct adapter * Adapter, u8 bpwrup);
+void dev_power_down(struct adapter *Adapter, u8 bpwrup);
#define PlatformEFIOWrite1Byte(_a,_b,_c) \
rtw_write8(_a,_b,_c)
diff --git a/drivers/staging/r8188eu/include/rtw_iol.h b/drivers/staging/r8188eu/include/rtw_iol.h
index fb88ebc1dabb..099f5a075274 100644
--- a/drivers/staging/r8188eu/include/rtw_iol.h
+++ b/drivers/staging/r8188eu/include/rtw_iol.h
@@ -41,22 +41,14 @@ int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame);
void read_efuse_from_txpktbuf(struct adapter *adapter, int bcnhead,
u8 *content, u16 *size);
-int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr,
- u8 value, u8 mask);
-int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr,
- u16 value, u16 mask);
-int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
- u32 value, u32 mask);
-int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
- u16 addr, u32 value, u32 mask);
-#define rtw_IOL_append_WB_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value) ,(mask))
-#define rtw_IOL_append_WW_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value),(mask))
-#define rtw_IOL_append_WD_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WD_cmd((xmit_frame), (addr), (value), (mask))
-#define rtw_IOL_append_WRF_cmd(xmit_frame, rf_path, addr, value, mask) \
- _rtw_IOL_append_WRF_cmd((xmit_frame),(rf_path), (addr), (value), (mask))
+int rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u8 value, u8 mask);
+int rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u16 value, u16 mask);
+int rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u32 value, u32 mask);
+int rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
+ u16 addr, u32 value, u32 mask);
u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
index 2c14cb23d9ad..d6b0c1c2f9a2 100644
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ b/drivers/staging/r8188eu/include/rtw_led.h
@@ -37,9 +37,11 @@ enum LED_STATE_871x {
LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
};
-struct LED_871x {
+struct led_priv {
struct adapter *padapter;
+ bool bRegUseLed;
+
enum LED_STATE_871x CurrLedState; /* Current LED state. */
enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
* either RTW_LED_ON or RTW_LED_OFF are. */
@@ -58,11 +60,6 @@ struct LED_871x {
struct delayed_work blink_work;
};
-struct led_priv{
- struct LED_871x SwLed0;
- bool bRegUseLed;
-};
-
void rtl8188eu_InitSwLeds(struct adapter *padapter);
void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 573d65b175cc..343ce1ce4b3d 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -424,6 +424,9 @@ void invalidate_cam_all(struct adapter *padapter);
int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
+void rtw_mlme_under_site_survey(struct adapter *adapter);
+void rtw_mlme_site_survey_done(struct adapter *adapter);
+
void site_survey(struct adapter *padapter);
u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame,
struct wlan_bssid_ex *bssid);
@@ -455,6 +458,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len);
void update_IOT_info(struct adapter *padapter);
void update_capinfo(struct adapter *adapter, u16 updatecap);
void update_wireless_mode(struct adapter *padapter);
+void rtw_set_basic_rate(struct adapter *adapter, u8 *rates);
void update_tx_basic_rate(struct adapter *padapter, u8 modulation);
void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id);
int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie,
@@ -468,8 +472,7 @@ unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps);
void Update_RA_Entry(struct adapter *padapter, u32 mac_id);
void set_sta_rate(struct adapter *padapter, struct sta_info *psta);
-unsigned int receive_disconnect(struct adapter *padapter,
- unsigned char *macaddr, unsigned short reason);
+void receive_disconnect(struct adapter *padapter, unsigned char *macaddr, unsigned short reason);
unsigned char get_highest_rate_idx(u32 mask);
int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps);
@@ -524,12 +527,13 @@ int issue_deauth(struct adapter *padapter, unsigned char *da,
unsigned short reason);
int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason,
int try_cnt, int wait_ms);
-void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
- unsigned char action, unsigned short status);
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, u16 status);
unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
unsigned int send_beacon(struct adapter *padapter);
bool get_beacon_valid_bit(struct adapter *adapter);
void clear_beacon_valid_bit(struct adapter *adapter);
+void rtw_resume_tx_beacon(struct adapter *adapt);
+void rtw_stop_tx_beacon(struct adapter *adapt);
void start_clnt_assoc(struct adapter *padapter);
void start_clnt_auth(struct adapter *padapter);
@@ -544,12 +548,8 @@ unsigned int OnProbeReq(struct adapter *padapter,
struct recv_frame *precv_frame);
unsigned int OnProbeRsp(struct adapter *padapter,
struct recv_frame *precv_frame);
-unsigned int DoReserved(struct adapter *padapter,
- struct recv_frame *precv_frame);
unsigned int OnBeacon(struct adapter *padapter,
struct recv_frame *precv_frame);
-unsigned int OnAtim(struct adapter *padapter,
- struct recv_frame *precv_frame);
unsigned int OnDisassoc(struct adapter *padapter,
struct recv_frame *precv_frame);
unsigned int OnAuth(struct adapter *padapter,
@@ -592,9 +592,6 @@ void addba_timer_hdl(struct sta_info *psta);
bool cckrates_included(unsigned char *rate, int ratelen);
bool cckratesonly_included(unsigned char *rate, int ratelen);
-void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
-void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
-
struct cmd_hdl {
uint parmsize;
u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf);
diff --git a/drivers/staging/r8188eu/include/usb_ops_linux.h b/drivers/staging/r8188eu/include/usb_ops_linux.h
index 641f059ffaf7..966688eedf66 100644
--- a/drivers/staging/r8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/r8188eu/include/usb_ops_linux.h
@@ -26,6 +26,4 @@
#define usb_read_interrupt_complete(purb, regs) \
usb_read_interrupt_complete(purb)
-unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr);
-
#endif
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 8dd280e2739a..7f91dac2e41b 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -687,12 +687,9 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
enum ndis_802_11_network_infra networkType;
int ret = 0;
-
-
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
- ret = -EPERM;
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
goto exit;
- }
if (!padapter->hw_init_completed) {
ret = -EPERM;
@@ -931,12 +928,9 @@ static int rtw_wx_set_wap(struct net_device *dev,
struct wlan_network *pnetwork = NULL;
enum ndis_802_11_auth_mode authmode;
-
-
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
- ret = -1;
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
goto exit;
- }
if (!padapter->bup) {
ret = -1;
@@ -1049,10 +1043,9 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
- ret = -1;
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
goto exit;
- }
if (padapter->bDriverStopped) {
ret = -1;
@@ -1252,10 +1245,9 @@ static int rtw_wx_set_essid(struct net_device *dev,
uint ret = 0, len;
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
- ret = -1;
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
goto exit;
- }
if (!padapter->bup) {
ret = -1;
@@ -1593,7 +1585,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
if (erq->length > 0) {
wep.KeyLength = erq->length <= 5 ? 5 : 13;
- wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ wep.Length = wep.KeyLength + offsetof(struct ndis_802_11_wep, KeyMaterial);
} else {
wep.KeyLength = 0;
@@ -3126,18 +3118,29 @@ exit:
static void mac_reg_dump(struct adapter *padapter)
{
int i, j = 1;
+ u32 reg;
+ int res;
+
pr_info("\n ======= MAC REG =======\n");
for (i = 0x0; i < 0x300; i += 4) {
if (j % 4 == 1)
pr_info("0x%02x", i);
- pr_info(" 0x%08x ", rtw_read32(padapter, i));
+
+ res = rtw_read32(padapter, i, &reg);
+ if (!res)
+ pr_info(" 0x%08x ", reg);
+
if ((j++) % 4 == 0)
pr_info("\n");
}
for (i = 0x400; i < 0x800; i += 4) {
if (j % 4 == 1)
pr_info("0x%02x", i);
- pr_info(" 0x%08x ", rtw_read32(padapter, i));
+
+ res = rtw_read32(padapter, i, &reg);
+ if (!res)
+ pr_info(" 0x%08x ", reg);
+
if ((j++) % 4 == 0)
pr_info("\n");
}
@@ -3145,13 +3148,18 @@ static void mac_reg_dump(struct adapter *padapter)
static void bb_reg_dump(struct adapter *padapter)
{
- int i, j = 1;
+ int i, j = 1, res;
+ u32 reg;
+
pr_info("\n ======= BB REG =======\n");
for (i = 0x800; i < 0x1000; i += 4) {
if (j % 4 == 1)
pr_info("0x%02x", i);
- pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ res = rtw_read32(padapter, i, &reg);
+ if (!res)
+ pr_info(" 0x%08x ", reg);
+
if ((j++) % 4 == 0)
pr_info("\n");
}
@@ -3178,6 +3186,7 @@ static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
{
struct hal_data_8188e *haldata = &adapter->haldata;
struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+ int res;
switch (dm_func) {
case 0:
@@ -3193,7 +3202,9 @@ static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
if (!(odmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
struct rtw_dig *digtable = &odmpriv->DM_DigTable;
- digtable->CurIGValue = rtw_read8(adapter, 0xc50);
+ res = rtw_read8(adapter, 0xc50, &digtable->CurIGValue);
+ (void)res;
+ /* FIXME: return an error to caller */
}
odmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
break;
@@ -3202,6 +3213,14 @@ static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
}
}
+static void rtw_set_dm_func_flag(struct adapter *adapter, u32 odm_flag)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ odmpriv->SupportAbility = odm_flag;
+}
+
static int rtw_dbg_port(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3329,8 +3348,9 @@ static int rtw_dbg_port(struct net_device *dev,
u16 reg = arg;
u16 start_value = 0;
u32 write_num = extra_arg;
- int i;
+ int i, res;
struct xmit_frame *xmit_frame;
+ u8 val8;
xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
if (!xmit_frame) {
@@ -3343,7 +3363,9 @@ static int rtw_dbg_port(struct net_device *dev,
if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
ret = -EPERM;
- rtw_read8(padapter, reg);
+ /* FIXME: is this read necessary? */
+ res = rtw_read8(padapter, reg, &val8);
+ (void)res;
}
break;
@@ -3352,8 +3374,8 @@ static int rtw_dbg_port(struct net_device *dev,
u16 reg = arg;
u16 start_value = 200;
u32 write_num = extra_arg;
-
- int i;
+ u16 val16;
+ int i, res;
struct xmit_frame *xmit_frame;
xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
@@ -3367,7 +3389,9 @@ static int rtw_dbg_port(struct net_device *dev,
if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
ret = -EPERM;
- rtw_read16(padapter, reg);
+ /* FIXME: is this read necessary? */
+ res = rtw_read16(padapter, reg, &val16);
+ (void)res;
}
break;
case 0x08: /* continuous write dword test */
@@ -3390,7 +3414,8 @@ static int rtw_dbg_port(struct net_device *dev,
if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
ret = -EPERM;
- rtw_read32(padapter, reg);
+ /* FIXME: is this read necessary? */
+ ret = rtw_read32(padapter, reg, &write_num);
}
break;
}
@@ -3434,7 +3459,7 @@ static int rtw_dbg_port(struct net_device *dev,
case 0x06:
{
u32 ODMFlag = (u32)(0x0f & arg);
- SetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ rtw_set_dm_func_flag(padapter, ODMFlag);
}
break;
case 0x07:
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 891c85b088ca..cac9553666e6 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -740,19 +740,32 @@ static void rtw_fifo_cleanup(struct adapter *adapter)
{
struct pwrctrl_priv *pwrpriv = &adapter->pwrctrlpriv;
u8 trycnt = 100;
+ int res;
+ u32 reg;
/* pause tx */
rtw_write8(adapter, REG_TXPAUSE, 0xff);
/* keep sn */
- adapter->xmitpriv.nqos_ssn = rtw_read16(adapter, REG_NQOS_SEQ);
+ /* FIXME: return an error to caller */
+ res = rtw_read16(adapter, REG_NQOS_SEQ, &adapter->xmitpriv.nqos_ssn);
+ if (res)
+ return;
if (!pwrpriv->bkeepfwalive) {
/* RX DMA stop */
+ res = rtw_read32(adapter, REG_RXPKT_NUM, &reg);
+ if (res)
+ return;
+
rtw_write32(adapter, REG_RXPKT_NUM,
- (rtw_read32(adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
+ (reg | RW_RELEASE_EN));
do {
- if (!(rtw_read32(adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
+ res = rtw_read32(adapter, REG_RXPKT_NUM, &reg);
+ if (res)
+ continue;
+
+ if (!(reg & RXDMA_IDLE))
break;
} while (trycnt--);
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
index 812acd59be79..3504a0a9ba87 100644
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/r8188eu/os_dep/osdep_service.c
@@ -42,14 +42,6 @@ Otherwise, there will be racing condition.
Caller must check if the list is empty before calling rtw_list_delete
*/
-void rtw_usleep_os(int us)
-{
- if (1 < (us / 1000))
- msleep(1);
- else
- msleep((us / 1000) + 1);
-}
-
static const struct device_type wlan_type = {
.name = "wlan",
};
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index 68869c5daeff..cc2b44f60c46 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -372,7 +372,7 @@ handle_dualmac:
free_adapter:
if (pnetdev)
rtw_free_netdev(pnetdev);
- else if (padapter)
+ else
vfree(padapter);
return NULL;
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index 0269e602b217..220e592b757c 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -7,7 +7,7 @@
#include "../include/usb_ops_linux.h"
#include "../include/rtl8188e_recv.h"
-unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
+static unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
{
unsigned int pipe = 0, ep_num = 0;
struct usb_device *pusbd = pdvobj->pusbdev;
diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c
index e430c64e9068..91a1e4e3219a 100644
--- a/drivers/staging/r8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/r8188eu/os_dep/xmit_linux.c
@@ -71,7 +71,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
if (!pxmitbuf->pallocated_buf)
return _FAIL;
- pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
pxmitbuf->dma_transfer_addr = 0;
pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 37715afb0210..42f81b23a144 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -205,30 +205,28 @@ static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
struct rtllib_txb *txb;
int i;
- txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
- gfp_mask);
+ txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
if (!txb)
return NULL;
- memset(txb, 0, sizeof(struct rtllib_txb));
txb->nr_frags = nr_frags;
txb->frag_size = cpu_to_le16(txb_size);
for (i = 0; i < nr_frags; i++) {
txb->fragments[i] = dev_alloc_skb(txb_size);
- if (unlikely(!txb->fragments[i])) {
- i--;
- break;
- }
+ if (unlikely(!txb->fragments[i]))
+ goto err_free;
memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
}
- if (unlikely(i != nr_frags)) {
- while (i >= 0)
- dev_kfree_skb_any(txb->fragments[i--]);
- kfree(txb);
- return NULL;
- }
+
return txb;
+
+err_free:
+ while (--i >= 0)
+ dev_kfree_skb_any(txb->fragments[i]);
+ kfree(txb);
+
+ return NULL;
}
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index cf9a240924f2..da2c41c9b92f 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -17,17 +17,9 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include "rtllib.h"
-struct modes_unit {
- char *mode_string;
- int mode_size;
-};
-static struct modes_unit rtllib_modes[] = {
- {"a", 1},
- {"b", 1},
- {"g", 1},
- {"?", 1},
- {"N-24G", 5},
- {"N-5G", 4},
+
+static const char * const rtllib_modes[] = {
+ "a", "b", "g", "?", "N-24G", "N-5G"
};
#define MAX_CUSTOM_LEN 64
@@ -72,10 +64,9 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
for (i = 0; i < ARRAY_SIZE(rtllib_modes); i++) {
- if (network->mode&(1<<i)) {
- sprintf(pname, rtllib_modes[i].mode_string,
- rtllib_modes[i].mode_size);
- pname += rtllib_modes[i].mode_size;
+ if (network->mode & BIT(i)) {
+ strcpy(pname, rtllib_modes[i]);
+ pname += strlen(rtllib_modes[i]);
}
}
*pname = '\0';
@@ -158,7 +149,8 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
max_rate = rate;
}
iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.fixed = 0;
iwe.u.bitrate.value = max_rate * 500000;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_PARAM_LEN);
iwe.cmd = IWEVCUSTOM;
@@ -285,7 +277,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
- struct iw_point *erq = &(wrqu->encoding);
+ struct iw_point *erq = &wrqu->encoding;
struct net_device *dev = ieee->dev;
struct rtllib_security sec = {
.flags = 0
@@ -312,8 +304,9 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
netdev_dbg(ieee->dev,
"Disabling encryption on key %d.\n", key);
lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- } else
+ } else {
netdev_dbg(ieee->dev, "Disabling encryption.\n");
+ }
/* Check all the keys to see if any are still configured,
* and if no key index was provided, de-init them all
@@ -457,7 +450,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
- struct iw_point *erq = &(wrqu->encoding);
+ struct iw_point *erq = &wrqu->encoding;
int len, key;
struct lib80211_crypt_data *crypt;
@@ -608,7 +601,6 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
goto done;
}
*crypt = new_crypt;
-
}
if (ext->key_len > 0 && (*crypt)->ops->set_key &&
@@ -732,8 +724,9 @@ int rtllib_wx_set_auth(struct rtllib_device *ieee,
} else if (data->value & IW_AUTH_ALG_LEAP) {
ieee->open_wep = 1;
ieee->auth_mode = 2;
- } else
+ } else {
return -EINVAL;
+ }
break;
case IW_AUTH_WPA_ENABLED:
@@ -776,7 +769,7 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
kfree(ieee->wps_ie);
ieee->wps_ie = NULL;
if (len) {
- if (len != ie[1]+2)
+ if (len != ie[1] + 2)
return -EINVAL;
buf = kmemdup(ie, len, GFP_KERNEL);
if (!buf)
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 14ca00a2789b..1942cb849374 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1013,7 +1013,7 @@ typedef struct r8192_priv {
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
- struct timer_list fsync_timer;
+ struct delayed_work fsync_work;
bool bfsync_processing; /* 500ms Fsync timer is active or not */
u32 rate_record;
u32 rateCountDiffRecord;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 725bf5ca9e34..00fc8fd344db 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2578,19 +2578,20 @@ static void dm_init_fsync(struct net_device *dev)
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0);
+ INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback);
}
static void dm_deInit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- del_timer_sync(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
}
-void dm_fsync_timer_callback(struct timer_list *t)
+void dm_fsync_work_callback(struct work_struct *work)
{
- struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct r8192_priv *priv =
+ container_of(work, struct r8192_priv, fsync_work.work);
struct net_device *dev = priv->ieee80211->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
@@ -2657,17 +2658,16 @@ void dm_fsync_timer_callback(struct timer_list *t)
}
}
if (bDoubleTimeInterval) {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv
+ ->ieee80211->fsync_time_interval *
+ priv->ieee80211->fsync_multiple_timeinterval));
} else {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv
+ ->ieee80211->fsync_time_interval));
}
} else {
/* Let Register return to default value; */
@@ -2695,7 +2695,7 @@ static void dm_EndSWFsync(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- del_timer_sync(&(priv->fsync_timer));
+ cancel_delayed_work_sync(&priv->fsync_work);
/* Let Register return to default value; */
if (priv->bswitch_fsync) {
@@ -2736,11 +2736,9 @@ static void dm_StartSWFsync(struct net_device *dev)
if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
}
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
}
@@ -3002,7 +3000,7 @@ static void dm_check_txrateandretrycount(struct net_device *dev)
/* for initial tx rate */
/*priv->stats.last_packet_rate = read_nic_byte(dev, INITIAL_TX_RATE_REG);*/
read_nic_byte(dev, INITIAL_TX_RATE_REG, &ieee->softmac_stats.last_packet_rate);
- /* for tx tx retry count */
+ /* for tx retry count */
/*priv->stats.txretrycount = read_nic_dword(dev, TX_RETRY_COUNT_REG);*/
read_nic_dword(dev, TX_RETRY_COUNT_REG, &ieee->softmac_stats.txretrycount);
}
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 0b2a1c688597..2159018b4e38 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -166,7 +166,7 @@ void dm_force_tx_fw_info(struct net_device *dev,
void dm_init_edca_turbo(struct net_device *dev);
void dm_rf_operation_test_callback(unsigned long data);
void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_timer_callback(struct timer_list *t);
+void dm_fsync_work_callback(struct work_struct *work);
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 1bdbd0971f73..f878b04076d8 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -960,7 +960,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
return _FAIL;
frame_type = GetFrameSubType(pframe);
- if (frame_type == WIFI_ASSOCREQ)
+ if (frame_type == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
else /* WIFI_REASSOCREQ */
ie_offset = _REASOCREQ_IE_OFFSET_;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 43b5604c0bca..cb6d287f580d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -366,9 +366,8 @@ void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter)
int freq = (int)cur_network->network.configuration.ds_config;
struct ieee80211_channel *chan;
- if (pwdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (pwdev->iftype != NL80211_IFTYPE_ADHOC)
return;
- }
if (!rtw_cfg80211_check_bss(padapter)) {
struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
@@ -450,8 +449,8 @@ check_bss:
notify_channel = ieee80211_get_channel(wiphy, freq);
- roam_info.channel = notify_channel;
- roam_info.bssid = cur_network->network.mac_address;
+ roam_info.links[0].channel = notify_channel;
+ roam_info.links[0].bssid = cur_network->network.mac_address;
roam_info.req_ie =
pmlmepriv->assoc_req+sizeof(struct ieee80211_hdr_3addr)+2;
roam_info.req_ie_len =
@@ -544,9 +543,8 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
goto exit;
}
- if (wep_key_len > 0) {
+ if (wep_key_len > 0)
wep_key_len = wep_key_len <= 5 ? 5 : 13;
- }
if (psecuritypriv->bWepDefaultKeyIdxSet == 0) {
/* wep default key has not been set, so use this key index as default key. */
@@ -582,9 +580,8 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13) {
+ if (param->u.crypt.key_len == 13)
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
- }
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
@@ -626,24 +623,16 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
}
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) /* psk/802_1x */
- {
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- {
- if (param->u.crypt.set_tx == 1) /* pairwise key */
- {
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ if (param->u.crypt.set_tx == 1) { /* pairwise key */
memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- if (strcmp(param->u.crypt.alg, "WEP") == 0)
- {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
psta->dot118021XPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
- {
psta->dot118021XPrivacy = _WEP104_;
- }
- }
- else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
- {
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psta->dot118021XPrivacy = _TKIP_;
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
@@ -653,14 +642,10 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psecuritypriv->busetkipkey = true;
- }
- else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
- {
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psta->dot118021XPrivacy = _AES_;
- }
- else
- {
+ } else {
psta->dot118021XPrivacy = _NO_PRIVACY_;
}
@@ -670,21 +655,14 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psta->bpairwise_key_installed = true;
- }
- else/* group key??? */
- {
- if (strcmp(param->u.crypt.alg, "WEP") == 0)
- {
+ } else { /* group key??? */
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
- {
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
- }
- }
- else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
- {
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
@@ -696,15 +674,11 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psecuritypriv->busetkipkey = true;
- }
- else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
- {
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- }
- else
- {
+ } else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
@@ -717,8 +691,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta)
- {
+ if (pbcmc_sta) {
pbcmc_sta->ieee8021x_blocked = false;
pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
}
@@ -746,20 +719,16 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
- if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len)
- {
+ if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) {
ret = -EINVAL;
goto exit;
}
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
- {
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
if (param->u.crypt.idx >= WEP_KEYS
- || param->u.crypt.idx >= BIP_MAX_KEYID
- )
- {
+ || param->u.crypt.idx >= BIP_MAX_KEYID) {
ret = -EINVAL;
goto exit;
}
@@ -770,19 +739,16 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
}
}
- if (strcmp(param->u.crypt.alg, "WEP") == 0)
- {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0))
- {
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
ret = -EINVAL;
goto exit;
}
- if (psecuritypriv->bWepDefaultKeyIdxSet == 0)
- {
+ if (psecuritypriv->bWepDefaultKeyIdxSet == 0) {
/* wep default key has not been set, so use this key index as default key. */
wep_key_len = wep_key_len <= 5 ? 5 : 13;
@@ -791,8 +757,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (wep_key_len == 13)
- {
+ if (wep_key_len == 13) {
psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
}
@@ -809,13 +774,11 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
goto exit;
}
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) /* 802_1x */
- {
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */
struct sta_info *psta, *pbcmc_sta;
struct sta_priv *pstapriv = &padapter->stapriv;
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) /* sta mode */
- {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) { /* sta mode */
psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
if (psta) {
/* Jeff: don't disable ieee8021x_blocked while clearing key */
@@ -824,18 +787,15 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
- {
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
- if (param->u.crypt.set_tx == 1)/* pairwise key */
- {
+ if (param->u.crypt.set_tx == 1) { /* pairwise key */
memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- if (strcmp(param->u.crypt.alg, "TKIP") == 0)/* set mic key */
- {
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
/* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
@@ -845,11 +805,8 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
}
rtw_setstakey_cmd(padapter, psta, true, true);
- }
- else/* group key */
- {
- if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0)
- {
+ } else { /* group key */
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) {
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
@@ -857,9 +814,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
- }
- else if (strcmp(param->u.crypt.alg, "BIP") == 0)
- {
+ } else if (strcmp(param->u.crypt.alg, "BIP") == 0) {
/* save the IGTK key, length 16 bytes */
memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/*
@@ -873,25 +828,19 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
}
pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (!pbcmc_sta)
- {
+ if (!pbcmc_sta) {
/* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
- }
- else
- {
+ } else {
/* Jeff: don't disable ieee8021x_blocked while clearing key */
if (strcmp(param->u.crypt.alg, "none") != 0)
pbcmc_sta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
- {
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
}
- }
- else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) /* adhoc mode */
- {
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { /* adhoc mode */
}
}
@@ -949,39 +898,29 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!mac_addr || is_broadcast_ether_addr(mac_addr))
- {
param->u.crypt.set_tx = 0; /* for wpa/wpa2 group key */
- } else {
+ else
param->u.crypt.set_tx = 1; /* for wpa/wpa2 pairwise key */
- }
param->u.crypt.idx = key_index;
if (params->seq_len && params->seq)
- {
memcpy(param->u.crypt.seq, (u8 *)params->seq, params->seq_len);
- }
- if (params->key_len && params->key)
- {
+ if (params->key_len && params->key) {
param->u.crypt.key_len = params->key_len;
memcpy(param->u.crypt.key, (u8 *)params->key, params->key_len);
}
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
- {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
- }
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
- {
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
if (mac_addr)
memcpy(param->sta_addr, (void *)mac_addr, ETH_ALEN);
ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
- }
- else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true
- || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)
- {
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
}
@@ -1007,8 +946,7 @@ static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
struct adapter *padapter = rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
- if (key_index == psecuritypriv->dot11PrivacyKeyIndex)
- {
+ if (key_index == psecuritypriv->dot11PrivacyKeyIndex) {
/* clear the flag of wep default key set. */
psecuritypriv->bWepDefaultKeyIdxSet = 0;
}
@@ -1024,16 +962,14 @@ static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
struct adapter *padapter = rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
- if ((key_index < WEP_KEYS) && ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) /* set wep default key */
- {
+ if ((key_index < WEP_KEYS) && ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) { /* set wep default key */
psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
psecuritypriv->dot11PrivacyKeyIndex = key_index;
psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (psecuritypriv->dot11DefKeylen[key_index] == 13)
- {
+ if (psecuritypriv->dot11DefKeylen[key_index] == 13) {
psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
}
@@ -1071,9 +1007,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
/* for infra./P2PClient mode */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
- && check_fwstate(pmlmepriv, _FW_LINKED)
- )
- {
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
struct wlan_network *cur_network = &(pmlmepriv->cur_network);
if (memcmp((u8 *)mac, cur_network->network.mac_address, ETH_ALEN)) {
@@ -1099,9 +1033,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)
|| check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
|| check_fwstate(pmlmepriv, WIFI_AP_STATE))
- && check_fwstate(pmlmepriv, _FW_LINKED)
- )
- {
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
/* TODO: should acquire station info... */
}
@@ -1121,8 +1053,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
int ret = 0;
- if (adapter_to_dvobj(padapter)->processing_dev_remove == true)
- {
+ if (adapter_to_dvobj(padapter)->processing_dev_remove == true) {
ret = -EPERM;
goto exit;
}
@@ -1141,8 +1072,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
old_type = rtw_wdev->iftype;
- if (old_type != type)
- {
+ if (old_type != type) {
pmlmeext->action_public_rxseq = 0xffff;
pmlmeext->action_public_dialog_token = 0xff;
}
@@ -1164,8 +1094,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
rtw_wdev->iftype = type;
- if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false)
- {
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) {
rtw_wdev->iftype = old_type;
ret = -EPERM;
goto exit;
@@ -1230,9 +1159,7 @@ void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
/* report network only if the current channel set contains the channel to which this network belongs */
if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.configuration.ds_config) >= 0
- && true == rtw_validate_ssid(&(pnetwork->network.ssid))
- )
- {
+ && true == rtw_validate_ssid(&(pnetwork->network.ssid))) {
/* ev =translate_scan(padapter, a, pnetwork, ev, stop); */
rtw_cfg80211_inform_bss(padapter, pnetwork);
}
@@ -1249,13 +1176,10 @@ static int rtw_cfg80211_set_probe_req_wpsp2pie(struct adapter *padapter, char *b
u8 *wps_ie;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- if (len > 0)
- {
+ if (len > 0) {
wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen);
- if (wps_ie)
- {
- if (pmlmepriv->wps_probe_req_ie)
- {
+ if (wps_ie) {
+ if (pmlmepriv->wps_probe_req_ie) {
pmlmepriv->wps_probe_req_ie_len = 0;
kfree(pmlmepriv->wps_probe_req_ie);
pmlmepriv->wps_probe_req_ie = NULL;
@@ -1307,10 +1231,8 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
pwdev_priv->scan_request = request;
spin_unlock_bh(&pwdev_priv->scan_req_lock);
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
- {
- if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS|_FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
- {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS|_FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true) {
need_indicate_scan_done = true;
goto check_need_indicate_scan_done;
}
@@ -1333,15 +1255,13 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true)
- {
- static unsigned long lastscantime = 0;
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
+ static unsigned long lastscantime;
unsigned long passtime;
passtime = jiffies_to_msecs(jiffies - lastscantime);
lastscantime = jiffies;
- if (passtime > 12000)
- {
+ if (passtime > 12000) {
need_indicate_scan_done = true;
goto check_need_indicate_scan_done;
}
@@ -1380,9 +1300,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
} else if (request->n_channels <= 4) {
for (j = request->n_channels - 1; j >= 0; j--)
for (i = 0; i < survey_times; i++)
- {
memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel));
- }
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times * request->n_channels);
} else {
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, NULL, 0);
@@ -1391,14 +1309,11 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
if (_status == false)
- {
ret = -1;
- }
check_need_indicate_scan_done:
kfree(ssid);
- if (need_indicate_scan_done)
- {
+ if (need_indicate_scan_done) {
rtw_cfg80211_surveydone_event_callback(padapter);
rtw_cfg80211_indicate_scan_done(padapter, false);
}
@@ -1424,9 +1339,7 @@ static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv, u32
if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
- {
psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
- }
return 0;
@@ -1585,8 +1498,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
if (pairwise_cipher == 0)
pairwise_cipher = WPA_CIPHER_NONE;
- switch (group_cipher)
- {
+ switch (group_cipher) {
case WPA_CIPHER_NONE:
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
@@ -1609,8 +1521,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
break;
}
- switch (pairwise_cipher)
- {
+ switch (pairwise_cipher) {
case WPA_CIPHER_NONE:
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
@@ -1731,8 +1642,7 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
rtw_wdev->iftype = NL80211_IFTYPE_STATION;
- if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false)
- {
+ if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false) {
rtw_wdev->iftype = old_type;
ret = -EPERM;
goto leave_ibss;
@@ -1792,9 +1702,8 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
ret = -EBUSY;
goto exit;
}
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
rtw_scan_abort(padapter);
- }
psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
@@ -2086,6 +1995,7 @@ static u8 rtw_get_chan_type(struct adapter *adapter)
}
static int cfg80211_rtw_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct adapter *adapter = wiphy_to_adapter(wiphy);
@@ -2287,9 +2197,8 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
mon_ndev->ieee80211_ptr = mon_wdev;
ret = cfg80211_register_netdevice(mon_ndev);
- if (ret) {
+ if (ret)
goto out;
- }
*ndev = pwdev_priv->pmon_ndev = mon_ndev;
memcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ+1);
@@ -2402,11 +2311,10 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l
rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, WLAN_EID_VENDOR_SPECIFIC, P2P_OUI, 4);
rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, WLAN_EID_VENDOR_SPECIFIC, WFD_OUI, 4);
- if (rtw_check_beacon_data(adapter, pbuf, len) == _SUCCESS) {
+ if (rtw_check_beacon_data(adapter, pbuf, len) == _SUCCESS)
ret = 0;
- } else {
+ else
ret = -EINVAL;
- }
kfree(pbuf);
@@ -2446,7 +2354,8 @@ static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *nd
return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
}
-static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
+ unsigned int link_id)
{
return 0;
}
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index f1e9e80044ed..e88fe1a998f8 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -460,10 +460,8 @@ int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
spi->clk_div = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
spi->write_en = srb->cmnd[6];
- dev_dbg(rtsx_dev(chip), "%s: ", __func__);
- dev_dbg(rtsx_dev(chip), "spi_clock = %d, ", spi->spi_clock);
- dev_dbg(rtsx_dev(chip), "clk_div = %d, ", spi->clk_div);
- dev_dbg(rtsx_dev(chip), "write_en = %d\n", spi->write_en);
+ dev_dbg(rtsx_dev(chip), "spi_clock = %d, clk_div = %d, write_en = %d\n",
+ spi->spi_clock, spi->clk_div, spi->write_en);
return STATUS_SUCCESS;
}
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 029d9acec47d..e0c7ff3352bf 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -15,7 +15,7 @@ static struct dvi_ctrl_device dcft_supported_dvi_controller[] = {
#ifdef DVI_CTRL_SII164
{
.init = sii164InitChip,
- .get_vendor_id = sii164GetVendorID,
+ .get_vendor_id = sii164_get_vendor_id,
.get_device_id = sii164GetDeviceID,
#ifdef SII164_FULL_FUNCTIONS
.reset_chip = sii164ResetChip,
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 7002567a47d2..63c9e8b6ffb3 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -15,7 +15,7 @@ enum dpms {
}
void ddk750_set_dpms(enum dpms state);
-void sm750_set_power_mode(unsigned int powerMode);
+void sm750_set_power_mode(unsigned int mode);
void sm750_set_current_gate(unsigned int gate);
/*
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 73e0e9f41ec5..3da1796cd7aa 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -29,13 +29,13 @@ static char *gDviCtrlChipName = "Silicon Image SiI 164";
#endif
/*
- * sii164GetVendorID
+ * sii164_get_vendor_id
* This function gets the vendor ID of the DVI controller chip.
*
* Output:
* Vendor ID
*/
-unsigned short sii164GetVendorID(void)
+unsigned short sii164_get_vendor_id(void)
{
unsigned short vendorID;
@@ -140,7 +140,7 @@ long sii164InitChip(unsigned char edge_select,
#endif
/* Check if SII164 Chip exists */
- if ((sii164GetVendorID() == SII164_VENDOR_ID) &&
+ if ((sii164_get_vendor_id() == SII164_VENDOR_ID) &&
(sii164GetDeviceID() == SII164_DEVICE_ID)) {
/*
* Initialize SII164 controller chip.
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index d940cb729066..ca330f6a43e2 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -27,7 +27,7 @@ long sii164InitChip(unsigned char edgeSelect,
unsigned char pllFilterEnable,
unsigned char pllFilterValue);
-unsigned short sii164GetVendorID(void);
+unsigned short sii164_get_vendor_id(void);
unsigned short sii164GetDeviceID(void);
#ifdef SII164_FULL_FUNCTIONS
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index e429b33b4d39..f4c2c9506d86 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -25,12 +25,14 @@ MODULE_PARM_DESC(force_bulk, "Force use of vchiq bulk for audio");
static void bcm2835_audio_lock(struct bcm2835_audio_instance *instance)
{
mutex_lock(&instance->vchi_mutex);
- vchiq_use_service(instance->service_handle);
+ vchiq_use_service(instance->alsa_stream->chip->vchi_ctx->instance,
+ instance->service_handle);
}
static void bcm2835_audio_unlock(struct bcm2835_audio_instance *instance)
{
- vchiq_release_service(instance->service_handle);
+ vchiq_release_service(instance->alsa_stream->chip->vchi_ctx->instance,
+ instance->service_handle);
mutex_unlock(&instance->vchi_mutex);
}
@@ -44,8 +46,8 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance
init_completion(&instance->msg_avail_comp);
}
- status = vchiq_queue_kernel_message(instance->service_handle,
- m, sizeof(*m));
+ status = vchiq_queue_kernel_message(instance->alsa_stream->chip->vchi_ctx->instance,
+ instance->service_handle, m, sizeof(*m));
if (status) {
dev_err(instance->dev,
"vchi message queue failed: %d, msg=%d\n",
@@ -89,11 +91,13 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
return bcm2835_audio_send_msg(instance, &m, wait);
}
-static enum vchiq_status audio_vchi_callback(enum vchiq_reason reason,
+static enum vchiq_status audio_vchi_callback(struct vchiq_instance *vchiq_instance,
+ enum vchiq_reason reason,
struct vchiq_header *header,
unsigned int handle, void *userdata)
{
- struct bcm2835_audio_instance *instance = vchiq_get_service_userdata(handle);
+ struct bcm2835_audio_instance *instance = vchiq_get_service_userdata(vchiq_instance,
+ handle);
struct vc_audio_msg *m;
if (reason != VCHIQ_MESSAGE_AVAILABLE)
@@ -114,7 +118,7 @@ static enum vchiq_status audio_vchi_callback(enum vchiq_reason reason,
dev_err(instance->dev, "unexpected callback type=%d\n", m->type);
}
- vchiq_release_message(handle, header);
+ vchiq_release_message(vchiq_instance, instance->service_handle, header);
return VCHIQ_SUCCESS;
}
@@ -143,7 +147,8 @@ vc_vchi_audio_init(struct vchiq_instance *vchiq_instance,
}
/* Finished with the service for now */
- vchiq_release_service(instance->service_handle);
+ vchiq_release_service(instance->alsa_stream->chip->vchi_ctx->instance,
+ instance->service_handle);
return 0;
}
@@ -153,10 +158,12 @@ static void vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
int status;
mutex_lock(&instance->vchi_mutex);
- vchiq_use_service(instance->service_handle);
+ vchiq_use_service(instance->alsa_stream->chip->vchi_ctx->instance,
+ instance->service_handle);
/* Close all VCHI service connections */
- status = vchiq_close_service(instance->service_handle);
+ status = vchiq_close_service(instance->alsa_stream->chip->vchi_ctx->instance,
+ instance->service_handle);
if (status) {
dev_err(instance->dev,
"failed to close VCHI service connection (status=%d)\n",
@@ -226,7 +233,7 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
goto deinit;
bcm2835_audio_lock(instance);
- vchiq_get_peer_version(instance->service_handle,
+ vchiq_get_peer_version(vchi_ctx->instance, instance->service_handle,
&instance->peer_version);
bcm2835_audio_unlock(instance);
if (instance->peer_version < 2 || force_bulk)
@@ -322,6 +329,8 @@ int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
unsigned int size, void *src)
{
struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ struct bcm2835_vchi_ctx *vchi_ctx = alsa_stream->chip->vchi_ctx;
+ struct vchiq_instance *vchiq_instance = vchi_ctx->instance;
struct vc_audio_msg m = {
.type = VC_AUDIO_MSG_TYPE_WRITE,
.write.count = size,
@@ -343,15 +352,14 @@ int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
count = size;
if (!instance->max_packet) {
/* Send the message to the videocore */
- status = vchiq_bulk_transmit(instance->service_handle, src,
- count, NULL,
- VCHIQ_BULK_MODE_BLOCKING);
+ status = vchiq_bulk_transmit(vchiq_instance, instance->service_handle, src, count,
+ NULL, VCHIQ_BULK_MODE_BLOCKING);
} else {
while (count > 0) {
int bytes = min(instance->max_packet, count);
- status = vchiq_queue_kernel_message(instance->service_handle,
- src, bytes);
+ status = vchiq_queue_kernel_message(vchiq_instance,
+ instance->service_handle, src, bytes);
src += bytes;
count -= bytes;
}
diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
index c93f2f3e87bb..db1441c0cc66 100644
--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
@@ -53,9 +53,12 @@ struct vchiq_element {
unsigned int size;
};
+struct vchiq_instance;
+
struct vchiq_service_base {
int fourcc;
- enum vchiq_status (*callback)(enum vchiq_reason reason,
+ enum vchiq_status (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
struct vchiq_header *header,
unsigned int handle,
void *bulk_userdata);
@@ -71,7 +74,8 @@ struct vchiq_completion_data_kernel {
struct vchiq_service_params_kernel {
int fourcc;
- enum vchiq_status (*callback)(enum vchiq_reason reason,
+ enum vchiq_status (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
struct vchiq_header *header,
unsigned int handle,
void *bulk_userdata);
@@ -88,23 +92,27 @@ extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
const struct vchiq_service_params_kernel *params,
unsigned int *pservice);
-extern enum vchiq_status vchiq_close_service(unsigned int service);
-extern enum vchiq_status vchiq_use_service(unsigned int service);
-extern enum vchiq_status vchiq_release_service(unsigned int service);
-extern void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header);
-extern void vchiq_release_message(unsigned int service,
- struct vchiq_header *header);
-extern int vchiq_queue_kernel_message(unsigned int handle, void *data,
- unsigned int size);
-extern enum vchiq_status vchiq_bulk_transmit(unsigned int service,
- const void *data, unsigned int size, void *userdata,
- enum vchiq_bulk_mode mode);
-extern enum vchiq_status vchiq_bulk_receive(unsigned int service,
- void *data, unsigned int size, void *userdata,
- enum vchiq_bulk_mode mode);
-extern void *vchiq_get_service_userdata(unsigned int service);
-extern enum vchiq_status vchiq_get_peer_version(unsigned int handle,
- short *peer_version);
-extern struct vchiq_header *vchiq_msg_hold(unsigned int handle);
+extern enum vchiq_status vchiq_close_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern enum vchiq_status vchiq_use_service(struct vchiq_instance *instance, unsigned int service);
+extern enum vchiq_status vchiq_release_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_header *header);
+extern void vchiq_release_message(struct vchiq_instance *instance, unsigned int service,
+ struct vchiq_header *header);
+extern int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size);
+extern enum vchiq_status vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service,
+ const void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service,
+ void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern void *vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int service);
+extern enum vchiq_status vchiq_get_peer_version(struct vchiq_instance *instance,
+ unsigned int handle,
+ short *peer_version);
+extern struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle);
#endif /* VCHIQ_H */
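Taken together, the prototype changes above thread a struct vchiq_instance
through the whole kernel-side API instead of relying on global state. A
minimal sketch of a caller under the new signatures (example_send and its
parameter names are illustrative, not part of the patch):

	/* Hypothetical helper: send one message over an already-open service. */
	static int example_send(struct vchiq_instance *instance, unsigned int handle,
				void *data, unsigned int size)
	{
		int ret;

		vchiq_use_service(instance, handle);
		ret = vchiq_queue_kernel_message(instance, handle, data, size);
		vchiq_release_service(instance, handle);

		return ret;
	}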
diff --git a/drivers/staging/vc04_services/interface/TESTING b/drivers/staging/vc04_services/interface/TESTING
new file mode 100644
index 000000000000..a6d63efcbcb9
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/TESTING
@@ -0,0 +1,82 @@
+This document contains some hints for testing the functionality of the VCHIQ
+driver without requiring any additional hardware beyond the Raspberry Pi itself.
+
+* Requirements & limitations
+
+Testing the VCHIQ driver requires a Raspberry Pi with one of the following SoCs:
+ - BCM2835 (e.g. Raspberry Pi Zero W)
+ - BCM2836 (e.g. Raspberry Pi 2)
+ - BCM2837 (e.g. Raspberry Pi 3 B+)
+
+The BCM2711 used in the Raspberry Pi 4 is currently not supported in the
+mainline kernel.
+
+There are no specific requirements on the VideoCore firmware to get VCHIQ
+working.
+
+The test scenarios described in this document are based on the tool vchiq_test.
+Its source code is available here: https://github.com/raspberrypi/userland
+
+* Configuration
+
+Here are the most common kernel configurations:
+
+ 1. BCM2835 target SoC (ARM 32 bit)
+
+ Just use bcm2835_defconfig, which already has VCHIQ enabled.
+
+ 2. BCM2836/7 target SoC (ARM 32 bit)
+
+ Use the multi_v7_defconfig as a base and then enable all VCHIQ options.
+
+ 3. BCM2837 target SoC (ARM 64 bit)
+
+ Use the defconfig as a base and then enable all VCHIQ options (see below).
+
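+ For cases 2 and 3, the relevant options are the VCHIQ core driver and its
+ character device interface. Assuming the symbol names used by this kernel
+ version (they may differ in other trees), a config fragment such as the
+ following enables them:
+
+ CONFIG_BCM2835_VCHIQ=y
+ CONFIG_VCHIQ_CDEV=y
+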
+* Scenarios
+
+ * Initial test
+
+ Check that the driver is probed and that /dev/vchiq has been created, for
+ example with the command below.
+
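+ For example:
+
+ Command: ls -l /dev/vchiq
+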
+ * Functional test
+
+ Command: vchiq_test -f 10
+
+ Expected output:
+ Functional test - iters:10
+ ======== iteration 1 ========
+ Testing bulk transfer for alignment.
+ Testing bulk transfer at PAGE_SIZE.
+ ...
+
+ * Ping test
+
+ Command: vchiq_test -p 1
+
+ Expected output:
+ Ping test - service:echo, iters:1, version 3
+ vchi ping (size 0) -> 57.000000us
+ vchi ping (size 0, 0 async, 0 oneway) -> 122.000000us
+ vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us
+ vchi bulk (size 0, 0 oneway) -> 230.000000us
+ vchi ping (size 0) -> 49.000000us
+ vchi ping (size 0, 0 async, 0 oneway) -> 70.000000us
+ vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us
+ vchi bulk (size 0, 0 oneway) -> 266.000000us
+ vchi ping (size 0, 1 async, 0 oneway) -> 65.000000us
+ vchi bulk (size 0, 0 oneway) -> 456.000000us
+ vchi ping (size 0, 2 async, 0 oneway) -> 74.000000us
+ vchi bulk (size 0, 0 oneway) -> 640.000000us
+ vchi ping (size 0, 10 async, 0 oneway) -> 125.000000us
+ vchi bulk (size 0, 0 oneway) -> 2309.000000us
+ vchi ping (size 0, 0 async, 1 oneway) -> 70.000000us
+ vchi ping (size 0, 0 async, 2 oneway) -> 76.000000us
+ vchi ping (size 0, 0 async, 10 oneway) -> 105.000000us
+ vchi ping (size 0, 10 async, 10 oneway) -> 165.000000us
+ vchi ping (size 0, 100 async, 0 oneway) -> nanus
+ vchi bulk (size 0, 0 oneway) -> nanus
+ vchi ping (size 0, 0 async, 100 oneway) -> nanus
+ vchi ping (size 0, 100 async, 100 oneway) -> infus
+ vchi ping (size 0, 200 async, 0 oneway) -> infus
+ ...
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 0596ac61e286..dc33490ba7fb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -148,12 +148,11 @@ static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
-static struct device *g_dev;
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static enum vchiq_status
-vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir);
static irqreturn_t
@@ -175,17 +174,17 @@ vchiq_doorbell_irq(int irq, void *dev_id)
}
static void
-cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
+cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
if (pagelistinfo->scatterlist_mapped) {
- dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
pagelistinfo->num_pages, pagelistinfo->dma_dir);
}
if (pagelistinfo->pages_need_release)
unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
- dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
+ dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
@@ -212,7 +211,7 @@ is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
*/
static struct vchiq_pagelist_info *
-create_pagelist(char *buf, char __user *ubuf,
+create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
size_t count, unsigned short type)
{
struct pagelist *pagelist;
@@ -250,7 +249,7 @@ create_pagelist(char *buf, char __user *ubuf,
/* Allocate enough storage to hold the page pointers and the page
* list
*/
- pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
+ pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
GFP_KERNEL);
vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
@@ -292,7 +291,7 @@ create_pagelist(char *buf, char __user *ubuf,
size_t bytes = PAGE_SIZE - off;
if (!pg) {
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
@@ -315,7 +314,7 @@ create_pagelist(char *buf, char __user *ubuf,
/* This is probably due to the process being killed */
if (actual_pages > 0)
unpin_user_pages(pages, actual_pages);
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
/* release user pages */
@@ -338,13 +337,13 @@ create_pagelist(char *buf, char __user *ubuf,
count -= len;
}
- dma_buffers = dma_map_sg(g_dev,
+ dma_buffers = dma_map_sg(instance->state->dev,
scatterlist,
num_pages,
pagelistinfo->dma_dir);
if (dma_buffers == 0) {
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
@@ -378,7 +377,7 @@ create_pagelist(char *buf, char __user *ubuf,
char *fragments;
if (down_interruptible(&g_free_fragments_sema)) {
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
return NULL;
}
@@ -397,7 +396,7 @@ create_pagelist(char *buf, char __user *ubuf,
}
static void
-free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
int actual)
{
struct pagelist *pagelist = pagelistinfo->pagelist;
@@ -411,7 +410,7 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
* NOTE: dma_unmap_sg must be called before the
* cpu can touch any of the data/pages.
*/
- dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
pagelistinfo->num_pages, pagelistinfo->dma_dir);
pagelistinfo->scatterlist_mapped = 0;
@@ -460,7 +459,7 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
set_page_dirty(pages[i]);
}
- cleanup_pagelistinfo(pagelistinfo);
+ cleanup_pagelistinfo(instance, pagelistinfo);
}
int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
@@ -519,7 +518,7 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
- err = vchiq_init_state(state, vchiq_slot_zero);
+ err = vchiq_init_state(state, vchiq_slot_zero, dev);
if (err)
return err;
@@ -547,7 +546,6 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return err ? : -ENXIO;
}
- g_dev = dev;
vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
vchiq_slot_zero, &slot_phys);
@@ -604,6 +602,10 @@ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *
void
remote_event_signal(struct remote_event *event)
{
+ /*
+ * Ensure that all writes to shared data structures have completed
+ * before signalling the peer.
+ */
wmb();
event->fired = 1;
@@ -615,12 +617,12 @@ remote_event_signal(struct remote_event *event)
}
int
-vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
+vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
void __user *uoffset, int size, int dir)
{
struct vchiq_pagelist_info *pagelistinfo;
- pagelistinfo = create_pagelist(offset, uoffset, size,
+ pagelistinfo = create_pagelist(instance, offset, uoffset, size,
(dir == VCHIQ_BULK_RECEIVE)
? PAGELIST_READ
: PAGELIST_WRITE);
@@ -640,10 +642,10 @@ vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
}
void
-vchiq_complete_bulk(struct vchiq_bulk *bulk)
+vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
if (bulk && bulk->remote_data && bulk->actual)
- free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
+ free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
bulk->actual);
}
@@ -821,7 +823,7 @@ vchiq_open_service(struct vchiq_instance *instance,
*phandle = service->handle;
status = vchiq_open_service_internal(service, current->pid);
if (status != VCHIQ_SUCCESS) {
- vchiq_remove_service(service->handle);
+ vchiq_remove_service(instance, service->handle);
*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
}
}
@@ -834,8 +836,8 @@ failed:
EXPORT_SYMBOL(vchiq_open_service);
enum vchiq_status
-vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
- void *userdata, enum vchiq_bulk_mode mode)
+vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
enum vchiq_status status;
@@ -843,13 +845,13 @@ vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- status = vchiq_bulk_transfer(handle,
+ status = vchiq_bulk_transfer(instance, handle,
(void *)data, NULL,
size, userdata, mode,
VCHIQ_BULK_TRANSMIT);
break;
case VCHIQ_BULK_MODE_BLOCKING:
- status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
+ status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
VCHIQ_BULK_TRANSMIT);
break;
default:
@@ -871,8 +873,8 @@ vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
-enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
- unsigned int size, void *userdata,
+enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size, void *userdata,
enum vchiq_bulk_mode mode)
{
enum vchiq_status status;
@@ -881,12 +883,12 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- status = vchiq_bulk_transfer(handle, data, NULL,
+ status = vchiq_bulk_transfer(instance, handle, data, NULL,
size, userdata,
mode, VCHIQ_BULK_RECEIVE);
break;
case VCHIQ_BULK_MODE_BLOCKING:
- status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
+ status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
VCHIQ_BULK_RECEIVE);
break;
default:
@@ -909,20 +911,17 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
EXPORT_SYMBOL(vchiq_bulk_receive);
static enum vchiq_status
-vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
- enum vchiq_bulk_dir dir)
+vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir)
{
- struct vchiq_instance *instance;
struct vchiq_service *service;
enum vchiq_status status;
struct bulk_waiter_node *waiter = NULL, *iter;
- service = find_service_by_handle(handle);
+ service = find_service_by_handle(instance, handle);
if (!service)
return VCHIQ_ERROR;
- instance = service->instance;
-
vchiq_service_put(service);
mutex_lock(&instance->bulk_waiter_list_mutex);
@@ -959,7 +958,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
}
}
- status = vchiq_bulk_transfer(handle, data, NULL, size,
+ status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
&waiter->bulk_waiter,
VCHIQ_BULK_MODE_BLOCKING, dir);
if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
@@ -1046,8 +1045,8 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
}
enum vchiq_status
-service_callback(enum vchiq_reason reason, struct vchiq_header *header,
- unsigned int handle, void *bulk_userdata)
+service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
+ struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
/*
* How do we ensure the callback goes to the right client?
@@ -1057,7 +1056,6 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
*/
struct user_service *user_service;
struct vchiq_service *service;
- struct vchiq_instance *instance;
bool skip_completion = false;
DEBUG_INITIALISE(g_state.local);
@@ -1065,14 +1063,13 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
rcu_read_lock();
- service = handle_to_service(handle);
+ service = handle_to_service(instance, handle);
if (WARN_ON(!service)) {
rcu_read_unlock();
return VCHIQ_SUCCESS;
}
user_service = (struct user_service *)service->base.userdata;
- instance = user_service->instance;
if (!instance || instance->closing) {
rcu_read_unlock();
@@ -1318,7 +1315,8 @@ vchiq_get_state(void)
*/
static enum vchiq_status
-vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
+vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
struct vchiq_header *header,
unsigned int service_user, void *bulk_user)
{
@@ -1387,14 +1385,14 @@ vchiq_keepalive_thread_func(void *v)
*/
while (uc--) {
atomic_inc(&arm_state->ka_use_ack_count);
- status = vchiq_use_service(ka_handle);
+ status = vchiq_use_service(instance, ka_handle);
if (status != VCHIQ_SUCCESS) {
vchiq_log_error(vchiq_susp_log_level,
"%s vchiq_use_service error %d", __func__, status);
}
}
while (rc--) {
- status = vchiq_release_service(ka_handle);
+ status = vchiq_release_service(instance, ka_handle);
if (status != VCHIQ_SUCCESS) {
vchiq_log_error(vchiq_susp_log_level,
"%s vchiq_release_service error %d", __func__,
@@ -1590,10 +1588,10 @@ vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
}
enum vchiq_status
-vchiq_use_service(unsigned int handle)
+vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
enum vchiq_status ret = VCHIQ_ERROR;
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
if (service) {
ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
@@ -1604,10 +1602,10 @@ vchiq_use_service(unsigned int handle)
EXPORT_SYMBOL(vchiq_use_service);
enum vchiq_status
-vchiq_release_service(unsigned int handle)
+vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
enum vchiq_status ret = VCHIQ_ERROR;
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
if (service) {
ret = vchiq_release_internal(service->state, service);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index 2aa46b119a46..2851ef6b9cd0 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -86,10 +86,10 @@ extern struct vchiq_state *
vchiq_get_state(void);
enum vchiq_status
-vchiq_use_service(unsigned int handle);
+vchiq_use_service(struct vchiq_instance *instance, unsigned int handle);
extern enum vchiq_status
-vchiq_release_service(unsigned int handle);
+vchiq_release_service(struct vchiq_instance *instance, unsigned int handle);
extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
@@ -138,8 +138,8 @@ static inline int vchiq_register_chrdev(struct device *parent) { return 0; }
#endif /* IS_ENABLED(CONFIG_VCHIQ_CDEV) */
extern enum vchiq_status
-service_callback(enum vchiq_reason reason, struct vchiq_header *header,
- unsigned int handle, void *bulk_userdata);
+service_callback(struct vchiq_instance *vchiq_instance, enum vchiq_reason reason,
+ struct vchiq_header *header, unsigned int handle, void *bulk_userdata);
extern void
free_bulk_waiter(struct vchiq_instance *instance);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 8f99272dbd6f..45ed30bfdbf5 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -13,6 +13,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
+#include "vchiq_arm.h"
#include "vchiq_core.h"
#define VCHIQ_SLOT_HANDLER_STACK 8192
@@ -161,7 +162,6 @@ int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
DEFINE_SPINLOCK(bulk_waiter_spinlock);
static DEFINE_SPINLOCK(quota_spinlock);
-struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
static unsigned int handle_seq;
static const char *const srvstate_names[] = {
@@ -234,13 +234,19 @@ set_service_state(struct vchiq_service *service, int newstate)
service->srvstate = newstate;
}
+struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
+{
+ int idx = handle & (VCHIQ_MAX_SERVICES - 1);
+
+ return rcu_dereference(instance->state->services[idx]);
+}
struct vchiq_service *
-find_service_by_handle(unsigned int handle)
+find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
{
struct vchiq_service *service;
rcu_read_lock();
- service = handle_to_service(handle);
+ service = handle_to_service(instance, handle);
if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
service->handle == handle &&
kref_get_unless_zero(&service->ref_count)) {
@@ -281,7 +287,7 @@ find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
struct vchiq_service *service;
rcu_read_lock();
- service = handle_to_service(handle);
+ service = handle_to_service(instance, handle);
if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
service->handle == handle &&
service->instance == instance &&
@@ -302,7 +308,7 @@ find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int h
struct vchiq_service *service;
rcu_read_lock();
- service = handle_to_service(handle);
+ service = handle_to_service(instance, handle);
if (service &&
(service->srvstate == VCHIQ_SRVSTATE_FREE ||
service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
@@ -398,26 +404,26 @@ vchiq_service_put(struct vchiq_service *service)
}
int
-vchiq_get_client_id(unsigned int handle)
+vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
{
struct vchiq_service *service;
int id;
rcu_read_lock();
- service = handle_to_service(handle);
+ service = handle_to_service(instance, handle);
id = service ? service->client_id : 0;
rcu_read_unlock();
return id;
}
void *
-vchiq_get_service_userdata(unsigned int handle)
+vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
{
void *userdata;
struct vchiq_service *service;
rcu_read_lock();
- service = handle_to_service(handle);
+ service = handle_to_service(instance, handle);
userdata = service ? service->base.userdata : NULL;
rcu_read_unlock();
return userdata;
@@ -466,7 +472,8 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
header, bulk_userdata);
- status = service->base.callback(reason, header, service->handle, bulk_userdata);
+ status = service->base.callback(service->instance, reason, header, service->handle,
+ bulk_userdata);
if (status == VCHIQ_ERROR) {
vchiq_log_warning(vchiq_core_log_level,
"%d: ignoring ERROR from callback to service %x",
@@ -475,7 +482,7 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
}
if (reason != VCHIQ_MESSAGE_AVAILABLE)
- vchiq_release_message(service->handle, header);
+ vchiq_release_message(service->instance, service->handle, header);
return status;
}
@@ -521,6 +528,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
return 0;
}
event->armed = 0;
+ /* Ensure that the peer sees that we are not waiting (armed == 0). */
wmb();
}
@@ -643,6 +651,7 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service,
skip_service:
state->poll_needed = 1;
+ /* Ensure the slot handler thread sees the poll_needed flag. */
wmb();
/* ... and ensure the slot handler runs. */
@@ -1149,6 +1158,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
remote_event_wait(&state->sync_release_event, &local->sync_release);
+ /* Ensure that reads don't overtake the remote_event_wait. */
rmb();
header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
@@ -1441,7 +1451,7 @@ abort_outstanding_bulks(struct vchiq_service *service,
}
if (queue->process != queue->local_insert) {
- vchiq_complete_bulk(bulk);
+ vchiq_complete_bulk(service->instance, bulk);
vchiq_log_info(SRVTRACE_LEVEL(service),
"%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
@@ -1769,7 +1779,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
DEBUG_TRACE(PARSE_LINE);
WARN_ON(queue->process == queue->local_insert);
- vchiq_complete_bulk(bulk);
+ vchiq_complete_bulk(service->instance, bulk);
queue->process++;
mutex_unlock(&service->bulk_mutex);
DEBUG_TRACE(PARSE_LINE);
@@ -1952,6 +1962,7 @@ slot_handler_func(void *v)
DEBUG_TRACE(SLOT_HANDLER_LINE);
remote_event_wait(&state->trigger_event, &local->trigger);
+ /* Ensure that reads don't overtake the remote_event_wait. */
rmb();
DEBUG_TRACE(SLOT_HANDLER_LINE);
@@ -2014,6 +2025,7 @@ sync_func(void *v)
remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
+ /* Ensure that reads don't overtake the remote_event_wait. */
rmb();
msgid = header->msgid;
@@ -2142,18 +2154,13 @@ vchiq_init_slots(void *mem_base, int mem_size)
}
int
-vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
+vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
{
struct vchiq_shared_state *local;
struct vchiq_shared_state *remote;
char threadname[16];
int i, ret;
- if (vchiq_states[0]) {
- pr_err("%s: VCHIQ state already initialized\n", __func__);
- return -EINVAL;
- }
-
local = &slot_zero->slave;
remote = &slot_zero->master;
@@ -2169,6 +2176,8 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
memset(state, 0, sizeof(struct vchiq_state));
+ state->dev = dev;
+
/*
* initialize shared state pointers
*/
@@ -2272,8 +2281,6 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
wake_up_process(state->recycle_thread);
wake_up_process(state->sync_thread);
- vchiq_states[0] = state;
-
/* Indicate readiness to the other side */
local->initialised = 1;
@@ -2287,9 +2294,10 @@ fail_free_handler_thread:
return ret;
}
-void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
+void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_header *header)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
int pos;
if (!service)
@@ -2309,9 +2317,9 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
}
EXPORT_SYMBOL(vchiq_msg_queue_push);
-struct vchiq_header *vchiq_msg_hold(unsigned int handle)
+struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
struct vchiq_header *header;
int pos;
@@ -2866,16 +2874,16 @@ vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instan
/* Find all services registered to this client and remove them. */
i = 0;
while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
- (void)vchiq_remove_service(service->handle);
+ (void)vchiq_remove_service(instance, service->handle);
vchiq_service_put(service);
}
}
enum vchiq_status
-vchiq_close_service(unsigned int handle)
+vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
{
/* Unregister the service */
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
@@ -2930,10 +2938,10 @@ vchiq_close_service(unsigned int handle)
EXPORT_SYMBOL(vchiq_close_service);
enum vchiq_status
-vchiq_remove_service(unsigned int handle)
+vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
{
/* Unregister the service */
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
@@ -2996,11 +3004,11 @@ vchiq_remove_service(unsigned int handle)
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
- int size, void *userdata, enum vchiq_bulk_mode mode,
- enum vchiq_bulk_dir dir)
+enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size, void *userdata,
+ enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
struct vchiq_bulk_queue *queue;
struct vchiq_bulk *bulk;
struct vchiq_state *state;
@@ -3075,9 +3083,13 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __
bulk->size = size;
bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
- if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
+ if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
goto unlock_error_exit;
+ /*
+ * Ensure that the bulk data record is visible to the peer
+ * before proceeding.
+ */
wmb();
vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
@@ -3139,7 +3151,7 @@ waiting:
unlock_both_error_exit:
mutex_unlock(&state->slot_mutex);
cancel_bulk_error_exit:
- vchiq_complete_bulk(bulk);
+ vchiq_complete_bulk(service->instance, bulk);
unlock_error_exit:
mutex_unlock(&service->bulk_mutex);
@@ -3150,13 +3162,13 @@ error_exit:
}
enum vchiq_status
-vchiq_queue_message(unsigned int handle,
+vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
enum vchiq_status status = VCHIQ_ERROR;
int data_id;
@@ -3199,12 +3211,13 @@ error_exit:
return status;
}
-int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
+int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
+ unsigned int size)
{
enum vchiq_status status;
while (1) {
- status = vchiq_queue_message(handle, memcpy_copy_callback,
+ status = vchiq_queue_message(instance, handle, memcpy_copy_callback,
data, size);
/*
@@ -3223,10 +3236,10 @@ int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int siz
EXPORT_SYMBOL(vchiq_queue_kernel_message);
void
-vchiq_release_message(unsigned int handle,
+vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
struct vchiq_header *header)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
struct vchiq_shared_state *remote;
struct vchiq_state *state;
int slot_index;
@@ -3265,10 +3278,10 @@ release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
}
enum vchiq_status
-vchiq_get_peer_version(unsigned int handle, short *peer_version)
+vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
{
enum vchiq_status status = VCHIQ_ERROR;
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
if (!service)
goto exit;
@@ -3300,9 +3313,10 @@ void vchiq_get_config(struct vchiq_config *config)
}
int
-vchiq_set_service_option(unsigned int handle, enum vchiq_service_option option, int value)
+vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
+ enum vchiq_service_option option, int value)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
struct vchiq_service_quota *quota;
int ret = -EINVAL;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 1ddc661642a9..8b4a38f5b3f2 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -314,6 +314,7 @@ struct vchiq_slot_zero {
};
struct vchiq_state {
+ struct device *dev;
int id;
int initialised;
enum vchiq_connstate conn_state;
@@ -448,8 +449,6 @@ extern int vchiq_core_log_level;
extern int vchiq_core_msg_log_level;
extern int vchiq_sync_log_level;
-extern struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
-
extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);
@@ -457,7 +456,7 @@ extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);
extern int
-vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);
+vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);
extern enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
@@ -487,8 +486,8 @@ extern void
remote_event_pollall(struct vchiq_state *state);
extern enum vchiq_status
-vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
- int size, void *userdata, enum vchiq_bulk_mode mode,
+vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset,
+ void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
enum vchiq_bulk_dir dir);
extern int
@@ -507,20 +506,10 @@ extern void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
int poll_type);
-static inline struct vchiq_service *
-handle_to_service(unsigned int handle)
-{
- int idx = handle & (VCHIQ_MAX_SERVICES - 1);
- struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
- (VCHIQ_MAX_STATES - 1)];
-
- if (!state)
- return NULL;
- return rcu_dereference(state->services[idx]);
-}
+struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);
extern struct vchiq_service *
-find_service_by_handle(unsigned int handle);
+find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);
extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport);
@@ -548,16 +537,16 @@ extern void
vchiq_service_put(struct vchiq_service *service);
extern enum vchiq_status
-vchiq_queue_message(unsigned int handle,
+vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size);
-int vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, void __user *uoffset,
- int size, int dir);
+int vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
+ void __user *uoffset, int size, int dir);
-void vchiq_complete_bulk(struct vchiq_bulk *bulk);
+void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk);
void remote_event_signal(struct remote_event *event);
@@ -595,12 +584,13 @@ void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newsta
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes);
-enum vchiq_status vchiq_remove_service(unsigned int service);
+enum vchiq_status vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);
-int vchiq_get_client_id(unsigned int service);
+int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);
void vchiq_get_config(struct vchiq_config *config);
-int vchiq_set_service_option(unsigned int service, enum vchiq_service_option option, int value);
+int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
+ enum vchiq_service_option option, int value);
#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 66bbfec332ba..7e297494437e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -108,8 +108,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
}
static int
-vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements,
- unsigned long count)
+vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_element *elements, unsigned long count)
{
struct vchiq_io_copy_callback_context context;
enum vchiq_status status = VCHIQ_SUCCESS;
@@ -127,7 +127,7 @@ vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements,
total_size += elements[i].size;
}
- status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
+ status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
&context, total_size);
if (status == VCHIQ_ERROR)
@@ -191,7 +191,7 @@ static int vchiq_ioc_create_service(struct vchiq_instance *instance,
if (args->is_open) {
status = vchiq_open_service_internal(service, instance->pid);
if (status != VCHIQ_SUCCESS) {
- vchiq_remove_service(service->handle);
+ vchiq_remove_service(instance, service->handle);
return (status == VCHIQ_RETRY) ?
-EINTR : -EIO;
}
@@ -266,7 +266,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
/* Copy to user space if msgbuf is not NULL */
if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
ret = header->size;
- vchiq_release_message(service->handle, header);
+ vchiq_release_message(instance, service->handle, header);
} else {
ret = -EFAULT;
}
@@ -330,7 +330,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
userdata = args->userdata;
}
- status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
+ status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size,
userdata, args->mode, dir);
if (!waiter) {
@@ -529,7 +529,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
}
/* Now it has been copied, the message can be released. */
- vchiq_release_message(service->handle, header);
+ vchiq_release_message(instance, service->handle, header);
/* The completion must point to the msgbuf. */
user_completion.header = msgbuf;
@@ -596,7 +596,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
i = 0;
while ((service = next_service_by_instance(instance->state,
instance, &i))) {
- status = vchiq_remove_service(service->handle);
+ status = vchiq_remove_service(instance, service->handle);
vchiq_service_put(service);
if (status != VCHIQ_SUCCESS)
break;
@@ -649,7 +649,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
if (put_user(args.handle, &argp->handle)) {
- vchiq_remove_service(args.handle);
+ vchiq_remove_service(instance, args.handle);
ret = -EFAULT;
}
} break;
@@ -673,8 +673,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
if (!user_service->close_pending) {
status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
- vchiq_close_service(service->handle) :
- vchiq_remove_service(service->handle);
+ vchiq_close_service(instance, service->handle) :
+ vchiq_remove_service(instance, service->handle);
if (status != VCHIQ_SUCCESS)
break;
}
@@ -731,7 +731,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(elements, args.elements,
args.count * sizeof(struct vchiq_element)) == 0)
- ret = vchiq_ioc_queue_message(args.handle, elements,
+ ret = vchiq_ioc_queue_message(instance, args.handle, elements,
args.count);
else
ret = -EFAULT;
@@ -788,7 +788,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_GET_CLIENT_ID: {
unsigned int handle = (unsigned int)arg;
- ret = vchiq_get_client_id(handle);
+ ret = vchiq_get_client_id(instance, handle);
} break;
case VCHIQ_IOC_GET_CONFIG: {
@@ -827,7 +827,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- ret = vchiq_set_service_option(args.handle, args.option,
+ ret = vchiq_set_service_option(instance, args.handle, args.option,
args.value);
} break;
@@ -908,6 +908,7 @@ vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
{
struct vchiq_create_service args;
struct vchiq_create_service32 args32;
+ struct vchiq_instance *instance = file->private_data;
long ret;
if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
@@ -926,12 +927,12 @@ vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
.handle = args32.handle,
};
- ret = vchiq_ioc_create_service(file->private_data, &args);
+ ret = vchiq_ioc_create_service(instance, &args);
if (ret < 0)
return ret;
if (put_user(args.handle, &ptrargs32->handle)) {
- vchiq_remove_service(args.handle);
+ vchiq_remove_service(instance, args.handle);
return -EFAULT;
}
@@ -960,6 +961,7 @@ vchiq_compat_ioctl_queue_message(struct file *file,
struct vchiq_queue_message args;
struct vchiq_queue_message32 args32;
struct vchiq_service *service;
+ struct vchiq_instance *instance = file->private_data;
int ret;
if (copy_from_user(&args32, arg, sizeof(args32)))
@@ -974,7 +976,7 @@ vchiq_compat_ioctl_queue_message(struct file *file,
if (args32.count > MAX_ELEMENTS)
return -EINVAL;
- service = find_service_for_instance(file->private_data, args.handle);
+ service = find_service_for_instance(instance, args.handle);
if (!service)
return -EINVAL;
@@ -994,7 +996,7 @@ vchiq_compat_ioctl_queue_message(struct file *file,
compat_ptr(element32[count].data);
elements[count].size = element32[count].size;
}
- ret = vchiq_ioc_queue_message(args.handle, elements,
+ ret = vchiq_ioc_queue_message(instance, args.handle, elements,
args.count);
} else {
ret = -EINVAL;
@@ -1261,7 +1263,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
spin_unlock(&msg_queue_spinlock);
if (header)
- vchiq_release_message(service->handle, header);
+ vchiq_release_message(instance, service->handle, header);
spin_lock(&msg_queue_spinlock);
}
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 845b20e4d05a..cb921c94996a 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -292,8 +292,8 @@ static void buffer_to_host_work_cb(struct work_struct *work)
/* Dummy receive to ensure the buffers remain in order */
len = 8;
/* queue the bulk submission */
- vchiq_use_service(instance->service_handle);
- ret = vchiq_bulk_receive(instance->service_handle,
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+ ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
msg_context->u.bulk.buffer->buffer,
/* Actual receive needs to be a multiple
* of 4 bytes
@@ -302,7 +302,7 @@ static void buffer_to_host_work_cb(struct work_struct *work)
msg_context,
VCHIQ_BULK_MODE_CALLBACK);
- vchiq_release_service(instance->service_handle);
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
if (ret != 0)
pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
@@ -436,15 +436,15 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
/* no payload in message */
m.u.buffer_from_host.payload_in_message = 0;
- vchiq_use_service(instance->service_handle);
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
- ret = vchiq_queue_kernel_message(instance->service_handle, &m,
+ ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
sizeof(struct mmal_msg_header) +
sizeof(m.u.buffer_from_host));
if (ret)
atomic_dec(&port->buffers_with_vpu);
- vchiq_release_service(instance->service_handle);
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
return ret;
}
@@ -548,11 +548,12 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
}
/* incoming event service callback */
-static enum vchiq_status service_callback(enum vchiq_reason reason,
+static enum vchiq_status service_callback(struct vchiq_instance *vchiq_instance,
+ enum vchiq_reason reason,
struct vchiq_header *header,
unsigned int handle, void *bulk_ctx)
{
- struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(handle);
+ struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
u32 msg_len;
struct mmal_msg *msg;
struct mmal_msg_context *msg_context;
@@ -572,25 +573,25 @@ static enum vchiq_status service_callback(enum vchiq_reason reason,
/* handling is different for buffer messages */
switch (msg->h.type) {
case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
- vchiq_release_message(handle, header);
+ vchiq_release_message(vchiq_instance, handle, header);
break;
case MMAL_MSG_TYPE_EVENT_TO_HOST:
event_to_host_cb(instance, msg, msg_len);
- vchiq_release_message(handle, header);
+ vchiq_release_message(vchiq_instance, handle, header);
break;
case MMAL_MSG_TYPE_BUFFER_TO_HOST:
buffer_to_host_cb(instance, msg, msg_len);
- vchiq_release_message(handle, header);
+ vchiq_release_message(vchiq_instance, handle, header);
break;
default:
/* messages dependent on header context to complete */
if (!msg->h.context) {
pr_err("received message context was null!\n");
- vchiq_release_message(handle, header);
+ vchiq_release_message(vchiq_instance, handle, header);
break;
}
@@ -599,7 +600,7 @@ static enum vchiq_status service_callback(enum vchiq_reason reason,
if (!msg_context) {
pr_err("received invalid message context %u!\n",
msg->h.context);
- vchiq_release_message(handle, header);
+ vchiq_release_message(vchiq_instance, handle, header);
break;
}
@@ -678,13 +679,13 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
">>> sync message");
- vchiq_use_service(instance->service_handle);
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
- ret = vchiq_queue_kernel_message(instance->service_handle, msg,
+ ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
sizeof(struct mmal_msg_header) +
payload_len);
- vchiq_release_service(instance->service_handle);
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
if (ret) {
pr_err("error %d queuing message\n", ret);
@@ -824,7 +825,7 @@ static int port_info_set(struct vchiq_mmal_instance *instance,
port->component->handle, port->handle);
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -919,7 +920,7 @@ release_msg:
pr_debug("%s:result:%d component:0x%x port:%d\n",
__func__, ret, port->component->handle, port->handle);
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -967,7 +968,7 @@ static int create_component(struct vchiq_mmal_instance *instance,
component->inputs, component->outputs, component->clocks);
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1000,7 +1001,7 @@ static int destroy_component(struct vchiq_mmal_instance *instance,
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1032,7 +1033,7 @@ static int enable_component(struct vchiq_mmal_instance *instance,
ret = -rmsg->u.component_enable_reply.status;
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1065,7 +1066,7 @@ static int disable_component(struct vchiq_mmal_instance *instance,
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1097,7 +1098,7 @@ static int get_version(struct vchiq_mmal_instance *instance,
*minor_out = rmsg->u.version.minor;
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1139,7 +1140,7 @@ static int port_action_port(struct vchiq_mmal_instance *instance,
port_action_type_names[action_type], action_type);
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1187,7 +1188,7 @@ static int port_action_handle(struct vchiq_mmal_instance *instance,
action_type, connect_component_handle, connect_port_handle);
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1228,7 +1229,7 @@ static int port_parameter_set(struct vchiq_mmal_instance *instance,
ret, port->component->handle, port->handle, parameter_id);
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1287,7 +1288,7 @@ static int port_parameter_get(struct vchiq_mmal_instance *instance,
ret, port->component->handle, port->handle, parameter_id);
release_msg:
- vchiq_release_message(instance->service_handle, rmsg_handle);
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
return ret;
}
@@ -1832,9 +1833,9 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
if (mutex_lock_interruptible(&instance->vchiq_mutex))
return -EINTR;
- vchiq_use_service(instance->service_handle);
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
- status = vchiq_close_service(instance->service_handle);
+ status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
if (status != 0)
pr_err("mmal-vchiq: VCHIQ close failed\n");
@@ -1922,14 +1923,14 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
goto err_close_services;
}
- vchiq_release_service(instance->service_handle);
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
*out_instance = instance;
return 0;
err_close_services:
- vchiq_close_service(instance->service_handle);
+ vchiq_close_service(instance->vchiq_instance, instance->service_handle);
destroy_workqueue(instance->bulk_wq);
err_free:
kfree(instance);
diff --git a/drivers/staging/vme_user/Kconfig b/drivers/staging/vme_user/Kconfig
index e8b4461bf27f..c8eabf8f40f1 100644
--- a/drivers/staging/vme_user/Kconfig
+++ b/drivers/staging/vme_user/Kconfig
@@ -1,4 +1,29 @@
# SPDX-License-Identifier: GPL-2.0
+menuconfig VME_BUS
+ bool "VME bridge support"
+ depends on STAGING && PCI
+ help
+ If you say Y here you get support for the VME bridge framework.
+
+if VME_BUS
+
+comment "VME Bridge Drivers"
+
+config VME_TSI148
+ tristate "Tempe"
+ depends on HAS_DMA
+ help
+ If you say Y here you get support for the Tundra TSI148 VME bridge
+ chip.
+
+config VME_FAKE
+ tristate "Fake"
+ help
+ If you say Y here you get support for the fake VME bridge. This
+ provides a virtualised VME bus for devices with no VME bridge. This
+ is mainly useful for VME development (in the absence of VME
+ hardware).
+
comment "VME Device Drivers"
config VME_USER
@@ -11,3 +36,5 @@ config VME_USER
To compile this driver as a module, choose M here. The module will
be called vme_user. If unsure, say N.
+
+endif
diff --git a/drivers/staging/vme_user/Makefile b/drivers/staging/vme_user/Makefile
index 5380115139b0..8dcc6938ce5c 100644
--- a/drivers/staging/vme_user/Makefile
+++ b/drivers/staging/vme_user/Makefile
@@ -3,4 +3,7 @@
# Makefile for the VME device drivers.
#
+obj-$(CONFIG_VME_BUS) += vme.o
obj-$(CONFIG_VME_USER) += vme_user.o
+obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
+obj-$(CONFIG_VME_FAKE) += vme_fake.o
diff --git a/drivers/vme/vme.c b/drivers/staging/vme_user/vme.c
index 8dba20186be3..b5555683a069 100644
--- a/drivers/vme/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -26,8 +26,8 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/vme.h>
+#include "vme.h"
#include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
new file mode 100644
index 000000000000..b204a9b4be1b
--- /dev/null
+++ b/drivers/staging/vme_user/vme.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VME_H_
+#define _VME_H_
+
+/* Resource Type */
+enum vme_resource_type {
+ VME_MASTER,
+ VME_SLAVE,
+ VME_DMA,
+ VME_LM
+};
+
+/* VME Address Spaces */
+#define VME_A16 0x1
+#define VME_A24 0x2
+#define VME_A32 0x4
+#define VME_A64 0x8
+#define VME_CRCSR 0x10
+#define VME_USER1 0x20
+#define VME_USER2 0x40
+#define VME_USER3 0x80
+#define VME_USER4 0x100
+
+#define VME_A16_MAX 0x10000ULL
+#define VME_A24_MAX 0x1000000ULL
+#define VME_A32_MAX 0x100000000ULL
+#define VME_A64_MAX 0x10000000000000000ULL
+#define VME_CRCSR_MAX 0x1000000ULL
+
+
+/* VME Cycle Types */
+#define VME_SCT 0x1
+#define VME_BLT 0x2
+#define VME_MBLT 0x4
+#define VME_2eVME 0x8
+#define VME_2eSST 0x10
+#define VME_2eSSTB 0x20
+
+#define VME_2eSST160 0x100
+#define VME_2eSST267 0x200
+#define VME_2eSST320 0x400
+
+#define VME_SUPER 0x1000
+#define VME_USER 0x2000
+#define VME_PROG 0x4000
+#define VME_DATA 0x8000
+
+/* VME Data Widths */
+#define VME_D8 0x1
+#define VME_D16 0x2
+#define VME_D32 0x4
+#define VME_D64 0x8
+
+/* Arbitration Scheduling Modes */
+#define VME_R_ROBIN_MODE 0x1
+#define VME_PRIORITY_MODE 0x2
+
+#define VME_DMA_PATTERN (1<<0)
+#define VME_DMA_PCI (1<<1)
+#define VME_DMA_VME (1<<2)
+
+#define VME_DMA_PATTERN_BYTE (1<<0)
+#define VME_DMA_PATTERN_WORD (1<<1)
+#define VME_DMA_PATTERN_INCREMENT (1<<2)
+
+#define VME_DMA_VME_TO_MEM (1<<0)
+#define VME_DMA_MEM_TO_VME (1<<1)
+#define VME_DMA_VME_TO_VME (1<<2)
+#define VME_DMA_MEM_TO_MEM (1<<3)
+#define VME_DMA_PATTERN_TO_VME (1<<4)
+#define VME_DMA_PATTERN_TO_MEM (1<<5)
+
+struct vme_dma_attr {
+ u32 type;
+ void *private;
+};
+
+struct vme_resource {
+ enum vme_resource_type type;
+ struct list_head *entry;
+};
+
+extern struct bus_type vme_bus_type;
+
+/* Number of VME interrupt vectors */
+#define VME_NUM_STATUSID 256
+
+/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
+#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
+#define VME_MAX_SLOTS 32
+
+#define VME_SLOT_CURRENT -1
+#define VME_SLOT_ALL -2
+
+/**
+ * struct vme_dev - Structure representing a VME device
+ * @num: The device number
+ * @bridge: Pointer to the bridge device this device is on
+ * @dev: Internal device structure
+ * @drv_list: List of devices (per driver)
+ * @bridge_list: List of devices (per bridge)
+ */
+struct vme_dev {
+ int num;
+ struct vme_bridge *bridge;
+ struct device dev;
+ struct list_head drv_list;
+ struct list_head bridge_list;
+};
+
+/**
+ * struct vme_driver - Structure representing a VME driver
+ * @name: Driver name, should be unique among VME drivers and usually the same
+ * as the module name.
+ * @match: Callback used to determine whether probe should be run.
+ * @probe: Callback for device binding, called when a new device is detected.
+ * @remove: Callback, called on device removal.
+ * @driver: Underlying generic device driver structure.
+ * @devices: List of VME devices (struct vme_dev) associated with this driver.
+ */
+struct vme_driver {
+ const char *name;
+ int (*match)(struct vme_dev *);
+ int (*probe)(struct vme_dev *);
+ void (*remove)(struct vme_dev *);
+ struct device_driver driver;
+ struct list_head devices;
+};
+
+void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
+void vme_free_consistent(struct vme_resource *, size_t, void *,
+ dma_addr_t);
+
+size_t vme_get_size(struct vme_resource *);
+int vme_check_window(u32 aspace, unsigned long long vme_base,
+ unsigned long long size);
+
+struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
+int vme_slave_set(struct vme_resource *, int, unsigned long long,
+ unsigned long long, dma_addr_t, u32, u32);
+int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
+void vme_slave_free(struct vme_resource *);
+
+struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
+int vme_master_set(struct vme_resource *, int, unsigned long long,
+ unsigned long long, u32, u32, u32);
+int vme_master_get(struct vme_resource *, int *, unsigned long long *,
+ unsigned long long *, u32 *, u32 *, u32 *);
+ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
+ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
+unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
+ unsigned int, loff_t);
+int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
+void vme_master_free(struct vme_resource *);
+
+struct vme_resource *vme_dma_request(struct vme_dev *, u32);
+struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
+struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
+struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
+void vme_dma_free_attribute(struct vme_dma_attr *);
+int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
+ struct vme_dma_attr *, size_t);
+int vme_dma_list_exec(struct vme_dma_list *);
+int vme_dma_list_free(struct vme_dma_list *);
+int vme_dma_free(struct vme_resource *);
+
+int vme_irq_request(struct vme_dev *, int, int,
+ void (*callback)(int, int, void *), void *);
+void vme_irq_free(struct vme_dev *, int, int);
+int vme_irq_generate(struct vme_dev *, int, int);
+
+struct vme_resource *vme_lm_request(struct vme_dev *);
+int vme_lm_count(struct vme_resource *);
+int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
+int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
+int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *);
+int vme_lm_detach(struct vme_resource *, int);
+void vme_lm_free(struct vme_resource *);
+
+int vme_slot_num(struct vme_dev *);
+int vme_bus_num(struct vme_dev *);
+
+int vme_register_driver(struct vme_driver *, unsigned int);
+void vme_unregister_driver(struct vme_driver *);
+
+
+#endif /* _VME_H_ */
+
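For orientation, a minimal consumer of this header might be structured as in the sketch below. Only the vme_* calls and the struct vme_driver fields declared above are taken from the header; the driver name, the window size and the one-device-per-bridge choice are illustrative assumptions, not part of this patch.

/* Illustrative sketch of a driver built against vme.h -- not part of this patch. */
#include <linux/init.h>
#include <linux/module.h>
#include "vme.h"

static int sketch_match(struct vme_dev *vdev)
{
	/* Nonzero means "bind"; here we only claim device number 0. */
	return vdev->num == 0;
}

static int sketch_probe(struct vme_dev *vdev)
{
	struct vme_resource *res;
	u32 data = 0;

	/* Ask the bridge for an A24/D32 single-cycle master window. */
	res = vme_master_request(vdev, VME_A24, VME_SCT, VME_D32);
	if (!res)
		return -ENODEV;

	/* Enable a 64 KiB window at VME address 0 (sizes are made up). */
	if (vme_master_set(res, 1, 0x0, 0x10000, VME_A24, VME_SCT, VME_D32)) {
		vme_master_free(res);
		return -EIO;
	}

	/* Read the first longword of the window; result ignored for brevity. */
	vme_master_read(res, &data, sizeof(data), 0);

	vme_master_free(res);
	return 0;
}

static void sketch_remove(struct vme_dev *vdev)
{
}

static struct vme_driver sketch_driver = {
	.name	= "vme_sketch",
	.match	= sketch_match,
	.probe	= sketch_probe,
	.remove	= sketch_remove,
};

static int __init sketch_init(void)
{
	/* Second argument: how many vme_dev instances to create per bridge. */
	return vme_register_driver(&sketch_driver, 1);
}

static void __exit sketch_exit(void)
{
	vme_unregister_driver(&sketch_driver);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");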
diff --git a/drivers/vme/vme_bridge.h b/drivers/staging/vme_user/vme_bridge.h
index 42ecf961004e..0bbefe9851d7 100644
--- a/drivers/vme/vme_bridge.h
+++ b/drivers/staging/vme_user/vme_bridge.h
@@ -2,7 +2,7 @@
#ifndef _VME_BRIDGE_H_
#define _VME_BRIDGE_H_
-#include <linux/vme.h>
+#include "vme.h"
#define VME_CRCSR_BUF_SIZE (508*1024)
/*
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
index 6a1bc284f297..dd646b0c531d 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/staging/vme_user/vme_fake.c
@@ -29,9 +29,9 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/vme.h>
-#include "../vme_bridge.h"
+#include "vme.h"
+#include "vme_bridge.h"
/*
* Define the number of each that the fake driver supports.
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index be9051b02f24..956476213241 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -26,9 +26,9 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/byteorder/generic.h>
-#include <linux/vme.h>
-#include "../vme_bridge.h"
+#include "vme.h"
+#include "vme_bridge.h"
#include "vme_tsi148.h"
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
diff --git a/drivers/vme/bridges/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h
index 226fedc6f167..226fedc6f167 100644
--- a/drivers/vme/bridges/vme_tsi148.h
+++ b/drivers/staging/vme_user/vme_tsi148.h
diff --git a/drivers/staging/vme_user/vme_user.c b/drivers/staging/vme_user/vme_user.c
index 859af797630c..4e533c0bfe6d 100644
--- a/drivers/staging/vme_user/vme_user.c
+++ b/drivers/staging/vme_user/vme_user.c
@@ -33,8 +33,8 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <linux/vme.h>
+#include "vme.h"
#include "vme_user.h"
static const char driver_name[] = "vme_user";
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 577a38fae369..5de841cb776c 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1912,7 +1912,7 @@ bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
/* turn on REGR */
- MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
by_value = ioread8(iobase + MAC_REG_BBREGCTL);
@@ -1957,7 +1957,7 @@ bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
iowrite8(by_data, iobase + MAC_REG_BBREGDATA);
/* turn on BBREGCTL_REGW */
- MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
by_value = ioread8(iobase + MAC_REG_BBREGCTL);
@@ -2013,8 +2013,8 @@ bool bb_vt3253_init(struct vnt_private *priv)
byVT3253B0_AGC4_RFMD2959[ii][0],
byVT3253B0_AGC4_RFMD2959[ii][1]);
- VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
+ iowrite32(0x23, iobase + MAC_REG_ITRTMSET);
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0));
}
priv->abyBBVGA[0] = 0x18;
priv->abyBBVGA[1] = 0x0A;
@@ -2054,7 +2054,7 @@ bool bb_vt3253_init(struct vnt_private *priv)
byVT3253B0_AGC[ii][1]);
iowrite8(0x23, iobase + MAC_REG_ITRTMSET);
- MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0));
priv->abyBBVGA[0] = 0x14;
priv->abyBBVGA[1] = 0x0A;
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 2cde0082fc03..846469cc06bb 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -293,12 +293,10 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
- VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST,
- (u32)qwTSFOffset);
- VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST + 4,
- (u32)(qwTSFOffset >> 32));
- MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL,
- TFTCTL_TSFSYNCEN);
+ qwTSFOffset = le64_to_cpu(qwTSFOffset);
+ iowrite32((u32)qwTSFOffset, priv->port_offset + MAC_REG_TSFOFST);
+ iowrite32((u32)(qwTSFOffset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
}
return true;
}
@@ -326,13 +324,13 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* set HW beacon interval */
- VNSvOutPortW(priv->port_offset + MAC_REG_BI, wBeaconInterval);
+ iowrite16(wBeaconInterval, priv->port_offset + MAC_REG_BI);
priv->wBeaconInterval = wBeaconInterval;
/* Set NextTBTT */
- VNSvOutPortD(priv->port_offset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(priv->port_offset + MAC_REG_NEXTTBTT + 4,
- (u32)(qwNextTBTT >> 32));
- MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ qwNextTBTT = le64_to_cpu(qwNextTBTT);
+ iowrite32((u32)qwNextTBTT, priv->port_offset + MAC_REG_NEXTTBTT);
+ iowrite32((u32)(qwNextTBTT >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
return true;
}
@@ -354,29 +352,28 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOn(priv->port_offset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_SWPE1);
+ vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
- MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_SWPE2);
- MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_SWPE3);
+ vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE2);
+ vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE3);
break;
}
- MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_RXON);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_RXON);
bb_set_deep_sleep(priv, priv->local_id);
priv->radio_off = true;
pr_debug("chester power off\n");
- MACvRegBitsOn(priv->port_offset, MAC_REG_GPIOCTL0,
- LED_ACTSET); /* LED issue */
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
}
void CARDvSafeResetTx(struct vnt_private *priv)
@@ -411,8 +408,7 @@ void CARDvSafeResetTx(struct vnt_private *priv)
MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
/* set MAC Beacon TX pointer */
- MACvSetCurrBCNTxDescAddr(priv->port_offset,
- (priv->tx_beacon_dma));
+ iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);
}
/*
@@ -453,8 +449,8 @@ void CARDvSafeResetRx(struct vnt_private *priv)
}
/* set perPkt mode */
- MACvRx0PerPktMode(priv->port_offset);
- MACvRx1PerPktMode(priv->port_offset);
+ iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL0);
+ iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL1);
/* set MAC RD pointer */
MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma);
@@ -553,7 +549,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
/* swap over to get correct write order */
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_1, phy.field_write);
+ iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_1);
/* RSPINF_b_2 */
vnt_get_phy_field(priv, 14,
@@ -562,7 +558,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_2, phy.field_write);
+ iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_2);
/* RSPINF_b_5 */
vnt_get_phy_field(priv, 14,
@@ -571,7 +567,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_5, phy.field_write);
+ iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_5);
/* RSPINF_b_11 */
vnt_get_phy_field(priv, 14,
@@ -580,75 +576,66 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_11, phy.field_write);
+ iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_11);
/* RSPINF_a_6 */
s_vCalculateOFDMRParameter(RATE_6M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_6,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_6);
/* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_9,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_9);
/* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_12,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_12);
/* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_18,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_18);
/* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_24,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_24);
/* RSPINF_a_36 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_36M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_36,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_36);
/* RSPINF_a_48 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_48M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_48,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_48);
/* RSPINF_a_54 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_54,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_54);
/* RSPINF_a_72 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_72,
- MAKEWORD(byTxRate, byRsvTime));
+ iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72);
/* Set to Page0 */
MACvSelectPage0(priv->port_offset);
@@ -734,9 +721,9 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
* In:
* priv - The adapter to be read
* Out:
- * qwCurrTSF - Current TSF counter
+ * none
*
- * Return Value: true if success; otherwise false
+ * Return Value: Current TSF counter
*/
u64 vt6655_get_current_tsf(struct vnt_private *priv)
{
@@ -745,7 +732,7 @@ u64 vt6655_get_current_tsf(struct vnt_private *priv)
unsigned char data;
u32 low, high;
- MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
data = ioread8(iobase + MAC_REG_TFTCTL);
if (!(data & TFTCTL_TSFCNTRRD))
@@ -808,9 +795,10 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* Set NextTBTT */
- VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
- MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ qwNextTBTT = le64_to_cpu(qwNextTBTT);
+ iowrite32((u32)qwNextTBTT, iobase + MAC_REG_NEXTTBTT);
+ iowrite32((u32)(qwNextTBTT >> 32), iobase + MAC_REG_NEXTTBTT + 4);
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
}
/*
@@ -834,8 +822,9 @@ void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
/* Set NextTBTT */
- VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF);
- VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
- MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ qwTSF = le64_to_cpu(qwTSF);
+ iowrite32((u32)qwTSF, iobase + MAC_REG_NEXTTBTT);
+ iowrite32((u32)(qwTSF >> 32), iobase + MAC_REG_NEXTTBTT + 4);
+ vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF);
}
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 652dcaf61169..e926f9829a15 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -94,7 +94,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
}
/* clear NAV */
- MACvRegBitsOn(priv->port_offset, MAC_REG_MACCR, MACCR_CLRNAV);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_MACCR, MACCR_CLRNAV);
/* TX_PE will reserve 3 us for MAX2829 A mode only,
* it is for better TX throughput
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index afaf331fe125..bab08a40fe66 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -122,6 +122,9 @@ static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
static void device_free_info(struct vnt_private *priv);
static void device_print_info(struct vnt_private *priv);
+static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr);
+static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr);
+
static int device_init_rd0_ring(struct vnt_private *priv);
static int device_init_rd1_ring(struct vnt_private *priv);
static int device_init_td0_ring(struct vnt_private *priv);
@@ -186,6 +189,22 @@ device_set_options(struct vnt_private *priv)
pr_debug(" byBBType= %d\n", (int)priv->byBBType);
}
+static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr)
+{
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL);
+ for (int i = 0; i < 6; i++)
+ iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i);
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL);
+}
+
+static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
+{
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL);
+ for (int i = 0; i < 6; i++)
+ mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i);
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL);
+}
+
/*
* Initialisation of MAC & BBP registers
*/
@@ -340,8 +359,8 @@ static void device_init_registers(struct vnt_private *priv)
}
/* use relative tx timeout and 802.11i D4 */
- MACvWordRegBitsOn(priv->port_offset,
- MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
+ vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG,
+ (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
/* set performance parameter by registry */
MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
@@ -398,7 +417,7 @@ static void device_init_registers(struct vnt_private *priv)
CARDvSafeResetTx(priv);
if (priv->local_id <= REV_ID_VT3253_A1)
- MACvRegBitsOn(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
/* Turn On Rx DMA */
MACvReceive0(priv->port_offset);
@@ -979,7 +998,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
- if (!(priv->vif->bss_conf.assoc && priv->current_rssi))
+ if (!(priv->vif->cfg.assoc && priv->current_rssi))
return;
RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);
@@ -1055,13 +1074,12 @@ static void vnt_interrupt_process(struct vnt_private *priv)
* update ISR counter
*/
while (isr && priv->vif) {
- MACvWriteISR(priv->port_offset, isr);
+ iowrite32(isr, priv->port_offset + MAC_REG_ISR);
if (isr & ISR_FETALERR) {
pr_debug(" ISR_FETALERR\n");
iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
- VNSvOutPortW(priv->port_offset +
- MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
+ iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL);
device_error(priv, isr);
}
@@ -1135,7 +1153,7 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
- MACvIntEnable(priv->port_offset, IMR_MASK_VALUE);
+ iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
@@ -1144,7 +1162,7 @@ static irqreturn_t vnt_interrupt(int irq, void *arg)
schedule_work(&priv->interrupt_work);
- MACvIntDisable(priv->port_offset);
+ iowrite32(0, priv->port_offset + MAC_REG_IMR);
return IRQ_HANDLED;
}
@@ -1253,8 +1271,8 @@ static int vnt_start(struct ieee80211_hw *hw)
device_init_registers(priv);
- dev_dbg(&priv->pcid->dev, "call MACvIntEnable\n");
- MACvIntEnable(priv->port_offset, IMR_MASK_VALUE);
+ dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n");
+ iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
ieee80211_wake_queues(hw);
@@ -1304,15 +1322,15 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
case NL80211_IFTYPE_STATION:
break;
case NL80211_IFTYPE_ADHOC:
- MACvRegBitsOff(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
- MACvRegBitsOn(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
break;
case NL80211_IFTYPE_AP:
- MACvRegBitsOff(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
- MACvRegBitsOn(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
break;
default:
@@ -1333,16 +1351,16 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_STATION:
break;
case NL80211_IFTYPE_ADHOC:
- MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
- MACvRegBitsOff(priv->port_offset,
- MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
- MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ vt6655_mac_reg_bits_off(priv->port_offset,
+ MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
break;
case NL80211_IFTYPE_AP:
- MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
- MACvRegBitsOff(priv->port_offset,
- MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
- MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ vt6655_mac_reg_bits_off(priv->port_offset,
+ MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
break;
default:
break;
@@ -1395,18 +1413,18 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
static void vnt_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf, u32 changed)
+ struct ieee80211_bss_conf *conf, u64 changed)
{
struct vnt_private *priv = hw->priv;
- priv->current_aid = conf->aid;
+ priv->current_aid = vif->cfg.aid;
if (changed & BSS_CHANGED_BSSID && conf->bssid) {
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- MACvWriteBSSIDAddress(priv->port_offset, conf->bssid);
+ vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1458,17 +1476,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (conf->enable_beacon) {
vnt_beacon_enable(priv, vif, conf);
- MACvRegBitsOn(priv->port_offset, MAC_REG_TCR,
- TCR_AUTOBCNTX);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
} else {
- MACvRegBitsOff(priv->port_offset, MAC_REG_TCR,
- TCR_AUTOBCNTX);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
}
}
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
priv->op_mode != NL80211_IFTYPE_AP) {
- if (conf->assoc && conf->beacon_rate) {
+ if (vif->cfg.assoc && conf->beacon_rate) {
CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
conf->sync_tsf);
@@ -1523,20 +1540,17 @@ static void vnt_configure(struct ieee80211_hw *hw,
if (priv->mc_list_count > 2) {
MACvSelectPage1(priv->port_offset);
- VNSvOutPortD(priv->port_offset +
- MAC_REG_MAR0, 0xffffffff);
- VNSvOutPortD(priv->port_offset +
- MAC_REG_MAR0 + 4, 0xffffffff);
+ iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
+ iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);
MACvSelectPage0(priv->port_offset);
} else {
MACvSelectPage1(priv->port_offset);
- VNSvOutPortD(priv->port_offset +
- MAC_REG_MAR0, (u32)multicast);
- VNSvOutPortD(priv->port_offset +
- MAC_REG_MAR0 + 4,
- (u32)(multicast >> 32));
+ multicast = le64_to_cpu(multicast);
+ iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
+ iowrite32((u32)(multicast >> 32),
+ priv->port_offset + MAC_REG_MAR0 + 4);
MACvSelectPage0(priv->port_offset);
}
@@ -1726,7 +1740,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
}
/* initial to reload eeprom */
MACvInitialize(priv);
- MACvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
+ vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);
/* Get RFType */
priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 88ddd0676463..dcc649532737 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -38,6 +38,47 @@
#include "mac.h"
+void vt6655_mac_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask)
+{
+ unsigned char reg_value;
+
+ reg_value = ioread8(iobase + reg_offset);
+ iowrite8(reg_value | bit_mask, iobase + reg_offset);
+}
+
+void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask)
+{
+ unsigned short reg_value;
+
+ reg_value = ioread16(iobase + reg_offset);
+ iowrite16(reg_value | (bit_mask), iobase + reg_offset);
+}
+
+void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask)
+{
+ unsigned char reg_value;
+
+ reg_value = ioread8(iobase + reg_offset);
+ iowrite8(reg_value & ~(bit_mask), iobase + reg_offset);
+}
+
+void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask)
+{
+ unsigned short reg_value;
+
+ reg_value = ioread16(iobase + reg_offset);
+ iowrite16(reg_value & ~(bit_mask), iobase + reg_offset);
+}
+
+static void vt6655_mac_clear_stck_ds(void __iomem *iobase)
+{
+ u8 reg_value;
+
+ reg_value = ioread8(iobase + MAC_REG_STICKHW);
+ reg_value = reg_value & 0xFC;
+ iowrite8(reg_value, iobase + MAC_REG_STICKHW);
+}
+
/*
* Description:
* Test if all test bits off
@@ -337,7 +378,7 @@ bool MACbSafeRxOff(struct vnt_private *priv)
}
/* try to safe shutdown RX */
- MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_RXON);
+ vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_RXON);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_RXONST))
@@ -392,7 +433,7 @@ bool MACbSafeTxOff(struct vnt_private *priv)
}
/* try to safe shutdown TX */
- MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_TXON);
+ vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_TXON);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
@@ -423,7 +464,7 @@ bool MACbSafeStop(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
- MACvRegBitsOff(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);
+ vt6655_mac_reg_bits_off(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);
if (!MACbSafeRxOff(priv)) {
pr_debug(" MACbSafeRxOff == false)\n");
@@ -436,7 +477,7 @@ bool MACbSafeStop(struct vnt_private *priv)
return false;
}
- MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN);
+ vt6655_mac_reg_bits_off(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN);
return true;
}
@@ -458,7 +499,7 @@ bool MACbShutdown(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
/* disable MAC IMR */
- MACvIntDisable(io_base);
+ iowrite32(0, io_base + MAC_REG_IMR);
MACvSetLoopbackMode(priv, MAC_LB_INTERNAL);
/* stop the adapter */
if (!MACbSafeStop(priv)) {
@@ -486,7 +527,7 @@ void MACvInitialize(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
/* clear sticky bits */
- MACvClearStckDS(io_base);
+ vt6655_mac_clear_stck_ds(io_base);
/* disable force PME-enable */
iowrite8(PME_OVR, io_base + MAC_REG_PMC1);
/* only 3253 A */
@@ -730,7 +771,7 @@ bool MACbPSWakeup(struct vnt_private *priv)
return true;
/* Disable PS */
- MACvRegBitsOff(io_base, MAC_REG_PSCTL, PSCTL_PSEN);
+ vt6655_mac_reg_bits_off(io_base, MAC_REG_PSCTL, PSCTL_PSEN);
/* Check if SyncFlushOK */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 57ae3bdbdb2d..0122c4603c66 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -18,7 +18,7 @@
#ifndef __MAC_H__
#define __MAC_H__
-#include "upc.h"
+#include "device.h"
/*--------------------- Export Definitions -------------------------*/
/* Registers in the MAC */
@@ -537,82 +537,14 @@
/*--------------------- Export Macros ------------------------------*/
-#define MACvRegBitsOn(iobase, byRegOfs, byBits) \
-do { \
- unsigned char byData; \
- byData = ioread8(iobase + byRegOfs); \
- iowrite8(byData | (byBits), iobase + byRegOfs); \
-} while (0)
-
-#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \
-do { \
- unsigned short wData; \
- wData = ioread16(iobase + byRegOfs); \
- VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \
-} while (0)
-
-#define MACvRegBitsOff(iobase, byRegOfs, byBits) \
-do { \
- unsigned char byData; \
- byData = ioread8(iobase + byRegOfs); \
- iowrite8(byData & ~(byBits), iobase + byRegOfs); \
-} while (0)
-
-#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \
-do { \
- unsigned short wData; \
- wData = ioread16(iobase + byRegOfs); \
- VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \
-} while (0)
-
-/* set the chip with current BCN tx descriptor address */
-#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \
- VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \
- dwCurrDescAddr)
-
-/* set the chip with current BCN length */
-#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \
- VNSvOutPortW(iobase + MAC_REG_BCNDMACTL + 2, \
- wCurrBCNLength)
-
-#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \
-do { \
- iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
- iowrite8(pbyEtherAddr[0], iobase + MAC_REG_BSSID0); \
- iowrite8(pbyEtherAddr[1], iobase + MAC_REG_BSSID0 + 1); \
- iowrite8(pbyEtherAddr[2], iobase + MAC_REG_BSSID0 + 2); \
- iowrite8(pbyEtherAddr[3], iobase + MAC_REG_BSSID0 + 3); \
- iowrite8(pbyEtherAddr[4], iobase + MAC_REG_BSSID0 + 4); \
- iowrite8(pbyEtherAddr[5], iobase + MAC_REG_BSSID0 + 5); \
- iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
-} while (0)
-
-#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
-do { \
- iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
- pbyEtherAddr[0] = ioread8(iobase + MAC_REG_PAR0); \
- pbyEtherAddr[1] = ioread8(iobase + MAC_REG_PAR0 + 1); \
- pbyEtherAddr[2] = ioread8(iobase + MAC_REG_PAR0 + 2); \
- pbyEtherAddr[3] = ioread8(iobase + MAC_REG_PAR0 + 3); \
- pbyEtherAddr[4] = ioread8(iobase + MAC_REG_PAR0 + 4); \
- pbyEtherAddr[5] = ioread8(iobase + MAC_REG_PAR0 + 5); \
- iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
-} while (0)
-
-#define MACvRx0PerPktMode(iobase) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
-
-#define MACvRx1PerPktMode(iobase) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
-
#define MACvReceive0(iobase) \
do { \
unsigned long dwData; \
dwData = ioread32(iobase + MAC_REG_RXDMACTL0); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
+ iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL0); \
else \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
+ iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL0); \
} while (0)
#define MACvReceive1(iobase) \
@@ -620,9 +552,9 @@ do { \
unsigned long dwData; \
dwData = ioread32(iobase + MAC_REG_RXDMACTL1); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
+ iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL1); \
else \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
+ iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL1); \
} while (0)
#define MACvTransmit0(iobase) \
@@ -630,9 +562,9 @@ do { \
unsigned long dwData; \
dwData = ioread32(iobase + MAC_REG_TXDMACTL0); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
+ iowrite32(DMACTL_WAKE, iobase + MAC_REG_TXDMACTL0); \
else \
- VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
+ iowrite32(DMACTL_RUN, iobase + MAC_REG_TXDMACTL0); \
} while (0)
#define MACvTransmitAC0(iobase) \
@@ -640,28 +572,11 @@ do { \
unsigned long dwData; \
dwData = ioread32(iobase + MAC_REG_AC0DMACTL); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
+ iowrite32(DMACTL_WAKE, iobase + MAC_REG_AC0DMACTL); \
else \
- VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvClearStckDS(iobase) \
-do { \
- unsigned char byOrgValue; \
- byOrgValue = ioread8(iobase + MAC_REG_STICKHW); \
- byOrgValue = byOrgValue & 0xFC; \
- iowrite8(byOrgValue, iobase + MAC_REG_STICKHW); \
+ iowrite32(DMACTL_RUN, iobase + MAC_REG_AC0DMACTL); \
} while (0)
-#define MACvWriteISR(iobase, dwValue) \
- VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
-
-#define MACvIntEnable(iobase, dwMask) \
- VNSvOutPortD(iobase + MAC_REG_IMR, dwMask)
-
-#define MACvIntDisable(iobase) \
- VNSvOutPortD(iobase + MAC_REG_IMR, 0)
-
#define MACvSelectPage0(iobase) \
iowrite8(0, iobase + MAC_REG_PAGE1SEL)
@@ -673,7 +588,7 @@ do { \
unsigned long dwOrgValue; \
dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
dwOrgValue = dwOrgValue | ENCFG_PROTECTMD; \
- VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+ iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
} while (0)
#define MACvDisableProtectMD(iobase) \
@@ -681,7 +596,7 @@ do { \
unsigned long dwOrgValue; \
dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD; \
- VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+ iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
} while (0)
#define MACvEnableBarkerPreambleMd(iobase) \
@@ -689,7 +604,7 @@ do { \
unsigned long dwOrgValue; \
dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM; \
- VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+ iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
} while (0)
#define MACvDisableBarkerPreambleMd(iobase) \
@@ -697,7 +612,7 @@ do { \
unsigned long dwOrgValue; \
dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM; \
- VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+ iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
} while (0)
#define MACvSetBBType(iobase, byTyp) \
@@ -706,15 +621,20 @@ do { \
dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK; \
dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
- VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
+ iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
} while (0)
#define MACvSetRFLE_LatchBase(iobase) \
- MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+ vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
#define MAKEWORD(lb, hb) \
((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
+void vt6655_mac_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask);
+void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask);
+void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask);
+void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask);
+
bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits);
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 06066fa56dd5..8527ad3eff48 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -52,30 +52,30 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
u16 wAID = priv->current_aid | BIT(14) | BIT(15);
/* set period of power up before TBTT */
- VNSvOutPortW(priv->port_offset + MAC_REG_PWBT, C_PWBT);
+ iowrite16(C_PWBT, priv->port_offset + MAC_REG_PWBT);
if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
/* set AID */
- VNSvOutPortW(priv->port_offset + MAC_REG_AIDATIM, wAID);
+ iowrite16(wAID, priv->port_offset + MAC_REG_AIDATIM);
}
/* Set AutoSleep */
- MACvRegBitsOn(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* Set HWUTSF */
- MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
if (wListenInterval >= 2) {
/* clear always listen beacon */
- MACvRegBitsOff(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
/* first time set listen next beacon */
- MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
} else {
/* always listen beacon */
- MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
}
/* enable power saving hw function */
- MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_PSEN);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_PSEN);
priv->bEnablePSMode = true;
priv->bPWBitOn = true;
@@ -98,13 +98,13 @@ void PSvDisablePowerSaving(struct vnt_private *priv)
MACbPSWakeup(priv);
/* clear AutoSleep */
- MACvRegBitsOff(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* clear HWUTSF */
- MACvRegBitsOff(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+ vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
/* set always listen beacon */
- MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
priv->bEnablePSMode = false;
@@ -135,8 +135,7 @@ bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
if (priv->wake_up_count == 1) {
/* Turn on wake up to listen next beacon */
- MACvRegBitsOn(priv->port_offset,
- MAC_REG_PSCTL, PSCTL_LNBCN);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
wake_up = true;
}
}
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index ee5e2e0d9a8c..1fadc2fc4412 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -171,7 +171,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
unsigned short ww;
unsigned long dwValue;
- VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData);
+ iowrite32((u32)dwData, iobase + MAC_REG_IFREGCTL);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
@@ -209,10 +209,10 @@ static bool RFbAL2230Init(struct vnt_private *priv)
/* 3-wire control for normal mode */
iowrite8(0, iobase + MAC_REG_SOFTPWRCTL);
- MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
- SOFTPWRCTL_TXPEINV));
+ vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL,
+ (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV));
/* PLL Off */
- MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ vt6655_mac_word_reg_bits_off(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* patch abnormal AL2230 frequency output */
IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
@@ -222,7 +222,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
/* PLL On */
- MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
MACvTimer0MicroSDelay(priv, 150);/* 150us */
ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
@@ -232,10 +232,10 @@ static bool RFbAL2230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv,
al2230_init_table[CB_AL2230_INIT_SEQ - 1]);
- MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
- SOFTPWRCTL_SWPE2 |
- SOFTPWRCTL_SWPECTI |
- SOFTPWRCTL_TXPEINV));
+ vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
+ SOFTPWRCTL_SWPE2 |
+ SOFTPWRCTL_SWPECTI |
+ SOFTPWRCTL_TXPEINV));
/* 3-wire control for power saving mode */
iowrite8(PSSIG_WPE3 | PSSIG_WPE2, iobase + MAC_REG_PSPWRSIG);
@@ -350,7 +350,7 @@ bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type,
unsigned char sleep_count = 0;
unsigned short idx = MISCFIFO_SYNDATA_IDX;
- VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
+ iowrite16(0, iobase + MAC_REG_MISCFFNDEX);
switch (rf_type) {
case RF_AIROHA:
case RF_AL2230S:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 71cbfa607d96..5bdb5176772c 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1420,11 +1420,11 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
priv->wBCNBufLen = sizeof(*short_head) + skb->len;
- MACvSetCurrBCNTxDescAddr(priv->port_offset, priv->tx_beacon_dma);
+ iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);
- MACvSetCurrBCNLength(priv->port_offset, priv->wBCNBufLen);
+ iowrite16(priv->wBCNBufLen, priv->port_offset + MAC_REG_BCNDMACTL + 2);
/* Set auto Transmit on */
- MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
/* Poll Transmit the adapter */
iowrite8(BEACON_READY, priv->port_offset + MAC_REG_BCNDMACTL);
@@ -1435,7 +1435,7 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
{
struct sk_buff *beacon;
- beacon = ieee80211_beacon_get(priv->hw, vif);
+ beacon = ieee80211_beacon_get(priv->hw, vif, 0);
if (!beacon)
return -ENOMEM;
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 722a2cc9a473..ee5ca4db74dc 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -27,7 +27,7 @@
*
*/
-#include "upc.h"
+#include "device.h"
#include "mac.h"
#include "srom.h"
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
deleted file mode 100644
index 2a47f5782b71..000000000000
--- a/drivers/staging/vt6655/upc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * Purpose: Macros to access device
- *
- * Author: Tevin Chen
- *
- * Date: Mar 17, 1997
- *
- */
-
-#ifndef __UPC_H__
-#define __UPC_H__
-
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/* For memory mapped IO */
-
-#define VNSvOutPortW(dwIOAddress, wData) \
- iowrite16((u16)(wData), dwIOAddress)
-
-#define VNSvOutPortD(dwIOAddress, dwData) \
- iowrite32((u32)(dwData), dwIOAddress)
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-#endif /* __UPC_H__ */
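With upc.h gone, every former VNSvOutPortW()/VNSvOutPortD() call in this series is open-coded as iowrite16()/iowrite32(); the conversion only swaps the argument order (value first, then address), as in this pair taken from card.c above:

	VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST, (u32)qwTSFOffset);	/* old macro form */
	iowrite32((u32)qwTSFOffset, priv->port_offset + MAC_REG_TSFOFST);	/* new direct call */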
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ae7f5916d4d6..897ee0f7fc6b 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -745,11 +745,11 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
static void vnt_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf, u32 changed)
+ struct ieee80211_bss_conf *conf, u64 changed)
{
struct vnt_private *priv = hw->priv;
- priv->current_aid = conf->aid;
+ priv->current_aid = vif->cfg.aid;
if (changed & BSS_CHANGED_BSSID && conf->bssid)
vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
@@ -811,7 +811,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
priv->op_mode != NL80211_IFTYPE_AP) {
- if (conf->assoc && conf->beacon_rate) {
+ if (vif->cfg.assoc && conf->beacon_rate) {
u16 ps_beacon_int = conf->beacon_int;
if (conf->dtim_period)
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 4d29f8ebb393..cd99091c6c28 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -699,7 +699,7 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
{
struct sk_buff *beacon;
- beacon = ieee80211_beacon_get(priv->hw, vif);
+ beacon = ieee80211_beacon_get(priv->hw, vif, 0);
if (!beacon)
return -ENOMEM;
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 87379edce9a8..b7b56d8406d1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -645,7 +645,7 @@ void prism2_disconnected(struct wlandevice *wlandev)
void prism2_roamed(struct wlandevice *wlandev)
{
struct cfg80211_roam_info roam_info = {
- .bssid = wlandev->bssid,
+ .links[0].bssid = wlandev->bssid,
};
cfg80211_roamed(wlandev->netdev, &roam_info, GFP_KERNEL);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index e368f038ff5c..baf4da7bb3b4 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1004,8 +1004,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int data_direction, payload_length;
+ struct iscsi_ecdb_ahdr *ecdb_ahdr;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
+ unsigned char *cdb;
int sam_task_attr;
atomic_long_inc(&conn->sess->cmd_pdus);
@@ -1106,6 +1108,27 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
+ cdb = hdr->cdb;
+
+ if (hdr->hlength) {
+ ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
+ if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
+ pr_err("Additional Header Segment type %d not supported!\n",
+ ecdb_ahdr->ahstype);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
+ }
+
+ cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
+ GFP_KERNEL);
+ if (cdb == NULL)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
+ memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
+ be16_to_cpu(ecdb_ahdr->ahslength) - 1);
+ }
+
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
DMA_NONE;
@@ -1153,9 +1176,12 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr;
dr = iscsit_allocate_datain_req();
- if (!dr)
+ if (!dr) {
+ if (cdb != hdr->cdb)
+ kfree(cdb);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
iscsit_attach_datain_req(cmd, dr);
}
@@ -1176,9 +1202,12 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
- cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
+ cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
GFP_KERNEL);
+ if (cdb != hdr->cdb)
+ kfree(cdb);
+
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
@@ -4036,8 +4065,9 @@ static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
{
int ret;
- u8 *buffer, opcode;
+ u8 *buffer, *tmp_buf, opcode;
u32 checksum = 0, digest = 0;
+ struct iscsi_hdr *hdr;
struct kvec iov;
buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
@@ -4062,6 +4092,25 @@ static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
break;
}
+ hdr = (struct iscsi_hdr *) buffer;
+ if (hdr->hlength) {
+ iov.iov_len = hdr->hlength * 4;
+ tmp_buf = krealloc(buffer,
+ ISCSI_HDR_LEN + iov.iov_len,
+ GFP_KERNEL);
+ if (!tmp_buf)
+ break;
+
+ buffer = tmp_buf;
+ iov.iov_base = &buffer[ISCSI_HDR_LEN];
+
+ ret = rx_data(conn, &iov, 1, iov.iov_len);
+ if (ret != iov.iov_len) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ break;
+ }
+ }
+
if (conn->conn_ops->HeaderDigest) {
iov.iov_base = &digest;
iov.iov_len = ISCSI_CRC_LEN;
@@ -4361,7 +4410,7 @@ int iscsit_close_connection(
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
- pr_debug("Decremented iSCSI connection count to %hu from node:"
+ pr_debug("Decremented iSCSI connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
/*
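A note on the extended-CDB sizing in iscsit_setup_scsi_cmd() above: assuming the conventional iSCSI Extended CDB AHS layout, where ISCSI_CDB_SIZE is the 16-byte basic CDB and ahslength counts one reserved byte plus the CDB continuation, the kmalloc() size works out as:

	/* bytes copied = ISCSI_CDB_SIZE + (ahslength - 1)
	 *              = 16 + ahslength - 1
	 *              = ahslength + 15, which is the allocation used above.
	 */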
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 6e5611d8f51b..c8a248bd11be 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -205,6 +205,38 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
+static const char base64_lookup_table[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+static int chap_base64_decode(u8 *dst, const char *src, size_t len)
+{
+ int i, bits = 0, ac = 0;
+ const char *p;
+ u8 *cp = dst;
+
+ for (i = 0; i < len; i++) {
+ if (src[i] == '=')
+ return cp - dst;
+
+ p = strchr(base64_lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+
+ ac <<= 6;
+ ac += (p - base64_lookup_table);
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = (ac >> (bits - 8)) & 0xff;
+ ac &= ~(BIT(16) - BIT(bits - 8));
+ bits -= 8;
+ }
+ }
+ if (ac)
+ return -1;
+
+ return cp - dst;
+}
+
static int chap_server_compute_hash(
struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
@@ -295,16 +327,27 @@ static int chap_server_compute_hash(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (type != HEX) {
- pr_err("Could not find CHAP_R.\n");
- goto out;
- }
- if (strlen(chap_r) != chap->digest_size * 2) {
- pr_err("Malformed CHAP_R\n");
- goto out;
- }
- if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
- pr_err("Malformed CHAP_R\n");
+
+ switch (type) {
+ case HEX:
+ if (strlen(chap_r) != chap->digest_size * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
+ pr_err("Malformed CHAP_R: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ if (chap_base64_decode(client_digest, chap_r, strlen(chap_r)) !=
+ chap->digest_size) {
+ pr_err("Malformed CHAP_R: invalid BASE64\n");
+ goto out;
+ }
+ break;
+ default:
+ pr_err("Could not find CHAP_R\n");
goto out;
}
@@ -373,7 +416,13 @@ static int chap_server_compute_hash(
/*
* Get CHAP_I.
*/
- if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type);
+ if (ret == -ENOENT) {
+ pr_debug("Could not find CHAP_I. Initiator uses One way authentication.\n");
+ auth_ret = 0;
+ goto out;
+ }
+ if (ret < 0) {
pr_err("Could not find CHAP_I.\n");
goto out;
}
@@ -404,23 +453,46 @@ static int chap_server_compute_hash(
goto out;
}
- if (type != HEX) {
+ switch (type) {
+ case HEX:
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+
+ if (hex2bin(initiatorchg_binhex, initiatorchg,
+ initiatorchg_len) < 0) {
+ pr_err("Malformed CHAP_C: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ initiatorchg_len = chap_base64_decode(initiatorchg_binhex,
+ initiatorchg,
+ strlen(initiatorchg));
+ if (initiatorchg_len < 0) {
+ pr_err("Malformed CHAP_C: invalid BASE64\n");
+ goto out;
+ }
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+ break;
+ default:
pr_err("Could not find CHAP_C.\n");
goto out;
}
- initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
- if (!initiatorchg_len) {
- pr_err("Unable to convert incoming challenge\n");
- goto out;
- }
- if (initiatorchg_len > 1024) {
- pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
- goto out;
- }
- if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
- pr_err("Malformed CHAP_C\n");
- goto out;
- }
+
pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
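As a quick sanity check of chap_base64_decode() added above (values are illustrative only):

	u8 out[4];
	int n;

	n = chap_base64_decode(out, "AAECAw==", 8);
	/* n == 4 and out[] == { 0x00, 0x01, 0x02, 0x03 }: decoding stops at the
	 * first '=' and returns the byte count, while a character outside
	 * base64_lookup_table makes the function return -2.
	 */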
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ce14540ba650..5d0f51822414 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -210,7 +210,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
return ERR_PTR(ret);
}
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return ERR_PTR(-EINVAL);
@@ -281,9 +281,7 @@ static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
char *page) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
- \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
return sprintf(page, "%u\n", nacl->node_attrib.name); \
} \
\
@@ -291,8 +289,7 @@ static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
u32 val; \
int ret; \
\
@@ -317,6 +314,36 @@ ISCSI_NACL_ATTR(random_datain_pdu_offsets);
ISCSI_NACL_ATTR(random_datain_seq_offsets);
ISCSI_NACL_ATTR(random_r2t_offsets);
+static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
+ char *page)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+
+ return sprintf(page, "%d\n", nacl->node_attrib.authentication);
+}
+
+static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret)
+ return ret;
+ if (val != 0 && val != 1 && val != NA_AUTHENTICATION_INHERITED)
+ return -EINVAL;
+
+ nacl->node_attrib.authentication = val;
+
+ return count;
+}
+
+CONFIGFS_ATTR(iscsi_nacl_attrib_, authentication);
+
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_dataout_timeout,
&iscsi_nacl_attrib_attr_dataout_timeout_retries,
@@ -326,6 +353,7 @@ static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_random_datain_pdu_offsets,
&iscsi_nacl_attrib_attr_random_datain_seq_offsets,
&iscsi_nacl_attrib_attr_random_r2t_offsets,
+ &iscsi_nacl_attrib_attr_authentication,
NULL,
};
@@ -377,15 +405,14 @@ static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_show(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_store(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page, count); \
+ return __iscsi_nacl_auth_##name##_store(to_iscsi_nacl(nacl), \
+ page, count); \
} \
\
CONFIGFS_ATTR(iscsi_nacl_auth_, name)
@@ -417,8 +444,7 @@ static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_show(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
\
CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name)
@@ -623,8 +649,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
u32 cmdsn_depth = 0;
int ret;
@@ -700,8 +725,7 @@ static struct configfs_attribute *lio_target_initiator_attrs[] = {
static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
const char *name)
{
- struct iscsi_node_acl *acl =
- container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
@@ -720,8 +744,7 @@ static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
@@ -736,8 +759,7 @@ static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
u32 val; \
int ret; \
\
@@ -800,8 +822,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -813,8 +834,7 @@ static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg,
static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\
const char *page, size_t count) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -861,8 +881,7 @@ DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -900,8 +919,7 @@ static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_param *param; \
ssize_t rb; \
\
@@ -923,8 +941,7 @@ static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
char *buf; \
int ret, len; \
\
@@ -1073,8 +1090,7 @@ free_out:
static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
bool enable)
{
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
int ret;
ret = iscsit_get_tpg(tpg);
@@ -1106,7 +1122,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
tiqn = tpg->tpg_tiqn;
/*
* iscsit_tpg_del_portal_group() assumes force=1
@@ -1416,46 +1432,41 @@ static void lio_aborted_task(struct se_cmd *se_cmd)
cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
-static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg)
-{
- return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
-}
-
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
}
static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpgt;
+ return to_iscsi_tpg(se_tpg)->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
}
static int lio_tpg_check_prot_fabric_only(
@@ -1465,9 +1476,9 @@ static int lio_tpg_check_prot_fabric_only(
* Only report fabric_prot_type if t10_pi has also been enabled
* for incoming ib_isert sessions.
*/
- if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
+ if (!to_iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
return 0;
- return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
}
/*
@@ -1504,16 +1515,14 @@ static void lio_tpg_close_session(struct se_session *se_sess)
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
- struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_acl);
struct se_portal_group *se_tpg = se_acl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
acl->node_attrib.nacl = acl;
iscsit_set_default_node_attribues(acl, tpg);
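The conversions in this file all go through two small wrappers defined in the iSCSI target headers (not visible in these hunks). Judging from the container_of() calls they replace, they presumably reduce to:

static inline struct iscsi_portal_group *to_iscsi_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
}

static inline struct iscsi_node_acl *to_iscsi_nacl(struct se_node_acl *se_nacl)
{
	return container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
}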
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 6b94eecc4790..27e448c2d066 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -341,6 +341,7 @@ static int iscsi_login_zero_tsih_s2(
{
struct iscsi_node_attrib *na;
struct iscsit_session *sess = conn->sess;
+ struct iscsi_param *param;
bool iser = false;
sess->tpg = conn->tpg;
@@ -375,6 +376,18 @@ static int iscsi_login_zero_tsih_s2(
na = iscsit_tpg_get_node_attrib(sess);
/*
+ * If ACL allows non-authorized access in TPG with CHAP,
+ * then set None to AuthMethod.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (param && !strstr(param->value, NONE)) {
+ if (!iscsi_conn_auth_required(conn))
+ if (iscsi_change_param_sprintf(conn, "AuthMethod=%s",
+ NONE))
+ return -1;
+ }
+
+ /*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
* See 5.3.1. Login Phase Start
@@ -715,7 +728,7 @@ void iscsi_post_login_handler(
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu"
+ pr_debug("Incremented iSCSI Connection count to %d"
" from node: %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
@@ -763,7 +776,7 @@ void iscsi_post_login_handler(
spin_lock_bh(&sess->conn_lock);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ pr_debug("Incremented iSCSI Connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
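The two pr_debug() changes above fix a format-specifier mismatch: atomic_read() returns a plain int, so %hu is wrong. A minimal illustration of the corrected pattern (hypothetical helper, not from the patch):

static void example_count_connection(atomic_t *nconn)
{
	atomic_inc(nconn);
	/* atomic_read() returns int, hence %d rather than %hu */
	pr_debug("Incremented connection count to %d\n", atomic_read(nconn));
}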
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index b34ac9ecac31..f2919319ad38 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -62,31 +62,34 @@ int extract_param(
int len;
if (!in_buf || !pattern || !out_buf || !type)
- return -1;
+ return -EINVAL;
ptr = strstr(in_buf, pattern);
if (!ptr)
- return -1;
+ return -ENOENT;
ptr = strstr(ptr, "=");
if (!ptr)
- return -1;
+ return -EINVAL;
ptr += 1;
if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
ptr += 2; /* skip 0x */
*type = HEX;
+ } else if (*ptr == '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) {
+ ptr += 2; /* skip 0b */
+ *type = BASE64;
} else
*type = DECIMAL;
len = strlen_semi(ptr);
if (len < 0)
- return -1;
+ return -EINVAL;
if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
- return -1;
+ return -EINVAL;
}
memcpy(out_buf, ptr, len);
out_buf[len] = '\0';
@@ -94,6 +97,31 @@ int extract_param(
return 0;
}
+static struct iscsi_node_auth *iscsi_get_node_auth(struct iscsit_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType)
+ return &iscsit_global->discovery_acl.node_auth;
+
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for CHAP auth\n");
+ return NULL;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ tpg = to_iscsi_tpg(se_nacl->se_tpg);
+ return &tpg->tpg_demo_auth;
+ }
+
+ nacl = to_iscsi_nacl(se_nacl);
+
+ return &nacl->node_auth;
+}
+
static u32 iscsi_handle_authentication(
struct iscsit_conn *conn,
char *in_buf,
@@ -102,40 +130,11 @@ static u32 iscsi_handle_authentication(
int *out_length,
unsigned char *authtype)
{
- struct iscsit_session *sess = conn->sess;
struct iscsi_node_auth *auth;
- struct iscsi_node_acl *iscsi_nacl;
- struct iscsi_portal_group *iscsi_tpg;
- struct se_node_acl *se_nacl;
-
- if (!sess->sess_ops->SessionType) {
- /*
- * For SessionType=Normal
- */
- se_nacl = conn->sess->se_sess->se_node_acl;
- if (!se_nacl) {
- pr_err("Unable to locate struct se_node_acl for"
- " CHAP auth\n");
- return -1;
- }
-
- if (se_nacl->dynamic_node_acl) {
- iscsi_tpg = container_of(se_nacl->se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
-
- auth = &iscsi_tpg->tpg_demo_auth;
- } else {
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
- auth = &iscsi_nacl->node_auth;
- }
- } else {
- /*
- * For SessionType=Discovery
- */
- auth = &iscsit_global->discovery_acl.node_auth;
- }
+ auth = iscsi_get_node_auth(conn);
+ if (!auth)
+ return -1;
if (strstr("CHAP", authtype))
strcpy(conn->sess->auth_type, "CHAP");
@@ -815,6 +814,42 @@ static int iscsi_target_do_authentication(
return 0;
}
+bool iscsi_conn_auth_required(struct iscsit_conn *conn)
+{
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Discovery
+ */
+ return conn->tpg->tpg_attrib.authentication;
+ }
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_debug("Unknown ACL is trying to connect\n");
+ return true;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ pr_debug("Dynamic ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+ return conn->tpg->tpg_attrib.authentication;
+ }
+
+ pr_debug("Known ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+
+ nacl = to_iscsi_nacl(se_nacl);
+ if (nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED)
+ return conn->tpg->tpg_attrib.authentication;
+
+ return nacl->node_attrib.authentication;
+}
+
static int iscsi_target_handle_csg_zero(
struct iscsit_conn *conn,
struct iscsi_login *login)
@@ -876,22 +911,26 @@ static int iscsi_target_handle_csg_zero(
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (conn->tpg->tpg_attrib.authentication &&
- !strncmp(param->value, NONE, 4)) {
- pr_err("Initiator sent AuthMethod=None but"
- " Target is enforcing iSCSI Authentication,"
- " login failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_AUTH_FAILED);
- return -1;
- }
+ bool auth_required = iscsi_conn_auth_required(conn);
+
+ if (auth_required) {
+ if (!strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
- if (conn->tpg->tpg_attrib.authentication &&
- !login->auth_complete)
- return 0;
+ if (!login->auth_complete)
+ return 0;
- if (strncmp(param->value, NONE, 4) && !login->auth_complete)
- return 0;
+ if (strncmp(param->value, NONE, 4) &&
+ !login->auth_complete)
+ return 0;
+ }
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
@@ -906,6 +945,18 @@ do_auth:
return iscsi_target_do_authentication(conn, login);
}
+static bool iscsi_conn_authenticated(struct iscsit_conn *conn,
+ struct iscsi_login *login)
+{
+ if (!iscsi_conn_auth_required(conn))
+ return true;
+
+ if (login->auth_complete)
+ return true;
+
+ return false;
+}
+
static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login)
{
int ret;
@@ -949,11 +1000,10 @@ static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_lo
return -1;
}
- if (!login->auth_complete &&
- conn->tpg->tpg_attrib.authentication) {
+ if (!iscsi_conn_authenticated(conn, login)) {
pr_err("Initiator is requesting CSG: 1, has not been"
- " successfully authenticated, and the Target is"
- " enforcing iSCSI Authentication, login failed.\n");
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
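With extract_param() now returning distinct error codes, callers can tell "key absent" apart from "key malformed", which is what the CHAP_I handling earlier in the series relies on. A minimal caller sketch (hypothetical wrapper; parameter types abbreviated to match the driver's extract_param()):

static int example_lookup_chap_i(char *nr_in_ptr, char *identifier, int *type)
{
	int ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, type);

	if (ret == -ENOENT)
		return 0;	/* key absent: fall back to one-way authentication */
	if (ret < 0)
		return ret;	/* key present but malformed */
	return 1;		/* CHAP_I parsed into identifier/type */
}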
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index ed30b9ee75e6..41c3db3ddeaa 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -4,6 +4,7 @@
#define DECIMAL 0
#define HEX 1
+#define BASE64 2
struct iscsit_conn;
struct iscsi_login;
@@ -21,5 +22,5 @@ extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
extern int iscsi_target_start_negotiation(
struct iscsi_login *, struct iscsit_conn *);
extern void iscsi_target_nego_release(struct iscsit_conn *);
-
+extern bool iscsi_conn_auth_required(struct iscsit_conn *conn);
#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 874cb33c9be0..d63efdefb18e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -30,6 +30,7 @@ void iscsit_set_default_node_attribues(
{
struct iscsi_node_attrib *a = &acl->node_attrib;
+ a->authentication = NA_AUTHENTICATION_INHERITED;
a->dataout_timeout = NA_DATAOUT_TIMEOUT;
a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
a->nopin_timeout = NA_NOPIN_TIMEOUT;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4339ee517434..3cac1aafef68 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -394,8 +394,7 @@ struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
{
struct se_session *se_sess = sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
- struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
return &acl->node_attrib;
}
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index b56ef8af66e7..fb91423a4e2e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -385,7 +385,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
- * the Target Port in question for the the incoming
+ * the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
rtpi = get_unaligned_be16(ptr + 2);
@@ -934,8 +934,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
- lacl = rcu_dereference_check(se_deve->se_lun_acl,
- lockdep_is_held(&lun->lun_deve_lock));
+ lacl = se_deve->se_lun_acl;
/*
* spc4r37 p.242:
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bbcbbfa72b07..416514c5c7ac 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -732,6 +732,7 @@ static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -744,8 +745,11 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
}
da->emulate_tpu = flag;
@@ -758,6 +762,7 @@ static ssize_t emulate_tpws_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -770,8 +775,11 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
}
da->emulate_tpws = flag;
@@ -964,6 +972,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -982,10 +991,12 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
- " because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
+ da->da_dev);
+ return -ENOSYS;
+ }
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 25f33eb25337..b7f16ee8aa0e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -75,7 +75,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
return TCM_WRITE_PROTECTED;
}
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -152,7 +152,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -216,7 +216,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- lun = rcu_dereference(deve->se_lun);
+ lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
@@ -243,11 +243,8 @@ void core_free_device_list_for_node(
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
- hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
- core_disable_device_list_for_node(lun, deve, nacl, tpg);
- }
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
}
@@ -334,8 +331,7 @@ int core_enable_device_list_for_node(
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
- struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
+ struct se_lun *orig_lun = orig->se_lun;
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
@@ -355,8 +351,8 @@ int core_enable_device_list_for_node(
return -EINVAL;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -374,8 +370,8 @@ int core_enable_device_list_for_node(
return 0;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -434,9 +430,6 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- rcu_assign_pointer(orig->se_lun, NULL);
- rcu_assign_pointer(orig->se_lun_acl, NULL);
-
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
@@ -457,10 +450,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
-
- if (lun != tmp_lun)
+ if (lun != deve->se_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
@@ -960,6 +950,12 @@ int target_configure_device(struct se_device *dev)
ret = dev->transport->configure_device(dev);
if (ret)
goto out_free_index;
+
+ if (dev->transport->configure_unmap &&
+ dev->transport->configure_unmap(dev)) {
+ pr_debug("Discard support available, but disabled by default.\n");
+ }
+
/*
* XXX: there is not much point to have two different values here..
*/
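Since se_lun and se_lun_acl are now plain pointers whose lifetime is covered by lun_entry_mutex and the deve's pr_kref, updates no longer need rcu_assign_pointer()/rcu_dereference_check(). A minimal sketch of the simplified update path, using only fields visible in the hunks above:

static void example_install_deve(struct se_node_acl *nacl,
				 struct se_dev_entry *new,
				 struct se_lun *lun,
				 struct se_lun_acl *lacl)
{
	mutex_lock(&nacl->lun_entry_mutex);
	new->se_lun = lun;		/* plain assignment, protected by the mutex */
	new->se_lun_acl = lacl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);
}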
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e68f1cc8ef98..28aa643be5d5 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -86,6 +86,24 @@ static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
return &fd_dev->dev;
}
+static bool fd_configure_unmap(struct se_device *dev)
+{
+ struct file *file = FD_DEV(dev)->fd_file;
+ struct inode *inode = file->f_mapping->host;
+
+ if (S_ISBLK(inode->i_mode))
+ return target_configure_unmap_from_queue(&dev->dev_attrib,
+ I_BDEV(inode));
+
+ /* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
+ dev->dev_attrib.max_unmap_lba_count = 0x2000;
+ /* Currently hardcoded to 1 in Linux/SCSI code. */
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity = 1;
+ dev->dev_attrib.unmap_granularity_alignment = 0;
+ return true;
+}
+
static int fd_configure_device(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
@@ -149,10 +167,6 @@ static int fd_configure_device(struct se_device *dev)
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
-
- if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
- pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -170,16 +184,6 @@ static int fd_configure_device(struct se_device *dev)
}
fd_dev->fd_block_size = FD_BLOCKSIZE;
- /*
- * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
- */
- dev->dev_attrib.max_unmap_lba_count = 0x2000;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity = 1;
- dev->dev_attrib.unmap_granularity_alignment = 0;
/*
* Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
@@ -438,16 +442,15 @@ fd_execute_write_same(struct se_cmd *cmd)
unsigned int len = 0, i;
ssize_t ret;
- if (!nolb) {
- target_complete_cmd(cmd, SAM_STAT_GOOD);
- return 0;
- }
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with FILEIO"
" backends not supported\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+ if (!cmd->t_data_nents)
+ return TCM_INVALID_CDB_FIELD;
+
if (cmd->t_data_nents > 1 ||
cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
@@ -924,6 +927,7 @@ static const struct target_backend_ops fileio_ops = {
.configure_device = fd_configure_device,
.destroy_device = fd_destroy_device,
.free_device = fd_free_device,
+ .configure_unmap = fd_configure_unmap,
.parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 378c80313a0f..8351c974cee3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -76,6 +76,14 @@ free_dev:
return NULL;
}
+static bool iblock_configure_unmap(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ return target_configure_unmap_from_queue(&dev->dev_attrib,
+ ib_dev->ibd_bd);
+}
+
static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -119,10 +127,6 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
- pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
-
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -343,7 +347,7 @@ static void iblock_bio_done(struct bio *bio)
}
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
- unsigned int opf)
+ blk_opf_t opf)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -494,6 +498,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
" backends not supported\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
+ if (!cmd->t_data_nents)
+ return TCM_INVALID_CDB_FIELD;
+
sg = &cmd->t_data_sg[0];
if (cmd->t_data_nents > 1 ||
@@ -719,7 +727,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
- unsigned int opf;
+ blk_opf_t opf;
unsigned bio_cnt;
int i, rc;
struct sg_mapping_iter prot_miter;
@@ -899,6 +907,7 @@ static const struct target_backend_ops iblock_ops = {
.configure_device = iblock_configure_device,
.destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
+ .configure_unmap = iblock_configure_unmap,
.plug_device = iblock_plug_device,
.unplug_device = iblock_unplug_device,
.parse_cdb = iblock_parse_cdb,
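The opf parameter changes above move from unsigned int to blk_opf_t, the dedicated bitwise type for REQ_OP_* plus REQ_* flags, so sparse can flag accidental mixing with plain integers. A minimal sketch of how such a value is built (hypothetical helper):

static blk_opf_t example_write_opf(bool fua)
{
	blk_opf_t opf = REQ_OP_WRITE;

	if (fua)
		opf |= REQ_FUA;	/* flags and op share the same bitwise type */
	return opf;
}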
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3829b61b56c1..a1d67554709f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -739,8 +739,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (!deve_tmp->se_lun_acl)
continue;
- lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
- lockdep_is_held(&lun_tmp->lun_deve_lock));
+ lacl_tmp = deve_tmp->se_lun_acl;
nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
@@ -784,8 +783,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
- dest_lun = rcu_dereference_check(deve_tmp->se_lun,
- kref_read(&deve_tmp->pr_kref) != 0);
+ dest_lun = deve_tmp->se_lun;
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, dest_lun, deve_tmp,
@@ -1437,34 +1435,26 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl)
+ if (!se_deve->se_lun_acl)
return 0;
- return target_depend_item(&lun_acl->se_lun_group.cg_item);
+ return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl) {
+ if (!se_deve->se_lun_acl) {
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
- target_undepend_item(&lun_acl->se_lun_group.cg_item);
+ target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
@@ -1751,8 +1741,7 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ dest_lun = dest_se_deve->se_lun;
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_lun, dest_se_deve,
@@ -3446,8 +3435,7 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ struct se_lun *dest_lun = dest_se_deve->se_lun;
spin_unlock(&dev->dev_reservation_lock);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ca1b2312d6e7..1e3216de1e04 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -312,6 +312,12 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
pr_warn("WRITE SAME with ANCHOR not supported\n");
return TCM_INVALID_CDB_FIELD;
}
+
+ if (flags & 0x01) {
+ pr_warn("WRITE SAME with NDOB not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
/*
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
@@ -339,68 +345,6 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
return 0;
}
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
- int *post_ret)
-{
- unsigned char *buf, *addr;
- struct scatterlist *sg;
- unsigned int offset;
- sense_reason_t ret = TCM_NO_SENSE;
- int i, count;
-
- if (!success)
- return 0;
-
- /*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
- */
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
- return TCM_OUT_OF_RESOURCES;
- }
- /*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
- */
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
-
- /*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
- */
-
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg));
- if (!addr) {
- ret = TCM_OUT_OF_RESOURCES;
- goto out;
- }
-
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
-
- offset += sg->length;
- kunmap_atomic(addr);
- }
-
-out:
- kfree(buf);
- return ret;
-}
-
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
@@ -927,47 +871,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
- case XDWRITEREAD_10:
- if (cmd->data_direction != DMA_TO_DEVICE ||
- !(cmd->se_cmd_flags & SCF_BIDI))
- return TCM_INVALID_CDB_FIELD;
- sectors = transport_get_sectors_10(cdb);
-
- if (sbc_check_dpofua(dev, cmd, cdb))
- return TCM_INVALID_CDB_FIELD;
-
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->execute_cmd = sbc_execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb);
-
- if (sbc_check_dpofua(dev, cmd, cdb))
- return TCM_INVALID_CDB_FIELD;
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->execute_cmd = sbc_execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
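In the WRITE SAME CDB, byte 1 carries ANCHOR (bit 4) and NDOB (bit 0, "no data-out buffer"); the new check rejects NDOB the same way ANCHOR already was, since the emulation needs a data-out buffer to replicate. A minimal decoding sketch (hypothetical helper, mirroring the checks above):

static sense_reason_t example_check_write_same_flags(unsigned char flags)
{
	if (flags & 0x10)	/* ANCHOR not supported */
		return TCM_INVALID_CDB_FIELD;
	if (flags & 0x01)	/* NDOB not supported */
		return TCM_INVALID_CDB_FIELD;
	return 0;
}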
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 62d15bcc3d93..f85ee5b0fd80 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -877,7 +877,6 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -886,9 +885,9 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
@@ -1217,7 +1216,6 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -1226,9 +1224,9 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6bb20aa9c5bc..8713cda0c2fb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
struct se_device *this_dev;
int rc;
- this_lun = rcu_dereference(deve->se_lun);
+ this_lun = deve->se_lun;
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index c60896cf71cb..73b5e7760d10 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -189,7 +189,7 @@ struct optee_smc_call_get_os_revision_result {
* Have config return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 Physical address of start of SHM
- * a2 Size of of SHM
+ * a2 Size of SHM
* a3 Cache settings of memory, as defined by the
* OPTEE_SMC_SHM_* values above
* a4-7 Preserved
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 385cb0aee610..a1c1fa1a9c28 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -884,8 +884,8 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
- if (IS_ERR(arg))
- return PTR_ERR(arg);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
}
if (rpc_arg && tee_shm_is_dynamic(shm)) {
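The fix above corrects a copy-and-paste error: the error check must test the pointer that tee_shm_get_va() just returned, not the earlier arg. A minimal sketch of the corrected pattern (hypothetical helper):

static int example_map_rpc_arg(struct tee_shm *shm, size_t offs,
			       size_t rpc_arg_offs, struct optee_msg_arg **out)
{
	struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);

	if (IS_ERR(rpc_arg))
		return PTR_ERR(rpc_arg);	/* check the value just returned */
	*out = rpc_arg;
	return 0;
}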
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index af0f7c603fa4..98da206cd761 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1073,7 +1073,7 @@ EXPORT_SYMBOL_GPL(tee_device_unregister);
/**
* tee_get_drvdata() - Return driver_data pointer
* @teedev: Device containing the driver_data pointer
- * @returns the driver_data pointer supplied to tee_register().
+ * @returns the driver_data pointer supplied to tee_device_alloc().
*/
void *tee_get_drvdata(struct tee_device *teedev)
{
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f2b1bcefcadd..1175f3a46859 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -326,6 +326,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
void *ret;
int id;
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
mutex_lock(&teedev->mutex);
id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
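The added access_ok() call rejects kernel addresses and wrapping ranges before any pages are pinned for the user-supplied shared-memory buffer. A minimal sketch of that validation step (hypothetical helper):

static int example_validate_user_range(unsigned long addr, size_t length)
{
	if (!access_ok((void __user *)addr, length))
		return -EFAULT;	/* not a valid user-space range */
	return 0;
}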
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e5cc948373c..e052dae614eb 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -221,7 +221,7 @@ config THERMAL_EMULATION
config THERMAL_MMIO
tristate "Generic Thermal MMIO driver"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
help
This option enables the generic thermal MMIO driver that will use
@@ -496,7 +496,7 @@ config SPRD_THERMAL
config KHADAS_MCU_FAN_THERMAL
tristate "Khadas MCU controller FAN cooling support"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on MFD_KHADAS_MCU
select MFD_CORE
select REGMAP
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index b8151d95a806..b76293cc989c 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -21,6 +21,7 @@
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include <trace/events/thermal.h>
@@ -59,6 +60,7 @@ struct time_in_idle {
* @cdev: thermal_cooling_device pointer to keep track of the
* registered cooling device.
* @policy: cpufreq policy.
+ * @cooling_ops: cpufreq callbacks to thermal cooling device ops
* @idle_time: idle time stats
* @qos_req: PM QoS constraint to apply
*
@@ -71,6 +73,7 @@ struct cpufreq_cooling_device {
unsigned int max_level;
struct em_perf_domain *em;
struct cpufreq_policy *policy;
+ struct thermal_cooling_device_ops cooling_ops;
#ifndef CONFIG_SMP
struct time_in_idle *idle_time;
#endif
@@ -101,6 +104,7 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
u32 freq)
{
+ unsigned long power_mw;
int i;
for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
@@ -108,16 +112,23 @@ static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
break;
}
- return cpufreq_cdev->em->table[i + 1].power;
+ power_mw = cpufreq_cdev->em->table[i + 1].power;
+ power_mw /= MICROWATT_PER_MILLIWATT;
+
+ return power_mw;
}
static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
u32 power)
{
+ unsigned long em_power_mw;
int i;
for (i = cpufreq_cdev->max_level; i > 0; i--) {
- if (power >= cpufreq_cdev->em->table[i].power)
+ /* Convert EM power to milli-Watts to make safe comparison */
+ em_power_mw = cpufreq_cdev->em->table[i].power;
+ em_power_mw /= MICROWATT_PER_MILLIWATT;
+ if (power >= em_power_mw)
break;
}
@@ -137,11 +148,9 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
int cpu_idx)
{
- unsigned long max = arch_scale_cpu_capacity(cpu);
- unsigned long util;
+ unsigned long util = sched_cpu_util(cpu);
- util = sched_cpu_util(cpu, max);
- return (util * 100) / max;
+ return (util * 100) / arch_scale_cpu_capacity(cpu);
}
#else /* !CONFIG_SMP */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
@@ -204,7 +213,7 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
* complex code may be needed if experiments show that it's not
* accurate enough.
*
- * Return: 0 on success, -E* if getting the static power failed.
+ * Return: 0 on success, this function doesn't fail.
*/
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 *power)
@@ -214,16 +223,9 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 total_load = 0;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
- u32 *load_cpu = NULL;
freq = cpufreq_quick_get(policy->cpu);
- if (trace_thermal_power_cpu_get_power_enabled()) {
- u32 ncpus = cpumask_weight(policy->related_cpus);
-
- load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
- }
-
for_each_cpu(cpu, policy->related_cpus) {
u32 load;
@@ -233,22 +235,13 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
load = 0;
total_load += load;
- if (load_cpu)
- load_cpu[i] = load;
-
- i++;
}
cpufreq_cdev->last_load = total_load;
*power = get_dynamic_power(cpufreq_cdev, freq);
- if (load_cpu) {
- trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
- load_cpu, i, *power);
-
- kfree(load_cpu);
- }
+ trace_thermal_power_cpu_get_power_simple(policy->cpu, *power);
return 0;
}
@@ -263,9 +256,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
* milliwatts assuming 100% load. Store the calculated power in
* @power.
*
- * Return: 0 on success, -EINVAL if the cooling device state could not
- * be converted into a frequency or other -E* if there was an error
- * when calculating the static power.
+ * Return: 0 on success, -EINVAL if the cooling device state is bigger
+ * than maximum allowed.
*/
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
@@ -295,15 +287,11 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
* Calculate a cooling device state for the cpus described by @cdev
* that would allow them to consume at most @power mW and store it in
* @state. Note that this calculation depends on external factors
- * such as the cpu load or the current static power. Calling this
- * function with the same power as input can yield different cooling
- * device states depending on those external factors.
- *
- * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
- * the calculated frequency could not be converted to a valid state.
- * The latter should not happen unless the frequencies available to
- * cpufreq have changed since the initialization of the cpu cooling
- * device.
+ * such as the CPUs load. Calling this function with the same power
+ * as input can yield different cooling device states depending on those
+ * external factors.
+ *
+ * Return: 0 on success, this function doesn't fail.
*/
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
u32 power, unsigned long *state)
@@ -415,7 +403,7 @@ static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
* Callback for the thermal cooling device to return the cpufreq
* max cooling state.
*
- * Return: 0 on success, an error code otherwise.
+ * Return: 0 on success, this function doesn't fail.
*/
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
@@ -434,7 +422,7 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
* Callback for the thermal cooling device to return the cpufreq
* current cooling state.
*
- * Return: 0 on success, an error code otherwise.
+ * Return: 0 on success, this function doesn't fail.
*/
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
@@ -485,14 +473,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
return ret;
}
-/* Bind cpufreq callbacks to thermal cooling device ops */
-
-static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
- .get_max_state = cpufreq_get_max_state,
- .get_cur_state = cpufreq_get_cur_state,
- .set_cur_state = cpufreq_set_cur_state,
-};
-
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
@@ -501,7 +481,7 @@ static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
* @em: Energy Model of the cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
- * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * "cpufreq-%s". This API can support multiple instances of cpufreq
* cooling devices. It also gives the opportunity to link the cooling device
* with a device tree node, in order to bind it via the thermal DT code.
*
@@ -554,7 +534,10 @@ __cpufreq_cooling_register(struct device_node *np,
/* max_level is an index, not a counter */
cpufreq_cdev->max_level = i - 1;
- cooling_ops = &cpufreq_cooling_ops;
+ cooling_ops = &cpufreq_cdev->cooling_ops;
+ cooling_ops->get_max_state = cpufreq_get_max_state;
+ cooling_ops->get_cur_state = cpufreq_get_cur_state;
+ cooling_ops->set_cur_state = cpufreq_set_cur_state;
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
if (em_is_sane(cpufreq_cdev, em)) {
@@ -609,8 +592,8 @@ free_cdev:
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
- * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
- * cooling devices.
+ * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
+ * devices.
*
* Return: a valid struct thermal_cooling_device pointer on success,
* on failure, it returns a corresponding ERR_PTR().
@@ -627,17 +610,14 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
- * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
- * cooling devices. Using this API, the cpufreq cooling device will be
- * linked to the device tree node provided.
+ * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
+ * devices. Using this API, the cpufreq cooling device will be linked to the
+ * device tree node provided.
*
* Using this function, the cooling device will implement the power
- * extensions by using a simple cpu power model. The cpus must have
+ * extensions by using the Energy Model (if present). The cpus must have
* registered their OPPs using the OPP library.
*
- * It also takes into account, if property present in policy CPU node, the
- * static power consumed by the cpu.
- *
* Return: a valid struct thermal_cooling_device pointer on success,
* and NULL on failure.
*/
@@ -673,7 +653,7 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*
- * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
+ * This interface function unregisters the "cpufreq-%x" cooling device.
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
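The power conversions added in this file account for the Energy Model reporting power in micro-Watts, while the thermal framework budgets in milli-Watts; every EM value is therefore scaled down before use or comparison. A minimal sketch (MICROWATT_PER_MILLIWATT comes from <linux/units.h>; the helper name is illustrative):

static unsigned long example_em_power_mw(struct em_perf_domain *em, int idx)
{
	unsigned long power_mw = em->table[idx].power;	/* micro-Watts */

	return power_mw / MICROWATT_PER_MILLIWATT;	/* milli-Watts */
}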
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 21d4d6e6409a..121cf853e545 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -53,7 +53,6 @@ static const unsigned long db8500_thermal_points[] = {
struct db8500_thermal_zone {
struct thermal_zone_device *tz;
- enum thermal_trend trend;
unsigned long interpolated_temp;
unsigned int cur_index;
};
@@ -73,24 +72,12 @@ static int db8500_thermal_get_temp(void *data, int *temp)
return 0;
}
-/* Callback to get temperature changing trend */
-static int db8500_thermal_get_trend(void *data, int trip, enum thermal_trend *trend)
-{
- struct db8500_thermal_zone *th = data;
-
- *trend = th->trend;
-
- return 0;
-}
-
static struct thermal_zone_of_device_ops thdev_ops = {
.get_temp = db8500_thermal_get_temp,
- .get_trend = db8500_thermal_get_trend,
};
static void db8500_thermal_update_config(struct db8500_thermal_zone *th,
unsigned int idx,
- enum thermal_trend trend,
unsigned long next_low,
unsigned long next_high)
{
@@ -98,7 +85,6 @@ static void db8500_thermal_update_config(struct db8500_thermal_zone *th,
th->cur_index = idx;
th->interpolated_temp = (next_low + next_high)/2;
- th->trend = trend;
/*
* The PRCMU accept absolute temperatures in celsius so divide
@@ -127,8 +113,7 @@ static irqreturn_t prcmu_low_irq_handler(int irq, void *irq_data)
}
idx -= 1;
- db8500_thermal_update_config(th, idx, THERMAL_TREND_DROPPING,
- next_low, next_high);
+ db8500_thermal_update_config(th, idx, next_low, next_high);
dev_dbg(&th->tz->device,
"PRCMU set max %ld, min %ld\n", next_high, next_low);
@@ -149,8 +134,7 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
next_low = db8500_thermal_points[idx];
idx += 1;
- db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
- next_low, next_high);
+ db8500_thermal_update_config(th, idx, next_low, next_high);
dev_dbg(&th->tz->device,
"PRCMU set max %ld, min %ld\n", next_high, next_low);
@@ -174,10 +158,8 @@ static int db8500_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
- if (low_irq < 0) {
- dev_err(dev, "Get IRQ_HOTMON_LOW failed\n");
+ if (low_irq < 0)
return low_irq;
- }
ret = devm_request_threaded_irq(dev, low_irq, NULL,
prcmu_low_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
@@ -188,10 +170,8 @@ static int db8500_thermal_probe(struct platform_device *pdev)
}
high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
- if (high_irq < 0) {
- dev_err(dev, "Get IRQ_HOTMON_HIGH failed\n");
+ if (high_irq < 0)
return high_irq;
- }
ret = devm_request_threaded_irq(dev, high_irq, NULL,
prcmu_high_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
@@ -210,8 +190,7 @@ static int db8500_thermal_probe(struct platform_device *pdev)
dev_info(dev, "thermal zone sensor registered\n");
/* Start measuring at the lowest point */
- db8500_thermal_update_config(th, 0, THERMAL_TREND_STABLE,
- PRCMU_DEFAULT_LOW_TEMP,
+ db8500_thermal_update_config(th, 0, PRCMU_DEFAULT_LOW_TEMP,
db8500_thermal_points[0]);
platform_set_drvdata(pdev, th);
@@ -232,8 +211,7 @@ static int db8500_thermal_resume(struct platform_device *pdev)
struct db8500_thermal_zone *th = platform_get_drvdata(pdev);
/* Resume and start measuring at the lowest point */
- db8500_thermal_update_config(th, 0, THERMAL_TREND_STABLE,
- PRCMU_DEFAULT_LOW_TEMP,
+ db8500_thermal_update_config(th, 0, PRCMU_DEFAULT_LOW_TEMP,
db8500_thermal_points[0]);
return 0;
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 8c76f9655e57..24b474925cd6 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -28,6 +28,7 @@
* struct devfreq_cooling_device - Devfreq cooling device
* devfreq_cooling_device registered.
* @cdev: Pointer to associated thermal cooling device.
+ * @cooling_ops: devfreq callbacks to thermal cooling device ops
* @devfreq: Pointer to associated devfreq device.
* @cooling_state: Current cooling state.
* @freq_table: Pointer to a table with the frequencies sorted in descending
@@ -48,6 +49,7 @@
*/
struct devfreq_cooling_device {
struct thermal_cooling_device *cdev;
+ struct thermal_cooling_device_ops cooling_ops;
struct devfreq *devfreq;
unsigned long cooling_state;
u32 *freq_table;
@@ -200,7 +202,11 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
res = dfc->power_ops->get_real_power(df, power, freq, voltage);
if (!res) {
state = dfc->capped_state;
+
+ /* Convert EM power into milli-Watts first */
dfc->res_util = dfc->em_pd->table[state].power;
+ dfc->res_util /= MICROWATT_PER_MILLIWATT;
+
dfc->res_util *= SCALE_ERROR_MITIGATION;
if (*power > 1)
@@ -218,8 +224,10 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
_normalize_load(&status);
- /* Scale power for utilization */
+ /* Convert EM power into milli-Watts first */
*power = dfc->em_pd->table[perf_idx].power;
+ *power /= MICROWATT_PER_MILLIWATT;
+ /* Scale power for utilization */
*power *= status.busy_time;
*power >>= 10;
}
@@ -244,6 +252,7 @@ static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
perf_idx = dfc->max_state - state;
*power = dfc->em_pd->table[perf_idx].power;
+ *power /= MICROWATT_PER_MILLIWATT;
return 0;
}
@@ -254,7 +263,7 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
struct devfreq_cooling_device *dfc = cdev->devdata;
struct devfreq *df = dfc->devfreq;
struct devfreq_dev_status status;
- unsigned long freq;
+ unsigned long freq, em_power_mw;
s32 est_power;
int i;
@@ -279,9 +288,13 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
* Find the first cooling state that is within the power
* budget. The EM power table is sorted ascending.
*/
- for (i = dfc->max_state; i > 0; i--)
- if (est_power >= dfc->em_pd->table[i].power)
+ for (i = dfc->max_state; i > 0; i--) {
+ /* Convert EM power to milli-Watts to make safe comparison */
+ em_power_mw = dfc->em_pd->table[i].power;
+ em_power_mw /= MICROWATT_PER_MILLIWATT;
+ if (est_power >= em_power_mw)
break;
+ }
*state = dfc->max_state - i;
dfc->capped_state = *state;
@@ -290,12 +303,6 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
return 0;
}
-static struct thermal_cooling_device_ops devfreq_cooling_ops = {
- .get_max_state = devfreq_cooling_get_max_state,
- .get_cur_state = devfreq_cooling_get_cur_state,
- .set_cur_state = devfreq_cooling_set_cur_state,
-};
-
/**
* devfreq_cooling_gen_tables() - Generate frequency table.
* @dfc: Pointer to devfreq cooling device.
@@ -363,18 +370,18 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
char *name;
int err, num_opps;
- ops = kmemdup(&devfreq_cooling_ops, sizeof(*ops), GFP_KERNEL);
- if (!ops)
- return ERR_PTR(-ENOMEM);
dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
- if (!dfc) {
- err = -ENOMEM;
- goto free_ops;
- }
+ if (!dfc)
+ return ERR_PTR(-ENOMEM);
dfc->devfreq = df;
+ ops = &dfc->cooling_ops;
+ ops->get_max_state = devfreq_cooling_get_max_state;
+ ops->get_cur_state = devfreq_cooling_get_cur_state;
+ ops->set_cur_state = devfreq_cooling_set_cur_state;
+
em = em_pd_get(dev);
if (em && !em_is_artificial(em)) {
dfc->em_pd = em;
@@ -437,8 +444,6 @@ free_table:
kfree(dfc->freq_table);
free_dfc:
kfree(dfc);
-free_ops:
- kfree(ops);
return ERR_PTR(err);
}
@@ -520,13 +525,11 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_em_register);
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
struct devfreq_cooling_device *dfc;
- const struct thermal_cooling_device_ops *ops;
struct device *dev;
if (IS_ERR_OR_NULL(cdev))
return;
- ops = cdev->ops;
dfc = cdev->devdata;
dev = dfc->devfreq->dev.parent;
@@ -537,6 +540,5 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
kfree(dfc->freq_table);
kfree(dfc);
- kfree(ops);
}
EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
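
[Editor's note] The hunks above account for the energy-model tables now storing power in microwatts while the thermal framework keeps working in milliwatts, hence the repeated MICROWATT_PER_MILLIWATT division. A standalone sketch of that conversion (helper name is hypothetical):

#include <linux/energy_model.h>
#include <linux/units.h>	/* MICROWATT_PER_MILLIWATT == 1000 */

/* Sketch: convert one EM performance state's power from uW to mW. */
static unsigned long example_em_power_mw(struct em_perf_domain *em_pd,
					 int perf_idx)
{
	unsigned long power_uw = em_pd->table[perf_idx].power;

	return power_uw / MICROWATT_PER_MILLIWATT;
}

Embedding cooling_ops inside struct devfreq_cooling_device also removes the kmemdup()/kfree() of the ops table, which is why the free_ops label disappears above.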
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index 1e5abf4822be..6a2abcfc648f 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -25,10 +25,10 @@ static int get_trip_level(struct thermal_zone_device *tz)
int trip_temp;
enum thermal_trip_type trip_type;
- if (tz->trips == 0 || !tz->ops->get_trip_temp)
+ if (tz->num_trips == 0 || !tz->ops->get_trip_temp)
return 0;
- for (count = 0; count < tz->trips; count++) {
+ for (count = 0; count < tz->num_trips; count++) {
tz->ops->get_trip_temp(tz, count, &trip_temp);
if (tz->temperature < trip_temp)
break;
@@ -53,7 +53,7 @@ static long get_target_state(struct thermal_zone_device *tz,
cdev->ops->get_max_state(cdev, &max_state);
- return (long)(percentage * level * max_state) / (100 * tz->trips);
+ return (long)(percentage * level * max_state) / (100 * tz->num_trips);
}
/**
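
[Editor's note] The fair-share target only changes in that it now divides by num_trips; the formula itself is unchanged. A sketch with made-up numbers to show the scaling:

/*
 * Sketch of the fair-share target with hypothetical inputs:
 * percentage = 50 (weight share), level = 2 (trips crossed),
 * max_state = 10, num_trips = 3  ->  (50 * 2 * 10) / (100 * 3) = 3.
 */
static long example_fair_share_target(int percentage, int level,
				      unsigned long max_state, int num_trips)
{
	return (long)(percentage * level * max_state) / (100 * num_trips);
}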
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 13e375751d22..1d5052470967 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -527,7 +527,7 @@ static void get_governor_trips(struct thermal_zone_device *tz,
last_active = INVALID_TRIP;
last_passive = INVALID_TRIP;
- for (i = 0; i < tz->trips; i++) {
+ for (i = 0; i < tz->num_trips; i++) {
enum thermal_trip_type type;
int ret;
@@ -668,7 +668,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
get_governor_trips(tz, params);
- if (tz->trips > 0) {
+ if (tz->num_trips > 0) {
ret = tz->ops->get_trip_temp(tz,
params->trip_max_desired_temperature,
&control_temp);
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index 12acb12aac50..9729b46d0258 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -11,6 +11,7 @@
*/
#include <linux/thermal.h>
+#include <linux/minmax.h>
#include <trace/events/thermal.h>
#include "thermal_core.h"
@@ -52,10 +53,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (!instance->initialized) {
if (throttle) {
- next_target = (cur_state + 1) >= instance->upper ?
- instance->upper :
- ((cur_state + 1) < instance->lower ?
- instance->lower : (cur_state + 1));
+ next_target = clamp((cur_state + 1), instance->lower, instance->upper);
} else {
next_target = THERMAL_NO_TARGET;
}
@@ -66,35 +64,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
switch (trend) {
case THERMAL_TREND_RAISING:
if (throttle) {
- next_target = cur_state < instance->upper ?
- (cur_state + 1) : instance->upper;
- if (next_target < instance->lower)
- next_target = instance->lower;
+ next_target = clamp((cur_state + 1), instance->lower, instance->upper);
}
break;
- case THERMAL_TREND_RAISE_FULL:
- if (throttle)
- next_target = instance->upper;
- break;
case THERMAL_TREND_DROPPING:
if (cur_state <= instance->lower) {
if (!throttle)
next_target = THERMAL_NO_TARGET;
} else {
if (!throttle) {
- next_target = cur_state - 1;
- if (next_target > instance->upper)
- next_target = instance->upper;
+ next_target = clamp((cur_state - 1), instance->lower, instance->upper);
}
}
break;
- case THERMAL_TREND_DROP_FULL:
- if (cur_state == instance->lower) {
- if (!throttle)
- next_target = THERMAL_NO_TARGET;
- } else
- next_target = instance->lower;
- break;
default:
break;
}
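
[Editor's note] The open-coded upper/lower bounding above collapses into clamp() from <linux/minmax.h>. A minimal sketch of the same stepping logic (names are illustrative, not the governor's code):

#include <linux/minmax.h>
#include <linux/types.h>

/*
 * Sketch: step the cooling state one level up (throttling) or down,
 * letting clamp() keep the result inside the [lower, upper] window.
 */
static unsigned long example_step_state(unsigned long cur_state, bool throttle,
					unsigned long lower, unsigned long upper)
{
	if (throttle)
		return clamp(cur_state + 1, lower, upper);

	return cur_state > lower ? clamp(cur_state - 1, lower, upper) : lower;
}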
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index b29ab09040d5..19a242c69ce6 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon thermal sensor driver
*
@@ -6,15 +7,6 @@
*
* Xinwei Kong <kong.kongxinwei@hisilicon.com>
* Leo Yan <leo.yan@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/cpufreq.h>
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index c1fa2b29b153..dabf11a687a1 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -207,14 +207,6 @@ static int pch_wpt_suspend(struct pch_thermal_device *ptd)
return 0;
}
- /* Do not check temperature if it is not a S0ix capable platform */
-#ifdef CONFIG_ACPI
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return 0;
-#else
- return 0;
-#endif
-
/* Do not check temperature if it is not s2idle */
if (pm_suspend_via_firmware())
return 0;
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index a9596e7562ea..95adac427b6f 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -81,7 +81,9 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
{}
};
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 4d8edc61a78b..a0e234fce71a 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -105,7 +105,7 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
}
/*
-* tj-max is is interesting because threshold is set relative to this
+* tj-max is interesting because threshold is set relative to this
* temperature.
*/
static int get_tj_max(int cpu, u32 *tj_max)
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index 64e323158952..115a44eb4fbf 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -151,8 +151,6 @@ static int prep_lookup_table(struct err_values *err_vals, int *ref_table)
/* 300 milli celsius steps */
while (i--)
derived_table[i] = derived_table[i + 1] - 300;
- /* case 0 */
- derived_table[i] = derived_table[i + 1] - 300;
}
/*
@@ -433,7 +431,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!derived_table) {
ret = -ENOMEM;
- goto err_alloc;
+ goto err_free_ref_table;
}
/* Workaround not needed if bit30/bit31 is set even for J721e */
@@ -483,7 +481,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
if (IS_ERR(ti_thermal)) {
dev_err(bgp->dev, "thermal zone device is NULL\n");
ret = PTR_ERR(ti_thermal);
- goto err_alloc;
+ goto err_free_ref_table;
}
}
@@ -514,6 +512,9 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
return 0;
+err_free_ref_table:
+ kfree(ref_table);
+
err_alloc:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -529,11 +530,11 @@ static int k3_j72xx_bandgap_remove(struct platform_device *pdev)
return 0;
}
-const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = {
+static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = {
.has_errata_i2128 = 1,
};
-const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = {
+static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = {
.has_errata_i2128 = 0,
};
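
[Editor's note] The new err_free_ref_table label frees ref_table on the late failure paths that previously leaked it, while err_alloc keeps handling the earlier failures. A generic sketch of the unwind pattern (helper names are hypothetical):

#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical init step that can fail after ref_table is allocated. */
static int example_enable_hw(struct platform_device *pdev)
{
	return 0;
}

/* Sketch: unwind in reverse order so ref_table is freed on every late failure. */
static int example_probe_unwind(struct platform_device *pdev)
{
	int *ref_table, ret;

	ref_table = kcalloc(16, sizeof(*ref_table), GFP_KERNEL);
	if (!ref_table)
		return -ENOMEM;

	ret = example_enable_hw(pdev);
	if (ret)
		goto err_free_ref_table;

	return 0;

err_free_ref_table:
	kfree(ref_table);
	return ret;
}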
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index d9c9c975f931..073943cbcc2b 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -20,6 +20,8 @@
#include <linux/thermal.h>
#include <asm-generic/unaligned.h>
+#include "../thermal_hwmon.h"
+
/*
* Thermal monitoring block consists of 8 (ADC_TM5_NUM_CHANNELS) channels. Each
* channel is programmed to use one of ADC channels for voltage comparison.
@@ -687,6 +689,9 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
return PTR_ERR(tzd);
}
adc_tm->channels[i].tzd = tzd;
+ if (devm_thermal_add_hwmon_sysfs(tzd))
+ dev_warn(adc_tm->dev,
+ "Failed to add hwmon sysfs attributes\n");
}
return 0;
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 7419e196dbb0..770f82cc9bca 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -16,6 +16,7 @@
#include <linux/thermal.h>
#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
#define QPNP_TM_REG_DIG_MAJOR 0x01
#define QPNP_TM_REG_TYPE 0x04
@@ -458,6 +459,10 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return ret;
}
+ if (devm_thermal_add_hwmon_sysfs(chip->tz_dev))
+ dev_warn(&pdev->dev,
+ "Failed to add hwmon sysfs attributes\n");
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
IRQF_ONESHOT, node->name, chip);
if (ret < 0)
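
[Editor's note] Both QCOM drivers now expose their zones through hwmon as well; registration failure is deliberately non-fatal, hence the dev_warn(). A minimal sketch of that pattern (the relative include works from within drivers/thermal, the helper name is illustrative):

#include <linux/device.h>
#include <linux/thermal.h>
#include "../thermal_hwmon.h"

/* Sketch: expose an already-registered thermal zone via hwmon, non-fatally. */
static void example_add_hwmon(struct device *dev,
			      struct thermal_zone_device *tzd)
{
	if (devm_thermal_add_hwmon_sysfs(tzd))
		dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
}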
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 7963ee33bf75..e49f58e83513 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -933,17 +933,6 @@ static int tsens_get_temp(void *data, int *temp)
return priv->ops->get_temp(s, temp);
}
-static int tsens_get_trend(void *data, int trip, enum thermal_trend *trend)
-{
- struct tsens_sensor *s = data;
- struct tsens_priv *priv = s->priv;
-
- if (priv->ops->get_trend)
- return priv->ops->get_trend(s, trend);
-
- return -ENOTSUPP;
-}
-
static int __maybe_unused tsens_suspend(struct device *dev)
{
struct tsens_priv *priv = dev_get_drvdata(dev);
@@ -1004,7 +993,6 @@ MODULE_DEVICE_TABLE(of, tsens_table);
static const struct thermal_zone_of_device_ops tsens_of_ops = {
.get_temp = tsens_get_temp,
- .get_trend = tsens_get_trend,
.set_trips = tsens_set_trips,
};
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 1471a2c00f15..ba05c8233356 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -65,7 +65,6 @@ struct tsens_sensor {
* @disable: Function to disable the tsens device
* @suspend: Function to suspend the tsens device
* @resume: Function to resume the tsens device
- * @get_trend: Function to get the thermal/temp trend
*/
struct tsens_ops {
/* mandatory callbacks */
@@ -77,7 +76,6 @@ struct tsens_ops {
void (*disable)(struct tsens_priv *priv);
int (*suspend)(struct tsens_priv *priv);
int (*resume)(struct tsens_priv *priv);
- int (*get_trend)(struct tsens_sensor *s, enum thermal_trend *trend);
};
#define REG_FIELD_FOR_EACH_SENSOR11(_name, _offset, _startbit, _stopbit) \
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 43eb25b167bc..cda7c52f2319 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -399,6 +399,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.compatible = "renesas,r8a779a0-thermal",
.data = &rcar_gen3_ths_tj_1,
},
+ {
+ .compatible = "renesas,r8a779f0-thermal",
+ .data = &rcar_gen3_ths_tj_1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
@@ -507,7 +511,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
&rcar_gen3_tz_of_ops);
if (IS_ERR(zone)) {
- dev_err(dev, "Can't register thermal zone\n");
+ dev_err(dev, "Sensor %u: Can't register thermal zone\n", i);
ret = PTR_ERR(zone);
goto error_unregister;
}
@@ -529,7 +533,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret < 0)
goto error_unregister;
- dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret);
+ dev_info(dev, "Sensor %u: Loaded %d trip points\n", i, ret);
}
if (!priv->num_tscs) {
diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c
index be07e04c6926..51ae80eda6af 100644
--- a/drivers/thermal/rzg2l_thermal.c
+++ b/drivers/thermal/rzg2l_thermal.c
@@ -47,7 +47,7 @@
#define TS_CODE_AVE_SCALE(x) ((x) * 1000000)
#define MCELSIUS(temp) ((temp) * MILLIDEGREE_PER_DEGREE)
-#define TS_CODE_CAP_TIMES 8 /* Capture times */
+#define TS_CODE_CAP_TIMES 8 /* Total number of ADC data samples */
#define RZG2L_THERMAL_GRAN 500 /* milli Celsius */
#define RZG2L_TSU_SS_TIMEOUT_US 1000
@@ -80,7 +80,8 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp)
int val, i;
for (i = 0; i < TS_CODE_CAP_TIMES ; i++) {
- /* TSU repeats measurement at 20 microseconds intervals and
+ /*
+ * TSU repeats measurement at 20 microseconds intervals and
* automatically updates the results of measurement. As per
* the HW manual for measuring temperature we need to read 8
* values consecutively and then take the average.
@@ -92,16 +93,18 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp)
ts_code_ave = result / TS_CODE_CAP_TIMES;
- /* Calculate actual sensor value by applying curvature correction formula
+ /*
+ * Calculate actual sensor value by applying curvature correction formula
* dsensor = ts_code_ave / (1 + ts_code_ave * 0.000013). Here we are doing
* integer calculation by scaling all the values by 1000000.
*/
dsensor = TS_CODE_AVE_SCALE(ts_code_ave) /
(TS_CODE_AVE_SCALE(1) + (ts_code_ave * CURVATURE_CORRECTION_CONST));
- /* The temperature Tj is calculated by the formula
+ /*
+ * The temperature Tj is calculated by the formula
* Tj = (dsensor − calib1) * 165/ (calib0 − calib1) − 40
- * where calib0 and calib1 are the caliberation values.
+ * where calib0 and calib1 are the calibration values.
*/
val = ((dsensor - priv->calib1) * (MCELSIUS(165) /
(priv->calib0 - priv->calib1))) - MCELSIUS(40);
@@ -122,7 +125,8 @@ static int rzg2l_thermal_init(struct rzg2l_thermal_priv *priv)
rzg2l_thermal_write(priv, TSU_SM, TSU_SM_NORMAL_MODE);
rzg2l_thermal_write(priv, TSU_ST, 0);
- /* Before setting the START bit, TSU should be in normal operating
+ /*
+ * Before setting the START bit, TSU should be in normal operating
* mode. As per the HW manual, it will take 60 µs to place the TSU
* into normal operating mode.
*/
@@ -217,7 +221,7 @@ static int rzg2l_thermal_probe(struct platform_device *pdev)
if (ret)
goto err;
- dev_dbg(dev, "TSU probed with %s caliberation values",
+ dev_dbg(dev, "TSU probed with %s calibration values",
rzg2l_thermal_read(priv, OTPTSUTRIM_REG(0)) ? "hw" : "sw");
return 0;
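
[Editor's note] The comment clean-ups above leave the conversion itself untouched: Tj in millidegrees Celsius is (dsensor - calib1) * 165 / (calib0 - calib1) - 40. A standalone sketch with made-up calibration values (the real driver reads calib0/calib1 from OTP):

#include <linux/units.h>	/* MILLIDEGREE_PER_DEGREE == 1000 */

#define MCELSIUS(temp)	((temp) * MILLIDEGREE_PER_DEGREE)

/* Sketch: convert an averaged sensor code to millidegrees Celsius. */
static int example_rzg2l_code_to_mcelsius(int dsensor)
{
	const int calib0 = 3148, calib1 = 503;	/* hypothetical values */

	return ((dsensor - calib1) * (MCELSIUS(165) / (calib0 - calib1))) -
	       MCELSIUS(40);
}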
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index d9cd23cbb671..212c87e63a66 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -237,7 +237,7 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
* The calibration data on the H6 is the ambient temperature and
* sensor values that are filled during the factory test stage.
*
- * The unit of stored FT temperature is 0.1 degreee celusis.
+ * The unit of stored FT temperature is 0.1 degree celsius.
*
* We need to calculate a delta between measured and caluclated
* register values and this will become a calibration offset.
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 210325f92559..825eab526619 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -633,37 +633,6 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
return 0;
}
-static int tegra_thermctl_get_trend(void *data, int trip,
- enum thermal_trend *trend)
-{
- struct tegra_thermctl_zone *zone = data;
- struct thermal_zone_device *tz = zone->tz;
- int trip_temp, temp, last_temp, ret;
-
- if (!tz)
- return -EINVAL;
-
- ret = tz->ops->get_trip_temp(zone->tz, trip, &trip_temp);
- if (ret)
- return ret;
-
- temp = READ_ONCE(tz->temperature);
- last_temp = READ_ONCE(tz->last_temperature);
-
- if (temp > trip_temp) {
- if (temp >= last_temp)
- *trend = THERMAL_TREND_RAISING;
- else
- *trend = THERMAL_TREND_STABLE;
- } else if (temp < trip_temp) {
- *trend = THERMAL_TREND_DROPPING;
- } else {
- *trend = THERMAL_TREND_STABLE;
- }
-
- return 0;
-}
-
static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
{
u32 r;
@@ -716,7 +685,6 @@ static int tegra_thermctl_set_trips(void *data, int lo, int hi)
static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
- .get_trend = tegra_thermctl_get_trend,
.set_trips = tegra_thermctl_set_trips,
};
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index 9b6b693cbcf8..05886684f429 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -316,7 +316,7 @@ static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
*hot_trip = 85000;
*crit_trip = 90000;
- for (i = 0; i < tzd->trips; i++) {
+ for (i = 0; i < tzd->num_trips; i++) {
enum thermal_trip_type type;
int trip_temp;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index cdc0552e8c42..6a5d0ae5d7a4 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -340,12 +340,8 @@ void thermal_zone_device_critical(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_critical);
static void handle_critical_trips(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type)
+ int trip, int trip_temp, enum thermal_trip_type trip_type)
{
- int trip_temp;
-
- tz->ops->get_trip_temp(tz, trip, &trip_temp);
-
/* If we have not crossed the trip_temp, we do not care. */
if (trip_temp <= 0 || tz->temperature < trip_temp)
return;
@@ -384,7 +380,7 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
}
if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
- handle_critical_trips(tz, trip, type);
+ handle_critical_trips(tz, trip, trip_temp, type);
else
handle_non_critical_trips(tz, trip);
/*
@@ -505,7 +501,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
tz->notify_event = event;
- for (count = 0; count < tz->trips; count++)
+ for (count = 0; count < tz->num_trips; count++)
handle_thermal_trip(tz, count);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
@@ -630,7 +626,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
unsigned long max_state;
int result, ret;
- if (trip >= tz->trips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return -EINVAL;
list_for_each_entry(pos1, &thermal_tz_list, node) {
@@ -667,7 +663,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
dev->target = THERMAL_NO_TARGET;
dev->weight = weight;
- result = ida_simple_get(&tz->ida, 0, 0, GFP_KERNEL);
+ result = ida_alloc(&tz->ida, GFP_KERNEL);
if (result < 0)
goto free_mem;
@@ -721,7 +717,7 @@ remove_trip_file:
remove_symbol_link:
sysfs_remove_link(&tz->device.kobj, dev->name);
release_ida:
- ida_simple_remove(&tz->ida, dev->id);
+ ida_free(&tz->ida, dev->id);
free_mem:
kfree(dev);
return result;
@@ -768,7 +764,7 @@ unbind:
device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
- ida_simple_remove(&tz->ida, pos->id);
+ ida_free(&tz->ida, pos->id);
kfree(pos);
return 0;
}
@@ -811,7 +807,7 @@ static void __bind(struct thermal_zone_device *tz, int mask,
{
int i, ret;
- for (i = 0; i < tz->trips; i++) {
+ for (i = 0; i < tz->num_trips; i++) {
if (mask & (1 << i)) {
unsigned long upper, lower;
@@ -901,7 +897,7 @@ __thermal_cooling_device_register(struct device_node *np,
if (!cdev)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&thermal_cdev_ida, GFP_KERNEL);
if (ret < 0)
goto out_kfree_cdev;
cdev->id = ret;
@@ -952,7 +948,7 @@ out_kfree_type:
put_device(&cdev->device);
cdev = NULL;
out_ida_remove:
- ida_simple_remove(&thermal_cdev_ida, id);
+ ida_free(&thermal_cdev_ida, id);
out_kfree_cdev:
kfree(cdev);
return ERR_PTR(ret);
@@ -1057,7 +1053,7 @@ static void __unbind(struct thermal_zone_device *tz, int mask,
{
int i;
- for (i = 0; i < tz->trips; i++)
+ for (i = 0; i < tz->num_trips; i++)
if (mask & (1 << i))
thermal_zone_unbind_cooling_device(tz, i, cdev);
}
@@ -1111,7 +1107,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
mutex_unlock(&thermal_list_lock);
- ida_simple_remove(&thermal_cdev_ida, cdev->id);
+ ida_free(&thermal_cdev_ida, cdev->id);
device_del(&cdev->device);
thermal_cooling_device_destroy_sysfs(cdev);
kfree(cdev->type);
@@ -1159,10 +1155,18 @@ exit:
mutex_unlock(&thermal_list_lock);
}
+static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms)
+{
+ *delay_jiffies = msecs_to_jiffies(delay_ms);
+ if (delay_ms > 1000)
+ *delay_jiffies = round_jiffies(*delay_jiffies);
+}
+
/**
- * thermal_zone_device_register() - register a new thermal zone device
+ * thermal_zone_device_register_with_trips() - register a new thermal zone device
* @type: the thermal zone device type
- * @trips: the number of trip points the thermal zone support
+ * @trips: a pointer to an array of thermal trips
+ * @num_trips: the number of trip points the thermal zone support
* @mask: a bit string indicating the writeablility of trip points
* @devdata: private device data
* @ops: standard thermal zone device callbacks
@@ -1184,10 +1188,10 @@ exit:
* IS_ERR*() helpers.
*/
struct thermal_zone_device *
-thermal_zone_device_register(const char *type, int trips, int mask,
- void *devdata, struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp, int passive_delay,
- int polling_delay)
+thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *trips, int num_trips, int mask,
+ void *devdata, struct thermal_zone_device_ops *ops,
+ struct thermal_zone_params *tzp, int passive_delay,
+ int polling_delay)
{
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
@@ -1198,27 +1202,27 @@ thermal_zone_device_register(const char *type, int trips, int mask,
struct thermal_governor *governor;
if (!type || strlen(type) == 0) {
- pr_err("Error: No thermal zone type defined\n");
+ pr_err("No thermal zone type defined\n");
return ERR_PTR(-EINVAL);
}
if (type && strlen(type) >= THERMAL_NAME_LENGTH) {
- pr_err("Error: Thermal zone name (%s) too long, should be under %d chars\n",
+ pr_err("Thermal zone name (%s) too long, should be under %d chars\n",
type, THERMAL_NAME_LENGTH);
return ERR_PTR(-EINVAL);
}
- if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) {
- pr_err("Error: Incorrect number of thermal trips\n");
+ if (num_trips > THERMAL_MAX_TRIPS || num_trips < 0 || mask >> num_trips) {
+ pr_err("Incorrect number of thermal trips\n");
return ERR_PTR(-EINVAL);
}
if (!ops) {
- pr_err("Error: Thermal zone device ops not defined\n");
+ pr_err("Thermal zone device ops not defined\n");
return ERR_PTR(-EINVAL);
}
- if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
+ if (num_trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
return ERR_PTR(-EINVAL);
tz = kzalloc(sizeof(*tz), GFP_KERNEL);
@@ -1228,7 +1232,7 @@ thermal_zone_device_register(const char *type, int trips, int mask,
INIT_LIST_HEAD(&tz->thermal_instances);
ida_init(&tz->ida);
mutex_init(&tz->lock);
- id = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
if (id < 0) {
result = id;
goto free_tz;
@@ -1249,6 +1253,7 @@ thermal_zone_device_register(const char *type, int trips, int mask,
tz->device.class = &thermal_class;
tz->devdata = devdata;
tz->trips = trips;
+ tz->num_trips = num_trips;
thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);
@@ -1266,7 +1271,7 @@ thermal_zone_device_register(const char *type, int trips, int mask,
if (result)
goto release_device;
- for (count = 0; count < trips; count++) {
+ for (count = 0; count < num_trips; count++) {
if (tz->ops->get_trip_type(tz, count, &trip_type) ||
tz->ops->get_trip_temp(tz, count, &trip_temp) ||
!trip_temp)
@@ -1319,11 +1324,21 @@ release_device:
put_device(&tz->device);
tz = NULL;
remove_id:
- ida_simple_remove(&thermal_tz_ida, id);
+ ida_free(&thermal_tz_ida, id);
free_tz:
kfree(tz);
return ERR_PTR(result);
}
+
+struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
+ void *devdata, struct thermal_zone_device_ops *ops,
+ struct thermal_zone_params *tzp, int passive_delay,
+ int polling_delay)
+{
+ return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask,
+ devdata, ops, tzp,
+ passive_delay, polling_delay);
+}
EXPORT_SYMBOL_GPL(thermal_zone_device_register);
/**
@@ -1379,7 +1394,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
- ida_simple_remove(&thermal_tz_ida, tz->id);
+ ida_free(&thermal_tz_ida, tz->id);
ida_destroy(&tz->ida);
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
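
[Editor's note] With thermal_zone_device_register() reduced to a wrapper, drivers that know their trip points up front can pass them as an array. A hedged sketch of a caller (zone name, trip values, delays and the stubbed callbacks are illustrative only; get_trip_type()/get_trip_temp() are still required whenever num_trips > 0):

#include <linux/kernel.h>
#include <linux/thermal.h>

/* Hypothetical trip table: one passive and one critical trip. */
static struct thermal_trip example_trips[] = {
	{ .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
	{ .temperature = 95000, .hysteresis = 0, .type = THERMAL_TRIP_CRITICAL },
};

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 45000;			/* placeholder sensor readout */
	return 0;
}

static int example_get_trip_type(struct thermal_zone_device *tz, int trip,
				 enum thermal_trip_type *type)
{
	*type = example_trips[trip].type;
	return 0;
}

static int example_get_trip_temp(struct thermal_zone_device *tz, int trip,
				 int *temp)
{
	*temp = example_trips[trip].temperature;
	return 0;
}

static struct thermal_zone_device_ops example_tz_ops = {
	.get_temp	= example_get_temp,
	.get_trip_type	= example_get_trip_type,
	.get_trip_temp	= example_get_trip_temp,
};

static struct thermal_zone_device *example_register(void *devdata)
{
	return thermal_zone_device_register_with_trips("example_zone",
						       example_trips,
						       ARRAY_SIZE(example_trips),
						       0, devdata,
						       &example_tz_ops, NULL,
						       0, 1000);
}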
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 726e327b4205..c991bb290512 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -68,20 +68,6 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
void thermal_cdev_update(struct thermal_cooling_device *);
void __thermal_cdev_update(struct thermal_cooling_device *cdev);
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @np: pointer to struct device_node that this trip point was created from
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
-struct thermal_trip {
- struct device_node *np;
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
-};
-
int get_tz_trend(struct thermal_zone_device *tz, int trip);
struct thermal_instance *
@@ -126,7 +112,6 @@ int thermal_build_list_of_policies(char *buf);
/* Helpers */
void thermal_zone_set_trips(struct thermal_zone_device *tz);
-void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 3edd047e144f..690890f054a3 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -39,7 +39,6 @@ int get_tz_trend(struct thermal_zone_device *tz, int trip)
return trend;
}
-EXPORT_SYMBOL(get_tz_trend);
struct thermal_instance *
get_thermal_instance(struct thermal_zone_device *tz,
@@ -90,7 +89,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
ret = tz->ops->get_temp(tz, temp);
if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
- for (count = 0; count < tz->trips; count++) {
+ for (count = 0; count < tz->num_trips; count++) {
ret = tz->ops->get_trip_type(tz, count, &type);
if (!ret && type == THERMAL_TRIP_CRITICAL) {
ret = tz->ops->get_trip_temp(tz, count,
@@ -138,7 +137,7 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
goto exit;
- for (i = 0; i < tz->trips; i++) {
+ for (i = 0; i < tz->num_trips; i++) {
int trip_low;
tz->ops->get_trip_temp(tz, i, &trip_temp);
@@ -175,13 +174,6 @@ exit:
mutex_unlock(&tz->lock);
}
-void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms)
-{
- *delay_jiffies = msecs_to_jiffies(delay_ms);
- if (delay_ms > 1000)
- *delay_jiffies = round_jiffies(*delay_jiffies);
-}
-
static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
int target)
{
@@ -228,7 +220,6 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
}
mutex_unlock(&cdev->lock);
}
-EXPORT_SYMBOL(thermal_cdev_update);
/**
* thermal_zone_get_slope - return the slope attribute of the thermal zone
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 32fea5174cc0..050d243a5fa1 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -469,7 +469,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
mutex_lock(&tz->lock);
- for (i = 0; i < tz->trips; i++) {
+ for (i = 0; i < tz->num_trips; i++) {
enum thermal_trip_type type;
int temp, hyst = 0;
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index b65d435cb92f..802c30b72a92 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -118,12 +118,7 @@ static int of_thermal_set_trips(struct thermal_zone_device *tz,
*/
int of_thermal_get_ntrips(struct thermal_zone_device *tz)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data || IS_ERR(data))
- return -ENODEV;
-
- return data->ntrips;
+ return tz->num_trips;
}
EXPORT_SYMBOL_GPL(of_thermal_get_ntrips);
@@ -139,9 +134,7 @@ EXPORT_SYMBOL_GPL(of_thermal_get_ntrips);
*/
bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, int trip)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data || trip >= data->ntrips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return false;
return true;
@@ -161,12 +154,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data)
- return NULL;
-
- return data->trips;
+ return tz->trips;
}
EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
@@ -281,12 +269,10 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal,
static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
enum thermal_trip_type *type)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (trip >= data->ntrips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return -EDOM;
- *type = data->trips[trip].type;
+ *type = tz->trips[trip].type;
return 0;
}
@@ -294,12 +280,10 @@ static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
int *temp)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (trip >= data->ntrips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return -EDOM;
- *temp = data->trips[trip].temperature;
+ *temp = tz->trips[trip].temperature;
return 0;
}
@@ -309,7 +293,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
{
struct __thermal_zone *data = tz->devdata;
- if (trip >= data->ntrips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return -EDOM;
if (data->ops && data->ops->set_trip_temp) {
@@ -321,7 +305,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
}
/* thermal framework should take care of data->mask & (1 << trip) */
- data->trips[trip].temperature = temp;
+ tz->trips[trip].temperature = temp;
return 0;
}
@@ -329,12 +313,10 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
int *hyst)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (trip >= data->ntrips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return -EDOM;
- *hyst = data->trips[trip].hysteresis;
+ *hyst = tz->trips[trip].hysteresis;
return 0;
}
@@ -342,13 +324,11 @@ static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
int hyst)
{
- struct __thermal_zone *data = tz->devdata;
-
- if (trip >= data->ntrips || trip < 0)
+ if (trip >= tz->num_trips || trip < 0)
return -EDOM;
/* thermal framework should take care of data->mask & (1 << trip) */
- data->trips[trip].hysteresis = hyst;
+ tz->trips[trip].hysteresis = hyst;
return 0;
}
@@ -356,12 +336,11 @@ static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
int *temp)
{
- struct __thermal_zone *data = tz->devdata;
int i;
- for (i = 0; i < data->ntrips; i++)
- if (data->trips[i].type == THERMAL_TRIP_CRITICAL) {
- *temp = data->trips[i].temperature;
+ for (i = 0; i < tz->num_trips; i++)
+ if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) {
+ *temp = tz->trips[i].temperature;
return 0;
}
@@ -671,6 +650,35 @@ EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister);
/*** functions parsing device tree nodes ***/
+static int of_find_trip_id(struct device_node *np, struct device_node *trip)
+{
+ struct device_node *trips;
+ struct device_node *t;
+ int i = 0;
+
+ trips = of_get_child_by_name(np, "trips");
+ if (!trips) {
+ pr_err("Failed to find 'trips' node\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the trip id point associated with the cooling device map
+ */
+ for_each_child_of_node(trips, t) {
+
+ if (t == trip)
+ goto out;
+ i++;
+ }
+
+ i = -ENXIO;
+out:
+ of_node_put(trips);
+
+ return i;
+}
+
/**
* thermal_of_populate_bind_params - parse and fill cooling map data
* @np: DT node containing a cooling-map node
@@ -685,15 +693,15 @@ EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister);
*
* Return: 0 on success, proper error code otherwise
*/
-static int thermal_of_populate_bind_params(struct device_node *np,
- struct __thermal_bind_params *__tbp,
- struct thermal_trip *trips,
- int ntrips)
+static int thermal_of_populate_bind_params(struct device_node *tz_np,
+ struct device_node *np,
+ struct __thermal_bind_params *__tbp)
{
struct of_phandle_args cooling_spec;
struct __thermal_cooling_bind_param *__tcbp;
struct device_node *trip;
int ret, i, count;
+ int trip_id;
u32 prop;
/* Default weight. Usage is optional */
@@ -708,18 +716,14 @@ static int thermal_of_populate_bind_params(struct device_node *np,
return -ENODEV;
}
- /* match using device_node */
- for (i = 0; i < ntrips; i++)
- if (trip == trips[i].np) {
- __tbp->trip_id = i;
- break;
- }
-
- if (i == ntrips) {
- ret = -ENODEV;
+ trip_id = of_find_trip_id(tz_np, trip);
+ if (trip_id < 0) {
+ ret = trip_id;
goto end;
}
+ __tbp->trip_id = trip_id;
+
count = of_count_phandle_with_args(np, "cooling-device",
"#cooling-cells");
if (count <= 0) {
@@ -843,13 +847,56 @@ static int thermal_of_populate_trip(struct device_node *np,
return ret;
}
- /* Required for cooling map matching */
- trip->np = np;
- of_node_get(np);
-
return 0;
}
+static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips)
+{
+ struct thermal_trip *tt;
+ struct device_node *trips, *trip;
+ int ret, count;
+
+ trips = of_get_child_by_name(np, "trips");
+ if (!trips) {
+ pr_err("Failed to find 'trips' node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ count = of_get_child_count(trips);
+ if (!count) {
+ pr_err("No trip point defined\n");
+ ret = -EINVAL;
+ goto out_of_node_put;
+ }
+
+ tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL);
+ if (!tt) {
+ ret = -ENOMEM;
+ goto out_of_node_put;
+ }
+
+ *ntrips = count;
+
+ count = 0;
+ for_each_child_of_node(trips, trip) {
+ ret = thermal_of_populate_trip(trip, &tt[count++]);
+ if (ret)
+ goto out_kfree;
+ }
+
+ of_node_put(trips);
+
+ return tt;
+
+out_kfree:
+ kfree(tt);
+ *ntrips = 0;
+out_of_node_put:
+ of_node_put(trips);
+
+ return ERR_PTR(ret);
+}
+
/**
* thermal_of_build_thermal_zone - parse and fill one thermal zone data
* @np: DT node containing a thermal zone node
@@ -909,32 +956,12 @@ __init *thermal_of_build_thermal_zone(struct device_node *np)
tz->offset = 0;
}
- /* trips */
- child = of_get_child_by_name(np, "trips");
-
- /* No trips provided */
- if (!child)
+ tz->trips = thermal_of_trips_init(np, &tz->ntrips);
+ if (IS_ERR(tz->trips)) {
+ ret = PTR_ERR(tz->trips);
goto finish;
-
- tz->ntrips = of_get_child_count(child);
- if (tz->ntrips == 0) /* must have at least one child */
- goto finish;
-
- tz->trips = kcalloc(tz->ntrips, sizeof(*tz->trips), GFP_KERNEL);
- if (!tz->trips) {
- ret = -ENOMEM;
- goto free_tz;
}
- i = 0;
- for_each_child_of_node(child, gchild) {
- ret = thermal_of_populate_trip(gchild, &tz->trips[i++]);
- if (ret)
- goto free_trips;
- }
-
- of_node_put(child);
-
/* cooling-maps */
child = of_get_child_by_name(np, "cooling-maps");
@@ -954,10 +981,11 @@ __init *thermal_of_build_thermal_zone(struct device_node *np)
i = 0;
for_each_child_of_node(child, gchild) {
- ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
- tz->trips, tz->ntrips);
- if (ret)
+ ret = thermal_of_populate_bind_params(np, gchild, &tz->tbps[i++]);
+ if (ret) {
+ of_node_put(gchild);
goto free_tbps;
+ }
}
finish:
@@ -978,10 +1006,7 @@ free_tbps:
kfree(tz->tbps);
free_trips:
- for (i = 0; i < tz->ntrips; i++)
- of_node_put(tz->trips[i].np);
kfree(tz->trips);
- of_node_put(gchild);
free_tz:
kfree(tz);
of_node_put(child);
@@ -1004,8 +1029,6 @@ static __init void of_thermal_free_zone(struct __thermal_zone *tz)
}
kfree(tz->tbps);
- for (i = 0; i < tz->ntrips; i++)
- of_node_put(tz->trips[i].np);
kfree(tz->trips);
kfree(tz);
}
@@ -1103,11 +1126,9 @@ int __init of_parse_thermal_zones(void)
tzp->slope = tz->slope;
tzp->offset = tz->offset;
- zone = thermal_zone_device_register(child->name, tz->ntrips,
- mask, tz,
- ops, tzp,
- tz->passive_delay,
- tz->polling_delay);
+ zone = thermal_zone_device_register_with_trips(child->name, tz->trips, tz->ntrips,
+ mask, tz, ops, tzp, tz->passive_delay,
+ tz->polling_delay);
if (IS_ERR(zone)) {
pr_err("Failed to build %pOFn zone %ld\n", child,
PTR_ERR(zone));
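
[Editor's note] Since the trip array now lives in struct thermal_zone_device, code can read tz->trips/tz->num_trips directly instead of going through the OF-private __thermal_zone. A sketch mirroring of_thermal_get_crit_temp() above (helper name is illustrative):

#include <linux/errno.h>
#include <linux/thermal.h>

/* Sketch: find the critical trip temperature from the zone's own trip table. */
static int example_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
	int i;

	for (i = 0; i < tz->num_trips; i++) {
		if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) {
			*temp = tz->trips[i].temperature;
			return 0;
		}
	}

	return -EINVAL;
}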
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 1c4aac8464a7..3a8d6e747c25 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -416,15 +416,15 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
int indx;
/* This function works only for zones with at least one trip */
- if (tz->trips <= 0)
+ if (tz->num_trips <= 0)
return -EINVAL;
- tz->trip_type_attrs = kcalloc(tz->trips, sizeof(*tz->trip_type_attrs),
+ tz->trip_type_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_type_attrs),
GFP_KERNEL);
if (!tz->trip_type_attrs)
return -ENOMEM;
- tz->trip_temp_attrs = kcalloc(tz->trips, sizeof(*tz->trip_temp_attrs),
+ tz->trip_temp_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_temp_attrs),
GFP_KERNEL);
if (!tz->trip_temp_attrs) {
kfree(tz->trip_type_attrs);
@@ -432,7 +432,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
}
if (tz->ops->get_trip_hyst) {
- tz->trip_hyst_attrs = kcalloc(tz->trips,
+ tz->trip_hyst_attrs = kcalloc(tz->num_trips,
sizeof(*tz->trip_hyst_attrs),
GFP_KERNEL);
if (!tz->trip_hyst_attrs) {
@@ -442,7 +442,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
}
}
- attrs = kcalloc(tz->trips * 3 + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs) {
kfree(tz->trip_type_attrs);
kfree(tz->trip_temp_attrs);
@@ -451,7 +451,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
return -ENOMEM;
}
- for (indx = 0; indx < tz->trips; indx++) {
+ for (indx = 0; indx < tz->num_trips; indx++) {
/* create trip type attribute */
snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
"trip_point_%d_type", indx);
@@ -478,7 +478,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
tz->trip_temp_attrs[indx].attr.store =
trip_point_temp_store;
}
- attrs[indx + tz->trips] = &tz->trip_temp_attrs[indx].attr.attr;
+ attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr;
/* create Optional trip hyst attribute */
if (!tz->ops->get_trip_hyst)
@@ -496,10 +496,10 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
tz->trip_hyst_attrs[indx].attr.store =
trip_point_hyst_store;
}
- attrs[indx + tz->trips * 2] =
+ attrs[indx + tz->num_trips * 2] =
&tz->trip_hyst_attrs[indx].attr.attr;
}
- attrs[tz->trips * 3] = NULL;
+ attrs[tz->num_trips * 3] = NULL;
tz->trips_attribute_group.attrs = attrs;
@@ -540,7 +540,7 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *tz,
for (i = 0; i < size - 2; i++)
groups[i] = thermal_zone_attribute_groups[i];
- if (tz->trips) {
+ if (tz->num_trips) {
result = create_trip_attrs(tz, mask);
if (result) {
kfree(groups);
@@ -561,7 +561,7 @@ void thermal_zone_destroy_device_groups(struct thermal_zone_device *tz)
if (!tz)
return;
- if (tz->trips)
+ if (tz->num_trips)
destroy_trip_attrs(tz);
kfree(tz->device.groups);
@@ -813,12 +813,13 @@ static const struct attribute_group cooling_device_stats_attr_group = {
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
{
+ const struct attribute_group *stats_attr_group = NULL;
struct cooling_dev_stats *stats;
unsigned long states;
int var;
if (cdev->ops->get_max_state(cdev, &states))
- return;
+ goto out;
states++; /* Total number of states is highest state + 1 */
@@ -828,7 +829,7 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
stats = kzalloc(var, GFP_KERNEL);
if (!stats)
- return;
+ goto out;
stats->time_in_state = (ktime_t *)(stats + 1);
stats->trans_table = (unsigned int *)(stats->time_in_state + states);
@@ -838,9 +839,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
spin_lock_init(&stats->lock);
+ stats_attr_group = &cooling_device_stats_attr_group;
+
+out:
/* Fill the empty slot left in cooling_device_attr_groups */
var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
- cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+ cooling_device_attr_groups[var] = stats_attr_group;
}
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index ea0603b59309..67050a1a5b07 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -226,7 +226,7 @@ static irqreturn_t ti_bandgap_talert_irq_handler(int irq, void *data)
/*
* One TALERT interrupt: Two sources
* If the interrupt is due to t_hot then mask t_hot and
- * and unmask t_cold else mask t_cold and unmask t_hot
+ * unmask t_cold else mask t_cold and unmask t_hot
*/
if (t_hot) {
ctrl &= ~tsr->mask_hot_mask;
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index 4bfec8a28064..e76a6c173637 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -28,8 +28,10 @@ config USB4_DEBUGFS_WRITE
this for production systems or distro kernels.
config USB4_KUNIT_TEST
- bool "KUnit tests"
- depends on KUNIT=y
+ bool "KUnit tests" if !KUNIT_ALL_TESTS
+ depends on (USB4=m || KUNIT=y)
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
config USB4_DMA_TEST
tristate "DMA traffic test driver"
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index c89daac0ad8c..b1f0dc8df47c 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -301,37 +301,22 @@ static bool tb_acpi_bus_match(struct device *dev)
return tb_is_switch(dev) || tb_is_usb4_port_device(dev);
}
-static struct acpi_device *tb_acpi_find_port(struct acpi_device *adev,
- const struct tb_port *port)
+static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
{
- struct acpi_device *port_adev;
-
- if (!adev)
- return NULL;
+ struct acpi_device *adev = NULL;
+ struct tb_switch *parent_sw;
/*
* Device routers exists under the downstream facing USB4 port
* of the parent router. Their _ADR is always 0.
*/
- list_for_each_entry(port_adev, &adev->children, node) {
- if (acpi_device_adr(port_adev) == port->port)
- return port_adev;
- }
-
- return NULL;
-}
-
-static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
-{
- struct acpi_device *adev = NULL;
- struct tb_switch *parent_sw;
-
parent_sw = tb_switch_parent(sw);
if (parent_sw) {
struct tb_port *port = tb_port_at(tb_route(sw), parent_sw);
struct acpi_device *port_adev;
- port_adev = tb_acpi_find_port(ACPI_COMPANION(&parent_sw->dev), port);
+ port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev),
+ port->port);
if (port_adev)
adev = acpi_find_child_device(port_adev, 0, false);
} else {
@@ -364,8 +349,8 @@ static struct acpi_device *tb_acpi_find_companion(struct device *dev)
if (tb_is_switch(dev))
return tb_acpi_switch_find_companion(tb_to_switch(dev));
else if (tb_is_usb4_port_device(dev))
- return tb_acpi_find_port(ACPI_COMPANION(dev->parent),
- tb_to_usb4_port_device(dev)->port);
+ return acpi_find_child_by_adr(ACPI_COMPANION(dev->parent),
+ tb_to_usb4_port_device(dev)->port->port);
return NULL;
}
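
[Editor's note] The open-coded walk over adev->children is replaced by acpi_find_child_by_adr(), which looks up a child ACPI device by its _ADR. A minimal sketch of resolving a downstream port's companion (function name and the port-number argument are illustrative):

#include <linux/acpi.h>
#include <linux/device.h>

/* Sketch: find the ACPI companion of downstream port N under a parent device. */
static struct acpi_device *example_find_port_companion(struct device *parent,
							unsigned int port_num)
{
	struct acpi_device *adev = ACPI_COMPANION(parent);

	if (!adev)
		return NULL;

	return acpi_find_child_by_adr(adev, port_num);
}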
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index e92c658dba1c..e5ede5debfb0 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -694,7 +694,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
}
/**
- * tb_cfg_start() - start/resume the control channel
+ * tb_ctl_start() - start/resume the control channel
* @ctl: Control channel to start
*/
void tb_ctl_start(struct tb_ctl *ctl)
@@ -710,7 +710,7 @@ void tb_ctl_start(struct tb_ctl *ctl)
}
/**
- * tb_ctrl_stop() - pause the control channel
+ * tb_ctl_stop() - pause the control channel
* @ctl: Control channel to stop
*
* All invocations of ctl->callback will have finished after this method
@@ -912,7 +912,7 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
}
/**
- * tb_cfg_write() - write from buffer into config space
+ * tb_cfg_write_raw() - write from buffer into config space
* @ctl: Pointer to the control channel
* @buffer: Data to write
* @route: Route string of the router
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index e8c64898dfce..7c7d80f96c0c 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -35,7 +35,7 @@ struct tb_cfg_result {
* If err = 1 then this is the port that send the
* error.
* If err = 0 and if this was a cfg_read/write then
- * this is the the upstream port of the responding
+ * this is the upstream port of the responding
* switch.
* Otherwise the field is set to zero.
*/
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 2889a214dadc..99211f35a5cd 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -872,7 +872,6 @@ int tb_domain_init(void)
{
int ret;
- tb_test_init();
tb_debugfs_init();
tb_acpi_init();
@@ -890,7 +889,6 @@ err_xdomain:
err_acpi:
tb_acpi_exit();
tb_debugfs_exit();
- tb_test_exit();
return ret;
}
@@ -903,5 +901,4 @@ void tb_domain_exit(void)
tb_xdomain_exit();
tb_acpi_exit();
tb_debugfs_exit();
- tb_test_exit();
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index fff0c740c8f3..ae38f0d25a8d 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2516,6 +2516,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
case PCI_DEVICE_ID_INTEL_ADL_NHI0:
case PCI_DEVICE_ID_INTEL_ADL_NHI1:
+ case PCI_DEVICE_ID_INTEL_RPL_NHI0:
+ case PCI_DEVICE_ID_INTEL_RPL_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 1333b158a95e..cb8c9c4ae93a 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1410,6 +1410,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 69083aab2736..f09da5b62233 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -80,6 +80,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d
#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0 0x9a1f
#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21
+#define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e
+#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 561e1d77240e..244f8cd38b25 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3133,9 +3133,13 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
/*
* Actually only needed for Titan Ridge but for simplicity can be
* done for USB4 device too as CLx is re-enabled at resume.
+ * CL0s and CL1 are enabled and supported together.
*/
- if (tb_switch_disable_clx(sw, TB_CL0S))
- tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
+ if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
+ if (tb_switch_disable_clx(sw, TB_CL1))
+ tb_sw_warn(sw, "failed to disable %s on upstream port\n",
+ tb_switch_clx_name(TB_CL1));
+ }
err = tb_plug_events_active(sw, false);
if (err)
@@ -3426,13 +3430,12 @@ static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
}
switch (clx) {
- case TB_CL0S:
- /* CL0s support requires also CL1 support */
+ case TB_CL1:
+ /* CL0s and CL1 are enabled and supported together */
mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
break;
- /* For now we support only CL0s. Not CL1, CL2 */
- case TB_CL1:
+ /* For now we support only CL0s and CL1. Not CL2 */
case TB_CL2:
default:
return false;
@@ -3446,18 +3449,18 @@ static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
return !!(val & mask);
}
-static inline bool tb_port_cl0s_supported(struct tb_port *port)
-{
- return tb_port_clx_supported(port, TB_CL0S);
-}
-
-static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
+static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
u32 phy, mask;
int ret;
- /* To enable CL0s also required to enable CL1 */
- mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ /* CL0s and CL1 are enabled and supported together */
+ if (clx == TB_CL1)
+ mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ else
+ /* For now we support only CL0s and CL1. Not CL2 */
+ return -EOPNOTSUPP;
+
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
@@ -3472,20 +3475,20 @@ static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
port->cap_phy + LANE_ADP_CS_1, 1);
}
-static int tb_port_cl0s_disable(struct tb_port *port)
+static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
- return __tb_port_cl0s_set(port, false);
+ return __tb_port_clx_set(port, clx, false);
}
-static int tb_port_cl0s_enable(struct tb_port *port)
+static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
- return __tb_port_cl0s_set(port, true);
+ return __tb_port_clx_set(port, clx, true);
}
-static int tb_switch_enable_cl0s(struct tb_switch *sw)
+static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
- bool up_cl0s_support, down_cl0s_support;
+ bool up_clx_support, down_clx_support;
struct tb_port *up, *down;
int ret;
@@ -3510,37 +3513,37 @@ static int tb_switch_enable_cl0s(struct tb_switch *sw)
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
- up_cl0s_support = tb_port_cl0s_supported(up);
- down_cl0s_support = tb_port_cl0s_supported(down);
+ up_clx_support = tb_port_clx_supported(up, clx);
+ down_clx_support = tb_port_clx_supported(down, clx);
- tb_port_dbg(up, "CL0s %ssupported\n",
- up_cl0s_support ? "" : "not ");
- tb_port_dbg(down, "CL0s %ssupported\n",
- down_cl0s_support ? "" : "not ");
+ tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
+ up_clx_support ? "" : "not ");
+ tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
+ down_clx_support ? "" : "not ");
- if (!up_cl0s_support || !down_cl0s_support)
+ if (!up_clx_support || !down_clx_support)
return -EOPNOTSUPP;
- ret = tb_port_cl0s_enable(up);
+ ret = tb_port_clx_enable(up, clx);
if (ret)
return ret;
- ret = tb_port_cl0s_enable(down);
+ ret = tb_port_clx_enable(down, clx);
if (ret) {
- tb_port_cl0s_disable(up);
+ tb_port_clx_disable(up, clx);
return ret;
}
ret = tb_switch_mask_clx_objections(sw);
if (ret) {
- tb_port_cl0s_disable(up);
- tb_port_cl0s_disable(down);
+ tb_port_clx_disable(up, clx);
+ tb_port_clx_disable(down, clx);
return ret;
}
- sw->clx = TB_CL0S;
+ sw->clx = clx;
- tb_port_dbg(up, "CL0s enabled\n");
+ tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
return 0;
}
@@ -3554,7 +3557,7 @@ static int tb_switch_enable_cl0s(struct tb_switch *sw)
* to improve performance. CLx is enabled only if both sides of the link
* support CLx, and if both sides of the link are not configured as two
* single lane links and only if the link is not inter-domain link. The
- * complete set of conditions is descibed in CM Guide 1.0 section 8.1.
+ * complete set of conditions is described in CM Guide 1.0 section 8.1.
*
* Return: Returns 0 on success or an error code on failure.
*/
@@ -3573,15 +3576,16 @@ int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
return 0;
switch (clx) {
- case TB_CL0S:
- return tb_switch_enable_cl0s(sw);
+ case TB_CL1:
+ /* CL0s and CL1 are enabled and supported together */
+ return __tb_switch_enable_clx(sw, clx);
default:
return -EOPNOTSUPP;
}
}
-static int tb_switch_disable_cl0s(struct tb_switch *sw)
+static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
@@ -3603,17 +3607,17 @@ static int tb_switch_disable_cl0s(struct tb_switch *sw)
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
- ret = tb_port_cl0s_disable(up);
+ ret = tb_port_clx_disable(up, clx);
if (ret)
return ret;
- ret = tb_port_cl0s_disable(down);
+ ret = tb_port_clx_disable(down, clx);
if (ret)
return ret;
sw->clx = TB_CLX_DISABLE;
- tb_port_dbg(up, "CL0s disabled\n");
+ tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
return 0;
}
@@ -3630,8 +3634,9 @@ int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
return 0;
switch (clx) {
- case TB_CL0S:
- return tb_switch_disable_cl0s(sw);
+ case TB_CL1:
+ /* CL0s and CL1 are enabled and supported together */
+ return __tb_switch_disable_clx(sw, clx);
default:
return -EOPNOTSUPP;
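
[Editor's note] With CL0s and CL1 now handled as one pair, callers request TB_CL1 and treat -EOPNOTSUPP as "link does not support CLx", warning only on real failures. A short sketch of that call pattern (matching the hot-plug path in tb.c below; tb.h is the driver-local header):

#include <linux/errno.h>
#include "tb.h"

/* Sketch: enable CL0s/CL1 on a router, silently skipping unsupported links. */
static void example_enable_clx(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));
}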
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9a3214fb5038..9853f6c7e81d 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -118,6 +118,13 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
+ /*
+ * In case a DP tunnel exists, change the host router's
+ * 1st depth children TMU mode to HiFi for CL0s to work.
+ */
+ if (tunnel)
+ tb_switch_enable_tmu_1st_child(tb->root_switch,
+ TB_SWITCH_TMU_RATE_HIFI);
break;
case TB_TYPE_PCIE_DOWN:
@@ -215,7 +222,7 @@ static int tb_enable_tmu(struct tb_switch *sw)
int ret;
/* If it is already enabled in correct mode, don't touch it */
- if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
+ if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
ret = tb_switch_tmu_disable(sw);
@@ -575,6 +582,7 @@ static void tb_scan_port(struct tb_port *port)
struct tb_cm *tcm = tb_priv(port->sw->tb);
struct tb_port *upstream_port;
struct tb_switch *sw;
+ int ret;
if (tb_is_upstream_port(port))
return;
@@ -663,11 +671,24 @@ static void tb_scan_port(struct tb_port *port)
tb_switch_lane_bonding_enable(sw);
/* Set the link configured */
tb_switch_configure_link(sw);
- if (tb_switch_enable_clx(sw, TB_CL0S))
- tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
+ /*
+ * CL0s and CL1 are enabled and supported together.
+ * Silently ignore CLx enabling in case CLx is not supported.
+ */
+ ret = tb_switch_enable_clx(sw, TB_CL1);
+ if (ret && ret != -EOPNOTSUPP)
+ tb_sw_warn(sw, "failed to enable %s on upstream port\n",
+ tb_switch_clx_name(TB_CL1));
- tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
- tb_switch_is_clx_enabled(sw));
+ if (tb_switch_is_clx_enabled(sw, TB_CL1))
+ /*
+ * To support the highest CLx state, we set the router's TMU to
+ * Normal-Uni mode.
+ */
+ tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
+ else
+ /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
+ tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
@@ -965,6 +986,12 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * In case a DP tunnel exists, change the host router's 1st depth children
+ * TMU mode to HiFi for CL0s to work.
+ */
+ tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
+
return;
err_free:
@@ -1407,7 +1434,12 @@ static int tb_start(struct tb *tb)
return ret;
}
- tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
+ /*
+ * To support the highest CLx state, we set the host router's TMU to
+ * Normal mode.
+ */
+ tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
+ false);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
@@ -1446,19 +1478,31 @@ static int tb_suspend_noirq(struct tb *tb)
static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
+ int ret;
/* No need to restore if the router is already unplugged */
if (sw->is_unplugged)
return;
- if (tb_switch_enable_clx(sw, TB_CL0S))
- tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
-
/*
- * tb_switch_tmu_configure() was already called when the switch was
- * added before entering system sleep or runtime suspend,
- * so no need to call it again before enabling TMU.
+ * CL0s and CL1 are enabled and supported together.
+ * Silently ignore CLx re-enabling in case CLx is not supported.
*/
+ ret = tb_switch_enable_clx(sw, TB_CL1);
+ if (ret && ret != -EOPNOTSUPP)
+ tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
+ tb_switch_clx_name(TB_CL1));
+
+ if (tb_switch_is_clx_enabled(sw, TB_CL1))
+ /*
+ * To support the highest CLx state, we set the router's TMU to
+ * Normal-Uni mode.
+ */
+ tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
+ else
+ /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
+ tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
+
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 4602c69913fa..5db76de40cc1 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
+#include <linux/bitfield.h>
#include "tb_regs.h"
#include "ctl.h"
@@ -111,7 +112,7 @@ struct tb_switch_tmu {
enum tb_clx {
TB_CLX_DISABLE,
- TB_CL0S,
+ /* CL0s and CL1 are enabled and supported together */
TB_CL1,
TB_CL2,
};
@@ -933,46 +934,49 @@ int tb_switch_tmu_enable(struct tb_switch *sw);
void tb_switch_tmu_configure(struct tb_switch *sw,
enum tb_switch_tmu_rate rate,
bool unidirectional);
+void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate);
/**
- * tb_switch_tmu_hifi_is_enabled() - Checks if the specified TMU mode is enabled
+ * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
* @sw: Router whose TMU mode to check
* @unidirectional: If uni-directional (bi-directional otherwise)
*
* Return true if hardware TMU configuration matches the one passed in
- * as parameter. That is HiFi and either uni-directional or bi-directional.
+ * as parameter. That is HiFi/Normal and either uni-directional or bi-directional.
*/
-static inline bool tb_switch_tmu_hifi_is_enabled(const struct tb_switch *sw,
- bool unidirectional)
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw,
+ bool unidirectional)
{
- return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+ return sw->tmu.rate == sw->tmu.rate_request &&
sw->tmu.unidirectional == unidirectional;
}
+static inline const char *tb_switch_clx_name(enum tb_clx clx)
+{
+ switch (clx) {
+ /* CL0s and CL1 are enabled and supported together */
+ case TB_CL1:
+ return "CL0s/CL1";
+ default:
+ return "unknown";
+ }
+}
+
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
/**
* tb_switch_is_clx_enabled() - Checks if the CLx is enabled
- * @sw: Router to check the CLx state for
- *
- * Checks if the CLx is enabled on the router upstream link.
- * Not applicable for a host router.
- */
-static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw)
-{
- return sw->clx != TB_CLX_DISABLE;
-}
-
-/**
- * tb_switch_is_cl0s_enabled() - Checks if the CL0s is enabled
- * @sw: Router to check for the CL0s
+ * @sw: Router to check for the CLx
+ * @clx: The CLx state to check for
*
- * Checks if the CL0s is enabled on the router upstream link.
+ * Checks if the specified CLx is enabled on the router upstream link.
* Not applicable for a host router.
*/
-static inline bool tb_switch_is_cl0s_enabled(const struct tb_switch *sw)
+static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
+ enum tb_clx clx)
{
- return sw->clx == TB_CL0S;
+ return sw->clx == clx;
}
/**
@@ -1271,12 +1275,4 @@ static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif
-#ifdef CONFIG_USB4_KUNIT_TEST
-int tb_test_init(void);
-void tb_test_exit(void);
-#else
-static inline int tb_test_init(void) { return 0; }
-static inline void tb_test_exit(void) { }
-#endif
-
#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6a16f61a72a1..166054110388 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -234,6 +234,7 @@ enum usb4_switch_op {
/* Router TMU configuration */
#define TMU_RTR_CS_0 0x00
+#define TMU_RTR_CS_0_FREQ_WIND_MASK GENMASK(26, 16)
#define TMU_RTR_CS_0_TD BIT(27)
#define TMU_RTR_CS_0_UCAP BIT(30)
#define TMU_RTR_CS_1 0x01
@@ -244,6 +245,11 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
+#define TMU_RTR_CS_15 0xf
+#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0)
+#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6)
+#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12)
+#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18)
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
#define TMU_RTR_CS_25 0x19
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index ee37f8b58f50..24c06e7354cd 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -2817,14 +2817,4 @@ static struct kunit_suite tb_test_suite = {
.test_cases = tb_test_cases,
};
-static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
-
-int tb_test_init(void)
-{
- return __kunit_test_suites_init(tb_test_suites);
-}
-
-void tb_test_exit(void)
-{
- return __kunit_test_suites_exit(tb_test_suites);
-}
+kunit_test_suite(tb_test_suite);
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index e4a07a26f693..626aca3124b1 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -11,6 +11,55 @@
#include "tb.h"
+static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate)
+{
+ u32 freq_meas_wind[2] = { 30, 800 };
+ u32 avg_const[2] = { 4, 8 };
+ u32 freq, avg, val;
+ int ret;
+
+ if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
+ freq = freq_meas_wind[0];
+ avg = avg_const[0];
+ } else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
+ freq = freq_meas_wind[1];
+ avg = avg_const[1];
+ } else {
+ return 0;
+ }
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
+ val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_15, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
+ ~TMU_RTR_CS_15_DELAY_AVG_MASK &
+ ~TMU_RTR_CS_15_OFFSET_AVG_MASK &
+ ~TMU_RTR_CS_15_ERROR_AVG_MASK;
+ val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
+ FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
+ FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
+ FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_15, 1);
+}
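The read-modify-write above packs the four averaging constants into TMU_RTR_CS_15 with GENMASK()/FIELD_PREP(). A small userspace sketch of the same packing, with local stand-ins for the two kernel macros (assumes GCC/Clang for __builtin_ctz(); not kernel code), shows the resulting register value:

/*
 * Userspace sketch of the bitfield packing done by
 * tb_switch_set_tmu_mode_params(). The mask definitions mirror the
 * TMU_RTR_CS_15_* values added in this patch.
 */
#include <stdio.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)	(((unsigned int)(v) << __builtin_ctz(m)) & (m))

#define FREQ_AVG_MASK	GENMASK(5, 0)
#define DELAY_AVG_MASK	GENMASK(11, 6)
#define OFFSET_AVG_MASK	GENMASK(17, 12)
#define ERROR_AVG_MASK	GENMASK(23, 18)

int main(void)
{
	unsigned int avg = 4;	/* Normal rate uses 4, HiFi uses 8 */
	unsigned int val = 0;	/* stands in for the value read from the router */

	val &= ~(FREQ_AVG_MASK | DELAY_AVG_MASK | OFFSET_AVG_MASK | ERROR_AVG_MASK);
	val |= FIELD_PREP(FREQ_AVG_MASK, avg) |
	       FIELD_PREP(DELAY_AVG_MASK, avg) |
	       FIELD_PREP(OFFSET_AVG_MASK, avg) |
	       FIELD_PREP(ERROR_AVG_MASK, avg);

	printf("TMU_RTR_CS_15 = 0x%08x\n", val);	/* prints 0x00104104 */
	return 0;
}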
+
static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
bool root_switch = !tb_route(sw);
@@ -348,7 +397,7 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
if (tb_route(sw)) {
- bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
+ bool unidirectional = sw->tmu.unidirectional;
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down, *up;
int ret;
@@ -359,13 +408,14 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
* In case of uni-directional time sync, TMU handshake is
* initiated by upstream router. In case of bi-directional
* time sync, TMU handshake is initiated by downstream router.
- * Therefore, we change the rate to off in the respective
- * router.
+ * We change the downstream router's rate to off for both uni/bidir
+ * cases although it is needed only for the bi-directional mode.
+ * We avoid changing the upstream router's mode since it might
+ * have another downstream router plugged in that is set to
+ * uni-directional mode and we don't want to change its TMU
+ * mode.
*/
- if (unidirectional)
- tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
- else
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
tb_port_tmu_time_sync_disable(up);
ret = tb_port_tmu_time_sync_disable(down);
@@ -411,6 +461,7 @@ static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
else
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
tb_port_tmu_unidirectional_disable(down);
tb_port_tmu_unidirectional_disable(up);
}
@@ -492,7 +543,11 @@ static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
- ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
+ ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
if (ret)
return ret;
@@ -519,7 +574,83 @@ out:
return ret;
}
-static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
+static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *down, *up;
+
+ down = tb_port_at(tb_route(sw), parent);
+ up = tb_upstream_port(sw);
+ /*
+ * In case of any failure in one of the steps when changing the mode,
+ * revert to the TMU configuration of the previous mode.
+ * In case of additional failures in the functions below,
+ * ignore them since the caller shall already report a failure.
+ */
+ tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
+ if (sw->tmu.unidirectional_request)
+ tb_switch_tmu_rate_write(parent, sw->tmu.rate);
+ else
+ tb_switch_tmu_rate_write(sw, sw->tmu.rate);
+
+ tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
+ tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
+}
+
+static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
+ if (ret)
+ goto out;
+
+ if (sw->tmu.unidirectional_request)
+ ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
+ else
+ ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_change_mode_prev(sw);
+ return ret;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a router
+ * @sw: Router whose TMU to enable
+ *
+ * Enables TMU of a router to be in uni-directional Normal/HiFi
+ * or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
+ * before calling this function, to select the mode Normal/HiFi and
+ * directionality (uni-directional/bi-directional).
+ * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
+ * work. Uni-directional mode is required for CLx (Link Low-Power) to work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
{
bool unidirectional = sw->tmu.unidirectional_request;
int ret;
@@ -535,12 +666,15 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
if (!tb_switch_is_clx_supported(sw))
return 0;
- if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
+ if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
if (tb_switch_is_titan_ridge(sw) && unidirectional) {
- /* Titan Ridge supports only CL0s */
- if (!tb_switch_is_cl0s_enabled(sw))
+ /*
+ * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
+ * enabled and supported together.
+ */
+ if (!tb_switch_is_clx_enabled(sw, TB_CL1))
return -EOPNOTSUPP;
ret = tb_switch_tmu_objection_mask(sw);
@@ -557,7 +691,11 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
return ret;
if (tb_route(sw)) {
- /* The used mode changes are from OFF to HiFi-Uni/HiFi-BiDir */
+ /*
+ * The used mode changes are from OFF to
+ * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
+ * HiFi-Uni.
+ */
if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
if (unidirectional)
ret = __tb_switch_tmu_enable_unidirectional(sw);
@@ -565,6 +703,10 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
ret = __tb_switch_tmu_enable_bidirectional(sw);
if (ret)
return ret;
+ } else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
+ ret = __tb_switch_tmu_change_mode(sw);
+ if (ret)
+ return ret;
}
sw->tmu.unidirectional = unidirectional;
} else {
@@ -574,39 +716,21 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
* of the child node - see above.
* Here only the host router' rate configuration is written.
*/
- ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
if (ret)
return ret;
}
- sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+ sw->tmu.rate = sw->tmu.rate_request;
tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
return tb_switch_tmu_set_time_disruption(sw, false);
}
/**
- * tb_switch_tmu_enable() - Enable TMU on a router
- * @sw: Router whose TMU to enable
- *
- * Enables TMU of a router to be in uni-directional or bi-directional HiFi mode.
- * Calling tb_switch_tmu_configure() is required before calling this function,
- * to select the mode HiFi and directionality (uni-directional/bi-directional).
- * In both modes all tunneling should work. Uni-directional mode is required for
- * CLx (Link Low-Power) to work.
- */
-int tb_switch_tmu_enable(struct tb_switch *sw)
-{
- if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
- return -EOPNOTSUPP;
-
- return tb_switch_tmu_hifi_enable(sw);
-}
-
-/**
* tb_switch_tmu_configure() - Configure the TMU rate and directionality
* @sw: Router whose mode to change
- * @rate: Rate to configure Off/LowRes/HiFi
+ * @rate: Rate to configure Off/Normal/HiFi
* @unidirectional: If uni-directional (bi-directional otherwise)
*
* Selects the rate of the TMU and directionality (uni-directional or
@@ -618,3 +742,32 @@ void tb_switch_tmu_configure(struct tb_switch *sw,
sw->tmu.unidirectional_request = unidirectional;
sw->tmu.rate_request = rate;
}
+
+static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
+{
+ if (tb_is_switch(dev)) {
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
+ tb_switch_is_clx_enabled(sw, TB_CL1));
+ if (tb_switch_tmu_enable(sw))
+ tb_sw_dbg(sw, "failed to switch TMU mode for 1st depth router\n");
+ }
+
+ return 0;
+}
+
+/**
+ * tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st children
+ * @sw: The router whose 1st depth children's TMU to configure and enable
+ * @rate: Rate of the TMU to configure the router's children to
+ *
+ * Configures and enables the TMU mode of 1st depth children of the specified
+ * router to the specified rate.
+ */
+void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate)
+{
+ device_for_each_child(&sw->dev, &rate,
+ tb_switch_tmu_config_enable);
+}
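tb_switch_enable_tmu_1st_child() relies on the driver-core helper device_for_each_child(), which walks a device's direct children and hands each one, plus an opaque data pointer, to a callback. A minimal userspace analogy of that walk (illustrative names only, not the driver-core API):

#include <stdio.h>

struct child { const char *name; int is_switch; };

static int for_each_child(struct child *kids, int n, void *data,
			  int (*fn)(struct child *, void *))
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fn(&kids[i], data);
		if (ret)
			return ret;	/* non-zero return stops the walk */
	}
	return 0;
}

static int config_child(struct child *c, void *rate)
{
	if (c->is_switch)	/* mirrors the tb_is_switch() filter above */
		printf("configure %s to rate %d\n", c->name, *(int *)rate);
	return 0;		/* keep walking */
}

int main(void)
{
	struct child kids[] = { { "router-a", 1 }, { "dp-port", 0 } };
	int rate = 1;	/* stand-in for an enum tb_switch_tmu_rate value */

	return for_each_child(kids, 2, &rate, config_child);
}

Passing the rate by pointer is what lets a single callback signature carry arbitrary per-walk context, which is why the patch takes &rate rather than the value itself.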
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index afb2d373dd47..81e7f64c1739 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -12,7 +12,7 @@
* (non hardware specific) changes to serial.c.
*
* The port is registered with the tty driver as minor device 64, and
- * therefore other ports should should only use 65 upwards.
+ * therefore other ports should only use 65 upwards.
*
* Richard Lucock 28/12/99
*
@@ -51,6 +51,7 @@
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
@@ -283,12 +284,12 @@ static void transmit_chars(struct serial_state *info)
amiga_custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
mb();
- info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit.tail = info->xmit.tail & (UART_XMIT_SIZE - 1);
info->icount.tx++;
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ UART_XMIT_SIZE) < WAKEUP_CHARS)
tty_wakeup(info->tport.tty);
#ifdef SERIAL_DEBUG_INTR
@@ -708,13 +709,13 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) == 0) {
+ UART_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head++] = ch;
- info->xmit.head &= SERIAL_XMIT_SIZE-1;
+ info->xmit.head &= UART_XMIT_SIZE - 1;
local_irq_restore(flags);
return 1;
}
@@ -753,15 +754,14 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE);
+ UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
- info->xmit.head = ((info->xmit.head + c) &
- (SERIAL_XMIT_SIZE-1));
+ info->xmit.head = (info->xmit.head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
@@ -788,14 +788,14 @@ static unsigned int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static unsigned int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
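The SERIAL_XMIT_SIZE to UART_XMIT_SIZE switch above keeps the same circular-buffer arithmetic: because the size is a power of two, CIRC_CNT()/CIRC_SPACE() reduce to a subtraction masked with size - 1. A standalone sketch of that arithmetic (local helper names, not the kernel macros):

#include <stdio.h>

#define XMIT_SIZE 4096	/* must be a power of two */

static unsigned int circ_cnt(unsigned int head, unsigned int tail)
{
	return (head - tail) & (XMIT_SIZE - 1);	/* bytes queued */
}

static unsigned int circ_space(unsigned int head, unsigned int tail)
{
	return (tail - head - 1) & (XMIT_SIZE - 1);	/* bytes free */
}

int main(void)
{
	unsigned int head = 10, tail = 4090;	/* head has wrapped past tail */

	printf("queued=%u free=%u\n", circ_cnt(head, tail),
	       circ_space(head, tail));	/* queued=16 free=4079 */
	return 0;
}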
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 31dceb5039b5..e81701a66429 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -916,7 +916,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
/* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
- priv->xmit_size = min(tx_fifo * 4, (unsigned int)SERIAL_XMIT_SIZE);
+ priv->xmit_size = min(tx_fifo * 4, (unsigned int)UART_XMIT_SIZE);
driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
@@ -1222,7 +1222,7 @@ static void kgdbfdc_push_one(void)
/* Construct a word from any data in buffer */
word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
- /* Relocate any remaining data to beginnning of buffer */
+ /* Relocate any remaining data to beginning of buffer */
kgdbfdc_wbuflen -= word.bytes;
for (i = 0; i < kgdbfdc_wbuflen; ++i)
kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fd4d24f61c46..caa5c14ed57f 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -5,6 +5,14 @@
*
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
+ * Outgoing path:
+ * tty -> DLCI fifo -> scheduler -> GSM MUX data queue ---o-> ldisc
+ * control message -> GSM MUX control queue --´
+ *
+ * Incoming path:
+ * ldisc -> gsm_queue() -o--> tty
+ * `-> gsm_control_response()
+ *
* TO DO:
* Mostly done: ioctls for setting modes/timing
* Partly done: hooks so you can pull off frames to non tty devs
@@ -210,6 +218,9 @@ struct gsm_mux {
/* Events on the GSM channel */
wait_queue_head_t event;
+ /* ldisc send work */
+ struct work_struct tx_work;
+
/* Bits for GSM mode decoding */
/* Framing Layer */
@@ -235,14 +246,17 @@ struct gsm_mux {
struct gsm_dlci *dlci[NUM_DLCI];
int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
+ bool has_devices; /* Devices were registered */
spinlock_t tx_lock;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
- struct list_head tx_list; /* Pending data packets */
+ struct list_head tx_ctrl_list; /* Pending control packets */
+ struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
+ struct timer_list kick_timer; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -369,6 +383,11 @@ static const u8 gsm_fcs8[256] = {
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl);
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
+static void gsmld_write_trigger(struct gsm_mux *gsm);
+static void gsmld_write_task(struct work_struct *work);
/**
* gsm_fcs_add - update FCS
@@ -420,6 +439,27 @@ static int gsm_read_ea(unsigned int *val, u8 c)
}
/**
+ * gsm_read_ea_val - read a value until EA
+ * @val: variable holding value
+ * @data: buffer of data
+ * @dlen: length of data
+ *
+ * Processes an EA value. Updates the passed variable and
+ * returns the processed data length.
+ */
+static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
+{
+ unsigned int len = 0;
+
+ for (; dlen > 0; dlen--) {
+ len++;
+ if (gsm_read_ea(val, *data++))
+ break;
+ }
+ return len;
+}
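gsm_read_ea_val() parses the 3GPP TS 07.10 "EA" encoding: each octet contributes seven value bits and bit 0 set marks the final octet. A standalone sketch mirroring that loop, with gsm_read_ea() folded in (illustrative only, not the driver code):

#include <stdio.h>

static unsigned int read_ea_val(unsigned int *val, const unsigned char *data,
				int dlen)
{
	unsigned int len = 0;

	for (; dlen > 0; dlen--) {
		len++;
		*val <<= 7;
		*val |= *data >> 1;
		if (*data++ & 0x01)	/* EA bit set: last octet */
			break;
	}
	return len;	/* octets consumed */
}

int main(void)
{
	/* 0x02: 7-bit value 1, EA clear; 0x81: 7-bit value 0x40, EA set */
	const unsigned char buf[] = { 0x02, 0x81 };
	unsigned int val = 0;	/* caller initializes, as in the driver */
	unsigned int used = read_ea_val(&val, buf, sizeof(buf));

	printf("consumed %u octets, value 0x%x\n", used, val);	/* 2, 0xc0 */
	return 0;
}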
+
+/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
@@ -464,6 +504,68 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
}
/**
+ * gsm_register_devices - register all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
+{
+ struct device *dev;
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return -EINVAL;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't register device 0 - this is the control channel
+ * and not a usable tty interface
+ */
+ dev = tty_register_device(gsm_tty_driver, base + i, NULL);
+ if (IS_ERR(dev)) {
+ if (debug & 8)
+ pr_info("%s failed to register device minor %u",
+ __func__, base + i);
+ for (i--; i >= 1; i--)
+ tty_unregister_device(gsm_tty_driver, base + i);
+ return PTR_ERR(dev);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * gsm_unregister_devices - unregister all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static void gsm_unregister_devices(struct tty_driver *driver,
+ unsigned int index)
+{
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't unregister device 0 - this is the control
+ * channel and not a usable tty interface
+ */
+ tty_unregister_device(gsm_tty_driver, base + i);
+ }
+}
+
+/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
@@ -570,57 +672,73 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
- * Format up and transmit a control frame. These do not go via the
- * queueing logic as they should be transmitted ahead of data when
- * they are needed.
- *
- * FIXME: Lock versus data TX path
+ * Format up and transmit a control frame. These should be transmitted
+ * ahead of data when they are needed.
*/
-
-static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
{
- int len;
- u8 cbuf[10];
- u8 ibuf[3];
+ struct gsm_msg *msg;
+ u8 *dp;
int ocr;
+ unsigned long flags;
+
+ msg = gsm_data_alloc(gsm, addr, 0, control);
+ if (!msg)
+ return -ENOMEM;
/* toggle C/R coding if not initiator */
ocr = cr ^ (gsm->initiator ? 0 : 1);
- switch (gsm->encoding) {
- case 0:
- cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (ocr << 1) | EA;
- cbuf[2] = control;
- cbuf[3] = EA; /* Length of data = 0 */
- cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
- cbuf[5] = GSM0_SOF;
- len = 6;
- break;
- case 1:
- case 2:
- /* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (ocr << 1) | EA;
- ibuf[1] = control;
- ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
- /* Stuffing may double the size worst case */
- len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
- /* Now add the SOF markers */
- cbuf[0] = GSM1_SOF;
- cbuf[len + 1] = GSM1_SOF;
- /* FIXME: we can omit the lead one in many cases */
- len += 2;
- break;
- default:
- WARN_ON(1);
- return;
- }
- gsmld_output(gsm, cbuf, len);
- if (!gsm->initiator) {
- cr = cr & gsm->initiator;
- control = control & ~PF;
+ msg->data -= 3;
+ dp = msg->data;
+ *dp++ = (addr << 2) | (ocr << 1) | EA;
+ *dp++ = control;
+
+ if (gsm->encoding == 0)
+ *dp++ = EA; /* Length of data = 0 */
+
+ *dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
+ msg->len = (dp - msg->data) + 1;
+
+ gsm_print_packet("Q->", addr, cr, control, NULL, 0);
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ gsm->tx_bytes += msg->len;
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
+
+ return 0;
+}
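The header bytes written above follow the 07.10 layout: the address octet carries the DLCI in its upper six bits, the C/R bit next, and EA as the low bit, followed by the control byte and, for basic encoding, an EA-terminated length of zero. A tiny standalone sketch of just the address-octet composition (values are illustrative):

#include <stdio.h>

#define EA 0x01

int main(void)
{
	unsigned int addr = 5, cr = 1;	/* illustrative DLCI and C/R bit */
	unsigned char octet = (addr << 2) | (cr << 1) | EA;

	printf("address octet 0x%02x (dlci=%u cr=%u ea=%u)\n",
	       octet, octet >> 2, (octet >> 1) & 1, octet & 1);
	return 0;
}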
+
+/**
+ * gsm_dlci_clear_queues - remove outstanding data for a DLCI
+ * @gsm: mux
+ * @dlci: clear for this DLCI
+ *
+ * Clears the data queues for a given DLCI.
+ */
+static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg, *nmsg;
+ int addr = dlci->addr;
+ unsigned long flags;
+
+ /* Clear DLCI write fifo first */
+ spin_lock_irqsave(&dlci->lock, flags);
+ kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
+
+ /* Clear data packets in MUX write queue */
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ if (msg->addr != addr)
+ continue;
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
}
- gsm_print_packet("-->", addr, cr, control, NULL, 0);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
@@ -683,59 +801,151 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
}
/**
- * gsm_data_kick - poke the queue
+ * gsm_send_packet - sends a single packet
* @gsm: GSM Mux
- * @dlci: DLCI sending the data
+ * @msg: packet to send
*
- * The tty device has called us to indicate that room has appeared in
- * the transmit queue. Ram more data into the pipe if we have any
- * If we have been flow-stopped by a CMD_FCOFF, then we can only
- * send messages on DLCI0 until CMD_FCON
+ * The given packet is encoded and sent out. No memory is freed.
+ * The caller must hold the gsm tx lock.
+ */
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
+{
+ int len, ret;
+
+
+ if (gsm->encoding == 0) {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1, msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ } else {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ }
+
+ if (debug & 4)
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
+ gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
+ msg->len);
+
+ ret = gsmld_output(gsm, gsm->txframe, len);
+ if (ret <= 0)
+ return ret;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_bytes -= msg->len;
+
+ return 0;
+}
+
+/**
+ * gsm_is_flow_ctrl_msg - checks if flow control message
+ * @msg: message to check
*
- * FIXME: lock against link layer control transmissions
+ * Returns true if the given message is a flow control command of the
+ * control channel. False is returned in any other case.
*/
+static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
+{
+ unsigned int cmd;
+
+ if (msg->addr > 0)
+ return false;
+
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ cmd = 0;
+ if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
+ break;
+ switch (cmd & ~PF) {
+ case CMD_FCOFF:
+ case CMD_FCON:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
-static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any.
+ * If we have been flow-stopped by a CMD_FCOFF, then we can only
+ * send messages on DLCI0 until CMD_FCON. The caller must hold
+ * the gsm tx lock.
+ */
+static int gsm_data_kick(struct gsm_mux *gsm)
{
struct gsm_msg *msg, *nmsg;
- int len;
+ struct gsm_dlci *dlci;
+ int ret;
- list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
- if (gsm->constipated && msg->addr)
- continue;
- if (gsm->encoding != 0) {
- gsm->txframe[0] = GSM1_SOF;
- len = gsm_stuff_frame(msg->data,
- gsm->txframe + 1, msg->len);
- gsm->txframe[len + 1] = GSM1_SOF;
- len += 2;
- } else {
- gsm->txframe[0] = GSM0_SOF;
- memcpy(gsm->txframe + 1 , msg->data, msg->len);
- gsm->txframe[msg->len + 1] = GSM0_SOF;
- len = msg->len + 2;
- }
+ clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
- if (debug & 4)
- gsm_hex_dump_bytes(__func__, gsm->txframe, len);
- if (gsmld_output(gsm, gsm->txframe, len) <= 0)
+ /* Serialize control messages and control channel messages first */
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) {
+ if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
+ continue;
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
break;
- /* FIXME: Can eliminate one SOF in many more cases */
- gsm->tx_bytes -= msg->len;
-
- list_del(&msg->list);
- kfree(msg);
+ }
+ }
- if (dlci) {
- tty_port_tty_wakeup(&dlci->port);
- } else {
- int i = 0;
+ if (gsm->constipated)
+ return -EAGAIN;
- for (i = 0; i < NUM_DLCI; i++)
- if (gsm->dlci[i])
- tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ /* Serialize other channels */
+ if (list_empty(&gsm->tx_data_list))
+ return 0;
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ dlci = gsm->dlci[msg->addr];
+ /* Send only messages for DLCIs with valid state */
+ if (dlci->state != DLCI_OPEN) {
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ }
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ break;
}
}
+
+ return 1;
}
/**
@@ -784,9 +994,22 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
msg->data = dp;
/* Add to the actual output queue */
- list_add_tail(&msg->list, &gsm->tx_list);
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ if (msg->addr > 0) {
+ list_add_tail(&msg->list, &gsm->tx_data_list);
+ break;
+ }
+ fallthrough;
+ default:
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ break;
+ }
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm, dlci);
+
+ gsmld_write_trigger(gsm);
+ mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
}
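The kick timer above is armed for 10 * t1 * HZ / 100 jiffies; with t1 expressed in hundredths of a second that works out to ten T1 periods. A quick standalone check of the arithmetic (the HZ value and the default t1 of 10 are assumptions for illustration):

#include <stdio.h>

#define HZ 250	/* assumed tick rate, purely for illustration */

int main(void)
{
	unsigned int t1 = 10;	/* assumed default T1, i.e. 100 ms */
	unsigned long delay = 10UL * t1 * HZ / 100;

	/* 10 * 10 * 250 / 100 = 250 jiffies = 1000 ms = ten T1 periods */
	printf("kick after %lu jiffies (%lu ms)\n", delay, delay * 1000 / HZ);
	return 0;
}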
/**
@@ -823,41 +1046,48 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
- int len, total_size, size;
- int h = dlci->adaption - 1;
+ int h, len, size;
- total_size = 0;
- while (1) {
- len = kfifo_len(&dlci->fifo);
- if (len == 0)
- return total_size;
-
- /* MTU/MRU count only the data bits */
- if (len > gsm->mtu)
- len = gsm->mtu;
-
- size = len + h;
-
- msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
- if (msg == NULL)
- return -ENOMEM;
- dp = msg->data;
- switch (dlci->adaption) {
- case 1: /* Unstructured */
- break;
- case 2: /* Unstructed with modem bits.
- Always one byte as we never send inline break data */
- *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
- break;
- }
- WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
- __gsm_data_queue(dlci, msg);
- total_size += size;
+ /* for modem bits without break data */
+ h = ((dlci->adaption == 1) ? 0 : 1);
+
+ len = kfifo_len(&dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits but watch adaption mode */
+ if ((len + h) > gsm->mtu)
+ len = gsm->mtu - h;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ if (!msg)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits.
+ * Always one byte as we never send inline break data
+ */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ break;
}
+
+ WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
+ &dlci->lock));
+
+ /* Notify upper layer about available send space. */
+ tty_port_tty_wakeup(&dlci->port);
+
+ __gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
- return total_size;
+ return size;
}
/**
@@ -908,9 +1138,6 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
size = len + overhead;
msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
-
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
if (msg == NULL) {
skb_queue_tail(&dlci->skb_list, dlci->skb);
dlci->skb = NULL;
@@ -1006,32 +1233,43 @@ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
* renegotiate DLCI priorities with optional stuff. Needs optimising.
*/
-static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
{
- int len;
/* Priority ordering: We should do priority with RR of the groups */
- int i = 1;
-
- while (i < NUM_DLCI) {
- struct gsm_dlci *dlci;
+ int i, len, ret = 0;
+ bool sent;
+ struct gsm_dlci *dlci;
- if (gsm->tx_bytes > TX_THRESH_HI)
- break;
- dlci = gsm->dlci[i];
- if (dlci == NULL || dlci->constipated) {
- i++;
- continue;
+ while (gsm->tx_bytes < TX_THRESH_HI) {
+ for (sent = false, i = 1; i < NUM_DLCI; i++) {
+ dlci = gsm->dlci[i];
+ /* skip unused or blocked channel */
+ if (!dlci || dlci->constipated)
+ continue;
+ /* skip channels with invalid state */
+ if (dlci->state != DLCI_OPEN)
+ continue;
+ /* count the sent data per adaption */
+ if (dlci->adaption < 3 && !dlci->net)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ /* on error exit */
+ if (len < 0)
+ return ret;
+ if (len > 0) {
+ ret++;
+ sent = true;
+ /* The lower DLCs can starve the higher DLCs! */
+ break;
+ }
+ /* try next */
}
- if (dlci->adaption < 3 && !dlci->net)
- len = gsm_dlci_data_output(gsm, dlci);
- else
- len = gsm_dlci_data_output_framed(gsm, dlci);
- if (len < 0)
+ if (!sent)
break;
- /* DLCI empty - try the next */
- if (len == 0)
- i++;
- }
+ };
+
+ return ret;
}
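The reworked sweep keeps pulling one packet at a time from the lowest-numbered ready DLCI until the TX byte budget is hit or nothing is left, which is also why the comment warns that lower DLCIs can starve higher ones. A standalone sketch of that loop with fabricated queue contents:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CH    4
#define THRESH_HI 8192

int main(void)
{
	int pending[NUM_CH] = { 0, 3, 1, 2 };	/* packets queued per channel */
	int pkt_len = 1024, tx_bytes = 0, sent_total = 0;

	while (tx_bytes < THRESH_HI) {
		bool sent = false;
		int i;

		for (i = 1; i < NUM_CH; i++) {	/* channel 0 is control only */
			if (!pending[i])
				continue;
			pending[i]--;		/* "send" one packet */
			tx_bytes += pkt_len;
			sent_total++;
			sent = true;
			break;			/* restart from the lowest channel */
		}
		if (!sent)
			break;
	}
	printf("sent %d packets, %d bytes queued\n", sent_total, tx_bytes);
	return 0;
}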
/**
@@ -1277,7 +1515,6 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
u8 buf[1];
- unsigned long flags;
switch (command) {
case CMD_CLD: {
@@ -1299,9 +1536,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
@@ -1407,7 +1642,7 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- if (gsm->cretries == 0) {
+ if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
@@ -1504,25 +1739,24 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
- unsigned long flags;
-
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
+ /* Prevent us from sending data before the link is up again */
+ dlci->constipated = true;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
- spin_lock_irqsave(&dlci->lock, flags);
- kfifo_reset(&dlci->fifo);
- spin_unlock_irqrestore(&dlci->lock, flags);
+ gsm_dlci_clear_queues(dlci->gsm, dlci);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
- wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
+ gsm_dlci_data_kick(dlci);
+ wake_up(&dlci->gsm->event);
}
/**
@@ -1539,11 +1773,13 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
del_timer(&dlci->t1);
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
+ dlci->constipated = false;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr)
gsm_modem_update(dlci, 0);
+ gsm_dlci_data_kick(dlci);
wake_up(&dlci->gsm->event);
}
@@ -1569,8 +1805,8 @@ static void gsm_dlci_t1(struct timer_list *t)
switch (dlci->state) {
case DLCI_OPENING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
@@ -1585,8 +1821,8 @@ static void gsm_dlci_t1(struct timer_list *t)
break;
case DLCI_CLOSING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
@@ -1620,6 +1856,25 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
}
/**
+ * gsm_dlci_set_opening - change state to opening
+ * @dlci: DLCI to open
+ *
+ * Change internal state to wait for DLCI open from initiator side.
+ * We set off timers and responses upon reception of an SABM.
+ */
+static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
+{
+ switch (dlci->state) {
+ case DLCI_CLOSED:
+ case DLCI_CLOSING:
+ dlci->state = DLCI_OPENING;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* gsm_dlci_begin_close - start channel open procedure
* @dlci: DLCI to open
*
@@ -1728,6 +1983,30 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
}
+/**
+ * gsm_kick_timer - transmit if possible
+ * @t: timer contained in our gsm object
+ *
+ * Transmit data from DLCIs if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+static void gsm_kick_timer(struct timer_list *t)
+{
+ struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
+ unsigned long flags;
+ int sent = 0;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ sent = gsm_dlci_data_sweep(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ if (sent && debug & 4)
+ pr_info("%s TX queue stalled\n", __func__);
+}
+
/*
* Allocate/Free DLCI channels
*/
@@ -1762,10 +2041,13 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
dlci->addr = addr;
dlci->adaption = gsm->adaption;
dlci->state = DLCI_CLOSED;
- if (addr)
+ if (addr) {
dlci->data = gsm_dlci_data;
- else
+ /* Prevent us from sending data before the link is up */
+ dlci->constipated = true;
+ } else {
dlci->data = gsm_dlci_command;
+ }
gsm->dlci[addr] = dlci;
return dlci;
}
@@ -1925,7 +2207,7 @@ static void gsm_queue(struct gsm_mux *gsm)
case UIH:
case UIH|PF:
if (dlci == NULL || dlci->state != DLCI_OPEN) {
- gsm_command(gsm, address, DM|PF);
+ gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
@@ -2048,7 +2330,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
} else if ((c & ISO_IEC_646_MASK) == XOFF) {
gsm->constipated = false;
/* Kick the link in case it is idling */
- gsm_data_kick(gsm, NULL);
+ gsmld_write_trigger(gsm);
return;
}
if (c == GSM1_SOF) {
@@ -2176,18 +2458,29 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
+ del_timer_sync(&gsm->kick_timer);
del_timer_sync(&gsm->t2_timer);
+ /* Finish writing to ldisc */
+ flush_work(&gsm->tx_work);
+
/* Free up any link layer users and finally the control channel */
+ if (gsm->has_devices) {
+ gsm_unregister_devices(gsm_tty_driver, gsm->num);
+ gsm->has_devices = false;
+ }
for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
- list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
+ kfree(txq);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list)
kfree(txq);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
}
/**
@@ -2202,8 +2495,15 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
+ int ret;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
@@ -2213,9 +2513,11 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
else
gsm->receive = gsm1_receive;
- dlci = gsm_dlci_alloc(gsm, 0);
- if (dlci == NULL)
- return -ENOMEM;
+ ret = gsm_register_devices(gsm_tty_driver, gsm->num);
+ if (ret)
+ return ret;
+
+ gsm->has_devices = true;
gsm->dead = false; /* Tty opens are now permissible */
return 0;
}
@@ -2308,7 +2610,8 @@ static struct gsm_mux *gsm_alloc_mux(void)
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
kref_init(&gsm->ref);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2465,6 +2768,47 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return gsm->tty->ops->write(gsm->tty, data, len);
}
+
+/**
+ * gsmld_write_trigger - schedule ldisc write task
+ * @gsm: our mux
+ */
+static void gsmld_write_trigger(struct gsm_mux *gsm)
+{
+ if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead)
+ return;
+ schedule_work(&gsm->tx_work);
+}
+
+
+/**
+ * gsmld_write_task - ldisc write task
+ * @work: our tx write work
+ *
+ * Writes out data to the ldisc if possible. We are doing this here to
+ * avoid dead-locking. This returns if no space or data is left for output.
+ */
+static void gsmld_write_task(struct work_struct *work)
+{
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
+ unsigned long flags;
+ int i, ret;
+
+ /* All outstanding control channel and control messages and one data
+ * frame are sent.
+ */
+ ret = -ENODEV;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ if (gsm->tty)
+ ret = gsm_data_kick(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ if (ret >= 0)
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+}
+
/**
* gsmld_attach_gsm - mode set up
* @tty: our tty structure
@@ -2475,39 +2819,14 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
* will need moving to an ioctl path.
*/
-static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base;
- int ret, i;
-
gsm->tty = tty_kref_get(tty);
/* Turn off tty XON/XOFF handling to handle it explicitly. */
gsm->old_c_iflag = tty->termios.c_iflag;
tty->termios.c_iflag &= (IXON | IXOFF);
- ret = gsm_activate_mux(gsm);
- if (ret != 0)
- tty_kref_put(gsm->tty);
- else {
- /* Don't register device 0 - this is the control channel and not
- a usable tty interface */
- base = mux_num_to_base(gsm); /* Base for this MUX */
- for (i = 1; i < NUM_DLCI; i++) {
- struct device *dev;
-
- dev = tty_register_device(gsm_tty_driver,
- base + i, NULL);
- if (IS_ERR(dev)) {
- for (i--; i >= 1; i--)
- tty_unregister_device(gsm_tty_driver,
- base + i);
- return PTR_ERR(dev);
- }
- }
- }
- return ret;
}
-
/**
* gsmld_detach_gsm - stop doing 0710 mux
* @tty: tty attached to the mux
@@ -2518,12 +2837,7 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base = mux_num_to_base(gsm); /* Base for this MUX */
- int i;
-
WARN_ON(tty != gsm->tty);
- for (i = 1; i < NUM_DLCI; i++)
- tty_unregister_device(gsm_tty_driver, base + i);
/* Restore tty XON/XOFF handling. */
gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
@@ -2615,7 +2929,6 @@ static void gsmld_close(struct tty_struct *tty)
static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
- int ret;
if (tty->ops->write == NULL)
return -EINVAL;
@@ -2631,12 +2944,13 @@ static int gsmld_open(struct tty_struct *tty)
/* Attach the initial passive connection */
gsm->encoding = 1;
- ret = gsmld_attach_gsm(tty, gsm);
- if (ret != 0) {
- gsm_cleanup_mux(gsm, false);
- mux_put(gsm);
- }
- return ret;
+ gsmld_attach_gsm(tty, gsm);
+
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+
+ return 0;
}
/**
@@ -2651,16 +2965,9 @@ static int gsmld_open(struct tty_struct *tty)
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
/* Queue poll */
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- if (gsm->tx_bytes < TX_THRESH_LO) {
- gsm_dlci_data_sweep(gsm);
- }
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
}
/**
@@ -2704,11 +3011,24 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
- int space = tty_write_room(tty);
+ struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
+ int space;
+ int ret;
+
+ if (!gsm)
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ space = tty_write_room(tty);
if (space >= nr)
- return tty->ops->write(tty, buf, nr);
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return -ENOBUFS;
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ return ret;
}
/**
@@ -2733,12 +3053,15 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+
+ if (gsm->dead)
+ mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (gsm->dead)
- mask |= EPOLLHUP;
return mask;
}
@@ -3174,6 +3497,8 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
/* Start sending off SABM messages */
if (gsm->initiator)
gsm_dlci_begin_open(dlci);
+ else
+ gsm_dlci_set_opening(dlci);
/* And wait for virtual carrier */
return tty_port_block_til_ready(port, tty, filp);
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 640c9e871044..3afdd9033a9c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -118,6 +118,9 @@ struct n_tty_data {
size_t read_tail;
size_t line_start;
+ /* # of chars looked ahead (to find software flow control chars) */
+ size_t lookahead_count;
+
/* protected by output lock */
unsigned int column;
unsigned int canon_column;
@@ -333,6 +336,8 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->push = 0;
+
+ ldata->lookahead_count = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
@@ -1225,12 +1230,30 @@ static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
return c == START_CHAR(tty) || c == STOP_CHAR(tty);
}
-/* Returns true if c is consumed as flow-control character */
-static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+/**
+ * n_tty_receive_char_flow_ctrl - receive flow control chars
+ * @tty: terminal device
+ * @c: character
+ * @lookahead_done: lookahead has processed this character already
+ *
+ * Receive and process flow control character actions.
+ *
+ * In case lookahead for flow control chars already handled the character in
+ * advance of the normal receive, the actions are skipped during the normal
+ * receive.
+ *
+ * Returns true if @c is consumed as a flow-control character; the character
+ * must not be treated as a normal character.
+ */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (!n_tty_is_char_flow_ctrl(tty, c))
return false;
+ if (lookahead_done)
+ return true;
+
if (c == START_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
@@ -1242,11 +1265,12 @@ static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c
return true;
}
-static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+ if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c, lookahead_done))
return;
if (L_ISIG(tty)) {
@@ -1401,7 +1425,8 @@ static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (I_ISTRIP(tty))
c &= 0x7f;
@@ -1409,12 +1434,10 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
c = tolower(c);
if (I_IXON(tty)) {
- if (c == STOP_CHAR(tty))
- stop_tty(tty);
- else if (c == START_CHAR(tty) ||
- (tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
- c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
- c != SUSP_CHAR(tty))) {
+ if (!n_tty_receive_char_flow_ctrl(tty, c, lookahead_done) &&
+ tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
+ c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
+ c != SUSP_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
}
@@ -1457,6 +1480,27 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
n_tty_receive_char_flagged(tty, c, flag);
}
+/* Caller must ensure count > 0 */
+static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ unsigned char flag = TTY_NORMAL;
+
+ ldata->lookahead_count += count;
+
+ if (!I_IXON(tty))
+ return;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL))
+ n_tty_receive_char_flow_ctrl(tty, *cp, false);
+ cp++;
+ }
+}
+
static void
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count)
@@ -1496,7 +1540,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
static void
n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+ const char *fp, int count, bool lookahead_done)
{
char flag = TTY_NORMAL;
@@ -1504,12 +1548,12 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
- n_tty_receive_char_closing(tty, *cp++);
+ n_tty_receive_char_closing(tty, *cp++, lookahead_done);
}
}
static void n_tty_receive_buf_standard(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count)
+ const unsigned char *cp, const char *fp, int count, bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
char flag = TTY_NORMAL;
@@ -1540,7 +1584,7 @@ static void n_tty_receive_buf_standard(struct tty_struct *tty,
}
if (test_bit(c, ldata->char_map))
- n_tty_receive_char_special(tty, c);
+ n_tty_receive_char_special(tty, c, lookahead_done);
else
n_tty_receive_char(tty, c);
}
@@ -1551,21 +1595,30 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
+ size_t la_count = min_t(size_t, ldata->lookahead_count, count);
if (ldata->real_raw)
n_tty_receive_buf_real_raw(tty, cp, fp, count);
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
- else if (tty->closing && !L_EXTPROC(tty))
- n_tty_receive_buf_closing(tty, cp, fp, count);
- else {
- n_tty_receive_buf_standard(tty, cp, fp, count);
+ else if (tty->closing && !L_EXTPROC(tty)) {
+ if (la_count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ } else {
+ if (la_count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
+ ldata->lookahead_count -= la_count;
+
if (ldata->icanon && !L_EXTPROC(tty))
return;
@@ -2446,6 +2499,7 @@ static struct tty_ldisc_ops n_tty_ops = {
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
.receive_buf2 = n_tty_receive_buf2,
+ .lookahead_buf = n_tty_lookahead_flow_ctrl,
};
/**
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 74bfabe5b453..752dab3356d7 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -111,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty)
static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
- unsigned long flags;
- if (tty->flow.stopped)
+ if (tty->flow.stopped || !c)
return 0;
- if (c > 0) {
- spin_lock_irqsave(&to->port->lock, flags);
- /* Stuff the data into the input queue of the other end */
- c = tty_insert_flip_string(to->port, buf, c);
- spin_unlock_irqrestore(&to->port->lock, flags);
- /* And shovel */
- if (c)
- tty_flip_buffer_push(to->port);
- }
- return c;
+ return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
}
/**
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 696030cfcb09..287153d32536 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -123,6 +123,26 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
up->port.serial_out(&up->port, offset, value);
}
+/**
+ * serial_lsr_in - Read LSR register and preserve flags across reads
+ * @up: uart 8250 port
+ *
+ * Read LSR register and handle saving non-preserved flags across reads.
+ * The flags that are not preserved across reads are stored into
+ * up->lsr_saved_flags.
+ *
+ * Returns LSR value or'ed with the preserved flags (if any).
+ */
+static inline u16 serial_lsr_in(struct uart_8250_port *up)
+{
+ u16 lsr = up->lsr_saved_flags;
+
+ lsr |= serial_in(up, UART_LSR);
+ up->lsr_saved_flags = lsr & up->lsr_save_mask;
+
+ return lsr;
+}
+
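A minimal usage sketch, offered only as an illustration of the new helper and not taken from the patch itself; it assumes the uart_lsr_tx_empty() and cpu_relax() helpers visible elsewhere in this series:

	/* Hypothetical caller: poll for transmitter-empty via serial_lsr_in(). */
	static void example_wait_tx_empty(struct uart_8250_port *up)
	{
		/*
		 * Each read folds the bits named in up->lsr_save_mask into
		 * up->lsr_saved_flags, so error flags seen here are not lost
		 * to a later consumer such as the IRQ handler.
		 */
		while (!uart_lsr_tx_empty(serial_lsr_in(up)))
			cpu_relax();
	}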
/*
* For the 16C950
*/
@@ -183,10 +203,12 @@ void serial8250_rpm_put(struct uart_8250_port *p);
void serial8250_rpm_get_tx(struct uart_8250_port *p);
void serial8250_rpm_put_tx(struct uart_8250_port *p);
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485);
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
void serial8250_em485_start_tx(struct uart_8250_port *p);
void serial8250_em485_stop_tx(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
+extern struct serial_rs485 serial8250_em485_supported;
/* MCR <-> TIOCM conversion */
static inline int serial8250_TIOCM_to_MCR(int tiocm)
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 2a1226a78a0c..15a2387a5b25 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -108,6 +108,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
UPF_SKIP_TEST | UPF_IOREMAP;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = bcm2835aux_rs485_start_tx;
up.rs485_stop_tx = bcm2835aux_rs485_stop_tx;
@@ -166,8 +167,10 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
uartclk = clk_get_rate(data->clk);
if (!uartclk) {
ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ goto dis_clk;
+ }
}
/* the HW-clock divider for bcm2835aux is 8,
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 9b878d023dac..8efdc271eb75 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1139,16 +1139,19 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
-
- serial8250_suspend_port(priv->line);
- clk_disable_unprepare(priv->baud_mux_clk);
+ unsigned long flags;
/*
* This will prevent resume from enabling RTS before the
- * baud rate has been resored.
+ * baud rate has been restored.
*/
+ spin_lock_irqsave(&port->lock, flags);
priv->saved_mctrl = port->mctrl;
- port->mctrl = 0;
+ port->mctrl &= ~TIOCM_RTS;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ serial8250_suspend_port(priv->line);
+ clk_disable_unprepare(priv->baud_mux_clk);
return 0;
}
@@ -1158,6 +1161,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
+ unsigned long flags;
int ret;
ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1180,7 +1184,15 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
start_rx_dma(serial8250_get_port(priv->line));
}
serial8250_resume_port(priv->line);
- port->mctrl = priv->saved_mctrl;
+
+ if (priv->saved_mctrl & TIOCM_RTS) {
+ /* Restore RTS */
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index cfbd2de0ca6e..2e83e7367441 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -23,6 +23,7 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
@@ -276,8 +277,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
* the "Diva" UART used on the management processor on many HP
* ia64 and parisc boxes.
*/
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
(!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
(lsr & UART_LSR_THRE)) {
@@ -559,6 +559,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
up->port.dev = dev;
+ if (uart_console_enabled(&up->port))
+ pm_runtime_get_sync(up->port.dev);
+
serial8250_apply_quirks(up);
uart_add_one_port(drv, &up->port);
}
@@ -1004,9 +1007,11 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->port.throttle = up->port.throttle;
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
+ uart->port.rs485_supported = up->port.rs485_supported;
uart->port.rs485 = up->port.rs485;
uart->rs485_start_tx = up->rs485_start_tx;
uart->rs485_stop_tx = up->rs485_stop_tx;
+ uart->lsr_save_mask = up->lsr_save_mask;
uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
@@ -1094,6 +1099,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
ret = 0;
}
+ if (!uart->lsr_save_mask)
+ uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */
+
/* Initialise interrupt backoff work if required */
if (up->overrun_backoff_time_ms > 0) {
uart->overrun_backoff_time_ms =
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7133fceed35e..a8dba4a0a8fb 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -106,10 +106,10 @@ int serial8250_tx_dma(struct uart_8250_port *p)
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_async_issue_pending(dma->txchan);
- if (dma->tx_err) {
+ serial8250_clear_THRI(p);
+ if (dma->tx_err)
dma->tx_err = 0;
- serial8250_clear_THRI(p);
- }
+
return 0;
err:
dma->tx_err = 1;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index f57bbd32ef11..a604b42e4458 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -9,26 +9,27 @@
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
+#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/reset.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+
#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
@@ -47,7 +48,7 @@
#define RZN1_UART_xDMACR_DMA_EN BIT(0)
#define RZN1_UART_xDMACR_1_WORD_BURST (0 << 1)
#define RZN1_UART_xDMACR_4_WORD_BURST (1 << 1)
-#define RZN1_UART_xDMACR_8_WORD_BURST (3 << 1)
+#define RZN1_UART_xDMACR_8_WORD_BURST (2 << 1)
#define RZN1_UART_xDMACR_BLK_SZ(x) ((x) << 3)
/* Quirks */
@@ -82,8 +83,21 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
static void dw8250_force_idle(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
+ unsigned int lsr;
serial8250_clear_and_reinit_fifos(up);
+
+ /*
+ * With the PSLVERR_RESP_EN parameter set to 1, the device generates an
+ * error response when an attempt is made to read an empty RBR with the
+ * FIFO enabled.
+ */
+ if (up->fcr & UART_FCR_ENABLE_FIFO) {
+ lsr = p->serial_in(p, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return;
+ }
+
(void)p->serial_in(p, UART_RX);
}
@@ -122,12 +136,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value)
/* Returns once the transmitter is empty or we run out of retries */
static void dw8250_tx_wait_empty(struct uart_port *p)
{
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int tries = 20000;
unsigned int delay_threshold = tries - 1000;
unsigned int lsr;
while (tries--) {
lsr = readb (p->membase + (UART_LSR << p->regshift));
+ up->lsr_saved_flags |= lsr & up->lsr_save_mask;
+
if (lsr & UART_LSR_TEMT)
break;
@@ -140,29 +157,23 @@ static void dw8250_tx_wait_empty(struct uart_port *p)
}
}
-static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = to_dw8250_data(p->private_data);
- /* Allow the TX to drain before we reconfigure */
- if (offset == UART_LCR)
- dw8250_tx_wait_empty(p);
-
writeb(value, p->membase + (offset << p->regshift));
if (offset == UART_LCR && !d->uart_16550_compatible)
dw8250_check_lcr(p, value);
}
-
-static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
- writeb(value, p->membase + (offset << p->regshift));
+ /* Allow the TX to drain before we reconfigure */
+ if (offset == UART_LCR)
+ dw8250_tx_wait_empty(p);
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_serial_out(p, offset, value);
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -253,7 +264,7 @@ static int dw8250_handle_irq(struct uart_port *p)
*/
if (!up->dma && rx_timeout) {
spin_lock_irqsave(&p->lock, flags);
- status = p->serial_in(p, UART_LSR);
+ status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
(void) p->serial_in(p, UART_RX);
@@ -263,7 +274,10 @@ static int dw8250_handle_irq(struct uart_port *p)
/* Manually stop the Rx DMA transfer when acting as flow controller */
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
- status = p->serial_in(p, UART_LSR);
+ spin_lock_irqsave(&p->lock, flags);
+ status = serial_lsr_in(up);
+ spin_unlock_irqrestore(&p->lock, flags);
+
if (status & (UART_LSR_DR | UART_LSR_BI)) {
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
dw8250_writel_ext(p, DW_UART_DMASA, 1);
@@ -688,7 +702,6 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -706,9 +719,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -730,11 +741,10 @@ static int dw8250_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops dw8250_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
- SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
+ RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};
static const struct dw8250_platform_data dw8250_dw_apb = {
@@ -773,18 +783,18 @@ static const struct of_device_id dw8250_of_match[] = {
MODULE_DEVICE_TABLE(of, dw8250_of_match);
static const struct acpi_device_id dw8250_acpi_match[] = {
- { "INT33C4", 0 },
- { "INT33C5", 0 },
- { "INT3434", 0 },
- { "INT3435", 0 },
- { "80860F0A", 0 },
- { "8086228A", 0 },
- { "APMC0D08", 0},
- { "AMD0020", 0 },
- { "AMDI0020", 0 },
- { "AMDI0022", 0 },
- { "BRCM2032", 0 },
- { "HISI0031", 0 },
+ { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb },
+ { "8086228A", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb },
+ { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb},
+ { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb },
+ { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
@@ -792,7 +802,7 @@ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
- .pm = &dw8250_pm_ops,
+ .pm = pm_ptr(&dw8250_pm_ops),
.of_match_table = dw8250_of_match,
.acpi_match_table = dw8250_acpi_match,
},
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index fbabfdd8c7b8..dbe4d44f60d4 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -3,8 +3,10 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/property.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
@@ -16,9 +18,18 @@
#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */
#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */
#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
+#define DW_UART_RAR 0xc4 /* Receive Address Register */
+#define DW_UART_TAR 0xc8 /* Transmit Address Register */
+#define DW_UART_LCR_EXT 0xcc /* Line Extended Control Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
+/* Receive / Transmit Address Register bits */
+#define DW_UART_ADDR_MASK GENMASK(7, 0)
+
+/* Line Status Register bits */
+#define DW_UART_LSR_ADDR_RCVD BIT(8)
+
/* Transceiver Control Register bits */
#define DW_UART_TCR_RS485_EN BIT(0)
#define DW_UART_TCR_RE_POL BIT(1)
@@ -28,22 +39,28 @@
#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+/* Line Extended Control Register bits */
+#define DW_UART_LCR_EXT_DLS_E BIT(0)
+#define DW_UART_LCR_EXT_ADDR_MATCH BIT(1)
+#define DW_UART_LCR_EXT_SEND_ADDR BIT(2)
+#define DW_UART_LCR_EXT_TRANSMIT_MODE BIT(3)
+
/* Component Parameter Register bits */
-#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
-#define DW_UART_CPR_AFCE_MODE (1 << 4)
-#define DW_UART_CPR_THRE_MODE (1 << 5)
-#define DW_UART_CPR_SIR_MODE (1 << 6)
-#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
-#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
-#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
-#define DW_UART_CPR_FIFO_STAT (1 << 10)
-#define DW_UART_CPR_SHADOW (1 << 11)
-#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
-#define DW_UART_CPR_DMA_EXTRA (1 << 13)
-#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+#define DW_UART_CPR_ABP_DATA_WIDTH GENMASK(1, 0)
+#define DW_UART_CPR_AFCE_MODE BIT(4)
+#define DW_UART_CPR_THRE_MODE BIT(5)
+#define DW_UART_CPR_SIR_MODE BIT(6)
+#define DW_UART_CPR_SIR_LP_MODE BIT(7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES BIT(8)
+#define DW_UART_CPR_FIFO_ACCESS BIT(9)
+#define DW_UART_CPR_FIFO_STAT BIT(10)
+#define DW_UART_CPR_SHADOW BIT(11)
+#define DW_UART_CPR_ENCODED_PARMS BIT(12)
+#define DW_UART_CPR_DMA_EXTRA BIT(13)
+#define DW_UART_CPR_FIFO_MODE GENMASK(23, 16)
/* Helper for FIFO size calculation */
-#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+#define DW_UART_CPR_FIFO_SIZE(a) (FIELD_GET(DW_UART_CPR_FIFO_MODE, (a)) * 16)
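A worked example of the reworked helper, shown for illustration only (the CPR value is hypothetical):

	u32 cpr = 0x02 << 16;	/* FIFO_MODE field (bits 23:16) reads 2 */
	unsigned int fifo = DW_UART_CPR_FIFO_SIZE(cpr);	/* 2 * 16 == 32 bytes */

This is the same result the old open-coded shift-and-mask produced; only the notation changes.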
/*
* divisor = div(I) + div(F)
@@ -82,10 +99,85 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
p->status |= UPSTAT_AUTOCTS;
serial8250_do_set_termios(p, termios, old);
+
+ /* Filter addresses which have 9th bit set */
+ p->ignore_status_mask |= DW_UART_LSR_ADDR_RCVD;
+ p->read_status_mask |= DW_UART_LSR_ADDR_RCVD;
}
EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
-static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+/*
+ * Wait until RE is de-asserted for sure. An ongoing receive will keep
+ * RE asserted until the end of the frame. Without a BUSY indication
+ * available, the only course of action is to wait for the time it takes
+ * to receive one frame (there might be nothing to receive, but without
+ * BUSY the driver cannot know).
+ */
+static void dw8250_wait_re_deassert(struct uart_port *p)
+{
+ ndelay(p->frame_time);
+}
+
+static void dw8250_update_rar(struct uart_port *p, u32 addr)
+{
+ u32 re_en = dw8250_readl_ext(p, DW_UART_RE_EN);
+
+ /*
+ * RAR shouldn't be changed while receiving. Thus, de-assert RE_EN
+ * if asserted and wait.
+ */
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+ dw8250_wait_re_deassert(p);
+ dw8250_writel_ext(p, DW_UART_RAR, addr);
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, re_en);
+}
+
+static void dw8250_rs485_set_addr(struct uart_port *p, struct serial_rs485 *rs485,
+ struct ktermios *termios)
+{
+ u32 lcr = dw8250_readl_ext(p, DW_UART_LCR_EXT);
+
+ if (rs485->flags & SER_RS485_ADDRB) {
+ lcr |= DW_UART_LCR_EXT_DLS_E;
+ if (termios)
+ termios->c_cflag |= ADDRB;
+
+ if (rs485->flags & SER_RS485_ADDR_RECV) {
+ u32 delta = p->rs485.flags ^ rs485->flags;
+
+ /*
+ * rs485 (param) is equal to uart_port's rs485 only during init
+ * (during init, delta is not yet applicable).
+ */
+ if (unlikely(&p->rs485 == rs485))
+ delta = rs485->flags;
+
+ if ((delta & SER_RS485_ADDR_RECV) ||
+ (p->rs485.addr_recv != rs485->addr_recv))
+ dw8250_update_rar(p, rs485->addr_recv);
+ lcr |= DW_UART_LCR_EXT_ADDR_MATCH;
+ } else {
+ lcr &= ~DW_UART_LCR_EXT_ADDR_MATCH;
+ }
+ if (rs485->flags & SER_RS485_ADDR_DEST) {
+ /*
+ * Don't skip writes here, as another endpoint could
+ * have changed the communication line's destination
+ * address in the meantime.
+ */
+ dw8250_writel_ext(p, DW_UART_TAR, rs485->addr_dest);
+ lcr |= DW_UART_LCR_EXT_SEND_ADDR;
+ }
+ } else {
+ lcr = 0;
+ }
+ dw8250_writel_ext(p, DW_UART_LCR_EXT, lcr);
+}
+
+static int dw8250_rs485_config(struct uart_port *p, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
u32 tcr;
@@ -93,25 +185,17 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
tcr &= ~DW_UART_TCR_XFER_MODE;
if (rs485->flags & SER_RS485_ENABLED) {
- /* Clear unsupported flags. */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
- SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
tcr |= DW_UART_TCR_RS485_EN;
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ if (rs485->flags & SER_RS485_RX_DURING_TX)
tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
- } else {
- /* HW does not support same DE level for tx and rx */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
-
+ else
tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
- }
dw8250_writel_ext(p, DW_UART_DE_EN, 1);
dw8250_writel_ext(p, DW_UART_RE_EN, 1);
} else {
- rs485->flags = 0;
+ if (termios)
+ termios->c_cflag &= ~ADDRB;
tcr &= ~DW_UART_TCR_RS485_EN;
}
@@ -127,10 +211,9 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
dw8250_writel_ext(p, DW_UART_TCR, tcr);
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
-
- p->rs485 = *rs485;
+ /* Addressing mode can only be set up after TCR */
+ if (rs485->flags & SER_RS485_ENABLED)
+ dw8250_rs485_set_addr(p, rs485, termios);
return 0;
}
@@ -149,6 +232,12 @@ static bool dw8250_detect_rs485_hw(struct uart_port *p)
return reg;
}
+static const struct serial_rs485 dw8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_ADDRB | SER_RS485_ADDR_RECV |
+ SER_RS485_ADDR_DEST,
+};
+
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
@@ -159,8 +248,11 @@ void dw8250_setup_port(struct uart_port *p)
pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
if (pd->hw_rs485_support) {
p->rs485_config = dw8250_rs485_config;
+ up->lsr_save_mask = LSR_SAVE_FLAGS | DW_UART_LSR_ADDR_RCVD;
+ p->rs485_supported = dw8250_rs485_supported;
} else {
p->rs485_config = serial8250_em485_config;
+ p->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
}
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index e52585064565..f271becfc46c 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -84,8 +84,6 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value)
}
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void serial_putc(struct uart_port *port, unsigned char c)
{
unsigned int status;
@@ -94,7 +92,7 @@ static void serial_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = serial8250_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 7292917ac878..314a05e009df 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -112,7 +112,9 @@
struct exar8250;
struct exar8250_platform {
- int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
+ int (*rs485_config)(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
+ const struct serial_rs485 *rs485_supported;
int (*register_gpio)(struct pci_dev *, struct uart_8250_port *);
void (*unregister_gpio)(struct uart_8250_port *);
};
@@ -194,11 +196,11 @@ static int xr17v35x_startup(struct uart_port *port)
static void exar_shutdown(struct uart_port *port)
{
- unsigned char lsr;
bool tx_complete = false;
struct uart_8250_port *up = up_to_u8250p(port);
struct circ_buf *xmit = &port->state->xmit;
int i = 0;
+ u16 lsr;
do {
lsr = serial_in(up, UART_LSR);
@@ -408,7 +410,7 @@ static void xr17v35x_unregister_gpio(struct uart_8250_port *port)
port->port.private_data = NULL;
}
-static int generic_rs485_config(struct uart_port *port,
+static int generic_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -426,18 +428,21 @@ static int generic_rs485_config(struct uart_port *port,
if (is_rs485)
writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 generic_rs485_supported = {
+ .flags = SER_RS485_ENABLED,
+};
+
static const struct exar8250_platform exar8250_default_platform = {
.register_gpio = xr17v35x_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
.rs485_config = generic_rs485_config,
+ .rs485_supported = &generic_rs485_supported,
};
-static int iot2040_rs485_config(struct uart_port *port,
+static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -467,9 +472,13 @@ static int iot2040_rs485_config(struct uart_port *port,
value |= mode;
writeb(value, p + UART_EXAR_MPIOLVL_7_0);
- return generic_rs485_config(port, rs485);
+ return generic_rs485_config(port, termios, rs485);
}
+static const struct serial_rs485 iot2040_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+};
+
static const struct property_entry iot2040_gpio_properties[] = {
PROPERTY_ENTRY_U32("exar,first-pin", 10),
PROPERTY_ENTRY_U32("ngpios", 1),
@@ -498,6 +507,7 @@ static int iot2040_register_gpio(struct pci_dev *pcidev,
static const struct exar8250_platform iot2040_platform = {
.rs485_config = iot2040_rs485_config,
+ .rs485_supported = &iot2040_rs485_supported,
.register_gpio = iot2040_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
};
@@ -540,6 +550,7 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.uartclk = baud * 16;
port->port.rs485_config = platform->rs485_config;
+ port->port.rs485_supported = *(platform->rs485_supported);
/*
* Setup the UART clock for the devices on expansion slot to
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index dba5950b8d0e..65b6b3cbaff6 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -191,7 +191,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
return -ENODEV;
}
-static int fintek_8250_rs485_config(struct uart_port *port,
+static int fintek_8250_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
uint8_t config = 0;
@@ -206,19 +206,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
!(rs485->flags & SER_RS485_RTS_AFTER_SEND))
return -EINVAL;
- memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
- } else {
- memset(rs485, 0, sizeof(*rs485));
- }
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
- /* Only the first port supports delays */
- if (pdata->index) {
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
}
if (rs485->delay_rts_before_send) {
@@ -241,8 +229,6 @@ static int fintek_8250_rs485_config(struct uart_port *port,
sio_write_reg(pdata, RS485, config);
fintek_8250_exit_key(pdata->base_port);
- port->rs485 = *rs485;
-
return 0;
}
@@ -424,6 +410,17 @@ static int probe_setup_port(struct fintek_8250 *pdata,
return -ENODEV;
}
+/* Only the first port supports delays */
+static const struct serial_rs485 fintek_8250_rs485_supported_port0 = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
+static const struct serial_rs485 fintek_8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata = uart->port.private_data;
@@ -435,6 +432,10 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
case CHIP_ID_F81866:
case CHIP_ID_F81865:
uart->port.rs485_config = fintek_8250_rs485_config;
+ if (!pdata->index)
+ uart->port.rs485_supported = fintek_8250_rs485_supported_port0;
+ else
+ uart->port.rs485_supported = fintek_8250_rs485_supported;
break;
default: /* No RS485 Auto direction functional */
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 9c01c531349d..8aad15622a2e 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -25,8 +25,8 @@
int fsl8250_handle_irq(struct uart_port *port)
{
- unsigned char lsr, orig_lsr;
unsigned long flags;
+ u16 lsr, orig_lsr;
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
@@ -77,7 +77,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if ((lsr & UART_LSR_THRE) && (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
- up->lsr_saved_flags = orig_lsr;
+ up->lsr_saved_flags |= orig_lsr & UART_LSR_BI;
uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index cff91aa03f29..2b2f5d8d24b9 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -54,7 +54,7 @@ static void early_out(struct uart_port *port, int offset, uint8_t value)
static void ingenic_early_console_putc(struct uart_port *port, unsigned char c)
{
- uint8_t lsr;
+ u16 lsr;
do {
lsr = early_in(port, UART_LSR);
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 570e25d6f37e..6dc85aaba5d0 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -32,7 +32,7 @@ struct lpc18xx_uart_data {
int line;
};
-static int lpc18xx_rs485_config(struct uart_port *port,
+static int lpc18xx_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -40,24 +40,12 @@ static int lpc18xx_rs485_config(struct uart_port *port,
u32 rs485_dly_reg = 0;
unsigned baud_clk;
- if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN |
LPC18XX_UART_RS485CTRL_DCTRL;
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- } else {
- rs485->flags |= SER_RS485_RTS_AFTER_SEND;
- }
}
if (rs485->delay_rts_after_send) {
@@ -73,14 +61,9 @@ static int lpc18xx_rs485_config(struct uart_port *port,
/ baud_clk;
}
- /* Delay RTS before send not supported */
- rs485->delay_rts_before_send = 0;
-
serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg);
serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg);
- port->rs485 = *rs485;
-
return 0;
}
@@ -98,6 +81,12 @@ static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
writel(value, p->membase + offset);
}
+static const struct serial_rs485 lpc18xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_after_send = 1,
+ /* Delay RTS before send is not supported */
+};
+
static int lpc18xx_serial_probe(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data;
@@ -168,6 +157,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk_uart);
uart.port.private_data = data;
uart.port.rs485_config = lpc18xx_rs485_config;
+ uart.port.rs485_supported = lpc18xx_rs485_supported;
uart.port.serial_out = lpc18xx_uart_serial_out;
uart.dma = &data->dma;
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 0f5af061e0b4..4ba43bef9933 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -330,7 +330,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
- uart.port.iotype = UPIO_MEM;
+ uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.uartclk = lpss->board->base_baud * 16;
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 5a699a1aa79c..1b461fba15a3 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -165,6 +165,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->dev = &ofdev->dev;
port->rs485_config = serial8250_em485_config;
+ port->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ac8bfa042391..0dcecbbc3967 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1115,8 +1115,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
return omap_8250_rx_dma(up);
}
-static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
- u8 iir, unsigned char status)
+static u16 omap_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, u16 status)
{
if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
(iir & UART_IIR_RDI)) {
@@ -1130,7 +1129,7 @@ static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
}
static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
- unsigned char status)
+ u16 status)
{
/*
* Queue a new transfer if FIFO has data.
@@ -1164,7 +1163,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
- unsigned char status;
+ u16 status;
u8 iir;
serial8250_rpm_get(up);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a17619db7939..6f66dc2ebacc 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1553,7 +1553,7 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
#define FINTEK_RTS_INVERT BIT(5)
/* We should do proper H/W transceiver setting before change to RS485 mode */
-static int pci_fintek_rs485_config(struct uart_port *port,
+static int pci_fintek_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct pci_dev *pci_dev = to_pci_dev(port->dev);
@@ -1562,16 +1562,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
- if (!rs485)
- rs485 = &port->rs485;
- else if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- /* F81504/508/512 not support RTS delay before or after send */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable RTS H/W control mode */
setting |= FINTEK_RTS_CONTROL_BY_HW;
@@ -1583,9 +1573,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
/* RTS driving low on TX */
setting |= FINTEK_RTS_INVERT;
}
-
- rs485->delay_rts_after_send = 0;
- rs485->delay_rts_before_send = 0;
} else {
/* Disable RTS H/W control mode */
setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT);
@@ -1593,12 +1580,14 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting);
- if (rs485 != &port->rs485)
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 pci_fintek_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
+ /* F81504/508/512 does not support RTS delay before or after send */
+};
+
static int pci_fintek_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1618,6 +1607,7 @@ static int pci_fintek_setup(struct serial_private *priv,
port->port.iotype = UPIO_PORT;
port->port.iobase = iobase;
port->port.rs485_config = pci_fintek_rs485_config;
+ port->port.rs485_supported = pci_fintek_rs485_supported;
data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL);
if (!data)
@@ -1689,7 +1679,7 @@ static int pci_fintek_init(struct pci_dev *dev)
* pciserial_resume_ports()
*/
port = serial8250_get_port(priv->line[i]);
- pci_fintek_rs485_config(&port->port, NULL);
+ uart_rs485_config(&port->port);
} else {
/* First init without port data
* force init to RS232 Mode
@@ -5077,6 +5067,115 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
+ * Brainboxes PX-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4005,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4019,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4004,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4016,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-203/PX-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4006,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-260/PX-701
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400A,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400C,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-320/324/PX-376/PX-387
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400B,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-335/346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400F,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4010,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-420
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4000,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4011,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-803
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-846
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4008,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4017,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
index 95ff10f25d58..b8d5b7714a9d 100644
--- a/drivers/tty/serial/8250/8250_pericom.c
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -73,7 +73,7 @@ static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
struct uart_8250_port *up = up_to_u8250p(port);
int lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | 0x80);
+ serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
serial_dl_write(up, divisor);
serial_port_out(port, 2, 16 - scr);
serial_port_out(port, UART_LCR, lcr);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8f32fe9e149e..39b35a61958c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -50,8 +50,6 @@
#define DEBUG_AUTOCONF(fmt...) do { } while (0)
#endif
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Here we define the default xmit fifo size used for each type of UART.
*/
@@ -336,27 +334,29 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
#ifdef CONFIG_SERIAL_8250_RT288X
+#define UART_REG_UNMAPPED -1
+
/* Au1x00/RT288x UART hardware has a weird register layout */
static const s8 au_io_in_map[8] = {
- 0, /* UART_RX */
- 2, /* UART_IER */
- 3, /* UART_IIR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- 7, /* UART_LSR */
- 8, /* UART_MSR */
- -1, /* UART_SCR (unmapped) */
+ [UART_RX] = 0,
+ [UART_IER] = 2,
+ [UART_IIR] = 3,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = 7,
+ [UART_MSR] = 8,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
static const s8 au_io_out_map[8] = {
- 1, /* UART_TX */
- 2, /* UART_IER */
- 4, /* UART_FCR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- -1, /* UART_LSR (unmapped) */
- -1, /* UART_MSR (unmapped) */
- -1, /* UART_SCR (unmapped) */
+ [UART_TX] = 1,
+ [UART_IER] = 2,
+ [UART_FCR] = 4,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = UART_REG_UNMAPPED,
+ [UART_MSR] = UART_REG_UNMAPPED,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
unsigned int au_serial_in(struct uart_port *p, int offset)
@@ -364,7 +364,7 @@ unsigned int au_serial_in(struct uart_port *p, int offset)
if (offset >= ARRAY_SIZE(au_io_in_map))
return UINT_MAX;
offset = au_io_in_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return UINT_MAX;
return __raw_readl(p->membase + (offset << p->regshift));
}
@@ -374,7 +374,7 @@ void au_serial_out(struct uart_port *p, int offset, int value)
if (offset >= ARRAY_SIZE(au_io_out_map))
return;
offset = au_io_out_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return;
__raw_writel(value, p->membase + (offset << p->regshift));
}
@@ -647,6 +647,14 @@ void serial8250_em485_destroy(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
+struct serial_rs485 serial8250_em485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_TERMINATE_BUS | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+EXPORT_SYMBOL_GPL(serial8250_em485_supported);
+
/**
* serial8250_em485_config() - generic ->rs485_config() callback
* @port: uart port
@@ -656,7 +664,8 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
* if the uart is incapable of driving RTS as a Transmit Enable signal in
* hardware, relying on software emulation instead.
*/
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -667,29 +676,12 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
}
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- memset(rs485->padding, 0, sizeof(rs485->padding));
- port->rs485 = *rs485;
-
- gpiod_set_value(port->rs485_term_gpio,
- rs485->flags & SER_RS485_TERMINATE_BUS);
-
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
*/
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
+ if (rs485->flags & SER_RS485_ENABLED)
+ return serial8250_em485_init(up);
serial8250_em485_destroy(up);
return 0;
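For context, a sketch of how a driver opts into this emulated-RS485 path after the patch; it mirrors the 8250_of and bcm2835aux hunks earlier in this diff, and the start/stop callbacks may equally be driver-specific ones:

	/* Illustrative wiring for software-emulated RS485 on an 8250 port. */
	port->rs485_config    = serial8250_em485_config;
	port->rs485_supported = serial8250_em485_supported;
	up->rs485_start_tx    = serial8250_em485_start_tx;
	up->rs485_stop_tx     = serial8250_em485_stop_tx;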
@@ -849,7 +841,7 @@ static int size_fifo(struct uart_8250_port *up)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
- serial_out(up, UART_LCR, 0x03);
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
for (count = 0; count < 256; count++)
serial_out(up, UART_TX, count);
mdelay(20);/* FIXME - schedule_timeout */
@@ -1503,18 +1495,12 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
}
}
-static inline void __do_stop_tx(struct uart_8250_port *p)
-{
- if (serial8250_clear_THRI(p))
- serial8250_rpm_put_tx(p);
-}
-
static inline void __stop_tx(struct uart_8250_port *p)
{
struct uart_8250_em485 *em485 = p->em485;
if (em485) {
- unsigned char lsr = serial_in(p, UART_LSR);
+ u16 lsr = serial_lsr_in(p);
u64 stop_delay = 0;
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
@@ -1522,7 +1508,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (!(lsr & UART_LSR_THRE))
return;
/*
- * To provide required timeing and allow FIFO transfer,
+ * To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
* shift register are empty. The device driver should either
* enable interrupt on TEMT or set UART_CAP_NOTEMT that will
@@ -1544,7 +1530,9 @@ static inline void __stop_tx(struct uart_8250_port *p)
__stop_tx_rs485(p, stop_delay);
}
- __do_stop_tx(p);
+
+ if (serial8250_clear_THRI(p))
+ serial8250_rpm_put_tx(p);
}
static void serial8250_stop_tx(struct uart_port *port)
@@ -1573,10 +1561,8 @@ static inline void __start_tx(struct uart_port *port)
if (serial8250_set_THRI(up)) {
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
+ u16 lsr = serial_lsr_in(up);
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
}
@@ -1616,7 +1602,8 @@ void serial8250_em485_start_tx(struct uart_8250_port *up)
}
EXPORT_SYMBOL_GPL(serial8250_em485_start_tx);
-static inline void start_tx_rs485(struct uart_port *port)
+/* Returns false if start_tx_timer was set up to defer TX start */
+static bool start_tx_rs485(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
@@ -1644,11 +1631,11 @@ static inline void start_tx_rs485(struct uart_port *port)
em485->active_timer = &em485->start_tx_timer;
start_hrtimer_ms(&em485->start_tx_timer,
up->port.rs485.delay_rts_before_send);
- return;
+ return false;
}
}
- __start_tx(port);
+ return true;
}
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
@@ -1678,14 +1665,12 @@ static void serial8250_start_tx(struct uart_port *port)
serial8250_rpm_get_tx(up);
- if (em485 &&
- em485->active_timer == &em485->start_tx_timer)
- return;
-
- if (em485)
- start_tx_rs485(port);
- else
- __start_tx(port);
+ if (em485) {
+ if ((em485->active_timer == &em485->start_tx_timer) ||
+ !start_tx_rs485(port))
+ return;
+ }
+ __start_tx(port);
}
static void serial8250_throttle(struct uart_port *port)
@@ -1729,7 +1714,7 @@ static void serial8250_enable_ms(struct uart_port *port)
serial8250_rpm_put(up);
}
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
unsigned char ch;
@@ -1792,11 +1777,13 @@ void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
EXPORT_SYMBOL_GPL(serial8250_read_char);
/*
- * serial8250_rx_chars: processes according to the passed in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
+ * serial8250_rx_chars - Read characters. The first LSR value must be passed in.
+ *
+ * Returns LSR bits. The caller should rely only on non-Rx related LSR bits
+ * (such as THRE) because the LSR value might come from an already consumed
+ * character.
*/
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
int max_count = 256;
@@ -1852,7 +1839,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
break;
if ((up->capabilities & UART_CAP_HFIFO) &&
- (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ !uart_lsr_tx_empty(serial_in(up, UART_LSR)))
break;
/* The BCM2835 MINI UART THRE bit is really a not-full bit. */
if ((up->capabilities & UART_CAP_MINI) &&
@@ -1916,17 +1903,17 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
*/
int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned char status;
struct uart_8250_port *up = up_to_u8250p(port);
bool skip_rx = false;
unsigned long flags;
+ u16 status;
if (iir & UART_IIR_NO_INT)
return 0;
spin_lock_irqsave(&port->lock, flags);
- status = serial_port_in(port, UART_LSR);
+ status = serial_lsr_in(up);
/*
* If port is stopped and there are no error conditions in the
@@ -1949,7 +1936,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
if (!up->dma || up->dma->tx_err)
serial8250_tx_chars(up);
- else
+ else if (!up->dma->tx_running)
__stop_tx(up);
}
@@ -2002,18 +1989,17 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned int lsr;
+ u16 lsr;
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
@@ -2084,9 +2070,7 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
/* Wait up to 10ms for the character(s) to be sent. */
for (;;) {
- status = serial_in(up, UART_LSR);
-
- up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+ status = serial_lsr_in(up);
if ((status & bits) == bits)
break;
@@ -2128,8 +2112,8 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
static int serial8250_get_poll_char(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char lsr;
int status;
+ u16 lsr;
serial8250_rpm_get(up);
@@ -2163,7 +2147,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
else
serial_port_out(port, UART_IER, 0);
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
* Send the character out.
*/
@@ -2173,7 +2157,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
serial_port_out(port, UART_IER, ier);
serial8250_rpm_put(up);
}
@@ -2184,8 +2168,9 @@ int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned char lsr, iir;
+ unsigned char iir;
int retval;
+ u16 lsr;
if (!port->fifosize)
port->fifosize = uart_config[port->type].fifo_size;
@@ -2810,7 +2795,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask |= UART_LSR_BI;
/*
- * Characteres to ignore
+ * Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
@@ -2975,8 +2960,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
- if (!port->mapbase)
+ if (!port->mapbase) {
+ ret = -EINVAL;
break;
+ }
if (!request_mem_region(port->mapbase, size, "serial")) {
ret = -EBUSY;
@@ -3201,7 +3188,7 @@ static void serial8250_config_port(struct uart_port *port, int flags)
autoconfig(up);
if (port->rs485.flags & SER_RS485_ENABLED)
- port->rs485_config(port, &port->rs485);
+ uart_rs485_config(port);
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
@@ -3443,7 +3430,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fdb6c4188695..d0b49e15fbf5 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -254,6 +254,7 @@ config SERIAL_8250_ASPEED_VUART
depends on SERIAL_8250
depends on OF
depends on REGMAP && MFD_SYSCON
+ depends on ARCH_ASPEED || COMPILE_TEST
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a452748c69b2..877173907c53 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -324,6 +324,7 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
@@ -889,23 +890,6 @@ config SERIAL_TXX9_STDSERIAL
bool "TX39XX/49XX SIO act as standard serial"
depends on !SERIAL_8250 && SERIAL_TXX9
-config SERIAL_VR41XX
- tristate "NEC VR4100 series Serial Interface Unit support"
- depends on CPU_VR41XX
- select SERIAL_CORE
- help
- If you have a NEC VR4100 series processor and you want to use
- Serial Interface Unit(SIU) or Debug Serial Interface Unit(DSIU)
- (not include VR4111/VR4121 DSIU), say Y. Otherwise, say N.
-
-config SERIAL_VR41XX_CONSOLE
- bool "Enable NEC VR4100 series Serial Interface Unit console"
- depends on SERIAL_VR41XX=y
- select SERIAL_CORE_CONSOLE
- help
- If you have a NEC VR4100 series processor and you want to use
- a console on a serial port, say Y. Otherwise, say N.
-
config SERIAL_JSM
tristate "Digi International NEO and Classic PCI Support"
depends on PCI
@@ -1099,8 +1083,8 @@ config SERIAL_TIMBERDALE
config SERIAL_BCM63XX
tristate "Broadcom BCM63xx/BCM33xx UART support"
select SERIAL_CORE
- depends on ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
- default ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
+ depends on ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ default ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC
help
This enables the driver for the onchip UART core found on
the following chipsets:
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 61cc8de95571..238a9557b487 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
-obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 97ef41cb2721..15f0e4d88c5a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1367,6 +1367,15 @@ static void pl011_stop_rx(struct uart_port *port)
pl011_dma_rx_stop(uap);
}
+static void pl011_throttle_rx(struct uart_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pl011_stop_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
static void pl011_enable_ms(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1788,9 +1797,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
*/
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
+ unsigned long flags;
unsigned int i;
- spin_lock_irq(&uap->port.lock);
+ spin_lock_irqsave(&uap->port.lock, flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1812,7 +1822,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irq(&uap->port.lock);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+static void pl011_unthrottle_rx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+
+ pl011_enable_interrupts(uap);
}
static int pl011_startup(struct uart_port *port)
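The two hunks above wire RX throttling into the PL011's uart_ops: .throttle stops reception under the port lock, and .unthrottle re-runs the interrupt-enable path, with pl011_enable_interrupts() switched to irqsave locking so it can also be called from that new path. A minimal sketch of the same pattern, with all foo_* names hypothetical rather than taken from the patch:

/* Sketch only: back .throttle/.unthrottle with existing stop-RX and
 * interrupt-enable helpers; foo_* names are illustrative. */
static void foo_throttle_rx(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	foo_stop_rx(port);		/* hypothetical: mask RX interrupts, stop RX DMA */
	spin_unlock_irqrestore(&port->lock, flags);
}

static void foo_unthrottle_rx(struct uart_port *port)
{
	foo_enable_interrupts(port);	/* hypothetical: re-arm RX interrupts (locks internally) */
}

static const struct uart_ops foo_pops = {
	.throttle	= foo_throttle_rx,
	.unthrottle	= foo_unthrottle_rx,
	/* ... remaining callbacks ... */
};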
@@ -2197,7 +2214,7 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
-static int pl011_rs485_config(struct uart_port *port,
+static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_amba_port *uap =
@@ -2225,6 +2242,8 @@ static const struct uart_ops amba_pl011_pops = {
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
+ .throttle = pl011_throttle_rx,
+ .unthrottle = pl011_unthrottle_rx,
.enable_ms = pl011_enable_ms,
.break_ctl = pl011_break_ctl,
.startup = pl011_startup,
@@ -2681,17 +2700,12 @@ static int pl011_find_free_port(void)
static int pl011_get_rs485_mode(struct uart_amba_port *uap)
{
struct uart_port *port = &uap->port;
- struct serial_rs485 *rs485 = &port->rs485;
int ret;
ret = uart_get_rs485_mode(port);
if (ret)
return ret;
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
return 0;
}
@@ -2751,6 +2765,13 @@ static int pl011_register_port(struct uart_amba_port *uap)
return ret;
}
+static const struct serial_rs485 pl011_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -2777,6 +2798,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.irq = dev->irq[0];
uap->port.ops = &amba_pl011_pops;
uap->port.rs485_config = pl011_rs485_config;
+ uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
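Taken together, the pl011 hunks above move RS485 capability filtering out of the driver: instead of clamping the RTS delays in pl011_get_rs485_mode(), the driver publishes a serial_rs485 template in port->rs485_supported (the flags it honours, with non-zero delay fields meaning delays are accepted) and leaves sanitization of user requests to the serial core before rs485_config() is called. A hedged driver-side sketch, with foo_* names purely illustrative:

/* Sketch only: advertise RTS-on-send / RTS-after-send with delay support,
 * but no reception during transmission. */
static const struct serial_rs485 foo_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
		 SER_RS485_RTS_AFTER_SEND,
	.delay_rts_before_send = 1,	/* non-zero: delays accepted */
	.delay_rts_after_send = 1,
};

static int foo_probe(struct platform_device *pdev)
{
	struct uart_port *port = foo_alloc_port(pdev);	/* hypothetical */

	port->rs485_config = foo_rs485_config;		/* hypothetical callback */
	port->rs485_supported = foo_rs485_supported;	/* by-value, as in this series */
	return uart_add_one_port(&foo_uart_driver, port);	/* foo_uart_driver hypothetical */
}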
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 6269dbf93546..32caeac12985 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -580,18 +580,9 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
-static int ar933x_config_rs485(struct uart_port *port,
+static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
- struct ar933x_uart_port *up =
- container_of(port, struct ar933x_uart_port, port);
-
- if ((rs485conf->flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(port->dev, "RS485 needs rts-gpio\n");
- return 1;
- }
- port->rs485 = *rs485conf;
return 0;
}
@@ -702,6 +693,11 @@ static struct uart_driver ar933x_uart_driver = {
.cons = NULL, /* filled in runtime */
};
+static const struct serial_rs485 ar933x_no_rs485 = {};
+static const struct serial_rs485 ar933x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static int ar933x_uart_probe(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
@@ -773,6 +769,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
port->rs485_config = ar933x_config_rs485;
+ port->rs485_supported = ar933x_rs485_supported;
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
@@ -792,10 +789,12 @@ static int ar933x_uart_probe(struct platform_device *pdev)
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
- port->rs485.flags &= ~SER_RS485_ENABLED;
+ if (!up->rts_gpiod) {
+ port->rs485_supported = ar933x_no_rs485;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dd1c7e4bd1c9..30ba9eef7b39 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,7 +166,6 @@ struct atmel_uart_port {
unsigned int fidi_min;
unsigned int fidi_max;
-#ifdef CONFIG_PM
struct {
u32 cr;
u32 mr;
@@ -177,7 +176,6 @@ struct atmel_uart_port {
u32 fmr;
u32 fimr;
} cache;
-#endif
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
@@ -285,7 +283,7 @@ static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
}
/* Enable or disable the rs485 support */
-static int atmel_config_rs485(struct uart_port *port,
+static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -2473,6 +2471,12 @@ static const struct uart_ops atmel_pops = {
#endif
};
+static const struct serial_rs485 atmel_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/*
* Configure the port from the platform device resource info.
*/
@@ -2494,6 +2498,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->mapbase = mpdev->resource[0].start;
port->irq = platform_get_irq(mpdev, 0);
port->rs485_config = atmel_config_rs485;
+ port->rs485_supported = atmel_rs485_supported;
port->iso7816_config = atmel_config_iso7816;
port->membase = NULL;
@@ -2503,24 +2508,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
if (ret)
return ret;
- /* for console, the clock could already be configured */
- if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&mpdev->dev, "usart");
- if (IS_ERR(atmel_port->clk)) {
- ret = PTR_ERR(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- ret = clk_prepare_enable(atmel_port->clk);
- if (ret) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- port->uartclk = clk_get_rate(atmel_port->clk);
- clk_disable_unprepare(atmel_port->clk);
- /* only enable clock when USART is in use */
- }
+ port->uartclk = clk_get_rate(atmel_port->clk);
/*
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
@@ -2629,7 +2617,6 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
static int __init atmel_console_setup(struct console *co, char *options)
{
- int ret;
struct uart_port *port = &atmel_ports[co->index].uart;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int baud = 115200;
@@ -2642,10 +2629,6 @@ static int __init atmel_console_setup(struct console *co, char *options)
return -ENODEV;
}
- ret = clk_prepare_enable(atmel_ports[co->index].clk);
- if (ret)
- return ret;
-
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
@@ -2711,7 +2694,6 @@ static struct uart_driver atmel_uart = {
.cons = ATMEL_CONSOLE_DEVICE,
};
-#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
@@ -2721,10 +2703,9 @@ static bool atmel_serial_clk_will_stop(void)
#endif
}
-static int atmel_serial_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused atmel_serial_suspend(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (uart_console(port) && console_suspend_enabled) {
@@ -2749,14 +2730,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
}
/* we cannot wake up if we're running on the slow clock */
- atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
+ atmel_port->may_wakeup = device_may_wakeup(dev);
if (atmel_serial_clk_will_stop()) {
unsigned long flags;
spin_lock_irqsave(&atmel_port->lock_suspended, flags);
atmel_port->suspended = true;
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
- device_set_wakeup_enable(&pdev->dev, 0);
+ device_set_wakeup_enable(dev, 0);
}
uart_suspend_port(&atmel_uart, port);
@@ -2764,9 +2745,9 @@ static int atmel_serial_suspend(struct platform_device *pdev,
return 0;
}
-static int atmel_serial_resume(struct platform_device *pdev)
+static int __maybe_unused atmel_serial_resume(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
@@ -2801,14 +2782,10 @@ static int atmel_serial_resume(struct platform_device *pdev)
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
uart_resume_port(&atmel_uart, port);
- device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
+ device_set_wakeup_enable(dev, atmel_port->may_wakeup);
return 0;
}
-#else
-#define atmel_serial_suspend NULL
-#define atmel_serial_resume NULL
-#endif
static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
@@ -2897,14 +2874,23 @@ static int atmel_serial_probe(struct platform_device *pdev)
atomic_set(&atmel_port->tasklet_shutdown, 0);
spin_lock_init(&atmel_port->lock_suspended);
+ atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
+ if (IS_ERR(atmel_port->clk)) {
+ ret = PTR_ERR(atmel_port->clk);
+ goto err;
+ }
+ ret = clk_prepare_enable(atmel_port->clk);
+ if (ret)
+ goto err;
+
ret = atmel_init_port(atmel_port, pdev);
if (ret)
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
if (IS_ERR(atmel_port->gpios)) {
ret = PTR_ERR(atmel_port->gpios);
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
}
if (!atmel_use_pdc_rx(&atmel_port->uart)) {
@@ -2913,7 +2899,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
sizeof(struct atmel_uart_char),
GFP_KERNEL);
if (!data)
- goto err_alloc_ring;
+ goto err_clk_disable_unprepare;
atmel_port->rx_ring.buf = data;
}
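The probe hunks above replace the conditional clk_get()/clk_put() handling that used to live in atmel_init_port() with a single devm_clk_get() plus clk_prepare_enable() at the top of probe, so every later failure unwinds through one clk_disable_unprepare label. A sketch of that pattern under the same assumptions (foo_* names hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* devm ties the clk reference to the device; no explicit clk_put(). */
	clk = devm_clk_get(&pdev->dev, "usart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = foo_init_port(pdev, clk);	/* hypothetical: may fail after the clock is on */
	if (ret)
		goto err_clk_disable_unprepare;

	return 0;

err_clk_disable_unprepare:
	clk_disable_unprepare(clk);
	return ret;
}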
@@ -2923,26 +2909,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err_add_port;
-#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
- if (uart_console(&atmel_port->uart)
- && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
- /*
- * The serial core enabled the clock for us, so undo
- * the clk_prepare_enable() in atmel_console_setup()
- */
- clk_disable_unprepare(atmel_port->clk);
- }
-#endif
-
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, atmel_port);
- /*
- * The peripheral clock has been disabled by atmel_init_port():
- * enable it before accessing I/O registers
- */
- clk_prepare_enable(atmel_port->clk);
-
if (rs485_enabled) {
atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
ATMEL_US_USMODE_NORMAL);
@@ -2966,12 +2935,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
err_add_port:
kfree(atmel_port->rx_ring.buf);
atmel_port->rx_ring.buf = NULL;
-err_alloc_ring:
- if (!uart_console(&atmel_port->uart)) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- }
-err_clear_bit:
+err_clk_disable_unprepare:
+ clk_disable_unprepare(atmel_port->clk);
clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
return ret;
@@ -3005,21 +2970,21 @@ static int atmel_serial_remove(struct platform_device *pdev)
clear_bit(port->line, atmel_ports_in_use);
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
pdev->dev.of_node = NULL;
return ret;
}
+static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
+ atmel_serial_resume);
+
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = atmel_serial_remove,
- .suspend = atmel_serial_suspend,
- .resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
+ .pm = pm_ptr(&atmel_serial_pm_ops),
},
};
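This hunk completes the conversion from the legacy platform_driver .suspend/.resume hooks to dev_pm_ops: SIMPLE_DEV_PM_OPS() builds the ops table from the two callbacks, pm_ptr() drops the reference when CONFIG_PM is disabled, and the callbacks are marked __maybe_unused instead of being fenced by #ifdef CONFIG_PM. A minimal sketch of the same shape (foo_* names hypothetical):

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	return uart_suspend_port(&foo_uart_driver, port);	/* foo_uart_driver hypothetical */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	return uart_resume_port(&foo_uart_driver, port);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.probe	= foo_probe,		/* hypothetical */
	.remove	= foo_remove,		/* hypothetical */
	.driver	= {
		.name	= "foo-uart",
		.pm	= pm_ptr(&foo_pm_ops),
	},
};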
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 57c70851f22a..88d08ba1ca83 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -253,6 +253,9 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
bool big_endian;
u64 addr;
+ if (early_con.flags & CON_ENABLED)
+ return -EALREADY;
+
spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
addr = of_flat_dt_translate_address(node);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 0d6e62f6bb07..f6c33cd228c8 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -274,6 +274,8 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+ bool is_cs7; /* Set to true when character size is 7 */
+ /* and the parity is enabled */
};
struct lpuart_soc_data {
@@ -990,12 +992,12 @@ static void lpuart32_rxint(struct lpuart_port *sport)
if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
if (sr & UARTSTAT_PE) {
+ sport->port.icount.parity++;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
sport->port.icount.brk++;
else
- sport->port.icount.parity++;
- } else if (sr & UARTSTAT_FE) {
- sport->port.icount.frame++;
+ sport->port.icount.frame++;
}
if (sr & UARTSTAT_OR)
@@ -1010,18 +1012,21 @@ static void lpuart32_rxint(struct lpuart_port *sport)
sr &= sport->port.read_status_mask;
if (sr & UARTSTAT_PE) {
+ flg = TTY_PARITY;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
flg = TTY_BREAK;
else
- flg = TTY_PARITY;
- } else if (sr & UARTSTAT_FE) {
- flg = TTY_FRAME;
+ flg = TTY_FRAME;
}
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
}
+ if (sport->is_cs7)
+ rx &= 0x7F;
+
if (tty_insert_flip_char(port, rx, flg) == 0)
sport->port.icount.buf_overrun++;
}
@@ -1107,6 +1112,17 @@ static void lpuart_handle_sysrq(struct lpuart_port *sport)
}
}
+static int lpuart_tty_insert_flip_string(struct tty_port *port,
+ unsigned char *chars, size_t size, bool is_cs7)
+{
+ int i;
+
+ if (is_cs7)
+ for (i = 0; i < size; i++)
+ chars[i] &= 0x7F;
+ return tty_insert_flip_string(port, chars, size);
+}
+
static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
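The helper added above exists because the LPUART delivers 8-bit words even in 7-bit-with-parity modes, with the parity bit landing in bit 7 of every received byte; is_cs7 is latched in set_termios and the RX paths mask the data with 0x7F before pushing it to the tty layer. A sketch of how such a flag could be derived at set_termios time (struct foo_port and its fields are hypothetical, shown only to illustrate the condition):

static void foo_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct foo_port *sport = container_of(port, struct foo_port, port);

	/* 7 data bits are only supported together with parity; the parity
	 * bit then occupies bit 7 of every byte read from the FIFO. */
	sport->is_cs7 = (termios->c_cflag & CSIZE) == CS7 &&
			(termios->c_cflag & PARENB);

	/* ... program baud rate, word length and parity into the hardware ... */
}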
@@ -1217,7 +1233,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
if (ring->head < ring->tail) {
count = sport->rx_sgl.length - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
ring->tail = 0;
@@ -1227,7 +1244,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
/* Finally we read data from tail to head */
if (ring->tail < ring->head) {
count = ring->head - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
/* Wrap ring->head if needed */
@@ -1355,7 +1373,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
sport->dma_rx_cookie = -EINVAL;
}
-static int lpuart_config_rs485(struct uart_port *port,
+static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1365,11 +1383,6 @@ static int lpuart_config_rs485(struct uart_port *port,
~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
writeb(modem, sport->port.membase + UARTMODEM);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -1390,7 +1403,7 @@ static int lpuart_config_rs485(struct uart_port *port,
return 0;
}
-static int lpuart32_config_rs485(struct uart_port *port,
+static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1400,11 +1413,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
& ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
lpuart32_write(&sport->port, modem, UARTMODIR);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -2076,6 +2084,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
bd = lpuart32_read(&sport->port, UARTBAUD);
modem = lpuart32_read(&sport->port, UARTMODIR);
+ sport->is_cs7 = false;
/*
* only support CS8 and CS7, and for CS7 must enable PE.
* supported mode:
@@ -2194,6 +2203,9 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, ctrl, UARTCTRL);
/* restore control register */
+ if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ sport->is_cs7 = true;
+
if (old && sport->lpuart_dma_rx_use) {
if (!lpuart_start_rx_dma(sport))
rx_dma_timer_init(sport);
@@ -2621,6 +2633,11 @@ static struct uart_driver lpuart_reg = {
.cons = LPUART_CONSOLE,
};
+static const struct serial_rs485 lpuart_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ /* delay_rts_* and RX_DURING_TX are not supported */
+};
+
static int lpuart_probe(struct platform_device *pdev)
{
const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
@@ -2660,6 +2677,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config = lpuart32_config_rs485;
else
sport->port.rs485_config = lpuart_config_rs485;
+ sport->port.rs485_supported = lpuart_rs485_supported;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2717,14 +2735,7 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_get_rs485;
- if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
- dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
-
- if (sport->port.rs485.delay_rts_before_send ||
- sport->port.rs485.delay_rts_after_send)
- dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
-
- sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 30edb35a6a15..522445a8f666 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1907,16 +1907,12 @@ static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
#endif
/* called with port.lock taken and irqs off or from .probe without locking */
-static int imx_uart_rs485_config(struct uart_port *port,
+static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr2;
- /* RTS is required to control the transmitter */
- if (!sport->have_rtscts && !sport->have_rtsgpio)
- rs485conf->flags &= ~SER_RS485_ENABLED;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
/* Enable receiver if low-active RTS signal is requested */
if (sport->have_rtscts && !sport->have_rtsgpio &&
@@ -2200,6 +2196,14 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
return HRTIMER_NORESTART;
}
+static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
+static const struct serial_rs485 imx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/* Default RX DMA buffer configuration */
#define RX_DMA_PERIODS 16
#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
@@ -2279,6 +2283,11 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
+ /* RTS is required to control the RS485 transmitter */
+ if (sport->have_rtscts || sport->have_rtsgpio)
+ sport->port.rs485_supported = imx_rs485_supported;
+ else
+ sport->port.rs485_supported = imx_no_rs485;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_uart_timeout, 0);
@@ -2338,7 +2347,7 @@ static int imx_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- imx_uart_rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 79b7db8580e0..7aa37be3216a 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -342,7 +342,7 @@ static int param_set_kgdboc_var(const char *kmessage,
/*
* Configure with the new params as long as init already ran.
* Note that we can get called before init if someone loads us
- * with "modprobe kgdboc kgdboc=..." or if they happen to use the
+ * with "modprobe kgdboc kgdboc=..." or if they happen to use
* the odd syntax of "kgdboc.kgdboc=..." on the kernel command.
*/
if (configured >= 0)
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index a0b6ea52d133..ab10ca4a45b5 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
@@ -72,7 +73,8 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@@ -245,7 +247,17 @@
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
+struct max310x_if_cfg {
+ int (*extended_reg_enable)(struct device *dev, bool enable);
+
+ unsigned int rev_id_reg;
+};
+
struct max310x_devtype {
+ struct {
+ unsigned short min;
+ unsigned short max;
+ } slave_addr;
char name[9];
int nr;
u8 mode1;
@@ -258,9 +270,8 @@ struct max310x_one {
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
+ struct regmap *regmap;
- u8 wr_header;
- u8 rd_header;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
@@ -268,6 +279,7 @@ struct max310x_one {
struct max310x_port {
const struct max310x_devtype *devtype;
+ const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@@ -289,26 +301,26 @@ static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
- regmap_read(s->regmap, port->iobase + reg, &val);
+ regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_write(s->regmap, port->iobase + reg, val);
+ regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
+ regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
@@ -357,13 +369,12 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -388,13 +399,12 @@ static int max14830_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -419,6 +429,10 @@ static const struct max310x_devtype max3107_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x2c,
+ .max = 0x2f,
+ },
};
static const struct max310x_devtype max3108_devtype = {
@@ -427,6 +441,10 @@ static const struct max310x_devtype max3108_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max3109_devtype = {
@@ -435,6 +453,10 @@ static const struct max310x_devtype max3109_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max14830_devtype = {
@@ -443,11 +465,15 @@ static const struct max310x_devtype max14830_devtype = {
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -464,7 +490,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@@ -486,7 +512,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -624,31 +650,15 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->wr_header,
- .len = sizeof(one->wr_header),
- }, {
- .tx_buf = txbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->rd_header,
- .len = sizeof(one->rd_header),
- }, {
- .rx_buf = rxbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
@@ -1026,7 +1036,7 @@ static void max310x_rs_proc(struct work_struct *ws)
MAX310X_MODE2_ECHOSUPR_BIT, mode2);
}
-static int max310x_rs485_config(struct uart_port *port,
+static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct max310x_one *one = to_max310x_port(port);
@@ -1035,8 +1045,6 @@ static int max310x_rs485_config(struct uart_port *port,
(rs485->delay_rts_after_send > 0x0f))
return -ERANGE;
- rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
- SER_RS485_ENABLED;
port->rs485 = *rs485;
schedule_work(&one->rs_work);
@@ -1249,16 +1257,24 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
#endif
+static const struct serial_rs485 max310x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
- struct regmap *regmap, int irq)
+ const struct max310x_if_cfg *if_cfg,
+ struct regmap *regmaps[], int irq)
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
u32 uartclk = 0;
bool xtal;
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ for (i = 0; i < devtype->nr; i++)
+ if (IS_ERR(regmaps[i]))
+ return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
@@ -1305,8 +1321,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
}
- s->regmap = regmap;
+ s->regmap = regmaps[0];
s->devtype = devtype;
+ s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
@@ -1315,22 +1332,18 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
- unsigned int offs = i << 5;
-
/* Reset port */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
+ regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
+ regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
- regmap_read(s->regmap,
- MAX310X_BRGDIVLSB_REG + offs, &ret);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
} while (ret != 0x01);
- regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
- devtype->mode1);
+ regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
@@ -1353,11 +1366,14 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
- s->p[i].port.iobase = i * 0x20;
+ s->p[i].port.iobase = i;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
+ s->p[i].port.rs485_supported = max310x_rs485_supported;
s->p[i].port.ops = &max310x_ops;
+ s->p[i].regmap = regmaps[i];
+
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
@@ -1368,10 +1384,6 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
- /* Initialize SPI-transfer buffers */
- s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
- MAX310X_WRITE_BIT;
- s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
@@ -1456,16 +1468,31 @@ static struct regmap_config regcfg = {
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
+ .max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
};
#ifdef CONFIG_SPI_MASTER
+static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+
+ return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
+}
+
+static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
+ .extended_reg_enable = max310x_spi_extended_reg_enable,
+ .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+};
+
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
- struct regmap *regmap;
+ struct regmap *regmaps[4];
+ unsigned int i;
int ret;
/* Setup SPI bus */
@@ -1480,10 +1507,14 @@ static int max310x_spi_probe(struct spi_device *spi)
if (!devtype)
devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
- regcfg.max_register = devtype->nr * 0x20 - 1;
- regmap = devm_regmap_init_spi(spi, &regcfg);
+ for (i = 0; i < devtype->nr; i++) {
+ u8 port_mask = i * 0x20;
+ regcfg.read_flag_mask = port_mask;
+ regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
+ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
+ }
- return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
+ return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
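Each UART on a multi-port MAX310x chip occupies a 0x20-wide register window on SPI. Rather than adding that offset to every access, the conversion builds one regmap per port and folds the window offset into the regmap read/write flag masks, so port code addresses plain register numbers (and the same port code works unchanged on I2C, where each UART answers at its own slave address). A hedged sketch of the per-port setup, reusing a shared config template as the probe hunk does (foo_port_regmap is hypothetical; MAX310X_WRITE_BIT is the driver's existing write marker):

static struct regmap *foo_port_regmap(struct spi_device *spi,
				      struct regmap_config *cfg,
				      unsigned int port)
{
	u8 offset = port * 0x20;	/* per-port register window */

	cfg->read_flag_mask = offset;
	cfg->write_flag_mask = offset | MAX310X_WRITE_BIT;
	return devm_regmap_init_spi(spi, cfg);
}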
static void max310x_spi_remove(struct spi_device *spi)
@@ -1512,6 +1543,97 @@ static struct spi_driver max310x_spi_driver = {
};
#endif
+#ifdef CONFIG_I2C
+static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
+{
+ return 0;
+}
+
+static struct regmap_config regcfg_i2c = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+ .max_register = MAX310X_I2C_REVID_EXTREG,
+};
+
+static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+ .extended_reg_enable = max310x_i2c_extended_reg_enable,
+ .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+};
+
+static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+ unsigned int nr)
+{
+ /*
+ * For MAX14830 and MAX3109, the slave address depends on what the
+ * A0 and A1 pins are tied to.
+ * See Table I2C Address Map of the datasheet.
+ * Based on that table, the following formulas were determined.
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
+ */
+
+ addr -= nr * 0x10;
+
+ if (nr >= 2)
+ addr -= 0x20;
+
+ return addr;
+}
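As a worked example of the helper above (the base address 0x6C is only an illustration within the MAX3109/MAX14830 range): UART0 stays at 0x6C, UART1 becomes 0x5C, UART2 becomes 0x2C and UART3 becomes 0x1C, giving the 0x10 / 0x30 / 0x10 spacings described in the comment.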
+
+static int max310x_i2c_probe(struct i2c_client *client)
+{
+ const struct max310x_devtype *devtype =
+ device_get_match_data(&client->dev);
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
+ client->addr, devtype->slave_addr.min,
+ devtype->slave_addr.max);
+
+ regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
+
+ for (i = 1; i < devtype->nr; i++) {
+ port_addr = max310x_i2c_slave_addr(client->addr, i);
+ port_client = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ port_addr);
+
+ regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
+ }
+
+ return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
+ regmaps, client->irq);
+}
+
+static int max310x_i2c_remove(struct i2c_client *client)
+{
+ max310x_remove(&client->dev);
+
+ return 0;
+}
+
+static struct i2c_driver max310x_i2c_driver = {
+ .driver = {
+ .name = MAX310X_NAME,
+ .of_match_table = max310x_dt_ids,
+ .pm = &max310x_pm_ops,
+ },
+ .probe_new = max310x_i2c_probe,
+ .remove = max310x_i2c_remove,
+};
+#endif
+
static int __init max310x_uart_init(void)
{
int ret;
@@ -1525,15 +1647,35 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
- uart_unregister_driver(&max310x_uart);
+ goto err_spi_register;
+#endif
+
+#ifdef CONFIG_I2C
+ ret = i2c_add_driver(&max310x_i2c_driver);
+ if (ret)
+ goto err_i2c_register;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_I2C
+err_i2c_register:
+ spi_unregister_driver(&max310x_spi_driver);
#endif
+err_spi_register:
+ uart_unregister_driver(&max310x_uart);
+
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
+#ifdef CONFIG_I2C
+ i2c_del_driver(&max310x_i2c_driver);
+#endif
+
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 2aec62b5d6c4..f4aaaadd0742 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -431,7 +431,8 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
/* Enable or disable the RS485 support */
-static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+static int mcf_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
unsigned char mr1, mr2;
@@ -448,11 +449,14 @@ static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
}
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
- port->rs485 = *rs485;
return 0;
}
+static const struct serial_rs485 mcf_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+};
+
/****************************************************************************/
/*
@@ -502,6 +506,7 @@ int __init early_mcf_setup(struct mcf_platform_uart *platp)
port->uartclk = MCF_BUSCLK;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->ops = &mcf_uart_ops;
}
@@ -629,6 +634,7 @@ static int mcf_probe(struct platform_device *pdev)
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE);
uart_add_one_port(&mcf_driver, port);
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 4869c0059c98..6c8db19fd572 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -162,7 +162,7 @@ static void meson_uart_start_tx(struct uart_port *port)
ch = xmit->buf[xmit->tail];
writel(ch, port->membase + AML_UART_WFIFO);
- xmit->tail = (xmit->tail+1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index e50f069b5ebb..3f1986c89694 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1630,7 +1630,7 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- uartclk = mpc5xxx_get_bus_frequency(np);
+ uartclk = mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1747,7 +1747,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
/* set the uart clock to the input clock of the psc, the different
* prescalers are taken into account in the set_baudrate() methods
* of the respective chip */
- uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ uartclk = mpc5xxx_get_bus_frequency(&op->dev);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e676ec761f18..3159889ddae1 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -29,103 +29,103 @@
#include <linux/of_device.h>
#include <linux/wait.h>
-#define UART_MR1 0x0000
-
-#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
-#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
-#define UART_MR1_RX_RDY_CTL BIT(7)
-#define UART_MR1_CTS_CTL BIT(6)
-
-#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE BIT(6)
-#define UART_MR2_BITS_PER_CHAR 0x30
-#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
-#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
-#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
-#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
-#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
-#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
-#define UART_MR2_PARITY_MODE_NONE 0x0
-#define UART_MR2_PARITY_MODE_ODD 0x1
-#define UART_MR2_PARITY_MODE_EVEN 0x2
-#define UART_MR2_PARITY_MODE_SPACE 0x3
-#define UART_MR2_PARITY_MODE 0x3
-
-#define UART_CSR 0x0008
-
-#define UART_TF 0x000C
+#define MSM_UART_MR1 0x0000
+
+#define MSM_UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define MSM_UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define MSM_UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define MSM_UART_MR1_RX_RDY_CTL BIT(7)
+#define MSM_UART_MR1_CTS_CTL BIT(6)
+
+#define MSM_UART_MR2 0x0004
+#define MSM_UART_MR2_ERROR_MODE BIT(6)
+#define MSM_UART_MR2_BITS_PER_CHAR 0x30
+#define MSM_UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define MSM_UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define MSM_UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define MSM_UART_MR2_PARITY_MODE_NONE 0x0
+#define MSM_UART_MR2_PARITY_MODE_ODD 0x1
+#define MSM_UART_MR2_PARITY_MODE_EVEN 0x2
+#define MSM_UART_MR2_PARITY_MODE_SPACE 0x3
+#define MSM_UART_MR2_PARITY_MODE 0x3
+
+#define MSM_UART_CSR 0x0008
+
+#define MSM_UART_TF 0x000C
#define UARTDM_TF 0x0070
-#define UART_CR 0x0010
-#define UART_CR_CMD_NULL (0 << 4)
-#define UART_CR_CMD_RESET_RX (1 << 4)
-#define UART_CR_CMD_RESET_TX (2 << 4)
-#define UART_CR_CMD_RESET_ERR (3 << 4)
-#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
-#define UART_CR_CMD_START_BREAK (5 << 4)
-#define UART_CR_CMD_STOP_BREAK (6 << 4)
-#define UART_CR_CMD_RESET_CTS (7 << 4)
-#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
-#define UART_CR_CMD_PACKET_MODE (9 << 4)
-#define UART_CR_CMD_MODE_RESET (12 << 4)
-#define UART_CR_CMD_SET_RFR (13 << 4)
-#define UART_CR_CMD_RESET_RFR (14 << 4)
-#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
-#define UART_CR_CMD_FORCE_STALE (4 << 8)
-#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE BIT(3)
-#define UART_CR_TX_ENABLE BIT(2)
-#define UART_CR_RX_DISABLE BIT(1)
-#define UART_CR_RX_ENABLE BIT(0)
-#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
-
-#define UART_IMR 0x0014
-#define UART_IMR_TXLEV BIT(0)
-#define UART_IMR_RXSTALE BIT(3)
-#define UART_IMR_RXLEV BIT(4)
-#define UART_IMR_DELTA_CTS BIT(5)
-#define UART_IMR_CURRENT_CTS BIT(6)
-#define UART_IMR_RXBREAK_START BIT(10)
-
-#define UART_IPR_RXSTALE_LAST 0x20
-#define UART_IPR_STALE_LSB 0x1F
-#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
-#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
-
-#define UART_IPR 0x0018
-#define UART_TFWR 0x001C
-#define UART_RFWR 0x0020
-#define UART_HCR 0x0024
-
-#define UART_MREG 0x0028
-#define UART_NREG 0x002C
-#define UART_DREG 0x0030
-#define UART_MNDREG 0x0034
-#define UART_IRDA 0x0038
-#define UART_MISR_MODE 0x0040
-#define UART_MISR_RESET 0x0044
-#define UART_MISR_EXPORT 0x0048
-#define UART_MISR_VAL 0x004C
-#define UART_TEST_CTRL 0x0050
-
-#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR BIT(7)
-#define UART_SR_RX_BREAK BIT(6)
-#define UART_SR_PAR_FRAME_ERR BIT(5)
-#define UART_SR_OVERRUN BIT(4)
-#define UART_SR_TX_EMPTY BIT(3)
-#define UART_SR_TX_READY BIT(2)
-#define UART_SR_RX_FULL BIT(1)
-#define UART_SR_RX_READY BIT(0)
-
-#define UART_RF 0x000C
+#define MSM_UART_CR 0x0010
+#define MSM_UART_CR_CMD_NULL (0 << 4)
+#define MSM_UART_CR_CMD_RESET_RX (1 << 4)
+#define MSM_UART_CR_CMD_RESET_TX (2 << 4)
+#define MSM_UART_CR_CMD_RESET_ERR (3 << 4)
+#define MSM_UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define MSM_UART_CR_CMD_START_BREAK (5 << 4)
+#define MSM_UART_CR_CMD_STOP_BREAK (6 << 4)
+#define MSM_UART_CR_CMD_RESET_CTS (7 << 4)
+#define MSM_UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define MSM_UART_CR_CMD_PACKET_MODE (9 << 4)
+#define MSM_UART_CR_CMD_MODE_RESET (12 << 4)
+#define MSM_UART_CR_CMD_SET_RFR (13 << 4)
+#define MSM_UART_CR_CMD_RESET_RFR (14 << 4)
+#define MSM_UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define MSM_UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define MSM_UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define MSM_UART_CR_CMD_FORCE_STALE (4 << 8)
+#define MSM_UART_CR_CMD_RESET_TX_READY (3 << 8)
+#define MSM_UART_CR_TX_DISABLE BIT(3)
+#define MSM_UART_CR_TX_ENABLE BIT(2)
+#define MSM_UART_CR_RX_DISABLE BIT(1)
+#define MSM_UART_CR_RX_ENABLE BIT(0)
+#define MSM_UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
+
+#define MSM_UART_IMR 0x0014
+#define MSM_UART_IMR_TXLEV BIT(0)
+#define MSM_UART_IMR_RXSTALE BIT(3)
+#define MSM_UART_IMR_RXLEV BIT(4)
+#define MSM_UART_IMR_DELTA_CTS BIT(5)
+#define MSM_UART_IMR_CURRENT_CTS BIT(6)
+#define MSM_UART_IMR_RXBREAK_START BIT(10)
+
+#define MSM_UART_IPR_RXSTALE_LAST 0x20
+#define MSM_UART_IPR_STALE_LSB 0x1F
+#define MSM_UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define MSM_UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
+
+#define MSM_UART_IPR 0x0018
+#define MSM_UART_TFWR 0x001C
+#define MSM_UART_RFWR 0x0020
+#define MSM_UART_HCR 0x0024
+
+#define MSM_UART_MREG 0x0028
+#define MSM_UART_NREG 0x002C
+#define MSM_UART_DREG 0x0030
+#define MSM_UART_MNDREG 0x0034
+#define MSM_UART_IRDA 0x0038
+#define MSM_UART_MISR_MODE 0x0040
+#define MSM_UART_MISR_RESET 0x0044
+#define MSM_UART_MISR_EXPORT 0x0048
+#define MSM_UART_MISR_VAL 0x004C
+#define MSM_UART_TEST_CTRL 0x0050
+
+#define MSM_UART_SR 0x0008
+#define MSM_UART_SR_HUNT_CHAR BIT(7)
+#define MSM_UART_SR_RX_BREAK BIT(6)
+#define MSM_UART_SR_PAR_FRAME_ERR BIT(5)
+#define MSM_UART_SR_OVERRUN BIT(4)
+#define MSM_UART_SR_TX_EMPTY BIT(3)
+#define MSM_UART_SR_TX_READY BIT(2)
+#define MSM_UART_SR_RX_FULL BIT(1)
+#define MSM_UART_SR_RX_READY BIT(0)
+
+#define MSM_UART_RF 0x000C
#define UARTDM_RF 0x0070
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
-#define UART_ISR_TX_READY BIT(7)
+#define MSM_UART_MISR 0x0010
+#define MSM_UART_ISR 0x0014
+#define MSM_UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
@@ -181,7 +181,10 @@ struct msm_port {
struct msm_dma rx_dma;
};
-#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+static inline struct msm_port *to_msm_port(struct uart_port *up)
+{
+ return container_of(up, struct msm_port, uart);
+}
static
void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
@@ -200,10 +203,10 @@ unsigned int msm_read(struct uart_port *port, unsigned int off)
*/
static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
{
- msm_write(port, 0x06, UART_MREG);
- msm_write(port, 0xF1, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x1A, UART_MNDREG);
+ msm_write(port, 0x06, MSM_UART_MREG);
+ msm_write(port, 0xF1, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x1A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
@@ -212,16 +215,16 @@ static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
*/
static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
{
- msm_write(port, 0x18, UART_MREG);
- msm_write(port, 0xF6, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x0A, UART_MNDREG);
+ msm_write(port, 0x18, MSM_UART_MREG);
+ msm_write(port, 0xF6, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x0A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
static void msm_serial_set_mnd_regs(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/*
* These registers don't exist so we change the clk input rate
@@ -392,35 +395,35 @@ static inline void msm_wait_for_xmitr(struct uart_port *port)
{
unsigned int timeout = 500000;
- while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
- if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY)) {
+ if (msm_read(port, MSM_UART_ISR) & MSM_UART_ISR_TX_READY)
break;
udelay(1);
if (!timeout--)
break;
}
- msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX_READY, MSM_UART_CR);
}
static void msm_stop_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_start_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->tx_dma;
/* Already started in DMA mode */
if (dma->count)
return;
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -456,8 +459,8 @@ static void msm_complete_tx_dma(void *args)
msm_write(port, val, UARTDM_DMEN);
if (msm_port->is_uartdm > UARTDM_1P3) {
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
}
count = dma->count - state.residue;
@@ -468,8 +471,8 @@ static void msm_complete_tx_dma(void *args)
xmit->tail &= UART_XMIT_SIZE - 1;
/* Restore "Tx FIFO below watermark" interrupt */
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -516,8 +519,8 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
* Using DMA complete for Tx FIFO reload, no need for
* "Tx FIFO below watermark" one, disable it
*/
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
dma->count = count;
@@ -559,10 +562,10 @@ static void msm_complete_rx_dma(void *args)
val &= ~dma->enable_bit;
msm_write(port, val, UARTDM_DMEN);
- if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ if (msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
@@ -584,7 +587,7 @@ static void msm_complete_rx_dma(void *args)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock_irqrestore(&port->lock, flags);
@@ -638,23 +641,23 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
* Using DMA for FIFO off-load, no need for "Rx FIFO over
* watermark" or "stale" interrupts, disable them
*/
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
/*
* Well, when DMA is ADM3 engine(implied by <= UARTDM v1.3),
* we need RXSTALE to flush input DMA fifo to memory
*/
if (msm_port->is_uartdm < UARTDM_1P4)
- msm_port->imr |= UART_IMR_RXSTALE;
+ msm_port->imr |= MSM_UART_IMR_RXSTALE;
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
dma->count = UARTDM_RX_SIZE;
dma_async_issue_pending(dma->chan);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
val = msm_read(uart, UARTDM_DMEN);
val |= dma->enable_bit;
@@ -676,25 +679,25 @@ sw_mode:
* Switch from DMA to SW/FIFO mode. After clearing Rx BAM (UARTDM_DMEN),
* receiver must be reset.
*/
- msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Re-enable RX interrupts */
- msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE;
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
}
static void msm_stop_rx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
@@ -702,10 +705,10 @@ static void msm_stop_rx(struct uart_port *port)
static void msm_enable_ms(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr |= UART_IMR_DELTA_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -714,20 +717,20 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
struct tty_port *tport = &port->state->port;
unsigned int sr;
int count = 0;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
- if (misr & UART_IMR_RXSTALE) {
+ if (misr & MSM_UART_IMR_RXSTALE) {
count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
msm_port->old_snap_state;
msm_port->old_snap_state = 0;
} else {
- count = 4 * (msm_read(port, UART_RFWR));
+ count = 4 * (msm_read(port, MSM_UART_RFWR));
msm_port->old_snap_state += count;
}
@@ -739,8 +742,8 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
unsigned char buf[4];
int sysrq, r_count, i;
- sr = msm_read(port, UART_SR);
- if ((sr & UART_SR_RX_READY) == 0) {
+ sr = msm_read(port, MSM_UART_SR);
+ if ((sr & MSM_UART_SR_RX_READY) == 0) {
msm_port->old_snap_state -= count;
break;
}
@@ -759,7 +762,7 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock(&port->lock);
@@ -773,10 +776,10 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
tty_flip_buffer_push(tport);
- if (misr & (UART_IMR_RXSTALE))
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ if (misr & (MSM_UART_IMR_RXSTALE))
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Try to use DMA */
msm_start_rx_dma(msm_port);
@@ -792,25 +795,25 @@ static void msm_handle_rx(struct uart_port *port)
* Handle overrun. My understanding of the hardware is that overrun
* is not tied to the RX buffer, so we handle the case out of band.
*/
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
/* and now the main RX loop */
- while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ while ((sr = msm_read(port, MSM_UART_SR)) & MSM_UART_SR_RX_READY) {
unsigned int c;
char flag = TTY_NORMAL;
int sysrq;
- c = msm_read(port, UART_RF);
+ c = msm_read(port, MSM_UART_RF);
- if (sr & UART_SR_RX_BREAK) {
+ if (sr & MSM_UART_SR_RX_BREAK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
- } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ } else if (sr & MSM_UART_SR_PAR_FRAME_ERR) {
port->icount.frame++;
} else {
port->icount.rx++;
@@ -819,9 +822,9 @@ static void msm_handle_rx(struct uart_port *port)
/* Mask conditions we're ignoring. */
sr &= port->read_status_mask;
- if (sr & UART_SR_RX_BREAK)
+ if (sr & MSM_UART_SR_RX_BREAK)
flag = TTY_BREAK;
- else if (sr & UART_SR_PAR_FRAME_ERR)
+ else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
spin_unlock(&port->lock);
@@ -837,7 +840,7 @@ static void msm_handle_rx(struct uart_port *port)
static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -845,7 +848,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
if (tx_count && msm_port->is_uartdm)
msm_reset_dm_count(port, tx_count);
@@ -854,7 +857,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
int i;
char buf[4] = { 0 };
- if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
break;
if (msm_port->is_uartdm)
@@ -883,7 +886,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
static void msm_handle_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct circ_buf *xmit = &msm_port->uart.state->xmit;
struct msm_dma *dma = &msm_port->tx_dma;
unsigned int pio_count, dma_count, dma_min;
@@ -895,7 +898,7 @@ static void msm_handle_tx(struct uart_port *port)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
buf[0] = port->x_char;
@@ -939,7 +942,7 @@ static void msm_handle_tx(struct uart_port *port)
static void msm_handle_delta_cts(struct uart_port *port)
{
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
port->icount.cts++;
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
@@ -947,27 +950,27 @@ static void msm_handle_delta_cts(struct uart_port *port)
static irqreturn_t msm_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int misr;
u32 val;
spin_lock_irqsave(&port->lock, flags);
- misr = msm_read(port, UART_MISR);
- msm_write(port, 0, UART_IMR); /* disable interrupt */
+ misr = msm_read(port, MSM_UART_MISR);
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
- if (misr & UART_IMR_RXBREAK_START) {
+ if (misr & MSM_UART_IMR_RXBREAK_START) {
msm_port->break_detected = true;
- msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RXBREAK_START, MSM_UART_CR);
}
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) {
if (dma->count) {
- val = UART_CR_CMD_STALE_EVENT_DISABLE;
- msm_write(port, val, UART_CR);
- val = UART_CR_CMD_RESET_STALE_INT;
- msm_write(port, val, UART_CR);
+ val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, MSM_UART_CR);
+ val = MSM_UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, MSM_UART_CR);
/*
* Flush DMA input fifo to memory, this will also
* trigger DMA RX completion
@@ -979,12 +982,12 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_rx(port);
}
}
- if (misr & UART_IMR_TXLEV)
+ if (misr & MSM_UART_IMR_TXLEV)
msm_handle_tx(port);
- if (misr & UART_IMR_DELTA_CTS)
+ if (misr & MSM_UART_IMR_DELTA_CTS)
msm_handle_delta_cts(port);
- msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
@@ -992,7 +995,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
static unsigned int msm_tx_empty(struct uart_port *port)
{
- return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+ return (msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -1002,19 +1005,19 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int mr;
/* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
- mr = msm_read(port, UART_MR1);
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_BREAK_INT, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
@@ -1025,24 +1028,24 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
- mr = msm_read(port, UART_MR1);
+ mr = msm_read(port, MSM_UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
} else {
- mr |= UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
}
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
if (break_ctl)
- msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_START_BREAK, MSM_UART_CR);
else
- msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STOP_BREAK, MSM_UART_CR);
}
struct msm_baud_map {
@@ -1055,7 +1058,7 @@ static const struct msm_baud_map *
msm_find_best_baud(struct uart_port *port, unsigned int baud,
unsigned long *rate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int divisor, result;
unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
const struct msm_baud_map *entry, *end, *best;
@@ -1124,7 +1127,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned long *saved_flags)
{
unsigned int rxstale, watermark, mask;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
const struct msm_baud_map *entry;
unsigned long flags, rate;
@@ -1139,45 +1142,45 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
*saved_flags = flags;
port->uartclk = rate;
- msm_write(port, entry->code, UART_CSR);
+ msm_write(port, entry->code, MSM_UART_CSR);
/* RX stale watermark */
rxstale = entry->rxstale;
- watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark = MSM_UART_IPR_STALE_LSB & rxstale;
if (msm_port->is_uartdm) {
- mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ mask = MSM_UART_DM_IPR_STALE_TIMEOUT_MSB;
} else {
- watermark |= UART_IPR_RXSTALE_LAST;
- mask = UART_IPR_STALE_TIMEOUT_MSB;
+ watermark |= MSM_UART_IPR_RXSTALE_LAST;
+ mask = MSM_UART_IPR_STALE_TIMEOUT_MSB;
}
watermark |= mask & (rxstale << 2);
- msm_write(port, watermark, UART_IPR);
+ msm_write(port, watermark, MSM_UART_IPR);
/* set RX watermark */
watermark = (port->fifosize * 3) / 4;
- msm_write(port, watermark, UART_RFWR);
+ msm_write(port, watermark, MSM_UART_RFWR);
/* set TX watermark */
- msm_write(port, 10, UART_TFWR);
+ msm_write(port, 10, MSM_UART_TFWR);
- msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_PROTECTION_EN, MSM_UART_CR);
msm_reset(port);
/* Enable RX and TX */
- msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE | MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
/* turn on RX and CTS interrupts */
- msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
- UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;
+ msm_port->imr = MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE |
+ MSM_UART_IMR_CURRENT_CTS | MSM_UART_IMR_RXBREAK_START;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (msm_port->is_uartdm) {
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
}
return baud;
@@ -1185,7 +1188,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
static void msm_init_clock(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
@@ -1194,7 +1197,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int data, rfr_level, mask;
int ret;
@@ -1209,18 +1212,18 @@ static int msm_startup(struct uart_port *port)
rfr_level = port->fifosize;
/* set automatic RFR level */
- data = msm_read(port, UART_MR1);
+ data = msm_read(port, MSM_UART_MR1);
if (msm_port->is_uartdm)
- mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_DM_MR1_AUTO_RFR_LEVEL1;
else
- mask = UART_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_MR1_AUTO_RFR_LEVEL1;
data &= ~mask;
- data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data &= ~MSM_UART_MR1_AUTO_RFR_LEVEL0;
data |= mask & (rfr_level << 2);
- data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
- msm_write(port, data, UART_MR1);
+ data |= MSM_UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, MSM_UART_MR1);
if (msm_port->is_uartdm) {
msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
@@ -1246,10 +1249,10 @@ err_irq:
static void msm_shutdown(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
msm_port->imr = 0;
- msm_write(port, 0, UART_IMR); /* disable interrupts */
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
@@ -1262,7 +1265,7 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
@@ -1279,60 +1282,60 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
tty_termios_encode_baud_rate(termios, baud, baud);
/* calculate parity */
- mr = msm_read(port, UART_MR2);
- mr &= ~UART_MR2_PARITY_MODE;
+ mr = msm_read(port, MSM_UART_MR2);
+ mr &= ~MSM_UART_MR2_PARITY_MODE;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
- mr |= UART_MR2_PARITY_MODE_ODD;
+ mr |= MSM_UART_MR2_PARITY_MODE_ODD;
else if (termios->c_cflag & CMSPAR)
- mr |= UART_MR2_PARITY_MODE_SPACE;
+ mr |= MSM_UART_MR2_PARITY_MODE_SPACE;
else
- mr |= UART_MR2_PARITY_MODE_EVEN;
+ mr |= MSM_UART_MR2_PARITY_MODE_EVEN;
}
/* calculate bits per char */
- mr &= ~UART_MR2_BITS_PER_CHAR;
+ mr &= ~MSM_UART_MR2_BITS_PER_CHAR;
switch (termios->c_cflag & CSIZE) {
case CS5:
- mr |= UART_MR2_BITS_PER_CHAR_5;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_5;
break;
case CS6:
- mr |= UART_MR2_BITS_PER_CHAR_6;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_6;
break;
case CS7:
- mr |= UART_MR2_BITS_PER_CHAR_7;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_7;
break;
case CS8:
default:
- mr |= UART_MR2_BITS_PER_CHAR_8;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_8;
break;
}
/* calculate stop bits */
- mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ mr &= ~(MSM_UART_MR2_STOP_BIT_LEN_ONE | MSM_UART_MR2_STOP_BIT_LEN_TWO);
if (termios->c_cflag & CSTOPB)
- mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_TWO;
else
- mr |= UART_MR2_STOP_BIT_LEN_ONE;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_ONE;
/* set parity, bits per char, and stop bit */
- msm_write(port, mr, UART_MR2);
+ msm_write(port, mr, MSM_UART_MR2);
/* calculate and set hardware flow control */
- mr = msm_read(port, UART_MR1);
- mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~(MSM_UART_MR1_CTS_CTL | MSM_UART_MR1_RX_RDY_CTL);
if (termios->c_cflag & CRTSCTS) {
- mr |= UART_MR1_CTS_CTL;
- mr |= UART_MR1_RX_RDY_CTL;
+ mr |= MSM_UART_MR1_CTS_CTL;
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
}
- msm_write(port, mr, UART_MR1);
+ msm_write(port, mr, MSM_UART_MR1);
/* Configure status bits to ignore based on termio flags. */
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ port->read_status_mask |= MSM_UART_SR_PAR_FRAME_ERR;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_SR_RX_BREAK;
+ port->read_status_mask |= MSM_UART_SR_RX_BREAK;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1416,7 +1419,7 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
static void msm_power(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
switch (state) {
case 0:
@@ -1435,10 +1438,10 @@ static void msm_power(struct uart_port *port, unsigned int state,
#ifdef CONFIG_CONSOLE_POLL
static int msm_poll_get_char_single(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
+ struct msm_port *msm_port = to_msm_port(port);
+ unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : MSM_UART_RF;
- if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY))
return NO_POLL_CHAR;
return msm_read(port, rf_reg) & 0xff;
@@ -1456,7 +1459,7 @@ static int msm_poll_get_char_dm(struct uart_port *port)
c = sp[sizeof(slop) - count];
count--;
/* Or if FIFO is empty */
- } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
+ } else if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY)) {
/*
* If RX packing buffer has less than a word, force stale to
* push contents into RX FIFO
@@ -1464,14 +1467,13 @@ static int msm_poll_get_char_dm(struct uart_port *port)
count = msm_read(port, UARTDM_RXFS);
count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
if (count) {
- msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_FORCE_STALE, MSM_UART_CR);
slop = msm_read(port, UARTDM_RF);
c = sp[0];
count--;
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
- UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
} else {
c = NO_POLL_CHAR;
}
@@ -1489,11 +1491,11 @@ static int msm_poll_get_char(struct uart_port *port)
{
u32 imr;
int c;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
c = msm_poll_get_char_dm(port);
@@ -1501,7 +1503,7 @@ static int msm_poll_get_char(struct uart_port *port)
c = msm_poll_get_char_single(port);
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
return c;
}
@@ -1509,28 +1511,28 @@ static int msm_poll_get_char(struct uart_port *port)
static void msm_poll_put_char(struct uart_port *port, unsigned char c)
{
u32 imr;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Write a character */
- msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : MSM_UART_TF);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
}
#endif
@@ -1588,7 +1590,7 @@ static struct msm_port msm_uart_ports[] = {
},
};
-#define UART_NR ARRAY_SIZE(msm_uart_ports)
+#define MSM_UART_NR ARRAY_SIZE(msm_uart_ports)
static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
@@ -1609,7 +1611,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
/* Account for newlines that will get a carriage return added */
for (i = 0; i < count; i++)
@@ -1655,7 +1657,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
}
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
iowrite32_rep(tf, buf, 1);
@@ -1674,10 +1676,10 @@ static void msm_console_write(struct console *co, const char *s,
struct uart_port *port;
struct msm_port *msm_port;
- BUG_ON(co->index < 0 || co->index >= UART_NR);
+ BUG_ON(co->index < 0 || co->index >= MSM_UART_NR);
port = msm_get_port_from_line(co->index);
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
}
@@ -1690,7 +1692,7 @@ static int msm_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- if (unlikely(co->index >= UART_NR || co->index < 0))
+ if (unlikely(co->index >= MSM_UART_NR || co->index < 0))
return -ENXIO;
port = msm_get_port_from_line(co->index);
@@ -1771,7 +1773,7 @@ static struct uart_driver msm_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "msm_serial",
.dev_name = "ttyMSM",
- .nr = UART_NR,
+ .nr = MSM_UART_NR,
.cons = MSM_CONSOLE,
};
@@ -1801,14 +1803,14 @@ static int msm_serial_probe(struct platform_device *pdev)
if (line < 0)
line = atomic_inc_return(&msm_uart_next_id) - 1;
- if (unlikely(line < 0 || line >= UART_NR))
+ if (unlikely(line < 0 || line >= MSM_UART_NR))
return -ENXIO;
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
id = of_match_device(msm_uartdm_table, &pdev->dev);
if (id)
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 643dfbcc43f9..0ba0f4d9459d 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -481,12 +481,6 @@ static int __init mux_probe(struct parisc_device *dev)
port->line = port_cnt;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
- /* The port->timeout needs to match what is present in
- * uart_wait_until_sent in serial_core.c. Otherwise
- * the time spent in msleep_interruptable will be very
- * long, causing the appearance of a console hang.
- */
- port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 0429c2a54290..65eaecd10b7c 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -265,6 +265,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -277,6 +278,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read.
+ * This causes interrupt loop and system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
@@ -470,14 +481,14 @@ static void mvebu_uart_shutdown(struct uart_port *port)
}
}
-static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
unsigned int d_divisor, m_divisor;
unsigned long flags;
u32 brdv, osamp;
if (!port->uartclk)
- return -EOPNOTSUPP;
+ return 0;
/*
* The baudrate is derived from the UART clock thanks to divisors:
@@ -548,7 +559,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
(m_divisor << 16) | (m_divisor << 24);
writel(osamp, port->membase + UART_OSAMP);
- return 0;
+ return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}
static void mvebu_uart_set_termios(struct uart_port *port,
@@ -587,15 +598,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
max_baud = port->uartclk / 80;
baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
- if (mvebu_uart_baud_rate_set(port, baud)) {
- /* No clock available, baudrate cannot be changed */
- if (old)
- baud = uart_get_baud_rate(port, old, NULL,
- min_baud, max_baud);
- } else {
- tty_termios_encode_baud_rate(termios, baud, baud);
- uart_update_timeout(port, termios->c_cflag, baud);
- }
+ baud = mvebu_uart_baud_rate_set(port, baud);
+
+ /* In case baudrate cannot be changed, report previous old value */
+ if (baud == 0 && old)
+ baud = tty_termios_baud_rate(old);
/* Only the following flag changes are supported */
if (old) {
@@ -606,6 +613,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_cflag |= CS8;
}
+ if (baud != 0) {
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ }
+
spin_unlock_irqrestore(&port->lock, flags);
}
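
A rough worked example of the divisor relationship used by mvebu_uart_baud_rate_set() above: the achieved rate is port->uartclk divided by the product of the two programmed divisors, so DIV_ROUND_CLOSEST() recovers the value that set_termios now reports back to userspace. The clock and divisor numbers below are invented for illustration and are not taken from mvebu hardware tables.

#include <stdio.h>

/* same rounding as the kernel's DIV_ROUND_CLOSEST() for unsigned operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long uartclk = 14745600;	/* hypothetical UART clock */
	unsigned int d_divisor = 16;		/* example divisor values only */
	unsigned int m_divisor = 8;

	/* what the reworked mvebu_uart_baud_rate_set() would return */
	unsigned long achieved = DIV_ROUND_CLOSEST(uartclk, d_divisor * m_divisor);

	printf("achieved baud = %lu\n", achieved);	/* 115200 with these numbers */
	return 0;
}
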
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 46f4d4cacb6e..0aa666e247d5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -1102,8 +1103,6 @@ serial_omap_type(struct uart_port *port)
return up->name;
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
@@ -1118,7 +1117,7 @@ static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -1186,7 +1185,7 @@ static void omap_serial_early_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = omap_serial_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
@@ -1325,7 +1324,8 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
@@ -1559,6 +1559,13 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
return 0;
}
+static const struct serial_rs485 serial_omap_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
@@ -1636,6 +1643,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
up->port.rs485_config = serial_omap_config_rs485;
+ up->port.rs485_supported = serial_omap_rs485_supported;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 44d20e5a7dd3..888e17e3f25f 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -201,7 +201,7 @@ static void owl_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
owl_uart_write(port, ch, OWL_UART_TXDAT);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 3b26524d48e3..8a9065e4a903 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -3,6 +3,7 @@
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#include <linux/kernel.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -189,8 +190,6 @@ enum {
#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
#define CMITC_UARTCLK 192000000 /* 192.0000 MHz */
#define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */
@@ -1516,7 +1515,7 @@ static void pch_uart_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -1602,7 +1601,7 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
if (port_locked)
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index b399aac530fe..f418f1de66b3 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -503,7 +503,7 @@ static int pic32_uart_startup(struct uart_port *port)
if (!sport->irq_fault_name) {
dev_err(port->dev, "%s: kasprintf err!", __func__);
ret = -ENOMEM;
- goto out_done;
+ goto out_disable_clk;
}
irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
@@ -579,6 +579,8 @@ out_r:
out_f:
free_irq(sport->irq_fault, port);
kfree(sport->irq_fault_name);
+out_disable_clk:
+ clk_disable_unprepare(sport->clk);
out_done:
return ret;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 3133446e806c..f63257b8e872 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -52,7 +52,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/dbdma.h>
#include <asm/macio.h>
#else
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index e80ba8e10407..9309ffd87c8e 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
@@ -575,8 +576,6 @@ static struct uart_driver serial_pxa_reg;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -594,7 +593,7 @@ static void wait_for_xmitr(struct uart_pxa_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f8f950641ad9..f4698a064a4d 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
@@ -940,52 +943,63 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
return 0;
}
-static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
- unsigned int sampling_rate, unsigned int *clk_div)
+static unsigned long find_clk_rate_in_tol(struct clk *clk, unsigned int desired_clk,
+ unsigned int *clk_div, unsigned int percent_tol)
{
- unsigned long ser_clk;
- unsigned long desired_clk;
- unsigned long freq, prev;
+ unsigned long freq;
unsigned long div, maxdiv;
- int64_t mult;
-
- desired_clk = baud * sampling_rate;
- if (!desired_clk) {
- pr_err("%s: Invalid frequency\n", __func__);
- return 0;
- }
+ u64 mult;
+ unsigned long offset, abs_tol, achieved;
+ abs_tol = div_u64((u64)desired_clk * percent_tol, 100);
maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
- prev = 0;
-
- for (div = 1; div <= maxdiv; div++) {
- mult = div * desired_clk;
- if (mult > ULONG_MAX)
+ div = 1;
+ while (div <= maxdiv) {
+ mult = (u64)div * desired_clk;
+ if (mult != (unsigned long)mult)
break;
- freq = clk_round_rate(clk, (unsigned long)mult);
- if (!(freq % desired_clk)) {
- ser_clk = freq;
- break;
- }
+ offset = div * abs_tol;
+ freq = clk_round_rate(clk, mult - offset);
- if (!prev)
- ser_clk = freq;
- else if (prev == freq)
+ /* Can only get lower if we're done */
+ if (freq < mult - offset)
break;
- prev = freq;
- }
+ /*
+ * Re-calculate div in case rounding skipped rates but we
+ * ended up at a good one, then check for a match.
+ */
+ div = DIV_ROUND_CLOSEST(freq, desired_clk);
+ achieved = DIV_ROUND_CLOSEST(freq, div);
+ if (achieved <= desired_clk + abs_tol &&
+ achieved >= desired_clk - abs_tol) {
+ *clk_div = div;
+ return freq;
+ }
- if (!ser_clk) {
- pr_err("%s: Can't find matching DFS entry for baud %d\n",
- __func__, baud);
- return ser_clk;
+ div = DIV_ROUND_UP(freq, desired_clk);
}
- *clk_div = ser_clk / desired_clk;
- if (!(*clk_div))
- *clk_div = 1;
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
+ unsigned int sampling_rate, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * sampling_rate;
+ if (!desired_clk)
+ return 0;
+
+ /*
+ * try to find a clock rate within 2% tolerance, then within 5%
+ */
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 2);
+ if (!ser_clk)
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 5);
return ser_clk;
}
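
A minimal user-space sketch of the tolerance search implemented by find_clk_rate_in_tol() above, assuming a fake clock whose reachable rates are listed in a table as a stand-in for clk_round_rate(). Only the shape of the 2%-then-5% search mirrors the driver; the rate table, the divisor-field width and the helper names are invented.

#include <stdio.h>

static const unsigned long fake_rates[] = {
	7372800, 14745600, 29491200, 58982400, 100000000,
};

/* stand-in for clk_round_rate(): smallest supported rate >= request, else the maximum */
static unsigned long fake_round_rate(unsigned long req)
{
	for (unsigned int i = 0; i < sizeof(fake_rates) / sizeof(fake_rates[0]); i++)
		if (fake_rates[i] >= req)
			return fake_rates[i];
	return fake_rates[sizeof(fake_rates) / sizeof(fake_rates[0]) - 1];
}

static unsigned long find_rate_in_tol(unsigned long desired, unsigned int *clk_div,
				      unsigned int percent_tol)
{
	unsigned long abs_tol = desired * percent_tol / 100;
	unsigned long div = 1, maxdiv = 4095;	/* divisor-field width is a guess */

	while (div <= maxdiv) {
		unsigned long mult = div * desired;
		unsigned long offset = div * abs_tol;
		unsigned long freq = fake_round_rate(mult - offset);
		unsigned long achieved;

		if (freq < mult - offset)	/* clock can only get lower: give up */
			break;

		/* re-derive the divisor from the rate we actually got, then check tolerance */
		div = (freq + desired / 2) / desired;
		achieved = (freq + div / 2) / div;
		if (achieved <= desired + abs_tol && achieved >= desired - abs_tol) {
			*clk_div = div;
			return freq;
		}
		div = (freq + desired - 1) / desired;	/* DIV_ROUND_UP: try the next divisor */
	}
	return 0;
}

int main(void)
{
	unsigned int div = 0;
	/* 115200 baud at 16x oversampling: desired source clock is 1843200 Hz */
	unsigned long rate = find_rate_in_tol(115200UL * 16, &div, 2);

	if (!rate)
		rate = find_rate_in_tol(115200UL * 16, &div, 5);	/* retry at 5% like the driver */
	printf("rate=%lu div=%u\n", rate, div);
	return 0;
}
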
@@ -1020,8 +1034,15 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
clk_rate = get_clk_div_rate(port->se.clk, baud,
sampling_rate, &clk_div);
- if (!clk_rate)
+ if (!clk_rate) {
+ dev_err(port->se.dev,
+ "Couldn't find suitable clock rate for %u\n",
+ baud * sampling_rate);
goto out_restart_rx;
+ }
+
+ dev_dbg(port->se.dev, "desired_rate-%u, clk_rate-%lu, clk_div-%u\n",
+ baud * sampling_rate, clk_rate, clk_div);
uport->uartclk = clk_rate;
dev_pm_opp_set_rate(uport->dev, clk_rate);
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index f556b4955f59..feb2054aba37 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -353,7 +353,7 @@ static void rda_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index d5ca904def34..b7a4b47ce74e 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -48,6 +48,12 @@
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
+#ifdef CONFIG_ARM64
+#define UART_NR 12
+#else
+#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS
+#endif
+
#define S3C24XX_TX_PIO 1
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
@@ -87,7 +93,7 @@ struct s3c24xx_uart_info {
struct s3c24xx_serial_drv_data {
const struct s3c24xx_uart_info info;
const struct s3c2410_uartcfg def_cfg;
- const unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+ const unsigned int fifosize[UART_NR];
};
struct s3c24xx_uart_dma {
@@ -377,8 +383,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
/* Enable tx dma mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
- ucon |= (dma_get_cache_alignment() >= 16) ?
- S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+ ucon |= S3C64XX_UCON_TXBURST_1;
ucon |= S3C64XX_UCON_TXMODE_DMA;
wr_regl(port, S3C2410_UCON, ucon);
@@ -674,7 +679,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
S3C64XX_UCON_DMASUS_EN |
S3C64XX_UCON_TIMEOUT_EN |
S3C64XX_UCON_RXMODE_MASK);
- ucon |= S3C64XX_UCON_RXBURST_16 |
+ ucon |= S3C64XX_UCON_RXBURST_1 |
0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_TIMEOUT_EN |
@@ -1012,6 +1017,7 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int umcon = rd_regl(port, S3C2410_UMCON);
+ unsigned int ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
@@ -1019,6 +1025,13 @@ static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
umcon &= ~S3C2410_UMCOM_RTS_LOW;
wr_regl(port, S3C2410_UMCON, umcon);
+
+ if (mctrl & TIOCM_LOOP)
+ ucon |= S3C2410_UCON_LOOPBACK;
+ else
+ ucon &= ~S3C2410_UCON_LOOPBACK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
@@ -1802,67 +1815,27 @@ static const struct uart_ops apple_s5l_serial_ops = {
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
- .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
+ .nr = UART_NR,
.cons = S3C24XX_SERIAL_CONSOLE,
.dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
-#define __PORT_LOCK_UNLOCKED(i) \
- __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[i].port.lock)
-static struct s3c24xx_uart_port
-s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
- [0] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(0),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- }
- },
- [1] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(1),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- }
- },
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
- [2] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(2),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 2,
- }
- },
-#endif
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [3] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(3),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 3,
- }
- }
-#endif
-};
-#undef __PORT_LOCK_UNLOCKED
+static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
+
+static void s3c24xx_serial_init_port_default(int index) {
+ struct uart_port *port = &s3c24xx_serial_ports[index].port;
+
+ spin_lock_init(&port->lock);
+
+ port->iotype = UPIO_MEM;
+ port->uartclk = 0;
+ port->fifosize = 16;
+ port->ops = &s3c24xx_serial_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->line = index;
+}
/* s3c24xx_serial_resetport
*
@@ -2178,6 +2151,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
ourport = &s3c24xx_serial_ports[index];
+ s3c24xx_serial_init_port_default(index);
+
ourport->drv_data = s3c24xx_get_driver_data(pdev);
if (!ourport->drv_data) {
dev_err(&pdev->dev, "could not find driver data\n");
@@ -2576,7 +2551,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
/* is this a valid port */
- if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
+ if (co->index == -1 || co->index >= UART_NR)
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 8472bf70477c..259e08cc347c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1127,7 +1127,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
-static int sc16is7xx_config_rs485(struct uart_port *port,
+static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
@@ -1143,7 +1143,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
return -EINVAL;
}
- port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
kthread_queue_work(&s->kworker, &one->reg_work);
@@ -1354,6 +1353,12 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
}
#endif
+static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1, /* Not supported but keep returning -EINVAL */
+};
+
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq)
@@ -1456,6 +1461,7 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
+ s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
s->p[i].port.ops = &sc16is7xx_ops;
s->p[i].old_mctrl = 0;
s->p[i].port.line = sc16is7xx_alloc_line();
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d942ab152f5a..ad4f3567ff90 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -441,7 +441,7 @@ static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
- /* Overrrun error */
+ /* Overrun error */
flag = TTY_OVERRUN;
tup->uport.icount.overrun++;
dev_dbg(tup->uport.dev, "Got overrun errors\n");
@@ -1080,7 +1080,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->rx_in_progress = 1;
/*
- * Enable IE_RXS for the receive status interrupts like line errros.
+ * Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
* EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
@@ -1667,6 +1667,7 @@ static int __init tegra_uart_init(void)
node = of_find_matching_node(NULL, tegra_uart_of_match);
if (node)
match = of_match_node(tegra_uart_of_match, node);
+ of_node_put(node);
if (match)
cdata = match->data;
if (cdata)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 338ebadfd44b..12c87cd201a7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -97,9 +97,16 @@ static inline struct uart_port *uart_port_check(struct uart_state *state)
return state->uart_port;
}
-/*
- * This routine is used by the interrupt handler to schedule processing in
- * the software interrupt portion of the driver.
+/**
+ * uart_write_wakeup - schedule write processing
+ * @port: port to be processed
+ *
+ * This routine is used by the interrupt handler to schedule processing in the
+ * software interrupt portion of the driver. A driver is expected to call this
+ * function when the number of characters in the transmit buffer have dropped
+ * below a threshold.
+ *
+ * Locking: @port->lock should be held
*/
void uart_write_wakeup(struct uart_port *port)
{
@@ -327,13 +334,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
}
/**
- * uart_update_timeout - update per-port FIFO timeout.
- * @port: uart_port structure describing the port
- * @cflag: termios cflag value
- * @baud: speed of the port
+ * uart_update_timeout - update per-port frame timing information
+ * @port: uart_port structure describing the port
+ * @cflag: termios cflag value
+ * @baud: speed of the port
*
- * Set the port FIFO timeout value. The @cflag value should
- * reflect the actual hardware settings.
+ * Set the @port frame timing information from which the FIFO timeout value is
+ * derived. The @cflag value should reflect the actual hardware settings as
+ * number of bits, parity, stop bits and baud rate is taken into account here.
+ *
+ * Locking: caller is expected to take @port->lock
*/
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
@@ -343,35 +353,30 @@ uart_update_timeout(struct uart_port *port, unsigned int cflag,
u64 frame_time;
frame_time = (u64)size * NSEC_PER_SEC;
- size *= port->fifosize;
-
- /*
- * Figure the timeout to send the above number of bits.
- * Add .02 seconds of slop
- */
- port->timeout = (HZ * size) / baud + HZ/50;
port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
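
With the per-port timeout removed above, uart_update_timeout() only records the time one character frame takes on the wire. A quick illustrative calculation for a hypothetical 115200 baud, 8N1 port (10 bits per frame: start, eight data, stop; the real frame size comes from the termios cflag earlier in the function).

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t size = 10;		/* bits per frame for 8N1 */
	uint64_t baud = 115200;
	/* same rounding as DIV64_U64_ROUND_UP in the function above */
	uint64_t frame_time = (size * NSEC_PER_SEC + baud - 1) / baud;

	printf("frame_time = %llu ns (about 86.8 us per character)\n",
	       (unsigned long long)frame_time);
	return 0;
}
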
/**
- * uart_get_baud_rate - return baud rate for a particular port
- * @port: uart_port structure describing the port in question.
- * @termios: desired termios settings.
- * @old: old termios (or NULL)
- * @min: minimum acceptable baud rate
- * @max: maximum acceptable baud rate
+ * uart_get_baud_rate - return baud rate for a particular port
+ * @port: uart_port structure describing the port in question.
+ * @termios: desired termios settings
+ * @old: old termios (or %NULL)
+ * @min: minimum acceptable baud rate
+ * @max: maximum acceptable baud rate
+ *
+ * Decode the termios structure into a numeric baud rate, taking account of the
+ * magic 38400 baud rate (with spd_* flags), and mapping the %B0 rate to 9600
+ * baud.
*
- * Decode the termios structure into a numeric baud rate,
- * taking account of the magic 38400 baud rate (with spd_*
- * flags), and mapping the %B0 rate to 9600 baud.
+ * If the new baud rate is invalid, try the @old termios setting. If it's still
+ * invalid, we try 9600 baud.
*
- * If the new baud rate is invalid, try the old termios setting.
- * If it's still invalid, we try 9600 baud.
+ * The @termios structure is updated to reflect the baud rate we're actually
+ * going to be using. Don't do this for the case where B0 is requested ("hang
+ * up").
*
- * Update the @termios structure to reflect the baud rate
- * we're actually going to be using. Don't do this for the case
- * where B0 is requested ("hang up").
+ * Locking: caller dependent
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
@@ -456,11 +461,17 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
EXPORT_SYMBOL(uart_get_baud_rate);
/**
- * uart_get_divisor - return uart clock divisor
- * @port: uart_port structure describing the port.
- * @baud: desired baud rate
+ * uart_get_divisor - return uart clock divisor
+ * @port: uart_port structure describing the port
+ * @baud: desired baud rate
+ *
+ * Calculate the divisor (baud_base / baud) for the specified @baud,
+ * appropriately rounded.
*
- * Calculate the uart clock divisor for the port.
+ * If 38400 baud and custom divisor is selected, return the custom divisor
+ * instead.
+ *
+ * Locking: caller dependent
*/
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
@@ -1023,10 +1034,10 @@ static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
}
/**
- * uart_get_lsr_info - get line status register info
- * @tty: tty associated with the UART
- * @state: UART being queried
- * @value: returned modem value
+ * uart_get_lsr_info - get line status register info
+ * @tty: tty associated with the UART
+ * @state: UART being queried
+ * @value: returned modem value
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
@@ -1276,6 +1287,126 @@ static int uart_get_icount(struct tty_struct *tty,
return 0;
}
+#define SER_RS485_LEGACY_FLAGS (SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | \
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX | \
+ SER_RS485_TERMINATE_BUS)
+
+static int uart_check_rs485_flags(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 flags = rs485->flags;
+
+ /* Don't return -EINVAL for unsupported legacy flags */
+ flags &= ~SER_RS485_LEGACY_FLAGS;
+
+ /*
+ * For any bit outside of the legacy ones that is not supported by
+ * the driver, return -EINVAL.
+ */
+ if (flags & ~port->rs485_supported.flags)
+ return -EINVAL;
+
+ /* Asking for address w/o addressing mode? */
+ if (!(rs485->flags & SER_RS485_ADDRB) &&
+ (rs485->flags & (SER_RS485_ADDR_RECV|SER_RS485_ADDR_DEST)))
+ return -EINVAL;
+
+ /* Address given but not enabled? */
+ if (!(rs485->flags & SER_RS485_ADDR_RECV) && rs485->addr_recv)
+ return -EINVAL;
+ if (!(rs485->flags & SER_RS485_ADDR_DEST) && rs485->addr_dest)
+ return -EINVAL;
+
+ return 0;
+}
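
For reference, a hedged user-space view of the flag checks above: a TIOCSRS485 request that asks for receive-address filtering must also set SER_RS485_ADDRB, otherwise uart_check_rs485_flags() rejects it, and any flag outside the legacy set that the driver does not advertise fails the same way. The device path and address below are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS1", O_RDWR);	/* placeholder device */

	if (fd < 0)
		return 1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
		      SER_RS485_ADDRB | SER_RS485_ADDR_RECV;	/* filter on our own address */
	rs485.addr_recv = 0x15;		/* example address; needs SER_RS485_ADDR_RECV set */

	/* Fails with EINVAL if the driver's rs485_supported lacks these bits */
	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	return 0;
}
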
+
+static void uart_sanitize_serial_rs485_delays(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ if (!port->rs485_supported.delay_rts_before_send) {
+ if (rs485->delay_rts_before_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_before_send = 0;
+ } else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_before_send);
+ }
+
+ if (!port->rs485_supported.delay_rts_after_send) {
+ if (rs485->delay_rts_after_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_after_send = 0;
+ } else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_after_send);
+ }
+}
+
+static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 supported_flags = port->rs485_supported.flags;
+
+ if (!(rs485->flags & SER_RS485_ENABLED)) {
+ memset(rs485, 0, sizeof(*rs485));
+ return;
+ }
+
+ /* Pick sane settings if the user hasn't */
+ if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
+ !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+ }
+
+ rs485->flags &= supported_flags;
+
+ uart_sanitize_serial_rs485_delays(port, rs485);
+
+ /* Return clean padding area to userspace */
+ memset(rs485->padding0, 0, sizeof(rs485->padding0));
+ memset(rs485->padding1, 0, sizeof(rs485->padding1));
+}
+
+static void uart_set_rs485_termination(struct uart_port *port,
+ const struct serial_rs485 *rs485)
+{
+ if (!(rs485->flags & SER_RS485_ENABLED))
+ return;
+
+ gpiod_set_value_cansleep(port->rs485_term_gpio,
+ !!(rs485->flags & SER_RS485_TERMINATE_BUS));
+}
+
+int uart_rs485_config(struct uart_port *port)
+{
+ struct serial_rs485 *rs485 = &port->rs485;
+ int ret;
+
+ uart_sanitize_serial_rs485(port, rs485);
+ uart_set_rs485_termination(port, rs485);
+
+ ret = port->rs485_config(port, NULL, rs485);
+ if (ret)
+ memset(rs485, 0, sizeof(*rs485));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uart_rs485_config);
+
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
{
@@ -1292,7 +1423,7 @@ static int uart_get_rs485_config(struct uart_port *port,
return 0;
}
-static int uart_set_rs485_config(struct uart_port *port,
+static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
struct serial_rs485 __user *rs485_user)
{
struct serial_rs485 rs485;
@@ -1305,34 +1436,14 @@ static int uart_set_rs485_config(struct uart_port *port,
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
- /* pick sane settings if the user hasn't */
- if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
- dev_warn_ratelimited(port->dev,
- "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
- port->name, port->line);
- rs485.flags |= SER_RS485_RTS_ON_SEND;
- rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
- }
-
- if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay before sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_before_send);
- }
-
- if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay after sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_after_send);
- }
- /* Return clean padding area to userspace */
- memset(rs485.padding, 0, sizeof(rs485.padding));
+ ret = uart_check_rs485_flags(port, &rs485);
+ if (ret)
+ return ret;
+ uart_sanitize_serial_rs485(port, &rs485);
+ uart_set_rs485_termination(port, &rs485);
spin_lock_irqsave(&port->lock, flags);
- ret = port->rs485_config(port, &rs485);
+ ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret)
port->rs485 = rs485;
spin_unlock_irqrestore(&port->lock, flags);
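
A hedged sketch of the driver side implied by the reworked core above, using invented foo_* names: the driver publishes its capabilities through port->rs485_supported (as the omap and sc16is7xx hunks in this diff do) and implements the new three-argument rs485_config() callback. The termios pointer may be NULL when the core applies an initial configuration through uart_rs485_config() above.

#include <linux/serial_core.h>

/* invented hardware hook, for illustration only */
static void foo_set_rts_mode(struct uart_port *port, bool rs485_on) { }

static int foo_config_rs485(struct uart_port *port, struct ktermios *termios,
			    struct serial_rs485 *rs485)
{
	/* flags and delays arrive already checked and clamped by serial_core */
	foo_set_rts_mode(port, rs485->flags & SER_RS485_ENABLED);
	return 0;
}

static const struct serial_rs485 foo_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
	.delay_rts_after_send = 1,	/* non-zero: this delay is honoured */
};

static void foo_init_port(struct uart_port *port)
{
	port->rs485_config = foo_config_rs485;
	port->rs485_supported = foo_rs485_supported;
}
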
@@ -1441,6 +1552,10 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
if (ret != -ENOIOCTLCMD)
goto out;
+ /* rs485_config requires more locking than others */
+ if (cmd == TIOCGRS485)

+ down_write(&tty->termios_rwsem);
+
mutex_lock(&port->mutex);
uport = uart_port_check(state);
@@ -1464,7 +1579,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
break;
case TIOCSRS485:
- ret = uart_set_rs485_config(uport, uarg);
+ ret = uart_set_rs485_config(tty, uport, uarg);
break;
case TIOCSISO7816:
@@ -1481,6 +1596,8 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
}
out_up:
mutex_unlock(&port->mutex);
+ if (cmd == TIOCSRS485)
+ up_write(&tty->termios_rwsem);
out:
return ret;
}
@@ -1628,7 +1745,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
- unsigned long char_time, expire;
+ unsigned long char_time, expire, fifo_timeout;
port = uart_port_ref(state);
if (!port)
@@ -1658,12 +1775,13 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
- * takes longer than port->timeout, this is probably due to a
+ * takes longer than FIFO timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
- * 2*port->timeout.
+ * 2 * FIFO timeout.
*/
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
+ fifo_timeout = uart_fifo_timeout(port);
+ if (timeout == 0 || timeout > 2 * fifo_timeout)
+ timeout = 2 * fifo_timeout;
}
expire = jiffies + timeout;
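
The clamp above now comes from uart_fifo_timeout() rather than the removed port->timeout field. Roughly, that is the time a full FIFO needs to drain at the recorded frame_time, plus the 0.02 s of slop the old code added. The sketch below only illustrates that arithmetic with made-up numbers; it is not the helper's actual body.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t frame_time_ns = 86806;	/* one 8N1 character at 115200 baud */
	unsigned int fifosize = 64;	/* hypothetical FIFO depth */

	uint64_t drain_ns = frame_time_ns * fifosize;		/* ~5.6 ms of queued data */
	uint64_t timeout_ms = drain_ns / 1000000 + 20;		/* plus the 0.02 s slop */

	printf("FIFO drain ~%llu ms, wait clamp uses ~%llu ms\n",
	       (unsigned long long)(drain_ns / 1000000),
	       (unsigned long long)timeout_ms);
	return 0;
}
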
@@ -1941,11 +2059,6 @@ static int uart_proc_show(struct seq_file *m, void *v)
}
#endif
-static inline bool uart_console_enabled(struct uart_port *port)
-{
- return uart_console(port) && (port->cons->flags & CON_ENABLED);
-}
-
static void uart_port_spin_lock_init(struct uart_port *port)
{
spin_lock_init(&port->lock);
@@ -1954,11 +2067,11 @@ static void uart_port_spin_lock_init(struct uart_port *port)
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
- * uart_console_write - write a console message to a serial port
- * @port: the port to write the message
- * @s: array of characters
- * @count: number of characters in string to write
- * @putchar: function to write character to port
+ * uart_console_write - write a console message to a serial port
+ * @port: the port to write the message
+ * @s: array of characters
+ * @count: number of characters in string to write
+ * @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
@@ -1974,10 +2087,15 @@ void uart_console_write(struct uart_port *port, const char *s,
}
EXPORT_SYMBOL_GPL(uart_console_write);
-/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
+/**
+ * uart_get_console - get uart port for console
+ * @ports: ports to search in
+ * @nr: number of @ports
+ * @co: console to search for
+ * Returns: uart_port for the console @co
+ *
+ * Check whether an invalid uart number has been specified (as @co->index), and
+ * if so, search for the first available port that does have console support.
*/
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
@@ -1997,24 +2115,23 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
- * uart_parse_earlycon - Parse earlycon options
- * @p: ptr to 2nd field (ie., just beyond '<name>,')
- * @iotype: ptr for decoded iotype (out)
- * @addr: ptr for decoded mapbase/iobase (out)
- * @options: ptr for <options> field; NULL if not present (out)
- *
- * Decodes earlycon kernel command line parameters of the form
- * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
- * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * uart_parse_earlycon - Parse earlycon options
+ * @p: ptr to 2nd field (ie., just beyond '<name>,')
+ * @iotype: ptr for decoded iotype (out)
+ * @addr: ptr for decoded mapbase/iobase (out)
+ * @options: ptr for <options> field; %NULL if not present (out)
*
- * The optional form
+ * Decodes earlycon kernel command line parameters of the form:
+ * * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
- * earlycon=<name>,0x<addr>,<options>
- * console=<name>,0x<addr>,<options>
+ * The optional form:
+ * * earlycon=<name>,0x<addr>,<options>
+ * * console=<name>,0x<addr>,<options>
*
- * is also accepted; the returned @iotype will be UPIO_MEM.
+ * is also accepted; the returned @iotype will be %UPIO_MEM.
*
- * Returns 0 on success or -EINVAL on failure
+ * Returns: 0 on success or -%EINVAL on failure
*/
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options)
@@ -2059,16 +2176,16 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
EXPORT_SYMBOL_GPL(uart_parse_earlycon);
/**
- * uart_parse_options - Parse serial port baud/parity/bits/flow control.
- * @options: pointer to option string
- * @baud: pointer to an 'int' variable for the baud rate.
- * @parity: pointer to an 'int' variable for the parity.
- * @bits: pointer to an 'int' variable for the number of data bits.
- * @flow: pointer to an 'int' variable for the flow control character.
+ * uart_parse_options - Parse serial port baud/parity/bits/flow control.
+ * @options: pointer to option string
+ * @baud: pointer to an 'int' variable for the baud rate.
+ * @parity: pointer to an 'int' variable for the parity.
+ * @bits: pointer to an 'int' variable for the number of data bits.
+ * @flow: pointer to an 'int' variable for the flow control character.
*
- * uart_parse_options decodes a string containing the serial console
- * options. The format of the string is <baud><parity><bits><flow>,
- * eg: 115200n8r
+ * uart_parse_options() decodes a string containing the serial console
+ * options. The format of the string is <baud><parity><bits><flow>,
+ * eg: 115200n8r
*/
void
uart_parse_options(const char *options, int *baud, int *parity,
@@ -2089,13 +2206,13 @@ uart_parse_options(const char *options, int *baud, int *parity,
EXPORT_SYMBOL_GPL(uart_parse_options);
/**
- * uart_set_options - setup the serial console parameters
- * @port: pointer to the serial ports uart_port structure
- * @co: console pointer
- * @baud: baud rate
- * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
- * @bits: number of data bits
- * @flow: flow control character - 'r' (rts)
+ * uart_set_options - setup the serial console parameters
+ * @port: pointer to the serial ports uart_port structure
+ * @co: console pointer
+ * @baud: baud rate
+ * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
+ * @bits: number of data bits
+ * @flow: flow control character - 'r' (rts)
*/
int
uart_set_options(struct uart_port *port, struct console *co,
@@ -2582,17 +2699,19 @@ static const struct tty_port_operations uart_port_ops = {
};
/**
- * uart_register_driver - register a driver with the uart core layer
- * @drv: low level driver structure
+ * uart_register_driver - register a driver with the uart core layer
+ * @drv: low level driver structure
+ *
+ * Register a uart driver with the core driver. We in turn register with the
+ * tty layer, and initialise the core driver per-port state.
*
- * Register a uart driver with the core driver. We in turn register
- * with the tty layer, and initialise the core driver per-port state.
+ * We have a proc file in /proc/tty/driver which is named after the normal
+ * driver.
*
- * We have a proc file in /proc/tty/driver which is named after the
- * normal driver.
+ * @drv->port should be %NULL, and the per-port structures should be registered
+ * using uart_add_one_port() after this call has succeeded.
*
- * drv->port should be NULL, and the per-port structures should be
- * registered using uart_add_one_port after this call has succeeded.
+ * Locking: none, Interrupts: enabled
*/
int uart_register_driver(struct uart_driver *drv)
{
@@ -2656,13 +2775,14 @@ out:
EXPORT_SYMBOL(uart_register_driver);
/**
- * uart_unregister_driver - remove a driver from the uart core layer
- * @drv: low level driver structure
+ * uart_unregister_driver - remove a driver from the uart core layer
+ * @drv: low level driver structure
*
- * Remove all references to a driver from the core driver. The low
- * level driver must have removed all its ports via the
- * uart_remove_one_port() if it registered them with uart_add_one_port().
- * (ie, drv->port == NULL)
+ * Remove all references to a driver from the core driver. The low level
+ * driver must have removed all its ports via the uart_remove_one_port() if it
+ * registered them with uart_add_one_port(). (I.e. @drv->port is %NULL.)
+ *
+ * Locking: none, Interrupts: enabled
*/
void uart_unregister_driver(struct uart_driver *drv)
{
@@ -2911,16 +3031,15 @@ static const struct attribute_group tty_dev_attr_group = {
};
/**
- * uart_add_one_port - attach a driver-defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure to use for this port.
+ * uart_add_one_port - attach a driver-defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure to use for this port.
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This allows the driver to register its own uart_port structure
- * with the core driver. The main purpose is to allow the low
- * level uart drivers to expand uart_port, rather than having yet
- * more levels of structures.
+ * This allows the driver @drv to register its own uart_port structure with the
+ * core driver. The main purpose is to allow the low level uart drivers to
+ * expand uart_port, rather than having yet more levels of structures.
*/
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3015,15 +3134,14 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
EXPORT_SYMBOL(uart_add_one_port);
/**
- * uart_remove_one_port - detach a driver defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure for this port
+ * uart_remove_one_port - detach a driver defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure for this port
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This unhooks (and hangs up) the specified port structure from the
- * core driver. No further calls will be made to the low-level code
- * for this port.
+ * This unhooks (and hangs up) the specified port structure from the core
+ * driver. No further calls will be made to the low-level code for this port.
*/
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3095,8 +3213,13 @@ out:
}
EXPORT_SYMBOL(uart_remove_one_port);
-/*
- * Are the two ports equivalent?
+/**
+ * uart_match_port - are the two ports equivalent?
+ * @port1: first port
+ * @port2: second port
+ *
+ * This utility function can be used to determine whether two uart_port
+ * structures describe the same port.
*/
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2)
@@ -3124,11 +3247,11 @@ bool uart_match_port(const struct uart_port *port1,
EXPORT_SYMBOL(uart_match_port);
/**
- * uart_handle_dcd_change - handle a change of carrier detect state
- * @uport: uart_port structure for the open port
- * @status: new carrier detect status, nonzero if active
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
@@ -3159,11 +3282,11 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
- * uart_handle_cts_change - handle a change of clear-to-send state
- * @uport: uart_port structure for the open port
- * @status: new clear to send status, nonzero if active
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
{
@@ -3234,15 +3357,15 @@ static void uart_sysrq_on(struct work_struct *w)
static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
/**
- * uart_try_toggle_sysrq - Enables SysRq from serial line
- * @port: uart_port structure where char(s) after BREAK met
- * @ch: new character in the sequence after received BREAK
+ * uart_try_toggle_sysrq - Enables SysRq from serial line
+ * @port: uart_port structure where char(s) after BREAK met
+ * @ch: new character in the sequence after received BREAK
*
- * Enables magic SysRq when the required sequence is met on port
- * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
+ * Enables magic SysRq when the required sequence is met on port
+ * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
*
- * Returns false if @ch is out of enabling sequence and should be
- * handled some other way, true if @ch was consumed.
+ * Returns: %false if @ch is out of enabling sequence and should be
+ * handled some other way, %true if @ch was consumed.
*/
bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
{
@@ -3294,6 +3417,8 @@ int uart_get_rs485_mode(struct uart_port *port)
rs485conf->delay_rts_after_send = 0;
}
+ uart_sanitize_serial_rs485_delays(port, rs485conf);
+
/*
* Clear full-duplex and enabled flags, set RTS polarity to active high
* to get to a defined state with the following properties:
@@ -3326,10 +3451,20 @@ int uart_get_rs485_mode(struct uart_port *port)
port->rs485_term_gpio = NULL;
return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
}
+ if (port->rs485_term_gpio)
+ port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
+/* Compile-time assertions for serial_rs485 layout */
+static_assert(offsetof(struct serial_rs485, padding) ==
+ (offsetof(struct serial_rs485, delay_rts_after_send) + sizeof(__u32)));
+static_assert(offsetof(struct serial_rs485, padding1) ==
+ offsetof(struct serial_rs485, padding[1]));
+static_assert((offsetof(struct serial_rs485, padding[4]) + sizeof(__u32)) ==
+ sizeof(struct serial_rs485));
+
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
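The reworked kernel-doc above spells out how the console option helpers fit together. As a hedged illustration only (not part of this patch), a driver's console ->setup() callback typically chains uart_get_console(), uart_parse_options() and uart_set_options(); the my_ports array and MY_NR_PORTS below are hypothetical placeholders.

static int __init my_console_setup(struct console *co, char *options)
{
        struct uart_port *port;
        int baud = 115200, bits = 8, parity = 'n', flow = 'n';

        /* Fall back to the first usable port if co->index is invalid. */
        port = uart_get_console(my_ports, MY_NR_PORTS, co);

        /* Decode "115200n8r"-style option strings as documented above. */
        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);

        return uart_set_options(port, co, baud, parity, bits, flow);
}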
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 1663b3afc3a0..7d5aaa8d422b 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -42,6 +42,13 @@ static bool mctrl_gpio_flags_is_dir_out(unsigned int idx)
return mctrl_gpios_desc[idx].flags & GPIOD_FLAGS_BIT_DIR_OUT;
}
+/**
+ * mctrl_gpio_set - set gpios according to mctrl state
+ * @gpios: gpios to set
+ * @mctrl: state to set
+ *
+ * Set the gpios according to the mctrl state.
+ */
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
@@ -63,6 +70,12 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_set);
+/**
+ * mctrl_gpio_to_gpiod - obtain gpio_desc of modem line index
+ * @gpios: gpios to look into
+ * @gidx: index of the modem line
+ * Returns: the gpio_desc structure associated to the modem line index
+ */
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
@@ -73,6 +86,14 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
+/**
+ * mctrl_gpio_get - update mctrl with the gpios values.
+ * @gpios: gpios to get the info from
+ * @mctrl: mctrl to set
+ * Returns: modified mctrl (the same value as in @mctrl)
+ *
+ * Update mctrl with the gpios values.
+ */
unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
@@ -189,6 +210,17 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
return IRQ_HANDLED;
}
+/**
+ * mctrl_gpio_init - initialize uart gpios
+ * @port: port to initialize gpios for
+ * @idx: index of the gpio in the @port's device
+ *
+ * This will get the {cts,rts,...}-gpios from device tree if they are present
+ * and request them, set direction etc, and return an allocated structure.
+ * `devm_*` functions are used, so there's no need to call mctrl_gpio_free().
+ * As this sets up the irq handling, make sure to not handle changes to the
+ * gpio input lines in your driver, too.
+ */
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
{
struct mctrl_gpios *gpios;
@@ -235,6 +267,14 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_init);
+/**
+ * mctrl_gpio_free - explicitly free uart gpios
+ * @dev: uart port's device
+ * @gpios: gpios structure to be freed
+ *
+ * This will free the requested gpios in mctrl_gpio_init(). As `devm_*`
+ * functions are used, there's generally no need to call this function.
+ */
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -253,6 +293,10 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_free);
+/**
+ * mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines
+ * @gpios: gpios to enable
+ */
void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -278,6 +322,10 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
+/**
+ * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
+ * @gpios: gpios to disable
+ */
void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
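The new kernel-doc above documents the mctrl_gpio helper API. As a hedged sketch (struct my_port, to_my_port() and the probe snippet are hypothetical, not taken from this patch), a UART driver normally wires these helpers into its uart_ops roughly like this:

static void my_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct my_port *s = to_my_port(port);

        /* Drive the rts/dtr/... GPIOs from the requested modem-control state. */
        mctrl_gpio_set(s->gpios, mctrl);
}

static unsigned int my_get_mctrl(struct uart_port *port)
{
        struct my_port *s = to_my_port(port);
        unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

        /* Overlay the GPIO-backed input lines on top of the default state. */
        return mctrl_gpio_get(s->gpios, &mctrl);
}

In probe, s->gpios = mctrl_gpio_init(&s->port, 0) requests the {cts,rts,...}-gpios and sets up their interrupts; since devm_* functions are used, no explicit mctrl_gpio_free() is needed.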
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index c0869b080cc3..5c3a07546a58 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -4,16 +4,6 @@
* Copyright (C) 2018 Paul Walmsley <paul@pwsan.com>
* Copyright (C) 2018-2019 SiFive
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based partially on:
* - drivers/tty/serial/pxa.c
* - drivers/tty/serial/amba-pl011.c
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 1b0da603ab54..cce42f4c9bc2 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -17,7 +17,6 @@
#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index b7b44f4050d4..2c85dbf165c4 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -35,6 +35,75 @@
#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
+
+/* Register offsets */
+static struct stm32_usart_info stm32f4_info = {
+ .ofs = {
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ },
+ .cfg = {
+ .uart_enable_bit = 13,
+ .has_7bits_data = false,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32f7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32h7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .has_wakeup = true,
+ .has_fifo = true,
+ .fifosize = 16,
+ }
+};
+
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
@@ -72,6 +141,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr3 |= USART_CR3_DEM;
over8 = *cr1 & USART_CR1_OVER8;
+ *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+
if (over8)
rs485_deat_dedt = delay_ADE * baud * 8;
else
@@ -97,7 +168,7 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr1 |= rs485_deat_dedt;
}
-static int stm32_usart_config_rs485(struct uart_port *port,
+static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1375,6 +1446,13 @@ static void stm32_usart_deinit_port(struct stm32_port *stm32port)
clk_disable_unprepare(stm32port->clk);
}
+static const struct serial_rs485 stm32_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int stm32_usart_init_port(struct stm32_port *stm32port,
struct platform_device *pdev)
{
@@ -1394,6 +1472,7 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
+ port->rs485_supported = stm32_rs485_supported;
ret = stm32_usart_init_rs485(port, pdev);
if (ret)
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index ee69c203b926..0ec41a732c88 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -38,74 +38,6 @@ struct stm32_usart_info {
#define UNDEF_REG 0xff
-/* Register offsets */
-struct stm32_usart_info stm32f4_info = {
- .ofs = {
- .isr = 0x00,
- .rdr = 0x04,
- .tdr = 0x04,
- .brr = 0x08,
- .cr1 = 0x0c,
- .cr2 = 0x10,
- .cr3 = 0x14,
- .gtpr = 0x18,
- .rtor = UNDEF_REG,
- .rqr = UNDEF_REG,
- .icr = UNDEF_REG,
- },
- .cfg = {
- .uart_enable_bit = 13,
- .has_7bits_data = false,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32f7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32h7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .has_wakeup = true,
- .has_fifo = true,
- .fifosize = 16,
- }
-};
-
/* USART_SR (F4) / USART_ISR (F7) */
#define USART_SR_PE BIT(0)
#define USART_SR_FE BIT(1)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index fff50b5b82eb..84d545e5a8c7 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1249,8 +1249,6 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up)
#ifdef CONFIG_SERIAL_SUNSU_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -1268,7 +1266,7 @@ static void wait_for_xmitr(struct uart_sunsu_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
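For context on the sunsu change above: the open-coded BOTH_EMPTY test is replaced by the shared uart_lsr_tx_empty() helper. The helper lives outside this hunk; it is expected to look roughly like the hedged sketch below (based on include/linux/serial.h in this series, not copied from this diff).

#define UART_LSR_BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

/* True once both the transmitter and the holding register are empty. */
static inline bool uart_lsr_tx_empty(u16 lsr)
{
        return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
}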
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 6000853973c1..3cc9ef08455c 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1137,6 +1137,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
/* No compatible property, so try the name. */
soc_string = np->name;
+ of_node_put(np);
+
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
deleted file mode 100644
index e0bf003ca3a1..000000000000
--- a/drivers/tty/serial/vr41xx_siu.c
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series Serial Interface Unit.
- *
- * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * Based on drivers/serial/8250.c, by Russell King.
- */
-
-#include <linux/console.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_reg.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-
-#include <linux/io.h>
-#include <asm/vr41xx/siu.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#define SIU_BAUD_BASE 1152000
-#define SIU_MAJOR 204
-#define SIU_MINOR_BASE 82
-
-#define RX_MAX_COUNT 256
-#define TX_MAX_COUNT 15
-
-#define SIUIRSEL 0x08
- #define TMICMODE 0x20
- #define TMICTX 0x10
- #define IRMSEL 0x0c
- #define IRMSEL_HP 0x08
- #define IRMSEL_TEMIC 0x04
- #define IRMSEL_SHARP 0x00
- #define IRUSESEL 0x02
- #define SIRSEL 0x01
-
-static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
- [0 ... SIU_PORTS_MAX-1] = {
- .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
- .irq = 0,
- },
-};
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-static uint8_t lsr_break_flag[SIU_PORTS_MAX];
-#endif
-
-#define siu_read(port, offset) readb((port)->membase + (offset))
-#define siu_write(port, offset, value) writeb((value), (port)->membase + (offset))
-
-void vr41xx_select_siu_interface(siu_interface_t interface)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (interface == SIU_INTERFACE_IRDA)
- irsel |= SIRSEL;
- else
- irsel &= ~SIRSEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
-
-void vr41xx_use_irda(irda_use_t use)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (use == FIR_USE_IRDA)
- irsel |= IRUSESEL;
- else
- irsel &= ~IRUSESEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_use_irda);
-
-void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- irsel &= ~(IRMSEL | TMICTX | TMICMODE);
- switch (module) {
- case SHARP_IRDA:
- irsel |= IRMSEL_SHARP;
- break;
- case TEMIC_IRDA:
- irsel |= IRMSEL_TEMIC | TMICMODE;
- if (speed == IRDA_TX_4MBPS)
- irsel |= TMICTX;
- break;
- case HP_IRDA:
- irsel |= IRMSEL_HP;
- break;
- default:
- break;
- }
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
-
-static inline void siu_clear_fifo(struct uart_port *port)
-{
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO);
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- siu_write(port, UART_FCR, 0);
-}
-
-static inline unsigned long siu_port_size(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return 11UL;
- case PORT_VR41XX_DSIU:
- return 8UL;
- }
-
- return 0;
-}
-
-static inline unsigned int siu_check_type(struct uart_port *port)
-{
- if (port->line == 0)
- return PORT_VR41XX_SIU;
- if (port->line == 1 && port->irq)
- return PORT_VR41XX_DSIU;
-
- return PORT_UNKNOWN;
-}
-
-static inline const char *siu_type_name(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return "SIU";
- case PORT_VR41XX_DSIU:
- return "DSIU";
- }
-
- return NULL;
-}
-
-static unsigned int siu_tx_empty(struct uart_port *port)
-{
- uint8_t lsr;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_TEMT)
- return TIOCSER_TEMT;
-
- return 0;
-}
-
-static void siu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- uint8_t mcr = 0;
-
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- siu_write(port, UART_MCR, mcr);
-}
-
-static unsigned int siu_get_mctrl(struct uart_port *port)
-{
- uint8_t msr;
- unsigned int mctrl = 0;
-
- msr = siu_read(port, UART_MSR);
- if (msr & UART_MSR_DCD)
- mctrl |= TIOCM_CAR;
- if (msr & UART_MSR_RI)
- mctrl |= TIOCM_RNG;
- if (msr & UART_MSR_DSR)
- mctrl |= TIOCM_DSR;
- if (msr & UART_MSR_CTS)
- mctrl |= TIOCM_CTS;
-
- return mctrl;
-}
-
-static void siu_stop_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_start_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_stop_rx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_RLSI;
- siu_write(port, UART_IER, ier);
-
- port->read_status_mask &= ~UART_LSR_DR;
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_enable_ms(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_break_ctl(struct uart_port *port, int ctl)
-{
- unsigned long flags;
- uint8_t lcr;
-
- spin_lock_irqsave(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- if (ctl == -1)
- lcr |= UART_LCR_SBC;
- else
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static inline void receive_chars(struct uart_port *port, uint8_t *status)
-{
- uint8_t lsr, ch;
- char flag;
- int max_count = RX_MAX_COUNT;
-
- lsr = *status;
-
- do {
- ch = siu_read(port, UART_RX);
- port->icount.rx++;
- flag = TTY_NORMAL;
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
- lsr |= lsr_break_flag[port->line];
- lsr_break_flag[port->line] = 0;
-#endif
- if (unlikely(lsr & (UART_LSR_BI | UART_LSR_FE |
- UART_LSR_PE | UART_LSR_OE))) {
- if (lsr & UART_LSR_BI) {
- lsr &= ~(UART_LSR_FE | UART_LSR_PE);
- port->icount.brk++;
-
- if (uart_handle_break(port))
- goto ignore_char;
- }
-
- if (lsr & UART_LSR_FE)
- port->icount.frame++;
- if (lsr & UART_LSR_PE)
- port->icount.parity++;
- if (lsr & UART_LSR_OE)
- port->icount.overrun++;
-
- lsr &= port->read_status_mask;
- if (lsr & UART_LSR_BI)
- flag = TTY_BREAK;
- if (lsr & UART_LSR_FE)
- flag = TTY_FRAME;
- if (lsr & UART_LSR_PE)
- flag = TTY_PARITY;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
- ignore_char:
- lsr = siu_read(port, UART_LSR);
- } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
-
- tty_flip_buffer_push(&port->state->port);
-
- *status = lsr;
-}
-
-static inline void check_modem_status(struct uart_port *port)
-{
- uint8_t msr;
-
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_ANY_DELTA) == 0)
- return;
- if (msr & UART_MSR_DDCD)
- uart_handle_dcd_change(port, msr & UART_MSR_DCD);
- if (msr & UART_MSR_TERI)
- port->icount.rng++;
- if (msr & UART_MSR_DDSR)
- port->icount.dsr++;
- if (msr & UART_MSR_DCTS)
- uart_handle_cts_change(port, msr & UART_MSR_CTS);
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-}
-
-static inline void transmit_chars(struct uart_port *port)
-{
- struct circ_buf *xmit;
- int max_count = TX_MAX_COUNT;
-
- xmit = &port->state->xmit;
-
- if (port->x_char) {
- siu_write(port, UART_TX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- siu_stop_tx(port);
- return;
- }
-
- do {
- siu_write(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (max_count-- > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- siu_stop_tx(port);
-}
-
-static irqreturn_t siu_interrupt(int irq, void *dev_id)
-{
- struct uart_port *port;
- uint8_t iir, lsr;
-
- port = (struct uart_port *)dev_id;
-
- iir = siu_read(port, UART_IIR);
- if (iir & UART_IIR_NO_INT)
- return IRQ_NONE;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_DR)
- receive_chars(port, &lsr);
-
- check_modem_status(port);
-
- if (lsr & UART_LSR_THRE)
- transmit_chars(port);
-
- return IRQ_HANDLED;
-}
-
-static int siu_startup(struct uart_port *port)
-{
- int retval;
-
- if (port->membase == NULL)
- return -ENODEV;
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- if (siu_read(port, UART_LSR) == 0xff)
- return -ENODEV;
-
- retval = request_irq(port->irq, siu_interrupt, 0, siu_type_name(port), port);
- if (retval)
- return retval;
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_enable_dsiuint(DSIUINT_ALL);
-
- siu_write(port, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irq(&port->lock);
- siu_set_mctrl(port, port->mctrl);
- spin_unlock_irq(&port->lock);
-
- siu_write(port, UART_IER, UART_IER_RLSI | UART_IER_RDI);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- return 0;
-}
-
-static void siu_shutdown(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t lcr;
-
- siu_write(port, UART_IER, 0);
-
- spin_lock_irqsave(&port->lock, flags);
-
- port->mctrl &= ~TIOCM_OUT2;
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_RX);
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_disable_dsiuint(DSIUINT_ALL);
-
- free_irq(port->irq, port);
-}
-
-static void siu_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
-{
- tcflag_t c_cflag, c_iflag;
- uint8_t lcr, fcr, ier;
- unsigned int baud, quot;
- unsigned long flags;
-
- c_cflag = new->c_cflag;
- lcr = UART_LCR_WLEN(tty_get_char_size(c_cflag));
-
- if (c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
- if (c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
- if ((c_cflag & PARODD) != PARODD)
- lcr |= UART_LCR_EPAR;
- if (c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
-
- spin_lock_irqsave(&port->lock, flags);
-
- uart_update_timeout(port, c_cflag, baud);
-
- c_iflag = new->c_iflag;
-
- port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
- if (c_iflag & INPCK)
- port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_LSR_BI;
-
- port->ignore_status_mask = 0;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & IGNBRK) {
- port->ignore_status_mask |= UART_LSR_BI;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_OE;
- }
-
- if ((c_cflag & CREAD) == 0)
- port->ignore_status_mask |= UART_LSR_DR;
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(port, c_cflag))
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- siu_write(port, UART_LCR, lcr | UART_LCR_DLAB);
-
- siu_write(port, UART_DLL, (uint8_t)quot);
- siu_write(port, UART_DLM, (uint8_t)(quot >> 8));
-
- siu_write(port, UART_LCR, lcr);
-
- siu_write(port, UART_FCR, fcr);
-
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
-{
- switch (state) {
- case 0:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_supply_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_supply_clock(DSIU_CLOCK);
- break;
- }
- break;
- case 3:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_mask_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_mask_clock(DSIU_CLOCK);
- break;
- }
- break;
- }
-}
-
-static const char *siu_type(struct uart_port *port)
-{
- return siu_type_name(port);
-}
-
-static void siu_release_port(struct uart_port *port)
-{
- unsigned long size;
-
- if (port->flags & UPF_IOREMAP) {
- iounmap(port->membase);
- port->membase = NULL;
- }
-
- size = siu_port_size(port);
- release_mem_region(port->mapbase, size);
-}
-
-static int siu_request_port(struct uart_port *port)
-{
- unsigned long size;
- struct resource *res;
-
- size = siu_port_size(port);
- res = request_mem_region(port->mapbase, size, siu_type_name(port));
- if (res == NULL)
- return -EBUSY;
-
- if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap(port->mapbase, size);
- if (port->membase == NULL) {
- release_resource(res);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void siu_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = siu_check_type(port);
- (void)siu_request_port(port);
- }
-}
-
-static int siu_verify_port(struct uart_port *port, struct serial_struct *serial)
-{
- if (port->type != PORT_VR41XX_SIU && port->type != PORT_VR41XX_DSIU)
- return -EINVAL;
- if (port->irq != serial->irq)
- return -EINVAL;
- if (port->iotype != serial->io_type)
- return -EINVAL;
- if (port->mapbase != (unsigned long)serial->iomem_base)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct uart_ops siu_uart_ops = {
- .tx_empty = siu_tx_empty,
- .set_mctrl = siu_set_mctrl,
- .get_mctrl = siu_get_mctrl,
- .stop_tx = siu_stop_tx,
- .start_tx = siu_start_tx,
- .stop_rx = siu_stop_rx,
- .enable_ms = siu_enable_ms,
- .break_ctl = siu_break_ctl,
- .startup = siu_startup,
- .shutdown = siu_shutdown,
- .set_termios = siu_set_termios,
- .pm = siu_pm,
- .type = siu_type,
- .release_port = siu_release_port,
- .request_port = siu_request_port,
- .config_port = siu_config_port,
- .verify_port = siu_verify_port,
-};
-
-static int siu_init_ports(struct platform_device *pdev)
-{
- struct uart_port *port;
- struct resource *res;
- int *type = dev_get_platdata(&pdev->dev);
- int i;
-
- if (!type)
- return 0;
-
- port = siu_uart_ports;
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port->type = type[i];
- if (port->type == PORT_UNKNOWN)
- continue;
- port->irq = platform_get_irq(pdev, i);
- port->uartclk = SIU_BAUD_BASE * 16;
- port->fifosize = 16;
- port->regshift = 0;
- port->iotype = UPIO_MEM;
- port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
- port->line = i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- port->mapbase = res->start;
- port++;
- }
-
- return i;
-}
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void wait_for_xmitr(struct uart_port *port)
-{
- int timeout = 10000;
- uint8_t lsr, msr;
-
- do {
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_BI)
- lsr_break_flag[port->line] = UART_LSR_BI;
-
- if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
- break;
- } while (timeout-- > 0);
-
- if (port->flags & UPF_CONS_FLOW) {
- timeout = 1000000;
-
- do {
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_CTS) != 0)
- break;
- } while (timeout-- > 0);
- }
-}
-
-static void siu_console_putchar(struct uart_port *port, unsigned char ch)
-{
- wait_for_xmitr(port);
- siu_write(port, UART_TX, ch);
-}
-
-static void siu_console_write(struct console *con, const char *s, unsigned count)
-{
- struct uart_port *port;
- uint8_t ier;
-
- port = &siu_uart_ports[con->index];
-
- ier = siu_read(port, UART_IER);
- siu_write(port, UART_IER, 0);
-
- uart_console_write(port, s, count, siu_console_putchar);
-
- wait_for_xmitr(port);
- siu_write(port, UART_IER, ier);
-}
-
-static int __init siu_console_setup(struct console *con, char *options)
-{
- struct uart_port *port;
- int baud = 9600;
- int parity = 'n';
- int bits = 8;
- int flow = 'n';
-
- if (con->index >= SIU_PORTS_MAX)
- con->index = 0;
-
- port = &siu_uart_ports[con->index];
- if (port->membase == NULL) {
- if (port->mapbase == 0)
- return -ENODEV;
- port->membase = ioremap(port->mapbase, siu_port_size(port));
- }
-
- if (port->type == PORT_VR41XX_SIU)
- vr41xx_select_siu_interface(SIU_INTERFACE_RS232C);
-
- if (options != NULL)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, con, baud, parity, bits, flow);
-}
-
-static struct uart_driver siu_uart_driver;
-
-static struct console siu_console = {
- .name = "ttyVR",
- .write = siu_console_write,
- .device = uart_console_device,
- .setup = siu_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &siu_uart_driver,
-};
-
-static int siu_console_init(void)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- }
-
- register_console(&siu_console);
-
- return 0;
-}
-
-console_initcall(siu_console_init);
-
-void __init vr41xx_siu_early_setup(struct uart_port *port)
-{
- if (port->type == PORT_UNKNOWN)
- return;
-
- siu_uart_ports[port->line].line = port->line;
- siu_uart_ports[port->line].type = port->type;
- siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
- siu_uart_ports[port->line].mapbase = port->mapbase;
- siu_uart_ports[port->line].ops = &siu_uart_ops;
-}
-
-#define SERIAL_VR41XX_CONSOLE &siu_console
-#else
-#define SERIAL_VR41XX_CONSOLE NULL
-#endif
-
-static struct uart_driver siu_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "SIU",
- .dev_name = "ttyVR",
- .major = SIU_MAJOR,
- .minor = SIU_MINOR_BASE,
- .cons = SERIAL_VR41XX_CONSOLE,
-};
-
-static int siu_probe(struct platform_device *dev)
-{
- struct uart_port *port;
- int num, i, retval;
-
- num = siu_init_ports(dev);
- if (num <= 0)
- return -ENODEV;
-
- siu_uart_driver.nr = num;
- retval = uart_register_driver(&siu_uart_driver);
- if (retval)
- return retval;
-
- for (i = 0; i < num; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- port->dev = &dev->dev;
- port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
-
- retval = uart_add_one_port(&siu_uart_driver, port);
- if (retval < 0) {
- port->dev = NULL;
- break;
- }
- }
-
- if (i == 0 && retval < 0) {
- uart_unregister_driver(&siu_uart_driver);
- return retval;
- }
-
- return 0;
-}
-
-static int siu_remove(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if (port->dev == &dev->dev) {
- uart_remove_one_port(&siu_uart_driver, port);
- port->dev = NULL;
- }
- }
-
- uart_unregister_driver(&siu_uart_driver);
-
- return 0;
-}
-
-static int siu_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_suspend_port(&siu_uart_driver, port);
-
- }
-
- return 0;
-}
-
-static int siu_resume(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_resume_port(&siu_uart_driver, port);
- }
-
- return 0;
-}
-
-static struct platform_driver siu_device_driver = {
- .probe = siu_probe,
- .remove = siu_remove,
- .suspend = siu_suspend,
- .resume = siu_resume,
- .driver = {
- .name = "SIU",
- },
-};
-
-module_platform_driver(siu_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index b710c5ef89ab..f310a8274df1 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -111,4 +111,7 @@ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t cnt);
+
#endif
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index bfa431a8e690..9fdecc795b6b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/minmax.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -104,6 +105,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p->size = size;
p->next = NULL;
p->commit = 0;
+ p->lookahead = 0;
p->read = 0;
p->flags = 0;
}
@@ -234,6 +236,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
buf->head = next;
}
buf->head->read = buf->head->commit;
+ buf->head->lookahead = buf->head->read;
if (ld && ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
@@ -276,13 +279,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
if (n != NULL) {
n->flags = flags;
buf->tail = n;
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures they see all buffer data.
*/
smp_store_release(&b->commit, b->used);
- /* paired w/ acquire in flush_to_ldisc(); ensures the
- * latest commit value can be read before the head is
- * advanced to the next buffer
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures the latest commit value can be read before the head
+ * is advanced to the next buffer.
*/
smp_store_release(&b->next, n);
} else if (change)
@@ -459,6 +464,40 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
+static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
+{
+ head->lookahead = max(head->lookahead, head->read);
+
+ while (head) {
+ struct tty_buffer *next;
+ unsigned char *p, *f = NULL;
+ unsigned int count;
+
+ /*
+ * Paired w/ release in __tty_buffer_request_room();
+ * ensures commit value read is not stale if the head
+ * is advancing to the next buffer.
+ */
+ next = smp_load_acquire(&head->next);
+ /*
+ * Paired w/ release in __tty_buffer_request_room() or in
+ * tty_buffer_flush(); ensures we see the committed buffer data.
+ */
+ count = smp_load_acquire(&head->commit) - head->lookahead;
+ if (!count) {
+ head = next;
+ continue;
+ }
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ head->lookahead += count;
+ }
+}
+
static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
@@ -496,7 +535,7 @@ static void flush_to_ldisc(struct work_struct *work)
while (1) {
struct tty_buffer *head = buf->head;
struct tty_buffer *next;
- int count;
+ int count, rcvd;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
@@ -519,10 +558,12 @@ static void flush_to_ldisc(struct work_struct *work)
continue;
}
- count = receive_buf(port, head, count);
- if (!count)
+ rcvd = receive_buf(port, head, count);
+ head->read += rcvd;
+ if (rcvd < count)
+ lookahead_bufs(port, head);
+ if (!rcvd)
break;
- head->read += count;
if (need_resched())
cond_resched();
@@ -532,6 +573,15 @@ static void flush_to_ldisc(struct work_struct *work)
}
+static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
+{
+ /*
+ * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+ * buffer data.
+ */
+ smp_store_release(&tail->commit, tail->used);
+}
+
/**
* tty_flip_buffer_push - push terminal buffers
* @port: tty port to push
@@ -546,16 +596,43 @@ void tty_flip_buffer_push(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- /*
- * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
- * buffer data.
- */
- smp_store_release(&buf->tail->commit, buf->tail->used);
+ tty_flip_buffer_commit(buf->tail);
queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
+ * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
+ * push
+ * @port: tty port
+ * @chars: characters
+ * @size: size
+ *
+ * The function combines tty_insert_flip_string() and tty_flip_buffer_push()
+ * with the exception of properly holding the @port->lock.
+ *
+ * To be used only internally (by pty currently).
+ *
+ * Returns: the number added.
+ */
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t size)
+{
+ struct tty_bufhead *buf = &port->buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ size = tty_insert_flip_string(port, chars, size);
+ if (size)
+ tty_flip_buffer_commit(buf->tail);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ queue_work(system_unbound_wq, &buf->work);
+
+ return size;
+}
+
+/**
* tty_buffer_init - prepare a tty buffer structure
* @port: tty port to initialise
*
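The new tty_insert_flip_string_and_push_buffer() helper above folds the insert and the commit under @port->lock. As a hedged sketch of the call-site pattern it enables (the pty write path is the intended user; the snippet below is illustrative, not quoted from this series), a caller that previously did

        /* insert and push as two separately locked steps */
        n = tty_insert_flip_string(port, chars, size);
        if (n)
                tty_flip_buffer_push(port);

can instead do

        /* insert, commit and queue the flush work under port->lock */
        n = tty_insert_flip_string_and_push_buffer(port, chars, size);

so the insert and the commit happen atomically with respect to other users of @port->lock.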
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8fec1d8648f5..82a8855981f7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1663,7 +1663,7 @@ void tty_kclose(struct tty_struct *tty)
*/
tty_ldisc_release(tty);
- /* Wait for pending work before tty destruction commmences */
+ /* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
tty_debug_hangup(tty, "freeing structure\n");
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index adae687f654b..2a76b330e108 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -319,6 +319,8 @@ unsigned char tty_get_frame_size(unsigned int cflag)
bits++;
if (cflag & PARENB)
bits++;
+ if (cflag & ADDRB)
+ bits++;
return bits;
}
@@ -353,6 +355,8 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
old_termios = tty->termios;
tty->termios = *new_termios;
unset_locked_termios(tty, &old_termios);
+ /* Reset any ADDRB changes, ADDRB is changed through ->rs485_config() */
+ tty->termios.c_cflag ^= (tty->termios.c_cflag ^ old_termios.c_cflag) & ADDRB;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
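A hedged worked example of the frame-size change above (not part of the patch): tty_get_frame_size() starts from start + stop + data bits (2 + tty_get_char_size()), so with CS8 | PARENB | ADDRB it now returns 2 + 8 + 1 (parity) + 1 (address bit) = 12 bits per frame, while the same cflag without ADDRB still yields 11.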
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 880608a65773..dce08a6d7b5e 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -43,6 +43,26 @@ static int tty_port_default_receive_buf(struct tty_port *port,
return ret;
}
+static void tty_port_default_lookahead_buf(struct tty_port *port, const unsigned char *p,
+ const unsigned char *f, unsigned int count)
+{
+ struct tty_struct *tty;
+ struct tty_ldisc *disc;
+
+ tty = READ_ONCE(port->itty);
+ if (!tty)
+ return;
+
+ disc = tty_ldisc_ref(tty);
+ if (!disc)
+ return;
+
+ if (disc->ops->lookahead_buf)
+ disc->ops->lookahead_buf(disc->tty, p, f, count);
+
+ tty_ldisc_deref(disc);
+}
+
static void tty_port_default_wakeup(struct tty_port *port)
{
struct tty_struct *tty = tty_port_tty_get(port);
@@ -55,6 +75,7 @@ static void tty_port_default_wakeup(struct tty_port *port)
const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
+ .lookahead_buf = tty_port_default_lookahead_buf,
.write_wakeup = tty_port_default_wakeup,
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index fe30ce512819..b3dfe9d5717e 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -30,6 +30,6 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
ifdef GENERATE_KEYMAP
$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@
+ loadkeys --mktable --unicode $< > $@
endif
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index d815ac98b39e..f02d21e2a96e 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -23,6 +23,8 @@
* stack overflow.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/errno.h>
@@ -36,9 +38,9 @@
#include <linux/vt_kern.h>
#include <linux/string.h>
-static unsigned short translations[][256] = {
+static unsigned short translations[][E_TABSZ] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
- {
+ [LAT1_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -71,9 +73,9 @@ static unsigned short translations[][256] = {
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
- },
+ },
/* VT100 graphics mapped to Unicode */
- {
+ [GRAF_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -108,8 +110,8 @@ static unsigned short translations[][256] = {
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* IBM Codepage 437 mapped to Unicode */
- {
- 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+ [IBMPC_MAP] = {
+ 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
@@ -141,9 +143,9 @@ static unsigned short translations[][256] = {
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
- },
+ },
/* User mapping -- default to codes for direct font mapping */
- {
+ [USER_MAP] = {
0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
@@ -184,78 +186,105 @@ static unsigned short translations[][256] = {
#define MAX_GLYPH 512 /* Max possible glyph value */
-static int inv_translate[MAX_NR_CONSOLES];
+static enum translation_map inv_translate[MAX_NR_CONSOLES];
+
+#define UNI_DIRS 32U
+#define UNI_DIR_ROWS 32U
+#define UNI_ROW_GLYPHS 64U
+
+#define UNI_DIR_BITS GENMASK(15, 11)
+#define UNI_ROW_BITS GENMASK(10, 6)
+#define UNI_GLYPH_BITS GENMASK( 5, 0)
-struct uni_pagedir {
- u16 **uni_pgdir[32];
+#define UNI_DIR(uni) FIELD_GET(UNI_DIR_BITS, (uni))
+#define UNI_ROW(uni) FIELD_GET(UNI_ROW_BITS, (uni))
+#define UNI_GLYPH(uni) FIELD_GET(UNI_GLYPH_BITS, (uni))
+
+#define UNI(dir, row, glyph) (FIELD_PREP(UNI_DIR_BITS, (dir)) | \
+ FIELD_PREP(UNI_ROW_BITS, (row)) | \
+ FIELD_PREP(UNI_GLYPH_BITS, (glyph)))
+
+/**
+ * struct uni_pagedict -- unicode directory
+ *
+ * @uni_pgdir: 32*32*64 table with glyphs
+ * @refcount: reference count of this structure
+ * @sum: checksum
+ * @inverse_translations: best-effort inverse mapping
+ * @inverse_trans_unicode: best-effort inverse mapping to unicode
+ */
+struct uni_pagedict {
+ u16 **uni_pgdir[UNI_DIRS];
unsigned long refcount;
unsigned long sum;
- unsigned char *inverse_translations[4];
+ unsigned char *inverse_translations[LAST_MAP + 1];
u16 *inverse_trans_unicode;
};
-static struct uni_pagedir *dflt;
+static struct uni_pagedict *dflt;
-static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
+static void set_inverse_transl(struct vc_data *conp, struct uni_pagedict *dict,
+ enum translation_map m)
{
- int j, glyph;
- unsigned short *t = translations[i];
- unsigned char *q;
-
- if (!p) return;
- q = p->inverse_translations[i];
-
- if (!q) {
- q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL);
- if (!q) return;
+ unsigned short *t = translations[m];
+ unsigned char *inv;
+
+ if (!dict)
+ return;
+ inv = dict->inverse_translations[m];
+
+ if (!inv) {
+ inv = dict->inverse_translations[m] = kmalloc(MAX_GLYPH,
+ GFP_KERNEL);
+ if (!inv)
+ return;
}
- memset(q, 0, MAX_GLYPH);
+ memset(inv, 0, MAX_GLYPH);
- for (j = 0; j < E_TABSZ; j++) {
- glyph = conv_uni_to_pc(conp, t[j]);
- if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
+ for (unsigned int ch = 0; ch < ARRAY_SIZE(translations[m]); ch++) {
+ int glyph = conv_uni_to_pc(conp, t[ch]);
+ if (glyph >= 0 && glyph < MAX_GLYPH && inv[glyph] < 32) {
/* prefer '-' above SHY etc. */
- q[glyph] = j;
+ inv[glyph] = ch;
}
}
}
-static void set_inverse_trans_unicode(struct vc_data *conp,
- struct uni_pagedir *p)
+static void set_inverse_trans_unicode(struct uni_pagedict *dict)
{
- int i, j, k, glyph;
- u16 **p1, *p2;
- u16 *q;
-
- if (!p) return;
- q = p->inverse_trans_unicode;
- if (!q) {
- q = p->inverse_trans_unicode =
- kmalloc_array(MAX_GLYPH, sizeof(u16), GFP_KERNEL);
- if (!q)
+ unsigned int d, r, g;
+ u16 *inv;
+
+ if (!dict)
+ return;
+
+ inv = dict->inverse_trans_unicode;
+ if (!inv) {
+ inv = dict->inverse_trans_unicode = kmalloc_array(MAX_GLYPH,
+ sizeof(*inv), GFP_KERNEL);
+ if (!inv)
return;
}
- memset(q, 0, MAX_GLYPH * sizeof(u16));
+ memset(inv, 0, MAX_GLYPH * sizeof(*inv));
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (!p1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
continue;
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (!p2)
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
continue;
- for (k = 0; k < 64; k++) {
- glyph = p2[k];
- if (glyph >= 0 && glyph < MAX_GLYPH
- && q[glyph] < 32)
- q[glyph] = (i << 11) + (j << 6) + k;
+ for (g = 0; g < UNI_ROW_GLYPHS; g++) {
+ u16 glyph = row[g];
+ if (glyph < MAX_GLYPH && inv[glyph] < 32)
+ inv[glyph] = UNI(d, r, g);
}
}
}
}
-unsigned short *set_translate(int m, struct vc_data *vc)
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
return translations[m];
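A hedged worked example of the UNI() packing introduced above: for the 16-bit value 0x4e2d, UNI_DIR() extracts bits 15..11 = 9, UNI_ROW() bits 10..6 = 24, and UNI_GLYPH() bits 5..0 = 45; conversely UNI(9, 24, 45) = (9 << 11) | (24 << 6) | 45 = 0x4e2d again. Every code point therefore indexes one of 32 directories, then one of 32 rows, then one of 64 glyph slots.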
@@ -268,44 +297,45 @@ unsigned short *set_translate(int m, struct vc_data *vc)
* was active.
* Still, it is now possible to a certain extent to cut and paste non-ASCII.
*/
-u16 inverse_translate(const struct vc_data *conp, int glyph, int use_unicode)
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode)
{
- struct uni_pagedir *p;
- int m;
- if (glyph < 0 || glyph >= MAX_GLYPH)
+ struct uni_pagedict *p;
+ enum translation_map m;
+
+ if (glyph >= MAX_GLYPH)
return 0;
- else {
- p = *conp->vc_uni_pagedir_loc;
- if (!p)
+
+ p = *conp->uni_pagedict_loc;
+ if (!p)
+ return glyph;
+
+ if (use_unicode) {
+ if (!p->inverse_trans_unicode)
return glyph;
- else if (use_unicode) {
- if (!p->inverse_trans_unicode)
- return glyph;
- else
- return p->inverse_trans_unicode[glyph];
- } else {
- m = inv_translate[conp->vc_num];
- if (!p->inverse_translations[m])
- return glyph;
- else
- return p->inverse_translations[m][glyph];
- }
+
+ return p->inverse_trans_unicode[glyph];
}
+
+ m = inv_translate[conp->vc_num];
+ if (!p->inverse_translations[m])
+ return glyph;
+
+ return p->inverse_translations[m][glyph];
}
EXPORT_SYMBOL_GPL(inverse_translate);
static void update_user_maps(void)
{
int i;
- struct uni_pagedir *p, *q = NULL;
-
+ struct uni_pagedict *p, *q = NULL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
- p = *vc_cons[i].d->vc_uni_pagedir_loc;
+ p = *vc_cons[i].d->uni_pagedict_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
- set_inverse_trans_unicode(vc_cons[i].d, p);
+ set_inverse_trans_unicode(p);
q = p;
}
}
@@ -321,15 +351,15 @@ static void update_user_maps(void)
*/
int con_set_trans_old(unsigned char __user * arg)
{
- int i;
unsigned short inbuf[E_TABSZ];
- unsigned char ubuf[E_TABSZ];
-
- if (copy_from_user(ubuf, arg, E_TABSZ))
- return -EFAULT;
+ unsigned int i;
+ unsigned char ch;
- for (i = 0; i < E_TABSZ ; i++)
- inbuf[i] = UNI_DIRECT_BASE | ubuf[i];
+ for (i = 0; i < ARRAY_SIZE(inbuf); i++) {
+ if (get_user(ch, &arg[i]))
+ return -EFAULT;
+ inbuf[i] = UNI_DIRECT_BASE | ch;
+ }
console_lock();
memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
@@ -345,7 +375,7 @@ int con_get_trans_old(unsigned char __user * arg)
unsigned char outbuf[E_TABSZ];
console_lock();
- for (i = 0; i < E_TABSZ ; i++)
+ for (i = 0; i < ARRAY_SIZE(outbuf); i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
outbuf[i] = (ch & ~0xff) ? 0 : ch;
@@ -381,7 +411,7 @@ int con_get_trans_new(ushort __user * arg)
}
/*
- * Unicode -> current font conversion
+ * Unicode -> current font conversion
*
* A font has at most 512 chars, usually 256.
* But one font position may represent several Unicode chars.
@@ -393,78 +423,82 @@ int con_get_trans_new(ushort __user * arg)
extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
extern u16 dfont_unitable[];
-static void con_release_unimap(struct uni_pagedir *p)
+static void con_release_unimap(struct uni_pagedict *dict)
{
- u16 **p1;
- int i, j;
-
- if (p == dflt) dflt = NULL;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1 != NULL) {
- for (j = 0; j < 32; j++)
- kfree(p1[j]);
- kfree(p1);
+ unsigned int d, r;
+
+ if (dict == dflt)
+ dflt = NULL;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (dir != NULL) {
+ for (r = 0; r < UNI_DIR_ROWS; r++)
+ kfree(dir[r]);
+ kfree(dir);
}
- p->uni_pgdir[i] = NULL;
+ dict->uni_pgdir[d] = NULL;
}
- for (i = 0; i < 4; i++) {
- kfree(p->inverse_translations[i]);
- p->inverse_translations[i] = NULL;
+
+ for (r = 0; r < ARRAY_SIZE(dict->inverse_translations); r++) {
+ kfree(dict->inverse_translations[r]);
+ dict->inverse_translations[r] = NULL;
}
- kfree(p->inverse_trans_unicode);
- p->inverse_trans_unicode = NULL;
+
+ kfree(dict->inverse_trans_unicode);
+ dict->inverse_trans_unicode = NULL;
}
/* Caller must hold the console lock */
void con_free_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
- p = *vc->vc_uni_pagedir_loc;
+ p = *vc->uni_pagedict_loc;
if (!p)
return;
- *vc->vc_uni_pagedir_loc = NULL;
+ *vc->uni_pagedict_loc = NULL;
if (--p->refcount)
return;
con_release_unimap(p);
kfree(p);
}
-
-static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
+
+static int con_unify_unimap(struct vc_data *conp, struct uni_pagedict *dict1)
{
- int i, j, k;
- struct uni_pagedir *q;
-
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- if (!vc_cons_allocated(i))
+ struct uni_pagedict *dict2;
+ unsigned int cons, d, r;
+
+ for (cons = 0; cons < MAX_NR_CONSOLES; cons++) {
+ if (!vc_cons_allocated(cons))
continue;
- q = *vc_cons[i].d->vc_uni_pagedir_loc;
- if (!q || q == p || q->sum != p->sum)
+ dict2 = *vc_cons[cons].d->uni_pagedict_loc;
+ if (!dict2 || dict2 == dict1 || dict2->sum != dict1->sum)
continue;
- for (j = 0; j < 32; j++) {
- u16 **p1, **q1;
- p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
- if (!p1 && !q1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir1 = dict1->uni_pgdir[d];
+ u16 **dir2 = dict2->uni_pgdir[d];
+ if (!dir1 && !dir2)
continue;
- if (!p1 || !q1)
+ if (!dir1 || !dir2)
break;
- for (k = 0; k < 32; k++) {
- if (!p1[k] && !q1[k])
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ if (!dir1[r] && !dir2[r])
continue;
- if (!p1[k] || !q1[k])
+ if (!dir1[r] || !dir2[r])
break;
- if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
+ if (memcmp(dir1[r], dir2[r], UNI_ROW_GLYPHS *
+ sizeof(*dir1[r])))
break;
}
- if (k < 32)
+ if (r < UNI_DIR_ROWS)
break;
}
- if (j == 32) {
- q->refcount++;
- *conp->vc_uni_pagedir_loc = q;
- con_release_unimap(p);
- kfree(p);
+ if (d == UNI_DIRS) {
+ dict2->refcount++;
+ *conp->uni_pagedict_loc = dict2;
+ con_release_unimap(dict1);
+ kfree(dict1);
return 1;
}
}
@@ -472,55 +506,66 @@ static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
}
static int
-con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
+con_insert_unipair(struct uni_pagedict *p, u_short unicode, u_short fontpos)
{
- int i, n;
- u16 **p1, *p2;
-
- p1 = p->uni_pgdir[n = unicode >> 11];
- if (!p1) {
- p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *),
- GFP_KERNEL);
- if (!p1) return -ENOMEM;
- for (i = 0; i < 32; i++)
- p1[i] = NULL;
+ u16 **dir, *row;
+ unsigned int n;
+
+ n = UNI_DIR(unicode);
+ dir = p->uni_pgdir[n];
+ if (!dir) {
+ dir = p->uni_pgdir[n] = kcalloc(UNI_DIR_ROWS, sizeof(*dir),
+ GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
}
- p2 = p1[n = (unicode >> 6) & 0x1f];
- if (!p2) {
- p2 = p1[n] = kmalloc_array(64, sizeof(u16), GFP_KERNEL);
- if (!p2) return -ENOMEM;
- memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
+ n = UNI_ROW(unicode);
+ row = dir[n];
+ if (!row) {
+ row = dir[n] = kmalloc_array(UNI_ROW_GLYPHS, sizeof(*row),
+ GFP_KERNEL);
+ if (!row)
+ return -ENOMEM;
+ /* No glyphs for the characters (yet) */
+ memset(row, 0xff, UNI_ROW_GLYPHS * sizeof(*row));
}
- p2[unicode & 0x3f] = fontpos;
-
+ row[UNI_GLYPH(unicode)] = fontpos;
+
p->sum += (fontpos << 20U) + unicode;
return 0;
}
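
con_insert_unipair() fills fresh rows with the 0xffff "no glyph yet" sentinel and folds (fontpos << 20) + unicode into dict->sum, which con_unify_unimap() above uses to reject non-identical dictionaries with a single compare before walking the tables. An illustrative userspace sketch of that bookkeeping, using toy data and a single row only:

/*
 * Toy model of the sentinel fill and checksum maintained by
 * con_insert_unipair().  Not the kernel data structure, just the idea.
 */
#include <stdio.h>
#include <string.h>

#define UNI_ROW_GLYPHS	64

struct toy_dict {
	unsigned short row[UNI_ROW_GLYPHS];
	unsigned long sum;
};

static void toy_insert(struct toy_dict *d, unsigned short unicode,
		       unsigned short fontpos)
{
	d->row[unicode & 0x3f] = fontpos;
	d->sum += ((unsigned long)fontpos << 20) + unicode;
}

int main(void)
{
	struct toy_dict a = { .sum = 0 }, b = { .sum = 0 };

	memset(a.row, 0xff, sizeof(a.row));	/* all entries start as "no glyph" */
	memset(b.row, 0xff, sizeof(b.row));

	toy_insert(&a, 0x00e9, 130);		/* same pair in both ... */
	toy_insert(&b, 0x00e9, 130);
	toy_insert(&a, 0x00e8, 138);		/* ... except this one */

	printf("sums %s\n", a.sum == b.sum ? "match" : "differ");
	return 0;
}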
+static int con_allocate_new(struct vc_data *vc)
+{
+ struct uni_pagedict *new, *old = *vc->uni_pagedict_loc;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ new->refcount = 1;
+ *vc->uni_pagedict_loc = new;
+
+ if (old)
+ old->refcount--;
+
+ return 0;
+}
+
/* Caller must hold the lock */
static int con_do_clear_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p, *q;
-
- p = *vc->vc_uni_pagedir_loc;
- if (!p || --p->refcount) {
- q = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!q) {
- if (p)
- p->refcount++;
- return -ENOMEM;
- }
- q->refcount=1;
- *vc->vc_uni_pagedir_loc = q;
- } else {
- if (p == dflt) dflt = NULL;
- p->refcount++;
- p->sum = 0;
- con_release_unimap(p);
- }
+ struct uni_pagedict *old = *vc->uni_pagedict_loc;
+
+ if (!old || old->refcount > 1)
+ return con_allocate_new(vc);
+
+ old->sum = 0;
+ con_release_unimap(old);
+
return 0;
}
@@ -532,91 +577,93 @@ int con_clear_unimap(struct vc_data *vc)
console_unlock();
return ret;
}
-
+
+static struct uni_pagedict *con_unshare_unimap(struct vc_data *vc,
+ struct uni_pagedict *old)
+{
+ struct uni_pagedict *new;
+ unsigned int d, r, g;
+ int ret;
+ u16 uni = 0;
+
+ ret = con_allocate_new(vc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new = *vc->uni_pagedict_loc;
+
+ /*
+ * uni_pgdir is a 32*32*64 table with rows allocated when its first
+ * entry is added. The unicode value must still be incremented for
+ * empty rows. We are copying entries from "old" to "new".
+ */
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = old->uni_pgdir[d];
+ if (!dir) {
+ /* Account for empty table */
+ uni += UNI_DIR_ROWS * UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row) {
+ /* Account for row of 64 empty entries */
+ uni += UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, uni++) {
+ if (row[g] == 0xffff)
+ continue;
+ /*
+ * Found one, copy entry for unicode uni with
+ * fontpos value row[g].
+ */
+ ret = con_insert_unipair(new, uni, row[g]);
+ if (ret) {
+ old->refcount++;
+ *vc->uni_pagedict_loc = old;
+ con_release_unimap(new);
+ kfree(new);
+ return ERR_PTR(ret);
+ }
+ }
+ }
+ }
+
+ return new;
+}
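
The comment in con_unshare_unimap() stresses that the running unicode value has to advance even across missing directories and rows. A cut-down userspace walk of the same idea, with the table contents invented purely for illustration:

/*
 * Sparse three-level walk that keeps the running unicode value in sync,
 * as con_unshare_unimap() does.  Sample data only.
 */
#include <stdio.h>

#define UNI_DIRS	32
#define UNI_DIR_ROWS	32
#define UNI_ROW_GLYPHS	64

int main(void)
{
	/* Only directory 0, row 1 exists; everything else is unallocated. */
	unsigned short row1[UNI_ROW_GLYPHS];
	unsigned short *dir0[UNI_DIR_ROWS] = { [1] = row1 };
	unsigned short **dirs[UNI_DIRS] = { [0] = dir0 };
	unsigned int d, r, g, uni = 0;

	for (g = 0; g < UNI_ROW_GLYPHS; g++)
		row1[g] = 0xffff;
	row1[5] = 42;			/* one populated entry: U+0045 -> fontpos 42 */

	for (d = 0; d < UNI_DIRS; d++) {
		if (!dirs[d]) {
			uni += UNI_DIR_ROWS * UNI_ROW_GLYPHS;	/* skip a whole directory */
			continue;
		}
		for (r = 0; r < UNI_DIR_ROWS; r++) {
			if (!dirs[d][r]) {
				uni += UNI_ROW_GLYPHS;		/* skip an empty row */
				continue;
			}
			for (g = 0; g < UNI_ROW_GLYPHS; g++, uni++)
				if (dirs[d][r][g] != 0xffff)
					printf("U+%04X -> fontpos %u\n",
					       uni, (unsigned int)dirs[d][r][g]);
		}
	}
	return 0;
}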
+
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
- int err = 0, err1, i;
- struct uni_pagedir *p, *q;
+ int err = 0, err1;
+ struct uni_pagedict *dict;
struct unipair *unilist, *plist;
if (!ct)
return 0;
- unilist = vmemdup_user(list, array_size(sizeof(struct unipair), ct));
+ unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
console_lock();
/* Save original uni_pagedict_loc in case we allocate a new one */
- p = *vc->vc_uni_pagedir_loc;
-
- if (!p) {
+ dict = *vc->uni_pagedict_loc;
+ if (!dict) {
err = -EINVAL;
-
goto out_unlock;
}
-
- if (p->refcount > 1) {
- int j, k;
- u16 **p1, *p2, l;
-
- err1 = con_do_clear_unimap(vc);
- if (err1) {
- err = err1;
+
+ if (dict->refcount > 1) {
+ dict = con_unshare_unimap(vc, dict);
+ if (IS_ERR(dict)) {
+ err = PTR_ERR(dict);
goto out_unlock;
}
-
- /*
- * Since refcount was > 1, con_clear_unimap() allocated a
- * a new uni_pagedir for this vc. Re: p != q
- */
- q = *vc->vc_uni_pagedir_loc;
-
- /*
- * uni_pgdir is a 32*32*64 table with rows allocated
- * when its first entry is added. The unicode value must
- * still be incremented for empty rows. We are copying
- * entries from "p" (old) to "q" (new).
- */
- l = 0; /* unicode value */
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (p2) {
- for (k = 0; k < 64; k++, l++)
- if (p2[k] != 0xffff) {
- /*
- * Found one, copy entry for unicode
- * l with fontpos value p2[k].
- */
- err1 = con_insert_unipair(q, l, p2[k]);
- if (err1) {
- p->refcount++;
- *vc->vc_uni_pagedir_loc = p;
- con_release_unimap(q);
- kfree(q);
- err = err1;
- goto out_unlock;
- }
- }
- } else {
- /* Account for row of 64 empty entries */
- l += 64;
- }
- }
- else
- /* Account for empty table */
- l += 32 * 64;
- }
-
- /*
- * Finished copying font table, set vc_uni_pagedir to new table
- */
- p = q;
- } else if (p == dflt) {
+ } else if (dict == dflt) {
dflt = NULL;
}
@@ -624,20 +671,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
* Insert user specified unicode pairs into new table.
*/
for (plist = unilist; ct; ct--, plist++) {
- err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+ err1 = con_insert_unipair(dict, plist->unicode, plist->fontpos);
if (err1)
err = err1;
}
-
+
/*
* Merge with fontmaps of any other virtual consoles.
*/
- if (con_unify_unimap(vc, p))
+ if (con_unify_unimap(vc, dict))
goto out_unlock;
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update inverse translations */
- set_inverse_trans_unicode(vc, p);
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
out_unlock:
console_unlock();
@@ -652,55 +699,56 @@ out_unlock:
* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
* The representation used was the most compact I could come up
* with. This routine is executed at video setup, and when the
- * PIO_FONTRESET ioctl is called.
+ * PIO_FONTRESET ioctl is called.
*
* The caller must hold the console lock
*/
int con_set_default_unimap(struct vc_data *vc)
{
- int i, j, err = 0, err1;
- u16 *q;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
+ unsigned int fontpos, count;
+ int err = 0, err1;
+ u16 *dfont;
if (dflt) {
- p = *vc->vc_uni_pagedir_loc;
- if (p == dflt)
+ dict = *vc->uni_pagedict_loc;
+ if (dict == dflt)
return 0;
dflt->refcount++;
- *vc->vc_uni_pagedir_loc = dflt;
- if (p && !--p->refcount) {
- con_release_unimap(p);
- kfree(p);
+ *vc->uni_pagedict_loc = dflt;
+ if (dict && !--dict->refcount) {
+ con_release_unimap(dict);
+ kfree(dict);
}
return 0;
}
-
+
/* The default font is always 256 characters */
err = con_do_clear_unimap(vc);
if (err)
return err;
-
- p = *vc->vc_uni_pagedir_loc;
- q = dfont_unitable;
-
- for (i = 0; i < 256; i++)
- for (j = dfont_unicount[i]; j; j--) {
- err1 = con_insert_unipair(p, *(q++), i);
+
+ dict = *vc->uni_pagedict_loc;
+ dfont = dfont_unitable;
+
+ for (fontpos = 0; fontpos < 256U; fontpos++)
+ for (count = dfont_unicount[fontpos]; count; count--) {
+ err1 = con_insert_unipair(dict, *(dfont++), fontpos);
if (err1)
err = err1;
}
-
- if (con_unify_unimap(vc, p)) {
- dflt = *vc->vc_uni_pagedir_loc;
+
+ if (con_unify_unimap(vc, dict)) {
+ dflt = *vc->uni_pagedict_loc;
return err;
}
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update all inverse translations */
- set_inverse_trans_unicode(vc, p);
- dflt = p;
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
+ dflt = dict;
return err;
}
EXPORT_SYMBOL(con_set_default_unimap);
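
The loop in con_set_default_unimap() consumes dfont_unicount[fontpos] entries from dfont_unitable for every font position, i.e. the count array describes how the flat table is partitioned. A tiny sketch of that encoding with made-up sample data (the real tables live in console_defmap.c):

/*
 * Shape of the default-unimap tables consumed above: unicount[fontpos]
 * says how many Unicode values map to that font position, unitable is
 * their concatenation in font-position order.  Sample data only.
 */
#include <stdio.h>

static const unsigned char unicount[4] = { 1, 0, 2, 1 };
static const unsigned short unitable[] = { 0x0041, 0x00c4, 0x00c5, 0x0042 };

int main(void)
{
	const unsigned short *u = unitable;
	unsigned int fontpos, n;

	for (fontpos = 0; fontpos < 4; fontpos++)
		for (n = unicount[fontpos]; n; n--)
			printf("U+%04X -> fontpos %u\n", (unsigned int)*u++, fontpos);
	return 0;
}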
@@ -714,16 +762,16 @@ EXPORT_SYMBOL(con_set_default_unimap);
*/
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
{
- struct uni_pagedir *q;
+ struct uni_pagedict *src;
- if (!*src_vc->vc_uni_pagedir_loc)
+ if (!*src_vc->uni_pagedict_loc)
return -EINVAL;
- if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc)
+ if (*dst_vc->uni_pagedict_loc == *src_vc->uni_pagedict_loc)
return 0;
con_free_unimap(dst_vc);
- q = *src_vc->vc_uni_pagedir_loc;
- q->refcount++;
- *dst_vc->vc_uni_pagedir_loc = q;
+ src = *src_vc->uni_pagedict_loc;
+ src->refcount++;
+ *dst_vc->uni_pagedict_loc = src;
return 0;
}
EXPORT_SYMBOL(con_copy_unimap);
@@ -734,46 +782,53 @@ EXPORT_SYMBOL(con_copy_unimap);
* Read the console unicode data for this console. Called from the ioctl
* handlers.
*/
-int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct,
+ struct unipair __user *list)
{
- int i, j, k, ret = 0;
ushort ect;
- u16 **p1, *p2;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
struct unipair *unilist;
+ unsigned int d, r, g;
+ int ret = 0;
- unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ unilist = kvmalloc_array(ct, sizeof(*unilist), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
console_lock();
ect = 0;
- if (*vc->vc_uni_pagedir_loc) {
- p = *vc->vc_uni_pagedir_loc;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = *(p1++);
- if (p2)
- for (k = 0; k < 64; k++, p2++) {
- if (*p2 >= MAX_GLYPH)
- continue;
- if (ect < ct) {
- unilist[ect].unicode =
- (i<<11)+(j<<6)+k;
- unilist[ect].fontpos = *p2;
- }
- ect++;
+ dict = *vc->uni_pagedict_loc;
+ if (!dict)
+ goto unlock;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
+ continue;
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
+ continue;
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, row++) {
+ if (*row >= MAX_GLYPH)
+ continue;
+ if (ect < ct) {
+ unilist[ect].unicode = UNI(d, r, g);
+ unilist[ect].fontpos = *row;
}
+ ect++;
}
}
}
+unlock:
console_unlock();
- if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
+ if (copy_to_user(list, unilist, min(ect, ct) * sizeof(*unilist)))
+ ret = -EFAULT;
+ if (put_user(ect, uct))
ret = -EFAULT;
- put_user(ect, uct);
kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
@@ -798,20 +853,18 @@ u32 conv_8bit_to_uni(unsigned char c)
int conv_uni_to_8bit(u32 uni)
{
int c;
- for (c = 0; c < 0x100; c++)
+ for (c = 0; c < ARRAY_SIZE(translations[USER_MAP]); c++)
if (translations[USER_MAP][c] == uni ||
(translations[USER_MAP][c] == (c | 0xf000) && uni == c))
return c;
return -1;
}
-int
-conv_uni_to_pc(struct vc_data *conp, long ucs)
+int conv_uni_to_pc(struct vc_data *conp, long ucs)
{
- int h;
- u16 **p1, *p2;
- struct uni_pagedir *p;
-
+ struct uni_pagedict *dict;
+ u16 **dir, *row, glyph;
+
/* Only 16-bit codes supported at this time */
if (ucs > 0xffff)
return -4; /* Not found */
@@ -826,17 +879,24 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
*/
else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
return ucs & UNI_DIRECT_MASK;
-
- if (!*conp->vc_uni_pagedir_loc)
+
+ dict = *conp->uni_pagedict_loc;
+ if (!dict)
return -3;
- p = *conp->vc_uni_pagedir_loc;
- if ((p1 = p->uni_pgdir[ucs >> 11]) &&
- (p2 = p1[(ucs >> 6) & 0x1f]) &&
- (h = p2[ucs & 0x3f]) < MAX_GLYPH)
- return h;
+ dir = dict->uni_pgdir[UNI_DIR(ucs)];
+ if (!dir)
+ return -4;
- return -4; /* not found */
+ row = dir[UNI_ROW(ucs)];
+ if (!row)
+ return -4;
+
+ glyph = row[UNI_GLYPH(ucs)];
+ if (glyph >= MAX_GLYPH)
+ return -4;
+
+ return glyph;
}
/*
@@ -844,13 +904,13 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
* initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
* from this function, hence the call from sys_setup.
*/
-void __init
+void __init
console_map_init(void)
{
int i;
-
+
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc)
+ if (vc_cons_allocated(i) && !*vc_cons[i].d->uni_pagedict_loc)
con_set_default_unimap(vc_cons[i].d);
}
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index 094d95bf0005..0c043e4f292e 100644
--- a/drivers/tty/vt/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Do not edit this file! It was automatically generated by */
-/* loadkeys --mktable defkeymap.map > defkeymap.c */
+/* Do not edit this file! It was automatically generated by */
+/* loadkeys --mktable --unicode defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
@@ -139,7 +139,7 @@ static unsigned short ctrl_alt_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-ushort *key_maps[MAX_NR_KEYMAPS] = {
+unsigned short *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, altgr_map, NULL,
ctrl_map, shift_ctrl_map, NULL, NULL,
alt_map, NULL, NULL, NULL,
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index f7755e73696e..6ef22f01cc51 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -68,7 +68,8 @@ sel_pos(int n, bool unicode)
{
if (unicode)
return screen_glyph_unicode(vc_sel.cons, n / 2);
- return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n), 0);
+ return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n),
+ false);
}
/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index f8c87c4d7399..ae9c926acd6f 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -344,7 +344,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = vmalloc(memsize);
+ p = vzalloc(memsize);
if (!p)
return NULL;
@@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
unsigned short *p = (unsigned short *) vc->vc_pos;
vc_uniscr_delete(vc, nr);
- scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
+ scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
@@ -1063,10 +1063,10 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
- if (vc->vc_uni_pagedir_loc)
+ if (vc->uni_pagedict_loc)
con_free_unimap(vc);
- vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
- vc->vc_uni_pagedir = NULL;
+ vc->uni_pagedict_loc = &vc->uni_pagedict;
+ vc->uni_pagedict = NULL;
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
@@ -1136,7 +1136,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
visual_init(vc, currcons, 1);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
err = -EINVAL;
@@ -3939,7 +3939,7 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
bind = con_is_bound(con->con);
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%i\n", bind);
+ return sysfs_emit(buf, "%i\n", bind);
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
@@ -3947,7 +3947,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
{
struct con_driver *con = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s %s\n",
+ return sysfs_emit(buf, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
con->desc);
@@ -4741,7 +4741,7 @@ u32 screen_glyph_unicode(const struct vc_data *vc, int n)
if (uniscr)
return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols];
- return inverse_translate(vc, screen_glyph(vc, n * 2), 1);
+ return inverse_translate(vc, screen_glyph(vc, n * 2), true);
}
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index ffb01fc6de75..8f67db202d7b 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -215,7 +215,7 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
@@ -234,8 +234,8 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr);
static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
u16 set, u16 clr)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ce86d1b790c0..6bc679d22927 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -64,9 +64,6 @@
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
-/* Maximum retries for Hibern8 enter */
-#define UIC_HIBERN8_ENTER_RETRIES 3
-
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
@@ -175,7 +172,7 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
@@ -363,7 +360,7 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
- struct uic_command *ucmd,
+ const struct uic_command *ucmd,
enum ufs_trace_str_t str_t)
{
u32 cmd;
@@ -443,11 +440,11 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
- char *err_name)
+ const char *err_name)
{
int i;
bool found = false;
- struct ufs_event_hist *e;
+ const struct ufs_event_hist *e;
if (id >= UFS_EVT_CNT)
return;
@@ -497,7 +494,7 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
- struct ufshcd_lrb *lrbp;
+ const struct ufshcd_lrb *lrbp;
int prdt_length;
int tag;
@@ -553,7 +550,7 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
- struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
+ const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
@@ -1109,7 +1106,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
- struct scsi_device *sdev;
+ const struct scsi_device *sdev;
u32 pending = 0;
lockdep_assert_held(hba->host->host_lock);
@@ -2080,14 +2077,15 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
- struct ufs_hba_monitor *m = &hba->monitor;
+ const struct ufs_hba_monitor *m = &hba->monitor;
return (m->enabled && lrbp && lrbp->cmd &&
(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}
-static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba,
+ const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
@@ -2098,14 +2096,14 @@ static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
-static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = scsi_cmd_to_rq(lrbp->cmd);
+ const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2227,6 +2225,8 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
+ hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
@@ -2953,37 +2953,59 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
- int err = 0;
- unsigned long time_left;
+ unsigned long time_left = msecs_to_jiffies(max_timeout);
unsigned long flags;
+ bool pending;
+ int err;
+retry:
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
- msecs_to_jiffies(max_timeout));
+ time_left);
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
+ /*
+ * The completion handler called complete() and the caller of
+ * this function still owns the @lrbp tag so the code below does
+ * not trigger any race conditions.
+ */
+ hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (!time_left) {
+ } else {
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
__func__, lrbp->task_tag);
- if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
+ if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
- /*
- * in case of an error, after clearing the doorbell,
- * we also need to clear the outstanding_request
- * field in hba
- */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ /*
+ * Since clearing the command succeeded we also need to
+ * clear the task tag bit from the outstanding_reqs
+ * variable.
+ */
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ pending = test_bit(lrbp->task_tag,
+ &hba->outstanding_reqs);
+ if (pending) {
+ hba->dev_cmd.complete = NULL;
+ __clear_bit(lrbp->task_tag,
+ &hba->outstanding_reqs);
+ }
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (!pending) {
+ /*
+ * The completion handler ran while we tried to
+ * clear the command.
+ */
+ time_left = 1;
+ goto retry;
+ }
+ } else {
+ dev_err(hba->dev, "%s: failed to clear tag %d\n",
+ __func__, lrbp->task_tag);
+ }
}
return err;
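
The retry logic added to ufshcd_wait_for_dev_cmd() handles the race where the completion fires while the timed-out waiter is trying to reclaim the tag. Below is a rough userspace analogue of that control flow, using atomics and polling rather than the driver's completion and spinlock, so it models the branches but not the real locking; which branch a given run takes depends on scheduling:

/*
 * Userspace sketch of the timeout / clear / retry dance above.
 * Losing the reclaim race to the completion side means the command
 * actually finished, so the wait is retried instead of failing.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int done;			/* stands in for dev_cmd.complete firing */
static atomic_int tag_pending = 1;	/* stands in for the outstanding_reqs bit */

static void *device_side(void *arg)
{
	(void)arg;
	usleep(11 * 1000);		/* "hardware" completes around the timeout */
	atomic_store(&tag_pending, 0);
	atomic_store(&done, 1);
	return NULL;
}

static int wait_for_cmd(int timeout_ms)
{
retry:
	for (int waited = 0; waited < timeout_ms; waited++) {
		if (atomic_load(&done))
			return 0;		/* normal completion */
		usleep(1000);
	}
	if (atomic_exchange(&tag_pending, 0))
		return -1;			/* we reclaimed the tag: real timeout */
	timeout_ms = 1;				/* completion won the race: wait again */
	goto retry;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, device_side, NULL);
	printf("wait_for_cmd() = %d\n", wait_for_cmd(10));
	pthread_join(t, NULL);
	return 0;
}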
@@ -3073,7 +3095,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
- "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
return ret;
}
@@ -3241,7 +3263,7 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
- "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ "%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
return ret;
}
@@ -3812,7 +3834,7 @@ int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
{
int ret;
- if (agreed_gear != UFS_HS_G4)
+ if (agreed_gear < UFS_HS_G4)
adapt_val = PA_NO_ADAPT;
ret = ufshcd_dme_set(hba,
@@ -4101,7 +4123,7 @@ out_unlock:
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
struct uic_command uic_cmd = {0};
int ret;
@@ -4126,6 +4148,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
int ufshcd_link_recovery(struct ufs_hba *hba)
{
@@ -4268,8 +4291,13 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FAST_MODE;
- pwr_info->pwr_rx = FAST_MODE;
+ if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
+ pwr_info->pwr_tx = FASTAUTO_MODE;
+ pwr_info->pwr_rx = FASTAUTO_MODE;
+ } else {
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ }
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -4744,7 +4772,7 @@ link_startup:
* but we can't be sure if the link is up until link startup
* succeeds. So reset the local Uni-Pro and try again.
*/
- if (ret && ufshcd_hba_enable(hba)) {
+ if (ret && retries && ufshcd_hba_enable(hba)) {
ufshcd_update_evt_hist(hba,
UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
@@ -4909,7 +4937,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
*
*/
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
- struct scsi_device *sdev)
+ const struct scsi_device *sdev)
{
if (hba->dev_info.f_power_on_wp_en &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -5428,8 +5456,8 @@ int ufshcd_write_ee_control(struct ufs_hba *hba)
return err;
}
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr)
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr)
{
u16 new_mask, ee_ctrl_mask;
int err = 0;
@@ -5738,7 +5766,7 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
}
hba->dev_info.wb_enabled = enable;
- dev_info(hba->dev, "%s Write Booster %s\n",
+ dev_dbg(hba->dev, "%s Write Booster %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
@@ -7253,7 +7281,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_set_clk_freq(hba, true);
+ ufshcd_scale_clks(hba, true);
err = ufshcd_hba_enable(hba);
@@ -7366,7 +7394,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
*
* Returns calculated max ICC level for specific regulator
*/
-static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
+ const char *buff)
{
int i;
int curr_uA;
@@ -7413,7 +7442,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- u8 *desc_buf, int len)
+ const u8 *desc_buf, int len)
{
u32 icc_level = 0;
@@ -7563,7 +7592,7 @@ out:
return ret;
}
-static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
@@ -7634,7 +7663,7 @@ wb_disabled:
hba->caps &= ~UFSHCD_CAP_WB_EN;
}
-static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u32 ext_ufs_feature;
@@ -7868,7 +7897,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
u32 granularity, peer_granularity;
u32 pa_tactivate, peer_pa_tactivate;
u32 pa_tactivate_us, peer_pa_tactivate_us;
- u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+ static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&granularity);
@@ -7985,7 +8014,7 @@ struct ufs_ref_clk {
enum ufs_ref_clk_freq val;
};
-static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
+static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
{38400000, REF_CLK_FREQ_38_4_MHZ},
@@ -8297,6 +8326,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
+ .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
@@ -8427,7 +8457,7 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
-static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
@@ -8443,6 +8473,7 @@ static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
@@ -8536,6 +8567,19 @@ out:
return ret;
}
+static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
+{
+ u32 freq;
+ int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
+
+ if (ret) {
+ dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
+ return REF_CLK_FREQ_INVAL;
+ }
+
+ return ufs_get_bref_clk_from_hz(freq);
+}
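
ufshcd_parse_ref_clk_property() converts the raw 'ref-clk-freq' value through ufs_get_bref_clk_from_hz(), whose body is not part of this diff. The sketch below is a hypothetical model of that mapping, built from the ufs_ref_clk_freqs table that appears later in this patch; it is a guess at the helper's behaviour, not the driver's actual code:

/*
 * Hypothetical Hz -> enum lookup, modelled on the ufs_ref_clk_freqs
 * table visible elsewhere in this diff.  Assumption, not driver code.
 */
#include <stdio.h>

enum ufs_ref_clk_freq {
	REF_CLK_FREQ_19_2_MHZ,
	REF_CLK_FREQ_26_MHZ,
	REF_CLK_FREQ_38_4_MHZ,
	REF_CLK_FREQ_INVAL = -1,
};

static const struct { unsigned long hz; enum ufs_ref_clk_freq val; } freqs[] = {
	{ 19200000, REF_CLK_FREQ_19_2_MHZ },
	{ 26000000, REF_CLK_FREQ_26_MHZ },
	{ 38400000, REF_CLK_FREQ_38_4_MHZ },
};

static enum ufs_ref_clk_freq ref_clk_from_hz(unsigned long hz)
{
	for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		if (freqs[i].hz == hz)
			return freqs[i].val;
	return REF_CLK_FREQ_INVAL;	/* unknown frequency */
}

int main(void)
{
	printf("%d %d\n", ref_clk_from_hz(26000000), ref_clk_from_hz(12345));
	return 0;
}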
+
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
int ret = 0;
@@ -8629,6 +8673,9 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_hba_vreg;
+ if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
+ hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
+
err = ufshcd_setup_clocks(hba, true);
if (err)
goto out_disable_hba_vreg;
@@ -9462,12 +9509,8 @@ EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_shutdown(struct ufs_hba *hba)
{
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
- goto out;
+ ufshcd_suspend(hba);
- pm_runtime_get_sync(hba->dev);
-
- ufshcd_suspend(hba);
-out:
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
@@ -9487,7 +9530,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_bsg_remove(hba);
ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
- blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_destroy_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
scsi_remove_host(hba->host);
/* disable interrupts */
@@ -9783,7 +9826,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0;
free_tmf_queue:
- blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_destroy_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index de2bb8401bc4..a1a7a1175a5a 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -433,9 +433,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return 0;
}
-static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
- int rgn_idx, enum req_opf dir,
- bool atomic)
+static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
+ enum req_op op, bool atomic)
{
struct ufshpb_req *rq;
struct request *req;
@@ -446,7 +445,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
return NULL;
retry:
- req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
+ req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
BLK_MQ_REQ_NOWAIT);
if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 82590224da13..4cc2dbd79ed0 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -92,6 +92,18 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_RENESAS
+ tristate "Renesas specific hooks to UFS controller platform driver"
+ depends on (ARCH_RENESAS || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Renesas-specific additions to the UFSHCD platform driver.
+ The UFS host on Renesas chipsets needs some vendor-specific configuration
+ before accessing the hardware.
+
+ Select this if you have UFS controller on Renesas chipset.
+
+ If unsure, say N.
+
config SCSI_UFS_TI_J721E
tristate "TI glue layer for Cadence UFS Controller"
depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index e4be54273c98..7717ca93e7d5 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -11,4 +11,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index a81d8cbd542f..eced97538082 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -52,11 +52,12 @@
#define HCI_ERR_EN_DME_LAYER 0x88
#define HCI_CLKSTOP_CTRL 0xB0
#define REFCLKOUT_STOP BIT(4)
+#define MPHY_APBCLK_STOP BIT(3)
#define REFCLK_STOP BIT(2)
#define UNIPRO_MCLK_STOP BIT(1)
#define UNIPRO_PCLK_STOP BIT(0)
#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
- UNIPRO_MCLK_STOP |\
+ UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP |\
UNIPRO_PCLK_STOP)
#define HCI_MISC 0xB4
#define REFCLK_CTRL_EN BIT(7)
@@ -135,15 +136,9 @@ enum {
/*
* UNIPRO registers
*/
-#define UNIPRO_COMP_VERSION 0x000
-#define UNIPRO_DME_PWR_REQ 0x090
-#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0
/*
* UFS Protector registers
@@ -157,7 +152,6 @@ enum {
#define CNTR_DIV_VAL 40
-static struct exynos_ufs_drv_data exynos_ufs_drvs;
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
@@ -657,8 +651,9 @@ static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
if (attr->rx_min_actv_time_cap)
ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
- i), attr->rx_min_actv_time_cap);
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY, i),
+ attr->rx_min_actv_time_cap);
if (attr->rx_hibern8_time_cap)
ufshcd_dme_set(hba,
@@ -910,9 +905,13 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out_exit_phy;
+ return ret;
}
+ ret = phy_power_on(generic_phy);
+ if (ret)
+ goto out_exit_phy;
+
return 0;
out_exit_phy:
@@ -1174,10 +1173,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
goto out;
}
- ret = phy_power_on(ufs->phy);
- if (ret)
- goto phy_off;
-
exynos_ufs_priv_init(hba, ufs);
if (ufs->drv_data->drv_init) {
@@ -1195,8 +1190,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
exynos_ufs_config_smu(ufs);
return 0;
-phy_off:
- phy_power_off(ufs->phy);
out:
hba->priv = NULL;
return ret;
@@ -1473,7 +1466,100 @@ static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
return 0;
}
-static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
+static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
+ }
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ exynos_ufs_establish_connt(ufs);
+
+ return 0;
+}
+
+static int fsd_ufs_post_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+ u32 hw_cap_min_tactivate;
+ u32 peer_rx_min_actv_time_cap;
+ u32 max_rx_hibern8_time_cap;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
+ &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_rx_min_actv_time_cap); /* PA_TActivate */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ &max_rx_hibern8_time_cap); /* PA_Hibern8Time */
+
+ if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ peer_rx_min_actv_time_cap + 1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ return 0;
+}
+
+static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
+ unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
+ unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
@@ -1514,9 +1600,14 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static int exynos_ufs_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+
return 0;
}
@@ -1545,7 +1636,7 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.pa_dbg_option_suite = 0x30103,
};
-static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
@@ -1561,7 +1652,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.post_pwr_change = exynosauto_ufs_post_pwr_change,
};
-static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.vops = &ufs_hba_exynosauto_vh_ops,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
@@ -1573,7 +1664,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
};
-static struct exynos_ufs_drv_data exynos_ufs_drvs = {
+static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
@@ -1595,6 +1686,47 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = {
.post_pwr_change = exynos7_ufs_post_pwr_change,
};
+static struct exynos_ufs_uic_attr fsd_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x2E820183,
+};
+
+struct exynos_ufs_drv_data fsd_ufs_drvs = {
+ .uic_attr = &fsd_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
+ .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .pre_link = fsd_ufs_pre_link,
+ .post_link = fsd_ufs_post_link,
+ .pre_pwr_change = fsd_ufs_pre_pwr_change,
+};
+
static const struct of_device_id exynos_ufs_of_match[] = {
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
@@ -1602,6 +1734,8 @@ static const struct of_device_id exynos_ufs_of_match[] = {
.data = &exynosauto_ufs_drvs },
{ .compatible = "samsung,exynosautov9-ufs-vh",
.data = &exynosauto_ufs_vh_drvs },
+ { .compatible = "tesla,fsd-ufs",
+ .data = &fsd_ufs_drvs },
{},
};
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 0b0a3d530ca6..a4bd6646d7f1 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -22,6 +22,7 @@
#define PA_DBG_RXPHY_CFGUPDT 0x9519
#define PA_DBG_MODE 0x9529
#define PA_DBG_SKIP_RESET_PHY 0x9539
+#define PA_DBG_AUTOMODE_THLD 0x9536
#define PA_DBG_OV_TM 0x9540
#define PA_DBG_SKIP_LINE_RESET 0x9541
#define PA_DBG_LINE_RESET_REQ 0x9543
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index beabc3ccd30b..c958279bdd8f 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sched/clock.h>
@@ -30,26 +31,11 @@
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
-#define ufs_mtk_smc(cmd, val, res) \
- arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
- cmd, val, 0, 0, 0, 0, 0, &(res))
-
-#define ufs_mtk_va09_pwr_ctrl(res, on) \
- ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
-
-#define ufs_mtk_crypto_ctrl(res, enable) \
- ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
-
-#define ufs_mtk_ref_clk_notify(on, res) \
- ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
-
-#define ufs_mtk_device_reset_ctrl(high, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
-
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
- { .wmanufacturerid = UFS_VENDOR_MICRON,
+ { .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -82,6 +68,13 @@ static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}
+static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -191,6 +184,14 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
hba->ahit = 0;
}
+
+ /*
+ * Turn on CLK_CG early to bypass an abnormal ERR_CHK signal
+ * and prevent a host hang issue
+ */
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
+ REG_UFS_XOUFS_CTRL);
}
return 0;
@@ -244,8 +245,9 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
if (host->ref_clk_enabled == on)
return 0;
+ ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
+
if (on) {
- ufs_mtk_ref_clk_notify(on, res);
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
@@ -267,7 +269,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
return -ETIMEDOUT;
@@ -275,8 +277,8 @@ out:
host->ref_clk_enabled = on;
if (on)
ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
- else
- ufs_mtk_ref_clk_notify(on, res);
+
+ ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
return 0;
}
@@ -579,20 +581,38 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+ if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
+ host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
-static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
+static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- ufs_mtk_boost_crypt(hba, up);
- ufs_mtk_setup_ref_clk(hba, up);
+ if (!host || !host->pm_qos_init)
+ return;
+
+ cpu_latency_qos_update_request(&host->pm_qos_req,
+ boost ? 0 : PM_QOS_DEFAULT_VALUE);
+}
- if (up)
+static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (on) {
phy_power_on(host->mphy);
- else
+ ufs_mtk_setup_ref_clk(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_boost_pm_qos(hba, on);
+ } else {
+ ufs_mtk_boost_pm_qos(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
+ }
}
/**
@@ -637,9 +657,9 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
}
if (clk_pwr_off)
- ufs_mtk_scale_perf(hba, false);
+ ufs_mtk_pwr_ctrl(hba, false);
} else if (on && status == POST_CHANGE) {
- ufs_mtk_scale_perf(hba, true);
+ ufs_mtk_pwr_ctrl(hba, true);
}
return ret;
@@ -675,6 +695,73 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
return hba->ufs_version;
}
+#define MAX_VCC_NAME 30
+static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct device_node *np = hba->dev->of_node;
+ struct device *dev = hba->dev;
+ char vcc_name[MAX_VCC_NAME];
+ struct arm_smccc_res res;
+ int err, ver;
+
+ if (hba->vreg_info.vcc)
+ return 0;
+
+ if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
+ ufs_mtk_get_vcc_num(res);
+ if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
+ else
+ return -ENODEV;
+ } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
+ ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
+ } else {
+ return 0;
+ }
+
+ err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
+ if (err)
+ return err;
+
+ err = ufshcd_get_vreg(dev, info->vcc);
+ if (err)
+ return err;
+
+ err = regulator_enable(info->vcc->reg);
+ if (!err) {
+ info->vcc->enabled = true;
+ dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
+ }
+
+ return err;
+}
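
ufs_mtk_vreg_fix_vcc() derives the regulator name from the upper version nibble of wspecversion when the "mediatek,ufs-vcc-by-ver" property is set. A small worked example of that string construction; the sample wspecversion value is hypothetical:

/*
 * Worked example of the "vcc-ufs%u" name built above.  The sample
 * wspecversion 0x0310 (i.e. a UFS 3.1 device) is an assumption.
 */
#include <stdio.h>

#define MAX_VCC_NAME 30

int main(void)
{
	unsigned short wspecversion = 0x0310;		/* hypothetical sample value */
	unsigned int ver = (wspecversion & 0xF00) >> 8;	/* upper version nibble = 3 */
	char vcc_name[MAX_VCC_NAME];

	snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	printf("%s\n", vcc_name);			/* prints "vcc-ufs3" */
	return 0;
}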
+
+static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct ufs_vreg **vreg_on, **vreg_off;
+
+ if (hba->dev_info.wspecversion >= 0x0300) {
+ vreg_on = &info->vccq;
+ vreg_off = &info->vccq2;
+ } else {
+ vreg_on = &info->vccq2;
+ vreg_off = &info->vccq;
+ }
+
+ if (*vreg_on)
+ (*vreg_on)->always_on = true;
+
+ if (*vreg_off) {
+ regulator_disable((*vreg_off)->reg);
+ devm_kfree(hba->dev, (*vreg_off)->name);
+ devm_kfree(hba->dev, *vreg_off);
+ *vreg_off = NULL;
+ }
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -754,6 +841,26 @@ out:
return err;
}
+static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ if (!ufs_mtk_is_pmc_via_fastauto(hba))
+ return false;
+
+ if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
+ return false;
+
+ if (dev_req_params->pwr_tx != FAST_MODE &&
+ dev_req_params->gear_tx < UFS_HS_G4)
+ return false;
+
+ if (dev_req_params->pwr_rx != FAST_MODE &&
+ dev_req_params->gear_rx < UFS_HS_G4)
+ return false;
+
+ return true;
+}
+
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
@@ -763,8 +870,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
int ret;
ufshcd_init_pwr_dev_param(&host_cap);
- host_cap.hs_rx_gear = UFS_HS_G4;
- host_cap.hs_tx_gear = UFS_HS_G4;
+ host_cap.hs_rx_gear = UFS_HS_G5;
+ host_cap.hs_tx_gear = UFS_HS_G5;
ret = ufshcd_get_pwr_dev_param(&host_cap,
dev_max_params,
@@ -774,6 +881,32 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
__func__);
}
+ if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ dev_req_params->lane_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ dev_req_params->lane_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ dev_req_params->hs_rate);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_NO_ADAPT);
+
+ ret = ufshcd_uic_change_pwr_mode(hba,
+ FASTAUTO_MODE << 4 | FASTAUTO_MODE);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
+ __func__, ret);
+ }
+ }
+
if (host->hw_ver.major >= 3) {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
@@ -963,6 +1096,11 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
+ /* Disable reset confirm feature by UniPro */
+ ufshcd_writel(hba,
+ (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
+ REG_UFS_XOUFS_CTRL);
+
err = ufs_mtk_unipro_set_lpm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
@@ -973,17 +1111,52 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
return 0;
}
-static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct ufs_vreg *vccqx = NULL;
+
+ if (hba->vreg_info.vccq)
+ vccqx = hba->vreg_info.vccq;
+ else
+ vccqx = hba->vreg_info.vccq2;
+
+ regulator_set_mode(vccqx->reg,
+ lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+}
+
+static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_pwr_ctrl(!lpm,
+ (unsigned long)hba->dev_info.wspecversion,
+ res);
+}
+
+static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
- if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
+ /* Skip if VCC is assumed always-on */
+ if (!hba->vreg_info.vcc)
+ return;
+
+ /* Bypass LPM when device is still active */
+ if (lpm && ufshcd_is_ufs_dev_active(hba))
+ return;
+
+ /* Bypass LPM if VCC is enabled */
+ if (lpm && hba->vreg_info.vcc->enabled)
return;
- if (lpm && !hba->vreg_info.vcc->enabled)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_IDLE);
- else if (!lpm)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_NORMAL);
+ if (lpm) {
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ } else {
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ }
}
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
@@ -1026,7 +1199,6 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
- ufs_mtk_vreg_set_lpm(hba, true);
err = ufs_mtk_mphy_power_on(hba, false);
if (err)
goto fail;
@@ -1035,6 +1207,8 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (ufshcd_is_link_off(hba))
ufs_mtk_device_reset_ctrl(0, res);
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
+
return 0;
fail:
/*
@@ -1049,13 +1223,17 @@ fail:
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ struct arm_smccc_res res;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
goto fail;
- ufs_mtk_vreg_set_lpm(hba, false);
-
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1087,8 +1265,10 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG)
+ if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
+ }
/*
* Decide waiting time before gating reference clock and
@@ -1104,7 +1284,6 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
else
ufs_mtk_setup_ref_clk_wait_us(hba,
REFCLK_DEFAULT_WAIT_US);
-
return 0;
}
@@ -1122,6 +1301,9 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
}
+
+ ufs_mtk_vreg_fix_vcc(hba);
+ ufs_mtk_vreg_fix_vccqx(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -1220,9 +1402,59 @@ static int ufs_mtk_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ufs_mtk_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static int ufs_mtk_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ufshcd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_runtime_resume(dev);
+}
+
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
+ ufs_mtk_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
+ ufs_mtk_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
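
For context, a minimal userspace toy (not kernel code) of the ordering the new wrappers above enforce: the generic handler runs first on suspend and last on resume, with the vendor regulator low-power step sandwiched in between and skipped when the core suspend fails.

#include <stdio.h>

static int core_suspend(void) { puts("core suspend"); return 0; }
static int core_resume(void)  { puts("core resume");  return 0; }

static void vreg_set_lpm(int lpm)
{
	printf("regulators -> %s\n", lpm ? "low power" : "normal");
}

static int vendor_suspend(void)
{
	int ret = core_suspend();	/* generic work first */

	if (ret)
		return ret;		/* leave the rails untouched on failure */
	vreg_set_lpm(1);		/* only then drop the rails */
	return 0;
}

static int vendor_resume(void)
{
	vreg_set_lpm(0);		/* restore the rails before the core resumes */
	return core_resume();
}

int main(void)
{
	vendor_suspend();
	vendor_resume();
	return 0;
}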
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 414dca86c09f..aa26d415527b 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -7,11 +7,13 @@
#define _UFS_MEDIATEK_H
#include <linux/bitops.h>
+#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
* Vendor specific UFSHCI Registers
*/
+#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
@@ -83,6 +85,9 @@ enum {
#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5)
+#define UFS_MTK_SIP_GET_VCC_NUM BIT(6)
+#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7)
/*
* VS_DEBUGCLOCKENABLE
@@ -108,6 +113,7 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
+ UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6,
};
struct ufs_mtk_crypt_cfg {
@@ -126,6 +132,7 @@ struct ufs_mtk_hw_ver {
struct ufs_mtk_host {
struct phy *mphy;
+ struct pm_qos_request pm_qos_req;
struct regulator *reg_va09;
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
@@ -135,6 +142,7 @@ struct ufs_mtk_host {
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
+ bool pm_qos_init;
bool unipro_lpm;
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
@@ -142,4 +150,70 @@ struct ufs_mtk_host {
u32 ip_ver;
};
+/*
+ * Multi-VCC by Numbering
+ */
+enum ufs_mtk_vcc_num {
+ UFS_VCC_NONE = 0,
+ UFS_VCC_1,
+ UFS_VCC_2,
+ UFS_VCC_MAX
+};
+
+/*
+ * Host Power Control options
+ */
+enum {
+ HOST_PWR_HCI = 0,
+ HOST_PWR_MPHY
+};
+
+/*
+ * SMC call wrapper function
+ */
+struct ufs_mtk_smc_arg {
+ unsigned long cmd;
+ struct arm_smccc_res *res;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+ unsigned long v4;
+ unsigned long v5;
+ unsigned long v6;
+ unsigned long v7;
+};
+
+static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s)
+{
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL,
+ s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res);
+}
+
+#define ufs_mtk_smc(...) \
+ _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__})
+
+/*
+ * SMC call interface
+ */
+#define ufs_mtk_va09_pwr_ctrl(res, on) \
+ ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on)
+
+#define ufs_mtk_crypto_ctrl(res, enable) \
+ ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable)
+
+#define ufs_mtk_ref_clk_notify(on, stage, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high)
+
+#define ufs_mtk_host_pwr_ctrl(opt, on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on)
+
+#define ufs_mtk_get_vcc_num(res) \
+ ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res))
+
+#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver)
+
#endif /* !_UFS_MEDIATEK_H */
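
The ufs_mtk_smc() helper above relies on a standard C trick: a variadic macro that wraps its arguments in a struct compound literal, so any omitted trailing arguments default to zero. A self-contained userspace sketch of the same idiom (the names here are made up for the example):

#include <stdio.h>

struct smc_arg {
	unsigned long cmd;
	unsigned long v1;
	unsigned long v2;
	unsigned long v3;
};

static void _do_smc(struct smc_arg s)
{
	printf("cmd=%lu v1=%lu v2=%lu v3=%lu\n", s.cmd, s.v1, s.v2, s.v3);
}

/* Unspecified members of the compound literal are zero-initialized. */
#define do_smc(...) _do_smc((struct smc_arg) { __VA_ARGS__ })

int main(void)
{
	do_smc(1, 10);			/* v2 and v3 default to 0 */
	do_smc(2, 10, 20, 30);
	return 0;
}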
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index f10d4668814c..473fad83701e 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -58,19 +58,6 @@ static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
-static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
-{
- int err = 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
- __func__, err);
-
- return err;
-}
-
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
@@ -194,13 +181,6 @@ out:
return err;
}
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
- u32 tx_lanes;
-
- return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-}
-
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err;
@@ -570,9 +550,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
err = ufshcd_disable_host_tx_lcc(hba);
break;
- case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
- break;
default:
break;
}
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
new file mode 100644
index 000000000000..f8a5e79ed3b4
--- /dev/null
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Renesas UFS host controller driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <ufs/ufshcd.h>
+
+#include "ufshcd-pltfrm.h"
+
+struct ufs_renesas_priv {
+ bool initialized; /* The hardware needs initialization once */
+};
+
+enum {
+ SET_PHY_INDEX_LO = 0,
+ SET_PHY_INDEX_HI,
+ TIMER_INDEX,
+ MAX_INDEX
+};
+
+enum ufs_renesas_init_param_mode {
+ MODE_RESTORE,
+ MODE_SET,
+ MODE_SAVE,
+ MODE_POLL,
+ MODE_WAIT,
+ MODE_WRITE,
+};
+
+#define PARAM_RESTORE(_reg, _index) \
+ { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
+#define PARAM_SET(_index, _set) \
+ { .mode = MODE_SET, .index = _index, .u.set = _set }
+#define PARAM_SAVE(_reg, _mask, _index) \
+ { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
+ .index = _index }
+#define PARAM_POLL(_reg, _expected, _mask) \
+ { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
+ .mask = (u32)(_mask) }
+#define PARAM_WAIT(_delay_us) \
+ { .mode = MODE_WAIT, .u.delay_us = _delay_us }
+
+#define PARAM_WRITE(_reg, _val) \
+ { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
+
+#define PARAM_WRITE_D0_D4(_d0, _d4) \
+ PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
+
+#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_RESTORE_800_80C_POLL(_index) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE(0xd0, 0x00000800), \
+ PARAM_RESTORE(0xd4, _index), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_828_82C_POLL(_data_828) \
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
+ PARAM_WRITE_D0_D4(0x00000828, _data_828), \
+ PARAM_WRITE(0xd0, 0x0000082c), \
+ PARAM_POLL(0xd4, _data_828, _data_828)
+
+#define PARAM_WRITE_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_SET_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE_804_80C_POLL(0x1a, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
+ PARAM_WRITE_804_80C_POLL(0x1b, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0), \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
+ PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_POLL(0xd4, _expected, _mask), \
+ PARAM_WRITE(0xf0, 0)
+
+struct ufs_renesas_init_param {
+ enum ufs_renesas_init_param_mode mode;
+ u32 reg;
+ union {
+ u32 expected;
+ u32 delay_us;
+ u32 set;
+ u32 val;
+ } u;
+ u32 mask;
+ u32 index;
+};
+
+/* This setting is for SERIES B */
+static const struct ufs_renesas_init_param ufs_param[] = {
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
+
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE(0xc0, 0x41584901),
+
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
+ PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
+ PARAM_WRITE(0xd0, 0x0000080c),
+ PARAM_POLL(0xd4, BIT(8), BIT(8)),
+
+ PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
+
+ PARAM_WRITE(0xd0, 0x00000804),
+ PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
+
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
+ PARAM_WRITE(0xd4, 0x00000000),
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
+ PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
+ PARAM_WRITE(0xd0, 0x0000082c),
+ PARAM_POLL(0xd4, BIT(27), BIT(27)),
+ PARAM_WRITE(0xd0, 0x00000d2c),
+ PARAM_POLL(0xd4, BIT(0), BIT(0)),
+
+ /* phy setup */
+ PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+ PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
+ PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
+
+ PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
+ PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
+
+ PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
+ PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
+
+ PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
+ PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
+ PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
+ PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
+ PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
+ PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
+
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+
+ PARAM_WRITE_PHY(0x0028, 0x0061),
+ PARAM_WRITE_PHY(0x4014, 0x0061),
+ PARAM_SET_PHY(0x401c, BIT(2)),
+ PARAM_WRITE_PHY(0x4000, 0x0000),
+ PARAM_WRITE_PHY(0x4001, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0001),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0002),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x0000),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x001a),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
+ PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
+ PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
+ /* end of phy setup */
+
+ PARAM_WRITE(0xf0, 0),
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_RESTORE(0xd4, TIMER_INDEX),
+};
+
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
+
+static void ufs_renesas_reg_control(struct ufs_hba *hba,
+ const struct ufs_renesas_init_param *p)
+{
+ static u32 save[MAX_INDEX];
+ int ret;
+ u32 val;
+
+ WARN_ON(p->index >= MAX_INDEX);
+
+ switch (p->mode) {
+ case MODE_RESTORE:
+ ufshcd_writel(hba, save[p->index], p->reg);
+ break;
+ case MODE_SET:
+ save[p->index] |= p->u.set;
+ break;
+ case MODE_SAVE:
+ save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
+ break;
+ case MODE_POLL:
+ ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
+ val,
+ (val & p->mask) == p->u.expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, p->mask, p->u.expected);
+ break;
+ case MODE_WAIT:
+ if (p->u.delay_us > 1000)
+ mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
+ else
+ udelay(p->u.delay_us);
+ break;
+ case MODE_WRITE:
+ ufshcd_writel(hba, p->u.val, p->reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ufs_renesas_pre_init(struct ufs_hba *hba)
+{
+ const struct ufs_renesas_init_param *p = ufs_param;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
+ ufs_renesas_reg_control(hba, &p[i]);
+}
+
+static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ if (priv->initialized)
+ return 0;
+
+ if (status == PRE_CHANGE)
+ ufs_renesas_pre_init(hba);
+
+ priv->initialized = true;
+
+ return 0;
+}
+
+static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ if (on && status == PRE_CHANGE)
+ pm_runtime_get_sync(hba->dev);
+ else if (!on && status == POST_CHANGE)
+ pm_runtime_put(hba->dev);
+
+ return 0;
+}
+
+static int ufs_renesas_init(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv;
+
+ priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, priv);
+
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_renesas_vops = {
+ .name = "renesas",
+ .init = ufs_renesas_init,
+ .setup_clocks = ufs_renesas_setup_clocks,
+ .hce_enable_notify = ufs_renesas_hce_enable_notify,
+ .dbg_register_dump = ufs_renesas_dbg_register_dump,
+};
+
+static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = {
+ { .compatible = "renesas,r8a779f0-ufs" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ufs_renesas_of_match);
+
+static int ufs_renesas_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
+}
+
+static int ufs_renesas_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+
+ return 0;
+}
+
+static struct platform_driver ufs_renesas_platform = {
+ .probe = ufs_renesas_probe,
+ .remove = ufs_renesas_remove,
+ .driver = {
+ .name = "ufshcd-renesas",
+ .of_match_table = of_match_ptr(ufs_renesas_of_match),
+ },
+};
+module_platform_driver(ufs_renesas_platform);
+
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_DESCRIPTION("Renesas UFS host controller driver");
+MODULE_LICENSE("Dual MIT/GPL");
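
The new driver above is essentially a table-driven initializer: a long array of parameter entries is executed by one small interpreter (ufs_renesas_reg_control) instead of hundreds of hand-written register accesses. A stripped-down userspace sketch of that approach, with registers modelled as a plain array and only three of the modes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum mode { MODE_WRITE, MODE_SET, MODE_WAIT };

struct init_param {
	enum mode mode;
	uint32_t reg;
	uint32_t val;
};

#define PARAM_WRITE(_reg, _val)	{ .mode = MODE_WRITE, .reg = _reg, .val = _val }
#define PARAM_SET(_reg, _bits)	{ .mode = MODE_SET, .reg = _reg, .val = _bits }
#define PARAM_WAIT(_us)		{ .mode = MODE_WAIT, .val = _us }

static uint32_t regs[16];

static void run_param(const struct init_param *p)
{
	switch (p->mode) {
	case MODE_WRITE:
		regs[p->reg] = p->val;		/* plain register write */
		break;
	case MODE_SET:
		regs[p->reg] |= p->val;		/* read-modify-write set */
		break;
	case MODE_WAIT:
		/* a real driver would delay for p->val microseconds here */
		break;
	}
}

int main(void)
{
	static const struct init_param seq[] = {
		PARAM_WRITE(0, 0x49425308),
		PARAM_WAIT(1),
		PARAM_SET(1, 0x100),
	};
	size_t i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		run_param(&seq[i]);
	printf("reg0=0x%08x reg1=0x%08x\n", regs[0], regs[1]);
	return 0;
}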
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 04166bda41da..1c91f43e15c8 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -24,7 +24,7 @@ struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
-enum {
+enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
@@ -42,6 +42,15 @@ static const guid_t intel_dsm_guid =
GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+static bool __intel_dsm_supported(struct intel_host *host,
+ enum intel_ufs_dsm_func_id fn)
+{
+ return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
+}
+
+#define INTEL_DSM_SUPPORTED(host, name) \
+ __intel_dsm_supported(host, INTEL_DSM_##name)
+
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
@@ -71,7 +80,7 @@ out:
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
- if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ if (!__intel_dsm_supported(intel_host, fn))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
@@ -300,7 +309,7 @@ static int ufs_intel_device_reset(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
u32 result = 0;
int err;
@@ -342,7 +351,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
if (hba->vops->device_reset)
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
} else {
@@ -426,6 +435,7 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
{
hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
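
A small userspace illustration of the bug the __intel_dsm_supported() helper above removes: INTEL_DSM_RESET is a function index, not a bit mask, so testing it directly against the dsm_fns bitmap gives the wrong answer; the index has to be converted with (1u << fn) first. The enum names below are shortened for the example.

#include <stdio.h>

enum dsm_func_id { DSM_FNS = 0, DSM_RESET = 1 };

static int dsm_supported(unsigned int fns, enum dsm_func_id fn)
{
	return fn < 32 && (fns & (1u << fn));
}

int main(void)
{
	unsigned int fns = 1u << DSM_RESET;	/* only RESET advertised: 0b10 */

	printf("index test: %d\n", !!(fns & DSM_RESET));		/* 0 - wrong, tests bit 0 */
	printf("mask test : %d\n", dsm_supported(fns, DSM_RESET));	/* 1 - correct */
	return 0;
}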
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index e7332cc65b1f..5739ff007828 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -26,7 +26,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
int i;
struct device *dev = hba->dev;
struct device_node *np = dev->of_node;
- char *name;
+ const char *name;
u32 *clkfreq = NULL;
struct ufs_clk_info *clki;
int len = 0;
@@ -79,8 +79,8 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
}
for (i = 0; i < sz; i += 2) {
- ret = of_property_read_string_index(np,
- "clock-names", i/2, (const char **)&name);
+ ret = of_property_read_string_index(np, "clock-names", i/2,
+ &name);
if (ret)
goto out;
@@ -108,9 +108,20 @@ out:
return ret;
}
+static bool phandle_exists(const struct device_node *np,
+ const char *phandle_name, int index)
+{
+ struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
+
+ if (parse_np)
+ of_node_put(parse_np);
+
+ return parse_np != NULL;
+}
+
#define MAX_PROP_SIZE 32
-static int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg)
{
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
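
The phandle_exists() helper above exists because of_parse_phandle() returns a node with its reference count raised, while the caller only wants a yes/no answer. A toy userspace sketch of that acquire-then-release existence test, with a hand-rolled refcounted object standing in for a device_node:

#include <stdio.h>

struct node {
	int refcount;
};

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	if (n)
		n->refcount--;
}

/* Take the reference, drop it right away, report only whether it existed. */
static int node_exists(struct node *n)
{
	struct node *ref = node_get(n);

	if (ref)
		node_put(ref);
	return ref != NULL;
}

int main(void)
{
	struct node n = { .refcount = 1 };

	printf("exists=%d refcount=%d\n", node_exists(&n), n.refcount);
	return 0;
}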
@@ -122,7 +133,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
}
snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
- if (!of_parse_phandle(np, prop_name, 0)) {
+ if (!phandle_exists(np, prop_name, 0)) {
dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
__func__, prop_name);
goto out;
@@ -145,6 +156,7 @@ out:
*out_vreg = vreg;
return 0;
}
+EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
/**
* ufshcd_parse_regulator_info - get regulator info from device tree
@@ -208,8 +220,8 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
*
* Returns 0 on success, non-zero value on failure
*/
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
- struct ufs_pa_layer_attr *dev_max,
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
+ const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr)
{
int min_pltfrm_gear;
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 43c2e412bd99..2e4ba2bfbcad 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -25,12 +25,14 @@ struct ufs_dev_params {
u32 desired_working_mode;
};
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
- struct ufs_pa_layer_attr *dev_max,
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param,
+ const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg);
#endif /* UFSHCD_PLTFRM_H_ */
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index e9096f53b4cc..83966dbd3bbf 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Programmable Real-Time Unit Sub System (PRUSS) UIO driver (uio_pruss)
*
@@ -5,15 +6,6 @@
* and DDR RAM to user space for applications interacting with PRUSS firmware
*
* Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 786299892c7f..5812f7ea7f90 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -9,7 +9,7 @@
 * HISTORY : some part of the code was based on ueagle 1.3 BSD driver,
 * Damien Bergamini agreed to put his code under a DUAL GPL/BSD license.
*
- * The rest of the code was was rewritten from scratch.
+ * The rest of the code was rewritten from scratch.
*/
#include <linux/module.h>
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 5c15c48952a6..d21b69997e75 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -220,7 +220,7 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
if (!priv_ep->trb_pool) {
priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
- GFP_DMA32 | GFP_ATOMIC,
+ GFP_ATOMIC,
&priv_ep->trb_pool_dma);
if (!priv_ep->trb_pool)
@@ -625,9 +625,9 @@ static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
trace_cdns3_wa2(priv_ep, "removes eldest request");
kfree(priv_req->request.buf);
+ list_del_init(&priv_req->list);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
if (!chain)
@@ -2284,11 +2284,16 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
int ret = 0;
int val;
+ if (!ep) {
+ pr_debug("usbss: ep not configured?\n");
+ return -EINVAL;
+ }
+
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
comp_desc = priv_ep->endpoint.comp_desc;
- if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
return -EINVAL;
}
@@ -2600,7 +2605,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
struct usb_request *request)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
- struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_device *priv_dev;
struct usb_request *req, *req_temp;
struct cdns3_request *priv_req;
struct cdns3_trb *link_trb;
@@ -2611,6 +2616,8 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
if (!ep || !request || !ep->desc)
return -EINVAL;
+ priv_dev = priv_ep->cdns3_dev;
+
spin_lock_irqsave(&priv_dev->lock, flags);
priv_req = to_cdns3_request(request);
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 99440baa6458..a4a3be049910 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -49,6 +49,7 @@ enum ci_hw_regs {
OP_USBCMD,
OP_USBSTS,
OP_USBINTR,
+ OP_FRINDEX,
OP_DEVICEADDR,
OP_ENDPTLISTADDR,
OP_TTCTRL,
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 097142ffb184..9ffcecd3058c 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -348,25 +348,18 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->pinctrl = devm_pinctrl_get(dev);
if (PTR_ERR(data->pinctrl) == -ENODEV)
data->pinctrl = NULL;
- else if (IS_ERR(data->pinctrl)) {
- if (PTR_ERR(data->pinctrl) != -EPROBE_DEFER)
- dev_err(dev, "pinctrl get failed, err=%ld\n",
- PTR_ERR(data->pinctrl));
- return PTR_ERR(data->pinctrl);
- }
+ else if (IS_ERR(data->pinctrl))
+ return dev_err_probe(dev, PTR_ERR(data->pinctrl),
+ "pinctrl get failed\n");
data->hsic_pad_regulator =
devm_regulator_get_optional(dev, "hsic");
if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
		/* no pad regulator is needed */
data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator)) {
- if (PTR_ERR(data->hsic_pad_regulator) != -EPROBE_DEFER)
- dev_err(dev,
- "Get HSIC pad regulator error: %ld\n",
- PTR_ERR(data->hsic_pad_regulator));
- return PTR_ERR(data->hsic_pad_regulator);
- }
+ } else if (IS_ERR(data->hsic_pad_regulator))
+ return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
+ "Get HSIC pad regulator error\n");
if (data->hsic_pad_regulator) {
ret = regulator_enable(data->hsic_pad_regulator);
@@ -458,9 +451,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
&pdata);
if (IS_ERR(data->ci_pdev)) {
ret = PTR_ERR(data->ci_pdev);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "ci_hdrc_add_device failed, err=%d\n",
- ret);
+ dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n");
goto err_clk;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 999c65390b7f..7daccb9c5006 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -21,7 +21,7 @@ struct imx_usbmisc_data {
unsigned int pwr_pol:1; /* power polarity */
unsigned int evdo:1; /* set external vbus divider option */
unsigned int ulpi:1; /* connected to an ULPI phy */
- unsigned int hsic:1; /* HSIC controlller */
+ unsigned int hsic:1; /* HSIC controller */
	unsigned int ext_id:1; /* ID from external event */
	unsigned int ext_vbus:1; /* Vbus from external event */
struct usb_phy *usb_phy;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5359b2a2e4d2..6330fa911792 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -53,6 +53,7 @@ static const u8 ci_regs_nolpm[] = {
[OP_USBCMD] = 0x00U,
[OP_USBSTS] = 0x04U,
[OP_USBINTR] = 0x08U,
+ [OP_FRINDEX] = 0x0CU,
[OP_DEVICEADDR] = 0x14U,
[OP_ENDPTLISTADDR] = 0x18U,
[OP_TTCTRL] = 0x1CU,
@@ -78,6 +79,7 @@ static const u8 ci_regs_lpm[] = {
[OP_USBCMD] = 0x00U,
[OP_USBSTS] = 0x04U,
[OP_USBINTR] = 0x08U,
+ [OP_FRINDEX] = 0x0CU,
[OP_DEVICEADDR] = 0x14U,
[OP_ENDPTLISTADDR] = 0x18U,
[OP_TTCTRL] = 0x1CU,
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 6ed4b00dba96..61b157b9c662 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -459,7 +459,7 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
if (on) {
- /* Enable power power */
+ /* Enable power */
hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_PP,
PORTSC_PP);
if (ci->platdata->reg_vbus) {
diff --git a/drivers/usb/chipidea/trace.h b/drivers/usb/chipidea/trace.h
index 1601fd86c4c1..ca0e65b48f0a 100644
--- a/drivers/usb/chipidea/trace.h
+++ b/drivers/usb/chipidea/trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(ci_log,
TP_ARGS(ci, vaf),
TP_STRUCT__entry(
__string(name, dev_name(ci->dev))
- __dynamic_array(char, msg, CHIPIDEA_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(ci->dev));
- vsnprintf(__get_str(msg), CHIPIDEA_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 3b8bf6daf7d0..8c3e3a635ac2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1654,6 +1654,19 @@ static const struct usb_ep_ops usb_ep_ops = {
/******************************************************************************
* GADGET block
*****************************************************************************/
+
+static int ci_udc_get_frame(struct usb_gadget *_gadget)
+{
+ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ci->lock, flags);
+ ret = hw_read(ci, OP_FRINDEX, 0x3fff);
+ spin_unlock_irqrestore(&ci->lock, flags);
+ return ret >> 3;
+}
+
/*
* ci_hdrc_gadget_connect: caller makes sure gadget driver is binded
*/
@@ -1810,6 +1823,7 @@ static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
* Check "usb_gadget.h" for details
*/
static const struct usb_gadget_ops usb_gadget_ops = {
+ .get_frame = ci_udc_get_frame,
.vbus_session = ci_udc_vbus_session,
.wakeup = ci_udc_wakeup,
.set_selfpowered = ci_udc_selfpowered,
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 9b9aea24d58c..483bcb1213f7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -119,7 +119,7 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
request, USB_RT_ACM, value,
acm->control->altsetting[0].desc.bInterfaceNumber,
- buf, len, 5000);
+ buf, len, USB_CTRL_SET_TIMEOUT);
dev_dbg(&acm->control->dev,
"%s - rq 0x%02x, val %#x, len %#x, result %d\n",
@@ -311,7 +311,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
dev_dbg(&acm->control->dev,
"%s - serial state: 0x%x\n", __func__, newctrl);
- if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
+ if (!acm->clocal && (acm->ctrlin & ~newctrl & USB_CDC_SERIAL_STATE_DCD)) {
dev_dbg(&acm->control->dev,
"%s - calling hangup\n", __func__);
tty_port_tty_hangup(&acm->port, false);
@@ -322,25 +322,25 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->ctrlin = newctrl;
acm->oldcount = acm->iocount;
- if (difference & ACM_CTRL_DSR)
+ if (difference & USB_CDC_SERIAL_STATE_DSR)
acm->iocount.dsr++;
- if (difference & ACM_CTRL_DCD)
+ if (difference & USB_CDC_SERIAL_STATE_DCD)
acm->iocount.dcd++;
- if (newctrl & ACM_CTRL_BRK) {
+ if (newctrl & USB_CDC_SERIAL_STATE_BREAK) {
acm->iocount.brk++;
tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
}
- if (newctrl & ACM_CTRL_RI)
+ if (newctrl & USB_CDC_SERIAL_STATE_RING_SIGNAL)
acm->iocount.rng++;
- if (newctrl & ACM_CTRL_FRAMING)
+ if (newctrl & USB_CDC_SERIAL_STATE_FRAMING)
acm->iocount.frame++;
- if (newctrl & ACM_CTRL_PARITY)
+ if (newctrl & USB_CDC_SERIAL_STATE_PARITY)
acm->iocount.parity++;
- if (newctrl & ACM_CTRL_OVERRUN)
+ if (newctrl & USB_CDC_SERIAL_STATE_OVERRUN)
acm->iocount.overrun++;
spin_unlock_irqrestore(&acm->read_lock, flags);
- if (newctrl & ACM_CTRL_BRK)
+ if (newctrl & USB_CDC_SERIAL_STATE_BREAK)
tty_flip_buffer_push(&acm->port);
if (difference)
@@ -658,7 +658,7 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
int res;
if (raise)
- val = ACM_CTRL_DTR | ACM_CTRL_RTS;
+ val = USB_CDC_CTRL_DTR | USB_CDC_CTRL_RTS;
else
val = 0;
@@ -903,11 +903,11 @@ static int acm_tty_tiocmget(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
- (acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
- (acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
- (acm->ctrlin & ACM_CTRL_RI ? TIOCM_RI : 0) |
- (acm->ctrlin & ACM_CTRL_DCD ? TIOCM_CD : 0) |
+ return (acm->ctrlout & USB_CDC_CTRL_DTR ? TIOCM_DTR : 0) |
+ (acm->ctrlout & USB_CDC_CTRL_RTS ? TIOCM_RTS : 0) |
+ (acm->ctrlin & USB_CDC_SERIAL_STATE_DSR ? TIOCM_DSR : 0) |
+ (acm->ctrlin & USB_CDC_SERIAL_STATE_RING_SIGNAL ? TIOCM_RI : 0) |
+ (acm->ctrlin & USB_CDC_SERIAL_STATE_DCD ? TIOCM_CD : 0) |
TIOCM_CTS;
}
@@ -918,10 +918,10 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
unsigned int newctrl;
newctrl = acm->ctrlout;
- set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
- (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
- clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
- (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);
+ set = (set & TIOCM_DTR ? USB_CDC_CTRL_DTR : 0) |
+ (set & TIOCM_RTS ? USB_CDC_CTRL_RTS : 0);
+ clear = (clear & TIOCM_DTR ? USB_CDC_CTRL_DTR : 0) |
+ (clear & TIOCM_RTS ? USB_CDC_CTRL_RTS : 0);
newctrl = (newctrl & ~clear) | set;
@@ -1068,9 +1068,9 @@ static void acm_tty_set_termios(struct tty_struct *tty,
if (C_BAUD(tty) == B0) {
newline.dwDTERate = acm->line.dwDTERate;
- newctrl &= ~ACM_CTRL_DTR;
+ newctrl &= ~USB_CDC_CTRL_DTR;
} else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
- newctrl |= ACM_CTRL_DTR;
+ newctrl |= USB_CDC_CTRL_DTR;
}
if (newctrl != acm->ctrlout)
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index d26ecd15be60..759ac15631d3 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -23,26 +23,6 @@
#define USB_RT_ACM (USB_TYPE_CLASS | USB_RECIP_INTERFACE)
/*
- * Output control lines.
- */
-
-#define ACM_CTRL_DTR 0x01
-#define ACM_CTRL_RTS 0x02
-
-/*
- * Input control lines and line errors.
- */
-
-#define ACM_CTRL_DCD 0x01
-#define ACM_CTRL_DSR 0x02
-#define ACM_CTRL_BRK 0x04
-#define ACM_CTRL_RI 0x08
-
-#define ACM_CTRL_FRAMING 0x10
-#define ACM_CTRL_PARITY 0x20
-#define ACM_CTRL_OVERRUN 0x40
-
-/*
* Internal driver structures.
*/
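
Whatever names the driver uses internally for these bits, acm_tty_tiocmget() exposes them to user space through the standard TIOCM_* flags. A minimal userspace reader (the /dev/ttyACM0 path is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyACM0", O_RDONLY | O_NOCTTY);
	int bits;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCMGET, &bits) < 0) {
		perror("TIOCMGET");
		close(fd);
		return 1;
	}
	printf("DTR=%d RTS=%d DSR=%d DCD=%d RI=%d\n",
	       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS),
	       !!(bits & TIOCM_DSR), !!(bits & TIOCM_CD),
	       !!(bits & TIOCM_RI));
	close(fd);
	return 0;
}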
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index 395f9bbe3056..b39c9f1c375d 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -257,6 +257,7 @@ static int usb_conn_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
+ device_set_wakeup_capable(&pdev->dev, true);
/* Perform initial detection */
usb_conn_queue_dwork(info, 0);
@@ -286,6 +287,14 @@ static int __maybe_unused usb_conn_suspend(struct device *dev)
{
struct usb_conn_info *info = dev_get_drvdata(dev);
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod)
+ enable_irq_wake(info->id_irq);
+ if (info->vbus_gpiod)
+ enable_irq_wake(info->vbus_irq);
+ return 0;
+ }
+
if (info->id_gpiod)
disable_irq(info->id_irq);
if (info->vbus_gpiod)
@@ -300,6 +309,14 @@ static int __maybe_unused usb_conn_resume(struct device *dev)
{
struct usb_conn_info *info = dev_get_drvdata(dev);
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod)
+ disable_irq_wake(info->id_irq);
+ if (info->vbus_gpiod)
+ disable_irq_wake(info->vbus_irq);
+ return 0;
+ }
+
pinctrl_pm_select_default_state(dev);
if (info->id_gpiod)
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 18e874b0441e..7d338e9c0657 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -12,6 +12,10 @@ usbcore-$(CONFIG_OF) += of.o
usbcore-$(CONFIG_USB_PCI) += hcd-pci.o
usbcore-$(CONFIG_ACPI) += usb-acpi.o
+ifdef CONFIG_USB_ONBOARD_HUB
+usbcore-y += ../misc/onboard_usb_hub_pdevs.o
+endif
+
obj-$(CONFIG_USB) += usbcore.o
obj-$(CONFIG_USB_LEDS_TRIGGER_USBPORT) += ledtrig-usbport.o
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index b87452e22835..7e7e119c253f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1482,7 +1482,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
* @msg: Power Management message describing this state transition
*
* This is the central routine for resuming USB devices. It calls the
- * the resume method for @udev and then calls the resume methods for all
+ * resume method for @udev and then calls the resume methods for all
* the interface drivers in @udev.
*
* Autoresume requests originating from a child device or an interface
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 06eea8848ccc..94b305bbd621 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1251,7 +1251,8 @@ void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
/*
- * Some usb host controllers can only perform dma using a small SRAM area.
+ * Some usb host controllers can only perform dma using a small SRAM area,
+ * or have restrictions on addressable DRAM.
* The usb core itself is however optimized for host controllers that can dma
* using regular system memory - like pci devices doing bus mastering.
*
@@ -1691,7 +1692,6 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
spin_lock_irq(&bh->lock);
bh->running = true;
- restart:
list_replace_init(&bh->head, &local_list);
spin_unlock_irq(&bh->lock);
@@ -1705,10 +1705,17 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
bh->completing_ep = NULL;
}
- /* check if there are new URBs to giveback */
+ /*
+	 * give back any newly queued URBs on the next tasklet run so that
+	 * this function does not keep running for too long.
+ */
spin_lock_irq(&bh->lock);
- if (!list_empty(&bh->head))
- goto restart;
+ if (!list_empty(&bh->head)) {
+ if (bh->high_prio)
+ tasklet_hi_schedule(&bh->bh);
+ else
+ tasklet_schedule(&bh->bh);
+ }
bh->running = false;
spin_unlock_irq(&bh->lock);
}
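
A plain C sketch (no kernel primitives) of the pattern this hunk switches to: the handler processes only the work it finds on entry and asks to be run again if more arrived meanwhile, instead of looping inside a single invocation.

#include <stdio.h>

#define QUEUE_MAX 16

static int queue[QUEUE_MAX];
static int queue_len;
static int need_reschedule;

static void handler(void)
{
	int batch[QUEUE_MAX];
	int i, n = queue_len;

	/* snapshot the pending work and empty the shared queue */
	for (i = 0; i < n; i++)
		batch[i] = queue[i];
	queue_len = 0;

	for (i = 0; i < n; i++) {
		printf("processed %d\n", batch[i]);
		if (batch[i] == 1)		/* new work may arrive while we run... */
			queue[queue_len++] = 99;
	}

	/* ...but we do not loop: request a separate run instead */
	need_reschedule = queue_len > 0;
}

int main(void)
{
	queue[queue_len++] = 1;

	handler();
	if (need_reschedule)
		handler();	/* the follow-up invocation picks up item 99 */
	return 0;
}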
@@ -1737,7 +1744,7 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct giveback_urb_bh *bh;
- bool running, high_prio_bh;
+ bool running;
/* pass status to tasklet via unlinked */
if (likely(!urb->unlinked))
@@ -1748,13 +1755,10 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
return;
}
- if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
+ if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe))
bh = &hcd->high_prio_bh;
- high_prio_bh = true;
- } else {
+ else
bh = &hcd->low_prio_bh;
- high_prio_bh = false;
- }
spin_lock(&bh->lock);
list_add_tail(&urb->urb_list, &bh->head);
@@ -1763,7 +1767,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
if (running)
;
- else if (high_prio_bh)
+ else if (bh->high_prio)
tasklet_hi_schedule(&bh->bh);
else
tasklet_schedule(&bh->bh);
@@ -2959,6 +2963,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
/* initialize tasklets */
init_giveback_urb_bh(&hcd->high_prio_bh);
+ hcd->high_prio_bh.high_prio = true;
init_giveback_urb_bh(&hcd->low_prio_bh);
/* enable irqs just before we start the controller,
@@ -3033,9 +3038,15 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
*/
void usb_remove_hcd(struct usb_hcd *hcd)
{
- struct usb_device *rhdev = hcd->self.root_hub;
+ struct usb_device *rhdev;
bool rh_registered;
+ if (!hcd) {
+ pr_debug("%s: hcd is NULL\n", __func__);
+ return;
+ }
+ rhdev = hcd->self.root_hub;
+
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
usb_get_dev(rhdev);
@@ -3117,8 +3128,18 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
if (IS_ERR(hcd->localmem_pool))
return PTR_ERR(hcd->localmem_pool);
- local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
- size, MEMREMAP_WC);
+ /*
+ * if a physical SRAM address was passed, map it, otherwise
+ * allocate system memory as a buffer.
+ */
+ if (phys_addr)
+ local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
+ size, MEMREMAP_WC);
+ else
+ local_mem = dmam_alloc_attrs(hcd->self.sysdev, size, &dma,
+ GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
if (IS_ERR(local_mem))
return PTR_ERR(local_mem);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 68e9121c1878..2633acde7ac1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -23,6 +23,7 @@
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/onboard_hub.h>
#include <linux/usb/otg.h>
#include <linux/usb/quirks.h>
#include <linux/workqueue.h>
@@ -613,7 +614,7 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
return ret;
}
-static int hub_port_status(struct usb_hub *hub, int port1,
+int usb_hub_port_status(struct usb_hub *hub, int port1,
u16 *status, u16 *change)
{
return hub_ext_port_status(hub, port1, HUB_PORT_STATUS,
@@ -1126,7 +1127,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
u16 portstatus, portchange;
portstatus = portchange = 0;
- status = hub_port_status(hub, port1, &portstatus, &portchange);
+ status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (status)
goto abort;
@@ -1752,6 +1753,8 @@ static void hub_disconnect(struct usb_interface *intf)
if (hub->quirk_disable_autosuspend)
usb_autopm_put_interface(intf);
+ onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
+
kref_put(&hub->kref, hub_release);
}
@@ -1869,6 +1872,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
INIT_DELAYED_WORK(&hub->leds, led_work);
INIT_DELAYED_WORK(&hub->init_work, NULL);
INIT_WORK(&hub->events, hub_event);
+ INIT_LIST_HEAD(&hub->onboard_hub_devs);
spin_lock_init(&hub->irq_urb_lock);
timer_setup(&hub->irq_urb_retry, hub_retry_irq_urb, 0);
usb_get_intf(intf);
@@ -1889,8 +1893,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_autopm_get_interface_no_resume(intf);
}
- if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
+ if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) {
+ onboard_hub_create_pdevs(hdev, &hub->onboard_hub_devs);
+
return 0;
+ }
hub_disconnect(intf);
return -ENODEV;
@@ -2855,7 +2862,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
&portstatus, &portchange,
&ext_portstatus);
else
- ret = hub_port_status(hub, port1, &portstatus,
+ ret = usb_hub_port_status(hub, port1, &portstatus,
&portchange);
if (ret < 0)
return ret;
@@ -2956,7 +2963,8 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
* If the caller hasn't explicitly requested a warm reset,
* double check and see if one is needed.
*/
- if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
+ if (usb_hub_port_status(hub, port1, &portstatus,
+ &portchange) == 0)
if (hub_port_warm_reset_required(hub, port1,
portstatus))
warm = true;
@@ -3008,7 +3016,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
* If a USB 3.0 device migrates from reset to an error
* state, re-issue the warm reset.
*/
- if (hub_port_status(hub, port1,
+ if (usb_hub_port_status(hub, port1,
&portstatus, &portchange) < 0)
goto done;
@@ -3074,7 +3082,7 @@ done:
}
/* Check if a port is power on */
-static int port_is_power_on(struct usb_hub *hub, unsigned portstatus)
+int usb_port_is_power_on(struct usb_hub *hub, unsigned int portstatus)
{
int ret = 0;
@@ -3140,13 +3148,13 @@ static int check_port_resume_type(struct usb_device *udev,
}
/* Is the device still present? */
else if (status || port_is_suspended(hub, portstatus) ||
- !port_is_power_on(hub, portstatus)) {
+ !usb_port_is_power_on(hub, portstatus)) {
if (status >= 0)
status = -ENODEV;
} else if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
if (retries--) {
usleep_range(200, 300);
- status = hub_port_status(hub, port1, &portstatus,
+ status = usb_hub_port_status(hub, port1, &portstatus,
&portchange);
goto retry;
}
@@ -3409,7 +3417,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
u16 portstatus, portchange;
portstatus = portchange = 0;
- ret = hub_port_status(hub, port1, &portstatus,
+ ret = usb_hub_port_status(hub, port1, &portstatus,
&portchange);
dev_dbg(&port_dev->dev,
@@ -3587,13 +3595,13 @@ static int wait_for_connected(struct usb_device *udev,
while (delay_ms < 2000) {
if (status || *portstatus & USB_PORT_STAT_CONNECTION)
break;
- if (!port_is_power_on(hub, *portstatus)) {
+ if (!usb_port_is_power_on(hub, *portstatus)) {
status = -ENODEV;
break;
}
msleep(20);
delay_ms += 20;
- status = hub_port_status(hub, port1, portstatus, portchange);
+ status = usb_hub_port_status(hub, port1, portstatus, portchange);
}
dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms);
return status;
@@ -3653,7 +3661,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
usb_lock_port(port_dev);
/* Skip the initial Clear-Suspend step for a remote wakeup */
- status = hub_port_status(hub, port1, &portstatus, &portchange);
+ status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (status == 0 && !port_is_suspended(hub, portstatus)) {
if (portchange & USB_PORT_STAT_C_SUSPEND)
pm_wakeup_event(&udev->dev, 0);
@@ -3678,7 +3686,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
* stop resume signaling. Then finish the resume
* sequence.
*/
- status = hub_port_status(hub, port1, &portstatus, &portchange);
+ status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
}
SuspendCleared:
@@ -3791,7 +3799,7 @@ static int check_ports_changed(struct usb_hub *hub)
u16 portstatus, portchange;
int status;
- status = hub_port_status(hub, port1, &portstatus, &portchange);
+ status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (!status && portchange)
return 1;
}
@@ -3946,7 +3954,7 @@ static const char * const usb3_lpm_names[] = {
* This function will fail if the SEL or PEL values for udev are greater than
* the maximum allowed values for the link state to be enabled.
*/
-static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
+static int usb_req_set_sel(struct usb_device *udev)
{
struct usb_set_sel_req *sel_values;
unsigned long long u1_sel;
@@ -3955,7 +3963,7 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
unsigned long long u2_pel;
int ret;
- if (udev->state != USB_STATE_CONFIGURED)
+ if (!udev->parent || udev->speed < USB_SPEED_SUPER || !udev->lpm_capable)
return 0;
/* Convert SEL and PEL stored in ns to us */
@@ -3972,35 +3980,15 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
* latency for the link state, and could start a device-initiated
* U1/U2 when the exit latencies are too high.
*/
- if ((state == USB3_LPM_U1 &&
- (u1_sel > USB3_LPM_MAX_U1_SEL_PEL ||
- u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) ||
- (state == USB3_LPM_U2 &&
- (u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
- u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) {
- dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n",
- usb3_lpm_names[state], u1_sel, u1_pel);
+ if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL ||
+ u1_pel > USB3_LPM_MAX_U1_SEL_PEL ||
+ u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
+ u2_pel > USB3_LPM_MAX_U2_SEL_PEL) {
+ dev_dbg(&udev->dev, "Device-initiated U1/U2 disabled due to long SEL or PEL\n");
return -EINVAL;
}
/*
- * If we're enabling device-initiated LPM for one link state,
- * but the other link state has a too high SEL or PEL value,
- * just set those values to the max in the Set SEL request.
- */
- if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL)
- u1_sel = USB3_LPM_MAX_U1_SEL_PEL;
-
- if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL)
- u1_pel = USB3_LPM_MAX_U1_SEL_PEL;
-
- if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL)
- u2_sel = USB3_LPM_MAX_U2_SEL_PEL;
-
- if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL)
- u2_pel = USB3_LPM_MAX_U2_SEL_PEL;
-
- /*
* usb_enable_lpm() can be called as part of a failed device reset,
* which may be initiated by an error path of a mass storage driver.
* Therefore, use GFP_NOIO.
@@ -4021,6 +4009,10 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
sel_values, sizeof *(sel_values),
USB_CTRL_SET_TIMEOUT);
kfree(sel_values);
+
+ if (ret > 0)
+ udev->lpm_devinit_allow = 1;
+
return ret;
}
@@ -4136,6 +4128,9 @@ static bool usb_device_may_initiate_lpm(struct usb_device *udev,
unsigned int sel; /* us */
int i, j;
+ if (!udev->lpm_devinit_allow)
+ return false;
+
if (state == USB3_LPM_U1)
sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
else if (state == USB3_LPM_U2)
@@ -4184,7 +4179,7 @@ static bool usb_device_may_initiate_lpm(struct usb_device *udev,
static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
- int timeout, ret;
+ int timeout;
__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
@@ -4196,17 +4191,6 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
(state == USB3_LPM_U2 && u2_mel == 0))
return;
- /*
- * First, let the device know about the exit latencies
- * associated with the link state we're about to enable.
- */
- ret = usb_req_set_sel(udev, state);
- if (ret < 0) {
- dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n",
- usb3_lpm_names[state]);
- return;
- }
-
/* We allow the host controller to set the U1/U2 timeout internally
* first, so that it can change its schedule to account for the
* additional latency to send data to a device in a lower power
@@ -4486,6 +4470,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
return 0;
}
+static int usb_req_set_sel(struct usb_device *udev)
+{
+ return 0;
+}
+
#endif /* CONFIG_PM */
/*
@@ -4554,7 +4543,7 @@ int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected)
struct usb_port *port_dev = hub->ports[port1 - 1];
for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
- ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ ret = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (ret < 0)
return ret;
@@ -5011,6 +5000,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
udev->lpm_capable = usb_device_supports_lpm(udev);
udev->lpm_disable_count = 1;
usb_set_lpm_parameters(udev);
+ usb_req_set_sel(udev);
}
}
@@ -5240,7 +5230,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
* but only if the port isn't owned by someone else.
*/
if (hub_is_port_power_switchable(hub)
- && !port_is_power_on(hub, portstatus)
+ && !usb_port_is_power_on(hub, portstatus)
&& !port_dev->port_owner)
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
@@ -5557,7 +5547,7 @@ static void port_event(struct usb_hub *hub, int port1)
clear_bit(port1, hub->event_bits);
clear_bit(port1, hub->wakeup_bits);
- if (hub_port_status(hub, port1, &portstatus, &portchange) < 0)
+ if (usb_hub_port_status(hub, port1, &portstatus, &portchange) < 0)
return;
if (portchange & USB_PORT_STAT_C_CONNECTION) {
@@ -5594,7 +5584,7 @@ static void port_event(struct usb_hub *hub, int port1)
USB_PORT_FEAT_C_OVER_CURRENT);
msleep(100); /* Cool down */
hub_power_on(hub, true);
- hub_port_status(hub, port1, &status, &unused);
+ usb_hub_port_status(hub, port1, &status, &unused);
if (status & USB_PORT_STAT_OVERCURRENT)
dev_err(&port_dev->dev, "over-current condition\n");
}
@@ -5638,7 +5628,7 @@ static void port_event(struct usb_hub *hub, int port1)
u16 unused;
msleep(20);
- hub_port_status(hub, port1, &portstatus, &unused);
+ usb_hub_port_status(hub, port1, &portstatus, &unused);
dev_dbg(&port_dev->dev, "Wait for inactive link disconnect detect\n");
continue;
} else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 22ea1f4f2d66..b2925856b4cb 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -73,6 +73,7 @@ struct usb_hub {
spinlock_t irq_urb_lock;
struct timer_list irq_urb_retry;
struct usb_port **ports;
+ struct list_head onboard_hub_devs;
};
/**
@@ -121,6 +122,9 @@ extern int hub_port_debounce(struct usb_hub *hub, int port1,
bool must_be_connected);
extern int usb_clear_port_feature(struct usb_device *hdev,
int port1, int feature);
+extern int usb_hub_port_status(struct usb_hub *hub, int port1,
+ u16 *status, u16 *change);
+extern int usb_port_is_power_on(struct usb_hub *hub, unsigned int portstatus);
static inline bool hub_is_port_power_switchable(struct usb_hub *hub)
{
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index d5bc36ca5b1f..38c1a4f4fdea 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -17,6 +17,88 @@ static int usb_port_block_power_off;
static const struct attribute_group *port_dev_group[];
+static ssize_t disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+ struct usb_interface *intf = to_usb_interface(hub->intfdev);
+ int port1 = port_dev->portnum;
+ u16 portstatus, unused;
+ bool disabled;
+ int rc;
+
+ rc = usb_autopm_get_interface(intf);
+ if (rc < 0)
+ return rc;
+
+ usb_lock_device(hdev);
+ if (hub->disconnected) {
+ rc = -ENODEV;
+ goto out_hdev_lock;
+ }
+
+ usb_hub_port_status(hub, port1, &portstatus, &unused);
+ disabled = !usb_port_is_power_on(hub, portstatus);
+
+out_hdev_lock:
+ usb_unlock_device(hdev);
+ usb_autopm_put_interface(intf);
+
+ if (rc)
+ return rc;
+
+ return sysfs_emit(buf, "%s\n", disabled ? "1" : "0");
+}
+
+static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+ struct usb_interface *intf = to_usb_interface(hub->intfdev);
+ int port1 = port_dev->portnum;
+ bool disabled;
+ int rc;
+
+ rc = strtobool(buf, &disabled);
+ if (rc)
+ return rc;
+
+ rc = usb_autopm_get_interface(intf);
+ if (rc < 0)
+ return rc;
+
+ usb_lock_device(hdev);
+ if (hub->disconnected) {
+ rc = -ENODEV;
+ goto out_hdev_lock;
+ }
+
+ if (disabled && port_dev->child)
+ usb_disconnect(&port_dev->child);
+
+ rc = usb_hub_set_port_power(hdev, hub, port1, !disabled);
+
+ if (disabled) {
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
+ if (!port_dev->is_superspeed)
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
+ }
+
+ if (!rc)
+ rc = count;
+
+out_hdev_lock:
+ usb_unlock_device(hdev);
+ usb_autopm_put_interface(intf);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(disable);
+
static ssize_t location_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -153,6 +235,7 @@ static struct attribute *port_dev_attrs[] = {
&dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
+ &dev_attr_disable.attr,
NULL,
};
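
The new per-port "disable" attribute added above is intended to be driven from user space. A minimal userspace sketch, assuming an illustrative sysfs path (the real path depends on the bus/port topology and is not taken from the patch):

/*
 * Hypothetical example: power off a hub port via the new "disable"
 * attribute. Writing "1" powers the port off; "0" powers it back on.
 * The path below is illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/bus/usb/devices/2-1/2-1:1.0/2-1-port4/disable";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}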
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index fa2e49d432ff..631574718d8a 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -35,7 +35,7 @@ static ssize_t field##_show(struct device *dev, \
return -EINTR; \
actconfig = udev->actconfig; \
if (actconfig) \
- rc = sprintf(buf, format_string, \
+ rc = sysfs_emit(buf, format_string, \
actconfig->desc.field); \
usb_unlock_device(udev); \
return rc; \
@@ -61,7 +61,7 @@ static ssize_t bMaxPower_show(struct device *dev,
return -EINTR;
actconfig = udev->actconfig;
if (actconfig)
- rc = sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
+ rc = sysfs_emit(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
usb_unlock_device(udev);
return rc;
}
@@ -80,7 +80,7 @@ static ssize_t configuration_show(struct device *dev,
return -EINTR;
actconfig = udev->actconfig;
if (actconfig && actconfig->string)
- rc = sprintf(buf, "%s\n", actconfig->string);
+ rc = sysfs_emit(buf, "%s\n", actconfig->string);
usb_unlock_device(udev);
return rc;
}
@@ -114,7 +114,7 @@ static ssize_t devspec_show(struct device *dev, struct device_attribute *attr,
{
struct device_node *of_node = dev->of_node;
- return sprintf(buf, "%pOF\n", of_node);
+ return sysfs_emit(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);
#endif
@@ -131,7 +131,7 @@ static ssize_t name##_show(struct device *dev, \
retval = usb_lock_device_interruptible(udev); \
if (retval < 0) \
return -EINTR; \
- retval = sprintf(buf, "%s\n", udev->name); \
+ retval = sysfs_emit(buf, "%s\n", udev->name); \
usb_unlock_device(udev); \
return retval; \
} \
@@ -175,7 +175,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
default:
speed = "unknown";
}
- return sprintf(buf, "%s\n", speed);
+ return sysfs_emit(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(speed);
@@ -185,7 +185,7 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->rx_lanes);
+ return sysfs_emit(buf, "%d\n", udev->rx_lanes);
}
static DEVICE_ATTR_RO(rx_lanes);
@@ -195,7 +195,7 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->tx_lanes);
+ return sysfs_emit(buf, "%d\n", udev->tx_lanes);
}
static DEVICE_ATTR_RO(tx_lanes);
@@ -205,7 +205,7 @@ static ssize_t busnum_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->bus->busnum);
+ return sysfs_emit(buf, "%d\n", udev->bus->busnum);
}
static DEVICE_ATTR_RO(busnum);
@@ -215,7 +215,7 @@ static ssize_t devnum_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->devnum);
+ return sysfs_emit(buf, "%d\n", udev->devnum);
}
static DEVICE_ATTR_RO(devnum);
@@ -225,7 +225,7 @@ static ssize_t devpath_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%s\n", udev->devpath);
+ return sysfs_emit(buf, "%s\n", udev->devpath);
}
static DEVICE_ATTR_RO(devpath);
@@ -237,7 +237,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr,
udev = to_usb_device(dev);
bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB);
- return sprintf(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff);
+ return sysfs_emit(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff);
}
static DEVICE_ATTR_RO(version);
@@ -247,7 +247,7 @@ static ssize_t maxchild_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->maxchild);
+ return sysfs_emit(buf, "%d\n", udev->maxchild);
}
static DEVICE_ATTR_RO(maxchild);
@@ -257,7 +257,7 @@ static ssize_t quirks_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "0x%x\n", udev->quirks);
+ return sysfs_emit(buf, "0x%x\n", udev->quirks);
}
static DEVICE_ATTR_RO(quirks);
@@ -267,7 +267,7 @@ static ssize_t avoid_reset_quirk_show(struct device *dev,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET));
+ return sysfs_emit(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET));
}
static ssize_t avoid_reset_quirk_store(struct device *dev,
@@ -297,7 +297,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev;
udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
+ return sysfs_emit(buf, "%d\n", atomic_read(&udev->urbnum));
}
static DEVICE_ATTR_RO(urbnum);
@@ -305,8 +305,8 @@ static ssize_t ltm_capable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (usb_device_supports_ltm(to_usb_device(dev)))
- return sprintf(buf, "%s\n", "yes");
- return sprintf(buf, "%s\n", "no");
+ return sysfs_emit(buf, "%s\n", "yes");
+ return sysfs_emit(buf, "%s\n", "no");
}
static DEVICE_ATTR_RO(ltm_capable);
@@ -317,7 +317,7 @@ static ssize_t persist_show(struct device *dev, struct device_attribute *attr,
{
struct usb_device *udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->persist_enabled);
+ return sysfs_emit(buf, "%d\n", udev->persist_enabled);
}
static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
@@ -372,7 +372,7 @@ static ssize_t connected_duration_show(struct device *dev,
{
struct usb_device *udev = to_usb_device(dev);
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
jiffies_to_msecs(jiffies - udev->connect_time));
}
static DEVICE_ATTR_RO(connected_duration);
@@ -394,14 +394,14 @@ static ssize_t active_duration_show(struct device *dev,
duration = jiffies_to_msecs(jiffies + udev->active_duration);
else
duration = jiffies_to_msecs(udev->active_duration);
- return sprintf(buf, "%u\n", duration);
+ return sysfs_emit(buf, "%u\n", duration);
}
static DEVICE_ATTR_RO(active_duration);
static ssize_t autosuspend_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev->power.autosuspend_delay / 1000);
+ return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay / 1000);
}
static ssize_t autosuspend_store(struct device *dev,
@@ -442,7 +442,7 @@ static ssize_t level_show(struct device *dev, struct device_attribute *attr,
warn_level();
if (udev->state != USB_STATE_SUSPENDED && !udev->dev.power.runtime_auto)
p = on_string;
- return sprintf(buf, "%s\n", p);
+ return sysfs_emit(buf, "%s\n", p);
}
static ssize_t level_store(struct device *dev, struct device_attribute *attr,
@@ -490,7 +490,7 @@ static ssize_t usb2_hardware_lpm_show(struct device *dev,
else
p = "disabled";
- return sprintf(buf, "%s\n", p);
+ return sysfs_emit(buf, "%s\n", p);
}
static ssize_t usb2_hardware_lpm_store(struct device *dev,
@@ -529,7 +529,7 @@ static ssize_t usb2_lpm_l1_timeout_show(struct device *dev,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->l1_params.timeout);
+ return sysfs_emit(buf, "%d\n", udev->l1_params.timeout);
}
static ssize_t usb2_lpm_l1_timeout_store(struct device *dev,
@@ -552,7 +552,7 @@ static ssize_t usb2_lpm_besl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", udev->l1_params.besl);
+ return sysfs_emit(buf, "%d\n", udev->l1_params.besl);
}
static ssize_t usb2_lpm_besl_store(struct device *dev,
@@ -589,7 +589,7 @@ static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
usb_unlock_device(udev);
- return sprintf(buf, "%s\n", p);
+ return sysfs_emit(buf, "%s\n", p);
}
static DEVICE_ATTR_RO(usb3_hardware_lpm_u1);
@@ -611,7 +611,7 @@ static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
usb_unlock_device(udev);
- return sprintf(buf, "%s\n", p);
+ return sysfs_emit(buf, "%s\n", p);
}
static DEVICE_ATTR_RO(usb3_hardware_lpm_u2);
@@ -694,7 +694,7 @@ field##_show(struct device *dev, struct device_attribute *attr, \
struct usb_device *udev; \
\
udev = to_usb_device(dev); \
- return sprintf(buf, format_string, \
+ return sysfs_emit(buf, format_string, \
le16_to_cpu(udev->descriptor.field)); \
} \
static DEVICE_ATTR_RO(field)
@@ -711,7 +711,7 @@ field##_show(struct device *dev, struct device_attribute *attr, \
struct usb_device *udev; \
\
udev = to_usb_device(dev); \
- return sprintf(buf, format_string, udev->descriptor.field); \
+ return sysfs_emit(buf, format_string, udev->descriptor.field); \
} \
static DEVICE_ATTR_RO(field)
@@ -727,7 +727,7 @@ static ssize_t authorized_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *usb_dev = to_usb_device(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", usb_dev->authorized);
+ return sysfs_emit(buf, "%u\n", usb_dev->authorized);
}
/*
@@ -918,7 +918,7 @@ static ssize_t authorized_default_show(struct device *dev,
struct usb_hcd *hcd;
hcd = bus_to_hcd(usb_bus);
- return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
+ return sysfs_emit(buf, "%u\n", hcd->dev_policy);
}
static ssize_t authorized_default_store(struct device *dev,
@@ -957,7 +957,7 @@ static ssize_t interface_authorized_default_show(struct device *dev,
struct usb_device *usb_dev = to_usb_device(dev);
struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
- return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
+ return sysfs_emit(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
}
/*
@@ -1066,7 +1066,7 @@ iad_##field##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
\
- return sprintf(buf, format_string, \
+ return sysfs_emit(buf, format_string, \
intf->intf_assoc->field); \
} \
static DEVICE_ATTR_RO(iad_##field)
@@ -1085,7 +1085,7 @@ field##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
\
- return sprintf(buf, format_string, \
+ return sysfs_emit(buf, format_string, \
intf->cur_altsetting->desc.field); \
} \
static DEVICE_ATTR_RO(field)
@@ -1107,7 +1107,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
- return sprintf(buf, "%s\n", string);
+ return sysfs_emit(buf, "%s\n", string);
}
static DEVICE_ATTR_RO(interface);
@@ -1122,7 +1122,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
udev = interface_to_usbdev(intf);
alt = READ_ONCE(intf->cur_altsetting);
- return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
+ return sysfs_emit(buf,
+ "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
@@ -1150,7 +1151,7 @@ static ssize_t supports_autosuspend_show(struct device *dev,
s = (!dev->driver || to_usb_driver(dev->driver)->supports_autosuspend);
device_unlock(dev);
- return sprintf(buf, "%u\n", s);
+ return sysfs_emit(buf, "%u\n", s);
}
static DEVICE_ATTR_RO(supports_autosuspend);
@@ -1163,7 +1164,7 @@ static ssize_t interface_authorized_show(struct device *dev,
{
struct usb_interface *intf = to_usb_interface(dev);
- return sprintf(buf, "%u\n", intf->authorized);
+ return sysfs_emit(buf, "%u\n", intf->authorized);
}
/*
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index d4dcaefd0ea4..6d93428432f1 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -124,22 +124,6 @@ out:
*/
#define USB_ACPI_LOCATION_VALID (1 << 31)
-static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
- int raw)
-{
- struct acpi_device *adev;
-
- if (!parent)
- return NULL;
-
- list_for_each_entry(adev, &parent->children, node) {
- if (acpi_device_adr(adev) == raw)
- return adev;
- }
-
- return acpi_find_child_device(parent, raw, false);
-}
-
static struct acpi_device *
usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
@@ -170,7 +154,7 @@ usb_acpi_get_companion_for_port(struct usb_port *port_dev)
port1 = port_dev->portnum;
}
- return usb_acpi_find_port(adev, port1);
+ return acpi_find_child_by_adr(adev, port1);
}
static struct acpi_device *
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 2f71636af6e1..11b15d7b357a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -801,7 +801,7 @@ EXPORT_SYMBOL_GPL(usb_intf_get_dma_device);
* is simple:
*
* When locking both a device and its parent, always lock the
- * the parent first.
+ * parent first.
*/
/**
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index fe2a58c75861..8b15742d9e8a 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3594,7 +3594,8 @@ void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
{
/* remove the soft-disconnect and let's go */
- dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
+ if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
+ dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
/**
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 3f107a06817d..aaf7b9fc4d34 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -52,6 +52,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
+#include <linux/usb/of.h>
#include "core.h"
#include "hcd.h"
@@ -999,7 +1000,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
/*
* Try to figure out if we're an even or odd frame. If we set
- * even and the current frame number is even the the transfer
+ * even and the current frame number is even the transfer
* will happen immediately. Similar if both are odd. If one is
* even and the other is odd then the transfer will happen when
* the frame number ticks.
@@ -5339,6 +5340,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
/* Don't support SG list at this point */
hcd->self.sg_tablesize = 0;
+ hcd->tpl_support = of_usb_host_tpl_support(hsotg->dev->of_node);
+
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_host(hsotg->uphy->otg, &hcd->self);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index cd9a734522a7..03ededa86da1 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -9,7 +9,7 @@ config USB_DWC3
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
- If you choose to build this driver is a dynamically linked
+ If you choose to build this driver as a dynamically linked
module, the module will be called dwc3.ko.
if USB_DWC3
@@ -165,7 +165,7 @@ config USB_DWC3_AM62
default USB_DWC3
help
Support TI's AM62 platforms with DesignWare Core USB3 IP.
- The Designware Core USB3 IP is progammed to operate in
+ The Designware Core USB3 IP is programmed to operate in
in USB 2.0 mode only.
Say 'Y' or 'M' here if you have one such device
endif
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 573421984948..c5c238ab3083 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -158,8 +158,13 @@ static void __dwc3_set_mode(struct work_struct *work)
break;
}
- /* For DRD host or device mode only */
- if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
+ /*
+ * When current_dr_role is not set, there's no role switching.
+ * Only perform GCTL.CoreSoftReset when there's DRD role switching.
+ */
+ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
+ DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
+ dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg |= DWC3_GCTL_CORESOFTRESET;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -426,7 +431,7 @@ static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
* otherwise ERR_PTR(errno).
*/
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
- unsigned length)
+ unsigned int length)
{
struct dwc3_event_buffer *evt;
@@ -469,7 +474,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
* Returns 0 on success otherwise negative errno. In the error case, dwc
* may contain some buffers allocated but not all which were requested.
*/
-static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
+static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
{
struct dwc3_event_buffer *evt;
@@ -1029,6 +1034,37 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}
+static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+{
+ u32 scale;
+ u32 reg;
+
+ if (!dwc->susp_clk)
+ return;
+
+ /*
+ * The power down scale field specifies how many suspend_clk
+ * periods fit into a 16KHz clock period. When performing
+ * the division, round up the remainder.
+ *
+ * The power down scale value is calculated using the fastest
+ * frequency of the suspend_clk. If it isn't fixed (but within
+ * the accuracy requirement), the driver may not know the max
+ * rate of the suspend_clk, so only update the power down scale
+ * if the default is less than the calculated value from
+ * clk_get_rate() or if the default is questionably high
+ * (3x or more) to be within the requirement.
+ */
+ scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
+ (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
+ reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
+ reg |= DWC3_GCTL_PWRDNSCALE(scale);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ }
+}
+
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
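
As a worked illustration of the power-down-scale comment above, assuming a 32.768 kHz suspend_clk (the rate is an assumption for the example, not taken from the patch):

/*
 * Illustrative only: how the PWRDNSCALE value is derived. scale is the
 * number of suspend_clk periods per 16 kHz period, rounded up.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long susp_clk_hz = 32768;	/* assumed suspend_clk rate */
	unsigned int scale = DIV_ROUND_UP(susp_clk_hz, 16000);

	/* 32768 / 16000 = 2.048, rounded up to 3 */
	printf("PWRDNSCALE = %u\n", scale);
	return 0;
}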
@@ -1105,6 +1141,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err1;
+ /* Set power down scale of suspend_clk */
+ dwc3_set_power_down_clk_scale(dwc);
+
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
@@ -1782,6 +1821,7 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
+ device_init_wakeup(&pdev->dev, of_property_read_bool(dev->of_node, "wakeup-source"));
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);
@@ -1943,7 +1983,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg)) {
+ if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
dwc3_core_exit(dwc);
break;
}
@@ -2004,7 +2044,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg)) {
+ if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
@@ -2081,8 +2121,6 @@ static int dwc3_runtime_suspend(struct device *dev)
if (ret)
return ret;
- device_init_wakeup(dev, true);
-
return 0;
}
@@ -2091,8 +2129,6 @@ static int dwc3_runtime_resume(struct device *dev)
struct dwc3 *dwc = dev_get_drvdata(dev);
int ret;
- device_init_wakeup(dev, false);
-
ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 81c486b3941c..4fe4287dc934 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -231,6 +231,7 @@
/* Global Configuration Register */
#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19)
+#define DWC3_GCTL_PWRDNSCALE_MASK GENMASK(31, 19)
#define DWC3_GCTL_U2RSTECN BIT(16)
#define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6)
#define DWC3_GCTL_CLK_BUS (0)
@@ -1086,6 +1087,8 @@ struct dwc3_scratchpad_array {
* @dis_u1_entry_quirk: set if link entering into U1 state needs to be disabled.
* @dis_u2_entry_quirk: set if link entering into U2 state needs to be disabled.
* @dis_rxdet_inp3_quirk: set if we disable Rx.Detect in P3
+ * @async_callbacks: if set, indicate that async callbacks will be used.
+ *
* @dis_u2_freeclk_exists_quirk : set if we clear u2_freeclk_exists
* in GUSB2PHYCFG, specify that USB2 PHY doesn't
* provide a free-running PHY clock.
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index fea7aca35dc8..173cf3579c55 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -195,8 +195,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
- ret = -EINVAL;
- goto err_clk_disable;
+ return -EINVAL;
}
data->rate_code = i;
@@ -204,7 +203,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
/* Read the syscon property and set the rate code */
ret = phy_syscon_pll_refclk(data);
if (ret)
- goto err_clk_disable;
+ return ret;
/* VBUS divider select */
data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
@@ -245,8 +244,6 @@ err_pm_disable:
clk_disable_unprepare(data->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
-err_clk_disable:
- clk_put(data->usb2_refclk);
return ret;
}
@@ -276,7 +273,6 @@ static int dwc3_ti_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- clk_put(data->usb2_refclk);
platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 6cba990da32e..c5e482f53e9d 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -17,10 +17,12 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/pm_domain.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
-
+#include <linux/usb/hcd.h>
+#include <linux/usb.h>
#include "core.h"
/* USB QSCRATCH Hardware registers */
@@ -76,6 +78,7 @@ struct dwc3_qcom {
int dp_hs_phy_irq;
int dm_hs_phy_irq;
int ss_phy_irq;
+ enum usb_device_speed usb2_speed;
struct extcon_dev *edev;
struct extcon_dev *host_edev;
@@ -296,50 +299,92 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
icc_put(qcom->icc_path_apps);
}
-static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
+static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
{
- if (qcom->hs_phy_irq) {
- disable_irq_wake(qcom->hs_phy_irq);
- disable_irq_nosync(qcom->hs_phy_irq);
- }
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+ struct usb_hcd *hcd = platform_get_drvdata(dwc->xhci);
+ struct usb_device *udev;
- if (qcom->dp_hs_phy_irq) {
- disable_irq_wake(qcom->dp_hs_phy_irq);
- disable_irq_nosync(qcom->dp_hs_phy_irq);
- }
+ /*
+ * It is possible to query the speed of all children of
+ * USB2.0 root hub via usb_hub_for_each_child(). DWC3 code
+ * currently supports only 1 port per controller. So
+ * this is sufficient.
+ */
+ udev = usb_hub_find_child(hcd->self.root_hub, 1);
- if (qcom->dm_hs_phy_irq) {
- disable_irq_wake(qcom->dm_hs_phy_irq);
- disable_irq_nosync(qcom->dm_hs_phy_irq);
- }
+ if (!udev)
+ return USB_SPEED_UNKNOWN;
+
+ return udev->speed;
+}
+
+static void dwc3_qcom_enable_wakeup_irq(int irq, unsigned int polarity)
+{
+ if (!irq)
+ return;
+
+ if (polarity)
+ irq_set_irq_type(irq, polarity);
+
+ enable_irq(irq);
+ enable_irq_wake(irq);
+}
+
+static void dwc3_qcom_disable_wakeup_irq(int irq)
+{
+ if (!irq)
+ return;
+
+ disable_irq_wake(irq);
+ disable_irq_nosync(irq);
+}
+
+static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
+{
+ dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
- if (qcom->ss_phy_irq) {
- disable_irq_wake(qcom->ss_phy_irq);
- disable_irq_nosync(qcom->ss_phy_irq);
+ if (qcom->usb2_speed == USB_SPEED_LOW) {
+ dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
+ } else if ((qcom->usb2_speed == USB_SPEED_HIGH) ||
+ (qcom->usb2_speed == USB_SPEED_FULL)) {
+ dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
+ } else {
+ dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
+ dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
}
+
+ dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq);
}
static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
{
- if (qcom->hs_phy_irq) {
- enable_irq(qcom->hs_phy_irq);
- enable_irq_wake(qcom->hs_phy_irq);
- }
+ dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq, 0);
- if (qcom->dp_hs_phy_irq) {
- enable_irq(qcom->dp_hs_phy_irq);
- enable_irq_wake(qcom->dp_hs_phy_irq);
- }
+ /*
+ * Configure DP/DM line interrupts based on the USB2 device attached to
+ * the root hub port. When HS/FS device is connected, configure the DP line
+ * as falling edge to detect both disconnect and remote wakeup scenarios. When
+ * LS device is connected, configure DM line as falling edge to detect both
+ * disconnect and remote wakeup. When no device is connected, configure both
+ * DP and DM lines as rising edge to detect HS/FS/LS device connect scenario.
+ */
- if (qcom->dm_hs_phy_irq) {
- enable_irq(qcom->dm_hs_phy_irq);
- enable_irq_wake(qcom->dm_hs_phy_irq);
+ if (qcom->usb2_speed == USB_SPEED_LOW) {
+ dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ } else if ((qcom->usb2_speed == USB_SPEED_HIGH) ||
+ (qcom->usb2_speed == USB_SPEED_FULL)) {
+ dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ } else {
+ dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq,
+ IRQ_TYPE_EDGE_RISING);
+ dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq,
+ IRQ_TYPE_EDGE_RISING);
}
- if (qcom->ss_phy_irq) {
- enable_irq(qcom->ss_phy_irq);
- enable_irq_wake(qcom->ss_phy_irq);
- }
+ dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
}
static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
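
The wakeup-IRQ configuration above reduces to a small speed-to-line mapping. A sketch restating that mapping (the helper name and enum are illustrative, not part of the patch):

/*
 * Illustrative summary of the DP/DM wakeup line selection used above:
 * LS device    -> DM line, falling edge
 * FS/HS device -> DP line, falling edge
 * no device    -> both lines, rising edge
 */
#include <linux/usb/ch9.h>

enum wakeup_line { LINE_DP, LINE_DM, LINE_BOTH };

static inline enum wakeup_line wakeup_line_for(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return LINE_DM;
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		return LINE_DP;
	default:	/* USB_SPEED_UNKNOWN: nothing attached */
		return LINE_BOTH;
	}
}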
@@ -361,8 +406,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
if (ret)
dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
- if (device_may_wakeup(qcom->dev))
+ if (device_may_wakeup(qcom->dev)) {
+ qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
dwc3_qcom_enable_interrupts(qcom);
+ }
qcom->is_suspended = true;
@@ -443,9 +490,9 @@ static int dwc3_qcom_get_irq(struct platform_device *pdev,
int ret;
if (np)
- ret = platform_get_irq_byname(pdev_irq, name);
+ ret = platform_get_irq_byname_optional(pdev_irq, name);
else
- ret = platform_get_irq(pdev_irq, num);
+ ret = platform_get_irq_optional(pdev_irq, num);
return ret;
}
@@ -710,12 +757,13 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
static int dwc3_qcom_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- int ret, i;
- bool ignore_pipe_clk;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ struct resource *res, *parent_res = NULL;
+ int ret, i;
+ bool ignore_pipe_clk;
+ struct generic_pm_domain *genpd;
qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
if (!qcom)
@@ -724,6 +772,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
+ genpd = pd_to_genpd(qcom->dev->pm_domain);
+
if (has_acpi_companion(dev)) {
qcom->acpi_pdata = acpi_device_get_match_data(dev);
if (!qcom->acpi_pdata) {
@@ -831,7 +881,17 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret)
goto interconnect_exit;
- device_init_wakeup(&pdev->dev, 1);
+ if (device_can_wakeup(&qcom->dwc3->dev)) {
+ /*
+ * Setting GENPD_FLAG_ALWAYS_ON flag takes care of keeping
+ * genpd on in both runtime suspend and system suspend cases.
+ */
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ device_init_wakeup(&pdev->dev, true);
+ } else {
+ genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
+ }
+
qcom->is_suspended = false;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 5d642660fd15..197af63f8d05 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -239,6 +239,8 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
dwc3_gadget_giveback(dep, req, -ECONNRESET);
}
+ dwc->eps[0]->trb_enqueue = 0;
+ dwc->eps[1]->trb_enqueue = 0;
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
}
@@ -473,7 +475,7 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
case USB_DEVICE_REMOTE_WAKEUP:
break;
/*
- * 9.4.1 says only only for SS, in AddressState only for
+ * 9.4.1 says only for SS, in AddressState only for
* default control pipe
*/
case USB_DEVICE_U1_ENABLE:
@@ -1140,6 +1142,11 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
return;
+ if (dwc->setup_packet_pending) {
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+
dwc->ep0state = EP0_STATUS_PHASE;
if (dwc->delayed_status) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8716bece1072..aeeec751c53c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -657,6 +657,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
/**
* dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
* @dwc: pointer to the DWC3 context
+ * @mult: multiplier to be used when calculating the fifo_size
*
* Calculates the size value based on the equation below:
*
@@ -1182,17 +1183,49 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
return trbs_left;
}
-static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
- dma_addr_t dma, unsigned int length, unsigned int chain,
- unsigned int node, unsigned int stream_id,
- unsigned int short_not_ok, unsigned int no_interrupt,
- unsigned int is_last, bool must_interrupt)
+/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
+ * @chain: should this TRB be chained to the next?
+ * @node: only for isochronous endpoints. First TRB needs different type.
+ * @use_bounce_buffer: set to use bounce buffer
+ * @must_interrupt: set to interrupt on TRB completion
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trb_length,
+ unsigned int chain, unsigned int node, bool use_bounce_buffer,
+ bool must_interrupt)
{
+ struct dwc3_trb *trb;
+ dma_addr_t dma;
+ unsigned int stream_id = req->request.stream_id;
+ unsigned int short_not_ok = req->request.short_not_ok;
+ unsigned int no_interrupt = req->request.no_interrupt;
+ unsigned int is_last = req->request.is_last;
struct dwc3 *dwc = dep->dwc;
struct usb_gadget *gadget = dwc->gadget;
enum usb_device_speed speed = gadget->speed;
- trb->size = DWC3_TRB_SIZE_LENGTH(length);
+ if (use_bounce_buffer)
+ dma = dep->dwc->bounce_addr;
+ else if (req->request.num_sgs > 0)
+ dma = sg_dma_address(req->start_sg);
+ else
+ dma = req->request.dma;
+
+ trb = &dep->trb_pool[dep->trb_enqueue];
+
+ if (!req->trb) {
+ dwc3_gadget_move_started_request(req);
+ req->trb = trb;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+ }
+
+ req->num_trbs++;
+
+ trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
trb->bpl = lower_32_bits(dma);
trb->bph = upper_32_bits(dma);
@@ -1232,10 +1265,10 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
unsigned int mult = 2;
unsigned int maxp = usb_endpoint_maxp(ep->desc);
- if (length <= (2 * maxp))
+ if (req->request.length <= (2 * maxp))
mult--;
- if (length <= maxp)
+ if (req->request.length <= maxp)
mult--;
trb->size |= DWC3_TRB_SIZE_PCM1(mult);
@@ -1309,50 +1342,6 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trace_dwc3_prepare_trb(dep, trb);
}
-/**
- * dwc3_prepare_one_trb - setup one TRB from one request
- * @dep: endpoint for which this request is prepared
- * @req: dwc3_request pointer
- * @trb_length: buffer size of the TRB
- * @chain: should this TRB be chained to the next?
- * @node: only for isochronous endpoints. First TRB needs different type.
- * @use_bounce_buffer: set to use bounce buffer
- * @must_interrupt: set to interrupt on TRB completion
- */
-static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned int trb_length,
- unsigned int chain, unsigned int node, bool use_bounce_buffer,
- bool must_interrupt)
-{
- struct dwc3_trb *trb;
- dma_addr_t dma;
- unsigned int stream_id = req->request.stream_id;
- unsigned int short_not_ok = req->request.short_not_ok;
- unsigned int no_interrupt = req->request.no_interrupt;
- unsigned int is_last = req->request.is_last;
-
- if (use_bounce_buffer)
- dma = dep->dwc->bounce_addr;
- else if (req->request.num_sgs > 0)
- dma = sg_dma_address(req->start_sg);
- else
- dma = req->request.dma;
-
- trb = &dep->trb_pool[dep->trb_enqueue];
-
- if (!req->trb) {
- dwc3_gadget_move_started_request(req);
- req->trb = trb;
- req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- }
-
- req->num_trbs++;
-
- __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
- stream_id, short_not_ok, no_interrupt, is_last,
- must_interrupt);
-}
-
static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
{
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
@@ -4249,7 +4238,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
}
evt->count = 0;
- evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
/* Unmask interrupt */
@@ -4261,6 +4249,9 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
+ /* Keep the clearing of DWC3_EVENT_PENDING at the end */
+ evt->flags &= ~DWC3_EVENT_PENDING;
+
return ret;
}
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 411eb489e0ff..cb523f118f04 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -57,18 +57,8 @@ struct f_acm {
/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
u16 port_handshake_bits;
-#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
-#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
-
/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
u16 serial_state;
-#define ACM_CTRL_OVERRUN (1 << 6)
-#define ACM_CTRL_PARITY (1 << 5)
-#define ACM_CTRL_FRAMING (1 << 4)
-#define ACM_CTRL_RI (1 << 3)
-#define ACM_CTRL_BRK (1 << 2)
-#define ACM_CTRL_DSR (1 << 1)
-#define ACM_CTRL_DCD (1 << 0)
};
static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -387,7 +377,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
value = 0;
/* FIXME we should not allow data to flow until the
- * host sets the ACM_CTRL_DTR bit; and when it clears
+ * host sets the USB_CDC_CTRL_DTR bit; and when it clears
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
@@ -585,7 +575,7 @@ static void acm_connect(struct gserial *port)
{
struct f_acm *acm = port_to_acm(port);
- acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ acm->serial_state |= USB_CDC_SERIAL_STATE_DSR | USB_CDC_SERIAL_STATE_DCD;
acm_notify_serial_state(acm);
}
@@ -593,7 +583,7 @@ static void acm_disconnect(struct gserial *port)
{
struct f_acm *acm = port_to_acm(port);
- acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ acm->serial_state &= ~(USB_CDC_SERIAL_STATE_DSR | USB_CDC_SERIAL_STATE_DCD);
acm_notify_serial_state(acm);
}
@@ -603,9 +593,9 @@ static int acm_send_break(struct gserial *port, int duration)
u16 state;
state = acm->serial_state;
- state &= ~ACM_CTRL_BRK;
+ state &= ~USB_CDC_SERIAL_STATE_BREAK;
if (duration)
- state |= ACM_CTRL_BRK;
+ state |= USB_CDC_SERIAL_STATE_BREAK;
acm->serial_state = state;
return acm_notify_serial_state(acm);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 3a77bca0ebe1..925e99f9775c 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -1192,13 +1192,14 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
u8 format;
int i, len;
+ format = common->cmnd[2] & 0xf;
+
if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
- start_track > 1) {
+ (start_track > 1 && format != 0x1)) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
- format = common->cmnd[2] & 0xf;
/*
* Check if CDB is old style SFF-8020i
* i.e. format is in 2 MSBs of byte 9
@@ -1208,8 +1209,8 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
format = (common->cmnd[9] >> 6) & 0x3;
switch (format) {
- case 0:
- /* Formatted TOC */
+ case 0: /* Formatted TOC */
+ case 1: /* Multi-session info */
len = 4 + 2*8; /* 4 byte header + 2 descriptors */
memset(buf, 0, len);
buf[1] = len - 2; /* TOC Length excludes length field */
@@ -1250,7 +1251,7 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
return len;
default:
- /* Multi-session, PMA, ATIP, CD-TEXT not supported/required */
+ /* PMA, ATIP, CD-TEXT not supported/required */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
@@ -2650,10 +2651,21 @@ static ssize_t file_store(struct device *dev, struct device_attribute *attr,
return fsg_store_file(curlun, filesem, buf, count);
}
+static ssize_t forced_eject_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+
+ return fsg_store_forced_eject(curlun, filesem, buf, count);
+}
+
static DEVICE_ATTR_RW(nofua);
/* mode wil be set in fsg_lun_attr_is_visible() */
static DEVICE_ATTR(ro, 0, ro_show, ro_store);
static DEVICE_ATTR(file, 0, file_show, file_store);
+static DEVICE_ATTR_WO(forced_eject);
/****************************** FSG COMMON ******************************/
@@ -2807,6 +2819,7 @@ static struct attribute *fsg_lun_dev_attrs[] = {
&dev_attr_ro.attr,
&dev_attr_file.attr,
&dev_attr_nofua.attr,
+ &dev_attr_forced_eject.attr,
NULL
};
@@ -3220,6 +3233,18 @@ static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
+static ssize_t fsg_lun_opts_forced_eject_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
+ struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
+
+ return fsg_store_forced_eject(opts->lun, &fsg_opts->common->filesem,
+ page, len);
+}
+
+CONFIGFS_ATTR_WO(fsg_lun_opts_, forced_eject);
+
static struct configfs_attribute *fsg_lun_attrs[] = {
&fsg_lun_opts_attr_file,
&fsg_lun_opts_attr_ro,
@@ -3227,6 +3252,7 @@ static struct configfs_attribute *fsg_lun_attrs[] = {
&fsg_lun_opts_attr_cdrom,
&fsg_lun_opts_attr_nofua,
&fsg_lun_opts_attr_inquiry_string,
+ &fsg_lun_opts_attr_forced_eject,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d3feeeb50841..71669e0e4d00 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -141,7 +141,8 @@ static struct usb_endpoint_descriptor uvc_fs_streaming_ep = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
| USB_ENDPOINT_XFER_ISOC,
- /* The wMaxPacketSize and bInterval values will be initialized from
+ /*
+ * The wMaxPacketSize and bInterval values will be initialized from
* module parameters.
*/
};
@@ -152,7 +153,8 @@ static struct usb_endpoint_descriptor uvc_hs_streaming_ep = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
| USB_ENDPOINT_XFER_ISOC,
- /* The wMaxPacketSize and bInterval values will be initialized from
+ /*
+ * The wMaxPacketSize and bInterval values will be initialized from
* module parameters.
*/
};
@@ -164,7 +166,8 @@ static struct usb_endpoint_descriptor uvc_ss_streaming_ep = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_SYNC_ASYNC
| USB_ENDPOINT_XFER_ISOC,
- /* The wMaxPacketSize and bInterval values will be initialized from
+ /*
+ * The wMaxPacketSize and bInterval values will be initialized from
* module parameters.
*/
};
@@ -172,7 +175,8 @@ static struct usb_endpoint_descriptor uvc_ss_streaming_ep = {
static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp = {
.bLength = sizeof(uvc_ss_streaming_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- /* The bMaxBurst, bmAttributes and wBytesPerInterval values will be
+ /*
+ * The bMaxBurst, bmAttributes and wBytesPerInterval values will be
* initialized from module parameters.
*/
};
@@ -234,7 +238,8 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE)
return -EINVAL;
- /* Tell the complete callback to generate an event for the next request
+ /*
+ * Tell the complete callback to generate an event for the next request
* that will be enqueued by UVCIOC_SEND_RESPONSE.
*/
uvc->event_setup_out = !(ctrl->bRequestType & USB_DIR_IN);
@@ -500,7 +505,8 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
if (!uvc_control_desc || !uvc_streaming_cls)
return ERR_PTR(-ENODEV);
- /* Descriptors layout
+ /*
+ * Descriptors layout
*
* uvc_iad
* uvc_control_intf
@@ -597,8 +603,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvcg_info(f, "%s()\n", __func__);
opts = fi_to_f_uvc_opts(f->fi);
- /* Sanity check the streaming endpoint module parameters.
- */
+ /* Sanity check the streaming endpoint module parameters. */
opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U);
opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
@@ -611,7 +616,8 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
opts->streaming_maxpacket);
}
- /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
+ /*
+ * Fill in the FS/HS/SS Video Streaming specific descriptors from the
* module parameters.
*
* NOTE: We assume that the user knows what they are doing and won't
@@ -895,7 +901,8 @@ static void uvc_function_unbind(struct usb_configuration *c,
uvcg_info(f, "%s()\n", __func__);
- /* If we know we're connected via v4l2, then there should be a cleanup
+ /*
+ * If we know we're connected via v4l2, then there should be a cleanup
* of the device from userspace either via UVC_EVENT_DISCONNECT or
* though the video device removal uevent. Allow some time for the
* application to close out before things get deleted.
@@ -912,7 +919,8 @@ static void uvc_function_unbind(struct usb_configuration *c,
v4l2_device_unregister(&uvc->v4l2_dev);
if (uvc->func_connected) {
- /* Wait for the release to occur to ensure there are no longer any
+ /*
+ * Wait for the release to occur to ensure there are no longer any
* pending operations that may cause panics when resources are cleaned
* up.
*/
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index b859a158a414..03035dbbe97b 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -519,4 +519,19 @@ ssize_t fsg_store_inquiry_string(struct fsg_lun *curlun, const char *buf,
}
EXPORT_SYMBOL_GPL(fsg_store_inquiry_string);
+ssize_t fsg_store_forced_eject(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ /*
+ * Forcibly detach the backing file from the LUN
+ * regardless of whether the host has allowed it.
+ */
+ curlun->prevent_medium_removal = 0;
+ ret = fsg_store_file(curlun, filesem, "", 0);
+ return ret < 0 ? ret : count;
+}
+EXPORT_SYMBOL_GPL(fsg_store_forced_eject);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index bdeb1e233fc9..0a544a82cbf8 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -219,5 +219,7 @@ ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf,
size_t count);
ssize_t fsg_store_inquiry_string(struct fsg_lun *curlun, const char *buf,
size_t count);
+ssize_t fsg_store_forced_eject(struct fsg_lun *curlun, struct rw_semaphore *filesem,
+ const char *buf, size_t count);
#endif /* USB_STORAGE_COMMON_H */
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index f51694f29de9..7887def05dc2 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,7 +17,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/etherdevice.h>
#include "u_ether.h"
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index e5a6b6e36b3d..4303a3283ba0 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2371,6 +2371,7 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
const char *page, size_t len) \
{ \
struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ int size = min(sizeof(opts->aname), len + 1); \
int ret = 0; \
\
mutex_lock(&opts->lock); \
@@ -2379,8 +2380,9 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
goto end; \
} \
\
- ret = snprintf(opts->aname, min(sizeof(opts->aname), len), \
- "%s", page); \
+ ret = strscpy(opts->aname, page, size); \
+ if (ret == -E2BIG) \
+ ret = size - 1; \
\
end: \
mutex_unlock(&opts->lock); \
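
The -E2BIG handling above relies on the strscpy() contract: at most size - 1 bytes are copied, the destination is always NUL-terminated, and truncation is reported as -E2BIG rather than the would-be length that snprintf() returns. A kernel-context sketch of that pattern (the wrapper name is illustrative):

/*
 * Illustrative only: copy at most size - 1 bytes and report how many
 * bytes were actually stored, even when the source was truncated.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static ssize_t copy_and_report(char *dst, const char *src, size_t size)
{
	ssize_t ret = strscpy(dst, src, size);

	if (ret == -E2BIG)	/* truncated: size - 1 bytes were kept */
		ret = size - 1;
	return ret;
}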
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index d25edc3d2174..ec500ee499ee 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -44,7 +44,8 @@ static int uvc_queue_setup(struct vb2_queue *vq,
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_video *video = container_of(queue, struct uvc_video, queue);
- struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
+ unsigned int req_size;
+ unsigned int nreq;
if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
*nbuffers = UVC_MAX_VIDEO_BUFFERS;
@@ -53,10 +54,16 @@ static int uvc_queue_setup(struct vb2_queue *vq,
sizes[0] = video->imagesize;
- if (cdev->gadget->speed < USB_SPEED_SUPER)
- video->uvc_num_requests = 4;
- else
- video->uvc_num_requests = 64;
+ req_size = video->ep->maxpacket
+ * max_t(unsigned int, video->ep->maxburst, 1)
+ * (video->ep->mult);
+
+ /* We divide by two, to increase the chance to run
+ * into fewer requests for smaller framesizes.
+ */
+ nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
+ nreq = clamp(nreq, 4U, 64U);
+ video->uvc_num_requests = nreq;
return 0;
}
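
A rough worked example of the request sizing above, assuming maxpacket 1024, maxburst 0 (treated as 1) and mult 1; all values are assumptions for illustration, not taken from the patch:

/*
 * Illustrative only: number of USB requests chosen for two frame sizes
 * with req_size = 1024. Small frames get few requests, large frames are
 * capped at 64.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	unsigned int req_size = 1024;
	unsigned int sizes[2] = { 16384, 1048576 };	/* 16 KiB, 1 MiB */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[i], 2),
						 req_size);

		nreq = CLAMP(nreq, 4U, 64U);
		printf("frame %u bytes -> %u requests\n", sizes[i], nreq);
	}
	return 0;
}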
@@ -104,7 +111,8 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
list_add_tail(&buf->queue, &queue->irqqueue);
} else {
- /* If the device is disconnected return the buffer to userspace
+ /*
+ * If the device is disconnected return the buffer to userspace
* directly. The next QBUF call will fail with -ENODEV.
*/
buf->state = UVC_BUF_STATE_ERROR;
@@ -255,7 +263,8 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
}
queue->buf_used = 0;
- /* This must be protected by the irqlock spinlock to avoid race
+ /*
+ * This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_queue_buffer and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
* blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index d42bb3346745..c00ce0e91f5d 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -261,7 +261,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
break;
default:
- uvcg_info(&video->uvc->func,
+ uvcg_warn(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
@@ -378,7 +378,8 @@ static void uvcg_video_pump(struct work_struct *work)
int ret;
while (video->ep->enabled) {
- /* Retrieve the first available USB request, protected by the
+ /*
+ * Retrieve the first available USB request, protected by the
* request lock.
*/
spin_lock_irqsave(&video->req_lock, flags);
@@ -391,7 +392,8 @@ static void uvcg_video_pump(struct work_struct *work)
list_del(&req->list);
spin_unlock_irqrestore(&video->req_lock, flags);
- /* Retrieve the first available video buffer and fill the
+ /*
+ * Retrieve the first available video buffer and fill the
* request, protected by the video queue irqlock.
*/
spin_lock_irqsave(&queue->irqlock, flags);
@@ -403,9 +405,11 @@ static void uvcg_video_pump(struct work_struct *work)
video->encode(req, video, buf);
- /* With usb3 we have more requests. This will decrease the
+ /*
+ * With usb3 we have more requests. This will decrease the
* interrupt load to a quarter but also catches the corner
- * cases, which needs to be handled */
+ * cases, which needs to be handled.
+ */
if (list_empty(&video->req_free) ||
buf->state == UVC_BUF_STATE_DONE ||
!(video->req_int_count %
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 79990597c39f..01c3ead7d1b4 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -362,6 +362,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
DBG (epdata->dev, "endpoint gone\n");
+ wait_for_completion(&done);
epdata->status = -ENODEV;
}
}
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 69394dc1cdfb..5756acb07b8d 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -59,8 +59,8 @@ config USB_ATMEL_USBA
tristate "Atmel USBA"
depends on ARCH_AT91
help
- USBA is the integrated high-speed USB Device controller on
- the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
+ USBA is the integrated high-speed USB Device controller on some
+ AT91SAM9 and AT91CAP9 processors from Atmel.
The fifo_mode parameter is used to select endpoint allocation mode.
fifo_mode = 0 is used to let the driver autoconfigure the endpoints.
@@ -311,7 +311,7 @@ source "drivers/usb/gadget/udc/bdc/Kconfig"
config USB_AMD5536UDC
tristate "AMD5536 UDC"
- depends on USB_PCI
+ depends on USB_PCI && HAS_DMA
select USB_SNP_CORE
help
The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
@@ -463,6 +463,19 @@ config USB_TEGRA_XUDC
dynamically linked module called "tegra_xudc" and force all
gadget drivers to also be dynamically linked.
+config USB_ASPEED_UDC
+ tristate "Aspeed UDC driver support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on USB_LIBCOMPOSITE
+ help
+ Enables Aspeed USB2.0 Device Controller driver for AST260x
+ family SoCs. The controller supports 1 control endpoint and
+ 4 programmable endpoints.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "aspeed_udc" and force all
+ gadget drivers to also be dynamically linked.
+
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index a21f2224e7eb..12f9e4c9eb0c 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -40,5 +40,6 @@ obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o
obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/
+obj-$(CONFIG_USB_ASPEED_UDC) += aspeed_udc.o
obj-$(CONFIG_USB_BDC_UDC) += bdc/
obj-$(CONFIG_USB_MAX3420_UDC) += max3420_udc.o
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 65cd4e46f031..e2207d014620 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -1059,8 +1059,10 @@ static int ast_vhub_init_desc(struct ast_vhub *vhub)
/* Initialize vhub String Descriptors. */
INIT_LIST_HEAD(&vhub->vhub_str_desc);
desc_np = of_get_child_by_name(vhub_np, "vhub-strings");
- if (desc_np)
+ if (desc_np) {
ret = ast_vhub_of_parse_str_desc(vhub, desc_np);
+ of_node_put(desc_np);
+ }
else
ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
new file mode 100644
index 000000000000..01968e2167f9
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -0,0 +1,1597 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/slab.h>
+
+#define AST_UDC_NUM_ENDPOINTS (1 + 4)
+#define AST_UDC_EP0_MAX_PACKET 64 /* EP0's max packet size */
+#define AST_UDC_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */
+#define AST_UDC_DESCS_COUNT 256 /* Use 256 stages descriptor mode (32/256) */
+#define AST_UDC_DESC_MODE 1 /* Single/Multiple Stage(s) Descriptor Mode */
+
+#define AST_UDC_EP_DMA_SIZE (AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT)
+
+/*****************************
+ * *
+ * UDC register definitions *
+ * *
+ *****************************/
+
+#define AST_UDC_FUNC_CTRL 0x00 /* Root Function Control & Status Register */
+#define AST_UDC_CONFIG 0x04 /* Root Configuration Setting Register */
+#define AST_UDC_IER 0x08 /* Interrupt Control Register */
+#define AST_UDC_ISR 0x0C /* Interrupt Status Register */
+#define AST_UDC_EP_ACK_IER 0x10 /* Programmable ep Pool ACK Interrupt Enable Reg */
+#define AST_UDC_EP_NAK_IER 0x14 /* Programmable ep Pool NAK Interrupt Enable Reg */
+#define AST_UDC_EP_ACK_ISR 0x18 /* Programmable ep Pool ACK Interrupt Status Reg */
+#define AST_UDC_EP_NAK_ISR 0x1C /* Programmable ep Pool NAK Interrupt Status Reg */
+#define AST_UDC_DEV_RESET 0x20 /* Device Controller Soft Reset Enable Register */
+#define AST_UDC_STS 0x24 /* USB Status Register */
+#define AST_VHUB_EP_DATA 0x28 /* Programmable ep Pool Data Toggle Value Set */
+#define AST_VHUB_ISO_TX_FAIL 0x2C /* Isochronous Transaction Fail Accumulator */
+#define AST_UDC_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register */
+#define AST_UDC_EP0_DATA_BUFF 0x34 /* Base Address of ep0 IN/OUT Data Buffer Reg */
+#define AST_UDC_SETUP0 0x80 /* Root Device Setup Data Buffer0 */
+#define AST_UDC_SETUP1 0x84 /* Root Device Setup Data Buffer1 */
+
+
+/* Main control reg */
+#define USB_PHY_CLK_EN BIT(31)
+#define USB_FIFO_DYN_PWRD_EN BIT(19)
+#define USB_EP_LONG_DESC BIT(18)
+#define USB_BIST_TEST_PASS BIT(13)
+#define USB_BIST_TURN_ON BIT(12)
+#define USB_PHY_RESET_DIS BIT(11)
+#define USB_TEST_MODE(x) ((x) << 8)
+#define USB_FORCE_TIMER_HS BIT(7)
+#define USB_FORCE_HS BIT(6)
+#define USB_REMOTE_WAKEUP_12MS BIT(5)
+#define USB_REMOTE_WAKEUP_EN BIT(4)
+#define USB_AUTO_REMOTE_WAKEUP_EN BIT(3)
+#define USB_STOP_CLK_IN_SUPEND BIT(2)
+#define USB_UPSTREAM_FS BIT(1)
+#define USB_UPSTREAM_EN BIT(0)
+
+/* Main config reg */
+#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
+#define UDC_CFG_ADDR_MASK (0x3f)
+
+/* Interrupt ctrl & status reg */
+#define UDC_IRQ_EP_POOL_NAK BIT(17)
+#define UDC_IRQ_EP_POOL_ACK_STALL BIT(16)
+#define UDC_IRQ_BUS_RESUME BIT(8)
+#define UDC_IRQ_BUS_SUSPEND BIT(7)
+#define UDC_IRQ_BUS_RESET BIT(6)
+#define UDC_IRQ_EP0_IN_DATA_NAK BIT(4)
+#define UDC_IRQ_EP0_IN_ACK_STALL BIT(3)
+#define UDC_IRQ_EP0_OUT_NAK BIT(2)
+#define UDC_IRQ_EP0_OUT_ACK_STALL BIT(1)
+#define UDC_IRQ_EP0_SETUP BIT(0)
+#define UDC_IRQ_ACK_ALL (0x1ff)
+
+/* EP isr reg */
+#define USB_EP3_ISR BIT(3)
+#define USB_EP2_ISR BIT(2)
+#define USB_EP1_ISR BIT(1)
+#define USB_EP0_ISR BIT(0)
+#define UDC_IRQ_EP_ACK_ALL (0xf)
+
+/* Soft reset reg */
+#define ROOT_UDC_SOFT_RESET BIT(0)
+
+/* USB status reg */
+#define UDC_STS_HIGHSPEED BIT(27)
+
+/* Programmable EP data toggle */
+#define EP_TOGGLE_SET_EPNUM(x) ((x) & 0x3)
+
+/* EP0 ctrl reg */
+#define EP0_GET_RX_LEN(x) ((x >> 16) & 0x7f)
+#define EP0_TX_LEN(x) ((x & 0x7f) << 8)
+#define EP0_RX_BUFF_RDY BIT(2)
+#define EP0_TX_BUFF_RDY BIT(1)
+#define EP0_STALL BIT(0)
+
+/*************************************
+ * *
+ * per-endpoint register definitions *
+ * *
+ *************************************/
+
+#define AST_UDC_EP_CONFIG 0x00 /* Endpoint Configuration Register */
+#define AST_UDC_EP_DMA_CTRL 0x04 /* DMA Descriptor List Control/Status Register */
+#define AST_UDC_EP_DMA_BUFF 0x08 /* DMA Descriptor/Buffer Base Address */
+#define AST_UDC_EP_DMA_STS 0x0C /* DMA Descriptor List R/W Pointer and Status */
+
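+/*
+ * EPn (n >= 1) registers are a 0x10-byte window located at
+ * AST_UDC_EP_BASE + (n - 1) * AST_UDC_EP_OFFSET from the controller base
+ * (see ast_udc_init_ep()).
+ */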
+#define AST_UDC_EP_BASE 0x200
+#define AST_UDC_EP_OFFSET 0x10
+
+/* EP config reg */
+#define EP_SET_MAX_PKT(x) ((x & 0x3ff) << 16)
+#define EP_DATA_FETCH_CTRL(x) ((x & 0x3) << 14)
+#define EP_AUTO_DATA_DISABLE (0x1 << 13)
+#define EP_SET_EP_STALL (0x1 << 12)
+#define EP_SET_EP_NUM(x) ((x & 0xf) << 8)
+#define EP_SET_TYPE_MASK(x) ((x) << 5)
+#define EP_TYPE_BULK (0x1)
+#define EP_TYPE_INT (0x2)
+#define EP_TYPE_ISO (0x3)
+#define EP_DIR_OUT (0x1 << 4)
+#define EP_ALLOCATED_MASK (0x7 << 1)
+#define EP_ENABLE BIT(0)
+
+/* EP DMA ctrl reg */
+#define EP_DMA_CTRL_GET_PROC_STS(x) ((x >> 4) & 0xf)
+#define EP_DMA_CTRL_STS_RX_IDLE 0x0
+#define EP_DMA_CTRL_STS_TX_IDLE 0x8
+#define EP_DMA_CTRL_IN_LONG_MODE (0x1 << 3)
+#define EP_DMA_CTRL_RESET (0x1 << 2)
+#define EP_DMA_SINGLE_STAGE (0x1 << 1)
+#define EP_DMA_DESC_MODE (0x1 << 0)
+
+/* EP DMA status reg */
+#define EP_DMA_SET_TX_SIZE(x) ((x & 0x7ff) << 16)
+#define EP_DMA_GET_TX_SIZE(x) (((x) >> 16) & 0x7ff)
+#define EP_DMA_GET_RPTR(x) (((x) >> 8) & 0xff)
+#define EP_DMA_GET_WPTR(x) ((x) & 0xff)
+#define EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */
+
+/* EP desc reg */
+#define AST_EP_DMA_DESC_INTR_ENABLE BIT(31)
+#define AST_EP_DMA_DESC_PID_DATA0 (0 << 14)
+#define AST_EP_DMA_DESC_PID_DATA2 BIT(14)
+#define AST_EP_DMA_DESC_PID_DATA1 (2 << 14)
+#define AST_EP_DMA_DESC_PID_MDATA (3 << 14)
+#define EP_DESC1_IN_LEN(x) ((x) & 0x1fff)
+#define AST_EP_DMA_DESC_MAX_LEN		(7680)	/* Max packet length for transmit in 1 desc */
+
+struct ast_udc_request {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned mapped:1;
+ unsigned int actual_dma_length;
+ u32 saved_dma_wptr;
+};
+
+#define to_ast_req(__req) container_of(__req, struct ast_udc_request, req)
+
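+/*
+ * One hardware DMA descriptor: des_0 carries the buffer DMA address and
+ * des_1 the chunk length (see ast_dma_descriptor_setup() and
+ * EP_DESC1_IN_LEN()).
+ */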
+struct ast_dma_desc {
+ u32 des_0;
+ u32 des_1;
+};
+
+struct ast_udc_ep {
+ struct usb_ep ep;
+
+ /* Request queue */
+ struct list_head queue;
+
+ struct ast_udc_dev *udc;
+ void __iomem *ep_reg;
+ void *epn_buf;
+ dma_addr_t epn_buf_dma;
+ const struct usb_endpoint_descriptor *desc;
+
+ /* DMA Descriptors */
+ struct ast_dma_desc *descs;
+ dma_addr_t descs_dma;
+ u32 descs_wptr;
+ u32 chunk_max;
+
+ bool dir_in:1;
+ unsigned stopped:1;
+ bool desc_mode:1;
+};
+
+#define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep)
+
+struct ast_udc_dev {
+ struct platform_device *pdev;
+ void __iomem *reg;
+ int irq;
+ spinlock_t lock;
+ struct clk *clk;
+ struct work_struct wake_work;
+
+ /* EP0 DMA buffers allocated in one chunk */
+ void *ep0_buf;
+ dma_addr_t ep0_buf_dma;
+ struct ast_udc_ep ep[AST_UDC_NUM_ENDPOINTS];
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ void __iomem *creq;
+ enum usb_device_state suspended_from;
+ int desc_mode;
+
+ /* Force full speed only */
+ bool force_usb1:1;
+ unsigned is_control_tx:1;
+ bool wakeup_en:1;
+};
+
+#define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget)
+
+static const char * const ast_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4"
+};
+
+#ifdef AST_UDC_DEBUG_ALL
+#define AST_UDC_DEBUG
+#define AST_SETUP_DEBUG
+#define AST_EP_DEBUG
+#define AST_ISR_DEBUG
+#endif
+
+#ifdef AST_SETUP_DEBUG
+#define SETUP_DBG(u, fmt, ...) \
+ dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define SETUP_DBG(u, fmt, ...)
+#endif
+
+#ifdef AST_EP_DEBUG
+#define EP_DBG(e, fmt, ...) \
+ dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__, \
+ (e)->ep.name, ##__VA_ARGS__)
+#else
+#define EP_DBG(ep, fmt, ...) ((void)(ep))
+#endif
+
+#ifdef AST_UDC_DEBUG
+#define UDC_DBG(u, fmt, ...) \
+ dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define UDC_DBG(u, fmt, ...)
+#endif
+
+#ifdef AST_ISR_DEBUG
+#define ISR_DBG(u, fmt, ...) \
+ dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define ISR_DBG(u, fmt, ...)
+#endif
+
+/*-------------------------------------------------------------------------*/
+#define ast_udc_read(udc, offset) \
+ readl((udc)->reg + (offset))
+#define ast_udc_write(udc, val, offset) \
+ writel((val), (udc)->reg + (offset))
+
+#define ast_ep_read(ep, reg) \
+ readl((ep)->ep_reg + (reg))
+#define ast_ep_write(ep, val, reg) \
+ writel((val), (ep)->ep_reg + (reg))
+
+/*-------------------------------------------------------------------------*/
+
+static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req,
+ int status)
+{
+ struct ast_udc_dev *udc = ep->udc;
+
+ EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n",
+ req, req->req.actual, req->req.length,
+ (u32)req->req.buf, ep->dir_in);
+
+ list_del(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (status && status != -ESHUTDOWN)
+ EP_DBG(ep, "done req:%p, status:%d\n", req, status);
+
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+}
+
+static void ast_udc_nuke(struct ast_udc_ep *ep, int status)
+{
+ int count = 0;
+
+ while (!list_empty(&ep->queue)) {
+ struct ast_udc_request *req;
+
+ req = list_entry(ep->queue.next, struct ast_udc_request,
+ queue);
+ ast_udc_done(ep, req, status);
+ count++;
+ }
+
+ if (count)
+ EP_DBG(ep, "Nuked %d request(s)\n", count);
+}
+
+/*
+ * Stop activity on all endpoints of the device controller.
+ *
+ * Each endpoint is marked stopped and any transfer requests still pending
+ * on it are terminated.
+ */
+static void ast_udc_stop_activity(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep;
+ int i;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->stopped = 1;
+ ast_udc_nuke(ep, -ESHUTDOWN);
+ }
+}
+
+static int ast_udc_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ u16 maxpacket = usb_endpoint_maxp(desc);
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ u8 epnum = usb_endpoint_num(desc);
+ unsigned long flags;
+ u32 ep_conf = 0;
+ u8 dir_in;
+ u8 type;
+
+ if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
+ maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
+ EP_DBG(ep, "Failed, invalid EP enable param\n");
+ return -EINVAL;
+ }
+
+ if (!udc->driver) {
+ EP_DBG(ep, "bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ EP_DBG(ep, "maxpacket:0x%x\n", maxpacket);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ep->desc = desc;
+ ep->stopped = 0;
+ ep->ep.maxpacket = maxpacket;
+ ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN;
+
+ if (maxpacket < AST_UDC_EPn_MAX_PACKET)
+ ep_conf = EP_SET_MAX_PKT(maxpacket);
+
+ ep_conf |= EP_SET_EP_NUM(epnum);
+
+ type = usb_endpoint_type(desc);
+ dir_in = usb_endpoint_dir_in(desc);
+ ep->dir_in = dir_in;
+ if (!ep->dir_in)
+ ep_conf |= EP_DIR_OUT;
+
+ EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in);
+ switch (type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO);
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK);
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT);
+ break;
+ }
+
+ ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in;
+ if (ep->desc_mode) {
+ ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
+ ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
+ ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF);
+
+ /* Enable Long Descriptor Mode */
+ ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE,
+ AST_UDC_EP_DMA_CTRL);
+
+ ep->descs_wptr = 0;
+
+ } else {
+ ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
+ ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL);
+ ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
+ }
+
+ /* Cleanup data toggle just in case */
+ ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA);
+
+ /* Enable EP */
+ ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG);
+
+ EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG));
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int ast_udc_ep_disable(struct usb_ep *_ep)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+
+ ast_udc_nuke(ep, -ESHUTDOWN);
+ ast_ep_write(ep, 0, AST_UDC_EP_CONFIG);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_request *req;
+
+ req = kzalloc(sizeof(struct ast_udc_request), gfp_flags);
+ if (!req) {
+ EP_DBG(ep, "request allocation failed\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void ast_udc_ep_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct ast_udc_request *req = to_ast_req(_req);
+
+ kfree(req);
+}
+
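+/*
+ * Build the EP's circular descriptor list for an IN transfer: the request is
+ * split into chunks of at most ep->chunk_max bytes, one descriptor per chunk,
+ * with the write pointer wrapping at AST_UDC_DESCS_COUNT.
+ */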
+static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
+ u16 tx_len, struct ast_udc_request *req)
+{
+ struct ast_udc_dev *udc = ep->udc;
+ struct device *dev = &udc->pdev->dev;
+ bool last = false;
+ int chunk, count;
+ u32 offset;
+
+ if (!ep->descs) {
+ dev_warn(dev, "%s: Empty DMA descs list failure\n",
+ ep->ep.name);
+ return -EINVAL;
+ }
+
+ chunk = tx_len;
+ offset = count = 0;
+
+ EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req,
+ "wptr", ep->descs_wptr, "dma_buf", dma_buf,
+ "tx_len", tx_len);
+
+ /* Create Descriptor Lists */
+ while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) {
+
+ ep->descs[ep->descs_wptr].des_0 = dma_buf + offset;
+
+ if (chunk > ep->chunk_max) {
+ ep->descs[ep->descs_wptr].des_1 = ep->chunk_max;
+ } else {
+ ep->descs[ep->descs_wptr].des_1 = chunk;
+ last = true;
+ }
+
+ chunk -= ep->chunk_max;
+
+ EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n",
+ ep->descs_wptr,
+ ep->descs[ep->descs_wptr].des_0,
+ ep->descs[ep->descs_wptr].des_1);
+
+ if (count == 0)
+ req->saved_dma_wptr = ep->descs_wptr;
+
+ ep->descs_wptr++;
+ count++;
+
+ if (ep->descs_wptr >= AST_UDC_DESCS_COUNT)
+ ep->descs_wptr = 0;
+
+ offset = ep->chunk_max * count;
+ }
+
+ return 0;
+}
+
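+/*
+ * Single-stage (non-descriptor) kick: program the buffer address and TX size,
+ * then set EP_DMA_SINGLE_KICK to start a transfer of up to one max packet.
+ */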
+static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
+{
+ u32 tx_len;
+ u32 last;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;
+
+ EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n",
+ req, tx_len, ep->dir_in);
+
+ ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF);
+
+ /* Start DMA */
+ ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len), AST_UDC_EP_DMA_STS);
+ ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len) | EP_DMA_SINGLE_KICK,
+ AST_UDC_EP_DMA_STS);
+}
+
+static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep,
+ struct ast_udc_request *req)
+{
+ u32 descs_max_size;
+ u32 tx_len;
+ u32 last;
+
+ descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > descs_max_size ? descs_max_size : last;
+
+ EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n",
+ req, "tx_len", tx_len, "dir_in", ep->dir_in,
+ "dma", req->req.dma + req->req.actual,
+ req->req.actual, req->req.length,
+ "descs_max_size", descs_max_size);
+
+ if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual,
+ tx_len, req))
+ req->actual_dma_length += tx_len;
+
+	/* Make sure the CPU is done with everything before triggering the DMA */
+ mb();
+
+ ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS);
+
+ EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n",
+ ep->descs_wptr,
+ ast_ep_read(ep, AST_UDC_EP_DMA_STS),
+ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
+}
+
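+/*
+ * EP0 data stage: move at most one max packet per call through the EP0 data
+ * buffer, using EP0_TX_BUFF_RDY for IN and EP0_RX_BUFF_RDY for OUT.
+ */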
+static void ast_udc_ep0_queue(struct ast_udc_ep *ep,
+ struct ast_udc_request *req)
+{
+ struct ast_udc_dev *udc = ep->udc;
+ u32 tx_len;
+ u32 last;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;
+
+ ast_udc_write(udc, req->req.dma + req->req.actual,
+ AST_UDC_EP0_DATA_BUFF);
+
+ if (ep->dir_in) {
+ /* IN requests, send data */
+ SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n",
+ "buf", (u32)req->req.buf,
+ "dma", req->req.dma + req->req.actual,
+ "tx_len", tx_len,
+ req->req.actual, req->req.length,
+ "dir_in", ep->dir_in);
+
+ req->req.actual += tx_len;
+ ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL);
+ ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY,
+ AST_UDC_EP0_CTRL);
+
+ } else {
+ /* OUT requests, receive data */
+ SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n",
+ "buf", (u32)req->req.buf,
+ "dma", req->req.dma + req->req.actual,
+ "len", req->req.actual, req->req.length,
+ "dir_in", ep->dir_in);
+
+ if (!req->req.length) {
+		/* Zero-length request: send a zero-length IN packet as the status stage */
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+ ep->dir_in = 0x1;
+ } else
+ ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
+ }
+}
+
+static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct ast_udc_request *req = to_ast_req(_req);
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ struct device *dev = &udc->pdev->dev;
+ unsigned long flags;
+ int rc;
+
+ if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) {
+ dev_warn(dev, "Invalid EP request !\n");
+ return -EINVAL;
+ }
+
+ if (ep->stopped) {
+ dev_warn(dev, "%s is already stopped !\n", _ep->name);
+ return -ESHUTDOWN;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+ req->actual_dma_length = 0;
+
+ rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in);
+ if (rc) {
+ EP_DBG(ep, "Request mapping failure %d\n", rc);
+ dev_warn(dev, "Request mapping failure %d\n", rc);
+ goto end;
+ }
+
+ EP_DBG(ep, "enqueue req @%p\n", req);
+ EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n",
+ _req->length, _req->dma, _req->zero, ep->dir_in);
+
+ /* EP0 request enqueue */
+ if (ep->ep.desc == NULL) {
+ if ((req->req.dma % 4) != 0) {
+ dev_warn(dev, "EP0 req dma alignment error\n");
+ rc = -ESHUTDOWN;
+ goto end;
+ }
+
+ ast_udc_ep0_queue(ep, req);
+ goto end;
+ }
+
+ /* EPn request enqueue */
+ if (list_is_singular(&ep->queue)) {
+ if (ep->desc_mode)
+ ast_udc_epn_kick_desc(ep, req);
+ else
+ ast_udc_epn_kick(ep, req);
+ }
+
+end:
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return rc;
+}
+
+static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ struct ast_udc_request *req;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req) {
+ list_del_init(&req->queue);
+ ast_udc_done(ep, req, -ESHUTDOWN);
+ _req->status = -ECONNRESET;
+ break;
+ }
+ }
+
+ /* dequeue request not found */
+ if (&req->req != _req)
+ rc = -EINVAL;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return rc;
+}
+
+static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ unsigned long flags;
+ int epnum;
+ u32 ctrl;
+
+ EP_DBG(ep, "val:%d\n", value);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ epnum = usb_endpoint_num(ep->desc);
+
+ /* EP0 */
+ if (epnum == 0) {
+ ctrl = ast_udc_read(udc, AST_UDC_EP0_CTRL);
+ if (value)
+ ctrl |= EP0_STALL;
+ else
+ ctrl &= ~EP0_STALL;
+
+ ast_udc_write(udc, ctrl, AST_UDC_EP0_CTRL);
+
+ } else {
+ /* EPn */
+ ctrl = ast_udc_read(udc, AST_UDC_EP_CONFIG);
+ if (value)
+ ctrl |= EP_SET_EP_STALL;
+ else
+ ctrl &= ~EP_SET_EP_STALL;
+
+ ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG);
+
+		/* Only EPn is marked stopped here; it waits for the halt to be cleared */
+ ep->stopped = value ? 1 : 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_ep_ops ast_udc_ep_ops = {
+ .enable = ast_udc_ep_enable,
+ .disable = ast_udc_ep_disable,
+ .alloc_request = ast_udc_ep_alloc_request,
+ .free_request = ast_udc_ep_free_request,
+ .queue = ast_udc_ep_queue,
+ .dequeue = ast_udc_ep_dequeue,
+ .set_halt = ast_udc_ep_set_halt,
+ /* there's only imprecise fifo status reporting */
+};
+
+static void ast_udc_ep0_rx(struct ast_udc_dev *udc)
+{
+ ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
+ ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_ep0_tx(struct ast_udc_dev *udc)
+{
+ ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_ep0_out(struct ast_udc_dev *udc)
+{
+ struct device *dev = &udc->pdev->dev;
+ struct ast_udc_ep *ep = &udc->ep[0];
+ struct ast_udc_request *req;
+ u16 rx_len;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ req = list_entry(ep->queue.next, struct ast_udc_request, queue);
+
+ rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL));
+ req->req.actual += rx_len;
+
+ SETUP_DBG(udc, "req %p (%d/%d)\n", req,
+ req->req.actual, req->req.length);
+
+ if ((rx_len < ep->ep.maxpacket) ||
+ (req->req.actual == req->req.length)) {
+ ast_udc_ep0_tx(udc);
+ if (!ep->dir_in)
+ ast_udc_done(ep, req, 0);
+
+ } else {
+ if (rx_len > req->req.length) {
+			/* Received more data than the request asked for */
+ dev_warn(dev, "Something wrong (%d/%d)\n",
+ req->req.actual, req->req.length);
+ ast_udc_ep0_tx(udc);
+ ast_udc_done(ep, req, 0);
+ return;
+ }
+
+ ep->dir_in = 0;
+
+		/* More work to do: queue the next chunk */
+ ast_udc_ep0_queue(ep, req);
+ }
+}
+
+static void ast_udc_ep0_in(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep = &udc->ep[0];
+ struct ast_udc_request *req;
+
+ if (list_empty(&ep->queue)) {
+ if (udc->is_control_tx) {
+ ast_udc_ep0_rx(udc);
+ udc->is_control_tx = 0;
+ }
+
+ return;
+ }
+
+ req = list_entry(ep->queue.next, struct ast_udc_request, queue);
+
+ SETUP_DBG(udc, "req %p (%d/%d)\n", req,
+ req->req.actual, req->req.length);
+
+ if (req->req.length == req->req.actual) {
+ if (req->req.length)
+ ast_udc_ep0_rx(udc);
+
+ if (ep->dir_in)
+ ast_udc_done(ep, req, 0);
+
+ } else {
+		/* More work to do: queue the next chunk */
+ ast_udc_ep0_queue(ep, req);
+ }
+}
+
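+/*
+ * Completion path for single-stage mode: credit the transferred size from the
+ * DMA status register, complete the request on a full or short transfer, and
+ * kick the next queued request if any.
+ */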
+static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num)
+{
+ struct ast_udc_ep *ep = &udc->ep[ep_num];
+ struct ast_udc_request *req;
+ u16 len = 0;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ req = list_first_entry(&ep->queue, struct ast_udc_request, queue);
+
+ len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS));
+ req->req.actual += len;
+
+ EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
+ req->req.actual, req->req.length, "len", len);
+
+ /* Done this request */
+ if (req->req.length == req->req.actual) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+
+ } else {
+ /* Check for short packet */
+ if (len < ep->ep.maxpacket) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+ }
+ }
+
+ /* More requests */
+ if (req)
+ ast_udc_epn_kick(ep, req);
+}
+
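+/*
+ * Completion path for descriptor mode: once the DMA read pointer has caught
+ * up with the write pointer, sum the per-descriptor lengths from the
+ * request's saved write pointer to work out how much was transferred.
+ */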
+static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num)
+{
+ struct ast_udc_ep *ep = &udc->ep[ep_num];
+ struct device *dev = &udc->pdev->dev;
+ struct ast_udc_request *req;
+ u32 proc_sts, wr_ptr, rd_ptr;
+ u32 len_in_desc, ctrl;
+ u16 total_len = 0;
+ int i;
+
+ if (list_empty(&ep->queue)) {
+ dev_warn(dev, "%s request queue empty!\n", ep->ep.name);
+ return;
+ }
+
+ req = list_first_entry(&ep->queue, struct ast_udc_request, queue);
+
+ ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL);
+ proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl);
+
+ /* Check processing status is idle */
+ if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE &&
+ proc_sts != EP_DMA_CTRL_STS_TX_IDLE) {
+ dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n",
+ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL),
+ proc_sts);
+ return;
+ }
+
+ ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS);
+ rd_ptr = EP_DMA_GET_RPTR(ctrl);
+ wr_ptr = EP_DMA_GET_WPTR(ctrl);
+
+ if (rd_ptr != wr_ptr) {
+ dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n",
+ "rptr", rd_ptr, "wptr", wr_ptr);
+ return;
+ }
+
+ EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr);
+ i = req->saved_dma_wptr;
+
+ do {
+ len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1);
+ EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc);
+ total_len += len_in_desc;
+ i++;
+ if (i >= AST_UDC_DESCS_COUNT)
+ i = 0;
+
+ } while (i != wr_ptr);
+
+ req->req.actual += total_len;
+
+ EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
+ req->req.actual, req->req.length, "len", total_len);
+
+ /* Done this request */
+ if (req->req.length == req->req.actual) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+
+ } else {
+ /* Check for short packet */
+ if (total_len < ep->ep.maxpacket) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+ }
+ }
+
+ /* More requests & dma descs not setup yet */
+ if (req && (req->actual_dma_length == req->req.actual)) {
+ EP_DBG(ep, "More requests\n");
+ ast_udc_epn_kick_desc(ep, req);
+ }
+}
+
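+/* Send a short control IN response (e.g. GET_STATUS data) from the ep0 bounce buffer */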
+static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len)
+{
+ if (len) {
+ memcpy(udc->ep0_buf, tx_data, len);
+
+ ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
+ ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL);
+ ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY,
+ AST_UDC_EP0_CTRL);
+ udc->is_control_tx = 1;
+
+ } else
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_getstatus(struct ast_udc_dev *udc)
+{
+ struct usb_ctrlrequest crq;
+ struct ast_udc_ep *ep;
+ u16 status = 0;
+ u16 epnum = 0;
+
+ memcpy_fromio(&crq, udc->creq, sizeof(crq));
+
+ switch (crq.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ /* Get device status */
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ status = udc->ep[epnum].stopped;
+ break;
+ default:
+ goto stall;
+ }
+
+ ep = &udc->ep[epnum];
+ EP_DBG(ep, "status: 0x%x\n", status);
+ ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status));
+
+ return;
+
+stall:
+ EP_DBG(ep, "Can't respond request\n");
+ ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
+ AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep = &udc->ep[0];
+ struct ast_udc_request *req;
+ struct usb_ctrlrequest crq;
+ int req_num = 0;
+ int rc = 0;
+ u32 reg;
+
+ memcpy_fromio(&crq, udc->creq, sizeof(crq));
+
+ SETUP_DBG(udc, "SETUP packet: %02x/%02x/%04x/%04x/%04x\n",
+ crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue),
+ le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength));
+
+	/*
+	 * Clean up any ep0 request(s) still in the queue, since a new
+	 * control SETUP packet has arrived.
+	 */
+ list_for_each_entry(req, &udc->ep[0].queue, queue) {
+ req_num++;
+ EP_DBG(ep, "there is req %p in ep0 queue !\n", req);
+ }
+
+ if (req_num)
+ ast_udc_nuke(&udc->ep[0], -ETIMEDOUT);
+
+ udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN;
+
+ if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (crq.bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue);
+ reg = ast_udc_read(udc, AST_UDC_CONFIG);
+ reg &= ~UDC_CFG_ADDR_MASK;
+ reg |= UDC_CFG_SET_ADDR(crq.wValue);
+ ast_udc_write(udc, reg, AST_UDC_CONFIG);
+ goto req_complete;
+
+ case USB_REQ_CLEAR_FEATURE:
+ SETUP_DBG(udc, "ep0: CLEAR FEATURE\n");
+ goto req_driver;
+
+ case USB_REQ_SET_FEATURE:
+ SETUP_DBG(udc, "ep0: SET FEATURE\n");
+ goto req_driver;
+
+ case USB_REQ_GET_STATUS:
+ ast_udc_getstatus(udc);
+ return;
+
+ default:
+ goto req_driver;
+ }
+
+ }
+
+req_driver:
+ if (udc->driver) {
+ SETUP_DBG(udc, "Forwarding %s to gadget...\n",
+ udc->gadget.name);
+
+ spin_unlock(&udc->lock);
+ rc = udc->driver->setup(&udc->gadget, &crq);
+ spin_lock(&udc->lock);
+
+ } else {
+ SETUP_DBG(udc, "No gadget for request !\n");
+ }
+
+ if (rc >= 0)
+ return;
+
+ /* Stall if gadget failed */
+ SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc);
+ ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
+ AST_UDC_EP0_CTRL);
+ return;
+
+req_complete:
+ SETUP_DBG(udc, "ep0: Sending IN status without data\n");
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
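+/*
+ * Top-level interrupt handler: ack the status bits, handle bus events
+ * (reset/suspend/resume), then EP0 events, and finally fan out the EP pool
+ * ACK interrupts to the per-EP handlers.
+ */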
+static irqreturn_t ast_udc_isr(int irq, void *data)
+{
+ struct ast_udc_dev *udc = (struct ast_udc_dev *)data;
+ struct ast_udc_ep *ep;
+ u32 isr, ep_isr;
+ int i;
+
+ spin_lock(&udc->lock);
+
+ isr = ast_udc_read(udc, AST_UDC_ISR);
+ if (!isr)
+ goto done;
+
+ /* Ack interrupts */
+ ast_udc_write(udc, isr, AST_UDC_ISR);
+
+ if (isr & UDC_IRQ_BUS_RESET) {
+ ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n");
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+
+ ep = &udc->ep[1];
+ EP_DBG(ep, "dctrl:0x%x\n",
+ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
+
+ if (udc->driver && udc->driver->reset) {
+ spin_unlock(&udc->lock);
+ udc->driver->reset(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (isr & UDC_IRQ_BUS_SUSPEND) {
+ ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n");
+ udc->suspended_from = udc->gadget.state;
+ usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED);
+
+ if (udc->driver && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (isr & UDC_IRQ_BUS_RESUME) {
+ ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n");
+ usb_gadget_set_state(&udc->gadget, udc->suspended_from);
+
+ if (udc->driver && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (isr & UDC_IRQ_EP0_IN_ACK_STALL) {
+ ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n");
+ ast_udc_ep0_in(udc);
+ }
+
+ if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) {
+ ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n");
+ ast_udc_ep0_out(udc);
+ }
+
+ if (isr & UDC_IRQ_EP0_SETUP) {
+ ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n");
+ ast_udc_ep0_handle_setup(udc);
+ }
+
+ if (isr & UDC_IRQ_EP_POOL_ACK_STALL) {
+ ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n");
+ ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR);
+
+ /* Ack EP interrupts */
+ ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR);
+
+ /* Handle each EP */
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) {
+ if (ep_isr & (0x1 << i)) {
+ ep = &udc->ep[i + 1];
+ if (ep->desc_mode)
+ ast_udc_epn_handle_desc(udc, i + 1);
+ else
+ ast_udc_epn_handle(udc, i + 1);
+ }
+ }
+ }
+
+done:
+ spin_unlock(&udc->lock);
+ return IRQ_HANDLED;
+}
+
+static int ast_udc_gadget_getframe(struct usb_gadget *gadget)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+
+ return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff;
+}
+
+static void ast_udc_wake_work(struct work_struct *work)
+{
+ struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev,
+ wake_work);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "Wakeup Host !\n");
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL);
+ ast_udc_write(udc, ctrl | USB_REMOTE_WAKEUP_EN, AST_UDC_FUNC_CTRL);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void ast_udc_wakeup_all(struct ast_udc_dev *udc)
+{
+	/*
+	 * The device is trying to wake the host. Because this can recurse
+	 * back into the device, break the call chain by deferring the work
+	 * to a workqueue.
+	 */
+ schedule_work(&udc->wake_work);
+}
+
+static int ast_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!udc->wakeup_en) {
+ UDC_DBG(udc, "Remote Wakeup is disabled\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ UDC_DBG(udc, "Device initiated wakeup\n");
+ ast_udc_wakeup_all(udc);
+
+err:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return rc;
+}
+
+/*
+ * Activate/Deactivate link with host
+ */
+static int ast_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "is_on: %d\n", is_on);
+ if (is_on)
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) | USB_UPSTREAM_EN;
+ else
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
+
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int ast_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ struct ast_udc_ep *ep;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "\n");
+ udc->driver = driver;
+ udc->gadget.dev.of_node = udc->pdev->dev.of_node;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->stopped = 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int ast_udc_stop(struct usb_gadget *gadget)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "\n");
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->driver = NULL;
+
+ ast_udc_stop_activity(udc);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops ast_udc_ops = {
+ .get_frame = ast_udc_gadget_getframe,
+ .wakeup = ast_udc_wakeup,
+ .pullup = ast_udc_pullup,
+ .udc_start = ast_udc_start,
+ .udc_stop = ast_udc_stop,
+};
+
+/*
+ * Initialize the endpoints: one control endpoint plus programmable endpoints
+ * that can each be configured as a Bulk, Interrupt, or Isochronous IN/OUT
+ * endpoint.
+ */
+static void ast_udc_init_ep(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep;
+ int i;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->ep.name = ast_ep_name[i];
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
+ ep->ep.ops = &ast_udc_ep_ops;
+ ep->udc = udc;
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ if (i == 0) {
+ usb_ep_set_maxpacket_limit(&ep->ep,
+ AST_UDC_EP0_MAX_PACKET);
+ continue;
+ }
+
+ ep->ep_reg = udc->reg + AST_UDC_EP_BASE +
+ (AST_UDC_EP_OFFSET * (i - 1));
+
+ ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE);
+ ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE);
+ usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET);
+
+ ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET;
+ ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET;
+ ep->descs_wptr = 0;
+
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
+}
+
+static void ast_udc_init_dev(struct ast_udc_dev *udc)
+{
+ INIT_WORK(&udc->wake_work, ast_udc_wake_work);
+}
+
+static void ast_udc_init_hw(struct ast_udc_dev *udc)
+{
+ u32 ctrl;
+
+ /* Enable PHY */
+ ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ udelay(1);
+ ast_udc_write(udc, 0, AST_UDC_DEV_RESET);
+
+ /* Set descriptor ring size */
+ if (AST_UDC_DESCS_COUNT == 256) {
+ ctrl |= USB_EP_LONG_DESC;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+ }
+
+ /* Mask & ack all interrupts before installing the handler */
+ ast_udc_write(udc, 0, AST_UDC_IER);
+ ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR);
+
+ /* Enable some interrupts */
+ ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME |
+ UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET |
+ UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL |
+ UDC_IRQ_EP0_SETUP;
+ ast_udc_write(udc, ctrl, AST_UDC_IER);
+
+ /* Cleanup and enable ep ACK interrupts */
+ ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER);
+ ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR);
+
+ ast_udc_write(udc, 0, AST_UDC_EP0_CTRL);
+}
+
+static int ast_udc_remove(struct platform_device *pdev)
+{
+ struct ast_udc_dev *udc = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u32 ctrl;
+
+ usb_del_gadget_udc(&udc->gadget);
+ if (udc->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Disable upstream port connection */
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ clk_disable_unprepare(udc->clk);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->ep0_buf)
+ dma_free_coherent(&pdev->dev,
+ AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS,
+ udc->ep0_buf,
+ udc->ep0_buf_dma);
+
+ udc->ep0_buf = NULL;
+
+ return 0;
+}
+
+static int ast_udc_probe(struct platform_device *pdev)
+{
+ enum usb_device_speed max_speed;
+ struct device *dev = &pdev->dev;
+ struct ast_udc_dev *udc;
+ struct resource *res;
+ int rc;
+
+ udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ udc->gadget.dev.parent = dev;
+ udc->pdev = pdev;
+ spin_lock_init(&udc->lock);
+
+ udc->gadget.ops = &ast_udc_ops;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+ udc->gadget.name = "aspeed-udc";
+ udc->gadget.dev.init_name = "gadget";
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ udc->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(udc->reg)) {
+ dev_err(&pdev->dev, "Failed to map resources\n");
+ return PTR_ERR(udc->reg);
+ }
+
+ platform_set_drvdata(pdev, udc);
+
+ udc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc->clk)) {
+ rc = PTR_ERR(udc->clk);
+ goto err;
+ }
+ rc = clk_prepare_enable(udc->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc);
+ goto err;
+ }
+
+ /* Check if we need to limit the HW to USB1 */
+ max_speed = usb_get_maximum_speed(&pdev->dev);
+ if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
+ udc->force_usb1 = true;
+
+ /*
+ * Allocate DMA buffers for all EPs in one chunk
+ */
+ udc->ep0_buf = dma_alloc_coherent(&pdev->dev,
+ AST_UDC_EP_DMA_SIZE *
+ AST_UDC_NUM_ENDPOINTS,
+ &udc->ep0_buf_dma, GFP_KERNEL);
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->creq = udc->reg + AST_UDC_SETUP0;
+
+	/*
+	 * Support single-stage mode or 32/256-stage descriptor mode;
+	 * descriptor mode is the default.
+	 */
+ udc->desc_mode = AST_UDC_DESC_MODE;
+
+ dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ?
+ "descriptor mode" : "single mode");
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+
+	/* Initialize the udc endpoints */
+ ast_udc_init_ep(udc);
+
+	/* Initialize the udc device */
+ ast_udc_init_dev(udc);
+
+	/* Initialize the udc hardware */
+ ast_udc_init_hw(udc);
+
+ /* Find interrupt and install handler */
+ udc->irq = platform_get_irq(pdev, 0);
+ if (udc->irq < 0) {
+ rc = udc->irq;
+ goto err;
+ }
+
+ rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0,
+ KBUILD_MODNAME, udc);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ goto err;
+ }
+
+ rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to add gadget udc\n");
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "Initialized udc in USB%s mode\n",
+ udc->force_usb1 ? "1" : "2");
+
+ return 0;
+
+err:
+	dev_err(&pdev->dev, "Failed to probe udc, rc:0x%x\n", rc);
+ ast_udc_remove(pdev);
+
+ return rc;
+}
+
+static const struct of_device_id ast_udc_of_dt_ids[] = {
+ { .compatible = "aspeed,ast2600-udc", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids);
+
+static struct platform_driver ast_udc_driver = {
+ .probe = ast_udc_probe,
+ .remove = ast_udc_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ast_udc_of_dt_ids,
+ },
+};
+
+module_platform_driver(ast_udc_driver);
+
+MODULE_DESCRIPTION("ASPEED UDC driver");
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index ae2bfbac603e..53ca38c4b3ec 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2060,7 +2060,7 @@ static const struct usba_udc_errata at91sam9g45_errata = {
.pulse_bias = at91sam9g45_pulse_bias,
};
-static const struct usba_ep_config ep_config_sam9[] __initconst = {
+static const struct usba_ep_config ep_config_sam9[] = {
{ .nr_banks = 1 }, /* ep 0 */
{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
{ .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
@@ -2070,7 +2070,7 @@ static const struct usba_ep_config ep_config_sam9[] __initconst = {
{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
};
-static const struct usba_ep_config ep_config_sama5[] __initconst = {
+static const struct usba_ep_config ep_config_sama5[] = {
{ .nr_banks = 1 }, /* ep 0 */
{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
{ .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
@@ -2165,6 +2165,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus",
GPIOD_IN);
+ if (IS_ERR(udc->vbus_pin))
+ return ERR_CAST(udc->vbus_pin);
if (fifo_mode == 0) {
udc->num_ep = udc_config->num_ep;
@@ -2447,6 +2449,7 @@ static int usba_udc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
static struct platform_driver udc_driver = {
+ .probe = usba_udc_probe,
.remove = usba_udc_remove,
.driver = {
.name = "atmel_usba_udc",
@@ -2454,8 +2457,7 @@ static struct platform_driver udc_driver = {
.of_match_table = atmel_udc_dt_ids,
},
};
-
-module_platform_driver_probe(udc_driver, usba_udc_probe);
+module_platform_driver(udc_driver);
MODULE_DESCRIPTION("Atmel USBA UDC driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 67887316a1a6..1848ced073f8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -307,7 +307,7 @@ int bdc_ep_clear_stall(struct bdc *bdc, int epnum)
* his will reset the seq number for non EP0.
*/
if (epnum != 1) {
- /* if the endpoint it not stallled */
+ /* if the endpoint it not stalled */
if (!(ep->flags & BDC_EP_STALL)) {
ret = bdc_ep_set_stall(bdc, epnum);
if (ret)
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 7886497253cc..cafcf260394c 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1728,13 +1728,14 @@ static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
- if (udc->driver) {
+ mutex_lock(&udc_lock);
+ if (udc->driver)
ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
udc->driver->function);
- if (ret) {
- dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
- return ret;
- }
+ mutex_unlock(&udc_lock);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+ return ret;
}
return 0;
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 6d31ccf6aee5..3c37effdfa64 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3691,15 +3691,15 @@ static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
int err;
xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
- if (IS_ERR(xudc->genpd_dev_device)) {
- err = PTR_ERR(xudc->genpd_dev_device);
+ if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA;
dev_err(dev, "failed to get device power domain: %d\n", err);
return err;
}
xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
- if (IS_ERR(xudc->genpd_dev_ss)) {
- err = PTR_ERR(xudc->genpd_dev_ss);
+ if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA;
dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
return err;
}
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
index 98584f6b6c66..abdbcb1bacb0 100644
--- a/drivers/usb/gadget/udc/trace.h
+++ b/drivers/usb/gadget/udc/trace.h
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(udc_log_ep,
TP_PROTO(struct usb_ep *ep, int ret),
TP_ARGS(ep, ret),
TP_STRUCT__entry(
- __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+ __string(name, ep->name)
__field(unsigned, maxpacket)
__field(unsigned, maxpacket_limit)
__field(unsigned, max_streams)
@@ -152,7 +152,7 @@ DECLARE_EVENT_CLASS(udc_log_ep,
__field(int, ret)
),
TP_fast_assign(
- snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+ __assign_str(name, ep->name);
__entry->maxpacket = ep->maxpacket;
__entry->maxpacket_limit = ep->maxpacket_limit;
__entry->max_streams = ep->max_streams;
@@ -214,7 +214,7 @@ DECLARE_EVENT_CLASS(udc_log_req,
TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
TP_ARGS(ep, req, ret),
TP_STRUCT__entry(
- __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+ __string(name, ep->name)
__field(unsigned, length)
__field(unsigned, actual)
__field(unsigned, num_sgs)
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(udc_log_req,
__field(struct usb_request *, req)
),
TP_fast_assign(
- snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+ __assign_str(name, ep->name);
__entry->length = req->length;
__entry->actual = req->actual;
__entry->num_sgs = req->num_sgs;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 682b3d2da623..fd9264cf6c87 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -306,6 +306,16 @@ config USB_EHCI_MV
Dova, Armada 370 and Armada XP. See "Support for Marvell EBU
on-chip EHCI USB controller" for those.
+config USB_OCTEON_HCD
+ tristate "Cavium Networks Octeon USB support"
+ depends on CAVIUM_OCTEON_SOC && USB
+ help
+	  This driver supports the USB host controller found on some
+	  Cavium Networks products in the Octeon family.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-hcd.
+
config USB_CNS3XXX_EHCI
bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
depends on ARCH_CNS3XXX || COMPILE_TEST
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 2948983618fb..2c8a61be7e46 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o
obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o
obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
obj-$(CONFIG_USB_OHCI_HCD_DAVINCI) += ohci-da8xx.o
+obj-$(CONFIG_USB_OCTEON_HCD) += octeon-hcd.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 385be30baad3..896c0d107f72 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -76,14 +76,9 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
return -ENODEV;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
- return -ENODEV;
- }
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent,
&pdev->dev, dev_name(&pdev->dev), NULL);
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f343967443e2..6924f0316e9a 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -370,6 +370,8 @@ static int ehci_platform_probe(struct platform_device *dev)
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
+ hcd->tpl_support = of_usb_host_tpl_support(dev->dev.of_node);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6bbaee74f7e7..28a19693c19f 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -148,6 +148,7 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op)
} else {
ehci->has_amcc_usb23 = 1;
}
+ of_node_put(np);
}
if (of_get_property(dn, "big-endian", NULL)) {
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 1163af6fad77..807e64991e3e 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1162,7 +1162,7 @@ submit_async (
* This is done in two parts: first SETUP req for GetDesc is sent then
* 15 seconds later, the IN stage for GetDesc starts to req data from dev
*
- * is_setup : i/p arguement decides which of the two stage needs to be
+ * is_setup : i/p argument decides which of the two stages needs to be
* performed; TRUE - SETUP and FALSE - IN+STATUS
* Returns 0 if success
*/
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 44a7e58a26e3..e5df17522892 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -112,6 +112,9 @@ static struct platform_device *fsl_usb2_device_register(
goto error;
}
+ pdev->dev.of_node = ofdev->dev.of_node;
+ pdev->dev.of_node_reused = true;
+
retval = platform_device_add(pdev);
if (retval)
goto error;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 502a3ac5e35b..352e3ac2b377 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -312,7 +312,7 @@ static const int hrsl_to_error[] = {
/*
* See https://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
- * reasonable overview of how control transfers use the the IN/OUT
+ * reasonable overview of how control transfers use the IN/OUT
* tokens.
*/
#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/usb/host/octeon-hcd.c
index a1cd81d4a114..a1cd81d4a114 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/usb/host/octeon-hcd.c
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/usb/host/octeon-hcd.h
index 9ed619c93a4e..9ed619c93a4e 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ b/drivers/usb/host/octeon-hcd.h
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index a24aea3d2759..98326465e2dc 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -13,6 +13,7 @@
* This file is licenced under the GPL.
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
@@ -55,6 +56,7 @@ struct ohci_at91_priv {
bool clocked;
bool wakeup; /* Saved wake-up state for resume */
struct regmap *sfr_regmap;
+ u32 suspend_smc_id;
};
/* interface and function clocks; sometimes also an AHB clock */
@@ -135,6 +137,19 @@ static void at91_stop_hc(struct platform_device *pdev)
static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
+static u32 at91_dt_suspend_smc(struct device *dev)
+{
+ u32 suspend_smc_id;
+
+ if (!dev->of_node)
+ return 0;
+
+ if (of_property_read_u32(dev->of_node, "microchip,suspend-smc-id", &suspend_smc_id))
+ return 0;
+
+ return suspend_smc_id;
+}
+
static struct regmap *at91_dt_syscon_sfr(void)
{
struct regmap *regmap;
@@ -215,9 +230,13 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
goto err;
}
- ohci_at91->sfr_regmap = at91_dt_syscon_sfr();
- if (!ohci_at91->sfr_regmap)
- dev_dbg(dev, "failed to find sfr node\n");
+ ohci_at91->suspend_smc_id = at91_dt_suspend_smc(dev);
+ if (!ohci_at91->suspend_smc_id) {
+ dev_dbg(dev, "failed to find sfr suspend smc id, using regmap\n");
+ ohci_at91->sfr_regmap = at91_dt_syscon_sfr();
+ if (!ohci_at91->sfr_regmap)
+ dev_dbg(dev, "failed to find sfr node\n");
+ }
board = hcd->self.controller->platform_data;
ohci = hcd_to_ohci(hcd);
@@ -303,24 +322,30 @@ static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
return length;
}
-static int ohci_at91_port_suspend(struct regmap *regmap, u8 set)
+static int ohci_at91_port_suspend(struct ohci_at91_priv *ohci_at91, u8 set)
{
+ struct regmap *regmap = ohci_at91->sfr_regmap;
u32 regval;
int ret;
- if (!regmap)
- return 0;
+ if (ohci_at91->suspend_smc_id) {
+ struct arm_smccc_res res;
- ret = regmap_read(regmap, AT91_SFR_OHCIICR, &regval);
- if (ret)
- return ret;
+ arm_smccc_smc(ohci_at91->suspend_smc_id, set, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return -EINVAL;
+ } else if (regmap) {
+ ret = regmap_read(regmap, AT91_SFR_OHCIICR, &regval);
+ if (ret)
+ return ret;
- if (set)
- regval |= AT91_OHCIICR_USB_SUSPEND;
- else
- regval &= ~AT91_OHCIICR_USB_SUSPEND;
+ if (set)
+ regval |= AT91_OHCIICR_USB_SUSPEND;
+ else
+ regval &= ~AT91_OHCIICR_USB_SUSPEND;
- regmap_write(regmap, AT91_SFR_OHCIICR, regval);
+ regmap_write(regmap, AT91_SFR_OHCIICR, regval);
+ }
return 0;
}
@@ -357,9 +382,8 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
- if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
- ohci_at91_port_suspend(ohci_at91->sfr_regmap,
- 1);
+ if (valid_port(wIndex)) {
+ ohci_at91_port_suspend(ohci_at91, 1);
return 0;
}
break;
@@ -400,9 +424,8 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
- if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
- ohci_at91_port_suspend(ohci_at91->sfr_regmap,
- 0);
+ if (valid_port(wIndex)) {
+ ohci_at91_port_suspend(ohci_at91, 0);
return 0;
}
break;
@@ -630,10 +653,10 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
msleep(1);
- ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
+ ohci_at91_port_suspend(ohci_at91, 1);
at91_stop_clock(ohci_at91);
} else {
- ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
+ ohci_at91_port_suspend(ohci_at91, 1);
}
return ret;
@@ -645,7 +668,7 @@ ohci_hcd_at91_drv_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
- ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+ ohci_at91_port_suspend(ohci_at91, 0);
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 85878e8ad331..106a6bcefb08 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -164,6 +164,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
if (!isp1301_i2c_client)
return -EPROBE_DEFER;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 47dfbfe9e519..0adae6265127 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -28,6 +28,7 @@
#include <linux/usb/ohci_pdriver.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/of.h>
#include "ohci.h"
@@ -210,6 +211,8 @@ static int ohci_platform_probe(struct platform_device *dev)
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
+ hcd->tpl_support = of_usb_host_tpl_support(dev->dev.of_node);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 1960b8dfdba5..591f675cc930 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -166,6 +166,7 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op)
release_mem_region(res.start, 0x4);
} else
pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
+ of_node_put(np);
}
irq_dispose_mapping(irq);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index feca826d3f6a..75c2b28b3379 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -203,6 +203,31 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
goto err1;
}
+ /*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller. If
+	 * an access is made to a region of memory above 1MB relative to the
+	 * bank base, it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+ * User's Guide" mentions that jumpers R51 and R52 control the
+ * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+ * SDRAM bank 1 on Neponset). The default configuration selects
+ * Assabet, so any address in bank 1 is necessarily invalid.
+ *
+ * As a workaround, use a bounce buffer in addressable memory
+ * as local_mem, relying on ZONE_DMA to provide an area that
+ * fits within the above constraints.
+ *
+ * SZ_64K is an estimate for what size this might need.
+ */
+ ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
+ if (ret)
+ goto err1;
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index b91d50da6127..f5de586454e3 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -153,7 +153,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
* fine. This is however not always the case - buffers may be allocated
* using kmalloc() - so the usb core needs to be told that it must copy
* data into our local memory if the buffers happen to be placed in
- * regular memory. A non-null hcd->localmem_pool initialized by the
+ * regular memory. A non-null hcd->localmem_pool initialized by
* the call to usb_hcd_setup_local_mem() below does just that.
*/
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index 0a201a73b196..3ef6d52839e5 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -43,7 +43,7 @@ static int uhci_grlib_init(struct usb_hcd *hcd)
uhci->rh_numports = uhci_count_ports(hcd);
- /* Set up pointers to to generic functions */
+ /* Set up pointers to generic functions */
uhci->reset_hc = uhci_generic_reset_hc;
uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
/* No special actions need to be taken for the functions below */
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 8ae5ccd26753..0688c3e5bfe2 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -314,7 +314,7 @@ struct uhci_td {
*
* There's a special skeleton QH for Isochronous QHs which never appears
* on the schedule. Isochronous TDs go on the schedule before the
- * the skeleton QHs. The hardware accesses them directly rather than
+ * skeleton QHs. The hardware accesses them directly rather than
* through their QH, which is used only for bookkeeping purposes.
* While the UHCI spec doesn't forbid the use of QHs for Isochronous,
* it doesn't use them either. And the spec says that queues never
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b1045f534a4b..01705e559c42 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -18,6 +18,7 @@
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "xhci.h"
#include "xhci-mtk.h"
@@ -550,6 +551,12 @@ static int xhci_mtk_probe(struct platform_device *pdev)
if (ret)
goto disable_ldos;
+ ret = device_reset_optional(dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to reset controller\n");
+ goto disable_clk;
+ }
+
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 52599d96634f..93f8b355bc70 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -120,7 +120,6 @@ static int renesas_fw_verify(const void *fw_data,
size_t length)
{
u16 fw_version_pointer;
- u16 fw_version;
/*
* The Firmware's Data Format is describe in
@@ -150,9 +149,6 @@ static int renesas_fw_verify(const void *fw_data,
return -EINVAL;
}
- fw_version = get_unaligned_le16(fw_data + fw_version_pointer);
- pr_err("got firmware version: %02x.", fw_version);
-
return 0;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 46d0b9ad6f74..ad81e9a508b1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1964,7 +1964,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
/*
* Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
- * RExit to a disconnect state). If so, let the the driver know it's
+ * RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 996958a6565c..bdb776553826 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1010,15 +1010,15 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
int err;
tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
- if (IS_ERR(tegra->genpd_dev_host)) {
- err = PTR_ERR(tegra->genpd_dev_host);
+ if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) {
+ err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA;
dev_err(dev, "failed to get host pm-domain: %d\n", err);
return err;
}
tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
- if (IS_ERR(tegra->genpd_dev_ss)) {
- err = PTR_ERR(tegra->genpd_dev_ss);
+ if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) {
+ err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA;
dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
return err;
}
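
The switch to IS_ERR_OR_NULL() in this hunk is needed because dev_pm_domain_attach_by_name() can legitimately return NULL (no PM domain described for the device) as well as an ERR_PTR(); PTR_ERR(NULL) evaluates to 0, so the `?:` fallback turns the NULL case into a real error code. A hedged, generic sketch of the same idiom -- the wrapper name is made up, and the -ENODATA choice simply mirrors the patch:

#include <linux/err.h>
#include <linux/pm_domain.h>

/* Hypothetical wrapper: require that a named PM domain is actually attached. */
static struct device *attach_required_domain(struct device *dev, const char *name)
{
	struct device *pd = dev_pm_domain_attach_by_name(dev, name);

	/* NULL means "nothing to attach"; treat it as an error, like the patch does */
	if (IS_ERR_OR_NULL(pd))
		return ERR_PTR(PTR_ERR(pd) ? : -ENODATA);

	return pd;
}
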
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index a5da02077297..61e93a3540a7 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -28,9 +28,9 @@
DECLARE_EVENT_CLASS(xhci_log_msg,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
- TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+ TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
TP_fast_assign(
- vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
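
The trace-event conversions in this series (here, and in mtu3_trace.h and musb_trace.h further down) swap a fixed-size __dynamic_array() plus vsnprintf() for the __vstring()/__assign_vstr() helpers, so each ring-buffer entry is sized to the formatted message instead of always reserving XHCI_MSG_MAX (or MTU3_MSG_MAX/MUSB_MSG_MAX) bytes. A minimal sketch of the resulting event shape for a hypothetical driver; the event name is made up and the usual trace-header boilerplate (TRACE_SYSTEM, include guards, define_trace.h) is omitted:

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(foo_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	/* entry is sized to the formatted string, not a fixed maximum */
	TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
	TP_fast_assign(
		__assign_vstr(msg, vaf->fmt, vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);
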
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 28aaf031f9a8..1960b47acfb2 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2417,7 +2417,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STOP_RING:
- sprintf(str,
+ snprintf(str, size,
"%s: slot %d sp %d ep %d flags %c",
xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index fc0e22cc6fda..67f098579fb4 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -38,7 +38,7 @@
*
* version 0.7.3
* bugfix : The mdc800->state field gets set to READY after the
- * the disconnect function sets it to NOT_CONNECTED. This makes the
+ * disconnect function sets it to NOT_CONNECTED. This makes the
* driver running like the camera is connected and causes some
* hang ups.
*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 4c5ddbd75b7e..9367c12c7e6f 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -295,3 +295,19 @@ config BRCM_USB_PINMAP
This option enables support for remapping some USB external
signals, which are typically on dedicated pins on the chip,
to any gpio.
+
+config USB_ONBOARD_HUB
+ tristate "Onboard USB hub support"
+ depends on OF || COMPILE_TEST
+ help
+ Say Y here if you want to support discrete onboard USB hubs that
+ don't require an additional control bus for initialization, but
+ need some non-trivial form of initialization, such as enabling a
+ power regulator. An example of such a hub is the Realtek
+ RTS5411.
+
+ This driver can be used as a module but its state (module vs
+ builtin) must match the state of the USB subsystem. Enabling
+ this config will enable the driver and it will automatically
+ match the state of the USB subsystem. If this driver is a
+ module it will be called onboard_usb_hub.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 35bdb4b6c3b6..93581baec3a8 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o
obj-$(CONFIG_BRCM_USB_PINMAP) += brcmstb-usb-pinmap.o
+obj-$(CONFIG_USB_ONBOARD_HUB) += onboard_usb_hub.o
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index dcc88df72df4..7cbef74dfc9a 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -716,9 +716,11 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
- dev->interrupt_in_interval = min_interrupt_in_interval > dev->interrupt_in_endpoint->bInterval ? min_interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
+ dev->interrupt_in_interval = max_t(int, min_interrupt_in_interval,
+ dev->interrupt_in_endpoint->bInterval);
if (dev->interrupt_out_endpoint)
- dev->interrupt_out_interval = min_interrupt_out_interval > dev->interrupt_out_endpoint->bInterval ? min_interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
+ dev->interrupt_out_interval = max_t(int, min_interrupt_out_interval,
+ dev->interrupt_out_endpoint->bInterval);
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, dev);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
new file mode 100644
index 000000000000..d1df153e7f5a
--- /dev/null
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for onboard USB hubs
+ *
+ * Copyright (c) 2022, Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/sysfs.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/onboard_hub.h>
+#include <linux/workqueue.h>
+
+#include "onboard_usb_hub.h"
+
+static struct usb_device_driver onboard_hub_usbdev_driver;
+
+/************************** Platform driver **************************/
+
+struct usbdev_node {
+ struct usb_device *udev;
+ struct list_head list;
+};
+
+struct onboard_hub {
+ struct regulator *vdd;
+ struct device *dev;
+ const struct onboard_hub_pdata *pdata;
+ struct gpio_desc *reset_gpio;
+ bool always_powered_in_suspend;
+ bool is_powered_on;
+ bool going_away;
+ struct list_head udev_list;
+ struct work_struct attach_usb_driver_work;
+ struct mutex lock;
+};
+
+static int onboard_hub_power_on(struct onboard_hub *hub)
+{
+ int err;
+
+ err = regulator_enable(hub->vdd);
+ if (err) {
+ dev_err(hub->dev, "failed to enable regulator: %d\n", err);
+ return err;
+ }
+
+ fsleep(hub->pdata->reset_us);
+ gpiod_set_value_cansleep(hub->reset_gpio, 0);
+
+ hub->is_powered_on = true;
+
+ return 0;
+}
+
+static int onboard_hub_power_off(struct onboard_hub *hub)
+{
+ int err;
+
+ if (hub->reset_gpio) {
+ gpiod_set_value_cansleep(hub->reset_gpio, 1);
+ fsleep(hub->pdata->reset_us);
+ }
+
+ err = regulator_disable(hub->vdd);
+ if (err) {
+ dev_err(hub->dev, "failed to disable regulator: %d\n", err);
+ return err;
+ }
+
+ hub->is_powered_on = false;
+
+ return 0;
+}
+
+static int __maybe_unused onboard_hub_suspend(struct device *dev)
+{
+ struct onboard_hub *hub = dev_get_drvdata(dev);
+ struct usbdev_node *node;
+ bool power_off = true;
+
+ if (hub->always_powered_in_suspend)
+ return 0;
+
+ mutex_lock(&hub->lock);
+
+ list_for_each_entry(node, &hub->udev_list, list) {
+ if (!device_may_wakeup(node->udev->bus->controller))
+ continue;
+
+ if (usb_wakeup_enabled_descendants(node->udev)) {
+ power_off = false;
+ break;
+ }
+ }
+
+ mutex_unlock(&hub->lock);
+
+ if (!power_off)
+ return 0;
+
+ return onboard_hub_power_off(hub);
+}
+
+static int __maybe_unused onboard_hub_resume(struct device *dev)
+{
+ struct onboard_hub *hub = dev_get_drvdata(dev);
+
+ if (hub->is_powered_on)
+ return 0;
+
+ return onboard_hub_power_on(hub);
+}
+
+static inline void get_udev_link_name(const struct usb_device *udev, char *buf, size_t size)
+{
+ snprintf(buf, size, "usb_dev.%s", dev_name(&udev->dev));
+}
+
+static int onboard_hub_add_usbdev(struct onboard_hub *hub, struct usb_device *udev)
+{
+ struct usbdev_node *node;
+ char link_name[64];
+ int err;
+
+ mutex_lock(&hub->lock);
+
+ if (hub->going_away) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ node->udev = udev;
+
+ list_add(&node->list, &hub->udev_list);
+
+ mutex_unlock(&hub->lock);
+
+ get_udev_link_name(udev, link_name, sizeof(link_name));
+ WARN_ON(sysfs_create_link(&hub->dev->kobj, &udev->dev.kobj, link_name));
+
+ return 0;
+
+error:
+ mutex_unlock(&hub->lock);
+
+ return err;
+}
+
+static void onboard_hub_remove_usbdev(struct onboard_hub *hub, const struct usb_device *udev)
+{
+ struct usbdev_node *node;
+ char link_name[64];
+
+ get_udev_link_name(udev, link_name, sizeof(link_name));
+ sysfs_remove_link(&hub->dev->kobj, link_name);
+
+ mutex_lock(&hub->lock);
+
+ list_for_each_entry(node, &hub->udev_list, list) {
+ if (node->udev == udev) {
+ list_del(&node->list);
+ kfree(node);
+ break;
+ }
+ }
+
+ mutex_unlock(&hub->lock);
+}
+
+static ssize_t always_powered_in_suspend_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct onboard_hub *hub = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hub->always_powered_in_suspend);
+}
+
+static ssize_t always_powered_in_suspend_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct onboard_hub *hub = dev_get_drvdata(dev);
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ hub->always_powered_in_suspend = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(always_powered_in_suspend);
+
+static struct attribute *onboard_hub_attrs[] = {
+ &dev_attr_always_powered_in_suspend.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(onboard_hub);
+
+static void onboard_hub_attach_usb_driver(struct work_struct *work)
+{
+ int err;
+
+ err = driver_attach(&onboard_hub_usbdev_driver.drvwrap.driver);
+ if (err)
+ pr_err("Failed to attach USB driver: %d\n", err);
+}
+
+static int onboard_hub_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct onboard_hub *hub;
+ int err;
+
+ hub = devm_kzalloc(dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ of_id = of_match_device(onboard_hub_match, &pdev->dev);
+ if (!of_id)
+ return -ENODEV;
+
+ hub->pdata = of_id->data;
+ if (!hub->pdata)
+ return -EINVAL;
+
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
+ hub->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hub->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(hub->reset_gpio), "failed to get reset GPIO\n");
+
+ hub->dev = dev;
+ mutex_init(&hub->lock);
+ INIT_LIST_HEAD(&hub->udev_list);
+
+ dev_set_drvdata(dev, hub);
+
+ err = onboard_hub_power_on(hub);
+ if (err)
+ return err;
+
+ /*
+ * The USB driver might have been detached from the USB devices by
+ * onboard_hub_remove() (e.g. through an 'unbind' by userspace),
+ * make sure to re-attach it if needed.
+ *
+ * This needs to be done deferred to avoid self-deadlocks on systems
+ * with nested onboard hubs.
+ */
+ INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver);
+ schedule_work(&hub->attach_usb_driver_work);
+
+ return 0;
+}
+
+static int onboard_hub_remove(struct platform_device *pdev)
+{
+ struct onboard_hub *hub = dev_get_drvdata(&pdev->dev);
+ struct usbdev_node *node;
+ struct usb_device *udev;
+
+ hub->going_away = true;
+
+ if (&hub->attach_usb_driver_work != current_work())
+ cancel_work_sync(&hub->attach_usb_driver_work);
+
+ mutex_lock(&hub->lock);
+
+ /* unbind the USB devices to avoid dangling references to this device */
+ while (!list_empty(&hub->udev_list)) {
+ node = list_first_entry(&hub->udev_list, struct usbdev_node, list);
+ udev = node->udev;
+
+ /*
+ * Unbinding the driver will call onboard_hub_remove_usbdev(),
+ * which acquires hub->lock. We must release the lock first.
+ */
+ get_device(&udev->dev);
+ mutex_unlock(&hub->lock);
+ device_release_driver(&udev->dev);
+ put_device(&udev->dev);
+ mutex_lock(&hub->lock);
+ }
+
+ mutex_unlock(&hub->lock);
+
+ return onboard_hub_power_off(hub);
+}
+
+MODULE_DEVICE_TABLE(of, onboard_hub_match);
+
+static const struct dev_pm_ops __maybe_unused onboard_hub_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(onboard_hub_suspend, onboard_hub_resume)
+};
+
+static struct platform_driver onboard_hub_driver = {
+ .probe = onboard_hub_probe,
+ .remove = onboard_hub_remove,
+
+ .driver = {
+ .name = "onboard-usb-hub",
+ .of_match_table = onboard_hub_match,
+ .pm = pm_ptr(&onboard_hub_pm_ops),
+ .dev_groups = onboard_hub_groups,
+ },
+};
+
+/************************** USB driver **************************/
+
+#define VENDOR_ID_MICROCHIP 0x0424
+#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_TI 0x0451
+
+/*
+ * Returns the onboard_hub platform device that is associated with the USB
+ * device passed as parameter.
+ */
+static struct onboard_hub *_find_onboard_hub(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct onboard_hub *hub;
+
+ pdev = of_find_device_by_node(dev->of_node);
+ if (!pdev) {
+ np = of_parse_phandle(dev->of_node, "peer-hub", 0);
+ if (!np) {
+ dev_err(dev, "failed to find device node for peer hub\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+ }
+
+ hub = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
+
+ /*
+ * The presence of drvdata ('hub') indicates that the platform driver
+ * finished probing. This handles the case where (conceivably) we could
+ * be running at the exact same time as the platform driver's probe. If
+ * we detect the race we request probe deferral and we'll come back and
+ * try again.
+ */
+ if (!hub)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return hub;
+}
+
+static int onboard_hub_usbdev_probe(struct usb_device *udev)
+{
+ struct device *dev = &udev->dev;
+ struct onboard_hub *hub;
+ int err;
+
+ /* ignore supported hubs without device tree node */
+ if (!dev->of_node)
+ return -ENODEV;
+
+ hub = _find_onboard_hub(dev);
+ if (IS_ERR(hub))
+ return PTR_ERR(hub);
+
+ dev_set_drvdata(dev, hub);
+
+ err = onboard_hub_add_usbdev(hub, udev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+{
+ struct onboard_hub *hub = dev_get_drvdata(&udev->dev);
+
+ onboard_hub_remove_usbdev(hub, udev);
+}
+
+static const struct usb_device_id onboard_hub_id_table[] = {
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, onboard_hub_id_table);
+
+static struct usb_device_driver onboard_hub_usbdev_driver = {
+ .name = "onboard-usb-hub",
+ .probe = onboard_hub_usbdev_probe,
+ .disconnect = onboard_hub_usbdev_disconnect,
+ .generic_subclass = 1,
+ .supports_autosuspend = 1,
+ .id_table = onboard_hub_id_table,
+};
+
+static int __init onboard_hub_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&onboard_hub_driver);
+ if (ret)
+ return ret;
+
+ ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
+ if (ret)
+ platform_driver_unregister(&onboard_hub_driver);
+
+ return ret;
+}
+module_init(onboard_hub_init);
+
+static void __exit onboard_hub_exit(void)
+{
+ usb_deregister_device_driver(&onboard_hub_usbdev_driver);
+ platform_driver_unregister(&onboard_hub_driver);
+}
+module_exit(onboard_hub_exit);
+
+MODULE_AUTHOR("Matthias Kaehlcke <mka@chromium.org>");
+MODULE_DESCRIPTION("Driver for discrete onboard USB hubs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
new file mode 100644
index 000000000000..34beab8bce3d
--- /dev/null
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2022, Google LLC
+ */
+
+#ifndef _USB_MISC_ONBOARD_USB_HUB_H
+#define _USB_MISC_ONBOARD_USB_HUB_H
+
+struct onboard_hub_pdata {
+ unsigned long reset_us; /* reset pulse width in us */
+};
+
+static const struct onboard_hub_pdata microchip_usb424_data = {
+ .reset_us = 1,
+};
+
+static const struct onboard_hub_pdata realtek_rts5411_data = {
+ .reset_us = 0,
+};
+
+static const struct onboard_hub_pdata ti_tusb8041_data = {
+ .reset_us = 3000,
+};
+
+static const struct of_device_id onboard_hub_match[] = {
+ { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ { .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
+ { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
+ { .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
+ { .compatible = "usbbda,5414", .data = &realtek_rts5411_data, },
+ {}
+};
+
+#endif /* _USB_MISC_ONBOARD_USB_HUB_H */
diff --git a/drivers/usb/misc/onboard_usb_hub_pdevs.c b/drivers/usb/misc/onboard_usb_hub_pdevs.c
new file mode 100644
index 000000000000..ed22a18f4ab7
--- /dev/null
+++ b/drivers/usb/misc/onboard_usb_hub_pdevs.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * API for creating and destroying USB onboard hub platform devices
+ *
+ * Copyright (c) 2022, Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/of.h>
+#include <linux/usb/onboard_hub.h>
+
+#include "onboard_usb_hub.h"
+
+struct pdev_list_entry {
+ struct platform_device *pdev;
+ struct list_head node;
+};
+
+static bool of_is_onboard_usb_hub(const struct device_node *np)
+{
+ return !!of_match_node(onboard_hub_match, np);
+}
+
+/**
+ * onboard_hub_create_pdevs -- create platform devices for onboard USB hubs
+ * @parent_hub : parent hub to scan for connected onboard hubs
+ * @pdev_list : list of onboard hub platform devices owned by the parent hub
+ *
+ * Creates a platform device for each supported onboard hub that is connected to
+ * the given parent hub. The platform device is in charge of initializing the
+ * hub (enable regulators, take the hub out of reset, ...) and can optionally
+ * control whether the hub remains powered during system suspend or not.
+ *
+ * To keep track of the platform devices they are added to a list that is owned
+ * by the parent hub.
+ *
+ * Some background about the logic in this function, which can be a bit hard
+ * to follow:
+ *
+ * Root hubs don't have dedicated device tree nodes, but use the node of their
+ * HCD. The primary and secondary HCD are usually represented by a single DT
+ * node. That means the root hubs of the primary and secondary HCD share the
+ * same device tree node (the HCD node). As a result this function can be called
+ * twice with the same DT node for root hubs. We only want to create a single
+ * platform device for each physical onboard hub, hence for root hubs the loop
+ * is only executed for the root hub of the primary HCD. Since the function
+ * scans through all child nodes it still creates pdevs for onboard hubs
+ * connected to the root hub of the secondary HCD if needed.
+ *
+ * Further there must be only one platform device for onboard hubs with a peer
+ * hub (the hub is a single physical device). To achieve this two measures are
+ * taken: pdevs for onboard hubs with a peer are only created when the function
+ * is called on behalf of the parent hub that is connected to the primary HCD
+ * (directly or through other hubs). For onboard hubs connected to root hubs
+ * the function processes the nodes of both peers. A platform device is only
+ * created if the peer hub doesn't have one already.
+ */
+void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list)
+{
+ int i;
+ struct usb_hcd *hcd = bus_to_hcd(parent_hub->bus);
+ struct device_node *np, *npc;
+ struct platform_device *pdev;
+ struct pdev_list_entry *pdle;
+
+ if (!parent_hub->dev.of_node)
+ return;
+
+ if (!parent_hub->parent && !usb_hcd_is_primary_hcd(hcd))
+ return;
+
+ for (i = 1; i <= parent_hub->maxchild; i++) {
+ np = usb_of_get_device_node(parent_hub, i);
+ if (!np)
+ continue;
+
+ if (!of_is_onboard_usb_hub(np))
+ goto node_put;
+
+ npc = of_parse_phandle(np, "peer-hub", 0);
+ if (npc) {
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ of_node_put(npc);
+ goto node_put;
+ }
+
+ pdev = of_find_device_by_node(npc);
+ of_node_put(npc);
+
+ if (pdev) {
+ put_device(&pdev->dev);
+ goto node_put;
+ }
+ }
+
+ pdev = of_platform_device_create(np, NULL, &parent_hub->dev);
+ if (!pdev) {
+ dev_err(&parent_hub->dev,
+ "failed to create platform device for onboard hub '%pOF'\n", np);
+ goto node_put;
+ }
+
+ pdle = kzalloc(sizeof(*pdle), GFP_KERNEL);
+ if (!pdle) {
+ of_platform_device_destroy(&pdev->dev, NULL);
+ goto node_put;
+ }
+
+ pdle->pdev = pdev;
+ list_add(&pdle->node, pdev_list);
+
+node_put:
+ of_node_put(np);
+ }
+}
+EXPORT_SYMBOL_GPL(onboard_hub_create_pdevs);
+
+/**
+ * onboard_hub_destroy_pdevs -- free resources of onboard hub platform devices
+ * @pdev_list : list of onboard hub platform devices
+ *
+ * Destroys the platform devices in the given list and frees the memory associated
+ * with the list entry.
+ */
+void onboard_hub_destroy_pdevs(struct list_head *pdev_list)
+{
+ struct pdev_list_entry *pdle, *tmp;
+
+ list_for_each_entry_safe(pdle, tmp, pdev_list, node) {
+ list_del(&pdle->node);
+ of_platform_device_destroy(&pdle->pdev->dev, NULL);
+ kfree(pdle);
+ }
+}
+EXPORT_SYMBOL_GPL(onboard_hub_destroy_pdevs);
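
onboard_hub_create_pdevs() and onboard_hub_destroy_pdevs() above are exported for whatever code owns the parent hub's usb_device (the call sites are not part of this hunk). A sketch of how a caller might manage the pdev list across attach and detach, assuming a made-up owner structure that embeds the list head; the API itself comes from linux/usb/onboard_hub.h, which the new files include:

#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/onboard_hub.h>

struct my_hub_owner {
	struct usb_device *hub_udev;		/* the parent hub being managed */
	struct list_head onboard_hub_devs;	/* pdevs created for onboard hubs */
};

static void my_hub_owner_attach(struct my_hub_owner *owner)
{
	INIT_LIST_HEAD(&owner->onboard_hub_devs);
	/* scan the hub's ports and create one pdev per supported onboard hub */
	onboard_hub_create_pdevs(owner->hub_udev, &owner->onboard_hub_devs);
}

static void my_hub_owner_detach(struct my_hub_owner *owner)
{
	/* tear the platform devices down and free the list entries */
	onboard_hub_destroy_pdevs(&owner->onboard_hub_devs);
}
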
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index dfa0d5ce6012..fcb95fb639e0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -248,7 +248,7 @@ sisusbcon_init(struct vc_data *c, int init)
*/
kref_get(&sisusb->kref);
- if (!*c->vc_uni_pagedir_loc)
+ if (!*c->uni_pagedict_loc)
con_set_default_unimap(c);
mutex_unlock(&sisusb->lock);
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 4bc816bb09bb..c3114d9bd128 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -167,7 +167,7 @@ static ssize_t text_show(struct device *dev,
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
- return snprintf(buf, mydev->textlength, "%s\n", mydev->text);
+ return sysfs_emit(buf, "%s\n", mydev->text);
}
static ssize_t text_store(struct device *dev,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 150090ee4ec1..ac0d75ac2d2f 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -2638,7 +2638,7 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
* different busses) to use when testing, and allocate one thread per
* test. So discovery is simplified, and we have no device naming issues.
*
- * Don't use these only as stress/load tests. Use them along with with
+ * Don't use these only as stress/load tests. Use them along with
* other USB bus activity: plugging, unplugging, mousing, mp3 playback,
* video capture, and so on. Run different tests at different times, in
* different sequences. Nothing here should interact with other devices,
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 022bbdc54e68..2d7b57e07eee 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -317,6 +317,7 @@ static inline struct ssusb_mtk *dev_to_ssusb(struct device *dev)
* @ep0_req: dummy request used while handling standard USB requests
* for GET_STATUS and SET_SEL
* @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests
+ * @u3_capable: is capable of supporting USB3
*/
struct mtu3 {
spinlock_t lock;
@@ -353,10 +354,12 @@ struct mtu3 {
unsigned softconnect:1;
unsigned u1_enable:1;
unsigned u2_enable:1;
- unsigned is_u3_ip:1;
+ unsigned u3_capable:1;
unsigned delayed_status:1;
unsigned gen2cp:1;
unsigned connected:1;
+ unsigned async_callbacks:1;
+ unsigned separate_fifo:1;
u8 address;
u8 test_mode_nr;
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index c4a2c37abf62..0ca173af87bb 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -100,7 +100,7 @@ static int mtu3_device_enable(struct mtu3 *mtu)
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
- if (mtu->is_u3_ip) {
+ if (mtu->u3_capable) {
check_clk = SSUSB_U3_MAC_RST_B_STS;
mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
@@ -112,7 +112,7 @@ static int mtu3_device_enable(struct mtu3 *mtu)
if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
- if (mtu->is_u3_ip)
+ if (mtu->u3_capable)
mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
SSUSB_U3_PORT_DUAL_MODE);
}
@@ -124,7 +124,7 @@ static void mtu3_device_disable(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
- if (mtu->is_u3_ip)
+ if (mtu->u3_capable)
mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
@@ -133,7 +133,7 @@ static void mtu3_device_disable(struct mtu3 *mtu)
if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
- if (mtu->is_u3_ip)
+ if (mtu->u3_capable)
mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
SSUSB_U3_PORT_DUAL_MODE);
}
@@ -146,7 +146,7 @@ static void mtu3_dev_power_on(struct mtu3 *mtu)
void __iomem *ibase = mtu->ippc_base;
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
- if (mtu->is_u3_ip)
+ if (mtu->u3_capable)
mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), SSUSB_U3_PORT_PDN);
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_PDN);
@@ -156,7 +156,7 @@ static void mtu3_dev_power_down(struct mtu3 *mtu)
{
void __iomem *ibase = mtu->ippc_base;
- if (mtu->is_u3_ip)
+ if (mtu->u3_capable)
mtu3_setbits(ibase, SSUSB_U3_CTRL(0), SSUSB_U3_PORT_PDN);
mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_PDN);
@@ -213,7 +213,7 @@ static void mtu3_intr_enable(struct mtu3 *mtu)
value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
- if (mtu->is_u3_ip) {
+ if (mtu->u3_capable) {
/* Enable U3 LTSSM interrupts */
value = HOT_RST_INTR | WARM_RST_INTR |
ENTER_U3_INTR | EXIT_U3_INTR;
@@ -273,7 +273,7 @@ static void mtu3_csr_init(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
- if (mtu->is_u3_ip) {
+ if (mtu->u3_capable) {
/* disable LGO_U1/U2 by default */
mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
@@ -341,7 +341,7 @@ void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
{
- if (mtu->is_u3_ip && mtu->speed >= USB_SPEED_SUPER)
+ if (mtu->u3_capable && mtu->speed >= USB_SPEED_SUPER)
mtu3_ss_func_set(mtu, is_on);
else
mtu3_hs_softconn_set(mtu, is_on);
@@ -544,7 +544,7 @@ static void get_ep_fifo_config(struct mtu3 *mtu)
struct mtu3_fifo_info *rx_fifo;
u32 fifosize;
- if (mtu->is_u3_ip) {
+ if (mtu->separate_fifo) {
fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
tx_fifo = &mtu->tx_fifo;
tx_fifo->base = 0;
@@ -821,6 +821,10 @@ static irqreturn_t mtu3_irq(int irq, void *data)
static void mtu3_check_params(struct mtu3 *mtu)
{
+ /* device's u3 port (port0) is disabled */
+ if (mtu->u3_capable && (mtu->ssusb->u3p_dis_msk & BIT(0)))
+ mtu->u3_capable = 0;
+
/* check the max_speed parameter */
switch (mtu->max_speed) {
case USB_SPEED_FULL:
@@ -838,7 +842,7 @@ static void mtu3_check_params(struct mtu3 *mtu)
break;
}
- if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH))
+ if (!mtu->u3_capable && (mtu->max_speed > USB_SPEED_HIGH))
mtu->max_speed = USB_SPEED_HIGH;
mtu->speed = mtu->max_speed;
@@ -857,10 +861,12 @@ static int mtu3_hw_init(struct mtu3 *mtu)
mtu->gen2cp = !!(mtu->hw_version >= MTU3_TRUNK_VERS_1003);
value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
- mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(value);
+ mtu->u3_capable = !!SSUSB_IP_DEV_U3_PORT_NUM(value);
+ /* usb3 ip uses separate fifo */
+ mtu->separate_fifo = mtu->u3_capable;
dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
- mtu->is_u3_ip ? "U3" : "U2");
+ mtu->u3_capable ? "U3" : "U2");
mtu3_check_params(mtu);
@@ -965,7 +971,8 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
goto dma_mask_err;
}
- ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
+ ret = devm_request_threaded_irq(dev, mtu->irq, NULL, mtu3_irq,
+ IRQF_ONESHOT, dev_name(dev), mtu);
if (ret) {
dev_err(dev, "request irq %d failed!\n", mtu->irq);
goto irq_err;
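
The devm_request_threaded_irq() change at the end of this hunk passes a NULL primary handler, so the IRQ core installs its default hard-IRQ handler and runs mtu3_irq() in a kernel thread; IRQF_ONESHOT keeps the line masked until the thread returns, which the core insists on when no primary handler is supplied. A generic sketch of the same request pattern, with placeholder names and handler body:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t my_thread_fn(int irq, void *data)
{
	/* runs in process context; may sleep, take mutexes, etc. */
	return IRQ_HANDLED;
}

static int my_request_irq(struct device *dev, int irq, void *priv)
{
	/*
	 * NULL primary handler + IRQF_ONESHOT: the interrupt line stays
	 * masked until my_thread_fn() has finished.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, my_thread_fn,
					 IRQF_ONESHOT, dev_name(dev), priv);
}
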
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index d27de647c86a..f0de99858353 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -101,13 +101,13 @@ static int mtu3_ep_used_show(struct seq_file *sf, void *unused)
for (i = 0; i < mtu->num_eps; i++) {
mep = mtu->in_eps + i;
if (mep->flags & MTU3_EP_ENABLED) {
- seq_printf(sf, "%s - type: %d\n", mep->name, mep->type);
+ seq_printf(sf, "%s - type: %s\n", mep->name, usb_ep_type_string(mep->type));
used++;
}
mep = mtu->out_eps + i;
if (mep->flags & MTU3_EP_ENABLED) {
- seq_printf(sf, "%s - type: %d\n", mep->name, mep->type);
+ seq_printf(sf, "%s - type: %s\n", mep->name, usb_ep_type_string(mep->type));
used++;
}
}
@@ -177,8 +177,8 @@ static int mtu3_ep_info_show(struct seq_file *sf, void *unused)
unsigned long flags;
spin_lock_irqsave(&mtu->lock, flags);
- seq_printf(sf, "ep - type:%d, maxp:%d, slot:%d, flags:%x\n",
- mep->type, mep->maxp, mep->slot, mep->flags);
+ seq_printf(sf, "ep - type:%s, maxp:%d, slot:%d, flags:%x\n",
+ usb_ep_type_string(mep->type), mep->maxp, mep->slot, mep->flags);
spin_unlock_irqrestore(&mtu->lock, flags);
return 0;
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 9977600616d7..80236e7b0895 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -433,6 +433,13 @@ static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
}
+static void function_wake_notif(struct mtu3 *mtu, u8 intf)
+{
+ mtu3_writel(mtu->mac_base, U3D_DEV_NOTIF_0,
+ TYPE_FUNCTION_WAKE | DEV_NOTIF_VAL_FW(intf));
+ mtu3_setbits(mtu->mac_base, U3D_DEV_NOTIF_0, SEND_DEV_NOTIF);
+}
+
static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
{
struct mtu3 *mtu = gadget_to_mtu3(gadget);
@@ -446,7 +453,18 @@ static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
spin_lock_irqsave(&mtu->lock, flags);
if (mtu->g.speed >= USB_SPEED_SUPER) {
+ /*
+ * class driver may do function wakeup even when UFP is in U0,
+ * and UX_EXIT only takes effect in U1/U2/U3;
+ */
mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
+ /*
+ * Assume there's only one function on the composite device
+ * and enable remote wake for the first interface.
+ * FIXME if the IAD (interface association descriptor) shows
+ * there is more than one function.
+ */
+ function_wake_notif(mtu, 0);
} else {
mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
spin_unlock_irqrestore(&mtu->lock, flags);
@@ -592,6 +610,18 @@ mtu3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
spin_unlock_irqrestore(&mtu->lock, flags);
}
+static void mtu3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(g);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s %s\n", __func__, enable ? "en" : "dis");
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ mtu->async_callbacks = enable;
+ spin_unlock_irqrestore(&mtu->lock, flags);
+}
+
static const struct usb_gadget_ops mtu3_gadget_ops = {
.get_frame = mtu3_gadget_get_frame,
.wakeup = mtu3_gadget_wakeup,
@@ -600,6 +630,7 @@ static const struct usb_gadget_ops mtu3_gadget_ops = {
.udc_start = mtu3_gadget_start,
.udc_stop = mtu3_gadget_stop,
.udc_set_speed = mtu3_gadget_set_speed,
+ .udc_async_callbacks = mtu3_gadget_async_callbacks,
};
static void mtu3_state_reset(struct mtu3 *mtu)
@@ -680,6 +711,7 @@ int mtu3_gadget_setup(struct mtu3 *mtu)
mtu->g.speed = USB_SPEED_UNKNOWN;
mtu->g.sg_supported = 0;
mtu->g.name = MTU3_DRIVER_NAME;
+ mtu->g.irq = mtu->irq;
mtu->is_active = 0;
mtu->delayed_status = false;
@@ -696,7 +728,7 @@ void mtu3_gadget_cleanup(struct mtu3 *mtu)
void mtu3_gadget_resume(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget RESUME\n");
- if (mtu->gadget_driver && mtu->gadget_driver->resume) {
+ if (mtu->async_callbacks && mtu->gadget_driver && mtu->gadget_driver->resume) {
spin_unlock(&mtu->lock);
mtu->gadget_driver->resume(&mtu->g);
spin_lock(&mtu->lock);
@@ -707,7 +739,7 @@ void mtu3_gadget_resume(struct mtu3 *mtu)
void mtu3_gadget_suspend(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget SUSPEND\n");
- if (mtu->gadget_driver && mtu->gadget_driver->suspend) {
+ if (mtu->async_callbacks && mtu->gadget_driver && mtu->gadget_driver->suspend) {
spin_unlock(&mtu->lock);
mtu->gadget_driver->suspend(&mtu->g);
spin_lock(&mtu->lock);
@@ -718,7 +750,7 @@ void mtu3_gadget_suspend(struct mtu3 *mtu)
void mtu3_gadget_disconnect(struct mtu3 *mtu)
{
dev_dbg(mtu->dev, "gadget DISCONNECT\n");
- if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
+ if (mtu->async_callbacks && mtu->gadget_driver && mtu->gadget_driver->disconnect) {
spin_unlock(&mtu->lock);
mtu->gadget_driver->disconnect(&mtu->g);
spin_lock(&mtu->lock);
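
The new udc_async_callbacks hook in this file implements the gadget-core contract that ->suspend/->resume/->disconnect (and ep0 forwarding) must not be invoked before the composite core is ready for them: the UDC latches the enable flag under its lock and checks it before each callback, exactly as the hunks above do for mtu3. A reduced sketch of that pattern with made-up names, assuming a driver-private struct that embeds the gadget:

#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct my_udc {
	spinlock_t lock;
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	bool async_callbacks;
};

static void my_udc_async_callbacks(struct usb_gadget *g, bool enable)
{
	struct my_udc *udc = container_of(g, struct my_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->async_callbacks = enable;
	spin_unlock_irqrestore(&udc->lock, flags);
}

static void my_udc_report_suspend(struct my_udc *udc)
{
	/* only forward the event once async callbacks are allowed */
	if (udc->async_callbacks && udc->driver && udc->driver->suspend)
		udc->driver->suspend(&udc->gadget);
}
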
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 0ca47212f1ec..e4fd1bb14a55 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -66,7 +66,7 @@ __acquires(mtu->lock)
{
int ret;
- if (!mtu->gadget_driver)
+ if (!mtu->gadget_driver || !mtu->async_callbacks)
return -EOPNOTSUPP;
spin_unlock(&mtu->lock);
@@ -226,6 +226,8 @@ ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
break;
case USB_RECIP_INTERFACE:
+ /* status of function remote wakeup, forward request */
+ handled = 0;
break;
case USB_RECIP_ENDPOINT:
epnum = (u8) le16_to_cpu(setup->wIndex);
@@ -397,10 +399,8 @@ static int ep0_handle_feature(struct mtu3 *mtu,
/* superspeed only */
if (value == USB_INTRF_FUNC_SUSPEND &&
mtu->g.speed >= USB_SPEED_SUPER) {
- /*
- * forward the request because function drivers
- * should handle it
- */
+ /* forward the request for function suspend */
+ mtu->may_wakeup = !!(index & USB_INTRF_FUNC_SUSPEND_RW);
handled = 0;
}
break;
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 072db1f6470e..519a58301f45 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -341,6 +341,8 @@
#define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C)
#define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210)
#define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214)
+#define U3D_DEV_NOTIF_0 (SSUSB_USB3_SYS_CSR_BASE + 0x0290)
+#define U3D_DEV_NOTIF_1 (SSUSB_USB3_SYS_CSR_BASE + 0x0294)
/*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/
@@ -365,6 +367,20 @@
#define CLR_LINK_ERR_CNT BIT(16)
#define LINK_ERROR_COUNT GENMASK(15, 0)
+/* U3D_DEV_NOTIF_0 */
+#define DEV_NOTIF_TYPE_SPECIFIC_LOW_MSK GENMASK(31, 8)
+#define DEV_NOTIF_VAL_FW(x) (((x) & 0xff) << 8)
+#define DEV_NOTIF_VAL_LTM(x) (((x) & 0xfff) << 8)
+#define DEV_NOTIF_VAL_IAM(x) (((x) & 0xffff) << 8)
+#define DEV_NOTIF_TYPE_MSK GENMASK(7, 4)
+/* Notification Type */
+#define TYPE_FUNCTION_WAKE (0x1 << 4)
+#define TYPE_LATENCY_TOLERANCE_MESSAGE (0x2 << 4)
+#define TYPE_BUS_INTERVAL_ADJUST_MESSAGE (0x3 << 4)
+#define TYPE_HOST_ROLE_REQUEST (0x4 << 4)
+#define TYPE_SUBLINK_SPEED (0x5 << 4)
+#define SEND_DEV_NOTIF BIT(0)
+
/*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/
#define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004)
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 4309ed939178..4cb65346789d 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -13,6 +13,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
+#include <linux/reset.h>
#include "mtu3.h"
#include "mtu3_dr.h"
@@ -189,6 +190,31 @@ static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
+static void ssusb_u3_drd_check(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ u32 dev_u3p_num;
+ u32 host_u3p_num;
+ u32 value;
+
+ /* u3 port0 is disabled */
+ if (ssusb->u3p_dis_msk & BIT(0)) {
+ otg_sx->is_u3_drd = false;
+ goto out;
+ }
+
+ value = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+ dev_u3p_num = SSUSB_IP_DEV_U3_PORT_NUM(value);
+
+ value = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
+ host_u3p_num = SSUSB_IP_XHCI_U3_PORT_NUM(value);
+
+ otg_sx->is_u3_drd = !!(dev_u3p_num && host_u3p_num);
+
+out:
+ dev_info(ssusb->dev, "usb3-drd: %d\n", otg_sx->is_u3_drd);
+}
+
static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
{
struct device_node *node = pdev->dev.of_node;
@@ -243,6 +269,8 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN)
ssusb->dr_mode = USB_DR_MODE_OTG;
+ of_property_read_u32(node, "mediatek,u3p-dis-msk", &ssusb->u3p_dis_msk);
+
if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
goto out;
@@ -254,8 +282,6 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
}
/* optional property, ignore the error if it does not exist */
- of_property_read_u32(node, "mediatek,u3p-dis-msk",
- &ssusb->u3p_dis_msk);
of_property_read_u32(node, "mediatek,u2p-dis-msk",
&ssusb->u2p_dis_msk);
@@ -269,7 +295,6 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
goto out;
/* if dual-role mode is supported */
- otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
otg_sx->manual_drd_enabled =
of_property_read_bool(node, "enable-manual-drd");
otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");
@@ -289,9 +314,8 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
}
out:
- dev_info(dev, "dr_mode: %d, is_u3_dr: %d, drd: %s\n",
- ssusb->dr_mode, otg_sx->is_u3_drd,
- otg_sx->manual_drd_enabled ? "manual" : "auto");
+ dev_info(dev, "dr_mode: %d, drd: %s\n", ssusb->dr_mode,
+ otg_sx->manual_drd_enabled ? "manual" : "auto");
dev_info(dev, "u2p_dis_msk: %x, u3p_dis_msk: %x\n",
ssusb->u2p_dis_msk, ssusb->u3p_dis_msk);
@@ -345,7 +369,14 @@ static int mtu3_probe(struct platform_device *pdev)
dev_info(dev, "wakeup irq %d\n", ssusb->wakeup_irq);
}
+ ret = device_reset_optional(dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to reset controller\n");
+ goto comm_exit;
+ }
+
ssusb_ip_sw_reset(ssusb);
+ ssusb_u3_drd_check(ssusb);
if (IS_ENABLED(CONFIG_USB_MTU3_HOST))
ssusb->dr_mode = USB_DR_MODE_HOST;
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
index 1b897636daf2..03d2a9bac27e 100644
--- a/drivers/usb/mtu3/mtu3_trace.h
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -18,18 +18,16 @@
#include "mtu3.h"
-#define MTU3_MSG_MAX 256
-
TRACE_EVENT(mtu3_log,
TP_PROTO(struct device *dev, struct va_format *vaf),
TP_ARGS(dev, vaf),
TP_STRUCT__entry(
__string(name, dev_name(dev))
- __dynamic_array(char, msg, MTU3_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
- vsnprintf(__get_str(msg), MTU3_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
@@ -238,8 +236,8 @@ DECLARE_EVENT_CLASS(mtu3_log_ep,
__entry->direction = mep->is_in;
__entry->gpd_ring = &mep->gpd_ring;
),
- TP_printk("%s: type %d maxp %d slot %d mult %d burst %d ring %p/%pad flags %c:%c%c%c:%c",
- __get_str(name), __entry->type,
+ TP_printk("%s: type %s maxp %d slot %d mult %d burst %d ring %p/%pad flags %c:%c%c%c:%c",
+ __get_str(name), usb_ep_type_string(__entry->type),
__entry->maxp, __entry->slot,
__entry->mult, __entry->maxburst,
__entry->gpd_ring, &__entry->gpd_ring->dma,
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 4d61df6a9b5c..f906dfd360d3 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -123,6 +123,17 @@ config USB_MUSB_MEDIATEK
select GENERIC_PHY
select USB_ROLE_SWITCH
+config USB_MUSB_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC platforms"
+ depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on NOP_USB_XCEIV
+ select USB_MUSB_DUAL_ROLE
+ help
+ Say Y here to enable support for USB on Microchip's PolarFire SoC.
+
+ This support is also available as a module. If so, the module
+ will be called mpfs.
+
comment "MUSB DMA mode"
config MUSB_PIO_ONLY
@@ -146,7 +157,7 @@ config USB_UX500_DMA
config USB_INVENTRA_DMA
bool 'Inventra'
- depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK || USB_MUSB_JZ4740
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK || USB_MUSB_JZ4740 || USB_MUSB_POLARFIRE_SOC
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 932247360a9f..51dd54a8de49 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
obj-$(CONFIG_USB_MUSB_SUNXI) += sunxi.o
obj-$(CONFIG_USB_MUSB_MEDIATEK) += mediatek.o
+obj-$(CONFIG_USB_MUSB_POLARFIRE_SOC) += mpfs.o
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
new file mode 100644
index 000000000000..cea2e8108867
--- /dev/null
+++ b/drivers/usb/musb/mpfs.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PolarFire SoC (MPFS) MUSB Glue Layer
+ *
+ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ * Based on {omap2430,tusb6010,ux500}.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define MPFS_MUSB_MAX_EP_NUM 8
+#define MPFS_MUSB_RAM_BITS 12
+
+struct mpfs_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+ struct clk *clk;
+};
+
+static struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 1024, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 4096, },
+};
+
+static const struct musb_hdrc_config mpfs_musb_hdrc_config = {
+ .fifo_cfg = mpfs_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(mpfs_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = MPFS_MUSB_MAX_EP_NUM,
+ .ram_bits = MPFS_MUSB_RAM_BITS,
+};
+
+static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx) {
+ musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
+ musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
+ musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
+ ret = musb_interrupt(musb);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+
+ /*
+ * HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ musb->is_active = 1;
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+
+ /*
+ * NOTE: skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+ musb->xceiv->otg->default_a = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static int mpfs_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+
+ musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(musb->xceiv)) {
+ dev_err(dev, "HS UDC: no transceiver configured\n");
+ return PTR_ERR(musb->xceiv);
+ }
+
+ musb->dyn_fifo = true;
+ musb->isr = mpfs_musb_interrupt;
+
+ musb_platform_set_vbus(musb, 1);
+
+ return 0;
+}
+
+static const struct musb_platform_ops mpfs_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+ .init = mpfs_musb_init,
+ .fifo_mode = 2,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .set_vbus = mpfs_musb_set_vbus
+};
+
+static int mpfs_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mpfs_glue *glue;
+ struct platform_device *musb_pdev;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ musb_pdev = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb_pdev) {
+ dev_err(dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err_phy_release;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err_phy_release;
+ }
+
+ musb_pdev->dev.parent = dev;
+ musb_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(39);
+ musb_pdev->dev.dma_mask = &musb_pdev->dev.coherent_dma_mask;
+ device_set_of_node_from_dev(&musb_pdev->dev, dev);
+
+ glue->dev = dev;
+ glue->musb = musb_pdev;
+ glue->clk = clk;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ pdata->config = &mpfs_musb_hdrc_config;
+ pdata->platform_ops = &mpfs_ops;
+
+ pdata->mode = usb_get_dr_mode(dev);
+ if (pdata->mode == USB_DR_MODE_UNKNOWN) {
+ dev_info(dev, "No dr_mode property found, defaulting to otg\n");
+ pdata->mode = USB_DR_MODE_OTG;
+ }
+
+ glue->phy = usb_phy_generic_register();
+ if (IS_ERR(glue->phy)) {
+ dev_err(dev, "failed to register usb-phy %ld\n",
+ PTR_ERR(glue->phy));
+ ret = PTR_ERR(glue->phy);
+ goto err_clk_disable;
+ }
+
+ platform_set_drvdata(pdev, glue);
+
+ ret = platform_device_add_resources(musb_pdev, pdev->resource, pdev->num_resources);
+ if (ret) {
+ dev_err(dev, "failed to add resources\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add_data(musb_pdev, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(dev, "failed to add platform_data\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add(musb_pdev);
+ if (ret) {
+ dev_err(dev, "failed to register musb device\n");
+ goto err_clk_disable;
+ }
+
+ dev_info(&pdev->dev, "Registered MPFS MUSB driver\n");
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+
+err_phy_release:
+ usb_phy_generic_unregister(glue->phy);
+ platform_device_put(musb_pdev);
+ return ret;
+}
+
+static int mpfs_remove(struct platform_device *pdev)
+{
+ struct mpfs_glue *glue = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(glue->clk);
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mpfs_id_table[] = {
+ { .compatible = "microchip,mpfs-musb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mpfs_id_table);
+#endif
+
+static struct platform_driver mpfs_musb_driver = {
+ .probe = mpfs_probe,
+ .remove = mpfs_remove,
+ .driver = {
+ .name = "mpfs-musb",
+ .of_match_table = of_match_ptr(mpfs_id_table)
+ },
+};
+
+module_platform_driver(mpfs_musb_driver);
+
+MODULE_DESCRIPTION("PolarFire SoC MUSB Glue Layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index f7b1d5993f8c..bbbcfd49fb35 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2684,13 +2684,7 @@ static void musb_save_context(struct musb *musb)
musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
for (i = 0; i < musb->config->num_eps; ++i) {
- struct musb_hw_ep *hw_ep;
-
- hw_ep = &musb->endpoints[i];
- if (!hw_ep)
- continue;
-
- epio = hw_ep->regs;
+ epio = musb->endpoints[i].regs;
if (!epio)
continue;
@@ -2765,13 +2759,7 @@ static void musb_restore_context(struct musb *musb)
musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
for (i = 0; i < musb->config->num_eps; ++i) {
- struct musb_hw_ep *hw_ep;
-
- hw_ep = &musb->endpoints[i];
- if (!hw_ep)
- continue;
-
- epio = hw_ep->regs;
+ epio = musb->endpoints[i].regs;
if (!epio)
continue;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 7fbb8a307145..c963cb8565f2 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -286,7 +286,7 @@ static void cppi41_dma_callback(void *private_data,
* receive a FIFO empty interrupt so the only thing we can do is
* to poll for the bit. On HS it usually takes 2us, on FS around
* 110us - 150us depending on the transfer size.
- * We spin on HS (no longer than than 25us and setup a timer on
+ * We spin on HS (no longer than 25us and setup a timer on
* FS to check for the bit and complete the transfer.
*/
if (is_host_active(musb)) {
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 51274b87f46c..daada4b66a92 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1910,8 +1910,6 @@ static int musb_gadget_stop(struct usb_gadget *g)
*/
/* Force check of devctl register for PM runtime */
- schedule_delayed_work(&musb->irq_work, 0);
-
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index ec28b5716796..f246b14394c4 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(musb_log,
TP_ARGS(musb, vaf),
TP_STRUCT__entry(
__string(name, dev_name(musb->controller))
- __dynamic_array(char, msg, MUSB_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(musb->controller));
- vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 7ed4cc348d99..5609b4e84d40 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -495,7 +495,7 @@ done:
}
/*
- * Maybe put TUSB6010 into idle mode mode depending on USB link status,
+ * Maybe put TUSB6010 into idle mode depending on USB link status,
* like "disconnected" or "suspended". We'll be woken out of it by
* connect, resume, or disconnect.
*
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 358d05cb643d..f75912279b39 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -59,7 +59,7 @@ static void keystone_usbphy_shutdown(struct usb_phy *phy)
val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
- val &= ~PHY_REF_SSP_EN);
+ val & ~PHY_REF_SSP_EN);
}
static int keystone_usbphy_probe(struct platform_device *pdev)
diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
index 24de64edb674..2d77edefb4b3 100644
--- a/drivers/usb/renesas_usbhs/rza.c
+++ b/drivers/usb/renesas_usbhs/rza.c
@@ -23,6 +23,10 @@ static int usbhs_rza1_hardware_init(struct platform_device *pdev)
extal_clk = of_find_node_by_name(NULL, "extal");
of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb);
of_property_read_u32(extal_clk, "clock-frequency", &freq_extal);
+
+ of_node_put(usb_x1_clk);
+ of_node_put(extal_clk);
+
if (freq_usb == 0) {
if (freq_extal == 12000000) {
/* Select 12MHz XTAL */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 6924fa95f6bd..5fbcc155e8f5 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -256,7 +256,7 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
/*
* Mike Isely <isely@pobox.com> 2-Feb-2008: The
* Cypress app note that describes this mechanism
- * states the the low-speed part can't handle more
+ * states that the low-speed part can't handle more
* than 800 bytes/sec, in which case 4800 baud is the
* safest speed for a part like that.
*/
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b440d338a895..d5a3986dfee7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* Belimo Automation devices */
+ { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) },
+ { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) },
/* ICP DAS I-756xU devices */
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index d1a9564697a4..4e92c165c86b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1569,6 +1569,12 @@
#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
/*
+ * Belimo Automation
+ */
+#define BELIMO_ZTH_PID 0x8050
+#define BELIMO_ZIP_PID 0xC811
+
+/*
* Unjo AB
*/
#define UNJO_VID 0x22B7
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index e5c75944ebb7..f1a8d8343623 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -988,7 +988,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
garmin_data_p->flags &= ~FLAGS_DROP_DATA;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
- buffer = kmalloc(count, GFP_ATOMIC);
+ buffer = kmemdup(buf, count, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
@@ -998,8 +998,6 @@ static int garmin_write_bulk(struct usb_serial_port *port,
return -ENOMEM;
}
- memcpy(buffer, buf, count);
-
usb_serial_debug_data(&port->dev, __func__, count, buffer);
usb_fill_bulk_urb(urb, serial->dev,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index bdee78cc4a07..ffa622539a25 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -220,7 +220,7 @@ struct edgeport_serial {
__u8 rxHeader3; /* receive header byte 3 */
__u8 rxPort; /* the port that we are currently receiving data for */
__u8 rxStatusCode; /* the receive status code */
- __u8 rxStatusParam; /* the receive status paramater */
+ __u8 rxStatusParam; /* the receive status parameter */
__s16 rxBytesRemaining; /* the number of port bytes left to read */
struct usb_serial *serial; /* loop back to the owner of this object */
};
@@ -901,7 +901,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
if (!edge_port->open) {
/* open timed out */
- dev_dbg(dev, "%s - open timedout\n", __func__);
+ dev_dbg(dev, "%s - open timeout\n", __func__);
edge_port->openPending = false;
return -ENODEV;
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 1e12b5f30dcc..23ccbba716c7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -826,7 +826,7 @@ static int mos77xx_calc_num_ports(struct usb_serial *serial,
/*
* The 7715 uses the first bulk in/out endpoint pair for the
* parallel port, and the second for the serial port. We swap
- * the endpoint descriptors here so that the the first and
+ * the endpoint descriptors here so that the first and
* only registered port structure uses the serial-port
* endpoints.
*/
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index aed28c35caff..e31a6d77da3a 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -208,7 +208,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
priv->outstanding_bytes += count;
spin_unlock_irqrestore(&priv->lock, flags);
- buffer = kmalloc(count, GFP_ATOMIC);
+ buffer = kmemdup(buf, count, GFP_ATOMIC);
if (!buffer)
goto error_no_buffer;
@@ -216,8 +216,6 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
if (!urb)
goto error_no_urb;
- memcpy(buffer, buf, count);
-
usb_serial_debug_data(&port->dev, __func__, count, buffer);
/* The connected devices do not have a bulk write endpoint,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 9d56138133a9..353b2549eaa8 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -453,7 +453,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
goto error_simple;
}
- buffer = kmalloc(writesize, GFP_ATOMIC);
+ buffer = kmemdup(buf, writesize, GFP_ATOMIC);
if (!buffer) {
retval = -ENOMEM;
goto error_no_buffer;
@@ -465,8 +465,6 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
goto error_no_urb;
}
- memcpy(buffer, buf, writesize);
-
usb_serial_debug_data(&port->dev, __func__, writesize, buffer);
usb_fill_bulk_urb(urb, serial->dev,
@@ -737,7 +735,8 @@ static void sierra_close(struct usb_serial_port *port)
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to initialized
+ * resumed, but no need to hold it due to the tty-port initialized
+ * flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 24101bd7fcad..e35bea2235c1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -295,7 +295,7 @@ static int serial_open(struct tty_struct *tty, struct file *filp)
*
* Shut down a USB serial port. Serialized against activate by the
* tport mutex and kept to matching open/close pairs
- * of calls by the initialized flag.
+ * of calls by the tty-port initialized flag.
*
* Not called if tty is console.
*/
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index dab38b63eaf7..0017f6e969e1 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -29,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
+#include <linux/usb/cdc.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include "usb-wwan.h"
@@ -48,9 +49,9 @@ static int usb_wwan_send_setup(struct usb_serial_port *port)
portdata = usb_get_serial_port_data(port);
if (portdata->dtr_state)
- val |= 0x01;
+ val |= USB_CDC_CTRL_DTR;
if (portdata->rts_state)
- val |= 0x02;
+ val |= USB_CDC_CTRL_RTS;
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
@@ -59,8 +60,9 @@ static int usb_wwan_send_setup(struct usb_serial_port *port)
return res;
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- 0x22, 0x21, val, ifnum, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ USB_CDC_REQ_SET_CONTROL_LINE_STATE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ val, ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT);
usb_autopm_put_interface(port->serial->interface);
@@ -388,7 +390,8 @@ void usb_wwan_close(struct usb_serial_port *port)
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to initialized
+ * resumed, but no need to hold it due to the tty-port initialized
+ * flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
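For reference, the usb_wwan_send_setup() hunk above replaces the raw 0x22/0x21/0x01/0x02 values with the equivalent ch9 and CDC constants. A compile-time sketch of the equivalences that change relies on, for illustration only:

#include <linux/build_bug.h>
#include <linux/usb/ch9.h>
#include <linux/usb/cdc.h>

static inline void cdc_set_control_line_state_constants(void)
{
	/* bmRequestType 0x21: host-to-device, class request, interface recipient */
	BUILD_BUG_ON((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) != 0x21);

	/* bRequest 0x22: CDC SET_CONTROL_LINE_STATE */
	BUILD_BUG_ON(USB_CDC_REQ_SET_CONTROL_LINE_STATE != 0x22);

	/* wValue bits 0 and 1 carry DTR and RTS */
	BUILD_BUG_ON(USB_CDC_CTRL_DTR != 0x01);
	BUILD_BUG_ON(USB_CDC_CTRL_RTS != 0x02);
}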
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 64d96d210e02..7449e379077a 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1178,7 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
/*
* If the device tried to send back more data than the
* amount requested, the spec requires us to transfer
- * the CSW anyway. Since there's no point retrying the
+ * the CSW anyway. Since there's no point retrying
* the command, we'll return fake sense data indicating
* Illegal Request, Invalid Field in CDB.
*/
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index ba24847fb245..5defdfead653 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -52,6 +52,17 @@ source "drivers/usb/typec/ucsi/Kconfig"
source "drivers/usb/typec/tipd/Kconfig"
+config TYPEC_ANX7411
+ tristate "Analogix ANX7411 Type-C DRP Port controller driver"
+ depends on I2C
+ depends on USB_ROLE_SWITCH
+ help
+ Say Y or M here if your system has an Analogix ANX7411 Type-C DRP Port
+ controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called anx7411.ko.
+
config TYPEC_RT1719
tristate "Richtek RT1719 Sink Only Type-C controller driver"
depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 43626acc0aaf..4a83dad51a6c 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC) += typec.o
-typec-y := class.o mux.o bus.o
+typec-y := class.o mux.o bus.o pd.o retimer.o
typec-$(CONFIG_ACPI) += port-mapper.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_TPS6598X) += tipd/
+obj-$(CONFIG_TYPEC_ANX7411) += anx7411.o
obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom-pmic-typec.o
obj-$(CONFIG_TYPEC_STUSB160X) += stusb160x.o
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
new file mode 100644
index 000000000000..c0f0842d443c
--- /dev/null
+++ b/drivers/usb/typec/anx7411.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Driver for Analogix ANX7411 USB Type-C and PD controller
+ *
+ * Copyright(c) 2022, Analogix Semiconductor. All rights reserved.
+ *
+ */
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/role.h>
+#include <linux/usb/tcpci.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+
+#define TCPC_ADDRESS1 0x58
+#define TCPC_ADDRESS2 0x56
+#define TCPC_ADDRESS3 0x54
+#define TCPC_ADDRESS4 0x52
+#define SPI_ADDRESS1 0x7e
+#define SPI_ADDRESS2 0x6e
+#define SPI_ADDRESS3 0x64
+#define SPI_ADDRESS4 0x62
+
+struct anx7411_i2c_select {
+ u8 tcpc_address;
+ u8 spi_address;
+};
+
+#define VID_ANALOGIX 0x1F29
+#define PID_ANALOGIX 0x7411
+
+/* TCPC register define */
+
+#define ANALOG_CTRL_10 0xAA
+
+#define STATUS_LEN 2
+#define ALERT_0 0xCB
+#define RECEIVED_MSG BIT(7)
+#define SOFTWARE_INT BIT(6)
+#define MSG_LEN 32
+#define HEADER_LEN 2
+#define MSG_HEADER 0x00
+#define MSG_TYPE 0x01
+#define MSG_RAWDATA 0x02
+#define MSG_LEN_MASK 0x1F
+
+#define ALERT_1 0xCC
+#define INTP_POW_ON BIT(7)
+#define INTP_POW_OFF BIT(6)
+
+#define VBUS_THRESHOLD_H 0xDD
+#define VBUS_THRESHOLD_L 0xDE
+
+#define FW_CTRL_0 0xF0
+#define UNSTRUCT_VDM_EN BIT(0)
+#define DELAY_200MS BIT(1)
+#define VSAFE0 0
+#define VSAFE1 BIT(2)
+#define VSAFE2 BIT(3)
+#define VSAFE3 (BIT(2) | BIT(3))
+#define FRS_EN BIT(7)
+
+#define FW_PARAM 0xF1
+#define DONGLE_IOP BIT(0)
+
+#define FW_CTRL_2 0xF7
+#define SINK_CTRL_DIS_FLAG BIT(5)
+
+/* SPI register define */
+#define OCM_CTRL_0 0x6E
+#define OCM_RESET BIT(6)
+
+#define MAX_VOLTAGE 0xAC
+#define MAX_POWER 0xAD
+#define MIN_POWER 0xAE
+
+#define REQUEST_VOLTAGE 0xAF
+#define VOLTAGE_UNIT 100 /* mV per unit */
+
+#define REQUEST_CURRENT 0xB1
+#define CURRENT_UNIT 50 /* mA per unit */
+
+#define CMD_SEND_BUF 0xC0
+#define CMD_RECV_BUF 0xE0
+
+#define REQ_VOL_20V_IN_100MV 0xC8
+#define REQ_CUR_2_25A_IN_50MA 0x2D
+#define REQ_CUR_3_25A_IN_50MA 0x41
+
+#define DEF_5V 5000
+#define DEF_1_5A 1500
+
+#define LOBYTE(w) ((u8)((w) & 0xFF))
+#define HIBYTE(w) ((u8)(((u16)(w) >> 8) & 0xFF))
+
+enum anx7411_typec_message_type {
+ TYPE_SRC_CAP = 0x00,
+ TYPE_SNK_CAP = 0x01,
+ TYPE_SNK_IDENTITY = 0x02,
+ TYPE_SVID = 0x03,
+ TYPE_SET_SNK_DP_CAP = 0x08,
+ TYPE_PSWAP_REQ = 0x10,
+ TYPE_DSWAP_REQ = 0x11,
+ TYPE_VDM = 0x14,
+ TYPE_OBJ_REQ = 0x16,
+ TYPE_DP_ALT_ENTER = 0x19,
+ TYPE_DP_DISCOVER_MODES_INFO = 0x27,
+ TYPE_GET_DP_CONFIG = 0x29,
+ TYPE_DP_CONFIGURE = 0x2A,
+ TYPE_GET_DP_DISCOVER_MODES_INFO = 0x2E,
+ TYPE_GET_DP_ALT_ENTER = 0x2F,
+};
+
+#define FW_CTRL_1 0xB2
+#define AUTO_PD_EN BIT(1)
+#define TRYSRC_EN BIT(2)
+#define TRYSNK_EN BIT(3)
+#define FORCE_SEND_RDO BIT(6)
+
+#define FW_VER 0xB4
+#define FW_SUBVER 0xB5
+
+#define INT_MASK 0xB6
+#define INT_STS 0xB7
+#define OCM_BOOT_UP BIT(0)
+#define OC_OV_EVENT BIT(1)
+#define VCONN_CHANGE BIT(2)
+#define VBUS_CHANGE BIT(3)
+#define CC_STATUS_CHANGE BIT(4)
+#define DATA_ROLE_CHANGE BIT(5)
+#define PR_CONSUMER_GOT_POWER BIT(6)
+#define HPD_STATUS_CHANGE BIT(7)
+
+#define SYSTEM_STSTUS 0xB8
+/* 0: SINK off; 1: SINK on */
+#define SINK_STATUS BIT(1)
+/* 0: VCONN off; 1: VCONN on */
+#define VCONN_STATUS BIT(2)
+/* 0: vbus off; 1: vbus on */
+#define VBUS_STATUS BIT(3)
+/* 1: host; 0: device */
+#define DATA_ROLE BIT(5)
+/* 0: Chunking; 1: Unchunked */
+#define SUPPORT_UNCHUNKING BIT(6)
+/* 0: HPD low; 1: HPD high */
+#define HPD_STATUS BIT(7)
+
+#define DATA_DFP 1
+#define DATA_UFP 2
+#define POWER_SOURCE 1
+#define POWER_SINK 2
+
+#define CC_STATUS 0xB9
+#define CC1_RD BIT(0)
+#define CC2_RD BIT(4)
+#define CC1_RA BIT(1)
+#define CC2_RA BIT(5)
+#define CC1_RP(cc) (((cc) >> 2) & 0x03)
+#define CC2_RP(cc) (((cc) >> 6) & 0x03)
+
+#define PD_REV_INIT 0xBA
+
+#define PD_EXT_MSG_CTRL 0xBB
+#define SRC_CAP_EXT_REPLY BIT(0)
+#define MANUFACTURER_INFO_REPLY BIT(1)
+#define BATTERY_STS_REPLY BIT(2)
+#define BATTERY_CAP_REPLY BIT(3)
+#define ALERT_REPLY BIT(4)
+#define STATUS_REPLY BIT(5)
+#define PPS_STATUS_REPLY BIT(6)
+#define SNK_CAP_EXT_REPLY BIT(7)
+
+#define NO_CONNECT 0x00
+#define USB3_1_CONNECTED 0x01
+#define DP_ALT_4LANES 0x02
+#define USB3_1_DP_2LANES 0x03
+#define CC1_CONNECTED 0x01
+#define CC2_CONNECTED 0x02
+#define SELECT_PIN_ASSIGMENT_C 0x04
+#define SELECT_PIN_ASSIGMENT_D 0x08
+#define SELECT_PIN_ASSIGMENT_E 0x10
+#define SELECT_PIN_ASSIGMENT_U 0x00
+#define REDRIVER_ADDRESS 0x20
+#define REDRIVER_OFFSET 0x00
+
+#define DP_SVID 0xFF01
+#define VDM_ACK 0x40
+#define VDM_CMD_RES 0x00
+#define VDM_CMD_DIS_ID 0x01
+#define VDM_CMD_DIS_SVID 0x02
+#define VDM_CMD_DIS_MOD 0x03
+#define VDM_CMD_ENTER_MODE 0x04
+#define VDM_CMD_EXIT_MODE 0x05
+#define VDM_CMD_ATTENTION 0x06
+#define VDM_CMD_GET_STS 0x10
+#define VDM_CMD_AND_ACK_MASK 0x5F
+
+#define MAX_ALTMODE 2
+
+#define HAS_SOURCE_CAP BIT(0)
+#define HAS_SINK_CAP BIT(1)
+#define HAS_SINK_WATT BIT(2)
+
+enum anx7411_psy_state {
+ /* copy from drivers/usb/typec/tcpm */
+ ANX7411_PSY_OFFLINE = 0,
+ ANX7411_PSY_FIXED_ONLINE,
+
+ /* private */
+ /* PD contract is kept, but power to the bq25700 is disconnected.
+ * This state can be entered when a higher-capacity adapter is plugged in,
+ * and changes back to ONLINE when that adapter is unplugged.
+ */
+ ANX7411_PSY_HANG = 0xff,
+};
+
+struct typec_params {
+ int request_current; /* mA */
+ int request_voltage; /* mV */
+ int cc_connect;
+ int cc_orientation_valid;
+ int cc_status;
+ int data_role;
+ int power_role;
+ int vconn_role;
+ int dp_altmode_enter;
+ int cust_altmode_enter;
+ struct usb_role_switch *role_sw;
+ struct typec_port *port;
+ struct typec_partner *partner;
+ struct typec_mux_dev *typec_mux;
+ struct typec_switch_dev *typec_switch;
+ struct typec_altmode *amode[MAX_ALTMODE];
+ struct typec_altmode *port_amode[MAX_ALTMODE];
+ struct typec_displayport_data data;
+ int pin_assignment;
+ struct typec_capability caps;
+ u32 src_pdo[PDO_MAX_OBJECTS];
+ u32 sink_pdo[PDO_MAX_OBJECTS];
+ u8 caps_flags;
+ u8 src_pdo_nr;
+ u8 sink_pdo_nr;
+ u8 sink_watt;
+ u8 sink_voltage;
+};
+
+#define MAX_BUF_LEN 30
+struct fw_msg {
+ u8 msg_len;
+ u8 msg_type;
+ u8 buf[MAX_BUF_LEN];
+} __packed;
+
+struct anx7411_data {
+ int fw_version;
+ int fw_subversion;
+ struct i2c_client *tcpc_client;
+ struct i2c_client *spi_client;
+ struct fw_msg send_msg;
+ struct fw_msg recv_msg;
+ struct gpio_desc *intp_gpiod;
+ struct fwnode_handle *connector_fwnode;
+ struct typec_params typec;
+ int intp_irq;
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+ /* Lock for interrupt work queue */
+ struct mutex lock;
+
+ enum anx7411_psy_state psy_online;
+ enum power_supply_usb_type usb_type;
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+ struct device *dev;
+};
+
+static u8 snk_identity[] = {
+ LOBYTE(VID_ANALOGIX), HIBYTE(VID_ANALOGIX), 0x00, 0x82, /* snk_id_hdr */
+ 0x00, 0x00, 0x00, 0x00, /* snk_cert */
+ 0x00, 0x00, LOBYTE(PID_ANALOGIX), HIBYTE(PID_ANALOGIX), /* 5snk_ama */
+};
+
+static u8 dp_caps[4] = {0xC6, 0x00, 0x00, 0x00};
+
+static int anx7411_reg_read(struct i2c_client *client,
+ u8 reg_addr)
+{
+ return i2c_smbus_read_byte_data(client, reg_addr);
+}
+
+static int anx7411_reg_block_read(struct i2c_client *client,
+ u8 reg_addr, u8 len, u8 *buf)
+{
+ return i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf);
+}
+
+static int anx7411_reg_write(struct i2c_client *client,
+ u8 reg_addr, u8 reg_val)
+{
+ return i2c_smbus_write_byte_data(client, reg_addr, reg_val);
+}
+
+static int anx7411_reg_block_write(struct i2c_client *client,
+ u8 reg_addr, u8 len, u8 *buf)
+{
+ return i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf);
+}
+
+static struct anx7411_i2c_select anx7411_i2c_addr[] = {
+ {TCPC_ADDRESS1, SPI_ADDRESS1},
+ {TCPC_ADDRESS2, SPI_ADDRESS2},
+ {TCPC_ADDRESS3, SPI_ADDRESS3},
+ {TCPC_ADDRESS4, SPI_ADDRESS4},
+};
+
+static int anx7411_detect_power_mode(struct anx7411_data *ctx)
+{
+ int ret;
+ int mode;
+
+ ret = anx7411_reg_read(ctx->spi_client, REQUEST_CURRENT);
+ if (ret < 0)
+ return ret;
+
+ ctx->typec.request_current = ret * CURRENT_UNIT; /* 50ma per unit */
+
+ ret = anx7411_reg_read(ctx->spi_client, REQUEST_VOLTAGE);
+ if (ret < 0)
+ return ret;
+
+ ctx->typec.request_voltage = ret * VOLTAGE_UNIT; /* 100mv per unit */
+
+ if (ctx->psy_online == ANX7411_PSY_OFFLINE) {
+ ctx->psy_online = ANX7411_PSY_FIXED_ONLINE;
+ ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD;
+ power_supply_changed(ctx->psy);
+ }
+
+ if (!ctx->typec.cc_orientation_valid)
+ return 0;
+
+ if (ctx->typec.cc_connect == CC1_CONNECTED)
+ mode = CC1_RP(ctx->typec.cc_status);
+ else
+ mode = CC2_RP(ctx->typec.cc_status);
+ if (mode) {
+ typec_set_pwr_opmode(ctx->typec.port, mode - 1);
+ return 0;
+ }
+
+ typec_set_pwr_opmode(ctx->typec.port, TYPEC_PWR_MODE_PD);
+
+ return 0;
+}
+
+static int anx7411_register_partner(struct anx7411_data *ctx,
+ int pd, int accessory)
+{
+ struct typec_partner_desc desc;
+ struct typec_partner *partner;
+
+ if (ctx->typec.partner)
+ return 0;
+
+ desc.usb_pd = pd;
+ desc.accessory = accessory;
+ desc.identity = NULL;
+ partner = typec_register_partner(ctx->typec.port, &desc);
+ if (IS_ERR(partner))
+ return PTR_ERR(partner);
+
+ ctx->typec.partner = partner;
+
+ return 0;
+}
+
+static int anx7411_detect_cc_orientation(struct anx7411_data *ctx)
+{
+ struct device *dev = &ctx->spi_client->dev;
+ int ret;
+ int cc1_rd, cc2_rd;
+ int cc1_ra, cc2_ra;
+ int cc1_rp, cc2_rp;
+
+ ret = anx7411_reg_read(ctx->spi_client, CC_STATUS);
+ if (ret < 0)
+ return ret;
+
+ ctx->typec.cc_status = ret;
+
+ cc1_rd = ret & CC1_RD ? 1 : 0;
+ cc2_rd = ret & CC2_RD ? 1 : 0;
+ cc1_ra = ret & CC1_RA ? 1 : 0;
+ cc2_ra = ret & CC2_RA ? 1 : 0;
+ cc1_rp = CC1_RP(ret);
+ cc2_rp = CC2_RP(ret);
+
+ /* Debug cable, nothing to do */
+ if (cc1_rd && cc2_rd) {
+ ctx->typec.cc_orientation_valid = 0;
+ return anx7411_register_partner(ctx, 0, TYPEC_ACCESSORY_DEBUG);
+ }
+
+ if (cc1_ra && cc2_ra) {
+ ctx->typec.cc_orientation_valid = 0;
+ return anx7411_register_partner(ctx, 0, TYPEC_ACCESSORY_AUDIO);
+ }
+
+ ctx->typec.cc_orientation_valid = 1;
+
+ ret = anx7411_register_partner(ctx, 1, TYPEC_ACCESSORY_NONE);
+ if (ret) {
+ dev_err(dev, "register partner\n");
+ return ret;
+ }
+
+ if (cc1_rd || cc1_rp) {
+ typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_NORMAL);
+ ctx->typec.cc_connect = CC1_CONNECTED;
+ }
+
+ if (cc2_rd || cc2_rp) {
+ typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_REVERSE);
+ ctx->typec.cc_connect = CC2_CONNECTED;
+ }
+
+ return 0;
+}
+
+static int anx7411_set_mux(struct anx7411_data *ctx, int pin_assignment)
+{
+ int mode = TYPEC_STATE_SAFE;
+
+ switch (pin_assignment) {
+ case SELECT_PIN_ASSIGMENT_U:
+ /* default 4 line USB 3.1 */
+ mode = TYPEC_STATE_MODAL;
+ break;
+ case SELECT_PIN_ASSIGMENT_C:
+ case SELECT_PIN_ASSIGMENT_E:
+ /* 4 line DP */
+ mode = TYPEC_STATE_SAFE;
+ break;
+ case SELECT_PIN_ASSIGMENT_D:
+ /* 2 line DP, 2 line USB */
+ mode = TYPEC_MODE_USB3;
+ break;
+ default:
+ mode = TYPEC_STATE_SAFE;
+ break;
+ }
+
+ ctx->typec.pin_assignment = pin_assignment;
+
+ return typec_set_mode(ctx->typec.port, mode);
+}
+
+static int anx7411_set_usb_role(struct anx7411_data *ctx, enum usb_role role)
+{
+ if (!ctx->typec.role_sw)
+ return 0;
+
+ return usb_role_switch_set_role(ctx->typec.role_sw, role);
+}
+
+static int anx7411_data_role_detect(struct anx7411_data *ctx)
+{
+ int ret;
+
+ ret = anx7411_reg_read(ctx->spi_client, SYSTEM_STSTUS);
+ if (ret < 0)
+ return ret;
+
+ ctx->typec.data_role = (ret & DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE;
+ ctx->typec.vconn_role = (ret & VCONN_STATUS) ? TYPEC_SOURCE : TYPEC_SINK;
+
+ typec_set_data_role(ctx->typec.port, ctx->typec.data_role);
+
+ typec_set_vconn_role(ctx->typec.port, ctx->typec.vconn_role);
+
+ if (ctx->typec.data_role == TYPEC_HOST)
+ return anx7411_set_usb_role(ctx, USB_ROLE_HOST);
+
+ return anx7411_set_usb_role(ctx, USB_ROLE_DEVICE);
+}
+
+static int anx7411_power_role_detect(struct anx7411_data *ctx)
+{
+ int ret;
+
+ ret = anx7411_reg_read(ctx->spi_client, SYSTEM_STSTUS);
+ if (ret < 0)
+ return ret;
+
+ ctx->typec.power_role = (ret & SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE;
+
+ if (ctx->typec.power_role == TYPEC_SOURCE) {
+ ctx->typec.request_current = DEF_1_5A;
+ ctx->typec.request_voltage = DEF_5V;
+ }
+
+ typec_set_pwr_role(ctx->typec.port, ctx->typec.power_role);
+
+ return 0;
+}
+
+static int anx7411_cc_status_detect(struct anx7411_data *ctx)
+{
+ anx7411_detect_cc_orientation(ctx);
+ anx7411_detect_power_mode(ctx);
+
+ return 0;
+}
+
+static void anx7411_partner_unregister_altmode(struct anx7411_data *ctx)
+{
+ int i;
+
+ ctx->typec.dp_altmode_enter = 0;
+ ctx->typec.cust_altmode_enter = 0;
+
+ for (i = 0; i < MAX_ALTMODE; i++)
+ if (ctx->typec.amode[i]) {
+ typec_unregister_altmode(ctx->typec.amode[i]);
+ ctx->typec.amode[i] = NULL;
+ }
+
+ ctx->typec.pin_assignment = 0;
+}
+
+static int anx7411_typec_register_altmode(struct anx7411_data *ctx,
+ int svid, int vdo)
+{
+ struct device *dev = &ctx->spi_client->dev;
+ struct typec_altmode_desc desc;
+ int err;
+ int i;
+
+ desc.svid = svid;
+ desc.vdo = vdo;
+
+ for (i = 0; i < MAX_ALTMODE; i++)
+ if (!ctx->typec.amode[i])
+ break;
+
+ desc.mode = i + 1; /* start with 1 */
+
+ if (i >= MAX_ALTMODE) {
+ dev_err(dev, "no altmode space for registering\n");
+ return -ENOMEM;
+ }
+
+ ctx->typec.amode[i] = typec_partner_register_altmode(ctx->typec.partner,
+ &desc);
+ if (IS_ERR(ctx->typec.amode[i])) {
+ dev_err(dev, "failed to register altmode\n");
+ err = PTR_ERR(ctx->typec.amode[i]);
+ ctx->typec.amode[i] = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void anx7411_unregister_partner(struct anx7411_data *ctx)
+{
+ if (ctx->typec.partner) {
+ typec_unregister_partner(ctx->typec.partner);
+ ctx->typec.partner = NULL;
+ }
+}
+
+static int anx7411_update_altmode(struct anx7411_data *ctx, int svid)
+{
+ int i;
+
+ if (svid == DP_SVID)
+ ctx->typec.dp_altmode_enter = 1;
+ else
+ ctx->typec.cust_altmode_enter = 1;
+
+ for (i = 0; i < MAX_ALTMODE; i++) {
+ if (!ctx->typec.amode[i])
+ continue;
+
+ if (ctx->typec.amode[i]->svid == svid) {
+ typec_altmode_update_active(ctx->typec.amode[i], true);
+ typec_altmode_notify(ctx->typec.amode[i],
+ ctx->typec.pin_assignment,
+ &ctx->typec.data);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int anx7411_register_altmode(struct anx7411_data *ctx,
+ bool dp_altmode, u8 *buf)
+{
+ int ret;
+ int svid;
+ int mid;
+
+ if (!ctx->typec.partner)
+ return 0;
+
+ svid = DP_SVID;
+ if (dp_altmode) {
+ mid = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+
+ return anx7411_typec_register_altmode(ctx, svid, mid);
+ }
+
+ svid = (buf[3] << 8) | buf[2];
+ if ((buf[0] & VDM_CMD_AND_ACK_MASK) != (VDM_ACK | VDM_CMD_ENTER_MODE))
+ return anx7411_update_altmode(ctx, svid);
+
+ if ((buf[0] & VDM_CMD_AND_ACK_MASK) != (VDM_ACK | VDM_CMD_DIS_MOD))
+ return 0;
+
+ mid = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
+
+ ret = anx7411_typec_register_altmode(ctx, svid, mid);
+ if (ctx->typec.cust_altmode_enter)
+ ret |= anx7411_update_altmode(ctx, svid);
+
+ return ret;
+}
+
+static int anx7411_parse_cmd(struct anx7411_data *ctx, u8 type, u8 *buf, u8 len)
+{
+ struct device *dev = &ctx->spi_client->dev;
+ u8 cur_50ma, vol_100mv;
+
+ switch (type) {
+ case TYPE_SRC_CAP:
+ cur_50ma = anx7411_reg_read(ctx->spi_client, REQUEST_CURRENT);
+ vol_100mv = anx7411_reg_read(ctx->spi_client, REQUEST_VOLTAGE);
+
+ ctx->typec.request_voltage = vol_100mv * VOLTAGE_UNIT;
+ ctx->typec.request_current = cur_50ma * CURRENT_UNIT;
+
+ ctx->psy_online = ANX7411_PSY_FIXED_ONLINE;
+ ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD;
+ power_supply_changed(ctx->psy);
+ break;
+ case TYPE_SNK_CAP:
+ break;
+ case TYPE_SVID:
+ break;
+ case TYPE_SNK_IDENTITY:
+ break;
+ case TYPE_GET_DP_ALT_ENTER:
+ /* DP alt mode enter success */
+ if (buf[0])
+ anx7411_update_altmode(ctx, DP_SVID);
+ break;
+ case TYPE_DP_ALT_ENTER:
+ /* Update DP altmode */
+ anx7411_update_altmode(ctx, DP_SVID);
+ break;
+ case TYPE_OBJ_REQ:
+ anx7411_detect_power_mode(ctx);
+ break;
+ case TYPE_DP_CONFIGURE:
+ anx7411_set_mux(ctx, buf[1]);
+ break;
+ case TYPE_DP_DISCOVER_MODES_INFO:
+ /* Make sure discover modes valid */
+ if (buf[0] | buf[1])
+ /* Register DP Altmode */
+ anx7411_register_altmode(ctx, 1, buf);
+ break;
+ case TYPE_VDM:
+ /* Register other altmode */
+ anx7411_register_altmode(ctx, 0, buf);
+ break;
+ default:
+ dev_err(dev, "ignore message(0x%.02x).\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+static u8 checksum(struct device *dev, u8 *buf, u8 len)
+{
+ u8 ret = 0;
+ u8 i;
+
+ for (i = 0; i < len; i++)
+ ret += buf[i];
+
+ return ret;
+}
+
+static int anx7411_read_msg_ctrl_status(struct i2c_client *client)
+{
+ return anx7411_reg_read(client, CMD_SEND_BUF);
+}
+
+static int anx7411_wait_msg_empty(struct i2c_client *client)
+{
+ int val;
+
+ return readx_poll_timeout(anx7411_read_msg_ctrl_status,
+ client, val, (val < 0) || (val == 0),
+ 2000, 2000 * 150);
+}
+
+static int anx7411_send_msg(struct anx7411_data *ctx, u8 type, u8 *buf, u8 size)
+{
+ struct device *dev = &ctx->spi_client->dev;
+ struct fw_msg *msg = &ctx->send_msg;
+ u8 crc;
+ int ret;
+
+ size = min_t(u8, size, (u8)MAX_BUF_LEN);
+ memcpy(msg->buf, buf, size);
+ msg->msg_type = type;
+ /* msg len equals buffer length + msg_type */
+ msg->msg_len = size + 1;
+
+ /* Checksum covers msg_len, msg_type and all the buffer data */
+ crc = checksum(dev, (u8 *)msg, size + HEADER_LEN);
+ msg->buf[size] = 0 - crc;
+
+ ret = anx7411_wait_msg_empty(ctx->spi_client);
+ if (ret)
+ return ret;
+
+ ret = anx7411_reg_block_write(ctx->spi_client,
+ CMD_SEND_BUF + 1, size + HEADER_LEN,
+ &msg->msg_type);
+ ret |= anx7411_reg_write(ctx->spi_client, CMD_SEND_BUF,
+ msg->msg_len);
+ return ret;
+}
+
+static int anx7411_process_cmd(struct anx7411_data *ctx)
+{
+ struct device *dev = &ctx->spi_client->dev;
+ struct fw_msg *msg = &ctx->recv_msg;
+ u8 len;
+ u8 crc;
+ int ret;
+
+ /* Read message from firmware */
+ ret = anx7411_reg_block_read(ctx->spi_client, CMD_RECV_BUF,
+ MSG_LEN, (u8 *)msg);
+ if (ret < 0)
+ return 0;
+
+ if (!msg->msg_len)
+ return 0;
+
+ ret = anx7411_reg_write(ctx->spi_client, CMD_RECV_BUF, 0);
+ if (ret)
+ return ret;
+
+ len = msg->msg_len & MSG_LEN_MASK;
+ crc = checksum(dev, (u8 *)msg, len + HEADER_LEN);
+ if (crc) {
+ dev_err(dev, "message error crc(0x%.02x)\n", crc);
+ return -ERANGE;
+ }
+
+ return anx7411_parse_cmd(ctx, msg->msg_type, msg->buf, len - 1);
+}
+
+static void anx7411_translate_payload(struct device *dev, __le32 *payload,
+ u32 *pdo, int nr, const char *type)
+{
+ int i;
+
+ if (nr > PDO_MAX_OBJECTS) {
+ dev_err(dev, "nr(%d) exceed PDO_MAX_OBJECTS(%d)\n",
+ nr, PDO_MAX_OBJECTS);
+
+ return;
+ }
+
+ for (i = 0; i < nr; i++)
+ payload[i] = cpu_to_le32(pdo[i]);
+}
+
+static int anx7411_config(struct anx7411_data *ctx)
+{
+ struct device *dev = &ctx->spi_client->dev;
+ struct typec_params *typecp = &ctx->typec;
+ __le32 payload[PDO_MAX_OBJECTS];
+ int ret;
+
+ /* Config PD FW work under PD 2.0 */
+ ret = anx7411_reg_write(ctx->spi_client, PD_REV_INIT, PD_REV20);
+ ret |= anx7411_reg_write(ctx->tcpc_client, FW_CTRL_0,
+ UNSTRUCT_VDM_EN | DELAY_200MS |
+ VSAFE1 | FRS_EN);
+ ret |= anx7411_reg_write(ctx->spi_client, FW_CTRL_1,
+ AUTO_PD_EN | FORCE_SEND_RDO);
+
+ /* Set VBUS current threshold */
+ ret |= anx7411_reg_write(ctx->tcpc_client, VBUS_THRESHOLD_H, 0xff);
+ ret |= anx7411_reg_write(ctx->tcpc_client, VBUS_THRESHOLD_L, 0x03);
+
+ /* Fix dongle compatible issue */
+ ret |= anx7411_reg_write(ctx->tcpc_client, FW_PARAM,
+ anx7411_reg_read(ctx->tcpc_client, FW_PARAM) |
+ DONGLE_IOP);
+ ret |= anx7411_reg_write(ctx->spi_client, INT_MASK, 0);
+
+ ret |= anx7411_reg_write(ctx->spi_client, PD_EXT_MSG_CTRL, 0xFF);
+ if (ret)
+ return ret;
+
+ if (typecp->caps_flags & HAS_SOURCE_CAP) {
+ anx7411_translate_payload(dev, payload, typecp->src_pdo,
+ typecp->src_pdo_nr, "source");
+ anx7411_send_msg(ctx, TYPE_SRC_CAP, (u8 *)&payload,
+ typecp->src_pdo_nr * 4);
+ anx7411_send_msg(ctx, TYPE_SNK_IDENTITY, snk_identity,
+ sizeof(snk_identity));
+ anx7411_send_msg(ctx, TYPE_SET_SNK_DP_CAP, dp_caps,
+ sizeof(dp_caps));
+ }
+
+ if (typecp->caps_flags & HAS_SINK_CAP) {
+ anx7411_translate_payload(dev, payload, typecp->sink_pdo,
+ typecp->sink_pdo_nr, "sink");
+ anx7411_send_msg(ctx, TYPE_SNK_CAP, (u8 *)&payload,
+ typecp->sink_pdo_nr * 4);
+ }
+
+ if (typecp->caps_flags & HAS_SINK_WATT) {
+ if (typecp->sink_watt) {
+ ret |= anx7411_reg_write(ctx->spi_client, MAX_POWER,
+ typecp->sink_watt);
+ /* Set min power to 1W */
+ ret |= anx7411_reg_write(ctx->spi_client, MIN_POWER, 2);
+ }
+
+ if (typecp->sink_voltage)
+ ret |= anx7411_reg_write(ctx->spi_client, MAX_VOLTAGE,
+ typecp->sink_voltage);
+ if (ret)
+ return ret;
+ }
+
+ if (!typecp->caps_flags)
+ usleep_range(5000, 6000);
+
+ ctx->fw_version = anx7411_reg_read(ctx->spi_client, FW_VER);
+ ctx->fw_subversion = anx7411_reg_read(ctx->spi_client, FW_SUBVER);
+
+ return 0;
+}
+
+static void anx7411_chip_standby(struct anx7411_data *ctx)
+{
+ int ret;
+ u8 cc1, cc2;
+ struct device *dev = &ctx->spi_client->dev;
+
+ ret = anx7411_reg_write(ctx->spi_client, OCM_CTRL_0,
+ anx7411_reg_read(ctx->spi_client, OCM_CTRL_0) |
+ OCM_RESET);
+ ret |= anx7411_reg_write(ctx->tcpc_client, ANALOG_CTRL_10, 0x80);
+ /* Set TCPC to RD and DRP enable */
+ cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
+ cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
+ ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL,
+ TCPC_ROLE_CTRL_DRP | cc1 | cc2);
+
+ /* Send DRP toggle command */
+ ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_COMMAND,
+ TCPC_CMD_LOOK4CONNECTION);
+
+ /* Send TCPC enter standby command */
+ ret |= anx7411_reg_write(ctx->tcpc_client,
+ TCPC_COMMAND, TCPC_CMD_I2C_IDLE);
+ if (ret)
+ dev_err(dev, "Chip standby failed\n");
+}
+
+static void anx7411_work_func(struct work_struct *work)
+{
+ int ret;
+ u8 buf[STATUS_LEN];
+ u8 int_change; /* Interrupt change */
+ u8 int_status; /* Firmware status update */
+ u8 alert0, alert1; /* Interrupt alert source */
+ struct anx7411_data *ctx = container_of(work, struct anx7411_data, work);
+ struct device *dev = &ctx->spi_client->dev;
+
+ mutex_lock(&ctx->lock);
+
+ /* Read interrupt change status */
+ ret = anx7411_reg_block_read(ctx->spi_client, INT_STS, STATUS_LEN, buf);
+ if (ret < 0) {
+ /* Power standby mode, just return */
+ goto unlock;
+ }
+ int_change = buf[0];
+ int_status = buf[1];
+
+ /* Read alert register */
+ ret = anx7411_reg_block_read(ctx->tcpc_client, ALERT_0, STATUS_LEN, buf);
+ if (ret < 0)
+ goto unlock;
+
+ alert0 = buf[0];
+ alert1 = buf[1];
+
+ /* Clear interrupt and alert status */
+ ret = anx7411_reg_write(ctx->spi_client, INT_STS, 0);
+ ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_0, alert0);
+ ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_1, alert1);
+ if (ret)
+ goto unlock;
+
+ if (alert1 & INTP_POW_OFF) {
+ anx7411_partner_unregister_altmode(ctx);
+ if (anx7411_set_usb_role(ctx, USB_ROLE_NONE))
+ dev_err(dev, "Set usb role\n");
+ anx7411_unregister_partner(ctx);
+ ctx->psy_online = ANX7411_PSY_OFFLINE;
+ ctx->usb_type = POWER_SUPPLY_USB_TYPE_C;
+ ctx->typec.request_voltage = 0;
+ ctx->typec.request_current = 0;
+ power_supply_changed(ctx->psy);
+ anx7411_chip_standby(ctx);
+ goto unlock;
+ }
+
+ if ((alert0 & SOFTWARE_INT) && (int_change & OCM_BOOT_UP)) {
+ if (anx7411_config(ctx))
+ dev_err(dev, "Config failed\n");
+ if (anx7411_data_role_detect(ctx))
+ dev_err(dev, "set PD data role\n");
+ if (anx7411_power_role_detect(ctx))
+ dev_err(dev, "set PD power role\n");
+ anx7411_set_mux(ctx, SELECT_PIN_ASSIGMENT_C);
+ }
+
+ if (alert0 & RECEIVED_MSG)
+ anx7411_process_cmd(ctx);
+
+ ret = (int_status & DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE;
+ if (ctx->typec.data_role != ret)
+ if (anx7411_data_role_detect(ctx))
+ dev_err(dev, "set PD data role\n");
+
+ ret = (int_status & SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE;
+ if (ctx->typec.power_role != ret)
+ if (anx7411_power_role_detect(ctx))
+ dev_err(dev, "set PD power role\n");
+
+ if ((alert0 & SOFTWARE_INT) && (int_change & CC_STATUS_CHANGE))
+ anx7411_cc_status_detect(ctx);
+
+unlock:
+ mutex_unlock(&ctx->lock);
+}
+
+static irqreturn_t anx7411_intr_isr(int irq, void *data)
+{
+ struct anx7411_data *ctx = (struct anx7411_data *)data;
+
+ queue_work(ctx->workqueue, &ctx->work);
+
+ return IRQ_HANDLED;
+}
+
+static int anx7411_register_i2c_dummy_clients(struct anx7411_data *ctx,
+ struct i2c_client *client)
+{
+ int i;
+ u8 spi_addr;
+
+ for (i = 0; i < ARRAY_SIZE(anx7411_i2c_addr); i++) {
+ if (client->addr == (anx7411_i2c_addr[i].tcpc_address >> 1)) {
+ spi_addr = anx7411_i2c_addr[i].spi_address >> 1;
+ ctx->spi_client = i2c_new_dummy_device(client->adapter,
+ spi_addr);
+ if (!IS_ERR(ctx->spi_client))
+ return 0;
+ }
+ }
+
+ dev_err(&client->dev, "unable to get SPI slave\n");
+ return -ENOMEM;
+}
+
+static void anx7411_port_unregister_altmodes(struct typec_altmode **adev)
+{
+ int i;
+
+ for (i = 0; i < MAX_ALTMODE; i++)
+ if (adev[i]) {
+ typec_unregister_altmode(adev[i]);
+ adev[i] = NULL;
+ }
+}
+
+static int anx7411_usb_mux_set(struct typec_mux_dev *mux,
+ struct typec_mux_state *state)
+{
+ struct anx7411_data *ctx = typec_mux_get_drvdata(mux);
+ struct device *dev = &ctx->spi_client->dev;
+ int has_dp;
+
+ has_dp = (state->alt && state->alt->svid == USB_TYPEC_DP_SID &&
+ state->alt->mode == USB_TYPEC_DP_MODE);
+ if (!has_dp)
+ dev_err(dev, "dp altmode not register\n");
+
+ return 0;
+}
+
+static int anx7411_usb_set_orientation(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ /* No need to set anything */
+
+ return 0;
+}
+
+static int anx7411_register_switch(struct anx7411_data *ctx,
+ struct device *dev,
+ struct fwnode_handle *fwnode)
+{
+ struct typec_switch_desc sw_desc = { };
+
+ sw_desc.fwnode = fwnode;
+ sw_desc.drvdata = ctx;
+ sw_desc.name = fwnode_get_name(fwnode);
+ sw_desc.set = anx7411_usb_set_orientation;
+
+ ctx->typec.typec_switch = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(ctx->typec.typec_switch)) {
+ dev_err(dev, "switch register failed\n");
+ return PTR_ERR(ctx->typec.typec_switch);
+ }
+
+ return 0;
+}
+
+static int anx7411_register_mux(struct anx7411_data *ctx,
+ struct device *dev,
+ struct fwnode_handle *fwnode)
+{
+ struct typec_mux_desc mux_desc = { };
+
+ mux_desc.fwnode = fwnode;
+ mux_desc.drvdata = ctx;
+ mux_desc.name = fwnode_get_name(fwnode);
+ mux_desc.set = anx7411_usb_mux_set;
+
+ ctx->typec.typec_mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(ctx->typec.typec_mux)) {
+ dev_err(dev, "mux register failed\n");
+ return PTR_ERR(ctx->typec.typec_mux);
+ }
+
+ return 0;
+}
+
+static void anx7411_unregister_mux(struct anx7411_data *ctx)
+{
+ if (ctx->typec.typec_mux) {
+ typec_mux_unregister(ctx->typec.typec_mux);
+ ctx->typec.typec_mux = NULL;
+ }
+}
+
+static void anx7411_unregister_switch(struct anx7411_data *ctx)
+{
+ if (ctx->typec.typec_switch) {
+ typec_switch_unregister(ctx->typec.typec_switch);
+ ctx->typec.typec_switch = NULL;
+ }
+}
+
+static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
+ struct device *dev)
+{
+ int ret;
+ struct device_node *node;
+
+ node = of_find_node_by_name(dev->of_node, "orientation_switch");
+ if (!node)
+ return 0;
+
+ ret = anx7411_register_switch(ctx, dev, &node->fwnode);
+ if (ret) {
+ dev_err(dev, "failed register switch");
+ return ret;
+ }
+
+ node = of_find_node_by_name(dev->of_node, "mode_switch");
+ if (!node) {
+ dev_err(dev, "no typec mux exist");
+ ret = -ENODEV;
+ goto unregister_switch;
+ }
+
+ ret = anx7411_register_mux(ctx, dev, &node->fwnode);
+ if (ret) {
+ dev_err(dev, "failed register mode switch");
+ ret = -ENODEV;
+ goto unregister_switch;
+ }
+
+ return 0;
+
+unregister_switch:
+ anx7411_unregister_switch(ctx);
+
+ return ret;
+}
+
+static int anx7411_typec_port_probe(struct anx7411_data *ctx,
+ struct device *dev)
+{
+ struct typec_capability *cap = &ctx->typec.caps;
+ struct typec_params *typecp = &ctx->typec;
+ struct fwnode_handle *fwnode;
+ const char *buf;
+ int ret, i;
+
+ fwnode = device_get_named_child_node(dev, "connector");
+ if (!fwnode)
+ return -EINVAL;
+
+ ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ if (ret) {
+ dev_err(dev, "power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->type = ret;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ if (ret) {
+ dev_err(dev, "data-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_data_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->data = ret;
+
+ ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
+ if (ret) {
+ dev_err(dev, "try-power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->prefer_role = ret;
+
+ /* Get source pdos */
+ ret = fwnode_property_count_u32(fwnode, "source-pdos");
+ if (ret > 0) {
+ typecp->src_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
+ typecp->src_pdo,
+ typecp->src_pdo_nr);
+ if (ret < 0) {
+ dev_err(dev, "source cap validate failed: %d\n", ret);
+ return -EINVAL;
+ }
+
+ typecp->caps_flags |= HAS_SOURCE_CAP;
+ }
+
+ ret = fwnode_property_count_u32(fwnode, "sink-pdos");
+ if (ret > 0) {
+ typecp->sink_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
+ typecp->sink_pdo,
+ typecp->sink_pdo_nr);
+ if (ret < 0) {
+ dev_err(dev, "sink cap validate failed: %d\n", ret);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < typecp->sink_pdo_nr; i++) {
+ ret = 0;
+ switch (pdo_type(typecp->sink_pdo[i])) {
+ case PDO_TYPE_FIXED:
+ ret = pdo_fixed_voltage(typecp->sink_pdo[i]);
+ break;
+ case PDO_TYPE_BATT:
+ case PDO_TYPE_VAR:
+ ret = pdo_max_voltage(typecp->sink_pdo[i]);
+ break;
+ case PDO_TYPE_APDO:
+ default:
+ ret = 0;
+ break;
+ }
+
+ /* 100mv per unit */
+ typecp->sink_voltage = max(5000, ret) / 100;
+ }
+
+ typecp->caps_flags |= HAS_SINK_CAP;
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "op-sink-microwatt", &ret)) {
+ typecp->sink_watt = ret / 500000; /* 500mw per unit */
+ typecp->caps_flags |= HAS_SINK_WATT;
+ }
+
+ cap->fwnode = fwnode;
+
+ ctx->typec.role_sw = usb_role_switch_get(dev);
+ if (IS_ERR(ctx->typec.role_sw)) {
+ dev_err(dev, "USB role switch not found.\n");
+ ctx->typec.role_sw = NULL;
+ }
+
+ ctx->typec.port = typec_register_port(dev, cap);
+ if (IS_ERR(ctx->typec.port)) {
+ ret = PTR_ERR(ctx->typec.port);
+ ctx->typec.port = NULL;
+ dev_err(dev, "Failed to register type c port %d\n", ret);
+ return ret;
+ }
+
+ typec_port_register_altmodes(ctx->typec.port, NULL, ctx,
+ ctx->typec.port_amode,
+ MAX_ALTMODE);
+ return 0;
+}
+
+static int anx7411_typec_check_connection(struct anx7411_data *ctx)
+{
+ int ret;
+
+ ret = anx7411_reg_read(ctx->spi_client, FW_VER);
+ if (ret < 0)
+ return 0; /* No device attached in typec port */
+
+ /* Clear interrupt and alert status */
+ ret = anx7411_reg_write(ctx->spi_client, INT_STS, 0);
+ ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_0, 0xFF);
+ ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_1, 0xFF);
+ if (ret)
+ return ret;
+
+ ret = anx7411_cc_status_detect(ctx);
+ ret |= anx7411_power_role_detect(ctx);
+ ret |= anx7411_data_role_detect(ctx);
+ ret |= anx7411_set_mux(ctx, SELECT_PIN_ASSIGMENT_C);
+ if (ret)
+ return ret;
+
+ ret = anx7411_send_msg(ctx, TYPE_GET_DP_ALT_ENTER, NULL, 0);
+ ret |= anx7411_send_msg(ctx, TYPE_GET_DP_DISCOVER_MODES_INFO, NULL, 0);
+
+ return ret;
+}
+
+static int __maybe_unused anx7411_runtime_pm_suspend(struct device *dev)
+{
+ struct anx7411_data *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+
+ anx7411_partner_unregister_altmode(ctx);
+
+ if (ctx->typec.partner)
+ anx7411_unregister_partner(ctx);
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+}
+
+static int __maybe_unused anx7411_runtime_pm_resume(struct device *dev)
+{
+ struct anx7411_data *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+ /* Detect PD connection */
+ if (anx7411_typec_check_connection(ctx))
+ dev_err(dev, "check connection");
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops anx7411_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(anx7411_runtime_pm_suspend,
+ anx7411_runtime_pm_resume, NULL)
+};
+
+static void anx7411_get_gpio_irq(struct anx7411_data *ctx)
+{
+ struct device *dev = &ctx->tcpc_client->dev;
+
+ ctx->intp_gpiod = devm_gpiod_get_optional(dev, "interrupt", GPIOD_IN);
+ if (IS_ERR_OR_NULL(ctx->intp_gpiod)) {
+ dev_err(dev, "no interrupt gpio property\n");
+ return;
+ }
+
+ ctx->intp_irq = gpiod_to_irq(ctx->intp_gpiod);
+ if (ctx->intp_irq < 0)
+ dev_err(dev, "failed to get GPIO IRQ\n");
+}
+
+static enum power_supply_usb_type anx7411_psy_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_C,
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_PD_PPS,
+};
+
+static enum power_supply_property anx7411_psy_props[] = {
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static int anx7411_psy_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct anx7411_data *ctx = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ if (psp == POWER_SUPPLY_PROP_ONLINE)
+ ctx->psy_online = val->intval;
+ else
+ ret = -EINVAL;
+
+ power_supply_changed(ctx->psy);
+ return ret;
+}
+
+static int anx7411_psy_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return psp == POWER_SUPPLY_PROP_ONLINE;
+}
+
+static int anx7411_psy_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct anx7411_data *ctx = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = ctx->usb_type;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = ctx->psy_online;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = (ctx->psy_online) ?
+ ctx->typec.request_voltage * 1000 : 0;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = (ctx->psy_online) ?
+ ctx->typec.request_current * 1000 : 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int anx7411_psy_register(struct anx7411_data *ctx)
+{
+ struct power_supply_desc *psy_desc = &ctx->psy_desc;
+ struct power_supply_config psy_cfg = {};
+ char *psy_name;
+
+ psy_name = devm_kasprintf(ctx->dev, GFP_KERNEL, "anx7411-source-psy-%s",
+ dev_name(ctx->dev));
+ if (!psy_name)
+ return -ENOMEM;
+
+ psy_desc->name = psy_name;
+ psy_desc->type = POWER_SUPPLY_TYPE_USB;
+ psy_desc->usb_types = anx7411_psy_usb_types;
+ psy_desc->num_usb_types = ARRAY_SIZE(anx7411_psy_usb_types);
+ psy_desc->properties = anx7411_psy_props;
+ psy_desc->num_properties = ARRAY_SIZE(anx7411_psy_props);
+
+ psy_desc->get_property = anx7411_psy_get_prop;
+ psy_desc->set_property = anx7411_psy_set_prop;
+ psy_desc->property_is_writeable = anx7411_psy_prop_writeable;
+
+ ctx->usb_type = POWER_SUPPLY_USB_TYPE_C;
+ ctx->psy = devm_power_supply_register(ctx->dev, psy_desc, &psy_cfg);
+
+ if (IS_ERR(ctx->psy))
+ dev_warn(ctx->dev, "unable to register psy\n");
+
+ return PTR_ERR_OR_ZERO(ctx->psy);
+}
+
+static int anx7411_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct anx7411_data *plat;
+ struct device *dev = &client->dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->tcpc_client = client;
+ i2c_set_clientdata(client, plat);
+
+ mutex_init(&plat->lock);
+
+ ret = anx7411_register_i2c_dummy_clients(plat, client);
+ if (ret) {
+ dev_err(dev, "fail to reserve I2C bus\n");
+ return ret;
+ }
+
+ ret = anx7411_typec_switch_probe(plat, dev);
+ if (ret) {
+ dev_err(dev, "fail to probe typec switch\n");
+ goto free_i2c_dummy;
+ }
+
+ ret = anx7411_typec_port_probe(plat, dev);
+ if (ret) {
+ dev_err(dev, "fail to probe typec property.\n");
+ ret = -ENODEV;
+ goto free_typec_switch;
+ }
+
+ plat->intp_irq = client->irq;
+ if (!client->irq)
+ anx7411_get_gpio_irq(plat);
+
+ if (!plat->intp_irq) {
+ dev_err(dev, "fail to get interrupt IRQ\n");
+ ret = -EINVAL;
+ goto free_typec_port;
+ }
+
+ plat->dev = dev;
+ plat->psy_online = ANX7411_PSY_OFFLINE;
+ ret = anx7411_psy_register(plat);
+ if (ret) {
+ dev_err(dev, "register psy\n");
+ goto free_typec_port;
+ }
+
+ INIT_WORK(&plat->work, anx7411_work_func);
+ plat->workqueue = alloc_workqueue("anx7411_work",
+ WQ_FREEZABLE |
+ WQ_MEM_RECLAIM,
+ 1);
+ if (!plat->workqueue) {
+ dev_err(dev, "fail to create work queue\n");
+ ret = -ENOMEM;
+ goto free_typec_port;
+ }
+
+ ret = devm_request_threaded_irq(dev, plat->intp_irq,
+ NULL, anx7411_intr_isr,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "anx7411-intp", plat);
+ if (ret) {
+ dev_err(dev, "fail to request irq\n");
+ goto free_wq;
+ }
+
+ if (anx7411_typec_check_connection(plat))
+ dev_err(dev, "check status\n");
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(plat->workqueue);
+
+free_typec_port:
+ typec_unregister_port(plat->typec.port);
+ anx7411_port_unregister_altmodes(plat->typec.port_amode);
+
+free_typec_switch:
+ anx7411_unregister_switch(plat);
+ anx7411_unregister_mux(plat);
+
+free_i2c_dummy:
+ i2c_unregister_device(plat->spi_client);
+
+ return ret;
+}
+
+static int anx7411_i2c_remove(struct i2c_client *client)
+{
+ struct anx7411_data *plat = i2c_get_clientdata(client);
+
+ anx7411_partner_unregister_altmode(plat);
+ anx7411_unregister_partner(plat);
+
+ if (plat->workqueue)
+ destroy_workqueue(plat->workqueue);
+
+ if (plat->spi_client)
+ i2c_unregister_device(plat->spi_client);
+
+ if (plat->typec.role_sw)
+ usb_role_switch_put(plat->typec.role_sw);
+
+ anx7411_unregister_mux(plat);
+
+ anx7411_unregister_switch(plat);
+
+ if (plat->typec.port)
+ typec_unregister_port(plat->typec.port);
+
+ anx7411_port_unregister_altmodes(plat->typec.port_amode);
+
+ return 0;
+}
+
+static const struct i2c_device_id anx7411_id[] = {
+ {"anx7411", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, anx7411_id);
+
+static const struct of_device_id anx_match_table[] = {
+ {.compatible = "analogix,anx7411",},
+ {},
+};
+
+static struct i2c_driver anx7411_driver = {
+ .driver = {
+ .name = "anx7411",
+ .of_match_table = anx_match_table,
+ .pm = &anx7411_pm_ops,
+ },
+ .probe = anx7411_i2c_probe,
+ .remove = anx7411_i2c_remove,
+
+ .id_table = anx7411_id,
+};
+
+module_i2c_driver(anx7411_driver);
+
+MODULE_DESCRIPTION("Anx7411 USB Type-C PD driver");
+MODULE_AUTHOR("Xin Ji <xji@analogixsemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.5");
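The firmware message helpers in the driver above (checksum(), anx7411_send_msg(), anx7411_process_cmd()) use a simple additive checksum: the sender appends the two's complement of the byte sum, so the receiver's sum over the whole message, trailing byte included, must come out to zero modulo 256. A small stand-alone sketch of that convention, for illustration only (not driver code):

#include <stdint.h>
#include <stddef.h>

/* 8-bit sum of all bytes, as the driver's checksum() computes. */
static uint8_t byte_sum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

/* Sender: append the two's complement so the sealed message sums to zero. */
static void seal_message(uint8_t *buf, size_t payload_len)
{
	buf[payload_len] = (uint8_t)(0 - byte_sum(buf, payload_len));
}

/* Receiver: any non-zero sum over payload plus checksum byte means corruption. */
static int message_is_valid(const uint8_t *buf, size_t len_with_checksum)
{
	return byte_sum(buf, len_with_checksum) == 0;
}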
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ee0e520707dd..ebc29ec20e3f 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -12,9 +12,11 @@
#include <linux/slab.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
#include "bus.h"
#include "class.h"
+#include "pd.h"
static DEFINE_IDA(typec_index_ida);
@@ -721,6 +723,39 @@ void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revisio
EXPORT_SYMBOL_GPL(typec_partner_set_pd_revision);
/**
+ * typec_partner_set_usb_power_delivery - Declare USB Power Delivery Contract.
+ * @partner: The partner device.
+ * @pd: The USB PD instance.
+ *
+ * This routine can be used to declare a USB Power Delivery Contract with @partner
+ * by linking @partner to @pd, which contains the objects that were used during the
+ * negotiation of the contract.
+ *
+ * If @pd is NULL, the link is removed and the contract with @partner has ended.
+ */
+int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
+ struct usb_power_delivery *pd)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(partner) || partner->pd == pd)
+ return 0;
+
+ if (pd) {
+ ret = usb_power_delivery_link_device(pd, &partner->dev);
+ if (ret)
+ return ret;
+ } else {
+ usb_power_delivery_unlink_device(partner->pd, &partner->dev);
+ }
+
+ partner->pd = pd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(typec_partner_set_usb_power_delivery);
+
+/**
* typec_partner_set_num_altmodes - Set the number of available partner altmodes
* @partner: The partner to be updated.
* @num_altmodes: The number of altmodes we want to specify as available.
@@ -1170,6 +1205,104 @@ EXPORT_SYMBOL_GPL(typec_unregister_cable);
/* ------------------------------------------------------------------------- */
/* USB Type-C ports */
+/**
+ * typec_port_set_usb_power_delivery - Assign USB PD for port.
+ * @port: USB Type-C port.
+ * @pd: USB PD instance.
+ *
+ * This routine can be used to set the USB Power Delivery Capabilities for @port
+ * that it will advertise to the partner.
+ *
+ * If @pd is NULL, the assignment is removed.
+ */
+int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_delivery *pd)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(port) || port->pd == pd)
+ return 0;
+
+ if (pd) {
+ ret = usb_power_delivery_link_device(pd, &port->dev);
+ if (ret)
+ return ret;
+ } else {
+ usb_power_delivery_unlink_device(port->pd, &port->dev);
+ }
+
+ port->pd = pd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(typec_port_set_usb_power_delivery);
+
+static ssize_t select_usb_power_delivery_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct typec_port *port = to_typec_port(dev);
+ struct usb_power_delivery *pd;
+
+ if (!port->ops || !port->ops->pd_set)
+ return -EOPNOTSUPP;
+
+ pd = usb_power_delivery_find(buf);
+ if (!pd)
+ return -EINVAL;
+
+ return port->ops->pd_set(port, pd);
+}
+
+static ssize_t select_usb_power_delivery_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+ struct usb_power_delivery **pds;
+ struct usb_power_delivery *pd;
+ int ret = 0;
+
+ if (!port->ops || !port->ops->pd_get)
+ return -EOPNOTSUPP;
+
+ pds = port->ops->pd_get(port);
+ if (!pds)
+ return 0;
+
+ for (; *pds; pds++) {
+ pd = *pds;
+ if (pd == port->pd)
+ ret += sysfs_emit_at(buf, ret, "[%s] ", dev_name(&pd->dev));
+ else
+ ret += sysfs_emit_at(buf, ret, "%s ", dev_name(&pd->dev));
+ }
+
+ buf[ret - 1] = '\n';
+
+ return ret;
+}
+static DEVICE_ATTR_RW(select_usb_power_delivery);
+
+static struct attribute *port_attrs[] = {
+ &dev_attr_select_usb_power_delivery.attr,
+ NULL
+};
+
+static umode_t port_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct typec_port *port = to_typec_port(kobj_to_dev(kobj));
+
+ if (!port->pd || !port->ops || !port->ops->pd_get)
+ return 0;
+ if (!port->ops->pd_set)
+ return 0444;
+
+ return attr->mode;
+}
+
+static const struct attribute_group pd_group = {
+ .is_visible = port_attr_is_visible,
+ .attrs = port_attrs,
+};
+
static const char * const typec_orientations[] = {
[TYPEC_ORIENTATION_NONE] = "unknown",
[TYPEC_ORIENTATION_NORMAL] = "normal",
@@ -1581,6 +1714,7 @@ static const struct attribute_group typec_group = {
static const struct attribute_group *typec_groups[] = {
&typec_group,
+ &pd_group,
NULL
};
@@ -1603,6 +1737,7 @@ static void typec_release(struct device *dev)
ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
+ typec_retimer_put(port->retimer);
kfree(port->cap);
kfree(port);
}
@@ -1718,6 +1853,7 @@ void typec_set_pwr_opmode(struct typec_port *port,
partner->usb_pd = 1;
sysfs_notify(&partner_dev->kobj, NULL,
"supports_usb_power_delivery");
+ kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE);
}
put_device(partner_dev);
}
@@ -2116,6 +2252,13 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_PTR(ret);
}
+ port->retimer = typec_retimer_get(&port->dev);
+ if (IS_ERR(port->retimer)) {
+ ret = PTR_ERR(port->retimer);
+ put_device(&port->dev);
+ return ERR_PTR(ret);
+ }
+
ret = device_add(&port->dev);
if (ret) {
dev_err(parent, "failed to register port (%d)\n", ret);
@@ -2123,6 +2266,13 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_PTR(ret);
}
+ ret = typec_port_set_usb_power_delivery(port, cap->pd);
+ if (ret) {
+ dev_err(&port->dev, "failed to link pd\n");
+ device_unregister(&port->dev);
+ return ERR_PTR(ret);
+ }
+
ret = typec_link_ports(port);
if (ret)
dev_warn(&port->dev, "failed to create symlinks (%d)\n", ret);
@@ -2141,6 +2291,7 @@ void typec_unregister_port(struct typec_port *port)
{
if (!IS_ERR_OR_NULL(port)) {
typec_unlink_ports(port);
+ typec_port_set_usb_power_delivery(port, NULL);
device_unregister(&port->dev);
}
}
@@ -2158,12 +2309,26 @@ static int __init typec_init(void)
if (ret)
goto err_unregister_bus;
- ret = class_register(&typec_class);
+ ret = class_register(&retimer_class);
if (ret)
goto err_unregister_mux_class;
+ ret = class_register(&typec_class);
+ if (ret)
+ goto err_unregister_retimer_class;
+
+ ret = usb_power_delivery_init();
+ if (ret)
+ goto err_unregister_class;
+
return 0;
+err_unregister_class:
+ class_unregister(&typec_class);
+
+err_unregister_retimer_class:
+ class_unregister(&retimer_class);
+
err_unregister_mux_class:
class_unregister(&typec_mux_class);
@@ -2176,6 +2341,7 @@ subsys_initcall(typec_init);
static void __exit typec_exit(void)
{
+ usb_power_delivery_exit();
class_unregister(&typec_class);
ida_destroy(&typec_index_ida);
bus_unregister(&typec_bus);
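Putting the class.c additions above together: a port driver hands its capabilities to the class either through cap->pd at registration or later with typec_port_set_usb_power_delivery(), and exposes the selectable set through the new pd_get/pd_set port operations, where pd_get is expected to return a NULL-terminated array (that is what select_usb_power_delivery_show() walks). A hedged sketch of how a controller driver might wire this up; the operation signatures are inferred from the call sites above and the driver structure is hypothetical:

struct my_port_driver {
	struct typec_port *port;
	struct usb_power_delivery *active_pd;	/* currently advertised capabilities */
	struct usb_power_delivery *pds[2];	/* one entry plus the NULL terminator */
};

static struct usb_power_delivery **my_pd_get(struct typec_port *port)
{
	struct my_port_driver *drv = typec_get_drvdata(port);

	return drv->pds;	/* the class iterates until it hits NULL */
}

static int my_pd_set(struct typec_port *port, struct usb_power_delivery *pd)
{
	struct my_port_driver *drv = typec_get_drvdata(port);

	drv->active_pd = pd;
	/* reflect the selection back so sysfs shows the chosen entry bracketed */
	return typec_port_set_usb_power_delivery(port, pd);
}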
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index 0f1bd6d19d67..673b2952b074 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -33,6 +33,8 @@ struct typec_partner {
int num_altmodes;
u16 pd_revision; /* 0300H = "3.0" */
enum usb_pd_svdm_ver svdm_version;
+
+ struct usb_power_delivery *pd;
};
struct typec_port {
@@ -40,6 +42,8 @@ struct typec_port {
struct device dev;
struct ida mode_ids;
+ struct usb_power_delivery *pd;
+
int prefer_role;
enum typec_data_role data_role;
enum typec_role pwr_role;
@@ -51,6 +55,7 @@ struct typec_port {
enum typec_orientation orientation;
struct typec_switch *sw;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
const struct typec_capability *cap;
const struct typec_operations *ops;
@@ -72,6 +77,7 @@ extern const struct device_type typec_port_dev_type;
#define is_typec_port(dev) ((dev)->type == &typec_port_dev_type)
extern struct class typec_mux_class;
+extern struct class retimer_class;
extern struct class typec_class;
#if defined(CONFIG_ACPI)
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index fd55c2c516a5..464330776cd6 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -281,9 +281,13 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
if (match)
goto find_mux;
- /* Accessory Mode muxes */
if (!desc) {
- match = fwnode_property_present(fwnode, "accessory");
+ /*
+ * Accessory Mode muxes & muxes which explicitly specify
+ * the required identifier can avoid SVID matching.
+ */
+ match = fwnode_property_present(fwnode, "accessory") ||
+ fwnode_property_present(fwnode, id);
if (match)
goto find_mux;
return NULL;
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
new file mode 100644
index 000000000000..dc72005d68db
--- /dev/null
+++ b/drivers/usb/typec/pd.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Power Delivery sysfs entries
+ *
+ * Copyright (C) 2022, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+
+#include "pd.h"
+
+static DEFINE_IDA(pd_ida);
+
+static struct class pd_class = {
+ .name = "usb_power_delivery",
+ .owner = THIS_MODULE,
+};
+
+#define to_pdo(o) container_of(o, struct pdo, dev)
+
+struct pdo {
+ struct device dev;
+ int object_position;
+ u32 pdo;
+};
+
+static void pdo_release(struct device *dev)
+{
+ kfree(to_pdo(dev));
+}
+
+/* -------------------------------------------------------------------------- */
+/* Fixed Supply */
+
+static ssize_t
+dual_role_power_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_DUAL_ROLE));
+}
+static DEVICE_ATTR_RO(dual_role_power);
+
+static ssize_t
+usb_suspend_supported_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_SUSPEND));
+}
+static DEVICE_ATTR_RO(usb_suspend_supported);
+
+static ssize_t
+unconstrained_power_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_EXTPOWER));
+}
+static DEVICE_ATTR_RO(unconstrained_power);
+
+static ssize_t
+usb_communication_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_USB_COMM));
+}
+static DEVICE_ATTR_RO(usb_communication_capable);
+
+static ssize_t
+dual_role_data_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_DATA_SWAP));
+}
+static DEVICE_ATTR_RO(dual_role_data);
+
+static ssize_t
+unchunked_extended_messages_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_UNCHUNK_EXT));
+}
+static DEVICE_ATTR_RO(unchunked_extended_messages_supported);
+
+/*
+ * REVISIT: Peak Current requires access also to the RDO.
+static ssize_t
+peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ ...
+}
+*/
+
+static ssize_t
+fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3;
+}
+static DEVICE_ATTR_RO(fast_role_swap_current);
+
+static ssize_t voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umV\n", pdo_fixed_voltage(to_pdo(dev)->pdo));
+}
+static DEVICE_ATTR_RO(voltage);
+
+/* Shared with Variable supplies, both source and sink */
+static ssize_t current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umA\n", pdo_max_current(to_pdo(dev)->pdo));
+}
+
+/* Shared with Variable type supplies */
+static struct device_attribute maximum_current_attr = {
+ .attr = {
+ .name = "maximum_current",
+ .mode = 0444,
+ },
+ .show = current_show,
+};
+
+static struct device_attribute operational_current_attr = {
+ .attr = {
+ .name = "operational_current",
+ .mode = 0444,
+ },
+ .show = current_show,
+};
+
+static struct attribute *source_fixed_supply_attrs[] = {
+ &dev_attr_dual_role_power.attr,
+ &dev_attr_usb_suspend_supported.attr,
+ &dev_attr_unconstrained_power.attr,
+ &dev_attr_usb_communication_capable.attr,
+ &dev_attr_dual_role_data.attr,
+ &dev_attr_unchunked_extended_messages_supported.attr,
+ /*&dev_attr_peak_current.attr,*/
+ &dev_attr_voltage.attr,
+ &maximum_current_attr.attr,
+ NULL
+};
+
+static umode_t fixed_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ if (to_pdo(kobj_to_dev(kobj))->object_position &&
+ /*attr != &dev_attr_peak_current.attr &&*/
+ attr != &dev_attr_voltage.attr &&
+ attr != &maximum_current_attr.attr &&
+ attr != &operational_current_attr.attr)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group source_fixed_supply_group = {
+ .is_visible = fixed_attr_is_visible,
+ .attrs = source_fixed_supply_attrs,
+};
+__ATTRIBUTE_GROUPS(source_fixed_supply);
+
+static struct device_type source_fixed_supply_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = source_fixed_supply_groups,
+};
+
+static struct attribute *sink_fixed_supply_attrs[] = {
+ &dev_attr_dual_role_power.attr,
+ &dev_attr_usb_suspend_supported.attr,
+ &dev_attr_unconstrained_power.attr,
+ &dev_attr_usb_communication_capable.attr,
+ &dev_attr_dual_role_data.attr,
+ &dev_attr_unchunked_extended_messages_supported.attr,
+ &dev_attr_fast_role_swap_current.attr,
+ &dev_attr_voltage.attr,
+ &operational_current_attr.attr,
+ NULL
+};
+
+static const struct attribute_group sink_fixed_supply_group = {
+ .is_visible = fixed_attr_is_visible,
+ .attrs = sink_fixed_supply_attrs,
+};
+__ATTRIBUTE_GROUPS(sink_fixed_supply);
+
+static struct device_type sink_fixed_supply_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = sink_fixed_supply_groups,
+};
+
+/* -------------------------------------------------------------------------- */
+/* Variable Supply */
+
+static ssize_t
+maximum_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umV\n", pdo_max_voltage(to_pdo(dev)->pdo));
+}
+static DEVICE_ATTR_RO(maximum_voltage);
+
+static ssize_t
+minimum_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umV\n", pdo_min_voltage(to_pdo(dev)->pdo));
+}
+static DEVICE_ATTR_RO(minimum_voltage);
+
+static struct attribute *source_variable_supply_attrs[] = {
+ &dev_attr_maximum_voltage.attr,
+ &dev_attr_minimum_voltage.attr,
+ &maximum_current_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(source_variable_supply);
+
+static struct device_type source_variable_supply_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = source_variable_supply_groups,
+};
+
+static struct attribute *sink_variable_supply_attrs[] = {
+ &dev_attr_maximum_voltage.attr,
+ &dev_attr_minimum_voltage.attr,
+ &operational_current_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(sink_variable_supply);
+
+static struct device_type sink_variable_supply_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = sink_variable_supply_groups,
+};
+
+/* -------------------------------------------------------------------------- */
+/* Battery */
+
+static ssize_t
+maximum_power_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umW\n", pdo_max_power(to_pdo(dev)->pdo));
+}
+static DEVICE_ATTR_RO(maximum_power);
+
+static ssize_t
+operational_power_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umW\n", pdo_max_power(to_pdo(dev)->pdo));
+}
+static DEVICE_ATTR_RO(operational_power);
+
+static struct attribute *source_battery_attrs[] = {
+ &dev_attr_maximum_voltage.attr,
+ &dev_attr_minimum_voltage.attr,
+ &dev_attr_maximum_power.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(source_battery);
+
+static struct device_type source_battery_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = source_battery_groups,
+};
+
+static struct attribute *sink_battery_attrs[] = {
+ &dev_attr_maximum_voltage.attr,
+ &dev_attr_minimum_voltage.attr,
+ &dev_attr_operational_power.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(sink_battery);
+
+static struct device_type sink_battery_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = sink_battery_groups,
+};
+
+/* -------------------------------------------------------------------------- */
+/* Standard Power Range (SPR) Programmable Power Supply (PPS) */
+
+static ssize_t
+pps_power_limited_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & BIT(27)));
+}
+static DEVICE_ATTR_RO(pps_power_limited);
+
+static ssize_t
+pps_max_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umV\n", pdo_pps_apdo_max_voltage(to_pdo(dev)->pdo));
+}
+
+static ssize_t
+pps_min_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umV\n", pdo_pps_apdo_min_voltage(to_pdo(dev)->pdo));
+}
+
+static ssize_t
+pps_max_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umA\n", pdo_pps_apdo_max_current(to_pdo(dev)->pdo));
+}
+
+static struct device_attribute pps_max_voltage_attr = {
+ .attr = {
+ .name = "maximum_voltage",
+ .mode = 0444,
+ },
+ .show = pps_max_voltage_show,
+};
+
+static struct device_attribute pps_min_voltage_attr = {
+ .attr = {
+ .name = "minimum_voltage",
+ .mode = 0444,
+ },
+ .show = pps_min_voltage_show,
+};
+
+static struct device_attribute pps_max_current_attr = {
+ .attr = {
+ .name = "maximum_current",
+ .mode = 0444,
+ },
+ .show = pps_max_current_show,
+};
+
+static struct attribute *source_pps_attrs[] = {
+ &dev_attr_pps_power_limited.attr,
+ &pps_max_voltage_attr.attr,
+ &pps_min_voltage_attr.attr,
+ &pps_max_current_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(source_pps);
+
+static struct device_type source_pps_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = source_pps_groups,
+};
+
+static struct attribute *sink_pps_attrs[] = {
+ &pps_max_voltage_attr.attr,
+ &pps_min_voltage_attr.attr,
+ &pps_max_current_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(sink_pps);
+
+static struct device_type sink_pps_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = sink_pps_groups,
+};
+
+/* -------------------------------------------------------------------------- */
+
+static const char * const supply_name[] = {
+ [PDO_TYPE_FIXED] = "fixed_supply",
+ [PDO_TYPE_BATT] = "battery",
+ [PDO_TYPE_VAR] = "variable_supply",
+};
+
+static const char * const apdo_supply_name[] = {
+ [APDO_TYPE_PPS] = "programmable_supply",
+};
+
+static struct device_type *source_type[] = {
+ [PDO_TYPE_FIXED] = &source_fixed_supply_type,
+ [PDO_TYPE_BATT] = &source_battery_type,
+ [PDO_TYPE_VAR] = &source_variable_supply_type,
+};
+
+static struct device_type *source_apdo_type[] = {
+ [APDO_TYPE_PPS] = &source_pps_type,
+};
+
+static struct device_type *sink_type[] = {
+ [PDO_TYPE_FIXED] = &sink_fixed_supply_type,
+ [PDO_TYPE_BATT] = &sink_battery_type,
+ [PDO_TYPE_VAR] = &sink_variable_supply_type,
+};
+
+static struct device_type *sink_apdo_type[] = {
+ [APDO_TYPE_PPS] = &sink_pps_type,
+};
+
+/* REVISIT: Export when EPR_*_Capabilities need to be supported. */
+static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position)
+{
+ struct device_type *type;
+ const char *name;
+ struct pdo *p;
+ int ret;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->pdo = pdo;
+ p->object_position = position;
+
+ if (pdo_type(pdo) == PDO_TYPE_APDO) {
+ /* FIXME: Only PPS supported for now! Skipping others. */
+ if (pdo_apdo_type(pdo) > APDO_TYPE_PPS) {
+ dev_warn(&cap->dev, "Unknown APDO type. PDO 0x%08x\n", pdo);
+ kfree(p);
+ return 0;
+ }
+
+ if (is_source(cap->role))
+ type = source_apdo_type[pdo_apdo_type(pdo)];
+ else
+ type = sink_apdo_type[pdo_apdo_type(pdo)];
+
+ name = apdo_supply_name[pdo_apdo_type(pdo)];
+ } else {
+ if (is_source(cap->role))
+ type = source_type[pdo_type(pdo)];
+ else
+ type = sink_type[pdo_type(pdo)];
+
+ name = supply_name[pdo_type(pdo)];
+ }
+
+ p->dev.parent = &cap->dev;
+ p->dev.type = type;
+ dev_set_name(&p->dev, "%u:%s", position + 1, name);
+
+ ret = device_register(&p->dev);
+ if (ret) {
+ put_device(&p->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int remove_pdo(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static const char * const cap_name[] = {
+ [TYPEC_SINK] = "sink-capabilities",
+ [TYPEC_SOURCE] = "source-capabilities",
+};
+
+static void pd_capabilities_release(struct device *dev)
+{
+ kfree(to_usb_power_delivery_capabilities(dev));
+}
+
+static struct device_type pd_capabilities_type = {
+ .name = "capabilities",
+ .release = pd_capabilities_release,
+};
+
+/**
+ * usb_power_delivery_register_capabilities - Register a set of capabilities.
+ * @pd: The USB PD instance that the capabilities belong to.
+ * @desc: Description of the Capabilities Message.
+ *
+ * This function registers a Capabilities Message described in @desc. The
+ * capabilities will have their own sub-directory under @pd in sysfs.
+ *
+ * The function returns a pointer to struct usb_power_delivery_capabilities, or
+ * ERR_PTR(errno).
+ */
+struct usb_power_delivery_capabilities *
+usb_power_delivery_register_capabilities(struct usb_power_delivery *pd,
+ struct usb_power_delivery_capabilities_desc *desc)
+{
+ struct usb_power_delivery_capabilities *cap;
+ int ret;
+ int i;
+
+ cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+ if (!cap)
+ return ERR_PTR(-ENOMEM);
+
+ cap->pd = pd;
+ cap->role = desc->role;
+
+ cap->dev.parent = &pd->dev;
+ cap->dev.type = &pd_capabilities_type;
+ dev_set_name(&cap->dev, "%s", cap_name[cap->role]);
+
+ ret = device_register(&cap->dev);
+ if (ret) {
+ put_device(&cap->dev);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < PDO_MAX_OBJECTS && desc->pdo[i]; i++) {
+ ret = add_pdo(cap, desc->pdo[i], i);
+ if (ret) {
+ usb_power_delivery_unregister_capabilities(cap);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return cap;
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_register_capabilities);
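+
+/*
+ * Illustrative sketch only (not part of this patch's API contract): a port
+ * driver could register a Source Capabilities message roughly like this,
+ * assuming "pd" came from usb_power_delivery_register() below and
+ * "pdos"/"nr_pdos" are made-up names for the driver's PDO array:
+ *
+ *   struct usb_power_delivery_capabilities_desc caps = { };
+ *   struct usb_power_delivery_capabilities *cap;
+ *
+ *   memcpy(caps.pdo, pdos, sizeof(u32) * nr_pdos);
+ *   caps.role = TYPEC_SOURCE;
+ *   cap = usb_power_delivery_register_capabilities(pd, &caps);
+ *   if (IS_ERR(cap))
+ *           return PTR_ERR(cap);
+ */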
+
+/**
+ * usb_power_delivery_unregister_capabilities - Unregister a set of capabilities
+ * @cap: The capabilities
+ */
+void usb_power_delivery_unregister_capabilities(struct usb_power_delivery_capabilities *cap)
+{
+ if (!cap)
+ return;
+
+ device_for_each_child(&cap->dev, NULL, remove_pdo);
+ device_unregister(&cap->dev);
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_unregister_capabilities);
+
+/* -------------------------------------------------------------------------- */
+
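+/*
+ * Both revision and version are BCD-coded, matching the pd_revision
+ * convention in class.h (0300H = "3.0"): for example, 0x0300 is printed
+ * as "3.0" and 0x0310 as "3.1".
+ */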
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_power_delivery *pd = to_usb_power_delivery(dev);
+
+ return sysfs_emit(buf, "%u.%u\n", (pd->revision >> 8) & 0xff, (pd->revision >> 4) & 0xf);
+}
+static DEVICE_ATTR_RO(revision);
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_power_delivery *pd = to_usb_power_delivery(dev);
+
+ return sysfs_emit(buf, "%u.%u\n", (pd->version >> 8) & 0xff, (pd->version >> 4) & 0xf);
+}
+static DEVICE_ATTR_RO(version);
+
+static struct attribute *pd_attrs[] = {
+ &dev_attr_revision.attr,
+ &dev_attr_version.attr,
+ NULL
+};
+
+static umode_t pd_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct usb_power_delivery *pd = to_usb_power_delivery(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_version.attr && !pd->version)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group pd_group = {
+ .is_visible = pd_attr_is_visible,
+ .attrs = pd_attrs,
+};
+__ATTRIBUTE_GROUPS(pd);
+
+static void pd_release(struct device *dev)
+{
+ struct usb_power_delivery *pd = to_usb_power_delivery(dev);
+
+ ida_simple_remove(&pd_ida, pd->id);
+ kfree(pd);
+}
+
+static struct device_type pd_type = {
+ .name = "usb_power_delivery",
+ .release = pd_release,
+ .groups = pd_groups,
+};
+
+struct usb_power_delivery *usb_power_delivery_find(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_name(&pd_class, name);
+
+ return dev ? to_usb_power_delivery(dev) : NULL;
+}
+
+/**
+ * usb_power_delivery_register - Register USB Power Delivery Support.
+ * @parent: Parent device.
+ * @desc: Description of the USB PD contract.
+ *
+ * This routine can be used to register USB Power Delivery capabilities that a
+ * device or devices can support. These capabilities represent all the
+ * capabilities that can be negotiated with a partner, not only the Power
+ * Capabilities that are negotiated using the USB PD Capabilities Message.
+ *
+ * The USB Power Delivery Support object that this routine generates can be used
+ * as the parent object for all the actual USB Power Delivery Messages and
+ * objects that can be negotiated with the partner.
+ *
+ * Returns a handle to struct usb_power_delivery or an ERR_PTR value.
+ */
+struct usb_power_delivery *
+usb_power_delivery_register(struct device *parent, struct usb_power_delivery_desc *desc)
+{
+ struct usb_power_delivery *pd;
+ int ret;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_simple_get(&pd_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(pd);
+ return ERR_PTR(ret);
+ }
+
+ pd->id = ret;
+ pd->revision = desc->revision;
+ pd->version = desc->version;
+
+ pd->dev.parent = parent;
+ pd->dev.type = &pd_type;
+ pd->dev.class = &pd_class;
+ dev_set_name(&pd->dev, "pd%d", pd->id);
+
+ ret = device_register(&pd->dev);
+ if (ret) {
+ put_device(&pd->dev);
+ return ERR_PTR(ret);
+ }
+
+ return pd;
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_register);
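+
+/*
+ * Minimal usage sketch (illustrative only), mirroring how the tcpm changes
+ * later in this patch use the API; "port" and its members are assumed
+ * driver-specific names:
+ *
+ *   struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
+ *   struct usb_power_delivery *pd;
+ *
+ *   pd = usb_power_delivery_register(port->dev, &desc);
+ *   if (IS_ERR(pd))
+ *           return PTR_ERR(pd);
+ *   port->typec_caps.pd = pd;
+ */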
+
+/**
+ * usb_power_delivery_unregister - Unregister USB Power Delivery Support.
+ * @pd: The USB PD contract.
+ */
+void usb_power_delivery_unregister(struct usb_power_delivery *pd)
+{
+ if (IS_ERR_OR_NULL(pd))
+ return;
+
+ device_unregister(&pd->dev);
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_unregister);
+
+/**
+ * usb_power_delivery_link_device - Link device to its USB PD object.
+ * @pd: The USB PD instance.
+ * @dev: The device.
+ *
+ * This function can be used to create a symlink named "usb_power_delivery" for
+ * @dev that points to @pd.
+ */
+int usb_power_delivery_link_device(struct usb_power_delivery *pd, struct device *dev)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(pd) || !dev)
+ return 0;
+
+ ret = sysfs_create_link(&dev->kobj, &pd->dev.kobj, "usb_power_delivery");
+ if (ret)
+ return ret;
+
+ get_device(&pd->dev);
+ get_device(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_link_device);
+
+/**
+ * usb_power_delivery_unlink_device - Unlink device from its USB PD object.
+ * @pd: The USB PD instance.
+ * @dev: The device.
+ *
+ * Remove the symlink that was previously created with usb_power_delivery_link_device().
+ */
+void usb_power_delivery_unlink_device(struct usb_power_delivery *pd, struct device *dev)
+{
+ if (IS_ERR_OR_NULL(pd) || !dev)
+ return;
+
+ sysfs_remove_link(&dev->kobj, "usb_power_delivery");
+ put_device(&pd->dev);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(usb_power_delivery_unlink_device);
+
+/* -------------------------------------------------------------------------- */
+
+int __init usb_power_delivery_init(void)
+{
+ return class_register(&pd_class);
+}
+
+void __exit usb_power_delivery_exit(void)
+{
+ ida_destroy(&pd_ida);
+ class_unregister(&pd_class);
+}
diff --git a/drivers/usb/typec/pd.h b/drivers/usb/typec/pd.h
new file mode 100644
index 000000000000..049a1aad440a
--- /dev/null
+++ b/drivers/usb/typec/pd.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_POWER_DELIVERY__
+#define __USB_POWER_DELIVERY__
+
+#include <linux/device.h>
+#include <linux/usb/typec.h>
+
+struct usb_power_delivery {
+ struct device dev;
+ int id;
+ u16 revision;
+ u16 version;
+};
+
+struct usb_power_delivery_capabilities {
+ struct device dev;
+ struct usb_power_delivery *pd;
+ enum typec_role role;
+};
+
+#define to_usb_power_delivery_capabilities(o) container_of(o, struct usb_power_delivery_capabilities, dev)
+#define to_usb_power_delivery(o) container_of(o, struct usb_power_delivery, dev)
+
+struct usb_power_delivery *usb_power_delivery_find(const char *name);
+
+int usb_power_delivery_init(void);
+void usb_power_delivery_exit(void);
+
+#endif /* __USB_POWER_DELIVERY__ */
diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c
new file mode 100644
index 000000000000..2003731f1bee
--- /dev/null
+++ b/drivers/usb/typec/retimer.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 Google LLC
+ *
+ * USB Type-C Retimer support.
+ * Author: Prashant Malani <pmalani@chromium.org>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "class.h"
+#include "retimer.h"
+
+static bool dev_name_ends_with(struct device *dev, const char *suffix)
+{
+ const char *name = dev_name(dev);
+ const int name_len = strlen(name);
+ const int suffix_len = strlen(suffix);
+
+ if (suffix_len > name_len)
+ return false;
+
+ return strcmp(name + (name_len - suffix_len), suffix) == 0;
+}
+
+static int retimer_fwnode_match(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-retimer");
+}
+
+static void *typec_retimer_match(struct fwnode_handle *fwnode, const char *id, void *data)
+{
+ struct device *dev;
+
+ if (id && !fwnode_property_present(fwnode, id))
+ return NULL;
+
+ dev = class_find_device(&retimer_class, NULL, fwnode,
+ retimer_fwnode_match);
+
+ return dev ? to_typec_retimer(dev) : ERR_PTR(-EPROBE_DEFER);
+}
+
+/**
+ * fwnode_typec_retimer_get - Find USB Type-C retimer.
+ * @fwnode: The caller device node.
+ *
+ * Finds a retimer linked to the caller. This function is primarily meant for the
+ * Type-C drivers. Returns a reference to the retimer on success, NULL if no
+ * matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
+ * was found but the retimer has not been enumerated yet.
+ */
+struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode)
+{
+ struct typec_retimer *retimer;
+
+ retimer = fwnode_connection_find_match(fwnode, "retimer-switch", NULL, typec_retimer_match);
+ if (!IS_ERR_OR_NULL(retimer))
+ WARN_ON(!try_module_get(retimer->dev.parent->driver->owner));
+
+ return retimer;
+}
+EXPORT_SYMBOL_GPL(fwnode_typec_retimer_get);
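+
+/*
+ * Consumer-side sketch (illustrative; assumes "dev" is the caller's struct
+ * device and its fwnode carries a "retimer-switch" connection):
+ *
+ *   struct typec_retimer_state state = { };
+ *   struct typec_retimer *retimer;
+ *
+ *   retimer = fwnode_typec_retimer_get(dev_fwnode(dev));
+ *   if (IS_ERR(retimer))
+ *           return PTR_ERR(retimer);
+ *   typec_retimer_set(retimer, &state);
+ *   ...
+ *   typec_retimer_put(retimer);
+ */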
+
+/**
+ * typec_retimer_put - Release handle to a retimer.
+ * @retimer: USB Type-C Connector Retimer.
+ *
+ * Decrements reference count for @retimer.
+ */
+void typec_retimer_put(struct typec_retimer *retimer)
+{
+ if (!IS_ERR_OR_NULL(retimer)) {
+ module_put(retimer->dev.parent->driver->owner);
+ put_device(&retimer->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(typec_retimer_put);
+
+int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
+{
+ if (IS_ERR_OR_NULL(retimer))
+ return 0;
+
+ return retimer->set(retimer, state);
+}
+EXPORT_SYMBOL_GPL(typec_retimer_set);
+
+static void typec_retimer_release(struct device *dev)
+{
+ kfree(to_typec_retimer(dev));
+}
+
+static const struct device_type typec_retimer_dev_type = {
+ .name = "typec_retimer",
+ .release = typec_retimer_release,
+};
+
+/**
+ * typec_retimer_register - Register a retimer device.
+ * @parent: Parent device.
+ * @desc: Retimer description.
+ *
+ * Some USB Type-C connectors have their physical lines routed through retimers before they
+ * reach muxes or host controllers. In some cases (for example, when using alternate modes)
+ * these retimers need to be reconfigured appropriately. This function registers retimer
+ * switches, which route and potentially modify the signals on the Type-C physical lines
+ * en route to the host controllers.
+ */
+struct typec_retimer *
+typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc)
+{
+ struct typec_retimer *retimer;
+ int ret;
+
+ if (!desc || !desc->set)
+ return ERR_PTR(-EINVAL);
+
+ retimer = kzalloc(sizeof(*retimer), GFP_KERNEL);
+ if (!retimer)
+ return ERR_PTR(-ENOMEM);
+
+ retimer->set = desc->set;
+
+ device_initialize(&retimer->dev);
+ retimer->dev.parent = parent;
+ retimer->dev.fwnode = desc->fwnode;
+ retimer->dev.class = &retimer_class;
+ retimer->dev.type = &typec_retimer_dev_type;
+ retimer->dev.driver_data = desc->drvdata;
+ dev_set_name(&retimer->dev, "%s-retimer",
+ desc->name ? desc->name : dev_name(parent));
+
+ ret = device_add(&retimer->dev);
+ if (ret) {
+ dev_err(parent, "failed to register retimer (%d)\n", ret);
+ put_device(&retimer->dev);
+ return ERR_PTR(ret);
+ }
+
+ return retimer;
+}
+EXPORT_SYMBOL_GPL(typec_retimer_register);
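+
+/*
+ * Provider-side sketch (illustrative; "my_retimer_set" and "my_priv" are
+ * hypothetical driver names):
+ *
+ *   static int my_retimer_set(struct typec_retimer *retimer,
+ *                             struct typec_retimer_state *state)
+ *   {
+ *           struct my_priv *priv = typec_retimer_get_drvdata(retimer);
+ *           ...
+ *           return 0;
+ *   }
+ *
+ *   struct typec_retimer_desc desc = {
+ *           .fwnode = dev_fwnode(dev),
+ *           .set = my_retimer_set,
+ *           .drvdata = priv,
+ *   };
+ *   retimer = typec_retimer_register(dev, &desc);
+ */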
+
+/**
+ * typec_retimer_unregister - Unregister retimer device.
+ * @retimer: USB Type-C Connector retimer.
+ *
+ * Unregister retimer that was registered with typec_retimer_register().
+ */
+void typec_retimer_unregister(struct typec_retimer *retimer)
+{
+ if (!IS_ERR_OR_NULL(retimer))
+ device_unregister(&retimer->dev);
+}
+EXPORT_SYMBOL_GPL(typec_retimer_unregister);
+
+void *typec_retimer_get_drvdata(struct typec_retimer *retimer)
+{
+ return dev_get_drvdata(&retimer->dev);
+}
+EXPORT_SYMBOL_GPL(typec_retimer_get_drvdata);
+
+struct class retimer_class = {
+ .name = "retimer",
+ .owner = THIS_MODULE,
+};
diff --git a/drivers/usb/typec/retimer.h b/drivers/usb/typec/retimer.h
new file mode 100644
index 000000000000..fa15951d4846
--- /dev/null
+++ b/drivers/usb/typec/retimer.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_TYPEC_RETIMER__
+#define __USB_TYPEC_RETIMER__
+
+#include <linux/usb/typec_retimer.h>
+
+struct typec_retimer {
+ struct device dev;
+ typec_retimer_set_fn_t set;
+};
+
+#define to_typec_retimer(_dev_) container_of(_dev_, struct typec_retimer, dev)
+
+#endif /* __USB_TYPEC_RETIMER__ */
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index f33e08eb7670..812784702d53 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -13,11 +13,10 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
+#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
-#include "tcpci.h"
-
#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
deleted file mode 100644
index b2edd45f13c6..000000000000
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright 2015-2017 Google, Inc
- *
- * USB Type-C Port Controller Interface.
- */
-
-#ifndef __LINUX_USB_TCPCI_H
-#define __LINUX_USB_TCPCI_H
-
-#include <linux/usb/typec.h>
-
-#define TCPC_VENDOR_ID 0x0
-#define TCPC_PRODUCT_ID 0x2
-#define TCPC_BCD_DEV 0x4
-#define TCPC_TC_REV 0x6
-#define TCPC_PD_REV 0x8
-#define TCPC_PD_INT_REV 0xa
-
-#define TCPC_ALERT 0x10
-#define TCPC_ALERT_EXTND BIT(14)
-#define TCPC_ALERT_EXTENDED_STATUS BIT(13)
-#define TCPC_ALERT_VBUS_DISCNCT BIT(11)
-#define TCPC_ALERT_RX_BUF_OVF BIT(10)
-#define TCPC_ALERT_FAULT BIT(9)
-#define TCPC_ALERT_V_ALARM_LO BIT(8)
-#define TCPC_ALERT_V_ALARM_HI BIT(7)
-#define TCPC_ALERT_TX_SUCCESS BIT(6)
-#define TCPC_ALERT_TX_DISCARDED BIT(5)
-#define TCPC_ALERT_TX_FAILED BIT(4)
-#define TCPC_ALERT_RX_HARD_RST BIT(3)
-#define TCPC_ALERT_RX_STATUS BIT(2)
-#define TCPC_ALERT_POWER_STATUS BIT(1)
-#define TCPC_ALERT_CC_STATUS BIT(0)
-
-#define TCPC_ALERT_MASK 0x12
-#define TCPC_POWER_STATUS_MASK 0x14
-#define TCPC_FAULT_STATUS_MASK 0x15
-
-#define TCPC_EXTENDED_STATUS_MASK 0x16
-#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0)
-
-#define TCPC_ALERT_EXTENDED_MASK 0x17
-#define TCPC_SINK_FAST_ROLE_SWAP BIT(0)
-
-#define TCPC_CONFIG_STD_OUTPUT 0x18
-
-#define TCPC_TCPC_CTRL 0x19
-#define TCPC_TCPC_CTRL_ORIENTATION BIT(0)
-#define PLUG_ORNT_CC1 0
-#define PLUG_ORNT_CC2 1
-#define TCPC_TCPC_CTRL_BIST_TM BIT(1)
-#define TCPC_TCPC_CTRL_EN_LK4CONN_ALRT BIT(6)
-
-#define TCPC_EXTENDED_STATUS 0x20
-#define TCPC_EXTENDED_STATUS_VSAFE0V BIT(0)
-
-#define TCPC_ROLE_CTRL 0x1a
-#define TCPC_ROLE_CTRL_DRP BIT(6)
-#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
-#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
-#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
-#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
-#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
-#define TCPC_ROLE_CTRL_CC2_SHIFT 2
-#define TCPC_ROLE_CTRL_CC2_MASK 0x3
-#define TCPC_ROLE_CTRL_CC1_SHIFT 0
-#define TCPC_ROLE_CTRL_CC1_MASK 0x3
-#define TCPC_ROLE_CTRL_CC_RA 0x0
-#define TCPC_ROLE_CTRL_CC_RP 0x1
-#define TCPC_ROLE_CTRL_CC_RD 0x2
-#define TCPC_ROLE_CTRL_CC_OPEN 0x3
-
-#define TCPC_FAULT_CTRL 0x1b
-
-#define TCPC_POWER_CTRL 0x1c
-#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0)
-#define TCPC_POWER_CTRL_BLEED_DISCHARGE BIT(3)
-#define TCPC_POWER_CTRL_AUTO_DISCHARGE BIT(4)
-#define TCPC_DIS_VOLT_ALRM BIT(5)
-#define TCPC_POWER_CTRL_VBUS_VOLT_MON BIT(6)
-#define TCPC_FAST_ROLE_SWAP_EN BIT(7)
-
-#define TCPC_CC_STATUS 0x1d
-#define TCPC_CC_STATUS_TOGGLING BIT(5)
-#define TCPC_CC_STATUS_TERM BIT(4)
-#define TCPC_CC_STATUS_TERM_RP 0
-#define TCPC_CC_STATUS_TERM_RD 1
-#define TCPC_CC_STATE_SRC_OPEN 0
-#define TCPC_CC_STATUS_CC2_SHIFT 2
-#define TCPC_CC_STATUS_CC2_MASK 0x3
-#define TCPC_CC_STATUS_CC1_SHIFT 0
-#define TCPC_CC_STATUS_CC1_MASK 0x3
-
-#define TCPC_POWER_STATUS 0x1e
-#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
-#define TCPC_POWER_STATUS_UNINIT BIT(6)
-#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
-#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
-#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
-#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
-#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
-
-#define TCPC_FAULT_STATUS 0x1f
-
-#define TCPC_ALERT_EXTENDED 0x21
-
-#define TCPC_COMMAND 0x23
-#define TCPC_CMD_WAKE_I2C 0x11
-#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22
-#define TCPC_CMD_ENABLE_VBUS_DETECT 0x33
-#define TCPC_CMD_DISABLE_SINK_VBUS 0x44
-#define TCPC_CMD_SINK_VBUS 0x55
-#define TCPC_CMD_DISABLE_SRC_VBUS 0x66
-#define TCPC_CMD_SRC_VBUS_DEFAULT 0x77
-#define TCPC_CMD_SRC_VBUS_HIGH 0x88
-#define TCPC_CMD_LOOK4CONNECTION 0x99
-#define TCPC_CMD_RXONEMORE 0xAA
-#define TCPC_CMD_I2C_IDLE 0xFF
-
-#define TCPC_DEV_CAP_1 0x24
-#define TCPC_DEV_CAP_2 0x26
-#define TCPC_STD_INPUT_CAP 0x28
-#define TCPC_STD_OUTPUT_CAP 0x29
-
-#define TCPC_MSG_HDR_INFO 0x2e
-#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
-#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
-#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
-#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
-
-#define TCPC_RX_DETECT 0x2f
-#define TCPC_RX_DETECT_HARD_RESET BIT(5)
-#define TCPC_RX_DETECT_SOP BIT(0)
-#define TCPC_RX_DETECT_SOP1 BIT(1)
-#define TCPC_RX_DETECT_SOP2 BIT(2)
-#define TCPC_RX_DETECT_DBG1 BIT(3)
-#define TCPC_RX_DETECT_DBG2 BIT(4)
-
-#define TCPC_RX_BYTE_CNT 0x30
-#define TCPC_RX_BUF_FRAME_TYPE 0x31
-#define TCPC_RX_BUF_FRAME_TYPE_SOP 0
-#define TCPC_RX_HDR 0x32
-#define TCPC_RX_DATA 0x34 /* through 0x4f */
-
-#define TCPC_TRANSMIT 0x50
-#define TCPC_TRANSMIT_RETRY_SHIFT 4
-#define TCPC_TRANSMIT_RETRY_MASK 0x3
-#define TCPC_TRANSMIT_TYPE_SHIFT 0
-#define TCPC_TRANSMIT_TYPE_MASK 0x7
-
-#define TCPC_TX_BYTE_CNT 0x51
-#define TCPC_TX_HDR 0x52
-#define TCPC_TX_DATA 0x54 /* through 0x6f */
-
-#define TCPC_VBUS_VOLTAGE 0x70
-#define TCPC_VBUS_VOLTAGE_MASK 0x3ff
-#define TCPC_VBUS_VOLTAGE_LSB_MV 25
-#define TCPC_VBUS_SINK_DISCONNECT_THRESH 0x72
-#define TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV 25
-#define TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX 0x3ff
-#define TCPC_VBUS_STOP_DISCHARGE_THRESH 0x74
-#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76
-#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78
-
-/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible I2C_WRITE_BYTE_COUNT */
-#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31
-
-struct tcpci;
-
-/*
- * @TX_BUF_BYTE_x_hidden:
- * optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT.
- * @frs_sourcing_vbus:
- * Optional; Callback to perform chip specific operations when FRS
- * is sourcing vbus.
- * @auto_discharge_disconnect:
- * Optional; Enables TCPC to autonously discharge vbus on disconnect.
- * @vbus_vsafe0v:
- * optional; Set when TCPC can detect whether vbus is at VSAFE0V.
- * @set_partner_usb_comm_capable:
- * Optional; The USB Communications Capable bit indicates if port
- * partner is capable of communication over the USB data lines
- * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
- */
-struct tcpci_data {
- struct regmap *regmap;
- unsigned char TX_BUF_BYTE_x_hidden:1;
- unsigned char auto_discharge_disconnect:1;
- unsigned char vbus_vsafe0v:1;
-
- int (*init)(struct tcpci *tcpci, struct tcpci_data *data);
- int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data,
- bool enable);
- int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data,
- enum typec_cc_status cc);
- int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
- void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
- void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data,
- bool capable);
-};
-
-struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
-void tcpci_unregister_port(struct tcpci *tcpci);
-irqreturn_t tcpci_irq(struct tcpci *tcpci);
-
-struct tcpm_port;
-struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci);
-#endif /* __LINUX_USB_TCPCI_H */
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index df2505570f07..4b6705f3d7b7 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -11,11 +11,10 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
+#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
-#include "tcpci.h"
-
#define PD_ACTIVITY_TIMEOUT_MS 10000
#define TCPC_VENDOR_ALERT 0x80
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
index 8a952eaf9016..1b7c31278ebb 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6360.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
@@ -11,10 +11,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
-#include "tcpci.h"
-
#define MT6360_REG_PHYCTRL1 0x80
#define MT6360_REG_PHYCTRL3 0x82
#define MT6360_REG_PHYCTRL7 0x86
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index b56a0880a044..3291ca4948da 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -10,9 +10,9 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/gpio/consumer.h>
+#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/regmap.h>
-#include "tcpci.h"
#define RT1711H_VID 0x29CF
#define RT1711H_PID 0x1711
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 3bc2f4ebd1fe..ea5a917c51b1 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -394,6 +394,14 @@ struct tcpm_port {
bool explicit_contract;
unsigned int rx_msgid;
+ /* USB PD objects */
+ struct usb_power_delivery *pd;
+ struct usb_power_delivery_capabilities *port_source_caps;
+ struct usb_power_delivery_capabilities *port_sink_caps;
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_source_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
+
/* Partner capabilities/requests */
u32 sink_request;
u32 source_caps[PDO_MAX_OBJECTS];
@@ -471,7 +479,7 @@ struct tcpm_port {
/*
* When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
- * the actual currrent limit after RX of PD_CTRL_PSRDY for PD link,
+ * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
* SNK_READY for non-pd link.
*/
bool slow_charger_loop;
@@ -2352,6 +2360,52 @@ static void tcpm_pd_handle_msg(struct tcpm_port *port,
}
}
+static int tcpm_register_source_caps(struct tcpm_port *port)
+{
+ struct usb_power_delivery_desc desc = { port->negotiated_rev };
+ struct usb_power_delivery_capabilities_desc caps = { };
+ struct usb_power_delivery_capabilities *cap;
+
+ if (!port->partner_pd)
+ port->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(port->partner_pd))
+ return PTR_ERR(port->partner_pd);
+
+ memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
+ caps.role = TYPEC_SOURCE;
+
+ cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+
+ port->partner_source_caps = cap;
+
+ return 0;
+}
+
+static int tcpm_register_sink_caps(struct tcpm_port *port)
+{
+ struct usb_power_delivery_desc desc = { port->negotiated_rev };
+ struct usb_power_delivery_capabilities_desc caps = { };
+ struct usb_power_delivery_capabilities *cap;
+
+ if (!port->partner_pd)
+ port->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(port->partner_pd))
+ return PTR_ERR(port->partner_pd);
+
+ memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
+ caps.role = TYPEC_SINK;
+
+ cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+
+ port->partner_sink_caps = cap;
+
+ return 0;
+}
+
static void tcpm_pd_data_request(struct tcpm_port *port,
const struct pd_message *msg)
{
@@ -2381,6 +2435,8 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
tcpm_validate_caps(port, port->source_caps,
port->nr_source_caps);
+ tcpm_register_source_caps(port);
+
/*
* Adjust revision in subsequent message headers, as required,
* to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
@@ -2488,6 +2544,8 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
port->nr_sink_caps = cnt;
port->sink_cap_done = true;
+ tcpm_register_sink_caps(port);
+
if (port->ams == GET_SINK_CAPABILITIES)
tcpm_set_state(port, ready_state(port), 0);
/* Unexpected Sink Capabilities */
@@ -3554,6 +3612,7 @@ static void tcpm_typec_connect(struct tcpm_port *port)
port->partner = typec_register_partner(port->typec_port,
&port->partner_desc);
port->connected = true;
+ typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
}
}
@@ -3622,6 +3681,7 @@ out_disable_mux:
static void tcpm_typec_disconnect(struct tcpm_port *port)
{
if (port->connected) {
+ typec_partner_set_usb_power_delivery(port->partner, NULL);
typec_unregister_partner(port->partner);
port->partner = NULL;
port->connected = false;
@@ -3684,6 +3744,13 @@ static void tcpm_reset_port(struct tcpm_port *port)
port->sink_cap_done = false;
if (port->tcpc->enable_frs)
port->tcpc->enable_frs(port->tcpc, false);
+
+ usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
+ port->partner_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
+ usb_power_delivery_unregister(port->partner_pd);
+ port->partner_pd = NULL;
}
static void tcpm_detach(struct tcpm_port *port)
@@ -4453,7 +4520,7 @@ static void run_state_machine(struct tcpm_port *port)
* The specification suggests that dual mode ports in sink
* mode should transition to state PE_SRC_Transition_to_default.
* See USB power delivery specification chapter 8.3.3.6.1.3.
- * This would mean to to
+ * This would mean to
* - turn off VCONN, reset power supply
* - request hardware reset
* - turn on VCONN
@@ -5924,6 +5991,68 @@ void tcpm_tcpc_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
+static void tcpm_port_unregister_pd(struct tcpm_port *port)
+{
+ usb_power_delivery_unregister_capabilities(port->port_sink_caps);
+ port->port_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(port->port_source_caps);
+ port->port_source_caps = NULL;
+ usb_power_delivery_unregister(port->pd);
+ port->pd = NULL;
+}
+
+static int tcpm_port_register_pd(struct tcpm_port *port)
+{
+ struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
+ struct usb_power_delivery_capabilities_desc caps = { };
+ struct usb_power_delivery_capabilities *cap;
+ int ret;
+
+ if (!port->nr_src_pdo && !port->nr_snk_pdo)
+ return 0;
+
+ port->pd = usb_power_delivery_register(port->dev, &desc);
+ if (IS_ERR(port->pd)) {
+ ret = PTR_ERR(port->pd);
+ goto err_unregister;
+ }
+
+ if (port->nr_src_pdo) {
+ memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->src_pdo,
+ port->nr_src_pdo * sizeof(u32), 0);
+ caps.role = TYPEC_SOURCE;
+
+ cap = usb_power_delivery_register_capabilities(port->pd, &caps);
+ if (IS_ERR(cap)) {
+ ret = PTR_ERR(cap);
+ goto err_unregister;
+ }
+
+ port->port_source_caps = cap;
+ }
+
+ if (port->nr_snk_pdo) {
+ memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->snk_pdo,
+ port->nr_snk_pdo * sizeof(u32), 0);
+ caps.role = TYPEC_SINK;
+
+ cap = usb_power_delivery_register_capabilities(port->pd, &caps);
+ if (IS_ERR(cap)) {
+ ret = PTR_ERR(cap);
+ goto err_unregister;
+ }
+
+ port->port_sink_caps = cap;
+ }
+
+ return 0;
+
+err_unregister:
+ tcpm_port_unregister_pd(port);
+
+ return ret;
+}
+
static int tcpm_fw_get_caps(struct tcpm_port *port,
struct fwnode_handle *fwnode)
{
@@ -6382,10 +6511,16 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_role_sw_put;
power_supply_changed(port->psy);
+ err = tcpm_port_register_pd(port);
+ if (err)
+ goto out_role_sw_put;
+
+ port->typec_caps.pd = port->pd;
+
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (IS_ERR(port->typec_port)) {
err = PTR_ERR(port->typec_port);
- goto out_role_sw_put;
+ goto out_unregister_pd;
}
typec_port_register_altmodes(port->typec_port,
@@ -6400,6 +6535,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
tcpm_log(port, "%s: registered", dev_name(dev));
return port;
+out_unregister_pd:
+ tcpm_port_unregister_pd(port);
out_role_sw_put:
usb_role_switch_put(port->role_sw);
out_destroy_wq:
@@ -6422,6 +6559,9 @@ void tcpm_unregister_port(struct tcpm_port *port)
hrtimer_cancel(&port->state_machine_timer);
tcpm_reset_port(port);
+
+ tcpm_port_unregister_pd(port);
+
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
typec_unregister_port(port->typec_port);
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 5e9b37b3f25e..8f9c4b9f31f7 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -48,4 +48,14 @@ config UCSI_ACPI
To compile the driver as a module, choose M here: the module will be
called ucsi_acpi
+config UCSI_STM32G0
+ tristate "UCSI Interface Driver for STM32G0"
+ depends on I2C
+ help
+ This driver enables UCSI support on platforms that expose an STM32G0
+ Type-C controller over an I2C interface.
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi_stm32g0.
+
endif
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 8a8eb5cb8e0f..480d533d762f 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -17,3 +17,4 @@ endif
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
+obj-$(CONFIG_UCSI_STM32G0) += ucsi_stm32g0.o
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index cbd862f9f2a1..1aea46493b85 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -76,6 +76,10 @@ static int ucsi_read_error(struct ucsi *ucsi)
if (ret)
return ret;
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
+
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 6db7c8ddd51c..5c0bf48be766 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -627,6 +627,16 @@ err_clear_irq:
return IRQ_HANDLED;
}
+static int ccg_request_irq(struct ucsi_ccg *uc)
+{
+ unsigned long flags = IRQF_ONESHOT;
+
+ if (!has_acpi_companion(uc->dev))
+ flags |= IRQF_TRIGGER_HIGH;
+
+ return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
+}
+
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
@@ -1250,9 +1260,7 @@ static int ccg_restart(struct ucsi_ccg *uc)
return status;
}
- status = request_threaded_irq(uc->irq, NULL, ccg_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
- dev_name(dev), uc);
+ status = ccg_request_irq(uc);
if (status < 0) {
dev_err(dev, "request_threaded_irq failed - %d\n", status);
return status;
@@ -1331,6 +1339,7 @@ static int ucsi_ccg_probe(struct i2c_client *client,
uc->dev = dev;
uc->client = client;
+ uc->irq = client->irq;
mutex_init(&uc->lock);
init_completion(&uc->complete);
INIT_WORK(&uc->work, ccg_update_firmware);
@@ -1366,16 +1375,12 @@ static int ucsi_ccg_probe(struct i2c_client *client,
ucsi_set_drvdata(uc->ucsi, uc);
- status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
- dev_name(dev), uc);
+ status = ccg_request_irq(uc);
if (status < 0) {
dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
goto out_ucsi_destroy;
}
- uc->irq = client->irq;
-
status = ucsi_register(uc->ucsi);
if (status)
goto out_free_irq;
@@ -1418,6 +1423,12 @@ static const struct i2c_device_id ucsi_ccg_device_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+static const struct acpi_device_id amd_i2c_ucsi_match[] = {
+ {"AMDI0042"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
+
static int ucsi_ccg_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1459,6 +1470,7 @@ static struct i2c_driver ucsi_ccg_driver = {
.name = "ucsi_ccg",
.pm = &ucsi_ccg_pm,
.dev_groups = ucsi_ccg_groups,
+ .acpi_match_table = amd_i2c_ucsi_match,
},
.probe = ucsi_ccg_probe,
.remove = ucsi_ccg_remove,
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
new file mode 100644
index 000000000000..061551d464f1
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+/*
+ * UCSI driver for STMicroelectronics STM32G0 Type-C PD controller
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@foss.st.com>.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/unaligned.h>
+
+#include "ucsi.h"
+
+/* STM32G0 I2C bootloader addr: 0b1010001x (See AN2606) */
+#define STM32G0_I2C_BL_ADDR (0xa2 >> 1)
+
+/* STM32G0 I2C bootloader max data size */
+#define STM32G0_I2C_BL_SZ 256
+
+/* STM32 I2C bootloader commands (See AN4221) */
+#define STM32_CMD_GVR 0x01 /* Gets the bootloader version */
+#define STM32_CMD_GVR_LEN 1
+#define STM32_CMD_RM 0x11 /* Read memory */
+#define STM32_CMD_WM 0x31 /* Write memory */
+#define STM32_CMD_ADDR_LEN 5 /* Address len for go, mem write... */
+#define STM32_CMD_ERASE 0x44 /* Erase page, bank or all */
+#define STM32_CMD_ERASE_SPECIAL_LEN 3
+#define STM32_CMD_GLOBAL_MASS_ERASE 0xffff /* All-bank erase */
+
+/* STM32 I2C bootloader answer status */
+#define STM32G0_I2C_BL_ACK 0x79
+#define STM32G0_I2C_BL_NACK 0x1f
+#define STM32G0_I2C_BL_BUSY 0x76
+
+/* STM32G0 flash definitions */
+#define STM32G0_USER_OPTION_BYTES 0x1fff7800
+#define STM32G0_USER_OB_NBOOT0 BIT(26)
+#define STM32G0_USER_OB_NBOOT_SEL BIT(24)
+#define STM32G0_USER_OB_BOOT_MAIN (STM32G0_USER_OB_NBOOT0 | STM32G0_USER_OB_NBOOT_SEL)
+#define STM32G0_MAIN_MEM_ADDR 0x08000000
+
+/* STM32 Firmware definitions: additional commands */
+#define STM32G0_FW_GETVER 0x00 /* Gets the firmware version */
+#define STM32G0_FW_GETVER_LEN 4
+#define STM32G0_FW_RSTGOBL 0x21 /* Reset and go to bootloader */
+#define STM32G0_FW_KEYWORD 0xa56959a6
+
+/* ucsi_stm32g0_fw_info is located at the end of the firmware image */
+struct ucsi_stm32g0_fw_info {
+ u32 version;
+ u32 keyword;
+};
+
+struct ucsi_stm32g0 {
+ struct i2c_client *client;
+ struct i2c_client *i2c_bl;
+ bool in_bootloader;
+ u8 bl_version;
+ struct completion complete;
+ struct device *dev;
+ unsigned long flags;
+ const char *fw_name;
+ struct ucsi *ucsi;
+ bool suspended;
+ bool wakeup_event;
+};
+
+/*
+ * Bootloader command helpers:
+ * - send command (2 bytes)
+ * - check ack
+ * Then either:
+ * - receive data
+ * - receive data + check ack
+ * - send data + check ack
+ * These operations depend on the command and have varying lengths.
+ */
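+/*
+ * Worked example based on the definitions above: the Get Version command
+ * sends 0x01 followed by its complement 0xfe, expects the 0x79 ACK byte,
+ * then reads the single version byte and checks for a final ACK.
+ */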
+static int ucsi_stm32g0_bl_check_ack(struct ucsi *ucsi)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->i2c_bl;
+ unsigned char ack;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &ack,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(g0->dev, "i2c bl ack (%02x), error: %d\n", client->addr, ret);
+
+ return ret < 0 ? ret : -EIO;
+ }
+
+ /* The 'ack' byte should contain bootloader answer: ack/nack/busy */
+ switch (ack) {
+ case STM32G0_I2C_BL_ACK:
+ return 0;
+ case STM32G0_I2C_BL_NACK:
+ return -ENOENT;
+ case STM32G0_I2C_BL_BUSY:
+ return -EBUSY;
+ default:
+ dev_err(g0->dev, "i2c bl ack (%02x), invalid byte: %02x\n",
+ client->addr, ack);
+ return -EINVAL;
+ }
+}
+
+static int ucsi_stm32g0_bl_cmd_check_ack(struct ucsi *ucsi, unsigned int cmd, bool check_ack)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->i2c_bl;
+ unsigned char buf[2];
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ };
+ int ret;
+
+ /*
+ * The STM32 bootloader command format is two bytes:
+ * - command code
+ * - XOR'ed command code
+ */
+ buf[0] = cmd;
+ buf[1] = cmd ^ 0xff;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_dbg(g0->dev, "i2c bl cmd %d (%02x), error: %d\n", cmd, client->addr, ret);
+
+ return ret < 0 ? ret : -EIO;
+ }
+
+ if (check_ack)
+ return ucsi_stm32g0_bl_check_ack(ucsi);
+
+ return 0;
+}
+
+static int ucsi_stm32g0_bl_cmd(struct ucsi *ucsi, unsigned int cmd)
+{
+ return ucsi_stm32g0_bl_cmd_check_ack(ucsi, cmd, true);
+}
+
+static int ucsi_stm32g0_bl_rcv_check_ack(struct ucsi *ucsi, void *data, size_t len, bool check_ack)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->i2c_bl;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(g0->dev, "i2c bl rcv %02x, error: %d\n", client->addr, ret);
+
+ return ret < 0 ? ret : -EIO;
+ }
+
+ if (check_ack)
+ return ucsi_stm32g0_bl_check_ack(ucsi);
+
+ return 0;
+}
+
+static int ucsi_stm32g0_bl_rcv(struct ucsi *ucsi, void *data, size_t len)
+{
+ return ucsi_stm32g0_bl_rcv_check_ack(ucsi, data, len, true);
+}
+
+static int ucsi_stm32g0_bl_rcv_woack(struct ucsi *ucsi, void *data, size_t len)
+{
+ return ucsi_stm32g0_bl_rcv_check_ack(ucsi, data, len, false);
+}
+
+static int ucsi_stm32g0_bl_send(struct ucsi *ucsi, void *data, size_t len)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->i2c_bl;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = len,
+ .buf = data,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(g0->dev, "i2c bl send %02x, error: %d\n", client->addr, ret);
+
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return ucsi_stm32g0_bl_check_ack(ucsi);
+}
+
+/* Bootloader commands */
+static int ucsi_stm32g0_bl_get_version(struct ucsi *ucsi, u8 *bl_version)
+{
+ int ret;
+
+ ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_GVR);
+ if (ret)
+ return ret;
+
+ return ucsi_stm32g0_bl_rcv(ucsi, bl_version, STM32_CMD_GVR_LEN);
+}
+
+static int ucsi_stm32g0_bl_send_addr(struct ucsi *ucsi, u32 addr)
+{
+ u8 data8[STM32_CMD_ADDR_LEN];
+
+ /* Address format: 4 bytes addr (MSB first) + XOR'ed addr bytes */
+ put_unaligned_be32(addr, data8);
+ data8[4] = data8[0] ^ data8[1] ^ data8[2] ^ data8[3];
+
+ return ucsi_stm32g0_bl_send(ucsi, data8, STM32_CMD_ADDR_LEN);
+}
+
+static int ucsi_stm32g0_bl_global_mass_erase(struct ucsi *ucsi)
+{
+ u8 data8[4];
+ u16 *data16 = (u16 *)&data8[0];
+ int ret;
+
+ data16[0] = STM32_CMD_GLOBAL_MASS_ERASE;
+ data8[2] = data8[0] ^ data8[1];
+
+ ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_ERASE);
+ if (ret)
+ return ret;
+
+ return ucsi_stm32g0_bl_send(ucsi, data8, STM32_CMD_ERASE_SPECIAL_LEN);
+}
+
+static int ucsi_stm32g0_bl_write(struct ucsi *ucsi, u32 addr, const void *data, size_t len)
+{
+ u8 *data8;
+ int i, ret;
+
+ if (!len || len > STM32G0_I2C_BL_SZ)
+ return -EINVAL;
+
+ /* Write memory: 1 byte holding (len - 1), up to 256 data bytes, then 1 XOR checksum byte */
+ data8 = kmalloc(STM32G0_I2C_BL_SZ + 2, GFP_KERNEL);
+ if (!data8)
+ return -ENOMEM;
+
+ ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_WM);
+ if (ret)
+ goto free;
+
+ ret = ucsi_stm32g0_bl_send_addr(ucsi, addr);
+ if (ret)
+ goto free;
+
+ data8[0] = len - 1;
+ memcpy(data8 + 1, data, len);
+ data8[len + 1] = data8[0];
+ for (i = 1; i <= len; i++)
+ data8[len + 1] ^= data8[i];
+
+ ret = ucsi_stm32g0_bl_send(ucsi, data8, len + 2);
+free:
+ kfree(data8);
+
+ return ret;
+}
+
+static int ucsi_stm32g0_bl_read(struct ucsi *ucsi, u32 addr, void *data, size_t len)
+{
+ int ret;
+
+ if (!len || len > STM32G0_I2C_BL_SZ)
+ return -EINVAL;
+
+ ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_RM);
+ if (ret)
+ return ret;
+
+ ret = ucsi_stm32g0_bl_send_addr(ucsi, addr);
+ if (ret)
+ return ret;
+
+ ret = ucsi_stm32g0_bl_cmd(ucsi, len - 1);
+ if (ret)
+ return ret;
+
+ return ucsi_stm32g0_bl_rcv_woack(ucsi, data, len);
+}
+
+/* Firmware commands (the same address as the bootloader) */
+static int ucsi_stm32g0_fw_cmd(struct ucsi *ucsi, unsigned int cmd)
+{
+ return ucsi_stm32g0_bl_cmd_check_ack(ucsi, cmd, false);
+}
+
+static int ucsi_stm32g0_fw_rcv(struct ucsi *ucsi, void *data, size_t len)
+{
+ return ucsi_stm32g0_bl_rcv_woack(ucsi, data, len);
+}
+
+/* UCSI ops */
+static int ucsi_stm32g0_read(struct ucsi *ucsi, unsigned int offset, void *val, size_t len)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->client;
+ u8 reg = offset;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(g0->dev, "i2c read %02x, %02x error: %d\n", client->addr, reg, ret);
+
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+static int ucsi_stm32g0_async_write(struct ucsi *ucsi, unsigned int offset, const void *val,
+ size_t len)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->client;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ }
+ };
+ unsigned char *buf;
+ int ret;
+
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = offset;
+ memcpy(&buf[1], val, len);
+ msg[0].len = len + 1;
+ msg[0].buf = buf;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ kfree(buf);
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(g0->dev, "i2c write %02x, %02x error: %d\n", client->addr, offset, ret);
+
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const void *val,
+ size_t len)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ set_bit(COMMAND_PENDING, &g0->flags);
+
+ ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
+ if (ret)
+ goto out_clear_bit;
+
+ if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
+
+out_clear_bit:
+ clear_bit(COMMAND_PENDING, &g0->flags);
+
+ return ret;
+}
+
+static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
+{
+ struct ucsi_stm32g0 *g0 = data;
+ u32 cci;
+ int ret;
+
+ if (g0->suspended)
+ g0->wakeup_event = true;
+
+ ret = ucsi_stm32g0_read(g0->ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return IRQ_NONE;
+
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+ if (test_bit(COMMAND_PENDING, &g0->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&g0->complete);
+
+ return IRQ_HANDLED;
+}
+
+static const struct ucsi_operations ucsi_stm32g0_ops = {
+ .read = ucsi_stm32g0_read,
+ .sync_write = ucsi_stm32g0_sync_write,
+ .async_write = ucsi_stm32g0_async_write,
+};
+
+static int ucsi_stm32g0_register(struct ucsi *ucsi)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->client;
+ int ret;
+
+ /* Request alert interrupt */
+ ret = request_threaded_irq(client->irq, NULL, ucsi_stm32g0_irq_handler, IRQF_ONESHOT,
+ dev_name(g0->dev), g0);
+ if (ret) {
+ dev_err(g0->dev, "request IRQ failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = ucsi_register(ucsi);
+ if (ret) {
+ dev_err_probe(g0->dev, ret, "ucsi_register failed\n");
+ free_irq(client->irq, g0);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ucsi_stm32g0_unregister(struct ucsi *ucsi)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ struct i2c_client *client = g0->client;
+
+ ucsi_unregister(ucsi);
+ free_irq(client->irq, g0);
+}
+
+static void ucsi_stm32g0_fw_cb(const struct firmware *fw, void *context)
+{
+ struct ucsi_stm32g0 *g0;
+ const u8 *data, *end;
+ const struct ucsi_stm32g0_fw_info *fw_info;
+ u32 addr = STM32G0_MAIN_MEM_ADDR, ob, fw_version;
+ int ret, size;
+
+ if (!context)
+ return;
+
+ g0 = ucsi_get_drvdata(context);
+
+ if (!fw)
+ goto fw_release;
+
+ fw_info = (struct ucsi_stm32g0_fw_info *)(fw->data + fw->size - sizeof(*fw_info));
+
+ if (!g0->in_bootloader) {
+ /* Read running firmware version */
+ ret = ucsi_stm32g0_fw_cmd(g0->ucsi, STM32G0_FW_GETVER);
+ if (ret) {
+ dev_err(g0->dev, "Get version cmd failed %d\n", ret);
+ goto fw_release;
+ }
+ ret = ucsi_stm32g0_fw_rcv(g0->ucsi, &fw_version,
+ STM32G0_FW_GETVER_LEN);
+ if (ret) {
+ dev_err(g0->dev, "Get version failed %d\n", ret);
+ goto fw_release;
+ }
+
+ /* Sanity check on keyword and firmware version */
+ if (fw_info->keyword != STM32G0_FW_KEYWORD || fw_info->version == fw_version)
+ goto fw_release;
+
+ dev_info(g0->dev, "Flashing FW: %08x (%08x cur)\n", fw_info->version, fw_version);
+
+ /* Switch to bootloader mode */
+ ucsi_stm32g0_unregister(g0->ucsi);
+ ret = ucsi_stm32g0_fw_cmd(g0->ucsi, STM32G0_FW_RSTGOBL);
+ if (ret) {
+ dev_err(g0->dev, "bootloader cmd failed %d\n", ret);
+ goto fw_release;
+ }
+ g0->in_bootloader = true;
+
+ /* STM32G0 reboot delay */
+ msleep(100);
+ }
+
+ ret = ucsi_stm32g0_bl_global_mass_erase(g0->ucsi);
+ if (ret) {
+ dev_err(g0->dev, "Erase failed %d\n", ret);
+ goto fw_release;
+ }
+
+ data = fw->data;
+ end = fw->data + fw->size;
+ while (data < end) {
+ if ((end - data) < STM32G0_I2C_BL_SZ)
+ size = end - data;
+ else
+ size = STM32G0_I2C_BL_SZ;
+
+ ret = ucsi_stm32g0_bl_write(g0->ucsi, addr, data, size);
+ if (ret) {
+ dev_err(g0->dev, "Write failed %d\n", ret);
+ goto fw_release;
+ }
+ addr += size;
+ data += size;
+ }
+
+ dev_dbg(g0->dev, "Configure to boot from main flash\n");
+
+ ret = ucsi_stm32g0_bl_read(g0->ucsi, STM32G0_USER_OPTION_BYTES, &ob, sizeof(ob));
+ if (ret) {
+ dev_err(g0->dev, "read user option bytes failed %d\n", ret);
+ goto fw_release;
+ }
+
+ dev_dbg(g0->dev, "STM32G0_USER_OPTION_BYTES 0x%08x\n", ob);
+
+ /* Configure user option bytes to boot from main flash next time */
+ ob |= STM32G0_USER_OB_BOOT_MAIN;
+
+ /* Writing option bytes will also reset G0 for updates to be loaded */
+ ret = ucsi_stm32g0_bl_write(g0->ucsi, STM32G0_USER_OPTION_BYTES, &ob, sizeof(ob));
+ if (ret) {
+ dev_err(g0->dev, "write user option bytes failed %d\n", ret);
+ goto fw_release;
+ }
+
+ dev_info(g0->dev, "Starting, option bytes:0x%08x\n", ob);
+
+ /* STM32G0 FW boot delay */
+ msleep(500);
+
+ /* Register UCSI interface */
+ if (!ucsi_stm32g0_register(g0->ucsi))
+ g0->in_bootloader = false;
+
+fw_release:
+ release_firmware(fw);
+}
+
+static int ucsi_stm32g0_probe_bootloader(struct ucsi *ucsi)
+{
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ int ret;
+ u16 ucsi_version;
+
+ /* firmware-name is optional */
+ if (device_property_present(g0->dev, "firmware-name")) {
+ ret = device_property_read_string(g0->dev, "firmware-name", &g0->fw_name);
+ if (ret < 0)
+ return dev_err_probe(g0->dev, ret, "Error reading firmware-name\n");
+ }
+
+ if (g0->fw_name) {
+ /* STM32G0 in bootloader mode communicates at reserved address 0x51 */
+ g0->i2c_bl = i2c_new_dummy_device(g0->client->adapter, STM32G0_I2C_BL_ADDR);
+ if (IS_ERR(g0->i2c_bl)) {
+ ret = dev_err_probe(g0->dev, PTR_ERR(g0->i2c_bl),
+ "Failed to register booloader I2C address\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Try to guess if the STM32G0 is running a UCSI firmware. First probe the UCSI FW at its
+	 * i2c address. Fall back to the bootloader i2c address only if firmware-name is specified.
+ */
+ ret = ucsi_stm32g0_read(ucsi, UCSI_VERSION, &ucsi_version, sizeof(ucsi_version));
+ if (!ret || !g0->fw_name)
+ return ret;
+
+ /* Speculatively read the bootloader version that has a known length. */
+ ret = ucsi_stm32g0_bl_get_version(ucsi, &g0->bl_version);
+ if (ret < 0) {
+ i2c_unregister_device(g0->i2c_bl);
+ return ret;
+ }
+
+ /* Device in bootloader mode */
+ g0->in_bootloader = true;
+ dev_info(g0->dev, "Bootloader Version 0x%02x\n", g0->bl_version);
+
+ return 0;
+}
+
+static int ucsi_stm32g0_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ucsi_stm32g0 *g0;
+ int ret;
+
+ g0 = devm_kzalloc(dev, sizeof(*g0), GFP_KERNEL);
+ if (!g0)
+ return -ENOMEM;
+
+ g0->dev = dev;
+ g0->client = client;
+ init_completion(&g0->complete);
+ i2c_set_clientdata(client, g0);
+
+ g0->ucsi = ucsi_create(dev, &ucsi_stm32g0_ops);
+ if (IS_ERR(g0->ucsi))
+ return PTR_ERR(g0->ucsi);
+
+ ucsi_set_drvdata(g0->ucsi, g0);
+
+ ret = ucsi_stm32g0_probe_bootloader(g0->ucsi);
+ if (ret < 0)
+ goto destroy;
+
+ /*
+ * Don't register in bootloader mode: wait for the firmware to be loaded and started before
+	 * registering the UCSI device.
+ */
+ if (!g0->in_bootloader) {
+ ret = ucsi_stm32g0_register(g0->ucsi);
+ if (ret < 0)
+ goto freei2c;
+ }
+
+ if (g0->fw_name) {
+ /*
+ * Asynchronously flash (e.g. bootloader mode) or update the running firmware,
+		 * so as not to hang the boot process
+ */
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, g0->fw_name, g0->dev,
+ GFP_KERNEL, g0->ucsi, ucsi_stm32g0_fw_cb);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "firmware request failed\n");
+ goto unregister;
+ }
+ }
+
+ return 0;
+
+unregister:
+ if (!g0->in_bootloader)
+ ucsi_stm32g0_unregister(g0->ucsi);
+freei2c:
+ if (g0->fw_name)
+ i2c_unregister_device(g0->i2c_bl);
+destroy:
+ ucsi_destroy(g0->ucsi);
+
+ return ret;
+}
+
+static int ucsi_stm32g0_remove(struct i2c_client *client)
+{
+ struct ucsi_stm32g0 *g0 = i2c_get_clientdata(client);
+
+ if (!g0->in_bootloader)
+ ucsi_stm32g0_unregister(g0->ucsi);
+ if (g0->fw_name)
+ i2c_unregister_device(g0->i2c_bl);
+ ucsi_destroy(g0->ucsi);
+
+ return 0;
+}
+
+static int ucsi_stm32g0_suspend(struct device *dev)
+{
+ struct ucsi_stm32g0 *g0 = dev_get_drvdata(dev);
+ struct i2c_client *client = g0->client;
+
+ if (g0->in_bootloader)
+ return 0;
+
+ /* Keep the interrupt disabled until the i2c bus has been resumed */
+ disable_irq(client->irq);
+
+ g0->suspended = true;
+ g0->wakeup_event = false;
+
+ if (device_may_wakeup(dev) || device_wakeup_path(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int ucsi_stm32g0_resume(struct device *dev)
+{
+ struct ucsi_stm32g0 *g0 = dev_get_drvdata(dev);
+ struct i2c_client *client = g0->client;
+
+ if (g0->in_bootloader)
+ return 0;
+
+ if (device_may_wakeup(dev) || device_wakeup_path(dev))
+ disable_irq_wake(client->irq);
+
+ enable_irq(client->irq);
+
+	/* Ensure any pending handler gets called to signal a wakeup_event */
+ synchronize_irq(client->irq);
+
+ if (g0->wakeup_event)
+ pm_wakeup_event(g0->dev, 0);
+
+ g0->suspended = false;
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ucsi_stm32g0_pm_ops, ucsi_stm32g0_suspend, ucsi_stm32g0_resume);
+
+static const struct of_device_id __maybe_unused ucsi_stm32g0_typec_of_match[] = {
+ { .compatible = "st,stm32g0-typec" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ucsi_stm32g0_typec_of_match);
+
+static const struct i2c_device_id ucsi_stm32g0_typec_i2c_devid[] = {
+ {"stm32g0-typec", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_stm32g0_typec_i2c_devid);
+
+static struct i2c_driver ucsi_stm32g0_i2c_driver = {
+ .driver = {
+ .name = "ucsi-stm32g0-i2c",
+ .of_match_table = of_match_ptr(ucsi_stm32g0_typec_of_match),
+ .pm = pm_sleep_ptr(&ucsi_stm32g0_pm_ops),
+ },
+ .probe = ucsi_stm32g0_probe,
+ .remove = ucsi_stm32g0_remove,
+ .id_table = ucsi_stm32g0_typec_i2c_devid
+};
+module_i2c_driver(ucsi_stm32g0_i2c_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32G0 Type-C controller");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:ucsi-stm32g0");
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index 1e8a23d92cb4..d4a2f30a7580 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -104,18 +104,18 @@ static int v_recv_cmd_submit(struct vudc *udc,
if (pdu->base.direction == USBIP_DIR_IN)
address |= USB_DIR_IN;
- spin_lock_irq(&udc->lock);
+ spin_lock_irqsave(&udc->lock, flags);
urb_p->ep = vudc_find_endpoint(udc, address);
if (!urb_p->ep) {
/* we don't know the type, there may be isoc data! */
dev_err(&udc->pdev->dev, "request to nonexistent endpoint");
- spin_unlock_irq(&udc->lock);
+ spin_unlock_irqrestore(&udc->lock, flags);
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
ret = -EPIPE;
goto free_urbp;
}
urb_p->type = urb_p->ep->type;
- spin_unlock_irq(&udc->lock);
+ spin_unlock_irqrestore(&udc->lock, flags);
urb_p->new = 1;
urb_p->seqnum = pdu->base.seqnum;
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index d1cf6b51bf85..c95e6b2bfd32 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -128,7 +128,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
goto unlock;
}
- spin_lock_irq(&udc->ud.lock);
+ spin_lock(&udc->ud.lock);
if (udc->ud.status != SDEV_ST_AVAILABLE) {
ret = -EINVAL;
@@ -150,7 +150,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
}
/* unlock and create threads and get tasks */
- spin_unlock_irq(&udc->ud.lock);
+ spin_unlock(&udc->ud.lock);
spin_unlock_irqrestore(&udc->lock, flags);
tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
@@ -173,14 +173,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
/* lock and update udc->ud state */
spin_lock_irqsave(&udc->lock, flags);
- spin_lock_irq(&udc->ud.lock);
+ spin_lock(&udc->ud.lock);
udc->ud.tcp_socket = socket;
udc->ud.tcp_rx = tcp_rx;
udc->ud.tcp_tx = tcp_tx;
udc->ud.status = SDEV_ST_USED;
- spin_unlock_irq(&udc->ud.lock);
+ spin_unlock(&udc->ud.lock);
ktime_get_ts64(&udc->start_time);
v_start_timer(udc);
@@ -201,12 +201,12 @@ static ssize_t usbip_sockfd_store(struct device *dev,
goto unlock;
}
- spin_lock_irq(&udc->ud.lock);
+ spin_lock(&udc->ud.lock);
if (udc->ud.status != SDEV_ST_USED) {
ret = -EINVAL;
goto unlock_ud;
}
- spin_unlock_irq(&udc->ud.lock);
+ spin_unlock(&udc->ud.lock);
usbip_event_add(&udc->ud, VUDC_EVENT_DOWN);
}
@@ -219,7 +219,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
sock_err:
sockfd_put(socket);
unlock_ud:
- spin_unlock_irq(&udc->ud.lock);
+ spin_unlock(&udc->ud.lock);
unlock:
spin_unlock_irqrestore(&udc->lock, flags);
mutex_unlock(&udc->ud.sysfs_lock);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..75a703b803a2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -29,7 +29,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
vp_iowrite16(vector, &cfg->msix_config);
return vp_ioread16(&cfg->msix_config);
@@ -128,6 +127,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cap_addr(hw, &cap);
+ hw->cap_dev_config_size = le32_to_cpu(cap.length);
IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -233,15 +233,23 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
struct ifcvf_adapter *adapter;
+ u32 net_config_size = sizeof(struct virtio_net_config);
+ u32 blk_config_size = sizeof(struct virtio_blk_config);
+ u32 cap_size = hw->cap_dev_config_size;
u32 config_size;
adapter = vf_to_adapter(hw);
+ /* If the onboard device config space size is greater than
+	 * the size of struct virtio_net/blk_config, only the size
+	 * defined by the spec is returned. This is very unlikely,
+	 * but handled defensively.
+ */
switch (hw->dev_type) {
case VIRTIO_ID_NET:
- config_size = sizeof(struct virtio_net_config);
+ config_size = min(cap_size, net_config_size);
break;
case VIRTIO_ID_BLOCK:
- config_size = sizeof(struct virtio_blk_config);
+ config_size = min(cap_size, blk_config_size);
break;
default:
config_size = 0;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..f5563f665cc6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -87,6 +87,8 @@ struct ifcvf_hw {
int config_irq;
int vqs_reused_irq;
u16 nr_vring;
+ /* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ u32 cap_dev_config_size;
};
struct ifcvf_adapter {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 0a5670729412..f9c0044c6442 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -685,7 +685,7 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
}
/*
- * IFCVF currently does't have on-chip IOMMU, so not
+ * IFCVF currently doesn't have on-chip IOMMU, so not
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
@@ -752,59 +752,36 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
+ struct vdpa_device *vdpa_dev;
struct pci_dev *pdev;
struct ifcvf_hw *vf;
- struct device *dev;
- int ret, i;
+ int ret;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
- if (ifcvf_mgmt_dev->adapter)
+ if (!ifcvf_mgmt_dev->adapter)
return -EOPNOTSUPP;
- pdev = ifcvf_mgmt_dev->pdev;
- dev = &pdev->dev;
- adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, 1, 1, name, false);
- if (IS_ERR(adapter)) {
- IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return PTR_ERR(adapter);
- }
-
- ifcvf_mgmt_dev->adapter = adapter;
-
+ adapter = ifcvf_mgmt_dev->adapter;
vf = &adapter->vf;
- vf->dev_type = get_dev_type(pdev);
- vf->base = pcim_iomap_table(pdev);
+ pdev = adapter->pdev;
+ vdpa_dev = &adapter->vdpa;
- adapter->pdev = pdev;
- adapter->vdpa.dma_dev = &pdev->dev;
-
- ret = ifcvf_init_hw(vf, pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- goto err;
- }
-
- for (i = 0; i < vf->nr_vring; i++)
- vf->vring[i].irq = -EINVAL;
-
- vf->hw_features = ifcvf_get_hw_features(vf);
- vf->config_size = ifcvf_get_config_size(vf);
+ if (name)
+ ret = dev_set_name(&vdpa_dev->dev, "%s", name);
+ else
+ ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
- adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
+ put_device(&adapter->vdpa.dev);
IFCVF_ERR(pdev, "Failed to register to vDPA bus");
- goto err;
+ return ret;
}
return 0;
-
-err:
- put_device(&adapter->vdpa.dev);
- return ret;
}
+
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
@@ -823,61 +800,94 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct device *dev = &pdev->dev;
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
u32 dev_type;
- int ret;
-
- ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
- if (!ifcvf_mgmt_dev) {
- IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
- return -ENOMEM;
- }
-
- dev_type = get_dev_type(pdev);
- switch (dev_type) {
- case VIRTIO_ID_NET:
- ifcvf_mgmt_dev->mdev.id_table = id_table_net;
- break;
- case VIRTIO_ID_BLOCK:
- ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
- break;
- default:
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
- ret = -EOPNOTSUPP;
- goto err;
- }
-
- ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
- ifcvf_mgmt_dev->mdev.device = dev;
- ifcvf_mgmt_dev->pdev = pdev;
+ int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to enable device\n");
- goto err;
+ return ret;
}
-
ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
IFCVF_DRIVER_NAME);
if (ret) {
IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- goto err;
+ return ret;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
IFCVF_ERR(pdev, "No usable DMA configuration\n");
- goto err;
+ return ret;
}
ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
if (ret) {
IFCVF_ERR(pdev,
"Failed for adding devres for freeing irq vectors\n");
- goto err;
+ return ret;
}
pci_set_master(pdev);
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+ if (IS_ERR(adapter)) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return PTR_ERR(adapter);
+ }
+
+ vf = &adapter->vf;
+ vf->dev_type = get_dev_type(pdev);
+ vf->base = pcim_iomap_table(pdev);
+
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ return ret;
+ }
+
+ for (i = 0; i < vf->nr_vring; i++)
+ vf->vring[i].irq = -EINVAL;
+
+ vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->adapter = adapter;
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+
ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
if (ret) {
IFCVF_ERR(pdev,
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 44104093163b..6af9fdbb86b7 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -70,6 +70,16 @@ struct mlx5_vdpa_wq_ent {
struct mlx5_vdpa_dev *mvdev;
};
+enum {
+ MLX5_VDPA_DATAVQ_GROUP,
+ MLX5_VDPA_CVQ_GROUP,
+ MLX5_VDPA_NUMVQ_GROUPS
+};
+
+enum {
+ MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -85,6 +95,7 @@ struct mlx5_vdpa_dev {
struct mlx5_vdpa_mr mr;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e85c1d71f4ed..ed100a35e596 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -164,6 +164,7 @@ struct mlx5_vdpa_net {
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
+ bool nb_registered;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
@@ -895,6 +896,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
if (err)
goto err_cmd;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
kfree(in);
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -922,6 +924,7 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
return;
}
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
umems_destroy(ndev, mvq);
}
@@ -1121,6 +1124,20 @@ err_cmd:
return err;
}
+static bool is_valid_state_change(int oldstate, int newstate)
+{
+ switch (oldstate) {
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
+ default:
+ return false;
+ }
+}
+
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
@@ -1130,6 +1147,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
void *in;
int err;
+ if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
+ return 0;
+
+ if (!is_valid_state_change(mvq->fw_state, state))
+ return -EINVAL;
+
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1440,7 +1463,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
- memset(dmac_c, 0xff, ETH_ALEN);
+ eth_broadcast_addr(dmac_c);
ether_addr_copy(dmac_v, mac);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
if (tagged) {
@@ -1992,6 +2015,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ int err;
if (!mvdev->actual_features)
return;
@@ -2005,8 +2029,16 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
}
mvq = &ndev->vqs[idx];
- if (!ready)
+ if (!ready) {
suspend_vq(ndev, mvq);
+ } else {
+ err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
+ ready = false;
+ }
+ }
+
mvq->ready = ready;
}
@@ -2095,9 +2127,14 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
return PAGE_SIZE;
}
-static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
{
- return 0;
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return MLX5_VDPA_CVQ_GROUP;
+
+ return MLX5_VDPA_DATAVQ_GROUP;
}
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
@@ -2511,6 +2548,15 @@ err_clear:
up_write(&ndev->reslock);
}
+static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
+{
+ int i;
+
+	/* By default, all groups are mapped to asid 0 */
+ for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
+ mvdev->group2asid[i] = 0;
+}
+
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2529,7 +2575,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
+ init_group_to_asid_map(mvdev);
++mvdev->generation;
+
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
@@ -2567,26 +2615,63 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
- struct vhost_iotlb *iotlb)
+static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ u64 start = 0ULL, last = 0ULL - 1;
+ struct vhost_iotlb_map *map;
+ int err = 0;
+
+ spin_lock(&mvdev->cvq.iommu_lock);
+ vhost_iotlb_reset(mvdev->cvq.iotlb);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+ map->last, map->addr, map->perm);
+ if (err)
+ goto out;
+ }
+
+out:
+ spin_unlock(&mvdev->cvq.iommu_lock);
+ return err;
+}
+
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
- down_write(&ndev->reslock);
-
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
- goto err;
+ return err;
}
if (change_map)
err = mlx5_vdpa_change_map(mvdev, iotlb);
-err:
+ return err;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err = -EINVAL;
+
+ down_write(&ndev->reslock);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+ err = set_map_data(mvdev, iotlb);
+ if (err)
+ goto out;
+ }
+
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+ err = set_map_control(mvdev, iotlb);
+
+out:
up_write(&ndev->reslock);
return err;
}
@@ -2733,6 +2818,49 @@ out_err:
return err;
}
+static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_control_vq *cvq;
+
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return;
+
+ cvq = &mvdev->cvq;
+ cvq->ready = false;
+}
+
+static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ down_write(&ndev->reslock);
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ flush_workqueue(ndev->mvdev.wq);
+ for (i = 0; i < ndev->cur_num_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ suspend_vq(ndev, mvq);
+ }
+ mlx5_vdpa_cvq_suspend(mvdev);
+ up_write(&ndev->reslock);
+ return 0;
+}
+
+static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
+ unsigned int asid)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (group >= MLX5_VDPA_NUMVQ_GROUPS)
+ return -EINVAL;
+
+ mvdev->group2asid[group] = asid;
+ return 0;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2762,7 +2890,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
.set_map = mlx5_vdpa_set_map,
+ .set_group_asid = mlx5_set_group_asid,
.free = mlx5_vdpa_free,
+ .suspend = mlx5_vdpa_suspend,
};
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -2828,6 +2958,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
mvq->index = i;
mvq->ndev = ndev;
mvq->fwqp.fw = true;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
}
for (; i < ndev->mvdev.max_vqs; i++) {
mvq = &ndev->vqs[i];
@@ -2902,13 +3033,21 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ down_read(&ndev->reslock);
+ if (!ndev->nb_registered) {
+ up_read(&ndev->reslock);
+ return NOTIFY_DONE;
+ }
wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
- if (!wqent)
+ if (!wqent) {
+ up_read(&ndev->reslock);
return NOTIFY_DONE;
+ }
wqent->mvdev = &ndev->mvdev;
INIT_WORK(&wqent->work, update_carrier);
queue_work(ndev->mvdev.wq, &wqent->work);
+ up_read(&ndev->reslock);
ret = NOTIFY_OK;
break;
default:
@@ -2982,7 +3121,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- 1, 1, name, false);
+ MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -3062,6 +3201,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
+ ndev->nb_registered = true;
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
@@ -3093,7 +3233,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ if (ndev->nb_registered) {
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ }
wq = mvdev->wq;
mvdev->wq = NULL;
destroy_workqueue(wq);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index ebf2f363fbe7..c06c02704461 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -824,11 +824,11 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
config.mac))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.status);
+ val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.mtu);
+ val_u16 = __virtio16_to_cpu(true, config.mtu);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
@@ -846,17 +846,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
- u8 status;
int err;
down_read(&vdev->cf_lock);
- status = vdev->config->get_status(vdev);
- if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
- NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
- err = -EAGAIN;
- goto out;
- }
-
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
if (!hdr) {
@@ -913,7 +905,7 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
- max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
return -EMSGSIZE;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 0f2865899647..225b7f5d8be3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
- "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
+ "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
@@ -107,6 +107,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
for (i = 0; i < vdpasim->dev_attr.nas; i++)
vhost_iotlb_reset(&vdpasim->iommu[i]);
+ vdpasim->running = true;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -291,7 +292,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
goto err_iommu;
for (i = 0; i < vdpasim->dev_attr.nas; i++)
- vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+ vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
@@ -505,6 +506,17 @@ static int vdpasim_reset(struct vdpa_device *vdpa)
return 0;
}
+static int vdpasim_suspend(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->running = false;
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -694,6 +706,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -726,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 622782e92239..061986f30911 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -66,6 +66,7 @@ struct vdpasim {
u32 generation;
u64 features;
u32 groups;
+ bool running;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 42d401d43911..c8bfea3b7db2 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -25,31 +25,49 @@
#define DRV_LICENSE "GPL v2"
#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_BLK_F_FLUSH) | \
(1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
(1ULL << VIRTIO_BLK_F_SEG_MAX) | \
(1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
(1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
- (1ULL << VIRTIO_BLK_F_MQ))
+ (1ULL << VIRTIO_BLK_F_MQ) | \
+ (1ULL << VIRTIO_BLK_F_DISCARD) | \
+ (1ULL << VIRTIO_BLK_F_WRITE_ZEROES))
#define VDPASIM_BLK_CAPACITY 0x40000
#define VDPASIM_BLK_SIZE_MAX 0x1000
#define VDPASIM_BLK_SEG_MAX 32
+#define VDPASIM_BLK_DWZ_MAX_SECTORS UINT_MAX
+
+/* 1 virtqueue, 1 address space, 1 virtqueue group */
#define VDPASIM_BLK_VQ_NUM 1
+#define VDPASIM_BLK_AS_NUM 1
+#define VDPASIM_BLK_GROUP_NUM 1
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
-static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
+static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
+ u64 num_sectors, u64 max_sectors)
{
- u64 range_sectors = range_size >> SECTOR_SHIFT;
-
- if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
- return false;
+ if (start_sector > VDPASIM_BLK_CAPACITY) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "starting sector exceeds the capacity - start: 0x%llx capacity: 0x%x\n",
+ start_sector, VDPASIM_BLK_CAPACITY);
+ }
- if (start_sector > VDPASIM_BLK_CAPACITY)
+ if (num_sectors > max_sectors) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "number of sectors exceeds the max allowed in a request - num: 0x%llx max: 0x%llx\n",
+ num_sectors, max_sectors);
return false;
+ }
- if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
+ if (num_sectors > VDPASIM_BLK_CAPACITY - start_sector) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "request exceeds the capacity - start: 0x%llx num: 0x%llx capacity: 0x%x\n",
+ start_sector, num_sectors, VDPASIM_BLK_CAPACITY);
return false;
+ }
return true;
}
@@ -63,6 +81,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
{
size_t pushed = 0, to_pull, to_push;
struct virtio_blk_outhdr hdr;
+ bool handled = false;
ssize_t bytes;
loff_t offset;
u64 sector;
@@ -76,14 +95,14 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
return false;
if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
- dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
+ dev_dbg(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
vq->out_iov.used, vq->in_iov.used);
- return false;
+ goto err;
}
if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
- dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request in header too short\n");
+ goto err;
}
/* The last byte is the status and we checked if the last iov has
@@ -96,8 +115,8 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
sizeof(hdr));
if (bytes != sizeof(hdr)) {
- dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request out header too short\n");
+ goto err;
}
to_pull -= bytes;
@@ -107,12 +126,20 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
offset = sector << SECTOR_SHIFT;
status = VIRTIO_BLK_S_OK;
+ if (type != VIRTIO_BLK_T_IN && type != VIRTIO_BLK_T_OUT &&
+ sector != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "sector must be 0 for %u request - sector: 0x%llx\n",
+ type, sector);
+ status = VIRTIO_BLK_S_IOERR;
+ goto err_status;
+ }
+
switch (type) {
case VIRTIO_BLK_T_IN:
- if (!vdpasim_blk_check_range(sector, to_push)) {
- dev_err(&vdpasim->vdpa.dev,
- "reading over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_push);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_push >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -121,7 +148,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_push);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_push);
status = VIRTIO_BLK_S_IOERR;
@@ -132,10 +159,9 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
break;
case VIRTIO_BLK_T_OUT:
- if (!vdpasim_blk_check_range(sector, to_pull)) {
- dev_err(&vdpasim->vdpa.dev,
- "writing over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_pull);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_pull >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -144,7 +170,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_pull);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_pull);
status = VIRTIO_BLK_S_IOERR;
@@ -157,7 +183,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim_blk_id,
VIRTIO_BLK_ID_BYTES);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd\n", bytes);
status = VIRTIO_BLK_S_IOERR;
break;
@@ -166,13 +192,76 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
pushed += bytes;
break;
+ case VIRTIO_BLK_T_FLUSH:
+ /* nothing to do */
+ break;
+
+ case VIRTIO_BLK_T_DISCARD:
+ case VIRTIO_BLK_T_WRITE_ZEROES: {
+ struct virtio_blk_discard_write_zeroes range;
+ u32 num_sectors, flags;
+
+ if (to_pull != sizeof(range)) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard/write_zeroes header len: 0x%zx [expected: 0x%zx]\n",
+ to_pull, sizeof(range));
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,
+ to_pull);
+ if (bytes < 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ sector = le64_to_cpu(range.sector);
+ offset = sector << SECTOR_SHIFT;
+ num_sectors = le32_to_cpu(range.num_sectors);
+ flags = le32_to_cpu(range.flags);
+
+ if (type == VIRTIO_BLK_T_DISCARD && flags != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES &&
+ flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "write_zeroes unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors,
+ VDPASIM_BLK_DWZ_MAX_SECTORS)) {
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ memset(vdpasim->buffer + offset, 0,
+ num_sectors << SECTOR_SHIFT);
+ }
+
+ break;
+ }
default:
- dev_warn(&vdpasim->vdpa.dev,
- "Unsupported request type %d\n", type);
+ dev_dbg(&vdpasim->vdpa.dev,
+ "Unsupported request type %d\n", type);
status = VIRTIO_BLK_S_IOERR;
break;
}
+err_status:
/* If some operations fail, we need to skip the remaining bytes
* to put the status in the last byte
*/
@@ -182,21 +271,25 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
/* Last byte is the status */
bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
if (bytes != 1)
- return false;
+ goto err;
pushed += bytes;
 	/* Make sure data is written before advancing index */
smp_wmb();
+ handled = true;
+
+err:
vringh_complete_iotlb(&vq->vring, vq->head, pushed);
- return true;
+ return handled;
}
static void vdpasim_blk_work(struct work_struct *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ bool reschedule = false;
int i;
spin_lock(&vdpasim->lock);
@@ -204,8 +297,12 @@ static void vdpasim_blk_work(struct work_struct *work)
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
+ if (!vdpasim->running)
+ goto out;
+
for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+ int reqs = 0;
if (!vq->ready)
continue;
@@ -218,10 +315,18 @@ static void vdpasim_blk_work(struct work_struct *work)
if (vringh_need_notify_iotlb(&vq->vring) > 0)
vringh_notify(&vq->vring);
local_bh_enable();
+
+ if (++reqs > 4) {
+ reschedule = true;
+ break;
+ }
}
}
out:
spin_unlock(&vdpasim->lock);
+
+ if (reschedule)
+ schedule_work(&vdpasim->work);
}
static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
@@ -237,6 +342,17 @@ static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ /* VIRTIO_BLK_F_DISCARD */
+ blk_config->discard_sector_alignment =
+ cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ blk_config->max_discard_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_discard_seg = cpu_to_vdpasim32(vdpasim, 1);
+ /* VIRTIO_BLK_F_WRITE_ZEROES */
+ blk_config->max_write_zeroes_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_write_zeroes_seg = cpu_to_vdpasim32(vdpasim, 1);
+
}
static void vdpasim_blk_mgmtdev_release(struct device *dev)
@@ -260,6 +376,8 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.id = VIRTIO_ID_BLOCK;
dev_attr.supported_features = VDPASIM_BLK_FEATURES;
dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
+ dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM;
+ dev_attr.nas = VDPASIM_BLK_AS_NUM;
dev_attr.config_size = sizeof(struct virtio_blk_config);
dev_attr.get_config = vdpasim_blk_get_config;
dev_attr.work_fn = vdpasim_blk_work;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 5125976a4df8..886449e88502 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -154,6 +154,9 @@ static void vdpasim_net_work(struct work_struct *work)
spin_lock(&vdpasim->lock);
+ if (!vdpasim->running)
+ goto out;
+
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 6daa3978d290..e682bc7ee6c9 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -138,18 +138,17 @@ static void do_bounce(phys_addr_t orig, void *addr, size_t size,
{
unsigned long pfn = PFN_DOWN(orig);
unsigned int offset = offset_in_page(orig);
- char *buffer;
+ struct page *page;
unsigned int sz = 0;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
- buffer = kmap_atomic(pfn_to_page(pfn));
+ page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE)
- memcpy(addr, buffer + offset, sz);
+ memcpy_from_page(addr, page, offset, sz);
else
- memcpy(buffer + offset, addr, sz);
- kunmap_atomic(buffer);
+ memcpy_to_page(page, offset, addr, sz);
size -= sz;
pfn++;
@@ -179,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR))
return;
- addr = page_address(map->bounce_page) + offset;
- do_bounce(map->orig_phys + offset, addr, sz, dir);
+ addr = kmap_local_page(map->bounce_page);
+ do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+ kunmap_local(addr);
size -= sz;
iova += sz;
}
@@ -213,21 +213,21 @@ vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
struct vduse_bounce_map *map;
struct page *page = NULL;
- spin_lock(&domain->iotlb_lock);
+ read_lock(&domain->bounce_lock);
map = &domain->bounce_maps[iova >> PAGE_SHIFT];
- if (!map->bounce_page)
+ if (domain->user_bounce_pages || !map->bounce_page)
goto out;
page = map->bounce_page;
get_page(page);
out:
- spin_unlock(&domain->iotlb_lock);
+ read_unlock(&domain->bounce_lock);
return page;
}
static void
-vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
@@ -247,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
}
}
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count)
+{
+ struct vduse_bounce_map *map;
+ int i, ret;
+
+	/* Partial mapping is not supported for now */
+ if (count != (domain->bounce_size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ write_lock(&domain->bounce_lock);
+ ret = -EEXIST;
+ if (domain->user_bounce_pages)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ map = &domain->bounce_maps[i];
+ if (map->bounce_page) {
+ /* Copy kernel page to user page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR)
+ memcpy_to_page(pages[i], 0,
+ page_address(map->bounce_page),
+ PAGE_SIZE);
+ __free_page(map->bounce_page);
+ }
+ map->bounce_page = pages[i];
+ get_page(pages[i]);
+ }
+ domain->user_bounce_pages = true;
+ ret = 0;
+out:
+ write_unlock(&domain->bounce_lock);
+
+ return ret;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long i, count;
+
+ write_lock(&domain->bounce_lock);
+ if (!domain->user_bounce_pages)
+ goto out;
+
+ count = domain->bounce_size >> PAGE_SHIFT;
+ for (i = 0; i < count; i++) {
+ struct page *page = NULL;
+
+ map = &domain->bounce_maps[i];
+ if (WARN_ON(!map->bounce_page))
+ continue;
+
+ /* Copy user page to kernel page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+ memcpy_from_page(page_address(page),
+ map->bounce_page, 0, PAGE_SIZE);
+ }
+ put_page(map->bounce_page);
+ map->bounce_page = page;
+ }
+ domain->user_bounce_pages = false;
+out:
+ write_unlock(&domain->bounce_lock);
+}
+
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
if (!domain->bounce_map)
@@ -322,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_init_bounce_map(domain))
goto err;
+ read_lock(&domain->bounce_lock);
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
- goto err;
+ goto err_unlock;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+ read_unlock(&domain->bounce_lock);
+
return iova;
+err_unlock:
+ read_unlock(&domain->bounce_lock);
err:
vduse_domain_free_iova(iovad, iova, size);
return DMA_MAPPING_ERROR;
@@ -340,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
{
struct iova_domain *iovad = &domain->stream_iovad;
+ read_lock(&domain->bounce_lock);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ read_unlock(&domain->bounce_lock);
vduse_domain_free_iova(iovad, dma_addr, size);
}
@@ -451,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
spin_lock(&domain->iotlb_lock);
vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
- vduse_domain_free_bounce_pages(domain);
+ vduse_domain_remove_user_bounce_pages(domain);
+ vduse_domain_free_kernel_bounce_pages(domain);
spin_unlock(&domain->iotlb_lock);
put_iova_domain(&domain->stream_iovad);
put_iova_domain(&domain->consistent_iovad);
@@ -511,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
goto err_file;
domain->file = file;
+ rwlock_init(&domain->bounce_lock);
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 2722d9b8e21a..4e0e50e7ac15 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -14,6 +14,7 @@
#include <linux/iova.h>
#include <linux/dma-mapping.h>
#include <linux/vhost_iotlb.h>
+#include <linux/rwlock.h>
#define IOVA_START_PFN 1
@@ -34,6 +35,8 @@ struct vduse_iova_domain {
struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
struct file *file;
+ bool user_bounce_pages;
+ rwlock_t bounce_lock;
};
int vduse_domain_set_map(struct vduse_iova_domain *domain,
@@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count);
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+
void vduse_domain_destroy(struct vduse_iova_domain *domain);
struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 3bc27de58f46..41c0b29739f1 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -21,6 +21,8 @@
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/virtio_config.h>
@@ -64,6 +66,13 @@ struct vduse_vdpa {
struct vduse_dev *dev;
};
+struct vduse_umem {
+ unsigned long iova;
+ unsigned long npages;
+ struct page **pages;
+ struct mm_struct *mm;
+};
+
struct vduse_dev {
struct vduse_vdpa *vdev;
struct device *dev;
@@ -95,6 +104,8 @@ struct vduse_dev {
u8 status;
u32 vq_num;
u32 vq_align;
+ struct vduse_umem *umem;
+ struct mutex mem_lock;
};
struct vduse_dev_msg {
@@ -917,6 +928,102 @@ unlock:
return ret;
}
+static int vduse_dev_dereg_umem(struct vduse_dev *dev,
+ u64 iova, u64 size)
+{
+ int ret;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -ENOENT;
+ if (!dev->umem)
+ goto unlock;
+
+ ret = -EINVAL;
+ if (dev->umem->iova != iova || size != dev->domain->bounce_size)
+ goto unlock;
+
+ vduse_domain_remove_user_bounce_pages(dev->domain);
+ unpin_user_pages_dirty_lock(dev->umem->pages,
+ dev->umem->npages, true);
+ atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
+ mmdrop(dev->umem->mm);
+ vfree(dev->umem->pages);
+ kfree(dev->umem);
+ dev->umem = NULL;
+ ret = 0;
+unlock:
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
+static int vduse_dev_reg_umem(struct vduse_dev *dev,
+ u64 iova, u64 uaddr, u64 size)
+{
+ struct page **page_list = NULL;
+ struct vduse_umem *umem = NULL;
+ long pinned = 0;
+ unsigned long npages, lock_limit;
+ int ret;
+
+ if (!dev->domain->bounce_map ||
+ size != dev->domain->bounce_size ||
+ iova != 0 || uaddr & ~PAGE_MASK)
+ return -EINVAL;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -EEXIST;
+ if (dev->umem)
+ goto unlock;
+
+ ret = -ENOMEM;
+ npages = size >> PAGE_SHIFT;
+ page_list = __vmalloc(array_size(npages, sizeof(struct page *)),
+ GFP_KERNEL_ACCOUNT);
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!page_list || !umem)
+ goto unlock;
+
+ mmap_read_lock(current->mm);
+
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+ if (npages + atomic64_read(&current->mm->pinned_vm) > lock_limit)
+ goto out;
+
+ pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
+ page_list, NULL);
+ if (pinned != npages) {
+ ret = pinned < 0 ? pinned : -ENOMEM;
+ goto out;
+ }
+
+ ret = vduse_domain_add_user_bounce_pages(dev->domain,
+ page_list, pinned);
+ if (ret)
+ goto out;
+
+ atomic64_add(npages, &current->mm->pinned_vm);
+
+ umem->pages = page_list;
+ umem->npages = pinned;
+ umem->iova = iova;
+ umem->mm = current->mm;
+ mmgrab(current->mm);
+
+ dev->umem = umem;
+out:
+ if (ret && pinned > 0)
+ unpin_user_pages(page_list, pinned);
+
+ mmap_read_unlock(current->mm);
+unlock:
+ if (ret) {
+ vfree(page_list);
+ kfree(umem);
+ }
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1089,6 +1196,77 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
+ case VDUSE_IOTLB_REG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_reg_umem(dev, umem.iova,
+ umem.uaddr, umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_DEREG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_dereg_umem(dev, umem.iova,
+ umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_GET_INFO: {
+ struct vduse_iova_info info;
+ struct vhost_iotlb_map *map;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ ret = -EFAULT;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ break;
+
+ ret = -EINVAL;
+ if (info.start > info.last)
+ break;
+
+ if (!is_mem_zero((const char *)info.reserved,
+ sizeof(info.reserved)))
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ info.start, info.last);
+ if (map) {
+ info.start = map->start;
+ info.last = map->last;
+ info.capability = 0;
+ if (domain->bounce_map && map->start == 0 &&
+ map->last == domain->bounce_size - 1)
+ info.capability |= VDUSE_IOVA_CAP_UMEM;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ if (!map)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &info, sizeof(info)))
+ break;
+
+ ret = 0;
+ break;
+ }
default:
ret = -ENOIOCTLCMD;
break;
@@ -1101,6 +1279,7 @@ static int vduse_dev_release(struct inode *inode, struct file *file)
{
struct vduse_dev *dev = file->private_data;
+ vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
spin_lock(&dev->msg_lock);
 	/* Make sure the inflight messages can be processed after reconnection */
list_splice_init(&dev->recv_list, &dev->send_list);
@@ -1163,6 +1342,7 @@ static struct vduse_dev *vduse_dev_create(void)
return NULL;
mutex_init(&dev->lock);
+ mutex_init(&dev->mem_lock);
spin_lock_init(&dev->msg_lock);
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index fee73f3d9480..1a32357592e3 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
+vfio-y += vfio_main.o
+
obj-$(CONFIG_VFIO) += vfio.o
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
index 4ad63ececb91..7a29f572f93d 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -39,7 +39,7 @@ struct vfio_fsl_mc_device {
struct vfio_fsl_mc_irq *mc_irqs;
};
-extern int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
u32 flags, unsigned int index,
unsigned int start, unsigned int count,
void *data);
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 4da1914425e1..f9d0c908e738 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -44,6 +44,17 @@ config VFIO_PCI_IGD
To enable Intel IGD assignment through vfio-pci, say Y.
endif
+config VFIO_PCI_ZDEV_KVM
+ bool "VFIO PCI extensions for s390x KVM passthrough"
+ depends on S390 && KVM
+ default y
+ help
+	  Support s390x-specific extensions that enhance KVM passthrough
+	  capabilities, such as interpretive execution of zPCI instructions.
+
+ To enable s390x KVM vfio-pci extensions, say Y.
+
source "drivers/vfio/pci/mlx5/Kconfig"
source "drivers/vfio/pci/hisilicon/Kconfig"
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 7052ebd893e0..24c524224da5 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
vfio-pci-core-y := vfio_pci_core.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
-vfio-pci-core-$(CONFIG_S390) += vfio_pci_zdev.o
+vfio-pci-core-$(CONFIG_VFIO_PCI_ZDEV_KVM) += vfio_pci_zdev.o
obj-$(CONFIG_VFIO_PCI_CORE) += vfio-pci-core.o
vfio-pci-y := vfio_pci.o
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 4def43f5f7b6..ea762e28c1cc 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1185,7 +1185,7 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
if (ret)
return ret;
- if (core_vdev->ops->migration_set_state) {
+ if (core_vdev->mig_ops) {
ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
if (ret) {
vfio_pci_core_disable(vdev);
@@ -1208,6 +1208,11 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
+ .migration_set_state = hisi_acc_vfio_pci_set_device_state,
+ .migration_get_state = hisi_acc_vfio_pci_get_device_state,
+};
+
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.name = "hisi-acc-vfio-pci-migration",
.open_device = hisi_acc_vfio_pci_open_device,
@@ -1219,8 +1224,6 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.mmap = hisi_acc_vfio_pci_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = hisi_acc_vfio_pci_set_device_state,
- .migration_get_state = hisi_acc_vfio_pci_get_device_state,
};
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
@@ -1272,6 +1275,8 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device
if (!ret) {
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
&hisi_acc_vfio_pci_migrn_ops);
+ hisi_acc_vdev->core_device.vdev.mig_ops =
+ &hisi_acc_vfio_pci_migrn_state_ops;
} else {
pci_warn(pdev, "migration support failed, continue with generic interface\n");
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 9b9f33ca270a..dd5d7bfe0a49 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -88,6 +88,16 @@ static int mlx5fv_vf_event(struct notifier_block *nb,
return 0;
}
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ mutex_lock(&mvdev->state_mutex);
+ mlx5vf_disable_fds(mvdev);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
if (!mvdev->migrate_cap)
@@ -98,7 +108,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
destroy_workqueue(mvdev->cb_wq);
}
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops)
{
struct pci_dev *pdev = mvdev->core_device.pdev;
int ret;
@@ -139,6 +150,7 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
mvdev->core_device.vdev.migration_flags =
VFIO_MIGRATION_STOP_COPY |
VFIO_MIGRATION_P2P;
+ mvdev->core_device.vdev.mig_ops = mig_ops;
end:
mlx5_vf_put_core_dev(mvdev->mdev);
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 6c3112fdd8b1..8208f4701a90 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -62,8 +62,10 @@ int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 0558d0649ddb..a9b63d15c5d3 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -570,10 +570,15 @@ static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
struct mlx5vf_pci_core_device *mvdev = container_of(
core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_cmd_close_migratable(mvdev);
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
+ .migration_set_state = mlx5vf_pci_set_device_state,
+ .migration_get_state = mlx5vf_pci_get_device_state,
+};
+
static const struct vfio_device_ops mlx5vf_pci_ops = {
.name = "mlx5-vfio-pci",
.open_device = mlx5vf_pci_open_device,
@@ -585,8 +590,6 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = mlx5vf_pci_set_device_state,
- .migration_get_state = mlx5vf_pci_get_device_state,
};
static int mlx5vf_pci_probe(struct pci_dev *pdev,
@@ -599,7 +602,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
if (!mvdev)
return -ENOMEM;
vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
- mlx5vf_cmd_set_migratable(mvdev);
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
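
For reference, a minimal sketch (not part of this series) of how a variant driver wires the split-out migration ops after this change; all my_* names are hypothetical and the callback bodies are elided:

static const struct vfio_migration_ops my_mig_ops = {
	/* returns a struct file * carrying the migration data stream */
	.migration_set_state = my_set_device_state,
	/* fills an enum vfio_device_mig_state */
	.migration_get_state = my_get_device_state,
};

static void my_wire_migration(struct vfio_pci_core_device *core)
{
	/*
	 * vfio_pci_core_register_device() now rejects mig_ops that lack
	 * either callback or that do not advertise STOP_COPY.
	 */
	core->vdev.mig_ops = &my_mig_ops;
	core->vdev.migration_flags = VFIO_MIGRATION_STOP_COPY;
}
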
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 9343f597182d..442d3ba4122b 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -222,7 +222,7 @@ static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
memcpy(vdev->vconfig + pos, &virt_val, count);
}
- /* Non-virtualzed and writable bits go to hardware */
+ /* Non-virtualized and writable bits go to hardware */
if (write & ~virt) {
struct pci_dev *pdev = vdev->pdev;
__le32 phys_val = 0;
@@ -1728,7 +1728,7 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
/*
* Config space, caps and ecaps are all dword aligned, so we could
* use one byte per dword to record the type. However, there are
- * no requiremenst on the length of a capability, so the gap between
+ * no requirements on the length of a capability, so the gap between
* capabilities needs byte granularity.
*/
map = kmalloc(pdev->cfg_size, GFP_KERNEL);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 756d049bd9cf..c8d3b0450fb3 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -317,10 +317,14 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
- ret = vfio_config_init(vdev);
+ ret = vfio_pci_zdev_open_device(vdev);
if (ret)
goto out_free_state;
+ ret = vfio_config_init(vdev);
+ if (ret)
+ goto out_free_zdev;
+
msix_pos = pdev->msix_cap;
if (msix_pos) {
u16 flags;
@@ -341,6 +345,8 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
return 0;
+out_free_zdev:
+ vfio_pci_zdev_close_device(vdev);
out_free_state:
kfree(vdev->pci_saved_state);
vdev->pci_saved_state = NULL;
@@ -419,6 +425,8 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
vdev->needs_reset = true;
+ vfio_pci_zdev_close_device(vdev);
+
/*
* If we have saved state, restore it. If we can reset the device,
* even better. Resetting with current state seems better than
@@ -1860,6 +1868,13 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
+ if (vdev->vdev.mig_ops) {
+ if (!(vdev->vdev.mig_ops->migration_get_state &&
+ vdev->vdev.mig_ops->migration_set_state) ||
+ !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
+ return -EINVAL;
+ }
+
/*
* Prevent binding to PFs with VFs enabled, the VFs might be in use
* by the host or other users. We cannot capture the VFs if they
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index ea4c0d2b0663..e163aa9f6144 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vfio_zdev.h>
+#include <linux/kvm_host.h>
#include <asm/pci_clp.h>
#include <asm/pci_io.h>
@@ -23,14 +24,15 @@ static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
struct vfio_device_info_cap_zpci_base cap = {
.header.id = VFIO_DEVICE_INFO_CAP_ZPCI_BASE,
- .header.version = 1,
+ .header.version = 2,
.start_dma = zdev->start_dma,
.end_dma = zdev->end_dma,
.pchid = zdev->pchid,
.vfn = zdev->vfn,
.fmb_length = zdev->fmb_length,
.pft = zdev->pft,
- .gid = zdev->pfgid
+ .gid = zdev->pfgid,
+ .fh = zdev->fh
};
return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
@@ -43,14 +45,16 @@ static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
struct vfio_device_info_cap_zpci_group cap = {
.header.id = VFIO_DEVICE_INFO_CAP_ZPCI_GROUP,
- .header.version = 1,
+ .header.version = 2,
.dasm = zdev->dma_mask,
.msi_addr = zdev->msi_addr,
.flags = VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH,
.mui = zdev->fmb_update,
.noi = zdev->max_msi,
.maxstbl = ZPCI_MAX_WRITE_SIZE,
- .version = zdev->version
+ .version = zdev->version,
+ .reserved = 0,
+ .imaxstbl = zdev->maxstbl
};
return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
@@ -136,3 +140,26 @@ int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
return ret;
}
+
+int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
+{
+ struct zpci_dev *zdev = to_zpci(vdev->pdev);
+
+ if (!zdev)
+ return -ENODEV;
+
+ if (!vdev->vdev.kvm)
+ return 0;
+
+ return kvm_s390_pci_register_kvm(zdev, vdev->vdev.kvm);
+}
+
+void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
+{
+ struct zpci_dev *zdev = to_zpci(vdev->pdev);
+
+ if (!zdev || !vdev->vdev.kvm)
+ return;
+
+ kvm_s390_pci_unregister_kvm(zdev);
+}
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 520d2a8e8375..691b43f4b2b2 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -78,21 +78,20 @@ struct vfio_platform_reset_node {
vfio_platform_reset_fn_t of_reset;
};
-extern int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev);
+int vfio_platform_probe_common(struct vfio_platform_device *vdev,
+ struct device *dev);
void vfio_platform_remove_common(struct vfio_platform_device *vdev);
-extern int vfio_platform_irq_init(struct vfio_platform_device *vdev);
-extern void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
+int vfio_platform_irq_init(struct vfio_platform_device *vdev);
+void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
-extern int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count,
- void *data);
+int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count, void *data);
-extern void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
-extern void vfio_platform_unregister_reset(const char *compat,
- vfio_platform_reset_fn_t fn);
+void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
+void vfio_platform_unregister_reset(const char *compat,
+ vfio_platform_reset_fn_t fn);
#define vfio_platform_register_reset(__compat, __reset) \
static struct vfio_platform_reset_node __reset ## _node = { \
.owner = THIS_MODULE, \
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index a67130221151..503bea6c843d 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -50,16 +50,15 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
+ struct page **pages);
+ void (*unpin_pages)(void *iommu_data,
+ dma_addr_t user_iova, int npage);
+ void (*register_device)(void *iommu_data,
+ struct vfio_device *vdev);
+ void (*unregister_device)(void *iommu_data,
+ struct vfio_device *vdev);
int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 708a95e61831..169f07ac162d 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -378,8 +378,7 @@ static void tce_iommu_release(void *iommu_data)
kfree(container);
}
-static void tce_iommu_unuse_page(struct tce_container *container,
- unsigned long hpa)
+static void tce_iommu_unuse_page(unsigned long hpa)
{
struct page *page;
@@ -474,7 +473,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
}
- tce_iommu_unuse_page(container, oldhpa);
+ tce_iommu_unuse_page(oldhpa);
}
iommu_tce_kill(tbl, firstentry, pages);
@@ -524,7 +523,7 @@ static long tce_iommu_build(struct tce_container *container,
ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
&hpa, &dirtmp);
if (ret) {
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -532,7 +531,7 @@ static long tce_iommu_build(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
tce += IOMMU_PAGE_SIZE(tbl);
}
@@ -1266,7 +1265,10 @@ static int tce_iommu_attach_group(void *iommu_data,
goto unlock_exit;
}
- /* Check if new group has the same iommu_ops (i.e. compatible) */
+ /*
+ * Check if new group has the same iommu_table_group_ops
+ * (i.e. compatible)
+ */
list_for_each_entry(tcegrp, &container->group_list, next) {
struct iommu_table_group *table_group_tmp;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c13b9290e357..db516c90a977 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -67,7 +67,8 @@ struct vfio_iommu {
struct list_head iova_list;
struct mutex lock;
struct rb_root dma_list;
- struct blocking_notifier_head notifier;
+ struct list_head device_list;
+ struct mutex device_list_lock;
unsigned int dma_avail;
unsigned int vaddr_invalid_count;
uint64_t pgsize_bitmap;
@@ -828,9 +829,9 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_iommu_type1_pin_pages(void *iommu_data,
struct iommu_group *iommu_group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn)
+ struct page **pages)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
@@ -840,7 +841,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
bool do_accounting;
dma_addr_t iova;
- if (!iommu || !user_pfn || !phys_pfn)
+ if (!iommu || !pages)
return -EINVAL;
/* Supported for v2 version only */
@@ -856,7 +857,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
again:
if (iommu->vaddr_invalid_count) {
for (i = 0; i < npage; i++) {
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
if (ret < 0)
goto pin_done;
@@ -865,8 +866,8 @@ again:
}
}
- /* Fail if notifier list is empty */
- if (!iommu->notifier.head) {
+	/* Fail if no dma_unmap notifier is registered */
+ if (list_empty(&iommu->device_list)) {
ret = -EINVAL;
goto pin_done;
}
@@ -879,9 +880,10 @@ again:
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ unsigned long phys_pfn;
struct vfio_pfn *vpfn;
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma) {
ret = -EINVAL;
@@ -895,23 +897,25 @@ again:
vpfn = vfio_iova_get_vfio_pfn(dma, iova);
if (vpfn) {
- phys_pfn[i] = vpfn->pfn;
+ pages[i] = pfn_to_page(vpfn->pfn);
continue;
}
remote_vaddr = dma->vaddr + (iova - dma->iova);
- ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
+ ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
do_accounting);
if (ret)
goto pin_unwind;
- ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
if (ret) {
- if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+ if (put_pfn(phys_pfn, dma->prot) && do_accounting)
vfio_lock_acct(dma, -1, true);
goto pin_unwind;
}
+ pages[i] = pfn_to_page(phys_pfn);
+
if (iommu->dirty_page_tracking) {
unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
@@ -934,43 +938,38 @@ again:
goto pin_done;
pin_unwind:
- phys_pfn[i] = 0;
+ pages[i] = NULL;
for (j = 0; j < i; j++) {
dma_addr_t iova;
- iova = user_pfn[j] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * j;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
vfio_unpin_page_external(dma, iova, do_accounting);
- phys_pfn[j] = 0;
+ pages[j] = NULL;
}
pin_done:
mutex_unlock(&iommu->lock);
return ret;
}
-static int vfio_iommu_type1_unpin_pages(void *iommu_data,
- unsigned long *user_pfn,
- int npage)
+static void vfio_iommu_type1_unpin_pages(void *iommu_data,
+ dma_addr_t user_iova, int npage)
{
struct vfio_iommu *iommu = iommu_data;
bool do_accounting;
int i;
- if (!iommu || !user_pfn || npage <= 0)
- return -EINVAL;
-
/* Supported for v2 version only */
- if (!iommu->v2)
- return -EACCES;
+ if (WARN_ON(!iommu->v2))
+ return;
mutex_lock(&iommu->lock);
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ dma_addr_t iova = user_iova + PAGE_SIZE * i;
struct vfio_dma *dma;
- dma_addr_t iova;
- iova = user_pfn[i] << PAGE_SHIFT;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma)
break;
@@ -979,7 +978,8 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
}
mutex_unlock(&iommu->lock);
- return i > 0 ? i : -EINVAL;
+
+ WARN_ON(i != npage);
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
@@ -1287,6 +1287,35 @@ static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
return 0;
}
+/*
+ * Notify VFIO drivers using vfio_register_emulated_iommu_dev() to invalidate
+ * and unmap iovas within the range we're about to unmap. Drivers MUST unpin
+ * pages in response to an invalidation.
+ */
+static void vfio_notify_dma_unmap(struct vfio_iommu *iommu,
+ struct vfio_dma *dma)
+{
+ struct vfio_device *device;
+
+ if (list_empty(&iommu->device_list))
+ return;
+
+ /*
+ * The device is expected to call vfio_unpin_pages() for any IOVA it has
+ * pinned within the range. Since vfio_unpin_pages() will eventually
+ * call back down to this code and try to obtain the iommu->lock we must
+ * drop it.
+ */
+ mutex_lock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
+
+ list_for_each_entry(device, &iommu->device_list, iommu_entry)
+ device->ops->dma_unmap(device, dma->iova, dma->size);
+
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_lock(&iommu->lock);
+}
+
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap,
struct vfio_bitmap *bitmap)
@@ -1377,12 +1406,6 @@ again:
if (!iommu->v2 && iova > dma->iova)
break;
- /*
- * Task with same address space who mapped this iova range is
- * allowed to unmap the iova range.
- */
- if (dma->task->mm != current->mm)
- break;
if (invalidate_vaddr) {
if (dma->vaddr_invalid) {
@@ -1406,8 +1429,6 @@ again:
}
if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
- struct vfio_iommu_type1_dma_unmap nb_unmap;
-
if (dma_last == dma) {
BUG_ON(++retries > 10);
} else {
@@ -1415,20 +1436,7 @@ again:
retries = 0;
}
- nb_unmap.iova = dma->iova;
- nb_unmap.size = dma->size;
-
- /*
- * Notify anyone (mdev vendor drivers) to invalidate and
- * unmap iovas within the range we're about to unmap.
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- mutex_unlock(&iommu->lock);
- blocking_notifier_call_chain(&iommu->notifier,
- VFIO_IOMMU_NOTIFY_DMA_UNMAP,
- &nb_unmap);
- mutex_lock(&iommu->lock);
+ vfio_notify_dma_unmap(iommu, dma);
goto again;
}
@@ -1679,18 +1687,6 @@ out_unlock:
return ret;
}
-static int vfio_bus_type(struct device *dev, void *data)
-{
- struct bus_type **bus = data;
-
- if (*bus && *bus != dev->bus)
- return -EINVAL;
-
- *bus = dev->bus;
-
- return 0;
-}
-
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
@@ -2153,13 +2149,26 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
+/* Redundantly walks non-present capabilities to simplify caller */
+static int vfio_iommu_device_capable(struct device *dev, void *data)
+{
+ return device_iommu_capable(dev, (enum iommu_cap)data);
+}
+
+static int vfio_iommu_domain_alloc(struct device *dev, void *data)
+{
+ struct iommu_domain **domain = data;
+
+ *domain = iommu_domain_alloc(dev->bus);
+ return 1; /* Don't iterate */
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group, enum vfio_group_type type)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
@@ -2192,18 +2201,19 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_unlock;
}
- /* Determine bus_type in order to allocate a domain */
- ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
- if (ret)
- goto out_free_group;
-
ret = -ENOMEM;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
goto out_free_group;
+ /*
+ * Going via the iommu_group iterator avoids races, and trivially gives
+ * us a representative device for the IOMMU API call. We don't actually
+ * want to iterate beyond the first device (if any).
+ */
ret = -EIO;
- domain->domain = iommu_domain_alloc(bus);
+ iommu_group_for_each_dev(iommu_group, &domain->domain,
+ vfio_iommu_domain_alloc);
if (!domain->domain)
goto out_free_domain;
@@ -2258,7 +2268,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_add(&group->next, &domain->group_list);
msi_remap = irq_domain_check_msi_remap() ||
- iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+ iommu_group_for_each_dev(iommu_group, (void *)IOMMU_CAP_INTR_REMAP,
+ vfio_iommu_device_capable);
if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
@@ -2478,7 +2489,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&iommu->emulated_iommu_groups) &&
list_empty(&iommu->domain_list)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(&iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
}
goto detach_group_done;
@@ -2510,7 +2521,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
if (list_empty(&iommu->emulated_iommu_groups)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(
+ &iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
} else {
vfio_iommu_unmap_unpin_reaccount(iommu);
@@ -2571,7 +2583,8 @@ static void *vfio_iommu_type1_open(unsigned long arg)
iommu->dma_avail = dma_entry_limit;
iommu->container_open = true;
mutex_init(&iommu->lock);
- BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+ mutex_init(&iommu->device_list_lock);
+ INIT_LIST_HEAD(&iommu->device_list);
init_waitqueue_head(&iommu->vaddr_wait);
iommu->pgsize_bitmap = PAGE_MASK;
INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
@@ -3008,28 +3021,40 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
}
}
-static int vfio_iommu_type1_register_notifier(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb)
+static void vfio_iommu_type1_register_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- /* clear known events */
- *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-
- /* refuse to register if still events remaining */
- if (*events)
- return -EINVAL;
+ if (!vdev->ops->dma_unmap)
+ return;
- return blocking_notifier_chain_register(&iommu->notifier, nb);
+ /*
+	 * list_empty(&iommu->device_list) is tested under the iommu->lock, while
+	 * iteration for dma_unmap must be done under the device_list_lock.
+	 * Holding both locks here allows avoiding the device_list_lock in
+	 * several fast paths. See vfio_notify_dma_unmap().
+ */
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_add(&vdev->iommu_entry, &iommu->device_list);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
-static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
- struct notifier_block *nb)
+static void vfio_iommu_type1_unregister_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- return blocking_notifier_chain_unregister(&iommu->notifier, nb);
+ if (!vdev->ops->dma_unmap)
+ return;
+
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_del(&vdev->iommu_entry);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
@@ -3163,8 +3188,8 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.detach_group = vfio_iommu_type1_detach_group,
.pin_pages = vfio_iommu_type1_pin_pages,
.unpin_pages = vfio_iommu_type1_unpin_pages,
- .register_notifier = vfio_iommu_type1_register_notifier,
- .unregister_notifier = vfio_iommu_type1_unregister_notifier,
+ .register_device = vfio_iommu_type1_register_device,
+ .unregister_device = vfio_iommu_type1_unregister_device,
.dma_rw = vfio_iommu_type1_dma_rw,
.group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
.notify = vfio_iommu_type1_notify,
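
For orientation, a minimal sketch (not part of this series) of the other side of this rework: a driver registered with vfio_register_emulated_iommu_dev() now supplies a dma_unmap op instead of a notifier, matching the (device, iova, size) call made by vfio_notify_dma_unmap() above. The my_* names are hypothetical, and the exact member signature is the one declared in include/linux/vfio.h:

static void my_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
	/* Must unpin every page previously pinned inside [iova, iova + length). */
	my_invalidate_and_unpin(vdev, iova, length);
}

static const struct vfio_device_ops my_emulated_ops = {
	.name		= "my-emulated-dev",
	.dma_unmap	= my_dma_unmap,
	/* open_device, close_device, ioctl, ... unchanged by this rework */
};
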
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio_main.c
index 61e71c1154be..7cb56c382c97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio_main.c
@@ -231,6 +231,9 @@ int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
struct vfio_iommu_driver *driver, *tmp;
+ if (WARN_ON(!ops->register_device != !ops->unregister_device))
+ return -EINVAL;
+
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return -ENOMEM;
@@ -504,7 +507,9 @@ static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
if (IS_ERR(iommu_group))
return ERR_CAST(iommu_group);
- iommu_group_set_name(iommu_group, "vfio-noiommu");
+ ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
+ if (ret)
+ goto out_put_group;
ret = iommu_group_add_device(iommu_group, dev);
if (ret)
goto out_put_group;
@@ -549,6 +554,16 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
if (!iommu_group)
return ERR_PTR(-EINVAL);
+ /*
+ * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+ * restore cache coherency. It has to be checked here because it is only
+ * valid for cases where we are using iommu groups.
+ */
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
+ iommu_group_put(iommu_group);
+ return ERR_PTR(-EINVAL);
+ }
+
group = vfio_group_get_from_iommu(iommu_group);
if (!group)
group = vfio_create_group(iommu_group, VFIO_IOMMU);
@@ -601,13 +616,6 @@ static int __vfio_register_dev(struct vfio_device *device,
int vfio_register_group_dev(struct vfio_device *device)
{
- /*
- * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
- * restore cache coherency.
- */
- if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY))
- return -EINVAL;
-
return __vfio_register_dev(device,
vfio_group_find_or_alloc(device->dev));
}
@@ -1079,6 +1087,7 @@ static void vfio_device_unassign_container(struct vfio_device *device)
static struct file *vfio_device_open(struct vfio_device *device)
{
+ struct vfio_iommu_driver *iommu_driver;
struct file *filep;
int ret;
@@ -1109,6 +1118,12 @@ static struct file *vfio_device_open(struct vfio_device *device)
if (ret)
goto err_undo_count;
}
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->register_device)
+ iommu_driver->ops->register_device(
+ device->group->container->iommu_data, device);
+
up_read(&device->group->group_rwsem);
}
mutex_unlock(&device->dev_set->lock);
@@ -1129,7 +1144,7 @@ static struct file *vfio_device_open(struct vfio_device *device)
* Appears to be missing by lack of need rather than
* explicitly prevented. Now there's need.
*/
- filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
if (device->group->type == VFIO_NO_IOMMU)
dev_warn(device->dev, "vfio-noiommu device opened by user "
@@ -1143,13 +1158,19 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
down_read(&device->group->group_rwsem);
- if (device->open_count == 1 && device->ops->close_device)
+ if (device->open_count == 1 && device->ops->close_device) {
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
+ }
err_undo_count:
+ up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0 && device->kvm)
device->kvm = NULL;
- up_read(&device->group->group_rwsem);
mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
err_unassign_container:
@@ -1339,12 +1360,18 @@ static const struct file_operations vfio_group_fops = {
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
+ struct vfio_iommu_driver *iommu_driver;
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
down_read(&device->group->group_rwsem);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0)
@@ -1541,8 +1568,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
struct file *filp = NULL;
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz,
@@ -1558,7 +1584,8 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
if (flags & VFIO_DEVICE_FEATURE_GET) {
enum vfio_device_mig_state curr_state;
- ret = device->ops->migration_get_state(device, &curr_state);
+ ret = device->mig_ops->migration_get_state(device,
+ &curr_state);
if (ret)
return ret;
mig.device_state = curr_state;
@@ -1566,7 +1593,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
}
/* Handle the VFIO_DEVICE_FEATURE_SET */
- filp = device->ops->migration_set_state(device, mig.device_state);
+ filp = device->mig_ops->migration_set_state(device, mig.device_state);
if (IS_ERR(filp) || !filp)
goto out_copy;
@@ -1589,8 +1616,7 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
};
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
@@ -1812,6 +1838,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
+ caps->buf = NULL;
caps->size = 0;
return ERR_PTR(-ENOMEM);
}
@@ -1910,26 +1937,25 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
- * Pin a set of guest PFNs and return their associated host PFNs for local
+ * Pin contiguous user pages and return their associated host pages for local
* domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be pinned.
- * @npage [in] : count of elements in user_pfn array. This count should not
- * be greater VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @iova [in] : starting IOVA of user pages to be pinned.
+ * @npage [in] : count of pages to be pinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
* @prot [in] : protection flags
- * @phys_pfn[out]: array of host PFNs
+ * @pages[out] : array of host pages
* Return error or number of pages pinned.
*/
-int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn)
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages)
{
struct vfio_container *container;
struct vfio_group *group = device->group;
struct vfio_iommu_driver *driver;
int ret;
- if (!user_pfn || !phys_pfn || !npage ||
- !vfio_assert_device_open(device))
+ if (!pages || !npage || !vfio_assert_device_open(device))
return -EINVAL;
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -1943,8 +1969,8 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, user_pfn,
- npage, prot, phys_pfn);
+ group->iommu_group, iova,
+ npage, prot, pages);
else
ret = -ENOTTY;
@@ -1953,37 +1979,28 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
EXPORT_SYMBOL(vfio_pin_pages);
/*
- * Unpin set of host PFNs for local domain only.
+ * Unpin contiguous host pages for local domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
- * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * @npage [in] : count of elements in user_pfn array. This count should not
+ * @iova [in] : starting address of user pages to be unpinned.
+ * @npage [in] : count of pages to be unpinned. This count should not
* be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * Return error or number of pages unpinned.
*/
-int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage)
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
struct vfio_container *container;
struct vfio_iommu_driver *driver;
- int ret;
- if (!user_pfn || !npage || !vfio_assert_device_open(device))
- return -EINVAL;
+ if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
+ return;
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
+ if (WARN_ON(!vfio_assert_device_open(device)))
+ return;
/* group->container cannot change while a vfio device is open */
container = device->group->container;
driver = container->iommu_driver;
- if (likely(driver && driver->ops->unpin_pages))
- ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
- npage);
- else
- ret = -ENOTTY;
- return ret;
+ driver->ops->unpin_pages(container->iommu_data, iova, npage);
}
EXPORT_SYMBOL(vfio_unpin_pages);
@@ -1998,13 +2015,13 @@ EXPORT_SYMBOL(vfio_unpin_pages);
* not a real device DMA, it is not necessary to pin the user space memory.
*
* @device [in] : VFIO device
- * @user_iova [in] : base IOVA of a user space buffer
+ * @iova [in] : base IOVA of a user space buffer
* @data [in] : pointer to kernel buffer
* @len [in] : kernel buffer length
* @write : indicate read or write
* Return error code on failure or 0 on success.
*/
-int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
size_t len, bool write)
{
struct vfio_container *container;
@@ -2020,97 +2037,13 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
if (likely(driver && driver->ops->dma_rw))
ret = driver->ops->dma_rw(container->iommu_data,
- user_iova, data, len, write);
+ iova, data, len, write);
else
ret = -ENOTTY;
return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
-static int vfio_register_iommu_notifier(struct vfio_group *group,
- unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->register_notifier))
- ret = driver->ops->register_notifier(container->iommu_data,
- events, nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-static int vfio_unregister_iommu_notifier(struct vfio_group *group,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->unregister_notifier))
- ret = driver->ops->unregister_notifier(container->iommu_data,
- nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-int vfio_register_notifier(struct vfio_device *device,
- enum vfio_notify_type type, unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !events || (*events == 0) ||
- !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_register_iommu_notifier(group, events, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_register_notifier);
-
-int vfio_unregister_notifier(struct vfio_device *device,
- enum vfio_notify_type type,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_unregister_iommu_notifier(group, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_unregister_notifier);
-
/*
* Module/class support
*/
@@ -2156,13 +2089,17 @@ static int __init vfio_init(void)
if (ret)
goto err_alloc_chrdev;
- pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
#ifdef CONFIG_VFIO_NOIOMMU
- vfio_register_iommu_driver(&vfio_noiommu_ops);
+ ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
+ if (ret)
+ goto err_driver_register;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
+err_driver_register:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
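
As a usage illustration (not part of this series), the reworked helpers take a base IOVA and fill or consume struct page pointers rather than PFN arrays; a caller in an emulated-IOMMU driver might look roughly like this, with the my_* names being placeholders:

static int my_pin_one(struct vfio_device *vdev, dma_addr_t iova,
		      struct page **page)
{
	int ret;

	/* Returns the number of pages pinned (expected to be 1 here). */
	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;
	return 0;
}

static void my_unpin_one(struct vfio_device *vdev, dma_addr_t iova)
{
	vfio_unpin_pages(vdev, iova, 1);
}
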
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ffd9e6c2ffc1..7ebf106d50c1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -159,9 +159,13 @@ enum {
};
#define VHOST_SCSI_MAX_TARGET 256
-#define VHOST_SCSI_MAX_VQ 128
+#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128
+static unsigned vhost_scsi_max_io_vqs = 128;
+module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
+MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
+
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
/*
@@ -186,7 +190,9 @@ struct vhost_scsi {
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
- struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+ struct vhost_scsi_virtqueue *vqs;
+ unsigned long *compl_bitmap;
+ struct vhost_scsi_inflight **old_inflight;
struct vhost_work vs_completion_work; /* cmd completion work item */
struct llist_head vs_completion_list; /* cmd completion queue */
@@ -245,7 +251,7 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_virtqueue *vq;
int idx, i;
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
@@ -533,7 +539,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_completion_work);
- DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp;
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
@@ -541,7 +546,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
struct iov_iter iov_iter;
int ret, vq;
- bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+ bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
llnode = llist_del_all(&vs->vs_completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -566,7 +571,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
vq = q - vs->vqs;
- __set_bit(vq, signal);
+ __set_bit(vq, vs->compl_bitmap);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -574,8 +579,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
vq = -1;
- while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
- < VHOST_SCSI_MAX_VQ)
+ while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
+ < vs->dev.nvqs)
vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
@@ -643,14 +648,12 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
size_t offset;
unsigned int npages = 0;
- bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+ bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
- iov_iter_advance(iter, bytes);
-
while (bytes) {
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
sg_set_page(sg++, pages[npages++], n, offset);
@@ -1421,26 +1424,25 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
int i;
/* Init new inflight and remember the old inflight */
- vhost_scsi_init_inflight(vs, old_inflight);
+ vhost_scsi_init_inflight(vs, vs->old_inflight);
/*
* The inflight->kref was initialized to 1. We decrement it here to
* indicate the start of the flush operation so that it will reach 0
* when all the reqs are finished.
*/
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- wait_for_completion(&old_inflight[i]->comp);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
@@ -1603,7 +1605,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
if (!vhost_vq_is_setup(vq))
continue;
@@ -1613,7 +1615,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
goto destroy_vq_cmds;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, vs_tpg);
@@ -1715,7 +1717,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
target_undepend_item(&se_tpg->tpg_group.cg_item);
}
if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
@@ -1724,7 +1726,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
/* Make sure cmds are not running before tearing them down. */
vhost_scsi_flush(vs);
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
@@ -1764,7 +1766,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
@@ -1778,16 +1780,40 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
- int r = -ENOMEM, i;
+ int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
goto err_vs;
- vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
- if (!vqs)
+ if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
+ pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
+ VHOST_SCSI_MAX_IO_VQ);
+ nvqs = VHOST_SCSI_MAX_IO_VQ;
+ } else if (nvqs == 0) {
+ pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
+ nvqs = 1;
+ }
+ nvqs += VHOST_SCSI_VQ_IO;
+
+ vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
+ if (!vs->compl_bitmap)
+ goto err_compl_bitmap;
+
+ vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->old_inflight)
+ goto err_inflight;
+
+ vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->vqs)
goto err_vqs;
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_local_vqs;
+
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
@@ -1798,11 +1824,11 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
VHOST_SCSI_WEIGHT, 0, true, NULL);
vhost_scsi_init_inflight(vs, NULL);
@@ -1810,7 +1836,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
f->private_data = vs;
return 0;
+err_local_vqs:
+ kfree(vs->vqs);
err_vqs:
+ kfree(vs->old_inflight);
+err_inflight:
+ bitmap_free(vs->compl_bitmap);
+err_compl_bitmap:
kvfree(vs);
err_vs:
return r;
@@ -1828,6 +1860,9 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
kfree(vs->dev.vqs);
+ kfree(vs->vqs);
+ kfree(vs->old_inflight);
+ bitmap_free(vs->compl_bitmap);
kvfree(vs);
return 0;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 23dcbfdfa13b..166044642fd5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -347,6 +347,14 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
return 0;
}
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->suspend;
+}
+
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -470,6 +478,22 @@ static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+/* After a successful return of this ioctl the device must not process more
+ * virtqueue descriptors. The device can answer reads or writes of config
+ * fields as if it were not suspended. In particular, writing to "queue_enable"
+ * with a value of 1 will not make the device start processing buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ops->suspend(vdpa);
+}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -577,7 +601,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if (cmd == VHOST_SET_BACKEND_FEATURES) {
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+ !vhost_vdpa_can_suspend(v))
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
@@ -628,6 +656,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
+ if (vhost_vdpa_can_suspend(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
@@ -640,6 +670,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_GET_VQS_COUNT:
r = vhost_vdpa_get_vqs_count(v, argp);
break;
+ case VHOST_VDPA_SUSPEND:
+ r = vhost_vdpa_suspend(v);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -1076,7 +1109,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
if (!bus)
return -EFAULT;
- if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
return -ENOTSUPP;
v->domain = iommu_domain_alloc(bus);
@@ -1363,6 +1396,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
err:
put_device(&v->dev);
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}
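
A hedged userspace sketch (not part of this series) of how the new ioctl is expected to be driven on an open /dev/vhost-vdpa-N descriptor; the feature bit and ioctl numbers are assumed to come from the uapi vhost headers updated alongside this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vdpa_try_suspend(int fd)
{
	uint64_t features;

	if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features))
		return -1;
	if (!(features & (1ULL << VHOST_BACKEND_F_SUSPEND)))
		return -1;	/* parent device has no ->suspend() */
	if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features))
		return -1;
	/* On success the device stops processing virtqueue descriptors. */
	return ioctl(fd, VHOST_VDPA_SUSPEND);
}
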
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index eab55accf381..11f59dd06a74 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
static int iotlb_translate(const struct vringh *vrh,
- u64 addr, u64 len, struct bio_vec iov[],
+ u64 addr, u64 len, u64 *translated,
+ struct bio_vec iov[],
int iov_size, u32 perm)
{
struct vhost_iotlb_map *map;
@@ -1136,43 +1137,76 @@ static int iotlb_translate(const struct vringh *vrh,
spin_unlock(vrh->iotlb_lock);
+ if (translated)
+ *translated = min(len, s);
+
return ret;
}
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
- len, iov, 16, VHOST_MAP_RO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
- iov_iter_bvec(&iter, READ, iov, ret, len);
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- ret = copy_from_iter(dst, len, &iter);
+ iov_iter_bvec(&iter, READ, iov, ret, translated);
- return ret;
+ ret = copy_from_iter(dst, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
}
static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
- len, iov, 16, VHOST_MAP_WO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- iov_iter_bvec(&iter, WRITE, iov, ret, len);
+ iov_iter_bvec(&iter, WRITE, iov, ret, translated);
+
+ ret = copy_to_iter(src, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
- return copy_to_iter(src, len, &iter);
+ return total_translated;
}
static inline int getu16_iotlb(const struct vringh *vrh,
@@ -1183,7 +1217,7 @@ static inline int getu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic read is needed for getu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_RO);
if (ret < 0)
return ret;
@@ -1204,7 +1238,7 @@ static inline int putu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic write is needed for putu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_WO);
if (ret < 0)
return ret;
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 2b9e2bbbb03e..fc02c5c16055 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -218,9 +218,8 @@ err:
static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
- unsigned int period = lp->pdata->period_ns;
- unsigned int duty = br * period / max_br;
struct pwm_device *pwm;
+ struct pwm_state state;
/* request pwm device with the consumer name */
if (!lp->pwm) {
@@ -230,18 +229,16 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
lp->pwm = pwm;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
+ pwm_init_state(lp->pwm, &state);
+ } else {
+ pwm_get_state(lp->pwm, &state);
}
- pwm_config(lp->pwm, duty, period);
- if (duty)
- pwm_enable(lp->pwm);
- else
- pwm_disable(lp->pwm);
+ state.period = lp->pdata->period_ns;
+ state.duty_cycle = div_u64(br * state.period, max_br);
+ state.enabled = state.duty_cycle;
+
+ pwm_apply_state(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index b6d373af6e3f..d54f501e4285 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -27,8 +27,7 @@ struct ltv350qv {
/*
* The power-on and power-off sequences are taken from the
* LTV350QV-F04 data sheet from Samsung. The register definitions are
- * taken from the S6F2002 command list also from Samsung. Both
- * documents are distributed with the AVR32 Linux BSP CD from Atmel.
+ * taken from the S6F2002 command list also from Samsung.
*
* There's still some voodoo going on here, but it's a lot better than
* in the first incarnation of the driver where all we had was the raw
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index b2bfbf070200..dc37494baf42 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -12,7 +12,6 @@
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
-#include <linux/of.h>
#include <linux/slab.h>
#include <video/platform_lcd.h>
@@ -133,19 +132,10 @@ static int platform_lcd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(platform_lcd_pm_ops, platform_lcd_suspend,
platform_lcd_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id platform_lcd_of_match[] = {
- { .compatible = "platform-lcd" },
- {},
-};
-MODULE_DEVICE_TABLE(of, platform_lcd_of_match);
-#endif
-
static struct platform_driver platform_lcd_driver = {
.driver = {
.name = "platform-lcd",
.pm = &platform_lcd_pm_ops,
- .of_match_table = of_match_ptr(platform_lcd_of_match),
},
.probe = platform_lcd_probe,
};
diff --git a/drivers/video/backlight/rt4831-backlight.c b/drivers/video/backlight/rt4831-backlight.c
index 42155c7d2db1..eb8c59e8713f 100644
--- a/drivers/video/backlight/rt4831-backlight.c
+++ b/drivers/video/backlight/rt4831-backlight.c
@@ -12,6 +12,7 @@
#define RT4831_REG_BLCFG 0x02
#define RT4831_REG_BLDIML 0x04
#define RT4831_REG_ENABLE 0x08
+#define RT4831_REG_BLOPT2 0x11
#define RT4831_BLMAX_BRIGHTNESS 2048
@@ -23,6 +24,11 @@
#define RT4831_BLDIML_MASK GENMASK(2, 0)
#define RT4831_BLDIMH_MASK GENMASK(10, 3)
#define RT4831_BLDIMH_SHIFT 3
+#define RT4831_BLOCP_MASK GENMASK(1, 0)
+
+#define RT4831_BLOCP_MINUA 900000
+#define RT4831_BLOCP_MAXUA 1800000
+#define RT4831_BLOCP_STEPUA 300000
struct rt4831_priv {
struct device *dev;
@@ -85,7 +91,7 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
{
struct device *dev = priv->dev;
u8 propval;
- u32 brightness;
+ u32 brightness, ocp_uA;
unsigned int val = 0;
int ret;
@@ -120,6 +126,31 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
if (ret)
return ret;
+ /*
+ * This OCP level is used to protect and limit the inductor current.
+	 * If the inductor peak current reaches this level, the low-side MOSFET
+	 * will be turned off and the output channel current may be limited.
+	 * To sustain the configured channel current, the chosen inductor must
+	 * be rated higher than the OCP level.
+	 *
+	 * Unlike the OVP level, whose 21V default suits most applications, an
+	 * OCP level chosen smaller than needed will also cause the backlight
+	 * channel output current to be smaller than the register setting.
+ */
+ ret = device_property_read_u32(dev, "richtek,bled-ocp-microamp",
+ &ocp_uA);
+ if (!ret) {
+ ocp_uA = clamp_val(ocp_uA, RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_MAXUA);
+ val = DIV_ROUND_UP(ocp_uA - RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_STEPUA);
+ ret = regmap_update_bits(priv->regmap, RT4831_REG_BLOPT2,
+ RT4831_BLOCP_MASK, val);
+ if (ret)
+ return ret;
+ }
+
ret = device_property_read_u8(dev, "richtek,channel-use", &propval);
if (ret) {
dev_err(dev, "richtek,channel-use DT property missing\n");
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 8457166f357f..d96d713fe7db 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tps65217_bl.c
*
@@ -5,15 +6,6 @@
*
* Copyright (C) 2012 Matthias Kaehlcke
* Author: Matthias Kaehlcke <matthias@kaehlcke.net>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 576612f18d59..fcdf017e2665 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -75,7 +75,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
-static struct uni_pagedir *vgacon_uni_pagedir;
+static struct uni_pagedict *vgacon_uni_pagedir;
static int vgacon_refcount;
/* Description of the hardware situation */
@@ -342,7 +342,7 @@ static const char *vgacon_startup(void)
static void vgacon_init(struct vc_data *c, int init)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
/*
* We cannot be loaded as a module, therefore init will be 1
@@ -367,10 +367,10 @@ static void vgacon_init(struct vc_data *c, int init)
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
- p = *c->vc_uni_pagedir_loc;
- if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) {
+ p = *c->uni_pagedict_loc;
+ if (c->uni_pagedict_loc != &vgacon_uni_pagedir) {
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &vgacon_uni_pagedir;
+ c->uni_pagedict_loc = &vgacon_uni_pagedir;
vgacon_refcount++;
}
if (!vgacon_uni_pagedir && p)
@@ -392,7 +392,7 @@ static void vgacon_deinit(struct vc_data *c)
if (!--vgacon_refcount)
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &c->vc_uni_pagedir;
+ c->uni_pagedict_loc = &c->uni_pagedict;
con_set_default_unimap(c);
}
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 9811f1bad8d4..7db03ed77c76 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -84,9 +84,6 @@ static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = {
/*
* Interface used by the world
*/
-int mc68x328fb_init(void);
-int mc68x328fb_setup(char *);
-
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mc68x328fb_set_par(struct fb_info *info);
@@ -403,7 +400,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#endif
}
-int __init mc68x328fb_setup(char *options)
+static int __init mc68x328fb_setup(char *options)
{
if (!options || !*options)
return 1;
@@ -414,7 +411,7 @@ int __init mc68x328fb_setup(char *options)
* Initialisation
*/
-int __init mc68x328fb_init(void)
+static int __init mc68x328fb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 8080116aea84..f65c96d1394d 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -698,16 +698,18 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
return -ENODEV;
panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel)
- return -ENODEV;
+ if (!panel) {
+ err = -ENODEV;
+ goto out_endpoint_put;
+ }
err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
&max_bandwidth);
@@ -736,11 +738,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
- return -ENOENT;
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
+ err = -ENOENT;
+ goto out_panel_put;
+ }
+
+ of_node_put(panel);
+ of_node_put(endpoint);
return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
tft_r0b0g0[1], tft_r0b0g0[2]);
+out_panel_put:
+ of_node_put(panel);
+out_endpoint_put:
+ of_node_put(endpoint);
+ return err;
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
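The amba-clcd change above is the usual device-tree reference-count unwind: every node obtained from the OF graph helpers must be dropped with of_node_put() on both the success and the error paths. A minimal kernel-style sketch of the idiom with illustrative names (it assumes <linux/of_graph.h>; the function and variable names are not from the driver):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_graph.h>

static int example_parse_panel(struct device *dev)
{
	struct device_node *endpoint, *panel;
	int err = 0;

	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
	if (!endpoint)
		return -ENODEV;

	panel = of_graph_get_remote_port_parent(endpoint);
	if (!panel) {
		err = -ENODEV;
		goto out_endpoint_put;
	}

	/* ... parse properties of "panel" here ... */

	of_node_put(panel);
out_endpoint_put:
	of_node_put(endpoint);
	return err;
}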
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 6e07a97bbd31..d88265dbebf4 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -2540,27 +2540,16 @@ static int amifb_blank(int blank, struct fb_info *info)
static int amifb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
- } else {
+ if (!(var->vmode & FB_VMODE_YWRAP)) {
/*
* TODO: There will be problems when xpan!=1, so some columns
* on the right side will never be seen
*/
if (var->xoffset + info->var.xres >
- upx(16 << maxfmode, info->var.xres_virtual) ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
+ upx(16 << maxfmode, info->var.xres_virtual))
return -EINVAL;
}
ami_pan_var(var, info);
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index 453daa072f53..a317d9fe1d67 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -782,7 +782,12 @@ static int arkfb_set_par(struct fb_info *info)
return -EINVAL;
}
- ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ value = (hdiv * info->var.pixclock) / hmul;
+ if (!value) {
+ fb_dbg(info, "invalid pixclock\n");
+ value = 1;
+ }
+ ark_set_pixclock(info, value);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
@@ -793,6 +798,8 @@ static int arkfb_set_par(struct fb_info *info)
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 52a35b661643..2bc4089865e6 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -236,8 +236,6 @@ static int *MV300_reg = MV300_reg_8bit;
#endif /* ATAFB_EXT */
-static int inverse;
-
/*
* struct fb_ops {
* * open/release and usage marking
@@ -467,27 +465,27 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 320x200, 15 kHz, 60 Hz (ST low) */
"st-low", 60, 320, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x200, 15 kHz, 60 Hz (ST medium) */
"st-mid", 60, 640, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 30.25 kHz, 63.5 Hz (ST high) */
"st-high", 63, 640, 400, 32000, 128, 0, 40, 14, 128, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 320x480, 15 kHz, 60 Hz (TT low) */
"tt-low", 60, 320, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x480, 29 kHz, 57 Hz (TT medium) */
"tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 1280x960, 72 kHz, 72 Hz (TT high) */
- "tt-high", 57, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "tt-high", 72, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
+ 0, FB_VMODE_NONINTERLACED
},
/*
@@ -496,12 +494,12 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 63.5, 640, 480, 32000, 18, 42, 31, 11, 96, 3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga", 60, 640, 480, 39721, 42, 18, 31, 11, 100, 3,
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, 32000, 18, 42, 31, 11, 96, 3,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga70", 70, 640, 400, 39721, 42, 18, 31, 11, 100, 3,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED
},
/*
@@ -511,7 +509,7 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 896x608, 31 kHz, 60 Hz (Falcon High) */
"falh", 60, 896, 608, 32000, 18, 42, 31, 1, 96,3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
},
};
@@ -1010,10 +1008,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
else if (yres_virtual < yres)
yres_virtual = yres;
- /* backward bug-compatibility */
- if (var->pixclock > 1)
- var->pixclock -= 1;
-
par->hw.falcon.line_width = bpp * xres / 16;
par->hw.falcon.line_offset = bpp * (xres_virtual - xres) / 16;
@@ -1072,8 +1066,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
xstretch = 2; /* Double pixel width only for hicolor */
/* Default values are used for vert./hor. timing if no pixelclock given. */
if (var->pixclock == 0) {
- int linesize;
-
/* Choose master pixelclock depending on hor. timing */
plen = 1 * xstretch;
if ((plen * xres + f25.right + f25.hsync + f25.left) *
@@ -1092,7 +1084,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
left_margin = pclock->left / plen;
right_margin = pclock->right / plen;
hsync_len = pclock->hsync / plen;
- linesize = left_margin + xres + right_margin + hsync_len;
upper_margin = 31;
lower_margin = 11;
vsync_len = 3;
@@ -1641,7 +1632,7 @@ static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
static int falcon_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int xoffset;
int bpp = info->var.bits_per_pixel;
@@ -2208,6 +2199,10 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
if (regno > 255)
return 1;
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
@@ -2261,7 +2256,7 @@ static void set_screen_base(void *s_base)
static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
if (!fbhw->set_screen_base ||
(!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
@@ -2407,55 +2402,19 @@ static void atafb_set_disp(struct fb_info *info)
static int
atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- int xoffset = var->xoffset;
- int yoffset = var->yoffset;
- int err;
-
- if (var->vmode & FB_VMODE_YWRAP) {
- if (yoffset < 0 || yoffset >= info->var.yres_virtual || xoffset)
- return -EINVAL;
- } else {
- if (xoffset + info->var.xres > info->var.xres_virtual ||
- yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
- }
-
- if (fbhw->pan_display) {
- err = fbhw->pan_display(var, info);
- if (err)
- return err;
- } else
+ if (!fbhw->pan_display)
return -EINVAL;
- info->var.xoffset = xoffset;
- info->var.yoffset = yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
-
- return 0;
+ return fbhw->pan_display(var, info);
}
/*
* generic drawing routines; imageblit needs updating for image depth > 1
*/
-#if BITS_PER_LONG == 32
-#define BYTES_PER_LONG 4
-#define SHIFT_PER_LONG 5
-#elif BITS_PER_LONG == 64
-#define BYTES_PER_LONG 8
-#define SHIFT_PER_LONG 6
-#else
-#define Please update me
-#endif
-
-
static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 width, height;
@@ -2498,7 +2457,7 @@ static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 dx, dy, sx, sy, width, height;
int rev_copy = 0;
@@ -2552,10 +2511,8 @@ static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
- unsigned long *dst;
- int dst_idx;
const char *src;
u32 dx, dy, width, height, pitch;
@@ -2582,10 +2539,6 @@ static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth == 1) {
// used for font data
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
- dst_idx += dy * par->next_line * 8 + dx;
src = image->data;
pitch = (image->width + 7) / 8;
while (height--) {
@@ -2622,14 +2575,14 @@ atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
switch (cmd) {
#ifdef FBCMD_GET_CURRENTPAR
case FBCMD_GET_CURRENTPAR:
- if (copy_to_user((void *)arg, (void *)&current_par,
+ if (copy_to_user((void *)arg, &current_par,
sizeof(struct atafb_par)))
return -EFAULT;
return 0;
#endif
#ifdef FBCMD_SET_CURRENTPAR
case FBCMD_SET_CURRENTPAR:
- if (copy_from_user((void *)&current_par, (void *)arg,
+ if (copy_from_user(&current_par, (void *)arg,
sizeof(struct atafb_par)))
return -EFAULT;
ata_set_par(&current_par);
@@ -2695,7 +2648,7 @@ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
* hw par just decoded */
static int atafb_set_par(struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
@@ -2981,7 +2934,7 @@ static void __init atafb_setup_user(char *spec)
}
}
-int __init atafb_setup(char *options)
+static int __init atafb_setup(char *options)
{
char *this_opt;
int temp;
@@ -2996,7 +2949,7 @@ int __init atafb_setup(char *options)
default_par = temp;
mode_option = this_opt;
} else if (!strcmp(this_opt, "inverse"))
- inverse = 1;
+ fb_invert_cmaps();
else if (!strncmp(this_opt, "hwscroll_", 9)) {
hwscroll = simple_strtoul(this_opt + 9, NULL, 10);
if (hwscroll < 0)
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 4ff6f624f912..851367e159c0 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -2306,7 +2306,7 @@ err_release_fb:
return error;
}
-void cirrusfb_zorro_unregister(struct zorro_dev *z)
+static void cirrusfb_zorro_unregister(struct zorro_dev *z)
{
struct fb_info *info = zorro_get_drvdata(z);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index f114242b5a70..cf9ac4da0a82 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1060,9 +1060,9 @@ static void fbcon_init(struct vc_data *vc, int init)
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
ops = info->fbcon_par;
@@ -1384,9 +1384,9 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 3688f9165848..18405c402ec1 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -280,7 +280,7 @@ static struct platform_device dnfb_device = {
.name = "dnfb",
};
-int __init dnfb_init(void)
+static int __init dnfb_init(void)
{
int ret;
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index 3b727d528fde..942e382cf1cf 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -293,7 +293,7 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
return 0;
}
-int __init fm2fb_setup(char *options)
+static int __init fm2fb_setup(char *options)
{
char *this_opt;
@@ -309,7 +309,7 @@ int __init fm2fb_setup(char *options)
return 0;
}
-int __init fm2fb_init(void)
+static int __init fm2fb_init(void)
{
char *option = NULL;
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 8d418abdd767..cdd44e5deafe 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -375,7 +375,7 @@ static struct dio_driver hpfb_driver = {
.remove = hpfb_remove_one,
};
-int __init hpfb_init(void)
+static int __init hpfb_init(void)
{
unsigned int sid;
unsigned char i;
@@ -415,7 +415,7 @@ int __init hpfb_init(void)
return 0;
}
-void __exit hpfb_cleanup_module(void)
+static void __exit hpfb_cleanup_module(void)
{
dio_unregister_driver(&hpfb_driver);
}
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 23329de28e77..199f786f9eed 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -401,7 +401,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
- u32 bpp, base, dacspeed24, mem;
+ u32 bpp, base, dacspeed24, mem, freq;
u8 r7;
int i;
@@ -644,7 +644,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
+ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
+ if (freq < I740_RFREQ_FIX) {
+ fb_dbg(info, "invalid pixclock\n");
+ freq = I740_RFREQ_FIX;
+ }
+ i740_calc_vclk(freq, par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index a2f644c97f28..d97d7456d15a 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -41,7 +41,18 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#include <linux/platform_data/video-imxfb.h>
+#define PCR_TFT (1 << 31)
+#define PCR_BPIX_8 (3 << 25)
+#define PCR_BPIX_12 (4 << 25)
+#define PCR_BPIX_16 (5 << 25)
+#define PCR_BPIX_18 (6 << 25)
+
+struct imx_fb_videomode {
+ struct fb_videomode mode;
+ u32 pcr;
+ bool aus_mode;
+ unsigned char bpp;
+};
/*
* Complain if VAR is out of range.
@@ -656,7 +667,6 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
@@ -690,25 +700,20 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
info->fbops = &imxfb_ops;
info->flags = FBINFO_FLAG_DEFAULT |
FBINFO_READS_FAST;
- if (pdata) {
- fbi->lscr1 = pdata->lscr1;
- fbi->dmacr = pdata->dmacr;
- fbi->pwmr = pdata->pwmr;
- } else {
- np = pdev->dev.of_node;
- info->var.grayscale = of_property_read_bool(np,
- "cmap-greyscale");
- fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
- fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
+ np = pdev->dev.of_node;
+ info->var.grayscale = of_property_read_bool(np,
+ "cmap-greyscale");
+ fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
+ fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
+ fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
- of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+ of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
- of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
- }
+ of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+
+ of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
return 0;
}
@@ -863,10 +868,10 @@ static int imxfb_probe(struct platform_device *pdev)
struct imxfb_info *fbi;
struct lcd_device *lcd;
struct fb_info *info;
- struct imx_fb_platform_data *pdata;
struct resource *res;
struct imx_fb_videomode *m;
const struct of_device_id *of_id;
+ struct device_node *display_np;
int ret, i;
int bytes_per_pixel;
@@ -884,8 +889,6 @@ static int imxfb_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- pdata = dev_get_platdata(&pdev->dev);
-
info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
if (!info)
return -ENOMEM;
@@ -898,43 +901,34 @@ static int imxfb_probe(struct platform_device *pdev)
if (ret < 0)
goto failed_init;
- if (pdata) {
- if (!fb_mode)
- fb_mode = pdata->mode[0].mode.name;
-
- fbi->mode = pdata->mode;
- fbi->num_modes = pdata->num_modes;
- } else {
- struct device_node *display_np;
- fb_mode = NULL;
-
- display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
- if (!display_np) {
- dev_err(&pdev->dev, "No display defined in devicetree\n");
- ret = -EINVAL;
- goto failed_of_parse;
- }
+ fb_mode = NULL;
- /*
- * imxfb does not support more modes, we choose only the native
- * mode.
- */
- fbi->num_modes = 1;
-
- fbi->mode = devm_kzalloc(&pdev->dev,
- sizeof(struct imx_fb_videomode), GFP_KERNEL);
- if (!fbi->mode) {
- ret = -ENOMEM;
- of_node_put(display_np);
- goto failed_of_parse;
- }
+ display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
+ if (!display_np) {
+ dev_err(&pdev->dev, "No display defined in devicetree\n");
+ ret = -EINVAL;
+ goto failed_of_parse;
+ }
- ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ /*
+ * imxfb does not support multiple modes; only the native mode
+ * is used.
+ */
+ fbi->num_modes = 1;
+
+ fbi->mode = devm_kzalloc(&pdev->dev,
+ sizeof(struct imx_fb_videomode), GFP_KERNEL);
+ if (!fbi->mode) {
+ ret = -ENOMEM;
of_node_put(display_np);
- if (ret)
- goto failed_of_parse;
+ goto failed_of_parse;
}
+ ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ of_node_put(display_np);
+ if (ret)
+ goto failed_of_parse;
+
/* Calculate maximum bytes used per pixel. In most cases this should
* be the same as m->bpp/8 */
m = &fbi->mode[0];
@@ -943,13 +937,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_len = max_t(size_t, info->fix.smem_len,
m->mode.xres * m->mode.yres * bytes_per_pixel);
- res = request_mem_region(res->start, resource_size(res),
- DRIVER_NAME);
- if (!res) {
- ret = -EBUSY;
- goto failed_req;
- }
-
fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fbi->clk_ipg)) {
ret = PTR_ERR(fbi->clk_ipg);
@@ -983,10 +970,10 @@ static int imxfb_probe(struct platform_device *pdev)
goto failed_getclock;
}
- fbi->regs = ioremap(res->start, resource_size(res));
- if (fbi->regs == NULL) {
+ fbi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->regs)) {
dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi->regs);
goto failed_ioremap;
}
@@ -1001,13 +988,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_start = fbi->map_dma;
- if (pdata && pdata->init) {
- ret = pdata->init(fbi->pdev);
- if (ret)
- goto failed_platform_init;
- }
-
-
INIT_LIST_HEAD(&info->modelist);
for (i = 0; i < fbi->num_modes; i++)
fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
@@ -1059,17 +1039,12 @@ failed_lcd:
failed_register:
fb_dealloc_cmap(&info->cmap);
failed_cmap:
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
-failed_platform_init:
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
failed_map:
- iounmap(fbi->regs);
failed_ioremap:
failed_getclock:
release_mem_region(res->start, resource_size(res));
-failed_req:
failed_of_parse:
kfree(info->pseudo_palette);
failed_init:
@@ -1079,26 +1054,15 @@ failed_init:
static int imxfb_remove(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata;
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
- iounmap(fbi->regs);
- release_mem_region(res->start, resource_size(res));
kfree(info->pseudo_palette);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index b1acb1ebebe9..91001990e351 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#ifdef CONFIG_PPC32
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index 9d9fe5c3a7a1..161fc65d6b57 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -489,7 +489,7 @@ static void hwa742_update_window_auto(struct timer_list *unused)
__hwa742_update_window_auto(false);
}
-int hwa742_update_window_async(struct fb_info *fbi,
+static int hwa742_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*complete_callback)(void *arg),
void *complete_callback_data)
@@ -522,7 +522,6 @@ int hwa742_update_window_async(struct fb_info *fbi,
out:
return r;
}
-EXPORT_SYMBOL(hwa742_update_window_async);
static int hwa742_setup_plane(int plane, int channel_out,
unsigned long offset, int screen_width,
diff --git a/drivers/video/fbdev/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h
index beb841ccb99c..ab1cb6e7f5f8 100644
--- a/drivers/video/fbdev/omap/omapfb.h
+++ b/drivers/video/fbdev/omap/omapfb.h
@@ -227,13 +227,4 @@ extern int omapfb_register_client(struct omapfb_notifier_block *nb,
omapfb_notifier_callback_t callback,
void *callback_data);
extern int omapfb_unregister_client(struct omapfb_notifier_block *nb);
-extern int omapfb_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-extern int hwa742_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-
#endif /* __OMAPFB_H */
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 292fcb0a24fc..dfb4ddc45701 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -668,7 +668,7 @@ static int omapfb_set_par(struct fb_info *fbi)
return r;
}
-int omapfb_update_window_async(struct fb_info *fbi,
+static int omapfb_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*callback)(void *),
void *callback_data)
@@ -714,7 +714,6 @@ int omapfb_update_window_async(struct fb_info *fbi,
return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
}
-EXPORT_SYMBOL(omapfb_update_window_async);
static int omapfb_update_win(struct fb_info *fbi,
struct omapfb_update_window *win)
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 079a2a7fb2c5..964bc88bb89c 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -133,7 +133,7 @@ static struct platform_device q40fb_device = {
.name = "q40fb",
};
-int __init q40fb_init(void)
+static int __init q40fb_init(void)
{
int ret = 0;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index f66c4de0e188..1882408b2d13 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -906,6 +906,8 @@ static int s3fb_set_par(struct fb_info *info)
value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index e31cf63b0a62..017c8efe8267 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1224,47 +1224,6 @@ int __init sa1100fb_init(void)
return platform_driver_register(&sa1100fb_driver);
}
-int __init sa1100fb_setup(char *options)
-{
-#if 0
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
-
- if (!strncmp(this_opt, "bpp:", 4))
- current_par.max_bpp =
- simple_strtoul(this_opt + 4, NULL, 0);
-
- if (!strncmp(this_opt, "lccr0:", 6))
- lcd_shadow.lccr0 =
- simple_strtoul(this_opt + 6, NULL, 0);
- if (!strncmp(this_opt, "lccr1:", 6)) {
- lcd_shadow.lccr1 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_xres =
- (lcd_shadow.lccr1 & 0x3ff) + 16;
- }
- if (!strncmp(this_opt, "lccr2:", 6)) {
- lcd_shadow.lccr2 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_yres =
- (lcd_shadow.
- lccr0 & LCCR0_SDS) ? ((lcd_shadow.
- lccr2 & 0x3ff) +
- 1) *
- 2 : ((lcd_shadow.lccr2 & 0x3ff) + 1);
- }
- if (!strncmp(this_opt, "lccr3:", 6))
- lcd_shadow.lccr3 =
- simple_strtoul(this_opt + 6, NULL, 0);
- }
-#endif
- return 0;
-}
-
module_init(sa1100fb_init);
MODULE_DESCRIPTION("StrongARM-1100/1110 framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index b568c646a76c..2ba91d62af92 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -355,12 +355,12 @@ SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
}
break;
case 400:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) {
if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth];
}
break;
case 512:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) {
if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth];
}
break;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 125df366e23a..a10f1057293b 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -132,8 +132,6 @@ static struct fb_info info;
*/
static struct xxx_par __initdata current_par;
-int xxxfb_init(void);
-
/**
* xxxfb_open - Optional function. Called when the framebuffer is
* first accessed.
@@ -894,7 +892,7 @@ static struct pci_driver xxxfb_driver = {
MODULE_DEVICE_TABLE(pci, xxxfb_id_table);
-int __init xxxfb_init(void)
+static int __init xxxfb_init(void)
{
/*
* For kernel boot options (in 'video=xxxfb:<options>' format)
@@ -975,7 +973,7 @@ static struct platform_device *xxxfb_device;
* Only necessary if your driver takes special options,
* otherwise we fall back on the generic fb_setup().
*/
-int __init xxxfb_setup(char *options)
+static int __init xxxfb_setup(char *options)
{
/* Parse user specified options (`video=xxxfb:') */
}
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index a6c9d4f26669..1007023a5e88 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -90,11 +90,7 @@ struct fb_info_valkyrie {
u32 pseudo_palette[16];
};
-/*
- * Exported functions
- */
-int valkyriefb_init(void);
-int valkyriefb_setup(char*);
+static int valkyriefb_setup(char*);
static int valkyriefb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
@@ -302,7 +298,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
default_vmode, default_cmode);
}
-int __init valkyriefb_init(void)
+static int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
unsigned long frame_buffer_phys, cmap_regs_phys;
@@ -549,7 +545,7 @@ static int __init valkyrie_init_info(struct fb_info *info,
/*
* Parse user specified options (`video=valkyriefb:')
*/
-int __init valkyriefb_setup(char *options)
+static int __init valkyriefb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 62318cef5f8c..49b9f148d3a1 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -508,6 +508,8 @@ static int vt8623fb_set_par(struct fb_info *info)
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
index 5ff1c53740c0..d75ab3f66da4 100644
--- a/drivers/virt/acrn/ioreq.c
+++ b/drivers/virt/acrn/ioreq.c
@@ -246,12 +246,8 @@ void acrn_ioreq_request_clear(struct acrn_vm *vm)
spin_lock_bh(&vm->ioreq_clients_lock);
client = vm->default_client;
if (client) {
- vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
- while (vcpu < ACRN_IO_REQUEST_MAX) {
+ for_each_set_bit(vcpu, client->ioreqs_map, ACRN_IO_REQUEST_MAX)
acrn_ioreq_complete_request(client, vcpu, NULL);
- vcpu = find_next_bit(client->ioreqs_map,
- ACRN_IO_REQUEST_MAX, vcpu + 1);
- }
}
spin_unlock_bh(&vm->ioreq_clients_lock);
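The ACRN change above replaces an open-coded find_first_bit()/find_next_bit() loop with the for_each_set_bit() iterator. A minimal kernel-style sketch of the same idiom over an illustrative bitmap (assumes <linux/bitops.h> and <linux/printk.h>; names are not from the driver):

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_complete_pending(unsigned long *pending, unsigned int nbits)
{
	unsigned int vcpu;

	/* visits only the bits that are set, in ascending order */
	for_each_set_bit(vcpu, pending, nbits)
		pr_info("completing I/O request for vcpu %u\n", vcpu);
}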
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 90ce16b6e05f..f422f9c58ba7 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct miscdevice *misc;
+ void __iomem *mapping;
int ret;
if (!dev->platform_data)
return -ENODEV;
data = (struct sev_guest_platform_data *)dev->platform_data;
- layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
- if (!layout)
+ mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+ if (!mapping)
return -ENODEV;
+ layout = (__force void *)mapping;
+
ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev)
@@ -706,7 +709,7 @@ e_free_response:
e_free_request:
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap:
- iounmap(layout);
+ iounmap(mapping);
return ret;
}
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index 2d3d98158121..ce91add81401 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -16,8 +16,9 @@ config NITRO_ENCLAVES
The module will be called nitro_enclaves.
config NITRO_ENCLAVES_MISC_DEV_TEST
- bool "Tests for the misc device functionality of the Nitro Enclaves"
- depends on NITRO_ENCLAVES && KUNIT=y
+ bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
+ depends on NITRO_ENCLAVES && KUNIT
+ default KUNIT_ALL_TESTS
help
Enable KUnit tests for the misc device functionality of the Nitro
Enclaves. Select this option only if you will boot the kernel for
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index 20c881b6a4b6..241b94f62e56 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -1759,35 +1759,10 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST)
#include "ne_misc_dev_test.c"
-
-static inline int ne_misc_dev_test_init(void)
-{
- return __kunit_test_suites_init(ne_misc_dev_test_suites);
-}
-
-static inline void ne_misc_dev_test_exit(void)
-{
- __kunit_test_suites_exit(ne_misc_dev_test_suites);
-}
-#else
-static inline int ne_misc_dev_test_init(void)
-{
- return 0;
-}
-
-static inline void ne_misc_dev_test_exit(void)
-{
-}
#endif
static int __init ne_init(void)
{
- int rc = 0;
-
- rc = ne_misc_dev_test_init();
- if (rc < 0)
- return rc;
-
mutex_init(&ne_cpu_pool.mutex);
return pci_register_driver(&ne_pci_driver);
@@ -1798,8 +1773,6 @@ static void __exit ne_exit(void)
pci_unregister_driver(&ne_pci_driver);
ne_teardown_cpu_pool();
-
- ne_misc_dev_test_exit();
}
module_init(ne_init);
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
index 265797bed0ea..74df43b925be 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
@@ -151,7 +151,4 @@ static struct kunit_suite ne_misc_dev_test_suite = {
.test_cases = ne_misc_dev_test_cases,
};
-static struct kunit_suite *ne_misc_dev_test_suites[] = {
- &ne_misc_dev_test_suite,
- NULL
-};
+kunit_test_suite(ne_misc_dev_test_suite);
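With kunit_test_suite() the suite registers itself, so the hand-rolled __kunit_test_suites_init()/__kunit_test_suites_exit() glue removed from ne_init()/ne_exit() above is no longer needed. A minimal, self-contained KUnit suite using the same pattern (illustrative names, not from the Nitro Enclaves driver):

#include <kunit/test.h>

static void example_addition_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 2 + 2, 4);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_addition_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

kunit_test_suite(example_test_suite);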
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 73eb34849eab..4ccfd30c2a30 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -356,8 +356,8 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
goto err_vbg_core_exit;
}
- ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
- DEVICE_NAME, gdev);
+ ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME,
+ gdev);
if (ret) {
vbg_err("vboxguest: Error requesting irq: %d\n", ret);
goto err_vbg_core_exit;
@@ -367,7 +367,7 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME, ret);
- goto err_vbg_core_exit;
+ goto err_free_irq;
}
ret = misc_register(&gdev->misc_device_user);
@@ -403,6 +403,8 @@ err_unregister_misc_device_user:
misc_deregister(&gdev->misc_device_user);
err_unregister_misc_device:
misc_deregister(&gdev->misc_device);
+err_free_irq:
+ free_irq(pci->irq, gdev);
err_vbg_core_exit:
vbg_core_exit(gdev);
err_disable_pcidev:
@@ -419,6 +421,7 @@ static void vbg_pci_remove(struct pci_dev *pci)
vbg_gdev = NULL;
mutex_unlock(&vbg_gdev_mutex);
+ free_irq(pci->irq, gdev);
device_remove_file(gdev->dev, &dev_attr_host_features);
device_remove_file(gdev->dev, &dev_attr_host_version);
misc_deregister(&gdev->misc_device_user);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index e1556d2a355a..0a53a61231c2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+config VIRTIO_ANCHOR
+ bool
+
config VIRTIO
tristate
+ select VIRTIO_ANCHOR
help
This option is selected by any driver which implements the virtio
bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
@@ -31,11 +35,12 @@ if VIRTIO_MENU
config VIRTIO_HARDEN_NOTIFICATION
bool "Harden virtio notification"
+ depends on BROKEN
help
Enable this to harden the device notifications and suppress
those that happen at a time where notifications are illegal.
- Experimental: Note that several drivers still have bugs that
+ Experimental: Note that several drivers still have issues that
may cause crashes or hangs when correct handling of
notifications is enforced; depending on the subset of
drivers and devices you use, this may or may not work.
@@ -122,9 +127,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver was only tested under x86-64 and arm64, but should
- theoretically work on all architectures that support memory hotplug
- and hotremove.
+ This driver currently only supports x86-64 and arm64. Although it
+ should compile on other architectures that implement memory
+ hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to work as
+ expected.
If unsure, say M.
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 0a82d0873248..8e98d24917cc 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
+obj-$(CONFIG_VIRTIO_ANCHOR) += virtio_anchor.o
obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o
obj-$(CONFIG_VIRTIO_PCI_LIB_LEGACY) += virtio_pci_legacy_dev.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 7deeed30d1f3..828ced060742 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -2,10 +2,10 @@
#include <linux/virtio.h>
#include <linux/spinlock.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_anchor.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/of.h>
-#include <linux/platform-feature.h>
#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
@@ -174,7 +174,7 @@ static int virtio_features_ok(struct virtio_device *dev)
might_sleep();
- if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) {
+ if (virtio_check_mem_acc_cb(dev)) {
if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
dev_warn(&dev->dev,
"device must provide VIRTIO_F_VERSION_1\n");
@@ -428,7 +428,9 @@ int register_virtio_device(struct virtio_device *dev)
goto out;
dev->index = err;
- dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = dev_set_name(&dev->dev, "virtio%u", dev->index);
+ if (err)
+ goto out_ida_remove;
err = virtio_device_of_init(dev);
if (err)
diff --git a/drivers/virtio/virtio_anchor.c b/drivers/virtio/virtio_anchor.c
new file mode 100644
index 000000000000..4d6a5d269b55
--- /dev/null
+++ b/drivers/virtio/virtio_anchor.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/virtio.h>
+#include <linux/virtio_anchor.h>
+
+bool virtio_require_restricted_mem_acc(struct virtio_device *dev)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtio_require_restricted_mem_acc);
+
+static bool virtio_no_restricted_mem_acc(struct virtio_device *dev)
+{
+ return false;
+}
+
+bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev) =
+ virtio_no_restricted_mem_acc;
+EXPORT_SYMBOL_GPL(virtio_check_mem_acc_cb);
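virtio_check_mem_acc_cb defaults to virtio_no_restricted_mem_acc(), so ordinary guests see no behaviour change; platforms that must enforce restricted memory access point the callback at the exported virtio_require_restricted_mem_acc() during early setup. A hedged sketch of that wiring (the platform hook name is illustrative; real callers typically go through a small setter helper in <linux/virtio_anchor.h>, assumed here, while the direct assignment below only illustrates the mechanism):

#include <linux/init.h>
#include <linux/virtio_anchor.h>

static int __init example_platform_restrict_virtio(void)
{
	/* force VIRTIO_F_VERSION_1 + VIRTIO_F_ACCESS_PLATFORM checks in virtio core */
	virtio_check_mem_acc_cb = virtio_require_restricted_mem_acc;
	return 0;
}
early_initcall(example_platform_restrict_virtio);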
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index b9737da6c4dd..3f78a3a1eb75 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -17,9 +17,6 @@
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
-#include <linux/mount.h>
-#include <linux/magic.h>
-#include <linux/pseudo_fs.h>
#include <linux/page_reporting.h>
/*
@@ -42,10 +39,6 @@
(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
-#ifdef CONFIG_BALLOON_COMPACTION
-static struct vfsmount *balloon_mnt;
-#endif
-
enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_INFLATE,
VIRTIO_BALLOON_VQ_DEFLATE,
@@ -805,18 +798,6 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
return MIGRATEPAGE_SUCCESS;
}
-
-static int balloon_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, BALLOON_KVM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type balloon_fs = {
- .name = "balloon-kvm",
- .init_fs_context = balloon_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
#endif /* CONFIG_BALLOON_COMPACTION */
static unsigned long shrink_free_pages(struct virtio_balloon *vb,
@@ -875,7 +856,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
vb->shrinker.count_objects = virtio_balloon_shrinker_count;
vb->shrinker.seeks = DEFAULT_SEEKS;
- return register_shrinker(&vb->shrinker);
+ return register_shrinker(&vb->shrinker, "virtio-balloon");
}
static int virtballoon_probe(struct virtio_device *vdev)
@@ -909,19 +890,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_free_vb;
#ifdef CONFIG_BALLOON_COMPACTION
- balloon_mnt = kern_mount(&balloon_fs);
- if (IS_ERR(balloon_mnt)) {
- err = PTR_ERR(balloon_mnt);
- goto out_del_vqs;
- }
-
vb->vb_dev_info.migratepage = virtballoon_migratepage;
- vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
- if (IS_ERR(vb->vb_dev_info.inode)) {
- err = PTR_ERR(vb->vb_dev_info.inode);
- goto out_kern_unmount;
- }
- vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
/*
@@ -930,13 +899,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
*/
if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
err = -ENOSPC;
- goto out_iput;
+ goto out_del_vqs;
}
vb->balloon_wq = alloc_workqueue("balloon-wq",
WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
if (!vb->balloon_wq) {
err = -ENOMEM;
- goto out_iput;
+ goto out_del_vqs;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
@@ -1030,13 +999,7 @@ out_unregister_shrinker:
out_del_balloon_wq:
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
destroy_workqueue(vb->balloon_wq);
-out_iput:
-#ifdef CONFIG_BALLOON_COMPACTION
- iput(vb->vb_dev_info.inode);
-out_kern_unmount:
- kern_unmount(balloon_mnt);
out_del_vqs:
-#endif
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -1083,12 +1046,6 @@ static void virtballoon_remove(struct virtio_device *vdev)
}
remove_common(vb);
-#ifdef CONFIG_BALLOON_COMPACTION
- if (vb->vb_dev_info.inode)
- iput(vb->vb_dev_info.inode);
-
- kern_unmount(balloon_mnt);
-#endif
kfree(vb);
}
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index e07486f01999..0c2892ec6817 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -862,8 +862,7 @@ static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
unsigned long mb_id,
unsigned long start_pfn)
{
- const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
int new_state;
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
@@ -1158,8 +1157,7 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
*/
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
- const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
/*
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 083ff1eb743d..3ff746e3f24a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -403,6 +403,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
+ vq->num_max = num;
+
/* Activate the queue */
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
@@ -487,6 +489,9 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
if (err)
return err;
+ if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
+ enable_irq_wake(irq);
+
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ca51fcc9daab..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+ /*
+ * If the vq is still in the reset state (e.g. re-enabling it failed),
+ * info->node was already removed from the queue list and never
+ * rejoined; skip the removal here to prevent unexpected irqs.
+ */
+ if (!vq->reset) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ }
vp_dev->del_vq(info);
kfree(info);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5e5721145c7..2257f1b3d8ae 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -135,6 +135,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
+ vq->num_max = num;
+
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 623906b4996c..c3b9f2761849 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+ if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+ __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
}
/* virtio config->finalize_features() implementation */
@@ -176,6 +179,110 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
+static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ unsigned long index;
+
+ index = vq->index;
+
+ /* activate the queue */
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
+
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+ return -ENOENT;
+
+ vp_modern_set_queue_reset(mdev, vq->index);
+
+ info = vp_dev->vqs[vq->index];
+
+ /* delete vq from irq handler */
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ INIT_LIST_HEAD(&info->node);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_break(vq);
+#endif
+
+ /* For the case where vq has an exclusive irq, call synchronize_irq() to
+ * wait for completion.
+ *
+ * note: We can't use disable_irq() since it conflicts with the affinity
+ * managed IRQ that is used by some drivers.
+ */
+ if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+ vq->reset = true;
+
+ return 0;
+}
+
+static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags, index;
+ int err;
+
+ if (!vq->reset)
+ return -EBUSY;
+
+ index = vq->index;
+ info = vp_dev->vqs[index];
+
+ if (vp_modern_get_queue_reset(mdev, index))
+ return -EBUSY;
+
+ if (vp_modern_get_queue_enable(mdev, index))
+ return -EBUSY;
+
+ err = vp_active_vq(vq, info->msix_vector);
+ if (err)
+ return err;
+
+ if (vq->callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_unbreak(vq);
+#endif
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+ vq->reset = false;
+
+ return 0;
+}
+
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -218,32 +325,21 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
- /* activate the queue */
- vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
- vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
- virtqueue_get_avail_addr(vq),
- virtqueue_get_used_addr(vq));
+ vq->num_max = num;
+
+ err = vp_active_vq(vq, msix_vec);
+ if (err)
+ goto err;
vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
- goto err_map_notify;
- }
-
- if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto err_assign_vector;
- }
+ goto err;
}
return vq;
-err_assign_vector:
- if (!mdev->notify_base)
- pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
-err_map_notify:
+err:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -401,6 +497,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -419,6 +517,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
/* the PCI probing function */
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index fa2a9445bb18..869cb46bef96 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -3,6 +3,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
/*
* vp_modern_map_capability - map a part of virtio pci capability
@@ -475,6 +476,44 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
EXPORT_SYMBOL_GPL(vp_modern_set_status);
/*
+ * vp_modern_get_queue_reset - get the queue reset status
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ return vp_ioread16(&cfg->queue_reset);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
+
+/*
+ * vp_modern_set_queue_reset - reset the queue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ vp_iowrite16(1, &cfg->queue_reset);
+
+ while (vp_ioread16(&cfg->queue_reset))
+ msleep(1);
+
+ while (vp_ioread16(&cfg->cfg.queue_enable))
+ msleep(1);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
+
+/*
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 643ca779fcc6..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -85,6 +85,71 @@ struct vring_desc_extra {
u16 next; /* The next desc state in a list. */
};
+struct vring_virtqueue_split {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+
+ /*
+ * The vring creation parameters are kept here so that a new
+ * vring can be created with them later.
+ */
+ u32 vring_align;
+ bool may_reduce_num;
+};
+
+struct vring_virtqueue_packed {
+ /* Actual memory layout for this queue. */
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -124,64 +189,10 @@ struct vring_virtqueue {
union {
/* Available for split ring */
- struct {
- /* Actual memory layout for this queue. */
- struct vring vring;
-
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
-
- /*
- * Last written value to avail->idx in
- * guest byte order.
- */
- u16 avail_idx_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_split *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t queue_dma_addr;
- size_t queue_size_in_bytes;
- } split;
+ struct vring_virtqueue_split split;
/* Available for packed ring */
- struct {
- /* Actual memory layout for this queue. */
- struct {
- unsigned int num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Avail used flags. */
- u16 avail_used_flags;
-
- /* Index of the next avail descriptor. */
- u16 next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- u16 event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
+ struct vring_virtqueue_packed packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
@@ -200,6 +211,16 @@ struct vring_virtqueue {
#endif
};
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
+static void vring_free(struct virtqueue *_vq);
/*
* Helpers.
@@ -364,6 +385,24 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
return dma_mapping_error(vring_dma_dev(vq), addr);
}
+static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
+{
+ vq->vq.num_free = num;
+
+ if (vq->packed_ring)
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ else
+ vq->last_used_idx = 0;
+
+ vq->event_triggered = false;
+ vq->num_added = 0;
+
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+}
+
/*
* Split ring specific functions - *_split().
@@ -907,28 +946,107 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
return NULL;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
+ struct vring_virtqueue *vq)
+{
+ struct virtio_device *vdev;
+
+ vdev = vq->vq.vdev;
+
+ vring_split->avail_flags_shadow = 0;
+ vring_split->avail_idx_shadow = 0;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!vq->vq.callback) {
+ vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
+ vring_split->avail_flags_shadow);
+ }
+}
+
+static void virtqueue_reinit_split(struct vring_virtqueue *vq)
+{
+ int num;
+
+ num = vq->split.vring.num;
+
+ vq->split.vring.avail->flags = 0;
+ vq->split.vring.avail->idx = 0;
+
+ /* reset avail event */
+ vq->split.vring.avail->ring[num] = 0;
+
+ vq->split.vring.used->flags = 0;
+ vq->split.vring.used->idx = 0;
+
+ /* reset used event */
+ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
+
+ virtqueue_init(vq, num);
+
+ virtqueue_vring_init_split(&vq->split, vq);
+}
+
+static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
+ struct vring_virtqueue_split *vring_split)
+{
+ vq->split = *vring_split;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
+{
+ struct vring_desc_state_split *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_split->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!state)
+ goto err_state;
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_extra;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_split));
+
+ vring_split->desc_state = state;
+ vring_split->desc_extra = extra;
+ return 0;
+
+err_extra:
+ kfree(state);
+err_state:
+ return -ENOMEM;
+}
+
+static void vring_free_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev)
+{
+ vring_free_queue(vdev, vring_split->queue_size_in_bytes,
+ vring_split->vring.desc,
+ vring_split->queue_dma_addr);
+
+ kfree(vring_split->desc_state);
+ kfree(vring_split->desc_extra);
+}
+
+static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ u32 num,
+ unsigned int vring_align,
+ bool may_reduce_num)
{
- struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
+ return -EINVAL;
}
/* TODO: allocate each queue chunk individually */
@@ -939,11 +1057,11 @@ static struct virtqueue *vring_create_virtqueue_split(
if (queue)
break;
if (!may_reduce_num)
- return NULL;
+ return -ENOMEM;
}
if (!num)
- return NULL;
+ return -ENOMEM;
if (!queue) {
/* Try to get a single page. You are my only hope! */
@@ -951,26 +1069,85 @@ static struct virtqueue *vring_create_virtqueue_split(
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
- return NULL;
+ return -ENOMEM;
+
+ vring_init(&vring_split->vring, num, queue, vring_align);
+
+ vring_split->queue_dma_addr = dma_addr;
+ vring_split->queue_size_in_bytes = vring_size(num, vring_align);
+
+ vring_split->vring_align = vring_align;
+ vring_split->may_reduce_num = may_reduce_num;
+
+ return 0;
+}
+
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct virtqueue *vq;
+ int err;
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
+ may_reduce_num);
+ if (err)
+ return NULL;
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
+ vring_free_split(&vring_split, vdev);
return NULL;
}
- to_vvq(vq)->split.queue_dma_addr = dma_addr;
- to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
+static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num,
+ vq->split.vring_align,
+ vq->split.may_reduce_num);
+ if (err)
+ goto err;
+
+ err = vring_alloc_state_extra_split(&vring_split);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_split(&vring_split, vq);
+
+ virtqueue_init(vq, vring_split.vring.num);
+ virtqueue_vring_attach_split(vq, &vring_split);
+
+ return 0;
+
+err_state_extra:
+ vring_free_split(&vring_split, vdev);
+err:
+ virtqueue_reinit_split(vq);
+ return -ENOMEM;
+}
+
/*
* Packed ring specific functions - *_packed().
@@ -1637,8 +1814,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
- unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
struct vring_desc_extra *desc_extra;
unsigned int i;
@@ -1656,19 +1832,32 @@ static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *v
return desc_extra;
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev)
+{
+ if (vring_packed->vring.desc)
+ vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
+ vring_packed->vring.desc,
+ vring_packed->ring_dma_addr);
+
+ if (vring_packed->vring.driver)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.driver,
+ vring_packed->driver_event_dma_addr);
+
+ if (vring_packed->vring.device)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.device,
+ vring_packed->device_event_dma_addr);
+
+ kfree(vring_packed->desc_state);
+ kfree(vring_packed->desc_extra);
+}
+
+static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ u32 num)
{
- struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
@@ -1680,7 +1869,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
- goto err_ring;
+ goto err;
+
+ vring_packed->vring.desc = ring;
+ vring_packed->ring_dma_addr = ring_dma_addr;
+ vring_packed->ring_size_in_bytes = ring_size_in_bytes;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
@@ -1688,13 +1881,112 @@ static struct virtqueue *vring_create_virtqueue_packed(
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
- goto err_driver;
+ goto err;
+
+ vring_packed->vring.driver = driver;
+ vring_packed->event_size_in_bytes = event_size_in_bytes;
+ vring_packed->driver_event_dma_addr = driver_event_dma_addr;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
- goto err_device;
+ goto err;
+
+ vring_packed->vring.device = device;
+ vring_packed->device_event_dma_addr = device_event_dma_addr;
+
+ vring_packed->vring.num = num;
+
+ return 0;
+
+err:
+ vring_free_packed(vring_packed, vdev);
+ return -ENOMEM;
+}
+
+static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
+{
+ struct vring_desc_state_packed *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_packed->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+ if (!state)
+ goto err_desc_state;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_packed));
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_desc_extra;
+
+ vring_packed->desc_state = state;
+ vring_packed->desc_extra = extra;
+
+ return 0;
+
+err_desc_extra:
+ kfree(state);
+err_desc_state:
+ return -ENOMEM;
+}
+
+static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
+ bool callback)
+{
+ vring_packed->next_avail_idx = 0;
+ vring_packed->avail_wrap_counter = 1;
+ vring_packed->event_flags_shadow = 0;
+ vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vring_packed->vring.driver->flags =
+ cpu_to_le16(vring_packed->event_flags_shadow);
+ }
+}
+
+static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
+ struct vring_virtqueue_packed *vring_packed)
+{
+ vq->packed = *vring_packed;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
+{
+ memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
+ memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
+
+ /* We need to reset desc.flags; see is_used_desc_packed() for details. */
+ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
+
+ virtqueue_init(vq, vq->packed.vring.num);
+ virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
+}
+
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
@@ -1703,8 +1995,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -1713,15 +2005,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- vq->event_triggered = false;
- vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1730,65 +2015,58 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
+ virtqueue_vring_init_packed(&vring_packed, !!callback);
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num,
- sizeof(struct vring_desc_state_packed),
- GFP_KERNEL);
- if (!vq->packed.desc_state)
- goto err_desc_state;
-
- memset(vq->packed.desc_state, 0,
- num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
-
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (!vq->packed.desc_extra)
- goto err_desc_extra;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags =
- cpu_to_le16(vq->packed.event_flags_shadow);
- }
+ virtqueue_init(vq, num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_desc_extra:
- kfree(vq->packed.desc_state);
-err_desc_state:
+err_state_extra:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+ vring_free_packed(&vring_packed, vdev);
err_ring:
return NULL;
}
+static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
+
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
+
+ virtqueue_init(vq, vring_packed.vring.num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
+
+ return 0;
+
+err_state_extra:
+ vring_free_packed(&vring_packed, vdev);
+err_ring:
+ virtqueue_reinit_packed(vq);
+ return -ENOMEM;
+}
+
/*
* Generic functions and exported symbols.
@@ -2131,8 +2409,8 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or after a queue reset.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
@@ -2148,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2180,16 +2466,17 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
+ int err;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
@@ -2202,8 +2489,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -2212,14 +2499,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2228,47 +2508,22 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->split.queue_dma_addr = 0;
- vq->split.queue_size_in_bytes = 0;
-
- vq->split.vring = vring;
- vq->split.avail_flags_shadow = 0;
- vq->split.avail_idx_shadow = 0;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
- vq->split.avail_flags_shadow);
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
}
- vq->split.desc_state = kmalloc_array(vring.num,
- sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state)
- goto err_state;
-
- vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
- if (!vq->split.desc_extra)
- goto err_extra;
+ virtqueue_vring_init_split(vring_split, vq);
- /* Put everything in free lists. */
- vq->free_head = 0;
- memset(vq->split.desc_state, 0, vring.num *
- sizeof(struct vring_desc_state_split));
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-
-err_extra:
- kfree(vq->split.desc_state);
-err_state:
- kfree(vq);
- return NULL;
}
-EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
@@ -2294,6 +2549,75 @@ struct virtqueue *vring_create_virtqueue(
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @num: new ring size (number of descriptors)
+ * @recycle: callback invoked to recycle buffers that are no longer used
+ *
+ * When a new vring actually needs to be created, the current vq is put into
+ * the reset state and the passed callback is invoked to recycle every buffer
+ * that is no longer used. The old vring is released only after the new vring
+ * has been created successfully.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring; falls back to the original ring size.
+ * The vq can still be used normally.
+ * -EBUSY: Failed to sync with the device; the vq may not work properly.
+ * -ENOENT: Transport or device does not support per-queue reset.
+ * -E2BIG/-EINVAL: num is invalid (too large or zero).
+ * -EPERM: Operation not permitted
+ *
+ */
+int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (num > vq->vq.num_max)
+ return -E2BIG;
+
+ if (!num)
+ return -EINVAL;
+
+ if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ return 0;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ if (vq->packed_ring)
+ err = virtqueue_resize_packed(_vq, num);
+ else
+ err = virtqueue_resize_split(_vq, num);
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
+
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2306,25 +2630,21 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
- struct vring vring;
+ struct vring_virtqueue_split vring_split = {};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vring_init(&vring_split.vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
-void vring_del_virtqueue(struct virtqueue *_vq)
+static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- spin_lock(&vq->vq.vdev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&vq->vq.vdev->vqs_list_lock);
-
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2355,6 +2675,18 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
+}
+
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
+ vring_free(_vq);
+
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2402,6 +2734,30 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_break(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_break);
+
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_unbreak(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
+
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
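(Editor's note: for reference, a minimal, hypothetical usage sketch of the new virtqueue_resize() API, not part of the patch. The driver function names, the queue size of 128 and the kfree()-based recycle callback are illustrative; a real driver frees buffers with whatever mechanism it allocated them and must quiesce the queue before calling this, as the kernel-doc above requires.)

static void example_recycle(struct virtqueue *vq, void *buf)
{
	/* Hypothetical: release a buffer that was still queued on the old ring. */
	kfree(buf);
}

static int example_shrink_ring(struct virtqueue *vq)
{
	int err;

	/*
	 * The caller must ensure no other virtqueue operations run
	 * concurrently, and num must not exceed vq->num_max (-E2BIG).
	 */
	err = virtqueue_resize(vq, 128, example_recycle);
	if (err == -ENOENT)
		pr_info("transport does not support per-queue reset\n");

	return err;
}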
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c40f7deb6b5a..9670cc79371d 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -183,6 +183,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ vq->num_max = max_num;
+
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
diff --git a/drivers/vme/Kconfig b/drivers/vme/Kconfig
deleted file mode 100644
index c13dd9d2a604..000000000000
--- a/drivers/vme/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# VME configuration.
-#
-
-menuconfig VME_BUS
- bool "VME bridge support"
- depends on PCI
- help
- If you say Y here you get support for the VME bridge Framework.
-
-if VME_BUS
-
-source "drivers/vme/bridges/Kconfig"
-
-source "drivers/vme/boards/Kconfig"
-
-endif # VME
diff --git a/drivers/vme/Makefile b/drivers/vme/Makefile
deleted file mode 100644
index 8bfe4b370c41..000000000000
--- a/drivers/vme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the VME bridge device drivers.
-#
-obj-$(CONFIG_VME_BUS) += vme.o
-
-obj-y += bridges/
-obj-y += boards/
diff --git a/drivers/vme/boards/Kconfig b/drivers/vme/boards/Kconfig
deleted file mode 100644
index 7a255f72980b..000000000000
--- a/drivers/vme/boards/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-comment "VME Board Drivers"
-
-config VMIVME_7805
- tristate "VMIVME-7805"
- help
- If you say Y here you get support for the VMIVME-7805 board.
- This board has an additional control interface to the Universe II
- chip. This driver has to be included if you want to access VME bus
- with VMIVME-7805 board.
diff --git a/drivers/vme/boards/Makefile b/drivers/vme/boards/Makefile
deleted file mode 100644
index 87122381452c..000000000000
--- a/drivers/vme/boards/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the VME board drivers.
-#
-
-obj-$(CONFIG_VMIVME_7805) += vme_vmivme7805.o
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
deleted file mode 100644
index 51e056bae943..000000000000
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Support for the VMIVME-7805 board access to the Universe II bridge.
- *
- * Author: Arthur Benilov <arthur.benilov@iba-group.com>
- * Copyright 2010 Ion Beam Application, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/io.h>
-
-#include "vme_vmivme7805.h"
-
-static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmic_remove(struct pci_dev *);
-
-/** Base address to access FPGA register */
-static void __iomem *vmic_base;
-
-static const char driver_name[] = "vmivme_7805";
-
-static const struct pci_device_id vmic_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
- { },
-};
-
-static struct pci_driver vmic_driver = {
- .name = driver_name,
- .id_table = vmic_ids,
- .probe = vmic_probe,
- .remove = vmic_remove,
-};
-
-static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- int retval;
- u32 data;
-
- /* Enable the device */
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "Unable to enable device\n");
- goto err;
- }
-
- /* Map Registers */
- retval = pci_request_regions(pdev, driver_name);
- if (retval) {
- dev_err(&pdev->dev, "Unable to reserve resources\n");
- goto err_resource;
- }
-
- /* Map registers in BAR 0 */
- vmic_base = ioremap(pci_resource_start(pdev, 0), 16);
- if (!vmic_base) {
- dev_err(&pdev->dev, "Unable to remap CRG region\n");
- retval = -EIO;
- goto err_remap;
- }
-
- /* Clear the FPGA VME IF contents */
- iowrite32(0, vmic_base + VME_CONTROL);
-
- /* Clear any initial BERR */
- data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
- data |= BM_VME_CONTROL_BERRST;
- iowrite32(data, vmic_base + VME_CONTROL);
-
- /* Enable the vme interface and byte swapping */
- data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
- data = data | BM_VME_CONTROL_MASTER_ENDIAN |
- BM_VME_CONTROL_SLAVE_ENDIAN |
- BM_VME_CONTROL_ABLE |
- BM_VME_CONTROL_BERRI |
- BM_VME_CONTROL_BPENA |
- BM_VME_CONTROL_VBENA;
- iowrite32(data, vmic_base + VME_CONTROL);
-
- return 0;
-
-err_remap:
- pci_release_regions(pdev);
-err_resource:
- pci_disable_device(pdev);
-err:
- return retval;
-}
-
-static void vmic_remove(struct pci_dev *pdev)
-{
- iounmap(vmic_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
-}
-
-module_pci_driver(vmic_driver);
-
-MODULE_DESCRIPTION("VMIVME-7805 board support driver");
-MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/vme/boards/vme_vmivme7805.h b/drivers/vme/boards/vme_vmivme7805.h
deleted file mode 100644
index c2c5e3053d3f..000000000000
--- a/drivers/vme/boards/vme_vmivme7805.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * vmivme_7805.h
- *
- * Support for the VMIVME-7805 board access to the Universe II bridge.
- *
- * Author: Arthur Benilov <arthur.benilov@iba-group.com>
- * Copyright 2010 Ion Beam Application, Inc.
- */
-
-
-#ifndef _VMIVME_7805_H
-#define _VMIVME_7805_H
-
-#ifndef PCI_VENDOR_ID_VMIC
-#define PCI_VENDOR_ID_VMIC 0x114A
-#endif
-
-#ifndef PCI_DEVICE_ID_VTIMR
-#define PCI_DEVICE_ID_VTIMR 0x0004
-#endif
-
-#define VME_CONTROL 0x0000
-#define BM_VME_CONTROL_MASTER_ENDIAN 0x0001
-#define BM_VME_CONTROL_SLAVE_ENDIAN 0x0002
-#define BM_VME_CONTROL_ABLE 0x0004
-#define BM_VME_CONTROL_BERRI 0x0040
-#define BM_VME_CONTROL_BERRST 0x0080
-#define BM_VME_CONTROL_BPENA 0x0400
-#define BM_VME_CONTROL_VBENA 0x0800
-
-#endif /* _VMIVME_7805_H */
-
diff --git a/drivers/vme/bridges/Kconfig b/drivers/vme/bridges/Kconfig
deleted file mode 100644
index cb3baed64914..000000000000
--- a/drivers/vme/bridges/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-comment "VME Bridge Drivers"
-
-config VME_CA91CX42
- tristate "Universe II"
- depends on VIRT_TO_BUS
- help
- If you say Y here you get support for the Tundra CA91C142
- (Universe II) VME bridge chip.
-
-config VME_TSI148
- tristate "Tempe"
- depends on HAS_DMA
- help
- If you say Y here you get support for the Tundra TSI148 VME bridge
- chip.
-
-config VME_FAKE
- tristate "Fake"
- help
- If you say Y here you get support for the fake VME bridge. This
- provides a virtualised VME Bus for devices with no VME bridge. This
- is mainly useful for VME development (in the absence of VME
- hardware).
diff --git a/drivers/vme/bridges/Makefile b/drivers/vme/bridges/Makefile
deleted file mode 100644
index 0a6cf843438a..000000000000
--- a/drivers/vme/bridges/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VME_CA91CX42) += vme_ca91cx42.o
-obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
-obj-$(CONFIG_VME_FAKE) += vme_fake.o
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
deleted file mode 100644
index 439b0edeca08..000000000000
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ /dev/null
@@ -1,1928 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Support for the Tundra Universe I/II VME-PCI Bridge Chips
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * Based on work by Tom Armistead and Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * Derived from ca91c042.c by Michael Wyrick
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/poll.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/vme.h>
-
-#include "../vme_bridge.h"
-#include "vme_ca91cx42.h"
-
-static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
-static void ca91cx42_remove(struct pci_dev *);
-
-/* Module parameters */
-static int geoid;
-
-static const char driver_name[] = "vme_ca91cx42";
-
-static const struct pci_device_id ca91cx42_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
- { },
-};
-
-MODULE_DEVICE_TABLE(pci, ca91cx42_ids);
-
-static struct pci_driver ca91cx42_driver = {
- .name = driver_name,
- .id_table = ca91cx42_ids,
- .probe = ca91cx42_probe,
- .remove = ca91cx42_remove,
-};
-
-static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
-{
- wake_up(&bridge->dma_queue);
-
- return CA91CX42_LINT_DMA;
-}
-
-static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
-{
- int i;
- u32 serviced = 0;
-
- for (i = 0; i < 4; i++) {
- if (stat & CA91CX42_LINT_LM[i]) {
- /* We only enable interrupts if the callback is set */
- bridge->lm_callback[i](bridge->lm_data[i]);
- serviced |= CA91CX42_LINT_LM[i];
- }
- }
-
- return serviced;
-}
-
-/* XXX This needs to be split into 4 queues */
-static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
-{
- wake_up(&bridge->mbox_queue);
-
- return CA91CX42_LINT_MBOX;
-}
-
-static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
-{
- wake_up(&bridge->iack_queue);
-
- return CA91CX42_LINT_SW_IACK;
-}
-
-static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
-{
- int val;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- val = ioread32(bridge->base + DGCS);
-
- if (!(val & 0x00000800)) {
- dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
- "Read Error DGCS=%08X\n", val);
- }
-
- return CA91CX42_LINT_VERR;
-}
-
-static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
-{
- int val;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- val = ioread32(bridge->base + DGCS);
-
- if (!(val & 0x00000800))
- dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
- "Read Error DGCS=%08X\n", val);
-
- return CA91CX42_LINT_LERR;
-}
-
-
-static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
- int stat)
-{
- int vec, i, serviced = 0;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
-
- for (i = 7; i > 0; i--) {
- if (stat & (1 << i)) {
- vec = ioread32(bridge->base +
- CA91CX42_V_STATID[i]) & 0xff;
-
- vme_irq_handler(ca91cx42_bridge, i, vec);
-
- serviced |= (1 << i);
- }
- }
-
- return serviced;
-}
-
-static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
-{
- u32 stat, enable, serviced = 0;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *bridge;
-
- ca91cx42_bridge = ptr;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- enable = ioread32(bridge->base + LINT_EN);
- stat = ioread32(bridge->base + LINT_STAT);
-
- /* Only look at unmasked interrupts */
- stat &= enable;
-
- if (unlikely(!stat))
- return IRQ_NONE;
-
- if (stat & CA91CX42_LINT_DMA)
- serviced |= ca91cx42_DMA_irqhandler(bridge);
- if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
- CA91CX42_LINT_LM3))
- serviced |= ca91cx42_LM_irqhandler(bridge, stat);
- if (stat & CA91CX42_LINT_MBOX)
- serviced |= ca91cx42_MB_irqhandler(bridge, stat);
- if (stat & CA91CX42_LINT_SW_IACK)
- serviced |= ca91cx42_IACK_irqhandler(bridge);
- if (stat & CA91CX42_LINT_VERR)
- serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
- if (stat & CA91CX42_LINT_LERR)
- serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
- if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
- CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
- CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
- CA91CX42_LINT_VIRQ7))
- serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
-
- /* Clear serviced interrupts */
- iowrite32(serviced, bridge->base + LINT_STAT);
-
- return IRQ_HANDLED;
-}
-
-static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
-{
- int result, tmp;
- struct pci_dev *pdev;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Need pdev */
- pdev = to_pci_dev(ca91cx42_bridge->parent);
-
- /* Disable interrupts from PCI to VME */
- iowrite32(0, bridge->base + VINT_EN);
-
- /* Disable PCI interrupts */
- iowrite32(0, bridge->base + LINT_EN);
- /* Clear Any Pending PCI Interrupts */
- iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
-
- result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
- driver_name, ca91cx42_bridge);
- if (result) {
- dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
- pdev->irq);
- return result;
- }
-
- /* Ensure all interrupts are mapped to PCI Interrupt 0 */
- iowrite32(0, bridge->base + LINT_MAP0);
- iowrite32(0, bridge->base + LINT_MAP1);
- iowrite32(0, bridge->base + LINT_MAP2);
-
- /* Enable DMA, mailbox & LM Interrupts */
- tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
- CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
- CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
-
- iowrite32(tmp, bridge->base + LINT_EN);
-
- return 0;
-}
-
-static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
- struct pci_dev *pdev)
-{
- struct vme_bridge *ca91cx42_bridge;
-
- /* Disable interrupts from PCI to VME */
- iowrite32(0, bridge->base + VINT_EN);
-
- /* Disable PCI interrupts */
- iowrite32(0, bridge->base + LINT_EN);
- /* Clear Any Pending PCI Interrupts */
- iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
-
- ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
- driver_priv);
- free_irq(pdev->irq, ca91cx42_bridge);
-}
-
-static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
-{
- u32 tmp;
-
- tmp = ioread32(bridge->base + LINT_STAT);
-
- if (tmp & (1 << level))
- return 0;
- else
- return 1;
-}
-
-/*
- * Set up an VME interrupt
- */
-static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
- int state, int sync)
-
-{
- struct pci_dev *pdev;
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Enable IRQ level */
- tmp = ioread32(bridge->base + LINT_EN);
-
- if (state == 0)
- tmp &= ~CA91CX42_LINT_VIRQ[level];
- else
- tmp |= CA91CX42_LINT_VIRQ[level];
-
- iowrite32(tmp, bridge->base + LINT_EN);
-
- if ((state == 0) && (sync != 0)) {
- pdev = to_pci_dev(ca91cx42_bridge->parent);
-
- synchronize_irq(pdev->irq);
- }
-}
-
-static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
- int statid)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Universe can only generate even vectors */
- if (statid & 1)
- return -EINVAL;
-
- mutex_lock(&bridge->vme_int);
-
- tmp = ioread32(bridge->base + VINT_EN);
-
- /* Set Status/ID */
- iowrite32(statid << 24, bridge->base + STATID);
-
- /* Assert VMEbus IRQ */
- tmp = tmp | (1 << (level + 24));
- iowrite32(tmp, bridge->base + VINT_EN);
-
- /* Wait for IACK */
- wait_event_interruptible(bridge->iack_queue,
- ca91cx42_iack_received(bridge, level));
-
- /* Return interrupt to low state */
- tmp = ioread32(bridge->base + VINT_EN);
- tmp = tmp & ~(1 << (level + 24));
- iowrite32(tmp, bridge->base + VINT_EN);
-
- mutex_unlock(&bridge->vme_int);
-
- return 0;
-}
-
-static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, u32 aspace, u32 cycle)
-{
- unsigned int i, addr = 0, granularity;
- unsigned int temp_ctl = 0;
- unsigned int vme_bound, pci_offset;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *bridge;
-
- ca91cx42_bridge = image->parent;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- i = image->number;
-
- switch (aspace) {
- case VME_A16:
- addr |= CA91CX42_VSI_CTL_VAS_A16;
- break;
- case VME_A24:
- addr |= CA91CX42_VSI_CTL_VAS_A24;
- break;
- case VME_A32:
- addr |= CA91CX42_VSI_CTL_VAS_A32;
- break;
- case VME_USER1:
- addr |= CA91CX42_VSI_CTL_VAS_USER1;
- break;
- case VME_USER2:
- addr |= CA91CX42_VSI_CTL_VAS_USER2;
- break;
- case VME_A64:
- case VME_CRCSR:
- case VME_USER3:
- case VME_USER4:
- default:
- dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- /*
- * Bound address is a valid address for the window, adjust
- * accordingly
- */
- vme_bound = vme_base + size;
- pci_offset = pci_base - vme_base;
-
- if ((i == 0) || (i == 4))
- granularity = 0x1000;
- else
- granularity = 0x10000;
-
- if (vme_base & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME base "
- "alignment\n");
- return -EINVAL;
- }
- if (vme_bound & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
- "alignment\n");
- return -EINVAL;
- }
- if (pci_offset & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
- "alignment\n");
- return -EINVAL;
- }
-
- /* Disable while we are mucking around */
- temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
- temp_ctl &= ~CA91CX42_VSI_CTL_EN;
- iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
-
- /* Setup mapping */
- iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
- iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
- iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
-
- /* Setup address space */
- temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
- temp_ctl |= addr;
-
- /* Setup cycle types */
- temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
- if (cycle & VME_SUPER)
- temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
- if (cycle & VME_USER)
- temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
- if (cycle & VME_PROG)
- temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
- if (cycle & VME_DATA)
- temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
-
- /* Write ctl reg without enable */
- iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
-
- if (enabled)
- temp_ctl |= CA91CX42_VSI_CTL_EN;
-
- iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
-
- return 0;
-}
-
-static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
-{
- unsigned int i, granularity = 0, ctl = 0;
- unsigned long long vme_bound, pci_offset;
- struct ca91cx42_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- i = image->number;
-
- if ((i == 0) || (i == 4))
- granularity = 0x1000;
- else
- granularity = 0x10000;
-
- /* Read Registers */
- ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
-
- *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
- vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
- pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
-
- *pci_base = (dma_addr_t)*vme_base + pci_offset;
- *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
-
- *enabled = 0;
- *aspace = 0;
- *cycle = 0;
-
- if (ctl & CA91CX42_VSI_CTL_EN)
- *enabled = 1;
-
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
- *aspace = VME_A16;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
- *aspace = VME_A24;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
- *aspace = VME_A32;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
- *aspace = VME_USER1;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
- *aspace = VME_USER2;
-
- if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
- *cycle |= VME_SUPER;
- if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
- *cycle |= VME_USER;
- if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
- *cycle |= VME_PROG;
- if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
- *cycle |= VME_DATA;
-
- return 0;
-}
-
-/*
- * Allocate and map PCI Resource
- */
-static int ca91cx42_alloc_resource(struct vme_master_resource *image,
- unsigned long long size)
-{
- unsigned long long existing_size;
- int retval = 0;
- struct pci_dev *pdev;
- struct vme_bridge *ca91cx42_bridge;
-
- ca91cx42_bridge = image->parent;
-
- /* Find pci_dev container of dev */
- if (!ca91cx42_bridge->parent) {
- dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
- return -EINVAL;
- }
- pdev = to_pci_dev(ca91cx42_bridge->parent);
-
- existing_size = (unsigned long long)(image->bus_resource.end -
- image->bus_resource.start);
-
- /* If the existing size is OK, return */
- if (existing_size == (size - 1))
- return 0;
-
- if (existing_size != 0) {
- iounmap(image->kern_base);
- image->kern_base = NULL;
- kfree(image->bus_resource.name);
- release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(image->bus_resource));
- }
-
- if (!image->bus_resource.name) {
- image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (!image->bus_resource.name) {
- retval = -ENOMEM;
- goto err_name;
- }
- }
-
- sprintf((char *)image->bus_resource.name, "%s.%d",
- ca91cx42_bridge->name, image->number);
-
- image->bus_resource.start = 0;
- image->bus_resource.end = (unsigned long)size;
- image->bus_resource.flags = IORESOURCE_MEM;
-
- retval = pci_bus_alloc_resource(pdev->bus,
- &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
- 0, NULL, NULL);
- if (retval) {
- dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
- "resource for window %d size 0x%lx start 0x%lx\n",
- image->number, (unsigned long)size,
- (unsigned long)image->bus_resource.start);
- goto err_resource;
- }
-
- image->kern_base = ioremap(
- image->bus_resource.start, size);
- if (!image->kern_base) {
- dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
- retval = -ENOMEM;
- goto err_remap;
- }
-
- return 0;
-
-err_remap:
- release_resource(&image->bus_resource);
-err_resource:
- kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(image->bus_resource));
-err_name:
- return retval;
-}
-
-/*
- * Free and unmap PCI Resource
- */
-static void ca91cx42_free_resource(struct vme_master_resource *image)
-{
- iounmap(image->kern_base);
- image->kern_base = NULL;
- release_resource(&image->bus_resource);
- kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(image->bus_resource));
-}
-
-
-static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
-{
- int retval = 0;
- unsigned int i, granularity = 0;
- unsigned int temp_ctl = 0;
- unsigned long long pci_bound, vme_offset, pci_base;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *bridge;
-
- ca91cx42_bridge = image->parent;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- i = image->number;
-
- if ((i == 0) || (i == 4))
- granularity = 0x1000;
- else
- granularity = 0x10000;
-
- /* Verify input data */
- if (vme_base & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
- "alignment\n");
- retval = -EINVAL;
- goto err_window;
- }
- if (size & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
- "alignment\n");
- retval = -EINVAL;
- goto err_window;
- }
-
- spin_lock(&image->lock);
-
- /*
- * Let's allocate the resource here rather than further up the stack as
- * it avoids pushing loads of bus dependent stuff up the stack
- */
- retval = ca91cx42_alloc_resource(image, size);
- if (retval) {
- spin_unlock(&image->lock);
- dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
- "for resource name\n");
- retval = -ENOMEM;
- goto err_res;
- }
-
- pci_base = (unsigned long long)image->bus_resource.start;
-
- /*
- * Bound address is a valid address for the window, adjust
- * according to window granularity.
- */
- pci_bound = pci_base + size;
- vme_offset = vme_base - pci_base;
-
- /* Disable while we are mucking around */
- temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
- temp_ctl &= ~CA91CX42_LSI_CTL_EN;
- iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
-
- /* Setup cycle types */
- temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
- if (cycle & VME_BLT)
- temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
- if (cycle & VME_MBLT)
- temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
-
- /* Setup data width */
- temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
- switch (dwidth) {
- case VME_D8:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
- break;
- case VME_D16:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
- break;
- case VME_D32:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
- break;
- case VME_D64:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
- break;
- default:
- spin_unlock(&image->lock);
- dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
- retval = -EINVAL;
- goto err_dwidth;
- break;
- }
-
- /* Setup address space */
- temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
- switch (aspace) {
- case VME_A16:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
- break;
- case VME_A24:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
- break;
- case VME_A32:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
- break;
- case VME_CRCSR:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
- break;
- case VME_USER1:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
- break;
- case VME_USER2:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
- break;
- case VME_A64:
- case VME_USER3:
- case VME_USER4:
- default:
- spin_unlock(&image->lock);
- dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
- retval = -EINVAL;
- goto err_aspace;
- break;
- }
-
- temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
- if (cycle & VME_SUPER)
- temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
- if (cycle & VME_PROG)
- temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
-
- /* Setup mapping */
- iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
- iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
- iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
-
- /* Write ctl reg without enable */
- iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
-
- if (enabled)
- temp_ctl |= CA91CX42_LSI_CTL_EN;
-
- iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
-
- spin_unlock(&image->lock);
- return 0;
-
-err_aspace:
-err_dwidth:
- ca91cx42_free_resource(image);
-err_res:
-err_window:
- return retval;
-}
-
-static int __ca91cx42_master_get(struct vme_master_resource *image,
- int *enabled, unsigned long long *vme_base, unsigned long long *size,
- u32 *aspace, u32 *cycle, u32 *dwidth)
-{
- unsigned int i, ctl;
- unsigned long long pci_base, pci_bound, vme_offset;
- struct ca91cx42_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- i = image->number;
-
- ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
-
- pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
- vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
- pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
-
- *vme_base = pci_base + vme_offset;
- *size = (unsigned long long)(pci_bound - pci_base);
-
- *enabled = 0;
- *aspace = 0;
- *cycle = 0;
- *dwidth = 0;
-
- if (ctl & CA91CX42_LSI_CTL_EN)
- *enabled = 1;
-
- /* Setup address space */
- switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
- case CA91CX42_LSI_CTL_VAS_A16:
- *aspace = VME_A16;
- break;
- case CA91CX42_LSI_CTL_VAS_A24:
- *aspace = VME_A24;
- break;
- case CA91CX42_LSI_CTL_VAS_A32:
- *aspace = VME_A32;
- break;
- case CA91CX42_LSI_CTL_VAS_CRCSR:
- *aspace = VME_CRCSR;
- break;
- case CA91CX42_LSI_CTL_VAS_USER1:
- *aspace = VME_USER1;
- break;
- case CA91CX42_LSI_CTL_VAS_USER2:
- *aspace = VME_USER2;
- break;
- }
-
- /* XXX Not sure howto check for MBLT */
- /* Setup cycle types */
- if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
- *cycle |= VME_BLT;
- else
- *cycle |= VME_SCT;
-
- if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
- *cycle |= VME_SUPER;
- else
- *cycle |= VME_USER;
-
- if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
- *cycle = VME_PROG;
- else
- *cycle = VME_DATA;
-
- /* Setup data width */
- switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
- case CA91CX42_LSI_CTL_VDW_D8:
- *dwidth = VME_D8;
- break;
- case CA91CX42_LSI_CTL_VDW_D16:
- *dwidth = VME_D16;
- break;
- case CA91CX42_LSI_CTL_VDW_D32:
- *dwidth = VME_D32;
- break;
- case CA91CX42_LSI_CTL_VDW_D64:
- *dwidth = VME_D64;
- break;
- }
-
- return 0;
-}
-
-static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
-{
- int retval;
-
- spin_lock(&image->lock);
-
- retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
-
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
- void *buf, size_t count, loff_t offset)
-{
- ssize_t retval;
- void __iomem *addr = image->kern_base + offset;
- unsigned int done = 0;
- unsigned int count32;
-
- if (count == 0)
- return 0;
-
- spin_lock(&image->lock);
-
- /* The following code handles VME address alignment. We cannot use
- * memcpy_xxx here because it may cut data transfers in to 8-bit
- * cycles when D16 or D32 cycles are required on the VME bus.
- * On the other hand, the bridge itself assures that the maximum data
- * cycle configured for the transfer is used and splits it
- * automatically for non-aligned addresses, so we don't want the
- * overhead of needlessly forcing small transfers for the entire cycle.
- */
- if ((uintptr_t)addr & 0x1) {
- *(u8 *)buf = ioread8(addr);
- done += 1;
- if (done == count)
- goto out;
- }
- if ((uintptr_t)(addr + done) & 0x2) {
- if ((count - done) < 2) {
- *(u8 *)(buf + done) = ioread8(addr + done);
- done += 1;
- goto out;
- } else {
- *(u16 *)(buf + done) = ioread16(addr + done);
- done += 2;
- }
- }
-
- count32 = (count - done) & ~0x3;
- while (done < count32) {
- *(u32 *)(buf + done) = ioread32(addr + done);
- done += 4;
- }
-
- if ((count - done) & 0x2) {
- *(u16 *)(buf + done) = ioread16(addr + done);
- done += 2;
- }
- if ((count - done) & 0x1) {
- *(u8 *)(buf + done) = ioread8(addr + done);
- done += 1;
- }
-out:
- retval = count;
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
- void *buf, size_t count, loff_t offset)
-{
- ssize_t retval;
- void __iomem *addr = image->kern_base + offset;
- unsigned int done = 0;
- unsigned int count32;
-
- if (count == 0)
- return 0;
-
- spin_lock(&image->lock);
-
- /* Here we apply for the same strategy we do in master_read
- * function in order to assure the correct cycles.
- */
- if ((uintptr_t)addr & 0x1) {
- iowrite8(*(u8 *)buf, addr);
- done += 1;
- if (done == count)
- goto out;
- }
- if ((uintptr_t)(addr + done) & 0x2) {
- if ((count - done) < 2) {
- iowrite8(*(u8 *)(buf + done), addr + done);
- done += 1;
- goto out;
- } else {
- iowrite16(*(u16 *)(buf + done), addr + done);
- done += 2;
- }
- }
-
- count32 = (count - done) & ~0x3;
- while (done < count32) {
- iowrite32(*(u32 *)(buf + done), addr + done);
- done += 4;
- }
-
- if ((count - done) & 0x2) {
- iowrite16(*(u16 *)(buf + done), addr + done);
- done += 2;
- }
- if ((count - done) & 0x1) {
- iowrite8(*(u8 *)(buf + done), addr + done);
- done += 1;
- }
-out:
- retval = count;
-
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset)
-{
- u32 result;
- uintptr_t pci_addr;
- struct ca91cx42_driver *bridge;
- struct device *dev;
-
- bridge = image->parent->driver_priv;
- dev = image->parent->parent;
-
- /* Find the PCI address that maps to the desired VME address */
-
- /* Locking as we can only do one of these at a time */
- mutex_lock(&bridge->vme_rmw);
-
- /* Lock image */
- spin_lock(&image->lock);
-
- pci_addr = (uintptr_t)image->kern_base + offset;
-
- /* Address must be 4-byte aligned */
- if (pci_addr & 0x3) {
- dev_err(dev, "RMW Address not 4-byte aligned\n");
- result = -EINVAL;
- goto out;
- }
-
- /* Ensure RMW Disabled whilst configuring */
- iowrite32(0, bridge->base + SCYC_CTL);
-
- /* Configure registers */
- iowrite32(mask, bridge->base + SCYC_EN);
- iowrite32(compare, bridge->base + SCYC_CMP);
- iowrite32(swap, bridge->base + SCYC_SWP);
- iowrite32(pci_addr, bridge->base + SCYC_ADDR);
-
- /* Enable RMW */
- iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
-
- /* Kick process off with a read to the required address. */
- result = ioread32(image->kern_base + offset);
-
- /* Disable RMW */
- iowrite32(0, bridge->base + SCYC_CTL);
-
-out:
- spin_unlock(&image->lock);
-
- mutex_unlock(&bridge->vme_rmw);
-
- return result;
-}
-
-static int ca91cx42_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
-{
- struct ca91cx42_dma_entry *entry, *prev;
- struct vme_dma_pci *pci_attr;
- struct vme_dma_vme *vme_attr;
- dma_addr_t desc_ptr;
- int retval = 0;
- struct device *dev;
-
- dev = list->parent->parent->parent;
-
- /* XXX descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- retval = -ENOMEM;
- goto err_mem;
- }
-
- /* Test descriptor alignment */
- if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
- dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
- "required: %p\n", &entry->descriptor);
- retval = -EINVAL;
- goto err_align;
- }
-
- memset(&entry->descriptor, 0, sizeof(entry->descriptor));
-
- if (dest->type == VME_DMA_VME) {
- entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
- vme_attr = dest->private;
- pci_attr = src->private;
- } else {
- vme_attr = src->private;
- pci_attr = dest->private;
- }
-
-	/* Check that we can fulfill the required attributes */
- if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
- VME_USER2)) != 0) {
-
-		dev_err(dev, "Unsupported address space\n");
- retval = -EINVAL;
- goto err_aspace;
- }
-
- if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
- VME_PROG | VME_DATA)) != 0) {
-
- dev_err(dev, "Unsupported cycle type\n");
- retval = -EINVAL;
- goto err_cycle;
- }
-
- /* Check to see if we can fulfill source and destination */
- if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
- ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
-
- dev_err(dev, "Cannot perform transfer with this "
- "source-destination combination\n");
- retval = -EINVAL;
- goto err_direct;
- }
-
- /* Setup cycle types */
- if (vme_attr->cycle & VME_BLT)
- entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
-
- /* Setup data width */
- switch (vme_attr->dwidth) {
- case VME_D8:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
- break;
- case VME_D16:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
- break;
- case VME_D32:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
- break;
- case VME_D64:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
- break;
- default:
- dev_err(dev, "Invalid data width\n");
- return -EINVAL;
- }
-
- /* Setup address space */
- switch (vme_attr->aspace) {
- case VME_A16:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
- break;
- case VME_A24:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
- break;
- case VME_A32:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
- break;
- case VME_USER1:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
- break;
- case VME_USER2:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
- break;
- default:
- dev_err(dev, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (vme_attr->cycle & VME_SUPER)
- entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
- if (vme_attr->cycle & VME_PROG)
- entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
-
- entry->descriptor.dtbc = count;
- entry->descriptor.dla = pci_attr->address;
- entry->descriptor.dva = vme_attr->address;
- entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
-
- /* Add to list */
- list_add_tail(&entry->list, &list->entries);
-
- /* Fill out previous descriptors "Next Address" */
- if (entry->list.prev != &list->entries) {
- prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
- list);
- /* We need the bus address for the pointer */
- desc_ptr = virt_to_bus(&entry->descriptor);
- prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
- }
-
- return 0;
-
-err_cycle:
-err_aspace:
-err_direct:
-err_align:
- kfree(entry);
-err_mem:
- return retval;
-}
-
-static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- tmp = ioread32(bridge->base + DGCS);
-
- if (tmp & CA91CX42_DGCS_ACT)
- return 0;
- else
- return 1;
-}
-
-static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
-{
- struct vme_dma_resource *ctrlr;
- struct ca91cx42_dma_entry *entry;
- int retval;
- dma_addr_t bus_addr;
- u32 val;
- struct device *dev;
- struct ca91cx42_driver *bridge;
-
- ctrlr = list->parent;
-
- bridge = ctrlr->parent->driver_priv;
- dev = ctrlr->parent->parent;
-
- mutex_lock(&ctrlr->mtx);
-
- if (!(list_empty(&ctrlr->running))) {
- /*
- * XXX We have an active DMA transfer and currently haven't
- * sorted out the mechanism for "pending" DMA transfers.
- * Return busy.
- */
- /* Need to add to pending here */
- mutex_unlock(&ctrlr->mtx);
- return -EBUSY;
- } else {
- list_add(&list->list, &ctrlr->running);
- }
-
- /* Get first bus address and write into registers */
- entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
- list);
-
- bus_addr = virt_to_bus(&entry->descriptor);
-
- mutex_unlock(&ctrlr->mtx);
-
- iowrite32(0, bridge->base + DTBC);
- iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
-
- /* Start the operation */
- val = ioread32(bridge->base + DGCS);
-
- /* XXX Could set VMEbus On and Off Counters here */
- val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
-
- val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
- CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
- CA91CX42_DGCS_PERR);
-
- iowrite32(val, bridge->base + DGCS);
-
- val |= CA91CX42_DGCS_GO;
-
- iowrite32(val, bridge->base + DGCS);
-
- retval = wait_event_interruptible(bridge->dma_queue,
- ca91cx42_dma_busy(ctrlr->parent));
-
- if (retval) {
- val = ioread32(bridge->base + DGCS);
- iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS);
- /* Wait for the operation to abort */
- wait_event(bridge->dma_queue,
- ca91cx42_dma_busy(ctrlr->parent));
- retval = -EINTR;
- goto exit;
- }
-
- /*
-	 * Read the status register; this register is valid until we kick off a
- * new transfer.
- */
- val = ioread32(bridge->base + DGCS);
-
- if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
- CA91CX42_DGCS_PERR)) {
-
- dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
- val = ioread32(bridge->base + DCTL);
- retval = -EIO;
- }
-
-exit:
- /* Remove list from running list */
- mutex_lock(&ctrlr->mtx);
- list_del(&list->list);
- mutex_unlock(&ctrlr->mtx);
-
- return retval;
-
-}
-
-static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
-{
- struct list_head *pos, *temp;
- struct ca91cx42_dma_entry *entry;
-
- /* detach and free each entry */
- list_for_each_safe(pos, temp, &list->entries) {
- list_del(pos);
- entry = list_entry(pos, struct ca91cx42_dma_entry, list);
- kfree(entry);
- }
-
- return 0;
-}
-
-/*
- * All 4 location monitors reside at the same base - this is therefore a
- * system wide configuration.
- *
- * This does not enable the location monitor - that should be done when the first
- * callback is attached and disabled when the last callback is removed.
- */
-static int ca91cx42_lm_set(struct vme_lm_resource *lm,
- unsigned long long lm_base, u32 aspace, u32 cycle)
-{
- u32 temp_base, lm_ctl = 0;
- int i;
- struct ca91cx42_driver *bridge;
- struct device *dev;
-
- bridge = lm->parent->driver_priv;
- dev = lm->parent->parent;
-
- /* Check the alignment of the location monitor */
- temp_base = (u32)lm_base;
- if (temp_base & 0xffff) {
- dev_err(dev, "Location monitor must be aligned to 64KB "
- "boundary");
- return -EINVAL;
- }
-
- mutex_lock(&lm->mtx);
-
- /* If we already have a callback attached, we can't move it! */
- for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i]) {
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Location monitor callback attached, "
- "can't reset\n");
- return -EBUSY;
- }
- }
-
- switch (aspace) {
- case VME_A16:
- lm_ctl |= CA91CX42_LM_CTL_AS_A16;
- break;
- case VME_A24:
- lm_ctl |= CA91CX42_LM_CTL_AS_A24;
- break;
- case VME_A32:
- lm_ctl |= CA91CX42_LM_CTL_AS_A32;
- break;
- default:
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (cycle & VME_SUPER)
- lm_ctl |= CA91CX42_LM_CTL_SUPR;
- if (cycle & VME_USER)
- lm_ctl |= CA91CX42_LM_CTL_NPRIV;
- if (cycle & VME_PROG)
- lm_ctl |= CA91CX42_LM_CTL_PGM;
- if (cycle & VME_DATA)
- lm_ctl |= CA91CX42_LM_CTL_DATA;
-
- iowrite32(lm_base, bridge->base + LM_BS);
- iowrite32(lm_ctl, bridge->base + LM_CTL);
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-/* Get configuration of the location monitor and return whether it is enabled
- * or disabled.
- */
-static int ca91cx42_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, u32 *aspace, u32 *cycle)
-{
- u32 lm_ctl, enabled = 0;
- struct ca91cx42_driver *bridge;
-
- bridge = lm->parent->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
- lm_ctl = ioread32(bridge->base + LM_CTL);
-
- if (lm_ctl & CA91CX42_LM_CTL_EN)
- enabled = 1;
-
- if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
- *aspace = VME_A16;
- if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
- *aspace = VME_A24;
- if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
- *aspace = VME_A32;
-
- *cycle = 0;
- if (lm_ctl & CA91CX42_LM_CTL_SUPR)
- *cycle |= VME_SUPER;
- if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
- *cycle |= VME_USER;
- if (lm_ctl & CA91CX42_LM_CTL_PGM)
- *cycle |= VME_PROG;
- if (lm_ctl & CA91CX42_LM_CTL_DATA)
- *cycle |= VME_DATA;
-
- mutex_unlock(&lm->mtx);
-
- return enabled;
-}
-
-/*
- * Attach a callback to a specific location monitor.
- *
- * Callback will be passed the monitor triggered.
- */
-static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(void *), void *data)
-{
- u32 lm_ctl, tmp;
- struct ca91cx42_driver *bridge;
- struct device *dev;
-
- bridge = lm->parent->driver_priv;
- dev = lm->parent->parent;
-
- mutex_lock(&lm->mtx);
-
- /* Ensure that the location monitor is configured - need PGM or DATA */
- lm_ctl = ioread32(bridge->base + LM_CTL);
- if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Location monitor not properly configured\n");
- return -EINVAL;
- }
-
- /* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor]) {
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Existing callback attached\n");
- return -EBUSY;
- }
-
- /* Attach callback */
- bridge->lm_callback[monitor] = callback;
- bridge->lm_data[monitor] = data;
-
- /* Enable Location Monitor interrupt */
- tmp = ioread32(bridge->base + LINT_EN);
- tmp |= CA91CX42_LINT_LM[monitor];
- iowrite32(tmp, bridge->base + LINT_EN);
-
-	/* Ensure that the global Location Monitor Enable bit is set */
- if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
- lm_ctl |= CA91CX42_LM_CTL_EN;
- iowrite32(lm_ctl, bridge->base + LM_CTL);
- }
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-/*
- * Detach a callback function from a specific location monitor.
- */
-static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = lm->parent->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- /* Disable Location Monitor and ensure previous interrupts are clear */
- tmp = ioread32(bridge->base + LINT_EN);
- tmp &= ~CA91CX42_LINT_LM[monitor];
- iowrite32(tmp, bridge->base + LINT_EN);
-
- iowrite32(CA91CX42_LINT_LM[monitor],
- bridge->base + LINT_STAT);
-
- /* Detach callback */
- bridge->lm_callback[monitor] = NULL;
- bridge->lm_data[monitor] = NULL;
-
- /* If all location monitors disabled, disable global Location Monitor */
- if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
- CA91CX42_LINT_LM3)) == 0) {
- tmp = ioread32(bridge->base + LM_CTL);
- tmp &= ~CA91CX42_LM_CTL_EN;
- iowrite32(tmp, bridge->base + LM_CTL);
- }
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
-{
- u32 slot = 0;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- if (!geoid) {
- slot = ioread32(bridge->base + VCSR_BS);
- slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
- } else
- slot = geoid;
-
- return (int)slot;
-
-}
-
-static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
- dma_addr_t *dma)
-{
- struct pci_dev *pdev;
-
- /* Find pci_dev container of dev */
- pdev = to_pci_dev(parent);
-
- return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
-}
-
-static void ca91cx42_free_consistent(struct device *parent, size_t size,
- void *vaddr, dma_addr_t dma)
-{
- struct pci_dev *pdev;
-
- /* Find pci_dev container of dev */
- pdev = to_pci_dev(parent);
-
- dma_free_coherent(&pdev->dev, size, vaddr, dma);
-}
-
-/*
- * Configure CR/CSR space
- *
- * Access to the CR/CSR can be configured at power-up. The location of the
- * CR/CSR registers in the CR/CSR address space is determined by the board's
- * Auto-ID or Geographic address. This function ensures that the window is
- * enabled at an offset consistent with the board's geographic address.
- */
-static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
- struct pci_dev *pdev)
-{
- unsigned int crcsr_addr;
- int tmp, slot;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- slot = ca91cx42_slot_get(ca91cx42_bridge);
-
- /* Write CSR Base Address if slot ID is supplied as a module param */
- if (geoid)
- iowrite32(geoid << 27, bridge->base + VCSR_BS);
-
- dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
- if (slot == 0) {
- dev_err(&pdev->dev, "Slot number is unset, not configuring "
- "CR/CSR space\n");
- return -EINVAL;
- }
-
- /* Allocate mem for CR/CSR image */
- bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
- VME_CRCSR_BUF_SIZE,
- &bridge->crcsr_bus, GFP_KERNEL);
- if (!bridge->crcsr_kernel) {
- dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
- "image\n");
- return -ENOMEM;
- }
-
- crcsr_addr = slot * (512 * 1024);
- iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
-
- tmp = ioread32(bridge->base + VCSR_CTL);
- tmp |= CA91CX42_VCSR_CTL_EN;
- iowrite32(tmp, bridge->base + VCSR_CTL);
-
- return 0;
-}
-
-static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
- struct pci_dev *pdev)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Turn off CR/CSR space */
- tmp = ioread32(bridge->base + VCSR_CTL);
- tmp &= ~CA91CX42_VCSR_CTL_EN;
- iowrite32(tmp, bridge->base + VCSR_CTL);
-
- /* Free image */
- iowrite32(0, bridge->base + VCSR_TO);
-
- dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
- bridge->crcsr_kernel, bridge->crcsr_bus);
-}
-
-static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- int retval, i;
- u32 data;
- struct list_head *pos = NULL, *n;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *ca91cx42_device;
- struct vme_master_resource *master_image;
- struct vme_slave_resource *slave_image;
- struct vme_dma_resource *dma_ctrlr;
- struct vme_lm_resource *lm;
-
- /* We want to support more than one of each bridge so we need to
- * dynamically allocate the bridge structure
- */
- ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
- if (!ca91cx42_bridge) {
- retval = -ENOMEM;
- goto err_struct;
- }
- vme_init_bridge(ca91cx42_bridge);
-
- ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
- if (!ca91cx42_device) {
- retval = -ENOMEM;
- goto err_driver;
- }
-
- ca91cx42_bridge->driver_priv = ca91cx42_device;
-
- /* Enable the device */
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "Unable to enable device\n");
- goto err_enable;
- }
-
- /* Map Registers */
- retval = pci_request_regions(pdev, driver_name);
- if (retval) {
- dev_err(&pdev->dev, "Unable to reserve resources\n");
- goto err_resource;
- }
-
- /* map registers in BAR 0 */
- ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
- 4096);
- if (!ca91cx42_device->base) {
- dev_err(&pdev->dev, "Unable to remap CRG region\n");
- retval = -EIO;
- goto err_remap;
- }
-
- /* Check to see if the mapping worked out */
- data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
- if (data != PCI_VENDOR_ID_TUNDRA) {
- dev_err(&pdev->dev, "PCI_ID check failed\n");
- retval = -EIO;
- goto err_test;
- }
-
- /* Initialize wait queues & mutual exclusion flags */
- init_waitqueue_head(&ca91cx42_device->dma_queue);
- init_waitqueue_head(&ca91cx42_device->iack_queue);
- mutex_init(&ca91cx42_device->vme_int);
- mutex_init(&ca91cx42_device->vme_rmw);
-
- ca91cx42_bridge->parent = &pdev->dev;
- strcpy(ca91cx42_bridge->name, driver_name);
-
- /* Setup IRQ */
- retval = ca91cx42_irq_init(ca91cx42_bridge);
- if (retval != 0) {
- dev_err(&pdev->dev, "Chip Initialization failed.\n");
- goto err_irq;
- }
-
- /* Add master windows to list */
- for (i = 0; i < CA91C142_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
- if (!master_image) {
- retval = -ENOMEM;
- goto err_master;
- }
- master_image->parent = ca91cx42_bridge;
- spin_lock_init(&master_image->lock);
- master_image->locked = 0;
- master_image->number = i;
- master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
- VME_CRCSR | VME_USER1 | VME_USER2;
- master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_SUPER | VME_USER | VME_PROG | VME_DATA;
- master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
- memset(&master_image->bus_resource, 0,
- sizeof(master_image->bus_resource));
- master_image->kern_base = NULL;
- list_add_tail(&master_image->list,
- &ca91cx42_bridge->master_resources);
- }
-
- /* Add slave windows to list */
- for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
- if (!slave_image) {
- retval = -ENOMEM;
- goto err_slave;
- }
- slave_image->parent = ca91cx42_bridge;
- mutex_init(&slave_image->mtx);
- slave_image->locked = 0;
- slave_image->number = i;
- slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
- VME_USER2;
-
- /* Only windows 0 and 4 support A16 */
- if (i == 0 || i == 4)
- slave_image->address_attr |= VME_A16;
-
- slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_SUPER | VME_USER | VME_PROG | VME_DATA;
- list_add_tail(&slave_image->list,
- &ca91cx42_bridge->slave_resources);
- }
-
- /* Add dma engines to list */
- for (i = 0; i < CA91C142_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
- if (!dma_ctrlr) {
- retval = -ENOMEM;
- goto err_dma;
- }
- dma_ctrlr->parent = ca91cx42_bridge;
- mutex_init(&dma_ctrlr->mtx);
- dma_ctrlr->locked = 0;
- dma_ctrlr->number = i;
- dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
- VME_DMA_MEM_TO_VME;
- INIT_LIST_HEAD(&dma_ctrlr->pending);
- INIT_LIST_HEAD(&dma_ctrlr->running);
- list_add_tail(&dma_ctrlr->list,
- &ca91cx42_bridge->dma_resources);
- }
-
- /* Add location monitor to list */
- lm = kmalloc(sizeof(*lm), GFP_KERNEL);
- if (!lm) {
- retval = -ENOMEM;
- goto err_lm;
- }
- lm->parent = ca91cx42_bridge;
- mutex_init(&lm->mtx);
- lm->locked = 0;
- lm->number = 1;
- lm->monitors = 4;
- list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
-
- ca91cx42_bridge->slave_get = ca91cx42_slave_get;
- ca91cx42_bridge->slave_set = ca91cx42_slave_set;
- ca91cx42_bridge->master_get = ca91cx42_master_get;
- ca91cx42_bridge->master_set = ca91cx42_master_set;
- ca91cx42_bridge->master_read = ca91cx42_master_read;
- ca91cx42_bridge->master_write = ca91cx42_master_write;
- ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
- ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
- ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
- ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
- ca91cx42_bridge->irq_set = ca91cx42_irq_set;
- ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
- ca91cx42_bridge->lm_set = ca91cx42_lm_set;
- ca91cx42_bridge->lm_get = ca91cx42_lm_get;
- ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
- ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
- ca91cx42_bridge->slot_get = ca91cx42_slot_get;
- ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
- ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
-
- data = ioread32(ca91cx42_device->base + MISC_CTL);
- dev_info(&pdev->dev, "Board is%s the VME system controller\n",
- (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
- dev_info(&pdev->dev, "Slot ID is %d\n",
- ca91cx42_slot_get(ca91cx42_bridge));
-
- if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
- dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
-
- /* Need to save ca91cx42_bridge pointer locally in link list for use in
- * ca91cx42_remove()
- */
- retval = vme_register_bridge(ca91cx42_bridge);
- if (retval != 0) {
- dev_err(&pdev->dev, "Chip Registration failed.\n");
- goto err_reg;
- }
-
- pci_set_drvdata(pdev, ca91cx42_bridge);
-
- return 0;
-
-err_reg:
- ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
-err_lm:
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
- lm = list_entry(pos, struct vme_lm_resource, list);
- list_del(pos);
- kfree(lm);
- }
-err_dma:
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
- dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
- list_del(pos);
- kfree(dma_ctrlr);
- }
-err_slave:
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
- slave_image = list_entry(pos, struct vme_slave_resource, list);
- list_del(pos);
- kfree(slave_image);
- }
-err_master:
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
- list_del(pos);
- kfree(master_image);
- }
-
- ca91cx42_irq_exit(ca91cx42_device, pdev);
-err_irq:
-err_test:
- iounmap(ca91cx42_device->base);
-err_remap:
- pci_release_regions(pdev);
-err_resource:
- pci_disable_device(pdev);
-err_enable:
- kfree(ca91cx42_device);
-err_driver:
- kfree(ca91cx42_bridge);
-err_struct:
- return retval;
-
-}
-
-static void ca91cx42_remove(struct pci_dev *pdev)
-{
- struct list_head *pos = NULL, *n;
- struct vme_master_resource *master_image;
- struct vme_slave_resource *slave_image;
- struct vme_dma_resource *dma_ctrlr;
- struct vme_lm_resource *lm;
- struct ca91cx42_driver *bridge;
- struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
-
- bridge = ca91cx42_bridge->driver_priv;
-
-
- /* Turn off Ints */
- iowrite32(0, bridge->base + LINT_EN);
-
- /* Turn off the windows */
- iowrite32(0x00800000, bridge->base + LSI0_CTL);
- iowrite32(0x00800000, bridge->base + LSI1_CTL);
- iowrite32(0x00800000, bridge->base + LSI2_CTL);
- iowrite32(0x00800000, bridge->base + LSI3_CTL);
- iowrite32(0x00800000, bridge->base + LSI4_CTL);
- iowrite32(0x00800000, bridge->base + LSI5_CTL);
- iowrite32(0x00800000, bridge->base + LSI6_CTL);
- iowrite32(0x00800000, bridge->base + LSI7_CTL);
- iowrite32(0x00F00000, bridge->base + VSI0_CTL);
- iowrite32(0x00F00000, bridge->base + VSI1_CTL);
- iowrite32(0x00F00000, bridge->base + VSI2_CTL);
- iowrite32(0x00F00000, bridge->base + VSI3_CTL);
- iowrite32(0x00F00000, bridge->base + VSI4_CTL);
- iowrite32(0x00F00000, bridge->base + VSI5_CTL);
- iowrite32(0x00F00000, bridge->base + VSI6_CTL);
- iowrite32(0x00F00000, bridge->base + VSI7_CTL);
-
- vme_unregister_bridge(ca91cx42_bridge);
-
- ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
-
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
- lm = list_entry(pos, struct vme_lm_resource, list);
- list_del(pos);
- kfree(lm);
- }
-
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
- dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
- list_del(pos);
- kfree(dma_ctrlr);
- }
-
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
- slave_image = list_entry(pos, struct vme_slave_resource, list);
- list_del(pos);
- kfree(slave_image);
- }
-
- /* resources are stored in link list */
- list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
- list_del(pos);
- kfree(master_image);
- }
-
- ca91cx42_irq_exit(bridge, pdev);
-
- iounmap(bridge->base);
-
- pci_release_regions(pdev);
-
- pci_disable_device(pdev);
-
- kfree(ca91cx42_bridge);
-}
-
-module_pci_driver(ca91cx42_driver);
-
-MODULE_PARM_DESC(geoid, "Override geographical addressing");
-module_param(geoid, int, 0);
-
-MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
-MODULE_LICENSE("GPL");
diff --git a/drivers/vme/bridges/vme_ca91cx42.h b/drivers/vme/bridges/vme_ca91cx42.h
deleted file mode 100644
index 34a8c25de613..000000000000
--- a/drivers/vme/bridges/vme_ca91cx42.h
+++ /dev/null
@@ -1,579 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * ca91c042.h
- *
- * Support for the Tundra Universe 1 and Universe II VME bridge chips
- *
- * Author: Tom Armistead
- * Updated by Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * Further updated by Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * Derived from ca91c042.h by Michael Wyrick
- */
-
-#ifndef _CA91CX42_H
-#define _CA91CX42_H
-
-#ifndef PCI_VENDOR_ID_TUNDRA
-#define PCI_VENDOR_ID_TUNDRA 0x10e3
-#endif
-
-#ifndef PCI_DEVICE_ID_TUNDRA_CA91C142
-#define PCI_DEVICE_ID_TUNDRA_CA91C142 0x0000
-#endif
-
-/*
- * Define the number of each that the CA91C142 supports.
- */
-#define CA91C142_MAX_MASTER 8 /* Max Master Windows */
-#define CA91C142_MAX_SLAVE 8 /* Max Slave Windows */
-#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */
-#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */
-
-/* Structure used to hold driver specific information */
-struct ca91cx42_driver {
- void __iomem *base; /* Base Address of device registers */
- wait_queue_head_t dma_queue;
- wait_queue_head_t iack_queue;
- wait_queue_head_t mbox_queue;
- void (*lm_callback[4])(void *); /* Called in interrupt handler */
- void *lm_data[4];
- void *crcsr_kernel;
- dma_addr_t crcsr_bus;
- struct mutex vme_rmw; /* Only one RMW cycle at a time */
- struct mutex vme_int; /*
- * Only one VME interrupt can be
- * generated at a time, provide locking
- */
-};
-
-/* See Page 2-77 in the Universe User Manual */
-struct ca91cx42_dma_descriptor {
- unsigned int dctl; /* DMA Control */
- unsigned int dtbc; /* Transfer Byte Count */
- unsigned int dla; /* PCI Address */
- unsigned int res1; /* Reserved */
- unsigned int dva; /* Vme Address */
- unsigned int res2; /* Reserved */
- unsigned int dcpp; /* Pointer to Numed Cmd Packet with rPN */
- unsigned int res3; /* Reserved */
-};
-
-struct ca91cx42_dma_entry {
- struct ca91cx42_dma_descriptor descriptor;
- struct list_head list;
-};
-
-/* Universe Register Offsets */
-/* general PCI configuration registers */
-#define CA91CX42_PCI_ID 0x000
-#define CA91CX42_PCI_CSR 0x004
-#define CA91CX42_PCI_CLASS 0x008
-#define CA91CX42_PCI_MISC0 0x00C
-#define CA91CX42_PCI_BS 0x010
-#define CA91CX42_PCI_MISC1 0x03C
-
-#define LSI0_CTL 0x0100
-#define LSI0_BS 0x0104
-#define LSI0_BD 0x0108
-#define LSI0_TO 0x010C
-
-#define LSI1_CTL 0x0114
-#define LSI1_BS 0x0118
-#define LSI1_BD 0x011C
-#define LSI1_TO 0x0120
-
-#define LSI2_CTL 0x0128
-#define LSI2_BS 0x012C
-#define LSI2_BD 0x0130
-#define LSI2_TO 0x0134
-
-#define LSI3_CTL 0x013C
-#define LSI3_BS 0x0140
-#define LSI3_BD 0x0144
-#define LSI3_TO 0x0148
-
-#define LSI4_CTL 0x01A0
-#define LSI4_BS 0x01A4
-#define LSI4_BD 0x01A8
-#define LSI4_TO 0x01AC
-
-#define LSI5_CTL 0x01B4
-#define LSI5_BS 0x01B8
-#define LSI5_BD 0x01BC
-#define LSI5_TO 0x01C0
-
-#define LSI6_CTL 0x01C8
-#define LSI6_BS 0x01CC
-#define LSI6_BD 0x01D0
-#define LSI6_TO 0x01D4
-
-#define LSI7_CTL 0x01DC
-#define LSI7_BS 0x01E0
-#define LSI7_BD 0x01E4
-#define LSI7_TO 0x01E8
-
-static const int CA91CX42_LSI_CTL[] = { LSI0_CTL, LSI1_CTL, LSI2_CTL, LSI3_CTL,
- LSI4_CTL, LSI5_CTL, LSI6_CTL, LSI7_CTL };
-
-static const int CA91CX42_LSI_BS[] = { LSI0_BS, LSI1_BS, LSI2_BS, LSI3_BS,
- LSI4_BS, LSI5_BS, LSI6_BS, LSI7_BS };
-
-static const int CA91CX42_LSI_BD[] = { LSI0_BD, LSI1_BD, LSI2_BD, LSI3_BD,
- LSI4_BD, LSI5_BD, LSI6_BD, LSI7_BD };
-
-static const int CA91CX42_LSI_TO[] = { LSI0_TO, LSI1_TO, LSI2_TO, LSI3_TO,
- LSI4_TO, LSI5_TO, LSI6_TO, LSI7_TO };
-
-#define SCYC_CTL 0x0170
-#define SCYC_ADDR 0x0174
-#define SCYC_EN 0x0178
-#define SCYC_CMP 0x017C
-#define SCYC_SWP 0x0180
-#define LMISC 0x0184
-#define SLSI 0x0188
-#define L_CMDERR 0x018C
-#define LAERR 0x0190
-
-#define DCTL 0x0200
-#define DTBC 0x0204
-#define DLA 0x0208
-#define DVA 0x0210
-#define DCPP 0x0218
-#define DGCS 0x0220
-#define D_LLUE 0x0224
-
-#define LINT_EN 0x0300
-#define LINT_STAT 0x0304
-#define LINT_MAP0 0x0308
-#define LINT_MAP1 0x030C
-#define VINT_EN 0x0310
-#define VINT_STAT 0x0314
-#define VINT_MAP0 0x0318
-#define VINT_MAP1 0x031C
-#define STATID 0x0320
-
-#define V1_STATID 0x0324
-#define V2_STATID 0x0328
-#define V3_STATID 0x032C
-#define V4_STATID 0x0330
-#define V5_STATID 0x0334
-#define V6_STATID 0x0338
-#define V7_STATID 0x033C
-
-static const int CA91CX42_V_STATID[8] = { 0, V1_STATID, V2_STATID, V3_STATID,
- V4_STATID, V5_STATID, V6_STATID,
- V7_STATID };
-
-#define LINT_MAP2 0x0340
-#define VINT_MAP2 0x0344
-
-#define MBOX0 0x0348
-#define MBOX1 0x034C
-#define MBOX2 0x0350
-#define MBOX3 0x0354
-#define SEMA0 0x0358
-#define SEMA1 0x035C
-
-#define MAST_CTL 0x0400
-#define MISC_CTL 0x0404
-#define MISC_STAT 0x0408
-#define USER_AM 0x040C
-
-#define VSI0_CTL 0x0F00
-#define VSI0_BS 0x0F04
-#define VSI0_BD 0x0F08
-#define VSI0_TO 0x0F0C
-
-#define VSI1_CTL 0x0F14
-#define VSI1_BS 0x0F18
-#define VSI1_BD 0x0F1C
-#define VSI1_TO 0x0F20
-
-#define VSI2_CTL 0x0F28
-#define VSI2_BS 0x0F2C
-#define VSI2_BD 0x0F30
-#define VSI2_TO 0x0F34
-
-#define VSI3_CTL 0x0F3C
-#define VSI3_BS 0x0F40
-#define VSI3_BD 0x0F44
-#define VSI3_TO 0x0F48
-
-#define LM_CTL 0x0F64
-#define LM_BS 0x0F68
-
-#define VRAI_CTL 0x0F70
-
-#define VRAI_BS 0x0F74
-#define VCSR_CTL 0x0F80
-#define VCSR_TO 0x0F84
-#define V_AMERR 0x0F88
-#define VAERR 0x0F8C
-
-#define VSI4_CTL 0x0F90
-#define VSI4_BS 0x0F94
-#define VSI4_BD 0x0F98
-#define VSI4_TO 0x0F9C
-
-#define VSI5_CTL 0x0FA4
-#define VSI5_BS 0x0FA8
-#define VSI5_BD 0x0FAC
-#define VSI5_TO 0x0FB0
-
-#define VSI6_CTL 0x0FB8
-#define VSI6_BS 0x0FBC
-#define VSI6_BD 0x0FC0
-#define VSI6_TO 0x0FC4
-
-#define VSI7_CTL 0x0FCC
-#define VSI7_BS 0x0FD0
-#define VSI7_BD 0x0FD4
-#define VSI7_TO 0x0FD8
-
-static const int CA91CX42_VSI_CTL[] = { VSI0_CTL, VSI1_CTL, VSI2_CTL, VSI3_CTL,
- VSI4_CTL, VSI5_CTL, VSI6_CTL, VSI7_CTL };
-
-static const int CA91CX42_VSI_BS[] = { VSI0_BS, VSI1_BS, VSI2_BS, VSI3_BS,
- VSI4_BS, VSI5_BS, VSI6_BS, VSI7_BS };
-
-static const int CA91CX42_VSI_BD[] = { VSI0_BD, VSI1_BD, VSI2_BD, VSI3_BD,
- VSI4_BD, VSI5_BD, VSI6_BD, VSI7_BD };
-
-static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
- VSI4_TO, VSI5_TO, VSI6_TO, VSI7_TO };
-
-#define VCSR_CLR 0x0FF4
-#define VCSR_SET 0x0FF8
-#define VCSR_BS 0x0FFC
-
-/*
- * PCI Class Register
- * offset 008
- */
-#define CA91CX42_BM_PCI_CLASS_BASE 0xFF000000
-#define CA91CX42_OF_PCI_CLASS_BASE 24
-#define CA91CX42_BM_PCI_CLASS_SUB 0x00FF0000
-#define CA91CX42_OF_PCI_CLASS_SUB 16
-#define CA91CX42_BM_PCI_CLASS_PROG 0x0000FF00
-#define CA91CX42_OF_PCI_CLASS_PROG 8
-#define CA91CX42_BM_PCI_CLASS_RID 0x000000FF
-#define CA91CX42_OF_PCI_CLASS_RID 0
-
-#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_I 0
-#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_II 1
-
-/*
- * PCI Misc Register
- * offset 00C
- */
-#define CA91CX42_BM_PCI_MISC0_BISTC 0x80000000
-#define CA91CX42_BM_PCI_MISC0_SBIST 0x60000000
-#define CA91CX42_BM_PCI_MISC0_CCODE 0x0F000000
-#define CA91CX42_BM_PCI_MISC0_MFUNCT 0x00800000
-#define CA91CX42_BM_PCI_MISC0_LAYOUT 0x007F0000
-#define CA91CX42_BM_PCI_MISC0_LTIMER 0x0000FF00
-#define CA91CX42_OF_PCI_MISC0_LTIMER 8
-
-
-/*
- * LSI Control Register
- * offset 100
- */
-#define CA91CX42_LSI_CTL_EN (1<<31)
-#define CA91CX42_LSI_CTL_PWEN (1<<30)
-
-#define CA91CX42_LSI_CTL_VDW_M (3<<22)
-#define CA91CX42_LSI_CTL_VDW_D8 0
-#define CA91CX42_LSI_CTL_VDW_D16 (1<<22)
-#define CA91CX42_LSI_CTL_VDW_D32 (1<<23)
-#define CA91CX42_LSI_CTL_VDW_D64 (3<<22)
-
-#define CA91CX42_LSI_CTL_VAS_M (7<<16)
-#define CA91CX42_LSI_CTL_VAS_A16 0
-#define CA91CX42_LSI_CTL_VAS_A24 (1<<16)
-#define CA91CX42_LSI_CTL_VAS_A32 (1<<17)
-#define CA91CX42_LSI_CTL_VAS_CRCSR (5<<16)
-#define CA91CX42_LSI_CTL_VAS_USER1 (3<<17)
-#define CA91CX42_LSI_CTL_VAS_USER2 (7<<16)
-
-#define CA91CX42_LSI_CTL_PGM_M (1<<14)
-#define CA91CX42_LSI_CTL_PGM_DATA 0
-#define CA91CX42_LSI_CTL_PGM_PGM (1<<14)
-
-#define CA91CX42_LSI_CTL_SUPER_M (1<<12)
-#define CA91CX42_LSI_CTL_SUPER_NPRIV 0
-#define CA91CX42_LSI_CTL_SUPER_SUPR (1<<12)
-
-#define CA91CX42_LSI_CTL_VCT_M (1<<8)
-#define CA91CX42_LSI_CTL_VCT_BLT (1<<8)
-#define CA91CX42_LSI_CTL_VCT_MBLT (1<<8)
-#define CA91CX42_LSI_CTL_LAS (1<<0)
-
-/*
- * SCYC_CTL Register
- * offset 178
- */
-#define CA91CX42_SCYC_CTL_LAS_PCIMEM 0
-#define CA91CX42_SCYC_CTL_LAS_PCIIO (1<<2)
-
-#define CA91CX42_SCYC_CTL_CYC_M (3<<0)
-#define CA91CX42_SCYC_CTL_CYC_RMW (1<<0)
-#define CA91CX42_SCYC_CTL_CYC_ADOH (1<<1)
-
-/*
- * LMISC Register
- * offset 184
- */
-#define CA91CX42_BM_LMISC_CRT 0xF0000000
-#define CA91CX42_OF_LMISC_CRT 28
-#define CA91CX42_BM_LMISC_CWT 0x0F000000
-#define CA91CX42_OF_LMISC_CWT 24
-
-/*
- * SLSI Register
- * offset 188
- */
-#define CA91CX42_BM_SLSI_EN 0x80000000
-#define CA91CX42_BM_SLSI_PWEN 0x40000000
-#define CA91CX42_BM_SLSI_VDW 0x00F00000
-#define CA91CX42_OF_SLSI_VDW 20
-#define CA91CX42_BM_SLSI_PGM 0x0000F000
-#define CA91CX42_OF_SLSI_PGM 12
-#define CA91CX42_BM_SLSI_SUPER 0x00000F00
-#define CA91CX42_OF_SLSI_SUPER 8
-#define CA91CX42_BM_SLSI_BS 0x000000F6
-#define CA91CX42_OF_SLSI_BS 2
-#define CA91CX42_BM_SLSI_LAS 0x00000003
-#define CA91CX42_OF_SLSI_LAS 0
-#define CA91CX42_BM_SLSI_RESERVED 0x3F0F0000
-
-/*
- * DCTL Register
- * offset 200
- */
-#define CA91CX42_DCTL_L2V (1<<31)
-#define CA91CX42_DCTL_VDW_M (3<<22)
-#define CA91CX42_DCTL_VDW_D8 0
-#define CA91CX42_DCTL_VDW_D16 (1<<22)
-#define CA91CX42_DCTL_VDW_D32 (1<<23)
-#define CA91CX42_DCTL_VDW_D64 (3<<22)
-
-#define CA91CX42_DCTL_VAS_M (7<<16)
-#define CA91CX42_DCTL_VAS_A16 0
-#define CA91CX42_DCTL_VAS_A24 (1<<16)
-#define CA91CX42_DCTL_VAS_A32 (1<<17)
-#define CA91CX42_DCTL_VAS_USER1 (3<<17)
-#define CA91CX42_DCTL_VAS_USER2 (7<<16)
-
-#define CA91CX42_DCTL_PGM_M (1<<14)
-#define CA91CX42_DCTL_PGM_DATA 0
-#define CA91CX42_DCTL_PGM_PGM (1<<14)
-
-#define CA91CX42_DCTL_SUPER_M (1<<12)
-#define CA91CX42_DCTL_SUPER_NPRIV 0
-#define CA91CX42_DCTL_SUPER_SUPR (1<<12)
-
-#define CA91CX42_DCTL_VCT_M (1<<8)
-#define CA91CX42_DCTL_VCT_BLT (1<<8)
-#define CA91CX42_DCTL_LD64EN (1<<7)
-
-/*
- * DCPP Register
- * offset 218
- */
-#define CA91CX42_DCPP_M 0xf
-#define CA91CX42_DCPP_NULL (1<<0)
-
-/*
- * DMA General Control/Status Register (DGCS)
- * offset 220
- */
-#define CA91CX42_DGCS_GO (1<<31)
-#define CA91CX42_DGCS_STOP_REQ (1<<30)
-#define CA91CX42_DGCS_HALT_REQ (1<<29)
-#define CA91CX42_DGCS_CHAIN (1<<27)
-
-#define CA91CX42_DGCS_VON_M (7<<20)
-
-#define CA91CX42_DGCS_VOFF_M (0xf<<16)
-
-#define CA91CX42_DGCS_ACT (1<<15)
-#define CA91CX42_DGCS_STOP (1<<14)
-#define CA91CX42_DGCS_HALT (1<<13)
-#define CA91CX42_DGCS_DONE (1<<11)
-#define CA91CX42_DGCS_LERR (1<<10)
-#define CA91CX42_DGCS_VERR (1<<9)
-#define CA91CX42_DGCS_PERR (1<<8)
-#define CA91CX42_DGCS_INT_STOP (1<<6)
-#define CA91CX42_DGCS_INT_HALT (1<<5)
-#define CA91CX42_DGCS_INT_DONE (1<<3)
-#define CA91CX42_DGCS_INT_LERR (1<<2)
-#define CA91CX42_DGCS_INT_VERR (1<<1)
-#define CA91CX42_DGCS_INT_PERR (1<<0)
-
-/*
- * PCI Interrupt Enable Register
- * offset 300
- */
-#define CA91CX42_LINT_LM3 0x00800000
-#define CA91CX42_LINT_LM2 0x00400000
-#define CA91CX42_LINT_LM1 0x00200000
-#define CA91CX42_LINT_LM0 0x00100000
-#define CA91CX42_LINT_MBOX3 0x00080000
-#define CA91CX42_LINT_MBOX2 0x00040000
-#define CA91CX42_LINT_MBOX1 0x00020000
-#define CA91CX42_LINT_MBOX0 0x00010000
-#define CA91CX42_LINT_ACFAIL 0x00008000
-#define CA91CX42_LINT_SYSFAIL 0x00004000
-#define CA91CX42_LINT_SW_INT 0x00002000
-#define CA91CX42_LINT_SW_IACK 0x00001000
-
-#define CA91CX42_LINT_VERR 0x00000400
-#define CA91CX42_LINT_LERR 0x00000200
-#define CA91CX42_LINT_DMA 0x00000100
-#define CA91CX42_LINT_VIRQ7 0x00000080
-#define CA91CX42_LINT_VIRQ6 0x00000040
-#define CA91CX42_LINT_VIRQ5 0x00000020
-#define CA91CX42_LINT_VIRQ4 0x00000010
-#define CA91CX42_LINT_VIRQ3 0x00000008
-#define CA91CX42_LINT_VIRQ2 0x00000004
-#define CA91CX42_LINT_VIRQ1 0x00000002
-#define CA91CX42_LINT_VOWN 0x00000001
-
-static const int CA91CX42_LINT_VIRQ[] = { 0, CA91CX42_LINT_VIRQ1,
- CA91CX42_LINT_VIRQ2, CA91CX42_LINT_VIRQ3,
- CA91CX42_LINT_VIRQ4, CA91CX42_LINT_VIRQ5,
- CA91CX42_LINT_VIRQ6, CA91CX42_LINT_VIRQ7 };
-
-#define CA91CX42_LINT_MBOX 0x000F0000
-
-static const int CA91CX42_LINT_LM[] = { CA91CX42_LINT_LM0, CA91CX42_LINT_LM1,
- CA91CX42_LINT_LM2, CA91CX42_LINT_LM3 };
-
-/*
- * MAST_CTL Register
- * offset 400
- */
-#define CA91CX42_BM_MAST_CTL_MAXRTRY 0xF0000000
-#define CA91CX42_OF_MAST_CTL_MAXRTRY 28
-#define CA91CX42_BM_MAST_CTL_PWON 0x0F000000
-#define CA91CX42_OF_MAST_CTL_PWON 24
-#define CA91CX42_BM_MAST_CTL_VRL 0x00C00000
-#define CA91CX42_OF_MAST_CTL_VRL 22
-#define CA91CX42_BM_MAST_CTL_VRM 0x00200000
-#define CA91CX42_BM_MAST_CTL_VREL 0x00100000
-#define CA91CX42_BM_MAST_CTL_VOWN 0x00080000
-#define CA91CX42_BM_MAST_CTL_VOWN_ACK 0x00040000
-#define CA91CX42_BM_MAST_CTL_PABS 0x00001000
-#define CA91CX42_BM_MAST_CTL_BUS_NO 0x0000000F
-#define CA91CX42_OF_MAST_CTL_BUS_NO 0
-
-/*
- * MISC_CTL Register
- * offset 404
- */
-#define CA91CX42_MISC_CTL_VBTO 0xF0000000
-#define CA91CX42_MISC_CTL_VARB 0x04000000
-#define CA91CX42_MISC_CTL_VARBTO 0x03000000
-#define CA91CX42_MISC_CTL_SW_LRST 0x00800000
-#define CA91CX42_MISC_CTL_SW_SRST 0x00400000
-#define CA91CX42_MISC_CTL_BI 0x00100000
-#define CA91CX42_MISC_CTL_ENGBI 0x00080000
-#define CA91CX42_MISC_CTL_RESCIND 0x00040000
-#define CA91CX42_MISC_CTL_SYSCON 0x00020000
-#define CA91CX42_MISC_CTL_V64AUTO 0x00010000
-#define CA91CX42_MISC_CTL_RESERVED 0x0820FFFF
-
-#define CA91CX42_OF_MISC_CTL_VARBTO 24
-#define CA91CX42_OF_MISC_CTL_VBTO 28
-
-/*
- * MISC_STAT Register
- * offset 408
- */
-#define CA91CX42_BM_MISC_STAT_ENDIAN 0x80000000
-#define CA91CX42_BM_MISC_STAT_LCLSIZE 0x40000000
-#define CA91CX42_BM_MISC_STAT_DY4AUTO 0x08000000
-#define CA91CX42_BM_MISC_STAT_MYBBSY 0x00200000
-#define CA91CX42_BM_MISC_STAT_DY4DONE 0x00080000
-#define CA91CX42_BM_MISC_STAT_TXFE 0x00040000
-#define CA91CX42_BM_MISC_STAT_RXFE 0x00020000
-#define CA91CX42_BM_MISC_STAT_DY4AUTOID 0x0000FF00
-#define CA91CX42_OF_MISC_STAT_DY4AUTOID 8
-
-/*
- * VSI Control Register
- * offset F00
- */
-#define CA91CX42_VSI_CTL_EN (1<<31)
-#define CA91CX42_VSI_CTL_PWEN (1<<30)
-#define CA91CX42_VSI_CTL_PREN (1<<29)
-
-#define CA91CX42_VSI_CTL_PGM_M (3<<22)
-#define CA91CX42_VSI_CTL_PGM_DATA (1<<22)
-#define CA91CX42_VSI_CTL_PGM_PGM (1<<23)
-
-#define CA91CX42_VSI_CTL_SUPER_M (3<<20)
-#define CA91CX42_VSI_CTL_SUPER_NPRIV (1<<20)
-#define CA91CX42_VSI_CTL_SUPER_SUPR (1<<21)
-
-#define CA91CX42_VSI_CTL_VAS_M (7<<16)
-#define CA91CX42_VSI_CTL_VAS_A16 0
-#define CA91CX42_VSI_CTL_VAS_A24 (1<<16)
-#define CA91CX42_VSI_CTL_VAS_A32 (1<<17)
-#define CA91CX42_VSI_CTL_VAS_USER1 (3<<17)
-#define CA91CX42_VSI_CTL_VAS_USER2 (7<<16)
-
-#define CA91CX42_VSI_CTL_LD64EN (1<<7)
-#define CA91CX42_VSI_CTL_LLRMW (1<<6)
-
-#define CA91CX42_VSI_CTL_LAS_M (3<<0)
-#define CA91CX42_VSI_CTL_LAS_PCI_MS 0
-#define CA91CX42_VSI_CTL_LAS_PCI_IO (1<<0)
-#define CA91CX42_VSI_CTL_LAS_PCI_CONF (1<<1)
-
-/* LM_CTL Register
- * offset F64
- */
-#define CA91CX42_LM_CTL_EN (1<<31)
-#define CA91CX42_LM_CTL_PGM (1<<23)
-#define CA91CX42_LM_CTL_DATA (1<<22)
-#define CA91CX42_LM_CTL_SUPR (1<<21)
-#define CA91CX42_LM_CTL_NPRIV (1<<20)
-#define CA91CX42_LM_CTL_AS_M (7<<16)
-#define CA91CX42_LM_CTL_AS_A16 0
-#define CA91CX42_LM_CTL_AS_A24 (1<<16)
-#define CA91CX42_LM_CTL_AS_A32 (1<<17)
-
-/*
- * VRAI_CTL Register
- * offset F70
- */
-#define CA91CX42_BM_VRAI_CTL_EN 0x80000000
-#define CA91CX42_BM_VRAI_CTL_PGM 0x00C00000
-#define CA91CX42_OF_VRAI_CTL_PGM 22
-#define CA91CX42_BM_VRAI_CTL_SUPER 0x00300000
-#define CA91CX42_OF_VRAI_CTL_SUPER 20
-#define CA91CX42_BM_VRAI_CTL_VAS 0x00030000
-#define CA91CX42_OF_VRAI_CTL_VAS 16
-
-/* VCSR_CTL Register
- * offset F80
- */
-#define CA91CX42_VCSR_CTL_EN (1<<31)
-
-#define CA91CX42_VCSR_CTL_LAS_M (3<<0)
-#define CA91CX42_VCSR_CTL_LAS_PCI_MS 0
-#define CA91CX42_VCSR_CTL_LAS_PCI_IO (1<<0)
-#define CA91CX42_VCSR_CTL_LAS_PCI_CONF (1<<1)
-
-/* VCSR_BS Register
- * offset FFC
- */
-#define CA91CX42_VCSR_BS_SLOT_M (0x1F<<27)
-
-#endif /* _CA91CX42_H */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 32fd37698932..9295492d24f7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1647,6 +1647,7 @@ config SIEMENS_SIMATIC_IPC_WDT
tristate "Siemens Simatic IPC Watchdog"
depends on SIEMENS_SIMATIC_IPC
select WATCHDOG_CORE
+ select P2SB
help
This driver adds support for several watchdogs found in Industrial
PCs from Siemens.
@@ -1962,6 +1963,14 @@ config MEN_A21_WDT
# PPC64 Architecture
+config PSERIES_WDT
+ tristate "POWER Architecture Platform Watchdog Timer"
+ depends on PPC_PSERIES
+ select WATCHDOG_CORE
+ help
+ Driver for virtual watchdog timers provided by PAPR
+ hypervisors (e.g. PowerVM, KVM).
+
config WATCHDOG_RTAS
tristate "RTAS watchdog"
depends on PPC_RTAS
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c324e9d820e9..cdeb119e6e61 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -187,6 +187,7 @@ obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
# PPC64 Architecture
+obj-$(CONFIG_PSERIES_WDT) += pseries-wdt.o
obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
# S390 Architecture
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 1635f421ef2c..854b1cc723cb 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -274,6 +274,8 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 1ffcf6aca6ae..9388838899ac 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -192,7 +192,6 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm7038_wdt_suspend(struct device *dev)
{
struct bcm7038_watchdog *wdt = dev_get_drvdata(dev);
@@ -212,10 +211,9 @@ static int bcm7038_wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
- bcm7038_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops,
+ bcm7038_wdt_suspend, bcm7038_wdt_resume);
static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm6345-wdt" },
@@ -236,7 +234,7 @@ static struct platform_driver bcm7038_wdt_driver = {
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
- .pm = &bcm7038_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&bcm7038_wdt_pm_ops),
}
};
module_platform_driver(bcm7038_wdt_driver);
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 5e4dc1a0f2c6..75da5cd02615 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -74,7 +74,7 @@ static unsigned long long period_to_sec(unsigned int period)
/*
* This procedure will find the highest period which will give a timeout
* greater than the one required. e.g. for a bus speed of 66666666 and
- * and a parameter of 2 secs, then this procedure will return a value of 38.
+ * a parameter of 2 secs, then this procedure will return a value of 38.
*/
static unsigned int sec_to_period(unsigned int secs)
{
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index cd578843277e..52962e8d11a6 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -218,7 +218,7 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
/*
* Set the new value in the watchdog. Some versions of dw_wdt
- * have have TOPINIT in the TIMEOUT_RANGE register (as per
+ * have TOPINIT in the TIMEOUT_RANGE register (as per
* CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
* effectively get a pat of the watchdog right here.
*/
@@ -375,7 +375,6 @@ static irqreturn_t dw_wdt_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
@@ -410,9 +409,8 @@ static int dw_wdt_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
/*
* In case if DW WDT IP core is synthesized with fixed TOP feature disabled the
@@ -710,7 +708,7 @@ static struct platform_driver dw_wdt_driver = {
.driver = {
.name = "dw_wdt",
.of_match_table = of_match_ptr(dw_wdt_of_match),
- .pm = &dw_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&dw_wdt_pm_ops),
},
};
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 7f59c680de25..6a16d3d0bb1e 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -634,7 +634,9 @@ static int __init fintek_wdt_init(void)
pdata.type = ret;
- platform_driver_register(&fintek_wdt_driver);
+ ret = platform_driver_register(&fintek_wdt_driver);
+ if (ret)
+ return ret;
wdt_res.name = "superio port";
wdt_res.flags = IORESOURCE_IO;
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index b76ad6ba0915..33835c0b06de 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -6,7 +6,7 @@
* Copyright (C) 2022 Luca Ceresoli
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/err.h>
@@ -260,5 +260,5 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index f0d4e3cc7459..e97787536792 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -401,7 +401,6 @@ static int mtk_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mtk_wdt_suspend(struct device *dev)
{
struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
@@ -423,7 +422,6 @@ static int mtk_wdt_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
@@ -437,16 +435,14 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
-static const struct dev_pm_ops mtk_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
- mtk_wdt_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(mtk_wdt_pm_ops,
+ mtk_wdt_suspend, mtk_wdt_resume);
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
.driver = {
.name = DRV_NAME,
- .pm = &mtk_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_wdt_pm_ops),
.of_match_table = mtk_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 9f9a340427fc..c7f745caf203 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -442,7 +442,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
}
}
-/* -- Notifier funtions -----------------------------------------*/
+/* -- Notifier functions -----------------------------------------*/
/**
* pc87413_notify_sys:
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 0937b8d33104..f4bfbffaf49c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -9,6 +9,12 @@
#include <linux/regmap.h>
#include <linux/watchdog.h>
+#define PON_POFF_REASON1 0x0c
+#define PON_POFF_REASON1_PMIC_WD BIT(2)
+#define PON_POFF_REASON2 0x0d
+#define PON_POFF_REASON2_UVLO BIT(5)
+#define PON_POFF_REASON2_OTST3 BIT(6)
+
#define PON_INT_RT_STS 0x10
#define PMIC_WD_BARK_STS_BIT BIT(6)
@@ -58,9 +64,8 @@ static int pm8916_wdt_ping(struct watchdog_device *wdev)
{
struct pm8916_wdt *wdt = watchdog_get_drvdata(wdev);
- return regmap_update_bits(wdt->regmap,
- wdt->baseaddr + PON_PMIC_WD_RESET_PET,
- WATCHDOG_PET_BIT, WATCHDOG_PET_BIT);
+ return regmap_write(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_PET,
+ WATCHDOG_PET_BIT);
}
static int pm8916_wdt_configure_timers(struct watchdog_device *wdev)
@@ -111,12 +116,14 @@ static irqreturn_t pm8916_wdt_isr(int irq, void *arg)
}
static const struct watchdog_info pm8916_wdt_ident = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER,
.identity = "QCOM PM8916 PON WDT",
};
static const struct watchdog_info pm8916_wdt_pt_ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER |
WDIOF_PRETIMEOUT,
.identity = "QCOM PM8916 PON WDT",
};
@@ -135,7 +142,9 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pm8916_wdt *wdt;
struct device *parent;
+ unsigned int val;
int err, irq;
+ u8 poff[2];
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -176,6 +185,30 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->wdev.info = &pm8916_wdt_ident;
}
+ err = regmap_bulk_read(wdt->regmap, wdt->baseaddr + PON_POFF_REASON1,
+ &poff, ARRAY_SIZE(poff));
+ if (err) {
+ dev_err(dev, "failed to read POFF reason: %d\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "POFF reason: %#x %#x\n", poff[0], poff[1]);
+ if (poff[0] & PON_POFF_REASON1_PMIC_WD)
+ wdt->wdev.bootstatus |= WDIOF_CARDRESET;
+ if (poff[1] & PON_POFF_REASON2_UVLO)
+ wdt->wdev.bootstatus |= WDIOF_POWERUNDER;
+ if (poff[1] & PON_POFF_REASON2_OTST3)
+ wdt->wdev.bootstatus |= WDIOF_OVERHEAT;
+
+ err = regmap_read(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL2,
+ &val);
+ if (err) {
+ dev_err(dev, "failed to check if watchdog is active: %d\n", err);
+ return err;
+ }
+ if (val & S2_RESET_EN_BIT)
+ set_bit(WDOG_HW_RUNNING, &wdt->wdev.status);
+
/* Configure watchdog to hard-reset mode */
err = regmap_write(wdt->regmap,
wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL,
diff --git a/drivers/watchdog/pseries-wdt.c b/drivers/watchdog/pseries-wdt.c
new file mode 100644
index 000000000000..7f53b5293409
--- /dev/null
+++ b/drivers/watchdog/pseries-wdt.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 International Business Machines, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME "pseries-wdt"
+
+/*
+ * H_WATCHDOG Input
+ *
+ * R4: "flags":
+ *
+ * Bits 48-55: "operation"
+ */
+#define PSERIES_WDTF_OP_START 0x100UL /* start timer */
+#define PSERIES_WDTF_OP_STOP 0x200UL /* stop timer */
+#define PSERIES_WDTF_OP_QUERY 0x300UL /* query timer capabilities */
+
+/*
+ * Bits 56-63: "timeoutAction" (for "Start Watchdog" only)
+ */
+#define PSERIES_WDTF_ACTION_HARD_POWEROFF 0x1UL /* poweroff */
+#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2UL /* restart */
+#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3UL /* dump + restart */
+
+/*
+ * H_WATCHDOG Output
+ *
+ * R3: Return code
+ *
+ * H_SUCCESS The operation completed.
+ *
+ * H_BUSY The hypervisor is too busy; retry the operation.
+ *
+ * H_PARAMETER The given "flags" are somehow invalid. Either the
+ * "operation" or "timeoutAction" is invalid, or a
+ * reserved bit is set.
+ *
+ * H_P2 The given "watchdogNumber" is zero or exceeds the
+ * supported maximum value.
+ *
+ * H_P3 The given "timeoutInMs" is below the supported
+ * minimum value.
+ *
+ * H_NOOP The given "watchdogNumber" is already stopped.
+ *
+ * H_HARDWARE The operation failed for ineffable reasons.
+ *
+ * H_FUNCTION The H_WATCHDOG hypercall is not supported by this
+ * hypervisor.
+ *
+ * R4:
+ *
+ * - For the "Query Watchdog Capabilities" operation, a 64-bit
+ * structure:
+ */
+#define PSERIES_WDTQ_MIN_TIMEOUT(cap) (((cap) >> 48) & 0xffff)
+#define PSERIES_WDTQ_MAX_NUMBER(cap) (((cap) >> 32) & 0xffff)
+
+static const unsigned long pseries_wdt_action[] = {
+ [0] = PSERIES_WDTF_ACTION_HARD_POWEROFF,
+ [1] = PSERIES_WDTF_ACTION_HARD_RESTART,
+ [2] = PSERIES_WDTF_ACTION_DUMP_RESTART,
+};
+
+#define WATCHDOG_ACTION 1
+static unsigned int action = WATCHDOG_ACTION;
+module_param(action, uint, 0444);
+MODULE_PARM_DESC(action, "Action taken when watchdog expires (default="
+ __MODULE_STRING(WATCHDOG_ACTION) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WATCHDOG_TIMEOUT 60
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, 0444);
+MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+struct pseries_wdt {
+ struct watchdog_device wd;
+ unsigned long action;
+ unsigned long num; /* Watchdog numbers are 1-based */
+};
+
+static int pseries_wdt_start(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ unsigned long flags, msecs;
+ long rc;
+
+ flags = pw->action | PSERIES_WDTF_OP_START;
+ msecs = wdd->timeout * MSEC_PER_SEC;
+ rc = plpar_hcall_norets(H_WATCHDOG, flags, pw->num, msecs);
+ if (rc != H_SUCCESS) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to start timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pseries_wdt_stop(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ long rc;
+
+ rc = plpar_hcall_norets(H_WATCHDOG, PSERIES_WDTF_OP_STOP, pw->num);
+ if (rc != H_SUCCESS && rc != H_NOOP) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to stop timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct watchdog_info pseries_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT
+ | WDIOF_PRETIMEOUT,
+};
+
+static const struct watchdog_ops pseries_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = pseries_wdt_start,
+ .stop = pseries_wdt_stop,
+};
+
+static int pseries_wdt_probe(struct platform_device *pdev)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct pseries_wdt *pw;
+ unsigned long cap;
+ long msecs, rc;
+ int err;
+
+ rc = plpar_hcall(H_WATCHDOG, ret, PSERIES_WDTF_OP_QUERY);
+ if (rc == H_FUNCTION)
+ return -ENODEV;
+ if (rc != H_SUCCESS)
+ return -EIO;
+ cap = ret[0];
+
+ pw = devm_kzalloc(&pdev->dev, sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
+
+ /*
+ * Assume watchdogNumber 1 for now. If we ever support
+ * multiple timers we will need to devise a way to choose a
+ * distinct watchdogNumber for each platform device at device
+ * registration time.
+ */
+ pw->num = 1;
+ if (PSERIES_WDTQ_MAX_NUMBER(cap) < pw->num)
+ return -ENODEV;
+
+ if (action >= ARRAY_SIZE(pseries_wdt_action))
+ return -EINVAL;
+ pw->action = pseries_wdt_action[action];
+
+ pw->wd.parent = &pdev->dev;
+ pw->wd.info = &pseries_wdt_info;
+ pw->wd.ops = &pseries_wdt_ops;
+ msecs = PSERIES_WDTQ_MIN_TIMEOUT(cap);
+ pw->wd.min_timeout = DIV_ROUND_UP(msecs, MSEC_PER_SEC);
+ pw->wd.max_timeout = UINT_MAX / 1000; /* from linux/watchdog.h */
+ pw->wd.timeout = timeout;
+ if (watchdog_init_timeout(&pw->wd, 0, NULL))
+ return -EINVAL;
+ watchdog_set_nowayout(&pw->wd, nowayout);
+ watchdog_stop_on_reboot(&pw->wd);
+ watchdog_stop_on_unregister(&pw->wd);
+ watchdog_set_drvdata(&pw->wd, pw);
+
+ err = devm_watchdog_register_device(&pdev->dev, &pw->wd);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, &pw->wd);
+
+ return 0;
+}
+
+static int pseries_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_stop(wd);
+ return 0;
+}
+
+static int pseries_wdt_resume(struct platform_device *pdev)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_start(wd);
+ return 0;
+}
+
+static const struct platform_device_id pseries_wdt_id[] = {
+ { .name = "pseries-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, pseries_wdt_id);
+
+static struct platform_driver pseries_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = pseries_wdt_id,
+ .probe = pseries_wdt_probe,
+ .resume = pseries_wdt_resume,
+ .suspend = pseries_wdt_suspend,
+};
+module_platform_driver(pseries_wdt_driver);
+
+MODULE_AUTHOR("Alexey Kardashevskiy");
+MODULE_AUTHOR("Scott Cheloha");
+MODULE_DESCRIPTION("POWER Architecture Platform Watchdog Driver");
+MODULE_LICENSE("GPL");
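As a quick illustration of the "Query Watchdog Capabilities" output documented above, here is a minimal standalone sketch (not part of the patch) that decodes a made-up capability word with the PSERIES_WDTQ_* macros; the 1000 ms / 2 timers value is invented for illustration only:

#include <stdio.h>

#define PSERIES_WDTQ_MIN_TIMEOUT(cap)	(((cap) >> 48) & 0xffff)
#define PSERIES_WDTQ_MAX_NUMBER(cap)	(((cap) >> 32) & 0xffff)

int main(void)
{
	/* Invented capability word: minTimeoutInMs = 1000, max watchdogNumber = 2. */
	unsigned long long cap = (1000ULL << 48) | (2ULL << 32);

	printf("min timeout:        %llu ms\n", PSERIES_WDTQ_MIN_TIMEOUT(cap));
	printf("max watchdogNumber: %llu\n", PSERIES_WDTQ_MAX_NUMBER(cap));
	return 0;
}

In pseries_wdt_probe() the same minimum is rounded up with DIV_ROUND_UP(), so a 1000 ms platform minimum becomes a 1 second wd.min_timeout; with the module parameters above, loading with action=2 selects the dump-plus-restart timeoutAction.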
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
index 60058a0c3ec4..2a5298c5e8e4 100644
--- a/drivers/watchdog/realtek_otto_wdt.c
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -366,6 +366,7 @@ static const struct of_device_id otto_wdt_ids[] = {
{ .compatible = "realtek,rtl8380-wdt" },
{ .compatible = "realtek,rtl8390-wdt" },
{ .compatible = "realtek,rtl9300-wdt" },
+ { .compatible = "realtek,rtl9310-wdt" },
{ }
};
MODULE_DEVICE_TABLE(of, otto_wdt_ids);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 6db22f2e3a4f..95919392927f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -845,8 +845,6 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
s3c2410wdt_stop(&wdt->wdt_device);
}
-#ifdef CONFIG_PM_SLEEP
-
static int s3c2410wdt_suspend(struct device *dev)
{
int ret;
@@ -885,10 +883,9 @@ static int s3c2410wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
- s3c2410wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops,
+ s3c2410wdt_suspend, s3c2410wdt_resume);
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
@@ -897,7 +894,7 @@ static struct platform_driver s3c2410wdt_driver = {
.id_table = s3c2410_wdt_ids,
.driver = {
.name = "s3c2410-wdt",
- .pm = &s3c2410wdt_pm_ops,
+ .pm = pm_sleep_ptr(&s3c2410wdt_pm_ops),
.of_match_table = of_match_ptr(s3c2410_wdt_match),
},
};
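The s3c2410 conversion above, and the sama5d4, st_lpc, tegra and wdat changes below, all follow the same pattern: drop the #ifdef CONFIG_PM_SLEEP guards, define the ops unconditionally with DEFINE_SIMPLE_DEV_PM_OPS() (or its LATE/NOIRQ variants), and wrap the pointer in pm_sleep_ptr() so it becomes NULL when sleep support is disabled. A condensed sketch of the pattern, using a hypothetical foo_wdt driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical foo_wdt driver, shown only to illustrate the conversion. */
static int foo_wdt_suspend(struct device *dev)
{
	/* stop the hardware timer here */
	return 0;
}

static int foo_wdt_resume(struct device *dev)
{
	/* restart the hardware timer here if it was active before suspend */
	return 0;
}

/*
 * No #ifdef CONFIG_PM_SLEEP: the ops are always defined, and pm_sleep_ptr()
 * evaluates to NULL when CONFIG_PM_SLEEP is disabled, so the compiler can
 * discard the unreferenced callbacks.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_wdt_pm_ops, foo_wdt_suspend, foo_wdt_resume);

static struct platform_driver foo_wdt_driver = {
	.driver = {
		.name	= "foo-wdt",
		.pm	= pm_sleep_ptr(&foo_wdt_pm_ops),
	},
};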
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index ec20ad4e534f..aeee934ca51b 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -339,7 +339,6 @@ static const struct of_device_id sama5d4_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
-#ifdef CONFIG_PM_SLEEP
static int sama5d4_wdt_suspend_late(struct device *dev)
{
struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
@@ -366,18 +365,17 @@ static int sama5d4_wdt_resume_early(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sama5d4_wdt_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
- sama5d4_wdt_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
+ sama5d4_wdt_resume_early)
};
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
.driver = {
.name = "sama5d4_wdt",
- .pm = &sama5d4_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&sama5d4_wdt_pm_ops),
.of_match_table = sama5d4_wdt_of_match,
}
};
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index 8bac793c63fb..6599695dc672 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
#include <linux/platform_data/x86/simatic-ipc-base.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
@@ -54,9 +55,9 @@ static struct resource io_resource_trigger =
DEFINE_RES_IO_NAMED(WD_TRIGGER_IOADR, SZ_1,
KBUILD_MODNAME " WD_TRIGGER_IOADR");
-/* the actual start will be discovered with pci, 0 is a placeholder */
+/* the actual start will be discovered with p2sb, 0 is a placeholder */
static struct resource mem_resource =
- DEFINE_RES_MEM_NAMED(0, SZ_4, "WD_RESET_BASE_ADR");
+ DEFINE_RES_MEM_NAMED(0, 0, "WD_RESET_BASE_ADR");
static u32 wd_timeout_table[] = {2, 4, 6, 8, 16, 32, 48, 64 };
static void __iomem *wd_reset_base_addr;
@@ -150,6 +151,7 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
struct simatic_ipc_platform *plat = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct resource *res;
+ int ret;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227E:
@@ -190,15 +192,14 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
if (plat->devmode == SIMATIC_IPC_DEVICE_427E) {
res = &mem_resource;
- /* get GPIO base from PCI */
- res->start = simatic_ipc_get_membase0(PCI_DEVFN(0x1f, 1));
- if (res->start == 0)
- return -ENODEV;
+ ret = p2sb_bar(NULL, 0, res);
+ if (ret)
+ return ret;
/* do the final address calculation */
res->start = res->start + (GPIO_COMMUNITY0_PORT_ID << 16) +
PAD_CFG_DW0_GPP_A_23;
- res->end += res->start;
+ res->end = res->start + SZ_4 - 1;
wd_reset_base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(wd_reset_base_addr))
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 86ffb58fbc85..ae54dd33e233 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -402,6 +402,7 @@ out:
iounmap(addr);
release_resource(res);
+ kfree(res);
return ret;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index f9479a3fe2a6..78ba36689eec 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/char/watchdog/sp805-wdt.c
*
@@ -341,6 +342,10 @@ static const struct amba_id sp805_wdt_ids[] = {
.id = 0x00141805,
.mask = 0x00ffffff,
},
+ {
+ .id = 0x001bb824,
+ .mask = 0x00ffffff,
+ },
{ 0, 0 },
};
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 14ab6559c748..39abecdb9dd1 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -248,7 +248,6 @@ static int st_wdog_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_wdog_suspend(struct device *dev)
{
struct st_wdog *st_wdog = watchdog_get_drvdata(&st_wdog_dev);
@@ -285,16 +284,14 @@ static int st_wdog_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
- st_wdog_suspend,
- st_wdog_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
+ st_wdog_suspend, st_wdog_resume);
static struct platform_driver st_wdog_driver = {
.driver = {
.name = "st-lpc-wdt",
- .pm = &st_wdog_pm_ops,
+ .pm = pm_sleep_ptr(&st_wdog_pm_ops),
.of_match_table = st_wdog_match,
},
.probe = st_wdog_probe,
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index dfe06e506cad..d5de6c0657a5 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -230,8 +230,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_wdt_runtime_suspend(struct device *dev)
+static int tegra_wdt_suspend(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -241,7 +240,7 @@ static int tegra_wdt_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_wdt_runtime_resume(struct device *dev)
+static int tegra_wdt_resume(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -250,7 +249,6 @@ static int tegra_wdt_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id tegra_wdt_of_match[] = {
{ .compatible = "nvidia,tegra30-timer", },
@@ -258,16 +256,14 @@ static const struct of_device_id tegra_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);
-static const struct dev_pm_ops tegra_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
- tegra_wdt_runtime_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tegra_wdt_pm_ops,
+ tegra_wdt_suspend, tegra_wdt_resume);
static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
.driver = {
.name = "tegra-wdt",
- .pm = &tegra_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&tegra_wdt_pm_ops),
.of_match_table = tegra_wdt_of_match,
},
};
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e6f95e99156d..aeadaa07c891 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -467,7 +467,6 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdat->wdd);
}
-#ifdef CONFIG_PM_SLEEP
static int wdat_wdt_suspend_noirq(struct device *dev)
{
struct wdat_wdt *wdat = dev_get_drvdata(dev);
@@ -528,18 +527,16 @@ static int wdat_wdt_resume_noirq(struct device *dev)
return wdat_wdt_start(&wdat->wdd);
}
-#endif
static const struct dev_pm_ops wdat_wdt_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq,
- wdat_wdt_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq, wdat_wdt_resume_noirq)
};
static struct platform_driver wdat_wdt_driver = {
.probe = wdat_wdt_probe,
.driver = {
.name = "wdat_wdt",
- .pm = &wdat_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&wdat_wdt_pm_ops),
},
};
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index bfd5f4f706bc..a65bd92121a5 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -355,4 +355,13 @@ config XEN_VIRTIO
If in doubt, say n.
+config XEN_VIRTIO_FORCE_GRANT
+ bool "Require Xen virtio support to use grants"
+ depends on XEN_VIRTIO
+ help
+ Require virtio devices in Xen guests to use grant mappings.
+ This will avoid the need to give the backend the right to map all
+ of the guest memory. This will need support on the backend side
+ (e.g. qemu or kernel, depending on the virtio device types used).
+
endmenu
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 46d9295d9a6e..c443f04aaad7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -45,6 +45,7 @@
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
+#include <asm/xen/cpuid.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
@@ -528,9 +529,10 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
BUG_ON(irq == -1);
if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
- cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
- cpumask_copy(irq_get_effective_affinity_mask(irq),
- cpumask_of(cpu));
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ irq_data_update_affinity(data, cpumask_of(cpu));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
}
xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
@@ -2183,6 +2185,7 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
+#ifdef CONFIG_X86
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
@@ -2195,11 +2198,48 @@ void xen_setup_callback_vector(void)
callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
if (xen_set_callback_via(callback_via)) {
pr_err("Request for Xen HVM callback vector failed\n");
- xen_have_vector_callback = 0;
+ xen_have_vector_callback = false;
}
}
}
+/*
+ * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
+ * fall back to the global vector-type callback.
+ */
+static __init void xen_init_setup_upcall_vector(void)
+{
+ if (!xen_have_vector_callback)
+ return;
+
+ if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
+ !xen_set_upcall_vector(0))
+ xen_percpu_upcall = true;
+ else if (xen_feature(XENFEAT_hvm_callback_vector))
+ xen_setup_callback_vector();
+ else
+ xen_have_vector_callback = false;
+}
+
+int xen_set_upcall_vector(unsigned int cpu)
+{
+ int rc;
+ xen_hvm_evtchn_upcall_vector_t op = {
+ .vector = HYPERVISOR_CALLBACK_VECTOR,
+ .vcpu = per_cpu(xen_vcpu_id, cpu),
+ };
+
+ rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
+ if (rc)
+ return rc;
+
+ /* Trick toolstack to think we are enlightened. */
+ if (!cpu)
+ rc = xen_set_callback_via(1);
+
+ return rc;
+}
+
static __init void xen_alloc_callback_vector(void)
{
if (!xen_have_vector_callback)
@@ -2210,8 +2250,11 @@ static __init void xen_alloc_callback_vector(void)
}
#else
void xen_setup_callback_vector(void) {}
+static inline void xen_init_setup_upcall_vector(void) {}
+int xen_set_upcall_vector(unsigned int cpu) { return 0; }
static inline void xen_alloc_callback_vector(void) {}
-#endif
+#endif /* CONFIG_XEN_PVHVM */
+#endif /* CONFIG_X86 */
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);
@@ -2271,10 +2314,9 @@ void __init xen_init_IRQ(void)
if (xen_initial_domain())
pci_xen_initial_domain();
}
- if (xen_feature(XENFEAT_hvm_callback_vector)) {
- xen_setup_callback_vector();
- xen_alloc_callback_vector();
- }
+ xen_init_setup_upcall_vector();
+ xen_alloc_callback_vector();
+
if (xen_hvm_domain()) {
native_init_IRQ();
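The new per-vCPU path only helps if every vCPU registers its own callback vector, which is what xen_set_upcall_vector() is exported for. A hedged sketch of how a CPU bring-up hook might use it; the hook name is hypothetical, and only xen_percpu_upcall and xen_set_upcall_vector() come from this patch:

/*
 * Hypothetical CPU bring-up hook, not part of this patch: with per-vCPU
 * upcall vectors enabled, each vCPU coming online would register its own
 * vector.
 */
static int example_xen_cpu_up_prepare(unsigned int cpu)
{
	int rc = 0;

	if (xen_percpu_upcall)
		/* HVMOP_set_evtchn_upcall_vector for this particular vCPU */
		rc = xen_set_upcall_vector(cpu);

	if (rc)
		pr_err("Vector callback for CPU %u failed: %d\n", cpu, rc);

	return rc;
}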
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4b56c39f766d..84b143eef395 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -396,13 +396,15 @@ static void __unmap_grant_pages_done(int result,
unsigned int offset = data->unmap_ops - map->unmap_ops;
for (i = 0; i < data->count; i++) {
- WARN_ON(map->unmap_ops[offset+i].status);
+ WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
+ map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
- WARN_ON(map->kunmap_ops[offset+i].status);
+ WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("kunmap handle=%u st=%d\n",
map->kunmap_ops[offset+i].handle,
map->kunmap_ops[offset+i].status);
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index fc0142484001..8973fc1e9ccc 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
+#include <linux/virtio_anchor.h>
+#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>
@@ -287,6 +289,14 @@ bool xen_is_grant_dma_device(struct device *dev)
return has_iommu;
}
+bool xen_virtio_mem_acc(struct virtio_device *dev)
+{
+ if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
+ return true;
+
+ return xen_is_grant_dma_device(dev->dev.parent);
+}
+
void xen_grant_setup_dma_ops(struct device *dev)
{
struct xen_grant_dma_data *data;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 3d5a384d65f7..c16df629907e 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -205,7 +205,7 @@ static void do_poweroff(void)
static void do_reboot(void)
{
shutting_down = SHUTDOWN_POWEROFF; /* ? */
- ctrl_alt_del();
+ orderly_reboot();
}
static struct shutdown_handler shutdown_handlers[] = {
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index bef8d72a6ca6..5c0b5cb5b419 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
* shared by the frontend itself) or map the provided granted
* references onto the backing storage (buf->pages).
*
- * \param buf shared buffer which grants to be maped.
+ * \param buf shared buffer which grants to be mapped.
* \return zero on success or a negative number on failure.
*/
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
* shared by the frontend itself) or unmap the provided granted
* references.
*
- * \param buf shared buffer which grants to be unmaped.
+ * \param buf shared buffer which grants to be unmapped.
* \return zero on success or a negative number on failure.
*/
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 3fbc21466a93..84e014490950 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -159,7 +159,7 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
return XEN_PCI_ERR_op_failed;
}
- /* The value the guest needs is actually the IDT vector, not the
+ /* The value the guest needs is actually the IDT vector, not
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 597af455a522..0792fda49a15 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
{
struct xenbus_file_priv *u = filp->private_data;
struct read_buffer *rb;
- unsigned i;
+ ssize_t i;
int ret;
mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@ again:
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
i = 0;
while (i < len) {
- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 5abded97e1a7..9c09f89d8278 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -305,7 +305,7 @@ static int __init xenbus_probe_backend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- if (register_shrinker(&backend_memory_shrinker))
+ if (register_shrinker(&backend_memory_shrinker, "xen-backend"))
pr_warn("shrinker registration failed\n");
return 0;